First Push
- Model_loading.py +24 -0
- Models/TD_3k_yolo_150_wandb.pt +3 -0
- Models/invoice_yolo_100_7classes.pt +3 -0
- TD.py +66 -0
- Tr_ocr.py +45 -0
- app.py +148 -0
- info_det_ocr.py +75 -0
- req.txt +12 -0
- tsr.py +212 -0
Model_loading.py
ADDED
@@ -0,0 +1,24 @@
from transformers import TrOCRProcessor, VisionEncoderDecoderModel

# Singleton class for TrOCR model and processor
class OCRModelSingleton:
    _instance = None

    def __init__(self):
        if OCRModelSingleton._instance is not None:
            raise Exception("This class is a singleton!")
        else:
            OCRModelSingleton._instance = self
            self.processor = TrOCRProcessor.from_pretrained('microsoft/trocr-large-printed')
            self.model = VisionEncoderDecoderModel.from_pretrained('microsoft/trocr-large-printed')

    @staticmethod
    def get_instance():
        if OCRModelSingleton._instance is None:
            OCRModelSingleton()
        return OCRModelSingleton._instance

# Automatically initialize the shared processor and model
ocr_model_instance = OCRModelSingleton.get_instance()
processor_tr_ocr = ocr_model_instance.processor
trocr_model = ocr_model_instance.model
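Reviewer note: because this module body runs once per process, every file that imports it shares one TrOCR instance. A minimal sketch of that guarantee (assuming Model_loading.py is on the import path):

# Illustrative check, not part of the commit: repeated lookups resolve to the
# same shared objects, so from_pretrained() runs only once per process.
from Model_loading import OCRModelSingleton, processor_tr_ocr, trocr_model

assert OCRModelSingleton.get_instance() is OCRModelSingleton.get_instance()
assert OCRModelSingleton.get_instance().processor is processor_tr_ocr
assert OCRModelSingleton.get_instance().model is trocr_model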
Models/TD_3k_yolo_150_wandb.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:eb91d6f1cdcaa3bf0799b22c2948438ba43526693dddca794a103fb5040506ff
size 52040315
Models/invoice_yolo_100_7classes.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b31bbfff063a656ec5c7856c22cf88feae555ddd18b0984f8c85a83dc6832732
size 52065770
TD.py
ADDED
@@ -0,0 +1,66 @@
import warnings
warnings.filterwarnings("ignore")

from ultralytics import YOLO
import cv2
import numpy as np
from PIL import Image

import os

# Define a path relative to the script's location
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
model_path = os.path.join(BASE_DIR, "Models", "TD_3k_yolo_150_wandb.pt")


# Load the YOLO model
model1 = YOLO(model_path)

def TD_model1(image_arr, padding=5, confidence_threshold=0.5):
    # Convert to RGB (for PIL cropping)
    image_rgb = cv2.cvtColor(image_arr, cv2.COLOR_BGR2RGB)

    # Perform object detection
    results = model1.predict(image_arr)

    # Check if any tables were detected
    if len(results[0].boxes) == 0:
        return []  # No tables detected

    # Extract bounding boxes and confidence scores
    boxes = results[0].boxes.xyxy.cpu().numpy()  # Bounding boxes
    confidences = results[0].boxes.conf.cpu().numpy()  # Confidence scores

    # Filter predictions by confidence threshold
    valid_boxes = []
    for i, box in enumerate(boxes):
        if confidences[i] >= confidence_threshold:
            valid_boxes.append((box, confidences[i]))

    # If no valid boxes, return an empty list
    if not valid_boxes:
        return []

    # Sort the boxes by confidence in descending order
    valid_boxes = sorted(valid_boxes, key=lambda x: x[1], reverse=True)

    # Crop the image based on the valid bounding boxes
    cropped_images = []
    image_pil = Image.fromarray(image_rgb)  # Convert NumPy array to PIL image

    for i, (box, conf) in enumerate(valid_boxes):
        x1, y1, x2, y2 = box
        # Add padding to the bounding box coordinates (with boundary checks)
        x1 = max(0, x1 - padding)
        y1 = max(0, y1 - padding)
        x2 = min(image_pil.width, x2 + padding)
        y2 = min(image_pil.height, y2 + padding)

        cropped_image = image_pil.crop((x1, y1, x2, y2))  # Crop using the bounding box coordinates
        cropped_images.append(cropped_image)

    return cropped_images
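A quick standalone smoke test for TD_model1 (illustrative only; "invoice.jpg" is a placeholder path):

import cv2
from TD import TD_model1

image_arr = cv2.imread("invoice.jpg")  # BGR array, the layout TD_model1 assumes
tables = TD_model1(image_arr, padding=5, confidence_threshold=0.5)
print(f"Detected {len(tables)} table(s)")
for i, table in enumerate(tables):
    table.save(f"table_{i}.png")  # each element is a PIL.Image crop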
Tr_ocr.py
ADDED
@@ -0,0 +1,45 @@
## TrOCR
import numpy as np
from tqdm.auto import tqdm
from Model_loading import processor_tr_ocr, trocr_model
import torch
from PIL import Image


def apply_TRocr(cropped_image, cell_coordinates):
    """
    Apply TrOCR to the given cell coordinates.

    Parameters:
    - cropped_image: PIL image of the detected table.
    - cell_coordinates: List of coordinates for cropping cells from the image.

    Returns:
    - data: A list of extracted text for each cell.
    """
    data = []  # Initialize a list to store data for each row

    for idx, row in enumerate(tqdm(cell_coordinates)):
        row_text = []  # List to store text for the current row
        for cell in row["cells"]:
            # Crop cell out of the image
            cell_image = np.array(cropped_image.crop(cell["cell"]))

            # Convert the cell image to RGB format for TrOCR processing
            pil_image = Image.fromarray(cell_image).convert("RGB")
            pixel_values = processor_tr_ocr(images=pil_image, return_tensors="pt").pixel_values

            # Generate text predictions using the TrOCR model
            generated_ids = trocr_model.generate(pixel_values)
            text = processor_tr_ocr.batch_decode(generated_ids, skip_special_tokens=True)[0]

            if not text:
                text = "NAN"  # Use "NAN" if no text is detected

            row_text.append(text)

        # Append the row's text list to the data list
        data.append(row_text)

    return data
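This mirrors apply_ocr in tsr.py below, swapping EasyOCR for TrOCR; app.py calls this variant for cell text. A hedged sketch of shaping its output into a table, assuming (hypothetically; it depends on the invoice layout) that the first OCR row holds the column headers:

import pandas as pd

# data has the shape apply_TRocr returns: one list of strings per table row.
data = [["Item", "Qty", "Price"], ["Widget", "2", "9.99"]]
df = pd.DataFrame(data[1:], columns=data[0]) if data else pd.DataFrame()
print(df)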
app.py
ADDED
@@ -0,0 +1,148 @@
import streamlit as st
from PIL import Image
import numpy as np
import io
import pandas as pd
import cv2

import os
import subprocess

try:
    import ultralytics
except ImportError:
    subprocess.run(["pip", "install", "ultralytics"])

# Import your models and utility functions
from TD import TD_model1
from tsr import TSR, get_cell_coordinates_by_row, apply_ocr
from info_det_ocr import info_det_and_ocr
from Tr_ocr import apply_TRocr

# Set page config
st.set_page_config(
    page_title="INDRA OCR",
    page_icon="🧾",
    layout="wide",
)

# Customize background color and font color
st.markdown("""
    <style>
    body {
        background-color: blue; /* Change to your desired background color */
        color: #333333; /* Change to your desired font color */
    }
    </style>
""", unsafe_allow_html=True)

# App Header
st.markdown(
    """
    <style>
    .main-title {
        font-size: 3.5rem; /* Increased font size */
        color: white;
        text-align: center;
        font-weight: bold;
        margin-bottom: 10px;
    }
    .subtitle {
        font-size: 1.2rem;
        color: #555555;
        text-align: center;
        margin-bottom: 20px;
    }
    </style>
    <div class="main-title">INDRA OCR: Intelligent Invoice Data Recognition and Automation</div>
    """,
    unsafe_allow_html=True,
)

# Create two columns for layout: one for the file uploader and another for the image and extracted data
col1, col2 = st.columns([1, 3])  # Adjust the ratio to control the width of the columns

with col1:
    # File uploader for the image (placed in the left column)
    uploaded_file = st.file_uploader(
        "Upload Your Invoice (JPG, JPEG, PNG):", type=["jpg", "jpeg", "png"], label_visibility="collapsed"
    )

with col2:
    # Display the uploaded image (right column)
    if uploaded_file is not None:
        # If a new image is uploaded, reset the session state
        if 'uploaded_image' not in st.session_state or st.session_state.uploaded_image != uploaded_file:
            st.session_state.uploaded_image = uploaded_file
            st.session_state.processed_data = None
            st.session_state.cropped_image = None
            st.session_state.cell_coordinates = None
            st.session_state.df = None

        # Process the image only if it hasn't been processed yet
        if st.session_state.processed_data is None:
            # Read the image file as a PIL image
            image = Image.open(uploaded_file)

            # Resize the image to make it smaller for display
            image.thumbnail((800, 800))  # Resize to fit within the 800px limit

            # Convert the PIL image (RGB) to the BGR channel order the detection
            # modules expect (they convert back with cv2.COLOR_BGR2RGB internally)
            image_arr = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)

            # Display the uploaded image
            st.image(image, caption="Uploaded Image", width=500)

            # Run info detection and OCR
            detected_data = info_det_and_ocr(image_arr)
            st.session_state.processed_data = detected_data

            # Run the table detection model and crop out the detected tables
            cropped_images = TD_model1(image_arr)

            if not cropped_images:
                st.warning("No table was detected in the uploaded image.")
                st.stop()

            cropped_image = cropped_images[0]  # Use the highest-confidence detection

            st.session_state.cropped_image = cropped_image

            # Run Table Structure Recognition (TSR)
            output_image, cells = TSR(cropped_image)

            # Get cell coordinates and perform OCR on table cells
            cell_coordinates = get_cell_coordinates_by_row(cells)
            st.session_state.cell_coordinates = cell_coordinates

            # Use TrOCR for extracting table data
            data = apply_TRocr(cropped_image, cell_coordinates)

            # Store the DataFrame in session state for CSV download
            st.session_state.df = pd.DataFrame(data)

        # Show extracted data if the dataframe is available
        if st.session_state.df is not None:
            st.markdown("### Extracted Data Table:")
            st.dataframe(st.session_state.df)  # Display the dataframe as a table

            # Show download button once the data has been processed
            st.markdown("### Download Extracted Data")
            # Convert DataFrame to CSV in an in-memory buffer
            csv_buffer = io.StringIO()
            st.session_state.df.to_csv(csv_buffer, index=False)

            # Get the CSV data as bytes
            csv_data = csv_buffer.getvalue().encode('utf-8')

            # Create a download button in Streamlit to download the CSV file
            st.download_button(
                label="Download CSV",
                data=csv_data,
                file_name="extracted_table_data.csv",
                mime="text/csv",
            )

# Footer
st.markdown(
    """
    <div class="footer">© 2024 INDRA OCR | Designed for efficient invoice processing</div>
    """,
    unsafe_allow_html=True,
)
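Reviewer note: detected_data from info_det_and_ocr is saved to st.session_state.processed_data but never rendered, so the key-value fields stay invisible in the UI. A hedged sketch of one way to surface them, written as a fragment that would slot in after the table display (st.json and st.session_state.get are standard Streamlit APIs):

import streamlit as st

# Hypothetical addition, not part of this commit: show the key-value fields
# captured by info_det_and_ocr alongside the extracted table.
if st.session_state.get("processed_data"):
    st.markdown("### Detected Invoice Fields")
    st.json(st.session_state.processed_data)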
info_det_ocr.py
ADDED
@@ -0,0 +1,75 @@
import torch
import cv2
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from ultralytics import YOLO
from Model_loading import processor_tr_ocr, trocr_model

import os

# Define a path relative to the script's location
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
model_path = os.path.join(BASE_DIR, "Models", "invoice_yolo_100_7classes.pt")


# Load the YOLO model
info_model = YOLO(model_path)


# Function for performing OCR with TrOCR
def ocr_with_transformer(image):
    # Convert the image to RGB and process it for TrOCR
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    pixel_values = processor_tr_ocr(images=image_rgb, return_tensors="pt").pixel_values
    generated_ids = trocr_model.generate(pixel_values)

    # Decode the generated text from the model
    generated_text = processor_tr_ocr.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return generated_text

# Main function for detection, cropping, and OCR
def info_det_and_ocr(image_arr, conf_threshold=0.5):
    # Initialize the dictionary to store detected class names and text
    detected_data = {}

    # Perform inference using YOLO
    results = info_model.predict(image_arr)

    boxes = results[0].boxes.xyxy.cpu().numpy()  # Bounding boxes
    scores = results[0].boxes.conf.cpu().numpy()  # Confidence scores
    class_ids = results[0].boxes.cls.cpu().numpy().astype(int)  # Class IDs
    class_names = results[0].names  # Class names dictionary

    # Iterate over the detected objects
    for i, box in enumerate(boxes):
        score = scores[i]
        if score < conf_threshold:
            continue  # Skip low-confidence detections

        x1, y1, x2, y2 = map(int, box)  # Convert bounding box to integers
        class_id = class_ids[i]
        class_name = class_names[class_id]

        # Crop the detected region
        cropped_region = image_arr[y1:y2, x1:x2]

        # Perform OCR using TrOCR on the cropped region
        detected_text = ocr_with_transformer(cropped_region)

        # Save the detected text into the dictionary with the class name as key
        detected_data[class_name] = detected_text

        # Optional: Display cropped image for debugging
        #plt.imshow(cv2.cvtColor(cropped_region, cv2.COLOR_BGR2RGB))
        #plt.title(f"Detected: {class_name}")
        #plt.show()

    return detected_data

# You can now call this function by passing the image as a NumPy array.
# Example:
# extracted_data = info_det_and_ocr(image_arr)
# print(extracted_data)
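A standalone smoke test for this module (illustrative; "invoice.jpg" is a placeholder, and the printed field names depend on the 7 classes baked into the YOLO checkpoint):

import cv2
from info_det_ocr import info_det_and_ocr

image_arr = cv2.imread("invoice.jpg")  # BGR, as ocr_with_transformer expects
fields = info_det_and_ocr(image_arr, conf_threshold=0.5)
for name, text in fields.items():
    print(f"{name}: {text}")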
req.txt
ADDED
@@ -0,0 +1,12 @@

streamlit
Pillow
numpy
pandas
transformers
ultralytics
opencv-python-headless
tqdm
torch
torchvision
easyocr
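Reviewer note: these install with pip install -r req.txt. Renaming the file to requirements.txt would let Hugging Face Spaces install the dependencies automatically at build time, which would also make the runtime pip fallback for ultralytics in app.py unnecessary.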
tsr.py
ADDED
@@ -0,0 +1,212 @@
import torch
import cv2
from PIL import Image, ImageDraw
import numpy as np
from io import BytesIO
from ultralytics import YOLO
from torchvision import transforms
from transformers import TableTransformerForObjectDetection
import csv
import easyocr
from tqdm.auto import tqdm


device = "cuda" if torch.cuda.is_available() else "cpu"
# new v1.1 checkpoints require no timm anymore
structure_model = TableTransformerForObjectDetection.from_pretrained("microsoft/table-structure-recognition-v1.1-all")
structure_model.to(device)


class MaxResize(object):
    def __init__(self, max_size=800):
        self.max_size = max_size

    def __call__(self, image):
        width, height = image.size
        current_max_size = max(width, height)
        scale = self.max_size / current_max_size
        resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

        return resized_image

detection_transform = transforms.Compose([
    MaxResize(800),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

# for output bounding box post-processing
def box_cxcywh_to_xyxy(x):
    x_c, y_c, w, h = x.unbind(-1)
    b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)]
    return torch.stack(b, dim=1)


def rescale_bboxes(out_bbox, size):
    img_w, img_h = size
    b = box_cxcywh_to_xyxy(out_bbox)
    b = b * torch.tensor([img_w, img_h, img_w, img_h], dtype=torch.float32)
    return b


# update id2label to include "no object"
id2label = structure_model.config.id2label
id2label[len(structure_model.config.id2label)] = "no object"


def outputs_to_objects(outputs, img_size, id2label):
    m = outputs.logits.softmax(-1).max(-1)
    pred_labels = list(m.indices.detach().cpu().numpy())[0]
    pred_scores = list(m.values.detach().cpu().numpy())[0]
    pred_bboxes = outputs['pred_boxes'].detach().cpu()[0]
    pred_bboxes = [elem.tolist() for elem in rescale_bboxes(pred_bboxes, img_size)]

    objects = []
    for label, score, bbox in zip(pred_labels, pred_scores, pred_bboxes):
        class_label = id2label[int(label)]
        if not class_label == 'no object':
            objects.append({'label': class_label, 'score': float(score),
                            'bbox': [float(elem) for elem in bbox]})

    return objects

structure_transform = transforms.Compose([
    MaxResize(1000),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

def TSR(cropped_image):
    pixel_values = structure_transform(cropped_image).unsqueeze(0)
    pixel_values = pixel_values.to(device)

    # forward pass
    with torch.no_grad():
        outputs = structure_model(pixel_values)

    # update id2label to include "no object"
    structure_id2label = structure_model.config.id2label
    structure_id2label[len(structure_id2label)] = "no object"

    cells = outputs_to_objects(outputs, cropped_image.size, structure_id2label)
    #print(cells)

    cropped_table_visualized = cropped_image.copy()
    draw = ImageDraw.Draw(cropped_table_visualized)

    for cell in cells:
        draw.rectangle(cell["bbox"], outline="red")

    return cropped_table_visualized, cells

############# Visualizing rows and columns on the cropped image


def get_cell_coordinates_by_row(table_data):
    # Extract rows and columns
    rows = [entry for entry in table_data if entry['label'] == 'table row']
    columns = [entry for entry in table_data if entry['label'] == 'table column']

    # Sort rows and columns by their Y and X coordinates, respectively
    rows.sort(key=lambda x: x['bbox'][1])
    columns.sort(key=lambda x: x['bbox'][0])

    # Function to find cell coordinates
    def find_cell_coordinates(row, column):
        # Use the row's Y coordinates for the cell's top and bottom
        cell_ymin = row['bbox'][1]
        cell_ymax = row['bbox'][3]  # Adjust as needed for better height

        # Use the column's X coordinates for the cell's left and right
        cell_xmin = column['bbox'][0]
        cell_xmax = column['bbox'][2]

        return [cell_xmin, cell_ymin, cell_xmax, cell_ymax]

    # Generate cell coordinates and count cells in each row
    cell_coordinates = []

    for row in rows:
        row_cells = []
        for column in columns:
            cell_bbox = find_cell_coordinates(row, column)
            row_cells.append({'column': column['bbox'], 'cell': cell_bbox})

        # Sort cells in the row by X coordinate
        row_cells.sort(key=lambda x: x['column'][0])

        # Append row information to cell_coordinates
        cell_coordinates.append({'row': row['bbox'], 'cells': row_cells, 'cell_count': len(row_cells)})

    # Sort rows from top to bottom
    cell_coordinates.sort(key=lambda x: x['row'][1])

    # Debugging: Print intermediate results
    #for i, row_info in enumerate(cell_coordinates):
    #    print(f"Row {i}: {row_info['row']}, Cell Count: {row_info['cell_count']}")
    #    for cell in row_info['cells']:
    #        print(f"  Cell Bounding Box: {cell['cell']}, Column Bounding Box: {cell['column']}")

    return cell_coordinates


# Initialize the EasyOCR reader
reader = easyocr.Reader(['en'])  # this needs to run only once to load the model into memory

def apply_ocr(cell_coordinates, cropped_image):
    # Initialize a list to store data for each row
    data = []

    for idx, row in enumerate(tqdm(cell_coordinates)):
        row_text = []  # List to store text for the current row
        for cell in row["cells"]:
            # Crop cell out of the image
            cell_image = np.array(cropped_image.crop(cell["cell"]))
            # Apply OCR
            result = reader.readtext(np.array(cell_image))
            if len(result) > 0:
                # Extract and join the detected text
                text = " ".join([x[1] for x in result])
                row_text.append(text)
            else:
                row_text.append("NAN")  # Append "NAN" if no text is detected

        # Append the row's text list to the data list
        data.append(row_text)

    return data


# Print the extracted text for each row
"""for idx, row_data in enumerate(data):
    print(f"Row {idx + 1}: {row_data}")"""

def op_csv(data):
    # Define the output CSV file path
    output_csv_file = 'extract.csv'

    # Write the data to a CSV file
    try:
        with open(output_csv_file, mode='w', newline='', encoding='utf-8') as file:
            writer = csv.writer(file)

            for row in data:
                writer.writerow(row)  # Write each row individually

        print(f"Data successfully written to {output_csv_file}")
        return output_csv_file
    except Exception as e:
        print(f"An error occurred: {e}")
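Putting the modules together outside Streamlit, a minimal end-to-end sketch using the EasyOCR path ("invoice.jpg" is a placeholder; assumes at least one table is detected):

import cv2
from TD import TD_model1
from tsr import TSR, get_cell_coordinates_by_row, apply_ocr, op_csv

image_arr = cv2.imread("invoice.jpg")               # placeholder input path
tables = TD_model1(image_arr)
assert tables, "no table detected"

cropped_image = tables[0]                           # highest-confidence crop
annotated, cells = TSR(cropped_image)               # cells: row/column detections
cell_coordinates = get_cell_coordinates_by_row(cells)
data = apply_ocr(cell_coordinates, cropped_image)   # EasyOCR on each cell
op_csv(data)                                        # writes extract.csv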