"""
Most of the code in this file is derived from the paper "Image-based table recognition: data, model, and evaluation".
The original paper can be accessed at: https://arxiv.org/pdf/1911.10683.
The code is available at: https://github.com/ibm-aur-nlp/PubTabNet.
A slight modification has been added to the code to improve the evaluation process.
"""
import re
import distance
import numpy as np
from concurrent.futures import ThreadPoolExecutor
try:
from scipy.optimize import linear_sum_assignment
except Exception as _e:
linear_sum_assignment = None
from lxml import etree, html
from collections import deque
from apted.helpers import Tree
from apted import APTED, Config
class TableTree(Tree):
    """Table tree node consumed by the APTED tree-edit-distance algorithm."""

    def __init__(self, tag, colspan=None, rowspan=None, content=None, *children):
        # colspan / rowspan / content are only meaningful for 'td' nodes;
        # structural nodes (table, tr, ...) carry None for all three.
        self.tag = tag
        self.colspan = colspan
        self.rowspan = rowspan
        self.content = content
        self.children = list(children)

    def bracket(self):
        """Serialize this subtree using APTED bracket notation."""
        if self.tag == 'td':
            node_repr = '"tag": %s, "colspan": %d, "rowspan": %d, "text": %s' % (
                self.tag, self.colspan, self.rowspan, self.content)
        else:
            node_repr = '"tag": %s' % self.tag
        pieces = [node_repr]
        pieces.extend(child.bracket() for child in self.children)
        return "{%s}" % "".join(pieces)

    def _format_label(self):
        """Build the one-line display label for this node (spans + text preview for td)."""
        if self.tag != 'td':
            return self.tag
        label = f"{self.tag}"
        span_parts = []
        if self.colspan and self.colspan > 1:
            span_parts.append(f"colspan={self.colspan}")
        if self.rowspan and self.rowspan > 1:
            span_parts.append(f"rowspan={self.rowspan}")
        if span_parts:
            label += f" [{', '.join(span_parts)}]"
        preview = ''
        if self.content:
            text = ''.join(self.content) if isinstance(self.content, list) else str(self.content)
            # Truncate long cell text so the tree stays readable
            preview = text[:30] + '...' if len(text) > 30 else text
        if preview:
            label += f' "{preview}"'
        return label

    def visualize(self, indent=0, prefix="", is_last=True):
        """Render the subtree as an ASCII-art tree.

        Args:
            indent (int): depth of this node (0 == root, printed without a branch).
            prefix (str): branch prefix inherited from the parent.
            is_last (bool): whether this node is the last child of its parent.
        Returns:
            str: multi-line ASCII visualization of the subtree.
        """
        label = self._format_label()
        if indent == 0:
            rendered = f"{label}\n"
        else:
            branch = "└── " if is_last else "├── "
            rendered = f"{prefix}{branch}{label}\n"
        last_index = len(self.children) - 1
        for pos, child in enumerate(self.children):
            # Children of the root carry no prefix; deeper levels extend the
            # parent's prefix with either a spacer or a vertical rule.
            next_prefix = "" if indent == 0 else prefix + ("    " if is_last else "│   ")
            rendered += child.visualize(indent + 1, next_prefix, pos == last_index)
        return rendered
class CustomConfig(Config):
    """APTED cost configuration comparing tags, spans and (optionally) cell text."""

    @staticmethod
    def maximum(*sequences):
        """Length of the longest sequence — the upper bound on edit distance."""
        return max(len(seq) for seq in sequences)

    def normalized_distance(self, *sequences):
        """Levenshtein distance between sequences, scaled to the range [0, 1]."""
        return float(distance.levenshtein(*sequences)) / self.maximum(*sequences)

    def rename(self, node1, node2):
        """Rename cost: 1 when tag or span attributes differ, else the
        normalized text distance for 'td' cells (0 for structural nodes)."""
        structurally_equal = (
            node1.tag == node2.tag
            and node1.colspan == node2.colspan
            and node1.rowspan == node2.rowspan
        )
        if not structurally_equal:
            return 1.
        if node1.tag == 'td' and (node1.content or node2.content):
            return self.normalized_distance(node1.content, node2.content)
        return 0.
class TEDSEvaluator(object):
    """Tree-Edit-Distance-based Similarity (TEDS) evaluator for HTML tables."""

    def __init__(
        self,
        structure_only=False,
        n_jobs=1,
        ignore_nodes=None,
    ):
        """
        Args:
            structure_only (bool): if True, ignore cell text and compare
                table structure only (TEDS-S).
            n_jobs (int): reserved for parallel evaluation; must be >= 1.
            ignore_nodes (iterable|None): tag names stripped from both trees
                before comparison (e.g. ('b', 'i')).
        """
        # Fixed: the condition is n_jobs >= 1, message previously claimed "> 1".
        assert isinstance(n_jobs, int) and (n_jobs >= 1), (
            'n_jobs must be an integer greater than or equal to 1'
        )
        self.structure_only = structure_only
        self.n_jobs = n_jobs
        self.ignore_nodes = ignore_nodes
        # Scratch buffer used by tokenize()/load_html_tree()
        self.__tokens__ = []

    def tokenize(self, node):
        """Tokenizes table cells into a flat list of tag and character tokens."""
        self.__tokens__.append('<%s>' % node.tag)
        if node.text is not None:
            self.__tokens__ += list(node.text)
        for n in node.getchildren():
            self.tokenize(n)
        if node.tag != 'unk':
            # Fixed: closing-tag token was mangled to '%s>' (upstream uses '</%s>').
            self.__tokens__.append('</%s>' % node.tag)
        if node.tag != 'td' and node.tail is not None:
            self.__tokens__ += list(node.tail)

    def load_html_tree(self, node, parent=None):
        """Converts an lxml HTML tree into the TableTree format required by APTED.

        This version treats nested tables as separate tree nodes rather than
        flattening them into the containing cell's content.
        (Removed an erroneous `global __tokens__` — only the instance
        attribute self.__tokens__ is ever used.)
        """
        if node.tag == 'td':
            # Check if the td contains nested table(s)
            nested_tables = [n for n in node.getchildren() if n.tag == 'table']
            if nested_tables:
                # td has nested table(s): build the td node from the non-table
                # content, then attach each nested table as a child node.
                if self.structure_only:
                    cell = []
                else:
                    self.__tokens__ = []
                    if node.text is not None:
                        self.__tokens__ += list(node.text)
                    for n in node.getchildren():
                        if n.tag != 'table':
                            # NOTE(review): tokenize() already records the tail of
                            # non-td tags, so this may double-count tails — kept
                            # as in the original to preserve scoring behavior.
                            self.tokenize(n)
                            if n.tail is not None:
                                self.__tokens__ += list(n.tail)
                    cell = self.__tokens__.copy() if self.__tokens__ else []
                new_node = TableTree(
                    node.tag,
                    int(node.attrib.get('colspan', '1')),
                    int(node.attrib.get('rowspan', '1')),
                    cell, *deque()
                )
                if parent is not None:
                    parent.children.append(new_node)
                # Add nested tables as children of the td node
                for table in nested_tables:
                    self.load_html_tree(table, new_node)
            else:
                if self.structure_only:
                    cell = []
                else:
                    self.__tokens__ = []
                    self.tokenize(node)
                    # Drop the enclosing <td>/</td> tokens
                    cell = self.__tokens__[1:-1].copy()
                new_node = TableTree(
                    node.tag,
                    int(node.attrib.get('colspan', '1')),
                    int(node.attrib.get('rowspan', '1')),
                    cell, *deque()
                )
                if parent is not None:
                    parent.children.append(new_node)
        else:
            # Structural node (table, tr, thead, ...): no spans or content
            new_node = TableTree(node.tag, None, None, None, *deque())
            if parent is not None:
                parent.children.append(new_node)
            for n in node.getchildren():
                self.load_html_tree(n, new_node)
        if parent is None:
            return new_node

    def evaluate(self, pred, true):
        """Computes the TEDS score between the prediction and the ground truth
        of a given sample.

        Args:
            pred (str): predicted HTML containing a <body> with table(s).
            true (str): ground-truth HTML containing a <body> with table(s).
        Returns:
            float: similarity in [0, 1]; 0.0 when either input is empty or
                has no table under <body>.
        """
        if (not pred) or (not true):
            return 0.0
        parser = html.HTMLParser(remove_comments=True, encoding='utf-8')
        pred = html.fromstring(pred, parser=parser)
        true = html.fromstring(true, parser=parser)
        if pred.xpath('body/table') and true.xpath('body/table'):
            pred_tables = pred.xpath('body/table')
            true_tables = true.xpath('body/table')
            # Legacy single-table comparison when both sides have exactly one
            # table; otherwise compare the whole <body> wrappers structurally.
            if len(pred_tables) == 1 and len(true_tables) == 1:
                pred = pred_tables[0]
                true = true_tables[0]
            else:
                pred = pred.xpath('body')[0]
                true = true.xpath('body')[0]
            if self.ignore_nodes:
                etree.strip_tags(pred, *self.ignore_nodes)
                etree.strip_tags(true, *self.ignore_nodes)
            n_nodes_pred = len(pred.xpath('.//*'))
            n_nodes_true = len(true.xpath('.//*'))
            n_nodes = max(n_nodes_pred, n_nodes_true)
            if n_nodes == 0:
                # Both sides are empty tables: identical, and guard against
                # the ZeroDivisionError the original would raise here.
                return 1.0
            tree_pred = self.load_html_tree(pred)
            tree_true = self.load_html_tree(true)
            # Renamed from `distance` to avoid shadowing the imported module.
            edit_distance = APTED(
                tree_pred, tree_true, CustomConfig()
            ).compute_edit_distance()
            return 1.0 - (float(edit_distance) / n_nodes)
        else:
            return 0.0
def get_table_contents(text):
    """Return every ``<table>...</table>`` fragment found in *text*.

    Args:
        text (str): arbitrary HTML (possibly containing several tables).
    Returns:
        list[str]: the table fragments in document order; when no table tag
            is present, the whole input is returned as a single-element list
            so the caller always gets at least one candidate.
    """
    # Non-greedy + DOTALL so each match stops at its own closing tag even
    # when tables span multiple lines. (Pattern reconstructed — the literal
    # was destroyed by HTML stripping in the source.)
    table_contents = re.findall(r'<table.*?</table>', text, flags=re.DOTALL)
    if len(table_contents) == 0:
        table_contents = [text]
    return table_contents
def extract_tables(data: dict) -> str:
    """Extract and concatenate the HTML of every table element in *data*.

    Args:
        data (dict): document dict with an 'elements' list; each table
            element carries its HTML under elem['content']['html'].
    Returns:
        str: all extracted table fragments joined into one string
            (empty string when the document contains no table).
    """
    # Renamed the accumulator: the original local `html` shadowed the
    # lxml `html` module imported at file level.
    html_out = ''
    for elem in data['elements']:
        if elem['category'].lower() == 'table':
            table_html_elements = get_table_contents(elem['content']['html'])
            for table_html in table_html_elements:
                # Reconstructed: the appended expression was destroyed by
                # HTML stripping; each fragment already carries its own
                # <table> tags, so they are concatenated as-is.
                html_out += table_html
    return html_out
def extract_table_list(data: dict):
    """Collect the raw HTML snippet of every table element in a doc dict.

    Defensive by design: missing keys, None values, non-string categories and
    malformed entries are skipped silently instead of raising.

    Args:
        data (dict): document dict with an 'elements' list.
    Returns:
        list: one HTML string per table element (string payloads are appended
            directly; list payloads contribute each string member).
    """
    collected: list = []
    for item in ((data or {}).get('elements', []) or []):
        try:
            category = (item or {}).get('category', '')
            if not (isinstance(category, str) and category.lower() == 'table'):
                continue
            payload = ((item or {}).get('content', {}) or {}).get('html') or ''
            if isinstance(payload, str):
                collected.append(payload)
            elif isinstance(payload, list):
                collected.extend(part for part in payload if isinstance(part, str))
        except Exception:
            # Malformed element — ignore and keep going
            continue
    return collected
def _simplify_single_table(table_elem):
    """
    Simplify a single table element in place. (Recursive handling of nested tables.)

    Normalizes the table to a minimal structure: drops all attributes except
    colspan/rowspan on cells, unwraps thead/tbody/tfoot, converts th -> td,
    strips cosmetic inline tags, and empties cells with no visible content.

    Args:
        table_elem: lxml element - MUST be a <table> element
    Returns:
        lxml element: The simplified table element (same object, mutated)
    """
    # 1. Remove all attributes of the table element
    table_elem.attrib.clear()
    # 2. Remove thead, tbody, tfoot wrappers (keep their children).
    #    './{tag}' restricts to wrappers directly under THIS table, so the
    #    wrappers of nested tables are left for the recursive call.
    for wrapper_tag in ('thead', 'tbody', 'tfoot'):
        for wrapper in list(table_elem.xpath(f'./{wrapper_tag}')):
            parent = wrapper.getparent()
            index = list(parent).index(wrapper)
            # Move the wrapper's children up to the wrapper's position,
            # preserving their original order
            for child in list(wrapper):
                parent.insert(index, child)
                index += 1
            parent.remove(wrapper)
    # 3. Get direct tr children, excluding those belonging to nested tables:
    #    a row belongs to this table iff its closest <table> ancestor is
    #    table_elem itself.
    direct_rows = []
    for tr in table_elem.xpath('.//tr'):
        parent = tr.getparent()
        while parent is not None and parent.tag != 'table':
            parent = parent.getparent()
        if parent is table_elem:
            direct_rows.append(tr)
    # 4. Remove attributes of the tr and cells except for colspan and rowspan
    for tr in direct_rows:
        tr.attrib.clear()
        for cell in tr:
            if cell.tag in ('th', 'td'):
                # Normalize header cells to plain td
                if cell.tag == 'th':
                    cell.tag = 'td'
                # Keep only colspan and rowspan attributes
                new_attrib = {}
                if 'colspan' in cell.attrib:
                    new_attrib['colspan'] = cell.attrib['colspan']
                if 'rowspan' in cell.attrib:
                    new_attrib['rowspan'] = cell.attrib['rowspan']
                cell.attrib.clear()
                cell.attrib.update(new_attrib)
                # Recursively simplify any nested table inside this cell
                nested_tables = cell.xpath('.//table')
                for nested in nested_tables:
                    _simplify_single_table(nested)
                # Strip cosmetic wrappers but keep their text content
                unnecessary_tags = [
                    'div', 'span', 'p', 'br', 'b', 'i', 'strong', 'em', 'u',
                    'font', 'a', 'sup', 'sub', 'small', 'big', 'center',
                    'label', 'section', 'article', 'header', 'footer', 'nav'
                ]
                etree.strip_tags(cell, *unnecessary_tags)
                # Visible text of the cell including all remaining sub-tags;
                # used only as an emptiness check below
                text_content = cell.text_content()
                if text_content:
                    text_content = text_content.strip().replace('\xa0', '').replace(' ', '').strip()
                # If completely empty (no text and no nested table), reset the
                # cell to a canonical empty <td>
                if (not text_content or text_content == '') and not nested_tables:
                    for child in list(cell):
                        cell.remove(child)
                    cell.text = ''
                    cell.tail = None
    return table_elem
def preprocess_table(table_html_list):
    """
    Preprocess the HTML table list to the basic structure, recursively
    handling nested tables (TINT).

    Args:
        table_html_list (list): List of HTML table strings
    Returns:
        list: Simplified HTML strings, each wrapped in
            <html><body>...</body></html> so that TEDSEvaluator.evaluate's
            'body/table' xpath can locate the table. On failure the original
            string is kept so list lengths stay aligned with the input.
    """
    preprocessed_tables = []
    for html_string in table_html_list:
        try:
            parser = html.HTMLParser(remove_comments=True, encoding='utf-8')
            # Extract the outermost <table>...</table> if present, otherwise
            # wrap the bare content with <table> tags. (Reconstructed: the
            # tag literals and the table_end assignment were destroyed by
            # HTML stripping in the source — table_end was referenced but
            # never assigned, a guaranteed NameError.)
            table_start = html_string.find('<table')
            table_end = html_string.rfind('</table>')
            if table_start != -1 and table_end != -1:
                html_string = html_string[table_start:table_end + len('</table>')]
            else:
                html_string = f'<table>{html_string}</table>'
            root = html.fromstring(html_string, parser=parser)
            # root itself might already be the table element
            if root.tag == 'table':
                table = root
            else:
                table = root.xpath('.//table')[0]
            # Simplify the table in place
            table = _simplify_single_table(table)
            table_string = etree.tostring(table, encoding='unicode', method='html')
            # Collapse inter-tag whitespace and restore the html/body wrapper
            table_string = (
                '<html><body>'
                + re.sub(r'>\s+<', '><', table_string).strip()
                + '</body></html>'
            )
            preprocessed_tables.append(table_string)
        except Exception as e:
            print(f"[WARNING] Failed to simplify table: {e}, {html_string}")
            preprocessed_tables.append(html_string)
    return preprocessed_tables
def _compute_single_pair_score(args):
"""Helper function to compute score for a single (i, j) pair."""
i, j, gt_table, pred_table, evaluator = args
try:
s = float(evaluator.evaluate(pred_table, gt_table))
except Exception:
s = 0.0
return i, j, s
def _compute_teds_s_score(args):
"""Helper function to compute TEDS-S score for a matched pair."""
gt_table, pred_table, evaluator = args
try:
return float(evaluator.evaluate(pred_table, gt_table))
except Exception:
return 0.0
def _hungarian_match_tables_by_score(
gt_tables: list,
pred_tables: list,
evaluator: TEDSEvaluator,
min_match_score: float = 0.1,
max_workers: int = 1,
):
"""Hungarian one-to-one matching of GT and Pred tables using the provided evaluator.
Returns list of tuples: (gt_idx, pred_idx, score)
"""
matches: list = []
if not gt_tables or not pred_tables:
return matches
if linear_sum_assignment is None:
# Fallback: no scipy available, return empty
return matches
n = max(len(gt_tables), len(pred_tables))
cost = np.zeros((n, n), dtype=float)
score_mat = np.zeros((n, n), dtype=float)
# Initialize all costs to 1.0 (dummy pairs)
cost.fill(1.0)
# Build list of valid (i, j) pairs to compute
tasks = [
(i, j, gt_tables[i], pred_tables[j], evaluator)
for i in range(len(gt_tables))
for j in range(len(pred_tables))
]
# Use ThreadPoolExecutor for parallel score computation within this process
if tasks:
with ThreadPoolExecutor(max_workers=max_workers) as executor:
results = list(executor.map(_compute_single_pair_score, tasks))
for i, j, s in results:
score_mat[i, j] = s
cost[i, j] = 1.0 - s
row_ind, col_ind = linear_sum_assignment(cost)
for i, j in zip(row_ind, col_ind):
if i < len(gt_tables) and j < len(pred_tables):
s = float(score_mat[i, j])
if s >= min_match_score:
matches.append((i, j, s))
return matches
def has_table_content(html_data: str) -> bool:
    """Check if the table HTML has any content between <table> and </table>.

    Args:
        html_data (str): concatenated table HTML (as produced by extract_tables).
    Returns:
        bool: True if anything remains after removing the bare <table> and
            </table> tags, False for an effectively empty table string.
    """
    # Reconstructed: both replace() literals were destroyed by HTML stripping,
    # leaving no-op replace('', '') calls that treated any non-empty string
    # as content.
    has_content = True
    if html_data.replace('<table>', '').replace('</table>', '') == '':
        has_content = False
    return has_content
def prepare_table_dataset(gt_data, pred_data):
    """Pair up ground-truth and predicted table HTML per image key.

    Args:
        gt_data (dict): The ground truth dataset to evaluate, keyed by image.
        pred_data (dict): The predicted dataset to evaluate, keyed by image.
    Returns:
        tuple (list, list): parallel lists of GT and predicted table HTML;
            images whose ground truth contains no table content are skipped.
    """
    gt_table_list, pred_table_list = [], []
    for image_key in gt_data.keys():
        gt_tables = extract_tables(gt_data.get(image_key))
        pred_tables = extract_tables(pred_data.get(image_key))
        # Skip images whose ground truth carries no real table content
        if not has_table_content(gt_tables):
            continue
        gt_table_list.append(gt_tables)
        pred_table_list.append(pred_tables)
    return gt_table_list, pred_table_list
def evaluate_table(
    gt: dict,
    pred: dict,
    min_match_score: float = 0.0,
    max_workers: int = 1,
) -> tuple:
    """Evaluate the table of the gt against the pred.

    Tables are extracted per image, simplified, matched one-to-one via the
    Hungarian algorithm on TEDS scores, and then TEDS-S is computed for the
    matched pairs.

    Args:
        gt (dict): The gt layout to evaluate, keyed by image.
        pred (dict): The pred layout to evaluate against, keyed by image.
        min_match_score (float): minimum TEDS score for a GT/Pred pair to
            count as matched (0.0 accepts every assignment).
        max_workers (int): thread pool size for pairwise score computation.
    Returns:
        tuple(float, float, float, dict): The Table F1, TEDS, TEDS-S scores
            and per-image results.
    """
    avg_teds_score = 0.0
    avg_teds_s_score = 0.0
    # Two evaluators: structure-only for TEDS-S, structure+content for TEDS
    eval_s = TEDSEvaluator(structure_only=True)
    eval_full = TEDSEvaluator(structure_only=False)
    n_gt_tables = 0
    n_pred_tables = 0
    n_matched_tables = 0
    teds_scores = []
    teds_s_scores = []
    per_image_scores = {}
    for image_key in gt.keys():
        gt_elem = gt.get(image_key)
        pred_elem = pred.get(image_key)
        gt_tables = extract_table_list(gt_elem)
        pred_tables = extract_table_list(pred_elem)
        n_gt_tables += len(gt_tables)
        n_pred_tables += len(pred_tables)
        # Initialize per-image result
        per_image_scores[image_key] = {
            "n_gt_tables": int(len(gt_tables)),
            "n_pred_tables": int(len(pred_tables)),
            "n_matched_tables": 0,
            "matched_tables": []
        }
        # Nothing to compare on this image
        if not gt_tables and not pred_tables:
            continue
        # Simplify tables before comparison
        gt_tables = preprocess_table(gt_tables)
        pred_tables = preprocess_table(pred_tables)
        # TEDS (structure+content) for matching via Hungarian assignment
        matches = _hungarian_match_tables_by_score(
            gt_tables, pred_tables, eval_full,
            min_match_score=min_match_score,
            max_workers=max_workers,
        )
        if matches:
            n_matched_tables += len(matches)
            per_image_scores[image_key]["n_matched_tables"] = int(len(matches))
            # Extract TEDS scores from matches (matching already computed them)
            teds_scores.extend([s for _, _, s in matches])
            # Parallel computation of TEDS-S scores for matched pairs
            teds_s_tasks = [(gt_tables[i], pred_tables[j], eval_s) for i, j, _ in matches]
            with ThreadPoolExecutor(max_workers=max_workers) as executor:
                teds_s_results = list(executor.map(_compute_teds_s_score, teds_s_tasks))
            # Store results and per-image details; zip keeps pair order aligned
            for (i, j, teds_score), teds_s_score in zip(matches, teds_s_results):
                teds_s_scores.append(teds_s_score)
                per_image_scores[image_key]["matched_tables"].append({
                    "gt_table_idx": int(i),
                    "pred_table_idx": int(j),
                    "teds_score": float(teds_score),
                    "teds_s_score": float(teds_s_score)
                })
    if len(teds_scores) > 0:
        # Detection F1 over table counts: 2*matched / (gt + pred)
        table_f1_score = 2 * n_matched_tables / (n_gt_tables + n_pred_tables)
        avg_teds_score = sum(teds_scores) / len(teds_scores)
        avg_teds_s_score = sum(teds_s_scores) / len(teds_s_scores)
    else:
        print('[Warning] No matched tables found in the ground truth and prediction datasets.')
        table_f1_score = 0.0
        avg_teds_score = 0.0
        avg_teds_s_score = 0.0
    return table_f1_score, avg_teds_score, avg_teds_s_score, per_image_scores