# dp-bench / src / layout_evaluation.py
from rapidfuzz import fuzz
from src.geometry import is_included


def _normalize_coords(coordinates):
"""Convert coordinates to list of [x, y] pairs.
Accepts either [{"x": x, "y": y}, ...] or [[x, y], ...].
"""
return [
[coord["x"], coord["y"]] if isinstance(coord, dict) else list(coord)
for coord in coordinates
]
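
# Illustrative sketch (hypothetical values): both accepted coordinate formats
# normalize to the same [[x, y], ...] structure.
#
#     _normalize_coords([{"x": 0, "y": 0}, {"x": 10, "y": 5}])
#     # -> [[0, 0], [10, 5]]
#     _normalize_coords([(0, 0), (10, 5)])
#     # -> [[0, 0], [10, 5]]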


def calc_nid(
    gt_text: str,
    pred_text: str,
) -> float:
    """Calculate the normalized InDel (NID) score between the GT and pred text.

    Args:
        gt_text (str): The GT text to compare.
        pred_text (str): The pred text to compare.

    Returns:
        float: The NID score between the GT and pred text, on a 0-100 scale.
    """
    # If both GT and pred are empty, the texts match perfectly. Return 100
    # (not 1) so the empty case sits on the same 0-100 scale as fuzz.ratio,
    # which evaluate_layout later normalizes by 100.
    if len(gt_text) == 0 and len(pred_text) == 0:
        score = 100
    # If pred is empty while GT is not, return 0
    elif len(gt_text) > 0 and len(pred_text) == 0:
        score = 0
    else:
        score = fuzz.ratio(gt_text, pred_text)
    return score
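
# Illustrative sketch of the score scale (values assume rapidfuzz's 0-100
# InDel-based ratio; the strings are hypothetical):
#
#     calc_nid("hello world", "hello world")  # -> 100.0 (exact match)
#     calc_nid("hello world", "hello wurld")  # -> ~90.9 (one substitution)
#     calc_nid("", "")                        # -> 100   (both empty)
#     calc_nid("hello", "")                   # -> 0     (missing prediction)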


def extract_text(
    gt_data: dict,
    pred_data: dict,
    ignore_classes: list = None,
    strings_to_remove: list = None,
    filter_by_gt_area: bool = True,
) -> tuple:
    """Extract text from both GT and prediction data, optionally filtering out
    predictions that fall within GT ignored regions.

    Args:
        gt_data (dict): The GT data to extract text from.
        pred_data (dict): The prediction data to extract text from. May be None.
        ignore_classes (list): Classes to ignore during extraction. Defaults to
            an empty list.
        strings_to_remove (list): Strings to remove from the extracted text.
            Defaults to removing newlines.
        filter_by_gt_area (bool): If True, filter out prediction text within GT
            ignored regions. If False, only filter by category. Defaults to True.

    Returns:
        tuple: (gt_text, pred_text) - The concatenated text extracted from GT
            and predictions.
    """
    # Avoid mutable default arguments, which are shared across calls
    if ignore_classes is None:
        ignore_classes = []
    if strings_to_remove is None:
        strings_to_remove = ["\n"]
    ignore_classes = [x.lower() for x in ignore_classes]
# Collect GT ignored regions' coordinates (only if spatial filtering is enabled)
gt_ignored_regions = []
if filter_by_gt_area:
for elem in gt_data["elements"]:
if elem["category"].lower() in ignore_classes:
coords = _normalize_coords(elem["coordinates"])
gt_ignored_regions.append(coords)
# Extract GT text (excluding ignored classes)
gt_text = ""
for elem in gt_data["elements"]:
if elem["category"].lower() in ignore_classes:
continue
gt_text += elem["content"]["text"] + ' '
# Extract prediction text (excluding ignored classes AND optionally elements within GT ignored regions)
pred_text = ""
if pred_data is not None:
for elem in pred_data["elements"]:
if elem["category"].lower() in ignore_classes:
continue
# Check if this prediction element is included in any GT ignored region (only if enabled)
if filter_by_gt_area:
elem_coords = _normalize_coords(elem["coordinates"])
is_in_ignored_region = False
for ignored_region in gt_ignored_regions:
if is_included(ignored_region, elem_coords, soft=0.2):
is_in_ignored_region = True
break
if is_in_ignored_region:
continue
pred_text += elem["content"]["text"] + ' '
# Remove unwanted strings from both texts
for string in strings_to_remove:
gt_text = gt_text.replace(string, '')
pred_text = pred_text.replace(string, '')
return gt_text, pred_text
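
# Data-shape sketch, inferred from the accesses above: each element carries
# "category", "coordinates", and "content"["text"]. Values are hypothetical.
#
#     gt_data = {"elements": [
#         {"category": "paragraph",
#          "coordinates": [{"x": 0, "y": 0}, {"x": 100, "y": 0},
#                          {"x": 100, "y": 20}, {"x": 0, "y": 20}],
#          "content": {"text": "Hello\nworld"}},
#     ]}
#     gt_text, pred_text = extract_text(gt_data, None)
#     # -> ("Helloworld ", "")  (newline stripped; trailing space from concatenation)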


def evaluate_layout(
    gt: dict,
    pred: dict,
    ignore_classes: list = None,
    filter_by_gt_area: bool = True,
) -> tuple:
    """Evaluate the layout of the GT against the pred.

    Args:
        gt (dict): The GT layouts to evaluate, keyed by image.
        pred (dict): The predicted layouts to evaluate against, keyed by image.
        ignore_classes (list): Classes to ignore during evaluation. Defaults to
            an empty list.
        filter_by_gt_area (bool): If True, filter out prediction text within GT
            ignored regions. If False, only filter by category. Defaults to True.

    Returns:
        tuple: (avg_score, per_image_scores) - The average layout score in
            [0, 1] and a per-image scores dict.
    """
    # Avoid a mutable default argument, which is shared across calls
    if ignore_classes is None:
        ignore_classes = []
    scores = []
per_image_scores = {}
for image_key in gt.keys():
gt_data = gt.get(image_key)
pred_data = pred.get(image_key)
gt_text, pred_text = extract_text(gt_data, pred_data, ignore_classes, filter_by_gt_area=filter_by_gt_area)
score = calc_nid(gt_text, pred_text)
scores.append(score)
        # fuzz.ratio scores on a 0-100 scale; normalize to [0, 1]
        per_image_scores[image_key] = {
            "nid_score": score / 100.0
        }
    # Average over images, normalizing from the 0-100 scale to [0, 1]
    if len(scores) > 0:
        avg_score = sum(scores) / (len(scores) * 100)
    else:
        avg_score = 0
return avg_score, per_image_scores
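
# Usage sketch (hypothetical keys and data; both dicts mirror the structure
# consumed by extract_text above):
#
#     gt = {"page_001.png": {"elements": [...]}}
#     pred = {"page_001.png": {"elements": [...]}}
#     avg, per_image = evaluate_layout(gt, pred, ignore_classes=["footnote"])
#     # avg is in [0, 1]; per_image["page_001.png"]["nid_score"] likewise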