# dp-bench/src/utils.py
import json
from pathlib import Path
from typing import List, Dict, Any


def read_file(path: str, supported_formats: tuple = (".json",)) -> dict:
    """Read a JSON file and return its parsed content as a dict.

    Args:
        path (str): the path to the file to read
        supported_formats (tuple, optional): the supported file extensions. Defaults to (".json",).

    Returns:
        dict: the JSON content of the file

    Raises:
        FileNotFoundError: if the file does not exist
        ValueError: if the file format is not supported
    """
    path = Path(path)
    # Check that the path exists and points to a regular file
    if not path.exists() or not path.is_file():
        raise FileNotFoundError(f"File {path} not found")
    # Check the extension against the supported formats (exact match, not substring)
    if path.suffix not in supported_formats:
        raise ValueError(f"File format {path.suffix} not supported")
    with path.open("r", encoding="utf-8") as file:
        file_content = json.load(file)
    return file_content


def create_directory(path: str) -> None:
    """Create a directory (and any missing parents) if it does not exist.

    Args:
        path (str): the path to the directory to create
    """
    # exist_ok=True makes this a no-op when the directory already exists
    Path(path).mkdir(parents=True, exist_ok=True)


def read_file_paths(path: str, supported_formats: tuple = (".jpg",)) -> List[Path]:
    """List the files in a directory that match the supported formats.

    Args:
        path (str): the path to the directory to scan
        supported_formats (tuple, optional): the supported file extensions. Defaults to (".jpg",).

    Returns:
        List[Path]: the matching file paths

    Raises:
        FileNotFoundError: if the directory does not exist
    """
    path = Path(path)
    # Check that the path exists and points to a directory
    if not path.exists() or not path.is_dir():
        raise FileNotFoundError(f"Directory {path} not found")
    # Collect the regular files in the directory
    file_paths = [file for file in path.iterdir() if file.is_file()]
    # Keep only the files whose extension is supported; with no formats, keep nothing
    if supported_formats:
        file_paths = [file for file in file_paths if file.suffix in supported_formats]
    else:
        file_paths = []
    return file_paths


def check_dataset_format(data: dict, image_key: str, is_prediction: bool = False) -> bool:
    """Check the format of the dataset

    Args:
        data (dict): the gt/prediction dataset to check
        image_key (str): the image name acting as the key in the dataset
        is_prediction (bool): if True, flag entries with errors instead of raising

    Returns:
        bool: True if valid, False if the entry has an error (only when is_prediction=True)

    Raises:
        ValueError: if a key is missing in the dataset (only when is_prediction=False)
    """
    # For predictions, allow entries with an "error" key - they will be skipped
    if is_prediction and "error" in data[image_key]:
        return False
    if data[image_key].get("elements") is None:
        if is_prediction:
            return False
        raise ValueError(
            f"{image_key} does not have 'elements' key in the json file. "
            "Check if you are passing the correct data."
        )
    elements = data[image_key]["elements"]
    for elem in elements:
        if elem.get("category") is None:
            if is_prediction:
                return False
            raise ValueError(
                f"An element of {image_key} does not have 'category' key in the ground truth file. "
                "Check if you are passing the correct data."
            )
        if elem.get("content") is None:
            if is_prediction:
                return False
            raise ValueError(
                f"An element of {image_key} does not have 'content' key in the ground truth file. "
                "Check if you are passing the correct data."
            )
        # The content block itself must carry a 'text' field
        content = elem["content"]
        if content.get("text") is None:
            if is_prediction:
                return False
            raise ValueError(
                f"An element of {image_key} does not have 'text' key in the ground truth file. "
                "Check if you are passing the correct data."
            )
    return True
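
# A minimal sketch of the entry shape that check_dataset_format expects, inferred
# from the checks above; the image name and field values are illustrative only:
#
# {
#     "sample.jpg": {
#         "elements": [
#             {"category": "paragraph", "content": {"text": "..."}},
#         ]
#     }
# }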


def check_data_validity(gt_data: dict, pred_data: dict) -> tuple:
    """Check the validity of the ground truth and prediction data

    Args:
        gt_data (dict): the ground truth data
        pred_data (dict): the prediction data

    Returns:
        tuple: (valid_keys, error_keys, missing_keys) - lists of valid, error, and missing prediction keys

    Raises:
        ValueError: if the ground truth or prediction data is invalid
    """
    if not gt_data:
        raise ValueError("Ground truth data is empty")
    if not pred_data:
        raise ValueError("Prediction data is empty")
    # Check ground truth format (must be valid)
    for image_key in gt_data.keys():
        check_dataset_format(gt_data, image_key, is_prediction=False)
    # Check prediction format (allow errors); keys are driven by the ground truth
    valid_keys = []
    error_keys = []
    missing_keys = []
    for image_key in gt_data.keys():
        pred_elem = pred_data.get(image_key)
        if pred_elem is None:
            missing_keys.append(image_key)
        elif check_dataset_format(pred_data, image_key, is_prediction=True):
            valid_keys.append(image_key)
        else:
            error_keys.append(image_key)
    return valid_keys, error_keys, missing_keys
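
# Shape of the returned tuple, with illustrative image names:
#   valid_keys   -> e.g. ["page_001.jpg", ...]  predictions that passed the format check
#   error_keys   -> e.g. ["page_007.jpg", ...]  predictions with an "error" key or malformed
#   missing_keys -> e.g. ["page_012.jpg", ...]  ground-truth keys absent from the predictions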


def _merge_bboxes(bboxes: List[List[Dict[str, float]]]) -> List[float]:
    """
    Merge multiple bounding boxes into a single union bbox.

    Args:
        bboxes: List of bboxes in 4-corner format
            [{'x': x1, 'y': y1}, {'x': x2, 'y': y1}, {'x': x2, 'y': y2}, {'x': x1, 'y': y2}]
            (corners ordered top-left, top-right, bottom-right, bottom-left)

    Returns:
        Union bbox covering all input boxes: [x1_min, y1_min, x2_max, y2_max]
    """
    if not bboxes:
        return [0.0, 0.0, 0.0, 0.0]
    # Rely on the documented corner order: x1/y1 come from the top edge,
    # x2/y2 from the bottom corners
    x1_min = min(bbox[0]['x'] for bbox in bboxes)
    y1_min = min(bbox[1]['y'] for bbox in bboxes)
    x2_max = max(bbox[2]['x'] for bbox in bboxes)
    y2_max = max(bbox[3]['y'] for bbox in bboxes)
    return [x1_min, y1_min, x2_max, y2_max]
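
# Worked example with hypothetical coordinates: two 4-corner boxes merge into
# their union in [x1, y1, x2, y2] form:
#   _merge_bboxes([
#       [{'x': 0, 'y': 0}, {'x': 10, 'y': 0}, {'x': 10, 'y': 5}, {'x': 0, 'y': 5}],
#       [{'x': 5, 'y': 3}, {'x': 20, 'y': 3}, {'x': 20, 'y': 12}, {'x': 5, 'y': 12}],
#   ])  # -> [0, 0, 20, 12]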


def preprocess_merged_tables(data: Dict[str, Any]) -> Dict[str, Any]:
    """
    Preprocess data to merge tables according to merged_tables information.

    For each document with merged_tables:
    1. Remove child tables from the elements list
    2. Keep the representative (first) table
    3. Replace the representative table's HTML with merged_html
    4. Update the representative table's bbox to the union of all child bboxes

    Args:
        data: Dictionary of document data in OAC format

    Returns:
        Preprocessed data with merged tables
    """
    processed_data = {}
    for doc_key, doc in data.items():
        if not isinstance(doc, dict):
            processed_data[doc_key] = doc
            continue
        # Deep copy to avoid modifying the original
        doc_copy = json.loads(json.dumps(doc))
        merged_tables = doc_copy.get("merged_tables", [])
        if not merged_tables:
            # No merging needed
            processed_data[doc_key] = doc_copy
            continue
        elements = doc_copy.get("elements", [])
        if not elements:
            processed_data[doc_key] = doc_copy
            continue
        # Build an element "page-id" -> index mapping for quick lookup
        # Format: "page-id" (e.g., "1-7") -> element index
        key_to_idx = {}
        for idx, elem in enumerate(elements):
            page = elem.get("page")
            elem_id = elem.get("id")
            if page is not None and elem_id is not None:
                key = f"{int(page)}-{int(elem_id)}"
                key_to_idx[key] = idx
        # Track indices to remove (child tables that are merged)
        indices_to_remove = set()
        # Process each merge group
        for merge_group in merged_tables:
            table_ids = merge_group.get("table_ids", [])
            if len(table_ids) < 2:
                continue
            # table_ids are strings in "page-id" format (e.g., "1-7", "2-11");
            # normalize to str and sort so the representative choice is deterministic
            table_ids = sorted(str(tid) for tid in table_ids)
            # Get the merged HTML - check both 'table_html' and 'html' keys
            merged_html = merge_group.get("table_html") or merge_group.get("html")
            # The representative table is the first id after sorting
            representative_id = table_ids[0]
            representative_idx = key_to_idx.get(representative_id)
            if representative_idx is None:
                # Representative table not found among the elements; skip this group
                continue
            # Collect bboxes from all tables in the merge group
            bboxes_to_merge = []
            for tid in table_ids:
                idx = key_to_idx.get(tid)
                if idx is None:
                    # Referenced table not found among the elements; skip it
                    continue
                elem = elements[idx]
                if elem.get("coordinates"):
                    bboxes_to_merge.append(elem["coordinates"])
                if tid != representative_id:
                    indices_to_remove.add(idx)
            # Update the representative table; note the merged bbox is stored as a
            # flat [x1, y1, x2, y2] list rather than the 4-corner format
            elements[representative_idx]["content"]["html"] = merged_html
            elements[representative_idx]["coordinates"] = _merge_bboxes(bboxes_to_merge)
        # Rebuild the elements list, skipping the merged child tables
        new_elements = [
            elem for idx, elem in enumerate(elements)
            if idx not in indices_to_remove
        ]
        doc_copy["elements"] = new_elements
        # Remove the merged_tables key now that it has been applied
        doc_copy.pop("merged_tables", None)
        processed_data[doc_key] = doc_copy
    return processed_data
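

if __name__ == "__main__":
    # Minimal usage sketch tying the helpers together. The file paths are
    # hypothetical, and whether preprocess_merged_tables should be applied to
    # the ground truth, the predictions, or both depends on the evaluation
    # pipeline; here it is applied to both for illustration.
    gt_data = preprocess_merged_tables(read_file("gt.json"))
    pred_data = preprocess_merged_tables(read_file("pred.json"))
    valid_keys, error_keys, missing_keys = check_data_validity(gt_data, pred_data)
    print(
        f"valid: {len(valid_keys)}, "
        f"errors: {len(error_keys)}, "
        f"missing: {len(missing_keys)}"
    )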