| from rapidfuzz import fuzz |
| from src.geometry import is_included |
|
|
|
|
| def _normalize_coords(coordinates): |
| """Convert coordinates to list of [x, y] pairs. |
| |
| Accepts either [{"x": x, "y": y}, ...] or [[x, y], ...]. |
| """ |
| return [ |
| [coord["x"], coord["y"]] if isinstance(coord, dict) else list(coord) |
| for coord in coordinates |
| ] |
|
|
def calc_nid(
    gt_text: str,
    pred_text: str,
) -> float:
    """Calculate the Normalized InDel score between the gt and pred text.

    Args:
        gt_text (str): The string of gt text to compare.
        pred_text (str): The string of pred text to compare.

    Returns:
        float: The nid score between gt and pred text, on fuzz.ratio's
            0-100 scale (callers normalize by dividing by 100).
    """
    if len(gt_text) == 0 and len(pred_text) == 0:
        # Both empty is a perfect match: score 100 on the same 0-100 scale
        # as fuzz.ratio. (Returning 1 here, as before, would read as a
        # near-zero score after the /100 normalization downstream.)
        score = 100
    elif len(gt_text) > 0 and len(pred_text) == 0:
        # GT has text but the prediction produced none: complete miss.
        score = 0
    else:
        # Indel-based similarity ratio in [0, 100]; also covers the
        # empty-GT / non-empty-pred case (ratio is 0 there).
        score = fuzz.ratio(gt_text, pred_text)

    return score
|
|
|
|
def extract_text(
    gt_data: dict,
    pred_data: dict,
    ignore_classes: list = None,
    strings_to_remove: list = None,
    filter_by_gt_area: bool = True,
) -> tuple:
    """Extract text from both GT and prediction data, optionally filtering out
    predictions that fall within GT ignored regions.

    Args:
        gt_data (dict): The GT data to extract text from.
        pred_data (dict): The prediction data to extract text from. May be
            None, in which case the prediction text is empty.
        ignore_classes (list): A list of classes to ignore during extraction.
            Defaults to no ignored classes.
        strings_to_remove (list): A list of strings to remove from the
            extracted text. Defaults to ["\\n"].
        filter_by_gt_area (bool): If True, filter out prediction text within GT
            ignored regions. If False, only filter by category. Defaults to True.

    Returns:
        tuple: (gt_text, pred_text) - The concatenated text extracted from GT
            and predictions.
    """
    # Resolve defaults here instead of using mutable default arguments,
    # which are shared across calls.
    ignore_classes = [x.lower() for x in (ignore_classes or [])]
    if strings_to_remove is None:
        strings_to_remove = ["\n"]

    # Collect coordinates of GT regions whose category is ignored;
    # predictions falling inside these regions are dropped below.
    gt_ignored_regions = []
    if filter_by_gt_area:
        for elem in gt_data["elements"]:
            if elem["category"].lower() in ignore_classes:
                gt_ignored_regions.append(_normalize_coords(elem["coordinates"]))

    # Concatenate GT text, skipping ignored categories. Built with join to
    # avoid quadratic repeated string concatenation.
    gt_text = "".join(
        elem["content"]["text"] + ' '
        for elem in gt_data["elements"]
        if elem["category"].lower() not in ignore_classes
    )

    pred_parts = []
    if pred_data is not None:
        for elem in pred_data["elements"]:
            if elem["category"].lower() in ignore_classes:
                continue

            if filter_by_gt_area:
                elem_coords = _normalize_coords(elem["coordinates"])
                # Drop predictions falling inside a GT ignored region;
                # soft=0.2 presumably tolerates partial overlap at the
                # edges — confirm against src.geometry.is_included.
                if any(
                    is_included(region, elem_coords, soft=0.2)
                    for region in gt_ignored_regions
                ):
                    continue

            pred_parts.append(elem["content"]["text"] + ' ')
    pred_text = "".join(pred_parts)

    # Strip unwanted substrings (newlines by default) from both texts.
    for string in strings_to_remove:
        gt_text = gt_text.replace(string, '')
        pred_text = pred_text.replace(string, '')

    return gt_text, pred_text
|
|
|
|
def evaluate_layout(
    gt : dict,
    pred : dict,
    ignore_classes : list = [],
    filter_by_gt_area : bool = True,
) -> tuple:
    """Evaluate the layout of the gt against the pred.

    Args:
        gt (dict): The gt layout to evaluate.
        pred (dict): The pred layout to evaluate against.
        ignore_classes (list): A list of classes to ignore during evaluation.
        filter_by_gt_area (bool): If True, filter out prediction text within GT
            ignored regions. If False, only filter by category. Defaults to True.

    Returns:
        tuple: (avg_score, per_image_scores) - The average layout evaluation
            score and per-image scores dict.
    """
    per_image_scores = {}
    total = 0.0

    # Score each GT image against its prediction (missing predictions are
    # handled downstream as empty text).
    for image_key, gt_data in gt.items():
        gt_text, pred_text = extract_text(
            gt_data,
            pred.get(image_key),
            ignore_classes,
            filter_by_gt_area=filter_by_gt_area,
        )

        score = calc_nid(gt_text, pred_text)
        total += score
        # Normalize from fuzz.ratio's 0-100 range to [0, 1].
        per_image_scores[image_key] = {"nid_score": score / 100.0}

    # Average over images, normalized to [0, 1]; 0 when there are no images.
    avg_score = total / (len(per_image_scores) * 100) if per_image_scores else 0

    return avg_score, per_image_scores
|
|