Spaces:
Sleeping
Sleeping
| # app.py | |
| from __future__ import annotations | |
| import os | |
| import math | |
| import re | |
| import time | |
| import statistics | |
| from dataclasses import dataclass | |
| from typing import Any, Dict, List, Tuple, Optional | |
| import gradio as gr | |
| import pymupdf as fitz # PyMuPDF | |
| import pikepdf | |
| from PIL import Image, ImageDraw, ImageFont | |
| # Advanced analysis modules | |
| from advanced_analysis import ( | |
| analyze_content_stream, | |
| analyze_screen_reader, | |
| analyze_paragraphs, | |
| analyze_structure_tree, | |
| analyze_block_tag_mapping, | |
| create_block_choices | |
| ) | |
# -----------------------------
# Color Palettes for Adaptive Contrast
# -----------------------------
# For light backgrounds (use dark overlays)
LIGHT_BG_COLORS = {
    'block_border': (0, 0, 139, 255),      # Dark blue
    'span_border': (139, 0, 0, 255),       # Dark red
    'text_label': (0, 0, 0, 255),          # Black
    'math_highlight': (139, 0, 139, 255),  # Dark magenta
}
# For dark backgrounds (use light overlays)
DARK_BG_COLORS = {
    'block_border': (255, 255, 0, 255),    # Yellow
    'span_border': (0, 255, 255, 255),     # Cyan
    'text_label': (255, 255, 255, 255),    # White
    'math_highlight': (255, 0, 255, 255),  # Magenta
}
# Cache for background colors to avoid re-sampling.
# Keyed by (document name, page index); values are median RGB tuples.
_bg_color_cache: Dict[Tuple[str, int], Tuple[float, float, float]] = {}
# -----------------------------
# Help Text and Explanations
# -----------------------------
# One-sentence accessibility explanation per diagnostic flag (shown in the UI).
DIAGNOSTIC_HELP = {
    "tagged_pdf": "Tagged PDFs include structure tags (headings, lists, reading order). Screen readers use these for navigation. Untagged PDFs force assistive tech to guess.",
    "likely_scanned_image_page": "No extractable text + images present = scanned. Screen readers need OCR or alt text.",
    "has_type3_fonts": "Type3 fonts lack proper encoding. Causes broken copy/paste and screen reader pronunciation.",
    "suspicious_garbled_text": "Replacement chars (�) detected. Indicates missing ToUnicode maps.",
    "likely_text_as_vector_outlines": "Text rendered as vector paths. Screen readers cannot read.",
    "multi_column_guess": "Multiple columns detected. Untagged multi-column PDFs usually have wrong reading order.",
}
# Short description of each reading-order strategy offered in the UI dropdown.
ORDERING_MODE_HELP = {
    "raw": "Extraction order: How PyMuPDF found blocks (often = creation order, not reading order)",
    "tblr": "Top-to-bottom, left-to-right geometric sorting. Good for simple single-column docs.",
    "columns": "Two-column heuristic: Clusters by x-position, reads left column then right. Simple heuristic, may fail on complex layouts.",
}
| # ----------------------------- | |
| # Utilities | |
| # ----------------------------- | |
| def _clamp(v: float, lo: float, hi: float) -> float: | |
| return max(lo, min(hi, v)) | |
| def _rect_i(rect: Tuple[float, float, float, float]) -> Tuple[int, int, int, int]: | |
| x0, y0, x1, y1 = rect | |
| return (int(round(x0)), int(round(y0)), int(round(x1)), int(round(y1))) | |
# NOTE: _safe_str and _looks_like_math were moved to layout_utils and are
# imported from there below, so no local definitions are needed in this file.
# -----------------------------
# Background Color Sampling for Adaptive Contrast
# -----------------------------
def sample_background_color(page: fitz.Page, dpi: int = 72) -> Tuple[float, float, float]:
    """
    Estimate the page background color and return per-channel RGB (0-255).

    The page is rendered at a low DPI purely for speed, then sampled at 9
    fixed points (corners, edge midpoints, center). The per-channel MEDIAN
    is returned, which makes the estimate robust against a sample point
    landing on text or artwork. Falls back to white when no point can be
    read (e.g. a zero-size render).

    Args:
        page: PyMuPDF page to sample.
        dpi: Render resolution for sampling; 72 is plenty for a background guess.

    Returns:
        (r, g, b) medians as floats in the 0-255 range.
    """
    # Render page at low DPI for performance
    pix = page.get_pixmap(dpi=dpi, alpha=False)
    img = Image.frombytes("RGB", (pix.width, pix.height), pix.samples)
    width, height = img.size
    # Relative sample positions: 5% / 50% / 95% along each axis,
    # yielding the 9-point grid (top row first, left to right).
    fractions = (0.05, 0.5, 0.95)
    sample_points = [
        (int(width * fx), int(height * fy))
        for fy in fractions
        for fx in fractions
    ]
    r_meds, g_meds, b_meds = [], [], []
    for x, y in sample_points:
        try:
            pixel = img.getpixel((x, y))
        except Exception:
            continue  # skip unreadable points
        r_meds.append(pixel[0])
        g_meds.append(pixel[1])
        b_meds.append(pixel[2])
    if not r_meds:
        # Fallback: assume a white background.
        return (255.0, 255.0, 255.0)
    # Median (not mean) to avoid outliers from text/graphics under a point.
    return (
        statistics.median(r_meds),
        statistics.median(g_meds),
        statistics.median(b_meds),
    )
def calculate_luminance(rgb: Tuple[float, float, float]) -> float:
    """
    Relative luminance per the WCAG quick formula:
    L = 0.2126*R + 0.7152*G + 0.0722*B with channels normalized to 0-1.

    Returns a value in [0, 1]: 0 is darkest (black), 1 is lightest (white).
    """
    weights = (0.2126, 0.7152, 0.0722)
    return sum(w * (channel / 255.0) for w, channel in zip(weights, rgb))
def get_contrast_colors(luminance: float) -> Dict[str, Tuple[int, int, int, int]]:
    """
    Choose an overlay palette for the given background luminance.

    Backgrounds brighter than 0.5 get dark overlays; darker backgrounds
    get light overlays, keeping annotations visible either way.
    """
    if luminance > 0.5:
        return LIGHT_BG_COLORS
    return DARK_BG_COLORS
| # Moving layout logic to layout_utils.py | |
| from layout_utils import ( | |
| SpanInfo, | |
| BlockInfo, | |
| extract_blocks_spans, | |
| order_blocks, | |
| _safe_str, | |
| _looks_like_math, | |
| PageDiagnostic, | |
| BatchAnalysisResult | |
| ) | |
| # Re-exporting for compatibility if needed, using the imported names directly from now on. | |
| # ----------------------------- | |
| # PDF structural checks (pikepdf) | |
| # ----------------------------- | |
def pdf_struct_report(pdf_path: str) -> Dict[str, Any]:
    """Inspect document-level PDF structure with pikepdf.

    Reports version and page count, tagging signals (/StructTreeRoot,
    /MarkInfo), optional content groups (layers), and quick per-page
    resource counts. Errors in optional sections are captured under
    ``*_error`` keys instead of raising.

    Args:
        pdf_path: Filesystem path to the PDF.

    Returns:
        Dict of structural signals, including a ``page_signals`` list with
        one entry per page.
    """
    out: Dict[str, Any] = {}
    with pikepdf.open(pdf_path) as pdf:
        root = pdf.Root
        out["pdf_version"] = _safe_str(pdf.pdf_version)
        out["num_pages"] = len(pdf.pages)
        # A /StructTreeRoot is the strongest signal that the PDF is tagged.
        out["has_struct_tree_root"] = ("/StructTreeRoot" in root)
        out["has_markinfo"] = ("/MarkInfo" in root)
        # MarkInfo flags can exist even if not truly well-tagged, but still useful.
        try:
            if out["has_markinfo"]:
                mi = root["/MarkInfo"]
                out["markinfo"] = {k: _safe_str(v) for k, v in dict(mi).items()}
        except Exception as e:
            out["markinfo_error"] = _safe_str(e)
        # Optional Content Groups (layers) can affect visibility / reading order
        try:
            out["has_ocproperties"] = ("/OCProperties" in root)
            if out["has_ocproperties"]:
                ocp = root["/OCProperties"]
                out["ocg_count_guess"] = len(ocp.get("/OCGs", []))
        except Exception as e:
            out["ocg_error"] = _safe_str(e)
        # Quick per-page signals: fonts/xobjects presence
        page_signals = []
        for i, page in enumerate(pdf.pages, start=1):
            sig = {"page": i}
            try:
                res = page.get("/Resources", pikepdf.Dictionary())
                fonts = res.get("/Font", pikepdf.Dictionary())
                xobj = res.get("/XObject", pikepdf.Dictionary())
                sig["font_count"] = len(fonts) if isinstance(fonts, pikepdf.Dictionary) else 0
                sig["xobject_count"] = len(xobj) if isinstance(xobj, pikepdf.Dictionary) else 0
                sig["has_contents"] = ("/Contents" in page)
            except Exception as e:
                sig["error"] = _safe_str(e)
            page_signals.append(sig)
        out["page_signals"] = page_signals
    return out
| # ----------------------------- | |
| # Layout extraction + ordering (PyMuPDF) | |
| # ----------------------------- | |
def render_page_with_overlay(
    doc: fitz.Document,
    page_index: int,
    blocks: List[BlockInfo],
    order_mode: str,
    dpi: int,
    show_spans: bool,
    highlight_math: bool,
    auto_contrast: bool = True,
) -> Image.Image:
    """Render a page and draw numbered block (and optional span) outlines.

    Blocks are labeled ``rank:index`` in the chosen reading order; text
    blocks that look like math get a distinct highlight color and a
    ``[MATH?]`` suffix. When ``auto_contrast`` is on, overlay colors are
    picked from the sampled background luminance (cached per page).
    """
    page = doc[page_index]
    # Determine adaptive colors based on background
    if auto_contrast:
        # Check cache first (keyed by document name + page index)
        cache_key = (doc.name, page_index)
        if cache_key in _bg_color_cache:
            bg_rgb = _bg_color_cache[cache_key]
        else:
            bg_rgb = sample_background_color(page, dpi=72)
            _bg_color_cache[cache_key] = bg_rgb
        luminance = calculate_luminance(bg_rgb)
        colors = get_contrast_colors(luminance)
    else:
        # Fallback to light background colors (dark overlays)
        colors = LIGHT_BG_COLORS
    pix = page.get_pixmap(dpi=dpi, alpha=False)
    img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
    draw = ImageDraw.Draw(img)
    ordered = order_blocks(blocks, order_mode)
    scale = dpi / 72.0  # PDF points -> rendered pixels
    # Try to use a default font; if not available, PIL will still draw text.
    try:
        font = ImageFont.load_default()
    except Exception:
        font = None

    def _scale_rect(rect):
        # Scale a PDF-space bbox into pixel coordinates.
        x0, y0, x1, y1 = rect
        return (int(x0 * scale), int(y0 * scale), int(x1 * scale), int(y1 * scale))

    for rank, (idx, b) in enumerate(ordered, start=1):
        r = _scale_rect(b.bbox)
        is_text = (b.block_type == 0 and b.text.strip() != "")
        is_math = is_text and _looks_like_math(b.text)
        # Use adaptive colors; math blocks get the highlight color.
        border_color = colors['math_highlight'] if (is_text and highlight_math and is_math) else colors['block_border']
        draw.rectangle(r, outline=border_color, width=2)
        label = f"{rank}:{idx}"
        if is_text and highlight_math and is_math:
            label += " [MATH?]"
        # Label sits just above the block, clamped to the top of the image.
        draw.text((r[0] + 2, max(0, r[1] - 12)), label, fill=colors['text_label'], font=font)
        if show_spans and b.block_type == 0:
            for sp in b.spans:
                sr = _scale_rect(sp.bbox)
                draw.rectangle(sr, outline=colors['span_border'], width=1)
    return img
def render_paragraph_overlay(
    pdf_path: str,
    page_index: int,
    dpi: int,
    visual_paragraphs: List[List[int]],
    semantic_paragraphs: List[Any]
) -> Image.Image:
    """
    Render a page with color-coded paragraph visualizations.

    Visual paragraphs are outlined in translucent green. Semantic
    paragraphs (structure-tree nodes) carry no direct bboxes here and are
    currently not drawn; a fuller implementation would map their MCIDs to
    layout blocks.

    Args:
        pdf_path: Path to PDF file
        page_index: 0-based page index
        dpi: Rendering DPI
        visual_paragraphs: List of visual paragraph groups (block indices)
        semantic_paragraphs: List of semantic paragraph StructureNodes
            (currently unused — see note above)

    Returns:
        PIL Image with paragraph overlays
    """
    # Context manager ensures the document is closed even if drawing fails.
    with fitz.open(pdf_path) as doc:
        page = doc[page_index]
        # Render base image
        pix = page.get_pixmap(dpi=dpi)
        img = Image.frombytes("RGB", (pix.width, pix.height), pix.samples)
        draw = ImageDraw.Draw(img, 'RGBA')
        # Extract blocks for bounding boxes
        blocks = extract_blocks_spans(doc, page_index)
        # Scale factor from PDF points to pixels
        scale = dpi / 72.0

        def _to_px(bbox):
            """Convert a PDF-space bbox to integer pixel coordinates."""
            x0, y0, x1, y1 = bbox
            return (int(x0 * scale), int(y0 * scale), int(x1 * scale), int(y1 * scale))

        # Draw visual paragraphs in translucent green.
        for para_blocks in visual_paragraphs:
            # Keep only indices that actually exist; an empty or fully
            # out-of-range group would otherwise make min()/max() raise
            # ValueError on an empty sequence.
            boxes = [blocks[i].bbox for i in para_blocks if 0 <= i < len(blocks)]
            if not boxes:
                continue
            min_x0 = min(b[0] for b in boxes)
            min_y0 = min(b[1] for b in boxes)
            max_x1 = max(b[2] for b in boxes)
            max_y1 = max(b[3] for b in boxes)
            r = _to_px((min_x0, min_y0, max_x1, max_y1))
            # Green with transparency for visual paragraphs
            draw.rectangle(r, outline=(0, 255, 0, 255), width=3, fill=(0, 255, 0, 30))
        # Semantic paragraph indicators (blue borders) would go here once
        # MCID -> block mapping is available.
    return img
| # ----------------------------- | |
| # Heuristic "problems" report | |
| # ----------------------------- | |
def diagnose_page(doc: fitz.Document, page_index: int, struct: Dict[str, Any]) -> Dict[str, Any]:
    """
    Run accessibility heuristics on a single page.

    Combines text extraction, font inspection, and simple geometry to flag
    likely problems (scanned page, Type3 fonts, garbled text, text drawn
    as outlines, multi-column layout).

    Args:
        doc: Open PyMuPDF document.
        page_index: 0-based page index.
        struct: Document-level report from pdf_struct_report().

    Returns:
        Flat dict of per-page diagnostic flags and counts (page is 1-based).
    """
    page = doc[page_index]
    text = page.get_text("text").strip()
    blocks = extract_blocks_spans(doc, page_index)
    image_blocks = sum(1 for blk in blocks if blk.block_type == 1)
    # Fonts / encoding hints
    fonts = page.get_fonts(full=True)  # list of tuples with font info
    type3_present = any("Type3" in str(entry) for entry in fonts)
    # ToUnicode problems are hard to detect directly from PyMuPDF, so we
    # use replacement chars / NULs in the extracted text as a proxy.
    replacement_hits = text.count("\uFFFD") + text.count("\u0000")
    garbled = replacement_hits > 0 or ("�" in text)
    # "Scanned" heuristic: essentially no text, but images present.
    scanned = len(text) < 10 and image_blocks > 0
    # "Text as outlines" heuristic: no text, no images, but many vector paths.
    drawings = page.get_drawings()
    outlines = len(text) < 10 and image_blocks == 0 and len(drawings) > 10
    tagged = bool(struct.get("has_struct_tree_root"))
    # Multi-column heuristic: enough text blocks whose x-centers both
    # spread widely and leave a large gap (the inter-column gutter).
    centers = [
        (blk.bbox[0] + blk.bbox[2]) / 2.0
        for blk in blocks
        if blk.block_type == 0 and blk.text.strip()
    ]
    two_columns = False
    if len(centers) >= 10:
        xs = sorted(centers)
        spread = xs[-1] - xs[0]
        widest_gap = max(later - earlier for earlier, later in zip(xs, xs[1:]))
        two_columns = spread > 200 and widest_gap > 80
    return {
        "page": page_index + 1,
        "tagged_pdf": tagged,
        "text_len": len(text),
        "image_block_count": image_blocks,
        "font_count": len(fonts),
        "has_type3_fonts": type3_present,
        "suspicious_garbled_text": garbled,
        "likely_scanned_image_page": scanned,
        "likely_text_as_vector_outlines": outlines,
        "multi_column_guess": two_columns,
    }
| # ----------------------------- | |
| # Batch Analysis Functions | |
| # ----------------------------- | |
def diagnose_all_pages(
    pdf_path: str,
    max_pages: Optional[int] = None,
    sample_rate: int = 1,
    progress = None,
) -> BatchAnalysisResult:
    """
    Analyze all pages (or a sampled subset) of a PDF.

    Args:
        pdf_path: Path to PDF file
        max_pages: Maximum pages to analyze (None = all)
        sample_rate: Analyze every Nth page (1 = all pages)
        progress: Gradio progress tracker; called with (fraction, desc=...)
            when provided

    Returns:
        BatchAnalysisResult with aggregated statistics
    """
    # The document-level structure report is computed once and shared
    # across every page diagnosis.
    struct = pdf_struct_report(pdf_path)
    with fitz.open(pdf_path) as doc:
        total = len(doc)
        pages_to_analyze = min(total, max_pages) if max_pages else total
        results = []
        for i in range(0, pages_to_analyze, sample_rate):
            if progress:
                progress((i + 1) / pages_to_analyze,
                         desc=f"Analyzing page {i+1}/{pages_to_analyze}")
            # Diagnose page with timing
            start = time.time()
            diag = diagnose_page(doc, i, struct)
            elapsed = (time.time() - start) * 1000  # milliseconds
            # Convert to PageDiagnostic dataclass
            page_diag = PageDiagnostic(
                page_num=diag["page"],
                tagged_pdf=diag["tagged_pdf"],
                text_len=diag["text_len"],
                image_block_count=diag["image_block_count"],
                font_count=diag["font_count"],
                has_type3_fonts=diag["has_type3_fonts"],
                suspicious_garbled_text=diag["suspicious_garbled_text"],
                likely_scanned_image_page=diag["likely_scanned_image_page"],
                likely_text_as_vector_outlines=diag["likely_text_as_vector_outlines"],
                multi_column_guess=diag["multi_column_guess"],
                processing_time_ms=int(elapsed),
            )
            results.append(page_diag)
    # Aggregate statistics
    return aggregate_results(results, total)
def aggregate_results(
    results: List[PageDiagnostic],
    total_pages: int
) -> BatchAnalysisResult:
    """
    Roll per-page diagnostics up into a BatchAnalysisResult.

    Produces issue counts, "critical" pages (3+ simultaneous issues),
    issues affecting more than half of the analyzed pages, and total
    processing time.
    """
    # Map each summary-stat key to the per-page boolean attribute it counts.
    flag_attrs = {
        'scanned_pages': 'likely_scanned_image_page',
        'type3_font_pages': 'has_type3_fonts',
        'garbled_text_pages': 'suspicious_garbled_text',
        'multi_column_pages': 'multi_column_guess',
        'outline_pages': 'likely_text_as_vector_outlines',
    }
    summary_stats = {
        stat_name: sum(1 for page in results if getattr(page, attr))
        for stat_name, attr in flag_attrs.items()
    }
    # A page with 3 or more concurrent issues is considered critical.
    critical_pages = []
    for page in results:
        issue_count = (
            int(page.likely_scanned_image_page)
            + int(page.has_type3_fonts)
            + int(page.suspicious_garbled_text)
            + int(page.multi_column_guess)
            + int(page.likely_text_as_vector_outlines)
        )
        if issue_count >= 3:
            critical_pages.append(page.page_num)
    # An issue is "common" when it affects more than half the analyzed pages.
    threshold = len(results) * 0.5
    common_issues = [name for name, count in summary_stats.items() if count > threshold]
    total_time = sum(page.processing_time_ms for page in results) / 1000.0
    return BatchAnalysisResult(
        total_pages=total_pages,
        pages_analyzed=len(results),
        summary_stats=summary_stats,
        per_page_results=results,
        common_issues=common_issues,
        critical_pages=critical_pages,
        processing_time_sec=total_time
    )
def format_batch_summary_markdown(batch: BatchAnalysisResult) -> str:
    """Create an executive summary of a batch analysis in Markdown.

    Emits document statistics, then one "### ❌" section per issue type
    that affected at least one page, listing affected page numbers
    (truncated after 30).

    Args:
        batch: Aggregated batch-analysis result.

    Returns:
        Markdown string for display in the UI.
    """
    md = f"""## Batch Analysis Summary
**Document Statistics:**
- Total pages: {batch.total_pages}
- Pages analyzed: {batch.pages_analyzed}
- Processing time: {batch.processing_time_sec:.1f} seconds
**Issues Found:**
"""
    md += "\n\n**Detailed Breakdown:**\n"
    # Readable display name for each per-page boolean flag.
    issue_map = {
        'likely_scanned_image_page': 'Scanned Pages',
        'has_type3_fonts': 'Type3 Fonts',
        'suspicious_garbled_text': 'Garbled Text',
        'multi_column_guess': 'Multi-Column (Untagged)',
        'likely_text_as_vector_outlines': 'Text as Outlines'
    }
    for issue_attr, issue_name in issue_map.items():
        # Find pages with this issue
        affected_pages = [p.page_num for p in batch.per_page_results
                          if getattr(p, issue_attr, False)]
        if not affected_pages:
            continue
        count = len(affected_pages)
        pct = (count / batch.pages_analyzed) * 100 if batch.pages_analyzed > 0 else 0
        # Truncate long page lists to keep the summary readable.
        page_list_str = ", ".join(map(str, affected_pages[:30]))
        if count > 30:
            page_list_str += f" ... ({count - 30} more)"
        md += f"\n### ❌ {issue_name}: {count} pages ({pct:.1f}%)\n"
        md += f"**Pages**: {page_list_str}\n"
    return md
def format_batch_results_table(batch: BatchAnalysisResult) -> str:
    """Render per-page batch diagnostics as a styled HTML table.

    Issue flags become red YES / green NO badges so problem pages stand
    out at a glance.
    """
    def _badge(flag):
        # Red YES for a detected issue, green NO otherwise.
        if flag:
            return '<span class="issue-yes">YES</span>'
        return '<span class="issue-no">NO</span>'

    pieces = ["""
    <style>
    .batch-table { border-collapse: collapse; width: 100%; font-size: 12px; }
    .batch-table th { background-color: #f0f0f0; padding: 8px; text-align: left; border: 1px solid #ddd; }
    .batch-table td { padding: 6px; border: 1px solid #ddd; text-align: center; }
    .issue-yes { background-color: #ffcccc; color: #cc0000; }
    .issue-no { background-color: #ccffcc; color: #006600; }
    </style>
    <table class="batch-table">
    <tr>
    <th>Page</th>
    <th>Text Len</th>
    <th>Scanned</th>
    <th>Type3</th>
    <th>Garbled</th>
    <th>Outlines</th>
    <th>Multi-Col</th>
    <th>Time (ms)</th>
    </tr>
    """]
    for p in batch.per_page_results:
        pieces.append(f"""
    <tr>
    <td><strong>{p.page_num}</strong></td>
    <td>{p.text_len}</td>
    <td>{_badge(p.likely_scanned_image_page)}</td>
    <td>{_badge(p.has_type3_fonts)}</td>
    <td>{_badge(p.suspicious_garbled_text)}</td>
    <td>{_badge(p.likely_text_as_vector_outlines)}</td>
    <td>{_badge(p.multi_column_guess)}</td>
    <td>{p.processing_time_ms}</td>
    </tr>
    """)
    pieces.append("</table>")
    return "".join(pieces)
def format_batch_results_chart(batch: BatchAnalysisResult) -> Any:
    """Build a Plotly bar chart of issue counts by type.

    Returns:
        A plotly.graph_objects.Figure. Annotated as ``Any`` (the previous
        ``Dict[str, Any]`` annotation was wrong) to avoid a module-level
        plotly dependency.
    """
    # Deferred import: plotly is only needed when a batch chart is requested.
    import plotly.graph_objects as go
    issue_names = [k.replace('_', ' ').title() for k in batch.summary_stats.keys()]
    counts = list(batch.summary_stats.values())
    fig = go.Figure(data=[
        go.Bar(
            x=issue_names,
            y=counts,
            marker_color=['#ff6b6b', '#ee5a6f', '#f06595', '#cc5de8', '#845ef7'],
            text=counts,
            textposition='auto',
        )
    ])
    fig.update_layout(
        title="Issues by Type",
        xaxis_title="Issue Type",
        yaxis_title="Number of Pages",
        showlegend=False,
        height=400,
    )
    return fig
| # ----------------------------- | |
| # Result Formatting | |
| # ----------------------------- | |
def format_diagnostic_summary(diag: Dict[str, Any], struct: Dict[str, Any]) -> str:
    """
    Build a Markdown diagnostics summary with severity icons.

    Every triggered check contributes one "icon + bold title + explanation"
    paragraph; a page with no findings gets a single all-clear note instead.
    """
    untagged = not struct.get("has_struct_tree_root")
    # (triggered, icon, title, DIAGNOSTIC_HELP key) in display order.
    checks = [
        (untagged, "⚠️", "Untagged PDF", "tagged_pdf"),
        (diag["likely_scanned_image_page"], "❌", "Scanned Page", "likely_scanned_image_page"),
        (diag["likely_text_as_vector_outlines"], "❌", "Text as Outlines", "likely_text_as_vector_outlines"),
        (diag["has_type3_fonts"], "⚠️", "Type3 Fonts", "has_type3_fonts"),
        (diag["suspicious_garbled_text"], "⚠️", "Garbled Text", "suspicious_garbled_text"),
        (diag["multi_column_guess"] and untagged, "⚠️", "Multi-Column Layout", "multi_column_guess"),
    ]
    hints = [
        f"{icon} **{title}**: {DIAGNOSTIC_HELP[key]}"
        for triggered, icon, title, key in checks
        if triggered
    ]
    if not hints:
        return ("✓ **No obvious red flags detected**\n\n"
                "Note: This doesn't guarantee full accessibility. Manual review is still "
                "recommended for alt text, math content, and proper tag structure.")
    return "\n\n".join(hints)
| # ----------------------------- | |
| # Gradio callbacks | |
| # ----------------------------- | |
def load_pdf(fileobj) -> Tuple[str, int, str]:
    """
    Robustly load a PDF file and return its path, page count, and status message.

    Handles Gradio FileData objects, string paths (from examples), and None.
    Never raises: failures are reported through the status message with an
    empty path and a zero page count.
    """
    if fileobj is None:
        return "", 0, "Waiting for PDF upload..."
    # Extract path from Gradio FileData or use string directly
    if isinstance(fileobj, str):
        pdf_path = fileobj
    elif hasattr(fileobj, "path"):
        pdf_path = fileobj.path
    elif hasattr(fileobj, "name"):
        pdf_path = fileobj.name
    else:
        pdf_path = str(fileobj)
    if not pdf_path or not os.path.exists(pdf_path):
        return "", 0, f"Error: File not found at {pdf_path}"
    try:
        # Opening the document validates it and yields the page count.
        with fitz.open(pdf_path) as doc:
            n = doc.page_count
            return pdf_path, n, f"✓ Loaded: {os.path.basename(pdf_path)} ({n} pages)"
    except Exception as e:
        return "", 0, f"❌ Error loading PDF: {str(e)}"
def analyze(pdf_path: str, page_num: int, dpi: int, order_mode: str, show_spans: bool, highlight_math: bool):
    """
    Main single-page analysis callback for the UI.

    Renders the overlay image, diagnoses the page, builds a reading-order
    text preview, and assembles the combined report plus a Markdown
    summary (with prominent warnings for untagged / scanned documents).

    Returns:
        (overlay_image, report_dict, summary_markdown, preview_text)
    """
    if not pdf_path:
        return None, {}, "Upload a PDF first.", ""
    # page_num is 1-based in UI
    page_index = max(0, int(page_num) - 1)
    struct = pdf_struct_report(pdf_path)
    with fitz.open(pdf_path) as doc:
        # Clamp to the last page in case the slider is stale.
        page_index = min(page_index, doc.page_count - 1)
        blocks = extract_blocks_spans(doc, page_index)
        overlay = render_page_with_overlay(
            doc=doc,
            page_index=page_index,
            blocks=blocks,
            order_mode=order_mode,
            dpi=int(dpi),
            show_spans=bool(show_spans),
            highlight_math=bool(highlight_math),
        )
        diag = diagnose_page(doc, page_index, struct)
        # Build "reading order text" preview
        ordered = order_blocks(blocks, order_mode)
        preview_lines = []
        for rank, (idx, b) in enumerate(ordered, start=1):
            if b.block_type == 0 and b.text.strip():
                t = b.text.replace("\n", " ").strip()
                if len(t) > 160:
                    t = t[:160] + "…"
                preview_lines.append(f"{rank:03d} [{idx}] {t}")
            elif b.block_type == 1:
                preview_lines.append(f"{rank:03d} [{idx}] <IMAGE BLOCK>")
        # Cap the preview at 200 blocks to keep the UI responsive.
        preview = "\n".join(preview_lines[:200])
    # Combine reports
    report = {
        "pdf_struct": {
            "pdf_version": struct.get("pdf_version"),
            "num_pages": struct.get("num_pages"),
            "has_struct_tree_root": struct.get("has_struct_tree_root"),
            "has_markinfo": struct.get("has_markinfo"),
            "markinfo": struct.get("markinfo", None),
            "has_ocproperties": struct.get("has_ocproperties", None),
            "ocg_count_guess": struct.get("ocg_count_guess", None),
        },
        "page_diagnosis": diag,
        "page_signals_first_5": struct.get("page_signals", [])[:5],
        "reading_order_preview": preview,
    }
    # Generate formatted summary with icons and explanations
    summary = format_diagnostic_summary(diag, struct)
    # Check for compatibility and prepend warning if needed
    if not struct.get("has_struct_tree_root"):
        summary = "### ⚠️ Accessibility Alert: Untagged Document\n\n" + \
            "**This document is likely incompatible with screen readers.**\n\n" + \
            "It lacks the 'structure tree' (tags) required for accessibility tools to understand headings, paragraphs, and reading order.\n\n" + \
            "**What you can do:**\n" + \
            "- **Remediate**: Open the original source file (Word, PowerPoint) and save as 'PDF (Best for electronic distribution and accessibility)'\n" + \
            "- **Retrofit**: Use Adobe Acrobat Pro's 'Accessibility' tool to auto-tag the document.\n\n" + \
            "---\n\n" + summary
    if diag["likely_scanned_image_page"]:
        summary = "### ❌ Critical Issue: Scanned Page\n\n" + \
            "**This page appears to be an image with no readable text.**\n\n" + \
            "Screen readers cannot read this content at all.\n\n" + \
            "**Action Required**: Perform Optical Character Recognition (OCR) using Adobe Acrobat or an OCR tool to make the text selectable and readable.\n\n" + \
            "---\n\n" + summary
    return overlay, report, summary, preview
def analyze_batch_with_progress(
    pdf_path: str,
    max_pages: int,
    sample_rate: int,
    progress=gr.Progress()  # Gradio injects/updates this tracker per call
):
    """
    Run batch analysis with progress tracking.

    Returns:
        (summary_markdown, chart_data, table_html, json_report, status_message)
    """
    if not pdf_path:
        return "Upload a PDF first.", None, "", {}, "Error: No PDF loaded"
    # Run analysis
    batch = diagnose_all_pages(pdf_path, int(max_pages), int(sample_rate), progress)
    # Format outputs
    summary = format_batch_summary_markdown(batch)
    chart = format_batch_results_chart(batch)
    table = format_batch_results_table(batch)
    json_report = batch.to_dict()
    status = f"✓ Analyzed {batch.pages_analyzed}/{batch.total_pages} pages in {batch.processing_time_sec:.1f}s"
    return summary, chart, table, json_report, status
# -----------------------------
# UI
# -----------------------------
| with gr.Blocks(title="PDF Structure Inspector") as demo: | |
| gr.Markdown( | |
| """ | |
| # PDF Structure Inspector (screen reader / reading order / math debugging) | |
| """ | |
| ) | |
| # 1. Top Bar: Loader & Global Stats | |
| with gr.Row(): | |
| pdf_file = gr.File(label="Upload PDF", file_types=[".pdf"], scale=1) | |
| with gr.Column(scale=2): | |
| status = gr.Textbox(label="Status", interactive=False) | |
| # Hidden states | |
| pdf_path = gr.Textbox(visible=False) | |
| page_count = gr.Number(visible=False) | |
| gr.Examples( | |
| examples=["test_document.pdf", "18.1 Notes.pdf", "logic.pdf"], | |
| inputs=pdf_file | |
| ) | |
| # 2. Control Panel | |
| with gr.Row(variant="panel"): | |
| with gr.Column(scale=2): | |
| page_num = gr.Slider(label="Page Number", minimum=1, maximum=1, value=1, step=1) | |
| with gr.Column(scale=1): | |
| dpi = gr.Slider(label="Zoom (DPI)", minimum=72, maximum=300, value=150, step=1) | |
| with gr.Column(scale=1): | |
| order_mode = gr.Dropdown( | |
| ["raw", "tblr", "columns"], value="raw", label="Reading Order", | |
| info="Strategy for untagged content" | |
| ) | |
| with gr.Column(scale=2, min_width=200): | |
| with gr.Row(): | |
| show_spans = gr.Checkbox(label="Show Spans", value=False) | |
| highlight_math = gr.Checkbox(label="Highlight Math", value=True) | |
| run_btn = gr.Button("Forced Refresh", variant="secondary", size="sm") | |
# 3. Main Workspace (Split View)
with gr.Row():
    # LEFT: Visualization (Persistent)
    with gr.Column(scale=6):
        gr.Markdown("### 1. Visual Inspection")
        # Rendered page with analysis overlays; refreshed by analyze().
        overlay_img = gr.Image(label="Page Analysis Overlay (Live)", type="pil", interactive=False, height=800)
        summary = gr.Markdown(elem_classes=["result-markdown"])
    # RIGHT: Tools (Contextual)
    with gr.Column(scale=5):
        gr.Markdown("### 2. Deep Dive Tools")
        with gr.Tabs():
            # --- TAB 1: DETAILS ---
            with gr.Tab("Details & Structure"):
                with gr.Accordion("Reading Order Preview", open=True):
                    reading_order_preview = gr.Textbox(
                        label="Detected text flow",
                        lines=20,
                        interactive=False,
                        info="This is the order text will be fed to accessibility tools (if untagged)."
                    )
                with gr.Accordion("Full Technical Report (JSON)", open=False):
                    report = gr.JSON(label="Page Report")
                with gr.Accordion("Help: Understanding Diagnostics", open=False):
                    # Static end-user help text mirroring DIAGNOSTIC_HELP at file top.
                    gr.Markdown("""
                    ### What Each Diagnostic Means
                    **🏷️ Tagged PDF**: Tagged PDFs include structure tags (headings, lists, reading order) that screen readers use for navigation. Untagged PDFs force assistive technology to guess the reading order based on visual layout, often leading to incorrect results.
                    **📄 Scanned Pages**: Pages with no extractable text but containing images are likely scanned documents. Screen readers cannot read images without OCR (Optical Character Recognition) or alternative text descriptions.
                    **🔤 Type3 Fonts**: Type3 fonts are custom bitmap fonts that often lack proper character encoding mappings. This causes:
                    - Broken copy/paste (you get garbage characters)
                    - Screen readers cannot pronounce text correctly
                    - Text search doesn't work
                    **🔀 Garbled Text**: Replacement characters () indicate missing or incorrect ToUnicode mappings in the PDF. Screen readers will mispronounce affected text.
                    **✏️ Text as Outlines**: When text is rendered as vector paths instead of actual text, screen readers cannot extract or read it. The document appears to have text visually but is inaccessible.
                    **📰 Multi-Column Layouts**: Documents with multiple columns pose reading order challenges. Without proper tagging, screen readers may read across columns horizontally instead of completing one column before moving to the next.
                    ### Reading Order Modes
                    **Raw**: Extraction order, how PyMuPDF found blocks (often = creation order).
                    **TBLR**: Top-to-bottom, left-to-right geometric sorting.
                    **Columns**: Two-column heuristic (clusters by x-position).
                    """)
            # --- TAB 2: ADVANCED ---
            with gr.Tab("Advanced Tools"):
                gr.Markdown("Power-user features for deep PDF inspection.")
                # 1. Content Stream
                with gr.Accordion("1. Content Stream Inspector", open=False):
                    gr.Markdown("**Inspect raw PDF content stream operators for a specific block**")
                    # Choices populated by update_block_dropdown on page change.
                    cs_block_dropdown = gr.Dropdown(label="Select Block", choices=[], info="Choose a block to inspect")
                    cs_inspect_btn = gr.Button("Extract Operators", size="sm")
                    with gr.Tabs():
                        with gr.Tab("Formatted"):
                            cs_operator_display = gr.Markdown()
                        with gr.Tab("Raw"):
                            cs_raw_stream = gr.Code(label="Raw Stream")
                # 2. Screen Reader
                with gr.Accordion("2. Screen Reader Simulator", open=True):
                    gr.Markdown("**Simulate how NVDA or JAWS would read this page**")
                    with gr.Row():
                        sr_reader = gr.Radio(["NVDA", "JAWS"], value="NVDA", label="Reader", scale=1)
                        sr_detail = gr.Radio(["minimal", "default", "verbose"], value="default", label="Detail", scale=1)
                        sr_order = gr.Radio(["raw", "tblr", "columns"], value="tblr", label="Fallback Order", scale=1)
                    sr_btn = gr.Button("Generate Transcript", variant="primary")
                    with gr.Tabs():
                        with gr.Tab("Transcript"):
                            sr_transcript = gr.Textbox(lines=15, label="Output", interactive=False)
                        with gr.Tab("Analysis"):
                            sr_analysis = gr.Markdown()
                # 3. Paragraph Detection
                with gr.Accordion("3. Paragraph Detection", open=False):
                    gr.Markdown("**Compare visual paragraphs vs semantic paragraph tags**")
                    # Vertical gap (points) above which two lines start new paragraphs.
                    para_threshold = gr.Slider(label="Gap Threshold", minimum=5, maximum=30, value=15, step=1)
                    para_btn = gr.Button("Analyze Paragraphs")
                    para_overlay = gr.Image(label="Paragraph Visualization", type="pil", height=400)
                    with gr.Row():
                        para_visual = gr.Number(label="Visual", interactive=False)
                        para_semantic = gr.Number(label="Semantic <P>", interactive=False)
                        para_score = gr.Number(label="Match Quality", interactive=False)
                    para_mismatches = gr.Markdown()
                # 4. Structure Tree
                with gr.Accordion("4. Structure Tree Visualizer", open=False):
                    gr.Markdown("**Display the complete PDF tag hierarchy**")
                    struct_btn = gr.Button("Extract Tree")
                    with gr.Tabs():
                        with gr.Tab("Diagram"):
                            struct_plot = gr.Plot()
                        with gr.Tab("Text View"):
                            struct_text = gr.Textbox(lines=20)
                        with gr.Tab("Stats"):
                            struct_stats = gr.Markdown()
                # 5. Mapping
                with gr.Accordion("5. Block-to-Tag Mapping", open=False):
                    gr.Markdown("**Link visual blocks to structure tree elements**")
                    map_btn = gr.Button("Map Blocks")
                    map_message = gr.Markdown()
                    map_table = gr.DataFrame(headers=["Block #", "Tag Type", "MCID", "Alt Text"])
            # --- TAB 3: BATCH ---
            with gr.Tab("Batch Analysis"):
                with gr.Row():
                    batch_max_pages = gr.Slider(label="Max pages", minimum=1, maximum=500, value=100)
                    # Analyze every Nth page to keep large documents fast.
                    batch_sample_rate = gr.Slider(label="Sample rate", minimum=1, maximum=10, value=1)
                batch_run_btn = gr.Button("Analyze All Pages", variant="primary")
                batch_progress = gr.Textbox(label="Progress", interactive=False)
                with gr.Accordion("Summary", open=True):
                    batch_summary_md = gr.Markdown()
                with gr.Accordion("Details", open=False):
                    batch_chart = gr.Plot()
                    batch_table = gr.HTML()
                    # Hidden raw results, kept for download/inspection by other callbacks.
                    batch_json = gr.JSON(visible=False)
| # --- CALLBACKS & WIRING --- | |
def _on_file_change(f):
    """Load the uploaded PDF and sync the page slider.

    Returns (path, page_count, status_message, slider_update). The slider
    snaps back to page 1; its maximum tracks the document length, falling
    back to 1 when loading failed (empty path).
    """
    path, n, msg = load_pdf(f)
    slider_max = n if path else 1
    return path, n, msg, gr.update(maximum=slider_max, value=1)
# Main Analysis Inputs/Outputs
# Note: analyze() now returns (overlay, report, summary, preview)
analysis_inputs = [pdf_path, page_num, dpi, order_mode, show_spans, highlight_math]
analysis_outputs = [overlay_img, report, summary, reading_order_preview]
# Upload & Example Triggers
# On upload: load the PDF, resize the page slider, then run a first analysis.
pdf_file.change(_on_file_change, inputs=[pdf_file], outputs=[pdf_path, page_count, status, page_num]) \
    .then(analyze, inputs=analysis_inputs, outputs=analysis_outputs)
# Reactive Event Listeners
# Note: page_num.change is strictly better for 'Exploration' than release,
# as it updates while typing or stepping.
page_num.change(analyze, inputs=analysis_inputs, outputs=analysis_outputs)
dpi.release(analyze, inputs=analysis_inputs, outputs=analysis_outputs)  # DPI is heavy, use release
order_mode.change(analyze, inputs=analysis_inputs, outputs=analysis_outputs)
show_spans.change(analyze, inputs=analysis_inputs, outputs=analysis_outputs)
highlight_math.change(analyze, inputs=analysis_inputs, outputs=analysis_outputs)
# Manual fallback in case a reactive trigger was missed.
run_btn.click(analyze, inputs=analysis_inputs, outputs=analysis_outputs)
| # Advanced Analysis Helper Functions (Closures to capture inputs if needed, or just pure) | |
def update_block_dropdown(pdf_path_val, page_num_val):
    """Repopulate the Content Stream Inspector's block dropdown for the page.

    Returns a gr.update carrying the page's block choices (pre-selecting the
    first block), or an empty dropdown when no document is loaded, the page
    has no blocks, or extraction fails.
    """
    empty = gr.update(choices=[], value=None)
    if not pdf_path_val:
        return empty
    try:
        with fitz.open(pdf_path_val) as doc:
            blocks = extract_blocks_spans(doc, page_num_val - 1)
        if not blocks:
            return empty
        choices = create_block_choices(blocks)
        return gr.update(choices=choices, value=0 if choices else None)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
        # not swallowed; any PDF/parsing error still yields an empty dropdown.
        return empty
def run_content_stream_inspector(pdf_path_val, page_num_val, block_idx):
    """Extract and format the raw content-stream operators for one block.

    Returns (formatted_markdown, raw_stream_text); on any failure the first
    element carries the error message and the second is empty.
    """
    if not pdf_path_val or block_idx is None:
        return "Please select a block", ""
    try:
        with fitz.open(pdf_path_val) as doc:
            blocks = extract_blocks_spans(doc, page_num_val - 1)
        result = analyze_content_stream(pdf_path_val, page_num_val - 1, block_idx, blocks)
        if result.get('error'):
            # Was result['message']: a KeyError here would mask the analyzer's
            # error. .get() is robust and consistent with the sibling handlers.
            return result.get('message', 'Error'), ""
        return result['formatted'], result['raw']
    except Exception as e:
        return f"## Error\n\n{str(e)}", ""
def run_screen_reader_sim(pdf_path_val, page_num_val, reader, detail, order):
    """Produce a simulated screen-reader transcript for the current page.

    Returns (transcript, analysis_markdown); error text is placed in the
    first slot with an empty second slot.
    """
    if not pdf_path_val:
        return "Please upload a PDF first", ""
    page_idx = page_num_val - 1
    try:
        with fitz.open(pdf_path_val) as doc:
            page_blocks = extract_blocks_spans(doc, page_idx)
        outcome = analyze_screen_reader(pdf_path_val, page_idx, page_blocks, reader, detail, order)
        if outcome.get('error'):
            return outcome.get('message', 'Error'), ""
        return outcome['transcript'], outcome['analysis']
    except Exception as e:
        return f"## Error\n\n{str(e)}", ""
def run_paragraph_detection(pdf_path_val, page_num_val, dpi_val, threshold):
    """Detect visual paragraphs, compare them with semantic <P> tags, and
    render an annotated overlay.

    Returns (overlay_image, visual_count, semantic_count, match_score,
    mismatch_markdown); failures return a None image, zeroed counts, and an
    error message in the last slot.
    """
    if not pdf_path_val:
        return None, 0, 0, 0.0, "Please upload a PDF first"
    page_idx = page_num_val - 1
    try:
        with fitz.open(pdf_path_val) as doc:
            page_blocks = extract_blocks_spans(doc, page_idx)
        outcome = analyze_paragraphs(pdf_path_val, page_idx, page_blocks, threshold)
        if outcome.get('error'):
            return None, 0, 0, 0.0, outcome.get('message', 'Error')
        overlay = render_paragraph_overlay(
            pdf_path_val, page_idx, dpi_val,
            outcome['visual_paragraphs'], outcome['semantic_paragraphs']
        )
        return (
            overlay,
            outcome['visual_count'],
            outcome['semantic_count'],
            outcome['match_score'],
            outcome['mismatches'],
        )
    except Exception as e:
        return None, 0, 0, 0.0, f"## Error\n\n{str(e)}"
def run_structure_tree_extraction(pdf_path_val):
    """Extract the document's structure (tag) tree.

    Returns (plot_data, text_view, statistics_markdown); on failure the plot
    is None and the text view carries the error message.
    """
    if not pdf_path_val:
        return None, "Please upload a PDF first", ""
    try:
        result = analyze_structure_tree(pdf_path_val)
        if result.get('error'):
            # Was result['message']: .get() avoids a KeyError (which would
            # mask the real error) if the analyzer's error dict omits
            # 'message'; also matches the other handlers' convention.
            return None, result.get('message', 'Error'), ""
        return result['plot_data'], result['text_view'], result['statistics']
    except Exception as e:
        return None, f"## Error\n\n{str(e)}", ""
def run_block_tag_mapping(pdf_path_val, page_num_val):
    """Link the page's visual text blocks to structure-tree elements.

    Returns (status_markdown, mapping_rows) for the message/DataFrame pair;
    failures return the error message and an empty row list.
    """
    if not pdf_path_val:
        return "Please upload a PDF first", []
    page_idx = page_num_val - 1
    try:
        with fitz.open(pdf_path_val) as doc:
            page_blocks = extract_blocks_spans(doc, page_idx)
        outcome = analyze_block_tag_mapping(pdf_path_val, page_idx, page_blocks)
        if outcome.get('error'):
            return outcome.get('message', 'Error'), []
        return outcome['message'], outcome['mappings']
    except Exception as e:
        return f"## Error\n\n{str(e)}", []
# 5. Advanced Tool Wiring
# Refresh the Content Stream Inspector's block list whenever the page changes.
page_num.change(update_block_dropdown, inputs=[pdf_path, page_num], outputs=[cs_block_dropdown])
# Clear stale advanced-tool results on page change (User Request: "Did it reset?")
# so users know they need to regenerate. The screen-reader transcript/analysis
# and the paragraph overlay are the results most easily mistaken for
# current-page output; the removed, never-wired `clear_stale` helper and the
# second page_num.change handler are folded into this single listener.
page_num.change(
    lambda: ("", "", None),
    outputs=[sr_transcript, sr_analysis, para_overlay]
)
# Content Stream Inspector: dump the selected block's raw PDF operators.
cs_inspect_btn.click(
    run_content_stream_inspector,
    inputs=[pdf_path, page_num, cs_block_dropdown],
    outputs=[cs_operator_display, cs_raw_stream]
)
# Screen Reader Simulator: generate an NVDA/JAWS-style transcript.
sr_btn.click(
    run_screen_reader_sim,
    inputs=[pdf_path, page_num, sr_reader, sr_detail, sr_order],
    outputs=[sr_transcript, sr_analysis]
)
# Paragraph Detection: visual vs semantic paragraph comparison + overlay.
para_btn.click(
    run_paragraph_detection,
    inputs=[pdf_path, page_num, dpi, para_threshold],
    outputs=[para_overlay, para_visual, para_semantic, para_score, para_mismatches]
)
# Structure Tree: whole-document, so only the path is needed.
struct_btn.click(
    run_structure_tree_extraction,
    inputs=[pdf_path],
    outputs=[struct_plot, struct_text, struct_stats]
)
# Block-to-Tag Mapping for the current page.
map_btn.click(
    run_block_tag_mapping,
    inputs=[pdf_path, page_num],
    outputs=[map_message, map_table]
)
# Batch Analysis over the whole document (streams progress to batch_progress).
batch_run_btn.click(
    analyze_batch_with_progress,
    inputs=[pdf_path, batch_max_pages, batch_sample_rate],
    outputs=[batch_summary_md, batch_chart, batch_table, batch_json, batch_progress]
)
if __name__ == "__main__":
    # ssr_mode=False disables Gradio's server-side rendering.
    # NOTE(review): `css` is not a documented parameter of Blocks.launch() --
    # custom CSS is normally passed to the gr.Blocks(css=...) constructor.
    # Confirm the Gradio version pinned for this Space accepts this kwarg,
    # otherwise move the CSS string to the Blocks() call.
    demo.launch(
        ssr_mode=False,
        css=".result-markdown { font-size: 14px; } .help-md { font-size: 12px; color: #666; }"
    )