# NOTE: the original capture carried a "Spaces: Sleeping" page banner here —
# a Hugging Face Spaces scrape artifact, not part of the program.
"""
Robust barcode reader for images and PDFs.

Strategy (in order):
  1) PDF -> extract embedded image XObjects at native resolution (no raster loss) and decode.
  2) If nothing is found, rasterize the PDF page(s) at high DPI (400/600/900) and decode.
  3) For plain images, decode directly.

Engines:
  - Primary: ZXing-CPP (zxingcpp) -> no system packages required
  - Fallback: OpenCV contrib barcode detector (if available)

Outputs are normalized dicts:
  {'engine', 'source', 'page', 'type', 'text', 'polygon': [[x, y] * 4]}
"""
| from __future__ import annotations | |
| import io | |
| import os | |
| from typing import Any, Dict, List, Tuple, Optional | |
| import numpy as np | |
| from PIL import Image | |
| import cv2 | |
| # ---------- Engines ---------- | |
# Primary engine: ZXing-CPP — installs as a plain pip wheel, no system packages.
HAS_ZXING = False
try:
    import zxingcpp  # pip install zxing-cpp
    HAS_ZXING = True
except Exception:
    # Keep the module importable when the optional engine is missing;
    # callers gate on HAS_ZXING before using it.
    zxingcpp = None
    HAS_ZXING = False
# Fallback engine: OpenCV contrib barcode detector, only if this cv2 build ships it.
HAS_OCV_BARCODE = hasattr(cv2, "barcode") and hasattr(getattr(cv2, "barcode"), "BarcodeDetector")
| # ---------- PDF (PyMuPDF) ---------- | |
try:
    import fitz  # PyMuPDF — used for both XObject extraction and page rasterization
    HAS_PYMUPDF = True
except Exception:
    # PDF support degrades gracefully; PDF code paths check HAS_PYMUPDF first.
    fitz = None
    HAS_PYMUPDF = False
| # ========================= | |
| # Utils | |
| # ========================= | |
def _to_bgr(img: Image.Image) -> np.ndarray:
    """Convert a PIL image into an OpenCV-style BGR numpy array."""
    rgb = np.asarray(img.convert("RGB"))
    return cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
def _as_gray(arr_bgr: np.ndarray) -> np.ndarray:
    """Collapse a BGR image to a single-channel grayscale array."""
    gray = cv2.cvtColor(arr_bgr, cv2.COLOR_BGR2GRAY)
    return gray
def _preprocess_candidates(bgr: np.ndarray) -> List[np.ndarray]:
    """
    Build a short list of preprocessing variants that raise the odds of
    decoding both 1D and 2D symbologies. Deliberately kept small so
    HF Spaces stay responsive.
    """
    variants: List[np.ndarray] = [bgr]
    height, width = bgr.shape[:2]

    # Unsharp-style kernel: thin 1D bars benefit from extra edge contrast.
    kernel = np.array(
        [[0, -1, 0],
         [-1, 5, -1],
         [0, -1, 0]],
        dtype=np.float32,
    )
    variants.append(cv2.filter2D(bgr, -1, kernel))

    # Local contrast equalization (CLAHE) applied on the grayscale channel.
    gray = _as_gray(bgr)
    equalized = cv2.createCLAHE(clipLimit=2.5, tileGridSize=(8, 8)).apply(gray)
    variants.append(cv2.cvtColor(equalized, cv2.COLOR_GRAY2BGR))

    # Small images get a modest upscale so tiny codes become readable.
    if max(height, width) < 1600:
        variants.append(
            cv2.resize(bgr, (0, 0), fx=1.5, fy=1.5, interpolation=cv2.INTER_CUBIC)
        )
    return variants
| def _norm_polygon(pts: Any, w: int, h: int) -> List[List[float]]: | |
| """ | |
| Normalize whatever the engine returns into 4 point polygon [[x,y],...]. | |
| If fewer than 4 points are given, approximate with a bounding box. | |
| """ | |
| try: | |
| p = np.array(pts, dtype=np.float32).reshape(-1, 2) | |
| if p.shape[0] >= 4: | |
| p = p[:4] | |
| else: | |
| # make a box | |
| x1, y1 = p.min(axis=0) | |
| x2, y2 = p.max(axis=0) | |
| p = np.array([[x1, y1], [x2, y1], [x2, y2], [x1, y2]], dtype=np.float32) | |
| except Exception: | |
| p = np.array([[0, 0], [w, 0], [w, h], [0, h]], dtype=np.float32) | |
| return p.astype(float).tolist() | |
| def _dedupe(results: List[Dict[str, Any]]) -> List[Dict[str, Any]]: | |
| """ | |
| Deduplicate by (text, type) and polygon IoU. | |
| """ | |
| keep: List[Dict[str, Any]] = [] | |
| def iou(a, b): | |
| ax = np.array(a["polygon"], dtype=np.float32) | |
| bx = np.array(b["polygon"], dtype=np.float32) | |
| a_min = ax.min(axis=0); a_max = ax.max(axis=0) | |
| b_min = bx.min(axis=0); b_max = bx.max(axis=0) | |
| inter_min = np.maximum(a_min, b_min) | |
| inter_max = np.minimum(a_max, b_max) | |
| wh = np.maximum(inter_max - inter_min, 0) | |
| inter = wh[0] * wh[1] | |
| a_area = (a_max - a_min).prod() | |
| b_area = (b_max - b_min).prod() | |
| union = max(a_area + b_area - inter, 1e-6) | |
| return float(inter / union) | |
| for r in results: | |
| dup = False | |
| for k in keep: | |
| if r["text"] == k["text"] and r["type"] == k["type"] and iou(r, k) > 0.7: | |
| dup = True | |
| break | |
| if not dup: | |
| keep.append(r) | |
| return keep | |
| # ========================= | |
| # Decoders | |
| # ========================= | |
def _decode_zxing(bgr: np.ndarray) -> List[Dict[str, Any]]:
    """
    Decode barcodes with ZXing-CPP across preprocess variants.

    Returns normalized dicts with 'engine', 'type', 'text', 'polygon'.

    Bug fix: polygons are now reported in the coordinate space of the
    ORIGINAL image. Previously, hits found on the 1.5x upscaled variant
    came back with upscaled coordinates, so downstream drawing/dedup used
    the wrong locations.
    """
    if not HAS_ZXING:
        return []
    hits: List[Dict[str, Any]] = []
    orig_h, orig_w = bgr.shape[:2]
    for candidate in _preprocess_candidates(bgr):
        try:
            res = zxingcpp.read_barcodes(candidate)  # returns a list of results
        except Exception:
            continue
        # Scale factors mapping candidate coordinates back to the original.
        cand_h, cand_w = candidate.shape[:2]
        sx = orig_w / cand_w if cand_w else 1.0
        sy = orig_h / cand_h if cand_h else 1.0
        for r in res or []:
            try:
                fmt = getattr(r.format, "name", str(r.format))
            except Exception:
                fmt = str(r.format)
            try:
                pos = r.position  # sequence of points with .x/.y
                poly = [[float(pt.x) * sx, float(pt.y) * sy] for pt in pos]
            except Exception:
                # Position unavailable: fall back to the full original frame.
                poly = _norm_polygon([], orig_w, orig_h)
            hits.append({
                "engine": "zxingcpp",
                "type": fmt,
                "text": r.text or "",
                "polygon": poly,
            })
        if hits:
            break  # first variant that decodes anything is good enough
    return hits
def _decode_opencv(bgr: np.ndarray) -> List[Dict[str, Any]]:
    """
    Decode barcodes with OpenCV's contrib barcode detector (fallback engine).

    Bug fix: corner points are now mapped back to the ORIGINAL image's
    coordinate space. Previously, hits found on the 1.5x upscaled variant
    kept upscaled coordinates, producing shifted polygons downstream.
    """
    if not HAS_OCV_BARCODE:
        return []
    det = cv2.barcode.BarcodeDetector()
    hits: List[Dict[str, Any]] = []
    orig_h, orig_w = bgr.shape[:2]
    for candidate in _preprocess_candidates(bgr):
        gray = _as_gray(candidate)
        ok, infos, types, corners = det.detectAndDecode(gray)
        if not ok:
            continue
        cand_h, cand_w = candidate.shape[:2]
        sx = orig_w / cand_w if cand_w else 1.0
        sy = orig_h / cand_h if cand_h else 1.0
        for txt, typ, pts in zip(infos, types, corners):
            if not txt:
                continue
            # Normalize in candidate space, then rescale to original space.
            poly = _norm_polygon(pts, cand_w, cand_h)
            poly = [[x * sx, y * sy] for x, y in poly]
            hits.append({
                "engine": "opencv_barcode",
                "type": typ,
                "text": txt,
                "polygon": poly,
            })
        if hits:
            break
    return hits
def _decode_any(bgr: np.ndarray) -> List[Dict[str, Any]]:
    """Try ZXing first (broader symbology coverage), then OpenCV as fallback."""
    zx_hits = _decode_zxing(bgr)
    return zx_hits if zx_hits else _decode_opencv(bgr)
| # ========================= | |
| # Image & PDF readers | |
| # ========================= | |
def _pdf_extract_xobject_images(path: str, page_index: Optional[int] = None) -> List[Tuple[int, np.ndarray]]:
    """
    Extract embedded image XObjects at native resolution (no raster loss).

    Args:
        path: PDF file path.
        page_index: restrict to one page, or scan all pages when None.

    Returns:
        (page_number, image_bgr) tuples, one per embedded image.

    Bug fix: the document handle is now closed even when extraction raises
    (previously it leaked on any mid-loop exception).
    """
    if not HAS_PYMUPDF:
        return []
    out: List[Tuple[int, np.ndarray]] = []
    doc = fitz.open(path)
    try:
        pages = range(len(doc)) if page_index is None else [page_index]
        for pno in pages:
            page = doc[pno]
            for info in page.get_images(full=True):
                xref = info[0]
                pix = fitz.Pixmap(doc, xref)
                # 4+ channels (alpha, CMYK) can't round-trip as RGB PNG directly;
                # convert to plain RGB first.
                if pix.n >= 4:
                    pix = fitz.Pixmap(fitz.csRGB, pix)
                pil = Image.open(io.BytesIO(pix.tobytes("png"))).convert("RGB")
                out.append((pno, _to_bgr(pil)))
    finally:
        doc.close()
    return out
def _pdf_render_page(path: str, page: int, dpi: int) -> np.ndarray:
    """
    Rasterize one PDF page at the given DPI (needed for vector-drawn codes).

    Raises:
        RuntimeError: PyMuPDF is not installed.
        ValueError: page index is out of range.

    Bug fix: the document is now closed on every path via try/finally
    (previously it leaked if get_pixmap or PIL decoding raised).
    """
    if not HAS_PYMUPDF:
        raise RuntimeError("PyMuPDF not available; cannot rasterize PDF.")
    doc = fitz.open(path)
    try:
        if page >= len(doc):
            raise ValueError(f"Page {page} out of range; PDF has {len(doc)} pages.")
        pg = doc[page]
        scale = dpi / 72.0  # PDF user space is 72 points per inch
        pix = pg.get_pixmap(matrix=fitz.Matrix(scale, scale), alpha=False)
        pil = Image.open(io.BytesIO(pix.tobytes("png"))).convert("RGB")
    finally:
        doc.close()
    return _to_bgr(pil)
def _decode_image_path(path: str) -> List[Dict[str, Any]]:
    """Decode barcodes from a plain raster image file on disk."""
    image = Image.open(path).convert("RGB")
    detections = _decode_any(_to_bgr(image))
    for det in detections:
        det["source"] = "image"
        det["page"] = 0
    return _dedupe(detections)
def _decode_pdf_path(path: str, max_pages: int = 8, raster_dpis: Tuple[int, ...] = (400, 600, 900)) -> List[Dict[str, Any]]:
    """
    Decode barcodes from a PDF.

    Pass 1 decodes the embedded image XObjects at native resolution; only
    if nothing is found does pass 2 rasterize up to `max_pages` pages at
    each DPI in `raster_dpis`, stopping at the first DPI that yields hits.
    """
    found: List[Dict[str, Any]] = []

    # Pass 1: embedded images — no raster loss.
    for pno, img_bgr in _pdf_extract_xobject_images(path):
        for det in _decode_any(img_bgr):
            det["source"] = "pdf_xobject_image"
            det["page"] = pno
            found.append(det)
    if found:
        return _dedupe(found)

    # Pass 2: rasterize pages at increasing DPI.
    if not HAS_PYMUPDF:
        return []  # no rasterizer available
    doc = fitz.open(path)
    page_count = min(len(doc), max_pages)
    doc.close()
    for dpi in raster_dpis:
        for pno in range(page_count):
            for det in _decode_any(_pdf_render_page(path, pno, dpi=dpi)):
                det["source"] = f"pdf_raster_{dpi}dpi"
                det["page"] = pno
                found.append(det)
        if found:
            break
    return _dedupe(found)
| # ========================= | |
| # Public API | |
| # ========================= | |
def read_barcodes_from_path(path: str,
                            max_pages: int = 8,
                            raster_dpis: Tuple[int, ...] = (400, 600, 900)) -> List[Dict[str, Any]]:
    """
    Decode barcodes from a file, auto-detecting PDF vs. image by extension.

    Returns a list of dicts: {engine, source, page, type, text, polygon}.
    """
    _, ext = os.path.splitext(path.lower())
    if ext == ".pdf":
        return _decode_pdf_path(path, max_pages=max_pages, raster_dpis=raster_dpis)
    return _decode_image_path(path)
| # ========================= | |
| # Optional: drawing helper | |
| # ========================= | |
def draw_barcodes(bgr: np.ndarray, detections: List[Dict[str, Any]]) -> np.ndarray:
    """Return a copy of `bgr` with each detection's polygon and label drawn on it."""
    canvas = bgr.copy()
    for det in detections:
        poly = np.array(det["polygon"], dtype=np.int32).reshape(-1, 1, 2)
        cv2.polylines(canvas, [poly], True, (0, 255, 0), 2)
        label = f'{det["type"]}: {det["text"]}'
        x, y = poly[0, 0, 0], poly[0, 0, 1]
        # Clamp the label baseline so text near the top edge stays visible.
        cv2.putText(canvas, label[:48], (x, max(15, y - 6)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 50, 255), 1, cv2.LINE_AA)
    return canvas