| """ |
| Base class and utilities for inference scripts. |
| |
| This module provides: |
| - BaseInference: A base class that captures common inference patterns |
| - HttpClientInference: Base class for HTTP-based APIs (uses httpx) |
| - print_performance_metrics: Utility for printing performance metrics |
| - process_files_concurrently: Utility for concurrent file processing |
| - create_argument_parser: Common CLI argument parser |
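| - parse_args_with_extra: Argument parsing that tolerates unrecognized flags |
| - categorize_error: Maps exceptions onto ErrorType categories |
| |
| Typical flow (a sketch; MyInference is a hypothetical subclass): |
| |
|     parser = create_argument_parser("My inference script") |
|     args = parse_args_with_extra(parser) |
|     engine = MyInference(args.save_path, concurrent_limit=args.concurrent) |
|     engine.infer(args.data_path) |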
| """ |
| import os |
| import sys |
| import json |
| import asyncio |
| import time |
| import random |
| import argparse |
| from abc import ABC, abstractmethod |
| from pathlib import Path |
| from typing import List, Tuple, Callable, Optional, Dict |
| from enum import Enum |
|
|
| from utils import ( |
| load_json_file, |
| read_file_paths, |
| validate_json_save_path, |
| get_interim_dir_path, |
| save_interim_result, |
| load_interim_result, |
| collect_all_interim_results |
| ) |
|
|
| from doc_grouping import ( |
| group_pages_to_documents, |
| parse_ext_mapping, |
| is_multi_page_dataset, |
| ) |
|
|
|
|
| class ErrorType(Enum): |
| """Error categories for better error tracking.""" |
| TIMEOUT = "timeout" |
| API_ERROR = "api_error" |
| NETWORK_ERROR = "network_error" |
| VALIDATION_ERROR = "validation_error" |
| UNKNOWN_ERROR = "unknown_error" |
|
|
|
|
| def create_argument_parser(description: str = "Document inference script") -> argparse.ArgumentParser: |
| """Create a common argument parser with standard arguments. |
| |
| Args: |
| description: Description for the argument parser |
| |
| Returns: |
| argparse.ArgumentParser with common arguments configured |
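| |
| Example (a sketch; --dpi is a hypothetical script-specific flag): |
| |
|     parser = create_argument_parser("My OCR script") |
|     parser.add_argument("--dpi", type=int, default=200) |
|     args = parse_args_with_extra(parser) |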
| """ |
| parser = argparse.ArgumentParser(description=description) |
| parser.add_argument( |
| "--data-path", |
| type=str, required=True, |
| help="Path containing the documents to process" |
| ) |
| parser.add_argument( |
| "--save-path", |
| type=str, required=True, |
| help="Path to save the results" |
| ) |
| parser.add_argument( |
| "--input-formats", |
| type=str, nargs='+', |
| default=[".pdf", ".jpg", ".jpeg", ".png", ".bmp", ".tiff", ".heic"], |
| help="Supported input file formats" |
| ) |
| parser.add_argument( |
| "--concurrent", |
| type=int, default=None, |
| help="Number of concurrent API requests (enables concurrent mode if specified)" |
| ) |
| parser.add_argument( |
| "--sampling-rate", |
| type=float, default=1.0, |
| help="Fraction of files to process (0.0-1.0, default 1.0 = all files)" |
| ) |
| parser.add_argument( |
| "--request-timeout", |
| type=float, default=600, |
| help="Timeout in seconds for API requests (default 600)" |
| ) |
| parser.add_argument( |
| "--random-seed", |
| type=int, default=None, |
| help="Random seed for reproducible sampling (default None = random)" |
| ) |
| parser.add_argument( |
| "--model", |
| type=str, default=None, |
| help="Model name to use for inference (default depends on provider)" |
| ) |
| parser.add_argument( |
| "--mode", |
| type=str, default=None, |
| help="Inference mode (e.g., 'standard', 'enhanced', 'agentic'). None if not applicable." |
| ) |
| parser.add_argument( |
| "--group-by-document", |
| action=argparse.BooleanOptionalAction, |
| default=False, |
| help="Group per-page results into document-level entries (default: False)" |
| ) |
| parser.add_argument( |
| "--file-ext-mapping", |
| type=str, |
| default=None, |
| help="File extension mapping for document grouping, e.g., 'jpg:pdf' or 'jpg->pdf,png->pdf'" |
| ) |
| return parser |
|
|
|
|
| def parse_args_with_extra(parser: argparse.ArgumentParser) -> argparse.Namespace: |
| """Parse arguments, gracefully ignoring unrecognized ones. |
| |
| This allows extra CLI arguments (e.g. --dpi, --jpeg-quality) to be passed |
| down from run_all.py / infer_all.py without breaking scripts that don't |
| understand them. Unrecognized arguments are reported on stderr and then |
| discarded. |
| |
| Args: |
| parser: ArgumentParser to use for parsing |
| |
| Returns: |
| argparse.Namespace with recognized arguments |
| """ |
| args, unknown = parser.parse_known_args() |
| if unknown: |
| print(f"[INFO] Ignoring unrecognized arguments: {unknown}", file=sys.stderr) |
| return args |
|
|
|
|
| def print_performance_metrics( |
| sample_latencies: List[float], |
| total_elapsed_time: float, |
| concurrent_limit: Optional[int] = None, |
| num_total: Optional[int] = None, |
| num_errors: int = 0 |
| ): |
| """Print performance metrics for concurrent processing. |
| |
| Args: |
| sample_latencies: List of latencies for each successful sample |
| total_elapsed_time: Total time elapsed for all processing |
| concurrent_limit: Optional concurrent limit (for display) |
| num_total: Optional total number of samples |
| num_errors: Number of failed samples |
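| |
| Example (illustrative values): |
| |
|     print_performance_metrics([1.2, 0.8, 2.5], total_elapsed_time=3.1, |
|                               concurrent_limit=4) |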
| """ |
| num_successful = len(sample_latencies) |
| total_samples = num_total if num_total is not None else (num_successful + num_errors) |
| success_rate = (num_successful / total_samples * 100) if total_samples > 0 else 0 |
| |
| print("="*60) |
| print("PERFORMANCE METRICS") |
| if concurrent_limit is not None: |
| print(f"Concurrent Limit: {concurrent_limit}") |
| |
| print(f"\nSuccess Rate: {success_rate:.2f}% ({num_successful}/{total_samples})") |
| |
| if num_successful > 0: |
| |
| avg_latency = sum(sample_latencies) / num_successful |
| min_latency = min(sample_latencies) |
| max_latency = max(sample_latencies) |
| |
| print(f"\nLatency (sec/sample):") |
| print(f" - Average: {avg_latency:.2f} sec/sample") |
| print(f" - Min: {min_latency:.2f} sec/sample") |
| print(f" - Max: {max_latency:.2f} sec/sample") |
| |
| |
| throughput_per_min = (num_successful / total_elapsed_time) * 60 |
| |
| print(f"\nThroughput:") |
| print(f" - {throughput_per_min:.2f} samples/min") |
| |
| print(f"\nTotal Processing Time: {total_elapsed_time:.2f} seconds") |
| print("="*60) |
|
|
|
|
| def categorize_error(error: Exception) -> ErrorType: |
| """Categorize an exception into error types. |
| |
| Args: |
| error: Exception to categorize |
| |
| Returns: |
| ErrorType enum value |
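| |
| Example: |
| |
|     categorize_error(asyncio.TimeoutError())      # -> ErrorType.TIMEOUT |
|     categorize_error(ConnectionError("refused"))  # -> ErrorType.NETWORK_ERROR |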
| """ |
| error_str = str(error).lower() |
| |
| if isinstance(error, asyncio.TimeoutError) or "timeout" in error_str: |
| return ErrorType.TIMEOUT |
| elif isinstance(error, (ConnectionError, OSError)) or "connection" in error_str or "network" in error_str: |
| return ErrorType.NETWORK_ERROR |
| elif "api" in error_str or "http" in error_str or "status" in error_str: |
| return ErrorType.API_ERROR |
| elif "validation" in error_str or "invalid" in error_str: |
| return ErrorType.VALIDATION_ERROR |
| else: |
| return ErrorType.UNKNOWN_ERROR |
|
|
|
|
| async def process_files_concurrently( |
| paths: List[Path], |
| process_single_file_fn: Callable, |
| concurrent_limit: int = 4, |
| processed_data: Optional[dict] = None, |
| *args, **kwargs |
| ) -> Tuple[dict, List[str], List[float], dict]: |
| """Process multiple files concurrently with semaphore-based rate limiting. |
| |
| Args: |
| paths: List of file paths to process |
| process_single_file_fn: Async function that processes a single file |
| Should accept (filepath, file_index, total_files, *args, **kwargs) and return |
| (filename, result, latency) or (filename, None, 0) on error |
| concurrent_limit: Maximum number of concurrent operations. Note that the |
| limiting itself is expected to happen inside process_single_file_fn |
| (e.g. via a semaphore); this function does not apply it directly. |
| processed_data: Optional dict of already processed data (to skip) |
| *args, **kwargs: Additional arguments to pass to process_single_file_fn |
| |
| Returns: |
| Tuple of (result_dict, error_files, sample_latencies, error_details) |
| error_details: dict mapping filename to (error_type, error_message) |
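| |
| Example (a minimal sketch; process_one is a hypothetical worker): |
| |
|     async def process_one(filepath, idx, total): |
|         start = time.time() |
|         data = {"size_bytes": filepath.stat().st_size}  # stand-in for an API call |
|         return (filepath.name, {"data": data}, time.time() - start) |
| |
|     results, errors, latencies, details = await process_files_concurrently( |
|         paths, process_one, concurrent_limit=8 |
|     ) |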
| """ |
| if processed_data is None: |
| processed_data = {} |
| |
| error_files = [] |
| error_details = {} |
| sample_latencies = [] |
| result_dict = {} |
| |
| |
| # Wrap each coroutine in a Task so it can be inspected and cancelled |
| # on interrupt (bare coroutines have no done()/cancel()/result()). |
| tasks = [] |
| for idx, filepath in enumerate(paths, 1): |
| task = asyncio.ensure_future( |
| process_single_file_fn(filepath, idx, len(paths), *args, **kwargs) |
| ) |
| tasks.append(task) |
| |
| |
| |
| interrupted = False |
| try: |
| results = await asyncio.gather(*tasks, return_exceptions=True) |
| except KeyboardInterrupt: |
| interrupted = True |
| print("\n⚠️ KeyboardInterrupt detected! Collecting completed results...") |
| |
| |
| for task in tasks: |
| if not task.done(): |
| task.cancel() |
| |
| |
| # Give cancellation a moment to propagate. |
| await asyncio.sleep(0.1) |
| |
| # Rebuild results with a placeholder for every unfinished task so that |
| # indices stay aligned with `paths` in the loop below. |
| results = [] |
| for task in tasks: |
| if task.done() and not task.cancelled(): |
| try: |
| results.append(task.result()) |
| except Exception as e: |
| results.append(e) |
| else: |
| results.append(RuntimeError("Cancelled by KeyboardInterrupt")) |
| |
| print(f"✓ Collected {len([r for r in results if not isinstance(r, Exception)])} completed results out of {len(paths)} total files") |
| |
| |
| for i, result in enumerate(results): |
| if isinstance(result, Exception): |
| error_type = categorize_error(result) |
| error_msg = str(result) |
| |
| if i < len(paths): |
| filename = paths[i].name |
| error_files.append(filename) |
| error_details[filename] = (error_type, error_msg) |
| print(f"Error in task ({error_type.value}): {error_msg}") |
| continue |
| |
| filename, result_data, latency = result |
| |
| if result_data is not None: |
| result_dict[filename] = result_data |
| if latency > 0: |
| sample_latencies.append(latency) |
| elif latency == 0 and filename not in processed_data: |
| error_files.append(filename) |
| |
| |
| |
| if interrupted: |
| print("⚠️ Processing was interrupted. Partial results collected.") |
| |
| return result_dict, error_files, sample_latencies, error_details |
|
|
|
|
| class BaseInference(ABC): |
| """Base class for all inference implementations. |
| |
| This class provides common functionality for: |
| - Initialization (save_path, interim_dir, processed_data) |
| - Concurrent mode setup (semaphore) |
| - Result collection and saving (_collect_and_save_results) |
| - Async inference orchestration (infer_async) |
| - Sync/async mode dispatching (infer) |
| |
| Subclasses must implement: |
| - post_process(): Process raw API results into standard format |
| - _call_api_async(): Make async API call for a file |
| - _call_api_sync(): Make sync API call for a file |
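| |
| Example (a minimal, hypothetical subclass sketch): |
| |
|     class EchoInference(BaseInference): |
|         def post_process(self, data): |
|             # Pass raw results through and merge in previous runs. |
|             processed = {k: v for k, v in data.items()} |
|             return self._merge_processed_data(processed) |
| |
|         async def _call_api_async(self, filepath, *args, **kwargs): |
|             return {"text": filepath.name}  # stand-in for a real API call |
| |
|         def _call_api_sync(self, filepath, *args, **kwargs): |
|             return {"text": filepath.name} |
| |
|     engine = EchoInference(save_path="results.json", concurrent_limit=4) |
|     engine.infer("docs/") |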
| """ |
| |
| |
| DEFAULT_INPUT_FORMATS = [".pdf", ".jpg", ".jpeg", ".png", ".bmp", ".tiff", ".heic"] |
| |
| def __init__( |
| self, |
| save_path, |
| input_formats=None, |
| concurrent_limit=None, |
| sampling_rate=1.0, |
| request_timeout=600, |
| random_seed=None, |
| group_by_document=False, |
| file_ext_mapping=None |
| ): |
| """Initialize the base inference class |
| |
| Args: |
| save_path (str): the json path to save the results |
| input_formats (list, optional): the supported file formats. |
| concurrent_limit (int, optional): maximum number of concurrent API requests (enables concurrent mode) |
| sampling_rate (float, optional): fraction of files to process (0.0-1.0, default 1.0 = all files) |
| request_timeout (float, optional): timeout in seconds for API requests (default 600) |
| random_seed (int, optional): random seed for reproducible sampling (default None = random) |
| group_by_document (bool, optional): group per-page results into document-level entries (default False) |
| file_ext_mapping (str or dict, optional): file extension mapping for document grouping |
| """ |
| if input_formats is None: |
| input_formats = self.DEFAULT_INPUT_FORMATS |
| |
| self.formats = input_formats |
| self.concurrent_limit = concurrent_limit |
| self.sampling_rate = max(0.0, min(1.0, sampling_rate)) |
| self.request_timeout = request_timeout |
| self.random_seed = random_seed |
| |
| |
| self.group_by_document = group_by_document |
| if isinstance(file_ext_mapping, str): |
| self.file_ext_mapping = parse_ext_mapping(file_ext_mapping) if file_ext_mapping else {} |
| else: |
| self.file_ext_mapping = file_ext_mapping or {} |
| |
| |
| validate_json_save_path(save_path) |
| self.save_path = save_path |
| self.interim_dir = get_interim_dir_path(save_path) |
| os.makedirs(self.interim_dir, exist_ok=True) |
| self.processed_data = load_json_file(save_path) |
| |
| |
| if concurrent_limit is not None: |
| self.semaphore = asyncio.Semaphore(concurrent_limit) |
| |
| @abstractmethod |
| def post_process(self, data: Dict) -> Dict: |
| """Post-process the raw API response to match the standard format. |
| |
| This method must be implemented by subclasses to convert API-specific |
| response formats into the standard format. |
| |
| Args: |
| data (dict): raw API response data, keyed by filename |
| |
| Returns: |
| dict: processed data in standard format, keyed by filename |
| """ |
| pass |
| |
| def _merge_processed_data(self, processed_dict: Dict) -> Dict: |
| """Merge previously processed data into the result dict. |
| |
| This is a common operation at the end of post_process(). |
| Subclasses should call this at the end of their post_process implementation. |
| |
| Args: |
| processed_dict: The dict of newly processed results |
| |
| Returns: |
| The merged dict including previously processed data |
| """ |
| for key in self.processed_data: |
| if key not in processed_dict: |
| processed_dict[key] = self.processed_data[key] |
| return processed_dict |
| |
| @abstractmethod |
| async def _call_api_async(self, filepath, *args, **kwargs): |
| """Make the actual async API call for a file. |
| |
| This method must be implemented by subclasses to perform the actual API call. |
| It should NOT handle interim result checking or saving - that's done by the base class. |
| |
| Args: |
| filepath: Path object to the file |
| *args, **kwargs: Additional arguments (e.g., client for HTTP requests) |
| |
| Returns: |
| The raw API response data (will be wrapped by base class) |
| |
| Raises: |
| Exception: If API call fails |
| """ |
| pass |
| |
| async def process_single_file(self, filepath, file_index, total_files, *args, **kwargs): |
| """Process a single file asynchronously (for concurrent mode). |
| |
| This wrapper method handles: |
| - Checking if already processed |
| - Checking interim results |
| - Semaphore (if concurrent_limit is set) |
| - Timing |
| - File size tracking |
| - Saving interim results |
| - Error handling and categorization |
| |
| Subclasses only need to implement _call_api_async(). |
| |
| Args: |
| filepath: Path object to the file |
| file_index: Current file index (for logging) |
| total_files: Total number of files |
| *args, **kwargs: Additional arguments passed to _call_api_async |
| |
| Returns: |
| tuple: (filename, result_data, latency) or (filename, None, 0) on error |
| """ |
| filename = filepath.name |
| file_size_mb = filepath.stat().st_size / (1024 * 1024) |
| print(f"({file_index}/{total_files}) Processing {filepath} ({file_size_mb:.2f} MB)") |
| |
| |
| if filename in self.processed_data: |
| print(f"'{filename}' is already in the loaded dictionary. Skipping this sample") |
| return (filename, None, 0) |
| |
| |
| existing_result = load_interim_result(self.interim_dir, filename) |
| if existing_result is not None: |
| print(f"'{filename}' interim result already exists. Skipping API call to save costs.") |
| # Return the cached result so the skip is not miscounted as an error. |
| return (filename, existing_result, 0) |
| |
| try: |
| if self.concurrent_limit is not None and hasattr(self, 'semaphore'): |
| # Concurrent mode: bound the number of in-flight API calls. |
| async with self.semaphore: |
| sample_start_time = time.time() |
| result_data = await asyncio.wait_for( |
| self._call_api_async(filepath, *args, **kwargs), |
| timeout=self.request_timeout |
| ) |
| else: |
| # No concurrency limit configured: call the API directly. |
| sample_start_time = time.time() |
| result_data = await asyncio.wait_for( |
| self._call_api_async(filepath, *args, **kwargs), |
| timeout=self.request_timeout |
| ) |
| |
| sample_latency = time.time() - sample_start_time |
| |
| |
| result_with_time = { |
| "data": result_data, |
| "time_sec": sample_latency, |
| "file_size_mb": round(file_size_mb, 2) |
| } |
| save_interim_result(self.interim_dir, filename, result_with_time) |
| pct = file_index / total_files * 100 |
| print(f"✓ ({file_index}/{total_files}, {pct:.1f}%) Saved '{filename}' (took {sample_latency:.2f}s)") |
| |
| |
| return (filename, result_with_time, sample_latency) |
| |
| except asyncio.TimeoutError: |
| error_type = ErrorType.TIMEOUT |
| error_msg = f"Request timeout after {self.request_timeout}s" |
| print(f"✗ {filename} - {error_type.value}: {error_msg}") |
| |
| raise asyncio.TimeoutError(error_msg) |
| except Exception as e: |
| error_type = categorize_error(e) |
| error_msg = str(e) |
| print(f"✗ {filename} - {error_type.value}: {error_msg}") |
| |
| raise |
| |
| @abstractmethod |
| def _call_api_sync(self, filepath, *args, **kwargs): |
| """Make the actual sync API call for a file. |
| |
| This method must be implemented by subclasses to perform the actual API call. |
| It should NOT handle interim result checking or saving - that's done by the base class. |
| |
| Args: |
| filepath: Path object to the file |
| *args, **kwargs: Additional arguments |
| |
| Returns: |
| The raw API response data (will be wrapped by base class) |
| |
| Raises: |
| Exception: If API call fails |
| """ |
| pass |
| |
| def process_file_sequential(self, filepath, file_index, total_files, *args, **kwargs): |
| """Process a single file sequentially (for sync mode). |
| |
| This wrapper method handles (skipping of already-processed files and |
| existing interim results is done by the caller, _infer_sequential): |
| - Timing |
| - File size tracking |
| - Saving interim results |
| - Error handling and categorization |
| |
| Subclasses only need to implement _call_api_sync(). |
| |
| Args: |
| filepath: Path object to the file |
| file_index: Current file index (for logging) |
| total_files: Total number of files |
| *args, **kwargs: Additional arguments passed to _call_api_sync |
| |
| Returns: |
| tuple: (filename, result_data, latency) or (filename, None, 0) on error |
| """ |
| filename = filepath.name |
| file_size_mb = filepath.stat().st_size / (1024 * 1024) |
| print(f"({file_index}/{total_files}) Processing {filepath} ({file_size_mb:.2f} MB)") |
| sample_start_time = time.time() |
| |
| try: |
| |
| |
| |
| result_data = self._call_api_sync(filepath, *args, **kwargs) |
| |
| sample_latency = time.time() - sample_start_time |
| |
| |
| result_with_time = { |
| "data": result_data, |
| "time_sec": sample_latency, |
| "file_size_mb": round(file_size_mb, 2) |
| } |
| save_interim_result(self.interim_dir, filename, result_with_time) |
| pct = file_index / total_files * 100 |
| print(f"✓ ({file_index}/{total_files}, {pct:.1f}%) Saved '{filename}' (took {sample_latency:.2f}s)") |
| |
| |
| return (filename, result_with_time, sample_latency) |
| |
| except Exception as e: |
| error_type = categorize_error(e) |
| error_msg = str(e) |
| print(f"✗ {filename} - {error_type.value}: {error_msg}") |
| |
| raise |
| |
| def _collect_and_save_results(self, raw_results, sample_latencies, total_elapsed_time, error_files, error_details=None): |
| """Common method to collect interim results, post-process, and save final results. |
| |
| Used by both sync and async modes. This method: |
| 1. Collects all interim results from disk |
| 2. Merges with current run results |
| 3. Unwraps data from interim result format |
| 4. Post-processes results |
| 5. Preserves timing information |
| 6. Saves final results |
| 7. Prints performance metrics |
| |
| Args: |
| raw_results (dict): Results from current run, keyed by filename |
| sample_latencies (list): List of latencies for successful samples |
| total_elapsed_time (float): Total time elapsed for processing |
| error_files (list): List of filenames that had errors |
| error_details (dict, optional): Dict mapping filename to (ErrorType, error_message) |
| |
| Returns: |
| dict: Final processed results |
| """ |
| if error_details is None: |
| error_details = {} |
| |
| |
| print("Collecting all interim results...") |
| collected_results = collect_all_interim_results(self.interim_dir) |
| |
| |
| for key, value in raw_results.items(): |
| collected_results[key] = value |
| |
| raw_results = collected_results |
| |
| |
| unwrapped_results = {} |
| for key, value in raw_results.items(): |
| if isinstance(value, dict) and "data" in value: |
| unwrapped_results[key] = value["data"] |
| else: |
| unwrapped_results[key] = value |
| |
| |
| final_results = self.post_process(unwrapped_results) |
| |
| |
| for key in final_results: |
| if key in raw_results: |
| raw_result = raw_results[key] |
| if isinstance(raw_result, dict): |
| if "time_sec" in raw_result and isinstance(final_results[key], dict): |
| final_results[key]["time_sec"] = raw_result["time_sec"] |
| if "file_size_mb" in raw_result and isinstance(final_results[key], dict): |
| final_results[key]["file_size_mb"] = raw_result["file_size_mb"] |
| |
| |
| if self.group_by_document: |
| |
| data_keys = [k for k in final_results.keys() if not k.startswith("_")] |
| if data_keys and is_multi_page_dataset(data_keys): |
| print("Grouping per-page results into document-level entries...") |
| |
| |
| metadata_keys = [k for k in final_results.keys() if k.startswith("_")] |
| metadata = {k: final_results[k] for k in metadata_keys} |
| page_data = {k: final_results[k] for k in data_keys} |
| |
| |
| grouped_results = group_pages_to_documents( |
| page_data, |
| file_ext_mapping=self.file_ext_mapping, |
| elements_key="elements", |
| include_merged_tables=True, |
| ) |
| |
| |
| grouped_results.update(metadata) |
| final_results = grouped_results |
| |
| print(f"Grouped {len(data_keys)} pages into {len(grouped_results) - len(metadata_keys)} documents") |
| |
| |
| |
| |
| # Gather per-sample latencies recorded in the interim results, which |
| # also cover files processed in previous runs. |
| all_latencies = [] |
| for key, value in raw_results.items(): |
| if isinstance(value, dict) and "time_sec" in value: |
| all_latencies.append(value["time_sec"]) |
| |
| |
| |
| |
| if all_latencies: |
| sample_latencies = all_latencies |
| |
| num_successful = len(sample_latencies) |
| num_total = num_successful + len(error_files) |
| |
| |
| |
| # If everything was served from interim results, the measured wall time |
| # is near zero; estimate it from per-sample latencies instead. |
| if total_elapsed_time < 1.0 and sample_latencies: |
| sum_latencies = sum(sample_latencies) |
| concurrent_limit = self.concurrent_limit or 1 |
| total_elapsed_time = sum_latencies / concurrent_limit |
| |
| |
| final_results["_metadata"] = { |
| "total_elapsed_time_sec": round(total_elapsed_time, 4), |
| "concurrent_limit": self.concurrent_limit, |
| "num_files": num_total, |
| "num_successful": num_successful, |
| "num_errors": len(error_files) |
| } |
| |
| |
| with open(self.save_path, "w", encoding="utf-8") as f: |
| json.dump(final_results, f, ensure_ascii=False, indent=4) |
| |
| print_performance_metrics( |
| sample_latencies, |
| total_elapsed_time, |
| self.concurrent_limit, |
| num_total, |
| len(error_files) |
| ) |
| |
| |
| if error_files: |
| print(f"\nErrors ({len(error_files)} files):") |
| error_by_type = {} |
| for error_file in error_files: |
| if error_file in error_details: |
| error_type, error_msg = error_details[error_file] |
| if error_type not in error_by_type: |
| error_by_type[error_type] = [] |
| error_by_type[error_type].append((error_file, error_msg)) |
| else: |
| if ErrorType.UNKNOWN_ERROR not in error_by_type: |
| error_by_type[ErrorType.UNKNOWN_ERROR] = [] |
| error_by_type[ErrorType.UNKNOWN_ERROR].append((error_file, "Unknown error")) |
| |
| for error_type, errors in error_by_type.items(): |
| print(f" {error_type.value.upper()} ({len(errors)} files):") |
| for error_file, error_msg in errors[:5]: |
| print(f" - {error_file}: {error_msg}") |
| if len(errors) > 5: |
| print(f" ... and {len(errors) - 5} more") |
| |
| print("Finished processing all documents") |
| print("Results saved to: {}".format(self.save_path)) |
| print("Interim results saved to: {}".format(self.interim_dir)) |
| print("Number of errors: {}".format(len(error_files))) |
| print("Total processed files: {}".format(len(final_results))) |
| |
| return final_results |
| |
| async def infer_async(self, file_path, *args, **kwargs): |
| """Infer the layout of documents with concurrent processing. |
| |
| This method orchestrates concurrent file processing using process_files_concurrently. |
| It can be overridden by subclasses if they need custom async behavior. |
| |
| Args: |
| file_path (str): the path to the file or directory containing the documents to process |
| *args, **kwargs: Additional arguments to pass to process_single_file |
| |
| Returns: |
| dict: Final processed results |
| """ |
| paths = read_file_paths(file_path, supported_formats=self.formats) |
| total_found = len(paths) |
| |
| # Optionally sample a reproducible subset of the discovered files. |
| if self.sampling_rate < 1.0 and total_found > 0: |
| if self.random_seed is not None: |
| random.seed(self.random_seed) |
| sample_size = max(1, int(total_found * self.sampling_rate)) |
| paths = random.sample(paths, sample_size) |
| print(f"Sampling {self.sampling_rate * 100:.1f}% of files: {len(paths)} out of {total_found} total files") |
| |
| error_files = [] |
| sample_latencies = [] |
| total_start_time = time.time() |
| |
| |
| result_dict, error_files, sample_latencies, error_details = await process_files_concurrently( |
| paths, |
| self.process_single_file, |
| self.concurrent_limit, |
| self.processed_data, |
| *args, |
| **kwargs |
| ) |
| |
| |
| total_elapsed_time = time.time() - total_start_time |
| final_results = self._collect_and_save_results( |
| result_dict, sample_latencies, total_elapsed_time, error_files, error_details |
| ) |
| |
| return final_results |
| |
| def infer(self, file_path, *args, **kwargs): |
| """Infer the layout of the documents in the given file path. |
| |
| This method dispatches to async mode if concurrent_limit is set, |
| otherwise runs sequential processing. |
| |
| Args: |
| file_path (str): the path to the file or directory containing the documents to process |
| *args, **kwargs: Additional arguments (passed to infer_async or process_file_sequential) |
| |
| Returns: |
| dict: Final processed results (or None for sequential mode without return) |
| """ |
| |
| if self.concurrent_limit is not None: |
| return asyncio.run(self.infer_async(file_path, *args, **kwargs)) |
| |
| |
| return self._infer_sequential(file_path, *args, **kwargs) |
| |
| def _infer_sequential(self, file_path, *args, **kwargs): |
| """Internal method for sequential inference. |
| |
| This template method can be overridden by subclasses for custom sequential behavior. |
| Default implementation processes files one by one using process_file_sequential. |
| |
| Args: |
| file_path (str): the path to the file or directory containing the documents to process |
| *args, **kwargs: Additional arguments passed to process_file_sequential |
| """ |
| paths = read_file_paths(file_path, supported_formats=self.formats) |
| total_found = len(paths) |
| |
| # Optionally sample a reproducible subset of the discovered files. |
| if self.sampling_rate < 1.0 and total_found > 0: |
| if self.random_seed is not None: |
| random.seed(self.random_seed) |
| sample_size = max(1, int(total_found * self.sampling_rate)) |
| paths = random.sample(paths, sample_size) |
| print(f"Sampling {self.sampling_rate * 100:.1f}% of files: {len(paths)} out of {total_found} total files") |
| |
| error_files = [] |
| error_details = {} |
| sample_latencies = [] |
| total_start_time = time.time() |
| |
| try: |
| for idx, filepath in enumerate(paths, 1): |
| filename = filepath.name |
| |
| # Skip files already present in the loaded final results. |
| if filename in self.processed_data: |
| print(f"'{filename}' is already in the loaded dictionary. Skipping this sample") |
| continue |
| |
| # Skip files with an existing interim result to save API costs. |
| existing_result = load_interim_result(self.interim_dir, filename) |
| if existing_result is not None: |
| print(f"'{filename}' interim result already exists. Skipping API call to save costs.") |
| continue |
| |
| |
| try: |
| filename_result, result_data, latency = self.process_file_sequential( |
| filepath, idx, len(paths), *args, **kwargs |
| ) |
| |
| if result_data is not None and latency > 0: |
| sample_latencies.append(latency) |
| elif latency == 0: |
| error_files.append(filename) |
| except KeyboardInterrupt: |
| raise |
| except Exception as e: |
| error_type = categorize_error(e) |
| error_details[filename] = (error_type, str(e)) |
| error_files.append(filename) |
| continue |
| except KeyboardInterrupt: |
| print("\n⚠️ KeyboardInterrupt detected! Saving partial results...") |
| print(f"✓ Processed {len(sample_latencies)} files before interruption") |
| |
| |
| total_elapsed_time = time.time() - total_start_time |
| |
| |
| |
| raw_results = {} |
| |
| final_results = self._collect_and_save_results( |
| raw_results, sample_latencies, total_elapsed_time, error_files, error_details |
| ) |
| |
| return final_results |
|
|
|
|
| class HttpClientInference(BaseInference): |
| """Base class for HTTP-based API services (Upstage, LlamaParse). |
| |
| This class provides: |
| - Automatic httpx.AsyncClient management for async mode |
| - Common pattern for overriding infer_async with client context |
| |
| Subclasses should implement _call_api_async(filepath, client) and _call_api_sync(filepath). |
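| |
| Example (a hypothetical sketch; the endpoint URL and form field are |
| illustrative, not a real API): |
| |
|     class MyHttpInference(HttpClientInference): |
|         async def _call_api_async(self, filepath, client): |
|             with open(filepath, "rb") as f: |
|                 resp = await client.post( |
|                     "https://api.example.com/parse", |
|                     files={"document": f}, |
|                 ) |
|             resp.raise_for_status() |
|             return resp.json() |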
| """ |
| |
| async def infer_async(self, file_path, *args, **kwargs): |
| """Infer the layout of documents with concurrent processing. |
| |
| Creates an httpx.AsyncClient and passes it to the parent's infer_async. |
| |
| Args: |
| file_path (str): the path to the file or directory containing the documents to process |
| |
| Returns: |
| dict: Final processed results |
| """ |
| import httpx  # lazy import: httpx is only required in async mode |
| async with httpx.AsyncClient() as client: |
| return await super().infer_async(file_path, *args, client=client, **kwargs) |
|
|