import gradio as gr import pikepdf import os import zipfile import shutil from pathlib import Path import uuid from datetime import datetime, timedelta import logging import threading import time from typing import Tuple, List, Optional import subprocess import json # Configure logging logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) # Configuration - FIXED VALUES TARGET_SEGMENT_SIZE_MB = 4.5 # Target size for each segment MAX_ALLOWED_SIZE_MB = 5.0 # Maximum allowed size - discard if larger TARGET_SEGMENT_SIZE_BYTES = int(TARGET_SEGMENT_SIZE_MB * 1024 * 1024) # 4.5MB in bytes MAX_ALLOWED_SIZE_BYTES = int(MAX_ALLOWED_SIZE_MB * 1024 * 1024) # 5MB in bytes TEMP_DIR = Path("temp_files") CLEANUP_AFTER_MINUTES = 10 # Create temp directory TEMP_DIR.mkdir(exist_ok=True) # Store user sessions for cleanup user_sessions = {} class PDFProcessor: """Handle PDF splitting using qpdf directly for performance""" @staticmethod def get_pdf_info(pdf_path: Path) -> dict: """Get PDF info using qpdf""" try: result = subprocess.run( ["qpdf", "--show-npages", str(pdf_path)], capture_output=True, text=True, check=True ) return {"total_pages": int(result.stdout.strip())} except subprocess.CalledProcessError as e: logger.error(f"Error getting PDF info: {e}") raise @staticmethod def split_pdf_by_size(input_path: Path, output_dir: Path, progress_callback=None) -> Tuple[List[Path], dict]: """ Split PDF using qpdf directly (like your bash script) for much better performance """ kept_files = [] stats = { "total_pages": 0, "segments_created": 0, "segments_discarded": 0, "original_size_mb": input_path.stat().st_size / 1024 / 1024, "total_output_size_mb": 0, "largest_segment_mb": 0, "smallest_segment_mb": float('inf') } try: # Get total pages using qpdf pdf_info = PDFProcessor.get_pdf_info(input_path) total_pages = pdf_info["total_pages"] stats["total_pages"] = total_pages if total_pages == 0: return kept_files, stats logger.info(f"Starting split: {total_pages} pages, original size: {stats['original_size_mb']:.2f} MB") start_page = 1 # qpdf uses 1-based indexing part = 1 while start_page <= total_pages: if progress_callback: progress = ((start_page - 1) / total_pages) progress_callback(progress, f"Processing segment {part}...") # Binary search for the right number of pages low = start_page high = min(start_page + 100, total_pages) # Start with max 100 pages best_end = start_page best_size = 0 # First, quickly find a rough upper bound test_file = output_dir / f"test_{part}.pdf" while low <= high: mid = (low + high) // 2 # Create test segment using qpdf try: subprocess.run( ["qpdf", "--empty", "--pages", str(input_path), f"{start_page}-{mid}", "--", str(test_file)], capture_output=True, check=True, timeout=10 # 10 second timeout ) # Check file size if test_file.exists(): size = test_file.stat().st_size if size <= MAX_ALLOWED_SIZE_BYTES: best_end = mid best_size = size if size < TARGET_SEGMENT_SIZE_BYTES * 0.9: # Less than 90% of target low = mid + 1 # Try more pages else: break # Good enough, close to target else: high = mid - 1 # Too big, try fewer pages # Clean up test file test_file.unlink() except subprocess.CalledProcessError as e: logger.error(f"qpdf error: {e}") if test_file.exists(): test_file.unlink() high = mid - 1 except subprocess.TimeoutExpired: logger.error(f"qpdf timeout for pages {start_page}-{mid}") if test_file.exists(): test_file.unlink() high = mid - 1 # Create final segment with best found size if best_end >= start_page: final_filename = f"segment_{part:03d}_p{start_page}-{best_end}.pdf" final_path = output_dir / final_filename try: # Create final segment subprocess.run( ["qpdf", "--empty", "--pages", str(input_path), f"{start_page}-{best_end}", "--", str(final_path), "--compress-streams=y", "--object-streams=generate"], capture_output=True, check=True, timeout=30 ) if final_path.exists(): final_size = final_path.stat().st_size final_size_mb = final_size / 1024 / 1024 if final_size <= MAX_ALLOWED_SIZE_BYTES: kept_files.append(final_path) stats["segments_created"] += 1 stats["total_output_size_mb"] += final_size_mb stats["largest_segment_mb"] = max(stats["largest_segment_mb"], final_size_mb) stats["smallest_segment_mb"] = min(stats["smallest_segment_mb"], final_size_mb) logger.info(f"Created segment {part}: {final_size_mb:.2f} MB (pages {start_page}-{best_end})") else: # Single page over 5MB final_path.unlink() stats["segments_discarded"] += 1 logger.warning(f"Segment {part} exceeded 5MB limit - discarded") except (subprocess.CalledProcessError, subprocess.TimeoutExpired) as e: logger.error(f"Error creating final segment: {e}") if final_path.exists(): final_path.unlink() start_page = best_end + 1 else: # Single page is too large, skip it logger.warning(f"Page {start_page} exceeds size limit - skipping") stats["segments_discarded"] += 1 start_page += 1 part += 1 if stats["smallest_segment_mb"] == float('inf'): stats["smallest_segment_mb"] = 0 if progress_callback: progress_callback(1.0, "Splitting complete!") logger.info(f"Completed: {stats['segments_created']} segments created, {stats['segments_discarded']} discarded") except Exception as e: logger.error(f"Error in split_pdf_by_size: {str(e)}") raise return kept_files, stats class SessionManager: """Manage user sessions and cleanup""" @staticmethod def create_session(session_id: str) -> Path: """Create a new user session directory""" session_dir = TEMP_DIR / session_id session_dir.mkdir(exist_ok=True) user_sessions[session_id] = { "created": datetime.now(), "dir": session_dir } return session_dir @staticmethod def cleanup_old_sessions(): """Remove old session directories""" current_time = datetime.now() sessions_to_remove = [] for session_id, session_info in user_sessions.items(): if current_time - session_info["created"] > timedelta(minutes=CLEANUP_AFTER_MINUTES): try: shutil.rmtree(session_info["dir"], ignore_errors=True) sessions_to_remove.append(session_id) logger.info(f"Cleaned up session: {session_id}") except Exception as e: logger.error(f"Error cleaning session {session_id}: {e}") for session_id in sessions_to_remove: del user_sessions[session_id] # Start cleanup thread def cleanup_worker(): """Background thread for cleaning old files""" while True: try: SessionManager.cleanup_old_sessions() time.sleep(60) # Check every minute except Exception as e: logger.error(f"Cleanup error: {e}") cleanup_thread = threading.Thread(target=cleanup_worker, daemon=True) cleanup_thread.start() def process_pdf(file_obj, progress=gr.Progress()) -> Tuple[Optional[str], str, str]: """ Main processing function for Gradio interface Returns: (zip_file_path, statistics_html, status_message) """ if file_obj is None: return None, "", "⚠️ Please upload a PDF file" session_id = str(uuid.uuid4())[:8] session_dir = SessionManager.create_session(session_id) try: # Update progress progress(0.1, "Initializing...") # Save uploaded file input_path = session_dir / "input.pdf" # Handle both file path string and file object if isinstance(file_obj, str): shutil.copy(file_obj, input_path) else: with open(input_path, 'wb') as f: f.write(file_obj.read() if hasattr(file_obj, 'read') else file_obj) # Verify it's a valid PDF progress(0.2, "Verifying PDF...") with pikepdf.open(input_path) as pdf: page_count = len(pdf.pages) logger.info(f"Valid PDF with {page_count} pages") # Create output directory output_dir = session_dir / "output" output_dir.mkdir(exist_ok=True) # Split PDF with size constraints progress(0.3, "Splitting PDF into 4.5MB segments...") def update_progress(value, message): scaled_progress = 0.3 + (value * 0.5) progress(scaled_progress, message) output_files, stats = PDFProcessor.split_pdf_by_size( input_path, output_dir, progress_callback=update_progress ) if not output_files: return None, "", "❌ No valid segments created (all segments exceeded 5MB limit)" # Create ZIP file progress(0.9, "Creating ZIP archive...") zip_filename = f"pdf_segments_{session_id}.zip" zip_path = session_dir / zip_filename with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf: for file_path in output_files: zipf.write(file_path, file_path.name) # CRITICAL: Verify the ZIP file was created if not zip_path.exists(): raise Exception("ZIP file creation failed") # Log for debugging logger.info(f"ZIP file created at: {zip_path}") logger.info(f"ZIP file size: {zip_path.stat().st_size / 1024 / 1024:.2f} MB") # Generate statistics with proper styling stats_html = f"""
| 📄 Total Pages: | {stats['total_pages']} |
| ✅ Segments Created (≤5MB): | {stats['segments_created']} |
| ❌ Segments Discarded (>5MB): | {stats['segments_discarded']} |
✨ Your file has been split successfully! Click the download button below.