"""Gradio app: select the best LDR frame from a bracketed set and translate it to HDR
with a pretrained CycleGAN generator.

Workflow:
    1. User uploads >= 2 bracketed LDR exposures of the same scene.
    2. ``best_ldr`` scores each frame (clipping, coverage, exposure, sharpness, noise)
       and the top-scoring frame is selected.
    3. The selected frame is scaled to 1024 px wide and run through the CycleGAN
       ``test`` model; the ``fake`` visual is returned as the final HDR image.
"""

# --- stdlib ---
import copy
import os
import shutil
import sys
import tempfile
from pathlib import Path

# --- third-party ---
import gradio as gr
import numpy as np
import torch
from PIL import Image

# --- project-local (pytorch-CycleGAN-style repo layout) ---
from options.test_options import TestOptions
from data import create_dataset
from models import create_model

try:
    from best_ldr import compute_metrics_for_images, score_records
except ImportError:
    # This is handled globally but kept here for local context
    raise ImportError("Could not import from best_ldr.py. Make sure the file is in the same directory as app.py.")

print("--- Initializing LDR-to-HDR Model (this may take a moment) ---")

# --- Documentation Strings (rendered as Markdown in the UI accordion) ---
USAGE_GUIDELINES = """
## 1. Quick Start Guide: Generating an HDR Image

This tool uses a sophisticated AI model (CycleGAN) to translate the characteristics of a single, optimally selected Low Dynamic Range (LDR) image into a High Dynamic Range (HDR) output.

1. **Upload:** Use the 'Upload Bracketed LDR Images' box to upload **at least two** images of the same scene, taken at different exposures (bracketed).
2. **Run:** Click the **"Process Images"** button.
3. **Review:**
    * The model first runs an analysis to select the 'Best LDR'.
    * The selected LDR is then processed, and the 'Final HDR Image' will appear.
"""

INPUT_EXPLANATION = """
## 2. Input Requirements and Best Practices

| Input Field | Purpose | Requirement |
| :--- | :--- | :--- |
| **LDR Images** | A set of images of the same scene captured with different exposure values (bracketing). | Must be 2 or more standard image files (JPG, PNG). |

### Best Practices for Input Images

* **Bracketing is Key:** The quality of the final HDR output heavily depends on the diversity and quality of the input bracket set (underexposed, correctly exposed, and overexposed).
* **Scene Consistency:** All uploaded images must be of the **exact same scene** and taken from the **exact same camera position** (tripod recommended). Motion between frames will lead to conversion artifacts.
* **Resolution:** While the model processes images internally, uploading high-resolution sources ensures the final scaled 1024xN output maintains sharp detail.
"""

TECHNICAL_GUIDANCE = """
## 3. The Best LDR Selection Algorithm (Internal Logic)

Unlike traditional HDR merging, this application first selects the single 'Best LDR' image from your uploads and then translates *that specific image* into HDR using a deep learning model.

The selection process scores each image based on the following weighted metrics:

| Metric | Weight | Description |
| :--- | :--- | :--- |
| **Clipped Pixels** | 35% | Penalizes images with over-saturated whites or completely black shadows. |
| **Coverage** | 25% | Measures the range of usable tones across the image. |
| **Exposure** | 15% | Measures closeness to ideal scene brightness. |
| **Sharpness** | 15% | Measures overall clarity and focus of the image. |
| **Noise** | 10% | Penalizes excessive grain or image noise. |

The image with the highest composite score is chosen for the final AI conversion.
"""

OUTPUT_EXPLANATION = """
## 4. Expected Outputs and Interpretation

| Output Field | Description | Guidance |
| :--- | :--- | :--- |
| **Uploaded Images** | A gallery showing all LDR images provided as input. | Confirms which files were successfully loaded and analyzed by the scoring algorithm. |
| **Final HDR Image** | The resulting image generated by the **CycleGAN** translation model. | This image should exhibit enhanced detail in very bright and very dark areas, greater overall contrast, and richer color vibrancy compared to the original LDRs. |

### Note on Resolution

The inference process scales the selected LDR image to **1024 pixels wide** internally, maintaining the original aspect ratio, before running the conversion model. The final output resolution will match this scaled size.
"""

# --- Global Setup: Load the CycleGAN model once when the app starts ---
# We need to satisfy the parser's requirement for a dataroot at startup;
# the value is never read because each inference call overrides dataroot.
if '--dataroot' not in sys.argv:
    sys.argv.extend(['--dataroot', './dummy_dataroot_for_init'])

# Load the base options, then manually override settings for our model.
opt = TestOptions().parse()
opt.name = 'ldr2hdr_cyclegan_728'
opt.model = 'test'                 # single-direction test model (G only)
opt.netG = 'resnet_9blocks'
opt.norm = 'instance'
opt.no_dropout = True
opt.checkpoints_dir = './checkpoints'
opt.gpu_ids = [0] if torch.cuda.is_available() else []
opt.device = torch.device('cuda:{}'.format(opt.gpu_ids[0])) if opt.gpu_ids else torch.device('cpu')

# Create the model once; per-request option tweaks use a deep copy of `opt`.
model = create_model(opt)
model.setup(opt)
model.eval()
print("--- Model Loaded Successfully ---")


# --- The Main Gradio Processing Function ---
def process_images_to_hdr(list_of_temp_files):
    """Select the best LDR from the uploaded bracket and convert it to HDR.

    Args:
        list_of_temp_files: Gradio temp-file objects from a ``gr.Files`` input;
            each exposes a ``.name`` attribute holding the temp path on disk.

    Returns:
        Tuple of (list of uploaded file paths for the gallery, final HDR PIL image).

    Raises:
        gr.Error: If no files were uploaded, none could be read/scored, or the
            model produced no output image.
    """
    if not list_of_temp_files:
        raise gr.Error("Please upload your bracketed LDR images.")
    if len(list_of_temp_files) < 2:
        gr.Warning("For best results, upload at least 2 bracketed LDR images.")

    uploaded_filepaths = [Path(f.name) for f in list_of_temp_files]

    try:
        # --- Step 1: Select the Best LDR ---
        print(f"Analyzing {len(uploaded_filepaths)} uploaded images...")
        weights = {"clipped": 0.35, "coverage": 0.25, "exposure": 0.15, "sharpness": 0.15, "noise": 0.10}
        records = compute_metrics_for_images(uploaded_filepaths, resize_max=1024)

        # Check if the list of records is valid before scoring.
        valid_records = [r for r in records if r is not None]
        if not valid_records:
            raise gr.Error("Could not process any uploaded images (ensure they are valid image files).")

        scored_records = score_records(valid_records, weights)
        if not scored_records:
            # This should ideally be caught by the valid_records check, but remains a safeguard.
            raise gr.Error("Could not read or score any of the uploaded images.")

        # score_records returns records sorted best-first — presumably; verify in best_ldr.py.
        best_ldr_record = scored_records[0]
        best_ldr_path = best_ldr_record['path']
        print(f"Best LDR selected: {os.path.basename(best_ldr_path)} (Score: {best_ldr_record['score']:.4f})")

        # --- Step 2: Run Inference ---
        print("Running Full Image (High-Res Scaled) Inference...")
        inference_options = {
            'preprocess': 'scale_width',
            'load_size': 1024,  # Generate the high-resolution, full image
            'crop_size': 728,   # This value is ignored but required by the parser
        }

        # Deep copy the base options to avoid modifying the global state.
        local_opt = copy.deepcopy(opt)
        local_opt.num_threads = 0       # disable multiprocessing
        local_opt.batch_size = 1        # safety
        local_opt.serial_batches = True
        for key, value in inference_options.items():
            setattr(local_opt, key, value)

        # Run the model on a temp directory containing only the selected LDR,
        # so the dataset loader picks up exactly one image.
        final_hdr_image = None  # guard against a missing 'fake' visual
        with tempfile.TemporaryDirectory() as temp_dir:
            shutil.copy(best_ldr_path, temp_dir)
            local_opt.dataroot = temp_dir
            local_opt.num_test = 1
            dataset = create_dataset(local_opt)

            for data in dataset:
                model.set_input(data)
                model.test()
                visuals = model.get_current_visuals()
                for label, image_tensor in visuals.items():
                    if label == 'fake':
                        # Model outputs are in [-1, 1], CHW; map to [0, 255] HWC uint8.
                        image_numpy = (np.transpose(image_tensor.cpu().float().numpy()[0], (1, 2, 0)) + 1) / 2.0 * 255.0
                        final_hdr_image = Image.fromarray(image_numpy.astype(np.uint8))

        if final_hdr_image is None:
            raise gr.Error("The model did not produce an output image.")

        print("Conversion to HDR successful.")
        # Return the gallery of inputs and the single final HDR image.
        return uploaded_filepaths, final_hdr_image

    except gr.Error:
        # Propagate user-facing errors unchanged instead of re-wrapping them below.
        raise
    except Exception as e:
        print(f"An error occurred: {e}")
        raise gr.Error(f"An error occurred during processing: {e}")


# --- Create and Launch the Gradio Interface ---
with gr.Blocks(theme=gr.themes.Soft(), css="footer {display: none !important}") as demo:
    gr.Markdown(
        """
        # LDR Bracketing to HDR Converter
        Upload a set of bracketed LDR images. The app will automatically select the best one and convert it to a vibrant, full-resolution HDR image.
        """
    )

    # Add Guidelines
    with gr.Accordion("Tips & User Guidelines", open=False):
        gr.Markdown(USAGE_GUIDELINES)
        gr.Markdown("---")
        gr.Markdown(INPUT_EXPLANATION)
        gr.Markdown("---")
        gr.Markdown(TECHNICAL_GUIDANCE)
        gr.Markdown("---")
        gr.Markdown(OUTPUT_EXPLANATION)

    with gr.Row():
        with gr.Column(scale=1):
            # --- INPUT ---
            gr.Markdown("## Step 1: Upload LDR Images")
            input_files = gr.Files(
                label="Bracketed LDR Images",
                file_types=["image"]
            )
            gr.Markdown("## Step 2: Click Process Images")
            process_button = gr.Button("Process Images", variant="primary")

        with gr.Column(scale=2):
            gr.Markdown("## Generated HDR Result")
            with gr.Accordion("See Your Uploaded Images", open=False):
                input_gallery = gr.Gallery(label="Uploaded Images", show_label=False, columns=[2, 3], height="auto")
            output_image = gr.Image(label="Final HDR Image", type="pil", interactive=False, show_download_button=True)

    process_button.click(
        fn=process_images_to_hdr,
        inputs=input_files,
        outputs=[input_gallery, output_image]
    )

    # --- Find the base directory for robust path resolution ---
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    SAMPLE_DATA_DIR = os.path.join(BASE_DIR, "sample_data")
    EXAMPLE_FILES = [
        os.path.join(SAMPLE_DATA_DIR, "ldr5.jpg"),
        os.path.join(SAMPLE_DATA_DIR, "ldr2.jpeg"),
        os.path.join(SAMPLE_DATA_DIR, "ldr1.jpg"),
        os.path.join(SAMPLE_DATA_DIR, "ldr6.jpg"),
    ]

    gr.Markdown("### Examples")
    gr.Examples(
        # gr.Files accepts a LIST of files, so each example row is [list_of_paths].
        examples=[[EXAMPLE_FILES]],
        inputs=[input_files],  # inputs must be a list of components
        label="Click to load these LDR images"
    )

print("--- Launching Gradio App ---")
demo.launch(
    server_name="0.0.0.0",
    server_port=7860
)