muhammadhamza-stack committed on
Commit
7258846
·
1 Parent(s): 2232b2c

refine and dockerize the app

Browse files
.DS_Store ADDED
Binary file (6.15 kB). View file
 
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ *.jpg filter=lfs diff=lfs merge=lfs -text
37
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ venv/
2
+ checkpoints
Dockerfile ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ FROM python:3.9-slim
2
+
3
+ ENV PYTHONDONTWRITEBYTECODE=1
4
+ ENV PYTHONUNBUFFERED=1
5
+
6
+ WORKDIR /app
7
+
8
+ # Required for OpenCV image display & ultralytics
9
+ RUN apt-get update && apt-get install -y \
10
+ libgl1 \
11
+ libglib2.0-0 \
12
+ git \
13
+ curl \
14
+ && rm -rf /var/lib/apt/lists/*
15
+
16
+ COPY requirements.txt .
17
+
18
+ RUN pip install --upgrade pip \
19
+ && pip install --no-cache-dir -r requirements.txt
20
+
21
+ COPY . .
22
+
23
+ EXPOSE 7861
24
+
25
+ CMD ["python", "app.py"]
README.md CHANGED
@@ -3,7 +3,7 @@ title: ReaLens
3
  emoji: 🐢
4
  colorFrom: pink
5
  colorTo: pink
6
- sdk: gradio
7
  sdk_version: 5.43.1
8
  app_file: app.py
9
  pinned: false
 
3
  emoji: 🐢
4
  colorFrom: pink
5
  colorTo: pink
6
+ sdk: docker
7
  sdk_version: 5.43.1
8
  app_file: app.py
9
  pinned: false
app.py CHANGED
@@ -1,6 +1,3 @@
1
-
2
-
3
-
4
  # import gradio as gr
5
  # import torch
6
  # import os
@@ -172,11 +169,6 @@
172
 
173
 
174
 
175
-
176
-
177
-
178
-
179
-
180
  import gradio as gr
181
  import torch
182
  import os
@@ -187,18 +179,73 @@ import numpy as np
187
  from pathlib import Path
188
  import sys
189
  import copy
190
-
191
- # --- Import logic from your project ---
192
  from options.test_options import TestOptions
193
  from data import create_dataset
194
  from models import create_model
195
  try:
196
  from best_ldr import compute_metrics_for_images, score_records
197
  except ImportError:
 
198
  raise ImportError("Could not import from best_ldr.py. Make sure the file is in the same directory as app.py.")
199
 
200
  print("--- Initializing LDR-to-HDR Model (this may take a moment) ---")
201
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
202
  # --- Global Setup: Load the CycleGAN model once when the app starts ---
203
 
204
  # We need to satisfy the parser's requirement for a dataroot at startup
@@ -244,8 +291,16 @@ def process_images_to_hdr(list_of_temp_files):
244
  print(f"Analyzing {len(uploaded_filepaths)} uploaded images...")
245
  weights = {"clipped": 0.35, "coverage": 0.25, "exposure": 0.15, "sharpness": 0.15, "noise": 0.10}
246
  records = compute_metrics_for_images(uploaded_filepaths, resize_max=1024)
247
- scored_records = score_records(records, weights)
 
 
 
 
 
 
 
248
  if not scored_records:
 
249
  raise gr.Error("Could not read or score any of the uploaded images.")
250
 
251
  best_ldr_record = scored_records[0]
@@ -264,6 +319,9 @@ def process_images_to_hdr(list_of_temp_files):
264
 
265
  # Deep copy the base options to avoid modifying the global state
266
  local_opt = copy.deepcopy(opt)
 
 
 
267
  for key, value in inference_options.items():
268
  setattr(local_opt, key, value)
269
 
@@ -301,20 +359,35 @@ with gr.Blocks(theme=gr.themes.Soft(), css="footer {display: none !important}")
301
  """
302
  )
303
 
 
 
 
 
 
 
 
 
 
 
304
  with gr.Row():
305
- with gr.Column(scale=1, min_width=350):
306
  # --- INPUT ---
 
307
  input_files = gr.Files(
308
- label="Upload Bracketed LDR Images",
309
  file_types=["image"]
310
  )
 
311
  process_button = gr.Button("Process Images", variant="primary")
312
- with gr.Accordion("See Your Uploaded Images", open=True):
313
- input_gallery = gr.Gallery(label="Uploaded Images", show_label=False, columns=[2, 3], height="auto")
 
314
 
315
  with gr.Column(scale=2):
316
- # --- OUTPUT ---
317
  gr.Markdown("## Generated HDR Result")
 
 
 
318
  output_image = gr.Image(label="Final HDR Image", type="pil", interactive=False, show_download_button=True)
319
 
320
  process_button.click(
@@ -327,13 +400,43 @@ with gr.Blocks(theme=gr.themes.Soft(), css="footer {display: none !important}")
327
  # gr.Examples(
328
  # examples=[
329
  # [
330
- # "../pix2pix_dataset/testA/077A2406.jpg",
331
- # "../pix2pix_dataset/testA/077A4049.jpg",
332
- # "../pix2pix_dataset/testA/077A4073.jpg"
 
333
  # ]
334
  # ],
335
- # inputs=input_files
 
336
  # )
337
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
338
  print("--- Launching Gradio App ---")
339
- demo.launch(share=True)
 
 
 
 
 
 
 
1
  # import gradio as gr
2
  # import torch
3
  # import os
 
169
 
170
 
171
 
 
 
 
 
 
172
  import gradio as gr
173
  import torch
174
  import os
 
179
  from pathlib import Path
180
  import sys
181
  import copy
 
 
182
  from options.test_options import TestOptions
183
  from data import create_dataset
184
  from models import create_model
185
  try:
186
  from best_ldr import compute_metrics_for_images, score_records
187
  except ImportError:
188
+ # This is handled globally but kept here for local context
189
  raise ImportError("Could not import from best_ldr.py. Make sure the file is in the same directory as app.py.")
190
 
191
  print("--- Initializing LDR-to-HDR Model (this may take a moment) ---")
192
 
193
+ # --- Documentation Strings ---
194
+
195
+ USAGE_GUIDELINES = """
196
+ ## 1. Quick Start Guide: Generating an HDR Image
197
+ This tool uses a sophisticated AI model (CycleGAN) to translate the characteristics of a single, optimally selected Low Dynamic Range (LDR) image into a High Dynamic Range (HDR) output.
198
+
199
+ 1. **Upload:** Use the 'Upload Bracketed LDR Images' box to upload **at least two** images of the same scene, taken at different exposures (bracketed).
200
+ 2. **Run:** Click the **"Process Images"** button.
201
+ 3. **Review:**
202
+ * The model first runs an analysis to select the 'Best LDR'.
203
+ * The selected LDR is then processed, and the 'Final HDR Image' will appear.
204
+ """
205
+
206
+ INPUT_EXPLANATION = """
207
+ ## 2. Input Requirements and Best Practices
208
+
209
+ | Input Field | Purpose | Requirement |
210
+ | :--- | :--- | :--- |
211
+ | **LDR Images** | A set of images of the same scene captured with different exposure values (bracketing). | Must be 2 or more standard image files (JPG, PNG). |
212
+
213
+ ### Best Practices for Input Images
214
+ * **Bracketing is Key:** The quality of the final HDR output heavily depends on the diversity and quality of the input bracket set (underexposed, correctly exposed, and overexposed).
215
+ * **Scene Consistency:** All uploaded images must be of the **exact same scene** and taken from the **exact same camera position** (tripod recommended). Motion between frames will lead to conversion artifacts.
216
+ * **Resolution:** While the model processes images internally, uploading high-resolution sources ensures the final scaled 1024xN output maintains sharp detail.
217
+ """
218
+
219
+ TECHNICAL_GUIDANCE = """
220
+ ## 3. The Best LDR Selection Algorithm (Internal Logic)
221
+
222
+ Unlike traditional HDR merging, this application first selects the single 'Best LDR' image from your uploads and then translates *that specific image* into HDR using a deep learning model.
223
+
224
+ The selection process scores each image based on the following weighted metrics:
225
+
226
+ | Metric | Weight | Description |
227
+ | :--- | :--- | :--- |
228
+ | **Clipped Pixels** | 35% | Penalizes images with over-saturated whites or completely black shadows. |
229
+ | **Coverage** | 25% | Measures the range of usable tones across the image. |
230
+ | **Exposure** | 15% | Measures closeness to ideal scene brightness. |
231
+ | **Sharpness** | 15% | Measures overall clarity and focus of the image. |
232
+ | **Noise** | 10% | Penalizes excessive grain or image noise. |
233
+
234
+ The image with the highest composite score is chosen for the final AI conversion.
235
+ """
236
+
237
+ OUTPUT_EXPLANATION = """
238
+ ## 4. Expected Outputs and Interpretation
239
+
240
+ | Output Field | Description | Guidance |
241
+ | :--- | :--- | :--- |
242
+ | **Uploaded Images** | A gallery showing all LDR images provided as input. | Confirms which files were successfully loaded and analyzed by the scoring algorithm. |
243
+ | **Final HDR Image** | The resulting image generated by the **CycleGAN** translation model. | This image should exhibit enhanced detail in very bright and very dark areas, greater overall contrast, and richer color vibrancy compared to the original LDRs. |
244
+
245
+ ### Note on Resolution
246
+ The inference process scales the selected LDR image to **1024 pixels wide** internally, maintaining the original aspect ratio, before running the conversion model. The final output resolution will match this scaled size.
247
+ """
248
+
249
  # --- Global Setup: Load the CycleGAN model once when the app starts ---
250
 
251
  # We need to satisfy the parser's requirement for a dataroot at startup
 
291
  print(f"Analyzing {len(uploaded_filepaths)} uploaded images...")
292
  weights = {"clipped": 0.35, "coverage": 0.25, "exposure": 0.15, "sharpness": 0.15, "noise": 0.10}
293
  records = compute_metrics_for_images(uploaded_filepaths, resize_max=1024)
294
+
295
+ # Check if the list of records is valid before scoring
296
+ valid_records = [r for r in records if r is not None]
297
+ if not valid_records:
298
+ raise gr.Error("Could not process any uploaded images (ensure they are valid image files).")
299
+
300
+ scored_records = score_records(valid_records, weights)
301
+
302
  if not scored_records:
303
+ # This should ideally be caught by the valid_records check, but remains a safeguard
304
  raise gr.Error("Could not read or score any of the uploaded images.")
305
 
306
  best_ldr_record = scored_records[0]
 
319
 
320
  # Deep copy the base options to avoid modifying the global state
321
  local_opt = copy.deepcopy(opt)
322
+ local_opt.num_threads = 0 # disable multiprocessing
323
+ local_opt.batch_size = 1 # safety
324
+ local_opt.serial_batches = True
325
  for key, value in inference_options.items():
326
  setattr(local_opt, key, value)
327
 
 
359
  """
360
  )
361
 
362
+ # Add Guidelines
363
+ with gr.Accordion("Tips & User Guidelines", open=False):
364
+ gr.Markdown(USAGE_GUIDELINES)
365
+ gr.Markdown("---")
366
+ gr.Markdown(INPUT_EXPLANATION)
367
+ gr.Markdown("---")
368
+ gr.Markdown(TECHNICAL_GUIDANCE)
369
+ gr.Markdown("---")
370
+ gr.Markdown(OUTPUT_EXPLANATION)
371
+
372
  with gr.Row():
373
+ with gr.Column(scale=1):
374
  # --- INPUT ---
375
+ gr.Markdown("## Step 1: Upload LDR Images")
376
  input_files = gr.Files(
377
+ label="Bracketed LDR Images",
378
  file_types=["image"]
379
  )
380
+ gr.Markdown("## Step 2: Click Process Images")
381
  process_button = gr.Button("Process Images", variant="primary")
382
+
383
+ # with gr.Row():
384
+
385
 
386
  with gr.Column(scale=2):
 
387
  gr.Markdown("## Generated HDR Result")
388
+ with gr.Accordion("See Your Uploaded Images", open=False):
389
+ input_gallery = gr.Gallery(label="Uploaded Images", show_label=False, columns=[2, 3], height="auto")
390
+
391
  output_image = gr.Image(label="Final HDR Image", type="pil", interactive=False, show_download_button=True)
392
 
393
  process_button.click(
 
400
  # gr.Examples(
401
  # examples=[
402
  # [
403
+ # "./sample_data/ldr5.jpg",
404
+ # "./sample_data/ldr2.jpeg",
405
+ # "./sample_data/ldr1.jpg",
406
+ # "./sample_data/ldr6.jpg",
407
  # ]
408
  # ],
409
+ # inputs=input_files,
410
+ # label="Click on an image to test"
411
  # )
412
 
413
+ # --- Find the base directory for robust path resolution ---
414
+ BASE_DIR = os.path.dirname(os.path.abspath(__file__))
415
+ SAMPLE_DATA_DIR = os.path.join(BASE_DIR, "sample_data")
416
+
417
+ EXAMPLE_FILES = [
418
+ os.path.join(SAMPLE_DATA_DIR, "ldr5.jpg"),
419
+ os.path.join(SAMPLE_DATA_DIR, "ldr2.jpeg"),
420
+ os.path.join(SAMPLE_DATA_DIR, "ldr1.jpg"),
421
+ os.path.join(SAMPLE_DATA_DIR, "ldr6.jpg"),
422
+ ]
423
+
424
+ # ... inside the gr.Blocks demo ...
425
+
426
+ gr.Markdown("### Examples")
427
+ gr.Examples(
428
+ # Correct structure:
429
+ # examples=[ [ [value for input 1] ] ]
430
+ # Since input_files accepts a LIST of files, the value is that list.
431
+ examples=[
432
+ [EXAMPLE_FILES]
433
+ ],
434
+ inputs=[input_files], # inputs must be a list of components
435
+ label="Click to load these LDR images"
436
+ )
437
+
438
  print("--- Launching Gradio App ---")
439
+ demo.launch(
440
+ server_name="0.0.0.0",
441
+ server_port=7861
442
+ )
requirements.txt CHANGED
@@ -1,8 +1,75 @@
1
- # requirements.txt
2
-
3
- gradio
4
- numpy
5
- opencv-python
6
- pillow
7
- torch
8
- torchvision
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ aiofiles==23.2.1
2
+ altair==5.5.0
3
+ annotated-doc==0.0.4
4
+ annotated-types==0.7.0
5
+ anyio==4.12.1
6
+ attrs==25.4.0
7
+ certifi==2026.1.4
8
+ charset-normalizer==3.4.4
9
+ click==8.1.8
10
+ contourpy==1.3.0
11
+ cycler==0.12.1
12
+ exceptiongroup==1.3.1
13
+ fastapi==0.128.0
14
+ ffmpy==1.0.0
15
+ filelock==3.19.1
16
+ fonttools==4.60.2
17
+ fsspec==2025.10.0
18
+ gradio==3.50.2
19
+ gradio_client==0.6.1
20
+ h11==0.16.0
21
+ hf-xet==1.2.0
22
+ httpcore==1.0.9
23
+ httpx==0.28.1
24
+ huggingface_hub==1.3.1
25
+ idna==3.11
26
+ importlib_resources==6.5.2
27
+ Jinja2==3.1.6
28
+ jsonschema==4.25.1
29
+ jsonschema-specifications==2025.9.1
30
+ kiwisolver==1.4.7
31
+ markdown-it-py==3.0.0
32
+ MarkupSafe==2.1.5
33
+ matplotlib==3.9.4
34
+ mdurl==0.1.2
35
+ mpmath==1.3.0
36
+ narwhals==2.15.0
37
+ networkx==3.2.1
38
+ numpy==1.26.4
39
+ opencv-python==4.12.0.88
40
+ orjson==3.11.5
41
+ packaging==25.0
42
+ pandas==2.3.3
43
+ pillow==10.4.0
44
+ pydantic==2.12.5
45
+ pydantic_core==2.41.5
46
+ pydub==0.25.1
47
+ Pygments==2.19.2
48
+ pyparsing==3.3.1
49
+ python-dateutil==2.9.0.post0
50
+ python-multipart==0.0.20
51
+ pytz==2025.2
52
+ PyYAML==6.0.3
53
+ referencing==0.36.2
54
+ requests==2.32.5
55
+ rich==14.2.0
56
+ rpds-py==0.27.1
57
+ ruff==0.14.11
58
+ semantic-version==2.10.0
59
+ shellingham==1.5.4
60
+ six==1.17.0
61
+ starlette==0.49.3
62
+ sympy==1.14.0
63
+ tomlkit==0.12.0
64
+ torch==2.2.2
65
+ torchvision==0.17.2
66
+ tqdm==4.67.1
67
+ typer==0.21.1
68
+ typer-slim==0.21.1
69
+ typing-inspection==0.4.2
70
+ typing_extensions==4.15.0
71
+ tzdata==2025.3
72
+ urllib3==2.6.3
73
+ uvicorn==0.39.0
74
+ websockets==11.0.3
75
+ zipp==3.23.0
sample_data/ldr1.jpg ADDED

Git LFS Details

  • SHA256: 5ad6c171e84893a33c4387dcd1326fccea83ed8956dc049b1ae4d8e6347561c3
  • Pointer size: 130 Bytes
  • Size of remote file: 37.1 kB
sample_data/ldr2.jpeg ADDED

Git LFS Details

  • SHA256: e0a23183ee3f1e0bd291e1799f3096ced422122ecaf46ed172ff111bd1f64820
  • Pointer size: 129 Bytes
  • Size of remote file: 4.92 kB
sample_data/ldr5.jpg ADDED

Git LFS Details

  • SHA256: e3b9ecbc614cbaed9818002689dc0e4681a7dec7bf85ffdc377466ebe19f1e0b
  • Pointer size: 130 Bytes
  • Size of remote file: 40 kB
sample_data/ldr6.jpg ADDED

Git LFS Details

  • SHA256: b322058b8f731163ff43fa46c940d7266e161dd3828ea903f88693dc32dcb52d
  • Pointer size: 130 Bytes
  • Size of remote file: 10.8 kB