kebincontreras committed on
Commit
d3188fd
verified
1 Parent(s): 1906d8a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -81
app.py CHANGED
@@ -6,14 +6,19 @@ import numpy as np
6
  import torchvision.transforms as transforms
7
  import torch
8
  from utils import *
9
- #from utils import flip_odd_lines, modulo, center_modulo, unmodulo, hard_thresholding, stripe_estimation, recons
10
  from utils import modulo
11
  import cv2
12
-
13
  import matplotlib.pyplot as plt
14
 
15
- def process_image(image, model_id, image_size, conf_threshold, correction, sat_factor, kernel_size, DO, t, vertical):
 
16
 
 
 
 
 
 
 
17
  original_image = np.array(image)
18
  original_image = original_image - original_image.min()
19
  original_image = original_image / original_image.max()
@@ -24,39 +29,35 @@ def process_image(image, model_id, image_size, conf_threshold, correction, sat_f
24
  scaling = 1.0
25
  original_image = cv2.resize(original_image, (0, 0), fx=scaling, fy=scaling)
26
 
27
-
28
  blurred_image = apply_blur(original_image / 255.0, kernel_size)
29
  clipped_image = clip_image(blurred_image, correction, sat_factor)
30
 
31
- img_tensor = torch.tensor(blurred_image, dtype=torch.float32 ).permute(2, 0, 1).unsqueeze(0)
32
- img_tensor = modulo( img_tensor * sat_factor, L=1.0)
33
 
34
  wrapped_image = img_tensor.squeeze(0).permute(1, 2, 0).numpy()
35
  wrapped_image = (wrapped_image*255).astype(np.uint8)
36
 
37
- original_annotated, original_detections = yolov10_inference(original_image, model_id, image_size, conf_threshold)
38
- clipped_annotated, clipped_detections = yolov10_inference((clipped_image*255.0).astype(np.uint8), "yolov10n", image_size, conf_threshold)
39
- wrapped_annotated, wrapped_detections = yolov10_inference(wrapped_image, model_id, image_size, conf_threshold)
40
 
41
- # Assuming `recons` is a function in `utils.py`
42
  recon_image = recons(img_tensor, DO=1, L=1.0, vertical=(vertical == "True"), t=t)
43
  recon_image_pil = transforms.ToPILImage()(recon_image.squeeze(0))
44
  recon_image_np = np.array(recon_image_pil).astype(np.uint8)
45
 
 
46
 
47
- recon_annotated, recon_detections = yolov10_inference(recon_image_np, model_id, image_size, conf_threshold)
48
-
49
-
50
-
51
- return original_annotated, clipped_annotated, wrapped_annotated, recon_annotated,
52
-
53
 
 
54
 
55
  def app():
56
- # Variables para ajustar el tamaño de las imágenes
57
- image_scaler = 0.55 # Cambia este valor para ajustar el tamaño de las imágenes
58
- image_width = int(600 * image_scaler) # Ancho de la imagen
59
- image_height = int(200 * image_scaler) # Alto de la imagen
60
 
61
  with gr.Blocks(css=f"""
62
  .fixed-size-image img {{
@@ -72,7 +73,7 @@ def app():
72
  .gr-column {{
73
  display: flex;
74
  flex-direction: column;
75
- align-items: center; /* Centramos todo en la columna */
76
  padding: 0 !important;
77
  margin: 0 !important;
78
  }}
@@ -101,40 +102,31 @@ def app():
101
  background-color: #e94e42;
102
  }}
103
  """) as demo:
104
- # Título completamente centrado
105
  gr.Markdown("## Modulo Imaging for Computer Vision", elem_id="centered-title")
106
 
107
- # Organizar los artículos en dos columnas con botones centrados bajo el título
108
  with gr.Row():
109
- # Primer artículo con su botón centrado en su columna
110
  with gr.Column():
111
  gr.Markdown("### High Dynamic Range Modulo Imaging for Robust Object Detection in Autonomous Driving", elem_id="centered-text")
112
  gr.HTML('<a href="https://openreview.net/pdf?id=2GqZFx2I7s" target="_blank" class="custom-button btn-grey">Article</a>')
113
 
114
- # Segundo artículo con su botón centrado en su columna
115
  with gr.Column():
116
  gr.Markdown("### Autoregressive High-Order Finite Difference Modulo Imaging: High-Dynamic Range for Computer Vision Applications", elem_id="centered-text")
117
  gr.HTML('<a href="https://cvlai.net/aim/2024/" target="_blank" class="custom-button btn-red">Article</a>')
118
 
119
- # Ajustes de sliders y controles eliminados, valores fijos
120
  image = gr.Image(type="pil", label="Upload Image")
121
  model_id = gr.Dropdown(label="Model", choices=["yolov10n", "yolov10s", "yolov10m", "yolov10b", "yolov10l", "yolov10x"], value="yolov10x")
122
 
123
- # Valores fijos
124
- conf_threshold = 0.85
125
- correction = 1.0
126
- kernel_size = 7
127
- DO = "1"
128
- image_size = 640
129
- t = 0.7
130
- vertical = "True"
131
-
132
- # Mantener el control del nivel de saturación
133
  sat_factor = gr.Slider(label="Saturation Factor", minimum=1.0, maximum=5.0, step=0.1, value=2.0)
 
 
 
134
 
 
 
 
 
135
  process_button = gr.Button("Process Image")
136
 
137
- # Mostrar las imágenes en dos columnas, dos imágenes por columna
138
  with gr.Row():
139
  with gr.Column():
140
  output_original = gr.Image(label="Original + blur")
@@ -143,56 +135,14 @@ def app():
143
  output_wrap = gr.Image(label="Wrapped")
144
  output_recons = gr.Image(label="Reconstructed")
145
 
146
-
147
-
148
-
149
-
150
  process_button.click(
151
  fn=process_image,
152
- inputs=[image, model_id, image_size, conf_threshold, correction, sat_factor, kernel_size, DO, t, vertical],
153
  outputs=[output_original, output_clip, output_wrap, output_recons]
154
  )
155
 
156
-
157
-
158
-
159
-
160
-
161
-
162
-
163
-
164
-
165
-
166
-
167
-
168
- # Añadir botón que despliega cómo citar el artículo en formato BibTeX
169
- def show_bibtex_citation():
170
- bibtex_citation = '''@inproceedings{contreras2024modulo,
171
- title={High Dynamic Range Modulo Imaging for Robust Object Detection in Autonomous Driving},
172
- author={Contreras, Kebin and Monroy, Brayan and Bacca, Jorge},
173
- booktitle={Autonomous Driving Conference},
174
- year={2024},
175
- address={Bucaramanga, Colombia},
176
- publisher={Universidad Industrial de Santander}
177
- }'''
178
- return bibtex_citation
179
-
180
- # Añadir botón para copiar el texto
181
- citation_button = gr.Button("How to Cite this Article (BibTeX)")
182
- citation_text = gr.Textbox(visible=False, label="BibTeX Citation", interactive=False)
183
- copy_button = gr.Button("Copy Citation", visible=False)
184
-
185
- # Mostrar la cita en formato BibTeX y habilitar botón de copiar
186
- def copy_citation(citation):
187
- return gr.update(visible=True), citation
188
-
189
- citation_button.click(show_bibtex_citation, outputs=[citation_text])
190
- copy_button.click(lambda x: x, inputs=citation_text, outputs=citation_text)
191
-
192
-
193
-
194
  return demo
195
 
196
-
197
  if __name__ == "__main__":
198
  app().launch()
 
 
6
  import torchvision.transforms as transforms
7
  import torch
8
  from utils import *
 
9
  from utils import modulo
10
  import cv2
 
11
  import matplotlib.pyplot as plt
12
 
13
+ # Definir el tamaño de la imagen constante
14
+ IMAGE_SIZE = 640 # Asignar el valor constante para image_size
15
 
16
+ def process_image(image, model_id, sat_factor, DO, t, vertical):
17
+ # Definir valores fijos dentro de la función
18
+ conf_threshold = 0.85
19
+ correction = 1.0
20
+ kernel_size = 7 # Definir kernel_size como constante
21
+
22
  original_image = np.array(image)
23
  original_image = original_image - original_image.min()
24
  original_image = original_image / original_image.max()
 
29
  scaling = 1.0
30
  original_image = cv2.resize(original_image, (0, 0), fx=scaling, fy=scaling)
31
 
 
32
  blurred_image = apply_blur(original_image / 255.0, kernel_size)
33
  clipped_image = clip_image(blurred_image, correction, sat_factor)
34
 
35
+ img_tensor = torch.tensor(blurred_image, dtype=torch.float32).permute(2, 0, 1).unsqueeze(0)
36
+ img_tensor = modulo(img_tensor * sat_factor, L=1.0)
37
 
38
  wrapped_image = img_tensor.squeeze(0).permute(1, 2, 0).numpy()
39
  wrapped_image = (wrapped_image*255).astype(np.uint8)
40
 
41
+ original_annotated, original_detections = yolov10_inference(original_image, model_id, IMAGE_SIZE, conf_threshold)
42
+ clipped_annotated, clipped_detections = yolov10_inference((clipped_image*255.0).astype(np.uint8), "yolov10n", IMAGE_SIZE, conf_threshold)
43
+ wrapped_annotated, wrapped_detections = yolov10_inference(wrapped_image, model_id, IMAGE_SIZE, conf_threshold)
44
 
 
45
  recon_image = recons(img_tensor, DO=1, L=1.0, vertical=(vertical == "True"), t=t)
46
  recon_image_pil = transforms.ToPILImage()(recon_image.squeeze(0))
47
  recon_image_np = np.array(recon_image_pil).astype(np.uint8)
48
 
49
+ recon_annotated, recon_detections = yolov10_inference(recon_image_np, model_id, IMAGE_SIZE, conf_threshold)
50
 
51
+ metrics_clip = calculate_detection_metrics(original_detections, clipped_detections)
52
+ metrics_wrap = calculate_detection_metrics(original_detections, wrapped_detections)
53
+ metrics_recons = calculate_detection_metrics(original_detections, recon_detections)
 
 
 
54
 
55
+ return original_annotated, clipped_annotated, wrapped_annotated, recon_annotated, metrics_clip, metrics_wrap, metrics_recons
56
 
57
  def app():
58
+ image_scaler = 0.55
59
+ image_width = int(600 * image_scaler)
60
+ image_height = int(200 * image_scaler)
 
61
 
62
  with gr.Blocks(css=f"""
63
  .fixed-size-image img {{
 
73
  .gr-column {{
74
  display: flex;
75
  flex-direction: column;
76
+ align-items: center;
77
  padding: 0 !important;
78
  margin: 0 !important;
79
  }}
 
102
  background-color: #e94e42;
103
  }}
104
  """) as demo:
 
105
  gr.Markdown("## Modulo Imaging for Computer Vision", elem_id="centered-title")
106
 
 
107
  with gr.Row():
 
108
  with gr.Column():
109
  gr.Markdown("### High Dynamic Range Modulo Imaging for Robust Object Detection in Autonomous Driving", elem_id="centered-text")
110
  gr.HTML('<a href="https://openreview.net/pdf?id=2GqZFx2I7s" target="_blank" class="custom-button btn-grey">Article</a>')
111
 
 
112
  with gr.Column():
113
  gr.Markdown("### Autoregressive High-Order Finite Difference Modulo Imaging: High-Dynamic Range for Computer Vision Applications", elem_id="centered-text")
114
  gr.HTML('<a href="https://cvlai.net/aim/2024/" target="_blank" class="custom-button btn-red">Article</a>')
115
 
 
116
  image = gr.Image(type="pil", label="Upload Image")
117
  model_id = gr.Dropdown(label="Model", choices=["yolov10n", "yolov10s", "yolov10m", "yolov10b", "yolov10l", "yolov10x"], value="yolov10x")
118
 
 
 
 
 
 
 
 
 
 
 
119
  sat_factor = gr.Slider(label="Saturation Factor", minimum=1.0, maximum=5.0, step=0.1, value=2.0)
120
+ DO = gr.Radio(label="DO", choices=["1", "2"], value="1")
121
+ t = gr.Slider(label="t", minimum=0.0, maximum=1.0, step=0.1, value=0.7)
122
+ vertical = gr.Radio(label="Vertical", choices=["True", "False"], value="True")
123
 
124
+ with gr.Row():
125
+ spud_button = gr.Button("SPUD", elem_id="spud-button")
126
+ ahfd_button = gr.Button("AHFD", elem_id="ahfd-button")
127
+
128
  process_button = gr.Button("Process Image")
129
 
 
130
  with gr.Row():
131
  with gr.Column():
132
  output_original = gr.Image(label="Original + blur")
 
135
  output_wrap = gr.Image(label="Wrapped")
136
  output_recons = gr.Image(label="Reconstructed")
137
 
 
 
 
 
138
  process_button.click(
139
  fn=process_image,
140
+ inputs=[image, model_id, sat_factor, DO, t, vertical],
141
  outputs=[output_original, output_clip, output_wrap, output_recons]
142
  )
143
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
144
  return demo
145
 
 
146
  if __name__ == "__main__":
147
  app().launch()
148
+