ihabooe committed on
Commit
341ca31
·
verified ·
1 Parent(s): 1b3c7fd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -12
app.py CHANGED
@@ -24,11 +24,13 @@ print(f"Model loaded on {device}")
24
  OUTPUT_DIR = "output_images"
25
  os.makedirs(OUTPUT_DIR, exist_ok=True)
26
 
 
 
 
27
  # Resize the input image for model compatibility
28
- def resize_image(image):
29
  image = image.convert('RGB')
30
- model_input_size = (1024, 1024)
31
- image = image.resize(model_input_size, Image.BILINEAR)
32
  return image
33
 
34
  # Background removal process
@@ -41,8 +43,10 @@ def process(image, progress=gr.Progress()):
41
  # Prepare the input
42
  progress(0.1, desc="Preparing image...")
43
  orig_image = Image.fromarray(image)
44
- w, h = orig_im_size = orig_image.size
45
- image = resize_image(orig_image)
 
 
46
  im_np = np.array(image)
47
  im_tensor = torch.tensor(im_np, dtype=torch.float32).permute(2, 0, 1)
48
  im_tensor = torch.unsqueeze(im_tensor, 0)
@@ -86,11 +90,8 @@ def process(image, progress=gr.Progress()):
86
 
87
  progress(1.0, desc="Done!")
88
 
89
- # Return the processed image and the file for download
90
  return output_array, gr.update(visible=True, value=filepath, interactive=True)
91
 
92
- # ... previous code remains the same until the description ...
93
-
94
  # Gradio interface setup
95
  title = "Background Removal Tool"
96
  description = """
@@ -105,7 +106,7 @@ description = """
105
  box-shadow: 0 0 20px rgba(0, 255, 255, 0.2);
106
  }
107
  .title-text {
108
- color: #ffffff;
109
  font-family: 'Orbitron', sans-serif;
110
  font-size: 2.5em;
111
  margin: 20px 0;
@@ -113,7 +114,7 @@ description = """
113
  animation: title-pulse 2s infinite alternate;
114
  }
115
  .subtitle-text {
116
- color: #ffffff;
117
  font-family: 'Roboto Mono', monospace;
118
  font-size: 1.2em;
119
  margin-top: 10px;
@@ -167,6 +168,9 @@ with gr.Blocks(css="""
167
 
168
  /* Input/Output areas */
169
  .input-image, .output-image {
 
 
 
170
  background: rgba(18, 18, 56, 0.7) !important;
171
  border: 2px solid var(--neon-cyan) !important;
172
  border-radius: 12px !important;
@@ -174,6 +178,12 @@ with gr.Blocks(css="""
174
  overflow: hidden !important;
175
  }
176
 
 
 
 
 
 
 
177
  .input-image:hover, .output-image:hover {
178
  box-shadow: 0 0 20px rgba(0, 255, 255, 0.4) !important;
179
  transform: translateY(-2px) !important;
@@ -232,7 +242,9 @@ with gr.Blocks(css="""
232
  type="numpy",
233
  label="Upload Your Image",
234
  elem_id="input-image",
235
- elem_classes="input-image"
 
 
236
  )
237
 
238
  with gr.Column(scale=1):
@@ -240,7 +252,9 @@ with gr.Blocks(css="""
240
  type="numpy",
241
  label="Result",
242
  elem_id="output-image",
243
- elem_classes="output-image"
 
 
244
  )
245
 
246
  with gr.Row(elem_classes="download-container"):
 
24
  OUTPUT_DIR = "output_images"
25
  os.makedirs(OUTPUT_DIR, exist_ok=True)
26
 
27
+ # Define fixed size for all images
28
+ FIXED_SIZE = (512, 512)
29
+
30
  # Resize the input image for model compatibility
31
+ def resize_image(image, size=FIXED_SIZE):
32
  image = image.convert('RGB')
33
+ image = image.resize(size, Image.LANCZOS)
 
34
  return image
35
 
36
  # Background removal process
 
43
  # Prepare the input
44
  progress(0.1, desc="Preparing image...")
45
  orig_image = Image.fromarray(image)
46
+ # Resize input image to fixed size
47
+ orig_image = resize_image(orig_image, FIXED_SIZE)
48
+ w, h = FIXED_SIZE
49
+ image = orig_image
50
  im_np = np.array(image)
51
  im_tensor = torch.tensor(im_np, dtype=torch.float32).permute(2, 0, 1)
52
  im_tensor = torch.unsqueeze(im_tensor, 0)
 
90
 
91
  progress(1.0, desc="Done!")
92
 
 
93
  return output_array, gr.update(visible=True, value=filepath, interactive=True)
94
 
 
 
95
  # Gradio interface setup
96
  title = "Background Removal Tool"
97
  description = """
 
106
  box-shadow: 0 0 20px rgba(0, 255, 255, 0.2);
107
  }
108
  .title-text {
109
+ color: #ff00de;
110
  font-family: 'Orbitron', sans-serif;
111
  font-size: 2.5em;
112
  margin: 20px 0;
 
114
  animation: title-pulse 2s infinite alternate;
115
  }
116
  .subtitle-text {
117
+ color: #00ffff;
118
  font-family: 'Roboto Mono', monospace;
119
  font-size: 1.2em;
120
  margin-top: 10px;
 
168
 
169
  /* Input/Output areas */
170
  .input-image, .output-image {
171
+ width: 512px !important;
172
+ height: 512px !important;
173
+ object-fit: contain !important;
174
  background: rgba(18, 18, 56, 0.7) !important;
175
  border: 2px solid var(--neon-cyan) !important;
176
  border-radius: 12px !important;
 
178
  overflow: hidden !important;
179
  }
180
 
181
+ .input-image img, .output-image img {
182
+ width: 100% !important;
183
+ height: 100% !important;
184
+ object-fit: contain !important;
185
+ }
186
+
187
  .input-image:hover, .output-image:hover {
188
  box-shadow: 0 0 20px rgba(0, 255, 255, 0.4) !important;
189
  transform: translateY(-2px) !important;
 
242
  type="numpy",
243
  label="Upload Your Image",
244
  elem_id="input-image",
245
+ elem_classes="input-image",
246
+ height=512,
247
+ width=512
248
  )
249
 
250
  with gr.Column(scale=1):
 
252
  type="numpy",
253
  label="Result",
254
  elem_id="output-image",
255
+ elem_classes="output-image",
256
+ height=512,
257
+ width=512
258
  )
259
 
260
  with gr.Row(elem_classes="download-container"):