dlaima committed (verified)
Commit c7839f4 · 1 Parent(s): 7de564a

Update app.py

Files changed (1): app.py +9 -9
app.py CHANGED
@@ -5,23 +5,25 @@ import torch
 import numpy as np
 from PIL import Image
 
-# Load the depth estimation model
+# Load the depth estimation model or segmentation model
 depth_estimator = pipeline(task="depth-estimation", model="Intel/dpt-hybrid-midas")
 
-# Function to process the image and return depth map
 def launch(input_image):
     try:
+        # Resize the input image to a fixed size (e.g., 640x480)
+        input_image = input_image.resize((640, 480))
+
         # Ensure the input image is in RGB mode
         if input_image.mode != "RGB":
             input_image = input_image.convert("RGB")
 
-        # Run the image segmentation model
+        # Run the image segmentation model (or depth estimation)
         out = depth_estimator(input_image)
 
-        # Assuming output contains the segmentation mask or predicted depth map
+        # Assuming the output contains the predicted depth or segmentation mask
         predicted_output = out["predicted_depth"] if "predicted_depth" in out else out["segmentation_mask"]
 
-        # Resize the output to match the input image size
+        # Resize the output to match the input image size (H, W)
         predicted_output_resized = torch.nn.functional.interpolate(
             predicted_output.unsqueeze(0), # Add batch dimension
             size=input_image.size[::-1], # Match input image size (H, W)
@@ -42,15 +44,13 @@ def launch(input_image):
         print(f"Error processing the image: {str(e)}")
         return "An error occurred while processing the image."
 
-# Define the Gradio interface
+# Define the Gradio interface without 'image_size' argument
 iface = gr.Interface(
     fn=launch,
     inputs=gr.Image(type="pil"),
-    outputs=gr.Image(type="pil", image_size=(640, 480)) # Specify size for output
+    outputs=gr.Image(type="pil") # Remove image_size argument
 )
 
-
-
 # Launch the interface
 iface.launch()
 
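The visible hunks cut off right after the interpolate() call, so the step that turns the resized depth tensor back into a PIL image for gr.Image(type="pil") is not shown in this diff. The following is a minimal sketch of how that conversion is commonly written, not the file's actual hidden lines; depth_tensor_to_pil is a hypothetical helper name used only for illustration.

# Hedged sketch: normalize the interpolated depth tensor and convert it to a
# grayscale PIL image that gr.Image(type="pil") can display.
# depth_tensor_to_pil is a hypothetical helper, not part of the repository's code.
import numpy as np
import torch
from PIL import Image

def depth_tensor_to_pil(predicted_output_resized: torch.Tensor) -> Image.Image:
    # Drop the batch and channel dimensions added for interpolate()
    depth = predicted_output_resized.squeeze().detach().cpu().numpy()
    # Scale values to 0-255 so the depth map is viewable as an 8-bit image
    depth = (depth - depth.min()) / (depth.max() - depth.min() + 1e-8)
    return Image.fromarray((depth * 255).astype(np.uint8))

As for the Gradio change: gr.Image does not take an image_size keyword (recent versions expose height and width instead), which is presumably why the commit drops that argument and resizes the input inside launch() instead.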