Amould committed on
Commit
4205531
·
verified ·
1 Parent(s): 1cf063b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -10
app.py CHANGED
@@ -52,24 +52,25 @@ def imgnt_reg(img1,img2):#, model_selected):
52
  'M_i' :M_i }
53
  #[moving_images/255, fixed_images/255]
54
  pred = IR_Model_tst(model_inputs)
55
- img_out = wrap_imge_cropped(pred['Affine_mtrx'], fixed_images, dim1=224, dim2=128)
56
  registered_img = torchvision.transforms.ToPILImage()(img_out[0])
57
  if with_points:
58
  x0_source, y0_source = generate_standard_elips(N_samples= 100)
59
  x_source = destandarize_point(x0_source, dim=dim, flip = False)
60
  y_source = destandarize_point(y0_source, dim=dim, flip = False)
61
- source_im_w_points = wrap_points(fixed_images, x_source, y_source, l=1)
 
62
  M_Predicted = workaround_matrix(pred['Affine_mtrx'].detach(), acc = 0.5/crop_ratio)
63
  x0_transformed, y0_transformed = transform_standard_points(M_Predicted[0], x0_source, y0_source)
64
  x_transformed = destandarize_point(x0_transformed, dim=dim, flip = False)
65
  y_transformed = destandarize_point(y0_transformed, dim=dim, flip = False)
66
- wrapped_img = wrap_points(img_out, x_transformed, y_transformed, l=1)
67
- img_out2 = wrapped_img
68
  marked_image = torchvision.transforms.ToPILImage()(img_out2[0])
69
  else:
70
- marked_image = torchvision.transforms.ToPILImage()(torch.zeros(3,10,10))
71
- return [registered_img, marked_image]
72
-
73
 
74
 
75
  with gr.Blocks() as demo:
@@ -98,11 +99,14 @@ with gr.Blocks() as demo:
98
  #type = "filepath",
99
  elem_id = "image-out"
100
  )
101
- out_image2 = gr.Image(label = "ٌMarked image",
102
- elem_id = "image-out"
 
 
 
103
  )
104
  inputs = [image_1, image_2]#, model_list]
105
- out_image = [out_image1, out_image2]
106
  iface = gr.Interface(fn=imgnt_reg, inputs=inputs,outputs=out_image,
107
  title="Imagenet registration V2",
108
  description="Upload 2 images to generate a registered one:",
 
52
  'M_i' :M_i }
53
  #[moving_images/255, fixed_images/255]
54
  pred = IR_Model_tst(model_inputs)
55
+ img_out = wrap_imge_cropped(pred['Affine_mtrx'].detach(), fixed_images, dim1=224, dim2=128)
56
  registered_img = torchvision.transforms.ToPILImage()(img_out[0])
57
  if with_points:
58
  x0_source, y0_source = generate_standard_elips(N_samples= 100)
59
  x_source = destandarize_point(x0_source, dim=dim, flip = False)
60
  y_source = destandarize_point(y0_source, dim=dim, flip = False)
61
+ source_im_w_points = wrap_points(fixed_images.detach(), x_source, y_source, l=1)
62
+
63
  M_Predicted = workaround_matrix(pred['Affine_mtrx'].detach(), acc = 0.5/crop_ratio)
64
  x0_transformed, y0_transformed = transform_standard_points(M_Predicted[0], x0_source, y0_source)
65
  x_transformed = destandarize_point(x0_transformed, dim=dim, flip = False)
66
  y_transformed = destandarize_point(y0_transformed, dim=dim, flip = False)
67
+ wrapped_img = wrap_points(img_out.detach(), x_transformed, y_transformed, l=1)
68
+ img_out2 = wrapped_img.detach()
69
  marked_image = torchvision.transforms.ToPILImage()(img_out2[0])
70
  else:
71
+ #source_im_w_points = torchvision.transforms.ToPILImage()(torch.zeros(3,128,128))
72
+ marked_image = torchvision.transforms.ToPILImage()(torch.zeros(3,128,128))
73
+ return [registered_img,source_im_w_points, marked_image]
74
 
75
 
76
  with gr.Blocks() as demo:
 
99
  #type = "filepath",
100
  elem_id = "image-out"
101
  )
102
+ out_image2 = gr.Image(label = "ٌMarked source image",
103
+ elem_id = "image-out2"
104
+ )
105
+ out_image3 = gr.Image(label = "ٌMarked wrapped image",
106
+ elem_id = "image-out3"
107
  )
108
  inputs = [image_1, image_2]#, model_list]
109
+ out_image = [out_image1, out_image2, out_image3]
110
  iface = gr.Interface(fn=imgnt_reg, inputs=inputs,outputs=out_image,
111
  title="Imagenet registration V2",
112
  description="Upload 2 images to generate a registered one:",