Amould committed on
Commit
af4e700
·
verified ·
1 Parent(s): 7222107

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -52
app.py CHANGED
@@ -36,58 +36,6 @@ IR_Model_tst.load_state_dict(torch.load(file_savingfolder+'IR_Model'+ext+'.pth',
36
  #IR_Model_tst.to(device)
37
  IR_Model_tst.eval()
38
 
39
-
40
-
41
def standarize_point(d, dim=128, flip = False):
    """Map a pixel coordinate in [0, dim] to the normalized range [-0.5, 0.5].

    Args:
        d: coordinate value (scalar or tensor).
        dim: image side length used for normalization (default 128).
        flip: when True, negate the coordinate before normalizing.

    Returns:
        The normalized coordinate, same type/shape as ``d``.
    """
    value = -d if flip else d
    return value / dim - 0.5
45
-
46
def destandarize_point(d, dim=128, flip = False):
    """Map a normalized coordinate in [-0.5, 0.5] back to pixel units [0, dim].

    Inverse of ``standarize_point`` for the non-flipped case.

    NOTE(review): with flip=True the round trip
    destandarize_point(standarize_point(d, flip=True), flip=True) yields
    d + dim rather than d — confirm the intended flip convention with callers.

    Args:
        d: normalized coordinate (scalar or tensor).
        dim: image side length used for de-normalization (default 128).
        flip: when True, negate the coordinate before scaling.

    Returns:
        The pixel-space coordinate, same type/shape as ``d``.
    """
    value = -d if flip else d
    return (value + 0.5) * dim
50
-
51
def generate_standard_elips(N_samples = 100, a= 1,b = 1):
    """Sample points tracing a closed ellipse-like contour of radius 0.25.

    The curve is split into an upper arc (x ascending, +sqrt branch) and a
    lower arc (x descending, -sqrt branch) so that concatenating them walks
    the contour once around. x positions are drawn uniformly at random.

    NOTE(review): for a < 1 the sqrt argument can go negative (NaN y values);
    default a = b = 1 is safe.

    Args:
        N_samples: total number of points over both arcs.
        a: horizontal axis scale applied inside the sqrt.
        b: vertical axis scale applied to the sqrt.

    Returns:
        (x, y): two 1-D tensors of length N_samples.
    """
    radius = 0.25
    center = 0
    n_upper = int(N_samples / 2 - 1)
    n_lower = N_samples - n_upper
    sampler = torch.distributions.uniform.Uniform(center - radius, center + radius)
    # Upper arc: sort ascending, take the positive sqrt branch.
    xs_up = torch.sort(sampler.sample([n_upper])).values
    ys_up = center + b * torch.sqrt(radius**2 - ((xs_up - center) / a)**2)
    # Lower arc: sort descending, take the negative sqrt branch.
    xs_dn = torch.sort(sampler.sample([n_lower]), descending=True).values
    ys_dn = center - b * torch.sqrt(radius**2 - ((xs_dn - center) / a)**2)
    return torch.cat([xs_up, xs_dn]), torch.cat([ys_up, ys_dn])
65
-
66
def transform_standard_points(Affine_mat, x,y):
    """Apply an affine matrix to 2-D points given as coordinate tensors.

    Points are lifted to homogeneous coordinates (third row of ones) and
    multiplied by the matrix on the left.

    Args:
        Affine_mat: affine matrix with 3 columns (e.g. 2x3 or 3x3 tensor);
            detached and moved to CPU before use so model gradients/devices
            do not leak into this geometry helper.
        x, y: 1-D tensors of equal length with point coordinates.

    Returns:
        (xt0, yt0): transformed x and y coordinate tensors.
    """
    n_points = x.shape[0]
    homogeneous = torch.ones([3, n_points])
    homogeneous[0, :] = x
    homogeneous[1, :] = y
    mat = Affine_mat.detach().to('cpu')
    transformed = torch.matmul(mat, homogeneous)
    return transformed[0], transformed[1]
74
-
75
def wrap_points(img, x_source, y_source, l=1):
    """Zero out a (2l x 2l) patch of ``img`` at each point, in place.

    NOTE(review): points index the last two axes as img[..., x, y]; confirm
    this matches the (H, W) axis order of the tensors produced upstream.
    Coordinates within ``l`` of 0 produce negative slice bounds, which
    Python interprets as wrap-around — the patch is then empty or misplaced.

    Args:
        img: 4-D tensor (batch, channel, dim2, dim3), modified in place.
        x_source, y_source: 1-D tensors of point coordinates (float ok,
            truncated to int).
        l: half-size of the square patch to blank.

    Returns:
        The same ``img`` tensor, for chaining.
    """
    for i, py in enumerate(y_source):
        cx = x_source[i].int()
        cy = py.int()
        img[:, :, cx - l:cx + l, cy - l:cy + l] = 0
    return img
81
-
82
-
83
def wrap_imge_cropped(Affine_mtrx, source_img, dim1=224, dim2=128):
    """Warp an image by an affine matrix, padding first so content pushed
    past the original frame survives, then crop back to the original size.

    Args:
        Affine_mtrx: batch of 2x3 affine matrices accepted by
            ``torch.nn.functional.affine_grid``.
        source_img: 4-D image tensor (batch, channel, dim2, dim2).
        dim1: padded working size (default 224).
        dim2: input/output spatial size (default 128).

    Returns:
        The warped image, center-cropped to (dim2, dim2).
    """
    pad = int((dim1 - dim2) / 2)
    padded = torch.nn.ZeroPad2d(pad)(source_img)
    sampling_grid = torch.nn.functional.affine_grid(
        Affine_mtrx, size=padded.shape, align_corners=False)
    warped = torch.nn.functional.grid_sample(
        padded, grid=sampling_grid,
        mode='bilinear', padding_mode='zeros', align_corners=False)
    return torchvision.transforms.CenterCrop((dim2, dim2))(warped)
90
-
91
  def imgnt_reg(img1,img2):#, model_selected):
92
  #fixed_images = np.empty((1, 128, 128, 3))
93
  #moving_images = np.empty((1, 128, 128, 3))
 
36
  #IR_Model_tst.to(device)
37
  IR_Model_tst.eval()
38
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
39
  def imgnt_reg(img1,img2):#, model_selected):
40
  #fixed_images = np.empty((1, 128, 128, 3))
41
  #moving_images = np.empty((1, 128, 128, 3))