from mhr.common import *
import pickle

from mhr.custom_transform import *
def getGaborFilters(ksize, n_output, sigma_ratio_func, theta_ratio_func, lamda_ratio_func, gamma=0.5, psi=0, show=False):
    """Build a bank of ``n_output`` Gabor kernels of size ``ksize`` x ``ksize``.

    Kernel ``i`` scales a common base value of pi/2 by the supplied ratio
    functions for sigma (gaussian window width), theta (cosine direction,
    in radians) and lamda (cosine wavelength).  ``gamma`` is the spatial
    aspect ratio, ``psi`` the phase offset.  ``show`` is accepted for
    interface compatibility but not used here.

    Returns a list of float32 numpy kernels, in order of ``i``.
    """
    base = np.pi / 2.0  # shared base for sigma, theta and lamda
    return [
        cv.getGaborKernel(
            (ksize, ksize),
            base * sigma_ratio_func(i),
            base * theta_ratio_func(i),
            base * lamda_ratio_func(i),
            gamma,
            psi,
            ktype=cv.CV_32F,
        )
        for i in range(n_output)
    ]
class TorchModelSaver:
    """Persist and restore PyTorch objects via ``torch.save``/``torch.load``."""

    def save(self, model, path):
        """Serialize *model* to the file at *path*."""
        torch.save(model, path)

    def load(self, path):
        """Deserialize and return the object stored at *path*."""
        return torch.load(path)
class SklearnModelSaver:
    """Persist and restore picklable (e.g. sklearn) models on disk.

    Bug fix: the original ``save`` called ``pickle.dumps(model, path)`` --
    ``dumps`` returns bytes and interprets *path* as the pickle protocol,
    so nothing was ever written to disk (and a str path raises).  Likewise
    ``load`` called ``pickle.loads(path)``, attempting to unpickle the path
    string itself rather than the file contents.

    NOTE: only unpickle files you trust -- ``pickle.load`` on untrusted
    data can execute arbitrary code.
    """

    def save(self, model, path):
        """Serialize *model* into the file at *path*."""
        with open(path, "wb") as f:
            pickle.dump(model, f)

    def load(self, path):
        """Deserialize and return the model stored in the file at *path*."""
        with open(path, "rb") as f:
            return pickle.load(f)
# for whole
class GaborFeatureNet(nn.Module):
    """Fixed Gabor-filter feature extractor with a trainable MLP classifier.

    ``conv1`` holds a bank of 12 precomputed Gabor kernels whose weights are
    frozen in ``__init__``; only the ``classifier`` head (created after the
    freeze loop) receives gradients.  Input: single-channel image batch
    (N, 1, H, W); output: (N, num_classes) logits.
    """

    def __init__(self, num_classes, show_filters=False, show_images=False):
        # NOTE(review): show_filters / show_images are accepted but never
        # used in this class (the sibling GaborFeatureGen does use them).
        super(GaborFeatureNet, self).__init__()
        # config of gabor filters
        ksize = 20
        n_output = 12  # x//4 picks the scale, x%4 the orientation -> 3 scales x 4 orientations
        sigma_func = lambda x: (x//4)/2+1
        theta_func = lambda x: (x%4)/2
        lamda_func = lambda x: x//4+1
        filters = getGaborFilters(ksize, n_output,
                                  sigma_func, theta_func, lamda_func
                                  )
        self.conv1 = torch.nn.Conv2d(1, n_output, (ksize,ksize),stride=1, bias=False)
        # Install the precomputed Gabor bank as the conv weights
        # (shape: n_output x 1 x ksize x ksize).
        self.conv1.weight.data = torch.Tensor(filters).unsqueeze(1)
        # Despite the name, pool1 is a sigmoid activation, not a pooling layer.
        self.pool1 = nn.Sigmoid()
        self.pool2 = nn.MaxPool2d(5)
        self.pool3 = nn.MaxPool2d(2)
        # Freeze every parameter registered so far (i.e. conv1) so the Gabor
        # features stay fixed.  (The original comment here was mojibake;
        # intent recovered from the code.)
        for p in self.parameters():
            p.requires_grad = False
        # Trainable classification head -- created after the freeze loop so
        # its parameters keep requires_grad=True.
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(12*34*34 , 1024),  # 12 channels x 34x34 map -- presumably a fixed input size; TODO confirm
            nn.ReLU(),
            nn.Linear(1024, 1024),
            nn.Dropout(0.5),
            nn.ReLU(),
            nn.Linear(1024, num_classes)
        )

    def forward(self, img):
        img = self.conv1(img)
        img = self.pool1(img)
        # Max-pool both polarities so strong negative filter responses
        # survive the pooling as well.
        img = self.pool2(img) + self.pool2(-1*img)
        img = self.pool3(img)
        img = self.classifier(img)
        return img
class WholeModelMgr:
    """Bundle a GaborFeatureNet with a TorchModelSaver for persistence."""

    def __init__(self, num_classes):
        self.saver = TorchModelSaver()
        self.model = GaborFeatureNet(num_classes)

    def save(self, path):
        """Write the current model to *path*."""
        self.saver.save(self.model, path)

    def load(self, path):
        """Replace the current model with the one stored at *path*."""
        self.model = self.saver.load(path)
# for hole
class GaborFeatureGen(nn.Module):
    """Fixed Gabor-filter feature generator (no classifier head).

    Produces a flattened per-image feature vector, intended as the feature
    stage for clustering (see HoleModelMgr).  ``num_classes`` is accepted
    for signature symmetry with GaborFeatureNet but unused.  Optionally
    displays the filter bank and/or feature maps via matplotlib.
    """

    def __init__(self, num_classes, show_filters=False, show_images=False):
        super(GaborFeatureGen, self).__init__()
        # config of gabor filters
        ksize = 20
        n_output = 12
        sigma_func = lambda x: ((x//4)/2+2)/4
        theta_func = lambda x: (x%4)/2
        lamda_func = lambda x: (x//4+1)/2
        self.show_filters = show_filters
        self.show_images = show_images
        # wins x dirs is the subplot grid used by _show_img.
        self.wins, self.dirs = self._get_wins_dirs(n_output, theta_func)
        filters = getGaborFilters(ksize, n_output,
                                  sigma_func, theta_func, lamda_func,
                                  psi=np.pi/2)
        # padding='same' keeps the spatial size -- unlike GaborFeatureNet.conv1,
        # which uses no padding.
        self.conv1 = torch.nn.Conv2d(1, n_output, (ksize,ksize),stride=1,padding='same', bias=False)
        self.conv1.weight.data = torch.Tensor(filters).unsqueeze(1)
        # pool1 is a sigmoid activation despite the name; pool2 (kernel 1) is
        # a no-op, kept in sync with the commented-out line in forward().
        self.pool1 = nn.Sigmoid()
        self.pool2 = nn.MaxPool2d(1)
        self.pool3 = nn.MaxPool2d(2)
        if show_filters:
            self._show_img(self.wins, self.dirs, filters)

    def forward(self, img):
        """Return flattened Gabor responses for a batch of 1-channel images."""
        img = self.conv1(img)
        img = self.pool1(img)
        #img = self.pool2(img) + self.pool2(-1*img)
        img = self.pool3(img)
        if self.show_images:
            # Show the feature maps of the first image in the batch.
            self._show_img(self.wins, self.dirs, img[0])
        return nn.Flatten()(img)

    def _show_img(self, wins,dirs,imgs):
        # Display each filter / feature map on a wins x dirs matplotlib grid.
        plt.figure(1)
        for i in range(len(imgs)):
            plt.subplot(wins, dirs, i+1)
            if type(imgs[i]) is np.ndarray:
                plt.imshow(imgs[i], cmap=plt.get_cmap('gray'))
            else:
                # Non-ndarray entries are assumed to be torch tensors;
                # convert for display.
                plt.imshow(T.functional.to_pil_image(imgs[i]), cmap=plt.get_cmap('gray'))
        plt.show()

    def _get_wins_dirs(self, n_output, theta_func):
        # dirs = number of distinct orientations produced by theta_func;
        # wins = filters per orientation.
        dirs = len(set([ theta_func(i) for i in range(n_output) ]))
        return n_output//dirs, dirs
class HoleModelMgr:
    """Pair a GaborFeatureGen feature extractor with a KMeans cluster model."""

    def __init__(self, n_clusters):
        self.feat_model = GaborFeatureGen(0)  # num_classes is unused by the generator
        self.model = KMeans(n_clusters)
        self.saver = SklearnModelSaver()

    def save(self, path):
        """Persist only the KMeans model; the Gabor features are deterministic."""
        self.saver.save(self.model, path)

    def load(self, path):
        """Restore the KMeans model from *path*."""
        self.model = self.saver.load(path)
# for skill
class MyTrRecognizeNet(torch.nn.Module):
    """Crop fixed name/level regions from an image tensor and OCR them.

    Relies on the project-local ``TensorCut`` transform and ``tr.recognize``
    (both brought in via the ``mhr.*`` star imports).  ``forward`` returns
    ``(nm, lv)`` for a single image or a list of such pairs for a batch.
    """

    def __init__(self, image_padding):
        # image_padding: extra margin grown symmetrically around each crop
        # (None is treated as 0).
        super(MyTrRecognizeNet, self).__init__()
        ip = image_padding if image_padding is not None else 0
        # Hard-coded crop geometry: [row, col] origin plus height/width.
        # nm_* = name field, lv_* = level field -- presumably tuned for a
        # fixed-resolution screenshot; verify against the capture code.
        nm_p = [5, 22+16*0]
        nm_h = 18
        nm_w = 16*10
        lv_p = [29, 193]
        lv_h = 18
        lv_w = 44
        #print([ x-ip for x in nm_p], nm_h+ip*2, nm_w+ip*2)
        #print([ x-ip for x in lv_p], lv_h+ip*2, lv_w+ip*2)
        self.tsfm_nm = T.Compose([
            TensorCut([ x-ip for x in nm_p], nm_h+ip*2, nm_w+ip*2), # name crop (original comment was mojibake)
            ])
        self.tsfm_lv = T.Compose([
            TensorCut([ x-ip for x in lv_p], lv_h+ip*2, lv_w+ip*2), # level crop (original comment was mojibake)
            ])

    def batch_forward(self, imgs):
        # One (nm, lv) pair per image in the batch.
        return [ self.single_forward(img) for img in imgs ]

    def single_forward(self, img):
        # Crop both regions from copies of the input, then OCR each via tr.
        img_nm = self.tsfm_nm(img.clone().detach())
        img_lv = self.tsfm_lv(img.clone().detach())
        nm = tr.recognize(T.functional.to_pil_image(img_nm))
        lv = tr.recognize(T.functional.to_pil_image(img_lv))
        return nm,lv

    def forward(self, img):
        # Dispatch on rank: 4-D = batch (NCHW), 3-D = single image (CHW).
        # NOTE(review): any other rank falls through and returns None.
        if len(img.shape) == 4:
            return self.batch_forward(img)
        elif len(img.shape) == 3:
            return self.single_forward(img)

    def forward_bak(self, img):
        # Older single-image implementation kept for reference; superseded by
        # forward()/single_forward() above.
        if len(img.shape) == 4:
            img = img.squeeze(0)
        img_nm = self.tsfm_nm(img.clone().detach())
        img_lv = self.tsfm_lv(img.clone().detach())
        nm = tr.recognize(T.functional.to_pil_image(img_nm))
        lv = tr.recognize(T.functional.to_pil_image(img_lv))
        return nm,lv