code
stringlengths
17
6.64M
def print_descriptor_similarity(image_description_similarity, index, label, label_name, label_type='provided'):
    """Print the average and per-descriptor similarity scores for one image and one label, best descriptors first."""
    print(f'Total similarity to {label_name} ({label_type} label) descriptors:')
    avg = aggregate_similarity(image_description_similarity[label][index].unsqueeze(0)).item()
    print(f'Average: {(100.0 * avg)}')
    label_descriptors = gpt_descriptions[label_name]
    ranked = sorted(zip(label_descriptors, image_description_similarity[label][index]), key=lambda pair: pair[1], reverse=True)
    for descriptor, score in ranked:
        # map the formatted prompt string back to the raw descriptor for display
        raw = unmodify_dict[label_name][descriptor]
        print(f'{raw} {(100.0 * score)}')
def print_max_descriptor_similarity(image_description_similarity, index, label, label_name):
    """Print the single highest-scoring descriptor for one image/label pair."""
    scores = image_description_similarity[label][index]
    max_similarity, argmax = scores.max(dim=0)
    best_descriptor = gpt_descriptions[label_name][argmax.item()]
    print(f'I saw a {label_name} because I saw {unmodify_dict[label_name][best_descriptor]} with score: {max_similarity.item()}')
def show_misclassified_images(images, labels, predictions, n=None, image_description_similarity=None, image_labels_similarity=None, true_label_to_consider: int=None, predicted_label_to_consider: int=None):
    """Display misclassified images, optionally restricted to a given true and/or predicted label."""
    indices = yield_misclassified_indices(
        images,
        labels=labels,
        predictions=predictions,
        true_label_to_consider=true_label_to_consider,
        predicted_label_to_consider=predicted_label_to_consider,
    )
    if indices is None:
        # nothing matched the filters; yield_misclassified_indices already printed why
        return
    show_from_indices(indices, images, labels, predictions, n=n,
                      image_description_similarity=image_description_similarity,
                      image_labels_similarity=image_labels_similarity)
def yield_misclassified_indices(images, labels, predictions, true_label_to_consider=None, predicted_label_to_consider=None):
    """Return a tensor of indices where predictions differ from labels.

    Optional filters restrict to a given true and/or predicted label. Returns
    None (after printing an explanation) when nothing matches.
    """
    wrong = predictions.cpu() != labels.cpu()
    if true_label_to_consider is not None:
        wrong = wrong & (labels.cpu() == true_label_to_consider)
    if predicted_label_to_consider is not None:
        wrong = wrong & (predictions.cpu() == predicted_label_to_consider)
    if wrong.sum() == 0:
        message = 'No misclassified images found'
        if true_label_to_consider is not None:
            message += f' with true label {label_to_classname[true_label_to_consider]}'
        if predicted_label_to_consider is not None:
            message += f' with predicted label {label_to_classname[predicted_label_to_consider]}'
        print(message + '.')
        return
    return torch.arange(images.shape[0])[wrong]
def predict_and_show_explanations(images, model, labels=None, description_encodings=None, label_encodings=None, device=None):
    """Classify images two ways (plain label similarity vs. aggregated descriptor
    similarity) and hand both sets of predictions to show_from_indices.

    Args:
        images: batch tensor of preprocessed images (or a single PIL image, see note).
        model: CLIP-like model exposing encode_image.
        labels: ground-truth labels, moved to `device` alongside the images.
        description_encodings: ordered mapping class -> descriptor embedding matrix.
        label_encodings: matrix of per-class label embeddings.
        device: torch device the model/encodings live on.
    """
    # NOTE(review): if `Image` was imported via `from PIL import Image`, `Image`
    # is a module and `type(images) == Image` is always False, making this
    # branch dead — likely intended isinstance(images, Image.Image); confirm.
    if (type(images) == Image):
        images = tfms(images)
    if (images.device != device):
        images = images.to(device)
        labels = labels.to(device)
    image_encodings = model.encode_image(images)
    image_encodings = F.normalize(image_encodings)
    # cosine similarity (encodings are normalized) between images and labels
    image_labels_similarity = (image_encodings @ label_encodings.T)
    clip_predictions = image_labels_similarity.argmax(dim=1)
    n_classes = len(description_encodings)
    image_description_similarity = ([None] * n_classes)
    image_description_similarity_cumulative = ([None] * n_classes)
    for (i, (k, v)) in enumerate(description_encodings.items()):
        # per-descriptor similarities for class k, then aggregated to one score
        dot_product_matrix = (image_encodings @ v.T)
        image_description_similarity[i] = dot_product_matrix
        image_description_similarity_cumulative[i] = aggregate_similarity(image_description_similarity[i])
    cumulative_tensor = torch.stack(image_description_similarity_cumulative, dim=1)
    descr_predictions = cumulative_tensor.argmax(dim=1)
    show_from_indices(torch.arange(images.shape[0]), images, labels, descr_predictions, clip_predictions, image_description_similarity=image_description_similarity, image_labels_similarity=image_labels_similarity)
def load_json(filename):
    """Load a JSON file, appending the '.json' suffix when it is missing."""
    path = filename if filename.endswith('.json') else filename + '.json'
    with open(path, 'r') as fp:
        return json.load(fp)
def wordify(string):
    """Turn an underscore-separated identifier into a space-separated phrase."""
    return string.replace('_', ' ')
def make_descriptor_sentence(descriptor):
    """Turn a raw descriptor into a relative clause ('which is/has ...')."""
    # startswith('a') already covers 'an'; kept for behavioral parity
    if descriptor.startswith('a'):
        return f'which is {descriptor}'
    if descriptor.startswith(('has', 'often', 'typically', 'may', 'can')):
        return f'which {descriptor}'
    if descriptor.startswith('used'):
        return f'which is {descriptor}'
    return f'which has {descriptor}'
def modify_descriptor(descriptor, apply_changes):
    """Return the descriptor rewritten into sentence form when apply_changes is truthy, else unchanged."""
    return make_descriptor_sentence(descriptor) if apply_changes else descriptor
def load_gpt_descriptions(hparams, classes_to_load=None):
    """Load GPT-generated per-class descriptors and build prompt strings.

    Returns (gpt_descriptions, unmodify_dict): gpt_descriptions maps each class
    to its formatted descriptor strings; unmodify_dict maps each formatted
    string back to the raw descriptor it came from, per class.
    """
    gpt_descriptions_unordered = load_json(hparams['descriptor_fname'])
    unmodify_dict = {}
    if (classes_to_load is not None):
        gpt_descriptions = {c: gpt_descriptions_unordered[c] for c in classes_to_load}
    else:
        gpt_descriptions = gpt_descriptions_unordered
    if (hparams['category_name_inclusion'] is not None):
        if (classes_to_load is not None):
            # drop any classes not in the requested subset
            keys_to_remove = [k for k in gpt_descriptions.keys() if (k not in classes_to_load)]
            for k in keys_to_remove:
                print(f'Skipping descriptions for "{k}", not in classes to load')
                gpt_descriptions.pop(k)
        for (i, (k, v)) in enumerate(gpt_descriptions.items()):
            if (len(v) == 0):
                # a class with no descriptors still gets one empty string so
                # the class name alone forms a prompt
                v = ['']
            word_to_add = wordify(k)
            # choose how the class name is combined with each descriptor
            if (hparams['category_name_inclusion'] == 'append'):
                build_descriptor_string = (lambda item: f"{modify_descriptor(item, hparams['apply_descriptor_modification'])}{hparams['between_text']}{word_to_add}")
            elif (hparams['category_name_inclusion'] == 'prepend'):
                build_descriptor_string = (lambda item: f"{hparams['before_text']}{word_to_add}{hparams['between_text']}{modify_descriptor(item, hparams['apply_descriptor_modification'])}{hparams['after_text']}")
            else:
                build_descriptor_string = (lambda item: modify_descriptor(item, hparams['apply_descriptor_modification']))
            # reverse mapping: formatted prompt -> raw descriptor
            unmodify_dict[k] = {build_descriptor_string(item): item for item in v}
            gpt_descriptions[k] = [build_descriptor_string(item) for item in v]
            if (i == 0):
                print(f''' Example description for class {k}: "{gpt_descriptions[k][0]}" ''')
    return (gpt_descriptions, unmodify_dict)
def seed_everything(seed: int):
    """Seed Python, NumPy and PyTorch RNGs for reproducible runs.

    Fix: cudnn.benchmark is now set to False — benchmark=True lets cuDNN pick
    convolution algorithms non-deterministically, which defeats the
    cudnn.deterministic=True setting on the previous line.
    """
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def denormalize(images, means=(0.485, 0.456, 0.406), stds=(0.229, 0.224, 0.225)):
    """Undo per-channel normalization on a (N, 3, H, W) image batch.

    Defaults are the standard ImageNet statistics.
    """
    mean_t = torch.tensor(means).reshape(1, 3, 1, 1)
    std_t = torch.tensor(stds).reshape(1, 3, 1, 1)
    return images * std_t + mean_t
def show_single_image(image):
    """Plot one normalized (C, H, W) image tensor after denormalizing it."""
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.set_xticks([])
    ax.set_yticks([])
    restored = denormalize(image.unsqueeze(0).cpu(), *stats)
    ax.imshow(restored.squeeze().permute(1, 2, 0).clamp(0, 1))
    plt.show()
class Agent():
    """Epsilon-greedy DQN trading agent with experience replay.

    Fix: expReplay previously initialized `mini_batch = []` and bound an
    unused local `l = len(self.memory)` before immediately overwriting the
    minibatch via random.sample; both dead statements are removed.
    """

    def __init__(self, state_size, is_eval=False, model_name=''):
        self.state_size = state_size
        self.action_size = 5  # number of discrete trading actions
        self.memory = deque(maxlen=2000)  # replay buffer
        self.inventory1 = []  # holdings of stock 1
        self.inventory2 = []  # holdings of stock 2
        self.model_name = model_name
        self.is_eval = is_eval
        self.gamma = 0.95  # reward discount factor
        self.epsilon = 1.0  # exploration rate, decays toward epsilon_min
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.995
        self.model = (load_model(('models/' + model_name)) if is_eval else self._model())

    def _model(self):
        """Build the MLP Q-network mapping a state vector to action values."""
        model = Sequential()
        model.add(Dense(units=64, input_dim=self.state_size, activation='relu'))
        model.add(Dense(units=32, activation='relu'))
        model.add(Dense(units=8, activation='relu'))
        model.add(Dense(self.action_size, activation='linear'))
        model.compile(loss='mse', optimizer=Adam(lr=0.0001))
        return model

    def act(self, state):
        """Pick an action epsilon-greedily (always greedy in eval mode)."""
        if ((not self.is_eval) and (random.random() <= self.epsilon)):
            return random.randrange(self.action_size)
        options = self.model.predict(state)
        return np.argmax(options[0])

    def expReplay(self, batch_size):
        """Fit the Q-network on a random replay minibatch, then decay epsilon."""
        mini_batch = random.sample(self.memory, batch_size)
        for (state, action, reward, next_state, done) in mini_batch:
            target = reward
            if (not done):
                # bootstrapped Q-learning target
                target = (reward + (self.gamma * np.amax(self.model.predict(next_state)[0])))
            target_f = self.model.predict(state)
            target_f[0][action] = target
            self.model.fit(state, target_f, epochs=1, verbose=0)
        if (self.epsilon > self.epsilon_min):
            self.epsilon *= self.epsilon_decay
def formatPrice(n):
    """Format a number as a signed dollar amount, e.g. -3.5 -> '-$3.50'."""
    sign = '-$' if n < 0 else '$'
    return '{}{:.2f}'.format(sign, abs(n))
def getStockDataVec(key):
    """Read closing prices (CSV column 4) from data/<key>.txt, skipping the header row.

    Fix: the file handle was opened without ever being closed (resource leak);
    it is now managed with a `with` block.
    """
    with open('data/' + key + '.txt', 'r') as f:
        lines = f.read().splitlines()
    return [float(line.split(',')[4]) for line in lines[1:]]
def getStockVolVec(key):
    """Read volumes (CSV column 5) from data/<key>.txt, skipping the header row.

    Fix: the file handle was opened without ever being closed (resource leak);
    it is now managed with a `with` block.
    """
    with open('data/' + key + '.txt', 'r') as f:
        lines = f.read().splitlines()
    return [float(line.split(',')[5]) for line in lines[1:]]
def sigmoid(x):
    """Numerically stable logistic function 1 / (1 + e^-x).

    Fix: the naive form raises OverflowError from math.exp for large negative
    x (e.g. x = -1000). Branching on the sign keeps the exponent non-positive
    while returning identical values for ordinary inputs.
    """
    if x >= 0:
        return 1 / (1 + math.exp(-x))
    z = math.exp(x)
    return z / (1 + z)
class State():
    # Trading-environment state for a two-stock portfolio.
    def __init__(self, data1, data2, Bal_stock1, Bal_stock2, open_cash, timestep):
        self.Stock1Price = data1[timestep]
        self.Stock2Price = data2[timestep]
        self.Stock1Blnc = Bal_stock1
        self.Stock2Blnc = Bal_stock2
        self.open_cash = open_cash
        self.fiveday_stock1 = self.five_day_window(data1, timestep)
        self.fiveday_stock2 = self.five_day_window(data2, timestep)
        # NOTE(review): this rebinds the name over the method of the same name —
        # after __init__, self.portfolio_value is a float and the method is
        # shadowed on the instance.
        self.portfolio_value = self.portfolio_value()

    def portfolio_value(self):
        # Total value: both stock positions at current prices plus free cash.
        pvalue = 0  # NOTE(review): unused accumulator
        v1 = (self.Stock1Price * float(self.Stock1Blnc))
        v2 = (self.Stock2Price * float(self.Stock2Blnc))
        v3 = float(self.open_cash)
        return ((v1 + v2) + v3)

    def next_opening_price(self):
        # NOTE(review): reads module-level data1/data2/timestep, not instance
        # state — raises NameError unless such globals exist; confirm intent.
        return [data1[(timestep + 1)], data2[(timestep + 1)]]

    def five_day_window(self, data, timestep):
        # Mean of the previous five prices; before step 5 fall back to data[0].
        step = timestep
        if (step < 5):
            return data[0]
        stock_5days = np.mean(data[(step - 5):step])
        return stock_5days

    def reset(self):
        # Restore hard-coded initial prices/balances — presumably the two
        # stocks' day-0 values from the training data; confirm with caller.
        self.Stock1Price = 151.25
        self.Stock2Price = 21.845
        self.Stock1Blnc = 34
        self.Stock2Blnc = 221
        self.open_cash = 10000
        self.fiveday_stock1 = 151.25
        self.fiveday_stock2 = 21.845
        self.portfolio_value = 10000

    def getState(self):
        # Flatten the state into a (1, 8) numpy array for the Q-network.
        res = []
        res.append(self.Stock1Price)
        res.append(self.Stock2Price)
        res.append(self.Stock1Blnc)
        res.append(self.Stock2Blnc)
        res.append(self.open_cash)
        res.append(self.fiveday_stock1)
        res.append(self.fiveday_stock2)
        res.append(self.portfolio_value)
        res1 = np.array([res])
        return res1
def get_available_gpus():
    """Return the names of all GPU devices visible to TensorFlow."""
    devices = device_lib.list_local_devices()
    return [d.name for d in devices if d.device_type == 'GPU']
def get_available_gpus():
    """List the device names of every local TensorFlow GPU."""
    gpu_names = []
    for dev in device_lib.list_local_devices():
        if dev.device_type == 'GPU':
            gpu_names.append(dev.name)
    return gpu_names
def adjust_multilabel(y, is_pred=False):
    """Convert rows of y into 19-dim multi-hot vectors.

    Predictions (is_pred=True) are argmax'd and mapped to a comma-separated
    topic string first; gold rows are assumed to already be such strings.
    """
    adjusted = []
    for row in y:
        multi_hot = [0] * 19
        topics = target_vaiables_id2topic_dict[np.argmax(row)] if is_pred == True else row
        for tag in topics.split(','):
            multi_hot[topics_list.index(tag)] = 1
        adjusted.append(multi_hot)
    return adjusted
def get_metrics(y_test, y_pred_tag, for_classifier=True):
    """Print sample slices and macro/weighted F1, then return the final F1 score."""
    y_test = np.array(adjust_multilabel(y_test))
    y_pred_tag = np.array(adjust_multilabel(y_pred_tag, is_pred=True))
    # show a random window of both matrices for eyeballing
    spl = random.randint(1, 1000)
    print('y_test', y_test.shape)
    print('y_test\n', y_test[spl:(spl + 10)])
    print('y_pred_tag', y_pred_tag.shape)
    print('y_pred_tag\n', y_pred_tag[spl:(spl + 10)])
    for f1_type in ('macro', 'weighted'):
        curr_type_f1 = f1_score(y_test, y_pred_tag, average=f1_type)
        print('f1 type {} value is {}'.format(f1_type, round(curr_type_f1, 2)))
    average = 'macro' if for_classifier == True else None
    return f1_score(y_test, y_pred_tag, average=average)
def get_count(topic):
    """Look up how many samples carry the given topic."""
    return topic2count_dict[topic]
def is_manual(text):
    """Tag a text's provenance: crowd-sourced if present in manual_set."""
    return 'crowd_source' if text in manual_set else 'semi_automative'
def round_val(val):
    """Binarize a score against the module-level ROUND_THRESHOLD."""
    return 0 if val < ROUND_THRESHOLD else 1
def depersonalize(text):
    """Scrub identifying content: URLs, ids, quote markers, @mentions, digit runs
    and tab/newline characters. Substitutions run in the original order."""
    url_regex = '(?i)\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]{2,4}/)(?:[^\\s()<>]+|\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\)|[^\\s`!()\\[\\]{};:\'\\".,<>?«»“”‘’]))'
    substitutions = (
        (url_regex, 'url'),
        ('id[\\d]*', ''),
        ('> ?', ''),
        ('@[\\w]*', ''),
        ('[\\d]+', 'NUMBER'),
        ('>>', ''),
        ('[\t|\n|\r]', ''),
    )
    result = str(text)
    for pattern, replacement in substitutions:
        result = re.sub(pattern, replacement, result)
    return result
def adjust_multilabel(y, is_pred=False):
    # NOTE(review): despite the name and signature, this variant returns only
    # the topic string for the FIRST row of `y` — the loop exits via `return`
    # on its first iteration, and both `y_adjusted` and `y_test_curr` are dead.
    # Possibly intentional single-sample decoding; confirm with callers before
    # changing. `is_pred` is also unused here.
    y_adjusted = []
    for y_c in y:
        y_test_curr = ([0] * 19)
        index = str(int(np.argmax(y_c)))
        y_c = target_vaiables_id2topic_dict[index]
        return y_c
def get_labels(dataframe):
    """Build one comma-joined label string per dataframe row from the columns
    in `necessary_columns`; rows with no positive column get 'none'."""
    labels = []
    for _, row in dataframe.iterrows():
        row_labels = [clm for clm in necessary_columns if row[clm] == 1]
        if not row_labels:
            row_labels.append('none')
        labels.append(','.join(row_labels))
    return labels
class UnsafeData(Dataset):
    """Text-classification dataset producing HF-style tokenized encoding dicts."""

    def __init__(self, texts, targets, tokenizer, max_len):
        super().__init__()
        self.texts = texts
        self.targets = targets
        self.max_len = max_len
        self.tokenizer = tokenizer

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, index):
        # tokenize with padding/truncation to a fixed length
        encoded = self.tokenizer(self.texts[index], truncation=True, max_length=self.max_len, padding='max_length')
        sample = {name: torch.tensor(values).long() for name, values in encoded.items()}
        sample['labels'] = torch.tensor(self.targets[index]).long()
        return sample
def adjust_multilabel(y, is_pred=False):
    """Convert gold ids or prediction rows into 19-dim multi-hot vectors.

    Predictions are argmax'd first; both paths are then mapped through
    target_vaiables_id2topic_dict to a comma-separated topic string.
    """
    adjusted = []
    for entry in y:
        key = np.argmax(entry) if is_pred == True else entry
        topics = target_vaiables_id2topic_dict[key]
        multi_hot = [0] * 19
        for tag in topics.split(','):
            multi_hot[topics_list.index(tag)] = 1
        adjusted.append(multi_hot)
    return adjusted
def compute_metrics(pred):
    """HF-Trainer metric hook: accuracy plus weighted precision/recall/F1
    computed over multi-hot-encoded labels and predictions."""
    labels = adjust_multilabel(pred.label_ids, is_pred=False)
    preds = adjust_multilabel(pred.predictions, is_pred=True)
    precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average='weighted', zero_division=0)
    return {
        'accuracy': accuracy_score(labels, preds),
        'f1': f1,
        'precision': precision,
        'recall': recall,
    }
def cleanup():
    """Force a GC pass and release cached CUDA memory back to the driver."""
    gc.collect()
    torch.cuda.empty_cache()
def is_manual(text):
    """Classify provenance: 'crowd_source' when text is in manual_set, else 'semi_automative'."""
    if text in manual_set:
        return 'crowd_source'
    return 'semi_automative'
def round_val(val):
    """Threshold a value to 0/1 using the module-level ROUND_THRESHOLD."""
    if val < ROUND_THRESHOLD:
        return 0
    return 1
class MyDataset(torch.utils.data.Dataset):
    """Multi-modal MRI dataset (BraTS-style naming).

    Each entry of imList is the FLAIR volume path; the T1/T1CE/T2 paths are
    derived by replacing the 'flair' substring. labelList holds the matching
    segmentation volume paths.
    """

    def __init__(self, imList, labelList, transform=None):
        self.imList = imList
        self.labelList = labelList
        self.transform = transform  # callable over (img1..img4, label), or None

    def __len__(self):
        return len(self.imList)

    def __getitem__(self, idx):
        image_name = self.imList[idx]
        label_name = self.labelList[idx]
        # NOTE(review): nibabel's get_data() is deprecated (removed in newer
        # releases) in favor of get_fdata(); confirm the pinned nibabel version.
        image1 = nib.load(image_name).get_data()
        image2 = nib.load(image_name.replace('flair', 't1')).get_data()
        image3 = nib.load(image_name.replace('flair', 't1ce')).get_data()
        image4 = nib.load(image_name.replace('flair', 't2')).get_data()
        label = nib.load(label_name).get_data()
        if self.transform:
            [image1, image2, image3, image4, label] = self.transform(image1, image2, image3, image4, label)
        return (image1, image2, image3, image4, label)
class iouEval():
    """Accumulates confusion-matrix based segmentation metrics over batches.

    Fix: batchCount previously started at 1 and was incremented once per
    addBatch, so getMetric() divided the accumulated sums by N+1 instead of
    the true number of batches N. It now starts at 0; getMetric guards
    against division by zero when no batch has been added.
    """

    def __init__(self, nClasses):
        self.nClasses = nClasses
        self.reset()

    def reset(self):
        """Zero all accumulators."""
        self.overall_acc = 0
        self.per_class_acc = np.zeros(self.nClasses, dtype=np.float32)
        self.per_class_iu = np.zeros(self.nClasses, dtype=np.float32)
        self.mIOU = 0
        self.batchCount = 0  # was 1: off-by-one in the averages

    def fast_hist(self, a, b):
        """Confusion matrix via bincount of nClasses*gt + pred (valid labels only)."""
        k = ((a >= 0) & (a < self.nClasses))
        return np.bincount(((self.nClasses * a[k].astype(int)) + b[k]), minlength=(self.nClasses ** 2)).reshape(self.nClasses, self.nClasses)

    def compute_hist(self, predict, gth):
        hist = self.fast_hist(gth, predict)
        return hist

    def addBatch(self, predict, gth):
        """Accumulate metrics for one batch of torch prediction/label tensors."""
        predict = predict.cpu().numpy().flatten()
        gth = gth.cpu().numpy().flatten()
        epsilon = 1e-08
        hist = self.compute_hist(predict, gth)
        overall_acc = (np.diag(hist).sum() / (hist.sum() + epsilon))
        per_class_acc = (np.diag(hist) / (hist.sum(1) + epsilon))
        per_class_iu = (np.diag(hist) / (((hist.sum(1) + hist.sum(0)) - np.diag(hist)) + epsilon))
        mIou = np.nanmean(per_class_iu)
        self.overall_acc += overall_acc
        self.per_class_acc += per_class_acc
        self.per_class_iu += per_class_iu
        self.mIOU += mIou
        self.batchCount += 1

    def getMetric(self):
        """Return (overall_acc, per_class_acc, per_class_iu, mIOU) averaged over batches seen."""
        n = max(self.batchCount, 1)  # zero batches -> all-zero metrics, no ZeroDivisionError
        overall_acc = (self.overall_acc / n)
        per_class_acc = (self.per_class_acc / n)
        per_class_iu = (self.per_class_iu / n)
        mIOU = (self.mIOU / n)
        return (overall_acc, per_class_acc, per_class_iu, mIOU)
class CBR(nn.Module):
    """Conv3d -> BatchNorm3d -> ReLU, with 'same' padding for odd kernels."""

    def __init__(self, nIn, nOut, kSize, stride=1):
        super().__init__()
        pad = (kSize - 1) // 2
        self.conv = nn.Conv3d(nIn, nOut, kSize, stride=stride, padding=pad, bias=False)
        self.bn = nn.BatchNorm3d(nOut, momentum=0.95, eps=0.001)
        self.act = nn.ReLU(inplace=True)

    def forward(self, input):
        return self.act(self.bn(self.conv(input)))
class CB(nn.Module):
    """Conv3d -> BatchNorm3d (no activation), with 'same' padding for odd kernels."""

    def __init__(self, nIn, nOut, kSize, stride=1):
        super().__init__()
        pad = (kSize - 1) // 2
        self.conv = nn.Conv3d(nIn, nOut, kSize, stride=stride, padding=pad, bias=False)
        self.bn = nn.BatchNorm3d(nOut, momentum=0.95, eps=0.001)

    def forward(self, input):
        return self.bn(self.conv(input))
class C(nn.Module):
    """Bare Conv3d with 'same' padding; supports grouped convolution."""

    def __init__(self, nIn, nOut, kSize, stride=1, groups=1):
        super().__init__()
        pad = (kSize - 1) // 2
        self.conv = nn.Conv3d(nIn, nOut, kSize, stride=stride, padding=pad, bias=False, groups=groups)

    def forward(self, input):
        return self.conv(input)
class DownSamplerA(nn.Module):
    """Halve spatial resolution with a single stride-2 CBR block."""

    def __init__(self, nIn, nOut):
        super().__init__()
        self.conv = CBR(nIn, nOut, 3, 2)

    def forward(self, input):
        return self.conv(input)
class DownSamplerB(nn.Module):
    """ESP-style downsampler: stride-2 reduction followed by parallel dilated
    branches fused hierarchically (HFF) and batch-norm + ReLU."""

    def __init__(self, nIn, nOut):
        super().__init__()
        branches = 4
        n = nOut // branches
        n1 = nOut - (branches - 1) * n  # first branch absorbs the remainder
        self.c1 = nn.Sequential(CBR(nIn, n, 1, 1), C(n, n, 3, 2))
        self.d1 = CDilated(n, n1, 3, 1, 1)
        self.d2 = CDilated(n, n, 3, 1, 2)
        self.d4 = CDilated(n, n, 3, 1, 3)
        self.d8 = CDilated(n, n, 3, 1, 4)
        self.bn = BR(nOut)

    def forward(self, input):
        reduced = self.c1(input)
        branch1 = self.d1(reduced)
        # hierarchical feature fusion: each wider-dilation branch adds on
        sum1 = self.d2(reduced)
        sum2 = sum1 + self.d4(reduced)
        sum3 = sum2 + self.d8(reduced)
        merged = torch.cat([branch1, sum1, sum2, sum3], 1)
        if input.size() == merged.size():
            merged = input + merged
        return self.bn(merged)
class BR(nn.Module):
    """BatchNorm3d followed by ReLU."""

    def __init__(self, nOut):
        super().__init__()
        self.bn = nn.BatchNorm3d(nOut, momentum=0.95, eps=0.001)
        self.act = nn.ReLU(inplace=True)

    def forward(self, input):
        return self.act(self.bn(input))
class CDilated(nn.Module):
    """Dilated Conv3d whose padding preserves spatial size at stride 1."""

    def __init__(self, nIn, nOut, kSize, stride=1, d=1, groups=1):
        super().__init__()
        pad = ((kSize - 1) // 2) * d  # scale 'same' padding by the dilation
        self.conv = nn.Conv3d(nIn, nOut, kSize, stride=stride, padding=pad, bias=False, dilation=d, groups=groups)

    def forward(self, input):
        return self.conv(input)
class InputProjectionA(nn.Module):
    """Down-sample the input volume so it matches a feature map's spatial size.

    Applies `samplingTimes` consecutive stride-2 average-pooling stages, i.e.
    a 512x512x3 input with 3 stages becomes 64x64x3 (channel count unchanged).
    """

    def __init__(self, samplingTimes):
        """:param samplingTimes: number of factor-2 down-sampling stages."""
        super().__init__()
        self.pool = nn.ModuleList(nn.AvgPool3d(3, stride=2, padding=1) for _ in range(samplingTimes))

    def forward(self, input):
        """Return the pyramid-downsampled version of `input`."""
        out = input
        for pool in self.pool:
            out = pool(out)
        return out
class DilatedParllelResidualBlockB1(nn.Module):
    """ESP residual block: 1x1 reduction, parallel dilated 3x3 branches fused
    hierarchically, batch norm, optional residual add, ReLU."""

    def __init__(self, nIn, nOut, stride=1):
        super().__init__()
        branches = 4
        n = nOut // branches
        n1 = nOut - (branches - 1) * n  # first branch absorbs the remainder
        self.c1 = CBR(nIn, n, 1, 1)
        self.d1 = CDilated(n, n1, 3, stride, 1)
        self.d2 = CDilated(n, n, 3, stride, 1)
        self.d4 = CDilated(n, n, 3, stride, 2)
        self.d8 = CDilated(n, n, 3, stride, 2)
        self.bn = nn.BatchNorm3d(nOut)

    def forward(self, input):
        reduced = self.c1(input)
        branch1 = self.d1(reduced)
        sum1 = self.d2(reduced)
        sum2 = sum1 + self.d4(reduced)
        sum3 = sum2 + self.d8(reduced)
        combined = self.bn(torch.cat([branch1, sum1, sum2, sum3], 1))
        # residual connection only when shapes line up (same channels/stride 1)
        if input.size() == combined.size():
            combined = input + combined
        return F.relu(combined, inplace=True)
class ASPBlock(nn.Module):
    """Sum of parallel conv-bn branches with kernels 3/5/7/9, optional residual
    add, then ReLU — a multi-receptive-field aggregation block."""

    def __init__(self, nIn, nOut, stride=1):
        super().__init__()
        self.d1 = CB(nIn, nOut, 3, 1)
        self.d2 = CB(nIn, nOut, 5, 1)
        self.d4 = CB(nIn, nOut, 7, 1)
        self.d8 = CB(nIn, nOut, 9, 1)
        self.act = nn.ReLU(inplace=True)

    def forward(self, input):
        fused = self.d1(input) + self.d2(input) + self.d4(input) + self.d8(input)
        if input.size() == fused.size():
            fused = input + fused
        return self.act(fused)
class UpSampler(nn.Module):
    """Apply a 3x3 CBR block, then double the spatial size with trilinear upsampling."""

    def __init__(self, nIn, nOut):
        super().__init__()
        self.up = CBR(nIn, nOut, 3, 1)

    def forward(self, inp):
        # F.upsample is deprecated upstream; kept for behavioral parity
        return F.upsample(self.up(inp), mode='trilinear', scale_factor=2)
class PSPDec(nn.Module):
    """Pyramid-pooling branch (after PSPNet): adaptive-average-pool the input
    down by `downSize`, run a CBR block, and upsample back to the input size."""

    def __init__(self, nIn, nOut, downSize):
        super().__init__()
        self.scale = downSize
        self.features = CBR(nIn, nOut, 3, 1)

    def forward(self, x):
        assert x.dim() == 5
        full_size = x.size()
        pooled_dims = tuple(int(full_size[axis] * self.scale) for axis in (2, 3, 4))
        pooled = F.adaptive_avg_pool3d(x, output_size=pooled_dims)
        return F.upsample(self.features(pooled), size=(full_size[2], full_size[3], full_size[4]), mode='trilinear')
class ESPNet(nn.Module):
    """3D ESPNet encoder-decoder for volumetric segmentation.

    Fix: the classifier head was assigned twice in one statement
    (`self.classifier = self.classifier = nn.Sequential(...)`); the redundant
    assignment is removed. All other behavior is unchanged.
    """

    def __init__(self, classes=4, channels=1):
        super().__init__()
        self.input1 = InputProjectionA(1)
        self.input2 = InputProjectionA(1)
        initial = 16
        config = [32, 128, 256, 256]
        reps = [2, 2, 3]
        # --- encoder ---
        self.level0 = CBR(channels, initial, 7, 2)
        self.level1 = nn.ModuleList()
        for i in range(reps[0]):
            if (i == 0):
                self.level1.append(DilatedParllelResidualBlockB1(initial, config[0]))
            else:
                self.level1.append(DilatedParllelResidualBlockB1(config[0], config[0]))
        self.level2 = DilatedParllelResidualBlockB1(config[0], config[1], stride=2)
        self.level_2 = nn.ModuleList()
        for i in range(0, reps[1]):
            self.level_2.append(DilatedParllelResidualBlockB1(config[1], config[1]))
        self.level3_0 = DilatedParllelResidualBlockB1(config[1], config[2], stride=2)
        self.level_3 = nn.ModuleList()
        for i in range(0, reps[2]):
            self.level_3.append(DilatedParllelResidualBlockB1(config[2], config[2]))
        # --- decoder: upsample, merge with encoder skip, refine ---
        self.up_l3_l2 = UpSampler(config[2], config[1])
        self.merge_l2 = DilatedParllelResidualBlockB1((2 * config[1]), config[1])
        self.dec_l2 = nn.ModuleList()
        for i in range(0, reps[0]):
            self.dec_l2.append(DilatedParllelResidualBlockB1(config[1], config[1]))
        self.up_l2_l1 = UpSampler(config[1], config[0])
        self.merge_l1 = DilatedParllelResidualBlockB1((2 * config[0]), config[0])
        self.dec_l1 = nn.ModuleList()
        for i in range(0, reps[0]):
            self.dec_l1.append(DilatedParllelResidualBlockB1(config[0], config[0]))
        self.dec_l1.append(CBR(config[0], classes, 3, 1))
        self.dec_l1.append(ASPBlock(classes, classes))
        # pyramid pooling branches over the class maps
        self.pspModules = nn.ModuleList()
        scales = [0.2, 0.4, 0.6, 0.8]
        for sc in scales:
            self.pspModules.append(PSPDec(classes, classes, sc))
        self.classifier = nn.Sequential(
            CBR(((len(scales) + 1) * classes), classes, 3, 1),
            ASPBlock(classes, classes),
            nn.Upsample(scale_factor=2),
            CBR(classes, classes, 7, 1),
            C(classes, classes, 1, 1),
        )
        # He-style weight init for convolutions; BN weights to identity
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                n = (((m.kernel_size[0] * m.kernel_size[1]) * m.kernel_size[2]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            if isinstance(m, nn.ConvTranspose3d):
                n = (((m.kernel_size[0] * m.kernel_size[1]) * m.kernel_size[2]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, nn.BatchNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, input1, inp_res=(128, 128, 128), inpSt2=False):
        """Segment `input1`; the output is upsampled back to the input resolution.

        inp_res: working resolution (dims must be divisible by 8 for the three
        stride-2 stages). In training mode or when None, it is derived from the
        input by rounding each dim up to a multiple of 8. `inpSt2` is accepted
        for interface compatibility but unused.
        """
        dim0 = input1.size(2)
        dim1 = input1.size(3)
        dim2 = input1.size(4)
        if (self.training or (inp_res is None)):
            # round each spatial dim up to a multiple of 8
            inp_res = ((math.ceil((dim0 / 8)) * 8), (math.ceil((dim1 / 8)) * 8), (math.ceil((dim2 / 8)) * 8))
        if inp_res:
            input1 = F.adaptive_avg_pool3d(input1, output_size=inp_res)
        out_l0 = self.level0(input1)
        for (i, layer) in enumerate(self.level1):
            if (i == 0):
                out_l1 = layer(out_l0)
            else:
                out_l1 = layer(out_l1)
        out_l2_down = self.level2(out_l1)
        for (i, layer) in enumerate(self.level_2):
            if (i == 0):
                out_l2 = layer(out_l2_down)
            else:
                out_l2 = layer(out_l2)
        del out_l2_down
        out_l3_down = self.level3_0(out_l2)
        for (i, layer) in enumerate(self.level_3):
            if (i == 0):
                out_l3 = layer(out_l3_down)
            else:
                out_l3 = layer(out_l3)
        del out_l3_down
        # decode level 3 -> 2 with skip connection
        dec_l3_l2 = self.up_l3_l2(out_l3)
        merge_l2 = self.merge_l2(torch.cat([dec_l3_l2, out_l2], 1))
        for (i, layer) in enumerate(self.dec_l2):
            if (i == 0):
                dec_l2 = layer(merge_l2)
            else:
                dec_l2 = layer(dec_l2)
        # decode level 2 -> 1 with skip connection
        dec_l2_l1 = self.up_l2_l1(dec_l2)
        merge_l1 = self.merge_l1(torch.cat([dec_l2_l1, out_l1], 1))
        for (i, layer) in enumerate(self.dec_l1):
            if (i == 0):
                dec_l1 = layer(merge_l1)
            else:
                dec_l1 = layer(dec_l1)
        # concatenate raw class maps with each pyramid-pooled variant
        psp_outs = dec_l1.clone()
        for layer in self.pspModules:
            out_psp = layer(dec_l1)
            psp_outs = torch.cat([psp_outs, out_psp], 1)
        decoded = self.classifier(psp_outs)
        return F.upsample(decoded, size=(dim0, dim1, dim2), mode='trilinear')
class ScaleToFixed(object):
    """Resize all four image modalities (linear interpolation) and the label
    map (nearest-neighbor) to a fixed (dimA, dimB, dimC) volume so that
    samples of differing sizes can be batched together."""

    def __init__(self, dimA, dimB, dimC):
        self.dimA = dimA
        self.dimB = dimB
        self.dimC = dimC

    def __call__(self, image, imageA, imageB, imageC, label):
        target_shape = (self.dimA, self.dimB, self.dimC)
        resized = [
            skTrans.resize(vol, target_shape, order=1, preserve_range=True)
            for vol in (image, imageA, imageB, imageC)
        ]
        # order=0 keeps label values discrete
        resized.append(skTrans.resize(label, target_shape, order=0, preserve_range=True))
        return resized
class RandomFlip(object):
    """With probability 0.5, flip all four modalities and the label along one
    randomly chosen axis (0, 1 or 2)."""

    def __call__(self, image, imageA, imageB, imageC, label):
        if random.random() < 0.5:
            axis = np.random.randint(0, 3)
            image, imageA, imageB, imageC, label = (
                np.flip(vol, axis) for vol in (image, imageA, imageB, imageC, label)
            )
        return [image, imageA, imageB, imageC, label]
class MinMaxNormalize(object):
    """Min-max scale each image modality to [0, 1]; the label passes through unchanged."""

    @staticmethod
    def _rescale(volume):
        # per-volume normalization: (x - min) / (max - min)
        volume = volume.astype(np.float32)
        lo = np.min(volume)
        hi = np.max(volume)
        return (volume - lo) / (hi - lo)

    def __call__(self, image, imageA, imageB, imageC, label):
        return [
            self._rescale(image),
            self._rescale(imageA),
            self._rescale(imageB),
            self._rescale(imageC),
            label,
        ]
class ToTensor(object):
    """Convert the four modality volumes and the label map to torch tensors.

    Images gain a leading channel dimension; the label is optionally
    down-scaled by `scale` (nearest-neighbor) and BraTS label 4 is remapped
    to class 3.

    Fix: np.int was removed in NumPy >= 1.24, so `dtype=np.int` raises
    AttributeError on modern NumPy; np.int64 is used instead (matching what
    torch.LongTensor expects).
    """

    def __init__(self, scale=1):
        self.scale = scale  # label down-scaling factor

    def __call__(self, image, imageA, imageB, imageC, label):
        image = image.astype(np.float32)
        imageA = imageA.astype(np.float32)
        imageB = imageB.astype(np.float32)
        imageC = imageC.astype(np.float32)
        # add channel dimension: (D, H, W) -> (1, D, H, W)
        image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
        imageA = imageA.reshape((1, imageA.shape[0], imageA.shape[1], imageA.shape[2]))
        imageB = imageB.reshape((1, imageB.shape[0], imageB.shape[1], imageB.shape[2]))
        imageC = imageC.reshape((1, imageC.shape[0], imageC.shape[1], imageC.shape[2]))
        dims = label.shape
        label = skTrans.resize(label, (int((dims[0] / self.scale)), int((dims[1] / self.scale)), int((dims[2] / self.scale))), order=0, preserve_range=True)
        label[(label == 4)] = 3  # BraTS convention: label 4 -> class index 3
        image_tensor = torch.from_numpy(image)
        image_tensorA = torch.from_numpy(imageA)
        image_tensorB = torch.from_numpy(imageB)
        image_tensorC = torch.from_numpy(imageC)
        label_tensor = torch.LongTensor(np.array(label, dtype=np.int64))
        return [image_tensor, image_tensorA, image_tensorB, image_tensorC, label_tensor]
class Compose(object):
    """Chain several multi-argument transforms, feeding each one's output
    (a sequence) back in as the next one's positional arguments."""

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, *args):
        current = args
        for transform in self.transforms:
            current = transform(*current)
        return current
def make_dot(var, params=None):
    """Produce a Graphviz representation of a PyTorch autograd graph.

    Blue nodes are Variables that require grad; orange nodes are Tensors
    saved for backward in torch.autograd.Function.

    Fix: `params.values()[0]` is a Python-2 idiom — dict views are not
    subscriptable in Python 3; use next(iter(...)) instead.

    Args:
        var: output Variable to trace backward from.
        params: optional dict of (name, Variable) used to label grad-requiring
            nodes with their parameter names.
    """
    if (params is not None):
        assert isinstance(next(iter(params.values())), Variable)
        param_map = {id(v): k for (k, v) in params.items()}
    node_attr = dict(style='filled', shape='box', align='left', fontsize='12', ranksep='0.1', height='0.2')
    dot = Digraph(node_attr=node_attr, graph_attr=dict(size='12,12'))
    seen = set()

    def size_to_str(size):
        # e.g. torch.Size([2, 3]) -> '(2, 3)'
        return (('(' + ', '.join([('%d' % v) for v in size])) + ')')

    def add_nodes(var):
        # depth-first walk over grad_fn graph, deduplicated via `seen`
        if (var not in seen):
            if torch.is_tensor(var):
                dot.node(str(id(var)), size_to_str(var.size()), fillcolor='orange')
            elif hasattr(var, 'variable'):
                u = var.variable
                name = (param_map[id(u)] if (params is not None) else '')
                node_name = ('%s\n %s' % (name, size_to_str(u.size())))
                dot.node(str(id(var)), node_name, fillcolor='lightblue')
            else:
                dot.node(str(id(var)), str(type(var).__name__))
            seen.add(var)
            if hasattr(var, 'next_functions'):
                for u in var.next_functions:
                    if (u[0] is not None):
                        dot.edge(str(id(u[0])), str(id(var)))
                        add_nodes(u[0])
            if hasattr(var, 'saved_tensors'):
                for t in var.saved_tensors:
                    dot.edge(str(id(t)), str(id(var)))
                    add_nodes(t)

    add_nodes(var.grad_fn)
    return dot
class LoadData():
    """Scan train/val file lists, validate label volumes, compute inverse-log
    class weights, and cache everything to a pickle file."""

    def __init__(self, data_dir, data_dir_val, classes, cached_data_file, normVal=1.1):
        self.data_dir = data_dir  # root for train.txt and training volumes
        self.data_dir_val = data_dir_val  # root for val.txt and validation volumes
        self.classes = classes
        self.classWeights = np.ones(self.classes, dtype=np.float32)
        self.normVal = normVal  # smoothing constant for the log weighting
        self.trainImList = list()
        self.valImList = list()
        self.trainAnnotList = list()
        self.valAnnotList = list()
        self.cached_data_file = cached_data_file

    def compute_class_weights(self, histogram):
        # ENet-style inverse-log frequency weighting against class imbalance
        normHist = (histogram / np.sum(histogram))
        for i in range(self.classes):
            self.classWeights[i] = (1 / np.log((self.normVal + normHist[i])))

    def readFile(self, fileName, trainStg=False):
        # Each line is "<image>,<label>". In the train stage a global label
        # histogram is also accumulated for class weighting.
        if trainStg:
            global_hist = np.zeros(self.classes, dtype=np.float32)
        no_files = 0
        data_dir = (self.data_dir if trainStg else self.data_dir_val)
        with open(((data_dir + '/') + fileName), 'r') as textFile:
            for line in textFile:
                line_arr = line.split(',')
                img_file = ((data_dir.strip() + '/') + line_arr[0].strip()).strip()
                label_file = ((data_dir.strip() + '/') + line_arr[1].strip()).strip()
                # NOTE(review): nibabel get_data() is deprecated in newer
                # releases (use get_fdata()); confirm pinned version.
                label_img = nib.load(label_file).get_data()
                label_img[(label_img == 4)] = 3  # BraTS: label 4 -> class 3
                unique_values = np.unique(label_img)
                max_val = max(unique_values)
                min_val = min(unique_values)
                if trainStg:
                    hist = np.histogram(label_img, self.classes)
                    global_hist += hist[0]
                    self.trainImList.append(img_file)
                    self.trainAnnotList.append(label_file)
                else:
                    self.valImList.append(img_file)
                    self.valAnnotList.append(label_file)
                # warn about out-of-range label values
                if ((max_val > (self.classes - 1)) or (min_val < 0)):
                    print('Some problem with labels. \n Please check.')
                    print(('Label Image ID: ' + label_file))
                no_files += 1
        if trainStg:
            self.compute_class_weights(global_hist)
        return 0

    def processData(self):
        # Build both splits, then pickle file lists plus class weights.
        print('Processing training data')
        return_val = self.readFile('train.txt', True)
        print('Processing validation data')
        return_val1 = self.readFile('val.txt')
        print('Pickling data')
        if ((return_val == 0) and (return_val1 == 0)):
            data_dict = dict()
            data_dict['trainIm'] = self.trainImList
            data_dict['trainAnnot'] = self.trainAnnotList
            data_dict['valIm'] = self.valImList
            data_dict['valAnnot'] = self.valAnnotList
            data_dict['classWeights'] = self.classWeights
            pickle.dump(data_dict, open(self.cached_data_file, 'wb'))
            return data_dict
        return None
def val(args, val_loader, model, criterion):
    """Run one validation pass.

    Returns (avg loss, overall acc, per-class acc, per-class IoU, mIoU)
    accumulated by iouEval over all validation batches.
    """
    model.eval()
    iouEvalVal = iouEval(args.classes)
    epoch_loss = []
    total_batches = len(val_loader)
    for (i, (inp, inputA, inputB, inputC, target)) in enumerate(val_loader):
        start_time = time.time()
        # stack the four MRI modalities along the channel dimension
        input = torch.cat([inp, inputA, inputB, inputC], 1)
        if (args.onGPU == True):
            input = input.cuda()
            target = target.cuda()
        # NOTE(review): volatile=True and loss.data[0] are pre-0.4 PyTorch
        # idioms (now torch.no_grad() and loss.item()); confirm the pinned
        # PyTorch version before running on a modern install.
        input_var = torch.autograd.Variable(input, volatile=True)
        target_var = torch.autograd.Variable(target, volatile=True)
        output = model(input_var)
        loss = criterion(output, target_var)
        epoch_loss.append(loss.data[0])
        time_taken = (time.time() - start_time)
        # argmax over the class dimension feeds the IoU evaluator
        iouEvalVal.addBatch(output.max(1)[1].data, target_var.data)
        print(('[%d/%d] loss: %.3f time: %.2f' % (i, total_batches, loss.data[0], time_taken)))
    average_epoch_loss_val = np.mean(epoch_loss)
    (overall_acc, per_class_acc, per_class_iu, mIOU) = iouEvalVal.getMetric()
    return (average_epoch_loss_val, overall_acc, per_class_acc, per_class_iu, mIOU)
def train(args, train_loader, model, criterion, optimizer, epoch):
    """
    Run one training epoch over the multi-modality loader.
    :param args: general arguments (uses args.classes, args.onGPU)
    :param train_loader: training DataLoader yielding four modalities + target
    :param criterion: loss function
    :param optimizer: optimization algorithm (e.g. Adam)
    :param epoch: epoch number (unused here; kept for interface compatibility)
    :return: (avg loss, overall acc, per-class acc, per-class iu, mIOU)
    """
    model.train()
    iouEvalTrain = iouEval(args.classes)
    epoch_loss = []
    total_batches = len(train_loader)
    for (i, (inp, inputA, inputB, inputC, target)) in enumerate(train_loader):
        start_time = time.time()
        # Four input modalities are concatenated along the channel axis.
        input = torch.cat([inp, inputA, inputB, inputC], 1)
        if (args.onGPU == True):
            input = input.cuda()
            target = target.cuda()
        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)
        output = model(input_var)
        loss = criterion(output, target_var)
        # BUG FIX: optimizer.zero_grad() was called twice per iteration
        # (before and after the loss computation); once is sufficient.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # loss.data[0]: legacy scalar access (loss.item() in modern PyTorch).
        epoch_loss.append(loss.data[0])
        time_taken = (time.time() - start_time)
        iouEvalTrain.addBatch(output.max(1)[1].data, target_var.data)
        print(('[%d/%d] loss: %.3f time:%.2f' % (i, total_batches, loss.data[0], time_taken)))
    average_epoch_loss_train = np.mean(epoch_loss)
    (overall_acc, per_class_acc, per_class_iu, mIOU) = iouEvalTrain.getMetric()
    return (average_epoch_loss_train, overall_acc, per_class_acc, per_class_iu, mIOU)
def save_checkpoint(state, filenameCheckpoint='checkpoint.pth.tar'):
    """
    Serialize a training checkpoint to disk.
    :param state: checkpoint payload (epoch, model/optimizer state, metrics)
    :param filenameCheckpoint: destination file path
    """
    torch.save(state, filenameCheckpoint)
def trainValidateSegmentation(args):
    """
    Main training/validation driver for the 3D ESPNet variant: loads (or
    builds) the cached dataset, constructs the model, trains at three input
    scales per epoch and validates/checkpoints after each epoch.
    :param args: global arguments (paths, hyper-parameters, flags)
    """
    print(('Data file: ' + str(args.cached_data_file)))
    print(args)
    # Build the cached data pickle on first run, reuse it afterwards.
    if (not os.path.isfile(args.cached_data_file)):
        dataLoader = ld.LoadData(args.data_dir, args.data_dir_val, args.classes, args.cached_data_file)
        data = dataLoader.processData()
        if (data is None):
            print('Error while pickling data. Please check.')
            exit((- 1))
    else:
        data = pickle.load(open(args.cached_data_file, 'rb'))
    print('=> Loading the model')
    model = net.ESPNet(classes=args.classes, channels=args.channels)
    args.savedir = (args.savedir + os.sep)
    if args.onGPU:
        model = model.cuda()
    if (not os.path.exists(args.savedir)):
        os.mkdir(args.savedir)
    # NOTE(review): model.cuda() is invoked a second time here; the repeat
    # is harmless but redundant.
    if args.onGPU:
        model = model.cuda()
    if args.visualizeNet:
        import VisualizeGraph as viz
        x = Variable(torch.randn(1, args.channels, args.inDepth, args.inWidth, args.inHeight))
        if args.onGPU:
            x = x.cuda()
        y = model(x, (128, 128, 128))
        g = viz.make_dot(y)
        g.render(((args.savedir + os.sep) + 'model'), view=False)
    # Count trainable parameters (product of each parameter's dimensions).
    total_paramters = 0
    for parameter in model.parameters():
        i = len(parameter.size())
        p = 1
        for j in range(i):
            p *= parameter.size(j)
        total_paramters += p
    print(('Parameters: ' + str(total_paramters)))
    # Class-imbalance weights computed by LoadData are fed to the loss.
    weight = torch.from_numpy(data['classWeights'])
    print('Class Imbalance Weights')
    print(weight)
    criteria = torch.nn.CrossEntropyLoss(weight)
    if args.onGPU:
        criteria = criteria.cuda()
    # Three training pipelines at different fixed volume sizes (multi-scale
    # training); validation uses the full configured resolution only.
    trainDatasetA = myTransforms.Compose([myTransforms.MinMaxNormalize(), myTransforms.ScaleToFixed(dimA=144, dimB=144, dimC=144), myTransforms.RandomFlip(), myTransforms.ToTensor(args.scaleIn)])
    trainDatasetB = myTransforms.Compose([myTransforms.MinMaxNormalize(), myTransforms.ScaleToFixed(dimA=96, dimB=96, dimC=96), myTransforms.RandomFlip(), myTransforms.ToTensor(args.scaleIn)])
    trainDatasetC = myTransforms.Compose([myTransforms.MinMaxNormalize(), myTransforms.ScaleToFixed(dimA=args.inWidth, dimB=args.inHeight, dimC=args.inDepth), myTransforms.RandomFlip(), myTransforms.ToTensor(args.scaleIn)])
    valDataset = myTransforms.Compose([myTransforms.MinMaxNormalize(), myTransforms.ScaleToFixed(dimA=args.inWidth, dimB=args.inHeight, dimC=args.inDepth), myTransforms.ToTensor(args.scaleIn)])
    trainLoaderA = torch.utils.data.DataLoader(myDataLoader.MyDataset(data['trainIm'], data['trainAnnot'], transform=trainDatasetA), batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=False)
    trainLoaderB = torch.utils.data.DataLoader(myDataLoader.MyDataset(data['trainIm'], data['trainAnnot'], transform=trainDatasetB), batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=False)
    trainLoaderC = torch.utils.data.DataLoader(myDataLoader.MyDataset(data['trainIm'], data['trainAnnot'], transform=trainDatasetC), batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=False)
    valLoader = torch.utils.data.DataLoader(myDataLoader.MyDataset(data['valIm'], data['valAnnot'], transform=valDataset), batch_size=1, shuffle=False, num_workers=args.num_workers, pin_memory=False)
    optimizer = torch.optim.Adam(filter((lambda p: p.requires_grad), model.parameters()), args.lr, (0.9, 0.999), eps=1e-08, weight_decay=0.0002)
    if (args.onGPU == True):
        cudnn.benchmark = True
    start_epoch = 0
    stored_loss = 100000000.0
    # Optionally resume model/optimizer state from a previous checkpoint.
    if args.resume:
        if os.path.isfile(args.resumeLoc):
            print("=> loading checkpoint '{}'".format(args.resumeLoc))
            checkpoint = torch.load(args.resumeLoc)
            start_epoch = checkpoint['epoch']
            stored_loss = checkpoint['stored_loss']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    # Append to an existing log file, otherwise create it with a header.
    logFileLoc = (args.savedir + args.logFile)
    if os.path.isfile(logFileLoc):
        logger = open(logFileLoc, 'a')
        logger.write(('Parameters: %s' % str(total_paramters)))
        logger.write(('\n%s\t%s\t%s\t%s\t%s\t' % ('Epoch', 'Loss(Tr)', 'Loss(val)', 'mIOU (tr)', 'mIOU (val')))
        logger.flush()
    else:
        logger = open(logFileLoc, 'w')
        logger.write(('Arguments: %s' % str(args)))
        logger.write(('\n Parameters: %s' % str(total_paramters)))
        logger.write(('\n%s\t%s\t%s\t%s\t%s\t' % ('Epoch', 'Loss(Tr)', 'Loss(val)', 'mIOU (tr)', 'mIOU (val')))
        logger.flush()
    # Halve the learning rate every args.step_loss epochs.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.step_loss, gamma=0.5)
    best_val_acc = 0
    loader_idxs = [0, 1, 2]
    for epoch in range(start_epoch, args.max_epochs):
        scheduler.step(epoch)
        lr = 0
        for param_group in optimizer.param_groups:
            lr = param_group['lr']
        print('Running epoch {} with learning rate {:.5f}'.format(epoch, lr))
        # Randomize the order of the three training scales after epoch 0;
        # metrics are kept only from the full-resolution loader (l_id == 2).
        if (epoch > 0):
            np.random.shuffle(loader_idxs)
        for l_id in loader_idxs:
            if (l_id == 0):
                train(args, trainLoaderA, model, criteria, optimizer, epoch)
            elif (l_id == 1):
                train(args, trainLoaderB, model, criteria, optimizer, epoch)
            else:
                (lossTr, overall_acc_tr, per_class_acc_tr, per_class_iu_tr, mIOU_tr) = train(args, trainLoaderC, model, criteria, optimizer, epoch)
        (lossVal, overall_acc_val, per_class_acc_val, per_class_iu_val, mIOU_val) = val(args, valLoader, model, criteria)
        print('saving checkpoint')
        save_checkpoint({'epoch': (epoch + 1), 'arch': str(model), 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict(), 'lossTr': lossTr, 'lossVal': lossVal, 'iouTr': mIOU_tr, 'iouVal': mIOU_val, 'stored_loss': stored_loss}, (args.savedir + '/checkpoint.pth.tar'))
        # Keep a separate copy of the weights with the best validation mIOU.
        if (mIOU_val >= best_val_acc):
            best_val_acc = mIOU_val
            torch.save(model.state_dict(), (args.savedir + '/best_model.pth'))
        with open((((args.savedir + 'acc_') + str(epoch)) + '.txt'), 'w') as log:
            log.write(('\nEpoch: %d\t Overall Acc (Tr): %.4f\t Overall Acc (Val): %.4f\t mIOU (Tr): %.4f\t mIOU (Val): %.4f' % (epoch, overall_acc_tr, overall_acc_val, mIOU_tr, mIOU_val)))
            log.write('\n')
            log.write(('Per Class Training Acc: ' + str(per_class_acc_tr)))
            log.write('\n')
            log.write(('Per Class Validation Acc: ' + str(per_class_acc_val)))
            log.write('\n')
            log.write(('Per Class Training mIOU: ' + str(per_class_iu_tr)))
            log.write('\n')
            log.write(('Per Class Validation mIOU: ' + str(per_class_iu_val)))
        logger.write(('\n%d\t\t%.4f\t\t%.4f\t\t%.4f\t\t%.4f\t\t%.6f' % (epoch, lossTr, lossVal, mIOU_tr, mIOU_val, lr)))
        logger.flush()
        print((('Epoch : ' + str(epoch)) + ' Details'))
        print(('\nEpoch No.: %d\tTrain Loss = %.4f\tVal Loss = %.4f\t mIOU(tr) = %.4f\t mIOU(val) = %.4f' % (epoch, lossTr, lossVal, mIOU_tr, mIOU_val)))
    logger.close()
class CBR(nn.Module):
    """Convolution followed by batch normalization and a PReLU activation."""

    def __init__(self, nIn, nOut, kSize, stride=1):
        """
        :param nIn: number of input channels
        :param nOut: number of output channels
        :param kSize: kernel size
        :param stride: stride rate for down-sampling. Default is 1
        """
        super().__init__()
        pad = int((kSize - 1) / 2)
        self.conv = nn.Conv2d(nIn, nOut, (kSize, kSize), stride=stride,
                              padding=(pad, pad), bias=False)
        self.bn = nn.BatchNorm2d(nOut, eps=0.001)
        self.act = nn.PReLU(nOut)

    def forward(self, input):
        """Apply conv -> batchnorm -> PReLU and return the result."""
        return self.act(self.bn(self.conv(input)))
class BR(nn.Module):
    """Batch normalization followed by a PReLU activation."""

    def __init__(self, nOut):
        """
        :param nOut: number of feature maps to normalize
        """
        super().__init__()
        self.bn = nn.BatchNorm2d(nOut, eps=0.001)
        self.act = nn.PReLU(nOut)

    def forward(self, input):
        """Normalize then threshold the input feature map."""
        return self.act(self.bn(input))
class CB(nn.Module):
    """Convolution followed by batch normalization (no activation)."""

    def __init__(self, nIn, nOut, kSize, stride=1):
        """
        :param nIn: number of input channels
        :param nOut: number of output channels
        :param kSize: kernel size
        :param stride: optional stride for down-sampling
        """
        super().__init__()
        pad = int((kSize - 1) / 2)
        self.conv = nn.Conv2d(nIn, nOut, (kSize, kSize), stride=stride,
                              padding=(pad, pad), bias=False)
        self.bn = nn.BatchNorm2d(nOut, eps=0.001)

    def forward(self, input):
        """Apply conv -> batchnorm and return the result."""
        return self.bn(self.conv(input))
class C(nn.Module):
    """A plain convolutional layer (no normalization, no activation)."""

    def __init__(self, nIn, nOut, kSize, stride=1):
        """
        :param nIn: number of input channels
        :param nOut: number of output channels
        :param kSize: kernel size
        :param stride: optional stride rate for down-sampling
        """
        super().__init__()
        pad = int((kSize - 1) / 2)
        self.conv = nn.Conv2d(nIn, nOut, (kSize, kSize), stride=stride,
                              padding=(pad, pad), bias=False)

    def forward(self, input):
        """Convolve the input feature map."""
        return self.conv(input)
class CDilated(nn.Module):
    """A dilated convolutional layer with 'same'-style padding."""

    def __init__(self, nIn, nOut, kSize, stride=1, d=1):
        """
        :param nIn: number of input channels
        :param nOut: number of output channels
        :param kSize: kernel size
        :param stride: optional stride rate for down-sampling
        :param d: optional dilation rate
        """
        super().__init__()
        # Padding grows with the dilation so the spatial size is preserved
        # at stride 1.
        pad = int((kSize - 1) / 2) * d
        self.conv = nn.Conv2d(nIn, nOut, (kSize, kSize), stride=stride,
                              padding=(pad, pad), bias=False, dilation=d)

    def forward(self, input):
        """Apply the dilated convolution."""
        return self.conv(input)
class DownSamplerB(nn.Module):
    """ESP-style down-sampling block: strided reduce, parallel dilated
    convolutions, hierarchical feature fusion, then BN + PReLU."""

    def __init__(self, nIn, nOut):
        super().__init__()
        n = int(nOut / 5)
        n1 = nOut - 4 * n  # first branch absorbs the remainder channels
        self.c1 = C(nIn, n, 3, 2)
        self.d1 = CDilated(n, n1, 3, 1, 1)
        self.d2 = CDilated(n, n, 3, 1, 2)
        self.d4 = CDilated(n, n, 3, 1, 4)
        self.d8 = CDilated(n, n, 3, 1, 8)
        self.d16 = CDilated(n, n, 3, 1, 16)
        self.bn = nn.BatchNorm2d(nOut, eps=0.001)
        self.act = nn.PReLU(nOut)

    def forward(self, input):
        reduced = self.c1(input)
        d1 = self.d1(reduced)
        # Hierarchical fusion: each dilated output is summed with the
        # previous partial sum before concatenation.
        partial = self.d2(reduced)
        fused = [partial]
        for branch in (self.d4, self.d8, self.d16):
            partial = partial + branch(reduced)
            fused.append(partial)
        combine = torch.cat([d1] + fused, 1)
        return self.act(self.bn(combine))
class DilatedParllelResidualBlockB(nn.Module):
    """ESP block built on the principle Reduce -> Split -> Transform -> Merge,
    with an optional identity residual connection."""

    def __init__(self, nIn, nOut, add=True):
        """
        :param nIn: number of input channels
        :param nOut: number of output channels
        :param add: if true, add a residual connection through identity
            operation (avoided when the dimensions differ to keep the module
            simple)
        """
        super().__init__()
        n = int(nOut / 5)
        n1 = nOut - 4 * n  # first branch absorbs the remainder channels
        self.c1 = C(nIn, n, 1, 1)
        self.d1 = CDilated(n, n1, 3, 1, 1)
        self.d2 = CDilated(n, n, 3, 1, 2)
        self.d4 = CDilated(n, n, 3, 1, 4)
        self.d8 = CDilated(n, n, 3, 1, 8)
        self.d16 = CDilated(n, n, 3, 1, 16)
        self.bn = BR(nOut)
        self.add = add

    def forward(self, input):
        """Apply the ESP transform; returns a feature map with nOut channels."""
        reduced = self.c1(input)
        d1 = self.d1(reduced)
        # Hierarchical fusion of the dilated branches before concatenation.
        partial = self.d2(reduced)
        fused = [partial]
        for branch in (self.d4, self.d8, self.d16):
            partial = partial + branch(reduced)
            fused.append(partial)
        combine = torch.cat([d1] + fused, 1)
        if self.add:
            combine = input + combine
        return self.bn(combine)
class InputProjectionA(nn.Module):
    """Down-samples the input image to the spatial size of a feature map by
    applying 3x3 average pooling (stride 2) *samplingTimes* times.
    E.g. a 512x512x3 image with samplingTimes=3 maps to 64x64x3."""

    def __init__(self, samplingTimes):
        """
        :param samplingTimes: how many factor-2 down-sampling stages to apply
        """
        super().__init__()
        self.pool = nn.ModuleList(
            nn.AvgPool2d(3, stride=2, padding=1) for _ in range(samplingTimes))

    def forward(self, input):
        """Return the pyramid-style down-sampled image."""
        for stage in self.pool:
            input = stage(input)
        return input
class ESPNet_Encoder(nn.Module):
    """ESPNet-C encoder: three levels with input reinforcement (the raw
    image, down-sampled, is concatenated back in at each level)."""

    def __init__(self, classes=20, p=5, q=3):
        """
        :param classes: number of classes in the dataset. Default is 20 for
            the cityscapes
        :param p: depth multiplier for level 2
        :param q: depth multiplier for level 3
        """
        super().__init__()
        self.level1 = CBR(3, 16, 3, 2)
        self.sample1 = InputProjectionA(1)
        self.sample2 = InputProjectionA(2)
        self.b1 = BR(16 + 3)
        self.level2_0 = DownSamplerB(16 + 3, 64)
        self.level2 = nn.ModuleList(
            DilatedParllelResidualBlockB(64, 64) for _ in range(p))
        self.b2 = BR(128 + 3)
        self.level3_0 = DownSamplerB(128 + 3, 128)
        self.level3 = nn.ModuleList(
            DilatedParllelResidualBlockB(128, 128) for _ in range(q))
        self.b3 = BR(256)
        self.classifier = C(256, classes, 1, 1)

    def forward(self, input):
        """
        :param input: the input RGB image
        :return: feature map with spatial dimensions 1/8th of the input image
        """
        output0 = self.level1(input)
        inp1 = self.sample1(input)
        inp2 = self.sample2(input)
        output0_cat = self.b1(torch.cat([output0, inp1], 1))
        output1_0 = self.level2_0(output0_cat)
        output1 = output1_0
        for layer in self.level2:
            output1 = layer(output1)
        output1_cat = self.b2(torch.cat([output1, output1_0, inp2], 1))
        output2_0 = self.level3_0(output1_cat)
        output2 = output2_0
        for layer in self.level3:
            output2 = layer(output2)
        output2_cat = self.b3(torch.cat([output2_0, output2], 1))
        return self.classifier(output2_cat)
class ESPNet(nn.Module):
    """ESPNet: the ESPNet-C encoder plus a light-weight RUM-based decoder."""

    def __init__(self, classes=20, p=2, q=3, encoderFile=None):
        """
        :param classes: number of classes in the dataset. Default is 20 for
            the cityscapes
        :param p: depth multiplier
        :param q: depth multiplier
        :param encoderFile: pretrained encoder weights. The ESPNet-C encoder
            is trained first and the RUM-based light weight decoder is then
            attached. See paper for more details.
        """
        super().__init__()
        self.encoder = ESPNet_Encoder(classes, p, q)
        if encoderFile is not None:
            self.encoder.load_state_dict(torch.load(encoderFile))
            print('Encoder loaded!')
        # BUG FIX: this list used to be stored as `self.modules`, which
        # shadows nn.Module.modules() and breaks any caller iterating the
        # module tree; use a non-clashing attribute name instead.
        self._enc_stages = []
        for m in self.encoder.children():
            self._enc_stages.append(m)
        self.level3_C = C(128 + 3, classes, 1, 1)
        self.br = nn.BatchNorm2d(classes, eps=0.001)
        self.conv = CBR(16 + classes, classes, 3, 1)
        self.up_l3 = nn.Sequential(
            nn.ConvTranspose2d(classes, classes, 2, stride=2, padding=0,
                               output_padding=0, bias=False))
        self.combine_l2_l3 = nn.Sequential(
            BR(2 * classes),
            DilatedParllelResidualBlockB(2 * classes, classes, add=False))
        self.up_l2 = nn.Sequential(
            nn.ConvTranspose2d(classes, classes, 2, stride=2, padding=0,
                               output_padding=0, bias=False),
            BR(classes))
        self.classifier = nn.ConvTranspose2d(classes, classes, 2, stride=2,
                                             padding=0, output_padding=0,
                                             bias=False)

    def forward(self, input):
        """
        :param input: RGB image
        :return: per-class score map at input resolution
        """
        stages = self._enc_stages
        output0 = stages[0](input)
        inp1 = stages[1](input)
        inp2 = stages[2](input)
        output0_cat = stages[3](torch.cat([output0, inp1], 1))
        output1_0 = stages[4](output0_cat)
        output1 = output1_0
        for layer in stages[5]:
            output1 = layer(output1)
        output1_cat = stages[6](torch.cat([output1, output1_0, inp2], 1))
        output2_0 = stages[7](output1_cat)
        output2 = output2_0
        for layer in stages[8]:
            output2 = layer(output2)
        output2_cat = stages[9](torch.cat([output2_0, output2], 1))
        # Decoder: up-sample level-3 scores and merge with level-2/level-1.
        output2_c = self.up_l3(self.br(stages[10](output2_cat)))
        output1_C = self.level3_C(output1_cat)
        comb_l2_l3 = self.up_l2(self.combine_l2_l3(
            torch.cat([output1_C, output2_c], 1)))
        concat_features = self.conv(torch.cat([comb_l2_l3, output0], 1))
        classifier = self.classifier(concat_features)
        return classifier
class CrossEntropyLoss2d(nn.Module):
    """Cross-entropy loss for 2D (per-pixel) predictions."""

    def __init__(self, weight=None):
        """
        :param weight: 1D weight vector to deal with the class-imbalance
        """
        super().__init__()
        # IMPROVEMENT: nn.NLLLoss2d is a deprecated alias; nn.NLLLoss accepts
        # (N, C, H, W) inputs with (N, H, W) targets and behaves identically.
        self.loss = nn.NLLLoss(weight)

    def forward(self, outputs, targets):
        """
        :param outputs: raw scores of shape (N, C, H, W)
        :param targets: class-index map of shape (N, H, W)
        :return: the mean negative log-likelihood
        """
        return self.loss(F.log_softmax(outputs, 1), targets)
class MyDataset(torch.utils.data.Dataset):
    """Dataset that reads image/label file pairs and optionally applies a
    joint transform to each pair."""

    def __init__(self, imList, labelList, transform=None):
        """
        :param imList: image path list (processed and pickled by loadData.py)
        :param labelList: label path list (processed and pickled by loadData.py)
        :param transform: optional transformation (see Transforms.py)
        """
        self.imList = imList
        self.labelList = labelList
        self.transform = transform

    def __len__(self):
        """Number of image/label pairs."""
        return len(self.imList)

    def __getitem__(self, idx):
        """
        :param idx: index of the image file
        :return: the (image, label) pair at *idx*
        """
        image = cv2.imread(self.imList[idx])
        label = cv2.imread(self.labelList[idx], 0)  # 0 -> load as grayscale
        if self.transform:
            [image, label] = self.transform(image, label)
        return (image, label)
class iouEval():
    """Accumulates confusion-matrix based segmentation metrics (overall
    accuracy, per-class accuracy/IoU, mIOU) averaged over batches."""

    def __init__(self, nClasses):
        """
        :param nClasses: number of segmentation classes
        """
        self.nClasses = nClasses
        self.reset()

    def reset(self):
        """Clear all accumulated metrics."""
        self.overall_acc = 0
        self.per_class_acc = np.zeros(self.nClasses, dtype=np.float32)
        self.per_class_iu = np.zeros(self.nClasses, dtype=np.float32)
        self.mIOU = 0
        # BUG FIX: the counter previously started at 1, so every averaged
        # metric was divided by (num_batches + 1) and under-reported.
        self.batchCount = 0

    def fast_hist(self, a, b):
        """Confusion matrix between label array *a* and prediction array *b*."""
        k = (a >= 0) & (a < self.nClasses)
        return np.bincount(self.nClasses * a[k].astype(int) + b[k],
                           minlength=self.nClasses ** 2).reshape(self.nClasses,
                                                                 self.nClasses)

    def compute_hist(self, predict, gth):
        """Confusion matrix with ground truth on rows, predictions on columns."""
        return self.fast_hist(gth, predict)

    def addBatch(self, predict, gth):
        """Accumulate metrics for one batch of predictions/ground truth tensors."""
        predict = predict.cpu().numpy().flatten()
        gth = gth.cpu().numpy().flatten()
        epsilon = 1e-08  # avoids division by zero for absent classes
        hist = self.compute_hist(predict, gth)
        overall_acc = np.diag(hist).sum() / (hist.sum() + epsilon)
        per_class_acc = np.diag(hist) / (hist.sum(1) + epsilon)
        per_class_iu = np.diag(hist) / (
            (hist.sum(1) + hist.sum(0)) - np.diag(hist) + epsilon)
        mIou = np.nanmean(per_class_iu)
        self.overall_acc += overall_acc
        self.per_class_acc += per_class_acc
        self.per_class_iu += per_class_iu
        self.mIOU += mIou
        self.batchCount += 1

    def getMetric(self):
        """Return batch-averaged (overall_acc, per_class_acc, per_class_iu, mIOU)."""
        count = max(self.batchCount, 1)  # guard: no batches added yet
        overall_acc = self.overall_acc / count
        per_class_acc = self.per_class_acc / count
        per_class_iu = self.per_class_iu / count
        mIOU = self.mIOU / count
        return (overall_acc, per_class_acc, per_class_iu, mIOU)
class CBR(nn.Module):
    """Convolution followed by batch normalization and a PReLU activation."""

    def __init__(self, nIn, nOut, kSize, stride=1):
        """
        :param nIn: number of input channels
        :param nOut: number of output channels
        :param kSize: kernel size
        :param stride: stride rate for down-sampling. Default is 1
        """
        super().__init__()
        pad = int((kSize - 1) / 2)
        self.conv = nn.Conv2d(nIn, nOut, (kSize, kSize), stride=stride,
                              padding=(pad, pad), bias=False)
        self.bn = nn.BatchNorm2d(nOut, eps=0.001)
        self.act = nn.PReLU(nOut)

    def forward(self, input):
        """Apply conv -> batchnorm -> PReLU and return the result."""
        return self.act(self.bn(self.conv(input)))
class BR(nn.Module):
    """Batch normalization followed by a PReLU activation."""

    def __init__(self, nOut):
        """
        :param nOut: number of feature maps to normalize
        """
        super().__init__()
        self.bn = nn.BatchNorm2d(nOut, eps=0.001)
        self.act = nn.PReLU(nOut)

    def forward(self, input):
        """Normalize then threshold the input feature map."""
        return self.act(self.bn(input))
class CB(nn.Module):
    """Convolution followed by batch normalization (no activation)."""

    def __init__(self, nIn, nOut, kSize, stride=1):
        """
        :param nIn: number of input channels
        :param nOut: number of output channels
        :param kSize: kernel size
        :param stride: optional stride for down-sampling
        """
        super().__init__()
        pad = int((kSize - 1) / 2)
        self.conv = nn.Conv2d(nIn, nOut, (kSize, kSize), stride=stride,
                              padding=(pad, pad), bias=False)
        self.bn = nn.BatchNorm2d(nOut, eps=0.001)

    def forward(self, input):
        """Apply conv -> batchnorm and return the result."""
        return self.bn(self.conv(input))
class C(nn.Module):
    """A plain convolutional layer (no normalization, no activation)."""

    def __init__(self, nIn, nOut, kSize, stride=1):
        """
        :param nIn: number of input channels
        :param nOut: number of output channels
        :param kSize: kernel size
        :param stride: optional stride rate for down-sampling
        """
        super().__init__()
        pad = int((kSize - 1) / 2)
        self.conv = nn.Conv2d(nIn, nOut, (kSize, kSize), stride=stride,
                              padding=(pad, pad), bias=False)

    def forward(self, input):
        """Convolve the input feature map."""
        return self.conv(input)
class CDilated(nn.Module):
    """A dilated convolutional layer with 'same'-style padding."""

    def __init__(self, nIn, nOut, kSize, stride=1, d=1):
        """
        :param nIn: number of input channels
        :param nOut: number of output channels
        :param kSize: kernel size
        :param stride: optional stride rate for down-sampling
        :param d: optional dilation rate
        """
        super().__init__()
        # Padding grows with the dilation so the spatial size is preserved
        # at stride 1.
        pad = int((kSize - 1) / 2) * d
        self.conv = nn.Conv2d(nIn, nOut, (kSize, kSize), stride=stride,
                              padding=(pad, pad), bias=False, dilation=d)

    def forward(self, input):
        """Apply the dilated convolution."""
        return self.conv(input)
class DownSamplerB(nn.Module):
    """ESP-style down-sampling block: strided reduce, parallel dilated
    convolutions, hierarchical feature fusion, then BN + PReLU."""

    def __init__(self, nIn, nOut):
        super().__init__()
        n = int(nOut / 5)
        n1 = nOut - 4 * n  # first branch absorbs the remainder channels
        self.c1 = C(nIn, n, 3, 2)
        self.d1 = CDilated(n, n1, 3, 1, 1)
        self.d2 = CDilated(n, n, 3, 1, 2)
        self.d4 = CDilated(n, n, 3, 1, 4)
        self.d8 = CDilated(n, n, 3, 1, 8)
        self.d16 = CDilated(n, n, 3, 1, 16)
        self.bn = nn.BatchNorm2d(nOut, eps=0.001)
        self.act = nn.PReLU(nOut)

    def forward(self, input):
        reduced = self.c1(input)
        d1 = self.d1(reduced)
        # Hierarchical fusion: each dilated output is summed with the
        # previous partial sum before concatenation.
        partial = self.d2(reduced)
        fused = [partial]
        for branch in (self.d4, self.d8, self.d16):
            partial = partial + branch(reduced)
            fused.append(partial)
        combine = torch.cat([d1] + fused, 1)
        return self.act(self.bn(combine))
class DilatedParllelResidualBlockB(nn.Module):
    """ESP block built on the principle Reduce -> Split -> Transform -> Merge,
    with an optional identity residual connection."""

    def __init__(self, nIn, nOut, add=True):
        """
        :param nIn: number of input channels
        :param nOut: number of output channels
        :param add: if true, add a residual connection through identity
            operation (avoided when the dimensions differ to keep the module
            simple)
        """
        super().__init__()
        n = int(nOut / 5)
        n1 = nOut - 4 * n  # first branch absorbs the remainder channels
        self.c1 = C(nIn, n, 1, 1)
        self.d1 = CDilated(n, n1, 3, 1, 1)
        self.d2 = CDilated(n, n, 3, 1, 2)
        self.d4 = CDilated(n, n, 3, 1, 4)
        self.d8 = CDilated(n, n, 3, 1, 8)
        self.d16 = CDilated(n, n, 3, 1, 16)
        self.bn = BR(nOut)
        self.add = add

    def forward(self, input):
        """Apply the ESP transform; returns a feature map with nOut channels."""
        reduced = self.c1(input)
        d1 = self.d1(reduced)
        # Hierarchical fusion of the dilated branches before concatenation.
        partial = self.d2(reduced)
        fused = [partial]
        for branch in (self.d4, self.d8, self.d16):
            partial = partial + branch(reduced)
            fused.append(partial)
        combine = torch.cat([d1] + fused, 1)
        if self.add:
            combine = input + combine
        return self.bn(combine)
class InputProjectionA(nn.Module):
    """Down-samples the input image to the spatial size of a feature map by
    applying 3x3 average pooling (stride 2) *samplingTimes* times.
    E.g. a 512x512x3 image with samplingTimes=3 maps to 64x64x3."""

    def __init__(self, samplingTimes):
        """
        :param samplingTimes: how many factor-2 down-sampling stages to apply
        """
        super().__init__()
        self.pool = nn.ModuleList(
            nn.AvgPool2d(3, stride=2, padding=1) for _ in range(samplingTimes))

    def forward(self, input):
        """Return the pyramid-style down-sampled image."""
        for stage in self.pool:
            input = stage(input)
        return input
class ESPNet_Encoder(nn.Module):
    """ESPNet-C encoder: three levels with input reinforcement (the raw
    image, down-sampled, is concatenated back in at each level)."""

    def __init__(self, classes=20, p=5, q=3):
        """
        :param classes: number of classes in the dataset. Default is 20 for
            the cityscapes
        :param p: depth multiplier for level 2
        :param q: depth multiplier for level 3
        """
        super().__init__()
        self.level1 = CBR(3, 16, 3, 2)
        self.sample1 = InputProjectionA(1)
        self.sample2 = InputProjectionA(2)
        self.b1 = BR(16 + 3)
        self.level2_0 = DownSamplerB(16 + 3, 64)
        self.level2 = nn.ModuleList(
            DilatedParllelResidualBlockB(64, 64) for _ in range(p))
        self.b2 = BR(128 + 3)
        self.level3_0 = DownSamplerB(128 + 3, 128)
        self.level3 = nn.ModuleList(
            DilatedParllelResidualBlockB(128, 128) for _ in range(q))
        self.b3 = BR(256)
        self.classifier = C(256, classes, 1, 1)

    def forward(self, input):
        """
        :param input: the input RGB image
        :return: feature map with spatial dimensions 1/8th of the input image
        """
        output0 = self.level1(input)
        inp1 = self.sample1(input)
        inp2 = self.sample2(input)
        output0_cat = self.b1(torch.cat([output0, inp1], 1))
        output1_0 = self.level2_0(output0_cat)
        output1 = output1_0
        for layer in self.level2:
            output1 = layer(output1)
        output1_cat = self.b2(torch.cat([output1, output1_0, inp2], 1))
        output2_0 = self.level3_0(output1_cat)
        output2 = output2_0
        for layer in self.level3:
            output2 = layer(output2)
        output2_cat = self.b3(torch.cat([output2_0, output2], 1))
        return self.classifier(output2_cat)
class ESPNet(nn.Module):
    """ESPNet: the ESPNet-C encoder plus a light-weight RUM-based decoder.
    This variant fuses the decoder output with the level-1 concatenated
    features (19 channels)."""

    def __init__(self, classes=20, p=2, q=3, encoderFile=None):
        """
        :param classes: number of classes in the dataset. Default is 20 for
            the cityscapes
        :param p: depth multiplier
        :param q: depth multiplier
        :param encoderFile: pretrained encoder weights. The ESPNet-C encoder
            is trained first and the RUM-based light weight decoder is then
            attached. See paper for more details.
        """
        super().__init__()
        self.encoder = ESPNet_Encoder(classes, p, q)
        if encoderFile is not None:
            self.encoder.load_state_dict(torch.load(encoderFile))
            print('Encoder loaded!')
        # BUG FIX: this list used to be stored as `self.modules`, which
        # shadows nn.Module.modules() and breaks any caller iterating the
        # module tree; use a non-clashing attribute name instead.
        self._enc_stages = []
        for m in self.encoder.children():
            self._enc_stages.append(m)
        self.level3_C = C(128 + 3, classes, 1, 1)
        self.br = nn.BatchNorm2d(classes, eps=0.001)
        # 19 = 16 level-1 channels + 3 reinforced input channels.
        self.conv = CBR(19 + classes, classes, 3, 1)
        self.up_l3 = nn.Sequential(
            nn.ConvTranspose2d(classes, classes, 2, stride=2, padding=0,
                               output_padding=0, bias=False))
        self.combine_l2_l3 = nn.Sequential(
            BR(2 * classes),
            DilatedParllelResidualBlockB(2 * classes, classes, add=False))
        self.up_l2 = nn.Sequential(
            nn.ConvTranspose2d(classes, classes, 2, stride=2, padding=0,
                               output_padding=0, bias=False),
            BR(classes))
        self.classifier = nn.ConvTranspose2d(classes, classes, 2, stride=2,
                                             padding=0, output_padding=0,
                                             bias=False)

    def forward(self, input):
        """
        :param input: RGB image
        :return: per-class score map at input resolution
        """
        stages = self._enc_stages
        output0 = stages[0](input)
        inp1 = stages[1](input)
        inp2 = stages[2](input)
        output0_cat = stages[3](torch.cat([output0, inp1], 1))
        output1_0 = stages[4](output0_cat)
        output1 = output1_0
        for layer in stages[5]:
            output1 = layer(output1)
        output1_cat = stages[6](torch.cat([output1, output1_0, inp2], 1))
        output2_0 = stages[7](output1_cat)
        output2 = output2_0
        for layer in stages[8]:
            output2 = layer(output2)
        output2_cat = stages[9](torch.cat([output2_0, output2], 1))
        # Decoder: up-sample level-3 scores and merge with level-2/level-1.
        output2_c = self.up_l3(self.br(stages[10](output2_cat)))
        output1_C = self.level3_C(output1_cat)
        comb_l2_l3 = self.up_l2(self.combine_l2_l3(
            torch.cat([output1_C, output2_c], 1)))
        concat_features = self.conv(torch.cat([comb_l2_l3, output0_cat], 1))
        classifier = self.classifier(concat_features)
        return classifier
def make_dot(var, params=None):
    """Produces Graphviz representation of PyTorch autograd graph.
    Blue nodes are the Variables that require grad, orange are Tensors
    saved for backward in torch.autograd.Function.
    Args:
        var: output Variable
        params: dict of (name, Variable) to add names to node that
            require grad (TODO: make optional)
    """
    if params is not None:
        # BUG FIX: dict.values() is a view and not subscriptable in
        # Python 3; grab an arbitrary element with next(iter(...)).
        assert isinstance(next(iter(params.values())), Variable)
        param_map = {id(v): k for (k, v) in params.items()}
    node_attr = dict(style='filled', shape='box', align='left',
                     fontsize='12', ranksep='0.1', height='0.2')
    dot = Digraph(node_attr=node_attr, graph_attr=dict(size='12,12'))
    seen = set()

    def size_to_str(size):
        # e.g. torch.Size([2, 3]) -> "(2, 3)"
        return '(' + ', '.join(['%d' % v for v in size]) + ')'

    def add_nodes(var):
        # Recursively walk the autograd graph, adding one node per
        # function/tensor and edges from inputs to consumers.
        if var not in seen:
            if torch.is_tensor(var):
                dot.node(str(id(var)), size_to_str(var.size()), fillcolor='orange')
            elif hasattr(var, 'variable'):
                u = var.variable
                name = param_map[id(u)] if params is not None else ''
                node_name = '%s\n %s' % (name, size_to_str(u.size()))
                dot.node(str(id(var)), node_name, fillcolor='lightblue')
            else:
                dot.node(str(id(var)), str(type(var).__name__))
            seen.add(var)
            if hasattr(var, 'next_functions'):
                for u in var.next_functions:
                    if u[0] is not None:
                        dot.edge(str(id(u[0])), str(id(var)))
                        add_nodes(u[0])
            if hasattr(var, 'saved_tensors'):
                for t in var.saved_tensors:
                    dot.edge(str(id(t)), str(id(var)))
                    add_nodes(t)

    add_nodes(var.grad_fn)
    return dot
def val(args, val_loader, model, criterion):
    """
    :param args: general arguments
    :param val_loader: loaded for validation dataset
    :param model: model
    :param criterion: loss function
    :return: average epoch loss, overall pixel-wise accuracy, per class accuracy, per class iu, and mIOU
    """
    model.eval()
    iouEvalVal = iouEval(args.classes)
    epoch_loss = []
    total_batches = len(val_loader)
    for (i, (input, target)) in enumerate(val_loader):
        start_time = time.time()
        if (args.onGPU == True):
            input = input.cuda()
            target = target.cuda()
        # volatile=True: legacy (pre-0.4 PyTorch) way of disabling autograd.
        input_var = torch.autograd.Variable(input, volatile=True)
        target_var = torch.autograd.Variable(target, volatile=True)
        output = model(input_var)
        loss = criterion(output, target_var)
        # loss.data[0]: legacy scalar access (loss.item() in modern PyTorch).
        epoch_loss.append(loss.data[0])
        time_taken = (time.time() - start_time)
        iouEvalVal.addBatch(output.max(1)[1].data, target_var.data)
        print(('[%d/%d] loss: %.3f time: %.2f' % (i, total_batches, loss.data[0], time_taken)))
    average_epoch_loss_val = (sum(epoch_loss) / len(epoch_loss))
    (overall_acc, per_class_acc, per_class_iu, mIOU) = iouEvalVal.getMetric()
    return (average_epoch_loss_val, overall_acc, per_class_acc, per_class_iu, mIOU)
def train(args, train_loader, model, criterion, optimizer, epoch):
    """Train the model for one epoch.

    :param args: general arguments (uses ``args.classes`` and ``args.onGPU``)
    :param train_loader: DataLoader for the training dataset
    :param model: network being trained
    :param criterion: loss function
    :param optimizer: optimization algo, such as ADAM or SGD
    :param epoch: epoch number (unused here; kept for interface stability)
    :return: (average epoch loss, overall pixel-wise accuracy,
              per-class accuracy, per-class IoU, mIoU)
    """
    model.train()
    iouEvalTrain = iouEval(args.classes)
    epoch_loss = []
    total_batches = len(train_loader)
    for (i, (input, target)) in enumerate(train_loader):
        start_time = time.time()
        if (args.onGPU == True):
            input = input.cuda()
            target = target.cuda()
        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)
        output = model(input_var)
        loss = criterion(output, target_var)
        # BUG FIX: zero_grad() was previously called twice per iteration
        # (once before and once after computing the loss). A single call
        # right before backward() is sufficient and equivalent.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        epoch_loss.append(loss.data[0])
        time_taken = (time.time() - start_time)
        # argmax over the class dimension gives the predicted label map.
        iouEvalTrain.addBatch(output.max(1)[1].data, target_var.data)
        print(('[%d/%d] loss: %.3f time:%.2f' % (i, total_batches, loss.data[0], time_taken)))
    average_epoch_loss_train = (sum(epoch_loss) / len(epoch_loss))
    (overall_acc, per_class_acc, per_class_iu, mIOU) = iouEvalTrain.getMetric()
    return (average_epoch_loss_train, overall_acc, per_class_acc, per_class_iu, mIOU)
def save_checkpoint(state, filenameCheckpoint='checkpoint.pth.tar'):
    """Serialize a training checkpoint to disk.

    :param state: checkpoint payload (model/optimizer state, epoch, etc.)
    :param filenameCheckpoint: destination path for the checkpoint file
    :return: nothing
    """
    torch.save(state, filenameCheckpoint)
def netParams(model):
    """Count the total number of learnable parameter elements in a network.

    :param model: a ``torch.nn.Module``
    :return: total number of parameter elements
    """
    # Tensor.numel() is the product of all dimensions of each parameter
    # tensor, which is exactly what the original nested size-product loop
    # computed by hand.
    return sum(parameter.numel() for parameter in model.parameters())
def trainValidateSegmentation(args):
    """Main driver for training and validation of the segmentation network.

    Loads (or builds) the cached dataset statistics, constructs the ESPNet
    encoder or full encoder/decoder model, trains it at multiple input
    scales per epoch, validates once per epoch, and checkpoints/logs the
    results under ``args.savedir``.

    :param args: global arguments (paths, hyper-parameters, flags)
    :return: None
    """
    # Build and pickle the dataset statistics (mean/std/class weights) on
    # first run; afterwards load them from the cache file.
    if (not os.path.isfile(args.cached_data_file)):
        dataLoad = ld.LoadData(args.data_dir, args.classes, args.cached_data_file)
        data = dataLoad.processData()
        if (data is None):
            print('Error while pickling data. Please check.')
            exit((- 1))
    else:
        data = pickle.load(open(args.cached_data_file, 'rb'))
    q = args.q
    p = args.p
    # Encoder-only vs full encoder+decoder model; the save directory name
    # encodes the (p, q) depth multipliers so runs don't collide.
    if (not args.decoder):
        model = net.ESPNet_Encoder(args.classes, p=p, q=q)
        args.savedir = (((((args.savedir + '_enc_') + str(p)) + '_') + str(q)) + '/')
    else:
        model = net.ESPNet(args.classes, p=p, q=q, encoderFile=args.pretrained)
        args.savedir = (((((args.savedir + '_dec_') + str(p)) + '_') + str(q)) + '/')
    if args.onGPU:
        model = model.cuda()
    if (not os.path.exists(args.savedir)):
        os.mkdir(args.savedir)
    # Optionally render the autograd graph of one forward pass to a file.
    if args.visualizeNet:
        x = Variable(torch.randn(1, 3, args.inWidth, args.inHeight))
        if args.onGPU:
            x = x.cuda()
        y = model.forward(x)
        g = viz.make_dot(y)
        g.render((args.savedir + 'model.png'), view=False)
    total_paramters = netParams(model)
    print(('Total network parameters: ' + str(total_paramters)))
    # Class-balancing weights computed from the label histogram.
    weight = torch.from_numpy(data['classWeights'])
    if args.onGPU:
        weight = weight.cuda()
    criteria = CrossEntropyLoss2d(weight)
    if args.onGPU:
        criteria = criteria.cuda()
    print('Data statistics')
    print(data['mean'], data['std'])
    print(data['classWeights'])
    # One transform pipeline per training scale; all normalize with the
    # dataset mean/std and apply random crop-resize + horizontal flip.
    trainDataset_main = myTransforms.Compose([myTransforms.Normalize(mean=data['mean'], std=data['std']), myTransforms.Scale(1024, 512), myTransforms.RandomCropResize(32), myTransforms.RandomFlip(), myTransforms.ToTensor(args.scaleIn)])
    trainDataset_scale1 = myTransforms.Compose([myTransforms.Normalize(mean=data['mean'], std=data['std']), myTransforms.Scale(1536, 768), myTransforms.RandomCropResize(100), myTransforms.RandomFlip(), myTransforms.ToTensor(args.scaleIn)])
    trainDataset_scale2 = myTransforms.Compose([myTransforms.Normalize(mean=data['mean'], std=data['std']), myTransforms.Scale(1280, 720), myTransforms.RandomCropResize(100), myTransforms.RandomFlip(), myTransforms.ToTensor(args.scaleIn)])
    trainDataset_scale3 = myTransforms.Compose([myTransforms.Normalize(mean=data['mean'], std=data['std']), myTransforms.Scale(768, 384), myTransforms.RandomCropResize(32), myTransforms.RandomFlip(), myTransforms.ToTensor(args.scaleIn)])
    trainDataset_scale4 = myTransforms.Compose([myTransforms.Normalize(mean=data['mean'], std=data['std']), myTransforms.Scale(512, 256), myTransforms.RandomFlip(), myTransforms.ToTensor(args.scaleIn)])
    valDataset = myTransforms.Compose([myTransforms.Normalize(mean=data['mean'], std=data['std']), myTransforms.Scale(1024, 512), myTransforms.ToTensor(args.scaleIn)])
    # Smaller scales fit larger batches, hence the +2/+4 batch-size bumps.
    trainLoader = torch.utils.data.DataLoader(myDataLoader.MyDataset(data['trainIm'], data['trainAnnot'], transform=trainDataset_main), batch_size=(args.batch_size + 2), shuffle=True, num_workers=args.num_workers, pin_memory=True)
    trainLoader_scale1 = torch.utils.data.DataLoader(myDataLoader.MyDataset(data['trainIm'], data['trainAnnot'], transform=trainDataset_scale1), batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True)
    trainLoader_scale2 = torch.utils.data.DataLoader(myDataLoader.MyDataset(data['trainIm'], data['trainAnnot'], transform=trainDataset_scale2), batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True)
    trainLoader_scale3 = torch.utils.data.DataLoader(myDataLoader.MyDataset(data['trainIm'], data['trainAnnot'], transform=trainDataset_scale3), batch_size=(args.batch_size + 4), shuffle=True, num_workers=args.num_workers, pin_memory=True)
    trainLoader_scale4 = torch.utils.data.DataLoader(myDataLoader.MyDataset(data['trainIm'], data['trainAnnot'], transform=trainDataset_scale4), batch_size=(args.batch_size + 4), shuffle=True, num_workers=args.num_workers, pin_memory=True)
    valLoader = torch.utils.data.DataLoader(myDataLoader.MyDataset(data['valIm'], data['valAnnot'], transform=valDataset), batch_size=(args.batch_size + 4), shuffle=False, num_workers=args.num_workers, pin_memory=True)
    if args.onGPU:
        cudnn.benchmark = True
    start_epoch = 0
    if args.resume:
        # NOTE(review): the messages below print args.resume (a flag) while
        # the checkpoint is actually loaded from args.resumeLoc — the log
        # text is misleading, though loading itself is correct.
        if os.path.isfile(args.resumeLoc):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resumeLoc)
            start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    logFileLoc = (args.savedir + args.logFile)
    # Append to an existing log; otherwise create it and write the header.
    if os.path.isfile(logFileLoc):
        logger = open(logFileLoc, 'a')
    else:
        logger = open(logFileLoc, 'w')
        logger.write(('Parameters: %s' % str(total_paramters)))
        # NOTE(review): the last header column reads 'mIOU (val' — missing
        # closing parenthesis in the original format string.
        logger.write(('\n%s\t%s\t%s\t%s\t%s\t' % ('Epoch', 'Loss(Tr)', 'Loss(val)', 'mIOU (tr)', 'mIOU (val')))
    logger.flush()
    optimizer = torch.optim.Adam(model.parameters(), args.lr, (0.9, 0.999), eps=1e-08, weight_decay=0.0005)
    # Halve the learning rate every `step_loss` epochs.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.step_loss, gamma=0.5)
    for epoch in range(start_epoch, args.max_epochs):
        # Legacy scheduler API: step(epoch) before training sets the LR for
        # this epoch (valid in the PyTorch version this file targets).
        scheduler.step(epoch)
        lr = 0
        for param_group in optimizer.param_groups:
            lr = param_group['lr']
        print(('Learning rate: ' + str(lr)))
        # Multi-scale training: warm the model at the auxiliary scales
        # first (metrics discarded), then train at the main scale.
        train(args, trainLoader_scale1, model, criteria, optimizer, epoch)
        train(args, trainLoader_scale2, model, criteria, optimizer, epoch)
        train(args, trainLoader_scale4, model, criteria, optimizer, epoch)
        train(args, trainLoader_scale3, model, criteria, optimizer, epoch)
        (lossTr, overall_acc_tr, per_class_acc_tr, per_class_iu_tr, mIOU_tr) = train(args, trainLoader, model, criteria, optimizer, epoch)
        (lossVal, overall_acc_val, per_class_acc_val, per_class_iu_val, mIOU_val) = val(args, valLoader, model, criteria)
        # Rolling checkpoint (overwritten each epoch) ...
        save_checkpoint({'epoch': (epoch + 1), 'arch': str(model), 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict(), 'lossTr': lossTr, 'lossVal': lossVal, 'iouTr': mIOU_tr, 'iouVal': mIOU_val, 'lr': lr}, (args.savedir + 'checkpoint.pth.tar'))
        # ... plus a per-epoch weights snapshot.
        model_file_name = (((args.savedir + '/model_') + str((epoch + 1))) + '.pth')
        torch.save(model.state_dict(), model_file_name)
        # Per-epoch detailed accuracy report.
        with open((((args.savedir + 'acc_') + str(epoch)) + '.txt'), 'w') as log:
            log.write(('\nEpoch: %d\t Overall Acc (Tr): %.4f\t Overall Acc (Val): %.4f\t mIOU (Tr): %.4f\t mIOU (Val): %.4f' % (epoch, overall_acc_tr, overall_acc_val, mIOU_tr, mIOU_val)))
            log.write('\n')
            log.write(('Per Class Training Acc: ' + str(per_class_acc_tr)))
            log.write('\n')
            log.write(('Per Class Validation Acc: ' + str(per_class_acc_val)))
            log.write('\n')
            log.write(('Per Class Training mIOU: ' + str(per_class_iu_tr)))
            log.write('\n')
            log.write(('Per Class Validation mIOU: ' + str(per_class_iu_val)))
        logger.write(('\n%d\t\t%.4f\t\t%.4f\t\t%.4f\t\t%.4f\t\t%.7f' % (epoch, lossTr, lossVal, mIOU_tr, mIOU_val, lr)))
        logger.flush()
        print((('Epoch : ' + str(epoch)) + ' Details'))
        print(('\nEpoch No.: %d\tTrain Loss = %.4f\tVal Loss = %.4f\t mIOU(tr) = %.4f\t mIOU(val) = %.4f' % (epoch, lossTr, lossVal, mIOU_tr, mIOU_val)))
    logger.close()
def main():
    """CLI entry point: run a saved CRF-RNN model on a single image.

    Reads --model/--image/--output from the command line, runs a forward
    pass, and saves the predicted label image.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', help='full path to the .h5 model (download from https://goo.gl/ciEYZi)', required=True)
    parser.add_argument('--image', help='full path to the image', required=True)
    parser.add_argument('--output', help='full path to the output label image', default=None)
    cli = parser.parse_args()
    weights_path = cli.model
    image_path = cli.image
    # Default output name: derive it from the input image path.
    labels_path = cli.output or (image_path + '_labels.png')
    network = get_crfrnn_model_def()
    network.load_weights(weights_path)
    img_data, img_h, img_w, original_size = util.get_preprocessed_image(image_path)
    class_probs = network.predict(img_data, verbose=False)[0]
    labels = util.get_label_image(class_probs, img_h, img_w, original_size)
    labels.save(labels_path)
def main():
    """Demo entry point: run the pretrained CRF-RNN network on fixed paths.

    Expects 'crfrnn_keras_model.h5' and 'image.jpg' in the working
    directory and writes the segmentation to 'labels.png'.
    """
    weights_file = 'crfrnn_keras_model.h5'
    source_image = 'image.jpg'
    result_image = 'labels.png'
    network = get_crfrnn_model_def()
    network.load_weights(weights_file)
    img_data, h, w, size = util.get_preprocessed_image(source_image)
    class_probs = network.predict(img_data, verbose=False)[0]
    labels = util.get_label_image(class_probs, h, w, size)
    labels.save(result_image)
def get_crfrnn_model_def():
    """Return the Keras CRF-RNN model definition (FCN-8s backbone + CrfRnnLayer).

    Currently, only 500 x 500 images are supported. One can get this to
    work with different image sizes by adjusting the parameters of the
    Cropping2D layers below.
    """
    (channels, height, width) = (3, 500, 500)
    input_shape = (height, width, 3)
    img_input = Input(shape=input_shape)
    # FCN-style 100px padding so the 'valid' convolutions below still cover
    # the full image.
    x = ZeroPadding2D(padding=(100, 100))(img_input)
    # VGG-16 convolutional backbone, blocks 1-5.
    x = Conv2D(64, (3, 3), activation='relu', padding='valid', name='conv1_1')(x)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='conv1_2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv2_1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='conv2_2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool2', padding='same')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='conv3_3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool3', padding='same')(x)
    pool3 = x  # kept for the FCN-8s skip connection
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv4_3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool4', padding='same')(x)
    pool4 = x  # kept for the FCN-16s skip connection
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5_3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='pool5', padding='same')(x)
    # Fully-connected layers of VGG re-cast as convolutions (fc6/fc7).
    x = Conv2D(4096, (7, 7), activation='relu', padding='valid', name='fc6')(x)
    x = Dropout(0.5)(x)
    x = Conv2D(4096, (1, 1), activation='relu', padding='valid', name='fc7')(x)
    x = Dropout(0.5)(x)
    # 21 = PASCAL VOC classes (20 + background).
    x = Conv2D(21, (1, 1), padding='valid', name='score-fr')(x)
    # 2x upsample, fuse with pool4 scores (cropped to align spatially).
    score2 = Conv2DTranspose(21, (4, 4), strides=2, name='score2')(x)
    score_pool4 = Conv2D(21, (1, 1), name='score-pool4')(pool4)
    score_pool4c = Cropping2D((5, 5))(score_pool4)
    score_fused = Add()([score2, score_pool4c])
    # 2x upsample again, fuse with pool3 scores.
    score4 = Conv2DTranspose(21, (4, 4), strides=2, name='score4', use_bias=False)(score_fused)
    score_pool3 = Conv2D(21, (1, 1), name='score-pool3')(pool3)
    score_pool3c = Cropping2D((9, 9))(score_pool3)
    score_final = Add()([score4, score_pool3c])
    # Final 8x upsample back to (roughly) input resolution, then crop the
    # padding margins off. These crop values are tuned for 500x500 input.
    upsample = Conv2DTranspose(21, (16, 16), strides=8, name='upsample', use_bias=False)(score_final)
    upscore = Cropping2D(((31, 37), (31, 37)))(upsample)
    # CRF-as-RNN refinement layer operating on the unaries + raw image.
    output = CrfRnnLayer(image_dims=(height, width), num_classes=21, theta_alpha=160.0, theta_beta=3.0, theta_gamma=3.0, num_iterations=10, name='crfrnn')([upscore, img_input])
    model = Model(img_input, output, name='crfrnn_net')
    return model
@ops.RegisterGradient('HighDimFilter')
def _high_dim_filter_grad(op, grad):
    """Gradient for the HighDimFilter op.

    Only the first input (the unaries) needs a real gradient; the second
    input (the RGB values of the image) never receives back-propagated
    errors, so it gets zeros.

    Args:
        op: The `high_dim_filter` operation that we are differentiating.
        grad: Gradients with respect to the output of the `high_dim_filter` op.

    Returns:
        Gradients with respect to both inputs of `high_dim_filter`.
    """
    image = op.inputs[1]
    # The backward pass reuses the same filter with `backwards=True` and the
    # attributes copied from the forward op.
    unary_grad = custom_module.high_dim_filter(
        grad, image,
        bilateral=op.get_attr('bilateral'),
        theta_alpha=op.get_attr('theta_alpha'),
        theta_beta=op.get_attr('theta_beta'),
        theta_gamma=op.get_attr('theta_gamma'),
        backwards=True)
    return [unary_grad, tf.zeros_like(image)]
class HighDimGradTest(tf.test.TestCase):
    """Numerical gradient check for the custom high_dim_filter op."""

    def test_high_dim_filter_grad(self):
        shape = [5, 10, 10]
        unaries = np.random.randn(*shape).astype(np.float32)
        rgb = np.random.randint(low=0, high=256, size=shape).astype(np.float32)
        with self.test_session():
            unary_tensor = constant_op.constant(unaries)
            rgb_tensor = constant_op.constant(rgb)
            filtered = custom_module.high_dim_filter(unary_tensor, rgb_tensor, bilateral=True, theta_alpha=1000.0, theta_beta=1000.0, theta_gamma=1000.0)
            grads = gradient_checker.compute_gradient([unary_tensor, rgb_tensor], [shape, shape], filtered, shape)
            # Compare theoretical vs numerically-estimated gradients for the
            # unary input, ignoring entries where the theoretical gradient is 0.
            theoretical = grads[0][0].flatten()
            estimated = grads[0][1].flatten()
            keep = (theoretical != 0)
            theoretical = theoretical[keep]
            estimated = estimated[keep]
            delta = (theoretical - estimated)
            measure1 = (np.mean(delta) / np.mean(theoretical))
            measure2 = (np.max(delta) / np.max(theoretical))
            print('Gradient check: measure1 = {:.6f}, measure2 = {:.6f}'.format(measure1, measure2))
            self.assertLess(measure1, 0.001, 'Errors found in the gradient computation.')
            self.assertLess(measure2, 0.02, 'Errors found in the gradient computation.')
            print('Gradient check: success!')
def safe_readline(f):
    """Read one line from *f*, recovering if the position is mid-character.

    After seeking a text file to an arbitrary byte offset, the offset may
    fall inside a multi-byte encoded character, making readline() raise
    UnicodeDecodeError. Back up one position at a time until a clean read
    succeeds, then return that line.
    """
    position = f.tell()
    while True:
        try:
            return f.readline()
        except UnicodeDecodeError:
            position -= 1
            f.seek(position)
class Binarizer():
    """Static helpers that binarize text files into streams of token-id tensors.

    Designed for parallel preprocessing: `find_offsets` splits a file into
    byte ranges, and each worker calls `binarize`/`binarize_alignments` on
    its own (offset, end) slice.
    """

    @staticmethod
    def binarize(filename, dict, consumer, tokenize=tokenize_line, append_eos=True, reverse_order=False, offset=0, end=(- 1), already_numberized=False):
        # NOTE(review): `dict` shadows the builtin; here it is the project's
        # Dictionary object (provides unk_index, unk_word, eos, encode_line).
        (nseq, ntok) = (0, 0)
        replaced = Counter()

        def replaced_consumer(word, idx):
            # Count every out-of-vocabulary word that got mapped to <unk>.
            if ((idx == dict.unk_index) and (word != dict.unk_word)):
                replaced.update([word])
        with open(filename, 'r', encoding='utf-8') as f:
            f.seek(offset)
            # safe_readline() recovers if `offset` landed inside a
            # multi-byte character.
            line = safe_readline(f)
            while line:
                # Stop once we have read past this worker's byte range.
                if ((end > 0) and (f.tell() > end)):
                    break
                if already_numberized:
                    # Input lines are already space-separated token ids.
                    id_strings = line.strip().split()
                    id_list = [int(id_string) for id_string in id_strings]
                    if reverse_order:
                        id_list.reverse()
                    if append_eos:
                        id_list.append(dict.eos())
                    ids = torch.IntTensor(id_list)
                else:
                    # Tokenize and map words to ids via the dictionary,
                    # reporting unknowns through replaced_consumer.
                    ids = dict.encode_line(line=line, line_tokenizer=tokenize, add_if_not_exist=False, consumer=replaced_consumer, append_eos=append_eos, reverse_order=reverse_order)
                nseq += 1
                ntok += len(ids)
                consumer(ids)
                line = f.readline()
        return {'nseq': nseq, 'nunk': sum(replaced.values()), 'ntok': ntok, 'replaced': replaced}

    @staticmethod
    def binarize_alignments(filename, alignment_parser, consumer, offset=0, end=(- 1)):
        # Same chunked-reading scheme as binarize(), but each line is parsed
        # by the caller-supplied alignment_parser instead of a dictionary.
        nseq = 0
        with open(filename, 'r') as f:
            f.seek(offset)
            line = safe_readline(f)
            while line:
                if ((end > 0) and (f.tell() > end)):
                    break
                ids = alignment_parser(line)
                nseq += 1
                consumer(ids)
                line = f.readline()
        return {'nseq': nseq}

    @staticmethod
    def find_offsets(filename, num_chunks):
        # Split the file into num_chunks byte ranges aligned to line
        # boundaries; returns num_chunks+1 offsets (last one stays 0 and is
        # treated as "until EOF" by the readers above, via end <= 0).
        with open(filename, 'r', encoding='utf-8') as f:
            size = os.fstat(f.fileno()).st_size
            chunk_size = (size // num_chunks)
            offsets = [0 for _ in range((num_chunks + 1))]
            for i in range(1, num_chunks):
                f.seek((chunk_size * i))
                # Advance to the next line boundary so no chunk starts
                # mid-line (or mid-character).
                safe_readline(f)
                offsets[i] = f.tell()
            return offsets
class BleuStat(ctypes.Structure):
    """ctypes mirror of the C-side BLEU statistics struct.

    Field order and types must match the native struct exactly:
    reference/prediction token counts plus matched/total n-gram counts for
    orders 1 through 4.
    """
    _fields_ = [('reflen', ctypes.c_size_t), ('predlen', ctypes.c_size_t), ('match1', ctypes.c_size_t), ('count1', ctypes.c_size_t), ('match2', ctypes.c_size_t), ('count2', ctypes.c_size_t), ('match3', ctypes.c_size_t), ('count3', ctypes.c_size_t), ('match4', ctypes.c_size_t), ('count4', ctypes.c_size_t)]
class SacrebleuScorer(object):
    """Corpus-level BLEU scorer backed by the `sacrebleu` package.

    Collects (reference, prediction) string pairs and defers the actual
    scoring to sacrebleu.corpus_bleu.
    """

    def __init__(self):
        # Imported lazily so the dependency is only required when this
        # scorer is actually used.
        import sacrebleu
        self.sacrebleu = sacrebleu
        self.reset()

    def reset(self, one_init=False):
        # one_init only makes sense for the count-based C scorer.
        if one_init:
            raise NotImplementedError
        self.ref, self.sys = [], []

    def add_string(self, ref, pred):
        self.ref.append(ref)
        self.sys.append(pred)

    def score(self, order=4):
        return self.result_string(order).score

    def result_string(self, order=4):
        # sacrebleu only computes the standard BLEU-4.
        if order != 4:
            raise NotImplementedError
        return self.sacrebleu.corpus_bleu(self.sys, [self.ref])
class Scorer(object):
    """BLEU scorer backed by the native libbleu extension (module `C`).

    Accumulates n-gram match statistics in a C-side `BleuStat` struct and
    derives precision/brevity-penalty/BLEU from it in Python.
    """

    def __init__(self, pad, eos, unk):
        # pad/eos/unk are the special token ids the C code must recognise.
        self.stat = BleuStat()
        self.pad = pad
        self.eos = eos
        self.unk = unk
        self.reset()

    def reset(self, one_init=False):
        # one_init seeds counts at 1 (add-one smoothing on the C side);
        # otherwise all statistics start at zero.
        if one_init:
            C.bleu_one_init(ctypes.byref(self.stat))
        else:
            C.bleu_zero_init(ctypes.byref(self.stat))

    def add(self, ref, pred):
        """Accumulate statistics for one (reference, prediction) pair of
        int token-id tensors."""
        if (not isinstance(ref, torch.IntTensor)):
            raise TypeError('ref must be a torch.IntTensor (got {})'.format(type(ref)))
        if (not isinstance(pred, torch.IntTensor)):
            raise TypeError('pred must be a torch.IntTensor(got {})'.format(type(pred)))
        rref = ref.clone()
        assert (not rref.lt(0).any())
        # Replace <unk> in the reference with an id that can never match a
        # predicted token, so unknowns don't count as correct.
        rref[rref.eq(self.unk)] = (- 999)
        # Flatten and make contiguous: the C code reads raw data pointers.
        rref = rref.contiguous().view((- 1))
        pred = pred.contiguous().view((- 1))
        C.bleu_add(ctypes.byref(self.stat), ctypes.c_size_t(rref.size(0)), ctypes.c_void_p(rref.data_ptr()), ctypes.c_size_t(pred.size(0)), ctypes.c_void_p(pred.data_ptr()), ctypes.c_int(self.pad), ctypes.c_int(self.eos))

    def score(self, order=4):
        """Return corpus BLEU (0-100): geometric mean of n-gram precisions
        up to `order`, times the brevity penalty."""
        psum = sum(((math.log(p) if (p > 0) else float('-Inf')) for p in self.precision()[:order]))
        return ((self.brevity() * math.exp((psum / order))) * 100)

    def precision(self):
        # Per-order modified n-gram precisions (0 when no n-grams counted).
        def ratio(a, b):
            return ((a / b) if (b > 0) else 0)
        return [ratio(self.stat.match1, self.stat.count1), ratio(self.stat.match2, self.stat.count2), ratio(self.stat.match3, self.stat.count3), ratio(self.stat.match4, self.stat.count4)]

    def brevity(self):
        # Standard BLEU brevity penalty: 1 if prediction is at least as
        # long as the reference, exp(1 - ref/pred) otherwise.
        r = (self.stat.reflen / self.stat.predlen)
        return min(1, math.exp((1 - r)))

    def result_string(self, order=4):
        """Return a human-readable summary, e.g.
        'BLEU4 = 32.10, 60.0/40.0/30.0/20.0 (BP=..., ratio=..., ...)'."""
        assert (order <= 4), "BLEU scores for order > 4 aren't supported"
        fmt = 'BLEU{} = {:2.2f}, {:2.1f}'
        for _ in range(1, order):
            fmt += '/{:2.1f}'
        fmt += ' (BP={:.3f}, ratio={:.3f}, syslen={}, reflen={})'
        bleup = [(p * 100) for p in self.precision()[:order]]
        return fmt.format(order, self.score(order=order), *bleup, self.brevity(), (self.stat.predlen / self.stat.reflen), self.stat.predlen, self.stat.reflen)