code stringlengths 17 6.64M |
|---|
class TestReproducibility(unittest.TestCase):
    """Verifies that training resumed from a checkpoint reproduces the same
    train/valid statistics as the uninterrupted reference run."""

    def _test_reproducibility(self, name, extra_flags=None, delta=0.0001, resume_checkpoint='checkpoint1.pt', max_epoch=3):
        # Train a small fconv model to `max_epoch`, then restart from
        # `resume_checkpoint` (renamed to checkpoint_last.pt) and compare the
        # final logged metrics of both runs within `delta`.
        if (extra_flags is None):
            extra_flags = []
        with tempfile.TemporaryDirectory(name) as data_dir:
            # Build and binarize a dummy translation dataset; logs are
            # captured only to keep test output quiet.
            with self.assertLogs() as logs:
                test_binaries.create_dummy_data(data_dir)
                test_binaries.preprocess_translation_data(data_dir)
            # Reference run: train from scratch with deterministic settings
            # (dropout disabled, JSON logging every update).
            with self.assertLogs() as logs:
                test_binaries.train_translation_model(data_dir, 'fconv_iwslt_de_en', (['--dropout', '0.0', '--log-format', 'json', '--log-interval', '1', '--max-epoch', str(max_epoch)] + extra_flags))
            # The train/valid stat records are the 4th- and 3rd-from-last log
            # records (the trailing two are presumably end-of-training
            # messages — verify against the training loop if this slips).
            (train_log, valid_log) = map((lambda rec: json.loads(rec.msg)), logs.records[(- 4):(- 2)])
            # Resumed run: pretend `resume_checkpoint` was the latest
            # checkpoint and train again with identical flags.
            os.rename(os.path.join(data_dir, resume_checkpoint), os.path.join(data_dir, 'checkpoint_last.pt'))
            with self.assertLogs() as logs:
                test_binaries.train_translation_model(data_dir, 'fconv_iwslt_de_en', (['--dropout', '0.0', '--log-format', 'json', '--log-interval', '1', '--max-epoch', str(max_epoch)] + extra_flags))
            (train_res_log, valid_res_log) = map((lambda rec: json.loads(rec.msg)), logs.records[(- 4):(- 2)])
            # The resumed run must reproduce the reference metrics.
            for k in ['train_loss', 'train_ppl', 'train_num_updates', 'train_gnorm']:
                self.assertAlmostEqual(float(train_log[k]), float(train_res_log[k]), delta=delta)
            for k in ['valid_loss', 'valid_ppl', 'valid_num_updates', 'valid_best_loss']:
                self.assertAlmostEqual(float(valid_log[k]), float(valid_res_log[k]), delta=delta)

    def test_reproducibility(self):
        self._test_reproducibility('test_reproducibility')

    @unittest.skipIf((not torch.cuda.is_available()), 'test requires a GPU')
    def test_reproducibility_fp16(self):
        # fp16 accumulates more rounding error, so use a looser tolerance.
        self._test_reproducibility('test_reproducibility_fp16', ['--fp16', '--fp16-init-scale', '4096'], delta=0.011)

    @unittest.skipIf((not torch.cuda.is_available()), 'test requires a GPU')
    def test_reproducibility_memory_efficient_fp16(self):
        self._test_reproducibility('test_reproducibility_memory_efficient_fp16', ['--memory-efficient-fp16', '--fp16-init-scale', '4096'])

    def test_mid_epoch_reproducibility(self):
        # Resume from a mid-epoch checkpoint (saved every 3 updates) within a
        # single epoch.
        self._test_reproducibility('test_mid_epoch_reproducibility', ['--save-interval-updates', '3'], resume_checkpoint='checkpoint_1_3.pt', max_epoch=1)
|
class TestResamplingDataset(unittest.TestCase):
    """Sanity checks for ResamplingDataset: size invariants, deterministic
    indexing, and the empirical sampling distribution over many epochs."""

    def setUp(self):
        self.strings = ['ab', 'c', 'def', 'ghij']
        self.weights = [4.0, 2.0, 7.0, 1.5]
        self.size_ratio = 2
        self.dataset = ListDataset(self.strings, np.array([len(s) for s in self.strings]))

    def _test_common(self, resampling_dataset, iters):
        # Invariants that must hold regardless of batch_by_size.
        assert len(self.dataset) == len(self.strings) == len(self.weights)
        assert len(resampling_dataset) == self.size_ratio * len(self.strings)
        results = {'ordered_by_size': True, 'max_distribution_diff': 0.0}
        total_draws = 0
        draw_counts = collections.defaultdict(int)
        for epoch in range(iters):
            resampling_dataset.set_epoch(epoch)
            order = resampling_dataset.ordered_indices()
            assert len(order) == len(resampling_dataset)
            previous_size = -1
            for index in order:
                current_size = resampling_dataset.size(index)
                # Indexing must be deterministic and agree with size().
                assert resampling_dataset[index] == resampling_dataset[index]
                assert current_size == len(resampling_dataset[index])
                draw_counts[resampling_dataset[index]] += 1
                total_draws += 1
                if previous_size > current_size:
                    results['ordered_by_size'] = False
                previous_size = current_size
        # Every item must have been sampled at least once.
        assert set(draw_counts.keys()) == set(self.strings)
        # Largest deviation between the empirical and target distributions.
        for item, weight in zip(self.strings, self.weights):
            observed_freq = draw_counts[item] / total_draws
            target_freq = weight / sum(self.weights)
            results['max_distribution_diff'] = max(results['max_distribution_diff'], abs(target_freq - observed_freq))
        return results

    def test_resampling_dataset_batch_by_size_false(self):
        resampling_dataset = ResamplingDataset(self.dataset, self.weights, size_ratio=self.size_ratio, batch_by_size=False, seed=0)
        results = self._test_common(resampling_dataset, iters=1000)
        # Without batching by size the order should not be sorted by length.
        assert not results['ordered_by_size']
        assert results['max_distribution_diff'] < 0.02

    def test_resampling_dataset_batch_by_size_true(self):
        resampling_dataset = ResamplingDataset(self.dataset, self.weights, size_ratio=self.size_ratio, batch_by_size=True, seed=0)
        results = self._test_common(resampling_dataset, iters=1000)
        assert results['ordered_by_size']
        assert results['max_distribution_diff'] < 0.02
|
class TestSequenceGeneratorBase(unittest.TestCase):
    """Shared tensor-level assertion helpers for the generator test cases."""

    def assertHypoTokens(self, hypo, tokens):
        # The generated token sequence must match exactly.
        self.assertTensorEqual(hypo['tokens'], torch.LongTensor(tokens))

    def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.0):
        """Check positional log-probabilities and the (optionally
        length-normalized) total hypothesis score."""
        expected = torch.FloatTensor(pos_probs).log()
        self.assertAlmostEqual(hypo['positional_scores'], expected)
        self.assertEqual(expected.numel(), hypo['tokens'].numel())
        total = expected.sum()
        if normalized:
            total = total / (expected.numel() ** lenpen)
        self.assertLess(abs(total - hypo['score']), 1e-06)

    def assertAlmostEqual(self, t1, t2):
        # Element-wise closeness with a fixed absolute tolerance.
        self.assertEqual(t1.size(), t2.size(), 'size mismatch')
        self.assertLess((t1 - t2).abs().max(), 0.0001)

    def assertTensorEqual(self, t1, t2):
        # Exact element-wise equality for integer tensors.
        self.assertEqual(t1.size(), t2.size(), 'size mismatch')
        self.assertEqual(t1.ne(t2).long().sum(), 0)
|
class TestSequenceGenerator(TestSequenceGeneratorBase):
    """Beam-search decoding against the fixed toy model built by
    test_utils.sequence_generator_setup(); the expected tokens and scores
    below are determined by that fixture's hand-crafted probabilities."""

    def setUp(self):
        (self.tgt_dict, self.w1, self.w2, src_tokens, src_lengths, self.model) = test_utils.sequence_generator_setup()
        self.sample = {'net_input': {'src_tokens': src_tokens, 'src_lengths': src_lengths}}

    def test_with_normalization(self):
        # Default behavior: scores normalized by hypothesis length.
        generator = SequenceGenerator(self.tgt_dict, beam_size=2)
        hypos = generator.generate([self.model], self.sample)
        (eos, w1, w2) = (self.tgt_dict.eos(), self.w1, self.w2)
        # sentence 1, top-2 beams
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0])
        self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0])
        # sentence 2, top-2 beams
        self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0])
        self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6])

    def test_without_normalization(self):
        # Without normalization longer hypotheses are penalized by their raw
        # summed log-probability, which changes the beam ranking for
        # sentence 2 relative to test_with_normalization.
        generator = SequenceGenerator(self.tgt_dict, beam_size=2, normalize_scores=False)
        hypos = generator.generate([self.model], self.sample)
        (eos, w1, w2) = (self.tgt_dict.eos(), self.w1, self.w2)
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0], normalized=False)
        self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], normalized=False)
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], normalized=False)
        self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], normalized=False)

    def test_with_lenpen_favoring_short_hypos(self):
        # lenpen < 1 biases the normalized score toward shorter hypotheses.
        lenpen = 0.6
        generator = SequenceGenerator(self.tgt_dict, beam_size=2, len_penalty=lenpen)
        hypos = generator.generate([self.model], self.sample)
        (eos, w1, w2) = (self.tgt_dict.eos(), self.w1, self.w2)
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0], lenpen=lenpen)
        self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen)
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], lenpen=lenpen)
        self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen)

    def test_with_lenpen_favoring_long_hypos(self):
        # lenpen > 1 biases toward longer hypotheses, flipping the beam
        # ranking within each sentence.
        lenpen = 5.0
        generator = SequenceGenerator(self.tgt_dict, beam_size=2, len_penalty=lenpen)
        hypos = generator.generate([self.model], self.sample)
        (eos, w1, w2) = (self.tgt_dict.eos(), self.w1, self.w2)
        self.assertHypoTokens(hypos[0][0], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][0], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen)
        self.assertHypoTokens(hypos[0][1], [w1, eos])
        self.assertHypoScore(hypos[0][1], [0.9, 1.0], lenpen=lenpen)
        self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen)
        self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6], lenpen=lenpen)

    def test_maxlen(self):
        # Cap generation at 2 tokens (plus EOS); hypotheses that would have
        # been longer get truncated/forced to end early.
        generator = SequenceGenerator(self.tgt_dict, beam_size=2, max_len_b=2)
        hypos = generator.generate([self.model], self.sample)
        (eos, w1, w2) = (self.tgt_dict.eos(), self.w1, self.w2)
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0])
        self.assertHypoTokens(hypos[0][1], [w2, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.1, 0.6])
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6])
        self.assertHypoTokens(hypos[1][1], [w2, w2, eos])
        self.assertHypoScore(hypos[1][1], [0.3, 0.9, 0.01])
|
class TestDiverseBeamSearch(TestSequenceGeneratorBase):
    """Diverse beam search on a tiny hand-built model over a two-word
    vocabulary."""

    def setUp(self):
        # Dictionary with two real words; special-symbol indices are fixed by
        # dummy_dictionary, so w1/w2 land at 4 and 5.
        d = test_utils.dummy_dictionary(vocab_size=2)
        self.assertEqual(d.pad(), 1)
        self.assertEqual(d.eos(), 2)
        self.assertEqual(d.unk(), 3)
        self.eos = d.eos()
        self.w1 = 4
        self.w2 = 5
        # Two identical source sentences so both batch elements decode the
        # same search space.
        self.src_tokens = torch.LongTensor([[self.w1, self.w2, self.eos], [self.w1, self.w2, self.eos]])
        self.src_lengths = torch.LongTensor([2, 2])
        args = argparse.Namespace()
        unk = 0.0
        # Per-decoding-step output distributions over [eos, unk, w1, w2],
        # one row per (sentence x beam) hypothesis; consumed by the dummy
        # model built below.
        args.beam_probs = [torch.FloatTensor([[0.0, unk, 0.9, 0.1], [0.0, unk, 0.9, 0.1], [0.0, unk, 0.7, 0.3], [0.0, unk, 0.7, 0.3]]), torch.FloatTensor([[0.0, unk, 0.6, 0.4], [0.0, unk, 0.6, 0.4], [0.25, unk, 0.35, 0.4], [0.25, unk, 0.35, 0.4]]), torch.FloatTensor([[1.0, unk, 0.0, 0.0], [1.0, unk, 0.0, 0.0], [0.9, unk, 0.1, 0.0], [0.9, unk, 0.1, 0.0]])]
        task = test_utils.TestTranslationTask.setup_task(args, d, d)
        self.model = task.build_model(args)
        self.tgt_dict = task.target_dictionary

    def test_diverse_beam_search(self):
        # Two groups with zero diversity strength: each group independently
        # runs greedy search, so both beams in a group agree.
        search_strategy = search.DiverseBeamSearch(self.tgt_dict, num_groups=2, diversity_strength=0.0)
        generator = SequenceGenerator(self.tgt_dict, beam_size=2, search_strategy=search_strategy)
        sample = {'net_input': {'src_tokens': self.src_tokens, 'src_lengths': self.src_lengths}}
        hypos = generator.generate([self.model], sample)
        (eos, w1, w2) = (self.eos, self.w1, self.w2)
        # sentence 1
        self.assertHypoTokens(hypos[0][0], [w1, w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 0.6, 1.0])
        self.assertHypoTokens(hypos[0][1], [w1, w1, eos])
        self.assertHypoScore(hypos[0][1], [0.9, 0.6, 1.0])
        # sentence 2
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.9])
        self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.9])
|
class TestDiverseSiblingsSearch(TestDiverseBeamSearch):
    """Diverse siblings search; reuses the fixture from
    TestDiverseBeamSearch.setUp()."""

    def assertHypoScore(self, hypo, pos_probs, sibling_rank, diversity_rate, normalized=True, lenpen=1.0):
        # Expected positional scores are the log-probs minus the per-step
        # sibling-rank penalty (rank * diversity_rate).
        pos_scores = torch.FloatTensor(pos_probs).log()
        pos_scores.sub_((torch.Tensor(sibling_rank) * diversity_rate))
        self.assertAlmostEqual(hypo['positional_scores'], pos_scores)
        self.assertEqual(pos_scores.numel(), hypo['tokens'].numel())
        score = pos_scores.sum()
        if normalized:
            score /= (pos_scores.numel() ** lenpen)
        self.assertLess(abs((score - hypo['score'])), 1e-06)

    def test_diverse_beam_search(self):
        # Overrides the parent test to exercise DiverseSiblingsSearch with a
        # 0.5 diversity rate; expected sibling ranks are passed per step.
        search_strategy = search.DiverseSiblingsSearch(self.tgt_dict, diversity_rate=0.5)
        generator = SequenceGenerator(self.tgt_dict, beam_size=2, search_strategy=search_strategy)
        sample = {'net_input': {'src_tokens': self.src_tokens, 'src_lengths': self.src_lengths}}
        hypos = generator.generate([self.model], sample)
        (eos, w1, w2) = (self.eos, self.w1, self.w2)
        self.assertHypoTokens(hypos[0][0], [w1, w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 0.6, 1.0], [0, 1, 1], 0.5)
        self.assertHypoTokens(hypos[0][1], [w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.9, 0.4, 1.0], [0, 2, 1], 0.5)
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.9], [0, 1, 1], 0.5)
        self.assertHypoTokens(hypos[1][1], [w1, w1, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.35, 0.9], [0, 2, 1], 0.5)
|
class TestTopPSamplingSearch(TestSequenceGeneratorBase):
    """Nucleus (top-p) sampling on a hand-built model whose second decoding
    step has a known probability split between w1, w2 and EOS."""

    def setUp(self):
        d = test_utils.dummy_dictionary(vocab_size=2)
        self.assertEqual(d.pad(), 1)
        self.assertEqual(d.eos(), 2)
        self.assertEqual(d.unk(), 3)
        self.eos = d.eos()
        self.w1 = 4
        self.w2 = 5
        self.src_tokens = torch.LongTensor([[self.w1, self.w2, self.eos], [self.w1, self.w2, self.eos]])
        self.src_lengths = torch.LongTensor([2, 2])
        args = argparse.Namespace()
        unk = 0.0
        # At step 2 the cumulative mass of the top-1 token (w1) is 0.4 and of
        # the top-2 tokens (w1 + w2) is 0.75; the tests choose sampling_topp
        # below/between these thresholds to control which tokens survive.
        self.min_top2_prob = 0.75
        self.min_top1_prob = 0.4
        w1_prob = self.min_top1_prob
        w2_prob = (self.min_top2_prob - self.min_top1_prob)
        eos_prob = (1 - self.min_top2_prob)
        # Step distributions over [eos, unk, w1, w2]: step 1 is deterministic
        # (w1), step 2 splits as above, step 3 forces EOS.
        args.beam_probs = [torch.FloatTensor([[0.0, unk, 1.0, 0.0], [0.0, unk, 1.0, 0.0], [0.0, unk, 1.0, 0.0], [0.0, unk, 1.0, 0.0]]), torch.FloatTensor([[eos_prob, unk, w1_prob, w2_prob], [eos_prob, unk, w1_prob, w2_prob], [eos_prob, unk, w1_prob, w2_prob], [eos_prob, unk, w1_prob, w2_prob]]), torch.FloatTensor([[1.0, unk, 0.0, 0.0], [1.0, unk, 0.0, 0.0], [1.0, unk, 0.0, 0.0], [1.0, unk, 0.0, 0.0]])]
        task = test_utils.TestTranslationTask.setup_task(args, d, d)
        self.model = task.build_model(args)
        self.tgt_dict = task.target_dictionary

    def test_topp_sampling_search_low_prob(self):
        # With p below the top-1 mass only w1 can be sampled at step 2, so
        # the output is fully deterministic.
        low_sampling_topp = (self.min_top1_prob / 2.0)
        search_strategy = search.Sampling(self.tgt_dict, sampling_topp=low_sampling_topp)
        generator = SequenceGenerator(self.tgt_dict, beam_size=2, search_strategy=search_strategy)
        sample = {'net_input': {'src_tokens': self.src_tokens, 'src_lengths': self.src_lengths}}
        hypos = generator.generate([self.model], sample)
        (eos, w1) = (self.eos, self.w1)
        self.assertHypoTokens(hypos[0][0], [w1, w1, eos])
        self.assertHypoScore(hypos[0][0], [1.0, 0.4, 1.0])
        self.assertHypoTokens(hypos[0][1], [w1, w1, eos])
        self.assertHypoScore(hypos[0][1], [1.0, 0.4, 1.0])
        self.assertHypoTokens(hypos[1][0], [w1, w1, eos])
        self.assertHypoScore(hypos[1][0], [1.0, 0.4, 1.0])
        self.assertHypoTokens(hypos[1][1], [w1, w1, eos])
        self.assertHypoScore(hypos[1][1], [1.0, 0.4, 1.0])

    def test_topp_sampling_search_high_prob(self):
        # With p between the top-1 and top-2 masses either w1 or w2 may be
        # sampled at step 2, so each hypothesis must match one of the two
        # possible outcomes (hence the non-asserting hypo* predicates below).
        high_sampling_topp = ((self.min_top1_prob + self.min_top2_prob) / 2.0)
        search_strategy = search.Sampling(self.tgt_dict, sampling_topp=high_sampling_topp)
        generator = SequenceGenerator(self.tgt_dict, beam_size=2, search_strategy=search_strategy)
        sample = {'net_input': {'src_tokens': self.src_tokens, 'src_lengths': self.src_lengths}}
        hypos = generator.generate([self.model], sample)
        (eos, w1, w2) = (self.eos, self.w1, self.w2)
        self.assertTrue((self.hypoTokens(hypos[0][0], [w1, w1, eos]) or self.hypoTokens(hypos[0][0], [w1, w2, eos])))
        self.assertTrue((self.hypoScore(hypos[0][0], [1.0, 0.4, 1.0]) or self.hypoScore(hypos[0][0], [1.0, 0.35, 1.0])))
        self.assertTrue((self.hypoTokens(hypos[0][1], [w1, w1, eos]) or self.hypoTokens(hypos[0][1], [w1, w2, eos])))
        self.assertTrue((self.hypoScore(hypos[0][1], [1.0, 0.4, 1.0]) or self.hypoScore(hypos[0][1], [1.0, 0.35, 1.0])))
        self.assertTrue((self.hypoTokens(hypos[1][0], [w1, w1, eos]) or self.hypoTokens(hypos[1][0], [w1, w2, eos])))
        self.assertTrue((self.hypoScore(hypos[1][0], [1.0, 0.4, 1.0]) or self.hypoScore(hypos[1][0], [1.0, 0.35, 1.0])))
        self.assertTrue((self.hypoTokens(hypos[1][1], [w1, w1, eos]) or self.hypoTokens(hypos[1][1], [w1, w2, eos])))
        self.assertTrue((self.hypoScore(hypos[1][1], [1.0, 0.4, 1.0]) or self.hypoScore(hypos[1][1], [1.0, 0.35, 1.0])))

    def hypoTokens(self, hypo, tokens):
        # Boolean (non-asserting) variant of assertHypoTokens, used to test
        # "one of several acceptable outputs" above.
        return self.tensorEqual(hypo['tokens'], torch.LongTensor(tokens))

    def hypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.0):
        # Boolean (non-asserting) variant of assertHypoScore.
        pos_scores = torch.FloatTensor(pos_probs).log()
        if (not self.almostEqual(hypo['positional_scores'], pos_scores)):
            return False
        if (pos_scores.numel() != hypo['tokens'].numel()):
            return False
        score = pos_scores.sum()
        if normalized:
            score /= (pos_scores.numel() ** lenpen)
        return (abs((score - hypo['score'])) < 1e-06)

    def almostEqual(self, t1, t2):
        # Boolean element-wise closeness check.
        return ((t1.size() == t2.size()) and ((t1 - t2).abs().max() < 0.0001))

    def tensorEqual(self, t1, t2):
        # Boolean exact-equality check.
        return ((t1.size() == t2.size()) and (t1.ne(t2).long().sum() == 0))
|
class TestSequenceScorer(unittest.TestCase):
    """SequenceScorer must return the forced-decoding scores of the given
    targets under the hand-crafted dummy model."""

    def test_sequence_scorer(self):
        d = test_utils.dummy_dictionary(vocab_size=2)
        self.assertEqual(d.pad(), 1)
        self.assertEqual(d.eos(), 2)
        self.assertEqual(d.unk(), 3)
        eos = d.eos()
        w1 = 4
        w2 = 5
        # Three source/target pairs of different lengths.
        data = [{'source': torch.LongTensor([w1, w2, eos]), 'target': torch.LongTensor([w1, w2, w1, eos])}, {'source': torch.LongTensor([w2, eos]), 'target': torch.LongTensor([w2, w1, eos])}, {'source': torch.LongTensor([w2, eos]), 'target': torch.LongTensor([w2, eos])}]
        data_itr = test_utils.dummy_dataloader(data)
        args = argparse.Namespace()
        unk = 0.0
        # Per-step output distributions over [eos, unk, w1, w2], one row per
        # sentence; `expected_scores` below reads off the probability the
        # model assigns to each target token at each step.
        args.beam_probs = [torch.FloatTensor([[0.0, unk, 0.6, 0.4], [0.0, unk, 0.4, 0.6], [0.0, unk, 0.7, 0.3]]), torch.FloatTensor([[0.0, unk, 0.2, 0.7], [0.0, unk, 0.8, 0.2], [0.7, unk, 0.1, 0.2]]), torch.FloatTensor([[0.1, unk, 0.5, 0.4], [0.15, unk, 0.15, 0.7], [0.0, unk, 0.0, 0.0]]), torch.FloatTensor([[0.9, unk, 0.05, 0.05], [0.0, unk, 0.0, 0.0], [0.0, unk, 0.0, 0.0]])]
        expected_scores = [[0.6, 0.7, 0.5, 0.9], [0.6, 0.8, 0.15], [0.3, 0.7]]
        task = test_utils.TestTranslationTask.setup_task(args, d, d)
        model = task.build_model(args)
        scorer = SequenceScorer(task.target_dictionary)
        for sample in data_itr:
            hypos = task.inference_step(scorer, [model], sample)
            # Samples may be shuffled/batched, so match results to the
            # original examples by id.
            for (id, hypos_id) in zip(sample['id'].tolist(), hypos):
                self.assertHypoTokens(hypos_id[0], data[id]['target'])
                self.assertHypoScore(hypos_id[0], expected_scores[id])

    def assertHypoTokens(self, hypo, tokens):
        # The scored token sequence must equal the forced target.
        self.assertTensorEqual(hypo['tokens'], torch.LongTensor(tokens))

    def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.0):
        # Positional scores are log-probs; the total is length-normalized by
        # default.
        pos_scores = torch.FloatTensor(pos_probs).log()
        self.assertAlmostEqual(hypo['positional_scores'], pos_scores)
        self.assertEqual(pos_scores.numel(), hypo['tokens'].numel())
        score = pos_scores.sum()
        if normalized:
            score /= (pos_scores.numel() ** lenpen)
        self.assertLess(abs((score - hypo['score'])), 1e-06)

    def assertAlmostEqual(self, t1, t2):
        # Element-wise closeness with a fixed absolute tolerance.
        self.assertEqual(t1.size(), t2.size(), 'size mismatch')
        self.assertLess((t1 - t2).abs().max(), 0.0001)

    def assertTensorEqual(self, t1, t2):
        # Exact element-wise equality.
        self.assertEqual(t1.size(), t2.size(), 'size mismatch')
        self.assertEqual(t1.ne(t2).long().sum(), 0)
|
class TestSparseMultiheadAttention(unittest.TestCase):
    """Checks the buffered sparse attention masks produced by
    SparseMultiheadAttention for both directionality modes."""

    def test_sparse_multihead_attention(self):
        attn_weights = torch.randn(1, 8, 8)
        # Expected mask for the bidirectional variant (stride=4,
        # expressivity=1): -inf marks disallowed attention positions.
        bidirectional_sparse_mask = torch.tensor([[0, 0, 0, 0, 0, float('-inf'), float('-inf'), 0], [0, 0, 0, 0, 0, float('-inf'), float('-inf'), 0], [0, 0, 0, 0, 0, float('-inf'), float('-inf'), 0], [0, 0, 0, 0, 0, float('-inf'), float('-inf'), 0], [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, 0], [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, 0], [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, 0], [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, 0]])
        bidirectional_attention = SparseMultiheadAttention(16, 1, stride=4, expressivity=1, is_bidirectional=True)
        bidirectional_attention_sparse_mask = bidirectional_attention.buffered_sparse_mask(attn_weights, 8, 8)
        # Bug fix: the comparison result was previously discarded, so the
        # test could never fail; assert it explicitly.
        self.assertTrue(torch.all(torch.eq(bidirectional_attention_sparse_mask, bidirectional_sparse_mask)))

        # Expected mask for the unidirectional (causal) variant.
        sparse_mask = torch.tensor([[0, float('-inf'), float('-inf'), float('-inf'), float('-inf'), float('-inf'), float('-inf'), float('-inf')], [0, 0, float('-inf'), float('-inf'), float('-inf'), float('-inf'), float('-inf'), float('-inf')], [0, 0, 0, float('-inf'), float('-inf'), float('-inf'), float('-inf'), float('-inf')], [0, 0, 0, 0, float('-inf'), float('-inf'), float('-inf'), float('-inf')], [0, 0, 0, 0, 0, float('-inf'), float('-inf'), float('-inf')], [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, float('-inf'), float('-inf')], [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, float('-inf')], [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, 0]])
        attention = SparseMultiheadAttention(16, 1, stride=4, expressivity=1, is_bidirectional=False)
        attention_sparse_mask = attention.buffered_sparse_mask(attn_weights, 8, 8)
        self.assertTrue(torch.all(torch.eq(attention_sparse_mask, sparse_mask)))
|
class TestTokenBlockDataset(unittest.TestCase):
    """Exercises TokenBlockDataset under its different break modes."""

    def _build_dataset(self, data, **kwargs):
        # Wrap the raw tensors in a dummy dataset and hand it, with the
        # per-item lengths, to TokenBlockDataset.
        lengths = [len(item) for item in data]
        base_dataset = test_utils.TestDataset(data)
        return TokenBlockDataset(base_dataset, lengths, **kwargs)

    def test_eos_break_mode(self):
        # 'eos' mode yields one block per sentence, split at EOS (= 1).
        data = [torch.tensor([5, 4, 3, 2, 1], dtype=torch.long), torch.tensor([1], dtype=torch.long), torch.tensor([8, 7, 6, 1], dtype=torch.long)]
        ds = self._build_dataset(data, block_size=None, pad=0, eos=1, break_mode='eos')
        self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1])
        self.assertEqual(ds[1].tolist(), [1])
        self.assertEqual(ds[2].tolist(), [8, 7, 6, 1])
        # Sentence order must be preserved regardless of where the length-1
        # sentence sits.
        data = [torch.tensor([5, 4, 3, 2, 1], dtype=torch.long), torch.tensor([8, 7, 6, 1], dtype=torch.long), torch.tensor([1], dtype=torch.long)]
        ds = self._build_dataset(data, block_size=None, pad=0, eos=1, break_mode='eos')
        self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1])
        self.assertEqual(ds[1].tolist(), [8, 7, 6, 1])
        self.assertEqual(ds[2].tolist(), [1])

    def test_block_break_mode(self):
        # 'none' mode concatenates everything and cuts fixed-size blocks,
        # crossing sentence boundaries; the final block may be short.
        data = [torch.tensor([5, 4, 3, 2, 1], dtype=torch.long), torch.tensor([8, 7, 6, 1], dtype=torch.long), torch.tensor([9, 1], dtype=torch.long)]
        ds = self._build_dataset(data, block_size=3, pad=0, eos=1, break_mode='none')
        self.assertEqual(ds[0].tolist(), [5, 4, 3])
        self.assertEqual(ds[1].tolist(), [2, 1, 8])
        self.assertEqual(ds[2].tolist(), [7, 6, 1])
        self.assertEqual(ds[3].tolist(), [9, 1])

    def test_complete_break_mode(self):
        # 'complete' mode packs whole sentences into blocks of up to
        # block_size tokens without splitting any sentence.
        data = [torch.tensor([5, 4, 3, 2, 1], dtype=torch.long), torch.tensor([8, 7, 6, 1], dtype=torch.long), torch.tensor([9, 1], dtype=torch.long)]
        ds = self._build_dataset(data, block_size=6, pad=0, eos=1, break_mode='complete')
        self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1])
        self.assertEqual(ds[1].tolist(), [8, 7, 6, 1, 9, 1])
        data = [torch.tensor([4, 3, 2, 1], dtype=torch.long), torch.tensor([5, 1], dtype=torch.long), torch.tensor([1], dtype=torch.long), torch.tensor([6, 1], dtype=torch.long)]
        ds = self._build_dataset(data, block_size=3, pad=0, eos=1, break_mode='complete')
        self.assertEqual(ds[0].tolist(), [4, 3, 2, 1])
        self.assertEqual(ds[1].tolist(), [5, 1, 1])
        self.assertEqual(ds[2].tolist(), [6, 1])
|
def mock_trainer(epoch, num_updates, iterations_in_epoch):
    """Build a MagicMock trainer whose loaded checkpoint reports the given
    epoch / update-count / mid-epoch-iteration state (shuffle disabled)."""
    trainer = MagicMock()
    iterator_state = {'epoch': epoch, 'iterations_in_epoch': iterations_in_epoch, 'shuffle': False}
    trainer.load_checkpoint.return_value = {'train_iterator': iterator_state}
    trainer.get_num_updates.return_value = num_updates
    return trainer
|
def mock_dict():
    """Return a MagicMock vocabulary exposing the standard special-symbol
    indices (pad=1, eos=2, unk=3)."""
    vocab = MagicMock()
    for symbol, index in (('pad', 1), ('eos', 2), ('unk', 3)):
        getattr(vocab, symbol).return_value = index
    return vocab
|
def get_trainer_and_epoch_itr(epoch, epoch_size, num_updates, iterations_in_epoch):
    """Build a mocked trainer together with a real EpochBatchIterator over a
    toy dataset of `epoch_size` one-token examples (batch size 1)."""
    tokens = torch.LongTensor(list(range(epoch_size))).view(1, -1)
    token_blocks = data.TokenBlockDataset(tokens, sizes=[tokens.size(-1)], block_size=1, pad=0, eos=1, include_targets=False)
    trainer = mock_trainer(epoch, num_updates, iterations_in_epoch)
    pair_dataset = data.LanguagePairDataset(token_blocks, token_blocks.sizes, mock_dict(), shuffle=False)
    batch_iterator = data.EpochBatchIterator(dataset=pair_dataset, collate_fn=pair_dataset.collater, batch_sampler=[[i] for i in range(epoch_size)])
    return (trainer, batch_iterator)
|
class TestLoadCheckpoint(unittest.TestCase):
    """checkpoint_utils.load_checkpoint() resume behavior, with the
    filesystem mocked out so no real checkpoint files are needed."""

    def setUp(self):
        self.args_mock = MagicMock()
        self.args_mock.optimizer_overrides = '{}'
        self.args_mock.reset_dataloader = False
        self.args_mock.reset_meters = False
        self.args_mock.reset_optimizer = False
        # Patch os helpers so load_checkpoint "finds" a checkpoint without
        # touching the real filesystem.
        self.patches = {'os.makedirs': MagicMock(), 'os.path.join': MagicMock(), 'os.path.isfile': MagicMock(return_value=True), 'os.path.isabs': MagicMock(return_value=False)}
        self.applied_patches = [patch(p, d) for (p, d) in self.patches.items()]
        [p.start() for p in self.applied_patches]

    def test_load_partial_checkpoint(self):
        with contextlib.redirect_stdout(StringIO()):
            # Checkpoint state: epoch 2, with 50 of 150 iterations done.
            (trainer, epoch_itr) = get_trainer_and_epoch_itr(2, 150, 200, 50)
            trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
            (_, epoch_itr) = checkpoint_utils.load_checkpoint(self.args_mock, trainer)
            self.assertEqual(epoch_itr.epoch, 2)
            self.assertEqual(epoch_itr.iterations_in_epoch, 50)
            # Resuming mid-epoch keeps the epoch number and the offset.
            itr = epoch_itr.next_epoch_itr(shuffle=False)
            self.assertEqual(epoch_itr.epoch, 2)
            self.assertEqual(epoch_itr.iterations_in_epoch, 50)
            # The first batch served is the 51st example (token value 50).
            self.assertEqual(next(itr)['net_input']['src_tokens'][0].item(), 50)
            self.assertEqual(epoch_itr.iterations_in_epoch, 51)
            # Drain the rest of the epoch.
            for _ in range((150 - 52)):
                next(itr)
            self.assertEqual(epoch_itr.iterations_in_epoch, 149)
            self.assertTrue(itr.has_next())
            next(itr)
            self.assertFalse(itr.has_next())
            # The following epoch starts fresh at iteration 0.
            itr = epoch_itr.next_epoch_itr(shuffle=False)
            self.assertTrue(itr.has_next())
            self.assertEqual(epoch_itr.epoch, 3)
            self.assertEqual(epoch_itr.iterations_in_epoch, 0)

    def test_load_full_checkpoint(self):
        with contextlib.redirect_stdout(StringIO()):
            # Checkpoint taken exactly at the end of epoch 2, so the next
            # iterator should begin epoch 3 from example 0.
            (trainer, epoch_itr) = get_trainer_and_epoch_itr(2, 150, 300, 150)
            trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
            (_, epoch_itr) = checkpoint_utils.load_checkpoint(self.args_mock, trainer)
            itr = epoch_itr.next_epoch_itr(shuffle=False)
            self.assertEqual(epoch_itr.epoch, 3)
            self.assertEqual(epoch_itr.iterations_in_epoch, 0)
            self.assertEqual(next(itr)['net_input']['src_tokens'][0].item(), 0)

    def test_load_no_checkpoint(self):
        with contextlib.redirect_stdout(StringIO()):
            (trainer, epoch_itr) = get_trainer_and_epoch_itr(0, 150, 0, 0)
            trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
            # Simulate no checkpoint file on disk: training starts at epoch 1.
            self.patches['os.path.isfile'].return_value = False
            (_, epoch_itr) = checkpoint_utils.load_checkpoint(self.args_mock, trainer)
            itr = epoch_itr.next_epoch_itr(shuffle=False)
            self.assertEqual(epoch_itr.epoch, 1)
            self.assertEqual(epoch_itr.iterations_in_epoch, 0)
            self.assertEqual(next(itr)['net_input']['src_tokens'][0].item(), 0)

    def tearDown(self):
        # Undo all patches started in setUp.
        patch.stopall()
|
class TestUtils(unittest.TestCase):
    """Tests for padding-direction conversion and position computation."""

    def test_convert_padding_direction(self):
        # A left-padded batch converts to its right-padded equivalent and
        # back; already-full rows are unchanged.
        pad = 1
        left_pad = torch.LongTensor([[2, 3, 4, 5, 6], [1, 7, 8, 9, 10], [1, 1, 1, 11, 12]])
        right_pad = torch.LongTensor([[2, 3, 4, 5, 6], [7, 8, 9, 10, 1], [11, 12, 1, 1, 1]])
        self.assertAlmostEqual(right_pad, utils.convert_padding_direction(left_pad, pad, left_to_right=True))
        self.assertAlmostEqual(left_pad, utils.convert_padding_direction(right_pad, pad, right_to_left=True))

    def test_make_positions(self):
        # Positions start at pad + 1 for real tokens; padding positions keep
        # the pad index itself, for both padding directions.
        pad = 1
        left_pad_input = torch.LongTensor([[9, 9, 9, 9, 9], [1, 9, 9, 9, 9], [1, 1, 1, 9, 9]])
        left_pad_output = torch.LongTensor([[2, 3, 4, 5, 6], [1, 2, 3, 4, 5], [1, 1, 1, 2, 3]])
        right_pad_input = torch.LongTensor([[9, 9, 9, 9, 9], [9, 9, 9, 9, 1], [9, 9, 1, 1, 1]])
        right_pad_output = torch.LongTensor([[2, 3, 4, 5, 6], [2, 3, 4, 5, 1], [2, 3, 1, 1, 1]])
        self.assertAlmostEqual(left_pad_output, utils.make_positions(left_pad_input, pad))
        self.assertAlmostEqual(right_pad_output, utils.make_positions(right_pad_input, pad))

    def assertAlmostEqual(self, t1, t2):
        # Element-wise closeness with a fixed absolute tolerance.
        self.assertEqual(t1.size(), t2.size(), 'size mismatch')
        self.assertLess(utils.item((t1 - t2).abs().max()), 0.0001)
|
class CrfRnnNet(Fcn8s):
    """The full CRF-RNN network with the FCN-8s backbone as described in the paper:

    Conditional Random Fields as Recurrent Neural Networks,
    S. Zheng, S. Jayasumana, B. Romera-Paredes, V. Vineet, Z. Su, D. Du, C. Huang and P. Torr,
    ICCV 2015 (https://arxiv.org/abs/1502.03240).
    """

    def __init__(self):
        super(CrfRnnNet, self).__init__()
        # CRF-RNN refinement head applied on top of the FCN-8s output.
        self.crfrnn = CrfRnn(num_labels=21, num_iterations=10)

    def forward(self, image):
        # Coarse segmentation logits from the FCN-8s backbone, refined by
        # mean-field CRF inference conditioned on the input image.
        backbone_logits = super(CrfRnnNet, self).forward(image)
        return self.crfrnn(image, backbone_logits)
|
class CrfRnn(nn.Module):
    """
    PyTorch implementation of the CRF-RNN module described in the paper:

    Conditional Random Fields as Recurrent Neural Networks,
    S. Zheng, S. Jayasumana, B. Romera-Paredes, V. Vineet, Z. Su, D. Du, C. Huang and P. Torr,
    ICCV 2015 (https://arxiv.org/abs/1502.03240).
    """

    def __init__(self, num_labels, num_iterations=5, crf_init_params=None):
        """
        Create a new instance of the CRF-RNN layer.

        Args:
            num_labels: Number of semantic labels in the dataset
            num_iterations: Number of mean-field iterations to perform
            crf_init_params: CRF initialization parameters
        """
        super(CrfRnn, self).__init__()
        if (crf_init_params is None):
            crf_init_params = DenseCRFParams()
        self.params = crf_init_params
        self.num_iterations = num_iterations
        # Softmax over the label dimension (dim 0 of a (labels, h, w) tensor).
        self._softmax = torch.nn.Softmax(dim=0)
        self.num_labels = num_labels
        # Learnable kernel weights, initialized as scaled identity matrices.
        self.spatial_ker_weights = nn.Parameter((crf_init_params.spatial_ker_weight * torch.eye(num_labels, dtype=torch.float32)))
        self.bilateral_ker_weights = nn.Parameter((crf_init_params.bilateral_ker_weight * torch.eye(num_labels, dtype=torch.float32)))
        # Learnable label-compatibility transform, initialized to identity.
        self.compatibility_matrix = nn.Parameter(torch.eye(num_labels, dtype=torch.float32))

    def forward(self, image, logits):
        """
        Perform CRF inference.

        Args:
            image: Tensor of shape (3, h, w) containing the RGB image
            logits: Tensor of shape (num_classes, h, w) containing the unary logits
        Returns:
            log-Q distributions (logits) after CRF inference
        """
        if (logits.shape[0] != 1):
            raise ValueError('Only batch size 1 is currently supported!')
        # Drop the batch dimension.
        image = image[0]
        logits = logits[0]
        # Gaussian filters conditioned on the image; rebuilt per forward call.
        spatial_filter = SpatialFilter(image, gamma=self.params.gamma)
        bilateral_filter = BilateralFilter(image, alpha=self.params.alpha, beta=self.params.beta)
        (_, h, w) = image.shape
        cur_logits = logits
        for _ in range(self.num_iterations):
            # Normalize current logits to a Q distribution over labels.
            q_values = self._softmax(cur_logits)
            # Message passing: filter Q with each kernel, then mix labels via
            # the learned kernel weights.
            spatial_out = torch.mm(self.spatial_ker_weights, spatial_filter.apply(q_values).view(self.num_labels, (- 1)))
            bilateral_out = torch.mm(self.bilateral_ker_weights, bilateral_filter.apply(q_values).view(self.num_labels, (- 1)))
            msg_passing_out = (spatial_out + bilateral_out)
            # Compatibility transform, then add the unary term back in.
            msg_passing_out = torch.mm(self.compatibility_matrix, msg_passing_out).view(self.num_labels, h, w)
            cur_logits = (msg_passing_out + logits)
        # Restore the batch dimension.
        return torch.unsqueeze(cur_logits, 0)
|
class PermutoFunction(torch.autograd.Function):
    """Autograd wrapper around the C++ permutohedral-lattice filtering op."""

    @staticmethod
    def forward(ctx, q_in, features):
        # Filter q_in through the permutohedral lattice built from `features`.
        q_out = permuto_cpp.forward(q_in, features)[0]
        # Only the features are saved for backward — presumably the filter is
        # linear in q_in, so q_in itself is not needed (TODO confirm against
        # permuto_cpp).
        ctx.save_for_backward(features)
        return q_out

    @staticmethod
    def backward(ctx, grad_q_out):
        feature_saved = ctx.saved_tensors[0]
        grad_q_back = permuto_cpp.backward(grad_q_out.contiguous(), feature_saved.contiguous())[0]
        # No gradient w.r.t. the features (second forward input).
        return (grad_q_back, None)
|
def _spatial_features(image, sigma):
    """
    Return the spatial features as a Tensor

    Args:
        image: Image as a Tensor of shape (channels, height, width)
        sigma: Bandwidth parameter

    Returns:
        Tensor of shape [h, w, 2] with spatial features
    """
    sigma = float(sigma)
    (_, h, w) = image.size()
    # Column (x) coordinates repeated down the rows, scaled by the bandwidth.
    x = torch.arange(start=0, end=w, dtype=torch.float32, device=_CPU)
    xx = x.repeat([h, 1]) / sigma
    # Row (y) coordinates repeated across the columns. Consistency fix: use
    # the module-level _CPU device constant here as well, instead of
    # re-creating torch.device('cpu') inline as before.
    y = torch.arange(start=0, end=h, dtype=torch.float32, device=_CPU).view(-1, 1)
    yy = y.repeat([1, w]) / sigma
    return torch.stack([xx, yy], dim=2)
|
class AbstractFilter(ABC):
    """Base class for permutohedral-lattice Gaussian filters.

    Subclasses define the per-pixel feature vectors; this class performs the
    filtering and the normalization that turns it into a weighted average.
    """

    def __init__(self, image):
        self.features = self._calc_features(image)
        self.norm = self._calc_norm(image)

    def apply(self, input_):
        # Filter the input, then rescale by the precomputed normalizer.
        filtered = PermutoFunction.apply(input_, self.features)
        return filtered * self.norm

    @abstractmethod
    def _calc_features(self, image):
        """Compute the feature tensor that defines this filter."""

    def _calc_norm(self, image):
        # Filtering an all-ones tensor yields the per-pixel normalization
        # denominator; _EPS guards against division by zero.
        (_, height, width) = image.size()
        ones = torch.ones((1, height, width), dtype=torch.float32, device=_CPU)
        denom = PermutoFunction.apply(ones, self.features)
        return 1.0 / (denom + _EPS)
|
class SpatialFilter(AbstractFilter):
    """Gaussian filter over the spatial ([x, y]) domain."""

    def __init__(self, image, gamma):
        """
        Create new instance

        Args:
            image: Image tensor of shape (3, height, width)
            gamma: Standard deviation
        """
        # gamma must be set before the base __init__ triggers _calc_features.
        self.gamma = gamma
        super(SpatialFilter, self).__init__(image)

    def _calc_features(self, image):
        # Pixel coordinates scaled by the spatial bandwidth.
        return _spatial_features(image, self.gamma)
|
class BilateralFilter(AbstractFilter):
    """Gaussian filter over the bilateral ([r, g, b, x, y]) domain."""

    def __init__(self, image, alpha, beta):
        """
        Create new instance

        Args:
            image: Image tensor of shape (3, height, width)
            alpha: Smoothness (spatial) sigma
            beta: Appearance (color) sigma
        """
        # Sigmas must be set before the base __init__ triggers _calc_features.
        self.alpha = alpha
        self.beta = beta
        super(BilateralFilter, self).__init__(image)

    def _calc_features(self, image):
        # Spatial part scaled by alpha; color part scaled by beta, permuted
        # to channels-last so it concatenates with the (h, w, 2) xy grid.
        spatial = _spatial_features(image, self.alpha)
        color = (image / float(self.beta)).permute(1, 2, 0)
        return torch.cat([spatial, color], dim=2)
|
class DenseCRFParams(object):
    """Parameters for the DenseCRF model.

    Default values were taken from https://github.com/sadeepj/crfasrnn_keras.
    More details about these parameters can be found in
    https://arxiv.org/pdf/1210.5644.pdf

    Args:
        alpha: Bandwidth for the spatial component of the bilateral filter
        beta: Bandwidth for the color component of the bilateral filter
        gamma: Bandwidth for the spatial filter
        spatial_ker_weight: Spatial kernel weight
        bilateral_ker_weight: Bilateral kernel weight
    """

    def __init__(self, alpha=160.0, beta=3.0, gamma=3.0, spatial_ker_weight=3.0, bilateral_ker_weight=5.0):
        self.alpha = alpha
        self.beta = beta
        self.gamma = gamma
        self.spatial_ker_weight = spatial_ker_weight
        self.bilateral_ker_weight = bilateral_ker_weight
|
def main():
    """Run CRF-RNN inference on a single image supplied via CLI arguments."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', help='Path to the .pth file (download from https://tinyurl.com/crfasrnn-weights-pth)', required=True)
    parser.add_argument('--image', help='Path to the input image', required=True)
    parser.add_argument('--output', help='Path to the output label image', default=None)
    args = parser.parse_args()
    (img_data, img_h, img_w, size) = util.get_preprocessed_image(args.image)
    # Bug fix: the original referenced the misspelled ``args.imaage`` here,
    # raising AttributeError whenever --output was omitted.
    output_file = (args.output or (args.image + '_labels.png'))
    model = CrfRnnNet()
    model.load_state_dict(torch.load(args.weights))
    model.eval()
    out = model.forward(torch.from_numpy(img_data))
    # Drop the leading axis — presumably the batch dim (TODO confirm).
    probs = out.detach().numpy()[0]
    label_im = util.get_label_image(probs, img_h, img_w, size)
    label_im.save(output_file)
|
def main():
    """Run CRF-RNN inference with hard-coded input/weights/output paths.

    NOTE(review): this redefines ``main`` from the CLI section above; if both
    live in one module, this later definition shadows the earlier one.
    """
    input_file = 'image.jpg'
    output_file = 'labels.png'
    (img_data, img_h, img_w, size) = util.get_preprocessed_image(input_file)
    saved_weights_path = 'crfasrnn_weights.pth'
    model = CrfRnnNet()
    model.load_state_dict(torch.load(saved_weights_path))
    model.eval()
    out = model.forward(torch.from_numpy(img_data))
    # Drop the leading axis — presumably the batch dim (TODO confirm).
    probs = out.detach().numpy()[0]
    label_im = util.get_label_image(probs, img_h, img_w, size)
    label_im.save(output_file)
|
class CrossEntropyLoss2d(nn.Module):
    """2D cross-entropy: log-softmax over the channel axis followed by NLL.

    Expects ``outputs`` of shape (N, C, H, W) and integer ``targets`` of
    shape (N, H, W).
    """

    def __init__(self, weight=None):
        """
        Args:
            weight: Optional per-class rescaling weights (tensor of size C).
        """
        super().__init__()
        # nn.NLLLoss2d is deprecated; nn.NLLLoss handles the 2D case directly.
        self.loss = nn.NLLLoss(weight)

    def forward(self, outputs, targets):
        # dim=1 makes the softmax axis explicit (the class/channel axis);
        # relying on the legacy implicit-dim default is deprecated behavior.
        return self.loss(F.log_softmax(outputs, dim=1), targets)
|
class MyDataset(torch.utils.data.Dataset):
    """Dataset yielding (image, label) pairs loaded with OpenCV.

    Images are read in OpenCV's default color mode; labels as
    single-channel index maps.
    """

    def __init__(self, imList, labelList, transform=None):
        """
        Args:
            imList: List of image file paths
            labelList: List of label-image file paths (parallel to imList)
            transform: Optional joint transform applied to (image, label)
        """
        self.imList = imList
        self.labelList = labelList
        self.transform = transform

    def __len__(self):
        return len(self.imList)

    def __getitem__(self, idx):
        img_path = self.imList[idx]
        lbl_path = self.labelList[idx]
        image = cv2.imread(img_path)
        label = cv2.imread(lbl_path, 0)  # flag 0 -> grayscale
        if self.transform:
            image, label = self.transform(image, label)
        return (image, label)
|
class iouEval():
    """Accumulates segmentation metrics (accuracy, per-class IoU, mIoU)
    over batches; ``getMetric`` returns their averages.
    """

    def __init__(self, nClasses):
        """
        Args:
            nClasses: Number of segmentation classes.
        """
        self.nClasses = nClasses
        self.reset()

    def reset(self):
        """Clear all running sums and the batch counter."""
        self.overall_acc = 0
        self.per_class_acc = np.zeros(self.nClasses, dtype=np.float32)
        self.per_class_iu = np.zeros(self.nClasses, dtype=np.float32)
        self.mIOU = 0
        # Bug fix: starting the counter at 1 made getMetric() divide by
        # (num_batches + 1), biasing every averaged metric low.
        self.batchCount = 0

    def fast_hist(self, a, b):
        """Confusion matrix between ground truth ``a`` and prediction ``b``."""
        k = ((a >= 0) & (a < self.nClasses))  # mask out-of-range labels
        return np.bincount(((self.nClasses * a[k].astype(int)) + b[k]), minlength=(self.nClasses ** 2)).reshape(self.nClasses, self.nClasses)

    def compute_hist(self, predict, gth):
        hist = self.fast_hist(gth, predict)
        return hist

    def addBatch(self, predict, gth):
        """Accumulate metrics for one batch of predictions vs. ground truth."""
        predict = predict.cpu().numpy().flatten()
        gth = gth.cpu().numpy().flatten()
        epsilon = 1e-08  # guards divisions by zero on empty rows/classes
        hist = self.compute_hist(predict, gth)
        overall_acc = (np.diag(hist).sum() / (hist.sum() + epsilon))
        per_class_acc = (np.diag(hist) / (hist.sum(1) + epsilon))
        per_class_iu = (np.diag(hist) / (((hist.sum(1) + hist.sum(0)) - np.diag(hist)) + epsilon))
        mIou = np.nanmean(per_class_iu)
        self.overall_acc += overall_acc
        self.per_class_acc += per_class_acc
        self.per_class_iu += per_class_iu
        self.mIOU += mIou
        self.batchCount += 1

    def getMetric(self):
        """Return (overall_acc, per_class_acc, per_class_iu, mIOU) averaged
        over the batches added so far."""
        # max(..., 1) keeps a call before any addBatch() from dividing by 0.
        count = max(self.batchCount, 1)
        overall_acc = (self.overall_acc / count)
        per_class_acc = (self.per_class_acc / count)
        per_class_iu = (self.per_class_iu / count)
        mIOU = (self.mIOU / count)
        return (overall_acc, per_class_acc, per_class_iu, mIOU)
|
class CBR(nn.Module):
    """Conv -> BatchNorm -> ReLU block with 'same' padding."""

    def __init__(self, nIn, nOut, kSize, stride=1):
        super().__init__()
        pad = (kSize - 1) // 2  # keeps spatial size for odd kernels at stride 1
        self.conv = nn.Conv2d(nIn, nOut, kSize, stride=stride, padding=pad, bias=False)
        self.bn = nn.BatchNorm2d(nOut, momentum=0.95, eps=0.001)
        self.act = nn.ReLU(True)

    def forward(self, input):
        return self.act(self.bn(self.conv(input)))
|
class CB(nn.Module):
    """Conv -> BatchNorm block (no activation), 'same' padding."""

    def __init__(self, nIn, nOut, kSize, stride=1):
        super().__init__()
        pad = (kSize - 1) // 2
        self.conv = nn.Conv2d(nIn, nOut, kSize, stride=stride, padding=pad, bias=False)
        self.bn = nn.BatchNorm2d(nOut, momentum=0.95, eps=0.001)

    def forward(self, input):
        return self.bn(self.conv(input))
|
class C(nn.Module):
    """Plain convolution with 'same' padding and no bias."""

    def __init__(self, nIn, nOut, kSize, stride=1):
        super().__init__()
        pad = (kSize - 1) // 2
        self.conv = nn.Conv2d(nIn, nOut, kSize, stride=stride, padding=pad, bias=False)

    def forward(self, input):
        return self.conv(input)
|
class BasicResidualBlock(nn.Module):
    """Two-conv residual block: ReLU(input + CB(CBR(input))).

    The skip connection requires nIn == nOut. ``prob`` is accepted for
    signature compatibility with the dilated blocks but is unused here.
    """

    def __init__(self, nIn, nOut, prob=0.03):
        super().__init__()
        self.c1 = CBR(nIn, nOut, 3, 1)
        self.c2 = CB(nOut, nOut, 3, 1)
        self.act = nn.ReLU(True)

    def forward(self, input):
        residual = self.c2(self.c1(input))
        return self.act(input + residual)
|
class DownSamplerA(nn.Module):
    """Halves spatial resolution with a single stride-2 CBR block."""

    def __init__(self, nIn, nOut):
        super().__init__()
        self.conv = CBR(nIn, nOut, 3, 2)

    def forward(self, input):
        return self.conv(input)
|
class BR(nn.Module):
    """BatchNorm -> ReLU block (no convolution)."""

    def __init__(self, nOut):
        super().__init__()
        self.bn = nn.BatchNorm2d(nOut, momentum=0.95, eps=0.001)
        self.act = nn.ReLU(True)

    def forward(self, input):
        return self.act(self.bn(input))
|
class CDilated(nn.Module):
    """Dilated convolution with padding chosen to preserve spatial size."""

    def __init__(self, nIn, nOut, kSize, stride=1, d=1):
        super().__init__()
        pad = ((kSize - 1) // 2) * d  # effective kernel grows with dilation
        self.conv = nn.Conv2d(nIn, nOut, (kSize, kSize), stride=stride, padding=(pad, pad), bias=False, dilation=d)

    def forward(self, input):
        return self.conv(input)
|
class DilatedParllelResidualBlockB1(nn.Module):
    """ESP block from ESPNet: reduce, run parallel dilated convolutions,
    hierarchically fuse their outputs, and add the input back (residual).
    Requires nIn == nOut for the residual connection.
    See: ESPNet: Efficient Spatial Pyramid of Dilated Convolutions for
    Semantic Segmentation, https://arxiv.org/abs/1803.06815
    """

    def __init__(self, nIn, nOut, prob=0.03):
        super().__init__()
        branches = 4
        n = nOut // branches
        n1 = nOut - (branches - 1) * n  # first branch absorbs the remainder
        self.c1 = C(nIn, n, 3, 1)
        self.d1 = CDilated(n, n1, 3, 1, 1)
        self.d2 = CDilated(n, n, 3, 1, 2)
        self.d4 = CDilated(n, n, 3, 1, 4)
        self.d8 = CDilated(n, n, 3, 1, 8)
        self.bn = nn.BatchNorm2d(nOut, momentum=0.95, eps=0.001)
        self.act = nn.ReLU(True)

    def forward(self, input):
        reduced = self.c1(input)
        d1 = self.d1(reduced)
        # Hierarchical feature fusion: each sum folds in the next dilation.
        add1 = self.d2(reduced)
        add2 = add1 + self.d4(reduced)
        add3 = add2 + self.d8(reduced)
        merged = torch.cat([d1, add1, add2, add3], 1)
        return self.act(self.bn(input + merged))
|
class PSPDec(nn.Module):
    """Pyramid-pooling branch (inspired by PSPNet): adaptive avg-pool to a
    fixed grid, 1x1 channel projection with BN + ReLU, bilinear upsample.
    See: https://arxiv.org/abs/1612.01105
    """

    def __init__(self, nIn, nOut, downSize, upSize=48):
        super().__init__()
        self.features = nn.Sequential(
            nn.AdaptiveAvgPool2d(downSize),
            nn.Conv2d(nIn, nOut, 1, bias=False),
            nn.BatchNorm2d(nOut, momentum=0.95, eps=0.001),
            nn.ReLU(True),
            nn.Upsample(size=upSize, mode='bilinear'),
        )

    def forward(self, x):
        return self.features(x)
|
class ResNetC1(nn.Module):
    """Encoder-decoder segmentation network: ESP blocks encode, PSP blocks
    decode. Class scores are produced at three scales; each scale is
    upsampled and fused with the next-shallower encoder features.
    """
    def __init__(self, classes):
        super().__init__()
        # Stem: stride-2 7x7 conv, 3 -> 16 channels (1/2 resolution).
        self.level1 = CBR(3, 16, 7, 2)
        # Pyramid branches + classifier for the shallowest decode stage.
        self.p01 = PSPDec((16 + classes), classes, 160, 192)
        self.p02 = PSPDec((16 + classes), classes, 128, 192)
        self.p03 = PSPDec((16 + classes), classes, 96, 192)
        self.p04 = PSPDec((16 + classes), classes, 72, 192)
        self.class_0 = nn.Sequential(nn.Conv2d((16 + (5 * classes)), classes, 3, padding=1, bias=False), nn.BatchNorm2d(classes, momentum=0.95, eps=0.001), nn.ReLU(True), nn.Conv2d(classes, classes, 7, padding=3, bias=False))
        # Level-2 encoder: downsample to 1/4 resolution, then two ESP blocks.
        self.level2 = DownSamplerA(16, 128)
        self.level2_0 = DilatedParllelResidualBlockB1(128, 128)
        self.level2_1 = DilatedParllelResidualBlockB1(128, 128)
        # Pyramid branches + classifier for the middle decode stage.
        self.p10 = PSPDec((8 + 256), 64, 80, 96)
        self.p20 = PSPDec((8 + 256), 64, 64, 96)
        self.p30 = PSPDec((8 + 256), 64, 48, 96)
        self.p40 = PSPDec((8 + 256), 64, 36, 96)
        self.class_1 = nn.Sequential(nn.Conv2d(((8 + 256) + (64 * 4)), classes, 3, padding=1, bias=False), nn.BatchNorm2d(classes, momentum=0.95, eps=0.001), nn.ReLU(True), nn.Conv2d(classes, classes, 1, bias=False), nn.BatchNorm2d(classes, momentum=0.95, eps=0.001), nn.ReLU(True))
        self.br_2 = BR(256)
        # Levels 3/4 encoder: downsample to 1/8 resolution, five ESP blocks.
        self.level3_0 = DownSamplerA(256, 256)
        self.level3_1 = DilatedParllelResidualBlockB1(256, 256, 0.3)
        self.level3_2 = DilatedParllelResidualBlockB1(256, 256, 0.3)
        self.level4_1 = DilatedParllelResidualBlockB1(256, 256, 0.3)
        self.level4_2 = DilatedParllelResidualBlockB1(256, 256, 0.3)
        self.level4_3 = DilatedParllelResidualBlockB1(256, 256, 0.3)
        # Pyramid branches + classifier for the deepest decode stage.
        self.p1 = PSPDec(512, 128, 40)
        self.p2 = PSPDec(512, 128, 32)
        self.p3 = PSPDec(512, 128, 24)
        self.p4 = PSPDec(512, 128, 18)
        self.br_4 = BR(512)
        self.classifier = nn.Sequential(nn.Conv2d((512 + (4 * 128)), 128, 1, padding=0, bias=False), nn.BatchNorm2d(128, momentum=0.95, eps=0.001), nn.ReLU(True), nn.Conv2d(128, classes, 3, padding=1, bias=False), nn.BatchNorm2d(classes, momentum=0.95, eps=0.001), nn.ReLU(True), nn.Conv2d(classes, classes, 1, bias=False), nn.BatchNorm2d(classes, momentum=0.95, eps=0.001), nn.ReLU(True))
        self.upsample_1 = nn.Upsample(scale_factor=2, mode='bilinear')
        self.upsample_2 = nn.Upsample(scale_factor=2, mode='bilinear')
        self.upsample_3 = nn.Upsample(scale_factor=2, mode='bilinear')
    def forward(self, input1):
        """Return per-pixel class scores.

        NOTE(review): the fixed PSPDec up-sizes (192/96/48) imply a specific
        input resolution — confirm against the training pipeline.
        """
        output0 = self.level1(input1)
        output1_0 = self.level2(output0)
        output1 = self.level2_0(output1_0)
        output1 = self.level2_1(output1)
        # Fuse pre/post-ESP features (128 + 128 = 256 channels).
        output1 = self.br_2(torch.cat([output1_0, output1], 1))
        output2_0 = self.level3_0(output1)
        output2 = self.level3_1(output2_0)
        output2 = self.level3_2(output2)
        output3 = self.level4_1(output2)
        output3 = self.level4_2(output3)
        output3 = self.level4_3(output3)
        output3 = self.br_4(torch.cat([output2_0, output3], 1))
        # Deepest decode: concat pyramid branches, classify, upsample x2.
        output3 = self.classifier(torch.cat([output3, self.p1(output3), self.p2(output3), self.p3(output3), self.p4(output3)], 1))
        output3 = self.upsample_3(output3)
        combine_up_23 = torch.cat([output3, output1], 1)
        output23_hook = self.class_1(torch.cat([combine_up_23, self.p10(combine_up_23), self.p20(combine_up_23), self.p30(combine_up_23), self.p40(combine_up_23)], 1))
        output23_hook = self.upsample_2(output23_hook)
        combine_up = torch.cat([output0, output23_hook], 1)
        output0_hook = self.class_0(torch.cat([combine_up, self.p01(combine_up), self.p02(combine_up), self.p03(combine_up), self.p04(combine_up)], 1))
        classifier = self.upsample_1(output0_hook)
        return classifier
|
class ResNetD1(nn.Module):
    """Same encoder-decoder topology as ResNetC1, but with plain
    BasicResidualBlock units (ResNet-style) instead of ESP blocks in the
    encoder. PSP branches are used for decoding.
    """
    def __init__(self, classes):
        super().__init__()
        # Stem: stride-2 7x7 conv, 3 -> 16 channels (1/2 resolution).
        self.level1 = CBR(3, 16, 7, 2)
        self.p01 = PSPDec((16 + classes), classes, 160, 192)
        self.p02 = PSPDec((16 + classes), classes, 128, 192)
        self.p03 = PSPDec((16 + classes), classes, 96, 192)
        self.p04 = PSPDec((16 + classes), classes, 72, 192)
        self.class_0 = nn.Sequential(nn.Conv2d((16 + (5 * classes)), classes, 3, padding=1, bias=False), nn.BatchNorm2d(classes, momentum=0.95, eps=0.001), nn.ReLU(True), nn.Conv2d(classes, classes, 7, padding=3, bias=False))
        # Level-2 encoder: downsample to 1/4, then two residual blocks.
        self.level2 = DownSamplerA(16, 128)
        self.level2_0 = BasicResidualBlock(128, 128)
        self.level2_1 = BasicResidualBlock(128, 128)
        self.p10 = PSPDec((8 + 256), 64, 80, 96)
        self.p20 = PSPDec((8 + 256), 64, 64, 96)
        self.p30 = PSPDec((8 + 256), 64, 48, 96)
        self.p40 = PSPDec((8 + 256), 64, 36, 96)
        self.class_1 = nn.Sequential(nn.Conv2d(((8 + 256) + (64 * 4)), classes, 3, padding=1, bias=False), nn.BatchNorm2d(classes, momentum=0.95, eps=0.001), nn.ReLU(True), nn.Conv2d(classes, classes, 1, bias=False), nn.BatchNorm2d(classes, momentum=0.95, eps=0.001), nn.ReLU(True))
        self.br_2 = BR(256)
        # Levels 3/4 encoder: downsample to 1/8, five residual blocks.
        self.level3_0 = DownSamplerA(256, 256)
        self.level3_1 = BasicResidualBlock(256, 256, 0.3)
        self.level3_2 = BasicResidualBlock(256, 256, 0.3)
        self.level4_1 = BasicResidualBlock(256, 256, 0.3)
        self.level4_2 = BasicResidualBlock(256, 256, 0.3)
        self.level4_3 = BasicResidualBlock(256, 256, 0.3)
        self.p1 = PSPDec(512, 128, 40)
        self.p2 = PSPDec(512, 128, 32)
        self.p3 = PSPDec(512, 128, 24)
        self.p4 = PSPDec(512, 128, 18)
        self.br_4 = BR(512)
        self.classifier = nn.Sequential(nn.Conv2d((512 + (128 * 4)), 128, 1, padding=0, bias=False), nn.BatchNorm2d(128, momentum=0.95, eps=0.001), nn.ReLU(True), nn.Conv2d(128, classes, 3, padding=1, bias=False), nn.BatchNorm2d(classes, momentum=0.95, eps=0.001), nn.ReLU(True), nn.Conv2d(classes, classes, 1, bias=False), nn.BatchNorm2d(classes, momentum=0.95, eps=0.001), nn.ReLU(True))
        self.upsample_1 = nn.Upsample(scale_factor=2, mode='bilinear')
        self.upsample_2 = nn.Upsample(scale_factor=2, mode='bilinear')
        self.upsample_3 = nn.Upsample(scale_factor=2, mode='bilinear')
    def forward(self, input1):
        """Return per-pixel class scores (same decode scheme as ResNetC1)."""
        output0 = self.level1(input1)
        output1_0 = self.level2(output0)
        output1 = self.level2_0(output1_0)
        output1 = self.level2_1(output1)
        output1 = self.br_2(torch.cat([output1_0, output1], 1))
        output2_0 = self.level3_0(output1)
        output2 = self.level3_1(output2_0)
        output2 = self.level3_2(output2)
        output3 = self.level4_1(output2)
        output3 = self.level4_2(output3)
        output3 = self.level4_3(output3)
        output3 = self.br_4(torch.cat([output2_0, output3], 1))
        output3 = self.classifier(torch.cat([output3, self.p1(output3), self.p2(output3), self.p3(output3), self.p4(output3)], 1))
        output3 = self.upsample_3(output3)
        combine_up_23 = torch.cat([output3, output1], 1)
        output23_hook = self.class_1(torch.cat([combine_up_23, self.p10(combine_up_23), self.p20(combine_up_23), self.p30(combine_up_23), self.p40(combine_up_23)], 1))
        output23_hook = self.upsample_2(output23_hook)
        # NOTE(review): concat order here is (output23_hook, output0),
        # the reverse of ResNetC1 — presumably intentional but worth confirming.
        combine_up = torch.cat([output23_hook, output0], 1)
        output0_hook = self.class_0(torch.cat([combine_up, self.p01(combine_up), self.p02(combine_up), self.p03(combine_up), self.p04(combine_up)], 1))
        classifier = self.upsample_1(output0_hook)
        return classifier
|
def make_dot(var, params=None):
    """Produce a Graphviz representation of a PyTorch autograd graph.

    Blue nodes are the Variables that require grad; orange nodes are tensors
    saved for backward in torch.autograd.Function.

    Args:
        var: output Variable (its ``.grad_fn`` is the traversal root)
        params: optional dict of (name, Variable) used to label parameter
            nodes that require grad

    Returns:
        graphviz.Digraph of the traversed graph.
    """
    if (params is not None):
        # Bug fix: dict.values() is a view and not subscriptable in
        # Python 3; grab an arbitrary value through an iterator instead.
        assert isinstance(next(iter(params.values())), Variable)
        param_map = {id(v): k for (k, v) in params.items()}
    node_attr = dict(style='filled', shape='box', align='left', fontsize='12', ranksep='0.1', height='0.2')
    dot = Digraph(node_attr=node_attr, graph_attr=dict(size='12,12'))
    seen = set()

    def size_to_str(size):
        return (('(' + ', '.join([('%d' % v) for v in size])) + ')')

    def add_nodes(var):
        # Depth-first walk over the grad_fn graph; ``seen`` prevents revisits.
        if (var not in seen):
            if torch.is_tensor(var):
                dot.node(str(id(var)), size_to_str(var.size()), fillcolor='orange')
            elif hasattr(var, 'variable'):
                u = var.variable
                name = (param_map[id(u)] if (params is not None) else '')
                node_name = ('%s\n %s' % (name, size_to_str(u.size())))
                dot.node(str(id(var)), node_name, fillcolor='lightblue')
            else:
                dot.node(str(id(var)), str(type(var).__name__))
            seen.add(var)
            if hasattr(var, 'next_functions'):
                for u in var.next_functions:
                    if (u[0] is not None):
                        dot.edge(str(id(u[0])), str(id(var)))
                        add_nodes(u[0])
            if hasattr(var, 'saved_tensors'):
                for t in var.saved_tensors:
                    dot.edge(str(id(t)), str(id(var)))
                    add_nodes(t)
    add_nodes(var.grad_fn)
    return dot
|
def val(args, val_loader, model, criterion):
    """Run one validation pass.

    Returns:
        (avg_loss, overall_acc, per_class_acc, per_class_iu, mIOU)

    NOTE(review): uses the legacy pre-0.4 PyTorch API (``volatile=True``
    Variables and ``loss.data[0]``); on modern PyTorch this would need
    ``torch.no_grad()`` and ``loss.item()``.
    """
    model.eval()
    iouEvalVal = iouEval(args.classes)
    epoch_loss = []
    total_batches = len(val_loader)
    for (i, (input, target)) in enumerate(val_loader):
        start_time = time.time()
        if (args.onGPU == True):
            input = input.cuda()
            target = target.cuda()
        # volatile=True disables autograd history (legacy inference mode).
        input_var = torch.autograd.Variable(input, volatile=True)
        target_var = torch.autograd.Variable(target, volatile=True)
        output = model(input_var)
        loss = criterion(output, target_var)
        epoch_loss.append(loss.data[0])
        time_taken = (time.time() - start_time)
        # argmax over the class dimension gives the predicted label map.
        iouEvalVal.addBatch(output.max(1)[1].data, target_var.data)
        print(('[%d/%d] loss: %.3f time: %.2f' % (i, total_batches, loss.data[0], time_taken)))
    average_epoch_loss_val = (sum(epoch_loss) / len(epoch_loss))
    (overall_acc, per_class_acc, per_class_iu, mIOU) = iouEvalVal.getMetric()
    return (average_epoch_loss_val, overall_acc, per_class_acc, per_class_iu, mIOU)
|
def train(args, train_loader, model, criterion, optimizer, epoch):
    """Run one training epoch.

    Returns:
        (avg_loss, overall_acc, per_class_acc, per_class_iu, mIOU)

    NOTE(review): retains the file's legacy pre-0.4 PyTorch API
    (``Variable`` wrappers and ``loss.data[0]``).
    """
    model.train()
    iouEvalTrain = iouEval(args.classes)
    epoch_loss = []
    total_batches = len(train_loader)
    for (i, (input, target)) in enumerate(train_loader):
        start_time = time.time()
        if (args.onGPU == True):
            input = input.cuda()
            target = target.cuda()
        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)
        output = model(input_var)
        loss = criterion(output, target_var)
        # Fix: zero the gradients exactly once per step (the original called
        # optimizer.zero_grad() twice, before and after computing the loss).
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        epoch_loss.append(loss.data[0])
        time_taken = (time.time() - start_time)
        iouEvalTrain.addBatch(output.max(1)[1].data, target_var.data)
        print(('[%d/%d] loss: %.3f time:%.2f' % (i, total_batches, loss.data[0], time_taken)))
    average_epoch_loss_train = (sum(epoch_loss) / len(epoch_loss))
    (overall_acc, per_class_acc, per_class_iu, mIOU) = iouEvalTrain.getMetric()
    return (average_epoch_loss_train, overall_acc, per_class_acc, per_class_iu, mIOU)
|
def save_checkpoint(state, filenameCheckpoint='checkpoint.pth.tar'):
    """Persist a training-state checkpoint via ``torch.save``.

    Args:
        state: Picklable object (typically a dict of model/optimizer state).
        filenameCheckpoint: Destination path for the serialized checkpoint.
    """
    torch.save(state, filenameCheckpoint)
|
def trainValidateSegmentation(args):
    """Full training driver: load (or build) the cached dataset, construct
    the chosen model, then alternate train/val epochs with checkpointing
    and logging.
    """
    # Build the processed-data cache on first run; reuse the pickle after.
    if (not os.path.isfile(args.cached_data_file)):
        dataLoader = ld.LoadData(args.data_dir, args.classes, args.cached_data_file)
        if (dataLoader is None):
            print('Error while processing the data. Please check')
            exit((- 1))
        data = dataLoader.processData()
    else:
        # NOTE(review): the file handle from open() is never closed here.
        data = pickle.load(open(args.cached_data_file, 'rb'))
    if (args.modelType == 'C1'):
        model = net.ResNetC1(args.classes)
    elif (args.modelType == 'D1'):
        model = net.ResNetD1(args.classes)
    else:
        print('Please select the correct model. Exiting!!')
        exit((- 1))
    args.savedir = ((args.savedir + args.modelType) + '/')
    if (args.onGPU == True):
        model = model.cuda()
    if (not os.path.exists(args.savedir)):
        os.mkdir(args.savedir)
    # NOTE(review): model.cuda() is invoked a second time below; harmless
    # but redundant with the call above.
    if (args.onGPU == True):
        model = model.cuda()
    # Optionally render the autograd graph of one forward pass.
    if (args.visualizeNet == True):
        x = Variable(torch.randn(1, 3, args.inWidth, args.inHeight))
        if (args.onGPU == True):
            x = x.cuda()
        y = model.forward(x)
        g = viz.make_dot(y)
        g.render((args.savedir + '/model.png'), view=False)
    n_param = sum([np.prod(param.size()) for param in model.parameters()])
    print(('Network parameters: ' + str(n_param)))
    print('Weights to handle class-imbalance')
    weight = torch.from_numpy(data['classWeights'])
    print(weight)
    if (args.onGPU == True):
        weight = weight.cuda()
    criteria = CrossEntropyLoss2d(weight)
    if (args.onGPU == True):
        criteria = criteria.cuda()
    # Two training pipelines: with and without a fixed 512x512 zoom step.
    trainDatasetNoZoom = myTransforms.Compose([myTransforms.RandomCropResize(20), myTransforms.RandomHorizontalFlip(), myTransforms.ToTensor(args.scaleIn)])
    trainDatasetWithZoom = myTransforms.Compose([myTransforms.Zoom(512, 512), myTransforms.RandomCropResize(20), myTransforms.RandomHorizontalFlip(), myTransforms.ToTensor(args.scaleIn)])
    valDataset = myTransforms.Compose([myTransforms.ToTensor(args.scaleIn)])
    trainLoaderNoZoom = torch.utils.data.DataLoader(myDataLoader.MyDataset(data['trainIm'], data['trainAnnot'], transform=trainDatasetNoZoom), batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True)
    trainLoaderWithZoom = torch.utils.data.DataLoader(myDataLoader.MyDataset(data['trainIm'], data['trainAnnot'], transform=trainDatasetWithZoom), batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True)
    valLoader = torch.utils.data.DataLoader(myDataLoader.MyDataset(data['valIm'], data['valAnnot'], transform=valDataset), batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True)
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=0.0005)
    if (args.onGPU == True):
        cudnn.benchmark = True
    start_epoch = 0
    # Resume training from a checkpoint if requested.
    if args.resume:
        if os.path.isfile(args.resumeLoc):
            print("=> loading checkpoint '{}'".format(args.resumeLoc))
            checkpoint = torch.load(args.resumeLoc)
            start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            # NOTE(review): these two messages format args.resume; the path
            # args.resumeLoc was probably intended — confirm.
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    logFileLoc = ((args.savedir + os.sep) + args.logFile)
    # NOTE(review): the header literal 'mIOU (val' is missing its ')'.
    if os.path.isfile(logFileLoc):
        logger = open(logFileLoc, 'a')
        logger.write(('Parameters: %s' % str(n_param)))
        logger.write(('\n%s\t%s\t%s\t%s\t%s\t' % ('Epoch', 'Loss(Tr)', 'Loss(val)', 'mIOU (tr)', 'mIOU (val')))
        logger.flush()
    else:
        logger = open(logFileLoc, 'w')
        logger.write(('Parameters: %s' % str(n_param)))
        logger.write(('\n%s\t%s\t%s\t%s\t%s\t' % ('Epoch', 'Loss(Tr)', 'Loss(val)', 'mIOU (tr)', 'mIOU (val')))
        logger.flush()
    # LR decays by 10x every step_loss epochs. NOTE(review): passing the
    # epoch to scheduler.step() is the legacy (pre-1.1) scheduler API.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.step_loss, gamma=0.1)
    for epoch in range(start_epoch, args.max_epochs):
        scheduler.step(epoch)
        lr = 0
        for param_group in optimizer.param_groups:
            lr = param_group['lr']
        # First pass trains on zoomed data (metrics discarded); the second,
        # un-zoomed pass supplies the reported training metrics.
        train(args, trainLoaderWithZoom, model, criteria, optimizer, epoch)
        (lossTr, overall_acc_tr, per_class_acc_tr, per_class_iu_tr, mIOU_tr) = train(args, trainLoaderNoZoom, model, criteria, optimizer, epoch)
        (lossVal, overall_acc_val, per_class_acc_val, per_class_iu_val, mIOU_val) = val(args, valLoader, model, criteria)
        save_checkpoint({'epoch': (epoch + 1), 'arch': str(model), 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict(), 'lossTr': lossTr, 'lossVal': lossVal, 'iouTr': mIOU_tr, 'iouVal': mIOU_val}, (args.savedir + '/checkpoint.pth.tar'))
        model_file_name = (((args.savedir + '/model_') + str((epoch + 1))) + '.pth')
        torch.save(model.state_dict(), model_file_name)
        # Per-epoch detailed accuracy dump.
        with open((((args.savedir + 'acc_') + str(epoch)) + '.txt'), 'w') as log:
            log.write(('\nEpoch: %d\t Overall Acc (Tr): %.4f\t Overall Acc (Val): %.4f\t mIOU (Tr): %.4f\t mIOU (Val): %.4f' % (epoch, overall_acc_tr, overall_acc_val, mIOU_tr, mIOU_val)))
            log.write('\n')
            log.write(('Per Class Training Acc: ' + str(per_class_acc_tr)))
            log.write('\n')
            log.write(('Per Class Validation Acc: ' + str(per_class_acc_val)))
            log.write('\n')
            log.write(('Per Class Training mIOU: ' + str(per_class_iu_tr)))
            log.write('\n')
            log.write(('Per Class Validation mIOU: ' + str(per_class_iu_val)))
        logger.write(('\n%d\t\t%.4f\t\t%.4f\t\t%.4f\t\t%.4f\t\t%.4f' % (epoch, lossTr, lossVal, mIOU_tr, mIOU_val, lr)))
        logger.flush()
        print((('Epoch : ' + str(epoch)) + ' Details'))
        print(('\nEpoch No.: %d\tTrain Loss = %.4f\tVal Loss = %.4f\t mIOU(tr) = %.4f\t mIOU(val) = %.4f' % (epoch, lossTr, lossVal, mIOU_tr, mIOU_val)))
    logger.close()
|
class CrossEntropyLoss2d(nn.Module):
    """2D cross-entropy: log-softmax over the channel axis followed by NLL.

    Expects ``outputs`` of shape (N, C, H, W) and integer ``targets`` of
    shape (N, H, W).
    """

    def __init__(self, weight=None):
        """
        Args:
            weight: Optional per-class rescaling weights (tensor of size C).
        """
        super().__init__()
        # nn.NLLLoss2d is deprecated; nn.NLLLoss handles the 2D case directly.
        self.loss = nn.NLLLoss(weight)

    def forward(self, outputs, targets):
        # dim=1 makes the softmax axis explicit (the class/channel axis);
        # relying on the legacy implicit-dim default is deprecated behavior.
        return self.loss(F.log_softmax(outputs, dim=1), targets)
|
class MyDataset(torch.utils.data.Dataset):
    """Dataset yielding (image, segmentation label, diagnosis label) triples.

    Images are read in OpenCV's default color mode; segmentation labels as
    single-channel index maps; the diagnosis label comes from ``diagList``.
    """

    def __init__(self, imList, labelList, diagList, transform=None):
        """
        Args:
            imList: List of image file paths
            labelList: List of label-image file paths (parallel to imList)
            diagList: List of diagnosis labels (parallel to imList)
            transform: Optional joint transform over (image, label, label2)
        """
        self.imList = imList
        self.labelList = labelList
        self.diagList = diagList
        self.transform = transform

    def __len__(self):
        return len(self.imList)

    def __getitem__(self, idx):
        image = cv2.imread(self.imList[idx])
        label = cv2.imread(self.labelList[idx], 0)  # flag 0 -> grayscale
        label2 = self.diagList[idx]
        if self.transform:
            image, label, label2 = self.transform(image, label, label2)
        return (image, label, label2)
|
class iouEval():
    """Accumulates segmentation metrics (accuracy, per-class IoU, mIoU)
    over batches; ``getMetric`` returns their averages.
    """

    def __init__(self, nClasses):
        """
        Args:
            nClasses: Number of segmentation classes.
        """
        self.nClasses = nClasses
        self.reset()

    def reset(self):
        """Clear all running sums and the batch counter."""
        self.overall_acc = 0
        self.per_class_acc = np.zeros(self.nClasses, dtype=np.float32)
        self.per_class_iu = np.zeros(self.nClasses, dtype=np.float32)
        self.mIOU = 0
        # Bug fix: starting the counter at 1 made getMetric() divide by
        # (num_batches + 1), biasing every averaged metric low.
        self.batchCount = 0

    def fast_hist(self, a, b):
        """Confusion matrix between ground truth ``a`` and prediction ``b``."""
        k = ((a >= 0) & (a < self.nClasses))  # mask out-of-range labels
        return np.bincount(((self.nClasses * a[k].astype(int)) + b[k]), minlength=(self.nClasses ** 2)).reshape(self.nClasses, self.nClasses)

    def compute_hist(self, predict, gth):
        hist = self.fast_hist(gth, predict)
        return hist

    def addBatch(self, predict, gth):
        """Accumulate metrics for one batch of predictions vs. ground truth."""
        predict = predict.cpu().numpy().flatten()
        gth = gth.cpu().numpy().flatten()
        epsilon = 1e-08  # guards divisions by zero on empty rows/classes
        hist = self.compute_hist(predict, gth)
        overall_acc = (np.diag(hist).sum() / (hist.sum() + epsilon))
        per_class_acc = (np.diag(hist) / (hist.sum(1) + epsilon))
        per_class_iu = (np.diag(hist) / (((hist.sum(1) + hist.sum(0)) - np.diag(hist)) + epsilon))
        mIou = np.nanmean(per_class_iu)
        self.overall_acc += overall_acc
        self.per_class_acc += per_class_acc
        self.per_class_iu += per_class_iu
        self.mIOU += mIou
        self.batchCount += 1

    def getMetric(self):
        """Return (overall_acc, per_class_acc, per_class_iu, mIOU) averaged
        over the batches added so far."""
        # max(..., 1) keeps a call before any addBatch() from dividing by 0.
        count = max(self.batchCount, 1)
        overall_acc = (self.overall_acc / count)
        per_class_acc = (self.per_class_acc / count)
        per_class_iu = (self.per_class_iu / count)
        mIOU = (self.mIOU / count)
        return (overall_acc, per_class_acc, per_class_iu, mIOU)
|
class CBR(nn.Module):
    """Convolution + BatchNorm + ReLU, preserving spatial size at stride 1."""

    def __init__(self, nIn, nOut, kSize, stride=1):
        super().__init__()
        same_pad = (kSize - 1) // 2
        self.conv = nn.Conv2d(nIn, nOut, kSize, stride=stride, padding=same_pad, bias=False)
        self.bn = nn.BatchNorm2d(nOut, momentum=0.95, eps=0.001)
        self.act = nn.ReLU(True)

    def forward(self, input):
        out = self.conv(input)
        out = self.bn(out)
        return self.act(out)
|
class CB(nn.Module):
    """Convolution + BatchNorm (no activation), preserving spatial size."""

    def __init__(self, nIn, nOut, kSize, stride=1):
        super().__init__()
        same_pad = (kSize - 1) // 2
        self.conv = nn.Conv2d(nIn, nOut, kSize, stride=stride, padding=same_pad, bias=False)
        self.bn = nn.BatchNorm2d(nOut, momentum=0.95, eps=0.001)

    def forward(self, input):
        out = self.conv(input)
        return self.bn(out)
|
class C(nn.Module):
    """Bias-free convolution with 'same' padding."""

    def __init__(self, nIn, nOut, kSize, stride=1):
        super().__init__()
        same_pad = (kSize - 1) // 2
        self.conv = nn.Conv2d(nIn, nOut, kSize, stride=stride, padding=same_pad, bias=False)

    def forward(self, input):
        return self.conv(input)
|
class DownSampler(nn.Module):
    """Downsample by 2 with concatenated stride-2 conv and avg-pool features,
    followed by BatchNorm + ReLU. The conv supplies ``nOut - nIn`` new
    channels; the pooled input carries the remaining ``nIn`` through.
    """

    def __init__(self, nIn, nOut):
        super().__init__()
        self.conv = nn.Conv2d(nIn, (nOut - nIn), 3, stride=2, padding=1, bias=False)
        self.pool = nn.AvgPool2d(3, stride=2, padding=1)
        self.bn = nn.BatchNorm2d(nOut, momentum=0.95, eps=0.001)
        self.act = nn.ReLU(True)

    def forward(self, input):
        merged = torch.cat([self.conv(input), self.pool(input)], 1)
        return self.act(self.bn(merged))
|
class BasicResidualBlock(nn.Module):
    """Two-conv residual block: ReLU(input + CB(CBR(input))).

    The skip connection requires nIn == nOut. ``prob`` is accepted for
    signature compatibility with the dilated blocks but is unused here.
    """

    def __init__(self, nIn, nOut, prob=0.03):
        super().__init__()
        self.c1 = CBR(nIn, nOut, 3, 1)
        self.c2 = CB(nOut, nOut, 3, 1)
        self.act = nn.ReLU(True)

    def forward(self, input):
        residual = self.c2(self.c1(input))
        return self.act(input + residual)
|
class DownSamplerA(nn.Module):
    """Halves spatial resolution with a single stride-2 CBR block."""

    def __init__(self, nIn, nOut):
        super().__init__()
        self.conv = CBR(nIn, nOut, 3, 2)

    def forward(self, input):
        return self.conv(input)
|
class BR(nn.Module):
    """BatchNorm followed by ReLU (no convolution)."""

    def __init__(self, nOut):
        super().__init__()
        self.bn = nn.BatchNorm2d(nOut, momentum=0.95, eps=0.001)
        self.act = nn.ReLU(True)

    def forward(self, input):
        normed = self.bn(input)
        return self.act(normed)
|
class CDilated(nn.Module):
    """Dilated convolution; padding scales with dilation to keep size."""

    def __init__(self, nIn, nOut, kSize, stride=1, d=1):
        super().__init__()
        pad = ((kSize - 1) // 2) * d
        self.conv = nn.Conv2d(nIn, nOut, (kSize, kSize), stride=stride, padding=(pad, pad), bias=False, dilation=d)

    def forward(self, input):
        return self.conv(input)
|
class CDilated1(nn.Module):
    """Dilated convolution followed by BatchNorm + ReLU (via BR)."""

    def __init__(self, nIn, nOut, kSize, stride=1, d=1):
        super().__init__()
        pad = ((kSize - 1) // 2) * d  # effective kernel grows with dilation
        self.conv = nn.Conv2d(nIn, nOut, (kSize, kSize), stride=stride, padding=(pad, pad), bias=False, dilation=d)
        self.br = BR(nOut)

    def forward(self, input):
        return self.br(self.conv(input))
|
class DilatedParllelResidualBlockB(nn.Module):
    """Five-branch ESP block: 1x1 reduction, parallel dilated convolutions
    (d = 1, 2, 4, 8, 16), hierarchical fusion, residual add, BN + ReLU.
    Requires nIn == nOut for the residual connection.
    """

    def __init__(self, nIn, nOut, prob=0.03):
        super().__init__()
        n = nOut // 5
        n1 = nOut - (4 * n)  # first branch absorbs the remainder channels
        self.c1 = C(nIn, n, 1, 1)
        self.d1 = CDilated(n, n1, 3, 1, 1)
        self.d2 = CDilated(n, n, 3, 1, 2)
        self.d4 = CDilated(n, n, 3, 1, 4)
        self.d8 = CDilated(n, n, 3, 1, 8)
        self.d16 = CDilated(n, n, 3, 1, 16)
        self.bn = nn.BatchNorm2d(nOut, momentum=0.95, eps=0.001)
        self.act = nn.ReLU(True)

    def forward(self, input):
        reduced = self.c1(input)
        d1 = self.d1(reduced)
        # Hierarchical feature fusion: each sum folds in the next dilation.
        add1 = self.d2(reduced)
        add2 = add1 + self.d4(reduced)
        add3 = add2 + self.d8(reduced)
        add4 = add3 + self.d16(reduced)
        fused = torch.cat([d1, add1, add2, add3, add4], 1)
        return self.act(self.bn(input + fused))
|
class DilatedParllelResidualBlockB1(nn.Module):
    """Four-branch ESP block with a 3x3 reduction convolution.

    ``self.d16`` is registered but never used in ``forward``; it is kept so
    existing checkpoints (state_dicts) still load.
    """

    def __init__(self, nIn, nOut, prob=0.03):
        super().__init__()
        n = nOut // 4
        n1 = nOut - (3 * n)  # first branch absorbs the remainder channels
        self.c1 = C(nIn, n, 3, 1)
        self.d1 = CDilated(n, n1, 3, 1, 1)
        self.d2 = CDilated(n, n, 3, 1, 2)
        self.d4 = CDilated(n, n, 3, 1, 4)
        self.d8 = CDilated(n, n, 3, 1, 8)
        self.d16 = CDilated(n, n, 3, 1, 16)  # unused in forward (see docstring)
        self.bn = nn.BatchNorm2d(nOut, momentum=0.95, eps=0.001)
        self.act = nn.ReLU(True)

    def forward(self, input):
        reduced = self.c1(input)
        d1 = self.d1(reduced)
        # Hierarchical feature fusion: each sum folds in the next dilation.
        add1 = self.d2(reduced)
        add2 = add1 + self.d4(reduced)
        add3 = add2 + self.d8(reduced)
        fused = torch.cat([d1, add1, add2, add3], 1)
        return self.act(self.bn(input + fused))
|
class PSPDec(nn.Module):
    """PSPNet-style pooling branch: adaptive avg-pool to ``downSize``, 1x1
    channel projection with BN + ReLU, then bilinear upsample to ``upSize``.
    """

    def __init__(self, nIn, nOut, downSize, upSize=48):
        super().__init__()
        stages = [
            nn.AdaptiveAvgPool2d(downSize),
            nn.Conv2d(nIn, nOut, 1, bias=False),
            nn.BatchNorm2d(nOut, momentum=0.95, eps=0.001),
            nn.ReLU(True),
            nn.Upsample(size=upSize, mode='bilinear'),
        ]
        self.features = nn.Sequential(*stages)

    def forward(self, x):
        return self.features(x)
|
class ResNetC1(nn.Module):
    """Segmentation model with ESP blocks as the encoder (same topology as
    the stage-1 ResNetC1 above); PSP branches are used for decoding at three
    scales, each fused with shallower encoder features.
    """
    def __init__(self, classes):
        super().__init__()
        # Stem: stride-2 7x7 conv, 3 -> 16 channels (1/2 resolution).
        self.level1 = CBR(3, 16, 7, 2)
        # Pyramid branches + classifier for the shallowest decode stage.
        self.p01 = PSPDec((16 + classes), classes, 160, 192)
        self.p02 = PSPDec((16 + classes), classes, 128, 192)
        self.p03 = PSPDec((16 + classes), classes, 96, 192)
        self.p04 = PSPDec((16 + classes), classes, 72, 192)
        self.class_0 = nn.Sequential(nn.Conv2d((16 + (5 * classes)), classes, 3, padding=1, bias=False), nn.BatchNorm2d(classes, momentum=0.95, eps=0.001), nn.ReLU(True), nn.Conv2d(classes, classes, 7, padding=3, bias=False))
        # Level-2 encoder: downsample to 1/4 resolution, then two ESP blocks.
        self.level2 = DownSamplerA(16, 128)
        self.level2_0 = DilatedParllelResidualBlockB1(128, 128)
        self.level2_1 = DilatedParllelResidualBlockB1(128, 128)
        # Pyramid branches + classifier for the middle decode stage.
        self.p10 = PSPDec((8 + 256), 64, 80, 96)
        self.p20 = PSPDec((8 + 256), 64, 64, 96)
        self.p30 = PSPDec((8 + 256), 64, 48, 96)
        self.p40 = PSPDec((8 + 256), 64, 36, 96)
        self.class_1 = nn.Sequential(nn.Conv2d(((8 + 256) + (64 * 4)), classes, 3, padding=1, bias=False), nn.BatchNorm2d(classes, momentum=0.95, eps=0.001), nn.ReLU(True), nn.Conv2d(classes, classes, 1, bias=False), nn.BatchNorm2d(classes, momentum=0.95, eps=0.001), nn.ReLU(True))
        self.br_2 = BR(256)
        # Levels 3/4 encoder: downsample to 1/8 resolution, five ESP blocks.
        self.level3_0 = DownSamplerA(256, 256)
        self.level3_1 = DilatedParllelResidualBlockB1(256, 256, 0.3)
        self.level3_2 = DilatedParllelResidualBlockB1(256, 256, 0.3)
        self.level4_1 = DilatedParllelResidualBlockB1(256, 256, 0.3)
        self.level4_2 = DilatedParllelResidualBlockB1(256, 256, 0.3)
        self.level4_3 = DilatedParllelResidualBlockB1(256, 256, 0.3)
        # Pyramid branches + classifier for the deepest decode stage.
        self.p1 = PSPDec(512, 128, 40)
        self.p2 = PSPDec(512, 128, 32)
        self.p3 = PSPDec(512, 128, 24)
        self.p4 = PSPDec(512, 128, 18)
        self.br_4 = BR(512)
        self.classifier = nn.Sequential(nn.Conv2d((512 + (4 * 128)), 128, 1, padding=0, bias=False), nn.BatchNorm2d(128, momentum=0.95, eps=0.001), nn.ReLU(True), nn.Conv2d(128, classes, 3, padding=1, bias=False), nn.BatchNorm2d(classes, momentum=0.95, eps=0.001), nn.ReLU(True), nn.Conv2d(classes, classes, 1, bias=False), nn.BatchNorm2d(classes, momentum=0.95, eps=0.001), nn.ReLU(True))
        self.upsample_1 = nn.Upsample(scale_factor=2, mode='bilinear')
        self.upsample_2 = nn.Upsample(scale_factor=2, mode='bilinear')
        self.upsample_3 = nn.Upsample(scale_factor=2, mode='bilinear')
    def forward(self, input1):
        """Return per-pixel class scores.

        NOTE(review): the fixed PSPDec up-sizes (192/96/48) imply a specific
        input resolution — confirm against the training pipeline.
        """
        output0 = self.level1(input1)
        output1_0 = self.level2(output0)
        output1 = self.level2_0(output1_0)
        output1 = self.level2_1(output1)
        # Fuse pre/post-ESP features (128 + 128 = 256 channels).
        output1 = self.br_2(torch.cat([output1_0, output1], 1))
        output2_0 = self.level3_0(output1)
        output2 = self.level3_1(output2_0)
        output2 = self.level3_2(output2)
        output3 = self.level4_1(output2)
        output3 = self.level4_2(output3)
        output3 = self.level4_3(output3)
        output3 = self.br_4(torch.cat([output2_0, output3], 1))
        # Deepest decode: concat pyramid branches, classify, upsample x2.
        output3 = self.classifier(torch.cat([output3, self.p1(output3), self.p2(output3), self.p3(output3), self.p4(output3)], 1))
        output3 = self.upsample_3(output3)
        combine_up_23 = torch.cat([output3, output1], 1)
        output23_hook = self.class_1(torch.cat([combine_up_23, self.p10(combine_up_23), self.p20(combine_up_23), self.p30(combine_up_23), self.p40(combine_up_23)], 1))
        output23_hook = self.upsample_2(output23_hook)
        combine_up = torch.cat([output0, output23_hook], 1)
        output0_hook = self.class_0(torch.cat([combine_up, self.p01(combine_up), self.p02(combine_up), self.p03(combine_up), self.p04(combine_up)], 1))
        classifier = self.upsample_1(output0_hook)
        return classifier
|
class ResNetC1_YNet(nn.Module):
    """Y-Net that jointly learns segmentation and diagnostic classification,
    with ESP (DilatedParllelResidualBlockB1) blocks as encoding units.

    The segmentation trunk is an (optionally pre-trained) ResNetC1 whose
    children are replayed positionally in forward(); a classification head
    (level4_*/level5_*/fc*) is attached to the deepest encoder features.
    Returns (segmentation map, diagnosis logits).
    """
    def __init__(self, classes, diagClasses, segNetFile=None):
        # classes: number of segmentation classes
        # diagClasses: number of image-level diagnosis classes
        # segNetFile: optional path to pre-trained ResNetC1 weights
        super().__init__()
        # Classification branch on top of the 512-channel encoder hook.
        self.level4_0 = DownSamplerA(512, 128)
        self.level4_1 = DilatedParllelResidualBlockB1(128, 128, 0.3)
        self.level4_2 = DilatedParllelResidualBlockB1(128, 128, 0.3)
        self.br_con_4 = BR(256)
        self.level5_0 = DownSamplerA(256, 64)
        self.level5_1 = DilatedParllelResidualBlockB1(64, 64, 0.3)
        self.level5_2 = DilatedParllelResidualBlockB1(64, 64, 0.3)
        self.br_con_5 = BR(128)
        self.global_Avg = nn.AdaptiveAvgPool2d(1)
        self.fc1 = nn.Linear(128, 64)
        self.fc2 = nn.Linear(64, diagClasses)
        self.segNet = ResNetC1(classes)
        if (segNetFile is not None):
            print('Loading pre-trained segmentation model')
            self.segNet.load_state_dict(torch.load(segNetFile))
        # NOTE(review): this shadows nn.Module.modules() with a plain list.
        # forward() indexes these children positionally, so the attribute
        # declaration order of ResNetC1.__init__ must not change.
        self.modules = []
        for (i, m) in enumerate(self.segNet.children()):
            self.modules.append(m)
    def forward(self, input1):
        # Replay of the ResNetC1 forward pass through the indexed children
        # (indices follow ResNetC1.__init__ declaration order).
        output0 = self.modules[0](input1)
        output1_0 = self.modules[6](output0)
        output1 = self.modules[7](output1_0)
        output1 = self.modules[8](output1)
        output1 = self.modules[14](torch.cat([output1_0, output1], 1))
        output2_0 = self.modules[15](output1)
        output2 = self.modules[16](output2_0)
        output2 = self.modules[17](output2)
        output3 = self.modules[18](output2)
        output3 = self.modules[19](output3)
        output3 = self.modules[20](output3)
        # Deepest encoder features; also the input to the diagnosis branch.
        output3_hook = self.modules[25](torch.cat([output2_0, output3], 1))
        output3 = self.modules[26](torch.cat([output3_hook, self.modules[21](output3_hook), self.modules[22](output3_hook), self.modules[23](output3_hook), self.modules[24](output3_hook)], 1))
        output3 = self.modules[29](output3)
        combine_up_23 = torch.cat([output3, output1], 1)
        output23_hook = self.modules[13](torch.cat([combine_up_23, self.modules[9](combine_up_23), self.modules[10](combine_up_23), self.modules[11](combine_up_23), self.modules[12](combine_up_23)], 1))
        output23_hook = self.modules[28](output23_hook)
        combine_up = torch.cat([output0, output23_hook], 1)
        output0_hook = self.modules[5](torch.cat([combine_up, self.modules[1](combine_up), self.modules[2](combine_up), self.modules[3](combine_up), self.modules[4](combine_up)], 1))
        # Segmentation output at input resolution.
        classifier = self.modules[27](output0_hook)
        # Diagnosis branch: downsample the deepest hook, pool, two FC layers.
        l5_0 = self.level4_0(output3_hook)
        l5_1 = self.level4_1(l5_0)
        l5_2 = self.level4_2(l5_1)
        l5_con = self.br_con_4(torch.cat([l5_0, l5_2], 1))
        l6_0 = self.level5_0(l5_con)
        l6_1 = self.level5_1(l6_0)
        l6_2 = self.level5_2(l6_1)
        l6_con = self.br_con_5(torch.cat([l6_0, l6_2], 1))
        glbAvg = self.global_Avg(l6_con)
        flatten = glbAvg.view(glbAvg.size(0), (- 1))
        fc1 = self.fc1(flatten)
        diagClass = self.fc2(fc1)
        return (classifier, diagClass)
|
class ResNetD1(nn.Module):
    """Segmentation model with RCB (BasicResidualBlock) encoding blocks;
    same architecture role as Stage 1.

    NOTE(review): ResNetD1_YNet accesses this model's children positionally
    via `children()`, so the attribute declaration order below must not be
    changed.
    """
    def __init__(self, classes):
        # classes: number of segmentation classes
        super().__init__()
        self.level1 = CBR(3, 16, 7, 2)
        # PSP decoders for the shallowest merge (16 + classes input channels).
        self.p01 = PSPDec((16 + classes), classes, 160, 192)
        self.p02 = PSPDec((16 + classes), classes, 128, 192)
        self.p03 = PSPDec((16 + classes), classes, 96, 192)
        self.p04 = PSPDec((16 + classes), classes, 72, 192)
        self.class_0 = nn.Sequential(nn.Conv2d((16 + (5 * classes)), classes, 3, padding=1, bias=False), nn.BatchNorm2d(classes, momentum=0.95, eps=0.001), nn.ReLU(True), nn.Conv2d(classes, classes, 7, padding=3, bias=False))
        self.level2 = DownSamplerA(16, 128)
        self.level2_0 = BasicResidualBlock(128, 128)
        self.level2_1 = BasicResidualBlock(128, 128)
        # PSP decoders for the mid-level merge.
        self.p10 = PSPDec((8 + 256), 64, 80, 96)
        self.p20 = PSPDec((8 + 256), 64, 64, 96)
        self.p30 = PSPDec((8 + 256), 64, 48, 96)
        self.p40 = PSPDec((8 + 256), 64, 36, 96)
        self.class_1 = nn.Sequential(nn.Conv2d(((8 + 256) + (64 * 4)), classes, 3, padding=1, bias=False), nn.BatchNorm2d(classes, momentum=0.95, eps=0.001), nn.ReLU(True), nn.Conv2d(classes, classes, 1, bias=False), nn.BatchNorm2d(classes, momentum=0.95, eps=0.001), nn.ReLU(True))
        self.br_2 = BR(256)
        self.level3_0 = DownSamplerA(256, 256)
        # Deep residual stack with 0.3 dropout-style parameter.
        self.level3_1 = BasicResidualBlock(256, 256, 0.3)
        self.level3_2 = BasicResidualBlock(256, 256, 0.3)
        self.level4_1 = BasicResidualBlock(256, 256, 0.3)
        self.level4_2 = BasicResidualBlock(256, 256, 0.3)
        self.level4_3 = BasicResidualBlock(256, 256, 0.3)
        # PSP decoders for the deepest (512-channel) features.
        self.p1 = PSPDec(512, 128, 40)
        self.p2 = PSPDec(512, 128, 32)
        self.p3 = PSPDec(512, 128, 24)
        self.p4 = PSPDec(512, 128, 18)
        self.br_4 = BR(512)
        self.classifier = nn.Sequential(nn.Conv2d((512 + (128 * 4)), 128, 1, padding=0, bias=False), nn.BatchNorm2d(128, momentum=0.95, eps=0.001), nn.ReLU(True), nn.Conv2d(128, classes, 3, padding=1, bias=False), nn.BatchNorm2d(classes, momentum=0.95, eps=0.001), nn.ReLU(True), nn.Conv2d(classes, classes, 1, bias=False), nn.BatchNorm2d(classes, momentum=0.95, eps=0.001), nn.ReLU(True))
        self.upsample_1 = nn.Upsample(scale_factor=2, mode='bilinear')
        self.upsample_2 = nn.Upsample(scale_factor=2, mode='bilinear')
        self.upsample_3 = nn.Upsample(scale_factor=2, mode='bilinear')
    def forward(self, input1):
        # Encoder stage 1.
        output0 = self.level1(input1)
        # Encoder stage 2 with skip merge.
        output1_0 = self.level2(output0)
        output1 = self.level2_0(output1_0)
        output1 = self.level2_1(output1)
        output1 = self.br_2(torch.cat([output1_0, output1], 1))
        # Encoder stage 3 + deep residual stack.
        output2_0 = self.level3_0(output1)
        output2 = self.level3_1(output2_0)
        output2 = self.level3_2(output2)
        output3 = self.level4_1(output2)
        output3 = self.level4_2(output3)
        output3 = self.level4_3(output3)
        output3 = self.br_4(torch.cat([output2_0, output3], 1))
        # PSP decode at the deepest level, then upsample.
        output3 = self.classifier(torch.cat([output3, self.p1(output3), self.p2(output3), self.p3(output3), self.p4(output3)], 1))
        output3 = self.upsample_3(output3)
        combine_up_23 = torch.cat([output3, output1], 1)
        output23_hook = self.class_1(torch.cat([combine_up_23, self.p10(combine_up_23), self.p20(combine_up_23), self.p30(combine_up_23), self.p40(combine_up_23)], 1))
        output23_hook = self.upsample_2(output23_hook)
        # NOTE(review): decoder features precede the stage-1 features in this
        # concat; verify the ordering is intentional (channel count is the
        # same either way, but class_0 sees the channels in this order).
        combine_up = torch.cat([output23_hook, output0], 1)
        output0_hook = self.class_0(torch.cat([combine_up, self.p01(combine_up), self.p02(combine_up), self.p03(combine_up), self.p04(combine_up)], 1))
        classifier = self.upsample_1(output0_hook)
        return classifier
|
class ResNetD1_YNet(nn.Module):
    """Y-Net that jointly learns segmentation and diagnostic classification,
    with RCB (BasicResidualBlock) blocks as encoding units.

    The segmentation trunk is an (optionally pre-trained) ResNetD1 whose
    children are replayed positionally in forward(); a classification head
    is attached to the deepest encoder features. Returns
    (segmentation map, diagnosis logits).
    """
    def __init__(self, classes, diagClasses, segNetFile=None):
        # classes: number of segmentation classes
        # diagClasses: number of image-level diagnosis classes
        # segNetFile: optional path to pre-trained ResNetD1 weights
        super().__init__()
        # Classification branch on top of the 512-channel encoder hook.
        self.level4_0 = DownSamplerA(512, 128)
        self.level4_1 = BasicResidualBlock(128, 128, 0.3)
        self.level4_2 = BasicResidualBlock(128, 128, 0.3)
        self.br_con_4 = BR(256)
        self.level5_0 = DownSamplerA(256, 64)
        self.level5_1 = BasicResidualBlock(64, 64, 0.3)
        self.level5_2 = BasicResidualBlock(64, 64, 0.3)
        self.br_con_5 = BR(128)
        self.global_Avg = nn.AdaptiveAvgPool2d(1)
        self.fc1 = nn.Linear(128, 64)
        self.fc2 = nn.Linear(64, diagClasses)
        self.segNet = ResNetD1(classes)
        if (segNetFile is not None):
            print('Loading segmentation pre-trained model')
            self.segNet.load_state_dict(torch.load(segNetFile))
        # NOTE(review): this shadows nn.Module.modules() with a plain list.
        # forward() indexes these children positionally, so the attribute
        # declaration order of ResNetD1.__init__ must not change.
        self.modules = []
        for (i, m) in enumerate(self.segNet.children()):
            self.modules.append(m)
    def forward(self, input1):
        # Replay of the ResNetD1 forward pass through the indexed children
        # (indices follow ResNetD1.__init__ declaration order).
        output0 = self.modules[0](input1)
        output1_0 = self.modules[6](output0)
        output1 = self.modules[7](output1_0)
        output1 = self.modules[8](output1)
        output1 = self.modules[14](torch.cat([output1_0, output1], 1))
        output2_0 = self.modules[15](output1)
        output2 = self.modules[16](output2_0)
        output2 = self.modules[17](output2)
        output3 = self.modules[18](output2)
        output3 = self.modules[19](output3)
        output3 = self.modules[20](output3)
        # Deepest encoder features; also the input to the diagnosis branch.
        output3_hook = self.modules[25](torch.cat([output2_0, output3], 1))
        output3 = self.modules[26](torch.cat([output3_hook, self.modules[21](output3_hook), self.modules[22](output3_hook), self.modules[23](output3_hook), self.modules[24](output3_hook)], 1))
        output3 = self.modules[29](output3)
        combine_up_23 = torch.cat([output3, output1], 1)
        output23_hook = self.modules[13](torch.cat([combine_up_23, self.modules[9](combine_up_23), self.modules[10](combine_up_23), self.modules[11](combine_up_23), self.modules[12](combine_up_23)], 1))
        output23_hook = self.modules[28](output23_hook)
        combine_up = torch.cat([output0, output23_hook], 1)
        output0_hook = self.modules[5](torch.cat([combine_up, self.modules[1](combine_up), self.modules[2](combine_up), self.modules[3](combine_up), self.modules[4](combine_up)], 1))
        # Segmentation output at input resolution.
        classifier = self.modules[27](output0_hook)
        # Diagnosis branch: downsample the deepest hook, pool, two FC layers.
        l5_0 = self.level4_0(output3_hook)
        l5_1 = self.level4_1(l5_0)
        l5_2 = self.level4_2(l5_1)
        l5_con = self.br_con_4(torch.cat([l5_0, l5_2], 1))
        l6_0 = self.level5_0(l5_con)
        l6_1 = self.level5_1(l6_0)
        l6_2 = self.level5_2(l6_1)
        l6_con = self.br_con_5(torch.cat([l6_0, l6_2], 1))
        glbAvg = self.global_Avg(l6_con)
        flatten = glbAvg.view(glbAvg.size(0), (- 1))
        fc1 = self.fc1(flatten)
        diagClass = self.fc2(fc1)
        return (classifier, diagClass)
|
def make_dot(var, params=None):
    """Produce a Graphviz representation of a PyTorch autograd graph.

    Blue nodes are Variables that require grad; orange nodes are tensors
    saved for backward in torch.autograd.Function.

    Args:
        var: output Variable whose `.grad_fn` graph is traversed.
        params: optional dict of (name, Variable) used to label parameter
            nodes that require grad.

    Returns:
        graphviz.Digraph ready for rendering.
    """
    if (params is not None):
        # Fixed: `params.values()[0]` raises TypeError in Python 3 because
        # dict views are not subscriptable; take the first value via iter().
        assert isinstance(next(iter(params.values())), Variable)
        param_map = {id(v): k for (k, v) in params.items()}
    node_attr = dict(style='filled', shape='box', align='left', fontsize='12', ranksep='0.1', height='0.2')
    dot = Digraph(node_attr=node_attr, graph_attr=dict(size='12,12'))
    seen = set()
    def size_to_str(size):
        # e.g. torch.Size([2, 3]) -> "(2, 3)"
        return (('(' + ', '.join([('%d' % v) for v in size])) + ')')
    def add_nodes(var):
        # Depth-first walk over the grad_fn graph; `seen` prevents revisits.
        if (var not in seen):
            if torch.is_tensor(var):
                dot.node(str(id(var)), size_to_str(var.size()), fillcolor='orange')
            elif hasattr(var, 'variable'):
                u = var.variable
                name = (param_map[id(u)] if (params is not None) else '')
                node_name = ('%s\n %s' % (name, size_to_str(u.size())))
                dot.node(str(id(var)), node_name, fillcolor='lightblue')
            else:
                dot.node(str(id(var)), str(type(var).__name__))
            seen.add(var)
            if hasattr(var, 'next_functions'):
                for u in var.next_functions:
                    if (u[0] is not None):
                        dot.edge(str(id(u[0])), str(id(var)))
                        add_nodes(u[0])
            if hasattr(var, 'saved_tensors'):
                for t in var.saved_tensors:
                    dot.edge(str(id(t)), str(id(var)))
                    add_nodes(t)
    add_nodes(var.grad_fn)
    return dot
|
def val(args, val_loader, model, criterion, criterion1):
    """Run one validation epoch of the joint segmentation+diagnosis model.

    Args:
        args: namespace providing `classes`, `diagClasses`, `onGPU`.
        val_loader: yields (image, seg_target, diag_target) batches.
        model: network returning (seg_output, diag_output).
        criterion: segmentation loss; criterion1: classification loss.

    Returns:
        (avg seg loss, seg overall acc, per-class acc, per-class IoU, mIoU,
         avg class loss, diag overall acc, per-class acc, per-class IoU, mIoU).
    """
    model.eval()
    iouEvalVal = iouEval(args.classes)
    iouDiagEvalVal = iouEval(args.diagClasses)
    epoch_loss = []
    class_loss = []
    total_batches = len(val_loader)
    # Validation needs no autograd graph: saves memory and time.
    with torch.no_grad():
        for (i, (input, target, target2)) in enumerate(val_loader):
            start_time = time.time()
            if (args.onGPU == True):
                input = input.cuda()
                target = target.cuda()
                target2 = target2.cuda()
            input_var = torch.autograd.Variable(input)
            target_var = torch.autograd.Variable(target)
            target2_var = torch.autograd.Variable(target2)
            (output, output1) = model(input_var)
            loss = criterion(output, target_var)
            loss1 = criterion1(output1, target2_var)
            # Fixed: `.item()` replaces the 0.3-era `loss.data[0]`, which
            # raises IndexError on 0-dim tensors in PyTorch >= 0.5.
            epoch_loss.append(loss.item())
            class_loss.append(loss1.item())
            time_taken = (time.time() - start_time)
            iouEvalVal.addBatch(output.max(1)[1].data, target_var.data)
            iouDiagEvalVal.addBatch(output1.max(1)[1].data, target2_var.data)
            print(('[%d/%d] loss: %.3f time: %.2f' % (i, total_batches, loss.item(), time_taken)))
    average_epoch_loss_val = (sum(epoch_loss) / len(epoch_loss))
    average_epoch_class_loss = (sum(class_loss) / len(class_loss))
    (overall_acc, per_class_acc, per_class_iu, mIOU) = iouEvalVal.getMetric()
    (overall_acc1, per_class_acc1, per_class_iu1, mIOU1) = iouDiagEvalVal.getMetric()
    return (average_epoch_loss_val, overall_acc, per_class_acc, per_class_iu, mIOU, average_epoch_class_loss, overall_acc1, per_class_acc1, per_class_iu1, mIOU1)
|
def train(args, train_loader, model, criterion, criterion1, optimizer, epoch):
    """Run one training epoch of the joint segmentation+diagnosis model.

    Args:
        args: namespace providing `classes`, `diagClasses`, `onGPU`.
        train_loader: yields (image, seg_target, diag_target) batches.
        model: network returning (seg_output, diag_output).
        criterion: segmentation loss; criterion1: classification loss.
        optimizer: optimizer stepping both heads.
        epoch: current epoch index (kept for interface parity; unused here).

    Returns:
        Same 10-tuple of losses and metrics as val().
    """
    model.train()
    iouEvalTrain = iouEval(args.classes)
    iouDiagEvalTrain = iouEval(args.diagClasses)
    epoch_loss = []
    class_loss = []
    total_batches = len(train_loader)
    for (i, (input, target, target2)) in enumerate(train_loader):
        start_time = time.time()
        if (args.onGPU == True):
            input = input.cuda()
            target = target.cuda()
            target2 = target2.cuda()
        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)
        target2_var = torch.autograd.Variable(target2)
        (output, output1) = model(input_var)
        loss = criterion(output, target_var)
        loss1 = criterion1(output1, target2_var)
        # Fixed: the original called optimizer.zero_grad() twice back-to-back;
        # a single clear before the backward passes is sufficient.
        optimizer.zero_grad()
        # Keep the graph alive for the second backward over shared layers.
        loss1.backward(retain_graph=True)
        loss.backward()
        optimizer.step()
        # Fixed: `.item()` replaces `loss.data[0]` (IndexError on 0-dim
        # tensors in PyTorch >= 0.5).
        epoch_loss.append(loss.item())
        class_loss.append(loss1.item())
        time_taken = (time.time() - start_time)
        iouEvalTrain.addBatch(output.max(1)[1].data, target_var.data)
        iouDiagEvalTrain.addBatch(output1.max(1)[1].data, target2_var.data)
        print(('[%d/%d] loss: %.3f time:%.2f' % (i, total_batches, loss.item(), time_taken)))
    average_epoch_loss_train = (sum(epoch_loss) / len(epoch_loss))
    average_epoch_class_loss = (sum(class_loss) / len(class_loss))
    (overall_acc, per_class_acc, per_class_iu, mIOU) = iouEvalTrain.getMetric()
    (overall_acc1, per_class_acc1, per_class_iu1, mIOU1) = iouDiagEvalTrain.getMetric()
    return (average_epoch_loss_train, overall_acc, per_class_acc, per_class_iu, mIOU, average_epoch_class_loss, overall_acc1, per_class_acc1, per_class_iu1, mIOU1)
|
def save_checkpoint(state, filenameCheckpoint='checkpoint.pth.tar'):
    """Serialize a checkpoint dict (epoch, state_dict, optimizer, ...) to disk."""
    torch.save(state, filenameCheckpoint)
|
def trainValidateSegmentation(args):
    """End-to-end training driver for the Y-Net joint model.

    Loads (or builds and caches) the dataset, constructs the model, losses,
    transforms and loaders, optionally resumes from a checkpoint, then runs
    train/validate epochs with checkpointing and per-epoch logging.
    """
    # Load the cached pre-processed dataset, or build and cache it.
    if (not os.path.isfile(args.cached_data_file)):
        dataLoader = ld.LoadData(args.data_dir, args.classes, args.diagClasses, args.cached_data_file)
        if (dataLoader == None):
            print('Error while caching the data. Please check')
            exit((- 1))
        data = dataLoader.processData()
    else:
        data = pickle.load(open(args.cached_data_file, 'rb'))
    # Pick the encoder variant (ESP-based C1 or RCB-based D1).
    if (args.modelType == 'C1'):
        model = net.ResNetC1_YNet(args.classes, args.diagClasses, args.pretrainedSeg)
    elif (args.modelType == 'D1'):
        model = net.ResNetD1_YNet(args.classes, args.diagClasses, args.pretrainedSeg)
    else:
        print('Please select the correct model. Exiting!!')
        exit((- 1))
    if (args.onGPU == True):
        model = model.cuda()
    if (not os.path.exists(args.savedir)):
        os.mkdir(args.savedir)
    # Optionally render the autograd graphs of both heads with graphviz.
    if (args.visualizeNet == True):
        x = Variable(torch.randn(1, 3, args.inWidth, args.inHeight))
        if (args.onGPU == True):
            x = x.cuda()
        (y, y1) = model.forward(x)
        g = viz.make_dot(y)
        g1 = viz.make_dot(y1)
        g.render((args.savedir + '/model_seg.png'), view=False)
        g1.render((args.savedir + '/model_class.png'), view=False)
    n_param = sum([np.prod(param.size()) for param in model.parameters()])
    print(('Network parameters: ' + str(n_param)))
    print('Weights to handle class-imbalance')
    weight = torch.from_numpy(data['classWeights'])
    print(weight)
    criteria = CrossEntropyLoss2d(weight)
    # NOTE(review): rebinding the local `weight` to its CUDA copy does not
    # affect `criteria`, which already holds the original tensor; the loss is
    # moved to the GPU via criteria.cuda() below. Confirm this is intended.
    if (args.onGPU == True):
        weight = weight.cuda()
    criteria1 = torch.nn.CrossEntropyLoss()
    if (args.onGPU == True):
        criteria = criteria.cuda()
        criteria1 = criteria1.cuda()
    # Two train-time transform pipelines: native-resolution crops and
    # zoomed 512x512 crops; validation only converts to tensors.
    trainDataset = myTransforms.Compose([myTransforms.RandomCropResize(20), myTransforms.RandomHorizontalFlip(), myTransforms.ToTensor(args.scaleIn)])
    trainDataset3 = myTransforms.Compose([myTransforms.Zoom(512, 512), myTransforms.RandomCropResize(20), myTransforms.RandomHorizontalFlip(), myTransforms.ToTensor(args.scaleIn)])
    valDataset = myTransforms.Compose([myTransforms.ToTensor(args.scaleIn)])
    trainLoader = torch.utils.data.DataLoader(myDataLoader.MyDataset(data['trainIm'], data['trainAnnot'], data['trainDiag'], transform=trainDataset), batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True)
    trainLoader3 = torch.utils.data.DataLoader(myDataLoader.MyDataset(data['trainIm'], data['trainAnnot'], data['trainDiag'], transform=trainDataset3), batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True)
    valLoader = torch.utils.data.DataLoader(myDataLoader.MyDataset(data['valIm'], data['valAnnot'], data['valDiag'], transform=valDataset), batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True)
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=0.0005)
    if (args.onGPU == True):
        cudnn.benchmark = True
    start_epoch = 0
    if args.resume:
        if os.path.isfile(args.resumeLoc):
            print("=> loading checkpoint '{}'".format(args.resumeLoc))
            checkpoint = torch.load(args.resumeLoc)
            start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            # Fixed: report the checkpoint path (args.resumeLoc), not the
            # boolean resume flag (args.resume).
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resumeLoc, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resumeLoc))
    logFileLoc = (args.savedir + args.logFile)
    if os.path.isfile(logFileLoc):
        logger = open(logFileLoc, 'a')
    else:
        logger = open(logFileLoc, 'w')
        logger.write(('Parameters: %s' % str(n_param)))
        # Fixed: header now has six columns to match the per-epoch row below
        # (lr added) and closes the 'mIOU (val)' parenthesis.
        logger.write(('\n%s\t%s\t%s\t%s\t%s\t%s' % ('Epoch', 'Loss(Tr)', 'Loss(val)', 'mIOU (tr)', 'mIOU (val)', 'lr')))
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.step_loss, gamma=0.1)
    for epoch in range(start_epoch, args.max_epochs):
        scheduler.step(epoch)
        lr = 0
        for param_group in optimizer.param_groups:
            lr = param_group['lr']
        # First pass at the zoomed 512x512 scale (metrics discarded), then
        # the native-scale pass whose metrics are logged.
        train(args, trainLoader3, model, criteria, criteria1, optimizer, epoch)
        (lossTr, overall_acc_tr, per_class_acc_tr, per_class_iu_tr, mIOU_tr, lossTr1, overall_acc_tr1, per_class_acc_tr1, per_class_iu_tr1, mIOU_tr1) = train(args, trainLoader, model, criteria, criteria1, optimizer, epoch)
        (lossVal, overall_acc_val, per_class_acc_val, per_class_iu_val, mIOU_val, lossVal1, overall_acc_val1, per_class_acc_val1, per_class_iu_val1, mIOU_val1) = val(args, valLoader, model, criteria, criteria1)
        save_checkpoint({'epoch': (epoch + 1), 'arch': str(model), 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict(), 'lossTr': lossTr, 'lossVal': lossVal, 'iouTr': mIOU_tr, 'iouVal': mIOU_val}, (args.savedir + '/checkpoint.pth.tar'))
        model_file_name = (((args.savedir + '/model_') + str((epoch + 1))) + '.pth')
        torch.save(model.state_dict(), model_file_name)
        # Per-epoch detailed accuracy dump.
        with open((((args.savedir + 'acc_') + str(epoch)) + '.txt'), 'w') as log:
            log.write(('\nEpoch: %d\t Overall Acc (Tr): %.4f\t Overall Acc (Val): %.4f\t mIOU (Tr): %.4f\t mIOU (Val): %.4f' % (epoch, overall_acc_tr, overall_acc_val, mIOU_tr, mIOU_val)))
            log.write('\n')
            log.write(('Per Class Training Acc: ' + str(per_class_acc_tr)))
            log.write('\n')
            log.write(('Per Class Validation Acc: ' + str(per_class_acc_val)))
            log.write('\n')
            log.write(('Per Class Training mIOU: ' + str(per_class_iu_tr)))
            log.write('\n')
            log.write(('Per Class Validation mIOU: ' + str(per_class_iu_val)))
            log.write('Classification Results')
            log.write(('\nEpoch: %d\t Overall Acc (Tr): %.4f\t Overall Acc (Val): %.4f\t mIOU (Tr): %.4f\t mIOU (Val): %.4f' % (epoch, overall_acc_tr1, overall_acc_val1, mIOU_tr1, mIOU_val1)))
            log.write('\n')
            log.write(('Per Class Training Acc: ' + str(per_class_acc_tr1)))
            log.write('\n')
            log.write(('Per Class Validation Acc: ' + str(per_class_acc_val1)))
            log.write('\n')
            log.write(('Per Class Training mIOU: ' + str(per_class_iu_tr1)))
            log.write('\n')
            log.write(('Per Class Validation mIOU: ' + str(per_class_iu_val1)))
        logger.write(('\n%d\t\t%.4f\t\t%.4f\t\t%.4f\t\t%.4f\t\t%.4f' % (epoch, lossTr, lossVal, mIOU_tr, mIOU_val, lr)))
        logger.flush()
        print((('Epoch : ' + str(epoch)) + ' Details'))
        print(('\nEpoch No.: %d\tTrain Loss = %.4f\tVal Loss = %.4f\t mIOU(tr) = %.4f\t mIOU(val) = %.4f' % (epoch, lossTr, lossVal, mIOU_tr, mIOU_val)))
    logger.close()
|
class Data():
    """Builds the training and testing data loaders from `args`.

    Exposes `loader_train` (None when args.test_only) and `loader_test`.
    """
    def __init__(self, args):
        # Both branches of the original if/else set the same collate_fn;
        # only pin_memory actually depends on CPU mode, so build the kwargs
        # dict in one expression.
        kwargs = {'collate_fn': default_collate, 'pin_memory': (not args.cpu)}
        self.loader_train = None
        if (not args.test_only):
            # Dataset class name doubles as its module name (lower-cased).
            module_train = import_module(('data.' + args.data_train.lower()))
            trainset = getattr(module_train, args.data_train)(args)
            self.loader_train = MSDataLoader(args, trainset, batch_size=args.batch_size, shuffle=True, **kwargs)
        if (args.data_test in ['Set5', 'Set14', 'B100', 'Urban100']):
            # Standard benchmark sets share one loader class (with or
            # without the noisy variant).
            if (not args.benchmark_noise):
                module_test = import_module('data.benchmark')
                testset = getattr(module_test, 'Benchmark')(args, train=False)
            else:
                module_test = import_module('data.benchmark_noise')
                testset = getattr(module_test, 'BenchmarkNoise')(args, train=False)
        else:
            module_test = import_module(('data.' + args.data_test.lower()))
            testset = getattr(module_test, args.data_test)(args, train=False)
        self.loader_test = MSDataLoader(args, testset, batch_size=1, shuffle=False, **kwargs)
|
class Benchmark(srdata.SRData):
    """Loader for the standard SR benchmark sets (Set5/Set14/B100/Urban100)."""

    def __init__(self, args, train=True):
        super(Benchmark, self).__init__(args, train, benchmark=True)

    def _scan(self):
        """Collect HR image paths plus the matching LR path for each scale."""
        hr_paths = []
        lr_paths = [[] for _ in self.scale]
        for entry in os.scandir(self.dir_hr):
            stem = os.path.splitext(entry.name)[0]
            hr_paths.append(os.path.join(self.dir_hr, stem + self.ext))
            for si, s in enumerate(self.scale):
                lr_paths[si].append(os.path.join(
                    self.dir_lr, 'X{}/{}x{}{}'.format(s, stem, s, self.ext)))
        # scandir order is arbitrary: sort every list so HR/LR stay paired.
        hr_paths.sort()
        for paths in lr_paths:
            paths.sort()
        return hr_paths, lr_paths

    def _set_filesystem(self, dir_data):
        """Point the loader at <dir_data>/benchmark/<set>/{HR,LR_bicubic}."""
        self.apath = os.path.join(dir_data, 'benchmark', self.args.data_test)
        self.dir_hr = os.path.join(self.apath, 'HR')
        self.dir_lr = os.path.join(self.apath, 'LR_bicubic')
        self.ext = '.png'
|
class Demo(data.Dataset):
    """Inference-only dataset over every .png/.jp* file in args.dir_demo."""

    def __init__(self, args, train=False):
        self.args = args
        self.name = 'Demo'
        self.scale = args.scale
        self.idx_scale = 0
        self.train = False
        self.benchmark = False
        # Substring match mirrors the original f.find(...) >= 0 checks.
        candidates = (
            os.path.join(args.dir_demo, f)
            for f in os.listdir(args.dir_demo)
            if ('.png' in f) or ('.jp' in f)
        )
        self.filelist = sorted(candidates)

    def __getitem__(self, idx):
        """Return (lr tensor, -1 placeholder hr, basename without extension)."""
        path = self.filelist[idx]
        stem, _ = os.path.splitext(os.path.basename(path))
        lr = misc.imread(path)
        lr = common.set_channel([lr], self.args.n_colors)[0]
        return common.np2Tensor([lr], self.args.rgb_range)[0], (- 1), stem

    def __len__(self):
        return len(self.filelist)

    def set_scale(self, idx_scale):
        self.idx_scale = idx_scale
|
class DIV2K(srdata.SRData):
    """DIV2K train/val dataset; repeats itself so one epoch spans
    args.test_every optimization batches."""

    def __init__(self, args, train=True):
        super(DIV2K, self).__init__(args, train)
        self.repeat = args.test_every // (args.n_train // args.batch_size)

    def _scan(self):
        """Build HR paths and per-scale LR paths for the 1-based index range."""
        if self.train:
            begin, end = 0, self.args.n_train
        else:
            begin, end = self.args.n_train, self.args.offset_val + self.args.n_val
        list_hr = []
        list_lr = [[] for _ in self.scale]
        for number in range(begin + 1, end + 1):
            stem = '{:0>4}'.format(number)
            list_hr.append(os.path.join(self.dir_hr, stem + self.ext))
            for si, s in enumerate(self.scale):
                list_lr[si].append(os.path.join(
                    self.dir_lr, 'X{}/{}x{}{}'.format(s, stem, s, self.ext)))
        return list_hr, list_lr

    def _set_filesystem(self, dir_data):
        self.apath = dir_data + '/DIV2K'
        self.dir_hr = os.path.join(self.apath, 'DIV2K_train_HR')
        self.dir_lr = os.path.join(self.apath, 'DIV2K_train_LR_bicubic')
        self.ext = '.png'

    def _name_hrbin(self):
        return os.path.join(self.apath, 'bin', '{}_bin_HR.npy'.format(self.split))

    def _name_lrbin(self, scale):
        return os.path.join(self.apath, 'bin', '{}_bin_LR_X{}.npy'.format(self.split, scale))

    def __len__(self):
        size = len(self.images_hr)
        return size * self.repeat if self.train else size

    def _get_index(self, idx):
        # Training indices wrap around the real dataset size.
        return idx % len(self.images_hr) if self.train else idx
|
class MyImage(data.Dataset):
    """Inference dataset over <testpath>/<testset>/x<scale> image files.

    Files that cannot be read as images are skipped during the scan.
    """
    def __init__(self, args, train=False):
        self.args = args
        self.train = False
        self.name = 'MyImage'
        self.scale = args.scale
        self.idx_scale = 0
        apath = ((((args.testpath + '/') + args.testset) + '/x') + str(args.scale[0]))
        self.filelist = []
        self.imnamelist = []
        if (not train):
            for f in os.listdir(apath):
                filename = os.path.join(apath, f)
                # Probe-read to filter out unreadable / non-image files.
                # Fixed: the original bare `except: pass` also swallowed
                # KeyboardInterrupt and SystemExit; catch Exception only.
                try:
                    misc.imread(filename)
                except Exception:
                    continue
                self.filelist.append(filename)
                self.imnamelist.append(f)

    def __getitem__(self, idx):
        """Return (lr tensor, -1 placeholder hr, basename without extension)."""
        filename = os.path.split(self.filelist[idx])[(- 1)]
        (filename, _) = os.path.splitext(filename)
        lr = misc.imread(self.filelist[idx])
        lr = common.set_channel([lr], self.args.n_colors)[0]
        return (common.np2Tensor([lr], self.args.rgb_range)[0], (- 1), filename)

    def __len__(self):
        return len(self.filelist)

    def set_scale(self, idx_scale):
        self.idx_scale = idx_scale
|
def _ms_loop(dataset, index_queue, data_queue, done_event, collate_fn, scale, seed, init_fn, worker_id):
    """Worker-process loop for multi-scale data loading.

    Mirrors torch's private DataLoader worker loop, but additionally picks a
    random scale index per batch (when training with multiple scales) and
    appends it to the collated samples. Runs until `done_event` is set.
    """
    try:
        # Enable shared-memory tensors for the collated batches.
        collate._use_shared_memory = True
        signal_handling._set_worker_signal_handlers()
        torch.set_num_threads(1)
        # Per-worker determinism: seed both RNGs with the worker's seed.
        random.seed(seed)
        torch.manual_seed(seed)
        data_queue.cancel_join_thread()
        if (init_fn is not None):
            init_fn(worker_id)
        # Watchdog detects a dead parent process so workers do not hang.
        watchdog = ManagerWatchdog()
        while watchdog.is_alive():
            try:
                r = index_queue.get(timeout=MP_STATUS_CHECK_INTERVAL)
            except queue.Empty:
                continue
            if (r is None):
                # None is the shutdown sentinel sent by the main process.
                assert done_event.is_set()
                return
            elif done_event.is_set():
                # Shutdown already requested: drain remaining indices.
                continue
            (idx, batch_indices) = r
            try:
                # Choose one scale for the whole batch (training only).
                idx_scale = 0
                if ((len(scale) > 1) and dataset.train):
                    idx_scale = random.randrange(0, len(scale))
                    dataset.set_scale(idx_scale)
                samples = collate_fn([dataset[i] for i in batch_indices])
                # The chosen scale index rides along with the batch.
                samples.append(idx_scale)
            except Exception:
                # Ship the traceback back to the main process instead of dying.
                data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
            else:
                data_queue.put((idx, samples))
                del samples
    except KeyboardInterrupt:
        # Main process handles SIGINT; workers exit quietly.
        pass
|
class _MSDataLoaderIter(_DataLoaderIter):
    """Iterator for MSDataLoader: re-implements torch's private
    _DataLoaderIter setup but spawns `_ms_loop` workers so each batch is
    drawn at a (possibly random) scale.

    NOTE(review): this __init__ does not call super().__init__ and mirrors a
    specific torch-internal layout; it is tightly coupled to the torch
    version in use — verify against the installed torch before touching.
    """
    def __init__(self, loader):
        # Copy loader configuration onto the iterator.
        self.dataset = loader.dataset
        self.scale = loader.scale
        self.collate_fn = loader.collate_fn
        self.batch_sampler = loader.batch_sampler
        self.num_workers = loader.num_workers
        self.pin_memory = (loader.pin_memory and torch.cuda.is_available())
        self.timeout = loader.timeout
        self.sample_iter = iter(self.batch_sampler)
        # NOTE(review): base_seed is computed again below for the
        # multi-worker path; this first value is used only in the
        # single-process case (if at all) — confirm the duplication.
        base_seed = torch.LongTensor(1).random_().item()
        if (self.num_workers > 0):
            self.worker_init_fn = loader.worker_init_fn
            self.worker_queue_idx = 0
            self.worker_result_queue = multiprocessing.Queue()
            self.batches_outstanding = 0
            self.worker_pids_set = False
            self.shutdown = False
            self.send_idx = 0
            self.rcvd_idx = 0
            self.reorder_dict = {}
            self.done_event = multiprocessing.Event()
            base_seed = torch.LongTensor(1).random_()[0]
            self.index_queues = []
            self.workers = []
            # One _ms_loop worker per configured thread, each with its own
            # index queue and a distinct seed (base_seed + i).
            for i in range(self.num_workers):
                index_queue = multiprocessing.Queue()
                index_queue.cancel_join_thread()
                w = multiprocessing.Process(target=_ms_loop, args=(self.dataset, index_queue, self.worker_result_queue, self.done_event, self.collate_fn, self.scale, (base_seed + i), self.worker_init_fn, i))
                w.daemon = True
                w.start()
                self.index_queues.append(index_queue)
                self.workers.append(w)
            if self.pin_memory:
                # Extra thread copies finished batches into pinned memory.
                self.data_queue = queue.Queue()
                pin_memory_thread = threading.Thread(target=_utils.pin_memory._pin_memory_loop, args=(self.worker_result_queue, self.data_queue, torch.cuda.current_device(), self.done_event))
                pin_memory_thread.daemon = True
                pin_memory_thread.start()
                self.pin_memory_thread = pin_memory_thread
            else:
                self.data_queue = self.worker_result_queue
            # Register worker PIDs so torch can reap them on SIGCHLD.
            _utils.signal_handling._set_worker_pids(id(self), tuple((w.pid for w in self.workers)))
            _utils.signal_handling._set_SIGCHLD_handler()
            self.worker_pids_set = True
            # Prefetch: keep two outstanding batches per worker.
            for _ in range((2 * self.num_workers)):
                self._put_indices()
|
class MSDataLoader(DataLoader):
    """DataLoader that carries the list of SR scales and iterates through
    _MSDataLoaderIter so every batch can be drawn at a chosen scale."""

    def __init__(self, cfg, *args, **kwargs):
        # Worker count comes from the config; everything else is forwarded
        # verbatim to torch's DataLoader.
        super(MSDataLoader, self).__init__(*args, num_workers=cfg.n_threads, **kwargs)
        self.scale = cfg.scale

    def __iter__(self):
        return _MSDataLoaderIter(self)
|
class Adversarial(nn.Module):
    """Adversarial loss: trains an internal discriminator for `gan_k` steps
    per call, then returns the generator loss for (fake, real).

    Supports 'GAN' (non-saturating BCE), 'WGAN' (weight clipping) and
    'WGAN_GP' (gradient penalty).
    """
    def __init__(self, args, gan_type):
        super(Adversarial, self).__init__()
        self.gan_type = gan_type
        self.gan_k = args.gan_k
        self.discriminator = discriminator.Discriminator(args, gan_type)
        if (gan_type != 'WGAN_GP'):
            self.optimizer = utility.make_optimizer(args, self.discriminator)
        else:
            # WGAN-GP paper settings: Adam with beta1 = 0.
            self.optimizer = optim.Adam(self.discriminator.parameters(), betas=(0, 0.9), eps=1e-08, lr=1e-05)
        self.scheduler = utility.make_scheduler(args, self.optimizer)

    def forward(self, fake, real):
        """Update the discriminator gan_k times, then return loss_g for
        the generator. Stores the mean discriminator loss in self.loss."""
        # Detach so discriminator updates do not backprop into the generator.
        fake_detach = fake.detach()
        self.loss = 0
        for _ in range(self.gan_k):
            self.optimizer.zero_grad()
            d_fake = self.discriminator(fake_detach)
            d_real = self.discriminator(real)
            if (self.gan_type == 'GAN'):
                label_fake = torch.zeros_like(d_fake)
                label_real = torch.ones_like(d_real)
                loss_d = (F.binary_cross_entropy_with_logits(d_fake, label_fake) + F.binary_cross_entropy_with_logits(d_real, label_real))
            elif (self.gan_type.find('WGAN') >= 0):
                loss_d = (d_fake - d_real).mean()
                if (self.gan_type.find('GP') >= 0):
                    # Fixed: one interpolation coefficient per sample.
                    # The original `torch.rand_like(fake).view(-1, 1, 1, 1)`
                    # produced N*C*H*W coefficients, which cannot broadcast
                    # against the (N, C, H, W) inputs.
                    epsilon = torch.rand(fake_detach.size(0), 1, 1, 1, device=fake_detach.device, dtype=fake_detach.dtype)
                    hat = (fake_detach.mul((1 - epsilon)) + real.mul(epsilon))
                    hat.requires_grad = True
                    d_hat = self.discriminator(hat)
                    gradients = torch.autograd.grad(outputs=d_hat.sum(), inputs=hat, retain_graph=True, create_graph=True, only_inputs=True)[0]
                    gradients = gradients.view(gradients.size(0), (- 1))
                    gradient_norm = gradients.norm(2, dim=1)
                    # Penalty weight 10, per the WGAN-GP paper.
                    gradient_penalty = (10 * gradient_norm.sub(1).pow(2).mean())
                    loss_d += gradient_penalty
            self.loss += loss_d.item()
            loss_d.backward()
            self.optimizer.step()
            if (self.gan_type == 'WGAN'):
                # Plain WGAN enforces the Lipschitz constraint by clipping.
                for p in self.discriminator.parameters():
                    p.data.clamp_((- 1), 1)
        self.loss /= self.gan_k
        # Generator loss uses the non-detached fake so gradients flow back.
        d_fake_for_g = self.discriminator(fake)
        if (self.gan_type == 'GAN'):
            loss_g = F.binary_cross_entropy_with_logits(d_fake_for_g, label_real)
        elif (self.gan_type.find('WGAN') >= 0):
            loss_g = (- d_fake_for_g.mean())
        return loss_g

    def state_dict(self, *args, **kwargs):
        # NOTE(review): merging discriminator and optimizer state into one
        # flat dict risks key collisions; confirm the matching load path
        # expects this layout.
        state_discriminator = self.discriminator.state_dict(*args, **kwargs)
        state_optimizer = self.optimizer.state_dict()
        return dict(**state_discriminator, **state_optimizer)
|
class Discriminator(nn.Module):
    """VGG-style discriminator: a conv stack that alternates stride-2
    downsampling with stride-1 channel doubling, followed by a two-layer
    MLP scoring head."""

    def __init__(self, args, gan_type='GAN'):
        super(Discriminator, self).__init__()
        out_channels = 64
        depth = 7
        use_bn = True
        act = nn.LeakyReLU(negative_slope=0.2, inplace=True)

        layers = [common.BasicBlock(args.n_colors, out_channels, 3, bn=use_bn, act=act)]
        for i in range(depth):
            in_channels = out_channels
            if (i % 2) == 1:
                # Odd stages keep resolution and double the channels.
                stride = 1
                out_channels *= 2
            else:
                # Even stages halve the spatial resolution.
                stride = 2
            layers.append(common.BasicBlock(in_channels, out_channels, 3, stride=stride, bn=use_bn, act=act))
        self.features = nn.Sequential(*layers)

        # Spatial size after (depth + 1) // 2 stride-2 stages.
        patch_size = args.patch_size // (2 ** ((depth + 1) // 2))
        self.classifier = nn.Sequential(
            nn.Linear(out_channels * (patch_size ** 2), 1024),
            act,
            nn.Linear(1024, 1),
        )

    def forward(self, x):
        feats = self.features(x)
        return self.classifier(feats.view(feats.size(0), -1))
|
class VGG(nn.Module):
    """Perceptual (VGG19 feature-space) loss between SR output and HR target.

    conv_index selects the truncation point of the pretrained VGG19
    feature extractor: '22' keeps the first 8 layers, '54' the first 35.
    NOTE(review): any other conv_index leaves self.vgg unset.
    """
    def __init__(self, conv_index, rgb_range=1):
        super(VGG, self).__init__()
        vgg_features = models.vgg19(pretrained=True).features
        modules = [m for m in vgg_features]
        if (conv_index == '22'):
            self.vgg = nn.Sequential(*modules[:8])
        elif (conv_index == '54'):
            self.vgg = nn.Sequential(*modules[:35])
        # ImageNet normalization, scaled to the working RGB range.
        vgg_mean = (0.485, 0.456, 0.406)
        vgg_std = ((0.229 * rgb_range), (0.224 * rgb_range), (0.225 * rgb_range))
        self.sub_mean = common.MeanShift(rgb_range, vgg_mean, vgg_std)
        # Fixed: `self.vgg.requires_grad = False` only set a plain attribute
        # on the Sequential and left every VGG parameter trainable; freeze
        # the actual parameters instead.
        for p in self.vgg.parameters():
            p.requires_grad = False

    def forward(self, sr, hr):
        """Return the MSE between VGG features of `sr` and (no-grad) `hr`."""
        def _forward(x):
            x = self.sub_mean(x)
            x = self.vgg(x)
            return x
        vgg_sr = _forward(sr)
        with torch.no_grad():
            vgg_hr = _forward(hr.detach())
        loss = F.mse_loss(vgg_sr, vgg_hr)
        return loss
|
def default_conv(in_channels, out_channels, kernel_size, bias=True):
    """Return a stride-1 Conv2d whose padding preserves spatial size
    for odd kernel sizes."""
    same_padding = kernel_size // 2
    return nn.Conv2d(in_channels, out_channels, kernel_size,
                     padding=same_padding, bias=bias)
|
class MeanShift(nn.Conv2d):
    """Fixed 1x1 conv that shifts images by the (std-normalised) RGB mean.

    sign=-1 subtracts the mean (input normalisation); sign=+1 adds it back.
    The layer is frozen: its parameters never receive gradients.
    """
    def __init__(self, rgb_range, rgb_mean, rgb_std, sign=(- 1)):
        super(MeanShift, self).__init__(3, 3, kernel_size=1)
        std = torch.Tensor(rgb_std)
        # Identity per-channel scaling divided by the std.
        self.weight.data = torch.eye(3).view(3, 3, 1, 1)
        self.weight.data.div_(std.view(3, 1, 1, 1))
        self.bias.data = ((sign * rgb_range) * torch.Tensor(rgb_mean))
        self.bias.data.div_(std)
        # Fixed: `self.requires_grad = False` only set a plain attribute on
        # the module and left weight/bias trainable; freeze the actual
        # parameters instead.
        for p in self.parameters():
            p.requires_grad = False
|
class BasicBlock(nn.Sequential):
    """Conv2d -> optional BatchNorm2d -> optional activation, packaged as a
    Sequential. Padding is ``kernel_size // 2`` ('same' for odd kernels)."""

    def __init__(self, in_channels, out_channels, kernel_size,
                 stride=1, bias=False, bn=True, act=nn.ReLU(True)):
        layers = [
            nn.Conv2d(
                in_channels, out_channels, kernel_size,
                padding=kernel_size // 2, stride=stride, bias=bias,
            )
        ]
        if bn:
            layers.append(nn.BatchNorm2d(out_channels))
        if act is not None:
            layers.append(act)
        super(BasicBlock, self).__init__(*layers)
|
class ResBlock(nn.Module):
    """EDSR-style residual block: two convs (optional BN; activation after
    the first only), the result scaled by ``res_scale`` and added to the
    input."""

    def __init__(self, conv, n_feat, kernel_size,
                 bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
        super(ResBlock, self).__init__()
        layers = []
        for idx in range(2):
            layers.append(conv(n_feat, n_feat, kernel_size, bias=bias))
            if bn:
                layers.append(nn.BatchNorm2d(n_feat))
            if idx == 0:
                layers.append(act)
        self.body = nn.Sequential(*layers)
        self.res_scale = res_scale

    def forward(self, x):
        """Return ``x + res_scale * body(x)``."""
        residual = self.body(x).mul(self.res_scale)
        return residual + x
|
class Upsampler(nn.Sequential):
    """Sub-pixel upsampler for power-of-two scales or scale 3.

    Power-of-two scales repeat a (conv to 4x channels -> PixelShuffle(2))
    stage log2(scale) times; scale 3 uses a single (conv to 9x channels ->
    PixelShuffle(3)) stage. Other scales raise NotImplementedError.
    """

    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):
        stages = []
        if (scale & (scale - 1)) == 0:  # power of two
            for _ in range(int(math.log(scale, 2))):
                stages.append(conv(n_feat, 4 * n_feat, 3, bias))
                stages.append(nn.PixelShuffle(2))
                if bn:
                    stages.append(nn.BatchNorm2d(n_feat))
                if act:
                    stages.append(act())
        elif scale == 3:
            stages.append(conv(n_feat, 9 * n_feat, 3, bias))
            stages.append(nn.PixelShuffle(3))
            if bn:
                stages.append(nn.BatchNorm2d(n_feat))
            if act:
                stages.append(act())
        else:
            raise NotImplementedError
        super(Upsampler, self).__init__(*stages)
|
def make_model(args, parent=False):
    """Framework factory entry point: build a DRLN from parsed arguments.

    ``parent`` is accepted for interface compatibility with the caller's
    model-loading convention and is unused here.
    """
    return DRLN(args)
|
class CALayer(nn.Module):
    """Multi-dilation channel attention.

    Globally average-pools the input, runs three parallel reduction convs
    (dilations 3, 5, 7), concatenates them, and maps back to per-channel
    sigmoid gates that rescale the input.
    """

    def __init__(self, channel, reduction=16):
        super(CALayer, self).__init__()
        reduced = channel // reduction
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.c1 = ops.BasicBlock(channel, reduced, 3, 1, 3, 3)
        self.c2 = ops.BasicBlock(channel, reduced, 3, 1, 5, 5)
        self.c3 = ops.BasicBlock(channel, reduced, 3, 1, 7, 7)
        self.c4 = ops.BasicBlockSig(reduced * 3, channel, 3, 1, 1)

    def forward(self, x):
        pooled = self.avg_pool(x)
        branches = [self.c1(pooled), self.c2(pooled), self.c3(pooled)]
        gate = self.c4(torch.cat(branches, dim=1))
        return x * gate
|
class Block(nn.Module):
    """Densely connected residual unit.

    Three residual blocks each consume the running concatenation of the
    input and all previous outputs (widths 1x, 2x, 4x); the final 8x-wide
    concatenation is compressed by a 1x1 conv and gated by channel
    attention. ``group`` is accepted for interface compatibility but unused.
    """

    def __init__(self, in_channels, out_channels, group=1):
        super(Block, self).__init__()
        self.r1 = ops.ResidualBlock(in_channels, out_channels)
        self.r2 = ops.ResidualBlock(in_channels * 2, out_channels * 2)
        self.r3 = ops.ResidualBlock(in_channels * 4, out_channels * 4)
        self.g = ops.BasicBlock(in_channels * 8, out_channels, 1, 1, 0)
        self.ca = CALayer(in_channels)

    def forward(self, x):
        cat1 = torch.cat([x, self.r1(x)], dim=1)          # 2x channels
        cat2 = torch.cat([cat1, self.r2(cat1)], dim=1)    # 4x channels
        cat3 = torch.cat([cat2, self.r3(cat2)], dim=1)    # 8x channels
        return self.ca(self.g(cat3))
|
class DRLN(nn.Module):
    """Densely Residual Laplacian Network for single-image super-resolution.

    Twenty attention ``Block``s are arranged in cascades. Inside a cascade,
    block outputs are densely concatenated with all previous features and
    compressed back to ``chs`` channels by the matching ``cN`` block; each
    cascade ends with a long residual add (``a1`` .. ``a6``) onto the
    cascade's input, and a global skip from the head features precedes the
    upsampler and the 3-channel tail conv.
    """

    def __init__(self, args):
        super(DRLN, self).__init__()
        # Single-scale model: only the first requested scale is used.
        self.scale = args.scale[0]
        chs = 64
        # Fixed RGB channel-mean shift: subtracted on input, re-added on output.
        self.sub_mean = ops.MeanShift((0.4488, 0.4371, 0.404), sub=True)
        self.add_mean = ops.MeanShift((0.4488, 0.4371, 0.404), sub=False)
        self.head = nn.Conv2d(3, chs, 3, 1, 1)
        # Twenty dense residual attention blocks, all at ``chs`` channels.
        self.b1 = Block(chs, chs)
        self.b2 = Block(chs, chs)
        self.b3 = Block(chs, chs)
        self.b4 = Block(chs, chs)
        self.b5 = Block(chs, chs)
        self.b6 = Block(chs, chs)
        self.b7 = Block(chs, chs)
        self.b8 = Block(chs, chs)
        self.b9 = Block(chs, chs)
        self.b10 = Block(chs, chs)
        self.b11 = Block(chs, chs)
        self.b12 = Block(chs, chs)
        self.b13 = Block(chs, chs)
        self.b14 = Block(chs, chs)
        self.b15 = Block(chs, chs)
        self.b16 = Block(chs, chs)
        self.b17 = Block(chs, chs)
        self.b18 = Block(chs, chs)
        self.b19 = Block(chs, chs)
        self.b20 = Block(chs, chs)
        # Compression convs: cN maps the growing concatenation within a
        # cascade (2x .. 5x channels wide) back down to ``chs``.
        self.c1 = ops.BasicBlock((chs * 2), chs, 3, 1, 1)
        self.c2 = ops.BasicBlock((chs * 3), chs, 3, 1, 1)
        self.c3 = ops.BasicBlock((chs * 4), chs, 3, 1, 1)
        self.c4 = ops.BasicBlock((chs * 2), chs, 3, 1, 1)
        self.c5 = ops.BasicBlock((chs * 3), chs, 3, 1, 1)
        self.c6 = ops.BasicBlock((chs * 4), chs, 3, 1, 1)
        self.c7 = ops.BasicBlock((chs * 2), chs, 3, 1, 1)
        self.c8 = ops.BasicBlock((chs * 3), chs, 3, 1, 1)
        self.c9 = ops.BasicBlock((chs * 4), chs, 3, 1, 1)
        self.c10 = ops.BasicBlock((chs * 2), chs, 3, 1, 1)
        self.c11 = ops.BasicBlock((chs * 3), chs, 3, 1, 1)
        self.c12 = ops.BasicBlock((chs * 4), chs, 3, 1, 1)
        self.c13 = ops.BasicBlock((chs * 2), chs, 3, 1, 1)
        self.c14 = ops.BasicBlock((chs * 3), chs, 3, 1, 1)
        self.c15 = ops.BasicBlock((chs * 4), chs, 3, 1, 1)
        self.c16 = ops.BasicBlock((chs * 5), chs, 3, 1, 1)
        self.c17 = ops.BasicBlock((chs * 2), chs, 3, 1, 1)
        self.c18 = ops.BasicBlock((chs * 3), chs, 3, 1, 1)
        self.c19 = ops.BasicBlock((chs * 4), chs, 3, 1, 1)
        self.c20 = ops.BasicBlock((chs * 5), chs, 3, 1, 1)
        self.upsample = ops.UpsampleBlock(chs, self.scale, multi_scale=False)
        # Project features back to a 3-channel RGB image.
        self.tail = nn.Conv2d(chs, 3, 3, 1, 1)

    def forward(self, x):
        x = self.sub_mean(x)
        x = self.head(x)
        c0 = o0 = x
        # --- cascade 1: b1-b3, dense concat compressed by c1-c3 ---
        b1 = self.b1(o0)
        c1 = torch.cat([c0, b1], dim=1)
        o1 = self.c1(c1)
        b2 = self.b2(o1)
        c2 = torch.cat([c1, b2], dim=1)
        o2 = self.c2(c2)
        b3 = self.b3(o2)
        c3 = torch.cat([c2, b3], dim=1)
        o3 = self.c3(c3)
        a1 = (o3 + c0)  # long residual onto the cascade input
        # --- cascade 2: b4-b6 (concat restarts from o3) ---
        b4 = self.b4(a1)
        c4 = torch.cat([o3, b4], dim=1)
        o4 = self.c4(c4)
        # NOTE(review): b5 consumes a1, whereas every parallel cascade feeds
        # the previous compressed output into its second block (b2(o1),
        # b8(o7), b11(o10), ...). Possibly intentional, possibly a typo for
        # self.b5(o4) — confirm against the upstream DRLN reference.
        b5 = self.b5(a1)
        c5 = torch.cat([c4, b5], dim=1)
        o5 = self.c5(c5)
        b6 = self.b6(o5)
        c6 = torch.cat([c5, b6], dim=1)
        o6 = self.c6(c6)
        a2 = (o6 + a1)
        # --- cascade 3: b7-b9 ---
        b7 = self.b7(a2)
        c7 = torch.cat([o6, b7], dim=1)
        o7 = self.c7(c7)
        b8 = self.b8(o7)
        c8 = torch.cat([c7, b8], dim=1)
        o8 = self.c8(c8)
        b9 = self.b9(o8)
        c9 = torch.cat([c8, b9], dim=1)
        o9 = self.c9(c9)
        a3 = (o9 + a2)
        # --- cascade 4: b10-b12 ---
        b10 = self.b10(a3)
        c10 = torch.cat([o9, b10], dim=1)
        o10 = self.c10(c10)
        b11 = self.b11(o10)
        c11 = torch.cat([c10, b11], dim=1)
        o11 = self.c11(c11)
        b12 = self.b12(o11)
        c12 = torch.cat([c11, b12], dim=1)
        o12 = self.c12(c12)
        a4 = (o12 + a3)
        # --- cascade 5: b13-b16 (four blocks; concat grows to 5x for c16) ---
        b13 = self.b13(a4)
        c13 = torch.cat([o12, b13], dim=1)
        o13 = self.c13(c13)
        b14 = self.b14(o13)
        c14 = torch.cat([c13, b14], dim=1)
        o14 = self.c14(c14)
        b15 = self.b15(o14)
        c15 = torch.cat([c14, b15], dim=1)
        o15 = self.c15(c15)
        b16 = self.b16(o15)
        c16 = torch.cat([c15, b16], dim=1)
        o16 = self.c16(c16)
        a5 = (o16 + a4)
        # --- cascade 6: b17-b20 (four blocks; concat grows to 5x for c20) ---
        b17 = self.b17(a5)
        c17 = torch.cat([o16, b17], dim=1)
        o17 = self.c17(c17)
        b18 = self.b18(o17)
        c18 = torch.cat([c17, b18], dim=1)
        o18 = self.c18(c18)
        b19 = self.b19(o18)
        c19 = torch.cat([c18, b19], dim=1)
        o19 = self.c19(c19)
        b20 = self.b20(o19)
        c20 = torch.cat([c19, b20], dim=1)
        o20 = self.c20(c20)
        a6 = (o20 + a5)
        # Global skip from the head features, then upsample and map to RGB.
        b_out = (a6 + x)
        out = self.upsample(b_out, scale=self.scale)
        out = self.tail(out)
        f_out = self.add_mean(out)
        return f_out

    def load_state_dict(self, state_dict, strict=False):
        """Copy parameters by name, tolerating shape mismatches in the
        upsampler/tail.

        Lets a checkpoint trained at one scale seed a model at another:
        mismatched ``tail``/``upsample`` tensors are skipped with a notice
        instead of raising. Note the default ``strict=False`` differs from
        ``nn.Module``'s default of True.
        """
        own_state = self.state_dict()
        for (name, param) in state_dict.items():
            if (name in own_state):
                if isinstance(param, nn.Parameter):
                    # Copy raw tensor data, not the Parameter wrapper.
                    param = param.data
                try:
                    own_state[name].copy_(param)
                except Exception:
                    # Shape mismatch: acceptable only for scale-dependent parts.
                    if ((name.find('tail') >= 0) or (name.find('upsample') >= 0)):
                        print('Replace pre-trained upsampler to new one...')
                    else:
                        raise RuntimeError('While copying the parameter named {}, whose dimensions in the model are {} and whose dimensions in the checkpoint are {}.'.format(name, own_state[name].size(), param.size()))
            elif strict:
                # Unknown keys are errors under strict mode, except tail keys.
                if (name.find('tail') == (- 1)):
                    raise KeyError('unexpected key "{}" in state_dict'.format(name))
        if strict:
            missing = (set(own_state.keys()) - set(state_dict.keys()))
            if (len(missing) > 0):
                raise KeyError('missing keys in state_dict: "{}"'.format(missing))
|
def init_weights(modules):
    """Intentional no-op weight initialiser (placeholder).

    Blocks in this file call ``init_weights(self.modules)`` — note they pass
    the bound ``modules`` method itself, not ``self.modules()`` — so the
    default PyTorch layer initialisation is kept either way.
    """
    pass
|
class MeanShift(nn.Module):
    """Fixed 1x1 conv that subtracts (``sub=True``) or adds (``sub=False``)
    a constant per-channel RGB mean; its parameters are frozen."""

    def __init__(self, mean_rgb, sub):
        super(MeanShift, self).__init__()
        sign = -1 if sub else 1
        self.shifter = nn.Conv2d(3, 3, 1, 1, 0)
        # Identity channel mixing; the bias carries the (signed) mean.
        self.shifter.weight.data = torch.eye(3).view(3, 3, 1, 1)
        self.shifter.bias.data = torch.Tensor([sign * c for c in mean_rgb])
        # The shift is a constant; keep it out of the optimiser.
        for param in self.shifter.parameters():
            param.requires_grad = False

    def forward(self, x):
        return self.shifter(x)
|
class BasicBlock(nn.Module):
    """Conv2d + ReLU with configurable kernel, stride, padding and dilation."""

    def __init__(self, in_channels, out_channels, ksize=3, stride=1, pad=1, dilation=1):
        super(BasicBlock, self).__init__()
        conv = nn.Conv2d(in_channels, out_channels, ksize, stride, pad, dilation)
        self.body = nn.Sequential(conv, nn.ReLU(inplace=True))
        init_weights(self.modules)

    def forward(self, x):
        return self.body(x)
|
class GBasicBlock(nn.Module):
    """Grouped (groups=4) Conv2d + ReLU; channel counts must be divisible by 4."""

    def __init__(self, in_channels, out_channels, ksize=3, stride=1, pad=1, dilation=1):
        super(GBasicBlock, self).__init__()
        conv = nn.Conv2d(in_channels, out_channels, ksize, stride, pad, dilation, groups=4)
        self.body = nn.Sequential(conv, nn.ReLU(inplace=True))
        init_weights(self.modules)

    def forward(self, x):
        return self.body(x)
|
class BasicBlockSig(nn.Module):
    """Conv2d + Sigmoid; used to produce attention gates in (0, 1)."""

    def __init__(self, in_channels, out_channels, ksize=3, stride=1, pad=1):
        super(BasicBlockSig, self).__init__()
        conv = nn.Conv2d(in_channels, out_channels, ksize, stride, pad)
        self.body = nn.Sequential(conv, nn.Sigmoid())
        init_weights(self.modules)

    def forward(self, x):
        return self.body(x)
|
class GBasicBlockSig(nn.Module):
    """Grouped (groups=4) Conv2d + Sigmoid; channel counts must be divisible by 4."""

    def __init__(self, in_channels, out_channels, ksize=3, stride=1, pad=1):
        super(GBasicBlockSig, self).__init__()
        conv = nn.Conv2d(in_channels, out_channels, ksize, stride, pad, groups=4)
        self.body = nn.Sequential(conv, nn.Sigmoid())
        init_weights(self.modules)

    def forward(self, x):
        return self.body(x)
|
class ResidualBlock(nn.Module):
    """Two 3x3 convs with a ReLU between, identity skip, and a ReLU on the sum.

    The identity skip requires ``in_channels == out_channels``.
    """

    def __init__(self, in_channels, out_channels):
        super(ResidualBlock, self).__init__()
        self.body = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, 1, 1),
        )
        init_weights(self.modules)

    def forward(self, x):
        return F.relu(self.body(x) + x)
|
class GResidualBlock(nn.Module):
    """Residual block using a grouped (groups=4) 3x3 conv followed by a 1x1
    conv, with identity skip and ReLU on the sum."""

    def __init__(self, in_channels, out_channels):
        super(GResidualBlock, self).__init__()
        self.body = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, 1, 1, groups=4),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 1, 1, 0),
        )
        init_weights(self.modules)

    def forward(self, x):
        return F.relu(self.body(x) + x)
|
class EResidualBlock(nn.Module):
    """Expanded residual block: two (optionally grouped) 3x3 convs with
    ReLUs plus a final 1x1 conv, identity skip, and ReLU on the sum."""

    def __init__(self, in_channels, out_channels, group=1):
        super(EResidualBlock, self).__init__()
        self.body = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, 1, 1, groups=group),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, 1, 1, groups=group),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 1, 1, 0),
        )
        init_weights(self.modules)

    def forward(self, x):
        return F.relu(self.body(x) + x)
|
class ConvertBlock(nn.Module):
    """Compress the concatenation of ``blocks`` feature maps down to
    ``out_channels`` via three 3x3 convs that halve the width twice."""

    def __init__(self, in_channels, out_channels, blocks):
        super(ConvertBlock, self).__init__()
        wide = out_channels * blocks
        self.body = nn.Sequential(
            nn.Conv2d(in_channels * blocks, wide // 2, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(wide // 2, wide // 4, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(wide // 4, out_channels, 3, 1, 1),
        )
        init_weights(self.modules)

    def forward(self, x):
        return self.body(x)
|
class UpsampleBlock(nn.Module):
    """Dispatching upsampler: a single fixed-scale path, or three dedicated
    x2/x3/x4 paths when ``multi_scale`` is set (selected per forward call)."""

    def __init__(self, n_channels, scale, multi_scale, group=1):
        super(UpsampleBlock, self).__init__()
        self.multi_scale = multi_scale
        if multi_scale:
            self.up2 = _UpsampleBlock(n_channels, scale=2, group=group)
            self.up3 = _UpsampleBlock(n_channels, scale=3, group=group)
            self.up4 = _UpsampleBlock(n_channels, scale=4, group=group)
        else:
            self.up = _UpsampleBlock(n_channels, scale=scale, group=group)

    def forward(self, x, scale):
        # Single-scale path ignores the requested scale.
        if not self.multi_scale:
            return self.up(x)
        if scale == 2:
            return self.up2(x)
        if scale == 3:
            return self.up3(x)
        if scale == 4:
            return self.up4(x)
|
class _UpsampleBlock(nn.Module):
    """Sub-pixel upsampler: repeated (conv -> ReLU -> PixelShuffle(2))
    stages for scales 2/4/8, or one PixelShuffle(3) stage for scale 3.
    Any other scale yields an empty (identity) body."""

    def __init__(self, n_channels, scale, group=1):
        super(_UpsampleBlock, self).__init__()
        stages = []
        if scale in (2, 4, 8):
            for _ in range(int(math.log(scale, 2))):
                stages.append(nn.Conv2d(n_channels, 4 * n_channels, 3, 1, 1, groups=group))
                stages.append(nn.ReLU(inplace=True))
                stages.append(nn.PixelShuffle(2))
        elif scale == 3:
            stages.append(nn.Conv2d(n_channels, 9 * n_channels, 3, 1, 1, groups=group))
            stages.append(nn.ReLU(inplace=True))
            stages.append(nn.PixelShuffle(3))
        self.body = nn.Sequential(*stages)
        init_weights(self.modules)

    def forward(self, x):
        return self.body(x)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.