code stringlengths 101 5.91M |
|---|
def ReadFile(source_file='/home/myc/workspace/MorphStream-Stock/application/src/main/java/benchmark/datagenerator/apps/SHJ/dataset/stock_dataset_v2.csv',
             result_file='/home/myc/workspace/MorphStream-Stock/test',
             interval=1000000):
    """Parse event-completion logs and build truth/actual latency CDF curves.

    The hard-coded paths of the original are now keyword parameters with the
    same defaults, so existing ``ReadFile()`` callers are unaffected while the
    function becomes testable and reusable.

    Args:
        source_file: CSV of ``event_id,offset`` rows mapping each event id to
            its timestamp offset (in ``interval`` units).
        result_file: log file whose ``'++++++ Completed:'`` lines carry
            ``... actual_ts ... truth_ts ... event_id`` space-separated fields
            (indices 2, 4 and 6 after split).
        interval: bucket width in raw timestamp units.

    Returns:
        ``(x, y)`` where ``x[0]``/``x[1]`` are the truth/actual bucket indices
        (dict insertion order, as in the original) and ``y[0]``/``y[1]`` the
        corresponding cumulative event counts.

    Raises:
        IndexError: if the log contains no 'Completed' lines (same as the
            original, which indexed ``truth_list[0]`` unconditionally).
    """
    truth_list = []
    actual_list = []
    event_id_list = []
    truth_cdf = {}
    actual_cdf = {}
    x = [[], []]
    y = [[], []]
    # Map each event id to its timestamp offset.  The original leaked this
    # file handle; 'with' closes it deterministically.
    event_ts_offset = {}
    with open(source_file) as fp:
        for inputEvent in fp:
            textArr = inputEvent.split(',')
            event_ts_offset[int(textArr[0])] = int(textArr[1])
    with open(result_file) as fp:
        lines = fp.readlines()
    for line in lines:
        if '++++++ Completed:' in line:
            textArr = line.split(' ')
            actual_list.append(int(textArr[2]))
            truth_list.append(int(textArr[4]))
            event_id_list.append(int(textArr[6]))
    start_ts = truth_list[0]
    for i in range(0, len(truth_list)):
        event_offset = event_ts_offset[event_id_list[i]]
        print(event_offset)  # kept: original traced every offset to stdout
        truth_index = int((truth_list[i] - start_ts) / interval) + event_offset
        actual_index = int((actual_list[i] - start_ts) / interval) + event_offset
        truth_cdf[truth_index] = truth_cdf.get(truth_index, 0) + 1
        actual_cdf[actual_index] = actual_cdf.get(actual_index, 0) + 1
    x[0] = list(truth_cdf.keys())
    x[1] = list(actual_cdf.keys())
    # Cumulative counts; accumulator renamed so the builtin 'sum' is not
    # shadowed.  Iteration stays in insertion order, matching the original.
    running = 0
    for key in truth_cdf:
        running += truth_cdf[key]
        y[0].append(running)
    running = 0
    for key in actual_cdf:
        running += actual_cdf[key]
        y[1].append(running)
    return (x, y)
class SCConv(nn.Module):
    """Self-calibrated convolution block.

    A pooled branch (k2) produces a sigmoid gate that modulates the output of
    a same-resolution branch (k3); the gated features then pass through a
    final convolution (k4).
    """

    def __init__(self, inplanes, planes, stride, padding, dilation, groups, pooling_r, norm_layer):
        super(SCConv, self).__init__()
        # Shared 3x3-conv settings for all three branches.
        conv_kwargs = dict(kernel_size=3, padding=padding, dilation=dilation, groups=groups, bias=False)
        # k2: downsample, convolve, normalize -- the calibration branch.
        self.k2 = nn.Sequential(
            nn.AvgPool2d(kernel_size=pooling_r, stride=pooling_r),
            nn.Conv2d(inplanes, planes, stride=1, **conv_kwargs),
            norm_layer(planes),
        )
        # k3: full-resolution feature branch.
        self.k3 = nn.Sequential(
            nn.Conv2d(inplanes, planes, stride=1, **conv_kwargs),
            norm_layer(planes),
        )
        # k4: output convolution (carries the block's stride).
        self.k4 = nn.Sequential(
            nn.Conv2d(inplanes, planes, stride=stride, **conv_kwargs),
            norm_layer(planes),
        )

    def forward(self, x):
        shortcut = x
        # Upsample the pooled branch back to the input resolution and turn
        # (input + calibration) into a [0, 1] gate.
        gate = torch.sigmoid(shortcut + F.interpolate(self.k2(x), shortcut.size()[2:]))
        calibrated = self.k3(x) * gate
        return self.k4(calibrated)
_tf
class TFDistilBertModelTest(TFModelTesterMixin, unittest.TestCase):
    """Unit tests for the TensorFlow DistilBERT model family.

    Exercises the base model and the masked-LM / question-answering /
    sequence-classification heads with a tiny configuration so the tests
    run quickly on CPU.
    """

    # Model classes under test; None when TensorFlow is not installed so the
    # mixin can skip the TF-specific checks.
    all_model_classes = ((TFDistilBertModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification) if is_tf_available() else None)
    # Feature flags consumed by TFModelTesterMixin's common tests.
    test_pruning = True
    test_torchscript = True
    test_resize_embeddings = True
    test_head_masking = True

    class TFDistilBertModelTester(object):
        """Builds small DistilBERT configs and dummy inputs for the tests."""

        def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
            # 'parent' is the enclosing TestCase, used for its assert helpers.
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope

        def prepare_config_and_inputs(self):
            """Create a tiny DistilBertConfig plus random ids/mask/labels."""
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
            input_mask = None
            if self.use_input_mask:
                # Random 0/1 attention mask.
                input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)
            config = DistilBertConfig(vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range)
            return (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels)

        def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
            """Base model: check the sequence-output shape for dict and list inputs."""
            model = TFDistilBertModel(config=config)
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
            outputs = model(inputs)
            sequence_output = outputs[0]
            # Also exercise the positional (list) calling convention.
            inputs = [input_ids, input_mask]
            (sequence_output,) = model(inputs)
            result = {'sequence_output': sequence_output.numpy()}
            self.parent.assertListEqual(list(result['sequence_output'].shape), [self.batch_size, self.seq_length, self.hidden_size])

        def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
            """MLM head: logits must be (batch, seq, vocab)."""
            model = TFDistilBertForMaskedLM(config=config)
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
            (prediction_scores,) = model(inputs)
            result = {'prediction_scores': prediction_scores.numpy()}
            self.parent.assertListEqual(list(result['prediction_scores'].shape), [self.batch_size, self.seq_length, self.vocab_size])

        def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
            """QA head: start/end logits must be (batch, seq)."""
            model = TFDistilBertForQuestionAnswering(config=config)
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
            (start_logits, end_logits) = model(inputs)
            result = {'start_logits': start_logits.numpy(), 'end_logits': end_logits.numpy()}
            self.parent.assertListEqual(list(result['start_logits'].shape), [self.batch_size, self.seq_length])
            self.parent.assertListEqual(list(result['end_logits'].shape), [self.batch_size, self.seq_length])

        def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
            """Classification head: logits must be (batch, num_labels)."""
            config.num_labels = self.num_labels
            model = TFDistilBertForSequenceClassification(config)
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
            (logits,) = model(inputs)
            result = {'logits': logits.numpy()}
            self.parent.assertListEqual(list(result['logits'].shape), [self.batch_size, self.num_labels])

        def prepare_config_and_inputs_for_common(self):
            """Adapter for the mixin's shared tests: (config, inputs_dict)."""
            config_and_inputs = self.prepare_config_and_inputs()
            (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
            inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
            return (config, inputs_dict)

    def setUp(self):
        self.model_tester = TFDistilBertModelTest.TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)
def get_config():
    """Parse the command line: common options plus this experiment's knobs."""
    parser = common_config(ArgumentParser())
    # (flag, type, default, help) for each experiment-specific option.
    for flag, kind, default, helptext in (
        ('--lr', float, 0.001, 'Learning rate'),
        ('--weight_decay', float, 0, 'Weight decay'),
        ('--max_steps', int, 10000, 'Number of training steps'),
        ('--batch_size', int, 32, 'Batch size'),
        ('--dropout', float, 0.0, 'Dropout probability'),
        ('--k', int, 4, 'k factor of Wide ResNet'),
    ):
        parser.add_argument(flag, type=kind, default=default, help=helptext)
    return parser.parse_args()
def test_fails_on_dim_mismatch():
    """GridArchive must reject dims/ranges lists of different lengths."""
    two_dims = [10, 10]
    three_ranges = [(-1, 1), (-1, 1), (-1, 1)]
    with pytest.raises(ValueError):
        GridArchive(solution_dim=10, dims=two_dims, ranges=three_ranges)
class UniformMutation(Mutation[FloatSolution]):
    """Uniform mutation: each variable is perturbed, with the configured
    probability, by a uniform offset in [-perturbation/2, perturbation/2],
    clamped to the variable's bounds."""

    def __init__(self, probability: float, perturbation: float = 0.5):
        super(UniformMutation, self).__init__(probability=probability)
        self.perturbation = perturbation

    def execute(self, solution: FloatSolution) -> FloatSolution:
        Check.that(type(solution) is FloatSolution, 'Solution type invalid')
        for index in range(solution.number_of_variables):
            # First draw decides whether this variable mutates at all.
            if random.random() <= self.probability:
                offset = (random.random() - 0.5) * self.perturbation
                candidate = solution.variables[index] + offset
                # Clamp into [lower_bound, upper_bound].
                candidate = max(candidate, solution.lower_bound[index])
                candidate = min(candidate, solution.upper_bound[index])
                solution.variables[index] = candidate
        return solution

    def get_name(self):
        return 'Uniform mutation'
def get_lean_files(paths: List[Path]) -> List[Path]:
    """Recursively collect every ``.lean`` file under each of *paths*."""
    return [lean_file for root in paths for lean_file in root.glob('**/*.lean')]
def Huffman_Encoding(data):
    """Huffman-encode *data*.

    Returns a tuple of (encoded bit string, tree root node, compression
    gain as reported by Total_Gain).
    """
    symbol_with_probs = Calculate_Probability(data)
    symbols = symbol_with_probs.keys()
    probabilities = symbol_with_probs.values()
    if DEBUG:
        print('==symbols: ', symbols)
        print('==probabilities: ', probabilities)
    # One leaf node per distinct symbol.
    nodes = [Node(symbol_with_probs.get(symbol), symbol) for symbol in symbols]
    # Repeatedly merge the two lowest-probability nodes until one root remains.
    while len(nodes) > 1:
        nodes = sorted(nodes, key=lambda node: node.prob)
        right = nodes[0]
        left = nodes[1]
        left.code = 0
        right.code = 1
        merged = Node(left.prob + right.prob, left.symbol + right.symbol, left, right)
        # Drop the two merged nodes, keep the rest, append the parent.
        nodes = nodes[2:] + [merged]
    huffman_encoding = Calculate_Codes(nodes[0])
    if DEBUG:
        print('symbols with codes', huffman_encoding)
    gain = Total_Gain(data, huffman_encoding)
    encoded_output = Output_Encoded(data, huffman_encoding)
    return (encoded_output, nodes[0], gain)
def test_audio_dataset_archive(mocker):
    """archive_dataset must delegate to data_utils.create_tarfile with the
    requested archive path and the module's data directory."""
    module = AudioDataModule()
    create_tarfile_mock = mocker.patch(f'{TESTED_MODULE}.data_utils.create_tarfile')
    archive_path = 'test.tar.gz'
    module.archive_dataset(archive_path)
    create_tarfile_mock.assert_called_once_with(archive_path, module.data_dir)
def make_default_index_mapper(special_symbols=SPECIAL_SYMBOLS):
    """Build a fresh symbol->index mapping seeded with the special symbols.

    Args:
        special_symbols: dict of symbol -> integer id (defaults to the
            module-level SPECIAL_SYMBOLS). A falsy value yields an empty map.

    Returns:
        A new dict (the input is never mutated) containing the special
        symbols, ready to be extended with regular vocabulary entries.

    Raises:
        AssertionError: if a non-dict truthy value is supplied.
    """
    mapper = {}
    if special_symbols:
        # isinstance instead of `type(...) == dict` so dict subclasses
        # (e.g. OrderedDict) are accepted as well.
        assert isinstance(special_symbols, dict), 'Need to provide dict as special symbols mapping.'
        mapper.update(special_symbols)
    return mapper
def test(args, io):
    """Evaluate a trained DGCNN checkpoint on the ModelNet40 test split and
    print overall and class-balanced accuracy via io.cprint."""
    test_loader = DataLoader(ModelNet40(args, partition='test'), batch_size=args.test_batch_size, shuffle=True, drop_last=False)
    device = torch.device('cuda' if args.cuda else 'cpu')
    # Wrap in DataParallel before loading so checkpoint keys ('module.*') match.
    model = nn.DataParallel(DGCNN(args).to(device))
    model.load_state_dict(torch.load(args.model_path))
    model = model.eval()
    all_labels = []
    all_preds = []
    for data, label in test_loader:
        data = data.to(device)
        label = label.to(device).squeeze()
        # (batch, points, channels) -> (batch, channels, points).
        data = data.permute(0, 2, 1)
        logits = model(data)
        all_labels.append(label.cpu().numpy())
        all_preds.append(logits.max(dim=1)[1].detach().cpu().numpy())
    all_labels = np.concatenate(all_labels)
    all_preds = np.concatenate(all_preds)
    overall_acc = metrics.accuracy_score(all_labels, all_preds)
    mean_class_acc = metrics.balanced_accuracy_score(all_labels, all_preds)
    io.cprint('Test :: test acc: %.6f, test avg acc: %.6f' % (overall_acc, mean_class_acc))
def _check_matrix_is_sparse(func):
(func)
def wrapper(*args, **kwargs):
if (('accept_sparse' in kwargs) and (not sparse.isspmatrix(args[0]))):
raise TypeError('A dense matrix was passed in, but sparsedata is required.')
result = func(*args, **kwargs)
return result
return wrapper |
def get_spurious_datasets(dataset_dir, img_size=224, interpolation=InterpolationMode.BICUBIC, bs=128, num_workers=1):
    """Build a non-shuffling DataLoader over a SpuriousDataset with the
    standard ImageNet 'no_crop' evaluation transform."""
    eval_transform = get_imageNet_augmentation(type='no_crop', out_size=img_size, interpolation=interpolation)
    spurious_dataset = SpuriousDataset(dataset_dir, eval_transform)
    return DataLoader(spurious_dataset, batch_size=bs, shuffle=False, num_workers=num_workers)
def tps(gold, pred, label):
    """Count (true positives, false positives, false negatives) for *label*
    over aligned gold/pred sequences."""
    pairs = list(zip(gold, pred))
    tp = sum(1 for g, p in pairs if g == label and p == label)
    fp = sum(1 for g, p in pairs if p == label and g != p)
    fn = sum(1 for g, p in pairs if g == label and g != p)
    return (tp, fp, fn)
def sample_hull(hull, domain, isDomainFinite):
    """Draw one sample from a piecewise-exponential upper hull (adaptive
    rejection sampling style inverse-CDF sampling).

    Args (layout inferred from the indexing below -- TODO confirm against
    the hull-construction code):
        hull: sequence where hull[0] is a normalisation constant, hull[3]
            the per-segment slopes, hull[4] the segment abscissae, hull[5]
            the cumulative segment masses, hull[6] the log segment heights.
        domain: (lower, upper) support bounds.
        isDomainFinite: (bool, bool) flags for the two bounds.

    Returns:
        A single sample location.

    BUG FIX: the original had a bare ``thissample =`` (syntax error) in the
    zero-slope / infinite-lower-domain branch; a -100000 sentinel is used
    here, mirroring the +100000 fallback the original used for the
    infinite upper domain. TODO: confirm intended value against upstream.
    """
    u = stats.uniform.rvs()
    if hull[5][0] >= u:
        # u falls within the first hull segment.
        if hull[3][0] == 0:
            if isDomainFinite[0]:
                # Zero slope: invert the uniform piece linearly.
                thissample = domain[0] + ((u / hull[5][0]) * (hull[4][0] - domain[0]))
            else:
                # was 'thissample =' -- see docstring.
                thissample = -100000
        else:
            thissample = (hull[4][0] + ((1.0 / hull[3][0]) * numpy.log((1.0 - (((hull[3][0] * hull[0]) * (hull[5][0] - u)) / numpy.exp(hull[6][0]))))))
    else:
        # Locate the segment whose cumulative mass brackets u.
        if (len(hull[5]) == 1):
            indx = 0
        else:
            indx = 1
            while ((indx < len(hull[5])) and (hull[5][indx] < u)):
                indx = (indx + 1)
            indx = (indx - 1)
        if (numpy.fabs(hull[3][(indx + 1)]) == 0):
            if (indx != (len(hull[5]) - 1)):
                # Interior flat segment: linear interpolation.
                thissample = (hull[4][indx] + (((u - hull[5][indx]) / (hull[5][(indx + 1)] - hull[5][indx])) * (hull[4][(indx + 1)] - hull[4][indx])))
            elif isDomainFinite[1]:
                thissample = (hull[4][indx] + (((u - hull[5][indx]) / (1.0 - hull[5][indx])) * (domain[1] - hull[4][indx])))
            else:
                # Infinite upper domain fallback sentinel (original value).
                thissample = 100000
        else:
            thissample = (hull[4][indx] + ((1.0 / hull[3][(indx + 1)]) * numpy.log((1.0 + (((hull[3][(indx + 1)] * hull[0]) * (u - hull[5][indx])) / numpy.exp(hull[6][indx]))))))
    return thissample
def require_torch_non_multi_gpu(test_case):
    """Test decorator: skip unless PyTorch is installed and at most one GPU
    is visible."""
    if not is_torch_available():
        return unittest.skip('test requires PyTorch')(test_case)
    import torch

    single_or_no_gpu = torch.cuda.device_count() < 2
    return unittest.skipUnless(single_or_no_gpu, 'test requires 0 or 1 GPU')(test_case)
def log_deferred(op, log_id, every_n=1, first_n=None):
    """Wrap *op* so an MLPerf-style ':::MLPv0.5.0' line (timestamp + value)
    is printed every *every_n* executions, at most *first_n* times."""
    prefix = ':::MLPv0.5.0 [{}]'.format(log_id)

    def print_op(limit):
        # tf.Print passes `op` through while logging as a side effect.
        return tf.compat.v1.Print(op, [tf.timestamp(), op], message=prefix, first_n=limit)

    if (first_n is not None) and (first_n == 1):
        # One-shot logging: no counter variable needed.
        return print_op(1)
    # Counter starts at -1 so the very first execution logs (0 % every_n == 0).
    counter = tf.Variable(tf.zeros(shape=(), dtype=tf.int32) - 1, aggregation=tf.VariableAggregation.MEAN)
    increment = tf.compat.v1.assign_add(counter, 1, use_locking=True)
    should_log = tf.equal(tf.math.mod(increment, every_n), 0)
    return tf.cond(pred=should_log, true_fn=lambda: print_op(first_n), false_fn=lambda: op)
class BEV_UNet(nn.Module):
    """Bird's-eye-view U-Net with multi-scale side inputs (x_ds1..x_ds3)
    concatenated into the encoder path.

    BUG FIX: ``channel_reduction`` took ``(x, out_channels)`` but was invoked
    as ``self.channel_reduction(...)``, so Python passed the module instance
    as ``x`` and the side-input as ``out_channels`` -- an immediate crash.
    It is now a ``@staticmethod``, which makes both ``self.`` and class-level
    calls behave correctly.
    """

    def __init__(self, n_class, n_height, dilation, bilinear, group_conv, input_batch_norm, dropout, circular_padding, dropblock):
        super(BEV_UNet, self).__init__()
        # Encoder; the widened in-channels of down2/down3/down4 account for
        # the concatenated (channel-reduced) side inputs.
        self.inc = inconv(64, 64, dilation, input_batch_norm, circular_padding)
        self.down1 = down(64, 128, dilation, group_conv, circular_padding)
        self.down2 = down(256, 256, dilation, group_conv, circular_padding)
        self.down3 = down(512, 512, dilation, group_conv, circular_padding)
        self.down4 = down(1024, 512, dilation, group_conv, circular_padding)
        # Decoder with skip connections.
        self.up1 = up(1536, 512, circular_padding, bilinear=bilinear, group_conv=group_conv, use_dropblock=dropblock, drop_p=dropout)
        self.up2 = up(1024, 256, circular_padding, bilinear=bilinear, group_conv=group_conv, use_dropblock=dropblock, drop_p=dropout)
        self.up3 = up(512, 128, circular_padding, bilinear=bilinear, group_conv=group_conv, use_dropblock=dropblock, drop_p=dropout)
        self.up4 = up(192, 128, circular_padding, bilinear=bilinear, group_conv=group_conv, use_dropblock=dropblock, drop_p=dropout)
        # Plain dropout before the classifier only when DropBlock is off.
        self.dropout = nn.Dropout(p=(0.0 if dropblock else dropout))
        self.outc = outconv(128, n_class)

    def forward(self, x, x_ds1, x_ds2, x_ds3):
        x1 = self.inc(x)
        x2 = self.down1(x1)
        # Reduce each side input to the matching width before concatenation.
        x2_cat = torch.cat((x2, self.channel_reduction(x_ds1, x2.shape[1])), dim=1)
        x3 = self.down2(x2_cat)
        x3_cat = torch.cat((x3, self.channel_reduction(x_ds2, x3.shape[1])), dim=1)
        x4 = self.down3(x3_cat)
        x4_cat = torch.cat((x4, x_ds3), dim=1)
        x5 = self.down4(x4_cat)
        x = self.up1(x5, x4_cat)
        x = self.up2(x, x3_cat)
        x = self.up3(x, x2_cat)
        x = self.up4(x, x1)
        x = self.outc(self.dropout(x))
        return x

    @staticmethod
    def channel_reduction(x, out_channels):
        """Reduce (B, C_in, H, W) to (B, out_channels, H, W) by summing
        groups of C_in / out_channels consecutive channels."""
        (B, in_channels, H, W) = x.shape
        assert (((in_channels % out_channels) == 0) and (in_channels >= out_channels))
        x = x.view(B, out_channels, (- 1), H, W)
        x = x.sum(dim=2)
        return x
def resnet101(pretrained=False, **kwargs):
    """Construct a ResNet-101 (Bottleneck x [3, 4, 23, 3]); optionally load
    pretrained weights from models_dir."""
    layer_counts = [3, 4, 23, 3]
    model = ResNet(Bottleneck, layer_counts, **kwargs)
    if pretrained:
        weights_path = os.path.join(models_dir, model_name['resnet101'])
        model.load_state_dict(torch.load(weights_path))
    return model
class TFNoBadWordsLogitsProcessor(metaclass=DummyObject):
    """Placeholder for TFNoBadWordsLogitsProcessor when TensorFlow is not
    installed; any use raises a helpful backend-missing error."""

    # Backends this dummy stands in for.
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        # Raises the standard "requires the tf backend" error on construction.
        requires_backends(self, ['tf'])
def freeze(module):
    """Switch *module* to eval mode and disable gradients on all parameters.

    Returns the list of each parameter's previous ``requires_grad`` flag (in
    ``module.parameters()`` order) so the caller can restore them later, or
    None when *module* is None.
    """
    if module is None:
        return None
    module.eval()
    previous_flags = [param.requires_grad for param in module.parameters()]
    for param in module.parameters():
        param.requires_grad_(False)
    return previous_flags
class LearnedPositionalEmbedding(nn.Embedding):
    """Embedding of token positions learned up to a fixed maximum length.

    Padding positions are ignored via ``padding_idx``; position indices are
    either supplied by the caller, derived incrementally (decoding), or
    computed from the input's padding mask.
    """

    def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int):
        super().__init__(num_embeddings, embedding_dim, padding_idx)
        self.onnx_trace = False

    def forward(self, input, incremental_state=None, positions=None):
        assert (positions is None) or (self.padding_idx is None), 'If positions is pre-computed then padding_idx should not be set.'
        if positions is None:
            if incremental_state is None:
                # Full sequence: derive positions from the padding mask.
                positions = utils.make_positions(input.data, self.padding_idx, onnx_trace=self.onnx_trace)
            else:
                # Incremental decoding: only the current (last) position.
                positions = input.data.new(1, 1).fill_(self.padding_idx + input.size(1))
        return super().forward(positions)

    def max_positions(self):
        """Maximum number of positions this embedding supports."""
        if self.padding_idx is None:
            return self.num_embeddings
        return self.num_embeddings - self.padding_idx - 1
def train(args, run_opts):
    """Kaldi nnet3 acoustic-model training driver.

    Orchestrates the whole recipe via shell helpers: egs generation,
    preconditioning, iterative SGE/parallel training, final model
    combination, prior adjustment, cleanup and reporting.  Negative
    ``args.stage`` values select the preparation stages; iteration stages
    then run from ``args.stage`` onward so training can be resumed.
    """
    arg_string = pprint.pformat(vars(args))
    logger.info('Arguments for the experiment\n{0}'.format(arg_string))
    # Copy phone list from the alignment dir; probe feature/ivector dims.
    shutil.copy('{0}/phones.txt'.format(args.ali_dir), args.dir)
    num_jobs = common_lib.get_number_of_jobs(args.ali_dir)
    feat_dim = common_lib.get_feat_dim(args.feat_dir)
    ivector_dim = common_lib.get_ivector_dim(args.online_ivector_dir)
    ivector_id = common_lib.get_ivector_extractor_id(args.online_ivector_dir)
    common_lib.execute_command('utils/split_data.sh {0} {1}'.format(args.feat_dir, num_jobs))
    shutil.copy('{0}/tree'.format(args.ali_dir), args.dir)
    with open('{0}/num_jobs'.format(args.dir), 'w') as f:
        f.write('{}'.format(num_jobs))
    config_dir = '{0}/configs'.format(args.dir)
    var_file = '{0}/vars'.format(config_dir)
    # Model context comes from the generated configs, or from a supplied
    # input model when resuming/transferring.
    if (args.input_model is None):
        config_dir = '{0}/configs'.format(args.dir)
        var_file = '{0}/vars'.format(config_dir)
        variables = common_train_lib.parse_generic_config_vars_file(var_file)
    else:
        variables = common_train_lib.get_input_model_info(args.input_model)
    try:
        model_left_context = variables['model_left_context']
        model_right_context = variables['model_right_context']
    except KeyError as e:
        raise Exception('KeyError {0}: Variables need to be defined in {1}'.format(str(e), '{0}/configs'.format(args.dir)))
    # Total context = chunk context + model context; -1 means "use default".
    left_context = (args.chunk_left_context + model_left_context)
    right_context = (args.chunk_right_context + model_right_context)
    left_context_initial = ((args.chunk_left_context_initial + model_left_context) if (args.chunk_left_context_initial >= 0) else (- 1))
    right_context_final = ((args.chunk_right_context_final + model_right_context) if (args.chunk_right_context_final >= 0) else (- 1))
    # Stage -5: initialize a raw network for the LDA-like preconditioning.
    if ((args.stage <= (- 5)) and (args.input_model is None)):
        logger.info('Initializing a basic network for estimating preconditioning matrix')
        common_lib.execute_command('{command} {dir}/log/nnet_init.log nnet3-init --srand=-2 {dir}/configs/init.config {dir}/init.raw'.format(command=run_opts.command, dir=args.dir))
    default_egs_dir = '{0}/egs'.format(args.dir)
    # Stage -4: generate training examples (egs) unless provided externally.
    if ((args.stage <= (- 4)) and (args.egs_dir is None)):
        logger.info('Generating egs')
        if (args.feat_dir is None):
            raise Exception("--feat-dir option is required if you don't supply --egs-dir")
        train_lib.acoustic_model.generate_egs(data=args.feat_dir, alidir=args.ali_dir, egs_dir=default_egs_dir, left_context=left_context, right_context=right_context, left_context_initial=left_context_initial, right_context_final=right_context_final, run_opts=run_opts, frames_per_eg_str=args.chunk_width, srand=args.srand, egs_opts=args.egs_opts, cmvn_opts=args.cmvn_opts, online_ivector_dir=args.online_ivector_dir, samples_per_iter=args.samples_per_iter, stage=args.egs_stage)
    if (args.egs_dir is None):
        egs_dir = default_egs_dir
    else:
        egs_dir = args.egs_dir
    # Sanity-check the egs dir against our dims/contexts.
    [egs_left_context, egs_right_context, frames_per_eg_str, num_archives] = common_train_lib.verify_egs_dir(egs_dir, feat_dim, ivector_dim, ivector_id, left_context, right_context, left_context_initial, right_context_final)
    if (args.chunk_width != frames_per_eg_str):
        raise Exception('mismatch between --egs.chunk-width and the frames_per_eg in the egs dir {0} vs {1}'.format(args.chunk_width, frames_per_eg_str))
    if (args.num_jobs_final > num_archives):
        raise Exception('num_jobs_final cannot exceed the number of archives in the egs directory')
    common_train_lib.copy_egs_properties_to_exp_dir(egs_dir, args.dir)
    # Stage -3: estimate the input-feature preconditioning (LDA-like) matrix.
    if ((args.stage <= (- 3)) and (args.input_model is None)):
        logger.info('Computing the preconditioning matrix for input features')
        train_lib.common.compute_preconditioning_matrix(args.dir, egs_dir, num_archives, run_opts, max_lda_jobs=args.max_lda_jobs, rand_prune=args.rand_prune)
    # Stage -2: initial presoftmax prior scaling vector.
    if ((args.stage <= (- 2)) and (args.input_model is None)):
        logger.info('Computing initial vector for FixedScaleComponent before softmax, using priors^{prior_scale} and rescaling to average 1'.format(prior_scale=args.presoftmax_prior_scale_power))
        common_train_lib.compute_presoftmax_prior_scale(args.dir, args.ali_dir, num_jobs, run_opts, presoftmax_prior_scale_power=args.presoftmax_prior_scale_power)
    # Stage -1: build 0.mdl from the raw net + transition model.
    if (args.stage <= (- 1)):
        logger.info('Preparing the initial acoustic model.')
        train_lib.acoustic_model.prepare_initial_acoustic_model(args.dir, args.ali_dir, run_opts, input_model=args.input_model)
    num_archives_to_process = int((args.num_epochs * num_archives))
    num_archives_processed = 0
    # num_iters follows from the average job count ramping initial->final.
    num_iters = int(((num_archives_to_process * 2) / (args.num_jobs_initial + args.num_jobs_final)))
    if args.do_final_combination:
        models_to_combine = common_train_lib.get_model_combine_iters(num_iters, args.num_epochs, num_archives, args.max_models_combine, args.num_jobs_final)
    else:
        models_to_combine = None
    # Optional derivative truncation for recurrent training.
    min_deriv_time = None
    max_deriv_time_relative = None
    if (args.deriv_truncate_margin is not None):
        min_deriv_time = ((- args.deriv_truncate_margin) - model_left_context)
        max_deriv_time_relative = (args.deriv_truncate_margin + model_right_context)
    logger.info('Training will run for {0} epochs = {1} iterations'.format(args.num_epochs, num_iters))
    # Main SGD loop.
    for iter in range(num_iters):
        if ((args.exit_stage is not None) and (iter == args.exit_stage)):
            logger.info('Exiting early due to --exit-stage {0}'.format(iter))
            return
        current_num_jobs = common_train_lib.get_current_num_jobs(iter, num_iters, args.num_jobs_initial, args.num_jobs_step, args.num_jobs_final)
        if (args.stage <= iter):
            model_file = '{dir}/{iter}.mdl'.format(dir=args.dir, iter=iter)
            lrate = common_train_lib.get_learning_rate(iter, current_num_jobs, num_iters, num_archives_processed, num_archives_to_process, args.initial_effective_lrate, args.final_effective_lrate)
            # Parameter shrinkage: proportional-shrink scales with lrate; a
            # fixed shrink-value may apply when the model looks saturated.
            shrinkage_value = (1.0 - (args.proportional_shrink * lrate))
            if (shrinkage_value <= 0.5):
                raise Exception('proportional-shrink={0} is too large, it gives shrink-value={1}'.format(args.proportional_shrink, shrinkage_value))
            if (args.shrink_value < shrinkage_value):
                shrinkage_value = (args.shrink_value if common_train_lib.should_do_shrinkage(iter, model_file, args.shrink_saturation_threshold) else 1.0)
            percent = ((num_archives_processed * 100.0) / num_archives_to_process)
            epoch = ((num_archives_processed * args.num_epochs) / num_archives_to_process)
            shrink_info_str = ''
            if (shrinkage_value != 1.0):
                shrink_info_str = 'shrink: {0:0.5f}'.format(shrinkage_value)
            logger.info('Iter: {0}/{1} Jobs: {2} Epoch: {3:0.2f}/{4:0.1f} ({5:0.1f}% complete) lr: {6:0.6f} {7}'.format(iter, (num_iters - 1), current_num_jobs, epoch, args.num_epochs, percent, lrate, shrink_info_str))
            train_lib.common.train_one_iteration(dir=args.dir, iter=iter, srand=args.srand, egs_dir=egs_dir, num_jobs=current_num_jobs, num_archives_processed=num_archives_processed, num_archives=num_archives, learning_rate=lrate, dropout_edit_string=common_train_lib.get_dropout_edit_string(args.dropout_schedule, (float(num_archives_processed) / num_archives_to_process), iter), train_opts=' '.join(args.train_opts), shrinkage_value=shrinkage_value, minibatch_size_str=args.num_chunk_per_minibatch, min_deriv_time=min_deriv_time, max_deriv_time_relative=max_deriv_time_relative, momentum=args.momentum, max_param_change=args.max_param_change, shuffle_buffer_size=args.shuffle_buffer_size, run_opts=run_opts, backstitch_training_scale=args.backstitch_training_scale, backstitch_training_interval=args.backstitch_training_interval, compute_per_dim_accuracy=args.compute_per_dim_accuracy)
            if args.cleanup:
                # Drop models older than two iterations (unless preserved).
                common_train_lib.remove_model(args.dir, (iter - 2), num_iters, models_to_combine, args.preserve_model_interval)
            if (args.email is not None):
                reporting_iter_interval = (num_iters * args.reporting_interval)
                if ((iter % reporting_iter_interval) == 0):
                    [report, times, data] = nnet3_log_parse.generate_acc_logprob_report(args.dir)
                    message = report
                    subject = 'Update : Expt {dir} : Iter {iter}'.format(dir=args.dir, iter=iter)
                    common_lib.send_mail(message, subject, args.email)
        # Advance progress even for skipped iterations so the learning-rate
        # schedule stays consistent on resume.
        num_archives_processed = (num_archives_processed + current_num_jobs)
    # Post-loop: final combination, prior adjustment, cleanup, reporting.
    if (args.stage <= num_iters):
        if args.do_final_combination:
            logger.info('Doing final combination to produce final.mdl')
            train_lib.common.combine_models(dir=args.dir, num_iters=num_iters, models_to_combine=models_to_combine, egs_dir=egs_dir, run_opts=run_opts, minibatch_size_str=args.num_chunk_per_minibatch, chunk_width=args.chunk_width, max_objective_evaluations=args.max_objective_evaluations, compute_per_dim_accuracy=args.compute_per_dim_accuracy)
    if (args.stage <= (num_iters + 1)):
        logger.info('Getting average posterior for purposes of adjusting the priors.')
        real_iter = ('combined' if args.do_final_combination else num_iters)
        avg_post_vec_file = train_lib.common.compute_average_posterior(dir=args.dir, iter=real_iter, egs_dir=egs_dir, num_archives=num_archives, prior_subset_size=args.prior_subset_size, run_opts=run_opts)
        logger.info('Re-adjusting priors based on computed posteriors')
        combined_or_last_numbered_model = '{dir}/{iter}.mdl'.format(dir=args.dir, iter=real_iter)
        final_model = '{dir}/final.mdl'.format(dir=args.dir)
        train_lib.common.adjust_am_priors(args.dir, combined_or_last_numbered_model, avg_post_vec_file, final_model, run_opts)
    if args.cleanup:
        logger.info('Cleaning up the experiment directory {0}'.format(args.dir))
        remove_egs = args.remove_egs
        if (args.egs_dir is not None):
            # Never delete an egs dir we don't own.
            remove_egs = False
        common_train_lib.clean_nnet_dir(nnet_dir=args.dir, num_iters=num_iters, egs_dir=egs_dir, preserve_model_interval=args.preserve_model_interval, remove_egs=remove_egs)
    [report, times, data] = nnet3_log_parse.generate_acc_logprob_report(args.dir)
    if (args.email is not None):
        common_lib.send_mail(report, 'Update : Expt {0} : complete'.format(args.dir), args.email)
    with open('{dir}/accuracy.report'.format(dir=args.dir), 'w') as f:
        f.write(report)
    common_lib.execute_command('steps/info/nnet3_dir_info.pl {0}'.format(args.dir))
class ParameterModule(nn.Module):
    """Minimal module that registers a single learnable parameter.

    Useful for exposing a bare tensor to an optimizer via the standard
    ``parameters()`` machinery.
    """

    def __init__(self, init_value):
        super().__init__()
        # Registering as nn.Parameter makes it show up in parameters().
        self.param = nn.Parameter(init_value)
def train(net, trainloader, optimizer, criterion, device):
    """Run one training epoch.

    Returns a dict with the epoch's mean loss, overall accuracy, balanced
    (per-class average) accuracy -- each rounded to 3 decimals -- and the
    elapsed wall time in whole seconds.
    """
    net.train()
    running_loss = 0
    correct = 0
    total = 0
    epoch_preds = []
    epoch_labels = []
    epoch_start = datetime.datetime.now()
    for batch_idx, (data, label) in enumerate(trainloader):
        data = data.to(device)
        label = label.to(device).squeeze()
        # (batch, points, channels) -> (batch, channels, points).
        data = data.permute(0, 2, 1)
        optimizer.zero_grad()
        logits = net(data)
        loss = criterion(logits, label)
        loss.backward()
        # Clip gradients to unit norm for stability.
        torch.nn.utils.clip_grad_norm_(net.parameters(), 1)
        optimizer.step()
        running_loss += loss.item()
        preds = logits.max(dim=1)[1]
        epoch_labels.append(label.cpu().numpy())
        epoch_preds.append(preds.detach().cpu().numpy())
        total += label.size(0)
        correct += preds.eq(label).sum().item()
        progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)' % (running_loss / (batch_idx + 1), (100.0 * correct) / total, correct, total))
    elapsed_seconds = int((datetime.datetime.now() - epoch_start).total_seconds())
    epoch_labels = np.concatenate(epoch_labels)
    epoch_preds = np.concatenate(epoch_preds)
    return {
        'loss': float('%.3f' % (running_loss / (batch_idx + 1))),
        'acc': float('%.3f' % (100.0 * metrics.accuracy_score(epoch_labels, epoch_preds))),
        'acc_avg': float('%.3f' % (100.0 * metrics.balanced_accuracy_score(epoch_labels, epoch_preds))),
        'time': elapsed_seconds,
    }
def weighted_l1_loss(inputs, targets, weights=None):
    """Mean L1 loss with optional per-element weights.

    Args:
        inputs: predicted tensor.
        targets: target tensor, same shape as (or broadcastable to) inputs.
        weights: optional tensor broadcast (via expand_as) over the
            element-wise loss before averaging.

    Returns:
        Scalar tensor: mean of (optionally weighted) absolute errors.
    """
    # reduction='none' replaces the long-deprecated reduce=False flag;
    # both yield the un-reduced element-wise loss.
    loss = F.l1_loss(inputs, targets, reduction='none')
    if weights is not None:
        loss = loss * weights.expand_as(loss)
    return torch.mean(loss)
def main():
    """CLI entry point: push or pull files between the local Ithemal tree
    and one (or all) running AWS EC2 instances."""
    parser = argparse.ArgumentParser(description='Synchronize files in the Ithemal directory to a running AWS EC2 instance')
    direction_group = parser.add_mutually_exclusive_group(required=True)
    direction_group.add_argument('--to', help='Send files to the instance', default=False, action='store_true')
    direction_group.add_argument('--from', help='Pull files from the instance', default=False, action='store_true')
    parser.add_argument('identity', help='Identity to use to connect')
    parser.add_argument('--all', help='Synchronize with all instances', default=False, action='store_true')
    parser.add_argument('file', help='Files to synchronize', nargs='+')
    args = parser.parse_args()
    # The group is required, so exactly one of --to/--from is set.
    direction = 'to' if args.to else 'from'
    synchronizer = InstanceSynchronizer(args.identity, direction, args.file)
    if not args.all:
        # Let the user pick a single instance interactively.
        connect_instance.interactively_connect_to_instance(synchronizer)
        return
    for instance in synchronizer.get_running_instances():
        synchronizer.connect_to_instance(instance)
def suppress_stdout():
    """Return a context manager that redirects sys.stdout to os.devnull.

    BUG FIX: the original function contained a bare ``(yield)`` but was
    never decorated with ``@contextlib.contextmanager``, so calling it
    returned a plain generator and ``with suppress_stdout():`` raised
    ``AttributeError: __enter__``.  The generator is now wrapped locally
    (the import is function-scoped so no module-level import is needed).

    Usage::

        with suppress_stdout():
            noisy_call()
    """
    from contextlib import contextmanager

    @contextmanager
    def _suppressed():
        with open(os.devnull, 'w') as devnull:
            saved_stdout = sys.stdout
            sys.stdout = devnull
            try:
                yield
            finally:
                # Always restore stdout, even if the body raised.
                sys.stdout = saved_stdout

    return _suppressed()
class ResUNetBN2Cv2(ResUNet2v2):
    """ResUNet2v2 variant configured with batch normalization.

    Only overrides the base class's configuration constants; all behavior
    lives in ResUNet2v2.
    """

    # Normalization layer selector consumed by the base class.
    NORM_TYPE = 'BN'
    # Per-level channel widths; index 0 is a placeholder so levels are
    # 1-indexed (presumably encoder widths -- confirm against ResUNet2v2).
    CHANNELS = [None, 32, 64, 128, 256]
    # Transposed-conv (decoder) channel widths, same 1-indexed convention.
    TR_CHANNELS = [None, 64, 64, 64, 128]
def train_poker_approx_best_response_nfsp(br_player, ray_head_address, scenario, general_trainer_config_overrrides, br_policy_config_overrides, get_stopping_condition, avg_policy_specs_for_players: Dict[(int, StrategySpec)], results_dir: str, trainer_class_override=None, br_policy_class_override=None, print_train_results: bool=True):
    """Train a PPO approximate best response against a frozen NFSP average policy.

    Seat `br_player` trains a continuous-action 'best_response' PPO policy
    while the opponent seat plays a fixed 'average_policy' loaded from
    `avg_policy_specs_for_players`. Training runs until the stopping
    condition fires; returns (max_reward, None), where max_reward is the
    highest per-iteration mean best-response reward observed.

    NOTE(review): `general_trainer_config_overrrides` (typo: triple 'r'),
    `br_policy_config_overrides` and `trainer_class_override` are accepted
    but never referenced in this body — confirm whether that is intentional.
    """
    env_class = scenario.env_class
    env_config = scenario.env_config
    # Only the non-BR seat uses a discrete action space; the BR seat trains
    # on the continuous action space (see the policy setup below).
    other_player = (1 - br_player)
    env_config['discrete_actions_for_players'] = [other_player]
    policy_classes: Dict[(str, Type[Policy])] = scenario.policy_classes
    if (br_policy_class_override is not None):
        policy_classes['best_response'] = br_policy_class_override
    get_trainer_config = scenario.get_trainer_config
    should_log_result_fn = scenario.ray_should_log_result_filter
    init_ray_for_scenario(scenario=scenario, head_address=ray_head_address, logging_level=logging.INFO)
    def log(message, level=logging.INFO):
        # Thin wrapper over the module logger.
        logger.log(level, message)
    def select_policy(agent_id):
        # Map agent ids to policy ids: the BR seat trains, the other seat
        # plays the frozen average policy.
        if (agent_id == br_player):
            return 'best_response'
        else:
            return f'average_policy'
    tmp_env = env_class(env_config=env_config)
    all_discrete_action_env_config = env_config.copy()
    all_discrete_action_env_config['discrete_actions_for_players'] = [0, 1]
    # NOTE(review): this passes `env_config`, not
    # `all_discrete_action_env_config`, so the copy above is unused — looks
    # like a bug; confirm which config the avg-policy model should be built from.
    all_discrete_action_tmp_env = env_class(env_config)
    avg_policy_model_config = get_trainer_config(all_discrete_action_tmp_env)['model']
    from ray.rllib.agents.ppo import PPOTrainer, PPOTorchPolicy
    from grl.rl_apps.scenarios.trainer_configs.loss_game_configs import loss_game_psro_ppo_params
    # Single-process PPO config: only 'best_response' is trained; the
    # 'average_policy' is evaluated deterministically (explore=False).
    br_trainer_config = {'log_level': 'INFO', 'env': env_class, 'env_config': env_config, 'gamma': 1.0, 'num_gpus': 0.0, 'num_workers': 0, 'num_gpus_per_worker': 0.0, 'num_envs_per_worker': 1, 'multiagent': {'policies_to_train': ['best_response'], 'policies': {'average_policy': (policy_classes['average_policy'], tmp_env.observation_space, tmp_env.discrete_action_space, {'model': avg_policy_model_config, 'explore': False}), 'best_response': (PPOTorchPolicy, tmp_env.observation_space, tmp_env.continuous_action_space, {})}, 'policy_mapping_fn': select_policy}}
    br_trainer_config = merge_dicts(br_trainer_config, loss_game_psro_ppo_params(tmp_env))
    br_trainer = PPOTrainer(config=br_trainer_config, logger_creator=get_trainer_logger_creator(base_dir=results_dir, scenario_name='approx_br', should_log_result_fn=should_log_result_fn))
    def _set_avg_policy(worker: RolloutWorker):
        # Load the opponent's frozen average-policy weights on every rollout worker.
        avg_policy = worker.policy_map['average_policy']
        load_pure_strat(policy=avg_policy, pure_strat_spec=avg_policy_specs_for_players[(1 - br_player)])
    br_trainer.workers.foreach_worker(_set_avg_policy)
    br_trainer.latest_avg_trainer_result = None
    train_iter_count = 0
    stopping_condition: StoppingCondition = get_stopping_condition()
    max_reward = None
    while True:
        train_iter_results = br_trainer.train()
        br_reward_this_iter = train_iter_results['policy_reward_mean'][f'best_response']
        # Track the best mean BR reward seen over all iterations.
        if ((max_reward is None) or (br_reward_this_iter > max_reward)):
            max_reward = br_reward_this_iter
        train_iter_count += 1
        if print_train_results:
            # Strip bulky entries before pretty-printing.
            if ('hist_stats' in train_iter_results):
                del train_iter_results['hist_stats']
            if ('td_error' in train_iter_results['info']['learner']['best_response']):
                del train_iter_results['info']['learner']['best_response']['td_error']
            print(pretty_dict_str(train_iter_results))
            log(f'Trainer logdir is {br_trainer.logdir}')
        if stopping_condition.should_stop_this_iter(latest_trainer_result=train_iter_results):
            print('stopping condition met.')
            break
    return (max_reward, None)
class AlternateCorrBlock():
    """Memory-efficient correlation lookup (RAFT 'alternate' mode).

    Instead of materialising the full 4D correlation volume, this keeps a
    pooled pyramid of fmap2 and computes correlations on the fly with the
    custom DirectCorr op at lookup time.
    """
    def __init__(self, fmap1, fmap2, num_levels=4, radius=4):
        self.num_levels = num_levels
        self.radius = radius
        # Level 0 keeps both feature maps; coarser levels only need fmap2.
        self.pyramid = [(fmap1, fmap2)]
        for i in range(self.num_levels):
            # Halve fmap2's spatial resolution per level.
            fmap2 = F.avg_pool2d(fmap2, 2, stride=2)
            self.pyramid.append((None, fmap2))
    def __call__(self, coords):
        # coords: (B, 2, H, W) lookup coordinates -> channels-last layout.
        coords = coords.permute(0, 2, 3, 1)
        (B, H, W, _) = coords.shape
        # Feature dimensionality used for the final scaling.
        dim = self.pyramid[0][0].shape[1]
        corr_list = []
        for i in range(self.num_levels):
            # NOTE(review): `r` is never used here — the radius is presumably
            # consumed inside DirectCorr; confirm.
            r = self.radius
            # fmap1 is always taken at full resolution (pyramid level 0).
            fmap1_i = self.pyramid[0][0].permute(0, 2, 3, 1).contiguous().float()
            fmap2_i = self.pyramid[i][1].permute(0, 2, 3, 1).contiguous().float()
            # Scale lookup coordinates to the current pyramid level.
            coords_i = (coords / (2 ** i)).reshape(B, 1, H, W, 2).contiguous()
            (corr,) = DirectCorr.apply(fmap1_i, fmap2_i, coords_i)
            corr_list.append(corr.squeeze(1))
        # Stack per-level correlations and flatten into channel dim.
        corr = torch.stack(corr_list, dim=1)
        corr = corr.reshape(B, (- 1), H, W)
        # Scale by sqrt(dim), matching dot-product attention normalisation.
        return (corr / torch.sqrt(torch.tensor(dim).float()))
def create_transform(input_size, is_training=False, use_prefetcher=False, color_jitter=0.4, auto_augment=None, interpolation='bilinear', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, re_prob=0.0, re_mode='const', re_count=1, re_num_splits=0, crop_pct=None, tf_preprocessing=False, separate=False, less_aggressive_scale_aug=False):
    """Build the image transform pipeline for training or evaluation.

    Dispatches to one of three factories: TensorFlow-style preprocessing
    (when both tf_preprocessing and use_prefetcher are set), the ImageNet
    training pipeline, or the ImageNet eval pipeline. A tuple input_size is
    reduced to its trailing (H, W) pair.
    """
    if isinstance(input_size, tuple):
        img_size = input_size[-2:]
    else:
        img_size = input_size
    if tf_preprocessing and use_prefetcher:
        assert (not separate), 'Separate transforms not supported for TF preprocessing'
        from timm.data.tf_preprocessing import TfPreprocessTransform
        return TfPreprocessTransform(is_training=is_training, size=img_size, interpolation=interpolation)
    if is_training:
        return transforms_imagenet_train(
            img_size,
            color_jitter=color_jitter,
            auto_augment=auto_augment,
            interpolation=interpolation,
            use_prefetcher=use_prefetcher,
            mean=mean,
            std=std,
            re_prob=re_prob,
            re_mode=re_mode,
            re_count=re_count,
            re_num_splits=re_num_splits,
            separate=separate,
            less_aggressive_scale_aug=less_aggressive_scale_aug,
        )
    assert (not separate), 'Separate transforms not supported for validation preprocessing'
    return transforms_imagenet_eval(
        img_size,
        interpolation=interpolation,
        use_prefetcher=use_prefetcher,
        mean=mean,
        std=std,
        crop_pct=crop_pct,
    )
def ResNet18(input_shape=None, input_tensor=None, weights=None, classes=1000, stride_size=2, init_filters=64, include_top=False, repetitions=(2, 2, 2, 2), **kwargs):
    """Instantiate a ResNet-18 model.

    Thin wrapper around the generic ResNet builder using the 'resnet18'
    parameter preset; extra keyword arguments are forwarded unchanged.
    """
    return ResNet(
        MODELS_PARAMS['resnet18'],
        input_shape=input_shape,
        input_tensor=input_tensor,
        weights=weights,
        classes=classes,
        stride_size=stride_size,
        init_filters=init_filters,
        include_top=include_top,
        repetitions=repetitions,
        **kwargs
    )
def train(config_path, device):
    """Train an encoder/decoder (VAE-style) model from a config file.

    Prepares data and model, then runs pretrain + main + fine-tune epochs
    with separate encoder/decoder optimizers, checkpointing to
    save_config['model_dir'] and logging scalars to TensorBoard.

    :param config_path: path to the experiment configuration file.
    :param device: torch device the model is moved to.
    """
    config = parse_config(config_path)
    prepare_seed(seed=config['train'].get('seed', 777))
    data_config = config['data']
    # Select the data/model preparation routine by dataset title.
    if (data_config['title'].lower() == 'mnist'):
        (train_loader, test_loader, model) = prepare_mnist(config)
    else:
        (train_loader, test_loader, model) = prepare_rnn(config)
    model = model.to(device)
    train_config = config['train']
    save_config = config['save']
    kl_config = config['kl']
    temperature_config = config['temperature']
    model_dir = save_config['model_dir']
    os.makedirs(model_dir, exist_ok=True)
    os.makedirs(save_config['log_dir'], exist_ok=True)
    # Separate optimizers (and schedulers) for encoder and decoder parameters.
    optimizer = {'encoder': torch.optim.Adam(model.encoder_parameters(), lr=train_config['lr']), 'decoder': torch.optim.Adam(model.decoder_parameters(), lr=train_config['lr'])}
    scheduler_class = (MultiStepLR if isinstance(train_config['lr_reduce_epochs'], (list, tuple)) else StepLR)
    scheduler = {'encoder': scheduler_class(optimizer['encoder'], train_config['lr_reduce_epochs'], train_config['lr_reduce_gamma']), 'decoder': scheduler_class(optimizer['decoder'], train_config['lr_reduce_epochs'], train_config['lr_reduce_gamma'])}
    logger = SummaryWriter(save_config['log_dir'])
    # Linearly annealed KL weight and sampling temperature.
    kl_weight = LinearGrowth(**kl_config)
    temperature = LinearGrowth(**temperature_config)
    epoch_verbose = (train_config.get('verbose', None) == 'epoch')
    batch_verbose = (not epoch_verbose)
    pretrain = train_config.get('pretrain', 0)
    if (pretrain != 0):
        # During pretraining, blend argmax and sampled NLL losses from 0 to 1.
        pretrain_weight = LinearGrowth(0, 1, 0, pretrain)
    # NOTE(review): key 'fune_tune' looks like a typo of 'fine_tune' — confirm
    # against the config files before renaming it.
    fine_tune = train_config.get('fune_tune', 0)
    for epoch in tqdm(range(((train_config['epochs'] + pretrain) + fine_tune)), disable=(not epoch_verbose)):
        # `fine_tune` is reused here as a per-epoch boolean flag (it held the
        # fine-tune epoch count above; the range() total is already computed).
        fine_tune = (epoch >= (train_config['epochs'] + pretrain))
        current_temperature = temperature(epoch)
        if (epoch < pretrain):
            w = pretrain_weight(epoch)
            loss_weights = {'argmax_nll': w, 'sample_nll': (1 - w)}
        elif (train_config['mode'] == 'argmax'):
            loss_weights = {'argmax_nll': 1}
            logger.add_scalar('temperature', current_temperature, epoch)
        else:
            loss_weights = {'sample_nll': 1}
        # KL weight is annealed and logged every epoch.
        loss_weights['kl_loss'] = kl_weight(epoch)
        logger.add_scalar('kl_weight', loss_weights['kl_loss'], epoch)
        # Schedulers step at the start of the epoch (pre-1.1.0 PyTorch style).
        scheduler['encoder'].step()
        scheduler['decoder'].step()
        train_epoch(model, loss_weights, epoch, train_loader, True, current_temperature, logger, optimizer, batch_verbose, clamp=train_config.get('clamp'), fine_tune=fine_tune)
        if (test_loader is not None):
            # Evaluation pass: same loop with gradients disabled.
            with torch.no_grad():
                train_epoch(model, loss_weights, epoch, test_loader, False, current_temperature, logger, optimizer, batch_verbose, clamp=train_config.get('clamp'), fine_tune=fine_tune)
        # Checkpoint per epoch (numbered) or overwrite a single file.
        if (train_config.get('checkpoint', 'epoch') == 'epoch'):
            path = f'{model_dir}/checkpoint_{(epoch + 1)}.pt'
        else:
            path = f'{model_dir}/checkpoint.pt'
        model.save(path)
    # Always leave a final copy at the canonical checkpoint path.
    model.save(f'{model_dir}/checkpoint.pt')
    logger.close()
def predict_by_split():
    """Evaluate a saved BERT link-prediction model on the current data split.

    Loads the checkpoint at args.eval_model_path, precomputes entity
    embeddings, evaluates forward and backward link prediction, and writes
    the per-direction and averaged metrics to a JSON-lines file next to the
    checkpoint. Reads all paths/settings from the module-level `args`.
    """
    # At least 1024 examples per visible GPU.
    args.batch_size = max(args.batch_size, (torch.cuda.device_count() * 1024))
    assert os.path.exists(args.valid_path)
    assert os.path.exists(args.train_path)
    assert os.path.exists(args.eval_model_path)
    predictor = BertPredictor()
    predictor.load(ckt_path=args.eval_model_path, use_data_parallel=True)
    # Precompute entity embeddings once; reused for both eval directions.
    _dump_entity_embeddings(predictor)
    entity_tensor = _load_entity_embeddings().cuda()
    # NOTE(review): batch_size=32 here ignores the args.batch_size computed
    # above — confirm whether the eval batch size should follow it.
    forward_metrics = eval_single_direction(predictor, entity_tensor=entity_tensor, eval_forward=True, batch_size=32)
    backward_metrics = eval_single_direction(predictor, entity_tensor=entity_tensor, eval_forward=False, batch_size=32)
    # Average forward/backward metrics key-wise, rounded to 4 decimals.
    metrics = {k: round(((forward_metrics[k] + backward_metrics[k]) / 2), 4) for k in forward_metrics}
    logger.info('Averaged metrics: {}'.format(metrics))
    (prefix, basename) = (os.path.dirname(args.eval_model_path), os.path.basename(args.eval_model_path))
    split = os.path.basename(args.valid_path)
    with open('{}/metrics_{}_{}.json'.format(prefix, split, basename), 'w', encoding='utf-8') as writer:
        writer.write('forward metrics: {}\n'.format(json.dumps(forward_metrics)))
        writer.write('backward metrics: {}\n'.format(json.dumps(backward_metrics)))
        writer.write('average metrics: {}\n'.format(json.dumps(metrics)))
def get_next_double_solution(idx, vrblvl=0):
    """Fetch solution string `idx` from PHCpack's double-precision container.

    Calls the PHCpack C gateway twice: job code 525 reports the length of
    the solution string for `idx`; job code 533 fills a buffer with the
    string, encoded 4 bytes per character, which int4a2str decodes.

    :param idx: index of the solution in the container (1-based per PHCpack
        convention — confirm).
    :param vrblvl: verbosity level; > 0 prints diagnostics.
    :returns: the solution as a Python string.
    """
    if (vrblvl > 0):
        print('in get_next_double_solution, idx :', idx)
    phc = get_phcfun()
    # ctypes in/out parameters: index, size/result, unused double, verbosity.
    aaa = pointer(c_int32(idx))
    bbb = pointer(c_int32(0))
    ccc = pointer(c_double(0.0))
    vrb = c_int32(vrblvl)
    if (vrblvl > 0):
        print('-> get_next_double_solution calls phc', end='')
    # Job 525: query the length of the solution string for idx (into bbb).
    retval = phc(525, aaa, bbb, ccc, vrb)
    size = bbb[0]
    if (vrblvl > 0):
        print(', return value :', retval)
        print('-> get_next_double_solution, size :', size)
    # Each character is transferred as a 4-byte integer, hence 4 * size.
    soldata = create_string_buffer(b'', (4 * size))
    if (vrblvl > 0):
        print('-> get_next_double_solution calls phc', end='')
    # Job 533: retrieve the solution string into soldata.
    retval = phc(533, bbb, soldata, ccc, vrb)
    if (vrblvl > 0):
        print(', return value :', retval)
    result = int4a2str(soldata, False)
    return result
def main():
    """Round-trip a COLMAP dense array file and verify read/write consistency."""
    import sys
    if (len(sys.argv) != 3):
        print('Usage: python test_read_write_dense.py path/to/dense/input.bin path/to/dense/output.bin')
        return
    print(('Checking consistency of reading and writing dense arrays ' + '(depth maps / normal maps) ...'))
    src_path, dst_path = sys.argv[1], sys.argv[2]
    # Read, write back out, then re-read and compare element-wise.
    original = read_array(src_path)
    print(('Input shape: ' + str(original.shape)))
    write_array(original, dst_path)
    roundtripped = read_array(dst_path)
    np.testing.assert_array_equal(original, roundtripped)
    print('... dense arrays are equal.')
def main():
    """Run validation for a segmentation model and report wall-clock time."""
    parser = argparse.ArgumentParser(description='PyTorch Segmentation Model Training')
    args = parser_params.add_parser_params(parser)
    print(args)
    torch.manual_seed(args.seed)
    trainer = Trainer(args)
    started_at = time.time()
    trainer.validation()
    elapsed = (time.time() - started_at)
    print('The validation time is {:.5f} sec'.format(elapsed))
def prune_state_dict(state_dict, model_cfg: Optional[DictConfig]):
    """Prune layers out of a checkpoint state_dict per LayerDrop config.

    If model_cfg specifies `encoder_layers_to_keep` / `decoder_layers_to_keep`
    (comma-separated layer indices), only those layers are kept and they are
    renumbered contiguously; otherwise state_dict is returned unchanged.
    The *_layers_to_keep fields on model_cfg are cleared afterwards so the
    pruning is not reapplied.
    """
    arch = None
    if (model_cfg is not None):
        arch = (model_cfg._name if isinstance(model_cfg, DictConfig) else getattr(model_cfg, 'arch', None))
    # ptt_transformer checkpoints are exempt from pruning.
    if ((not model_cfg) or (arch is None) or (arch == 'ptt_transformer')):
        return state_dict
    encoder_layers_to_keep = getattr(model_cfg, 'encoder_layers_to_keep', None)
    decoder_layers_to_keep = getattr(model_cfg, 'decoder_layers_to_keep', None)
    if ((not encoder_layers_to_keep) and (not decoder_layers_to_keep)):
        return state_dict
    logger.info('Pruning model to specified layer configuration - this works best if the model was trained with LayerDrop')
    def create_pruning_pass(layers_to_keep, layer_name):
        # Build a regex matching '<layer_name>...layers.<N>' keys plus a map
        # from surviving original layer numbers to new contiguous numbers.
        keep_layers = sorted((int(layer_string) for layer_string in layers_to_keep.split(',')))
        mapping_dict = {}
        for i in range(len(keep_layers)):
            mapping_dict[str(keep_layers[i])] = str(i)
        regex = re.compile('^{layer}.*\\.layers\\.(\\d+)'.format(layer=layer_name))
        return {'substitution_regex': regex, 'mapping_dict': mapping_dict}
    pruning_passes = []
    if encoder_layers_to_keep:
        pruning_passes.append(create_pruning_pass(encoder_layers_to_keep, 'encoder'))
    if decoder_layers_to_keep:
        pruning_passes.append(create_pruning_pass(decoder_layers_to_keep, 'decoder'))
    new_state_dict = {}
    for layer_name in state_dict.keys():
        match = re.search('\\.layers\\.(\\d+)\\.', layer_name)
        # Keys that do not belong to a numbered layer are copied through.
        if (not match):
            new_state_dict[layer_name] = state_dict[layer_name]
            continue
        original_layer_number = match.group(1)
        for pruning_pass in pruning_passes:
            # Keep the key only if its layer number survives AND the key
            # belongs to this pass's side (encoder/decoder); renumber it.
            if ((original_layer_number in pruning_pass['mapping_dict']) and pruning_pass['substitution_regex'].search(layer_name)):
                new_layer_number = pruning_pass['mapping_dict'][original_layer_number]
                substitution_match = pruning_pass['substitution_regex'].search(layer_name)
                # Splice the new layer number over the matched digit span.
                new_state_key = ((layer_name[:substitution_match.start(1)] + new_layer_number) + layer_name[substitution_match.end(1):])
                new_state_dict[new_state_key] = state_dict[layer_name]
    # Clear the pruning directives so the config matches the pruned model;
    # open_dict permits mutation when model_cfg is an OmegaConf DictConfig,
    # otherwise a no-op ExitStack is used as the context.
    if isinstance(model_cfg, DictConfig):
        context = open_dict(model_cfg)
    else:
        context = contextlib.ExitStack()
    with context:
        if hasattr(model_cfg, 'encoder_layers_to_keep'):
            model_cfg.encoder_layers_to_keep = None
        if hasattr(model_cfg, 'decoder_layers_to_keep'):
            model_cfg.decoder_layers_to_keep = None
    return new_state_dict
def parse_stories(lines, only_supporting=False):
    """Parse stories in the bAbI tasks format.

    Each line starts with a 1-based sentence id; id 1 begins a new story.
    Question lines are tab-separated: question \\t answer \\t supporting
    sentence ids. Returns a list of (substory, question, answer) triples,
    where substory is the reversed list of tokenized context sentences.

    :param lines: iterable of raw story lines.
    :param only_supporting: if True, keep only the supporting sentences of
        each question as its context.
    """
    data = []
    story = []
    for line in lines:
        line = str.lower(line)
        (nid, line) = line.split(' ', 1)
        nid = int(nid)
        if (nid == 1):
            # Sentence id 1 marks the start of a new story.
            story = []
        if ('\t' in line):
            # Question line: question \t answer \t supporting-fact ids.
            (q, a, supporting) = line.split('\t')
            q = tokenize(q)
            a = [a]
            substory = None
            if (q[(- 1)] == '?'):
                # Drop the trailing question-mark token.
                q = q[:(- 1)]
            if only_supporting:
                # BUGFIX: was supporting.split('') which raises
                # ValueError (empty separator) — supporting-fact ids are
                # whitespace-separated in the bAbI format.
                supporting = map(int, supporting.split())
                substory = [story[(i - 1)] for i in supporting]
            else:
                substory = [x for x in story if x]
            data.append((substory[::(- 1)], q, a))
            # Keep an empty placeholder so sentence ids stay aligned.
            story.append('')
        else:
            sent = tokenize(line)
            if (sent[(- 1)] == '.'):
                # Drop the trailing period token.
                sent = sent[:(- 1)]
            story.append(sent)
    return data
class Attention_block(nn.Module):
    """Additive attention gate (Attention U-Net style).

    Projects the gating signal `g` and the skip-connection features `x`
    into a common F_int-channel space, combines them, and produces a
    single-channel attention mask in [0, 1] used to scale `x`.
    """
    def __init__(self, F_g, F_l, F_int):
        super(Attention_block, self).__init__()
        # 1x1 projection of the gating signal, followed by batch norm.
        gate_proj = nn.Conv2d(F_g, F_int, kernel_size=1, stride=1, padding=0, bias=True)
        self.W_g = nn.Sequential(gate_proj, nn.BatchNorm2d(F_int))
        # 1x1 projection of the skip-connection features.
        skip_proj = nn.Conv2d(F_l, F_int, kernel_size=1, stride=1, padding=0, bias=True)
        self.W_x = nn.Sequential(skip_proj, nn.BatchNorm2d(F_int))
        # Collapse to one channel and squash to [0, 1].
        mask_head = nn.Conv2d(F_int, 1, kernel_size=1, stride=1, padding=0, bias=True)
        self.psi = nn.Sequential(mask_head, nn.BatchNorm2d(1), nn.Sigmoid())
        self.relu = nn.ReLU(inplace=True)
    def forward(self, g, x):
        # Combine the two projections additively, then compute the mask.
        combined = self.relu(self.W_g(g) + self.W_x(x))
        attention = self.psi(combined)
        # Gate the skip features with the per-pixel attention mask.
        return x * attention
def get_atom_map_nums(rxn_str) -> typing.Set[int]:
    """Return the set of atom-map numbers of all atoms in a SMILES string.

    NOTE(review): GetPropsAsDict()['molAtomMapNumber'] raises KeyError for
    atoms that carry no map number — presumably every atom in the input is
    mapped; confirm. Also, despite the name, `rxn_str` is parsed with
    MolFromSmiles (a molecule parser), not a reaction parser — verify the
    callers pass molecule SMILES.
    """
    mol = Chem.MolFromSmiles(rxn_str)
    return set([a.GetPropsAsDict()['molAtomMapNumber'] for a in mol.GetAtoms()])
.parametrize('gpu2gpu', [False, True])
def test_env(gpu2gpu):
    """Smoke-test habitat.Env: episode ends at the step cap and on STOP.

    NOTE(review): the line above looks like the argument list of a stripped
    `@pytest.mark.parametrize` decorator — confirm against the original file.
    """
    import habitat_sim
    if (gpu2gpu and (not habitat_sim.cuda_enabled)):
        pytest.skip('GPU-GPU requires CUDA')
    config = get_config(CFG_TEST)
    if (not os.path.exists(config.SIMULATOR.SCENE)):
        pytest.skip('Please download Habitat test data to data folder.')
    # Toggle GPU->GPU tensor transfer per the parametrized flag.
    config.defrost()
    config.SIMULATOR.HABITAT_SIM_V0.GPU_GPU = gpu2gpu
    config.freeze()
    with habitat.Env(config=config, dataset=None) as env:
        # Single fixed navigation episode with a trivial goal.
        env.episodes = [NavigationEpisode(episode_id='0', scene_id=config.SIMULATOR.SCENE, start_position=[(- 3.0133917), 0., 7.3064547], start_rotation=[0, 0.163276, 0, 0.98658], goals=[NavigationGoal(position=[(- 3.0133917), 0., 7.3064547])], info={'geodesic_distance': 0.001})]
        env.reset()
        # Stepping to the cap with non-STOP actions must end the episode.
        for _ in range(config.ENVIRONMENT.MAX_EPISODE_STEPS):
            env.step(sample_non_stop_action(env.action_space))
        assert (env.episode_over is True), 'episode should be over after max_episode_steps'
        env.reset()
        # A single STOP action must also end the episode.
        env.step(action={'action': StopAction.name})
        assert (env.episode_over is True), 'episode should be over after STOP action'
def resnet1202_svhn(num_classes=10, **kwargs):
    """Build a ResNet-1202 (non-bottleneck, CIFAR-style) model for SVHN."""
    return get_resnet_cifar(
        blocks=1202,
        bottleneck=False,
        model_name='resnet1202_svhn',
        num_classes=num_classes,
        **kwargs
    )
def iou_x1y1x2y2(bbox1, bbox2):
    """Intersection-over-union of two axis-aligned boxes in [x1, y1, x2, y2] form.

    Pure-arithmetic replacement for the previous Shapely polygon version:
    for axis-aligned rectangles, polygon intersection/union reduce to
    min/max arithmetic, removing the shapely dependency. Corner order is
    normalized first, so swapped corners (x2 < x1) yield the same rectangle
    the polygon construction would have.

    :param bbox1: first box as [x1, y1, x2, y2].
    :param bbox2: second box as [x1, y1, x2, y2].
    :returns: IoU in [0, 1].
    :raises ZeroDivisionError: when the union area is zero (both boxes
        degenerate), matching the previous behavior.
    """
    (ax1, ax2) = sorted((bbox1[0], bbox1[2]))
    (ay1, ay2) = sorted((bbox1[1], bbox1[3]))
    (bx1, bx2) = sorted((bbox2[0], bbox2[2]))
    (by1, by2) = sorted((bbox2[1], bbox2[3]))
    # Overlap extents; negative values mean no overlap on that axis.
    inter_w = min(ax2, bx2) - max(ax1, bx1)
    inter_h = min(ay2, by2) - max(ay1, by1)
    intersection = max(0.0, inter_w) * max(0.0, inter_h)
    union = ((ax2 - ax1) * (ay2 - ay1)) + ((bx2 - bx1) * (by2 - by1)) - intersection
    return intersection / union
class FMRegression(FactorizationMachine, RegressorMixin):
    """Factorization Machine regressor fitted with ALS (fastFM-style).

    :param n_iter: number of ALS iterations per fit call.
    :param init_stdev: stddev of the normal init for the latent factors V.
    :param rank: number of latent factors per feature.
    :param random_state: RNG seed.
    :param l2_reg_w: L2 regularization for the linear weights w.
    :param l2_reg_V: L2 regularization for the latent factors V.
    :param l2_reg: if nonzero, overrides both l2_reg_w and l2_reg_V.
    """
    def __init__(self, n_iter=100, init_stdev=0.1, rank=8, random_state=123, l2_reg_w=0.1, l2_reg_V=0.1, l2_reg=0):
        super(FMRegression, self).__init__(n_iter=n_iter, init_stdev=init_stdev, rank=rank, random_state=random_state)
        # A nonzero l2_reg takes precedence over the individual w/V values.
        if (l2_reg != 0):
            self.l2_reg_V = l2_reg
            self.l2_reg_w = l2_reg
        else:
            self.l2_reg_w = l2_reg_w
            self.l2_reg_V = l2_reg_V
        self.l2_reg = l2_reg
        self.task = 'regression'
    def fit(self, X_train, y_train, n_more_iter=0):
        """Fit (or warm-start-continue) the model with ALS.

        :param X_train: training features; converted to float64 CSC,
            Fortran-ordered, as required by the ffm solver.
        :param y_train: 1-D float64 training targets.
        :param n_more_iter: if > 0, continue training an already-fitted
            model for this many additional iterations (warm start).
        :returns: self.
        """
        check_consistent_length(X_train, y_train)
        y_train = check_array(y_train, ensure_2d=False, dtype=np.float64)
        X_train = check_array(X_train, accept_sparse='csc', dtype=np.float64, order='F')
        # n_iter accumulates across calls so the solver sees the total;
        # iter_count below tracks how many iterations have actually run.
        self.n_iter = (self.n_iter + n_more_iter)
        if (n_more_iter > 0):
            _check_warm_start(self, X_train)
            self.warm_start = True
        (self.w0_, self.w_, self.V_) = ffm.ffm_als_fit(self, X_train, y_train)
        if (self.iter_count != 0):
            self.iter_count = (self.iter_count + n_more_iter)
        else:
            self.iter_count = self.n_iter
        # Reset the warm-start flag after every fit.
        self.warm_start = False
        return self
.register('ShuffleNetV2')
def build_sfv2_backbone(cfg):
arch = cfg.MODEL.BACKBONE.ARCH
in_channels = cfg.MODEL.BACKBONE.IN_PLANES
base_channels = cfg.MODEL.BACKBONE.BASE_PLANES
round_nearest = cfg.MODEL.COMPRESSION.ROUND_NEAREST
(block_layer, stage_channels, stage_blocks, out_channels) = copy.deepcopy(arch_settings[arch])
down_samples = cfg.MODEL.BACKBONE.DOWNSAMPLES
conv_layer = get_conv(cfg)
norm_layer = get_norm(cfg)
act_layer = get_act(cfg)
return ShuffleNetV2Backbone(in_channels=in_channels, base_channels=base_channels, out_channels=out_channels, stage_channels=stage_channels, stage_blocks=stage_blocks, downsamples=down_samples, block_layer=block_layer, conv_layer=conv_layer, norm_layer=norm_layer, act_layer=act_layer) |
class BN_Conv_layer(object):
    """Convolutional layer with batch normalization (Theano / cuDNN).

    Holds learnable BN scale (`eta`) and shift (`beta`) plus running
    mean/std buffers used when conv() is called with testF=True.
    NOTE(review): `batch_sz` is accepted but never used in this body.
    """
    def __init__(self, batch_sz, numpy_rng, tnkern=5, bfilter_sz=5, tfilter_sz=5, bnkern=1, poolsize=(2, 2)):
        # Filter shape: (top kernels, bottom kernels, height, width).
        self.filter_shape = (tnkern, bnkern, tfilter_sz, tfilter_sz)
        # BN scale and shift, one per bottom channel.
        self.eta = theano.shared(np.ones((bnkern,), dtype=theano.config.floatX), name='eta')
        self.beta = theano.shared(np.zeros((bnkern,), dtype=theano.config.floatX), name='beta')
        # Running statistics for test-time normalization.
        # NOTE(review): stat_std starts at zeros, so test-time normalization
        # divides by TINY until collect_statistics() runs — confirm intended.
        self.stat_mean = theano.shared(np.zeros((bnkern,), dtype=theano.config.floatX), name='running_avg')
        self.stat_std = theano.shared(np.zeros((bnkern,), dtype=theano.config.floatX), name='running_std')
        self.init_conv_filters(numpy_rng, bfilter_sz, poolsize)
        self.params += [self.eta, self.beta]
    def init_conv_filters(self, numpy_rng, D, poolsize):
        """Initialize conv weights with the Glorot/Xavier uniform bound."""
        fan_in = np.prod(self.filter_shape[1:])
        fan_out = ((self.filter_shape[0] * np.prod(self.filter_shape[2:])) / np.prod(poolsize))
        W_bound = np.sqrt((6.0 / (fan_in + fan_out)))
        self.W = theano.shared(init_conv_weights((- W_bound), W_bound, self.filter_shape, numpy_rng), borrow=True, name='W_conv')
        self.params = [self.W]
    def collect_statistics(self, X):
        """Return Theano updates storing X's batch mean/std into the running buffers."""
        stat_mean = T.mean(X, axis=0)
        stat_std = T.std(X, axis=0)
        updates_stats = [(self.stat_mean, stat_mean), (self.stat_std, stat_std)]
        return updates_stats
    def conv(self, X, subsample=(2, 2), border_mode=(2, 2), atype='sigmoid', testF=False):
        """Convolve X, batch-normalize (batch or running stats), then activate."""
        # Weights are dimshuffled so the convolution maps top -> bottom kernels.
        ConH0 = dnn_conv(X, self.W.dimshuffle(1, 0, 2, 3), subsample=subsample, border_mode=border_mode)
        if testF:
            # Test time: normalize with the stored running statistics.
            ConH1 = ((ConH0 - self.stat_mean.dimshuffle('x', 0, 'x', 'x')) / (self.stat_std.dimshuffle('x', 0, 'x', 'x') + TINY))
        else:
            # Train time: normalize with batch statistics over (N, H, W).
            mean = ConH0.mean(axis=[0, 2, 3]).dimshuffle('x', 0, 'x', 'x')
            std = ConH0.std(axis=[0, 2, 3]).dimshuffle('x', 0, 'x', 'x')
            ConH1 = ((ConH0 - mean) / (std + TINY))
        # Learnable affine transform, then the chosen nonlinearity.
        ConH2 = ((self.eta.dimshuffle('x', 0, 'x', 'x') * ConH1) + self.beta.dimshuffle('x', 0, 'x', 'x'))
        return activation_fn_th(ConH2, atype=atype)
.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_points_in_polygons():
points = np.array([[300.0, 300.0], [400.0, 400.0], [100.0, 100], [300, 250], [100, 0]])
polygons = np.array([[200.0, 200.0, 400.0, 400.0, 500.0, 200.0, 400.0, 100.0], [400.0, 400.0, 500.0, 500.0, 600.0, 300.0, 500.0, 200.0], [300.0, 300.0, 600.0, 700.0, 700.0, 700.0, 700.0, 100.0]])
expected_output = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
points = torch.from_numpy(points).cuda().float()
polygons = torch.from_numpy(polygons).cuda().float()
expected_output = torch.from_numpy(expected_output).cuda().float()
assert torch.allclose(points_in_polygons(points, polygons), expected_output, 0.001) |
def aa_color(letter):
    """Return a plot color for an amino-acid letter, grouped by residue class.

    Aromatic residues (F, W, Y) get a custom RGBA gold; every other group
    maps to a named matplotlib color; anything unknown (including the
    '$\\boxminus$' gap symbol) falls back to black.
    """
    if letter in ('F', 'W', 'Y'):
        # Fresh list per call so callers may mutate it safely.
        return [(199 / 256.0), (182 / 256.0), 0.0, 1.0]
    palette = {
        'C': 'green',
        'Q': 'purple', 'N': 'purple', 'S': 'purple', 'T': 'purple',
        'V': 'black', 'L': 'black', 'I': 'black', 'M': 'black',
        'K': 'blue', 'R': 'blue', 'H': 'blue',
        'D': 'red', 'E': 'red',
        'A': 'grey', 'P': 'grey', 'G': 'grey',
    }
    return palette.get(letter, 'black')
def load_model(path, epoch=None, use_adjacent=False):
    """Load a trained routing model (attention or pointer network).

    `path` may be a checkpoint file, a run directory (the latest or the
    given `epoch` checkpoint is chosen), or a two-element list
    [encoder_ckpt, decoder_ckpt] whose state dicts are merged. Keys
    containing 'W_hidden'/'W_embed' are filtered so those projections keep
    the fresh model's weights. Returns (model, args) with the model in
    eval mode.
    """
    from nets.attention_model import AttentionModel, student_AttentionModel
    from nets.pointer_network import PointerNetwork
    if isinstance(path, list):
        # Separate encoder/decoder checkpoints; args come from the encoder dir.
        (encoder_model_filename, decoder_model_filename) = (path[0], path[1])
        (encoder_path, decoder_path) = (os.path.dirname(encoder_model_filename), os.path.dirname(decoder_model_filename))
        args = load_args(os.path.join(encoder_path, 'args.json'))
    else:
        if os.path.isfile(path):
            model_filename = path
            path = os.path.dirname(model_filename)
        elif os.path.isdir(path):
            if (epoch is None):
                # Pick the highest N among 'epoch-N.pt' files in the directory.
                epoch = max((int(os.path.splitext(filename)[0].split('-')[1]) for filename in os.listdir(path) if (os.path.splitext(filename)[1] == '.pt')))
            model_filename = os.path.join(path, 'epoch-{}.pt'.format(epoch))
        else:
            assert False, '{} is not a valid directory or file'.format(path)
        args = load_args(os.path.join(path, 'args.json'))
    problem = load_problem(args['problem'])
    # use_adjacent selects the student variant of the attention model.
    model_class = {'attention': (AttentionModel if (not use_adjacent) else student_AttentionModel), 'pointer': PointerNetwork}.get(args.get('model', 'attention'), None)
    assert (model_class is not None), 'Unknown model: {}'.format(model_class)
    model = model_class(args['student_embedding_dim'], args['student_hidden_dim'], problem, n_encode_layers=args['student_n_encode_layers'], mask_inner=True, mask_logits=True, normalization=args['student_normalization'], tanh_clipping=args['tanh_clipping'], checkpoint_encoder=args.get('checkpoint_encoder', False), shrink_size=args.get('shrink_size', None))
    if isinstance(path, list):
        print(' [*] Loading encoder from {}'.format(path[0]))
        print(' [*] Loading decoder from {}'.format(path[1]))
        (enc_load_data, dec_load_data) = (torch_load_cpu(encoder_model_filename), torch_load_cpu(decoder_model_filename))
        # Checkpoints may be raw state dicts or {'model': state_dict} wrappers.
        enc_state_dict = (enc_load_data.get('model', {}) if (not isinstance(enc_load_data, OrderedDict)) else enc_load_data)
        dec_state_dict = (dec_load_data.get('model', {}) if (not isinstance(dec_load_data, OrderedDict)) else dec_load_data)
        new_state_dict = OrderedDict()
        # Encoder weights: everything up to the first 'project' key.
        for (k, v) in enc_state_dict.items():
            if ('project' in k):
                break
            new_state_dict[k] = v
        # Decoder weights: only 'project' keys, excluding W_hidden/W_embed.
        for (k, v) in dec_state_dict.items():
            if (('project' in k) and ('W_hidden' not in k) and ('W_embed' not in k)):
                new_state_dict[k] = v
    else:
        load_data = torch_load_cpu(model_filename)
        state_dict = load_data.get('model', {})
        new_state_dict = OrderedDict()
        # Keep everything except the W_hidden/W_embed projections.
        for (k, v) in state_dict.items():
            if (('W_hidden' not in k) and ('W_embed' not in k)):
                new_state_dict[k] = v
    # Overlay the filtered checkpoint weights on the fresh model's weights.
    model.load_state_dict({**model.state_dict(), **new_state_dict})
    if (not isinstance(path, list)):
        (model, *_) = _load_model_file(model_filename, model)
    model.eval()
    return (model, args)
def get(seed=0, fixed_order=False, pc_valid=0.1, nperm=10):
    """Build (or load cached) permuted-MNIST continual-learning tasks.

    Creates `nperm` tasks, each a fixed pixel permutation of MNIST; tensors
    are cached under pmnist_dir on first run and reloaded afterwards. A
    validation split of `pc_valid` is carved off each task's training set.

    :param seed: RNG seed used to shuffle the task order.
    :param fixed_order: if True, tasks keep their natural order.
    :param pc_valid: fraction of training data used for validation.
    :param nperm: number of permutation tasks to create.
    :returns: (data, taskcla, size) where data[t] holds the splits of task t,
        taskcla lists (task, n_classes) pairs and size is the input shape.
    """
    data = {}
    taskcla = []
    size = [1, 28, 28]
    nperm = nperm
    # One permutation seed per task, optionally shuffled for task order.
    seeds = np.array(list(range(nperm)), dtype=int)
    if (not fixed_order):
        seeds = shuffle(seeds, random_state=seed)
    if (not os.path.isdir(pmnist_dir)):
        # First run: generate the permuted tasks from raw MNIST and cache them.
        os.makedirs(pmnist_dir)
        mean = (0.1307,)
        std = (0.3081,)
        dat = {}
        dat['train'] = datasets.MNIST(mnist_dir, train=True, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)]))
        dat['test'] = datasets.MNIST(mnist_dir, train=False, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)]))
        for (i, r) in enumerate(seeds):
            print(i, end=',')
            sys.stdout.flush()
            data[i] = {}
            data[i]['name'] = 'pmnist-{:d}'.format(i)
            data[i]['ncla'] = 10
            for s in ['train', 'test']:
                loader = torch.utils.data.DataLoader(dat[s], batch_size=1, shuffle=False)
                data[i][s] = {'x': [], 'xi': [], 'y': []}
                for (image, target) in loader:
                    aux = image.view((- 1)).numpy()
                    # 'xi' keeps the unpermuted image ...
                    image = torch.FloatTensor(aux).view(size)
                    data[i][s]['xi'].append(image)
                    # ... while 'x' stores the task-specific pixel permutation.
                    aux = shuffle(aux, random_state=((r * 100) + i))
                    image = torch.FloatTensor(aux).view(size)
                    data[i][s]['x'].append(image)
                    data[i][s]['y'].append(target.numpy()[0])
            # Stack per-sample tensors and persist each split to disk.
            for s in ['train', 'test']:
                data[i][s]['x'] = torch.stack(data[i][s]['x']).view((- 1), size[0], size[1], size[2])
                data[i][s]['y'] = torch.LongTensor(np.array(data[i][s]['y'], dtype=int)).view((- 1))
                torch.save(data[i][s]['x'], os.path.join(os.path.expanduser(pmnist_dir), ((('data' + str(r)) + s) + 'x.bin')))
                torch.save(data[i][s]['xi'], os.path.join(os.path.expanduser(pmnist_dir), ((('data' + str(r)) + s) + 'xi.bin')))
                torch.save(data[i][s]['y'], os.path.join(os.path.expanduser(pmnist_dir), ((('data' + str(r)) + s) + 'y.bin')))
        print()
    else:
        # Cache hit: reload the previously saved tensors per task and split.
        for (i, r) in enumerate(seeds):
            data[i] = dict.fromkeys(['name', 'ncla', 'train', 'test'])
            data[i]['ncla'] = 10
            data[i]['name'] = 'pmnist-{:d}'.format(i)
            for s in ['train', 'test']:
                data[i][s] = {'x': [], 'xi': [], 'y': []}
                data[i][s]['x'] = torch.load(os.path.join(os.path.expanduser(pmnist_dir), ((('data' + str(r)) + s) + 'x.bin')))
                data[i][s]['xi'] = torch.load(os.path.join(os.path.expanduser(pmnist_dir), ((('data' + str(r)) + s) + 'xi.bin')))
                data[i][s]['y'] = torch.load(os.path.join(os.path.expanduser(pmnist_dir), ((('data' + str(r)) + s) + 'y.bin')))
    # Carve a validation split off the front of each training set.
    # NOTE(review): the split is not randomized — the first pc_valid fraction
    # becomes validation; confirm this is intended.
    for t in data.keys():
        r = np.arange(data[t]['train']['x'].size(0))
        r = np.array(r, dtype=int)
        nvalid = int((pc_valid * len(r)))
        ivalid = torch.LongTensor(r[:nvalid])
        itrain = torch.LongTensor(r[nvalid:])
        data[t]['valid'] = {}
        data[t]['valid']['x'] = data[t]['train']['x'][ivalid].clone()
        data[t]['valid']['y'] = data[t]['train']['y'][ivalid].clone()
        data[t]['train']['x'] = data[t]['train']['x'][itrain].clone()
        data[t]['train']['y'] = data[t]['train']['y'][itrain].clone()
    # Total class count across all tasks.
    n = 0
    for t in data.keys():
        taskcla.append((t, data[t]['ncla']))
        n += data[t]['ncla']
    data['ncla'] = n
    return (data, taskcla, size)
def emotion_freqs(importtext):
    """Compute keyword-based emotion frequencies for a text.

    Tokenizes `importtext` and counts case-insensitive hits against eight
    hand-curated emotion word lists, returning each count normalized by the
    total token count, in the order:
    [fear, anger, sadness, joy, disgust, surprise, trust, anticipation].

    :param importtext: raw text to analyze.
    :returns: list of eight floats in [0, 1].
    :raises ZeroDivisionError: if the text tokenizes to zero tokens.
    """
    tokens = word_tokenize(importtext)
    fearwords = ['scared', 'afraid', 'avoid', 'not', 'no', 'anxiety', 'road', 'spider', 'snake', 'heights', 'die', 'falling', 'death', 'fast', 'despair', 'agonize', 'bother', 'worry', 'endure', 'sustain', 'tolerate', 'creeps', 'jitters', 'nervous', 'nervousness', 'concerned', 'worry']
    angerwords = ['angry', 'mad', 'injustice', 'annoyed', 'school', 'work', 'predictable', 'upset', 'frustrated', 'sick', 'tired', 'fuck', 'shoot', 'shit', 'darn', 'sucks', 'bad', 'ugly']
    sadwords = ['sad', 'depressed', 'cry', 'bad', 'disappointed', 'distress', 'uneasy', 'upset', 'regret', 'dismal', 'black', 'hopeless']
    joywords = ['happy', 'glad', 'swell', 'pleasant', 'well', 'good', 'joy', 'sweet', 'grateful', 'ecstatic', 'euphoric', 'encouraged', 'smile', 'laugh', 'content', 'satisfied', 'delighted']
    disgustwords = ['wrong', 'disgusting', 'bad', 'taste', 'aversion', 'horror', 'repulsed', 'hate', 'allergy', 'dislike', 'displeasure']
    surprisewords = ['surprised', 'appetite', 'fondness', 'like', 'relish', 'shine', 'surprise', 'unexpected', 'random', 'new', 'plastic', 'cool']
    trustwords = ['useful', 'trust', 'listen', 'insight', 'believe', 'seek', 'see', 'feel', 'touch', 'mom', 'brother', 'friend', 'girlfriend', 'father', 'dad', 'uncle', 'family']
    anticipationwords = ['excited', 'looking', 'forward', 'to', 'birthday', 'anniversary', 'christmas', 'new years', 'halloween', 'party', 'expectation']
    fear = 0
    anger = 0
    sad = 0
    joy = 0
    disgust = 0
    surprise = 0
    trust = 0
    anticipation = 0
    # A token may hit several lists, so each membership test is independent.
    for token in tokens:
        word = token.lower()
        if (word in fearwords):
            fear += 1
        if (word in angerwords):
            anger += 1
        if (word in sadwords):
            sad += 1
        if (word in joywords):
            joy += 1
        if (word in disgustwords):
            # BUGFIX: was `disgust = (digust + 1)` — a NameError typo that
            # crashed on the first disgust-word hit.
            disgust += 1
        if (word in surprisewords):
            surprise += 1
        if (word in trustwords):
            trust += 1
        if (word in anticipationwords):
            anticipation += 1
    total = len(tokens)
    # Normalize each count by the token count (true division yields floats).
    return [(fear / total), (anger / total), (sad / total), (joy / total), (disgust / total), (surprise / total), (trust / total), (anticipation / total)]
def parse_list(config, key, dtype=int):
    """Normalize config[key] to a list of dtype values, in place.

    A comma-separated string is converted to a list of dtype; a missing key
    is left untouched. After conversion the value must be a list whose
    elements are all of dtype, otherwise an AssertionError is raised.
    """
    if key not in config:
        return
    value = config[key]
    if isinstance(value, str):
        # '1,2,3' -> [dtype('1'), dtype('2'), dtype('3')]
        config[key] = [dtype(item) for item in value.split(',')]
    assert (isinstance(config[key], list) and all([isinstance(e, dtype) for e in config[key]])), f'{key} should be a list of values dtype {dtype}. Given {config[key]} of type {type(config[key])} with values of type {[type(e) for e in config[key]]}.'
def get_param(l, exclude=set(['top', 'bottom', 'name', 'type'])):
    """Recursively convert a protobuf-like message into plain Python data.

    Messages (objects exposing ListFields) become dicts keyed by field name
    (fields named in `exclude` are dropped at the top level only); mutable
    sequences (objects with __delitem__) become lists; everything else is
    returned unchanged.
    """
    if hasattr(l, 'ListFields'):
        # Recurse with an empty exclude set so nested messages keep all fields.
        return {field.name: get_param(value, [])
                for (field, value) in l.ListFields()
                if field.name not in exclude}
    if hasattr(l, '__delitem__'):
        return [get_param(item) for item in l]
    return l
(unsafe_hash=True, eq=True, order=True)
class VehicleStateDyn(VehicleState):
    """Dynamic-bicycle vehicle state: VehicleState plus lateral velocity
    `vy` and yaw rate `dpsi`.

    NOTE(review): the line above looks like the argument list of a stripped
    `@dataclass(...)` decorator, and `from_array` (which takes `cls`)
    appears to be missing `@classmethod` — confirm against the original.
    """
    # Lateral velocity in the body frame.
    vy: float = 0
    # Yaw rate.
    dpsi: float = 0
    # Position of each state variable within the flattened state vector.
    idx = frozendict({'x': 0, 'y': 1, 'psi': 2, 'vx': 3, 'vy': 4, 'dpsi': 5, 'delta': 6})
    def __add__(self, other: 'VehicleStateDyn') -> 'VehicleStateDyn':
        # Element-wise sum; only defined between exactly matching types.
        if (type(other) == type(self)):
            return replace(self, x=(self.x + other.x), y=(self.y + other.y), psi=(self.psi + other.psi), vx=(self.vx + other.vx), vy=(self.vy + other.vy), dpsi=(self.dpsi + other.dpsi), delta=(self.delta + other.delta))
        else:
            raise NotImplementedError
    def __mul__(self, val: float) -> 'VehicleStateDyn':
        # Element-wise scaling by a scalar.
        return replace(self, x=(self.x * val), y=(self.y * val), psi=(self.psi * val), vx=(self.vx * val), vy=(self.vy * val), dpsi=(self.dpsi * val), delta=(self.delta * val))
    def as_ndarray(self) -> np.ndarray:
        # Flatten to [x, y, psi, vx, vy, dpsi, delta], consistent with `idx`.
        return np.array([self.x, self.y, self.psi, self.vx, self.vy, self.dpsi, self.delta])
    def from_array(cls, z: np.ndarray):
        # Inverse of as_ndarray: rebuild a state from a flat vector.
        assert (cls.get_n_states() == z.size == z.shape[0]), f'z vector {z} cannot initialize VehicleState.'
        return VehicleStateDyn(x=z[cls.idx['x']], y=z[cls.idx['y']], psi=z[cls.idx['psi']], vx=z[cls.idx['vx']], vy=z[cls.idx['vy']], dpsi=z[cls.idx['dpsi']], delta=z[cls.idx['delta']])
    def to_vehicle_state(self) -> VehicleState:
        # Project down to the kinematic VehicleState (drops vy and dpsi).
        return VehicleState(x=self.x, y=self.y, psi=self.psi, vx=self.vx, delta=self.delta)
class FlaxDDPMScheduler(metaclass=DummyObject):
    """Import-time placeholder that raises unless the 'flax' backend is installed.

    Every entry point simply delegates to requires_backends, which raises a
    helpful ImportError describing the missing dependency.
    NOTE(review): from_config / from_pretrained take `cls` and appear to be
    missing their @classmethod decorators — likely stripped; confirm against
    the diffusers dummy-objects template.
    """
    _backends = ['flax']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])
class ReplayMemory(Dataset):
    """Fixed-capacity ring buffer usable as a torch Dataset.

    Items are appended until `capacity` is reached, after which the oldest
    entries are overwritten in insertion order. Note that clear() empties
    the storage but intentionally leaves the write cursor where it was,
    matching the original behavior.
    """
    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = []
        # Index of the next slot to write.
        self.position = 0
    def push(self, item):
        """Store `item`, overwriting the oldest entry once full."""
        if (self.position + 1) > len(self.memory):
            self.memory.append(item)
        else:
            self.memory[self.position] = item
        # Advance the cursor, wrapping at capacity.
        self.position = (self.position + 1) % self.capacity
    def is_full(self):
        """True once the buffer has reached its capacity."""
        return len(self.memory) == self.capacity
    def __getitem__(self, index):
        return self.memory[index]
    def __len__(self):
        return len(self.memory)
    def clear(self):
        """Drop all stored items (the write cursor is not reset)."""
        self.memory = []
class Bottleneck(nn.Module):
    """Bottleneck residual block (1x1 -> 3x3 -> 1x1 conv2d_iabn stack) with an
    optional SE module and optional anti-aliased stride-2 downsampling."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True, act_layer='leaky_relu', aa_layer=None):
        super(Bottleneck, self).__init__()
        self.conv1 = conv2d_iabn(inplanes, planes, kernel_size=1, stride=1, act_layer=act_layer, act_param=0.001)
        if stride == 1:
            self.conv2 = conv2d_iabn(planes, planes, kernel_size=3, stride=1, act_layer=act_layer, act_param=0.001)
        elif aa_layer is None:
            # Plain strided conv when no anti-aliasing layer is supplied.
            self.conv2 = conv2d_iabn(planes, planes, kernel_size=3, stride=2, act_layer=act_layer, act_param=0.001)
        else:
            # Stride-1 conv followed by the blur-pool style downsampler.
            self.conv2 = nn.Sequential(conv2d_iabn(planes, planes, kernel_size=3, stride=1, act_layer=act_layer, act_param=0.001), aa_layer(channels=planes, filt_size=3, stride=2))
        reduction_chs = max((planes * self.expansion) // 8, 64)
        self.se = SEModule(planes, reduction_channels=reduction_chs) if use_se else None
        self.conv3 = conv2d_iabn(planes, planes * self.expansion, kernel_size=1, stride=1, act_layer='identity')
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Residual forward: main path plus (optionally downsampled) shortcut."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.conv2(self.conv1(x))
        if self.se is not None:
            out = self.se(out)
        out = self.conv3(out)
        return self.relu(out + shortcut)
class AverageMeter(object):
    """Tracks the latest value plus running sum/count/average of a metric."""

    def __init__(self, name, fmt=':f', summary_type=Summary.AVERAGE):
        self.name = name
        # fmt: format spec fragment used when rendering val/avg.
        self.fmt = fmt
        self.summary_type = summary_type
        self.reset()

    def reset(self):
        """Zero out all running statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        template = ''.join(['{name} {val', self.fmt, '} ({avg', self.fmt, '})'])
        return template.format(**self.__dict__)

    def summary(self):
        """Render the end-of-run summary according to `summary_type`."""
        if self.summary_type is Summary.NONE:
            template = ''
        elif self.summary_type is Summary.AVERAGE:
            template = '{name} {avg:.3f}'
        elif self.summary_type is Summary.SUM:
            template = '{name} {sum:.3f}'
        elif self.summary_type is Summary.COUNT:
            template = '{name} {count:.3f}'
        else:
            raise ValueError('invalid summary type %r' % self.summary_type)
        return template.format(**self.__dict__)
('/semcomplete/', methods=['POST'])
def semantic_autocomplete():
inputs = json.loads(request.data)
scene_description = inputs['abstract_scene_description']
recursion_levels = None
if ('recursion_levels' in inputs):
recursion_levels = inputs['recursion_levels']
session_token = inputs['session_token']
input_event = InputSceneDescription(session_token, scene_description)
input_event_file = input_event.save_to_event_file(event_file(session_token))
output_event = ShoppingList().tick()
try:
brainstormer = Brainstorm.IterativeBrainstormer(interface=myGPTinterface)
output = brainstormer.run(scene_description, 1)
except openai.error.RateLimitError:
logger.info('language model overloaded, try again later')
return ('language model overloaded, try again later', 500)
except:
logger.info('language model produced unparsable output')
return ('language model produced unparsable output', 500)
assert isinstance(output, SceneGraphs.SceneShoppingList)
output = output.flatten()
output = [el.to_json(recursive=False) for el in output]
output_event.tock().update(output)
output_event_file = output_event.save_to_event_file(event_file(session_token))
return jsonify(output) |
class Graphormer(BertPreTrainedModel):
    """Graph-transformer head: a BERT-style encoder over image features plus a
    linear residual branch from the raw features into the output space."""

    def __init__(self, config):
        super(Graphormer, self).__init__(config)
        self.config = config
        self.bert = EncoderBlock(config)
        self.cls_head = nn.Linear(config.hidden_size, self.config.output_feature_dim)
        self.residual = nn.Linear(config.img_feature_dim, self.config.output_feature_dim)
        self.apply(self.init_weights)

    def forward(self, img_feats, input_ids=None, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None, position_ids=None, head_mask=None):
        """Score image features; with both output_attentions and
        output_hidden_states set, the encoder's extra outputs come along."""
        encoder_out = self.bert(img_feats=img_feats, input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, attention_mask=attention_mask, head_mask=head_mask)
        scores = self.cls_head(encoder_out[0]) + self.residual(img_feats)
        if self.config.output_attentions and self.config.output_hidden_states:
            return (scores, encoder_out[1], encoder_out[-1])
        return scores
def seed_everything(seed=1234):
    """Seed Python's, NumPy's and PyTorch's RNGs for reproducibility.

    Args:
        seed: integer seed applied to every random source.
    """
    import random  # local import: the stdlib RNG was not seeded by the original
    random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    # NOTE: setting PYTHONHASHSEED after interpreter start does not change str
    # hash randomization of the *current* process; kept for child processes.
    os.environ['PYTHONHASHSEED'] = str(seed)
class VideoNet(nn.Module):
    """Per-frame CNN feature extractor followed by an LSTM over time.

    Input is (batch, time, H, W) single-channel frames; each frame is reduced
    to one scalar via convs + global spatial max, the scalar sequence is fed
    to an LSTM, and the final hidden state is mapped to a single output.
    """

    def __init__(self):
        super(VideoNet, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=3, kernel_size=5)
        self.relu1 = nn.ReLU()
        self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=1)
        self.conv2 = nn.Conv2d(in_channels=3, out_channels=1, kernel_size=3)
        self.lstm = nn.LSTM(input_size=1, hidden_size=40, num_layers=1, batch_first=True)
        self.fc = nn.Linear(40, 1)

    def forward(self, x):
        """x: (batch, time, H, W) -> (batch, 1)."""
        n_batch, n_frames = x.shape[0], x.shape[1]
        # Fold time into the batch dimension so convs see one frame at a time.
        frames = x.reshape(n_batch * n_frames, 1, x.shape[2], x.shape[3])
        feat = self.maxpool1(self.relu1(self.conv1(frames)))
        feat = self.conv2(feat)
        # Global spatial max-pool down to one scalar per frame.
        feat = torch.max(feat, dim=3).values
        feat = torch.max(feat, dim=2).values
        seq = feat.reshape(n_batch, n_frames, 1)
        _, (h_last, _) = self.lstm(seq)
        return self.fc(h_last.squeeze(0))
def download_tweets_for_csv(file_name: str, column: str, api_data: Dict) -> str:
    """Hydrate tweet ids found in `column` of a CSV via the Twitter API.

    Writes a new CSV where `column` is replaced by a 'text' column holding
    each tweet's full_text; rows whose id could not be hydrated become
    all-NaN and are dropped. Returns the new file's name.
    """
    def hydrate(row, translation, columns):
        # Replace the id column with the hydrated text, or emit an all-NaN
        # row (dropped by the later dropna) when the id was not found.
        if (str(row[column]) in translation):
            row['text'] = translation[row[column]]
            row = row.drop(column)
            return row
        else:
            ser = pd.Series(index=columns)
            ser = ser.drop(column)
            return ser
    # NOTE(review): suffix is appended after any extension, yielding names
    # like 'data.csv_with_tweets' -- confirm this is intended.
    new_file = (file_name + '_with_tweets')
    df = pd.read_csv(file_name, dtype={column: str})
    t = Twarc(api_data['twitter']['consumer_key'], api_data['twitter']['consumer_secret'], api_data['twitter']['access_token'], api_data['twitter']['access_token_secret'])
    translation = {}
    # Map tweet id -> full text for every id the API could hydrate.
    for tweet in t.hydrate(df[column]):
        translation[str(tweet['id'])] = tweet['full_text']
    df = df.apply(hydrate, axis=1, args=(translation, df.columns)).dropna(how='all')
    df.to_csv(new_file, index=False, quoting=csv.QUOTE_NONNUMERIC)
    return new_file
class SearchEngine(ABC):
    """Abstract interface for a search strategy (presumably hyper-parameter
    or architecture search -- confirm against concrete subclasses)."""
    def run(self):
        """Execute the search; subclasses provide the implementation."""
        pass
    def get_best_trials(self, k):
        """Return the top-`k` trials found; subclasses provide the implementation."""
        pass
class VaswaniRule(extension.Extension):
    """Trainer extension applying the 'Attention Is All You Need' LR schedule.

    value(t) = scale * d^-0.5 * min(t^-0.5, t * warmup_steps^-1.5),
    written to `optimizer.<attr>` each time the extension fires.
    """
    def __init__(self, attr, d, warmup_steps=4000, init=None, target=None, optimizer=None, scale=1.0):
        # attr: name of the optimizer attribute to drive (e.g. 'lr').
        self._attr = attr
        # Precomputed schedule constants.
        self._d_inv05 = ((d ** (- 0.5)) * scale)
        self._warmup_steps_inv15 = (warmup_steps ** (- 1.5))
        self._init = init
        # Stored for interface parity; not read anywhere in this class.
        self._target = target
        self._optimizer = optimizer
        # Step counter; persisted by serialize().
        self._t = 0
        self._last_value = None
    def initialize(self, trainer):
        """Apply the initial value, resuming from _last_value after deserialization."""
        optimizer = self._get_optimizer(trainer)
        if (self._init is None):
            # Schedule value at t=1 (the warmup branch of the min()).
            self._init = (self._d_inv05 * (1.0 * self._warmup_steps_inv15))
        if (self._last_value is not None):
            self._update_value(optimizer, self._last_value)
        else:
            self._update_value(optimizer, self._init)
    def __call__(self, trainer):
        """Advance one step and write the scheduled value to the optimizer."""
        self._t += 1
        optimizer = self._get_optimizer(trainer)
        value = (self._d_inv05 * min((self._t ** (- 0.5)), (self._t * self._warmup_steps_inv15)))
        self._update_value(optimizer, value)
    def serialize(self, serializer):
        # Persist step count and last value so a resumed run continues the schedule.
        self._t = serializer('_t', self._t)
        self._last_value = serializer('_last_value', self._last_value)
    def _get_optimizer(self, trainer):
        # Fall back to the trainer's 'main' optimizer when none was given.
        return (self._optimizer or trainer.updater.get_optimizer('main'))
    def _update_value(self, optimizer, value):
        setattr(optimizer, self._attr, value)
        self._last_value = value
class AICity20ReCam(BaseImageDataset):
    """AI City Challenge 2020 re-id split where query and gallery are drawn
    from the cropped-augmentation copy of the training images.

    Image lists come from trainval_partial/{train,query,test}.txt; each item
    in train/query/gallery is an (img_path, camid, pid) tuple.
    """
    dataset_dir = 'AIC20_ReID/'
    dataset_aug_dir = 'AIC20_ReID_Cropped/'
    # NOTE(review): defined but never used anywhere in this class.
    dataset_blend_dir = 'AIC20_ReID_blend/'
    def __init__(self, root='', verbose=True, **kwargs):
        super(AICity20ReCam, self).__init__()
        self.dataset_dir = osp.join(root, self.dataset_dir)
        self.dataset_aug_dir = osp.join(root, self.dataset_aug_dir)
        self.train_dir = osp.join(self.dataset_dir, 'image_train')
        # Query/gallery intentionally point at the augmented train images.
        self.query_dir = osp.join(self.dataset_aug_dir, 'image_train')
        self.gallery_dir = osp.join(self.dataset_aug_dir, 'image_train')
        self.train_aug_dir = osp.join(self.dataset_aug_dir, 'image_train')
        train_list_path = osp.join(self.dataset_dir, 'trainval_partial', 'train.txt')
        query_list_path = osp.join(self.dataset_dir, 'trainval_partial', 'query.txt')
        gallery_list_path = osp.join(self.dataset_dir, 'trainval_partial', 'test.txt')
        self._check_before_run()
        train = self._process_dir(self.train_dir, train_list_path, relabel=False)
        query = self._process_dir(self.query_dir, query_list_path, relabel=False)
        gallery = self._process_dir(self.gallery_dir, gallery_list_path, relabel=False)
        # Only the train split gets ids remapped to a contiguous range.
        train = self.relabel(train)
        if verbose:
            print('=> aicity trainval loaded')
        self.train = train
        self.query = query
        self.gallery = gallery
        (self.num_train_pids, self.num_train_imgs, self.num_train_cams) = self.get_imagedata_info(self.train)
        (self.num_query_pids, self.num_query_imgs, self.num_query_cams) = self.get_imagedata_info(self.query)
        (self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams) = self.get_imagedata_info(self.gallery)
        self.train_tracks = self._read_tracks(osp.join(self.dataset_dir, 'train_track.txt'))
        self.test_tracks = self._read_tracks(osp.join(self.dataset_dir, 'trainval_partial', 'test_track.txt'))
    def _check_before_run(self):
        """Raise RuntimeError if any required directory is missing."""
        if (not osp.exists(self.dataset_dir)):
            raise RuntimeError("'{}' is not available".format(self.dataset_dir))
        if (not osp.exists(self.train_dir)):
            raise RuntimeError("'{}' is not available".format(self.train_dir))
        if (not osp.exists(self.query_dir)):
            raise RuntimeError("'{}' is not available".format(self.query_dir))
        if (not osp.exists(self.gallery_dir)):
            raise RuntimeError("'{}' is not available".format(self.gallery_dir))
    def _process_dir(self, dir_path, list_path, relabel=False):
        """Parse a '<pid>_c<camid>_<trackid>_<image_name>' list file into tuples."""
        dataset = []
        with open(list_path, 'r') as f:
            lines = f.readlines()
        for line in lines:
            line = line.strip()
            (pid, camid, trackid, image_name) = line.split('_')
            pid = int(pid)
            # Drop the leading 'c' from the camera field.
            camid = int(camid[1:])
            img_path = osp.join(dir_path, image_name)
            # NOTE(review): tuple order is (img_path, camid, pid); many reid
            # pipelines expect (img_path, pid, camid) -- confirm consumers.
            dataset.append((img_path, camid, pid))
        if relabel:
            dataset = self.relabel(dataset)
        return dataset
def define_G(input_nc, output_nc, ngf, norm='instance', which_model_netG='resnet', use_dropout=False, gpu_ids=[]):
    """Build a 9-block ResNet generator with the requested normalization.

    `which_model_netG` is accepted for interface parity but not consulted;
    the generator is moved to CUDA when `gpu_ids` is non-empty.
    """
    if gpu_ids:
        assert torch.cuda.is_available()
    norm_layer = get_norm_layer(norm_type=norm)
    netG = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9, gpu_ids=gpu_ids)
    if gpu_ids:
        netG.cuda()
    netG.apply(weights_init)
    return netG
def Trans4PASS_v2(num_classes=19, emb_chans=128):
    """Factory for the Trans4PASS model with the 'trans4pass_v2' encoder."""
    return Trans4PASS(num_classes, emb_chans, encoder='trans4pass_v2')
def get_all_notebook_files(directory='./notebooks/'):
    """Recursively list every file under `directory`.

    Returns full paths (dirpath joined with filename) in os.walk order;
    an empty or missing directory yields an empty list.
    """
    # Idiom: comprehension instead of the manual append loop (ruff PERF401).
    return [
        os.path.join(dirpath, name)
        for dirpath, _subdirs, files in os.walk(directory)
        for name in files
    ]
class HM(autograd.Function):
    """Memory-bank lookup: scores inputs against a feature bank in forward and
    momentum-updates the bank during backward.

    NOTE(review): forward/backward take `ctx` first; @staticmethod decorators
    were presumably stripped upstream -- confirm.
    """
    def forward(ctx, inputs, inputs_norm, indexes, features, features_norm, momentum):
        # Stash bank references and raw inputs/indexes for the backward pass.
        ctx.features = features
        ctx.features_norm = features_norm
        ctx.momentum = momentum
        ctx.save_for_backward(inputs, indexes)
        # Similarity scores of each input against every bank entry.
        outputs = inputs_norm.mm(ctx.features_norm.t())
        return outputs
    def backward(ctx, grad_outputs):
        (inputs, indexes) = ctx.saved_tensors
        grad_inputs = None
        # Gradient flows only to inputs_norm (forward input position 1).
        if ctx.needs_input_grad[1]:
            grad_inputs = grad_outputs.mm(ctx.features_norm)
        # Side effect: momentum moving-average update of the bank entries for
        # this batch, using the *unnormalized* inputs.
        for (x, y) in zip(inputs, indexes):
            ctx.features[y] = ((ctx.momentum * ctx.features[y]) + ((1.0 - ctx.momentum) * x))
        return (None, grad_inputs, None, None, None, None)
def load_dataset(root_dir, redux, params, shuffled=False, single=False):
    """Build the noisy-image dataset described by `params` and wrap it in a
    DataLoader (batch size 1 when `single` is set)."""
    noise = (params.noise_type, params.noise_param)
    if params.noise_type == 'mc':
        dataset = MonteCarloDataset(root_dir, redux, params.crop_size, clean_targets=params.clean_targets)
    else:
        dataset = NoisyDataset(root_dir, redux, params.crop_size, clean_targets=params.clean_targets, noise_dist=noise, seed=params.seed)
    batch = 1 if single else params.batch_size
    return DataLoader(dataset, batch_size=batch, shuffle=shuffled)
class AutoColBERTModel():
    """Factory mirroring HF Auto* classes for ColBERT checkpoints."""
    def from_pretrained(cls, model_path: str, config=None):
        """Load a ColBERT model; only bert-type configs are supported.

        NOTE(review): takes `cls` -- likely a stripped @classmethod.
        """
        if config is None:
            config = AutoConfig.from_pretrained(model_path)
        if config.model_type != 'bert':
            raise NotImplementedError()
        return ColBERT.from_pretrained(model_path, config=config)
class SpeechEncoderDecoderModel(metaclass=DummyObject):
    """Import-time stand-in used when the `torch` backend is unavailable;
    instantiating it delegates to `requires_backends`, which presumably
    raises an informative error -- confirm."""
    # Backend(s) this dummy object stands in for.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def load_requirements(path_dir: str=PATH_ROOT, comment_char: str='#') -> List:
    """Parse core_requirements.txt under `path_dir`, dropping comments and blanks."""
    req_path = os.path.join(path_dir, 'core_requirements.txt')
    with open(req_path, 'r') as fh:
        raw_entries = [entry.strip() for entry in fh.readlines()]
    parsed = []
    for entry in raw_entries:
        # Cut everything from the first comment character onward.
        if comment_char in entry:
            entry = entry[:entry.index(comment_char)]
        if entry:
            parsed.append(entry)
    return parsed
def hard_update(target, source):
    """Copy every parameter of `source` into `target` in place."""
    for dst, src in zip(target.parameters(), source.parameters()):
        dst.data.copy_(src.data)
def build_next_utterance(src, trg):
    """Link every head in `src` to every head in `trg` with a 'next_utterance'
    edge in the module-level dialogue graph DG (mutated via create_edge)."""
    global DG
    for (src_head, _) in src.items():
        for (trg_head, _) in trg.items():
            create_edge(src_head, trg_head, 'next_utterance')
class UpsamplingBlock(nn.Module):
    """A Conv2d followed by x2 bilinear upsampling."""

    def __init__(self, input_nc, output_nc, kernel, stride, pad, dil):
        super(UpsamplingBlock, self).__init__()
        # Module names 'conv_1'/'upsample_2' are kept so state dicts match.
        stages = nn.Sequential()
        stages.add_module('conv_1', nn.Conv2d(input_nc, output_nc, kernel, stride, pad, dilation=dil))
        stages.add_module('upsample_2', nn.UpsamplingBilinear2d(scale_factor=2))
        self.biup_block = stages

    def forward(self, input):
        """Convolve then bilinearly upsample by a factor of 2."""
        return self.biup_block(input)
class Bleu():
    """Thin wrapper around BleuScorer computing BLEU up to n-gram order n."""

    def __init__(self, n=4):
        # n: highest n-gram order scored.
        self._n = n
        self._hypo_for_image = {}
        self.ref_for_image = {}

    def compute_score(self, gts, res):
        """Score hypotheses `res` against references `gts`, keyed by image id.

        Returns (corpus_score, per_image_scores) from BleuScorer.
        """
        assert (gts.keys() == res.keys())
        scorer = BleuScorer(n=self._n)
        for image_id in gts.keys():
            hypo = res[image_id]
            ref = gts[image_id]
            # Exactly one hypothesis and at least one reference per image.
            assert (type(hypo) is list)
            assert (len(hypo) == 1)
            assert (type(ref) is list)
            assert (len(ref) >= 1)
            scorer += (hypo[0], ref)
        return scorer.compute_score(option='closest', verbose=0)

    def method(self):
        return 'Bleu'
def clean_html(string: str):
    """Strip <...> tag spans from `string`, replacing each with one space.

    Removed spans are appended to the function attribute
    `clean_html.clean_links` so callers can inspect what was stripped.
    An unmatched '<' stops processing: a diagnostic is printed and the
    remainder of the string is returned untouched.
    """
    # Robustness fix: lazily create the accumulator so a standalone first
    # call no longer raises AttributeError.
    if not hasattr(clean_html, 'clean_links'):
        clean_html.clean_links = []
    left_mark = '<'
    right_mark = '>'
    while True:
        next_left_start = string.find(left_mark)
        if next_left_start == -1:
            break
        next_right_start = string.find(right_mark, next_left_start)
        if next_right_start == -1:
            print('Right mark without Left: ' + string)
            break
        clean_html.clean_links.append(string[next_left_start:(next_right_start + len(right_mark))])
        # Replace the whole tag span with a single space.
        string = string[:next_left_start] + ' ' + string[(next_right_start + len(right_mark)):]
    return string
class CovarianceHeatmapDisplay():
    """Side-by-side seaborn heatmaps of train/test score covariance matrices."""
    def __init__(self, train_covariances, test_covariances):
        # Covariance matrices (test may be None when no test views were given).
        self.train_covariances = train_covariances
        self.test_covariances = test_covariances
    def _validate_plot_params(self):
        # NOTE(review): passes 'CorrelationHeatmapDisplay' although this class
        # is CovarianceHeatmapDisplay -- likely a copy-paste slip; the string
        # presumably only affects the error message. Confirm.
        check_seaborn_support('CorrelationHeatmapDisplay')
    # NOTE(review): from_estimator/from_covariances take `cls`; @classmethod
    # decorators were presumably stripped upstream -- confirm.
    def from_estimator(cls, model, train_views, test_views=None):
        """Build a display from a fitted two-view model by transforming views."""
        train_scores = model.transform(train_views)
        if (test_views is not None):
            test_scores = model.transform(test_views)
        else:
            test_scores = None
        train_covariances = np.cov(train_scores[0].T, train_scores[1].T)
        if (test_scores is not None):
            test_covariances = np.cov(test_scores[0].T, test_scores[1].T)
        else:
            test_covariances = None
        return cls.from_covariances(train_covariances, test_covariances)
    def from_covariances(cls, train_covariances, test_covariances=None):
        """Build a display directly from precomputed covariance matrices."""
        return cls(train_covariances, test_covariances)
    def plot(self):
        """Render the heatmaps; stores the figure on self.figure_ and returns self."""
        self._validate_plot_params()
        (fig, axs) = plt.subplots(1, 2, figsize=(10, 5))
        sns.heatmap(self.train_covariances, annot=True, ax=axs[0])
        if (self.test_covariances is not None):
            sns.heatmap(self.test_covariances, annot=True, ax=axs[1])
        axs[0].set_title('Train Covariances')
        axs[1].set_title('Test Covariances')
        self.figure_ = fig
        return self
def createAndConnectResetInitChannels(self, board, resetInitSnips):
    """Create one 'initreset<i>' channel per chip and connect it to its snip.

    Returns the created channels ordered by chip index.
    """
    channels = []
    for chip in range(self.numChipsUsed):
        channel = board.createChannel(bytes('initreset' + str(chip), 'utf-8'), 'int', 3)
        channel.connect(None, resetInitSnips[chip])
        channels.append(channel)
    logging.info('Channels added')
    return channels
def do_training(hypes):
    """Build the TF1 training graph plus a weight-sharing validation graph,
    restore pretrained weights, and run the training loop.

    `hypes` is the hyper-parameter dict; input/arch modules are resolved
    from it via utils.load_modules_from_hypes.
    """
    modules = utils.load_modules_from_hypes(hypes)
    with tf.Session() as sess:
        with tf.name_scope('Queues'):
            queue = modules['input'].create_queues(hypes, 'train')
        # Loss-term weights are fed at run time through this placeholder.
        regression_weights = tf.placeholder(dtype=tf.float32, shape=(3,))
        hypes['solver']['regression_weights'] = regression_weights
        tv_graph = core.build_training_graph(hypes, queue, modules)
        tv_sess = core.start_tv_session(hypes)
        with tf.name_scope('Validation'):
            # Share variables between training and validation graphs.
            tf.get_variable_scope().reuse_variables()
            image_pl = tf.placeholder(tf.float32)
            calib = tf.placeholder(tf.float32, shape=[1, hypes['grid_height'], hypes['grid_width'], 3, 4])
            xy_scale = tf.placeholder(tf.float32, shape=[1, hypes['grid_height'], hypes['grid_width'], 2])
            image = tf.expand_dims(image_pl, 0)
            # Fixed input resolution for the inference graph.
            image.set_shape([1, 384, 1248, 3])
            inf_out = core.build_inference_graph(hypes, modules, image, calib, xy_scale)
            tv_graph['image_pl'] = image_pl
            tv_graph['inf_out'] = inf_out
            tv_graph['calib_pl'] = calib
            tv_graph['xy_scale_pl'] = xy_scale
        all_variables = tf.get_collection_ref(tf.GraphKeys.GLOBAL_VARIABLES)
        sess.run(tf.variables_initializer(all_variables))
        # Restore only variables whose names contain neither 'beta' nor 'Adam'
        # (presumably to re-initialize norm offsets and optimizer slots -- confirm).
        var_list = [var for var in all_variables if (('beta' not in var.name) and ('Adam' not in var.name))]
        saver = tf.train.Saver(var_list=var_list)
        saver.restore(sess, hypes['pretrained'])
        modules['input'].start_enqueuing_threads(hypes, queue, 'train', sess)
        run_training(hypes, modules, tv_graph, tv_sess)
        # Shut the input-queue threads down cleanly.
        tv_sess['coord'].request_stop()
        tv_sess['coord'].join(tv_sess['threads'])
def schema_integrate(example: 'Batch') -> Dict[str, Any]:
    """Convert a SQuAD-v2 batch into the unified QA schema.

    Unanswerable questions (empty answer text) get the sentinel answer
    {'text': [''], 'answer_start': [-1]} and is_impossible=True.

    Fixes: the return annotation `Union[(Dict, Any)]` collapsed to `Any`;
    `Batch` is now a lazy string annotation so the module imports without it.
    """
    title = example['title']
    question = example['question']
    context = example['context']
    guid = example['id']
    # No class-type labels for SQuAD; keep placeholders of matching length.
    classtype = [''] * len(title)
    dataset_name = source = ['squad_v2'] * len(title)
    answers, is_impossible = [], []
    for answer_examples in example['answers']:
        if answer_examples['text']:
            answers.append(answer_examples)
            is_impossible.append(False)
        else:
            # Sentinel for unanswerable questions.
            answers.append({'text': [''], 'answer_start': [(- 1)]})
            is_impossible.append(True)
    return {'guid': guid, 'question': question, 'context': context, 'answers': answers, 'title': title, 'classtype': classtype, 'source': source, 'is_impossible': is_impossible, 'dataset': dataset_name}
def make_builder(out_file, impl):
    """Return the dataset builder matching `impl` ('mmap' or plain indexed)."""
    builder_cls = MMapIndexedDatasetBuilder if impl == 'mmap' else IndexedDatasetBuilder
    return builder_cls(out_file)
class NetworkFailureReason(object):
    """String constants naming why a network/node operation failed."""
    NODE_FAILURE = 'Node Failure'
    WAITING_NODE = 'Waiting node'
def build_net(net_name, input_tfs, reuse=False):
    """Build the named fully-connected network over `input_tfs`.

    Raises:
        ValueError: if `net_name` is not a supported architecture.
    """
    if net_name == fc_2layers_256units.NAME:
        return fc_2layers_256units.build_net(input_tfs, reuse)
    if net_name == fc_2layers_512units.NAME:
        return fc_2layers_512units.build_net(input_tfs, reuse)
    # Bug fix: `assert False, ...` is stripped under `python -O`, silently
    # returning None; raise an explicit error instead.
    raise ValueError('Unsupported net: ' + net_name)
class Net2(nn.Module):
    """Tanh-approximation GELU activation (Hendrycks & Gimpel, 2016):

        gelu(x) ~= 0.5*x*(1 + tanh(sqrt(2/pi)*(x + 0.044715*x^3)))
    """

    def __init__(self):
        super(Net2, self).__init__()

    def forward(self, input):
        # Bug fix: the inner scale was written as `input * 0.`, which zeroes
        # the tanh argument and collapses the whole expression to 0.5*input.
        # 0.7978845608028654 == sqrt(2/pi) restores the standard tanh GELU.
        inner = (input * 0.7978845608028654) * (1.0 + ((0.044715 * input) * input))
        return (0.5 * input) * (1.0 + torch.tanh(inner))
def remove_under_k(seq, k):
    """Drop consecutive token runs of length <= k from a space-separated string.

    Returns the surviving tokens joined by spaces, newline-terminated.
    """
    tokens = seq.strip().split(' ')
    kept = []
    for token, run in groupby(tokens):
        run_len = len(list(run))
        # Keep only runs strictly longer than k, preserving their length.
        if run_len > k:
            kept.extend([token] * run_len)
    return ' '.join(kept) + '\n'
class HyperSynthesisTransform(nn.Module):
    """Hyper-decoder: two stride-2 transposed convs (ReLU) plus a final 3x3 conv,
    upsampling the hyper-latent by 4x spatially."""

    def __init__(self, num_filters=192, num_filters_out=192):
        super(HyperSynthesisTransform, self).__init__()
        self.conv_h4 = nn.ConvTranspose2d(num_filters, num_filters, 5, stride=2, padding=2, output_padding=1)
        self.relu_h4 = nn.ReLU()
        self.conv_h5 = nn.ConvTranspose2d(num_filters, num_filters, 5, stride=2, padding=2, output_padding=1)
        self.relu_h5 = nn.ReLU()
        self.conv_h6 = nn.Conv2d(num_filters, num_filters_out, 3, stride=1, padding=1)

    def forward(self, x):
        """Upsample latent x by a factor of 4 in each spatial dimension."""
        out = self.relu_h4(self.conv_h4(x))
        out = self.relu_h5(self.conv_h5(out))
        return self.conv_h6(out)
class VarTypeEnum(Enum):
    """Kinds a declared variable can have; INVALID marks an unresolved type."""
    INVALID = 0
    SEQUENCE = 1
    MATRIX = 2
    VECTOR = 3
    SET = 4
    SCALAR = 5
    FUNCTION = 6
    INDEX = 7
def scan_checkpoint(cp_dir, prefix):
    """Return the lexicographically last checkpoint path matching `prefix*`.

    Returns '' when no match exists in `cp_dir`.
    """
    matches = glob.glob(os.path.join(cp_dir, prefix + '*'))
    if not matches:
        return ''
    # Lexicographic max -- assumes zero-padded step numbers in file names.
    return max(matches)
def iter_caption_to_json(iter_caption, json_file):
    """Write (key, json-encoded captions) pairs as a COCO-style captions file."""
    pairs = [(key, json.loads(raw)) for (key, raw) in iter_caption]
    coco = {'info': 'dummy', 'licenses': 'dummy', 'type': 'captions'}
    # Image ids double as file names.
    coco['images'] = [{'file_name': key, 'id': key} for (key, _) in pairs]
    annotations = []
    next_id = 0
    for key, caps in pairs:
        for cap in caps:
            annotations.append({'image_id': key, 'caption': cap['caption'], 'id': next_id})
            next_id += 1
    coco['annotations'] = annotations
    from src.tools.common import write_to_file
    write_to_file(json.dumps(coco), json_file)
class ResnetCompleteNetworkTest(tf.test.TestCase):
    """End-to-end shape and value tests for a small 4-block resnet_v1 network."""
    def _resnet_small(self, inputs, num_classes=None, is_training=True, global_pool=True, output_stride=None, include_root_block=True, spatial_squeeze=True, reuse=None, scope='resnet_v1_small'):
        """Build a tiny resnet_v1 (base depths 1/2/4/8) for fast testing."""
        block = resnet_v1.resnet_v1_block
        blocks = [block('block1', base_depth=1, num_units=3, stride=2), block('block2', base_depth=2, num_units=3, stride=2), block('block3', base_depth=4, num_units=3, stride=2), block('block4', base_depth=8, num_units=2, stride=1)]
        return resnet_v1.resnet_v1(inputs, blocks, num_classes, is_training=is_training, global_pool=global_pool, output_stride=output_stride, include_root_block=include_root_block, spatial_squeeze=spatial_squeeze, reuse=reuse, scope=scope)
    def testClassificationEndPoints(self):
        """Logits and predictions end points have the expected names/shapes."""
        global_pool = True
        num_classes = 10
        inputs = create_test_input(2, 224, 224, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (logits, end_points) = self._resnet_small(inputs, num_classes, global_pool=global_pool, spatial_squeeze=False, scope='resnet')
        self.assertTrue(logits.op.name.startswith('resnet/logits'))
        self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
        self.assertTrue(('predictions' in end_points))
        self.assertListEqual(end_points['predictions'].get_shape().as_list(), [2, 1, 1, num_classes])
        self.assertTrue(('global_pool' in end_points))
        self.assertListEqual(end_points['global_pool'].get_shape().as_list(), [2, 1, 1, 32])
    def testClassificationEndPointsWithNoBatchNormArgscope(self):
        """Same as above but with is_training=None (no batch-norm arg scope)."""
        global_pool = True
        num_classes = 10
        inputs = create_test_input(2, 224, 224, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (logits, end_points) = self._resnet_small(inputs, num_classes, global_pool=global_pool, spatial_squeeze=False, is_training=None, scope='resnet')
        self.assertTrue(logits.op.name.startswith('resnet/logits'))
        self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
        self.assertTrue(('predictions' in end_points))
        self.assertListEqual(end_points['predictions'].get_shape().as_list(), [2, 1, 1, num_classes])
        self.assertTrue(('global_pool' in end_points))
        self.assertListEqual(end_points['global_pool'].get_shape().as_list(), [2, 1, 1, 32])
    def testEndpointNames(self):
        """Every unit/conv/shortcut end point is present with its full name."""
        global_pool = True
        num_classes = 10
        inputs = create_test_input(2, 224, 224, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (_, end_points) = self._resnet_small(inputs, num_classes, global_pool=global_pool, scope='resnet')
        expected = ['resnet/conv1']
        # Blocks 1-3 have 3 units each; block 4 has 2 units; 3 convs per unit.
        for block in range(1, 5):
            for unit in range(1, (4 if (block < 4) else 3)):
                for conv in range(1, 4):
                    expected.append(('resnet/block%d/unit_%d/bottleneck_v1/conv%d' % (block, unit, conv)))
                expected.append(('resnet/block%d/unit_%d/bottleneck_v1' % (block, unit)))
            expected.append(('resnet/block%d/unit_1/bottleneck_v1/shortcut' % block))
            expected.append(('resnet/block%d' % block))
        expected.extend(['global_pool', 'resnet/logits', 'resnet/spatial_squeeze', 'predictions'])
        self.assertItemsEqual(end_points.keys(), expected)
    def testClassificationShapes(self):
        """Per-block feature-map shapes for a 224x224 classification input."""
        global_pool = True
        num_classes = 10
        inputs = create_test_input(2, 224, 224, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (_, end_points) = self._resnet_small(inputs, num_classes, global_pool=global_pool, scope='resnet')
            endpoint_to_shape = {'resnet/block1': [2, 28, 28, 4], 'resnet/block2': [2, 14, 14, 8], 'resnet/block3': [2, 7, 7, 16], 'resnet/block4': [2, 7, 7, 32]}
            for endpoint in endpoint_to_shape:
                shape = endpoint_to_shape[endpoint]
                self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
    def testFullyConvolutionalEndpointShapes(self):
        """Per-block shapes in fully-convolutional mode (odd 321x321 input)."""
        global_pool = False
        num_classes = 10
        inputs = create_test_input(2, 321, 321, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (_, end_points) = self._resnet_small(inputs, num_classes, global_pool=global_pool, spatial_squeeze=False, scope='resnet')
            endpoint_to_shape = {'resnet/block1': [2, 41, 41, 4], 'resnet/block2': [2, 21, 21, 8], 'resnet/block3': [2, 11, 11, 16], 'resnet/block4': [2, 11, 11, 32]}
            for endpoint in endpoint_to_shape:
                shape = endpoint_to_shape[endpoint]
                self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
    def testRootlessFullyConvolutionalEndpointShapes(self):
        """Shapes without the root conv/pool block (include_root_block=False)."""
        global_pool = False
        num_classes = 10
        inputs = create_test_input(2, 128, 128, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (_, end_points) = self._resnet_small(inputs, num_classes, global_pool=global_pool, include_root_block=False, spatial_squeeze=False, scope='resnet')
            endpoint_to_shape = {'resnet/block1': [2, 64, 64, 4], 'resnet/block2': [2, 32, 32, 8], 'resnet/block3': [2, 16, 16, 16], 'resnet/block4': [2, 16, 16, 32]}
            for endpoint in endpoint_to_shape:
                shape = endpoint_to_shape[endpoint]
                self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
    def testAtrousFullyConvolutionalEndpointShapes(self):
        """Shapes with atrous convolution capping the output stride at 8."""
        global_pool = False
        num_classes = 10
        output_stride = 8
        inputs = create_test_input(2, 321, 321, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (_, end_points) = self._resnet_small(inputs, num_classes, global_pool=global_pool, output_stride=output_stride, spatial_squeeze=False, scope='resnet')
            endpoint_to_shape = {'resnet/block1': [2, 41, 41, 4], 'resnet/block2': [2, 41, 41, 8], 'resnet/block3': [2, 41, 41, 16], 'resnet/block4': [2, 41, 41, 32]}
            for endpoint in endpoint_to_shape:
                shape = endpoint_to_shape[endpoint]
                self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
    def testAtrousFullyConvolutionalValues(self):
        """Atrous outputs, subsampled back, numerically match the dense network."""
        nominal_stride = 32
        for output_stride in [4, 8, 16, 32, None]:
            with slim.arg_scope(resnet_utils.resnet_arg_scope()):
                with tf.Graph().as_default():
                    with self.test_session() as sess:
                        tf.set_random_seed(0)
                        inputs = create_test_input(2, 81, 81, 3)
                        (output, _) = self._resnet_small(inputs, None, is_training=False, global_pool=False, output_stride=output_stride)
                        if (output_stride is None):
                            factor = 1
                        else:
                            factor = (nominal_stride // output_stride)
                        # Subsample so both variants are comparable point-wise.
                        output = resnet_utils.subsample(output, factor)
                        # Reuse the same weights for the reference network.
                        tf.get_variable_scope().reuse_variables()
                        (expected, _) = self._resnet_small(inputs, None, is_training=False, global_pool=False)
                        sess.run(tf.global_variables_initializer())
                        self.assertAllClose(output.eval(), expected.eval(), atol=0.0001, rtol=0.0001)
    def testUnknownBatchSize(self):
        """Graph builds with batch dimension None and runs with a concrete batch."""
        batch = 2
        (height, width) = (65, 65)
        global_pool = True
        num_classes = 10
        inputs = create_test_input(None, height, width, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (logits, _) = self._resnet_small(inputs, num_classes, global_pool=global_pool, spatial_squeeze=False, scope='resnet')
        self.assertTrue(logits.op.name.startswith('resnet/logits'))
        self.assertListEqual(logits.get_shape().as_list(), [None, 1, 1, num_classes])
        images = create_test_input(batch, height, width, 3)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            output = sess.run(logits, {inputs: images.eval()})
            self.assertEqual(output.shape, (batch, 1, 1, num_classes))
    def testFullyConvolutionalUnknownHeightWidth(self):
        """Graph builds with unknown H/W and produces the expected dense output."""
        batch = 2
        (height, width) = (65, 65)
        global_pool = False
        inputs = create_test_input(batch, None, None, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (output, _) = self._resnet_small(inputs, None, global_pool=global_pool)
        self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])
        images = create_test_input(batch, height, width, 3)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            output = sess.run(output, {inputs: images.eval()})
            self.assertEqual(output.shape, (batch, 3, 3, 32))
    def testAtrousFullyConvolutionalUnknownHeightWidth(self):
        """Unknown H/W combined with an atrous output stride of 8."""
        batch = 2
        (height, width) = (65, 65)
        global_pool = False
        output_stride = 8
        inputs = create_test_input(batch, None, None, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (output, _) = self._resnet_small(inputs, None, global_pool=global_pool, output_stride=output_stride)
        self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])
        images = create_test_input(batch, height, width, 3)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            output = sess.run(output, {inputs: images.eval()})
            self.assertEqual(output.shape, (batch, 9, 9, 32))
# NOTE(review): the bare `()` below is residue of a stripped decorator,
# most likely `@pytest.fixture` -- restore when merging upstream.
()
def tf():
    """Provide the real tensorflow module, or a minimal mock when absent.

    The mock mimics only the surface sacred.stflow touches
    (summary.FileWriter and Session) and is injected into
    sacred.stflow.method_interception so the code under test uses it too.
    """
    from sacred.optional import has_tensorflow
    if has_tensorflow:
        import tensorflow
        return tensorflow
    else:
        # Minimal stand-in hierarchy: tensorflow.summary.FileWriter plus a
        # context-manager-capable tensorflow.Session.
        class tensorflow():
            class summary():
                class FileWriter():
                    def __init__(self, logdir, graph):
                        self.logdir = logdir
                        self.graph = graph
                        print(('Mocked FileWriter got logdir=%s, graph=%s' % (logdir, graph)))
            class Session():
                def __init__(self):
                    self.graph = None
                def __enter__(self):
                    return self
                def __exit__(self, exc_type, exc_val, exc_tb):
                    pass
        # Make the mock visible to the module under test as well.
        import sacred.stflow.method_interception
        sacred.stflow.method_interception.tensorflow = tensorflow
        return tensorflow
class PlainNet(PlainNet.PlainNet):
    def __init__(self, argv=None, opt=None, num_classes=None, plainnet_struct=None, no_create=False, no_reslink=None, no_BN=None, use_se=None, dropout=None, **kwargs):
        """Masternet wrapper: PlainNet backbone plus a linear classifier head.

        Flags left as None (no_BN / no_reslink / use_se / dropout) fall back
        to the values parsed from `argv` when given, otherwise to defaults.
        """
        if (argv is not None):
            module_opt = parse_cmd_options(argv)
        else:
            module_opt = None
        if (no_BN is None):
            if (module_opt is not None):
                no_BN = module_opt.no_BN
            else:
                no_BN = False
        if (no_reslink is None):
            if (module_opt is not None):
                no_reslink = module_opt.no_reslink
            else:
                no_reslink = False
        if (use_se is None):
            if (module_opt is not None):
                use_se = module_opt.use_se
            else:
                use_se = False
        if (dropout is None):
            if (module_opt is not None):
                self.dropout = module_opt.dropout
            else:
                self.dropout = None
        else:
            self.dropout = dropout
        if (self.dropout is not None):
            print('--- using dropout={:4g}'.format(self.dropout))
        super(PlainNet, self).__init__(argv=argv, opt=opt, num_classes=num_classes, plainnet_struct=plainnet_struct, no_create=no_create, no_reslink=no_reslink, no_BN=no_BN, use_se=use_se, **kwargs)
        # Classifier head sized from the last block's output width.
        self.last_channels = self.block_list[(- 1)].out_channels
        self.fc_linear = basic_blocks.Linear(in_channels=self.last_channels, out_channels=self.num_classes, no_create=no_create)
        self.no_create = no_create
        self.no_reslink = no_reslink
        self.no_BN = no_BN
        self.use_se = use_se
        # Tighter batch-norm epsilon across the whole network.
        for layer in self.modules():
            if isinstance(layer, nn.BatchNorm2d):
                layer.eps = 0.001
    def extract_stage_features_and_logit(self, x, target_downsample_ratio=None):
        """Run the backbone collecting feature maps at successive downsample
        ratios, and also return the classifier logits.

        A feature map is recorded whenever the running downsample ratio
        (input size / current size) equals `target_downsample_ratio`; the
        target is then doubled for the next stage.
        """
        stage_features_list = []
        # Spatial size of the input; assumes square NCHW input -- confirm.
        image_size = x.shape[2]
        output = x
        for (block_id, the_block) in enumerate(self.block_list):
            output = the_block(output)
            if (self.dropout is not None):
                # Dropout probability ramps linearly with block depth.
                dropout_p = ((float(block_id) / len(self.block_list)) * self.dropout)
                output = F.dropout(output, dropout_p, training=self.training, inplace=True)
            dowsample_ratio = round((image_size / output.shape[2]))
            if (dowsample_ratio == target_downsample_ratio):
                stage_features_list.append(output)
                target_downsample_ratio *= 2
            pass
        pass
        output = F.adaptive_avg_pool2d(output, output_size=1)
        output = torch.flatten(output, 1)
        logit = self.fc_linear(output)
        return (stage_features_list, logit)
def forward(self, x):
output = x
for (block_id, the_block) in enumerate(self.block_list):
output = the_block(output)
if (self.dropout is not None):
dropout_p = ((float(block_id) / len(self.block_list)) * self.dropout)
output = F.dropout(output, dropout_p, training=self.training, inplace=True)
output = F.adaptive_avg_pool2d(output, output_size=1)
if (self.dropout is not None):
output = F.dropout(output, self.dropout, training=self.training, inplace=True)
output = torch.flatten(output, 1)
output = self.fc_linear(output)
return output
def forward_pre_GAP(self, x):
    """Backbone features just before global average pooling (no dropout applied)."""
    out = x
    for blk in self.block_list:
        out = blk(out)
    return out
def get_FLOPs(self, input_resolution):
    """Total FLOPs of the network at `input_resolution`.

    Sums each block's FLOPs while tracking the resolution it hands to the next
    block, then adds the fully-connected head's FLOPs at the final resolution.
    """
    res = input_resolution
    total = 0
    for blk in self.block_list:
        total += blk.get_FLOPs(res)
        res = blk.get_output_resolution(res)
    return total + self.fc_linear.get_FLOPs(res)
def get_model_size(self):
    """Total parameter count: sum over all blocks plus the classifier head."""
    blocks_size = sum(blk.get_model_size() for blk in self.block_list)
    return blocks_size + self.fc_linear.get_model_size()
def get_num_layers(self):
    """Total number of layers, summing `sub_layers` over the super blocks.

    Every entry of `block_list` must be a PlainNetSuperBlockClass.
    """
    total = 0
    for blk in self.block_list:
        assert isinstance(blk, super_blocks.PlainNetSuperBlockClass)
        total += blk.sub_layers
    return total
def replace_block(self, block_id, new_block):
    """Swap block `block_id` for `new_block` and re-wire channel counts.

    If a block follows, its input channels are adjusted to match the new
    block's output channels; if the last block was replaced, the classifier
    head is adjusted instead. The module list is rebuilt so the new block is
    registered with PyTorch.
    """
    self.block_list[block_id] = new_block
    last_idx = len(self.block_list) - 1
    if block_id < last_idx:
        successor = self.block_list[block_id + 1]
        if successor.in_channels != new_block.out_channels:
            successor.set_in_channels(new_block.out_channels)
    else:
        assert block_id == last_idx
        self.last_channels = self.block_list[-1].out_channels
        if self.fc_linear.in_channels != self.last_channels:
            self.fc_linear.set_in_channels(self.last_channels)
    # Re-register the (possibly new) modules with the framework.
    self.module_list = nn.ModuleList(self.block_list)
def split(self, split_layer_threshold):
    """Concatenate every block's split() string representation."""
    return ''.join(
        blk.split(split_layer_threshold=split_layer_threshold)
        for blk in self.block_list
    )
def init_parameters(self):
    """Initialize all weights.

    Convs get scaled Xavier-normal weights (gain 3.26033) with zero bias;
    BatchNorm/GroupNorm get unit scale and zero shift; Linear layers get a
    scaled normal matched to fan-in + fan-out. Finally, the last BN inside
    every residual block has its scale zeroed so each ResBlock starts as an
    identity mapping.
    """
    gain = 3.26033
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            nn.init.xavier_normal_(m.weight.data, gain=gain)
            if getattr(m, 'bias', None) is not None:
                nn.init.zeros_(m.bias)
        elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.ones_(m.weight)
            nn.init.zeros_(m.bias)
        elif isinstance(m, nn.Linear):
            fan_sum = m.weight.shape[0] + m.weight.shape[1]
            nn.init.normal_(m.weight, 0, gain * np.sqrt(2 / fan_sum))
            if getattr(m, 'bias', None) is not None:
                nn.init.zeros_(m.bias)
    # Zero-init the final BN scale of every ResBlock (identity-at-init trick).
    for superblock in self.block_list:
        if not isinstance(superblock, super_blocks.PlainNetSuperBlockClass):
            continue
        for block in superblock.block_list:
            if not isinstance(block, basic_blocks.ResBlock):
                continue
            last_bn_block = None
            for inner_block in block.block_list:
                if isinstance(inner_block, basic_blocks.BN):
                    last_bn_block = inner_block
            assert last_bn_block is not None
            nn.init.zeros_(last_bn_block.netblock.weight)
def compute_nas_score(gpu, model, mixup_gamma, resolution, batch_size, repeat, fp16=False):
    """Zen-NAS style zero-shot score of `model` (no training required).

    For each of `repeat` rounds: re-initialize the weights with Gaussian noise,
    feed a random batch and a gamma-mixed perturbation of it through the
    pre-GAP backbone, and score the log of the mean absolute feature
    difference, corrected by the log of each BatchNorm's running-variance
    scale. Returns a dict with 'avg_nas_score', 'std_nas_score' and
    'avg_precision' (95% confidence half-width).
    """
    device = torch.device('cpu') if gpu is None else torch.device('cuda:{}'.format(gpu))
    dtype = torch.half if fp16 else torch.float32
    nas_score_list = []
    with torch.no_grad():
        for _ in range(repeat):
            network_weight_gaussian_init(model)
            shape = [batch_size, 3, resolution, resolution]
            base = torch.randn(size=shape, device=device, dtype=dtype)
            noise = torch.randn(size=shape, device=device, dtype=dtype)
            mixed = base + mixup_gamma * noise
            feat = model.forward_pre_GAP(base)
            mixed_feat = model.forward_pre_GAP(mixed)
            score = torch.mean(torch.sum(torch.abs(feat - mixed_feat), dim=[1, 2, 3]))
            # Compensate for the rescaling each BatchNorm applies.
            log_bn_scaling_factor = 0.0
            for m in model.modules():
                if isinstance(m, nn.BatchNorm2d):
                    log_bn_scaling_factor += torch.log(torch.sqrt(torch.mean(m.running_var)))
            nas_score_list.append(float(torch.log(score) + log_bn_scaling_factor))
    std_nas_score = np.std(nas_score_list)
    info = {}
    info['avg_nas_score'] = float(np.mean(nas_score_list))
    info['std_nas_score'] = float(std_nas_score)
    info['avg_precision'] = float(1.96 * std_nas_score / np.sqrt(len(nas_score_list)))
    return info
def check_loss(loss):
    """Return True iff `loss` is a sane scalar: not NaN, >= 0, and below 1e6."""
    if bool(torch.isnan(loss).item()):
        return False
    if not bool((loss >= 0.0).item()):
        return False
    return bool((loss < 1000000.0).item())
def add_head(head_map, tree, head):
    """Record `head` for `tree` in `head_map`, keyed by its (span, label) pair."""
    head_map[(tree.span, tree.label)] = head
# NOTE(review): the three lines below are scraped web-page boilerplate (dataset
# browser UI text), not code — commented out so the file stays parseable.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.