code stringlengths 101 5.91M |
|---|
def read_langs_turn(args, file_name, max_line=None, ds_name=''):
    """Parse a tab-separated dialogue dump into turn-level training examples.

    Each line after the header row must contain at least the columns
    (session_ID, Message_ID, _, Message_from, Message); a Message_ID of '1'
    marks the start of a new dialogue session.

    Args:
        args: config dict; only ``args['only_last_turn']`` is read here.
        file_name: path of the TSV file to read.
        max_line: optional cap on the number of dialogues to read.
        ds_name: dataset tag used to build each example's ID string.

    Returns:
        List of example dicts built via ``get_input_example('turn')``.
    """
    print('Reading from {} for read_langs_turn'.format(file_name))
    data = []
    with open(file_name) as f:
        dials = f.readlines()

    cnt_lin = 1
    dialog_history = []
    turn_usr = ''
    turn_sys = ''
    turn_idx = 0
    data_detail = None  # most recent example built for the current dialogue

    for dial in dials[1:]:  # skip the header row
        dial_split = dial.split('\t')
        (session_ID, Message_ID, Message_from, Message) = (dial_split[0], dial_split[1], dial_split[3], dial_split[4])

        # Message_ID == '1' starts a new dialogue: flush the previous one.
        if (Message_ID == '1') and (turn_sys != ''):
            if args['only_last_turn'] and (data_detail is not None):
                # BUGFIX: guard against a dialogue that produced no user
                # turn — the original referenced `data_detail` before
                # assignment (NameError) or appended a stale example.
                data.append(data_detail)
            data_detail = None
            turn_usr = ''
            turn_sys = ''
            dialog_history = []
            cnt_lin += 1
            turn_idx = 0

        if Message_from == 'user':
            turn_usr = Message.lower().strip()
            data_detail = get_input_example('turn')
            data_detail['ID'] = '{}-{}'.format(ds_name, cnt_lin)
            data_detail['turn_id'] = turn_idx
            data_detail['turn_usr'] = turn_usr
            data_detail['turn_sys'] = turn_sys
            data_detail['dialog_history'] = list(dialog_history)
            if not args['only_last_turn']:
                data.append(data_detail)
            dialog_history.append(turn_sys)
            dialog_history.append(turn_usr)
            turn_idx += 1
        elif Message_from == 'agent':
            turn_sys = Message.lower().strip()

        if max_line and (cnt_lin >= max_line):
            break

    # NOTE(review): in only_last_turn mode the very last dialogue is never
    # flushed (no trailing Message_ID == '1' follows it); kept as-is for
    # parity with the original reader's output.
    return data
def test_fit(X, model, model2):
    """Fit both models on X and verify their factor and marginal tables."""
    third = 1.0 / 3
    factor_expectations = [
        (model, [
            [[0.4545, 0.5455]],
            [[[0.0909, 0.1818, 0.0], [0.0909, 0.0, 0.0909]],
             [[0.0, 0.1818, 0.0909], [0.0909, 0.0909, 0.0909]]],
            [[0.5455, 0.4545]],
            [[0.1818, 0.3636], [0.2727, 0.1818]],
        ]),
        (model2, [
            [[0.4545, 0.5455]],
            [[[0.0, 0.0909, 0.0909], [0.0909, 0.2727, 0.0]],
             [[0.0909, 0.0909, 0.0909], [0.0909, 0.0, 0.0909]]],
            [[0.2727, 0.1818], [0.2727, 0.2727]],
            [[0.2727, 0.1818], [0.1818, 0.3636]],
        ]),
    ]
    # Marginals are identical for both models.
    marginal_expectations = [[[0.5, 0.5]], [[third, third, third]], [[0.5, 0.5]], [[0.5, 0.5]]]
    for fitted, tables in factor_expectations:
        fitted.fit(X)
        for idx, table in enumerate(tables):
            assert_array_almost_equal(fitted.factors[idx].probs, table, 4)
        for idx, table in enumerate(marginal_expectations):
            assert_array_almost_equal(fitted.marginals[idx].probs, table)
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model / pipeline test-suite wiring for SqueezeBert."""

    all_model_classes = (
        (SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice,
         SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification,
         SqueezeBertForTokenClassification)
        if is_torch_available() else None)
    pipeline_model_mapping = (
        {'feature-extraction': SqueezeBertModel,
         'fill-mask': SqueezeBertForMaskedLM,
         'question-answering': SqueezeBertForQuestionAnswering,
         'text-classification': SqueezeBertForSequenceClassification,
         'token-classification': SqueezeBertForTokenClassification,
         'zero-shot': SqueezeBertForSequenceClassification}
        if is_torch_available() else {})
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def _check(self, create_and_check):
        # Build fresh config/inputs and run one create-and-check helper.
        inputs = self.model_tester.prepare_config_and_inputs()
        create_and_check(*inputs)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        self._check(self.model_tester.create_and_check_squeezebert_model)

    def test_for_masked_lm(self):
        self._check(self.model_tester.create_and_check_squeezebert_for_masked_lm)

    def test_for_question_answering(self):
        self._check(self.model_tester.create_and_check_squeezebert_for_question_answering)

    def test_for_sequence_classification(self):
        self._check(self.model_tester.create_and_check_squeezebert_for_sequence_classification)

    def test_for_token_classification(self):
        self._check(self.model_tester.create_and_check_squeezebert_for_token_classification)

    def test_for_multiple_choice(self):
        self._check(self.model_tester.create_and_check_squeezebert_for_multiple_choice)

    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@given(beta=floats(1.0, 50.0), threshold=integers(20, 50))
def test_creation_from_vector(beta, threshold):
    """from_vector must split (..., 2k) into z and Z = z + softplus(delta)."""
    shape = (3, 1, 5)
    lower = torch.tensor(np.random.rand(*shape))
    delta = torch.tensor(np.random.rand(*shape))
    vec = torch.cat((lower, delta), dim=-1)
    box = MinDeltaBoxTensor.from_vector(vec, beta=beta, threshold=threshold)
    expected_top = lower + torch.nn.functional.softplus(delta, beta=beta, threshold=threshold)
    assert box.Z.shape == (3, 1, 5)
    assert torch.allclose(box.z, lower)
    assert torch.allclose(box.Z, expected_top)
class AutoModelForTokenClassification(nn.Module):
def __init__(self, args, Model, config, num_labels=2):
super(AutoModelForTokenClassification, self).__init__()
self.num_labels = num_labels
self.bert = Model
self.config = config
self.dropout = nn.Dropout(args.drop_ratio)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.logsoftmax = nn.LogSoftmax(dim=1)
self._init_weights(self.classifier)
def _init_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if (isinstance(module, nn.Linear) and (module.bias is not None)):
module.bias.data.zero_()
def forward(self, input_ids, target_mask, token_type_ids=None, attention_mask=None, labels=None, head_mask=None):
outputs = self.bert(input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask, head_mask=head_mask)
sequence_output = outputs[0]
target_output = (sequence_output * target_mask.unsqueeze(2))
target_output = self.dropout(target_output)
target_output = (target_output.sum(1) / target_mask.sum())
logits = self.classifier(target_output)
logits = self.logsoftmax(logits)
if (labels is not None):
loss_fct = nn.NLLLoss()
loss = loss_fct(logits.view((- 1), self.num_labels), labels.view((- 1)))
return loss
return logits |
class LiltForQuestionAnswering(metaclass=DummyObject):
    """Import-time placeholder used when the torch backend is missing.

    Instantiating it raises a helpful error via ``requires_backends``
    instead of failing with an opaque ImportError at import time.
    """

    # Backends required for the real implementation to be usable.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def predict_fn(image):
    """Classify *image* and return its label plus two Grad-CAM overlays."""
    model = load_model(model_id='1y6tseN0194T6d-4iIh5wo7RL9ttQERe0')
    processed, original_shape = image_process(image)
    heatmap_a, heatmap_b, preds = make_gradcam_heatmap(processed, model)
    predicted_index = tf.argmax(preds, axis=-1).numpy()[0]
    predicted_label = str_labels[predicted_index]
    target_size = original_shape[:2]
    # (renamed: the original's local "overaly_a" was a typo)
    overlay_a = save_and_display_gradcam(processed[0], heatmap_a, image_shape=target_size)
    overlay_b = save_and_display_gradcam(processed[0], heatmap_b, image_shape=target_size)
    return [f'Predicted: {predicted_label}', overlay_a, overlay_b]
def test_pr3635_diamond_e():
    """Diamond-inheritance attribute access through every base of MVE."""
    o = m.MVE()
    # Plain attribute reads at each level of the diamond.
    for attr, value in (('b', 1), ('c', 2), ('d0', 3), ('d1', 4), ('e', 5)):
        assert getattr(o, attr) == value
    # Accessor methods: get_<defining-class>_<attribute>().
    for getter, value in (
        ('get_b_b', 1), ('get_c_b', 1), ('get_d0_b', 1), ('get_d1_b', 1), ('get_e_b', 1),
        ('get_c_c', 2), ('get_d0_c', 2), ('get_d1_c', 2), ('get_e_c', 2),
        ('get_d0_d0', 3), ('get_e_d0', 3),
        ('get_d1_d1', 4), ('get_e_d1', 4),
        ('get_e_e', 5),
    ):
        assert getattr(o, getter)() == value
class CTRLLMHeadModel():
    """Placeholder emitted when PyTorch is unavailable.

    Any attempt to construct or load the model raises a helpful error via
    ``requires_pytorch`` instead of an opaque ImportError.
    """

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    # NOTE(review): the real API exposes this as a classmethod; here it is
    # an instance method, reachable only if __init__ did not raise.
    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
@pytest.mark.parametrize('naive_dice', [True, False])
def test_dice_loss(naive_dice):
    """Exercise DiceLoss options: weights, reductions, avg_factor, errors."""
    pred = torch.rand((10, 4, 4))
    target = torch.rand((10, 4, 4))
    weight = torch.rand(10)

    def build(**extra):
        # Fresh loss module carrying the parametrized naive_dice flag.
        return DiceLoss(naive_dice=naive_dice, **extra)

    # Plain call, per-sample weights, explicit reduction, avg_factor.
    loss = build()(pred, target)
    assert isinstance(loss, torch.Tensor)
    loss = build()(pred, target, weight)
    assert isinstance(loss, torch.Tensor)
    loss = build()(pred, target, reduction_override='mean')
    assert isinstance(loss, torch.Tensor)
    loss = build()(pred, target, avg_factor=10)
    assert isinstance(loss, torch.Tensor)

    # avg_factor is incompatible with a 'sum' reduction override...
    with pytest.raises(ValueError):
        build()(pred, target, avg_factor=10, reduction_override='sum')

    # ...but fine with None / 'none' / 'mean'.
    for reduction_override in (None, 'none', 'mean'):
        build()(pred, target, avg_factor=10, reduction_override=reduction_override)
    assert isinstance(loss, torch.Tensor)

    # activate=True requires use_sigmoid=True.
    with pytest.raises(NotImplementedError):
        DiceLoss(use_sigmoid=False, activate=True, naive_dice=naive_dice)(pred, target)

    # Weight shape must match the per-sample loss layout.
    with pytest.raises(AssertionError):
        build()(pred, target, torch.rand((2, 8)))
    with pytest.raises(AssertionError):
        build()(pred, target, torch.rand(8))
class DictKeepInputLabelIdx(DictKeepKeys):
    """Filter a sample dict down to its input/label/idx/aug_index entries."""

    def __init__(self):
        # Delegate to DictKeepKeys with the fixed key whitelist.
        keys = ['input', 'label', 'idx', 'aug_index']
        super().__init__(keys)
def main():
    """Report MMD between 'seen' and 'unseen' features, overall and per class.

    Usage: script.py <source.npy> <target.npy> <source_labels.npy> <target_labels.npy>
    """
    seen = tf.placeholder(tf.float32, shape=[None, 1024])
    unseen = tf.placeholder(tf.float32, shape=[None, 1024])
    # BUGFIX: the original also built `rbf_mmd2(seen, unseen)` here and
    # immediately overwrote the result with the mixture version below —
    # the dead call has been removed.
    (mmd, n) = mix_rbf_mmd2(seen, unseen, gammas=[10.0, 1.0, 0.1, 0.01, 0.001])
    source_numpy = np.load(sys.argv[1])
    target_numpy = np.load(sys.argv[2])
    source_numpy_labels = np.load(sys.argv[3])
    target_numpy_labels = np.load(sys.argv[4])
    with tf.Session() as sess:
        print('Total', sess.run(mmd, feed_dict={seen: source_numpy, unseen: target_numpy}))
        # Per-class MMD: restrict both sides to samples of class i.
        for i in np.unique(source_numpy_labels):
            print(i, sess.run(mmd, feed_dict={seen: source_numpy[(source_numpy_labels == i)], unseen: target_numpy[(target_numpy_labels == i)]}))
class TestCenterRegionAssigner(TestCase):
    """Unit tests for CenterRegionAssigner covering the normal case plus
    ignore regions and empty gt / empty prior configurations."""

    def test_center_region_assigner(self):
        center_region_assigner = CenterRegionAssigner(pos_scale=0.2, neg_scale=0.2, min_pos_iof=0.01)
        priors = torch.FloatTensor([[0, 0, 10, 10], [10, 10, 20, 20], [5, 5, 15, 15], [32, 32, 38, 42]])
        gt_bboxes = torch.FloatTensor([[0, 0, 10, 9], [0, 10, 10, 19]])
        gt_labels = torch.LongTensor([2, 3])
        pred_instances = InstanceData(priors=priors)
        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
        assign_result = center_region_assigner.assign(pred_instances, gt_instances)
        self.assertEqual(len(assign_result.gt_inds), 4)
        self.assertEqual(len(assign_result.labels), 4)
        # Only the first prior is assigned (to gt 1); the rest are background.
        expected_gt_inds = torch.LongTensor([1, 0, 0, 0])
        self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))
        expected_shadowed_labels = torch.LongTensor([[2, 3]])
        shadowed_labels = assign_result.get_extra_property('shadowed_labels')
        self.assertTrue(torch.all(shadowed_labels == expected_shadowed_labels))

    def test_center_region_assigner_with_ignore(self):
        center_region_assigner = CenterRegionAssigner(pos_scale=0.2, neg_scale=0.2, min_pos_iof=0.01)
        priors = torch.FloatTensor([[0, 0, 10, 10], [10, 10, 20, 20], [5, 5, 15, 15], [30, 32, 40, 42]])
        gt_bboxes = torch.FloatTensor([[0, 0, 10, 9], [0, 10, 10, 19]])
        gt_labels = torch.LongTensor([2, 3])
        gt_bboxes_ignore = torch.Tensor([[30, 30, 40, 40]])
        pred_instances = InstanceData(priors=priors)
        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
        gt_instances_ignore = InstanceData(bboxes=gt_bboxes_ignore)
        assign_result = center_region_assigner.assign(pred_instances, gt_instances, gt_instances_ignore=gt_instances_ignore)
        # The last prior overlaps the ignore region and is marked -1.
        expected_gt_inds = torch.LongTensor([1, 0, 0, -1])
        # BUGFIX: the original asserted this line twice; the duplicate is removed.
        self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))

    def test_center_region_assigner_with_empty_gt(self):
        center_region_assigner = CenterRegionAssigner(pos_scale=0.2, neg_scale=0.2, min_pos_iof=0.01)
        priors = torch.FloatTensor([[0, 0, 10, 10], [10, 10, 20, 20], [5, 5, 15, 15], [32, 32, 38, 42]])
        gt_bboxes = torch.empty(0, 4)
        gt_labels = torch.empty(0)
        pred_instances = InstanceData(priors=priors)
        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
        assign_result = center_region_assigner.assign(pred_instances, gt_instances)
        # With no gt, every prior is background.
        expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
        self.assertTrue(torch.all(assign_result.gt_inds == expected_gt_inds))

    def test_center_region_assigner_with_empty_boxes(self):
        center_region_assigner = CenterRegionAssigner(pos_scale=0.2, neg_scale=0.2, min_pos_iof=0.01)
        priors = torch.empty((0, 4))
        gt_bboxes = torch.FloatTensor([[0, 0, 10, 9], [0, 10, 10, 19]])
        gt_labels = torch.LongTensor([2, 3])
        pred_instances = InstanceData(priors=priors)
        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
        assign_result = center_region_assigner.assign(pred_instances, gt_instances)
        self.assertEqual(len(assign_result.gt_inds), 0)
        self.assertTrue(tuple(assign_result.labels.shape) == (0,))

    def test_center_region_assigner_with_empty_boxes_and_ignore(self):
        center_region_assigner = CenterRegionAssigner(pos_scale=0.2, neg_scale=0.2, min_pos_iof=0.01)
        priors = torch.empty((0, 4))
        gt_bboxes = torch.FloatTensor([[0, 0, 10, 9], [0, 10, 10, 19]])
        gt_bboxes_ignore = torch.Tensor([[30, 30, 40, 40]])
        gt_labels = torch.LongTensor([2, 3])
        pred_instances = InstanceData(priors=priors)
        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
        gt_instances_ignore = InstanceData(bboxes=gt_bboxes_ignore)
        assign_result = center_region_assigner.assign(pred_instances, gt_instances, gt_instances_ignore=gt_instances_ignore)
        self.assertEqual(len(assign_result.gt_inds), 0)
        self.assertTrue(tuple(assign_result.labels.shape) == (0,))

    def test_center_region_assigner_with_empty_boxes_and_gt(self):
        center_region_assigner = CenterRegionAssigner(pos_scale=0.2, neg_scale=0.2, min_pos_iof=0.01)
        priors = torch.empty((0, 4))
        gt_bboxes = torch.empty((0, 4))
        gt_labels = torch.empty(0)
        pred_instances = InstanceData(priors=priors)
        gt_instances = InstanceData(bboxes=gt_bboxes, labels=gt_labels)
        assign_result = center_region_assigner.assign(pred_instances, gt_instances)
        self.assertEqual(len(assign_result.gt_inds), 0)
def seresnet26_cub(num_classes=200, **kwargs):
    """SE-ResNet-26 (basic blocks) configured for the CUB-200-2011 dataset."""
    return get_seresnet(
        num_classes=num_classes,
        blocks=26,
        bottleneck=False,
        model_name='seresnet26_cub',
        **kwargs)
def generator_loss(fake):
    """Non-saturating generator loss: cross-entropy of D(fake) against 1."""
    real_labels = tf.ones_like(fake)
    per_sample = tf.nn.sigmoid_cross_entropy_with_logits(labels=real_labels, logits=fake)
    return tf.reduce_mean(per_sample)
def resnet50_fc512_ms12_a0d3(num_classes, loss='softmax', pretrained=True, **kwargs):
    """ResNet-50 + 512-d FC head with MixStyle (alpha=0.3) on layers 1-2."""
    model = ResNet(
        num_classes=num_classes,
        loss=loss,
        block=Bottleneck,
        layers=[3, 4, 6, 3],
        last_stride=1,
        fc_dims=[512],
        dropout_p=None,
        mixstyle_layers=['layer1', 'layer2'],
        mixstyle_alpha=0.3,
        **kwargs)
    if pretrained:
        init_pretrained_weights(model, model_urls['resnet50'])
    return model
class Decoder(nn.Module):
    """FoldingNet-style two-stage folding decoder.

    Folds a fixed 32x32 2-D grid (1024 points), conditioned on a 512-d
    codeword, into a per-point 4-d output via two folding MLPs.
    """

    def __init__(self, num_points_per_patch=1024):
        super(Decoder, self).__init__()
        self.m = num_points_per_patch
        self.meshgrid = [[-0.3, 0.3, 32], [-0.3, 0.3, 32]]

        def folding_mlp(in_dim):
            # Shared layer layout of both folding stages.
            return nn.Sequential(
                nn.Linear(in_dim, 256), nn.ReLU(),
                nn.Linear(256, 128), nn.ReLU(),
                nn.Linear(128, 64), nn.ReLU(),
                nn.Linear(64, 32), nn.ReLU(),
                nn.Linear(32, 4))

        self.mlp1 = folding_mlp(514)  # codeword (512) + grid coords (2)
        self.mlp2 = folding_mlp(516)  # codeword (512) + first fold (4)

    def build_grid(self, batch_size):
        """Return a (batch_size, m, 2) float grid covering [-0.3, 0.3]^2."""
        xs = np.linspace(*self.meshgrid[0])
        ys = np.linspace(*self.meshgrid[1])
        coords = np.array(list(itertools.product(xs, ys)))
        tiled = np.repeat(coords[np.newaxis, ...], repeats=batch_size, axis=0)
        return torch.tensor(tiled).float()

    def forward(self, input):
        # (B, 1, C) -> (B, m, C): broadcast the codeword to every grid point.
        codeword = input.repeat(1, self.m, 1)
        grid = self.build_grid(codeword.shape[0])
        if torch.cuda.is_available():
            grid = grid.cuda()
        fold1 = self.mlp1(torch.cat((codeword, grid), dim=-1))
        fold2 = self.mlp2(torch.cat((codeword, fold1), dim=-1))
        return fold2
def string_sub(args):
    """Lua-style string.sub: 1-based, inclusive, negative-index substring."""
    params = functionParams(args, ('s', 'i', 'j'))
    text = params.get('s', '')
    start = int(params.get('i', 1) or 1)
    end = int(params.get('j', -1) or -1)
    # Convert the 1-based start position to a 0-based slice index.
    if start > 0:
        start -= 1
    # Inclusive negative end -> exclusive Python end; -1 means "to the end".
    if end < 0:
        end += 1
        if end == 0:
            end = len(text)
    return text[start:end]
class TFBaseModelOutputWithPoolingAndCrossAttentions(ModelOutput):
    """Model output bundling the last hidden state, a pooled output, cached
    key/values, and optional hidden-state / attention tuples.
    """

    # Hidden states of the model's final layer.
    last_hidden_state: tf.Tensor = None
    # Pooled sequence-level representation.
    pooler_output: tf.Tensor = None
    # Cached key/value states for fast autoregressive decoding.
    past_key_values: Optional[List[tf.Tensor]] = None
    # Per-layer hidden states (populated only when requested).
    hidden_states: Optional[Tuple[tf.Tensor]] = None
    # Per-layer self-attention weights (populated only when requested).
    attentions: Optional[Tuple[tf.Tensor]] = None
    # Per-layer cross-attention weights (encoder-decoder setups).
    cross_attentions: Optional[Tuple[tf.Tensor]] = None
class CrossAttention(nn.Module):
    """Multi-head attention whose queries come from one sequence (x_q) and
    whose keys/values come from another (x_kv)."""

    def __init__(self, dim, heads=8, dim_head=64, dropout=0.0):
        super().__init__()
        inner_dim = dim_head * heads
        # The output projection is skipped in the degenerate 1-head,
        # matching-width case.
        needs_projection = not (heads == 1 and dim_head == dim)
        self.heads = heads
        self.scale = dim_head ** -0.5
        self.attend = nn.Softmax(dim=-1)
        self.to_q = nn.Linear(dim, inner_dim, bias=False)
        self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
        if needs_projection:
            self.to_out = nn.Sequential(nn.Linear(inner_dim, dim), nn.Dropout(dropout))
        else:
            self.to_out = nn.Identity()

    def forward(self, x_q, x_kv):
        (_, _, dim, heads) = (*x_q.shape, self.heads)
        (_, _, dim_large) = x_kv.shape
        assert dim == dim_large
        query = rearrange(self.to_q(x_q), 'b n (h d) -> b h n d', h=heads)
        key, value = (rearrange(t, 'b n (h d) -> b h n d', h=heads)
                      for t in self.to_kv(x_kv).chunk(2, dim=-1))
        scores = einsum('b h i d, b h j d -> b h i j', query, key) * self.scale
        attn = self.attend(scores)
        merged = einsum('b h i j, b h j d -> b h i d', attn, value)
        merged = rearrange(merged, 'b h n d -> b n (h d)')
        return self.to_out(merged)
def plot_gif(v):
    """Animate an agent trajectory over its scan's navigation graph.

    `v` is a result entry with 'instr_id' and 'path' (a list of viewpoint
    ids).  Relies on module-level globals: `e` (an evaluator exposing .gt
    and .graphs), `nx`, `plt` and `matplotlib` — TODO confirm their exact
    types against the surrounding module.  Returns a FuncAnimation that
    draws the path one viewpoint per frame.
    """
    instr_id = v['instr_id']
    # The gt entry is keyed by the numeric prefix of the instruction id.
    gt = e.gt[int(instr_id.split('_')[0])]
    graph = e.graphs[gt['scan']]
    node_pos = nx.get_node_attributes(graph, 'position')
    # Drop the last coordinate (presumably z) — plotting is top-down 2-D.
    for (k, vv) in node_pos.items():
        node_pos[k] = vv[:(- 1)]
    rel_pos = [node_pos[vp] for vp in v['path']]
    rel_x = [r[0] for r in rel_pos]
    rel_y = [r[1] for r in rel_pos]
    # Pad the axis limits by 3 units around the traversed path.
    xlim = [(min(rel_x) - 3), (max(rel_x) + 3)]
    ylim = [(min(rel_y) - 3), (max(rel_y) + 3)]
    _G = nx.Graph()
    _nodes = set()
    (fig, ax) = plt.subplots()
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    plt.axis('off')

    def init():
        pass

    def animate(i):
        # Add the i-th viewpoint (and its edge from the previous one), then
        # redraw: current node in red, previously visited nodes in blue.
        vp = v['path'][i]
        if (vp not in _nodes):
            _G.add_node(vp, pos=node_pos[vp])
            _nodes.add(vp)
        if (i > 0):
            _G.add_edge(v['path'][(i - 1)], vp)
        nx.draw_networkx_nodes(_G, pos=node_pos, ax=ax, node_size=100, nodelist=[vp], node_color='r')
        nx.draw_networkx_nodes(_G, pos=node_pos, ax=ax, node_size=100, nodelist=(_nodes - set([vp])), node_color='b')
        nx.draw_networkx_edges(_G, pos=node_pos, ax=ax, alpha=1, width=3)
    t = len(v['path'])
    return matplotlib.animation.FuncAnimation(fig, animate, init_func=init, frames=t, repeat=False)
def load(model, optimizer, filename):
    """Restore model/optimizer state (and config) from a checkpoint file.

    Args:
        model: module to load weights into, or None to skip.
        optimizer: optimizer to restore, or None to skip.
        filename: checkpoint path readable by ``torch.load``.

    Returns:
        (model, optimizer, opt) where opt is the checkpoint's 'config' entry.

    Raises:
        Whatever ``torch.load`` raised, after logging the failure.
    """
    try:
        dump = torch.load(filename)
    except BaseException:
        # BUGFIX: the original printed and fell through, crashing with a
        # NameError on the undefined `dump` below; re-raise instead.
        print('[ Fail: model loading failed. ]')
        raise
    if model is not None:
        model.load_state_dict(dump['model'])
    if optimizer is not None:
        optimizer.load_state_dict(dump['optimizer'])
    opt = dump['config']
    return (model, optimizer, opt)
class ProbabilisticLayer(Random):
    """Abstract base for layers that define a probability distribution.

    Subclasses must implement sample_expected, sample and log_prob.
    """

    def __init__(self, **kwargs):
        super(ProbabilisticLayer, self).__init__(**kwargs)

    def sample_expected(self, Y):
        """Return the distribution's expectation given parameters Y."""
        # BUGFIX: `raise NotImplemented` raises a TypeError in Python 3
        # (NotImplemented is not an exception); use NotImplementedError.
        raise NotImplementedError

    def sample(self, Y):
        """Draw a sample from the distribution parametrized by Y."""
        raise NotImplementedError

    def log_prob(self, X, Y):
        """Log-density of X under the distribution parametrized by Y."""
        raise NotImplementedError
class T5TokenizerFast():
    """Placeholder used when the `tokenizers` backend is not installed.

    Construction or from_pretrained raises a helpful error via
    ``requires_tokenizers`` instead of an opaque ImportError.
    """

    def __init__(self, *args, **kwargs):
        requires_tokenizers(self)

    # NOTE(review): the real API exposes this as a classmethod; here it is
    # an instance method, reachable only if __init__ did not raise.
    def from_pretrained(self, *args, **kwargs):
        requires_tokenizers(self)
def forward_backward_benchmark(net, run_segment, device, input_size=(1, 3, 224, 224), repeat=100, min_repeat=5):
    """Compare time and memory of regular vs checkpointed forward+backward."""
    assert repeat > min_repeat
    net.train()
    reg_start, reg_end, reg_peak, reg_time = forward_backward(net, device, input_size, repeat, min_repeat)
    ckpt_start, ckpt_end, ckpt_peak, ckpt_time = forward_backward(run_segment, device, input_size, repeat, min_repeat)
    # Framework-held memory: whatever persists before/after the iterations.
    reg_overhead = max(reg_start, reg_end)
    ckpt_overhead = max(ckpt_start, ckpt_end)
    # Intermediate-tensor footprint = peak minus the persistent overhead.
    reg_tensors = reg_peak - reg_overhead
    ckpt_tensors = ckpt_peak - ckpt_overhead
    mb = 1024 ** 2
    time_overhead_pct = (ckpt_time - reg_time) * 100 / reg_time
    peak_cut_pct = (reg_peak - ckpt_peak) * 100 / reg_peak
    tensor_cut_pct = (reg_tensors - ckpt_tensors) * 100 / reg_tensors
    print('Average Iteration Time: Checkpointing {:.4f} s, Regular {:.4f} s, overhead {:.2f}%'.format(ckpt_time, reg_time, time_overhead_pct))
    print('Average Peak Memory: Checkpointing {:.4f} MB, Regular {:.4f} MB, Memory Cut off {:.2f}%'.format(ckpt_peak / mb, reg_peak / mb, peak_cut_pct))
    print('Average Intermediate Tensors: Checkpointing {:.4f} MB, Regular {:.4f} MB, Memory Cut off {:.2f}%'.format(ckpt_tensors / mb, reg_tensors / mb, tensor_cut_pct))
def config_class_to_model_type(config):
    """Return the model-type key registered for *config*, or None."""
    matches = (key for key, cls in CONFIG_MAPPING_NAMES.items() if cls == config)
    return next(matches, None)
def get_accuracy(n):
    """Accuracy from a 2x2 confusion matrix n (diagonal = correct)."""
    correct = n[0][0] + n[1][1]
    total = correct + n[0][1] + n[1][0]
    return float(correct) / total
class ConvLSTM(nn.Module):
    """Multi-layer ConvLSTM with depth-guided multi-scale refinement.

    The constructor stacks ``len(hidden_channels)`` ConvLSTMCells plus five
    refinement branches (one per encoder scale), an atrous-convolution
    fusion head, and the saliency prediction layers used by ``forward``.

    Fixes over the original: deprecated ``nn.init.normal`` /
    ``F.upsample`` / ``F.sigmoid`` calls replaced with their current
    equivalents (``nn.init.normal_`` / ``F.interpolate`` /
    ``torch.sigmoid``); numerics are unchanged.
    """

    def __init__(self, input_channels, hidden_channels, kernel_size, step=1, effective_step=[1], bias=True):
        super(ConvLSTM, self).__init__()
        self.input_channels = [input_channels] + hidden_channels
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.num_layers = len(hidden_channels)
        self.step = step
        self.bias = bias
        self.effective_step = effective_step
        self._all_layers = []
        # One ConvLSTMCell per layer, registered as cell0, cell1, ...
        for i in range(self.num_layers):
            name = 'cell{}'.format(i)
            cell = ConvLSTMCell(self.input_channels[i], self.hidden_channels[i], self.kernel_size, self.bias)
            setattr(self, name, cell)
            self._all_layers.append(cell)
        # Scale-1 refinement branch (64 channels), downsampled 4x to the
        # common resolution.
        self.conv_refine1_1 = nn.Conv2d(64, 64, 3, padding=1)
        self.bn_refine1_1 = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
        self.relu_refine1_1 = nn.PReLU()
        self.conv_refine1_2 = nn.Conv2d(64, 64, 3, padding=1)
        self.bn_refine1_2 = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
        self.relu_refine1_2 = nn.PReLU()
        self.conv_refine1_3 = nn.Conv2d(64, 64, 3, padding=1)
        self.bn_refine1_3 = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
        self.relu_refine1_3 = nn.PReLU()
        self.down_2_1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.down_2_2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        # Scale-2 branch (128 channels) with a 128->64 reduction.
        self.conv_refine2_1 = nn.Conv2d(128, 128, 3, padding=1)
        self.bn_refine2_1 = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
        self.relu_refine2_1 = nn.PReLU()
        self.conv_refine2_2 = nn.Conv2d(128, 128, 3, padding=1)
        self.bn_refine2_2 = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
        self.relu_refine2_2 = nn.PReLU()
        self.conv_refine2_3 = nn.Conv2d(128, 128, 3, padding=1)
        self.bn_refine2_3 = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
        self.relu_refine2_3 = nn.PReLU()
        self.conv_r2_1 = nn.Conv2d(128, 64, 3, padding=1)
        self.bn_r2_1 = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
        self.relu_r2_1 = nn.PReLU()
        # Scale-3 branch (256 channels) with a 256->64 reduction.
        self.conv_refine3_1 = nn.Conv2d(256, 256, 3, padding=1)
        self.bn_refine3_1 = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True)
        self.relu_refine3_1 = nn.PReLU()
        self.conv_refine3_2 = nn.Conv2d(256, 256, 3, padding=1)
        self.bn_refine3_2 = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True)
        self.relu_refine3_2 = nn.PReLU()
        self.conv_refine3_3 = nn.Conv2d(256, 256, 3, padding=1)
        self.bn_refine3_3 = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True)
        self.relu_refine3_3 = nn.PReLU()
        self.conv_r3_1 = nn.Conv2d(256, 64, 3, padding=1)
        self.bn_r3_1 = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
        self.relu_r3_1 = nn.PReLU()
        # Scale-4 branch (512 channels), upsampled 2x, with 512->64 reduction.
        self.conv_refine4_1 = nn.Conv2d(512, 512, 3, padding=1)
        self.bn_refine4_1 = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True)
        self.relu_refine4_1 = nn.PReLU()
        self.conv_refine4_2 = nn.Conv2d(512, 512, 3, padding=1)
        self.bn_refine4_2 = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True)
        self.relu_refine4_2 = nn.PReLU()
        self.conv_refine4_3 = nn.Conv2d(512, 512, 3, padding=1)
        self.bn_refine4_3 = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True)
        self.relu_refine4_3 = nn.PReLU()
        self.conv_r4_1 = nn.Conv2d(512, 64, 3, padding=1)
        self.bn_r4_1 = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
        self.relu_r4_1 = nn.PReLU()
        # Scale-5 branch (512 channels), upsampled 4x, with 512->64 reduction.
        self.conv_refine5_1 = nn.Conv2d(512, 512, 3, padding=1)
        self.bn_refine5_1 = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True)
        self.relu_refine5_1 = nn.PReLU()
        self.conv_refine5_2 = nn.Conv2d(512, 512, 3, padding=1)
        self.bn_refine5_2 = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True)
        self.relu_refine5_2 = nn.PReLU()
        self.conv_refine5_3 = nn.Conv2d(512, 512, 3, padding=1)
        self.bn_refine5_3 = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True)
        self.relu_refine5_3 = nn.PReLU()
        self.conv_r5_1 = nn.Conv2d(512, 64, 3, padding=1)
        self.bn_r5_1 = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
        self.relu_r5_1 = nn.PReLU()
        # Fusion head: plain 1x1 / 3x3 convs plus three atrous convs and a
        # pooled branch, concatenated into a 6-way feature stack.
        self.conv5_conv_1 = nn.Conv2d(64, 64, 1, padding=0)
        self.bn5_conv_1 = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
        self.relu5_conv_1 = nn.ReLU(inplace=True)
        self.conv5_conv = nn.Conv2d(64, 64, 3, padding=1)
        self.bn5_conv = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
        self.relu5_conv = nn.ReLU(inplace=True)
        self.Atrous_conv_1 = nn.Conv2d(64, 64, 3, padding=7, dilation=7)
        self.Atrous_bn5_1 = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
        self.Atrous_relu_1 = nn.ReLU(inplace=True)
        self.Atrous_conv_2 = nn.Conv2d(64, 64, 3, padding=5, dilation=5)
        self.Atrous_bn5_2 = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
        self.Atrous_relu_2 = nn.ReLU(inplace=True)
        self.Atrous_conv_5 = nn.Conv2d(64, 64, 3, padding=3, dilation=3)
        self.Atrous_bn5_5 = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
        self.Atrous_relu_5 = nn.ReLU(inplace=True)
        self.Atrous_pooling = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.Atrous_conv_pool = nn.Conv2d(64, 64, 1, padding=0)
        self.Atrous_bn_pool = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True)
        self.Atrous_relu_pool = nn.ReLU(inplace=True)
        # Depth-attention and prediction layers.
        self.conv_c = nn.Conv2d(64, 64, 3, padding=1)
        self.conv_h = nn.Conv2d(64, 64, 3, padding=1)
        self.pool_avg = nn.AvgPool2d(64, stride=2, ceil_mode=True)
        self.conv_s1 = nn.Conv2d((64 * self.num_layers), 64, 1, padding=0)
        self.conv_s2 = nn.Conv2d((64 * self.num_layers), 1, 1, padding=0)
        self.conv_pred = nn.Conv2d(64, 2, 1, padding=0)
        self._initialize_weights()

    def _initialize_weights(self):
        """Init all convs with N(0, 0.01) weights and zero biases."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # nn.init.normal is deprecated; normal_ is the in-place form.
                nn.init.normal_(m.weight.data, std=0.01)
                if m.bias is not None:
                    m.bias.data.zero_()

    def forward(self, depth_vector, h1, h2, h3, h4, h5, d1, d2, d3, d4, d5):
        """Fuse five-scale depth/RGB features, run the depth-gated ConvLSTM
        steps, and predict a 2-channel saliency map upsampled 4x."""
        internal_state = []
        # --- Refine each scale, merge with its RGB feature (h*), and bring
        # it to the common resolution (64-ch feature maps). ---
        d1_1 = self.relu_refine1_1(self.bn_refine1_1(self.conv_refine1_1(d1)))
        d1_2 = self.relu_refine1_2(self.bn_refine1_2(self.conv_refine1_2(d1_1)))
        d1_2 = d1_2 + h1
        d1_2 = self.down_2_2(self.down_2_1(d1_2))
        d1_2_0 = d1_2
        d1_3 = self.relu_refine1_3(self.bn_refine1_3(self.conv_refine1_3(d1_2)))
        drb1 = d1_2_0 + d1_3
        d2_1 = self.relu_refine2_1(self.bn_refine2_1(self.conv_refine2_1(d2)))
        d2_2 = self.relu_refine2_2(self.bn_refine2_2(self.conv_refine2_2(d2_1)))
        d2_2 = d2_2 + h2
        d2_2 = self.down_2_1(d2_2)
        d2_2_0 = d2_2
        d2_3 = self.relu_refine2_3(self.bn_refine2_3(self.conv_refine2_3(d2_2)))
        drb2 = d2_2_0 + d2_3
        drb2 = self.relu_r2_1(self.bn_r2_1(self.conv_r2_1(drb2)))
        d3_1 = self.relu_refine3_1(self.bn_refine3_1(self.conv_refine3_1(d3)))
        d3_2 = self.relu_refine3_2(self.bn_refine3_2(self.conv_refine3_2(d3_1)))
        d3_2 = d3_2 + h3
        d3_2_0 = d3_2
        d3_3 = self.relu_refine3_3(self.bn_refine3_3(self.conv_refine3_3(d3_2)))
        drb3 = d3_2_0 + d3_3
        drb3 = self.relu_r3_1(self.bn_r3_1(self.conv_r3_1(drb3)))
        d4_1 = self.relu_refine4_1(self.bn_refine4_1(self.conv_refine4_1(d4)))
        d4_2 = self.relu_refine4_2(self.bn_refine4_2(self.conv_refine4_2(d4_1)))
        d4_2 = d4_2 + h4
        # F.upsample is deprecated; F.interpolate is the exact replacement.
        d4_2 = F.interpolate(d4_2, scale_factor=2, mode='bilinear')
        d4_2_0 = d4_2
        d4_3 = self.relu_refine4_3(self.bn_refine4_3(self.conv_refine4_3(d4_2)))
        drb4 = d4_2_0 + d4_3
        drb4 = self.relu_r4_1(self.bn_r4_1(self.conv_r4_1(drb4)))
        d5_1 = self.relu_refine5_1(self.bn_refine5_1(self.conv_refine5_1(d5)))
        d5_2 = self.relu_refine5_2(self.bn_refine5_2(self.conv_refine5_2(d5_1)))
        d5_2 = d5_2 + h5
        d5_2 = F.interpolate(d5_2, scale_factor=4, mode='bilinear')
        d5_2_0 = d5_2
        d5_3 = self.relu_refine5_3(self.bn_refine5_3(self.conv_refine5_3(d5_2)))
        drb5 = d5_2_0 + d5_3
        drb5 = self.relu_r5_1(self.bn_r5_1(self.conv_r5_1(drb5)))
        # --- Six parallel fusion views of the summed refinement output. ---
        drb_fusion = drb1 + drb2 + drb3 + drb4 + drb5
        f1 = self.relu5_conv_1(self.bn5_conv_1(self.conv5_conv_1(drb_fusion)))
        f2 = self.relu5_conv(self.bn5_conv(self.conv5_conv(drb_fusion)))
        f3 = self.Atrous_relu_1(self.Atrous_bn5_1(self.Atrous_conv_1(drb_fusion)))
        f4 = self.Atrous_relu_2(self.Atrous_bn5_2(self.Atrous_conv_2(drb_fusion)))
        f5 = self.Atrous_relu_5(self.Atrous_bn5_5(self.Atrous_conv_5(drb_fusion)))
        f6 = F.interpolate(self.Atrous_relu_pool(self.Atrous_bn_pool(self.Atrous_conv_pool(self.Atrous_pooling(self.Atrous_pooling(drb_fusion))))), scale_factor=4, mode='bilinear')
        # Stack the six views along the batch dim, then regroup per sample.
        fusion = torch.cat([f1, f2, f3, f4, f5, f6], dim=0)
        fusion_o = fusion
        input = torch.cat(torch.chunk(fusion, 6, dim=0), dim=1)
        # --- Depth-gated recurrent refinement over self.step iterations. ---
        for step in range(self.step):
            depth = depth_vector
            if step == 0:
                (basize, _, height, width) = input.size()
                (h_step, c) = ConvLSTMCell.init_hidden(basize, self.hidden_channels[(self.num_layers - 1)], (height, width))
            # Turn the depth vector into per-view weights over the 6 views.
            depth = torch.mul(F.softmax(depth, dim=1), 6)
            (basize, dime, h, w) = depth.size()
            depth = depth.view(1, basize, dime, h, w).transpose(0, 1).transpose(1, 2)
            depth = torch.cat(torch.chunk(depth, basize, dim=0), dim=1).view((basize * dime), 1, 1, 1)
            depth = torch.mul(fusion_o, depth).view(1, (basize * dime), 64, 64, 64)
            depth = torch.cat(torch.chunk(depth, basize, dim=1), dim=0)
            F_sum = torch.sum(depth, 1, keepdim=False)
            depth_fw_ori = F_sum
            # Channel attention from the current fused map + hidden state.
            depth = self.conv_c(F_sum)
            h_c = self.conv_h(h_step)
            depth = depth + h_c
            depth = self.pool_avg(depth)
            depth = torch.mul(F.softmax(depth, dim=1), 64)
            F_sum_wt = torch.mul(depth_fw_ori, depth)
            x = F_sum_wt
            if step < (self.step - 1):
                for i in range(self.num_layers):
                    if step == 0:
                        (bsize, _, height, width) = x.size()
                        (h, c) = ConvLSTMCell.init_hidden(bsize, self.hidden_channels[i], (height, width))
                        internal_state.append((h, c))
                    name = 'cell{}'.format(i)
                    (h, c) = internal_state[i]
                    h_step = h
                    (x, new_c, new_o) = getattr(self, name)(x, h, c)
                    internal_state[i] = (x, new_c)
                    if step == 0:
                        outputs_o = new_o
                    else:
                        outputs_o = torch.cat((outputs_o, new_o), dim=1)
        # --- Spatially-weighted prediction from the accumulated outputs. ---
        outputs = self.conv_s1(outputs_o)
        # F.sigmoid is deprecated; torch.sigmoid is identical.
        spatial_weight = torch.sigmoid(self.conv_s2(outputs_o))
        outputs = torch.mul(outputs, spatial_weight)
        outputs = self.conv_pred(outputs)
        output = F.interpolate(outputs, scale_factor=4, mode='bilinear')
        return output
@with_incremental_state
class FConvDecoder(FairseqDecoder):
    """ConvS2S-style convolutional decoder with optional per-layer encoder
    attention and self-attention, plus optional gated fusion with the output
    of a separate pretrained decoder.
    """

    def __init__(self, dictionary, embed_dim=512, out_embed_dim=256, max_positions=1024, convolutions=(((512, 3),) * 8), attention=True, dropout=0.1, selfattention=False, attention_nheads=1, selfattention_nheads=1, project_input=False, gated_attention=False, downsample=False, pretrained=False, trained_decoder=None):
        super().__init__(dictionary)
        # Version buffer kept for checkpoint compatibility.
        self.register_buffer('version', torch.Tensor([2]))
        self.pretrained = pretrained
        self.pretrained_decoder = trained_decoder
        self.dropout = dropout
        self.need_attn = True
        in_channels = convolutions[0][0]

        def expand_bool_array(val):
            # Broadcast a single bool into a per-layer list of bools.
            if isinstance(val, bool):
                return ([val] * len(convolutions))
            return val
        attention = expand_bool_array(attention)
        selfattention = expand_bool_array(selfattention)
        if ((not isinstance(attention, list)) or (len(attention) != len(convolutions))):
            raise ValueError('Attention is expected to be a list of booleans of length equal to the number of layers.')
        num_embeddings = len(dictionary)
        padding_idx = dictionary.pad()
        self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
        self.embed_positions = PositionalEmbedding(max_positions, embed_dim, padding_idx)
        self.fc1 = Linear(embed_dim, in_channels, dropout=dropout)
        # Per-layer module lists; entries are None where the feature is off.
        self.projections = nn.ModuleList()
        self.convolutions = nn.ModuleList()
        self.attention = nn.ModuleList()
        self.selfattention = nn.ModuleList()
        self.attproj = nn.ModuleList()
        for (i, (out_channels, kernel_size)) in enumerate(convolutions):
            # Residual projection only needed when the channel count changes.
            self.projections.append((Linear(in_channels, out_channels) if (in_channels != out_channels) else None))
            # 2x output channels to feed the GLU gating in forward().
            self.convolutions.append(LinearizedConv1d(in_channels, (out_channels * 2), kernel_size, padding=(kernel_size - 1), dropout=dropout))
            self.attention.append((DownsampledMultiHeadAttention(out_channels, embed_dim, attention_nheads, project_input=project_input, gated=False, downsample=False) if attention[i] else None))
            self.attproj.append((Linear(out_channels, embed_dim, dropout=dropout) if attention[i] else None))
            self.selfattention.append((SelfAttention(out_channels, embed_dim, selfattention_nheads, project_input=project_input, gated=gated_attention, downsample=downsample) if selfattention[i] else None))
            in_channels = out_channels
        self.fc2 = Linear(in_channels, out_embed_dim)
        self.fc3 = Linear(out_embed_dim, num_embeddings, dropout=dropout)
        if self.pretrained:
            # Gated fusion of this decoder's fc2 output with the pretrained
            # decoder's fc2 output (captured via the forward hook below).
            self.gate1 = nn.Sequential(Linear((out_embed_dim * 2), out_embed_dim), nn.Sigmoid())
            self.gate2 = nn.Sequential(Linear((out_embed_dim * 2), out_embed_dim), nn.Sigmoid())
            self.joining = nn.Sequential(Linear((out_embed_dim * 2), (out_embed_dim * 2)), LayerNorm((out_embed_dim * 2)), nn.GLU(), Linear(out_embed_dim, (out_embed_dim * 2)), LayerNorm((out_embed_dim * 2)), nn.GLU(), Linear(out_embed_dim, out_embed_dim), LayerNorm(out_embed_dim))
            self.pretrained_outputs = {}

            def save_output():
                # Hook that stashes the pretrained decoder's fc2 output so it
                # can be fused in forward().
                def hook(a, b, output):
                    self.pretrained_outputs['out'] = output
                return hook
            self.pretrained_decoder.fc2.register_forward_hook(save_output())

    def forward(self, prev_output_tokens, encoder_out):
        """Decode one batch; returns (logits, avg_attn_scores).

        When pretrained fusion is enabled the logits come from the gated
        fusion of this decoder and the pretrained decoder.
        """
        trained_encoder_out = (encoder_out['pretrained'] if self.pretrained else None)
        encoder_out = encoder_out['encoder']['encoder_out']
        (encoder_a, encoder_b) = self._split_encoder_out(encoder_out)
        positions = self.embed_positions(prev_output_tokens)
        x = (self.embed_tokens(prev_output_tokens) + positions)
        x = F.dropout(x, p=self.dropout, training=self.training)
        target_embedding = x.transpose(0, 1)
        x = self.fc1(x)
        # Swap the first two dims for the conv stack; swapped back after the loop.
        x = x.transpose(0, 1)
        avg_attn_scores = None
        for (proj, conv, attention, selfattention, attproj) in zip(self.projections, self.convolutions, self.attention, self.selfattention, self.attproj):
            residual = (x if (proj is None) else proj(x))
            x = F.dropout(x, p=self.dropout, training=self.training)
            x = conv(x)
            x = F.glu(x, dim=2)
            if (attention is not None):
                r = x
                (x, attn_scores) = attention((attproj(x) + target_embedding), encoder_a, encoder_b)
                x = (x + r)
                if ((not self.training) and self.need_attn):
                    # NOTE(review): scores are summed across layers without
                    # dividing by the layer count -- confirm callers expect
                    # the sum rather than a true average.
                    if (avg_attn_scores is None):
                        avg_attn_scores = attn_scores
                    else:
                        avg_attn_scores.add_(attn_scores)
            if (selfattention is not None):
                x = selfattention(x)
            # Halve variance of the residual sum, as in ConvS2S.
            x = ((x + residual) * math.sqrt(0.5))
        x = x.transpose(0, 1)
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        if (not self.pretrained):
            x = self.fc3(x)
        if self.pretrained:
            # Running the pretrained decoder triggers its fc2 hook, which
            # fills self.pretrained_outputs['out'] used below.
            (trained_x, _) = self.pretrained_decoder.forward(prev_output_tokens, trained_encoder_out)
            y = torch.cat([x, self.pretrained_outputs['out']], dim=(- 1))
            gate1 = self.gate1(y)
            gate2 = self.gate2(y)
            gated_x1 = (gate1 * x)
            gated_x2 = (gate2 * self.pretrained_outputs['out'])
            fusion = torch.cat([gated_x1, gated_x2], dim=(- 1))
            fusion = self.joining(fusion)
            fusion_output = self.fc3(fusion)
            return (fusion_output, avg_attn_scores)
        else:
            return (x, avg_attn_scores)

    def max_positions(self):
        """Maximum output length supported by the decoder."""
        return self.embed_positions.max_positions

    def make_generation_fast_(self, need_attn=False, **kwargs):
        # Toggle whether attention scores are accumulated during inference.
        self.need_attn = need_attn

    def _split_encoder_out(self, encoder_out):
        """Split the encoder output pair and make each tensor contiguous
        after swapping its first two dimensions."""
        (encoder_a, encoder_b) = encoder_out
        encoder_a = encoder_a.transpose(0, 1).contiguous()
        encoder_b = encoder_b.transpose(0, 1).contiguous()
        result = (encoder_a, encoder_b)
        return result
def print_headless_mentions(out, parses, heads, mentions):
    """Print multi-word mentions that have no single lowest covering parse node.

    Args:
        out: writable stream the report is printed to.
        parses: per-sentence parse trees supporting get_nodes('lowest', start, end).
        heads: unused in this function -- presumably head-word info kept for
            interface parity with sibling functions; confirm.
        mentions: iterable of (sentence_index, start, end) token spans.
    """
    for mention in mentions:
        (sentence, start, end) = mention
        # Single-token mentions trivially have a covering node; skip them.
        if ((end - start) > 1):
            node = parses[sentence].get_nodes('lowest', start, end)
            if (node is None):
                # NOTE(review): `text` is not defined in this function, so it
                # must resolve to a module-level global -- verify it is in
                # scope at call time.
                print(mention_text(text, mention), file=out)
                print(text_tree(parses[sentence], False), file=out)
class Mastering_Effects_Manipulator():
    """Fixed mastering chain: input gain, then EQ -> mid/side imaging -> limiter."""

    def __init__(self, block_size=(2 ** 17)):
        self.block_size = block_size
        self.sample_rate = 44100
        # Pre chain: a static -8 dB input gain.
        pre = ProcessorList(block_size=self.block_size, sample_rate=self.sample_rate)
        pre.add(Gain(gain=-8.0, block_size=self.block_size, sample_rate=self.sample_rate))
        self.processors_pre = pre
        # Core chain: the randomizable mastering effects.
        core = ProcessorList(block_size=self.block_size, sample_rate=self.sample_rate)
        core.add(Equalizer(block_size=self.block_size, sample_rate=self.sample_rate, gain_range=(-15.0, 10.0)))
        core.add(CrossoverMidSideImager(crossover_order=4, block_size=self.block_size, sample_rate=self.sample_rate))
        core.add(LimiterTypeA(block_size=self.block_size, sample_rate=self.sample_rate))
        self.processors_core = core
        self.pre_processors = self.processors_pre.get_all()
        self.core_processors = self.processors_core.get_all()

    def process(self, buffer_list, randomize=None, reset=None):
        """Run each buffer through the full chain in order.

        If `randomize` is truthy, core processor parameters are randomized
        first; if `reset` is truthy, all processors are reset afterwards.
        """
        chain = self.pre_processors + self.core_processors
        if randomize:
            for proc in self.core_processors:
                proc.randomize()
        processed = []
        for buf in buffer_list:
            for proc in chain:
                buf = proc.process(buf)
            processed.append(buf)
        if reset:
            for proc in chain:
                proc.reset()
        return processed
class HRateHyperprior(HRateEstimator):
    """Hyperprior rate estimator: a side latent s is coded with a factorized
    entropy bottleneck and used to predict the scale (and optionally the mean)
    of a conditional Gaussian entropy model over z.
    """

    def __init__(self, z_dim, factor_dim=5, side_z_dim=None, is_pred_mean=True, **kwargs):
        super().__init__(z_dim, **kwargs)
        if (side_z_dim is None):
            # Default side dimension: z_dim compressed by factor_dim, at least 10.
            side_z_dim = max(10, (self.z_dim // factor_dim))
        self.side_z_dim = side_z_dim
        self.is_pred_mean = is_pred_mean
        self.entropy_bottleneck = EntropyBottleneck(side_z_dim, **self.kwargs_ent_bottleneck)
        self.gaussian_conditional = GaussianConditional(None)
        (self.side_encoder, self.z_encoder) = self.get_encoders()
        self.reset_parameters()

    def get_encoders(self):
        """Build the z -> side-latent encoder and the side -> Gaussian-params decoder."""
        kwargs_mlp = dict(n_hid_layers=2, hid_dim=max(self.z_dim, 256))
        side_encoder = MLP(self.z_dim, self.side_z_dim, **kwargs_mlp)
        z_dim = self.z_dim
        if self.is_pred_mean:
            # Predict scales AND means: double the output width.
            z_dim *= 2
        z_encoder = MLP(self.side_z_dim, z_dim, **kwargs_mlp)
        return (side_encoder, z_encoder)

    def chunk_params(self, gaussian_params):
        """Split predicted parameters into (scales, means); means is None when
        mean prediction is disabled."""
        if self.is_pred_mean:
            (scales_hat, means_hat) = gaussian_params.chunk(2, (- 1))
        else:
            (scales_hat, means_hat) = (gaussian_params, None)
        return (scales_hat, means_hat)

    def forward_help(self, z, _, __):
        """Quantize z through the hyperprior; returns (z_hat, rate, logs, other)."""
        z_in = self.process_z_in(z)
        side_z = self.side_encoder(z_in)
        (side_z_hat, q_s) = self.entropy_bottleneck(side_z)
        gaussian_params = self.z_encoder(side_z_hat)
        (scales_hat, means_hat) = self.chunk_params(gaussian_params)
        (z_hat, q_zls) = self.gaussian_conditional(z_in, scales_hat, means=means_hat)
        # Code lengths in nats: side info plus conditional code for z.
        neg_log_q_s = (- torch.log(q_s).sum((- 1)))
        neg_log_q_zls = (- torch.log(q_zls).sum((- 1)))
        neg_log_q_zs = (neg_log_q_s + neg_log_q_zls)
        logs = dict(H_q_ZlS=(neg_log_q_zls.mean() / math.log(BASE_LOG)), H_q_Z=(neg_log_q_zs.mean() / math.log(BASE_LOG)), H_q_S=(neg_log_q_s.mean() / math.log(BASE_LOG)), H_ZlX=0)
        if self.is_compute_real_rate:
            (n_bits, logs2) = self.real_rate(z, is_return_logs=True)
            logs.update(logs2)
            logs['n_bits'] = n_bits
        other = dict()
        z_hat = self.process_z_out(z_hat)
        return (z_hat, neg_log_q_zs, logs, other)

    def get_indexes_means_hat(self, side_z_strings):
        """Decode the side bitstream and derive the Gaussian codec's scale
        indexes and (optionally) the predicted means."""
        side_z_hat = self.entropy_bottleneck.decompress(side_z_strings)
        gaussian_params = self.z_encoder(side_z_hat)
        (scales_hat, means_hat) = self.chunk_params(gaussian_params)
        scales_hat = atleast_ndim(scales_hat, 4)
        # BUG FIX: the original assigned atleast_ndim(scales_hat, 4) to
        # means_hat (twice), silently replacing the predicted means with the
        # scales. Expand the actual means, guarding None (is_pred_mean=False).
        if (means_hat is not None):
            means_hat = atleast_ndim(means_hat, 4)
        indexes = self.gaussian_conditional.build_indexes(scales_hat)
        return (indexes, means_hat)

    def compress(self, z, parent=None):
        """Entropy-code z; returns [z_strings, side_z_strings]."""
        z_in = self.process_z_in(z)
        side_z = self.side_encoder(z_in)
        side_z_strings = self.entropy_bottleneck.compress(side_z)
        (indexes, means_hat) = self.get_indexes_means_hat(side_z_strings)
        # GaussianConditional expects a 4D (image-like) tensor.
        z_in = atleast_ndim(z_in, 4)
        z_strings = self.gaussian_conditional.compress(z_in, indexes, means=means_hat)
        return [z_strings, side_z_strings]

    def decompress(self, all_strings):
        """Inverse of compress(); reconstructs z_hat from the two bitstreams."""
        assert (isinstance(all_strings, list) and (len(all_strings) == 2))
        (z_strings, side_z_strings) = all_strings
        (indexes, means_hat) = self.get_indexes_means_hat(side_z_strings)
        z_hat = self.gaussian_conditional.decompress(z_strings, indexes, means=means_hat)
        # Drop the trailing singleton spatial dims added for the codec.
        z_hat = einops.rearrange(z_hat, 'b c e1 e2 -> b (c e1 e2)', e1=1, e2=1)
        return self.process_z_out(z_hat)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        # CompressAI CDF buffers change size between checkpoints; resize them
        # before the standard load. Missing keys are tolerated (fresh model).
        try:
            policy = 'resize'
            update_registered_buffers(self.gaussian_conditional, f'{prefix}gaussian_conditional', ['_quantized_cdf', '_offset', '_cdf_length', 'scale_table'], state_dict, policy=policy)
        except KeyError:
            pass
        super()._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
def write_text(path: Path, text: str, encoding=None):
    """Write *text* to *path*, replacing any existing content."""
    with path.open(mode='w', encoding=encoding) as handle:
        handle.write(text)
class ModuleManager():
    """Registry mapping class/function names to the objects themselves.

    Modules are registered with add_module() and retrieved by name via
    indexing (manager['Foo']).
    """

    def __init__(self, name=None):
        self._modules_dict = dict()
        self._name = name

    def __len__(self):
        return len(self._modules_dict)

    def __repr__(self):
        name_str = (self._name if self._name else self.__class__.__name__)
        return '{}:{}'.format(name_str, list(self._modules_dict.keys()))

    def __getitem__(self, item):
        if (item not in self._modules_dict.keys()):
            # BUG FIX: the original message contained the garbled text
            # "availabe l"; corrected to "available".
            raise KeyError('{} does not exist in available {}'.format(item, self))
        return self._modules_dict[item]

    def modules_dict(self):
        """Return the underlying name -> module mapping."""
        return self._modules_dict

    def name(self):
        """Return the manager's display name (may be None)."""
        return self._name

    def _add_single_module(self, module):
        # Only plain classes and functions may be registered.
        if (not (inspect.isclass(module) or inspect.isfunction(module))):
            raise TypeError('Expect class/function type, but received {}'.format(type(module)))
        module_name = module.__name__
        if (module_name in self._modules_dict.keys()):
            raise KeyError('{} exists already!'.format(module_name))
        else:
            self._modules_dict[module_name] = module

    def add_module(self, modules):
        """Register one module or a sequence of modules; returns the input."""
        if isinstance(modules, Sequence):
            for module in modules:
                self._add_single_module(module)
        else:
            module = modules
            self._add_single_module(module)
        return modules
class MinibatchRlEval(MinibatchRlBase):
    """Minibatch RL runner that periodically evaluates the agent offline.

    Runs the sample -> optimize loop and measures performance with the
    sampler's evaluation mode every `log_interval_itrs` iterations, plus once
    before training starts.
    """
    # Presumably consumed by the base class to enable eval machinery -- confirm.
    _eval = True

    def train(self):
        """Main loop: initial eval, then sample/optimize with periodic evals."""
        n_itr = self.startup()
        with logger.prefix(f'itr #0 '):
            (eval_traj_infos, eval_time) = self.evaluate_agent(0)
            self.log_diagnostics(0, eval_traj_infos, eval_time)
        for itr in range(n_itr):
            with logger.prefix(f'itr #{itr} '):
                self.agent.sample_mode(itr)
                (samples, traj_infos) = self.sampler.obtain_samples(itr)
                self.agent.train_mode(itr)
                opt_info = self.algo.optimize_agent(itr, samples)
                self.store_diagnostics(itr, traj_infos, opt_info)
                if (((itr + 1) % self.log_interval_itrs) == 0):
                    (eval_traj_infos, eval_time) = self.evaluate_agent(itr)
                    self.log_diagnostics(itr, eval_traj_infos, eval_time)
        self.shutdown()

    def evaluate_agent(self, itr):
        """Run evaluation episodes; returns (traj_infos, wall_clock_seconds)."""
        if (itr > 0):
            self.pbar.stop()
        logger.log('Evaluating agent...')
        self.agent.eval_mode(itr)
        # Negative start time so adding time.time() afterwards yields elapsed.
        eval_time = (- time.time())
        traj_infos = self.sampler.evaluate_agent(itr)
        eval_time += time.time()
        logger.log('Evaluation runs complete.')
        return (traj_infos, eval_time)

    def initialize_logging(self):
        super().initialize_logging()
        # Total wall-clock time spent evaluating across the whole run.
        self._cum_eval_time = 0

    def log_diagnostics(self, itr, eval_traj_infos, eval_time):
        """Record evaluation statistics, then defer to the base implementation."""
        if (not eval_traj_infos):
            logger.log('WARNING: had no complete trajectories in eval.')
        steps_in_eval = sum([info['Length'] for info in eval_traj_infos])
        logger.record_tabular('StepsInEval', steps_in_eval)
        logger.record_tabular('TrajsInEval', len(eval_traj_infos))
        self._cum_eval_time += eval_time
        logger.record_tabular('CumEvalTime', self._cum_eval_time)
        super().log_diagnostics(itr, eval_traj_infos, eval_time)
class DictDataset(Dataset):
    """Dataset over named tensors; all tensors must share the same first
    dimension, and each item is a dict of per-index slices."""

    def __init__(self, **kwargs):
        self.data = kwargs
        self.data_len = None
        # Validate that every tensor has the same leading length.
        for tensor in kwargs.values():
            if self.data_len is None:
                self.data_len = tensor.size(0)
            else:
                assert self.data_len == tensor.size(0)

    def __getitem__(self, index):
        """Return {name: tensor[index]} for every stored tensor."""
        return {key: tensor[index] for (key, tensor) in self.data.items()}

    def __len__(self):
        return self.data_len
def rbf_kernel(x, y, sigma):
    """Gaussian RBF kernel: exp(-||x - y||^2 / (2 * sigma^2))."""
    squared_distance = np.linalg.norm(x - y) ** 2
    return np.exp(-squared_distance / (2.0 * sigma ** 2))
class NPQueue(object):
    """Deque-like queue backed by a flat numpy array.

    Elements live in _arr[_start_idx:_end_idx]. popleft() discards from the
    left (without returning); pop() removes and returns from the right. When
    the buffer fills, live elements are compacted into a buffer twice their
    size.
    """

    def __init__(self, initial_capacity: int = 100, dtype=np.int64):
        # Guard against a zero capacity, which would make the first append fail.
        self._arr = np.zeros(max(initial_capacity, 1), dtype=dtype)
        self._start_idx = 0
        self._end_idx = 0

    def _reset(self):
        """Compact live elements to the front of a buffer twice their size."""
        current_size = (self._end_idx - self._start_idx)
        # max(..., 1) keeps the buffer non-empty even if everything was popped.
        new_arr = np.zeros(max(2 * current_size, 1), dtype=self._arr.dtype)
        new_arr[:current_size] = self.view()
        self._arr = new_arr
        self._start_idx = 0
        self._end_idx = current_size

    def append(self, value):
        """Append value on the right, growing the buffer when it fills up."""
        self._arr[self._end_idx] = value
        self._end_idx += 1
        if (self._end_idx == len(self._arr)):
            self._reset()

    def pop(self):
        """Remove and return the rightmost element.

        BUG FIX: the original returned self._arr[self._end_idx + 1] after
        decrementing, i.e. the slot one past the removed element.
        """
        self._end_idx -= 1
        return self._arr[self._end_idx]

    def popleft(self):
        """Discard the leftmost element (no value is returned)."""
        self._start_idx += 1

    def view(self):
        """Zero-copy view of the live elements."""
        return self._arr[self._start_idx:self._end_idx]

    def __len__(self):
        return (self._end_idx - self._start_idx)

    def first(self):
        """Leftmost element; the queue must be non-empty."""
        assert ((self._end_idx - self._start_idx) > 0)
        return self._arr[self._start_idx]

    def last(self):
        """Rightmost element; the queue must be non-empty."""
        assert ((self._end_idx - self._start_idx) > 0)
        return self._arr[(self._end_idx - 1)]

    def __str__(self):
        return self.view().__str__()
def parse_requirements(filename):
    """Read a pip requirements file and return its non-empty, non-comment lines.

    BUG FIX: the original opened the file without closing it; use a context
    manager so the handle is always released.
    """
    with open(filename) as f:
        stripped = (line.strip() for line in f)
        return [line for line in stripped if (line and (not line.startswith('#')))]
def benchmark(exec_func=None, *, plot=True, auto=False):
    """Decorator for benchmark entry points.

    Prepares a fresh log directory (suffixing _1, _2, ... if it already
    exists), optionally an 'auto' upload directory, runs the benchmark, saves
    one figure per environment when plotting is enabled, and uploads results
    to GCP storage in auto mode.
    """
    if (exec_func is None):
        # Decorator used with arguments: @benchmark(plot=..., auto=...).
        return functools.partial(benchmark, plot=plot, auto=auto)

    # BUG FIX: the original had a bare no-op expression `(exec_func)` here --
    # almost certainly a mangled @functools.wraps decorator. Restoring it
    # preserves the benchmark's __name__/__doc__ on the wrapper.
    @functools.wraps(exec_func)
    def wrapper_func():
        global _plot, _log_dir, _auto
        _plot = ({} if plot else None)
        plt.close('all')
        _log_dir = _get_log_dir(exec_func.__name__)
        if os.path.exists(_log_dir):
            # Pick the first free suffixed directory name.
            count = 1
            while os.path.exists(((_log_dir + '_') + str(count))):
                count += 1
            _log_dir = ((_log_dir + '_') + str(count))
        if auto:
            _auto = auto
            auto_dir = os.path.join(_log_dir, 'auto')
            os.makedirs(auto_dir)
        exec_func()
        if plot:
            plot_dir = os.path.join(_log_dir, 'plot')
            os.makedirs(plot_dir)
            for env_id in _plot:
                plt.figure(env_id)
                plt.legend()
                plt.xlabel(_plot[env_id]['xlabel'])
                plt.ylabel(_plot[env_id]['ylabel'])
                plt.title(env_id)
                plt.savefig(((plot_dir + '/') + env_id))
        if auto:
            _upload_to_gcp_storage(_log_dir)
    return wrapper_func
_module()
class Contrast(object):
    """Randomly adjust image contrast by `magnitude`, applied with probability
    `prob`; the magnitude sign is flipped with `random_negative_prob`."""

    def __init__(self, magnitude, prob=0.5, random_negative_prob=0.5):
        assert isinstance(magnitude, (int, float)), f'The magnitude type must be int or float, but got {type(magnitude)} instead.'
        assert (0 <= prob <= 1.0), f'The prob should be in range [0,1], got {prob} instead.'
        assert (0 <= random_negative_prob <= 1.0), f'The random_negative_prob should be in range [0,1], got {random_negative_prob} instead.'
        self.magnitude = magnitude
        self.prob = prob
        self.random_negative_prob = random_negative_prob

    def __call__(self, results):
        """Adjust contrast of every image field in `results` (in place)."""
        # Skip the augmentation with probability (1 - prob).
        if np.random.rand() > self.prob:
            return results
        delta = random_negative(self.magnitude, self.random_negative_prob)
        for key in results.get('img_fields', ['strong']):
            src = results[key]
            adjusted = mmcv.adjust_contrast(src, factor=1 + delta)
            results[key] = adjusted.astype(src.dtype)
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(magnitude={self.magnitude}, '
                f'prob={self.prob}, '
                f'random_negative_prob={self.random_negative_prob})')
class StaticCombiner(Combiner):
    """Combiner that mixes the model's softmax distribution with a
    nearest-neighbor example-based distribution using a fixed weight.
    """

    def __init__(self, database: Database, top_k: int, mixing_weight: float, kernel: Kernel, bandwidth: float) -> None:
        super(StaticCombiner, self).__init__()
        self.database = database  # nearest-neighbor search index
        self.top_k = top_k  # neighbors retrieved per position
        self.mixing_weight = mixing_weight  # weight on the example-based distribution
        self.kernel = kernel  # maps distances to a token distribution
        self.bandwidth = bandwidth  # kernel bandwidth

    def forward(self, hidden: torch.Tensor, logits: torch.Tensor) -> torch.Tensor:
        """Return log-probabilities of shape (batch, seq, vocab) for the mixture."""
        (batch_size, seq_len, hidden_size) = hidden.size()
        vocab_size = logits.size((- 1))
        # Flatten batch and time so each row is one prediction position.
        hidden = hidden.view((batch_size * seq_len), hidden_size)
        logits = logits.view((batch_size * seq_len), vocab_size)
        model_based_distribution = F.softmax(logits, dim=(- 1))
        # NOTE(review): vocab_size is recomputed here with the same value as
        # above; this second assignment is redundant.
        vocab_size = model_based_distribution.size((- 1))
        # The search runs on CPU numpy arrays; results move back to the
        # hidden states' device afterwards.
        (distances, token_indices) = self.database.search(hidden.cpu().numpy(), top_k=self.top_k)
        distances = torch.FloatTensor(distances).to(hidden.device)
        token_indices = torch.LongTensor(token_indices).to(hidden.device)
        (example_based_distribution, _) = self.kernel.compute_example_based_distribution(distances, self.bandwidth, token_indices, vocab_size)
        # Convex combination of the two distributions, then log for output.
        mixed_distribution = (((1 - self.mixing_weight) * model_based_distribution) + (self.mixing_weight * example_based_distribution))
        log_probs = torch.log(mixed_distribution)
        log_probs = log_probs.view(batch_size, seq_len, vocab_size).contiguous()
        return log_probs
class GraphConvolution(nn.Module):
    """Gated relational graph convolution over a relation-labelled adjacency
    matrix, with separate weights for self-loop, forward, and backward edges.

    Biases are stored in one flat list of (2 * num_relations - 1) parameters
    and sliced into three groups (mirrored for the gate branch):
      - *_bias1: the last entry, used with relation label 1 (self-loop);
      - *_bias2: the first (num_relations - 1) entries, for labels 2..R on
        forward edges;
      - *_bias3: the middle (num_relations - 1) entries, for labels 2..R on
        backward (transposed) edges.
    """

    def __init__(self, d_model):
        super().__init__()
        self.d_model = d_model
        # Number of distinct relation labels -- hard-coded here.
        self.num_relations = 40
        # Three direction-specific linear maps: [self-loop, forward, backward].
        self.fc_dir_weight = clones(nn.Linear(d_model, d_model, bias=False), 3)
        self.fc_dir_bias = [nn.Parameter(torch.zeros(d_model)) for _ in range(((self.num_relations * 2) - 1))]
        self.fc_dir_bias1 = nn.ParameterList(self.fc_dir_bias[(- 1):])
        self.fc_dir_bias2 = nn.ParameterList(self.fc_dir_bias[:(self.num_relations - 1)])
        self.fc_dir_bias3 = nn.ParameterList(self.fc_dir_bias[(self.num_relations - 1):(- 1)])
        # Same structure again for the gating branch.
        self.fc_gate_weight = clones(nn.Linear(d_model, d_model, bias=False), 3)
        self.fc_gate_bias = [nn.Parameter(torch.zeros(d_model)) for _ in range(((self.num_relations * 2) - 1))]
        self.fc_gate_bias1 = nn.ParameterList(self.fc_gate_bias[(- 1):])
        self.fc_gate_bias2 = nn.ParameterList(self.fc_gate_bias[:(self.num_relations - 1)])
        self.fc_gate_bias3 = nn.ParameterList(self.fc_gate_bias[(self.num_relations - 1):(- 1)])

    def _compute_one_direction(self, x, fc, biases, adj_mat, relations, fc_gate, biases_gate):
        """Aggregate gated messages for one edge direction.

        For each relation label r, edges carrying r are selected via a 0/1
        mask and the biased, sigmoid-gated node features are summed over
        neighbors by matrix multiplication.
        """
        x = fc(x)
        # NOTE(review): the gate input is the already-transformed x (fc was
        # applied on the previous line) -- confirm it is not meant to be
        # fc_gate of the original node features.
        g = fc_gate(x)
        out = None
        for (r, bias, bias_gate) in zip(relations, biases, biases_gate):
            mask = (adj_mat == r).float()
            g1 = torch.sigmoid((g + bias_gate))
            res = torch.matmul(mask, ((x + bias) * g1))
            if (out is None):
                out = res
            else:
                out += res
        return out

    def forward(self, node, node_mask, adj_mat):
        """Sum forward, backward (transposed adjacency), and self-loop
        aggregations, then apply ReLU.

        `node_mask` is accepted but unused here -- presumably kept for
        interface parity; confirm.
        """
        out = self._compute_one_direction(node, self.fc_dir_weight[1], self.fc_dir_bias2, adj_mat, range(2, (self.num_relations + 1)), self.fc_gate_weight[1], self.fc_gate_bias2)
        adj_mat = adj_mat.transpose((- 1), (- 2))
        out += self._compute_one_direction(node, self.fc_dir_weight[2], self.fc_dir_bias3, adj_mat, range(2, (self.num_relations + 1)), self.fc_gate_weight[2], self.fc_gate_bias3)
        out += self._compute_one_direction(node, self.fc_dir_weight[0], self.fc_dir_bias1, adj_mat, [1], self.fc_gate_weight[0], self.fc_gate_bias1)
        return F.relu(out)
def avg_sq_ch_mean(model, input, output):
    """Forward-hook statistic: mean over channels of the squared per-channel
    mean of *output* (averaged over batch and spatial axes; assumes NCHW --
    TODO confirm). `model` and `input` are unused (hook signature)."""
    per_channel_mean = output.mean(axis=[0, 2, 3])
    return torch.mean(per_channel_mean ** 2).item()
def worker_init_rand(worker_id):
    """DataLoader worker init: seed `random` and numpy from torch's seed.

    numpy requires seeds below 2**32, hence the modulo; `worker_id` is unused
    because torch already gives each worker a distinct initial seed.
    """
    seed = torch.initial_seed()
    random.seed(seed)
    np.random.seed(seed % (2 ** 32))
class PyramidNet(nn.Module):
    """PyramidNet where the channel count grows additively by `addrate` at
    every residual block, configured for CIFAR- or ImageNet-style inputs.

    The CIFAR variant additionally supports RoI classification: when `boxes`
    is given to forward(), per-box features are pooled from the final feature
    map and classified with `fc_roi` (or the shared `fc` if share_fc=True).
    """

    def __init__(self, dataset, depth, alpha, num_classes, bottleneck=False):
        super(PyramidNet, self).__init__()
        self.dataset = dataset
        if self.dataset.startswith('cifar'):
            self.inplanes = 16
            # Blocks per stage: depth = 9n+2 (bottleneck) or 6n+2 (basic).
            if (bottleneck == True):
                n = int(((depth - 2) / 9))
                block = Bottleneck
            else:
                n = int(((depth - 2) / 6))
                block = BasicBlock
            # Per-block channel increment: alpha spread over all 3n blocks.
            self.addrate = (alpha / ((3 * n) * 1.0))
            self.input_featuremap_dim = self.inplanes
            self.conv1 = nn.Conv2d(3, self.input_featuremap_dim, kernel_size=3, stride=1, padding=1, bias=False)
            self.bn1 = nn.BatchNorm2d(self.input_featuremap_dim)
            self.featuremap_dim = self.input_featuremap_dim
            self.layer1 = self.pyramidal_make_layer(block, n)
            self.layer2 = self.pyramidal_make_layer(block, n, stride=2)
            self.layer3 = self.pyramidal_make_layer(block, n, stride=2)
            self.final_featuremap_dim = self.input_featuremap_dim
            self.bn_final = nn.BatchNorm2d(self.final_featuremap_dim)
            self.relu_final = nn.ReLU(inplace=True)
            self.avgpool = nn.AvgPool2d(8)
            self.fc = nn.Linear(self.final_featuremap_dim, num_classes)
            # Separate classification head for RoI features (see forward()).
            self.fc_roi = nn.Linear(self.final_featuremap_dim, num_classes)
        elif (self.dataset == 'imagenet'):
            blocks = {18: BasicBlock, 34: BasicBlock, 50: Bottleneck, 101: Bottleneck, 152: Bottleneck, 200: Bottleneck}
            layers = {18: [2, 2, 2, 2], 34: [3, 4, 6, 3], 50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3], 200: [3, 24, 36, 3]}
            if (layers.get(depth) is None):
                # Non-standard depth: derive a uniform 4-stage configuration.
                if (bottleneck == True):
                    blocks[depth] = Bottleneck
                    temp_cfg = int(((depth - 2) / 12))
                else:
                    blocks[depth] = BasicBlock
                    temp_cfg = int(((depth - 2) / 8))
                layers[depth] = [temp_cfg, temp_cfg, temp_cfg, temp_cfg]
                print('=> the layer configuration for each stage is set to', layers[depth])
            self.inplanes = 64
            self.addrate = (alpha / (sum(layers[depth]) * 1.0))
            self.input_featuremap_dim = self.inplanes
            self.conv1 = nn.Conv2d(3, self.input_featuremap_dim, kernel_size=7, stride=2, padding=3, bias=False)
            self.bn1 = nn.BatchNorm2d(self.input_featuremap_dim)
            self.relu = nn.ReLU(inplace=True)
            self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
            self.featuremap_dim = self.input_featuremap_dim
            self.layer1 = self.pyramidal_make_layer(blocks[depth], layers[depth][0])
            self.layer2 = self.pyramidal_make_layer(blocks[depth], layers[depth][1], stride=2)
            self.layer3 = self.pyramidal_make_layer(blocks[depth], layers[depth][2], stride=2)
            self.layer4 = self.pyramidal_make_layer(blocks[depth], layers[depth][3], stride=2)
            self.final_featuremap_dim = self.input_featuremap_dim
            self.bn_final = nn.BatchNorm2d(self.final_featuremap_dim)
            self.relu_final = nn.ReLU(inplace=True)
            self.avgpool = nn.AvgPool2d(7)
            self.fc = nn.Linear(self.final_featuremap_dim, num_classes)
        # He-style init for convs; BatchNorm initialized to identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def pyramidal_make_layer(self, block, block_depth, stride=1):
        """Build one stage of `block_depth` blocks, growing the (fractional)
        channel count by `addrate` per block; downsampling is done with
        average pooling rather than strided convolution."""
        downsample = None
        if (stride != 1):
            downsample = nn.AvgPool2d((2, 2), stride=(2, 2), ceil_mode=True)
        layers = []
        self.featuremap_dim = (self.featuremap_dim + self.addrate)
        layers.append(block(self.input_featuremap_dim, int(round(self.featuremap_dim)), stride, downsample))
        for i in range(1, block_depth):
            temp_featuremap_dim = (self.featuremap_dim + self.addrate)
            layers.append(block((int(round(self.featuremap_dim)) * block.outchannel_ratio), int(round(temp_featuremap_dim)), 1))
            self.featuremap_dim = temp_featuremap_dim
        self.input_featuremap_dim = (int(round(self.featuremap_dim)) * block.outchannel_ratio)
        return nn.Sequential(*layers)

    def forward(self, x, boxes=None, share_fc=False):
        """Classify images; for CIFAR inputs, optionally classify RoI boxes.

        Returns logits, or (logits, roi_logits) when `boxes` is given.
        NOTE(review): `boxes` is only handled in the CIFAR branch; passing
        boxes with dataset='imagenet' would reference an undefined `out_roi`.
        """
        if ((self.dataset == 'cifar10') or (self.dataset == 'cifar100')):
            bs = x.shape[0]
            sz = x.shape[(- 1)]
            x = self.conv1(x)
            x = self.bn1(x)
            x = self.layer1(x)
            x = self.layer2(x)
            x = self.layer3(x)
            x = self.bn_final(x)
            x = self.relu_final(x)
            feat_map = x
            x = self.avgpool(x)
            x = x.view(x.size(0), (- 1))
            x = self.fc(x)
            if (boxes is not None):
                # roi_align expects boxes prefixed with their batch index.
                index = torch.arange(bs).view((- 1), 1).to(x.device)
                boxes = torch.cat([index, boxes], 1)
                # Scale boxes from input resolution to feature-map resolution.
                spatial_scale = (feat_map.shape[(- 1)] / sz)
                roi_feat = roi_align(feat_map, boxes, output_size=(1, 1), spatial_scale=spatial_scale, sampling_ratio=(- 1), aligned=True).squeeze()
                if share_fc:
                    out_roi = self.fc(roi_feat)
                else:
                    out_roi = self.fc_roi(roi_feat)
        elif (self.dataset == 'imagenet'):
            x = self.conv1(x)
            x = self.bn1(x)
            x = self.relu(x)
            x = self.maxpool(x)
            x = self.layer1(x)
            x = self.layer2(x)
            x = self.layer3(x)
            x = self.layer4(x)
            x = self.bn_final(x)
            x = self.relu_final(x)
            x = self.avgpool(x)
            x = x.view(x.size(0), (- 1))
            x = self.fc(x)
        if (boxes is not None):
            return (x, out_roi)
        return x
def load_args():
    """Parse command-line arguments and prepare the nested output directory.

    When --outdir is set, builds outdir/transformer/<dataset>[/zero_diag]/
    <gckn-config>/<hparam-config>/fold-<k> (creating each level best-effort)
    and stores the final path back into args.outdir.
    """
    parser = argparse.ArgumentParser(description='Transformer baseline', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--seed', type=int, default=0, help='random seed')
    parser.add_argument('--dataset', type=str, default='NCI1', help='name of dataset')
    parser.add_argument('--nb-heads', type=int, default=4)
    parser.add_argument('--nb-layers', type=int, default=3)
    parser.add_argument('--dim-hidden', type=int, default=64)
    parser.add_argument('--pos-enc', choices=[None, 'diffusion', 'pstep', 'adj'], default=None)
    parser.add_argument('--gckn-dim', type=int, default=32, help='dimension for laplacian PE')
    parser.add_argument('--gckn-path', type=int, default=5, help='path size for gckn')
    parser.add_argument('--gckn-sigma', type=float, default=0.6)
    parser.add_argument('--gckn-pooling', default='sum', choices=['mean', 'sum'])
    parser.add_argument('--gckn-agg', action='store_false', help='do not use aggregated GCKN features')
    parser.add_argument('--gckn-normalize', action='store_false', help='do not normalize gckn features')
    parser.add_argument('--p', type=int, default=1, help='p step random walk kernel')
    parser.add_argument('--beta', type=float, default=1.0, help='bandwidth for the diffusion kernel')
    parser.add_argument('--normalization', choices=[None, 'sym', 'rw'], default='sym', help='normalization for Laplacian')
    parser.add_argument('--dropout', type=float, default=0.0)
    parser.add_argument('--epochs', type=int, default=300, help='number of epochs')
    parser.add_argument('--lr', type=float, default=0.001, help='initial learning rate')
    parser.add_argument('--batch-size', type=int, default=32, help='batch size')
    parser.add_argument('--outdir', type=str, default='', help='output path')
    parser.add_argument('--warmup', type=int, default=2000)
    parser.add_argument('--batch-norm', action='store_true', help='use batch norm instead of layer norm')
    parser.add_argument('--zero-diag', action='store_true', help='zero diagonal for PE matrix')
    parser.add_argument('--fold-idx', type=int, default=1, help='indices for the train/test datasets')
    parser.add_argument('--weight-decay', type=float, default=0.0001)
    parser.add_argument('--test', action='store_true', help='train on full train+val dataset')
    args = parser.parse_args()
    args.use_cuda = torch.cuda.is_available()
    args.save_logs = False
    if (args.outdir != ''):
        args.save_logs = True

        def ensure_dir(path):
            # Best-effort creation: another concurrent job may create (or
            # fail to create) the directory; never raise here.
            if not os.path.exists(path):
                try:
                    os.makedirs(path)
                except Exception:
                    pass

        # Build the nested output path one level at a time (refactored from
        # eight copies of the same if/try/except block).
        outdir = args.outdir
        ensure_dir(outdir)
        outdir = outdir + '/transformer'
        ensure_dir(outdir)
        outdir = outdir + '/{}'.format(args.dataset)
        ensure_dir(outdir)
        if args.zero_diag:
            outdir = outdir + '/zero_diag'
            ensure_dir(outdir)
        lapdir = 'gckn_{}_{}_{}_{}_{}_{}'.format(args.gckn_path, args.gckn_dim, args.gckn_sigma, args.gckn_pooling, args.gckn_agg, args.gckn_normalize)
        outdir = outdir + '/{}'.format(lapdir)
        ensure_dir(outdir)
        bn = 'BN' if args.batch_norm else 'LN'
        outdir = outdir + '/{}_{}_{}_{}_{}_{}_{}_{}_{}_{}_{}'.format(args.weight_decay, args.dropout, args.lr, args.nb_layers, args.nb_heads, args.dim_hidden, bn, args.pos_enc, args.normalization, args.p, args.beta)
        ensure_dir(outdir)
        outdir = outdir + '/fold-{}'.format(args.fold_idx)
        ensure_dir(outdir)
        args.outdir = outdir
    return args
def attention_from_original_checkpoint(model, diffuser_attention_prefix, original_attention_prefix):
    """Remap one attention block's weights from an original checkpoint into
    diffusers naming; returns the converted {new_key: tensor} dict."""
    # (diffusers sub-path, original sub-path); weight and bias for each.
    key_pairs = (
        ('attention.query', 'self.query'),
        ('attention.key', 'self.key'),
        ('attention.value', 'self.value'),
        ('output.dense', 'output.dense'),
        ('output.LayerNorm', 'output.LayerNorm'),
    )
    attention = {}
    for new_sub, old_sub in key_pairs:
        for param in ('weight', 'bias'):
            new_key = f'{diffuser_attention_prefix}.{new_sub}.{param}'
            old_key = f'{original_attention_prefix}.{old_sub}.{param}'
            attention[new_key] = model[old_key]
    return attention
class AdamW(Optimizer):
    """Adam with decoupled weight decay and optional linear LR warmup.

    Arguments:
        params: iterable of parameters or parameter groups.
        lr: learning rate.
        betas: exponential decay rates for the first/second moment estimates.
        eps: term added to the denominator for numerical stability.
        weight_decay: decoupled weight decay coefficient (applied directly to
            the parameters, not through the gradient).
        warmup: number of steps over which the LR ramps linearly from ~0.
    """

    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, warmup=0):
        if (not (0.0 <= lr)):
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if (not (0.0 <= eps)):
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if (not (0.0 <= betas[0] < 1.0)):
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if (not (0.0 <= betas[1] < 1.0)):
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, warmup=warmup)
        super(AdamW, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(AdamW, self).__setstate__(state)

    def step(self, closure=None):
        """Perform a single optimization step; returns the closure loss, if any."""
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                # Work in fp32 even for half-precision parameters.
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                p_data_fp32 = p.data.float()
                state = self.state[p]
                if (len(state) == 0):
                    # Lazy state initialization on first step for this param.
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
                (exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq'])
                (beta1, beta2) = group['betas']
                state['step'] += 1
                # BUG FIX: the deprecated positional Scalar-first overloads
                # (addcmul_(scalar, t1, t2) etc.) were removed in modern
                # PyTorch; use the alpha=/value= keyword forms instead.
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
                exp_avg.mul_(beta1).add_(grad, alpha=(1 - beta1))
                denom = exp_avg_sq.sqrt().add_(group['eps'])
                bias_correction1 = (1 - (beta1 ** state['step']))
                bias_correction2 = (1 - (beta2 ** state['step']))
                # Linear warmup of the learning rate over the first `warmup` steps.
                if (group['warmup'] > state['step']):
                    scheduled_lr = (1e-08 + ((state['step'] * group['lr']) / group['warmup']))
                else:
                    scheduled_lr = group['lr']
                step_size = ((scheduled_lr * math.sqrt(bias_correction2)) / bias_correction1)
                if (group['weight_decay'] != 0):
                    # Decoupled weight decay, scaled by the scheduled LR.
                    p_data_fp32.add_(p_data_fp32, alpha=((- group['weight_decay']) * scheduled_lr))
                p_data_fp32.addcdiv_(exp_avg, denom, value=(- step_size))
                p.data.copy_(p_data_fp32)
        return loss
def cifar10_loader(args):
    """Build CIFAR-10 train/test datasets with the standard augmentation
    pipeline; also sets args.num_classes = 10 as a side effect."""
    args.num_classes = 10
    normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))
    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    test_transform = transforms.Compose([transforms.ToTensor(), normalize])
    train_set = datasets.CIFAR10(root=args.data_dir, train=True, download=False, transform=train_transform)
    test_set = datasets.CIFAR10(root=args.data_dir, train=False, download=False, transform=test_transform)
    return (train_set, test_set)
def train(model, dataloader, optimizer, criterion, epoch_number, max_gradient_norm):
    """Train *model* for one epoch over *dataloader*.

    Returns a ``(epoch_time, epoch_loss, epoch_accuracy)`` tuple.
    """
    model.train()
    device = model.device
    epoch_start = time.time()
    batch_time_total = 0.0
    loss_total = 0.0
    n_correct = 0
    batch_iter = tqdm(dataloader)
    for batch_idx, batch in enumerate(batch_iter):
        t0 = time.time()
        # Move every field of the batch onto the model's device.
        premises = batch['premise'].to(device)
        premises_lengths = batch['premise_length'].to(device)
        hypotheses = batch['hypothesis'].to(device)
        hypotheses_lengths = batch['hypothesis_length'].to(device)
        labels = batch['label'].to(device)
        optimizer.zero_grad()
        logits, probs = model(premises, premises_lengths, hypotheses, hypotheses_lengths)
        loss = criterion(logits, labels)
        loss.backward()
        # Clip gradients to stabilize training.
        nn.utils.clip_grad_norm_(model.parameters(), max_gradient_norm)
        optimizer.step()
        batch_time_total += time.time() - t0
        loss_total += loss.item()
        n_correct += correct_predictions(probs, labels)
        batch_iter.set_description('Avg. batch proc. time: {:.4f}s, loss: {:.4f}'.format(batch_time_total / (batch_idx + 1), loss_total / (batch_idx + 1)))
    epoch_time = time.time() - epoch_start
    epoch_loss = loss_total / len(dataloader)
    epoch_accuracy = n_correct / len(dataloader.dataset)
    return (epoch_time, epoch_loss, epoch_accuracy)
class DictConfig():
    """Simple attribute container whose public state can be exported as a dict."""

    def __init__(self):
        pass

    def to_dict(self):
        """Return instance attributes, dropping known callables/flags and private names."""
        skip = ('self', 'model_fn', 'loss_fn', 'build_model', 'is_multimodal')
        out = {}
        for key, value in vars(self).items():
            if key in skip or key.startswith('_'):
                continue
            out[key] = value
        return out
def _strip_snodes(base_graph: ag.Graph) -> ag.Graph:
    """Return a copy of *base_graph* with its S-nodes removed (original untouched)."""
    stripped = base_graph.copy(nlp=nlp.parse)
    stripped.strip_snodes()
    return stripped
def main():
    """Patch ``jheppub.sty`` in place, keeping a ``.bak`` copy of the original."""
    needle = '\\newcommand\\{\\renewcommand\\{}\\renewcommand\\{}}'
    replacement = make_patch()
    # Back the file up first, then rewrite the original from the backup.
    copyfile('jheppub.sty', 'jheppub.sty.bak')
    with open('jheppub.sty.bak') as src, open('jheppub.sty', 'w+') as dst:
        for line in src:
            dst.write(line.replace(needle, replacement))
def main():
    """Fine-tune a seq2seq model for summarization without `Trainer` (accelerate-based).

    Parses CLI args, loads data/model/tokenizer, tokenizes, trains (with
    optional checkpoint resume and experiment tracking), evaluates with
    ROUGE each epoch, and saves/pushes the final model.
    """
    args = parse_args()
    # Anonymous usage telemetry for this example script.
    send_example_telemetry('run_summarization_no_trainer', args)
    # --- Accelerator / logging setup ---
    accelerator_log_kwargs = {}
    if args.with_tracking:
        accelerator_log_kwargs['log_with'] = args.report_to
        accelerator_log_kwargs['logging_dir'] = args.output_dir
    accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, **accelerator_log_kwargs)
    if ((args.source_prefix is None) and (args.model_name_or_path in ['t5-small', 't5-base', 't5-large', 't5-3b', 't5-11b'])):
        logger.warning("You're running a t5 model but didn't provide a source prefix, which is the expected, e.g. with `--source_prefix 'summarize: ' `")
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
    logger.info(accelerator.state, main_process_only=False)
    # Only the local main process emits library-level progress logs.
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    if (args.seed is not None):
        set_seed(args.seed)
    # --- Output dir / Hub repository setup (main process only) ---
    if accelerator.is_main_process:
        if args.push_to_hub:
            if (args.hub_model_id is None):
                repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
            else:
                repo_name = args.hub_model_id
            create_repo(repo_name, exist_ok=True, token=args.hub_token)
            repo = Repository(args.output_dir, clone_from=repo_name, token=args.hub_token)
            with open(os.path.join(args.output_dir, '.gitignore'), 'w+') as gitignore:
                # NOTE(review): 'w+' truncates the file, so these membership tests
                # iterate an empty file and are always True — confirm whether
                # 'a+' plus a seek was intended.
                if ('step_*' not in gitignore):
                    gitignore.write('step_*\n')
                if ('epoch_*' not in gitignore):
                    gitignore.write('epoch_*\n')
        elif (args.output_dir is not None):
            os.makedirs(args.output_dir, exist_ok=True)
    accelerator.wait_for_everyone()
    # --- Load raw datasets: a Hub dataset, or local train/validation files ---
    if (args.dataset_name is not None):
        raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
    else:
        data_files = {}
        if (args.train_file is not None):
            data_files['train'] = args.train_file
        if (args.validation_file is not None):
            data_files['validation'] = args.validation_file
        # The file extension (csv/json) selects the dataset loading script.
        extension = args.train_file.split('.')[(- 1)]
        raw_datasets = load_dataset(extension, data_files=data_files)
    # --- Config / tokenizer / model ---
    if args.config_name:
        config = AutoConfig.from_pretrained(args.config_name)
    elif args.model_name_or_path:
        config = AutoConfig.from_pretrained(args.model_name_or_path)
    else:
        config = CONFIG_MAPPING[args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.')
    if args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=(not args.use_slow_tokenizer))
    elif args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=(not args.use_slow_tokenizer))
    else:
        raise ValueError('You are instantiating a new tokenizer from scratch. This is not supported by this script.You can do it from another script, save it, and load it from here, using --tokenizer_name.')
    if args.model_name_or_path:
        model = AutoModelForSeq2SeqLM.from_pretrained(args.model_name_or_path, from_tf=bool(('.ckpt' in args.model_name_or_path)), config=config)
    else:
        logger.info('Training new model from scratch')
        model = AutoModelForSeq2SeqLM.from_config(config)
    # Grow the embedding matrix if the tokenizer vocabulary is larger than it.
    embedding_size = model.get_input_embeddings().weight.shape[0]
    if (len(tokenizer) > embedding_size):
        model.resize_token_embeddings(len(tokenizer))
    if (model.config.decoder_start_token_id is None):
        raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined')
    prefix = (args.source_prefix if (args.source_prefix is not None) else '')
    # --- Resolve which dataset columns hold the article text and the summary ---
    column_names = raw_datasets['train'].column_names
    dataset_columns = summarization_name_mapping.get(args.dataset_name, None)
    if (args.text_column is None):
        text_column = (dataset_columns[0] if (dataset_columns is not None) else column_names[0])
    else:
        text_column = args.text_column
        if (text_column not in column_names):
            raise ValueError(f"--text_column' value '{args.text_column}' needs to be one of: {', '.join(column_names)}")
    if (args.summary_column is None):
        summary_column = (dataset_columns[1] if (dataset_columns is not None) else column_names[1])
    else:
        summary_column = args.summary_column
        if (summary_column not in column_names):
            raise ValueError(f"--summary_column' value '{args.summary_column}' needs to be one of: {', '.join(column_names)}")
    if (args.val_max_target_length is None):
        args.val_max_target_length = args.max_target_length
    max_target_length = args.max_target_length
    padding = ('max_length' if args.pad_to_max_length else False)

    def preprocess_function(examples):
        # Tokenize articles (with the optional task prefix) and summaries.
        inputs = examples[text_column]
        targets = examples[summary_column]
        inputs = [(prefix + inp) for inp in inputs]
        model_inputs = tokenizer(inputs, max_length=args.max_source_length, padding=padding, truncation=True)
        labels = tokenizer(text_target=targets, max_length=max_target_length, padding=padding, truncation=True)
        if ((padding == 'max_length') and args.ignore_pad_token_for_loss):
            # Replace pad tokens by -100 so the loss ignores them.
            labels['input_ids'] = [[(l if (l != tokenizer.pad_token_id) else (- 100)) for l in label] for label in labels['input_ids']]
        model_inputs['labels'] = labels['input_ids']
        return model_inputs
    with accelerator.main_process_first():
        train_dataset = raw_datasets['train'].map(preprocess_function, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not args.overwrite_cache), desc='Running tokenizer on dataset')
        # Validation targets may use a different max length (closed over above).
        max_target_length = args.val_max_target_length
        eval_dataset = raw_datasets['validation'].map(preprocess_function, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not args.overwrite_cache), desc='Running tokenizer on dataset')
    # Log a random training sample for sanity checking.
    for index in random.sample(range(len(train_dataset)), 1):
        logger.info(f'Sample {index} of the training set: {train_dataset[index]}.')
    label_pad_token_id = ((- 100) if args.ignore_pad_token_for_loss else tokenizer.pad_token_id)
    data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, label_pad_token_id=label_pad_token_id, pad_to_multiple_of=(8 if accelerator.use_fp16 else None))

    def postprocess_text(preds, labels):
        # rougeLSum expects newline-separated sentences.
        preds = [pred.strip() for pred in preds]
        labels = [label.strip() for label in labels]
        preds = ['\n'.join(nltk.sent_tokenize(pred)) for pred in preds]
        labels = ['\n'.join(nltk.sent_tokenize(label)) for label in labels]
        return (preds, labels)
    train_dataloader = DataLoader(train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size)
    eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size)
    # --- Optimizer with decay / no-decay parameter groups ---
    no_decay = ['bias', 'LayerNorm.weight', 'layer_norm.weight']
    optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
    optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
    overrode_max_train_steps = False
    num_update_steps_per_epoch = math.ceil((len(train_dataloader) / args.gradient_accumulation_steps))
    if (args.max_train_steps is None):
        args.max_train_steps = (args.num_train_epochs * num_update_steps_per_epoch)
        overrode_max_train_steps = True
    lr_scheduler = get_scheduler(name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=(args.num_warmup_steps * args.gradient_accumulation_steps), num_training_steps=(args.max_train_steps * args.gradient_accumulation_steps))
    (model, optimizer, train_dataloader, eval_dataloader, lr_scheduler) = accelerator.prepare(model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # Recompute after `prepare`, since sharding can change the dataloader length.
    num_update_steps_per_epoch = math.ceil((len(train_dataloader) / args.gradient_accumulation_steps))
    if overrode_max_train_steps:
        args.max_train_steps = (args.num_train_epochs * num_update_steps_per_epoch)
    args.num_train_epochs = math.ceil((args.max_train_steps / num_update_steps_per_epoch))
    checkpointing_steps = args.checkpointing_steps
    if ((checkpointing_steps is not None) and checkpointing_steps.isdigit()):
        checkpointing_steps = int(checkpointing_steps)
    if args.with_tracking:
        experiment_config = vars(args)
        # Trackers cannot log Enums; store the raw value instead.
        experiment_config['lr_scheduler_type'] = experiment_config['lr_scheduler_type'].value
        accelerator.init_trackers('summarization_no_trainer', experiment_config)
    metric = evaluate.load('rouge')
    total_batch_size = ((args.per_device_train_batch_size * accelerator.num_processes) * args.gradient_accumulation_steps)
    logger.info('***** Running training *****')
    logger.info(f' Num examples = {len(train_dataset)}')
    logger.info(f' Num Epochs = {args.num_train_epochs}')
    logger.info(f' Instantaneous batch size per device = {args.per_device_train_batch_size}')
    logger.info(f' Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}')
    logger.info(f' Gradient Accumulation steps = {args.gradient_accumulation_steps}')
    logger.info(f' Total optimization steps = {args.max_train_steps}')
    progress_bar = tqdm(range(args.max_train_steps), disable=(not accelerator.is_local_main_process))
    completed_steps = 0
    starting_epoch = 0
    # --- Optionally resume from a saved checkpoint ---
    if args.resume_from_checkpoint:
        # NOTE(review): with a truthy value this `or` condition is always True, so
        # the auto-detect (else) branch below is unreachable; this mirrors the
        # upstream example script — confirm whether `and` was intended.
        if ((args.resume_from_checkpoint is not None) or (args.resume_from_checkpoint != '')):
            accelerator.print(f'Resumed from checkpoint: {args.resume_from_checkpoint}')
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Fall back to the most recently created checkpoint directory.
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[(- 1)]
        # Checkpoint dirs are named `epoch_{E}` or `step_{S}`.
        training_difference = os.path.splitext(path)[0]
        if ('epoch' in training_difference):
            starting_epoch = (int(training_difference.replace('epoch_', '')) + 1)
            resume_step = None
        else:
            resume_step = int(training_difference.replace('step_', ''))
            starting_epoch = (resume_step // len(train_dataloader))
            resume_step -= (starting_epoch * len(train_dataloader))
    # --- Training loop ---
    for epoch in range(starting_epoch, args.num_train_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        for (step, batch) in enumerate(train_dataloader):
            # Skip steps already covered by a resumed step-checkpoint.
            if (args.resume_from_checkpoint and (epoch == starting_epoch)):
                if ((resume_step is not None) and (step < resume_step)):
                    completed_steps += 1
                    continue
            with accelerator.accumulate(model):
                outputs = model(**batch)
                loss = outputs.loss
                if args.with_tracking:
                    total_loss += loss.detach().float()
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            # `sync_gradients` is True once per effective (accumulated) step.
            if accelerator.sync_gradients:
                progress_bar.update(1)
                completed_steps += 1
            if isinstance(checkpointing_steps, int):
                if ((completed_steps % checkpointing_steps) == 0):
                    output_dir = f'step_{completed_steps}'
                    if (args.output_dir is not None):
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
            if (completed_steps >= args.max_train_steps):
                break
        # --- Per-epoch evaluation with ROUGE ---
        model.eval()
        gen_kwargs = {'max_length': args.val_max_target_length, 'num_beams': args.num_beams}
        for (step, batch) in enumerate(eval_dataloader):
            with torch.no_grad():
                generated_tokens = accelerator.unwrap_model(model).generate(batch['input_ids'], attention_mask=batch['attention_mask'], **gen_kwargs)
                generated_tokens = accelerator.pad_across_processes(generated_tokens, dim=1, pad_index=tokenizer.pad_token_id)
                labels = batch['labels']
                if (not args.pad_to_max_length):
                    # Labels must also be padded before gathering across processes.
                    labels = accelerator.pad_across_processes(batch['labels'], dim=1, pad_index=tokenizer.pad_token_id)
                (generated_tokens, labels) = accelerator.gather_for_metrics((generated_tokens, labels))
                generated_tokens = generated_tokens.cpu().numpy()
                labels = labels.cpu().numpy()
                if args.ignore_pad_token_for_loss:
                    # Undo the -100 masking so the tokenizer can decode labels.
                    labels = np.where((labels != (- 100)), labels, tokenizer.pad_token_id)
                if isinstance(generated_tokens, tuple):
                    generated_tokens = generated_tokens[0]
                decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
                decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
                (decoded_preds, decoded_labels) = postprocess_text(decoded_preds, decoded_labels)
                metric.add_batch(predictions=decoded_preds, references=decoded_labels)
        result = metric.compute(use_stemmer=True)
        result = {k: round((v * 100), 4) for (k, v) in result.items()}
        logger.info(result)
        if args.with_tracking:
            result['train_loss'] = (total_loss.item() / len(train_dataloader))
            result['epoch'] = epoch
            result['step'] = completed_steps
            accelerator.log(result, step=completed_steps)
        # Push an in-progress snapshot every epoch except the last.
        if (args.push_to_hub and (epoch < (args.num_train_epochs - 1))):
            accelerator.wait_for_everyone()
            unwrapped_model = accelerator.unwrap_model(model)
            unwrapped_model.save_pretrained(args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save)
            if accelerator.is_main_process:
                tokenizer.save_pretrained(args.output_dir)
                repo.push_to_hub(commit_message=f'Training in progress epoch {epoch}', blocking=False, auto_lfs_prune=True)
        if (args.checkpointing_steps == 'epoch'):
            output_dir = f'epoch_{epoch}'
            if (args.output_dir is not None):
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    # --- Final save (and optional Hub push / metrics dump) ---
    if (args.output_dir is not None):
        accelerator.wait_for_everyone()
        unwrapped_model = accelerator.unwrap_model(model)
        unwrapped_model.save_pretrained(args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save)
        if accelerator.is_main_process:
            tokenizer.save_pretrained(args.output_dir)
            if args.push_to_hub:
                repo.push_to_hub(commit_message='End of training', auto_lfs_prune=True)
            all_results = {f'eval_{k}': v for (k, v) in result.items()}
            with open(os.path.join(args.output_dir, 'all_results.json'), 'w') as f:
                json.dump(all_results, f)
class FairseqDecoder(nn.Module):
    """Base decoder: maps previous output tokens (plus optional encoder state)
    to output-layer scores. Subclasses implement `extract_features` and
    `output_layer`."""

    def __init__(self, dictionary):
        super().__init__()
        self.dictionary = dictionary
        self.onnx_trace = False
        self.adaptive_softmax = None

    def forward(self, prev_output_tokens, encoder_out=None, **kwargs):
        """Run feature extraction followed by the output projection."""
        features, extra = self.extract_features(prev_output_tokens, encoder_out=encoder_out, **kwargs)
        return (self.output_layer(features), extra)

    def extract_features(self, prev_output_tokens, encoder_out=None, **kwargs):
        """Subclasses produce a ``(features, extra)`` pair here."""
        raise NotImplementedError

    def output_layer(self, features, **kwargs):
        """Subclasses project features to vocabulary scores here."""
        raise NotImplementedError

    def get_normalized_probs(self, net_output: Tuple[(Tensor, Optional[Dict[(str, List[Optional[Tensor]])]])], log_probs: bool, sample: Optional[Dict[(str, Tensor)]]=None):
        """Normalize raw model output into (log-)probabilities."""
        return self.get_normalized_probs_scriptable(net_output, log_probs, sample)

    def get_normalized_probs_scriptable(self, net_output: Tuple[(Tensor, Optional[Dict[(str, List[Optional[Tensor]])]])], log_probs: bool, sample: Optional[Dict[(str, Tensor)]]=None):
        # TorchScript-compatible helper split out of get_normalized_probs.
        if (hasattr(self, 'adaptive_softmax') and (self.adaptive_softmax is not None)):
            if (sample is None):
                target = None
            else:
                assert ('target' in sample)
                target = sample['target']
            out = self.adaptive_softmax.get_log_prob(net_output[0], target=target)
            # Convert in place to plain probabilities when log_probs is False.
            return (out if log_probs else out.exp_())
        logits = net_output[0]
        if log_probs:
            return utils.log_softmax(logits, dim=(- 1), onnx_trace=self.onnx_trace)
        return utils.softmax(logits, dim=(- 1), onnx_trace=self.onnx_trace)

    def max_positions(self):
        """Maximum supported input length for the decoder."""
        return 1000000.0

    def upgrade_state_dict_named(self, state_dict, name):
        """Hook for migrating old checkpoints; default is a no-op."""
        return state_dict

    def prepare_for_onnx_export_(self):
        self.onnx_trace = True
class Trainer():
    """Super-resolution training/evaluation driver.

    Wires together the data loaders, model, loss, optimizer and the
    checkpoint helper (``ckp``) used for logging and result saving.
    """

    def __init__(self, args, loader, my_model, my_loss, ckp):
        self.args = args
        self.scale = args.scale
        self.quality = args.quality
        self.ckp = ckp
        self.loader_train = loader.loader_train
        self.loader_test = loader.loader_test
        self.model = my_model
        self.loss = my_loss
        self.optimizer = utility.make_optimizer(args, self.model)
        # Resume optimizer state when continuing from a previous run.
        if (self.args.load != ''):
            self.optimizer.load(ckp.dir, epoch=len(ckp.log))
        self.error_last = .0

    def train(self):
        """Run one training epoch over ``loader_train`` (scale index 0 only)."""
        self.loss.step()
        epoch = (self.optimizer.get_last_epoch() + 1)
        lr = self.optimizer.get_lr()
        self.ckp.write_log('[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr)))
        self.loss.start_log()
        self.model.train()
        (timer_data, timer_model) = (utility.timer(), utility.timer())
        self.loader_train.dataset.set_scale(0)
        # NOTE: inside this loop `lr` is rebound from the learning rate to the
        # low-resolution input batch (`hr` is the high-resolution target).
        for (batch, (lr, hr, _)) in enumerate(self.loader_train):
            (lr, hr) = self.prepare(lr, hr)
            timer_data.hold()
            timer_model.tic()
            self.optimizer.zero_grad()
            sr = self.model(lr, 0)
            loss = self.loss(sr, hr)
            loss.backward()
            if (self.args.gclip > 0):
                # Optional gradient clipping for stability.
                utils.clip_grad_value_(self.model.parameters(), self.args.gclip)
            self.optimizer.step()
            timer_model.hold()
            if (((batch + 1) % self.args.print_every) == 0):
                self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(((batch + 1) * self.args.batch_size), len(self.loader_train.dataset), self.loss.display_loss(batch), timer_model.release(), timer_data.release()))
            timer_data.tic()
        self.loss.end_log(len(self.loader_train))
        self.error_last = self.loss.log[((- 1), (- 1))]
        self.optimizer.schedule()

    def test(self):
        """Evaluate on all test sets / scales, log PSNR and optionally save outputs."""
        torch.set_grad_enabled(False)
        epoch = self.optimizer.get_last_epoch()
        self.ckp.write_log('\nEvaluation:')
        self.ckp.add_log(torch.zeros(1, len(self.loader_test), len(self.scale)))
        self.model.eval()
        timer_test = utility.timer()
        if self.args.save_results:
            self.ckp.begin_background()
        for (idx_data, d) in enumerate(self.loader_test):
            for (idx_scale, scale) in enumerate(self.scale):
                d.dataset.set_scale(idx_scale)
                for (lr, hr, filename) in tqdm(d, ncols=80):
                    (lr, hr) = self.prepare(lr, hr)
                    sr = self.model(lr, idx_scale)
                    sr = utility.quantize(sr, self.args.rgb_range)
                    quality = self.quality
                    save_list = [sr]
                    # Accumulate PSNR over samples; averaged after the loop.
                    self.ckp.log[((- 1), idx_data, idx_scale)] += utility.calc_psnr(sr, hr, scale, self.args.rgb_range, dataset=d)
                    if self.args.save_gt:
                        save_list.extend([hr])
                    if self.args.save_results:
                        self.ckp.save_results(d, filename[0], save_list, scale, quality)
                self.ckp.log[((- 1), idx_data, idx_scale)] /= len(d)
                best = self.ckp.log.max(0)
                self.ckp.write_log('[{} N{}0]\tPSNR: {:.3f} (Best: {:.3f} {})'.format(d.dataset.name, quality, self.ckp.log[((- 1), idx_data, idx_scale)], best[0][(idx_data, idx_scale)], (best[1][(idx_data, idx_scale)] + 1)))
        self.ckp.write_log('Forward: {:.2f}s\n'.format(timer_test.toc()))
        self.ckp.write_log('Saving...')
        if self.args.save_results:
            self.ckp.end_background()
        if (not self.args.test_only):
            # Save a checkpoint; flag as best if this epoch tops dataset 0, scale 0.
            self.ckp.save(self, epoch, is_best=((best[1][(0, 0)] + 1) == epoch))
        self.ckp.write_log('Total: {:.2f}s\n'.format(timer_test.toc()), refresh=True)
        torch.set_grad_enabled(True)

    def prepare(self, *args):
        """Move tensors to the target device, optionally casting to half precision."""
        device = torch.device(('cpu' if self.args.cpu else 'cuda'))

        def _prepare(tensor):
            if (self.args.precision == 'half'):
                tensor = tensor.half()
            return tensor.to(device)
        return [_prepare(a) for a in args]

    def terminate(self):
        """Return True when the run should stop (test-only mode runs one test first)."""
        if self.args.test_only:
            self.test()
            return True
        else:
            epoch = (self.optimizer.get_last_epoch() + 1)
            return (epoch >= self.args.epochs)
class Bottleneck(nn.Module):
    """ResNet bottleneck block with stochastic depth: with probability
    ``1 - prob`` the residual branch is skipped for a forward pass."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, prob=None, downsample=None):
        super(Bottleneck, self).__init__()
        # 1x1 reduce -> 3x3 (possibly strided) -> 1x1 expand (x4).
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, (planes * 4), kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d((planes * 4))
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.prob = prob
        # When frozen, the last sampled `choice` is reused instead of resampling.
        self.freeze = False
        self.choice = np.random.binomial(size=1, n=1, p=self.prob)[0]

    def set_freeze(self):
        self.freeze = True

    def unset_freeze(self):
        self.freeze = False

    def forward(self, x):
        identity = self.downsample(x) if self.downsample is not None else x
        if not self.freeze:
            # Resample the keep/drop decision on every forward pass.
            self.choice = np.random.binomial(size=1, n=1, p=self.prob)[0]
        if self.choice == 0:
            # Branch dropped: the block reduces to the (downsampled) identity.
            return identity
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y += identity
        return self.relu(y)
def fix_ratio(image, cfg):
    """Resize *image* so its longer side equals ``cfg.long_side`` while keeping
    the aspect ratio, then pad it via ``border_pad``."""
    h, w, _ = image.shape
    if h >= w:
        # Height is (at least) the long side; shrink/grow width proportionally.
        new_h = cfg.long_side
        new_w = round(new_h / ((h * 1.0) / w))
    else:
        new_w = cfg.long_side
        new_h = round(new_w / ((w * 1.0) / h))
    image = cv2.resize(image, dsize=(new_w, new_h), interpolation=cv2.INTER_LINEAR)
    return border_pad(image, cfg)
def _tokenize_fn(strings: Sequence[str], tokenizer: transformers.PreTrainedTokenizer) -> Dict:
    """Tokenize each string independently and record non-pad token counts.

    Returns a dict where ``labels`` aliases ``input_ids`` and ``labels_lens``
    aliases ``input_ids_lens`` (the same list objects, as in the original).
    """
    input_ids = []
    input_ids_lens = []
    for text in strings:
        tokenized = tokenizer(text, return_tensors='pt', padding='longest', max_length=tokenizer.model_max_length, truncation=True)
        input_ids.append(tokenized.input_ids[0])
        # Length = number of tokens that are not the pad token.
        input_ids_lens.append(tokenized.input_ids.ne(tokenizer.pad_token_id).sum().item())
    return dict(input_ids=input_ids, labels=input_ids, input_ids_lens=input_ids_lens, labels_lens=input_ids_lens)
class _CAM():
    """Base class for CAM (class activation map) extractors.

    Registers a forward hook on ``target_layer`` so its activations are
    captured on each forward pass; subclasses combine them with per-class
    weights (``_get_weights``) to build the CAM.
    """

    def __init__(self, model: nn.Module, target_layer: Optional[str]=None, input_shape: Tuple[(int, ...)]=(3, 224, 224)) -> None:
        self.assert_model(model)
        self.submodule_dict = dict(model.named_modules())
        if (target_layer is None):
            # No layer given: try to locate a sensible candidate automatically.
            target_layer = locate_candidate_layer(model, input_shape)
            if isinstance(target_layer, str):
                DLLogger.log('no value was provided for `target_layer`, thus set to `{}`.'.format(target_layer))
            else:
                raise ValueError('unable to resolve `target_layer` automatically, please specify its value.')
        if (target_layer not in self.submodule_dict.keys()):
            raise ValueError(f'Unable to find submodule {target_layer} in the model')
        self.target_layer = target_layer
        self.model = model
        # Activations captured by the forward hook (set on each forward pass).
        self.hook_a: Optional[Tensor] = None
        self.hook_handles: List[torch.utils.hooks.RemovableHandle] = []
        self.hook_handles.append(self.submodule_dict[target_layer].register_forward_hook(self._hook_a))
        self._hooks_enabled = True
        self._relu = False
        self._score_used = False

    # BUGFIX: this was defined without `self` but invoked as `self.assert_model(...)`,
    # which bound the instance to `model` and raised TypeError. It takes no
    # instance state, so it must be a @staticmethod.
    @staticmethod
    def assert_model(model: STDClassifier) -> None:
        """Validate supported classifier models; silently accepts other objects."""
        if (not isinstance(model, STDClassifier)):
            return
        assert any([isinstance(model.encoder, dlib.encoders.resnet.ResNetEncoder), isinstance(model.encoder, dlib.encoders.vgg.VGGEncoder), isinstance(model.encoder, dlib.encoders.inceptionv3.InceptionV3Encoder)])
        assert isinstance(model.classification_head, WGAP)

    def _hook_a(self, module: nn.Module, input: Tensor, output: Tensor) -> None:
        """Forward hook: store the target layer's output activations."""
        if self._hooks_enabled:
            self.hook_a = output.data

    def clear_hooks(self) -> None:
        """Remove all registered forward hooks."""
        for handle in self.hook_handles:
            handle.remove()
        self.hook_handles.clear()

    # BUGFIX: same missing-decorator issue — called as `self._normalize(cams)`,
    # which bound the instance to `cams` and the tensor to `spatial_dims`.
    @staticmethod
    def _normalize(cams: Tensor, spatial_dims: Optional[int]=None) -> Tensor:
        """Min-max normalize *cams* in place over its trailing *spatial_dims* dims."""
        spatial_dims = (cams.ndim if (spatial_dims is None) else spatial_dims)
        cams.sub_(cams.flatten(start_dim=(- spatial_dims)).min((- 1)).values[((...,) + ((None,) * spatial_dims))])
        cams.div_(cams.flatten(start_dim=(- spatial_dims)).max((- 1)).values[((...,) + ((None,) * spatial_dims))])
        return cams

    def _get_weights(self, class_idx: int, scores: Optional[Tensor]=None) -> Tensor:
        """Subclasses compute the per-channel CAM weights here."""
        raise NotImplementedError

    def _precheck(self, class_idx: int, scores: Optional[Tensor]=None) -> None:
        """Validate that a forward pass was hooked and the arguments are sane."""
        if (not isinstance(self.hook_a, Tensor)):
            raise AssertionError('Inputs need to be forwarded in the model for the conv features to be hooked')
        if (self.hook_a.shape[0] != 1):
            raise ValueError(f'expected a 1-sized batch to be hooked. Received: {self.hook_a.shape[0]}')
        if ((not isinstance(class_idx, int)) or (class_idx < 0)):
            raise ValueError('Incorrect `class_idx` argument value')
        if (self._score_used and (not isinstance(scores, torch.Tensor))):
            raise ValueError('model output scores is required to be passed to compute CAMs')

    def __call__(self, class_idx: int, scores: Optional[Tensor]=None, normalized: bool=True, reshape: Optional[Tuple]=None, argmax: Optional[bool]=False) -> Tensor:
        """Compute the CAM for `class_idx`, optionally resized to `reshape` (H, W)."""
        self._precheck(class_idx, scores)
        cam = self.compute_cams(class_idx, scores, normalized)
        if (reshape is not None):
            assert (len(reshape) == 2)
            interpolation_mode = 'bilinear'
            cam = F.interpolate(cam.unsqueeze(0).unsqueeze(0), reshape, mode=interpolation_mode, align_corners=False).squeeze(0).squeeze(0)
        cam = cam.detach()
        return cam

    def compute_cams(self, class_idx: int, scores: Optional[Tensor]=None, normalized: bool=True) -> Tensor:
        """Weight the hooked activations and reduce over channels to a CAM."""
        weights = self._get_weights(class_idx, scores)
        # Broadcast the per-channel weights over the spatial dimensions.
        missing_dims = ((self.hook_a.ndim - weights.ndim) - 1)
        weights = weights[((...,) + ((None,) * missing_dims))]
        batch_cams = torch.nansum((weights * self.hook_a.squeeze(0)), dim=0)
        if self._relu:
            batch_cams = F.relu(batch_cams, inplace=True)
        if normalized:
            batch_cams = self._normalize(batch_cams)
        return batch_cams

    def extra_repr(self) -> str:
        return f"target_layer='{self.target_layer}'"

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}({self.extra_repr()})'
def f1_eval(logits, features):
    """Sweep the fallback threshold T2 over [0, 0.5] and return (best_f1, best_T2).

    Labels use 36 classes plus index 36 as the "no relation" marker; predictions
    take every class whose sigmoid score exceeds T1, falling back to the argmax
    (or to 36 when even the max is below T2).
    """

    def predict(scores, T1=0.5, T2=0.4):
        # Threshold each row; fall back to argmax / "no relation" when empty.
        preds = []
        for row in scores:
            chosen = []
            best_val, best_idx = -1, -1
            for j, v in enumerate(row):
                if v > T1:
                    chosen.append(j)
                if v > best_val:
                    best_val, best_idx = v, j
            if len(chosen) == 0:
                chosen = [36] if best_val <= T2 else [best_idx]
            preds.append(chosen)
        return preds

    def micro_f1(preds, gold):
        # Micro-averaged F1, ignoring the "no relation" class (36).
        correct_sys = all_sys = correct_gt = 0
        for pred_row, gold_row in zip(preds, gold):
            for rid in gold_row:
                if rid != 36:
                    correct_gt += 1
                    if rid in pred_row:
                        correct_sys += 1
            for rid in pred_row:
                if rid != 36:
                    all_sys += 1
        precision = 1 if all_sys == 0 else correct_sys / all_sys
        recall = 0 if correct_gt == 0 else correct_sys / correct_gt
        return 2 * precision * recall / (precision + recall) if (precision + recall) != 0 else 0

    probs = list(1 / (1 + np.exp(-np.asarray(logits))))
    gold = []
    for f in features:
        assert len(f[0].label_id) == 36
        row = [i for i in range(36) if f[0].label_id[i] == 1]
        gold.append(row if row else [36])
    assert len(gold) == len(probs)
    best_f1 = best_T2 = 0
    for t in range(51):
        f1 = micro_f1(predict(probs, T2=t / 100.0), gold)
        if f1 > best_f1:
            best_f1, best_T2 = f1, t / 100.0
    return (best_f1, best_T2)
class IFFT2Op(gof.Op):
    """Theano op: batched 2-D inverse FFT over tensors shaped (..., Nx, Ny, 2),
    where the last axis holds the (real, imaginary) parts."""
    __props__ = ()

    def output_type(self, inp):
        # Same dtype and rank as the input, nothing broadcastable.
        return T.TensorType(inp.dtype, broadcastable=([False] * inp.type.ndim))

    def make_node(self, a, s=None):
        a = T.as_tensor_variable(a)
        if (a.ndim < 4):
            raise TypeError((('%s: input must have dimension >= 4, with ' % self.__class__.__name__) + 'first dimension batches, then last axes are (Nx, Ny, 2)'))
        if (s is None):
            # Default transform size: the two spatial axes before the complex axis.
            s = T.as_tensor_variable(a.shape[(- 3):(- 1)])
        else:
            s = T.as_tensor_variable(s)
            if ((not s.dtype.startswith('int')) and (not s.dtype.startswith('uint'))):
                raise TypeError(('%s: length of the transformed axis must be of type integer' % self.__class__.__name__))
        return gof.Apply(self, [a, s], [self.output_type(a)()])

    def perform(self, node, inputs, output_storage):
        a, s = inputs
        # Reassemble the complex array from its (real, imag) channels.
        complex_in = (a[(..., 0)] + (1j * a[(..., 1)]))
        transformed = np.fft.ifft2(complex_in)
        out = np.zeros((transformed.shape + (2,)), dtype=a.dtype)
        out[(..., 0)] = np.real(transformed)
        out[(..., 1)] = np.imag(transformed)
        # Undo numpy's 1/N normalization via the product of the transform sizes.
        output_storage[0][0] = (out * s.prod()).astype(a.dtype)

    def grad(self, inputs, output_grads):
        (gout,) = output_grads
        s = inputs[1]
        # Gradient of the inverse FFT is the forward FFT; `s` is disconnected.
        return [fft2_op(gout, s), DisconnectedType()()]

    def connection_pattern(self, node):
        # Output depends on the data input only, not on the shape input.
        return [[True], [False]]
def collate(samples, pad_idx, eos_idx, vocab, left_pad_source=False, left_pad_target=False, input_feeding=True):
    """Collate a list of sample dicts into a mini-batch sorted by descending source length."""
    assert input_feeding
    if not samples:
        return {}

    def merge(key, left_pad, move_eos_to_beginning=False):
        # Pad the per-sample tensors for `key` into one batch tensor.
        return data_utils.collate_tokens([s[key] for s in samples], pad_idx, eos_idx, left_pad, move_eos_to_beginning)

    id = torch.LongTensor([s['id'] for s in samples])
    src_tokens = merge('source', left_pad=left_pad_source)
    src_lengths = torch.LongTensor([s['source'].numel() for s in samples])
    # Sort the batch by source length (longest first) for packed RNNs etc.
    src_lengths, sort_order = src_lengths.sort(descending=True)
    id = id.index_select(0, sort_order)
    src_tokens = src_tokens.index_select(0, sort_order)
    prev_output_tokens = None
    target = None
    if samples[0].get('target', None) is None:
        ntokens = sum(len(s['source']) for s in samples)
    else:
        target = merge('target', left_pad=left_pad_target).index_select(0, sort_order)
        ntokens = sum(len(s['target']) for s in samples)
        if input_feeding:
            # Teacher-forcing input: target shifted right (EOS moved to the front).
            prev_output_tokens = merge('target', left_pad=left_pad_target, move_eos_to_beginning=True).index_select(0, sort_order)
    batch = {'id': id, 'ntokens': ntokens, 'net_input': {'src_tokens': src_tokens, 'src_lengths': src_lengths}, 'target': target, 'nsentences': samples[0]['source'].size(0)}
    if prev_output_tokens is not None:
        batch['net_input']['prev_output_tokens'] = prev_output_tokens
    return batch
def make_pooler(cfg, head_name):
    """Build an ROI `Pooler` from the config section named *head_name*."""
    head_cfg = cfg.MODEL[head_name]
    resolution = head_cfg.POOLER_RESOLUTION
    return Pooler(output_size=(resolution, resolution), scales=head_cfg.POOLER_SCALES, sampling_ratio=head_cfg.POOLER_SAMPLING_RATIO)
# BUGFIX: the decorator was garbled to a bare `(reuse_venv=True)` expression,
# leaving `build` unregistered as a nox session; restore `@nox.session(...)`.
@nox.session(reuse_venv=True)
def build(session: nox.Session) -> None:
    """Build the distribution files for both package flavours.

    Runs `python -m build` once normally and once with PYBIND11_GLOBAL_SDIST=1
    to produce the pybind11-global artifacts.
    """
    session.install('build')
    session.log('Building normal files')
    session.run('python', '-m', 'build', *session.posargs)
    session.log('Building pybind11-global files (PYBIND11_GLOBAL_SDIST=1)')
    session.run('python', '-m', 'build', *session.posargs, env={'PYBIND11_GLOBAL_SDIST': '1'})
def check_all_auto_object_names_being_defined():
    """Verify every class name referenced by the auto-mapping tables exists in `transformers`."""
    check_missing_backends()
    failures = []
    mappings_to_check = {
        'TOKENIZER_MAPPING_NAMES': TOKENIZER_MAPPING_NAMES,
        'IMAGE_PROCESSOR_MAPPING_NAMES': IMAGE_PROCESSOR_MAPPING_NAMES,
        'FEATURE_EXTRACTOR_MAPPING_NAMES': FEATURE_EXTRACTOR_MAPPING_NAMES,
        'PROCESSOR_MAPPING_NAMES': PROCESSOR_MAPPING_NAMES,
    }
    # Pull in every *_MAPPING_NAMES table from the PT/TF/Flax auto-modeling modules.
    for module_name in ['modeling_auto', 'modeling_tf_auto', 'modeling_flax_auto']:
        module = getattr(transformers.models.auto, module_name, None)
        if module is None:
            # The corresponding framework may not be available in this environment.
            continue
        for attr in dir(module):
            if attr.endswith('_MAPPING_NAMES'):
                mappings_to_check[attr] = getattr(module, attr)
    for name, mapping in mappings_to_check.items():
        for model_type, class_names in mapping.items():
            if not isinstance(class_names, tuple):
                class_names = (class_names,)
            for class_name in class_names:
                if class_name is None:
                    continue
                if hasattr(transformers, class_name):
                    continue
                # Private models are allowed to be absent from the top-level namespace.
                if name.endswith('MODEL_MAPPING_NAMES') and is_a_private_model(class_name):
                    continue
                failures.append(f'`{class_name}` appears in the mapping `{name}` but it is not defined in the library.')
    if len(failures) > 0:
        raise Exception(f'There were {len(failures)} failures:\n' + '\n'.join(failures))
class EncoderText(nn.Module):
    """GRU sentence encoder mapping token ids to an L2-normalised embedding."""

    def __init__(self, vocab_size, word_dim, embed_size, num_layers, use_abs=False):
        super(EncoderText, self).__init__()
        self.use_abs = use_abs
        self.embed_size = embed_size
        # Token embedding feeding a batch-first GRU.
        self.embed = nn.Embedding(vocab_size, word_dim)
        self.rnn = nn.GRU(word_dim, embed_size, num_layers, batch_first=True)
        self.init_weights()

    def init_weights(self):
        """Initialise the embedding table uniformly in [-0.1, 0.1]."""
        self.embed.weight.data.uniform_(-0.1, 0.1)

    def forward(self, x, lengths):
        embedded = self.embed(x)
        packed = pack_padded_sequence(embedded, lengths, batch_first=True)
        rnn_out, _ = self.rnn(packed)
        padded, _ = pad_packed_sequence(rnn_out, batch_first=True)
        # Gather the hidden state at the last valid timestep of each sequence.
        idx = torch.LongTensor(lengths).view(-1, 1, 1)
        idx = (idx.expand(embedded.size(0), 1, self.embed_size) - 1).cuda()
        out = l2norm(padded.gather(1, idx).squeeze(1))
        if self.use_abs:
            out = torch.abs(out)
        return out
class TrainingArguments(transformers.TrainingArguments):
    """HF TrainingArguments extended with project-specific options.

    NOTE(review): the fields below use ``dataclasses.field()``, which only takes
    effect if this class is processed by ``@dataclass`` (the HF base class is a
    dataclass, and subclasses conventionally carry their own decorator) —
    confirm the decorator was not lost from this snippet.
    """
    # Directory for cached downloads (None = HF default).
    cache_dir: Optional[str] = field(default=None)
    # Optimizer implementation name passed through to the HF Trainer.
    optim: str = field(default='adamw_torch')
    # Do not let Trainer drop unused dataset columns.
    remove_unused_columns: bool = field(default=False)
    # Presumably freezes the multimodal MLP adapter during training — confirm in the training loop.
    freeze_mm_mlp_adapter: bool = field(default=False)
    # Presumably forces FSDP wrapping — confirm where this flag is consumed.
    force_fsdp: bool = field(default=False)
    model_max_length: int = field(default=512, metadata={'help': 'Maximum sequence length. Sequences will be right padded (and possibly truncated).'})
def main(args):
    """Write a JSON file mapping each design name to fixed residue positions per chain.

    Reads a JSONL parsed-structure file from ``args.input_path``; each record
    has ``seq_chain_X`` keys (one per chain) plus a ``name``.
    ``args.chain_list`` is a whitespace-separated list of designed chains and
    ``args.position_list`` a comma-separated list (one group per designed
    chain) of whitespace-separated 1-based residue indices.

    If ``args.specify_non_fixed`` is False, the listed positions are the fixed
    ones; if True, they are the *designable* positions and everything else
    (including whole non-designed chains) is marked fixed.  The resulting dict
    is written to ``args.output_path`` as a single JSON line.

    Fix: removed the unused local imports (glob, random, itertools).
    """
    import json
    import numpy as np
    with open(args.input_path, 'r') as json_file:
        json_list = list(json_file)
    # One group of positions per designed chain, e.g. "1 2 5, 3 4" -> [[1, 2, 5], [3, 4]].
    fixed_list = [[int(item) for item in one.split()] for one in args.position_list.split(',')]
    global_designed_chain_list = [str(item) for item in args.chain_list.split()]
    my_dict = {}
    if not args.specify_non_fixed:
        for json_str in json_list:
            result = json.loads(json_str)
            # Chain ids are the last character of each 'seq_chain_X' key.
            all_chain_list = [item[-1:] for item in list(result) if item[:9] == 'seq_chain']
            fixed_position_dict = {}
            for i, chain in enumerate(global_designed_chain_list):
                fixed_position_dict[chain] = fixed_list[i]
            for chain in all_chain_list:
                if chain not in global_designed_chain_list:
                    # Non-designed chains carry no explicitly fixed positions here.
                    fixed_position_dict[chain] = []
            my_dict[result['name']] = fixed_position_dict
    else:
        for json_str in json_list:
            result = json.loads(json_str)
            all_chain_list = [item[-1:] for item in list(result) if item[:9] == 'seq_chain']
            fixed_position_dict = {}
            for chain in all_chain_list:
                seq_length = len(result[f'seq_chain_{chain}'])
                all_residue_list = (np.arange(seq_length) + 1).tolist()
                if chain not in global_designed_chain_list:
                    # Entire non-designed chain is fixed.
                    fixed_position_dict[chain] = all_residue_list
                else:
                    idx = np.argwhere(np.array(global_designed_chain_list) == chain)[0][0]
                    # Fix everything except the user-specified designable positions.
                    fixed_position_dict[chain] = list(set(all_residue_list) - set(fixed_list[idx]))
            my_dict[result['name']] = fixed_position_dict
    with open(args.output_path, 'w') as f:
        f.write(json.dumps(my_dict) + '\n')
class PhraseTree(object):
    """Constituency-parse tree over a POS-tagged sentence.

    ``sentence`` is a shared list of ``(word, tag)`` pairs.  Internal nodes
    carry a ``symbol`` and ``children``; leaves carry an index ``leaf`` into
    ``sentence``.

    Fix: ``__init__`` previously used mutable default arguments
    (``children=[], sentence=[]``), so every tree built without explicit
    arguments shared the same lists.
    """

    # POS tags treated as punctuation when trimming bracket spans.
    puncs = [',', '.', ':', '``', "''", 'PU']

    def __init__(self, symbol=None, children=None, sentence=None, leaf=None):
        self.symbol = symbol
        # Fresh lists per instance; mutable defaults would be shared across instances.
        self.children = children if children is not None else []
        self.sentence = sentence if sentence is not None else []
        self.leaf = leaf
        self._str = None

    def __str__(self):
        # Cache the bracketed string; trees are not mutated after construction.
        if self._str is None:
            if len(self.children) != 0:
                childstr = ' '.join(str(c) for c in self.children)
                self._str = '({} {})'.format(self.symbol, childstr)
            else:
                self._str = '({} {})'.format(self.sentence[self.leaf][1], self.sentence[self.leaf][0])
        return self._str

    def propagate_sentence(self, sentence):
        """Point this node and all descendants at *sentence*."""
        self.sentence = sentence
        for child in self.children:
            child.propagate_sentence(sentence)

    def pretty(self, level=0, marker=' '):
        """Render the tree with one node per line, indented by depth."""
        pad = marker * level
        if self.leaf is not None:
            leaf_string = '({} {})'.format(self.symbol, self.sentence[self.leaf][0])
            return pad + leaf_string
        else:
            result = pad + '(' + self.symbol
            for child in self.children:
                result += '\n' + child.pretty(level + 1)
            result += ')'
            return result

    @staticmethod
    def parse(line):
        """Parse a bracketed tree string; a unary TOP root is unwrapped."""
        line += ' '
        sentence = []
        _, t = PhraseTree._parse(line, 0, sentence)
        if t.symbol == 'TOP' and len(t.children) == 1:
            t = t.children[0]
        return t

    @staticmethod
    def _parse(line, index, sentence):
        """Parse one node starting at ``line[index]``; return (next_index, tree)."""
        assert line[index] == '(', 'Invalid tree string {} at {}'.format(line, index)
        index += 1
        symbol = None
        children = []
        leaf = None
        while line[index] != ')':
            if line[index] == '(':
                index, t = PhraseTree._parse(line, index, sentence)
                children.append(t)
            elif symbol is None:
                # The label runs up to the next space or closing paren.
                rpos = min(line.find(' ', index), line.find(')', index))
                symbol = line[index:rpos]
                index = rpos
            else:
                # Terminal word: record (word, tag) and remember its position.
                rpos = line.find(')', index)
                word = line[index:rpos]
                sentence.append((word, symbol))
                leaf = len(sentence) - 1
                index = rpos
            if line[index] == ' ':
                index += 1
        assert line[index] == ')', 'Invalid tree string %s at %d' % (line, index)
        t = PhraseTree(symbol=symbol, children=children, sentence=sentence, leaf=leaf)
        return (index + 1), t

    def left_span(self):
        """Index of the leftmost leaf under this node (cached)."""
        try:
            return self._left_span
        except AttributeError:
            if self.leaf is not None:
                self._left_span = self.leaf
            else:
                self._left_span = self.children[0].left_span()
            return self._left_span

    def right_span(self):
        """Index of the rightmost leaf under this node (cached)."""
        try:
            return self._right_span
        except AttributeError:
            if self.leaf is not None:
                self._right_span = self.leaf
            else:
                self._right_span = self.children[-1].right_span()
            return self._right_span

    def brackets(self, advp_prt=True, counts=None):
        """Count labeled spans (punctuation-trimmed, TOP excluded) into *counts*."""
        if counts is None:
            counts = defaultdict(int)
        if self.leaf is not None:
            return {}
        nonterm = self.symbol
        if advp_prt and nonterm == 'PRT':
            # evalb convention: score PRT as ADVP.
            nonterm = 'ADVP'
        left = self.left_span()
        right = self.right_span()
        # Shrink the span past leading/trailing punctuation tokens.
        while left < len(self.sentence) and self.sentence[left][1] in PhraseTree.puncs:
            left += 1
        while right > 0 and self.sentence[right][1] in PhraseTree.puncs:
            right -= 1
        if left <= right and nonterm != 'TOP':
            counts[(nonterm, left, right)] += 1
        for child in self.children:
            child.brackets(advp_prt=advp_prt, counts=counts)
        return counts

    def phrase(self):
        """Return [(leaf_index, tag), ...] for all leaves in order."""
        if self.leaf is not None:
            return [(self.leaf, self.symbol)]
        else:
            result = []
            for child in self.children:
                result.extend(child.phrase())
            return result

    @staticmethod
    def load_treefile(fname):
        """Parse one tree per line from the file *fname*."""
        trees = []
        for line in open(fname):
            t = PhraseTree.parse(line)
            trees.append(t)
        return trees

    def compare(self, gold, advp_prt=True):
        """Labeled-bracket F-score of this (predicted) tree against *gold*."""
        predbracks = self.brackets(advp_prt)
        goldbracks = gold.brackets(advp_prt)
        correct = 0
        for gb in goldbracks:
            if gb in predbracks:
                correct += min(goldbracks[gb], predbracks[gb])
        pred_total = sum(predbracks.values())
        gold_total = sum(goldbracks.values())
        return FScore(correct, pred_total, gold_total)

    def enclosing(self, i, j):
        """Span of the smallest node strictly containing the span (i, j)."""
        for child in self.children:
            left = child.left_span()
            right = child.right_span()
            if left <= i and right >= j:
                if left == i and right == j:
                    # The child *is* exactly (i, j); this node is the strict encloser.
                    break
                return child.enclosing(i, j)
        return (self.left_span(), self.right_span())

    def span_labels(self, i, j):
        """Labels of all nodes whose span is exactly (i, j), outermost first."""
        if self.leaf is not None:
            return []
        if self.left_span() == i and self.right_span() == j:
            result = [self.symbol]
        else:
            result = []
        for child in self.children:
            left = child.left_span()
            right = child.right_span()
            if left <= i and right >= j:
                result.extend(child.span_labels(i, j))
                break
        return result
class isInContourV3_Hard(Contour_Checking_fn):
    """Hard patch-in-contour check: every probe point must lie inside the contour.

    Probes are the four corners of a box shifted ``center_shift`` of a
    half-patch from the patch center (or just the center when shift is 0).
    """

    def __init__(self, contour, patch_size, center_shift=0.5):
        self.cont = contour
        self.patch_size = patch_size
        # Probe offset from the patch center, in pixels.
        self.shift = int((patch_size // 2) * center_shift)

    def __call__(self, pt):
        half = self.patch_size // 2
        cx, cy = pt[0] + half, pt[1] + half
        s = self.shift
        if s > 0:
            probes = [(cx - s, cy - s), (cx + s, cy + s), (cx + s, cy - s), (cx - s, cy + s)]
        else:
            probes = [(cx, cy)]
        # Reject as soon as any probe falls outside the contour.
        for px, py in probes:
            if cv2.pointPolygonTest(self.cont, (int(px), int(py)), False) < 0:
                return 0
        return 1
def build_stereo_dataset(cfg, type):
    """Create the stereo dataset described by ``cfg.data[type]``, or None if absent.

    Raises ValueError for an unrecognised dataset type string.
    """
    if type not in cfg.data:
        return None
    split_cfg = cfg.data[type]
    data_root = split_cfg.data_root
    data_type = split_cfg.type
    annFile = split_cfg.annfile
    transforms = build_transforms(cfg, type, is_train=(type == 'train'))
    if 'SceneFlow' in data_type:
        return SceneFlowDataset(annFile, data_root, transforms)
    if 'KITTI' in data_type:
        if '2012' in data_type:
            return Kitti2012Dataset(annFile, data_root, transforms)
        if '2015' in data_type:
            return Kitti2015Dataset(annFile, data_root, transforms)
    raise ValueError('invalid data type: {}'.format(data_type))
class HyperParams():
    """Container for experiment hyper-parameter dictionaries."""

    def __init__(self):
        pass

    def get_uniwarp_config(self, argv):
        """Return the default UniWarp configuration dict (*argv* is unused)."""
        return {
            'optimizer:num_epochs': 1000000,
            'model:num_batch_pairs': 100,
            'uniwarp:length': 1024,
            'uniwarp:rnn_encoder_layers': [256, 128, 64],
            'uniwarp:warp_nn_layers': [64, 16, 1],
            'uniwarp:eta': 0.0001,
            'uniwarp:max_grad_norm': 10.0,
            'uniwarp:lambda': 0.0,
            'uniwarp:cnn_encoder_layers': [1024, 256, 64],
            'uniwarp:cnn_kernel_lengths': [5, 5, 3],
            'uniwarp:cnn_strides': [2, 1, 1],
            'uniwarp:dropout_rate': 0.05,
            'uniwarp:enable_batch_normalization': True,
            'dataset:num_channels': 1,
        }

    def restore(file_path):
        # NOTE(review): this parses *file_path itself* as a JSON string rather
        # than reading the file at that path — confirm what callers pass in.
        return json.loads(file_path)
def reorder_tsv_keys(in_tsv_file, ordered_keys, out_tsv_file):
    """Rewrite *in_tsv_file* into *out_tsv_file* with rows following *ordered_keys*."""
    tsv = TSVFile(in_tsv_file)
    logging.info('loading keys in input')
    # Map first-column key -> row index (later duplicates win, as before).
    key_to_idx = {
        tsv.seek_first_column(row): row
        for row in tqdm(range(len(tsv)), mininterval=2)
    }

    def gen_rows():
        logging.info('writing')
        for key in tqdm(ordered_keys, mininterval=2):
            yield tsv.seek(key_to_idx[key])

    tsv_writer(gen_rows(), out_tsv_file)
# NOTE(review): this bare call looks like a registration decorator that lost
# its '@' prefix (and possibly part of its name) — as written it does not
# attach to the class below; confirm against the original source.
_registry(operator_type='_FusedMatMul')
class _FusedMatMul(Operator):
    """IR operator wrapping TensorFlow's _FusedMatMul node."""
    def __init__(self):
        super().__init__()
    def set_attr(self, framework, node):
        # Translate framework node attributes into this operator's attr dict.
        if (framework == 'tensorflow'):
            transpose_a = node.attr['transpose_a'].b
            transpose_b = node.attr['transpose_b'].b
            epsilon = node.attr['epsilon'].f
            if transpose_a:
                self._attr['src0_perm'] = '1,0'
            # NOTE(review): src1_perm is set to '1,0' when transpose_b is
            # *False* — presumably the target kernel expects B pre-transposed;
            # confirm before changing.
            if (not transpose_b):
                self._attr['src1_perm'] = '1,0'
            else:
                self._attr['src1_perm'] = '0,1'
            if (epsilon != 0.0):
                self._attr['epsilon'] = epsilon
def run_validating():
    """Validation loop for the multi-GPU mfb_dis model (TF1 queue-runner style).

    Relies on module-level globals: model_save_dir, val_list_path, dataset_path,
    loss_save_dir, prefix, FLAGS, use_pretrained_model, use_pretrained_encoder,
    encoder_gradient_ratio, and the input_pipeline_dis / mfb_dis_net /
    tower_loss helpers.  Restores the latest checkpoint, streams the validation
    set through per-GPU input queues until the epoch limit, and appends mean
    accuracy/loss to a text log.
    """
    if (not os.path.exists(model_save_dir)):
        os.makedirs(model_save_dir)
    # NOTE(review): model_filename is never used below — possibly leftover.
    model_filename = './mfb_dis_ucf24.model'
    (tower_grads, tower_ac) = ([], [])
    (tower_losses, tower_ac_losses, tower_wd_losses) = ([], [], [])
    global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)
    starter_learning_rate = 0.0001
    # LR schedule/optimizer are built for graph parity with training; the
    # gradients computed below are never applied here.
    learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step, 1000000, 0.8, staircase=True)
    opt = tf.train.AdamOptimizer(learning_rate)
    config = tf.ConfigProto(allow_soft_placement=True)
    sess = tf.Session(config=config)
    coord = tf.train.Coordinator()
    threads = None
    val_list_file = open(val_list_path, 'r')
    val_list = val_list_file.read().splitlines()
    for (i, line) in enumerate(val_list):
        val_list[i] = os.path.join(dataset_path, val_list[i])
    # The validation list must divide evenly across GPUs.
    assert ((len(val_list) % FLAGS.num_gpus) == 0)
    num_for_each_gpu = (len(val_list) // FLAGS.num_gpus)
    (clips_list, labels_list, texts_list) = ([], [], [])
    with sess.as_default():
        # One input pipeline per GPU, each fed a disjoint slice of the list.
        for i in range(FLAGS.num_gpus):
            (clips, labels, texts) = input_pipeline_dis(val_list[(i * num_for_each_gpu):((i + 1) * num_for_each_gpu)], FLAGS.batch_size, num_epochs=FLAGS.num_epochs, is_training=False)
            clips_list.append(clips)
            labels_list.append(labels)
            texts_list.append(texts)
        mfb_list = []
        with tf.variable_scope('vars') as var_scope:
            # Build one model tower per GPU, sharing variables via reuse.
            for gpu_index in range(FLAGS.num_gpus):
                with tf.device(('/gpu:%d' % gpu_index)):
                    with tf.name_scope(('%s_%d' % ('tower', gpu_index))) as scope:
                        mfb = mfb_dis_net(clips_list[gpu_index], labels_list[gpu_index], FLAGS.num_class, FLAGS.height, FLAGS.width, FLAGS.seq_length, FLAGS.channel, FLAGS.batch_size, is_training=False)
                        mfb_list.append(mfb)
                        (loss, ac_loss, wd_loss) = tower_loss(scope, mfb, use_pretrained_encoder, encoder_gradient_ratio)
                        var_scope.reuse_variables()
                        vars_to_optimize = tf.trainable_variables()
                        grads = opt.compute_gradients(loss, var_list=vars_to_optimize)
                        tower_grads.append(grads)
                        tower_losses.append(loss)
                        tower_ac_losses.append(ac_loss)
                        tower_wd_losses.append(wd_loss)
                        tower_ac.append(mfb.ac)
    # Average the per-tower metrics.
    loss_op = tf.reduce_mean(tower_losses)
    ac_loss_op = tf.reduce_mean(tower_ac_losses)
    wd_loss_op = tf.reduce_mean(tower_wd_losses)
    ac_op = tf.reduce_mean(tower_ac)
    tf.summary.scalar('loss', loss_op)
    tf.summary.scalar('ac_loss', ac_loss_op)
    tf.summary.scalar('ac', ac_op)
    tf.summary.scalar('wd_loss', wd_loss_op)
    saver = tf.train.Saver(max_to_keep=10)
    init = tf.initialize_all_variables()
    sess.run(init)
    if (not os.path.exists(model_save_dir)):
        os.makedirs(model_save_dir)
    if use_pretrained_model:
        print('[*] Loading checkpoint ...')
        model = tf.train.latest_checkpoint(model_save_dir)
        if (model is not None):
            saver.restore(sess, model)
            print(('[*] Loading success: %s!' % model))
        else:
            print('[*] Loading failed ...')
    if (not os.path.exists(loss_save_dir)):
        os.makedirs(loss_save_dir)
    loss_file = open(os.path.join(loss_save_dir, (prefix + '_val.txt')), 'a+')
    total_steps = ((FLAGS.num_sample / (FLAGS.num_gpus * FLAGS.batch_size)) * FLAGS.num_epochs)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    (ac_list, loss_list) = ([], [])
    step = 0
    try:
        with sess.as_default():
            print('\n\n\n start validating \n\n\n')
            step = global_step.eval()
            print(('[step = %d]' % step))
            # Run until the input queues signal end-of-epochs via OutOfRangeError.
            while (not coord.should_stop()):
                (ac, ac_loss) = sess.run([ac_op, ac_loss_op])
                ac_list.append(ac)
                loss_list.append(ac_loss)
                print(('ac=%.3f, loss=%.8f' % ((ac * 100), ac_loss)))
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
        coord.join(threads)
    sess.close()
    mean_ac = np.mean(np.asarray(ac_list))
    mean_loss = np.mean(np.asarray(loss_list))
    line = ('[step=%d] mean_ac=%.3f, mean_loss=%.8f' % (step, (mean_ac * 100), mean_loss))
    print(line)
    loss_file.write((line + '\n'))
class KvVariableSaveable(BaseSaverBuilder.SaveableObject):
    """SaveableObject that checkpoints a KvVariable via its export/import ops.

    Export tensors come from ``var.export``; depending on the tfplus saver
    mode, some slots are dropped at save time and re-fed as empty tensors on
    restore.
    """
    def __init__(self, var, name):
        self._var = var
        tensors_dict = var.export(name=name)
        self._key_dtype = var.key_dtype
        self._value_dtype = var.dtype
        self._embedding_dim = var.shape.as_list()[1]
        self._is_loading_finished = get_or_create_is_loading_finished()
        specs = [BaseSaverBuilder.SaveSpec(tensor, '', tensor_name) for (tensor_name, tensor) in tensors_dict.items()]
        # Tensor names look like '<prefix>-<slot>'; keep only the slot suffix.
        orig_ordering = [k.split('-')[(- 1)] for k in tensors_dict.keys()]
        if (tfplus_saver_mode() == 0):
            # Mode 0 skips slots 3-5 when saving; restore() re-creates them empty.
            specs = (specs[:3] + specs[6:])
            self._ordering = (orig_ordering[:3] + orig_ordering[6:])
            self._empty_ordering = orig_ordering[3:6]
        else:
            self._ordering = orig_ordering
            self._empty_ordering = None
        super(KvVariableSaveable, self).__init__(var, specs, name)
    def get_generic_name(self, var_name=None):
        # Delegate to the wrapped op (set by the base class from `var`).
        return self.op.get_generic_name(var_name)
    def dynamic_restore(self, ckpt_path_tensor, restore_tensors, restore_mode, num_shards, ckpt_num_shards):
        """Run the op's dynamic restore, pinned to the CPU of this op's device."""
        if self.device:
            from tensorflow.python.framework import device as pydev
            # Rewrite e.g. '/gpu:0' to the corresponding CPU device.
            device = pydev.DeviceSpec.from_string(self.device)
            device.device_type = 'CPU'
            device.device_index = 0
            device = device.to_string()
        else:
            device = None
        with ops.device(device):
            return self.op.dynamic_restore(ckpt_path_tensor, restore_tensors, restore_mode, num_shards, ckpt_num_shards)
    def restore(self, restored_tensors, restored_shapes, filename_tensor=None, ckpt_name=None):
        """Build the import op(s) that load *restored_tensors* back into the variable."""
        def _restore():
            # Map each restored tensor to its slot keyword argument, by position.
            kwargs = {k: restored_tensors[i] for (i, k) in enumerate(self._ordering)}
            if (self._empty_ordering is not None):
                # Slots skipped at save time are fed as empty tensors.
                empty_keys = tf.zeros([0], self._key_dtype)
                if _ENABLE_DELTA_EXPORT:
                    empty_values = tf.zeros([0, self._embedding_dim], dtypes.uint32)
                else:
                    empty_values = tf.zeros([0, self._embedding_dim], dtypes.uint16)
                kwargs.update({self._empty_ordering[0]: empty_keys, self._empty_ordering[1]: empty_keys, self._empty_ordering[2]: empty_values})
            if _ENABLE_DELTA_EXPORT:
                with ops.colocate_with(self.op.handle):
                    if (tfplus_saver_mode() == 0):
                        first_n = 3
                    else:
                        first_n = 8
                    if full_or_delta_import_v2_enabled():
                        kwargs.update({'is_loading_finished': self._is_loading_finished})
                        return gen_kv_variable_ops.kv_variable_full_or_delta_import_v2(self.op.handle, first_n=first_n, **kwargs)
                    return gen_kv_variable_ops.kv_variable_full_or_delta_import(self.op.handle, first_n=first_n, **kwargs)
            else:
                with ops.colocate_with(self.op.handle):
                    if (tfplus_saver_mode() == 0):
                        first_n = 3
                    else:
                        first_n = 6
                    return gen_kv_variable_ops.kv_variable_import(self.op.handle, first_n=first_n, **kwargs)
        restore_op = _restore()
        if self._var.is_ignore_eflops_device_fn:
            return restore_op
        # Re-initialize the variable after the import completes.
        # NOTE(review): `self.var` below is the method object, so `self.var.name`
        # looks like it should be `self._var.name` — confirm before changing.
        with ops.control_dependencies([restore_op]):
            init_op = gen_kv_variable_ops.init_kv_variable_v2(self._var._handle, variables._try_guard_against_uninitialized_dependencies(self.var.name, self._var._initial_value))
        return init_op
    def var(self):
        # Accessor for the wrapped KvVariable.
        return self._var
def test_glorot_normal_c01b_4d_only():
    """GlorotNormal with c01b=True must reject every non-4D shape."""
    from lasagne.init import GlorotNormal
    for bad_shape in [(100,), (100, 100), (100, 100, 100)]:
        with pytest.raises(RuntimeError):
            GlorotNormal(c01b=True).sample(bad_shape)
def write_hyperparameters_json(hyperparams: dict, PATHS: dict) -> None:
    """Serialize *hyperparams* to ``<PATHS['model']>/hyperparameters.json`` (UTF-8, indent 4)."""
    target_path = os.path.join(PATHS.get('model'), 'hyperparameters.json')
    with open(target_path, 'w', encoding='utf-8') as fh:
        json.dump(hyperparams, fh, ensure_ascii=False, indent=4)
def convert_PDF_to_plaintext(fpath, keep_layout=False):
    """Convert the PDF at *fpath* to a list of text lines using pdftotext.

    A page break (form feed) embedded mid-line is split into a standalone
    '\\x0c' line followed by the remainder of that line.

    Raises IOError when the pdftotext executable is missing.

    Fix: the original never closed the child's stdout pipe nor waited on the
    process, leaking a file descriptor and leaving a zombie per call.
    """
    if (not os.path.isfile(CFG_PATH_PDFTOTEXT)):
        raise IOError('Missing pdftotext executable')
    # '-layout' preserves physical layout; '-raw' emits content in stream order.
    if keep_layout:
        layout_option = '-layout'
    else:
        layout_option = '-raw'
    doclines = []
    p_break_in_line = re.compile('^\\s*\\f(.+)$', re.UNICODE)
    cmd_pdftotext = [CFG_PATH_PDFTOTEXT, layout_option, '-q', '-enc', 'UTF-8', fpath, '-']
    LOGGER.debug(u'%s', ' '.join(cmd_pdftotext))
    pipe_pdftotext = subprocess.Popen(cmd_pdftotext, stdout=subprocess.PIPE)
    try:
        for docline in pipe_pdftotext.stdout:
            unicodeline = docline.decode('utf-8')
            m_break_in_line = p_break_in_line.match(unicodeline)
            if (m_break_in_line is None):
                doclines.append(unicodeline)
            else:
                # Split an in-line page break into a form-feed marker plus the text.
                doclines.append(u'\x0c')
                doclines.append(m_break_in_line.group(1))
    finally:
        # Close the pipe and reap the child to avoid fd leaks / zombies.
        pipe_pdftotext.stdout.close()
        pipe_pdftotext.wait()
    LOGGER.debug(u'convert_PDF_to_plaintext found: %s lines of text', len(doclines))
    return doclines
def avg_prec(correct_duplicates: List, retrieved_duplicates: List) -> float:
    """Average precision of *retrieved_duplicates* against *correct_duplicates*.

    Returns 1.0 when both lists are empty, 0.0 when exactly one is empty.
    Precision@k is accumulated only at the ranks of relevant retrievals and
    normalised by the number of correct duplicates.

    Fix: membership tests used an O(n) list scan per retrieved item (O(n*m)
    overall); a set makes each test O(1).  Precision@k is now computed
    vectorised instead of via Python list comprehensions.
    """
    if not retrieved_duplicates and not correct_duplicates:
        return 1.0
    if not retrieved_duplicates or not correct_duplicates:
        return 0.0
    count_real_correct = len(correct_duplicates)
    correct_set = set(correct_duplicates)  # O(1) membership
    relevance = np.array([1 if i in correct_set else 0 for i in retrieved_duplicates])
    # Precision at each rank k (1-based).
    prec_k = np.cumsum(relevance) / np.arange(1, len(relevance) + 1)
    # Sum precision only at relevant ranks, normalised by total correct.
    return float(np.sum(relevance * prec_k) / count_real_correct)
def _mobilenet_v3_model(arch: str, inverted_residual_setting: List[InvertedResidualConfig], last_channel: int, pretrained: bool, progress: bool, quantize: bool, **kwargs: Any):
    """Construct a quantizable MobileNetV3, optionally QAT-quantized and pretrained."""
    model = QuantizableMobileNetV3(inverted_residual_setting, last_channel, block=QuantizableInvertedResidual, **kwargs)
    _replace_relu(model)
    if not quantize:
        # Float path: just optionally load pretrained float weights.
        if pretrained:
            _load_weights(arch, model, model_urls.get(arch, None), progress)
        return model
    # Quantization path: fuse modules, attach a QAT qconfig, prepare, convert.
    backend = 'qnnpack'
    model.fuse_model()
    model.qconfig = torch.quantization.get_default_qat_qconfig(backend)
    torch.quantization.prepare_qat(model, inplace=True)
    if pretrained:
        _load_weights(arch, model, quant_model_urls.get(arch + '_' + backend, None), progress)
    torch.quantization.convert(model, inplace=True)
    model.eval()
    return model
def main():
    """Build and solve an ORC layout (column > row > flows) and optionally render it.

    Relies on module-level globals: window geometry, the per-widget
    min/pref/max size constants, num_top/num_left, show_window, widgets,
    time_result, and Tk's mainloop.
    """
    print(window_width, window_height)
    top_widgets = []
    left_widgets = []
    # Optional horizontal-flow widgets along the top.
    for i in range(num_top):
        top_widgets.append(ORCWidget(('HF_' + str(i)), [top_button_width_min, top_button_width_pref, top_button_width_max, top_button_height_min, top_button_height_pref, top_button_height_max]))
        top_widgets[(- 1)].set_optional()
    # Optional vertical-flow widgets along the left.
    for i in range(num_top, (num_top + num_left)):
        left_widgets.append(ORCWidget(('VF_' + str(i)), [left_button_width_min, left_button_width_pref, left_button_width_max, left_button_height_min, left_button_height_pref, left_button_height_max]))
        left_widgets[(- 1)].set_optional()
    # Two fixed-size logo boxes; logo2 is optional with a tiny weight.
    logo1 = ORCWidget('logo1', [textbox_width, textbox_width, textbox_width, textbox_height, textbox_height, textbox_height])
    logo2 = ORCWidget('logo2', [textbox_width, textbox_width, textbox_width, textbox_height, textbox_height, textbox_height])
    logo2.set_optional()
    logo2.set_weight(0.0001)
    # Layout tree: pivot -> column -> (horizontal flow 'HF', row -> ('VF', 'VL')).
    pivot = Pivot('p', None, window_width, window_height)
    column = ORCColumn('column', pivot)
    horizonalflow = HorizontalFlow('HF', top_widgets, column)
    row = ORCRow('row', horizonalflow)
    verticalflow = VerticalFlow('VF', left_widgets, row)
    verticallogo = VerticalFlow('VL', [logo1, logo2], verticalflow)
    logo1.set_weight(1e-05)
    pivot.set_layout(column)
    logo2.set_weight(1e-05)
    column.define_sublayouts([horizonalflow, row])
    row.define_sublayouts([verticalflow, verticallogo])
    start = time.time()
    pivot.solve()
    print(('Time: ' + str((time.time() - start))))
    if show_window:
        time_result.insert(0, str((time.time() - start)))
    (best_leaf, best_leaf_result, best_leaf_loss) = pivot.get_best()
    # Row geometry chosen by the solver; each flow is reached by walking up
    # from the best leaf (VL is the leaf, VF its parent, HF three levels up).
    horizonalflow_row_height = best_leaf.parent.parent.parent.best_row_height
    horizonalflow_row_width = best_leaf.parent.parent.parent.best_row_width
    horizonalflow_result_index = best_leaf.parent.parent.parent.best_result_index
    verticalflow_row_height = best_leaf.parent.best_row_height
    verticalflow_row_width = best_leaf.parent.best_row_width
    verticalflow_result_index = best_leaf.parent.best_result_index
    verticallogo_row_height = best_leaf.best_row_height
    verticallogo_row_width = best_leaf.best_row_width
    verticallogo_result_index = best_leaf.best_result_index
    # Solved bounding boxes (left/right/top/bottom) of the three flows.
    HF_l = best_leaf_result['HF_l']
    HF_r = best_leaf_result['HF_r']
    HF_t = best_leaf_result['HF_t']
    HF_b = best_leaf_result['HF_b']
    VF_l = best_leaf_result['VF_l']
    VF_r = best_leaf_result['VF_r']
    VF_t = best_leaf_result['VF_t']
    VF_b = best_leaf_result['VF_b']
    VL_l = best_leaf_result['VL_l']
    VL_r = best_leaf_result['VL_r']
    VL_t = best_leaf_result['VL_t']
    VL_b = best_leaf_result['VL_b']
    # Place the top-flow widgets. A list-valued row width means widgets are
    # laid out row-major; otherwise the flow is column-major.
    left = HF_l
    top = HF_t
    for i in range(len(horizonalflow_result_index)):
        if isinstance(horizonalflow_row_width[i], list):
            for j in range(len(horizonalflow_result_index[i])):
                widget_width = horizonalflow_row_width[i][j]
                widget_height = horizonalflow_row_height[i]
                if show_window:
                    widgets[horizonalflow_result_index[i][j]][0].place(x=left, y=top, width=widget_width, height=widget_height)
                left += widget_width
            left = HF_l
            top += widget_height
        else:
            for j in range(len(horizonalflow_result_index[i])):
                widget_width = horizonalflow_row_width[i]
                widget_height = horizonalflow_row_height[i][j]
                if show_window:
                    widgets[horizonalflow_result_index[i][j]][0].place(x=left, y=top, width=widget_width, height=widget_height)
                top += widget_height
            left += widget_width
            top = HF_t
    # Place the left-flow widgets (global widget indices offset by num_top).
    left = VF_l
    top = VF_t
    for i in range(len(verticalflow_result_index)):
        if isinstance(verticalflow_row_width[i], list):
            for j in range(len(verticalflow_result_index[i])):
                widget_width = verticalflow_row_width[i][j]
                widget_height = verticalflow_row_height[i]
                if show_window:
                    widgets[(verticalflow_result_index[i][j] + num_top)][0].place(x=left, y=top, width=widget_width, height=widget_height)
                left += widget_width
            left = VF_l
            top += widget_height
        else:
            for j in range(len(verticalflow_result_index[i])):
                widget_width = verticalflow_row_width[i]
                widget_height = verticalflow_row_height[i][j]
                if show_window:
                    widgets[(verticalflow_result_index[i][j] + num_top)][0].place(x=left, y=top, width=widget_width, height=widget_height)
                top += widget_height
            left += widget_width
            top = VF_t
    # Place the logo-flow widgets (indices offset by num_top + num_left).
    left = VL_l
    top = VL_t
    for i in range(len(verticallogo_result_index)):
        if isinstance(verticallogo_row_width[i], list):
            for j in range(len(verticallogo_result_index[i])):
                widget_width = verticallogo_row_width[i][j]
                widget_height = verticallogo_row_height[i]
                if show_window:
                    widgets[((verticallogo_result_index[i][j] + num_top) + num_left)][0].place(x=left, y=top, width=widget_width, height=widget_height)
                left += widget_width
            left = VL_l
            top += widget_height
        else:
            for j in range(len(verticallogo_result_index[i])):
                widget_width = verticallogo_row_width[i]
                widget_height = verticallogo_row_height[i][j]
                if show_window:
                    print(verticallogo_result_index[i][j])
                    widgets[((verticallogo_result_index[i][j] + num_top) + num_left)][0].place(x=left, y=top, width=widget_width, height=widget_height)
                top += widget_height
            left += widget_width
            top = VL_t
    if show_window:
        mainloop()
class JobLibProvider(ComputeProvider):
    """ComputeProvider backed by joblib's Parallel/delayed machinery."""

    def __init__(self, n_jobs=-1):
        # -1 is joblib's "use all available cores" convention.
        self.n_jobs = n_jobs

    def parallel(self, compute_fn, compute_args_iter):
        """Run ``compute_fn(*args)`` for every args tuple, in parallel; return all results."""
        runner = Parallel(n_jobs=self.n_jobs)
        return runner(delayed(compute_fn)(*args) for args in compute_args_iter)
def topic_recommendation(json):
    """Validate a recommendations payload; return an (error, status) tuple or None.

    *json* is the parsed request body (the parameter name shadows the stdlib
    module — kept for interface compatibility).  Returns None when every
    validation check passes.

    Fix: check_funcs was a set literal, so the validation functions ran in an
    arbitrary order and the first error reported was non-deterministic; a
    tuple makes the order explicit and reproducible.
    """
    json = json.get('recommendations')
    if not json:
        return ('No recommendations submitted.', 400)
    if len(json) > app.config['max_users_per_recommendation']:
        return ('Requests must not contain more than %s users.' % app.config['max_users_per_recommendation'], 400)
    # Deterministic, intentional check order.
    check_funcs = (nonexistent_users, too_many_recommendations, contains_ineligible_topics, score_is_not_float, duplicate_topic_suggestion)
    for check_func in check_funcs:
        err = check_func(json)
        if err:
            return err
    return None
def test_bin_pack_step__jit(bin_pack: BinPack) -> None:
    """step() must be traced at most once under jit across two calls."""
    chex.clear_trace_counter()
    jitted_step = jax.jit(chex.assert_max_traces(bin_pack.step, n=1))
    state, timestep = bin_pack.reset(jax.random.PRNGKey(0))
    action = bin_pack.action_spec().generate_value()
    # First call compiles; the second must reuse the cached trace.
    _ = jitted_step(state, action)
    state, timestep = jitted_step(state, action)
    assert_type_bin_pack_state(state)
def get_outputscale(kernel):
    """Return the outputscale of a gpytorch ScaleKernel, else None."""
    if not isinstance(kernel, gpytorch.kernels.ScaleKernel):
        return None
    return kernel.outputscale
def EfficientNetV2B3(include_top=False, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, stride_size=2, classifier_activation='softmax', include_preprocessing=True, **kwargs):
    """EfficientNetV2-B3: fixed width/depth coefficients and a 300px default size."""
    # B3-specific scaling; every other argument is forwarded unchanged.
    variant = dict(width_coefficient=1.2, depth_coefficient=1.4, default_size=300, model_name='efficientnetv2-b3')
    return EfficientNetV2(include_top=include_top, weights=weights, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, stride_size=stride_size, classifier_activation=classifier_activation, include_preprocessing=include_preprocessing, **variant, **kwargs)
def aquila_attention_forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: bool=False, use_cache: bool=False) -> Tuple[(torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]])]:
    """Patched Aquila self-attention forward with a block-allocated KV cache.

    Projects hidden_states to per-head Q/K/V, applies rotary embeddings (with
    an XPU fused path at inference), maintains a KV cache grown in
    KV_CACHE_ALLOC_BLOCK_LENGTH increments, and computes softmax attention.
    Returns (attn_output, attn_weights or None, past_key_value or None).
    """
    (bsz, q_len, _) = hidden_states.size()
    # Project to (bsz, num_heads, q_len, head_dim).
    query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
    key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
    value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
    kv_seq_len = key_states.shape[(- 2)]
    if (past_key_value is not None):
        kv_seq_len += past_key_value[0].shape[(- 2)]
    # Fused rotary-embedding kernel on XPU at inference; generic path otherwise.
    if ((query_states.device.type == 'xpu') and (not (self.training and query_states.requires_grad))):
        (query_states, key_states) = apply_rotary_pos_emb_no_cache_xpu(query_states, key_states, position_ids, 'aquila')
    else:
        (cos, sin) = self.rotary_emb(value_states, seq_len=kv_seq_len)
        (query_states, key_states) = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids, 'aquila')
    if (past_key_value is not None):
        cache_k = past_key_value[0]
        cache_v = past_key_value[1]
        if (cache_k.stride()[1] <= (cache_k.size(2) * cache_k.size(3))):
            # Cache apparently has no spare capacity (stride check): allocate a
            # larger block and copy the old contents over.
            (new_cache_k, new_cache_v) = extend_kv_cache(bsz, self.num_heads, self.head_dim, cache_k.size(2), (kv_seq_len + KV_CACHE_ALLOC_BLOCK_LENGTH), dtype=cache_k.dtype, device=hidden_states.device)
            new_cache_k[:] = cache_k
            new_cache_v[:] = cache_v
            cache_k = new_cache_k
            cache_v = new_cache_v
        (key_states, value_states) = append_kv_cache(cache_k, cache_v, key_states, value_states)
    elif use_cache:
        # First cached step: pre-allocate room for future tokens.
        max_cache_length = (kv_seq_len + KV_CACHE_ALLOC_BLOCK_LENGTH)
        (new_key_states, new_value_states) = init_kv_cache(bsz, self.num_heads, self.head_dim, kv_seq_len, max_cache_length, dtype=key_states.dtype, device=hidden_states.device)
        new_key_states[:] = key_states
        new_value_states[:] = value_states
        key_states = new_key_states
        value_states = new_value_states
    past_key_value = ((key_states, value_states) if use_cache else None)
    # Scaled dot-product scores, clamped to +/-1024 before masking.
    attn_weights = (torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim))
    attn_weights = torch.clamp(attn_weights, min=(- 1024.0), max=1024.0)
    if (attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len)):
        log4Error.invalidInputError(f'Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is {attn_weights.size()}')
    if (attention_mask is not None):
        if (attention_mask.size() != (bsz, 1, q_len, kv_seq_len)):
            log4Error.invalidInputError(f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}')
        attn_weights = (attn_weights + attention_mask)
        # Floor masked scores at the dtype minimum so softmax drives them to zero.
        attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min, device=attn_weights.device))
    # Softmax in float32 for stability, then cast back to the query dtype.
    attn_weights = nn.functional.softmax(attn_weights, dim=(- 1), dtype=torch.float32).to(query_states.dtype)
    attn_output = torch.matmul(attn_weights, value_states)
    if (attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim)):
        log4Error.invalidInputError(f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is {attn_output.size()}')
    # Merge heads back to (bsz, q_len, hidden_size) and apply the output projection.
    attn_output = attn_output.transpose(1, 2)
    attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
    attn_output = self.o_proj(attn_output)
    if (not output_attentions):
        attn_weights = None
    return (attn_output, attn_weights, past_key_value)
# NOTE(review): this bare call looks like a registration decorator that lost
# its '@' prefix — as written it does not attach to the class below; confirm
# against the original source.
_registry(operator_type='ListConstruct')
class ListConstruct(Operator):
    """IR operator for a ListConstruct node; no extra attributes beyond the base."""
    def __init__(self):
        super().__init__()
class MinorityCoalescer(AutotabularPreprocessingAlgorithm):
    """Coalesce categorical values rarer than ``minimum_fraction`` into one category.

    Fixes: ``random_state`` was accepted but silently dropped (breaking the
    sklearn get_params/set_params convention); ``self.preprocessor`` was never
    initialised, so calling transform() before fit() raised AttributeError
    instead of the intended NotImplementedError; the two parameter-less
    class-level helpers are now proper ``@staticmethod``s.
    """

    def __init__(self, minimum_fraction: float = 0.01, random_state: Optional[np.random.RandomState] = None):
        self.minimum_fraction = minimum_fraction
        self.random_state = random_state
        # Set by fit(); transform() before fit() must raise NotImplementedError.
        self.preprocessor = None

    def fit(self, X: PIPELINE_DATA_DTYPE, y: Optional[PIPELINE_DATA_DTYPE] = None) -> 'MinorityCoalescer':
        """Fit the underlying coalescer implementation on X; return self."""
        self.minimum_fraction = float(self.minimum_fraction)
        self.preprocessor = autotabular.pipeline.implementations.MinorityCoalescer.MinorityCoalescer(minimum_fraction=self.minimum_fraction)
        self.preprocessor.fit(X, y)
        return self

    def transform(self, X: PIPELINE_DATA_DTYPE) -> PIPELINE_DATA_DTYPE:
        """Apply the fitted coalescer; raises NotImplementedError when unfitted."""
        if self.preprocessor is None:
            raise NotImplementedError()
        return self.preprocessor.transform(X)

    @staticmethod
    def get_properties(dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None) -> Dict[(str, Optional[Union[(str, int, bool, Tuple)]])]:
        """Capability description used by the pipeline for component selection."""
        return {'shortname': 'coalescer', 'name': 'Categorical minority coalescer', 'handles_regression': True, 'handles_classification': True, 'handles_multiclass': True, 'handles_multilabel': True, 'handles_multioutput': True, 'handles_sparse': True, 'handles_dense': True, 'input': (DENSE, SPARSE, UNSIGNED_DATA), 'output': (INPUT,)}

    @staticmethod
    def get_hyperparameter_search_space(dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None) -> ConfigurationSpace:
        """Search space with a single log-uniform minimum_fraction hyperparameter."""
        cs = ConfigurationSpace()
        minimum_fraction = UniformFloatHyperparameter('minimum_fraction', lower=0.0001, upper=0.5, default_value=0.01, log=True)
        cs.add_hyperparameter(minimum_fraction)
        return cs
_torch
class CTRLModelTest(ModelTesterMixin, unittest.TestCase):
    """Test suite for the CTRL model family (CTRLModel, CTRLLMHeadModel)."""

    # Model classes the shared mixin tests iterate over (empty without torch).
    all_model_classes = ((CTRLModel, CTRLLMHeadModel) if is_torch_available() else ())
    all_generative_model_classes = ((CTRLLMHeadModel,) if is_torch_available() else ())
    # Capability flags read by ModelTesterMixin: these common tests are
    # disabled for CTRL.
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    class CTRLModelTester(object):
        """Fabricates tiny random configs/inputs and runs shape checks."""

        def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
            # `parent` is the enclosing TestCase; its assert* helpers are used below.
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_token_type_ids = use_token_type_ids
            self.use_input_mask = use_input_mask
            self.use_labels = use_labels
            self.use_mc_token_ids = use_mc_token_ids
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope

        def prepare_config_and_inputs(self):
            """Build a small CTRLConfig plus random input tensors for one run."""
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
            input_mask = None
            if self.use_input_mask:
                # vocab_size=2 — presumably draws 0/1 mask values; confirm ids_tensor semantics.
                input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
            mc_token_ids = None
            if self.use_mc_token_ids:
                mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)
            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)
            config = CTRLConfig(vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, n_ctx=self.max_position_embeddings)
            # One mask entry per (layer, head).
            head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
            return (config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels)

        def check_loss_output(self, result):
            # A scalar loss tensor has an empty size list.
            self.parent.assertListEqual(list(result['loss'].size()), [])

        def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
            """Run the base model and verify hidden-state shape and past length."""
            model = CTRLModel(config=config)
            model.to(torch_device)
            model.eval()
            # Exercise optional-argument combinations; only the last call's
            # outputs are shape-checked.
            model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
            model(input_ids, token_type_ids=token_type_ids)
            (sequence_output, presents) = model(input_ids)
            result = {'sequence_output': sequence_output, 'presents': presents}
            self.parent.assertListEqual(list(result['sequence_output'].size()), [self.batch_size, self.seq_length, self.hidden_size])
            # One cached present entry per transformer layer.
            self.parent.assertEqual(len(result['presents']), config.n_layer)

        def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
            """Run the LM-head model with labels and verify loss/logits shapes."""
            model = CTRLLMHeadModel(config)
            model.to(torch_device)
            model.eval()
            # Using input_ids as labels yields the standard LM loss.
            (loss, lm_logits, _) = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
            result = {'loss': loss, 'lm_logits': lm_logits}
            self.parent.assertListEqual(list(result['loss'].size()), [])
            self.parent.assertListEqual(list(result['lm_logits'].size()), [self.batch_size, self.seq_length, self.vocab_size])

        def prepare_config_and_inputs_for_common(self):
            """Adapt prepare_config_and_inputs() to the (config, inputs_dict) shape the mixin expects."""
            config_and_inputs = self.prepare_config_and_inputs()
            (config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels) = config_and_inputs
            inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
            return (config, inputs_dict)

    def setUp(self):
        self.model_tester = CTRLModelTest.CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_model_from_pretrained(self):
        # Only the first archive entry is fetched to keep the test cheap.
        for model_name in list(CTRL_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
            model = CTRLModel.from_pretrained(model_name, cache_dir=CACHE_DIR)
            self.assertIsNotNone(model) |
(version='2.0')
def initial_tuning_cfg_with_quant_mode(op_name_type, quant_mode, tuning_space: TuningSpace) -> OpTuningConfig:
    """Build the initial OpTuningConfig for an op under a given quant mode.

    Resolves the default full path for each relevant attribute (activation,
    plus weight when the op has one), records its data type, and seeds every
    tunable method on that path with its first option.
    """
    pattern = pattern_to_internal(quant_mode)
    act_path, weight_path = pattern_to_path(pattern)
    paths = {'activation': act_path, 'weight': weight_path}
    # Ops without a weight tensor only get an activation section.
    attributes = ['activation']
    if op_name_type in tuning_space.ops_attr['weight']:
        attributes.append('weight')
    kwargs = {}
    for attr in attributes:
        resolved = tuning_space.get_default_full_path(op_name_type, paths[attr])
        kwargs[attr + '_dtype'] = tuning_space.ops_data_type[op_name_type].get(resolved, None)
        item = tuning_space.get_item_by_path((op_name_type, *resolved))
        if item:
            # Seed every recognized tuning item with its first available option.
            kwargs.update({opt.name: opt.options[0] for opt in item.options if opt.name in TUNING_ITEMS_LST})
    return OpTuningConfig(op_name_type[0], op_name_type[1], pattern[0], tuning_space, kwargs=kwargs)
def test_interpolation_potential_dens():
    """Check that interpRZPotential reproduces the MWPotential density.

    Covers scalar and vector evaluation, linear and logarithmic R grids, and
    both the z-symmetric and the full-z interpolation setups. Fixes the
    duplicated "of density of density" phrase in the first failure message.
    """
    def _reldens(pot, r, z):
        # Relative deviation of the interpolated density from the exact one.
        true_dens = potential.evaluateDensities(potential.MWPotential, r, z)
        return numpy.fabs((pot.dens(r, z) - true_dens) / true_dens)
    # z-symmetric, linear-R grid.
    rzpot = potential.interpRZPotential(RZPot=potential.MWPotential, rgrid=(0.01, 2.0, 201), zgrid=(0.0, 0.2, 201), logR=False, interpDens=True, zsym=True)
    # These (R,z) points fall on grid nodes, so the error should be ~machine precision.
    for r in numpy.linspace(0.01, 2.0, 21):
        for z in numpy.linspace(-0.2, 0.2, 41):
            densdiff = _reldens(rzpot, r, z)
            assert densdiff < 10.0 ** -10.0, f'RZPot interpolation of density w/ interpRZPotential fails at (R,z) = ({r:g},{z:g}) by {densdiff:g}'
    # Off-node points: interpolation accurate to ~4e-6.
    rs = numpy.linspace(0.01, 2.0, 20)
    zs = numpy.linspace(-0.2, 0.2, 40)
    for r in rs:
        for z in zs:
            densdiff = _reldens(rzpot, r, z)
            assert densdiff < 4.0 * 10.0 ** -6.0, f'RZPot interpolation of density w/ interpRZPotential fails at (R,z) = ({r:g},{z:g}) by {densdiff:g}'
    # Vector input on the same potential.
    mr, mz = numpy.meshgrid(rs, zs)
    mr = mr.flatten()
    mz = mz.flatten()
    assert numpy.all(_reldens(rzpot, mr, mz) < 4.0 * 10.0 ** -6.0), 'RZPot interpolation of density w/ interpRZPotential fails for vector input'
    # Logarithmic R grid.
    rzpot = potential.interpRZPotential(RZPot=potential.MWPotential, rgrid=(numpy.log(0.01), numpy.log(20.0), 251), logR=True, zgrid=(0.0, 0.2, 201), interpDens=True, zsym=True)
    rs = numpy.linspace(0.01, 20.0, 20)
    mr, mz = numpy.meshgrid(rs, zs)
    mr = mr.flatten()
    mz = mz.flatten()
    assert numpy.all(_reldens(rzpot, mr, mz) < 4.0 * 10.0 ** -6.0), 'RZPot interpolation of density w/ interpRZPotential fails for vector input, w/ logR'
    # No z-symmetry.
    rzpot = potential.interpRZPotential(RZPot=potential.MWPotential, rgrid=(0.01, 2.0, 201), zgrid=(-0.2, 0.2, 251), logR=False, interpDens=True, zsym=False)
    rs = numpy.linspace(0.01, 2.0, 20)
    zs = numpy.linspace(-0.2, 0.2, 40)
    mr, mz = numpy.meshgrid(rs, zs)
    mr = mr.flatten()
    mz = mz.flatten()
    assert numpy.all(_reldens(rzpot, mr, mz) < 4.0 * 10.0 ** -6.0), 'RZPot interpolation of density w/ interpRZPotential fails for vector input, w/o zsym'
    # Logarithmic R grid and no z-symmetry.
    rzpot = potential.interpRZPotential(RZPot=potential.MWPotential, rgrid=(numpy.log(0.01), numpy.log(20.0), 251), logR=True, zgrid=(-0.2, 0.2, 201), interpDens=True, zsym=False)
    rs = numpy.linspace(0.01, 20.0, 20)
    zs = numpy.linspace(-0.2, 0.2, 40)
    mr, mz = numpy.meshgrid(rs, zs)
    mr = mr.flatten()
    mz = mz.flatten()
    assert numpy.all(_reldens(rzpot, mr, mz) < 4.0 * 10.0 ** -6.0), 'RZPot interpolation of density w/ interpRZPotential fails for vector input w/o zsym and w/ logR'
    return None
def profile_fvcore(model, input_size=(3, 224, 224), input_dtype=torch.float32, max_depth=4, batch_size=1, detailed=False, force_cpu=False):
    """Profile FLOP and activation counts of ``model`` with fvcore.

    Args:
        model: the torch module to profile; must have at least one parameter.
        input_size: per-sample input shape (without the batch dimension).
        input_dtype: dtype of the synthetic example input.
        max_depth: module-tree depth for the detailed FLOP table.
        batch_size: leading dimension of the synthetic input.
        detailed: when True, print a per-module FLOP breakdown.
        force_cpu: move the model to CPU before profiling.

    Returns:
        (flop_analysis, total_flops, activation_analysis, total_activations)
    """
    if force_cpu:
        model = model.to('cpu')
    # Match the example input's device to wherever the model's parameters live.
    # (Previously the model dtype was also fetched but never used.)
    device = next(model.parameters()).device
    example_input = torch.ones((batch_size,) + input_size, device=device, dtype=input_dtype)
    fca = FlopCountAnalysis(model, example_input)
    aca = ActivationCountAnalysis(model, example_input)
    if detailed:
        print(flop_count_table(fca, max_depth=max_depth))
    return (fca, fca.total(), aca, aca.total())
def optimize_qparams_matmul(layer, cached_inps, cached_outs, test_inp, test_out, batch_size=100):
    """Tune the two input quantizers of a quantized matmul layer by MSE minimization.

    For each cached batch, searches for a multiplicative correction to each
    quantizer's running range and an additive shift to each zero point that
    minimize output MSE against the cached reference outputs; then applies the
    batch-averaged solution and reports test-set MSE before/after.

    NOTE(review): depends on module-level names `opt` (presumably
    scipy.optimize), `methods`, `F`, `np` and `tqdm` — confirm at file level.
    Side effect: the layer's quantizer parameters are left set to the averaged
    solution by the final layer_err() call.
    """
    print('\nOptimize quantization params')
    # Snapshot the original quantizer state; layer_err() rescales from these.
    inp1_range_orig = layer.quantize_input1.running_range.data.clone()
    inp1_zp_orig = layer.quantize_input1.running_zero_point.data.clone()
    inp2_range_orig = layer.quantize_input2.running_range.data.clone()
    inp2_zp_orig = layer.quantize_input2.running_zero_point.data.clone()
    def layer_err(p, inp1, inp2, out):
        # p = [range1_scale, zp1_shift, range2_scale, zp2_shift]: applies the
        # perturbed quantization params in-place, then measures output MSE.
        layer.quantize_input1.running_range.data = (inp1_range_orig * p[0])
        layer.quantize_input1.running_zero_point.data = (inp1_zp_orig + p[1])
        layer.quantize_input2.running_range.data = (inp2_range_orig * p[2])
        layer.quantize_input2.running_zero_point.data = (inp2_zp_orig + p[3])
        yq = layer(inp1, inp2)
        return F.mse_loss(yq, out).item()
    # Identity starting point: unchanged ranges, zero zero-point shift.
    init = np.array([1, 0, 1, 0])
    results = []
    # One independent optimization per cached batch.
    for i in tqdm(range(int((cached_outs.size(0) / batch_size)))):
        cur_inp1 = cached_inps[0][(i * batch_size):((i + 1) * batch_size)]
        cur_inp2 = cached_inps[1][(i * batch_size):((i + 1) * batch_size)]
        cur_out = cached_outs[(i * batch_size):((i + 1) * batch_size)]
        res = opt.minimize((lambda p: layer_err(p, cur_inp1, cur_inp2, cur_out)), init, method=methods[2])
        results.append(res.x)
    # Average the per-batch optima into a single solution.
    mean_res = np.array(results).mean(axis=0)
    print(mean_res)
    mse_before = layer_err(init, test_inp[0], test_inp[1], test_out)
    mse_after = layer_err(mean_res, test_inp[0], test_inp[1], test_out)
    return (mse_before, mse_after) |
class ConstantTimeGenerator(InterArrivalTimeGenerator):
    """Inter-arrival time generator that always yields one fixed duration."""

    def __init__(self, step_duration: float) -> None:
        # The constant gap between consecutive events.
        self.step_duration: float = step_duration

    def next(self) -> float:
        """Time until the next event — always the configured constant."""
        return self.step_duration

    def mean(self) -> float:
        """Mean inter-arrival time; trivially equal to the constant step."""
        return self.step_duration
def main(args):
    """Sync the project version across a git tag, the frontend package.json
    files, and requirements.txt (rendered from requirements.txt.in).

    NOTE(review): relies on module-level `make_version_tuple` and imports of
    `subprocess`, `os`, `json` defined elsewhere in the file.
    Returns 0 (process exit code).
    """
    components = list(make_version_tuple())
    if args.bump:
        # Increment the last version component.
        components[(- 1)] += 1
    version = '.'.join((str(c) for c in components))
    if args.tag:
        # Tag before rewriting files; raises CalledProcessError on git failure.
        subprocess.check_output(['git', 'tag', version])
    # Keep both frontend packages' package.json versions in sync.
    for package_dot_json_loc in ['./frontend/labextension', './frontend/nbextension']:
        package_dot_json = os.path.join(package_dot_json_loc, 'package.json')
        with open(package_dot_json, 'r') as f:
            package_json = json.loads(f.read())
        if (package_json.get('version', None) != version):
            # Only rewrite the file when the version actually changed.
            package_json['version'] = version
            with open(package_dot_json, 'w') as f:
                f.write(json.dumps(package_json, indent=2))
    with open('./requirements.txt.in', 'r') as f:
        template = f.read()
    with open('./requirements.txt', 'w') as f:
        for line in template.splitlines(keepends=True):
            # Comment lines in the template are dropped from the output.
            if line.startswith('#'):
                continue
            # Template lines may carry a {version} placeholder.
            f.write(line.format(version=version))
    return 0 |
class PrivilegeEscalation(Action):
    """Action that attempts to raise the access level on a target host."""

    def __init__(self, name, target, cost, access, process=None, os=None, prob=1.0, req_access=AccessLevel.USER, **kwargs):
        super().__init__(name=name, target=target, cost=cost, prob=prob, req_access=req_access)
        # Access level gained when the escalation succeeds.
        self.access = access
        # OS / process preconditions; None presumably means "unrestricted" — confirm with Action users.
        self.os = os
        self.process = process

    def __str__(self):
        base = super().__str__()
        return f'{base}, os={self.os}, process={self.process}, access={self.access}'

    def __eq__(self, other):
        # Equal only when the base action matches AND all escalation-specific
        # fields agree.
        if not super().__eq__(other):
            return False
        same_process = self.process == other.process
        same_os = self.os == other.os
        same_access = self.access == other.access
        return same_process and same_os and same_access
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.