code stringlengths 101 5.91M |
|---|
class DavisConfig(Config):
    # Evaluation configuration — presumably for the DAVIS benchmark
    # (the 'test-dev' split name suggests so; confirm against usage).
    TARGET_DATASET = 'test-dev'  # dataset split to evaluate on
    SAVE_PATH = 'out'            # directory where results are written
    DATA_PATH = 'data'           # root directory of the input data
    json_path = '../prepare/mask_rcnn_result'  # precomputed Mask R-CNN detections
    minDetScore = 0.05           # detections scored below this are ignored
    ov_threshold = 0.6           # overlap threshold — presumably IoU; TODO confirm
class BaseModel(nn.Module, metaclass=ABCMeta):
    """Abstract base model: one `forward` entry point that dispatches to
    train- or test-mode hooks implemented by subclasses."""

    def init_weights(self):
        """Initialize network weights (implementation elided in this extract)."""

    def forward_train(self, imgs, labels):
        """Training-mode forward pass (implementation elided in this extract)."""

    def forward_test(self, imgs):
        """Inference-mode forward pass (implementation elided in this extract)."""

    def forward(self, imgs, labels, test_mode, **kwargs):
        # Single entry point: route to the test or train path on test_mode.
        if test_mode:
            return self.forward_test(imgs, **kwargs)
        return self.forward_train(imgs, labels, **kwargs)

    def train_step(self, data_batch, optimizer):
        """One training step (implementation elided in this extract)."""

    def val_step(self, data_batch, **kwargs):
        # Validation delegates directly to the inference forward pass.
        output = self.forward_test(**data_batch, **kwargs)
        return output

    def parse_losses(self, losses):
        """Reduce a dict of loss tensors (or lists of tensors) for logging.

        Every entry is mean-reduced; entries whose key contains 'loss' are
        summed into the total. Returns (total_loss_tensor, log_vars) where
        log_vars maps names to Python floats.
        """
        log_vars = OrderedDict()
        for (loss_name, loss_value) in losses.items():
            if isinstance(loss_value, torch.Tensor):
                log_vars[loss_name] = loss_value.mean()
            elif isinstance(loss_value, list):
                log_vars[loss_name] = sum((_loss.mean() for _loss in loss_value))
            else:
                raise TypeError(f'{loss_name} is not a tensor or list of tensors')
        # Only keys containing 'loss' contribute to the optimized total.
        loss = sum((_value for (_key, _value) in log_vars.items() if ('loss' in _key)))
        log_vars['loss'] = loss
        for name in log_vars:
            # .item() converts to plain floats for logger consumption.
            log_vars[name] = log_vars[name].item()
        return (loss, log_vars)
class LogisticTS(BaseLogisticPolicy):
    """Thompson-sampling policy over per-arm logistic reward models."""
    policy_name: str = 'logistic_ts'

    def __post_init__(self) -> None:
        """Initialize the underlying logistic models via the base policy."""
        super().__post_init__()

    def select_action(self, context: np.ndarray) -> np.ndarray:
        """Sample a score per arm and return the top `len_list` arm indices."""
        sampled = [arm.predict_proba_with_sampling(context) for arm in self.model_list]
        scores = np.array(sampled).flatten()
        # Descending order via reversed stable argsort (keeps the original
        # tie-breaking behavior).
        ranked = scores.argsort()[::-1]
        return ranked[:self.len_list]
class DirectiveToken(Token):
    """Token representing a %DIRECTIVE line, with its name, value, and the
    marks delimiting it in the source stream."""
    id = '<directive>'

    def __init__(self, name, value, start_mark, end_mark):
        # Store the source span first, then the directive payload.
        self.start_mark = start_mark
        self.end_mark = end_mark
        self.name = name
        self.value = value
class DataPrefetcher():
    """Wraps a batch iterator and stages each next batch onto the GPU via a
    side CUDA stream, overlapping host-to-device copies with compute."""

    def __init__(self, dataset):
        # `dataset` must be an iterator yielding tensors with a .cuda() method.
        self.dataset = dataset
        self.stream = torch.cuda.Stream()  # side stream for async copies
        self.preload()

    def preload(self):
        """Fetch the next batch and start its async transfer to the GPU."""
        try:
            self.next_input = next(self.dataset)
        except StopIteration:
            # Exhausted: None marks the end (see NOTE in __next__).
            self.next_input = None
            return
        with torch.cuda.stream(self.stream):
            # non_blocking copy runs on self.stream; __next__ synchronizes.
            self.next_input = self.next_input.cuda(non_blocking=True)

    def __next__(self):
        # Ensure the staged copy finished before handing the batch out.
        torch.cuda.current_stream().wait_stream(self.stream)
        input = self.next_input
        # NOTE(review): once the source is exhausted this yields None forever
        # instead of raising StopIteration — callers appear expected to test
        # for None; confirm before using in a plain for-loop.
        self.preload()
        return input

    def __iter__(self):
        return self

    def __len__(self):
        # Delegates to the wrapped dataset (requires it to define __len__).
        return len(self.dataset)
class _MkldnnConvNd(torch.jit.ScriptModule):
    """TorchScript wrapper running a dense nn.Conv*d through MKL-DNN kernels.

    Copies the convolution hyper-parameters from `dense_module` and stores
    the bias (or a zero bias) in MKL-DNN layout; subclasses are expected to
    provide `self.weight` in MKL-DNN layout.
    """
    __constants__ = ['stride', 'padding', 'dilation', 'groups']

    def __init__(self, dense_module):
        super(_MkldnnConvNd, self).__init__()
        self.stride = dense_module.stride
        self.padding = dense_module.padding
        self.dilation = dense_module.dilation
        self.groups = dense_module.groups
        if (dense_module.bias is not None):
            self.register_buffer('bias', dense_module.bias.to_mkldnn())
        else:
            # mkldnn_convolution expects a bias tensor, so register zeros.
            self.register_buffer('bias', torch.zeros([dense_module.weight.size(0)], dtype=torch.float).to_mkldnn())

    # Fix: both decorators were garbled to bare `.script_method` (a syntax
    # error); restore the TorchScript method decorator.
    @torch.jit.script_method
    def __getstate__(self):
        # Serialize in dense layout — MKL-DNN tensors are not picklable.
        return (self.weight.to_dense(), self.bias.to_dense(), self.training)

    @torch.jit.script_method
    def forward(self, x):
        return torch.mkldnn_convolution(x, self.weight, self.bias, self.padding, self.stride, self.dilation, self.groups)
class ASR(sb.core.Brain):
    """SpeechBrain recipe brain: wav2vec2 encoder + linear head trained with
    CTC, using two optimizers (wav2vec2 vs. downstream) with separate
    learning-rate annealing. CER/CTC metrics are tracked outside TRAIN."""

    def compute_forward(self, batch, stage):
        """Compute CTC log-probabilities; returns (p_ctc, wav_lens)."""
        batch = batch.to(self.device)
        (wavs, wav_lens) = batch.sig
        feats = self.modules.wav2vec2(wavs, wav_lens)
        x = self.modules.enc(feats)
        logits = self.modules.output_lin(x)
        p_ctc = self.hparams.log_softmax(logits)
        return (p_ctc, wav_lens)

    def compute_objectives(self, predictions, batch, stage):
        """CTC loss; outside TRAIN also accumulates greedy-decoded CER/CTC stats."""
        batch = batch.to(self.device)
        (chars, char_lens) = batch.char_encoded
        ids = batch.id
        (p_ctc, wav_lens) = predictions
        loss = self.hparams.ctc_cost(p_ctc, chars, wav_lens, char_lens)
        if (stage != sb.Stage.TRAIN):
            sequence = sb.decoders.ctc_greedy_decode(p_ctc, wav_lens, self.hparams.blank_index)
            self.cer_metric.append(ids=ids, predict=sequence, target=chars, target_len=char_lens, ind2lab=self.tokenizer.decode_ndim)
            self.ctc_metric.append(ids, p_ctc, chars, wav_lens, char_lens)
        return loss

    def fit_batch(self, batch):
        """Forward/backward/step for one training batch."""
        stage = sb.Stage.TRAIN
        predictions = self.compute_forward(batch, stage)
        loss = self.compute_objectives(predictions, batch, stage)
        loss.backward()
        # Only step when gradients pass SpeechBrain's sanity check; both
        # optimizers always have their gradients cleared afterwards.
        if self.check_gradients(loss):
            self.optimizer_wav2vec.step()
            self.optimizer.step()
        self.optimizer_wav2vec.zero_grad()
        self.optimizer.zero_grad()
        return loss.detach()

    def evaluate_batch(self, batch, stage):
        """Forward + loss with gradient tracking disabled for the loss."""
        predictions = self.compute_forward(batch, stage=stage)
        with torch.no_grad():
            loss = self.compute_objectives(predictions, batch, stage)
        return loss.detach()

    def init_optimizers(self):
        """Create both optimizers and register them with the checkpointer."""
        self.optimizer_wav2vec = self.hparams.opt_class_wav2vec(self.hparams.model_wav2vec2.parameters())
        self.optimizer = self.hparams.opt_class(self.hparams.model.parameters())
        if (self.checkpointer is not None):
            self.checkpointer.add_recoverable('optimizer_wav2vec', self.optimizer_wav2vec)
            self.checkpointer.add_recoverable('optimizer', self.optimizer)

    def on_stage_start(self, stage, epoch):
        # Fresh metric trackers for every VALID/TEST stage.
        if (stage != sb.Stage.TRAIN):
            self.cer_metric = self.hparams.cer_computer()
            self.ctc_metric = self.hparams.ctc_computer()

    def on_stage_end(self, stage, stage_loss, epoch):
        """Log stage stats; on VALID anneal both LRs and checkpoint on best
        CER; on TEST dump per-utterance CER/CTC reports to files."""
        stage_stats = {'loss': stage_loss}
        if (stage == sb.Stage.TRAIN):
            self.train_stats = stage_stats
        else:
            stage_stats['CER'] = self.cer_metric.summarize('error_rate')
        if (stage == sb.Stage.VALID):
            (old_lr, new_lr) = self.hparams.lr_annealing(stage_stats['loss'])
            (old_lr_wav2vec, new_lr_wav2vec) = self.hparams.lr_annealing_wav2vec(stage_stats['loss'])
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            sb.nnet.schedulers.update_learning_rate(self.optimizer_wav2vec, new_lr_wav2vec)
            self.hparams.train_logger.log_stats(stats_meta={'epoch': epoch, 'lr': old_lr, 'lr_wav2vec': old_lr_wav2vec}, train_stats=self.train_stats, valid_stats=stage_stats)
            self.checkpointer.save_and_keep_only(meta={'CER': stage_stats['CER']}, min_keys=['CER'])
        elif (stage == sb.Stage.TEST):
            self.hparams.train_logger.log_stats(stats_meta={'Epoch loaded': self.hparams.epoch_counter.current}, test_stats=stage_stats)
            # NOTE(review): these read the module-level `hparams` dict, not
            # self.hparams — confirm that global exists where this runs.
            with open(hparams['cer_file_test'], 'w') as w:
                self.cer_metric.write_stats(w)
            with open(hparams['ctc_file_test'], 'w') as w:
                self.ctc_metric.write_stats(w)
def visualize_errors(frames, predictions, targets, fp_mistakes, fn_mistakes):
    """Render 50-frame strips around each FP/FN mistake with overlay marks.

    `frames` is indexed as (N, H, W, C) — presumably uint8 RGB, TODO confirm.
    Returns a PIL Image (one row per mistake window) or None when no
    complete 50-frame window could be extracted.
    """
    (scenes, scene_preds) = ([], [])
    (_, ih, iw, _) = frames.shape
    for mistakes in [fp_mistakes, fn_mistakes]:
        for (start, end) in mistakes:
            # Center a 50-frame window on the midpoint of the mistake span.
            idx = int((start + ((end - start) // 2)))
            scene = frames[max(0, (idx - 25)):][:50]
            scene_pred = predictions[max(0, (idx - 25)):][:50]
            scene_tar = targets[max(0, (idx - 25)):][:50]
            if (len(scene) < 50):
                # Window truncated at the end of the sequence: skip it.
                continue
            scenes.append(scene)
            scene_preds.append((scene_tar, scene_pred))
    if (len(scenes) == 0):
        return None
    # Tile: frames of a scene side by side (axis 1), scenes stacked (axis 0).
    scenes = np.concatenate([np.concatenate(list(scene), 1) for scene in scenes], 0)
    img = Image.fromarray(scenes)
    draw = ImageDraw.Draw(img)
    for (h, preds) in enumerate(scene_preds):
        for (w, (tar, pred)) in enumerate(zip(*preds)):
            if (tar == 1):
                # Red 'T' marks ground-truth positives in the cell corner.
                draw.text(((((w * iw) + iw) - 10), (h * ih)), 'T', fill=(255, 0, 0))
            # Black background bar, then a green bar whose height scales
            # with the prediction value.
            draw.rectangle([((((w * iw) + iw) - 1), (h * ih)), ((((w * iw) + iw) - 4), (((h * ih) + ih) - 1))], fill=(0, 0, 0))
            draw.rectangle([((((w * iw) + iw) - 2), (h * ih)), ((((w * iw) + iw) - 3), ((h * ih) + ((ih - 1) * pred)))], fill=(0, 255, 0))
    return img
class SubNode(NumBinopNode):
    """Binary '-' expression node; special-cases pointer arithmetic when
    inferring the C result type."""

    def compute_c_result_type(self, type1, type2):
        # pointer/array minus integer keeps the pointer type; pointer minus
        # pointer yields ptrdiff_t; anything else falls back to the generic
        # numeric-binop rule.
        lhs_is_pointer = type1.is_ptr or type1.is_array
        if lhs_is_pointer and (type2.is_int or type2.is_enum):
            return type1
        if lhs_is_pointer and (type2.is_ptr or type2.is_array):
            return PyrexTypes.c_ptrdiff_t_type
        return NumBinopNode.compute_c_result_type(self, type1, type2)
def test_has_path_no_path():
    """A traced `count` call must not yield a ('count', 'foobar') usage path."""
    proxy = tt.ObjectProxy([1])
    proxy.count(1)
    assert (tt.UsageTraceNode.from_proxy(proxy).find_path(('count', 'foobar')) is None)
def load_tf_weights_in_xxx(model, config, tf_checkpoint_path):
    """Copy weights from a TensorFlow checkpoint into the PyTorch `model`.

    Walks each TF variable name component-by-component through the PyTorch
    module tree, applying the usual TF->PT renames (kernel/gamma -> weight,
    output_bias/beta -> bias, etc.), transposing 'kernel' matrices.
    Returns the mutated `model`.
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error('Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see for installation instructions.')
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info('Converting TensorFlow checkpoint from {}'.format(tf_path))
    # First pass: load every variable's array from the checkpoint.
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for (name, shape) in init_vars:
        logger.info('Loading TF weight {} with shape {}'.format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)
    for (name, array) in zip(names, arrays):
        name = name.split('/')
        # Optimizer slots and bookkeeping variables are not model weights.
        if any(((n in ['adam_v', 'adam_m', 'global_step']) for n in name)):
            logger.info('Skipping {}'.format('/'.join(name)))
            continue
        pointer = model
        for m_name in name:
            # 'layer_3' style components index into a module list below.
            if re.fullmatch('[A-Za-z]+_\\d+', m_name):
                scope_names = re.split('_(\\d+)', m_name)
            else:
                scope_names = [m_name]
            if ((scope_names[0] == 'kernel') or (scope_names[0] == 'gamma')):
                pointer = getattr(pointer, 'weight')
            elif ((scope_names[0] == 'output_bias') or (scope_names[0] == 'beta')):
                pointer = getattr(pointer, 'bias')
            elif (scope_names[0] == 'output_weights'):
                pointer = getattr(pointer, 'weight')
            elif (scope_names[0] == 'squad'):
                pointer = getattr(pointer, 'classifier')
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    logger.info('Skipping {}'.format('/'.join(name)))
                    # NOTE(review): this `continue` advances the inner
                    # (per-component) loop only, so later components are
                    # still applied to the unchanged pointer — confirm the
                    # intent was to skip the whole variable.
                    continue
            if (len(scope_names) >= 2):
                num = int(scope_names[1])
                pointer = pointer[num]
        if (m_name[(- 11):] == '_embeddings'):
            pointer = getattr(pointer, 'weight')
        elif (m_name == 'kernel'):
            # TF stores dense kernels transposed relative to nn.Linear.
            array = np.transpose(array)
        try:
            assert (pointer.shape == array.shape)
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info('Initialize PyTorch weight {}'.format(name))
        pointer.data = torch.from_numpy(array)
    return model
def evaluate(dataset, predictions, k, no_f1=False):
    """Score top-k answer predictions against a SQuAD-style dataset.

    Args:
        dataset: list of articles, each with 'paragraphs' -> 'qas' entries.
        predictions: mapping from question id to a ranked list of answers.
        k: number of top-ranked predictions considered per question.
        no_f1: when True, skip the (slower) F1 computation.

    Returns:
        dict with percentage 'exact_match', percentage 'f1', and 'ranks'
        mapping question id to the 1-based rank of the first exact match
        (0.0 when there is none).
    """
    count = 0  # number of unanswered questions
    f1 = exact_match = total = 0
    ranks = {}
    for article in dataset:
        for paragraph in article['paragraphs']:
            for qa in paragraph['qas']:
                total += 1
                if (qa['id'] not in predictions):
                    # Unanswered questions score 0 (tallied and reported below;
                    # the previously-built message string was dead code).
                    count += 1
                    continue
                ground_truths = [answer['text'] for answer in qa['answers']]
                prediction = predictions[qa['id']][:k]
                if (len(prediction) == 0):
                    (rank, cur_exact_match) = (None, 0.0)
                else:
                    # Best exact-match score over the top-k candidates, with its index.
                    (rank, cur_exact_match) = max(enumerate((metric_max_over_ground_truths(exact_match_score, each, ground_truths) for each in prediction)), key=(lambda item: item[1]))
                exact_match += cur_exact_match
                ranks[qa['id']] = ((rank + 1) if (cur_exact_match == 1.0) else 0.0)
                if (not no_f1):
                    if (len(prediction) > 0):
                        f1 += max((metric_max_over_ground_truths(f1_score, each, ground_truths) for each in prediction))
    exact_match = ((100.0 * exact_match) / total)
    f1 = ((100.0 * f1) / total)
    if count:
        print(('There are %d unanswered question(s)' % count))
    return {'exact_match': exact_match, 'f1': f1, 'ranks': ranks}
def test_metric_evaluate_y_pred_none():
    """Every metric must reject a None prediction vector with TypeError."""
    metrics = create_metric_list(k, revenue)
    for metric in metrics:
        with pytest.raises(TypeError):
            metric.evaluate(y_true, None)
def filter_text(transcription: str, dataset='train', acronyms=None, acronyms_noi=None):
    """Normalize a speech transcription according to corpus conventions.

    Args:
        transcription: the raw transcription text.
        dataset: which conventions to apply — 'train' (Switchboard-1),
            'eval2000'/'hub5'/'test', or 'fisher'.
        acronyms, acronyms_noi: acronym tables used only in the 'train'
            branch (semantics defined by `map_acronyms`).

    Returns:
        The cleaned, whitespace-collapsed transcription (uppercased only
        in the 'train' branch).

    Raises:
        NameError: if `dataset` is not a supported descriptor.
    """
    dataset = dataset.strip().lower()
    if (dataset == 'train'):
        # Drop [SILENCE] tags and <...> noise markers before swbd1 matching.
        transcription = re.sub('\\[SILENCE\\]', '', transcription, flags=re.IGNORECASE)
        transcription = re.sub('<.*?>', '', transcription)
        transcription = match_swbd1(transcription.strip())
        transcription = re.sub('\\s\\s+', ' ', transcription)
        if (len(transcription) > 0):
            transcription = map_acronyms(acronyms, acronyms_noi, transcription)
            transcription = remove_acronym_symbols(transcription)
            transcription = transcription.upper().strip()
    elif (dataset in ['eval2000', 'hub5', 'test']):
        transcription = match_eval2000(transcription.strip())
    elif (dataset == 'fisher'):
        transcription = match_fisher(transcription.strip())
    else:
        raise NameError(f"Invalid dataset descriptor '{dataset}' supplied.")
    # Collapse any whitespace runs introduced by the substitutions above.
    transcription = re.sub('\\s\\s+', ' ', transcription)
    return transcription.strip()
class Adamax(Optimizer):
    """Adamax optimizer (Adam variant based on the infinity norm),
    implemented with multi-tensor `torch._foreach_*` operations.

    Args:
        params: iterable of parameters or parameter groups.
        lr: learning rate (default 2e-3).
        betas: coefficients for the running average and the infinity norm.
        eps: term added to the denominator for numerical stability.
        weight_decay: L2 penalty coefficient (default 0).
    """

    def __init__(self, params, lr=0.002, betas=(0.9, 0.999), eps=1e-08, weight_decay=0):
        if (not (0.0 <= lr)):
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if (not (0.0 <= eps)):
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if (not (0.0 <= betas[0] < 1.0)):
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if (not (0.0 <= betas[1] < 1.0)):
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        if (not (0.0 <= weight_decay)):
            raise ValueError('Invalid weight_decay value: {}'.format(weight_decay))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        super(Adamax, self).__init__(params, defaults)

    # Fix: the decorator line was garbled to a bare `_grad()` call — restore
    # @torch.no_grad() so the in-place updates do not track autograd.
    @torch.no_grad()
    def step(self, closure=None):
        """Perform one optimization step.

        Args:
            closure: optional callable re-evaluating the model; its return
                value is passed through as the loss.
        """
        loss = None
        if (closure is not None):
            with torch.enable_grad():
                loss = closure()
        for group in self.param_groups:
            grads = []
            params_with_grad = []
            states = []
            exp_avgs = []
            exp_infs = []
            (beta1, beta2) = group['betas']
            eps = group['eps']
            for p in group['params']:
                if (p.grad is not None):
                    if p.grad.is_sparse:
                        raise RuntimeError('Adamax does not support sparse gradients')
                    grads.append(p.grad)
                    params_with_grad.append(p)
                    state = self.state[p]
                    # Lazy per-parameter state initialization on first use.
                    if (len(state) == 0):
                        state['step'] = 0
                        state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                        state['exp_inf'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    exp_avgs.append(state['exp_avg'])
                    exp_infs.append(state['exp_inf'])
                    state['step'] += 1
                    states.append(state)
            if (group['weight_decay'] != 0):
                torch._foreach_add_(grads, params_with_grad, alpha=group['weight_decay'])
            # First moment: exp_avg = beta1 * exp_avg + (1 - beta1) * grad.
            torch._foreach_mul_(exp_avgs, beta1)
            torch._foreach_add_(exp_avgs, grads, alpha=(1 - beta1))
            # Infinity norm: exp_inf = max(beta2 * exp_inf, |grad| + eps).
            torch._foreach_mul_(exp_infs, beta2)
            for (exp_inf, grad) in zip(exp_infs, grads):
                norm_buf = torch.cat([exp_inf.unsqueeze(0), grad.abs().add_(eps).unsqueeze_(0)], 0)
                torch.max(norm_buf, 0, keepdim=False, out=(exp_inf, exp_inf.new().long()))
            bias_corrections = [(1 - (beta1 ** state['step'])) for state in states]
            clr = [(group['lr'] / bias_correction) for bias_correction in bias_corrections]
            for i in range(len(params_with_grad)):
                params_with_grad[i].addcdiv_(exp_avgs[i], exp_infs[i], value=(- clr[i]))
        return loss
def verify(verifier, prover):
    """Run one commit/challenge/response round of an interactive proof.

    The prover commits, the verifier challenges that commitment, the prover
    responds, and the verifier's judgement on the response is returned.
    """
    return verifier.verify(
        prover.compute_response(
            verifier.send_challenge(prover.commit())))
def get_learning_rate(optimizer):
    """Return the 'lr' of the first param group that defines one, else None."""
    return next((group['lr'] for group in optimizer.param_groups if 'lr' in group), None)
class CanineForMultipleChoice(metaclass=DummyObject):
    # Import-time placeholder: any use raises a helpful "torch required"
    # error instead of an opaque ImportError.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def test_ic_neck():
    """ICNeck fuses three input scales into three outputs whose spatial
    size doubles at each level (16x32 -> 32x64 -> 64x128)."""
    neck = ICNeck(in_channels=(4, 16, 16), out_channels=8, norm_cfg=dict(type='SyncBN'), align_corners=False)
    assert _conv_has_norm(neck, sync_bn=True)
    inputs = [torch.randn(1, 4, 32, 64), torch.randn(1, 16, 16, 32), torch.randn(1, 16, 8, 16)]
    neck = ICNeck(in_channels=(4, 16, 16), out_channels=4, norm_cfg=dict(type='BN', requires_grad=True), align_corners=False)
    if torch.cuda.is_available():
        (neck, inputs) = to_cuda(neck, inputs)
    outputs = neck(inputs)
    assert (outputs[0].shape == (1, 4, 16, 32))
    assert (outputs[1].shape == (1, 4, 32, 64))
    # Fix: the last assertion duplicated the outputs[1] check; verify the
    # third (largest) output instead.
    assert (outputs[2].shape == (1, 4, 64, 128))
class LanguageAccept(Accept):
    """Accept-Language header helper: matches with tag normalization and
    progressively looser primary-subtag fallbacks."""

    def _value_matches(self, value, item):
        # '*' matches anything; otherwise compare normalized language tags.
        return ((item == '*') or (_normalize_lang(value) == _normalize_lang(item)))

    def best_match(self, matches, default=None):
        """Best language match with fallbacks.

        Order: exact match; the header's entries reduced to their primary
        subtag (e.g. 'en-US' -> 'en'); then the offered `matches` reduced
        the same way — in that case the first matching full candidate from
        `matches` is returned.
        """
        result = super(LanguageAccept, self).best_match(matches)
        if (result is not None):
            return result
        # Retry with the client's header entries stripped to primary tags.
        fallback = Accept([(_locale_delim_re.split(item[0], 1)[0], item[1]) for item in self])
        result = fallback.best_match(matches)
        if (result is not None):
            return result
        # Retry with the server-offered matches stripped to primary tags.
        fallback_matches = [_locale_delim_re.split(item, 1)[0] for item in matches]
        result = super(LanguageAccept, self).best_match(fallback_matches)
        if (result is not None):
            # Map the primary-tag result back to the first full candidate.
            return next((item for item in matches if item.startswith(result)))
        return default
def _gen_swizzles(cls):
    """Class decorator: install xyzw/rgba/stpq component and swizzle
    properties on `cls`.

    Single-letter accessors (x, r, s, ...) and multi-letter swizzles (xy,
    rgb, ...) become properties with validity checking against the vector's
    size `n`. Returns the decorated class.
    """
    KEYGROUP_SET = ['xyzw', 'rgba', 'stpq']
    cls._swizzle_to_keygroup = {}
    cls._keygroup_to_checker = {}

    def make_valid_attribs_checker(key_group):
        # Build a checker that rejects components beyond the vector's size n.
        def check(instance, pattern):
            valid_attribs = set(key_group[:instance.n])
            pattern_set = set(pattern)
            diff = (pattern_set - valid_attribs)
            if len(diff):
                valid_attribs = tuple(sorted(valid_attribs))
                pattern = tuple(pattern)
                raise TaichiSyntaxError(f'vec{instance.n} only has attributes={valid_attribs}, got={pattern}')
        return check
    for key_group in KEYGROUP_SET:
        cls._keygroup_to_checker[key_group] = make_valid_attribs_checker(key_group)
        for (index, attr) in enumerate(key_group):
            def gen_property(attr, attr_idx, key_group):
                # Factory binds attr/index eagerly, avoiding the classic
                # late-binding-loop-variable pitfall.
                checker = cls._keygroup_to_checker[key_group]

                def prop_getter(instance):
                    checker(instance, attr)
                    return instance[attr_idx]

                # Fix: the decorator was garbled to a bare `_scope` name —
                # restore taichi's @python_scope marker.
                @python_scope
                def prop_setter(instance, value):
                    checker(instance, attr)
                    instance[attr_idx] = value
                return property(prop_getter, prop_setter)
            prop = gen_property(attr, index, key_group)
            setattr(cls, attr, prop)
            cls._swizzle_to_keygroup[attr] = key_group
    for key_group in KEYGROUP_SET:
        # Multi-letter swizzles only (single letters were handled above).
        sw_patterns = _generate_swizzle_patterns(key_group, required_length=4)
        sw_patterns = filter((lambda p: (len(p) > 1)), sw_patterns)
        for prop_key in sw_patterns:
            def gen_property(pattern, key_group):
                checker = cls._keygroup_to_checker[key_group]

                def prop_getter(instance):
                    checker(instance, pattern)
                    res = []
                    for ch in pattern:
                        res.append(instance[key_group.index(ch)])
                    return Vector(res)

                # Fix: same garbled decorator as above.
                @python_scope
                def prop_setter(instance, value):
                    if (len(pattern) != len(value)):
                        raise TaichiRuntimeError(f'value len does not match the swizzle pattern={pattern}')
                    checker(instance, pattern)
                    for (ch, val) in zip(pattern, value):
                        instance[key_group.index(ch)] = val
                prop = property(prop_getter, prop_setter)
                return prop
            prop = gen_property(prop_key, key_group)
            setattr(cls, prop_key, prop)
            cls._swizzle_to_keygroup[prop_key] = key_group
    return cls
def enable_calibration(model):
    """Switch every '*_quantizer' submodule of `model` into calibration mode.

    Quantizers with a calibrator get quantization disabled and calibration
    enabled; quantizers without one are disabled entirely. Each affected
    module is logged.
    """
    logger.info('Enabling Calibration')
    for name, module in model.named_modules():
        if not name.endswith('_quantizer'):
            continue
        if module._calibrator is None:
            module.disable()
        else:
            module.disable_quant()
            module.enable_calib()
        logger.info(f'{name:80}: {module}')
def short_repr(x, n=64):
    """Return repr(x), decoded to text if necessary and truncated to ~n chars
    (keeping the head and tail around an ellipsis)."""
    r = repr(x)
    if isinstance(r, bytes_type):
        # Prefer UTF-8; latin1 never fails, so it serves as the fallback.
        try:
            r = text_type(r, 'utf-8')
        except UnicodeDecodeError:
            r = text_type(r, 'latin1')
    if len(r) <= n:
        return r
    half = n // 2
    return r[:half] + '...' + r[len(r) - half:]
def test_inclusive_policy_negative_examples_3(digraph, features_1d, labels):
    """Negative-example mask for node '2.1' under the inclusive policy."""
    policy = InclusivePolicy(digraph, features_1d, labels)
    ground_truth = [True, True, False, False, False, False, True, True]
    result = policy.negative_examples('2.1')
    assert_array_equal(ground_truth, result)
# Fix: the decorator was garbled to a bare `.parametrize(...)` (a syntax
# error); restore the pytest parametrization marker.
@pytest.mark.parametrize('task_name', [tn for tn in ((all_tasks - julia_tasks) - noref_tasks)])
def test_reference_posterior_exists(task_name):
    """Each referenced (non-Julia) task must ship non-empty, 2-D reference
    posterior samples for observation 1."""
    task = get_task(task_name)
    reference_samples = task.get_reference_posterior_samples(num_observation=1)
    assert hasattr(reference_samples, 'shape')
    assert (len(reference_samples.shape) == 2)
    assert (reference_samples.shape[0] > 0)
def get_verifytype(html):
    """Map a page's HTML to a verification level: 2, 1, or 0.

    The more specific marker is checked first, since 'icon_pf_approve' is a
    substring of 'icon_pf_approve_co'.
    """
    for marker, level in (('icon_pf_approve_co', 2), ('icon_pf_approve', 1)):
        if marker in html:
            return level
    return 0
def mask_adj_out(adj, max_distance, coordinates, return_xarray=False):
    """Keep only adjacency entries within a lat/lon box around each node.

    Args:
        adj: square (#nodes x #nodes) adjacency matrix.
        max_distance: half-width of the box kept around each node, per axis.
        coordinates: per-node (lat, lon) pairs used as the 'cord' coordinate.
        return_xarray: return the masked DataArray instead of raw values.
    """
    n_nodes = adj.shape[0]
    assert (n_nodes == adj.shape[1]), 'Adjacency matrix must be #Nodes x #Nodes'
    tmp = xa.DataArray(adj, dims=('x1', 'cord'), coords={'x1': range(n_nodes), 'cord': coordinates})
    new_adj = np.zeros((n_nodes, n_nodes))
    new_adj = xa.DataArray(new_adj, dims=('x1', 'cord'), coords={'x1': range(n_nodes), 'cord': coordinates})
    for i in range(n_nodes):
        node = coordinates[i]
        (lat, lon) = (node[0], node[1])
        # NOTE(review): the selection keys are 'lat'/'lon' while the declared
        # dims are ('x1', 'cord') — this only works if 'cord' carries a
        # MultiIndex with lat/lon levels; confirm how `coordinates` is built.
        neighbors = {'lat': slice((lat - max_distance), (lat + max_distance)), 'lon': slice((lon - max_distance), (lon + max_distance))}
        new_adj.loc[(i, neighbors)] = tmp.loc[(i, neighbors)]
    if (not return_xarray):
        return new_adj.values
    return new_adj
def generator_loss(loss_func, fake):
    """Generator-side adversarial loss for discriminator logits on fakes.

    Args:
        loss_func: loss variant name — any name containing 'wgan', plus
            'lsgan', 'gan', 'dragan', or 'hinge'.
        fake: discriminator logits for generated samples.

    Returns:
        Scalar loss tensor (0 for unrecognized loss names).
    """
    fake_loss = 0
    # Idiom fix: membership test via `in`, not a direct __contains__ call.
    if 'wgan' in loss_func:
        fake_loss = (- tf.reduce_mean(fake))
    if (loss_func == 'lsgan'):
        # Least-squares GAN: push fake logits toward 1.
        fake_loss = tf.reduce_mean(tf.squared_difference(fake, 1.0))
    if ((loss_func == 'gan') or (loss_func == 'dragan')):
        # Non-saturating sigmoid cross-entropy loss.
        fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(fake), logits=fake))
    if (loss_func == 'hinge'):
        fake_loss = (- tf.reduce_mean(fake))
    loss = fake_loss
    return loss
class MoEModelOutputWithPastAndCrossAttentions(ModelOutput):
    """Model output for MoE decoders: hidden states, KV cache, attention
    maps, cross-attentions, and per-layer router probabilities."""
    last_hidden_state: torch.FloatTensor = None  # final-layer hidden states
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None  # KV cache for fast decoding
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None  # per-layer hidden states, if requested
    attentions: Optional[Tuple[torch.FloatTensor]] = None  # per-layer self-attention weights
    cross_attentions: Optional[Tuple[torch.FloatTensor]] = None  # decoder cross-attention weights
    router_probs: Optional[Tuple[torch.FloatTensor]] = None  # MoE router probabilities per layer
def init_classifier(dataset):
    """Build the classifier architecture matching the named dataset:
    LeNet-5 for MNIST, ResNet for SVHN/Fashion, MLP otherwise."""
    d = data.to_dataset(dataset)
    if dataset == 'mnist':
        return lenet.LeNet5()
    if dataset in ('svhn', 'fashion'):
        return resnet.ResNet(d.nc, d.ny)
    return linear.MLP(d.nx, d.ny)
def is_valid_total_length(index, date_to_sent_mapping, all_sent_dates, timeline_properties):
    """True iff the sentence date at `index` falls inside the timeline window
    and the timeline still has room for another sentence."""
    date = all_sent_dates[index]
    if not (timeline_properties.start <= date <= timeline_properties.end):
        return False
    used = sum(len(sents) for sents in date_to_sent_mapping.values())
    return used < timeline_properties.num_sentences
def syncMain():
    """Browse for '_ble_relay_recv._tcp' Zeroconf services and block for input."""
    zConf = Zeroconf()
    bleRelay = BLERelay()  # service listener receiving add/remove callbacks
    browser = ServiceBrowser(zConf, '_ble_relay_recv._tcp.local.', bleRelay)
    DBG('Looking for ble relay receivers', logLevel=LogLevel.DEBUG)
    # Blocks the main thread; EOF (CTRL-D) raises EOFError and ends the run.
    input('Cancel with CTRL-D')
def ShenfunFile(name, T, backend='hdf5', mode='r', mesh='quadrature', **kw):
    """Return an HDF5File or NCFile for storing fields on the space `T`.

    The file's domain is T's squeezed mesh. NetCDF cannot hold complex data,
    so `forward_output` must be falsy when the NetCDF backend is selected.
    """
    if (backend.lower() == 'hdf5'):
        return HDF5File((name + '.h5'), domain=[np.squeeze(d) for d in T.mesh(kind=mesh)], mode=mode, **kw)
    assert (kw.get('forward_output', False) is False), 'NetCDF4 cannot store complex arrays, use HDF5'
    return NCFile((name + '.nc'), domain=[np.squeeze(d) for d in T.mesh(kind=mesh)], mode=mode, **kw)
class Schedule_Autofz(Schedule_Base):
    """AutoFz scheduler: alternates a preparation phase (all fuzzers) with a
    focus phase (CPU time granted by bitmap contribution), adapting the
    winner threshold AIMD-style (add on winner, halve otherwise).

    Relies on module globals START_TIME, OUTPUT, PARALLEL, and JOBS — set
    by the surrounding driver script.
    """

    def __init__(self, fuzzers, prep_time=300, focus_time=300, diff_threshold=10):
        super().__init__(fuzzers=fuzzers, prep_time=prep_time, focus_time=focus_time)
        self.name = f'Autofz_{prep_time}_{focus_time}_AIMD_DT{diff_threshold}'
        self.policy_bitmap = policy.BitmapPolicy()
        self.focused_round = []            # fuzzers focused in the current round
        self.picked_times = {}             # per-fuzzer count of focus selections
        self.before_prep_fuzzer_info = empty_fuzzer_info(self.fuzzers)
        self.find_new_round = False
        self.diff_threshold = diff_threshold        # current AIMD threshold
        self.diff_threshold_base = diff_threshold   # additive increment
        self.diff_threshold_round = diff_threshold  # snapshot logged per round
        self.diff_round = 0
        self.has_winner_round = False
        self.dynamic_prep_time_round = 0
        self.dynamic_focus_time_round = 0

    def pre_round(self):
        """Reset per-round state; wait for all fuzzers to report coverage.

        Returns False (round skipped) while fuzzer info is unavailable;
        aborts the whole run after 600s without coverage.
        """
        self.round_start_time = time.time()
        update_success = maybe_get_fuzzer_info(fuzzers=self.fuzzers)
        if (not update_success):
            SLEEP = 10
            logger.info(f'wait for all fuzzer having coverage, sleep {SLEEP} seconds')
            sleep(SLEEP)
            global START_TIME
            elasp = (time.time() - START_TIME)
            if (elasp > 600):
                # No fuzzer produced coverage within 10 minutes: give up.
                terminate_autofz()
        self.prep_time_round = 0
        self.focus_time_round = 0
        self.dynamic_prep_time_round = 0
        self.dynamic_focus_time_round = 0
        self.focused_round = []
        self.has_winner_round = False
        return update_success

    def one_round(self):
        """Run one prep+focus cycle and append a full round record to the log."""
        round_start_time = time.time()
        self.diff_threshold_round = self.diff_threshold
        global OUTPUT
        do_sync(self.fuzzers, OUTPUT)
        if self.first_round:
            # No baseline exists yet on the very first round.
            fuzzer_info = empty_fuzzer_info(self.fuzzers)
        else:
            fuzzer_info = get_fuzzer_info(self.fuzzers)
        self.before_prep_fuzzer_info = fuzzer_info
        logger.debug(f'before_fuzzer_info: {self.before_prep_fuzzer_info}')
        prep_fuzzers = self.fuzzers
        self.prep_fuzzers = prep_fuzzers
        logger.info(f'round {self.round_num} preparation phase')
        # Preparation: run every fuzzer; has_winner means one fuzzer pulled
        # ahead by more than the current diff threshold.
        if PARALLEL:
            has_winner = self.prep_parallel()
        else:
            has_winner = self.prep_round_robin()
        prep_end_time = time.time()
        fuzzer_info = get_fuzzer_info(self.fuzzers)
        after_prep_fuzzer_info = fuzzer_info
        logger.debug(f'after_fuzzer_info: {after_prep_fuzzer_info}')
        bitmap_diff = fuzzer_bitmap_diff(self.fuzzers, self.before_prep_fuzzer_info, after_prep_fuzzer_info)
        self.add_bitmap_prep_contribution(prep_fuzzers, self.before_prep_fuzzer_info, after_prep_fuzzer_info)
        logger.debug(f'BITMAP_DIFF: {bitmap_diff}')
        logger.debug(f'BITMAP_PREP_CONTRIBUTION: {self.bitmap_contribution}')
        (picked_fuzzers, cpu_assign) = ([], {})
        # AIMD: a clear winner raises the threshold additively; otherwise
        # halve it and split CPU by bitmap intersection.
        if has_winner:
            (picked_fuzzers, cpu_assign) = self.policy_bitmap.calculate_cpu(prep_fuzzers, after_prep_fuzzer_info, JOBS)
            self.diff_threshold += self.diff_threshold_base
        else:
            (picked_fuzzers, cpu_assign) = self.calculate_cpu_bitmap_intersection(prep_fuzzers, after_prep_fuzzer_info, self.focus_time)
            self.diff_threshold *= 0.5
        for fuzzer in picked_fuzzers:
            self.picked_times[fuzzer] += 1
        self.cov_before_focus = after_prep_fuzzer_info
        do_sync(self.fuzzers, OUTPUT)
        # Early winners donate their unused prep time to the focus phase.
        if has_winner:
            self.dynamic_focus_time_round = ((self.prep_time - self.dynamic_prep_time_round) + self.focus_time)
        else:
            self.dynamic_focus_time_round = self.focus_time
        logger.debug(f'prep time: {self.dynamic_prep_time_round}, focus time: {self.dynamic_focus_time_round}')
        find_new = False
        focus_start_time = time.time()
        logger.info(f'round {self.round_num} focus phase')
        if PARALLEL:
            find_new = self.focus_cpu_assign_parallel(cpu_assign, self.dynamic_focus_time_round)
        else:
            logger.debug('scheduling focus session')
            find_new = self.focus_cpu_assign(cpu_assign, self.dynamic_focus_time_round)
        logger.debug(f'find new is {find_new}')
        focus_end_time = time.time()
        focus_elasp = (focus_end_time - focus_start_time)
        logger.debug(f'focus elasp: {focus_elasp} seconds')
        self.find_new_round = find_new
        after_focus_fuzzer_info = get_fuzzer_info(self.fuzzers)
        logger.debug(f'focused_round: {self.focused_round}')
        # Invariant: prep + focus always consume the full round budget.
        assert ((self.dynamic_prep_time_round + self.dynamic_focus_time_round) == (self.prep_time + self.focus_time))
        append_log('round', {'round_num': self.round_num, 'start_time': round_start_time, 'prep_end_time': prep_end_time, 'focus_start_time': focus_start_time, 'focus_end_time': focus_end_time, 'end_time': time.time(), 'prep_time': self.prep_time_round, 'focus_time': self.focus_time_round, 'dynamic_prep_time': self.dynamic_prep_time_round, 'dynamic_focus_time': self.dynamic_focus_time_round, 'first_round': self.first_round, 'before_prep_fuzzer_info': compress_fuzzer_info(self.fuzzers, self.before_prep_fuzzer_info), 'before_focus_fuzzer_info': compress_fuzzer_info(self.fuzzers, after_prep_fuzzer_info), 'after_focus_fuzzer_info': compress_fuzzer_info(self.fuzzers, after_focus_fuzzer_info), 'picked_fuzzers': picked_fuzzers, 'prep_fuzzers': prep_fuzzers, 'picked_times': self.picked_times, 'cpu_assign': cpu_assign, 'has_winner': self.has_winner_round, 'diff': self.diff_round, 'diff_threshold': self.diff_threshold_round})

    def post_round(self):
        """Bookkeeping at round end: timing log and round counter advance."""
        now = time.time()
        elasp = (now - self.round_start_time)
        logger.debug(f'round elasp: {elasp} seconds')
        self.first_round = False
        self.round_num += 1

    def pre_run(self) -> bool:
        """One-time setup: zero all contribution counters. Always True."""
        logger.info(f'{self.name}: pre_run')
        logger.info(f'diff_threshold {self.diff_threshold}')
        self.reset_bitmap_contribution()
        for fuzzer in self.fuzzers:
            self.all_bitmap_contribution[fuzzer] = Bitmap.empty()
            self.picked_times[fuzzer] = 0
        return True
def register_Ns3MeshWifiInterfaceMacPlugin_methods(root_module, cls):
    """Register constructors and pure-virtual methods of
    ns3::MeshWifiInterfaceMacPlugin for the Python bindings.

    Auto-generated pybindgen registration code — regenerate rather than
    hand-edit if the C++ interface changes.
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::MeshWifiInterfaceMacPlugin const &', 'arg0')])
    cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('Receive', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::WifiMacHeader const &', 'header')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetParent', 'void', [param('ns3::Ptr< ns3::MeshWifiInterfaceMac >', 'parent')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('UpdateBeacon', 'void', [param('ns3::MeshWifiBeacon &', 'beacon')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('UpdateOutcomingFrame', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::WifiMacHeader &', 'header'), param('ns3::Mac48Address', 'from'), param('ns3::Mac48Address', 'to')], is_pure_virtual=True, is_virtual=True)
    return
class TestUnitImpulse(object):
    """Tests for waveforms.unit_impulse: placement, 'mid' keyword, dtypes."""

    def test_no_index(self):
        """Default places the 1 at index 0, for both 1-D and 2-D shapes."""
        assert_array_equal(waveforms.unit_impulse(7), [1, 0, 0, 0, 0, 0, 0])
        assert_array_equal(waveforms.unit_impulse((3, 3)), [[1, 0, 0], [0, 0, 0], [0, 0, 0]])

    def test_index(self):
        """Explicit index: scalar for 1-D, tuple for N-D; a scalar index on
        an N-D shape is broadcast to every axis."""
        assert_array_equal(waveforms.unit_impulse(10, 3), [0, 0, 0, 1, 0, 0, 0, 0, 0, 0])
        assert_array_equal(waveforms.unit_impulse((3, 3), (1, 1)), [[0, 0, 0], [0, 1, 0], [0, 0, 0]])
        imp = waveforms.unit_impulse((4, 4), 2)
        assert_array_equal(imp, np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 0]]))

    def test_mid(self):
        """'mid' centers the impulse along every axis."""
        assert_array_equal(waveforms.unit_impulse((3, 3), 'mid'), [[0, 0, 0], [0, 1, 0], [0, 0, 0]])
        assert_array_equal(waveforms.unit_impulse(9, 'mid'), [0, 0, 0, 0, 1, 0, 0, 0, 0])

    def test_dtype(self):
        """Default dtype is floating; int and complex dtypes are honored."""
        imp = waveforms.unit_impulse(7)
        assert_(np.issubdtype(imp.dtype, np.floating))
        imp = waveforms.unit_impulse(5, 3, dtype=int)
        assert_(np.issubdtype(imp.dtype, np.integer))
        imp = waveforms.unit_impulse((5, 2), (3, 1), dtype=complex)
        assert_(np.issubdtype(imp.dtype, np.complexfloating))
def train(args, net, env):
    """Train `net` inside a TF1 session, optionally interleaving iLQR rollouts.

    Restores a checkpoint when args.ckpt_name is set, anneals the KL weight
    over early epochs, decays the learning rate when validation loss stalls,
    and saves a checkpoint after every epoch.
    """
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=5)
        if (len(args.ckpt_name) > 0):
            saver.restore(sess, os.path.join(args.save_dir, args.ckpt_name))
        # Normalization statistics live in the graph; fetch them for the
        # replay memory so it can standardize states/inputs consistently.
        shift = sess.run(net.shift)
        scale = sess.run(net.scale)
        shift_u = sess.run(net.shift_u)
        scale_u = sess.run(net.scale_u)
        replay_memory = ReplayMemory(args, shift, scale, shift_u, scale_u, env, net, sess, predict_evolution=True)

        def val_loss(kl_weight):
            # Average cost over the whole validation set at this KL weight.
            replay_memory.reset_batchptr_val()
            loss = 0.0
            for b in range(replay_memory.n_batches_val):
                batch_dict = replay_memory.next_batch_val()
                x = batch_dict['states']
                u = batch_dict['inputs']
                feed_in = {}
                feed_in[net.x] = np.reshape(x, (((2 * args.batch_size) * args.seq_length), args.state_dim))
                feed_in[net.u] = u
                feed_in[net.kl_weight] = kl_weight
                feed_out = net.cost
                cost = sess.run(feed_out, feed_in)
                loss += cost
            return (loss / replay_memory.n_batches_val)

        # With iLQR, train/rollout/retrain for up to 10 outer loops.
        n_loops = (10 if args.ilqr else 1)
        for n in range(n_loops):
            if (n >= 1):
                # Fresh rollouts were collected: re-init weights and refresh
                # normalization stats from the replay memory.
                tf.global_variables_initializer().run()
                sess.run(tf.assign(net.shift, replay_memory.shift_x))
                sess.run(tf.assign(net.scale, replay_memory.scale_x))
                sess.run(tf.assign(net.shift_u, replay_memory.shift_u))
                sess.run(tf.assign(net.scale_u, replay_memory.scale_u))
            old_score = 1e+20
            print('setting learning rate to ', args.learning_rate)
            sess.run(tf.assign(net.learning_rate, args.learning_rate))
            lr = args.learning_rate
            anneal_time = 5
            T = (anneal_time * replay_memory.n_batches_train)  # KL ramp horizon
            count = 0
            count_decay = 0
            decay_epochs = []
            for e in range(args.num_epochs):
                visualize_predictions(args, sess, net, replay_memory, env, e)
                loss = 0.0
                kl_loss = 0.0
                pred_loss = 0.0
                loss_count = 0
                b = 0
                replay_memory.reset_batchptr_train()
                while (b < replay_memory.n_batches_train):
                    start = time.time()
                    # KL warm-up: tiny weight for 3 epochs, then a linear
                    # ramp up to args.kl_weight over T batches.
                    if (e < 3):
                        kl_weight = 1e-06
                    else:
                        count += 1
                        kl_weight = min(args.kl_weight, (1e-06 + ((args.kl_weight * count) / float(T))))
                    batch_dict = replay_memory.next_batch_train()
                    x = batch_dict['states']
                    u = batch_dict['inputs']
                    feed_in = {}
                    feed_in[net.x] = np.reshape(x, (((2 * args.batch_size) * args.seq_length), args.state_dim))
                    feed_in[net.u] = u
                    feed_in[net.kl_weight] = kl_weight
                    feed_out = [net.cost, net.kl_loss, net.pred_loss, net.train]
                    out = sess.run(feed_out, feed_in)
                    loss += out[0]
                    kl_loss += out[1]
                    pred_loss += out[2]
                    loss_count += 1
                    end = time.time()
                    b += 1
                    # Report and reset running averages every 100 global batches.
                    if (((((e * replay_memory.n_batches_train) + b) % 100) == 0) and (b > 0)):
                        print('{}/{} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}'.format(((e * replay_memory.n_batches_train) + b), (args.num_epochs * replay_memory.n_batches_train), e, (loss / loss_count), (end - start)))
                        print('{}/{} (epoch {}), pred_loss = {:.3f}, time/batch = {:.3f}'.format(((e * replay_memory.n_batches_train) + b), (args.num_epochs * replay_memory.n_batches_train), e, (pred_loss / loss_count), (end - start)))
                        print('{}/{} (epoch {}), kl_loss = {:.3f}, time/batch = {:.3f}'.format(((e * replay_memory.n_batches_train) + b), (args.num_epochs * replay_memory.n_batches_train), e, (kl_loss / loss_count), (end - start)))
                        print('')
                        loss = 0.0
                        kl_loss = 0.0
                        pred_loss = 0.0
                        loss_count = 0
                score = val_loss(kl_weight)
                print('Validation Loss: {0:f}'.format(score))
                b = 0
                # Decay the LR whenever validation worsens (after epoch 8);
                # stop early on repeated back-to-back decays or a floor LR.
                if (((old_score - score) < (- 0.01)) and (e >= 8)):
                    count_decay += 1
                    decay_epochs.append(e)
                    if ((len(decay_epochs) >= 3) and (np.sum(np.diff(decay_epochs)[(- 2):]) == 2)):
                        break
                    lr = (args.learning_rate * (args.decay_rate ** count_decay))
                    if (args.ilqr and (lr < 5e-05)):
                        break
                    print('setting learning rate to ', lr)
                    sess.run(tf.assign(net.learning_rate, lr))
                checkpoint_path = os.path.join(args.save_dir, (args.save_name + '.ckpt'))
                saver.save(sess, checkpoint_path, global_step=e)
                print('model saved to {}'.format(checkpoint_path))
                old_score = score
            if args.ilqr:
                # Gather fresh on-policy data; stop once rollouts succeed.
                avg_reward = perform_rollouts(args, net, env, sess, replay_memory)
                print('Number of training batches now is:', replay_memory.n_batches_train)
                if (avg_reward > 180.0):
                    break
def _dispatch(tree, symbols, inferred_symbols):
try:
tree = iter(tree)
for t in tree:
_dispatch(t, symbols, inferred_symbols)
except TypeError:
current_module = sys.modules[__name__]
meth = getattr(current_module, ('_' + tree.__class__.__name__))
return meth(tree, symbols, inferred_symbols) |
def _print_alignment(alignment, a, b, empty_symbol='<eps>', separator=' ; ', file=sys.stdout):
a_padded = []
b_padded = []
ops_padded = []
for (op, i, j) in alignment:
op_string = str(op)
a_string = (str(a[i]) if (i is not None) else empty_symbol)
b_string = (str(b[j]) if (j is not None) else empty_symbol)
pad_length = max(len(op_string), len(a_string), len(b_string))
a_padded.append(a_string.center(pad_length))
b_padded.append(b_string.center(pad_length))
ops_padded.append(op_string.center(pad_length))
print(separator.join(a_padded), file=file)
print(separator.join(ops_padded), file=file)
print(separator.join(b_padded), file=file) |
# NOTE(review): this line originally read as a bare `_model_architecture(...)`
# call — the leading `@register` of the decorator was truncated (the same
# truncation hit `@pytest.mark.parametrize` decorators elsewhere in this
# file).  Restored as fairseq's architecture-registration decorator.
@register_model_architecture('transformer_lm', 'transformer_lm_gpt2_small')
def transformer_lm_gpt2_small(args):
    """GPT-2 small hyper-parameters for the transformer LM.

    Each value is a default only: `getattr` keeps any value already set on
    `args` (e.g. from the command line).  Delegates remaining defaults to
    ``base_lm_architecture``.
    """
    args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024)
    args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096)
    args.decoder_layers = getattr(args, 'decoder_layers', 24)
    args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)
    args.dropout = getattr(args, 'dropout', 0.1)
    args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
    args.activation_fn = getattr(args, 'activation_fn', 'gelu_accurate')
    base_lm_architecture(args)
class FNetTokenizerFast(PreTrainedTokenizerFast):
    """Fast FNet tokenizer backed by a sentencepiece-style vocabulary.

    Wraps the Rust tokenizer; `save_vocabulary` copies the original
    sentencepiece model file when it is available.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'token_type_ids']
    slow_tokenizer_class = FNetTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=True, unk_token='<unk>', sep_token='[SEP]', pad_token='<pad>', cls_token='[CLS]', mask_token='[MASK]', **kwargs):
        # The mask token behaves like a normal word: it absorbs the space
        # before it but not after.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False) if isinstance(mask_token, str) else mask_token
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # Simplified from `False if (not self.vocab_file) else True`.
        self.can_save_slow_tokenizer = bool(self.vocab_file)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Add special tokens: [CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Segment ids: 0 for [CLS] A [SEP], 1 for B [SEP] when present."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece vocab file into *save_directory*.

        Raises ValueError when the tokenizer was created from a
        `tokenizer_file` only (no slow-tokenizer vocab to save).
        """
        if not self.can_save_slow_tokenizer:
            # Bug fix: previously fell through to os.path.abspath(None) and
            # crashed with an opaque TypeError when vocab_file was unset.
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the '
                'vocabulary for a slow tokenizer.'
            )
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(save_directory, ((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
class Linear(object):
    """Affine layer for Theano graphs: y = x.W (+ b).

    `params` lists the trainable shared variables (W, and b when enabled).
    """

    def __init__(self, input_size, output_size, weight_init=Uniform(), name='', biases=True):
        self.W = theano.shared(weight_init((input_size, output_size)), name='%s_W' % name)
        if biases:
            self.b = theano.shared(weight_init(output_size), name='%s_b' % name)
            self.params = [self.W, self.b]
        else:
            self.b = None
            self.params = [self.W]

    def __call__(self, x):
        out = T.dot(x, self.W)
        if self.b is None:
            return out
        return out + self.b
def test_python_iterator_in_cpp():
    """C++ bindings consume Python iterables and iterators correctly."""
    triple = (1, 2, 3)
    # Both an iterable and an explicit iterator convert to a list.
    assert m.object_to_list(triple) == [1, 2, 3]
    assert m.object_to_list(iter(triple)) == [1, 2, 3]
    assert m.iterator_to_list(iter(triple)) == [1, 2, 3]

    # Non-iterables are rejected with the expected messages.
    with pytest.raises(TypeError) as excinfo:
        m.object_to_list(1)
    assert 'object is not iterable' in str(excinfo.value)
    with pytest.raises(TypeError) as excinfo:
        m.iterator_to_list(1)
    assert 'incompatible function arguments' in str(excinfo.value)

    # Errors raised by __next__ must propagate through py::iterator.
    def raise_on_next():
        raise RuntimeError('py::iterator::advance() should propagate errors')

    with pytest.raises(RuntimeError) as excinfo:
        m.iterator_to_list(iter(raise_on_next, None))
    assert str(excinfo.value) == 'py::iterator::advance() should propagate errors'

    values = [1, None, 0, None]
    assert m.count_none(values) == 2
    assert m.find_none(values) is True
    assert m.count_nonzeros({'a': 0, 'b': 1, 'c': 2}) == 2

    nums = range(5)
    assert all(m.tuple_iterator(tuple(nums)))
    assert all(m.list_iterator(list(nums)))
    assert all(m.sequence_iterator(nums))
def process_mat(mat):
    """Convert a loaded .mat structure into a list of per-video results.

    Iterates `mat['Tags'][0]` (presumably one entry per video — confirm
    against the .mat schema) and delegates each to `process_video_mat`.
    """
    return [process_video_mat(video_entry[0]) for video_entry in mat['Tags'][0]]
class DIV2KJPEG(DIV2KSR):
    """DIV2K variant whose LQ images are JPEG-compressed at `opt.quality`."""

    def __init__(self, phase, opt):
        # Record quality before base-class init (get_subdir presumably runs
        # during base initialisation — confirm against DIV2KSR).
        self.quality = opt.quality
        super().__init__(phase, opt)

    def get_subdir(self):
        """Return (HQ, LQ) sub-directory names for this quality level."""
        hq_dir = 'DIV2K_train_HR'
        lq_dir = 'DIV2K_train_JPEG/{}'.format(self.quality)
        return hq_dir, lq_dir
class GradedModularFormElement(ModuleElement):
    """Element of a graded ring of modular forms.

    Stored as ``self._forms_dictionary``: a dict mapping an integer weight to
    the homogeneous modular-form component of that weight.  Weight 0 holds
    constants coerced into the base ring.  Zero components are dropped, so
    the representation is canonical.
    """

    def __init__(self, parent, forms_datum):
        """Build the element from *forms_datum*.

        *forms_datum* is either a dict ``{weight: form-or-constant}`` or a
        list of modular forms / base-ring constants; anything else raises
        ``TypeError``.  Consistency of weight, group and base ring is
        checked for every entry.
        """
        forms_dictionary = {}
        if isinstance(forms_datum, dict):
            for (k, f) in forms_datum.items():
                if isinstance(k, (int, Integer)):
                    k = ZZ(k)
                    if (k == 0):
                        # Weight-0 entries are constants from the base ring.
                        forms_dictionary[k] = parent.base_ring().coerce(f)
                    elif is_ModularFormElement(f):
                        if (f.weight() == k):
                            if (parent.group().is_subgroup(f.group()) and parent.base_ring().has_coerce_map_from(f.base_ring())):
                                # Re-create f in the weight-k space over the parent's base ring.
                                M = parent.modular_forms_of_weight(f.weight()).change_ring(parent.base_ring())
                                forms_dictionary[k] = M(f)
                            else:
                                raise ValueError(('the group and/or the base ring of at least one modular form (%s) is not consistant with the base space' % f))
                        else:
                            raise ValueError(('at least one key (%s) of the defining dictionary does not correspond to the weight of its value (%s). Real weight: %s' % (k, f, f.weight())))
                    else:
                        raise ValueError(('at least one value (%s) of the defining dictionary is not a `ModularFormElement`' % f))
                else:
                    raise ValueError(('at least one key (%s) of the defining dictionary is not an integer' % k))
        elif isinstance(forms_datum, list):
            for f in forms_datum:
                if is_ModularFormElement(f):
                    chi = f.character(compute=False)
                    if ((chi is not None) and (not chi.is_trivial())):
                        raise NotImplementedError('graded modular forms for non-trivial characters is not yet implemented')
                    if (parent.group().is_subgroup(f.group()) and parent.base_ring().has_coerce_map_from(f.base_ring())):
                        M = parent.modular_forms_of_weight(f.weight()).change_ring(parent.base_ring())
                        # Accumulate: several list entries may share a weight.
                        forms_dictionary[f.weight()] = M((forms_dictionary.get(f.weight(), 0) + f))
                    else:
                        raise ValueError(('the group and/or the base ring of at least one modular form (%s) is not consistant with the base space' % f))
                else:
                    # Non-form list entries are treated as weight-0 constants.
                    forms_dictionary[ZZ(0)] = parent.base_ring().coerce(f)
        else:
            raise TypeError('the defining data structure should be a list or a dictionary')
        # Drop zero components for a canonical representation.
        self._forms_dictionary = {k: f for (k, f) in forms_dictionary.items() if (not f.is_zero())}
        Element.__init__(self, parent)

    def __bool__(self):
        """Return True unless every component is zero."""
        return bool(self._forms_dictionary)

    def is_zero(self):
        """Return whether this graded form is zero."""
        return (not self)

    def is_one(self):
        """Return whether this graded form is the multiplicative unit."""
        return ((len(self._forms_dictionary) == 1) and self[0].is_one())

    def group(self):
        """Return the congruence group of the parent ring."""
        return self.parent().group()

    def q_expansion(self, prec=None):
        """Return the q-expansion: weight-0 constant plus the expansions of
        every positive-weight component, summed as power series."""
        Pow = PowerSeriesRing(self.base_ring(), name=defaults.DEFAULT_VARIABLE)
        return (Pow(self._forms_dictionary.get(0, Pow.zero())) + sum((f.q_expansion(prec) for (k, f) in self._forms_dictionary.items() if (k != 0))))
    qexp = q_expansion

    def _repr_(self):
        """String representation via the q-expansion."""
        return str(self.q_expansion())

    def _latex_(self):
        """LaTeX representation via the q-expansion."""
        return self.q_expansion()._latex_()

    def __getitem__(self, weight):
        """Return the homogeneous component of the given non-negative weight
        (zero when absent)."""
        if (not isinstance(weight, (int, Integer))):
            raise KeyError('the weight must be an integer')
        if (weight < 0):
            raise ValueError('the weight must be non-negative')
        return self._forms_dictionary.get(weight, self.parent().zero())
    homogeneous_component = __getitem__

    def __call__(self, x, prec=None):
        """Evaluate the q-expansion at *x*."""
        return self.q_expansion(prec)(x)

    def _add_(self, other):
        """Weight-wise addition of two graded forms."""
        GM = self.__class__
        f_self = self._forms_dictionary
        f_other = other._forms_dictionary
        f_sum = {k: (f_self.get(k, 0) + f_other.get(k, 0)) for k in (set(f_self) | set(f_other))}
        return GM(self.parent(), f_sum)

    def __neg__(self):
        """Weight-wise negation."""
        GM = self.__class__
        f_self = self._forms_dictionary
        minus_self = {k: (- f) for (k, f) in f_self.items()}
        return GM(self.parent(), minus_self)

    def _mul_(self, other):
        """Graded multiplication: weights add, like-weight products accumulate."""
        from collections import defaultdict
        GM = self.__class__
        f_self = self._forms_dictionary
        f_other = other._forms_dictionary
        f_mul = defaultdict(int)
        for k_self in f_self.keys():
            for k_other in f_other.keys():
                f_mul[(k_self + k_other)] += (f_self[k_self] * f_other[k_other])
        return GM(self.parent(), f_mul)

    def _lmul_(self, c):
        """Scalar multiplication by a base-ring element *c*."""
        GM = self.__class__
        f_self = self._forms_dictionary
        f_mul = {k: (c * f) for (k, f) in f_self.items()}
        return GM(self.parent(), f_mul)

    def _richcmp_(self, other, op):
        """Only equality comparisons are defined; compares the component dicts."""
        if ((op != op_EQ) and (op != op_NE)):
            raise TypeError('invalid comparison between modular forms ring elements')
        return richcmp(self._forms_dictionary, other._forms_dictionary, op)

    def weight(self):
        """Return the weight of a homogeneous element (0 for zero); raise for
        a genuinely mixed (non-homogeneous) element."""
        if self.is_homogeneous():
            if self.is_zero():
                return ZZ(0)
            return next(iter(self._forms_dictionary))
        else:
            raise ValueError('the given graded form is not homogeneous (not a modular form)')

    def weights_list(self):
        """Sorted list of weights with a nonzero component ([0] for zero)."""
        if self.is_zero():
            return [ZZ(0)]
        return sorted(self._forms_dictionary)

    def is_homogeneous(self):
        """True when at most one weight has a nonzero component."""
        return (len(self._forms_dictionary) <= 1)
    is_modular_form = is_homogeneous

    def _homogeneous_to_polynomial(self, names, gens):
        """Express this homogeneous form as a polynomial in the given ring
        generators, via linear algebra on q-expansion coefficients up to the
        Sturm bound.  Only implemented over QQ."""
        if (not (self.base_ring() == QQ)):
            raise NotImplementedError('conversion to polynomial are not implemented if the base ring is not Q')
        M = self.parent()
        k = self.weight()
        poly_parent = M.polynomial_ring(names, gens)
        if (k == 0):
            # A constant maps directly into the polynomial ring.
            return poly_parent(self[k])
        weights_of_generators = [gens[i].weight() for i in range(0, len(gens))]
        # All exponent vectors of generators whose weights sum to k.
        W = WeightedIntegerVectors(k, weights_of_generators).list()
        sturm_bound = self.group().sturm_bound(k)
        matrix_datum = []
        list_of_monomials = []
        for exponents in W:
            monomial_form = M.one()
            monomial_poly = poly_parent.one()
            iter = 0  # NOTE(review): shadows the builtin `iter` inside this loop
            for (e, g) in zip(exponents, gens):
                monomial_form *= (M(g) ** e)
                monomial_poly *= (poly_parent.gen(iter) ** e)
                iter += 1
            matrix_datum.append(monomial_form[k].coefficients(range(0, (sturm_bound + 1))))
            list_of_monomials.append(monomial_poly)
        mat = Matrix(matrix_datum).transpose()
        coef_self = vector(self[k].coefficients(range(0, (sturm_bound + 1)))).column()
        # Solve for the monomial coefficients reproducing self's q-expansion.
        soln = mat.solve_right(coef_self)
        poly = poly_parent.zero()
        for (iter, p) in enumerate(list_of_monomials):
            poly += (soln[(iter, 0)] * p)
        return poly

    def to_polynomial(self, names='x', gens=None):
        """Polynomial expression of this (possibly mixed-weight) form in the
        ring generators, summing each homogeneous component's polynomial."""
        M = self.parent()
        if (gens is None):
            gens = M.gen_forms()
        return sum((M(self[k])._homogeneous_to_polynomial(names, gens) for k in self.weights_list()))

    def serre_derivative(self):
        """Weight-wise Serre derivative (weight-0 constants differentiate to 0)."""
        M = self.parent()
        return M(sum((M(f.serre_derivative()) for (k, f) in self._forms_dictionary.items() if (k != 0))))

    def derivative(self, name='E2'):
        """Ordinary derivative; lands in the quasimodular forms ring."""
        from sage.modular.quasimodform.ring import QuasiModularForms
        F = QuasiModularForms(self.group(), self.base_ring(), name)(self)
        return F.derivative()
class eSEModule(nn.Module):
    """Effective Squeeze-and-Excitation block: global average pool, a single
    1x1 conv (no channel reduction), hard-sigmoid gate, channel-wise rescale.

    `reduction` is kept for interface compatibility; it is unused here,
    exactly as in the original.
    """

    def __init__(self, channel, reduction=4):
        super(eSEModule, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Conv2d(channel, channel, kernel_size=1, padding=0)
        self.hsigmoid = Hsigmoid()

    def forward(self, x):
        # Compute a per-channel gate from globally pooled features.
        gate = self.avg_pool(x)
        gate = self.fc(gate)
        gate = self.hsigmoid(gate)
        return x * gate
def load_model(model_path=''):
    """Build the 2-level-mel model, optionally restore weights, move to GPU.

    Bug fix: `model_path` was accepted but silently ignored, so callers who
    passed a checkpoint path got a randomly-initialised model.  When a path
    is given, its state dict is now loaded.
    """
    model = get_concat_2levelmel_model()
    if model_path:
        # NOTE(review): assumes the checkpoint stores a plain state_dict
        # (not a wrapper dict with a 'state_dict' key) — confirm against
        # how checkpoints are saved in this project.
        state = torch.load(model_path, map_location='cpu')
        model.load_state_dict(state)
    if torch.cuda.is_available():
        model.cuda()
    return model
def register_Ns3GlobalRouteManager_methods(root_module, cls):
    """Register the static, argument-less methods of ns3::GlobalRouteManager
    on the binding class *cls* (`root_module` is unused, kept for the common
    registration signature)."""
    static_methods = (
        ('AllocateRouterId', 'uint32_t'),
        ('DeleteGlobalRoutes', 'void'),
        ('BuildGlobalRoutingDatabase', 'void'),
        ('InitializeRoutes', 'void'),
    )
    for method_name, return_type in static_methods:
        cls.add_method(method_name, return_type, [], is_static=True)
    return
# NOTE(review): the decorator heads were truncated to bare `.parametrize(...)`
# lines (a SyntaxError); restored as `@pytest.mark.parametrize`, matching the
# standard nnabla function-test layout.  `pytest` is imported at the top of
# the original test module.
@pytest.mark.parametrize('fname, ctx, func_name', list_ctx_and_func_name2([('reset_nan', 'ResetNaN'), ('reset_inf', 'ResetInf')]))
@pytest.mark.parametrize('val', [0, (- 1)])
@pytest.mark.parametrize('seed', [313])
def test_reset_nan_reset_inf_forward_backward(seed, val, fname, ctx, func_name):
    """Forward/backward check of F.reset_nan / F.reset_inf against NumPy."""
    from nbla_test_utils import function_tester
    np_fun = getattr(np, fname.replace('reset_', 'is'))  # np.isnan / np.isinf
    nn_fun = getattr(F, fname)
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(2, 3, 4).astype(np.float32)]
    # Poison roughly half the entries with nan / inf.
    inputs[0][(rng.rand(*inputs[0].shape) > 0.5)] = getattr(np, fname.replace('reset_', ''))

    def ref_forward(x, val):
        y = x.copy()
        y[np_fun(x)] = val
        return y

    def ref_backward(x, dy, val, **kw):
        # Gradient is blocked wherever the input was nan/inf.
        dx = dy.copy()
        dx[np_fun(x)] = 0
        return dx.flatten()

    function_tester(rng, nn_fun, ref_forward, inputs, func_args=[val], ref_grad=ref_backward, ctx=ctx, func_name=func_name)
def evaluate(args, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, prefix='') -> Dict:
    """Evaluate a (masked) language model and report perplexity.

    Averages the LM loss over the cached eval dataset and returns
    ``{'perplexity': <torch scalar tensor>}``; results are also written to
    ``<output_dir>/<prefix>/eval_results.txt``.
    """
    eval_output_dir = args.output_dir
    eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True)
    # Only the main process (local_rank -1 or 0) creates the output dir.
    if (args.local_rank in [(- 1), 0]):
        os.makedirs(eval_output_dir, exist_ok=True)
    args.eval_batch_size = (args.per_gpu_eval_batch_size * max(1, args.n_gpu))

    def collate(examples: List[torch.Tensor]):
        # Pad with the default value (0) when the tokenizer has no pad token.
        if (tokenizer._pad_token is None):
            return pad_sequence(examples, batch_first=True)
        return pad_sequence(examples, batch_first=True, padding_value=tokenizer.pad_token_id)
    eval_sampler = SequentialSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, collate_fn=collate)
    # Multi-GPU evaluation via DataParallel.
    if (args.n_gpu > 1):
        model = torch.nn.DataParallel(model)
    logger.info('***** Running evaluation {} *****'.format(prefix))
    logger.info(' Num examples = %d', len(eval_dataset))
    logger.info(' Batch size = %d', args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    model.eval()
    for batch in tqdm(eval_dataloader, desc='Evaluating'):
        # MLM: mask tokens and predict them; CLM: inputs double as labels.
        (inputs, labels) = (mask_tokens(batch, tokenizer, args) if args.mlm else (batch, batch))
        inputs = inputs.to(args.device)
        labels = labels.to(args.device)
        with torch.no_grad():
            outputs = (model(inputs, masked_lm_labels=labels) if args.mlm else model(inputs, labels=labels))
            lm_loss = outputs[0]
            eval_loss += lm_loss.mean().item()
        nb_eval_steps += 1
    eval_loss = (eval_loss / nb_eval_steps)
    # Perplexity = exp(mean cross-entropy); stored as a torch scalar tensor.
    perplexity = torch.exp(torch.tensor(eval_loss))
    result = {'perplexity': perplexity}
    output_eval_file = os.path.join(eval_output_dir, prefix, 'eval_results.txt')
    with open(output_eval_file, 'w') as writer:
        logger.info('***** Eval results {} *****'.format(prefix))
        for key in sorted(result.keys()):
            logger.info(' %s = %s', key, str(result[key]))
            writer.write(('%s = %s\n' % (key, str(result[key]))))
    return result
class Null(nn.Module):
    """Identity placeholder module: returns its input unchanged."""

    def __init__(self):
        super(Null, self).__init__()

    def forward(self, x):
        # No-op passthrough.
        return x
class RandomLightPendulum(ModifiablePendulumEnv):
    """Pendulum whose mass is re-sampled uniformly from the RANDOM band,
    excluding the EXTREME inner band, at construction and on each reset."""

    def _sample_mass(self):
        # One place for the sampling call used by __init__ and reset.
        return uniform_exclude_inner(self.np_random.uniform, self.EXTREME_LOWER_MASS, self.EXTREME_UPPER_MASS, self.RANDOM_LOWER_MASS, self.RANDOM_UPPER_MASS)

    def __init__(self):
        super(RandomLightPendulum, self).__init__()
        self.mass = self._sample_mass()

    def reset(self, new=True):
        if new:
            self.mass = self._sample_mass()
        return super(RandomLightPendulum, self).reset(new)

    def parameters(self):
        # NOTE(review): `super().parameters` is accessed without parentheses,
        # which only works if the base class exposes it as a property — confirm.
        params = super(RandomLightPendulum, self).parameters
        params.update({'mass': self.mass})
        return params
def center_plus_four_crops(img: Tensor, size: List[int], margin_h: int, margin_w: int) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
    """Crop the central (crop_height, crop_width) region plus four margin
    strips arranged around it (tl, tr, bl, br), returning
    ``(tl, tr, bl, br, center)``.

    `size` may be a single number, a 1-element sequence, or an (h, w) pair.
    Raises ValueError when the crop (plus margins) does not fit the image.
    """
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    elif isinstance(size, (tuple, list)) and (len(size) == 1):
        size = (size[0], size[0])
    if len(size) != 2:
        raise ValueError('Please provide only two dimensions (h, w) for size.')
    (image_width, image_height) = _get_image_size(img)
    (crop_height, crop_width) = size
    if (crop_width > image_width) or (crop_height > image_height):
        msg = 'Requested crop size {} is bigger than input size {}'
        raise ValueError(msg.format(size, (image_height, image_width)))
    # Bug fix: only the width margin was validated; the symmetric height
    # check was missing, letting invalid crops through to `crop`.
    if ((crop_width + margin_w) > image_width) or ((crop_height + margin_h) > image_height):
        msg = 'Requested margin size {} + input {} is bigger than input size {}'
        raise ValueError(msg.format((margin_h, margin_w), size, (image_height, image_width)))
    # x/y breakpoints: [margin | crop | margin] centered in the image.
    x11 = ((image_width - crop_width) - (2 * margin_w)) // 2
    x12 = x11 + margin_w
    x21 = x12 + crop_width
    x22 = x21 + margin_w
    y11 = ((image_height - crop_height) - (2 * margin_h)) // 2
    y12 = y11 + margin_h
    y21 = y12 + crop_height
    y22 = y21 + margin_h
    # Margin strips in a pinwheel arrangement: tl/br are horizontal strips
    # (margin_h tall, margin_w + crop_width wide), tr/bl are vertical strips
    # (margin_h + crop_height tall, margin_w wide).
    tl = crop(img, y11, x11, margin_h, (margin_w + crop_width))
    tr = crop(img, y11, x21, (margin_h + crop_height), margin_w)
    bl = crop(img, y12, x11, (margin_h + crop_height), margin_w)
    br = crop(img, y21, x12, margin_h, (margin_w + crop_width))
    center = center_crop(img, [crop_height, crop_width])
    return (tl, tr, bl, br, center)
def test_base_encoder():
    """BaseEncoder must preserve the input feature-map shape."""
    encoder = BaseEncoder()
    encoder.init_weights()
    encoder.train()
    feature_map = torch.randn(1, 256, 4, 40)
    encoded = encoder(feature_map)
    assert encoded.shape == torch.Size([1, 256, 4, 40])
def setup_logger(name, save_dir, prefix='', timestamp=True):
    """Create an INFO-level logger that writes to stdout and, when
    *save_dir* is non-empty, also to ``<save_dir>/log[.prefix][.ts].txt``.

    Note: calling this twice with the same *name* adds duplicate handlers
    (same as the original behaviour).
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
    console = logging.StreamHandler(stream=sys.stdout)
    console.setLevel(logging.INFO)
    console.setFormatter(formatter)
    logger.addHandler(console)
    if save_dir:
        suffix = time.strftime('.%m_%d_%H_%M_%S') if timestamp else ''
        tag = '.' + prefix if prefix else ''
        log_file = os.path.join(save_dir, 'log{}.txt'.format(tag + suffix))
        file_handler = logging.FileHandler(log_file)
        file_handler.setLevel(logging.INFO)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    logger.propagate = False
    return logger
def buildCorpus(body_file, summ_file, w_exp=False):
    """Build a list of Instance objects pairing body graphs with their
    summary annotations.

    For each file present in both corpora, nodes get 1-based integer ids;
    `s_nodes`/`s_edges` mark nodes/edges that also occur in the summary,
    `r_nodes` marks body root nodes.  `w_exp` selects the expanded edge set.

    Bug fix: `.iteritems()` is Python-2-only; replaced with `.items()`,
    which is correct on both Python 2 and 3.
    """
    logger.debug('building corpus [body file]: %s' % body_file)
    logger.debug('building corpus [summ file]: %s' % summ_file)
    corpus = []
    body_corpus = loadFile(body_file)
    summ_corpus = loadFile(summ_file)
    for curr_filename in body_corpus:
        if curr_filename not in summ_corpus:
            logger.error('[no summary sentences]: %s' % curr_filename)
            continue
        (body_nodes, body_root_nodes, body_edges, body_exp_edges) = body_corpus[curr_filename]
        (summ_nodes, _, summ_edges, _) = summ_corpus[curr_filename]
        num_summ_nodes = len(summ_nodes)
        num_summ_edges = len(summ_edges)
        # Assign each body node a 1-based integer index.
        node_idx = 0
        node_indices = {}
        my_nodes = {}
        s_nodes = set()   # nodes also present in the summary
        r_nodes = set()   # root nodes of the body
        for (anchor, node) in body_nodes.items():
            node_idx += 1
            my_nodes[(node_idx,)] = node
            node_indices[anchor[0]] = node_idx
            if anchor in summ_nodes:
                s_nodes.add((node_idx,))
            if anchor in body_root_nodes:
                r_nodes.add((node_idx,))
        my_edges = {}
        s_edges = set()   # edges also present in the summary
        # Choose between expanded and plain edge sets.
        edge_source = body_exp_edges if w_exp else body_edges
        for (anchor, edge) in edge_source.items():
            idx1 = node_indices[anchor[0]]
            idx2 = node_indices[anchor[1]]
            my_edges[(idx1, idx2)] = edge
            if anchor in summ_edges:
                s_edges.add((idx1, idx2))
        inst = Instance(curr_filename, (my_nodes, s_nodes, r_nodes), (my_edges, s_edges), (num_summ_nodes, num_summ_edges))
        corpus.append(inst)
    return corpus
def GetKCoreNodes_PNEANet(Graph, CoreIdSzV):
    """Thin wrapper forwarding to the SWIG binding
    ``_snap.GetKCoreNodes_PNEANet``; arguments and return value are passed
    through unchanged."""
    result = _snap.GetKCoreNodes_PNEANet(Graph, CoreIdSzV)
    return result
class Data(QtCore.QObject):
    """Mapping-style wrapper around a dict that can announce updates through
    the Qt signal `data_updated` (emitted explicitly via notify_update)."""

    data_updated = QtCore.pyqtSignal()

    def __init__(self, data=None):
        """Wrap *data*, defaulting to a fresh empty dict.

        Bug fix: the original signature `data={}` used a mutable default
        argument, so every `Data()` created without arguments shared one
        dictionary — mutations leaked between instances.
        """
        QtCore.QObject.__init__(self)
        self.data = {} if data is None else data

    def notify_update(self):
        """Emit the data_updated signal."""
        self.data_updated.emit()

    def __getitem__(self, key):
        return self.data[key]

    def __setitem__(self, key, value):
        self.data[key] = value

    def __len__(self):
        return len(self.data)

    def __delitem__(self, key):
        del self.data[key]

    def __contains__(self, key):
        return key in self.data

    def __iter__(self):
        return iter(self.data)

    def __next__(self):
        # NOTE(review): plain dicts have no __next__; this only works when
        # the wrapped object is itself an iterator — confirm intended use.
        return self.data.__next__()

    def items(self):
        return self.data.items()

    def values(self):
        return self.data.values()

    def keys(self):
        return self.data.keys()

    def __repr__(self):
        return repr(self.data)
class CohomologyRAAG(CombinatorialFreeModule):
    """Cohomology ring of a right-angled Artin group (RAAG) over a field.

    The basis is indexed by independent sets of the defining graph; the
    product is an exterior (wedge) product on the degree-1 generators.
    """

    def __init__(self, R, A):
        """R: coefficient field; A: the right-angled Artin group."""
        if (R not in Fields()):
            raise NotImplementedError('only implemented with coefficients in a field')
        self._group = A
        # Generator names e<k>, derived from the group's variable names.
        names = tuple([('e' + name[1:]) for name in A.variable_names()])
        from sage.graphs.independent_sets import IndependentSets
        from sage.sets.finite_enumerated_set import FiniteEnumeratedSet
        # One basis element per independent set of the graph.
        indices = [tuple(ind_set) for ind_set in IndependentSets(A._graph)]
        indices = FiniteEnumeratedSet(indices)
        cat = AlgebrasWithBasis(R.category()).Super().Graded().FiniteDimensional()
        CombinatorialFreeModule.__init__(self, R, indices, category=cat, prefix='H')
        self._assign_names(names)

    def _repr_(self) -> str:
        """String representation of the ring."""
        return 'Cohomology ring of {} with coefficients in {}'.format(self._group, self.base_ring())

    def _repr_term(self, m) -> str:
        """Plain-text form of a basis term: '1' or 'e<i>*e<j>*...'."""
        if (not m):
            return '1'
        return '*'.join((('e' + str(i)) for i in m))

    def _ascii_art_term(self, m):
        """ASCII-art form of a basis term, with '/\\' as the wedge."""
        if (not m):
            return ascii_art('1')
        wedge = '/\\'
        return ascii_art(*[('e' + str(i)) for i in m], sep=wedge)

    def _unicode_art_term(self, m):
        """Unicode-art form of a basis term, with the logical-and wedge."""
        if (not m):
            return unicode_art('1')
        import unicodedata
        wedge = unicodedata.lookup('LOGICAL AND')
        return unicode_art(*[('e' + str(i)) for i in m], sep=wedge)

    def _latex_term(self, m):
        """LaTeX form of a basis term, joined by \\wedge."""
        if (not m):
            return '1'
        from sage.misc.latex import latex
        return ' \\wedge '.join(('e_{{{}}}'.format(latex(i)) for i in m))

    def gen(self, i):
        """Return the i-th degree-1 generator."""
        return self._from_dict({(i,): self.base_ring().one()}, remove_zeros=False)

    # NOTE(review): the bare `_method` lines below look like truncated
    # decorators (probably `@cached_method`) — confirm against upstream.
    _method
    def one_basis(self):
        """Index of the basis element of 1: the empty independent set."""
        return ()

    _method
    def algebra_generators(self):
        """Family of degree-1 generators indexed by graph vertices."""
        V = self._group._graph.vertices(True)
        d = {x: self.gen(i) for (i, x) in enumerate(V)}
        from sage.sets.family import Family
        return Family(V, (lambda x: d[x]))

    def gens(self):
        """Tuple of the algebra generators."""
        return tuple(self.algebra_generators())

    def ngens(self):
        """Number of generators = number of graph vertices."""
        return self._group._graph.num_verts()

    def degree_on_basis(self, I):
        """Degree of a basis element = size of its independent set."""
        return len(I)

    class Element(CohomologyRAAGElement):
        """Element class; behaviour inherited from CohomologyRAAGElement."""
class MLP(nn.Module):
    """Multi-layer perceptron: Linear -> ReLU -> Dropout between hidden
    layers, a plain Linear at the output."""

    def __init__(self, n_input, n_output, hidden_neurons=(512,), dropout_rate=0.1):
        super(MLP, self).__init__()
        widths = ((n_input,) + hidden_neurons) + (n_output,)
        self.layers = nn.ModuleList()
        # One Linear per consecutive pair of widths.
        for fan_in, fan_out in zip(widths[:-1], widths[1:]):
            self.layers.append(nn.Linear(fan_in, fan_out))
        self.act = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, x):
        out = x
        for hidden_layer in list(self.layers)[:-1]:
            out = self.dropout(self.act(hidden_layer(out)))
        # Final layer: no activation, no dropout.
        return self.layers[-1](out)
def prefetch_test(opt):
    """Run the detector over the whole val/test split using a prefetching
    DataLoader, then hand the collected results to the dataset's evaluator.
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    Dataset = dataset_factory[opt.dataset]
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)
    Logger(opt)
    Detector = detector_factory[opt.task]
    split = ('val' if (not opt.trainval) else 'test')
    dataset = Dataset(opt, split)
    detector = Detector(opt)
    # batch_size=1: images are pre-processed per sample inside PrefetchDataset.
    data_loader = torch.utils.data.DataLoader(PrefetchDataset(opt, dataset, detector.pre_process), batch_size=1, shuffle=False, num_workers=1, pin_memory=True)
    results = {}
    num_iters = len(dataset)
    bar = Bar('{}'.format(opt.exp_id), max=num_iters)
    # Per-stage timing accumulators reported alongside the progress bar.
    time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge']
    avg_time_stats = {t: AverageMeter() for t in time_stats}
    for (ind, (img_id, pre_processed_images)) in enumerate(data_loader):
        ret = detector.run(pre_processed_images)
        # Key results by the integer image id.
        results[img_id.numpy().astype(np.int32)[0]] = ret['results']
        Bar.suffix = '[{0}/{1}]|Tot: {total:} |ETA: {eta:} '.format(ind, num_iters, total=bar.elapsed_td, eta=bar.eta_td)
        for t in avg_time_stats:
            avg_time_stats[t].update(ret[t])
            Bar.suffix = (Bar.suffix + '|{} {tm.val:.3f}s ({tm.avg:.3f}s) '.format(t, tm=avg_time_stats[t]))
        bar.next()
    bar.finish()
    dataset.run_eval(results, opt.save_dir)
# NOTE(review): the decorator heads were truncated to bare `.parametrize(...)`
# lines (a SyntaxError); restored as `@pytest.mark.parametrize`.  `pytest` is
# imported at the top of the original test module.
@pytest.mark.parametrize('observation_shape', [(100,)])
@pytest.mark.parametrize('hidden_units', [[256, 256]])
@pytest.mark.parametrize('batch_size', [32])
@pytest.mark.parametrize('use_batch_norm', [False, True])
@pytest.mark.parametrize('dropout_rate', [None, 0.2])
@pytest.mark.parametrize('activation', [torch.nn.ReLU()])
def test_vector_encoder(observation_shape: Sequence[int], hidden_units: Sequence[int], batch_size: int, use_batch_norm: bool, dropout_rate: Optional[float], activation: torch.nn.Module) -> None:
    """VectorEncoder output shape, train/eval divergence, and grad updates."""
    encoder = VectorEncoder(observation_shape=observation_shape, hidden_units=hidden_units, use_batch_norm=use_batch_norm, dropout_rate=dropout_rate, activation=activation)
    x = torch.rand((batch_size, *observation_shape))
    y = encoder(x)
    assert (y.shape == (batch_size, hidden_units[(- 1)]))
    # Batch norm / dropout behave differently in eval mode; otherwise the
    # forward pass must be identical.
    encoder.eval()
    eval_y = encoder(x)
    if (use_batch_norm or dropout_rate):
        assert (not torch.allclose(y, eval_y))
    else:
        assert torch.allclose(y, eval_y)
    check_parameter_updates(encoder, (x,))
class TransformerEncoder(Module):
    """Stack of `num_layers` clones of an encoder layer, with an optional
    final normalization module."""

    __constants__ = ['norm']

    def __init__(self, encoder_layer, num_layers, norm=None):
        super(TransformerEncoder, self).__init__()
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm

    def forward(self, src: Tensor, mask: Optional[Tensor] = None, src_key_padding_mask: Optional[Tensor] = None) -> Tensor:
        """Pass `src` through every layer in order, then the optional norm."""
        hidden = src
        for layer in self.layers:
            hidden = layer(hidden, src_mask=mask, src_key_padding_mask=src_key_padding_mask)
        if self.norm is None:
            return hidden
        return self.norm(hidden)
def generate_xdmf(h5filename, periodic=True, order='visit'):
    """Generate XDMF meta-files for the 2D/3D scalar datasets stored in
    *h5filename*, so the HDF5 data can be opened in ParaView or VisIt.

    periodic: True/False or a per-axis sequence; affects grid spacing.
    order: 'paraview' or 'visit' — axis ordering convention of the viewer.
    One ``.xdmf`` file is written per slice-group (and per dimensionality).
    """
    import h5py
    f = h5py.File(h5filename, 'a')
    keys = []
    f.visit(keys.append)
    assert (order.lower() in ('paraview', 'visit'))
    # Collect scalar dataset keys grouped by dimensionality and timestep.
    datasets = {2: {}, 3: {}}
    for key in keys:
        if (f[key.split('/')[0]].attrs['rank'] > 0):
            continue
        if isinstance(f[key], h5py.Dataset):
            if (not (('mesh' in key) or ('domain' in key) or ('Vector' in key))):
                tstep = int(key.split('/')[(- 1)])
                ndim = int(key.split('/')[1][0])
                if (ndim in (2, 3)):
                    ds = datasets[ndim]
                    if (tstep in ds):
                        ds[tstep] += [key]
                    else:
                        ds[tstep] = [key]
    # Normalize `periodic` to a per-axis 0/1 list (0 means periodic axis).
    if (periodic is True):
        periodic = ([0] * 5)
    elif (periodic is False):
        periodic = ([1] * 5)
    else:
        assert isinstance(periodic, (tuple, list))
        periodic = list(array(invert(periodic), int))
    coor = ['x0', 'x1', 'x2', 'x3', 'x4']
    for (ndim, dsets) in datasets.items():
        timesteps = list(dsets.keys())
        per = copy.copy(periodic)
        if (not timesteps):
            continue
        timesteps.sort(key=int)
        tt = ''
        for i in timesteps:
            tt += ('%s ' % i)
        datatype = f[dsets[timesteps[0]][0]].dtype
        assert (datatype.char not in 'FDG'), 'Cannot use generate_xdmf to visualize complex data.'
        prec = (4 if (datatype is dtype('float32')) else 8)
        xff = {}
        geometry = {}
        topology = {}
        attrs = {}
        grid = {}
        NN = {}
        # Build geometry/topology once per slice-group using the first timestep.
        for name in dsets[timesteps[0]]:
            group = name.split('/')[0]
            if ('slice' in name):
                slices = name.split('/')[2]
            else:
                slices = 'whole'
            cc = copy.copy(coor)
            if (slices not in xff):
                xff[slices] = copy.copy(xdmffile)
                N = list(f[name].shape)
                kk = 0
                sl = 0
                if ('slice' in slices):
                    # e.g. 'slice_4_slice': find which axes remain ('slice'
                    # components) and which axis index is fixed.
                    ss = slices.split('_')
                    ii = []
                    for (i, sx) in enumerate(ss):
                        if ('slice' in sx):
                            ii.append(i)
                        elif (len(f[group].attrs.get('shape')) == 3):
                            kk = i
                            sl = int(sx)
                            N.insert(i, 1)
                    cc = take(coor, ii)
                else:
                    ii = list(range(ndim))
                NN[slices] = N
                if ('domain' in f[group].keys()):
                    # Uniform grid: geometry from origin + spacing per axis.
                    if ((ndim == 2) and (('slice' not in slices) or (len(f[group].attrs.get('shape')) > 3))):
                        geo = get_geometry(kind=0, dim=2)
                        assert (len(ii) == 2)
                        (i, j) = ii
                        # Axis order depends on the target viewer.
                        if (order.lower() == 'paraview'):
                            data = [f[(group + '/domain/{}'.format(coor[i]))][0], f[(group + '/domain/{}'.format(coor[j]))][0], (f[(group + '/domain/{}'.format(coor[i]))][1] / (N[0] - per[i])), (f[(group + '/domain/{}'.format(coor[j]))][1] / (N[1] - per[j]))]
                            geometry[slices] = geo.format(*data)
                        else:
                            data = [f[(group + '/domain/{}'.format(coor[j]))][0], f[(group + '/domain/{}'.format(coor[i]))][0], (f[(group + '/domain/{}'.format(coor[j]))][1] / (N[0] - per[j])), (f[(group + '/domain/{}'.format(coor[i]))][1] / (N[1] - per[i]))]
                            geometry[slices] = geo.format(*data)
                    else:
                        if (ndim == 2):
                            # 2D slice of 3D data: re-insert the sliced axis.
                            ii.insert(kk, kk)
                            per[kk] = 0
                        (i, j, k) = ii
                        geo = get_geometry(kind=0, dim=3)
                        data = [f[(group + '/domain/{}'.format(coor[i]))][0], f[(group + '/domain/{}'.format(coor[j]))][0], f[(group + '/domain/{}'.format(coor[k]))][0], (f[(group + '/domain/{}'.format(coor[i]))][1] / (N[0] - per[i])), (f[(group + '/domain/{}'.format(coor[j]))][1] / (N[1] - per[j])), (f[(group + '/domain/{}'.format(coor[k]))][1] / (N[2] - per[k]))]
                        if (ndim == 2):
                            # Pin the sliced axis at its physical position.
                            (origin, dx) = f[(group + '/domain/x{}'.format(kk))]
                            M = f[group].attrs.get('shape')
                            pos = (origin + ((dx / (M[kk] - per[kk])) * sl))
                            data[kk] = pos
                            data[(kk + 3)] = pos
                        geometry[slices] = geo.format(*data)
                    topology[slices] = get_topology(N, kind=0)
                elif ('mesh' in f[group].keys()):
                    # Non-uniform grid: coordinates come from the stored mesh.
                    if ((ndim == 2) and (('slice' not in slices) or (len(f[group].attrs.get('shape')) > 3))):
                        geo = get_geometry(kind=1, dim=2)
                    else:
                        geo = get_geometry(kind=1, dim=3)
                    if ((ndim == 2) and (('slice' not in slices) or (len(f[group].attrs.get('shape')) > 3))):
                        if (order.lower() == 'paraview'):
                            sig = (prec, N[0], N[1], h5filename, cc[0], cc[1], group)
                        else:
                            sig = (prec, N[1], N[0], h5filename, cc[1], cc[0], group)
                    else:
                        if (ndim == 2):
                            # Inline the fixed coordinate of the sliced axis
                            # into the XDMF template as literal XML data.
                            pos = f[(group + '/mesh/x{}'.format(kk))][sl]
                            z = re.findall('<DataItem(.*?)</DataItem>', geo, re.DOTALL)
                            geo = geo.replace(z[(2 - kk)], (' Format="XML" NumberType="Float" Precision="{0}" Dimensions="{%d}">\n {%d}\n ' % ((1 + kk), (7 - kk))))
                            cc = list(cc)
                            cc.insert(kk, pos)
                        sig = (prec, N[0], N[1], N[2], h5filename, cc[2], cc[1], cc[0], group)
                    geometry[slices] = geo.format(*sig)
                    topology[slices] = get_topology(N, kind=1)
                grid[slices] = ''
        attrs = {}
        # One <Grid> per timestep, accumulating its scalar attributes.
        for tstep in timesteps:
            d = dsets[tstep]
            slx = set()
            for (i, x) in enumerate(d):
                slices = x.split('/')[2]
                if (not ('slice' in slices)):
                    slices = 'whole'
                N = NN[slices]
                if (slices not in attrs):
                    attrs[slices] = ''
                attrs[slices] += get_attribute(x, h5filename, N, prec)
                slx.add(slices)
            for slices in slx:
                grid[slices] += get_grid(geometry[slices], topology[slices], attrs[slices].rstrip())
                attrs[slices] = ''
        # Write one .xdmf file per slice-group.
        for (slices, ff) in xff.items():
            if ('slice' in slices):
                fname = (((h5filename[:(- 3)] + '_') + slices) + '.xdmf')
            else:
                fname = (h5filename[:(- 3)] + '.xdmf')
            xfl = open(fname, 'w')
            h = ff.format(tt, len(timesteps), grid[slices].rstrip())
            xfl.write(h)
            xfl.close()
    f.close()
def parse_args():
    """Parse command-line options for the evaluation script."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--predictions', required=True, help='Path to predictions file.')
    parser.add_argument(
        '--track',
        choices=['default', 'xlingual'],
        default='default',
        help='default track or xlingual track. For xlingual, we need to use a different tokenizer.',
    )
    # Boolean flags share the same store_true shape.
    flag_specs = (
        ('--compute_per_category_metrics', 'Compute metrics on every evaluation category.'),
        ('--compute_per_task_metrics', 'Compute metrics on every evaluation task.'),
    )
    for flag, help_text in flag_specs:
        parser.add_argument(flag, action='store_true', help=help_text)
    return parser.parse_args()
class IdentificationClassificationFeatures():
    """Model-input features for one tokenized context window of a span
    identification + classification example."""

    def __init__(self, input_ids, attention_mask, token_type_ids, cls_index, p_mask, example_index, unique_id, paragraph_len, token_is_max_context, tokens, token_to_orig_map, span_to_orig_map, class_label, span_labels, valid_span_missing_in_context, data_id: str=None, encoding: BatchEncoding=None):
        self.input_ids = input_ids
        self.attention_mask = attention_mask
        self.token_type_ids = token_type_ids
        # Index of the [CLS] token within input_ids.
        self.cls_index = cls_index
        self.p_mask = p_mask
        # Which example this window came from, and a globally unique id.
        self.example_index = example_index
        self.unique_id = unique_id
        self.paragraph_len = paragraph_len
        self.token_is_max_context = token_is_max_context
        self.tokens = tokens
        # Mappings back to positions in the original (untokenized) text.
        self.token_to_orig_map = token_to_orig_map
        self.span_to_orig_map: Dict[(int, List[int])] = span_to_orig_map
        self.class_label = class_label
        self.span_labels = span_labels
        if valid_span_missing_in_context:
            # A valid span falling outside this window is only consistent
            # with an entailment or contradiction label.
            # NOTE(review): assert is stripped under `python -O`.
            assert (class_label in [NLILabel.ENTAILMENT.value, NLILabel.CONTRADICTION.value])
        self.valid_span_missing_in_context = valid_span_missing_in_context
        self.data_id = data_id
        self.encoding = encoding
def print_all_problematic_outputs_between_partitions(graph: Graph, edge_weight_function):
problems = []
valid_state = True
for n in graph.nodes:
if (n.value_type in {type(None), list, tuple, dict, set, int, bool, float, str, slice, torch.Size, torch.dtype}):
for o in n.out_edges:
if (n.stage_id != o.stage_id):
msg = f'invalid output type at partition boundary {n.stage_id}=>{o.stage_id}'
msg += f'''
output is {n.scope} of type {n.value_type}, weight {edge_weight_function(n, o)}'''
valid_state = False
problems.append(msg)
s = ((f'''Valid outputs states = {valid_state}
''' + 'problems:\n') + '\n'.join(problems))
print(s) |
def process_spec_file(spec_name, num_bins: int, upper_limit: int, spec_dir: Path):
    """Load one spectrum JSON and return its binned intensity spectrum.

    Args:
        spec_name: basename of the spectrum file (without .json).
        num_bins: number of m/z bins passed to common.bin_spectra.
        upper_limit: upper m/z limit passed to common.bin_spectra.
        spec_dir: directory containing the spectrum JSON files.

    Returns:
        The binned spectrum array, or None if the JSON has no 'output_tbl'.
    """
    spec_file = spec_dir / f'{spec_name}.json'
    # Context manager closes the handle deterministically; the original
    # json.load(open(...)) leaked the file descriptor.
    with open(spec_file, 'r') as fh:
        loaded_json = json.load(fh)
    output_tbl = loaded_json.get('output_tbl')
    if output_tbl is None:
        return None
    mz = output_tbl['formula_mass_no_adduct']
    inten = output_tbl['ms2_inten']
    # Stack into an (n_peaks, 2) array of (m/z, intensity) rows.
    spec_ar = np.vstack([mz, inten]).transpose(1, 0)
    binned = common.bin_spectra([spec_ar], num_bins, upper_limit)
    return binned[0]
def convert_shuffle(base_input_path, base_output_path, short_name):
    """Read the AQMAR NER zip archive, normalize the tags, and write a
    shuffled 70/15/15 train/dev/test split via write_dataset.

    Args:
        base_input_path: path to the AQMAR zip archive.
        base_output_path: output location passed to write_dataset.
        short_name: dataset short name passed to write_dataset.

    Raises:
        FileNotFoundError: if base_input_path is not a zipfile.
        RuntimeError: if the archive does not hold exactly 28 top-level
            .txt annotation files.
    """
    if (not zipfile.is_zipfile(base_input_path)):
        raise FileNotFoundError(('Expected %s to be the zipfile with AQMAR in it' % base_input_path))
    with zipfile.ZipFile(base_input_path) as zin:
        namelist = zin.namelist()
        # Annotation files are top-level .txt entries (no '/' in the name).
        annotation_files = [x for x in namelist if (x.endswith('.txt') and (not ('/' in x)))]
        annotation_files = sorted(annotation_files)
        # Swap entries 2 and 3 after sorting; the asserts pin the expected
        # positions first. NOTE(review): presumably reproduces a historical
        # processing order — confirm before changing.
        assert (annotation_files[2] == 'Computer.txt')
        assert (annotation_files[3] == 'Computer_Software.txt')
        (annotation_files[2], annotation_files[3]) = (annotation_files[3], annotation_files[2])
        if (len(annotation_files) != 28):
            raise RuntimeError(('Expected exactly 28 labeled .txt files in %s but got %d' % (base_input_path, len(annotation_files))))
        sentences = []
        for in_filename in annotation_files:
            with zin.open(in_filename) as infile:
                new_sentences = read_sentences(infile)
                print(f'{len(new_sentences)} sentences read from {in_filename}')
                new_sentences = normalize_tags(new_sentences)
                sentences.extend(new_sentences)
    # Each sentence is a list of (word, tag) pairs; report the tag inventory.
    all_tags = Counter([p[1] for sent in sentences for p in sent])
    print('All tags after normalization:')
    print(list(all_tags.keys()))
    num = len(sentences)
    train_num = int((num * 0.7))
    dev_num = int((num * 0.15))
    # Fixed seed so the shuffle (and hence the split) is reproducible.
    random.seed(1234)
    random.shuffle(sentences)
    train_sents = sentences[:train_num]
    dev_sents = sentences[train_num:(train_num + dev_num)]
    test_sents = sentences[(train_num + dev_num):]
    shuffled_dataset = [train_sents, dev_sents, test_sents]
    write_dataset(shuffled_dataset, base_output_path, short_name)
class TestCounterOps(TestCase):
    """Smoke test for the caffe2 StatRegistry operators."""
    def test_stats_ops(self):
        """Round-trip stats through the global and a local StatRegistry.

        The workspace stat registry is global state shared across tests, so
        the keys already present are counted first ('existing') and
        subtracted from the final expectations.
        """
        workspace.RunOperatorOnce(core.CreateOperator('StatRegistryExport', [], ['prev_k', 'prev_v', 'prev_ts']))
        previous_keys = workspace.FetchBlob('prev_k')
        existing = len(previous_keys)
        prefix = '/'.join([__name__, 'TestCounterOps', 'test_stats_ops'])
        keys = [(prefix + '/key1').encode('ascii'), (prefix + '/key2').encode('ascii')]
        values = [34, 45]
        workspace.FeedBlob('k', np.array(keys, dtype=str))
        workspace.FeedBlob('v', np.array(values, dtype=np.int64))
        # Update twice so the values accumulate in the global registry.
        for _ in range(2):
            workspace.RunOperatorOnce(core.CreateOperator('StatRegistryUpdate', ['k', 'v'], []))
        # Export the global registry, then replay the exported stats into a
        # freshly created local registry and export that.
        workspace.RunOperatorOnce(core.CreateOperator('StatRegistryExport', [], ['k2', 'v2', 't2']))
        workspace.RunOperatorOnce(core.CreateOperator('StatRegistryCreate', [], ['reg']))
        workspace.RunOperatorOnce(core.CreateOperator('StatRegistryUpdate', ['k2', 'v2', 'reg'], []))
        workspace.RunOperatorOnce(core.CreateOperator('StatRegistryExport', ['reg'], ['k3', 'v3', 't3']))
        k3 = workspace.FetchBlob('k3')
        v3 = workspace.FetchBlob('v3')
        t3 = workspace.FetchBlob('t3')
        # Exactly our two new keys beyond whatever pre-existed.
        self.assertEqual((len(k3) - existing), 2)
        self.assertEqual(len(v3), len(k3))
        self.assertEqual(len(t3), len(k3))
        for key in keys:
            self.assertIn(key, k3)
def _get_build_directory(name, verbose):
    """Return (creating it if necessary) the build directory for the named
    extension.

    The root is $TORCH_EXTENSIONS_DIR when set, otherwise
    <tempdir>/torch_extensions; the extension gets a subdirectory under it.

    Args:
        name: extension name, used as the subdirectory name.
        verbose: when truthy, print the chosen root and any directory creation.

    Returns:
        Absolute path of the (now-existing) build directory.
    """
    root_extensions_directory = os.environ.get('TORCH_EXTENSIONS_DIR')
    if root_extensions_directory is None:
        root_extensions_directory = os.path.join(tempfile.gettempdir(), 'torch_extensions')
    if verbose:
        print('Using {} as PyTorch extensions root...'.format(root_extensions_directory))
    build_directory = os.path.join(root_extensions_directory, name)
    if not os.path.exists(build_directory):
        if verbose:
            print('Creating extension directory {}...'.format(build_directory))
        # exist_ok=True closes the TOCTOU race: another process may create the
        # directory between the exists() check and makedirs(), which used to
        # raise FileExistsError here.
        os.makedirs(build_directory, exist_ok=True)
    return build_directory
def _pickle_RecognizableSeriesSpace(coefficients, indices, category):
    """Unpickling helper: rebuild a RecognizableSeriesSpace from its captured
    constructor arguments. Kept at module level so pickle can import it
    (presumably registered via __reduce__ on the space — confirm)."""
    return RecognizableSeriesSpace(coefficients, indices=indices, category=category)
def load_from_wv_format(filename):
    """Load embeddings stored in word2vec-like text format.

    The first line holds "<num_rows> <embedding_dim>"; every following line
    holds "<row_index> <v1> <v2> ...". Row indices address rows of the
    result directly.

    Args:
        filename: path to the embedding text file.

    Returns:
        np.ndarray of shape (num_rows, embedding_dim), dtype float; rows not
        present in the file stay zero.
    """
    with open(filename) as f:
        header = f.readline().split()
        total_num, embedding_size = int(header[0]), int(header[1])
        res = np.zeros((total_num, embedding_size), dtype=float)
        # Stream line by line instead of materializing the whole file with
        # readlines() — embedding files can be large.
        for raw_line in f:
            fields = raw_line.strip().split()
            if not fields:
                continue  # tolerate trailing blank lines
            res[int(fields[0])] = [float(v) for v in fields[1:]]
    return res
class EarlyStopping():
    """Early-stopping counter for training loops.

    Tracks how many consecutive evaluations failed to improve on the best
    metric by more than min_delta; sets early_stop once the count reaches
    tolerance. Supports metrics to minimize ('min', e.g. loss) or maximize
    ('max', e.g. accuracy).
    """
    def __init__(self, min_max='min', tolerance=20, min_delta=1e-09):
        """
        Args:
            min_max: 'min' if smaller metric values are better, 'max' otherwise.
            tolerance: number of consecutive non-improving evaluations allowed.
            min_delta: minimum difference counted as a regression.
        """
        self.tolerance = tolerance
        self.min_delta = min_delta
        self.min_max = min_max
        self.counter = 0
        self.early_stop = False
    def min_stopping(self, valid_loss, best_valid_loss):
        """Update the counter for a metric that should decrease."""
        if ((valid_loss - best_valid_loss) > self.min_delta):
            self.counter += 1
            if (self.counter >= self.tolerance):
                self.early_stop = True
        else:
            # Any improvement resets the streak.
            self.counter = 0
    def max_stopping(self, valid_acc, best_valid_acc):
        """Update the counter for a metric that should increase."""
        if ((best_valid_acc - valid_acc) > self.min_delta):
            self.counter += 1
            if (self.counter >= self.tolerance):
                self.early_stop = True
        else:
            self.counter = 0
    def __call__(self, valid_metric, best_metic):
        # Parameter name 'best_metic' (sic) kept for backward compatibility
        # with keyword callers.
        if (self.min_max == 'min'):
            self.min_stopping(valid_metric, best_metic)
        elif (self.min_max == 'max'):
            self.max_stopping(valid_metric, best_metic)
        else:
            # Fixed message: the original said "Unexpected split name", a
            # copy-paste from unrelated code — this is the min/max mode.
            raise ValueError(f'Unexpected min_max mode: {self.min_max!r} (expected "min" or "max")')
def z_score(x, axis=0):
    """Standardize *x* along *axis*: subtract the mean and divide by the
    standard deviation computed over that axis. Returns a float array of the
    same shape as the input."""
    arr = np.array(x).astype(float)
    # keepdims lets the per-axis statistics broadcast back over the array,
    # equivalent to the rollaxis-and-mutate trick but in one expression.
    mean = np.mean(arr, axis=axis, keepdims=True)
    std = np.std(arr, axis=axis, keepdims=True)
    return (arr - mean) / std
class EndStateElimination(transformation.MultiStateTransformation):
    """Removes a dead-end SDFG state: an empty terminal state reached by a
    single unconditional edge."""
    end_state = transformation.PatternNode(SDFGState)
    # NOTE(review): dace normally declares expressions() as a @classmethod;
    # no decorator is visible here — confirm the framework applies one.
    def expressions(cls):
        return [sdutil.node_path_graph(cls.end_state)]
    def can_be_applied(self, graph, expr_index, sdfg, permissive=False):
        """Match only empty sink states with one unconditional in-edge."""
        state = self.end_state
        out_edges = graph.out_edges(state)
        in_edges = graph.in_edges(state)
        # Must be a terminal (sink) state.
        if (len(out_edges) != 0):
            return False
        # Exactly one predecessor edge ...
        if (len(in_edges) != 1):
            return False
        edge = in_edges[0]
        # ... which must carry no condition.
        if (not edge.data.is_unconditional()):
            return False
        # The state must hold no dataflow nodes.
        if (state.number_of_nodes() > 0):
            return False
        return True
    def apply(self, _, sdfg):
        """Delete the matched state, then clean up symbols assigned on its
        incoming edge. NOTE(review): symbols are removed when they appear in
        sdfg.free_symbols after deletion — confirm this matches intent."""
        state = self.end_state
        edge = sdfg.in_edges(state)[0]
        sym_assign = edge.data.assignments.keys()
        sdfg.remove_node(state)
        for sym in sym_assign:
            if (sym in sdfg.free_symbols):
                sdfg.remove_symbol(sym)
def main():
    """Instantiate every benchmark named on the command line, run them all,
    then print each benchmark's results."""
    args = parse_args()
    selected = [ALL_BENCHMARKS[name]() for name in args.benchmarks]
    # Run everything before displaying anything so the result summaries are
    # not interleaved with per-run output.
    for benchmark in selected:
        benchmark.run()
    for benchmark in selected:
        benchmark.display()
def test_join_items_left_outer_deep(join_items):
    """A left-outer join with deep_merge_key='deep' should deep-merge the
    'deep' lists of the item both sides share (name == 'common')."""
    (left_items, right_items) = join_items
    joined = pyhf.workspace._join_items('left outer', left_items, right_items, key='name', deep_merge_key='deep')
    assert (next((k['deep'] for k in joined if (k['name'] == 'common'))) == [{'name': 1}, {'name': 2}])
def _sample_line(real, fake):
    """Return points sampled uniformly on the segments between *real* and
    *fake* (one random interpolation factor per batch element, broadcast over
    all remaining dimensions) — presumably for gradient-penalty style
    interpolation; confirm with the caller."""
    batch = real.size(0)
    alpha_shape = [batch] + [1] * (real.dim() - 1)
    alpha = torch.rand(alpha_shape, device=real.device)
    return real + alpha * (fake - real)
class T5EncoderModel(metaclass=DummyObject):
    """Import-time placeholder used when the torch backend is unavailable;
    any attempt to instantiate it raises via requires_backends."""
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        # Raises immediately when 'torch' is not installed, pointing the user
        # at the missing backend instead of failing later.
        requires_backends(self, ['torch'])
@pytest.mark.parametrize('temperature', [0.0, 0.5, 1.0, 2.0])
def test_self_adversarial_negative_sampling(temperature):
    """Compare SelfAdversarialNegativeSampling against a NumPy reference.

    The decorator line had been truncated to a bare `.parametrize(...)`,
    which is a syntax error; restored the standard
    `@pytest.mark.parametrize` form.
    """
    labels = np.array([1, 0, (- 2), 0, 1], dtype=np.int32)
    logit_scores = np.array([1.2, (- 2.3), 0.0, 4.5, (- 0.67)], dtype=np.float32)
    scores = expit(logit_scores)
    loss_func = SelfAdversarialNegativeSampling(temperature)
    actual_loss = loss_func(tf.constant(labels), tf.constant(logit_scores))
    def loss_part(score, label):
        # Positive examples: plain -log(score).
        if (label == 1):
            return (- np.log(score))
        # Negatives: -log(1 - score) weighted by a softmax (at the given
        # temperature) over the scores sharing this label.
        relevant = scores[np.where((labels == label))]
        numer = np.exp((temperature * score))
        denom = np.sum(np.exp((temperature * relevant)))
        return (((- np.log((1 - score))) * numer) / denom)
    expected_loss = np.mean([loss_part(score, label) for (score, label) in zip(scores, labels)])
    assert (actual_loss.numpy() == pytest.approx(expected_loss, rel=1e-06))
def load_optim(model_optim_dict, load_dir):
    """Restore the newest checkpoint's tensors into model_optim_dict entries
    (on CUDA). Optimizer momentum state is not restored yet."""
    checkpoint_dir = os.path.join('checkpoints/', load_dir)
    print('LOAD_OPTIM: NOT YET LOADING ANY MOMENTUM PARAMS')
    if not os.path.exists(checkpoint_dir):
        print("...ain't no full checkpoint here!")
        return
    ckpt_names = os.listdir(checkpoint_dir)
    if not ckpt_names:
        print("...ain't no full checkpoint here!")
        return
    # Checkpoint files look like "<prefix>-<step>.<ext>"; pick the max step.
    steps = [int(name.split('-')[1].split('.')[0]) for name in ckpt_names]
    newest_name = ckpt_names[int(np.argmax(steps))]
    path = os.path.join(checkpoint_dir, newest_name)
    print(('loading from %s' % path))
    save_dict = np.load(path, allow_pickle=True)['save_dict'].item()
    for (key, param) in model_optim_dict.items():
        print(('loading %s' % key))
        param.data = torch.FloatTensor(save_dict[key]).to(torch.device('cuda'))
    print('done loading')
class PreprocessingConfig():
    """Structured (Hydra) configuration for a preprocessing run."""
    # NOTE(review): the field()/MISSING usage implies this is meant to be a
    # @dataclass; no decorator is visible in this chunk — confirm.
    # Hydra defaults list, taken from the module-level DEFAULTS constant.
    defaults: List[Any] = field(default_factory=(lambda : DEFAULTS))
    # Hydra run-dir override: timestamped path including the dataset name.
    hydra: Dict[(str, Any)] = field(default_factory=(lambda : {'run': {'dir': './runs/preprocessing/${now:%m-%d}/dataset-${dataset.name}'}}))
    # RNG seed for reproducibility.
    seed: int = 21
    # When True, go through the motions without writing outputs.
    dry_run: bool = False
    # Required: must be provided by the composed config.
    dataset: DatasetConfig = MISSING
class Bottleneck(nn.Module):
    """Bottleneck residual block (appears to follow the ENet design — confirm):
    a 1x1-reduce / conv / 1x1-expand main branch added to a skip connection
    that is max-pooled (when downsampling) and zero-padded in channels.

    Args:
        in_channels: input channel count.
        out_channels: output channel count (bottleneck width is out/4).
        dropout_prob: Dropout2d probability at the end of the main branch.
        downsample: if True, use stride-2 input conv and a 2x2 max-pool skip.
        asymmetric_ksize: if set (int k), replace the middle 3x3 conv with a
            factorized kx1 + 1xk pair.
        dilation: dilation of the symmetric middle conv.
        use_prelu: PReLU activations when True, else in-place ReLU.
    """
    def __init__(self, in_channels, out_channels, dropout_prob=0.0, downsample=False, asymmetric_ksize=None, dilation=1, use_prelu=True):
        super().__init__()
        bt_channels = (out_channels // 4)  # reduced (bottleneck) width
        self.downsample = downsample
        self.channels_to_pad = (out_channels - in_channels)
        input_stride = (2 if downsample else 1)
        # First conv passes input_stride as BOTH kernel_size and stride:
        # a 2x2/s2 conv when downsampling, a 1x1/s1 conv otherwise.
        # BatchNorm2d's second positional arg is eps=1e-3.
        main_branch = [nn.Conv2d(in_channels, bt_channels, input_stride, input_stride, bias=False), nn.BatchNorm2d(bt_channels, 0.001), (nn.PReLU(bt_channels) if use_prelu else nn.ReLU(True))]
        if (asymmetric_ksize is None):
            main_branch += [nn.Conv2d(bt_channels, bt_channels, (3, 3), 1, dilation, dilation, bias=False)]
        else:
            assert (type(asymmetric_ksize) is int)
            (ksize, padding) = (asymmetric_ksize, ((asymmetric_ksize - 1) // 2))
            # Factorized kx1 followed by 1xk convolution.
            main_branch += [nn.Conv2d(bt_channels, bt_channels, (ksize, 1), 1, (padding, 0), bias=False), nn.Conv2d(bt_channels, bt_channels, (1, ksize), 1, (0, padding))]
        main_branch += [nn.BatchNorm2d(bt_channels, 0.001), (nn.PReLU(bt_channels) if use_prelu else nn.ReLU(True)), nn.Conv2d(bt_channels, out_channels, (1, 1), bias=False), nn.BatchNorm2d(out_channels, 0.001), nn.Dropout2d(dropout_prob)]
        self.main_branch = nn.Sequential(*main_branch)
        self.output_activation = (nn.PReLU(out_channels) if use_prelu else nn.ReLU(True))
    def forward(self, x):
        """Return activation(skip + main_branch(x)); when downsampling, also
        return the max-pool indices (presumably for later unpooling in a
        decoder — confirm)."""
        if self.downsample:
            (x_skip_connection, max_indices) = F.max_pool2d(x, (2, 2), return_indices=True)
        else:
            x_skip_connection = x
        if (self.channels_to_pad > 0):
            # F.pad's spec is last-dim-first, so (0,0, 0,0, 0,C) pads the
            # channel dimension with zeros at the end.
            x_skip_connection = F.pad(x_skip_connection, (0, 0, 0, 0, 0, self.channels_to_pad))
        x = self.output_activation((x_skip_connection + self.main_branch(x)))
        if self.downsample:
            return (x, max_indices)
        else:
            return x
def init(args):
    """Prepare model, device, NED model, and confidence estimators for
    generation/evaluation.

    Loads config-file defaults into args, seeds RNGs, builds the named-entity
    disambiguation model, loads the seq2seq checkpoint onto the first
    available device, and discovers/loads confidence estimators.

    Returns:
        (model, device, confidence_estimators, estimator_filenames, ned_model)
    """
    load_config_file_to_args(args)
    check_and_update_generation_args(args)
    # Default the locales from the evaluation language settings.
    if (not args.src_locale):
        args.src_locale = args.eval_src_languages
    if (not args.tgt_locale):
        args.tgt_locale = args.eval_tgt_languages
    set_seed(args)
    devices = get_devices()
    device = devices[0]  # only the first device is used
    if (args.ned_retrieve_method == 'bootleg'):
        ned_model = init_ned_model(args, 'bootleg-annotator')
    else:
        ned_model = init_ned_model(args)
    logger.info(f'''Arguments:
{pformat(vars(args))}''')
    logger.info(f'Loading from {args.best_checkpoint}')
    Model = getattr(models, args.model)
    (model, _) = Model.load(args.path, model_checkpoint_file=args.checkpoint_name, args=args, device=device, src_lang=args.src_locale, tgt_lang=args.tgt_locale)
    model.to(device)
    model.eval()
    estimator_filenames = []
    # If no calibrator paths were given explicitly, auto-discover estimator
    # files saved alongside the model.
    if (args.calibrator_paths is None):
        for filename in os.listdir(args.path):
            path = os.path.join(args.path, filename)
            if (not ConfidenceEstimator.is_estimator(path)):
                continue
            if (args.calibrator_paths is None):
                args.calibrator_paths = []
            args.calibrator_paths.append(path)
            estimator_filenames.append(os.path.splitext(filename)[0])
    confidence_estimators = None
    if (args.calibrator_paths is not None):
        confidence_estimators = []
        for path in args.calibrator_paths:
            estimator = ConfidenceEstimator.load(path)
            confidence_estimators.append(estimator)
            logger.info('Loading confidence estimator "%s" from %s', estimator.name, path)
        # mc_dropout_num is taken from the first estimator (presumably shared
        # by all of them — confirm).
        args.mc_dropout_num = confidence_estimators[0].mc_dropout_num
    return (model, device, confidence_estimators, estimator_filenames, ned_model)
class CenterCrop(DauphinTransform):
    """Deterministic center-crop transform wrapping torchvision's
    transforms.CenterCrop."""
    def __init__(self, size, name=None, prob=1.0, level=0):
        # Build the crop functional first, then delegate the name/prob/level
        # bookkeeping to the DauphinTransform base class.
        self.size = size
        self.transform_func = transforms.CenterCrop(self.size)
        super().__init__(name, prob, level)
    def transform(self, pil_img, label, **kwargs):
        # Only the image is cropped; the label passes through unchanged.
        return (self.transform_func(pil_img), label)
    def __repr__(self):
        return f'<Transform ({self.name}), prob={self.prob}, level={self.level}, size={self.size}>'
def storage_from_cache(cls, key):
    """Look *key* up in the shared cache; return a storage rebuilt from the
    cached reference's cdata, or None on a cache miss."""
    cached_ref = shared_cache.get(key)
    if cached_ref is not None:
        return cls._new_with_weak_ptr(cached_ref.cdata)
    return None
def normalization(x):
    """Min-max normalize the values of *x* into [0, 1].

    zero_bit (a module-level constant, presumably a small epsilon — confirm)
    pads the denominator so a constant sequence does not divide by zero.

    The original recomputed min(x) and max(x) inside the comprehension for
    every element (O(n^2)); hoisting them gives O(n) with identical results.
    """
    lo = min(x)
    hi = max(x)
    denom = float((hi - lo) + zero_bit)
    return [(float(v) - lo) / denom for v in x]
def try_ann_to_type(ann, loc):
    """Convert a Python type annotation into a TorchScript JIT type.

    Args:
        ann: the annotation object (a class, a typing construct, or None).
        loc: source location, used for error reporting and class compilation.

    Returns:
        The corresponding torch._C type object; falls back to the C++
        resolver when no branch matches.
    """
    # Missing annotation defaults to Tensor.
    if (ann is None):
        return TensorType.get()
    if (inspect.isclass(ann) and issubclass(ann, torch.Tensor)):
        return TensorType.get()
    if is_tuple(ann):
        return TupleType([try_ann_to_type(a, loc) for a in ann.__args__])
    if is_list(ann):
        # Falls through (no return) when the element type cannot be resolved.
        elem_type = try_ann_to_type(ann.__args__[0], loc)
        if elem_type:
            return ListType(elem_type)
    if is_dict(ann):
        key = try_ann_to_type(ann.__args__[0], loc)
        value = try_ann_to_type(ann.__args__[1], loc)
        return DictType(key, value)
    if is_optional(ann):
        # Optional[T] is Union[T, None]; pick whichever arg is not NoneType.
        if issubclass(ann.__args__[1], type(None)):
            contained = ann.__args__[0]
        else:
            contained = ann.__args__[1]
        valid_type = try_ann_to_type(contained, loc)
        msg = 'Unsupported annotation {} could not be resolved because {} could not be resolved.'
        assert valid_type, msg.format(repr(ann), repr(contained))
        return OptionalType(valid_type)
    if (torch.distributed.rpc.is_available() and is_rref(ann)):
        return RRefType(try_ann_to_type(ann.__args__[0], loc))
    if is_future(ann):
        return FutureType(try_ann_to_type(ann.__args__[0], loc))
    if (ann is float):
        return FloatType.get()
    if (ann is int):
        return IntType.get()
    if (ann is str):
        return StringType.get()
    if (ann is bool):
        return BoolType.get()
    if (ann is Any):
        return AnyType.get()
    if (ann is type(None)):
        return NoneType.get()
    if (inspect.isclass(ann) and hasattr(ann, '__torch_script_interface__')):
        return InterfaceType(_qualified_name(ann))
    if (ann is torch.device):
        return DeviceObjType.get()
    if (ann is torch.dtype):
        # dtypes are represented as ints in TorchScript.
        return IntType.get()
    if (inspect.isclass(ann) and issubclass(ann, enum.Enum)):
        qualified_name = _qualified_name(ann)
        # Compile the enum class lazily on first encounter.
        if (_get_script_class(qualified_name) is None):
            torch.jit._script._recursive_compile_class(ann, loc)
        return EnumType(_qualified_name(ann), get_enum_value_type(ann, loc), list(ann))
    if inspect.isclass(ann):
        qualified_name = _qualified_name(ann)
        if (_get_script_class(qualified_name) is not None):
            return ClassType(qualified_name)
        # Compile user classes on demand, skipping builtins/nn.Module.
        ignored_builtin_classes = (torch.nn.Module, tuple, list, Exception)
        if (torch._jit_internal.can_compile_class(ann) and (not issubclass(ann, ignored_builtin_classes))):
            torch.jit._script._recursive_compile_class(ann, loc)
            return ClassType(qualified_name)
    # Last resort: ask the C++ resolver, with a no-op resolution callback.
    def fake_rcb(key):
        return None
    return torch._C._resolve_type_from_object(ann, loc, fake_rcb)
def type_ref_to_reflection_dict(type_ref):
    """Render *type_ref* as a JS-style reflection descriptor string, tagged
    'primitive' or 'struct' by the reference's own classification."""
    if not type_ref.is_primitive_type():
        return ('{ kind: "struct", type: %s, typeStr: "%s" }' % (type_ref.reflection_constructor, type_ref.name))
    return ('{ kind: "primitive", type: %s, typeStr: "%s" }' % (type_ref.reflection_constructor, type_ref.name))
def noisystudent_loader():
    """Create a timm tf_efficientnet_l2_ns model (without downloading timm's
    pretrained weights) and load the Noisy Student weights via the local
    load_model_state_dict helper."""
    model = timm.create_model('tf_efficientnet_l2_ns', pretrained=False)
    load_model_state_dict(model, 'efficientnet-l2-noisystudent')
    return model
def get_bag_of_keywords(cmd):
    """Tokenize a cleaned/anonymized command, dropping any token that
    contains the VAR_STR placeholder."""
    cleaned = clean_anonymize_command(cmd)
    return [token for token in cleaned.strip().split() if VAR_STR not in token]
def token_switching(encoding, prob):
    """Randomly replace non-special tokens in-place.

    Token ids 0-4 are treated as special and never touched. Every other
    position is, with probability *prob*, replaced by an id drawn uniformly
    from [5, tokenizer.vocab_size). Returns the (mutated) encoding.
    """
    special_ids = (0, 1, 2, 3, 4)
    ids = encoding['input_ids'][0]
    for pos, tok in enumerate(ids):
        if tok in special_ids:
            continue
        if np.random.uniform(0, 1) < prob:
            ids[pos] = np.random.choice(np.arange(5, tokenizer.vocab_size), 1)[0]
    return encoding
def get_inv_cdf_fns(cdfs: DataFrame) -> Iterable[Callable[([Array], Array)]]:
    """Build inverse-CDF lookup functions for the 'Black' and 'White' columns.

    Each returned function maps probabilities back to index values via a
    nearest-neighbor lookup on the column's CDF.

    Returns:
        [inv_cdf_black, inv_cdf_white]
    """
    def inv_cdf_factory(cdfs_df: DataFrame, key: str) -> Callable[([Array], Array)]:
        # Invert the CDF: probabilities become the index, original index
        # values become the data. Consistently uses the cdfs_df parameter
        # (the original mixed cdfs_df and the closed-over cdfs, which only
        # worked because they alias the same object).
        series = pd.Series(cdfs_df[key].index.values, index=cdfs_df[key].values)
        index = series.index
        def repaid_probs_fn(query_probs: Array) -> Array:
            # Vectorized path for arrays of probabilities.
            if isinstance(query_probs, np.ndarray):
                nearest_scores = _find_nearest_indices(index, query_probs)
                return series[nearest_scores].values
            # Scalar path: nearest-match lookup.
            # NOTE(review): Index.get_loc(..., method='nearest') is removed in
            # pandas >= 2.0; migrate to Index.get_indexer([...], method='nearest')
            # when upgrading.
            query_prob = query_probs
            nearest_prob = index[index.get_loc(query_prob, method='nearest')]
            return series[nearest_prob]
        return repaid_probs_fn
    inv_cdfs = [inv_cdf_factory(cdfs, 'Black'), inv_cdf_factory(cdfs, 'White')]
    return inv_cdfs
def cross_entropy(input_, target):
    """Mean cross-entropy over the batch.

    Flattens each sample, clamps probabilities to [epsilon, 1] before the
    log for numerical stability, and averages the per-sample losses.
    """
    flat = input_.view(input_.size(0), -1)
    log_probs = torch.log(torch.clamp(flat, min=epsilon, max=1))
    per_sample_loss = -(target * log_probs).sum(-1)
    return per_sample_loss.mean()
class GatHIVNet(HIVNet):
    """HIVNet variant whose graph layers are GAT (v1 or v2) convolutions."""
    def __init__(self, hidden_dim, num_graph_layers, in_feat_drop, residual, readout='mean', activation=nn.ReLU, heads=8, gat_dropout=0.0, gat_version=1):
        # Attention settings are stored before calling super().__init__,
        # which presumably invokes make_graph_layer during construction —
        # confirm against HIVNet.
        self.heads = heads
        self.gat_dropout = gat_dropout
        assert (gat_version in [1, 2])
        self.gat_version = gat_version
        super().__init__(hidden_dim=hidden_dim, num_graph_layers=num_graph_layers, in_feat_drop=in_feat_drop, residual=residual, readout=readout, activation=activation)
    def make_graph_layer(self, hidden_dim, layer_idx):
        """Build one GAT layer; the last layer uses a single head."""
        if (layer_idx == (self.num_graph_layers - 1)):
            heads = 1
        else:
            heads = self.heads
        # Per-head output width of hidden_dim // heads keeps the concatenated
        # multi-head output near hidden_dim.
        ctor = (GATConv if (self.gat_version == 1) else GATv2Conv)
        return ctor(hidden_dim, (hidden_dim // heads), heads=heads, dropout=self.gat_dropout)
def main(args, config):
    """BLIP pretraining entry point: distributed setup, data pipeline,
    training loop with per-epoch checkpointing.

    Args:
        args: namespace with device, seed, checkpoint, output_dir, distributed
            flags, gpu index.
        config: dict of training hyperparameters (batch size, lrs, epochs...).
    """
    utils.init_distributed_mode(args)
    device = torch.device(args.device)
    # Offset the seed per rank so workers draw different random streams.
    seed = (args.seed + utils.get_rank())
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    cudnn.benchmark = True
    print('Creating dataset')
    datasets = [create_dataset('pretrain', config, min_scale=0.2)]
    print(('number of training samples: %d' % len(datasets[0])))
    num_tasks = utils.get_world_size()
    global_rank = utils.get_rank()
    samplers = create_sampler(datasets, [True], num_tasks, global_rank)
    data_loader = create_loader(datasets, samplers, batch_size=[config['batch_size']], num_workers=[4], is_trains=[True], collate_fns=[None])[0]
    print('Creating model')
    model = blip_pretrain(image_size=config['image_size'], vit=config['vit'], vit_grad_ckpt=config['vit_grad_ckpt'], vit_ckpt_layer=config['vit_ckpt_layer'], queue_size=config['queue_size'])
    model = model.to(device)
    optimizer = torch.optim.AdamW(params=model.parameters(), lr=config['init_lr'], weight_decay=config['weight_decay'])
    start_epoch = 0
    # Optionally resume model/optimizer/epoch from a checkpoint file.
    if args.checkpoint:
        checkpoint = torch.load(args.checkpoint, map_location='cpu')
        state_dict = checkpoint['model']
        model.load_state_dict(state_dict)
        optimizer.load_state_dict(checkpoint['optimizer'])
        start_epoch = (checkpoint['epoch'] + 1)
        print(('resume checkpoint from %s' % args.checkpoint))
    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module  # unwrapped module for checkpointing
    print('Start training')
    start_time = time.time()
    for epoch in range(start_epoch, config['max_epoch']):
        step_lr_schedule(optimizer, epoch, config['init_lr'], config['min_lr'], config['lr_decay_rate'])
        train_stats = train(model, data_loader, optimizer, epoch, device, config)
        # Only rank 0 writes checkpoints and the training log.
        if utils.is_main_process():
            log_stats = {**{f'train_{k}': v for (k, v) in train_stats.items()}, 'epoch': epoch}
            save_obj = {'model': model_without_ddp.state_dict(), 'optimizer': optimizer.state_dict(), 'config': config, 'epoch': epoch}
            torch.save(save_obj, os.path.join(args.output_dir, ('checkpoint_%02d.pth' % epoch)))
            with open(os.path.join(args.output_dir, 'log.txt'), 'a') as f:
                f.write((json.dumps(log_stats) + '\n'))
        dist.barrier()
    total_time = (time.time() - start_time)
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
def get_stupid_embedder(config):
    """Build a StupidEmbedder from the model section of *config*.

    The utterance embedder type is chosen by config.model.utterance_embedder.type
    ('average' or 'lstm'); anything else raises ValueError.
    """
    model_cfg = config.model
    utt_cfg = model_cfg.utterance_embedder
    glove = GloveEmbeddings(utt_cfg.vocab_size, utt_cfg.glove_dim)
    token_embedder = TokenEmbedder(glove, trainable=utt_cfg.trainable)
    if utt_cfg.type == 'average':
        utterance_embedder = AverageUtteranceEmbedder(token_embedder, utt_cfg.max_words)
    elif utt_cfg.type == 'lstm':
        utterance_embedder = LSTMUtteranceEmbedder(token_embedder, utt_cfg.lstm_dim, utt_cfg.max_words)
    else:
        raise ValueError('Unknown UtteranceEmbedder type {}'.format(utt_cfg.type))
    return StupidEmbedder(model_cfg.dim, utterance_embedder, model_cfg.dropout)
def is_blocked_key(key):
    """Return True for keys that should be skipped: the empty key, the
    'updated' key, and any key mentioning an image or caption."""
    return (key in {'updated', ''}) or ('image' in key) or ('caption' in key)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.