code stringlengths 101 5.91M |
|---|
def test_audio():
    """Smoke-test load_audio / log_mel_spectrogram on the bundled jfk.flac clip."""
    path = os.path.join(os.path.dirname(__file__), 'jfk.flac')
    waveform = load_audio(path)

    # Mono waveform of roughly 11 seconds with a sane (normalized) amplitude.
    assert waveform.ndim == 1
    assert SAMPLE_RATE * 10 < waveform.shape[0] < SAMPLE_RATE * 12
    assert 0 < waveform.std() < 1

    # Spectrogram from the in-memory array must match the one loaded by path.
    mel_from_array = log_mel_spectrogram(waveform)
    mel_from_path = log_mel_spectrogram(path)
    assert np.allclose(mel_from_array, mel_from_path)
    assert mel_from_array.max() - mel_from_array.min() <= 2.0
def _graph_and_latents_collate_func(args):
(graphs, latent_space) = zip(*args)
all_graphs = graphs[0].concatenate(graphs)
latent_space = torch.from_numpy(np.stack(latent_space))
return (all_graphs, latent_space) |
# NOTE(review): this bare call looks like extraction-mangled decorator syntax —
# in fairseq this line is `@register_lr_scheduler('tri_stage')`; confirm against
# the upstream file before relying on it.
_lr_scheduler('tri_stage')
class TriStageLRSchedule(FairseqLRScheduler):
    """Tri-stage learning-rate schedule: linear warmup to ``peak_lr``, constant
    hold, then exponential decay down to ``final_lr`` (held there afterwards)."""

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        # Multiple --lr values mean a fixed per-epoch schedule, which conflicts
        # with the computed tri-stage curve.
        if (len(args.lr) > 1):
            raise ValueError('Cannot use a fixed learning rate schedule with tri-stage lr. Consider --lr-scheduler=fixed instead.')
        # Peak LR is the configured --lr; warmup starts at init_lr_scale * lr
        # and decay ends at final_lr_scale * lr.
        self.peak_lr = args.lr[0]
        self.init_lr = (args.init_lr_scale * args.lr[0])
        self.final_lr = (args.final_lr_scale * args.lr[0])
        self.warmup_steps = args.warmup_steps
        self.hold_steps = args.hold_steps
        self.decay_steps = args.decay_steps
        # Linear LR increment applied per warmup update.
        self.warmup_rate = ((self.peak_lr - self.init_lr) / self.warmup_steps)
        # Exponential decay constant chosen so the LR reaches
        # final_lr_scale * peak_lr after exactly decay_steps updates.
        self.decay_factor = ((- math.log(args.final_lr_scale)) / args.decay_steps)
        # Initialize the optimizer at the warmup starting LR.
        self.lr = self.init_lr
        self.optimizer.set_lr(self.lr)

    # NOTE(review): takes `parser` as the first argument — upstream this is a
    # @staticmethod; calling it on an instance would mis-bind `parser` to self.
    # Confirm before calling through an instance.
    def add_args(parser):
        """Register the tri-stage hyperparameters on the argument parser."""
        parser.add_argument('--warmup-steps', default=4000, type=int, metavar='N', help='warmup the learning rate linearly for the first N updates')
        parser.add_argument('--hold-steps', default=20000, type=int, metavar='N', help='steps in hold stage.')
        parser.add_argument('--decay-steps', default=60000, type=int, metavar='N', help='steps in decay stages')
        parser.add_argument('--init-lr-scale', default=0.01, type=float, help='\n    initial learning rate scale during warmup phase; default is 0.01')
        parser.add_argument('--final-lr-scale', default=0.01, type=float, help='final learning rate scale; default to 0.01')

    def _decide_stage(self, update_step):
        """Map a global update count to (stage, steps-within-stage).

        Stages: 0 = warmup, 1 = hold, 2 = decay, 3 = done (constant final LR).
        """
        if (update_step < self.warmup_steps):
            return (0, update_step)
        offset = self.warmup_steps
        if (update_step < (offset + self.hold_steps)):
            return (1, (update_step - offset))
        offset += self.hold_steps
        # Note the inclusive bound: the final decay step still counts as stage 2.
        if (update_step <= (offset + self.decay_steps)):
            return (2, (update_step - offset))
        offset += self.decay_steps
        return (3, (update_step - offset))

    def step(self, epoch, val_loss=None):
        """End-of-epoch hook; the LR itself is driven by step_update()."""
        super().step(epoch, val_loss)
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Recompute and apply the LR after each optimizer update; returns the new LR."""
        (stage, steps_in_stage) = self._decide_stage(num_updates)
        if (stage == 0):
            self.lr = (self.init_lr + (self.warmup_rate * steps_in_stage))
        elif (stage == 1):
            self.lr = self.peak_lr
        elif (stage == 2):
            self.lr = (self.peak_lr * math.exp(((- self.decay_factor) * steps_in_stage)))
        elif (stage == 3):
            self.lr = self.final_lr
        else:
            raise ValueError('Undefined stage')
        self.optimizer.set_lr(self.lr)
        return self.lr
# Fix: the two parametrize lines began with `.parametrize(...)` — a leading-dot
# expression is a SyntaxError; they are pytest.mark.parametrize decorators.
@pytest.mark.parametrize('archive_type', ['grid', 'cvt', 'sliding', 'cvt_3d'])
@pytest.mark.parametrize('invalid_arg_cbar', ['None', 3.2, True, (3.2, None), [3.2, None]])
def test_heatmap_fails_on_invalid_cbar_option(archive_type, invalid_arg_cbar):
    """Each heatmap helper must raise ValueError for unrecognized cbar values."""
    # Build the archive flavor selected by the parametrization.
    archive = {
        'grid': lambda: GridArchive(solution_dim=2, dims=[20, 20, 20], ranges=[(-1, 1)] * 3),
        'cvt': lambda: CVTArchive(solution_dim=2, cells=100, ranges=[(-1, 1)] * 3, samples=100),
        'sliding': lambda: SlidingBoundariesArchive(solution_dim=2, dims=[20, 20, 20], ranges=[(-1, 1)] * 3),
        'cvt_3d': lambda: CVTArchive(solution_dim=2, cells=100, ranges=[(-1, 1)] * 3, samples=100),
    }[archive_type]()
    with pytest.raises(ValueError):
        {
            'grid': grid_archive_heatmap,
            'cvt': cvt_archive_heatmap,
            'sliding': sliding_boundaries_archive_heatmap,
            'cvt_3d': cvt_archive_3d_plot,
        }[archive_type](archive=archive, cbar=invalid_arg_cbar)
class RejectionLog():
    """Abstract base class for rejection logs.

    Subclasses must override the three hooks below; the base implementations
    raise RejectionLogError to flag a missing override.
    """

    def __init__(self, file):
        self.file = file          # destination the concrete log writes to
        self.initialized = False  # set by subclasses once the log is prepared

    def initialize_rejection_log(self, forest):
        """Hook: prepare the log for the given forest (must be overridden)."""
        raise RejectionLogError("Function 'initialize_rejection_log' was not overloaded by child class")

    def add_to_rejection_log(self, forest, rejection_status):
        """Hook: record one rejection decision (must be overridden)."""
        raise RejectionLogError("Function 'add_to_rejection_log' was not overloaded by child class")

    def save_rejection_log(self):
        """Hook: persist the accumulated log (must be overridden)."""
        raise RejectionLogError("Function 'save_rejection_log' was not overloaded by child class")
def test_importing():
    """The C++ module's re-exported OD must be Python's collections.OrderedDict."""
    from collections import OrderedDict

    from pybind11_tests.modules import OD

    assert OD is OrderedDict
    # NOTE(review): this is the pre-3.12 OrderedDict repr; Python >= 3.12 prints
    # "OrderedDict({1: 'a', 2: 'b'})" and would fail this assertion.
    assert str(OD([(1, 'a'), (2, 'b')])) == "OrderedDict([(1, 'a'), (2, 'b')])"
class InvertedResidual(nn.Module):
    """MobileNetV2-style inverted residual block: 1x1 expand -> depthwise 3x3 ->
    linear 1x1 project, with an identity shortcut when shape is preserved.

    Args:
        in_channels / out_channels: block input/output channel counts.
        stride: 1 or 2; the depthwise conv's stride.
        expand_ratio: hidden width = round(in_channels * expand_ratio).
        dilation: dilation (and matching padding) of the depthwise conv.
        conv_cfg / norm_cfg / act_cfg: ConvModule config dicts (mmcv-style).
        with_cp: use torch checkpointing in forward to trade compute for memory.
    """

    def __init__(self, in_channels, out_channels, stride, expand_ratio, dilation=1, conv_cfg=None, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU6'), with_cp=False):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert (stride in [1, 2]), f'stride must in [1, 2]. But received {stride}.'
        self.with_cp = with_cp
        # Residual shortcut only when spatial size and channel count are unchanged.
        self.use_res_connect = ((self.stride == 1) and (in_channels == out_channels))
        hidden_dim = int(round((in_channels * expand_ratio)))
        layers = []
        # The 1x1 expansion conv is skipped when expand_ratio == 1 (no widening).
        if (expand_ratio != 1):
            layers.append(ConvModule(in_channels=in_channels, out_channels=hidden_dim, kernel_size=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg))
        # Depthwise 3x3 (groups=hidden_dim) then linear 1x1 projection; the
        # projection has act_cfg=None (no activation), per MobileNetV2.
        layers.extend([ConvModule(in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=3, stride=stride, padding=dilation, dilation=dilation, groups=hidden_dim, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg), ConvModule(in_channels=hidden_dim, out_channels=out_channels, kernel_size=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None)])
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        """Apply the block, optionally under torch.utils.checkpoint."""
        def _inner_forward(x):
            if self.use_res_connect:
                return (x + self.conv(x))
            else:
                return self.conv(x)
        # Checkpointing only helps (and only works) when gradients are required.
        if (self.with_cp and x.requires_grad):
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        return out
def test_audio_dataset_init_val(fs, mocker):
    """The validation split of the audio dataset must expose exactly 10 files."""
    val_dataset = audio_dataset(fs, mocker, split='val')
    assert len(val_dataset.file_list) == 10
def create_optimizer(config, logger, model_params, state_dict=None):
    """Build the optimizer named by ``config.optimizer`` for ``model_params``.

    The class is looked up in ``torch.optim`` first, then in the
    ``torch_optimizer`` extension package. Optimizer state is restored from
    ``state_dict['optimizer']`` when present.

    Raises:
        NotImplementedError: if the name is found in neither package.
    """
    assert ('lr' in config.optim_params)
    # Configs may carry the LR as a string (e.g. "1e-3"); normalize to float.
    config.optim_params.lr = float(config.optim_params.lr)
    if hasattr(torch.optim, config.optimizer):
        optim_cls = getattr(torch.optim, config.optimizer)
    elif hasattr(torch_optimizer, config.optimizer):
        optim_cls = getattr(torch_optimizer, config.optimizer)
    else:
        raise NotImplementedError(config.optimizer)
    # Fix: this was a bare debug `print(...)`; route it through the logger so
    # the hyper-parameters land in the run log with everything else.
    logger.info(config.optim_params)
    optimizer = optim_cls(model_params, **config.optim_params)
    logger.settings('Optimizer {} created'.format(type(optimizer).__name__))
    if ((state_dict is not None) and ('optimizer' in state_dict)):
        optimizer.load_state_dict(state_dict['optimizer'])
        logger.info('Optimizer state loaded')
    else:
        logger.info(optimizer)
    return optimizer
def parse_opt():
    """Parse command-line options for captioning training and sanity-check them.

    Returns:
        argparse.Namespace with all training/model/optimization settings.
    """
    parser = argparse.ArgumentParser()
    # --- Data input settings ---
    parser.add_argument('--dataset', type=str, default='flickr', help='coco|flickr')
    parser.add_argument('--input_json', type=str, default='data/flickrtalk.json', help='path to the json file containing additional info and vocab')
    parser.add_argument('--input_fc_dir', type=str, default='data/flickrbu/flickrbu_fc', help='path to the directory containing the preprocessed fc feats')
    parser.add_argument('--input_att_dir', type=str, default='data/flickrbu/flickrbu_att', help='path to the directory containing the preprocessed att feats')
    parser.add_argument('--input_box_dir', type=str, default='data/flickrbu/flickrbu_box', help='path to the directory containing the boxes of att feats')
    parser.add_argument('--input_label_h5', type=str, default='data/flickrtalk_label.h5', help='path to the h5file containing the preprocessed dataset')
    parser.add_argument('--cached_tokens', type=str, default='flickr-train-idxs', help='Cached token file for calculating cider score during self critical training.')
    parser.add_argument('--start_from', type=str, default=None, help="continue training from saved model at this path. Path must contain files saved by previous training process: \n 'infos.pkl' : configuration;\n 'checkpoint' : paths to model file(s) (created by tf).\n Note: this file contains absolute paths, be careful when moving files around;\n 'model.ckpt-*' : file(s) with model definition (created by tf)\n ")
    # --- Model settings ---
    parser.add_argument('--caption_model', type=str, default='topdown', help='show_tell, show_attend_tell, all_img, fc, att2in, att2in2, att2all2, adaatt, adaattmo, topdown, stackatt, denseatt, transformer')
    parser.add_argument('--rnn_size', type=int, default=512, help='size of the rnn in number of hidden nodes in each layer. 512 for flickr and 1024 for coco')
    parser.add_argument('--num_layers', type=int, default=1, help='number of layers in the RNN')
    parser.add_argument('--rnn_type', type=str, default='lstm', help='rnn, gru, or lstm')
    parser.add_argument('--input_encoding_size', type=int, default=512, help='the encoding size of each token in the vocabulary.')
    parser.add_argument('--att_hid_size', type=int, default=512, help='the hidden size of the attention MLP; only useful in show_attend_tell; 0 if not using hidden layer')
    parser.add_argument('--fc_feat_size', type=int, default=2048, help='2048 for resnet, 4096 for vgg')
    parser.add_argument('--att_feat_size', type=int, default=2048, help='2048 for resnet, 512 for vgg')
    parser.add_argument('--logit_layers', type=int, default=1, help='number of layers in the RNN')
    parser.add_argument('--use_bn', type=int, default=0, help='If 1, then do batch_normalization first in att_embed, if 2 then do bn both in the beginning and the end of att_embed')
    parser.add_argument('--norm_att_feat', type=int, default=0, help='If normalize attention features')
    parser.add_argument('--use_box', type=int, default=0, help='If use box features')
    parser.add_argument('--norm_box_feat', type=int, default=0, help='If use box, do we normalize box feature')
    # --- Optimization: general ---
    parser.add_argument('--max_epochs', type=int, default=110, help='number of epochs,110 for flickr, 60 for mscoco')
    parser.add_argument('--batch_size', type=int, default=29, help='minibatch size')
    parser.add_argument('--grad_clip', type=float, default=0.1, help='clip gradients at this value')
    parser.add_argument('--drop_prob_lm', type=float, default=0.5, help='strength of dropout in the Language Model RNN')
    parser.add_argument('--self_critical_after', type=int, default=-1, help='After what epoch do we start finetuning the CNN? (-1 = disable; never finetune, 0 = finetune from start)')
    parser.add_argument('--seq_per_img', type=int, default=5, help='number of captions to sample for each image during training. Done for efficiency since CNN forward pass is expensive. E.g. coco has 5 sents/image')
    parser.add_argument('--beam_size', type=int, default=1, help='used when sample_max = 1, indicates number of beams in beam search. Usually 2 or 3 works well. More is not better. Set this to 1 for faster runtime but a bit worse performance.')
    parser.add_argument('--max_length', type=int, default=20, help='Maximum length during sampling')
    parser.add_argument('--length_penalty', type=str, default='', help='wu_X or avg_X, X is the alpha')
    parser.add_argument('--block_trigrams', type=int, default=0, help='block repeated trigram.')
    parser.add_argument('--remove_bad_endings', type=int, default=0, help='Remove bad endings')
    parser.add_argument('--optim', type=str, default='adam', help='what update to use? rmsprop|sgd|sgdmom|adagrad|adam')
    parser.add_argument('--learning_rate', type=float, default=5e-05, help='learning rate')
    parser.add_argument('--learning_rate_decay_start', type=int, default=-1, help='at what iteration to start decaying learning rate? (-1 = dont) (in epoch)')
    parser.add_argument('--learning_rate_decay_every', type=int, default=3, help='every how many iterations thereafter to drop LR?(in epoch)')
    parser.add_argument('--learning_rate_decay_rate', type=float, default=0.8, help='every how many iterations thereafter to drop LR?(in epoch)')
    parser.add_argument('--optim_alpha', type=float, default=0.9, help='alpha for adam')
    parser.add_argument('--optim_beta', type=float, default=0.999, help='beta used for adam')
    parser.add_argument('--optim_epsilon', type=float, default=1e-08, help='epsilon that goes into denominator for smoothing')
    parser.add_argument('--weight_decay', type=float, default=0, help='weight_decay')
    parser.add_argument('--label_smoothing', type=float, default=0, help='')
    parser.add_argument('--noamopt', action='store_true', help='')
    parser.add_argument('--noamopt_warmup', type=int, default=2000, help='')
    parser.add_argument('--noamopt_factor', type=float, default=1, help='')
    parser.add_argument('--reduce_on_plateau', action='store_true', help='')
    # --- Scheduled sampling ---
    parser.add_argument('--scheduled_sampling_start', type=int, default=-1, help='at what iteration to start decay gt probability')
    parser.add_argument('--scheduled_sampling_increase_every', type=int, default=5, help='every how many iterations thereafter to gt probability')
    parser.add_argument('--scheduled_sampling_increase_prob', type=float, default=0.05, help='How much to update the prob')
    parser.add_argument('--scheduled_sampling_max_prob', type=float, default=0.25, help='Maximum scheduled sampling prob.')
    # --- Evaluation / checkpointing ---
    parser.add_argument('--val_images_use', type=int, default=-1, help='how many images to use when periodically evaluating the validation loss? (-1 = all)')
    parser.add_argument('--save_checkpoint_every', type=int, default=1000, help='how often to save a model checkpoint (in iterations)?, 1000 for flickr; 2500 for mscoco')
    parser.add_argument('--save_history_ckpt', type=int, default=0, help='If save checkpoints at every save point')
    parser.add_argument('--checkpoint_path', type=str, default='log/sc-ground-CE-gt-sup-0.1-nll-pos-scan', help='directory to store checkpointed models')
    parser.add_argument('--language_eval', type=int, default=1, help='Evaluate language as well (1 = yes, 0 = no)? BLEU/CIDEr/METEOR/ROUGE_L? requires coco-caption code from Github.')
    parser.add_argument('--losses_log_every', type=int, default=25, help='How often do we snapshot losses, for inclusion in the progress dump? (0 = disable)')
    parser.add_argument('--load_best_score', type=int, default=1, help='Do we load previous best score when resuming training.')
    # --- Misc / rewards ---
    parser.add_argument('--id', type=str, default='sc-ground-CE-gt-sup-0.1-nll-pos-scan', help='an id identifying this run/job. used in cross-val and appended when writing progress files')
    parser.add_argument('--train_only', type=int, default=0, help='if true then use 80k, else use 110k')
    parser.add_argument('--spice_reward_weight', type=float, default=0, help='The reward weight from spice')
    parser.add_argument('--cider_reward_weight', type=float, default=1, help='The reward weight from cider')
    parser.add_argument('--bleu_reward_weight', type=float, default=0, help='The reward weight from bleu4')
    parser.add_argument('--ground_reward_weight', type=float, default=1, help='The reward weight from ground')
    # NOTE(review): `type=bool` on the two flags below is an argparse pitfall —
    # any non-empty string (including "False") parses as True. Kept as-is for
    # CLI compatibility; consider action='store_true' in a breaking change.
    parser.add_argument('--att_supervise', type=bool, default=False, help='whether use attention supervise')
    parser.add_argument('--att_sup_crit', type=str, default='KL', help='NLL | KL | ExtendNll')
    parser.add_argument('--att_supervise_weight', type=float, default=0, help='att_supervise_weight')
    parser.add_argument('--use_gt_box', type=bool, default=False, help='whether use gt box supervise')
    args = parser.parse_args()
    # Sanity checks on parsed values (fix: the last two messages previously
    # said 'language_eval' due to copy-paste).
    assert (args.rnn_size > 0), 'rnn_size should be greater than 0'
    assert (args.num_layers > 0), 'num_layers should be greater than 0'
    assert (args.input_encoding_size > 0), 'input_encoding_size should be greater than 0'
    assert (args.batch_size > 0), 'batch_size should be greater than 0'
    assert ((args.drop_prob_lm >= 0) and (args.drop_prob_lm < 1)), 'drop_prob_lm should be between 0 and 1'
    assert (args.seq_per_img > 0), 'seq_per_img should be greater than 0'
    assert (args.beam_size > 0), 'beam_size should be greater than 0'
    assert (args.save_checkpoint_every > 0), 'save_checkpoint_every should be greater than 0'
    assert (args.losses_log_every > 0), 'losses_log_every should be greater than 0'
    assert ((args.language_eval == 0) or (args.language_eval == 1)), 'language_eval should be 0 or 1'
    assert ((args.load_best_score == 0) or (args.load_best_score == 1)), 'load_best_score should be 0 or 1'
    assert ((args.train_only == 0) or (args.train_only == 1)), 'train_only should be 0 or 1'
    return args
def _bonferroni(p_values, num_comparison):
adjust = np.vectorize((lambda pv: min(1.0, (pv * num_comparison))))
adjusted_p_values = adjust(p_values)
assert np.all((adjusted_p_values[(~ np.isnan(adjusted_p_values))] <= 1.0))
assert np.all((adjusted_p_values[(~ np.isnan(adjusted_p_values))] >= 0.0))
return adjusted_p_values |
class VGGnet_test(Network):
    """VGG16-based CTPN detection network, inference variant (TensorFlow 1.x).

    Builds the graph eagerly in __init__ via the project's Network feed/chain
    DSL: VGG conv stack -> RPN conv -> BiLSTM -> bbox/cls heads -> proposals.
    """

    def __init__(self, trainable=True):
        self.inputs = []
        # NHWC image batch, im_info = (height, width, scale) per image.
        self.data = tf.placeholder(tf.float32, shape=[None, None, None, 3])
        self.im_info = tf.placeholder(tf.float32, shape=[None, 3])
        self.keep_prob = tf.placeholder(tf.float32)
        self.layers = dict({'data': self.data, 'im_info': self.im_info})
        self.trainable = trainable
        self.setup()

    def setup(self):
        """Assemble the layer graph (called once from __init__)."""
        anchor_scales = cfg.ANCHOR_SCALES
        # VGG16's conv5 output is downsampled 16x relative to the input.
        _feat_stride = [16]
        # VGG16 backbone: 5 conv groups with 2x2 max-pooling between them.
        self.feed('data').conv(3, 3, 64, 1, 1, name='conv1_1').conv(3, 3, 64, 1, 1, name='conv1_2').max_pool(2, 2, 2, 2, padding='VALID', name='pool1').conv(3, 3, 128, 1, 1, name='conv2_1').conv(3, 3, 128, 1, 1, name='conv2_2').max_pool(2, 2, 2, 2, padding='VALID', name='pool2').conv(3, 3, 256, 1, 1, name='conv3_1').conv(3, 3, 256, 1, 1, name='conv3_2').conv(3, 3, 256, 1, 1, name='conv3_3').max_pool(2, 2, 2, 2, padding='VALID', name='pool3').conv(3, 3, 512, 1, 1, name='conv4_1').conv(3, 3, 512, 1, 1, name='conv4_2').conv(3, 3, 512, 1, 1, name='conv4_3').max_pool(2, 2, 2, 2, padding='VALID', name='pool4').conv(3, 3, 512, 1, 1, name='conv5_1').conv(3, 3, 512, 1, 1, name='conv5_2').conv(3, 3, 512, 1, 1, name='conv5_3')
        # RPN head on conv5_3, then a bidirectional LSTM over the feature rows.
        self.feed('conv5_3').conv(3, 3, 512, 1, 1, name='rpn_conv/3x3')
        self.feed('rpn_conv/3x3').Bilstm(512, 128, 512, name='lstm_o')
        # 10 anchors per scale: 4 bbox regression outputs, 2 class scores each.
        self.feed('lstm_o').lstm_fc(512, ((len(anchor_scales) * 10) * 4), name='rpn_bbox_pred')
        self.feed('lstm_o').lstm_fc(512, ((len(anchor_scales) * 10) * 2), name='rpn_cls_score')
        # Reshape to 2-class layout, softmax, reshape back, then propose ROIs.
        self.feed('rpn_cls_score').spatial_reshape_layer(2, name='rpn_cls_score_reshape').spatial_softmax(name='rpn_cls_prob')
        self.feed('rpn_cls_prob').spatial_reshape_layer(((len(anchor_scales) * 10) * 2), name='rpn_cls_prob_reshape')
        self.feed('rpn_cls_prob_reshape', 'rpn_bbox_pred', 'im_info').proposal_layer(_feat_stride, anchor_scales, 'TEST', name='rois')
class NgramCounts(object):
    """N-gram count store used to build a small biased LM and emit it as an
    OpenFst text-format FST.

    ``self.counts[n]`` maps a length-n history tuple to a dict
    {predicted_word: float count}. Words are integer ids; BOS/EOS/backoff are
    reserved negative ids so they cannot collide with real words.
    """

    def __init__(self, ngram_order):
        self.ngram_order = ngram_order
        # Reserved symbols (negative ids, see class docstring).
        self.bos_symbol = (- 3)
        self.eos_symbol = (- 2)
        self.backoff_symbol = (- 1)
        # counts[n][history][word] = count; histories are tuples of length n.
        self.counts = []
        for n in range(ngram_order):
            self.counts.append(defaultdict((lambda : defaultdict(float))))

    def AddCount(self, history, predicted_word, count):
        """Add `count` to the (history -> predicted_word) cell."""
        self.counts[len(history)][history][predicted_word] += count

    def AddRawCountsFromLine(self, line):
        """Accumulate all n-grams (orders 1..ngram_order) from one line of word ids."""
        try:
            words = (([self.bos_symbol] + [int(x) for x in line.split()]) + [self.eos_symbol])
        except:
            # NOTE(review): bare except — also swallows KeyboardInterrupt/SystemExit.
            sys.exit('make_one_biased_lm.py: bad input line {0} (expected a sequence of integers)'.format(line))
        for n in range(1, len(words)):
            predicted_word = words[n]
            # Truncate the history to at most ngram_order - 1 preceding words.
            history_start = max(0, ((n + 1) - self.ngram_order))
            history = tuple(words[history_start:n])
            self.AddCount(history, predicted_word, 1.0)

    def AddRawCountsFromStandardInput(self):
        """Read id sequences from stdin until EOF, accumulating counts."""
        lines_processed = 0
        while True:
            line = sys.stdin.readline()
            if (line == ''):
                break
            self.AddRawCountsFromLine(line)
            lines_processed += 1
        # NOTE(review): depends on the module-global `args` (argparse result)
        # being in scope for the verbosity flag.
        if ((lines_processed == 0) or (args.verbose > 0)):
            print('make_one_biased_lm.py: processed {0} lines of input'.format(lines_processed), file=sys.stderr)

    def GetHistToTotalCount(self):
        """Map each history (length >= 2) to the total count credited to it and
        to every suffix of it of length >= 2 (used by the pruning pass)."""
        ans = defaultdict(float)
        for n in range(2, self.ngram_order):
            for (hist, word_to_count) in self.counts[n].items():
                total_count = sum(word_to_count.values())
                while (len(hist) >= 2):
                    ans[hist] += total_count
                    hist = hist[1:]
        return ans

    def CompletelyDiscountLowCountStates(self, min_count):
        """Prune histories whose accumulated count is below `min_count`,
        folding their counts into the backed-off (shorter) history.

        Processes higher orders first so pruned mass cascades downward.
        """
        hist_to_total_count = self.GetHistToTotalCount()
        for n in reversed(list(range(2, self.ngram_order))):
            this_order_counts = self.counts[n]
            to_delete = []
            for hist in this_order_counts.keys():
                if (hist_to_total_count[hist] < min_count):
                    word_to_count = this_order_counts[hist]
                    to_delete.append(hist)
                    backoff_hist = hist[1:]
                    # Move every count down to the one-shorter history.
                    for (word, count) in word_to_count.items():
                        self.AddCount(backoff_hist, word, count)
            # Deletion deferred to avoid mutating the dict during iteration.
            for hist in to_delete:
                del this_order_counts[hist]

    def ApplyBackoff(self, D):
        """Absolute discounting: subtract constant D (0 < D < 1) from every
        count, give the removed mass to the backoff symbol of that history,
        and add a count of 1 for the word in the backed-off history."""
        assert ((D > 0.0) and (D < 1.0))
        for n in reversed(list(range(1, self.ngram_order))):
            this_order_counts = self.counts[n]
            for (hist, word_to_count) in this_order_counts.items():
                backoff_hist = hist[1:]
                backoff_word_to_count = self.counts[(n - 1)][backoff_hist]
                this_discount_total = 0.0
                for word in word_to_count:
                    # Counts are raw occurrence counts, so each is >= 1 here.
                    assert (word_to_count[word] >= 1.0)
                    word_to_count[word] -= D
                    this_discount_total += D
                    backoff_word_to_count[word] += 1.0
                word_to_count[self.backoff_symbol] += this_discount_total

    def Print(self, info_string):
        """Debug dump of all counts to stderr, with totals incl./excl. backoff mass."""
        print(info_string, file=sys.stderr)
        total = 0.0
        total_excluding_backoff = 0.0
        for this_order_counts in self.counts:
            for (hist, word_to_count) in this_order_counts.items():
                this_total_count = sum(word_to_count.values())
                print('{0}: total={1} '.format(hist, this_total_count), end='', file=sys.stderr)
                print(' '.join(['{0} -> {1} '.format(word, count) for (word, count) in word_to_count.items()]), file=sys.stderr)
                total += this_total_count
                total_excluding_backoff += this_total_count
                # Subtract the backoff mass back out of the second total.
                if (self.backoff_symbol in word_to_count):
                    total_excluding_backoff -= word_to_count[self.backoff_symbol]
        print('total count = {0}, excluding discount = {1}'.format(total, total_excluding_backoff), file=sys.stderr)

    def AddTopWords(self, top_words_file):
        """Boost unigram counts from a file of '<word-id> <prob>' lines, scaled
        by the current unigram total so `prob` acts as a relative weight."""
        empty_history = ()
        word_to_count = self.counts[0][empty_history]
        total = sum(word_to_count.values())
        try:
            f = open(top_words_file, mode='r', encoding='utf-8')
        except:
            # NOTE(review): bare except — see AddRawCountsFromLine.
            sys.exit(('make_one_biased_lm.py: error opening top-words file: --top-words=' + top_words_file))
        while True:
            line = f.readline()
            if (line == ''):
                break
            try:
                [word_index, prob] = line.split()
                word_index = int(word_index)
                prob = float(prob)
                assert ((word_index > 0) and (prob > 0.0))
                word_to_count[word_index] += (prob * total)
            except Exception as e:
                sys.exit("make_one_biased_lm.py: could not make sense of the line '{0}' in op-words file: {1} ".format(line, str(e)))
        f.close()

    def GetTotalCountMap(self):
        """Map every history to the sum of its word counts (incl. backoff mass)."""
        total_count_map = dict()
        for n in range(0, self.ngram_order):
            for (hist, word_to_count) in self.counts[n].items():
                total_count_map[hist] = sum(word_to_count.values())
        return total_count_map

    def GetHistToStateMap(self):
        """Assign a distinct FST state number to every history, lower orders first."""
        hist_to_state = dict()
        fst_state_counter = 0
        for n in range(0, self.ngram_order):
            for hist in self.counts[n].keys():
                hist_to_state[hist] = fst_state_counter
                fst_state_counter += 1
        return hist_to_state

    def GetProb(self, hist, word, total_count_map):
        """Probability of `word` given `hist`, recursively including the mass
        reachable through the backoff arc(s)."""
        total_count = total_count_map[hist]
        word_to_count = self.counts[len(hist)][hist]
        prob = (float(word_to_count[word]) / total_count)
        # Add backoff contribution except for the backoff symbol itself.
        if ((len(hist) > 0) and (word != self.backoff_symbol)):
            prob_in_backoff = self.GetProb(hist[1:], word, total_count_map)
            backoff_prob = (float(word_to_count[self.backoff_symbol]) / total_count)
            prob += (backoff_prob * prob_in_backoff)
        return prob

    def PrintAsFst(self, word_disambig_symbol):
        """Print the model to stdout in OpenFst text format.

        Emits: '<src> <dst> <ilabel> <olabel> <cost>' arcs, '<state> <cost>'
        final-state lines for EOS, and backoff arcs labeled with
        `word_disambig_symbol`. Costs are negative log-probs.
        """
        hist_to_state = self.GetHistToStateMap()
        total_count_map = self.GetTotalCountMap()
        # Order-1 histories are printed first, sorted ascending, so the BOS
        # history (smallest id, -3) gets printed first — OpenFst treats the
        # first state mentioned as the start state.
        for n in ([1, 0] + list(range(2, self.ngram_order))):
            this_order_counts = self.counts[n]
            keys = (this_order_counts.keys() if (n != 1) else sorted(this_order_counts.keys()))
            for hist in keys:
                word_to_count = this_order_counts[hist]
                this_fst_state = hist_to_state[hist]
                for word in word_to_count.keys():
                    this_cost = (- math.log(self.GetProb(hist, word, total_count_map)))
                    if (word > 0):
                        # Regular word arc; destination is the longest tracked
                        # suffix of (hist + word).
                        next_hist = (hist + (word,))
                        while (not (next_hist in hist_to_state)):
                            next_hist = next_hist[1:]
                        next_fst_state = hist_to_state[next_hist]
                        print(this_fst_state, next_fst_state, word, word, this_cost)
                    elif (word == self.eos_symbol):
                        # EOS becomes a final-state line with its cost.
                        print(this_fst_state, this_cost)
                    else:
                        # Backoff arc to the one-shorter history.
                        assert (word == self.backoff_symbol)
                        backoff_fst_state = hist_to_state[hist[1:len(hist)]]
                        print(this_fst_state, backoff_fst_state, word_disambig_symbol, 0, this_cost)
class SenseRemover():
    """Strip PropBank sense suffixes (e.g. 'want-01' -> 'want') from AMR graph
    nodes when the sense can be deterministically restored later.

    Fixes vs. previous revision:
      * reset_statistics cleared a misspelled attribute (``no_removed_instances``)
        and left the real ``not_removed_instances`` set untouched;
      * the statistics log printed raw ratios with a '%' suffix — now scaled by 100.
    """

    def __init__(self, node_utils):
        self.node_utils = node_utils                       # lemma<->frame lookup helper
        self.stemmer = nltk.stem.SnowballStemmer('english').stem
        self.removed_instance_count = 0                    # senses actually removed
        self.amr_instance_count = 0                        # instances inspected
        self.restore_count = 0                             # removals provably reversible
        self.not_removed_instances = set()                 # senses kept (no safe lemma)

    def remove_file(self, file_path):
        """Yield each AMR from `file_path` with senses removed from its graph."""
        for (i, amr) in enumerate(AMRIO.read(file_path), 1):
            if ((i % 1000) == 0):
                logger.info('Processed {} examples.'.format(i))
            self.remove_graph(amr)
            (yield amr)

    def remove_graph(self, amr):
        """Remove senses from every (non-copy) node of one AMR graph in place."""
        graph = amr.graph
        for node in graph.get_nodes():
            # Copies share their original's instance; skip them.
            if (node.copy_of is not None):
                continue
            instance = node.instance
            lemmas = self.map_instance_to_lemmas(instance)
            lemma = self.find_corresponding_lemma(instance, lemmas, amr)
            if (lemma is None):
                lemma = self.remove_sense(instance)
            self.update_graph(graph, node, instance, lemma)

    def map_instance_to_lemmas(self, instance):
        """Return candidate lemmas for `instance`; non-frame instances map to themselves."""
        # Quoted string literals are coerced to str and treated as plain instances.
        if (not (isinstance(instance, str) and (not re.search('^".*"$', instance)))):
            instance = str(instance)
        if re.search('-\\d\\d$', instance):
            lemmas = self.node_utils.get_lemmas(instance)
        else:
            lemmas = [instance]
        return lemmas

    def find_corresponding_lemma(self, instance, lemmas, amr):
        """Pick a lemma present in the sentence whose top frame restores `instance`,
        or None when removal would not be reversible."""
        self.amr_instance_count += 1
        input_lemma = None
        for lemma in lemmas:
            if (lemma in amr.lemmas):
                input_lemma = lemma
                break
        if (input_lemma is not None):
            # Only remove the sense if the lemma deterministically maps back.
            restored_frame = self.node_utils.get_frames(input_lemma)[0]
            if (restored_frame != instance):
                input_lemma = None
        if (input_lemma is None):
            self.not_removed_instances.add(instance)
        else:
            self.removed_instance_count += 1
        return input_lemma

    def remove_sense(self, instance):
        """Strip the '-NN' suffix if the stripped form restores to `instance`."""
        instance_lemma = re.sub('-\\d\\d$', '', str(instance))
        restored = self.node_utils.get_frames(instance_lemma)[0]
        if (restored == instance):
            return instance_lemma
        return instance

    def update_graph(self, graph, node, old, new):
        """Write the (possibly sense-less) instance back and track restorability."""
        if (new is not None):
            graph.replace_node_attribute(node, 'instance', old, new)
            self.try_restore(str(old), new)
        else:
            self.try_restore(old, old)

    def try_restore(self, old, new):
        """Count whether mapping `new` through the frame table recovers `old`."""
        _old = self.node_utils.get_frames(new)[0]
        self.restore_count += int((old == _old))

    def reset_statistics(self):
        """Zero all counters and clear the kept-instance set."""
        self.removed_instance_count = 0
        self.amr_instance_count = 0
        self.restore_count = 0
        # Fix: was `self.no_removed_instances` (typo), leaving the real set uncleaned.
        self.not_removed_instances = set()

    def print_statistics(self):
        """Log removal/restore rates (as percentages) and the kept-instance set size."""
        logger.info('sense remove rate: {}% ({}/{})'.format((100.0 * self.removed_instance_count / self.amr_instance_count), self.removed_instance_count, self.amr_instance_count))
        logger.info('restore rate: {}% ({}/{})'.format((100.0 * self.restore_count / self.amr_instance_count), self.restore_count, self.amr_instance_count))
        logger.info('size of not removed lemma set: {}'.format(len(self.not_removed_instances)))
class MeanSquaredLogarithmicError(LossFunction):
    """Mean squared logarithmic error criterion (backed by the BigDL engine)."""

    def __init__(self, bigdl_type='float'):
        # No constructor arguments beyond the backend dtype tag.
        super().__init__(None, bigdl_type)
def generate_batch_splits(samples_idx: np.ndarray, batch_size: int) -> list:
    """Split ``samples_idx`` into consecutive batches of exactly ``batch_size``.

    Trailing samples that do not fill a complete batch are dropped.

    Returns:
        list[np.ndarray]: ``len(samples_idx) // batch_size`` arrays of length
        ``batch_size`` each. (Fix: the previous annotation claimed
        ``np.ndarray``, but ``np.split`` returns a list of arrays.)
    """
    nb_samples = len(samples_idx)
    samples_to_remove = nb_samples % batch_size
    if samples_to_remove != 0:
        # Drop the incomplete tail so every section has batch_size elements.
        samples_idx = samples_idx[:-samples_to_remove]
    sections_split = nb_samples // batch_size
    batch_idx = np.split(samples_idx, sections_split)
    return batch_idx
def vgg13(num_classes=1000, pretrained='imagenet'):
    """Build torchvision's VGG-13 and optionally load pretrained weights."""
    model = models.vgg13(pretrained=False)
    if pretrained is not None:
        # Resolve the weight/normalization settings for the requested variant.
        settings = pretrained_settings['vgg13'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    return model
def get_loss_one_logit(student_logit, teacher_logit):
    """Temperature-scaled KL distillation loss for one student/teacher logit pair."""
    from torch.nn import functional as F

    temperature = 2.0
    student_log_probs = F.log_softmax(student_logit / temperature, dim=-1)
    teacher_probs = F.softmax(teacher_logit / temperature, dim=-1)
    # The T^2 factor keeps gradient magnitudes comparable across temperatures.
    return F.kl_div(input=student_log_probs, target=teacher_probs, reduction='batchmean') * (temperature ** 2)
def ncompute(openfilepath):
    """Compute a duration-weighted average of an Nsight Compute metric from a CSV.

    Reads rows of (ID, Metric Name, Metric Value), groups metric values per
    kernel-launch ID, then averages the L2 (`lts__t_sectors`) utilization
    metric weighted by each kernel's `gpu__time_duration.sum`, per run and
    overall. Returns (overall average, kernels-per-run).

    NOTE(review) — fragile assumptions, confirm against the CSV producer:
      * IDs are assumed to be the consecutive integers 0..size-1 (``data[i]``
        lookups below), and every ID's 'gpu__time_duration.sum' row is assumed
        to appear before its other metric rows (otherwise ``data[id][mn]``
        raises KeyError before ``data[id]`` exists).
      * 'Metric Value' is assumed already numeric (no thousands separators).
      * The file is assumed to contain exactly 10 repeated runs
        (``kernels = size / 10`` and the final ``/ 10`` averaging).
    """
    with open(openfilepath, encoding='utf-8') as f:
        id = 0
        reader = pd.read_csv(f)
        data = {}
        # Group metric values by kernel-launch ID.
        for i in range(0, len(reader)):
            id = reader.iloc[i]['ID']
            mn = reader.iloc[i]['Metric Name']
            # A duration row marks the start of a new kernel record.
            if (mn == 'gpu__time_duration.sum'):
                data[id] = {}
            data[id][mn] = reader.iloc[i]['Metric Value']
        size = len(data)
        # Number of kernels per run; float division (used with `%` below).
        kernels = (size / 10)
        metrics = ['lts__t_sectors.avg.pct_of_peak_sustained_elapsed']
        ans = []
        tmp = ([0] * len(metrics))
        totaldurationtime = 0
        for i in range(size):
            # Run boundary: flush the duration-weighted average for the
            # previous run before starting the next one.
            if (((i % kernels) == 0) and (i > 1)):
                tmp = [(t / totaldurationtime) for t in tmp]
                ans.append(copy.deepcopy(tmp))
                totaldurationtime = 0
                for j in range(len(tmp)):
                    tmp[j] = 0
            # Durations converted from ns to us (divide by 1000).
            totaldurationtime += (data[i]['gpu__time_duration.sum'] / 1000)
            for j in range(len(metrics)):
                tmp[j] += ((data[i][metrics[j]] * data[i]['gpu__time_duration.sum']) / 1000)
        # Flush the final run.
        tmp = [(t / totaldurationtime) for t in tmp]
        ans.append(copy.deepcopy(tmp))
        # Average the per-run values over the assumed 10 runs.
        avgans = ([0] * len(metrics))
        for i in range(len(ans)):
            for j in range(len(metrics)):
                avgans[j] += ans[i][j]
        avgans = [(i / 10) for i in avgans]
        print(avgans[0])
        print(size)
        return (avgans[0], kernels)
def _add_variables_summaries(learning_rate):
    """Build TensorBoard summaries: one histogram per model variable plus the LR scalar."""
    summaries = [tf.summary.histogram(variable.op.name, variable)
                 for variable in slim.get_model_variables()]
    summaries.append(tf.summary.scalar('training/Learning Rate', learning_rate))
    return summaries
def score_hard_rationale_predictions(truth: List[Rationale], pred: List[Rationale]) -> Dict[(str, Dict[(str, float)])]:
    """Score hard rationale predictions against the gold rationales.

    Returns a dict with two entries, each a {'p', 'r', 'f1'} mapping:
      * 'instance_micro' — all rationales pooled across instances;
      * 'instance_macro' — P/R/F1 computed per instance, then averaged.
    """
    truth = set(truth)
    pred = set(pred)
    hits = truth & pred

    # NOTE(review): raises ZeroDivisionError when pred or truth is empty —
    # confirm callers never pass empty rationale lists.
    micro_scores = {'p': len(hits) / len(pred), 'r': len(hits) / len(truth)}
    micro_scores['f1'] = _f1(micro_scores['p'], micro_scores['r'])

    ann_to_rat = _keyed_rationale_from_list(truth)
    pred_to_rat = _keyed_rationale_from_list(pred)

    # Per-instance scores; a side with no rationales for an instance scores 0.
    instances_to_scores = dict()
    for key in set(ann_to_rat.keys()) | pred_to_rat.keys():
        gold = ann_to_rat.get(key, set())
        guess = pred_to_rat.get(key, set())
        overlap = gold & guess
        prec = len(overlap) / len(pred_to_rat[key]) if guess else 0
        rec = len(overlap) / len(ann_to_rat[key]) if gold else 0
        instances_to_scores[key] = {'p': prec, 'r': rec, 'f1': _f1(prec, rec)}

    n_instances = len(instances_to_scores)
    macro_scores = {
        'p': sum(s['p'] for s in instances_to_scores.values()) / n_instances,
        'r': sum(s['r'] for s in instances_to_scores.values()) / n_instances,
        'f1': sum(s['f1'] for s in instances_to_scores.values()) / n_instances,
    }
    return {'instance_micro': micro_scores, 'instance_macro': macro_scores}
class RandomNegativeSkipGram(RandomNegativeCBOW):
    """Skip-gram negative sampler: appends sampled negatives to each context word."""

    def __call__(self, batch) -> LongTensor:
        """Augment ``batch['context_words']`` (shape (x, y)) with negatives.

        Draws ``self.number_of_samples`` negatives per context position from
        ``self.sampling_distn`` and concatenates them along a new trailing
        axis, giving shape (x, y, 1 + number_of_samples). Mutates and returns
        ``batch``.
        """
        x, y = batch['context_words'].shape
        # Fix: Tensor.resize() is deprecated (and can silently change numel);
        # reshape() is the supported, equivalent call here since the sample
        # count is exactly x * y * number_of_samples.
        negatives = torch.multinomial(
            self.sampling_distn,
            num_samples=self.number_of_samples * x * y,
            replacement=True,
        ).reshape(x, y, self.number_of_samples)
        batch['context_words'] = torch.cat((batch['context_words'].unsqueeze(-1), negatives), dim=-1)
        return batch
# Fix: this line began with `.parametrize(...)` — a leading-dot expression is a
# SyntaxError; it is a pytest.mark.parametrize decorator.
@pytest.mark.parametrize(
    'data_key, hydrate',
    [('wbm_summary', True), ('wbm_initial_structures', True), ('wbm_computed_structure_entries', False), ('mp_elemental_ref_entries', True), ('mp_energies', True)],
)
def test_load(data_key: str, hydrate: bool, dummy_df_serialized: pd.DataFrame, capsys: CaptureFixture[str], tmp_path: Path) -> None:
    """load() downloads each figshare file once, then serves it from the cache."""
    filepath = DATA_FILES[data_key]
    with patch('urllib.request.urlretrieve') as url_retrieve:
        # Fake the download: write JSON or CSV depending on the target file.
        df_csv = pd._testing.makeDataFrame().reset_index(names=id_col)
        writer = dummy_df_serialized.to_json if '.json' in filepath else df_csv.to_csv
        url_retrieve.side_effect = lambda _url, path: writer(path)
        # Exercise both str and Path inputs for cache_dir.
        out = load(data_key, hydrate=hydrate, cache_dir=str(tmp_path) if random() < 0.5 else tmp_path)
        stdout, _stderr = capsys.readouterr()
        assert f'Downloading {data_key!r} from {figshare_urls[data_key][0]}' in stdout
        assert url_retrieve.call_count == 1
        assert isinstance(out, pd.DataFrame), f'{data_key} not a DataFrame'
        # Second call must hit the cache (no further urlretrieve calls).
        from_cache = load(data_key, hydrate=hydrate, cache_dir=tmp_path)
        pd.testing.assert_frame_equal(out, from_cache)
class Normal(nn.Module):
    """Univariate (broadcastable) Gaussian distribution.

    Parameters come either from the module itself (`mu`, `logsigma`) or from
    a per-call `params` tensor whose last dimension stacks (mu, logsigma).

    NOTE(review): `nparams`, `ndim` and `is_reparameterizable` are plain
    methods here but read like properties — the @property decorators may
    have been lost in extraction; confirm against VCS.
    """

    def __init__(self, mu=0, sigma=1):
        super(Normal, self).__init__()
        # log(2*pi): the constant term of the Gaussian log-density.
        self.normalization = Variable(torch.Tensor([np.log(2 * np.pi)]))
        self.mu = Variable(torch.Tensor([mu]))
        self.logsigma = Variable(torch.Tensor([math.log(sigma)]))

    def _check_inputs(self, size, mu_logsigma):
        """Resolve (mu, logsigma) from a params tensor and/or a target size.

        Raises ValueError when neither is provided.
        """
        if (size is None) and (mu_logsigma is None):
            raise ValueError('Either one of size or params should be provided.')
        elif (size is not None) and (mu_logsigma is not None):
            mu = mu_logsigma.select(-1, 0).expand(size)
            logsigma = mu_logsigma.select(-1, 1).expand(size)
            return (mu, logsigma)
        elif size is not None:
            # Fall back to the module-level parameters, broadcast to `size`.
            mu = self.mu.expand(size)
            logsigma = self.logsigma.expand(size)
            return (mu, logsigma)
        elif mu_logsigma is not None:
            mu = mu_logsigma.select(-1, 0)
            logsigma = mu_logsigma.select(-1, 1)
            return (mu, logsigma)
        else:
            raise ValueError('Given invalid inputs: size={}, mu_logsigma={})'.format(size, mu_logsigma))

    def sample(self, size=None, params=None):
        """Draw a reparameterized sample: mu + sigma * eps, eps ~ N(0, 1)."""
        (mu, logsigma) = self._check_inputs(size, params)
        std_z = Variable(torch.randn(mu.size()).type_as(mu.data))
        sample = (std_z * torch.exp(logsigma)) + mu
        return sample

    def log_density(self, sample, params=None):
        """Elementwise Gaussian log-density of `sample` under (mu, logsigma)."""
        if params is not None:
            (mu, logsigma) = self._check_inputs(None, params)
        else:
            (mu, logsigma) = self._check_inputs(sample.size(), None)
            mu = mu.type_as(sample)
            logsigma = logsigma.type_as(sample)
        c = self.normalization.type_as(sample.data)
        inv_sigma = torch.exp(-logsigma)
        tmp = (sample - mu) * inv_sigma
        output = -0.5 * ((tmp * tmp) + (2 * logsigma) + c)
        return output

    def NLL(self, params, sample_params=None):
        """Analytic E_{N(sample_params)}[-log N(params)], elementwise, times 0.5 at the end.

        BUGFIX: the original forced `mu`/`logsigma` onto CUDA, crashing on
        CPU-only runs; the tensors are now used on whatever device `params`
        already lives on (a no-op change for CUDA inputs).
        """
        (mu, logsigma) = self._check_inputs(None, params)
        if sample_params is not None:
            (sample_mu, sample_logsigma) = self._check_inputs(None, sample_params)
        else:
            (sample_mu, sample_logsigma) = (mu, logsigma)
        c = self.normalization.type_as(sample_mu.data)
        nll = ((logsigma.mul(-2).exp() * (sample_mu - mu).pow(2))
               + torch.exp(sample_logsigma.mul(2) - logsigma.mul(2))
               + (2 * logsigma) + c)
        return nll.mul(0.5)

    def kld(self, params):
        """KL divergence KL(N(mu, sigma) || N(0, 1)), computed elementwise."""
        (mu, logsigma) = self._check_inputs(None, params)
        kld = (logsigma.mul(2).add(1) - mu.pow(2)) - logsigma.exp().pow(2)
        kld.mul_(-0.5)
        return kld

    def get_params(self):
        """Return the module-level parameters stacked as [mu, logsigma]."""
        return torch.cat([self.mu, self.logsigma])

    def nparams(self):
        # Two distribution parameters: mu and logsigma.
        return 2

    def ndim(self):
        # Univariate distribution.
        return 1

    def is_reparameterizable(self):
        # Sampling uses the mu + sigma*eps trick, so gradients flow through.
        return True

    def __repr__(self):
        tmpstr = (self.__class__.__name__ + ' ({:.3f}, {:.3f})'.format(self.mu.data[0], self.logsigma.exp().data[0]))
        return tmpstr
def begin(cfg):
    """One-time job setup: debug mode, RNG seeding, workdir bookkeeping,
    and specialization of the prediction-data config."""
    if cfg.other.is_debug:
        set_debug(cfg)
    # Seed all RNGs (python / numpy / torch) for reproducibility.
    pl.seed_everything(cfg.seed)
    cfg.paths.work = str(Path.cwd())
    cfg.other.git_hash = GIT_HASH
    logger.info(f'Workdir : {cfg.paths.work}.')
    if (cfg.data_pred.name == 'data_feat'):
        # 'data_feat' is a placeholder: replace it with the concrete
        # feature-data config (open_dict permits adding new keys).
        with omegaconf.open_dict(cfg):
            cfg.data_pred.name = cfg.data_feat.name
            cfg.data_pred = OmegaConf.merge(cfg.data_feat, cfg.data_pred)
class DiceLoss(nn.Module):
    """Soft Dice loss: 1 - (2*|A.B| + s) / (|A|^2 + |B|^2 + s), averaged over the batch."""

    def __init__(self):
        super(DiceLoss, self).__init__()
        # Additive smoothing keeps the ratio finite when both inputs are empty.
        self.smooth = 1

    def forward(self, input, target):
        # Reduce over every non-batch axis.
        reduce_axes = tuple(range(1, input.dim()))
        overlap = (input * target).sum(dim=reduce_axes)
        denom = torch.pow(input, 2).sum(dim=reduce_axes) + torch.pow(target, 2).sum(dim=reduce_axes)
        dice = ((2 * overlap) + self.smooth) / (denom + self.smooth)
        return (1 - dice).mean()
def add_located(raw_data_dicts, srt_data, frame_cnt):
    """Return a deep copy of raw_data_dicts enriched with timestamp-derived fields
    (normalized ts, located frame interval, located subtitle text)."""
    enriched = copy.deepcopy(raw_data_dicts)
    num_nan = 0
    for entry in tqdm(enriched):
        vid = entry['vid_name']
        subs = srt_data['sub_text'][vid]
        sub_times = srt_data['sub_time'][vid]
        ts, is_nan = convert_ts(entry['ts'])
        num_nan += is_nan
        entry['ts'] = ts
        entry['located_frame'] = interval2frame(ts, frame_cnt[vid])
        entry['located_sub_text'] = get_located_sub_text(ts, subs, sub_times)
    print('There are %d NaN values in ts, which are replaced by [10, 30], will be fixed later' % num_nan)
    return enriched
class TableModel(QtCore.QAbstractTableModel):
    """Read-only Qt table model backed by a pandas DataFrame."""

    def __init__(self, data):
        super(TableModel, self).__init__()
        self._data = data

    def headerData(self, section, orientation, role=QtCore.Qt.DisplayRole):
        """Column labels for horizontal headers, index labels for vertical ones."""
        if role != QtCore.Qt.DisplayRole:
            return QtCore.QVariant()
        if orientation == QtCore.Qt.Horizontal:
            try:
                return self._data.columns.tolist()[section]
            except IndexError:
                return QtCore.QVariant()
        if orientation == QtCore.Qt.Vertical:
            try:
                return self._data.index.tolist()[section]
            except IndexError:
                return QtCore.QVariant()

    def data(self, index, role=Qt.DisplayRole):
        """Stringified cell value for the display role; None otherwise."""
        if not index.isValid():
            return None
        if role != Qt.DisplayRole:
            return None
        return str(self._data.iloc[index.row(), index.column()])

    def rowCount(self, parent=None):
        return self._data.shape[0]

    def columnCount(self, parent=None):
        return self._data.shape[1]
def generate_my_simplicial_complex_d2(N, p1, p2):
    """Build a random 2-dimensional simplicial complex.

    Starts from a G(N, p1) random graph (restricted to its giant component if
    disconnected), then promotes each node triple to a triangle with
    probability p2, adding the triangle's edges to the graph.

    Returns (node_neighbors_dict, triangles_list).
    """
    G = nx.fast_gnp_random_graph(N, p1, seed=None)
    if not nx.is_connected(G):
        # BUGFIX: take the LARGEST connected component ("GC"); the original
        # took an arbitrary first component from the iterator.
        giant = max(nx.connected_components(G), key=len)
        G = nx.subgraph(G, giant)
        print('not connected, but GC has order %i ans size %i' % (len(giant), G.size()))
    triangles_list = []
    # Work on a copy: adding triangle edges must not perturb the triple iteration.
    G_copy = G.copy()
    for tri in combinations(list(G.nodes()), 3):
        if random.random() <= p2:
            triangles_list.append(tri)
            G_copy.add_edge(tri[0], tri[1])
            G_copy.add_edge(tri[1], tri[2])
            G_copy.add_edge(tri[0], tri[2])
    G = G_copy
    node_neighbors_dict = {n: G[n].keys() for n in G.nodes()}
    return (node_neighbors_dict, triangles_list)
class AsyncNoOverlapAlternatingActionServer(NoOverlapAlternatingActionServer):
    """Asynchronous evaluation action server for two alternating worker halves
    whose environment steps never overlap."""

    def serve_actions_evaluation(self, itr):
        """Serve agent actions to the two alternating worker groups for one
        evaluation run, strictly interleaving the halves via semaphores."""
        obs_ready = self.sync.obs_ready
        # BUGFIX: `act_ready` is used in the drain loop at the end of this
        # method but was never bound, raising NameError; bind it from the
        # shared sync namespace like `obs_ready`.
        act_ready = self.sync.act_ready
        obs_ready_pair = self.obs_ready_pair
        act_ready_pair = self.act_ready_pair
        (step_np, step_np_pair) = (self.eval_step_buffer_np, self.eval_step_buffer_np_pair)
        agent_inputs_pair = self.eval_agent_inputs_pair
        self.agent.reset()
        step_np.action[:] = 0
        step_np.reward[:] = 0
        stop = False
        # Prime the pipeline: compute the first action for half 0...
        alt = 0
        step_h = step_np_pair[alt]
        for b in obs_ready_pair[alt]:
            b.acquire()
        (action, agent_info) = self.agent.step(*agent_inputs_pair[alt])
        step_h.action[:] = action
        step_h.agent_info[:] = agent_info
        # ...then for half 1, releasing half 0 to step its environments.
        alt = 1
        step_h = step_np_pair[alt]
        for b in obs_ready_pair[alt]:
            b.acquire()
        for w in act_ready_pair[(1 - alt)]:
            w.release()
        (action, agent_info) = self.agent.step(*agent_inputs_pair[alt])
        step_h.action[:] = action
        step_h.agent_info[:] = agent_info
        for t in range(1, self.eval_max_T):
            for alt in range(2):
                step_h = step_np_pair[alt]
                for b in obs_ready_pair[alt]:
                    b.acquire()
                if self.ctrl.stop_eval.value:
                    # Propagate the global stop signal to workers.
                    self.sync.stop_eval.value = stop = True
                for w in act_ready_pair[(1 - alt)]:
                    w.release()
                if stop:
                    break
                for b_reset in np.where(step_h.done)[0]:
                    # Clear buffers and recurrent agent state for finished envs.
                    step_h.action[b_reset] = 0
                    step_h.reward[b_reset] = 0
                    self.agent.reset_one(idx=b_reset)
                (action, agent_info) = self.agent.step(*agent_inputs_pair[alt])
                step_h.action[:] = action
                step_h.agent_info[:] = agent_info
            if stop:
                break
        # Release the half that is still waiting, then drain/verify semaphores.
        for w in act_ready_pair[alt]:
            w.release()
        for b in obs_ready:
            b.acquire()
            assert (not b.acquire(block=False))
        for w in act_ready:
            assert (not w.acquire(block=False))
# NOTE(review): the decorator line below appears truncated by extraction —
# presumably `@pytest.fixture(name='test_team_batting_html')`; confirm against VCS.
(name='test_team_batting_html')
def _test_team_batting_html(get_data_file_contents: Callable[([str], str)]) -> str:
    """Fixture: raw HTML contents of the 'team_batting.html' test-data file."""
    return get_data_file_contents('team_batting.html')
def test_interpolation_potential_density_notinterpolated():
    """With interpDens=False, interpRZPotential.dens must match direct evaluation
    of the underlying potential to machine-level relative accuracy."""
    rzpot = potential.interpRZPotential(RZPot=potential.MWPotential, rgrid=(0.01, 2.0, 101), zgrid=(0.0, 0.2, 101), logR=False, interpDens=False, zsym=True)
    tol = 10.0 ** (-10.0)
    for r in (0.5, 1.5):
        for z in (0.075, 0.15):
            direct = potential.evaluateDensities(potential.MWPotential, r, z)
            rel_err = numpy.fabs((rzpot.dens(r, z) - direct) / direct)
            assert rel_err < tol, f'RZPot interpolation of the density w/ interpRZPotential fails when the potential was not interpolated at (R,z) = ({r:g},{z:g})'
    return None
class SequentialSchedule(Scheduler):
    """Wrapper over BigDL's SequentialSchedule: chains LR schedulers back-to-back."""

    def __init__(self, iteration_per_epoch: int) -> None:
        # Imported lazily so that importing this module does not require bigdl.
        from bigdl.dllib.optim.optimizer import SequentialSchedule as BSequentialSchedule
        self.scheduler = BSequentialSchedule(iteration_per_epoch)

    def get_scheduler(self) -> 'optimizer.SequentialSchedule':
        """Return the wrapped BigDL scheduler instance."""
        return self.scheduler

    def add(self, scheduler: Scheduler, max_iteration: int) -> 'SequentialSchedule':
        """Append `scheduler` to the chain, to run for `max_iteration` iterations.

        NOTE(review): returns the result of the BigDL `add` call, not `self` —
        confirm the annotated return type against the bigdl API.
        """
        return self.get_scheduler().add(scheduler.get_scheduler(), max_iteration)
class AutoObject(NestedSpace):
    """Lazily-instantiated object living in a hyperparameter search space.

    The wrapped instance is built from the config space's default
    configuration on first call and cached afterwards.

    NOTE(review): `cs` is accessed without parentheses in `init`
    (`self.cs.get_default_configuration()`), so `cs` (and likely `kwspaces`)
    were presumably decorated with @property in the original source and the
    decorators were lost in extraction — confirm against VCS.
    """

    def __call__(self, *args, **kwargs):
        """Build the underlying instance on first use, then delegate the call."""
        if (not self._inited):
            self._inited = True
            self._instance = self.init()
        return self._instance.__call__(*args, **kwargs)

    def init(self):
        """Instantiate the object from the default configuration of the config space."""
        config = self.cs.get_default_configuration().get_dictionary()
        return self.sample(**config)

    def cs(self):
        """Assemble the ConfigSpace from this object's keyword sub-spaces."""
        cs = _new_cs(self.prefix)
        for (k, v) in self.kwvars.items():
            if isinstance(v, NestedSpace):
                # Nested spaces contribute their entire sub-config-space.
                _add_cs(cs, v.cs, k)
            elif isinstance(v, Space):
                hp = v.get_hp(name=k)
                _add_hp(cs, hp)
            else:
                # Plain (non-searchable) values are removed from the space.
                _rm_hp(cs, k)
        return cs

    def kwspaces(self):
        """Not supported for AutoObject."""
        invalidInputError(False, 'not implement kwspaces for AutoObject')

    def sample(self):
        """Not supported on the base class; concrete subclasses implement sampling."""
        invalidInputError(False, 'not implement sample for AutoObject')

    def __repr__(self):
        return 'AutoObject'
class VanConfig(PretrainedConfig):
    """Configuration for the VAN (Visual Attention Network) model.

    The per-stage lists (patch_sizes, strides, hidden_sizes, depths,
    mlp_ratios) describe the four backbone stages and should all have the
    same length.
    """
    model_type = 'van'

    def __init__(self, image_size=224, num_channels=3, patch_sizes=None, strides=None, hidden_sizes=None, depths=None, mlp_ratios=None, hidden_act='gelu', initializer_range=0.02, layer_norm_eps=1e-06, layer_scale_init_value=0.01, drop_path_rate=0.0, dropout_rate=0.0, **kwargs):
        # BUGFIX: the per-stage defaults were shared mutable list literals —
        # a caller mutating e.g. config.patch_sizes would corrupt the default
        # for every later instance. None sentinels now materialize a fresh
        # list per instance with the same default values.
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes if patch_sizes is not None else [7, 3, 3, 3]
        self.strides = strides if strides is not None else [4, 2, 2, 2]
        self.hidden_sizes = hidden_sizes if hidden_sizes is not None else [64, 128, 320, 512]
        self.depths = depths if depths is not None else [3, 3, 12, 3]
        self.mlp_ratios = mlp_ratios if mlp_ratios is not None else [8, 8, 4, 4]
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
def compute_loss(model, device, data_loader):
    """Evaluate `model` over `data_loader`.

    Returns (mean BCE loss over the dataset, dict mapping sample id -> score
    as a numpy array). Puts the model in eval mode and disables autograd.
    """
    model.eval()
    total_loss = 0
    scores = {}
    with torch.no_grad():
        for (id_list, X1, X2, target) in data_loader:
            (X1, X2, target) = (X1.to(device), X2.to(device), target.to(device))
            target = target.view(-1, 1).float()
            y = model(X1, X2)
            # BUGFIX/modernization: `size_average=False` is deprecated;
            # reduction='sum' is the exact modern equivalent.
            total_loss += F.binary_cross_entropy(y, target, reduction='sum')
            for (i, sample_id) in enumerate(id_list):  # avoid shadowing builtin `id`
                scores[sample_id] = y[i].data.cpu().numpy()
    total_loss /= len(data_loader.dataset)
    return (total_loss, scores)
class AdamP(Optimizer):
    """AdamP optimizer (Heo et al., 2021): Adam whose update is projected onto
    the tangent space of scale-invariant weights to curb norm growth.

    Extra args vs Adam: `delta` (cosine threshold that detects scale-invariant
    parameters), `wd_ratio` (weight-decay damping applied when the update is
    projected) and `nesterov`.
    """

    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, delta=0.1, wd_ratio=0.1, nesterov=False):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, delta=delta, wd_ratio=wd_ratio, nesterov=nesterov)
        super(AdamP, self).__init__(params, defaults)

    def _channel_view(self, x):
        # Flatten everything but the leading (output-channel) dimension.
        return x.view(x.size(0), -1)

    def _layer_view(self, x):
        # Treat the whole tensor as a single row.
        return x.view(1, -1)

    def _cosine_similarity(self, x, y, eps, view_func):
        """Row-wise |cosine| similarity between x and y under the given view."""
        x = view_func(x)
        y = view_func(y)
        x_norm = x.norm(dim=1).add_(eps)
        y_norm = y.norm(dim=1).add_(eps)
        dot = (x * y).sum(dim=1)
        return (dot.abs() / x_norm) / y_norm

    def _projection(self, p, grad, perturb, delta, wd_ratio, eps):
        """Project `perturb` off the radial direction of p when p looks scale-invariant.

        Returns (possibly projected perturb, weight-decay ratio to apply).
        """
        wd = 1
        expand_size = [-1] + [1] * (len(p.shape) - 1)
        for view_func in [self._channel_view, self._layer_view]:
            cosine_sim = self._cosine_similarity(grad, p.data, eps, view_func)
            if cosine_sim.max() < (delta / math.sqrt(view_func(p.data).size(1))):
                p_n = p.data / view_func(p.data).norm(dim=1).view(expand_size).add_(eps)
                # Remove the component of the update parallel to p.
                perturb -= p_n * view_func(p_n * perturb).sum(dim=1).view(expand_size)
                wd = wd_ratio
                return (perturb, wd)
        return (perturb, wd)

    def step(self, closure=None):
        """Perform a single optimization step; returns closure() result if given."""
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                (beta1, beta2) = group['betas']
                nesterov = group['nesterov']
                state = self.state[p]
                if len(state) == 0:
                    # Lazy state initialization on first step for this param.
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data)
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                (exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq'])
                state['step'] += 1
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                # BUGFIX/modernization: the positional `add_(alpha, tensor)` and
                # `addcmul_(value, t1, t2)` overloads are deprecated (removed in
                # newer torch); use the keyword forms.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                step_size = group['lr'] / bias_correction1
                if nesterov:
                    perturb = ((beta1 * exp_avg) + ((1 - beta1) * grad)) / denom
                else:
                    perturb = exp_avg / denom
                wd_ratio = 1
                if len(p.shape) > 1:
                    (perturb, wd_ratio) = self._projection(p, grad, perturb, group['delta'], group['wd_ratio'], group['eps'])
                if group['weight_decay'] > 0:
                    # Decoupled weight decay, damped by wd_ratio when projected.
                    p.data.mul_(1 - ((group['lr'] * group['weight_decay']) * wd_ratio))
                p.data.add_(perturb, alpha=-step_size)
        return loss
def rlaus_resnet50(rla_channel=32):
    """Construct an RLA-US ResNet-50 (Bottleneck blocks, [3, 4, 6, 3] layout).

    NOTE(review): `rla_channel` is accepted but never forwarded to
    RLAus_ResNet — either wire it through or drop it; confirm against the
    RLAus_ResNet constructor signature.
    """
    print('Constructing rlaus_resnet50......')
    model = RLAus_ResNet(RLAus_Bottleneck, [3, 4, 6, 3])
    return model
def ffprob_shot_segmentation(video_path='data', video_name='Cosmus_Laundromat.mp4'):
    """Segment a video into shots via ffprobe scene detection, then normalize lengths.

    Returns a list of shot lengths in frames where shots shorter than Lmin are
    merged into their successor and shots longer than Lmax are split into
    near-equal parts. Caches the ffprobe output in shot_segmentation.txt.
    """
    shot_seg_text_file = os.path.join(video_path, 'shot_segmentation.txt')
    if not os.path.isfile(shot_seg_text_file):
        print('Ffmpeg shot segmentation in action...')
        video_path_in_linux_style = '/'.join(video_path.split('\\'))
        full_video_path = '/'.join([video_path_in_linux_style, video_name])
        output_file = '/'.join([video_path_in_linux_style, 'shot_segmentation.txt'])
        # NOTE(review): shell=True with an interpolated path is shell-injection
        # prone; only safe for trusted, space-free paths.
        command = ((('ffprobe -show_frames -of compact=p=0 -f lavfi "movie=' + full_video_path) + ',select=gt(scene\\,.4)" > ') + output_file)
        proc = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
        proc.communicate()
        print('Finished ffmpeg shot segmentation')
    print('Reading shot seg text file')
    with open(shot_seg_text_file) as f:
        content = f.readlines()
    shotIdx = [0]
    frames_per_second = getFramerate(os.path.join(video_path, video_name))
    for line in content:
        # BUGFIX: np.int was removed in NumPy 1.24 — use the builtin int.
        shotIdx.append(int(np.round(float(line.split(sep='pkt_pts_time=')[1].split(sep='|pkt_dts')[0]) * frames_per_second)))
    Lmin = 25
    Lmax = 200
    cap = cv2.VideoCapture(os.path.join(video_path, video_name))
    total_num_of_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    cap.release()  # BUGFIX: release the capture handle instead of leaking it.
    # Shot lengths = difference between consecutive shot-start indices.
    C = np.subtract(np.append(shotIdx[1:], total_num_of_frames), shotIdx)
    C_without_short_shots = []
    for i in range(len(C) - 1):
        if C[i] >= Lmin:
            C_without_short_shots.append(C[i])
        else:
            # Merge a too-short shot into the following one.
            C[i + 1] = C[i + 1] + C[i]
    if C[-1] >= Lmin:
        C_without_short_shots.append(C[-1])
    else:
        C_without_short_shots[-1] += C[-1]
    final_C = []
    for i in range(len(C_without_short_shots)):
        if C_without_short_shots[i] <= Lmax:
            final_C.append(C_without_short_shots[i])
        else:
            # Split an over-long shot into near-equal parts.
            devide_factor = int((C_without_short_shots[i] // Lmax) + 1)
            length_of_each_part = C_without_short_shots[i] // devide_factor
            for j in range(devide_factor - 1):
                final_C.append(length_of_each_part)
            final_C.append(C_without_short_shots[i] - ((devide_factor - 1) * length_of_each_part))
    return final_C
def add_params(parser):
    """Register the ElasticJob command-line options on `parser`."""
    # The job name is the only mandatory option.
    parser.add_argument('--job_name', help='ElasticJob name', required=True)
    parser.add_argument('--namespace', type=str, default='default', help='The name of the Kubernetes namespace where ElasticJob pods will be created')
    parser.add_argument('--platform', type=str, default='pyk8s', help='The name of platform which can be pyk8s, k8s, ray or local.')
class WordNgram():
    """Word-level n-gram language-model scorer backed by a k2 FSA (G).

    Loads a word symbol table, the OOV word id, and the grammar FSA G
    (cached as G.pt, otherwise compiled from G.fst.txt).
    """

    def __init__(self, lang, device):
        """lang: directory with words.txt, oov.int and G.fst.txt / G.pt.
        device: torch device the G FSA and input lattices live on."""
        self.lang = Path(lang)
        self.device = device
        self.is_cuda = (device.type == 'cuda')
        self.symbol_table = k2.SymbolTable.from_file(self.lang / 'words.txt')
        self.oovid = int((self.lang / 'oov.int').read_text().strip())
        # BUGFIX: the original body was `self.load_G(); return` followed by an
        # unreachable retry loop (dead code with a bare `except:`); the dead
        # code is removed and load errors propagate to the caller.
        self.load_G()

    def load_G(self):
        """Load G from the cached G.pt when present, else build it from G.fst.txt."""
        if os.path.exists(self.lang / 'G.pt'):
            # Hold an exclusive flock while reading so a concurrent writer's
            # partially-written cache is never observed; the file handle is
            # now closed via the context manager instead of leaking.
            with open(self.lang / 'G.pt', 'r') as f:
                fcntl.flock(f, fcntl.LOCK_EX)
                G_dict = torch.load(self.lang / 'G.pt')
                fcntl.flock(f, fcntl.LOCK_UN)
            G = k2.Fsa.from_dict(G_dict).to(self.device)
            G = k2.create_fsa_vec([G])
            G = k2.arc_sort(G)
            print('Successfully load the cached G.pt', flush=True)
        else:
            with open(self.lang / 'G.fst.txt') as f:
                G = k2.Fsa.from_openfst(f.read(), acceptor=False)
            del G.aux_labels
            # Disambiguation symbols are FST-construction artifacts; map them
            # to epsilon (0) before sorting and caching.
            first_word_disambig_id = find_first_disambig_symbol(self.symbol_table)
            G.labels[G.labels >= first_word_disambig_id] = 0
            G = k2.arc_sort(G)
            torch.save(G.as_dict(), self.lang / 'G.pt')
            print('No cached G.pt found. Build a new G from G.fst.txt')
        self.G = G.to(self.device)

    def text2lat(self, text, gram_len=6):
        """Build a word lattice FSA covering all 1..gram_len-token groupings of `text`.

        Tokens whose concatenation is in the vocabulary get their word id;
        unknown single tokens map to the OOV id; unknown longer spans are skipped.
        """
        text = [tok.replace(' ', '') for tok in text]
        if gram_len < 1:
            raise ValueError('invalid ngram_len. it should be larger than 1')
        arcs = []
        for s in range(len(text) + 1):
            for r in range(1, gram_len + 1):
                if ((len(text) - s) - r) < 0:
                    continue
                w = ''.join(text[s:(s + r)])
                if w in self.symbol_table:
                    wid = self.symbol_table[w]
                elif r == 1:
                    wid = self.oovid
                else:
                    continue
                arc = [s, (s + r), wid, 0.0]
                arcs.append(arc)
        # Final arc (label -1) into the super-final state, then the final-state line.
        arcs.append([len(text), (len(text) + 1), (- 1), 0.0])
        arcs.append([(len(text) + 1)])
        arcs = sorted(arcs, key=(lambda arc: arc[0]))
        arcs = [[str(i) for i in arc] for arc in arcs]
        arcs = [' '.join(arc) for arc in arcs]
        arcs = '\n'.join(arcs)
        lat = k2.Fsa.from_str(arcs, True)
        lat = k2.arc_sort(lat)
        return lat.to(self.device)

    def score_lattice(self, lats, log_semiring=True):
        """Intersect the input lattices with G, returning the scored lattice."""
        assert (lats.device == self.device)
        lats = k2.add_epsilon_self_loops(lats)
        if self.is_cuda:
            # On GPU: batched intersection with G broadcast to every lattice.
            b_to_a_map = torch.zeros(lats.shape[0], device=self.device, dtype=torch.int32)
            scored_lattice = k2.intersect_device(self.G, lats, b_to_a_map, sorted_match_a=True)
            scored_lattice = k2.top_sort(k2.connect(k2.remove_epsilon_self_loops(scored_lattice)))
        else:
            scored_lattice = k2.intersect(self.G, lats, treat_epsilons_specially=False)
            scored_lattice = k2.top_sort(k2.connect(k2.remove_epsilon_self_loops(scored_lattice)))
        return scored_lattice

    def score_texts(self, texts, log_semiring=True):
        """Score a list of token sequences; returns total LM scores per text."""
        lats = [self.text2lat(t) for t in texts]
        lats = k2.create_fsa_vec(lats)
        scored_lattice = self.score_lattice(lats, log_semiring)
        scores = scored_lattice._get_tot_scores(log_semiring, True)
        return scores

    def draw(self, fsavec, prefix=None):
        """Render each FSA in `fsavec` to '<prefix>_<i>.svg' for debugging."""
        for i in range(fsavec.shape[0]):
            fsa = fsavec[i]
            fsa.draw(f'{prefix}_{i}.svg')
def simxSetArrayParameter(clientID, paramIdentifier, paramValues, operationMode):
    """Forward a 3-float array parameter to the remote-API C binding."""
    # Marshal the three Python floats into a ctypes float[3] buffer.
    buf = (ct.c_float * 3)(*paramValues)
    return c_SetArrayParameter(clientID, paramIdentifier, buf, operationMode)
class SubMobileSPADEGenerator(BaseNetwork):
    """Sub-network ("once-for-all" child) of the mobile SPADE generator whose
    per-block channel widths come from `config['channels']` (8 multipliers).

    NOTE(review): `modify_commandline_options` takes no self/cls — presumably
    @staticmethod in the original source (decorator lost in extraction).
    """

    def modify_commandline_options(parser, is_train):
        return parser

    def __init__(self, opt, config):
        super(SubMobileSPADEGenerator, self).__init__()
        self.opt = opt
        self.config = config
        nf = opt.ngf
        (self.sw, self.sh) = self.compute_latent_vector_size(opt)
        channel = config['channels'][0]
        # Project the semantic map to the deepest feature width.
        self.fc = nn.Conv2d(self.opt.semantic_nc, (16 * channel), 3, padding=1)
        # `ic` tracks the input-channel count fed to the next SPADE block.
        ic = (channel * 16)
        channel = config['channels'][1]
        self.head_0 = SubMobileSPADEResnetBlock((16 * nf), (16 * nf), ic, opt, {'channel': (channel * 16), 'hidden': (channel * 2)})
        channel = config['channels'][2]
        self.G_middle_0 = SubMobileSPADEResnetBlock((16 * nf), (16 * nf), ic, opt, {'channel': (channel * 16), 'hidden': (channel * 2)})
        channel = config['channels'][3]
        self.G_middle_1 = SubMobileSPADEResnetBlock((16 * nf), (16 * nf), ic, opt, {'channel': (channel * 16), 'hidden': (channel * 2)})
        channel = config['channels'][4]
        self.up_0 = SubMobileSPADEResnetBlock((16 * nf), (8 * nf), ic, opt, {'channel': (channel * 8), 'hidden': (channel * 2)})
        ic = (channel * 8)
        channel = config['channels'][5]
        self.up_1 = SubMobileSPADEResnetBlock((8 * nf), (4 * nf), ic, opt, {'channel': (channel * 4), 'hidden': (channel * 2)})
        ic = (channel * 4)
        channel = config['channels'][6]
        self.up_2 = SubMobileSPADEResnetBlock((4 * nf), (2 * nf), ic, opt, {'channel': (channel * 2), 'hidden': (channel * 2)})
        ic = (channel * 2)
        channel = config['channels'][7]
        self.up_3 = SubMobileSPADEResnetBlock((2 * nf), (1 * nf), ic, opt, {'channel': channel, 'hidden': (channel * 2)})
        final_nc = channel
        if (opt.num_upsampling_layers == 'most'):
            raise NotImplementedError
        self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1)
        self.up = nn.Upsample(scale_factor=2)

    def compute_latent_vector_size(self, opt):
        """Derive the (width, height) of the latent grid from crop size,
        number of upsampling layers, and aspect ratio."""
        if (opt.num_upsampling_layers == 'normal'):
            num_up_layers = 5
        elif (opt.num_upsampling_layers == 'more'):
            num_up_layers = 6
        elif (opt.num_upsampling_layers == 'most'):
            num_up_layers = 7
        else:
            raise ValueError(('opt.num_upsampling_layers [%s] not recognized' % opt.num_upsampling_layers))
        sw = (opt.crop_size // (2 ** num_up_layers))
        sh = round((sw / opt.aspect_ratio))
        return (sw, sh)

    def forward(self, input, z=None):
        """Generate an image from the semantic map `input` (`z` is unused here)."""
        seg = input
        # Downsample the segmentation map to the latent spatial size.
        x = F.interpolate(seg, size=(self.sh, self.sw))
        x = self.fc(x)
        x = self.head_0(x, seg)
        x = self.up(x)
        x = self.G_middle_0(x, seg)
        if ((self.opt.num_upsampling_layers == 'more') or (self.opt.num_upsampling_layers == 'most')):
            x = self.up(x)
        x = self.G_middle_1(x, seg)
        x = self.up(x)
        x = self.up_0(x, seg)
        x = self.up(x)
        x = self.up_1(x, seg)
        x = self.up(x)
        x = self.up_2(x, seg)
        x = self.up(x)
        x = self.up_3(x, seg)
        if (self.opt.num_upsampling_layers == 'most'):
            # NOTE(review): unreachable in practice — __init__ raises
            # NotImplementedError for 'most' and `self.up_4` is never defined.
            x = self.up(x)
            x = self.up_4(x, seg)
        x = self.conv_img(F.leaky_relu(x, 0.2))
        # NOTE(review): F.tanh is deprecated in newer torch (torch.tanh).
        x = F.tanh(x)
        return x
# NOTE(review): this decorator line appears truncated by extraction —
# presumably `@register_criterion('masked_lm')`; confirm against VCS.
_criterion('masked_lm')
class MaskedLmLoss(FairseqCriterion):
    """Masked-language-model criterion: NLL over the masked positions only."""

    def forward(self, model, sample, reduce=True):
        """Compute the MLM loss; returns (loss, sample_size, logging_output)."""
        # Positions whose target is not padding are the masked positions.
        masked_tokens = sample['target'].ne(self.padding_idx)
        sample_size = masked_tokens.int().sum().item()
        if (sample_size == 0):
            # Rare edge case: nothing masked — score all positions instead.
            masked_tokens = None
        logits = model(**sample['net_input'], masked_tokens=masked_tokens)[0]
        targets = model.get_targets(sample, [logits])
        if (sample_size != 0):
            targets = targets[masked_tokens]
        # Softmax in float32 for numerical stability under mixed precision.
        loss = F.nll_loss(F.log_softmax(logits.view((- 1), logits.size((- 1))), dim=(- 1), dtype=torch.float32), targets.view((- 1)), reduction='sum', ignore_index=self.padding_idx)
        logging_output = {'loss': loss.data, 'ntokens': sample['ntokens'], 'nsentences': sample['nsentences'], 'sample_size': sample_size}
        return (loss, sample_size, logging_output)

    def reduce_metrics(logging_outputs) -> None:
        """Aggregate logging outputs across workers.

        NOTE(review): no self/cls parameter — presumably @staticmethod originally.
        """
        loss_sum = utils.item(sum((log.get('loss', 0) for log in logging_outputs)))
        sample_size = utils.item(sum((log.get('sample_size', 0) for log in logging_outputs)))
        # Report loss in bits per masked token (divide by ln 2).
        metrics.log_scalar('loss', ((loss_sum / sample_size) / math.log(2)), sample_size, round=3)
        metrics.log_derived('ppl', (lambda meters: utils.get_perplexity(meters['loss'].avg)))

    def logging_outputs_can_be_summed() -> bool:
        """Whether logging outputs can be summed across workers (enables faster
        distributed reduction). NOTE(review): presumably @staticmethod originally."""
        return True
def extract_comments(node, code, comments, lang):
    """Recursively collect unique comment nodes from a tree-sitter parse tree.

    Mutates and returns `comments`, a list of dicts describing each comment's
    text, 1-based line range, byte span and node type.
    """
    def _comment_record(n):
        # Byte-slice the raw source for the node and record its extent.
        return {'content': code[n.start_byte:n.end_byte].decode('UTF-8'), 'range': list(range(n.start_point[0] + 1, n.end_point[0] + 2)), 'start_byte': n.start_byte, 'end_byte': n.end_byte, 'type': n.type}

    if not node.children and node.type in comment_node_name[lang]:
        record = _comment_record(node)
        if record not in comments:
            comments.append(record)
    for child in node.children:
        if child.type in comment_node_name[lang]:
            record = _comment_record(child)
            if record not in comments:
                comments.append(record)
        comments = extract_comments(child, code, comments, lang)
    return comments
def training_params(is_gcloud=False, output_dir=None):
    """Build the TrainingParams for progressive-GAN training on CelebA-HQ.

    is_gcloud: select gcloud-specific dataset paths.
    output_dir: experiment output directory; derived from this file when None.
    """
    if (not output_dir):
        output_dir = util.construct_experiment_output_dir(__file__)
    num_gpus = 1
    stop_after = 7
    # Per-phase batch sizes shrink as resolution grows (phase -> batch size).
    dynamic_batch_size = {2: 128, 3: 128, 4: 64, 5: 32, 6: 16, 7: 6, 8: 3}
    imgs_per_phase = 384000
    # Keep images-seen-per-phase roughly constant, with a 6000-step floor.
    dynamic_steps_per_phase = {phase: max((imgs_per_phase / batch_size), 6000) for (phase, batch_size) in dynamic_batch_size.items()}
    # Train the final phase (7) for twice as long.
    dynamic_steps_per_phase[7] *= 2
    return train.TrainingParams(description=DESCRIPTION, is_gcloud=is_gcloud, num_gpus=num_gpus, dataset_params=celeba_hq_dataset.get_dataset_params(is_gcloud=is_gcloud, crop_at_center=True), checkpoint_every_n_steps=None, checkpoint_every_n_secs=((2 * 60) * 60), dynamic_steps_per_phase=dynamic_steps_per_phase, dynamic_batch_size=dynamic_batch_size, stop_after=stop_after, eval_every_n_secs=((48 * 60) * 60), write_summaries_every_n_steps=700, infogan_summary_reps=0, output_dir=output_dir, allow_initial_partial_restore=True, noise_size=64, noise_stddev=1.0, summary_grid_size=3, infogan_cont_weight=10.0, infogan_cont_depth_to_num_vars={2: 16, 3: 16, 4: 16, 5: 16, 6: 16, 7: 0, 8: 0}, generator_params=networks.GeneratorParams(channels_at_4x4=2048, channels_max=480, optimizer=('adam_b0_b99', 0.0005), ema_decay_for_visualization=0.999, weight_norm='equalized', norm='batch_norm_in_place', norm_per_gpu=True, double_conv=True, conditioning=False, infogan_input_method='append_channels', append_channels_div=1), discriminator_params=networks.DiscriminatorParams(channels_at_2x2=4096, channels_max=512, conditioning=False, optimizer=('adam_b0_b99', 0.0005), weight_norm='equalized', norm=None, norm_per_gpu=True, double_conv=True, second_conv_channels_x2=True), use_gpu_tower_scope=True)
def get_inceptionresnetv2(model_name=None, pretrained=False, root=os.path.join('~', '.torch', 'models'), **kwargs):
    """Create an InceptionResNetV2 network, optionally loading pretrained weights
    named `model_name` from the local model store under `root`."""
    net = InceptionResNetV2(**kwargs)
    if not pretrained:
        return net
    # A concrete model name is required to locate the pretrained weights.
    if not model_name:
        raise ValueError('Parameter `model_name` should be properly initialized for loading pretrained model.')
    from .model_store import download_model
    download_model(net=net, model_name=model_name, local_model_store_dir_path=root)
    return net
class Completion(TypedDict):
    """OpenAI-style text-completion response payload."""
    # Unique identifier of this completion.
    id: str
    # Always the literal string 'text_completion'.
    object: Literal['text_completion']
    # Unix timestamp (seconds) of creation.
    created: int
    # Name of the model that produced the completion.
    model: str
    # One entry per generated completion choice.
    choices: List[CompletionChoice]
    # Token accounting for the prompt and the completion.
    usage: CompletionUsage
def discount_path(path, h):
    """Compute discounted returns-to-go for a reward sequence `path` with
    discount factor `h`; element t is sum_{k>=t} h^(k-t) * path[k]."""
    running = 0
    rev_returns = []
    # Accumulate from the last step backwards: R_t = h * R_{t+1} + r_t.
    for offset in range(len(path)):
        running = running * h + path[-1 - offset]
        rev_returns.append(running)
    rev_returns.reverse()
    return ch.stack(rev_returns, 0)
class Seq2SeqMoEModelOutput(ModelOutput):
    """Output container for a Mixture-of-Experts encoder-decoder model.

    All optional fields are populated depending on the forward-pass flags
    (use_cache, output_hidden_states, output_attentions, output_router_logits).
    """
    # Final decoder hidden states.
    last_hidden_state: torch.FloatTensor = None
    # Cached key/value states for fast autoregressive decoding.
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
    # Per-layer decoder hidden states (incl. embeddings) when requested.
    decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # Per-layer decoder self-attention weights when requested.
    decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
    # Per-layer decoder MoE router logits when requested.
    decoder_router_logits: Optional[Tuple[torch.FloatTensor]] = None
    # Per-layer decoder-over-encoder attention weights when requested.
    cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
    # Final encoder hidden states.
    encoder_last_hidden_state: Optional[torch.FloatTensor] = None
    # Per-layer encoder hidden states (incl. embeddings) when requested.
    encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # Per-layer encoder self-attention weights when requested.
    encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
    # Per-layer encoder MoE router logits when requested.
    encoder_router_logits: Optional[Tuple[torch.FloatTensor]] = None
# NOTE(review): this decorator line appears truncated by extraction —
# presumably `@register_model('fconv_lm')`; confirm against VCS.
_model('fconv_lm')
class FConvLanguageModel(FairseqLanguageModel):
    """Fully convolutional language model wrapper around FConvDecoder."""

    def __init__(self, decoder):
        super().__init__(decoder)

    def add_args(parser):
        """Add model-specific arguments to the parser.

        NOTE(review): no self/cls parameter — presumably @staticmethod originally.
        """
        parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability')
        parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension')
        parser.add_argument('--decoder-layers', type=str, metavar='EXPR', help='decoder layers [(dim, kernel_size), ...]')
        parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N', help='decoder output embedding dimension')
        parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. Must be used with adaptive_loss criterion')
        parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections')
        parser.add_argument('--decoder-attention', type=str, metavar='EXPR', help='decoder attention [True, ...]')

    def build_model(cls, args, task):
        """Build a new model instance from parsed args.

        NOTE(review): takes `cls` — presumably decorated with @classmethod
        originally (decorator lost in extraction).
        """
        base_lm_architecture(args)
        if (safe_hasattr(args, 'max_target_positions') and (not safe_hasattr(args, 'tokens_per_sample'))):
            args.tokens_per_sample = args.max_target_positions
        # NOTE(review): `eval()` on --decoder-layers/--decoder-attention
        # executes arbitrary expressions from the command line — only safe
        # with trusted input.
        decoder = FConvDecoder(dictionary=task.target_dictionary, embed_dim=args.decoder_embed_dim, convolutions=eval(args.decoder_layers), out_embed_dim=args.decoder_embed_dim, attention=eval(args.decoder_attention), dropout=args.dropout, max_positions=args.tokens_per_sample, share_embed=False, positional_embeddings=False, adaptive_softmax_cutoff=(utils.eval_str_list(args.adaptive_softmax_cutoff, type=int) if (args.criterion == 'adaptive_loss') else None), adaptive_softmax_dropout=args.adaptive_softmax_dropout)
        return FConvLanguageModel(decoder)
def test1():
    """Depth-first search must find a route from Tottenham Court Road back to
    Westminster through the connected station graph."""
    westminster = Node('Westminster')
    waterloo = Node('Waterloo', None, [westminster])
    trafalgar = Node('Trafalgar Square', None, [westminster, waterloo])
    canary = Node('Canary Wharf', None, [waterloo, trafalgar])
    london_bridge = Node('London Bridge', None, [canary, trafalgar])
    tottenham = Node('Tottenham Court Road', None, [london_bridge, canary])
    assert depth_first_search(tottenham, westminster)
def _check_file_path(path: Union[(str, Path)], model_dir: Path) -> Union[(Path, None)]:
    """Resolve `path` to an existing file, falling back to model_dir/<basename>.

    Returns None when `path` is None. Fails (AssertionError) when neither the
    given path nor the fallback inside `model_dir` exists.
    """
    if (path is None):
        return None
    p = (Path(path) if isinstance(path, str) else path)
    if (not p.is_file()):
        # Fall back to a file with the same basename inside the model directory.
        p = (model_dir / p.name)
    # NOTE(review): `assert` is stripped under `python -O`; consider raising
    # FileNotFoundError if callers rely on this check in production.
    assert p.is_file(), p
    return p
def mod2pi(x):
    """Wrap the angle x (radians) into the interval [-pi, pi]."""
    two_pi = 2.0 * math.pi
    # Signed modulus keeps the intermediate result on the same side as x.
    wrapped = np.mod(x, np.copysign(two_pi, x))
    if wrapped > math.pi:
        wrapped -= two_pi
    elif wrapped < -math.pi:
        wrapped += two_pi
    return wrapped
def test_points_in_boxes_gpu():
    """points_in_boxes_gpu should return, per point, the index of the box
    containing it (-1 when no box contains the point)."""
    if (not torch.cuda.is_available()):
        pytest.skip('test requires GPU and torch+cuda')
    # Two batches, one 7-DoF box each: presumably (x, y, z, dx, dy, dz, yaw) — TODO confirm.
    boxes = torch.tensor([[[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.3]], [[(- 10.0), 23.0, 16.0, 10, 20, 20, 0.5]]], dtype=torch.float32).cuda()
    # Eight query points per batch.
    pts = torch.tensor([[[1, 2, 3.3], [1.2, 2.5, 3.0], [0.8, 2.1, 3.5], [1.6, 2.6, 3.6], [0.8, 1.2, 3.9], [(- 9.2), 21.0, 18.2], [3.8, 7.9, 6.3], [4.7, 3.5, (- 12.2)]], [[3.8, 7.6, (- 2)], [(- 10.6), (- 12.9), (- 20)], [(- 16), (- 18), 9], [(- 21.3), (- 52), (- 5)], [0, 0, 0], [6, 7, 8], [(- 2), (- 3), (- 4)], [6, 4, 9]]], dtype=torch.float32).cuda()
    point_indices = points_in_boxes_gpu(points=pts, boxes=boxes)
    # Batch 0: first five points lie inside box 0; batch 1: no point is inside.
    expected_point_indices = torch.tensor([[0, 0, 0, 0, 0, (- 1), (- 1), (- 1)], [(- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1)]], dtype=torch.int32).cuda()
    assert (point_indices.shape == torch.Size([2, 8]))
    assert (point_indices == expected_point_indices).all()
    # Repeat on a second device to catch hard-coded device-index assumptions.
    if (torch.cuda.device_count() > 1):
        pts = pts.to('cuda:1')
        boxes = boxes.to('cuda:1')
        expected_point_indices = expected_point_indices.to('cuda:1')
        point_indices = points_in_boxes_gpu(points=pts, boxes=boxes)
        assert (point_indices.shape == torch.Size([2, 8]))
        assert (point_indices == expected_point_indices).all()
class TFNextSentencePredictorOutput(ModelOutput):
    """Output container for TensorFlow next-sentence-prediction models."""
    # Next-sentence classification scores (before softmax).
    logits: tf.Tensor = None
    # Per-layer hidden states when output_hidden_states is requested.
    hidden_states: Optional[Tuple[tf.Tensor]] = None
    # Per-layer attention weights when output_attentions is requested.
    attentions: Optional[Tuple[tf.Tensor]] = None
def weights_init_classifier(m):
    """Initialize classifier layers in place: Linear weights ~ N(0, 0.001),
    biases zeroed. Intended for use with `module.apply(...)`."""
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        init.normal_(m.weight.data, std=0.001)
        # BUGFIX: Linear layers created with bias=False have m.bias is None;
        # the original crashed on them (AttributeError on .data).
        if m.bias is not None:
            init.constant_(m.bias.data, 0.0)
class FangraphsMonth(EnumBase):
    """Month filter values for Fangraphs leaderboard queries.

    Fangraphs groups March with April and September with October, so the
    single-month names are deliberate aliases of the grouped members
    (Enum treats equal values as the same member).
    """
    ALL = 0
    MARCH_APRIL = 4
    # Aliases of MARCH_APRIL.
    MARCH = MARCH_APRIL
    APRIL = MARCH_APRIL
    MAY = 5
    JUNE = 6
    JULY = 7
    AUGUST = 8
    SEPTEMBER_OCTOBER = 9
    # Aliases of SEPTEMBER_OCTOBER.
    SEPTEMBER = SEPTEMBER_OCTOBER
    OCTOBER = SEPTEMBER_OCTOBER
class RecurrentCrossAttentionLayer(Module):
    """Cross-attention layer for one-step-at-a-time (recurrent) decoding.

    Projects the single-step query and the full key/value memory, delegates
    the attention computation to `attention`, and relies on `state` so the
    memory projections are computed only on the first decoding step.
    """

    def __init__(self, attention, d_model, n_heads, d_keys=None, d_values=None, d_model_keys=None, event_dispatcher=''):
        super(RecurrentCrossAttentionLayer, self).__init__()
        # Default per-head dimensions split d_model evenly across heads.
        d_keys = (d_keys or (d_model // n_heads))
        d_values = (d_values or (d_model // n_heads))
        d_model_keys = (d_model_keys or d_model)
        self.inner_attention = attention
        self.query_projection = Linear(d_model, (d_keys * n_heads))
        self.key_projection = Linear(d_model_keys, (d_keys * n_heads))
        self.value_projection = Linear(d_model_keys, (d_values * n_heads))
        self.out_projection = Linear((d_values * n_heads), d_model)
        self.n_heads = n_heads
        self.event_dispatcher = EventDispatcher.get(event_dispatcher)

    def forward(self, query, keys, values, key_lengths, state=None):
        """Run one decoding step.

        query: (N, d_model) current-step queries; keys/values: the memory.
        Returns (output, new_state). When `state` is not None, the projected
        keys/values are assumed cached inside it and are not recomputed.
        """
        (N, _) = query.shape
        H = self.n_heads
        query = self.query_projection(query).view(N, H, (- 1))
        if (state is None):
            # First step: project the whole memory once, per head.
            (_, S, _) = keys.shape
            keys = self.key_projection(keys).view(N, S, H, (- 1))
            values = self.value_projection(values).view(N, S, H, (- 1))
        else:
            # Later steps: the inner attention reads projections from `state`.
            keys = None
            values = None
        (new_value, state) = self.inner_attention(query, keys, values, key_lengths, state=state)
        new_value = new_value.view(N, (- 1))
        return (self.out_projection(new_value), state)
def check_model_list():
    """Verify every model package under models/ is re-exported by diffusers.models;
    raise listing any that are missing."""
    models_dir = os.path.join(PATH_TO_DIFFUSERS, 'models')
    on_disk = []
    for entry in os.listdir(models_dir):
        entry_path = os.path.join(models_dir, entry)
        # Only real packages (directories with an __init__.py) count.
        if os.path.isdir(entry_path) and '__init__.py' in os.listdir(entry_path):
            on_disk.append(entry)
    exported = [name for name in dir(diffusers.models) if not name.startswith('__')]
    missing_models = sorted(set(on_disk).difference(exported))
    if missing_models:
        raise Exception(f"The following models should be included in {models_dir}/__init__.py: {','.join(missing_models)}.")
def download_file_from_google_drive(id, destination):
    """Download a (possibly large) file from Google Drive to ``destination``.

    Google Drive interposes a confirmation page for large files; when the
    first response carries a confirmation token we re-issue the request with
    ``confirm=<token>`` before streaming the content to disk via
    ``save_response_content``.

    Args:
        id: the Google Drive file id.
        destination: local path to write the downloaded file to.
    """
    # Bug fix: the URL literal was truncated in the original; restored to the
    # canonical Google Drive direct-download endpoint.
    URL = 'https://docs.google.com/uc?export=download'
    session = requests.Session()
    response = session.get(URL, params={'id': id}, stream=True)
    token = get_confirm_token(response)
    if token:
        # Large-file confirmation page was returned; retry with the token.
        params = {'id': id, 'confirm': token}
        response = session.get(URL, params=params, stream=True)
    save_response_content(response, destination)
class _MockDistribution():
    """Test double for a distribution object.

    Every sampling method returns the fixed action passed at construction
    (twice, mirroring the (sample, pre-tanh) interface), and ``log_prob``
    always returns a constant tensor of 10.
    """

    def __init__(self, action):
        self._action = action

    def rsample_with_pre_tanh_value(self, **kwargs):
        """Return the fixed action as both the sample and its pre-tanh value."""
        del kwargs
        return self._action, self._action

    def rsample(self, **kwargs):
        """Return the fixed action twice, ignoring all keyword arguments."""
        del kwargs
        return self._action, self._action

    def log_prob(self, value, **kwargs):
        """Ignore the inputs and return a constant log-probability tensor."""
        del value
        del kwargs
        return torch.Tensor([10.0])
class MultiPatternInferenceEPL():
    """EPL-style spiking network for multi-pattern inference on Loihi (NxNet).

    The network distributes excitatory mitral-cell (MC) neurons, inhibitory
    granule-cell (GC) neurons and sub-threshold-oscillator (STO) neurons over
    ``numCores`` logical cores.  Weights/delays and the wind-tunnel data set
    are loaded from ``../../data`` unless ``debug`` is set, in which case
    synthetic weights are used and the network is not built automatically.

    NOTE(review): the four ``num*Neurons*`` accessors below are read as plain
    attributes elsewhere in this class (e.g. ``range(self.numENeurons)`` and
    ``range(self.numINeuronsPerCore)``); they were presumably decorated with
    @property in the original source — confirm, otherwise those ``range``
    calls would fail on a bound method.
    """

    def __init__(self, numCores, numExcNeuronsPerCore, numInhNeuronsPerCore, inputBiases=None, gcInputBias=None, conn_prob=0.2, delayMCToGC=16, numMCToGCDelays=4, doOnlyInference=True, debug=False, log=True):
        self.net = nx.NxNet()
        self.numCores = numCores
        self.numExcNeuronsPerCore = numExcNeuronsPerCore
        self.numInhNeuronsPerCore = numInhNeuronsPerCore
        self.inputBiases = inputBiases
        self.gcInputBias = gcInputBias
        self.conn_prob = conn_prob
        self.numMCToGCDelays = numMCToGCDelays
        self.delayMCToGC = delayMCToGC
        # Lookup table mapping a stimulus level (index 0-19) to a bias value
        # written into the compartment configuration in applyInputs().
        self.stim2bias = [0, 34, 36, 38, 41, 43, 46, 50, 54, 59, 65, 72, 81, 92, 107, 129, 161, 214, 321, 641]
        # Timesteps per gamma cycle (see sniff() and the spike post-processing).
        self.cycleDuration = 40
        self.doOnlyInference = doOnlyInference
        self.debug = debug
        self.log = log
        self.numStepsRan = 0
        if (not self.debug):
            self.setupNetwork()

    def numENeurons(self):
        # Total number of excitatory (MC) neurons across all cores.
        return (self.numCores * self.numExcNeuronsPerCore)

    def numENeuronsPerCore(self):
        return self.numExcNeuronsPerCore

    def numINeurons(self):
        # Total number of inhibitory (GC) neurons across all cores.
        return (self.numCores * self.numInhNeuronsPerCore)

    def numINeuronsPerCore(self):
        return self.numInhNeuronsPerCore

    def setupNetwork(self):
        """Load data, build all neuron groups/connections, attach probes."""
        self.loadWeightsAndInputs()
        self.createMCAndSTONetwork()
        self.createMCToGCNetwork()
        self.setupProbes()

    def createMCAndSTONetwork(self):
        # Excitatory MC neurons plus the STO neurons that gate them.
        self.createExcitatoryMCNeurons()
        self.createSTONeurons()
        self.connectSTONeuronsWithMCADNeurons()

    def createMCToGCNetwork(self):
        # Inhibitory GC population and its bidirectional coupling with MCs.
        self.createInhibitoryGCNeurons()
        self.connectInhibitoryGCToExcitatoryMCNeurons()
        self.connectExcitatoryMCToInhibitoryGCNeurons()

    def loadWeightsAndInputs(self):
        """Load GC<->MC weight/delay matrices and (outside debug) the
        pickled wind-tunnel training/test sets from ../../data."""
        dir_path = os.path.dirname(os.path.abspath(__file__))
        data_dir = os.path.join(dir_path, '../../data/')
        self.inhGCToExcMCWeights = np.load(os.path.join(data_dir, 'i2eWgtMat.npy'))
        self.inhGCToExcMCDelays = np.load(os.path.join(data_dir, 'i2eDlyMat.npy'))
        self.excMCToInhGCWeights = np.load(os.path.join(data_dir, 'e2iWgtMat.npy'))
        if (not self.debug):
            windTunnelDataFile = 'windTunnelData.pi'
            rf = open(os.path.join(data_dir, windTunnelDataFile), 'rb')
            # The file contains two pickled objects in sequence.
            self.trainingSet = pickle.load(rf)
            self.testSet = pickle.load(rf)
            rf.close()

    def createInhibitoryGCNeurons(self):
        """Create one GC compartment group per core plus a global group."""
        self.allGCNeuronsGroup = self.net.createCompartmentGroup()
        self.gcNeuronGrpPerCoreList = []
        if (self.gcInputBias is None):
            self.gcInputBias = 0
        for coreIdx in range(self.numCores):
            gcNeuronGrpPerCore = self.net.createCompartmentGroup()
            # In debug mode the bias drives the GC directly and the threshold
            # is scaled down accordingly.
            gcNeuronProtoPerCore = nx.CompartmentPrototype(logicalCoreId=coreIdx, compartmentCurrentDecay=4095, compartmentVoltageDecay=4095, biasMant=(0 if (not self.debug) else self.gcInputBias), vThMant=((5 * 200) if (not self.debug) else (self.gcInputBias // 64)), refractoryDelay=25, vMinExp=0, numDendriticAccumulators=64, functionalState=nx.COMPARTMENT_FUNCTIONAL_STATE.IDLE, thresholdBehavior=nx.COMPARTMENT_THRESHOLD_MODE.SPIKE_AND_RESET)
            # NOTE(review): numINeuronsPerCore is referenced without a call
            # here — consistent with a stripped @property decorator; confirm.
            for i in range(self.numINeuronsPerCore):
                gcCx = self.net.createCompartment(prototype=gcNeuronProtoPerCore)
                gcNeuronGrpPerCore.addCompartments(gcCx)
                self.allGCNeuronsGroup.addCompartments(gcCx)
            self.gcNeuronGrpPerCoreList.append(gcNeuronGrpPerCore)

    def connectInhibitoryGCToExcitatoryMCNeurons(self):
        """Connect GC -> MC per core with separate positive/negative groups
        (box post-synaptic response, sparse compression)."""
        ConnGroup = namedtuple('ConnGroup', 'positive negative')
        self.inh2ExcConnGroups = list()
        for coreIdx in range(self.numCores):
            if (not self.debug):
                # Row 0 holds excitatory, row 1 inhibitory weights/delays.
                excWgts = self.inhGCToExcMCWeights[(0, coreIdx)]
                excDlys = self.inhGCToExcMCDelays[(0, coreIdx)]
                inhWgts = self.inhGCToExcMCWeights[(1, coreIdx)]
                inhDlys = self.inhGCToExcMCDelays[(1, coreIdx)]
            else:
                # Debug: replace loaded matrices with constant weights/delays.
                wgts = self.inhGCToExcMCWeights
                dlys = self.inhGCToExcMCDelays
                excWgts = np.ones_like(wgts[(0, coreIdx)])
                excDlys = (np.ones_like(dlys[(0, coreIdx)]) * 2)
                inhWgts = (np.ones_like(wgts[(1, coreIdx)]) * (- 1))
                inhDlys = (np.ones_like(dlys[(1, coreIdx)]) * 1)
            excConnProtoBox = nx.ConnectionPrototype(numDelayBits=6, enableDelay=1, signMode=nx.SYNAPSE_SIGN_MODE.EXCITATORY, postSynResponseMode=nx.SYNAPSE_POST_SYN_RESPONSE_MODE.BOX, compressionMode=nx.SYNAPSE_COMPRESSION_MODE.SPARSE)
            inhConnProtoBox = nx.ConnectionPrototype(numDelayBits=6, enableDelay=1, signMode=nx.SYNAPSE_SIGN_MODE.INHIBITORY, postSynResponseMode=nx.SYNAPSE_POST_SYN_RESPONSE_MODE.BOX, compressionMode=nx.SYNAPSE_COMPRESSION_MODE.SPARSE)
            posConnGrp = self.net.createConnectionGroup(src=self.gcNeuronGrpPerCoreList[coreIdx], dst=self.mcNeuronGrpPerCoreList[coreIdx], prototype=excConnProtoBox, connectionMask=(excWgts > 0), weight=excWgts, delay=excDlys)
            negConnGrp = self.net.createConnectionGroup(src=self.gcNeuronGrpPerCoreList[coreIdx], dst=self.mcNeuronGrpPerCoreList[coreIdx], prototype=inhConnProtoBox, connectionMask=(inhWgts < 0), weight=inhWgts, delay=inhDlys)
            self.inh2ExcConnGroups.append(ConnGroup(positive=posConnGrp, negative=negConnGrp))

    def connectExcitatoryMCToInhibitoryGCNeurons(self):
        """Connect MC -> GC with one connection group per delay step."""
        minDelay = self.delayMCToGC
        numDelays = self.numMCToGCDelays
        self.exc2InhConnGroups = list()
        for delay in range(minDelay, (minDelay + numDelays)):
            # One weight matrix per delay offset.
            wgtMat = self.excMCToInhGCWeights[(delay - minDelay)]
            connProtoE2I = nx.ConnectionPrototype(delay=(delay if (not self.debug) else 0), numDelayBits=6, enableDelay=1, signMode=nx.SYNAPSE_SIGN_MODE.EXCITATORY, compressionMode=nx.SYNAPSE_COMPRESSION_MODE.SPARSE)
            connGrp = self.net.createConnectionGroup(dst=self.allGCNeuronsGroup, src=self.allMCSomaNeuronsGrp, prototype=connProtoE2I, connectionMask=(wgtMat > 0), weight=wgtMat)
            self.exc2InhConnGroups.append(connGrp)

    def createExcitatoryMCNeurons(self):
        """Create the MC apical-dendrite (AD) neurons, the MC soma neurons
        and the fixed AD -> soma connections."""
        if (self.inputBiases is None):
            self.inputBiases = ([0] * self.numCores)
        # One AD compartment per core, biased by the per-core input.
        self.mcADNeuronGroup = self.net.createCompartmentGroup()
        for coreIdx in range(self.numCores):
            mcADProto = nx.CompartmentPrototype(logicalCoreId=coreIdx, compartmentCurrentDecay=0, vThMant=10, biasMant=self.inputBiases[coreIdx], refractoryDelay=20, vMinExp=0, numDendriticAccumulators=64, functionalState=nx.COMPARTMENT_FUNCTIONAL_STATE.IDLE, thresholdBehavior=nx.COMPARTMENT_THRESHOLD_MODE.SPIKE_AND_RESET)
            mcADCx = self.net.createCompartment(prototype=mcADProto)
            self.mcADNeuronGroup.addCompartments(mcADCx)
        # Soma compartments: a per-core group plus one global group.
        self.allMCSomaNeuronsGrp = self.net.createCompartmentGroup()
        self.mcNeuronGrpPerCoreList = []
        for coreIdx in range(self.numCores):
            mcSomaNeuronProto = nx.CompartmentPrototype(logicalCoreId=coreIdx, compartmentCurrentDecay=0, compartmentVoltageDecay=4095, vThMant=2, refractoryDelay=19, vMinExp=0, numDendriticAccumulators=64, functionalState=nx.COMPARTMENT_FUNCTIONAL_STATE.IDLE, thresholdBehavior=nx.COMPARTMENT_THRESHOLD_MODE.SPIKE_AND_RESET)
            mcNeuronGrpPerCore = self.net.createCompartmentGroup()
            # NOTE(review): numENeuronsPerCore referenced without a call —
            # consistent with a stripped @property decorator; confirm.
            for _ in range(self.numENeuronsPerCore):
                mcSomaNeuronCx = self.net.createCompartment(prototype=mcSomaNeuronProto)
                self.allMCSomaNeuronsGrp.addCompartments(mcSomaNeuronCx)
                mcNeuronGrpPerCore.addCompartments(mcSomaNeuronCx)
            self.mcNeuronGrpPerCoreList.append(mcNeuronGrpPerCore)
        # Fixed excitatory AD -> soma link (one per core).
        mcADToSomaConnProtoBox = nx.ConnectionPrototype(weight=3, delay=19, numDelayBits=6, enableDelay=1, signMode=nx.SYNAPSE_SIGN_MODE.EXCITATORY, postSynResponseMode=nx.SYNAPSE_POST_SYN_RESPONSE_MODE.BOX, compressionMode=nx.SYNAPSE_COMPRESSION_MODE.SPARSE)
        # NOTE(review): numENeurons referenced without a call — see class note.
        for coreIdx in range(self.numENeurons):
            self.net._createConnection(src=self.mcADNeuronGroup[coreIdx], dst=self.allMCSomaNeuronsGrp[coreIdx], prototype=mcADToSomaConnProtoBox)

    def createSTONeurons(self):
        """Create one sub-threshold-oscillator neuron per core."""
        self.stoNeuronGroup = self.net.createCompartmentGroup()
        for i in range(self.numENeurons):
            stoNeuronProto = nx.CompartmentPrototype(logicalCoreId=i, compartmentCurrentDecay=4095, vThMant=39, biasMant=64, numDendriticAccumulators=64, vMinExp=0, functionalState=nx.COMPARTMENT_FUNCTIONAL_STATE.IDLE, thresholdBehavior=nx.COMPARTMENT_THRESHOLD_MODE.SPIKE_AND_RESET)
            stoNeuronCx = self.net.createCompartment(prototype=stoNeuronProto)
            self.stoNeuronGroup.addCompartments(stoNeuronCx)

    def connectSTONeuronsWithMCADNeurons(self, wgt=20):
        """Inhibitory STO -> (MC AD, MC soma) gating connections per core."""
        connProtoBox = nx.ConnectionPrototype(weight=(- wgt), delay=20, numDelayBits=6, enableDelay=1, signMode=nx.SYNAPSE_SIGN_MODE.INHIBITORY, postSynResponseMode=nx.SYNAPSE_POST_SYN_RESPONSE_MODE.BOX, compressionMode=nx.SYNAPSE_COMPRESSION_MODE.SPARSE)
        for coreIdx in range(self.numENeurons):
            self.net._createConnection(src=self.stoNeuronGroup[coreIdx], dst=self.mcADNeuronGroup[coreIdx], prototype=connProtoBox)
            for idx in range(self.numENeuronsPerCore):
                self.net._createConnection(src=self.stoNeuronGroup[coreIdx], dst=self.mcNeuronGrpPerCoreList[coreIdx][idx], prototype=connProtoBox)

    def applyInputs(self, inputList, thethaReset=False):
        """Write per-MC stimulus biases (via stim2bias) into the compiled
        board registers; optionally reset the membrane voltage.

        Raises:
            ValueError: if inputList has the wrong length or the network has
                not been compiled yet.
        """
        if (len(inputList) != self.numENeurons):
            raise ValueError('Incorrect size of inputs list')
        if (self.board is None):
            raise ValueError("There's no board as the network is not compiled yet.")
        for (mcIdx, inputVal) in enumerate(inputList):
            cx = self.mcADNeuronGroup[mcIdx]
            (_, chipId, coreId, cxId, _, _) = self.net.resourceMap.compartment(cx.nodeId)
            n2Core = self.board.n2Chips[chipId].n2Cores[coreId]
            # NOTE(review): np.asscalar was removed in NumPy >= 1.23; this
            # code requires an older NumPy (or should use cxId.item()).
            n2Core.cxCfg[np.asscalar(cxId)].bias = self.stim2bias[inputVal]
            n2Core.cxCfg.pushModified()
            if thethaReset:
                n2Core.cxState[np.asscalar(cxId)].v = 0
                n2Core.cxState.pushModified()

    def switchThetaState(self, state):
        """Toggle the MC soma threshold between active (state=1 -> vth=2)
        and suppressed (vth=100) theta phases, writing registers directly."""
        for mcIdx in range(self.numCores):
            cx = self.allMCSomaNeuronsGrp[mcIdx]
            (_, chipId, coreId, cxId, _, vthProfileCfgId1) = map((lambda x: int(x)), self.net.resourceMap.compartment(cx.nodeId))
            n2Core = self.board.n2Chips[chipId].n2Cores[coreId]
            vth = (2 if (state == 1) else 100)
            n2Core.vthProfileCfg[vthProfileCfgId1].staticCfg.vth = vth
            n2Core.vthProfileCfg.pushModified()

    def sniff(self, inputList, numGammaCycles=5, numThetaCycles=1):
        """Present one input for numGammaCycles, then run a quiet theta
        half-cycle with inputs cleared."""
        self.applyInputs(inputList)
        numSteps = (numGammaCycles * self.cycleDuration)
        # NOTE(review): `board` is read as a bare global here while the
        # compiled board is stored on self.board elsewhere — confirm intended.
        board.run(numSteps)
        self.applyInputs(([0] * self.numCores), thethaReset=True)
        self.switchThetaState(state=0)
        board.run(numSteps)
        self.switchThetaState(state=1)
        self.numStepsRan += (2 * numSteps)

    def dumpSpikesOutputForPostProcessing(self, nGamma):
        """Convert MC soma spike trains into per-gamma-cycle rank codes and
        pickle them to 'spikes.pi'."""
        (_, spikeProbes, _) = self.mcSomaProbes
        # Skip the first 21 timesteps (initial transient).
        offset = (20 + 1)
        gammaCode = []
        for _ in range(nGamma):
            gammaCode.append(([0] * 72))
        for (i, spkProbe) in enumerate(spikeProbes):
            data = spkProbe.data[offset:]
            spikes1 = np.nonzero(data)[0]
            for j in spikes1:
                # 40 timesteps per gamma cycle; rank = 21 - phase within cycle.
                gammaCycle = (j // 40)
                rank = (((gammaCycle * 40) + 21) - ((gammaCycle * 40) + (j % 40)))
                gammaCode[gammaCycle][i] = rank
        pickledfilename = 'spikes.pi'
        wf = open(pickledfilename, 'wb')
        pickle.dump(gammaCode, wf)
        wf.close()

    def setupProbes(self):
        self.setupMCAndSTOProbes()

    def setupMCAndSTOProbes(self):
        # Probe voltage, spikes and current for MC AD, MC soma and STO groups.
        probeParams = [nx.ProbeParameter.COMPARTMENT_VOLTAGE, nx.ProbeParameter.SPIKE, nx.ProbeParameter.COMPARTMENT_CURRENT]
        self.mcADProbes = self.mcADNeuronGroup.probe(probeParams)
        self.mcSomaProbes = self.allMCSomaNeuronsGrp.probe(probeParams)
        self.stoProbes = self.stoNeuronGroup.probe(probeParams)

    def getProbesForNeuronIdx(self, probes, idx):
        """Return the (voltage, spike, current) probes of one neuron."""
        (vProbes, spikeProbes, uProbes) = probes
        return (vProbes[idx], spikeProbes[idx], uProbes[idx])

    def plotSTOVsMCNeuronProbes(self, idx):
        """Plot the three probe types for MC soma vs STO neuron `idx`
        side-by-side in a 3x2 grid."""
        (vProbeE, spikeProbeE, uProbeE) = self.getProbesForNeuronIdx(self.mcSomaProbes, idx)
        (vProbeSTO, spikeProbeSTO, uProbeSTO) = self.getProbesForNeuronIdx(self.stoProbes, idx)
        plt.figure()
        ax1 = plt.subplot(321)
        vProbeE.plot()
        plt.title('E-NEURON(V_PROBE)')
        plt.subplot(323, sharex=ax1)
        spikeProbeE.plot()
        plt.title('E-NEURON(SPIKE_PROBE)')
        plt.subplot(325, sharex=ax1)
        uProbeE.plot()
        plt.title('E-NEURON(U_PROBE)')
        plt.subplot(322, sharex=ax1)
        vProbeSTO.plot()
        plt.title('STO-NEURON(V_PROBE)')
        plt.subplot(324, sharex=ax1)
        spikeProbeSTO.plot()
        plt.title('STO-NEURON(SPIKE_PROBE)')
        plt.subplot(326, sharex=ax1)
        uProbeSTO.plot()
        plt.title('E-NEURON(U_PROBE)')

    def plotSpikeRaster(self, probes, offset=60):
        """Event-plot the spike times of all probed neurons, skipping the
        first `offset` timesteps."""
        (_, spikeProbes, _) = probes
        plt.figure()
        data = [np.nonzero(spkProbe.data[offset:])[0] for spkProbe in spikeProbes]
        size = self.numENeurons
        plt.eventplot(positions=data, colors=[(1, 0, 0)], lineoffsets=np.arange(size), linelengths=(np.ones(size) / 2.0))
        plt.title('E-Neurons (Spike Raster Plot)')
        plt.ylabel('# E-Neurons')
        plt.xlabel('Time + {} timesteps'.format(offset))
        plt.tight_layout()

    def compileAndGetBoard(self):
        """Compile the NxNet network and cache/return the board handle."""
        self.board = nx.N2Compiler().compile(self.net)
        return self.board
def seresnet1001_svhn(num_classes=10, **kwargs):
    """SE-ResNet-1001 (bottleneck) configured for SVHN.

    Any extra keyword arguments are forwarded to ``get_seresnet_cifar``.
    """
    return get_seresnet_cifar(
        num_classes=num_classes,
        blocks=1001,
        bottleneck=True,
        model_name='seresnet1001_svhn',
        **kwargs,
    )
def horizon(params: dict, detector: Union[(Detector, Network)], target_SNR: int=9, waveform_model: str=WAVEFORM_MODEL, cosmology_model: cosmology.Cosmology=Planck18):
    """Find the horizon of the source described by ``params``: the luminosity
    distance and redshift at which its SNR equals ``target_SNR``.

    Args:
        params: source parameters; any 'redshift'/'luminosity_distance'
            entries are ignored (a warning is emitted).
        detector: a single Detector or a Network of detectors.
        target_SNR: SNR defining the horizon.
        waveform_model: waveform approximant passed to the SNR computation.
        cosmology_model: cosmology used to map redshift to distance.

    Returns:
        (distance, redshift) tuple; (0.0, 0.0) if the source is out of band.

    Raises:
        TypeError: if ``detector`` is neither a Detector nor a Network.
        ValueError: if the root finding does not converge.
    """
    if (('redshift' in params) or ('luminosity_distance' in params)):
        warnings.warn('The redshift and distance parameters will not be used in this function.')
    if isinstance(detector, Detector):
        snr_computer = compute_SNR
    elif isinstance(detector, Network):
        snr_computer = compute_SNR_network
    else:
        # Bug fix: previously an unsupported detector type left snr_computer
        # unbound and surfaced later as a confusing NameError; fail fast.
        raise TypeError(f'detector must be a Detector or a Network, got {type(detector).__name__}')

    def SNR_error(redshift):
        # log(SNR / target): zero exactly at the horizon; the log keeps the
        # function well-scaled for brentq across many orders of magnitude.
        distance = cosmology_model.luminosity_distance(redshift).value
        mod_params = (params | {'redshift': redshift, 'luminosity_distance': distance})
        with np.errstate(divide='ignore'):
            return np.log((snr_computer(mod_params, detector, waveform_model) / target_SNR))
    # If the SNR is below target even at the minimum redshift, the source is
    # never visible: no bracket exists for the root finder.
    if (not (SNR_error(MIN_REDSHIFT) > 0)):
        warnings.warn('The source is completely out of band')
        return (0.0, 0.0)
    (redshift, r) = brentq(SNR_error, MIN_REDSHIFT, MAX_REDSHIFT, full_output=True)
    if (not r.converged):
        raise ValueError('Horizon computation did not converge!')
    distance = cosmology_model.luminosity_distance(redshift).value
    return (distance, redshift)
class KandinskyV22CombinedPipeline(DiffusionPipeline):
    """Combined text-to-image pipeline for Kandinsky 2.2.

    Wraps a prior pipeline (text/image encoders + prior transformer) that
    produces image embeddings and a decoder pipeline (UNet + MoVQ) that turns
    those embeddings into images.
    """
    model_cpu_offload_seq = 'prior_text_encoder->prior_image_encoder->unet->movq'
    _load_connected_pipes = True

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel, prior_prior: PriorTransformer, prior_image_encoder: CLIPVisionModelWithProjection, prior_text_encoder: CLIPTextModelWithProjection, prior_tokenizer: CLIPTokenizer, prior_scheduler: UnCLIPScheduler, prior_image_processor: CLIPImageProcessor):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq, prior_prior=prior_prior, prior_image_encoder=prior_image_encoder, prior_text_encoder=prior_text_encoder, prior_tokenizer=prior_tokenizer, prior_scheduler=prior_scheduler, prior_image_processor=prior_image_processor)
        # Two sub-pipelines share the registered modules.
        self.prior_pipe = KandinskyV22PriorPipeline(prior=prior_prior, image_encoder=prior_image_encoder, text_encoder=prior_text_encoder, tokenizer=prior_tokenizer, scheduler=prior_scheduler, image_processor=prior_image_processor)
        self.decoder_pipe = KandinskyV22Pipeline(unet=unet, scheduler=scheduler, movq=movq)

    def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable]=None):
        self.decoder_pipe.enable_xformers_memory_efficient_attention(attention_op)

    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload all models to CPU via hooks, moving each to GPU only when
        its forward pass runs."""
        self.prior_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id)
        self.decoder_pipe.enable_sequential_cpu_offload(gpu_id=gpu_id)

    def enable_model_cpu_offload(self, gpu_id=0):
        """Offload whole models to CPU, moving one model at a time to GPU —
        lower memory use with a smaller speed penalty than sequential
        offload."""
        self.prior_pipe.enable_model_cpu_offload(gpu_id=gpu_id)
        self.decoder_pipe.enable_model_cpu_offload(gpu_id=gpu_id)

    def progress_bar(self, iterable=None, total=None):
        # Bug fix: this method previously also called
        # self.decoder_pipe.enable_model_cpu_offload(), silently enabling CPU
        # offload as a side effect of configuring a progress bar; that call
        # now lives in the dedicated enable_model_cpu_offload method above.
        self.prior_pipe.progress_bar(iterable=iterable, total=total)
        self.decoder_pipe.progress_bar(iterable=iterable, total=total)

    def set_progress_bar_config(self, **kwargs):
        self.prior_pipe.set_progress_bar_config(**kwargs)
        self.decoder_pipe.set_progress_bar_config(**kwargs)

    _grad()
    _example_docstring(TEXT2IMAGE_EXAMPLE_DOC_STRING)
    def __call__(self, prompt: Union[(str, List[str])], negative_prompt: Optional[Union[(str, List[str])]]=None, num_inference_steps: int=100, guidance_scale: float=4.0, num_images_per_prompt: int=1, height: int=512, width: int=512, prior_guidance_scale: float=4.0, prior_num_inference_steps: int=25, generator: Optional[Union[(torch.Generator, List[torch.Generator])]]=None, latents: Optional[torch.FloatTensor]=None, output_type: Optional[str]='pil', callback: Optional[Callable[([int, int, torch.FloatTensor], None)]]=None, callback_steps: int=1, return_dict: bool=True, prior_callback_on_step_end: Optional[Callable[([int, int, Dict], None)]]=None, prior_callback_on_step_end_tensor_inputs: List[str]=['latents'], callback_on_step_end: Optional[Callable[([int, int, Dict], None)]]=None, callback_on_step_end_tensor_inputs: List[str]=['latents']):
        """Run prior then decoder: text prompt -> image embeddings -> image."""
        prior_outputs = self.prior_pipe(prompt=prompt, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, num_inference_steps=prior_num_inference_steps, generator=generator, latents=latents, guidance_scale=prior_guidance_scale, output_type='pt', return_dict=False, callback_on_step_end=prior_callback_on_step_end, callback_on_step_end_tensor_inputs=prior_callback_on_step_end_tensor_inputs)
        image_embeds = prior_outputs[0]
        negative_image_embeds = prior_outputs[1]
        prompt = ([prompt] if (not isinstance(prompt, (list, tuple))) else prompt)
        # Replicate prompts so their count matches the embedding batch size.
        if ((len(prompt) < image_embeds.shape[0]) and ((image_embeds.shape[0] % len(prompt)) == 0)):
            prompt = ((image_embeds.shape[0] // len(prompt)) * prompt)
        outputs = self.decoder_pipe(image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, width=width, height=height, num_inference_steps=num_inference_steps, generator=generator, guidance_scale=guidance_scale, output_type=output_type, callback=callback, callback_steps=callback_steps, return_dict=return_dict, callback_on_step_end=callback_on_step_end, callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs)
        self.maybe_free_model_hooks()
        return outputs
class ArithmeticSharedTensor(object):
    """Additive secret-shared tensor over a fixed-point ring.

    Each party holds one additive share of the fixed-point-encoded value; the
    plaintext is recovered by summing shares across parties (``reveal``).
    Several code paths assert a 3-party setting and delegate multiplication/
    convolution/truncation to the ``resharing`` module.

    NOTE(review): several members below are used as plain attributes or called
    without an instance elsewhere in this class (``device``, ``is_cuda``,
    ``share`` getter/setter pair, ``rank``, ``from_shares``, ``PRZS``,
    ``stack``, ``reveal_batch``); they were presumably decorated with
    @property / @share.setter / @staticmethod in the original source —
    confirm before editing.
    """

    def __init__(self, tensor=None, size=None, precision=None, src=0, device=None):
        # src == SENTINEL skips initialization entirely (used internally by
        # alternate constructors such as from_shares and PRZS).
        self.rep_share = None
        if (src == SENTINEL):
            return
        assert (isinstance(src, int) and (src >= 0) and (src < comm.get().get_world_size())), 'invalid tensor source'
        if ((device is None) and hasattr(tensor, 'device')):
            device = tensor.device
        self.encoder = FixedPointEncoder(precision_bits=precision)
        if (tensor is not None):
            # Integer tensors are encoded as floats unless precision is 0.
            if (is_int_tensor(tensor) and (precision != 0)):
                tensor = tensor.float()
            tensor = self.encoder.encode(tensor)
            tensor = tensor.to(device=device)
            size = tensor.size()
        # Every party starts from a pseudo-random zero sharing; only the
        # source party then adds the plaintext to its share.
        self.share = ArithmeticSharedTensor.PRZS(size, device=device).share
        if (self.rank == src):
            assert (tensor is not None), 'Source must provide a data tensor'
            if hasattr(tensor, 'src'):
                assert (tensor.src == src), 'Source of data tensor must match source of encryption'
            self.share += tensor

    def device(self):
        # Device of the underlying share tensor.
        return self._tensor.device

    def is_cuda(self):
        return self._tensor.is_cuda

    def to(self, *args, **kwargs):
        """Move/cast the underlying share in place; returns self."""
        self._tensor = self._tensor.to(*args, **kwargs)
        return self

    def cuda(self, *args, **kwargs):
        # CUDA shares are wrapped in CUDALongTensor for 64-bit support.
        self._tensor = CUDALongTensor(self._tensor.cuda(*args, **kwargs))
        return self

    def cpu(self, *args, **kwargs):
        self._tensor = self._tensor.cpu(*args, **kwargs)
        return self

    def share(self):
        # Getter for the local additive share.
        return self._tensor

    def share(self, value):
        # Setter counterpart (presumably @share.setter originally).
        self._tensor = value

    def from_shares(share, precision=None, src=0, device=None):
        """Build an ArithmeticSharedTensor directly from an existing share."""
        result = ArithmeticSharedTensor(src=SENTINEL)
        share = (share.to(device) if (device is not None) else share)
        result.share = (CUDALongTensor(share) if share.is_cuda else share)
        result.encoder = FixedPointEncoder(precision_bits=precision)
        return result

    def PRZS(*size, device=None):
        """Pseudo-random zero share: shares drawn from two shared generators
        cancel to zero when summed over the ring of parties."""
        tensor = ArithmeticSharedTensor(src=SENTINEL)
        current_share = generate_random_ring_element(*size, generator=comm.get().get_generator(0, device=device), device=device)
        next_share = generate_random_ring_element(*size, generator=comm.get().get_generator(1, device=device), device=device)
        tensor.share = (current_share - next_share)
        return tensor

    def rank(self):
        # This party's rank in the communicator.
        return comm.get().get_rank()

    def shallow_copy(self):
        """Copy that aliases (does not clone) the underlying share tensors."""
        result = ArithmeticSharedTensor(src=SENTINEL)
        result.encoder = self.encoder
        result.share = self.share
        result.rep_share = self.rep_share
        return result

    def copy_(self, other):
        """In-place copy of another shared tensor's shares and encoder."""
        self.share.copy_(other.share)
        if (self.rep_share is not None):
            self.rep_share.copy_(other.rep_share)
        self.encoder = other.encoder

    def __repr__(self):
        return f'ArithmeticSharedTensor({self.share})'

    def __bool__(self):
        # A share carries no information about the plaintext's truthiness.
        raise RuntimeError('Cannot evaluate ArithmeticSharedTensors to boolean values')

    def __nonzero__(self):
        raise RuntimeError('Cannot evaluate ArithmeticSharedTensors to boolean values')

    def __setitem__(self, index, value):
        """Assign into a slice; public values are secret-shared first."""
        if (isinstance(value, (int, float)) or is_tensor(value)):
            value = ArithmeticSharedTensor(value)
        assert isinstance(value, ArithmeticSharedTensor), ('Unsupported input type %s for __setitem__' % type(value))
        self.share.__setitem__(index, value.share)

    def pad(self, pad, mode='constant', value=0):
        """Constant-pad the shared tensor.

        Public pad values are encoded and added by rank 0 only (other ranks
        pad with zeros) so the shares still sum to the padded plaintext.
        """
        assert (mode == 'constant'), ('Padding with mode %s is currently unsupported' % mode)
        result = self.shallow_copy()
        if isinstance(value, (int, float)):
            value = self.encoder.encode(value).item()
            if (result.rank == 0):
                result.share = torch.nn.functional.pad(result.share, pad, mode=mode, value=value)
            else:
                result.share = torch.nn.functional.pad(result.share, pad, mode=mode, value=0)
        elif isinstance(value, ArithmeticSharedTensor):
            assert (value.dim() == 0), 'Private values used for padding must be 0-dimensional'
            value = value.share.item()
            result.share = torch.nn.functional.pad(result.share, pad, mode=mode, value=value)
        else:
            raise TypeError(('Cannot pad ArithmeticSharedTensor with a %s value' % type(value)))
        return result

    def stack(tensors, *args, **kwargs):
        """Stack shared tensors; public tensors are secret-shared first."""
        for (i, tensor) in enumerate(tensors):
            if is_tensor(tensor):
                tensors[i] = ArithmeticSharedTensor(tensor)
            assert isinstance(tensors[i], ArithmeticSharedTensor), ("Can't stack %s with ArithmeticSharedTensor" % type(tensor))
        result = tensors[0].shallow_copy()
        result.share = torch_stack([tensor.share for tensor in tensors], *args, **kwargs)
        return result

    def reveal_batch(tensor_or_list, dst=None):
        """Batched reveal: decode-free share reduction for one tensor or a
        list of tensors (to all parties, or only to ``dst``)."""
        if isinstance(tensor_or_list, ArithmeticSharedTensor):
            return tensor_or_list.reveal(dst=dst)
        assert isinstance(tensor_or_list, list), f'Invalid input type into reveal {type(tensor_or_list)}'
        shares = [tensor.share for tensor in tensor_or_list]
        if (dst is None):
            return comm.get().all_reduce(shares, batched=True)
        else:
            return comm.get().reduce(shares, dst=dst, batched=True)

    def reveal(self, dst=None):
        """Sum shares across parties; result is still fixed-point encoded."""
        tensor = self.share.clone()
        if (dst is None):
            return comm.get().all_reduce(tensor)
        else:
            return comm.get().reduce(tensor, dst=dst)

    def get_plain_text(self, dst=None):
        """Reveal and decode to a plaintext torch tensor."""
        # Shortcut: empty tensors need no communication.
        if (self.nelement() < 1):
            return torch.empty(self.share.size())
        return self.encoder.decode(self.reveal(dst=dst))

    def _arithmetic_function_(self, y, op, *args, **kwargs):
        # In-place variant of _arithmetic_function.
        return self._arithmetic_function(y, op, *args, inplace=True, **kwargs)

    def _arithmetic_function(self, y, op, inplace=False, *args, **kwargs):
        """Shared dispatcher for add/sub/mul/matmul/conv* against either a
        public value (int/float/torch tensor) or another shared tensor.

        For multiplicative ops the fixed-point scale doubles, so the result
        is rescaled via 3-party truncation (or a plain division otherwise).
        """
        assert (op in ['add', 'sub', 'mul', 'matmul', 'conv1d', 'conv2d', 'conv_transpose1d', 'conv_transpose2d']), f'Provided op `{op}` is not a supported arithmetic function'
        additive_func = (op in ['add', 'sub'])
        public = (isinstance(y, (int, float)) or is_tensor(y))
        private = isinstance(y, ArithmeticSharedTensor)
        if inplace:
            result = self
            if (additive_func or ((op == 'mul') and public)):
                op += '_'
        else:
            result = self.clone()
        if public:
            y = result.encoder.encode(y, device=self.device)
            if additive_func:
                # Only rank 0 adds the public constant; others just broadcast
                # their share to the result's shape.
                if (result.rank == 0):
                    result.share = getattr(result.share, op)(y)
                else:
                    result.share = torch.broadcast_tensors(result.share, y)[0]
            elif (op == 'mul_'):
                result.share = result.share.mul_(y)
            else:
                result.share = getattr(torch, op)(result.share, y, *args, **kwargs)
        elif private:
            if additive_func:
                # Additive ops work share-wise with no communication.
                result.share = getattr(result.share, op)(y.share)
            else:
                # Private multiplication requires the 3-party resharing protocol.
                assert (comm.get().get_world_size() == 3)
                result.share.set_(getattr(resharing, op)(result, y, *args, **kwargs).share.data)
        else:
            raise TypeError(('Cannot %s %s with %s' % (op, type(y), type(self))))
        # Scale correction for multiplicative ops (the product of two encoded
        # values carries scale^2).
        if (not additive_func):
            if public:
                if (self.encoder.scale > 1):
                    if (comm.get().get_world_size() == 3):
                        result.share.set_(resharing.truncation(result, result.encoder.scale).share.data)
                        return result
                    return result.div_(result.encoder.scale)
                else:
                    result.encoder = self.encoder
            elif ((self.encoder.scale > 1) and (y.encoder.scale > 1)):
                if (comm.get().get_world_size() == 3):
                    result.share.set_(resharing.truncation(result, result.encoder.scale).share.data)
                    return result
                return result.div_(result.encoder.scale)
            elif (self.encoder.scale > 1):
                result.encoder = self.encoder
            else:
                result.encoder = y.encoder
        return result

    def add(self, y):
        return self._arithmetic_function(y, 'add')

    def add_(self, y):
        return self._arithmetic_function_(y, 'add')

    def sub(self, y):
        return self._arithmetic_function(y, 'sub')

    def sub_(self, y):
        return self._arithmetic_function_(y, 'sub')

    def mul(self, y):
        # Public integer multiply needs no rescaling: scale is unchanged.
        if isinstance(y, int):
            result = self.clone()
            result.share = (self.share * y)
            return result
        return self._arithmetic_function(y, 'mul')

    def mul_(self, y):
        if (isinstance(y, int) or is_int_tensor(y)):
            self.share *= y
            return self
        return self._arithmetic_function_(y, 'mul')

    def div(self, y):
        result = self.clone()
        if isinstance(y, CrypTensor):
            result.share = torch.broadcast_tensors(result.share, y.share)[0].clone()
        elif is_tensor(y):
            result.share = torch.broadcast_tensors(result.share, y)[0].clone()
        return result.div_(y)

    def div_(self, y):
        """In-place division by a public scalar/tensor.

        Integer divisors use exact share division in the 2-party case and a
        multiplicative inverse otherwise; float divisors multiply by the
        reciprocal.
        """
        # Normalize float divisors that are exact integers.
        if (isinstance(y, float) and (int(y) == y)):
            y = int(y)
        if (is_float_tensor(y) and y.frac().eq(0).all()):
            y = y.long()
        if (isinstance(y, int) or is_int_tensor(y)):
            if (comm.get().get_world_size() > 2):
                self.mul_((1 / y))
            else:
                self.share //= y
            return self
        if isinstance(y, float):
            y = torch.tensor([y], dtype=torch.float, device=self.device)
        assert is_float_tensor(y), ('Unsupported type for div_: %s' % type(y))
        return self.mul_(y.reciprocal())

    def matmul(self, y):
        return self._arithmetic_function(y, 'matmul')

    def prod(self, dim=None, keepdim=False):
        """Product of elements along ``dim`` (or of all elements) computed by
        log(n) rounds of pairwise multiplications."""
        if (dim is None):
            return self.flatten().prod(dim=0)
        result = self.clone()
        while (result.size(dim) > 1):
            size = result.size(dim)
            # Multiply the two halves; carry an odd leftover element forward.
            (x, y, remainder) = result.split([(size // 2), (size // 2), (size % 2)], dim=dim)
            result = x.mul_(y)
            result.share = torch_cat([result.share, remainder.share], dim=dim)
        if (not keepdim):
            result.share = result.share.squeeze(dim)
        return result

    def mean(self, *args, **kwargs):
        """Mean over the given dims: shared sum divided by a public count."""
        result = self.sum(*args, **kwargs)
        if (self.dim() == 0):
            return result
        size = self.size()
        if (len(args) > 0):
            dims = ([args[0]] if isinstance(args[0], int) else args[0])
            size = [size[dim] for dim in dims]
        assert (len(size) > 0), 'cannot reduce over zero dimensions'
        divisor = reduce((lambda x, y: (x * y)), size)
        return result.div(divisor)

    def var(self, *args, **kwargs):
        """Population variance: mean of squared deviations (divides by N)."""
        if (len(args) > 0):
            mean = self.mean(*args, **{'keepdim': True})
        else:
            mean = self.mean()
        result = (self - mean).square().sum(*args, **kwargs)
        size = self.size()
        if (len(args) > 0):
            dims = ([args[0]] if isinstance(args[0], int) else args[0])
            size = [size[dim] for dim in dims]
        assert (len(size) > 0), 'cannot reduce over zero dimensions'
        divisor = reduce((lambda x, y: (x * y)), size)
        return result.div(divisor)

    def conv1d(self, kernel, **kwargs):
        return self._arithmetic_function(kernel, 'conv1d', **kwargs)

    def conv2d(self, kernel, **kwargs):
        return self._arithmetic_function(kernel, 'conv2d', **kwargs)

    def conv_transpose1d(self, kernel, **kwargs):
        return self._arithmetic_function(kernel, 'conv_transpose1d', **kwargs)

    def conv_transpose2d(self, kernel, **kwargs):
        return self._arithmetic_function(kernel, 'conv_transpose2d', **kwargs)

    def index_add(self, dim, index, tensor):
        return self.clone().index_add_(dim, index, tensor)

    def index_add_(self, dim, index, tensor):
        """In-place index_add; public values are encoded and added by rank 0
        only so shares stay consistent."""
        public = (isinstance(tensor, (int, float)) or is_tensor(tensor))
        private = isinstance(tensor, ArithmeticSharedTensor)
        if public:
            enc_tensor = self.encoder.encode(tensor)
            if (self.rank == 0):
                self._tensor.index_add_(dim, index, enc_tensor)
        elif private:
            self._tensor.index_add_(dim, index, tensor._tensor)
        else:
            raise TypeError('index_add second tensor of unsupported type')
        return self

    def scatter_add(self, dim, index, other):
        return self.clone().scatter_add_(dim, index, other)

    def scatter_add_(self, dim, index, other):
        # Same rank-0-only scheme as index_add_ for public operands.
        public = (isinstance(other, (int, float)) or is_tensor(other))
        private = isinstance(other, ArithmeticSharedTensor)
        if public:
            if (self.rank == 0):
                self.share.scatter_add_(dim, index, self.encoder.encode(other))
        elif private:
            self.share.scatter_add_(dim, index, other.share)
        else:
            raise TypeError('scatter_add second tensor of unsupported type')
        return self

    def avg_pool2d(self, kernel_size, *args, **kwargs):
        """Average pooling as a sum-pool followed by a public division."""
        z = self.sum_pool2d(kernel_size, *args, **kwargs)
        if isinstance(kernel_size, (int, float)):
            pool_size = (kernel_size ** 2)
        else:
            pool_size = (kernel_size[0] * kernel_size[1])
        return (z / pool_size)

    def sum_pool2d(self, *args, **kwargs):
        # divisor_override=1 turns avg_pool2d into a pure sum over windows.
        result = self.shallow_copy()
        result.share = torch.nn.functional.avg_pool2d(self.share, *args, **kwargs, divisor_override=1)
        return result

    def take(self, index, dimension=None):
        """Gather elements by flat index, or along ``dimension`` if given."""
        result = self.shallow_copy()
        index = index.long()
        if (dimension is None):
            result.share = torch.take(self.share, index)
        else:
            all_indices = [slice(0, x) for x in self.size()]
            all_indices[dimension] = index
            result.share = self.share[all_indices]
        return result

    def neg_(self):
        # Negating every share negates the shared value.
        self.share.neg_()
        return self

    def neg(self):
        return self.clone().neg_()

    def square(self):
        """Square via the 3-party resharing protocol, with rescaling."""
        result = self.clone()
        assert (comm.get().get_world_size() == 3)
        result.share = resharing.square(self).div_(self.encoder.scale).share
        return result

    def dot(self, y, weights=None):
        """(Optionally weighted) dot product of two same-sized tensors."""
        assert (self.size() == y.size()), 'Number of elements do not match'
        if (weights is not None):
            assert (weights.size() == self.size()), 'Incorrect number of weights'
            result = (self * weights)
        else:
            result = self.clone()
        return result.mul_(y).sum()

    def ger(self, y):
        """Outer product of two 1-D tensors via a (n,1) x (1,m) matmul."""
        assert ((self.dim() == 1) and (y.dim() == 1)), 'Outer product must be on 1D tensors'
        return self.view(((- 1), 1)).matmul(y.view((1, (- 1))))

    def where(self, condition, y):
        """Select self where condition holds, y otherwise:
        self*condition + y*(1 - condition)."""
        if is_tensor(condition):
            condition = condition.float()
            y_masked = (y * (1 - condition))
        else:
            # Private condition: (1 - condition) is itself a shared tensor.
            y_masked = ((1 - condition) * y)
        return ((self * condition) + y_masked)

    def scatter_(self, dim, index, src):
        """In-place scatter; public src tensors are secret-shared first."""
        if is_tensor(src):
            src = ArithmeticSharedTensor(src)
        assert isinstance(src, ArithmeticSharedTensor), ('Unrecognized scatter src type: %s' % type(src))
        self.share.scatter_(dim, index, src.share)
        return self

    def scatter(self, dim, index, src):
        result = self.clone()
        return result.scatter_(dim, index, src)

    # Operator aliases wiring Python arithmetic syntax to the methods above.
    __add__ = add
    __iadd__ = add_
    __radd__ = __add__
    __sub__ = sub
    __isub__ = sub_
    __mul__ = mul
    __imul__ = mul_
    __rmul__ = __mul__
    __div__ = div
    __truediv__ = div
    __itruediv__ = div_
    __neg__ = neg

    def __rsub__(self, tensor):
        # tensor - self == (-self) + tensor
        return ((- self) + tensor)
def get_offset(beta2, rho_inf):
    """Return the smallest positive offset at which ``rho_fn`` exceeds 4.

    Args:
        beta2: second-moment decay rate; must be greater than 0.6.
        rho_inf: asymptotic value of the SMA length passed through to rho_fn.

    Raises:
        ValueError: if ``beta2`` is not greater than 0.6.
    """
    if not (beta2 > 0.6):
        raise ValueError('beta2 ({}) must be greater than 0.6'.format(beta2))
    offset = 1
    # Scan forward until rho_fn first rises above the threshold of 4.
    while rho_fn(offset, beta2, rho_inf) <= 4:
        offset += 1
    return offset
def load_testing(root_path, dir, batch_size, kwargs):
    """Build a shuffled DataLoader over an ImageFolder test split.

    Images are resized to 224x224 and converted to tensors; ``kwargs`` is a
    dict of extra DataLoader keyword arguments.
    """
    transform = transforms.Compose([
        transforms.Resize([224, 224]),
        transforms.ToTensor(),
    ])
    dataset = datasets.ImageFolder(root=os.path.join(root_path, dir), transform=transform)
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, **kwargs)
class FlattenParameter(message.Message):
    """Protobuf message for the Flatten layer's parameters.

    NOTE(review): this mirrors protoc-generated code (Python-2-era metaclass
    style); change the .proto definition and regenerate rather than editing
    this class by hand.
    """
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _FLATTENPARAMETER
def _gen_mixnet_s(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
    """Create a MixNet-Small model.

    The architecture definition encodes one list of block strings per stage;
    ``channel_multiplier`` scales the per-stage channel counts and remaining
    keyword arguments flow into the model constructor.
    """
    arch_def = [
        ['ds_r1_k3_s1_e1_c16'],
        ['ir_r1_k3_a1.1_p1.1_s2_e6_c24', 'ir_r1_k3_a1.1_p1.1_s1_e3_c24'],
        ['ir_r1_k3.5.7_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'],
        ['ir_r1_k3.5.7_p1.1_s2_e6_c80_se0.25_nsw', 'ir_r2_k3.5_p1.1_s1_e6_c80_se0.25_nsw'],
        ['ir_r1_k3.5.7_a1.1_p1.1_s1_e6_c120_se0.5_nsw', 'ir_r2_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'],
        ['ir_r1_k3.5.7.9.11_s2_e6_c200_se0.5_nsw', 'ir_r2_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'],
    ]
    model_kwargs = dict(
        block_args=decode_arch_def(arch_def),
        num_features=1536,
        stem_size=16,
        channel_multiplier=channel_multiplier,
        norm_kwargs=resolve_bn_args(kwargs),
        **kwargs,
    )
    return _create_effnet(model_kwargs, variant, pretrained)
def load_data(loc):
    """Load a CSV file and return its rows as a NumPy array.

    Missing values are replaced with empty strings before conversion.

    Args:
        loc: path to the CSV file.

    Returns:
        numpy.ndarray with one row per CSV record.
    """
    df = pd.read_csv(loc, engine='python', encoding='utf-8')
    # BUG FIX: DataFrame.fillna returns a NEW frame (it is not in-place by
    # default); the original call discarded the result, leaving NaNs in the
    # output array.
    df = df.fillna('')
    return np.asarray(df)
def conv3x3(in_planes: int, out_planes: int, stride: int=1, groups: int=1, dilation: int=1) -> HalutConv2d:
    """3x3 Halut convolution with padding equal to the dilation, no bias, and
    a fixed split_factor of 4."""
    return HalutConv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        groups=groups,
        bias=False,
        dilation=dilation,
        split_factor=4,
    )
class RFPoseDecode(nn.Module):
    """Decoder head that upsamples 3D (time x range x azimuth) feature maps
    into per-class confidence maps at the configured radar map resolution.

    Depends on module-level globals n_class, rodnet_configs and
    radar_configs — TODO confirm where those are defined.
    """

    def __init__(self):
        super(RFPoseDecode, self).__init__()
        # Two transposed convs, each upscaling the last two (spatial) dims by 2.
        self.convt1 = nn.ConvTranspose3d(in_channels=64, out_channels=64, kernel_size=(3, 6, 6), stride=(1, 2, 2), padding=(1, 2, 2))
        self.convt2 = nn.ConvTranspose3d(in_channels=64, out_channels=64, kernel_size=(3, 6, 6), stride=(1, 2, 2), padding=(1, 2, 2))
        # Projects features down to n_class output channels.
        self.convt3 = nn.ConvTranspose3d(in_channels=64, out_channels=n_class, kernel_size=(3, 6, 6), stride=(1, 2, 2), padding=(1, 2, 2))
        # NOTE(review): convt4 and sigmoid are defined but never used in
        # forward() — confirm whether they are dead code or a missing branch.
        self.convt4 = nn.ConvTranspose3d(in_channels=64, out_channels=n_class, kernel_size=(3, 6, 6), stride=(1, 4, 4), padding=(1, 1, 1))
        self.prelu = nn.PReLU()
        self.sigmoid = nn.Sigmoid()
        # Final resize to the exact (win_size, ramap_rsize, ramap_asize) target.
        self.upsample = nn.Upsample(size=(rodnet_configs['win_size'], radar_configs['ramap_rsize'], radar_configs['ramap_asize']), mode='nearest')

    def forward(self, x):
        """Decode features: two PReLU-activated upscales, class projection,
        then nearest-neighbor resize to the target radar map size."""
        x = self.prelu(self.convt1(x))
        x = self.prelu(self.convt2(x))
        x = self.convt3(x)
        x = self.upsample(x)
        return x
class SimulTransAgent(Agent):
    """Base agent for simultaneous translation evaluation.

    Wraps a pretrained fairseq model and implements a READ/WRITE policy over
    incrementally arriving source text.  Subclasses must implement
    load_dictionary, update_states and finish_read.
    """

    def __init__(self, args):
        self.load_model(args)
        self.build_word_splitter(args)
        self.max_len = args.max_len
        self.eos = DEFAULT_EOS

    def add_args(parser):
        # NOTE(review): no `self` parameter — presumably intended as a
        # @staticmethod used for CLI registration; confirm call sites.
        parser.add_argument('--model-path', type=str, required=True, help='path to your pretrained model.')
        parser.add_argument('--data-bin', type=str, required=True, help='Path of data binary')
        parser.add_argument('--user-dir', type=str, default='example/simultaneous_translation', help='User directory for simultaneous translation')
        parser.add_argument('--src-splitter-type', type=str, default=None, help='Subword splitter type for source text')
        parser.add_argument('--tgt-splitter-type', type=str, default=None, help='Subword splitter type for target text')
        parser.add_argument('--src-splitter-path', type=str, default=None, help='Subword splitter model path for source text')
        parser.add_argument('--tgt-splitter-path', type=str, default=None, help='Subword splitter model path for target text')
        parser.add_argument('--max-len', type=int, default=150, help='Maximum length difference between source and target prediction')
        parser.add_argument('--model-overrides', default='{}', type=str, metavar='DICT', help='A dictionary used to override model args at generation that were used during model training')
        return parser

    def load_dictionary(self, task):
        # Subclasses populate self.dict (read in write_action).
        raise NotImplementedError

    def load_model(self, args):
        """Load the checkpoint onto CPU, rebuild the task/model, restore weights."""
        args.user_dir = os.path.join(os.path.dirname(__file__), '..', '..')
        utils.import_user_module(args)
        filename = args.model_path
        if (not os.path.exists(filename)):
            raise IOError('Model file not found: {}'.format(filename))
        state = checkpoint_utils.load_checkpoint_to_cpu(filename, json.loads(args.model_overrides))
        saved_args = state['args']
        # Point the saved training args at the eval-time data binary.
        saved_args.data = args.data_bin
        task = tasks.setup_task(saved_args)
        self.model = task.build_model(saved_args)
        self.model.load_state_dict(state['model'], strict=True)
        self.load_dictionary(task)

    def init_states(self):
        """Fresh per-sentence decoding state (indices/tokens/segments per side)."""
        return {'indices': {'src': [], 'tgt': []}, 'tokens': {'src': [], 'tgt': []}, 'segments': {'src': [], 'tgt': []}, 'steps': {'src': 0, 'tgt': 0}, 'finished': False, 'finish_read': False, 'model_states': {}}

    def update_states(self, states, new_state):
        raise NotImplementedError

    def policy(self, states):
        """Pick the next action: READ more source, WRITE a target word, or finish."""
        action = None
        while (action is None):
            if states['finished']:
                return self.finish_action()
            # decision == 0 requests a READ (unless the source is exhausted).
            decision = self.model.decision_from_states(states)
            if ((decision == 0) and (not self.finish_read(states))):
                action = self.read_action(states)
            else:
                # write_action may return None (no full word ready) -> loop again.
                action = self.write_action(states)
        return action

    def finish_read(self, states):
        raise NotImplementedError

    def write_action(self, states):
        """Predict one subword token; emit a SEND once a full word is complete."""
        (token, index) = self.model.predict_from_states(states)
        if ((index == self.dict['tgt'].eos()) or (len(states['tokens']['tgt']) > self.max_len)):
            # Translation finished: flush everything up to the last full word.
            states['finished'] = True
            end_idx_last_full_word = self._target_length(states)
        else:
            states['tokens']['tgt'] += [token]
            end_idx_last_full_word = self.word_splitter['tgt'].end_idx_last_full_word(states['tokens']['tgt'])
        self._append_indices(states, [index], 'tgt')
        if (end_idx_last_full_word > states['steps']['tgt']):
            # A new full word is available: merge its subwords and send it.
            word = self.word_splitter['tgt'].merge(states['tokens']['tgt'][states['steps']['tgt']:end_idx_last_full_word])
            states['steps']['tgt'] = end_idx_last_full_word
            states['segments']['tgt'] += [word]
            return {'key': SEND, 'value': word}
        else:
            return None

    def read_action(self, states):
        return {'key': GET, 'value': None}

    def finish_action(self):
        return {'key': SEND, 'value': DEFAULT_EOS}

    def reset(self):
        pass

    def finish_eval(self, states, new_state):
        """True when the server sent nothing new and no source remains buffered."""
        if ((len(new_state) == 0) and (len(states['indices']['src']) == 0)):
            return True
        return False

    def _append_indices(self, states, new_indices, key):
        states['indices'][key] += new_indices

    def _target_length(self, states):
        return len(states['tokens']['tgt'])
def conv2d(x, y, **kwargs):
    """2D convolution dispatched through the replicated-secret-sharing
    protocol handler; extra kwargs (e.g. stride/padding) are forwarded."""
    return __replicated_secret_sharing_protocol('conv2d', x, y, **kwargs)
def to_bigdl_2d_padding(border_mode, *args):
    """Convert a Keras-style border mode to a BigDL (pad_h, pad_w) pair.

    'valid' maps to (0, 0).  'same' with no extra args maps to the BigDL
    sentinel (-1, -1); with 4 args only the height padding is computed; with
    8 args both height and width paddings are computed.  Any other mode is
    rejected via invalidInputError.
    """
    if border_mode == 'same':
        arg_count = len(args)
        if arg_count == 0:
            return (-1, -1)
        if arg_count == 4:
            h, kh, dh, dilation_h = args
            return (__calculate_2d_same_padding(h, kh, dh, dilation_h), 0)
        if arg_count == 8:
            h, kh, dh, dilation_h, w, kw, dw, dilation_w = args
            pad_h = __calculate_2d_same_padding(h, kh, dh, dilation_h)
            pad_w = __calculate_2d_same_padding(w, kw, dw, dilation_w)
            return (pad_h, pad_w)
    elif border_mode == 'valid':
        return (0, 0)
    else:
        invalidInputError(False, ('Unsupported border mode: %s' % border_mode))
def test_tell_fails_when_ask_tell_mismatch_dqd(scheduler_fixture):
    """After ask_dqd(), a plain tell() must raise RuntimeError (presumably
    DQD results have to go through the matching tell_dqd — confirm API)."""
    (scheduler, *_) = scheduler_fixture
    _ = scheduler.ask_dqd()
    with pytest.raises(RuntimeError):
        scheduler.tell(None, None)
def parse_bookshelf_pl(pl, node_dict):
    """Parse a Bookshelf .pl placement file and update node positions in place.

    Skips the format header (first non-empty line) and comment lines; for
    every placement line sets node_dict[name].llx / .lly to the integer
    coordinates.

    Args:
        pl: path to the .pl file.
        node_dict: mapping from node name to node objects with llx/lly attrs.

    Raises:
        ValueError: on a placement line with fewer than 4 tokens.
    """
    with open(pl, 'r') as f:
        lines = [l for l in (line.strip() for line in f) if l]
    # Skip the "UCLA pl 1.0"-style header line.
    for l in lines[1:]:
        if l.startswith('#'):
            continue
        tokens = l.split()
        # FIX: validate with an explicit raise instead of `assert`, which is
        # stripped under `python -O`.
        if len(tokens) <= 3:
            raise ValueError('malformed placement line: %r' % l)
        (name, x, y) = (tokens[0], int(float(tokens[1])), int(float(tokens[2])))
        n = node_dict[name]
        (n.llx, n.lly) = (x, y)
def adjacency(graph, directed=False, reversed=False, stochastic=False, heuristic=None):
    """Build (and cache on the graph) an adjacency map {id1: {id2: weight}}.

    Edge weight is 1.0 - 0.5 * e.weight, optionally increased by a heuristic
    cost; undirected graphs get symmetric entries; stochastic mode normalizes
    each node's outgoing weights to sum to 1.  The cache is keyed on the
    option set (the heuristic participates via its code object).
    """
    cache_key = (directed, reversed, stochastic, (heuristic and heuristic.__code__))
    cached = graph._adjacency
    if cached is not None and cached[1:] == cache_key:
        return cached[0]
    adj = {node.id: {} for node in graph.nodes}
    for edge in graph.edges:
        if reversed:
            id1, id2 = edge.node2.id, edge.node1.id
        else:
            id1, id2 = edge.node1.id, edge.node2.id
        weight = 1.0 - (0.5 * edge.weight)
        if heuristic:
            weight += heuristic(id1, id2)
        adj[id1][id2] = weight
        if not directed:
            adj[id2][id1] = weight
    if stochastic:
        # Normalize each row so outgoing weights form a distribution.
        for neighbors in adj.values():
            total = sum(neighbors.values())
            for id2 in neighbors:
                neighbors[id2] /= total
    graph._adjacency = (adj,) + cache_key
    return adj
def display_tree_mnist(embeddings, true_labels=None, transparency=None, legend_labels=None, numeric_labels=True, distinct=False):
    """Scatter-plot a stack of 1-D embeddings, one horizontal row per level.

    Row i plots embeddings[i] at y == i.  Points are colored by true_labels
    either on a continuous 'viridis' colormap (numeric labels, colorbar) or
    with distinct per-label colors (legend).  Shows the figure and returns
    None.
    """
    dotsize = 10
    if (transparency is None):
        if (true_labels is None):
            transparency = 0.05
        else:
            # More points -> more transparency, keeping dense plots readable.
            transparency = (300 / float(len(true_labels)))
    if (distinct or (not numeric_labels)):
        # One distinct color per legend label (requires legend_labels).
        colordict = {}
        num_colors = len(legend_labels)
        for i in range(num_colors):
            colordict[legend_labels[i]] = getColor('viridis', num_colors, i, distinct=True)
    plt.figure()
    # Reshape to one row per level; assumes axis 1 indexes levels — TODO confirm.
    embeddings = embeddings.reshape(embeddings.shape[1], (- 1))
    for (i, embedding) in enumerate(embeddings):
        if ((true_labels is not None) and numeric_labels and (not distinct)):
            plt.scatter(embedding, (np.ones(embedding.shape[0]) * i), alpha=transparency, c=true_labels, s=dotsize)
        elif ((true_labels is not None) and (distinct or (not numeric_labels))):
            plt.scatter(embedding, (np.ones(embedding.shape[0]) * i), alpha=transparency, c=[colordict[x] for x in true_labels], s=dotsize)
        else:
            plt.scatter(embedding, (np.ones(embedding.shape[0]) * i), alpha=transparency, s=dotsize)
    if (true_labels is not None):
        if (legend_labels is not None):
            # Proxy artists so the legend shows fully opaque markers.
            legend_elems = []
            num_colors = len(legend_labels)
            for i in range(num_colors):
                label = legend_labels[i]
                if (distinct or (not numeric_labels)):
                    color = getColor('viridis', num_colors, i, distinct=True)
                else:
                    color = getColor('viridis', num_colors, i)
                legend_elems.append(Line2D([0], [0], marker='o', alpha=1, color='w', markerfacecolor=color, label=label))
            legend = plt.legend(handles=legend_elems)
        else:
            color_bar = plt.colorbar()
            color_bar.set_alpha(1)
            color_bar.draw_all()
    plt.show()
class Generator(nn.Module):
    """Fully-connected GAN generator: maps a 100-dim latent vector to a
    flattened 28x28 image with values in [-1, 1] (Tanh output)."""

    def __init__(self):
        super().__init__()
        # Three widening LeakyReLU blocks followed by a Tanh-squashed output.
        self.layer1 = nn.Sequential(nn.Linear(100, 256), nn.LeakyReLU())
        self.layer2 = nn.Sequential(nn.Linear(256, 512), nn.LeakyReLU())
        self.layer3 = nn.Sequential(nn.Linear(512, 1024), nn.LeakyReLU())
        self.output = nn.Sequential(nn.Linear(1024, 28 * 28), nn.Tanh())

    def forward(self, x):
        """Run the latent batch through each block in order."""
        for block in (self.layer1, self.layer2, self.layer3, self.output):
            x = block(x)
        return x
def _test():
    """Smoke-test the PSPNet model variants: build each network, print its
    parameter count, and check the output segmentation map shape after a
    forward/backward pass."""
    import torch
    in_size = (480, 480)
    aux = False
    pretrained = False
    models = [(pspnet_resnetd50b_voc, 21), (pspnet_resnetd101b_voc, 21), (pspnet_resnetd50b_coco, 21), (pspnet_resnetd101b_coco, 21), (pspnet_resnetd50b_ade20k, 150), (pspnet_resnetd101b_ade20k, 150), (pspnet_resnetd50b_cityscapes, 19), (pspnet_resnetd101b_cityscapes, 19)]
    for (model, num_classes) in models:
        net = model(pretrained=pretrained, in_size=in_size, aux=aux)
        net.eval()
        weight_count = _calc_width(net)
        print('m={}, {}'.format(model.__name__, weight_count))
        # BUG FIX: the original per-model weight-count asserts were syntax
        # errors — every expected value was missing (`weight_count == )`).
        # NOTE(review): re-add exact reference counts once they are known.
        x = torch.randn(1, 3, in_size[0], in_size[1])
        ys = net(x)
        # With aux=True the network returns a tuple; the main output is first.
        y = (ys[0] if aux else ys)
        y.sum().backward()
        assert ((y.size(0) == x.size(0)) and (y.size(1) == num_classes) and (y.size(2) == x.size(2)) and (y.size(3) == x.size(3)))
def graph_keys_dict():
    """Return tf.GraphKeys collection names as a {NAME: value} dict of
    strings, extended with a custom 'AUX_LOSS' key; dunder attributes and
    non-string values are filtered out."""
    raw = dict(tf.GraphKeys.__dict__)
    raw['AUX_LOSS'] = 'aux_loss'
    return {
        key: value
        for (key, value) in raw.items()
        if isinstance(value, str)
        and not (key.startswith('__') and key.endswith('__'))
    }
# BUG FIX: these two skip conditions were bare (condition, reason) tuple
# expressions with no effect; they are clearly meant to be skip decorators.
# NOTE(review): confirm the project used unittest.skipIf (or a custom alias).
@unittest.skipIf((not torch.cuda.is_available()), 'Skip cpu ut, only run on gpu.')
@unittest.skipIf((torch_version() < (2, 0, 0)), 'AtorchTrainer need torch2.0 .')
class AtorchTrainerTest(unittest.TestCase):
    """Distributed smoke test: spawn 4 workers running run_atorch_trainer."""

    def test_atorch_trainer(self):
        world_size = 4
        # Rendezvous endpoint for the spawned process group.
        os.environ['MASTER_ADDR'] = 'localhost'
        os.environ['MASTER_PORT'] = str(find_free_port())
        mp.spawn(run_atorch_trainer, args=(world_size,), nprocs=world_size, join=True)
        # Clear the rendezvous settings so later tests start clean.
        os.environ['MASTER_ADDR'] = ''
        os.environ['MASTER_PORT'] = ''
def ProcessAppendDescriptor(segment, parent_node_name, affix, edge_attributes=None):
    """Render an Append descriptor segment as Graphviz dot statements.

    Builds an Mrecord node with one port per sub-segment/argument part,
    recurses into sub-segments, wires argument nodes into their ports, and
    connects the Append node to its parent.  Returns a list of dot strings.
    """
    dot_graph = []
    names = []
    desc_name = 'Append_{0}'.format(affix)
    # Ports for nested sub-segments (rendered recursively).
    for i in range(len(segment['sub_segments'])):
        sub_segment = segment['sub_segments'][i]
        part_name = '{0}{1}{2}'.format(desc_name, sub_segment['name'], i)
        names.append('<{0}> part {1}'.format(GetDotNodeName(part_name)['node'], i))
        dot_graph += DescriptorSegmentToDot(sub_segment, '{0}:{1}'.format(desc_name, part_name), desc_name)
    # Ports for plain arguments, numbered after the sub-segment parts.
    part_index = len(segment['sub_segments'])
    for i in range(len(segment['arguments'])):
        part_name = '{0}{1}{2}'.format(desc_name, segment['arguments'][i], (part_index + i))
        names.append('<{0}> part {1}'.format(GetDotNodeName(part_name)['node'], (part_index + i)))
        dot_graph.append('{0} -> {1}:{2}'.format(GetDotNodeName(segment['arguments'][i])['node'], GetDotNodeName(desc_name)['node'], GetDotNodeName(part_name)['node']))
    # Mrecord label: '{{port|port|...}|Append}'.
    label = '|'.join(names)
    label = (('{{' + label) + '}|Append}')
    dot_graph.append('{0} [shape=Mrecord, label="{1}"];'.format(GetDotNodeName(desc_name)['node'], label))
    # Optional label/style attributes for the edge up to the parent node.
    attr_string = ''
    if (edge_attributes is not None):
        if ('label' in edge_attributes):
            attr_string += ' label={0} '.format(edge_attributes['label'])
        if ('style' in edge_attributes):
            attr_string += ' style={0} '.format(edge_attributes['style'])
    dot_string = '{0} -> {1} [tailport=s]'.format(GetDotNodeName(desc_name)['node'], GetDotNodeName(parent_node_name)['node'])
    if (attr_string != ''):
        dot_string += ' [{0}] '.format(attr_string)
    dot_graph.append(dot_string)
    return dot_graph
class TFLibrary(Library):
    """Oracle-driven harness for fuzzing TensorFlow APIs.

    Generated programs run under one of three oracles — CRASH (just run),
    CUDA (compare CPU vs GPU results) and PRECISION (compare high- vs
    low-precision runs and their timings) — and each case is filed under a
    success / fail / potential-bug output directory.
    """

    def __init__(self, output_dir, diff_bound=1e-05, time_bound=10, time_thresold=0.001) -> None:
        # diff_bound: numeric tolerance (not referenced in this class — TODO
        # confirm it is used by the base Library); time_bound: low precision
        # may be at most this factor slower than high precision;
        # time_thresold: minimum runtime for a timing comparison to count.
        super().__init__(output_dir)
        self.diff_bound = diff_bound
        self.time_bound = time_bound
        self.time_thresold = time_thresold

    def test_with_oracle(self, api: TFAPI, oracle: OracleType):
        """Generate code for `api` under `oracle`, run it, and file the result."""
        if (oracle == OracleType.CRASH):
            code = 'import tensorflow as tf\n'
            code += self.generate_code(api, oracle)
            with open(join(self.directory, 'temp.py'), 'w') as f:
                f.write(code)
            (results, error) = self.run_code(code)
            if (error == None):
                self.write_to_dir(join(self.output[oracle], 'success'), api.api, code)
            else:
                self.write_to_dir(join(self.output[oracle], 'fail'), api.api, code)
        elif (oracle == OracleType.CUDA):
            code = 'import tensorflow as tf\n'
            code += self.generate_code(api, oracle)
            write_code = (('results = dict()\n' + code) + '\nprint(results)\n')
            with open(join(self.directory, 'temp.py'), 'w') as f:
                f.write(write_code)
            (results, error) = self.run_code(code)
            err_cpu = results[ERR_CPU_KEY]
            err_gpu = results[ERR_GPU_KEY]
            write_dir = ''
            if (error is None):
                if ((err_cpu is None) != (err_gpu is None)):
                    # Error on exactly one device: device-dependent behavior.
                    write_dir = join(self.output[oracle], 'potential-bug')
                elif (err_cpu == None):
                    res_cpu = results[RES_CPU_KEY]
                    res_gpu = results[RES_GPU_KEY]
                    if self.is_equal(res_cpu, res_gpu):
                        write_dir = join(self.output[oracle], 'success')
                    else:
                        # Same program, different results on CPU vs GPU.
                        write_dir = join(self.output[oracle], 'potential-bug')
                elif (('SystemError' in err_cpu) or ('SystemError' in err_gpu)):
                    write_dir = join(self.output[oracle], 'potential-bug')
                else:
                    # Both devices raised a matching, non-system error.
                    write_dir = join(self.output[oracle], 'success')
            elif ('SystemError' in error):
                write_dir = join(self.output[oracle], 'potential-bug')
            else:
                write_dir = join(self.output[oracle], 'fail')
            self.write_to_dir(write_dir, api.api, write_code)
        elif (oracle == OracleType.PRECISION):
            code = 'import tensorflow as tf\n'
            code += 'import time\n'
            code += self.generate_code(api, oracle)
            write_code = (('results = dict()\n' + code) + '\nprint(results)\n')
            with open(join(self.directory, 'temp.py'), 'w') as f:
                f.write(write_code)
            (results, error) = self.run_code(code)
            err_high = results[ERR_HIGH_KEY]
            err_low = results[ERR_LOW_KEY]
            write_dir = ''
            if (error is None):
                if ((err_high is None) != (err_low is None)):
                    write_dir = join(self.output[oracle], 'potential-bug')
                elif (err_high == None):
                    time_high = results[TIME_HIGH_KEY]
                    time_low = results[TIME_LOW_KEY]
                    # Low precision should not be dramatically slower than high
                    # precision (once the run is long enough to measure).
                    if ((time_low >= (self.time_bound * time_high)) and (time_high >= self.time_thresold)):
                        write_dir = join(self.output[oracle], 'potential-bug')
                    else:
                        write_dir = join(self.output[oracle], 'success')
                elif (('SystemError' in err_high) or ('SystemError' in err_low)):
                    write_dir = join(self.output[oracle], 'potential-bug')
                else:
                    write_dir = join(self.output[oracle], 'success')
            elif ('SystemError' in error):
                write_dir = join(self.output[oracle], 'potential-bug')
            else:
                write_dir = join(self.output[oracle], 'fail')
            self.write_to_dir(write_dir, api.api, write_code)

    def generate_code(api: TFAPI, oracle: OracleType) -> str:
        # NOTE(review): no self/cls parameter yet called as
        # self.generate_code(...) above — presumably a @staticmethod whose
        # decorator was lost; confirm against upstream.
        code = ''
        if (oracle == OracleType.CRASH):
            code += api.to_code_oracle(oracle=oracle)
            return code
        elif (oracle == OracleType.CUDA):
            code += api.to_code_oracle(oracle=oracle)
            return code
        elif (oracle == OracleType.PRECISION):
            code += api.to_code_oracle(oracle=oracle)
            return code
        else:
            assert 0

    def run_code(code):
        # NOTE(review): also missing self — see generate_code note.
        # Executes the generated program in-process; the exec'd code is
        # expected to mutate this `results` dict via its local scope.
        results = dict()
        results[ERR_CPU_KEY] = None
        results[ERR_GPU_KEY] = None
        results[ERR_HIGH_KEY] = None
        results[ERR_LOW_KEY] = None
        exec(code)
        error = (results[ERROR_KEY] if (ERROR_KEY in results) else None)
        return (results, error)

    def get_type(x):
        """Classify `x` into an ArgType, falling back to TF_OBJECT."""
        res = Argument.get_type(x)
        if (res != None):
            return res
        if isinstance(x, tf.Tensor):
            return ArgType.TF_TENSOR
        elif isinstance(x, tf.DType):
            return ArgType.TF_DTYPE
        else:
            return ArgType.TF_OBJECT

    def _eval_k(x):
        # Force evaluation of `x` as a concrete NumPy value.
        return tf.convert_to_tensor(x).numpy()

    def get_tensor_value(t):
        """Materialize a (possibly sparse) tensor as a NumPy array."""
        if isinstance(t, tf.SparseTensor):
            return tf.sparse.to_dense(t).numpy()
        else:
            return t.numpy()

    def is_equal(x, y):
        """Structural equality between two oracle results (tensors, scalars,
        lists/tuples); floats compare with tolerance, integers exactly."""
        x_type = TFArgument.get_type(x)
        y_type = TFArgument.get_type(y)
        if (x_type != y_type):
            return False
        if (x_type == ArgType.KERAS_TENSOR):
            return tf.math.equal(x, y)
        if (x_type == ArgType.TF_TENSOR):
            try:
                if (isinstance(x, tf.RaggedTensor) != isinstance(y, tf.RaggedTensor)):
                    return False
                if isinstance(x, tf.RaggedTensor):
                    s = tf.math.equal(x, y)
                    return s.flat_values.numpy().all()
                np_x = TFLibrary.get_tensor_value(x)
                np_y = TFLibrary.get_tensor_value(y)
                if x.dtype.is_floating:
                    return tf.experimental.numpy.allclose(np_x, np_y, rtol=0.001, atol=0.0001)
                elif x.dtype.is_integer:
                    return np.equal(np_x, np_y).all()
            except:
                raise ValueError(f'Comparison between {type(x)} is not supported now.')
            # Non-float, non-integer dtypes are treated as equal.
            return True
        elif (x_type == ArgType.FLOAT):
            return (abs((x - y)) < 1e-05)
        elif (x_type in [ArgType.LIST, ArgType.TUPLE]):
            if (len(x) != len(y)):
                return False
            for i in range(len(x)):
                if (TFLibrary.is_equal(x[i], y[i]) == False):
                    return False
            return True
        else:
            # Fallback: best-effort ==, treating incomparable objects as equal.
            try:
                flag = (x == y)
            except:
                return True
            if isinstance(flag, np.ndarray):
                flag = flag.all()
            # Some comparison results cannot be truth-tested; treat as equal.
            try:
                if flag:
                    pass
            except:
                flag = True
            return flag
class TestCommunication(unittest.TestCase):
    """Unit tests for the Request/Response/Message containers and the
    status-tagged posting helpers on MessageQueue."""

    def setUp(self) -> None:
        self.queue = MessageQueue()

    def test_request(self) -> None:
        """A Request stores method, operation and payload verbatim."""
        payload = self._get_random_dict()
        request = Request('GET', '/api/a/b/x', payload)
        self.assertEqual('GET', request.method)
        self.assertEqual('/api/a/b/x', request.operation)
        self.assertEqual(payload, request.data)

    def test_response(self) -> None:
        """A fresh Response carries empty data and command dicts."""
        response = Response()
        self.assertEqual({}, response.data)
        self.assertEqual({}, response.command)

    def test_create_simple_response(self) -> None:
        """create_simple_response wraps the payload with an empty command."""
        payload = self._get_random_dict()
        response = create_simple_response(payload)
        self.assertEqual(payload, response.data)
        self.assertEqual({}, response.command)

    def test_message(self) -> None:
        """A Message stores status, subject and payload verbatim."""
        payload = self._get_random_dict()
        message = Message('Test status', 'Test subject', payload)
        self.assertEqual('Test status', message.status)
        self.assertEqual('Test subject', message.subject)
        self.assertEqual(payload, message.data)

    def test_message_queue_post_failure(self) -> None:
        payload = self._get_random_dict()
        self.queue.post_failure('subject', payload)
        self._assert_message('failure', 'subject', payload)

    def test_message_queue_post_success(self) -> None:
        payload = self._get_random_dict()
        self.queue.post_success('subject', payload)
        self._assert_message('success', 'subject', payload)

    def test_message_queue_post_error(self) -> None:
        payload = self._get_random_dict()
        self.queue.post_error('subject', payload)
        self._assert_message('error', 'subject', payload)

    def _get_random_dict(self, size: int = 5) -> dict:
        # Random payload so equality checks exercise real (varying) content.
        from numpy.random import randint
        return {f'key {i}': randint(65536) for i in range(size)}

    def _assert_message(self, expected_status: str, expected_subject: str, expected_data: dict) -> None:
        # Pop the next queued message and verify all three fields.
        received = self.queue.get()
        self.assertEqual(expected_status, received.status)
        self.assertEqual(expected_subject, received.subject)
        self.assertEqual(expected_data, received.data)
class SLSTM(SpikingNeuron):
    """Spiking LSTM layer: an nn.LSTMCell whose hidden state is thresholded
    into spikes, with configurable membrane reset mechanisms.

    Naming note: here `syn` holds the LSTM cell state and `mem` the
    hidden/membrane state that is fired against the threshold.
    """

    def __init__(self, input_size, hidden_size, bias=True, threshold=1.0, spike_grad=None, surrogate_disable=False, init_hidden=False, inhibition=False, learn_threshold=False, reset_mechanism='none', state_quant=False, output=False):
        super().__init__(threshold, spike_grad, surrogate_disable, init_hidden, inhibition, learn_threshold, reset_mechanism, state_quant, output)
        if self.init_hidden:
            # NOTE(review): init_slstm takes no self but is called on the
            # instance — presumably a @staticmethod whose decorator was lost.
            (self.syn, self.mem) = self.init_slstm()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        self.lstm_cell = nn.LSTMCell(self.input_size, self.hidden_size, bias=self.bias)

    def forward(self, input_, syn=False, mem=False):
        """One time step.  Stateless mode returns (spk, syn, mem); in
        init_hidden mode returns spk (or (spk, syn, mem) when output=True)."""
        # _SpikeTensor placeholders carry init_flag; replace them with real
        # zero states shaped like the input on first use.
        if (hasattr(mem, 'init_flag') or hasattr(syn, 'init_flag')):
            (syn, mem) = _SpikeTorchConv(syn, mem, input_=self._reshape_input(input_))
        elif ((mem is False) and hasattr(self.mem, 'init_flag')):
            (self.syn, self.mem) = _SpikeTorchConv(self.syn, self.mem, input_=self._reshape_input(input_))
        if (not self.init_hidden):
            self.reset = self.mem_reset(mem)
            (syn, mem) = self._build_state_function(input_, syn, mem)
            if self.state_quant:
                syn = self.state_quant(syn)
                mem = self.state_quant(mem)
            spk = self.fire(mem)
            return (spk, syn, mem)
        if self.init_hidden:
            self.reset = self.mem_reset(self.mem)
            (self.syn, self.mem) = self._build_state_function_hidden(input_)
            if self.state_quant:
                self.syn = self.state_quant(self.syn)
                self.mem = self.state_quant(self.mem)
            self.spk = self.fire(self.mem)
            if self.output:
                return (self.spk, self.syn, self.mem)
            else:
                return self.spk

    def _base_state_function(self, input_, syn, mem):
        # nn.LSTMCell returns (h, c); h is treated as mem, c as syn.
        (base_fn_mem, base_fn_syn) = self.lstm_cell(input_, (mem, syn))
        return (base_fn_syn, base_fn_mem)

    def _base_state_reset_zero(self, input_, syn, mem):
        # Variant used by the reset-to-zero mechanism: syn component zeroed.
        (base_fn_mem, _) = self.lstm_cell(input_, (mem, syn))
        return (0, base_fn_mem)

    def _build_state_function(self, input_, syn, mem):
        # reset_mechanism_val: 0 = subtract threshold on spike,
        # 1 = reset membrane to zero on spike, 2 = no reset.
        if (self.reset_mechanism_val == 0):
            state_fn = tuple(map((lambda x, y: (x - y)), self._base_state_function(input_, syn, mem), (0, (self.reset * self.threshold))))
        elif (self.reset_mechanism_val == 1):
            state_fn = tuple(map((lambda x, y: (x - (self.reset * y))), self._base_state_function(input_, syn, mem), self._base_state_reset_zero(input_, syn, mem)))
        elif (self.reset_mechanism_val == 2):
            state_fn = self._base_state_function(input_, syn, mem)
        return state_fn

    def _base_state_function_hidden(self, input_):
        (base_fn_mem, base_fn_syn) = self.lstm_cell(input_, (self.mem, self.syn))
        return (base_fn_syn, base_fn_mem)

    def _base_state_reset_zero_hidden(self, input_):
        (base_fn_mem, _) = self.lstm_cell(input_, (self.mem, self.syn))
        return (0, base_fn_mem)

    def _build_state_function_hidden(self, input_):
        # Mirrors _build_state_function but operates on the instance state.
        if (self.reset_mechanism_val == 0):
            state_fn = tuple(map((lambda x, y: (x - y)), self._base_state_function_hidden(input_), (0, (self.reset * self.threshold))))
        elif (self.reset_mechanism_val == 1):
            state_fn = tuple(map((lambda x, y: (x - (self.reset * y))), self._base_state_function_hidden(input_), self._base_state_reset_zero_hidden(input_)))
        elif (self.reset_mechanism_val == 2):
            state_fn = self._base_state_function_hidden(input_)
        return state_fn

    def _reshape_input(self, input_):
        # Zero state matching the input's batch size, on the input's device.
        device = input_.device
        (b, _) = input_.size()
        return torch.zeros(b, self.hidden_size).to(device)

    def init_slstm():
        # NOTE(review): missing self/@staticmethod — see __init__ note.
        # Placeholder states; real tensors are created on first forward().
        mem = _SpikeTensor(init_flag=False)
        syn = _SpikeTensor(init_flag=False)
        return (mem, syn)

    def detach_hidden(cls):
        # NOTE(review): takes cls but lacks @classmethod — presumably lost.
        # Detach all SLSTM instances' states from the autograd graph (TBPTT).
        for layer in range(len(cls.instances)):
            if isinstance(cls.instances[layer], SLSTM):
                cls.instances[layer].syn.detach_()
                cls.instances[layer].mem.detach_()

    def reset_hidden(cls):
        # Restore placeholder states so the next forward() re-initializes them.
        for layer in range(len(cls.instances)):
            if isinstance(cls.instances[layer], SLSTM):
                cls.instances[layer].syn = _SpikeTensor(init_flag=False)
                cls.instances[layer].mem = _SpikeTensor(init_flag=False)
def _make_beit_backbone(model, features=[96, 192, 384, 768], size=[384, 384], hooks=[0, 4, 8, 11], vit_features=768, use_readout='ignore', start_index=1, start_index_readout=1):
    """Wrap a timm BEiT model as a DPT-style feature backbone.

    Builds the default backbone, then monkey-patches patch embedding, feature
    extraction, per-block attention (relative position bias handling) and
    block forward so the model supports variable input resolutions.

    Returns the patched backbone module.
    """
    backbone = make_backbone_default(model, features, size, hooks, vit_features, use_readout, start_index, start_index_readout)
    # Rebind module forwards to the resolution-flexible implementations.
    backbone.model.patch_embed.forward = types.MethodType(patch_embed_forward, backbone.model.patch_embed)
    backbone.model.forward_features = types.MethodType(beit_forward_features, backbone.model)
    for block in backbone.model.blocks:
        attn = block.attn
        attn._get_rel_pos_bias = types.MethodType(_get_rel_pos_bias, attn)
        attn.forward = types.MethodType(attention_forward, attn)
        # Cache of per-resolution relative position index tables.
        attn.relative_position_indices = {}
        block.forward = types.MethodType(block_forward, block)
    return backbone
def _check_and_coerce_cfg_value_type(replacement, original, key, full_key):
original_type = type(original)
replacement_type = type(replacement)
if (replacement_type == original_type):
return replacement
def conditional_cast(from_type, to_type):
if ((replacement_type == from_type) and (original_type == to_type)):
return (True, to_type(replacement))
else:
return (False, None)
casts = [(tuple, list), (list, tuple)]
try:
casts.append((str, unicode))
except Exception:
pass
for (from_type, to_type) in casts:
(converted, converted_value) = conditional_cast(from_type, to_type)
if converted:
return converted_value
raise ValueError('Type mismatch ({} vs. {}) with values ({} vs. {}) for config key: {}'.format(original_type, replacement_type, original, replacement, full_key)) |
def tabulate(rows: List[List[Union[(str, int)]]], headers: List[str]) -> str:
    """Render rows under headers as a plain-text table: a header line, a
    dashed separator, then one line per row, columns space-padded to the
    widest cell (headers included)."""
    # Column widths consider every row's cell plus the header text.
    widths = [max(len(str(cell)) for cell in column) for column in zip(*rows, headers)]
    fmt = ('{{:{}}} ' * len(headers)).format(*widths)
    separator = ['-' * w for w in widths]
    rendered = [fmt.format(*headers), fmt.format(*separator)]
    rendered.extend(fmt.format(*row) for row in rows)
    return '\n'.join(rendered)
def adjacency_matrix(senders, receivers, dim):
    """Dense adjacency matrix from edge lists: entry (i, j) counts the edges
    whose sender is node i and receiver is node j.

    Args:
        senders: int tensor of sender node indices, one per edge.
        receivers: int tensor of receiver node indices, one per edge.
        dim: number of nodes (output is dim x dim).
    """
    one_hot_senders = tf.one_hot(senders, dim)
    one_hot_receivers = tf.one_hot(receivers, dim)
    # Sum of outer products over edges: senders^T @ receivers.
    adj_mat = tf.einsum('ki,kj->ij', one_hot_senders, one_hot_receivers)
    return adj_mat
def plot_anomalies_value(y_true, y_pred, pattern_ano_index, trend_ano_index):
    """Plot ground truth vs prediction and highlight pattern/trend anomalies.

    Args:
        y_true: ground-truth series (squeezable to 1-D).
        y_pred: predicted series (squeezable to 1-D).
        pattern_ano_index: positional indices of pattern anomalies.
        trend_ano_index: positional indices of trend anomalies.

    Displays the figure via plt.show() and returns None.
    """
    df = pd.DataFrame({'y_true': y_true.squeeze(), 'y_pred': y_pred.squeeze()})
    # BUG FIX: the original wrote both flags to a nonexistent 'ano_index'
    # column instead of the 'p_ano_index'/'t_ano_index' columns created here.
    df['p_ano_index'] = 0
    df.loc[(df.index[pattern_ano_index], 'p_ano_index')] = 1
    df['t_ano_index'] = 0
    df.loc[(df.index[trend_ano_index], 't_ano_index')] = 1
    (fig, axs) = plt.subplots(figsize=(16, 6))
    axs.plot(df.index, df.y_true, color='blue', label='Ground Truth')
    axs.plot(df.index, df.y_pred, color='orange', label='Prediction')
    axs.scatter(df.index[pattern_ano_index].tolist(), df.y_true[pattern_ano_index], color='red', label='pattern anomaly values')
    axs.scatter(df.index[trend_ano_index].tolist(), df.y_true[trend_ano_index], color='green', label='trend anomaly values')
    axs.set_title('The Anomaly Values')
    plt.xlabel('time_step')
    plt.legend(loc='upper left')
    plt.show()
# BUG FIX: the decorator call below was missing its '@' — as a bare
# expression it built the legacy-interface wrapper and discarded it, so the
# 'pretrained=' backward-compatibility shim was never applied to the builder.
@_legacy_interface(weights=('pretrained', EfficientNet_B4_Weights.IMAGENET1K_V1))
def efficientnet_b4(*, weights: Optional[EfficientNet_B4_Weights]=None, progress: bool=True, **kwargs: Any) -> EfficientNet:
    """Build EfficientNet-B4 (width 1.4, depth 1.8, dropout 0.4), optionally
    loading the given pretrained weights."""
    weights = EfficientNet_B4_Weights.verify(weights)
    (inverted_residual_setting, last_channel) = _efficientnet_conf('efficientnet_b4', width_mult=1.4, depth_mult=1.8)
    return _efficientnet(inverted_residual_setting, 0.4, last_channel, weights, progress, **kwargs)
def build_model(cfg):
    """Build an mm-style detector for single-device inference.

    SyncBN layers are converted to plain BN (no distributed process group is
    assumed) and the model is wrapped in MMDataParallel.
    """
    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
    model = revert_sync_batchnorm(model)
    model = MMDataParallel(model)
    return model
def read_metadata(path):
    """Read a whitespace-separated metadata file and return, per line, the
    first four fields re-joined with single spaces."""
    with open(path) as handle:
        return [' '.join(line.strip().split()[:4]) for line in handle]
class KarrasDiffusionSchedulers(Enum):
    """Enumeration of diffusion scheduler classes; values are stable integer
    identifiers, so keep existing assignments fixed and only append."""
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    HeunDiscreteScheduler = 6
    EulerAncestralDiscreteScheduler = 7
    DPMSolverMultistepScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14
def get_batch_size(inputs):
    """Return the leading (batch) dimension of a tensor, descending through
    arbitrarily nested lists/tuples via their first element."""
    while isinstance(inputs, (list, tuple)):
        inputs = inputs[0]
    return inputs.size()[0]
class Attn_Net(nn.Module):
    """Gating attention network: Linear(L->D) + Tanh (+ optional Dropout),
    then Linear(D->n_classes).  forward returns (attention scores, input)."""

    def __init__(self, L=1024, D=256, dropout=False, n_classes=1):
        super().__init__()
        layers = [nn.Linear(L, D), nn.Tanh()]
        if dropout:
            layers.append(nn.Dropout(0.25))
        layers.append(nn.Linear(D, n_classes))
        self.module = nn.Sequential(*layers)

    def forward(self, x):
        """Return per-item attention scores alongside the untouched input."""
        return self.module(x), x
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.