code stringlengths 101 5.91M |
|---|
def bsinv(P, F, K, t, o='call'):
    """Invert the Black-Scholes price: recover implied volatility.

    Parameters
    ----------
    P : float or ndarray
        Observed option price.
    F : float
        Forward price of the underlying.
    K : float
        Strike.
    t : float
        Time to expiry (same units as the volatility convention).
    o : str
        Option type: 'call', 'put', or 'otm' (out-of-the-money side,
        picked by the sign of K relative to 1.0 -- assumes a normalized
        strike; TODO confirm against callers).

    Returns
    -------
    float
        Volatility s such that ``bs(F, K, s**2 * t, o) == P``.
    """
    w = 1
    if o == 'put':
        w = -1
    elif o == 'otm':
        # +1 when K > 1.0 (call side), -1 otherwise (put side).
        w = (2 * (K > 1.0)) - 1
    # Floor the price at intrinsic value so a root exists in the bracket.
    P = np.maximum(P, np.maximum(w * (F - K), 0))

    def error(s):
        # Pricing error at candidate volatility s.
        return bs(F, K, (s ** 2) * t, o) - P

    # BUG FIX: the upper bracket was `.0` (i.e. 0.0), which is *below* the
    # lower bracket 1e-09, so brentq raised ValueError unconditionally.
    # Use a generously wide upper bound instead (1000% volatility).
    s = brentq(error, 1e-09, 10.0)
    return s
def get_config(name):
    """Load a YAML file and wrap the parsed document in a Config object.

    :param name: path to the YAML configuration file
    :return: Config built from the parsed mapping
    """
    # BUG FIX: the file handle was opened and never closed; a context
    # manager guarantees the close even if parsing raises.
    with open(name, 'r') as stream:
        config_dict = yaml.safe_load(stream)
    return Config(config_dict)
def get_extensions():
    """Collect the C++/CUDA sources and build the `ape._C` extension module.

    Supports three configurations: CPU-only (CppExtension), CUDA, and
    ROCm/HIP (both via CUDAExtension).  CUDA/HIP is selected when a GPU
    toolchain is detected or FORCE_CUDA=1 is set in the environment.

    Returns
    -------
    list
        Single-element list with the configured extension object.
    """
    this_dir = path.dirname(path.abspath(__file__))
    extensions_dir = path.join(this_dir, 'ape', 'layers', 'csrc')

    main_source = path.join(extensions_dir, 'vision.cpp')
    sources = glob.glob(path.join(extensions_dir, '**', '*.cpp'))

    from torch.utils.cpp_extension import ROCM_HOME
    is_rocm_pytorch = (torch.version.hip is not None) and (ROCM_HOME is not None)
    if is_rocm_pytorch:
        assert torch_ver >= [1, 8], 'ROCM support requires PyTorch >= 1.8!'

    # BUG FIX: source_cuda used to be computed only inside the ROCm branch,
    # so a plain CUDA build raised NameError at `sources += source_cuda`.
    # Compute it unconditionally.
    source_cuda = (glob.glob(path.join(extensions_dir, '**', '*.cu'))
                   + glob.glob(path.join(extensions_dir, '*.cu')))

    sources = [main_source] + sources

    extension = CppExtension
    extra_compile_args = {'cxx': []}
    define_macros = []

    if ((torch.cuda.is_available() and ((CUDA_HOME is not None) or is_rocm_pytorch))
            or (os.getenv('FORCE_CUDA', '0') == '1')):
        extension = CUDAExtension
        sources += source_cuda
        if not is_rocm_pytorch:
            define_macros += [('WITH_CUDA', None)]
            extra_compile_args['nvcc'] = [
                '-O3',
                '-DCUDA_HAS_FP16=1',
                '-D__CUDA_NO_HALF_OPERATORS__',
                '-D__CUDA_NO_HALF_CONVERSIONS__',
                '-D__CUDA_NO_HALF2_OPERATORS__',
            ]
        else:
            define_macros += [('WITH_HIP', None)]
            extra_compile_args['nvcc'] = []

        # Honor user-supplied extra nvcc flags.
        nvcc_flags_env = os.getenv('NVCC_FLAGS', '')
        if nvcc_flags_env != '':
            extra_compile_args['nvcc'].extend(nvcc_flags_env.split(' '))

        if torch_ver < [1, 7]:
            # Older PyTorch needs the host compiler handed to nvcc explicitly.
            CC = os.environ.get('CC', None)
            if CC is not None:
                extra_compile_args['nvcc'].append('-ccbin={}'.format(CC))

    include_dirs = [extensions_dir]
    ext_modules = [
        extension(
            'ape._C',
            sources,
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
        )
    ]
    return ext_modules
def conversation_example_without_text(url):
    """Drive a short scripted conversation against the chat service at *url*.

    Starts a chat with empty seed text, sends a few messages, tolerates a
    non-JSON answer for the tricky question, then closes the chat.
    """
    chat_id = start_chat(url, '')['chat_id']
    for opener in ('Hello dear friend!', 'How are you?', 'What is your name?'):
        message_with_print(url, chat_id, opener)
    try:
        message_with_print(url, chat_id, 'What was discovered in 1985?')
    except json.decoder.JSONDecodeError as err:
        # The service may reply with a non-JSON payload here; show it and move on.
        print(err)
        message_with_print(url, chat_id, 'You dont know?')
    message_with_print(url, chat_id, 'I have to go. Bye!')
    end_chat(url, chat_id)
class ResBlockGenerator(nn.Module):
def __init__(self, in_channels, out_channels, stride=1):
super(ResBlockGenerator, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, 3, 1, padding=1)
self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, padding=1)
nn.init.xavier_uniform_(self.conv1.weight.data, 1.0)
nn.init.xavier_uniform_(self.conv2.weight.data, 1.0)
self.model = nn.Sequential(nn.BatchNorm2d(in_channels), nn.ReLU(), nn.Upsample(scale_factor=2), self.conv1, nn.BatchNorm2d(out_channels), nn.ReLU(), self.conv2)
self.bypass = nn.Sequential()
if (stride != 1):
self.bypass = nn.Upsample(scale_factor=2)
def forward(self, x):
return (self.model(x) + self.bypass(x)) |
class AdamOptimizer(object):
    """Adam optimizer expressed as a Theano symbolic update list.

    Builds the Adam update expressions (Kingma & Ba, 2014) for ``loss``
    w.r.t. ``params``, with optional stepwise learning-rate decay and
    global gradient-norm clipping.  The updates are meant to be compiled
    into a ``theano.function`` elsewhere via :meth:`get_updates`.
    """

    def __init__(self, config, loss, params):
        # Shared scalars: learning rate, and timestep t starting at 1.
        self._lr = get_shared_floatX(config.learning_rate, 'lr')
        self._t = get_shared_floatX(1, 't')
        # First (m) and second (v) moment accumulators, one pair per param.
        self._all_m_tm1 = []
        self._all_v_tm1 = []
        self._updates = [(self._t, (self._t + 1))]
        if config.lr_decay:
            # Multiplicative decay applied once every lr_decay_freq steps.
            lr_coef = tt.pow(config.lr_decay, ((self._t - 1) // config.lr_decay_freq))
            self._updates.append((self._lr, (lr_coef * config.learning_rate)))
        grads = theano.grad(loss, params)
        # NOTE: this is sqrt(sum of squared sums) over parameters, i.e. the
        # global L2 norm of the concatenated gradient vector.
        self._global_grad_norm = tt.sqrt(tt.sum(tt.stack([tt.sum((g ** 2.0)) for g in grads])))
        if config.max_grad_norm:
            # Rescale all gradients uniformly when the global norm exceeds the cap.
            global_clip_factor = ifelse(tt.lt(self._global_grad_norm, config.max_grad_norm), cast_floatX_np(1.0), cast_floatX((config.max_grad_norm / self._global_grad_norm)))
            grads = [(global_clip_factor * g) for g in grads]
        # Bias-corrected step size: lr * sqrt(1 - beta2^t) / (1 - beta1^t).
        lr_t = ((self._lr * clip_sqrt((1 - tt.pow(config.adam_beta2, self._t)))) / (1 - tt.pow(config.adam_beta1, self._t)))
        for (p, g) in zip(params, grads):
            m_tm1 = get_shared_floatX(np.zeros_like(p.get_value()), ('adam_m_' + p.name))
            v_tm1 = get_shared_floatX(np.zeros_like(p.get_value()), ('adam_v_' + p.name))
            self._all_m_tm1.append(m_tm1)
            self._all_v_tm1.append(v_tm1)
            # Exponential moving averages of the gradient and its square.
            m_t = ((config.adam_beta1 * m_tm1) + ((1 - config.adam_beta1) * g))
            v_t = ((config.adam_beta2 * v_tm1) + ((1 - config.adam_beta2) * tt.sqr(g)))
            # Parameter step; adam_eps stabilizes the division.
            delta_t = (((- lr_t) * m_t) / (clip_sqrt(v_t) + config.adam_eps))
            p_t = (p + delta_t)
            self._updates += [(m_tm1, m_t), (v_tm1, v_t), (p, p_t)]

    def get_updates(self):
        """Return the list of (shared_var, new_value) update pairs."""
        return self._updates

    def get_global_grad_norm(self):
        """Return the symbolic pre-clipping global gradient norm."""
        return self._global_grad_norm

    def get_lr_value(self):
        """Return the current numeric learning-rate value."""
        return self._lr.get_value()
def register_Ns3AodvRreqHeader_methods(root_module, cls):
    """Register the ns3::aodv::RreqHeader API on the PyBindGen wrapper *cls*.

    Generated-style binding code: each call declares one C++ member
    (constructor, method, or operator) to expose to Python.  The
    Deserialize/Serialize/Print/GetInstanceTypeId entries override the
    ns3::Header virtual interface.
    """
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Copy constructor, then the full field-initialising constructor.
    cls.add_constructor([param('ns3::aodv::RreqHeader const &', 'arg0')])
    cls.add_constructor([param('uint8_t', 'flags', default_value='0'), param('uint8_t', 'reserved', default_value='0'), param('uint8_t', 'hopCount', default_value='0'), param('uint32_t', 'requestID', default_value='0'), param('ns3::Ipv4Address', 'dst', default_value='ns3::Ipv4Address()'), param('uint32_t', 'dstSeqNo', default_value='0'), param('ns3::Ipv4Address', 'origin', default_value='ns3::Ipv4Address()'), param('uint32_t', 'originSeqNo', default_value='0')])
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    # Const accessors for the RREQ header fields and flags.
    cls.add_method('GetDestinationOnly', 'bool', [], is_const=True)
    cls.add_method('GetDst', 'ns3::Ipv4Address', [], is_const=True)
    cls.add_method('GetDstSeqno', 'uint32_t', [], is_const=True)
    cls.add_method('GetGratuitousRrep', 'bool', [], is_const=True)
    cls.add_method('GetHopCount', 'uint8_t', [], is_const=True)
    cls.add_method('GetId', 'uint32_t', [], is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetOrigin', 'ns3::Ipv4Address', [], is_const=True)
    cls.add_method('GetOriginSeqno', 'uint32_t', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('GetUnknownSeqno', 'bool', [], is_const=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    # Mutators for the same fields/flags.
    cls.add_method('SetDestinationOnly', 'void', [param('bool', 'f')])
    cls.add_method('SetDst', 'void', [param('ns3::Ipv4Address', 'a')])
    cls.add_method('SetDstSeqno', 'void', [param('uint32_t', 's')])
    cls.add_method('SetGratuitousRrep', 'void', [param('bool', 'f')])
    cls.add_method('SetHopCount', 'void', [param('uint8_t', 'count')])
    cls.add_method('SetId', 'void', [param('uint32_t', 'id')])
    cls.add_method('SetOrigin', 'void', [param('ns3::Ipv4Address', 'a')])
    cls.add_method('SetOriginSeqno', 'void', [param('uint32_t', 's')])
    cls.add_method('SetUnknownSeqno', 'void', [param('bool', 'f')])
    return
def _apply_through_arrow(operation, /, *args, generate_bitmasks=False, expect_option_type=False, string_to32=False, bytestring_to32=False, ensure_empty_mask=False, **kwargs):
    """Apply a pyarrow *operation* to Awkward layouts via an Arrow round-trip.

    Every ``Content`` in ``args`` is converted with ``to_arrow``; the
    operation's result is converted back with ``from_arrow``.  Under the
    typetracer backend the call is traced on length-zero stand-in arrays and
    the result is returned as a typetracer with forgotten length.

    ``string_to32``/``bytestring_to32`` are forwarded to the Arrow
    conversion, ``generate_bitmasks`` to the back-conversion.  When
    ``expect_option_type`` is False, the option layer is dropped from the
    result (with ``ensure_empty_mask`` forwarded to that helper).
    """
    from awkward._backends.dispatch import backend_of
    from awkward.contents.content import Content
    from awkward.operations.ak_from_arrow import from_arrow
    from awkward.operations.ak_to_arrow import to_arrow
    from awkward._backends.typetracer import TypeTracerBackend
    backend = backend_of(*args)
    typetracer = TypeTracerBackend.instance()
    if (backend is typetracer):
        # Typetracer arrays carry no data: trace the operation on
        # length-zero arrays, then forget the length of the result.
        converted_args = [(to_arrow(x.form.length_zero_array(highlevel=False), extensionarray=False, string_to32=string_to32, bytestring_to32=bytestring_to32) if isinstance(x, Content) else x) for x in args]
        out = from_arrow(operation(*converted_args, **kwargs), generate_bitmasks=generate_bitmasks, highlevel=False).to_typetracer(forget_length=True)
    else:
        # Concrete data: convert the Content arguments directly.
        converted_args = [(to_arrow(x, extensionarray=False, string_to32=string_to32, bytestring_to32=bytestring_to32) if isinstance(x, Content) else x) for x in args]
        out = from_arrow(operation(*converted_args, **kwargs), generate_bitmasks=generate_bitmasks, highlevel=False)
    if expect_option_type:
        return out
    else:
        return _drop_option_preserving_form(out, ensure_empty_mask=ensure_empty_mask)
def load_combined_labels(output_path: str):
    """Load and concatenate the label tensors for each dataset split.

    For every split (train/valid/test) the ``params``, ``ret`` and ``var``
    label files under ``<output_path>/vectors/<split>/`` are loaded and
    concatenated in that fixed order.

    Parameters
    ----------
    output_path : str
        Root directory containing the ``vectors/<split>`` subfolders.

    Returns
    -------
    tuple
        ``(train, valid, test)`` concatenated label tensors.
    """
    def _split_labels(split: str):
        # One tensor per label kind, concatenated in a fixed order.
        return torch.cat(tuple(
            load_flat_labels_tensors(
                join(output_path, 'vectors', split, f'{kind}_{split}_dps_y_all.npy'))
            for kind in ('params', 'ret', 'var')))

    return (_split_labels('train'), _split_labels('valid'), _split_labels('test'))
def gen_appended_dataset(seed: int, dataset: str, version: str, params: Dict[(str, Any)], overwrite: bool) -> None:
    """Generate an appended (incrementally updated) variant of a dataset.

    The update flavor comes from ``params['type']``: ``'ind'`` appends an
    independent random permutation, ``'cor'`` appends sorted (correlated)
    rows, and ``'skew'`` appends a skewed sample sized by
    ``params['skew_size']``.  Anything else raises NotImplementedError.
    """
    random.seed(seed)
    np.random.seed(seed)
    update_type = params.get('type')
    batch_ratio = params.get('batch_ratio')
    L.info(f'Start generating appended data for {dataset}/{version}')
    if update_type == 'ind':
        _, new_version = get_random_data(dataset, version, overwrite=overwrite)
    elif update_type == 'cor':
        _, new_version = get_sorted_data(dataset, version, overwrite=overwrite)
    elif update_type == 'skew':
        _, new_version = get_skew_data(dataset, version, sample_ratio=float(params['skew_size']), overwrite=overwrite)
    else:
        raise NotImplementedError
    # Every flavor finishes with the same append step.
    append_data(dataset, version, new_version, interval=batch_ratio)
    L.info('Finish updating data!')
def main():
    """Print and dump the merged config; optionally dump the model graph."""
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    print(f'''Config:
{cfg.pretty_text}''')
    cfg.dump('example.py')
    if args.graph:
        # Build on CPU just to render the module tree.
        model = init_segmentor(args.config, device='cpu')
        print(f'''Model graph:
{str(model)}''')
        with open('example-graph.txt', 'w') as f:
            # BUG FIX: writelines() on a str iterates it character by
            # character; write() emits the whole graph in one call.
            f.write(str(model))
def get_value(dic, keys_chain):
    """Follow *keys_chain* through nested subscriptable containers.

    Each element of *keys_chain* is applied as a subscript in turn; an
    empty chain returns *dic* itself.  Missing links raise whatever the
    container raises (KeyError, IndexError, ...).
    """
    node = dic
    for link in keys_chain:
        node = node[link]
    return node
class Trainer(object):
    """End-to-end training driver.

    Wires up device, directories, logging, LR schedule, data loaders and
    the model/optimizer/loss from ``opts``, then runs (optional) warm-up,
    per-epoch training/validation, checkpointing and metric logging.
    """

    def __init__(self, opts):
        super(Trainer, self).__init__()
        self.opts = opts
        self.best_acc = 0
        self.start_epoch = 0
        self.max_bsz_cnn_gpu0 = opts.max_bsz_cnn_gpu0
        # Resume only when an existing checkpoint *directory* was supplied.
        self.resume = self.opts.checkpoint if (self.opts.checkpoint is not None and os.path.isdir(self.opts.checkpoint)) else None
        self.global_setter()

    def global_setter(self):
        """Run all setup steps; order matters (device -> data -> model)."""
        self.setup_device()
        self.setup_directories()
        self.setup_logger()
        self.setup_lr_scheduler()
        self.setup_dataloader()
        self.setup_model_optimizer_lossfn()

    def setup_directories(self):
        """Create the checkpoint/log directory if needed."""
        if not os.path.isdir(self.opts.savedir):
            os.makedirs(self.opts.savedir)

    def setup_device(self):
        """Pick CUDA/CPU, detect multi-GPU, and tune cuDNN."""
        num_gpus = torch.cuda.device_count()
        self.num_gpus = num_gpus
        if num_gpus > 0:
            print_log_message('Using {} GPUs'.format(num_gpus))
        else:
            print_log_message('Using CPU')
        self.device = torch.device('cuda:0' if num_gpus > 0 else 'cpu')
        self.use_multi_gpu = num_gpus > 1
        if torch.backends.cudnn.is_available():
            import torch.backends.cudnn as cudnn
            cudnn.benchmark = True
            cudnn.deterministic = True

    def setup_logger(self):
        """Create a TensorBoard summary writer (or the project fallback)."""
        try:
            from torch.utils.tensorboard import SummaryWriter
        except ImportError:
            # BUG FIX: was a bare `except:`; only a missing package should
            # trigger the fallback writer.
            from utilities.summary_writer import SummaryWriter
        self.logger = SummaryWriter(log_dir=self.opts.savedir, comment='Training and Validation logs')

    def setup_lr_scheduler(self):
        """Build the learning-rate scheduler from options."""
        self.lr_scheduler = get_lr_scheduler(self.opts)

    def setup_dataloader(self):
        """Build the frozen base feature extractor and the data loaders."""
        from model.base_feature_extractor import BaseFeatureExtractor
        base_feature_extractor = BaseFeatureExtractor(opts=self.opts)
        base_feature_extractor = base_feature_extractor.to(device=self.device)
        if self.use_multi_gpu:
            base_feature_extractor = torch.nn.DataParallel(base_feature_extractor)
        self.base_feature_extractor = base_feature_extractor
        # The extractor is inference-only; force and verify eval mode.
        self.base_feature_extractor.eval()
        if self.base_feature_extractor.training:
            print_warning_message('Base feature extractor is in training mode. Moving to evaluation mode')
            self.base_feature_extractor.eval()
        (train_loader, val_loader, diag_classes, class_weights) = get_data_loader(opts=self.opts)
        self.train_loader = train_loader
        self.val_loader = val_loader
        self.diag_classes = diag_classes
        self.class_weights = torch.from_numpy(class_weights)

    def setup_model_optimizer_lossfn(self):
        """Build the MI model, loss and optimizer; then optionally resume.

        BUG FIX: the original restored checkpoint state into
        ``self.mi_model`` / ``self.optimizer`` *before* those attributes
        were assigned (they were created further down the method), so any
        resume died with AttributeError.  The restore now runs after the
        model and optimizer are constructed.
        """
        odim = (self.base_feature_extractor.module.output_feature_sz if self.use_multi_gpu else self.base_feature_extractor.output_feature_sz)
        mi_model = build_model(opts=self.opts, diag_classes=self.diag_classes, base_feature_odim=odim)
        mi_model = mi_model.to(device=self.device)
        if self.use_multi_gpu:
            mi_model = torch.nn.DataParallel(mi_model)
        self.mi_model = mi_model
        criteria = build_criteria(opts=self.opts, class_weights=self.class_weights.float())
        self.criteria = criteria.to(device=self.device)
        self.optimizer = build_optimizer(model=self.mi_model, opts=self.opts)
        if self.resume is not None:
            (resume_ep, resume_model_state, resume_optim_state, resume_perf) = load_checkpoint(checkpoint_dir=self.opts.checkpoint, device=self.device)
            self.start_epoch = resume_ep
            self.best_acc = resume_perf
            self.mi_model.load_state_dict(resume_model_state)
            self.optimizer.load_state_dict(resume_optim_state)
            # Move restored optimizer tensors onto the training device.
            for state in self.optimizer.state.values():
                for (k, v) in state.items():
                    if isinstance(v, torch.Tensor):
                        state[k] = v.to(device=self.device)
            print_log_message('Resuming from checkpoint saved at {}th epoch'.format(self.start_epoch))

    def training(self, epoch, lr, *args, **kwargs):
        """One training epoch with gradient accumulation.

        :return: (average top-1 accuracy, average loss) over the epoch
        """
        train_stats = Statistics()
        self.mi_model.train()
        self.optimizer.zero_grad()
        num_samples = len(self.train_loader)
        epoch_start_time = time.time()
        for (batch_id, batch) in enumerate(self.train_loader):
            (words, true_diag_labels) = batch
            true_diag_labels = true_diag_labels.to(device=self.device)
            pred_diag_labels = prediction(words=words, cnn_model=self.base_feature_extractor, mi_model=self.mi_model, max_bsz_cnn_gpu0=self.max_bsz_cnn_gpu0, num_gpus=self.num_gpus, device=self.device)
            loss = self.criteria(pred_diag_labels, true_diag_labels)
            top1_acc = accuracy(pred_diag_labels, true_diag_labels, topk=(1,))
            loss.backward()
            # Step only every `accum_count` batches (and on the last batch).
            if ((batch_id + 1) % self.opts.accum_count == 0) or ((batch_id + 1) == len(self.train_loader)):
                self.optimizer.step()
                self.optimizer.zero_grad()
            train_stats.update(loss=loss.item(), acc=top1_acc[0].item())
            if (batch_id % self.opts.log_interval == 0) and (batch_id > 0):
                train_stats.output(epoch=epoch, batch=batch_id, n_batches=num_samples, start=epoch_start_time, lr=lr)
        return (train_stats.avg_acc(), train_stats.avg_loss())

    def warm_up(self, *args, **kwargs):
        """Linear LR warm-up for ~``opts.warm_up_iterations`` steps,
        rounded up to a whole number of epochs."""
        self.mi_model.train()
        num_samples = len(self.train_loader)
        warm_up_iterations = int(math.ceil((self.opts.warm_up_iterations * 1.0) / num_samples) * num_samples)
        print_info_message('Warming Up')
        print_log_message('LR will linearly change from {} to {} in about {} steps'.format(self.opts.warm_up_min_lr, self.opts.lr, warm_up_iterations))
        lr_list = np.linspace(1e-07, self.opts.lr, warm_up_iterations)
        epoch_start_time = time.time()
        iteration = -1
        while iteration < warm_up_iterations:
            warm_up_stats = Statistics()
            for (batch_id, batch) in enumerate(self.train_loader):
                if iteration >= warm_up_iterations:
                    break
                iteration += 1
                try:
                    lr_iter = lr_list[iteration]
                except IndexError:
                    # BUG FIX: was a bare `except:`; only a past-the-end
                    # lookup should fall back to the target LR.
                    lr_iter = self.opts.lr
                self.optimizer = update_optimizer(optimizer=self.optimizer, lr_value=lr_iter)
                (words, true_diag_labels) = batch
                true_diag_labels = true_diag_labels.to(device=self.device)
                pred_diag_labels = prediction(words=words, cnn_model=self.base_feature_extractor, mi_model=self.mi_model, max_bsz_cnn_gpu0=self.max_bsz_cnn_gpu0, num_gpus=self.num_gpus, device=self.device)
                loss = self.criteria(pred_diag_labels, true_diag_labels)
                top1_acc = accuracy(pred_diag_labels, true_diag_labels, topk=(1,))
                loss.backward()
                if ((batch_id + 1) % self.opts.accum_count == 0) or ((batch_id + 1) == len(self.train_loader)):
                    self.optimizer.step()
                    self.optimizer.zero_grad()
                warm_up_stats.update(loss=loss.item(), acc=top1_acc[0].item())
                if (batch_id % self.opts.log_interval == 0) and (batch_id > 0):
                    warm_up_stats.output(epoch=-1, batch=iteration, n_batches=warm_up_iterations, start=epoch_start_time, lr=lr_iter)
            gc.collect()
        print_log_message('Warming Up... Done!!!')

    def validation(self, epoch, lr, *args, **kwargs):
        """One validation pass; returns (avg top-1 accuracy, avg loss)."""
        val_stats = Statistics()
        self.mi_model.eval()
        num_samples = len(self.val_loader)
        with torch.no_grad():
            epoch_start_time = time.time()
            for (batch_id, batch) in enumerate(self.val_loader):
                (words, true_diag_labels) = batch
                true_diag_labels = true_diag_labels.to(device=self.device)
                pred_diag_labels = prediction(words=words, cnn_model=self.base_feature_extractor, mi_model=self.mi_model, max_bsz_cnn_gpu0=self.max_bsz_cnn_gpu0, num_gpus=self.num_gpus, device=self.device)
                loss = self.criteria(pred_diag_labels, true_diag_labels)
                top1_acc = accuracy(pred_diag_labels, true_diag_labels, topk=(1,))
                val_stats.update(loss=loss.item(), acc=top1_acc[0].item())
                if (batch_id % self.opts.log_interval == 0) and (batch_id > 0):
                    val_stats.output(epoch=epoch, batch=batch_id, n_batches=num_samples, start=epoch_start_time, lr=lr)
            gc.collect()
        avg_acc = val_stats.avg_acc()
        avg_loss = val_stats.avg_loss()
        print_log_message('* Validation Stats')
        print_log_message('* Loss: {:5.2f}, Mean Acc: {:3.2f}'.format(avg_loss, avg_acc))
        return (avg_acc, avg_loss)

    def run(self, *args, **kwargs):
        """Full loop: warm-up, epochs, checkpointing, stats dump."""
        kwargs['need_attn'] = False
        if self.opts.warm_up:
            # Extra args are forwarded bundled, matching the *args/**kwargs
            # signatures of warm_up/training/validation (which ignore them).
            self.warm_up(args=args, kwargs=kwargs)
        if self.resume is not None:
            # Fast-forward the scheduler to the resumed epoch.
            for epoch in range(self.start_epoch):
                self.lr_scheduler.step(epoch)
        eval_stats_dict = dict()
        for epoch in range(self.start_epoch, self.opts.epochs):
            epoch_lr = self.lr_scheduler.step(epoch)
            self.optimizer = update_optimizer(optimizer=self.optimizer, lr_value=epoch_lr)
            (train_acc, train_loss) = self.training(epoch=epoch, lr=epoch_lr, args=args, kwargs=kwargs)
            (val_acc, val_loss) = self.validation(epoch=epoch, lr=epoch_lr, args=args, kwargs=kwargs)
            eval_stats_dict[epoch] = val_acc
            gc.collect()
            is_best = val_acc >= self.best_acc
            self.best_acc = max(val_acc, self.best_acc)
            # Unwrap DataParallel before saving so the state dict is portable.
            model_state = (self.mi_model.module.state_dict() if isinstance(self.mi_model, torch.nn.DataParallel) else self.mi_model.state_dict())
            optimizer_state = self.optimizer.state_dict()
            save_checkpoint(epoch=epoch, model_state=model_state, optimizer_state=optimizer_state, best_perf=self.best_acc, save_dir=self.opts.savedir, is_best=is_best, keep_best_k_models=self.opts.keep_best_k_models)
            self.logger.add_scalar('LR', round(epoch_lr, 6), epoch)
            self.logger.add_scalar('TrainingLoss', train_loss, epoch)
            self.logger.add_scalar('TrainingAcc', train_acc, epoch)
            self.logger.add_scalar('ValidationLoss', val_loss, epoch)
            self.logger.add_scalar('ValidationAcc', val_acc, epoch)
        # Persist validation accuracies sorted best-first, merging with any
        # stats file left over from a previous run.
        eval_stats_dict_sort = {k: v for (k, v) in sorted(eval_stats_dict.items(), key=lambda item: item[1], reverse=True)}
        eval_stats_fname = '{}/val_stats_bag_{}_word_{}_{}_{}'.format(self.opts.savedir, self.opts.bag_size, self.opts.word_size, self.opts.attn_fn, self.opts.attn_type)
        writer = DictWriter(file_name=eval_stats_fname, format='json')
        if not os.path.isfile(eval_stats_fname):
            writer.write(data_dict=eval_stats_dict_sort)
        else:
            with open(eval_stats_fname, 'r') as json_file:
                eval_stats_dict_old = json.load(json_file)
            eval_stats_dict_old.update(eval_stats_dict_sort)
            eval_stats_dict_updated = {k: v for (k, v) in sorted(eval_stats_dict_old.items(), key=lambda item: item[1], reverse=True)}
            writer.write(data_dict=eval_stats_dict_updated)
        self.logger.close()
def snowball_sample(G, num_waves, seeds):
    """Snowball-sample graph *G* starting from *seeds*.

    Seeds form wave 0; each subsequent wave adds all neighbours of the
    nodes discovered so far, for *num_waves* waves.

    Returns a snap network (PNEANet) induced on the sampled node set, with
    an integer node attribute ``'zone'`` recording each node's wave number.
    """
    assert len(seeds) == len(set(seeds)), 'seeds must be unique'
    zonedict = dict()
    N = snap.ConvertGraph(snap.PNEANet, G)
    nodes = set(seeds)
    for seed in seeds:
        zonedict[seed] = 0
    newNodes = set(nodes)
    for i in range(num_waves):
        wave = i + 1
        # Iterate over a snapshot; newNodes is extended inside the loop.
        for node in set(newNodes):
            neighbours = snap.TIntV()
            snap.GetNodesAtHop(G, node, 1, neighbours, False)
            newNeighbours = set(neighbours) - nodes
            # BUG FIX: the inner loop reused `node`, shadowing the outer
            # loop variable; renamed to `neighbour` for clarity/safety.
            for neighbour in newNeighbours:
                # BUG FIX: dict.has_key() was removed in Python 3.
                if neighbour not in zonedict:
                    zonedict[neighbour] = wave
            newNodes.update(newNeighbours)
        nodes.update(newNodes)
    NodeVec = snap.TIntV()
    for node in nodes:
        NodeVec.Add(node)
    sampleN = snap.GetSubGraph(N, NodeVec)
    sampleN.AddIntAttrN('zone', -1)
    # BUG FIX: dict.iteritems() was removed in Python 3; items() works.
    for (nodeid, zone) in zonedict.items():
        sampleN.AddIntAttrDatN(nodeid, zone, 'zone')
    return sampleN
def create_RepVGG_A2(last_stride, norm_type):
    """Factory for the RepVGG-A2 backbone variant.

    A2 uses stage depths [2, 4, 14, 1] with width multipliers
    [1.5, 1.5, 1.5, 2.75] and no grouped-conv overrides.
    """
    stage_blocks = [2, 4, 14, 1]
    stage_widths = [1.5, 1.5, 1.5, 2.75]
    return RepVGG(last_stride, norm_type,
                  num_blocks=stage_blocks,
                  width_multiplier=stage_widths,
                  override_groups_map=None)
def register_Ns3EpcX2Tag_methods(root_module, cls):
    """Register the ns3::EpcX2Tag API on the PyBindGen wrapper *cls*.

    Generated-style binding code: exposes the constructors, the
    sender-timestamp accessors, and the ns3::Tag virtual interface
    (Serialize/Deserialize/Print/GetInstanceTypeId/GetSerializedSize).
    """
    # Copy constructor, default constructor, and timestamp constructor.
    cls.add_constructor([param('ns3::EpcX2Tag const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Time', 'senderTimestamp')])
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetSenderTimestamp', 'ns3::Time', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True)
    cls.add_method('SetSenderTimestamp', 'void', [param('ns3::Time', 'senderTimestamp')])
    return
def ConvertSubGraph_PNEANet_PNEANet(InGraph, NIdV, RenumberNodes=False):
    """Delegate to the native ``_snap`` binding: build the PNEANet subgraph
    of *InGraph* induced by the node ids in *NIdV*.

    :param RenumberNodes: presumably renumbers node ids consecutively in
        the result when True -- see the snap C++ API for the exact contract.
    """
    return _snap.ConvertSubGraph_PNEANet_PNEANet(InGraph, NIdV, RenumberNodes)
class BeitForMaskedImageModeling(metaclass=DummyObject):
    """Import-time placeholder for the real model class.

    Generated dummy object: instantiating it calls ``requires_backends``,
    which raises an informative error explaining that the 'torch' backend
    must be installed to use the real implementation.
    """
    # Backends required for the real class to be usable.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class WorkFlowConfig(Config):
    """Top-level workflow configuration.

    Each attribute holds the configuration of one pipeline component
    (raw dict before parsing, dedicated Config object after
    :meth:`from_dict`), or None when that component is absent.
    """

    data_loader_config: object = None
    open_set_data_loader_config: object = None
    preprocessor_config: object = None
    log_parser_config: object = None
    log_vectorizer_config: object = None
    partitioner_config: object = None
    open_set_partitioner_config: object = None
    categorical_encoder_config: object = None
    feature_extractor_config: object = None
    anomaly_detection_config: object = None
    nn_anomaly_detection_config: object = None
    clustering_config: object = None
    workflow_config: object = None

    @classmethod
    def from_dict(cls, config_dict):
        """Build a WorkFlowConfig from a plain dict, parsing every present
        sub-config into its dedicated Config class.

        BUG FIX: this method takes ``cls`` and calls ``super().from_dict``,
        i.e. it is meant to be invoked on the class, but the
        ``@classmethod`` decorator was missing -- calling
        ``WorkFlowConfig.from_dict(d)`` would bind ``d`` to ``cls``.
        """
        config = super(WorkFlowConfig, cls).from_dict(config_dict)
        # (attribute name, parser class) pairs, in the original parse order.
        sub_parsers = (
            ('data_loader_config', DataLoaderConfig),
            ('open_set_data_loader_config', OpenSetDataLoaderConfig),
            ('preprocessor_config', PreprocessorConfig),
            ('partitioner_config', PartitionerConfig),
            ('open_set_partitioner_config', OpenSetPartitionerConfig),
            ('log_parser_config', LogParserConfig),
            ('log_vectorizer_config', VectorizerConfig),
            ('feature_extractor_config', FeatureExtractorConfig),
            ('categorical_encoder_config', CategoricalEncoderConfig),
            ('anomaly_detection_config', AnomalyDetectionConfig),
            ('nn_anomaly_detection_config', NNAnomalyDetectionConfig),
            ('clustering_config', ClusteringConfig),
        )
        for (attr, parser) in sub_parsers:
            raw = getattr(config, attr)
            if raw:
                # Parse only the sub-configs that are actually present.
                setattr(config, attr, parser.from_dict(raw))
        return config
def main(args):
    """Distributed evaluation entry point.

    Initializes NCCL process groups, restores a model snapshot, wraps the
    model in DistributedDataParallel, and runs evaluation while saving
    prediction images to ``args.out_dir``.
    """
    distributed.init_process_group(backend='nccl', init_method='env://')
    device_id = args.local_rank
    device = torch.device(args.local_rank)
    rank = distributed.get_rank()
    world_size = distributed.get_world_size()
    torch.cuda.set_device(device_id)

    config = make_config(args)
    test_dataloader = make_dataloader(args, config, rank, world_size)

    model = make_model(config)
    log_debug('Loading snapshot from %s', args.model)
    snapshot = resume_from_snapshot(model, args.model, ['body', 'rpn_head', 'roi_head', 'sem_head'])

    torch.backends.cudnn.benchmark = config['general'].getboolean('cudnn_benchmark')
    model = DistributedDataParallel(model.cuda(device),
                                    device_ids=[device_id],
                                    output_device=device_id,
                                    find_unused_parameters=True)

    save_function = partial(save_prediction_image, out_dir=args.out_dir)
    evaluate(model, test_dataloader,
             device=device,
             summary=None,
             log_interval=config['general'].getint('log_interval'),
             save_function=save_function)
def main():
    """List closed issues and merged PRs for a GitHub milestone as reST."""
    p = argparse.ArgumentParser(usage=__doc__.lstrip())
    p.add_argument('--project', default='scipy/scipy')
    p.add_argument('milestone')
    args = p.parse_args()

    getter = CachedGet('gh_cache.json', GithubGet())
    try:
        milestones = get_milestones(getter, args.project)
        if args.milestone not in milestones:
            msg = 'Milestone {0} not available. Available milestones: {1}'
            msg = msg.format(args.milestone, ', '.join(sorted(milestones)))
            p.error(msg)
        issues = get_issues(getter, args.project, args.milestone)
        issues.sort()
    finally:
        # Always persist the HTTP cache, even when the milestone check fails.
        getter.save()

    prs = [x for x in issues if '/pull/' in x.url]
    issues = [x for x in issues if x not in prs]

    def print_list(title, items):
        # Emit one reST section with a bullet per issue/PR.
        print()
        print(title)
        print('-' * len(title))
        print()
        for issue in items:
            msg = '* `#{0} <{1}>`__: {2}'
            title = re.sub('\\s+', ' ', issue.title.strip())
            title = title.replace('`', '\\`').replace('*', '\\*')
            if len(title) > 60:
                # Try to truncate at a word boundary after 60 chars.
                remainder = re.sub('\\s.*$', '...', title[60:])
                if len(remainder) > 20:
                    # BUG FIX: the hard 80-char fallback used to be assigned
                    # to `remainder` and then discarded, leaving over-long
                    # titles completely untruncated.
                    title = title[:80] + '...'
                else:
                    title = title[:60] + remainder
            msg = msg.format(issue.id, issue.url, title)
            print(msg)
        print()

    msg = f'Issues closed for {args.milestone}'
    print_list(msg, issues)
    msg = f'Pull requests for {args.milestone}'
    print_list(msg, prs)
    return 0
def resize(input, size=None, scale_factor=None, mode='nearest', align_corners=None, warning=True):
    """Wrapper around ``F.interpolate`` that warns about align_corners misuse.

    When upsampling with ``align_corners=True``, alignment is only exact if
    ``out - 1`` is a multiple of ``in - 1`` on each spatial axis; otherwise
    a warning is emitted.  All arguments are forwarded unchanged.

    :param input: 4-D tensor (N, C, H, W) -- the shape[2:] indexing assumes
        exactly two spatial dims.
    :param warning: set False to silence the alignment check.
    """
    if warning:
        if size is not None and align_corners:
            input_h, input_w = tuple(int(x) for x in input.shape[2:])
            output_h, output_w = tuple(int(x) for x in size)
            # BUG FIX: the original compared `output_w > output_h` instead
            # of `output_w > input_w`, so width-only upsampling never
            # triggered the alignment check.
            if output_h > input_h or output_w > input_w:
                if ((output_h > 1 and output_w > 1 and input_h > 1 and input_w > 1)
                        and ((output_h - 1) % (input_h - 1))
                        and ((output_w - 1) % (input_w - 1))):
                    warnings.warn(f'When align_corners={align_corners}, the output would more aligned if input size {(input_h, input_w)} is `x+1` and out size {(output_h, output_w)} is `nx+1`')
    return F.interpolate(input, size, scale_factor, mode, align_corners)
def number_of_divisors(n):
    """Return the number of divisors of the nonzero integer ``n``.

    Raises ValueError on zero, for which the divisor count is undefined.
    The actual counting is delegated to PARI's ``numdiv``.
    """
    value = ZZ(n)
    if value.is_zero():
        raise ValueError('input must be nonzero')
    # Import deferred until after validation, matching lazy PARI loading.
    from sage.libs.pari.all import pari
    return ZZ(pari(value).numdiv())
def register_types_ns3_Config(module):
    """Register ns3::Config-related types on the PyBindGen *module*:
    the MatchContainer class plus the std::vector containers it uses.
    """
    root_module = module.get_root()
    # MatchContainer lives in the core module; import rather than redefine.
    module.add_class('MatchContainer', import_from_module='ns.core')
    module.add_container('std::vector< ns3::Ptr< ns3::Object > >', 'ns3::Ptr< ns3::Object >', container_type=u'vector')
    module.add_container('std::vector< std::string >', 'std::string', container_type=u'vector')
class RecurrentCategorical(Distribution):
    """Categorical distribution over sequences.

    Distribution info has layout (N, T, S) -- batch, time, categories.
    Each operation flattens to (N*T, S), delegates to the per-timestep
    Categorical, and reshapes the result back to (N, T).
    """

    def __init__(self, dim):
        # Underlying per-timestep categorical over `dim` categories.
        self._cat = Categorical(dim)
        self._dim = dim

    def dim(self):
        """Number of categories."""
        return self._dim

    def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
        """Symbolic KL(old || new), summed over the category axis."""
        old_prob_var = old_dist_info_vars['prob']
        new_prob_var = new_dist_info_vars['prob']
        # TINY guards the logs against zero probabilities.
        return TT.sum((old_prob_var * (TT.log((old_prob_var + TINY)) - TT.log((new_prob_var + TINY)))), axis=2)

    def kl(self, old_dist_info, new_dist_info):
        """Numpy KL(old || new), summed over the category axis."""
        old_prob = old_dist_info['prob']
        new_prob = new_dist_info['prob']
        return np.sum((old_prob * (np.log((old_prob + TINY)) - np.log((new_prob + TINY)))), axis=2)

    def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):
        """Symbolic p_new(x)/p_old(x) per (N, T) position."""
        old_prob_var = old_dist_info_vars['prob']
        new_prob_var = new_dist_info_vars['prob']
        a_dim = x_var.shape[(- 1)]
        # Flatten (N, T, S) -> (N*T, S), delegate, reshape back to (N, T).
        flat_ratios = self._cat.likelihood_ratio_sym(x_var.reshape(((- 1), a_dim)), dict(prob=old_prob_var.reshape(((- 1), a_dim))), dict(prob=new_prob_var.reshape(((- 1), a_dim))))
        return flat_ratios.reshape(old_prob_var.shape[:2])

    def entropy(self, dist_info):
        """Numpy entropy per (N, T) position."""
        probs = dist_info['prob']
        return (- np.sum((probs * np.log((probs + TINY))), axis=2))

    def log_likelihood_sym(self, xs, dist_info_vars):
        """Symbolic log p(xs) per (N, T) position."""
        probs = dist_info_vars['prob']
        a_dim = probs.shape[(- 1)]
        flat_logli = self._cat.log_likelihood_sym(xs.reshape(((- 1), a_dim)), dict(prob=probs.reshape(((- 1), a_dim))))
        return flat_logli.reshape(probs.shape[:2])

    def log_likelihood(self, xs, dist_info):
        """Log-likelihood per (N, T) position.

        NOTE(review): despite being the numeric counterpart of
        ``log_likelihood_sym``, this delegates to the *symbolic*
        ``self._cat.log_likelihood_sym`` (body identical to the method
        above) -- looks like a copy/paste slip; confirm against the base
        Categorical API before changing.
        """
        probs = dist_info['prob']
        a_dim = probs.shape[(- 1)]
        flat_logli = self._cat.log_likelihood_sym(xs.reshape(((- 1), a_dim)), dict(prob=probs.reshape(((- 1), a_dim))))
        return flat_logli.reshape(probs.shape[:2])

    def dist_info_keys(self):
        """Keys expected in the dist-info dicts."""
        return ['prob']
def small_nonqnn_to_recording_resnet(image, config, test=False, channel_last=False, bn_self_folding=False, record_layers=(), name='bn-graph-ref'):
    """Build a small CIFAR-style ResNet with recorder nodes inserted.

    Activations are wrapped with ``config.recorder_activation`` and weights
    with ``config.recorder_weight`` so value ranges can be recorded (e.g.
    for later quantization).  When ``record_layers`` is non-empty, the
    generic per-function-scope activation recorders are skipped.  With
    ``bn_self_folding`` the first conv is followed by an explicit
    scale/bias (Mul2/Add2) pair instead of batch normalization.

    Returns the 10-way affine output (logits).
    """
    recorder = config.recorder_activation
    recorder_weight = config.recorder_weight
    axes = get_channel_axes(image, channel_last)
    h = image
    # Running index used to name per-function parameter scopes.
    func_id = 0
    with nn.parameter_scope('MulScale-{}'.format(func_id)):
        if (not record_layers):
            h = recorder()(h, axes=axes)
        # Scale raw pixel values into [0, 1].
        h /= 255.0
    func_id += 1
    with nn.parameter_scope('first-conv'):
        h = recorder()(h, axes=axes)
        (w, b) = create_conv_weight_bias(h, 16, kernel=(3, 3), channel_last=channel_last, name=name)
        # Weights and biases get their own recorders.
        w = recorder_weight()(w, axes=axes, name='w')
        b = recorder_weight()(b, axes=axes, name='b')
        h = F.convolution(h, w, b, pad=(1, 1), stride=(1, 1), channel_last=channel_last)
    if bn_self_folding:
        # Explicit per-channel scale (Mul2) and bias (Add2) standing in for
        # a folded batch-norm layer.
        (a, b) = create_scale_bias(1, h.shape, axes=axes)
        with nn.parameter_scope('Mul2-{}'.format(func_id)):
            if (not record_layers):
                h = recorder()(h, axes=axes)
                h = (recorder()(a, axes=axes) * h)
            else:
                h = (a * h)
        func_id += 1
        with nn.parameter_scope('Add2-{}'.format(func_id)):
            if (not record_layers):
                h = recorder()(h, axes=axes)
                h = (h + recorder()(b, axes=axes))
            else:
                h = (h + b)
        func_id += 1
    with nn.parameter_scope('ReLU-{}'.format(func_id)):
        if (not record_layers):
            h = recorder()(h, axes=axes)
        h = F.relu(h)
    func_id += 1
    with nn.parameter_scope('MaxPooling-{}'.format(func_id)):
        if (not record_layers):
            h = recorder()(h, axes=axes)
        h = F.max_pooling(h, (2, 2), channel_last=channel_last)
    func_id += 1
    # Four residual blocks at 16 channels each.
    h = nonqnn_to_recording_resblock(h, config, maps=16, test=test, channel_last=channel_last, bn_self_folding=bn_self_folding, record_layers=record_layers, name='cb1')
    h = nonqnn_to_recording_resblock(h, config, maps=16, test=test, channel_last=channel_last, bn_self_folding=bn_self_folding, record_layers=record_layers, name='cb2')
    h = nonqnn_to_recording_resblock(h, config, maps=16, test=test, channel_last=channel_last, bn_self_folding=bn_self_folding, record_layers=record_layers, name='cb3')
    h = nonqnn_to_recording_resblock(h, config, maps=16, test=test, channel_last=channel_last, bn_self_folding=bn_self_folding, record_layers=record_layers, name='cb4')
    with nn.parameter_scope('AveragePooling-16'):
        if (not record_layers):
            h = recorder()(h, axes=axes)
        h = F.average_pooling(h, (2, 2), channel_last=channel_last)
    with nn.parameter_scope('fc'):
        h = recorder()(h, axes=axes)
        (w, b) = create_affine_weight_bias(h, 10, name=name)
        w = recorder_weight()(w, axes=axes, name='w')
        b = recorder_weight()(b, axes=axes, name='b')
        pred = F.affine(h, w, b)
    return pred
class Supervised(NamedTuple):
    """Hyper-parameters and driver for standard supervised training of a classifier.

    The tuple fields are the training configuration; `train`/`train_iter`
    run the loop itself, optionally with mixed precision and data
    parallelism across `devices`.
    """
    # Factory building the optimizer from a list of param-group dicts.
    model_optimizer_ctor: Callable[..., torch.optim.Optimizer]
    # Factory building an LR scheduler from the optimizer.
    lr_scheduler_ctor: Callable
    # Factory wrapping the model in an EMA parameter averager.
    param_avg_ctor: Callable[..., EMA]
    # Total number of optimizer steps (not epochs).
    num_iters: int
    batch_size: int
    # DataLoader worker processes.
    num_workers: int
    # Enables torch.cuda.amp autocast + GradScaler when True.
    mixed_precision: bool
    # Devices for data_parallel; devices[0] hosts the master copy.
    devices: Sequence[Union[torch.device, str]]

    class Stats(NamedTuple):
        """Per-iteration training statistics yielded by `train_iter`."""
        iter: int
        loss: float
        model: Classifier
        avg_model: Classifier
        optimizer: torch.optim.Optimizer
        scheduler: Any

    def train(self, model: Classifier, dataset: Dataset):
        """Run the full training loop, discarding the per-iteration Stats.

        Returns whatever `train_iter` returns on completion (via
        `expand_generator(..., return_only=True)`).
        """
        return expand_generator(self.train_iter(model, dataset), return_only=True)

    def train_iter(self, model: Classifier, dataset: Dataset) -> Generator[Stats, None, Any]:
        """Train `model` on `dataset`, yielding a `Stats` after every step."""
        # Sample with replacement so the loader yields exactly num_iters batches.
        sampler = BatchSampler(RandomSampler(dataset, replacement=True, num_samples=(self.num_iters * self.batch_size)), batch_size=self.batch_size, drop_last=True)
        # Re-seed numpy per worker so data augmentation differs across workers.
        loader = DataLoader(dataset, batch_sampler=sampler, num_workers=self.num_workers, worker_init_fn=(lambda i: np.random.seed(((torch.initial_seed() % (2 ** 32)) + i))), pin_memory=True)
        model.to(device=self.devices[0])
        param_avg = self.param_avg_ctor(model)
        # Exclude BatchNorm parameters and all biases from weight decay.
        no_wd_filter = (lambda m, k: (isinstance(m, nn.BatchNorm2d) or k.endswith('bias')))
        wd_filter = (lambda m, k: (not no_wd_filter(m, k)))
        optim = self.model_optimizer_ctor([{'params': filter_parameters(model, wd_filter)}, {'params': filter_parameters(model, no_wd_filter), 'weight_decay': 0.0}])
        scheduler = self.lr_scheduler_ctor(optim)
        scaler = torch.cuda.amp.GradScaler()
        for (batch_idx, (x, y)) in enumerate(loader):
            # NOTE(review): .cuda() targets the *default* CUDA device — presumably
            # intended to be self.devices[0]; confirm they agree in all configs.
            x = x.cuda(non_blocking=True)
            y = y.cuda(non_blocking=True)
            with torch.cuda.amp.autocast(enabled=self.mixed_precision):
                out = torch.nn.parallel.data_parallel(model, x, module_kwargs={'autocast': self.mixed_precision}, device_ids=self.devices)
                loss = F.cross_entropy(out, y, reduction='mean')
            model.zero_grad()
            if self.mixed_precision:
                # Scale the loss to avoid fp16 gradient underflow.
                scaler.scale(loss).backward()
                scaler.step(optim)
                scaler.update()
            else:
                loss.backward()
                optim.step()
            param_avg.step()
            scheduler.step()
            (yield self.Stats(iter=(batch_idx + 1), loss=loss.cpu().item(), model=model, avg_model=param_avg.avg_model, optimizer=optim, scheduler=scheduler))
class NumberFieldIsomorphism(Map):
    """A bijective map (isomorphism) between number fields.

    Since an isomorphism is by definition a bijection, the injectivity
    and surjectivity predicates are constant.
    """

    def _repr_type(self):
        """Type label used by the generic ``Map`` repr."""
        return 'Isomorphism'

    def is_injective(self):
        """Every isomorphism is injective."""
        return True

    def is_surjective(self):
        """Every isomorphism is surjective."""
        return True
def row_containing_sym(L, c, x):
    """Return the index of one of the first two rows of matrix ``L`` whose
    entry in column ``c`` equals ``x``, chosen at random via ``coin()``.

    Asserts that at least two such rows exist.
    """
    hits = []
    for row in range(L.nrows()):
        if L[row, c] == x:
            hits.append(row)
            if len(hits) == 2:
                # Only the first two matching rows are ever needed.
                break
    assert len(hits) == 2
    # coin() picks uniformly between the two candidates.
    return hits[0] if coin() else hits[1]
def cos_sim(vector_a, vector_b):
    """Return the cosine similarity of two arrays, rescaled to [0, 1].

    Both inputs are flattened before comparison, so any shapes with the
    same number of elements are accepted.  If either vector is all-zero
    the cosine is undefined; the 0/0 NaN is mapped to 0, giving a
    similarity of 0.5 (the "orthogonal" midpoint).

    Fix: the original built ``np.mat`` objects (``numpy.matrix`` is
    deprecated); this version uses plain arrays and ``np.dot``.

    Args:
        vector_a: array-like of numbers.
        vector_b: array-like with the same number of elements.

    Returns:
        float in [0, 1]; 1 means identical direction, 0 opposite,
        0.5 orthogonal (or undefined).
    """
    a = np.asarray(vector_a, dtype=float).reshape(-1)
    b = np.asarray(vector_b, dtype=float).reshape(-1)
    num = float(np.dot(a, b))
    denom = np.linalg.norm(a) * np.linalg.norm(b)
    with np.errstate(invalid='ignore'):
        # 0/0 -> NaN -> 0 when either vector has zero norm.
        cos = np.nan_to_num(num / denom)
    sim = 0.5 + 0.5 * cos
    return sim
class FlatModule():
    """Thin adapter that exposes the global parameter scope as a module.

    Fix: ``get_parameters`` was declared without ``self``, so calling it
    on an instance bound the instance to ``recursive`` and shifted every
    other argument by one.
    """

    def __init__(self, name):
        # Fall back to the generic name 'model' for empty/None names.
        self.name = name if name else 'model'

    def get_parameters(self, recursive=True, grad_only=False, memo=None):
        """Return all parameters from the global ``nn`` parameter scope.

        The arguments mirror the framework's Module API but are ignored
        here because the flat module always returns the full scope.
        """
        return nn.get_parameters()

    def get_path_name(self):
        """Return the scope path: the module's name, or '' for the root 'model'."""
        return self.name if self.name != 'model' else ''
def _seg_71():
    """Return one segment of a generated Unicode/IDNA codepoint mapping table.

    Each entry is ``(codepoint, status[, mapping])`` where 'X' = disallowed,
    'V' = valid, 'M' = mapped, '3' = mapped/disallowed_STD3.
    NOTE(review): every 'M' entry here maps to an empty string ``u''``, which
    looks like the original non-ASCII replacement characters were stripped
    during extraction — verify against the table generator's source data.
    """
    return [(126571, 'X'), (126572, 'M', u''), (126573, 'M', u''), (126574, 'M', u''), (126575, 'M', u''), (126576, 'M', u''), (126577, 'M', u''), (126578, 'M', u''), (126579, 'X'), (126580, 'M', u''), (126581, 'M', u''), (126582, 'M', u''), (126583, 'M', u''), (126584, 'X'), (126585, 'M', u''), (126586, 'M', u''), (126587, 'M', u''), (126588, 'M', u''), (126589, 'X'), (126590, 'M', u''), (126591, 'X'), (126592, 'M', u''), (126593, 'M', u''), (126594, 'M', u''), (126595, 'M', u''), (126596, 'M', u''), (126597, 'M', u''), (126598, 'M', u''), (126599, 'M', u''), (126600, 'M', u''), (126601, 'M', u''), (126602, 'X'), (126603, 'M', u''), (126604, 'M', u''), (126605, 'M', u''), (126606, 'M', u''), (126607, 'M', u''), (126608, 'M', u''), (126609, 'M', u''), (126610, 'M', u''), (126611, 'M', u''), (126612, 'M', u''), (126613, 'M', u''), (126614, 'M', u''), (126615, 'M', u''), (126616, 'M', u''), (126617, 'M', u''), (126618, 'M', u''), (126619, 'M', u''), (126620, 'X'), (126625, 'M', u''), (126626, 'M', u''), (126627, 'M', u''), (126628, 'X'), (126629, 'M', u''), (126630, 'M', u''), (126631, 'M', u''), (126632, 'M', u''), (126633, 'M', u''), (126634, 'X'), (126635, 'M', u''), (126636, 'M', u''), (126637, 'M', u''), (126638, 'M', u''), (126639, 'M', u''), (126640, 'M', u''), (126641, 'M', u''), (126642, 'M', u''), (126643, 'M', u''), (126644, 'M', u''), (126645, 'M', u''), (126646, 'M', u''), (126647, 'M', u''), (126648, 'M', u''), (126649, 'M', u''), (126650, 'M', u''), (126651, 'M', u''), (126652, 'X'), (126704, 'V'), (126706, 'X'), (126976, 'V'), (127020, 'X'), (127024, 'V'), (127124, 'X'), (127136, 'V'), (127151, 'X'), (127153, 'V'), (127168, 'X'), (127169, 'V'), (127184, 'X'), (127185, 'V'), (127222, 'X'), (127233, '3', u'0,'), (127234, '3', u'1,'), (127235, '3', u'2,'), (127236, '3', u'3,'), (127237, '3', u'4,'), (127238, '3', u'5,'), (127239, '3', u'6,'), (127240, '3', u'7,')]
def build_argparse():
    """Build the argument parser for the sentiment-classifier training CLI.

    Returns an ``argparse.ArgumentParser`` covering training control,
    model saving/loading, data files, model architecture, optimization,
    embeddings/charlm/elmo/bert inputs, constituency options and logging.
    """
    parser = argparse.ArgumentParser()
    # --- training control ---
    parser.add_argument('--train', dest='train', default=True, action='store_true', help='Train the model (default)')
    parser.add_argument('--no_train', dest='train', action='store_false', help="Don't train the model")
    parser.add_argument('--shorthand', type=str, default='en_ewt', help="Treebank shorthand, eg 'en' for English")
    # --- model load/save ---
    parser.add_argument('--load_name', type=str, default=None, help='Name for loading an existing model')
    parser.add_argument('--save_dir', type=str, default='saved_models/classifier', help='Root dir for saving models.')
    parser.add_argument('--save_name', type=str, default=None, help='Name for saving the model')
    parser.add_argument('--base_name', type=str, default='sst', help='Base name of the model to use when building a model name from args')
    parser.add_argument('--checkpoint_save_name', type=str, default=None, help='File name to save the most recent checkpoint')
    parser.add_argument('--no_checkpoint', dest='checkpoint', action='store_false', help="Don't save checkpoints")
    parser.add_argument('--save_intermediate_models', default=False, action='store_true', help='Save all intermediate models - this can be a lot!')
    # --- datasets ---
    parser.add_argument('--train_file', type=str, default=DEFAULT_TRAIN, help='Input file(s) to train a model from. Each line is an example. Should go <label> <tokenized sentence>. Comma separated list.')
    parser.add_argument('--dev_file', type=str, default=DEFAULT_DEV, help='Input file(s) to use as the dev set.')
    parser.add_argument('--test_file', type=str, default=DEFAULT_TEST, help='Input file(s) to use as the test set.')
    parser.add_argument('--output_predictions', default=False, action='store_true', help='Output predictions when running the test set')
    # --- training schedule / architecture ---
    parser.add_argument('--max_epochs', type=int, default=100)
    parser.add_argument('--tick', type=int, default=2000)
    parser.add_argument('--model_type', type=(lambda x: ModelType[x.upper()]), default=ModelType.CNN, help=('Model type to use. Options: %s' % ' '.join((x.name for x in ModelType))))
    parser.add_argument('--filter_sizes', default=(3, 4, 5), type=ast.literal_eval, help='Filter sizes for the layer after the word vectors')
    parser.add_argument('--filter_channels', default=1000, type=ast.literal_eval, help='Number of channels for layers after the word vectors. Int for same number of channels (scaled by width) for each filter, or tuple/list for exact lengths for each filter')
    parser.add_argument('--fc_shapes', default='400,100', type=convert_fc_shapes, help='Extra fully connected layers to put after the initial filters. If set to blank, will FC directly from the max pooling to the output layer.')
    parser.add_argument('--dropout', default=0.5, type=float, help='Dropout value to use')
    parser.add_argument('--batch_size', default=50, type=int, help='Batch size when training')
    parser.add_argument('--dev_eval_steps', default=100000, type=int, help='Run the dev set after this many train steps. Set to 0 to only do it once per epoch')
    parser.add_argument('--dev_eval_scoring', type=(lambda x: DevScoring[x.upper()]), default=DevScoring.WEIGHTED_F1, help=('Scoring method to use for choosing the best model. Options: %s' % ' '.join((x.name for x in DevScoring))))
    # --- optimizer ---
    parser.add_argument('--weight_decay', default=None, type=float, help='Weight decay (eg, l2 reg) to use in the optimizer')
    parser.add_argument('--learning_rate', default=None, type=float, help='Learning rate to use in the optimizer')
    parser.add_argument('--momentum', default=None, type=float, help='Momentum to use in the optimizer')
    parser.add_argument('--optim', default='adadelta', choices=['adadelta', 'madgrad', 'sgd'], help='Optimizer type: SGD, Adadelta, or madgrad. Highly recommend to install madgrad and use that')
    # --- label remapping and loss ---
    parser.add_argument('--test_remap_labels', default=None, type=ast.literal_eval, help='Map of which label each classifier label should map to. For example, "{0:0, 1:0, 3:1, 4:1}" to map a 5 class sentiment test to a 2 class. Any labels not mapped will be considered wrong')
    parser.add_argument('--forgive_unmapped_labels', dest='forgive_unmapped_labels', default=True, action='store_true', help='When remapping labels, such as from 5 class to 2 class, pick a different label if the first guess is not remapped.')
    parser.add_argument('--no_forgive_unmapped_labels', dest='forgive_unmapped_labels', action='store_false', help="When remapping labels, such as from 5 class to 2 class, DON'T pick a different label if the first guess is not remapped.")
    parser.add_argument('--loss', type=(lambda x: Loss[x.upper()]), default=Loss.CROSS, help='Whether to use regular cross entropy or scale it by 1/log(quantity)')
    parser.add_argument('--loss_focal_gamma', default=2, type=float, help='gamma value for a focal loss')
    parser.add_argument('--min_train_len', type=int, default=0, help='Filter sentences less than this length')
    # --- word vectors ---
    parser.add_argument('--pretrain_max_vocab', type=int, default=(- 1))
    parser.add_argument('--wordvec_pretrain_file', type=str, default=None, help='Exact name of the pretrain file to read')
    parser.add_argument('--wordvec_raw_file', type=str, default=None, help='Exact name of the raw wordvec file to read')
    parser.add_argument('--wordvec_dir', type=str, default='extern_data/wordvec', help='Directory of word vectors')
    parser.add_argument('--wordvec_type', type=(lambda x: WVType[x.upper()]), default='word2vec', help='Different vector types have different options, such as google 300d replacing numbers with #')
    parser.add_argument('--extra_wordvec_dim', type=int, default=0, help='Extra dim of word vectors - will be trained')
    parser.add_argument('--extra_wordvec_method', type=(lambda x: ExtraVectors[x.upper()]), default='sum', help='How to train extra dimensions of word vectors, if at all')
    parser.add_argument('--extra_wordvec_max_norm', type=float, default=None, help='Max norm for initializing the extra vectors')
    # --- character LM / elmo / bert inputs ---
    parser.add_argument('--charlm_forward_file', type=str, default=None, help='Exact path to use for forward charlm')
    parser.add_argument('--charlm_backward_file', type=str, default=None, help='Exact path to use for backward charlm')
    parser.add_argument('--charlm_projection', type=int, default=None, help='Project the charlm values to this dimension')
    parser.add_argument('--char_lowercase', dest='char_lowercase', action='store_true', help='Use lowercased characters in character model.')
    parser.add_argument('--elmo_model', default='extern_data/manyelmo/english', help='Directory with elmo model')
    parser.add_argument('--use_elmo', dest='use_elmo', default=False, action='store_true', help='Use an elmo model as a source of parameters')
    parser.add_argument('--elmo_projection', type=int, default=None, help='Project elmo to this many dimensions')
    parser.add_argument('--bert_model', type=str, default=None, help='Use an external bert model (requires the transformers package)')
    parser.add_argument('--no_bert_model', dest='bert_model', action='store_const', const=None, help="Don't use bert")
    # --- bilstm / pooling ---
    parser.add_argument('--bilstm', dest='bilstm', action='store_true', default=True, help='Use a bilstm after the inputs, before the convs. Using bilstm is about as accurate and significantly faster (because of dim reduction) than going straight to the filters')
    parser.add_argument('--no_bilstm', dest='bilstm', action='store_false', help="Don't use a bilstm after the inputs, before the convs.")
    parser.add_argument('--bilstm_hidden_dim', type=int, default=300, help='Dimension of the bilstm to use')
    parser.add_argument('--maxpool_width', type=int, default=1, help='Width of the maxpool kernel to use')
    # --- constituency-parser features ---
    parser.add_argument('--no_constituency_backprop', dest='constituency_backprop', default=True, action='store_false', help="When using a constituency parser, backprop into the parser's weights if True")
    parser.add_argument('--constituency_model', type=str, default='/home/john/stanza_resources/it/constituency/vit_bert.pt', help='Which constituency model to use. TODO: make this more user friendly')
    parser.add_argument('--constituency_batch_norm', default=False, action='store_true', help='Add a LayerNorm between the output of the parser and the classifier layers')
    parser.add_argument('--constituency_node_attn', default=False, action='store_true', help='True means to make an attn layer out of the tree, with the words as key and nodes as query')
    parser.add_argument('--no_constituency_node_attn', dest='constituency_node_attn', action='store_false', help='True means to make an attn layer out of the tree, with the words as key and nodes as query')
    parser.add_argument('--constituency_top_layer', dest='constituency_top_layer', default=False, action='store_true', help='True means use the top (ROOT) layer of the constituents. Otherwise, the next layer down (S, usually) will be used')
    parser.add_argument('--no_constituency_top_layer', dest='constituency_top_layer', action='store_false', help='True means use the top (ROOT) layer of the constituents. Otherwise, the next layer down (S, usually) will be used')
    parser.add_argument('--constituency_all_words', default=False, action='store_true', help='Use all word positions in the constituency classifier')
    parser.add_argument('--no_constituency_all_words', dest='constituency_all_words', default=False, action='store_false', help='Use the start and end word embeddings as inputs to the constituency classifier')
    # --- logging ---
    parser.add_argument('--log_norms', default=False, action='store_true', help='Log the parameters norms while training. A very noisy option')
    parser.add_argument('--wandb', action='store_true', help='Start a wandb session and write the results of training. Only applies to training. Use --wandb_name instead to specify a name')
    parser.add_argument('--wandb_name', default=None, help='Name of a wandb session to start when training. Will default to the dataset short name')
    parser.add_argument('--seed', default=None, type=int, help='Random seed for model')
    utils.add_device_args(parser)
    return parser
def main(args):
    """Preprocess/binarize parallel text data for translation training.

    Builds (or loads) source/target dictionaries, binarizes the
    train/valid/test splits (optionally with multiprocessing workers),
    and optionally derives a word alignment dictionary from --alignfile.
    All outputs are written under ``args.destdir``.
    """
    utils.import_user_module(args)
    print(args)
    os.makedirs(args.destdir, exist_ok=True)
    # target is False when preprocessing a monolingual (source-only) corpus.
    target = (not args.only_source)
    task = tasks.get_task(args.task)

    def train_path(lang):
        """Path of the training file for `lang` (e.g. trainpref.en)."""
        return '{}{}'.format(args.trainpref, (('.' + lang) if lang else ''))

    def file_name(prefix, lang):
        """Append the language suffix to `prefix` when given."""
        fname = prefix
        if (lang is not None):
            fname += '.{lang}'.format(lang=lang)
        return fname

    def dest_path(prefix, lang):
        """Output path under destdir for `prefix`/`lang`."""
        return os.path.join(args.destdir, file_name(prefix, lang))

    def dict_path(lang):
        """Path of the dictionary file for `lang`."""
        return (dest_path('dict', lang) + '.txt')

    def build_dictionary(filenames, src=False, tgt=False):
        """Build a dictionary from `filenames` for exactly one side (src XOR tgt)."""
        assert (src ^ tgt)
        return task.build_dictionary(filenames, workers=args.workers, threshold=(args.thresholdsrc if src else args.thresholdtgt), nwords=(args.nwordssrc if src else args.nwordstgt), padding_factor=args.padding_factor)

    # Refuse to clobber existing dictionaries unless an explicit one was given.
    if ((not args.srcdict) and os.path.exists(dict_path(args.source_lang))):
        raise FileExistsError(dict_path(args.source_lang))
    if (target and (not args.tgtdict) and os.path.exists(dict_path(args.target_lang))):
        raise FileExistsError(dict_path(args.target_lang))
    if args.joined_dictionary:
        # One shared vocabulary for both languages.
        assert ((not args.srcdict) or (not args.tgtdict)), 'cannot use both --srcdict and --tgtdict with --joined-dictionary'
        if args.srcdict:
            src_dict = task.load_dictionary(args.srcdict)
        elif args.tgtdict:
            src_dict = task.load_dictionary(args.tgtdict)
        else:
            assert args.trainpref, '--trainpref must be set if --srcdict is not specified'
            src_dict = build_dictionary({train_path(lang) for lang in [args.source_lang, args.target_lang]}, src=True)
        tgt_dict = src_dict
    else:
        if args.srcdict:
            src_dict = task.load_dictionary(args.srcdict)
        else:
            assert args.trainpref, '--trainpref must be set if --srcdict is not specified'
            src_dict = build_dictionary([train_path(args.source_lang)], src=True)
        if target:
            if args.tgtdict:
                tgt_dict = task.load_dictionary(args.tgtdict)
            else:
                assert args.trainpref, '--trainpref must be set if --tgtdict is not specified'
                tgt_dict = build_dictionary([train_path(args.target_lang)], tgt=True)
        else:
            tgt_dict = None
    src_dict.save(dict_path(args.source_lang))
    if (target and (tgt_dict is not None)):
        tgt_dict.save(dict_path(args.target_lang))

    def make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers):
        """Binarize one split with `vocab`, fanning out to worker processes."""
        print('| [{}] Dictionary: {} types'.format(lang, (len(vocab) - 1)))
        # n_seq_tok accumulates [num sequences, num tokens] across workers.
        n_seq_tok = [0, 0]
        replaced = Counter()

        def merge_result(worker_result):
            # Fold one worker's counts into the shared accumulators.
            replaced.update(worker_result['replaced'])
            n_seq_tok[0] += worker_result['nseq']
            n_seq_tok[1] += worker_result['ntok']
        input_file = '{}{}'.format(input_prefix, (('.' + lang) if (lang is not None) else ''))
        # Split the input file into byte ranges, one per worker.
        offsets = Binarizer.find_offsets(input_file, num_workers)
        pool = None
        if (num_workers > 1):
            pool = Pool(processes=(num_workers - 1))
            for worker_id in range(1, num_workers):
                prefix = '{}{}'.format(output_prefix, worker_id)
                pool.apply_async(binarize, (args, input_file, vocab, prefix, lang, offsets[worker_id], offsets[(worker_id + 1)]), callback=merge_result)
            pool.close()
        ds = indexed_dataset.make_builder(dataset_dest_file(args, output_prefix, lang, 'bin'), impl=args.dataset_impl, vocab_size=len(vocab))
        # The main process binarizes the first chunk itself.
        merge_result(Binarizer.binarize(input_file, vocab, (lambda t: ds.add_item(t)), offset=0, end=offsets[1]))
        if (num_workers > 1):
            pool.join()
            # Merge worker shards into the main dataset, then delete them.
            for worker_id in range(1, num_workers):
                prefix = '{}{}'.format(output_prefix, worker_id)
                temp_file_path = dataset_dest_prefix(args, prefix, lang)
                ds.merge_file_(temp_file_path)
                os.remove(indexed_dataset.data_file_path(temp_file_path))
                os.remove(indexed_dataset.index_file_path(temp_file_path))
        ds.finalize(dataset_dest_file(args, output_prefix, lang, 'idx'))
        print('| [{}] {}: {} sents, {} tokens, {:.3}% replaced by {}'.format(lang, input_file, n_seq_tok[0], n_seq_tok[1], ((100 * sum(replaced.values())) / n_seq_tok[1]), vocab.unk_word))

    def make_dataset(vocab, input_prefix, output_prefix, lang, num_workers=1):
        """Either copy the raw text or binarize it, per --dataset-impl."""
        if (args.dataset_impl == 'raw'):
            output_text_file = dest_path((output_prefix + '.{}-{}'.format(args.source_lang, args.target_lang)), lang)
            shutil.copyfile(file_name(input_prefix, lang), output_text_file)
        else:
            make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers)

    def make_all(lang, vocab):
        """Process train plus all comma-separated valid/test prefixes for `lang`."""
        if args.trainpref:
            make_dataset(vocab, args.trainpref, 'train', lang, num_workers=args.workers)
        if args.validpref:
            for (k, validpref) in enumerate(args.validpref.split(',')):
                outprefix = ('valid{}'.format(k) if (k > 0) else 'valid')
                make_dataset(vocab, validpref, outprefix, lang, num_workers=args.workers)
        if args.testpref:
            for (k, testpref) in enumerate(args.testpref.split(',')):
                outprefix = ('test{}'.format(k) if (k > 0) else 'test')
                make_dataset(vocab, testpref, outprefix, lang, num_workers=args.workers)
    make_all(args.source_lang, src_dict)
    if target:
        make_all(args.target_lang, tgt_dict)
    print('| Wrote preprocessed data to {}'.format(args.destdir))
    if args.alignfile:
        # Derive a src->tgt word alignment dictionary from the aligned training data.
        assert args.trainpref, '--trainpref must be set if --alignfile is specified'
        src_file_name = train_path(args.source_lang)
        tgt_file_name = train_path(args.target_lang)
        freq_map = {}
        with open(args.alignfile, 'r', encoding='utf-8') as align_file:
            with open(src_file_name, 'r', encoding='utf-8') as src_file:
                with open(tgt_file_name, 'r', encoding='utf-8') as tgt_file:
                    for (a, s, t) in zip_longest(align_file, src_file, tgt_file):
                        si = src_dict.encode_line(s, add_if_not_exist=False)
                        ti = tgt_dict.encode_line(t, add_if_not_exist=False)
                        # Alignment lines look like "0-1 2-2 ..." (src-tgt index pairs).
                        ai = list(map((lambda x: tuple(x.split('-'))), a.split()))
                        for (sai, tai) in ai:
                            srcidx = si[int(sai)]
                            tgtidx = ti[int(tai)]
                            if ((srcidx != src_dict.unk()) and (tgtidx != tgt_dict.unk())):
                                assert (srcidx != src_dict.pad())
                                assert (srcidx != src_dict.eos())
                                assert (tgtidx != tgt_dict.pad())
                                assert (tgtidx != tgt_dict.eos())
                                if (srcidx not in freq_map):
                                    freq_map[srcidx] = {}
                                if (tgtidx not in freq_map[srcidx]):
                                    freq_map[srcidx][tgtidx] = 1
                                else:
                                    freq_map[srcidx][tgtidx] += 1
        # Keep only the most frequent target word for each source word.
        align_dict = {}
        for srcidx in freq_map.keys():
            align_dict[srcidx] = max(freq_map[srcidx], key=freq_map[srcidx].get)
        with open(os.path.join(args.destdir, 'alignment.{}-{}.txt'.format(args.source_lang, args.target_lang)), 'w', encoding='utf-8') as f:
            for (k, v) in align_dict.items():
                print('{} {}'.format(src_dict[k], tgt_dict[v]), file=f)
class TopLegendRenderer():
    """Renders a horizontal legend strip that wraps onto new lines.

    Each legend entry is a small colored swatch followed by its label.
    `layout(width)` must be called before `draw(ctx)`/`get_height()`.
    """

    def __init__(self):
        self.__padding = 10

    def set_padding(self, padding):
        self.__padding = padding

    def set_legends(self, legends, colors):
        self.__legends = legends
        self.__colors = colors

    def layout(self, width):
        """Compute the total strip height for the given pixel width."""
        self.__width = width
        pad = self.__padding
        # Throwaway 1x1 surface: we only need a context to measure text.
        ctx = cairo.Context(cairo.ImageSurface(cairo.FORMAT_ARGB32, 1, 1))
        tallest = 0
        used = pad
        total = pad
        for label in self.__legends:
            (tw, th) = ctx.text_extents(label)[2:4]
            item_w = ((pad + pad) + tw) + pad
            item_h = th + pad
            # NOTE(review): the tallest-item height is never reset per line,
            # mirroring draw(); preserved as-is.
            if item_h > tallest:
                tallest = item_h
            if used + item_w > self.__width:
                # Wrap: start a new line and account for the finished one.
                used = pad + item_w
                total += tallest
            else:
                used += item_w
        # Account for the final (unfinished) line.
        total += tallest
        self.__height = total

    def get_height(self):
        return self.__height

    def draw(self, ctx):
        """Draw swatches and labels, repeating layout()'s wrapping walk."""
        pad = self.__padding
        tallest = 0
        used = pad
        total = pad
        for (idx, label) in enumerate(self.__legends):
            (tw, th) = ctx.text_extents(label)[2:4]
            item_w = ((pad + pad) + tw) + pad
            item_h = th + pad
            if item_h > tallest:
                tallest = item_h
            if used + item_w > self.__width:
                used = pad + item_w
                total += tallest
            else:
                used += item_w
            x = used - item_w
            # Outlined color swatch.
            ctx.rectangle(x, total, pad, pad)
            ctx.set_source_rgb(0, 0, 0)
            ctx.set_line_width(2)
            ctx.stroke_preserve()
            color = self.__colors[idx]
            ctx.set_source_rgb(color.r, color.g, color.b)
            ctx.fill()
            # Label to the right of the swatch.
            ctx.move_to(x + (pad * 2), total + th)
            ctx.set_source_rgb(0, 0, 0)
            ctx.show_text(label)
def test_indexnet_encoder():
    """Exercise IndexNetEncoder configs and check all output shapes."""
    # Invalid configurations must fail loudly.
    with pytest.raises(ValueError):
        IndexNetEncoder(4, out_stride=8)
    with pytest.raises(NameError):
        IndexNetEncoder(4, index_mode='unknown_mode')

    def _check(outputs, out_shape, shortcut_shapes, idx_feat_shapes):
        # Shared shape assertions for one encoder configuration.
        assert (outputs['out'].shape == out_shape)
        assert (len(outputs['shortcuts']) == 7)
        for (shortcut, expected) in zip(outputs['shortcuts'], shortcut_shapes):
            assert (shortcut.shape == expected)
        assert (len(outputs['dec_idx_feat_list']) == 7)
        for (idx_feat, expected) in zip(outputs['dec_idx_feat_list'], idx_feat_shapes):
            if (idx_feat is not None):
                assert (idx_feat.shape == expected)

    # m2o mode with ASPP, nonlinearity and context.
    encoder = IndexNetEncoder(4, out_stride=32, width_mult=1, index_mode='m2o', aspp=True, use_nonlinear=True, use_context=True)
    encoder.init_weights()
    _check(
        encoder(torch.rand(2, 4, 32, 32)),
        (2, 160, 1, 1),
        [(2, 32, 32, 32), (2, 16, 16, 16), (2, 24, 16, 16), (2, 32, 8, 8), (2, 64, 4, 4), (2, 96, 2, 2), (2, 160, 2, 2)],
        [(2, 32, 32, 32), None, (2, 24, 16, 16), (2, 32, 8, 8), (2, 64, 4, 4), None, (2, 160, 2, 2)])

    # o2o mode, double width, no ASPP/nonlinear/context.
    encoder = IndexNetEncoder(4, out_stride=16, width_mult=2, index_mode='o2o', aspp=False, use_nonlinear=False, use_context=False)
    encoder.init_weights()
    _check(
        encoder(torch.rand(2, 4, 32, 32)),
        (2, 160, 2, 2),
        [(2, 64, 32, 32), (2, 32, 16, 16), (2, 48, 16, 16), (2, 64, 8, 8), (2, 128, 4, 4), (2, 192, 2, 2), (2, 320, 2, 2)],
        [(2, 64, 32, 32), None, (2, 48, 16, 16), (2, 64, 8, 8), (2, 128, 4, 4), None, None])

    # holistic mode with frozen BN: index features collapse to 1 channel.
    encoder = IndexNetEncoder(4, out_stride=16, width_mult=2, index_mode='holistic', aspp=False, freeze_bn=True, use_nonlinear=False, use_context=False)
    encoder.init_weights()
    _check(
        encoder(torch.rand(2, 4, 32, 32)),
        (2, 160, 2, 2),
        [(2, 64, 32, 32), (2, 32, 16, 16), (2, 48, 16, 16), (2, 64, 8, 8), (2, 128, 4, 4), (2, 192, 2, 2), (2, 320, 2, 2)],
        [(2, 1, 32, 32), None, (2, 1, 16, 16), (2, 1, 8, 8), (2, 1, 4, 4), None, None])
@DETECTORS.register_module()  # NOTE(review): decorator was garbled to bare `_module()`; restored per mmdet convention — confirm registry name.
class FastRCNN(TwoStageDetector):
    """Implementation of `Fast R-CNN <https://arxiv.org/abs/1504.08083>`_."""

    def __init__(self, backbone, roi_head, train_cfg, test_cfg, neck=None, pretrained=None, init_cfg=None):
        super(FastRCNN, self).__init__(backbone=backbone, neck=neck, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg)

    def forward_test(self, imgs, img_metas, proposals, **kwargs):
        """Run inference with externally supplied proposals.

        Args:
            imgs (list): one image batch per augmentation.
            img_metas (list): per-augmentation image metadata.
            proposals (list): per-augmentation precomputed proposals.

        Raises:
            TypeError: if imgs or img_metas is not a list.
            ValueError: if imgs and img_metas disagree in length.
            NotImplementedError: for multi-augmentation (aug) testing.
        """
        for (var, name) in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
            if not isinstance(var, list):
                raise TypeError(f'{name} must be a list, but got {type(var)}')
        num_augs = len(imgs)
        if num_augs != len(img_metas):
            raise ValueError(f'num of augmentations ({len(imgs)}) != num of image meta ({len(img_metas)})')
        if num_augs == 1:
            return self.simple_test(imgs[0], img_metas[0], proposals[0], **kwargs)
        # Bug fix: the original `assert NotImplementedError` always passed
        # (a class object is truthy) and then fell through returning None.
        raise NotImplementedError('aug test with precomputed proposals is not supported')
def map_dtype(dtype: DType) -> DType:
    """Refine a bare dtype to its default concrete subtype.

    A plain Categorical (not already Ordinal/Nominal) becomes Nominal;
    a plain Numerical (not already Continuous/Discrete) becomes
    Continuous. Anything else passes through unchanged.
    """
    bare_categorical = isinstance(dtype, Categorical) and not isinstance(dtype, (Ordinal, Nominal))
    if bare_categorical:
        return Nominal()
    bare_numerical = isinstance(dtype, Numerical) and not isinstance(dtype, (Continuous, Discrete))
    if bare_numerical:
        return Continuous()
    return dtype
def get_filter_valid_roadnetwork_keys():
    """Return the roadgraph feature keys needed to filter valid road-network samples."""
    return [
        'roadgraph_samples/xyz',
        'roadgraph_samples/id',
        'roadgraph_samples/type',
        'roadgraph_samples/valid',
    ]
# NOTE(review): the two decorator lines were garbled to bare `.parametrize(...)`
# (a syntax error); restored as `@pytest.mark.parametrize` — confirm against the
# original test suite.
@pytest.mark.parametrize('ctx, func_name', ctxs)
@pytest.mark.parametrize('seed', [313])
def test_sigmoid_forward_backward(seed, ctx, func_name):
    """Check F.sigmoid forward/backward against the NumPy reference `ref_sigmoid`."""
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    # Scale inputs by 2 to cover a wider range of the sigmoid curve.
    inputs = [(rng.randn(2, 3, 4).astype(np.float32) * 2)]
    function_tester(rng, F.sigmoid, ref_sigmoid, inputs, ctx=ctx, func_name=func_name)
class LakhMIDIDataset(RemoteFolderDataset):
    """Lakh MIDI Dataset (lmd_full): a large collection of MIDI files."""
    _info = DatasetInfo(_NAME, _DESCRIPTION, _HOMEPAGE, _LICENSE)
    _citation = _CITATION
    # NOTE(review): the 'url' and 'size' literals were mangled in this file
    # (unterminated string / missing value, a syntax error). The URL is
    # restored from the official Lakh MIDI distribution and the byte size is
    # left unknown — confirm both against the upstream release.
    _sources = {
        'lmd_full': {
            'filename': 'lmd_full.tar.gz',
            'url': 'http://hog.ee.columbia.edu/craffel/lmd/lmd_full.tar.gz',
            'archive': True,
            'size': None,  # TODO(review): original size literal lost in extraction
            'md5': '2536ce3fd2cede53ddaa264f731859ab',
            'sha256': '6fcfe2ac49ca08f3f214cec86ab138d4fc4dabcd7f27f491a838dae6db45a12b',
        }
    }
    _extension = 'mid'

    def read(self, filename: Union[str, Path]) -> Music:
        """Read one MIDI file (relative to the dataset root) into a Music object."""
        return read_midi(self.root / filename)
class PygPCQM4MDataset(InMemoryDataset):
    """PyTorch-Geometric dataset for the (deprecated) OGB-LSC PCQM4M task.

    Converts SMILES strings to molecular graphs with a HOMO-LUMO gap
    regression target. Prefer PCQM4Mv2 for new work.
    """

    def __init__(self, root='dataset', smiles2graph=smiles2graph, transform=None, pre_transform=None):
        """Set up paths, optionally prompt to refresh an outdated copy, and load.

        Args:
            root: root directory; data lives under root/pcqm4m_kddcup2021.
            smiles2graph: callable mapping a SMILES string to a graph dict.
            transform / pre_transform: standard PyG dataset hooks.
        """
        print('The PCQM4M has been deprecated. The leaderboard is no longer maintained.')
        print('Please use PCQM4Mv2 instead.')
        self.original_root = root
        self.smiles2graph = smiles2graph
        self.folder = osp.join(root, 'pcqm4m_kddcup2021')
        self.version = 1
        # NOTE(review): the URL literal was truncated in this file (unterminated
        # string); restored from the official OGB-LSC distribution — confirm.
        self.url = 'http://ogb-data.stanford.edu/data/lsc/pcqm4m_kddcup2021.zip'
        # An existing folder without the matching RELEASE marker is outdated.
        if osp.isdir(self.folder) and (not osp.exists(osp.join(self.folder, f'RELEASE_v{self.version}.txt'))):
            print('PCQM4M dataset has been updated.')
            if input('Will you update the dataset now? (y/N)\n').lower() == 'y':
                shutil.rmtree(self.folder)
        super(PygPCQM4MDataset, self).__init__(self.folder, transform, pre_transform)
        (self.data, self.slices) = torch.load(self.processed_paths[0])

    # NOTE(review): @property restored on the two name accessors (decorators
    # appear stripped throughout this file); InMemoryDataset reads these as
    # attributes — confirm against the original source.
    @property
    def raw_file_names(self):
        return 'data.csv.gz'

    @property
    def processed_file_names(self):
        return 'geometric_data_processed.pt'

    def download(self):
        """Download and extract the archive, or abort if the user declines."""
        if decide_download(self.url):
            path = download_url(self.url, self.original_root)
            extract_zip(path, self.original_root)
            os.unlink(path)
        else:
            print('Stop download.')
            exit(-1)

    def process(self):
        """Convert the raw CSV of SMILES + targets into collated PyG Data."""
        data_df = pd.read_csv(osp.join(self.raw_dir, 'data.csv.gz'))
        smiles_list = data_df['smiles']
        homolumogap_list = data_df['homolumogap']
        print('Converting SMILES strings into graphs...')
        data_list = []
        for i in tqdm(range(len(smiles_list))):
            data = Data()
            smiles = smiles_list[i]
            homolumogap = homolumogap_list[i]
            graph = self.smiles2graph(smiles)
            assert (len(graph['edge_feat']) == graph['edge_index'].shape[1])
            assert (len(graph['node_feat']) == graph['num_nodes'])
            data.__num_nodes__ = int(graph['num_nodes'])
            data.edge_index = torch.from_numpy(graph['edge_index']).to(torch.int64)
            data.edge_attr = torch.from_numpy(graph['edge_feat']).to(torch.int64)
            data.x = torch.from_numpy(graph['node_feat']).to(torch.int64)
            data.y = torch.Tensor([homolumogap])
            data_list.append(data)
        # Sanity check: labels exist for train/valid and are hidden for test.
        split_dict = self.get_idx_split()
        assert all([(not torch.isnan(data_list[i].y)[0]) for i in split_dict['train']])
        assert all([(not torch.isnan(data_list[i].y)[0]) for i in split_dict['valid']])
        assert all([torch.isnan(data_list[i].y)[0] for i in split_dict['test']])
        if self.pre_transform is not None:
            data_list = [self.pre_transform(data) for data in data_list]
        (data, slices) = self.collate(data_list)
        print('Saving...')
        torch.save((data, slices), self.processed_paths[0])

    def get_idx_split(self):
        """Load the official train/valid/test index split as torch tensors."""
        split_dict = replace_numpy_with_torchtensor(torch.load(osp.join(self.root, 'split_dict.pt')))
        return split_dict
class MLPDecoderFM(nn.Module):
    """All-MLP (1x1 conv) decoder that fuses four pyramid levels plus an
    extra feature map, then predicts a segmentation map.

    The first four feature maps are projected to `hidden_channel` and
    upsampled to the first map's resolution; the fifth map (with
    `outfeature_channel` channels) is concatenated as-is before fusion.
    """

    def __init__(self, in_channels, out_channel, hidden_channel=256, outfeature_channel=64, dropout_ratio=0.1, conv_norm=nn.BatchNorm2d):
        super(MLPDecoderFM, self).__init__()
        # Per-level 1x1 projections to a common channel width.
        self.linear1 = nn.Conv2d(in_channels[0], hidden_channel, 1)
        self.linear2 = nn.Conv2d(in_channels[1], hidden_channel, 1)
        self.linear3 = nn.Conv2d(in_channels[2], hidden_channel, 1)
        self.linear4 = nn.Conv2d(in_channels[3], hidden_channel, 1)
        # Fuses the 4 projected levels + the extra feature map.
        self.linear_fuse = nn.Sequential(
            nn.Conv2d((hidden_channel * 4) + outfeature_channel, hidden_channel, 1),
            conv_norm(hidden_channel),
            nn.ReLU(inplace=True),
        )
        self.dropout = nn.Dropout2d(dropout_ratio)
        self.linear_out = nn.Conv2d(hidden_channel, out_channel, 1)
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))

    def forward(self, features, img_size, out_feat=False):
        """Fuse `features` (5-tuple of maps) and upsample to `img_size`.

        Returns the logits map, or ``{'seg': ..., 'feat': ...}`` with a
        globally pooled fused feature when `out_feat` is true.
        """
        (x1, x2, x3, x4, x5) = features
        (h, w) = x1.shape[2:]
        projected = []
        for (proj, level) in zip((self.linear1, self.linear2, self.linear3, self.linear4), (x1, x2, x3, x4)):
            y = proj(level)
            projected.append(nn.functional.interpolate(y, size=(h, w), mode='bilinear', align_corners=False))
        projected.append(x5)
        fused = self.linear_fuse(torch.cat(projected, dim=1))
        if out_feat == True:  # strict comparison kept from the original
            feat = self.avg_pool(fused)
        fused = self.dropout(fused)
        fused = nn.functional.interpolate(fused, size=img_size, mode='bilinear', align_corners=False)
        fused = self.linear_out(fused)
        return {'seg': fused, 'feat': feat} if out_feat else fused
def create_mode_switcher_node_group():
    """Create the 'mode_switcher' shader node group if it does not exist.

    The group mixes a 'Real' shader input with a 'Ground Truth' color input;
    the mix factor is driven by the scene property ``uv_holographics.mode``.
    """
    if 'mode_switcher' in bpy.data.node_groups:
        log('- create_mode_switcher_node_group() : node group already exists')
        return
    group = bpy.data.node_groups.new('mode_switcher', 'ShaderNodeTree')
    inputs_node = group.nodes.new('NodeGroupInput')
    inputs_node.location = (-350, 0)
    group.inputs.new('NodeSocketShader', 'Real')
    group.inputs.new('NodeSocketColor', 'Ground Truth')
    outputs_node = group.nodes.new('NodeGroupOutput')
    outputs_node.location = (300, 0)
    group.outputs.new('NodeSocketShader', 'Switch')
    mix_node = group.nodes.new('ShaderNodeMixShader')
    mix_node.location = (100, 0)
    # Drive the mix factor from the scene-level uv_holographics.mode value.
    driver_fc = bpy.data.node_groups['mode_switcher'].driver_add('nodes["Mix Shader"].inputs[0].default_value')
    driver_fc.driver.expression = 'mode'
    mode_var = driver_fc.driver.variables.new()
    mode_var.name = 'mode'
    mode_var.type = 'SINGLE_PROP'
    mode_var.targets[0].id_type = 'SCENE'
    mode_var.targets[0].id = bpy.data.scenes['Scene']
    mode_var.targets[0].data_path = 'uv_holographics.mode'
    # Wire group inputs through the mix shader to the group output.
    group.links.new(inputs_node.outputs['Real'], mix_node.inputs[1])
    group.links.new(inputs_node.outputs['Ground Truth'], mix_node.inputs[2])
    group.links.new(mix_node.outputs[0], outputs_node.inputs['Switch'])
def ce_loss(logit, y):
    """Mean softmax cross-entropy between ``logit`` and one-hot labels ``y``."""
    per_example = tf.nn.softmax_cross_entropy_with_logits(logits=logit, labels=y)
    return tf.reduce_mean(per_example)
def register_Ns3CallbackImpl__Void_Unsigned_long_Unsigned_long_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
    """Register Python bindings for ns3::CallbackImpl<void, unsigned long long, unsigned long long, ...>.

    PyBindGen-generated binding code: the type-name strings must match the C++
    template instantiation exactly, so edit the generator, not this function.
    """
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::CallbackImpl< void, unsigned long long, unsigned long long, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
    # static std::string DoGetTypeid().
    cls.add_method('DoGetTypeid', 'std::string', [], is_static=True)
    # virtual std::string GetTypeid() const.
    cls.add_method('GetTypeid', 'std::string', [], is_const=True, is_virtual=True)
    # Pure-virtual operator(), exposed to Python as __call__.
    cls.add_method('operator()', 'void', [param('long unsigned int', 'arg0'), param('long unsigned int', 'arg1')], is_pure_virtual=True, is_virtual=True, custom_name=u'__call__')
    return
def instance_norm(input, running_mean=None, running_var=None, weight=None, bias=None, use_input_stats=True, momentum=0.1, eps=1e-05):
    """Apply instance normalization by reshaping ``(B, C, ...)`` input to
    ``(1, B*C, ...)`` and delegating to ``batch_norm``.

    Per-channel ``weight``/``bias`` (and running stats) are tiled across the
    batch so each (sample, channel) pair is normalized independently; updated
    running stats are averaged back to per-channel values.

    Raises:
        ValueError: if ``use_input_stats`` is False but running stats are None.
    """
    if (not use_input_stats) and ((running_mean is None) or (running_var is None)):
        raise ValueError('Expected running_mean and running_var to be not None when use_input_stats=False')
    b, c = input.size(0), input.size(1)
    # Tile per-channel affine parameters across the batch dimension.
    if weight is not None:
        weight = weight.repeat(b)
    if bias is not None:
        bias = bias.repeat(b)
    import torch.onnx.symbolic

    # BUG FIX: this decorator had lost its '@torch.onnx' prefix (the line
    # started with a bare '.', which is a SyntaxError). Restored the legacy
    # torch.onnx symbolic override so ONNX export maps this to InstanceNorm.
    @torch.onnx.symbolic_override_first_arg_based(torch.onnx.symbolic.instance_norm)
    def _instance_norm(input, running_mean=None, running_var=None, weight=None, bias=None, use_input_stats=None, momentum=None, eps=None):
        if running_mean is not None:
            running_mean_orig = running_mean
            running_mean = running_mean_orig.repeat(b)
        if running_var is not None:
            running_var_orig = running_var
            running_var = running_var_orig.repeat(b)
        # Collapse batch and channel so batch_norm treats each (b, c) slice
        # as its own "channel".
        input_reshaped = input.contiguous().view(1, (b * c), *input.size()[2:])
        out = batch_norm(input_reshaped, running_mean, running_var, weight=weight, bias=bias, training=use_input_stats, momentum=momentum, eps=eps)
        # Fold the tiled running stats back to per-channel averages, in place.
        if running_mean is not None:
            running_mean_orig.copy_(running_mean.view(b, c).mean(0, keepdim=False))
        if running_var is not None:
            running_var_orig.copy_(running_var.view(b, c).mean(0, keepdim=False))
        return out.view(b, c, *input.size()[2:])
    return _instance_norm(input, running_mean=running_mean, running_var=running_var, weight=weight, bias=bias, use_input_stats=use_input_stats, momentum=momentum, eps=eps)
def update_local_variables(multi_gpu_meta_graph_def, op_names_to_replicate, num_replicas):
    """Rewrite the LOCAL_VARIABLES collection of a replicated MetaGraphDef.

    VariableDefs whose variable op was replicated get one renamed copy per
    replica (op names prefixed with the replica name scope); the rest are
    kept as-is. Mutates ``multi_gpu_meta_graph_def`` in place.
    """
    def _get_new_var_def(var_def, prefix):
        # Clone var_def and prepend the replica name scope to every op
        # reference inside it.
        new_var_def = variable_pb2.VariableDef()
        new_var_def.CopyFrom(var_def)
        new_var_def.variable_name = ops.prepend_name_scope(var_def.variable_name, prefix)
        new_var_def.initializer_name = ops.prepend_name_scope(var_def.initializer_name, prefix)
        new_var_def.snapshot_name = ops.prepend_name_scope(var_def.snapshot_name, prefix)
        return new_var_def
    # Nothing to do when the graph has no local-variable collection.
    if (tf.GraphKeys.LOCAL_VARIABLES not in multi_gpu_meta_graph_def.collection_def):
        return
    lv_collection = multi_gpu_meta_graph_def.collection_def[tf.GraphKeys.LOCAL_VARIABLES]
    new_lv_col = meta_graph_pb2.CollectionDef()
    for var_def_string in lv_collection.bytes_list.value:
        var_def = variable_pb2.VariableDef()
        var_def.ParseFromString(var_def_string)
        if (_get_op_name(var_def.variable_name) in op_names_to_replicate):
            # Replicated op: emit one renamed VariableDef per replica.
            new_var_defs = [_get_new_var_def(var_def, parallax_replica_prefix(i)) for i in range(num_replicas)]
            new_lv_col.bytes_list.value.extend([new_var_def.SerializeToString() for new_var_def in new_var_defs])
        else:
            new_lv_col.bytes_list.value.append(var_def.SerializeToString())
    # NOTE(review): lv_collection aliases the map entry, so Clear() empties it
    # and after CopyFrom() the length check below sees the NEW contents —
    # i.e. the collection is deleted only if the rewritten collection is empty.
    multi_gpu_meta_graph_def.collection_def[tf.GraphKeys.LOCAL_VARIABLES].Clear()
    multi_gpu_meta_graph_def.collection_def[tf.GraphKeys.LOCAL_VARIABLES].CopyFrom(new_lv_col)
    if (len(lv_collection.bytes_list.value) == 0):
        del multi_gpu_meta_graph_def.collection_def[tf.GraphKeys.LOCAL_VARIABLES]
class Trainer(object):
    """Main class for data-parallel training.

    Wraps a task/model/criterion triple and handles device placement, mixed
    precision, optimizer construction, distributed gradient aggregation,
    checkpointing and the train/valid step loops.

    BUG FIX (decorator restoration): the accessor methods below
    (``data_parallel_world_size``, ``data_parallel_process_group``,
    ``data_parallel_rank``, ``is_data_parallel_master``, ``criterion``,
    ``model``, ``optimizer``, ``lr_scheduler``) are used as plain attributes
    throughout this class (e.g. ``self.data_parallel_world_size > 1``,
    ``self.model.train()``), so they must be ``@property``s; the decorators
    had been stripped. Likewise the bare ``('train')`` / ``('valid')``
    expression statements before ``train_step`` / ``valid_step`` were the
    remnants of ``@metrics.aggregate(...)`` decorators and are restored.
    """

    def __init__(self, cfg: FairseqConfig, task, model, criterion, quantizer=None):
        if isinstance(cfg, Namespace):
            logger.warning('argparse.Namespace configuration is deprecated! Automatically converting to OmegaConf')
            cfg = convert_namespace_to_omegaconf(cfg)
        self.cfg = cfg
        self.task = task
        # catalog shared parameters BEFORE the model is cast/moved
        shared_params = _catalog_shared_params(model)
        self.tpu = cfg.common.tpu
        self.cuda = (torch.cuda.is_available() and (not cfg.common.cpu) and (not self.tpu))
        if self.cuda:
            self.device = torch.device('cuda')
        elif self.tpu:
            self.device = utils.get_tpu_device()
        else:
            self.device = torch.device('cpu')
        # copy model and criterion to the requested dtype/device
        self._criterion = criterion
        self._model = model
        if cfg.common.fp16:
            self._criterion = self._criterion.half()
            self._model = self._model.half()
        elif cfg.common.bf16:
            self._criterion = self._criterion.to(dtype=torch.bfloat16)
            self._model = self._model.to(dtype=torch.bfloat16)
        if (not cfg.distributed_training.pipeline_model_parallel):
            self._criterion = self._criterion.to(device=self.device)
            self._model = self._model.to(device=self.device)
        self.pipeline_model_parallel = cfg.distributed_training.pipeline_model_parallel
        self.last_device = None
        if (self.cuda and self.pipeline_model_parallel):
            self.last_device = torch.device(cfg.distributed_training.pipeline_devices[(- 1)])
        # re-tie parameters that were shared before the device transfer
        for shared_param in shared_params:
            ref = _get_module_by_path(self._model, shared_param[0])
            for path in shared_param[1:]:
                logger.info('detected shared parameter: {} <- {}'.format(shared_param[0], path))
                _set_module_by_path(self._model, path, ref)
        self._dummy_batch = None  # filled lazily with the first batch
        self._lr_scheduler = None
        self._num_updates = 0
        self._num_xla_compiles = 0  # for TPU xla compilation detection
        self._optim_history = None
        self._optimizer = None
        self._warn_once = set()
        self._wrapped_criterion = None
        self._wrapped_model = None
        if (self.cuda and (self.data_parallel_world_size > 1)):
            self._grad_norm_buf = torch.cuda.DoubleTensor(self.data_parallel_world_size)
        else:
            self._grad_norm_buf = None
        self.quantizer = quantizer
        if (self.quantizer is not None):
            self.quantizer.set_trainer(self)
        # get detailed cuda environment
        if self.cuda:
            self.cuda_env = utils.CudaEnvironment()
            if (self.data_parallel_world_size > 1):
                self.cuda_env_arr = distributed_utils.all_gather_list(self.cuda_env, group=distributed_utils.get_global_group())
            else:
                self.cuda_env_arr = [self.cuda_env]
            if (self.data_parallel_rank == 0):
                utils.CudaEnvironment.pretty_print_cuda_env_list(self.cuda_env_arr)
        else:
            self.cuda_env = None
            self.cuda_env_arr = None
        metrics.log_start_time('wall', priority=790, round=0)
        self._start_time = time.time()
        self._previous_training_time = 0
        self._cumulative_training_time = None

    def reinitialize(self):
        """Reinitialize the Trainer, typically after model params change."""
        self._lr_scheduler = None
        self._optimizer = None
        self._wrapped_criterion = None
        self._wrapped_model = None

    @property
    def data_parallel_world_size(self):
        if (self.cfg.distributed_training.distributed_world_size == 1):
            return 1
        return distributed_utils.get_data_parallel_world_size()

    @property
    def data_parallel_process_group(self):
        return distributed_utils.get_data_parallel_group()

    @property
    def data_parallel_rank(self):
        if (self.cfg.distributed_training.distributed_world_size == 1):
            return 0
        return distributed_utils.get_data_parallel_rank()

    @property
    def is_data_parallel_master(self):
        return (self.data_parallel_rank == 0)

    @property
    def criterion(self):
        """Criterion, wrapped for distributed training when it has params."""
        if (self._wrapped_criterion is None):
            if (utils.has_parameters(self._criterion) and (self.data_parallel_world_size > 1) and (not self.cfg.optimization.use_bmuf)):
                self._wrapped_criterion = models.DistributedFairseqModel(self.cfg.distributed_training, self._criterion, process_group=self.data_parallel_process_group)
            else:
                self._wrapped_criterion = self._criterion
        return self._wrapped_criterion

    @property
    def model(self):
        """Model, wrapped for distributed training when appropriate."""
        if (self._wrapped_model is None):
            if ((self.data_parallel_world_size > 1) and (not self.cfg.optimization.use_bmuf)):
                self._wrapped_model = models.DistributedFairseqModel(self.cfg.distributed_training, self._model, process_group=self.data_parallel_process_group)
            else:
                self._wrapped_model = self._model
        return self._wrapped_model

    @property
    def optimizer(self):
        if (self._optimizer is None):
            self._build_optimizer()
        return self._optimizer

    @property
    def lr_scheduler(self):
        if (self._lr_scheduler is None):
            self._build_optimizer()  # this will initialize self._lr_scheduler
        return self._lr_scheduler

    def _build_optimizer(self):
        """Build the (possibly FP16/BMUF/sharded) optimizer and LR scheduler."""
        params = list(filter((lambda p: p.requires_grad), chain(self.model.parameters(), self.criterion.parameters())))
        if (self.cfg.common.fp16 or self.cfg.common.bf16):
            if (self.cuda and (torch.cuda.get_device_capability(0)[0] < 7)):
                logger.info('NOTE: your device does NOT support faster training with --fp16, please switch to FP32 which is likely to be faster')
            if (self.cfg.common.memory_efficient_fp16 or self.cfg.common.memory_efficient_bf16):
                self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer(self.cfg, params)
            else:
                self._optimizer = optim.FP16Optimizer.build_optimizer(self.cfg, params)
        else:
            if (self.cuda and (torch.cuda.get_device_capability(0)[0] >= 7)):
                logger.info('NOTE: your device may support faster training with --fp16')
            self._optimizer = optim.build_optimizer(self.cfg.optimizer, params)
        if self.cfg.optimization.use_bmuf:
            self._optimizer = optim.FairseqBMUF(self.cfg.bmuf, self._optimizer)
        if (self.cfg.distributed_training.zero_sharding == 'os'):
            if ((self.cfg.common.fp16 and (not self.cfg.common.memory_efficient_fp16) and (not self.cfg.common.memory_efficient_bf16)) and (not self.cfg.common.fp16_no_flatten_grads)):
                raise ValueError('ZeRO is incomptabile with fp16 and flattened grads. Please use --fp16-no-flatten-grads')
            else:
                optim.shard_(self._optimizer, self.data_parallel_process_group)
        # scheduler must be built after the optimizer it steps
        self._lr_scheduler = lr_scheduler.build_lr_scheduler(self.cfg.lr_scheduler, self.optimizer)
        self._lr_scheduler.step_update(0)

    def consolidate_optimizer(self):
        """For OSS (sharded) optimizers, gather the full state on rank 0."""
        if hasattr(self.optimizer.optimizer, 'consolidate_state_dict'):
            self.optimizer.optimizer.consolidate_state_dict()

    def save_checkpoint(self, filename, extra_state):
        """Save all training state in a checkpoint file (master rank only)."""
        if self.is_data_parallel_master:
            extra_state['metrics'] = metrics.state_dict()
            extra_state['previous_training_time'] = self.cumulative_training_time()
            checkpoint_utils.save_state(filename, self.cfg, self.get_model().state_dict(), self.get_criterion(), self.optimizer, self.lr_scheduler, self.get_num_updates(), self._optim_history, extra_state)
            logger.info(f'Finished saving checkpoint to (unknown)')

    def load_checkpoint(self, filename, reset_optimizer=False, reset_lr_scheduler=False, optimizer_overrides=None, reset_meters=False):
        """Load all training state from a checkpoint file.

        Returns the checkpoint's ``extra_state`` dict, or None when no
        checkpoint exists at ``filename``.
        """
        (extra_state, self._optim_history, last_optim_state) = (None, [], None)
        logger.info(f'Preparing to load checkpoint (unknown)')
        is_distributed = (self.data_parallel_world_size > 1)
        bexists = PathManager.isfile(filename)
        if bexists:
            load_on_all_ranks = (self.cfg.checkpoint.load_checkpoint_on_all_dp_ranks or self.tpu)
            if (load_on_all_ranks or (self.data_parallel_rank == 0)):
                state = checkpoint_utils.load_checkpoint_to_cpu(filename, load_on_all_ranks=load_on_all_ranks)
                last_optim_state = state.get('last_optimizer_state', None)
                # With ZeRO sharding each rank needs its own shard; don't
                # broadcast the rank-0 optimizer state in that case.
                if ((not load_on_all_ranks) and (self.cfg.distributed_training.zero_sharding == 'os') and ('last_optimizer_state' in state) and is_distributed):
                    state['last_optimizer_state'] = 'SHARDED'
            else:
                last_optim_state = None
                state = None
            if (is_distributed and (not load_on_all_ranks)):
                state = distributed_utils.broadcast_object(state, src_rank=0, group=self.data_parallel_process_group, dist_device=self.device)
                if (self.data_parallel_rank > 0):
                    last_optim_state = state.get('last_optimizer_state', None)
            try:
                self.get_model().load_state_dict(state['model'], strict=True, model_cfg=self.cfg.model)
                if utils.has_parameters(self.get_criterion()):
                    self.get_criterion().load_state_dict(state['criterion'], strict=True)
            except Exception:
                raise Exception('Cannot load model parameters from checkpoint {}; please ensure that the architectures match.'.format(filename))
            extra_state = state['extra_state']
            self._optim_history = state['optimizer_history']
        if ((last_optim_state is not None) and (not reset_optimizer)):
            # rebuild optimizer after loading model, since params may change
            self._build_optimizer()
            # only reload optimizer and lr_scheduler if they match
            last_optim = self._optim_history[(- 1)]
            assert (last_optim['criterion_name'] == self.get_criterion().__class__.__name__), 'Criterion does not match; please reset the optimizer (--reset-optimizer).'
            assert (last_optim['optimizer_name'] == self.optimizer.__class__.__name__), 'Optimizer does not match; please reset the optimizer (--reset-optimizer).'
            if (not reset_lr_scheduler):
                self.lr_scheduler.load_state_dict(last_optim['lr_scheduler_state'])
            if ((not load_on_all_ranks) and is_distributed):
                last_optim_state = self.optimizer.broadcast_global_state_dict(last_optim_state)
            self.optimizer.load_state_dict(last_optim_state, optimizer_overrides)
            self.set_num_updates(last_optim['num_updates'])
        if (extra_state is not None):
            epoch = extra_state['train_iterator']['epoch']
            if ('previous_training_time' in extra_state):
                self._previous_training_time = extra_state['previous_training_time']
                self._start_time = time.time()
            self.lr_step(epoch)
            if (('metrics' in extra_state) and (not reset_meters)):
                metrics.load_state_dict(extra_state['metrics'])
                # reset TimeMeters, since their start times don't make sense anymore
                for meter in metrics.get_meters('default'):
                    if isinstance(meter, meters.TimeMeter):
                        meter.reset()
            logger.info('Loaded checkpoint {} (epoch {} {} updates)'.format(filename, epoch, self.get_num_updates()))
        else:
            logger.info('No existing checkpoint found {}'.format(filename))
        return extra_state

    def get_train_iterator(self, epoch, combine=True, load_dataset=True, data_selector=None, shard_batch_itr=True, disable_iterator_cache=False):
        """Return an EpochBatchIterator over the training set for ``epoch``."""
        if load_dataset:
            logger.info('loading train data for epoch {}'.format(epoch))
            self.task.load_dataset(self.cfg.dataset.train_subset, epoch=epoch, combine=combine, data_selector=data_selector)
        batch_iterator = self.task.get_batch_iterator(dataset=self.task.dataset(self.cfg.dataset.train_subset), max_tokens=self.cfg.dataset.max_tokens, max_sentences=self.cfg.dataset.batch_size, max_positions=utils.resolve_max_positions(self.task.max_positions(), self.model.max_positions(), self.cfg.dataset.max_tokens), ignore_invalid_inputs=True, required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple, seed=self.cfg.common.seed, num_shards=(self.data_parallel_world_size if shard_batch_itr else 1), shard_id=(self.data_parallel_rank if shard_batch_itr else 0), num_workers=self.cfg.dataset.num_workers, epoch=epoch, data_buffer_size=self.cfg.dataset.data_buffer_size, disable_iterator_cache=disable_iterator_cache)
        self.reset_dummy_batch(batch_iterator.first_batch)
        return batch_iterator

    def get_valid_iterator(self, subset, disable_iterator_cache=False):
        """Return an EpochBatchIterator over the given validation subset."""
        batch_iterator = self.task.get_batch_iterator(dataset=self.task.dataset(subset), max_tokens=self.cfg.dataset.max_tokens_valid, max_sentences=self.cfg.dataset.batch_size_valid, max_positions=utils.resolve_max_positions(self.task.max_positions(), self.model.max_positions()), ignore_invalid_inputs=self.cfg.dataset.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple, seed=self.cfg.common.seed, num_shards=self.data_parallel_world_size, shard_id=self.data_parallel_rank, num_workers=self.cfg.dataset.num_workers, data_buffer_size=self.cfg.dataset.data_buffer_size, disable_iterator_cache=disable_iterator_cache)
        self.reset_dummy_batch(batch_iterator.first_batch)
        return batch_iterator

    def begin_epoch(self, epoch):
        """Called at the beginning of each epoch."""
        logger.info('begin training epoch {}'.format(epoch))
        self.lr_step_begin_epoch(epoch)
        if (self.quantizer is not None):
            self.quantizer.begin_epoch(epoch)
        # task-specific setup per epoch
        self.task.begin_epoch(epoch, self.get_model())
        if self.tpu:
            import torch_xla.core.xla_model as xm
            xm.rendezvous('begin_epoch')  # wait for all workers
            xm.mark_step()

    def begin_valid_epoch(self, epoch):
        """Called at the beginning of each validation epoch."""
        self.task.begin_valid_epoch(epoch, self.get_model())

    def reset_dummy_batch(self, batch):
        self._dummy_batch = batch

    @metrics.aggregate('train')
    def train_step(self, samples, raise_oom=False):
        """Do forward, backward and parameter update over a list of samples."""
        self._set_seed()
        self.model.train()
        self.criterion.train()
        self.zero_grad()
        metrics.log_start_time('train_wall', priority=800, round=0)
        # forward and backward pass
        (logging_outputs, sample_size, ooms) = ([], 0, 0)
        for (i, sample) in enumerate(samples):
            (sample, is_dummy_batch) = self._prepare_sample(sample)

            def maybe_no_sync():
                # Only sync gradients on the last backward pass; intermediate
                # accumulation steps skip the all-reduce.
                if ((self.data_parallel_world_size > 1) and hasattr(self.model, 'no_sync') and (i < (len(samples) - 1))):
                    return self.model.no_sync()
                else:
                    return contextlib.ExitStack()
            try:
                with maybe_no_sync():
                    (loss, sample_size_i, logging_output) = self.task.train_step(sample=sample, model=self.model, criterion=self.criterion, optimizer=self.optimizer, update_num=self.get_num_updates(), ignore_grad=is_dummy_batch)
                    del loss
                logging_outputs.append(logging_output)
                sample_size += sample_size_i
                # emptying the CUDA cache after the first step can reduce
                # the chance of OOM later in training
                if (self.cuda and (self.get_num_updates() == 0)):
                    torch.cuda.empty_cache()
            except RuntimeError as e:
                if ('out of memory' in str(e)):
                    self._log_oom(e)
                    if raise_oom:
                        raise e
                    logger.warning('attempting to recover from OOM in forward/backward pass')
                    ooms += 1
                    self.zero_grad()
                    if self.cuda:
                        torch.cuda.empty_cache()
                    if (self.cfg.distributed_training.distributed_world_size == 1):
                        return None
                else:
                    raise e
            if (self.tpu and (i < (len(samples) - 1))):
                # tpu-comment: every XLA operation before marking step is
                # appended to the IR graph; mark_step flushes it
                import torch_xla.core.xla_model as xm
                xm.mark_step()
        if is_dummy_batch:
            if torch.is_tensor(sample_size):
                sample_size.zero_()
            else:
                sample_size *= 0.0
        if torch.is_tensor(sample_size):
            sample_size = sample_size.float()
        else:
            sample_size = float(sample_size)
        # gather logging outputs from all replicas
        if self._sync_stats():
            train_time = self._local_cumulative_training_time()
            (logging_outputs, (sample_size, ooms, total_train_time)) = self._aggregate_logging_outputs(logging_outputs, sample_size, ooms, train_time, ignore=is_dummy_batch)
            self._cumulative_training_time = (total_train_time / self.data_parallel_world_size)
        overflow = False
        try:
            with torch.autograd.profiler.record_function('reduce-grads'):
                self.optimizer.all_reduce_grads(self.model)
                if utils.has_parameters(self.criterion):
                    self.optimizer.all_reduce_grads(self.criterion)
            with torch.autograd.profiler.record_function('multiply-grads'):
                # multiply gradients by (data_parallel_size / sample_size)
                # since DDP normalizes by the number of data parallel workers
                if (not self.cfg.optimization.use_bmuf):
                    self.optimizer.multiply_grads((self.data_parallel_world_size / sample_size))
                elif (sample_size > 0):
                    num = (self.data_parallel_world_size if self._sync_stats() else 1)
                    self.optimizer.multiply_grads((num / sample_size))
            with torch.autograd.profiler.record_function('clip-grads'):
                grad_norm = self.clip_grad_norm(self.cfg.optimization.clip_norm)
            # check that grad norms are consistent across workers
            if (not self.tpu):
                if ((not self.cfg.optimization.use_bmuf) and (self.cfg.distributed_training.distributed_wrapper != 'SlowMo')):
                    self._check_grad_norms(grad_norm)
                if (not torch.isfinite(grad_norm).all()):
                    raise FloatingPointError('gradients are Nan/Inf')
            with torch.autograd.profiler.record_function('optimizer'):
                # take an optimization step
                self.task.optimizer_step(self.optimizer, model=self.model, update_num=self.get_num_updates())
        except FloatingPointError:
            # re-run the forward and backward pass with hooks attached to
            # print out where it fails
            self.zero_grad()
            with NanDetector(self.get_model()):
                for (_, sample) in enumerate(samples):
                    (sample, _) = self._prepare_sample(sample)
                    self.task.train_step(sample, self.model, self.criterion, self.optimizer, self.get_num_updates(), ignore_grad=False)
            raise
        except OverflowError as e:
            overflow = True
            logger.info(('NOTE: overflow detected, ' + str(e)))
            grad_norm = torch.tensor(0.0).cuda()
            self.zero_grad()
        except RuntimeError as e:
            if ('out of memory' in str(e)):
                self._log_oom(e)
                logger.error('OOM during optimization, irrecoverable')
            raise e
        # Some distributed wrappers (e.g., SlowMo) need extra access to the optimizer
        if hasattr(self.model, 'perform_additional_optimizer_actions'):
            if hasattr(self.optimizer, 'fp32_params'):
                self.model.perform_additional_optimizer_actions(self.optimizer.optimizer, self.optimizer.fp32_params)
            else:
                self.model.perform_additional_optimizer_actions(self.optimizer.optimizer)
        logging_output = None
        if ((not overflow) or (self.cfg.distributed_training.distributed_wrapper == 'SlowMo')):
            self.set_num_updates((self.get_num_updates() + 1))
            if self.tpu:
                import torch_xla.core.xla_model as xm
                xm.mark_step()
                # only log stats every log_interval steps; XLA compiles the
                # logging code due to dynamic shapes, so it's cheaper to skip
                logging_output = {}
                if ((self.get_num_updates() % self.cfg.common.log_interval) == 0):
                    mem_info = xm.get_memory_info(self.device)
                    gb_free = ((mem_info['kb_free'] / 1024) / 1024)
                    gb_total = ((mem_info['kb_total'] / 1024) / 1024)
                    metrics.log_scalar('gb_free', gb_free, priority=1500, round=1, weight=0)
                    metrics.log_scalar('gb_total', gb_total, priority=1600, round=1, weight=0)
                    logging_output = self._reduce_and_log_stats(logging_outputs, sample_size, grad_norm)
                self._check_xla_compilation()
            else:
                logging_output = self._reduce_and_log_stats(logging_outputs, sample_size, grad_norm)
        # periodically clear the CUDA cache to mitigate fragmentation
        if (self.cuda and (self.cfg.common.empty_cache_freq > 0) and ((((self.get_num_updates() + self.cfg.common.empty_cache_freq) - 1) % self.cfg.common.empty_cache_freq) == 0)):
            torch.cuda.empty_cache()
        if self.cfg.common.fp16:
            metrics.log_scalar('loss_scale', self.optimizer.scaler.loss_scale, priority=700, round=4, weight=0)
        metrics.log_stop_time('train_wall')
        return logging_output

    @metrics.aggregate('valid')
    def valid_step(self, sample, raise_oom=False):
        """Do forward pass in evaluation mode."""
        if self.tpu:
            import torch_xla.core.xla_model as xm
            xm.rendezvous('valid_step')  # wait for all workers
            xm.mark_step()
        with torch.no_grad():
            self.model.eval()
            self.criterion.eval()
            (sample, is_dummy_batch) = self._prepare_sample(sample)
            try:
                (_loss, sample_size, logging_output) = self.task.valid_step(sample, self.model, self.criterion)
            except RuntimeError as e:
                if ('out of memory' in str(e)):
                    self._log_oom(e)
                    if (not raise_oom):
                        logger.warning('ran out of memory in validation step, retrying batch')
                        for p in self.model.parameters():
                            if (p.grad is not None):
                                p.grad = None  # free some memory
                        if self.cuda:
                            torch.cuda.empty_cache()
                        return self.valid_step(sample, raise_oom=True)
                raise e
            logging_outputs = [logging_output]
            if is_dummy_batch:
                if torch.is_tensor(sample_size):
                    sample_size.zero_()
                else:
                    sample_size *= 0.0
        # gather logging outputs from all replicas
        if (self.data_parallel_world_size > 1):
            (logging_outputs, (sample_size,)) = self._aggregate_logging_outputs(logging_outputs, sample_size, ignore=is_dummy_batch)
        logging_output = self._reduce_and_log_stats(logging_outputs, sample_size)
        return logging_output

    def zero_grad(self):
        self.optimizer.zero_grad()

    def lr_step_begin_epoch(self, epoch):
        """Adjust the learning rate at the beginning of the epoch."""
        self.lr_scheduler.step_begin_epoch(epoch)
        return self.lr_step_update()

    def lr_step(self, epoch, val_loss=None):
        """Adjust the learning rate at the end of the epoch."""
        self.lr_scheduler.step(epoch, val_loss)
        return self.lr_step_update()

    def lr_step_update(self):
        """Update the learning rate after each update."""
        new_lr = self.lr_scheduler.step_update(self.get_num_updates())
        if isinstance(new_lr, dict):
            for (k, v) in new_lr.items():
                metrics.log_scalar(f'lr_{k}', v, weight=0, priority=300)
            new_lr = new_lr.get('default', next(iter(new_lr.values())))
        else:
            metrics.log_scalar('lr', new_lr, weight=0, priority=300)
        return new_lr

    def get_lr(self):
        """Get the current learning rate."""
        return self.optimizer.get_lr()

    def get_model(self):
        """Get the (non-wrapped) model instance."""
        return self._model

    def get_criterion(self):
        """Get the (non-wrapped) criterion instance."""
        return self._criterion

    def get_meter(self, name):
        """[deprecated] Get a specific meter by name; use fairseq.metrics."""
        from fairseq import meters
        if ('get_meter' not in self._warn_once):
            self._warn_once.add('get_meter')
            utils.deprecation_warning('Trainer.get_meter is deprecated. Please use fairseq.metrics instead.')
        train_meters = metrics.get_meters('train')
        if (train_meters is None):
            train_meters = {}
        if ((name == 'train_loss') and ('loss' in train_meters)):
            return train_meters['loss']
        elif (name == 'train_nll_loss'):
            # support for legacy train.py, which assumed this meter is always initialized
            m = train_meters.get('nll_loss', None)
            return (m or meters.AverageMeter())
        elif (name == 'wall'):
            m = metrics.get_meter('default', 'wall')
            return (m or meters.TimeMeter())
        elif (name == 'wps'):
            m = metrics.get_meter('train', 'wps')
            return (m or meters.TimeMeter())
        elif (name in {'valid_loss', 'valid_nll_loss'}):
            k = name[len('valid_'):]
            m = metrics.get_meter('valid', k)
            return (m or meters.AverageMeter())
        elif (name == 'oom'):
            return meters.AverageMeter()
        elif (name in train_meters):
            return train_meters[name]
        return None

    def get_num_updates(self):
        """Get the number of parameter updates."""
        return self._num_updates

    def set_num_updates(self, num_updates):
        """Set the number of parameter updates."""
        self._num_updates = num_updates
        self.lr_step_update()
        if self.quantizer:
            self.quantizer.step_update(self._num_updates)
        metrics.log_scalar('num_updates', self._num_updates, weight=0, priority=200)

    def clip_grad_norm(self, clip_norm):
        return self.optimizer.clip_grad_norm(clip_norm, aggregate_norm_fn=None)

    def cumulative_training_time(self):
        if (self._cumulative_training_time is None):
            # single GPU
            return self._local_cumulative_training_time()
        else:
            return self._cumulative_training_time

    def _local_cumulative_training_time(self):
        """Aggregate training time in seconds for this worker."""
        return ((time.time() - self._start_time) + self._previous_training_time)

    def _prepare_sample(self, sample, is_dummy=False):
        """Move a sample to the right device/dtype; substitute the dummy
        batch for empty samples. Returns ``(sample, is_dummy)``."""
        if (sample == 'DUMMY'):
            raise Exception("Trying to use an uninitialized 'dummy' batch. This usually indicates that the total number of batches is smaller than the number of participating GPUs. Try reducing the batch size or using fewer GPUs.")
        if ((sample is None) or (len(sample) == 0)):
            assert ((self._dummy_batch is not None) and (len(self._dummy_batch) > 0)), 'Invalid dummy batch: {}'.format(self._dummy_batch)
            (sample, _) = self._prepare_sample(self._dummy_batch, is_dummy=True)
            return (sample, True)
        if self.cuda:
            if self.pipeline_model_parallel:
                # with pipelining, only the target needs to live on the last device
                if ('target' in sample):
                    sample['target'] = utils.move_to_cuda(sample['target'], device=self.last_device)
            else:
                sample = utils.move_to_cuda(sample)
        elif (self.tpu and is_dummy):
            # the dummy batch may not be on the appropriate device
            sample = utils.move_to_cuda(sample, device=self.device)

        def apply_half(t):
            if (t.dtype is torch.float32):
                return t.half()
            return t

        def apply_bfloat16(t):
            if (t.dtype is torch.float32):
                return t.to(dtype=torch.bfloat16)
            return t
        if self.cfg.common.fp16:
            sample = utils.apply_to_sample(apply_half, sample)
        if self.cfg.common.bf16:
            sample = utils.apply_to_sample(apply_bfloat16, sample)
        if (self._dummy_batch == 'DUMMY'):
            self._dummy_batch = sample
        return (sample, False)

    def _set_seed(self):
        # Set seed based on args.seed and the update number so that we get
        # reproducible results when resuming from checkpoints
        seed = (self.cfg.common.seed + self.get_num_updates())
        utils.set_torch_seed(seed)

    def _sync_stats(self):
        # Return True if it's time to sync stats across workers.
        if (self.data_parallel_world_size == 1):
            return False
        elif self.cfg.optimization.use_bmuf:
            return ((((self.get_num_updates() + 1) % self.cfg.bmuf.global_sync_iter) == 0) and ((self.get_num_updates() + 1) > self.cfg.bmuf.warmup_iterations))
        else:
            return True

    def _log_oom(self, exc):
        msg = 'OOM: Ran out of memory with exception: {}'.format(exc)
        logger.warning(msg)
        if (torch.cuda.is_available() and hasattr(torch.cuda, 'memory_summary')):
            for device_idx in range(torch.cuda.device_count()):
                logger.warning(torch.cuda.memory_summary(device=device_idx))
        sys.stderr.flush()

    def _aggregate_logging_outputs(self, logging_outputs: List[Dict[(str, Any)]], *extra_stats_to_sum, ignore=False):
        # Use the fast (fixed-key) sync when the criterion supports it.
        if self.task.__class__.logging_outputs_can_be_summed(self.get_criterion()):
            return self._fast_stat_sync_sum(logging_outputs, *extra_stats_to_sum, ignore=ignore)
        else:
            return self._all_gather_list_sync(logging_outputs, *extra_stats_to_sum, ignore=ignore)

    def _all_gather_list_sync(self, logging_outputs: List[Dict[(str, Any)]], *extra_stats_to_sum, ignore=False):
        """Sync logging outputs across workers via all_gather_list; works for
        arbitrary nested dicts/tensors but is slower than fast stat sync."""
        if self.tpu:
            raise NotImplementedError
        if ignore:
            logging_outputs = []
        results = list(zip(*distributed_utils.all_gather_list(([logging_outputs] + list(extra_stats_to_sum)), max_size=getattr(self.cfg.common, 'all_gather_list_size', 16384), group=self.data_parallel_process_group)))
        (logging_outputs, extra_stats_to_sum) = (results[0], results[1:])
        logging_outputs = list(chain.from_iterable(logging_outputs))
        extra_stats_to_sum = [sum(s) for s in extra_stats_to_sum]
        return (logging_outputs, extra_stats_to_sum)

    def _fast_stat_sync_sum(self, logging_outputs: List[Dict[(str, Any)]], *extra_stats_to_sum, ignore=False):
        """Sync logging outputs across workers with a single all_reduce_dict.

        Faster than _all_gather_list_sync, but all values must be scalars and
        only keys present in the FIRST logging output are synced.
        """
        data = {}
        for (i, stat) in enumerate(extra_stats_to_sum):
            data[('extra_stats_' + str(i))] = stat
        if (len(logging_outputs) > 0):
            log_keys = list(logging_outputs[0].keys())
            for k in log_keys:
                if (not ignore):
                    v = sum((log[k] for log in logging_outputs if (k in log)))
                else:
                    # contribute zeros (of the right type) for ignored batches
                    v = logging_outputs[0][k]
                    v = (torch.zeros_like(v) if torch.is_tensor(v) else 0)
                data[('logging_outputs_' + k)] = v
        else:
            log_keys = None
        data = distributed_utils.all_reduce_dict(data, device=self.device, group=self.data_parallel_process_group)
        extra_stats_to_sum = [data[('extra_stats_' + str(i))] for i in range(len(extra_stats_to_sum))]
        if (log_keys is not None):
            logging_outputs = [{k: data[('logging_outputs_' + k)] for k in log_keys}]
        else:
            logging_outputs = []
        return (logging_outputs, extra_stats_to_sum)

    def _check_grad_norms(self, grad_norm):
        """Check that grad norms are consistent across all workers."""
        if (self._grad_norm_buf is not None):
            self._grad_norm_buf.zero_()
            self._grad_norm_buf[self.data_parallel_rank] = grad_norm
            distributed_utils.all_reduce(self._grad_norm_buf, group=self.data_parallel_process_group)

            def is_consistent(tensor):
                max_abs_diff = torch.max(torch.abs((tensor - tensor[0])))
                return (torch.isfinite(tensor).all() or ((max_abs_diff / (tensor[0] + 1e-06)) < 1e-06).all())
            if (not is_consistent(self._grad_norm_buf)):
                pretty_detail = '\n'.join(('rank {:3d} = {:.8f}'.format(r, n) for (r, n) in enumerate(self._grad_norm_buf.tolist())))
                error_detail = 'grad_norm across the workers:\n{}\n'.format(pretty_detail)
                raise FloatingPointError((((('Fatal error: gradients are inconsistent between workers. Try --ddp-backend=no_c10d. Or are you mixing up different generation of GPUs in training?' + '\n') + ('-' * 80)) + '\n{}\n'.format(error_detail)) + ('-' * 80)))

    def _reduce_and_log_stats(self, logging_outputs, sample_size, grad_norm=None):
        if ((grad_norm is not None) and ((not torch.is_tensor(grad_norm)) or torch.isfinite(grad_norm))):
            metrics.log_speed('ups', 1.0, priority=100, round=2)
            metrics.log_scalar('gnorm', grad_norm, priority=400, round=3)
            if (self.cfg.optimization.clip_norm > 0):
                metrics.log_scalar('clip', torch.where((grad_norm > self.cfg.optimization.clip_norm), grad_norm.new_tensor(100), grad_norm.new_tensor(0)), priority=500, round=1)
        with metrics.aggregate() as agg:
            if (logging_outputs is not None):
                self.task.reduce_metrics(logging_outputs, self.get_criterion())
                del logging_outputs
            # extra warning for criterions that don't properly log a loss value
            if ('loss' not in agg):
                if ('loss' not in self._warn_once):
                    self._warn_once.add('loss')
                    logger.warning("Criterion.reduce_metrics did not log a 'loss' value, which may break some functionality")
                metrics.log_scalar('loss', (- 1))
            # support legacy interface
            if self.tpu:
                logging_output = {}
            else:
                logging_output = agg.get_smoothed_values()
                logging_output['sample_size'] = sample_size
                for key_to_delete in ['ppl', 'wps', 'wpb', 'bsz']:
                    if (key_to_delete in logging_output):
                        del logging_output[key_to_delete]
            return logging_output

    def _check_xla_compilation(self):
        import torch_xla.debug.metrics as met
        compile_stats = met.metric_data('CompileTime')
        if (compile_stats is None):
            return
        num_xla_compiles = compile_stats[0]
        if (num_xla_compiles > self._num_xla_compiles):
            logger.warning('XLA compilation detected on device #{}; too many of these can lead to slow training, but we expect a few in the beginning'.format(self.cfg.distributed_training.distributed_rank))
        self._num_xla_compiles = num_xla_compiles
def cosine_sim(query, retrio):
    """Cosine-similarity matrix between the rows of two embedding matrices."""
    query_normed = l2norm(query)
    retrio_normed = l2norm(retrio)
    # With unit-norm rows, the dot product equals the cosine similarity.
    return query_normed.mm(retrio_normed.t())
def cleva_math_result_match(gold: str, pred: str) -> float:
    """Extract the last arithmetic-looking token from ``pred`` and compare it
    with ``gold`` via exact match.

    Tokens are maximal runs of digits, dots, parentheses and +-*/% operators;
    leading ')' characters are stripped from the extracted token.
    """
    expr_pattern = '[-+*/%\\.\\(\\)\\d]+'
    found = re.findall(expr_pattern, pred)
    if found:
        pred = found[-1].lstrip(')')
    return exact_match(gold, pred.strip())
class SE(nn.Module):
    """Squeeze-and-Excitation block: global average pool, bottleneck MLP
    implemented as 1x1 convs, sigmoid gate, then channel-wise rescaling."""

    def __init__(self, w_in, w_se):
        super(SE, self).__init__()
        self.construct(w_in, w_se)

    def construct(self, w_in, w_se):
        # Squeeze: collapse spatial dims to 1x1 per channel.
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        # Excite: w_in -> w_se -> w_in bottleneck with sigmoid gating.
        self.f_ex = nn.Sequential(
            nn.Conv2d(w_in, w_se, kernel_size=1, bias=True),
            nn.ReLU(inplace=regnet_cfg.MEM.RELU_INPLACE),
            nn.Conv2d(w_se, w_in, kernel_size=1, bias=True),
            nn.Sigmoid(),
        )

    def forward(self, x):
        gate = self.f_ex(self.avg_pool(x))
        return x * gate
@pytest.mark.parametrize('workers', (1, 2))
def test_in_cli(testdir, cli, open_api_3_schema_with_recoverable_errors, workers, snapshot_cli):
    """A dry-run over a schema with recoverable errors matches the CLI snapshot.

    Fix: the decorator line was truncated to '.parametrize(...)' (the
    '@pytest.mark' prefix was lost), which is a syntax error; restored.
    """
    schema_file = testdir.makefile('.yaml', schema=yaml.dump(open_api_3_schema_with_recoverable_errors))
    assert (cli.run(str(schema_file), '--dry-run', f'--workers={workers}') == snapshot_cli)
def test_hit_ratio_evaluate_correct_cases():
    """HitRatio@2 is 1.0 when every row's true item is in the top-2, else 0.

    Fix: `k` was undefined here (NameError at runtime). k=2 is the unique
    value consistent with both assertions below: the top-2 predicted indices
    are {1, 2} (row 0) and {0, 2} (row 1), so the first truth matrix hits on
    both rows (score 1.0) and the second hits on neither (score 0).
    """
    y_pred = torch.from_numpy(np.array([[0.1, 0.4, 0.2], [0.5, 0.1, 0.7]]))
    y_true = torch.from_numpy(np.array([[0, 0, 1], [1, 0, 0]]))
    k = 2
    hr = HitRatioAtK(k)
    assert (hr.evaluate(y_true, y_pred) == 1.0)
    y_true = torch.from_numpy(np.array([[1, 0, 0], [0, 1, 0]]))
    assert (hr.evaluate(y_true, y_pred) == 0)
class UNet(nn.Module):
    """Standard U-Net: 4-level encoder, bottleneck, 4-level decoder with skip connections.

    `bilinear=True` halves the channel counts at the bottleneck/decoder
    (factor 2) to compensate for bilinear (non-learned) upsampling.
    """

    def __init__(self, n_channels, n_classes, bilinear=False):
        super(UNet, self).__init__()
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.bilinear = bilinear
        factor = 2 if bilinear else 1
        # Encoder path.
        self.inc = DoubleConv(n_channels, 64)
        self.down1 = Down(64, 128)
        self.down2 = Down(128, 256)
        self.down3 = Down(256, 512)
        self.down4 = Down(512, 1024 // factor)
        # Decoder path (each Up consumes the matching encoder skip).
        self.up1 = Up(1024, 512 // factor, bilinear)
        self.up2 = Up(512, 256 // factor, bilinear)
        self.up3 = Up(256, 128 // factor, bilinear)
        self.up4 = Up(128, 64, bilinear)
        self.outc = OutConv(64, n_classes)

    def forward(self, x):
        skip1 = self.inc(x)
        skip2 = self.down1(skip1)
        skip3 = self.down2(skip2)
        skip4 = self.down3(skip3)
        bottleneck = self.down4(skip4)
        out = self.up1(bottleneck, skip4)
        out = self.up2(out, skip3)
        out = self.up3(out, skip2)
        out = self.up4(out, skip1)
        return self.outc(out)
class MonashTSF():
    """Iterable view over the test split of a `monash_tsf` dataset.

    Each item is a min-max-normalized DataFrame split into train/test
    portions plus forecasting metadata.
    """

    def __init__(self, dataset, freq, horizon):
        self.freq = freq
        self.horizon = horizon
        self.data = load_dataset('monash_tsf', dataset)['test']

    def __getitem__(self, i):
        record = self.data[i]
        values = np.asarray(record['target']).T
        # 'start' may be a singleton list or a scalar timestamp.
        start = record['start']
        if isinstance(start, list):
            start = start[0]
        index = pd.date_range(start=start, periods=len(values), freq=self.freq)
        frame = pd.DataFrame(values.reshape(len(values), -1), index=index)
        # Min-max normalize each column to [0, 1].
        col_min = frame.min(axis=0)
        col_max = frame.max(axis=0)
        frame = (frame - col_min) / (col_max - col_min)
        # Hold out at most 120 points (or 20% of the series) for testing.
        n_test = min(120, len(frame) // 5)
        return dict(
            train_data=frame.iloc[:-n_test],
            test_data=frame.iloc[-n_test:],
            horizon=self.horizon,
            calib_frac=0.2,
        )

    def __len__(self):
        return len(self.data)

    def __iter__(self):
        for i in range(len(self)):
            yield self[i]
def parse_normalize_cfg(test_pipeline):
    """Locate the single 'Normalize' transform config inside a test pipeline.

    Scans the pipeline for the first step carrying a 'transforms' list, then
    returns its unique Normalize entry. Asserts if no transforms are found or
    if Normalize is not exactly one.
    """
    transforms = next((step['transforms'] for step in test_pipeline if 'transforms' in step), None)
    assert transforms is not None, 'Failed to find `transforms`'
    normalize_cfgs = [cfg for cfg in transforms if cfg['type'] == 'Normalize']
    assert len(normalize_cfgs) == 1, '`norm_config` should only have one'
    return normalize_cfgs[0]
def exclude_tests(exclude_list, selected_tests, exclude_message=None):
    """Drop (in place) every selected test whose name starts with an excluded prefix.

    Args:
        exclude_list: prefixes of test names to exclude.
        selected_tests: list of test names; mutated in place.
        exclude_message: optional suffix printed to stderr for each exclusion.

    Returns:
        The same `selected_tests` list, filtered.
    """
    for prefix in exclude_list:
        kept = []
        for test_name in selected_tests:
            if test_name.startswith(prefix):
                if exclude_message is not None:
                    print_to_stderr('Excluding {} {}'.format(test_name, exclude_message))
            else:
                kept.append(test_name)
        # Slice-assign so callers holding a reference see the mutation.
        selected_tests[:] = kept
    return selected_tests
@array_function_dispatch(_einsum_dispatcher, module='numpy')
def einsum(*operands, **kwargs):
    """Evaluate the Einstein summation convention on the operands.

    Dispatches straight to the C implementation when ``optimize`` is False;
    otherwise computes a pairwise contraction order via ``einsum_path`` and
    evaluates it, routing BLAS-able contractions through ``tensordot``.

    Fix: the decorator line was mangled to '_function_dispatch(...)' (the
    leading '@array' was lost); restored to the standard
    ``@array_function_dispatch`` form.
    """
    optimize_arg = kwargs.pop('optimize', False)
    if (optimize_arg is False):
        # Fast path: no contraction-order optimization requested.
        return c_einsum(*operands, **kwargs)
    valid_einsum_kwargs = ['out', 'dtype', 'order', 'casting']
    einsum_kwargs = {k: v for (k, v) in kwargs.items() if (k in valid_einsum_kwargs)}
    valid_contract_kwargs = (['optimize'] + valid_einsum_kwargs)
    unknown_kwargs = [k for (k, v) in kwargs.items() if (k not in valid_contract_kwargs)]
    if len(unknown_kwargs):
        raise TypeError(('Did not understand the following kwargs: %s' % unknown_kwargs))
    specified_out = False
    out_array = einsum_kwargs.pop('out', None)
    if (out_array is not None):
        specified_out = True
    # einsum_call=True makes einsum_path return the raw contraction list.
    (operands, contraction_list) = einsum_path(*operands, optimize=optimize_arg, einsum_call=True)
    handle_out = False
    for (num, contraction) in enumerate(contraction_list):
        (inds, idx_rm, einsum_str, remaining, blas) = contraction
        tmp_operands = [operands.pop(x) for x in inds]
        # Only the last contraction may write into the caller-provided 'out'.
        handle_out = (specified_out and ((num + 1) == len(contraction_list)))
        if blas:
            (input_str, results_index) = einsum_str.split('->')
            (input_left, input_right) = input_str.split(',')
            # Result subscripts before any final transpose: all non-contracted
            # indices of both operands.
            tensor_result = (input_left + input_right)
            for s in idx_rm:
                tensor_result = tensor_result.replace(s, '')
            # Axis positions of the contracted indices in each operand.
            (left_pos, right_pos) = ([], [])
            for s in sorted(idx_rm):
                left_pos.append(input_left.find(s))
                right_pos.append(input_right.find(s))
            new_view = tensordot(*tmp_operands, axes=(tuple(left_pos), tuple(right_pos)))
            # A transpose/reshape may still be needed after tensordot.
            if ((tensor_result != results_index) or handle_out):
                if handle_out:
                    einsum_kwargs['out'] = out_array
                new_view = c_einsum(((tensor_result + '->') + results_index), new_view, **einsum_kwargs)
        else:
            # Non-BLAS contraction: evaluate directly with the C einsum.
            if handle_out:
                einsum_kwargs['out'] = out_array
            new_view = c_einsum(einsum_str, *tmp_operands, **einsum_kwargs)
        operands.append(new_view)
        del tmp_operands, new_view
    if specified_out:
        return out_array
    else:
        return operands[0]
def change_default(library, implementation):
    """Temporarily swap `library.default_implementation`, restoring it on exit.

    Generator intended for context-manager use: the old default is saved,
    the new one installed, and the old one restored when the managed block
    finishes.

    Fix: the restore now sits in a ``finally`` clause so the original
    default is reinstated even when the managed block raises or the
    generator is closed early.
    """
    old_default = library.default_implementation
    library.default_implementation = implementation
    try:
        yield
    finally:
        library.default_implementation = old_default
class Gaussian(Distributions):
    """Distribution that perturbs values with zero-centered Gaussian noise."""

    def __init__(self, variance):
        # NOTE(review): np.random.normal's second argument is the standard
        # deviation (scale), not the variance — confirm whether callers
        # actually pass a std here despite the parameter name.
        self._variance = variance

    def apply(self, val: np.ndarray):
        """Return `val` perturbed element-wise by Gaussian noise centered on it."""
        return np.random.normal(val, self._variance)
class NodeEvent(Event):
    """Event associated with a node, carrying an anchor and optional source marks."""

    def __init__(self, anchor, start_mark=None, end_mark=None):
        # anchor: anchor name for this node (or None).
        # start_mark / end_mark: positions in the source stream, if known.
        self.anchor = anchor
        self.start_mark = start_mark
        self.end_mark = end_mark
def spectral_norm(module, name='weight', n_power_iterations=1, eps=1e-12, dim=None):
    """Apply spectral normalization to the named parameter of `module`.

    Args:
        module: module owning the parameter to normalize.
        name: parameter name (default 'weight').
        n_power_iterations: power-iteration steps per forward pass.
        eps: numerical stability epsilon.
        dim: dimension treated as the output dimension; inferred when None.

    Returns:
        The same module, with the spectral-norm hook installed.
    """
    if dim is None:
        # Transposed convolutions keep output channels on dim 1, so the
        # reshaping for power iteration must pivot there instead of dim 0.
        transposed = isinstance(module, (torch.nn.ConvTranspose1d, torch.nn.ConvTranspose2d, torch.nn.ConvTranspose3d))
        dim = 1 if transposed else 0
    SpectralNorm.apply(module, name, n_power_iterations, dim, eps)
    return module
class TFAutoModelWithLMHead(object):
    """Factory that maps a pretrained-model identifier to its TF LM-head class.

    Not meant to be instantiated directly: use
    ``TFAutoModelWithLMHead.from_pretrained(...)``.
    """

    def __init__(self):
        raise EnvironmentError('TFAutoModelWithLMHead is designed to be instantiated using the `TFAutoModelWithLMHead.from_pretrained(pretrained_model_name_or_path)` method.')

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        """Instantiate the matching TF*ForMaskedLM / *LMHeadModel from the identifier.

        Fix: the method takes ``cls`` but was missing its ``@classmethod``
        decorator (stripped), so calling it on the class misbound arguments;
        restored.

        Note the substring checks are order-sensitive: 'distilbert' and
        'roberta' must be tested before plain 'bert', and 'openai-gpt'
        before 'gpt2'.

        Raises:
            ValueError: when no known model family matches the identifier.
        """
        if ('distilbert' in pretrained_model_name_or_path):
            return TFDistilBertForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif ('roberta' in pretrained_model_name_or_path):
            return TFRobertaForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif ('bert' in pretrained_model_name_or_path):
            return TFBertForMaskedLM.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif ('openai-gpt' in pretrained_model_name_or_path):
            return TFOpenAIGPTLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif ('gpt2' in pretrained_model_name_or_path):
            return TFGPT2LMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif ('transfo-xl' in pretrained_model_name_or_path):
            return TFTransfoXLLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif ('xlnet' in pretrained_model_name_or_path):
            return TFXLNetLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif ('xlm' in pretrained_model_name_or_path):
            return TFXLMWithLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        elif ('ctrl' in pretrained_model_name_or_path):
            return TFCTRLLMHeadModel.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
        raise ValueError("Unrecognized model identifier in {}. Should contains one of 'bert', 'openai-gpt', 'gpt2', 'transfo-xl', 'xlnet', 'xlm', 'roberta', 'ctrl'".format(pretrained_model_name_or_path))
class Spatial_Basic_Block(nn.Module):
    """Residual spatial graph-conv block: SGC -> BN -> (+ residual) -> ReLU."""

    def __init__(self, in_channels, out_channels, max_graph_distance, residual=False, **kwargs):
        super(Spatial_Basic_Block, self).__init__()
        # Residual branch: zero when disabled, identity when shapes match,
        # otherwise a 1x1 conv + BN projection.
        if not residual:
            self.residual = lambda x: 0
        elif in_channels == out_channels:
            self.residual = lambda x: x
        else:
            self.residual = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 1),
                nn.BatchNorm2d(out_channels),
            )
        self.conv = SpatialGraphConv(in_channels, out_channels, max_graph_distance)
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x, A):
        shortcut = self.residual(x)
        out = self.bn(self.conv(x, A))
        return self.relu(out + shortcut)
def extractFileName(path, withoutExtension=None):
    """Return the file-name component of `path` (Windows or POSIX separators).

    Args:
        path: path string; a trailing separator is tolerated.
        withoutExtension: when truthy, strip the final extension using
            ``ntpath.splitext`` semantics ('a.tar.gz' -> 'a.tar').

    Returns:
        The base name, optionally without its last extension.

    Fixes: removed a stray no-op ``ntpath.basename('a/b/c')`` call; replaced
    ``tail.split('.')[-2]``, which raised IndexError on extensionless names
    and returned the wrong component for multi-dot names, with
    ``ntpath.splitext``.
    """
    head, tail = ntpath.split(path)
    # A path ending in a separator yields an empty tail; fall back to the
    # last component of the head in that case.
    name = tail or ntpath.basename(head)
    if withoutExtension:
        return ntpath.splitext(name)[0]
    return name
@pytest.mark.parametrize('lil_container', LIL_CONTAINERS)
def test_timeout(lil_container):
    """SVC with max_iter=1 must emit a ConvergenceWarning about early termination.

    Fixes: the decorator line was truncated to '.parametrize(...)' (missing
    '@pytest.mark'), and the custom-kernel lambda had lost its operator
    ('x y.T'); restored as matrix multiplication. TODO confirm '@' matches
    the upstream kernel definition.
    """
    sp = svm.SVC(C=1, kernel=(lambda x, y: x @ y.T), probability=True, random_state=0, max_iter=1)
    warning_msg = 'Solver terminated early \\(max_iter=1\\). Consider pre-processing your data with StandardScaler or MinMaxScaler.'
    with pytest.warns(ConvergenceWarning, match=warning_msg):
        sp.fit(lil_container(X), Y)
def render_missing_impact_1v1(itmdt: Intermediate, cfg: Config) -> Dict[(str, Any)]:
    """Render plots showing how missing values in column x impact column y.

    Args:
        itmdt: intermediate results holding 'x', 'y', 'meta' and the
            per-plot data ('hist', 'dist', 'box').
        cfg: plot configuration (sizes and which plot types are enabled).

    Returns:
        Dict with 'layout', 'meta', 'container_width' and 'how_to_guide'
        entries consumed by the report renderer.

    Raises:
        ValueError: when y's dtype is not a supported kind.
    """
    plot_width = (cfg.plot.width if (cfg.plot.width is not None) else 400)
    plot_height = (cfg.plot.height if (cfg.plot.height is not None) else 400)
    (x, y, meta) = (itmdt['x'], itmdt['y'], itmdt['meta'])
    # Tab title -> "how to guide" help entries.
    htgs: Dict[(str, List[Tuple[(str, str)]])] = {}
    if isinstance(meta['dtype'], Continuous):
        # Numerical y: one tab per enabled plot kind.
        panels = []
        if cfg.hist.enable:
            fig = render_hist(itmdt['hist'], y, meta, plot_width, plot_height, True)
            panels.append(Panel(child=fig, title='Histogram'))
            htgs['Histogram'] = cfg.hist.how_to_guide(plot_height, plot_width)
        if cfg.pdf.enable:
            fig = render_dist(itmdt['dist'], y, 'pdf', plot_width, plot_height)
            panels.append(Panel(child=fig, title='PDF'))
            htgs['PDF'] = cfg.pdf.how_to_guide(plot_height, plot_width)
        if cfg.cdf.enable:
            fig = render_dist(itmdt['dist'], y, 'cdf', plot_width, plot_height)
            panels.append(Panel(child=fig, title='CDF'))
            htgs['CDF'] = cfg.cdf.how_to_guide(plot_height, plot_width)
        if cfg.box.enable:
            fig = render_boxwhisker(itmdt['box'], plot_width, plot_height)
            panels.append(Panel(child=fig, title='Box Plot'))
            htgs['Box Plot'] = cfg.box.univar_how_to_guide(plot_height, plot_width)
        # Force a uniform frame width across tabs.
        for panel in panels:
            panel.child.frame_width = plot_width
        return {'layout': [panel.child for panel in panels], 'meta': [panel.title for panel in panels], 'container_width': max([panel.child.plot_width for panel in panels]), 'how_to_guide': htgs}
    elif isinstance(meta['dtype'], (Nominal, SmallCardNum, GeoGraphy, DateTime)):
        # Categorical-like y: a single histogram, possibly truncated to the
        # top `shown` categories out of `total`.
        fig = render_hist(itmdt['hist'], y, meta, plot_width, plot_height, True)
        (shown, total) = (meta['shown'], meta['total'])
        if (shown != total):
            _title = f'Missing impact of {x} by ({shown} out of {total}) {y}'
        else:
            _title = f'Missing impact of {x} by {y}'
        htgs[_title] = cfg.bar.how_to_guide(plot_height, plot_width)
        return {'layout': [fig], 'meta': [_title], 'container_width': fig.plot_width, 'how_to_guide': htgs}
    else:
        mtype = type(meta['dtype'])
        raise ValueError(f'unsupported type:{mtype}')
def test_subtokenize_for_bert_like(bert_with_tokenizer):
    """subtokenize_for_bert_like must keep chunk spans aligned after sub-word splitting."""
    (bert, tokenizer) = bert_with_tokenizer
    sub_prefix = _tokenizer2sub_prefix(tokenizer)
    tokens = TokenSequence.from_tokenized_text(['I', 'like', 'it', 'sooooo', 'much!'])
    # Enumerate every possible (start, end) span so all alignments are checked.
    chunks = [('EntA', start, end) for start in range(len(tokens)) for end in range((start + 1), (len(tokens) + 1))]
    data = [{'tokens': tokens, 'chunks': chunks}]
    new_data = subtokenize_for_bert_like(data, tokenizer, verbose=False)
    # Fractional indices mark sub-token positions inside one original token
    # (e.g. the 4th token splitting into 3 pieces -> 3, 3.333, 3.667).
    assert (new_data[0]['sub2ori_idx'] == [0, 1, 2, 3, 3.333, 3.667, 4, 4.5, 5])
    assert (new_data[0]['ori2sub_idx'] == [0, 1, 2, 3, 6, 8])
    for (entry, new_entry) in zip(data, new_data):
        for (ck, new_ck) in zip(entry['chunks'], new_entry['chunks']):
            # Text covered by each remapped chunk must equal the original
            # text once sub-token prefixes (e.g. '##') are stripped.
            assert (''.join(entry['tokens'].raw_text[ck[1]:ck[2]]).lower() == ''.join(new_entry['tokens'].raw_text[new_ck[1]:new_ck[2]]).replace(sub_prefix, '').lower())
def get_classif_name(classifier_config, usepytorch):
    """Build a short human-readable name for the probing classifier.

    sklearn logistic regression gets a fixed name; a PyTorch MLP encodes
    hidden size, optimizer and batch size (with 'adam'/64 defaults).
    """
    if not usepytorch:
        return 'sklearn-LogReg'
    nhid = classifier_config['nhid']
    optim = classifier_config.get('optim', 'adam')
    bs = classifier_config.get('batch_size', 64)
    return 'pytorch-MLP-nhid%s-%s-bs%s' % (nhid, optim, bs)
def cut_wav(wav_path, time_pair, fname, dist_folder, time_scale=1000):
    """Cut [start, stop] from a 48 kHz mono PCM_16 wav and write it as raw PCM.

    Args:
        wav_path: path of the source wav/raw file.
        time_pair: (start, stop) pair, in units of 1/time_scale seconds
            (milliseconds by default).
        fname: output file name (without extension).
        dist_folder: destination directory for the '.raw' output.
        time_scale: divisor converting time units to seconds.

    Fix: the original divisor was lost in this file ('... * 48000) / )',
    a syntax error). Restored as a `time_scale` parameter defaulting to
    1000 (ms -> samples at 48 kHz) — TODO confirm the intended unit.
    """
    start, stop = time_pair
    # Convert time units to sample offsets at 48 kHz.
    start = int(int(start) * 48000 / time_scale)
    stop = int(int(stop) * 48000 / time_scale)
    (y, osr) = sf.read(wav_path, subtype='PCM_16', channels=1, samplerate=48000, endian='LITTLE', start=start, stop=stop)
    new_path = os.path.join(dist_folder, (fname + '.raw'))
    sf.write(new_path, y, subtype='PCM_16', samplerate=48000, endian='LITTLE')
def shap_coefficients(n):
    """Build the Shapley-style weighting coefficients for all subsets of {2..n}.

    For each subset S of N = {2, ..., n} the dict maps
    tuple(S)        -> +|S|! * (n - |S| + 1)! / n!   (as an exact Fraction)
    tuple(S + (1,)) -> the negated weight.

    Fix: replaced ``np.math.factorial`` with ``math.factorial`` — the
    ``np.math`` alias was deprecated and removed in NumPy 2.0; results are
    identical.
    """
    from math import factorial  # np.math was removed in NumPy 2.0
    N = (np.arange((n - 1)) + 2)
    coeff_dict = {}
    for S in powerset(N):
        weight = Fraction((factorial(len(S)) * factorial(((n - len(S)) + 1))), factorial(n))
        coeff_dict[tuple(S)] = weight
        # Subsets extended with element 1 get the negated weight.
        coeff_dict[tuple((S + (1,)))] = -weight
    return coeff_dict
def dropout(x, keep_prob, training, noise_shape=None):
    """Apply dropout only when the `training` tensor is true; identity otherwise.

    `keep_prob >= 1.0` short-circuits to the input, since nothing would be
    dropped anyway.
    """
    if keep_prob >= 1.0:
        return x

    def _dropped():
        return tf.nn.dropout(x, keep_prob, noise_shape=noise_shape)

    def _identity():
        return x

    return tf.cond(training, _dropped, _identity)
def create_sampler(datasets, shuffles, num_tasks, global_rank):
    """Build one DistributedSampler per (dataset, shuffle) pair.

    Args:
        datasets: datasets to wrap.
        shuffles: per-dataset shuffle flags (zipped with `datasets`).
        num_tasks: total number of replicas.
        global_rank: rank of the current process.

    Returns:
        List of DistributedSampler instances, one per dataset.
    """
    return [
        torch.utils.data.DistributedSampler(dataset, num_replicas=num_tasks, rank=global_rank, shuffle=shuffle)
        for dataset, shuffle in zip(datasets, shuffles)
    ]
def null_wait_for_socket(*args, **kwargs):
    """Fallback used when the platform offers no select()/poll() equivalent;
    always raises instead of waiting on the socket."""
    raise NoWayToWaitForSocketError('no select-equivalent available')
class VarianceThresholder():
    """Thin wrapper around the configured variance-threshold feature selector."""

    def __init__(self, num_pipe_info: Dict[(str, Any)]) -> None:
        # NOTE(review): the selector is only built when the pipeline enables
        # 'variance_threshold'; otherwise this object exists but its
        # fit/transform methods would fail — confirm callers guard on the flag.
        if num_pipe_info['variance_threshold']:
            self.variance = num_pipe_info['variance']
            self.thresholder = operator_dic['variance_threshold'](self.variance)

    def fit(self, col_df: dd.Series) -> Any:
        """Fit the underlying selector on a column; returns self for chaining."""
        self.thresholder.fit(col_df)
        return self

    def transform(self, col_df: dd.Series) -> dd.Series:
        """Apply the fitted selector to a column."""
        return self.thresholder.transform(col_df)

    def fit_transform(self, training_df: dd.Series, test_df: dd.Series) -> Tuple[(dd.Series, dd.Series)]:
        """Fit on the training column, then transform both train and test columns."""
        selector = self.thresholder
        selector.fit(training_df)
        return (selector.transform(training_df), selector.transform(test_df))
class TestFindPatternApplyF(TestCase):
    """Tests for find_pattern_apply_f over nested dict/list structures."""

    @staticmethod
    def _is_tagged(x):
        # Matches ('t', value)-style tagged tuples/lists.
        return isinstance(x, (tuple, list)) and x[0] == 't'

    def test_does_nothing_if_not_required(self):
        d = {'arg1': 3, 'arg2': 2}
        find_pattern_apply_f(d, self._is_tagged, lambda x: x ** 2)
        self.assertEqual(d, {'arg1': 3, 'arg2': 2})

    def test_basic(self):
        d = {'arg1': 3, 'arg2': ('t', 2)}
        find_pattern_apply_f(d, self._is_tagged, lambda x: x[1] ** 2)
        self.assertEqual(d, {'arg1': 3, 'arg2': 4})

    def test_handles_nesting(self):
        d = {'arg1': 3, 'arg2': {'k': ('t', 2)}}
        find_pattern_apply_f(d, self._is_tagged, lambda x: x[1] ** 2)
        self.assertEqual(d, {'arg1': 3, 'arg2': {'k': 4}})

    def test_finds_in_seqeuence(self):
        d = {'arg1': 3, 'arg2': ['k', ('t', 2)]}
        find_pattern_apply_f(d, self._is_tagged, lambda x: x[1] ** 2)
        self.assertEqual(d, {'arg1': 3, 'arg2': ['k', 4]})
class Partition2(nn.Module):
    """Auto-generated pipeline-parallel partition (stage on cuda:2) holding T5 encoder blocks 6-8.

    NOTE: machine-generated from a traced T5ForConditionalGeneration — the
    wiring in forward() mirrors the trace and should not be hand-edited.
    Each encoder block is self-attention (layer_norm -> q/k/v projections ->
    matmul + attention bias -> softmax -> dropout -> output projection ->
    residual) followed by feed-forward (layer_norm -> wi -> relu -> dropout
    -> wo -> dropout -> residual).
    """
    # Traced module scopes this stage owns, in execution order (l_0..l_32).
    LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[6]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 
    'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[7]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 
    'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[1]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[8]/ModuleList[layer]/T5LayerFF[1]/Dropout[dropout]']
    # This stage owns no free-floating parameters or buffers.
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:2'):
        super().__init__()
        # Adopt this stage's traced layers under short names l_0..l_32.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        # Register owned tensors as parameters/buffers (none for this stage).
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # Two flat inputs: hidden states and the extended attention bias.
        self.input_structure = [1, 1]
        # Map from short layer names back to original module paths.
        self.lookup = {'l_0': 'encoder.block.6.layer.0.layer_norm', 'l_1': 'encoder.block.6.layer.0.SelfAttention.q', 'l_2': 'encoder.block.6.layer.0.SelfAttention.k', 'l_3': 'encoder.block.6.layer.0.SelfAttention.v', 'l_4': 'encoder.block.6.layer.0.SelfAttention.o', 'l_5': 'encoder.block.6.layer.0.dropout', 'l_6': 'encoder.block.6.layer.1.layer_norm', 'l_7': 'encoder.block.6.layer.1.DenseReluDense.wi', 'l_8': 'encoder.block.6.layer.1.DenseReluDense.dropout', 'l_9': 'encoder.block.6.layer.1.DenseReluDense.wo', 'l_10': 'encoder.block.6.layer.1.dropout', 'l_11': 'encoder.block.7.layer.0.layer_norm', 'l_12': 'encoder.block.7.layer.0.SelfAttention.q', 'l_13': 'encoder.block.7.layer.0.SelfAttention.k', 'l_14': 'encoder.block.7.layer.0.SelfAttention.v', 'l_15': 'encoder.block.7.layer.0.SelfAttention.o', 'l_16': 'encoder.block.7.layer.0.dropout', 'l_17': 'encoder.block.7.layer.1.layer_norm', 'l_18': 'encoder.block.7.layer.1.DenseReluDense.wi', 'l_19': 'encoder.block.7.layer.1.DenseReluDense.dropout', 'l_20': 'encoder.block.7.layer.1.DenseReluDense.wo', 'l_21': 'encoder.block.7.layer.1.dropout', 'l_22': 'encoder.block.8.layer.0.layer_norm', 'l_23': 'encoder.block.8.layer.0.SelfAttention.q', 'l_24': 'encoder.block.8.layer.0.SelfAttention.k', 'l_25': 'encoder.block.8.layer.0.SelfAttention.v', 'l_26': 'encoder.block.8.layer.0.SelfAttention.o', 'l_27': 'encoder.block.8.layer.0.dropout', 'l_28': 'encoder.block.8.layer.1.layer_norm', 'l_29': 'encoder.block.8.layer.1.DenseReluDense.wi', 'l_30': 'encoder.block.8.layer.1.DenseReluDense.dropout', 'l_31': 'encoder.block.8.layer.1.DenseReluDense.wo', 'l_32': 'encoder.block.8.layer.1.dropout'}
        self.to(self.device)

    def forward(self, *args):
        """Run encoder blocks 6-8 and return [hidden_states, attention_bias] flattened."""
        # Unflatten args into (hidden_states x0, extended attention bias x1).
        (x0, x1) = unflatten(args, self.input_structure)
        # --- encoder block 6: self-attention ---
        t_0 = self.l_0(x0)
        t_1 = self.l_1(t_0)
        t_2 = self.l_2(t_0)
        t_3 = self.l_3(t_0)
        # t_0 is reused to hold the batch size from here on.
        t_0 = t_0.shape
        t_0 = t_0[slice(None, 2, None)]
        t_0 = t_0[0]
        # Split q/k/v into 32 heads of size 128 and move heads to dim 1.
        t_1 = t_1.view(t_0, (- 1), 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_2 = t_2.view(t_0, (- 1), 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_3 = t_3.view(t_0, (- 1), 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_2 = t_2.transpose(3, 2)
        t_2 = torch.matmul(t_1, t_2)
        # Add the (position-bias + mask) attention bias.
        t_2 += x1
        t_1 = t_2.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None)
        t_2 = t_1.type_as(t_2)
        t_2 = torch.nn.functional.dropout(t_2, p=0.1, training=self.training, inplace=False)
        t_3 = torch.matmul(t_2, t_3)
        t_3 = t_3.transpose(1, 2)
        t_3 = t_3.contiguous()
        t_0 = t_3.view(t_0, (- 1), 4096)
        t_0 = self.l_4(t_0)
        t_3 = self.l_5(t_0)
        # Residual connection around self-attention.
        t_3 = (x0 + t_3)
        t_0 = (t_0, None, x1)
        t_2 = t_0[0]
        t_3 = (t_3,)
        t_0 = t_0[slice(1, None, None)]
        t_0 = (t_3 + t_0)
        # --- encoder block 6: feed-forward ---
        t_3 = t_0[slice(None, 2, None)]
        t_1 = t_3[0]
        t_4 = self.l_6(t_1)
        t_3 = t_3[1]
        t_0 = t_0[slice(2, None, None)]
        t_4 = self.l_7(t_4)
        t_4 = torch.nn.functional.relu(t_4, inplace=False)
        t_4 = self.l_8(t_4)
        t_4 = self.l_9(t_4)
        t_4 = self.l_10(t_4)
        # Residual connection around the feed-forward sub-block.
        t_4 = (t_1 + t_4)
        t_3 = (t_4, t_3)
        t_0 = (t_3 + t_0)
        # --- encoder block 7: self-attention ---
        t_3 = t_0[slice(None, 2, None)]
        t_3 = t_3[0]
        t_4 = self.l_11(t_3)
        t_0 = t_0[2]
        t_1 = self.l_12(t_4)
        t_5 = self.l_13(t_4)
        t_6 = self.l_14(t_4)
        t_4 = t_4.shape
        t_4 = t_4[slice(None, 2, None)]
        t_4 = t_4[0]
        t_1 = t_1.view(t_4, (- 1), 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_5 = t_5.view(t_4, (- 1), 32, 128)
        t_5 = t_5.transpose(1, 2)
        t_6 = t_6.view(t_4, (- 1), 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_5 = t_5.transpose(3, 2)
        t_5 = torch.matmul(t_1, t_5)
        t_5 += t_0
        t_1 = t_5.float()
        t_1 = torch.nn.functional.softmax(t_1, dim=(- 1), _stacklevel=3, dtype=None)
        t_5 = t_1.type_as(t_5)
        t_5 = torch.nn.functional.dropout(t_5, p=0.1, training=self.training, inplace=False)
        t_6 = torch.matmul(t_5, t_6)
        t_6 = t_6.transpose(1, 2)
        t_6 = t_6.contiguous()
        t_4 = t_6.view(t_4, (- 1), 4096)
        t_4 = self.l_15(t_4)
        t_6 = self.l_16(t_4)
        t_6 = (t_3 + t_6)
        t_0 = (t_4, None, t_0)
        t_4 = t_0[0]
        t_6 = (t_6,)
        t_0 = t_0[slice(1, None, None)]
        t_0 = (t_6 + t_0)
        # --- encoder block 7: feed-forward ---
        t_6 = t_0[slice(None, 2, None)]
        t_3 = t_6[0]
        t_5 = self.l_17(t_3)
        t_6 = t_6[1]
        t_0 = t_0[slice(2, None, None)]
        t_5 = self.l_18(t_5)
        t_5 = torch.nn.functional.relu(t_5, inplace=False)
        t_5 = self.l_19(t_5)
        t_5 = self.l_20(t_5)
        t_5 = self.l_21(t_5)
        t_5 = (t_3 + t_5)
        t_6 = (t_5, t_6)
        t_0 = (t_6 + t_0)
        # --- encoder block 8: self-attention ---
        t_6 = t_0[slice(None, 2, None)]
        t_6 = t_6[0]
        t_5 = self.l_22(t_6)
        t_0 = t_0[2]
        t_3 = self.l_23(t_5)
        t_1 = self.l_24(t_5)
        t_7 = self.l_25(t_5)
        t_5 = t_5.shape
        t_5 = t_5[slice(None, 2, None)]
        t_5 = t_5[0]
        t_3 = t_3.view(t_5, (- 1), 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_1 = t_1.view(t_5, (- 1), 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_7 = t_7.view(t_5, (- 1), 32, 128)
        t_7 = t_7.transpose(1, 2)
        t_1 = t_1.transpose(3, 2)
        t_1 = torch.matmul(t_3, t_1)
        t_1 += t_0
        t_3 = t_1.float()
        t_3 = torch.nn.functional.softmax(t_3, dim=(- 1), _stacklevel=3, dtype=None)
        t_1 = t_3.type_as(t_1)
        t_1 = torch.nn.functional.dropout(t_1, p=0.1, training=self.training, inplace=False)
        t_7 = torch.matmul(t_1, t_7)
        t_7 = t_7.transpose(1, 2)
        t_7 = t_7.contiguous()
        t_5 = t_7.view(t_5, (- 1), 4096)
        t_5 = self.l_26(t_5)
        t_7 = self.l_27(t_5)
        t_7 = (t_6 + t_7)
        t_0 = (t_5, None, t_0)
        t_5 = t_0[0]
        t_7 = (t_7,)
        t_0 = t_0[slice(1, None, None)]
        t_0 = (t_7 + t_0)
        # --- encoder block 8: feed-forward ---
        t_7 = t_0[slice(None, 2, None)]
        t_6 = t_7[0]
        t_1 = self.l_28(t_6)
        t_7 = t_7[1]
        t_0 = t_0[slice(2, None, None)]
        t_1 = self.l_29(t_1)
        t_1 = torch.nn.functional.relu(t_1, inplace=False)
        t_1 = self.l_30(t_1)
        t_1 = self.l_31(t_1)
        t_1 = self.l_32(t_1)
        t_1 = (t_6 + t_1)
        t_7 = (t_1, t_7)
        t_0 = (t_7 + t_0)
        t_7 = t_0[slice(None, 2, None)]
        t_7 = t_7[0]
        t_0 = t_0[2]
        # Flatten (hidden_states, attention_bias) for the next pipeline stage.
        return list(flatten((t_7, t_0)))

    # The following delegate to the pipeline framework's scope-aware
    # implementations (module-level helpers with the same names).
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
def warmup_exp(x, warmup=0.002, min_val=0.01):
    """Linear warmup followed by stepwise exponential decay (TF graph ops).

    During x <= warmup, returns x / warmup (ramps 0 -> 1); afterwards decays
    in 1%-of-x steps such that decay**100 == min_val.
    """
    in_warmup = tf.cast(x <= warmup, tf.float32)
    # Per-step decay rate: reaches min_val after 100 steps.
    decay = min_val ** (1.0 / 100)
    step = tf.floor(x * 100)
    warm_part = in_warmup * (x / warmup)
    decay_part = (1 - in_warmup) * 1.0 * (decay ** step)
    return warm_part + decay_part
@pytest.mark.skipif((not _ti_core.GGUI_AVAILABLE), reason='GGUI Not Available')
@test_utils.test()
def test_deprecate_initialization_of_scene():
    """Directly constructing ti.ui.Scene must raise a DeprecationWarning.

    Fix: both decorator lines were truncated ('.skipif(...)' and
    '_utils.test()'); restored the stripped '@pytest.mark' and '@test'
    prefixes — TODO confirm the decorator names against the original file.
    """
    with pytest.warns(DeprecationWarning, match='Instantiating ti.ui.Scene directly is deprecated, use the get_scene\\(\\) function from a taichi.ui.Window object instead.'):
        ti.ui.Scene()
def plot_latent_variables(ax):
    """Plot encoder mean +/- std for every latent-variable layer of the DGP.

    NOTE(review): relies on module-level `dgp` (the deep GP model) and `X`
    (the inputs) — confirm both are defined before this is called.
    """
    for l in dgp.f_layers:
        if isinstance(l, gpflux.layers.LatentVariableLayer):
            m = l.encoder.means.numpy()
            s = l.encoder.stds.numpy()
            # One errorbar series per latent-variable layer.
            ax.errorbar(X.flatten(), m.flatten(), yerr=s.flatten(), fmt='o')
    return
def GenerateSM80_TensorOp_1688_fast_fp32_math_complex(manifest, cuda_version):
    """Register SM80 complex GEMM kernels using the 16x8x8 fast-f32 tensor-op path."""
    # These tensor-op instructions require CUDA 11.0 or newer.
    if (not CudaToolkitVersionSatisfies(cuda_version, 11, 0)):
        return
    # All column/row-major combinations for A and B, with column-major C.
    layouts = [(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor)]
    math_inst = MathInstruction([16, 8, 8], DataType.f32, DataType.f32, DataType.f32, OpcodeClass.TensorOp, MathOperation.multiply_add_complex_fast_f32)
    # Compute-capability range these kernels target.
    min_cc = 80
    max_cc = 1024
    # Tile shape, pipeline stages, warp arrangement for each kernel variant.
    tile_descriptions = [TileDescription([128, 64, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc), TileDescription([64, 128, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc), TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([32, 32, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc)]
    # A, B, C, accumulator element types (all complex-float32).
    data_type = [DataType.cf32, DataType.cf32, DataType.cf32, DataType.cf32]
    alignment_constraints = [1]
    # none/conjugate transform combinations for the A and B operands.
    complex_transforms = [(ComplexTransform.none, ComplexTransform.none), (ComplexTransform.conj, ComplexTransform.none), (ComplexTransform.none, ComplexTransform.conj), (ComplexTransform.conj, ComplexTransform.conj)]
    CreateGemmOperator(manifest, layouts, tile_descriptions, data_type, alignment_constraints, complex_transforms)
def nte_sampling(model_instance, pool, size):
    """Active-learning query: select the `size` pool items with the highest NTE scores.

    NOTE(review): relies on a module-level `args` namespace passed to
    get_tr_set — confirm it is defined in this module.

    Returns:
        (query_index, selected_examples): indices into `pool` and the
        corresponding examples.
    """
    active_eval_loader = get_tr_set(train_examples=pool, batch_size=1, args=args)
    (raw_prediction, turncate_list) = active_eval(active_eval_loader, model_instance)
    # Per-sentence normalized token entropy (uncertainty) scores.
    sentence_nte = cal_nte(raw_prediction, turncate_list)
    query_index = multi_argmax(np.array(sentence_nte), size)
    return (query_index, pool[query_index])
def train(args, model, device, loader, optimizer):
    """Run one training epoch and return the average loss over batches.

    NOTE(review): `criterion` is not a parameter — it is resolved from
    module scope; confirm it is defined before calling.
    """
    model.train()
    loss_accum = 0
    for (step, batch) in enumerate(tqdm(loader, desc='Iteration')):
        batch = batch.to(device)
        pred = model(batch)
        # Targets reshaped to match predictions and cast to float64.
        y = batch.go_target_pretrain.view(pred.shape).to(torch.float64)
        optimizer.zero_grad()
        loss = criterion(pred.double(), y)
        loss.backward()
        optimizer.step()
        loss_accum += loss.detach().cpu()
    # NOTE(review): an empty loader leaves `step` unbound and this raises —
    # acceptable only if loaders are never empty.
    return (loss_accum / (step + 1))
class StandUpExecutor(ActionExecutor):
    """Executor for the stand-up action: clears SITTING/LYING from the character."""

    def execute(self, script: Script, state: EnvironmentState, info: ExecutionInfo):
        info.set_current_line(script[0])
        char_node = _get_character_node(state)
        if ((State.SITTING in char_node.states) or (State.LYING in char_node.states)):
            # Mutate a copy so the current state graph is left untouched.
            new_char_node = char_node.copy()
            new_char_node.states.discard(State.SITTING)
            new_char_node.states.discard(State.LYING)
            # Also drop any ON relation — the character leaves the seat/bed.
            (yield state.change_state([DeleteEdges(CharacterNode(), [Relation.ON], AnyNode()), ChangeNode(new_char_node)]))
        else:
            # Yield nothing: the precondition failed, record the error.
            info.error('{} is not sitting', char_node)
def get_transforms_field_v2(transforms):
    """Return `transforms` as a numpy array; ndarrays pass through unchanged.

    Anything else is expected to expose a pandas-style `.to_numpy()` method.
    """
    if not isinstance(transforms, np.ndarray):
        return transforms.to_numpy()
    return transforms
class PositionwiseFeedForward(nn.Module):
    """Position-wise feed-forward layer implemented with 1x1 convolutions.

    Input and output are (batch, length, d_hid); the hidden width defaults
    to d_hid when `d_inner_hid` is not given.
    """

    def __init__(self, d_hid, d_inner_hid=None, dropout=0):
        super(PositionwiseFeedForward, self).__init__()
        inner = d_hid if d_inner_hid is None else d_inner_hid
        self.w_1 = nn.Conv1d(d_hid, inner, 1)
        self.w_2 = nn.Conv1d(inner, d_hid, 1)
        self.dropout = nn.Dropout(dropout)
        self.relu = nn.ReLU()

    def forward(self, x):
        # Conv1d expects (batch, channels, length): transpose in, then back out.
        hidden = self.relu(self.w_1(x.transpose(1, 2)))
        projected = self.w_2(hidden).transpose(2, 1)
        return self.dropout(projected)
def _test_if(x):
    """Build a net-builder fragment exercising If/stop semantics on blob y.

    NOTE(review): the source indentation was lost; the trailing stop() and
    Const(4) are placed inside the LT branch here (so stop() prevents y
    from being overwritten with 4) — confirm against the original test,
    which asserts y stays 3 when x < 50.
    """
    y = ops.Const(1)
    with ops.If(ops.GT([x, ops.Const(50)])):
        ops.Const(2, blob_out=y)
    with ops.If(ops.LT([x, ops.Const(50)])):
        ops.Const(3, blob_out=y)
        # stop() halts execution of this branch's remaining ops.
        ops.stop()
        ops.Const(4, blob_out=y)
    return y
class BaseMergeCell(nn.Module):
    """Base class for merge cells that fuse two feature maps into one.

    Subclasses implement `_binary_op` to define how the two (resized,
    optionally convolved) inputs are combined; an optional output ConvModule
    post-processes the fused map.
    """

    def __init__(self, fused_channels=256, out_channels=256, with_out_conv=True, out_conv_cfg=dict(groups=1, kernel_size=3, padding=1, bias=True), out_norm_cfg=None, out_conv_order=('act', 'conv', 'norm'), with_input1_conv=False, with_input2_conv=False, input_conv_cfg=None, input_norm_cfg=None, upsample_mode='nearest'):
        super(BaseMergeCell, self).__init__()
        assert (upsample_mode in ['nearest', 'bilinear'])
        self.with_out_conv = with_out_conv
        self.with_input1_conv = with_input1_conv
        self.with_input2_conv = with_input2_conv
        self.upsample_mode = upsample_mode
        if self.with_out_conv:
            self.out_conv = ConvModule(fused_channels, out_channels, **out_conv_cfg, norm_cfg=out_norm_cfg, order=out_conv_order)
        # Per-input 3x3 conv when enabled; identity (empty Sequential) otherwise.
        self.input1_conv = (self._build_input_conv(out_channels, input_conv_cfg, input_norm_cfg) if with_input1_conv else nn.Sequential())
        self.input2_conv = (self._build_input_conv(out_channels, input_conv_cfg, input_norm_cfg) if with_input2_conv else nn.Sequential())

    def _build_input_conv(self, channel, conv_cfg, norm_cfg):
        # 3x3 same-padding conv used to pre-process one input branch.
        return ConvModule(channel, channel, 3, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, bias=True)

    def _binary_op(self, x1, x2):
        # Subclasses define the fusion (e.g. sum, max, global-pool attention).
        pass

    def _resize(self, x, size):
        # Match x's spatial size to `size`: identity when equal, upsample when
        # smaller, max-pool when an exact integer multiple larger.
        if (x.shape[(- 2):] == size):
            return x
        elif (x.shape[(- 2):] < size):
            return F.interpolate(x, size=size, mode=self.upsample_mode)
        else:
            # Downscale factor must be an exact integer in both dimensions.
            assert (((x.shape[(- 2)] % size[(- 2)]) == 0) and ((x.shape[(- 1)] % size[(- 1)]) == 0))
            kernel_size = (x.shape[(- 1)] // size[(- 1)])
            x = F.max_pool2d(x, kernel_size=kernel_size, stride=kernel_size)
            return x

    def forward(self, x1, x2, out_size=None):
        """Fuse x1 and x2 at `out_size` (defaults to the larger input's size)."""
        assert (x1.shape[:2] == x2.shape[:2])
        assert ((out_size is None) or (len(out_size) == 2))
        if (out_size is None):
            # Element-wise tuple comparison picks the larger spatial size.
            out_size = max(x1.size()[2:], x2.size()[2:])
        x1 = self.input1_conv(x1)
        x2 = self.input2_conv(x2)
        x1 = self._resize(x1, out_size)
        x2 = self._resize(x2, out_size)
        x = self._binary_op(x1, x2)
        if self.with_out_conv:
            x = self.out_conv(x)
        return x
def register_Ns3SixLowPanFragN_methods(root_module, cls):
    """Register Python bindings for the ns3::SixLowPanFragN header class.

    Adds constructors, accessors and the Header serialization interface to
    *cls*; *root_module* is unused but kept for the common registration
    signature.
    """
    cls.add_output_stream_operator()
    # Copy constructor, then the default constructor.
    cls.add_constructor([param('ns3::SixLowPanFragN const &', 'arg0')])
    cls.add_constructor([])
    # (name, return type, parameters, extra flags), in declaration order.
    method_table = [
        ('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], dict(is_virtual=True)),
        ('GetDatagramOffset', 'uint8_t', [], dict(is_const=True)),
        ('GetDatagramSize', 'uint16_t', [], dict(is_const=True)),
        ('GetDatagramTag', 'uint16_t', [], dict(is_const=True)),
        ('GetInstanceTypeId', 'ns3::TypeId', [], dict(is_const=True, is_virtual=True)),
        ('GetSerializedSize', 'uint32_t', [], dict(is_const=True, is_virtual=True)),
        ('GetTypeId', 'ns3::TypeId', [], dict(is_static=True)),
        ('Print', 'void', [param('std::ostream &', 'os')], dict(is_const=True, is_virtual=True)),
        ('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], dict(is_const=True, is_virtual=True)),
        ('SetDatagramOffset', 'void', [param('uint8_t', 'datagramOffset')], dict()),
        ('SetDatagramSize', 'void', [param('uint16_t', 'datagramSize')], dict()),
        ('SetDatagramTag', 'void', [param('uint16_t', 'datagramTag')], dict()),
    ]
    for method_name, return_type, parameters, flags in method_table:
        cls.add_method(method_name, return_type, parameters, **flags)
    return
class UNetUp(nn.Module):
    """U-Net decoder stage: conv-transpose block, then skip concatenation.

    The block keeps spatial size (3x3, stride 1, padding 1); the forward
    pass concatenates its output with the encoder skip along channels.
    """

    def __init__(self, in_size, out_size, dropout=0.0):
        super(UNetUp, self).__init__()
        layers = [
            nn.ConvTranspose2d(in_size, out_size, 3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(out_size, 0.8),
            nn.ReLU(inplace=True),
        ]
        # Dropout is appended only when a non-zero rate is requested.
        if dropout:
            layers.append(nn.Dropout(dropout))
        self.model = nn.Sequential(*layers)

    def forward(self, x, skip_input):
        return torch.cat((self.model(x), skip_input), 1)
def mse_r0(s_hat, log_r_hat, t_hat, y, log_r, t):
    """MSE between estimated and true inverse likelihood ratios on y == 0.

    Only ``log_r_hat``, ``y`` and ``log_r`` are used; the remaining
    arguments keep the common loss-function signature. Samples with
    y == 1 are zeroed out by the (1 - y) mask before averaging.
    """
    mask = 1.0 - y
    predicted = mask * torch.exp(-log_r_hat)
    target = mask * torch.exp(-log_r)
    return MSELoss()(predicted, target)
def nonlinear_net(hdf5, batch_size):
    """Spec for a small nonlinear classifier over an HDF5 data source.

    One 40-unit hidden layer with in-place ReLU, a 2-way output layer,
    plus accuracy and softmax-loss heads. Returns the serialized
    NetParameter proto.
    """
    net = caffe.NetSpec()
    net.data, net.label = L.HDF5Data(batch_size=batch_size, source=hdf5, ntop=2)
    net.ip1 = L.InnerProduct(net.data, num_output=40, weight_filler=dict(type='xavier'))
    # ReLU rectifies ip1's blob in place, so ip2 can read ip1 directly.
    net.relu1 = L.ReLU(net.ip1, in_place=True)
    net.ip2 = L.InnerProduct(net.ip1, num_output=2, weight_filler=dict(type='xavier'))
    net.accuracy = L.Accuracy(net.ip2, net.label)
    net.loss = L.SoftmaxWithLoss(net.ip2, net.label)
    return net.to_proto()
class ToyProblem(Problem):
    """Small fixed test topology with a hand-written traffic matrix.

    Loads ``toy-network.json``, sets six point-to-point demands, and gives
    every edge capacity 18 except those listed in ``zero_edges``, which are
    forced to 0.

    :param zero_edges: iterable of (u, v) edges whose capacity is zeroed.
    """

    def __init__(self, *, zero_edges=()):
        # Default is an immutable () rather than the original mutable []:
        # avoids the shared-mutable-default pitfall and is backward
        # compatible since the argument is only iterated.
        G = Problem._read_graph_json(os.path.join(TOPOLOGIES_DIR, 'toy-network.json'))
        num_nodes = len(G.nodes)
        traffic_matrix = np.zeros((num_nodes, num_nodes), dtype=np.float32)
        # Demands (src -> dst, arbitrary units).
        traffic_matrix[0, 1] = 5.0
        traffic_matrix[0, 3] = 10.0
        traffic_matrix[0, 5] = 11.0
        traffic_matrix[3, 2] = 13.0
        traffic_matrix[2, 5] = 12.0
        traffic_matrix[3, 4] = 7.0
        super().__init__(G, traffic_matrix)
        # Uniform capacity 18 everywhere except the explicitly zeroed edges.
        super()._change_capacities(min_cap=18, max_cap=18,
                                   fixed_caps=[(u, v, 0.0) for (u, v) in zero_edges])

    def name(self):
        """Stable identifier used for logging/lookup."""
        return 'toy-problem'
def CI_calc_agresti(item1, item2, CV=1.96):
    """Agresti-Coull confidence interval for a proportion.

    :param item1: observed proportion (successes / trials)
    :param item2: number of trials
    :param CV: critical value (default 1.96, i.e. a 95% interval)
    :return: (lower, upper) bounds, or ('None', 'None') when the interval
        cannot be computed (e.g. sqrt of a negative adjusted variance).
    """
    try:
        cv2 = CV ** 2
        successes = item2 * item1
        # Adjusted point estimate: half a squared critical value is added
        # to the successes and a full one to the trial count.
        center = (successes + cv2 / 2) / (item2 + cv2)
        spread = CV * math.sqrt(center * (1 - center) / (item2 + cv2))
        return (center - spread, center + spread)
    except Exception:
        # Library-wide convention: return string markers instead of raising
        # on degenerate input.
        return ('None', 'None')
def main(optin):
    """Train the PRN keypoint model on COCO train2017 keypoint annotations.

    Side effects: creates checkpoint/<exp>/, writes the option/model
    summaries, and after every epoch runs Evaluation() and saves a
    checkpoint. Requires a CUDA device (model, loss and batches are all
    moved to .cuda()).
    """
    # Make sure the per-experiment checkpoint directory exists.
    if (not os.path.exists(('checkpoint/' + optin.exp))):
        os.makedirs(('checkpoint/' + optin.exp))
    model = PRN(optin.node_count, optin.coeff).cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=optin.lr)
    criterion = torch.nn.BCELoss().cuda()
    print(model)
    print('>>> total params: {:.2f}M'.format((sum((p.numel() for p in model.parameters())) / 1000000.0)))
    # Persist the run configuration and component summaries for reproducibility.
    save_options(optin, os.path.join(('checkpoint/' + optin.exp)), model.__str__(), criterion.__str__(), optimizer.__str__())
    print('Loading Coco Training Set')
    coco_train = COCO(os.path.join('data/annotations/person_keypoints_train2017.json'))
    trainloader = DataLoader(dataset=CocoDataset(coco_train, optin), batch_size=optin.batch_size, num_workers=optin.num_workers, shuffle=True)
    # Progress bar is created once and shared across epochs.
    bar = Bar('-->', fill='>', max=len(trainloader))
    cudnn.benchmark = True
    for epoch in range(optin.number_of_epoch):
        print('Training Epoch {}'.format(epoch))
        print('Total Step:', len(trainloader), '| Total Epoch:', optin.number_of_epoch)
        # Decay the learning rate per epoch; returns the rate now in effect.
        lr = adjust_lr(optimizer, epoch, optin.lr_gamma)
        print(('\nEpoch: %d | LR: %.8f' % ((epoch + 1), lr)))
        for (idx, (input, label)) in tqdm(enumerate(trainloader)):
            input = input.cuda().float()
            label = label.cuda().float()
            outputs = model(input)
            # Standard step: clear grads, compute loss, backprop, update.
            optimizer.zero_grad()
            loss = criterion(outputs, label)
            loss.backward()
            optimizer.step()
            # Refresh the progress bar every 200 batches.
            if ((idx % 200) == 0):
                bar.suffix = 'Epoch: {epoch} Total: {ttl} | ETA: {eta:} | loss:{loss}'.format(ttl=bar.elapsed_td, eta=bar.eta_td, loss=loss.data, epoch=epoch)
                bar.next()
        # End-of-epoch: evaluate, checkpoint, then restore training mode
        # (Evaluation presumably switches the model to eval — confirm).
        Evaluation(model, optin)
        save_model({'epoch': (epoch + 1), 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()}, checkpoint=('checkpoint/' + optin.exp))
        model.train()
def test_wrap_experiment_makes_log_dir():
    """Repeated runs should create test_exp, test_exp_1, test_exp_2 dirs.

    NOTE(review): the original had `_experiment(prefix=prefix)` as a bare
    statement, leaving `test_exp` undecorated — its `ctxt` would stay None
    and the snapshot assert could never pass. It was evidently a stripped
    decorator; restored below.
    """
    prefix = 'wrap_exp_test_makes_log_dir'
    exp_path = pathlib.Path(os.getcwd(), 'data/local', prefix)
    # Start from a clean slate so the directory counts below are deterministic.
    _hard_rmtree(exp_path)
    expected_path = exp_path / 'test_exp'

    @_experiment(prefix=prefix)
    def test_exp(ctxt=None):
        assert expected_path.samefile(ctxt.snapshot_dir)

    # The experiment directory must not exist until the experiment runs.
    assert not exp_path.exists()
    test_exp()
    prefix_contents = list(exp_path.iterdir())
    assert len(prefix_contents) == 1
    assert prefix_contents[0].samefile(expected_path)
    # A second run must get a uniquified directory name.
    expected_path = exp_path / 'test_exp_1'
    test_exp()
    prefix_contents = list(exp_path.iterdir())
    assert len(prefix_contents) == 2
    assert any(expected_path.samefile(directory) for directory in prefix_contents)
    # And a third run continues the numbering.
    expected_path = exp_path / 'test_exp_2'
    test_exp()
    prefix_contents = list(exp_path.iterdir())
    assert len(prefix_contents) == 3
    assert any(expected_path.samefile(directory) for directory in prefix_contents)
def check_args(cmp, n):
    """Decorator factory that validates a walker method's argument count.

    The decorated method is called only if ``cmp(len(args), n)`` holds;
    otherwise ConvertExpressionError is raised.

    NOTE(review): the original body contained a bare ``(f)`` expression —
    a no-op, evidently a stripped ``@wraps(f)`` — so the wrapped method
    lost its name/docstring. Restored via functools.wraps.

    :param cmp: binary predicate applied as cmp(len(args), n)
    :param n: expected argument count (or bound, depending on cmp)
    :return: decorator for methods with signature (self, formula, args, **kwargs)
    """
    from functools import wraps  # local import keeps the block self-contained

    def wrapper(f):
        @wraps(f)  # preserve the wrapped walker's metadata
        def walk_op(self, formula, args, **kwargs):
            if not cmp(len(args), n):
                raise ConvertExpressionError('Incorrect number of arguments')
            return f(self, formula, args, **kwargs)
        return walk_op
    return wrapper
@_task('language_modeling')
class LanguageModelingTask(FairseqTask):
    """Language modeling over a monolingual token stream.

    NOTE(review): in the original, the registration call, ``add_args``,
    ``setup_task`` and the two dictionary accessors had lost their
    decorators — ``setup_task`` takes ``cls`` and ends with ``cls(...)``,
    and ``build_dataset_for_inference`` calls ``self.source_dictionary.pad()``,
    which only works if these are a classmethod and properties. Restored.
    """

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the argument parser."""
        parser.add_argument('data', help='path to data directory')
        parser.add_argument('--sample-break-mode', default='none', choices=['none', 'complete', 'complete_doc', 'eos'], help='If omitted or "none", fills each sample with tokens-per-sample tokens. If set to "complete", splits samples only at the end of sentence, but may include multiple sentences per sample. "complete_doc" is similar but respects doc boundaries. If set to "eos", includes only one sentence per sample.')
        parser.add_argument('--tokens-per-sample', default=1024, type=int, help='max number of tokens per sample for LM dataset')
        parser.add_argument('--lazy-load', action='store_true', help='load the dataset lazily')
        parser.add_argument('--raw-text', default=False, action='store_true', help='load raw text dataset')
        parser.add_argument('--output-dictionary-size', default=(- 1), type=int, help='limit the size of output dictionary')
        parser.add_argument('--self-target', action='store_true', help='include self target')
        parser.add_argument('--future-target', action='store_true', help='include future target')
        parser.add_argument('--past-target', action='store_true', help='include past target')
        parser.add_argument('--add-bos-token', action='store_true', help='prepend beginning of sentence token (<s>)')
        parser.add_argument('--max-target-positions', type=int, metavar='N', help='max number of tokens in the target sequence')

    def __init__(self, args, dictionary, output_dictionary=None, targets=None):
        """Store the dictionaries and the list of prediction targets.

        :param output_dictionary: falls back to ``dictionary`` when None.
        :param targets: defaults to ['future'] (standard next-token LM).
        """
        super().__init__(args)
        self.dictionary = dictionary
        self.output_dictionary = (output_dictionary or dictionary)
        if targets is None:
            targets = ['future']
        self.targets = targets

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Build the task from parsed command-line args (load dictionaries)."""
        # Map deprecated flags onto the --dataset-impl mechanism.
        if getattr(args, 'raw_text', False):
            utils.deprecation_warning('--raw-text is deprecated, please use --dataset-impl=raw')
            args.dataset_impl = 'raw'
        elif getattr(args, 'lazy_load', False):
            utils.deprecation_warning('--lazy-load is deprecated, please use --dataset-impl=lazy')
            args.dataset_impl = 'lazy'
        dictionary = None
        output_dictionary = None
        if args.data:
            paths = args.data.split(':')
            assert (len(paths) > 0)
            dictionary = Dictionary.load(os.path.join(paths[0], 'dict.txt'))
            print('| dictionary: {} types'.format(len(dictionary)))
            output_dictionary = dictionary
            # Optionally restrict the output vocabulary to its most frequent entries.
            if (args.output_dictionary_size >= 0):
                output_dictionary = TruncatedDictionary(dictionary, args.output_dictionary_size)
        # Back-compat: exclude_self_target is the inverted form of self_target.
        if hasattr(args, 'exclude_self_target'):
            args.self_target = (not args.exclude_self_target)
        targets = []
        if getattr(args, 'self_target', False):
            targets.append('self')
        if getattr(args, 'future_target', False):
            targets.append('future')
        if getattr(args, 'past_target', False):
            targets.append('past')
        if (len(targets) == 0):
            # Standard language modeling predicts the next (future) token.
            targets = ['future']
        return cls(args, dictionary, output_dictionary, targets=targets)

    def build_model(self, args):
        """Build the model and verify it supports every requested target."""
        model = super().build_model(args)
        for target in self.targets:
            if (target not in model.supported_targets):
                raise ValueError('Unsupported language modeling target: {}'.format(target))
        return model

    def load_dataset(self, split, epoch=0, combine=False, **kwargs):
        """Load a split, block it into fixed-size token chunks and register it.

        :raises FileNotFoundError: when the indexed dataset is missing.
        """
        # Round-robin over ':'-separated data paths by epoch.
        paths = self.args.data.split(':')
        assert (len(paths) > 0)
        data_path = paths[(epoch % len(paths))]
        split_path = os.path.join(data_path, split)
        dataset = data_utils.load_indexed_dataset(split_path, self.dictionary, self.args.dataset_impl, combine=combine)
        if (dataset is None):
            raise FileNotFoundError('Dataset not found: {} ({})'.format(split, split_path))
        dataset = TokenBlockDataset(dataset, dataset.sizes, self.args.tokens_per_sample, pad=self.dictionary.pad(), eos=self.dictionary.eos(), break_mode=self.args.sample_break_mode, include_targets=True)
        # Extra EOS handling is needed whenever samples break on sentence/doc
        # boundaries rather than fixed token counts.
        add_eos_for_other_targets = ((self.args.sample_break_mode is not None) and (self.args.sample_break_mode != 'none'))
        self.datasets[split] = MonolingualDataset(dataset, dataset.sizes, self.dictionary, self.output_dictionary, add_eos_for_other_targets=add_eos_for_other_targets, shuffle=True, targets=self.targets, add_bos_token=self.args.add_bos_token)

    def build_dataset_for_inference(self, src_tokens, src_lengths):
        """Wrap raw prompts into an EOS-aware dataset for generation."""
        return TransformEosDataset(MonolingualDataset(TokenBlockDataset(src_tokens, src_lengths, block_size=None, pad=self.source_dictionary.pad(), eos=self.source_dictionary.eos(), break_mode='eos', include_targets=False), src_lengths, self.source_dictionary, self.target_dictionary, add_eos_for_other_targets=False, shuffle=False, add_bos_token=self.args.add_bos_token), eos=self.source_dictionary.eos(), remove_eos_from_src=True, has_target=False)

    def inference_step(self, generator, models, sample, prefix_tokens=None):
        """Generate continuations, defaulting the prefix to the sample's source."""
        with torch.no_grad():
            if ((prefix_tokens is None) and sample['net_input']['src_tokens'].nelement()):
                prefix_tokens = sample['net_input']['src_tokens']
            return generator.generate(models, sample, prefix_tokens=prefix_tokens)

    @property
    def source_dictionary(self):
        """Dictionary for model input tokens."""
        return self.dictionary

    @property
    def target_dictionary(self):
        """Dictionary for model output tokens (may be truncated)."""
        return self.output_dictionary
def draw_in_tensorboard(writer, images, i_iter, pred_main, num_classes, type_):
    """Log an image grid, the predicted segmentation mask and an entropy map.

    :param writer: tensorboard SummaryWriter-like object (add_image is used)
    :param images: batch of input images; the first 3 are shown
    :param i_iter: global step for tensorboard
    :param pred_main: raw logits, shape assumed (batch, num_classes, H, W)
        — TODO confirm with caller
    :param num_classes: number of segmentation classes
    :param type_: tag suffix distinguishing e.g. source/target domains
    """
    grid_image = make_grid(images[:3].clone().cpu().data, 3, normalize=True)
    writer.add_image(f'Image - {type_}', grid_image, i_iter)
    # Class softmax of the first sample as HWC numpy; computed once and
    # reused for both the argmax mask and the entropy map (the original
    # recomputed the identical expression twice).
    output_sm = F.softmax(pred_main, dim=1).cpu().data[0].numpy().transpose(1, 2, 0)
    mask = colorize_mask(num_classes, np.asarray(np.argmax(output_sm, axis=2), dtype=np.uint8)).convert('RGB')
    grid_image = make_grid(torch.from_numpy(np.array(mask).transpose(2, 0, 1)), 3, normalize=False, range=(0, 255))
    writer.add_image(f'Prediction - {type_}', grid_image, i_iter)
    # Per-pixel Shannon entropy in bits; 1e-30 guards log2(0).
    output_ent = np.sum(-np.multiply(output_sm, np.log2(output_sm + 1e-30)), axis=2, keepdims=False)
    grid_image = make_grid(torch.from_numpy(output_ent), 3, normalize=True, range=(0, np.log2(num_classes)))
    writer.add_image(f'Entropy - {type_}', grid_image, i_iter)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.