code stringlengths 101 5.91M |
|---|
class Config(dict):
    """``dict`` subclass with attribute-style access, dotted-key addressing
    (``cfg['a.b.c']``) and construction from JSON/JSON5/YAML files, plain
    dicts, or keyword overrides.

    Relies on the module-level helpers ``init_assign``, ``consume_dots``,
    ``traverse_dfs`` and ``traverse_bfs``.
    """

    def __init__(self, *args, **kwargs):
        """Build a config from file paths and/or dicts.

        Args:
            *args: each item is either a path to a ``.json``/``.json5``/
                ``.yaml`` file or a plain ``dict``; later items override
                earlier ones.
            **kwargs: final, highest-priority overrides (keys are applied
                verbatim, without dot-splitting, hence ``traverse=False``).

        Raises:
            Exception: for a path with an unrecognized extension.
            TypeError: for an arg that is neither ``str`` nor ``dict``.
        """
        super(Config, self).__init__()
        for arg in args:
            if isinstance(arg, str):
                if arg.endswith('.json') or arg.endswith('.json5'):
                    with open(arg) as f:
                        raw_dict = json5.load(f)
                elif arg.endswith('.yaml'):
                    with open(arg) as f:
                        # NOTE(review): yaml.load without an explicit Loader is
                        # deprecated and unsafe on untrusted files; consider
                        # yaml.safe_load (kept as-is to preserve behavior).
                        raw_dict = yaml.load(f)
                else:
                    raise Exception('unknown file format %s' % arg)
                init_assign(self, raw_dict, traverse=True)
            elif isinstance(arg, dict):
                init_assign(self, arg, traverse=True)
            else:
                raise TypeError('arg should be an instance of <str> or <dict>')
        if kwargs:
            init_assign(self, kwargs, traverse=False)

    def __call__(self, *args, **kwargs):
        """Return a new Config equal to this one overridden by args/kwargs."""
        return Config(self, *args, **kwargs)

    def __setstate__(self, state):
        # Pickle support: rebuild nested Configs from the plain-dict state.
        init_assign(self, state, traverse=True)

    def __getstate__(self):
        # Pickle support: convert nested Configs back into plain dicts.
        d = dict()
        for (key, value) in self.items():
            if type(value) is Config:
                value = value.__getstate__()
            d[key] = value
        return d

    def __getattr__(self, key):
        return self[key]

    def __setattr__(self, key, value):
        self[key] = value

    def __delattr__(self, key):
        del self[key]

    def __getitem__(self, key):
        # Dotted keys ('a.b.c') resolve into the nested sub-config.
        (sub_cfg, sub_key) = consume_dots(self, key, create_default=False)
        return dict.__getitem__(sub_cfg, sub_key)

    def __setitem__(self, key, value):
        # Intermediate sub-configs are created on demand.
        (sub_cfg, sub_key) = consume_dots(self, key, create_default=True)
        dict.__setitem__(sub_cfg, sub_key, value)

    def __delitem__(self, key):
        (sub_cfg, sub_key) = consume_dots(self, key, create_default=False)
        dict.__delitem__(sub_cfg, sub_key)

    def __contains__(self, key):
        try:
            (sub_cfg, sub_key) = consume_dots(self, key, create_default=False)
        except KeyError:
            return False
        return dict.__contains__(sub_cfg, sub_key)

    def all_keys(self, order='dfs'):
        """Yield every (possibly nested) key, depth- or breadth-first."""
        traverse = {'dfs': traverse_dfs, 'bfs': traverse_bfs}[order]
        for key in traverse(self, 'key', continue_type=Config):
            yield key

    def all_values(self, order='dfs'):
        """Yield every leaf value, depth- or breadth-first."""
        traverse = {'dfs': traverse_dfs, 'bfs': traverse_bfs}[order]
        for value in traverse(self, 'value', continue_type=Config):
            yield value

    def all_items(self, order='dfs'):
        """Yield every (key, value) pair, depth- or breadth-first."""
        traverse = {'dfs': traverse_dfs, 'bfs': traverse_bfs}[order]
        for (key, value) in traverse(self, 'item', continue_type=Config):
            yield (key, value)

    def parse_args(self, cmd_args=None, strict=True):
        """Override config entries from command-line arguments.

        Accepts ``--key value`` and ``--key=value``; a value may carry an
        explicit type as ``value:type``. Existing entries keep their current
        type unless one is given; booleans are parsed from textual forms.

        Args:
            cmd_args: argument list; defaults to ``sys.argv[1:]``.
            strict: if True, unknown keys raise KeyError; otherwise they are
                collected and returned.

        Returns:
            list: the arguments that did not match any config key.
        """
        unknown_args = []
        if cmd_args is None:
            import sys
            cmd_args = sys.argv[1:]
        index = 0
        while index < len(cmd_args):
            arg = cmd_args[index]
            err_msg = 'invalid command line argument pattern: %s' % arg
            assert arg.startswith('--'), err_msg
            assert len(arg) > 2, err_msg
            assert arg[2] != '-', err_msg
            arg = arg[2:]
            if '=' in arg:
                # BUG FIX: split only on the first '=' so values may
                # themselves contain '=' (previously raised ValueError).
                (key, full_value_str) = arg.split('=', 1)
                index += 1
            else:
                assert len(cmd_args) > (index + 1), 'incomplete command line arguments'
                key = arg
                full_value_str = cmd_args[index + 1]
                index += 2
            if ':' in full_value_str:
                (value_str, value_type_str) = full_value_str.split(':')
                # NOTE(review): eval on a CLI-provided type name — do not
                # expose this to untrusted input.
                value_type = eval(value_type_str)
            else:
                value_str = full_value_str
                value_type = None
            if key not in self:
                if strict:
                    raise KeyError('%s not exists in config' % key)
                else:
                    unknown_args.extend(['--' + key, full_value_str])
                    continue
            if value_type is None:
                value_type = type(self[key])
            if value_type is bool:
                # Non-empty strings are truthy, so map textual booleans
                # explicitly; anything else raises KeyError.
                self[key] = {'true': True, 'True': True, '1': True,
                             'false': False, 'False': False, '0': False}[value_str]
            else:
                self[key] = value_type(value_str)
        return unknown_args

    def parse_refs(self, subconf=None, stack_depth=1, max_stack_depth=10):
        """Resolve '{...}'-style reference values against the root config.

        Raises:
            Exception: when recursion exceeds *max_stack_depth* (likely a
                circular reference) or a reference points at another
                unresolved reference.
        """
        if stack_depth > max_stack_depth:
            raise Exception(('Recursively calling `parse_refs` too many times with stack depth > %d. A circular reference may exists in your config.\nIf deeper calling stack is really needed, please call `parse_refs` with extra argument like: `parse_refs(max_stack_depth = 9999)`' % max_stack_depth))
        if subconf is None:
            subconf = self
        for key in subconf.keys():
            value = subconf[key]
            if (type(value) is str) and value.startswith('{') and value.endswith('}'):
                # NOTE(review): [2:-1] assumes a two-character opening marker;
                # confirm the exact reference syntax used by this project.
                ref_key = value[2:(- 1)]
                ref_value = self[ref_key]
                # BUG FIX: this check previously re-tested `value.endswith('}')`
                # (always true in this branch) instead of `ref_value`.
                if (type(ref_value) is str) and ref_value.startswith('{') and ref_value.endswith('}'):
                    raise Exception(('Refering key %s to %s, but the value of %s is another reference value %s' % (repr(key), repr(value), repr(ref_key), repr(ref_value))))
                subconf[key] = ref_value
        for key in subconf.keys():
            value = subconf[key]
            if type(value) is Config:
                # BUG FIX: propagate max_stack_depth; the recursive call
                # previously reset it to the default of 10.
                self.parse_refs(value, stack_depth + 1, max_stack_depth)
def clean_oss_model_path(oss_path):
    """Recursively delete *oss_path* from the OSS models bucket."""
    models_bucket = oss.get_models_bucket()
    oss.delete_oss_dir_recursive(models_bucket, oss_path)
def cal_performance(pred, tgt, local_rank, smoothing=True):
    """Compute the loss plus the number of correct non-padding predictions.

    Args:
        pred: raw model scores, classes along dim 1.
        tgt: target ids; index 0 is treated as padding.
        local_rank: forwarded to ``cal_loss``.
        smoothing: forwarded to ``cal_loss``.

    Returns:
        tuple: (loss, count of correct predictions on non-pad positions).
    """
    loss = cal_loss(pred, tgt, local_rank, smoothing)
    predicted_ids = pred.max(1)[1]
    flat_tgt = tgt.contiguous().view(-1)
    # Positions whose target is the padding id 0 are excluded from the count.
    hits = predicted_ids.eq(flat_tgt).masked_select(flat_tgt.ne(0))
    n_correct = hits.sum().item()
    return loss, n_correct
def vectorize_batch_graph(graph, word_idx):
    """Convert a batched graph dict into padded numpy arrays for the model.

    Args:
        graph: dict with keys 'g_ids_features' (node id -> token string),
            'g_fw_adj' / 'g_bw_adj' (node id -> neighbor-id list) and
            'g_nodes' (per-graph node-id lists).
        word_idx: vocabulary mapping token -> integer id.

    Returns:
        dict of numpy arrays: padded node token-id rows and their lengths,
        padded forward/backward adjacency, padded node lists, node masks,
        a flat lookup table of real (non-padding) node positions, and a
        per-graph 'entity_index' list (all zeros here).

    NOTE(review): depends on a module-level `conf` (``unknown_word``,
    ``sample_size_per_layer``) — confirm against the surrounding module.
    Also mutates the adjacency and node lists inside *graph* in place
    while padding.
    """
    id_features = graph['g_ids_features']
    gv = {}
    nv = []
    n_len_v = []
    # First pass: longest token sequence over all nodes.
    word_max_len = 0
    for id in id_features:
        feature = id_features[id]
        word_max_len = max(word_max_len, len(feature.split()))
    # Second pass: map tokens to vocab ids, record true lengths, pad/truncate.
    for id in graph['g_ids_features']:
        feature = graph['g_ids_features'][id]
        fv = []
        for token in feature.split():
            if (len(token) == 0):
                continue
            if (token in word_idx):
                fv.append(word_idx[token])
            else:
                fv.append(word_idx[conf.unknown_word])
        if (len(fv) > word_max_len):
            n_len_v.append(word_max_len)
        else:
            n_len_v.append(len(fv))
        for _ in range((word_max_len - len(fv))):
            fv.append(0)
        fv = fv[:word_max_len]
        nv.append(fv)
    # Trailing all-zero row acts as the padding node's feature row.
    nv.append([0 for temp in range(word_max_len)])
    n_len_v.append(0)
    gv['g_ids_features'] = np.array(nv)
    gv['g_ids_feature_lens'] = np.array(n_len_v)
    # Max degree over both directions, capped by the per-layer sample budget.
    g_fw_adj = graph['g_fw_adj']
    g_fw_adj_v = []
    degree_max_size = 0
    for id in g_fw_adj:
        degree_max_size = max(degree_max_size, len(g_fw_adj[id]))
    g_bw_adj = graph['g_bw_adj']
    for id in g_bw_adj:
        degree_max_size = max(degree_max_size, len(g_bw_adj[id]))
    degree_max_size = min(degree_max_size, conf.sample_size_per_layer)
    # Pad each adjacency list with the padding-node index len(adj-dict),
    # then truncate to the common degree.
    for id in g_fw_adj:
        adj = g_fw_adj[id]
        for _ in range((degree_max_size - len(adj))):
            adj.append(len(g_fw_adj.keys()))
        adj = adj[:degree_max_size]
        assert (len(adj) == degree_max_size)
        g_fw_adj_v.append(adj)
    # Extra row: the padding node's neighbors are all the padding node.
    g_fw_adj_v.append([len(g_fw_adj.keys()) for _ in range(degree_max_size)])
    g_bw_adj_v = []
    for id in g_bw_adj:
        adj = g_bw_adj[id]
        for _ in range((degree_max_size - len(adj))):
            adj.append(len(g_bw_adj.keys()))
        adj = adj[:degree_max_size]
        assert (len(adj) == degree_max_size)
        g_bw_adj_v.append(adj)
    g_bw_adj_v.append([len(g_bw_adj.keys()) for _ in range(degree_max_size)])
    # Pad each graph's node list to the largest graph in the batch; mask is
    # 1 for real nodes and 0 for padding.
    g_nodes = graph['g_nodes']
    graph_max_size = 0
    for nodes in g_nodes:
        graph_max_size = max(graph_max_size, len(nodes))
    g_node_v = []
    g_node_mask = []
    entity_index = []
    for nodes in g_nodes:
        mask = [1 for _ in range(len(nodes))]
        for _ in range((graph_max_size - len(nodes))):
            nodes.append(len(g_fw_adj.keys()))
            mask.append(0)
        nodes = nodes[:graph_max_size]
        mask = mask[:graph_max_size]
        g_node_v.append(nodes)
        g_node_mask.append(mask)
        entity_index.append(0)
    # Flat positions (over the padded layout) of the real nodes.
    g_looking_table = []
    global_count = 0
    for mask in g_node_mask:
        for item in mask:
            if (item == 1):
                g_looking_table.append(global_count)
            global_count += 1
    gv['g_nodes'] = np.array(g_node_v)
    gv['g_bw_adj'] = np.array(g_bw_adj_v)
    gv['g_fw_adj'] = np.array(g_fw_adj_v)
    gv['g_mask'] = np.array(g_node_mask)
    gv['g_looking_table'] = np.array(g_looking_table)
    gv['entity_index'] = entity_index
    return gv
class CosineAnnealingWarmUpRestarts(_LRScheduler):
    """Cosine-annealing learning-rate schedule with warm restarts and a
    linear warm-up at the start of every cycle.

    Within each cycle the lr ramps linearly from ``base_lr`` to ``eta_max``
    over ``T_up`` steps, then follows a cosine decay back to ``base_lr``
    over the remainder of the cycle. Cycle ``i`` has length
    ``(T_0 - T_up) * T_mult**i + T_up`` and its peak lr is
    ``eta_max * gamma**i``.

    Args:
        optimizer: wrapped optimizer.
        T_0: length of the first cycle; positive integer.
        T_mult: cycle-length multiplier applied at each restart; int >= 1.
        eta_max: peak learning rate of the first cycle.
        T_up: number of linear warm-up steps at the start of each cycle.
        gamma: decay factor applied to the peak lr after each cycle.
        last_epoch: index of the last epoch (-1 means "not started yet").
    """

    def __init__(self, optimizer, T_0, T_mult=1, eta_max=0.1, T_up=0, gamma=1.0, last_epoch=(- 1)):
        if ((T_0 <= 0) or (not isinstance(T_0, int))):
            raise ValueError('Expected positive integer T_0, but got {}'.format(T_0))
        if ((T_mult < 1) or (not isinstance(T_mult, int))):
            raise ValueError('Expected integer T_mult >= 1, but got {}'.format(T_mult))
        if ((T_up < 0) or (not isinstance(T_up, int))):
            # BUG FIX: message said "positive" but the check accepts 0.
            raise ValueError('Expected non-negative integer T_up, but got {}'.format(T_up))
        self.T_0 = T_0                # length of the first cycle
        self.T_mult = T_mult          # cycle-length multiplier
        self.base_eta_max = eta_max   # un-decayed peak lr
        self.eta_max = eta_max        # current cycle's peak lr
        self.T_up = T_up              # warm-up steps per cycle
        self.T_i = T_0                # length of the current cycle
        self.gamma = gamma            # per-cycle peak-lr decay
        self.cycle = 0                # index of the current cycle
        self.T_cur = last_epoch       # position within the current cycle
        super(CosineAnnealingWarmUpRestarts, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        """Return the lr for each param group at the current position."""
        if (self.T_cur == (- 1)):
            return self.base_lrs
        elif (self.T_cur < self.T_up):
            # Linear warm-up from base_lr up to eta_max.
            return [((((self.eta_max - base_lr) * self.T_cur) / self.T_up) + base_lr) for base_lr in self.base_lrs]
        else:
            # Cosine decay from eta_max back down to base_lr.
            return [(base_lr + (((self.eta_max - base_lr) * (1 + math.cos(((math.pi * (self.T_cur - self.T_up)) / (self.T_i - self.T_up))))) / 2)) for base_lr in self.base_lrs]

    def step(self, epoch=None):
        """Advance the schedule by one step, or jump to an absolute *epoch*.

        Handles cycle rollover (sequential stepping) and reconstruction of
        cycle index/position when an absolute epoch is supplied; the peak lr
        is decayed by ``gamma`` per completed cycle.
        """
        if (epoch is None):
            epoch = (self.last_epoch + 1)
            self.T_cur = (self.T_cur + 1)
            if (self.T_cur >= self.T_i):
                # Cycle finished: restart and grow the next cycle.
                self.cycle += 1
                self.T_cur = (self.T_cur - self.T_i)
                self.T_i = (((self.T_i - self.T_up) * self.T_mult) + self.T_up)
        elif (epoch >= self.T_0):
            if (self.T_mult == 1):
                self.T_cur = (epoch % self.T_0)
                self.cycle = (epoch // self.T_0)
            else:
                # Invert the geometric series of cycle lengths to find the
                # cycle index n containing this epoch.
                n = int(math.log((((epoch / self.T_0) * (self.T_mult - 1)) + 1), self.T_mult))
                self.cycle = n
                self.T_cur = (epoch - ((self.T_0 * ((self.T_mult ** n) - 1)) / (self.T_mult - 1)))
                self.T_i = (self.T_0 * (self.T_mult ** n))
        else:
            self.T_i = self.T_0
            self.T_cur = epoch
        self.eta_max = (self.base_eta_max * (self.gamma ** self.cycle))
        self.last_epoch = math.floor(epoch)
        for (param_group, lr) in zip(self.optimizer.param_groups, self.get_lr()):
            param_group['lr'] = lr
class Learner(BaseLearner):
    """Incremental learner built around an expandable DERNet backbone set.

    Per task: the classifier head is widened, all previously trained
    backbones are frozen (and kept in eval mode), and training combines a
    classification loss with an auxiliary loss that separates the new
    classes from the collapsed set of old ones.
    """

    def __init__(self, args):
        super().__init__(args)
        self._network = DERNet(args, True)

    def after_task(self):
        # Classes learned in this task become "known" for the next one.
        self._known_classes = self._total_classes
        logging.info('Exemplar size: {}'.format(self.exemplar_size))

    def incremental_train(self, data_manager):
        """Run one incremental task: widen the head, freeze old backbones,
        build the loaders, train, and refresh the rehearsal memory."""
        self._cur_task += 1
        self._total_classes = (self._known_classes + data_manager.get_task_size(self._cur_task))
        self._network.update_fc(self._total_classes)
        logging.info('Learning on {}-{}'.format(self._known_classes, self._total_classes))
        if (self._cur_task > 0):
            # Freeze every backbone trained in earlier tasks.
            for i in range(self._cur_task):
                for p in self._network.backbones[i].parameters():
                    p.requires_grad = False
        logging.info('All params: {}'.format(count_parameters(self._network)))
        logging.info('Trainable params: {}'.format(count_parameters(self._network, True)))
        # Train on the new classes plus replayed exemplars; test on all seen classes.
        train_dataset = data_manager.get_dataset(np.arange(self._known_classes, self._total_classes), source='train', mode='train', appendent=self._get_memory())
        self.train_loader = DataLoader(train_dataset, batch_size=self.args['batch_size'], shuffle=True, num_workers=num_workers)
        test_dataset = data_manager.get_dataset(np.arange(0, self._total_classes), source='test', mode='test')
        self.test_loader = DataLoader(test_dataset, batch_size=self.args['batch_size'], shuffle=False, num_workers=num_workers)
        if (len(self._multiple_gpus) > 1):
            self._network = nn.DataParallel(self._network, self._multiple_gpus)
        self._train(self.train_loader, self.test_loader)
        self.build_rehearsal_memory(data_manager, self.samples_per_class)
        if (len(self._multiple_gpus) > 1):
            # Unwrap DataParallel before the next task.
            self._network = self._network.module

    def train(self):
        """Set only the newest backbone to train mode; frozen backbones stay
        in eval mode so their normalization statistics are not updated."""
        self._network.train()
        if (len(self._multiple_gpus) > 1):
            self._network_module_ptr = self._network.module
        else:
            self._network_module_ptr = self._network
        self._network_module_ptr.backbones[(- 1)].train()
        if (self._cur_task >= 1):
            for i in range(self._cur_task):
                self._network_module_ptr.backbones[i].eval()

    def _train(self, train_loader, test_loader):
        # First task uses the "init" hyper-parameters; later tasks use the
        # incremental ones (and the auxiliary-loss training loop).
        self._network.to(self._device)
        if (self._cur_task == 0):
            optimizer = optim.SGD(filter((lambda p: p.requires_grad), self._network.parameters()), momentum=0.9, lr=self.args['init_lr'], weight_decay=self.args['init_weight_decay'])
            scheduler = optim.lr_scheduler.MultiStepLR(optimizer=optimizer, milestones=self.args['init_milestones'], gamma=self.args['init_lr_decay'])
            self._init_train(train_loader, test_loader, optimizer, scheduler)
        else:
            optimizer = optim.SGD(filter((lambda p: p.requires_grad), self._network.parameters()), lr=self.args['lrate'], momentum=0.9, weight_decay=self.args['weight_decay'])
            scheduler = optim.lr_scheduler.MultiStepLR(optimizer=optimizer, milestones=self.args['milestones'], gamma=self.args['lrate_decay'])
            self._update_representation(train_loader, test_loader, optimizer, scheduler)

    def _init_train(self, train_loader, test_loader, optimizer, scheduler):
        """Plain cross-entropy training loop used for the first task."""
        prog_bar = tqdm(range(self.args['init_epoch']))
        for (_, epoch) in enumerate(prog_bar):
            self.train()
            losses = 0.0
            (correct, total) = (0, 0)
            for (i, (_, inputs, targets)) in enumerate(train_loader):
                (inputs, targets) = (inputs.to(self._device), targets.to(self._device))
                logits = self._network(inputs)['logits']
                loss = F.cross_entropy(logits, targets)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                losses += loss.item()
                (_, preds) = torch.max(logits, dim=1)
                correct += preds.eq(targets.expand_as(preds)).cpu().sum()
                total += len(targets)
            scheduler.step()
            train_acc = np.around(((tensor2numpy(correct) * 100) / total), decimals=2)
            # Test accuracy is computed only every 5th epoch to save time.
            if ((epoch % 5) == 0):
                test_acc = self._compute_accuracy(self._network, test_loader)
                info = 'Task {}, Epoch {}/{} => Loss {:.3f}, Train_accy {:.2f}, Test_accy {:.2f}'.format(self._cur_task, (epoch + 1), self.args['init_epoch'], (losses / len(train_loader)), train_acc, test_acc)
            else:
                info = 'Task {}, Epoch {}/{} => Loss {:.3f}, Train_accy {:.2f}'.format(self._cur_task, (epoch + 1), self.args['init_epoch'], (losses / len(train_loader)), train_acc)
            prog_bar.set_description(info)
        logging.info(info)

    def _update_representation(self, train_loader, test_loader, optimizer, scheduler):
        """Training loop for later tasks: classification loss plus an
        auxiliary loss whose targets collapse all old classes to 0 and map
        new classes to 1..task_size."""
        prog_bar = tqdm(range(self.args['epochs']))
        for (_, epoch) in enumerate(prog_bar):
            self.train()
            losses = 0.0
            losses_clf = 0.0
            losses_aux = 0.0
            (correct, total) = (0, 0)
            for (i, (_, inputs, targets)) in enumerate(train_loader):
                (inputs, targets) = (inputs.to(self._device), targets.to(self._device))
                outputs = self._network(inputs)
                (logits, aux_logits) = (outputs['logits'], outputs['aux_logits'])
                loss_clf = F.cross_entropy(logits, targets)
                # Old classes -> 0, new classes -> 1..task_size.
                aux_targets = targets.clone()
                aux_targets = torch.where((((aux_targets - self._known_classes) + 1) > 0), ((aux_targets - self._known_classes) + 1), 0)
                loss_aux = F.cross_entropy(aux_logits, aux_targets)
                loss = (loss_clf + loss_aux)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                losses += loss.item()
                losses_aux += loss_aux.item()
                losses_clf += loss_clf.item()
                (_, preds) = torch.max(logits, dim=1)
                correct += preds.eq(targets.expand_as(preds)).cpu().sum()
                total += len(targets)
            scheduler.step()
            train_acc = np.around(((tensor2numpy(correct) * 100) / total), decimals=2)
            # Test accuracy is computed only every 5th epoch to save time.
            if ((epoch % 5) == 0):
                test_acc = self._compute_accuracy(self._network, test_loader)
                info = 'Task {}, Epoch {}/{} => Loss {:.3f}, Loss_clf {:.3f}, Loss_aux {:.3f}, Train_accy {:.2f}, Test_accy {:.2f}'.format(self._cur_task, (epoch + 1), self.args['epochs'], (losses / len(train_loader)), (losses_clf / len(train_loader)), (losses_aux / len(train_loader)), train_acc, test_acc)
            else:
                info = 'Task {}, Epoch {}/{} => Loss {:.3f}, Loss_clf {:.3f}, Loss_aux {:.3f}, Train_accy {:.2f}'.format(self._cur_task, (epoch + 1), self.args['epochs'], (losses / len(train_loader)), (losses_clf / len(train_loader)), (losses_aux / len(train_loader)), train_acc)
            prog_bar.set_description(info)
        logging.info(info)
def locate_files(pattern, root_dir=os.curdir, **kwargs):
    """Yield full paths of files under *root_dir* matching *pattern*.

    Args:
        pattern: shell-style wildcard pattern (see ``fnmatch``).
        root_dir: directory to search; defaults to the current directory.
        **kwargs: forwarded verbatim to ``os.walk``.

    Yields:
        str: absolute path of each matching file, in ``os.walk`` order.
    """
    top = os.path.abspath(root_dir)
    for current_dir, _subdirs, file_names in os.walk(top, **kwargs):
        for match in fnmatch.filter(file_names, pattern):
            yield os.path.join(current_dir, match)
class QuarticCurve_generic(projective_curve.ProjectivePlaneCurve):
    """A generic plane quartic curve."""

    def _repr_type(self):
        """Return the curve-type label used in string representations."""
        return 'Quartic'

    def genus(self):
        """Return 3, the genus of a smooth plane quartic ((4-1)(4-2)/2)."""
        return 3
def mask_special_tokens(string: str):
    """Replace each ``name_123``-style token in *string* with ``'<temp>'``.

    Args:
        string: input text.

    Returns:
        tuple: (masked string, list of the original tokens in match order).
    """
    token_re = re.compile('[A-Za-z:_.]+_[0-9]+')
    found = [m.group(0) for m in token_re.finditer(string)]
    masked = string
    for token in found:
        # Replace one occurrence per match so duplicates are masked in order.
        masked = masked.replace(token, '<temp>', 1)
    return (masked, found)
def update_level_set():
    """Project the level-set function onto cellwise coefficient/indicator
    fields and recompute the tracked volume.

    NOTE(review): operates entirely on module-level globals (``psi``,
    ``alpha_in``, ``alpha_out``, ``alpha``, ``indicator_omega``, ``vol``,
    ``dx``) — confirm against the surrounding script.
    """
    cashocs.interpolate_levelset_function_to_cells(psi, alpha_in, alpha_out, alpha)
    # Indicator field: 1.0 inside the region described by psi, 0.0 outside.
    cashocs.interpolate_levelset_function_to_cells(psi, 1.0, 0.0, indicator_omega)
    # Volume = integral of the indicator over the mesh.
    vol.vector().vec().set(assemble((indicator_omega * dx)))
    vol.vector().apply('')
def imagenet_det_classes():
    """Return the 200 ImageNet-DET class names in their canonical order.

    The order is significant: downstream label ids are list indices, so do
    not reorder or edit entries.
    """
    return ['accordion', 'airplane', 'ant', 'antelope', 'apple', 'armadillo', 'artichoke', 'axe', 'baby_bed', 'backpack', 'bagel', 'balance_beam', 'banana', 'band_aid', 'banjo', 'baseball', 'basketball', 'bathing_cap', 'beaker', 'bear', 'bee', 'bell_pepper', 'bench', 'bicycle', 'binder', 'bird', 'bookshelf', 'bow_tie', 'bow', 'bowl', 'brassiere', 'burrito', 'bus', 'butterfly', 'camel', 'can_opener', 'car', 'cart', 'cattle', 'cello', 'centipede', 'chain_saw', 'chair', 'chime', 'cocktail_shaker', 'coffee_maker', 'computer_keyboard', 'computer_mouse', 'corkscrew', 'cream', 'croquet_ball', 'crutch', 'cucumber', 'cup_or_mug', 'diaper', 'digital_clock', 'dishwasher', 'dog', 'domestic_cat', 'dragonfly', 'drum', 'dumbbell', 'electric_fan', 'elephant', 'face_powder', 'fig', 'filing_cabinet', 'flower_pot', 'flute', 'fox', 'french_horn', 'frog', 'frying_pan', 'giant_panda', 'goldfish', 'golf_ball', 'golfcart', 'guacamole', 'guitar', 'hair_dryer', 'hair_spray', 'hamburger', 'hammer', 'hamster', 'harmonica', 'harp', 'hat_with_a_wide_brim', 'head_cabbage', 'helmet', 'hippopotamus', 'horizontal_bar', 'horse', 'hotdog', 'iPod', 'isopod', 'jellyfish', 'koala_bear', 'ladle', 'ladybug', 'lamp', 'laptop', 'lemon', 'lion', 'lipstick', 'lizard', 'lobster', 'maillot', 'maraca', 'microphone', 'microwave', 'milk_can', 'miniskirt', 'monkey', 'motorcycle', 'mushroom', 'nail', 'neck_brace', 'oboe', 'orange', 'otter', 'pencil_box', 'pencil_sharpener', 'perfume', 'person', 'piano', 'pineapple', 'ping-pong_ball', 'pitcher', 'pizza', 'plastic_bag', 'plate_rack', 'pomegranate', 'popsicle', 'porcupine', 'power_drill', 'pretzel', 'printer', 'puck', 'punching_bag', 'purse', 'rabbit', 'racket', 'ray', 'red_panda', 'refrigerator', 'remote_control', 'rubber_eraser', 'rugby_ball', 'ruler', 'salt_or_pepper_shaker', 'saxophone', 'scorpion', 'screwdriver', 'seal', 'sheep', 'ski', 'skunk', 'snail', 'snake', 'snowmobile', 'snowplow', 'soap_dispenser', 'soccer_ball', 'sofa', 'spatula', 'squirrel', 'starfish',
            'stethoscope', 'stove', 'strainer', 'strawberry', 'stretcher', 'sunglasses', 'swimming_trunks', 'swine', 'syringe', 'table', 'tape_player', 'tennis_ball', 'tick', 'tie', 'tiger', 'toaster', 'traffic_light', 'train', 'trombone', 'trumpet', 'turtle', 'tv_or_monitor', 'unicycle', 'vacuum', 'violin', 'volleyball', 'waffle_iron', 'washer', 'water_bottle', 'watercraft', 'whale', 'wine_bottle', 'zebra']
def update_shard_info_for_in_graph(meta_graph_def, num_replicas):
    """Rewrite shard-id / num-shards constants in a replicated MetaGraphDef.

    For in-graph replication, each replica's shard-id constant is set from
    the replica index embedded in its node name, every num-shards constant
    is set to *num_replicas*, and dataset filter functions referenced via
    'dataset_factory' attributes are duplicated per replica with their
    shard constants patched. Mutates *meta_graph_def* in place.

    Args:
        meta_graph_def: the MetaGraphDef to rewrite.
        num_replicas: replica count; no-op when <= 1.

    Raises:
        ValueError: when the number of shard-id or num-shards collection
            entries is neither 1 nor *num_replicas*.
    """
    if (num_replicas <= 1):
        return
    # Index every graph node by name for direct constant patching.
    node_name_to_node = {}
    for node in meta_graph_def.graph_def.node:
        node_name_to_node[node.name] = node
    if (shard.SHARD_ID in meta_graph_def.collection_def):
        shard_id_node_names = meta_graph_def.collection_def[shard.SHARD_ID].node_list.value
        num_shard_id_nodes = len(shard_id_node_names)
        if (num_shard_id_nodes == num_replicas):
            for shard_id_node_name in shard_id_node_names:
                parallax_log.debug(shard_id_node_name)
                # Replica index is embedded in the node name right after the
                # replica prefix.
                shard_id_to_update = int(shard_id_node_name.split(PARALLAX_REPLICA_PREFIX)[1].split('/')[0])
                node_name_to_node[_get_op_name(shard_id_node_name)].attr['value'].tensor.int64_val[0] = shard_id_to_update
        elif (num_shard_id_nodes != 1):
            raise ValueError('The number of shard_id must be same as the number of replicas or 1')
    if (shard.NUM_SHARDS in meta_graph_def.collection_def):
        num_shards_node_names = meta_graph_def.collection_def[shard.NUM_SHARDS].node_list.value
        num_num_shards_nodes = len(num_shards_node_names)
        if (num_num_shards_nodes == num_replicas):
            for num_shards_node_name in num_shards_node_names:
                node_name_to_node[_get_op_name(num_shards_node_name)].attr['value'].tensor.int64_val[0] = num_replicas
        elif (num_num_shards_nodes != 1):
            raise ValueError('The number of num_shards must be same as the number of replicas or 1')
    if (shard.SHARD_FILTER_PRED in meta_graph_def.collection_def):
        shard_filter_pred_names = [v.decode('ascii') for v in meta_graph_def.collection_def[shard.SHARD_FILTER_PRED].bytes_list.value]
        # Map dataset-factory function name -> replica nodes consuming it.
        dataset_factory_replica_consumers = {}
        for node in meta_graph_def.graph_def.node:
            if (('dataset_factory' in node.attr) and node.name.startswith(PARALLAX_REPLICA_PREFIX)):
                dataset_factory_name = node.attr['dataset_factory'].func.name
                if (dataset_factory_name not in dataset_factory_replica_consumers):
                    dataset_factory_replica_consumers[dataset_factory_name] = []
                dataset_factory_replica_consumers[dataset_factory_name].append(node)
        # Rebuild the function library, duplicating shard-filter functions
        # once per replica with patched shard constants.
        updated_lib = function_pb2.FunctionDefLibrary()
        for func in meta_graph_def.graph_def.library.function:
            if (func.signature.name in dataset_factory_replica_consumers):
                replicate = False
                for node in func.node_def:
                    if (('predicate' in node.attr) and (node.attr['predicate'].func.name in shard_filter_pred_names)):
                        # Names of the constants feeding the filter dataset.
                        num_shards_name = node.input[shard.FILTER_DATASET_NUM_SHARDS_POS].split(':output:0')[0]
                        shard_id_name = node.input[shard.FILTER_DATASET_SHARD_ID_POS].split(':output:0')[0]
                        for replica_id in range(num_replicas):
                            replica_func = function_pb2.FunctionDef()
                            replica_func.CopyFrom(func)
                            replica_func.signature.name = ops.prepend_name_scope(func.signature.name, parallax_replica_prefix(replica_id))
                            for node in replica_func.node_def:
                                if (node.name == num_shards_name):
                                    node.attr['value'].tensor.int64_val[0] = num_replicas
                                elif (node.name == shard_id_name):
                                    node.attr['value'].tensor.int64_val[0] = replica_id
                            updated_lib.function.extend([replica_func])
                        # Point each consumer at its replica's copy.
                        for consumer in dataset_factory_replica_consumers[func.signature.name]:
                            replica_id = int(consumer.name.split(PARALLAX_REPLICA_PREFIX)[1].split('/')[0])
                            replica_func_name = ops.prepend_name_scope(func.signature.name, parallax_replica_prefix(replica_id))
                            consumer.attr['dataset_factory'].func.name = replica_func_name
                        replicate = True
                        break
                if (not replicate):
                    # Consumed but not a shard filter: keep the original.
                    updated_lib.function.extend([func])
            else:
                updated_lib.function.extend([func])
        meta_graph_def.graph_def.library.CopyFrom(updated_lib)
def cosine_rampup(current, rampup_length):
    """Cosine ramp-up from 0 to 1 over *rampup_length* steps.

    Args:
        current: current step; clamped into ``[0, rampup_length]``.
        rampup_length: length of the ramp. A length of 0 means the ramp is
            already complete, so 1.0 is returned (previously this produced
            a 0/0 -> NaN).

    Returns:
        float in ``[0, 1]``: 0 at step 0, 1 at ``rampup_length`` and beyond.
    """
    if rampup_length == 0:
        # Limit of the expression as rampup_length -> 0: ramp already done.
        return 1.0
    current = np.clip(current, 0.0, rampup_length)
    return float((- 0.5) * (np.cos((np.pi * current) / rampup_length) - 1))
def subsample_classes(dataset, include_classes=range(160)):
    """Restrict *dataset* to the given classes and remap targets to 0..k-1.

    Args:
        dataset: dataset whose ``data`` DataFrame has a 'target' column
            (CUB targets are 1-based, hence the +1 shift).
        include_classes: 0-based class indices to keep.

    Returns:
        The subsampled dataset with ``target_transform`` remapping kept
        classes onto a contiguous range.
    """
    cub_targets = (np.array(include_classes) + 1)
    keep_rows = [
        idx
        for idx, (_, row) in enumerate(dataset.data.iterrows())
        if int(row['target']) in cub_targets
    ]
    # Old class index -> new contiguous index.
    remap = {cls: new_idx for new_idx, cls in enumerate(include_classes)}
    dataset = subsample_dataset(dataset, keep_rows)
    dataset.target_transform = (lambda x: remap[x])
    return dataset
def gen_grid(args, config, config_budget={}):
    """Generate one YAML config file per point of a hyper-parameter grid.

    Args:
        args: namespace providing ``config`` (base config path), ``grid``
            (grid search file path) and ``out_dir``.
        config: base configuration dict; copied and overridden per point.
        config_budget: optional budget spec; when non-empty each generated
            config is adjusted via ``dict_match_baseline``.
            NOTE(review): mutable default — harmless here since it is never
            mutated, only read.

    Side effects:
        (Re)creates ``<args.out_dir>/<task_name>`` and writes one
        ``<base>-<alias>=<value>....yaml`` file per grid point.
    """
    task_name = '{}_grid_{}'.format(get_fname(args.config), get_fname(args.grid))
    fname_start = get_fname(args.config)
    out_dir = '{}/{}'.format(args.out_dir, task_name)
    makedirs_rm_exist(out_dir)
    config['out_dir'] = os.path.join(config['out_dir'], task_name)
    outs = load_search_file(args.grid)
    for (i, out) in enumerate(outs):
        # Each row: dotted config key, short alias, list of candidate values.
        vars_label = [row[0].split('.') for row in out]
        vars_alias = [row[1] for row in out]
        # Cartesian product of all candidate values.
        vars_value = grid2list([string_to_python(row[2]) for row in out])
        if (i == 0):
            print('Variable label: {}'.format(vars_label))
            print('Variable alias: {}'.format(vars_alias))
        for vars in vars_value:
            config_out = config.copy()
            fname_out = fname_start
            for (id, var) in enumerate(vars):
                if (len(vars_label[id]) == 1):
                    config_out[vars_label[id][0]] = var
                elif (len(vars_label[id]) == 2):
                    # Two-level dotted key: create the sub-dict on demand.
                    if (vars_label[id][0] in config_out):
                        config_out[vars_label[id][0]][vars_label[id][1]] = var
                    else:
                        config_out[vars_label[id][0]] = {vars_label[id][1]: var}
                else:
                    raise ValueError('Only 2-level config files are supported')
                # Encode this point's settings into the output file name.
                fname_out += '-{}={}'.format(vars_alias[id], str(var).strip('[]').strip("''"))
            if (len(config_budget) > 0):
                config_out = dict_match_baseline(config_out, config_budget)
            with open('{}/{}.yaml'.format(out_dir, fname_out), 'w') as f:
                yaml.dump(config_out, f, default_flow_style=False)
        print('{} configurations saved to: {}'.format(len(vars_value), out_dir))
def default_argument_parser():
    """Build the stock fastreid training argument parser.

    Returns:
        argparse.ArgumentParser: parser with config/resume/eval flags,
        distributed-launch options, and a trailing ``opts`` remainder for
        config overrides.
    """
    parser = argparse.ArgumentParser(description='fastreid Training')
    parser.add_argument('--config-file', default='', metavar='FILE', help='path to config file')
    parser.add_argument('--resume', action='store_true', help='whether to attempt to resume from the checkpoint directory')
    parser.add_argument('--eval-only', action='store_true', help='perform evaluation only')
    parser.add_argument('--num-gpus', type=int, default=1, help='number of gpus *per machine*')
    parser.add_argument('--num-machines', type=int, default=1, help='total number of machines')
    parser.add_argument('--machine-rank', type=int, default=0, help='the rank of this machine (unique per machine)')
    # Derive a (mostly) machine-unique default port from the user id so
    # concurrent single-machine launches don't collide.
    uid = os.getuid() if sys.platform != 'win32' else 1
    port = 2 ** 15 + 2 ** 14 + hash(uid) % 2 ** 14
    parser.add_argument('--dist-url', default='tcp://127.0.0.1:{}'.format(port))
    # Everything after the known flags is forwarded as config overrides.
    parser.add_argument('opts', help='Modify config options using the command-line', default=None, nargs=argparse.REMAINDER)
    return parser
def _create_data(algo, nb_nodes):
    """Draw one full sample and one chunked sample for *algo*.

    Returns:
        tuple: (full sample from the iterable sampler, first chunk from a
        fresh sampler chunked to the full sample's first sequence length).
    """
    batch_size = 8
    sampler = _make_iterable_sampler(algo, batch_size, nb_nodes)
    full_sample = next(sampler)
    # Chunk length comes from the first feature length of the full sample.
    chunk_length = full_sample.features.lengths[0].astype(int)
    fresh_sampler = _make_iterable_sampler(algo, batch_size, nb_nodes)
    chunk_sample = next(dataset.chunkify(fresh_sampler, chunk_length))
    return (full_sample, chunk_sample)
# NOTE(review): the decorator lines were mangled in this source; they are
# reconstructed here from the surviving option tuples and the click.Context
# annotation — confirm against the original repository.
@click.command()
@click.pass_context
@click.option('--network_pkl', help='Network pickle filename', required=True)
@click.option('--timesteps', type=int, help='Timesteps', default=16, show_default=True)
@click.option('--num_videos', type=int, help='Number of images to generate', default=100, show_default=True)
@click.option('--seed', type=int, help='Random seed', default=42, metavar='DIR')
@click.option('--outdir', help='Where to save the output images', type=str, required=True, metavar='DIR')
def generate_videos(ctx: click.Context, network_pkl: str, timesteps: int, num_videos: int, seed: int, outdir: str):
    """Sample videos from a trained generator and save them as a grid GIF.

    Args:
        ctx: click context (unused).
        network_pkl: path/URL of the network pickle to load.
        timesteps: number of frames per generated video.
        num_videos: number of videos; the grid is ~sqrt(num_videos) square.
        seed: seed applied to python/numpy/torch RNGs.
        outdir: output directory; created if missing.
    """
    print('Loading networks from "%s"...' % network_pkl)
    device = torch.device('cuda')
    with dnnlib.util.open_url(network_pkl) as f:
        G = legacy.load_network_pkl(f)['G_ema'].to(device).eval()
    # Rebind forward so the loaded generator uses this repo's video forward.
    G.forward = Generator.forward.__get__(G, Generator)
    print('Done. ')
    os.makedirs(outdir, exist_ok=True)
    # Seed every RNG for reproducible sampling.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    grid_size = (int(math.sqrt(num_videos)), int(math.sqrt(num_videos)))
    grid_z = torch.randn([int(grid_size[0] * grid_size[1]), G.z_dim], device=device).split(1)
    # BUG FIX: timesteps was hard-coded to 16 in the G() call while the
    # rearrange below used the CLI value, breaking any --timesteps != 16.
    images = torch.cat([rearrange(G(z, None, timesteps=timesteps, noise_mode='const')[0].cpu(), '(b t) c h w -> b c t h w', t=timesteps) for z in grid_z]).numpy()
    save_image_grid(images, os.path.join(outdir, f'generate_videos.gif'), drange=[(- 1), 1], grid_size=grid_size)
class Sst2Processor(DataProcessor):
    """Processor for binary SST-2 sentiment built from SST parse trees.

    Training uses all labeled sub-trees (phrases); dev/test keep complete
    sentences only. Continuous sentiment scores are binarized, dropping the
    neutral band — see ``_sentiment2label``.
    """

    def get_train_examples(self, data_dir):
        return self._create_examples(data_dir, 'train')

    def get_dev_examples(self, data_dir):
        return self._create_examples(data_dir, 'dev')

    def get_test_examples(self, data_dir):
        return self._create_examples(data_dir, 'test')

    def no_label_for_test(self):
        # The test split keeps its labels.
        return False

    def is_paired_data(self):
        # Single-sentence classification; no text_b.
        return False

    def get_labels(self):
        return ['0', '1']

    def _create_examples(self, data_dir, set_type):
        """Read SST trees for *set_type* and convert them into examples.

        Only the train split uses sub-trees (phrases); dev and test are
        restricted to full sentences.
        """
        only_sent = (not (set_type == 'train'))
        raw_data_list = _read_sst_tree_list(data_dir, set_type)
        (data_list, _) = _gene_sst_sub_trees_and_shift_reduce_info(raw_data_list)
        examples = self._get_examples_from_data_list(data_list, only_sent, set_type)
        return examples

    def _sentiment2label(self, continous_sentiment_label):
        """Binarize a continuous sentiment score.

        Returns '0' for <= 0.4, '1' for > 0.6, and None for the neutral
        band in between (such items are dropped).
        """
        sentiment_label = None
        if (continous_sentiment_label <= 0.4):
            sentiment_label = '0'
        elif (continous_sentiment_label > 0.6):
            sentiment_label = '1'
        return sentiment_label

    def _get_examples_from_data_list(self, data_list, only_sentence, set_type):
        """Flatten (sub-)trees into InputExamples, skipping neutral items."""
        idx_ex = 0
        examples = []
        for sub_trees in data_list:
            for sub_tree in sub_trees:
                if (only_sentence and (not sub_tree['is_sent'])):
                    continue
                root_node = sub_tree['root_node']
                sentiment_label = self._sentiment2label(root_node['sentiment_label'])
                if (sentiment_label is None):
                    # Neutral sentiment: dropped entirely.
                    continue
                # Example text is the in-order concatenation of leaf tokens.
                token_list = []
                for node in sub_tree['tree_nodes']:
                    if node['is_leaf']:
                        token_list.append(node['token'])
                assert (len(token_list) > 0)
                text_a = ' '.join(token_list)
                guid = ('%s-%d' % (set_type, idx_ex))
                idx_ex += 1
                examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=sentiment_label))
        return examples
class ModReLU(nn.Module):
    """modReLU activation: ``sign(x) * relu(|x| + b)`` with learnable bias b.

    The bias shifts each feature's magnitude threshold; values whose biased
    magnitude falls below zero are zeroed while the input's sign survives.
    """

    def __init__(self, features):
        super().__init__()
        self.features = features
        self.b = nn.Parameter(torch.Tensor(self.features))
        self.reset_parameters()

    def reset_parameters(self):
        # Small symmetric init around zero.
        self.b.data.uniform_((- 0.01), 0.01)

    def forward(self, inputs):
        # Bias the magnitude, clip at zero, then restore the original sign.
        magnitude = F.relu(torch.abs(inputs) + self.b)
        return torch.sign(inputs) * magnitude
def test_ATan2():
    """Smoke-test the symbolic atan2 function."""
    x, y = symbols('x y')
    expr = atan2(x, y)
    assert isinstance(expr, atan2)
    # atan2(0, 1) lies on the positive x-axis, so the angle is exactly 0.
    value = atan2(0, 1)
    assert value == 0
# NOTE(review): this line looks like a mangled decorator (likely
# '@array_function_dispatch(_stack_arrays_dispatcher)') — confirm upstream.
_function_dispatch(_stack_arrays_dispatcher)
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False, autoconvert=False):
    """Superpose structured arrays field-by-field into one masked array.

    Fields absent from an input array are masked in the result. With
    *autoconvert*, fields sharing a name but differing in dtype are widened
    to the larger dtype; otherwise a dtype mismatch raises TypeError.

    Args:
        arrays: sequence of (structured) arrays; a single ndarray is
            returned unchanged, a length-1 sequence returns its element.
        defaults: fill values forwarded to ``_fix_defaults``.
        usemask / asrecarray: output flavor, forwarded to ``_fix_output``.
        autoconvert: widen mismatched field dtypes instead of raising.

    Returns:
        A stacked (masked) array; exact type depends on usemask/asrecarray.
    """
    if isinstance(arrays, ndarray):
        return arrays
    elif (len(arrays) == 1):
        return arrays[0]
    seqarrays = [np.asanyarray(a).ravel() for a in arrays]
    nrecords = [len(a) for a in seqarrays]
    ndtype = [a.dtype for a in seqarrays]
    fldnames = [d.names for d in ndtype]
    # Start from the first dtype's fields, then merge in the others.
    dtype_l = ndtype[0]
    newdescr = _get_fieldspec(dtype_l)
    names = [n for (n, d) in newdescr]
    for dtype_n in ndtype[1:]:
        for (fname, fdtype) in _get_fieldspec(dtype_n):
            if (fname not in names):
                newdescr.append((fname, fdtype))
                names.append(fname)
            else:
                nameidx = names.index(fname)
                (_, cdtype) = newdescr[nameidx]
                if autoconvert:
                    # Widen to the larger of the two dtypes.
                    newdescr[nameidx] = (fname, max(fdtype, cdtype))
                elif (fdtype != cdtype):
                    raise TypeError(("Incompatible type '%s' <> '%s'" % (cdtype, fdtype)))
    if (len(newdescr) == 1):
        # Single common field: a plain masked concatenation suffices.
        output = ma.concatenate(seqarrays)
    else:
        # Allocate fully-masked output, then copy each input's fields into
        # its row span [i, j).
        output = ma.masked_all((np.sum(nrecords),), newdescr)
        offset = np.cumsum(np.r_[(0, nrecords)])
        seen = []
        for (a, n, i, j) in zip(seqarrays, fldnames, offset[:(- 1)], offset[1:]):
            names = a.dtype.names
            if (names is None):
                # Unstructured input fills the next unnamed slot.
                output[('f%i' % len(seen))][i:j] = a
            else:
                for name in n:
                    output[name][i:j] = a[name]
                    if (name not in seen):
                        seen.append(name)
    return _fix_output(_fix_defaults(output, defaults), usemask=usemask, asrecarray=asrecarray)
def update_args(base_args, input_args):
    """Copy every key/value pair from *input_args* onto *base_args*.

    Args:
        base_args: namespace-like object; its ``__dict__`` is updated in
            place, overwriting existing attributes.
        input_args: anything ``dict()`` accepts (mapping or key/value pairs).

    Returns:
        The same *base_args* object, mutated.
    """
    overrides = dict(input_args)
    base_args.__dict__.update(overrides)
    return base_args
def _adjust_gamma_u8(image, gamma, gain):
lut = ((255 * gain) * (np.linspace(0, 1, 256) ** gamma))
lut = np.minimum(np.rint(lut), 255).astype('uint8')
return lut[image] |
def keep_relevant_rows_and_unstack(ref_df, predictions):
    """Keep reference rows with at least one relation and align predictions.

    Each kept row is exploded into one row per causal pair; the matching
    *predictions* entry is truncated or padded (with the row's own text) to
    exactly ``num_rs`` items so predictions line up with exploded rows.

    Args:
        ref_df: DataFrame with columns 'num_rs', 'text' and
            'causal_text_w_pairs' (stringified lists).
        predictions: list indexed like ``ref_df`` rows; each item a list of
            predicted strings.

    Returns:
        tuple: (exploded DataFrame with 'text_w_pairs' and 'eg_id' columns,
        flat list of predictions aligned with its rows).

    NOTE(review): padding mutates ``predictions[i]`` in place via
    ``extend`` — callers see the padded lists afterwards.
    """
    predictions_w_true_labels = []
    eg_id_counter = []
    for (i, row) in ref_df.iterrows():
        if (row.num_rs > 0):
            p = predictions[i]
            if (len(p) > row.num_rs):
                # Too many predictions: keep the first num_rs.
                p = p[:row.num_rs]
            elif (len(p) < row.num_rs):
                # Too few: pad with the row's own text (in place).
                p.extend(([row.text] * (row.num_rs - len(p))))
            predictions_w_true_labels.extend(p)
            eg_id_counter.extend(list(range(row.num_rs)))
    ref_df = ref_df[(ref_df['num_rs'] > 0)].reset_index(drop=True)
    # Parse the stringified list column, then explode to one pair per row.
    ref_df['causal_text_w_pairs'] = ref_df['causal_text_w_pairs'].apply((lambda x: literal_eval(x)))
    ref_df = ref_df.explode('causal_text_w_pairs')
    ref_df = ref_df.rename(columns={'causal_text_w_pairs': 'text_w_pairs'})
    ref_df['eg_id'] = eg_id_counter
    return (ref_df.reset_index(drop=True), predictions_w_true_labels)
class ImageFeaturesHdfReader(object):
    """Reader for pre-extracted image features stored in an HDF5 file.

    The file is expected to contain an 'image_id' dataset, a parallel
    'features' dataset, and a 'split' attribute. With ``in_memory=True``
    features are cached on first access; otherwise every lookup reopens
    the file.
    """

    def __init__(self, features_hdfpath: str, in_memory: bool = False):
        self.features_hdfpath = features_hdfpath
        self._in_memory = in_memory
        with h5py.File(self.features_hdfpath, 'r') as features_hdf:
            self._split = features_hdf.attrs['split']
            self._image_id_list = list(features_hdf['image_id'])
            # One lazily-filled cache slot per image id.
            self.features = [None] * len(self._image_id_list)

    def __len__(self):
        return len(self._image_id_list)

    def __getitem__(self, image_id: int):
        """Return the feature array for *image_id* (not a positional index)."""
        index = self._image_id_list.index(image_id)
        if not self._in_memory:
            with h5py.File(self.features_hdfpath, 'r') as features_hdf:
                return features_hdf['features'][index]
        cached = self.features[index]
        if cached is None:
            with h5py.File(self.features_hdfpath, 'r') as features_hdf:
                cached = features_hdf['features'][index]
            self.features[index] = cached
        return cached

    def keys(self) -> List[int]:
        """Return the list of available image ids."""
        return self._image_id_list

    def split(self):
        """Return the dataset split name stored in the HDF5 attributes."""
        return self._split
def set_random_seed(seed: int):
    """Seed NumPy and the stdlib RNGs and export PYTHONHASHSEED for children."""
    for seeder in (np.random.seed, random.seed):
        seeder(seed)
    # Exported so subprocesses inherit a fixed hash seed.
    os.environ['PYTHONHASHSEED'] = str(seed)
class FastTextEmbeddingBag(EmbeddingBag):
    """EmbeddingBag initialised from a pre-trained (num_subwords, dim) matrix.

    ``forward`` pools subword vectors per token (EmbeddingBag's default
    'mean' mode): input is an integer tensor of shape
    (batch, tokens, subwords); output is (batch, tokens, dim).
    """

    def __init__(self, embedding_matrix, sparse=False):
        num_embeddings, embedding_dim = embedding_matrix.shape
        super().__init__(num_embeddings, embedding_dim, sparse=sparse)
        # Load the pre-trained weights in place.
        self.weight.data.copy_(torch.FloatTensor(embedding_matrix))

    def get_output_dim(self):
        return self.weight.shape[1]

    def forward(self, token_subwordIds):
        batch, tokens, subwords = token_subwordIds.shape
        # Collapse (batch, tokens) so each row is one bag of subword ids.
        flat = token_subwordIds.view(-1, subwords)
        pooled = super().forward(flat)
        return pooled.view(batch, tokens, -1)
def _regnet(variant, pretrained, **kwargs):
    """Build the RegNet `variant`, optionally loading pretrained weights."""
    strict = True
    if kwargs.pop('features_only', False):
        # Feature-extraction mode is not wired up for RegNet yet.
        assert False, 'Not Implemented'
        strict = False
        kwargs.pop('num_classes', 0)
    model = RegNet(model_cfgs[variant], **kwargs)
    model.default_cfg = default_cfgs[variant]
    if pretrained:
        load_pretrained(model, model.default_cfg,
                        num_classes=kwargs.get('num_classes', 0),
                        in_chans=kwargs.get('in_chans', 3),
                        strict=strict)
    return model
def batch_segids20(s, l):
    """Build per-segment id vectors for the two halves of a batch and concat.

    Segment i is repeated s[i] times; the second half starts at id l/2.
    """
    half = int(l / 2)
    first = [tf.tile([i], [s[i]]) for i in range(half)]
    second = [tf.tile([i + half], [s[i + half]]) for i in range(half)]
    return concat_versions(0, first + second)
def register_Ns3OlsrAssociation_methods(root_module, cls):
    """Register Python bindings for the ns3::olsr::Association struct.

    Adds `==` comparison, stream output, default and copy constructors, and
    the public `netmask`/`networkAddr` attributes.  Generated-style pybindgen
    registration code — keep the call order as emitted.
    """
    cls.add_binary_comparison_operator('==')
    cls.add_output_stream_operator()
    cls.add_constructor([])
    cls.add_constructor([param('ns3::olsr::Association const &', 'arg0')])
    cls.add_instance_attribute('netmask', 'ns3::Ipv4Mask', is_const=False)
    cls.add_instance_attribute('networkAddr', 'ns3::Ipv4Address', is_const=False)
    return
class TransformerEncoderLayer(nn.Module):
    """One transformer encoder layer: self-attention followed by a feed-forward net.

    Supports both post-norm and pre-norm variants (``normalize_before``) and an
    optional mode (``divide_norm``) that L2-normalises queries/keys, rescaling
    the query by sqrt(head_dim).
    """

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation='relu', normalize_before=False, divide_norm=False):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Feed-forward sub-network: linear -> activation -> dropout -> linear.
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before
        self.divide_norm = divide_norm
        # sqrt(head_dim); rescales the normalised query when divide_norm is on.
        self.scale_factor = (float((d_model // nhead)) ** 0.5)

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        # Positional encodings are added to queries/keys only, never to values.
        return (tensor if (pos is None) else (tensor + pos))

    def forward_post(self, src, src_mask: Optional[Tensor]=None, src_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None):
        """Post-norm path: attention/FFN first, LayerNorm after each residual add."""
        q = k = self.with_pos_embed(src, pos)
        if self.divide_norm:
            # L2-normalise q and k; q is additionally scaled by sqrt(head_dim).
            q = ((q / torch.norm(q, dim=(- 1), keepdim=True)) * self.scale_factor)
            k = (k / torch.norm(k, dim=(- 1), keepdim=True))
        src2 = self.self_attn(q, k, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
        src = (src + self.dropout1(src2))
        src = self.norm1(src)
        src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
        src = (src + self.dropout2(src2))
        src = self.norm2(src)
        return src

    def forward_pre(self, src, src_mask: Optional[Tensor]=None, src_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None):
        """Pre-norm path: LayerNorm before attention/FFN.

        NOTE(review): divide_norm is not applied on this path — confirm
        whether that asymmetry with forward_post is intentional.
        """
        src2 = self.norm1(src)
        q = k = self.with_pos_embed(src2, pos)
        src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
        src = (src + self.dropout1(src2))
        src2 = self.norm2(src)
        src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
        src = (src + self.dropout2(src2))
        return src

    def forward(self, src, src_mask: Optional[Tensor]=None, src_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None):
        """Dispatch to the pre-norm or post-norm implementation."""
        if self.normalize_before:
            return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
        return self.forward_post(src, src_mask, src_key_padding_mask, pos)
def _seg_43():
    """Return one segment of an auto-generated Unicode mapping table.

    Entries are (codepoint, status[, mapping]) tuples — presumably an
    IDNA/UTS46 `uts46data`-style table ('M' mapped, 'X' disallowed,
    'V' valid, '3' disallowed_STD3_mapped); confirm against the generator.
    Do not edit by hand.
    """
    return [(64162, 'M', u''), (64163, 'M', u''), (64164, 'M', u''), (64165, 'M', u''), (64166, 'M', u''), (64167, 'M', u''), (64168, 'M', u''), (64169, 'M', u''), (64170, 'M', u''), (64171, 'M', u''), (64172, 'M', u''), (64173, 'M', u''), (64174, 'M', u''), (64175, 'M', u''), (64176, 'M', u''), (64177, 'M', u''), (64178, 'M', u''), (64179, 'M', u''), (64180, 'M', u''), (64181, 'M', u''), (64182, 'M', u''), (64183, 'M', u''), (64184, 'M', u''), (64185, 'M', u''), (64186, 'M', u''), (64187, 'M', u''), (64188, 'M', u''), (64189, 'M', u''), (64190, 'M', u''), (64191, 'M', u''), (64192, 'M', u''), (64193, 'M', u''), (64194, 'M', u''), (64195, 'M', u''), (64196, 'M', u''), (64197, 'M', u''), (64198, 'M', u''), (64199, 'M', u''), (64200, 'M', u''), (64201, 'M', u''), (64202, 'M', u''), (64203, 'M', u''), (64204, 'M', u''), (64205, 'M', u''), (64206, 'M', u''), (64207, 'M', u''), (64208, 'M', u''), (64209, 'M', u''), (64210, 'M', u''), (64211, 'M', u''), (64212, 'M', u''), (64213, 'M', u''), (64214, 'M', u''), (64215, 'M', u''), (64216, 'M', u''), (64217, 'M', u''), (64218, 'X'), (64256, 'M', u'ff'), (64257, 'M', u'fi'), (64258, 'M', u'fl'), (64259, 'M', u'ffi'), (64260, 'M', u'ffl'), (64261, 'M', u'st'), (64263, 'X'), (64275, 'M', u''), (64276, 'M', u''), (64277, 'M', u''), (64278, 'M', u''), (64279, 'M', u''), (64280, 'X'), (64285, 'M', u''), (64286, 'V'), (64287, 'M', u''), (64288, 'M', u''), (64289, 'M', u''), (64290, 'M', u''), (64291, 'M', u''), (64292, 'M', u''), (64293, 'M', u''), (64294, 'M', u''), (64295, 'M', u''), (64296, 'M', u''), (64297, '3', u'+'), (64298, 'M', u''), (64299, 'M', u''), (64300, 'M', u''), (64301, 'M', u''), (64302, 'M', u''), (64303, 'M', u''), (64304, 'M', u''), (64305, 'M', u''), (64306, 'M', u''), (64307, 'M', u''), (64308, 'M', u''), (64309, 'M', u''), (64310, 'M', u''), (64311, 'X'), (64312, 'M', u''), (64313, 'M', u''), (64314, 'M', u'')]
def pad_to_len(pair_targets, pad, max_pair_target_len):
    """Truncate or pad every inner list to exactly `max_pair_target_len` items.

    The outer list is updated in place (each element is replaced with a new
    list) and returned for convenience.
    """
    for idx, target in enumerate(pair_targets):
        clipped = target[:max_pair_target_len]
        pair_targets[idx] = clipped + [pad] * (max_pair_target_len - len(clipped))
    return pair_targets
class RayEncoder(nn.Module):
    """Encode camera positions and per-pixel ray directions with two
    PositionalEncoding modules and concatenate the results."""

    def __init__(self, pos_octaves=8, pos_start_octave=0, ray_octaves=4, ray_start_octave=0):
        super().__init__()
        self.pos_encoding = PositionalEncoding(num_octaves=pos_octaves, start_octave=pos_start_octave)
        self.ray_encoding = PositionalEncoding(num_octaves=ray_octaves, start_octave=ray_start_octave)

    def forward(self, pos, rays):
        if (len(rays.shape) == 4):
            # Image-shaped input: rays are (batch, height, width, dims).
            (batchsize, height, width, dims) = rays.shape
            pos_enc = self.pos_encoding(pos.unsqueeze(1))
            # Broadcast the single position encoding over the full image plane.
            pos_enc = pos_enc.view(batchsize, pos_enc.shape[(- 1)], 1, 1)
            pos_enc = pos_enc.repeat(1, 1, height, width)
            # Flatten pixels, encode the rays, then restore an NCHW layout.
            rays = rays.flatten(1, 2)
            ray_enc = self.ray_encoding(rays)
            ray_enc = ray_enc.view(batchsize, height, width, ray_enc.shape[(- 1)])
            ray_enc = ray_enc.permute((0, 3, 1, 2))
            # Concatenate along the channel dimension.
            x = torch.cat((pos_enc, ray_enc), 1)
        else:
            # Flat input: encode and concatenate along the last dimension.
            pos_enc = self.pos_encoding(pos)
            ray_enc = self.ray_encoding(rays)
            x = torch.cat((pos_enc, ray_enc), (- 1))
        return x
def test_temperature_smooth():
    """Behavioural checks for temperature_smooth: flattening, validation,
    identity at T=1, one-hot fixed points, and the exact softmax formula."""

    def smooth(probs, temp):
        return temperature_smooth(np.array(probs, dtype=np.float32), temp)

    def same(x1, x2):
        assert_almost_equal(x1, x2, decimal=4)

    # A very high temperature flattens the distribution over its support.
    third = 1.0 / 3
    same(smooth([0.0, 0.2, 0.4, 0.4], 100000), [0.0, third, third, third])
    # Inputs that are not probability distributions are rejected.
    with pytest.raises(ValueError):
        smooth([1, 2, 0], 1)
    with pytest.raises(ValueError):
        smooth([1, (- 1), 1], 1)
    # Degenerate temperatures are rejected too.
    with pytest.raises(ValueError):
        smooth([0, 0.25, 0.75, 0], 0)
    with pytest.raises(ValueError):
        smooth([0, 0.25, 0.75, 0], float('inf'))
    # Temperature 1 is the identity.
    probs = [0, 0.25, 0.75, 0]
    same(smooth(probs, 1), probs)
    # A one-hot distribution is a fixed point at any temperature.
    one_hot = [1, 0, 0]
    same(smooth(one_hot, 10), one_hot)
    same(smooth(one_hot, 0.1), one_hot)
    # Exact formula check: softmax of logits (2, 3) smoothed at T=11.
    a, b = np.exp(2), np.exp(3)
    smoothed = smooth([0, a / (a + b), b / (a + b)], 11)
    a2, b2 = np.exp(2.0 / 11), np.exp(3.0 / 11)
    same(smoothed, [0, a2 / (a2 + b2), b2 / (a2 + b2)])
class ReflexiveModule_abstract(Parent):
    """Abstract base for reflexive (tensor) modules over a base module.

    NOTE(review): the bare `_method(optional=True)` / `_method` lines below are
    almost certainly decorator remnants (`@abstract_method(...)`) whose `@` was
    lost in extraction, and `tensor_type` / `base_module` are abstract
    declarations with no visible body — confirm against the original Sage
    source before editing.
    """
    _method(optional=True)
    def tensor_type(self):
    _method
    def base_module(self):
    def dual(self):
        """Return the dual module: tensor type (k, l) becomes (l, k)."""
        (k, l) = self.tensor_type()
        return self.base_module().tensor_module(l, k)

    def tensor(self, *args, **kwds):
        """Alias for :meth:`tensor_product`."""
        return self.tensor_product(*args, **kwds)

    def tensor_power(self, n):
        """Return the n-th tensor power; the tensor type scales to (n*k, n*l)."""
        tensor_type = self.tensor_type()
        return self.base_module().tensor_module((n * tensor_type[0]), (n * tensor_type[1]))

    def tensor_product(self, *others):
        """Tensor product of ``self`` with ``others`` over the same base module.

        The result's tensor type is the componentwise sum of the factors'
        types.  Each factor's (anti)symmetries are translated into the
        result's index space through a per-factor index map; the maps are
        stored on the result as ``_index_maps``.

        Raises:
            NotImplementedError: if the factors have different base modules.
        """
        from sage.modules.free_module_element import vector
        from .comp import CompFullySym, CompFullyAntiSym, CompWithSym
        base_module = self.base_module()
        if (not all(((module.base_module() == base_module) for module in others))):
            raise NotImplementedError('all factors must be tensor modules over the same base module')
        factors = ([self] + list(others))
        result_tensor_type = sum((vector(factor.tensor_type()) for factor in factors))
        result_sym = []
        result_antisym = []
        index_maps = []
        # Running offsets: [next contravariant slot, next covariant slot];
        # covariant slots start after all contravariant ones.
        running_indices = vector([0, result_tensor_type[0]])
        for factor in factors:
            tensor_type = factor.tensor_type()
            # Map this factor's local indices into the result's index space.
            index_map = tuple(((i + running_indices[0]) for i in range(tensor_type[0])))
            index_map += tuple(((i + running_indices[1]) for i in range(tensor_type[1])))
            index_maps.append(index_map)
            if ((tensor_type[0] + tensor_type[1]) > 1):
                basis_sym = factor._basis_sym()
                all_indices = tuple(range((tensor_type[0] + tensor_type[1])))
                if isinstance(basis_sym, CompFullySym):
                    sym = [all_indices]
                    antisym = []
                elif isinstance(basis_sym, CompFullyAntiSym):
                    sym = []
                    antisym = [all_indices]
                elif isinstance(basis_sym, CompWithSym):
                    sym = basis_sym._sym
                    antisym = basis_sym._antisym
                else:
                    sym = antisym = []
                def map_isym(isym):
                    # Translate one local symmetry group into result indices.
                    return tuple((index_map[i] for i in isym))
                result_sym.extend((tuple((index_map[i] for i in isym)) for isym in sym))
                result_antisym.extend((tuple((index_map[i] for i in isym)) for isym in antisym))
            running_indices += vector(tensor_type)
        result = base_module.tensor_module(*result_tensor_type, sym=result_sym, antisym=result_antisym)
        result._index_maps = tuple(index_maps)
        return result
def serial_ports():
    """Return the names of serial ports that can currently be opened.

    Each candidate port is probed by opening and immediately closing it;
    ports that fail to open are silently skipped.

    Returns:
        list[str]: openable port names.

    Raises:
        EnvironmentError: on unsupported platforms.
    """
    if sys.platform.startswith('win'):
        ports = [('COM%s' % (i + 1)) for i in range(256)]
    elif (sys.platform.startswith('linux') or sys.platform.startswith('cygwin')):
        ports = glob.glob('/dev/tty[A-Za-z]*')
    elif sys.platform.startswith('darwin'):
        ports = glob.glob('/dev/tty.*')
    else:
        # BUGFIX: the old `sys.exit(0)` after this raise was unreachable
        # dead code and has been removed.
        raise EnvironmentError('Unsupported platform')
    result = []
    for port in ports:
        try:
            # Probe: if the port opens, it is available.
            s = serial.Serial(port)
            s.close()
            result.append(port)
        except (OSError, serial.SerialException):
            pass
    return result
class Block(nn.Module):
    """ViT-style transformer block with optional LoRA / ToMe / RepAdapter hooks.

    Structure: x + drop_path(attn(norm1(x))) followed by
    x + drop_path(mlp(norm2(x))), with optional learnable per-channel scale
    factors (gamma_1/gamma_2) when ``init_values`` is given.
    """

    def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=True, drop_path=0.0, init_values=None, norm_layer=nn.LayerNorm, act_layer=nn.GELU, swiglu=False, use_rel_pos=False, input_size=None, xformers=True, use_lora=False, lora_info=dict, use_tome=False, tome_info=dict, use_repadapter=False, repadapter_info=dict):
        # NOTE(review): the defaults `lora_info=dict` / `tome_info=dict` /
        # `repadapter_info=dict` pass the *dict class*, not an empty dict —
        # presumably callers always supply real dicts; confirm.
        super().__init__()
        # Fall back to the plain attention path when xformers is unavailable.
        self.xformers = (xformers if XFORMERS_IS_AVAILBLE else False)
        self.use_tome = use_tome
        self.tome_info = tome_info
        self.norm1 = norm_layer(dim)
        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, use_rel_pos=use_rel_pos, input_size=input_size, xformers=xformers, use_lora=use_lora, lora_info=lora_info)
        self.drop_path = (DropPath(drop_path) if (drop_path > 0.0) else nn.Identity())
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int((dim * mlp_ratio))
        if swiglu:
            self.mlp = SwiGLUFFNFused(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer)
        else:
            self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer)
        self.use_repadapter = use_repadapter
        if use_repadapter:
            adapter_kwarts = {'hidden_dim': repadapter_info['adapter_hidden_dim'], 'scale': repadapter_info['adapter_scale'], 'groups': repadapter_info['adapter_groups'], 'dropout': repadapter_info['adapter_dropout']}
            self.adapter_mlp = RepAdapter(dim, **adapter_kwarts)
        if (init_values is not None):
            # Learnable per-channel scaling of both residual branches.
            self.gamma_1 = nn.Parameter((init_values * torch.ones(dim)), requires_grad=True)
            self.gamma_2 = nn.Parameter((init_values * torch.ones(dim)), requires_grad=True)
        else:
            (self.gamma_1, self.gamma_2) = (None, None)

    def forward(self, x, H, W):
        (B, _, C) = x.shape
        shortcut = x
        x = self.norm1(x)
        if (not self.use_tome):
            x = self.attn(x, H, W)
        else:
            # Token merging: merge tokens before attention, unmerge after.
            (m_a, u_a) = compute_merge(x, H, W, self.tome_info)
            x = m_a(x)
            x = self.attn(x, H, W)
            x = u_a(x)
        if (self.gamma_1 is None):
            x = (shortcut + self.drop_path(x))
            if self.use_repadapter:
                # RepAdapter sits between norm2 and the MLP.
                x = (x + self.drop_path(self.mlp(self.adapter_mlp(self.norm2(x)))))
            else:
                x = (x + self.drop_path(self.mlp(self.norm2(x))))
        else:
            # Same structure with the gamma scale factors applied.
            x = (shortcut + self.drop_path((self.gamma_1 * x)))
            if self.use_repadapter:
                x = (x + self.drop_path((self.gamma_2 * self.mlp(self.adapter_mlp(self.norm2(x))))))
            else:
                x = (x + self.drop_path((self.gamma_2 * self.mlp(self.norm2(x)))))
        return x
def cosine_beta_schedule(timesteps, s=0.008):
    """Cosine diffusion noise schedule; betas are clipped to [0, 0.999].

    Computes alpha-bar via a squared cosine of the normalised timestep
    (offset by `s`), then derives betas from consecutive ratios.
    """
    grid = torch.linspace(0, timesteps, timesteps + 1, dtype=torch.float64)
    alphas_cumprod = torch.cos(((grid / timesteps) + s) / (1 + s) * math.pi * 0.5) ** 2
    # Normalise so alpha-bar starts at exactly 1.
    alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
    betas = 1 - alphas_cumprod[1:] / alphas_cumprod[:-1]
    return torch.clip(betas, 0, 0.999)
class MultiGCN(nn.Module):
    """Stack of GCNConv layers; ReLU + dropout between layers, none after the last.

    Args:
        n_units: channel sizes [in, hidden, ..., out]; defines len(n_units)-1 layers.
        dropout: dropout probability applied after every non-final layer.
    """

    def __init__(self, n_units=[17, 128, 100], dropout=0.0):
        # NOTE(review): mutable default list; it is never mutated here, but a
        # tuple default would be safer.
        super(MultiGCN, self).__init__()
        self.num_layers = (len(n_units) - 1)
        self.dropout = dropout
        layer_stack = []
        for i in range(self.num_layers):
            layer_stack.append(GCNConv(in_channels=n_units[i], out_channels=n_units[(i + 1)], cached=False))
        self.layer_stack = nn.ModuleList(layer_stack)

    def forward(self, x, edges):
        # GCNConv expects integer edge indices.
        edges = edges.long()
        for (idx, gcn_layer) in enumerate(self.layer_stack):
            x = gcn_layer(x=x, edge_index=edges)
            if ((idx + 1) < self.num_layers):
                # Non-linearity and dropout on all but the final layer.
                x = F.relu(x)
                x = F.dropout(x, self.dropout, training=self.training)
        return x
def PositionalEmbedding(num_embeddings, embedding_dim, padding_idx):
    """Create a LearnedPositionalEmbedding with weights drawn from N(0, 0.1)."""
    embedding = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx)
    # In-place normal init: mean 0, std 0.1.
    embedding.weight.data.normal_(0, 0.1)
    return embedding
class FP16Optimizer(_FP16OptimizerMixin, optim.FairseqOptimizer):
    """Wraps an FP32 optimizer to train FP16 models with dynamic loss scaling."""

    def __init__(self, args, params, fp32_optimizer, fp32_params):
        super().__init__(args)
        self.fp16_params = params
        self.fp32_optimizer = fp32_optimizer
        self.fp32_params = fp32_params
        if (getattr(args, 'fp16_scale_window', None) is None):
            if (len(args.update_freq) > 1):
                raise ValueError('--fp16-scale-window must be given explicitly when using a custom --update-freq schedule')
            # Default scale window: 2**14 shrunk by data parallelism and
            # gradient-accumulation steps.
            data_parallel_size = int((args.distributed_world_size / args.model_parallel_size))
            scale_window = int((((2 ** 14) / data_parallel_size) / args.update_freq[0]))
        else:
            scale_window = args.fp16_scale_window
        self.scaler = DynamicLossScaler(init_scale=args.fp16_init_scale, scale_window=scale_window, tolerance=args.fp16_scale_tolerance, threshold=args.threshold_loss_scale)
        self.min_loss_scale = self.args.min_loss_scale

    # NOTE(review): `build_optimizer` takes `cls` and the accessors below read
    # like properties — their @classmethod/@property decorators were probably
    # lost in extraction; confirm against the upstream fairseq source.
    def build_optimizer(cls, args, params):
        """Create an FP16Optimizer together with its FP32 optimizer/params."""
        flatten = (not getattr(args, 'fp16_no_flatten_grads', False))
        fp32_params = cls.build_fp32_params(params, flatten=flatten)
        if flatten:
            fp32_optimizer = optim.build_optimizer(args, [fp32_params])
        else:
            fp32_optimizer = optim.build_optimizer(args, fp32_params)
        if (flatten and (not fp32_optimizer.supports_flat_params)):
            raise RuntimeError('chosen optimizer does not support flat params, please set --fp16-no-flatten-grads')
        return cls(args, params, fp32_optimizer, fp32_params)

    def optimizer(self):
        # Delegates to the wrapped FP32 optimizer.
        return self.fp32_optimizer.optimizer

    def optimizer_config(self):
        return self.fp32_optimizer.optimizer_config

    def get_lr(self):
        return self.fp32_optimizer.get_lr()

    def set_lr(self, lr):
        self.fp32_optimizer.set_lr(lr)
# NOTE(review): this bare call is most likely a mangled decorator
# (`@test_utils.test()` in Taichi's test suite) — confirm against upstream.
_utils.test()
def test_1d():
    """ti.ndrange((4, 10)) over a 1-D field touches exactly indices 4..9."""
    x = ti.field(ti.f32, shape=16)
    # NOTE(review): presumably decorated with @ti.kernel in the original.
    def func():
        for i in ti.ndrange((4, 10)):
            x[i] = i
    func()
    for i in range(16):
        if (4 <= i < 10):
            assert (x[i] == i)
        else:
            # Untouched entries keep the field's zero default.
            assert (x[i] == 0)
# NOTE(review): the next two lines look like stripped decorators
# (`@pytest.mark.skipif(...)` and `@test_utils.test(...)`) — confirm upstream.
.skipif((not _ti_core.GGUI_AVAILABLE), reason='GGUI Not Available')
_utils.test(arch=supported_archs)
def test_imgui():
    """Smoke-test GGUI IMGUI widgets and compare the rendered frame to a
    stored reference image."""
    window = ti.ui.Window('test', (640, 480), show_window=False)
    gui = window.get_gui()
    def render():
        # Three stacked sub-windows exercising text, button, slider and
        # color-edit widgets.
        with gui.sub_window('window 0', 0.1, 0.1, 0.8, 0.2) as w:
            w.text('Hello Taichi!')
            w.text('Hello Again!')
        with gui.sub_window('window 1', 0.1, 0.4, 0.8, 0.2) as w:
            w.button('Press to unlease creativity')
            w.slider_float('creativity level', 100.0, 0.0, 100.0)
        with gui.sub_window('window 2', 0.1, 0.7, 0.8, 0.2) as w:
            w.color_edit_3('Heyy', (0, 0, 1))
    # Render repeatedly (fetching the frame buffer each time) so the UI
    # settles before the final verified frame.
    for _ in range(RENDER_REPEAT):
        render()
        window.get_image_buffer_as_numpy()
    render()
    verify_image(window.get_image_buffer_as_numpy(), 'test_imgui')
    window.destroy()
def main():
    """Demo: train a Hopfield network on four skimage images and recover
    corrupted versions of them."""
    camera = skimage.data.camera()
    astronaut = rgb2gray(skimage.data.astronaut())
    horse = skimage.data.horse()
    coffee = rgb2gray(skimage.data.coffee())
    data = [camera, astronaut, horse, coffee]
    print('Start to data preprocessing...')
    data = [preprocessing(d) for d in data]
    model = network.HopfieldNetwork()
    model.train_weights(data)
    # Corruption level 0.3 — presumably the fraction of perturbed pixels;
    # confirm against get_corrupted_input. Recovery runs synchronously.
    test = [get_corrupted_input(d, 0.3) for d in data]
    predicted = model.predict(test, threshold=0, asyn=False)
    print('Show prediction results...')
    plot(data, test, predicted)
    print('Show network weights matrix...')
    # NOTE(review): nothing follows this message — a weights-plot call
    # appears to be missing here; confirm against the original source.
class WhoamiCommand(BaseUserCommand):
    """Deprecated CLI command that prints the authenticated Hub user and orgs."""

    def run(self):
        print(ANSI.red('WARNING! `transformers-cli whoami` is deprecated and will be removed in v5. Please use `huggingface-cli whoami` instead.'))
        token = HfFolder.get_token()
        if token is None:
            print('Not logged in')
            exit()
        try:
            (user, orgs) = whoami(token)
        except HTTPError as e:
            # Surface both the exception and the server's response body.
            print(e)
            print(ANSI.red(e.response.text))
            exit(1)
        print(user)
        if orgs:
            print(ANSI.bold('orgs: '), ','.join(orgs))
class InceptionV3(nn.Module):
    """InceptionV3 feature extractor returning activations at selected blocks.

    Block indices and the dimensionality of their outputs:
        0 -> 64, 1 -> 192, 2 -> 768, 3 -> 2048 (see BLOCK_INDEX_BY_DIM).
    """

    DEFAULT_BLOCK_INDEX = 3
    # Map from feature dimensionality to the block index producing it.
    BLOCK_INDEX_BY_DIM = {64: 0, 192: 1, 768: 2, 2048: 3}

    def __init__(self, output_blocks=(DEFAULT_BLOCK_INDEX,), resize_input=True, normalize_input=True, requires_grad=False, use_fid_inception=True):
        """
        Args:
            output_blocks: block indices whose outputs to return.
                (BUGFIX: default changed from a mutable list to an immutable
                tuple; callers may still pass lists.)
            resize_input: bilinearly resize inputs to 299x299 first.
            normalize_input: map inputs from [0, 1] to [-1, 1].
            requires_grad: whether parameters keep gradients.
            use_fid_inception: use the FID-specific pretrained network instead
                of torchvision's stock inception_v3.
        """
        super(InceptionV3, self).__init__()
        self.resize_input = resize_input
        self.normalize_input = normalize_input
        self.output_blocks = sorted(output_blocks)
        self.last_needed_block = max(output_blocks)
        assert (self.last_needed_block <= 3), 'Last possible output block index is 3'
        self.blocks = nn.ModuleList()
        if use_fid_inception:
            inception = fid_inception_v3()
        else:
            inception = models.inception_v3(pretrained=True)
        # Block 0: stem convolutions + max pool.
        block0 = [inception.Conv2d_1a_3x3, inception.Conv2d_2a_3x3, inception.Conv2d_2b_3x3, nn.MaxPool2d(kernel_size=3, stride=2)]
        self.blocks.append(nn.Sequential(*block0))
        # Only build as many blocks as the deepest requested output needs.
        if (self.last_needed_block >= 1):
            block1 = [inception.Conv2d_3b_1x1, inception.Conv2d_4a_3x3, nn.MaxPool2d(kernel_size=3, stride=2)]
            self.blocks.append(nn.Sequential(*block1))
        if (self.last_needed_block >= 2):
            block2 = [inception.Mixed_5b, inception.Mixed_5c, inception.Mixed_5d, inception.Mixed_6a, inception.Mixed_6b, inception.Mixed_6c, inception.Mixed_6d, inception.Mixed_6e]
            self.blocks.append(nn.Sequential(*block2))
        if (self.last_needed_block >= 3):
            block3 = [inception.Mixed_7a, inception.Mixed_7b, inception.Mixed_7c, nn.AdaptiveAvgPool2d(output_size=(1, 1))]
            self.blocks.append(nn.Sequential(*block3))
        for param in self.parameters():
            param.requires_grad = requires_grad

    def forward(self, inp):
        """Return the list of activations for each requested block, in order."""
        outp = []
        x = inp
        if self.resize_input:
            x = F.interpolate(x, size=(299, 299), mode='bilinear', align_corners=False)
        if self.normalize_input:
            # Map [0, 1] images into the [-1, 1] range.
            x = ((2 * x) - 1)
        for (idx, block) in enumerate(self.blocks):
            x = block(x)
            if (idx in self.output_blocks):
                outp.append(x)
            if (idx == self.last_needed_block):
                # No need to run deeper blocks than requested.
                break
        return outp
# NOTE(review): this looks like a stripped `@parametrize_with_checks([...])`
# decorator (sklearn.utils.estimator_checks) — confirm against upstream.
_with_checks([KernelPCovR(mixing=0.5), PCovR(mixing=0.5), fCUR(), fFPS(), fPCovCUR(), fPCovFPS(), Ridge2FoldCV(), KernelNormalizer(), StandardFlexibleScaler()])
def test_sklearn_compatible_estimator(estimator, check):
    """Run one scikit-learn API-compliance check against one estimator."""
    check(estimator)
def S3a():
    """Benchmark: time differentiating a large expanded symbolic power."""
    var('x,y,z')
    f = expand(((x ** y) + (y ** z) + (z ** x)) ** 500)
    start = clock()
    f.diff(x)
    stop = clock()
    return stop - start
def test_pcpvt_init():
    """PCPVT weight init: interplay and validation of `pretrained` / `init_cfg`."""
    path = 'PATH_THAT_DO_NOT_EXIST'
    # No pretrained weights and no init_cfg: random init succeeds.
    model = PCPVT(pretrained=None, init_cfg=None)
    assert (model.init_cfg is None)
    model.init_weights()
    # init_cfg pointing at a missing checkpoint is stored but fails to load.
    model = PCPVT(pretrained=None, init_cfg=dict(type='Pretrained', checkpoint=path))
    assert (model.init_cfg == dict(type='Pretrained', checkpoint=path))
    with pytest.raises(OSError):
        model.init_weights()
    # A malformed init_cfg is rejected at init_weights() time.
    model = PCPVT(pretrained=None, init_cfg=123)
    with pytest.raises(TypeError):
        model.init_weights()
    # A string `pretrained` is converted into an equivalent init_cfg.
    model = PCPVT(pretrained=path, init_cfg=None)
    assert (model.init_cfg == dict(type='Pretrained', checkpoint=path))
    with pytest.raises(OSError):
        model.init_weights()
    # Supplying both pretrained and init_cfg, or bad types, fails eagerly
    # in the constructor.
    with pytest.raises(AssertionError):
        model = PCPVT(pretrained=path, init_cfg=dict(type='Pretrained', checkpoint=path))
    with pytest.raises(AssertionError):
        model = PCPVT(pretrained=path, init_cfg=123)
    with pytest.raises(TypeError):
        model = PCPVT(pretrained=123, init_cfg=None)
    with pytest.raises(AssertionError):
        model = PCPVT(pretrained=123, init_cfg=dict(type='Pretrained', checkpoint=path))
    with pytest.raises(AssertionError):
        model = PCPVT(pretrained=123, init_cfg=123)
class BatchSampler(Sampler[List[int]]):
    """Wraps another sampler to yield mini-batches of indices.

    Args:
        sampler: source of individual indices.
        batch_size: number of indices per batch (positive int).
        drop_last: when True, discard the final incomplete batch.
    """

    def __init__(self, sampler: Sampler[int], batch_size: int, drop_last: bool) -> None:
        # Reject bools explicitly: isinstance(True, int) is True in Python.
        if ((not isinstance(batch_size, _int_classes)) or isinstance(batch_size, bool) or (batch_size <= 0)):
            raise ValueError('batch_size should be a positive integer value, but got batch_size={}'.format(batch_size))
        if not isinstance(drop_last, bool):
            raise ValueError('drop_last should be a boolean value, but got drop_last={}'.format(drop_last))
        self.sampler = sampler
        self.batch_size = batch_size
        self.drop_last = drop_last

    def __iter__(self):
        batch: List[int] = []
        for index in self.sampler:
            batch.append(index)
            if len(batch) == self.batch_size:
                yield batch
                batch = []
        # Flush the final partial batch unless drop_last is requested.
        if batch and not self.drop_last:
            yield batch

    def __len__(self):
        full, remainder = divmod(len(self.sampler), self.batch_size)
        if self.drop_last or remainder == 0:
            return full
        return full + 1
class LegacySpecifier(_IndividualSpecifier):
    """Version specifier using pre-PEP 440 ("legacy") comparison semantics.

    Both operands are coerced to LegacyVersion before comparison, so ordering
    follows setuptools' historical rules rather than PEP 440.
    """

    _regex_str = '\n        (?P<operator>(==|!=|<=|>=|<|>))\n        \\s*\n        (?P<version>\n            [^,;\\s)]* # Since this is a "legacy" specifier, and the version\n                      # string can be just about anything, we match everything\n                      # except for whitespace, a semi-colon for marker support,\n                      # a closing paren since versions can be enclosed in\n                      # them, and a comma since it\'s a version separator.\n        )\n        '
    _regex = re.compile((('^\\s*' + _regex_str) + '\\s*$'), (re.VERBOSE | re.IGNORECASE))
    # Maps each operator token to the _compare_* method suffix used below.
    _operators = {'==': 'equal', '!=': 'not_equal', '<=': 'less_than_equal', '>=': 'greater_than_equal', '<': 'less_than', '>': 'greater_than'}

    def _coerce_version(self, version):
        # Accept anything stringifiable and wrap it as a LegacyVersion.
        if (not isinstance(version, LegacyVersion)):
            version = LegacyVersion(str(version))
        return version

    def _compare_equal(self, prospective, spec):
        return (prospective == self._coerce_version(spec))

    def _compare_not_equal(self, prospective, spec):
        return (prospective != self._coerce_version(spec))

    def _compare_less_than_equal(self, prospective, spec):
        return (prospective <= self._coerce_version(spec))

    def _compare_greater_than_equal(self, prospective, spec):
        return (prospective >= self._coerce_version(spec))

    def _compare_less_than(self, prospective, spec):
        return (prospective < self._coerce_version(spec))

    def _compare_greater_than(self, prospective, spec):
        return (prospective > self._coerce_version(spec))
def tokenize(cased_lines, tokenizer, basic_tokenizer, worker_id, batch_offset):
    """Tokenize each line coarsely, then sub-tokenize every resulting token.

    Returns (worker_id, sentences, batch_offset) so results from parallel
    workers can be reassembled in their original order.
    """
    sents = []
    for line in cased_lines:
        pieces = []
        for word in basic_tokenizer.tokenize(line):
            pieces.extend(tokenizer.tokenize(word))
        sents.append(pieces)
    return (worker_id, sents, batch_offset)
def init_assign(config, d, traverse):
    """Copy the leaf items of `d` into `config`, resolving dotted keys.

    NOTE(review): the `traverse` flag is accepted but unused here, although
    callers pass both True and False — confirm the intended semantics.
    """
    for full_key, value in traverse_dfs(d, 'item', continue_type=dict):
        # Non-empty plain dicts are containers, not leaves; their items are
        # produced separately by the traversal.
        is_nonempty_dict = (type(value) == dict) and bool(value)
        if is_nonempty_dict:
            continue
        sub_cfg, sub_key = consume_dots(config, full_key, create_default=True)
        sub_cfg[sub_key] = value
def sample_dataset(dataset, n=10000, n_eval=1000, seed=0):
    """Subsample each split in place: `n` rows for 'train', `n_eval` otherwise.

    Splits already at or below the target size, or with a falsy target, are
    left untouched.  Sampling is seeded for reproducibility.
    """
    for split_name in dataset:
        target = n if split_name == 'train' else n_eval
        if target and len(dataset[split_name]) > target:
            dataset[split_name] = dataset[split_name].train_test_split(train_size=target, seed=seed)['train']
    return dataset
def main():
    """Convert pickled Mapillary panoptic annotations into PNGs (Cityscapes ids).

    NOTE(review): despite parsing CLI arguments below, the script overwrites
    them from hard-coded DEBUG/split settings, so the argparse values are
    effectively ignored — confirm whether that is intentional.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset-folder', dest='pickle_path', help="path to the Cityscapes dataset 'gtFine' folder", default=None, type=str)
    parser.add_argument('--output-folder', dest='outputFolder', help='path to the output folder.', default=None, type=str)
    parser.add_argument('--use-train-id', action='store_true', dest='useTrainId')
    parser.add_argument('--set-names', dest='setNames', help='set names to which apply the function to', nargs='+', default=['val', 'train', 'test'], type=str)
    args = parser.parse_args()
    data_out = 'data/mapillary'
    DEBUG = False
    split = 'val'
    # Train ids are used only when converting the training split.
    if (split == 'train'):
        useTrainId = True
    else:
        useTrainId = False
    BBOX_VIS = False
    if DEBUG:
        args.pickle_path = ''
        args.outputFolder = ''
    else:
        args.pickle_path = f'{data_out}/{split}/panoptic_mapped_to_cityscapes_ids_19_classes/pickles'
        args.outputFolder = f'{data_out}/{split}/panoptic_mapped_to_cityscapes_ids_19_classes/pngs'
    if (not os.path.exists(args.outputFolder)):
        os.makedirs(args.outputFolder)
    convert2panoptic(args.pickle_path, args.outputFolder, useTrainId=useTrainId, BBOX_VIS=BBOX_VIS)
def _load_bgzf_block(handle, text_mode=False):
    """Read and decompress a single BGZF block from `handle`.

    Returns:
        (total_block_size, data): data is str when `text_mode` is true,
        bytes otherwise.

    Raises:
        StopIteration: at end of file (no magic bytes could be read).
        ValueError: if the block does not start with the BGZF magic.
    """
    magic = handle.read(4)
    if (not magic):
        # EOF. NOTE(review): raising StopIteration from a plain function is
        # fine, but would break inside a generator (PEP 479).
        raise StopIteration
    if (magic != _bgzf_magic):
        raise ValueError(('A BGZF (e.g. a BAM file) block should start with %r, not %r; handle.tell() now says %r' % (_bgzf_magic, magic, handle.tell())))
    # Fixed gzip header fields followed by the extra-field length (little endian).
    (gzip_mod_time, gzip_extra_flags, gzip_os, extra_len) = struct.unpack('<LBBH', handle.read(8))
    block_size = None
    x_len = 0
    # Walk the gzip extra subfields looking for BC, which holds the block size.
    while (x_len < extra_len):
        subfield_id = handle.read(2)
        subfield_len = struct.unpack('<H', handle.read(2))[0]
        subfield_data = handle.read(subfield_len)
        x_len += (subfield_len + 4)
        if (subfield_id == _bytes_BC):
            assert (subfield_len == 2), 'Wrong BC payload length'
            assert (block_size is None), 'Two BC subfields?'
            # BC stores the total block size minus one.
            block_size = (struct.unpack('<H', subfield_data)[0] + 1)
    assert (x_len == extra_len), (x_len, extra_len)
    assert (block_size is not None), "Missing BC, this isn't a BGZF file!"
    # Compressed payload size = block size minus header, extra and trailer bytes.
    deflate_size = (((block_size - 1) - extra_len) - 19)
    # Negative wbits: raw deflate stream without a zlib header.
    d = zlib.decompressobj((- 15))
    data = (d.decompress(handle.read(deflate_size)) + d.flush())
    expected_crc = handle.read(4)
    expected_size = struct.unpack('<I', handle.read(4))[0]
    assert (expected_size == len(data)), ('Decompressed to %i, not %i' % (len(data), expected_size))
    crc = zlib.crc32(data)
    if (crc < 0):
        # Python 2 could return signed CRC values; harmless on Python 3.
        crc = struct.pack('<i', crc)
    else:
        crc = struct.pack('<I', crc)
    assert (expected_crc == crc), ('CRC is %s, not %s' % (crc, expected_crc))
    if text_mode:
        return (block_size, _as_string(data))
    else:
        return (block_size, data)
# NOTE(review): the tuple below and the inline `<<`/`>>` assignments are DaCe
# program/tasklet syntax; the `@dace.program` decorator line (and probably
# @dace.map decorators on the inner defs) appears to have lost its `@` in
# extraction — confirm against the original source.
(datatype[(N, N)], datatype[(N, M)], datatype[(N, M)], datatype[1], datatype[1])
def syr2k(C, A, B, alpha, beta):
    """Symmetric rank-2k update on the lower triangle:
    C := alpha*(A*B^T + B*A^T) + beta*C."""
    # First pass: scale the lower triangle of C by beta.
    def mult_c_rows(i: _[0:N]):
        def mult_c_cols(j: _[0:(i + 1)]):
            (ic << C[(i, j)])
            (ib << beta)
            (oc >> C[(i, j)])
            oc = (ic * ib)
    # Second pass: accumulate the rank-2k contribution (sum-reduced into C).
    def compute(i: _[0:N], k: _[0:M]):
        def compute_elem(j: _[0:(i + 1)]):
            (ialpha << alpha)
            (ia << A[(i, k)])
            (iat << A[(j, k)])
            (ib << B[(i, k)])
            (ibt << B[(j, k)])
            (oc >> C(1, (lambda a, b: (a + b)))[(i, j)])
            oc = (((ialpha * iat) * ib) + ((ialpha * ibt) * ia))
class _AssertVisitor(ast.NodeVisitor):
def __init__(self) -> None:
super().__init__()
self.asserts: list[ast.Assert] = []
def visit_Assert(self, node: ast.Assert) -> ast.AST:
self.asserts.append(node)
return getattr(super(), 'visit_Assert', super().generic_visit)(node) |
def generate_requirements(extras_require):
    """Yield ('Provides-Extra', ...) and ('Requires-Dist', ...) metadata pairs.

    Keys of `extras_require` may embed an environment marker after a colon,
    e.g. "socks:python_version < '3'".
    """
    for extra, depends in extras_require.items():
        condition = ''
        extra = extra or ''
        if ':' in extra:
            # Split "name:marker" into the extra name and its marker condition.
            extra, condition = extra.split(':', 1)
        extra = pkg_resources.safe_extra(extra)
        if extra:
            yield ('Provides-Extra', extra)
            if condition:
                condition = (('(' + condition) + ') and ')
            condition += ("extra == '%s'" % extra)
        if condition:
            condition = ('; ' + condition)
        for new_req in convert_requirements(depends):
            yield ('Requires-Dist', new_req + condition)
def test_tfidfvectorizer_export_idf():
    """The public idf_ attribute must mirror the internal transformer's idf_."""
    vectorizer = TfidfVectorizer(use_idf=True)
    vectorizer.fit(JUNK_FOOD_DOCS)
    assert_array_almost_equal(vectorizer.idf_, vectorizer._tfidf.idf_)
def add_deeplab_config(cfg):
    """Register DeepLab-specific default options on `cfg` (mutated in place).

    Covers crop augmentation, the poly learning-rate schedule, the semantic
    segmentation head (low-level projection + ASPP), and dilated-ResNet
    backbone options.
    """
    # Crop augmentation: cap on the area a single category may occupy.
    cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0
    # Poly learning-rate schedule parameters.
    cfg.SOLVER.POLY_LR_POWER = 0.9
    cfg.SOLVER.POLY_LR_CONSTANT_ENDING = 0.0
    # Semantic segmentation head.
    cfg.MODEL.SEM_SEG_HEAD.LOSS_TYPE = 'hard_pixel_mining'
    cfg.MODEL.SEM_SEG_HEAD.PROJECT_FEATURES = ['res2']
    cfg.MODEL.SEM_SEG_HEAD.PROJECT_CHANNELS = [48]
    # ASPP module settings.
    cfg.MODEL.SEM_SEG_HEAD.ASPP_CHANNELS = 256
    cfg.MODEL.SEM_SEG_HEAD.ASPP_DILATIONS = [6, 12, 18]
    cfg.MODEL.SEM_SEG_HEAD.ASPP_DROPOUT = 0.1
    cfg.MODEL.SEM_SEG_HEAD.USE_DEPTHWISE_SEPARABLE_CONV = False
    # Backbone (dilated ResNet) options.
    cfg.MODEL.RESNETS.RES4_DILATION = 1
    cfg.MODEL.RESNETS.RES5_MULTI_GRID = [1, 2, 4]
    cfg.MODEL.RESNETS.STEM_TYPE = 'deeplab'
class ProtectionModelIndividual(nn.Module):
    """Fuses several candidate noises into a single protection perturbation.

    An attention network scores each (noise, label) input; the softmax-weighted
    sum of the noises is then refined by a fusion network.

    NOTE(review): relies on module-level globals (ngf, net_noise, norm,
    no_dropout, init_type, init_gain) not visible in this chunk — confirm.
    """

    def __init__(self, optimizer, optim_args):
        super().__init__()
        # Attention branch: 4-channel input -> 1-channel score map (att_mode).
        self.attn_model = GenPix2Pix(4, 1, ngf, net_noise, norm, (not no_dropout), init_type, init_gain, att_mode=True)
        # Fusion branch refines the 3-channel combined noise.
        self.fusion_model = GenPix2Pix(3, 3, ngf, net_noise, norm, (not no_dropout), init_type, init_gain)
        # One optimizer covering both sub-networks.
        self.model_optim = optimizer(params=(list(self.attn_model.parameters()) + list(self.fusion_model.parameters())), **optim_args)
        # NOTE(review): self.tanh is defined but never used in forward().
        self.tanh = nn.Tanh()

    def forward(self, noises, combined_noise_label_list):
        # Score each candidate, softmax across candidates (dim 0)...
        z_noise = torch.stack([self.attn_model(noise_label) for noise_label in combined_noise_label_list])
        alpha_noise = torch.softmax(z_noise, dim=0)
        # ...take the attention-weighted sum of the noises, then refine it.
        unrefined_delta = torch.sum((noises * alpha_noise), dim=0)
        final_delta = self.fusion_model(unrefined_delta)
        return final_delta
def reduce_gradients(model, _type='sum'):
    """All-reduce gradients across workers, either summing or averaging.

    No-op (returns None) when running with a single process.
    """
    types = ['sum', 'avg']
    assert _type in types, 'gradients method must be in "{}"'.format(types)
    log_once('gradients method is {}'.format(_type))
    if get_world_size() <= 1:
        # Single-process run: nothing to reduce.
        return None
    for param in model.parameters():
        if param.requires_grad:
            dist.all_reduce(param.grad.data)
            if _type == 'avg':
                param.grad.data /= get_world_size()
def ap(rec, pre):
    """Average precision from (possibly unsorted) recall/precision arrays.

    Sorts by recall, pads with sentinel endpoints, enforces a monotonically
    non-increasing precision envelope, and integrates over recall steps.
    """
    order = np.argsort(rec)
    mrec = np.concatenate(([0], np.array(rec)[order], [1]))
    mpre = np.concatenate(([0], np.array(pre)[order], [0]))
    assert mrec.shape == mpre.shape
    # Right-to-left maximum: precision envelope.
    for k in range(mpre.size - 3, -1, -1):
        mpre[k] = max(mpre[k], mpre[k + 1])
    changed = np.nonzero(mrec[1:] != mrec[:(- 1)])[0] + 1
    return np.sum((mrec[changed] - mrec[changed - 1]) * mpre[changed])
def run_recipe_tests(recipe_folder='tests/recipes', script_field='Script_file', hparam_field='Hparam_file', test_field='test_debug_flags', check_field='test_debug_checks', run_opts='--device=cpu', output_folder='tests/tmp/', filters_fields=[], filters=[], do_checks=True, download_only=False, run_tests_with_checks_only=False):
    """Run every recipe test found under `recipe_folder` and check its outputs.

    Each recipe row supplies a training script, an hparam file, debug flags,
    and optional download commands and file/performance checks.  Logs are
    written under `output_folder`.

    Returns:
        bool: True when every recipe run (and, if `do_checks`, all of its
        checks) succeeded; False otherwise.
    """
    os.makedirs(output_folder, exist_ok=True)
    print(('Test ouputs will be put in %s' % output_folder))
    # Collect per-recipe scripts, hparams, flags, checks and download commands.
    (test_script, test_hparam, test_flag, test_check, test_download, test_message) = prepare_test(recipe_folder, script_field, hparam_field, test_field=test_field, check_field=check_field, filters_fields=filters_fields, filters=filters)
    if (len(test_script) == 0):
        print('No recipes found for testing (please check recipe filters).')
        return False
    if download_only:
        # Only fetch the data each recipe needs; run nothing.
        download_only_test(test_script, test_hparam, test_flag, test_check, run_opts, run_tests_with_checks_only, output_folder)
        return False
    check = True
    for (i, recipe_id) in enumerate(sorted(test_script.keys())):
        # Honour an --output_folder already present in the recipe's flags.
        spec_outfold = False
        if ('--output_folder' in test_flag[recipe_id]):
            pattern = "--output_folder\\s*=?\\s*([^\\s']+|'[^']*')"
            match = re.search(pattern, test_flag[recipe_id])
            output_fold = match.group(1).strip("'")
            spec_outfold = True
        else:
            output_fold = os.path.join(output_folder, recipe_id)
            os.makedirs(output_fold, exist_ok=True)
        stdout_file = os.path.join(output_fold, 'stdout.txt')
        stderr_file = os.path.join(output_fold, 'stderr.txt')
        check_str = test_check[recipe_id].strip()
        if run_tests_with_checks_only:
            # Skip recipes that declare no checks.
            if (len(check_str) == 0):
                continue
        print(('(%i/%i) Running test for %s...' % ((i + 1), len(test_script.keys()), recipe_id)))
        if (recipe_id in test_download):
            download_cmds = test_download[recipe_id].split(';')
            for download_cmd in download_cmds:
                print(('\t' + download_cmd))
                # SECURITY(review): download commands from the recipe file are
                # executed via eval(); only run recipe files from trusted sources.
                eval(download_cmd)
        # Optional per-recipe setup script derived from the script/hparam paths.
        setup_script = os.path.join('tests/recipes/setup', test_script[recipe_id][:(- 3)].replace('/', '_'), test_hparam[recipe_id].replace(os.path.dirname(test_script[recipe_id]), '')[1:(- 5)].replace('/', '_'))
        if os.path.exists(setup_script):
            os.system(setup_script)
        # Assemble the training command with the recipe dir on PYTHONPATH.
        cmd = (((((((f"PYTHONPATH={((os.getcwd() + '/') + os.path.dirname(test_script[recipe_id]))} python " + test_script[recipe_id]) + ' ') + test_hparam[recipe_id]) + ' ') + test_flag[recipe_id]) + ' ') + run_opts)
        if (not spec_outfold):
            cmd = ((cmd + ' --output_folder=') + output_fold)
        if (not do_checks):
            cmd += ' --debug --debug_persistently'
        if (recipe_id in test_message):
            print(('\t\t' + test_message[recipe_id]))
        time_start = time()
        return_code = run_test_cmd(cmd, stdout_file, stderr_file)
        test_duration = (time() - time_start)
        print(('\t... %.2fs' % test_duration))
        # Optional tear-down script, colocated with the setup script.
        td_script = os.path.join(os.path.dirname(setup_script), 'tear_down')
        if os.path.exists(td_script):
            os.system(td_script)
        if (return_code != 0):
            print(('\tERROR: Error in %s (%s). Check %s and %s for more info.' % (recipe_id, test_hparam[recipe_id], stderr_file, stdout_file)))
            check = False
        if (do_checks and (len(check_str) > 0)):
            print('\t...checking files & performance...')
            check &= check_files(check_str, output_fold, recipe_id)
            check &= check_performance(check_str, output_fold, recipe_id)
    return check
def test_suppress_warnings_module():
    """Exercise ``suppress_warnings`` module-based filtering and check it
    leaves no entries behind in a module's ``__warningregistry__``."""
    # Start from a freshly imported module whose warning registry is empty.
    my_mod = _get_fresh_mod()
    assert_equal(getattr(my_mod, '__warningregistry__', {}), {})
    def warn_other_module():
        # Emit a warning attributed to another module: stacklevel=2 makes the
        # warning point at numpy's apply_along_axis machinery, not here.
        def warn(arr):
            warnings.warn('Some warning 2', stacklevel=2)
            return arr
        np.apply_along_axis(warn, 0, [0])
    assert_warn_len_equal(my_mod, 0)
    with suppress_warnings() as sup:
        sup.record(UserWarning)
        # Filter on the module emitting the second warning so only the
        # locally raised warning is recorded.
        sup.filter(module=np.lib.shape_base)
        warnings.warn('Some warning')
        warn_other_module()
    # Only the local 'Some warning' must have been recorded.
    assert_equal(len(sup.log), 1)
    assert_equal(sup.log[0].message.args[0], 'Some warning')
    assert_warn_len_equal(my_mod, 0, py37=0)
    # Filtering on the fresh module itself must keep its registry clean.
    sup = suppress_warnings()
    sup.filter(module=my_mod)
    with sup:
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 0)
    # ... also when the same filter is installed a second time.
    sup.filter(module=my_mod)
    with sup:
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 0)
    # With no module filter, simplefilter('ignore') inside the context is
    # expected to leave one registry entry (0 on py37+, per the helper's
    # ``py37`` argument).
    with suppress_warnings():
        warnings.simplefilter('ignore')
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 1, py37=0)
def RegisterConfig(model_name):
    """Return a decorator that registers its target under *model_name*.

    The decorated object is stored in the module-level ``CONFIG_REGISTRY``
    mapping and returned unmodified, so the decorator is transparent.
    """
    def _register(target):
        CONFIG_REGISTRY[model_name] = target
        return target
    return _register
class IsotopeNumberDensity(ProcessingPlasmaProperty):
    """Plasma property that computes per-isotope number densities."""
    # Name under which the computed quantity is exposed by the framework.
    outputs = ('isotope_number_density',)
    latex_name = ('N_{i}',)
    def calculate(isotope_mass, isotope_abundance, density):
        # NOTE(review): no ``self`` parameter — the plasma framework appears
        # to invoke ``calculate`` with the input properties only; confirm
        # against ProcessingPlasmaProperty's calling convention.
        # Mass density contributed by each isotope.
        number_densities = (isotope_abundance * density)
        # Convert mass density to number density by dividing by isotope mass.
        isotope_number_density_array = (number_densities.to_numpy() / isotope_mass.to_numpy())
        return pd.DataFrame(isotope_number_density_array, index=isotope_abundance.index)
class PortugueseStemmer(_StandardStemmer):
    """Snowball stemmer for the Portuguese language.

    NOTE(review): the constant tables below appear to have lost their
    diacritics during extraction/transcoding — e.g. ``__vowels`` repeats the
    plain vowels where the Snowball definition lists accented ones, and
    ``__step4_suffixes`` and the ``endswith(('e', 'e', 'e'))`` test contain
    exact duplicates.  Restore the accented characters from the original
    Snowball tables before relying on this class.
    """
    # Vowels used to delimit the R1/R2 and RV regions.
    __vowels = 'aeiouaeiouaeo'
    # Step 1: standard (noun/adjective) suffixes, longest first.
    __step1_suffixes = ('amentos', 'imentos', 'uciones', 'amento', 'imento', 'adoras', 'adores', 'aco~es', 'logias', 'encias', 'amente', 'idades', 'ismos', 'istas', 'adora', 'aca~o', 'antes', 'ancia', 'logia', 'ucion', 'encia', 'mente', 'idade', 'ezas', 'icos', 'icas', 'ismo', 'avel', 'ivel', 'ista', 'osos', 'osas', 'ador', 'ante', 'ivas', 'ivos', 'iras', 'eza', 'ico', 'ica', 'oso', 'osa', 'iva', 'ivo', 'ira')
    # Step 2: verb suffixes, tried only when step 1 removed nothing.
    __step2_suffixes = ('ariamos', 'eriamos', 'iriamos', 'assemos', 'essemos', 'issemos', 'arieis', 'erieis', 'irieis', 'asseis', 'esseis', 'isseis', 'aramos', 'eramos', 'iramos', 'avamos', 'aremos', 'eremos', 'iremos', 'ariam', 'eriam', 'iriam', 'assem', 'essem', 'issem', 'ara~o', 'era~o', 'ira~o', 'arias', 'erias', 'irias', 'ardes', 'erdes', 'irdes', 'asses', 'esses', 'isses', 'astes', 'estes', 'istes', 'areis', 'areis', 'ereis', 'ereis', 'ireis', 'ireis', 'aveis', 'iamos', 'armos', 'ermos', 'irmos', 'aria', 'eria', 'iria', 'asse', 'esse', 'isse', 'aste', 'este', 'iste', 'arei', 'erei', 'irei', 'aram', 'eram', 'iram', 'avam', 'arem', 'erem', 'irem', 'ando', 'endo', 'indo', 'adas', 'idas', 'aras', 'aras', 'eras', 'eras', 'iras', 'avas', 'ares', 'eres', 'ires', 'ieis', 'ados', 'idos', 'amos', 'amos', 'emos', 'imos', 'iras', 'ada', 'ida', 'ara', 'ara', 'era', 'era', 'ira', 'ava', 'iam', 'ado', 'ido', 'ias', 'ais', 'eis', 'ira', 'ia', 'ei', 'am', 'em', 'ar', 'er', 'ir', 'as', 'es', 'is', 'eu', 'iu', 'ou')
    # Step 4: residual single-vowel suffixes removed from RV.
    __step4_suffixes = ('os', 'a', 'i', 'o', 'a', 'i', 'o')
    def stem(self, word):
        """Return the stem of *word* (result is lowercase)."""
        word = word.lower()
        step1_success = False
        step2_success = False
        # Encode nasal vowels as two-character 'a~'/'o~' sequences so the
        # suffix tests work on ASCII; reverted at the end of the method.
        # NOTE(review): with the diacritics stripped this rewrites EVERY
        # 'a'/'o'; originally only the accented vowels were encoded.
        word = word.replace('a', 'a~').replace('o', 'o~')
        # R1/R2 and RV regions per the standard Snowball definitions.
        (r1, r2) = self._r1r2_standard(word, self.__vowels)
        rv = self._rv_standard(word, self.__vowels)
        # STEP 1: standard suffix removal.  Every deletion is mirrored on
        # the r1/r2/rv shadow strings so later region tests stay aligned.
        for suffix in self.__step1_suffixes:
            if word.endswith(suffix):
                if ((suffix == 'amente') and r1.endswith(suffix)):
                    step1_success = True
                    word = word[:(- 6)]
                    r2 = r2[:(- 6)]
                    rv = rv[:(- 6)]
                    # Delete a preceding 'iv' (then 'at'), or 'os'/'ic'/'ad'.
                    if r2.endswith('iv'):
                        word = word[:(- 2)]
                        r2 = r2[:(- 2)]
                        rv = rv[:(- 2)]
                        if r2.endswith('at'):
                            word = word[:(- 2)]
                            rv = rv[:(- 2)]
                    elif r2.endswith(('os', 'ic', 'ad')):
                        word = word[:(- 2)]
                        rv = rv[:(- 2)]
                elif ((suffix in ('ira', 'iras')) and rv.endswith(suffix) and (word[((- len(suffix)) - 1):(- len(suffix))] == 'e')):
                    # 'eira(s)' endings are replaced by 'ir'.
                    step1_success = True
                    word = ''.join((word[:(- len(suffix))], 'ir'))
                    rv = ''.join((rv[:(- len(suffix))], 'ir'))
                elif r2.endswith(suffix):
                    step1_success = True
                    if (suffix in ('logia', 'logias')):
                        # Replace with 'log' (delete the trailing 'ia'/'ias' part).
                        word = word[:(- 2)]
                        rv = rv[:(- 2)]
                    elif (suffix in ('ucion', 'uciones')):
                        # Replace with 'u'.
                        word = ''.join((word[:(- len(suffix))], 'u'))
                        rv = ''.join((rv[:(- len(suffix))], 'u'))
                    elif (suffix in ('encia', 'encias')):
                        # Replace with 'ente'.
                        word = ''.join((word[:(- len(suffix))], 'ente'))
                        rv = ''.join((rv[:(- len(suffix))], 'ente'))
                    elif (suffix == 'mente'):
                        word = word[:(- 5)]
                        r2 = r2[:(- 5)]
                        rv = rv[:(- 5)]
                        if r2.endswith(('ante', 'avel', 'ivel')):
                            word = word[:(- 4)]
                            rv = rv[:(- 4)]
                    elif (suffix in ('idade', 'idades')):
                        word = word[:(- len(suffix))]
                        r2 = r2[:(- len(suffix))]
                        rv = rv[:(- len(suffix))]
                        if r2.endswith(('ic', 'iv')):
                            word = word[:(- 2)]
                            rv = rv[:(- 2)]
                        elif r2.endswith('abil'):
                            word = word[:(- 4)]
                            rv = rv[:(- 4)]
                    elif (suffix in ('iva', 'ivo', 'ivas', 'ivos')):
                        word = word[:(- len(suffix))]
                        r2 = r2[:(- len(suffix))]
                        rv = rv[:(- len(suffix))]
                        if r2.endswith('at'):
                            word = word[:(- 2)]
                            rv = rv[:(- 2)]
                    else:
                        word = word[:(- len(suffix))]
                        rv = rv[:(- len(suffix))]
                break
        # STEP 2: verb suffixes, only if step 1 changed nothing.
        if (not step1_success):
            for suffix in self.__step2_suffixes:
                if rv.endswith(suffix):
                    step2_success = True
                    word = word[:(- len(suffix))]
                    rv = rv[:(- len(suffix))]
                    break
        # STEP 3: delete a final 'i' preceded by 'c'.
        if (step1_success or step2_success):
            if (rv.endswith('i') and (word[(- 2)] == 'c')):
                word = word[:(- 1)]
                rv = rv[:(- 1)]
        # STEP 4: residual suffix, only when neither step 1 nor 2 applied.
        if ((not step1_success) and (not step2_success)):
            for suffix in self.__step4_suffixes:
                if rv.endswith(suffix):
                    word = word[:(- len(suffix))]
                    rv = rv[:(- len(suffix))]
                    break
        # STEP 5: residual form — final 'e' variants and 'gu'/'ci' cleanup.
        if rv.endswith(('e', 'e', 'e')):
            word = word[:(- 1)]
            rv = rv[:(- 1)]
            if ((word.endswith('gu') and rv.endswith('u')) or (word.endswith('ci') and rv.endswith('i'))):
                word = word[:(- 1)]
        elif word.endswith('c'):
            word = ''.join((word[:(- 1)], 'c'))
        # Undo the nasal-vowel encoding applied at the top.
        word = word.replace('a~', 'a').replace('o~', 'o')
        return word
# NOTE(review): the bare string expression below is a no-op; it looks like
# the argument of a registration decorator whose call part was lost during
# extraction (e.g. ``@WordStemmer.register('pass_through')``) — restore it
# from the original source, otherwise this stemmer is never registered.
('pass_through')
class PassThroughWordStemmer(WordStemmer):
    """Word stemmer that performs no stemming at all."""
    def stem_word(self, word: Token) -> Token:
        """Return *word* unchanged."""
        return word
def indentedBlock(blockStatementExpr, indentStack, indent=True):
    """Define a pyparsing expression for an indentation-delimited block.

    :param blockStatementExpr: expression for an individual block statement
    :param indentStack: mutable list of indentation column numbers; share a
        single list across all ``indentedBlock`` expressions of one grammar
    :param indent: when True the block must be indented beyond the current
        level (INDENT/UNDENT enforced); when False statements are matched
        as peers of the current level only
    :return: a ``Group`` expression matching the whole indented block
    """
    # Snapshot the stack so a failed parse attempt can restore it.
    backup_stack = indentStack[:]
    def reset_stack():
        indentStack[:] = backup_stack
    def checkPeerIndent(s, l, t):
        # Nothing to verify at end of input.
        if (l >= len(s)):
            return
        curCol = col(l, s)
        if (curCol != indentStack[(- 1)]):
            if (curCol > indentStack[(- 1)]):
                raise ParseException(s, l, 'illegal nesting')
            raise ParseException(s, l, 'not a peer entry')
    def checkSubIndent(s, l, t):
        curCol = col(l, s)
        if (curCol > indentStack[(- 1)]):
            # Deeper indentation opens a nested level.
            indentStack.append(curCol)
        else:
            raise ParseException(s, l, 'not a subentry')
    def checkUnindent(s, l, t):
        if (l >= len(s)):
            return
        curCol = col(l, s)
        if (not (indentStack and (curCol in indentStack))):
            raise ParseException(s, l, 'not an unindent')
        if (curCol < indentStack[(- 1)]):
            indentStack.pop()
    # NL consumes line endings (and indent-only whitespace) between statements.
    NL = OneOrMore(LineEnd().setWhitespaceChars('\t ').suppress(), stopOn=StringEnd())
    INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT')
    PEER = Empty().setParseAction(checkPeerIndent).setName('')
    UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT')
    if indent:
        smExpr = Group((((Optional(NL) + INDENT) + OneOrMore(((PEER + Group(blockStatementExpr)) + Optional(NL)), stopOn=StringEnd())) + UNDENT))
    else:
        smExpr = Group(((Optional(NL) + OneOrMore(((PEER + Group(blockStatementExpr)) + Optional(NL)), stopOn=StringEnd())) + UNDENT))
    # On failure, restore the caller's indentation stack.
    smExpr.setFailAction((lambda a, b, c, d: reset_stack()))
    # Allow backslash line-continuations inside block statements.
    blockStatementExpr.ignore((_bslash + LineEnd()))
    return smExpr.setName('indented block')
def generate_node_procs(parallel, net_size, naming_func):
    """Map node names to the index of the process hosting each node.

    Nodes are split into contiguous, equally sized groups, one per process.

    :param parallel: falsy for a single process; otherwise an indexable
        whose third element is the process count
    :param net_size: total number of nodes
    :param naming_func: callable mapping a node index to its name
    :return: dict of ``{node_name: process_index}``
    """
    num_procs = int(parallel[2]) if parallel else 1
    group_size = net_size / num_procs
    return {naming_func(idx): int(idx // group_size) for idx in range(net_size)}
def _base_ring_to_fraction_field(S):
    """Return a twisted polynomial ring like ``S`` over the fraction field
    of ``S``'s base ring.

    If the base ring is already a field, ``S`` itself is returned unchanged.

    NOTE(review): ``sigmaQ`` extends the twisting morphism of ``S`` to the
    fraction field via its images on the base ring's generators — this
    assumes the morphism is determined by those images; confirm for the
    ring classes used here.
    """
    R = S.base_ring()
    if isinstance(R, Field):
        return S
    else:
        Q = R.fraction_field()
        gens = R.gens()
        sigmaS = S.twisting_morphism()
        # Lift sigma to Q generator-by-generator.
        sigmaQ = Q.hom([Q(sigmaS(g)) for g in gens])
        # Sage's bracket construction: Q[x; sigmaQ] with S's variable name.
        return Q[(S.variable_name(), sigmaQ)]
def hillman_grassl(M):
    """Apply the Hillman–Grassl correspondence to the filling ``M``.

    ``M`` is a filling of a partition shape, given as a list of rows of
    nonnegative integers; the result is a filling of the same shape built
    by tracing one lattice path per unit of each entry of ``M``.

    NOTE(review): relies on a module-level ``transpose`` helper not visible
    here; assumed to transpose the (possibly ragged) list of rows.
    """
    lam = [len(row) for row in M]  # the partition: row lengths of M
    l = len(lam)
    Mt = transpose(M)
    # Expand M into a multiset of cell coordinates (r, j): an entry e at
    # (r, j) contributes e copies.  Columns are scanned left to right and
    # each column's contributions are appended in reversed row order.
    hook_mults = []
    for (j, col_j) in enumerate(Mt):
        col_j_hook_mults = []
        for (r, entry) in enumerate(col_j):
            if (entry != 0):
                col_j_hook_mults += ([(r, j)] * entry)
        hook_mults += reversed(col_j_hook_mults)
    res = [([0] * rowlen) for rowlen in lam]
    # Process the coordinates in reverse.  For each (r, s), walk from the
    # last cell of row r: increment the current cell, step down when the
    # pre-increment value equals the entry below, otherwise step left,
    # stopping once column s has been incremented.
    for (r, s) in reversed(hook_mults):
        i = r
        j = (lam[r] - 1)
        while True:
            old = res[i][j]
            res[i][j] += 1
            if (((i + 1) != l) and (j < lam[(i + 1)]) and (old == res[(i + 1)][j])):
                i += 1
            else:
                if (j == s):
                    break
                j -= 1
    return res
class TestFiltering():
    """Tests for ``filtering.filter`` on a small synthetic trajectory set."""

    def setup_method(self):
        """Build a 20-point, 6-user trajectory over 4 distinct locations."""
        # Column names come from the library's constants module.
        latitude = constants.LATITUDE
        longitude = constants.LONGITUDE
        date_time = constants.DATETIME
        user_id = constants.UID
        lat_lons = np.array([[43.8430139, 10.507994], [43.54427, 10.32615], [43.70853, 10.4036], [43.77925, 11.24626], [43.8430139, 10.507994], [43.70853, 10.4036], [43.8430139, 10.507994], [43.54427, 10.32615], [43.54427, 10.32615], [43.70853, 10.4036], [43.8430139, 10.507994], [43.77925, 11.24626], [43.70853, 10.4036], [43.54427, 10.32615], [43.77925, 11.24626], [43.70853, 10.4036], [43.77925, 11.24626], [43.8430139, 10.507994], [43.8430139, 10.507994], [43.54427, 10.32615]])
        traj = pd.DataFrame(lat_lons, columns=[latitude, longitude])
        traj[date_time] = pd.to_datetime([' 8:34:04', ' 9:34:04', ' 10:34:04', ' 10:34:04', ' 8:34:04', ' 9:34:04', ' 10:34:04', ' 11:34:04', ' 8:34:04', ' 9:34:04', ' 10:34:04', ' 11:34:04', ' 10:34:04', ' 11:34:04', ' 12:34:04', ' 10:34:04', ' 11:34:04', ' 12:34:04', ' 10:34:04', ' 11:34:04'])
        traj[user_id] = ((((([1 for _ in range(4)] + [2 for _ in range(4)]) + [3 for _ in range(4)]) + [4 for _ in range(3)]) + [5 for _ in range(3)]) + [6 for _ in range(2)])
        self.unique_points = [(43.54427, 10.32615), (43.70853, 10.4036), (43.77925, 11.24626), (43.843014, 10.507994)]
        self.traj = traj.sort_values([user_id, date_time])
        self.trjdat = TrajDataFrame(traj, user_id=user_id)

    def _assert_equal_ignoring_index(self, output, expected):
        """Compare two frames after discarding their original row index."""
        # Equivalent to the previous reset_index + drop('index') dance,
        # without materializing the intermediate 'index' column.
        output = output.reset_index(drop=True)
        expected = expected.reset_index(drop=True)
        pd.testing.assert_frame_equal(output, expected)

    def test_filter(self):
        """Speed filtering drops exactly the five too-fast points."""
        # Low speed threshold: rows 1, 5, 9, 13, 16 exceed 10 km/h.
        output = filtering.filter(self.trjdat, max_speed_kmh=10.0)
        expected = self.trjdat.drop([1, 5, 9, 13, 16])
        self._assert_equal_ignoring_index(output, expected)
        # High threshold: nothing is filtered out.
        output = filtering.filter(self.trjdat, max_speed_kmh=120.0)
        expected = self.trjdat
        pd.testing.assert_frame_equal(output, expected)
        # max_loop / ratio_max variants yield the same filtered result here.
        output = filtering.filter(self.trjdat, max_speed_kmh=10.0, max_loop=1)
        expected = self.trjdat.drop([1, 5, 9, 13, 16])
        self._assert_equal_ignoring_index(output, expected)
        output = filtering.filter(self.trjdat, max_speed_kmh=10.0, ratio_max=0.9)
        expected = self.trjdat.drop([1, 5, 9, 13, 16])
        self._assert_equal_ignoring_index(output, expected)
def calculate_fid_given_paths(paths, batch_size, device, dims, num_workers=1):
    """Compute the FID between the two image sets located at ``paths``.

    :param paths: pair of paths, each an image directory (or statistics
        file accepted by ``compute_statistics_of_path``)
    :param batch_size: batch size used when extracting Inception features
    :param device: torch device the InceptionV3 model runs on
    :param dims: Inception feature dimensionality; must be a key of
        ``InceptionV3.BLOCK_INDEX_BY_DIM``
    :param num_workers: DataLoader worker count
    :return: the Frechet Inception Distance between the two sets
    :raises RuntimeError: if either path does not exist
    """
    for p in paths:
        if not os.path.exists(p):
            raise RuntimeError('Invalid path: %s' % p)
    # (Removed a leftover debug ``print(dims)`` here.)
    # Select the Inception block producing features of the requested size.
    block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
    model = InceptionV3([block_idx]).to(device)
    (m1, s1) = compute_statistics_of_path(paths[0], model, batch_size, dims, device, num_workers)
    (m2, s2) = compute_statistics_of_path(paths[1], model, batch_size, dims, device, num_workers)
    fid_value = calculate_frechet_distance(m1, s1, m2, s2)
    return fid_value
def bench_and_check(bench):
    """Wrap *bench* in a checker for its numeric result.

    :param bench: callable executing the benchmark for a given query
    :return: a function ``check(query, expected)`` that runs
        ``bench(query)`` and asserts the result equals ``expected``
        to 6 decimal places
    """
    def _check(query, expected):
        actual = bench(query)
        np.testing.assert_almost_equal(actual, expected, decimal=6)
    return _check
def load_caltech101silhouettes(args, **kwargs):
    """Load the Caltech-101 Silhouettes (28x28, split 1) dataset.

    Mutates ``args`` (``input_size``/``input_type``/``dynamic_binarization``)
    and returns ``(train_loader, val_loader, test_loader, args)``.
    """
    args.input_size = [1, 28, 28]
    args.input_type = 'binary'
    args.dynamic_binarization = False
    def reshape_data(data):
        # Flatten each 28x28 image in Fortran (column-major) order.
        return data.reshape(((- 1), 28, 28)).reshape(((- 1), (28 * 28)), order='F')
    caltech_raw = loadmat(os.path.join('data', 'Caltech101Silhouettes', 'caltech101_silhouettes_28_split1.mat'))
    # ``1.0 - x`` inverts the binary pixel values.
    x_train = (1.0 - reshape_data(caltech_raw['train_data'].astype('float32')))
    # NOTE(review): the feature arrays are shuffled in place while the label
    # arrays below are taken unshuffled, so x/y pairs no longer correspond.
    # Presumably the labels are unused by the (unsupervised) training loop —
    # confirm before relying on them.
    np.random.shuffle(x_train)
    x_val = (1.0 - reshape_data(caltech_raw['val_data'].astype('float32')))
    np.random.shuffle(x_val)
    x_test = (1.0 - reshape_data(caltech_raw['test_data'].astype('float32')))
    y_train = caltech_raw['train_labels']
    y_val = caltech_raw['val_labels']
    y_test = caltech_raw['test_labels']
    # Wrap each split in a TensorDataset/DataLoader; only training shuffles.
    train = data_utils.TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train))
    train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs)
    validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val))
    val_loader = data_utils.DataLoader(validation, batch_size=args.batch_size, shuffle=False, **kwargs)
    test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test))
    test_loader = data_utils.DataLoader(test, batch_size=args.batch_size, shuffle=False, **kwargs)
    return (train_loader, val_loader, test_loader, args)
def parse_args():
    """Parse the command line for the corruption-benchmark test script.

    :return: the parsed ``argparse.Namespace``; also propagates
        ``--local_rank`` into the ``LOCAL_RANK`` environment variable
        for distributed launchers.
    """
    parser = argparse.ArgumentParser(description='MMDet test detector')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--out', help='output result file')
    parser.add_argument('--corruptions', type=str, nargs='+', default='benchmark', choices=['all', 'benchmark', 'noise', 'blur', 'weather', 'digital', 'holdout', 'None', 'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog', 'brightness', 'contrast', 'elastic_transform', 'pixelate', 'jpeg_compression', 'speckle_noise', 'gaussian_blur', 'spatter', 'saturate'], help='corruptions')
    parser.add_argument('--severities', type=int, nargs='+', default=[0, 1, 2, 3, 4, 5], help='corruption severity levels')
    parser.add_argument('--eval', type=str, nargs='+', choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'], help='eval types')
    parser.add_argument('--iou-thr', type=float, default=0.5, help='IoU threshold for pascal voc evaluation')
    # BUG FIX: ``type=bool`` treats any non-empty string (including "False")
    # as True; parse the string explicitly so '--summaries False' works.
    parser.add_argument('--summaries', type=lambda v: v.lower() in ('yes', 'true', 't', '1'), default=False, help='Print summaries for every corruption and severity')
    parser.add_argument('--workers', type=int, default=32, help='workers per gpu')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument('--show-dir', help='directory where painted images will be saved')
    parser.add_argument('--show-score-thr', type=float, default=0.3, help='score threshold (default: 0.3)')
    parser.add_argument('--tmpdir', help='tmp dir for writing some results')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('--final-prints', type=str, nargs='+', choices=['P', 'mPC', 'rPC'], default='mPC', help='corruption benchmark metric to print at the end')
    parser.add_argument('--final-prints-aggregate', type=str, choices=['all', 'benchmark'], default='benchmark', help='aggregate all results or only those for benchmark corruptions')
    parser.add_argument('--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed.')
    args = parser.parse_args()
    # Mirror --local_rank for tools that read it from the environment.
    if ('LOCAL_RANK' not in os.environ):
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
def test_categorical_encoder_saving(tmpdir):
    """Round-trip CategoricalEncoder save/load: custom starting index,
    tuple labels with an explicit negative index, and unk-label handling."""
    from speechbrain.dataio.encoder import CategoricalEncoder
    # --- custom starting index survives a save/load cycle -----------------
    encoder = CategoricalEncoder(starting_index=3)
    encoding_file = (tmpdir / 'char_encoding.txt')
    # First run in a fresh tmpdir: the file cannot exist, so the encoding
    # is built and saved; hitting the else-branch would mean a stale file.
    if (not encoder.load_if_possible(encoding_file)):
        encoder.update_from_iterable('abcd')
        encoder.save(encoding_file)
    else:
        assert False
    encoder = CategoricalEncoder()
    encoder.expect_len(4)
    if (not encoder.load_if_possible(encoding_file)):
        assert False
    integers = encoder.encode_sequence('dcba')
    assert all((isinstance(i, int) for i in integers))
    # The reloaded encoder must have recovered the custom starting index.
    assert (encoder.starting_index == 3)
    # --- tuple labels and a negative insertion index also round-trip ------
    encoder = CategoricalEncoder()
    encoding_file = (tmpdir / 'tuple_encoding.txt')
    encoder.add_label((1, 2, 3))
    encoder.insert_label((1, 2), index=(- 1))
    encoder.save(encoding_file)
    encoder = CategoricalEncoder()
    encoder.expect_len(2)
    assert encoder.load_if_possible(encoding_file)
    assert (encoder.encode_label((1, 2)) == (- 1))
    # --- unk label: unseen symbols map back to 'UNKNOWN' after reload -----
    encoder = CategoricalEncoder(unk_label='UNKNOWN')
    encoding_file = (tmpdir / 'unk_encoding.txt')
    encoder.update_from_iterable('abc')
    encoder.save(encoding_file)
    encoder = CategoricalEncoder()
    encoder.expect_len(4)
    assert encoder.load_if_possible(encoding_file)
    assert (encoder.encode_label('a') == 1)
    assert (encoder.decode_ndim(encoder.encode_label('d')) == 'UNKNOWN')
    # Adding the unk label manually before loading behaves the same way.
    encoder = CategoricalEncoder()
    encoder.add_unk()
    encoder.expect_len(4)
    assert encoder.load_if_possible(encoding_file)
    assert (encoder.encode_label('a') == 1)
    assert (encoder.decode_ndim(encoder.encode_label('d')) == 'UNKNOWN')
# NOTE(review): the bare ``.experimental`` below is not valid Python on its
# own; it looks like the remnant of a decorator whose object part was lost
# during extraction (e.g. ``@pytest.mark.experimental``) — restore it from
# the original source.
.experimental
def test_predict_pairs_warm_items_only(log, log_to_pred):
    """predict_pairs must agree with predict on warm user/item pairs.

    ``log`` and ``log_to_pred`` are Spark DataFrame fixtures with
    ``user_idx``/``item_idx`` (and ``relevance``) columns.
    """
    model = MultVAE()
    model.fit(log)
    # Recommendations for exactly the users/items of the held-out
    # interactions, without filtering items already seen.
    recs = model.predict(log.unionByName(log_to_pred), k=3, users=log_to_pred.select('user_idx').distinct(), items=log_to_pred.select('item_idx').distinct(), filter_seen_items=False)
    pairs_pred = model.predict_pairs(pairs=log_to_pred.select('user_idx', 'item_idx'), log=log.unionByName(log_to_pred))
    # Item ids 4 and 5 are excluded from the expectation — presumably the
    # cold items of the fixtures (confirm); likewise user 4 when the model
    # cannot score cold users.
    condition = (~ sf.col('item_idx').isin([4, 5]))
    if (not model.can_predict_cold_users):
        condition = (condition & (sf.col('user_idx') != 4))
    sparkDataFrameEqual(pairs_pred.select('user_idx', 'item_idx'), log_to_pred.filter(condition).select('user_idx', 'item_idx'))
    # Scores from predict_pairs must match the corresponding predict scores.
    recs_joined = pairs_pred.withColumnRenamed('relevance', 'pairs_relevance').join(recs, on=['user_idx', 'item_idx'], how='left').sort('user_idx', 'item_idx')
    assert np.allclose(recs_joined.select('relevance').toPandas().to_numpy(), recs_joined.select('pairs_relevance').toPandas().to_numpy())
def register_Ns3Asn1Header_methods(root_module, cls):
    """Register the constructors and methods of ``ns3::Asn1Header`` on *cls*.

    pybindgen registration code; the many per-bitset-width overloads are
    emitted via loops, producing exactly the same sequence of
    ``add_constructor``/``add_method`` calls with identical arguments.
    """
    cls.add_constructor([param('ns3::Asn1Header const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'bIterator')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('PreSerialize', 'void', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'bIterator')], is_const=True, is_virtual=True)
    cls.add_method('DeserializeBitset', 'ns3::Buffer::Iterator', [param('std::bitset< 8 > *', 'data'), param('ns3::Buffer::Iterator', 'bIterator')], visibility='protected')
    # DeserializeBitstring overloads, one per bitstring width.
    for width in (1, 2, 8, 10, 16, 27, 28, 32):
        cls.add_method('DeserializeBitstring', 'ns3::Buffer::Iterator', [param(('std::bitset< %d > *' % width), 'bitstring'), param('ns3::Buffer::Iterator', 'bIterator')], visibility='protected')
    cls.add_method('DeserializeBoolean', 'ns3::Buffer::Iterator', [param('bool *', 'value'), param('ns3::Buffer::Iterator', 'bIterator')], visibility='protected')
    cls.add_method('DeserializeChoice', 'ns3::Buffer::Iterator', [param('int', 'numOptions'), param('bool', 'isExtensionMarkerPresent'), param('int *', 'selectedOption'), param('ns3::Buffer::Iterator', 'bIterator')], visibility='protected')
    cls.add_method('DeserializeEnum', 'ns3::Buffer::Iterator', [param('int', 'numElems'), param('int *', 'selectedElem'), param('ns3::Buffer::Iterator', 'bIterator')], visibility='protected')
    cls.add_method('DeserializeInteger', 'ns3::Buffer::Iterator', [param('int *', 'n'), param('int', 'nmin'), param('int', 'nmax'), param('ns3::Buffer::Iterator', 'bIterator')], visibility='protected')
    cls.add_method('DeserializeNull', 'ns3::Buffer::Iterator', [param('ns3::Buffer::Iterator', 'bIterator')], visibility='protected')
    # DeserializeSequence overloads over the optional/default mask widths.
    for width in (0, 1, 2, 3, 4, 5, 6, 7, 9, 10, 11):
        cls.add_method('DeserializeSequence', 'ns3::Buffer::Iterator', [param(('std::bitset< %d > *' % width), 'optionalOrDefaultMask'), param('bool', 'isExtensionMarkerPresent'), param('ns3::Buffer::Iterator', 'bIterator')], visibility='protected')
    cls.add_method('DeserializeSequenceOf', 'ns3::Buffer::Iterator', [param('int *', 'numElems'), param('int', 'nMax'), param('int', 'nMin'), param('ns3::Buffer::Iterator', 'bIterator')], visibility='protected')
    cls.add_method('FinalizeSerialization', 'void', [], is_const=True, visibility='protected')
    # SerializeBitstring overloads mirror the deserialization widths.
    for width in (1, 2, 8, 10, 16, 27, 28, 32):
        cls.add_method('SerializeBitstring', 'void', [param(('std::bitset< %d >' % width), 'bitstring')], is_const=True, visibility='protected')
    cls.add_method('SerializeBoolean', 'void', [param('bool', 'value')], is_const=True, visibility='protected')
    cls.add_method('SerializeChoice', 'void', [param('int', 'numOptions'), param('int', 'selectedOption'), param('bool', 'isExtensionMarkerPresent')], is_const=True, visibility='protected')
    cls.add_method('SerializeEnum', 'void', [param('int', 'numElems'), param('int', 'selectedElem')], is_const=True, visibility='protected')
    cls.add_method('SerializeInteger', 'void', [param('int', 'n'), param('int', 'nmin'), param('int', 'nmax')], is_const=True, visibility='protected')
    cls.add_method('SerializeNull', 'void', [], is_const=True, visibility='protected')
    # SerializeSequence overloads mirror the deserialization mask widths.
    for width in (0, 1, 2, 3, 4, 5, 6, 7, 9, 10, 11):
        cls.add_method('SerializeSequence', 'void', [param(('std::bitset< %d >' % width), 'optionalOrDefaultMask'), param('bool', 'isExtensionMarkerPresent')], is_const=True, visibility='protected')
    cls.add_method('SerializeSequenceOf', 'void', [param('int', 'numElems'), param('int', 'nMax'), param('int', 'nMin')], is_const=True, visibility='protected')
    cls.add_method('WriteOctet', 'void', [param('uint8_t', 'octet')], is_const=True, visibility='protected')
    return
def _report_and_log(message):
    """Echo *message* both to stdout and to the module logger."""
    print(message)
    logger.info(message)

def _folds_are_valid(fold_nums, split_name):
    """Return True when every entry of *fold_nums* is an acceptable fold id;
    report the offending split otherwise."""
    for fold_num in fold_nums:
        if fold_num not in ACCEPTABLE_FOLD_NUMS:
            _report_and_log(f'{split_name} fold numbers {fold_nums}, contains an invalid value. Must be in {ACCEPTABLE_FOLD_NUMS}')
            return False
    return True

def prepare_esc50(data_folder, audio_data_folder, save_json_train, save_json_valid, save_json_test, train_fold_nums=[1, 2, 3], valid_fold_nums=[4], test_fold_nums=[5], skip_manifest_creation=False):
    """Download ESC-50 (if needed) and create train/valid/test JSON manifests.

    :param data_folder: root folder of the ESC-50 download
    :param audio_data_folder: folder containing the audio clips
    :param save_json_train: output path of the training manifest
    :param save_json_valid: output path of the validation manifest
    :param save_json_test: output path of the test manifest
    :param train_fold_nums: fold id(s) for training (int or list of ints)
    :param valid_fold_nums: fold id(s) for validation (int or list of ints)
    :param test_fold_nums: fold id(s) for testing (int or list of ints)
    :param skip_manifest_creation: when True, stop after the download step
    :return: None; returns early (without creating manifests) when fold
        arguments are invalid or overlap
    """
    download_esc50(data_folder)
    # Allow a bare int wherever a list of fold numbers is expected.
    if type(train_fold_nums) is int:
        train_fold_nums = [train_fold_nums]
    if type(valid_fold_nums) is int:
        valid_fold_nums = [valid_fold_nums]
    if type(test_fold_nums) is int:
        test_fold_nums = [test_fold_nums]
    if not _folds_are_valid(train_fold_nums, 'Train'):
        return
    if not _folds_are_valid(valid_fold_nums, 'Validation'):
        return
    if not _folds_are_valid(test_fold_nums, 'Test'):
        return
    # The splits must not share folds, otherwise evaluation would leak.
    if folds_overlap(train_fold_nums, valid_fold_nums):
        _report_and_log(f'Train {train_fold_nums}, and Valid {valid_fold_nums} folds must be mutually exclusive!')
        return
    if folds_overlap(train_fold_nums, test_fold_nums):
        _report_and_log(f'Train {train_fold_nums} and Test {test_fold_nums} folds must be mutually exclusive!')
        return
    if skip_manifest_creation is True:
        return
    # Build (or reuse) the SpeechBrain-specific metadata CSV, then emit one
    # JSON manifest per split.
    esc50_speechbrain_metadata_csv_path = os.path.join(os.path.abspath(data_folder), 'metadata/', MODIFIED_METADATA_FILE_NAME)
    if not os.path.exists(esc50_speechbrain_metadata_csv_path):
        esc50_speechbrain_metadata_csv_path = create_metadata_speechbrain_file(data_folder)
    metadata = load_data_csv(esc50_speechbrain_metadata_csv_path)
    logger.info(f'Creating {save_json_train}, {save_json_valid}, and {save_json_test}')
    create_json(metadata, audio_data_folder, train_fold_nums, save_json_train)
    create_json(metadata, audio_data_folder, valid_fold_nums, save_json_valid)
    create_json(metadata, audio_data_folder, test_fold_nums, save_json_test)
def gen_normalized_adjs(dataset):
    """Build three degree-normalizations of the dataset's adjacency matrix.

    Returns
    -------
    (DAD, DA, AD) : tuple of SparseTensor
        ``D^-1/2 A D^-1/2``, ``D^-1 A`` and ``A D^-1`` respectively, where
        ``D`` is the diagonal degree matrix of the (row, col) edge index.
    """
    (row, col) = dataset.graph['edge_index']
    N = dataset.graph['num_nodes']
    adj = SparseTensor(row=row, col=col, sparse_sizes=(N, N))
    # Row-sum degrees of the adjacency matrix.
    deg = adj.sum(dim=1).to(torch.float)
    D_isqrt = deg.pow((- 0.5))
    # Isolated nodes have degree 0 -> inf after pow(-0.5); zero them out.
    D_isqrt[(D_isqrt == float('inf'))] = 0
    # Symmetric normalization: scale rows and columns by D^-1/2.
    DAD = ((D_isqrt.view((- 1), 1) * adj) * D_isqrt.view(1, (- 1)))
    # Row normalization: both D^-1/2 factors applied on the row side.
    DA = ((D_isqrt.view((- 1), 1) * D_isqrt.view((- 1), 1)) * adj)
    # Column normalization: both factors applied on the column side.
    AD = ((adj * D_isqrt.view(1, (- 1))) * D_isqrt.view(1, (- 1)))
    return (DAD, DA, AD)
class ECQA():
    """Loader for the ECQA (commonsense QA with explanations) CSV splits."""

    # Column names holding the five answer options, in order.
    _OPTION_COLUMNS = ('q_op1', 'q_op2', 'q_op3', 'q_op4', 'q_op5')

    def __init__(self, data_dir):
        # Pre-compute the per-split CSV locations inside *data_dir*.
        self.train_path = os.path.join(data_dir, 'cqa_data_train.csv')
        self.dev_path = os.path.join(data_dir, 'cqa_data_val.csv')
        self.test_path = os.path.join(data_dir, 'cqa_data_test.csv')

    def get_samples(self, file_path):
        """Read one split CSV and return its rows as sample dicts.

        Each sample carries the row index, question text, the five options,
        the 1-based answer position (as a string) and the gold explanation.
        """
        frame = pandas.read_csv(file_path)
        samples = []
        for row_index, row in frame.iterrows():
            choices = [row[column] for column in self._OPTION_COLUMNS]
            samples.append({
                'index': row_index,
                'question': row['q_text'],
                'options': choices,
                'answer': str(choices.index(row['q_ans']) + 1),
                'gold_explanation': row['taskB'],
            })
        return samples

    def get_train_samples(self):
        """Samples of the training split."""
        return self.get_samples(self.train_path)

    def get_dev_samples(self):
        """Samples of the validation split."""
        return self.get_samples(self.dev_path)

    def get_test_samples(self):
        """Samples of the test split."""
        return self.get_samples(self.test_path)
def test_unflatten_returns_correct_shape() -> None:
    """The unflatten closure restores leading dims [2, 3, 4] for any trailing dims."""
    tensor = tf.random.uniform([2, 3, 4, 5])
    _, unflatten = flatten_leading_dims(tensor)
    # 24 == 2 * 3 * 4, the product of the flattened leading dims.
    rank2 = tf.random.uniform([24, 7])
    rank3 = tf.random.uniform([24, 7, 11])
    npt.assert_array_equal(tf.shape(unflatten(rank2)), [2, 3, 4, 7])
    npt.assert_array_equal(tf.shape(unflatten(rank3)), [2, 3, 4, 7, 11])
class CurriculumTeacher():
    """Runs a fixed, hand-designed curriculum against a teacher environment.

    The curriculum is a list of task-probability vectors; we advance to the
    next vector whenever the environment signals training progress
    (``train_done``) and stop as soon as validation is solved (``val_done``).
    """

    def __init__(self, env, curriculum, writer=None):
        self.env = env
        # List of probability vectors, one per curriculum stage.
        self.curriculum = curriculum
        # Optional summary writer for logging per-task probabilities.
        self.writer = writer

    def teach(self, num_timesteps=2000):
        """Step the environment up to ``num_timesteps`` times.

        Returns the number of model epochs consumed (whether or not
        validation was solved within the budget).
        """
        stage = 0
        last_stage = len(self.curriculum) - 1
        for _ in range(num_timesteps):
            probs = self.curriculum[stage]
            r, train_done, val_done = self.env.step(probs)
            if train_done and stage < last_stage:
                stage += 1
            if val_done:
                return self.env.model.epochs
            if self.writer:
                for action in range(self.env.num_actions):
                    task = (action // self.env.max_digits) + 1
                    digit = (action % self.env.max_digits) + 1
                    add_summary(self.writer,
                                'probabilities/task_%d_%d' % (task, digit),
                                probs[action], self.env.model.epochs)
        return self.env.model.epochs
def _dim_is_scalar_size(dim: Dim) -> bool:
    """Return True when the dim's size is available as a scalar.

    True if the dimension has a static size, or if its dynamic size tensor
    exists and has no dims of its own (i.e. it is a scalar tensor).
    """
    if dim.size is not None:
        # Static size -> trivially scalar.
        return True
    dyn = dim.dyn_size_ext
    if dyn:
        return dyn.dims == ()
    return False
def get_pseudo_label_NRL_for_one_segment_from_scratch(args, node2step, step2node, matched_nodes, G_wikihow, G_howto100m, G_wikihow_tr, G_howto100m_tr, max_hop):
    """Build per-hop, per-direction node-relation pseudo labels for one segment.

    For every hop 1..max_hop and both edge directions ('out' on the original
    graphs, 'in' on their transposes), gathers the step-level neighbors of all
    matched nodes from both graphs, reduces step scores to node scores by
    taking the max (every node starts at 0), ranks nodes by score, and keeps
    the top matches according to the criteria/threshold/topK in ``args``.

    Returns a dict keyed '{hop}-hop-{direction}' mapping to
    ``{'indices': node_ids, 'values': scores}``.
    """
    neighbors_by_direction = {
        'out': get_khop_neighbors_inStepIDs(matched_nodes, node2step, max_hop, G_wikihow, G_howto100m),
        'in': get_khop_neighbors_inStepIDs(matched_nodes, node2step, max_hop, G_wikihow_tr, G_howto100m_tr),
    }
    results = dict()
    for direction_key, khop_neighbors in neighbors_by_direction.items():
        for khop in range(1, max_hop + 1):
            step_ids = []
            step_scores = []
            for node in matched_nodes:
                hop_info = khop_neighbors[node][khop]
                step_ids += hop_info['neis_wikihow'] + hop_info['neis_howto100m']
                step_scores += hop_info['neis_wikihow_scores'] + hop_info['neis_howto100m_scores']
            # Reduce step-level scores to node-level scores by max; the
            # default of 0 mirrors the original zero-initialization.
            node_scores = dict()
            for step_id, score in zip(step_ids, step_scores):
                node_id = step2node[step_id]
                node_scores[node_id] = max(node_scores.get(node_id, 0), score)
            ranked = sorted(node_scores.items(), key=lambda item: item[1], reverse=True)
            (matched_neihgbor_nodes, matched_neihgbor_nodes_scores) = find_matching_of_a_segment_given_sorted_val_corres_idx(
                [score for (_, score) in ranked],
                [node_id for (node_id, _) in ranked],
                criteria=args.label_find_neighbors_criteria,
                threshold=args.label_find_neighbors_thresh,
                topK=args.label_find_neighbors_topK)
            results['{}-hop-{}'.format(khop, direction_key)] = {
                'indices': matched_neihgbor_nodes,
                'values': matched_neihgbor_nodes_scores,
            }
    return results
def make_vocab(filenames, max_vocab_size=(- 1), newline_token=None, return_type='list', return_count=False):
    """Build a frequency-ranked vocabulary from one or more text files.

    Words are sorted by descending count, ties broken alphabetically, so the
    ordering is deterministic.

    Args:
        filenames: a single path or a list/tuple of paths given to ``read_words``.
        max_vocab_size: keep only the most frequent words up to this size; a
            negative value (the default) keeps everything.
        newline_token: forwarded to ``read_words`` (e.g. an explicit '<eos>').
        return_type: 'list' -> tuple of words; 'dict' -> word->id mapping.
        return_count: additionally return the counts (as a parallel tuple for
            'list', or a word->count dict for 'dict').

    Returns:
        words, (words, counts), word_to_id, or (word_to_id, word_to_count),
        depending on ``return_type`` and ``return_count``.

    Raises:
        ValueError: for an unknown ``return_type``.
    """
    if not isinstance(filenames, (list, tuple)):
        filenames = [filenames]
    words: List[str] = []
    for fn in filenames:
        words += read_words(fn, newline_token=newline_token)
    counter = collections.Counter(words)
    # Descending count, then alphabetical for deterministic tie-breaking.
    count_pairs = sorted(counter.items(), key=(lambda x: ((- x[1]), x[0])))
    if count_pairs:
        (words, counts) = zip(*count_pairs)
    else:
        # Empty corpus: zip(*[]) yields nothing, so the tuple unpacking above
        # would raise ValueError. Return empty tuples instead.
        (words, counts) = ((), ())
    if max_vocab_size >= 0:
        words = words[:max_vocab_size]
        counts = counts[:max_vocab_size]
    if return_type == 'list':
        return (words, counts) if return_count else words
    elif return_type == 'dict':
        word_to_id = dict(zip(words, range(len(words))))
        if not return_count:
            return word_to_id
        word_to_count = dict(zip(words, counts))
        return (word_to_id, word_to_count)
    else:
        raise ValueError(f'Unknown return_type: {return_type}')
class GradUnknownPSF(GradPSF):
    """Gradient operator for deconvolution with an unknown, jointly-estimated PSF.

    Each gradient evaluation first takes one proximal gradient step on the PSF
    (regularized toward the initial PSF estimate), then computes the
    data-fidelity gradient with the updated PSF.
    """

    def __init__(self, data, psf, prox, psf_type='fixed', convolve_method='astropy', beta_reg=1, lambda_reg=1):
        """Set up the operator.

        Parameters
        ----------
        data : observed (convolved) data, forwarded to ``GradPSF``.
        psf : initial PSF estimate; a copy is kept as the regularization anchor.
        prox : proximity operator applied to PSF iterates; must expose ``op()``.
        psf_type, convolve_method : forwarded to ``GradPSF``.
        beta_reg : step size of the PSF gradient update.
        lambda_reg : weight of the pull toward the initial PSF in the PSF gradient.
        """
        if (not hasattr(prox, 'op')):
            raise ValueError('prox must have "op()" method')
        self.grad_type = 'psf_unknown'
        # Expose the bound methods under the names the optimizer expects.
        self.get_grad = self._get_grad_method
        self.cost = self._cost_method
        self._prox = prox
        self._beta_reg = beta_reg
        self._lambda_reg = lambda_reg
        # Anchor copy of the initial PSF for the regularization term.
        self._psf0 = np.copy(psf)
        self._convolve_method = convolve_method
        super(GradUnknownPSF, self).__init__(data, psf, psf_type, convolve_method)

    def _update_lambda(self):
        # Intentional no-op hook: subclasses can override this to adapt the
        # regularization weight between iterations.
        self._lambda_reg = self._lambda_reg

    def _update_psf(self, x):
        """Take one proximal gradient step on the PSF estimate given image x."""
        self._update_lambda()
        # Residual correlated with x (rot_kernel=True) plus the lambda-weighted
        # pull toward the initial PSF.
        psf_grad = (convolve_stack((self.op(x) - self.obs_data), x, rot_kernel=True, method=self._convolve_method) + (self._lambda_reg * (self._psf - self._psf0)))
        self._psf = self._prox.op((self._psf - (self._beta_reg * psf_grad)))

    def _get_grad_method(self, x):
        """Update the PSF, then compute the data-fidelity gradient at x."""
        self._update_psf(x)
        self.grad = self._calc_grad(x)

    def _cost_method(self, *args, **kwargs):
        """Return data fidelity plus the PSF deviation penalty for args[0].

        NOTE(review): the PSF penalty here carries no ``lambda_reg`` weight and
        no 0.5 factor, while ``_update_psf`` uses ``lambda_reg * (psf - psf0)``
        — confirm whether the cost is meant to include the same weight.
        """
        cost_val = ((0.5 * (np.linalg.norm((self.obs_data - self.op(args[0]))) ** 2)) + (np.linalg.norm((self._psf - self._psf0)) ** 2))
        if (('verbose' in kwargs) and kwargs['verbose']):
            print(' - DATA FIDELITY + PSF CONSTRAINT (X):', cost_val)
        return cost_val
class Sentence(object):
    """A sentence with either a dependency ('dep') or constituency ('constit') parse.

    Stores tokens/POS/lemmas plus parse-derived caches used for path features:
    root paths, pairwise paths and span statistics for dependency parses;
    constituent spans, paths to the tree root and pairwise leaf LCAs for
    constituency parses.
    """

    def __init__(self, syn_type, elements=None, tokens=None, postags=None, lemmas=None, sentnum=None):
        """Build from parsed token `elements` and/or explicit lists.

        Explicit tokens/postags/lemmas/sentnum arguments override the values
        derived from `elements`. `syn_type` selects which caches are set up:
        'dep' computes dependency structures eagerly; 'constit' only allocates
        empty containers, later filled by get_all_parts_of_ctree.
        """
        if elements:
            self.sent_num = elements[0].sent_num
            self.tokens = [e.form for e in elements]
            self.postags = [e.nltk_pos for e in elements]
            self.lemmas = [e.nltk_lemma for e in elements]
        if sentnum:
            self.sent_num = sentnum
        if tokens:
            self.tokens = tokens
        if postags:
            self.postags = postags
        if lemmas:
            self.lemmas = lemmas
        if (syn_type == 'dep'):
            # Convert 1-based dependency heads to 0-based indices; the root
            # token (head -1 after the shift) is made to point at itself.
            self.depheads = [(e.dephead - 1) for e in elements]
            self.root = None
            for i in range(len(self.depheads)):
                if (self.depheads[i] == (- 1)):
                    self.depheads[i] = i
                    self.root = i
            if (self.root is None):
                raise Exception('root not found!')
            self.deprels = [e.deprel for e in elements]
            # Path from every token up to the root, used by get_common_path.
            self.rootpath = [self.get_path_to_root(i) for i in range(len(self.tokens))]
            self.outheads = self.get_heads_outside()
            # Caches filled lazily by get_all_paths_to / get_all_shortest_paths.
            self.paths = {}
            self.shortest_paths = {}
        elif (syn_type == 'constit'):
            # Constituency-side containers, populated by get_all_parts_of_ctree.
            self.cparse = None
            self.constitspans = {}
            self.crootpaths = {}
            self.leafnodes = []
            self.idxlabelmap = {}
            self.lca = {}
            self.cpaths = {}

    def get_path_to_root(self, node):
        """Return the ancestors of `node` up to and including the root."""
        par = self.depheads[node]
        path = [par]
        while (par != self.root):
            par = self.depheads[par]
            path.append(par)
        return path

    def get_heads_outside(self):
        """For every span (i, j) with i <= j, count tokens in the span whose
        dependency head falls outside it. Returns {(i, j): count}."""
        outheads = {}
        for j in range(len(self.tokens)):
            for i in range((j + 1)):
                outheads[(i, j)] = sum([1 for s in range(i, (j + 1)) if (not (i <= self.depheads[s] <= j))])
        return outheads

    def get_common_path(self, src, dest):
        """Return the dependency path between `src` and `dest` (both exclusive).

        Special-cases direct head relations (empty path) and the case where
        one endpoint lies on the other's root path.
        """
        if ((dest == self.depheads[src]) or (src == self.depheads[dest])):
            return []
        if (dest in self.rootpath[src]):
            # Path up from src, cut where dest's own root path begins.
            return self.rootpath[src][:((- len(self.rootpath[dest])) - 1)]
        if (src in self.rootpath[dest]):
            return self.rootpath[dest][:((- len(self.rootpath[src])) - 1)]
        # General case: walk both root paths top-down until they diverge.
        pathfrom = self.rootpath[src][::(- 1)]
        pathto = self.rootpath[dest][::(- 1)]
        i = 0
        for (n1, n2) in zip(pathfrom, pathto):
            if (n1 == n2):
                i += 1
                continue
            if (n1 == dest):
                return pathfrom[:(i + 1)]
            # Diverged: up from src to the common ancestor, then down to dest.
            return (pathfrom[i:][::(- 1)] + pathto[i:])
        # One path exhausted without divergence.
        if (i == len(pathfrom)):
            return pathto[(i - 1):]
        return pathfrom[(i - 1):][::(- 1)]

    def get_all_paths_to(self, node):
        """Populate self.paths with the path from every other token to `node`,
        then precompute the span-restricted shortest paths to it.

        NOTE(review): self.paths is keyed by (src, dest) pairs, so the int
        membership test below never matches a key — the early-out cache guard
        looks ineffective; confirm intended behavior.
        """
        if (node in self.paths):
            return
        for n in range(len(self.tokens)):
            if ((n != node) and ((n, node) not in self.paths)):
                self.paths[(n, node)] = self.get_common_path(n, node)
        self.get_all_shortest_paths(node)

    def get_all_shortest_paths(self, target):
        """Cache, for every span (i, j), the node set of the shortest path from
        the span to `target`."""
        for j in range(len(self.tokens)):
            for i in range((j + 1)):
                self.shortest_paths[(i, j, target)] = frozenset(self.get_shortest_path_in_span(target, (i, j)))

    def get_shortest_path_in_span(self, target, span):
        """Pick the span token with the shortest cached path to `target` and
        return that path including both endpoints.

        NOTE(review): iterating `span` visits only its two endpoints (i, j),
        not every token between them — confirm this is intentional.
        """
        splen = (len(self.tokens) + 1)
        nodewithsp = span[0]
        for node in span:
            if (node == target):
                return [node]
            if ((node, target) not in self.paths):
                raise Exception('never considered this path', node, span, target)
            if (len(self.paths[(node, target)]) < splen):
                splen = len(self.paths[(node, target)])
                nodewithsp = node
        return (([nodewithsp] + self.paths[(nodewithsp, target)]) + [target])

    def get_all_parts_of_ctree(self, cparse, clabeldict, learn_features):
        """Index a constituency parse.

        Relabels leaves with token positions and internal nodes with fresh
        integer ids (original labels stored via `clabeldict` in idxlabelmap),
        then collects constituent spans and, when `learn_features` is set,
        root paths and pairwise leaf LCAs.
        """
        self.cparse = ParentedTree.fromstring(str(cparse))
        if (len(cparse.leaves()) != len(self.tokens)):
            raise Exception('sentences do not line up!')
        idx = 0
        # Replace each leaf with its token index.
        for pos in self.cparse.treepositions('leaves'):
            self.cparse[pos] = idx
            idx += 1
        # Replace each subtree label with a unique integer id.
        for st in self.cparse.subtrees():
            self.idxlabelmap[idx] = clabeldict.addstr(st.label())
            st.set_label(idx)
            idx += 1
        self.get_all_constit_spans()
        if (not learn_features):
            return
        # Preterminal nodes (height 2), expected one per token in order.
        self.leafnodes = [k for k in self.cparse.subtrees((lambda t: (t.height() == 2)))]
        for a in range(len(self.leafnodes)):
            if (self.leafnodes[a][0] != a):
                raise Exception('order mixup!')
        self.get_cpath_to_root()
        for j in range(len(self.leafnodes)):
            for k in range(j, len(self.leafnodes)):
                (lca, lcaid) = self.get_lca(self.leafnodes[j], self.leafnodes[k])
                self.lca[(j, k)] = (lca, lcaid)

    def get_all_constit_spans(self):
        """Map each (first_token, last_token) pair to the label ids of the
        constituents covering exactly that span."""
        for st in self.cparse.subtrees():
            x = st.flatten()
            span = (x[0], x[(- 1)])
            if (span not in self.constitspans):
                self.constitspans[span] = []
            self.constitspans[span].append(self.idxlabelmap[x.label()])

    def get_cpath_to_root(self):
        """For every subtree (keyed by its integer label), record its ancestor
        chain from the subtree itself up to the tree root."""
        for st in self.cparse.subtrees():
            leaf = st.label()
            self.crootpaths[leaf] = [st]
            if (st == self.cparse.root()):
                continue
            par = st.parent()
            while (par != self.cparse.root()):
                self.crootpaths[leaf].append(par)
                par = par.parent()
            self.crootpaths[leaf].append(par)

    def get_lca(self, src, dest):
        """Return (lowest common ancestor subtree, its label id) of two subtrees."""
        if (src == dest):
            return (src, self.idxlabelmap[src.label()])
        # Root paths reversed to run top-down; walk the shared prefix.
        pathfrom = self.crootpaths[src.label()][::(- 1)]
        pathto = self.crootpaths[dest.label()][::(- 1)]
        common = 0
        for (n1, n2) in zip(pathfrom, pathto):
            if (n1 == n2):
                common += 1
                continue
            # First divergence: the previous shared node is the LCA.
            return (pathfrom[(common - 1)], self.idxlabelmap[pathfrom[(common - 1)].label()])

    def get_common_cpath(self, src, dest):
        """Return the constituency path from `src` to `dest` through their
        lowest common ancestor, inclusive of both endpoints."""
        if (src == dest):
            return [src]
        pathfrom = self.crootpaths[src.label()][::(- 1)]
        pathto = self.crootpaths[dest.label()][::(- 1)]
        common = 0
        for (n1, n2) in zip(pathfrom, pathto):
            if (n1 == n2):
                common += 1
                continue
            break
        return (pathfrom[(common - 1):][::(- 1)] + pathto[common:])

    def get_cpath_to_target(self, target):
        """For every leaf span (j, k), cache the set of constituent label ids on
        the path from the span's LCA down to the `target` leaf."""
        for j in range(len(self.leafnodes)):
            for k in range(j, len(self.leafnodes)):
                (lca, _) = self.lca[(j, k)]
                path = self.get_common_cpath(lca, self.leafnodes[target])
                self.cpaths[(j, k, target)] = frozenset([self.idxlabelmap[p.label()] for p in path])
class Normal(DistributionBase):
    """Hyperparameter-distribution spec; stores its parameters verbatim.

    Interpretation (sampling, quantization via ``q``, log-domain via ``log``)
    is left to the consumer of this spec.

    NOTE(review): despite the name, the stored parameters are ``low``/``high``
    bounds rather than mean/std — confirm how the sampler interprets them.
    """

    def __init__(self, low, high, q=None, log=False) -> None:
        # Stored as-is; no validation is performed here.
        self.low = low
        self.high = high
        self.q = q
        self.log = log
def _sfc(content, equality=False):
    """Seed and dispatch a fixed-content enumeration.

    Copies ``content`` so the caller's multiset is untouched, allocates the
    output word (length = total content, computed before the decrement),
    consumes one occurrence of symbol 0 as the fixed first position, and
    delegates to ``_simple_fixed_content``.
    """
    remaining = list(content)
    word = [0] * sum(remaining)
    remaining[0] -= 1
    num_symbols = len(remaining)
    return _simple_fixed_content(word, remaining, 2, 1, num_symbols, equality=equality)
class CrossAttnUpBlock3D(nn.Module):
    """3D UNet up-block with spatial cross-attention and temporal sub-layers.

    Each of the ``num_layers`` stages runs, in order: a 2D ResNet block on the
    channel-wise concatenation of the current hidden states and one skip
    connection, a temporal convolution, a spatial cross-attention transformer,
    and a temporal transformer. An optional upsampler finishes the block.

    NOTE(review): ``dual_cross_attention`` is accepted but unused in this
    constructor — presumably kept for signature parity with sibling blocks.
    """

    def __init__(self, in_channels: int, out_channels: int, prev_output_channel: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, attn_num_head_channels=1, cross_attention_dim=1280, output_scale_factor=1.0, add_upsample=True, dual_cross_attention=False, use_linear_projection=False, only_cross_attention=False, upcast_attention=False):
        super().__init__()
        resnets = []
        temp_convs = []
        attentions = []
        temp_attentions = []
        self.gradient_checkpointing = False
        self.has_cross_attention = True
        self.attn_num_head_channels = attn_num_head_channels
        for i in range(num_layers):
            # The last stage's skip comes from the matching down-block input;
            # the first stage consumes the previous up-block's output.
            res_skip_channels = (in_channels if (i == (num_layers - 1)) else out_channels)
            resnet_in_channels = (prev_output_channel if (i == 0) else out_channels)
            resnets.append(ResnetBlock2D(in_channels=(resnet_in_channels + res_skip_channels), out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm))
            temp_convs.append(TemporalConvLayer(out_channels, out_channels, dropout=0.1))
            attentions.append(Transformer2DModel((out_channels // attn_num_head_channels), attn_num_head_channels, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention))
            temp_attentions.append(TransformerTemporalModel((out_channels // attn_num_head_channels), attn_num_head_channels, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups))
        self.resnets = nn.ModuleList(resnets)
        self.temp_convs = nn.ModuleList(temp_convs)
        self.attentions = nn.ModuleList(attentions)
        self.temp_attentions = nn.ModuleList(temp_attentions)
        if add_upsample:
            self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
        else:
            self.upsamplers = None

    def forward(self, hidden_states, res_hidden_states_tuple, temb=None, encoder_hidden_states=None, upsample_size=None, attention_mask=None, num_frames=1, cross_attention_kwargs=None):
        """Run all stages, consuming one skip tensor per stage from the end of
        ``res_hidden_states_tuple``, then upsample if configured.

        Temporal layers are skipped for single-frame input (num_frames == 1).
        ``attention_mask`` is accepted for interface parity but not used here.
        """
        for (resnet, temp_conv, attn, temp_attn) in zip(self.resnets, self.temp_convs, self.attentions, self.temp_attentions):
            # Pop the most recent skip connection and concatenate on channels.
            res_hidden_states = res_hidden_states_tuple[(- 1)]
            res_hidden_states_tuple = res_hidden_states_tuple[:(- 1)]
            hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
            if self.gradient_checkpointing:
                # Checkpointed path recomputes activations during backward.
                hidden_states = cross_attn_g_c(attn, temp_attn, resnet, temp_conv, hidden_states, encoder_hidden_states, cross_attention_kwargs, temb, num_frames, inverse_temp=True)
            else:
                hidden_states = resnet(hidden_states, temb)
                if (num_frames > 1):
                    hidden_states = temp_conv(hidden_states, num_frames=num_frames)
                hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs).sample
                if (num_frames > 1):
                    hidden_states = temp_attn(hidden_states, num_frames=num_frames).sample
        if (self.upsamplers is not None):
            for upsampler in self.upsamplers:
                hidden_states = upsampler(hidden_states, upsample_size)
        return hidden_states
class Ind2OneHotFilter(Filter):
    """Filter that one-hot encodes an integer index into a length-n vector."""

    def __init__(self, n):
        # Number of categories, i.e. the one-hot vector length.
        self.n = n

    def __call__(self, x, update=True):
        """Return a length-n float vector with a 1 at position ``x``.

        ``update`` is accepted for interface parity with other filters but
        has no effect here (the filter is stateless).
        """
        encoded = np.zeros(self.n)
        encoded[x] = 1
        return encoded

    def output_shape(self, input_space):
        """Output shape is determined by the input space's category count."""
        return (input_space.n,)
class ProxylessNASNets():
    """TensorFlow 1.x inference graph for a ProxylessNAS ImageNet network.

    Builds the graph from a serialized ``net_config`` dict (optionally
    initializing weights from ``net_weights``), wires up cross-entropy and
    accuracy ops for 1000-class evaluation, and opens a session.
    """

    def __init__(self, net_config, net_weights=None):
        self.graph = tf.Graph()
        self.net_config = net_config
        # Fixed ImageNet-1k classification head.
        self.n_classes = 1000
        with self.graph.as_default():
            self._define_inputs()
            logits = self.build(init=net_weights)
            prediction = logits
            cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=self.labels))
            self.cross_entropy = cross_entropy
            correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(self.labels, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            self.global_variables_initializer = tf.global_variables_initializer()
        self._initialize_session()

    def bn_eps(self):
        """Batch-norm epsilon from the network config."""
        return self.net_config['bn']['eps']

    def bn_decay(self):
        """Batch-norm moving-average decay, derived as 1 - momentum."""
        return (1 - self.net_config['bn']['momentum'])

    def _initialize_session(self):
        """Open a GPU-growth-enabled session on this graph and init variables."""
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(graph=self.graph, config=config)
        self.sess.run(self.global_variables_initializer)

    def _define_inputs(self):
        """Declare image/label/learning-rate/is-training placeholders."""
        # NHWC 224x224 RGB input.
        shape = [None, 224, 224, 3]
        self.images = tf.placeholder(tf.float32, shape=shape, name='input_images')
        self.labels = tf.placeholder(tf.float32, shape=[None, self.n_classes], name='labels')
        self.learning_rate = tf.placeholder(tf.float32, shape=[], name='learning_rate')
        self.is_training = tf.placeholder(tf.bool, shape=[], name='is_training')

    # NOTE(review): defined without `self`; if invoked as an instance method
    # the instance binds to `n_classes`. Presumably called via the class or as
    # a plain function with explicit arguments — confirm call sites.
    def labels_to_one_hot(n_classes, labels):
        new_labels = np.zeros((labels.shape[0], n_classes), dtype=np.float32)
        new_labels[(range(labels.shape[0]), labels)] = np.ones(labels.shape)
        return new_labels

    def build(self, init=None):
        """Assemble the network from ``net_config`` and return the logits tensor.

        When ``init`` is given, each weight array is wrapped in a constant
        initializer consumed by the layer builders.
        """
        output = self.images
        if (init is not None):
            for key in init:
                init[key] = tf.constant_initializer(init[key])
        first_conv = ConvLayer('first_conv', self.net_config['first_conv']['out_channels'], 3, 2)
        output = first_conv.build(output, self, init)
        for (i, block_config) in enumerate(self.net_config['blocks']):
            # Blocks pruned to ZeroLayer by the architecture search are skipped.
            if (block_config['mobile_inverted_conv']['name'] == 'ZeroLayer'):
                continue
            mobile_inverted_conv = MBInvertedConvLayer('mobile_inverted_conv', block_config['mobile_inverted_conv']['out_channels'], block_config['mobile_inverted_conv']['kernel_size'], block_config['mobile_inverted_conv']['stride'], block_config['mobile_inverted_conv']['expand_ratio'])
            if ((block_config['shortcut'] is None) or (block_config['shortcut']['name'] == 'ZeroLayer')):
                has_residual = False
            else:
                has_residual = True
            block = MobileInvertedResidualBlock(('blocks/%d' % i), mobile_inverted_conv, has_residual)
            output = block.build(output, self, init)
        feature_mix_layer = ConvLayer('feature_mix_layer', self.net_config['feature_mix_layer']['out_channels'], 1, 1)
        output = feature_mix_layer.build(output, self, init)
        # Global 7x7 average pool -> flatten -> dropout-regularized classifier.
        output = avg_pool(output, 7, 7)
        output = flatten(output)
        classifier = LinearLayer('classifier', self.n_classes, self.net_config['classifier']['dropout_rate'])
        output = classifier.build(output, self, init)
        return output
class AnsiCodes(object):
    """Base class that converts numeric ANSI code attributes to escape strings.

    On instantiation, every public class attribute (defined by subclasses) is
    replaced on the instance by the result of ``code_to_chars`` applied to its
    numeric value.
    """

    def __init__(self):
        for attr in dir(self):
            if attr.startswith('_'):
                continue
            raw = getattr(self, attr)
            setattr(self, attr, code_to_chars(raw))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.