code stringlengths 101 5.91M |
|---|
class HLSTMCell(nn.modules.rnn.RNNCellBase):
    """LSTM cell with an extra learned gate over the cell state of the layer
    below (``c_l_minus_one``), for hierarchical/stacked LSTM setups.

    Cell update: ``c = gate * c_{l-1} + f * c_prev + i * g`` followed by the
    usual ``h = o * tanh(c)``.

    Args:
        input_size: size of the per-step input feature vector.
        hidden_size: size of the hidden and cell states.
        bias: whether the internal Linear layers carry a bias term.
    """

    def __init__(self, input_size, hidden_size, bias=True):
        super(HLSTMCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        # One Linear per standard gate, each acting on [input, h_prev].
        self.Wi = nn.Linear((input_size + hidden_size), hidden_size, bias=bias)
        self.Wf = nn.Linear((input_size + hidden_size), hidden_size, bias=bias)
        self.Wo = nn.Linear((input_size + hidden_size), hidden_size, bias=bias)
        self.Wg = nn.Linear((input_size + hidden_size), hidden_size, bias=bias)
        # Extra gate over the lower layer's cell state; sees [c_{l-1}, c_prev, input].
        self.gate = nn.Linear((input_size + (2 * hidden_size)), hidden_size, bias=bias)

    def forward(self, input, c_l_minus_one=None, hx=None):
        """Run one time step.

        Args:
            input: (batch, input_size) tensor for this step.
            c_l_minus_one: (batch, hidden_size) cell state of the layer
                below; zeros when None.
            hx: tuple (h_prev, c_prev) of (batch, hidden_size) tensors;
                zeros when None.

        Returns:
            Tuple (h, c) with the new hidden and cell state.
        """
        self.check_forward_input(input)
        if (hx is None):
            # Zero-initialize both hidden and cell state on first call.
            hx = input.new_zeros(input.size(0), self.hidden_size, requires_grad=False)
            hx = (hx, hx)
        if (c_l_minus_one is None):
            c_l_minus_one = input.new_zeros(input.size(0), self.hidden_size, requires_grad=False)
        self.check_forward_hidden(input, hx[0], '[0]')
        self.check_forward_hidden(input, hx[1], '[1]')
        self.check_forward_hidden(input, c_l_minus_one, 'c_l_minus_one')
        rec_input = torch.cat([input, hx[0]], 1)
        # NOTE(review): F.sigmoid / F.tanh are deprecated aliases of
        # torch.sigmoid / torch.tanh in recent PyTorch; behavior is identical.
        i = F.sigmoid(self.Wi(rec_input))
        f = F.sigmoid(self.Wf(rec_input))
        o = F.sigmoid(self.Wo(rec_input))
        g = F.tanh(self.Wg(rec_input))
        gate = F.sigmoid(self.gate(torch.cat([c_l_minus_one, hx[1], input], 1)))
        c = (((gate * c_l_minus_one) + (f * hx[1])) + (i * g))
        h = (o * F.tanh(c))
        return (h, c)
def grid_subsampling(points, features=None, labels=None, sampleDl=0.1, verbose=0):
    """Grid-subsample a point cloud via the compiled ``cpp_subsampling`` module.

    Args:
        points: (N, 3) point coordinates.
        features: optional (N, d) per-point features, averaged per cell.
        labels: optional (N,) per-point class labels (passed as ``classes``).
        sampleDl: grid cell size.
        verbose: verbosity level forwarded to the C++ routine.

    Returns:
        Whatever ``cpp_subsampling.compute`` returns for the given inputs
        (subsampled points, plus features and/or labels when provided).
    """
    # The C++ binding distinguishes its overloads by which keyword arguments
    # are present, so only forward the optional arrays that were supplied.
    # This replaces the original four-way if/elif ladder.
    kwargs = {'sampleDl': sampleDl, 'verbose': verbose}
    if features is not None:
        kwargs['features'] = features
    if labels is not None:
        kwargs['classes'] = labels
    return cpp_subsampling.compute(points, **kwargs)
_to_string_io
def load_POSevents(fhandle: TextIO) -> annotations.Events:
    """Read a strong-label events CSV into an ``annotations.Events`` object.

    The header row carries class names from column 3 onward; each data row
    holds onset/offset seconds in columns 1-2 and a ``POS``/other flag per
    class in the remaining columns. All events get confidence 1.0.

    Args:
        fhandle: open text handle on the CSV file.

    Returns:
        annotations.Events with second-valued intervals and comma-joined
        open-vocabulary labels.
    """
    reader = csv.reader(fhandle, delimiter=',')
    class_ids = next(reader)[3:]
    intervals = []
    labels = []
    confidence = []
    for row in reader:
        intervals.append([float(row[1]), float(row[2])])
        active = [class_ids[idx] for idx, flag in enumerate(row[3:]) if flag == 'POS']
        labels.append(','.join(active))
        confidence.append(1.0)
    return annotations.Events(
        intervals=np.array(intervals),
        intervals_unit='seconds',
        labels=labels,
        labels_unit='open',
        confidence=np.array(confidence),
    )
class ExponentialLR(_LRScheduler):
    """Decay every parameter group's learning rate by ``gamma`` each epoch.

    Args:
        optimizer: wrapped optimizer.
        gamma: multiplicative decay factor applied per step.
        last_epoch: index of the last epoch (-1 starts fresh).
        verbose: print a message on each update (forwarded to the base class).
    """

    def __init__(self, optimizer, gamma, last_epoch=-1, verbose=False):
        self.gamma = gamma
        super(ExponentialLR, self).__init__(optimizer, last_epoch, verbose)

    def get_lr(self):
        """Return the next learning rate for each param group (recursive form)."""
        if not self._get_lr_called_within_step:
            warnings.warn('To get the last learning rate computed by the scheduler, please use `get_last_lr()`.', UserWarning)
        groups = self.optimizer.param_groups
        if self.last_epoch == 0:
            # First step: keep the initial learning rates untouched.
            return [g['lr'] for g in groups]
        return [g['lr'] * self.gamma for g in groups]

    def _get_closed_form_lr(self):
        """Closed-form schedule: base_lr * gamma ** epoch."""
        decay = self.gamma ** self.last_epoch
        return [base_lr * decay for base_lr in self.base_lrs]
class TextImageDataset(TextVideoDataset):
    """Text-image variant of TextVideoDataset: loads a single RGB image per
    sample instead of a video clip."""

    def __getitem__(self, item):
        """Return one sample dict with keys 'video' (1-frame image tensor),
        'text' (caption) and 'meta'.

        In 'strict' loading mode a failed image read raises ValueError; in
        any other mode a black placeholder image is substituted so training
        can continue.
        """
        item = (item % len(self.metadata))  # wrap index so any int is valid
        sample = self.metadata.iloc[item]
        (video_fp, rel_fp) = self._get_video_path(sample)
        caption = self._get_caption(sample)
        video_loading = self.video_params.get('loading', 'strict')
        try:
            img = Image.open(video_fp).convert('RGB')
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; narrow to Exception.
        except Exception:
            if (video_loading == 'strict'):
                raise ValueError(f'Image loading failed for {video_fp}, image loading for this dataset is strict.')
            else:
                print("'Filling empty data for training now!!!'", file=sys.stderr)
                if (not os.path.isfile(video_fp)):
                    print(('%s does not exist!!!' % video_fp))
                # Black placeholder at the configured input resolution.
                img = Image.new('RGB', (self.video_params['input_res'], self.video_params['input_res']), (0, 0, 0))
        # Add a leading frame dimension so the shape matches video samples.
        img = transforms.ToTensor()(img).unsqueeze(0)
        if (self.transforms is not None):
            img = self.transforms(img)
        meta_arr = {'raw_captions': caption, 'paths': rel_fp, 'dataset': self.dataset_name}
        data = {'video': img, 'text': caption, 'meta': meta_arr}
        return data
def convert_child_by_dict(model, dict_id_b4_to_after):
    """Recursively replace submodules of ``model``.

    ``dict_id_b4_to_after`` maps ``id()`` of an existing child module to its
    replacement module. Children that are not keys are descended into; an
    empty/falsy mapping is a no-op.
    """
    if not dict_id_b4_to_after:
        return
    for child_name, child in model.named_children():
        key = id(child)
        if key not in dict_id_b4_to_after:
            # Not targeted directly - search deeper in this subtree.
            convert_child_by_dict(child, dict_id_b4_to_after)
            continue
        setattr(model, child_name, dict_id_b4_to_after[key])
class _DecoderBlock(nn.Module):
def __init__(self, in_channels, out_channels, num_conv_layers):
super(_DecoderBlock, self).__init__()
middle_channels = int((in_channels / 2))
layers = [nn.ConvTranspose2d(in_channels, in_channels, kernel_size=2, stride=2), nn.Conv2d(in_channels, middle_channels, kernel_size=3, padding=1), nn.BatchNorm2d(middle_channels), nn.ReLU(inplace=False)]
layers += ([nn.Conv2d(middle_channels, middle_channels, kernel_size=3, padding=1), nn.BatchNorm2d(middle_channels), nn.ReLU(inplace=False)] * (num_conv_layers - 2))
layers += [nn.Conv2d(middle_channels, out_channels, kernel_size=3, padding=1), nn.BatchNorm2d(out_channels), nn.ReLU(inplace=False)]
self.decode = nn.Sequential(*layers)
def forward(self, x):
return self.decode(x) |
def _config_draft(config):
if config.draft:
config.num_steps = 2
config.eval_period = 1
config.log_period = 1
config.save_period = 1
config.eval_num_batches = 1 |
def test_nonzero_offset_fromarrow_NumpyArray_3():
    """Regression test: converting a non-zero-offset Arrow slice back through
    Awkward's ``handle_arrow`` must agree with pyarrow's own ``to_pylist``."""
    content = ak.contents.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.1]))
    # Slicing the Arrow array keeps the data buffer but shifts the offset;
    # both sides of the comparison see the same [2:5] window.
    assert (to_list(ak._connect.pyarrow.handle_arrow(content.to_arrow()[2:5])) == pyarrow.Array.to_pylist(content.to_arrow()[2:5]))
class QuestionAnsweringArgumentHandler(ArgumentHandler):
    """Normalizes the many accepted input shapes of the question-answering
    pipeline into a flat list of SquadExample-compatible items."""

    def normalize(self, item):
        """Validate one raw item and convert it to a SquadExample.

        Accepts either an existing SquadExample (returned unchanged) or a
        dict with non-None, non-empty 'question' and 'context' entries.

        Raises:
            KeyError: a required key is missing from the dict.
            ValueError: a value is None/empty, or the item has the wrong type.
        """
        if isinstance(item, SquadExample):
            return item
        elif isinstance(item, dict):
            for k in ['question', 'context']:
                if (k not in item):
                    raise KeyError('You need to provide a dictionary with keys {question:..., context:...}')
                elif (item[k] is None):
                    raise ValueError(f'`{k}` cannot be None')
                elif (isinstance(item[k], str) and (len(item[k]) == 0)):
                    raise ValueError(f'`{k}` cannot be empty')
            return QuestionAnsweringPipeline.create_sample(**item)
        raise ValueError(f'{item} argument needs to be of type (SquadExample, dict)')

    def __call__(self, *args, **kwargs):
        """Dispatch over the supported call conventions and return a list of
        normalized items (or pass generators/Datasets through untouched).

        Supported forms: positional item(s); two positional strings
        (question, context); legacy `X=`/`data=` kwargs; and
        `question=`/`context=` kwargs in str/list combinations.
        """
        # Positional usage: one arg (item or iterable of items) or two
        # string args interpreted as (question, context).
        if ((args is not None) and (len(args) > 0)):
            if (len(args) == 1):
                inputs = args[0]
            elif ((len(args) == 2) and ({type(el) for el in args} == {str})):
                inputs = [{'question': args[0], 'context': args[1]}]
            else:
                inputs = list(args)
        # Legacy keyword aliases.
        elif ('X' in kwargs):
            inputs = kwargs['X']
        elif ('data' in kwargs):
            inputs = kwargs['data']
        elif (('question' in kwargs) and ('context' in kwargs)):
            # str/list combinations: broadcast a single context over many
            # questions, or zip equal-length lists.
            if (isinstance(kwargs['question'], list) and isinstance(kwargs['context'], str)):
                inputs = [{'question': Q, 'context': kwargs['context']} for Q in kwargs['question']]
            elif (isinstance(kwargs['question'], list) and isinstance(kwargs['context'], list)):
                if (len(kwargs['question']) != len(kwargs['context'])):
                    raise ValueError("Questions and contexts don't have the same lengths")
                inputs = [{'question': Q, 'context': C} for (Q, C) in zip(kwargs['question'], kwargs['context'])]
            elif (isinstance(kwargs['question'], str) and isinstance(kwargs['context'], str)):
                inputs = [{'question': kwargs['question'], 'context': kwargs['context']}]
            else:
                raise ValueError("Arguments can't be understood")
        else:
            raise ValueError(f'Unknown arguments {kwargs}')
        # Generators and datasets are streamed; normalization happens later.
        generator_types = ((types.GeneratorType, Dataset) if (Dataset is not None) else (types.GeneratorType,))
        if isinstance(inputs, generator_types):
            return inputs
        if isinstance(inputs, dict):
            inputs = [inputs]
        elif isinstance(inputs, Iterable):
            inputs = list(inputs)
        else:
            raise ValueError(f'Invalid arguments {kwargs}')
        for (i, item) in enumerate(inputs):
            inputs[i] = self.normalize(item)
        return inputs
def test_kmeans_semi_sup(merge_test_loader, args, K=None):
    """Run semi-supervised K-Means over features from ``merge_test_loader``
    and report clustering accuracy on the unlabelled portion.

    Args:
        merge_test_loader: yields (feats, label, _, mask_lab) batches, where
            mask_lab marks labelled samples.
        args: config with num_labeled_classes, num_unlabeled_classes,
            train_classes, max_kmeans_iter, k_means_init, eval_funcs.
        K: number of clusters; defaults to labelled + unlabelled class count.

    Returns:
        Tuple (all_acc, old_acc, new_acc, kmeans).
    """
    if (K is None):
        K = (args.num_labeled_classes + args.num_unlabeled_classes)
    all_feats = []
    targets = np.array([])
    mask_lab = np.array([])  # True for labelled samples
    mask_cls = np.array([])  # True for samples whose class is a "seen" train class
    print('Collating features...')
    # First extract all features from the loader, L2-normalized.
    for (batch_idx, (feats, label, _, mask_lab_)) in enumerate(tqdm(merge_test_loader)):
        feats = feats.to(device)
        feats = torch.nn.functional.normalize(feats, dim=(- 1))
        all_feats.append(feats.cpu().numpy())
        targets = np.append(targets, label.cpu().numpy())
        mask_cls = np.append(mask_cls, np.array([(True if (x.item() in range(len(args.train_classes))) else False) for x in label]))
        mask_lab = np.append(mask_lab, mask_lab_.cpu().bool().numpy())
    mask_lab = mask_lab.astype(bool)
    mask_cls = mask_cls.astype(bool)
    all_feats = np.concatenate(all_feats)
    # Split features/targets into labelled and unlabelled subsets.
    l_feats = all_feats[mask_lab]
    u_feats = all_feats[(~ mask_lab)]
    l_targets = targets[mask_lab]
    u_targets = targets[(~ mask_lab)]
    print('Fitting Semi-Supervised K-Means...')
    kmeans = SemiSupKMeans(k=K, tolerance=0.0001, max_iterations=args.max_kmeans_iter, init='k-means++', n_init=args.k_means_init, random_state=None, n_jobs=None, pairwise_batch_size=1024, mode=None)
    (l_feats, u_feats, l_targets, u_targets) = (torch.from_numpy(x).to(device) for x in (l_feats, u_feats, l_targets, u_targets))
    # Labelled targets anchor their clusters; unlabelled points are assigned freely.
    kmeans.fit_mix(u_feats, l_feats, l_targets)
    all_preds = kmeans.labels_.cpu().numpy()
    u_targets = u_targets.cpu().numpy()
    # Evaluate only on the unlabelled samples; mask separates old vs new classes.
    preds = all_preds[(~ mask_lab)]
    mask = mask_cls[(~ mask_lab)]
    mask = mask.astype(bool)
    (all_acc, old_acc, new_acc) = log_accs_from_preds(y_true=u_targets, y_pred=preds, mask=mask, eval_funcs=args.eval_funcs, save_name='SS-K-Means Train ACC Unlabelled', print_output=True)
    return (all_acc, old_acc, new_acc, kmeans)
class RoIPointPool3dFunction(Function):
    """Autograd wrapper around the compiled RoI point pooling kernel.

    NOTE(review): ``forward`` takes ``ctx`` like a ``@staticmethod`` of
    ``torch.autograd.Function`` but no decorator is visible here — it may
    have been stripped; confirm against the upstream source.
    """

    def forward(ctx, points, point_features, boxes3d, num_sampled_points=512):
        """Pool points and features inside each 3D box.

        Args:
            points: (B, N, 3) point coordinates.
            point_features: (B, N, C) per-point features.
            boxes3d: (B, M, 7) boxes.
            num_sampled_points: points sampled per box.

        Returns:
            Tuple of pooled features (B, M, num_sampled_points, 3 + C) and an
            int flag tensor (B, M) marking empty boxes.
        """
        assert ((len(points.shape) == 3) and (points.shape[2] == 3))
        (batch_size, boxes_num, feature_len) = (points.shape[0], boxes3d.shape[1], point_features.shape[2])
        pooled_boxes3d = boxes3d.view(batch_size, (- 1), 7)
        # Output layout: xyz followed by the feature channels.
        pooled_features = point_features.new_zeros((batch_size, boxes_num, num_sampled_points, (3 + feature_len)))
        pooled_empty_flag = point_features.new_zeros((batch_size, boxes_num)).int()
        ext_module.roipoint_pool3d_forward(points.contiguous(), pooled_boxes3d.contiguous(), point_features.contiguous(), pooled_features, pooled_empty_flag)
        return (pooled_features, pooled_empty_flag)

    def backward(ctx, grad_out):
        # Pooling is not differentiable here; gradients are unsupported.
        raise NotImplementedError
class AlgebraicNumRef(ArithRef):
    """Z3 algebraic (irrational) number expression wrapper."""

    def approx(self, precision=10):
        """Return a rational upper approximation within 1/10^precision."""
        return RatNumRef(Z3_get_algebraic_number_upper(self.ctx_ref(), self.as_ast(), precision), self.ctx)

    def as_decimal(self, prec):
        """Return a decimal string with ``prec`` digits of precision."""
        return Z3_get_numeral_decimal_string(self.ctx_ref(), self.as_ast(), prec)

    def poly(self):
        """Return the defining polynomial's coefficients as an AstVector."""
        return AstVector(Z3_algebraic_get_poly(self.ctx_ref(), self.as_ast()), self.ctx)

    def index(self):
        """Return which root of the defining polynomial this number is."""
        return Z3_algebraic_get_i(self.ctx_ref(), self.as_ast())
class Trainer(DefaultTrainer):
    """DefaultTrainer variant that swaps in AdetCheckpointer, a basis-aware
    dataset mapper, and per-dataset-type evaluators.

    NOTE(review): ``build_train_loader``, ``build_evaluator``, ``test`` and
    ``test_with_TTA`` take ``cls`` but show no ``@classmethod`` decorator here
    — decorators may have been stripped by extraction; confirm upstream.
    """

    def resume_or_load(self, resume=True):
        """Ensure the checkpointer is an AdetCheckpointer, then delegate."""
        if (not isinstance(self.checkpointer, AdetCheckpointer)):
            self.checkpointer = AdetCheckpointer(self.model, self.cfg.OUTPUT_DIR, optimizer=self.optimizer, scheduler=self.scheduler)
        super().resume_or_load(resume=resume)

    def train_loop(self, start_iter: int, max_iter: int):
        """Run the standard before/step/after training loop under an EventStorage."""
        logger = logging.getLogger('adet.trainer')
        logger.info('Starting training from iteration {}'.format(start_iter))
        self.iter = self.start_iter = start_iter
        self.max_iter = max_iter
        with EventStorage(start_iter) as self.storage:
            self.before_train()
            for self.iter in range(start_iter, max_iter):
                self.before_step()
                self.run_step()
                self.after_step()
            self.after_train()

    def train(self):
        """Train, then (on the main process) verify and return the final
        evaluation results if a hook produced them."""
        self.train_loop(self.start_iter, self.max_iter)
        if (hasattr(self, '_last_eval_results') and comm.is_main_process()):
            verify_results(self.cfg, self._last_eval_results)
            return self._last_eval_results

    def build_train_loader(cls, cfg):
        """Build the train loader with the basis-aware dataset mapper."""
        mapper = DatasetMapperWithBasis(cfg, True)
        return build_detection_train_loader(cfg, mapper=mapper)

    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        """Construct the evaluator(s) matching the dataset's evaluator_type.

        Raises:
            NotImplementedError: no evaluator is registered for the type.
        """
        if (output_folder is None):
            output_folder = os.path.join(cfg.OUTPUT_DIR, 'inference')
        evaluator_list = []
        evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
        if (evaluator_type in ['sem_seg', 'coco_panoptic_seg']):
            evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES, ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE, output_dir=output_folder))
        if (evaluator_type in ['soba']):
            evaluator_list.append(SOBAEvaluator(dataset_name, cfg, True, output_folder))
        if (evaluator_type in ['coco', 'coco_panoptic_seg']):
            evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))
        if (evaluator_type == 'coco_panoptic_seg'):
            evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
        if (evaluator_type == 'pascal_voc'):
            return PascalVOCDetectionEvaluator(dataset_name)
        if (evaluator_type == 'lvis'):
            return LVISEvaluator(dataset_name, cfg, True, output_folder)
        if (evaluator_type == 'text'):
            return TextEvaluator(dataset_name, cfg, True, output_folder)
        if (len(evaluator_list) == 0):
            raise NotImplementedError('no Evaluator for the dataset {} with the type {}'.format(dataset_name, evaluator_type))
        if (len(evaluator_list) == 1):
            return evaluator_list[0]
        return DatasetEvaluators(evaluator_list)

    def test(cls, cfg, model, evaluators=None):
        """Evaluate ``model`` on every cfg.DATASETS.TEST entry.

        Returns an OrderedDict of per-dataset results (unwrapped when there
        is exactly one dataset). NOTE(review): ``association`` is collected
        alongside results but never returned — apparently intentional here;
        confirm against callers.
        """
        logger = logging.getLogger(__name__)
        if isinstance(evaluators, DatasetEvaluator):
            evaluators = [evaluators]
        if (evaluators is not None):
            assert (len(cfg.DATASETS.TEST) == len(evaluators)), '{} != {}'.format(len(cfg.DATASETS.TEST), len(evaluators))
        results = OrderedDict()
        association = OrderedDict()
        for (idx, dataset_name) in enumerate(cfg.DATASETS.TEST):
            data_loader = cls.build_test_loader(cfg, dataset_name)
            if (evaluators is not None):
                evaluator = evaluators[idx]
            else:
                # Fall back to the class-level factory; skip datasets with
                # no registered evaluator instead of failing the whole run.
                try:
                    evaluator = cls.build_evaluator(cfg, dataset_name)
                except NotImplementedError:
                    logger.warn('No evaluator found. Use `DefaultTrainer.test(evaluators=)`, or implement its `build_evaluator` method.')
                    results[dataset_name] = {}
                    continue
            (results_i, association_i) = inference_on_dataset(model, data_loader, evaluator)
            results[dataset_name] = results_i
            association[dataset_name] = association_i
            if comm.is_main_process():
                assert isinstance(results_i, dict), 'Evaluator must return a dict on the main process. Got {} instead.'.format(results_i)
                logger.info('Evaluation results for {} in csv format:'.format(dataset_name))
                print_csv_format(results_i)
        if (len(results) == 1):
            results = list(results.values())[0]
            association = list(association.values())[0]
        return results

    def test_with_TTA(cls, cfg, model):
        """Evaluate with test-time augmentation; results keyed with a '_TTA' suffix."""
        logger = logging.getLogger('adet.trainer')
        logger.info('Running inference with test-time augmentation ...')
        model = GeneralizedRCNNWithTTA(cfg, model)
        evaluators = [cls.build_evaluator(cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, 'inference_TTA')) for name in cfg.DATASETS.TEST]
        res = cls.test(cfg, model, evaluators)
        res = OrderedDict({(k + '_TTA'): v for (k, v) in res.items()})
        return res
def openai_moderation_API(data):
    """Score each sample's 'output' text with the OpenAI moderation endpoint.

    Args:
        data: iterable of dicts, each with an 'output' string.

    Returns:
        Tuple (scores, all_scores): ``scores[i]`` is ``1 - max`` over the
        sample's category scores (higher means less flagged), and
        ``all_scores[i]`` is the raw per-category score mapping.
    """
    scores = []
    all_scores = []
    for sample in tqdm(data):
        response = openai.Moderation.create(input=sample['output'])
        result = response['results'][0]
        category_scores = result['category_scores']
        all_scores.append(category_scores)
        scores.append(1 - np.max(list(category_scores.values())))
    return (scores, all_scores)
def get_multiple_outputs_model(input_shape):
    """Build a tiny two-output Keras model for testing.

    The first output is the ReLU6 activation map after a Conv+BatchNorm
    stem; the second is a further convolution applied on top of it.

    Args:
        input_shape: full batch shape; the leading batch dim is dropped.
    """
    model_input = Input(shape=input_shape[1:])
    stem = Conv2D(filters=2, kernel_size=3)(model_input)
    stem = BatchNormalization()(stem)
    relu_out = ReLU(max_value=6.0)(stem)
    conv_out = Conv2D(2, 4)(relu_out)
    return keras.Model(inputs=model_input, outputs=[relu_out, conv_out])
def count_uses(dag, uses=None):
    """Count how many times each node is referenced within ``dag``.

    Performs a depth-first walk over ``node.args()``, visiting each node's
    children once, and increments a node's count for every parent edge that
    points at it. The root itself is not counted.

    Args:
        dag: root node exposing ``args()``.
        uses: optional Counter to accumulate into (a fresh one by default).

    Returns:
        The Counter mapping node -> reference count.
    """
    if uses is None:
        uses = collections.Counter()

    def _visit(node):
        for child in node.args():
            # Only descend the first time we see a node; later references
            # just bump its count.
            if child not in uses:
                _visit(child)
            uses[child] += 1

    _visit(dag)
    return uses
class LinearFeatureBaseline(Baseline):
    """Value baseline fit by ridge-regularized linear least squares over
    hand-crafted state/time features.

    Features per step: [obs, obs^2, t, t^2, t^3, 1] with observations
    clipped to [-10, 10] and t = step_index / 100.
    """

    def __init__(self, env_spec, reg_coeff=1e-05, name='LinearFeatureBaseline'):
        super().__init__(env_spec)
        self._coeffs = None
        self._reg_coeff = reg_coeff
        self.name = name
        # Observation clipping range used by _features.
        self.lower_bound = -10
        self.upper_bound = 10

    def get_param_values(self):
        """Return the fitted coefficient vector (None before fitting)."""
        return self._coeffs

    def set_param_values(self, flattened_params):
        """Overwrite the coefficient vector directly."""
        self._coeffs = flattened_params

    def _features(self, path):
        """Build the (T, k) feature matrix for one trajectory."""
        obs = np.clip(path['observations'], self.lower_bound, self.upper_bound)
        n_steps = len(path['rewards'])
        t = np.arange(n_steps).reshape(-1, 1) / 100.0
        return np.concatenate([obs, obs ** 2, t, t ** 2, t ** 3, np.ones((n_steps, 1))], axis=1)

    def fit(self, paths):
        """Fit coefficients to returns; escalate the ridge term on NaNs."""
        feats = np.concatenate([self._features(p) for p in paths])
        rets = np.concatenate([p['returns'] for p in paths])
        reg = self._reg_coeff
        for _ in range(5):
            normal_matrix = feats.T.dot(feats) + reg * np.identity(feats.shape[1])
            self._coeffs = np.linalg.lstsq(normal_matrix, feats.T.dot(rets), rcond=-1)[0]
            if not np.any(np.isnan(self._coeffs)):
                break
            # Ill-conditioned solve - try again with a stronger ridge.
            reg *= 10

    def predict(self, path):
        """Predict per-step baseline values (zeros before fitting)."""
        if self._coeffs is None:
            return np.zeros(len(path['rewards']))
        return self._features(path).dot(self._coeffs)
def isASCII(word):
    """Return True if ``word`` (a bytes-like object) decodes as pure ASCII.

    Fixes from the original: the result of ``decode`` was pointlessly
    rebound to ``word``, and ``UnicodeEncodeError`` was caught in its own
    branch (it can only arise via Python 2's implicit encode of a unicode
    argument; the two handlers were merged).
    """
    try:
        word.decode('ascii')
        return True
    except (UnicodeDecodeError, UnicodeEncodeError):
        return False
def separate_process_wrapper_fn(func: Callable[[], None], do_multi_processing: bool) -> Callable[[], None]:
    """Optionally wrap ``func`` so each call runs in its own child process.

    Useful for isolating measurements (e.g. memory/GPU state) per call.
    When ``do_multi_processing`` is False, ``func`` is returned unchanged.
    """
    def multi_process_func(*args, **kwargs):
        # Run func(*args) in a child process and relay its result (or 'N/A'
        # on failure) back through a Queue.
        def wrapper_func(queue: Queue, *args):
            try:
                result = func(*args)
            except Exception as e:
                logger.error(e)
                print(e)
                # Sentinel so the parent always receives something.
                result = 'N/A'
            queue.put(result)
        queue = Queue()
        p = Process(target=wrapper_func, args=([queue] + list(args)))
        p.start()
        # Read the result before join() to avoid deadlocking on a full pipe.
        result = queue.get()
        p.join()
        return result
    if do_multi_processing:
        logger.info(f'Function {func} is executed in its own process...')
        return multi_process_func
    else:
        return func
def get_jsd_type_scores(p_1, p_2, m, weight_1, weight_2, base, alpha):
    """Per-term contributions to a (generalized) Jensen-Shannon-type divergence.

    Args:
        p_1, p_2: probabilities under the two distributions.
        m: probability under the mixture.
        weight_1, weight_2: mixture weights for the two distributions.
        base: logarithm base (alpha == 1 case).
        alpha: divergence order; 1 selects the log form, other positive
            values the power form.

    Returns:
        Tuple (score_1, score_2); zero when the corresponding p is 0 in the
        power form, and a log(m)-only term in the log form.
    """
    score_1 = 0
    score_2 = 0
    if (alpha == 1):
        # NOTE(review): score_1 uses log(m) - log(p_1) while score_2 uses
        # log(p_2) - log(m) — the signs are asymmetric. This may be the
        # intended directional decomposition, but confirm against the paper
        # or reference implementation.
        if (p_1 > 0):
            score_1 = (weight_1 * (log(m, base) - log(p_1, base)))
        else:
            score_1 = (weight_1 * log(m, base))
        if (p_2 > 0):
            score_2 = (weight_2 * (log(p_2, base) - log(m, base)))
        else:
            score_2 = (weight_2 * (- log(m, base)))
    elif (alpha > 0):
        # Power form: (m^(a-1) - p^(a-1)) / (a-1), weighted; 0 for p == 0.
        if (p_1 > 0):
            score_1 = ((weight_1 * ((m ** (alpha - 1)) - (p_1 ** (alpha - 1)))) / (alpha - 1))
        if (p_2 > 0):
            score_2 = ((weight_2 * ((m ** (alpha - 1)) - (p_2 ** (alpha - 1)))) / (alpha - 1))
    return (score_1, score_2)
class Queue(multiprocessing.queues.Queue):
    """multiprocessing Queue whose pipe ends are wrapped in ConnectionWrapper
    (e.g. to use a custom serializer for the objects sent through it)."""

    def __init__(self, *args, **kwargs):
        super(Queue, self).__init__(*args, **kwargs)
        # Replace both pipe endpoints, then rebind the bound methods the
        # base class cached so they go through the wrappers.
        self._reader = ConnectionWrapper(self._reader)
        self._writer = ConnectionWrapper(self._writer)
        self._send = self._writer.send
        self._recv = self._reader.recv
.experimental
def test_raises_predict(log, item_features, model):
    """A model fit with item features must refuse to predict pairs without them."""
    with pytest.raises(ValueError, match='Item features are missing for predict'):
        # Fit WITH item features, then predict with item_features=None.
        model.fit(log, None, item_features)
        _ = model.predict_pairs(log.select('user_idx', 'item_idx'), user_features=None, item_features=None)
def random_labels(n_samples, n_classes):
    """Draw ``n_samples`` integer labels uniformly from [0, n_classes) using
    the module-level ``rng``."""
    return rng.randint(low=0, high=n_classes, size=n_samples)
def test_EntanglementSwapping():
    """End-to-end test of entanglement swapping over a 3-node chain.

    Runs 1000 seeded trials with success_prob=0.2 and checks the per-trial
    memory states plus that the empirical success rate lands near 0.2.
    """
    counter1 = counter2 = 0  # success / failure tallies
    for i in range(1000):
        (tl, nodes, memories) = config_three_nodes_network(phi_plus, phi_plus, i)
        (a1, a2, a3) = nodes
        (memo1, memo2, memo3, memo4) = memories
        # End nodes run the B role; the middle node a2 runs the A (swapping) role.
        es1 = EntanglementSwappingB(a1, ('a1.ESb%d' % i), memo1)
        a1.protocols.append(es1)
        es2 = EntanglementSwappingA(a2, ('a2.ESa%d' % i), memo2, memo3, success_prob=0.2)
        a2.protocols.append(es2)
        es3 = EntanglementSwappingB(a3, ('a3.ESb%d' % i), memo4)
        a3.protocols.append(es3)
        # Wire up peer references before starting.
        es1.set_others(es2.name, a2.name, [memo2.name, memo3.name])
        es3.set_others(es2.name, a2.name, [memo2.name, memo3.name])
        es2.set_others(es1.name, a1.name, [memo1.name])
        es2.set_others(es3.name, a3.name, [memo4.name])
        es2.start()
        # Before the timeline runs: middle memories are consumed/RAW, end
        # memories still point at the middle node.
        assert (memo2.fidelity == memo3.fidelity == 0)
        assert (memo1.entangled_memory['node_id'] == memo4.entangled_memory['node_id'] == 'a2')
        assert (memo2.entangled_memory['node_id'] == memo3.entangled_memory['node_id'] == None)
        assert (memo2.entangled_memory['memo_id'] == memo3.entangled_memory['memo_id'] == None)
        assert (a2.resource_manager.log[(- 2)] == (memo2, 'RAW'))
        assert (a2.resource_manager.log[(- 1)] == (memo3, 'RAW'))
        tl.run()
        if es2.is_success:
            counter1 += 1
            # End memories are now entangled with each other (a1 <-> a3).
            assert (memo1.entangled_memory['node_id'] == 'a3')
            assert (memo4.entangled_memory['node_id'] == 'a1')
            assert (memo1.fidelity == memo4.fidelity <= memo1.raw_fidelity)
            assert (a1.resource_manager.log[(- 1)] == (memo1, 'ENTANGLED'))
            assert (a3.resource_manager.log[(- 1)] == (memo4, 'ENTANGLED'))
        else:
            counter2 += 1
            # Failed swap resets the end memories to RAW.
            assert (memo1.entangled_memory['node_id'] == None)
            assert (memo4.entangled_memory['node_id'] == None)
            assert (memo1.fidelity == memo4.fidelity == 0)
            assert (a1.resource_manager.log[(- 1)] == (memo1, 'RAW'))
            assert (a3.resource_manager.log[(- 1)] == (memo4, 'RAW'))
    # Empirical success rate should be within 0.1 of the configured 0.2.
    assert (abs(((counter1 / (counter1 + counter2)) - 0.2)) < 0.1)
def calculate_homophily(g, labels, K=1, method='edge', multilabels=False, heterograph=False):
    """Compute K-hop label homophily of a DGL graph.

    Args:
        g: DGL graph (or heterograph with 'target_mask' node data).
        labels: per-node labels; one-hot matrices are argmax-ed unless
            ``multilabels`` is set.
        K: number of hops (uses ``dgl.khop_graph``).
        method: 'edge' averages the same-label indicator over edges;
            'node' averages it per node first, then over nodes.
        multilabels: labels are (N, L) multi-label indicator matrices.
        heterograph: restrict to the target-node subgraph first.

    Returns:
        Scalar homophily as a Python float.
    """
    assert (method in ['edge', 'node'])
    if multilabels:
        assert (len(labels.shape) == 2)
    elif ((labels.max() == 1) and (len(labels.shape) > 1)):
        # One-hot matrix -> integer class ids.
        labels = labels.argmax(dim=1)
    if heterograph:
        # Keep only the target node type's induced subgraph.
        target_mask = g.ndata['target_mask']
        target_ids = g.ndata[dgl.NID][target_mask]
        num_target = target_mask.sum().item()
        g = g.subgraph(np.arange(g.number_of_nodes())[target_mask])
    g = dgl.khop_graph(g, K)
    (src, dst) = g.edges()
    # 1.0 where the two endpoints share a label (elementwise for multilabels).
    mask = (labels[src] == labels[dst]).float()
    if (method == 'edge'):
        out = mask.mean(dim=0)
    elif (method == 'node'):
        g.edata['mask'] = mask
        g.update_all(fn.copy_e('mask', 'm'), fn.mean('m', 'out'))
        out = g.ndata.pop('out').mean(dim=0)
    return out.mean(0).item()
def db_input(model, blobs_out, batch_size, db, db_type):
    """Attach a Caffe2 DB reader input to ``model``.

    Creates (in the param-init net) a DB reader named 'dbreader_<db>' and
    returns a TensorProtosDBInput op that fills ``blobs_out`` with batches
    of ``batch_size`` records.
    """
    dbreader_name = ('dbreader_' + db)
    dbreader = model.param_init_net.CreateDB([], dbreader_name, db=db, db_type=db_type)
    return model.net.TensorProtosDBInput(dbreader, blobs_out, batch_size=batch_size)
def symbolic_override_packed_sequence_based(symbolic_fn):
    """Decorator wiring an ONNX symbolic for ops whose first argument is a
    PackedSequence; tracing state is taken from the packed data tensor."""
    def might_trace(args):
        import torch
        first_arg = args[0]
        if (not isinstance(first_arg, torch.nn.utils.rnn.PackedSequence)):
            raise ValueError('pad_packed_sequence expects sequence to be a PackedSequence, but got an object of type {}'.format(type(first_arg)))
        # PackedSequence[0] is the packed data tensor, which carries the
        # tracing flag.
        return torch._C._jit_is_tracing(first_arg[0])
    return functools.partial(_symbolic_override_wrapper_maker, symbolic_fn, might_trace)
class StatTest(Enum):
    """Available statistical tests, each valued as [implementation, name].

    NOTE(review): the member names shadow the module-level ``PairedTTest``
    / ``WilcoxonTest`` objects referenced in their values — the values bind
    to those outer objects, evaluated before the members exist. Confirm this
    shadowing is intentional.
    """
    PairedTTest = [PairedTTest, 'paired_ttest']
    WilcoxonTest = [WilcoxonTest, 'wilcoxon_test']
def check_already_generated(md_dir, aishell1_dir):
    """Return the aishell-1 split directories still lacking a metadata CSV.

    Args:
        md_dir: directory holding already-generated '<split>.csv' files.
        aishell1_dir: root whose immediate subdirectories may include the
            original 'dev', 'test' and 'train' splits.

    Returns:
        List of split names present under ``aishell1_dir`` but without a
        matching CSV in ``md_dir`` (order unspecified).
    """
    generated = {fname.split('.')[0] for fname in os.listdir(md_dir)}
    expected_splits = {'dev', 'test', 'train'}
    # Only the first level of subdirectories matters.
    present_splits = set(next(os.walk(aishell1_dir))[1]) & expected_splits
    return list(present_splits - generated)
def forward_vae_sample(vae: ConditionalVAE, x: TorchObservation, with_squash: bool=True) -> torch.Tensor:
    """Sample an action from a conditional VAE's decoder for observation ``x``.

    Draws a standard-normal latent, clamps it to [-0.5, 0.5] (the usual BCQ
    trick to stay near the prior), and decodes conditioned on ``x``.
    """
    batch_size = get_batch_size(x)
    latent = torch.randn((batch_size, vae.encoder.latent_size), device=get_device(x))
    return vae.decoder(x, latent.clamp((- 0.5), 0.5), with_squash=with_squash)
_cache(maxsize=32)
def _setup_so3_rotation(b, alpha, beta, gamma, device_type, device_index):
    """Build the SO(3) rotation (Wigner-D) matrices for bandwidth ``b`` and
    Euler angles (alpha, beta, gamma), as float32 tensors on the requested
    device. Thin device-placement wrapper over ``__setup_so3_rotation``."""
    Us = __setup_so3_rotation(b, alpha, beta, gamma)
    # Convert each numpy matrix to a tensor on the target device.
    Us = [torch.tensor(U, dtype=torch.float32, device=torch.device(device_type, device_index)) for U in Us]
    return Us
class ScoreCAM(ExplainerBase):
    """Backend-dispatching Score-CAM explainer: picks the PyTorch or
    TensorFlow implementation based on the model's type."""

    explanation_type = 'local'
    alias = ['scorecam', 'score-cam']

    def __init__(self, model, target_layer, preprocess_function: Callable, mode: str='classification', **kwargs):
        """Select and construct the backend-specific ScoreCAM.

        Raises:
            EnvironmentError: neither torch nor tensorflow is installed.
            ValueError: ``model`` is neither an nn.Module nor a keras Model.
        """
        super().__init__()
        if ((not is_tf_available()) and (not is_torch_available())):
            raise EnvironmentError('Both Torch and Tensorflow cannot be found.')
        _class = None
        # Try the torch backend first.
        if is_torch_available():
            import torch.nn as nn
            from .pytorch.scorecam import ScoreCAM
            if isinstance(model, nn.Module):
                _class = ScoreCAM
        # Fall back to the TF backend when torch didn't match.
        if ((_class is None) and is_tf_available()):
            import tensorflow as tf
            from .tf.scorecam import ScoreCAM
            if isinstance(model, tf.keras.Model):
                _class = ScoreCAM
        if (_class is None):
            raise ValueError(f'`model` should be a tf.keras.Model or a torch.nn.Module instead of {type(model)}')
        self.explainer = _class(model=model, target_layer=target_layer, preprocess_function=preprocess_function, mode=mode)

    def explain(self, X: Image, y=None, **kwargs):
        """Delegate explanation to the selected backend implementation."""
        return self.explainer.explain(X=X, y=y, **kwargs)
class ContourPlot(GraphicPrimitive):
    """Sage graphic primitive holding a 2-D grid of values to be rendered as
    a matplotlib contour (or filled-contour) plot."""

    def __init__(self, xy_data_array, xrange, yrange, options):
        """Store the data grid, its x/y extents and plotting options."""
        self.xrange = xrange
        self.yrange = yrange
        self.xy_data_array = xy_data_array
        self.xy_array_row = len(xy_data_array)
        self.xy_array_col = len(xy_data_array[0])
        GraphicPrimitive.__init__(self, options)

    def get_minmax_data(self):
        """Return the bounding box of this primitive as a dict."""
        from sage.plot.plot import minmax_data
        return minmax_data(self.xrange, self.yrange, dict=True)

    def _allowed_options(self):
        """Describe the options this primitive accepts."""
        return {'plot_points': 'How many points to use for plotting precision', 'cmap': 'the name of a predefined colormap,\n a list of colors, or an instance of a\n matplotlib Colormap. Type: import matplotlib.cm;\n matplotlib.cm.datad.keys()\n for available colormap names.', 'colorbar': 'Include a colorbar indicating the levels', 'colorbar_options': 'a dictionary of options for colorbars', 'fill': 'Fill contours or not', 'legend_label': 'The label for this item in the legend.', 'contours': 'Either an integer specifying the number of\n contour levels, or a sequence of numbers giving\n the actual contours to use.', 'linewidths': 'the width of the lines to be plotted', 'linestyles': 'the style of the lines to be plotted', 'labels': 'show line labels or not', 'label_options': 'a dictionary of options for the labels', 'zorder': 'The layer level in which to draw'}

    def _repr_(self):
        """Short text representation showing the grid dimensions."""
        msg = 'ContourPlot defined by a %s x %s data grid'
        return (msg % (self.xy_array_row, self.xy_array_col))

    def _render_on_subplot(self, subplot):
        """Draw the contours onto a matplotlib subplot, honoring cmap, fill,
        line style, label and colorbar options."""
        from sage.rings.integer import Integer
        options = self.options()
        fill = options['fill']
        contours = options['contours']
        # Choose a colormap: explicit 'cmap' wins; otherwise grayscale,
        # discretized to the number of contour levels when known.
        if ('cmap' in options):
            cmap = get_cmap(options['cmap'])
        elif (fill or (contours is None)):
            cmap = get_cmap('gray')
        elif isinstance(contours, (int, Integer)):
            cmap = get_cmap([(i, i, i) for i in xsrange(0, 1, (1 / contours))])
        else:
            step = (1 / Integer(len(contours)))
            cmap = get_cmap([(i, i, i) for i in xsrange(0, 1, step)])
        (x0, x1) = (float(self.xrange[0]), float(self.xrange[1]))
        (y0, y1) = (float(self.yrange[0]), float(self.yrange[1]))
        # matplotlib expects plain Python ints, not Sage Integers.
        if isinstance(contours, (int, Integer)):
            contours = int(contours)
        CSF = None
        if fill:
            if (contours is None):
                CSF = subplot.contourf(self.xy_data_array, cmap=cmap, extent=(x0, x1, y0, y1))
            else:
                CSF = subplot.contourf(self.xy_data_array, contours, cmap=cmap, extent=(x0, x1, y0, y1), extend='both')
        linewidths = options.get('linewidths', None)
        if isinstance(linewidths, (int, Integer)):
            linewidths = int(linewidths)
        elif isinstance(linewidths, (list, tuple)):
            linewidths = tuple((int(x) for x in linewidths))
        from sage.plot.misc import get_matplotlib_linestyle
        linestyles = options.get('linestyles', None)
        if isinstance(linestyles, (list, tuple)):
            linestyles = [get_matplotlib_linestyle(i, 'long') for i in linestyles]
        else:
            linestyles = get_matplotlib_linestyle(linestyles, 'long')
        if (contours is None):
            CS = subplot.contour(self.xy_data_array, cmap=cmap, extent=(x0, x1, y0, y1), linewidths=linewidths, linestyles=linestyles)
        else:
            CS = subplot.contour(self.xy_data_array, contours, cmap=cmap, extent=(x0, x1, y0, y1), linewidths=linewidths, linestyles=linestyles)
        if options.get('labels', False):
            label_options = options['label_options']
            label_options['fontsize'] = int(label_options['fontsize'])
            # NOTE(review): this branch looks unreachable/buggy — if
            # label_options were None the line above would already have
            # raised, and subscripting None here would too. Compare with the
            # upstream Sage source, which rebinds label_options instead.
            if (fill and (label_options is None)):
                label_options['inline'] = False
            subplot.clabel(CS, **label_options)
        if options.get('colorbar', False):
            colorbar_options = options['colorbar_options']
            from matplotlib import colorbar
            (cax, kwds) = colorbar.make_axes_gridspec(subplot, **colorbar_options)
            # Prefer the filled-contour set for the colorbar when present,
            # overlaying the contour lines on it.
            if (CSF is None):
                cb = colorbar.Colorbar(cax, CS, **kwds)
            else:
                cb = colorbar.Colorbar(cax, CSF, **kwds)
                cb.add_lines(CS)
def get_metric(y_true_aspect, y_predict_aspect, y_true_sentiment, y_predict_sentiment, mask, train_op):
    """Compute aspect/opinion/sentiment metrics for a batch.

    Args:
        y_true_aspect, y_predict_aspect: gold/predicted aspect tag sequences.
        y_true_sentiment, y_predict_sentiment: gold/predicted sentiment tags.
        mask: validity mask used to trim padded positions.
        train_op: when truthy, also score the opinion-term task.

    Returns:
        Tuple (f_aspect, f_opinion, acc_s, f_s, f_absa); ``f_opinion`` is 0
        when ``train_op`` is falsy.

    Bug fix: the original initialized unused placeholders ``f_a``/``f_o``
    but returned ``f_opinion``, which was only bound inside the ``train_op``
    branch — calling with a falsy ``train_op`` raised NameError. ``f_opinion``
    now defaults to 0.
    """
    (true_aspect, true_sentiment) = convert_to_list(y_true_aspect, y_true_sentiment, mask)
    (predict_aspect, predict_sentiment) = convert_to_list(y_predict_aspect, y_predict_sentiment, mask)
    (f_aspect, acc_s, f_s, f_absa) = score(true_aspect, predict_aspect, true_sentiment, predict_sentiment, 0)
    f_opinion = 0
    if train_op:
        (f_opinion, _, _, _) = score(true_aspect, predict_aspect, true_sentiment, predict_sentiment, 1)
    return (f_aspect, f_opinion, acc_s, f_s, f_absa)
class HybridTrainPipe(Pipeline):
    """NVIDIA DALI training pipeline: sharded file reading, random-crop JPEG
    decode (CPU or mixed/GPU), resize, and crop-mirror-normalize with random
    horizontal flips."""

    # Decoder scratch-buffer padding hints (bytes), taken from the official
    # NVIDIA DALI ImageNet example. The original line was a syntax error —
    # `device_memory_padding=, host_memory_padding=,` had lost its values.
    DEVICE_MEMORY_PADDING = 211025920
    HOST_MEMORY_PADDING = 140544512

    def __init__(self, batch_size, num_threads, device_id, data_dir, crop, dali_cpu=False, local_rank=0, world_size=1):
        super(HybridTrainPipe, self).__init__(batch_size, num_threads, device_id, seed=(12 + device_id))
        self.input = ops.FileReader(file_root=data_dir, shard_id=local_rank, num_shards=world_size, random_shuffle=True)
        if dali_cpu:
            dali_device = 'cpu'
            self.decode = ops.HostDecoderRandomCrop(device=dali_device, output_type=types.RGB, random_aspect_ratio=[0.8, 1.25], random_area=[0.1, 1.0], num_attempts=100)
        else:
            dali_device = 'gpu'
            # 'mixed' runs the hybrid CPU/GPU nvJPEG decoder.
            self.decode = ops.nvJPEGDecoderRandomCrop(device='mixed', output_type=types.RGB, device_memory_padding=self.DEVICE_MEMORY_PADDING, host_memory_padding=self.HOST_MEMORY_PADDING, random_aspect_ratio=[0.8, 1.25], random_area=[0.1, 1.0], num_attempts=100)
        self.res = ops.Resize(device=dali_device, resize_x=crop, resize_y=crop, interp_type=types.INTERP_TRIANGULAR)
        # ImageNet mean/std scaled to [0, 255] pixel range.
        self.cmnp = ops.CropMirrorNormalize(device='gpu', output_dtype=types.FLOAT, output_layout=types.NCHW, crop=(crop, crop), image_type=types.RGB, mean=[(0.485 * 255), (0.456 * 255), (0.406 * 255)], std=[(0.229 * 255), (0.224 * 255), (0.225 * 255)])
        self.coin = ops.CoinFlip(probability=0.5)
        print('DALI "{0}" variant'.format(dali_device))

    def define_graph(self):
        """Wire the DALI graph: read -> decode -> resize -> normalize(+flip)."""
        rng = self.coin()
        (self.jpegs, self.labels) = self.input(name='Reader')
        images = self.decode(self.jpegs)
        images = self.res(images)
        output = self.cmnp(images.gpu(), mirror=rng)
        return [output, self.labels]
class Zirilli(Benchmark):
    """Zirilli (Aluffi-Pentini) benchmark function.

    f(x) = 0.25*x1^4 - 0.5*x1^2 + 0.1*x1 + 0.5*x2^2, searched over
    [-10, 10]^2 with global optimum near (-1.0465, 0).
    """

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        self._bounds = [(-10.0, 10.0)] * self.N
        self.custom_bounds = ([-2.0, 2.0], [-2.0, 2.0])
        self.global_optimum = [[-1.0465, 0.0]]
        self.fglob = -0.0

    def fun(self, x, *args):
        """Evaluate the objective at ``x`` (counts the evaluation)."""
        self.nfev += 1
        x1, x2 = x[0], x[1]
        quartic = 0.25 * x1 ** 4 - 0.5 * x1 ** 2 + 0.1 * x1
        return quartic + 0.5 * x2 ** 2
def save_networks(path: str, net, name=None, *, backups: int=10, write_layers: bool=False, file_format=None):
    """Save ``net`` under ``path/name`` and prune old backups.

    Args:
        path: directory to save into (created if missing).
        net: network object handed to ``save_models``.
        name: save name; a date string is generated when None.
        backups: number of backups to keep; negative disables pruning.
        write_layers: forwarded to ``save_models``.
        file_format: forwarded to ``save_models``.

    Returns:
        The name actually used for the save.
    """
    os.makedirs(path, exist_ok=True)
    if name is None:
        name = get_date_string()
    save_models(os.path.join(path, name), net, write_layers=write_layers, file_format=file_format)
    # Negative `backups` means "keep everything".
    if backups >= 0:
        remove_backups(path, keeps=backups)
    return name
def tr_te_dataset(data_tr, data_te, batch_size):
    """Build a zipped, shuffled, dense tf.data dataset from two sparse
    train/test interaction matrices (TF1 / tf.contrib API).

    Args:
        data_tr, data_te: scipy sparse matrices of identical width.
        batch_size: fixed batch size (remainder batches are dropped so the
            asserted element shape holds).

    Returns:
        tf.data.Dataset yielding dense (train_batch, test_batch) pairs of
        shape (batch_size, n_items).
    """
    data_tr = data_tr.astype(np.float32)
    data_tr_coo = data_tr.tocoo()
    n_items = data_tr_coo.shape[1]
    # SparseTensor wants an (nnz, 2) matrix of [row, col] indices.
    indices = np.mat([data_tr_coo.row, data_tr_coo.col]).transpose()
    sparse_data_tr = tf.SparseTensor(indices, data_tr_coo.data, data_tr_coo.shape)
    data_te = data_te.astype(np.float32)
    data_te_coo = data_te.tocoo()
    indices = np.mat([data_te_coo.row, data_te_coo.col]).transpose()
    sparse_data_te = tf.SparseTensor(indices, data_te_coo.data, data_te_coo.shape)
    samples_tr = tf.data.Dataset.from_tensor_slices(sparse_data_tr)
    samples_te = tf.data.Dataset.from_tensor_slices(sparse_data_te)
    # drop_remainder keeps every batch exactly batch_size rows.
    dataset = tf.data.Dataset.zip((samples_tr, samples_te)).shuffle(10000).batch(batch_size, drop_remainder=True)
    dataset = dataset.map((lambda x, y: (tf.sparse_tensor_to_dense(x), tf.sparse_tensor_to_dense(y))))
    expected_shape = tf.TensorShape([batch_size, n_items])
    dataset = dataset.apply(tf.contrib.data.assert_element_shape((expected_shape, expected_shape)))
    return dataset
# NOTE(review): the two decorator lines below appear truncated by extraction -
# presumably ``@pytest.mark.fast`` and ``@pytest.mark.parametrize``.
.fast
.parametrize('max_seq_length1,length,max_seq_length,eos_token_id,chunk_size,num_iterations,gold_input_ids,gold_token_type_ids', [(MAX_SEQ_LENGTH, MAX_SEQ_LENGTH, MAX_SEQ_LENGTH, EOS, CHUNK_SIZE_1, 1, np.array([[0, 1, 2, 3]]), np.array([[0, (- 1), (- 2), (- 3)]])), (MAX_SEQ_LENGTH, MAX_SEQ_LENGTH, MAX_SEQ_LENGTH, EOS, CHUNK_SIZE_2, 1, np.array([[0, 1, 2, 3]]), np.array([[0, (- 1), (- 2), (- 3)]])), (MAX_SEQ_LENGTH, MAX_SEQ_LENGTH, MAX_SEQ_LENGTH, EOS, CHUNK_SIZE_3, 1, np.array([[0, 1, 2, 3]]), np.array([[0, (- 1), (- 2), (- 3)]])), (MAX_SEQ_LENGTH, MAX_SEQ_LENGTH, MAX_SEQ_LENGTH, EOS, CHUNK_SIZE_1, 2, np.array([[0, 1, 2, 3], [1, 2, 3, 4]]), np.array([[0, (- 1), (- 2), (- 3)], [1, 0, (- 1), (- 2)]])), (MAX_SEQ_LENGTH, MAX_SEQ_LENGTH, MAX_SEQ_LENGTH, EOS, CHUNK_SIZE_2, 2, np.array([[0, 1, 2, 3], [1, 2, 3, 4]]), np.array([[0, (- 1), (- 2), (- 3)], [1, 0, (- 1), (- 2)]])), (MAX_SEQ_LENGTH, MAX_SEQ_LENGTH, MAX_SEQ_LENGTH, EOS, CHUNK_SIZE_3, 2, np.array([[0, 1, 2, 3], [1, 2, 3, 4]]), np.array([[0, (- 1), (- 2), (- 3)], [1, 0, (- 1), (- 2)]])), (MAX_SEQ_LENGTH, MAX_SEQ_LENGTH, MAX_SEQ_LENGTH, EOS, CHUNK_SIZE_1, 3, np.array([[0, 1, 2, 3], [1, 2, 3, 4], [2, 3, 4, 5]]), np.array([[0, (- 1), (- 2), (- 3)], [1, 0, (- 1), (- 2)], [2, 1, 0, (- 1)]])), (MAX_SEQ_LENGTH, MAX_SEQ_LENGTH, MAX_SEQ_LENGTH, EOS, CHUNK_SIZE_2, 3, np.array([[0, 1, 2, 3], [1, 2, 3, 4], [2, 3, 4, 5]]), np.array([[0, (- 1), (- 2), (- 3)], [1, 0, (- 1), (- 2)], [2, 1, 0, (- 1)]])), (MAX_SEQ_LENGTH, MAX_SEQ_LENGTH, MAX_SEQ_LENGTH, EOS, CHUNK_SIZE_3, 3, np.array([[0, 1, 2, 3], [1, 2, 3, 4], [2, 3, 4, 5]]), np.array([[0, (- 1), (- 2), (- 3)], [1, 0, (- 1), (- 2)], [2, 1, 0, (- 1)]]))])
def test_hfd5_text_buffer_write(max_seq_length1: int, tokenized_line: TokenizedSequence, chunk_size: int, num_iterations: int, gold_input_ids: np.ndarray, gold_token_type_ids: np.ndarray):
    """Hdf5FileBuffer must append ``num_iterations`` shifted copies of a
    tokenized line and produce input_ids/token_type_ids datasets matching the
    gold arrays, independent of the configured HDF5 chunk size.

    NOTE(review): the parametrize string names more parameters (length,
    max_seq_length, eos_token_id) than this signature accepts; the extras are
    presumably consumed by fixtures (e.g. ``tokenized_line``) - verify.
    """
    with tempfile.TemporaryDirectory() as output_dir:
        hdf5_file_path = os.path.join(output_dir, 'temp.hdf5')
        with Hdf5FileBuffer(hdf5_file_path, max_seq_length1, False, DATA_TYPE, chunk_size) as f:
            for i in range(num_iterations):
                # Shift every token/type id by the iteration index so each
                # appended row is distinguishable in the output file.
                tokenized_line_copy = tokenized_line[:]
                tokens_ids = list(map((lambda x: (x + i)), tokenized_line_copy.dump_token_ids()))
                token_type_ids = list(map((lambda x: (x + i)), tokenized_line_copy.dump_token_type_ids()))
                tokenized_line_copy._tokens = list(map((lambda x: Token(x[0], x[1])), zip(tokens_ids, token_type_ids)))
                f.write([tokenized_line_copy])
        # Reopen the file and verify both datasets row by row.
        with h5py.File(hdf5_file_path, 'r') as f:
            assert (str(f.keys()) == "<KeysViewHDF5 ['input_ids', 'token_type_ids']>")
            assert (f['input_ids'].shape == gold_input_ids.shape)
            for (input_ids_i, gold_input_ids_i) in zip(f['input_ids'], gold_input_ids):
                assert all((input_ids_i == gold_input_ids_i))
            assert (f['token_type_ids'].shape == gold_token_type_ids.shape)
            for (token_type_ids_i, gold_token_type_ids_i) in zip(f['token_type_ids'], gold_token_type_ids):
                assert all((token_type_ids_i == gold_token_type_ids_i))
def test_heap(n):
    """Randomized stress test for MinMaxHeap against a plain-list mirror.

    Inserts ``n`` random values, then alternately pops the minimum or maximum
    (chosen at random), checking the min-max-heap invariant and the extremes
    against the mirror list after every operation.
    """
    from random import randint
    heap = MinMaxHeap(n)
    mirror = []
    for _ in range(n):
        value = randint(0, (5 * n))
        heap.insert(value)
        mirror.append(value)
    assert minmaxheapproperty(heap.a, len(heap))
    assert len(heap) == len(mirror)
    print(heap.a)
    while len(heap) > 0:
        assert min(mirror) == heap.peekmin()
        assert max(mirror) == heap.peekmax()
        if randint(0, 1):
            extracted = heap.popmin()
            assert extracted == min(mirror)
        else:
            extracted = heap.popmax()
            assert extracted == max(mirror)
        # O(1) removal from the mirror: overwrite with the tail, drop the tail.
        mirror[mirror.index(extracted)] = mirror[-1]
        mirror.pop()
        assert len(heap) == len(mirror)
        assert minmaxheapproperty(heap.a, len(heap))
    print('OK')
def precook(s, n=4, out=False):
    """Count all n-grams of orders 1..n in a whitespace-tokenized sentence.

    Args:
        s: sentence string; tokens are obtained with ``str.split()``.
        n: maximum n-gram order to count.
        out: unused; kept for backward compatibility with existing callers.

    Returns:
        Tuple ``(length, counts)`` where ``length`` is the number of tokens
        and ``counts`` maps each n-gram tuple to its occurrence count.
    """
    words = s.split()
    counts = defaultdict(int)
    # Bug fix: the original used Python-2 ``xrange``, which raises NameError
    # on Python 3; ``range`` is the equivalent lazy iterator there.
    for k in range(1, n + 1):
        for i in range(len(words) - k + 1):
            counts[tuple(words[i:i + k])] += 1
    return (len(words), counts)
def test_sugar_2():
    """Regression test: sugar torsion angles for two residues vs reference file."""
    resi = ['RC5_1_0', 'RG_69_0']
    angles = ['nu1', 'nu4', 'nu3']
    (sugar_b, rr) = bb.sugar_angles(fname, residues=resi, angles=angles)
    # Header row: '#' placeholder followed by one column label per angle.
    pieces = [('%20s ' % '#')]
    for pp in angles:
        pieces.append((' %10s ' % pp))
    pieces.append('\n')
    # One row per residue with its angle values from the first frame.
    for e in range(sugar_b.shape[1]):
        pieces.append(('%20s ' % rr[e]))
        for k in range(sugar_b.shape[2]):
            pieces.append((' %10.4f ' % sugar_b[(0, e, k)]))
        pieces.append('\n')
    with open(('%s/sugar_02.test.dat' % outdir), 'w') as fh:
        fh.write(''.join(pieces))
    comp(('%s/sugar_02.test.dat' % refdir))
def is_tensor(x):
    """Return True when *x* is a torch.Tensor or a numpy.ndarray."""
    return isinstance(x, (torch.Tensor, np.ndarray))
class CoercionHMtoPD(HyperbolicModelCoercion):
    """Coercion of points and isometries from the hyperboloid model (HM)
    to the Poincare disk model (PD)."""
    def image_coordinates(self, x):
        # Map the hyperboloid point (x0, x1, x2) to the disk as a complex
        # number (x0 + i*x1) / (1 + x2).
        return ((x[0] / (1 + x[2])) + (I * (x[1] / (1 + x[2]))))
    def image_isometry_matrix(self, x):
        # Conjugate the SL(2,R) lift of the SO(2,1) isometry by the
        # Cayley-type matrices [[1,-i],[-i,1]] and [[1,i],[i,1]], halved.
        # NOTE(review): relies on Sage globals (I, matrix, Integer,
        # SO21_to_SL2R) provided elsewhere in this module.
        return (((matrix(2, [1, (- I), (- I), 1]) * SO21_to_SL2R(x)) * matrix(2, [1, I, I, 1])) / Integer(2))
def map_arg(a: Argument, fn: Callable[([Node], Argument)]) -> Argument:
    """Recursively apply *fn* to every Node found inside *a*.

    Tuples/lists/dicts/slices are rebuilt with mapped contents (preserving
    the concrete container type); any non-Node leaf is returned unchanged.
    """
    if isinstance(a, (tuple, list)):
        return type(a)(map_arg(item, fn) for item in a)
    if isinstance(a, dict):
        return {key: map_arg(value, fn) for (key, value) in a.items()}
    if isinstance(a, slice):
        # Map each of start/stop/step independently.
        return slice(map_arg(a.start, fn), map_arg(a.stop, fn), map_arg(a.step, fn))
    if isinstance(a, Node):
        return fn(a)
    return a
def dump_tsvs(dataset, fpath):
    """Write every table in *dataset* to ``<fpath>/<name>/<name>.tsv``.

    *dataset* maps table names to dicts of rows (row_id -> {column: value}).
    Columns are emitted in sorted order; the header line comes from the
    first row's keys.
    """
    for name in dataset:
        table_dir = f'{fpath}/{name}'
        if not os.path.exists(table_dir):
            os.makedirs(table_dir)
        with open(f'{fpath}/{name}/{name}.tsv', 'w') as fp:
            for i, row_id in enumerate(dataset[name]):
                row = dataset[name][row_id]
                header = sorted(row.keys())
                if i == 0:
                    fp.write('\t'.join(header) + '\n')
                fp.write('\t'.join(str(row[col]) for col in header) + '\n')
def generate_pose3_extra_factors(output_dir: T.Openable) -> None:
    """Generate C++ linearized factors for Pose3 sub-components into *output_dir*.

    Emits rotation-only, position-only, and translation-norm between factors,
    plus rotation and position priors, each with linearization w.r.t. the
    optimized arguments.
    """
    def between_factor_pose3_rotation(a: sf.Pose3, b: sf.Pose3, a_R_b: sf.Rot3, sqrt_info: sf.Matrix33, epsilon: sf.Scalar=0) -> sf.Matrix:
        # Whitened tangent-space error between the measured relative rotation
        # and the rotation of between(a, b).
        tangent_error = ops.LieGroupOps.local_coordinates(a_R_b, ops.LieGroupOps.between(a, b).R, epsilon=epsilon)
        return (sqrt_info * sf.M(tangent_error))
    def between_factor_pose3_position(a: sf.Pose3, b: sf.Pose3, a_t_b: sf.Vector3, sqrt_info: sf.Matrix33, epsilon: sf.Scalar=0) -> sf.Matrix:
        # Same pattern for the translation component of between(a, b).
        tangent_error = ops.LieGroupOps.local_coordinates(a_t_b, ops.LieGroupOps.between(a, b).t, epsilon=epsilon)
        return (sqrt_info * sf.M(tangent_error))
    def between_factor_pose3_translation_norm(a: sf.Pose3, b: sf.Pose3, translation_norm: sf.Scalar, sqrt_info: sf.Matrix11, epsilon: sf.Scalar=0) -> sf.Matrix:
        # Scalar residual: measured norm minus the current inter-pose distance.
        error = (translation_norm - (a.t - b.t).norm(epsilon))
        return (sqrt_info * sf.M([error]))
    def prior_factor_pose3_rotation(value: sf.Pose3, prior: sf.Rot3, sqrt_info: sf.Matrix33, epsilon: sf.Scalar=0) -> sf.Matrix:
        # Delegate to the generic prior factor on the rotation component.
        return prior_factor(value.R, prior, sqrt_info, epsilon)
    def prior_factor_pose3_position(value: sf.Pose3, prior: sf.Vector3, sqrt_info: sf.Matrix33, epsilon: sf.Scalar=0) -> sf.Matrix:
        # Delegate to the generic prior factor on the position component.
        return prior_factor(value.t, prior, sqrt_info, epsilon)
    # Code generation: each factor is linearized w.r.t. its optimized args and
    # written flat into output_dir (skip_directory_nesting=True).
    between_rotation_codegen = Codegen.function(func=between_factor_pose3_rotation, output_names=['res'], config=CppConfig(), docstring=get_between_factor_docstring('a_R_b')).with_linearization(name='between_factor_pose3_rotation', which_args=['a', 'b'])
    between_rotation_codegen.generate_function(output_dir, skip_directory_nesting=True)
    between_position_codegen = Codegen.function(func=between_factor_pose3_position, output_names=['res'], config=CppConfig(), docstring=get_between_factor_docstring('a_t_b')).with_linearization(name='between_factor_pose3_position', which_args=['a', 'b'])
    between_position_codegen.generate_function(output_dir, skip_directory_nesting=True)
    between_translation_norm_codegen = Codegen.function(func=between_factor_pose3_translation_norm, output_names=['res'], config=CppConfig()).with_linearization(name='between_factor_pose3_translation_norm', which_args=['a', 'b'])
    between_translation_norm_codegen.generate_function(output_dir, skip_directory_nesting=True)
    prior_rotation_codegen = Codegen.function(func=prior_factor_pose3_rotation, output_names=['res'], config=CppConfig(), docstring=get_prior_docstring()).with_linearization(name='prior_factor_pose3_rotation', which_args=['value'])
    prior_rotation_codegen.generate_function(output_dir, skip_directory_nesting=True)
    prior_position_codegen = Codegen.function(func=prior_factor_pose3_position, output_names=['res'], config=CppConfig(), docstring=get_prior_docstring()).with_linearization(name='prior_factor_pose3_position', which_args=['value'])
    prior_position_codegen.generate_function(output_dir, skip_directory_nesting=True)
def export_ego_poses(nusc: NuScenes, out_dir: str):
    """Render one ego-pose overview map per log location and save each as a
    PNG named ``egoposes-<location>.png`` inside *out_dir* (created if
    missing)."""
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    # Each location appears once even if several logs share it.
    for loc in np.unique([log['location'] for log in nusc.log]):
        print('Rendering map {}...'.format(loc))
        nusc.render_egoposes_on_map(loc)
        plt.tight_layout()
        plt.savefig(os.path.join(out_dir, 'egoposes-{}.png'.format(loc)))
def replaces_method(func: Callable[(..., Tuple[str])], classname: str, method_name: str):
    """Register *func* as the replacement implementation for
    ``classname.method_name`` and return it unchanged (decorator-friendly)."""
    key = (classname, method_name)
    Replacements._method_rep[key] = func
    return func
def test_additive_aav_packaging():
    """Smoke test: the additive AAV-packaging 'heart' landscape scores a batch
    of random amino-acid sequences without raising."""
    registry_entry = flexs.landscapes.additive_aav_packaging.registry()['heart']
    landscape = flexs.landscapes.AdditiveAAVPackaging(**registry_entry['params'])
    random_seqs = s_utils.generate_random_sequences(90, 100, s_utils.AAS)
    landscape.get_fitness(random_seqs)
class Algorithm(str, enum.Enum):
    """Selectable search algorithms; string-valued so members compare and
    serialize directly as their names."""
    DYNAMOSA = 'DYNAMOSA'
    MIO = 'MIO'
    MOSA = 'MOSA'
    RANDOM = 'RANDOM'
    RANDOM_TEST_SUITE_SEARCH = 'RANDOM_TEST_SUITE_SEARCH'
    RANDOM_TEST_CASE_SEARCH = 'RANDOM_TEST_CASE_SEARCH'
    WHOLE_SUITE = 'WHOLE_SUITE'
def draw_stickfigure3d(mocap_track, frame, data=None, joints=None, draw_names=False, ax=None, figsize=(8, 8)):
    """Draw a 3D stick figure for one frame of a mocap track.

    Args:
        mocap_track: object exposing ``.skeleton`` (joint -> {'children': [...]})
            and ``.values`` (frame-indexed columns '<joint>_{X,Y,Z}position').
        frame: frame index to render.
        data: optional frame data overriding ``mocap_track.values``.
        joints: optional subset of joint names to draw (default: all).
        draw_names: when True, label each drawn joint with its name.
        ax: existing 3D axes to draw on; a new figure/axes is created when None.
        figsize: figure size, used only when a new figure is created.

    Returns:
        The matplotlib 3D axes that were drawn on.
    """
    if ax is None:
        # Imported lazily: registering the '3d' projection is only needed when
        # we create the axes ourselves, so callers who pass their own ``ax``
        # do not require mpl_toolkits at all.
        from mpl_toolkits.mplot3d import Axes3D  # noqa: F401
        fig = plt.figure(figsize=figsize)
        ax = fig.add_subplot(111, projection='3d')
    if joints is None:
        joints_to_draw = mocap_track.skeleton.keys()
    else:
        joints_to_draw = joints
    if data is None:
        df = mocap_track.values
    else:
        df = data
    for joint in joints_to_draw:
        # Mocap Y (vertical) is mapped to matplotlib's Z axis, and vice versa.
        parent_x = df[('%s_Xposition' % joint)][frame]
        parent_y = df[('%s_Zposition' % joint)][frame]
        parent_z = df[('%s_Yposition' % joint)][frame]
        ax.scatter(xs=parent_x, ys=parent_y, zs=parent_z, alpha=0.6, c='b', marker='o')
        children_to_draw = [c for c in mocap_track.skeleton[joint]['children'] if (c in joints_to_draw)]
        for c in children_to_draw:
            child_x = df[('%s_Xposition' % c)][frame]
            child_y = df[('%s_Zposition' % c)][frame]
            child_z = df[('%s_Yposition' % c)][frame]
            ax.plot([parent_x, child_x], [parent_y, child_y], [parent_z, child_z], 'k-', lw=2, c='black')
        if draw_names:
            # Bug fix: the original passed the malformed string 'rgba(0,0,0,0.9'
            # (missing paren, and CSS rgba() strings are not valid matplotlib
            # colors anyway), which raises on draw; use an RGBA tuple instead.
            ax.text(x=(parent_x + 0.1), y=(parent_y + 0.1), z=(parent_z + 0.1), s=joint, color=(0, 0, 0, 0.9))
    return ax
class ResNeXt(nn.Module):
    """ResNeXt-style classifier: conv stem, three Block stages, then a
    BatchNorm'd dense head producing ``num_classes`` logits."""

    def __init__(self, num_blocks, cardinality, bottleneck_width, num_classes=200):
        super(ResNeXt, self).__init__()
        self.cardinality = cardinality
        self.bottleneck_width = bottleneck_width
        self.in_planes = 16
        # Stem: 3x3 conv that keeps the spatial resolution.
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, bias=True, padding=1)
        self.bn1 = nn.BatchNorm2d(16)
        self.layer1 = self._make_layer(num_blocks[0], 1)
        self.layer2 = self._make_layer(num_blocks[1], 2)
        self.layer3 = self._make_layer(num_blocks[2], 2)
        # Note: built from the *constructor* arguments, not the (by now
        # doubled) self.bottleneck_width.
        self.linear1 = nn.Linear(cardinality * bottleneck_width * 2048, 512)
        self.bn_dense = nn.BatchNorm1d(512)
        self.linear2 = nn.Linear(512, num_classes)
        self.relu = nn.ReLU()

    def _make_layer(self, num_blocks, stride):
        """Stack ``num_blocks`` Blocks; only the first uses ``stride``."""
        blocks = []
        for s in [stride] + [1] * (num_blocks - 1):
            blocks.append(Block(self.in_planes, self.cardinality, self.bottleneck_width, s))
            # Each Block widens the channel count; track it for the next one.
            self.in_planes = Block.expansion * self.cardinality * self.bottleneck_width
        # Double the group width for the next stage.
        self.bottleneck_width *= 2
        return nn.Sequential(*blocks)

    def forward(self, x):
        """Run the network; returns raw class logits."""
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.layer3(self.layer2(self.layer1(out)))
        out = out.view(out.size(0), -1)
        out = F.relu(self.bn_dense(self.linear1(out)))
        return self.linear2(out)
def _parse_returns_section(self: NumpyDocstring, section: str) -> list[str]:
    """Parse a NumPy-style ``Returns`` section into ``:returns:`` lines.

    Consumes the section body, strips a stray leading ':' artifact, formats
    the processed return description as a reST field block, and terminates it
    with a blank line.
    """
    lines_raw = self._dedent(self._consume_to_next_section())
    # Robustness fix: guard against an empty section before inspecting the
    # first line (the original indexed blindly and could raise IndexError).
    if lines_raw and (lines_raw[0] == ':'):
        del lines_raw[0]
    lines = self._format_block(':returns: ', list(_process_return(lines_raw)))
    if lines and lines[-1]:
        # Ensure the field block is followed by a blank separator line.
        lines.append('')
    return lines
def finetune_m0(args):
    """Train an initial PinSAGE model and fine-tune it over successive time
    slices of a dynamic recommendation dataset.

    For each consecutive pair (t_i, t_{i+1}) in ``sliced_time_list`` the model
    is trained on the graph at t_i and evaluated on t_{i+1}; the best
    embeddings/metrics/weights per slice are accumulated in ``checkpoint`` and
    optionally saved to ``args.checkpoint_path``.

    NOTE(review): ``sliced_time_list`` is not defined in this function -
    presumably a module-level global; confirm before reuse.
    """
    print('Train m0 and finetune it with new data over time')
    print(args)
    device = (torch.device(('cuda:' + str(args.device))) if torch.cuda.is_available() else torch.device('cpu'))
    dataset = DynRecDataset(name=args.dataset)
    pinsage_hyperparam_list = get_pinsage_hyperparam_list(dataset_name=args.dataset)
    # Optional tensorboard logging; any stale log directory is wiped first.
    if (args.log_dir != ''):
        if os.path.exists(args.log_dir):
            print('Removing existing tensorboard log..')
            shutil.rmtree(args.log_dir)
        writer = SummaryWriter(log_dir=args.log_dir)
    else:
        writer = None
    if (args.checkpoint_path != ''):
        os.makedirs(os.path.dirname(args.checkpoint_path), exist_ok=True)
    # Recall@K is evaluated for each K; K_primary drives model selection.
    K_list = [10, 20, 50, 100]
    K_primary = 50
    checkpoint = {'time_list': sliced_time_list, 'args': args.__dict__, 'pinsage_hyperparam_list': pinsage_hyperparam_list, 'best_val_recall_dict_list': [], 'best_embedding_list': [], 'model_state_dict_list': []}
    print('PinSAGE hyperparameters')
    print(pinsage_hyperparam_list[0]['emb_dim'])
    print(pinsage_hyperparam_list[0]['num_layers'])
    print()
    print('Training PinSAGE...')
    model = PinSAGE(emb_dim=pinsage_hyperparam_list[0]['emb_dim'], num_layers=pinsage_hyperparam_list[0]['num_layers'], item_encoder=ItemEncoder(pinsage_hyperparam_list[0]['emb_dim'], dataset.num_item_attrs_dict)).to(device)
    for i in range((len(sliced_time_list) - 1)):
        # Train on the graph at time_train; validate on the new edges of time_val.
        time_train = sliced_time_list[i]
        time_val = sliced_time_list[(i + 1)]
        print(f'=======Train on G{time_train}, evaluate on G{time_val}\G{time_train}')
        (edge_index_useritem_dict, num_users_dict, num_items_dict, _) = split_dynrecdataset(dataset, time_train=time_train, time_val=time_val)
        time_dict = {}
        time_dict['train'] = time_train
        time_dict['val'] = time_val
        print('====Basic stats')
        split_list = ['train', 'val']
        for split in split_list:
            # NOTE(review): ``time`` shadows the stdlib module name here.
            time = time_dict[split]
            print(f'time: {time}')
            print(f'#{split} users: ', num_users_dict[split])
            print(f'#{split} items: ', num_items_dict[split])
            print(f'#{split} edges: ', len(edge_index_useritem_dict[split][0]))
            print()
        # Move item-attribute tensors for the validation snapshot to the device.
        item_attr_pair_dict = dataset.item_attr_pair_dict(time_dict['val'])
        for (item_attr_name, (item_attr, item_attr_offset)) in item_attr_pair_dict.items():
            item_attr_pair_dict[item_attr_name] = (item_attr.to(device), item_attr_offset.to(device))
        train_dataset = RecDatasetNegsamling(edge_index_useritem_dict['train'][0], edge_index_useritem_dict['train'][1], num_users_dict['train'], num_items_dict['train'])
        train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
        train_config_dict = {'num_users': num_users_dict['train'], 'num_items': num_items_dict['train'], 'user': edge_index_useritem_dict['train'][0].to(device), 'item': edge_index_useritem_dict['train'][1].to(device), 'item_attr_pair_dict': item_attr_pair_dict}
        eval_dict = {}
        evaluator = RecallEvaluator(edge_index_useritem_dict, num_users_dict, num_items_dict)
        eval_dict['evaluator'] = evaluator
        eval_dict['config_dict'] = train_config_dict
        eval_dict['time_train'] = time_train
        eval_dict['time_val'] = time_val
        # Fresh optimizer each slice; the model weights carry over (fine-tuning).
        optimizer = optim.Adam(model.parameters(), lr=0.001)
        if (i == 0):
            total_epochs = args.epochs
            print(f'Training from scratch for {total_epochs}')
        else:
            # Shorter fine-tuning rounds after the initial training.
            # NOTE(review): the message below still says "from scratch".
            total_epochs = 250
            print(f'Training from scratch for {total_epochs}')
        (best_embedding, best_val_recall_dict, best_model) = train_eval_loop_pinsage(args, model, device, train_dataset, train_loader, optimizer, train_config_dict, eval_dict, K_list, K_primary, time_train, time_val, writer, total_epochs)
        print(best_embedding)
        print(best_val_recall_dict)
        print('loading from the previous best checkpoint')
        model.load_state_dict(best_model.state_dict())
        checkpoint['best_embedding_list'].append(best_embedding)
        checkpoint['best_val_recall_dict_list'].append(best_val_recall_dict)
        checkpoint['model_state_dict_list'].append(best_model.state_dict())
        # Persist after every slice so progress survives interruptions.
        if (args.checkpoint_path != ''):
            torch.save(checkpoint, args.checkpoint_path)
    if (writer is not None):
        writer.close()
def executeShTest(test, litConfig, useExternalSh, extra_substitutions=[]):
    """Run a lit shell test: parse its integrated script, execute it, and
    classify the outcome as PASS/FAIL (or UNSUPPORTED / noExecute PASS).

    NOTE(review): the mutable default ``extra_substitutions=[]`` is kept for
    interface compatibility; it is only read here, never mutated.
    """
    if test.config.unsupported:
        return (Test.UNSUPPORTED, 'Test is unsupported')
    res = parseIntegratedTestScript(test, useExternalSh, extra_substitutions)
    if isinstance(res, lit.Test.Result):
        # Parsing itself produced a terminal result (e.g. a parse error).
        return res
    if litConfig.noExecute:
        return lit.Test.Result(Test.PASS)
    (script, tmpBase, execdir) = res
    lit.util.mkdir_p(os.path.dirname(tmpBase))
    runner = executeScript if useExternalSh else executeScriptInternal
    res = runner(test, litConfig, tmpBase, script, execdir)
    if isinstance(res, lit.Test.Result):
        return res
    (out, err, exitCode) = res
    status = Test.PASS if exitCode == 0 else Test.FAIL
    # Assemble the diagnostic transcript shown for the test.
    output = 'Script:\n--\n%s\n--\nExit Code: %d\n\n' % ('\n'.join(script), exitCode)
    if out:
        output += 'Command Output (stdout):\n--\n%s\n--\n' % (out,)
    if err:
        output += 'Command Output (stderr):\n--\n%s\n--\n' % (err,)
    return lit.Test.Result(status, output)
class LowRatingFilter(_BaseFilter):
    """Keep only interactions whose rating is at or above a threshold."""
    def __init__(self, value: float, rating_column: str='rating'):
        """
        Args:
            value: minimum rating (inclusive) an interaction must have to pass.
            rating_column: name of the rating column in the interactions frame.
        """
        self.value = value
        self.rating_column = rating_column
    def _filter_spark(self, interactions: SparkDataFrame) -> SparkDataFrame:
        # Spark path: column-expression filter, rating >= threshold.
        return interactions.filter((interactions[self.rating_column] >= self.value))
    def _filter_pandas(self, interactions: PandasDataFrame) -> PandasDataFrame:
        # Pandas path: boolean-mask selection with the same threshold.
        return interactions[(interactions[self.rating_column] >= self.value)]
class LogsigmoidLoss(BaseLogsigmoidLoss):
    """Negative log-sigmoid loss on the label-signed score:
    ``-logsigmoid(label * score)``."""
    def __init__(self):
        super(LogsigmoidLoss, self).__init__()
    def __call__(self, score: th.Tensor, label):
        # label flips the sign of the score for negative examples.
        signed_score = label * score
        return -logsigmoid(signed_score)
def get_corpus(products, keys=('name', 'small_description'), category_type='category'):
    """Build a per-category corpus of (asin, text) pairs from a product dict.

    Args:
        products: mapping of product id -> product record dict.
        keys: record fields concatenated (lowercased) into each document; the
            special key 'review' joins all review texts of the product.
        category_type: record field used to group products into categories.

    Returns:
        defaultdict mapping category -> list of (asin, document_text) tuples,
        with each asin appearing at most once per category.
    """
    all_products = list(products.values())
    asins_by_cat = defaultdict(set)
    corpus_by_cat = defaultdict(list)
    for p in all_products:
        category = p[category_type]
        asin = p['asin']
        if asin in asins_by_cat[category]:
            # Skip duplicate products within the same category.
            continue
        asins_by_cat[category].add(asin)
        text = []
        for key in keys:
            if key == 'review':
                rs = p['review']['reviews']
                # Bug fix: the original tested the undefined name ``r``
                # (NameError at runtime); the reviews list ``rs`` is what
                # must be checked for None here.
                if rs is not None:
                    text_ = ' '.join([r['review'].lower() for r in rs])
                else:
                    text_ = ''
            else:
                text_ = p[key].lower()
            text.append(text_)
        text = ' '.join(text)
        corpus_by_cat[category].append((asin, text))
    return corpus_by_cat
class SpeedtestHTTPConnection(HTTPConnection):
    """HTTPConnection that adds ``source_address``/``timeout`` support across
    interpreter versions whose base HTTPConnection lacks those kwargs."""
    def __init__(self, *args, **kwargs):
        # Pop the extensions before delegating so older HTTPConnection
        # signatures do not reject unknown keyword arguments.
        source_address = kwargs.pop('source_address', None)
        timeout = kwargs.pop('timeout', 10)
        self._tunnel_host = None
        HTTPConnection.__init__(self, *args, **kwargs)
        self.source_address = source_address
        self.timeout = timeout
    def connect(self):
        """Open the socket; fall back to a local shim when the stdlib
        ``create_connection`` rejects the extra arguments."""
        try:
            self.sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address)
        except (AttributeError, TypeError):
            # NOTE(review): ``create_connection`` here is presumably a
            # module-level compatibility shim defined elsewhere in this file.
            self.sock = create_connection((self.host, self.port), self.timeout, self.source_address)
        if self._tunnel_host:
            # Establish the CONNECT tunnel when routed through a proxy.
            self._tunnel()
def _should_continue(line, indent):
return (line.startswith(indent) or (len(line) <= 1) or (re.search('^\\s*\\)(\\s*->.*:|:)\\s*$', line) is not None)) |
def limit_lines(lines, N=32):
    """Truncate *lines* to a marker plus the last *N* entries once the list
    grows beyond 2*N; shorter lists are returned unchanged (same object)."""
    if len(lines) <= 2 * N:
        return lines
    return [b'... showing only last few lines ...'] + lines[-N:]
class SetPartitionsTk_k(SetPartitionsBk_k):
    """Set partitions of B_k type that are additionally planar (presumably
    the diagram basis T_k of the Temperley-Lieb algebra)."""
    def _repr_(self):
        return (SetPartitionsBk_k._repr_(self) + ' and that are planar')
    def __contains__(self, x):
        # Membership requires both the B_k condition and planarity.
        if (not SetPartitionsBk_k.__contains__(self, x)):
            return False
        if (not is_planar(x)):
            return False
        return True
    def cardinality(self):
        # Planar B_k set partitions are counted by the k-th Catalan number.
        return catalan_number(self.k)
    def __iter__(self):
        # Filter the B_k iterator down to planar diagrams.
        for sp in SetPartitionsBk_k.__iter__(self):
            if is_planar(sp):
                (yield self.element_class(self, sp))
class RandomImgAugment(object):
    """Joint random augmentation (resize / flip / rotate / photometric jitter)
    for an (img1, img2, depth, phase, fb) tuple used in stereo/depth training.

    NOTE(review): when constructed with size=[-1], the first __call__ rewrites
    self.size to a concrete (h, w) - the transform is stateful and reuses that
    size for every later sample; confirm this is intended.
    """
    def __init__(self, no_flip, no_rotation, no_augment, size=None):
        # Flags arrive negated ("no_*"); store the positive capabilities.
        self.flip = (not no_flip)
        self.augment = (not no_augment)
        self.rotation = (not no_rotation)
        self.size = size
    def __call__(self, inputs):
        """Apply the configured augmentations.

        inputs: (img1, img2, depth, phase, fb) where phase is 'test' or a
        training phase and fb is a scalar rescaled with the image width.
        Returns (img1, img2, depth, fb).
        """
        img1 = inputs[0]
        img2 = inputs[1]
        depth = inputs[2]
        phase = inputs[3]
        fb = inputs[4]
        h = img1.height
        w = img1.width
        w0 = w
        if (self.size == [(- 1)]):
            # Auto-size: round dimensions up to the nearest multiple of 32 and
            # remember the result (mutates self.size - see class note).
            divisor = 32.0
            h = int((math.ceil((h / divisor)) * divisor))
            w = int((math.ceil((w / divisor)) * divisor))
            self.size = (h, w)
        scale_transform = transforms.Compose([transforms.Resize(self.size, Image.BICUBIC)])
        img1 = scale_transform(img1)
        if (img2 is not None):
            img2 = scale_transform(img2)
        if (fb is not None):
            # Keep fb consistent with the horizontal rescale factor.
            scale = (float(self.size[1]) / float(w0))
            fb = (fb * scale)
        if (phase == 'test'):
            # Test phase: resize only, no stochastic augmentation.
            return (img1, img2, depth, fb)
        if (depth is not None):
            scale_transform_d = transforms.Compose([transforms.Resize(self.size, Image.BICUBIC)])
            depth = scale_transform_d(depth)
        if (not (self.size == 0)):
            # Normalize 16-bit depth to [0, 1], clamping negatives to 0.
            if (depth is not None):
                arr_depth = np.array(depth, dtype=np.float32)
                arr_depth /= 65535.0
                arr_depth[(arr_depth < 0.0)] = 0.0
                depth = Image.fromarray(arr_depth, 'F')
        if (self.flip and (not ((img2 is not None) and (depth is not None)))):
            # Random horizontal flip; with a stereo pair the two views are
            # also swapped so left/right geometry stays consistent.
            flip_prob = random.random()
            flip_transform = transforms.Compose([RandomHorizontalFlip(flip_prob)])
            if (img2 is None):
                img1 = flip_transform(img1)
            elif (flip_prob < 0.5):
                img1_ = img1
                img2_ = img2
                img1 = flip_transform(img2_)
                img2 = flip_transform(img1_)
            if (depth is not None):
                depth = flip_transform(depth)
        if (self.rotation and (not ((img2 is not None) and (depth is not None)))):
            if (random.random() < 0.5):
                # Small random rotation in [-5, 5) degrees, applied to all inputs.
                degree = (random.randrange((- 500), 500) / 100)
                img1 = F.rotate(img1, degree, Image.BICUBIC)
                if (depth is not None):
                    depth = F.rotate(depth, degree, Image.BILINEAR)
                if (img2 is not None):
                    img2 = F.rotate(img2, degree, Image.BICUBIC)
        if (depth is not None):
            # Map depth from [0, 1] to [-1, 1].
            depth = np.array(depth, dtype=np.float32)
            depth = (depth * 2.0)
            depth -= 1.0
        if self.augment:
            if (random.random() < 0.5):
                # Shared photometric jitter applied identically to both views.
                brightness = random.uniform(0.8, 1.0)
                contrast = random.uniform(0.8, 1.0)
                saturation = random.uniform(0.8, 1.0)
                img1 = F.adjust_brightness(img1, brightness)
                img1 = F.adjust_contrast(img1, contrast)
                img1 = F.adjust_saturation(img1, saturation)
                if (img2 is not None):
                    img2 = F.adjust_brightness(img2, brightness)
                    img2 = F.adjust_contrast(img2, contrast)
                    img2 = F.adjust_saturation(img2, saturation)
        return (img1, img2, depth, fb)
def gof(G, Aobs, changestats_func_list, theta, numSamples=1000, sampler_func=basicALAAMsampler, Ainitial=None, iterationInStep=1000, burnIn=10000):
    """Goodness-of-fit for an ALAAM: simulate from the fitted model and
    compare simulated statistics against the observed ones.

    Returns:
        (tratio, mahaldist): per-statistic t-ratios
        ((sim mean - observed) / sim s.d.) and the Mahalanobis distance of
        the observed statistic vector from the simulated sample.
    """
    num_stats = len(changestats_func_list)
    assert len(theta) == num_stats
    print('Gof numSamples =', numSamples, 'iterationInStep =', iterationInStep, 'burnIn = ', burnIn)
    Zobs = computeObservedStatistics(G, Aobs, changestats_func_list)
    sim_results = simulateALAAM(G, changestats_func_list, theta, numSamples, iterationInStep, burnIn, sampler_func, Ainitial, Aobs=Aobs)
    # Stack the per-sample statistic vectors into a (numSamples, num_stats) matrix.
    Zmatrix = np.stack([sample[1] for sample in sim_results])
    assert np.shape(Zmatrix) == (numSamples, num_stats)
    Zmean = np.mean(Zmatrix, axis=0)
    Zsd = np.std(Zmatrix, axis=0)
    print('Zmatrix = ', Zmatrix)
    print('obs stats =', Zobs)
    print('mean stats =', Zmean)
    print('sd stats =', Zsd)
    tratio = (Zmean - Zobs) / Zsd
    mahaldist = mahalanobis(Zobs, Zmatrix)
    return (tratio, mahaldist)
class DummyModelHandler(CommonModelHandler):
    """Model handler that lazily builds a dummy T5 model via DumT5Partitioner,
    caching the instance together with its tokenizer and config."""
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
    def _get_normal_model_instance(self, *args, **kwargs):
        # Build (and cache) the model only on first request.
        if (self.normal_model_instance is None):
            # NOTE(review): this rebinds ``args`` (the *args parameter) to a
            # fresh SimpleNamespace, discarding any positional arguments, and
            # mutates it after handing it to the partitioner - the partitioner
            # presumably reads these flags later, at get_model() time; verify.
            args = SimpleNamespace()
            p = DumT5Partitioner(args)
            args.lmhead = True
            args.stateless_tied = True
            args.precompute_masks = False
            self.normal_model_instance = p.get_model(args)
            self.tokenizer = p.tokenizer
            self.config = p.config
        return self.normal_model_instance
    def get_extra(self, *args, **kw):
        # Expose the config/tokenizer captured during model construction.
        return dict(config=self.config, tokenizer=self.tokenizer)
def clip_eps(delta_tensor):
    """Clamp the perturbation elementwise to the budget [-EPS[0], EPS[0]]."""
    bound = EPS[0]
    return tf.clip_by_value(delta_tensor, clip_value_min=(-bound), clip_value_max=bound)
class KitchenLightSwitchV0(KitchenBase):
    """Kitchen environment variant whose single task element is the light switch."""
    TASK_ELEMENTS = ['light switch']
    def __init__(self, delta=0, **kwargs):
        """delta widens the (otherwise degenerate) primitive action bounds by
        +/- delta; remaining kwargs go to KitchenBase."""
        super(KitchenLightSwitchV0, self).__init__(**kwargs)
        # Index -> primitive name used by the primitive-action controller.
        self.step_to_primitive_name = {0: 'close_gripper', 1: 'lift', 2: 'move_right', 3: 'move_forward', 4: 'move_left'}
        if ((not self.use_combined_action_space) and (self.control_mode == 'primitives')):
            # Nominal per-primitive argument vector: low == high here, so only
            # the +/- delta widening below makes the box non-degenerate.
            action_low = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6, 0.0, 0.45, 0.45, 1.25, 0.0, 0.0])
            action_high = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6, 0.0, 0.45, 0.45, 1.25, 0.0, 0.0])
            action_low -= delta
            action_high += delta
            if (not self.fixed_schema):
                # Prepend one [0, 1] slot per primitive for primitive selection.
                act_lower_primitive = np.zeros(self.num_primitives)
                act_upper_primitive = np.ones(self.num_primitives)
                action_low = np.concatenate((act_lower_primitive, action_low))
                action_high = np.concatenate((act_upper_primitive, action_high))
            self.action_space = Box(action_low, action_high, dtype=np.float32)
def compute_entropy(prob_states):
    """Total Shannon entropy (in bits) summed over every probability vector in
    *prob_states* (an iterable of iterables of probability vectors).
    Zero-probability entries are skipped to avoid log2(0)."""
    ent = 0
    for dist_group in prob_states:
        for dist in dist_group:
            dist = np.array(dist).flatten()
            # Restrict to the support so log2 is well defined.
            support = np.where(dist > 0)[0]
            ent += np.sum((-dist[support]) * np.log2(dist[support]))
    return ent
class OvercookedEnv(object):
    """Finite-horizon environment wrapper around an OvercookedGridworld MDP.

    Tracks the current state, timestep, and per-episode reward statistics, and
    provides rollout helpers (execute_plan, run_agents, get_rollouts) for
    agent pairs.
    """
    def __init__(self, mdp, start_state_fn=None, horizon=MAX_HORIZON, debug=False):
        """*mdp* may be a fixed OvercookedGridworld or a zero-arg callable
        producing one (re-invoked on every reset); *start_state_fn* optionally
        overrides the MDP's standard start state."""
        if isinstance(mdp, OvercookedGridworld):
            self.mdp_generator_fn = (lambda : mdp)
        elif (callable(mdp) and isinstance(mdp(), OvercookedGridworld)):
            self.mdp_generator_fn = mdp
        else:
            raise ValueError('Mdp should be either OvercookedGridworld instance or a generating function')
        self.horizon = horizon
        self.start_state_fn = start_state_fn
        self.reset()
        if ((self.horizon >= MAX_HORIZON) and (self.state.order_list is None) and debug):
            print('Environment has (near-)infinite horizon and no terminal states')
    def __repr__(self):
        # Render the current state as the MDP's grid string.
        return self.mdp.state_string(self.state)
    def print_state_transition(self, a_t, r_t, info):
        """Pretty-print one transition: timestep, joint action, rewards, grid."""
        print('Timestep: {}\nJoint action taken: {} \t Reward: {} + shape * {} \n{}\n'.format(self.t, tuple((Action.ACTION_TO_CHAR[a] for a in a_t)), r_t, info['shaped_r'], self))
    def env_params(self):
        # NOTE(review): get_rollouts below stores ``self.env_params`` as an
        # attribute, i.e. the bound method object itself - this looks like a
        # missing @property; confirm intended behavior.
        return {'start_state_fn': self.start_state_fn, 'horizon': self.horizon}
    def display_states(self, *states):
        """Print each given state, then restore the current one."""
        old_state = self.state
        for s in states:
            self.state = s
            print(self)
        self.state = old_state
    def print_state(mdp, s):
        # NOTE(review): no ``self`` parameter - effectively a static helper;
        # presumably meant to carry @staticmethod.
        e = OvercookedEnv(mdp, s)
        print(e)
    def copy(self):
        # Fresh environment sharing the same configuration (MDP is copied).
        return OvercookedEnv(mdp=self.mdp.copy(), start_state_fn=self.start_state_fn, horizon=self.horizon)
    def step(self, joint_action):
        """Advance one timestep with *joint_action*.

        Returns (next_state, summed_sparse_reward, done, env_info)."""
        assert (not self.is_done())
        self.t += 1
        (next_state, mdp_infos) = self.mdp.get_state_transition(self.state, joint_action)
        self._update_game_stats(mdp_infos)
        self.state = next_state
        done = self.is_done()
        env_info = self._prepare_info_dict([{}, {}], mdp_infos)
        if done:
            self._add_episode_info(env_info)
        timestep_sparse_reward = sum(mdp_infos['sparse_reward_by_agent'])
        return (next_state, timestep_sparse_reward, done, env_info)
    def reset(self):
        """Regenerate the MDP and reset state, timestep and reward accumulators."""
        self.mdp = self.mdp_generator_fn()
        if (self.start_state_fn is None):
            self.state = self.mdp.get_standard_start_state()
        else:
            self.state = self.start_state_fn()
        # NOTE(review): these two scalars are never incremented anywhere in
        # this class (only game_stats is updated in _update_game_stats), so
        # run_agents always returns them as 0.
        self.cumulative_sparse_rewards = 0
        self.cumulative_shaped_rewards = 0
        self.t = 0
        rewards_dict = {'cumulative_sparse_rewards_by_agent': np.array(([0] * self.mdp.num_players)), 'cumulative_shaped_rewards_by_agent': np.array(([0] * self.mdp.num_players)), 'cumulative_category_rewards_by_agent': np.array(([[0, 0, 0]] * self.mdp.num_players))}
        self.game_stats = {**rewards_dict}
    def is_done(self):
        # Episode ends at the horizon or on an MDP terminal state.
        return ((self.t >= self.horizon) or self.mdp.is_terminal(self.state))
    def _prepare_info_dict(self, joint_agent_action_info, mdp_infos):
        """Assemble the per-step info dict exposed by step()."""
        env_info = {'agent_infos': [joint_agent_action_info[agent_idx] for agent_idx in range(self.mdp.num_players)]}
        env_info['sparse_r_by_agent'] = mdp_infos['sparse_reward_by_agent']
        env_info['shaped_r_by_agent'] = mdp_infos['shaped_reward_by_agent']
        env_info['shaped_info_by_agent'] = mdp_infos['shaped_info_by_agent']
        env_info['phi_s'] = (mdp_infos['phi_s'] if ('phi_s' in mdp_infos) else None)
        env_info['phi_s_prime'] = (mdp_infos['phi_s_prime'] if ('phi_s_prime' in mdp_infos) else None)
        return env_info
    def _add_episode_info(self, env_info):
        """Attach end-of-episode aggregate statistics under 'episode'."""
        env_info['episode'] = {'ep_game_stats': self.game_stats, 'ep_sparse_r': sum(self.game_stats['cumulative_sparse_rewards_by_agent']), 'ep_shaped_r': sum(self.game_stats['cumulative_shaped_rewards_by_agent']), 'ep_sparse_r_by_agent': self.game_stats['cumulative_sparse_rewards_by_agent'], 'ep_shaped_r_by_agent': self.game_stats['cumulative_shaped_rewards_by_agent'], 'ep_length': self.t}
        return env_info
    def _update_game_stats(self, infos):
        # Accumulate per-agent reward vectors for the running episode.
        self.game_stats['cumulative_sparse_rewards_by_agent'] += np.array(infos['sparse_reward_by_agent'])
        self.game_stats['cumulative_shaped_rewards_by_agent'] += np.array(infos['shaped_reward_by_agent'])
    def execute_plan(self, start_state, joint_action_plan, display=False):
        """Roll a fixed joint-action plan forward from *start_state*.

        Returns (successor_state, done); the environment is reset afterwards."""
        self.state = start_state
        done = False
        if display:
            print('Starting state\n{}'.format(self))
        for joint_action in joint_action_plan:
            self.step(joint_action)
            done = self.is_done()
            if display:
                print(self)
            if done:
                break
        successor_state = self.state
        self.reset()
        return (successor_state, done)
    def run_agents(self, agent_pair, include_final_state=False, display=False, display_until=np.Inf):
        """Play one episode with *agent_pair*.

        Returns (trajectory array of (s, a, r, done), episode length,
        cumulative sparse rewards, cumulative shaped rewards)."""
        assert (self.cumulative_sparse_rewards == self.cumulative_shaped_rewards == 0), 'Did not reset environment before running agents'
        trajectory = []
        done = False
        if display:
            print(self)
        while (not done):
            s_t = self.state
            a_t = agent_pair.joint_action(s_t)
            if any([(a is None) for a in a_t]):
                # An agent declined to act: end the episode early.
                break
            (s_tp1, r_t, done, info) = self.step(a_t)
            trajectory.append((s_t, a_t, r_t, done))
            if (display and (self.t < display_until)):
                self.print_state_transition(a_t, r_t, info)
        assert (len(trajectory) == self.t), '{} vs {}'.format(len(trajectory), self.t)
        if include_final_state:
            trajectory.append((s_tp1, (None, None), 0, True))
        return (np.array(trajectory), self.t, self.cumulative_sparse_rewards, self.cumulative_shaped_rewards)
    def get_rollouts(self, agent_pair, num_games, display=False, final_state=False, agent_idx=0, reward_shaping=0.0, display_until=np.Inf, info=True):
        """Collect *num_games* episodes into a dict of per-episode arrays."""
        trajectories = {'ep_observations': [], 'ep_actions': [], 'ep_rewards': [], 'ep_dones': [], 'ep_returns': [], 'ep_returns_sparse': [], 'ep_lengths': [], 'mdp_params': [], 'env_params': []}
        for _ in tqdm.trange(num_games):
            agent_pair.set_mdp(self.mdp)
            (trajectory, time_taken, tot_rews_sparse, tot_rews_shaped) = self.run_agents(agent_pair, display=display, include_final_state=final_state, display_until=display_until)
            (obs, actions, rews, dones) = (trajectory.T[0], trajectory.T[1], trajectory.T[2], trajectory.T[3])
            trajectories['ep_observations'].append(obs)
            trajectories['ep_actions'].append(actions)
            trajectories['ep_rewards'].append(rews)
            trajectories['ep_dones'].append(dones)
            # Total return combines sparse reward and scaled shaping reward.
            trajectories['ep_returns'].append((tot_rews_sparse + (tot_rews_shaped * reward_shaping)))
            trajectories['ep_returns_sparse'].append(tot_rews_sparse)
            trajectories['ep_lengths'].append(time_taken)
            trajectories['mdp_params'].append(self.mdp.mdp_params)
            trajectories['env_params'].append(self.env_params)
            self.reset()
            agent_pair.reset()
        (mu, se) = mean_and_std_err(trajectories['ep_returns'])
        if info:
            print('Avg reward {:.2f} (std: {:.2f}, se: {:.2f}) over {} games of avg length {}'.format(mu, np.std(trajectories['ep_returns']), se, num_games, np.mean(trajectories['ep_lengths'])))
        trajectories = {k: np.array(v) for (k, v) in trajectories.items()}
        return trajectories
class THTerm(Term):
    """Term with time-history support: without a differentiation variable the
    evaluation sums contributions produced by a history-kernel iterator."""
    def eval_real(self, shape, fargs, mode='eval', term_mode=None, diff_var=None, **kwargs):
        """Evaluate the term; *fargs* is a kernel-iterator factory when
        *diff_var* is None, otherwise ordinary Term arguments.

        Returns (out, status).  NOTE(review): in the accumulation branch
        ``status`` comes from the last kernel step and is unbound (NameError)
        if the iterator yields nothing - confirm the kernel is never empty.
        """
        if (diff_var is None):
            if (mode == 'eval'):
                # Scalar accumulator in plain evaluation mode.
                out = 0.0
            else:
                out = nm.zeros(shape, dtype=nm.float64)
            iter_kernel = fargs
            # Sum the contribution of every history step.
            for (ii, fargs) in iter_kernel():
                (out1, status) = Term.eval_real(self, shape, fargs, mode=mode, term_mode=term_mode, diff_var=diff_var, **kwargs)
                out += out1
        else:
            (out, status) = Term.eval_real(self, shape, fargs, mode=mode, term_mode=term_mode, diff_var=diff_var, **kwargs)
        return (out, status)
# NOTE(review): the two decorator lines below appear truncated by extraction -
# presumably pytest-style markers such as ``@pytest.mark.operations`` and
# ``@pytest.mark.usefixtures``.
.operations('path_variable')
.usefixtures('filter_path_parameters')
def test_path_parameters_encoding(real_app_schema):
    """Executing status-code-conformance checks against operations with path
    variables must produce no errors or failures (i.e. path parameters are
    URL-encoded correctly)."""
    results = execute(real_app_schema, checks=(status_code_conformance,), hypothesis_settings=hypothesis.settings(derandomize=True, deadline=None))
    assert (not results.has_errors)
    assert (not results.has_failures)
class RefQualifierKind(BaseEnumeration):
    """Enumeration of C++ ref-qualifier kinds (clang cindex-style)."""
    # Required by BaseEnumeration: registry of known kinds / lazy name map.
    _kinds = []
    _name_map = None
    def from_param(self):
        # Lets the enum be passed directly in ctypes argument lists.
        return self.value
    def __repr__(self):
        return ('RefQualifierKind.%s' % (self.name,))
def prune_state_dict(state_dict, args):
    """Drop encoder/decoder layers from *state_dict* according to
    ``args.encoder_layers_to_keep`` / ``args.decoder_layers_to_keep`` and
    renumber the surviving layers densely (LayerDrop-style pruning).

    Returns the original dict unchanged when no pruning is requested.
    """
    if (not args) or (args.arch == 'ptt_transformer'):
        return state_dict
    encoder_layers_to_keep = (args.encoder_layers_to_keep if ('encoder_layers_to_keep' in vars(args)) else None)
    decoder_layers_to_keep = (args.decoder_layers_to_keep if ('decoder_layers_to_keep' in vars(args)) else None)
    if (not encoder_layers_to_keep) and (not decoder_layers_to_keep):
        return state_dict
    print('| Pruning model to specified layer configuration - this works best if the model was trained with LayerDrop')

    def create_pruning_pass(layers_to_keep, layer_name):
        # Map each kept original layer index (as a string) to its dense new index.
        keep_layers = sorted(int(token) for token in layers_to_keep.split(','))
        mapping_dict = {str(orig): str(new) for (new, orig) in enumerate(keep_layers)}
        regex = re.compile('^{layer}.*\\.layers\\.(\\d+)'.format(layer=layer_name))
        return {'substitution_regex': regex, 'mapping_dict': mapping_dict}

    pruning_passes = []
    if encoder_layers_to_keep:
        pruning_passes.append(create_pruning_pass(encoder_layers_to_keep, 'encoder'))
    if decoder_layers_to_keep:
        pruning_passes.append(create_pruning_pass(decoder_layers_to_keep, 'decoder'))
    new_state_dict = {}
    layer_number_pattern = re.compile('\\.layers\\.(\\d+)\\.')
    for key in state_dict.keys():
        match = layer_number_pattern.search(key)
        if not match:
            # Not a per-layer parameter: always carried over.
            new_state_dict[key] = state_dict[key]
            continue
        original_layer_number = match.group(1)
        for pruning_pass in pruning_passes:
            if (original_layer_number in pruning_pass['mapping_dict']) and pruning_pass['substitution_regex'].search(key):
                new_layer_number = pruning_pass['mapping_dict'][original_layer_number]
                sub_match = pruning_pass['substitution_regex'].search(key)
                # Splice the renumbered layer index into the parameter name.
                new_key = key[:sub_match.start(1)] + new_layer_number + key[sub_match.end(1):]
                new_state_dict[new_key] = state_dict[key]
    return new_state_dict
# NOTE(review): this decorator line appears truncated by extraction -
# presumably ``@hydra.main(config_path='conf', config_name='rolling')``.
(config_path='conf', config_name='rolling')
def main(cfg):
    """Interactive demo: two DIGIT tactile sensors with an object between
    them, driven by a pose-control GUI panel, rendering tactile images in a
    loop (Hydra-configured)."""
    bg = cv2.imread('conf/bg_digit_240_320.jpg')
    digits = tacto.Sensor(**cfg.tacto, background=bg)
    log.info('Initializing world')
    px.init()
    p.resetDebugVisualizerCamera(**cfg.pybullet_camera)
    digit_top = px.Body(**cfg.digits.top)
    digit_bottom = px.Body(**cfg.digits.bottom)
    # Attach a tactile camera to the last link (-1) of each DIGIT body.
    digits.add_camera(digit_top.id, [(- 1)])
    digits.add_camera(digit_bottom.id, [(- 1)])
    obj = px.Body(**cfg.object)
    digits.add_body(obj)
    panel = px.gui.PoseControlPanel(digit_top, **cfg.object_control_panel)
    panel.start()
    log.info('Use the slides to move the object until in contact with the DIGIT')
    t = px.utils.SimulationThread(real_time_factor=1.0)
    t.start()
    while True:
        ((_, _, z), orientation) = obj.get_base_pose()
        if (z <= 0.01):
            # The object fell below the sensor: put everything back.
            obj.reset()
            digit_top.reset()
        (color, depth) = digits.render()
        digits.updateGUI(color, depth)
    # NOTE(review): unreachable - the loop above never exits, so the
    # simulation thread is never stopped.
    t.stop()
def test():
    """Check that packing a typetracer ListOffsetArray preserves its length."""
    backend = TypeTracerBackend.instance()
    offsets = ak.index.Index64(
        backend.index_nplike.asarray([0, 1, 3, 7], dtype=np.dtype('int64'))
    )
    content = ak.contents.NumpyArray(
        backend.nplike.asarray([1, 2, 3, 4, 5, 6, 7])
    )
    listoffset = ak.contents.ListOffsetArray(offsets, content)
    assert (listoffset.to_packed().length == 3)
# NOTE(review): the next line looks like a truncated decorator (its name was cut
# off) — possibly ``@compare_numpy_output(non_zero=True, positive=True)``; confirm.
_numpy_output(non_zero=True, positive=True)
def test_modr(A: dace.int64[1], B: dace.int64[(5, 5)]):
    """Elementwise modulo of scalar A by each entry of the 5x5 matrix B."""
    return (A % B)
def _jaccard(a, b):
    """Jaccard index (intersection over union) of two binary SimpleITK images."""
    arr_a = sitk.GetArrayFromImage(a)
    arr_b = sitk.GetArrayFromImage(b)
    intersection = np.logical_and(arr_a, arr_b)
    union = np.logical_or(arr_a, arr_b)
    return np.sum(intersection) / np.sum(union)
def save_tflite(model, onnx_path, dummy_input):
    """Export *model* to a TFLite file at *onnx_path* via TinyNN.

    The model is deep-copied first so the caller's instance is untouched;
    the copy is moved to CPU and switched to eval mode before conversion.
    ``dummy_input`` traces the model's forward pass.
    """
    from tinynn.converter import TFLiteConverter
    # Convert a copy so the caller's model keeps its device/mode state.
    exported = copy.deepcopy(model)
    exported.cpu()
    if hasattr(exported, 'module'):
        # Unwrap a DataParallel/DistributedDataParallel-style wrapper.
        exported = exported.module
    exported.eval()
    TFLiteConverter(exported, dummy_input.cpu(), onnx_path).convert()
    # Release the copy promptly after conversion.
    del exported
class LinearNorm(torch.nn.Module):
    """A fully-connected layer whose weight is Xavier-uniform initialized.

    The gain passed to the initializer is derived from ``w_init_gain``
    (a nonlinearity name understood by ``torch.nn.init.calculate_gain``).
    """

    def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
        super().__init__()
        self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
        gain = torch.nn.init.calculate_gain(w_init_gain)
        torch.nn.init.xavier_uniform_(self.linear_layer.weight, gain=gain)

    def forward(self, x):
        """Apply the linear transform to *x*."""
        return self.linear_layer(x)
def _SQS38():
return [[0, 1, 2, 14], [0, 1, 3, 34], [0, 1, 4, 31], [0, 1, 5, 27], [0, 1, 6, 17], [0, 1, 7, 12], [0, 1, 8, 36], [0, 1, 9, 10], [0, 1, 11, 18], [0, 1, 13, 37], [0, 1, 15, 35], [0, 1, 16, 22], [0, 1, 19, 33], [0, 1, 20, 25], [0, 1, 21, 23], [0, 1, 24, 32], [0, 1, 26, 28], [0, 1, 29, 30], [0, 2, 3, 10], [0, 2, 4, 9], [0, 2, 5, 28], [0, 2, 6, 15], [0, 2, 7, 36], [0, 2, 8, 23], [0, 2, 11, 22], [0, 2, 12, 13], [0, 2, 16, 25], [0, 2, 17, 18], [0, 2, 19, 30], [0, 2, 20, 35], [0, 2, 21, 29], [0, 2, 24, 34], [0, 2, 26, 31], [0, 2, 27, 32], [0, 2, 33, 37], [0, 3, 4, 18], [0, 3, 5, 23], [0, 3, 6, 32], [0, 3, 7, 19], [0, 3, 8, 20], [0, 3, 9, 17], [0, 3, 11, 25], [0, 3, 12, 24], [0, 3, 13, 27], [0, 3, 14, 31], [0, 3, 15, 22], [0, 3, 16, 28], [0, 3, 21, 33], [0, 3, 26, 36], [0, 3, 29, 35], [0, 3, 30, 37], [0, 4, 5, 7], [0, 4, 6, 28], [0, 4, 8, 25], [0, 4, 10, 30], [0, 4, 11, 20], [0, 4, 12, 32], [0, 4, 13, 36], [0, 4, 14, 29], [0, 4, 15, 27], [0, 4, 16, 35], [0, 4, 17, 22], [0, 4, 19, 23], [0, 4, 21, 34], [0, 4, 24, 33], [0, 4, 26, 37], [0, 5, 6, 24], [0, 5, 8, 26], [0, 5, 9, 29], [0, 5, 10, 20], [0, 5, 11, 13], [0, 5, 12, 14], [0, 5, 15, 33], [0, 5, 16, 37], [0, 5, 17, 35], [0, 5, 18, 19], [0, 5, 21, 25], [0, 5, 22, 30], [0, 5, 31, 32], [0, 5, 34, 36], [0, 6, 7, 30], [0, 6, 8, 33], [0, 6, 9, 12], [0, 6, 10, 18], [0, 6, 11, 37], [0, 6, 13, 31], [0, 6, 14, 35], [0, 6, 16, 29], [0, 6, 19, 25], [0, 6, 20, 27], [0, 6, 21, 36], [0, 6, 22, 23], [0, 6, 26, 34], [0, 7, 8, 11], [0, 7, 9, 33], [0, 7, 10, 21], [0, 7, 13, 20], [0, 7, 14, 22], [0, 7, 15, 31], [0, 7, 16, 34], [0, 7, 17, 29], [0, 7, 18, 24], [0, 7, 23, 26], [0, 7, 25, 32], [0, 7, 27, 28], [0, 7, 35, 37], [0, 8, 9, 37], [0, 8, 10, 27], [0, 8, 12, 18], [0, 8, 13, 30], [0, 8, 14, 15], [0, 8, 16, 21], [0, 8, 17, 19], [0, 8, 22, 35], [0, 8, 24, 31], [0, 8, 28, 34], [0, 8, 29, 32], [0, 9, 11, 30], [0, 9, 13, 23], [0, 9, 14, 18], [0, 9, 15, 25], [0, 9, 16, 26], [0, 9, 19, 28], [0, 9, 20, 36], [0, 9, 21, 35], [0, 9, 22, 24], [0, 9, 
27, 31], [0, 9, 32, 34], [0, 10, 11, 36], [0, 10, 12, 15], [0, 10, 13, 26], [0, 10, 14, 16], [0, 10, 17, 37], [0, 10, 19, 29], [0, 10, 22, 31], [0, 10, 23, 32], [0, 10, 24, 35], [0, 10, 25, 34], [0, 10, 28, 33], [0, 11, 12, 16], [0, 11, 14, 24], [0, 11, 15, 26], [0, 11, 17, 31], [0, 11, 19, 21], [0, 11, 23, 34], [0, 11, 27, 29], [0, 11, 28, 35], [0, 11, 32, 33], [0, 12, 17, 20], [0, 12, 19, 35], [0, 12, 21, 28], [0, 12, 22, 25], [0, 12, 23, 27], [0, 12, 26, 29], [0, 12, 30, 33], [0, 12, 31, 34], [0, 12, 36, 37], [0, 13, 14, 33], [0, 13, 15, 29], [0, 13, 16, 24], [0, 13, 17, 21], [0, 13, 18, 34], [0, 13, 19, 32], [0, 13, 22, 28], [0, 13, 25, 35], [0, 14, 17, 26], [0, 14, 19, 20], [0, 14, 21, 32], [0, 14, 23, 36], [0, 14, 25, 28], [0, 14, 27, 30], [0, 14, 34, 37], [0, 15, 16, 36], [0, 15, 17, 23], [0, 15, 18, 20], [0, 15, 19, 34], [0, 15, 21, 37], [0, 15, 24, 28], [0, 15, 30, 32], [0, 16, 17, 32], [0, 16, 18, 27], [0, 16, 19, 31], [0, 16, 20, 33], [0, 16, 23, 30], [0, 17, 24, 27], [0, 17, 25, 33], [0, 17, 28, 36], [0, 17, 30, 34], [0, 18, 21, 26], [0, 18, 22, 29], [0, 18, 23, 28], [0, 18, 25, 31], [0, 18, 30, 35], [0, 18, 32, 37], [0, 18, 33, 36], [0, 19, 22, 26], [0, 19, 24, 37], [0, 19, 27, 36], [0, 20, 21, 31], [0, 20, 22, 37], [0, 20, 23, 24], [0, 20, 26, 30], [0, 20, 28, 32], [0, 20, 29, 34], [0, 21, 22, 27], [0, 21, 24, 30], [0, 22, 32, 36], [0, 22, 33, 34], [0, 23, 25, 29], [0, 23, 31, 37], [0, 23, 33, 35], [0, 24, 25, 26], [0, 24, 29, 36], [0, 25, 27, 37], [0, 25, 30, 36], [0, 26, 27, 33], [0, 26, 32, 35], [0, 27, 34, 35], [0, 28, 29, 37], [0, 28, 30, 31], [0, 29, 31, 33], [0, 31, 35, 36], [1, 2, 3, 15], [1, 2, 4, 35], [1, 2, 5, 32], [1, 2, 6, 28], [1, 2, 7, 18], [1, 2, 8, 13], [1, 2, 9, 37], [1, 2, 10, 11], [1, 2, 12, 19], [1, 2, 16, 36], [1, 2, 17, 23], [1, 2, 20, 34], [1, 2, 21, 26], [1, 2, 22, 24], [1, 2, 25, 33], [1, 2, 27, 29], [1, 2, 30, 31], [1, 3, 4, 11], [1, 3, 5, 10], [1, 3, 6, 29], [1, 3, 7, 16], [1, 3, 8, 37], [1, 3, 9, 24], [1, 3, 12, 23], [1, 
3, 13, 14], [1, 3, 17, 26], [1, 3, 18, 19], [1, 3, 20, 31], [1, 3, 21, 36], [1, 3, 22, 30], [1, 3, 25, 35], [1, 3, 27, 32], [1, 3, 28, 33], [1, 4, 5, 19], [1, 4, 6, 24], [1, 4, 7, 33], [1, 4, 8, 20], [1, 4, 9, 21], [1, 4, 10, 18], [1, 4, 12, 26], [1, 4, 13, 25], [1, 4, 14, 28], [1, 4, 15, 32], [1, 4, 16, 23], [1, 4, 17, 29], [1, 4, 22, 34], [1, 4, 27, 37], [1, 4, 30, 36], [1, 5, 6, 8], [1, 5, 7, 29], [1, 5, 9, 26], [1, 5, 11, 31], [1, 5, 12, 21], [1, 5, 13, 33], [1, 5, 14, 37], [1, 5, 15, 30], [1, 5, 16, 28], [1, 5, 17, 36], [1, 5, 18, 23], [1, 5, 20, 24], [1, 5, 22, 35], [1, 5, 25, 34], [1, 6, 7, 25], [1, 6, 9, 27], [1, 6, 10, 30], [1, 6, 11, 21], [1, 6, 12, 14], [1, 6, 13, 15], [1, 6, 16, 34], [1, 6, 18, 36], [1, 6, 19, 20], [1, 6, 22, 26], [1, 6, 23, 31], [1, 6, 32, 33], [1, 6, 35, 37], [1, 7, 8, 31], [1, 7, 9, 34], [1, 7, 10, 13], [1, 7, 11, 19], [1, 7, 14, 32], [1, 7, 15, 36], [1, 7, 17, 30], [1, 7, 20, 26], [1, 7, 21, 28], [1, 7, 22, 37], [1, 7, 23, 24], [1, 7, 27, 35], [1, 8, 9, 12], [1, 8, 10, 34], [1, 8, 11, 22], [1, 8, 14, 21], [1, 8, 15, 23], [1, 8, 16, 32], [1, 8, 17, 35], [1, 8, 18, 30], [1, 8, 19, 25], [1, 8, 24, 27], [1, 8, 26, 33], [1, 8, 28, 29], [1, 9, 11, 28], [1, 9, 13, 19], [1, 9, 14, 31], [1, 9, 15, 16], [1, 9, 17, 22], [1, 9, 18, 20], [1, 9, 23, 36], [1, 9, 25, 32], [1, 9, 29, 35], [1, 9, 30, 33], [1, 10, 12, 31], [1, 10, 14, 24], [1, 10, 15, 19], [1, 10, 16, 26], [1, 10, 17, 27], [1, 10, 20, 29], [1, 10, 21, 37], [1, 10, 22, 36], [1, 10, 23, 25], [1, 10, 28, 32], [1, 10, 33, 35], [1, 11, 12, 37], [1, 11, 13, 16], [1, 11, 14, 27], [1, 11, 15, 17], [1, 11, 20, 30], [1, 11, 23, 32], [1, 11, 24, 33], [1, 11, 25, 36], [1, 11, 26, 35], [1, 11, 29, 34], [1, 12, 13, 17], [1, 12, 15, 25], [1, 12, 16, 27], [1, 12, 18, 32], [1, 12, 20, 22], [1, 12, 24, 35], [1, 12, 28, 30], [1, 12, 29, 36], [1, 12, 33, 34], [1, 13, 18, 21], [1, 13, 20, 36], [1, 13, 22, 29], [1, 13, 23, 26], [1, 13, 24, 28], [1, 13, 27, 30], [1, 13, 31, 34], [1, 13, 32, 35], [1, 14, 15, 
34], [1, 14, 16, 30], [1, 14, 17, 25], [1, 14, 18, 22], [1, 14, 19, 35], [1, 14, 20, 33], [1, 14, 23, 29], [1, 14, 26, 36], [1, 15, 18, 27], [1, 15, 20, 21], [1, 15, 22, 33], [1, 15, 24, 37], [1, 15, 26, 29], [1, 15, 28, 31], [1, 16, 17, 37], [1, 16, 18, 24], [1, 16, 19, 21], [1, 16, 20, 35], [1, 16, 25, 29], [1, 16, 31, 33], [1, 17, 18, 33], [1, 17, 19, 28], [1, 17, 20, 32], [1, 17, 21, 34], [1, 17, 24, 31], [1, 18, 25, 28], [1, 18, 26, 34], [1, 18, 29, 37], [1, 18, 31, 35], [1, 19, 22, 27], [1, 19, 23, 30], [1, 19, 24, 29], [1, 19, 26, 32], [1, 19, 31, 36], [1, 19, 34, 37], [1, 20, 23, 27], [1, 20, 28, 37], [1, 21, 22, 32], [1, 21, 24, 25], [1, 21, 27, 31], [1, 21, 29, 33], [1, 21, 30, 35], [1, 22, 23, 28], [1, 22, 25, 31], [1, 23, 33, 37], [1, 23, 34, 35], [1, 24, 26, 30], [1, 24, 34, 36], [1, 25, 26, 27], [1, 25, 30, 37], [1, 26, 31, 37], [1, 27, 28, 34], [1, 27, 33, 36], [1, 28, 35, 36], [1, 29, 31, 32], [1, 30, 32, 34], [1, 32, 36, 37], [2, 3, 4, 16], [2, 3, 5, 36], [2, 3, 6, 33], [2, 3, 7, 29], [2, 3, 8, 19], [2, 3, 9, 14], [2, 3, 11, 12], [2, 3, 13, 20], [2, 3, 17, 37], [2, 3, 18, 24], [2, 3, 21, 35], [2, 3, 22, 27], [2, 3, 23, 25], [2, 3, 26, 34], [2, 3, 28, 30], [2, 3, 31, 32], [2, 4, 5, 12], [2, 4, 6, 11], [2, 4, 7, 30], [2, 4, 8, 17], [2, 4, 10, 25], [2, 4, 13, 24], [2, 4, 14, 15], [2, 4, 18, 27], [2, 4, 19, 20], [2, 4, 21, 32], [2, 4, 22, 37], [2, 4, 23, 31], [2, 4, 26, 36], [2, 4, 28, 33], [2, 4, 29, 34], [2, 5, 6, 20], [2, 5, 7, 25], [2, 5, 8, 34], [2, 5, 9, 21], [2, 5, 10, 22], [2, 5, 11, 19], [2, 5, 13, 27], [2, 5, 14, 26], [2, 5, 15, 29], [2, 5, 16, 33], [2, 5, 17, 24], [2, 5, 18, 30], [2, 5, 23, 35], [2, 5, 31, 37], [2, 6, 7, 9], [2, 6, 8, 30], [2, 6, 10, 27], [2, 6, 12, 32], [2, 6, 13, 22], [2, 6, 14, 34], [2, 6, 16, 31], [2, 6, 17, 29], [2, 6, 18, 37], [2, 6, 19, 24], [2, 6, 21, 25], [2, 6, 23, 36], [2, 6, 26, 35], [2, 7, 8, 26], [2, 7, 10, 28], [2, 7, 11, 31], [2, 7, 12, 22], [2, 7, 13, 15], [2, 7, 14, 16], [2, 7, 17, 35], [2, 7, 19, 37], [2, 
7, 20, 21], [2, 7, 23, 27], [2, 7, 24, 32], [2, 7, 33, 34], [2, 8, 9, 32], [2, 8, 10, 35], [2, 8, 11, 14], [2, 8, 12, 20], [2, 8, 15, 33], [2, 8, 16, 37], [2, 8, 18, 31], [2, 8, 21, 27], [2, 8, 22, 29], [2, 8, 24, 25], [2, 8, 28, 36], [2, 9, 10, 13], [2, 9, 11, 35], [2, 9, 12, 23], [2, 9, 15, 22], [2, 9, 16, 24], [2, 9, 17, 33], [2, 9, 18, 36], [2, 9, 19, 31], [2, 9, 20, 26], [2, 9, 25, 28], [2, 9, 27, 34], [2, 9, 29, 30], [2, 10, 12, 29], [2, 10, 14, 20], [2, 10, 15, 32], [2, 10, 16, 17], [2, 10, 18, 23], [2, 10, 19, 21], [2, 10, 24, 37], [2, 10, 26, 33], [2, 10, 30, 36], [2, 10, 31, 34], [2, 11, 13, 32], [2, 11, 15, 25], [2, 11, 16, 20], [2, 11, 17, 27], [2, 11, 18, 28], [2, 11, 21, 30], [2, 11, 23, 37], [2, 11, 24, 26], [2, 11, 29, 33], [2, 11, 34, 36], [2, 12, 14, 17], [2, 12, 15, 28], [2, 12, 16, 18], [2, 12, 21, 31], [2, 12, 24, 33], [2, 12, 25, 34], [2, 12, 26, 37], [2, 12, 27, 36], [2, 12, 30, 35], [2, 13, 14, 18], [2, 13, 16, 26], [2, 13, 17, 28], [2, 13, 19, 33], [2, 13, 21, 23], [2, 13, 25, 36], [2, 13, 29, 31], [2, 13, 30, 37], [2, 13, 34, 35], [2, 14, 19, 22], [2, 14, 21, 37], [2, 14, 23, 30], [2, 14, 24, 27], [2, 14, 25, 29], [2, 14, 28, 31], [2, 14, 32, 35], [2, 14, 33, 36], [2, 15, 16, 35], [2, 15, 17, 31], [2, 15, 18, 26], [2, 15, 19, 23], [2, 15, 20, 36], [2, 15, 21, 34], [2, 15, 24, 30], [2, 15, 27, 37], [2, 16, 19, 28], [2, 16, 21, 22], [2, 16, 23, 34], [2, 16, 27, 30], [2, 16, 29, 32], [2, 17, 19, 25], [2, 17, 20, 22], [2, 17, 21, 36], [2, 17, 26, 30], [2, 17, 32, 34], [2, 18, 19, 34], [2, 18, 20, 29], [2, 18, 21, 33], [2, 18, 22, 35], [2, 18, 25, 32], [2, 19, 26, 29], [2, 19, 27, 35], [2, 19, 32, 36], [2, 20, 23, 28], [2, 20, 24, 31], [2, 20, 25, 30], [2, 20, 27, 33], [2, 20, 32, 37], [2, 21, 24, 28], [2, 22, 23, 33], [2, 22, 25, 26], [2, 22, 28, 32], [2, 22, 30, 34], [2, 22, 31, 36], [2, 23, 24, 29], [2, 23, 26, 32], [2, 24, 35, 36], [2, 25, 27, 31], [2, 25, 35, 37], [2, 26, 27, 28], [2, 28, 29, 35], [2, 28, 34, 37], [2, 29, 36, 37], [2, 30, 
32, 33], [2, 31, 33, 35], [3, 4, 5, 17], [3, 4, 6, 37], [3, 4, 7, 34], [3, 4, 8, 30], [3, 4, 9, 20], [3, 4, 10, 15], [3, 4, 12, 13], [3, 4, 14, 21], [3, 4, 19, 25], [3, 4, 22, 36], [3, 4, 23, 28], [3, 4, 24, 26], [3, 4, 27, 35], [3, 4, 29, 31], [3, 4, 32, 33], [3, 5, 6, 13], [3, 5, 7, 12], [3, 5, 8, 31], [3, 5, 9, 18], [3, 5, 11, 26], [3, 5, 14, 25], [3, 5, 15, 16], [3, 5, 19, 28], [3, 5, 20, 21], [3, 5, 22, 33], [3, 5, 24, 32], [3, 5, 27, 37], [3, 5, 29, 34], [3, 5, 30, 35], [3, 6, 7, 21], [3, 6, 8, 26], [3, 6, 9, 35], [3, 6, 10, 22], [3, 6, 11, 23], [3, 6, 12, 20], [3, 6, 14, 28], [3, 6, 15, 27], [3, 6, 16, 30], [3, 6, 17, 34], [3, 6, 18, 25], [3, 6, 19, 31], [3, 6, 24, 36], [3, 7, 8, 10], [3, 7, 9, 31], [3, 7, 11, 28], [3, 7, 13, 33], [3, 7, 14, 23], [3, 7, 15, 35], [3, 7, 17, 32], [3, 7, 18, 30], [3, 7, 20, 25], [3, 7, 22, 26], [3, 7, 24, 37], [3, 7, 27, 36], [3, 8, 9, 27], [3, 8, 11, 29], [3, 8, 12, 32], [3, 8, 13, 23], [3, 8, 14, 16], [3, 8, 15, 17], [3, 8, 18, 36], [3, 8, 21, 22], [3, 8, 24, 28], [3, 8, 25, 33], [3, 8, 34, 35], [3, 9, 10, 33], [3, 9, 11, 36], [3, 9, 12, 15], [3, 9, 13, 21], [3, 9, 16, 34], [3, 9, 19, 32], [3, 9, 22, 28], [3, 9, 23, 30], [3, 9, 25, 26], [3, 9, 29, 37], [3, 10, 11, 14], [3, 10, 12, 36], [3, 10, 13, 24], [3, 10, 16, 23], [3, 10, 17, 25], [3, 10, 18, 34], [3, 10, 19, 37], [3, 10, 20, 32], [3, 10, 21, 27], [3, 10, 26, 29], [3, 10, 28, 35], [3, 10, 30, 31], [3, 11, 13, 30], [3, 11, 15, 21], [3, 11, 16, 33], [3, 11, 17, 18], [3, 11, 19, 24], [3, 11, 20, 22], [3, 11, 27, 34], [3, 11, 31, 37], [3, 11, 32, 35], [3, 12, 14, 33], [3, 12, 16, 26], [3, 12, 17, 21], [3, 12, 18, 28], [3, 12, 19, 29], [3, 12, 22, 31], [3, 12, 25, 27], [3, 12, 30, 34], [3, 12, 35, 37], [3, 13, 15, 18], [3, 13, 16, 29], [3, 13, 17, 19], [3, 13, 22, 32], [3, 13, 25, 34], [3, 13, 26, 35], [3, 13, 28, 37], [3, 13, 31, 36], [3, 14, 15, 19], [3, 14, 17, 27], [3, 14, 18, 29], [3, 14, 20, 34], [3, 14, 22, 24], [3, 14, 26, 37], [3, 14, 30, 32], [3, 14, 35, 36], [3, 
15, 20, 23], [3, 15, 24, 31], [3, 15, 25, 28], [3, 15, 26, 30], [3, 15, 29, 32], [3, 15, 33, 36], [3, 15, 34, 37], [3, 16, 17, 36], [3, 16, 18, 32], [3, 16, 19, 27], [3, 16, 20, 24], [3, 16, 21, 37], [3, 16, 22, 35], [3, 16, 25, 31], [3, 17, 20, 29], [3, 17, 22, 23], [3, 17, 24, 35], [3, 17, 28, 31], [3, 17, 30, 33], [3, 18, 20, 26], [3, 18, 21, 23], [3, 18, 22, 37], [3, 18, 27, 31], [3, 18, 33, 35], [3, 19, 20, 35], [3, 19, 21, 30], [3, 19, 22, 34], [3, 19, 23, 36], [3, 19, 26, 33], [3, 20, 27, 30], [3, 20, 28, 36], [3, 20, 33, 37], [3, 21, 24, 29], [3, 21, 25, 32], [3, 21, 26, 31], [3, 21, 28, 34], [3, 22, 25, 29], [3, 23, 24, 34], [3, 23, 26, 27], [3, 23, 29, 33], [3, 23, 31, 35], [3, 23, 32, 37], [3, 24, 25, 30], [3, 24, 27, 33], [3, 25, 36, 37], [3, 26, 28, 32], [3, 27, 28, 29], [3, 29, 30, 36], [3, 31, 33, 34], [3, 32, 34, 36], [4, 5, 6, 18], [4, 5, 8, 35], [4, 5, 9, 31], [4, 5, 10, 21], [4, 5, 11, 16], [4, 5, 13, 14], [4, 5, 15, 22], [4, 5, 20, 26], [4, 5, 23, 37], [4, 5, 24, 29], [4, 5, 25, 27], [4, 5, 28, 36], [4, 5, 30, 32], [4, 5, 33, 34], [4, 6, 7, 14], [4, 6, 8, 13], [4, 6, 9, 32], [4, 6, 10, 19], [4, 6, 12, 27], [4, 6, 15, 26], [4, 6, 16, 17], [4, 6, 20, 29], [4, 6, 21, 22], [4, 6, 23, 34], [4, 6, 25, 33], [4, 6, 30, 35], [4, 6, 31, 36], [4, 7, 8, 22], [4, 7, 9, 27], [4, 7, 10, 36], [4, 7, 11, 23], [4, 7, 12, 24], [4, 7, 13, 21], [4, 7, 15, 29], [4, 7, 16, 28], [4, 7, 17, 31], [4, 7, 18, 35], [4, 7, 19, 26], [4, 7, 20, 32], [4, 7, 25, 37], [4, 8, 9, 11], [4, 8, 10, 32], [4, 8, 12, 29], [4, 8, 14, 34], [4, 8, 15, 24], [4, 8, 16, 36], [4, 8, 18, 33], [4, 8, 19, 31], [4, 8, 21, 26], [4, 8, 23, 27], [4, 8, 28, 37], [4, 9, 10, 28], [4, 9, 12, 30], [4, 9, 13, 33], [4, 9, 14, 24], [4, 9, 15, 17], [4, 9, 16, 18], [4, 9, 19, 37], [4, 9, 22, 23], [4, 9, 25, 29], [4, 9, 26, 34], [4, 9, 35, 36], [4, 10, 11, 34], [4, 10, 12, 37], [4, 10, 13, 16], [4, 10, 14, 22], [4, 10, 17, 35], [4, 10, 20, 33], [4, 10, 23, 29], [4, 10, 24, 31], [4, 10, 26, 27], [4, 11, 12, 15], 
[4, 11, 13, 37], [4, 11, 14, 25], [4, 11, 17, 24], [4, 11, 18, 26], [4, 11, 19, 35], [4, 11, 21, 33], [4, 11, 22, 28], [4, 11, 27, 30], [4, 11, 29, 36], [4, 11, 31, 32], [4, 12, 14, 31], [4, 12, 16, 22], [4, 12, 17, 34], [4, 12, 18, 19], [4, 12, 20, 25], [4, 12, 21, 23], [4, 12, 28, 35], [4, 12, 33, 36], [4, 13, 15, 34], [4, 13, 17, 27], [4, 13, 18, 22], [4, 13, 19, 29], [4, 13, 20, 30], [4, 13, 23, 32], [4, 13, 26, 28], [4, 13, 31, 35], [4, 14, 16, 19], [4, 14, 17, 30], [4, 14, 18, 20], [4, 14, 23, 33], [4, 14, 26, 35], [4, 14, 27, 36], [4, 14, 32, 37], [4, 15, 16, 20], [4, 15, 18, 28], [4, 15, 19, 30], [4, 15, 21, 35], [4, 15, 23, 25], [4, 15, 31, 33], [4, 15, 36, 37], [4, 16, 21, 24], [4, 16, 25, 32], [4, 16, 26, 29], [4, 16, 27, 31], [4, 16, 30, 33], [4, 16, 34, 37], [4, 17, 18, 37], [4, 17, 19, 33], [4, 17, 20, 28], [4, 17, 21, 25], [4, 17, 23, 36], [4, 17, 26, 32], [4, 18, 21, 30], [4, 18, 23, 24], [4, 18, 25, 36], [4, 18, 29, 32], [4, 18, 31, 34], [4, 19, 21, 27], [4, 19, 22, 24], [4, 19, 28, 32], [4, 19, 34, 36], [4, 20, 21, 36], [4, 20, 22, 31], [4, 20, 23, 35], [4, 20, 24, 37], [4, 20, 27, 34], [4, 21, 28, 31], [4, 21, 29, 37], [4, 22, 25, 30], [4, 22, 26, 33], [4, 22, 27, 32], [4, 22, 29, 35], [4, 23, 26, 30], [4, 24, 25, 35], [4, 24, 27, 28], [4, 24, 30, 34], [4, 24, 32, 36], [4, 25, 26, 31], [4, 25, 28, 34], [4, 27, 29, 33], [4, 28, 29, 30], [4, 30, 31, 37], [4, 32, 34, 35], [4, 33, 35, 37], [5, 6, 7, 19], [5, 6, 9, 36], [5, 6, 10, 32], [5, 6, 11, 22], [5, 6, 12, 17], [5, 6, 14, 15], [5, 6, 16, 23], [5, 6, 21, 27], [5, 6, 25, 30], [5, 6, 26, 28], [5, 6, 29, 37], [5, 6, 31, 33], [5, 6, 34, 35], [5, 7, 8, 15], [5, 7, 9, 14], [5, 7, 10, 33], [5, 7, 11, 20], [5, 7, 13, 28], [5, 7, 16, 27], [5, 7, 17, 18], [5, 7, 21, 30], [5, 7, 22, 23], [5, 7, 24, 35], [5, 7, 26, 34], [5, 7, 31, 36], [5, 7, 32, 37], [5, 8, 9, 23], [5, 8, 10, 28], [5, 8, 11, 37], [5, 8, 12, 24], [5, 8, 13, 25], [5, 8, 14, 22], [5, 8, 16, 30], [5, 8, 17, 29], [5, 8, 18, 32], [5, 8, 19, 36], 
[5, 8, 20, 27], [5, 8, 21, 33], [5, 9, 10, 12], [5, 9, 11, 33], [5, 9, 13, 30], [5, 9, 15, 35], [5, 9, 16, 25], [5, 9, 17, 37], [5, 9, 19, 34], [5, 9, 20, 32], [5, 9, 22, 27], [5, 9, 24, 28], [5, 10, 11, 29], [5, 10, 13, 31], [5, 10, 14, 34], [5, 10, 15, 25], [5, 10, 16, 18], [5, 10, 17, 19], [5, 10, 23, 24], [5, 10, 26, 30], [5, 10, 27, 35], [5, 10, 36, 37], [5, 11, 12, 35], [5, 11, 14, 17], [5, 11, 15, 23], [5, 11, 18, 36], [5, 11, 21, 34], [5, 11, 24, 30], [5, 11, 25, 32], [5, 11, 27, 28], [5, 12, 13, 16], [5, 12, 15, 26], [5, 12, 18, 25], [5, 12, 19, 27], [5, 12, 20, 36], [5, 12, 22, 34], [5, 12, 23, 29], [5, 12, 28, 31], [5, 12, 30, 37], [5, 12, 32, 33], [5, 13, 15, 32], [5, 13, 17, 23], [5, 13, 18, 35], [5, 13, 19, 20], [5, 13, 21, 26], [5, 13, 22, 24], [5, 13, 29, 36], [5, 13, 34, 37], [5, 14, 16, 35], [5, 14, 18, 28], [5, 14, 19, 23], [5, 14, 20, 30], [5, 14, 21, 31], [5, 14, 24, 33], [5, 14, 27, 29], [5, 14, 32, 36], [5, 15, 17, 20], [5, 15, 18, 31], [5, 15, 19, 21], [5, 15, 24, 34], [5, 15, 27, 36], [5, 15, 28, 37], [5, 16, 17, 21], [5, 16, 19, 29], [5, 16, 20, 31], [5, 16, 22, 36], [5, 16, 24, 26], [5, 16, 32, 34], [5, 17, 22, 25], [5, 17, 26, 33], [5, 17, 27, 30], [5, 17, 28, 32], [5, 17, 31, 34], [5, 18, 20, 34], [5, 18, 21, 29], [5, 18, 22, 26], [5, 18, 24, 37], [5, 18, 27, 33], [5, 19, 22, 31], [5, 19, 24, 25], [5, 19, 26, 37], [5, 19, 30, 33], [5, 19, 32, 35], [5, 20, 22, 28], [5, 20, 23, 25], [5, 20, 29, 33], [5, 20, 35, 37], [5, 21, 22, 37], [5, 21, 23, 32], [5, 21, 24, 36], [5, 21, 28, 35], [5, 22, 29, 32], [5, 23, 26, 31], [5, 23, 27, 34], [5, 23, 28, 33], [5, 23, 30, 36], [5, 24, 27, 31], [5, 25, 26, 36], [5, 25, 28, 29], [5, 25, 31, 35], [5, 25, 33, 37], [5, 26, 27, 32], [5, 26, 29, 35], [5, 28, 30, 34], [5, 29, 30, 31], [5, 33, 35, 36], [6, 7, 8, 20], [6, 7, 10, 37], [6, 7, 11, 33], [6, 7, 12, 23], [6, 7, 13, 18], [6, 7, 15, 16], [6, 7, 17, 24], [6, 7, 22, 28], [6, 7, 26, 31], [6, 7, 27, 29], [6, 7, 32, 34], [6, 7, 35, 36], [6, 8, 9, 16], [6, 
8, 10, 15], [6, 8, 11, 34], [6, 8, 12, 21], [6, 8, 14, 29], [6, 8, 17, 28], [6, 8, 18, 19], [6, 8, 22, 31], [6, 8, 23, 24], [6, 8, 25, 36], [6, 8, 27, 35], [6, 8, 32, 37], [6, 9, 10, 24], [6, 9, 11, 29], [6, 9, 13, 25], [6, 9, 14, 26], [6, 9, 15, 23], [6, 9, 17, 31], [6, 9, 18, 30], [6, 9, 19, 33], [6, 9, 20, 37], [6, 9, 21, 28], [6, 9, 22, 34], [6, 10, 11, 13], [6, 10, 12, 34], [6, 10, 14, 31], [6, 10, 16, 36], [6, 10, 17, 26], [6, 10, 20, 35], [6, 10, 21, 33], [6, 10, 23, 28], [6, 10, 25, 29], [6, 11, 12, 30], [6, 11, 14, 32], [6, 11, 15, 35], [6, 11, 16, 26], [6, 11, 17, 19], [6, 11, 18, 20], [6, 11, 24, 25], [6, 11, 27, 31], [6, 11, 28, 36], [6, 12, 13, 36], [6, 12, 15, 18], [6, 12, 16, 24], [6, 12, 19, 37], [6, 12, 22, 35], [6, 12, 25, 31], [6, 12, 26, 33], [6, 12, 28, 29], [6, 13, 14, 17], [6, 13, 16, 27], [6, 13, 19, 26], [6, 13, 20, 28], [6, 13, 21, 37], [6, 13, 23, 35], [6, 13, 24, 30], [6, 13, 29, 32], [6, 13, 33, 34], [6, 14, 16, 33], [6, 14, 18, 24], [6, 14, 19, 36], [6, 14, 20, 21], [6, 14, 22, 27], [6, 14, 23, 25], [6, 14, 30, 37], [6, 15, 17, 36], [6, 15, 19, 29], [6, 15, 20, 24], [6, 15, 21, 31], [6, 15, 22, 32], [6, 15, 25, 34], [6, 15, 28, 30], [6, 15, 33, 37], [6, 16, 18, 21], [6, 16, 19, 32], [6, 16, 20, 22], [6, 16, 25, 35], [6, 16, 28, 37], [6, 17, 18, 22], [6, 17, 20, 30], [6, 17, 21, 32], [6, 17, 23, 37], [6, 17, 25, 27], [6, 17, 33, 35], [6, 18, 23, 26], [6, 18, 27, 34], [6, 18, 28, 31], [6, 18, 29, 33], [6, 18, 32, 35], [6, 19, 21, 35], [6, 19, 22, 30], [6, 19, 23, 27], [6, 19, 28, 34], [6, 20, 23, 32], [6, 20, 25, 26], [6, 20, 31, 34], [6, 20, 33, 36], [6, 21, 23, 29], [6, 21, 24, 26], [6, 21, 30, 34], [6, 22, 24, 33], [6, 22, 25, 37], [6, 22, 29, 36], [6, 23, 30, 33], [6, 24, 27, 32], [6, 24, 28, 35], [6, 24, 29, 34], [6, 24, 31, 37], [6, 25, 28, 32], [6, 26, 27, 37], [6, 26, 29, 30], [6, 26, 32, 36], [6, 27, 28, 33], [6, 27, 30, 36], [6, 29, 31, 35], [6, 30, 31, 32], [6, 34, 36, 37], [7, 8, 9, 21], [7, 8, 12, 34], [7, 8, 13, 24], [7, 8, 
14, 19], [7, 8, 16, 17], [7, 8, 18, 25], [7, 8, 23, 29], [7, 8, 27, 32], [7, 8, 28, 30], [7, 8, 33, 35], [7, 8, 36, 37], [7, 9, 10, 17], [7, 9, 11, 16], [7, 9, 12, 35], [7, 9, 13, 22], [7, 9, 15, 30], [7, 9, 18, 29], [7, 9, 19, 20], [7, 9, 23, 32], [7, 9, 24, 25], [7, 9, 26, 37], [7, 9, 28, 36], [7, 10, 11, 25], [7, 10, 12, 30], [7, 10, 14, 26], [7, 10, 15, 27], [7, 10, 16, 24], [7, 10, 18, 32], [7, 10, 19, 31], [7, 10, 20, 34], [7, 10, 22, 29], [7, 10, 23, 35], [7, 11, 12, 14], [7, 11, 13, 35], [7, 11, 15, 32], [7, 11, 17, 37], [7, 11, 18, 27], [7, 11, 21, 36], [7, 11, 22, 34], [7, 11, 24, 29], [7, 11, 26, 30], [7, 12, 13, 31], [7, 12, 15, 33], [7, 12, 16, 36], [7, 12, 17, 27], [7, 12, 18, 20], [7, 12, 19, 21], [7, 12, 25, 26], [7, 12, 28, 32], [7, 12, 29, 37], [7, 13, 14, 37], [7, 13, 16, 19], [7, 13, 17, 25], [7, 13, 23, 36], [7, 13, 26, 32], [7, 13, 27, 34], [7, 13, 29, 30], [7, 14, 15, 18], [7, 14, 17, 28], [7, 14, 20, 27], [7, 14, 21, 29], [7, 14, 24, 36], [7, 14, 25, 31], [7, 14, 30, 33], [7, 14, 34, 35], [7, 15, 17, 34], [7, 15, 19, 25], [7, 15, 20, 37], [7, 15, 21, 22], [7, 15, 23, 28], [7, 15, 24, 26], [7, 16, 18, 37], [7, 16, 20, 30], [7, 16, 21, 25], [7, 16, 22, 32], [7, 16, 23, 33], [7, 16, 26, 35], [7, 16, 29, 31], [7, 17, 19, 22], [7, 17, 20, 33], [7, 17, 21, 23], [7, 17, 26, 36], [7, 18, 19, 23], [7, 18, 21, 31], [7, 18, 22, 33], [7, 18, 26, 28], [7, 18, 34, 36], [7, 19, 24, 27], [7, 19, 28, 35], [7, 19, 29, 32], [7, 19, 30, 34], [7, 19, 33, 36], [7, 20, 22, 36], [7, 20, 23, 31], [7, 20, 24, 28], [7, 20, 29, 35], [7, 21, 24, 33], [7, 21, 26, 27], [7, 21, 32, 35], [7, 21, 34, 37], [7, 22, 24, 30], [7, 22, 25, 27], [7, 22, 31, 35], [7, 23, 25, 34], [7, 23, 30, 37], [7, 24, 31, 34], [7, 25, 28, 33], [7, 25, 29, 36], [7, 25, 30, 35], [7, 26, 29, 33], [7, 27, 30, 31], [7, 27, 33, 37], [7, 28, 29, 34], [7, 28, 31, 37], [7, 30, 32, 36], [7, 31, 32, 33], [8, 9, 10, 22], [8, 9, 13, 35], [8, 9, 14, 25], [8, 9, 15, 20], [8, 9, 17, 18], [8, 9, 19, 26], [8, 9, 
24, 30], [8, 9, 28, 33], [8, 9, 29, 31], [8, 9, 34, 36], [8, 10, 11, 18], [8, 10, 12, 17], [8, 10, 13, 36], [8, 10, 14, 23], [8, 10, 16, 31], [8, 10, 19, 30], [8, 10, 20, 21], [8, 10, 24, 33], [8, 10, 25, 26], [8, 10, 29, 37], [8, 11, 12, 26], [8, 11, 13, 31], [8, 11, 15, 27], [8, 11, 16, 28], [8, 11, 17, 25], [8, 11, 19, 33], [8, 11, 20, 32], [8, 11, 21, 35], [8, 11, 23, 30], [8, 11, 24, 36], [8, 12, 13, 15], [8, 12, 14, 36], [8, 12, 16, 33], [8, 12, 19, 28], [8, 12, 22, 37], [8, 12, 23, 35], [8, 12, 25, 30], [8, 12, 27, 31], [8, 13, 14, 32], [8, 13, 16, 34], [8, 13, 17, 37], [8, 13, 18, 28], [8, 13, 19, 21], [8, 13, 20, 22], [8, 13, 26, 27], [8, 13, 29, 33], [8, 14, 17, 20], [8, 14, 18, 26], [8, 14, 24, 37], [8, 14, 27, 33], [8, 14, 28, 35], [8, 14, 30, 31], [8, 15, 16, 19], [8, 15, 18, 29], [8, 15, 21, 28], [8, 15, 22, 30], [8, 15, 25, 37], [8, 15, 26, 32], [8, 15, 31, 34], [8, 15, 35, 36], [8, 16, 18, 35], [8, 16, 20, 26], [8, 16, 22, 23], [8, 16, 24, 29], [8, 16, 25, 27], [8, 17, 21, 31], [8, 17, 22, 26], [8, 17, 23, 33], [8, 17, 24, 34], [8, 17, 27, 36], [8, 17, 30, 32], [8, 18, 20, 23], [8, 18, 21, 34], [8, 18, 22, 24], [8, 18, 27, 37], [8, 19, 20, 24], [8, 19, 22, 32], [8, 19, 23, 34], [8, 19, 27, 29], [8, 19, 35, 37], [8, 20, 25, 28], [8, 20, 29, 36], [8, 20, 30, 33], [8, 20, 31, 35], [8, 20, 34, 37], [8, 21, 23, 37], [8, 21, 24, 32], [8, 21, 25, 29], [8, 21, 30, 36], [8, 22, 25, 34], [8, 22, 27, 28], [8, 22, 33, 36], [8, 23, 25, 31], [8, 23, 26, 28], [8, 23, 32, 36], [8, 24, 26, 35], [8, 25, 32, 35], [8, 26, 29, 34], [8, 26, 30, 37], [8, 26, 31, 36], [8, 27, 30, 34], [8, 28, 31, 32], [8, 29, 30, 35], [8, 31, 33, 37], [8, 32, 33, 34], [9, 10, 11, 23], [9, 10, 14, 36], [9, 10, 15, 26], [9, 10, 16, 21], [9, 10, 18, 19], [9, 10, 20, 27], [9, 10, 25, 31], [9, 10, 29, 34], [9, 10, 30, 32], [9, 10, 35, 37], [9, 11, 12, 19], [9, 11, 13, 18], [9, 11, 14, 37], [9, 11, 15, 24], [9, 11, 17, 32], [9, 11, 20, 31], [9, 11, 21, 22], [9, 11, 25, 34], [9, 11, 26, 27], [9, 
12, 13, 27], [9, 12, 14, 32], [9, 12, 16, 28], [9, 12, 17, 29], [9, 12, 18, 26], [9, 12, 20, 34], [9, 12, 21, 33], [9, 12, 22, 36], [9, 12, 24, 31], [9, 12, 25, 37], [9, 13, 14, 16], [9, 13, 15, 37], [9, 13, 17, 34], [9, 13, 20, 29], [9, 13, 24, 36], [9, 13, 26, 31], [9, 13, 28, 32], [9, 14, 15, 33], [9, 14, 17, 35], [9, 14, 19, 29], [9, 14, 20, 22], [9, 14, 21, 23], [9, 14, 27, 28], [9, 14, 30, 34], [9, 15, 18, 21], [9, 15, 19, 27], [9, 15, 28, 34], [9, 15, 29, 36], [9, 15, 31, 32], [9, 16, 17, 20], [9, 16, 19, 30], [9, 16, 22, 29], [9, 16, 23, 31], [9, 16, 27, 33], [9, 16, 32, 35], [9, 16, 36, 37], [9, 17, 19, 36], [9, 17, 21, 27], [9, 17, 23, 24], [9, 17, 25, 30], [9, 17, 26, 28], [9, 18, 22, 32], [9, 18, 23, 27], [9, 18, 24, 34], [9, 18, 25, 35], [9, 18, 28, 37], [9, 18, 31, 33], [9, 19, 21, 24], [9, 19, 22, 35], [9, 19, 23, 25], [9, 20, 21, 25], [9, 20, 23, 33], [9, 20, 24, 35], [9, 20, 28, 30], [9, 21, 26, 29], [9, 21, 30, 37], [9, 21, 31, 34], [9, 21, 32, 36], [9, 22, 25, 33], [9, 22, 26, 30], [9, 22, 31, 37], [9, 23, 26, 35], [9, 23, 28, 29], [9, 23, 34, 37], [9, 24, 26, 32], [9, 24, 27, 29], [9, 24, 33, 37], [9, 25, 27, 36], [9, 26, 33, 36], [9, 27, 30, 35], [9, 27, 32, 37], [9, 28, 31, 35], [9, 29, 32, 33], [9, 30, 31, 36], [9, 33, 34, 35], [10, 11, 12, 24], [10, 11, 15, 37], [10, 11, 16, 27], [10, 11, 17, 22], [10, 11, 19, 20], [10, 11, 21, 28], [10, 11, 26, 32], [10, 11, 30, 35], [10, 11, 31, 33], [10, 12, 13, 20], [10, 12, 14, 19], [10, 12, 16, 25], [10, 12, 18, 33], [10, 12, 21, 32], [10, 12, 22, 23], [10, 12, 26, 35], [10, 12, 27, 28], [10, 13, 14, 28], [10, 13, 15, 33], [10, 13, 17, 29], [10, 13, 18, 30], [10, 13, 19, 27], [10, 13, 21, 35], [10, 13, 22, 34], [10, 13, 23, 37], [10, 13, 25, 32], [10, 14, 15, 17], [10, 14, 18, 35], [10, 14, 21, 30], [10, 14, 25, 37], [10, 14, 27, 32], [10, 14, 29, 33], [10, 15, 16, 34], [10, 15, 18, 36], [10, 15, 20, 30], [10, 15, 21, 23], [10, 15, 22, 24], [10, 15, 28, 29], [10, 15, 31, 35], [10, 16, 19, 22], [10, 16, 
20, 28], [10, 16, 29, 35], [10, 16, 30, 37], [10, 16, 32, 33], [10, 17, 18, 21], [10, 17, 20, 31], [10, 17, 23, 30], [10, 17, 24, 32], [10, 17, 28, 34], [10, 17, 33, 36], [10, 18, 20, 37], [10, 18, 22, 28], [10, 18, 24, 25], [10, 18, 26, 31], [10, 18, 27, 29], [10, 19, 23, 33], [10, 19, 24, 28], [10, 19, 25, 35], [10, 19, 26, 36], [10, 19, 32, 34], [10, 20, 22, 25], [10, 20, 23, 36], [10, 20, 24, 26], [10, 21, 22, 26], [10, 21, 24, 34], [10, 21, 25, 36], [10, 21, 29, 31], [10, 22, 27, 30], [10, 22, 32, 35], [10, 22, 33, 37], [10, 23, 26, 34], [10, 23, 27, 31], [10, 24, 27, 36], [10, 24, 29, 30], [10, 25, 27, 33], [10, 25, 28, 30], [10, 26, 28, 37], [10, 27, 34, 37], [10, 28, 31, 36], [10, 29, 32, 36], [10, 30, 33, 34], [10, 31, 32, 37], [10, 34, 35, 36], [11, 12, 13, 25], [11, 12, 17, 28], [11, 12, 18, 23], [11, 12, 20, 21], [11, 12, 22, 29], [11, 12, 27, 33], [11, 12, 31, 36], [11, 12, 32, 34], [11, 13, 14, 21], [11, 13, 15, 20], [11, 13, 17, 26], [11, 13, 19, 34], [11, 13, 22, 33], [11, 13, 23, 24], [11, 13, 27, 36], [11, 13, 28, 29], [11, 14, 15, 29], [11, 14, 16, 34], [11, 14, 18, 30], [11, 14, 19, 31], [11, 14, 20, 28], [11, 14, 22, 36], [11, 14, 23, 35], [11, 14, 26, 33], [11, 15, 16, 18], [11, 15, 19, 36], [11, 15, 22, 31], [11, 15, 28, 33], [11, 15, 30, 34], [11, 16, 17, 35], [11, 16, 19, 37], [11, 16, 21, 31], [11, 16, 22, 24], [11, 16, 23, 25], [11, 16, 29, 30], [11, 16, 32, 36], [11, 17, 20, 23], [11, 17, 21, 29], [11, 17, 30, 36], [11, 17, 33, 34], [11, 18, 19, 22], [11, 18, 21, 32], [11, 18, 24, 31], [11, 18, 25, 33], [11, 18, 29, 35], [11, 18, 34, 37], [11, 19, 23, 29], [11, 19, 25, 26], [11, 19, 27, 32], [11, 19, 28, 30], [11, 20, 24, 34], [11, 20, 25, 29], [11, 20, 26, 36], [11, 20, 27, 37], [11, 20, 33, 35], [11, 21, 23, 26], [11, 21, 24, 37], [11, 21, 25, 27], [11, 22, 23, 27], [11, 22, 25, 35], [11, 22, 26, 37], [11, 22, 30, 32], [11, 23, 28, 31], [11, 23, 33, 36], [11, 24, 27, 35], [11, 24, 28, 32], [11, 25, 28, 37], [11, 25, 30, 31], [11, 26, 
28, 34], [11, 26, 29, 31], [11, 29, 32, 37], [11, 30, 33, 37], [11, 31, 34, 35], [11, 35, 36, 37], [12, 13, 14, 26], [12, 13, 18, 29], [12, 13, 19, 24], [12, 13, 21, 22], [12, 13, 23, 30], [12, 13, 28, 34], [12, 13, 32, 37], [12, 13, 33, 35], [12, 14, 15, 22], [12, 14, 16, 21], [12, 14, 18, 27], [12, 14, 20, 35], [12, 14, 23, 34], [12, 14, 24, 25], [12, 14, 28, 37], [12, 14, 29, 30], [12, 15, 16, 30], [12, 15, 17, 35], [12, 15, 19, 31], [12, 15, 20, 32], [12, 15, 21, 29], [12, 15, 23, 37], [12, 15, 24, 36], [12, 15, 27, 34], [12, 16, 17, 19], [12, 16, 20, 37], [12, 16, 23, 32], [12, 16, 29, 34], [12, 16, 31, 35], [12, 17, 18, 36], [12, 17, 22, 32], [12, 17, 23, 25], [12, 17, 24, 26], [12, 17, 30, 31], [12, 17, 33, 37], [12, 18, 21, 24], [12, 18, 22, 30], [12, 18, 31, 37], [12, 18, 34, 35], [12, 19, 20, 23], [12, 19, 22, 33], [12, 19, 25, 32], [12, 19, 26, 34], [12, 19, 30, 36], [12, 20, 24, 30], [12, 20, 26, 27], [12, 20, 28, 33], [12, 20, 29, 31], [12, 21, 25, 35], [12, 21, 26, 30], [12, 21, 27, 37], [12, 21, 34, 36], [12, 22, 24, 27], [12, 22, 26, 28], [12, 23, 24, 28], [12, 23, 26, 36], [12, 23, 31, 33], [12, 24, 29, 32], [12, 24, 34, 37], [12, 25, 28, 36], [12, 25, 29, 33], [12, 26, 31, 32], [12, 27, 29, 35], [12, 27, 30, 32], [12, 32, 35, 36], [13, 14, 15, 27], [13, 14, 19, 30], [13, 14, 20, 25], [13, 14, 22, 23], [13, 14, 24, 31], [13, 14, 29, 35], [13, 14, 34, 36], [13, 15, 16, 23], [13, 15, 17, 22], [13, 15, 19, 28], [13, 15, 21, 36], [13, 15, 24, 35], [13, 15, 25, 26], [13, 15, 30, 31], [13, 16, 17, 31], [13, 16, 18, 36], [13, 16, 20, 32], [13, 16, 21, 33], [13, 16, 22, 30], [13, 16, 25, 37], [13, 16, 28, 35], [13, 17, 18, 20], [13, 17, 24, 33], [13, 17, 30, 35], [13, 17, 32, 36], [13, 18, 19, 37], [13, 18, 23, 33], [13, 18, 24, 26], [13, 18, 25, 27], [13, 18, 31, 32], [13, 19, 22, 25], [13, 19, 23, 31], [13, 19, 35, 36], [13, 20, 21, 24], [13, 20, 23, 34], [13, 20, 26, 33], [13, 20, 27, 35], [13, 20, 31, 37], [13, 21, 25, 31], [13, 21, 27, 28], [13, 21, 
29, 34], [13, 21, 30, 32], [13, 22, 26, 36], [13, 22, 27, 31], [13, 22, 35, 37], [13, 23, 25, 28], [13, 23, 27, 29], [13, 24, 25, 29], [13, 24, 27, 37], [13, 24, 32, 34], [13, 25, 30, 33], [13, 26, 29, 37], [13, 26, 30, 34], [13, 27, 32, 33], [13, 28, 30, 36], [13, 28, 31, 33], [13, 33, 36, 37], [14, 15, 16, 28], [14, 15, 20, 31], [14, 15, 21, 26], [14, 15, 23, 24], [14, 15, 25, 32], [14, 15, 30, 36], [14, 15, 35, 37], [14, 16, 17, 24], [14, 16, 18, 23], [14, 16, 20, 29], [14, 16, 22, 37], [14, 16, 25, 36], [14, 16, 26, 27], [14, 16, 31, 32], [14, 17, 18, 32], [14, 17, 19, 37], [14, 17, 21, 33], [14, 17, 22, 34], [14, 17, 23, 31], [14, 17, 29, 36], [14, 18, 19, 21], [14, 18, 25, 34], [14, 18, 31, 36], [14, 18, 33, 37], [14, 19, 24, 34], [14, 19, 25, 27], [14, 19, 26, 28], [14, 19, 32, 33], [14, 20, 23, 26], [14, 20, 24, 32], [14, 20, 36, 37], [14, 21, 22, 25], [14, 21, 24, 35], [14, 21, 27, 34], [14, 21, 28, 36], [14, 22, 26, 32], [14, 22, 28, 29], [14, 22, 30, 35], [14, 22, 31, 33], [14, 23, 27, 37], [14, 23, 28, 32], [14, 24, 26, 29], [14, 24, 28, 30], [14, 25, 26, 30], [14, 25, 33, 35], [14, 26, 31, 34], [14, 27, 31, 35], [14, 28, 33, 34], [14, 29, 31, 37], [14, 29, 32, 34], [15, 16, 17, 29], [15, 16, 21, 32], [15, 16, 22, 27], [15, 16, 24, 25], [15, 16, 26, 33], [15, 16, 31, 37], [15, 17, 18, 25], [15, 17, 19, 24], [15, 17, 21, 30], [15, 17, 26, 37], [15, 17, 27, 28], [15, 17, 32, 33], [15, 18, 19, 33], [15, 18, 22, 34], [15, 18, 23, 35], [15, 18, 24, 32], [15, 18, 30, 37], [15, 19, 20, 22], [15, 19, 26, 35], [15, 19, 32, 37], [15, 20, 25, 35], [15, 20, 26, 28], [15, 20, 27, 29], [15, 20, 33, 34], [15, 21, 24, 27], [15, 21, 25, 33], [15, 22, 23, 26], [15, 22, 25, 36], [15, 22, 28, 35], [15, 22, 29, 37], [15, 23, 27, 33], [15, 23, 29, 30], [15, 23, 31, 36], [15, 23, 32, 34], [15, 24, 29, 33], [15, 25, 27, 30], [15, 25, 29, 31], [15, 26, 27, 31], [15, 26, 34, 36], [15, 27, 32, 35], [15, 28, 32, 36], [15, 29, 34, 35], [15, 30, 33, 35], [16, 17, 18, 30], [16, 17, 
22, 33], [16, 17, 23, 28], [16, 17, 25, 26], [16, 17, 27, 34], [16, 18, 19, 26], [16, 18, 20, 25], [16, 18, 22, 31], [16, 18, 28, 29], [16, 18, 33, 34], [16, 19, 20, 34], [16, 19, 23, 35], [16, 19, 24, 36], [16, 19, 25, 33], [16, 20, 21, 23], [16, 20, 27, 36], [16, 21, 26, 36], [16, 21, 27, 29], [16, 21, 28, 30], [16, 21, 34, 35], [16, 22, 25, 28], [16, 22, 26, 34], [16, 23, 24, 27], [16, 23, 26, 37], [16, 23, 29, 36], [16, 24, 28, 34], [16, 24, 30, 31], [16, 24, 32, 37], [16, 24, 33, 35], [16, 25, 30, 34], [16, 26, 28, 31], [16, 26, 30, 32], [16, 27, 28, 32], [16, 27, 35, 37], [16, 28, 33, 36], [16, 29, 33, 37], [16, 30, 35, 36], [16, 31, 34, 36], [17, 18, 19, 31], [17, 18, 23, 34], [17, 18, 24, 29], [17, 18, 26, 27], [17, 18, 28, 35], [17, 19, 20, 27], [17, 19, 21, 26], [17, 19, 23, 32], [17, 19, 29, 30], [17, 19, 34, 35], [17, 20, 21, 35], [17, 20, 24, 36], [17, 20, 25, 37], [17, 20, 26, 34], [17, 21, 22, 24], [17, 21, 28, 37], [17, 22, 27, 37], [17, 22, 28, 30], [17, 22, 29, 31], [17, 22, 35, 36], [17, 23, 26, 29], [17, 23, 27, 35], [17, 24, 25, 28], [17, 24, 30, 37], [17, 25, 29, 35], [17, 25, 31, 32], [17, 25, 34, 36], [17, 26, 31, 35], [17, 27, 29, 32], [17, 27, 31, 33], [17, 28, 29, 33], [17, 29, 34, 37], [17, 31, 36, 37], [17, 32, 35, 37], [18, 19, 20, 32], [18, 19, 24, 35], [18, 19, 25, 30], [18, 19, 27, 28], [18, 19, 29, 36], [18, 20, 21, 28], [18, 20, 22, 27], [18, 20, 24, 33], [18, 20, 30, 31], [18, 20, 35, 36], [18, 21, 22, 36], [18, 21, 25, 37], [18, 21, 27, 35], [18, 22, 23, 25], [18, 23, 29, 31], [18, 23, 30, 32], [18, 23, 36, 37], [18, 24, 27, 30], [18, 24, 28, 36], [18, 25, 26, 29], [18, 26, 30, 36], [18, 26, 32, 33], [18, 26, 35, 37], [18, 27, 32, 36], [18, 28, 30, 33], [18, 28, 32, 34], [18, 29, 30, 34], [19, 20, 21, 33], [19, 20, 25, 36], [19, 20, 26, 31], [19, 20, 28, 29], [19, 20, 30, 37], [19, 21, 22, 29], [19, 21, 23, 28], [19, 21, 25, 34], [19, 21, 31, 32], [19, 21, 36, 37], [19, 22, 23, 37], [19, 22, 28, 36], [19, 23, 24, 26], [19, 24, 
30, 32], [19, 24, 31, 33], [19, 25, 28, 31], [19, 25, 29, 37], [19, 26, 27, 30], [19, 27, 31, 37], [19, 27, 33, 34], [19, 28, 33, 37], [19, 29, 31, 34], [19, 29, 33, 35], [19, 30, 31, 35], [20, 21, 22, 34], [20, 21, 26, 37], [20, 21, 27, 32], [20, 21, 29, 30], [20, 22, 23, 30], [20, 22, 24, 29], [20, 22, 26, 35], [20, 22, 32, 33], [20, 23, 29, 37], [20, 24, 25, 27], [20, 25, 31, 33], [20, 25, 32, 34], [20, 26, 29, 32], [20, 27, 28, 31], [20, 28, 34, 35], [20, 30, 32, 35], [20, 30, 34, 36], [20, 31, 32, 36], [21, 22, 23, 35], [21, 22, 28, 33], [21, 22, 30, 31], [21, 23, 24, 31], [21, 23, 25, 30], [21, 23, 27, 36], [21, 23, 33, 34], [21, 25, 26, 28], [21, 26, 32, 34], [21, 26, 33, 35], [21, 27, 30, 33], [21, 28, 29, 32], [21, 29, 35, 36], [21, 31, 33, 36], [21, 31, 35, 37], [21, 32, 33, 37], [22, 23, 24, 36], [22, 23, 29, 34], [22, 23, 31, 32], [22, 24, 25, 32], [22, 24, 26, 31], [22, 24, 28, 37], [22, 24, 34, 35], [22, 26, 27, 29], [22, 27, 33, 35], [22, 27, 34, 36], [22, 28, 31, 34], [22, 29, 30, 33], [22, 30, 36, 37], [22, 32, 34, 37], [23, 24, 25, 37], [23, 24, 30, 35], [23, 24, 32, 33], [23, 25, 26, 33], [23, 25, 27, 32], [23, 25, 35, 36], [23, 27, 28, 30], [23, 28, 34, 36], [23, 28, 35, 37], [23, 29, 32, 35], [23, 30, 31, 34], [24, 25, 31, 36], [24, 25, 33, 34], [24, 26, 27, 34], [24, 26, 28, 33], [24, 26, 36, 37], [24, 28, 29, 31], [24, 29, 35, 37], [24, 30, 33, 36], [24, 31, 32, 35], [25, 26, 32, 37], [25, 26, 34, 35], [25, 27, 28, 35], [25, 27, 29, 34], [25, 29, 30, 32], [25, 31, 34, 37], [25, 32, 33, 36], [26, 27, 35, 36], [26, 28, 29, 36], [26, 28, 30, 35], [26, 30, 31, 33], [26, 33, 34, 37], [27, 28, 36, 37], [27, 29, 30, 37], [27, 29, 31, 36], [27, 31, 32, 34], [28, 30, 32, 37], [28, 32, 33, 35], [29, 33, 34, 36], [30, 34, 35, 37]] |
def gauss_spline(x, n):
    """Gaussian approximation to the B-spline basis function of order n.

    The approximating Gaussian has variance (n + 1) / 12; the returned
    value is that Gaussian's pdf evaluated element-wise at x.
    """
    x = asarray(x)
    variance = (n + 1) / 12.0
    norm_const = 1.0 / sqrt(2 * pi * variance)
    return norm_const * exp(-x ** 2 / (2.0 * variance))
class TDErrorEvaluator(EvaluatorProtocol):
    """Computes the mean squared temporal-difference (TD) error.

    If ``episodes`` was provided (and is non-empty) those episodes are
    evaluated; otherwise evaluation falls back to the episodes of the
    dataset passed at call time.
    """
    _episodes: Optional[Sequence[EpisodeBase]]
    def __init__(self, episodes: Optional[Sequence[EpisodeBase]]=None):
        self._episodes = episodes
    def __call__(self, algo: QLearningAlgoProtocol, dataset: ReplayBuffer) -> float:
        episodes = (self._episodes if self._episodes else dataset.episodes)
        squared_errors = []
        for episode in episodes:
            for batch in make_batches(episode, WINDOW_SIZE, dataset.transition_picker):
                # Current value estimate Q(s, a).
                values = algo.predict_value(batch.observations, batch.actions)
                # Bootstrapped target r + gamma * Q(s', a'), masked at terminals.
                next_actions = algo.predict(batch.next_observations)
                next_values = algo.predict_value(batch.next_observations, next_actions)
                mask = (1.0 - batch.terminals).reshape(-1)
                rewards = np.asarray(batch.rewards).reshape(-1)
                if algo.reward_scaler:
                    rewards = algo.reward_scaler.transform_numpy(rewards)
                targets = rewards + algo.gamma * next_values * mask
                squared_errors.extend(((values - targets) ** 2).tolist())
        return float(np.mean(squared_errors))
class Graph:
    """Skeleton-graph wrapper.

    The topology (node count, self links, inward/outward edges) comes
    from module-level globals; the adjacency matrix is built once at
    construction and cached on the instance.
    """
    def __init__(self, labeling_mode='spatial'):
        # Build the adjacency first, then mirror the topology globals
        # onto the instance for downstream consumers.
        self.A = self.get_adjacency_matrix(labeling_mode)
        self.num_node = num_node
        self.self_link = self_link
        self.inward = inward
        self.outward = outward
        self.neighbor = neighbor
    def get_adjacency_matrix(self, labeling_mode=None):
        """Return the adjacency matrix for ``labeling_mode``.

        ``None`` returns the cached matrix; only 'spatial' is supported,
        anything else raises ValueError.
        """
        if labeling_mode is None:
            return self.A
        if labeling_mode != 'spatial':
            raise ValueError()
        return tools.get_spatial_graph(num_node, self_link, inward, outward)
def build_sem_seg_train_aug(cfg):
    """Assemble the training-time augmentations for semantic segmentation.

    Order: resize by shortest edge, optional category-area-constrained
    random crop, then random flip.
    """
    augmentations = [
        T.ResizeShortestEdge(
            cfg.INPUT.MIN_SIZE_TRAIN,
            cfg.INPUT.MAX_SIZE_TRAIN,
            cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,
        )
    ]
    if cfg.INPUT.CROP.ENABLED:
        augmentations.append(
            T.RandomCrop_CategoryAreaConstraint(
                cfg.INPUT.CROP.TYPE,
                cfg.INPUT.CROP.SIZE,
                cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA,
                cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
            )
        )
    augmentations.append(T.RandomFlip())
    return augmentations
class TestABC(object):
def test_abstract(self):
assert_(issubclass(np.number, numbers.Number))
assert_(issubclass(np.inexact, numbers.Complex))
assert_(issubclass(np.complexfloating, numbers.Complex))
assert_(issubclass(np.floating, numbers.Real))
assert_(issubclass(np.integer, numbers.Integral))
assert_(issubclass(np.signedinteger, numbers.Integral))
assert_(issubclass(np.unsignedinteger, numbers.Integral))
def test_floats(self):
for t in sctypes['float']:
assert_(isinstance(t(), numbers.Real), '{0} is not instance of Real'.format(t.__name__))
assert_(issubclass(t, numbers.Real), '{0} is not subclass of Real'.format(t.__name__))
assert_((not isinstance(t(), numbers.Rational)), '{0} is instance of Rational'.format(t.__name__))
assert_((not issubclass(t, numbers.Rational)), '{0} is subclass of Rational'.format(t.__name__))
def test_complex(self):
for t in sctypes['complex']:
assert_(isinstance(t(), numbers.Complex), '{0} is not instance of Complex'.format(t.__name__))
assert_(issubclass(t, numbers.Complex), '{0} is not subclass of Complex'.format(t.__name__))
assert_((not isinstance(t(), numbers.Real)), '{0} is instance of Real'.format(t.__name__))
assert_((not issubclass(t, numbers.Real)), '{0} is subclass of Real'.format(t.__name__))
def test_int(self):
for t in sctypes['int']:
assert_(isinstance(t(), numbers.Integral), '{0} is not instance of Integral'.format(t.__name__))
assert_(issubclass(t, numbers.Integral), '{0} is not subclass of Integral'.format(t.__name__))
def test_uint(self):
for t in sctypes['uint']:
assert_(isinstance(t(), numbers.Integral), '{0} is not instance of Integral'.format(t.__name__))
assert_(issubclass(t, numbers.Integral), '{0} is not subclass of Integral'.format(t.__name__)) |
def test_compute_hmean():
    """compute_hmean rejects non-int arguments and returns the harmonic
    mean of precision/recall as its third result."""
    # A float or list in any position must trigger the input assertion.
    bad_args = [
        (0, 0, 0.0, 0),
        (0, 0, 0, 0.0),
        ([1], 0, 0, 0),
        (0, [1], 0, 0),
    ]
    for args in bad_args:
        with pytest.raises(AssertionError):
            utils.compute_hmean(*args)
    # Perfect agreement gives hmean 1; zero hits give hmean 0.
    assert utils.compute_hmean(2, 2, 2, 2)[2] == 1
    assert utils.compute_hmean(0, 0, 2, 2)[2] == 0
_model
def ese_vovnet39b_evos(pretrained=False, **kwargs):
    """VoVNet-39b with eSE attention using EvoNorm-S0 norm/act layers."""
    def evonorm_layer(num_features, **layer_kwargs):
        # EvoNorm fuses normalization and activation; scripting disabled.
        return create_norm_act('EvoNormSample', num_features, jit=False, **layer_kwargs)
    return _vovnet('ese_vovnet39b_evos', pretrained=pretrained, norm_layer=evonorm_layer, **kwargs)
class MujocoReplayBuffer(EnvReplayBuffer):
    """Replay buffer that also snapshots MuJoCo simulator state per sample.

    Alongside the usual transitions it records body Cartesian positions
    (``body_xpos``), joint positions (``qpos``) and full ``sim`` states so
    stored trajectories can later be replayed in the simulator.
    """
    def __init__(self, max_replay_buffer_size, env, env_info_sizes=None):
        super().__init__(max_replay_buffer_size=max_replay_buffer_size, env=env, env_info_sizes=env_info_sizes)
        # Pre-allocated arrays sized to the buffer capacity; indexed by the
        # base class's write cursor.
        self.body_xpos_shape = env.sim.data.body_xpos.shape
        self._body_xpos = np.zeros((max_replay_buffer_size, *self.body_xpos_shape))
        self.qpos_shape = env.sim.data.qpos.shape
        self._qpos = np.zeros((max_replay_buffer_size, *self.qpos_shape))
        # Full simulator states; grows until capacity, then overwritten in place.
        self.env_states = []
    def add_sample(self, observation, action, reward, terminal, next_observation, **kwargs):
        # self._top is presumably the base class's ring-buffer write index —
        # TODO confirm against EnvReplayBuffer.
        self._body_xpos[self._top] = self.env.sim.data.body_xpos
        self._qpos[self._top] = self.env.sim.data.qpos
        # NOTE(review): max_replay_buffer_size is invoked as a callable; if
        # the base class exposes it as a plain attribute this raises
        # TypeError once the list reaches capacity — verify against the base.
        if (len(self.env_states) >= self.max_replay_buffer_size()):
            self.env_states[self._top] = self.env.sim.get_state()
        else:
            self.env_states.append(copy.deepcopy(self.env.sim.get_state()))
        return super().add_sample(observation=observation, action=action, reward=reward, next_observation=next_observation, terminal=terminal, **kwargs)
    def get_snapshot(self):
        # Extend the base snapshot with only the filled portion of storage.
        snapshot = super().get_snapshot()
        snapshot.update(dict(body_xpos=self._body_xpos[:self._size], qpos=self._qpos[:self._size], env_states=self.env_states[:self._size]))
        return snapshot
    def visualize_agent(self, start_idx, end_idx):
        # Replay the stored simulator states in [start_idx, end_idx).
        visualize_mujoco_from_states(self.env, self.env_states[start_idx:end_idx])
    def reset(self):
        super().reset()
        # Clear the auxiliary storage along with the base buffer.
        self._body_xpos = np.zeros_like(self._body_xpos)
        self._qpos = np.zeros_like(self._qpos)
        self.env_states = []
# Translation table deleting bracket, quote and newline characters in one pass.
_CLEAN_LINE_TABLE = str.maketrans('', '', "[]'\n")

def clean_line(text):
    """Remove list-literal punctuation ('[', ']', single quotes) and
    newlines from *text*, then trim surrounding whitespace.

    Equivalent to the previous chain of four str.replace calls, but a
    single C-level pass via str.translate.
    """
    return text.translate(_CLEAN_LINE_TABLE).strip()
class FPN(nn.Module):
    """Feature Pyramid Network head over a multi-stage backbone.

    forward() takes the backbone's feature maps (lowest to highest level)
    and produces a top-down pyramid of ``fpn_dim``-channel blobs, with
    optional extra levels via max-pool or strided convs.

    NOTE(review): __init__ never calls _make_layer, so the p5_in/p5_out/
    fpn_in/fpn_out modules used by forward() are presumably created by a
    subclass or a later explicit call — confirm before using standalone.
    """
    def __init__(self, **kwargs):
        super(FPN, self).__init__()
        # Backbone channel counts (C2..C5) and strides relative to the input.
        dim_in = kwargs.pop('dim_in', [256, 512, 1024, 2048])
        spatial_scale = kwargs.pop('spatial_scale', [(1 / 4), (1 / 8), (1 / 16), (1 / 32)])
        keep_backbone = kwargs.pop('keep_backbone', False)
        fpn_dim = kwargs.pop('fpn_dim', 256)
        use_c5 = kwargs.pop('use_c5', True)
        m_only = kwargs.pop('m_only', False)
        norm = kwargs.pop('norm', '')
        min_level = kwargs.pop('min_level', 2)
        max_level = kwargs.pop('max_level', 6)
        lowest_bk_lvl = kwargs.pop('lowest_bk_lvl', 2)
        highest_bk_lvl = kwargs.pop('highest_bk_lvl', 5)
        extra_conv = kwargs.pop('extra_conv', False)
        # dim_in tracks the "current" input width; starts at the top stage C5.
        self.dim_in = dim_in[(- 1)]
        self.spatial_scale = spatial_scale
        self.keep_backbone = keep_backbone
        self.use_c5 = use_c5
        self.m_only = m_only
        self.max_level = max_level
        self.lowest_bk_lvl = lowest_bk_lvl
        self.highest_bk_lvl = highest_bk_lvl
        self.extra_conv = extra_conv
        # Number of backbone stages that feed lateral connections.
        self.num_backbone_stages = (len(dim_in) - (min_level - lowest_bk_lvl))
        output_levels = ((highest_bk_lvl - lowest_bk_lvl) + 1)
        self.spatial_scale = self.spatial_scale[:output_levels]
        self.dim_out = [self.dim_in for _ in range(output_levels)]
        self._init_weights()
    def _init_weights(self):
        """Kaiming-uniform convs, unit norms, small-normal linears."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight, a=1)
                if (m.bias is not None):
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)
    def _make_layer(self, dim_in, fpn_dim, norm, act=''):
        """Create the lateral (1x1) and output (3x3) convs plus any extra
        pyramid levels; mutates self.dim_in and self.spatial_scale."""
        self.p5_in = make_conv(self.dim_in, fpn_dim, kernel_size=1, norm=make_norm(fpn_dim, norm=norm), act=make_act(act=act))
        self.p5_out = make_conv(fpn_dim, fpn_dim, kernel_size=3, norm=make_norm(fpn_dim, norm=norm), act=make_act(act=act))
        self.fpn_in = []
        self.fpn_out = []
        # One lateral/output pair per remaining backbone stage, top-down.
        for i in range((self.num_backbone_stages - 1)):
            px_in = make_conv(dim_in[((- i) - 2)], fpn_dim, kernel_size=1, norm=make_norm(fpn_dim, norm=norm), act=make_act(act=act))
            px_out = make_conv(fpn_dim, fpn_dim, kernel_size=3, norm=make_norm(fpn_dim, norm=norm), act=make_act(act=act))
            self.fpn_in.append(px_in)
            self.fpn_out.append(px_out)
        self.fpn_in = nn.ModuleList(self.fpn_in)
        self.fpn_out = nn.ModuleList(self.fpn_out)
        self.dim_in = fpn_dim
        # P6 as max-pool when exactly one extra level and no extra convs.
        if ((not self.extra_conv) and (self.max_level == (self.highest_bk_lvl + 1))):
            self.maxpool_p6 = nn.MaxPool2d(kernel_size=1, stride=2, padding=0)
            self.spatial_scale.append((self.spatial_scale[(- 1)] * 0.5))
        # Otherwise extra levels come from stride-2 convs (RetinaNet style).
        if (self.extra_conv and (self.max_level > self.highest_bk_lvl)):
            self.extra_pyramid_modules = nn.ModuleList()
            if self.use_c5:
                self.dim_in = dim_in[(- 1)]
            for i in range((self.highest_bk_lvl + 1), (self.max_level + 1)):
                self.extra_pyramid_modules.append(make_conv(self.dim_in, fpn_dim, kernel_size=3, stride=2, norm=make_norm(fpn_dim, norm=norm), act=make_act(act=act)))
                self.dim_in = fpn_dim
                self.spatial_scale.append((self.spatial_scale[(- 1)] * 0.5))
    def forward(self, x):
        """Top-down pass: start at C5, upsample, add laterals, emit pyramid."""
        c5_out = x[(- 1)]
        px = self.p5_in(c5_out)
        # m_only skips the 3x3 smoothing convs and emits the merged maps.
        fpn_output_blobs = ([self.p5_out(px)] if (not self.m_only) else [px])
        for i in range((self.num_backbone_stages - 1)):
            cx_out = x[((- i) - 2)]
            cx_out = self.fpn_in[i](cx_out)
            # Upsample only when spatial sizes differ (handles stride-1 stages).
            if (cx_out.size()[2:] != px.size()[2:]):
                px = F.interpolate(px, scale_factor=2, mode='nearest')
            px = (cx_out + px)
            if self.m_only:
                fpn_output_blobs.insert(0, px)
            else:
                fpn_output_blobs.insert(0, self.fpn_out[i](px))
        if hasattr(self, 'maxpool_p6'):
            fpn_output_blobs.append(self.maxpool_p6(fpn_output_blobs[(- 1)]))
        if hasattr(self, 'extra_pyramid_modules'):
            # First extra level reads C5 or the previous pyramid output.
            if self.use_c5:
                p6_in = c5_out
            else:
                p6_in = fpn_output_blobs[(- 1)]
            fpn_output_blobs.append(self.extra_pyramid_modules[0](p6_in))
            for module in self.extra_pyramid_modules[1:]:
                fpn_output_blobs.append(module(F.relu(fpn_output_blobs[(- 1)])))
        if self.keep_backbone:
            fpn_output_blobs.append(x)
        return fpn_output_blobs
def _coco_gt_add(gt_data, captions, img_id):
    # Append one image record plus its caption annotation(s) to gt_data.
    gt_data['images'].append({'id': img_id})
    if isinstance(captions, str):
        gt_data['annotations'].append({'image_id': img_id, 'caption': captions, 'id': img_id})
    else:
        gt_data['annotations'].extend([{'image_id': img_id, 'caption': c, 'id': img_id} for c in captions])

def convert_to_coco_gt(data, outpath, caption_key, sample_id_key, split, load_gt_from_file=False, img_ids=None):
    """Write a COCO-style caption ground-truth JSON file for evaluation.

    Args:
        data: dataset keyed by split (ignored when load_gt_from_file is set).
        outpath: destination path for the JSON file.
        caption_key: key of the caption(s) in each annotation.
        sample_id_key: key of the sample id; numeric ids are cast to int.
        split: which split of ``data`` to convert.
        load_gt_from_file: optional path; when truthy, annotations are read
            from this file instead of ``data``.
        img_ids: optional collection restricting which image ids are kept.
            (fix: was a mutable default argument ``[]``)
    """
    gt_data = {'annotations': [], 'images': []}
    if load_gt_from_file:
        print(f'Generating ground truth file for evaluation from {load_gt_from_file}....')
        data = load_gt_file(load_gt_from_file)
        for ann in data:
            captions = ann[caption_key]
            img_id = (int(ann[sample_id_key]) if is_convertible_to_int(ann[sample_id_key]) else ann[sample_id_key])
            if (img_ids and (img_id not in img_ids)):
                continue
            _coco_gt_add(gt_data, captions, img_id)
    else:
        print(f'Generating ground truth file for evaluation....')
        for (i, ann) in tqdm(enumerate(data[split])):
            captions = data[split].annotation[i][caption_key]
            img_id = (int(ann[sample_id_key]) if is_convertible_to_int(ann[sample_id_key]) else ann[sample_id_key])
            if (img_ids and (img_id not in img_ids)):
                continue
            _coco_gt_add(gt_data, captions, img_id)
    # fix: the file handle from open() was never closed; use a context manager.
    with open(outpath, 'w') as f:
        json.dump(gt_data, f)
    print(f'Saved annotations at {outpath}')
class TestPredict(unittest.TestCase):
    """End-to-end tests for basic_pitch inference.predict on a sample wav."""
    def test_predict(self) -> None:
        """Model output keys/shapes, MIDI type, and note pitch range."""
        test_audio_path = (RESOURCES_PATH / 'vocadito_10.wav')
        (model_output, midi_data, note_events) = inference.predict(test_audio_path, ICASSP_2022_MODEL_PATH)
        assert (set(model_output.keys()) == set(['note', 'onset', 'contour']))
        assert (model_output['note'].shape == model_output['onset'].shape)
        assert isinstance(midi_data, pretty_midi.PrettyMIDI)
        # Pitches must lie within the supported MIDI range starting at A0 (21).
        lowest_supported_midi = 21
        note_pitch_min = [(n[2] >= lowest_supported_midi) for n in note_events]
        note_pitch_max = [(n[2] <= (lowest_supported_midi + ANNOTATIONS_N_SEMITONES)) for n in note_events]
        assert all(note_pitch_min)
        assert all(note_pitch_max)
        assert isinstance(note_events, list)
    def test_predict_with_saves(self) -> None:
        """predict_and_save with all flags set writes midi/csv/npz/wav files."""
        test_audio_path = (RESOURCES_PATH / 'vocadito_10.wav')
        with tempfile.TemporaryDirectory() as tmpdir:
            inference.predict_and_save([test_audio_path], tmpdir, True, True, True, True)
            expected_midi_path = (tmpdir / pathlib.Path('vocadito_10_basic_pitch.mid'))
            expected_csv_path = (tmpdir / pathlib.Path('vocadito_10_basic_pitch.csv'))
            expected_npz_path = (tmpdir / pathlib.Path('vocadito_10_basic_pitch.npz'))
            expected_sonif_path = (tmpdir / pathlib.Path('vocadito_10_basic_pitch.wav'))
            for output_path in [expected_midi_path, expected_csv_path, expected_npz_path, expected_sonif_path]:
                assert os.path.exists(output_path)
    def test_predict_onset_threshold(self) -> None:
        """Boundary onset thresholds (0..1) must not raise."""
        test_audio_path = (RESOURCES_PATH / 'vocadito_10.wav')
        for onset_threshold in [0, 0.3, 0.8, 1]:
            inference.predict(test_audio_path, ICASSP_2022_MODEL_PATH, onset_threshold=onset_threshold)
    def test_predict_frame_threshold(self) -> None:
        """Boundary frame thresholds (0..1) must not raise."""
        test_audio_path = (RESOURCES_PATH / 'vocadito_10.wav')
        for frame_threshold in [0, 0.3, 0.8, 1]:
            inference.predict(test_audio_path, ICASSP_2022_MODEL_PATH, frame_threshold=frame_threshold)
    def test_predict_min_note_length(self) -> None:
        """Every returned note lasts at least the requested minimum (ms)."""
        test_audio_path = (RESOURCES_PATH / 'vocadito_10.wav')
        for minimum_note_length in [10, 100, 1000]:
            (_, _, note_events) = inference.predict(test_audio_path, ICASSP_2022_MODEL_PATH, minimum_note_length=minimum_note_length)
            # note events are (start_s, end_s, pitch, ...); compare in seconds.
            min_len_s = (minimum_note_length / 1000.0)
            note_lengths = [((n[1] - n[0]) >= min_len_s) for n in note_events]
            assert all(note_lengths)
    def test_predict_min_freq(self) -> None:
        """No returned pitch falls below the requested minimum frequency."""
        test_audio_path = (RESOURCES_PATH / 'vocadito_10.wav')
        for minimum_frequency in [40, 80, 200, 2000]:
            (_, _, note_events) = inference.predict(test_audio_path, ICASSP_2022_MODEL_PATH, minimum_frequency=minimum_frequency)
            min_freq_midi = np.round(librosa.hz_to_midi(minimum_frequency))
            note_pitch = [(n[2] >= min_freq_midi) for n in note_events]
            assert all(note_pitch)
    def test_predict_max_freq(self) -> None:
        """No returned pitch exceeds the requested maximum frequency."""
        test_audio_path = (RESOURCES_PATH / 'vocadito_10.wav')
        for maximum_frequency in [40, 80, 200, 2000]:
            (_, _, note_events) = inference.predict(test_audio_path, ICASSP_2022_MODEL_PATH, maximum_frequency=maximum_frequency)
            max_freq_midi = np.round(librosa.hz_to_midi(maximum_frequency))
            note_pitch = [(n[2] <= max_freq_midi) for n in note_events]
            assert all(note_pitch)
class Mlp(nn.Module):
    """Feed-forward block with a depth-wise conv between the two linears.

    Pipeline: fc1 -> DWConv (needs spatial dims H, W) -> activation ->
    dropout -> fc2 -> dropout.
    """
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0):
        super().__init__()
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.dwconv = DWConv(hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)
        self.apply(self._init_weights)
    def _init_weights(self, m):
        """Truncated-normal linears, unit LayerNorm, fan-out He-init convs."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            # Fan-out accounting for grouped (depth-wise) convolutions.
            fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels // m.groups
            m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            if m.bias is not None:
                m.bias.data.zero_()
    def forward(self, x, H, W):
        h = self.act(self.dwconv(self.fc1(x), H, W))
        h = self.drop(h)
        h = self.fc2(h)
        return self.drop(h)
class SawyerDoorUnlockEnvV2(SawyerXYZEnv):
    """Sawyer task: rotate a door lock from locked to unlocked.

    NOTE(review): ``model_name`` and ``_target_site_config`` read like
    ``@property`` accessors, and the bare ``_assert_task_is_set`` line
    looks like a stripped ``@`` decorator on ``step`` — presumably lost
    during extraction; confirm against the original metaworld source.
    """
    def __init__(self):
        # Workspace bounds for the hand; randomization ranges for the door
        # body position and the goal site.
        hand_low = ((- 0.5), 0.4, (- 0.15))
        hand_high = (0.5, 1, 0.5)
        obj_low = ((- 0.1), 0.8, 0.15)
        obj_high = (0.1, 0.85, 0.15)
        goal_low = (0.0, 0.64, 0.21)
        goal_high = (0.2, 0.7, 0.2111)
        super().__init__(self.model_name, hand_low=hand_low, hand_high=hand_high)
        self.init_config = {'obj_init_pos': np.array([0, 0.85, 0.15]), 'hand_init_pos': np.array([0, 0.6, 0.2], dtype=np.float32)}
        self.goal = np.array([0, 0.85, 0.1])
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.hand_init_pos = self.init_config['hand_init_pos']
        self.max_path_length = 150
        self._random_reset_space = Box(np.array(obj_low), np.array(obj_high))
        self.goal_space = Box(np.array(goal_low), np.array(goal_high))
    def model_name(self):
        # Path to the MJCF model for this task.
        return full_v2_path_for('sawyer_xyz/sawyer_door_lock.xml')
    _assert_task_is_set
    def step(self, action):
        """Advance one step; success when the lock is within 5 cm of goal."""
        ob = super().step(action)
        (reward, reachDist, pullDist) = self.compute_reward(action, ob)
        self.curr_path_length += 1
        info = {'reachDist': reachDist, 'goalDist': pullDist, 'epRew': reward, 'pickRew': None, 'success': float((pullDist <= 0.05))}
        return (ob, reward, False, info)
    def _target_site_config(self):
        # Show the unlock goal; park the lock goal marker far off-screen.
        return [('goal_unlock', self._target_pos), ('goal_lock', np.array([10.0, 10.0, 10.0]))]
    def _get_pos_objects(self):
        return self._get_site_pos('lockStartUnlock')
    def _set_obj_xyz(self, pos):
        # Write a single lock-joint angle (qpos index 9) and zero its velocity.
        qpos = self.data.qpos.flat.copy()
        qvel = self.data.qvel.flat.copy()
        qpos[9] = pos
        qvel[9] = 0
        self.set_state(qpos, qvel)
    def reset_model(self):
        """Reset hand and door; start with the lock rotated to ~90deg (locked)."""
        self._reset_hand()
        door_pos = self.init_config['obj_init_pos']
        if self.random_init:
            door_pos = self._get_state_rand_vec()
        self.sim.model.body_pos[self.model.body_name2id('door')] = door_pos
        # 1.5708 rad = pi/2: the locked joint angle.
        self._set_obj_xyz(1.5708)
        self.obj_init_pos = self.get_body_com('lock_link')
        self._target_pos = (self.obj_init_pos + np.array([0.1, (- 0.04), 0.0]))
        self.maxPullDist = np.linalg.norm((self._target_pos - self.obj_init_pos))
        return self._get_obs()
    def _reset_hand(self):
        super()._reset_hand()
        # Cache the gripper midpoint and clear the reach flag.
        (rightFinger, leftFinger) = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector'))
        self.init_fingerCOM = ((rightFinger + leftFinger) / 2)
        self.reachCompleted = False
    def compute_reward(self, actions, obs):
        """Reward = -reach distance + pull bonus once the lock is reached.

        Returns [reward, reachDist, pullDist].
        """
        del actions
        objPos = obs[3:6]
        (rightFinger, leftFinger) = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector'))
        fingerCOM = ((rightFinger + leftFinger) / 2)
        pullGoal = self._target_pos
        pullDist = np.linalg.norm((objPos - pullGoal))
        reachDist = np.linalg.norm((objPos - fingerCOM))
        reachRew = (- reachDist)
        self.reachCompleted = (reachDist < 0.05)
        def pullReward():
            # c2/c3 set the widths of the two exponential bonus terms.
            c1 = 1000
            c2 = 0.01
            c3 = 0.001
            if self.reachCompleted:
                pullRew = ((1000 * (self.maxPullDist - pullDist)) + (c1 * (np.exp(((- (pullDist ** 2)) / c2)) + np.exp(((- (pullDist ** 2)) / c3)))))
                pullRew = max(pullRew, 0)
                return pullRew
            else:
                return 0
        pullRew = pullReward()
        reward = (reachRew + pullRew)
        return [reward, reachDist, pullDist]
def prepare_dataset():
    """Split CIFAR-10 into labeled/unlabeled/test TFRecord files.

    Draws FLAGS.num_labeled_examples class-balanced labeled examples using
    a permutation seeded by FLAGS.dataset_seed, then writes three
    tfrecords files under FLAGS.data_dir/seed<seed>.
    """
    ((train_images, train_labels), (test_images, test_labels)) = load_cifar10()
    dirpath = os.path.join(FLAGS.data_dir, ('seed' + str(FLAGS.dataset_seed)))
    # exist_ok avoids the check-then-create race of the old os.path.exists guard.
    os.makedirs(dirpath, exist_ok=True)
    rng = np.random.RandomState(FLAGS.dataset_seed)
    rand_ix = rng.permutation(NUM_EXAMPLES_TRAIN)
    (_train_images, _train_labels) = (train_images[rand_ix], train_labels[rand_ix])
    examples_per_class = int((FLAGS.num_labeled_examples / 10))
    labeled_train_images = np.zeros((FLAGS.num_labeled_examples, 3072), dtype=np.float32)
    labeled_train_labels = np.zeros(FLAGS.num_labeled_examples, dtype=np.int64)
    # Take the first examples_per_class shuffled examples of each class.
    # fix: `xrange` is Python 2 only and raises NameError on Python 3.
    for i in range(10):
        ind = np.where((_train_labels == i))[0]
        labeled_train_images[(i * examples_per_class):((i + 1) * examples_per_class)] = _train_images[ind[0:examples_per_class]]
        labeled_train_labels[(i * examples_per_class):((i + 1) * examples_per_class)] = _train_labels[ind[0:examples_per_class]]
        _train_images = np.delete(_train_images, ind[0:examples_per_class], 0)
        _train_labels = np.delete(_train_labels, ind[0:examples_per_class])
    # Reshuffle the labeled subset so classes are interleaved on disk.
    rand_ix_labeled = rng.permutation(FLAGS.num_labeled_examples)
    (labeled_train_images, labeled_train_labels) = (labeled_train_images[rand_ix_labeled], labeled_train_labels[rand_ix_labeled])
    convert_images_and_labels(labeled_train_images, labeled_train_labels, os.path.join(dirpath, 'cifar10_semisup_labeled_train.tfrecords'))
    convert_images_and_labels(train_images, train_labels, os.path.join(dirpath, 'cifar10_semisup_unlabeled_train.tfrecords'))
    convert_images_and_labels(test_images, test_labels, os.path.join(dirpath, 'cifar10_semisup_test.tfrecords'))
.parametrize('ctx, fname', ctxs)
.parametrize('reverse', [False, True])
def test_equal_values(ctx, fname, reverse):
    """Sorting with duplicates: values ordered, ties keep ascending indices."""
    with nn.context_scope(ctx), nn.auto_forward(True):
        x = nn.Variable.from_numpy_array([2, 3, 3, 4, 2])
        (y, i) = F.sort(x, reverse=reverse, with_index=True)
        expected_vals = [4, 3, 3, 2, 2] if reverse else [2, 2, 3, 3, 4]
        expected_idxs = [3, 1, 2, 0, 4] if reverse else [0, 4, 1, 2, 3]
        assert all(y.d == expected_vals)
        assert all(i.d == expected_idxs)
def generate_model_with_data_frame(data_frame, variables, variable_type, result_value_name, objective, direction, constraints):
    """Build a pyomo ConcreteModel from a data frame and expression strings.

    One decision variable is created per data-frame row. The objective and
    constraint expression strings (which reference ``model.x`` and the
    module-global ``DATA_FRAME``) are evaluated while ``DATA_FRAME`` is
    bound under the module lock, then the global is cleared.

    Raises:
        ValueError: for an unknown ``direction`` or ``variable_type``.
    """
    direction = direction.lower()
    if (direction == 'maximize'):
        direction = pyomo_env.maximize
    elif (direction == 'minimize'):
        direction = pyomo_env.minimize
    else:
        raise ValueError("direction must be one of 'maximize' or 'minimize'")
    if (not hasattr(pyomo_env, variable_type)):
        raise ValueError(('cannot find variable type %s' % variable_type))
    variable_type = getattr(pyomo_env, variable_type)
    model = pyomo_env.ConcreteModel()
    var_num = len(data_frame)
    model.x = pyomo_env.Var(list(range(var_num)), within=variable_type)
    columns = data_frame.columns
    variable_str = 'model.x'
    data_str = 'DATA_FRAME'
    (obj_expr, c_exprs) = generate_objective_and_constraint_expr(columns=columns, objective=objective, constraints=constraints, variables=variables, result_value_name=result_value_name, variable_str=variable_str, data_str=data_str)
    global DATA_FRAME
    # fix: idiomatic lock handling — `with` replaces manual acquire()/release()
    # and guarantees release on any exit path.
    with DATA_FRAME_LOCK:
        try:
            DATA_FRAME = data_frame
            # SECURITY: eval() executes generated expression strings; they must
            # only ever come from trusted, internally generated sources.
            obj_func = eval(('lambda model: %s' % obj_expr))
            model.objective = pyomo_env.Objective(rule=obj_func, sense=direction)
            for (i, (expr, for_range, iter_vars)) in enumerate(c_exprs):
                attr_name = ('constraint_%d' % i)
                if for_range:
                    # Ranged constraints expand into a ConstraintList.
                    assert iter_vars, 'for_range and iter_vars must be both non-empty'
                    setattr(model, attr_name, pyomo_env.ConstraintList())
                    constraint_list = getattr(model, attr_name)
                    template = 'lambda model, constraint_list: [constraint_list.add(%s) for %s in %s]'
                    add_constraint_str = (template % (expr, ','.join(iter_vars), for_range))
                    eval(add_constraint_str)(model, constraint_list)
                else:
                    assert (not iter_vars), 'for_range and iter_vars must be both empty'
                    func = eval(('lambda model: %s' % expr))
                    constraint = pyomo_env.Constraint(rule=func)
                    setattr(model, attr_name, constraint)
        finally:
            # Always clear the shared frame so stale data cannot leak to the
            # next caller, even if expression evaluation raises.
            DATA_FRAME = None
    return model
def add_deeplab_outputs(model, blob_in, dim):
    """Attach DeepLab-style probability/CRF output ops to a caffe2 model.

    Sums the input blobs into 'mask_fc8', converts to per-pixel
    probabilities (channel softmax, or sigmoid + background channel),
    then builds the unary term and a DenseCRF refinement.
    Returns the summed 'mask_fc8' blob.
    """
    blob_out = model.net.Sum(blob_in, ['mask_fc8'])
    # NOTE(review): this branch is a no-op — presumably a placeholder for
    # inference-only behavior that was never filled in.
    if (not model.train):
        pass
    if cfg.WSL.MASK_SOFTMAX:
        # Softmax over channels: transpose to NHWC, softmax last axis, back.
        model.Transpose('mask_fc8', 'mask_fc8_t', axes=(0, 2, 3, 1))
        model.Softmax('mask_fc8_t', 'mask_fc8_probs_t', axis=3)
        model.Transpose('mask_fc8_probs_t', 'mask_fc8_probs', axes=(0, 3, 1, 2))
    else:
        # Sigmoid path: derive a background channel as 1 - max(foreground),
        # concat, then softmax over the bg+fg channels.
        model.net.Sigmoid('mask_fc8', 'mask_fc8_sigmoid')
        model.net.ReduceMax('mask_fc8_sigmoid', 'mask_fc8_fg', axes=[1], keepdims=True)
        model.net.ConstantFill('mask_fc8_fg', 'mask_fc8_one', value=1.0)
        model.net.Sub(['mask_fc8_one', 'mask_fc8_fg'], 'mask_fc8_bg')
        model.net.Concat(['mask_fc8_bg', 'mask_fc8_sigmoid'], ['mask_fc8_bgfg', 'mask_fc8_bgfg_split_info'], axis=1)
        model.Transpose('mask_fc8_bgfg', 'mask_fc8_bgfg_t', axes=(0, 2, 3, 1))
        model.Softmax('mask_fc8_bgfg_t', 'mask_fc8_probs_t', axis=3)
        model.Transpose('mask_fc8_probs_t', 'mask_fc8_probs', axes=(0, 3, 1, 2))
    # CRF unary = -log(prob); image resized to the unary's resolution.
    model.net.Log('mask_fc8_probs', 'mask_fc8_log')
    model.net.Scale('mask_fc8_log', 'mask_fc8_unary', scale=(- 1.0))
    model.net.UpsampleBilinearWSL(['data', 'mask_fc8_unary'], 'mask_fc8_data')
    crf_args = {}
    model.net.DenseCRF(['mask_fc8_unary', 'mask_fc8_data'], 'mask_fc8_crf', **crf_args)
    return blob_out
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.