code
stringlengths
281
23.7M
class ConfigTest(unittest.TestCase):
    """Config values must be picked up from DESTALINATOR_* environment variables."""

    def setUp(self):
        # Seed the environment before each test.  The trailing comma on the
        # list variable exercises empty-element stripping in the parser.
        overrides = {
            'DESTALINATOR_STRING_VARIABLE': 'test',
            'DESTALINATOR_LIST_VARIABLE': 'test,',
        }
        for name, value in overrides.items():
            os.environ[name] = value

    def test_environment_variable_configs(self):
        self.assertEqual(get_config().string_variable, 'test')
        self.assertListEqual(get_config().list_variable, ['test'])
# NOTE(review): the decorator below had been truncated to a bare '.parametrize'
# fragment in the extracted text; the '@pytest.mark' prefix was restored.
@pytest.mark.parametrize(['ops', 'state'], [
    pytest.param(PZ, basis(2, 0), id='PZ_ket1'),
    pytest.param(PZ, basis(2, 1), id='PZ_ket2'),
    pytest.param(PZ, ket2dm(basis(2, 0)), id='PZ_dm1'),
    pytest.param(PZ, ket2dm(basis(2, 1)), id='PZ_dm2'),
    pytest.param(PZ_ket, basis(2, 0), id='PZket_ket1'),
    pytest.param(PZ_ket, basis(2, 1), id='PZket_ket2'),
    pytest.param(PZ_ket, ket2dm(basis(2, 0)), id='PZket_dm1'),
    pytest.param(PZ_ket, ket2dm(basis(2, 1)), id='PZket_dm2'),
    pytest.param(PX, basis(2, 0), id='PX_ket1'),
    pytest.param(PX, basis(2, 1), id='PX_ket2'),
    pytest.param(PX, ket2dm(basis(2, 0)), id='PX_dm'),
    pytest.param(PY, basis(2, 0), id='PY_ket1'),
    pytest.param(PY, basis(2, 1), id='PY_ket2'),
    pytest.param(PY, ket2dm(basis(2, 1)), id='PY_dm'),
])
def test_measure(ops, state):
    """A POVM measurement must collapse onto one of the states predicted by
    measurement_statistics_povm; repeated to cover the random outcome index."""
    (collapsed_states, _) = measurement_statistics_povm(state, ops)
    for _ in range(10):
        (index, final_state) = measure_povm(state, ops)
        assert (final_state == collapsed_states[index])
class SEResNetBottleneck(Bottleneck):
    """ResNet-style bottleneck followed by a Squeeze-and-Excitation module.

    The stride (when > 1) is applied by the 1x1 ``conv1`` rather than the
    3x3 ``conv2``.
    """

    expansion = 4

    def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None):
        super(SEResNetBottleneck, self).__init__()
        out_channels = planes * 4  # bottleneck expansion
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False, stride=stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, groups=groups, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, out_channels, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.se_module = SEModule(out_channels, reduction=reduction)
        self.downsample = downsample
        self.stride = stride
def get_seq_without_gaps_at_index(seq, position):
    """Trim *seq* (a sorted integer list, modified in place) around *position*.

    Presumably keeps only the contiguous run of integers containing
    ``position`` — the exact semantics depend on the two gap helpers;
    TODO confirm against their definitions.
    """
    start_idx = bisect.bisect_left(seq, position)
    forward_gap = get_index_of_gap_in_sorted_integer_seq_forward(seq, start_idx)
    reverse_gap = get_index_of_gap_in_sorted_integer_seq_reverse(seq, start_idx)
    if (forward_gap is not None):
        # Drop everything from the first gap after the position onwards.
        seq[:] = seq[:forward_gap]
    if (reverse_gap is not None):
        # Drop everything before the last gap preceding the position.  The
        # forward trim above only removed trailing elements, so this index
        # (computed on the original list) is still valid here.
        seq[:] = seq[reverse_gap:]
    return seq
def test_tsm_optimizer_constructor():
    """Build a TSM optimizer via the constructor registry, with and without fc_lr5."""
    model = ExampleModel()
    optimizer_cfg = dict(type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
    # Exercise both paramwise configurations in the original order.
    for fc_lr5 in (True, False):
        paramwise_cfg = dict(fc_lr5=fc_lr5)
        optim_constructor_cfg = dict(
            type='TSMOptimizerConstructor',
            optimizer_cfg=optimizer_cfg,
            paramwise_cfg=paramwise_cfg)
        optim_constructor = build_optimizer_constructor(optim_constructor_cfg)
        optimizer = optim_constructor(model)
        check_tsm_optimizer(optimizer, model, **paramwise_cfg)
def test_rotate_bounds_bottomright(view, item):
    """Rotate-handle bounds around the bottom-right corner of the item."""
    item.SELECT_RESIZE_SIZE = 10
    item.SELECT_ROTATE_SIZE = 10
    path = item.get_rotate_bounds(QtCore.QPointF(100, 80))
    rect = path.boundingRect()
    assert (rect.topLeft().x() == 95)
    assert (rect.topLeft().y() == 75)
    assert (rect.bottomRight().x() == 115)
    assert (rect.bottomRight().y() == 95)
    # The centre square is cut out of the rotate-handle region.
    assert (path.contains(QtCore.QPointF(104, 84)) is False)
class ProxiesOnDevice(Proxies):
    """Bookkeeping for proxies whose data lives in device memory.

    Several proxies may reference the same device buffer, so memory usage
    is accounted once per unique ``DeviceMemoryId`` rather than per proxy.
    """

    def __init__(self) -> None:
        super().__init__()
        # id(proxy) -> set of device buffers that proxy references
        self.proxy_id_to_dev_mems: Dict[(int, Set[DeviceMemoryId])] = {}
        # device buffer -> ids of proxies currently referencing it
        self.dev_mem_to_proxy_ids: DefaultDict[(DeviceMemoryId, Set[int])] = defaultdict(set)

    def mem_usage_add(self, proxy: ProxyObject) -> None:
        """Register *proxy*, counting each buffer's bytes only on first reference."""
        proxy_id = id(proxy)
        assert (proxy_id not in self.proxy_id_to_dev_mems)
        self.proxy_id_to_dev_mems[proxy_id] = set()
        for dev_mem in get_device_memory_ids(proxy._pxy_get().obj):
            self.proxy_id_to_dev_mems[proxy_id].add(dev_mem)
            ps = self.dev_mem_to_proxy_ids[dev_mem]
            if (len(ps) == 0):
                # First proxy to reference this buffer: count its size once.
                self._mem_usage += dev_mem.nbytes
            ps.add(proxy_id)

    def mem_usage_remove(self, proxy: ProxyObject) -> None:
        """Unregister *proxy*, subtracting a buffer's bytes when its last reference goes."""
        proxy_id = id(proxy)
        for dev_mem in self.proxy_id_to_dev_mems.pop(proxy_id):
            self.dev_mem_to_proxy_ids[dev_mem].remove(proxy_id)
            if (len(self.dev_mem_to_proxy_ids[dev_mem]) == 0):
                del self.dev_mem_to_proxy_ids[dev_mem]
                self._mem_usage -= dev_mem.nbytes

    def buffer_info(self) -> List[Tuple[(float, int, List[ProxyObject])]]:
        """Return (most-recent access time, nbytes, proxies) per device buffer."""
        ret = []
        for (dev_mem, proxy_ids) in self.dev_mem_to_proxy_ids.items():
            proxies = self.get_proxies_by_ids(proxy_ids)
            last_access = max((p._pxy_get().last_access for p in proxies))
            ret.append((last_access, dev_mem.nbytes, proxies))
        return ret
def canonicalized_query_string(params):
    """Return the canonical query string: percent-encoded ``key=value`` pairs
    joined with '&' in sorted-key order."""
    encoded_params = clean_params_dict(params, urlencode=True)
    return '&'.join(f'{key}={encoded_params[key]}' for key in sorted(encoded_params))
def train(model, training_data, validation_data, optimizer, scheduler, pred_loss_func, opt):
    """Run the full training loop for ``opt.epoch`` epochs.

    After each epoch the model is evaluated on *validation_data*; progress is
    appended to the log file at ``opt.log_path/opt.log`` and the checkpoint
    with the best validation log-likelihood is kept at ``model.pt``.
    """
    valid_event_losses = []  # validation log-likelihood per epoch
    valid_pred_losses = []  # validation type-prediction accuracy per epoch
    valid_rmse = []  # validation time-prediction RMSE per epoch
    best_event_ll = (- 999999)  # best validation log-likelihood seen so far
    for epoch_i in range(opt.epoch):
        epoch = (epoch_i + 1)
        log_path = opt.log_path
        with open(os.path.join(log_path, opt.log), 'a') as f:
            print('[ Epoch', epoch, ']')
            start = time.time()
            (train_event, train_type, train_time) = train_epoch(model, training_data, optimizer, pred_loss_func, opt)
            f.write(' - (Training) loglikelihood: {ll: 8.5f}, accuracy: {type: 8.5f}, RMSE: {rmse: 8.5f}, elapse: {elapse:3.3f} min\n'.format(ll=train_event, type=train_type, rmse=train_time, elapse=((time.time() - start) / 60)))
            print(' - (Training) loglikelihood: {ll: 8.5f}, accuracy: {type: 8.5f}, RMSE: {rmse: 8.5f}, elapse: {elapse:3.3f} min'.format(ll=train_event, type=train_type, rmse=train_time, elapse=((time.time() - start) / 60)))
            start = time.time()
            (valid_event, valid_type, valid_time) = eval_epoch(model, validation_data, pred_loss_func, opt)
            f.write(' - (Testing) loglikelihood: {ll: 8.5f}, accuracy: {type: 8.5f}, RMSE: {rmse: 8.5f}, elapse: {elapse:3.3f} min\n'.format(ll=valid_event, type=valid_type, rmse=valid_time, elapse=((time.time() - start) / 60)))
            print(' - (Testing) loglikelihood: {ll: 8.5f}, accuracy: {type: 8.5f}, RMSE: {rmse: 8.5f}, elapse: {elapse:3.3f} min'.format(ll=valid_event, type=valid_type, rmse=valid_time, elapse=((time.time() - start) / 60)))
            # Checkpoint whenever validation log-likelihood improves.
            if (valid_event > best_event_ll):
                best_event_ll = valid_event
                torch.save(model.state_dict(), os.path.join(log_path, 'model.pt'))
                f.write('---(Best model save)----\n')
                print('---(Best model save)----')
            valid_event_losses += [valid_event]
            valid_pred_losses += [valid_type]
            valid_rmse += [valid_time]
            f.write(' - [Info] [Valid] Maximum ll: {event: 8.5f}, Maximum accuracy: {pred: 8.5f}, Minimum RMSE: {rmse: 8.5f}\n'.format(event=max(valid_event_losses), pred=max(valid_pred_losses), rmse=min(valid_rmse)))
            print(' - [Info] [Valid] Maximum ll: {event: 8.5f}, Maximum accuracy: {pred: 8.5f}, Minimum RMSE: {rmse: 8.5f}'.format(event=max(valid_event_losses), pred=max(valid_pred_losses), rmse=min(valid_rmse)))
            f.write('-[Valid][Current] {epoch}, {ll: 8.5f}, {acc: 8.5f}, {rmse: 8.5f}\n'.format(epoch=epoch, ll=valid_event, acc=valid_type, rmse=valid_time))
            print('-[Valid][Current] {epoch}, {ll: 8.5f}, {acc: 8.5f}, {rmse: 8.5f}'.format(epoch=epoch, ll=valid_event, acc=valid_type, rmse=valid_time))
        # Advance the learning-rate schedule once per epoch.
        scheduler.step()
def unevaluatedProperties_draft2019(validator, uP, instance, schema):
    """Validate draft 2019-09 ``unevaluatedProperties`` against *instance*."""
    if not validator.is_type(instance, 'object'):
        return
    evaluated_keys = find_evaluated_property_keys_by_schema(validator, instance, schema)
    unevaluated_keys = []
    for key in instance:
        if key in evaluated_keys:
            continue
        # One entry is recorded per validation error raised while descending,
        # so a key may appear more than once.
        for _ in validator.descend(instance[key], uP, path=key, schema_path=key):
            unevaluated_keys.append(key)
    if not unevaluated_keys:
        return
    if uP is False:
        error = 'Unevaluated properties are not allowed (%s %s unexpected)'
        extras = sorted(unevaluated_keys, key=str)
        yield ValidationError(error % _utils.extras_msg(extras))
    else:
        error = 'Unevaluated properties are not valid under the given schema (%s %s unevaluated and invalid)'
        yield ValidationError(error % _utils.extras_msg(unevaluated_keys))
class TestAssert_reprcompare_namedtuple():
    """Explanation lines produced when comparing namedtuples for equality."""

    def test_namedtuple(self) -> None:
        NT = collections.namedtuple('NT', ['a', 'b'])
        left = NT(1, 'b')
        right = NT(1, 'c')
        expected = [
            "NT(a=1, b='b') == NT(a=1, b='c')",
            '',
            'Omitting 1 identical items, use -vv to show',
            'Differing attributes:',
            "['b']",
            '',
            'Drill down into differing attribute b:',
            " b: 'b' != 'c'",
            ' - c',
            ' + b',
            'Use -v to get more diff',
        ]
        assert callequal(left, right) == expected

    def test_comparing_two_different_namedtuple(self) -> None:
        # Different namedtuple types fall back to plain sequence diffing.
        NT1 = collections.namedtuple('NT1', ['a', 'b'])
        NT2 = collections.namedtuple('NT2', ['a', 'b'])
        expected = [
            "NT1(a=1, b='b') == NT2(a=2, b='b')",
            '',
            'At index 0 diff: 1 != 2',
            'Use -v to get more diff',
        ]
        assert callequal(NT1(1, 'b'), NT2(2, 'b')) == expected
def check_repo_quality():
    """Run every repository consistency check, printing a header before each group."""
    steps = [
        ('Checking all models are included.', [check_model_list]),
        ('Checking all models are public.', [check_models_are_in_init]),
        ('Checking all models are properly tested.', [check_all_decorator_order, check_all_models_are_tested]),
        ('Checking all objects are properly documented.', [check_all_objects_are_documented]),
        ('Checking all models are in at least one auto class.', [check_all_models_are_auto_configured]),
        ('Checking all names in auto name mappings are defined.', [check_all_auto_object_names_being_defined]),
        ('Checking all keys in auto name mappings are defined in `CONFIG_MAPPING_NAMES`.', [check_all_auto_mapping_names_in_config_mapping_names]),
    ]
    for message, checks in steps:
        print(message)
        for check in checks:
            check()
class CallableObject(object):
    """Wrapper around a callback that holds bound methods weakly.

    Bound methods keep only a weak reference to their receiver, so wrapping
    a method does not keep its object alive; plain callables are stored
    directly.
    """

    __slots__ = ['_ob', '_func']

    def __init__(self, c):
        if not hasattr(c, '__call__'):
            raise ValueError('Error: given callback is not callable.')
        if hasattr(c, '__self__'):
            # Bound method (Python 3 attribute names).
            self._ob = weakref.ref(c.__self__)
            self._func = c.__func__.__name__
        elif hasattr(c, 'im_self'):
            # Bound method (legacy Python 2 attribute names).
            self._ob = weakref.ref(c.im_self)
            self._func = c.im_func.__name__
        else:
            # Free function, lambda, or other callable object.
            self._ob = None
            self._func = c

    def isdead(self):
        """True once a weakly-referenced receiver has been collected."""
        return (self._ob() is None) if self._ob else False

    def compare(self, other):
        """True if *other* wraps the same receiver/function pair."""
        if self._ob and other._ob:
            return (self._ob() is other._ob()) and (self._func == other._func)
        if not (self._ob or other._ob):
            return self._func == other._func
        return False

    def __str__(self):
        return self._func.__str__()

    def call(self, *args, **kwargs):
        """Invoke the wrapped callable; silently a no-op if the receiver died."""
        if self.isdead():
            return
        try:
            target = getattr(self._ob(), self._func) if self._ob else self._func
        except Exception:
            return
        try:
            return target(*args, **kwargs)
        except Exception:
            funcname = self._func
            if hasattr(funcname, '__name__'):
                funcname = funcname.__name__
            print('Exception while handling event (in %s):' % (funcname,))
            print(getErrorMsg())
class BoxHead(object):
    """Second-stage (Fast R-CNN) box heads built on pooled RoI features.

    Every head pools RoI features (from a single feature map or an FPN),
    runs two shared 1024-d fully-connected layers, then attaches
    task-specific classification/regression outputs.  ``mode == 1`` means
    the incoming rois are rotated boxes that are first converted to
    horizontal min-area rectangles before RoI Align.
    """

    def __init__(self, cfgs):
        self.cfgs = cfgs

    def fpn_fc_head(self, roi_extractor, rois_list, feature_pyramid, img_shape, is_training, mode=0):
        """FPN head: (CLASS_NUM+1)-way softmax cls + per-class 5-coord regression."""
        with tf.variable_scope('Fast-RCNN'):
            with tf.variable_scope('rois_pooling'):
                roi_features_list = []
                for (level_name, rois) in zip(self.cfgs.LEVEL, rois_list):
                    if (mode == 1):
                        # Rotated rois: 5-param -> 8-point form, then the
                        # horizontal bounding rectangle.
                        rois = tf.py_func(forward_convert, inp=[rois, False], Tout=tf.float32)
                        rois = get_horizen_minAreaRectangle(rois, False)
                    roi_features = roi_extractor.roi_align(feature_maps=feature_pyramid[level_name], rois=rois, img_shape=img_shape, scope=level_name)
                    roi_features_list.append(roi_features)
                all_roi_features = tf.concat(roi_features_list, axis=0)
            with tf.variable_scope('build_fc_layers'):
                inputs = slim.flatten(inputs=all_roi_features, scope='flatten_inputs')
                fc1 = slim.fully_connected(inputs, num_outputs=1024, trainable=is_training, scope='fc1')
                fc2 = slim.fully_connected(fc1, num_outputs=1024, trainable=is_training, scope='fc2')
            with slim.arg_scope([slim.fully_connected], weights_regularizer=slim.l2_regularizer(self.cfgs.WEIGHT_DECAY)):
                cls_score = slim.fully_connected(fc2, num_outputs=(self.cfgs.CLASS_NUM + 1), weights_initializer=self.cfgs.INITIALIZER, activation_fn=None, trainable=is_training, scope='cls_fc')
                bbox_pred = slim.fully_connected(fc2, num_outputs=((self.cfgs.CLASS_NUM + 1) * 5), weights_initializer=self.cfgs.BBOX_INITIALIZER, activation_fn=None, trainable=is_training, scope='reg_fc')
                cls_score = tf.reshape(cls_score, [(- 1), (self.cfgs.CLASS_NUM + 1)])
                bbox_pred = tf.reshape(bbox_pred, [(- 1), (5 * (self.cfgs.CLASS_NUM + 1))])
            return (bbox_pred, cls_score)

    def fpn_double_head(self, roi_extractor, rois_list, feature_pyramid, img_shape, is_training, mode=0):
        """Double head: classification from the fc branch, regression from an
        extra convolutional branch over the pooled features."""
        with tf.variable_scope('Fast-RCNN'):
            with tf.variable_scope('rois_pooling'):
                roi_features_list = []
                for (level_name, rois) in zip(self.cfgs.LEVEL, rois_list):
                    if (mode == 1):
                        rois = tf.py_func(forward_convert, inp=[rois, False], Tout=tf.float32)
                        rois = get_horizen_minAreaRectangle(rois, False)
                    roi_features = roi_extractor.roi_align(feature_maps=feature_pyramid[level_name], rois=rois, img_shape=img_shape, scope=level_name)
                    roi_features_list.append(roi_features)
                all_roi_features = tf.concat(roi_features_list, axis=0)
            with tf.variable_scope('build_fc_layers'):
                inputs = slim.flatten(inputs=all_roi_features, scope='flatten_inputs')
                fc1 = slim.fully_connected(inputs, num_outputs=1024, trainable=is_training, scope='fc1')
                fc2 = slim.fully_connected(fc1, num_outputs=1024, trainable=is_training, scope='fc2')
            with slim.arg_scope([slim.fully_connected], weights_regularizer=slim.l2_regularizer(self.cfgs.WEIGHT_DECAY)):
                cls_score = slim.fully_connected(fc2, num_outputs=(self.cfgs.CLASS_NUM + 1), weights_initializer=self.cfgs.INITIALIZER, activation_fn=None, trainable=is_training, scope='cls_fc')
                bbox_input_feat = slim.conv2d(all_roi_features, num_outputs=1024, kernel_size=[3, 3], stride=1, padding='SAME', scope='extra_conv1')
                bbox_input_feat = slim.conv2d(bbox_input_feat, num_outputs=1024, kernel_size=[3, 3], stride=1, padding='SAME', scope='extra_conv2')
                # NOTE(review): 'sc' is not defined anywhere in this class —
                # this positional argument (the stride of avg_pool2d) looks
                # like a truncated token; confirm the intended call against
                # the upstream source before running this head.
                bbox_input_feat = slim.avg_pool2d(bbox_input_feat, [1, 1], sc)
                bbox_input_fc_feat = slim.flatten(bbox_input_feat, scope='bbox_feat_flatten')
                bbox_pred = slim.fully_connected(bbox_input_fc_feat, num_outputs=((self.cfgs.CLASS_NUM + 1) * 5), weights_initializer=self.cfgs.BBOX_INITIALIZER, activation_fn=None, trainable=is_training, scope='reg_fc')
                cls_score = tf.reshape(cls_score, [(- 1), (self.cfgs.CLASS_NUM + 1)])
                bbox_pred = tf.reshape(bbox_pred, [(- 1), (5 * (self.cfgs.CLASS_NUM + 1))])
            return (bbox_pred, cls_score)

    def fc_head(self, roi_extractor, rois, feature, img_shape, is_training, mode=0):
        """Single-level head with separate horizontal (4-coord) and rotated
        (5-coord) classification/regression branches."""
        with tf.variable_scope('Fast-RCNN'):
            with tf.variable_scope('rois_pooling'):
                all_roi_features = roi_extractor.roi_align(feature_maps=feature, rois=rois, img_shape=img_shape, scope='')
            with tf.variable_scope('build_fc_layers'):
                inputs = slim.flatten(inputs=all_roi_features, scope='flatten_inputs')
                fc1 = slim.fully_connected(inputs, num_outputs=1024, trainable=is_training, scope='fc1')
                fc2 = slim.fully_connected(fc1, num_outputs=1024, trainable=is_training, scope='fc2')
            with tf.variable_scope('horizen_branch'):
                with slim.arg_scope([slim.fully_connected], weights_regularizer=slim.l2_regularizer(self.cfgs.WEIGHT_DECAY)):
                    cls_score_h = slim.fully_connected(fc2, num_outputs=(self.cfgs.CLASS_NUM + 1), weights_initializer=self.cfgs.INITIALIZER, activation_fn=None, trainable=is_training, scope='cls_fc_h')
                    bbox_pred_h = slim.fully_connected(fc2, num_outputs=((self.cfgs.CLASS_NUM + 1) * 4), weights_initializer=self.cfgs.BBOX_INITIALIZER, activation_fn=None, trainable=is_training, scope='reg_fc_h')
                    cls_score_h = tf.reshape(cls_score_h, [(- 1), (self.cfgs.CLASS_NUM + 1)])
                    bbox_pred_h = tf.reshape(bbox_pred_h, [(- 1), (4 * (self.cfgs.CLASS_NUM + 1))])
            with tf.variable_scope('rotation_branch'):
                with slim.arg_scope([slim.fully_connected], weights_regularizer=slim.l2_regularizer(self.cfgs.WEIGHT_DECAY)):
                    cls_score_r = slim.fully_connected(fc2, num_outputs=(self.cfgs.CLASS_NUM + 1), weights_initializer=self.cfgs.INITIALIZER, activation_fn=None, trainable=is_training, scope='cls_fc_r')
                    bbox_pred_r = slim.fully_connected(fc2, num_outputs=((self.cfgs.CLASS_NUM + 1) * 5), weights_initializer=self.cfgs.BBOX_INITIALIZER, activation_fn=None, trainable=is_training, scope='reg_fc_r')
                    cls_score_r = tf.reshape(cls_score_r, [(- 1), (self.cfgs.CLASS_NUM + 1)])
                    bbox_pred_r = tf.reshape(bbox_pred_r, [(- 1), (5 * (self.cfgs.CLASS_NUM + 1))])
            return (bbox_pred_h, cls_score_h, bbox_pred_r, cls_score_r)

    def fpn_fc_sigmoid_head(self, roi_extractor, rois_list, feature_pyramid, img_shape, is_training, mode=0):
        """FPN head variant: CLASS_NUM logits (no background column) and a
        single class-agnostic 5-coord regression output."""
        with tf.variable_scope('Fast-RCNN'):
            with tf.variable_scope('rois_pooling'):
                roi_features_list = []
                for (level_name, rois) in zip(self.cfgs.LEVEL, rois_list):
                    if (mode == 1):
                        rois = tf.py_func(forward_convert, inp=[rois, False], Tout=tf.float32)
                        rois = get_horizen_minAreaRectangle(rois, False)
                    roi_features = roi_extractor.roi_align(feature_maps=feature_pyramid[level_name], rois=rois, img_shape=img_shape, scope=level_name)
                    roi_features_list.append(roi_features)
                all_roi_features = tf.concat(roi_features_list, axis=0)
            with tf.variable_scope('build_fc_layers'):
                inputs = slim.flatten(inputs=all_roi_features, scope='flatten_inputs')
                fc1 = slim.fully_connected(inputs, num_outputs=1024, trainable=is_training, scope='fc1')
                fc2 = slim.fully_connected(fc1, num_outputs=1024, trainable=is_training, scope='fc2')
            with slim.arg_scope([slim.fully_connected], weights_regularizer=slim.l2_regularizer(self.cfgs.WEIGHT_DECAY)):
                cls_score = slim.fully_connected(fc2, num_outputs=self.cfgs.CLASS_NUM, weights_initializer=self.cfgs.INITIALIZER, activation_fn=None, trainable=is_training, scope='cls_fc')
                bbox_pred = slim.fully_connected(fc2, num_outputs=5, weights_initializer=self.cfgs.BBOX_INITIALIZER, activation_fn=None, trainable=is_training, scope='reg_fc')
                cls_score = tf.reshape(cls_score, [(- 1), self.cfgs.CLASS_NUM])
                bbox_pred = tf.reshape(bbox_pred, [(- 1), 5])
            return (bbox_pred, cls_score)

    def fpn_fc_head_cls(self, roi_extractor, rois_list, feature_pyramid, img_shape, is_training, coding_len):
        """FPN head with an additional angle-classification output of
        ``coding_len`` logits (for coded angle prediction)."""
        with tf.variable_scope('Fast-RCNN'):
            with tf.variable_scope('rois_pooling'):
                roi_features_list = []
                for (level_name, rois) in zip(self.cfgs.LEVEL, rois_list):
                    roi_features = roi_extractor.roi_align(feature_maps=feature_pyramid[level_name], rois=rois, img_shape=img_shape, scope=level_name)
                    roi_features_list.append(roi_features)
                all_roi_features = tf.concat(roi_features_list, axis=0)
            with tf.variable_scope('build_fc_layers'):
                inputs = slim.flatten(inputs=all_roi_features, scope='flatten_inputs')
                fc1 = slim.fully_connected(inputs, num_outputs=1024, trainable=is_training, scope='fc1')
                fc2 = slim.fully_connected(fc1, num_outputs=1024, trainable=is_training, scope='fc2')
            with slim.arg_scope([slim.fully_connected], weights_regularizer=slim.l2_regularizer(self.cfgs.WEIGHT_DECAY)):
                cls_score = slim.fully_connected(fc2, num_outputs=(self.cfgs.CLASS_NUM + 1), weights_initializer=self.cfgs.INITIALIZER, activation_fn=None, trainable=is_training, scope='cls_fc')
                bbox_pred = slim.fully_connected(fc2, num_outputs=((self.cfgs.CLASS_NUM + 1) * 5), weights_initializer=self.cfgs.BBOX_INITIALIZER, activation_fn=None, trainable=is_training, scope='reg_fc')
                angle_cls = slim.fully_connected(fc2, num_outputs=coding_len, weights_initializer=self.cfgs.INITIALIZER, activation_fn=None, trainable=is_training, scope='angle_cls_fc')
                cls_score = tf.reshape(cls_score, [(- 1), (self.cfgs.CLASS_NUM + 1)])
                angle_cls = tf.reshape(angle_cls, [(- 1), coding_len])
                bbox_pred = tf.reshape(bbox_pred, [(- 1), (5 * (self.cfgs.CLASS_NUM + 1))])
            return (bbox_pred, cls_score, angle_cls)
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
    """Check rules for an ``#include`` line.

    Flags: headers named without their directory, duplicate includes,
    includes out of section order, includes out of alphabetical order
    within a section, and inclusion of C++ stream headers.

    Args:
      filename: The name of the current file.
      clean_lines: Comment-stripped source lines.
      linenum: The line to check.
      include_state: Mutable mapping/state of includes seen so far.
      error: Callback used to report a style violation.
    """
    fileinfo = FileInfo(filename)
    line = clean_lines.lines[linenum]
    # "include" should use the new style "foo/bar.h" instead of just "bar.h".
    if _RE_PATTERN_INCLUDE_NEW_STYLE.search(line):
        error(filename, linenum, 'build/include_dir', 4, 'Include the directory when naming .h files')
    match = _RE_PATTERN_INCLUDE.search(line)
    if match:
        include = match.group(2)
        is_system = (match.group(1) == '<')
        if (include in include_state):
            # Header was already included earlier; report the first location.
            error(filename, linenum, 'build/include', 4, ('"%s" already included at %s:%s' % (include, filename, include_state[include])))
        else:
            include_state[include] = linenum
            # Enforce section order: own .h, C system, C++ system, other.
            error_message = include_state.CheckNextIncludeOrder(_ClassifyInclude(fileinfo, include, is_system))
            if error_message:
                error(filename, linenum, 'build/include_order', 4, ('%s. Should be: %s.h, c system, c++ system, other.' % (error_message, fileinfo.BaseName())))
            canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
            if (not include_state.IsInAlphabeticalOrder(clean_lines, linenum, canonical_include)):
                error(filename, linenum, 'build/include_alpha', 4, ('Include "%s" not in alphabetical order' % include))
            include_state.SetLastHeader(canonical_include)
    # Look for standard C++ stream headers, discouraged outside tests.
    match = _RE_PATTERN_INCLUDE.match(line)
    if match:
        include = match.group(2)
        if Match('(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
            if (not _IsTestFilename(filename)):
                error(filename, linenum, 'readability/streams', 3, 'Streams are highly discouraged.')
class ResSPP(nn.Module):
    """Residual SPP block: BasicBlocks wrapped around an SPP stage.

    The SPP stage concatenates the input with three max-pooled copies and
    fuses them with a 1x1 conv.  ``n`` selects how many BasicBlocks run
    before/after the SPP stage: n=1 -> 1/0, n=2 -> 1/1, n=3 -> 2/1,
    n=4 -> 2/2.  Other values skip everything after conv1.
    """

    def __init__(self, c1=1024, c2=384, n=3, act='swish', k=(5, 9, 13)):
        super(ResSPP, self).__init__()
        # Halve the width only for the widest configuration.
        hidden = (c2 // 2) if (c2 == 1024) else c2
        self.conv1 = ConvBNLayer(c1, hidden, 1, act=act)
        self.basicBlock_spp1 = BasicBlock(hidden, hidden, shortcut=False)
        self.basicBlock_spp2 = BasicBlock(hidden, hidden, shortcut=False)
        self.spp = nn.ModuleList(
            [nn.MaxPool2d(kernel_size=size, stride=1, padding=(size // 2)) for size in k]
        )
        self.conv2 = ConvBNLayer(hidden * 4, hidden, 1, act=act)
        self.basicBlock_spp3 = BasicBlock(hidden, hidden, shortcut=False)
        self.basicBlock_spp4 = BasicBlock(hidden, hidden, shortcut=False)
        self.n = n

    def forward(self, x):
        # (blocks before SPP, blocks after SPP) for each supported depth.
        plans = {
            1: ((self.basicBlock_spp1,), ()),
            2: ((self.basicBlock_spp1,), (self.basicBlock_spp2,)),
            3: ((self.basicBlock_spp1, self.basicBlock_spp2), (self.basicBlock_spp3,)),
            4: ((self.basicBlock_spp1, self.basicBlock_spp2),
                (self.basicBlock_spp3, self.basicBlock_spp4)),
        }
        y = self.conv1(x)
        if self.n in plans:
            before, after = plans[self.n]
            for block in before:
                y = block(y)
            y = self.conv2(torch.cat([y] + [pool(y) for pool in self.spp], 1))
            for block in after:
                y = block(y)
        return y
def bin_xml_escape(arg: object) -> str:
    """Return ``str(arg)`` with characters illegal in XML 1.0 escaped as ``#xNN``.

    XML 1.0 permits #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD];
    everything else is replaced by a ``#x``-prefixed hex code.
    """

    def repl(matchobj: Match[str]) -> str:
        i = ord(matchobj.group())
        if (i <= 255):
            return ('#x%02X' % i)
        else:
            return ('#x%04X' % i)

    # The final range of the original pattern was garbled ('\ue000-0-FF'),
    # which is a reversed character range and made re.compile raise
    # "bad character range".  Restored to the XML 1.0 legal-character set
    # (DEL, 0x7F, is deliberately escaped as well).
    illegal_xml_re = '[^\t\n\r -~\x80-\ud7ff\ue000-\ufffd]'
    return re.sub(illegal_xml_re, repl, str(arg))
def clip_frames(unclipped: np.ndarray, clipped_num_frames: int) -> np.ndarray:
    """Trim *unclipped* along axis 0 to exactly *clipped_num_frames* frames.

    The input is returned untouched when it already has the requested
    length; otherwise only a one-frame surplus is accepted.
    """
    available = unclipped.shape[0]
    if available == clipped_num_frames:
        return unclipped
    # Anything other than a single extra frame indicates a caller bug.
    assert clipped_num_frames == available - 1
    return unclipped[:clipped_num_frames]
# NOTE(review): the decorator below had been truncated to a bare '.parametrize'
# fragment in the extracted text; the '@pytest.mark' prefix was restored.
@pytest.mark.parametrize('new_path,original_dictionary,output', [
    ('/a', {}, {'/a': ['/']}),
    ('b', {'/a': ['some_path', 'another_path']},
     {'/a': ['some_path', 'another_path'], '/b': ['/']}),
    ('/a/b/c/d', {'/e': ['some_path', 'another_path']},
     {'/e': ['some_path', 'another_path'], '/a/b/c/d': ['/', '/a', '/a/b', '/a/b/c']}),
])
def test_subdir_path_map(new_path, original_dictionary, output):
    """Parent-directory mappings match the expected dict, ignoring value order."""
    actual_mapping = BuildTriggerHandler.get_parent_directory_mappings(new_path, original_dictionary)
    # Compare with sorted value lists so the assertion is order-insensitive.
    # The original code assigned `value.sort()` (which returns None) back
    # into both dicts, silently reducing the comparison to keys only.
    normalized_actual = {key: sorted(value) for key, value in actual_mapping.items()}
    normalized_expected = {key: sorted(value) for key, value in output.items()}
    assert normalized_actual == normalized_expected
# NOTE(review): every decorator on this resource had its '@'-prefix fragment
# truncated in the extracted text (e.g. '_if' for 'show_if', '_scope' for
# 'require_scope', a bare tuple for 'resource(...)').  The names below were
# reconstructed from those fragments; confirm against the Quay codebase.
@resource('/v1/superuser/users/<namespace>/quota/<quota_id>', '/v1/superuser/organization/<namespace>/quota/<quota_id>')
@show_if(features.SUPER_USERS)
@show_if(features.QUOTA_MANAGEMENT)
class SuperUserUserQuota(ApiResource):
    """Superuser API for updating or deleting a namespace's quota."""

    schemas = {
        'UpdateNamespaceQuota': {
            'type': 'object',
            'description': 'Description of a new organization quota',
            'properties': {
                'limit_bytes': {
                    'type': 'integer',
                    'description': 'Number of bytes the organization is allowed',
                },
            },
        },
    }

    @require_fresh_login
    @verify_not_prod
    @nickname(['changeUserQuotaSuperUser', 'changeOrganizationQuotaSuperUser'])
    @require_scope(scopes.SUPERUSER)
    @validate_json_request('UpdateNamespaceQuota')
    def put(self, namespace, quota_id):
        # Only superusers may change another namespace's quota.
        if SuperUserPermission().can():
            quota_data = request.get_json()
            namespace_user = user.get_user_or_org(namespace)
            quota = get_quota(namespace_user.username, quota_id)
            try:
                if ('limit_bytes' in quota_data):
                    limit_bytes = quota_data['limit_bytes']
                    namespacequota.update_namespace_quota_size(quota, limit_bytes)
            except DataModelException as ex:
                raise request_error(exception=ex)
            return quota_view(quota)
        raise Unauthorized()

    @nickname(['deleteUserQuotaSuperUser', 'deleteOrganizationQuotaSuperUser'])
    @require_scope(scopes.SUPERUSER)
    def delete(self, namespace, quota_id):
        if SuperUserPermission().can():
            namespace_user = user.get_user_or_org(namespace)
            quota = get_quota(namespace_user.username, quota_id)
            namespacequota.delete_namespace_quota(quota)
            return ('', 204)
        raise Unauthorized()
def parse_args(argv: List[str]) -> argparse.Namespace:
    """Parse command-line arguments for the example TorchX captum app."""
    arg_parser = argparse.ArgumentParser(description='example TorchX captum app')
    arg_parser.add_argument(
        '--load_path',
        type=str,
        required=True,
        help='checkpoint path to load model weights from',
    )
    # Optional: a random dataset is generated when no data path is given.
    arg_parser.add_argument(
        '--data_path',
        type=str,
        help='path to load the training data from, if not provided, random dataset will be created',
    )
    arg_parser.add_argument(
        '--output_path',
        type=str,
        required=True,
        help='path to place analysis results',
    )
    return arg_parser.parse_args(argv)
def create_channel_from_models(our_model, partner_model, partner_pkey):
    """Build a netting-channel state from two participant models.

    Our side gets an unsigned balance proof; the partner side's proof is
    signed with *partner_pkey*.  A ``next_nonce`` of 1 (i.e. no transfer
    yet) results in no balance proof for that side.
    """
    channel_state = create(NettingChannelStateProperties(reveal_timeout=10, settle_timeout=100, our_state=NettingChannelEndStateProperties(address=our_model.participant_address, balance=our_model.balance, pending_locks=PendingLocksState(our_model.pending_locks)), partner_state=NettingChannelEndStateProperties(address=partner_model.participant_address, balance=partner_model.balance, pending_locks=PendingLocksState(partner_model.pending_locks)), open_transaction=SuccessfulTransactionState(finished_block_number=1)))
    # The most recently used nonce is one behind the next one to use.
    our_nonce = (our_model.next_nonce - 1)
    assert (our_nonce >= 0), 'nonce cannot be negative'
    if (our_nonce > 0):
        our_unsigned = create(BalanceProofProperties(nonce=our_nonce, transferred_amount=0, locked_amount=len(our_model.pending_locks), locksroot=compute_locksroot(channel_state.our_state.pending_locks), canonical_identifier=channel_state.canonical_identifier))
        channel_state.our_state.nonce = our_unsigned.nonce
    else:
        our_unsigned = None
    partner_nonce = (partner_model.next_nonce - 1)
    assert (partner_nonce >= 0), 'nonce cannot be negative'
    if (partner_nonce > 0):
        partner_unsigned = create(BalanceProofProperties(nonce=partner_nonce, transferred_amount=0, locked_amount=len(partner_model.pending_locks), locksroot=compute_locksroot(channel_state.partner_state.pending_locks), canonical_identifier=channel_state.canonical_identifier))
        # The partner's proof must carry a valid signature from their key.
        partner_signed = make_signed_balance_proof_from_unsigned(partner_unsigned, LocalSigner(partner_pkey))
        channel_state.partner_state.nonce = partner_signed.nonce
    else:
        partner_signed = None
    channel_state.our_state.balance_proof = our_unsigned
    channel_state.partner_state.balance_proof = partner_signed
    # Sanity-check the assembled state against the input models.
    assert (channel_state.our_total_deposit == our_model.contract_balance)
    assert (channel_state.partner_total_deposit == partner_model.contract_balance)
    assert_partner_state(channel_state.our_state, channel_state.partner_state, our_model)
    assert_partner_state(channel_state.partner_state, channel_state.our_state, partner_model)
    return channel_state
def test_lazy_gettext_defaultdomain():
    """A lazy string must re-evaluate against the currently configured locale."""
    app = flask.Flask(__name__)
    babel.Babel(app, default_locale='de_DE', default_domain='test')
    first = lazy_gettext('first')

    def rendered():
        # Evaluation happens inside a request context.
        with app.test_request_context():
            return str(first)

    assert rendered() == 'erste'
    # Changing the locale afterwards changes what the lazy string renders.
    get_babel(app).default_locale = 'en_US'
    assert rendered() == 'first'
def add_parse_opts(parser) -> None:
    """Register the output-formatting options on *parser*."""
    raw_output_help = "If set don't format output from kernel. If set to --raw_output=kunit, filters to just KUnit output."
    # '--raw_output' alone means 'all'; '--raw_output=kunit' narrows it.
    parser.add_argument('--raw_output', help=raw_output_help,
                        type=str, nargs='?', const='all', default=None)
    json_help = 'Stores test results in a JSON, and either prints to stdout or saves to file if a filename is specified'
    # '--json' alone means stdout; '--json FILE' writes to FILE.
    parser.add_argument('--json', nargs='?', help=json_help,
                        type=str, const='stdout', default=None)
class Bleu():
    """Thin wrapper around BleuScorer computing corpus BLEU up to n-grams."""

    def __init__(self, n=4):
        self._n = n
        self._hypo_for_image = {}
        self.ref_for_image = {}

    def compute_score(self, gts, res, verbose=1):
        """Score hypotheses *res* against references *gts* (dicts keyed by image id)."""
        assert (gts.keys() == res.keys())
        scorer = BleuScorer(n=self._n)
        for image_id in gts.keys():
            hypotheses = res[image_id]
            references = gts[image_id]
            # Exactly one hypothesis and at least one reference per image.
            assert (type(hypotheses) is list)
            assert (len(hypotheses) == 1)
            assert (type(references) is list)
            assert (len(references) >= 1)
            scorer += (hypotheses[0], references)
        (score, scores) = scorer.compute_score(option='closest', verbose=verbose)
        return (score, scores)

    def method(self):
        return 'Bleu'
# NOTE(review): the decorator had been truncated to a bare '_grad()' fragment
# in the extracted text; '@torch.no' was restored (gradients are not needed
# for evaluation).
@torch.no_grad()
def contrastive_evaluate(val_loader, model, memory_bank):
    """Top-1 accuracy of weighted-kNN predictions over *val_loader*.

    Returns the running average accuracy (percentage) across all batches.
    """
    top1 = AverageMeter('', ':6.2f')
    model.eval()
    for batch in val_loader:
        images = batch['image'].cuda(non_blocking=True)
        target = batch['target'].cuda(non_blocking=True)
        output = model(images)
        output = memory_bank.weighted_knn(output)
        acc1 = (100 * torch.mean(torch.eq(output, target).float()))
        top1.update(acc1.item(), images.size(0))
    return top1.avg
def load_document_topics(opt, recover_topic_peaks, max_m=None):
    """Load predicted document-topic arrays for both text sides of each subset.

    Returns {'D_T1': [...], 'D_T2': [...]}, one array per subset, optionally
    un-flattening topic peaks and truncating to the first *max_m* rows.
    """
    topic_model_folder = get_topic_pred_folder(opt)
    task = opt.get('tasks')[0]
    T1 = []
    T2 = []
    for s in opt.get('subsets'):
        prefix = os.path.join(topic_model_folder, (((task + '_') + s)))
        T1.append(np.load(prefix + '_1.npy'))
        T2.append(np.load(prefix + '_2.npy'))
    if recover_topic_peaks:
        # Undo the peak-flattening applied when the predictions were saved.
        for split in range(len(T1)):
            for line in range(len(T1[split])):
                T1[split][line] = unflatten_topic(T1[split][line])
                T2[split][line] = unflatten_topic(T2[split][line])
    if max_m is not None:
        T1 = [t[:max_m] for t in T1]
        T2 = [t[:max_m] for t in T2]
    return {'D_T1': T1, 'D_T2': T2}
class TestMisc():
    """Assorted trio_asyncio loop lifecycle and error-propagation tests.

    NOTE(review): the ``@pytest.mark.trio`` markers were garbled to bare
    ``.trio`` tokens in the source and have been restored here.
    """

    @pytest.mark.trio
    async def test_close_no_stop(self):
        # Closing a loop that was never stopped must raise.
        async with trio_asyncio.open_loop() as loop:
            triggered = trio.Event()

            def close_no_stop():
                with pytest.raises(RuntimeError):
                    loop.close()
                triggered.set()
            loop.call_soon(close_no_stop)
            (await triggered.wait())

    @pytest.mark.trio
    async def test_too_many_stops(self):
        with trio.move_on_after(1) as scope:
            async with trio_asyncio.open_loop() as loop:
                (await trio.lowlevel.checkpoint())
                loop.stop()
        assert (not scope.cancelled_caught), 'Possible deadlock after manual call to loop.stop'

    @pytest.mark.trio
    async def test_err1(self, loop):
        # asyncio exceptions propagate through aio_as_trio.
        async def raise_err():
            raise RuntimeError('Foo')
        with pytest.raises(RuntimeError) as err:
            (await trio_asyncio.aio_as_trio(raise_err, loop=loop)())
        assert (err.value.args[0] == 'Foo')

    @pytest.mark.trio
    async def test_err3(self, loop):
        owch = 0

        async def nest():
            nonlocal owch
            owch = 1
            raise RuntimeError('Hello')

        async def call_nested():
            with pytest.raises(RuntimeError) as err:
                (await trio_asyncio.trio_as_aio(nest, loop=loop)())
            assert (err.value.args[0] == 'Hello')
        (await trio_asyncio.aio_as_trio(call_nested, loop=loop)())
        assert owch

    @pytest.mark.trio
    async def test_run(self, loop):
        owch = 0

        async def nest():
            (await trio.sleep(0.01))
            nonlocal owch
            owch = 1

        async def call_nested():
            (await trio_asyncio.trio_as_aio(nest, loop=loop)())
        (await trio_asyncio.aio_as_trio(call_nested, loop=loop)())
        assert owch

    async def _test_run(self):
        # Same round-trip as test_run but using the implicit current loop.
        owch = 0

        async def nest():
            (await trio.sleep(0.01))
            nonlocal owch
            owch = 1

        async def call_nested():
            (await trio_asyncio.trio_as_aio(nest)())
        (await trio_asyncio.aio_as_trio(call_nested)())
        assert owch

    def test_run2(self):
        trio_asyncio.run(self._test_run)

    @pytest.mark.trio
    async def test_run_task(self):
        owch = 0

        async def nest(x):
            nonlocal owch
            owch += x
        # run_trio_task requires a running trio_asyncio loop ...
        with pytest.raises(RuntimeError):
            trio_asyncio.run_trio_task(nest, 100)
        # ... and open_loop is an async context manager only.
        with pytest.raises((AttributeError, RuntimeError, TypeError)):
            with trio_asyncio.open_loop():
                nest(1000)
        async with trio_asyncio.open_loop():
            trio_asyncio.run_trio_task(nest, 1)
            (await trio.sleep(0.05))
        assert (owch == 1)

    @pytest.mark.trio
    async def test_err2(self, loop):
        owch = 0

        async def nest():
            nonlocal owch
            owch = 1
            raise RuntimeError('Hello')

        async def call_nested():
            (await trio_asyncio.aio_as_trio(nest, loop=loop)())

        async def call_more_nested():
            with pytest.raises(RuntimeError) as err:
                (await trio_asyncio.trio_as_aio(call_nested, loop=loop)())
            assert (err.value.args[0] == 'Hello')
        (await trio_asyncio.aio_as_trio(call_more_nested, loop=loop)())
        assert owch

    @pytest.mark.trio
    async def test_run3(self, loop):
        owch = 0

        async def nest():
            nonlocal owch
            owch = 1

        async def call_nested():
            (await trio_asyncio.aio_as_trio(nest, loop=loop)())

        async def call_more_nested():
            (await trio_asyncio.trio_as_aio(call_nested, loop=loop)())
        (await trio_asyncio.aio_as_trio(call_more_nested, loop=loop)())
        assert owch

    @pytest.mark.trio
    async def test_cancel_sleep(self, loop):
        owch = 0

        def do_not_run():
            nonlocal owch
            owch = 1

        async def cancel_sleep():
            # Cancel the callback well before it is due to fire.
            h = loop.call_later(0.2, do_not_run)
            (await asyncio.sleep(0.01))
            h.cancel()
            (await asyncio.sleep(0.3))
        (await trio_asyncio.aio_as_trio(cancel_sleep, loop=loop)())
        assert (owch == 0)
def test_sqliteio_write_inserts_new_pixmap_item_jpg(tmpfile, view):
    """Writing a scene with one pixmap item stores its bytes under sqlar."""
    item = BeePixmapItem(QtGui.QImage(), filename='bee.jpg')
    view.scene.addItem(item)
    item.pixmap_to_bytes = MagicMock(return_value=(b'abc', 'jpg'))
    writer = SQLiteIO(tmpfile, view.scene, create_new=True)
    writer.write()
    assert item.save_id == 1
    row = writer.fetchone(
        'SELECT type, sqlar.data, sqlar.name FROM items '
        'INNER JOIN sqlar on sqlar.item_id = items.id')
    assert row[0] == 'pixmap'
    assert row[1] == b'abc'
    assert row[2] == '0001-bee.jpg'
def _iload_all_spickle_internal(stream, offset=None): if (offset is not None): stream.seek(offset, 0) else: header = stream.read(512) if (not header.startswith(b'SPICKLE')): raise ValueError('Not a SPICKLE file.') while True: try: (yield _load_one_spickle_internal(stream)) except EOFError: break
class AddressSpace(enum.IntEnum):
    """VISA address-space selectors for register-based bus access.

    Member values come from the VI_*_SPACE constants — presumably defined by
    the enclosing VISA constants module; confirm against the VISA spec.
    """
    # VXI/VME memory address spaces (16/24/32/64-bit addressing).
    a16 = VI_A16_SPACE
    a24 = VI_A24_SPACE
    a32 = VI_A32_SPACE
    a64 = VI_A64_SPACE
    # PXI configuration space and base-address registers 0-5.
    pxi_config = VI_PXI_CFG_SPACE
    pxi_bar0 = VI_PXI_BAR0_SPACE
    pxi_bar1 = VI_PXI_BAR1_SPACE
    pxi_bar2 = VI_PXI_BAR2_SPACE
    pxi_bar3 = VI_PXI_BAR3_SPACE
    pxi_bar4 = VI_PXI_BAR4_SPACE
    pxi_bar5 = VI_PXI_BAR5_SPACE
    pxi_allocated = VI_PXI_ALLOC_SPACE
class MatIO(fileio.FileIO):
    """Read/write spatial weights stored in a MATLAB ``.mat`` file.

    Only one weights object per file is supported: a second read raises
    StopIteration (the FileIO iteration protocol).
    """
    FORMATS = ['mat']
    MODES = ['r', 'w']

    def __init__(self, *args, **kwargs):
        self._varName = 'Unknown'  # name of the MATLAB variable holding W
        fileio.FileIO.__init__(self, *args, **kwargs)
        # .mat files are binary; append 'b' to the requested mode.
        self.file = open(self.dataPath, (self.mode + 'b'))

    def _set_varName(self, val):
        # Non-string assignments are silently ignored.
        if issubclass(type(val), str):
            self._varName = val

    def _get_varName(self) -> str:
        return self._varName
    varName = property(fget=_get_varName, fset=_set_varName)

    def read(self, n=(- 1)):
        """Return the weights object stored in the file (*n* is ignored)."""
        self._complain_ifclosed(self.closed)
        w = self._read()
        return w

    def seek(self, pos):
        # Only rewinding to the start of the file is supported.
        if (pos == 0):
            self.file.seek(0)
            self.pos = 0

    def _read(self):
        # One W per file: a second read signals exhaustion.
        if (self.pos > 0):
            raise StopIteration
        mat = sio.loadmat(self.file)
        # Skip MATLAB metadata entries such as '__header__'.
        mat_keys = [k for k in mat if (not k.startswith('_'))]
        full_w = mat[mat_keys[0]]
        self.pos += 1
        w = full2W(full_w)
        return w

    def write(self, obj):
        """Write a PySAL W as a dense matrix (sparse fallback on failure)."""
        self._complain_ifclosed(self.closed)
        if issubclass(type(obj), W):
            try:
                w = full(obj)[0]
            except ValueError:
                # Not representable densely; store the sparse form instead.
                w = obj.sparse
            sio.savemat(self.file, {'WEIGHT': w})
            self.pos += 1
        else:
            raise TypeError(('Expected a PySAL weights object, got: %s.' % type(obj)))

    def close(self):
        self.file.close()
        fileio.FileIO.close(self)
class Effect7046(BaseEffect):
    """Flag Cruisers elite bonus: boosts every hull/armor/shield damage
    resonance attribute by the ship's eliteBonusFlagCruisers1 amount,
    gated on the 'Flag Cruisers' skill.
    """

    type = 'passive'

    # All resonance attributes receive the same boost; listed in the original
    # application order.
    _RESONANCE_ATTRS = (
        'explosiveDamageResonance',
        'shieldKineticDamageResonance',
        'shieldExplosiveDamageResonance',
        'armorThermalDamageResonance',
        'thermalDamageResonance',
        'shieldEmDamageResonance',
        'armorExplosiveDamageResonance',
        'armorEmDamageResonance',
        'shieldThermalDamageResonance',
        'kineticDamageResonance',
        'armorKineticDamageResonance',
        'emDamageResonance',
    )

    def handler(fit, src, context, projectionRange, **kwargs):
        """Apply the elite bonus to all resonance attributes of fit.ship."""
        # The bonus value is the same for every attribute; fetch it once.
        bonus = src.getModifiedItemAttr('eliteBonusFlagCruisers1')
        for attr in Effect7046._RESONANCE_ATTRS:
            fit.ship.boostItemAttr(attr, bonus, skill='Flag Cruisers', **kwargs)
@pytest.fixture(params=['message', 'inline'])
def callback_query(bot, request):
    """Build a CallbackQuery attached either to a message or an inline message.

    NOTE(review): the fixture decorator was garbled to a bare
    ``(params=['message', 'inline'])`` in the source and has been restored.
    """
    cbq = CallbackQuery(
        TestCallbackQueryBase.id_,
        TestCallbackQueryBase.from_user,
        TestCallbackQueryBase.chat_instance,
        data=TestCallbackQueryBase.data,
        game_short_name=TestCallbackQueryBase.game_short_name,
    )
    cbq.set_bot(bot)
    cbq._unfreeze()
    if (request.param == 'message'):
        cbq.message = TestCallbackQueryBase.message
        cbq.message.set_bot(bot)
    else:
        cbq.inline_message_id = TestCallbackQueryBase.inline_message_id
    return cbq
class RobustLoss(torch.nn.Module):
    """Distributionally robust loss over per-sample losses *v*.

    Computes sup_p E_p[v] over an uncertainty set determined by *geometry*
    ('cvar' or 'chi-square'), set size *size* and dual regularizer *reg*.
    size == 0 reduces to plain ERM (mean loss).
    """

    def __init__(self, size, reg, geometry, tol=0.0001, max_iter=1000, debugging=False):
        super().__init__()
        self.size = size            # uncertainty-set size (alpha / rho)
        self.reg = reg              # dual regularization strength
        self.geometry = geometry
        self.tol = tol              # bisection tolerance
        self.max_iter = max_iter    # bisection iteration cap
        self.debugging = debugging  # if True, best_response also returns eta*
        self.is_erm = (size == 0)
        if (geometry not in GEOMETRIES):
            raise ValueError(('Geometry %s not supported' % geometry))
        if ((geometry == 'cvar') and (self.size > 1)):
            raise ValueError(f'alpha should be < 1 for cvar, is {self.size}')

    def best_response(self, v):
        """Return the worst-case probability vector p* for loss vector *v*.

        Closed forms are used where available; otherwise the dual variable
        eta is found by bisection on the constraint sum(p(eta)) == 1
        (or on the chi-square divergence for reg == 0).
        """
        size = self.size
        reg = self.reg
        m = v.shape[0]
        if (self.geometry == 'cvar'):
            if (self.reg > 0):
                # Smoothed CVaR: p is a clipped exponential tilt of v.
                if (size == 1.0):
                    return (torch.ones_like(v) / m)

                def p(eta):
                    x = ((v - eta) / reg)
                    return (torch.min(torch.exp(x), torch.Tensor([(1 / size)]).type(x.dtype)) / m)

                def bisection_target(eta):
                    return (1.0 - p(eta).sum())
                eta_min = (reg * torch.logsumexp(((v / reg) - np.log(m)), 0))
                eta_max = v.max()
                # If the lower bracket already satisfies the constraint, the
                # final bisection below can be skipped.
                if (torch.abs(bisection_target(eta_min)) <= self.tol):
                    return p(eta_min)
            else:
                # Hard CVaR: uniform mass on the top size*m losses, with the
                # fractional remainder placed on the next-largest loss.
                cutoff = int((size * m))
                surplus = (1.0 - (cutoff / (size * m)))
                p = torch.zeros_like(v)
                idx = torch.argsort(v, descending=True)
                p[idx[:cutoff]] = (1.0 / (size * m))
                if (cutoff < m):
                    p[idx[cutoff]] = surplus
                return p
        if (self.geometry == 'chi-square'):
            # Near-constant losses: the uniform distribution is optimal.
            if (((v.max() - v.min()) / v.max()) <= MIN_REL_DIFFERENCE):
                return (torch.ones_like(v) / m)
            if (size == float('inf')):
                assert (reg > 0)

                def p(eta):
                    return (torch.relu((v - eta)) / (reg * m))

                def bisection_target(eta):
                    return (1.0 - p(eta).sum())
                eta_min = min((v.sum() - (reg * m)), v.min())
                eta_max = v.max()
            else:
                assert (size < float('inf'))
                # Constraint set covers the whole simplex: put all mass on
                # the maximal losses.
                if (m <= (1 + (2 * size))):
                    out = (v == v.max()).float()
                    out /= out.sum()
                    return out
                if (reg == 0):
                    def p(eta):
                        pp = torch.relu((v - eta))
                        return (pp / pp.sum())

                    def bisection_target(eta):
                        # chi-square divergence of p(eta) from uniform, minus size.
                        pp = p(eta)
                        w = ((m * pp) - torch.ones_like(pp))
                        return ((0.5 * torch.mean((w ** 2))) - size)
                    eta_min = ((- (1.0 / (np.sqrt(((2 * size) + 1)) - 1))) * v.max())
                    eta_max = v.max()
                else:
                    def p(eta):
                        pp = torch.relu((v - eta))
                        opt_lam = max(reg, (torch.norm(pp) / np.sqrt((m * (1 + (2 * size))))))
                        return (pp / (m * opt_lam))

                    def bisection_target(eta):
                        return (1 - p(eta).sum())
                    eta_min = (v.min() - 1)
                    eta_max = v.max()
        # Shared tail for all bisection-based branches above.
        eta_star = bisection(eta_min, eta_max, bisection_target, tol=self.tol, max_iter=self.max_iter)
        if self.debugging:
            return (p(eta_star), eta_star)
        return p(eta_star)

    def forward(self, v):
        """Return the robust loss value for per-sample losses *v*."""
        if self.is_erm:
            return v.mean()
        else:
            # p* is treated as a constant w.r.t. the model parameters.
            with torch.no_grad():
                p = self.best_response(v)
            if (self.geometry == 'cvar'):
                return cvar_value(p, v, self.reg)
            elif (self.geometry == 'chi-square'):
                return chi_square_value(p, v, self.reg)
class FuzzyTestCase(unittest.TestCase):
    """Exercises FuzzyDict creation, fuzzy lookup and containment."""

    test_dict = OrderedDict([(u'Hiya', 1), (u'hiya', 2), (u'test3', 3), (1, 324)])

    def test_creation_empty(self):
        self.assertEqual(FuzzyDict(), {})

    def test_creation_dict(self):
        fuzzy = FuzzyDict(self.test_dict)
        self.assertEqual(fuzzy, self.test_dict)
        self.assertEqual(self.test_dict[u'Hiya'], fuzzy[u'hiya'])
        # A high cutoff must reject the near-miss key.
        strict = FuzzyDict(self.test_dict, cutoff=0.8)
        self.assertEqual(fuzzy, self.test_dict)
        self.assertRaises(KeyError, strict.__getitem__, u'hiya')

    def test_contains(self):
        fuzzy = FuzzyDict(self.test_dict)
        self.assertEqual(True, fuzzy.__contains__(u'hiya'))
        self.assertEqual(True, fuzzy.__contains__(u'test3'))
        self.assertEqual(True, fuzzy.__contains__(u'hiya'))
        self.assertEqual(False, fuzzy.__contains__(u'FuzzyWuzzy'))
        self.assertEqual(True, fuzzy.__contains__(1))
        self.assertEqual(False, fuzzy.__contains__(23))

    def test_get_item(self):
        fuzzy = FuzzyDict(self.test_dict)
        self.assertEqual(self.test_dict[u'Hiya'], fuzzy[u'hiya'])
        self.assertRaises(KeyError, fuzzy.__getitem__, u'FuzzyWuzzy')
        # A very low cutoff accepts almost anything (and exact non-str keys).
        lenient = FuzzyDict(self.test_dict, cutoff=0.14)
        self.assertEqual(1, lenient[u'FuzzyWuzzy'])
        self.assertEqual(324, lenient[1])
        self.assertRaises(KeyError, lenient.__getitem__, 23)
def accuracy(pred, target, topk=1, thresh=None):
    """Compute top-k accuracy (as a percentage) of class scores.

    pred:   (N, C) score tensor; target: (N,) integer labels.
    topk:   an int or a tuple of ints (k values).
    thresh: optional minimum score for a prediction to count as correct.
    Returns a single 1-element tensor when topk is an int, else a list.
    """
    assert isinstance(topk, (int, tuple))
    single = isinstance(topk, int)
    ks = (topk,) if single else topk
    maxk = max(ks)
    if pred.size(0) == 0:
        # Empty batch: accuracy is defined as 0 for every k.
        zeros = [pred.new_tensor(0.0) for _ in range(len(ks))]
        return zeros[0] if single else zeros
    assert pred.ndim == (target.ndim + 1)
    assert pred.size(0) == target.size(0)
    assert maxk <= pred.size(1), f'maxk {maxk} exceeds pred dimension {pred.size(1)}'
    pred_value, pred_label = pred.topk(maxk, dim=1)
    pred_label = pred_label.transpose(0, 1)  # (maxk, N)
    correct = pred_label.eq(target.unsqueeze(0).expand_as(pred_label))
    if thresh is not None:
        # Only predictions scoring above thresh may count as correct.
        correct = correct & (pred_value > thresh).t()
    results = []
    for k in ks:
        hits = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        results.append(hits.mul_(100.0 / target.numel()))
    return results[0] if single else results
@register_model  # NOTE(review): restored from garbled "_model" token in source
def caformer_b36(pretrained=False, **kwargs):
    """CAFormer-B36: MetaFormer with SepConv mixers in stages 1-2 and
    Attention mixers in stages 3-4, optionally loading pretrained weights.
    """
    model = MetaFormer(
        depths=[3, 12, 18, 3],
        dims=[128, 256, 512, 768],
        token_mixers=[SepConv, SepConv, Attention, Attention],
        head_fn=MlpHead,
        **kwargs)
    model.default_cfg = default_cfgs['caformer_b36']
    if pretrained:
        state_dict = torch.hub.load_state_dict_from_url(
            url=model.default_cfg['url'], map_location='cpu', check_hash=True)
        model.load_state_dict(state_dict)
    return model
@pytest.mark.fast  # NOTE(review): restored from garbled ".fast" token in source
def test_ignore_cached_files():
    """Loading the same databank twice must not trip over cache files."""
    sf = SpectrumFactory(wavenum_min=2000, wavenum_max=3000, pressure=1)
    file_dir = getTestFile('cdsd_hitemp_09_fragment.txt')
    # Glob over all fragments sharing the same prefix.
    test_file = (file_dir[:(- 8)] + '*')
    sf.load_databank(path=test_file, format='cdsd-hitemp', parfuncfmt='hapi')
    try:
        sf.load_databank(path=test_file, format='cdsd-hitemp', parfuncfmt='hapi')
    except UnicodeDecodeError as err:
        # BUGFIX: UnicodeDecodeError requires (encoding, object, start, end,
        # reason); the original one-arg call would itself raise TypeError.
        # Re-raise with the original positions and an explanatory reason.
        raise UnicodeDecodeError(
            err.encoding, err.object, err.start, err.end,
            "Couldn't load database the 2nd time. This may be due to cache files trying to be read as normal files",
        ) from err
class Transaction():
    """Record of a single fill: a quantity of *ticker* at *price*, plus
    commission.  net_amount is the cash impact (gross value - commission).
    """

    def __init__(self, transaction_fill_time: datetime, ticker: Ticker, quantity: float, price: float, commission: float, trade_id=None, account=None, strategy=None, broker=None, currency=None):
        assert (commission >= 0.0)
        self.transaction_fill_time = transaction_fill_time
        self.ticker = ticker
        self.quantity = quantity
        self.price = price
        self.commission = commission
        self.net_amount = ((quantity * price) - commission)
        self.trade_id = trade_id
        self.account = account
        self.strategy = strategy
        self.broker = broker
        self.currency = currency

    @staticmethod
    def get_header():
        """Column names matching get_row().

        BUGFIX: was declared without ``self`` or @staticmethod, so calling it
        on an instance raised TypeError; @staticmethod allows both
        Transaction.get_header() and instance.get_header().
        """
        return ['Transaction_fill_time', 'Asset_name', 'Contract_symbol', 'Security_type', 'Contract_size', 'Quantity', 'Price', 'Commission', 'Net_amount', 'Trade_ID', 'Account', 'Strategy', 'Broker', 'Currency']

    def get_row(self):
        """Return this transaction's values in get_header() column order."""
        row = [self.transaction_fill_time, self.ticker.name, self.ticker.ticker, self.ticker.security_type.value, self.ticker.point_value, self.quantity, self.price, self.commission, self.net_amount, self.trade_id, self.account, self.strategy, self.broker, self.currency]
        return row

    def __str__(self):
        return f'{self.__class__.__name__} ({date_to_str(self.transaction_fill_time)}) -> Quantity: {self.quantity:>8}, Price: {self.price:>10.2f}, Commission: {self.commission:>12.8f}, Net Amount: {self.net_amount:>20.8f}, Ticker: {self.ticker}, Trade_id: {self.trade_id}, Account: {self.account}, Strategy: {self.strategy}, Broker: {self.broker}, Currency: {self.currency}'

    def __eq__(self, other):
        # Equality ignores the bookkeeping fields (trade_id, account, ...).
        if (self is other):
            return True
        if (not isinstance(other, Transaction)):
            return False
        return ((self.transaction_fill_time, self.ticker, self.quantity, self.price, self.commission) == (other.transaction_fill_time, other.ticker, other.quantity, other.price, other.commission))
def test_create(monkeypatch):
    """create() writes a file for str contents (even empty) but skips None."""
    created = {}

    def record(path, *_args, **_kwargs):
        created[path] = True

    monkeypatch.setattr('pyscaffold.file_system.create_file', record)
    for contents in ('contents', ''):
        path = uniqpath()
        create(path, contents, {})
        assert created[path]
    # None contents means "do not create the file at all".
    path = uniqpath()
    create(path, None, {})
    assert path not in created
def compose_transforms(meta, center_crop=True, new_imageSize=None, override_meta_imsize=False):
    """Build the torchvision preprocessing pipeline described by *meta*.

    meta supplies mean/std/imageSize; *new_imageSize* replaces the meta size
    when override_meta_imsize is True.  Only square sizes are supported.
    """
    normalize = transforms.Normalize(mean=meta['mean'], std=meta['std'])
    im_size = new_imageSize if override_meta_imsize else meta['imageSize']
    assert im_size[0] == im_size[1], 'expected square image size'
    if center_crop:
        # Resize slightly larger, then crop the center to the target size.
        pipeline = [transforms.Resize(int(im_size[0] * 1.2)),
                    transforms.CenterCrop(size=(im_size[0], im_size[1]))]
    else:
        pipeline = [transforms.Resize(size=(im_size[0], im_size[1]))]
    pipeline.append(transforms.ToTensor())
    if meta['std'] == [1, 1, 1]:
        # Unit std signals a model trained on raw 0-255 pixel values.
        pipeline.append(lambda x: (x * 255.0))
    pipeline.append(normalize)
    return transforms.Compose(pipeline)
class MESolver(SESolver):
    """Lindblad master-equation solver (``mesolve`` backend).

    Builds the right-hand side as a Liouvillian from H plus a dissipator per
    collapse operator; operators that are already superoperators are used
    as-is.
    """
    name = 'mesolve'
    _avail_integrators = {}
    solver_options = {'progress_bar': '', 'progress_kwargs': {'chunk_size': 10}, 'store_final_state': False, 'store_states': None, 'normalize_output': True, 'method': 'adams'}

    def __init__(self, H, c_ops=None, *, options=None):
        _time_start = time()
        if (not isinstance(H, (Qobj, QobjEvo))):
            raise TypeError('The Hamiltonian must be a Qobj or QobjEvo')
        # Accept a single collapse operator, a list, or nothing at all.
        c_ops = (c_ops or [])
        c_ops = ([c_ops] if isinstance(c_ops, (Qobj, QobjEvo)) else c_ops)
        for c_op in c_ops:
            if (not isinstance(c_op, (Qobj, QobjEvo))):
                raise TypeError('All `c_ops` must be a Qobj or QobjEvo')
        self._num_collapse = len(c_ops)
        # Lift H to a Liouvillian unless it is already a superoperator, then
        # add one Lindblad dissipator per (non-super) collapse operator.
        rhs = (H if H.issuper else liouvillian(H))
        rhs += sum(((c_op if c_op.issuper else lindblad_dissipator(c_op)) for c_op in c_ops))
        # Deliberately bypasses SESolver.__init__: rhs is already the full
        # open-system generator.
        Solver.__init__(self, rhs, options=options)

    def _initialize_stats(self):
        stats = super()._initialize_stats()
        stats.update({'solver': 'Master Equation Evolution', 'num_collapse': self._num_collapse})
        return stats

    def StateFeedback(cls, default=None, raw_data=False, prop=False):
        """Return a feedback argument exposing the evolving (open-system) state.

        NOTE(review): takes ``cls`` — presumably decorated @classmethod in the
        original source; confirm upstream.
        """
        if raw_data:
            return _DataFeedback(default, open=True, prop=prop)
        return _QobjFeedback(default, open=True, prop=prop)
class Migration(migrations.Migration):
    """Add tertiary/quaternary/quinary title and text fields to Task, and
    refresh the help/verbose text of the primary and secondary fields."""

    dependencies = [('tasks', '0019_meta')]

    operations = [
        # New optional text fields for languages 3-5.
        migrations.AddField(model_name='task', name='text_lang3', field=models.TextField(blank=True, help_text='The text for this task in the tertiary language.', null=True, verbose_name='Text (tertiary)')),
        migrations.AddField(model_name='task', name='text_lang4', field=models.TextField(blank=True, help_text='The text for this task in the quaternary language.', null=True, verbose_name='Text (quaternary)')),
        migrations.AddField(model_name='task', name='text_lang5', field=models.TextField(blank=True, help_text='The text for this task in the quinary language.', null=True, verbose_name='Text (quinary)')),
        # New optional title fields for languages 3-5.
        migrations.AddField(model_name='task', name='title_lang3', field=models.CharField(blank=True, help_text='The title for this task in the tertiary language.', max_length=256, null=True, verbose_name='Title (tertiary)')),
        migrations.AddField(model_name='task', name='title_lang4', field=models.CharField(blank=True, help_text='The title for this task in the quaternary language.', max_length=256, null=True, verbose_name='Title (quaternary)')),
        migrations.AddField(model_name='task', name='title_lang5', field=models.CharField(blank=True, help_text='The title for this task in the quinary language.', max_length=256, null=True, verbose_name='Title (quinary)')),
        # Reworded help/verbose text on the existing language-1/2 fields.
        migrations.AlterField(model_name='task', name='text_lang1', field=models.TextField(blank=True, help_text='The text for this task in the primary language.', null=True, verbose_name='Text (primary)')),
        migrations.AlterField(model_name='task', name='text_lang2', field=models.TextField(blank=True, help_text='The text for this task in the secondary language.', null=True, verbose_name='Text (secondary)')),
        migrations.AlterField(model_name='task', name='title_lang1', field=models.CharField(blank=True, help_text='The title for this task in the primary language.', max_length=256, null=True, verbose_name='Title (primary)')),
        migrations.AlterField(model_name='task', name='title_lang2', field=models.CharField(blank=True, help_text='The title for this task in the secondary language.', max_length=256, null=True, verbose_name='Title (secondary)'))]
def loadData(root_path):
    """Copy the MULTIWOZ2.1 dataset files up into *root_path*.

    Replaces five duplicated shutil.copy calls with a loop over the fixed
    file list.  Raises FileNotFoundError if any expected file is missing.
    """
    src_dir = os.path.join(root_path, 'MULTIWOZ2.1')
    for name in ('data.json', 'ontology.json', 'valListFile.json',
                 'testListFile.json', 'dialogue_acts.json'):
        shutil.copy(os.path.join(src_dir, name), root_path)
class GCNModelSiemens(GCNModelVAE):
    """GCN VAE variant with a dense pairwise-MLP decoder.

    Nodes are embedded, then every node pair is scored by an MLP over the
    symmetric features |z_i - z_j| and (z_i + z_j).
    """

    def __init__(self, placeholders, num_features, num_nodes, features_nonzero, **kwargs):
        super(GCNModelSiemens, self).__init__(placeholders, num_features, num_nodes, features_nonzero, **kwargs)

    def make_decoder(self):
        # l0 embeds the raw inputs, l1 the latent codes; their sum feeds l2.
        self.l0 = Dense(input_dim=self.input_dim, output_dim=FLAGS.hidden3, act=tf.nn.elu, dropout=0.0, bias=True, logging=self.logging)
        self.l1 = Dense(input_dim=FLAGS.hidden2, output_dim=FLAGS.hidden3, act=tf.nn.elu, dropout=0.0, bias=True, logging=self.logging)
        self.l2 = Dense(input_dim=FLAGS.hidden3, output_dim=FLAGS.hidden2, act=(lambda x: x), dropout=self.dropout, bias=True, logging=self.logging)
        # Pairwise MLP: concat of |diff| and sum features is 2*hidden2 wide.
        self.l3 = Dense(input_dim=(2 * FLAGS.hidden2), output_dim=FLAGS.hidden3, act=tf.nn.elu, dropout=self.dropout, bias=True, logging=self.logging)
        self.l3p5 = Dense(input_dim=FLAGS.hidden3, output_dim=FLAGS.hidden3, act=tf.nn.elu, dropout=self.dropout, bias=True, logging=self.logging)
        self.l4 = Dense(input_dim=FLAGS.hidden3, output_dim=1, act=(lambda x: x), dropout=self.dropout, bias=True, logging=self.logging)
        # Defined but not used by decoder() below.
        self.l5 = InnerProductDecoder(input_dim=FLAGS.hidden2, act=(lambda x: x), logging=self.logging)

    def decoder(self, z):
        """Score all node pairs from latent codes *z*; returns flat logits."""
        update = (self.l1(z) + self.l0(tf.sparse_tensor_to_dense(self.inputs)))
        update = self.l2(update)
        # Symmetric pairwise features via broadcasting: |z_i - z_j|, z_i + z_j.
        A = tf.abs((tf.expand_dims(update, 1) - tf.expand_dims(update, 0)))
        B = (tf.expand_dims(update, 1) + tf.expand_dims(update, 0))
        update = tf.concat((A, B), axis=2)
        update = tf.reshape(update, [(- 1), (2 * FLAGS.hidden2)])
        update = self.l3(update)
        update = self.l3p5(update)
        update = self.l4(update)
        reconstructions = tf.squeeze(update)
        # Keep the sigmoid adjacency around for inspection/eval.
        self.full_recon = tf.nn.sigmoid(reconstructions)
        return reconstructions

    def sample(self):
        """Decode latent codes drawn from a standard-normal prior into an
        n_samples x n_samples reconstructed adjacency."""
        z = tf.random_normal([self.n_samples, FLAGS.hidden2])
        reconstruction = tf.nn.sigmoid(self.decoder(z))
        reconstruction = tf.reshape(reconstruction, [self.n_samples, self.n_samples])
        return reconstruction
class MetaConv2d(MetaModule):
    """Conv2d whose weight/bias are requires-grad *buffers*, not Parameters.

    A throwaway ``nn.Conv2d`` supplies shape bookkeeping and initial values;
    registering the tensors as buffers keeps them out of ``parameters()``
    — presumably so a meta-learner can overwrite them with computed tensors;
    confirm against MetaModule's update logic.
    """

    def __init__(self, *args, **kwargs):
        super().__init__()
        ignore = nn.Conv2d(*args, **kwargs)  # used only for init/config
        self.in_channels = ignore.in_channels
        self.out_channels = ignore.out_channels
        self.stride = ignore.stride
        self.padding = ignore.padding
        self.dilation = ignore.dilation
        self.groups = ignore.groups
        self.kernel_size = ignore.kernel_size
        self.register_buffer('weight', to_var(ignore.weight.data, requires_grad=True))
        if (ignore.bias is not None):
            self.register_buffer('bias', to_var(ignore.bias.data, requires_grad=True))
        else:
            self.register_buffer('bias', None)

    def forward(self, x):
        # Functional conv using the buffered weight/bias.
        return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)

    def named_leaves(self):
        """Expose the learnable leaves for the meta-update machinery."""
        return [('weight', self.weight), ('bias', self.bias)]
class Bottleneck(nn.Module):
    """ResNet bottleneck: 1x1 reduce, 3x3 spatial, 1x1 expand (x4 channels)."""

    expansion = 4
    __constants__ = ['downsample']

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        norm = nn.BatchNorm2d if norm_layer is None else norm_layer
        width = int(planes * (base_width / 64.0)) * groups
        # 1x1 reduce -> 3x3 (carries stride/groups/dilation) -> 1x1 expand.
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Residual forward pass with optional projection shortcut."""
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        shortcut = x if self.downsample is None else self.downsample(x)
        out += shortcut
        return self.relu(out)
def status_change(names=PROPERTY_NAMES, values=PROPERTY_VALUES):
    """Hypothesis strategy producing StatusChange element descriptions.

    Returns tuples of (ConvertChildrenToText('StatusChange'), children)
    where children holds at most one (event-name, attribute-list) pair and
    each attribute is a DeviceID / CapabilityId / Value / generic name-value
    pair drawn from the samples below.
    """
    # Known device serials (plus empty) and capability codes.
    devices = st.sampled_from(['DA898B', 'F0D1BC', '94103EA2B277BD6E', '94103EA2B27751AB', ''])
    capabilities = st.sampled_from(['10006', '10008', '30008', '30009', '3000A', '10300', '30301'])
    # DeviceID sometimes carries an available="NO" attribute.
    device_id = st.sampled_from(['DeviceID', ElementWithAttributes('DeviceID', {'available': 'NO'})])
    return st.tuples(st.just(ConvertChildrenToText('StatusChange')), st.lists(st.tuples((st.just('StateEvent') | names), st.lists((((st.tuples(device_id, (devices | values)) | st.tuples(st.just('CapabilityId'), (capabilities | values))) | st.tuples(st.just('Value'), values)) | st.tuples(names, values)), min_size=2, max_size=4)), max_size=1))
class _FontStyleRunsRangeIterator():
    """Zips parallel style run-lists and resolves each run to a loaded font."""

    def __init__(self, font_names, font_sizes, bolds, italics, stretch, dpi):
        self.zip_iter = runlist.ZipRunIterator(
            (font_names, font_sizes, bolds, italics, stretch))
        self.dpi = dpi

    def _load(self, styles):
        # Import locally to avoid a hard pyglet.font dependency at import time.
        from pyglet import font
        name, size, bold, italic, stretch = styles
        return font.load(name, size, bold=bool(bold), italic=bool(italic),
                         stretch=stretch, dpi=self.dpi)

    def ranges(self, start, end):
        """Yield (start, end, font) runs covering [start, end)."""
        for run_start, run_end, styles in self.zip_iter.ranges(start, end):
            yield (run_start, run_end, self._load(styles))

    def __getitem__(self, index):
        return self._load(self.zip_iter[index])
class KnownValues(unittest.TestCase):
    """Regression tests for GW quasiparticle energies and RPA energies.

    NOTE(review): the reference literals below (``(- 0.)``, ``0.``,
    ``(- 19.)``, ``(- 76.)``) look truncated — comparing quasiparticle and
    correlation energies to bare zeros is almost certainly not intended.
    Restore the full constants from the upstream test suite before trusting
    these assertions.
    """

    def test_gwac_pade(self):
        nocc = (mol.nelectron // 2)
        gw_obj = gw.GW(mf, freq_int='ac', frozen=0)
        gw_obj.linearized = False
        gw_obj.ac = 'pade'
        # Only a window of orbitals around HOMO/LUMO is computed.
        gw_obj.kernel(orbs=range((nocc - 3), (nocc + 3)))
        self.assertAlmostEqual(gw_obj.mo_energy[(nocc - 1)], (- 0.), 5)
        self.assertAlmostEqual(gw_obj.mo_energy[nocc], 0., 5)

    def test_gwcd(self):
        nocc = (mol.nelectron // 2)
        gw_obj = gw.GW(mf, freq_int='cd', frozen=0)
        gw_obj.linearized = False
        gw_obj.kernel(orbs=range(0, (nocc + 3)))
        self.assertAlmostEqual(gw_obj.mo_energy[(nocc - 1)], (- 0.), 5)
        self.assertAlmostEqual(gw_obj.mo_energy[nocc], 0., 5)
        self.assertAlmostEqual(gw_obj.mo_energy[0], (- 19.), 4)

    def test_gw_exact(self):
        # Builds its own H2O/cc-pVDZ system with an HF-exchange DFT
        # reference, shadowing the module-level mol/mf.
        mol = gto.Mole()
        mol.verbose = 7
        mol.output = '/dev/null'
        mol.atom = [['O', (0.0, 0.0, 0.0)], ['H', (0.0, (- 0.757), 0.587)], ['H', (0.0, 0.757, 0.587)]]
        mol.basis = 'cc-pvdz'
        mol.build()
        mf = dft.RKS(mol)
        mf.xc = 'hf'
        mf.kernel()
        nocc = (mol.nelectron // 2)
        nvir = (mf.mo_energy.size - nocc)
        td = tdscf.dRPA(mf)
        td.nstates = min(100, (nocc * nvir))
        td.kernel()
        gw_obj = gw.GW(mf, freq_int='exact', frozen=0)
        gw_obj.kernel()
        gw_obj.linearized = True
        gw_obj.kernel(orbs=[(nocc - 1), nocc])
        self.assertAlmostEqual(gw_obj.mo_energy[(nocc - 1)], (- 0.), 6)
        self.assertAlmostEqual(gw_obj.mo_energy[nocc], 0., 6)

    def test_rpa(self):
        rpa_obj = rpa.RPA(mf, frozen=0)
        rpa_obj.kernel()
        self.assertAlmostEqual(rpa_obj.e_tot, (- 76.), 6)
        self.assertAlmostEqual(rpa_obj.e_corr, (- 0.), 6)
def checksum(filename, chkname, buffering=None):
    """Compute the *chkname* checksum of *filename*.

    chkname may be 'adler32'/'crc32' (zlib-style CRC path) or any algorithm
    in hashlib.algorithms_available.  *buffering*, when truthy, overrides
    the block size derived from the file.  Returns None for unknown names.
    """
    buf = buffering or blksize(filename)
    if chkname in ('adler32', 'crc32'):
        return _crcsum(filename, chkname, buf)
    if chkname in hashlib.algorithms_available:
        return _hashsum(filename, chkname, buf)
    return None
class TID3Header(TestCase):
    """ID3 tag-header parsing tests across tag versions 2.1-2.4.

    NOTE(review): several byte literals below contain a literal backslash
    followed by 'x' (e.g. ``b'ID3\x04\\x00...'``), which looks like
    corruption of an escaped flag byte — preserved verbatim here; confirm
    against the upstream test fixtures.
    """

    silence = os.path.join(DATA_DIR, 'silence-44-s.mp3')
    empty = os.path.join(DATA_DIR, 'emptyfile.mp3')

    def test_header_empty(self):
        with open(self.empty, 'rb') as fileobj:
            self.assertRaises(ID3Error, ID3Header, fileobj)

    def test_header_silence(self):
        with open(self.silence, 'rb') as fileobj:
            header = ID3Header(fileobj)
            self.assertEquals(header.version, (2, 3, 0))
            self.assertEquals(header.size, 1314)

    def test_header_2_4_invalid_flags(self):
        # 0x1f sets undefined flag bits for v2.4.
        fileobj = BytesIO(b'ID3\x04\x00\x1f\x00\x00\x00\x00')
        self.assertRaises(ID3Error, ID3Header, fileobj)

    def test_header_2_4_unsynch_size(self):
        # 0xff is not a valid synchsafe size byte.
        fileobj = BytesIO(b'ID3\x04\x00\x10\x00\x00\x00\xff')
        self.assertRaises(ID3Error, ID3Header, fileobj)

    def test_header_2_4_allow_footer(self):
        fileobj = BytesIO(b'ID3\x04\x00\x10\x00\x00\x00\x00')
        self.assertTrue(ID3Header(fileobj).f_footer)

    def test_header_2_3_invalid_flags(self):
        fileobj = BytesIO(b'ID3\x03\x00\x1f\x00\x00\x00\x00')
        self.assertRaises(ID3Error, ID3Header, fileobj)
        fileobj = BytesIO(b'ID3\x03\x00\x0f\x00\x00\x00\x00')
        self.assertRaises(ID3Error, ID3Header, fileobj)

    def test_header_2_2(self):
        fileobj = BytesIO(b'ID3\x02\x00\x00\x00\x00\x00\x00')
        header = ID3Header(fileobj)
        self.assertEquals(header.version, (2, 2, 0))

    def test_header_2_1(self):
        # v2.1 is older than the supported range.
        fileobj = BytesIO(b'ID3\x01\x00\x00\x00\x00\x00\x00')
        self.assertRaises(ID3UnsupportedVersionError, ID3Header, fileobj)

    def test_header_too_small(self):
        fileobj = BytesIO(b'ID3\x01\x00\x00\x00\x00\x00')
        self.assertRaises(ID3Error, ID3Header, fileobj)

    def test_header_2_4_extended(self):
        fileobj = BytesIO(b'ID3\x04\\x00\x00\x00\x00\x00\x00\x00\x05Z')
        header = ID3Header(fileobj)
        self.assertEquals(header._extdata, b'Z')

    def test_header_2_4_extended_unsynch_size(self):
        fileobj = BytesIO(b'ID3\x04\\x00\x00\x00\x00\x00\x00\x00\xffZ')
        self.assertRaises(ID3Error, ID3Header, fileobj)

    def test_header_2_4_extended_but_not(self):
        # Extended flag set but a frame header follows: no extended data.
        fileobj = BytesIO(b'ID3\x04\\x00\x00\x00\x00TIT1\x00\x00\x00\x01a')
        header = ID3Header(fileobj)
        self.assertEquals(header._extdata, b'')

    def test_header_2_4_extended_but_not_but_not_tag(self):
        fileobj = BytesIO(b'ID3\x04\\x00\x00\x00\x00TIT9')
        self.failUnlessRaises(ID3Error, ID3Header, fileobj)

    def test_header_2_3_extended(self):
        fileobj = BytesIO(b'ID3\x03\\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00Vx\x9a\xbc')
        header = ID3Header(fileobj)
        self.assertEquals(header._extdata, b'\x00\x00Vx\x9a\xbc')

    def test_negative_header_size(self):
        fileobj = BytesIO(b'ID3\\x00\x00\x00\x00\x00\x00\x00\x00')
        self.failUnlessRaises(ID3Error, ID3Header, fileobj)

    def test_23(self):
        # Full-tag smoke test over the silence fixture.
        id3 = ID3(self.silence)
        self.assertEqual(id3.version, (2, 3, 0))
        self.assertEquals(8, len(id3.keys()))
        self.assertEquals(0, len(id3.unknown_frames))
        self.assertEquals('Quod Libet Test Data', id3['TALB'])
        self.assertEquals('Silence', str(id3['TCON']))
        self.assertEquals('Silence', str(id3['TIT1']))
        self.assertEquals('Silence', str(id3['TIT2']))
        self.assertEquals(3000, (+ id3['TLEN']))
        self.assertEquals(['piman', 'jzig'], id3['TPE1'])
        self.assertEquals('02/10', id3['TRCK'])
        self.assertEquals(2, (+ id3['TRCK']))
        self.assertEquals('2004', id3['TDRC'])
class OutputEvent(OutputView):
    """FBD output port that forwards a widget event to a linked InputEvent.

    Rendered as a clickable orange rectangle plus a right-aligned label; the
    rectangle turns green while linked.
    """
    placeholder = None       # SvgRectangle marking the port location
    label = None             # SvgText showing the port name
    event_connector = None   # gui.ClassEventConnector driving the event

    def __init__(self, name, event_connector, *args, **kwargs):
        self.event_connector = event_connector
        # Initialise the SVG container directly — presumably skipping
        # OutputView.__init__ because geometry is laid out later; confirm.
        gui.SvgSubcontainer.__init__(self, 0, 0, 0, 0, *args, **kwargs)
        self.placeholder = gui.SvgRectangle(0, 0, 0, 0)
        self.placeholder.set_stroke(1, 'black')
        self.placeholder.set_fill('orange')
        self.placeholder.style['cursor'] = 'pointer'
        self.append(self.placeholder)
        self.label = gui.SvgText('100%', '50%', name)
        self.label.attr_dominant_baseline = 'middle'
        self.label.attr_text_anchor = 'end'
        self.label.style['cursor'] = 'pointer'
        self.append(self.label)
        FBD_model.Output.__init__(self, name, *args, **kwargs)

    def link(self, destination, container):
        """Wire this event output to *destination* (must be an InputEvent)."""
        if (not issubclass(type(destination), InputEvent)):
            return
        self.placeholder.set_fill('green')
        # Route the connector's event to the destination's callback.
        gui.ClassEventConnector.do(self.event_connector, destination.event_callback)
        OutputView.link(self, destination, container)

    def unlink(self, destination=None):
        """Detach the event route and restore the unlinked appearance."""
        self.placeholder.set_fill('orange')
        gui.ClassEventConnector.do(self.event_connector, None)
        FBD_model.Output.unlink(self, destination)
def test_run_pyscript_with_exception(base_app, request):
    """A pyscript that raises should surface the traceback on stderr while
    run_pyscript itself still reports success."""
    script = os.path.join(os.path.dirname(request.module.__file__),
                          'pyscript', 'raises_exception.py')
    out, err = run_cmd(base_app, 'run_pyscript {}'.format(script))
    assert err[0].startswith('Traceback')
    assert "TypeError: unsupported operand type(s) for +: 'int' and 'str'" in err[-1]
    assert base_app.last_result is True
def patch_model_repository_get_repository(monkeypatch, get_repository):
    """Patch model.repository.get_repository with a stub.

    When *get_repository* is a visibility name, the stub returns a Mock repo
    whose visibility.name equals it; when None, the stub returns None.
    """
    if get_repository is None:
        def fake_get_repository(base_namespace, base_repository):
            return None
    else:
        def fake_get_repository(base_namespace, base_repository):
            visibility = Mock()
            visibility.name = get_repository
            return Mock(visibility=visibility)
    monkeypatch.setattr(model.repository, 'get_repository', fake_get_repository)
class LazilyParsedConfig():
    """Base for config sections whose ``_field_*`` attributes parse lazily."""

    def __init__(self, config: dict, steps: tuple = ()):
        self.raw_data = config
        self.steps = steps  # breadcrumb of config keys leading here

    def parse_fields(self):
        """Force evaluation of every cached ``_field_<name>`` attribute."""
        for attribute in self.__dict__:
            _, separator, field_name = attribute.partition('_field_')
            if separator:
                parse_config(getattr(self, field_name))

    def raise_error(self, message, *, extra_steps=()):
        import inspect
        # The calling function's name doubles as the offending field name.
        caller = inspect.currentframe().f_back.f_code.co_name
        raise ConfigurationError(
            message, location=' -> '.join([*self.steps, caller, *extra_steps]))
class SeznamOAuth2Test(OAuth2Test):
    """Tests for the Seznam OAuth2 backend."""

    backend_path = 'social_core.backends.seznam.SeznamOAuth2'
    # NOTE(review): the URL and the expires_in number were garbled in the
    # original (unterminated string / missing literal, a syntax error);
    # restored from the Seznam login API conventions — confirm upstream.
    user_data_url = 'https://login.szn.cz/api/v1/user'
    expected_username = 'krasty'
    access_token_body = json.dumps({
        'access_token': 'foo',
        'account_name': '',
        'expires_in': 3600,
        'oauth_user_id': '0123abcd',
        'refresh_token': 'bar',
        'scopes': ['identity'],
        'token_type': 'bearer'
    })
    user_data_body = json.dumps({
        'email': '',
        'firstname': 'Krasty',
        'lastname': 'Dog',
        'oauth_user_id': '0123abcd',
        'username': 'krasty'
    })

    def test_login(self):
        """Full login flow succeeds against the mocked endpoints."""
        self.do_login()

    def test_partial_pipeline(self):
        """Login resumes correctly from a partial pipeline."""
        self.do_partial_pipeline()
def test_schemafile_and_instancefile(runner, mock_parse_result, in_tmp_dir, tmp_path):
    """--schemafile plus an instance file selects filepath schema loading."""
    touch_files(tmp_path, 'foo.json')
    runner.invoke(cli_main, ['--schemafile', 'schema.json', 'foo.json'])
    assert mock_parse_result.schema_mode == SchemaLoadingMode.filepath
    assert mock_parse_result.schema_path == 'schema.json'
    assert isinstance(mock_parse_result.instancefiles, tuple)
    # Instance files are lazily opened click files pointing at our input.
    instance_names = []
    for instance in mock_parse_result.instancefiles:
        assert isinstance(instance, click.utils.LazyFile)
        instance_names.append(instance.name)
    assert tuple(instance_names) == ('foo.json',)
def test_detect_clearsky_components(detect_clearsky_data):
    """detect_clearsky with return_components yields samples, components, alpha."""
    expected, cs = detect_clearsky_data
    clear_samples, components, alpha = clearsky.detect_clearsky(
        expected['GHI'], cs['ghi'], times=cs.index,
        window_length=10, return_components=True)
    # Detected clear periods match the labelled expectations.
    assert_series_equal(expected['Clear or not'], clear_samples,
                        check_dtype=False, check_names=False)
    assert isinstance(components, OrderedDict)
    # No scaling applied on this clean dataset.
    assert np.allclose(alpha, 0.)
def test_sudo_fail_from_root(host):
    """sudo to an unprivileged user fails on root-only paths; plain sudo keeps root."""
    assert host.user().name == 'root'
    with pytest.raises(AssertionError) as excinfo:
        with host.sudo('unprivileged'):
            assert host.user().name == 'unprivileged'
            # Unprivileged user cannot list under /root.
            host.check_output('ls /root/invalid')
    assert str(excinfo.value).startswith('Unexpected exit code')
    # sudo without a user argument stays (or becomes) root.
    with host.sudo():
        assert host.user().name == 'root'
def ql_syscall_bind(ql: Qiling, sockfd: int, addr: int, addrlen: int):
    """Emulate the bind(2) syscall for the emulated process.

    Reads a sockaddr structure of ``addrlen`` bytes from guest memory at
    ``addr``, interprets it per address family (AF_UNIX / AF_INET /
    AF_INET6) and binds the backing host socket. Returns 0 on success,
    -1 on any failure (bad fd, unsupported family, host-side bind error).
    """
    # Validate the descriptor and make sure it maps to a live socket.
    if (sockfd not in range(NR_OPEN)):
        return (- 1)
    sock: Optional[ql_socket] = ql.os.fd[sockfd]
    if (sock is None):
        return (- 1)
    data = ql.mem.read(addr, addrlen)
    abits = ql.arch.bits
    endian = ql.arch.endian
    # Parse the generic sockaddr header first just to learn the family.
    sockaddr = make_sockaddr(abits, endian)
    sockaddr_obj = sockaddr.from_buffer(data)
    sa_family = sockaddr_obj.sa_family
    dest = None
    regreturn = (- 1)
    if (sa_family == AF_UNIX):
        # sun_path starts right after the 2-byte family field.
        (hpath, vpath) = ql_unix_socket_path(ql, data[2:])
        ql.log.debug(f'Binding socket to "{vpath}"')
        dest = hpath
    elif (sa_family == AF_INET):
        # Re-parse as sockaddr_in to get port and IPv4 address.
        sockaddr = make_sockaddr_in(abits, endian)
        sockaddr_obj = sockaddr.from_buffer(data)
        port = ntohs(ql, sockaddr_obj.sin_port)
        host = inet_ntoa(sockaddr_obj.sin_addr.s_addr)
        if ql.os.bindtolocalhost:
            host = '127.0.0.1'
        # Without host root we cannot bind privileged ports; shift them up.
        if ((not ql.os.root) and (port <= 1024)):
            port = (port + 8000)
        ql.log.debug(f'Binding socket to {host}:{port}')
        dest = (host, port)
    elif ((sa_family == AF_INET6) and ql.os.ipv6):
        # Same treatment for IPv6 when enabled.
        sockaddr_in6 = make_sockaddr_in6(abits, endian)
        sockaddr_obj = sockaddr_in6.from_buffer(data)
        port = ntohs(ql, sockaddr_obj.sin6_port)
        host = inet6_ntoa(sockaddr_obj.sin6_addr.s6_addr)
        if ql.os.bindtolocalhost:
            host = '::1'
        if ((not ql.os.root) and (port <= 1024)):
            port = (port + 8000)
        ql.log.debug(f'Binding socket to {host}:{port}')
        dest = (host, port)
    # dest stays None for unsupported families -> return -1.
    if (dest is not None):
        try:
            sock.bind(dest)
        except (ConnectionError, FileNotFoundError):
            regreturn = (- 1)
        else:
            regreturn = 0
    return regreturn
def paginate(start_id_kwarg_name='start_id', limit_kwarg_name='limit', callback_kwarg_name='pagination_callback'):
    """Decorate a Flask view with cursor-based pagination.

    The wrapped view receives three extra keyword arguments:
      * ``limit``: page size from ``?n=``, clamped to [1, _MAX_RESULTS_PER_PAGE];
      * ``start_id``: id to start after, decoded from the ``next_page``
        (or legacy ``last``) request token, or None for the first page;
      * ``pagination_callback(results, response)``: the view calls this with
        the page's results and the response; it sets an RFC-5988
        ``Link: rel="next"`` header when another page exists.
    """
    import functools

    def wrapper(func):
        # NOTE(review): the original body contained a bare ``(func)``
        # expression here, apparently a stripped ``@functools.wraps(func)``
        # decorator; restored so the wrapped view keeps its metadata —
        # confirm against project history.
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            # Requested page size; anything unparsable collapses to the minimum.
            try:
                requested_limit = int(request.args.get('n', _MAX_RESULTS_PER_PAGE))
            except ValueError:
                requested_limit = 0
            limit = max(min(requested_limit, _MAX_RESULTS_PER_PAGE), 1)
            # 'last' is the legacy name of the cursor parameter.
            next_page_token = request.args.get('next_page', request.args.get('last', None))
            start_id = None
            page_info = decrypt_page_token(next_page_token)
            if (page_info is not None):
                start_id = page_info.get('start_id', None)

            def callback(results, response):
                # A short page means this was the last one: no Link header.
                if (len(results) <= limit):
                    return
                next_page_token = encrypt_page_token({'start_id': max([obj.id for obj in results])})
                link_url = os.path.join(get_app_url(), url_for(request.endpoint, **request.view_args))
                link_param = urlencode({'n': limit, 'next_page': next_page_token})
                link = ('<%s?%s>; rel="next"' % (link_url, link_param))
                response.headers['Link'] = link

            kwargs[limit_kwarg_name] = limit
            kwargs[start_id_kwarg_name] = start_id
            kwargs[callback_kwarg_name] = callback
            return func(*args, **kwargs)
        return wrapped
    return wrapper
class CheckpointDataLoader(DataLoader):
    """DataLoader that can resume mid-epoch from a training checkpoint.

    Picks a Random/SequentialSampler initialised from *checkpoint* so the
    iteration order resumes, and records ``checkpoint_batch_idx`` — the
    batch index at which training should continue.
    """

    def __init__(self, dataset, checkpoint=None, batch_size=1, shuffle=False,
                 num_workers=0, pin_memory=False, drop_last=True, timeout=0,
                 worker_init_fn=None):
        # Both samplers accept the checkpoint so iteration can resume.
        if shuffle:
            sampler = RandomSampler(dataset, checkpoint)
        else:
            sampler = SequentialSampler(dataset, checkpoint)
        if (checkpoint is not None):
            self.checkpoint_batch_idx = checkpoint['batch_idx']
        else:
            self.checkpoint_batch_idx = 0
        # BUG FIX: worker_init_fn was previously hard-coded to None in the
        # super() call, silently discarding a caller-supplied initializer;
        # it is now forwarded. shuffle=False is required when a sampler is given.
        super(CheckpointDataLoader, self).__init__(
            dataset, sampler=sampler, shuffle=False, batch_size=batch_size,
            num_workers=num_workers, drop_last=drop_last,
            pin_memory=pin_memory, timeout=timeout,
            worker_init_fn=worker_init_fn)
class Solution(object):
    def insertionSortList(self, head):
        """Sort a singly linked list in ascending order by insertion sort.

        Returns the new head. O(n^2) time, O(1) extra space.
        """
        if head is None:
            return None
        # Sentinel node anchoring the sorted portion of the list.
        sentinel = ListNode(-1000)
        node = head
        scan = sentinel
        while node is not None:
            remaining = node.next
            # Walk forward until the insertion point is found.
            while scan.next and scan.next.val < node.val:
                scan = scan.next
            # Splice the current node into the sorted portion.
            node.next = scan.next
            scan.next = node
            # Restart scanning from the sentinel for the next element.
            scan = sentinel
            node = remaining
        return sentinel.next
def est_dof_support(coef, intercept=None, transform=None, zero_tol=1e-06):
    """Estimate degrees of freedom as the size of the coefficient support.

    Counts the entries of ``transform(coef)`` (or ``coef`` when no
    transform is given) whose magnitude exceeds *zero_tol*, then adds the
    number of intercept values.
    """
    coef = np.array(coef)
    # Apply the optional transform before counting the support.
    values = coef if transform is None else transform(coef)
    n_nonzero_coef = count_support(values, zero_tol=zero_tol)
    n_vals_intercept = 0 if intercept is None else np.array(intercept).size
    return n_nonzero_coef + n_vals_intercept
class _WebEngineScripts(QObject):
    """Manages the JavaScript injected into one QtWebEngine tab.

    Covers qutebrowser's built-in helper scripts, the user stylesheet,
    Greasemonkey userscripts and site-specific quirk scripts.
    """

    _widget: webview.WebEngineView

    def __init__(self, tab, parent=None):
        super().__init__(parent)
        self._tab = tab
        # The widget is attached later; the cast keeps the annotation honest.
        self._widget = cast(webview.WebEngineView, None)
        self._greasemonkey = greasemonkey.gm_manager

    def connect_signals(self):
        """Connect config/search signals that trigger stylesheet updates."""
        config.instance.changed.connect(self._on_config_changed)
        self._tab.search.cleared.connect(functools.partial(self._update_stylesheet, searching=False))
        self._tab.search.finished.connect(self._update_stylesheet)

    # NOTE(review): bare ``(str)`` looks like a stripped ``@pyqtSlot(str)``
    # decorator — confirm against upstream.
    (str)
    def _on_config_changed(self, option):
        # Only these options require re-injecting the stylesheet helper.
        if (option in ['scrolling.bar', 'content.user_stylesheets']):
            self._init_stylesheet()
            self._update_stylesheet()

    # NOTE(review): bare ``(bool)`` looks like a stripped ``@pyqtSlot(bool)``.
    (bool)
    def _update_stylesheet(self, searching=False):
        """Push the current user CSS into the already-injected helper."""
        css = shared.get_user_stylesheet(searching=searching)
        code = javascript.assemble('stylesheet', 'set_css', css)
        self._tab.run_js_async(code)

    def _inject_js(self, name, js_code, *,
                   world=QWebEngineScript.ScriptWorldId.ApplicationWorld,
                   injection_point=QWebEngineScript.InjectionPoint.DocumentCreation,
                   subframes=False):
        """Register a script named ``_qute_<name>`` on the page."""
        script = QWebEngineScript()
        script.setInjectionPoint(injection_point)
        script.setSourceCode(js_code)
        script.setWorldId(world)
        script.setRunsOnSubFrames(subframes)
        script.setName(f'_qute_{name}')
        self._widget.page().scripts().insert(script)

    def _remove_js(self, name):
        """Remove an injected script, handling the Qt 5/6 API difference."""
        scripts = self._widget.page().scripts()
        if machinery.IS_QT6:
            # Qt 6: find() returns a list of matching scripts.
            for script in scripts.find(f'_qute_{name}'):
                scripts.remove(script)
        else:
            # Qt 5: findScript() returns a single (possibly null) script.
            script = scripts.findScript(f'_qute_{name}')
            if (not script.isNull()):
                scripts.remove(script)

    def init(self):
        """Inject the global helper scripts, stylesheet, GM scripts, quirks."""
        js_code = javascript.wrap_global('scripts',
                                         resources.read_file('javascript/scroll.js'),
                                         resources.read_file('javascript/webelem.js'),
                                         resources.read_file('javascript/caret.js'))
        self._inject_js('js', js_code, subframes=True)
        self._init_stylesheet()
        # Re-inject whenever the Greasemonkey manager reloads its scripts.
        self._greasemonkey.scripts_reloaded.connect(self._inject_all_greasemonkey_scripts)
        self._inject_all_greasemonkey_scripts()
        self._inject_site_specific_quirks()

    def _init_stylesheet(self):
        """(Re-)inject the stylesheet helper with the current user CSS."""
        self._remove_js('stylesheet')
        css = shared.get_user_stylesheet()
        js_code = javascript.wrap_global('stylesheet',
                                         resources.read_file('javascript/stylesheet.js'),
                                         javascript.assemble('stylesheet', 'set_css', css))
        self._inject_js('stylesheet', js_code, subframes=True)

    # NOTE(review): bare ``()`` looks like a stripped ``@pyqtSlot()``.
    ()
    def _inject_all_greasemonkey_scripts(self):
        scripts = self._greasemonkey.all_scripts()
        self._inject_greasemonkey_scripts(scripts)

    def _remove_all_greasemonkey_scripts(self):
        """Drop every previously injected Greasemonkey ('GM-') script."""
        page_scripts = self._widget.page().scripts()
        for script in page_scripts.toList():
            if script.name().startswith('GM-'):
                log.greasemonkey.debug('Removing script: {}'.format(script.name()))
                removed = page_scripts.remove(script)
                assert removed, script.name()

    def _inject_greasemonkey_scripts(self, scripts):
        """Register userscripts on the page, replacing any existing ones."""
        if sip.isdeleted(self._widget):
            # The tab is being torn down; nothing to inject into.
            return
        page_scripts = self._widget.page().scripts()
        self._remove_all_greasemonkey_scripts()
        seen_names = set()
        for script in scripts:
            # Disambiguate duplicate script names with a numeric suffix.
            while (script.full_name() in seen_names):
                script.dedup_suffix += 1
            seen_names.add(script.full_name())
            new_script = QWebEngineScript()
            # jsworld may be a numeric world id or a symbolic world name.
            try:
                world = int(script.jsworld)
                if (not (0 <= world <= qtutils.MAX_WORLD_ID)):
                    log.greasemonkey.error(f"script {script.name} has invalid value for '-js-world': {script.jsworld}, should be between 0 and {qtutils.MAX_WORLD_ID}")
                    continue
            except ValueError:
                try:
                    world = _JS_WORLD_MAP[usertypes.JsWorld[script.jsworld.lower()]]
                except KeyError:
                    log.greasemonkey.error(f"script {script.name} has invalid value for '-js-world': {script.jsworld}")
                    continue
            new_script.setWorldId(world)
            new_script.setInjectionPoint(QWebEngineScript.InjectionPoint.DocumentReady)
            new_script.setSourceCode(script.code())
            new_script.setName(script.full_name())
            new_script.setRunsOnSubFrames(script.runs_on_sub_frames)
            if script.needs_document_end_workaround():
                # NOTE(review): message looks garbled ('@run' apparently
                # stripped); left byte-identical.
                log.greasemonkey.debug(f'Forcing -at document-end for {script.name}')
                new_script.setInjectionPoint(QWebEngineScript.InjectionPoint.DocumentReady)
            log.greasemonkey.debug(f'adding script: {new_script.name()}')
            page_scripts.insert(new_script)

    def _get_quirks(self):
        """Return the list of site-specific quirk scripts."""
        versions = version.qtwebengine_versions()
        return [
            _Quirk('whatsapp_web',
                   injection_point=QWebEngineScript.InjectionPoint.DocumentReady,
                   world=QWebEngineScript.ScriptWorldId.ApplicationWorld),
            _Quirk('discord'),
            _Quirk('googledocs', name='ua-googledocs'),
            # JS polyfills for features missing in older QtWebEngine.
            _Quirk('string_replaceall', predicate=(versions.webengine < utils.VersionNumber(5, 15, 3))),
            _Quirk('array_at', predicate=(versions.webengine < utils.VersionNumber(6, 3)))
        ]

    def _inject_site_specific_quirks(self):
        """Inject applicable quirk scripts unless disabled or skipped."""
        if (not config.val.content.site_specific_quirks.enabled):
            return
        for quirk in self._get_quirks():
            if (not quirk.predicate):
                continue
            src = resources.read_file(f'javascript/quirks/{quirk.filename}.user.js')
            if (quirk.name not in config.val.content.site_specific_quirks.skip):
                self._inject_js(f'quirk_{quirk.filename}', src,
                                world=quirk.world,
                                injection_point=quirk.injection_point)
class SwishJitAutoFn(torch.autograd.Function):
    """Memory-efficient Swish (x * sigmoid(x)) autograd function.

    Saves only the input and recomputes sigmoid in backward instead of
    keeping the activation.
    """

    # NOTE(review): the @staticmethod decorators appeared to have been
    # stripped from the original; torch.autograd.Function requires its
    # forward/backward (and symbolic) to be static methods.
    @staticmethod
    def symbolic(g, x):
        """ONNX export: Mul(x, Sigmoid(x))."""
        return g.op('Mul', x, g.op('Sigmoid', x))

    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return swish_jit_fwd(x)

    @staticmethod
    def backward(ctx, grad_output):
        x = ctx.saved_tensors[0]
        return swish_jit_bwd(x, grad_output)
def get_purpose_features(repo_path, branch):
    """Extract per-commit 'purpose' features from a git repository.

    Walks *branch* topologically oldest-first and returns, for each commit,
    ``[hex_sha, flag]`` (both strings) where flag is "1.0" for bug-fixing
    commits according to ``is_fix`` and "0.0" otherwise.
    """
    repo = Repository(repo_path)
    head = repo.references.get(branch)
    commits = list(repo.walk(head.target, (GIT_SORT_TOPOLOGICAL | GIT_SORT_REVERSE)))
    features = []
    # tqdm only adds a progress bar; iteration order is unchanged.
    # (Previously used enumerate() with a discarded index.)
    for commit in tqdm(commits):
        fix = 1.0 if is_fix(commit.message) else 0.0
        # Values are serialized as strings for downstream CSV writing.
        features.append([str(commit.hex), str(fix)])
    return features
class HierarchicalConcurrent(nn.Sequential):
    """Container running all branches on the same input, concatenating results.

    Each branch's output is accumulated (in place, via ``+=``) onto the
    previous branch's output before collection, so branch *i* contributes
    the running sum of branches ``0..i``. Results are concatenated along
    ``axis``.
    """

    def __init__(self, axis=1):
        super(HierarchicalConcurrent, self).__init__()
        self.axis = axis

    def forward(self, x):
        collected = []
        running = None
        for branch in self._modules.values():
            out = branch(x)
            if running is not None:
                # In-place accumulation of the previous branch's result.
                out += running
            collected.append(out)
            running = out
        return torch.cat(tuple(collected), dim=self.axis)
def check_changelog_urls(_args: argparse.Namespace=None) -> bool:
    """Check changelog-URL coverage of all pinned requirements files.

    Returns True when every requirement name appears in
    ``recompile_requirements.CHANGELOG_URLS`` and no stale (extra) entries
    remain there; prints colored diagnostics otherwise.
    """
    ok = True
    all_requirements = set()
    for name in recompile_requirements.get_all_names():
        outfile = recompile_requirements.get_outfile(name)
        missing = set()
        with open(outfile, 'r', encoding='utf-8') as f:
            for raw_line in f:
                stripped = raw_line.strip()
                # Skip blanks and comments.
                if not stripped or stripped.startswith('#'):
                    continue
                req, _version = recompile_requirements.parse_versioned_line(stripped)
                # Local (path) requirements need no changelog URL.
                if req.startswith('./'):
                    continue
                # Drop anything after the name (markers, hashes).
                if ' ' in req:
                    req = req.split(' ')[0]
                all_requirements.add(req)
                if req not in recompile_requirements.CHANGELOG_URLS:
                    missing.add(req)
        if missing:
            ok = False
            req_str = ', '.join(sorted(missing))
            utils.print_col(f'Missing changelog URLs in {name} requirements: {req_str}', 'red')
    # Entries in CHANGELOG_URLS that no requirements file references.
    extra = set(recompile_requirements.CHANGELOG_URLS) - all_requirements
    if extra:
        ok = False
        req_str = ', '.join(sorted(extra))
        utils.print_col(f'Extra changelog URLs: {req_str}', 'red')
    if not ok:
        print('Hint: Changelog URLs are in scripts/dev/changelog_urls.json')
    return ok
def _worker_rollout_policy(G, args):
    """Sample a perturbed parameter vector and roll out one episode.

    Draws params ~ N(cur_mean, sample_std^2) elementwise, sets them on the
    worker's policy, runs a rollout, annotates the path with discounted and
    undiscounted returns, and returns ``((params, path), increment)`` where
    the increment is counted in samples or paths per ``args['criterion']``.
    """
    sample_std = args['sample_std'].flatten()
    cur_mean = args['cur_mean'].flatten()
    n_params = len(cur_mean)
    noise = np.random.standard_normal(n_params)
    params = (noise * sample_std) + cur_mean
    G.policy.set_param_values(params)
    path = rollout(G.env, G.policy, args['max_path_length'])
    path['returns'] = discount_cumsum(path['rewards'], args['discount'])
    path['undiscounted_return'] = sum(path['rewards'])
    criterion = args['criterion']
    if criterion == 'samples':
        inc = len(path['rewards'])
    elif criterion == 'paths':
        inc = 1
    else:
        raise NotImplementedError
    return ((params, path), inc)
class Lingeling(object):
    """PySAT wrapper around the Lingeling SAT solver (via ``pysolvers``).

    Supports one-shot solving with assumptions, model/core extraction and
    DRUP proof tracing. Incremental, warm-start and budgeted solving are
    not supported by this backend and raise NotImplementedError.
    """

    def __init__(self, bootstrap_with=None, use_timer=False, incr=False, with_proof=False, warm_start=False):
        """Create a solver, optionally bootstrapping it with a formula."""
        if incr:
            raise NotImplementedError('Incremental mode is not supported by Lingeling.')
        if warm_start:
            raise NotImplementedError('Warm-start mode is not supported by Lingeling.')
        self.lingeling = None  # handle to the native solver object
        self.status = None     # result of the last solve() call
        self.prfile = None     # temp file collecting the DRUP proof, if enabled
        self.new(bootstrap_with, use_timer, with_proof)

    def __enter__(self):
        """Context-manager entry."""
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Context-manager exit: release the native solver."""
        self.delete()
        self.lingeling = None

    def new(self, bootstrap_with=None, use_timer=False, with_proof=False):
        """Construct the native solver and load the bootstrap formula."""
        if (not self.lingeling):
            self.lingeling = pysolvers.lingeling_new()
            if bootstrap_with:
                # CNFPlus atmost constraints cannot be encoded natively.
                if ((type(bootstrap_with) == CNFPlus) and bootstrap_with.atmosts):
                    raise NotImplementedError('Atmost constraints are not supported by Lingeling')
                for clause in bootstrap_with:
                    self.add_clause(clause)
            self.use_timer = use_timer
            self.call_time = 0.0  # time spent in the most recent call
            self.accu_time = 0.0  # total time spent across all calls
            if with_proof:
                self.prfile = tempfile.TemporaryFile()
                pysolvers.lingeling_tracepr(self.lingeling, self.prfile)

    def delete(self):
        """Destroy the native solver and close the proof file, if any."""
        if self.lingeling:
            pysolvers.lingeling_del(self.lingeling, self.prfile)
            self.lingeling = None
            if self.prfile:
                self.prfile.close()

    def solve(self, assumptions=[]):
        """Solve under *assumptions*; returns and caches True/False.

        NOTE: the mutable default ``assumptions=[]`` follows the PySAT
        interface and is never mutated here.
        """
        if self.lingeling:
            if self.use_timer:
                start_time = process_time()
            # MainThread.check() tells the native code whether installing
            # signal handlers is safe (i.e. we are in the main thread).
            self.status = pysolvers.lingeling_solve(self.lingeling, assumptions, int(MainThread.check()))
            if self.use_timer:
                self.call_time = (process_time() - start_time)
                self.accu_time += self.call_time
            # Remember the assumptions for a later get_core() call.
            self.prev_assumps = assumptions
            return self.status

    def start_mode(self, warm=False):
        raise NotImplementedError('Warm-start mode is currently unsupported by Lingeling.')

    def solve_limited(self, assumptions=[], expect_interrupt=False):
        raise NotImplementedError('Limited solve is currently unsupported by Lingeling.')

    def conf_budget(self, budget):
        raise NotImplementedError('Limited solve is currently unsupported by Lingeling.')

    def prop_budget(self, budget):
        raise NotImplementedError('Limited solve is currently unsupported by Lingeling.')

    def dec_budget(self, budget):
        raise NotImplementedError('Limited solve is currently unsupported by Lingeling.')

    def interrupt(self):
        raise NotImplementedError('Limited solve is currently unsupported by Lingeling.')

    def clear_interrupt(self):
        raise NotImplementedError('Limited solve is currently unsupported by Lingeling.')

    def propagate(self, assumptions=[], phase_saving=0):
        raise NotImplementedError('Simple literal propagation is not yet implemented for Lingeling.')

    def set_phases(self, literals=[]):
        """Set preferred polarities for the given literals."""
        if self.lingeling:
            pysolvers.lingeling_setphases(self.lingeling, literals)

    def get_status(self):
        """Return the cached result of the last solve() call."""
        if self.lingeling:
            return self.status

    def get_model(self):
        """Return the satisfying assignment of the last SAT call (or [])."""
        if (self.lingeling and (self.status == True)):
            model = pysolvers.lingeling_model(self.lingeling)
            return (model if (model != None) else [])

    def get_core(self):
        """Return the unsat core w.r.t. the last call's assumptions."""
        if (self.lingeling and (self.status == False)):
            return pysolvers.lingeling_core(self.lingeling, self.prev_assumps)

    def get_proof(self):
        """Return the recorded DRUP proof as a list of text lines."""
        if (self.lingeling and self.prfile):
            self.prfile.seek(0)
            return [line.rstrip().decode('ascii') for line in self.prfile.readlines()]

    def time(self):
        """Time spent in the most recent solver call (if timing enabled)."""
        if self.lingeling:
            return self.call_time

    def time_accum(self):
        """Accumulated time spent across all solver calls."""
        if self.lingeling:
            return self.accu_time

    def nof_vars(self):
        """Number of variables currently in the solver."""
        if self.lingeling:
            return pysolvers.lingeling_nof_vars(self.lingeling)

    def nof_clauses(self):
        """Number of clauses currently in the solver."""
        if self.lingeling:
            return pysolvers.lingeling_nof_cls(self.lingeling)

    def accum_stats(self):
        """Accumulated native solver statistics."""
        if self.lingeling:
            return pysolvers.lingeling_acc_stats(self.lingeling)

    def enum_models(self, assumptions=[]):
        """Yield all models, blocking each found model with its negation.

        NOTE: permanently adds blocking clauses to the solver state.
        """
        if self.lingeling:
            done = False
            while (not done):
                self.status = self.solve(assumptions=assumptions)
                model = self.get_model()
                if (model is not None):
                    self.add_clause([(- l) for l in model])
                    (yield model)
                else:
                    done = True

    def add_clause(self, clause, no_return=True):
        """Add a clause to the solver (``no_return`` is accepted for API parity)."""
        if self.lingeling:
            pysolvers.lingeling_add_cl(self.lingeling, clause)

    def add_atmost(self, lits, k, no_return=True):
        raise NotImplementedError('Atmost constraints are not supported by Lingeling.')

    def add_xor_clause(self, lits, value=True):
        raise NotImplementedError('XOR clauses are supported only by CryptoMinisat')

    def append_formula(self, formula, no_return=True):
        """Add every clause of a CNF *formula* to the solver."""
        if self.lingeling:
            if ((type(formula) == CNFPlus) and formula.atmosts):
                raise NotImplementedError('Atmost constraints are not supported by Lingeling')
            for clause in formula:
                self.add_clause(clause, no_return)

    def supports_atmost(self):
        """Lingeling has no native cardinality-constraint support."""
        return False
def test_apply_patcher_file_newer_version(tmp_path):
    """An outdated internal game copy is deleted and a re-export requested."""
    patcher_data = {}
    randomizer_data = {}
    progress_update = MagicMock()
    game_root = tmp_path.joinpath('game_root')
    game_root.mkdir()
    # Write an absurdly high version so the copy is considered outdated.
    claris_randomizer._patch_version_file(game_root).write_text(str(10000))
    expected_message = 'The internal game copy was outdated and has been deleted. Please export again and select an ISO.'
    with pytest.raises(UnableToExportError, match=expected_message):
        claris_randomizer.apply_patcher_file(game_root, patcher_data, randomizer_data, progress_update)
def construct_outgoing_unicast_answers(answers: _AnswerWithAdditionalsType, ucast_source: bool, questions: List[DNSQuestion], id_: int_) -> DNSOutgoing:
    """Build a unicast DNS response carrying *answers*.

    The original questions are echoed back only when the query arrived
    from a unicast source (legacy unicast / QU handling).
    NOTE: ``int_`` is presumably a project-level alias for ``int`` — confirm.
    """
    response = DNSOutgoing(_FLAGS_QR_RESPONSE_AA, False, id_)
    if ucast_source:
        for query_question in questions:
            response.add_question(query_question)
    _add_answers_additionals(response, answers)
    return response
class TestCustomBuildPy():
    """Errors raised by a user-defined ``build_py`` must not break installs.

    The project defines a custom build_py whose run() raises; installation
    should still succeed (with a deprecation warning) and the package must
    remain importable and functional.
    """

    FILES = {
        **TestOverallBehaviour.EXAMPLES['flat-layout'],
        'setup.py': dedent(' import pathlib\n from setuptools import setup\n from setuptools.command.build_py import build_py as orig\n\n class my_build_py(orig):\n def run(self):\n super().run()\n raise ValueError("TEST_RAISE")\n\n setup(cmdclass={"build_py": my_build_py})\n ')
    }

    def test_safeguarded_from_errors(self, tmp_path, venv):
        _, out = install_project('mypkg', venv, tmp_path, self.FILES)
        # Install succeeds but warns about the swallowed error.
        assert b'SetuptoolsDeprecationWarning' in out
        assert b'ValueError: TEST_RAISE' in out
        # The package is importable and functional despite the failure.
        out = venv.run(['python', '-c', 'import mypkg.mod1; print(mypkg.mod1.var)'])
        assert b'42' in out
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool):
    """Copy a fairseq XLM-RoBERTa-XL checkpoint into a HuggingFace model.

    Loads the fairseq model, builds a matching XLMRobertaConfig, copies
    all weights layer by layer, verifies both models produce (nearly)
    identical outputs on a sample sentence, then saves the converted model.
    Raises if the outputs diverge beyond atol=1e-3.
    """
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    # Mirror the fairseq architecture hyper-parameters in the HF config.
    config = XLMRobertaConfig(vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings, hidden_size=roberta.cfg.model.encoder_embed_dim, num_hidden_layers=roberta.cfg.model.encoder_layers, num_attention_heads=roberta.cfg.model.encoder_attention_heads, intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-05)
    if classification_head:
        config.num_labels = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0]
    print('Our RoBERTa config:', config)
    model = (XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config))
    model.eval()
    # --- Embeddings ---
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    # fairseq has no token-type embeddings; zero them so they are inert.
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(model.roberta.embeddings.token_type_embeddings.weight)
    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias
    # --- Transformer layers: copy attention + FFN weights per layer ---
    for i in range(config.num_hidden_layers):
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias
        # Self-attention projections: fairseq q/k/v -> HF query/key/value.
        self_attn: BertSelfAttention = layer.attention.self
        assert (roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size)))
        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias
        # Attention output projection.
        self_output: BertSelfOutput = layer.attention.output
        assert (self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape)
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias
        # Feed-forward: fc1 -> intermediate, fc2 -> output.
        intermediate: BertIntermediate = layer.intermediate
        assert (intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape)
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias
        bert_output: BertOutput = layer.output
        assert (bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape)
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
    # --- Task head (classification or LM) ---
    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads['mnli'].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads['mnli'].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads['mnli'].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads['mnli'].out_proj.bias
    else:
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias
    # --- Sanity check: both models must agree on a sample sentence ---
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)
    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads['mnli'](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs((our_output - their_output))).item()
    print(f'max_absolute_diff = {max_absolute_diff}')
    success = torch.allclose(our_output, their_output, atol=0.001)
    # NOTE(review): both branches print an empty string — apparently
    # stripped emoji markers in the original; left byte-identical.
    print('Do both models output the same tensors?', ('' if success else ''))
    if (not success):
        raise Exception('Something went wRoNg')
    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
# NOTE(review): the ``@pytest.mark`` prefixes of these parametrize
# decorators were missing (apparently stripped during extraction);
# restored — the file already uses ``pytest.param`` elsewhere.
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('snapshot_id', snapshots)
def test_list_snapshot(db, client, username, password, snapshot_id):
    """Listing values filtered by snapshot honours per-user permissions."""
    client.login(username=username, password=password)
    url = (reverse(urlnames['list']) + f'?snapshot={snapshot_id}')
    response = client.get(url)
    if password:
        assert (response.status_code == 200)
        assert isinstance(response.json(), list)
        if (username == 'user'):
            # Plain users may not see any snapshot values.
            assert (sorted([item['id'] for item in response.json()]) == [])
        else:
            # Others see exactly the values their project permissions allow.
            values_list = Value.objects.filter(project__in=view_value_permission_map.get(username, [])).filter(snapshot_id=snapshot_id).order_by('id').values_list('id', flat=True)
            assert (sorted([item['id'] for item in response.json()]) == list(values_list))
    else:
        # Anonymous access is rejected.
        assert (response.status_code == 401)
# NOTE(review): restored the stripped ``@pytest.mark`` decorator prefix.
@pytest.mark.parametrize(('pyproject_toml', 'parse_output'), [
    # No backend specified: fall back to the setuptools legacy backend.
    ({'build-system': {'requires': ['foo']}},
     {'requires': ['foo'], 'build-backend': 'setuptools.build_meta:__legacy__'}),
    # Explicit backend.
    ({'build-system': {'requires': ['foo'], 'build-backend': 'bar'}},
     {'requires': ['foo'], 'build-backend': 'bar'}),
    # Explicit backend with an in-tree backend path.
    ({'build-system': {'requires': ['foo'], 'build-backend': 'bar', 'backend-path': ['baz']}},
     {'requires': ['foo'], 'build-backend': 'bar', 'backend-path': ['baz']})
])
def test_parse_valid_build_system_table_type(pyproject_toml, parse_output):
    """Valid build-system tables are normalized as expected."""
    assert (build._parse_build_system_table(pyproject_toml) == parse_output)
def import_tf(): warnings.filterwarnings('ignore', category=FutureWarning) try: import tensorflow as tf tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) module = tf except ImportError: module = None warnings.filterwarnings('default', category=FutureWarning) return module
class ImageValidator(ContentTypeValidator):
    """Validate image dimensions, aspect ratio and (optionally) content type."""

    def __init__(self, minimum: Dimension=None, maximum: Dimension=None, content_types=None, min_aspect_ratio: float=None, max_aspect_ratio: float=None):
        # Unpack (width, height) bounds; 0 disables the corresponding check.
        (self.min_width, self.min_height) = minimum if minimum else (0, 0)
        (self.max_width, self.max_height) = maximum if maximum else (0, 0)
        self.min_aspect_ratio = min_aspect_ratio
        self.max_aspect_ratio = max_aspect_ratio
        # The parent initializer (which sets ``content_types``) is only run
        # when types are supplied; ``process`` checks for the attribute.
        if content_types:
            super().__init__(content_types=content_types)

    def process(self, descriptor: StreamDescriptor, context: dict) -> None:
        """Validate the analyzed image described by *context*; raise on violations."""
        if hasattr(self, 'content_types'):
            super().process(descriptor, context)
        width = context.get('width')
        height = context.get('height')
        # Missing or zero dimensions mean analysis failed.
        if not (abs(width or 0) and abs(height or 0)):
            raise DimensionValidationError('width and height are not found in analyze_result.')
        if self.min_width and self.min_width > width:
            raise DimensionValidationError(f'Minimum allowed width is: {self.min_width: d}, but the {width: d} is given.')
        if self.min_height and self.min_height > height:
            raise DimensionValidationError(f'Minimum allowed height is: {self.min_height: d}, but the {height: d} is given.')
        if self.max_width and self.max_width < width:
            raise DimensionValidationError(f'Maximum allowed width is: {self.max_width: d}, but the {width: d} is given.')
        if self.max_height and self.max_height < height:
            raise DimensionValidationError(f'Maximum allowed height is: {self.max_height: d}, but the {height: d} is given.')
        # Safe: height is non-zero by the emptiness check above.
        aspect_ratio = width / height
        too_narrow = self.min_aspect_ratio and self.min_aspect_ratio > aspect_ratio
        too_wide = self.max_aspect_ratio and self.max_aspect_ratio < aspect_ratio
        if too_narrow or too_wide:
            raise AspectRatioValidationError(f'Invalid aspect ratio {width} / {height} = {aspect_ratio},accepted_range: {self.min_aspect_ratio} - {self.max_aspect_ratio}')
def create_shared(name, initial_value, dtype='floatX', strict=False, allow_downcast=True):
    """Create a named Theano shared variable from *initial_value*.

    ``dtype='floatX'`` resolves to ``theano.config.floatX``; the value is
    made C-contiguous before wrapping.
    """
    if dtype == 'floatX':
        dtype = theano.config.floatX
    contiguous_value = np.ascontiguousarray(initial_value, dtype=dtype)
    return theano.shared(contiguous_value, name=name, strict=strict, allow_downcast=allow_downcast)
def test_history_expanded(base_app):
    """``history -x`` shows entries with aliases/shortcuts expanded."""
    for command in ('alias create s shortcuts', 's'):
        run_cmd(base_app, command)
    out, err = run_cmd(base_app, 'history -x')
    # NOTE(review): expected spacing may have been collapsed during
    # extraction; reproduced exactly as found.
    assert out == [' 1 alias create s shortcuts', ' 2 shortcuts']
    verify_hi_last_result(base_app, 2)
# NOTE(review): the class decorator lost its callee during extraction,
# leaving a bare ``(eq=False, hash=False, slots=True, repr=False)`` (a
# syntax error); restored as ``@attr.s(...)`` since the fields use
# ``attr.ib`` — confirm upstream.
@attr.s(eq=False, hash=False, slots=True, repr=False)
class RunVar(Generic[T]):
    """A run-local variable, like contextvars but scoped to a single run."""

    _name: str = attr.ib()
    _default: (T | type[_NoValue]) = attr.ib(default=_NoValue)

    def get(self, default: (T | type[_NoValue])=_NoValue) -> T:
        """Return this variable's value for the current run.

        Falls back to the call-site default, then the declared default;
        raises LookupError when neither is set, or RuntimeError outside
        a run context.
        """
        try:
            return cast(T, _run.GLOBAL_RUN_CONTEXT.runner._locals[self])
        except AttributeError:
            raise RuntimeError('Cannot be used outside of a run context') from None
        except KeyError:
            if (default is not _NoValue):
                return default
            if (self._default is not _NoValue):
                return self._default
            raise LookupError(self) from None

    def set(self, value: T) -> RunVarToken[T]:
        """Set the value for the current run; returns a token for reset()."""
        try:
            old_value = self.get()
        except LookupError:
            token = RunVarToken._empty(self)
        else:
            token = RunVarToken[T]._create(self, old_value)
        _run.GLOBAL_RUN_CONTEXT.runner._locals[self] = value
        return token

    def reset(self, token: RunVarToken[T]) -> None:
        """Restore the value saved in *token*; each token is single-use."""
        if (token is None):
            raise TypeError('token must not be none')
        if token.redeemed:
            raise ValueError('token has already been used')
        if (token._var is not self):
            raise ValueError('token is not for us')
        previous = token.previous_value
        try:
            if (previous is _NoValue):
                # No previous value: remove the entry entirely.
                _run.GLOBAL_RUN_CONTEXT.runner._locals.pop(self)
            else:
                _run.GLOBAL_RUN_CONTEXT.runner._locals[self] = previous
        except AttributeError:
            raise RuntimeError('Cannot be used outside of a run context') from None
        token.redeemed = True

    def __repr__(self) -> str:
        return f'<RunVar name={self._name!r}>'
def main() -> None:
    """Build the bot application, register its handlers, and start polling."""
    application = Application.builder().token('TOKEN').build()
    # Command handlers: /start replies normally, /bad_command raises.
    for command, handler in (('start', start), ('bad_command', bad_command)):
        application.add_handler(CommandHandler(command, handler))
    # All raised errors are routed to error_handler.
    application.add_error_handler(error_handler)
    application.run_polling(allowed_updates=Update.ALL_TYPES)
def NMC_electrolyte_exchange_current_density_PeymanMPM(c_e, c_s_surf, c_s_max, T):
    """Exchange-current density for the NMC/electrolyte interface (Peyman MPM).

    j0 = m_ref * arrhenius * c_e^0.5 * c_s_surf^0.5 * (c_s_max - c_s_surf)^0.5
    with an Arrhenius temperature correction referenced to 298.15 K.
    """
    m_ref = (4.824 * (10 ** (- 6)))  # reference rate constant
    E_r = 39570                      # activation energy [J/mol] — presumably; confirm source
    arrhenius = np.exp((E_r / pybamm.constants.R) * ((1 / 298.15) - (1 / T)))
    # Multiplication order kept identical for bit-for-bit float agreement.
    j0 = m_ref * arrhenius
    j0 = j0 * (c_e ** 0.5)
    j0 = j0 * (c_s_surf ** 0.5)
    j0 = j0 * ((c_s_max - c_s_surf) ** 0.5)
    return j0
def instantiate_generator_class(builder: IRBuilder) -> Value:
    """Allocate the generator object and hook up its environment.

    Stores the current environment register on the generator and resets
    the environment's resume label to 0 (the start of the function body).
    """
    fitem = builder.fn_info.fitem
    gen = builder.add(Call(builder.fn_info.generator_class.ir.ctor, [], fitem.line))
    # Nested functions keep their environment on the callable class.
    if builder.fn_info.is_nested:
        env_reg = builder.fn_info.callable_class.curr_env_reg
    else:
        env_reg = builder.fn_info.curr_env_reg
    builder.add(SetAttr(gen, ENV_ATTR_NAME, env_reg, fitem.line))
    # Execution resumes at label 0 on the first __next__() call.
    builder.add(SetAttr(env_reg, NEXT_LABEL_ATTR_NAME, Integer(0), fitem.line))
    return gen
class CPD_VGG(nn.Module):
    """Cascaded Partial Decoder with a VGG backbone for saliency detection.

    Branch 1 aggregates conv3-conv5 features into an attention map; a
    holistic-attention (HA) module refines the conv3 features, and branch 2
    aggregates the refined features into the final detection map. Both
    outputs are upsampled 4x.
    """

    def __init__(self, channel=32):
        super(CPD_VGG, self).__init__()
        self.vgg = B2_VGG()
        # Branch 1: receptive-field blocks + aggregation -> attention map.
        self.rfb3_1 = RFB(256, channel)
        self.rfb4_1 = RFB(512, channel)
        self.rfb5_1 = RFB(512, channel)
        self.agg1 = aggregation(channel)
        # Branch 2: same structure, fed with attention-refined features.
        self.rfb3_2 = RFB(256, channel)
        self.rfb4_2 = RFB(512, channel)
        self.rfb5_2 = RFB(512, channel)
        self.agg2 = aggregation(channel)
        self.HA = HA()
        self.upsample = nn.Upsample(scale_factor=4, mode='bilinear', align_corners=False)

    def forward(self, x):
        # Shared stem up to conv3.
        feat = self.vgg.conv1(x)
        feat = self.vgg.conv2(feat)
        conv3 = self.vgg.conv3(feat)
        # Branch 1: deeper features + aggregation -> attention map.
        b1_4 = self.vgg.conv4_1(conv3)
        b1_5 = self.vgg.conv5_1(b1_4)
        r3_1 = self.rfb3_1(conv3)
        r4_1 = self.rfb4_1(b1_4)
        r5_1 = self.rfb5_1(b1_5)
        attention = self.agg1(r5_1, r4_1, r3_1)
        # Branch 2: conv3 refined by holistic attention, then re-decoded.
        b2_3 = self.HA(attention.sigmoid(), conv3)
        b2_4 = self.vgg.conv4_2(b2_3)
        b2_5 = self.vgg.conv5_2(b2_4)
        r3_2 = self.rfb3_2(b2_3)
        r4_2 = self.rfb4_2(b2_4)
        r5_2 = self.rfb5_2(b2_5)
        detection = self.agg2(r5_2, r4_2, r3_2)
        return (self.upsample(attention), self.upsample(detection))
def start_server_in_current_thread_session():
    """Run PyWebIO "script mode": start a Tornado websocket server in a
    background thread serving exactly one session bound to the *current*
    thread, optionally open a browser tab, and block until the websocket
    connection is established.
    """
    websocket_conn_opened = threading.Event()
    thread = threading.current_thread()

    class SingleSessionWSHandler(_webio_handler(cdn=False)):
        # Only one session is ever created; later connections are refused.
        session: ScriptModeSession = None
        instance: typing.ClassVar = None
        closed = False

        def send_msg_to_client(self, session):
            # Drain the session's pending commands and push them to the browser.
            for msg in session.get_task_commands():
                try:
                    self.write_message(json.dumps(msg))
                except TypeError as e:
                    logger.exception('Data serialization error: %s\nThis may be because you pass the wrong type of parameter to the function of PyWebIO.\nData content: %s', e, msg)

        def open(self):
            if (SingleSessionWSHandler.session is None):
                SingleSessionWSHandler.instance = self
                session_info = get_session_info_from_headers(self.request.headers)
                session_info['user_ip'] = self.request.remote_ip
                session_info['request'] = self.request
                session_info['backend'] = 'tornado'
                session_info['protocol'] = 'websocket'
                self.session = SingleSessionWSHandler.session = ScriptModeSession(thread, session_info=session_info, on_task_command=self.send_msg_to_client, loop=asyncio.get_event_loop())
                # Unblock the caller waiting in start_server_in_current_thread_session().
                websocket_conn_opened.set()
            else:
                # A session already exists: reject additional connections.
                self.close()

        def on_message(self, data):
            # Binary frames carry specially-serialized events; text frames are JSON.
            if isinstance(data, bytes):
                event = deserialize_binary_event(data)
            else:
                event = json.loads(data)
            if (event is None):
                return
            self.session.send_client_event(event)

        def on_close(self):
            if (self.session is not None):
                self.session.close()
                self.closed = True
                logger.debug('ScriptModeSession closed')

    async def wait_to_stop_loop(server):
        """Poll until the process is ready to exit, then shut the server down."""
        alive_none_daemonic_thread_cnt = None
        # Wait until this server thread is the only non-daemon thread left,
        # i.e. the user's script thread has finished.
        while (alive_none_daemonic_thread_cnt != 1):
            alive_none_daemonic_thread_cnt = sum((1 for t in threading.enumerate() if (t.is_alive() and (not t.isDaemon()))))
            (await asyncio.sleep(0.5))
        # If the session still needs the connection (e.g. pending callbacks),
        # keep serving until the client actually disconnects.
        if (SingleSessionWSHandler.session and SingleSessionWSHandler.session.need_keep_alive()):
            while (not SingleSessionWSHandler.instance.closed):
                (await asyncio.sleep(0.5))
        if SingleSessionWSHandler.instance:
            SingleSessionWSHandler.instance.close()
        server.stop()
        logger.debug('Closing tornado ioloop...')
        # Cancel every outstanding task, yield once so cancellations are
        # delivered, then stop the loop.
        tasks = [t for t in asyncio.all_tasks() if ((t is not asyncio.current_task()) and (not t.done()))]
        for task in tasks:
            task.cancel()
        (await asyncio.sleep(0))
        tornado.ioloop.IOLoop.current().stop()

    def server_thread():
        """Body of the background thread: build and run the Tornado server."""
        from tornado.log import access_log, app_log, gen_log
        # Silence Tornado's own request logging in script mode.
        access_log.setLevel(logging.ERROR)
        app_log.setLevel(logging.ERROR)
        gen_log.setLevel(logging.ERROR)
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        set_ioloop(tornado.ioloop.IOLoop.current())
        port = 0  # 0 -> let the OS pick a free port
        if os.environ.get('PYWEBIO_SCRIPT_MODE_PORT'):
            port = int(os.environ.get('PYWEBIO_SCRIPT_MODE_PORT'))
        (server, port) = _setup_server(webio_handler=SingleSessionWSHandler, port=port, host='127.0.0.1', websocket_max_message_size=parse_file_size('200M'))
        tornado.ioloop.IOLoop.current().spawn_callback(partial(wait_to_stop_loop, server=server))
        # Only auto-open the browser when the port was not pinned via env var.
        if ('PYWEBIO_SCRIPT_MODE_PORT' not in os.environ):
            tornado.ioloop.IOLoop.current().spawn_callback(open_webbrowser_on_server_started, '127.0.0.1', port)
        tornado.ioloop.IOLoop.current().start()
        logger.debug('Tornado server exit')

    t = threading.Thread(target=server_thread, name='Tornado-server')
    t.start()
    # Block the caller until the browser has connected.
    websocket_conn_opened.wait()
class NgramCounts():
    """Accumulates raw and modified (Kneser-Ney style) n-gram counts up to
    `ngram_order`, computes discounted probabilities (`f`) and back-off
    weights (`bow`), and can print the model in ARPA format.

    Layout:
      self.counts[n] maps a length-n history tuple -> CountsForHistory.
      self.d[n] is the absolute-discounting constant for histories of length n.
    Relies on module-level `whitespace`, `strip_chars`, `default_encoding`
    and `args` (verbosity) -- defined elsewhere in the file.
    """

    def __init__(self, ngram_order, bos_symbol='<s>', eos_symbol='</s>'):
        assert (ngram_order >= 2)
        self.ngram_order = ngram_order
        self.bos_symbol = bos_symbol
        self.eos_symbol = eos_symbol
        # counts[n]: histories of length n -> CountsForHistory.
        self.counts = []
        for n in range(ngram_order):
            self.counts.append(defaultdict((lambda : CountsForHistory())))
        # Discounting constants, filled by cal_discounting_constants().
        self.d = []

    def add_count(self, history, predicted_word, context_word, count):
        """Add `count` for `predicted_word` following `history` (a tuple);
        `context_word` is the word preceding the history (None disables the
        modified-count bookkeeping for this occurrence)."""
        self.counts[len(history)][history].add_count(predicted_word, context_word, count)

    def add_raw_counts_from_line(self, line):
        """Tokenize one sentence, wrap it in BOS/EOS, and add every n-gram of
        order 1..ngram_order."""
        if (line == ''):
            words = [self.bos_symbol, self.eos_symbol]
        else:
            words = (([self.bos_symbol] + whitespace.split(line)) + [self.eos_symbol])
        for i in range(len(words)):
            for n in range(1, (self.ngram_order + 1)):
                if ((i + n) > len(words)):
                    break
                ngram = words[i:(i + n)]
                predicted_word = ngram[(- 1)]
                history = tuple(ngram[:(- 1)])
                # Sentence-initial and highest-order n-grams keep raw counts
                # (no preceding context word); others record the context word
                # for Kneser-Ney modified counting.
                if ((i == 0) or (n == self.ngram_order)):
                    context_word = None
                else:
                    context_word = words[(i - 1)]
                self.add_count(history, predicted_word, context_word, 1)

    def add_raw_counts_from_standard_input(self):
        """Read sentences from stdin (one per line) and accumulate counts."""
        lines_processed = 0
        infile = io.TextIOWrapper(sys.stdin.buffer, encoding=default_encoding)
        for line in infile:
            line = line.strip(strip_chars)
            self.add_raw_counts_from_line(line)
            lines_processed += 1
        if ((lines_processed == 0) or (args.verbose > 0)):
            print('make_phone_lm.py: processed {0} lines of input'.format(lines_processed), file=sys.stderr)

    def add_raw_counts_from_file(self, filename):
        """Read sentences from `filename` (one per line) and accumulate counts."""
        lines_processed = 0
        with open(filename, encoding=default_encoding) as fp:
            for line in fp:
                line = line.strip(strip_chars)
                self.add_raw_counts_from_line(line)
                lines_processed += 1
        if ((lines_processed == 0) or (args.verbose > 0)):
            print('make_phone_lm.py: processed {0} lines of input'.format(lines_processed), file=sys.stderr)

    def cal_discounting_constants(self):
        """Estimate per-order discounting constants D = n1 / (n1 + 2*n2),
        where n1/n2 are the numbers of n-grams seen exactly once/twice.
        d[0] is unused (kept as 0)."""
        self.d = [0]
        for n in range(1, self.ngram_order):
            this_order_counts = self.counts[n]
            n1 = 0
            n2 = 0
            for (hist, counts_for_hist) in this_order_counts.items():
                stat = Counter(counts_for_hist.word_to_count.values())
                n1 += stat[1]
                n2 += stat[2]
            assert ((n1 + (2 * n2)) > 0)
            # Floor n1 at 0.001 so D never collapses to exactly zero.
            self.d.append((max(0.001, (n1 * 1.0)) / (n1 + (2 * n2))))

    def cal_f(self):
        """Compute discounted probabilities f(a_z) for every n-gram.

        Highest order uses raw counts: f = max(c - D, 0) / total.
        Lower orders use modified (continuation) counts where available,
        falling back to raw counts for histories with no context info.
        """
        # Highest order: f(a_z) = max(c(a_z) - D, 0) / c(a_).
        n = (self.ngram_order - 1)
        this_order_counts = self.counts[n]
        for (hist, counts_for_hist) in this_order_counts.items():
            for (w, c) in counts_for_hist.word_to_count.items():
                counts_for_hist.word_to_f[w] = ((max((c - self.d[n]), 0) * 1.0) / counts_for_hist.total_count)
        # Lower orders: use continuation counts n(*_z) when any exist.
        for n in range(0, (self.ngram_order - 1)):
            this_order_counts = self.counts[n]
            for (hist, counts_for_hist) in this_order_counts.items():
                n_star_star = 0
                for w in counts_for_hist.word_to_count.keys():
                    n_star_star += len(counts_for_hist.word_to_context[w])
                if (n_star_star != 0):
                    for w in counts_for_hist.word_to_count.keys():
                        n_star_z = len(counts_for_hist.word_to_context[w])
                        counts_for_hist.word_to_f[w] = ((max((n_star_z - self.d[n]), 0) * 1.0) / n_star_star)
                else:
                    # No continuation statistics for this history: fall back
                    # to raw counts.
                    for w in counts_for_hist.word_to_count.keys():
                        n_star_z = counts_for_hist.word_to_count[w]
                        counts_for_hist.word_to_f[w] = ((max((n_star_z - self.d[n]), 0) * 1.0) / counts_for_hist.total_count)

    def cal_bow(self):
        """Compute back-off weights bow(a_) = (1 - sum f(a_z)) / (1 - sum f(_z)).

        Highest-order n-grams (and n-grams ending in EOS) get bow = None,
        meaning no back-off weight is printed for them.
        """
        # Highest order: no back-off weights.
        n = (self.ngram_order - 1)
        this_order_counts = self.counts[n]
        for (hist, counts_for_hist) in this_order_counts.items():
            for w in counts_for_hist.word_to_count.keys():
                counts_for_hist.word_to_bow[w] = None
        for n in range(0, (self.ngram_order - 1)):
            this_order_counts = self.counts[n]
            for (hist, counts_for_hist) in this_order_counts.items():
                for w in counts_for_hist.word_to_count.keys():
                    if (w == self.eos_symbol):
                        # Nothing can follow EOS, so no back-off weight.
                        counts_for_hist.word_to_bow[w] = None
                    else:
                        # a_ is the extended history (hist + w).
                        a_ = (hist + (w,))
                        assert (len(a_) < self.ngram_order)
                        assert (a_ in self.counts[len(a_)].keys())
                        a_counts_for_hist = self.counts[len(a_)][a_]
                        # Numerator mass: sum of f over words following a_.
                        sum_z1_f_a_z = 0
                        for u in a_counts_for_hist.word_to_count.keys():
                            sum_z1_f_a_z += a_counts_for_hist.word_to_f[u]
                        # Denominator mass: same words under the backed-off
                        # (one-shorter) history a_[1:].
                        sum_z1_f_z = 0
                        _ = a_[1:]
                        _counts_for_hist = self.counts[len(_)][_]
                        for u in a_counts_for_hist.word_to_count.keys():
                            sum_z1_f_z += _counts_for_hist.word_to_f[u]
                        counts_for_hist.word_to_bow[w] = ((1.0 - sum_z1_f_a_z) / (1.0 - sum_z1_f_z))

    def print_raw_counts(self, info_string):
        """Debug dump: every n-gram with its raw count, sorted descending."""
        print(info_string)
        res = []
        for this_order_counts in self.counts:
            for (hist, counts_for_hist) in this_order_counts.items():
                for w in counts_for_hist.word_to_count.keys():
                    ngram = ((' '.join(hist) + ' ') + w)
                    ngram = ngram.strip(strip_chars)
                    res.append('{0}\t{1}'.format(ngram, counts_for_hist.word_to_count[w]))
        res.sort(reverse=True)
        for r in res:
            print(r)

    def print_modified_counts(self, info_string):
        """Debug dump: modified (continuation) counts where present, raw
        counts otherwise."""
        print(info_string)
        res = []
        for this_order_counts in self.counts:
            for (hist, counts_for_hist) in this_order_counts.items():
                for w in counts_for_hist.word_to_count.keys():
                    ngram = ((' '.join(hist) + ' ') + w)
                    ngram = ngram.strip(strip_chars)
                    modified_count = len(counts_for_hist.word_to_context[w])
                    raw_count = counts_for_hist.word_to_count[w]
                    if (modified_count == 0):
                        res.append('{0}\t{1}'.format(ngram, raw_count))
                    else:
                        res.append('{0}\t{1}'.format(ngram, modified_count))
        res.sort(reverse=True)
        for r in res:
            print(r)

    def print_f(self, info_string):
        """Debug dump: log10 discounted probabilities (zeros floored to 1e-99)."""
        print(info_string)
        res = []
        for this_order_counts in self.counts:
            for (hist, counts_for_hist) in this_order_counts.items():
                for w in counts_for_hist.word_to_count.keys():
                    ngram = ((' '.join(hist) + ' ') + w)
                    ngram = ngram.strip(strip_chars)
                    f = counts_for_hist.word_to_f[w]
                    if (f == 0):
                        # Avoid math.log(0); represent "impossible" with a
                        # very small probability.
                        f = 1e-99
                    res.append('{0}\t{1}'.format(ngram, math.log(f, 10)))
        res.sort(reverse=True)
        for r in res:
            print(r)

    def print_f_and_bow(self, info_string):
        """Debug dump: log10 f (and log10 bow when present) per n-gram."""
        print(info_string)
        res = []
        for this_order_counts in self.counts:
            for (hist, counts_for_hist) in this_order_counts.items():
                for w in counts_for_hist.word_to_count.keys():
                    ngram = ((' '.join(hist) + ' ') + w)
                    ngram = ngram.strip(strip_chars)
                    f = counts_for_hist.word_to_f[w]
                    if (f == 0):
                        f = 1e-99
                    bow = counts_for_hist.word_to_bow[w]
                    if (bow is None):
                        res.append('{1}\t{0}'.format(ngram, math.log(f, 10)))
                    else:
                        res.append('{1}\t{0}\t{2}'.format(ngram, math.log(f, 10), math.log(bow, 10)))
        res.sort(reverse=True)
        for r in res:
            print(r)

    def print_as_arpa(self, fout=io.TextIOWrapper(sys.stdout.buffer, encoding='latin-1')):
        """Write the model in ARPA format to `fout`.

        NOTE(review): the default argument wraps sys.stdout.buffer at class
        definition time -- acceptable for a script, but surprising if stdout
        is redirected afterwards.
        """
        # Header: n-gram counts per order.
        print('\\data\\', file=fout)
        for hist_len in range(self.ngram_order):
            print('ngram {0}={1}'.format((hist_len + 1), sum([len(counts_for_hist.word_to_f) for counts_for_hist in self.counts[hist_len].values()])), file=fout)
        print('', file=fout)
        # One section per order: "log10(prob) <tab> ngram [<tab> log10(bow)]".
        for hist_len in range(self.ngram_order):
            print('\\{0}-grams:'.format((hist_len + 1)), file=fout)
            this_order_counts = self.counts[hist_len]
            for (hist, counts_for_hist) in this_order_counts.items():
                for word in counts_for_hist.word_to_count.keys():
                    ngram = (hist + (word,))
                    prob = counts_for_hist.word_to_f[word]
                    bow = counts_for_hist.word_to_bow[word]
                    if (prob == 0):
                        prob = 1e-99
                    line = '{0}\t{1}'.format(('%.7f' % math.log10(prob)), ' '.join(ngram))
                    if (bow is not None):
                        line += '\t{0}'.format(('%.7f' % math.log10(bow)))
                    print(line, file=fout)
            print('', file=fout)
        print('\\end\\', file=fout)
class InvalidHeader(InvalidHandshake):
    """Raised when a handshake request or response header is unacceptable.

    ``value`` distinguishes three cases: ``None`` means the header was
    absent, ``""`` means it was present but empty, and any other string is
    the offending value.
    """

    def __init__(self, name: str, value: Optional[str]=None) -> None:
        self.name = name
        self.value = value

    def __str__(self) -> str:
        # Non-empty value -> the header exists but is malformed.
        if self.value:
            return f'invalid {self.name} header: {self.value}'
        # Falsy: distinguish "absent" (None) from "present but empty" ("").
        if self.value is None:
            return f'missing {self.name} header'
        return f'empty {self.name} header'
class TestChangeActivePointerGrab(EndianTest):
    """Pack/unpack round-trip test for the X11 ChangeActivePointerGrab request."""

    def setUp(self):
        # FIX: the original args dict was missing the 'cursor' and 'time'
        # values (a syntax error). Values reconstructed from req_bin_0,
        # decoded little-endian per the request layout:
        #   opcode 0x1e, pad, length 4,
        #   cursor CARD32 = 0x3cd70d8f, time CARD32 = 0x79335666,
        #   event_mask CARD16 = 0x8dbf (36287, matching the given value), pad(2)
        self.req_args_0 = {'cursor': 1020726671, 'event_mask': 36287, 'time': 2033407590}
        self.req_bin_0 = b'\x1e\x00\x04\x00\x8f\r\xd7<fV3y\xbf\x8d\x00\x00'

    def testPackRequest0(self):
        # Packing the field dict must reproduce the reference wire bytes.
        bin = request.ChangeActivePointerGrab._request.to_binary(*(), **self.req_args_0)
        self.assertBinaryEqual(bin, self.req_bin_0)

    def testUnpackRequest0(self):
        # Parsing the reference bytes must yield the same field dict, with
        # no trailing data left over.
        (args, remain) = request.ChangeActivePointerGrab._request.parse_binary(self.req_bin_0, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.req_args_0)
def train_ram_plus(model, data_loader, optimizer, epoch, device, config, model_clip):
    """Train RAM++ for one epoch.

    The model returns three losses (tagging, distillation, image-text
    alignment) which are summed for the backward pass. `model_clip` is a
    CLIP model (presumably frozen -- confirm) used to produce per-caption
    text embeddings and per-image features that condition the RAM++ model.

    Returns a dict mapping metric name -> epoch-averaged value formatted
    as a string with 3 decimals.
    """
    model.train()
    metric_logger = utils.MetricLogger(delimiter='  ')
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
    metric_logger.add_meter('loss_tag', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
    metric_logger.add_meter('loss_dis', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
    metric_logger.add_meter('loss_alignment', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
    header = 'Train Epoch: [{}]'.format(epoch)
    print_freq = 50
    # Reshuffle the distributed sampler so each epoch sees a new ordering.
    data_loader.sampler.set_epoch(epoch)
    for (i, (image, caption, image_tag, parse_tag)) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        if (epoch == 0):
            # LR warmup is applied step-wise during the first epoch only.
            warmup_lr_schedule(optimizer, i, config['warmup_steps'], config['warmup_lr'], config['init_lr'])
        optimizer.zero_grad()
        batch_text_embed = build_text_embed(model_clip, caption)
        image = image.to(device, non_blocking=True)
        clip_image_feature = model_clip.encode_image(image)
        (loss_tag, loss_dis, loss_alignment) = model(image, caption, image_tag, clip_image_feature, batch_text_embed)
        # Unweighted sum of the three objectives.
        loss = ((loss_tag + loss_dis) + loss_alignment)
        loss.backward()
        optimizer.step()
        metric_logger.update(loss_tag=loss_tag.item())
        metric_logger.update(loss_dis=loss_dis.item())
        metric_logger.update(loss_alignment=loss_alignment.item())
        metric_logger.update(lr=optimizer.param_groups[0]['lr'])
    # Aggregate meters across distributed workers before reporting.
    metric_logger.synchronize_between_processes()
    print('Averaged stats:', metric_logger.global_avg())
    return {k: '{:.3f}'.format(meter.global_avg) for (k, meter) in metric_logger.meters.items()}
# FIX: the decorator prefix was clipped, leaving a bare call
# `_model_architecture('masked_lm', 'bert_large')` (NameError at import and
# the architecture was never registered). Restored per fairseq convention.
@register_model_architecture('masked_lm', 'bert_large')
def bert_large_architecture(args):
    """BERT-large hyperparameters: 24 layers, 1024-dim embeddings, 16 heads,
    4096-dim FFN.

    Each value is a default -- attributes already present on `args` are kept.
    Remaining defaults are delegated to bert_base_architecture.
    """
    args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)
    args.encoder_layers = getattr(args, 'encoder_layers', 24)
    args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16)
    args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096)
    bert_base_architecture(args)
class TestCounter(TestCase):
    """Round-trip serialization of collections.Counter through jsons."""

    # Character frequencies of 'A counter is something that counts!'.
    _COUNTS = {'A': 1, ' ': 5, 'c': 2, 'o': 3, 'u': 2, 'n': 3, 't': 5, 'e': 2, 'r': 1, 'i': 2, 's': 3, 'm': 1, 'h': 2, 'g': 1, 'a': 1, '!': 1}

    def test_dump_counter(self):
        # Dumping a Counter must yield a plain dict of element frequencies.
        counter = Counter('A counter is something that counts!')
        self.assertDictEqual(self._COUNTS, jsons.dump(counter))

    def test_load_counter(self):
        # Loading a frequency dict as Counter must reconstruct equal counts.
        source = dict(self._COUNTS)
        restored = jsons.load(source, Counter)
        self.assertEqual(Counter(source), Counter(restored))
def test_main_reads_config_values(mirror_mock: mock.MagicMock, tmpdir: Path) -> None:
    """Integration test: `bandersnatch mirror` must parse the config file and
    forward the resulting options to the (mocked) mirror operation.

    Builds a temporary copy of the bundled unittest.conf with only the
    diff-file path rewritten, runs main(), then asserts on the positional
    args (mirror home dir, Master instance) and the full kwargs dict.
    """
    base_config_path = (Path(bandersnatch.__file__).parent / 'unittest.conf')
    diff_file = (Path(tempfile.gettempdir()) / 'srv/pypi/mirrored-files')
    # Rewrite only the diff-file line; every other config line is kept as-is.
    config_lines = [(f'''diff-file = {diff_file.as_posix()}
''' if line.startswith('diff-file') else line) for line in base_config_path.read_text().splitlines()]
    config_path = (tmpdir / 'unittest.conf')
    config_path.write_text('\n'.join(config_lines), encoding='utf-8')
    sys.argv = ['bandersnatch', '-c', str(config_path), 'mirror']
    assert config_path.exists()
    main(asyncio.new_event_loop())
    # Inspect the first (and only) call made to the mocked mirror routine.
    ((homedir, master), kwargs) = mirror_mock.call_args_list[0]
    assert (Path('/srv/pypi') == homedir)
    assert isinstance(master, bandersnatch.master.Master)
    # Every option below must come straight from the config file defaults.
    assert ({'stop_on_error': False, 'hash_index': False, 'workers': 3, 'root_uri': '', 'json_save': False, 'digest_name': 'sha256', 'keep_index_versions': 0, 'release_files_save': True, 'storage_backend': 'filesystem', 'diff_file': diff_file, 'diff_append_epoch': False, 'diff_full_path': diff_file, 'cleanup': False, 'compare_method': 'hash', 'download_mirror': '', 'download_mirror_no_fallback': False, 'simple_format': SimpleFormat.ALL} == kwargs)
class TPadding(TestCase):
    """Tests for the `Padding` block's write() behavior.

    FIX: replaced the `failUnlessEqual` / `failIf` assertion aliases with
    `assertEqual` / `assertFalse` -- the old aliases were deprecated and
    removed entirely in Python 3.12, so these tests would crash there.
    """

    def setUp(self):
        # A padding block backed by 100 zero bytes.
        self.b = Padding((b'\x00' * 100))

    def test_padding(self):
        self.assertEqual(self.b.write(), (b'\x00' * 100))

    def test_blank(self):
        # A default-constructed Padding writes nothing (falsy output).
        self.assertFalse(Padding().write())

    def test_empty(self):
        self.assertFalse(Padding(b'').write())

    def test_repr(self):
        # Smoke test: repr() must not raise.
        repr(Padding())

    def test_change(self):
        # Shrinking .length truncates the written padding.
        self.b.length = 20
        self.assertEqual(self.b.write(), (b'\x00' * 20))
class SponsorshipsBenefitsFormTests(TestCase):
    """Tests for SponsorshipsBenefitsForm: benefit/package listing by year,
    cross-field validation (standalone vs package, a la carte, conflicts,
    package-only, capacity) and the get_benefits/get_package helpers.
    """

    def setUp(self):
        # Two programs; 3 + 5 regular benefits for the current year, all
        # attached to one advertisable package; plus 2 a-la-carte and 2
        # standalone benefits.
        self.current_year = SponsorshipCurrentYear.get_year()
        self.psf = baker.make('sponsors.SponsorshipProgram', name='PSF')
        self.wk = baker.make('sponsors.SponsorshipProgram', name='Working Group')
        self.program_1_benefits = baker.make(SponsorshipBenefit, program=self.psf, _quantity=3, year=self.current_year)
        self.program_2_benefits = baker.make(SponsorshipBenefit, program=self.wk, _quantity=5, year=self.current_year)
        self.package = baker.make('sponsors.SponsorshipPackage', advertise=True, year=self.current_year)
        self.package.benefits.add(*self.program_1_benefits)
        self.package.benefits.add(*self.program_2_benefits)
        self.a_la_carte = baker.make(SponsorshipBenefit, program=self.psf, _quantity=2, year=self.current_year)
        self.standalone = baker.make(SponsorshipBenefit, program=self.psf, standalone=True, _quantity=2, year=self.current_year)

    def test_specific_field_to_select_a_la_carte_by_year(self):
        """a_la_carte_benefits choices are filtered by the form's year."""
        prev_year = (self.current_year - 1)
        from_prev_year = baker.make(SponsorshipBenefit, program=self.psf, _quantity=2, year=prev_year)
        form = SponsorshipsBenefitsForm()
        choices = list(form.fields['a_la_carte_benefits'].choices)
        self.assertEqual(len(self.a_la_carte), len(choices))
        for benefit in self.a_la_carte:
            self.assertIn(benefit.id, [c[0] for c in choices])
        form = SponsorshipsBenefitsForm(year=prev_year)
        choices = list(form.fields['a_la_carte_benefits'].choices)
        self.assertEqual(len(self.a_la_carte), len(choices))
        for benefit in from_prev_year:
            self.assertIn(benefit.id, [c[0] for c in choices])

    def test_benefits_from_current_year_organized_by_program(self):
        """benefits_programs exposes one field per program, current year only."""
        older_psf = baker.make(SponsorshipBenefit, program=self.psf, _quantity=3, year=(self.current_year - 1))
        older_wk = baker.make(SponsorshipBenefit, program=self.wk, _quantity=5, year=(self.current_year - 1))
        self.package.benefits.add(*older_psf)
        self.package.benefits.add(*older_wk)
        form = SponsorshipsBenefitsForm()
        (field1, field2) = sorted(form.benefits_programs, key=(lambda f: f.name))
        self.assertEqual('benefits_psf', field1.name)
        self.assertEqual('PSF Benefits', field1.label)
        choices = list(field1.field.choices)
        self.assertEqual(len(self.program_1_benefits), len(choices))
        for benefit in self.program_1_benefits:
            self.assertIn(benefit.id, [c[0] for c in choices])
        self.assertEqual('benefits_working_group', field2.name)
        self.assertEqual('Working Group Benefits', field2.label)
        choices = list(field2.field.choices)
        self.assertEqual(len(self.program_2_benefits), len(choices))
        for benefit in self.program_2_benefits:
            self.assertIn(benefit.id, [c[0] for c in choices])

    def test_specific_field_to_select_standalone_benefits_by_year(self):
        """standalone_benefits choices are filtered by the form's year."""
        prev_year = (self.current_year - 1)
        prev_benefits = baker.make(SponsorshipBenefit, program=self.psf, standalone=True, _quantity=2, year=prev_year)
        form = SponsorshipsBenefitsForm()
        choices = list(form.fields['standalone_benefits'].choices)
        self.assertEqual(len(self.standalone), len(choices))
        for benefit in self.standalone:
            self.assertIn(benefit.id, [c[0] for c in choices])
        form = SponsorshipsBenefitsForm(year=prev_year)
        choices = list(form.fields['standalone_benefits'].choices)
        self.assertEqual(len(self.standalone), len(choices))
        for benefit in prev_benefits:
            self.assertIn(benefit.id, [c[0] for c in choices])

    def test_package_list_only_advertisable_ones_from_current_year(self):
        """Package queryset excludes non-advertisable packages."""
        ads_pkgs = baker.make('SponsorshipPackage', advertise=True, _quantity=2, year=self.current_year)
        baker.make('SponsorshipPackage', advertise=False)
        baker.make('SponsorshipPackage', advertise=False, year=self.current_year)
        form = SponsorshipsBenefitsForm()
        field = form.fields.get('package')
        # 1 from setUp + the 2 advertisable ones created above.
        self.assertEqual(3, field.queryset.count())

    def test_invalidate_form_without_benefits(self):
        """An empty submission fails; package + one benefit passes."""
        form = SponsorshipsBenefitsForm(data={})
        self.assertFalse(form.is_valid())
        self.assertIn('__all__', form.errors)
        form = SponsorshipsBenefitsForm(data={'benefits_psf': [self.program_1_benefits[0].id], 'package': self.package.id})
        self.assertTrue(form.is_valid())

    def test_validate_form_without_package_but_with_standalone_benefits(self):
        """Standalone-only applications need no package; get_benefits() omits
        standalone benefits unless include_standalone=True."""
        benefit = self.standalone[0]
        form = SponsorshipsBenefitsForm(data={'standalone_benefits': [benefit.id]})
        self.assertTrue(form.is_valid())
        self.assertEqual([], form.get_benefits())
        self.assertEqual([benefit], form.get_benefits(include_standalone=True))

    def test_do_not_validate_form_with_package_and_standalone_benefits(self):
        """Package and standalone benefits are mutually exclusive."""
        benefit = self.standalone[0]
        data = {'standalone_benefits': [benefit.id], 'package': self.package.id, 'benefits_psf': [self.program_1_benefits[0].id]}
        form = SponsorshipsBenefitsForm(data=data)
        self.assertFalse(form.is_valid())
        self.assertIn('Application with package cannot have standalone benefits.', form.errors['__all__'])

    def test_should_not_validate_form_without_package_with_a_la_carte_benefits(self):
        """A la carte benefits require a package to be selected."""
        data = {'a_la_carte_benefits': [self.a_la_carte[0].id]}
        form = SponsorshipsBenefitsForm(data=data)
        self.assertFalse(form.is_valid())
        self.assertIn('You must pick a package to include the selected benefits.', form.errors['__all__'])
        data.update({'package': self.package.id})
        form = SponsorshipsBenefitsForm(data=data)
        self.assertTrue(form.is_valid())

    def test_do_not_validate_package_package_with_disabled_a_la_carte_benefits(self):
        """Packages with allow_a_la_carte=False reject a la carte benefits."""
        self.package.allow_a_la_carte = False
        self.package.save()
        data = {'package': self.package.id, 'benefits_psf': [self.program_1_benefits[0].id], 'a_la_carte_benefits': [self.a_la_carte[0].id]}
        form = SponsorshipsBenefitsForm(data=data)
        self.assertFalse(form.is_valid())
        self.assertIn('Package does not accept a la carte benefits.', form.errors['__all__'])
        data.pop('a_la_carte_benefits')
        form = SponsorshipsBenefitsForm(data=data)
        self.assertTrue(form.is_valid(), form.errors)

    def test_benefits_conflicts_helper_property(self):
        """benefits_conflicts maps each benefit id to its conflicting ids,
        in both directions."""
        (benefit_1, benefit_2) = baker.make('sponsors.SponsorshipBenefit', _quantity=2)
        benefit_1.conflicts.add(*self.program_1_benefits)
        benefit_2.conflicts.add(*self.program_2_benefits)
        form = SponsorshipsBenefitsForm()
        map = form.benefits_conflicts
        self.assertEqual(((2 + len(self.program_1_benefits)) + len(self.program_2_benefits)), len(map))
        self.assertEqual(sorted(map[benefit_1.id]), sorted((b.id for b in self.program_1_benefits)))
        self.assertEqual(sorted(map[benefit_2.id]), sorted((b.id for b in self.program_2_benefits)))
        for b in self.program_1_benefits:
            self.assertEqual(map[b.id], [benefit_1.id])
        for b in self.program_2_benefits:
            self.assertEqual(map[b.id], [benefit_2.id])

    def test_invalid_form_if_any_conflict(self):
        """Selecting two mutually conflicting benefits invalidates the form."""
        benefit_1 = baker.make('sponsors.SponsorshipBenefit', program=self.wk, year=self.current_year)
        benefit_1.conflicts.add(*self.program_1_benefits)
        self.package.benefits.add(benefit_1)
        data = {'benefits_psf': [b.id for b in self.program_1_benefits], 'package': self.package.id}
        form = SponsorshipsBenefitsForm(data=data)
        self.assertTrue(form.is_valid())
        data['benefits_working_group'] = [benefit_1.id]
        form = SponsorshipsBenefitsForm(data=data)
        self.assertFalse(form.is_valid())
        self.assertIn('The application has 1 or more benefits that conflicts.', form.errors['__all__'])

    def test_get_benefits_from_cleaned_data(self):
        """get_benefits() excludes a la carte unless include_a_la_carte=True."""
        benefit = self.program_1_benefits[0]
        data = {'benefits_psf': [benefit.id], 'a_la_carte_benefits': [b.id for b in self.a_la_carte], 'package': self.package.id}
        form = SponsorshipsBenefitsForm(data=data)
        self.assertTrue(form.is_valid())
        benefits = form.get_benefits()
        self.assertEqual(1, len(benefits))
        self.assertIn(benefit, benefits)
        benefits = form.get_benefits(include_a_la_carte=True)
        self.assertEqual(3, len(benefits))
        self.assertIn(benefit, benefits)
        for a_la_carte in self.a_la_carte:
            self.assertIn(a_la_carte, benefits)

    def test_package_only_benefit_without_package_should_not_validate(self):
        """Package-only benefits require a package selection."""
        SponsorshipBenefit.objects.all().update(package_only=True)
        data = {'benefits_psf': [self.program_1_benefits[0]]}
        form = SponsorshipsBenefitsForm(data=data)
        self.assertFalse(form.is_valid())
        self.assertIn('You must pick a package to include the selected benefits.', form.errors['__all__'])

    def test_package_only_benefit_with_wrong_package_should_not_validate(self):
        """Package-only benefits must belong to the chosen package."""
        SponsorshipBenefit.objects.all().update(package_only=True)
        package = baker.make('sponsors.SponsorshipPackage', advertise=True, year=self.current_year)
        package.benefits.add(*SponsorshipBenefit.objects.all())
        data = {'benefits_psf': [self.program_1_benefits[0]], 'package': baker.make('sponsors.SponsorshipPackage', advertise=True, year=self.current_year).id}
        form = SponsorshipsBenefitsForm(data=data)
        self.assertFalse(form.is_valid())
        self.assertIn('The application has 1 or more package only benefits but wrong sponsor package.', form.errors['__all__'][0])
        data = {'benefits_psf': [self.program_1_benefits[0]], 'package': package.id}
        form = SponsorshipsBenefitsForm(data=data)
        self.assertTrue(form.is_valid())

    def test_benefit_with_no_capacity_should_not_validate(self):
        """Benefits at zero capacity cannot be selected."""
        SponsorshipBenefit.objects.all().update(capacity=0)
        data = {'benefits_psf': [self.program_1_benefits[0]], 'package': self.package.id}
        form = SponsorshipsBenefitsForm(data=data)
        self.assertFalse(form.is_valid())
        self.assertIn('The application has 1 or more benefits with no capacity.', form.errors['__all__'])

    def test_benefit_with_soft_capacity_should_validate(self):
        """Soft capacity allows overbooking: zero capacity still validates."""
        SponsorshipBenefit.objects.all().update(capacity=0, soft_capacity=True)
        data = {'benefits_psf': [self.program_1_benefits[0]], 'package': self.package.id}
        form = SponsorshipsBenefitsForm(data=data)
        self.assertTrue(form.is_valid())

    def test_get_package_return_selected_package(self):
        """get_package() returns the package picked in the form data."""
        data = {'benefits_psf': [self.program_1_benefits[0]], 'package': self.package.id}
        form = SponsorshipsBenefitsForm(data=data)
        self.assertTrue(form.is_valid())
        self.assertEqual(self.package, form.get_package())

    def test_get_package_get_or_create_standalone_only_package(self):
        """Standalone-only applications get (or reuse) a hidden synthetic
        'Standalone Only' package."""
        data = {'standalone_benefits': [self.standalone[0].id]}
        form = SponsorshipsBenefitsForm(data=data)
        self.assertTrue(form.is_valid())
        self.assertEqual(1, SponsorshipPackage.objects.count())
        package = form.get_package()
        self.assertEqual('Standalone Only', package.name)
        self.assertEqual('standalone-only', package.slug)
        self.assertEqual(175, package.logo_dimension)
        self.assertEqual(0, package.sponsorship_amount)
        self.assertFalse(package.advertise)
        self.assertEqual(2, SponsorshipPackage.objects.count())
        # A second standalone-only application reuses the same package.
        data = {'standalone_benefits': [self.standalone[0].id]}
        form = SponsorshipsBenefitsForm(data=data)
        self.assertTrue(form.is_valid())
        self.assertEqual(package, form.get_package())
        self.assertEqual(2, SponsorshipPackage.objects.count())
class Mlp(nn.Module):
    """Two-layer MLP: fc1 (expand) -> activation -> dropout -> fc2 (project)
    -> dropout. Hidden width is `int(mlp_ratio * dim)`; `drop` may be a
    scalar or a pair of per-dropout probabilities."""

    def __init__(self, dim, mlp_ratio=4, out_features=None, act_layer=StarReLU, drop=0.0, bias=False, **kwargs):
        super().__init__()
        in_features = dim
        # Default the output width to the input width.
        out_features = out_features or in_features
        hidden_features = int(mlp_ratio * in_features)
        # Normalize `drop` into (p_after_act, p_after_fc2).
        drop_probs = to_2tuple(drop)
        self.fc1 = nn.Linear(in_features, hidden_features, bias=bias)
        self.act = act_layer()
        self.drop1 = nn.Dropout(drop_probs[0])
        self.fc2 = nn.Linear(hidden_features, out_features, bias=bias)
        self.drop2 = nn.Dropout(drop_probs[1])

    def forward(self, x):
        # Apply the fixed pipeline in order.
        for layer in (self.fc1, self.act, self.drop1, self.fc2, self.drop2):
            x = layer(x)
        return x
class ModuleHelper(object):
    """Stateless helpers for building normalization layers, loading weights,
    and initializing PyTorch modules.

    FIXES: all helpers are now proper @staticmethods (previously bare
    functions in the class body, so an instance-level call would have
    mis-bound `self` as the first argument); load_url now expands '~' to the
    user's home directory (previously os.path.join('~', ...) created a
    literal './~' directory) and creates the cache dir race-safely.
    """

    @staticmethod
    def BNReLU(num_features, norm_type=None, **kwargs):
        """Return nn.Sequential(<norm>(num_features, **kwargs), nn.ReLU())
        for the given norm_type.

        Raises:
            ValueError: if norm_type is not a supported type (including the
                default None).
        """
        if norm_type == 'batchnorm':
            return nn.Sequential(nn.BatchNorm2d(num_features, **kwargs), nn.ReLU())
        elif norm_type == 'encsync_batchnorm':
            # Imported lazily: the `encoding` package is optional.
            from encoding.nn import BatchNorm2d
            return nn.Sequential(BatchNorm2d(num_features, **kwargs), nn.ReLU())
        elif norm_type == 'instancenorm':
            return nn.Sequential(nn.InstanceNorm2d(num_features, **kwargs), nn.ReLU())
        elif norm_type == 'fixed_batchnorm':
            return nn.Sequential(FixedBatchNorm(num_features, **kwargs), nn.ReLU())
        else:
            raise ValueError('Not support BN type: {}.'.format(norm_type))

    @staticmethod
    def BatchNorm3d(norm_type=None, ret_cls=False):
        """Return the 3d normalization *class* (not an instance) for norm_type."""
        if norm_type == 'batchnorm':
            return nn.BatchNorm3d
        elif norm_type == 'encsync_batchnorm':
            from encoding.nn import BatchNorm3d
            return BatchNorm3d
        elif norm_type == 'instancenorm':
            return nn.InstanceNorm3d
        else:
            raise ValueError('Not support BN type: {}.'.format(norm_type))

    @staticmethod
    def BatchNorm2d(norm_type=None, ret_cls=False):
        """Return the 2d normalization *class* (not an instance) for norm_type."""
        if norm_type == 'batchnorm':
            return nn.BatchNorm2d
        elif norm_type == 'encsync_batchnorm':
            from encoding.nn import BatchNorm2d
            return BatchNorm2d
        elif norm_type == 'instancenorm':
            return nn.InstanceNorm2d
        else:
            raise ValueError('Not support BN type: {}.'.format(norm_type))

    @staticmethod
    def BatchNorm1d(norm_type=None, ret_cls=False):
        """Return the 1d normalization *class* (not an instance) for norm_type."""
        if norm_type == 'batchnorm':
            return nn.BatchNorm1d
        elif norm_type == 'encsync_batchnorm':
            from encoding.nn import BatchNorm1d
            return BatchNorm1d
        elif norm_type == 'instancenorm':
            return nn.InstanceNorm1d
        else:
            raise ValueError('Not support BN type: {}.'.format(norm_type))

    @staticmethod
    def load_model(model, pretrained=None, all_match=True, map_location='cpu'):
        """Load checkpoint weights from `pretrained` into `model`.

        With all_match=True every checkpoint key is loaded (keys are remapped
        to 'prefix.<k>' when the model expects that wrapper); otherwise only
        keys present in the model's state_dict are copied over.
        Returns `model` unchanged if `pretrained` is None or missing.
        """
        if pretrained is None:
            return model
        if not os.path.exists(pretrained):
            print('{} not exists.'.format(pretrained))
            return model
        print('Loading pretrained model:{}'.format(pretrained))
        if all_match:
            pretrained_dict = torch.load(pretrained, map_location=map_location)
            model_dict = model.state_dict()
            load_dict = dict()
            for k, v in pretrained_dict.items():
                # Checkpoints saved without the 'prefix.' wrapper get remapped.
                if 'prefix.{}'.format(k) in model_dict:
                    load_dict['prefix.{}'.format(k)] = v
                else:
                    load_dict[k] = v
            model.load_state_dict(load_dict)
        else:
            pretrained_dict = torch.load(pretrained)
            model_dict = model.state_dict()
            load_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
            print('Matched Keys: {}'.format(load_dict.keys()))
            model_dict.update(load_dict)
            model.load_state_dict(model_dict)
        return model

    @staticmethod
    def load_url(url, map_location=None):
        """Download (and cache under ~/.TorchCV/model) a checkpoint from
        `url`, then torch.load it."""
        # FIX: expand '~' -- joining it literally created a './~' directory.
        model_dir = os.path.expanduser(os.path.join('~', '.TorchCV', 'model'))
        # exist_ok avoids the exists()/makedirs() race.
        os.makedirs(model_dir, exist_ok=True)
        filename = url.split('/')[-1]
        cached_file = os.path.join(model_dir, filename)
        if not os.path.exists(cached_file):
            print('Downloading: "{}" to {}\n'.format(url, cached_file))
            urlretrieve(url, cached_file)
        print('Loading pretrained model:{}'.format(cached_file))
        return torch.load(cached_file, map_location=map_location)

    @staticmethod
    def constant_init(module, val, bias=0):
        """Fill module.weight with `val` and module.bias (if any) with `bias`."""
        nn.init.constant_(module.weight, val)
        if hasattr(module, 'bias') and (module.bias is not None):
            nn.init.constant_(module.bias, bias)

    @staticmethod
    def xavier_init(module, gain=1, bias=0, distribution='normal'):
        """Xavier-initialize module.weight (uniform or normal) and set bias."""
        assert (distribution in ['uniform', 'normal'])
        if distribution == 'uniform':
            nn.init.xavier_uniform_(module.weight, gain=gain)
        else:
            nn.init.xavier_normal_(module.weight, gain=gain)
        if hasattr(module, 'bias') and (module.bias is not None):
            nn.init.constant_(module.bias, bias)

    @staticmethod
    def normal_init(module, mean=0, std=1, bias=0):
        """Gaussian-initialize module.weight and set bias to a constant."""
        nn.init.normal_(module.weight, mean, std)
        if hasattr(module, 'bias') and (module.bias is not None):
            nn.init.constant_(module.bias, bias)

    @staticmethod
    def uniform_init(module, a=0, b=1, bias=0):
        """Uniform-initialize module.weight in [a, b] and set bias."""
        nn.init.uniform_(module.weight, a, b)
        if hasattr(module, 'bias') and (module.bias is not None):
            nn.init.constant_(module.bias, bias)

    @staticmethod
    def kaiming_init(module, mode='fan_in', nonlinearity='leaky_relu', bias=0, distribution='normal'):
        """Kaiming-initialize module.weight (uniform or normal) and set bias."""
        assert (distribution in ['uniform', 'normal'])
        if distribution == 'uniform':
            nn.init.kaiming_uniform_(module.weight, mode=mode, nonlinearity=nonlinearity)
        else:
            nn.init.kaiming_normal_(module.weight, mode=mode, nonlinearity=nonlinearity)
        if hasattr(module, 'bias') and (module.bias is not None):
            nn.init.constant_(module.bias, bias)