code stringlengths 281 23.7M |
|---|
class DoPredictDuringTraining(TrainerCallback):
    """HF Trainer callback: run prediction on a held-out test set every time
    the tracked eval metric improves (i.e. "predict at each new best").

    NOTE(review): ``do_predict`` reads ``self.trainer`` but this class never
    sets it -- the owning code must assign ``callback.trainer = trainer``
    after construction, otherwise ``do_predict`` raises AttributeError.
    Confirm against the caller.
    """

    def __init__(self, test_dataset, processor):
        super(DoPredictDuringTraining, self).__init__()
        # Drop labels so Trainer.predict() treats this as pure inference.
        self.test_dataset = test_dataset.remove_columns('label')
        self.processor = processor
        self.best_score = None  # best value of the tracked metric seen so far

    def on_evaluate(self, args: TrainingArguments, state: TrainerState, control: TrainerControl, metrics, **kwargs):
        if args.metric_for_best_model:
            metric_to_check = args.metric_for_best_model
            if not metric_to_check.startswith('eval_'):
                metric_to_check = f'eval_{metric_to_check}'
            operator = np.greater if args.greater_is_better else np.less
            # BUGFIX: compare against None explicitly. The original used
            # ``not self.best_score``, which also treated a legitimate best
            # score of 0.0 as "no score yet" and re-triggered prediction.
            if self.best_score is None or operator(metrics[metric_to_check], self.best_score):
                self.best_score = metrics[metric_to_check]
                self.do_predict(args.output_dir)

    def do_predict(self, output_dir):
        """Predict on the test set; write/save results on world process zero only."""
        logits = self.trainer.predict(self.test_dataset, metric_key_prefix='predict').predictions
        if hasattr(self.processor, 'save_result'):
            # Processor knows how to persist raw logits itself.
            if self.trainer.is_world_process_zero():
                self.processor.save_result(logits)
        else:
            predictions = np.argmax(logits, axis=1)
            # No placeholder was used, so a plain string (not an f-string).
            output_predict_file = os.path.join(output_dir, 'predict_results.txt')
            if self.trainer.is_world_process_zero():
                with open(output_predict_file, 'w') as writer:
                    writer.write('index\tprediction\n')
                    for index, item in enumerate(predictions):
                        item = self.processor.labels[item]
                        writer.write(f'{index} {item}\n')
def run(settings):
    """Default DiMP-RGBD training configuration (ResNet-50 backbone).

    Trains a DiMP tracker with RGB+depth ('rgb3d') input fused via a 'sum'
    merge, on COCO-depth + LaSOT-depth + DepthTrack, validating on the
    DepthTrack val split.

    NOTE(review): ``settings`` is the LTR settings object; ``settings.env.*``
    must point at the local dataset roots -- confirm environment setup.
    """
    settings.description = 'Default train settings for DiMP with ResNet50 as backbone.'
    # --- general training settings ---
    settings.batch_size = 10
    settings.num_workers = 8
    settings.multi_gpu = False
    settings.print_interval = 1
    # ImageNet normalization statistics.
    settings.normalize_mean = [0.485, 0.456, 0.406]
    settings.normalize_std = [0.229, 0.224, 0.225]
    # --- search region / label geometry ---
    settings.search_area_factor = 5.0
    settings.output_sigma_factor = (1 / 4)
    settings.target_filter_sz = 4
    settings.feature_sz = 18
    # Image crop size = feature map size * backbone stride (16).
    settings.output_sz = (settings.feature_sz * 16)
    settings.center_jitter_factor = {'train': 3, 'test': 4.5}
    settings.scale_jitter_factor = {'train': 0.25, 'test': 0.5}
    settings.hinge_threshold = 0.05
    # RGB+depth fusion strategy and input encoding.
    merge = 'sum'
    input_dtype = 'rgb3d'
    # --- datasets ---
    coco_train = MSCOCOSeq_depth(settings.env.cocodepth_dir, dtype=input_dtype)
    lasot_depth_train = Lasot_depth(root=settings.env.lasotdepth_dir, dtype=input_dtype)
    depthtrack_train = DepthTrack(root=settings.env.depthtrack_dir, split='train', dtype=input_dtype)
    depthtrack_val = DepthTrack(root=settings.env.depthtrack_dir, split='val', dtype=input_dtype)
    # --- data augmentation / normalization transforms ---
    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))
    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2), tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))
    transform_val = tfm.Transform(tfm.ToTensor(), tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))
    # Gaussian label sigma expressed relative to the search area.
    output_sigma = (settings.output_sigma_factor / settings.search_area_factor)
    proposal_params = {'min_iou': 0.1, 'boxes_per_frame': 8, 'sigma_factor': [0.01, 0.05, 0.1, 0.2, 0.3]}
    label_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma, 'kernel_sz': settings.target_filter_sz}
    data_processing_train = processing.DiMPProcessing(search_area_factor=settings.search_area_factor, output_sz=settings.output_sz, center_jitter_factor=settings.center_jitter_factor, scale_jitter_factor=settings.scale_jitter_factor, mode='sequence', proposal_params=proposal_params, label_function_params=label_params, transform=transform_train, joint_transform=transform_joint)
    data_processing_val = processing.DiMPProcessing(search_area_factor=settings.search_area_factor, output_sz=settings.output_sz, center_jitter_factor=settings.center_jitter_factor, scale_jitter_factor=settings.scale_jitter_factor, mode='sequence', proposal_params=proposal_params, label_function_params=label_params, transform=transform_val, joint_transform=transform_joint)
    # --- samplers and loaders (datasets weighted equally) ---
    dataset_train = sampler.DiMPSampler([coco_train, lasot_depth_train, depthtrack_train], [1, 1, 1], samples_per_epoch=26000, max_gap=30, num_test_frames=3, num_train_frames=3, processing=data_processing_train)
    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size, num_workers=settings.num_workers, shuffle=True, drop_last=True, stack_dim=1)
    dataset_val = sampler.DiMPSampler([depthtrack_val], [1], samples_per_epoch=5000, max_gap=30, num_test_frames=3, num_train_frames=3, processing=data_processing_val)
    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size, num_workers=settings.num_workers, shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)
    # --- network ---
    net = dimpnet.dimp50_RGBD_Fusion(filter_size=settings.target_filter_sz, backbone_pretrained=True, optim_iter=5, clf_feat_norm=True, clf_feat_blocks=0, final_conv=True, out_feature_dim=512, optim_init_step=0.9, optim_init_reg=0.1, init_gauss_sigma=(output_sigma * settings.feature_sz), num_dist_bins=100, bin_displacement=0.1, mask_init_factor=3.0, target_mask_act='sigmoid', score_act='relu', merge=merge)
    if settings.multi_gpu:
        net = MultiGPU(net, dim=1)
    # --- objective, optimizer, scheduler, trainer ---
    objective = {'iou': nn.MSELoss(), 'test_clf': ltr_losses.LBHinge(threshold=settings.hinge_threshold)}
    loss_weight = {'iou': 1, 'test_clf': 100, 'test_init_clf': 100, 'test_iter_clf': 400}
    actor = actors.DiMPActor(net=net, objective=objective, loss_weight=loss_weight)
    # Per-module learning rates; bb_regressor uses the base lr (2e-4).
    optimizer = optim.Adam([{'params': actor.net.classifier.filter_initializer.parameters(), 'lr': 5e-05}, {'params': actor.net.classifier.filter_optimizer.parameters(), 'lr': 0.0005}, {'params': actor.net.classifier.feature_extractor.parameters(), 'lr': 5e-05}, {'params': actor.net.bb_regressor.parameters()}, {'params': actor.net.feature_extractor.parameters(), 'lr': 2e-05}], lr=0.0002)
    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)
    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)
    trainer.train(100, load_latest=True, fail_safe=True)
def test_custom_ellipsoid():
    """CustomEllipsoid derives the semi-minor axis from the inverse flattening
    and reports an 'undefined' name plus the expected JSON keys."""
    ellipsoid = CustomEllipsoid(semi_major_axis=6378137, inverse_flattening=298.)

    assert ellipsoid.name == 'undefined'
    assert ellipsoid.semi_major_metre == 6378137
    assert ellipsoid.semi_minor_metre == 6356752.
    assert_almost_equal(ellipsoid.inverse_flattening, 298.)

    expected_keys = ['$schema', 'inverse_flattening', 'name', 'semi_major_axis', 'type']
    assert sorted(ellipsoid.to_json_dict()) == expected_keys
class DnCNN(nn.Module):
def __init__(self, channels, num_of_layers=17):
super(DnCNN, self).__init__()
self.num_of_layers = num_of_layers
kernel_size = 3
padding = 1
features = 64
self.layers = nn.ModuleList()
self.layers.append(nn.Conv2d(in_channels=channels, out_channels=features, kernel_size=kernel_size, padding=padding, bias=False))
self.layers.append(nn.ReLU(inplace=True))
for _ in range((num_of_layers - 2)):
self.layers.append(nn.Sequential(nn.Conv2d(in_channels=features, out_channels=features, kernel_size=kernel_size, padding=padding, bias=False), nn.BatchNorm2d(features), nn.ReLU(inplace=True)))
self.layers.append(nn.Conv2d(in_channels=features, out_channels=channels, kernel_size=kernel_size, padding=padding, bias=False))
def forward(self, x):
output = list()
saved_layers = list(range(0, (self.num_of_layers - 1), 3))[1:]
for (i, l) in enumerate(self.layers):
x = l(x)
if (i in saved_layers):
output.append(x)
output.append(x)
return output |
class NewLispLexer(RegexLexer):
    """Pygments lexer for the NewLisp language.

    BUGFIX: the ``url`` attribute was truncated to an unterminated string
    literal (``url = '``), which made the class body a syntax error; restored
    to the NewLisp project homepage.
    """
    name = 'NewLisp'
    url = 'http://www.newlisp.org/'
    aliases = ['newlisp']
    filenames = ['*.lsp', '*.nl', '*.kif']
    mimetypes = ['text/x-newlisp', 'application/x-newlisp']
    version_added = '1.5'
    flags = (re.IGNORECASE | re.MULTILINE)
    # Built-in functions and special operators recognized as keywords.
    builtins = ('^', '--', '-', ':', '!', '!=', '?', '', '*', '/', '&', '%', '+', '++', '<', '<<', '<=', '=', '>', '>=', '>>', '|', '~', '$', '$0', '$1', '$10', '$11', '$12', '$13', '$14', '$15', '$2', '$3', '$4', '$5', '$6', '$7', '$8', '$9', '$args', '$idx', '$it', '$main-args', 'abort', 'abs', 'acos', 'acosh', 'add', 'address', 'amb', 'and', 'append-file', 'append', 'apply', 'args', 'array-list', 'array?', 'array', 'asin', 'asinh', 'assoc', 'atan', 'atan2', 'atanh', 'atom?', 'base64-dec', 'base64-enc', 'bayes-query', 'bayes-train', 'begin', 'beta', 'betai', 'bind', 'binomial', 'bits', 'callback', 'case', 'catch', 'ceil', 'change-dir', 'char', 'chop', 'Class', 'clean', 'close', 'command-event', 'cond', 'cons', 'constant', 'context?', 'context', 'copy-file', 'copy', 'cos', 'cosh', 'count', 'cpymem', 'crc32', 'crit-chi2', 'crit-z', 'current-line', 'curry', 'date-list', 'date-parse', 'date-value', 'date', 'debug', 'dec', 'def-new', 'default', 'define-macro', 'define', 'delete-file', 'delete-url', 'delete', 'destroy', 'det', 'device', 'difference', 'directory?', 'directory', 'div', 'do-until', 'do-while', 'doargs', 'dolist', 'dostring', 'dotimes', 'dotree', 'dump', 'dup', 'empty?', 'encrypt', 'ends-with', 'env', 'erf', 'error-event', 'eval-string', 'eval', 'exec', 'exists', 'exit', 'exp', 'expand', 'explode', 'extend', 'factor', 'fft', 'file-info', 'file?', 'filter', 'find-all', 'find', 'first', 'flat', 'float?', 'float', 'floor', 'flt', 'fn', 'for-all', 'for', 'fork', 'format', 'fv', 'gammai', 'gammaln', 'gcd', 'get-char', 'get-float', 'get-int', 'get-long', 'get-string', 'get-url', 'global?', 'global', 'if-not', 'if', 'ifft', 'import', 'inc', 'index', 'inf?', 'int', 'integer?', 'integer', 'intersect', 'invert', 'irr', 'join', 'lambda-macro', 'lambda?', 'lambda', 'last-error', 'last', 'legal?', 'length', 'let', 'letex', 'letn', 'list?', 'list', 'load', 'local', 'log', 'lookup', 'lower-case', 'macro?', 'main-args', 'MAIN', 'make-dir', 'map', 'mat', 'match', 'max', 'member', 'min', 'mod', 'module', 'mul', 'multiply', 'NaN?', 'net-accept', 'net-close', 'net-connect', 'net-error', 'net-eval', 'net-interface', 'net-ipv', 'net-listen', 'net-local', 'net-lookup', 'net-packet', 'net-peek', 'net-peer', 'net-ping', 'net-receive-from', 'net-receive-udp', 'net-receive', 'net-select', 'net-send-to', 'net-send-udp', 'net-send', 'net-service', 'net-sessions', 'new', 'nil?', 'nil', 'normal', 'not', 'now', 'nper', 'npv', 'nth', 'null?', 'number?', 'open', 'or', 'ostype', 'pack', 'parse-date', 'parse', 'peek', 'pipe', 'pmt', 'pop-assoc', 'pop', 'post-url', 'pow', 'prefix', 'pretty-print', 'primitive?', 'print', 'println', 'prob-chi2', 'prob-z', 'process', 'prompt-event', 'protected?', 'push', 'put-url', 'pv', 'quote?', 'quote', 'rand', 'random', 'randomize', 'read', 'read-char', 'read-expr', 'read-file', 'read-key', 'read-line', 'read-utf8', 'reader-event', 'real-path', 'receive', 'ref-all', 'ref', 'regex-comp', 'regex', 'remove-dir', 'rename-file', 'replace', 'reset', 'rest', 'reverse', 'rotate', 'round', 'save', 'search', 'seed', 'seek', 'select', 'self', 'semaphore', 'send', 'sequence', 'series', 'set-locale', 'set-ref-all', 'set-ref', 'set', 'setf', 'setq', 'sgn', 'share', 'signal', 'silent', 'sin', 'sinh', 'sleep', 'slice', 'sort', 'source', 'spawn', 'sqrt', 'starts-with', 'string?', 'string', 'sub', 'swap', 'sym', 'symbol?', 'symbols', 'sync', 'sys-error', 'sys-info', 'tan', 'tanh', 'term', 'throw-error', 'throw', 'time-of-day', 'time', 'timer', 'title-case', 'trace-highlight', 'trace', 'transpose', 'Tree', 'trim', 'true?', 'true', 'unicode', 'unify', 'unique', 'unless', 'unpack', 'until', 'upper-case', 'utf8', 'utf8len', 'uuid', 'wait-pid', 'when', 'while', 'write', 'write-char', 'write-file', 'write-line', 'xfer-event', 'xml-error', 'xml-parse', 'xml-type-tags', 'zero?')
    # Symbol characters valid in newLISP identifiers, or a bracketed symbol.
    valid_name = '([\\w!$%&*+.,/<=>?^~|-])+|(\\[.*?\\])+'
    tokens = {'root': [('#!(.*?)$', Comment.Preproc), (';.*$', Comment.Single), ('#.*$', Comment.Single), ('\\s+', Whitespace), ('"(|\\\\[^\\\\]|[^"\\\\])*"', String), ('\\{', String, 'bracestring'), ('\\[text\\]*', String, 'tagstring'), ("('|:)", Operator), (words(builtins, suffix='\\b'), Keyword), (('(?<=\\()' + valid_name), Name.Variable), (valid_name, String.Symbol), ('(\\(|\\))', Punctuation)], 'bracestring': [('\\{', String, '#push'), ('\\}', String, '#pop'), ('[^{}]+', String)], 'tagstring': [('(?s)(.*?)(\\[/text\\])', String, '#pop')]}
class TestRealWorldLocate():
    """PDispersion model tests on the San Francisco store / census-tract data.

    Uses a 16-candidate-store x 205-census-tract network distance matrix; the
    known optimal solution sets below are specific to this data set.
    """

    def setup_method(self) -> None:
        # Load the precomputed network distance table shipped next to the tests.
        self.dirpath = os.path.join(os.path.dirname(__file__), './data/')
        network_distance = pandas.read_csv((self.dirpath + 'SF_network_distance_candidateStore_16_censusTract_205_new.csv'))
        # Pivot long-format distances into a (tract x store) cost matrix.
        ntw_dist_piv = network_distance.pivot_table(values='distance', index='DestinationName', columns='name')
        self.cost_matrix = ntw_dist_piv.to_numpy()
        # Candidate facility locations as a GeoDataFrame, sorted for stable ids.
        facility_points = pandas.read_csv((self.dirpath + 'SF_store_site_16_longlat.csv'))
        self.facility_points_gdf = geopandas.GeoDataFrame(facility_points, geometry=geopandas.points_from_xy(facility_points.long, facility_points.lat)).sort_values(by=['NAME']).reset_index()
        self.service_dist = 5000.0
        self.p_facility = 4

    def test_optimality_p_dispersion_from_cost_matrix(self):
        """p-dispersion on the cost matrix finds the known optimal 4 facilities."""
        pdispersion = PDispersion.from_cost_matrix(self.cost_matrix, self.p_facility)
        pdispersion = pdispersion.solve(pulp.PULP_CBC_CMD(msg=False))
        assert (pdispersion.problem.status == pulp.LpStatusOptimal)
        known_solution_set = ['y_0_', 'y_1_', 'y_14_', 'y_15_']
        observed_solution_set = [dv.name for dv in pdispersion.fac_vars if (dv.varValue == 1)]
        assert (known_solution_set == observed_solution_set)

    def test_infeasibility_p_dispersion_from_cost_matrix(self):
        """Requesting more facilities (17) than candidates (16) must fail to solve."""
        pdispersion = PDispersion.from_cost_matrix(self.cost_matrix, 17)
        with pytest.raises(RuntimeError, match='Model is not solved:'):
            pdispersion.solve(pulp.PULP_CBC_CMD(msg=False))

    def test_optimality_p_dispersion_from_geodataframe(self):
        """p-dispersion from geometry (Euclidean) yields its own known optimum."""
        pdispersion = PDispersion.from_geodataframe(self.facility_points_gdf, 'geometry', self.p_facility)
        pdispersion = pdispersion.solve(pulp.PULP_CBC_CMD(msg=False))
        assert (pdispersion.problem.status == pulp.LpStatusOptimal)
        known_solution_set = ['y_0_', 'y_2_', 'y_8_', 'y_14_']
        observed_solution_set = [dv.name for dv in pdispersion.fac_vars if (dv.varValue == 1)]
        assert (known_solution_set == observed_solution_set)

    def test_infeasibility_p_dispersion_from_geodataframe(self):
        """Same over-demand infeasibility, via the GeoDataFrame constructor."""
        pdispersion = PDispersion.from_geodataframe(self.facility_points_gdf, 'geometry', 17)
        with pytest.raises(RuntimeError, match='Model is not solved:'):
            pdispersion.solve(pulp.PULP_CBC_CMD(msg=False))
def cross_layer_equalization_manual():
    """Manual (step-by-step) AIMET cross-layer-equalization flow on ResNet-18.

    Steps: fold batch norms into the preceding convolutions, replace ReLU6
    with ReLU, scale consecutive conv pairs, then fold high biases using the
    recorded scaling factors and the saved BN parameters.
    """
    model = models.resnet18(pretrained=True)
    # CLE operates on inference graphs; switch off training-mode behavior.
    model = model.eval()
    # (conv, bn) pairs whose batch norms will be folded.
    layer_list = [(model.conv1, model.bn1), (model.layer1[0].conv1, model.layer1[0].bn1)]
    # Keep conv -> bn mapping; HighBiasFold needs the original BN parameters.
    bn_dict = {}
    for conv_bn in layer_list:
        bn_dict[conv_bn[0]] = conv_bn[1]
    batch_norm_fold.fold_given_batch_norms(model, layer_list)
    # ReLU6 would clip the rescaled activations, so replace it with ReLU.
    utils.replace_modules_of_type1_with_type2(model, torch.nn.ReLU6, torch.nn.ReLU)
    # Consecutive conv pairs to equalize; scaling factors come back per pair.
    consecutive_layer_list = [(model.conv1, model.layer1[0].conv1), (model.layer1[0].conv1, model.layer1[0].conv2)]
    scaling_factor_list = cross_layer_equalization.CrossLayerScaling.scale_cls_sets(consecutive_layer_list)
    ClsSetInfo = cross_layer_equalization.ClsSetInfo
    ClsPairInfo = cross_layer_equalization.ClsSetInfo.ClsSetLayerPairInfo
    # True flags indicate a ReLU sits between the pair (per AIMET's API).
    cls_set_info_list = [ClsSetInfo(ClsPairInfo(model.conv1, model.layer1[0].conv1, scaling_factor_list[0], True)), ClsSetInfo(ClsPairInfo(model.layer1[0].conv1, model.layer1[0].conv2, scaling_factor_list[1], True))]
    cross_layer_equalization.HighBiasFold.bias_fold(cls_set_info_list, bn_dict)
def sql_log(db_config, db_login_user, db_sql_content, db_sql_res, db_sql_res_thead=''):
    """Persist an executed SQL statement and its result to the DBLog table.

    Returns the new log record's id, or None if the insert failed (the
    failure is logged, never raised).
    """
    try:
        record = DBLog.objects.create(
            db_config=db_config,
            db_login_user=db_login_user,
            db_sql_content=db_sql_content,
            db_sql_res=db_sql_res,
            db_sql_res_thead=db_sql_res_thead,
        )
    except Exception as exc:
        # Logging a failure must never break the caller's SQL flow.
        logging.getLogger().error('SQL:{}'.format(exc))
        return None
    return record.id
@dataclass(frozen=True)
class FunctionInfo:
    """Static description of a function collected during type analysis.

    NOTE(review): the decorator was truncated to a bare ``(frozen=True)`` in
    the original text; ``@dataclass(frozen=True)`` is restored here, since
    the annotation-only class body is clearly a dataclass declaration
    (``dataclass`` is assumed to be imported at file level -- confirm).
    """

    async_kind: AsyncFunctionKind
    is_classmethod: bool
    is_staticmethod: bool
    is_decorated_coroutine: bool
    is_overload: bool
    is_override: bool
    is_evaluated: bool
    is_abstractmethod: bool
    # (decorator value, applied value, decorator AST node) triples.
    decorators: List[Tuple[(Value, Value, ast.AST)]]
    node: FunctionNode
    params: Sequence[ParamInfo]
    return_annotation: Optional[Value]
    potential_function: Optional[object]
    type_params: Sequence[TypeVarValue]

    def get_generator_yield_type(self, ctx: CanAssignContext) -> Value:
        """Element type yielded by the (async) iterable this function returns."""
        if (self.return_annotation is None):
            return AnyValue(AnySource.unannotated)
        if isinstance(self.node, ast.AsyncFunctionDef):
            iterable_val = is_async_iterable(self.return_annotation, ctx)
            if isinstance(iterable_val, CanAssignError):
                return AnyValue(AnySource.error)
            return iterable_val
        else:
            iterable_val = is_iterable(self.return_annotation, ctx)
            if isinstance(iterable_val, CanAssignError):
                return AnyValue(AnySource.error)
            return iterable_val

    def get_generator_send_type(self, ctx: CanAssignContext) -> Value:
        """Type accepted by ``send()`` on the returned (async) generator.

        Falls back to None for plain (async) iterables, which cannot receive
        sent values; Any on annotation errors.
        """
        if (self.return_annotation is None):
            return AnyValue(AnySource.unannotated)
        if isinstance(self.node, ast.AsyncFunctionDef):
            # Prefer the explicit AsyncGenerator[..., SendT] parameterization.
            tv_map = get_tv_map(AsyncGeneratorValue, self.return_annotation, ctx)
            if (not isinstance(tv_map, CanAssignError)):
                return tv_map.get(SendT, AnyValue(AnySource.generic_argument))
            iterable_val = is_async_iterable(self.return_annotation, ctx)
            if isinstance(iterable_val, CanAssignError):
                return AnyValue(AnySource.error)
            return KnownValue(None)
        else:
            tv_map = get_tv_map(GeneratorValue, self.return_annotation, ctx)
            if (not isinstance(tv_map, CanAssignError)):
                return tv_map.get(SendT, AnyValue(AnySource.generic_argument))
            iterable_val = is_iterable(self.return_annotation, ctx)
            if isinstance(iterable_val, CanAssignError):
                return AnyValue(AnySource.error)
            return KnownValue(None)

    def get_generator_return_type(self, ctx: CanAssignContext) -> Value:
        """Return type of the generator (Generator[..., ..., ReturnT]);
        None for plain iterables, Any on annotation errors."""
        if (self.return_annotation is None):
            return AnyValue(AnySource.unannotated)
        tv_map = get_tv_map(GeneratorValue, self.return_annotation, ctx)
        if (not isinstance(tv_map, CanAssignError)):
            return tv_map.get(ReturnT, AnyValue(AnySource.generic_argument))
        iterable_val = is_iterable(self.return_annotation, ctx)
        if isinstance(iterable_val, CanAssignError):
            return AnyValue(AnySource.error)
        return KnownValue(None)
@pytest.mark.parametrize('dims, args', [(2, {}), (3, {}), (2, {'how': 'pairs'}), (2, {'how': 'pairs_skewed'}), (2, {'how': 'before_after'}), (2, {'legend_iteration': 'all'}), (2, {'legend_iteration': 'grid_iteration'}), (2, {'legend_iteration': 1, 'how': 'before_after'}), (2, {'legend_iteration': 1, 'how': 'pairs'})])
def test_plot_qubism(dims, args):
    """plot_qubism returns a matplotlib Figure/Axes pair for 2- and 3-party kets.

    NOTE(review): the decorator was stripped to a bare ``.parametrize(...)``
    in the original text; restored as ``@pytest.mark.parametrize``.
    """
    # Pick a basis ket with as many parties as the parametrized dimension.
    if (dims == 2):
        state = qutip.ket('01')
    else:
        state = qutip.ket('010')
    (fig, ax) = qutip.plot_qubism(state, **args)
    plt.close()  # free the figure so parametrized runs don't accumulate
    assert isinstance(fig, mpl.figure.Figure)
    assert isinstance(ax, mpl.axes.Axes)
class TestConfigVersioning(unittest.TestCase):
    """Round-trip and auto-upgrade checks for versioned configs."""

    def test_upgrade_downgrade_consistency(self):
        # Downgrading to v0 and upgrading back must reproduce the original,
        # including keys the user added on top of the defaults.
        original = get_cfg()
        original.USER_CUSTOM = 1
        downgraded = downgrade_config(original, to_version=0)
        restored = upgrade_config(downgraded)
        self.assertTrue(restored == original)

    def _merge_cfg_str(self, cfg, merge_str):
        # merge_from_file wants a path, so spill the snippet to a temp file.
        tmp = tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False)
        try:
            tmp.write(merge_str)
            tmp.close()
            cfg.merge_from_file(tmp.name)
        finally:
            os.remove(tmp.name)
        return cfg

    def test_auto_upgrade(self):
        # Merging a v0 snippet must auto-upgrade it to the current version
        # while preserving its values and user-added keys.
        cfg = get_cfg()
        current_version = cfg.VERSION
        cfg.USER_CUSTOM = 1
        self._merge_cfg_str(cfg, _V0_CFG)
        self.assertEqual(cfg.MODEL.RPN.HEAD_NAME, 'TEST')
        self.assertEqual(cfg.VERSION, current_version)

    def test_guess_v1(self):
        # A snippet without an explicit version should be guessed as v1 and
        # upgraded to the latest.
        cfg = get_cfg()
        current_version = cfg.VERSION
        self._merge_cfg_str(cfg, _V1_CFG)
        self.assertEqual(cfg.VERSION, current_version)
class PageQuerySet(models.QuerySet):
    """QuerySet helpers for page models."""

    def prefetch_elements(self):
        # Prefetch every relation the model declares in ``prefetch_lookups``.
        lookups = self.model.prefetch_lookups
        return self.prefetch_related(*lookups)

    def filter_by_catalog(self, catalog):
        # Keep only pages that appear among the catalog's descendants.
        wanted_ids = [
            node.id
            for node in catalog.descendants
            if isinstance(node, self.model)
        ]
        return self.filter(id__in=wanted_ids)
@pytest.mark.parametrize(('test_type', 'test_status', 'expected'), [(TYPE_INFO, STATUS_DRAFT, ':abbr:`I (Informational, Draft)`'), (TYPE_INFO, STATUS_ACTIVE, ':abbr:`IA (Informational, Active)`'), (TYPE_INFO, STATUS_ACCEPTED, ':abbr:`IA (Informational, Accepted)`'), (TYPE_INFO, STATUS_DEFERRED, ':abbr:`ID (Informational, Deferred)`'), (TYPE_PROCESS, STATUS_ACCEPTED, ':abbr:`PA (Process, Accepted)`'), (TYPE_PROCESS, STATUS_ACTIVE, ':abbr:`PA (Process, Active)`'), (TYPE_PROCESS, STATUS_FINAL, ':abbr:`PF (Process, Final)`'), (TYPE_PROCESS, STATUS_SUPERSEDED, ':abbr:`PS (Process, Superseded)`'), (TYPE_PROCESS, STATUS_WITHDRAWN, ':abbr:`PW (Process, Withdrawn)`'), (TYPE_STANDARDS, STATUS_ACCEPTED, ':abbr:`SA (Standards Track, Accepted)`'), (TYPE_STANDARDS, STATUS_REJECTED, ':abbr:`SR (Standards Track, Rejected)`'), (TYPE_STANDARDS, STATUS_PROVISIONAL, ':abbr:`SP (Standards Track, Provisional)`')])
def test_abbreviate_type_status(test_type, test_status, expected):
    """PEP.shorthand renders the abbreviated :abbr: role for each type/status.

    NOTE(review): the decorator was stripped to a bare ``.parametrize(...)``
    in the original text; restored as ``@pytest.mark.parametrize``.
    """
    # Load a real PEP, then override its type/status for the case under test.
    pep = parser.PEP((PEP_ROOT / 'pep-0008.rst'))
    pep.pep_type = test_type
    pep.status = test_status
    assert (pep.shorthand == expected)
class IdentityResidualBlock(nn.Module):
    """Pre-activation identity-mapping residual block (inplace-ABN style).

    Supports a 2-conv basic variant (len(channels) == 2) and a 3-conv
    bottleneck variant (len(channels) == 3, optionally grouped).
    """

    def __init__(self, in_channels, channels, stride=1, dilation=1, groups=1, norm_act=ABN, dropout=None):
        """
        Args:
            in_channels: channels of the input tensor.
            channels: two values -> basic block, three values -> bottleneck;
                the last value is the block's output channel count.
            stride: stride of the first convolution (and of the projection
                shortcut, when one is needed).
            dilation: dilation of the 3x3 convolutions.
            groups: groups for the bottleneck 3x3 conv; must be 1 for the
                basic variant.
            norm_act: factory for the fused normalization+activation layer
                (default ABN).
            dropout: optional zero-arg callable returning a dropout module,
                inserted before the last convolution.
        """
        super(IdentityResidualBlock, self).__init__()
        if ((len(channels) != 2) and (len(channels) != 3)):
            raise ValueError('channels must contain either two or three values')
        if ((len(channels) == 2) and (groups != 1)):
            raise ValueError('groups > 1 are only valid if len(channels) == 3')
        is_bottleneck = (len(channels) == 3)
        # A 1x1 projection shortcut is needed whenever the spatial size or the
        # channel count changes across the block.
        need_proj_conv = ((stride != 1) or (in_channels != channels[(- 1)]))
        # Pre-activation: bn1 is applied to the input before both branches.
        self.bn1 = norm_act(in_channels)
        if (not is_bottleneck):
            layers = [('conv1', nn.Conv2d(in_channels, channels[0], 3, stride=stride, padding=dilation, bias=False, dilation=dilation)), ('bn2', norm_act(channels[0])), ('conv2', nn.Conv2d(channels[0], channels[1], 3, stride=1, padding=dilation, bias=False, dilation=dilation))]
            if (dropout is not None):
                # Insert dropout between bn2 and conv2.
                layers = ((layers[0:2] + [('dropout', dropout())]) + layers[2:])
        else:
            layers = [('conv1', nn.Conv2d(in_channels, channels[0], 1, stride=stride, padding=0, bias=False)), ('bn2', norm_act(channels[0])), ('conv2', nn.Conv2d(channels[0], channels[1], 3, stride=1, padding=dilation, bias=False, groups=groups, dilation=dilation)), ('bn3', norm_act(channels[1])), ('conv3', nn.Conv2d(channels[1], channels[2], 1, stride=1, padding=0, bias=False))]
            if (dropout is not None):
                # Insert dropout between bn3 and conv3.
                layers = ((layers[0:4] + [('dropout', dropout())]) + layers[4:])
        self.convs = nn.Sequential(OrderedDict(layers))
        if need_proj_conv:
            self.proj_conv = nn.Conv2d(in_channels, channels[(- 1)], 1, stride=stride, padding=0, bias=False)

    def forward(self, x):
        # The projection shortcut (when present) consumes the pre-activated
        # input; otherwise the raw input is the shortcut. The clone() guards
        # the input against the in-place add below.
        if hasattr(self, 'proj_conv'):
            bn1 = self.bn1(x)
            shortcut = self.proj_conv(bn1)
        else:
            shortcut = x.clone()
            bn1 = self.bn1(x)
        out = self.convs(bn1)
        out.add_(shortcut)
        return out
def make_json(clean_path, noisy_path, json_path):
    """Write clean.json / noisy.json file listings for a paired audio dataset.

    Each directory is listed, sorted, and passed through ``get_info`` before
    being dumped into ``json_path`` (created if missing).
    """
    def _collect(root):
        # Sorted listing keeps clean/noisy entries aligned by filename.
        entries = sorted(os.listdir(root))
        return get_info(root, entries)

    clean_info = _collect(clean_path)
    noisy_info = _collect(noisy_path)

    if not os.path.exists(json_path):
        os.makedirs(json_path)

    for filename, payload in (('clean.json', clean_info), ('noisy.json', noisy_info)):
        with open(opj(json_path, filename), 'w') as fp:
            json.dump(payload, fp, indent=4)
    print('---json is generated---')
class VirtualFile():
    """Registry of file-like objects addressed by integer ids, plus a
    client-side proxy that forwards read/write/close over ``connection``.

    BUGFIX: the registry methods take ``cls`` but had lost their
    ``@classmethod`` decorators (apparently stripped from the original
    text); without them ``VirtualFile.new(fp)`` binds ``fp`` to ``cls``
    and every call fails. The decorators are restored here.
    """

    _vfiles = {}      # id -> registered file object
    _counter = (- 1)  # last id handed out; first id is 0

    @classmethod
    def readfromid(cls, id, length):
        """Read ``length`` bytes from file ``id`` (everything when None)."""
        if (length is None):
            return cls._vfiles[id].read()
        else:
            return cls._vfiles[id].read(length)

    @classmethod
    def writetoid(cls, id, buffer):
        """Write ``buffer`` to file ``id``; returns the underlying write() result."""
        return cls._vfiles[id].write(buffer)

    @classmethod
    def closebyid(cls, id):
        """Close file ``id`` and remove it from the registry."""
        fp = cls._vfiles[id]
        del cls._vfiles[id]
        return fp.close()

    @classmethod
    def new(cls, fileobj):
        """Register ``fileobj`` and return its newly assigned id."""
        cls._counter += 1
        cls._vfiles[cls._counter] = fileobj
        return cls._counter

    def __init__(self, connection, id):
        # Client-side handle: operations are forwarded to the remote registry.
        self.connection = connection
        self.id = id

    def read(self, length=None):
        return self.connection.VirtualFile.readfromid(self.id, length)

    def write(self, buf):
        return self.connection.VirtualFile.writetoid(self.id, buf)

    def close(self):
        return self.connection.VirtualFile.closebyid(self.id)
def call(command, args, payload=None, action='print', filter=None):
    """Issue a GET request to ``args.url + command`` and handle the response.

    Args:
        command: path appended to the base URL from ``args``.
        args: parsed argparse namespace; its non-None public attributes become
            query parameters when no explicit ``payload`` is given.
        payload: explicit query parameters; ``args.auth_key`` is injected
            into it when present.
        action: 'print' to pretty-print the JSON body, 'return' to return it.
        filter: optional callable applied to the decoded JSON before printing.
            (Shadows the builtin ``filter``; kept for interface compatibility.)

    Exits the process (via sys.exit) with the response body on any non-200
    status. Raises on an unrecognized ``action``.
    """
    url = (args.url + command)
    if payload:
        if args.auth_key:
            payload['auth_key'] = args.auth_key
    else:
        # Build the query from all public, non-None argparse attributes,
        # skipping connection plumbing; 'email' is passed through unconverted
        # (it may be a list) and dropped entirely when it is an empty list.
        payload = {key: (getattr(args, key) if (key == 'email') else str(getattr(args, key))) for key in dir(args) if ((key not in ('host', 'port', 'func', 'url')) and (not key.startswith('_')) and (getattr(args, key) is not None) and (not ((key == 'email') and (getattr(args, key) == []))))}
    response = requests.get(url, params=payload)
    if (response.status_code != 200):
        sys.stderr.write('{} {}\n'.format(response.status_code, response.reason))
        try:
            # Prefer the structured error body; fall back to raw text.
            sys.exit(pprint.pformat(response.json()).strip())
        # NOTE(review): JSONError presumably aliases the JSON decode error
        # (json.JSONDecodeError / ValueError) -- confirm the file's imports.
        except JSONError:
            sys.exit(response.text)
    if (action == 'print'):
        try:
            content = response.json()
            if filter:
                content = filter(content)
            pprint.pprint(content)
        # Ignore a closed pipe (e.g. output piped into `head`).
        except BrokenPipeError:
            pass
    elif (action == 'return'):
        return response.json()
    else:
        raise Exception('Unrecognized action: {}'.format(action))
def extract_smis(library, smiles_col=0, title_line=True) -> List[str]:
    """Extract the SMILES column from a (possibly gzipped) CSV library file.

    Args:
        library: path to a CSV file; a ``.gz`` suffix triggers gzip reading.
        smiles_col: index of the column holding the SMILES strings.
        title_line: whether the first row is a header to skip.

    Returns:
        The SMILES strings, in file order. Rows too short to contain
        ``smiles_col`` (e.g. blank lines) are skipped.
    """
    if (Path(library).suffix == '.gz'):
        open_ = partial(gzip.open, mode='rt')
    else:
        open_ = open
    with open_(library) as fid:
        reader = csv.reader(fid)
        if title_line:
            next(reader)
        smis = []
        for row in reader:
            try:
                smis.append(row[smiles_col])
            # BUGFIX: indexing a short/empty row raises IndexError, not
            # ValueError -- the original clause never matched, so a single
            # blank line crashed the extraction instead of being skipped.
            except (IndexError, ValueError):
                continue
    return smis
@settings(suppress_health_check=[HealthCheck.function_scoped_fixture], deadline=None)
@given(args=arglists(st.integers()), kwargs=map_reduce_kwargs_iterators(), _parallel=(st.booleans() | st.none()))
@pytest.mark.filterwarnings('ignore:.*:pytest.PytestUnraisableExceptionWarning')
def test_map_reduce(ray_context, func, args, kwargs, _parallel):
    """MapReduce must agree with an in-process map + reduce for any mode.

    NOTE(review): the three decorators were stripped to bare argument lists
    in the original text; restored as hypothesis ``@settings``/``@given`` and
    ``@pytest.mark.filterwarnings`` based on their argument shapes -- confirm
    against the original suite.
    """
    # arglists() yields two equivalent copies so consuming one doesn't
    # exhaust the other.
    (iterables1, iterables2) = args

    def reduce_func(x, some_kwarg=None):
        # Proves reduce_kwargs are actually forwarded to the reducer.
        assert (some_kwarg is not None)
        return max(x, default=None)

    expected = reduce_func(map(func, *iterables1), some_kwarg=1)
    actual = parallel.MapReduce(func, *iterables2, reduce_func=reduce_func, reduce_kwargs=dict(some_kwarg=1), **kwargs, parallel=_parallel).run()
    assert (expected == actual)
class BuildUsageExamplesTests(unittest.TestCase):
    """Tests that build_usage_examples dispatches nodes to the right builders.

    NOTE(review): bare decorators were stripped from the original text:
    ``setUpClass``/``tearDownClass`` lost ``@classmethod`` and the bare
    dotted-path strings above each test are clearly ``@patch(...)`` mock
    decorators (restored bottom-up to match the ``n_p_builder, imp_builder,
    act_builder`` argument order); ``patch`` is assumed to be imported from
    ``unittest.mock`` -- confirm against the original file.
    """

    @classmethod
    def setUpClass(cls):
        cls.das = DummyArtifacts()
        cls.tempdir = cls.das.tempdir
        cls.pm = PluginManager()

    @classmethod
    def tearDownClass(cls):
        cls.das.free()

    @patch('qiime2.core.archive.provenance_lib.replay.build_action_usage')
    @patch('qiime2.core.archive.provenance_lib.replay.build_import_usage')
    @patch('qiime2.core.archive.provenance_lib.replay.build_no_provenance_node_usage')
    def test_build_usage_examples(self, n_p_builder, imp_builder, act_builder):
        # A normal v6 DAG: 3 imports, 2 actions, no provenance-less nodes.
        ns = NamespaceCollections()
        dag = self.das.concated_ints_with_md.dag
        cfg = ReplayConfig(use=ReplayPythonUsage(), use_recorded_metadata=False, pm=self.pm)
        build_usage_examples(dag, cfg, ns)
        n_p_builder.assert_not_called()
        self.assertEqual(imp_builder.call_count, 3)
        self.assertEqual(act_builder.call_count, 2)

    @patch('qiime2.core.archive.provenance_lib.replay.build_action_usage')
    @patch('qiime2.core.archive.provenance_lib.replay.build_import_usage')
    @patch('qiime2.core.archive.provenance_lib.replay.build_no_provenance_node_usage')
    def test_build_usage_examples_lone_v0(self, n_p_builder, imp_builder, act_builder):
        # A v0 archive has no provenance: only the no-provenance builder runs.
        ns = NamespaceCollections()
        uuid = self.das.table_v0.uuid
        with self.assertWarnsRegex(UserWarning, f'(:?)Art.*{uuid}.*prior.*incomplete'):
            dag = ProvDAG(self.das.table_v0.filepath)
        cfg = ReplayConfig(use=ReplayPythonUsage(), use_recorded_metadata=False, pm=self.pm)
        build_usage_examples(dag, cfg, ns)
        n_p_builder.assert_called_once()
        imp_builder.assert_not_called()
        act_builder.assert_not_called()

    @patch('qiime2.core.archive.provenance_lib.replay.build_action_usage')
    @patch('qiime2.core.archive.provenance_lib.replay.build_import_usage')
    @patch('qiime2.core.archive.provenance_lib.replay.build_no_provenance_node_usage')
    def test_build_usage_examples_mixed(self, n_p_builder, imp_builder, act_builder):
        # Mixed v0 + v6 archives exercise all three builders appropriately.
        mixed_dir = os.path.join(self.tempdir, 'mixed-dir')
        os.mkdir(mixed_dir)
        shutil.copy(self.das.table_v0.filepath, mixed_dir)
        shutil.copy(self.das.concated_ints_v6.filepath, mixed_dir)
        ns = NamespaceCollections()
        v0_uuid = self.das.table_v0.uuid
        with self.assertWarnsRegex(UserWarning, f'(:?)Art.*{v0_uuid}.*prior.*incomplete'):
            dag = ProvDAG(mixed_dir)
        cfg = ReplayConfig(use=ReplayPythonUsage(), use_recorded_metadata=False, pm=self.pm)
        build_usage_examples(dag, cfg, ns)
        n_p_builder.assert_called_once()
        self.assertEqual(imp_builder.call_count, 2)
        act_builder.assert_called_once()

    @patch('qiime2.core.archive.provenance_lib.replay.build_action_usage')
    @patch('qiime2.core.archive.provenance_lib.replay.build_import_usage')
    @patch('qiime2.core.archive.provenance_lib.replay.build_no_provenance_node_usage')
    def test_build_usage_examples_big(self, n_p_builder, imp_builder, act_builder):
        # Several v6 archives in one directory: many imports and actions.
        many_dir = os.path.join(self.tempdir, 'many-dir')
        os.mkdir(many_dir)
        shutil.copy(self.das.concated_ints_with_md.filepath, many_dir)
        shutil.copy(self.das.splitted_ints.filepath, many_dir)
        shutil.copy(self.das.pipeline_viz.filepath, many_dir)
        ns = NamespaceCollections()
        dag = ProvDAG(many_dir)
        cfg = ReplayConfig(use=ReplayPythonUsage(), use_recorded_metadata=False, pm=self.pm)
        build_usage_examples(dag, cfg, ns)
        n_p_builder.assert_not_called()
        self.assertEqual(imp_builder.call_count, 6)
        self.assertEqual(act_builder.call_count, 4)
class RandomCrop(object):
    """Randomly crop an image/label pair to a fixed (W, H) size.

    If the source is smaller than the target in either dimension, both the
    image (bilinear) and label (nearest) are scaled up first so the crop
    fits.
    """

    def __init__(self, size, *args, **kwargs):
        self.size = size

    def __call__(self, im_lb):
        image, label = im_lb['im'], im_lb['lb']
        assert image.size == label.size
        crop_w, crop_h = self.size
        src_w, src_h = image.size

        # Already the requested size: nothing to do.
        if (crop_w, crop_h) == (src_w, src_h):
            return dict(im=image, lb=label)

        # Upscale (by the larger required factor) so the crop window fits.
        if src_w < crop_w or src_h < crop_h:
            scale = float(crop_w) / src_w if src_w < src_h else float(crop_h) / src_h
            src_w, src_h = int(scale * src_w + 1), int(scale * src_h + 1)
            image = image.resize((src_w, src_h), Image.BILINEAR)
            label = label.resize((src_w, src_h), Image.NEAREST)

        # Pick a random top-left corner inside the valid region.
        left = int(random.random() * (src_w - crop_w))
        top = int(random.random() * (src_h - crop_h))
        box = (left, top, left + crop_w, top + crop_h)
        return dict(im=image.crop(box), lb=label.crop(box))
def fid_inception_v3():
    """Build the InceptionV3 network used for FID computation.

    Replaces several pooling blocks with the FIDInception* variants and loads
    the TF-ported FID weights from FID_WEIGHTS_URL.

    NOTE(review): num_classes=1008 matches the TF-slim checkpoint's class
    count; aux_logits is disabled since FID only needs pool features.
    """
    inception = models.inception_v3(num_classes=1008, aux_logits=False, pretrained=False)
    # Swap in the FID-specific blocks (they differ from torchvision's in the
    # pooling branch) before loading the matching weights.
    inception.Mixed_5b = FIDInceptionA(192, pool_features=32)
    inception.Mixed_5c = FIDInceptionA(256, pool_features=64)
    inception.Mixed_5d = FIDInceptionA(288, pool_features=64)
    inception.Mixed_6b = FIDInceptionC(768, channels_7x7=128)
    inception.Mixed_6c = FIDInceptionC(768, channels_7x7=160)
    inception.Mixed_6d = FIDInceptionC(768, channels_7x7=160)
    inception.Mixed_6e = FIDInceptionC(768, channels_7x7=192)
    inception.Mixed_7b = FIDInceptionE_1(1280)
    inception.Mixed_7c = FIDInceptionE_2(2048)
    state_dict = load_state_dict_from_url(FID_WEIGHTS_URL, progress=True)
    inception.load_state_dict(state_dict)
    return inception
def test_a_decorated_singleton_is_created_as_close_to_the_root_where_dependencies_fulfilled():
    """A @singleton class is instantiated at the shallowest injector whose
    bindings satisfy its dependencies, shared with that injector's
    descendants, and unconstructible elsewhere (CallError).

    NOTE(review): bare decorators appear to have been stripped from this
    text; ``@singleton`` (required by the test name and the identity
    assertions) and ``@inject`` (required for constructor injection of ``d``)
    are restored -- confirm against the original suite.
    """

    class NonInjectableD():
        def __init__(self, required) -> None:
            self.required = required

    @singleton
    class SingletonC():
        @inject
        def __init__(self, d: NonInjectableD):
            self.d = d

    parent_injector = Injector()
    child_injector_1 = parent_injector.create_child_injector()
    child_injector_2 = parent_injector.create_child_injector()
    child_injector_2_1 = child_injector_2.create_child_injector()

    # Only child_injector_2 can satisfy SingletonC's dependency on D.
    provided_d = NonInjectableD(required=True)
    child_injector_2.binder.bind(NonInjectableD, to=provided_d)

    # The singleton built at child_injector_2 is shared with its child.
    assert (child_injector_2_1.get(SingletonC) is child_injector_2.get(SingletonC))
    assert (child_injector_2.get(SingletonC).d is provided_d)

    # Injectors without a binding for D cannot construct the singleton.
    with pytest.raises(CallError):
        parent_injector.get(SingletonC)
    with pytest.raises(CallError):
        child_injector_1.get(SingletonC)
class VolumeGANDiscriminator(nn.Module):
    """Discriminator network used in VolumeGAN.

    Progressive-growing StyleGAN-style discriminator: a stack of
    resolution-halving blocks from `resolution` down to `init_res`, with
    optional 'resnet' or 'skip' architecture, minibatch-stddev layer at the
    lowest resolution, and optional label conditioning via an embedding plus
    a small mapping network.  `pth_to_tf_var_mapping` records the
    PyTorch-name -> TF-checkpoint-name correspondence for weight conversion.
    """
    def __init__(self, resolution=(- 1), init_res=4, image_channels=3, architecture='resnet', use_wscale=True, wscale_gain=1.0, lr_mul=1.0, mbstd_groups=4, mbstd_channels=1, fmaps_base=(32 << 10), fmaps_max=512, filter_kernel=(1, 3, 3, 1), conv_clamp=None, eps=1e-08, label_dim=0, embedding_dim=512, embedding_bias=True, embedding_use_wscale=True, embedding_lr_mul=1.0, normalize_embedding=True, mapping_layers=0, mapping_fmaps=512, mapping_use_wscale=True, mapping_lr_mul=0.1):
        super().__init__()
        # Validate construction parameters against the supported sets.
        if (resolution not in _RESOLUTIONS_ALLOWED):
            raise ValueError(f'''Invalid resolution: `{resolution}`!
Resolutions allowed: {_RESOLUTIONS_ALLOWED}.''')
        architecture = architecture.lower()
        if (architecture not in _ARCHITECTURES_ALLOWED):
            raise ValueError(f'''Invalid architecture: `{architecture}`!
Architectures allowed: {_ARCHITECTURES_ALLOWED}.''')
        # Record all hyper-parameters on the module for later introspection.
        self.init_res = init_res
        self.init_res_log2 = int(np.log2(init_res))
        self.resolution = resolution
        self.final_res_log2 = int(np.log2(resolution))
        self.image_channels = image_channels
        self.architecture = architecture
        self.use_wscale = use_wscale
        self.wscale_gain = wscale_gain
        self.lr_mul = lr_mul
        self.mbstd_groups = mbstd_groups
        self.mbstd_channels = mbstd_channels
        self.fmaps_base = fmaps_base
        self.fmaps_max = fmaps_max
        self.filter_kernel = filter_kernel
        self.conv_clamp = conv_clamp
        self.eps = eps
        self.label_dim = label_dim
        self.embedding_dim = embedding_dim
        self.embedding_bias = embedding_bias
        self.embedding_use_wscale = embedding_use_wscale
        self.embedding_lr_mul = embedding_lr_mul
        self.normalize_embedding = normalize_embedding
        self.mapping_layers = mapping_layers
        self.mapping_fmaps = mapping_fmaps
        self.mapping_use_wscale = mapping_use_wscale
        self.mapping_lr_mul = mapping_lr_mul
        self.pth_to_tf_var_mapping = {}
        # `lod` (level of detail) buffer drives progressive-growing inference.
        self.register_buffer('lod', torch.zeros(()))
        # Label conditioning: project labels through an embedding and an
        # optional normalization + mapping MLP.
        self.use_embedding = ((label_dim > 0) and (embedding_dim > 0))
        if self.use_embedding:
            self.embedding = DenseLayer(in_channels=label_dim, out_channels=embedding_dim, add_bias=embedding_bias, init_bias=0.0, use_wscale=embedding_use_wscale, wscale_gain=wscale_gain, lr_mul=embedding_lr_mul, activation_type='linear')
            self.pth_to_tf_var_mapping['embedding.weight'] = 'LabelEmbed/weight'
            if self.embedding_bias:
                self.pth_to_tf_var_mapping['embedding.bias'] = 'LabelEmbed/bias'
            if self.normalize_embedding:
                self.norm = PixelNormLayer(dim=1, eps=eps)
            for i in range(mapping_layers):
                in_channels = (embedding_dim if (i == 0) else mapping_fmaps)
                out_channels = (embedding_dim if (i == (mapping_layers - 1)) else mapping_fmaps)
                layer_name = f'mapping{i}'
                self.add_module(layer_name, DenseLayer(in_channels=in_channels, out_channels=out_channels, add_bias=True, init_bias=0.0, use_wscale=mapping_use_wscale, wscale_gain=wscale_gain, lr_mul=mapping_lr_mul, activation_type='lrelu'))
                self.pth_to_tf_var_mapping[f'{layer_name}.weight'] = f'Mapping{i}/weight'
                self.pth_to_tf_var_mapping[f'{layer_name}.bias'] = f'Mapping{i}/bias'
        # Convolutional backbone: one block per resolution level, from the
        # full resolution down to (and including) `init_res`.
        for res_log2 in range(self.final_res_log2, (self.init_res_log2 - 1), (- 1)):
            res = (2 ** res_log2)
            in_channels = self.get_nf(res)
            out_channels = self.get_nf((res // 2))
            block_idx = (self.final_res_log2 - res_log2)
            # fromRGB input layer for this resolution (used when growing).
            layer_name = f'input{block_idx}'
            self.add_module(layer_name, ConvLayer(in_channels=image_channels, out_channels=in_channels, kernel_size=1, add_bias=True, scale_factor=1, filter_kernel=None, use_wscale=use_wscale, wscale_gain=wscale_gain, lr_mul=lr_mul, activation_type='lrelu', conv_clamp=conv_clamp))
            self.pth_to_tf_var_mapping[f'{layer_name}.weight'] = f'{res}x{res}/FromRGB/weight'
            self.pth_to_tf_var_mapping[f'{layer_name}.bias'] = f'{res}x{res}/FromRGB/bias'
            if (res != self.init_res):
                # Regular block: conv (same res) + conv (downsample x2).
                layer_name = f'layer{(2 * block_idx)}'
                self.add_module(layer_name, ConvLayer(in_channels=in_channels, out_channels=in_channels, kernel_size=3, add_bias=True, scale_factor=1, filter_kernel=None, use_wscale=use_wscale, wscale_gain=wscale_gain, lr_mul=lr_mul, activation_type='lrelu', conv_clamp=conv_clamp))
                self.pth_to_tf_var_mapping[f'{layer_name}.weight'] = f'{res}x{res}/Conv0/weight'
                self.pth_to_tf_var_mapping[f'{layer_name}.bias'] = f'{res}x{res}/Conv0/bias'
                layer_name = f'layer{((2 * block_idx) + 1)}'
                self.add_module(layer_name, ConvLayer(in_channels=in_channels, out_channels=out_channels, kernel_size=3, add_bias=True, scale_factor=2, filter_kernel=filter_kernel, use_wscale=use_wscale, wscale_gain=wscale_gain, lr_mul=lr_mul, activation_type='lrelu', conv_clamp=conv_clamp))
                self.pth_to_tf_var_mapping[f'{layer_name}.weight'] = f'{res}x{res}/Conv1_down/weight'
                self.pth_to_tf_var_mapping[f'{layer_name}.bias'] = f'{res}x{res}/Conv1_down/bias'
                if (self.architecture == 'resnet'):
                    # 1x1 downsampling skip branch for the residual variant.
                    layer_name = f'residual{block_idx}'
                    self.add_module(layer_name, ConvLayer(in_channels=in_channels, out_channels=out_channels, kernel_size=1, add_bias=False, scale_factor=2, filter_kernel=filter_kernel, use_wscale=use_wscale, wscale_gain=wscale_gain, lr_mul=lr_mul, activation_type='linear', conv_clamp=None))
                    self.pth_to_tf_var_mapping[f'{layer_name}.weight'] = f'{res}x{res}/Skip/weight'
            else:
                # Final (lowest-resolution) block: minibatch-stddev, conv,
                # then a dense layer flattening the res x res feature map.
                self.mbstd = MiniBatchSTDLayer(groups=mbstd_groups, new_channels=mbstd_channels, eps=eps)
                layer_name = f'layer{(2 * block_idx)}'
                self.add_module(layer_name, ConvLayer(in_channels=(in_channels + mbstd_channels), out_channels=in_channels, kernel_size=3, add_bias=True, scale_factor=1, filter_kernel=None, use_wscale=use_wscale, wscale_gain=wscale_gain, lr_mul=lr_mul, activation_type='lrelu', conv_clamp=conv_clamp))
                self.pth_to_tf_var_mapping[f'{layer_name}.weight'] = f'{res}x{res}/Conv/weight'
                self.pth_to_tf_var_mapping[f'{layer_name}.bias'] = f'{res}x{res}/Conv/bias'
                layer_name = f'layer{((2 * block_idx) + 1)}'
                self.add_module(layer_name, DenseLayer(in_channels=((in_channels * res) * res), out_channels=in_channels, add_bias=True, init_bias=0.0, use_wscale=use_wscale, wscale_gain=wscale_gain, lr_mul=lr_mul, activation_type='lrelu'))
                self.pth_to_tf_var_mapping[f'{layer_name}.weight'] = f'{res}x{res}/Dense0/weight'
                self.pth_to_tf_var_mapping[f'{layer_name}.bias'] = f'{res}x{res}/Dense0/bias'
        # Output head: projects to embedding_dim (conditional) or a score.
        self.output = DenseLayer(in_channels=in_channels, out_channels=(embedding_dim if self.use_embedding else max(label_dim, 1)), add_bias=True, init_bias=0.0, use_wscale=use_wscale, wscale_gain=wscale_gain, lr_mul=lr_mul, activation_type='linear')
        self.pth_to_tf_var_mapping['output.weight'] = 'Output/weight'
        self.pth_to_tf_var_mapping['output.bias'] = 'Output/bias'
        if (self.architecture == 'skip'):
            self.register_buffer('filter', upfirdn2d.setup_filter(filter_kernel))
    def get_nf(self, res):
        # Feature-map count for a given resolution, capped at `fmaps_max`.
        return min((self.fmaps_base // res), self.fmaps_max)
    def forward(self, image, lod=None, label=None, fp16_res=None, impl='cuda'):
        """Score a batch of images (optionally conditioned on labels).

        `lod` selects the progressive-growing level (defaults to the buffered
        value); fractional lod blends adjacent fromRGB inputs.  `fp16_res`
        enables fp16 for all levels at or above that resolution.  Returns a
        dict with 'score', 'label' and (if conditional) 'embedding'.
        """
        expected_shape = (self.image_channels, self.resolution, self.resolution)
        if ((image.ndim != 4) or (image.shape[1:] != expected_shape)):
            raise ValueError(f'''The input tensor should be with shape [batch_size, channel, height, width], where `channel` equals to {self.image_channels}, `height`, `width` equal to {self.resolution}!
But `{image.shape}` is received!''')
        if (self.label_dim > 0):
            if (label is None):
                raise ValueError(f'Model requires an additional label (with dimension {self.label_dim}) as input, but no label is received!')
            batch_size = image.shape[0]
            if ((label.ndim != 2) or (label.shape != (batch_size, self.label_dim))):
                raise ValueError(f'''Input label should be with shape [batch_size, label_dim], where `batch_size` equals to that of images ({image.shape[0]}) and `label_dim` equals to {self.label_dim}!
But `{label.shape}` is received!''')
            label = label.to(dtype=torch.float32)
            if self.use_embedding:
                # Project the label into the embedding space used by the head.
                embed = self.embedding(label, impl=impl)
                if self.normalize_embedding:
                    embed = self.norm(embed)
                for i in range(self.mapping_layers):
                    embed = getattr(self, f'mapping{i}')(embed, impl=impl)
        if ((fp16_res is not None) and (self.resolution >= fp16_res)):
            image = image.to(torch.float16)
        lod = (self.lod.item() if (lod is None) else lod)
        x = self.input0(image, impl=impl)
        # Walk the blocks from full resolution down to init_res, handling
        # progressive-growing transitions (integer lod = stable level,
        # fractional lod = alpha-blend between adjacent fromRGB inputs).
        for res_log2 in range(self.final_res_log2, self.init_res_log2, (- 1)):
            res = (2 ** res_log2)
            if ((fp16_res is not None) and (res >= fp16_res)):
                x = x.to(torch.float16)
            else:
                x = x.to(torch.float32)
            idx = cur_lod = (self.final_res_log2 - res_log2)
            if (cur_lod <= lod < (cur_lod + 1)):
                # This level is the current entry point: take fromRGB directly.
                x = getattr(self, f'input{idx}')(image, impl=impl)
            elif ((cur_lod - 1) < lod < cur_lod):
                # Fading in: blend this level's fromRGB with the upstream path.
                alpha = (lod - np.floor(lod))
                y = getattr(self, f'input{idx}')(image, impl=impl)
                x = ((y * alpha) + (x * (1 - alpha)))
            if (lod < (cur_lod + 1)):
                if ((self.architecture == 'skip') and (idx > 0)):
                    # Skip architecture: add a downsampled fromRGB at every level.
                    image = upfirdn2d.downsample2d(image, self.filter, impl=impl)
                    if ((fp16_res is not None) and (res >= fp16_res)):
                        image = image.to(torch.float16)
                    else:
                        image = image.to(torch.float32)
                    y = getattr(self, f'input{idx}')(image, impl=impl)
                    x = (x + y)
                if (self.architecture == 'resnet'):
                    # Residual variant: sqrt(0.5) gains keep variance stable.
                    residual = getattr(self, f'residual{idx}')(x, runtime_gain=np.sqrt(0.5), impl=impl)
                    x = getattr(self, f'layer{(2 * idx)}')(x, impl=impl)
                    x = getattr(self, f'layer{((2 * idx) + 1)}')(x, runtime_gain=np.sqrt(0.5), impl=impl)
                    x = (x + residual)
                else:
                    x = getattr(self, f'layer{(2 * idx)}')(x, impl=impl)
                    x = getattr(self, f'layer{((2 * idx) + 1)}')(x, impl=impl)
            if (lod > cur_lod):
                # Levels above the active one only shrink the raw image.
                image = F.avg_pool2d(image, kernel_size=2, stride=2, padding=0)
        # Lowest-resolution head: stddev feature, conv, dense, final output.
        if (fp16_res is not None):
            x = x.to(torch.float32)
        if (self.architecture == 'skip'):
            image = upfirdn2d.downsample2d(image, self.filter, impl=impl)
            if (fp16_res is not None):
                image = image.to(torch.float32)
            y = getattr(self, f'input{idx}')(image, impl=impl)
            x = (x + y)
        x = self.mbstd(x)
        x = getattr(self, f'layer{((2 * idx) + 2)}')(x, impl=impl)
        x = getattr(self, f'layer{((2 * idx) + 3)}')(x, impl=impl)
        x = self.output(x, impl=impl)
        if self.use_embedding:
            # Projection discriminator: score = <features, label embedding>.
            x = (x * embed).sum(dim=1, keepdim=True)
            x = (x / np.sqrt(self.embedding_dim))
        elif (self.label_dim > 0):
            x = (x * label).sum(dim=1, keepdim=True)
        results = {'score': x, 'label': label}
        if self.use_embedding:
            results['embedding'] = embed
        return results
def var__global(self, clusters, n_chunk, segmentation_tg_op=None):
    """Compute global per-cluster sufficient statistics for SSNR estimation.

    Flattens `clusters` into parallel record/label lists, dispatches them to
    worker tasks in chunks of `n_chunk`, then accumulates each worker's
    partial 'sum', 'prod_sum' and 'mask_sum' dictionaries.

    Parameters
    ----------
    clusters : dict
        Maps a cluster label to a list of data-json records.
    n_chunk : int
        Number of records sent to each parallel task.
    segmentation_tg_op : optional
        Segmentation options forwarded unchanged to the workers.

    Returns
    -------
    dict with keys 'sum', 'prod_sum' and 'mask_sum', each mapping a cluster
    label to the accumulated statistic over all workers.
    """
    def _accumulate(store, key, value):
        # First occurrence stores the value; later ones accumulate in place
        # (works for numbers and numpy arrays alike).
        if key not in store:
            store[key] = value
        else:
            store[key] += value
    # Flatten clusters into aligned record / label lists.
    flat_records = []
    flat_labels = []
    for label in clusters:
        for record in clusters[label]:
            flat_records.append(record)
            flat_labels.append(label)
    # Dispatch fixed-size chunks to the worker pool.
    tasks = []
    for start in range(0, len(flat_records), n_chunk):
        tasks.append(self.runner.task(module='tomominer.statistics.ssnr', method='var__local', kwargs={'data_json': flat_records[start:(start + n_chunk)], 'labels': flat_labels[start:(start + n_chunk)], 'return_key': True, 'segmentation_tg_op': segmentation_tg_op}))
    sum_global = {}
    prod_sum = {}
    mask_sum = {}
    for res in self.runner.run__except(tasks):
        # Workers persist their partial statistics with pickle; the file must
        # be opened in BINARY mode -- the original text-mode open breaks
        # pickle.load under Python 3.
        with open(res.result['key'], 'rb') as f:
            partial = pickle.load(f)
        os.remove(res.result['key'])
        for label in partial['sum']:
            _accumulate(sum_global, label, partial['sum'][label])
            _accumulate(prod_sum, label, partial['prod_sum'][label])
            _accumulate(mask_sum, label, partial['mask_sum'][label])
    return {'sum': sum_global, 'prod_sum': prod_sum, 'mask_sum': mask_sum}
class FairseqOptimizer(object):
    """Base wrapper around a :class:`torch.optim.Optimizer`.

    Subclasses are expected to create ``self._optimizer`` and implement
    ``optimizer_config``.  The wrapper exposes common helpers (learning-rate
    get/set, gradient scaling/clipping, state (de)serialization) shared by
    all concrete optimizers.

    NOTE(review): throughout this class ``self.params`` is *iterated* and
    ``self.optimizer.param_groups`` is *attribute-accessed*, which only works
    if ``params``/``optimizer`` are properties; the ``@property`` /
    ``@staticmethod`` decorators were evidently lost and are restored here.
    """

    def __init__(self, args):
        super().__init__()
        self.args = args

    @staticmethod
    def add_args(parser):
        """Add optimizer-specific arguments to the parser (no-op by default)."""
        pass

    @property
    def optimizer(self):
        """Return the wrapped ``torch.optim.Optimizer`` instance.

        Raises:
            NotImplementedError: if the subclass never created ``_optimizer``.
            ValueError: if ``_optimizer`` is not a ``torch.optim.Optimizer``.
        """
        if (not hasattr(self, '_optimizer')):
            raise NotImplementedError
        if (not isinstance(self._optimizer, torch.optim.Optimizer)):
            raise ValueError('_optimizer must be an instance of torch.optim.Optimizer')
        return self._optimizer

    @property
    def optimizer_config(self):
        """Return a kwargs dict used to override optimizer args on load."""
        raise NotImplementedError

    @property
    def params(self):
        """Yield every parameter managed by the optimizer, across all groups."""
        for param_group in self.optimizer.param_groups:
            for p in param_group['params']:
                yield p

    def __getstate__(self):
        return self._optimizer.__getstate__()

    def get_lr(self):
        """Return the learning rate of the first parameter group."""
        return self.optimizer.param_groups[0]['lr']

    def set_lr(self, lr):
        """Set the learning rate on every parameter group."""
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = lr

    def state_dict(self):
        """Return the optimizer's state dict."""
        return self.optimizer.state_dict()

    def load_state_dict(self, state_dict, optimizer_overrides=None):
        """Load an optimizer state dict.

        If ``optimizer_overrides`` is a non-empty dict, its entries are
        applied on top of every parameter group after loading, so the current
        run's configuration wins over the checkpointed one.
        """
        self.optimizer.load_state_dict(state_dict)
        if ((optimizer_overrides is not None) and (len(optimizer_overrides) > 0)):
            for group in self.optimizer.param_groups:
                group.update(optimizer_overrides)

    def backward(self, loss):
        """Compute gradients of ``loss`` w.r.t. the managed parameters."""
        loss.backward()

    def multiply_grads(self, c):
        """Multiply every existing gradient by the constant ``c``."""
        for p in self.params:
            if (p.grad is not None):
                p.grad.data.mul_(c)

    def clip_grad_norm(self, max_norm):
        """Clip the global gradient norm; return the total norm.

        With ``max_norm <= 0`` no clipping happens -- only the norm is
        computed and returned.
        """
        if (max_norm > 0):
            return torch.nn.utils.clip_grad_norm_(self.params, max_norm)
        else:
            return math.sqrt(sum(((p.grad.data.norm() ** 2) for p in self.params if (p.grad is not None))))

    def step(self, closure=None):
        """Perform a single optimization step."""
        self.optimizer.step(closure)

    def zero_grad(self):
        """Clear the gradients of all managed parameters."""
        for p in self.params:
            p.grad = None
        self.optimizer.zero_grad()

    @property
    def supports_memory_efficient_fp16(self):
        """Whether the wrapped optimizer implements memory-efficient fp16."""
        if hasattr(self.optimizer, 'supports_memory_efficient_fp16'):
            return self.optimizer.supports_memory_efficient_fp16
        return False

    def average_params(self):
        """Hook for optimizers that maintain an averaged parameter copy."""
        pass
class ActionCompareTest(unittest.TestCase):
    """End-to-end tests of rdiff-backup's `compare` action.

    setUp creates three source trees: from1 (backed up at time 10000),
    from2 (backed up at time 20000) and from3 (same sizes as from2 but with
    different content in fileChanged, never backed up), so compares can be
    exercised against both increments and with metadata-identical changes.
    """
    def setUp(self):
        # Fixture area for this test; paths are bytes to match rdiff-backup's API.
        self.base_dir = os.path.join(comtst.abs_test_dir, b'action_compare')
        self.from1_struct = {'from1': {'contents': {'fileChanged': {'content': 'initial'}, 'fileOld': {}, 'fileUnchanged': {'content': 'unchanged'}}}}
        self.from1_path = os.path.join(self.base_dir, b'from1')
        self.from2_struct = {'from2': {'contents': {'fileChanged': {'content': 'modified'}, 'fileNew': {}, 'fileUnchanged': {'content': 'unchanged'}}}}
        self.from2_path = os.path.join(self.base_dir, b'from2')
        # from3's fileChanged has the same size as from2's ('samesize' vs
        # 'modified') so only data-comparing methods can spot the difference.
        self.from3_struct = {'from3': {'contents': {'fileChanged': {'content': 'samesize'}, 'fileNew': {}, 'fileUnchanged': {'content': 'unchanged'}}}}
        self.from3_path = os.path.join(self.base_dir, b'from3')
        fileset.create_fileset(self.base_dir, self.from1_struct)
        fileset.create_fileset(self.base_dir, self.from2_struct)
        fileset.create_fileset(self.base_dir, self.from3_struct)
        fileset.remove_fileset(self.base_dir, {'bak': {'type': 'dir'}})
        self.bak_path = os.path.join(self.base_dir, b'bak')
        self.success = False
        # Create the two backup increments the compare tests run against.
        comtst.rdiff_backup_action(True, True, self.from1_path, self.bak_path, ('--api-version', '201', '--current-time', '10000'), b'backup', ())
        comtst.rdiff_backup_action(True, True, self.from2_path, self.bak_path, ('--api-version', '201', '--current-time', '20000'), b'backup', ())
    def test_action_compare(self):
        """Compare sources against the repository with meta/hash/full methods."""
        # meta: from1 differs from the latest increment (from2) -> file warning.
        self.assertEqual(comtst.rdiff_backup_action(False, True, self.from1_path, self.bak_path, ('--api-version', '201'), b'compare', ('--method', 'meta')), Globals.RET_CODE_FILE_WARN)
        # meta: from2 matches the latest increment -> success.
        self.assertEqual(comtst.rdiff_backup_action(True, False, self.from2_path, self.bak_path, ('--api-version', '201'), b'compare', ('--method', 'meta')), 0)
        # --at 10000 selects the first increment, which from1 matches.
        self.assertEqual(comtst.rdiff_backup_action(False, True, self.from1_path, self.bak_path, ('--api-version', '201'), b'compare', ('--at', '10000')), 0)
        # --at 15000 still resolves to the 10000 increment, so from2 differs.
        self.assertEqual(comtst.rdiff_backup_action(True, False, self.from2_path, self.bak_path, ('--api-version', '201'), b'compare', ('--at', '15000')), Globals.RET_CODE_FILE_WARN)
        # hash method against the latest increment: from1 differs, from2 matches.
        self.assertEqual(comtst.rdiff_backup_action(False, True, self.from1_path, self.bak_path, ('--api-version', '201'), b'compare', ('--method', 'hash')), Globals.RET_CODE_FILE_WARN)
        self.assertEqual(comtst.rdiff_backup_action(True, False, self.from2_path, self.bak_path, ('--api-version', '201'), b'compare', ('--at', 'now', '--method', 'hash')), 0)
        # hash spots from3's same-size content change; check the parsable output.
        self.assertEqual(comtst.rdiff_backup_action(False, False, self.from3_path, self.bak_path, ('--api-version', '201', '--parsable', '-v2'), b'compare', ('--method', 'hash'), return_stdout=True), b'---\n- path: fileChanged\n  reason: metadata the same, data changed\n...\n\n')
        # full method: byte-by-byte comparison against both increments.
        self.assertEqual(comtst.rdiff_backup_action(False, True, self.from1_path, self.bak_path, ('--api-version', '201'), b'compare', ('--method', 'full')), Globals.RET_CODE_FILE_WARN)
        self.assertEqual(comtst.rdiff_backup_action(True, False, self.from1_path, self.bak_path, ('--api-version', '201'), b'compare', ('--at', '1B', '--method', 'full')), 0)
        self.assertEqual(comtst.rdiff_backup_action(True, True, self.from3_path, self.bak_path, ('--api-version', '201', '--parsable', '-v2'), b'compare', ('--method', 'full'), return_stdout=True), b'---\n- path: fileChanged\n  reason: metadata the same, data changed\n...\n\n')
        self.success = True
    def tearDown(self):
        # Only clean up after a fully successful run, so failures leave the
        # fixture trees behind for inspection.
        if self.success:
            fileset.remove_fileset(self.base_dir, self.from1_struct)
            fileset.remove_fileset(self.base_dir, self.from2_struct)
            fileset.remove_fileset(self.base_dir, self.from3_struct)
            fileset.remove_fileset(self.base_dir, {'bak': {'type': 'dir'}})
def sort_all_auto_mappings(overwrite: bool=False):
    """Sort the auto mappings in every ``.py`` file of the auto module.

    With ``overwrite`` set the files are fixed in place; otherwise a
    ``ValueError`` is raised listing the files whose mappings need sorting.
    """
    module_files = [
        os.path.join(PATH_TO_AUTO_MODULE, entry)
        for entry in os.listdir(PATH_TO_AUTO_MODULE)
        if entry.endswith('.py')
    ]
    changed = [sort_auto_mapping(path, overwrite=overwrite) for path in module_files]
    # In check-only mode, report every file that would be rewritten.
    if overwrite or not any(changed):
        return
    failures = [path for path, needs_sort in zip(module_files, changed) if needs_sort]
    raise ValueError(f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix this.")
class DeliveryBase(DeliveryNamedTuple):
    """Treatment-delivery data (monitor units, gantry, collimator, MLC, jaw)
    stored as a named tuple of aligned per-control-point sequences.

    Provides merging of multiple deliveries, masking/extraction by gantry
    angle, and related helpers.  NOTE(review): several defs below take ``cls``
    as first argument without a visible ``@classmethod``, and the bare
    ``_cache()`` lines look like stripped decorators -- confirm against the
    original source.
    """
    def mu(self):
        # Alias for the monitor_units field.
        return self.monitor_units
    def combine(cls, *args):
        # Build one delivery per argument tuple and merge them all.
        # NOTE(review): takes `cls` first -- likely a stripped @classmethod.
        first = cls(*args[0])
        if (len(args) == 1):
            return first
        return first.merge(*args[1:])
    def merge(self: DeliveryGeneric, *args: DeliveryGeneric) -> DeliveryGeneric:
        """Concatenate this delivery with the given ones, field by field,
        rebuilding monitor_units as a cumulative sum so it stays monotone
        across the joins (negative jumps at boundaries are zeroed)."""
        cls = type(self)
        separate: List[DeliveryGeneric] = ([self] + [*args])
        collection: Dict[(str, Tuple)] = {}
        for delivery_data in separate:
            for field in delivery_data._fields:
                try:
                    collection[field] = np.concatenate([collection[field], getattr(delivery_data, field)], axis=0)
                except KeyError:
                    # First delivery seeds the collection.
                    collection[field] = getattr(delivery_data, field)
        # Re-derive monotone cumulative monitor units across the merge points.
        mu = np.concatenate([[0], np.diff(collection['monitor_units'])])
        mu[(mu < 0)] = 0
        collection['monitor_units'] = np.cumsum(mu)
        merged = cls(**collection)
        return merged
    def __new__(cls, *args, **kwargs):
        # Normalise every field to (nested) tuples so instances are hashable.
        new_args = (to_tuple(arg) for arg in args)
        new_kwargs = {key: to_tuple(item) for (key, item) in kwargs.items()}
        return super().__new__(cls, *new_args, **new_kwargs)
    def _empty(cls: Type[DeliveryGeneric]) -> DeliveryGeneric:
        # Delivery with zero control points (empty MLC/jaw nesting preserved).
        # NOTE(review): takes `cls` first -- likely a stripped @classmethod.
        return cls(tuple(), tuple(), tuple(), tuple((tuple((tuple(), tuple())),)), tuple((tuple(), tuple())))
    _cache()
    def _filter_cps(self):
        # Drop control points deemed irrelevant by the helper.
        cls = type(self)
        return cls(*remove_irrelevant_control_points(*self))
    _cache()
    def _mask_by_gantry(self, angles: Union[(Tuple, float, int)], gantry_tolerance=3, allow_missing_angles=False):
        """Split the delivery into one sub-delivery per requested gantry
        angle (each within `gantry_tolerance` degrees)."""
        try:
            # Accept either a single angle or an iterable of angles.
            _ = iter(angles)
            iterable_angles = tuple(angles)
        except TypeError:
            iterable_angles = tuple((angles,))
        masks = self._gantry_angle_masks(iterable_angles, gantry_tolerance, allow_missing_angles=allow_missing_angles)
        all_masked_delivery_data = tuple((self._apply_mask_to_delivery_data(mask) for mask in masks))
        return all_masked_delivery_data
    _cache()
    def _metersets(self, gantry_angles, gantry_tolerance):
        # Final cumulative monitor units for each gantry angle's sub-delivery;
        # empty sub-deliveries are skipped.
        all_masked_delivery_data = self._mask_by_gantry(gantry_angles, gantry_tolerance, allow_missing_angles=True)
        metersets = []
        for delivery_data in all_masked_delivery_data:
            try:
                metersets.append(delivery_data.monitor_units[(- 1)])
            except IndexError:
                continue
        return tuple(metersets)
    def _extract_one_gantry_angle(self: DeliveryGeneric, gantry_angle, gantry_tolerance=3) -> DeliveryGeneric:
        # Sub-delivery for a single gantry angle.
        near_angle = self._gantry_angle_mask(gantry_angle, gantry_tolerance)
        return self._apply_mask_to_delivery_data(near_angle)
    def _gantry_angle_masks(self, gantry_angles, gantry_tol, allow_missing_angles=False):
        """Boolean masks (one per angle) over the control points; validates
        that each angle matches one contiguous run and that all control
        points are covered exactly once (unless missing angles are allowed)."""
        masks = [self._gantry_angle_mask(gantry_angle, gantry_tol) for gantry_angle in gantry_angles]
        for mask in masks:
            if np.all((mask == 0)):
                continue
            # A single contiguous True run has exactly two 0/1 transitions.
            is_duplicate_gantry_angles = (np.sum(np.abs(np.diff(np.concatenate([[0], mask, [0]])))) != 2)
            if is_duplicate_gantry_angles:
                raise ValueError('Duplicate gantry angles not yet supported')
        try:
            assert np.all((np.sum(masks, axis=0) == 1)), 'Not all beams were captured by the gantry tolerance of {}'.format(gantry_tol)
        except AssertionError:
            if (not allow_missing_angles):
                # Report which recorded angles fell outside every tolerance window.
                print('Allowable gantry angles = {}'.format(gantry_angles))
                gantry = np.array(self.gantry, copy=False)
                out_of_tolerance = np.unique(gantry[(np.sum(masks, axis=0) == 0)]).tolist()
                print('The gantry angles out of tolerance were {}'.format(out_of_tolerance))
                raise
        return masks
    def _gantry_angle_mask(self, gantry_angle, gantry_angle_tol):
        # Control points within tolerance of the angle; must be contiguous.
        near_angle = (np.abs((np.array(self.gantry) - gantry_angle)) <= gantry_angle_tol)
        assert np.all((np.diff(np.where(near_angle)[0]) == 1))
        return near_angle
    def _apply_mask_to_delivery_data(self: DeliveryGeneric, mask) -> DeliveryGeneric:
        """New delivery keeping only masked control points; monitor units
        (field 0) are re-zeroed to start at the first kept control point."""
        cls = type(self)
        new_delivery_data = []
        for item in self:
            new_delivery_data.append(np.array(item)[mask])
        new_monitor_units = new_delivery_data[0]
        try:
            first_monitor_unit_item = new_monitor_units[0]
        except IndexError:
            # Empty mask: nothing to re-zero.
            return cls(*new_delivery_data)
        new_delivery_data[0] = np.round((np.array(new_delivery_data[0], copy=False) - first_monitor_unit_item), decimals=7)
        return cls(*new_delivery_data)
    def _strip_delivery_data(self: DeliveryGeneric, skip_size) -> DeliveryGeneric:
        # Down-sample every field, keeping every skip_size-th control point.
        cls = type(self)
        new_delivery_data = []
        for item in self:
            new_delivery_data.append(np.array(item)[::skip_size])
        return cls(*new_delivery_data)
def test_parallel_xeb_fidelities() -> None:
    """Run parallel XEB on a simulated 3-qubit line and check that the
    unoptimized fidelities of both sqrt-iSWAP pairs come out near 1
    (the simulator is noiseless, so any drop is statistical)."""
    sampler = cirq.Simulator()
    # Three qubits in a line; the two interaction layers exercise the two
    # adjacent pairs separately.
    qubit_locs = [(0, 0), (0, 1), (0, 2)]
    qubits = [cirq.GridQubit(*idx) for idx in qubit_locs]
    int_layers = [{(qubit_locs[0], qubit_locs[1])}, {(qubit_locs[1], qubit_locs[2])}]
    xeb_configs = [[cirq.Moment([(cirq.ISWAP(qubits[0], qubits[1]) ** 0.5)])], [cirq.Moment([(cirq.ISWAP(qubits[1], qubits[2]) ** 0.5)])]]
    num_circuits = 10
    # Cycle depths 3, 8, 13, 18.
    num_num_cycles = range(3, 23, 5)
    num_cycles = len(num_num_cycles)
    all_bits = []
    all_sq_gates = []
    for xeb_config in xeb_configs:
        bits = []
        sq_gates = []
        for i in range(num_circuits):
            # Seeded per circuit index so the test is deterministic.
            (circuits, sq_gate_indices_i) = build_xeb_circuits(qubits, num_num_cycles, xeb_config, random_seed=i)
            sq_gates.append(sq_gate_indices_i)
            for c in circuits:
                c.append(cirq.measure(*qubits, key='z'))
            sweep_params = [{} for _ in range(len(circuits))]
            job = sampler.run_batch(programs=circuits, params_list=sweep_params, repetitions=5000)
            # One bitstring array per cycle depth.
            bits.append([job[j][0].measurements['z'] for j in range(num_cycles)])
        all_bits.append(bits)
        all_sq_gates.append(sq_gates)
    # Initial guess: ideal sqrt-iSWAP angles (theta = -pi/4, no deltas/phi).
    fsim_angles_init = {'theta': ((- 0.25) * np.pi), 'delta_plus': 0, 'delta_minus_off_diag': 0, 'delta_minus_diag': 0, 'phi': 0.0}
    xeb_results = parallel_xeb_fidelities(qubit_locs, num_num_cycles, all_bits, all_sq_gates, fsim_angles_init, interaction_sequence=int_layers, gate_to_fit='sqrt-iswap', num_restarts=1, num_points=4, print_fitting_progress=False)
    # Fidelity at the second cycle depth for each pair should be ~1.
    f_01 = xeb_results.raw_data[(qubit_locs[0], qubit_locs[1])].fidelity_unoptimized[1]
    f_12 = xeb_results.raw_data[(qubit_locs[1], qubit_locs[2])].fidelity_unoptimized[1]
    assert np.allclose(f_01, 1, atol=0.1)
    assert np.allclose(f_12, 1, atol=0.1)
def merge_dict(to_update: dict, other_dict: dict) -> None:
    """Recursively merge ``other_dict`` into ``to_update`` in place.

    When a key maps to a mapping on both sides the two are merged key by
    key; any other value from ``other_dict`` simply replaces (or adds to)
    the one in ``to_update``.  Returns ``None``.
    """
    for key, new_value in other_dict.items():
        current = to_update.get(key, None)
        if isinstance(new_value, Mapping) and isinstance(current, Mapping):
            # Both sides are mappings: descend instead of overwriting.
            merge_dict(to_update[key], new_value)
        else:
            to_update[key] = new_value
class TLibraryValueCompletion(TestCase):
    """Tests for LibraryValueCompletion attached to a Gtk.Entry."""
    def setUp(self):
        # Global config must be initialised before widget construction.
        config.init()
    def tearDown(self):
        config.quit()
    def test_ctr(self):
        # Attaching the completion to an entry must wire both directions:
        # completion -> entry and entry -> completion.
        w = LibraryValueCompletion('artist', SongLibrary())
        e = Gtk.Entry()
        e.set_completion(w)
        self.assertEqual(w.get_entry(), e)
        self.assertEqual(e.get_completion(), w)
        e.destroy()
class TestMakeConfirmationTask(TestIncidents):
    """Tests for the confirmation task created by the Incidents cog."""
    def test_make_confirmation_task_check(self):
        # The `check` predicate handed to `bot.wait_for` must accept only
        # payloads whose message_id matches the message the task was made for.
        with no_create_task():
            self.cog_instance.make_confirmation_task(MockMessage(id=123))
        self.cog_instance.bot.wait_for.assert_called_once()
        created_check = self.cog_instance.bot.wait_for.call_args.kwargs['check']
        self.assertTrue(created_check(payload=MagicMock(message_id=123)))
        self.assertFalse(created_check(payload=MagicMock(message_id=0)))
class Migration(migrations.Migration):
    # Auto-generated Django migration: alters HotelRoomReservation.user into a
    # PROTECT-on-delete foreign key to the user model with no reverse accessor
    # (related_name='+').  Generated migrations are left as-is by convention.
    dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('hotels', '0006_remove_hotelroomreservation_user_id_and_more')]
    operations = [migrations.AlterField(model_name='hotelroomreservation', name='user', field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='user'))]
def parse_args():
    """Parse the command-line arguments for (m)LUKE token-classification
    fine-tuning and validate the data-file extensions.

    Returns:
        argparse.Namespace with all training options.
    """
    parser = argparse.ArgumentParser(description='Finetune (m)LUKE on a token classification task (such as NER) with the accelerate library')
    # Data source: either a hub dataset name or local csv/json files.
    parser.add_argument('--dataset_name', type=str, default=None, help='The name of the dataset to use (via the datasets library).')
    parser.add_argument('--dataset_config_name', type=str, default=None, help='The configuration name of the dataset to use (via the datasets library).')
    parser.add_argument('--train_file', type=str, default=None, help='A csv or a json file containing the training data.')
    parser.add_argument('--validation_file', type=str, default=None, help='A csv or a json file containing the validation data.')
    parser.add_argument('--text_column_name', type=str, default=None, help='The column name of text to input in the file (a csv or JSON file).')
    parser.add_argument('--label_column_name', type=str, default=None, help='The column name of label to input in the file (a csv or JSON file).')
    # Tokenization / sequence-length options.
    parser.add_argument('--max_length', type=int, default=128, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded if `--pad_to_max_length` is passed.')
    parser.add_argument('--max_entity_length', type=int, default=32, help='The maximum total input entity length after tokenization (Used only for (M)Luke models). Sequences longer than this will be truncated, sequences shorter will be padded if `--pad_to_max_length` is passed.')
    parser.add_argument('--max_mention_length', type=int, default=30, help='The maximum total input mention length after tokenization (Used only for (M)Luke models). Sequences longer than this will be truncated, sequences shorter will be padded if `--pad_to_max_length` is passed.')
    parser.add_argument('--pad_to_max_length', action='store_true', help='If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.')
    # Model / tokenizer selection.
    parser.add_argument('--model_name_or_path', type=str, help='Path to pretrained model or model identifier from huggingface.co/models.', required=True)
    parser.add_argument('--config_name', type=str, default=None, help='Pretrained config name or path if not the same as model_name')
    parser.add_argument('--tokenizer_name', type=str, default=None, help='Pretrained tokenizer name or path if not the same as model_name')
    # Optimization hyper-parameters.
    parser.add_argument('--per_device_train_batch_size', type=int, default=8, help='Batch size (per device) for the training dataloader.')
    parser.add_argument('--per_device_eval_batch_size', type=int, default=8, help='Batch size (per device) for the evaluation dataloader.')
    parser.add_argument('--learning_rate', type=float, default=5e-05, help='Initial learning rate (after the potential warmup period) to use.')
    parser.add_argument('--weight_decay', type=float, default=0.0, help='Weight decay to use.')
    parser.add_argument('--num_train_epochs', type=int, default=3, help='Total number of training epochs to perform.')
    parser.add_argument('--max_train_steps', type=int, default=None, help='Total number of training steps to perform. If provided, overrides num_train_epochs.')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
    parser.add_argument('--lr_scheduler_type', type=SchedulerType, default='linear', help='The scheduler type to use.', choices=['linear', 'cosine', 'cosine_with_restarts', 'polynomial', 'constant', 'constant_with_warmup'])
    parser.add_argument('--num_warmup_steps', type=int, default=0, help='Number of steps for the warmup in the lr scheduler.')
    # Output / misc.
    parser.add_argument('--output_dir', type=str, default=None, help='Where to store the final model.')
    parser.add_argument('--seed', type=int, default=None, help='A seed for reproducible training.')
    parser.add_argument('--label_all_tokens', action='store_true', help='Setting labels of all special tokens to -100 and thus PyTorch will ignore them.')
    parser.add_argument('--return_entity_level_metrics', action='store_true', help='Indication whether entity level metrics are to be returner.')
    parser.add_argument('--task_name', type=str, default='ner', choices=['ner', 'pos', 'chunk'], help='The name of the task.')
    parser.add_argument('--debug', action='store_true', help='Activate debug mode and run training only with a subset of data.')
    # Hub upload options.
    parser.add_argument('--push_to_hub', action='store_true', help='Whether or not to push the model to the Hub.')
    parser.add_argument('--hub_model_id', type=str, help='The name of the repository to keep in sync with the local `output_dir`.')
    parser.add_argument('--hub_token', type=str, help='The token to use to push to the Model Hub.')
    args = parser.parse_args()
    # NOTE(review): `task_name` defaults to 'ner', so this first branch can
    # never trigger as written -- confirm whether the check should instead
    # require a dataset_name or train/validation file.
    if ((args.task_name is None) and (args.train_file is None) and (args.validation_file is None)):
        raise ValueError('Need either a task name or a training/validation file.')
    else:
        # Validate local data files by extension.
        if (args.train_file is not None):
            extension = args.train_file.split('.')[(- 1)]
            assert (extension in ['csv', 'json']), '`train_file` should be a csv or a json file.'
        if (args.validation_file is not None):
            extension = args.validation_file.split('.')[(- 1)]
            assert (extension in ['csv', 'json']), '`validation_file` should be a csv or a json file.'
    if args.push_to_hub:
        assert (args.output_dir is not None), 'Need an `output_dir` to create a repo when `--push_to_hub` is passed.'
    return args
class Integer(Value):
    """An integer literal IR value.

    For tagged integer rtypes (short int / int primitives) the payload is
    stored pre-shifted left by one bit (i.e. ``value * 2``), matching the
    runtime's tagged representation; other rtypes store the raw value.
    """
    def __init__(self, value: int, rtype: RType=short_int_rprimitive, line: int=(- 1)) -> None:
        is_tagged = is_short_int_rprimitive(rtype) or is_int_rprimitive(rtype)
        # Tagged representation: logical value shifted into the upper bits.
        self.value = value * 2 if is_tagged else value
        self.type = rtype
        self.line = line
    def numeric_value(self) -> int:
        """Return the logical (untagged) integer value."""
        is_tagged = is_short_int_rprimitive(self.type) or is_int_rprimitive(self.type)
        return self.value // 2 if is_tagged else self.value
_model
# NOTE(review): the bare `_model` above looks like a stripped decorator
# (e.g. `@register_model`) -- confirm against the original source.
def skresnext50_32x4d(pretrained=False, **kwargs):
    """Construct a Select-Kernel ResNeXt50-32x4d.

    Uses the same SK block configuration as SKNet-50 apart from
    ``split_input=False`` (and no zero-init of the last BN gamma).
    """
    selective_kernel_args = dict(rd_ratio=(1 / 16), rd_divisor=32, split_input=False)
    return _create_skresnet(
        'skresnext50_32x4d',
        pretrained,
        block=SelectiveKernelBottleneck,
        layers=[3, 4, 6, 3],
        cardinality=32,
        base_width=4,
        block_args=dict(sk_kwargs=selective_kernel_args),
        zero_init_last=False,
        **kwargs,
    )
class EncryptedPassportElement(TelegramObject):
    """A Telegram Passport element (document, address, phone number, ...)
    shared with the bot, with helpers to build it from (optionally
    encrypted) JSON payloads.

    NOTE(review): ``de_json`` and ``de_json_decrypted`` take ``cls`` as first
    argument without a visible ``@classmethod`` decorator -- it appears to
    have been stripped; confirm against the original source.
    """
    __slots__ = ('selfie', 'files', 'type', 'translation', 'email', 'hash', 'phone_number', 'reverse_side', 'front_side', 'data')
    def __init__(self, type: str, hash: str, data: Optional[Union[(PersonalDetails, IdDocumentData, ResidentialAddress)]]=None, phone_number: Optional[str]=None, email: Optional[str]=None, files: Optional[Sequence[PassportFile]]=None, front_side: Optional[PassportFile]=None, reverse_side: Optional[PassportFile]=None, selfie: Optional[PassportFile]=None, translation: Optional[Sequence[PassportFile]]=None, credentials: Optional['Credentials']=None, *, api_kwargs: Optional[JSONDict]=None):
        super().__init__(api_kwargs=api_kwargs)
        # Required fields.
        self.type: str = type
        # Optional per-type payloads; sequences are normalised to tuples.
        self.data: Optional[Union[(PersonalDetails, IdDocumentData, ResidentialAddress)]] = data
        self.phone_number: Optional[str] = phone_number
        self.email: Optional[str] = email
        self.files: Tuple[(PassportFile, ...)] = parse_sequence_arg(files)
        self.front_side: Optional[PassportFile] = front_side
        self.reverse_side: Optional[PassportFile] = reverse_side
        self.selfie: Optional[PassportFile] = selfie
        self.translation: Tuple[(PassportFile, ...)] = parse_sequence_arg(translation)
        self.hash: str = hash
        # Attributes used for equality comparison between instances.
        self._id_attrs = (self.type, self.data, self.phone_number, self.email, self.files, self.front_side, self.reverse_side, self.selfie)
        self._freeze()
    def de_json(cls, data: Optional[JSONDict], bot: 'Bot') -> Optional['EncryptedPassportElement']:
        """Build an instance from a JSON dict, deserialising nested
        PassportFile objects (without decryption)."""
        data = cls._parse_data(data)
        if (not data):
            return None
        data['files'] = (PassportFile.de_list(data.get('files'), bot) or None)
        data['front_side'] = PassportFile.de_json(data.get('front_side'), bot)
        data['reverse_side'] = PassportFile.de_json(data.get('reverse_side'), bot)
        data['selfie'] = PassportFile.de_json(data.get('selfie'), bot)
        data['translation'] = (PassportFile.de_list(data.get('translation'), bot) or None)
        return super().de_json(data=data, bot=bot)
    def de_json_decrypted(cls, data: Optional[JSONDict], bot: 'Bot', credentials: 'Credentials') -> Optional['EncryptedPassportElement']:
        """Build an instance from a JSON dict, decrypting the payload and
        files with the per-type secrets found in `credentials`."""
        if (not data):
            return None
        # phone_number/email elements carry no encrypted payload.
        if (data['type'] not in ('phone_number', 'email')):
            secure_data = getattr(credentials.secure_data, data['type'])
            if (secure_data.data is not None):
                # Decrypt the base64 payload unless it was already decoded.
                if (not isinstance(data['data'], dict)):
                    data['data'] = decrypt_json(b64decode(secure_data.data.secret), b64decode(secure_data.data.hash), b64decode(data['data']))
                # Parse the decrypted payload into the element-specific type.
                if (data['type'] == 'personal_details'):
                    data['data'] = PersonalDetails.de_json(data['data'], bot=bot)
                elif (data['type'] in ('passport', 'internal_passport', 'driver_license', 'identity_card')):
                    data['data'] = IdDocumentData.de_json(data['data'], bot=bot)
                elif (data['type'] == 'address'):
                    data['data'] = ResidentialAddress.de_json(data['data'], bot=bot)
            # Attach file credentials so the files can be downloaded/decrypted.
            data['files'] = (PassportFile.de_list_decrypted(data.get('files'), bot, secure_data.files) or None)
            data['front_side'] = PassportFile.de_json_decrypted(data.get('front_side'), bot, secure_data.front_side)
            data['reverse_side'] = PassportFile.de_json_decrypted(data.get('reverse_side'), bot, secure_data.reverse_side)
            data['selfie'] = PassportFile.de_json_decrypted(data.get('selfie'), bot, secure_data.selfie)
            data['translation'] = (PassportFile.de_list_decrypted(data.get('translation'), bot, secure_data.translation) or None)
        return super().de_json(data=data, bot=bot)
class StableSet(GraphOptimizationApplication):
    """Graph-optimization application for the maximum stable-set
    (maximum independent set) problem."""

    def to_quadratic_program(self) -> QuadraticProgram:
        """Build a QuadraticProgram: maximize the number of selected vertices,
        subject to at most one endpoint of every edge being selected."""
        mdl = Model(name='Stable set')
        node_count = self._graph.number_of_nodes()
        node_vars = {node: mdl.binary_var(name=f'x_{node}') for node in range(node_count)}
        # default every edge weight to 1, matching the other graph applications
        for edge in self._graph.edges:
            self._graph.edges[edge].setdefault('weight', 1)
        objective = mdl.sum(node_vars[node] for node in node_vars)
        for u, v in self._graph.edges:
            mdl.add_constraint(node_vars[u] + node_vars[v] <= 1)
        mdl.maximize(objective)
        return from_docplex_mp(mdl)

    def interpret(self, result: Union[(OptimizationResult, np.ndarray)]) -> List[int]:
        """Return the indices of the vertices chosen by ``result``."""
        solution = self._result_to_x(result)
        return [node for node, chosen in enumerate(solution) if chosen]

    def _draw_result(self, result: Union[(OptimizationResult, np.ndarray)], pos: Optional[Dict[(int, np.ndarray)]]=None) -> None:
        """Draw the graph with the selected vertices highlighted."""
        solution = self._result_to_x(result)
        nx.draw(self._graph, node_color=self._node_colors(solution), pos=pos, with_labels=True)

    def _node_colors(self, x: np.ndarray):
        """Red for selected nodes, dark grey for the rest."""
        return [('r' if (x[node] == 1) else 'darkgrey') for node in self._graph.nodes]
class RiverSplit(MultiSplitLink):
    """A river node that splits flow into several slots.

    Thin wrapper over MultiSplitLink: one extra slot is created for every
    split factor beyond the first; per-slot costs and max flows default to
    a single cost of 0.0 and an unbounded flow.
    """

    def __init__(self, model, *args, nsteps=1, **kwargs):
        split_factors = kwargs.pop('factors')
        # one extra slot per additional factor
        num_extra_slots = len(split_factors) - 1
        slot_costs = kwargs.pop('costs', [0.0])
        slot_max_flows = kwargs.pop('max_flows', [None])
        super().__init__(model, nsteps, *args, extra_slots=num_extra_slots, factors=split_factors, costs=slot_costs, max_flows=slot_max_flows, **kwargs)
class decoder_old(nn.Module):
    """Three-layer MLP decoder mapping a latent vector to 17 3-D keypoints.

    ``forward`` takes a tuple ``(input, _)`` (the second element is ignored)
    where ``input`` has shape (batch, in_dim) and returns a (batch, 17, 3)
    tensor of sigmoid-squashed coordinates. Note ``out_dim`` must equal 51
    (17 * 3) for the final reshape to succeed.
    """

    def __init__(self, in_dim=128, out_dim=(17 * 3), h_dim=128):
        # Bug fix: the original called super(decoder, self).__init__(),
        # referencing the class's pre-rename name and raising NameError.
        super().__init__()
        self.in_dim = in_dim
        self.h_dim = h_dim
        self.out_dim = out_dim
        self.fc1 = nn.Linear(in_dim, h_dim)
        self.relu1 = nn.ReLU(inplace=True)
        self.fc2 = nn.Linear(h_dim, h_dim)
        self.relu2 = nn.ReLU(inplace=True)
        self.fc3 = nn.Linear(h_dim, out_dim)
        self.sigmoid = nn.Sigmoid()

    def forward(self, input):
        # second tuple element (e.g. auxiliary state) is intentionally unused
        (input, _) = input
        bs = input.shape[0]
        d1 = self.relu1(self.fc1(input))
        d2 = self.relu2(self.fc2(d1))
        d3 = self.fc3(d2)
        out = self.sigmoid(d3)
        out = out.view(bs, 17, 3)
        return out
def sortlist(n, m):
    """Return the top-``m`` (x, y) pairs, sorted by descending y.

    Reads the module-level globals ``xi`` (x values) and ``y1`` (y-value
    lists indexed by ``n``) — assumed to be sequence-likes; TODO confirm
    their exact types at the call site.
    """
    xx = xi  # alias, not a copy, of the global x values
    yy = y1[n]  # y values for series n
    resultX = []
    x = []
    y = []
    # all y values, highest first
    resultY = list(reversed(np.sort(yy)))
    le = len(xi)  # NOTE(review): computed but never used
    for i in resultY:
        # NOTE(review): .index() returns the FIRST match, so duplicate y
        # values all map back to the same x — presumably acceptable here.
        pos = yy.index(i)
        resultX.append(xx[pos])
    # keep only the top m pairs
    for i in range(m):
        x.append(resultX[i])
        y.append(resultY[i])
    return (x, y)
class QueryStepComparative(QueryStep):
    """QDMR 'comparative' step: filters one step's results by comparing a
    column/value against a comparator grounded in the annotation, emitting
    SPARQL FILTER lines and (when needed) wrapping the result in a subquery.
    """
    def __init__(self, creator):
        super().__init__(creator)
    # NOTE(review): defined without ``self`` yet invoked via
    # ``self.parse_comparator_value(...)`` — an @staticmethod decorator was
    # almost certainly lost in extraction; restore it, otherwise ``self``
    # binds to ``grounding_comparative``.
    def parse_comparator_value(grounding_comparative, good_values=['>', '<', '>=', '<=', '=', '!=', 'like']):
        """Split a 'comparative' grounding key into (comparator, value, column).

        The key has 2 or 3 parts: operator, comparison value, and optionally
        the column to compare on.
        """
        assert grounding_comparative.iscomp(), f"Comparator should be grounded to a key of type 'comparative' but have {grounding_comparative.type}"
        assert (len(grounding_comparative.keys) in [2, 3]), f'Key of comparator should be of len 2 or 3 but have {grounding_comparative.keys}'
        comparator = grounding_comparative.keys[0]
        comparison_value = grounding_comparative.keys[1]
        assert (comparator in good_values), f'Third arg of comparator should be grounded to {good_values}, but have {comparator}'
        if (comparison_value is not None):
            # strip quoting; 'like' patterns also lose their % wildcards
            comparison_value = comparison_value.replace("'", '')
            if (comparator == 'like'):
                comparison_value = comparison_value.replace('%', '')
        if (len(grounding_comparative.keys) == 3):
            grounding_col = grounding_comparative.keys[2]
        else:
            grounding_col = None
        return (comparator, comparison_value, grounding_col)
    # NOTE(review): same stripped-@staticmethod situation as above.
    def make_filter_line(filter_var, comparator, comparison_value):
        """Render one SPARQL FILTER line; 'like' becomes a case-insensitive REGEX."""
        if (comparator != 'like'):
            return f'FILTER({filter_var} {comparator} {comparison_value}).'
        else:
            comparison_value = str(comparison_value).replace('"', '').replace('^^xsd:string', '')
            return f'FILTER(REGEX(STR({filter_var}), "(.*{comparison_value}.*)", "i")).'
    def build_step_op(self, qdmr_index, inline_query, context):
        """Build the SPARQL for one comparative step and return the updated context.

        args: [source ref, ref to filter on, comparator grounding].
        """
        grounding = self.creator.grounding
        schema = self.creator.schema
        get_var_name = self.creator.namer.get_var_name
        op = 'comparative'
        args = self.extract_args(qdmr_index, op, num_args=3)
        source_index = QdmrInstance.ref_to_index(args[0], qdmr_index)
        filter_index = QdmrInstance.ref_to_index(args[1], qdmr_index)
        grounding_comparative = GroundingIndex(qdmr_index, 2, args[2])
        if ((grounding_comparative in grounding) and grounding[grounding_comparative]):
            assert (grounding_comparative in grounding), f'Comparator {grounding_comparative} should be grounded but have {grounding}'
            target_grounding = grounding[grounding_comparative]
        else:
            grounding_comparative = None
            target_grounding = None
        list_of_indices = [source_index, filter_index]
        # the comparison value may itself be a reference to another QDMR step
        try:
            (comparator, comparison_value, comparator_col) = self.parse_comparator_value(grounding[grounding_comparative])
            comparison_index = QdmrInstance.ref_to_index(comparison_value, qdmr_index)
            list_of_indices.append(comparison_index)
        except:
            comparison_index = None
        comparator_cols = [None]
        comparison_values = [None]
        # keep an untouched copy of the context so the second branch below
        # can rebuild from scratch
        context_old = context
        context = copy.deepcopy(context_old)
        context = self.creator.construct_set_of_args(list_of_indices, inline_query=True, context=context)
        filter_units = context.output_units_for_qdmr_index[filter_index]
        if (target_grounding is not None):
            # resolve the comparator column(s)/value(s) from the grounding type
            if target_grounding.iscol():
                comparator_cols = [target_grounding]
                comparison_values = [None]
            elif target_grounding.istbl():
                # compare on the table's primary key
                tbl_name = target_grounding.get_tbl_name()
                comparator_cols = [GroundingKey.make_column_grounding(tbl_name, schema.schema.primary_keys[tbl_name])]
                comparison_values = [None]
            elif target_grounding.isval():
                comparator_cols = [GroundingKey.make_column_grounding(target_grounding.get_tbl_name(), target_grounding.get_col_name())]
                comparator = '='
                comparison_values = [schema.get_key_for_comparison(target_grounding)]
            elif target_grounding.iscomp():
                (comparator, comparison_value, comparator_col) = self.parse_comparator_value(target_grounding)
                if (comparator_col is None):
                    comparator_cols = [u.grnd for u in filter_units]
                else:
                    comparator_cols = [comparator_col]
                if (comparison_index is not None):
                    comparison_values = [u.var for u in context.output_units_for_qdmr_index[comparison_index]]
                else:
                    comparison_values = [comparison_value]
                assert (len(comparator_cols) == len(comparison_values))
                comparator_cols_new = []
                comparison_values_new = []
                for (comparator_col, comparison_value) in zip(comparator_cols, comparison_values):
                    if (isinstance(comparator_col, GroundingKey) and comparator_col.isref()):
                        # column given as a reference to another step: expand to
                        # that step's output columns
                        index_of_grounding = QdmrInstance.ref_to_index(comparator_col.keys[0], qdmr_index)
                        comparator_units = context.output_units_for_qdmr_index[index_of_grounding]
                        for u in comparator_units:
                            val = GroundingKey.make_value_grounding(u.output_col.grounding_column.get_tbl_name(), u.output_col.grounding_column.get_col_name(), comparison_value)
                            val = schema.get_key_for_comparison(val)
                            comparator_cols_new.append(u)
                            comparison_values_new.append(val)
                    else:
                        if isinstance(comparator_col, GroundingKey):
                            comparison_value = GroundingKey.make_value_grounding(comparator_col.get_tbl_name(), comparator_col.get_col_name(), comparison_value)
                            comparison_value = schema.get_key_for_comparison(comparison_value)
                        else:
                            pass
                        comparison_values_new.append(comparison_value)
                        comparator_cols_new.append(comparator_col)
                comparator_cols = comparator_cols_new
                comparison_values = comparison_values_new
            else:
                raise RuntimeError(f'Do not know what to do with grounding {target_grounding}')
        else:
            target_grounding_col = None
        if ((comparison_index is None) or any((((a.grnd != b.grnd) and (b.grnd is not None)) for (a, b) in zip(context.output_units_for_qdmr_index[filter_index], context.output_units_for_qdmr_index[comparison_index])))):
            # plain case: FILTER against constants / grounded columns
            assert (len(comparator_cols) == len(comparison_values)), f'This mode only supports comparator_cols and comparison_values of same len but have {comparator_cols} and {comparison_values}'
            query_lines = []
            filter_units = context.output_units_for_qdmr_index[filter_index]
            for (i_comp, (comparator_col, comparison_value)) in enumerate(zip(comparator_cols, comparison_values)):
                if ((comparator_col is not None) and isinstance(comparator_col, GroundingKey)):
                    context.add_column(comparator_col, schema, get_var_name, [u.grnd for u in filter_units])
                    filter_var = context.var_for_grounding[comparator_col]
                elif isinstance(comparator_col, ContextOutputUnit):
                    filter_var = comparator_col.var
                    comparator_col = comparator_col.grnd
                else:
                    filter_var = filter_units[i_comp].var
                if (comparison_value is not None):
                    query_lines.append(self.make_filter_line(filter_var, comparator, comparison_value))
                else:
                    query_lines.append('')
        else:
            # comparison target shares groundings with the filter side: build
            # the comparison args in a fresh context and join the two queries
            context = copy.deepcopy(context_old)
            context = self.creator.construct_set_of_args([source_index, filter_index], inline_query=True, context=context)
            context_comp = self.creator.construct_set_of_args([comparison_index], inline_query=True, context=None)
            filter_units = context.output_units_for_qdmr_index[filter_index]
            comp_units = context_comp.output_units_for_qdmr_index[comparison_index]
            context.append_query(context_comp.query)
            query_lines = []
            for (filter_unit, comp_unit) in zip(filter_units, comp_units):
                query_line = self.make_filter_line(filter_unit.var, comparator, comp_unit.var)
                query_lines.append(query_line)
        for query_line in query_lines:
            context.append_query(query_line)
        # this step outputs the source step's units, filtered
        context.output_units_for_qdmr_index[qdmr_index] = context.output_units_for_qdmr_index[source_index]
        for unit in context.output_units_for_qdmr_index[qdmr_index]:
            if (qdmr_index in grounding['distinct']):
                context.var_with_distinct[unit.var] = True
        context = QueryStepComparative.wrap_into_subquery(self, qdmr_index, context, schema, get_var_name, inline_query, context.output_units_for_qdmr_index[qdmr_index], comparator_cols)
        return context
    def wrap_into_subquery(self, qdmr_index, context, schema, get_var_name, inline_query, source_output_units, comparator_cols):
        """Wrap the step's query in a (sub)query, adding DISTINCT / a primary
        key to the projection when needed so result multiplicity is preserved.
        """
        if ((comparator_cols is not None) and (tuple((u.grnd for u in source_output_units)) == tuple(comparator_cols))):
            # output columns coincide with the comparator columns: no wrapping
            if (not inline_query):
                context.query = self.build_full_query_from_inline(query=context.query, output_vars=[u.var for u in source_output_units], context=context)
            for u in source_output_units:
                if (u.grnd is not None):
                    context.add_and_fix_grounding(target_grounding=u.grnd, output_var=u.var, schema=schema)
            return context
        else:
            flag_not_adding_primary_key = False
            flag_output_var_with_distinct = False
            for unit in reversed(source_output_units):
                tbl_name = unit.grnd.get_tbl_name()
                try:
                    col_name = unit.grnd.get_col_name()
                except:
                    # table grounding without a column: fall back to primary key
                    col_name = schema.schema.primary_keys[tbl_name]
                cur_output_var = context.var_for_grounding[unit.grnd]
                cur_output_var_with_distinct = (cur_output_var in context.var_with_distinct)
                flag_output_var_with_distinct = (flag_output_var_with_distinct or cur_output_var_with_distinct)
                flag_not_adding_primary_key = (flag_not_adding_primary_key or (col_name in schema.schema.column_key_in_table[tbl_name]) or cur_output_var_with_distinct)
            output_units = context.output_units_for_qdmr_index[qdmr_index]
            output_col_for_var_name = context.output_col_for_var_name
            flag_output_extra_key = False
            if flag_not_adding_primary_key:
                query = context.query
                query = textwrap.indent(query, self.indent_block)
                query = QueryStep.template_full_with_distinct.format(output_var=' '.join([u.var for u in output_units]), query=query)
            else:
                # project the primary key too, so duplicate rows survive the subquery
                tbl_names = [u.grnd.get_tbl_name() for u in output_units if (u.grnd is not None)]
                tbl_name = tbl_names[0]
                key_grnd = GroundingKey.make_column_grounding(tbl_name, schema.schema.primary_keys[tbl_name])
                context.add_column(key_grnd, schema, get_var_name, [u.grnd for u in output_units if (u.grnd is not None)])
                group_var = context.var_for_grounding[key_grnd]
                query = context.query
                query = textwrap.indent(query, self.indent_block)
                if inline_query:
                    to_output = ' '.join(([u.var for u in output_units] + [group_var]))
                    flag_output_extra_key = True
                    output_col_for_var_name[group_var] = OutputColumnId.from_grounding(key_grnd, schema.schema)
                else:
                    to_output = (f"DISTINCT {' '.join([u.var for u in output_units])}" if flag_output_var_with_distinct else ' '.join([u.var for u in output_units]))
                query = QueryStep.template_full.format(output_var=to_output, query=query)
            if inline_query:
                query = self.build_inline_query_from_full(query)
            # build a fresh context holding only the wrapped query
            context_new = LocalContext()
            context_new.query = query
            context_new.output_col_for_var_name = output_col_for_var_name
            context_new.output_units_for_qdmr_index[qdmr_index] = output_units
            for unit in output_units:
                if (unit.var in context.var_with_distinct):
                    context_new.var_with_distinct[unit.var] = context.var_with_distinct[unit.var]
                if (unit.grnd is not None):
                    context_new.add_and_fix_grounding(target_grounding=unit.grnd, output_var=unit.var, schema=schema)
            if flag_output_extra_key:
                context_new.add_and_fix_grounding(target_grounding=GroundingKey.make_table_grounding(tbl_name), output_var=group_var, schema=schema)
            return context_new
def hess(fcn: Callable[(..., torch.Tensor)], params: Sequence[Any], idxs: Union[(None, int, Sequence[int])]=None) -> Union[(LinearOperator, List)]:
    """Return the Hessian(s) of ``fcn`` w.r.t. the parameters selected by ``idxs``.

    Each Hessian is represented lazily as a Hermitian LinearOperator obtained
    by taking the Jacobian of the gradient. If ``idxs`` is a single int the
    operator is returned directly, otherwise a list (one per index).
    """
    idxs_list = _setup_idxs(idxs, params)
    pfcn = get_pure_function(fcn)
    res = []
    def gen_pfcn2(idx):
        # Build a function returning d(fcn)/d(params[idx]); its Jacobian is
        # the Hessian w.r.t. that parameter.
        # NOTE(review): the next bare call looks like a stripped decorator
        # (e.g. ``@functools.wraps(pfcn)``) — confirm against upstream source.
        _sibling(pfcn)
        def pfcn2(*params):
            with torch.enable_grad():
                z = pfcn(*params)
            (grady,) = torch.autograd.grad(z, (params[idx],), retain_graph=True, create_graph=torch.is_grad_enabled())
            return grady
        return pfcn2
    for idx in idxs_list:
        # _Jac may emit warnings we deliberately don't surface here
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            hs = _Jac(gen_pfcn2(idx), params, idx, is_hermitian=True)
        res.append(hs)
    # single int idxs: unwrap the list for convenience
    if isinstance(idxs, int):
        return res[0]
    return res
def suggestDType(x):
    """Return a reasonable dtype for *x* (or its first element, for a
    list/tuple): the object's own ``dtype`` if present, else ``float``,
    ``int``, or ``object``.

    Raises Exception for an empty list/tuple, since no element is available
    to inspect.
    """
    if isinstance(x, (list, tuple)):
        if not x:
            raise Exception('can not determine dtype for empty list')
        # inspect the first element only
        x = x[0]
    if hasattr(x, 'dtype'):
        return x.dtype
    for py_type in (float, int):
        if isinstance(x, py_type):
            return py_type
    return object
class ClsAgnosticPredictHead(nn.Module):
    """Class-agnostic box prediction head.

    Predicts per-proposal center offsets and sizes, plus (optionally)
    objectness scores, heading bin scores/residuals, and semantic-class
    scores. Predictions are stored in ``end_points`` under keys prefixed
    with ``prefix``; returns ``(center, pred_size)``.
    """

    def __init__(self, num_class, num_heading_bin, num_proposal, seed_feat_dim=256, objectness=True, heading=False, compute_sem_scores=True):
        super().__init__()
        self.num_class, self.num_heading_bin = num_class, num_heading_bin
        self.num_proposal, self.seed_feat_dim = num_proposal, seed_feat_dim
        self.objectness, self.heading = objectness, heading
        self.compute_sem_scores = compute_sem_scores
        # optional sub-heads are created only when their output is enabled
        if objectness:
            self.objectness_scores_head = ThreeLayerMLP(seed_feat_dim, 1)
        self.center_residual_head = ThreeLayerMLP(seed_feat_dim, 3)
        if heading:
            self.heading_class_head = nn.Conv1d(seed_feat_dim, num_heading_bin, 1)
            self.heading_residual_head = nn.Conv1d(seed_feat_dim, num_heading_bin, 1)
        self.size_pred_head = ThreeLayerMLP(seed_feat_dim, 3)
        if compute_sem_scores:
            self.sem_cls_scores_head = ThreeLayerMLP(seed_feat_dim, self.num_class)

    def forward(self, features, base_xyz, end_points, prefix=''):
        """Run all enabled sub-heads on ``features`` and fill ``end_points``."""
        batch_size = features.shape[0]
        num_proposal = features.shape[-1]
        feats = features
        if self.objectness:
            obj_scores = self.objectness_scores_head(feats).transpose(2, 1)
            end_points[f'{prefix}objectness_scores'] = obj_scores.squeeze(-1)
        center_offset = self.center_residual_head(feats).transpose(2, 1)
        # centers are predicted as offsets from the proposal seeds
        center = base_xyz + center_offset
        if self.heading:
            bin_scores = self.heading_class_head(feats).transpose(2, 1)
            bin_residuals_norm = self.heading_residual_head(feats).transpose(2, 1)
            end_points[f'{prefix}heading_scores'] = bin_scores
            end_points[f'{prefix}heading_residuals_normalized'] = bin_residuals_norm
            # scale normalized residuals to radians per heading bin
            end_points[f'{prefix}heading_residuals'] = bin_residuals_norm * (np.pi / self.num_heading_bin)
        pred_size = self.size_pred_head(feats).transpose(2, 1).view([batch_size, num_proposal, 3])
        if self.compute_sem_scores:
            sem_scores = self.sem_cls_scores_head(features).transpose(2, 1)
        end_points[f'{prefix}base_xyz'] = base_xyz
        end_points[f'{prefix}center'] = center
        end_points[f'{prefix}pred_size'] = pred_size
        if self.compute_sem_scores:
            end_points[f'{prefix}sem_cls_scores'] = sem_scores
        return (center, pred_size)
def read_item_index_to_entity_id_file():
    """Populate the module-level maps ``item_index_old2new`` and
    ``entity_id2index`` from the rehashed item-index/entity-id file.

    Each line is tab-separated ``<item_index> <satori_id>``; both are mapped
    to the same running integer index (the zero-based line number).
    """
    file = 'data/item_index2entity_id_rehashed.txt'
    # use a context manager so the file handle is closed (the original
    # leaked it), split each line once, and let enumerate do the counting
    with open(file, encoding='utf-8') as fh:
        for i, line in enumerate(fh):
            item_index, satori_id = line.strip().split('\t')[:2]
            item_index_old2new[item_index] = i
            entity_id2index[satori_id] = i
# NOTE(review): this bare expression is almost certainly a mangled decorator
# (e.g. ``@<helper>(safer.closer)``) that injects the ``safer_closer``
# fixture into every test method below — restore it from upstream.
(safer.closer)
class TestCloser(unittest.TestCase):
    """Tests for safer's callback writer: output is buffered and only
    delivered to the callback when the writer is closed."""
    def test_callable_closer(self, safer_closer):
        """Plain callable target: nothing delivered until the with-block exits."""
        results = []
        with safer_closer(results.append) as fp:
            fp.write('one')
            fp.write('two')
            # still buffered inside the with-block
            assert (results == [])
        assert (results == ['onetwo'])
    def test_callable_closer2(self, safer_closer):
        """Callable with a close(failed) hook: hook runs after delivery."""
        class CB():
            def __call__(self, item):
                results.append(item)
            def close(self, failed):
                results.append(('close', failed))
        results = []
        with safer_closer(CB()) as fp:
            fp.write('one')
            fp.write('two')
            assert (results == [])
        assert (results == ['onetwo', ('close', False)])
    def test_callable_closer3(self, safer_closer):
        """A close() hook that raises: the exception propagates from __exit__,
        but the buffered payload was already delivered."""
        class CB():
            def __call__(self, item):
                results.append(item)
            def close(self, failed):
                raise ValueError('closer3')
        results = []
        fp = safer_closer(CB())
        fp.__enter__()
        fp.write('one')
        fp.write('two')
        with self.assertRaises(ValueError) as e:
            fp.__exit__(None, None, None)
        assert (e.exception.args == ('closer3',))
        assert (results == ['onetwo'])
def get_cached_module_file(pretrained_model_name_or_path: Union[(str, os.PathLike)], module_file: str, cache_dir: Optional[Union[(str, os.PathLike)]]=None, force_download: bool=False, resume_download: bool=False, proxies: Optional[Dict[(str, str)]]=None, use_auth_token: Optional[Union[(bool, str)]]=None, revision: Optional[str]=None, local_files_only: bool=False):
    """Download (or locate) ``module_file`` for a model repo and copy it, with
    the local modules it imports, into the dynamic-modules cache.

    Returns the path of the cached module relative to the dynamic modules
    root, suitable for importing. Raises EnvironmentError when the file
    cannot be resolved.
    """
    if (is_offline_mode() and (not local_files_only)):
        logger.info('Offline mode: forcing local_files_only=True')
        local_files_only = True
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    # local directories are cached under a fixed 'local' submodule; hub repos
    # under a path derived from the repo id
    if os.path.isdir(pretrained_model_name_or_path):
        submodule = 'local'
    else:
        submodule = pretrained_model_name_or_path.replace('/', os.path.sep)
    try:
        resolved_module_file = cached_file(pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token)
    except EnvironmentError:
        logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.')
        raise
    # relative imports used by the module must be copied alongside it
    modules_needed = check_imports(resolved_module_file)
    full_submodule = ((TRANSFORMERS_DYNAMIC_MODULE_NAME + os.path.sep) + submodule)
    create_dynamic_module(full_submodule)
    submodule_path = (Path(HF_MODULES_CACHE) / full_submodule)
    if (submodule == 'local'):
        shutil.copy(resolved_module_file, (submodule_path / module_file))
        for module_needed in modules_needed:
            module_needed = f'{module_needed}.py'
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), (submodule_path / module_needed))
    else:
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif (use_auth_token is True):
            token = HfFolder.get_token()
        else:
            token = None
        # version hub modules by commit hash so different revisions coexist
        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha
        submodule_path = (submodule_path / commit_hash)
        full_submodule = ((full_submodule + os.path.sep) + commit_hash)
        create_dynamic_module(full_submodule)
        if (not (submodule_path / module_file).exists()):
            shutil.copy(resolved_module_file, (submodule_path / module_file))
        # recursively fetch any needed sibling modules not yet cached
        for module_needed in modules_needed:
            if (not (submodule_path / module_needed).exists()):
                get_cached_module_file(pretrained_model_name_or_path, f'{module_needed}.py', cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only)
    return os.path.join(full_submodule, module_file)
def generate_beacons(args):
    """Explore every WAD map with a random policy and pickle the accumulated
    beacon nodes, one ``<wad_id>.pkl`` file per map."""
    default_cfg_path = '../data/configs/default.cfg'
    wad_ids = util.get_sorted_wad_ids(args.wad_dir)
    for idx, wad_id in enumerate(wad_ids):
        start = time.time()
        nodes, edges = {}, {}
        # nodes/edges accumulate across the exploration iterations
        for _ in range(args.iters):
            explore_map_random_policy(default_cfg_path, args.wad_dir, wad_id, nodes=nodes, edges=edges)
            print('{} nodes accumulated...'.format(len(nodes)))
        filter_graph(nodes, edges)
        print('{} nodes after filtering...'.format(len(nodes)))
        with open('./{}.pkl'.format(wad_id), 'wb') as handle:
            pkl.dump(nodes, handle, protocol=pkl.HIGHEST_PROTOCOL)
        elapsed_time = time.time() - start
        print('Finished exploring map {} for {} iterations in {}s'.format(idx, args.iters, elapsed_time))
def special_keys_init():
    """Extend the module-level ``special_keys`` table with alt-, ctrl- and
    function-key variants."""
    # alt- variant of every key already registered
    for name, code in tuple(special_keys.items()):
        special_keys['a-' + name] = (ALT_KEY, code)
    # alt-<char> for letters plus a few punctuation characters
    for ch in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_!{}':
        special_keys['a-' + ch] = (ALT_KEY, ord(ch))
    # ctrl-<char> maps to the corresponding ASCII control code
    for ch in 'abcdefghijklmnopqrstuvwxyz_':
        special_keys['c-' + ch] = ord(ch) - 96
    special_keys['c-space'] = 0
    # function keys f0..f63
    for n in range(64):
        special_keys['f' + str(n)] = curses.KEY_F0 + n
def verify_post_install(pipx_exit_code: int, captured_outerr, caplog, package_name: str, test_error_fh: io.StringIO, using_clear_path: bool, deps: bool=False) -> Tuple[(bool, Optional[bool], Optional[Path])]:
    """Check a pipx install's outcome: pip success, pipx success, and any pip
    error-log file referenced in the captured log records.

    Returns ``(pip_pass, pipx_pass, pip_error_file)`` where ``pipx_pass`` is
    None when pip itself failed.
    """
    pip_error_file = None
    caplog_problem = False
    install_success = (f'installed package {package_name}' in captured_outerr.out)
    for record in caplog.records:
        # Bug fix: the original tested ('' in record.message), which is always
        # True and flagged every record. The intended check is for pipx's
        # warning marker (the '⚠️' emoji, which had been stripped to '').
        if (('⚠️' in record.message) or ('WARNING' in record.message)):
            # a PATH warning is expected unless the test used a cleared PATH
            if (using_clear_path or ('was already on your PATH' not in record.message)):
                caplog_problem = True
            print('verify_install: WARNING IN CAPLOG:', file=test_error_fh)
            print(record.message, file=test_error_fh)
        if ('Fatal error from pip prevented installation' in record.message):
            pip_error_file_re = re.search('pip output in file:\\s+(\\S.+)$', record.message)
            if pip_error_file_re:
                pip_error_file = Path(pip_error_file_re.group(1))
    if (install_success and (PKG[package_name].get('apps', None) is not None)):
        app_success = verify_installed_resources('app', captured_outerr, package_name, test_error_fh, deps=deps)
    else:
        app_success = True
    if (install_success and ((PKG[package_name].get('man_pages', None) is not None) or (PKG[package_name].get('man_pages_of_dependencies', None) is not None))):
        man_success = verify_installed_resources('man', captured_outerr, package_name, test_error_fh, deps=deps)
    else:
        man_success = True
    pip_pass = (not ((pipx_exit_code != 0) and (f'Error installing {package_name}' in captured_outerr.err)))
    pipx_pass: Optional[bool]
    if pip_pass:
        pipx_pass = (install_success and (not caplog_problem) and app_success and man_success)
    else:
        pipx_pass = None
    return (pip_pass, pipx_pass, pip_error_file)
def sql_pred_parse(pred):
    """Parse a predicted SQL fragment (everything after SELECT) into a
    ``{table-slot: value}`` dict extracted from its WHERE conditions.

    Comparison operators are collapsed to '-' so each condition becomes
    ``<slot>-<value>``; table aliases introduced via AS are resolved back to
    their real table names.
    """
    # prepend the implied 'SELECT * FROM' head so sqlparse sees a statement
    pred = (' * FROM' + pred)
    if (pred == ' * FROM WHERE '):
        return {}
    pred_slot_values = []
    parsed = sqlparse.parse(pred)
    if (not parsed):
        return {}
    stmt = parsed[0]
    sql_toks = pred.split()
    operators = [' = ', ' LIKE ', ' < ', ' > ', ' >= ', ' <= ']
    if ('AS' in pred):
        # build alias -> real table name map from every 'tbl AS alias' pair
        as_indices = [i for (i, x) in enumerate(sql_toks) if (x == 'AS')]
        table_name_map_dict = {}
        for indice in as_indices:
            table_name_map_dict[sql_toks[(indice + 1)].replace(',', '')] = sql_toks[(indice - 1)]
        # last token of the statement is the WHERE clause; normalize it
        slot_values_str = str(stmt.tokens[(- 1)]).replace('_', ' ').replace("'", '').replace('WHERE ', '')
        for operator in operators:
            slot_values_str = slot_values_str.replace(operator, '-')
        slot_values = slot_values_str.split(' AND ')
        for sv in slot_values:
            # rewrite 'alias.col' to 'table-col'
            for t_ in table_name_map_dict.keys():
                sv = sv.replace((t_ + '.'), (table_name_map_dict[t_] + '-'))
            pred_slot_values.append(sv)
    else:
        # single-table query: prefix every condition with the table name
        table_name = sql_toks[(sql_toks.index('FROM') + 1)]
        slot_values_str = str(stmt.tokens[(- 1)]).replace('_', ' ').replace("'", '').replace('WHERE ', '')
        for operator in operators:
            slot_values_str = slot_values_str.replace(operator, '-')
        slot_values = slot_values_str.split(' AND ')
        pred_slot_values.extend([((table_name + '-') + sv) for sv in slot_values if (slot_values != [''])])
    # split each '<slot>-<value>' at its last dash; de-underscore slot names
    pred_slot_values = {'-'.join(sv_pair.split('-')[:(- 1)]): sv_pair.split('-')[(- 1)] for sv_pair in pred_slot_values}
    pred_slot_values = {slot.replace('_', ' '): value for (slot, value) in pred_slot_values.items()}
    return pred_slot_values
def add_all_source_types(command_tester_factory: CommandTesterFactory, poetry_with_source: Poetry, source_primary: Source, source_default: Source, source_secondary: Source, source_supplemental: Source, source_explicit: Source) -> None:
    """Register every source fixture via ``source add``.

    Each fixture's name doubles as its priority (the fixtures are named after
    the priority they represent).
    """
    tester = command_tester_factory('source add', poetry=poetry_with_source)
    all_sources = (source_primary, source_default, source_secondary, source_supplemental, source_explicit)
    for src in all_sources:
        tester.execute(f'{src.name} {src.url} --priority={src.name}')
class OneLayerBRNN(nn.Module):
    """A single bidirectional RNN layer (cell type chosen via ``opt``) with
    optional max-out over the two directions. Input and output are
    batch-first: (batch, seq, dim)."""

    def __init__(self, input_size, hidden_size, prefix='stack_rnn', opt={}, dropout=None):
        super().__init__()
        self.opt = opt
        self.prefix = prefix
        self.cell_type = opt.get('{}_cell'.format(prefix), 'lstm')
        self.emb_dim = opt.get('{}_embd_dim'.format(prefix), 0)
        self.maxout_on = opt.get('{}_maxout_on'.format(prefix), False)
        self.weight_norm_on = opt.get('{}_weight_norm_on'.format(prefix), False)
        self.dropout = dropout
        self.hidden_size = hidden_size
        # max-out collapses the two directions, halving the output width
        self.output_size = hidden_size if self.maxout_on else hidden_size * 2
        self.rnn = RNN_MAP[self.cell_type](input_size, hidden_size, num_layers=1, bidirectional=True)

    def forward(self, x, x_mask):
        # (batch, seq, dim) -> (seq, batch, dim) for the RNN
        seq_first = x.transpose(0, 1)
        dims = list(seq_first.size())
        rnn_out, _ = self.rnn(seq_first)
        if self.maxout_on:
            # element-wise max over forward/backward directions
            rnn_out = rnn_out.view(dims[0], dims[1], self.hidden_size, 2).max(-1)[0]
        return rnn_out.transpose(0, 1)
class Visualizer():
    """Training visualizer: logs images/losses to TensorBoard (TF1 API) and/or
    a self-contained HTML page, and appends loss messages to a text log.

    NOTE(review): relies on the TF1 ``tf.summary.FileWriter`` API and the
    long-removed ``scipy.misc.toimage`` — these need pinned old versions.
    """
    def __init__(self, opt):
        self.opt = opt
        self.tf_log = opt.tf_log
        self.use_html = (opt.isTrain and (not opt.no_html))
        self.win_size = opt.display_winsize
        self.name = opt.name
        if self.tf_log:
            import tensorflow as tf
            self.tf = tf
            self.log_dir = os.path.join(opt.checkpoints_dir, opt.name, 'logs')
            self.writer = tf.summary.FileWriter(self.log_dir)
        if self.use_html:
            self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
            self.img_dir = os.path.join(self.web_dir, 'images')
            print(('create web directory %s...' % self.web_dir))
            util.mkdirs([self.web_dir, self.img_dir])
        self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
        with open(self.log_name, 'a') as log_file:
            now = time.strftime('%c')
            log_file.write((' Training Loss (%s) \n' % now))
    def display_current_results(self, visuals, epoch, step):
        """Show the current images: as TensorBoard image summaries and/or by
        saving them and rebuilding the HTML index page."""
        if self.tf_log:
            img_summaries = []
            for (label, image_numpy) in visuals.items():
                # StringIO fails for binary data on py3; fall back to BytesIO
                try:
                    s = StringIO()
                except:
                    s = BytesIO()
                scipy.misc.toimage(image_numpy).save(s, format='jpeg')
                img_sum = self.tf.Summary.Image(encoded_image_string=s.getvalue(), height=image_numpy.shape[0], width=image_numpy.shape[1])
                img_summaries.append(self.tf.Summary.Value(tag=label, image=img_sum))
            summary = self.tf.Summary(value=img_summaries)
            self.writer.add_summary(summary, step)
        if self.use_html:
            for (label, image_numpy) in visuals.items():
                if isinstance(image_numpy, list):
                    for i in range(len(image_numpy)):
                        img_path = os.path.join(self.img_dir, ('epoch%.3d_%s_%d.jpg' % (epoch, label, i)))
                        util.save_image(image_numpy[i], img_path)
                else:
                    img_path = os.path.join(self.img_dir, ('epoch%.3d_%s.jpg' % (epoch, label)))
                    util.save_image(image_numpy, img_path)
            # rebuild the whole page, newest epoch first
            webpage = html.HTML(self.web_dir, ('Experiment name = %s' % self.name), reflesh=1)
            for n in range(epoch, 0, (- 1)):
                webpage.add_header(('epoch [%d]' % n))
                ims = []
                txts = []
                links = []
                for (label, image_numpy) in visuals.items():
                    if isinstance(image_numpy, list):
                        for i in range(len(image_numpy)):
                            img_path = ('epoch%.3d_%s_%d.jpg' % (n, label, i))
                            ims.append(img_path)
                            txts.append((label + str(i)))
                            links.append(img_path)
                    else:
                        img_path = ('epoch%.3d_%s.jpg' % (n, label))
                        ims.append(img_path)
                        txts.append(label)
                        links.append(img_path)
                # split long rows in two for readability
                if (len(ims) < 6):
                    webpage.add_images(ims, txts, links, width=self.win_size)
                else:
                    num = int(round((len(ims) / 2.0)))
                    webpage.add_images(ims[:num], txts[:num], links[:num], width=self.win_size)
                    webpage.add_images(ims[num:], txts[num:], links[num:], width=self.win_size)
            webpage.save()
    def plot_current_errors(self, errors, step):
        """Log scalar errors as TensorBoard summaries (no-op without tf_log)."""
        if self.tf_log:
            for (tag, value) in errors.items():
                summary = self.tf.Summary(value=[self.tf.Summary.Value(tag=tag, simple_value=value)])
                self.writer.add_summary(summary, step)
    def print_current_errors(self, epoch, i, errors, t):
        """Print non-zero errors and append the same line to the loss log."""
        message = ('(epoch: %d, iters: %d, time: %.3f) ' % (epoch, i, t))
        for (k, v) in errors.items():
            if (v != 0):
                message += ('%s: %.3f ' % (k, v))
        print(message)
        with open(self.log_name, 'a') as log_file:
            log_file.write(('%s\n' % message))
    def print_current_errors_new(self, epoch, i, errors, loss_names, t):
        """Like print_current_errors, but reports losses in ``loss_names`` order."""
        message = ('(epoch: %d, iters: %d, time: %.3f) ' % (epoch, i, t))
        for k in loss_names:
            v = errors[k]
            if (v != 0):
                message += ('%s: %.3f ' % (k, v))
        print(message)
        with open(self.log_name, 'a') as log_file:
            log_file.write(('%s\n' % message))
    def save_los(self, image_dir, visuals, image_path, webpage=None):
        """Save visuals as PNGs named after the source image (one name for all
        labels, so later labels overwrite earlier ones — presumably intended)."""
        dirname = os.path.basename(os.path.dirname(image_path[0]))
        image_dir = os.path.join(image_dir, dirname)
        util.mkdir(image_dir)
        name = os.path.basename(image_path[0])
        name = os.path.splitext(name)[0]
        if (webpage is not None):
            webpage.add_header(name)
            (ims, txts, links) = ([], [], [])
        for (label, image_numpy) in visuals.items():
            save_ext = 'png'
            image_name = ('%s.%s' % (name, save_ext))
            save_path = os.path.join(image_dir, image_name)
            util.save_image(image_numpy, save_path)
            if (webpage is not None):
                ims.append(image_name)
                txts.append(label)
                links.append(image_name)
        if (webpage is not None):
            webpage.add_images(ims, txts, links, width=self.win_size)
    def save_images(self, image_dir, visuals, image_path, webpage=None, no_=False):
        """Save each visual (label-prefixed unless ``no_``); label maps use PNG
        when label_nc != 0, everything else JPG."""
        dirname = os.path.basename(os.path.dirname(image_path[0]))
        image_dir = os.path.join(image_dir, dirname)
        util.mkdir(image_dir)
        name = os.path.basename(image_path[0])
        name = os.path.splitext(name)[0]
        if (webpage is not None):
            webpage.add_header(name)
            (ims, txts, links) = ([], [], [])
        for (label, image_numpy) in visuals.items():
            save_ext = ('png' if (('real_A' in label) and (self.opt.label_nc != 0)) else 'jpg')
            if no_:
                image_name = ('%s.%s' % (name, save_ext))
            else:
                image_name = ('%s_%s.%s' % (label, name, save_ext))
            save_path = os.path.join(image_dir, image_name)
            util.save_image(image_numpy, save_path)
            if (webpage is not None):
                ims.append(image_name)
                txts.append(label)
                links.append(image_name)
        if (webpage is not None):
            webpage.add_images(ims, txts, links, width=self.win_size)
    def vis_print(self, message):
        """Print a message and append it to the loss log."""
        print(message)
        with open(self.log_name, 'a') as log_file:
            log_file.write(('%s\n' % message))
class ApplyKmeans(object):
    """Assign each input row to its nearest k-means cluster centre.

    Loads a fitted scikit-learn KMeans model from ``km_path`` and caches the
    transposed centres plus their squared column norms, for both numpy and
    torch inputs (moved to GPU when CUDA is available).
    """

    def __init__(self, km_path):
        self.km_model = joblib.load(km_path)
        centers = self.km_model.cluster_centers_.transpose()
        self.C_np = centers
        self.Cnorm_np = (centers ** 2).sum(0, keepdims=True)
        self.C = torch.from_numpy(self.C_np)
        self.Cnorm = torch.from_numpy(self.Cnorm_np)
        if torch.cuda.is_available():
            self.C = self.C.cuda()
            self.Cnorm = self.Cnorm.cuda()

    def __call__(self, x):
        """Return the index of the nearest centre for every row of ``x``."""
        # squared distance expansion: |x|^2 - 2 x.C + |C|^2
        if isinstance(x, torch.Tensor):
            sq_dist = x.pow(2).sum(1, keepdim=True) - 2 * torch.matmul(x, self.C) + self.Cnorm
            return sq_dist.argmin(dim=1).cpu().numpy()
        sq_dist = (x ** 2).sum(1, keepdims=True) - 2 * np.matmul(x, self.C_np) + self.Cnorm_np
        return np.argmin(sq_dist, axis=1)
def plot(samples):
    """Render up to 16 flattened 28x28 grayscale samples on a 4x4 grid."""
    fig = plt.figure(figsize=(4, 4))
    grid = gridspec.GridSpec(4, 4)
    grid.update(wspace=0.05, hspace=0.05)
    for idx, sample in enumerate(samples):
        axis = plt.subplot(grid[idx])
        plt.axis('off')
        axis.set_xticklabels([])
        axis.set_yticklabels([])
        axis.set_aspect('equal')
        plt.imshow(sample.reshape(28, 28), cmap='Greys_r')
    return fig
def get_yt_ids(req: Requirement, highest_diff: int) -> Iterable[tuple[str, int, int]]:
    """Recursively yield (video_id, start_time, difficulty) triples for every
    YouTube link found in the comments of a requirement tree.

    *highest_diff* carries the largest difficulty seen on the path from the
    root down to the current node.
    """
    if not isinstance(req, RequirementArrayBase):
        return
    diff = get_difficulty(req)
    if diff is not None and diff > highest_diff:
        highest_diff = diff
    if req.comment is not None:
        if 'youtu' in req.comment:
            for token in req.comment.split(' '):
                if 'youtu' not in token:
                    continue
                # Handle both youtu.be/<id> and .../watch?v=<id> forms.
                video_id = token.split('/')[-1].split('watch?v=')[-1].split(' ')[0]
                start_time = 0
                if '?t=' in token:
                    start_time = int(video_id.split('?t=')[-1])
                    video_id = video_id.split('?t=')[0]
                yield (video_id, start_time, highest_diff)
    for child in req.items:
        yield from get_yt_ids(child, highest_diff)
def test_cancel_chunked_upload():
    """Cancelling a chunked upload must enqueue a chunk-cleanup job."""
    cleanup_queue = FakeQueue()
    storage_args = dict(base_args)
    storage_args['context'] = StorageContext('nyc', cleanup_queue, None, None)
    storage = FakeSwiftStorage(**storage_args)
    uuid, metadata = storage.initiate_chunked_upload()
    offset = 0
    for chunk in [b'this', b'is', b'some', b'chunked', b'data', b'']:
        written, metadata, error = storage.stream_upload_chunk(uuid, offset, len(chunk), io.BytesIO(chunk), metadata)
        assert error is None
        assert written == len(chunk)
        offset += len(chunk)
    storage.cancel_chunked_upload(uuid, metadata)
    assert cleanup_queue.get() is not None
class GPSRecord(object):
    """One GPS fix assembled from four receiver packets.

    ``al``: altitude packet, ``pv``: position packet, ``st``: tracking
    status packet, ``tm``: time packet.
    """

    def __init__(self, al, pv, st, tm):
        self._al = al
        self._pv = pv
        self._st = st
        self._tm = tm

    def time(self):
        """Return the fix time as a float (UTC seconds since the epoch).

        Raises NoGPSTime when the receiver is not tracking, and
        GPSTimeNotUTC when the GPS->UTC offset has not been applied yet.
        """
        if self._st.tracking_status_code != 0:
            raise NoGPSTime()
        if not self._tm.gps_utc_offset_flag:
            raise GPSTimeNotUTC()
        tm = self._tm
        return util.to_time_float(calendar.timegm((tm.year, tm.month, tm.day, tm.hours, tm.minutes, tm.seconds)))

    def latitude(self):
        return self._pv.latitude

    def longitude(self):
        return self._pv.longitude

    def altitude(self):
        return self._al.altitude

    def __str__(self):
        # BUG FIX: the accessors are methods and must be *called*. The old
        # code passed the bound methods themselves, so time_to_str received
        # a method object (GPSError could never fire) and the coordinates
        # printed as method reprs.
        try:
            stime = util.time_to_str(self.time())
        except GPSError:
            stime = '?'
        return '%s %s %s %s' % (stime, self.latitude(), self.longitude(), self.altitude())
class LeftOuterJoin(operator):
    """Left-outer-join node of the PythonQL query plan."""

    def __init__(self, on, hints):
        self.on = on          # join condition expression
        self.hints = hints    # optimizer hints

    def used_vars(self):
        """Variables referenced by the join condition."""
        from pythonql.Ast import get_all_vars, get_ast
        return get_all_vars(self.on)

    def execute(self, table, prior_locs, prior_globs, left_child, right_child):
        """Delegate evaluation of this node to the executor."""
        from pythonql.Executor import processLeftOuterJoin
        return processLeftOuterJoin(self, table, prior_locs, prior_globs, left_child, right_child)

    def __repr__(self):
        return 'LeftOuterJoin(%r,%r)' % (self.on, self.hints)
def test_bloq_as_cirq_gate_multi_dimensional_signature():
    # Verify the circuit diagram of SwapWithZero both wrapped as a single
    # cirq gate and after decomposing the bloq into its sub-operations.
    # NOTE(review): the expected-diagram strings below look mangled by text
    # extraction (wire glyphs collapsed) -- confirm against the upstream test.
    bloq = SwapWithZero(2, 3, 4)
    # Qubit registers named after the bloq's left (input) signature.
    cirq_quregs = get_named_qubits(bloq.signature.lefts())
    op = BloqAsCirqGate(bloq).on_registers(**cirq_quregs)
    cirq.testing.assert_has_diagram(cirq.Circuit(op), '\nselection0: SwapWithZero\n \nselection1: selection\n \ntargets[0][0]: targets\n \ntargets[0][1]: targets\n \ntargets[0][2]: targets\n \ntargets[1][0]: targets\n \ntargets[1][1]: targets\n \ntargets[1][2]: targets\n \ntargets[2][0]: targets\n \ntargets[2][1]: targets\n \ntargets[2][2]: targets\n \ntargets[3][0]: targets\n \ntargets[3][1]: targets\n \ntargets[3][2]: targets\n')
    cbloq = bloq.decompose_bloq()
    cirq.testing.assert_has_diagram(cbloq.to_cirq_circuit(**cirq_quregs)[0], '\nselection0: (approx)\n \nselection1: (approx)(approx)\n \ntargets[0][0]: (x)(x)\n \ntargets[0][1]: (x)(x)\n \ntargets[0][2]: (x)(x)\n \ntargets[1][0]: (y)\n \ntargets[1][1]: (y)\n \ntargets[1][2]: (y)\n \ntargets[2][0]: (x)(y)\n \ntargets[2][1]: (x)(y)\n \ntargets[2][2]: (x)(y)\n \ntargets[3][0]: (y)\n \ntargets[3][1]: (y)\n \ntargets[3][2]: (y)\n')
# NOTE(review): the line below looks like a decorator call whose '@name'
# prefix was lost in text extraction (it appears to register 'PDD' as an
# equilibrium solver) -- confirm against the original file.
_equilibrium_solver('PDD', reason_to_exclude=reason_to_exclude)
def equilibrium_pdd(junction: Junction, T: float=298.0, output_equilibrium: int=1, meshpoints: int=(- 400), **options) -> None:
    """Solve the junction's equilibrium with the PDD (Poisson drift-diffusion)
    backend and store the resulting band structure.

    Results are attached as ``junction.equilibrium_data``; nothing is
    returned. The negative default for *meshpoints* is the solver's own
    convention -- presumably automatic meshing; TODO confirm.
    """
    # Build the discretised device description for the Fortran/dd backend.
    output = process_structure(junction=junction, T=T, meshpoints=meshpoints, **options)
    dd.gen = 0
    print('Solving equilibrium...')
    dd.equilibrium(output_equilibrium)
    print('...done!\n')
    output['Bandstructure'] = DumpBandStructure()
    junction.equilibrium_data = State(**output)
# NOTE(review): "('pickle')" below looks like the argument list of a
# decorator (e.g. a test marker) whose '@name' prefix was lost in text
# extraction -- confirm against the original file.
('pickle')
def test_frame_wise_iteration():
    # Frame-wise datasets expose one item per frame: total length equals the
    # sum of utterance lengths, every item keeps its feature dimension, and
    # with cache_size == number of utterances everything ends up cached.
    (X, Y) = _get_small_datasets(padded=False)
    lengths = np.array([len(x) for x in X], dtype=int)
    num_utterances = len(lengths)
    X = MemoryCacheFramewiseDataset(X, lengths, cache_size=len(X))
    Y = MemoryCacheFramewiseDataset(Y, lengths, cache_size=len(Y))
    assert (np.sum(lengths) == len(X))
    assert (len(X) == len(Y))
    Dx = X[0].shape[(- 1)]
    Dy = Y[0].shape[(- 1)]
    for (_, (x, y)) in enumerate(zip(X, Y)):
        assert (x.shape[(- 1)] == Dx)
        assert (y.shape[(- 1)] == Dy)
    assert (len(X.cached_utterances) == num_utterances)
    assert (len(Y.cached_utterances) == num_utterances)
    # Slicing must also work on the frame-wise view.
    for (_, (_, _)) in enumerate(zip(X[:2], Y[:2])):
        pass
class TagListEditor(qltk.Window):
    """Dialog window for editing an ordered list of tag expressions.

    Shows a two-column tree view (editable expression + derived description)
    with Add / Remove / Edit buttons and a row context menu; call ``tags()``
    after the dialog closes to read the result.
    """

    _WIDTH = 600
    _HEIGHT = 300

    def __init__(self, title, values=None):
        super().__init__()
        self.use_header_bar()
        self.set_border_width(12)
        self.set_title(title)
        self.set_default_size(self._WIDTH, self._HEIGHT)
        vbox = Gtk.VBox(spacing=12)
        hbox = Gtk.HBox(spacing=12)
        # One string column: the tag expression itself.
        self.model = Gtk.ListStore(str)
        self.__fill_values((values or []))

        def on_row_activated(view, path, column):
            # Double-click / Enter starts inline editing of the row.
            self._renderer.set_property('editable', True)
            view.set_cursor(path, view.get_columns()[0], start_editing=True)

        view = self.view = HintedTreeView(model=self.model)
        view.set_fixed_height_mode(True)
        view.set_headers_visible(False)
        view.connect('row-activated', on_row_activated)
        sw = Gtk.ScrolledWindow()
        sw.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
        sw.set_shadow_type(Gtk.ShadowType.IN)
        sw.add(view)
        sw.set_size_request((- 1), max(sw.size_request().height, 100))
        hbox.pack_start(sw, True, True, 0)
        self.__setup_column(view)
        # Context menu with a single Remove action.
        menu = Gtk.Menu()
        remove_item = MenuItem(_('_Remove'), Icons.LIST_REMOVE)
        menu.append(remove_item)
        menu.show_all()
        view.connect('popup-menu', self.__popup, menu)
        connect_obj(remove_item, 'activate', self.__remove, view)
        # Vertical Add / Remove / Edit button column.
        vbbox = Gtk.VButtonBox()
        vbbox.set_layout(Gtk.ButtonBoxStyle.START)
        vbbox.set_spacing(6)
        add = Button(_('_Add...'), Icons.LIST_ADD)
        add.connect('clicked', self.__add)
        vbbox.pack_start(add, False, True, 0)
        remove = Button(_('_Remove'), Icons.LIST_REMOVE)
        remove.connect('clicked', self.__remove)
        vbbox.pack_start(remove, False, True, 0)
        edit = Button(_('_Edit'), Icons.LIST_EDIT)
        edit.connect('clicked', self.__edit)
        vbbox.pack_start(edit, False, True, 0)
        hbox.pack_start(vbbox, False, True, 0)
        vbox.pack_start(hbox, True, True, 0)
        # Bottom row: Close button, only when the window chrome lacks one.
        bbox = Gtk.HButtonBox()
        self.remove_but = Button(_('_Remove'), Icons.LIST_REMOVE)
        self.remove_but.set_sensitive(False)
        close = Button(_('_Close'), Icons.WINDOW_CLOSE)
        connect_obj(close, 'clicked', qltk.Window.destroy, self)
        bbox.set_layout(Gtk.ButtonBoxStyle.END)
        if (not self.has_close_button()):
            bbox.pack_start(close, True, True, 0)
        vbox.pack_start(bbox, False, True, 0)
        self.add(vbox)
        self.get_child().show_all()

    def __start_editing(self, _render, editable, path):
        # Seed the inline editor with the current expression.
        editable.set_text(self.model[path][0])

    def __edited(self, _render, path, new_name):
        self.model[path][0] = new_name
        # Force the description column to refresh too.
        self.model.row_changed(path, self.model.get_iter(path))

    def __setup_column(self, view):
        """Create the editable expression column and the derived
        (read-only) description column."""
        def tag_cdf(column, cell, model, iter, data):
            row = model[iter]
            if row:
                cell.set_property('text', row[0])

        def desc_cdf(column, cell, model, iter, data):
            row = model[iter]
            if row:
                cell.set_property('markup', util.italic(util.tag(row[0])))

        def __create_cell_renderer():
            r = Gtk.CellRendererText()
            r.connect('editing-started', self.__start_editing)
            r.connect('edited', self.__edited)
            return r

        self._renderer = renderer = __create_cell_renderer()
        column = Gtk.TreeViewColumn(_('Tag expression'), renderer)
        column.set_cell_data_func(renderer, tag_cdf)
        column.set_sizing(Gtk.TreeViewColumnSizing.FIXED)
        column.set_expand(True)
        view.append_column(column)
        renderer = Gtk.CellRendererText()
        renderer.set_property('ellipsize', Pango.EllipsizeMode.END)
        renderer.set_property('sensitive', False)
        column = Gtk.TreeViewColumn(_('Description'), renderer)
        column.set_cell_data_func(renderer, desc_cdf)
        column.set_sizing(Gtk.TreeViewColumnSizing.FIXED)
        column.set_expand(True)
        view.append_column(column)
        view.set_headers_visible(True)

    def __fill_values(self, data: Iterable[str]):
        for s in data:
            self.model.append(row=[s])

    def tags(self):
        """Return the edited tag expressions, in display order."""
        return [row[0] for row in self.model if row]

    def __remove(self, *args):
        self.view.remove_selection()

    def __add(self, *args):
        tooltip = _('Tag expression e.g. people:real or ~album~year')
        dialog = GetStringDialog(self, _('Enter new tag'), '', button_icon=None, tooltip=tooltip)
        new = dialog.run()
        if new:
            self.model.append(row=[new])

    def __edit(self, *args):
        (path, col) = self.view.get_cursor()
        tooltip = _('Tag expression e.g. people:real or ~album~year')
        dialog = GetStringDialog(self, _('Edit tag expression'), '', button_icon=None, tooltip=tooltip)
        edited = dialog.run(text=self.model[path][0])
        if edited:
            self.model[path][0] = edited

    def __popup(self, view, menu):
        return view.popup_menu(menu, 0, Gtk.get_current_event_time()).show()
def get_logger(file_path):
    """Configure the root logger to write INFO+ records to *file_path* and
    to the console, and return it.

    Creates the log directory if needed. NOTE: every call adds a fresh
    pair of handlers to the root logger, so repeated calls duplicate
    output -- call once per process.
    """
    # BUG FIX: the local used to shadow the builtin `dir`, and a bare file
    # name (empty dirname) made os.makedirs('') raise. exist_ok also removes
    # the exists-then-create race.
    log_dir = os.path.dirname(file_path)
    if log_dir:
        os.makedirs(log_dir, exist_ok=True)
    logger = logging.getLogger()
    formatter = logging.Formatter('%(asctime)s | %(message)s', datefmt='%m/%d %H:%M:%S')
    for handler in (logging.FileHandler(file_path), logging.StreamHandler()):
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    return logger
class TestAssertEqual(TestCase):
    # NOTE(review): these methods use assertRegexpMatches, the deprecated
    # alias of assertRegex (removed in Python 3.12), and reference free
    # names (abc, xxx, y, z) that are not defined in this file. This looks
    # like fixture input for a code-rewriting tool (covering the different
    # call shapes) rather than a runnable test -- confirm before "fixing".
    def test_you(self):
        self.assertRegexpMatches(abc, 'xxx')

    def test_me(self):
        self.assertRegexpMatches(123, (xxx + y))

    def test_everybody(self):
        self.assertRegexpMatches('abc', 'def')

    def test_message(self):
        # Both the msg= keyword and the positional-message call forms.
        self.assertRegexpMatches((123 + z), (xxx + y), msg='This is wrong!')
        self.assertRegexpMatches(123, (xxx + y), 'This is wrong!')
def check_plugin_project_files(app_folder: Path, plugin_name: str, plugin_description: str, html_file: str='index.html', config_file: str=config['project_config_filename'], python_file: str='main.py'):
    """Assert that a generated plugin project contains the expected files and
    that the HTML page embeds the plugin name, description and the
    py-script / py-config tags.
    """
    html_file_path = (app_folder / html_file)
    assert html_file_path.exists(), f'{html_file} not found! :('
    assert (app_folder / config_file).exists(), f'{config_file} not found! :('
    assert (app_folder / python_file).exists(), f'{python_file} not found! :('
    with html_file_path.open() as fp:
        contents = fp.read()
    contents = dedent(contents)
    assert (f' <h1>{plugin_name}</h1>' in contents)
    # BUG FIX: this used to assert the (always-truthy) literal itself; the
    # description snippet must be checked for membership in the page.
    # NOTE(review): the snippet's internal indentation was flattened by
    # extraction -- verify it matches the generated template.
    assert dedent(f''' <div>
 <h2> Description </h2>
 <p>{plugin_description}</p>
 </div>''') in contents
    assert (f'<py-script src="./{python_file}">' in contents)
    assert (f'<py-config src="./{config_file}">' in contents)
class KCrossAttnDownBlock2D(nn.Module):
    """K-diffusion-style UNet down block: ``num_layers`` pairs of
    ResnetBlock2D + KAttentionBlock cross-attention, followed by an
    optional KDownsample2D.
    """

    def __init__(self, in_channels: int, out_channels: int, temb_channels: int, cross_attention_dim: int, dropout: float=0.0, num_layers: int=4, resnet_group_size: int=32, add_downsample=True, attention_head_dim: int=64, add_self_attention: bool=False, resnet_eps: float=1e-05, resnet_act_fn: str='gelu'):
        super().__init__()
        resnets = []
        attentions = []
        self.has_cross_attention = True
        for i in range(num_layers):
            # Only the first resnet maps in_channels -> out_channels.
            in_channels = (in_channels if (i == 0) else out_channels)
            groups = (in_channels // resnet_group_size)
            groups_out = (out_channels // resnet_group_size)
            resnets.append(ResnetBlock2D(in_channels=in_channels, out_channels=out_channels, dropout=dropout, temb_channels=temb_channels, groups=groups, groups_out=groups_out, eps=resnet_eps, non_linearity=resnet_act_fn, time_embedding_norm='ada_group', conv_shortcut_bias=False))
            attentions.append(KAttentionBlock(out_channels, (out_channels // attention_head_dim), attention_head_dim, cross_attention_dim=cross_attention_dim, temb_channels=temb_channels, attention_bias=True, add_self_attention=add_self_attention, cross_attention_norm='layer_norm', group_size=resnet_group_size))
        self.resnets = nn.ModuleList(resnets)
        self.attentions = nn.ModuleList(attentions)
        if add_downsample:
            self.downsamplers = nn.ModuleList([KDownsample2D()])
        else:
            self.downsamplers = None
        self.gradient_checkpointing = False

    def forward(self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, cross_attention_kwargs: Optional[Dict[(str, Any)]]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None):
        """Run the resnet/attention pairs, then the downsampler.

        Returns ``(hidden_states, output_states)`` where ``output_states``
        is a one-element tuple holding the pre-downsample features, or
        ``(None,)`` when this block has no downsampler.
        """
        output_states = ()
        for (resnet, attn) in zip(self.resnets, self.attentions):
            if (self.training and self.gradient_checkpointing):
                # Recompute resnet activations in backward to save memory.
                def create_custom_forward(module, return_dict=None):
                    def custom_forward(*inputs):
                        if (return_dict is not None):
                            return module(*inputs, return_dict=return_dict)
                        else:
                            return module(*inputs)
                    return custom_forward
                # Non-reentrant checkpointing only exists on torch >= 1.11.
                ckpt_kwargs: Dict[(str, Any)] = ({'use_reentrant': False} if is_torch_version('>=', '1.11.0') else {})
                hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb, **ckpt_kwargs)
                hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, emb=temb, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask)
            else:
                hidden_states = resnet(hidden_states, temb)
                hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, emb=temb, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs, encoder_attention_mask=encoder_attention_mask)
        if (self.downsamplers is None):
            output_states += (None,)
        else:
            output_states += (hidden_states,)
        if (self.downsamplers is not None):
            for downsampler in self.downsamplers:
                hidden_states = downsampler(hidden_states)
        return (hidden_states, output_states)
class RunningMeter(object):
    """Exponential moving average of a scalar metric.

    ``decay`` is the per-observation smoothing factor; ``record(val, n)``
    folds in *n* identical observations at once.
    """

    def __init__(self, decay):
        self.decay = decay
        # BUG FIX: the running state used to be created only by reset(), so
        # record()/get_val() on a fresh meter raised AttributeError.
        self.reset()

    def reset(self):
        """Clear the running average and the last recorded value."""
        self.val = 0
        self.last = 0

    def record(self, val, n=1):
        """Fold *val*, repeated *n* times, into the running average."""
        self.last = val
        # Effective decay of n consecutive EMA updates with the same value.
        decay = 1 - (1 - self.decay) ** n
        self.val = (1 - decay) * self.val + decay * val

    def get_val(self):
        return self.val

    def get_last(self):
        return self.last
def test_handle_block_lower_block_number():
    """A Block state-change older than the current block must be a no-op."""
    setup = make_target_state(block_number=10)
    stale_block = Block(
        block_number=setup.block_number - 1,
        gas_limit=1,
        block_hash=factories.make_transaction_hash(),
    )
    iteration = target.state_transition(
        target_state=setup.new_state,
        state_change=stale_block,
        channel_state=setup.channel,
        pseudo_random_generator=setup.pseudo_random_generator,
        block_number=stale_block.block_number,
    )
    assert iteration.new_state
    assert not iteration.events
def seperate_end_word_punctuations(data):
    """Insert a space before the trailing punctuation run of each token.

    Builds a replacement map over the corpus vocabulary (tokens that are
    replaceable and end in a non-alphanumeric character) and rewrites every
    line with the cleaned tokens.
    """
    if verbose:
        print('#' * 10, 'Step - End word punctuations:')
    vocab = list(set(tok for line in data for tok in line.split()))
    # Keep only replaceable tokens whose last character is punctuation.
    vocab = [tok for tok in vocab if _check_replace(tok) and not tok[len(tok) - 1].isalnum()]
    replacements = {}
    for word in vocab:
        fixed = word
        # Scan right-to-left for the last alphanumeric char and split there.
        for i in range(len(word), 0, -1):
            if word[i - 1].isalnum():
                fixed = word[:i] + ' ' + word[i:]
                break
        replacements[word] = fixed
    replacements = {w: f for w, f in replacements.items() if w != f}
    data = [' '.join(_make_dict_cleaning(tok, replacements) for tok in line.split()) for line in data]
    if verbose:
        _print_dict(replacements)
    return data
def test_cylinder():
    """Exercise Cylinder construction, mutation, serialisation and repr."""
    cyl = Cylinder(10.0, 5.0, name='cylinder', color='blue', material='METAL')
    assert cyl.name == 'cylinder'
    assert str(cyl) == 'Cylinder cylinder color:blue material:METAL length:10.0 radius:5.0'
    assert repr(cyl) == 'Cylinder'
    assert cyl.length == 10.0
    assert cyl.radius == 5.0
    assert cyl.color == 'blue'
    if p3js is not None:
        mesh = cyl._p3js_mesh()
        expected_mesh = p3js.Mesh(
            p3js.CylinderBufferGeometry(radiusTop=5.0, radiusBottom=5.0, height=10.0, radialSegments=100),
            p3js.MeshStandardMaterial(color='blue'),
            name='cylinder',
        )
        assert repr(mesh) == repr(expected_mesh)
    # Every attribute is writable after construction.
    cyl.name = 'cylinder1'
    assert cyl.name == 'cylinder1'
    cyl.length = 14.0
    assert cyl.length == 14.0
    cyl.radius = 7.0
    assert cyl.radius == 7.0
    cyl.color = 'cyan'
    assert cyl.color == 'cyan'
    cyl.material = 'FOIL'
    assert cyl.material == 'FOIL'
    assert cyl.generate_dict() == {'color': 'cyan', 'type': 'Cylinder', 'name': 'cylinder1', 'length': 14.0, 'radius': 7.0, 'material': 'FOIL'}
    assert isinstance(cyl, Shape)
    # Defaults when no name/material are given.
    unnamed = Cylinder(10.0, 5.0, color='blue')
    assert unnamed.name == 'unnamed'
    assert str(unnamed) == 'Cylinder unnamed color:blue material:default length:10.0 radius:5.0'
    assert repr(unnamed) == 'Cylinder'
# NOTE(review): the tuple below looks like the argument list of a decorator
# (argument caching/validation) whose '@name' prefix was lost in text
# extraction -- confirm against the original file.
(2, 'where', 'itemids')
def getVariations(itemids, groupIDs=None, where=None, eager=None):
    """Fetch all Item rows that are variations of the given parent item IDs.

    Falls back to fetching all items of *groupIDs* when no variation rows
    match. *where* is an additional filter clause; *eager* lists relations
    to eager-load.
    """
    for itemid in itemids:
        if (not isinstance(itemid, int)):
            raise TypeError('All passed item IDs must be integers')
    # NOTE(review): a generator passed as itemids would be consumed by the
    # validation loop above and break len() here -- callers presumably pass
    # concrete sequences.
    if (len(itemids) == 0):
        return []
    itemfilter = or_(*((items_table.c.variationParentTypeID == itemid) for itemid in itemids))
    filter = processWhere(itemfilter, where)
    vars = get_gamedata_session().query(Item).options(*processEager(eager)).filter(filter).all()
    if vars:
        return vars
    elif groupIDs:
        # Fallback: all items belonging to the given groups.
        itemfilter = or_(*((groups_table.c.groupID == groupID) for groupID in groupIDs))
        filter = processWhere(itemfilter, where)
        joinon = (items_table.c.groupID == groups_table.c.groupID)
        vars = get_gamedata_session().query(Item).options(*processEager(eager)).join((groups_table, joinon)).filter(filter).all()
    return vars
class WindowRecord(SimpleBuilderApp):
def __init__(self, equipment_service, data_path=None, listSport=None, parent=None, date=None, title=None, distance=None, time=None, upositive=None, unegative=None, bpm=None, calories=None, comment=None, windowTitle=None, equipment=[]):
    """Build the new/edit-record dialog and pre-fill any supplied fields.

    NOTE(review): ``equipment=[]`` is a mutable default argument; it is only
    iterated (never mutated) here, but ``None`` + fallback would be safer.
    ``bpm`` and ``comment`` are accepted but unused in this constructor.
    """
    logging.debug('>>')
    self.parent = parent
    self.pytrainer_main = parent.pytrainer_main
    self.uc = UC()  # unit converter (metric vs US units)
    logging.debug('Using US system: %s', self.uc.us)
    self.data_path = data_path
    self.mode = 'newrecord'  # switched by setValues*/populateMultiWindow
    self.id_record = ''
    self.store = None
    self.active_row = None
    self.activity_data = []
    SimpleBuilderApp.__init__(self, 'newrecord.ui')
    # Widget names whose values get persisted with the record.
    self.conf_options = ['rcd_date', 'rcd_sport', 'rcd_distance', 'rcd_beats', 'rcd_comments', 'rcd_average', 'rcd_calories', 'rcd_title', 'rcd_gpxfile', 'rcd_upositive', 'rcd_unegative', 'rcd_maxbeats', 'rcd_pace', 'rcd_maxpace', 'rcd_maxvel']
    self.listSport = {}
    for sport in listSport:
        self.listSport[sport.id] = sport.name
    for i in self.listSport:
        self.rcd_sport.insert_text(i, self.listSport[i])
    self.rcd_sport.set_active(0)
    if (windowTitle is not None):
        self.newrecord.set_title(windowTitle)
    # Pre-fill whichever optional values the caller supplied.
    if (date != None):
        self.setDate(date)
    if (title != None):
        self.rcd_title.set_text(title)
    if (distance != None):
        self.rcd_distance.set_text(self.uc.distance(distance))
    if (time != None):
        self.setTime(time)
    if ((distance != None) and (time != None)):
        # Both known: derive average speed/pace immediately.
        self.on_calcavs_clicked(None)
    if (upositive != None):
        self.rcd_upositive.set_text(self.uc.height(upositive))
    if (unegative != None):
        self.rcd_unegative.set_text(self.uc.height(unegative))
    if (calories != None):
        self.rcd_calories.set_text(calories)
    # Unit-aware field labels.
    self.label_rcd_distance.set_text((_('Distance') + (' (%s)' % self.uc.unit_distance)))
    self.label_rcd_maxvel.set_text((_('Max') + (' (%s)' % self.uc.unit_speed)))
    self.label_rcd_average.set_text((_('Average') + (' (%s)' % self.uc.unit_speed)))
    self.label_rcd_maxpace.set_text((_('Max') + (' (%s)' % self.uc.unit_pace)))
    self.label_rcd_pace.set_text((_('Pace') + (' (%s)' % self.uc.unit_pace)))
    self.label_rcd_upositive.set_text((_('Ascent') + (' (%s)' % self.uc.unit_height)))
    self.label_rcd_unegative.set_text((_('Descent') + (' (%s)' % self.uc.unit_height)))
    self._init_equipment(equipment, equipment_service)
    logging.debug('<<')
def _init_equipment(self, selected_equipment, equipment_service):
    """Build the equipment tree view, pre-ticking *selected_equipment*."""
    equipment = {}
    active_equipment = equipment_service.get_active_equipment()
    for item in active_equipment:
        equipment[item] = False
    if (len(active_equipment) == 0):
        # Nothing selectable: show the hint instead of an empty list.
        self.noActiveEquipmentMessageContainer.set_visible(True)
    for item in selected_equipment:
        equipment[item] = True
    # Columns: id, description, selected flag.
    list_store = Gtk.ListStore(int, str, bool)
    for item in equipment:
        list_store.append((item.id, item.description, equipment[item]))
    tree_view = self.treeviewRecordEquipment
    cell_renderer = Gtk.CellRendererToggle()
    cell_renderer.connect('toggled', self._equipment_selection_toggled)
    tree_view.append_column(Gtk.TreeViewColumn('Selected', cell_renderer, active=2))
    tree_view.append_column(Gtk.TreeViewColumn('Equipment Item', Gtk.CellRendererText(), text=1))
    tree_view.set_model(list_store)
def _equipment_selection_toggled(self, widget, path):
    """Flip the boolean 'selected' column of the toggled equipment row."""
    model = self.treeviewRecordEquipment.get_model()
    row = model.get_iter(path)
    model.set(row, 2, not model.get_value(row, 2))
def _get_selected_equipment_ids(self):
    """Return the ids of all equipment rows whose checkbox is ticked."""
    chosen = []
    model = self.treeviewRecordEquipment.get_model()
    row = model.get_iter_first()
    while row is not None:
        if model.get_value(row, 2):
            chosen.append(model.get_value(row, 0))
        row = model.iter_next(row)
    return chosen
def getActivityData(self):
    # Per-activity dicts built by populateMultiWindow / on_accept_clicked.
    return self.activity_data
def populateMultiWindow(self, activities):
    """Switch the dialog into multi-activity import mode and list
    *activities* (tuples of id, start time, distance, duration, sport,
    gpx file, file id) in the entries tree view.
    """
    logging.debug('>>')
    self.mode = 'multiple_activities'
    self.activity_data = []
    self.store = self.build_tree_view()
    for activity in activities:
        iter = self.store.append()
        self.store.set(iter, 0, activity[0], 1, activity[1], 2, activity[2], 3, activity[3], 4, activity[4], 5, activity[5])
        # Seed the per-activity dict; 'complete' marks whether the GPX
        # summary has been parsed yet.
        details = {}
        details['complete'] = False
        details['rcd_distance'] = activity[2]
        duration = activity[3]
        if (duration.count(':') == 2):
            (hours, mins, secs) = duration.split(':')
        else:
            hours = mins = secs = 0
        details['activity_id'] = activity[0]
        details['rcd_time'] = (float(hours), float(mins), float(secs))
        if (activity[4] is not None):
            details['rcd_sport'] = activity[4]
        else:
            # Fall back to the sport currently selected in the combo.
            details['rcd_sport'] = gtk_str(self.rcd_sport.get_active_text())
        details['rcd_gpxfile'] = activity[5]
        details['file_id'] = activity[6]
        self.activity_data.append(details)
    self.scrolledwindowEntries.show_all()
    # Hide the single-activity-only controls.
    self.button25.hide()
    self.button24.hide()
    self.button10.hide()
    self.button11.hide()
    self.button43.hide()
    self.rcd_gpxfile.set_sensitive(0)
    # Flush pending GTK events so the view is realised before selecting.
    while Gtk.events_pending():
        Gtk.main_iteration()
    self.treeviewEntries.set_cursor(0)
    self.show_treeviewEntries_row(0)
    logging.debug('<<')
def build_tree_view(self):
    """Create and attach the list store and columns for the activity table."""
    store = Gtk.ListStore(GObject.TYPE_STRING, GObject.TYPE_STRING, GObject.TYPE_STRING, GObject.TYPE_STRING, GObject.TYPE_STRING, GObject.TYPE_STRING)
    column_names = ['id', _('Start Time'), _('Distance'), _('Duration'), _('Sport'), _('GPX File')]
    for (column_index, column_name) in enumerate(column_names):
        column = Gtk.TreeViewColumn(column_name, Gtk.CellRendererText(), text=column_index)
        column.set_sort_column_id(column_index)
        if (column_name == 'id'):
            # Internal key only; never shown to the user.
            column.set_visible(False)
        column.set_resizable(True)
        self.treeviewEntries.append_column(column)
    self.treeviewEntries.set_headers_clickable(True)
    self.treeviewEntries.set_model(store)
    return store
def on_accept_clicked(self, widget):
    """Persist the dialog contents -- every pending activity in
    multi-import mode, or the single new/edited record otherwise -- then
    close the window.

    NOTE(review): block nesting reconstructed from flattened source;
    confirm ambiguous indentation against the original file.
    """
    logging.debug('>>')
    if (self.mode == 'multiple_activities'):
        logging.debug('multiple_activities')
        if (self.active_row is not None):
            # Flush the editable fields of the currently selected row into
            # its activity dict before saving.
            buffer = self.rcd_comments.get_buffer()
            (start, end) = buffer.get_bounds()
            comments = gtk_str(buffer.get_text(start, end, True))
            self.activity_data[self.active_row]['rcd_comments'] = comments
            self.activity_data[self.active_row]['rcd_maxpace'] = gtk_str(self.rcd_maxpace.get_text())
            self.activity_data[self.active_row]['rcd_pace'] = gtk_str(self.rcd_pace.get_text())
            self.activity_data[self.active_row]['rcd_upositive'] = gtk_str(self.rcd_upositive.get_text())
            self.activity_data[self.active_row]['rcd_unegative'] = gtk_str(self.rcd_unegative.get_text())
            self.activity_data[self.active_row]['rcd_maxbeats'] = gtk_str(self.rcd_maxbeats.get_text())
            self.activity_data[self.active_row]['rcd_beats'] = gtk_str(self.rcd_beats.get_text())
            self.activity_data[self.active_row]['rcd_calories'] = gtk_str(self.rcd_calories.get_text())
        row = 0
        for activity in self.activity_data:
            index = self.activity_data.index(activity)
            if (activity['complete'] is False):
                logging.debug('Activity incomplete.. %s', activity['rcd_gpxfile'])
                # Fill in the summary fields by parsing the GPX file.
                self.update_activity_data(row, activity['rcd_gpxfile'], activity['rcd_sport'])
            # Double quotes would break the storage layer's quoting.
            activity['rcd_title'] = activity['rcd_title'].replace('"', "'")
            laps = activity.pop('laps', ())
            selected_equipment_ids = self._get_selected_equipment_ids()
            self.activity_data[index]['db_id'] = self.parent.insertRecord(activity, laps, equipment=selected_equipment_ids)
            row += 1
        logging.debug('Processed %d rows of activity data', row)
    else:
        logging.debug('Single activity')
        # Collect every field from the dialog, converting user units back
        # to the internal (metric) system.
        list_options = {}
        trackSummary = {}
        list_options['rcd_date'] = gtk_str(self.rcd_date.get_text())
        list_options['rcd_sport'] = gtk_str(self.rcd_sport.get_active_text())
        list_options['rcd_distance'] = self.uc.usr2sys_str('distance', gtk_str(self.rcd_distance.get_text()))
        list_options['rcd_beats'] = gtk_str(self.rcd_beats.get_text())
        list_options['rcd_average'] = self.uc.usr2sys_str('speed', gtk_str(self.rcd_average.get_text()))
        list_options['rcd_calories'] = gtk_str(self.rcd_calories.get_text())
        list_options['rcd_title'] = gtk_str(self.rcd_title.get_text())
        list_options['rcd_gpxfile'] = gtk_str(self.rcd_gpxfile.get_text())
        list_options['rcd_upositive'] = self.uc.usr2sys_str('height', gtk_str(self.rcd_upositive.get_text()))
        list_options['rcd_unegative'] = self.uc.usr2sys_str('height', gtk_str(self.rcd_unegative.get_text()))
        list_options['rcd_maxbeats'] = gtk_str(self.rcd_maxbeats.get_text())
        list_options['rcd_pace'] = self.uc.usr2sys_str('pace', gtk_str(self.rcd_pace.get_text()).replace(':', '.'))
        list_options['rcd_maxpace'] = self.uc.usr2sys_str('pace', gtk_str(self.rcd_maxpace.get_text()).replace(':', '.'))
        list_options['rcd_maxvel'] = self.uc.usr2sys_str('speed', gtk_str(self.rcd_maxvel.get_text()))
        list_options['rcd_time'] = [self.rcd_hour.get_value_as_int(), self.rcd_min.get_value_as_int(), self.rcd_second.get_value_as_int()]
        buffer = self.rcd_comments.get_buffer()
        (start, end) = buffer.get_bounds()
        comment = gtk_str(buffer.get_text(start, end, True))
        list_options['rcd_comments'] = comment
        selected_equipment_ids = self._get_selected_equipment_ids()
        # Derive both local and UTC timestamps from the date + start-time
        # entries in the local timezone.
        record_time = gtk_str(self.rcd_starttime.get_text())
        record_date = gtk_str(self.rcd_date.get_text())
        localtz = getLocalTZ()
        date = dateutil.parser.parse(((((record_date + ' ') + record_time) + ' ') + localtz))
        local_date = str(date)
        utc_date = date.astimezone(tzutc()).strftime('%Y-%m-%dT%H:%M:%SZ')
        list_options['date_time_utc'] = utc_date
        list_options['date_time_local'] = local_date
        if (self.mode == 'newrecord'):
            logging.debug('Track data: %s', list_options)
            if (list_options['rcd_gpxfile'] != ''):
                logging.info('Adding new activity based on GPX file')
                self.parent.insertRecord(list_options, None, selected_equipment_ids)
            else:
                logging.info('Adding new activity based on provided data')
                # NOTE(review): this re-derives the timestamps already
                # computed above -- redundant but kept as-is.
                record_time = gtk_str(self.rcd_starttime.get_text())
                record_date = gtk_str(self.rcd_date.get_text())
                localtz = getLocalTZ()
                date = dateutil.parser.parse(((((record_date + ' ') + record_time) + ' ') + localtz))
                local_date = str(date)
                utc_date = date.astimezone(tzutc()).strftime('%Y-%m-%dT%H:%M:%SZ')
                list_options['date_time_utc'] = utc_date
                list_options['date_time_local'] = local_date
                try:
                    self.parent.insertRecord(list_options, equipment=selected_equipment_ids)
                except ValueError:
                    msg = "Unable to add an activity record without any sports, please add a sport in preferences' <i>Sports</i> tab."
                    logging.error(msg)
                    warning_dialog(title=_('Error Adding Activity Record'), text=_(msg))
        elif (self.mode == 'editrecord'):
            self.parent.updateRecord(list_options, self.id_record, equipment=selected_equipment_ids)
    logging.debug('<<')
    self.close_window()
def on_cancel_clicked(self, widget):
    # Discard everything and close the dialog.
    self.close_window()
def close_window(self, widget=None):
    # Hide the dialog window and leave its event loop.
    self.newrecord.hide()
    self.quit()
def on_calendar_clicked(self, widget):
    # Open the date picker pre-set to the current date-field value; it
    # calls back into setDate() with the chosen date.
    calendardialog = WindowCalendar(self.data_path, self, date=gtk_str(self.rcd_date.get_text()))
    calendardialog.run()
def setDate(self, date):
    # *date* is the display string shown in the date entry.
    self.rcd_date.set_text(date)
def setTime(self, timeInSeconds):
    """Split *timeInSeconds* into h/m/s and load the three spin buttons."""
    hours_float = int(timeInSeconds) / 3600.0
    whole_hours = int(hours_float)
    whole_minutes = int((hours_float - whole_hours) * 60)
    seconds = (((hours_float - whole_hours) * 60) - whole_minutes) * 60
    self.rcd_hour.set_value(whole_hours)
    self.rcd_min.set_value(whole_minutes)
    self.rcd_second.set_value(seconds)
def setValue(self, var_name, value, format='%0.2f'):
    # Format *value* with *format* and push it into the text widget stored
    # as attribute *var_name*; failures (e.g. value is None) are logged
    # and ignored so one bad field does not abort populating the dialog.
    var = getattr(self, var_name)
    try:
        valueString = (format % value)
        var.set_text(valueString)
    except Exception as e:
        logging.debug('setValue: %s, %s, %s', var_name, value, e)
        pass
def setValuesFromActivity(self, activity):
    """Populate the dialog widgets from an Activity object for editing.

    Converts stored (metric) values into the user's unit system for
    display.
    """
    logging.debug('>>')
    self.mode = 'editrecord'
    if (activity is None):
        logging.debug('activity is None')
        logging.debug('<<')
        return
    self.id_record = activity.id
    (h, m, s) = activity.time_tuple
    self.rcd_hour.set_value(h)
    self.rcd_min.set_value(m)
    self.rcd_second.set_value(s)
    self.rcd_date.set_text(str(activity.date))
    if (activity.distance is not None):
        self.rcd_distance.set_text(('%.2f' % self.uc.distance(activity.distance)))
    else:
        self.rcd_distance.set_text('')
    self.rcd_average.set_text(('%.2f' % self.uc.speed(activity.average)))
    self.rcd_calories.set_text(('%s' % activity.calories))
    self.rcd_beats.set_text(('%s' % activity.beats))
    self.rcd_upositive.set_text(('%.2f' % self.uc.height(activity.upositive)))
    self.rcd_unegative.set_text(('%.2f' % self.uc.height(activity.unegative)))
    self.rcd_maxvel.set_text(('%.2f' % self.uc.speed(activity.maxspeed)))
    self.rcd_maxpace.set_text(('%s' % self.parent.pace_from_float(self.uc.pace(activity.maxpace), True)))
    self.rcd_pace.set_text(('%s' % self.parent.pace_from_float(self.uc.pace(activity.pace), True)))
    self.rcd_maxbeats.set_text(('%s' % activity.maxbeats))
    self.rcd_title.set_text(activity.title)
    if (activity.starttime is not None):
        self.rcd_starttime.set_text(('%s' % activity.starttime))
    sportPosition = self.getSportPosition(activity.sport_id)
    self.rcd_sport.set_active(sportPosition)
    buffer = self.rcd_comments.get_buffer()
    (start, end) = buffer.get_bounds()
    buffer.set_text(activity.comments)
    if (activity.gpx_file is not None):
        self.rcd_gpxfile.set_text(activity.gpx_file)
        # GPX-derived values are authoritative: lock the manual frames.
        # NOTE(review): nesting of these set_sensitive calls reconstructed
        # from flattened source -- confirm against the original file.
        self.frameGeneral.set_sensitive(0)
        self.frameVelocity.set_sensitive(0)
        self.framePace.set_sensitive(0)
        self.frameElevation.set_sensitive(0)
        self.frameBeats.set_sensitive(0)
    logging.debug('<<')
def setValues(self, values):
    """Populate the dialog from a legacy tuple-style record for editing.

    Index layout (as used below): 0 id, 1 date, 2 sport id, 3 distance,
    4 seconds, 5 beats, 6 average, 7 calories, 8 comments, 10 title,
    11/12 ascent/descent, 13 max speed, 14/15 max pace/pace,
    16 max beats, 18 local start time.
    """
    logging.debug('windowrecord setValues called')
    self.mode = 'editrecord'
    self.id_record = values[0]
    self.setTime(values[4])
    self.rcd_date.set_text(str(values[1]))
    self.setValue('rcd_distance', values[3])
    self.setValue('rcd_average', values[6])
    self.setValue('rcd_calories', values[7], '%0.0f')
    self.setValue('rcd_beats', values[5], '%0.0f')
    self.setValue('rcd_upositive', values[11])
    self.setValue('rcd_unegative', values[12])
    self.setValue('rcd_maxvel', values[13])
    self.setValue('rcd_maxpace', values[14])
    self.setValue('rcd_pace', values[15])
    self.setValue('rcd_maxbeats', values[16], '%0.0f')
    self.rcd_title.set_text(('%s' % values[10]))
    local_time = values[18]
    if (local_time is not None):
        dateTime = dateutil.parser.parse(local_time)
        sTime = dateTime.strftime('%X')
        self.rcd_starttime.set_text(('%s' % sTime))
    sportID = values[2]
    sportPosition = self.getSportPosition(sportID)
    self.rcd_sport.set_active(sportPosition)
    buffer = self.rcd_comments.get_buffer()
    (start, end) = buffer.get_bounds()
    buffer.set_text(values[8])
def getSportPosition(self, sportID):
    """Return the combo-box index of *sportID*, or 0 when not found."""
    for position, key in enumerate(self.listSport):
        if key == sportID:
            return position
    return 0
def getSportPositionByName(self, sport):
    """Return the combo-box row index whose display name equals *sport*, or None."""
    for position, name in enumerate(self.listSport.values()):
        if name == sport:
            return position
    return None
def on_calctime_clicked(self, widget):
    """Derive the activity duration from distance plus either speed or pace.

    Prefers the average-speed entry; when it is empty, unparseable or zero,
    falls back to the pace entry.  The complementary field is updated and
    the result is pushed into the duration spin buttons.  All failures are
    logged and swallowed so a half-filled form never raises into GTK.
    """
    logging.debug('>>')
    try:
        distance = float(gtk_str(self.rcd_distance.get_text()))
        try:
            average = float(gtk_str(self.rcd_average.get_text()))
            time_in_hour = (distance / average)
            logging.debug('Distance: %0.3f km (mi) | Speed: %0.2f -> Time: %.f hours', distance, average, time_in_hour)
            pace = self.parent.pace_from_float((60 / average))
            logging.debug('Setting pace: %s', pace)
            self.rcd_pace.set_text(pace)
        except (ValueError, ZeroDivisionError):
            # Fix: was a bare 'except' (also caught KeyboardInterrupt/SystemExit).
            # No usable speed entered — derive the duration from the pace field.
            pace_dec = self.parent.pace_to_float(gtk_str(self.rcd_pace.get_text()))
            time_in_hour = ((pace_dec * distance) / 60.0)
            logging.debug('Distance: %0.3f km (mi) | Pace_dec: %0.2f -> Time: %.f hours', distance, pace_dec, time_in_hour)
            speed = (distance / time_in_hour)
            logging.debug('Setting average speed: %0.2f', speed)
            self.rcd_average.set_text(('%0.2f' % speed))
        self.set_recordtime(time_in_hour)
    except Exception:
        # Fix: was a bare 'except'; keep the best-effort semantics but log it.
        logging.debug('Traceback: %s', traceback.format_exc())
    logging.debug('<<')
def update_activity_data(self, row, gpx_file, sport):
    """Fill self.activity_data[row] from the summary parsed out of *gpx_file*."""
    logging.debug('>>')
    entry = self.activity_data[row]
    entry['rcd_comments'] = ''
    (gpx_summary, laps) = self.parent.summaryFromGPX(gpx_file, (sport, ''))
    local_time = gpx_summary['date_time_local']
    entry['rcd_date'] = local_time.strftime('%Y-%m-%d')
    entry['rcd_starttime'] = local_time.strftime('%H:%M:%S')
    # The remaining fields are copied verbatim from the GPX summary.
    for field in ('date_time_local', 'date_time_utc', 'rcd_time',
                  'rcd_distance', 'rcd_average', 'rcd_calories', 'rcd_beats',
                  'rcd_upositive', 'rcd_unegative', 'rcd_maxvel',
                  'rcd_maxpace', 'rcd_pace', 'rcd_maxbeats'):
        entry[field] = gpx_summary[field]
    entry['rcd_title'] = ''
    entry['laps'] = laps
    entry['complete'] = True
    logging.debug('<<')
def show_treeviewEntries_row(self, row):
    """Display (and lazily complete) the activity stored at *row* in the form.

    Marks *row* active, fills the widgets from self.activity_data[row], and
    — if the entry has not been parsed yet ('complete' is False) — clears
    the derived fields, parses the GPX file via update_activity_data(),
    then repopulates everything from the freshly computed summary.
    """
    logging.debug('>>')
    self.active_row = row
    sport = self.activity_data[row]['rcd_sport']
    sportPosition = self.getSportPositionByName(sport)
    if (sportPosition is not None):
        self.rcd_sport.set_active(sportPosition)
    gpx_file = self.activity_data[row]['rcd_gpxfile']
    self.setValue('rcd_gpxfile', gpx_file, '%s')
    time = time2second(self.activity_data[row]['rcd_time'])
    self.setTime(time)
    self.setValue('rcd_distance', self.activity_data[row]['rcd_distance'], '%s')
    buffer = self.rcd_comments.get_buffer()
    # get_bounds() result is unused here; kept as-is (likely leftover).
    (start, end) = buffer.get_bounds()
    if ('rcd_comments' not in self.activity_data[row]):
        self.activity_data[row]['rcd_comments'] = ''
    buffer.set_text(self.activity_data[row]['rcd_comments'])
    # Flush pending GTK events so the partially filled form repaints before
    # the potentially slow GPX parse below.
    while Gtk.events_pending():
        Gtk.main_iteration()
    if (self.activity_data[row]['complete'] is False):
        # Blank all derived fields while the file is being parsed.
        self.setValue('rcd_date', '', '%s')
        self.setValue('rcd_starttime', '', '%s')
        self.setValue('rcd_average', '', '%s')
        self.setValue('rcd_calories', '', '%s')
        self.setValue('rcd_beats', '', '%s')
        self.setValue('rcd_upositive', '', '%s')
        self.setValue('rcd_unegative', '', '%s')
        self.setValue('rcd_maxvel', '', '%s')
        self.rcd_maxpace.set_text('')
        self.rcd_pace.set_text('')
        self.setValue('rcd_maxbeats', '', '%s')
        while Gtk.events_pending():
            Gtk.main_iteration()
        # Parse the GPX file and cache its summary in activity_data[row].
        self.update_activity_data(row, gpx_file, sport)
    self.setValue('rcd_distance', self.activity_data[row]['rcd_distance'], '%s')
    time = time2second(self.activity_data[row]['rcd_time'])
    self.setTime(time)
    self.setValue('rcd_date', self.activity_data[row]['rcd_date'], '%s')
    self.setValue('rcd_starttime', self.activity_data[row]['rcd_starttime'], '%s')
    self.setValue('rcd_average', self.activity_data[row]['rcd_average'])
    self.setValue('rcd_calories', self.activity_data[row]['rcd_calories'], '%s')
    self.setValue('rcd_beats', self.activity_data[row]['rcd_beats'], '%s')
    self.setValue('rcd_upositive', self.activity_data[row]['rcd_upositive'], '%s')
    self.setValue('rcd_unegative', self.activity_data[row]['rcd_unegative'], '%s')
    self.setValue('rcd_maxvel', self.activity_data[row]['rcd_maxvel'])
    self.rcd_maxpace.set_text(self.activity_data[row]['rcd_maxpace'])
    self.rcd_pace.set_text(self.activity_data[row]['rcd_pace'])
    self.setValue('rcd_maxbeats', self.activity_data[row]['rcd_maxbeats'], '%s')
    self.rcd_title.set_text(self.activity_data[row]['rcd_title'])
    logging.debug('<<')
def on_rcd_title_changed(self, widget):
    """Mirror title edits into the active multi-import row."""
    if (self.mode != 'multiple_activities') or (self.active_row is None):
        return
    self.activity_data[self.active_row]['rcd_title'] = gtk_str(self.rcd_title.get_text())
def on_rcd_sport_changed(self, widget):
    """Mirror the sport selection into the active multi-import row and the list store."""
    if (self.mode != 'multiple_activities') or (self.active_row is None):
        return
    chosen_sport = gtk_str(self.rcd_sport.get_active_text())
    self.activity_data[self.active_row]['rcd_sport'] = chosen_sport
    self.store[self.active_row][4] = chosen_sport
def on_rcd_distance_changed(self, widget):
    """Mirror distance edits into the active multi-import row and the list store."""
    if (self.mode != 'multiple_activities') or (self.active_row is None):
        return
    new_distance = gtk_str(self.rcd_distance.get_text())
    self.activity_data[self.active_row]['rcd_distance'] = new_distance
    self.store[self.active_row][2] = new_distance
def on_rcd_duration_value_changed(self, widget):
    """Mirror the duration spin buttons into the active multi-import row."""
    if (self.mode != 'multiple_activities') or (self.active_row is None):
        return
    hours = self.rcd_hour.get_value()
    minutes = self.rcd_min.get_value()
    seconds = self.rcd_second.get_value()
    self.activity_data[self.active_row]['rcd_time'] = (hours, minutes, seconds)
    self.store[self.active_row][3] = ('%d:%.2d:%.2d' % (int(hours), int(minutes), int(seconds)))
def on_rcd_date_changed(self, widget):
    """Mirror date edits into the active multi-import row."""
    if (self.mode != 'multiple_activities') or (self.active_row is None):
        return
    self.activity_data[self.active_row]['rcd_date'] = gtk_str(self.rcd_date.get_text())
def on_rcd_starttime_changed(self, widget):
    """Mirror start-time edits into the active multi-import row."""
    if (self.mode != 'multiple_activities') or (self.active_row is None):
        return
    self.activity_data[self.active_row]['rcd_starttime'] = gtk_str(self.rcd_starttime.get_text())
def on_treeviewEntries_row_activated(self, treeview, event):
    """Save the edits of the currently shown row, then display the clicked row.

    NOTE(review): despite the name this reads event.x/event.y, i.e. it looks
    wired to a button-press style event rather than 'row-activated' —
    confirm against the signal connection.
    """
    if (self.active_row is not None):
        # Persist the free-form widgets of the row we are leaving before
        # the form is overwritten with the newly selected row.
        buffer = self.rcd_comments.get_buffer()
        (start, end) = buffer.get_bounds()
        comments = gtk_str(buffer.get_text(start, end, True))
        self.activity_data[self.active_row]['rcd_comments'] = comments
        self.activity_data[self.active_row]['rcd_maxpace'] = gtk_str(self.rcd_maxpace.get_text())
        self.activity_data[self.active_row]['rcd_pace'] = gtk_str(self.rcd_pace.get_text())
        self.activity_data[self.active_row]['rcd_upositive'] = gtk_str(self.rcd_upositive.get_text())
        self.activity_data[self.active_row]['rcd_unegative'] = gtk_str(self.rcd_unegative.get_text())
        self.activity_data[self.active_row]['rcd_maxbeats'] = gtk_str(self.rcd_maxbeats.get_text())
        self.activity_data[self.active_row]['rcd_beats'] = gtk_str(self.rcd_beats.get_text())
        self.activity_data[self.active_row]['rcd_calories'] = gtk_str(self.rcd_calories.get_text())
    x = int(event.x)
    y = int(event.y)
    # Event timestamp currently unused; kept as-is.
    time = event.time
    pthinfo = treeview.get_path_at_pos(x, y)
    if (pthinfo is not None):
        (path, col, cellx, celly) = pthinfo
        treeview.grab_focus()
        treeview.set_cursor(path, col, 0)
        # Let the selection update render before repopulating the form.
        while Gtk.events_pending():
            Gtk.main_iteration()
        self.show_treeviewEntries_row(path[0])
def on_calcavs_clicked(self, widget):
    """Compute average speed and pace from the duration and distance fields.

    Returns False (and does nothing) when either value is missing or not
    usable; otherwise writes the results into the average-speed and pace
    entry widgets.
    """
    logging.debug('>>')
    hour = self.rcd_hour.get_value_as_int()
    min = self.rcd_min.get_value_as_int()
    sec = self.rcd_second.get_value_as_int()
    time = ((sec + (min * 60)) + (hour * 3600))
    if (time < 1):
        logging.debug('Seems no time value (%s) has been entered, nothing to calculate.', time)
        return False
    try:
        distance = float(gtk_str(self.rcd_distance.get_text()))
    except ValueError:
        # Fix: an empty/garbled distance field used to raise straight into GTK.
        logging.debug('Distance field could not be parsed, nothing to calculate.')
        return False
    if (distance < 1):
        # NOTE(review): this also rejects valid sub-unit distances (e.g. 0.5 km)
        # — confirm whether '<= 0' was intended.
        logging.debug('Seems no distance value (%s) has been entered, nothing to calculate.', distance)
        return False
    logging.debug('Time: %d seconds | Distance: %0.2f km (mi)', time, distance)
    average_speed = ((distance * 3600.0) / time)
    logging.debug('Average speed: %0.2f', average_speed)
    self.rcd_average.set_text(('%0.2f' % average_speed))
    dec_pace = (60 / average_speed)
    pace = self.parent.pace_from_float(dec_pace)
    logging.debug('Average pace: %s', pace)
    self.rcd_pace.set_text(pace)
    logging.debug('<<')
def on_calccalories_clicked(self, widget):
    """Estimate calories as MET * (body weight + sport gear weight) * hours.

    Missing or malformed profile/sport values degrade gracefully: weight and
    extra weight default to 0.0; without a MET value nothing is written.
    """
    sport = gtk_str(self.rcd_sport.get_active_text())
    hour = self.rcd_hour.get_value_as_int()
    min = self.rcd_min.get_value_as_int()
    sec = self.rcd_second.get_value_as_int()
    # Fold minutes/seconds into a fractional hour count.
    hour += ((float(min) / 60.0) + (float(sec) / (60.0 * 60.0)))
    weight = self.pytrainer_main.profile.getValue('pytraining', 'prf_weight')
    try:
        weight = float(weight)
    except (TypeError, ValueError):
        # Fix: was a bare 'except'; only conversion failures are expected here.
        weight = 0.0
    try:
        met = float(self.parent.getSportMet(sport))
    except Exception:
        # Fix: was a bare 'except' (also caught KeyboardInterrupt/SystemExit).
        met = None
    try:
        extraweight = self.parent.getSportWeight(sport)
        extraweight = float(extraweight)
    except Exception:
        # Fix: was a bare 'except'.
        extraweight = 0.0
    if (met is not None):
        calories = ((met * (weight + extraweight)) * hour)
        self.rcd_calories.set_text(str(calories))
def on_calcdistance_clicked(self, widget):
    """Derive the distance from the duration plus either speed or pace.

    Prefers the average-speed entry; when it is empty, unparseable or zero,
    falls back to the pace entry.  The complementary field is updated and
    the result is written into the distance entry.  All failures are logged
    and swallowed so a half-filled form never raises into GTK.
    """
    logging.debug('>>')
    try:
        hour = self.rcd_hour.get_value_as_int()
        min = self.rcd_min.get_value_as_int()
        sec = self.rcd_second.get_value_as_int()
        time = ((sec + (min * 60)) + (hour * 3600))
        time_in_hour = (time / 3600.0)
        try:
            average = float(gtk_str(self.rcd_average.get_text()))
            distance = (average * time_in_hour)
            logging.debug('Time: %d seconds | Speed: %0.2f -> Distance: %0.3f km (mi)', time, average, distance)
            pace = self.parent.pace_from_float((60 / average))
            logging.debug('Setting pace: %s', pace)
            self.rcd_pace.set_text(pace)
        except (ValueError, ZeroDivisionError):
            # Fix: was a bare 'except'. No usable speed — use the pace field.
            pace_dec = self.parent.pace_to_float(gtk_str(self.rcd_pace.get_text()))
            distance = (time / (60.0 * pace_dec))
            logging.debug('Time: %d seconds | Pace_dec: %0.2f -> Distance: %0.3f km (mi)', time, pace_dec, distance)
            speed = (distance / time_in_hour)
            logging.debug('Setting average speed: %0.2f', speed)
            self.rcd_average.set_text(('%0.2f' % speed))
        self.set_distance(distance)
    except Exception:
        # Fix: was a bare 'except'; log and leave the form unchanged.
        logging.debug('Traceback: %s', traceback.format_exc())
    logging.debug('<<')
def set_distance(self, distance):
    """Write *distance* into the distance entry, formatted to two decimals."""
    formatted = ('%0.2f' % distance)
    self.rcd_distance.set_text(formatted)
def set_maxspeed(self, vel):
self.rcd_maxvel.set_text(('%0.2f' % vel))
def set_maxhr(self, hr):
    """Write *hr* into the maximum-heart-rate entry, formatted to two decimals."""
    formatted = ('%0.2f' % hr)
    self.rcd_maxbeats.set_text(formatted)
def set_recordtime(self, time_in_hour):
    """Split a fractional hour count into the H/M/S duration spin buttons."""
    whole_hours = int(time_in_hour)
    minutes_float = ((time_in_hour - whole_hours) * 60)
    whole_minutes = int(minutes_float)
    seconds = ((minutes_float - whole_minutes) * 60)
    self.rcd_hour.set_value(whole_hours)
    self.rcd_min.set_value(whole_minutes)
    self.rcd_second.set_value(seconds)
def on_selectfile_clicked(self, widget):
    """Let the user pick a GPS track file and show its path in the file entry."""
    logging.debug('>>')
    from pytrainer.gui.dialogs import fileChooserDialog
    chooser = fileChooserDialog(title='Choose a Google Earth file (.kml) to import', multiple=False)
    selection = chooser.getFiles()
    if selection is not None:
        self.rcd_gpxfile.set_text(selection[0])
    logging.debug('<<')
def set_gpxfile(self):
    """Placeholder hook: currently only traces entry and exit."""
    logging.debug('>>')
    logging.debug('<<')
def on_calculatevalues_clicked(self, widget):
    """Recompute the record's derived values from the selected GPX file."""
    gpxfile = gtk_str(self.rcd_gpxfile.get_text())
    if not os.path.isfile(gpxfile):
        return
    # Lock the manual-entry frames while the file-derived values are applied.
    self.frameGeneral.set_sensitive(0)
    self.frameVelocity.set_sensitive(0)
    self.parent.actualize_fromgpx(gpxfile)
def run_examples():
    """Walk through the caldav demo flows against the configured server.

    Connects with the module-level credentials, lists calendars, removes any
    leftover test calendar from a previous run, then creates a fresh one and
    exercises the add/search/modify/lookup demos before cleaning up.
    """
    with caldav.DAVClient(url=caldav_url, username=username, password=password, headers=headers) as client:
        my_principal = client.principal()
        calendars = my_principal.calendars()
        print_calendars_demo(calendars)
        # Remove any stale test calendar so make_calendar below starts clean.
        find_delete_calendar_demo(my_principal, 'Test calendar from caldav examples')
        my_new_calendar = my_principal.make_calendar(name='Test calendar from caldav examples')
        add_stuff_to_calendar_demo(my_new_calendar)
        event = search_calendar_demo(my_new_calendar)
        read_modify_event_demo(event)
        calendar_by_url_demo(client, my_new_calendar.url)
        # Clean up the server-side artefacts created by this run.
        event.delete()
        my_new_calendar.delete()
# NOTE(review): the line below looks like the remnant of a skip decorator
# (e.g. @unittest.skipIf(gdkpixbuf2 is None, 'GdkPixBuf not available'))
# whose decorator name was lost in extraction — as written this bare tuple
# expression has no effect; confirm against the upstream test suite.
((gdkpixbuf2 is None), 'GdkPixBuf not available')
class GdkPixBufTest(PygletTestCase):
    """Exercise the GdkPixBuf image/animation decoder against bundled test files."""

    def test_load_image(self):
        """An 8-bit paletted GIF decodes to a fixed-size RGBA pixbuf and image."""
        filename = self.get_test_data_file('images', '8bpp.gif')
        with open(filename, 'rb') as f:
            loader = gdkpixbuf2.GdkPixBufLoader(filename, f)
            pixbuf = loader.get_pixbuf()
            assert (pixbuf is not None)
            assert (pixbuf.height == 257)
            assert (pixbuf.width == 235)
            assert (pixbuf.channels == 4)
            assert pixbuf.has_alpha
            image = pixbuf.to_image()
            assert (image is not None)
            assert (image.height == 257)
            assert (image.width == 235)

    def test_load_image_requires_loader_close(self):
        """Decode an image whose data only completes once the loader is closed."""
        filename = self.get_test_data_file('images', 'gdk_close.png')
        with open(filename, 'rb') as f:
            loader = gdkpixbuf2.GdkPixBufLoader(filename, f)
            pixbuf = loader.get_pixbuf()
            assert (pixbuf is not None)
            assert (pixbuf.height == 200)
            assert (pixbuf.width == 200)
            assert (pixbuf.channels == 4)
            assert pixbuf.has_alpha
            image = pixbuf.to_image()
            assert (image is not None)
            assert (image.height == 200)
            assert (image.width == 200)

    def test_load_animation(self):
        """An animated GIF exposes every frame with image, size and duration."""
        filename = self.get_test_data_file('images', 'dinosaur.gif')
        with open(filename, 'rb') as f:
            loader = gdkpixbuf2.GdkPixBufLoader(filename, f)
            gdk_anim = loader.get_animation()
            assert (gdk_anim is not None)
            anim = gdk_anim.to_animation()
            assert (anim is not None)
            assert (len(anim.frames) == 12)
            for frame in anim.frames:
                assert (frame.image is not None)
                assert (frame.duration is not None)
                assert (frame.duration == 0.1)
                assert (frame.image.width == 129)
                assert (frame.image.height == 79)

    def test_incomplete_load(self):
        """Dropping the loader mid-decode must not crash or leak resources."""
        filename = self.get_test_data_file('images', 'gdk_close.png')
        with open(filename, 'rb') as f:
            loader = gdkpixbuf2.GdkPixBufLoader(filename, f)
            del loader
# NOTE(review): the two lines below look like stripped decorator remnants —
# presumably a route registration (e.g. @bp.route('/json/package',
# endpoint='package')) and a permission guard decorator in the original
# pyLoad source; confirm upstream before relying on them.
('/json/package', endpoint='package')
_required('LIST')
def package():
    """Return the package identified by ?id=... with its links sorted by order."""
    api = flask.current_app.config['PYLOAD_API']
    try:
        id = int(flask.request.args.get('id'))
        data = api.get_package_data(id)
        # Present the links in their user-defined ordering.
        tmp = data['links']
        tmp.sort(key=(lambda entry: entry['order']))
        data['links'] = tmp
        return jsonify(data)
    except Exception:
        # Any failure (missing/invalid id, unknown package) maps to HTTP 500.
        return (jsonify(False), 500)
def _make_stage(transformation_module, in_channels, bottleneck_channels, out_channels, block_count, num_groups, stride_in_1x1, first_stride, dilation=1):
blocks = []
stride = first_stride
for _ in range(block_count):
blocks.append(transformation_module(in_channels, bottleneck_channels, out_channels, num_groups, stride_in_1x1, stride, dilation=dilation))
stride = 1
in_channels = out_channels
return nn.Sequential(*blocks) |
def _test():
    """Smoke-test the DRN model factory functions.

    Builds each variant, sanity-checks its parameter count, and runs a
    forward/backward pass on a dummy ImageNet-sized batch.
    """
    import torch
    pretrained = False
    models = [drnc26, drnc42, drnc58, drnd22, drnd38, drnd54, drnd105]
    for model in models:
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print('m={}, {}'.format(model.__name__, weight_count))
        # FIXME: the original per-model expected parameter counts were lost
        # (the comparison literals were blank, which did not even parse);
        # restore them from the reference implementation.  Until then only
        # sanity-check that the model has parameters at all.
        assert (weight_count > 0)
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
def _generic_gaussian_circuit(qubits: Sequence[cirq.Qid], quadratic_hamiltonian: 'openfermion.QuadraticHamiltonian', occupied_orbitals: Optional[Sequence[int]], initial_state: Union[(int, Sequence[int])]) -> cirq.OP_TREE:
    """Yield operations preparing a fermionic Gaussian state of *quadratic_hamiltonian*.

    *initial_state* is either a computational-basis integer or an explicit
    sequence of initially occupied orbitals.  X gates first map the initial
    occupation onto the start configuration required by the preparation
    circuit, which is then realised from its Givens-rotation description.
    """
    n_qubits = len(qubits)
    (circuit_description, start_orbitals) = gaussian_state_preparation_circuit(quadratic_hamiltonian, occupied_orbitals)
    if isinstance(initial_state, int):
        # Decode the basis-state integer into the set of occupied orbitals.
        initially_occupied_orbitals = _occupied_orbitals(initial_state, n_qubits)
    else:
        initially_occupied_orbitals = initial_state
    # Flip exactly those qubits whose occupation differs between the initial
    # state and the circuit's required start configuration.
    (yield (cirq.X(qubits[j]) for j in range(n_qubits) if ((j in initially_occupied_orbitals) != (j in start_orbitals))))
    (yield _ops_from_givens_rotations_circuit_description(qubits, circuit_description))
def install_legacy_fan(args):
    """Create a fake hwmon fan sensor directory and seed its control files."""
    path_fan = os.path.join(FAKE_DIRECTORY, 'class/hwmon', 'hwmon12')
    print('Installing Fan sensor {path}'.format(path=path_fan))
    if (not os.path.isdir(path_fan)):
        print('The directory {path} is not present. Creating a new one..'.format(path=path_fan))
        os.makedirs(path_fan)
    # Seed the sensor's control/readout files with neutral defaults.
    for entry_name, entry_value in (('target_pwm', '128'), ('rpm_measured', '0'), ('temp_control', '0')):
        write_on_file(os.path.join(path_fan, entry_name), entry_value)
class RHEL5_TestCase(CommandTest):
    """Parser tests for the RHEL5 kickstart 'key' command."""
    command = 'key'

    def runTest(self):
        """Accept valid forms, reject malformed ones, and check empty-key output."""
        # Valid forms round-trip to their canonical string representation.
        self.assert_parse('key 012345abcd', 'key 012345abcd\n')
        self.assert_parse('key --skip', 'key --skip\n')
        # A missing argument and unknown flags must be rejected.
        self.assert_parse_error('key')
        self.assert_parse_error('key --bogus-option')
        self.assert_parse_error('key --badflag foobar')
        # An unset/empty key renders as an empty string.
        cmd = self.handler().commands[self.command]
        cmd.key = ''
        self.assertEqual(cmd.__str__(), '')
def extract_macosx_min_system_version(path_to_lib):
    """Return the minimum macOS deployment version encoded in a Mach-O binary.

    Returns a version tuple, or None when it cannot be determined.  Fat
    (multi-architecture) binaries report the maximum of the per-slice
    minimum versions.
    """
    with open(path_to_lib, 'rb') as lib_file:
        (BaseClass, magic_number) = get_base_class_and_magic_number(lib_file, 0)
        if (magic_number not in [FAT_MAGIC, FAT_MAGIC_64, MH_MAGIC, MH_MAGIC_64]):
            # Not a recognised Mach-O or fat binary.
            return
        if (magic_number in [FAT_MAGIC, FAT_CIGAM_64]):
            # NOTE(review): the guard above accepts FAT_MAGIC_64 but this
            # branch tests FAT_CIGAM_64 — one of the two constants looks
            # inconsistent; confirm against the upstream wheel source.
            class FatHeader(BaseClass):
                _fields_ = fat_header_fields
            fat_header = read_data(FatHeader, lib_file)
            # 32-bit and 64-bit fat binaries use differently sized arch records.
            if (magic_number == FAT_MAGIC):
                class FatArch(BaseClass):
                    _fields_ = fat_arch_fields
            else:
                class FatArch(BaseClass):
                    _fields_ = fat_arch_64_fields
            fat_arch_list = [read_data(FatArch, lib_file) for _ in range(fat_header.nfat_arch)]
            versions_list = []
            for el in fat_arch_list:
                try:
                    version = read_mach_header(lib_file, el.offset)
                    if (version is not None):
                        # Skip the implied arm64 baseline (11.0.0) in
                        # multi-arch binaries: it was not authored.
                        if ((el.cputype == CPU_TYPE_ARM64) and (len(fat_arch_list) != 1)):
                            if (version == (11, 0, 0)):
                                continue
                        versions_list.append(version)
                except ValueError:
                    # Unreadable slice: ignore and try the remaining ones.
                    pass
            if (len(versions_list) > 0):
                return max(versions_list)
            else:
                return None
        else:
            # Thin binary: a single Mach-O header at offset 0.
            try:
                return read_mach_header(lib_file, 0)
            except ValueError:
                return None
def _run_do_update(app_data, distribution, embed_filename, for_py_version, periodic, search_dirs):
    """Refresh the wheel-update log for *distribution* and return new versions.

    Downloads progressively older wheels until one is old enough to trust
    outright (or matches the embedded/last-known version), records them in
    the per-distribution update log, and returns the list of newly
    discovered NewVersion entries.
    """
    from virtualenv.seed.wheels import acquire
    # Normalise the loosely-typed arguments (strings from the CLI layer).
    wheel_filename = (None if (embed_filename is None) else Path(embed_filename))
    embed_version = (None if (wheel_filename is None) else Wheel(wheel_filename).version_tuple)
    app_data = (AppDataDiskFolder(app_data) if isinstance(app_data, str) else app_data)
    search_dirs = [(Path(p) if isinstance(p, str) else p) for p in search_dirs]
    wheelhouse = app_data.house
    embed_update_log = app_data.embed_update_log(distribution, for_py_version)
    u_log = UpdateLog.from_dict(embed_update_log.read())
    now = datetime.now(tz=timezone.utc)
    # Split previously recorded versions: entries found by an update run are
    # kept in their slot order; the rest are re-appended afterwards.
    (update_versions, other_versions) = ([], [])
    for version in u_log.versions:
        if (version.source in {'periodic', 'manual'}):
            update_versions.append(version)
        else:
            other_versions.append(version)
    if periodic:
        source = 'periodic'
    else:
        source = 'manual'
        # A manual run takes ownership of the most recent update entry.
        if update_versions:
            update_versions[0].source = source
    if (wheel_filename is not None):
        # Ensure the explicitly provided wheel is present in the wheelhouse.
        dest = (wheelhouse / wheel_filename.name)
        if (not dest.exists()):
            copy2(str(wheel_filename), str(wheelhouse))
    (last, last_version, versions, filenames) = (None, None, [], set())
    # Walk releases newest-first until we find one old enough to use.
    while ((last is None) or (not last.use(now, ignore_grace_period_ci=True))):
        download_time = datetime.now(tz=timezone.utc)
        dest = acquire.download_wheel(distribution=distribution, version_spec=(None if (last_version is None) else f'<{last_version}'), for_py_version=for_py_version, search_dirs=search_dirs, app_data=app_data, to_folder=wheelhouse, env=os.environ)
        if ((dest is None) or (update_versions and (update_versions[0].filename == dest.name))):
            # Nothing newer available, or we reached what is already known.
            break
        release_date = release_date_for_wheel_path(dest.path)
        last = NewVersion(filename=dest.path.name, release_date=release_date, found_date=download_time, source=source)
        logging.info('detected %s in %s', last, (datetime.now(tz=timezone.utc) - download_time))
        versions.append(last)
        filenames.add(last.filename)
        last_wheel = last.wheel
        last_version = last_wheel.version
        if ((embed_version is not None) and (embed_version >= last_wheel.version_tuple)):
            # The embedded wheel is at least as new — stop searching.
            break
    u_log.periodic = periodic
    if (not u_log.periodic):
        u_log.started = now
    # Drop older entries superseded by the versions found in this run.
    other_versions = [version for version in other_versions if (version.filename not in filenames)]
    u_log.versions = ((versions + update_versions) + other_versions)
    u_log.completed = datetime.now(tz=timezone.utc)
    embed_update_log.write(u_log.to_dict())
    return versions
class ErrorHandler(object):
    """Propagate an exception from any worker process to the parent process.

    A daemon thread blocks on *error_queue*; when a (rank, traceback) pair
    arrives it signals the parent with SIGUSR1, whose handler interrupts all
    registered children and re-raises the traceback in the main thread.
    """

    def __init__(self, error_queue):
        """Start the listener thread and install the SIGUSR1 handler."""
        import signal
        import threading
        self.error_queue = error_queue
        self.children_pids = []
        self.error_thread = threading.Thread(target=self.error_listener, daemon=True)
        self.error_thread.start()
        signal.signal(signal.SIGUSR1, self.signal_handler)

    def add_child(self, pid):
        """Register a child PID to be interrupted when an error arrives."""
        self.children_pids.append(pid)

    def error_listener(self):
        """Wait for an error, requeue it for the handler, and signal the parent."""
        # Fix: 'os' and 'signal' were only imported inside __init__, so this
        # method raised NameError unless the module also imported them at the
        # top level (not visible in this chunk).
        import os
        import signal
        (rank, original_trace) = self.error_queue.get()
        # Put the error back so signal_handler can retrieve it.
        self.error_queue.put((rank, original_trace))
        os.kill(os.getpid(), signal.SIGUSR1)

    def signal_handler(self, signalnum, stackframe):
        """Interrupt all children, then re-raise the worker's traceback."""
        import os
        import signal
        for pid in self.children_pids:
            os.kill(pid, signal.SIGINT)
        (rank, original_trace) = self.error_queue.get()
        msg = '\n\n-- Tracebacks above this line can probably\n be ignored --\n\n'
        msg += original_trace
        raise Exception(msg)
def parse_args():
    """Parse CLI flags and attach device/rank info derived from the environment."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, default='/afs/crc.nd.edu/user/y/ypeng4/UACANet/configs/UACANet-L.yaml')
    parser.add_argument('--resume', action='store_true', default=False)
    parser.add_argument('--verbose', action='store_true', default=False)
    parser.add_argument('--debug', action='store_true', default=False)
    args = parser.parse_args()
    visible = os.environ.get('CUDA_VISIBLE_DEVICES')
    cuda_visible_devices = None if visible is None else [int(i) for i in visible.split(',')]
    local_rank = int(os.environ.get('LOCAL_RANK', -1))
    # Outside distributed launch (rank -1) assume a single device; otherwise
    # prefer the explicit CUDA_VISIBLE_DEVICES list over the probed count.
    if local_rank == -1:
        device_num = 1
    elif cuda_visible_devices is None:
        device_num = torch.cuda.device_count()
    else:
        device_num = len(cuda_visible_devices)
    args.device_num = device_num
    args.local_rank = local_rank
    return args
def create_confirmation_dialog(title, message, uid):
    """Show a native OK/Cancel dialog for browser window *uid*; return True on OK.

    The Cocoa call must run on the main thread (AppHelper.callAfter); the
    calling thread blocks on a semaphore until the dialog is dismissed.
    NOTE(review): *title* is currently unused — confirm whether it should be
    passed to the dialog.
    """
    def _confirm():
        nonlocal result
        i = BrowserView.instances[uid]
        # Localised button labels for the target window.
        ok = i.localization['global.ok']
        cancel = i.localization['global.cancel']
        result = BrowserView.display_confirmation_dialog(ok, cancel, message)
        semaphore.release()
    result = False
    semaphore = Semaphore(0)
    AppHelper.callAfter(_confirm)
    # Block until _confirm has run on the main thread.
    semaphore.acquire()
    return result
def command_shighband(command, args):
    """Run the standard high-band sensor check; return (result, outfile) if requested."""
    def configure(parser):
        # Register the shared CLI option groups and the band-pass defaults.
        add_source_options(parser)
        add_sensor_options(parser)
        add_filter_options(parser)
        parser.set_defaults(rel_lowpass_frequency=0.125)
        parser.set_defaults(rel_highpass_frequency=0.25)
    (parser, opts, args) = cl_parse(command, args, setup=configure)
    st_dir = verify_arguements('shighband', 1, args)
    verify_options('shighband', **opts)
    out_filename = opts.pop('output')
    gft = gftest.runStandardCheck(st_dir, **opts)
    if out_filename is not None:
        return (gft, out_filename)
# NOTE(review): the line below is the remnant of a decorator — presumably
# @pytest.mark.parametrize — whose '@pytest.mark' prefix was lost in
# extraction; confirm against the upstream qutip test suite.
.parametrize(['ops', 'state', 'final_states', 'probabilities'], [pytest.param(PZ, basis(2, 0), [state0, None], [1, 0], id='PZ_ket'), pytest.param(PZ, basis(2, 0).proj(), [state0.proj(), None], [1, 0], id='PZ_dm'), pytest.param(PZ_ket, basis(2, 0), [state0, None], [1, 0], id='PZket_ket'), pytest.param(PZ_ket, basis(2, 0).proj(), [state0.proj(), None], [1, 0], id='PZket_dm'), pytest.param(PX, basis(2, 0), [stateplus, stateminus], [0.5, 0.5], id='PX_ket'), pytest.param(PX, basis(2, 0).proj(), [stateplus.proj(), stateminus.proj()], [0.5, 0.5], id='PX_dm'), pytest.param(PY, basis(2, 0), [stateR, stateL], [0.5, 0.5], id='PY_ket'), pytest.param(PY, basis(2, 0).proj(), [stateR.proj(), stateL.proj()], [0.5, 0.5], id='PY_dm')])
def test_measurement_statistics_povm(ops, state, final_states, probabilities):
    """Check POVM measurement statistics: collapsed states and outcome probabilities."""
    (collapsed_states, probs) = measurement_statistics_povm(state, ops)
    for (i, final_state) in enumerate(final_states):
        collapsed_state = collapsed_states[i]
        if final_state:
            assert (collapsed_state == final_state)
        else:
            # A zero-probability outcome yields no collapsed state.
            assert (collapsed_state is None)
    np.testing.assert_almost_equal(probs, probabilities)
class SegformerOverlapPatchEmbeddings(nn.Module):
    """Overlapping patch embedding: strided conv followed by layer norm over flattened patches."""

    def __init__(self, patch_size, stride, num_channels, hidden_size):
        super().__init__()
        # Padding of patch_size // 2 makes neighbouring patches overlap.
        self.proj = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=(patch_size // 2))
        self.layer_norm = nn.LayerNorm(hidden_size)

    def forward(self, pixel_values):
        """Return (embeddings, height, width); embeddings has shape (B, H*W, C)."""
        projected = self.proj(pixel_values)
        height, width = projected.shape[2], projected.shape[3]
        # Flatten the spatial grid into a token sequence before normalising.
        flattened = projected.flatten(2).transpose(1, 2)
        return (self.layer_norm(flattened), height, width)
class WrapLinker(Linker):
    """Run several linkers over the same graph, invoking their thunks together.

    Each wrapped linker compiles the same FunctionGraph; execution walks the
    graph node by node and hands all linkers' thunks for that node to
    *wrapper*, which decides how to call them (useful for cross-checking
    different implementations of the same ops).
    """

    def __init__(self, linkers: Sequence[PerformLinker], wrapper: Callable) -> None:
        """Store the wrapped *linkers* and the per-node *wrapper* callable."""
        self.fgraph: Optional[FunctionGraph] = None
        self.linkers = linkers
        self.wrapper = wrapper

    def __copy__(self) -> 'WrapLinker':
        """Shallow copy: the linkers are copied, the wrapper is shared."""
        other = self.__class__(linkers=[copy(x) for x in self.linkers], wrapper=self.wrapper)
        return other

    def clone(self, allow_gc=None):
        """Return a new WrapLinker whose linkers are cloned with *allow_gc*."""
        return self.__class__(linkers=[x.clone(allow_gc=allow_gc) for x in self.linkers], wrapper=self.wrapper)

    def accept(self, fgraph: FunctionGraph, no_recycling: Optional[Sequence['TensorVariable']]=None, profile: Optional[Union[(bool, 'ProfileStats')]]=None) -> 'WrapLinker':
        """Bind *fgraph* to this linker and to every wrapped linker.

        If this instance is already bound to a different graph, a fresh
        WrapLinker is created instead so the existing binding is untouched.
        """
        if (no_recycling is None):
            no_recycling = []
        if ((self.fgraph is not None) and (self.fgraph is not fgraph)):
            return type(self)(self.linkers, self.wrapper).accept(fgraph, no_recycling)
        self.fgraph = fgraph
        self.no_recycling = no_recycling
        self.linkers = [linker.accept(fgraph, no_recycling) for linker in self.linkers]
        return self

    def pre(self, f: 'WrapLinker', inputs: Union[(list['NDArray'], list[Optional[float]])], order: list[Apply], thunk_groups: list[tuple[Callable]]) -> None:
        """Hook run once before each call of the compiled function; default no-op."""
        pass

    def make_thunk(self, **kwargs):
        """Compile every wrapped linker; return (fn, input_storage, output_storage).

        The first linker's storage is authoritative: its inputs are copied
        into the other linkers' storage before each run, and all linkers
        must schedule the graph nodes in the same order.
        """
        no_recycling = self.no_recycling
        make_all = [self.linkers[0].make_all(**kwargs)]
        # Only the first linker receives the caller-provided input storage.
        kwargs.pop('input_storage', None)
        make_all += [x.make_all(**kwargs) for x in self.linkers[1:]]
        (fns, input_lists, output_lists, thunk_lists, order_lists) = zip(*make_all)
        order_list0 = order_lists[0]
        for order_list in order_lists[1:]:
            if (order_list0 != order_list):
                raise Exception('All linkers to WrapLinker should execute operations in the same order.')
        inputs0 = input_lists[0]
        outputs0 = output_lists[0]
        # thunk_groups[i] holds every linker's thunk for node order[i].
        thunk_groups = list(zip(*thunk_lists))
        order = [x[0] for x in zip(*order_lists)]
        # Outputs flagged as non-recyclable are cleared before every run.
        to_reset = []
        for (thunks, node) in zip(thunk_groups, order):
            for (j, output) in enumerate(node.outputs):
                if (output in no_recycling):
                    for thunk in thunks:
                        to_reset.append(thunk.outputs[j])
        wrapper = self.wrapper
        pre = self.pre
        def f():
            # Mirror the first linker's inputs into every other linker.
            for inputs in input_lists[1:]:
                for (input1, input2) in zip(inputs0, inputs):
                    input2.storage[0] = copy(input1.storage[0])
            for x in to_reset:
                x[0] = None
            pre(self, [input.data for input in input_lists[0]], order, thunk_groups)
            for (i, (thunks, node)) in enumerate(zip(thunk_groups, order)):
                try:
                    wrapper(self.fgraph, i, node, *thunks)
                except Exception:
                    # Re-raise with node context attached for debugging.
                    raise_with_op(self.fgraph, node, *thunks)
        f.thunk_groups = thunk_groups
        return (f, inputs0, outputs0)
def spatial_svd_example(config: argparse.Namespace):
    """End-to-end spatial-SVD demo: evaluate, compress resnet18, fine-tune, save.

    Evaluates the pretrained model, compresses it with spatial SVD, logs the
    compression statistics (also written to <logdir>/log.txt), fine-tunes the
    compressed model, and saves it to <logdir>/compressed_model.pth.
    """
    data_pipeline = ImageNetDataPipeline(config)
    model = models.resnet18(pretrained=True)
    if config.use_cuda:
        model.to(torch.device('cuda'))
    model.eval()
    accuracy = data_pipeline.evaluate(model, use_cuda=config.use_cuda)
    logger.info('Original Model top-1 accuracy = %.2f', accuracy)
    logger.info('Starting Spatial SVD')
    (compressed_model, stats) = aimet_spatial_svd(model=model, evaluator=data_pipeline.evaluate)
    logger.info(stats)
    # Persist the raw compression statistics alongside the run logs.
    with open(os.path.join(config.logdir, 'log.txt'), 'w') as outfile:
        outfile.write(('%s\n\n' % stats))
    accuracy = data_pipeline.evaluate(compressed_model, use_cuda=config.use_cuda)
    logger.info('Compressed Model Top-1 accuracy = %.2f', accuracy)
    logger.info('Spatial SVD Complete')
    logger.info('Starting Model Finetuning')
    data_pipeline.finetune(compressed_model)
    accuracy = data_pipeline.evaluate(compressed_model, use_cuda=config.use_cuda)
    logger.info('Finetuned Compressed Model top-1 accuracy = %.2f', accuracy)
    logger.info('Model Finetuning Complete')
    torch.save(compressed_model, os.path.join(config.logdir, 'compressed_model.pth'))
# NOTE(review): the three lines below look like stripped decorators —
# presumably @cmdutils.register(debug=True) and two @cmdutils.argument(...)
# declarations in the original qutebrowser source; confirm upstream.
(debug=True)
('tab', value=cmdutils.Value.cur_tab)
('count', value=cmdutils.Value.count)
def debug_webaction(tab: apitypes.Tab, action: str, count: int=1) -> None:
    """Run a web-page action string *count* times on the current tab.

    An unknown action surfaces to the user as a CommandError.
    """
    for _ in range(count):
        try:
            tab.action.run_string(action)
        except apitypes.WebTabError as e:
            raise cmdutils.CommandError(str(e))
def get_args():
    """Build and parse the bookmark-importer command line."""
    parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, epilog=textwrap.dedent("\n    To import bookmarks, you'll need the path to your profile or an\n    exported HTML file from your browser's bookmark manager. Redirect\n    the output from this script to the appropriate file in your\n    qutebrowser config directory (listed in the output of :version),\n    usually done with the '>' operator; for example,\n    ./importer.py -i mozilla your_profile_path > ~/.config/qutebrowser/quickmarks\n\n    Common browsers with native input format support:\n        chrome: Chrome, Chromium, Edge\n        mozilla: Firefox, SeaMonkey, Pale Moon\n    "))
    # All boolean flags share the same argparse configuration.
    flag = dict(action='store_true', default=False, required=False)
    parser.add_argument('-i', '--input-format', help='Which input format? Defaults to html', choices=['html', 'mozilla', 'chrome'], default='html', required=False)
    parser.add_argument('-b', '--bookmark-output', help='Output in bookmark format.', **flag)
    parser.add_argument('-q', '--quickmark-output', help='Output in quickmark format (default).', **flag)
    parser.add_argument('-s', '--search-output', help='Output config.py search engine format (negates -B and -K)', **flag)
    parser.add_argument('--oldconfig', help='Output search engine format for old qutebrowser.conf format', **flag)
    parser.add_argument('-B', '--import-bookmarks', help='Import plain bookmarks (can be combiend with -K)', **flag)
    parser.add_argument('-K', '--import-keywords', help='Import keywords (can be combined with -B)', **flag)
    parser.add_argument('bookmarks', help='Bookmarks file (html format) or profile folder (Mozilla format)')
    return parser.parse_args()
class _Normalization(pystiche.Module):
    """Base module holding per-channel mean/std statistics for (de)normalisation."""

    def __init__(self, mean: Sequence[float], std: Sequence[float]) -> None:
        super().__init__()
        # Per-channel statistics; their length must match the image channels.
        self.mean = mean
        self.std = std

    # NOTE(review): this method has no *self* parameter, so it was presumably
    # decorated with @staticmethod upstream and the decorator was lost in
    # extraction — confirm before calling it via an instance.
    def _channel_stats_to_tensor(image: torch.Tensor, mean: Sequence[float], std: Sequence[float]) -> Tuple[(torch.Tensor, torch.Tensor)]:
        """Convert the mean/std sequences into tensors broadcastable over *image*."""
        num_channels = extract_num_channels(image)
        def to_tensor(seq: Sequence[float]) -> torch.Tensor:
            if (len(seq) != num_channels):
                msg = f'The length of the channel statistics and the number of image channels do not match: {len(seq)} != {num_channels}'
                raise RuntimeError(msg)
            # Shape (..., C, 1, 1): statistics broadcast over the channel axis.
            shape = ([1] * image.ndim)
            shape[(- 3)] = len(seq)
            return torch.tensor(seq, dtype=image.dtype, device=image.device).view(shape)
        return (to_tensor(mean), to_tensor(std))

    # NOTE(review): also missing *self* — presumably a lost @staticmethod; see
    # the note above (it is invoked as self._format_stats(...) below).
    def _format_stats(stats: Sequence[float], fmt: str='{:g}') -> str:
        """Render a statistics sequence as a compact tuple string."""
        return str(tuple((fmt.format(stat) for stat in stats)))

    def _properties(self) -> Dict[(str, Any)]:
        """Extend the parent's repr properties with formatted mean/std."""
        dct = super()._properties()
        dct['mean'] = self._format_stats(self.mean)
        dct['std'] = self._format_stats(self.std)
        return dct
def get_default_log() -> Path:
    """Return the default qtile log file path, creating its parent directory.

    Resolves $XDG_DATA_HOME (falling back to ~/.local/share per the XDG base
    directory specification) and ensures <data>/qtile exists.
    """
    data_directory = os.path.expandvars('$XDG_DATA_HOME')
    if (data_directory == '$XDG_DATA_HOME'):
        # expandvars leaves the literal untouched when the variable is unset.
        data_directory = os.path.expanduser('~/.local/share')
    qtile_directory = (Path(data_directory) / 'qtile')
    # Fix: exist_ok avoids the race between the original exists() check and
    # mkdir() when two processes start concurrently.
    qtile_directory.mkdir(parents=True, exist_ok=True)
    return (qtile_directory / 'qtile.log')
def report_energy(bodies=SYSTEM, pairs=PAIRS, e=0.0):
    """Return the system's total mechanical energy.

    Accumulates the pairwise gravitational potential (negative) over *pairs*
    and each body's kinetic energy over *bodies*, starting from *e*.
    """
    for ((pos1, vel1, mass1), (pos2, vel2, mass2)) in pairs:
        dx = (pos1[0] - pos2[0])
        dy = (pos1[1] - pos2[1])
        dz = (pos1[2] - pos2[2])
        e -= ((mass1 * mass2) / ((((dx * dx) + (dy * dy)) + (dz * dz)) ** 0.5))
    for (pos, velocity, mass) in bodies:
        e += ((mass * sum((component * component) for component in velocity)) / 2.0)
    return e
def computer_move(board):
    """Pick a move via UCT Monte-Carlo search; PASS when no random move exists."""
    seed_pos = board.random_move()
    if seed_pos == PASS:
        return PASS
    root = UCTNode()
    root.unexplored = board.useful_moves()
    scratch_board = Board()
    # Each playout replays the real game onto a scratch board, then lets the
    # tree search continue from there.
    for _ in range(GAMES):
        scratch_board.reset()
        scratch_board.replay(board.history)
        root.play(scratch_board)
    return root.best_visited().pos
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.