class SwinTransformer(nn.Module):
def __init__(self, img_size=224, patch_size=4, embed_dim=96, depths=[2, 2, 6], num_heads=[3, 6, 12], window_size=7, mlp_ratio=4.0, qkv_bias=True, qk_scale=None, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.1, norm_layer=nn.LayerNorm, ape=False, patch_norm=True, **kwargs):
super().__init__()
patches_resolution = [(img_size // patch_size), (img_size // patch_size)]
num_patches = (patches_resolution[0] * patches_resolution[1])
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.ape = ape
self.patch_norm = patch_norm
self.num_features = int((embed_dim * (2 ** (self.num_layers - 1))))
self.mlp_ratio = mlp_ratio
if self.ape:
self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
trunc_normal_(self.absolute_pos_embed, std=0.02)
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(dim=int((embed_dim * (2 ** i_layer))), input_resolution=((patches_resolution[0] // (2 ** i_layer)), (patches_resolution[1] // (2 ** i_layer))), depth=depths[i_layer], num_heads=num_heads[i_layer], window_size=window_size, mlp_ratio=self.mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[sum(depths[:i_layer]):sum(depths[:(i_layer + 1)])], norm_layer=norm_layer, downsample=None)
self.layers.append(layer)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if (isinstance(m, nn.Linear) and (m.bias is not None)):
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'absolute_pos_embed'}
@torch.jit.ignore
def no_weight_decay_keywords(self):
return {'relative_position_bias_table'} |
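
# The constructor above assigns stochastic-depth (drop-path) rates on a linear
# schedule over all blocks; a small self-contained illustration of how those
# rates split across the three stages (only torch is required):
import torch
depths, drop_path_rate = [2, 2, 6], 0.1
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
print(dpr[:2])   # stage 0 rates, starting at 0.0
print(dpr[2:4])  # stage 1 rates
print(dpr[4:])   # stage 2 rates, ending at drop_path_rate
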
def str_to_bool(value):
if isinstance(value, str):
value = value.strip().lower()
if (value in ['true', 't', 'yes', 'y']):
return True
elif (value in ['false', 'f', 'no', 'n', '']):
return False
else:
raise NotImplementedError(('Unknown bool %s' % value))
return value |
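
# Illustrative calls against str_to_bool as defined above: whitespace and case
# are ignored, the empty string maps to False, and non-strings pass through.
assert str_to_bool(' Yes ') is True
assert str_to_bool('f') is False
assert str_to_bool('') is False
assert str_to_bool(True) is True  # non-string input is returned unchanged
try:
    str_to_bool('maybe')
except NotImplementedError as exc:
    print(exc)  # Unknown bool maybe
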
def callgraphHTML(sv, hf, num, cg, title, color, devid):
html_func_top = '<article id="{0}" class="atop" style="background:{1}">\n<input type="checkbox" class="pf" id="f{2}" checked/><label for="f{2}">{3} {4}</label>\n'
html_func_start = '<article>\n<input type="checkbox" class="pf" id="f{0}" checked/><label for="f{0}">{1} {2}</label>\n'
html_func_end = '</article>\n'
html_func_leaf = '<article>{0} {1}</article>\n'
cgid = devid
if cg.id:
cgid += cg.id
cglen = ((cg.end - cg.start) * 1000)
if (cglen < sv.mincglen):
return num
fmt = (((('<r>(%.3f ms ' + sv.timeformat) + ' to ') + sv.timeformat) + ')</r>')
flen = (fmt % (cglen, cg.start, cg.end))
hf.write(html_func_top.format(cgid, color, num, title, flen))
num += 1
for line in cg.list:
if (line.length < 1e-09):
flen = ''
else:
fmt = (('<n>(%.3f ms ' + sv.timeformat) + ')</n>')
flen = (fmt % ((line.length * 1000), line.time))
if line.isLeaf():
hf.write(html_func_leaf.format(line.name, flen))
elif line.freturn:
hf.write(html_func_end)
else:
hf.write(html_func_start.format(num, line.name, flen))
num += 1
hf.write(html_func_end)
return num |
class ParseError(RuntimeError):
def __init__(self, expected, stream, index):
self.expected = expected
self.stream = stream
self.index = index
def line_info(self):
try:
return '{}:{}'.format(*line_info_at(self.stream, self.index))
except (TypeError, AttributeError):
return str(self.index)
def __str__(self):
expected_list = sorted((repr(e) for e in self.expected))
if (len(expected_list) == 1):
return f'expected {expected_list[0]} at {self.line_info()}'
else:
return f"expected one of {', '.join(expected_list)} at {self.line_info()}" |
def test_channelstate_repeated_contract_balance():
deposit_block_number = 10
block_number = ((deposit_block_number + DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS) + 1)
deposit_block_hash = make_block_hash()
(our_model1, _) = create_model(70)
(partner_model1, partner_pkey1) = create_model(100)
channel_state = create_channel_from_models(our_model1, partner_model1, partner_pkey1)
deposit_amount = 10
balance1_new = (our_model1.balance + deposit_amount)
deposit_transaction = TransactionChannelDeposit(our_model1.participant_address, balance1_new, deposit_block_number)
state_change = ContractReceiveChannelDeposit(transaction_hash=make_transaction_hash(), canonical_identifier=channel_state.canonical_identifier, deposit_transaction=deposit_transaction, block_number=deposit_block_number, block_hash=deposit_block_hash, fee_config=MediationFeeConfig())
our_model2 = our_model1._replace(balance=balance1_new, distributable=balance1_new, contract_balance=balance1_new)
partner_model2 = partner_model1
for _ in range(10):
iteration = channel.state_transition(channel_state=deepcopy(channel_state), state_change=state_change, block_number=block_number, block_hash=make_block_hash(), pseudo_random_generator=random.Random())
new_state = iteration.new_state
assert_partner_state(new_state.our_state, new_state.partner_state, our_model2)
assert_partner_state(new_state.partner_state, new_state.our_state, partner_model2) |
class InitDataset(Dataset):
def __init__(self, data_root, img_transform, mask_transform, data='train'):
super(InitDataset, self).__init__()
self.img_transform = img_transform
self.mask_transform = mask_transform
if (data == 'train'):
self.paths = glob('{}/train/**/*.jpg'.format(data_root), recursive=True)
self.mask_paths = glob('{}/mask/*.png'.format(data_root))
else:
self.paths = glob('{}/val/*.jpg'.format(data_root, data))
self.mask_paths = glob('{}/val_mask/*.png'.format(data_root))
self.N_mask = len(self.mask_paths)
def __len__(self):
return len(self.paths)
def __getitem__(self, index):
img = self._load_img(self.paths[index])
img = self.img_transform(img.convert('RGB'))
mask = Image.open(self.mask_paths[random.randint(0, (self.N_mask - 1))])
mask = self.mask_transform(mask.convert('RGB'))
return ((img * mask), mask, img)
def _load_img(self, path):
try:
img = Image.open(path)
except OSError:  # file missing or unreadable; probe sibling frame indices below
extension = path.split('.')[(- 1)]
for i in range(10):
new_path = (((path.split('.')[0][:(- 1)] + str(i)) + '.') + extension)
try:
img = Image.open(new_path)
break
except OSError:
continue
return img |
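
# Construction sketch (the data_root layout -- train/**/*.jpg plus mask/*.png --
# and the path are assumptions): each item pairs an image with a randomly drawn
# mask and yields (masked_image, mask, original_image) for inpainting training.
from torch.utils.data import DataLoader
from torchvision import transforms
size = (256, 256)
img_tf = transforms.Compose([transforms.Resize(size), transforms.ToTensor()])
mask_tf = transforms.Compose([transforms.Resize(size), transforms.ToTensor()])
dataset = InitDataset('/path/to/data_root', img_tf, mask_tf, data='train')
loader = DataLoader(dataset, batch_size=8, shuffle=True)
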
def jsonl_iterator(input_fname, positive_only=False, ngram_order=3, eval=False):
detok = get_detokenizer()
nlp = get_spacy_nlp()
with open(input_fname) as fin:
for line in fin:
sample = json.loads(line.strip())
if (positive_only and ('label' in sample) and (not sample['label'])):
continue
target = sample['target']
query = target['span1_text']
if (query is not None):
if ('\n' in query):
continue
if (query.endswith('.') or query.endswith(',')):
query = query[:(- 1)]
tokens = sample['text'].split(' ')
def strip_pronoun(x):
return x.rstrip('.,"')
pronoun_idx = target['span2_index']
pronoun = strip_pronoun(target['span2_text'])
if (strip_pronoun(tokens[pronoun_idx]) != pronoun):
if (strip_pronoun(tokens[(pronoun_idx + 1)]) == pronoun):
pronoun_idx += 1
else:
raise Exception('Misaligned pronoun!')
assert (strip_pronoun(tokens[pronoun_idx]) == pronoun)
before = tokens[:pronoun_idx]
after = tokens[(pronoun_idx + 1):]
leading_space = (' ' if (pronoun_idx > 0) else '')
trailing_space = (' ' if (len(after) > 0) else '')
before = detok.detokenize(before, return_str=True)
pronoun = detok.detokenize([pronoun], return_str=True)
after = detok.detokenize(after, return_str=True)
if (pronoun.endswith('.') or pronoun.endswith(',')):
after = ((pronoun[(- 1)] + trailing_space) + after)
pronoun = pronoun[:(- 1)]
if (after.startswith('.') or after.startswith(',')):
trailing_space = ''
sentence = nlp(((((before + leading_space) + pronoun) + trailing_space) + after))
start = len((before + leading_space))
first_pronoun_tok = find_token(sentence, start_pos=start)
pronoun_span = find_span(sentence, pronoun, start=first_pronoun_tok.i)
assert (pronoun_span.text == pronoun)
if eval:
query_span = find_span(sentence, query)
query_with_ws = '_{}_{}'.format(query_span.text, (' ' if query_span.text_with_ws.endswith(' ') else ''))
pronoun_with_ws = '[{}]{}'.format(pronoun_span.text, (' ' if pronoun_span.text_with_ws.endswith(' ') else ''))
if (query_span.start < pronoun_span.start):
first = (query_span, query_with_ws)
second = (pronoun_span, pronoun_with_ws)
else:
first = (pronoun_span, pronoun_with_ws)
second = (query_span, query_with_ws)
sentence = ((((sentence[:first[0].start].text_with_ws + first[1]) + sentence[first[0].end:second[0].start].text_with_ws) + second[1]) + sentence[second[0].end:].text)
(yield (sentence, sample.get('label', None)))
else:
(yield (sentence, pronoun_span, query, sample.get('label', None))) |
def testQueryURL(run_cli, backends):
bz = _open_bz(REDHAT_URL, **backends)
qurl = '/buglist.cgi?f1=creation_ts&list_id=973582&o1=greaterthaneq&classification=Fedora&o2=lessthaneq&query_format=advanced&f2=creation_ts&v1=2010-01-01&component=python-bugzilla&v2=2010-06-01&product=Fedora'
url = REDHAT_URL
if ('/xmlrpc.cgi' in url):
url = url.replace('/xmlrpc.cgi', qurl)
else:
url += qurl
out = run_cli(('bugzilla query --from-url "%s"' % url), bz)
_check(out, 10, '#553878 CLOSED') |
class DockerSchema2ManifestBuilder(object):
def __init__(self):
self.config = None
self.filesystem_layers = []
def clone(self):
cloned = DockerSchema2ManifestBuilder()
cloned.config = self.config
cloned.filesystem_layers = list(self.filesystem_layers)
return cloned
def set_config(self, schema2_config):
self.set_config_digest(schema2_config.digest, schema2_config.size)
def set_config_digest(self, config_digest, config_size):
self.config = DockerV2ManifestConfig(size=config_size, digest=config_digest)
def add_layer(self, digest, size, urls=None):
self.filesystem_layers.append(DockerV2ManifestLayer(index=len(self.filesystem_layers), digest=digest, compressed_size=size, urls=urls, is_remote=bool(urls)))
def build(self, ensure_ascii=True):
assert self.config
def _build_layer(layer):
if layer.urls:
return {DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY: DOCKER_SCHEMA2_REMOTE_LAYER_CONTENT_TYPE, DOCKER_SCHEMA2_MANIFEST_SIZE_KEY: layer.compressed_size, DOCKER_SCHEMA2_MANIFEST_DIGEST_KEY: str(layer.digest), DOCKER_SCHEMA2_MANIFEST_URLS_KEY: layer.urls}
return {DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY: DOCKER_SCHEMA2_LAYER_CONTENT_TYPE, DOCKER_SCHEMA2_MANIFEST_SIZE_KEY: layer.compressed_size, DOCKER_SCHEMA2_MANIFEST_DIGEST_KEY: str(layer.digest)}
manifest_dict = {DOCKER_SCHEMA2_MANIFEST_VERSION_KEY: 2, DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY: DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE, DOCKER_SCHEMA2_MANIFEST_CONFIG_KEY: {DOCKER_SCHEMA2_MANIFEST_MEDIATYPE_KEY: DOCKER_SCHEMA2_CONFIG_CONTENT_TYPE, DOCKER_SCHEMA2_MANIFEST_SIZE_KEY: self.config.size, DOCKER_SCHEMA2_MANIFEST_DIGEST_KEY: str(self.config.digest)}, DOCKER_SCHEMA2_MANIFEST_LAYERS_KEY: [_build_layer(layer) for layer in self.filesystem_layers]}
json_str = json.dumps(manifest_dict, ensure_ascii=ensure_ascii, indent=3)
return DockerSchema2Manifest(Bytes.for_string_or_unicode(json_str)) |
def test_unique_uri_validator_serializer_create_error(db):
validator = OptionSetUniqueURIValidator()
serializer = OptionSetSerializer()
with pytest.raises(RestFameworkValidationError):
validator({'uri_prefix': settings.DEFAULT_URI_PREFIX, 'uri_path': OptionSet.objects.first().uri_path}, serializer) |
def fill_result_with_error(result, error, trace, models_to_create):
error = (error, trace)
result['error'] = error
for framework in FRAMEWORKS:
if (framework in models_to_create):
result[framework] = {}
for model_arch in models_to_create[framework]:
result[framework][model_arch.__name__] = {'model': None, 'checkpoint': None, 'error': error}
result['processor'] = {p.__class__.__name__: p.__class__.__name__ for p in result['processor'].values()} |
def get_patch_size(final_patch_size, rot_x, rot_y, rot_z, scale_range):
if isinstance(rot_x, (tuple, list)):
rot_x = max(np.abs(rot_x))
if isinstance(rot_y, (tuple, list)):
rot_y = max(np.abs(rot_y))
if isinstance(rot_z, (tuple, list)):
rot_z = max(np.abs(rot_z))
rot_x = min((((90 / 360) * 2.0) * np.pi), rot_x)
rot_y = min((((90 / 360) * 2.0) * np.pi), rot_y)
rot_z = min((((90 / 360) * 2.0) * np.pi), rot_z)
from batchgenerators.augmentations.utils import rotate_coords_3d, rotate_coords_2d
coords = np.array(final_patch_size)
final_shape = np.copy(coords)
if (len(coords) == 3):
final_shape = np.max(np.vstack((np.abs(rotate_coords_3d(coords, rot_x, 0, 0)), final_shape)), 0)
final_shape = np.max(np.vstack((np.abs(rotate_coords_3d(coords, 0, rot_y, 0)), final_shape)), 0)
final_shape = np.max(np.vstack((np.abs(rotate_coords_3d(coords, 0, 0, rot_z)), final_shape)), 0)
elif (len(coords) == 2):
final_shape = np.max(np.vstack((np.abs(rotate_coords_2d(coords, rot_x)), final_shape)), 0)
final_shape /= min(scale_range)
return final_shape.astype(int) |
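
# Example call (requires batchgenerators, which the function imports lazily):
# how big must the sampled patch be so that a 128^3 region still fits after a
# rotation of up to 30 degrees about each axis plus the strongest down-scaling?
import numpy as np
rot = (30 / 360) * 2.0 * np.pi
patch = get_patch_size(np.array([128, 128, 128]), rot, rot, rot, scale_range=(0.85, 1.25))
print(patch)  # rotation and scaling head-room enlarge the patch well past 128
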
def check_keys(model, pretrained_state_dict):
ckpt_keys = set(pretrained_state_dict.keys())
model_keys = set(model.state_dict().keys())
used_pretrained_keys = (model_keys & ckpt_keys)
unused_pretrained_keys = (ckpt_keys - model_keys)
missing_keys = (model_keys - ckpt_keys)
missing_keys = [x for x in missing_keys if (not x.endswith('num_batches_tracked'))]
if (len(missing_keys) > 0):
logger.info('[Warning] missing keys: {}'.format(missing_keys))
logger.info('missing keys:{}'.format(len(missing_keys)))
if (len(unused_pretrained_keys) > 0):
logger.info('[Warning] unused_pretrained_keys: {}'.format(unused_pretrained_keys))
logger.info('unused checkpoint keys:{}'.format(len(unused_pretrained_keys)))
logger.info('used keys:{}'.format(len(used_pretrained_keys)))
assert (len(used_pretrained_keys) > 0), 'load NONE from pretrained checkpoint'
return True |
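
# Quick self-contained check; the snippet above expects a module-level logger,
# so a standard-library one is configured here as an assumption.
import logging
import torch.nn as nn
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
model = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 2))
state = model.state_dict()
state.pop('1.bias')                        # becomes a missing key
state['extra.weight'] = state['0.weight']  # becomes an unused checkpoint key
check_keys(model, state)
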
def train(cfg, args, model, device, distributed):
optimizer = make_optimizer(cfg, model)
scheduler = make_lr_scheduler(cfg, optimizer)
arguments = {}
arguments['iteration'] = 0
output_dir = cfg.OUTPUT_DIR
save_to_disk = (comm.get_rank() == 0)
checkpointer = DetectronCheckpointer(cfg, model, optimizer, scheduler, output_dir, save_to_disk)
if (cfg.MODEL.BACKBONE.CONV_BODY == 'DLA-34-DCN'):
ckpt = (cfg.MODEL.WEIGHT if (args.ckpt is None) else args.ckpt)
extra_checkpoint_data = checkpointer.load(ckpt, use_latest=False)
arguments.update(extra_checkpoint_data)
elif (args.ckpt is not None):
extra_checkpoint_data = checkpointer.load(args.ckpt)
arguments.update(extra_checkpoint_data)
data_loader = make_data_loader(cfg, is_train=True)
checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
do_train(cfg, distributed, model, data_loader, optimizer, scheduler, checkpointer, device, checkpoint_period, arguments) |
def _verify_static_class_methods(stub: nodes.FuncBase, runtime: Any, static_runtime: MaybeMissing[Any], object_path: list[str]) -> Iterator[str]:
if (stub.name in ('__new__', '__init_subclass__', '__class_getitem__')):
return
if inspect.isbuiltin(runtime):
probably_class_method = isinstance(getattr(runtime, '__self__', None), type)
if (probably_class_method and (not stub.is_class)):
(yield 'runtime is a classmethod but stub is not')
if ((not probably_class_method) and stub.is_class):
(yield 'stub is a classmethod but runtime is not')
return
if (static_runtime is MISSING):
return
if (isinstance(static_runtime, classmethod) and (not stub.is_class)):
(yield 'runtime is a classmethod but stub is not')
if ((not isinstance(static_runtime, classmethod)) and stub.is_class):
(yield 'stub is a classmethod but runtime is not')
if (isinstance(static_runtime, staticmethod) and (not stub.is_static)):
(yield 'runtime is a staticmethod but stub is not')
if ((not isinstance(static_runtime, staticmethod)) and stub.is_static):
(yield 'stub is a staticmethod but runtime is not') |
@lru_cache(maxsize=16)
def _get_enum_field_values(enum_field):
values = []
for row in enum_field.rel_model.select():
key = getattr(row, enum_field.enum_key_field)
value = getattr(row, 'id')
values.append((key, value))
return Enum(enum_field.rel_model.__name__, values) |
@DETECTORS.register_module
class SingleStageDetector(BaseDetector):
def __init__(self, backbone, neck=None, bbox_head=None, train_cfg=None, test_cfg=None, pretrained=None):
super(SingleStageDetector, self).__init__()
self.backbone = builder.build_backbone(backbone)
if (neck is not None):
self.neck = builder.build_neck(neck)
self.bbox_head = builder.build_head(bbox_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.init_weights(pretrained=pretrained)
self.time = 0
self.count = 0
def init_weights(self, pretrained=None):
super(SingleStageDetector, self).init_weights(pretrained)
self.backbone.init_weights(pretrained=pretrained)
if self.with_neck:
if isinstance(self.neck, nn.Sequential):
for m in self.neck:
m.init_weights()
else:
self.neck.init_weights()
self.bbox_head.init_weights()
def extract_feat(self, img):
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
return x
def forward_dummy(self, img):
x = self.extract_feat(img)
outs = self.bbox_head(x)
return outs
def forward_train(self, img, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore=None):
x = self.extract_feat(img)
outs = self.bbox_head(x)
loss_inputs = (outs + (gt_bboxes, gt_labels, img_metas, self.train_cfg))
losses = self.bbox_head.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
return losses
def simple_test(self, img, img_meta, rescale=False):
x = self.extract_feat(img)
outs = self.bbox_head(x)
bbox_inputs = (outs + (img_meta, self.test_cfg, rescale))
bbox_list = self.bbox_head.get_bboxes(*bbox_inputs)
bbox_results = [bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes) for (det_bboxes, det_labels) in bbox_list]
return bbox_results[0]
def aug_test(self, imgs, img_metas, rescale=False):
raise NotImplementedError |
def rotation_matrix_axis(dim, theta):
if (dim == 0):
rm = np.array([[1.0, 0.0, 0.0], [0.0, math.cos(theta), (- math.sin(theta))], [0.0, math.sin(theta), math.cos(theta)]])
elif (dim == 1):
rm = np.array([[math.cos(theta), 0.0, math.sin(theta)], [0.0, 1.0, 0.0], [(- math.sin(theta)), 0.0, math.cos(theta)]])
elif (dim == 2):
rm = np.array([[math.cos(theta), (- math.sin(theta)), 0.0], [math.sin(theta), math.cos(theta), 0.0], [0.0, 0.0, 1.0]])
else:
raise ValueError('dim must be 0, 1 or 2, got {}'.format(dim))
return rm |
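
# Sanity check: each single-axis rotation is orthogonal, and the composition
# Rz @ Ry @ Rx remains a proper rotation (determinant +1).
import math
import numpy as np
R = (rotation_matrix_axis(2, 0.3) @ rotation_matrix_axis(1, -0.7) @ rotation_matrix_axis(0, 1.1))
assert np.allclose(R @ R.T, np.eye(3))
assert math.isclose(np.linalg.det(R), 1.0)
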
def _process_css(default_css, extra_css):
with open(default_css, encoding='utf-8') as f:
css = f.read()
for path in extra_css:
css += '\n/*'
css += '\n * CUSTOM CSS'
css += f'\n * {path}'
css += '\n */\n\n'
with open(path, encoding='utf-8') as f:
css += f.read()
if _ansi_styles:
ansi_css = ['\n/*', ' * ANSI2HTML STYLES', ' */\n']
ansi_css.extend([str(r) for r in _ansi_styles])
css += '\n'.join(ansi_css)
return css |
class TFAutoData():
def __init__(self):
self.features_list = []
self._train_data_path = ''
self.schema = ''
self.stats_train = ''
self.stats_eval = ''
self.anom_train = ''
self.anom_eval = ''
self.file_headers = []
self._len_train = 0
self._run = False
def collect_feature_details(self, schema):
features_list = []
features_dict = display_util.get_schema_dataframe(schema)[0].to_dict('index')
features_stats = MessageToDict(self.stats_train)
self._len_train = features_stats['datasets'][0]['numExamples']
for f_ in features_dict.keys():
features_dict[f_]['feature'] = re.sub("\\'", '', f_)
if (features_dict[f_]['Domain'] != '-'):
features_dict[f_]['categorical_values'] = [v for v in tfdv.get_domain(schema, features_dict[f_]['feature']).value]
features_dict[f_]['num_categorical_values'] = len(tfdv.get_domain(schema, features_dict[f_]['feature']).value)
if ((int(features_dict[f_]['num_categorical_values']) / int(self._len_train)) > 0.2):
features_dict[f_]['Type'] = 'STRING'
features_dict[f_]['categorical_values'] = ''
features_dict[f_]['num_categorical_values'] = 0
else:
features_dict[f_]['Type'] = 'CATEGORICAL'
for feat in features_stats['datasets'][0]['features']:
curr_feat = feat['path']['step'][0]
if ((curr_feat == features_dict[f_]['feature']) and (features_dict[f_]['Type'] in ['INT', 'FLOAT'])):
features_dict[f_]['min'] = feat['numStats'].get('min', 0.0)
features_dict[f_]['max'] = feat['numStats'].get('max', 0.0)
features_dict[f_]['mean'] = feat['numStats'].get('mean', 0.0)
features_dict[f_]['std_dev'] = feat['numStats'].get('stdDev', 1)
elif ((curr_feat == features_dict[f_]['feature']) and (features_dict[f_]['Type'] == 'CATEGORICAL')):
features_dict[f_]['categorical_values_count'] = {}
features_dict[f_]['categorical_values_count_total'] = 0
for categ in features_dict[f_]['categorical_values']:
categ_found = 0
for topvals in feat['stringStats']['topValues']:
if (categ == topvals['value']):
categ_found = 1
features_dict[f_]['categorical_values_count'][topvals.get('value', 'NA')] = topvals.get('frequency', 0)
features_dict[f_]['categorical_values_count_total'] += topvals.get('frequency', 0)
break
if (categ_found == 0):
features_dict[f_]['categorical_values_count'][categ] = 1
features_dict[f_]['categorical_values_count_total'] += 1
features_list.append(features_dict[f_])
self.features_list = features_list
return self.features_list
def get_columns_from_file_header(self, path, num_cols):
record_defaults = []
if os.path.isdir(path):
path = (path + '*')
elif os.path.isfile(path):
path = path
for _ in range(num_cols):
record_defaults.append('')
file_list = tf.io.gfile.glob(path)
dataset = tf.data.experimental.CsvDataset(file_list, header=False, record_defaults=record_defaults, use_quote_delim=False)
for example in dataset.take(1):
return [e.numpy().decode('utf-8') for e in example]
def run_initial(self, _train_data_path, _test_data_path, _tfx_root, _metadata_db_root, tfautils, viz=False):
input = proto.Input(splits=[example_gen_pb2.Input.Split(name='train', pattern=os.path.join(_train_data_path, '*')), example_gen_pb2.Input.Split(name='eval', pattern=os.path.join(_test_data_path, '*'))])
self.example_gen = CsvExampleGen(input_base='/', input_config=input)
self.statistics_gen = StatisticsGen(examples=self.example_gen.outputs['examples'])
self.infer_schema = SchemaGen(statistics=self.statistics_gen.outputs['statistics'], infer_feature_shape=False)
self.validate_stats = ExampleValidator(statistics=self.statistics_gen.outputs['statistics'], schema=self.infer_schema.outputs['schema'])
self.pipeline = pipeline.Pipeline(pipeline_name='data_pipeline', pipeline_root=_tfx_root, components=[self.example_gen, self.statistics_gen, self.infer_schema, self.validate_stats], metadata_connection_config=metadata.sqlite_metadata_connection_config(_metadata_db_root), enable_cache=True, beam_pipeline_args=[('--direct_num_workers=%d' % 0), '--direct_running_mode=multi_threading'])
print('Data: Pipeline execution started...')
LocalDagRunner().run(self.pipeline)
self._run = True
dir_stats = tfautils.get_artifacts_directories('StatisticsGen')
dir_anom = tfautils.get_artifacts_directories('ExampleValidator')
dir_schema = tfautils.get_artifacts_directories('SchemaGen')
stats_url_train = (str(dir_stats['statistics'][0].uri) + '/Split-train/FeatureStats.pb')
self.stats_train = tfdv.load_stats_binary(stats_url_train)
stats_url_eval = (str(dir_stats['statistics'][0].uri) + '/Split-eval/FeatureStats.pb')
self.stats_eval = tfdv.load_stats_binary(stats_url_eval)
anom_url_train = (str(dir_anom['anomalies'][0].uri) + '/Split-train/SchemaDiff.pb')
self.anom_train = tfautils.load_anomalies_binary(anom_url_train)
anom_url_eval = (str(dir_anom['anomalies'][0].uri) + '/Split-eval/SchemaDiff.pb')
self.anom_eval = tfautils.load_anomalies_binary(anom_url_eval)
schema_url = (str(dir_schema['schema'][0].uri) + '/schema.pbtxt')
self.schema = tfdv.load_schema_text(schema_url)
self.features_list = self.collect_feature_details(self.schema)
self.file_headers = self.get_columns_from_file_header(_train_data_path, len(self.features_list))
if (viz == True):
print('\n### Generating schema visuals')
tfdv.display_schema(self.schema)
print('\n### Generating Comparative Statistics Visuals...')
tfdv.visualize_statistics(lhs_statistics=self.stats_eval, rhs_statistics=self.stats_train, lhs_name='EVAL_DATASET', rhs_name='TRAIN_DATASET')
print('\n### Generating Test Data Anomaly Visuals...')
tfdv.display_anomalies(self.anom_eval)
return self.pipeline |
def _setup_single_view_dispatcher_route(api_blueprint: Blueprint, options: Options, constructor: RootComponentConstructor) -> None:
sock = Sock(api_blueprint)
def model_stream(ws: WebSocket, path: str='') -> None:
def send(value: Any) -> None:
ws.send(json.dumps(value))
def recv() -> Any:
return json.loads(ws.receive())
_dispatch_in_thread(ws, path[len(options.url_prefix):], constructor(), send, recv)
sock.route(STREAM_PATH.name, endpoint='without_path')(model_stream)
sock.route(f'{STREAM_PATH.name}/<path:path>', endpoint='with_path')(model_stream) |
def determine_ctype_from_vconv(ctype, unit, velocity_convention=None):
unit = u.Unit(unit)
if (len(ctype) > 4):
in_physchar = ctype[5]
else:
lin_cunit = LINEAR_CUNIT_DICT[ctype]
in_physchar = PHYSICAL_TYPE_TO_CHAR[parse_phys_type(lin_cunit)]
if (parse_phys_type(unit) == 'speed'):
if ((velocity_convention is None) and (ctype[0] == 'V')):
return ctype
elif (velocity_convention is None):
raise ValueError('A velocity convention must be specified')
vcin = _parse_velocity_convention(ctype[:4])
vcout = _parse_velocity_convention(velocity_convention)
if (vcin == vcout):
return LINEAR_CTYPES[vcout]
else:
return '{type}-{s1}2{s2}'.format(type=LINEAR_CTYPES[vcout], s1=in_physchar, s2=LINEAR_CTYPE_CHARS[vcout])
else:
in_phystype = CTYPE_TO_PHYSICALTYPE[in_physchar]
if (in_phystype == parse_phys_type(unit)):
return ALL_CTYPES[in_phystype]
else:
out_physchar = PHYSICAL_TYPE_TO_CTYPE[parse_phys_type(unit)]
return '{type}-{s1}2{s2}'.format(type=ALL_CTYPES[parse_phys_type(unit)], s1=in_physchar, s2=out_physchar) |
def routedict_to_routelist_single(name, D, indent=1):
Locals = dict()
indents = dict(I0=('\t' * indent), I1=('\t' * (indent + 1)), I2=('\t' * (indent + 2)), I3=('\t' * (indent + 3)), I4=('\t' * (indent + 4)))
if False:
D0 = D
keyname = 'src'
valname = 'dest'
else:
keyname = 'dest'
valname = 'src'
D0 = dict()
for (src, destD) in D.items():
for (dest, val) in destD.items():
D0.setdefault(dest, {})[src] = val
D0 = sorted(D0.items(), key=(lambda i: eval(i[0], comedi_h.__dict__, Locals)))
lines = ['{I0}.device = "{name}",\n{I0}.routes = (struct ni_route_set[]){{'.format(name=name, **indents)]
for (D0_sig, D1_D) in D0:
D1 = [k for (k, v) in D1_D.items() if v]
D1.sort(key=(lambda i: eval(i, comedi_h.__dict__, Locals)))
lines.append('{I1}{{\n{I2}.{keyname} = {D0_sig},\n{I2}.{valname} = (int[]){{'.format(keyname=keyname, valname=valname, D0_sig=D0_sig, **indents))
for D1_sig in D1:
lines.append('{I3}{D1_sig},'.format(D1_sig=D1_sig, **indents))
lines.append('{I3}0, /* Termination */'.format(**indents))
lines.append('{I2}}}\n{I1}}},'.format(**indents))
lines.append('{I1}{{ /* Termination of list */\n{I2}.{keyname} = 0,\n{I1}}},'.format(keyname=keyname, **indents))
lines.append('{I0}}},'.format(**indents))
return '\n'.join(lines) |
def _get_ordered_conv_linears(node_layer_map: Dict, layer_out_node_map: Dict) -> List[Union[(ConvType, LinearType)]]:
list_of_ordered_layers = _get_ordered_layers(node_layer_map, layer_out_node_map)
ordered_conv_linears = []
for layer in list_of_ordered_layers:
if isinstance(layer, _supported_layers):
ordered_conv_linears.append(layer)
return ordered_conv_linears |
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
def get_pretrained_model_and_inputs(self):
model = TFVisionTextDualEncoderModel.from_vision_text_pretrained('hf-internal-testing/tiny-random-vit', 'hf-internal-testing/tiny-random-bert')
batch_size = 13
pixel_values = floats_tensor([batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size])
input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
attention_mask = random_attention_mask([batch_size, 4])
inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return (model, inputs)
def get_vision_text_model(self, vision_config, text_config):
vision_model = TFViTModel(vision_config, name='vision_model')
text_model = TFBertModel(text_config, name='text_model')
return (vision_model, text_model)
def prepare_config_and_inputs(self):
vit_model_tester = TFViTModelTester(self)
bert_model_tester = TFBertModelTester(self)
vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
(vision_config, pixel_values, _) = vision_config_and_inputs
(text_config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = text_config_and_inputs
return {'text_config': text_config, 'vision_config': vision_config, 'pixel_values': pixel_values, 'attention_mask': input_mask, 'input_ids': input_ids, 'text_token_type_ids': token_type_ids, 'text_sequence_labels': sequence_labels, 'text_token_labels': token_labels, 'text_choice_labels': choice_labels} |
def test_init_pq_lower_limit_option(tmpdir, mocker):
mocker.patch.object(SentinelOne, '_authenticate')
cred_file_path = tmpdir.mkdir('test_dir').join('test_creds.ini')
cred_file_path.write('asdfasdfasdf')
s1_product = SentinelOne(profile='default', creds_file=cred_file_path, account_id=None, site_id=None, account_name=None, pq=True, limit=(- 1))
assert (s1_product._limit == 1000) |
def _print_collected_tasks(dictionary: dict[(Path, list[PTaskWithPath])], show_nodes: bool, editor_url_scheme: str, common_ancestor: Path) -> None:
console.print()
tree = Tree('Collected tasks:', highlight=True)
for (module, tasks) in dictionary.items():
reduced_module = relative_to(module, common_ancestor)
url_style = create_url_style_for_path(module, editor_url_scheme)
module_branch = tree.add(Text.assemble(PYTHON_ICON, '<Module ', Text(str(reduced_module), style=url_style), '>'))
for task in tasks:
reduced_task_name = format_task_name(task, editor_url_scheme=editor_url_scheme)
task_branch = module_branch.add(Text.assemble(TASK_ICON, '<Function ', reduced_task_name, '>'))
if show_nodes:
deps = list(tree_leaves(task.depends_on))
for node in sorted(deps, key=(lambda x: (x.path.as_posix() if isinstance(x, PPathNode) else x.name))):
text = format_node_name(node, (common_ancestor,))
task_branch.add(Text.assemble(FILE_ICON, '<Dependency ', text, '>'))
products = list(tree_leaves(task.produces))
for node in sorted(products, key=(lambda x: (x.path.as_posix() if isinstance(x, PPathNode) else x.name))):
text = format_node_name(node, (common_ancestor,))
task_branch.add(Text.assemble(FILE_ICON, '<Product ', text, '>'))
console.print(tree) |
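
# The function above builds on rich's Tree API; a stripped-down illustration of
# the same module -> task -> node nesting (rich is the only dependency, and the
# file names are made up):
from rich.console import Console
from rich.tree import Tree
tree = Tree('Collected tasks:', highlight=True)
module_branch = tree.add('<Module task_example.py>')
task_branch = module_branch.add('<Function task_create_plot>')
task_branch.add('<Dependency data.csv>')
task_branch.add('<Product plot.png>')
Console().print(tree)
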
class TestSimpleUpdateProcessor():
def test_slot_behaviour(self):
inst = SimpleUpdateProcessor(1)
for attr in inst.__slots__:
assert (getattr(inst, attr, 'err') != 'err'), f"got extra slot '{attr}'"
assert (len(mro_slots(inst)) == len(set(mro_slots(inst)))), 'duplicate slot'
@pytest.mark.parametrize('concurrent_updates', [0, (- 1)])
def test_init(self, concurrent_updates):
processor = SimpleUpdateProcessor(3)
assert (processor.max_concurrent_updates == 3)
with pytest.raises(ValueError, match='must be a positive integer'):
SimpleUpdateProcessor(concurrent_updates)
async def test_process_update(self, mock_processor):
update = Update(1)
async def coroutine():
pass
(await mock_processor.process_update(update, coroutine()))
assert mock_processor.test_flag
async def test_do_process_update(self):
processor = SimpleUpdateProcessor(1)
update = Update(1)
test_flag = False
async def coroutine():
nonlocal test_flag
test_flag = True
(await processor.do_process_update(update, coroutine()))
assert test_flag
async def test_max_concurrent_updates_enforcement(self, mock_processor):
count = (2 * mock_processor.max_concurrent_updates)
events = {i: asyncio.Event() for i in range(count)}
queue = asyncio.Queue()
for event in events.values():
(await queue.put(event))
async def callback():
(await asyncio.sleep(0.5))
(await queue.get()).set()
tasks = [asyncio.create_task(mock_processor.process_update(update=_, coroutine=callback())) for _ in range(count)]
for i in range(count):
assert (not events[i].is_set())
(await asyncio.sleep(0.75))
for i in range(mock_processor.max_concurrent_updates):
assert events[i].is_set()
for i in range(mock_processor.max_concurrent_updates, count):
assert (not events[i].is_set())
(await asyncio.sleep(0.5))
for i in range(count):
assert events[i].is_set()
(await asyncio.gather(*tasks))
async def test_context_manager(self, monkeypatch, mock_processor):
self.test_flag = set()
async def after_initialize(*args, **kwargs):
self.test_flag.add('initialize')
async def after_shutdown(*args, **kwargs):
self.test_flag.add('stop')
monkeypatch.setattr(SimpleUpdateProcessor, 'initialize', call_after(SimpleUpdateProcessor.initialize, after_initialize))
monkeypatch.setattr(SimpleUpdateProcessor, 'shutdown', call_after(SimpleUpdateProcessor.shutdown, after_shutdown))
async with mock_processor:
pass
assert (self.test_flag == {'initialize', 'stop'})
async def test_context_manager_exception_on_init(self, monkeypatch, mock_processor):
async def initialize(*args, **kwargs):
raise RuntimeError('initialize')
async def shutdown(*args, **kwargs):
self.test_flag = 'shutdown'
monkeypatch.setattr(SimpleUpdateProcessor, 'initialize', initialize)
monkeypatch.setattr(SimpleUpdateProcessor, 'shutdown', shutdown)
with pytest.raises(RuntimeError, match='initialize'):
async with mock_processor:
pass
assert (self.test_flag == 'shutdown') |
def generate_random_object_cluster(n_objects, object_generator, max_cluster_trans=1.0, max_cluster_rot=(np.pi / 8)):
ref_obj = object_generator()
cluster_objects = []
for i in range(n_objects):
r = random_rotation_translation_rotor(maximum_translation=max_cluster_trans, maximum_angle=max_cluster_rot)
new_obj = apply_rotor(ref_obj, r)
cluster_objects.append(new_obj)
return cluster_objects |
@app.task()
def daily_update_geos(day=None, geo=True, region=True):
(start_date, end_date) = get_day(day)
if ((not geo) and (not region)):
log.error('geo or region required, please pass one as True')
return
if geo:
log.info('Updating GeoImpressions for %s-%s', start_date, end_date)
if region:
log.info('Updating RegionImpressions for %s-%s', start_date, end_date)
RegionImpression.objects.using('default').filter(date__gte=start_date, date__lt=end_date).delete()
topic_mapping = defaultdict((lambda : {'decisions': 0, 'offers': 0, 'views': 0, 'clicks': 0}))
queryset = Offer.objects.using(settings.REPLICA_SLUG).filter((Q(paid_eligible=True) | Q(publisher__allow_paid_campaigns=False)), date__gte=start_date, date__lt=end_date)
for values in queryset.values('advertisement', 'country', 'publisher').annotate(total_decisions=Count('country'), total_offers=Count('country', filter=Q(advertisement__isnull=False)), total_views=Count('country', filter=Q(viewed=True)), total_clicks=Count('country', filter=Q(clicked=True))).filter(total_decisions__gt=0).order_by('-total_decisions').iterator():
country = values['country']
if geo:
(impression, _) = GeoImpression.objects.using('default').get_or_create(publisher_id=values['publisher'], advertisement_id=values['advertisement'], country=country, date=start_date)
GeoImpression.objects.using('default').filter(pk=impression.pk).update(decisions=values['total_decisions'], offers=values['total_offers'], views=values['total_views'], clicks=values['total_clicks'])
if region:
_region = Region.get_region_from_country_code(country)
publisher = values['publisher']
advertisement = values['advertisement']
topic_mapping[f'{advertisement}:{publisher}:{_region}']['decisions'] += values['total_decisions']
topic_mapping[f'{advertisement}:{publisher}:{_region}']['offers'] += values['total_offers']
topic_mapping[f'{advertisement}:{publisher}:{_region}']['views'] += values['total_views']
topic_mapping[f'{advertisement}:{publisher}:{_region}']['clicks'] += values['total_clicks']
if region:
for (data, value) in topic_mapping.items():
(ad, publisher, _region) = data.split(':')
if (ad == 'None'):
ad = None
(impression, _) = RegionImpression.objects.using('default').get_or_create(publisher_id=publisher, advertisement_id=ad, region=_region, date=start_date)
RegionImpression.objects.using('default').filter(pk=impression.pk).update(decisions=(F('decisions') + value['decisions']), offers=(F('offers') + value['offers']), views=(F('views') + value['views']), clicks=(F('clicks') + value['clicks'])) |
class RegStage(nn.Module):
def __init__(self, depth, in_chs, out_chs, stride, dilation, drop_path_rates=None, block_fn=Bottleneck, **block_kwargs):
super(RegStage, self).__init__()
self.grad_checkpointing = False
first_dilation = (1 if (dilation in (1, 2)) else 2)
for i in range(depth):
block_stride = (stride if (i == 0) else 1)
block_in_chs = (in_chs if (i == 0) else out_chs)
block_dilation = (first_dilation, dilation)
dpr = (drop_path_rates[i] if (drop_path_rates is not None) else 0.0)
name = 'b{}'.format((i + 1))
self.add_module(name, block_fn(block_in_chs, out_chs, stride=block_stride, dilation=block_dilation, drop_path_rate=dpr, **block_kwargs))
first_dilation = dilation
def forward(self, x):
if (self.grad_checkpointing and (not torch.jit.is_scripting())):
x = checkpoint_seq(self.children(), x)
else:
for block in self.children():
x = block(x)
return x |
def stop(name, location='\\'):
if (name not in list_tasks(location)):
return '{0} not found in {1}'.format(name, location)
pythoncom.CoInitialize()
task_service = win32com.client.Dispatch('Schedule.Service')
task_service.Connect()
task_folder = task_service.GetFolder(location)
task = task_folder.GetTask(name)
try:
task.Stop(0)
return True
except pythoncom.com_error as error:
return False |
def is_valid_signature(data: bytes, signature: Signature, sender_address: Address) -> SuccessOrError:
try:
signer_address = recover(data=data, signature=signature)
except Exception:
return SuccessOrError('Signature invalid, could not be recovered.')
is_correct_sender = (sender_address == signer_address)
if is_correct_sender:
return SuccessOrError()
return SuccessOrError('Signature was valid but the expected address does not match.') |
class JaggedSparse(Callable):
def __init__(self, *, weighted: bool, combine_option: CombineOption=CombineOption.JAGGED):
self._weighted = weighted
self._combine_option = combine_option
def __call__(self, df):
assert (df.device == 'cpu')
raise NotImplementedError
def weighted(self) -> bool:
return self._weighted
def combine_option(self) -> CombineOption:
return self._combine_option |
class TestEvolution(QiskitAquaTestCase):
def test_exp_i(self):
op = Z.exp_i()
gate = op.to_circuit().data[0][0]
self.assertIsInstance(gate, qiskit.circuit.library.RZGate)
self.assertEqual(gate.params[0], 2)
def test_trotter_with_identity(self):
op = (((2.0 * I) ^ I) + (Z ^ Y))
exact_matrix = scipy.linalg.expm(((- 1j) * op.to_matrix()))
evo = PauliTrotterEvolution(trotter_mode='suzuki', reps=2)
with self.subTest('all PauliOp terms'):
circ_op = evo.convert(EvolvedOp(op))
circuit_matrix = qiskit.quantum_info.Operator(circ_op.to_circuit()).data
np.testing.assert_array_almost_equal(exact_matrix, circuit_matrix)
with self.subTest('MatrixOp identity term'):
op = (((2.0 * I) ^ I).to_matrix_op() + (Z ^ Y))
circ_op = evo.convert(EvolvedOp(op))
circuit_matrix = qiskit.quantum_info.Operator(circ_op.to_circuit()).data
np.testing.assert_array_almost_equal(exact_matrix, circuit_matrix)
with self.subTest('CircuitOp identity term'):
op = (((2.0 * I) ^ I).to_circuit_op() + (Z ^ Y))
circ_op = evo.convert(EvolvedOp(op))
circuit_matrix = qiskit.quantum_info.Operator(circ_op.to_circuit()).data
np.testing.assert_array_almost_equal(exact_matrix, circuit_matrix)
def test_pauli_evolution(self):
op = (((((((- 1.) * I) ^ I) + ((0. * I) ^ Z)) + ((0. * X) ^ X)) + (((- 0.) * Z) ^ I)) + (((- 0.) * Z) ^ Z))
evolution = EvolutionFactory.build(operator=op)
wf = (((((np.pi / 2) * op).exp_i() @ CX) @ (H ^ I)) @ Zero)
mean = evolution.convert(wf)
self.assertIsNotNone(mean)
def test_parameterized_evolution(self):
thetas = ParameterVector('θ', length=7)
op = (((((((thetas[0] * I) ^ I) + ((thetas[1] * I) ^ Z)) + ((thetas[2] * X) ^ X)) + ((thetas[3] * Z) ^ I)) + ((thetas[4] * Y) ^ Z)) + ((thetas[5] * Z) ^ Z))
op = (op * thetas[6])
evolution = PauliTrotterEvolution(trotter_mode='trotter', reps=1)
wf = (((op.exp_i() @ CX) @ (H ^ I)) @ Zero)
mean = evolution.convert(wf)
circuit = mean.to_circuit()
for p in thetas:
self.assertIn(p, circuit.parameters)
self.assertNotIn(thetas[0], circuit._parameter_table.get_keys())
def test_bind_parameters(self):
thetas = ParameterVector('θ', length=6)
op = ((((((thetas[1] * I) ^ Z) + ((thetas[2] * X) ^ X)) + ((thetas[3] * Z) ^ I)) + ((thetas[4] * Y) ^ Z)) + ((thetas[5] * Z) ^ Z))
op = (thetas[0] * op)
evolution = PauliTrotterEvolution(trotter_mode='trotter', reps=1)
wf = (((op.exp_i() @ CX) @ (H ^ I)) @ Zero)
wf = wf.assign_parameters({thetas: np.arange(10, 16)})
mean = evolution.convert(wf)
circuit_params = mean.to_circuit().parameters
for p in thetas[1:]:
self.assertNotIn(p, circuit_params)
def test_bind_circuit_parameters(self):
thetas = ParameterVector('θ', length=6)
op = ((((((thetas[1] * I) ^ Z) + ((thetas[2] * X) ^ X)) + ((thetas[3] * Z) ^ I)) + ((thetas[4] * Y) ^ Z)) + ((thetas[5] * Z) ^ Z))
op = (thetas[0] * op)
evolution = PauliTrotterEvolution(trotter_mode='trotter', reps=1)
wf = (((op.exp_i() @ CX) @ (H ^ I)) @ Zero)
evo = evolution.convert(wf)
mean = evo.assign_parameters({thetas: np.arange(10, 16)})
for p in thetas[1:]:
self.assertNotIn(p, mean.to_circuit().parameters)
for p in thetas:
self.assertIn(p, evo.to_circuit().parameters)
def test_bind_parameter_list(self):
thetas = ParameterVector('θ', length=6)
op = ((((((thetas[1] * I) ^ Z) + ((thetas[2] * X) ^ X)) + ((thetas[3] * Z) ^ I)) + ((thetas[4] * Y) ^ Z)) + ((thetas[5] * Z) ^ Z))
op = (thetas[0] * op)
evolution = PauliTrotterEvolution(trotter_mode='trotter', reps=1)
wf = (((op.exp_i() @ CX) @ (H ^ I)) @ Zero)
evo = evolution.convert(wf)
param_list = np.transpose([np.arange(10, 16), np.arange(2, 8), np.arange(30, 36)]).tolist()
means = evo.assign_parameters({thetas: param_list})
self.assertIsInstance(means, ListOp)
for p in thetas[1:]:
for circop in means.oplist:
self.assertNotIn(p, circop.to_circuit().parameters)
for p in thetas:
self.assertIn(p, evo.to_circuit().parameters)
def test_qdrift(self):
op = (((((2 * Z) ^ Z) + ((3 * X) ^ X)) - ((4 * Y) ^ Y)) + ((0.5 * Z) ^ I))
trotterization = QDrift().convert(op)
self.assertGreater(len(trotterization.oplist), 150)
last_coeff = None
for op in trotterization.oplist:
self.assertIsInstance(op, (EvolvedOp, CircuitOp))
if isinstance(op, EvolvedOp):
if last_coeff:
self.assertEqual(op.primitive.coeff, last_coeff)
else:
last_coeff = op.primitive.coeff
def test_matrix_op_evolution(self):
op = (((((((- 1.) * I) ^ I) + ((0. * I) ^ Z)) + ((0. * X) ^ X)) + (((- 0.) * Z) ^ I)) + (((((- 0.) * Z) ^ Z) * np.pi) / 2))
exp_mat = op.to_matrix_op().exp_i().to_matrix()
ref_mat = scipy.linalg.expm(((- 1j) * op.to_matrix()))
np.testing.assert_array_almost_equal(ref_mat, exp_mat)
def test_log_i(self):
op = (((((((- 1.) * I) ^ I) + ((0. * I) ^ Z)) + ((0. * X) ^ X)) + (((- 0.) * Z) ^ I)) + (((((- 0.) * Z) ^ Z) * np.pi) / 2))
log_exp_op = op.to_matrix_op().exp_i().log_i().to_pauli_op()
np.testing.assert_array_almost_equal(op.to_matrix(), log_exp_op.to_matrix())
log_exp_op = op.to_matrix_op().exp_i().to_matrix_op().log_i().to_pauli_op()
np.testing.assert_array_almost_equal(op.to_matrix(), log_exp_op.to_matrix())
log_exp_op = op.to_matrix_op().exp_i().to_pauli_op().log_i().to_pauli_op()
np.testing.assert_array_almost_equal(op.to_matrix(), log_exp_op.to_matrix())
log_exp_op = op.exp_i().to_pauli_op().log_i().to_pauli_op()
np.testing.assert_array_almost_equal(op.to_matrix(), log_exp_op.to_matrix())
op = ListOp([((0. * I) ^ Z), ((0. * X) ^ X), (((- 0.) * Z) ^ I), (((((- 0.) * Z) ^ Z) * np.pi) / 2)])
log_exp_op = op.to_matrix_op().exp_i().to_matrix_op().log_i().to_pauli_op()
np.testing.assert_array_almost_equal(op.to_matrix(), log_exp_op.to_matrix())
def test_matrix_op_parameterized_evolution(self):
theta = Parameter('θ')
op = (((((((- 1.) * I) ^ I) + ((0. * I) ^ Z)) + ((0. * X) ^ X)) + (((- 0.) * Z) ^ I)) + (((- 0.) * Z) ^ Z))
op = (op * theta)
wf = (((op.to_matrix_op().exp_i() @ CX) @ (H ^ I)) @ Zero)
self.assertIn(theta, wf.to_circuit().parameters)
op = op.assign_parameters({theta: 1})
exp_mat = op.to_matrix_op().exp_i().to_matrix()
ref_mat = scipy.linalg.expm(((- 1j) * op.to_matrix()))
np.testing.assert_array_almost_equal(ref_mat, exp_mat)
wf = wf.assign_parameters({theta: 3})
self.assertNotIn(theta, wf.to_circuit().parameters)
def test_mixed_evolution(self):
thetas = ParameterVector('θ', length=6)
op = (((((thetas[1] * (I ^ Z).to_matrix_op()) + (thetas[2] * (X ^ X)).to_matrix_op()) + ((thetas[3] * Z) ^ I)) + ((thetas[4] * Y) ^ Z).to_circuit_op()) + (thetas[5] * (Z ^ I).to_circuit_op()))
op = (thetas[0] * op)
evolution = PauliTrotterEvolution(trotter_mode='trotter', reps=1)
wf = (((op.exp_i() @ CX) @ (H ^ I)) @ Zero)
wf = wf.assign_parameters({thetas: np.arange(10, 16)})
mean = evolution.convert(wf)
circuit_params = mean.to_circuit().parameters
for p in thetas[1:]:
self.assertNotIn(p, circuit_params)
def test_reps(self):
reps = 7
trotter = Trotter(reps=reps)
self.assertEqual(trotter.reps, reps)
order = 5
suzuki = Suzuki(reps=reps, order=order)
self.assertEqual(suzuki.reps, reps)
self.assertEqual(suzuki.order, order)
qdrift = QDrift(reps=reps)
self.assertEqual(qdrift.reps, reps) |
class TruncatedNormal(BoundedContinuous):
rv_op = truncated_normal
bound_args_indices = (5, 6)
@classmethod
def dist(cls, mu: Optional[DIST_PARAMETER_TYPES]=0, sigma: Optional[DIST_PARAMETER_TYPES]=None, *, tau: Optional[DIST_PARAMETER_TYPES]=None, lower: Optional[DIST_PARAMETER_TYPES]=None, upper: Optional[DIST_PARAMETER_TYPES]=None, **kwargs) -> RandomVariable:
(tau, sigma) = get_tau_sigma(tau=tau, sigma=sigma)
sigma = pt.as_tensor_variable(sigma)
mu = pt.as_tensor_variable(floatX(mu))
lower = (pt.as_tensor_variable(floatX(lower)) if (lower is not None) else pt.constant((- np.inf)))
upper = (pt.as_tensor_variable(floatX(upper)) if (upper is not None) else pt.constant(np.inf))
return super().dist([mu, sigma, lower, upper], **kwargs)
def moment(rv, size, mu, sigma, lower, upper):
(mu, _, lower, upper) = pt.broadcast_arrays(mu, sigma, lower, upper)
moment = pt.switch(pt.eq(lower, (- np.inf)), pt.switch(pt.eq(upper, np.inf), mu, (upper - 1)), pt.switch(pt.eq(upper, np.inf), (lower + 1), ((lower + upper) / 2)))
if (not rv_size_is_none(size)):
moment = pt.full(size, moment)
return moment
def logp(value, mu, sigma, lower, upper):
is_lower_bounded = (not (isinstance(lower, TensorConstant) and np.all(np.isneginf(lower.value))))
is_upper_bounded = (not (isinstance(upper, TensorConstant) and np.all(np.isinf(upper.value))))
if (is_lower_bounded and is_upper_bounded):
lcdf_a = normal_lcdf(mu, sigma, lower)
lcdf_b = normal_lcdf(mu, sigma, upper)
lsf_a = normal_lccdf(mu, sigma, lower)
lsf_b = normal_lccdf(mu, sigma, upper)
norm = pt.switch((lower > 0), logdiffexp(lsf_a, lsf_b), logdiffexp(lcdf_b, lcdf_a))
elif is_lower_bounded:
norm = normal_lccdf(mu, sigma, lower)
elif is_upper_bounded:
norm = normal_lcdf(mu, sigma, upper)
else:
norm = 0.0
logp = (_logprob_helper(Normal.dist(mu, sigma), value) - norm)
if is_lower_bounded:
logp = pt.switch((value < lower), (- np.inf), logp)
if is_upper_bounded:
logp = pt.switch((value > upper), (- np.inf), logp)
if (is_lower_bounded and is_upper_bounded):
logp = check_parameters(logp, pt.le(lower, upper), msg='lower_bound <= upper_bound')
return logp
def logcdf(value, mu, sigma, lower, upper):
logcdf = (log_diff_normal_cdf(mu, sigma, value, lower) - log_diff_normal_cdf(mu, sigma, upper, lower))
is_lower_bounded = (not (isinstance(lower, TensorConstant) and np.all(np.isneginf(lower.value))))
is_upper_bounded = (not (isinstance(upper, TensorConstant) and np.all(np.isinf(upper.value))))
if is_lower_bounded:
logcdf = pt.switch((value < lower), (- np.inf), logcdf)
if is_upper_bounded:
logcdf = pt.switch((value <= upper), logcdf, 0.0)
if (is_lower_bounded and is_upper_bounded):
logcdf = check_parameters(logcdf, pt.le(lower, upper), msg='lower_bound <= upper_bound')
return logcdf |
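
# Usage sketch, assuming this class is wired up as PyMC's pm.TruncatedNormal: a
# normal truncated to [0, inf) should place all prior mass above zero.
import pymc as pm
with pm.Model():
    x = pm.TruncatedNormal('x', mu=0.0, sigma=1.0, lower=0.0)
    idata = pm.sample_prior_predictive(draws=500, random_seed=1)
assert (idata.prior['x'].values >= 0.0).all()
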
class _RepoIncrementITRB(_RepoPatchITRB):
def __init__(self, basis_root_rp, inc_root_rp, rorp_cache, previous_time):
self.inc_root_rp = inc_root_rp
self.previous_time = previous_time
_RepoPatchITRB.__init__(self, basis_root_rp, rorp_cache)
def fast_process_file(self, index, diff_rorp):
(mirror_rp, inc_prefix) = map_longnames.get_mirror_inc_rps(self.CCPP.get_rorps(index), self.basis_root_rp, self.inc_root_rp)
tf = mirror_rp.get_temp_rpath(sibling=True)
result = self._patch_to_temp(mirror_rp, diff_rorp, tf)
if (result == self.UNCHANGED):
log.Log('File content unchanged, only copying attributes', log.INFO)
rpath.copy_attribs(diff_rorp, mirror_rp)
self.CCPP.flag_success(index)
elif result:
inc = robust.check_common_error(self.error_handler, increment.Increment, (tf, mirror_rp, inc_prefix, self.previous_time))
if ((inc is not None) and (not isinstance(inc, int))):
self.CCPP.set_inc(index, inc)
if inc.isreg():
inc.fsync_with_dir()
if tf.lstat():
if (robust.check_common_error(self.error_handler, rpath.rename, (tf, mirror_rp)) is None):
self.CCPP.flag_success(index)
elif mirror_rp.lstat():
mirror_rp.delete()
self.CCPP.flag_deleted(index)
tf.setdata()
if tf.lstat():
tf.delete()
def start_process_directory(self, index, diff_rorp):
(self.base_rp, inc_prefix) = map_longnames.get_mirror_inc_rps(self.CCPP.get_rorps(index), self.basis_root_rp, self.inc_root_rp)
self.base_rp.setdata()
assert (diff_rorp.isdir() or self.base_rp.isdir()), "Either diff '{ipath!r}' or base '{bpath!r}' must be a directory".format(ipath=diff_rorp, bpath=self.base_rp)
if diff_rorp.isdir():
inc = increment.Increment(diff_rorp, self.base_rp, inc_prefix, self.previous_time)
if (inc and inc.isreg()):
inc.fsync_with_dir()
self.base_rp.setdata()
self._prepare_dir(diff_rorp, self.base_rp)
elif self._set_dir_replacement(diff_rorp, self.base_rp):
inc = increment.Increment(self.dir_replacement, self.base_rp, inc_prefix, self.previous_time)
if inc:
self.CCPP.set_inc(index, inc)
self.CCPP.flag_success(index) |
def get_contrastive_aug(dataset, aug_type='simclr'):
if ((dataset == 'miniImageNet') or (dataset == 'tieredImageNet') or (dataset == 'cross')):
(mean, std) = MEAN_STD['imagenet']
crop_size = 84
elif ((dataset == 'CIFAR-FS') or (dataset == 'FC100')):
(mean, std) = MEAN_STD['cifar']
crop_size = 32
else:
raise NotImplementedError('dataset not found: {}'.format(dataset))
normalize = transforms.Normalize(mean=mean, std=std)
if (aug_type == 'standard'):
train_transform = transforms.Compose([(lambda x: Image.fromarray(x)), transforms.RandomCrop(crop_size, padding=4), transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize])
elif (aug_type == 'simclr'):
train_transform = transforms.Compose([(lambda x: Image.fromarray(x)), transforms.RandomResizedCrop(size=crop_size, scale=(0.2, 1.0)), transforms.RandomHorizontalFlip(), transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8), transforms.RandomGrayscale(p=0.2), transforms.ToTensor(), normalize])
elif (aug_type == 'stacked_randaug'):
kernel_size = (crop_size // 10)
ra_params = dict(translate_const=int((crop_size * 0.45)), img_mean=tuple([min(255, round((255 * x))) for x in mean]))
train_transform = transforms.Compose([(lambda x: Image.fromarray(x)), transforms.RandomResizedCrop(crop_size, scale=(0.2, 1.0)), transforms.RandomHorizontalFlip(), transforms.RandomApply([transforms.ColorJitter(0.8, 0.8, 0.8, 0.2)], p=0.8), transforms.RandomApply([GaussianBlur(kernel_size)], p=0.5), rand_augment_transform('rand-n{}-m{}-mstd0.5'.format(2, 10), ra_params), transforms.RandomGrayscale(p=0.2), transforms.ToTensor(), normalize])
elif (aug_type == 'autoaugment'):
auto_augment.IMAGE_SIZE = crop_size
train_transform = transforms.Compose([(lambda x: Image.fromarray(x)), transforms.RandomResizedCrop(crop_size, scale=(0.2, 1.0)), transforms.RandomHorizontalFlip(), auto_augment.AutoAugment(crop_size), transforms.ToTensor(), normalize])
else:
raise NotImplementedError('transform not found: {}'.format(aug_type))
return train_transform |
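
# Usage sketch (MEAN_STD and the augmentation helpers are module-level names
# assumed by the snippet above): build the SimCLR pipeline for CIFAR-FS and
# apply it twice to one uint8 image to obtain two contrastive views.
import numpy as np
transform = get_contrastive_aug('CIFAR-FS', aug_type='simclr')
image = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)
view1, view2 = transform(image), transform(image)
print(view1.shape)  # torch.Size([3, 32, 32])
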
def undo_rule(workflow, ruleid):
(r2s, s2r, r2subscopes) = utils.rule_steps_indices(workflow)
if (not (ruleid in [r.identifier for r in workflow.applied_rules])):
ruleobj = workflow.view().getRule(identifier=ruleid)
log.debug('rule %s/%s not in list of applied rules. possibly already undone during recursion.', ruleobj.offset, ruleobj.rule.name)
return
downstream_nodes_rules = list(set([s2r[s] for s in collective_downstream(workflow, r2s[ruleid])]))
r = workflow.view().getRule(identifier=ruleid)
log.debug('undoing %s/%s. downstream_nodes_rules: %s', r.offset, r.rule.name, len(downstream_nodes_rules))
assert (ruleid not in downstream_nodes_rules)
if (not downstream_nodes_rules):
idx = workflow.applied_rules.index(r)
stepids = r2s[ruleid]
steps = [workflow.dag.getNode(nid) for nid in stepids]
reset_steps(workflow, stepids)
for n in steps:
log.debug('remove DAG node %s/%s:%s', n.task.metadata['wflow_offset'], n.task.metadata['wflow_stage'], n.task.metadata['wflow_stage_node_idx'])
workflow.dag.removeNode(n)
for subscope in r2subscopes[ruleid]:
subrules = utils.stages_in_scope(workflow, subscope)
log.debug('remove subscope %s with %s subrules', subscope, len(subrules))
for (subruleidx, subrule) in enumerate(subrules):
subruleobj = workflow.view().getRule(identifier=subrule)
log.debug('undo sub DAG rule %s/%s', subruleobj.offset, subruleobj.rule.name)
undo_rule(workflow, subrule)
log.debug('removing %s subrules', len(subrules))
for (subruleidx, subrule) in enumerate(subrules):
subruleobj = workflow.view().getRule(identifier=subrule)
log.debug('remove sub DAG rule %s/%s', subruleobj.offset, subruleobj.rule.name)
remove_rule(workflow, subrule)
r = workflow.applied_rules.pop(idx)
workflow.view(r.offset).steps.pop(r.rule.name)
for s in stepids:
workflow.view(r.offset).bookkeeper['_meta']['steps'].remove(s)
workflow.view(r.offset).bookkeeper['_meta']['stages'].remove(ruleid)
log.debug('re-appended {}/{}'.format(r.offset, r.rule.name))
newid = workflow.view(r.offset).addRule(r.rule, identifier=r.identifier)
assert (newid == r.identifier)
log.debug('undo any rules that would not be applicable now')
for rule in workflow.applied_rules:
if (not rule.applicable(workflow)):
ruleobj = workflow.view().getRule(identifier=rule.identifier)
log.debug('rule would not be applicable in current state so undo >> %s/%s', ruleobj.offset, ruleobj.rule.name)
undo_rule(workflow, rule.identifier)
return
log.debug('to undo a rule, we need to undo rules responsible for any of the downstream nodes of the rules of this stage')
while True:
if (not downstream_nodes_rules):
break
for r in downstream_nodes_rules:
log.debug('undoing a downstream rule')
undo_rule(workflow, r)
log.debug('undone a downstream rule')
(r2s, s2r, _) = utils.rule_steps_indices(workflow)
downstream_nodes_rules = list(set([s2r[s] for s in collective_downstream(workflow, r2s[ruleid])]))
log.debug('re-asses if there are still any downstream rules: {}'.format(len(downstream_nodes_rules)))
log.debug('undo rule (now without any downstream rules)')
undo_rule(workflow, ruleid) |
def check_molecule_data_structure(fname, verbose=True):
with open(fname) as f:
try:
db = json.load(f)
except json.JSONDecodeError as err:
raise json.JSONDecodeError("Error reading '{0}' (line {2} col {3}): \n{1}".format(fname, err.msg, err.lineno, err.colno), err.doc, err.pos) from err
for (molecule, db_molec) in db.items():
isotope_names = db_molec['isotopes_names']
isotopes = db_molec['isotopes']
if (len(isotopes) != len(isotope_names)):
raise ValueError(('In molecule {0}: isotope names '.format(molecule) + "({0}) don't match the number of isotopes ({1})".format(isotope_names, list(isotopes.keys()))))
for (isotope, db_iso) in db_molec['isotopes'].items():
elec_states_names = db_iso['electronic_levels_names']
elec_states = db_iso['electronic_level']
if (len(elec_states_names) != len(elec_states)):
raise ValueError(('In molecule {0}, isotope {1}: electronic '.format(molecule, isotope) + "levels names ({0}) don't match the number of levels ({1})".format(elec_states_names, list(elec_states.keys()))))
for (state, db_state) in elec_states.items():
if ((elec_states_names.index(db_state['name']) + 1) != db_state['index']):
raise ValueError(('In molecule {0}, isotope {1}: index of electronic '.format(molecule, isotope) + 'state {0} ({1}): {2} does not match the list of states: {3}. '.format(state, db_state['name'], db_state['index'], elec_states_names)))
if verbose:
print('Structure of {0} looks correct'.format(fname)) |
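
# A minimal document that the validator above accepts: name lists must match
# their dicts, and each state's index must equal its 1-based position in
# electronic_levels_names. (Molecule and level names here are illustrative.)
import json
import tempfile
db = {'CO': {'isotopes_names': ['(12C)(16O)'],
             'isotopes': {'1': {'electronic_levels_names': ['X'],
                                'electronic_level': {'X1SIG+': {'name': 'X', 'index': 1}}}}}}
with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as f:
    json.dump(db, f)
check_molecule_data_structure(f.name)  # prints that the structure looks correct
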
class RolloutBaseline(Baseline):
def __init__(self, model, problem, opts, epoch=0):
super(Baseline, self).__init__()
self.problem = problem
self.opts = opts
self._update_model(model, epoch)
def _update_model(self, model, epoch, dataset=None):
self.model = copy.deepcopy(model)
if (dataset is not None):
if (len(dataset) != self.opts.val_size):
print('Warning: not using saved baseline dataset since val_size does not match')
dataset = None
elif ((dataset[0] if (self.problem.NAME == 'tsp') else dataset[0]['loc']).size(0) != self.opts.graph_size):
print('Warning: not using saved baseline dataset since graph_size does not match')
dataset = None
if (dataset is None):
self.dataset = self.problem.make_dataset(size=self.opts.graph_size, num_samples=self.opts.val_size, distribution=self.opts.data_distribution)
else:
self.dataset = dataset
print('Evaluating baseline model on evaluation dataset')
self.bl_vals = rollout(self.model, self.dataset, self.opts).cpu().numpy()
self.mean = self.bl_vals.mean()
self.epoch = epoch
def wrap_dataset(self, dataset):
print('Evaluating baseline on dataset...')
return BaselineDataset(dataset, rollout(self.model, dataset, self.opts).view((- 1), 1))
def unwrap_batch(self, batch):
return (batch['data'], batch['baseline'].view((- 1)))
def eval(self, x, c):
with torch.no_grad():
(v, _) = self.model(x)
return (v, 0)
def epoch_callback(self, model, epoch):
print('Evaluating candidate model on evaluation dataset')
candidate_vals = rollout(model, self.dataset, self.opts).cpu().numpy()
candidate_mean = candidate_vals.mean()
print('Epoch {} candidate mean {}, baseline epoch {} mean {}, difference {}'.format(epoch, candidate_mean, self.epoch, self.mean, (candidate_mean - self.mean)))
if ((candidate_mean - self.mean) < 0):
(t, p) = ttest_rel(candidate_vals, self.bl_vals)
p_val = (p / 2)
assert (t < 0), 'T-statistic should be negative'
print('p-value: {}'.format(p_val))
if (p_val < self.opts.bl_alpha):
print('Update baseline')
self._update_model(model, epoch)
def state_dict(self):
return {'model': self.model, 'dataset': self.dataset, 'epoch': self.epoch}
def load_state_dict(self, state_dict):
load_model = copy.deepcopy(self.model)
get_inner_model(load_model).load_state_dict(get_inner_model(state_dict['model']).state_dict())
self._update_model(load_model, state_dict['epoch'], state_dict['dataset']) |
class PriceViewMinimal(StatsView):
name = 'priceViewMinimal'
def __init__(self, parent):
StatsView.__init__(self)
self.parent = parent
self.settings = MarketPriceSettings.getInstance()
def getHeaderText(self, fit):
return 'Price'
def populatePanel(self, contentPanel, headerPanel):
contentSizer = contentPanel.GetSizer()
self.panel = contentPanel
self.headerPanel = headerPanel
headerContentSizer = wx.BoxSizer(wx.HORIZONTAL)
hsizer = headerPanel.GetSizer()
hsizer.Add(headerContentSizer, 0, 0, 0)
self.labelEMStatus = wx.StaticText(headerPanel, wx.ID_ANY, '')
headerContentSizer.Add(self.labelEMStatus)
headerPanel.GetParent().AddToggleItem(self.labelEMStatus)
gridPrice = wx.GridSizer(1, 3, 0, 0)
contentSizer.Add(gridPrice, 0, (wx.EXPAND | wx.ALL), 0)
for (_type, label) in (('ship', _t('Ship')), ('fittings', _t('Fittings')), ('total', _t('Total'))):
image = (('%sPrice_big' % _type) if (_type != 'ship') else 'ship_big')
box = wx.BoxSizer(wx.HORIZONTAL)
gridPrice.Add(box, 0, wx.ALIGN_TOP)
box.Add(BitmapLoader.getStaticBitmap(image, contentPanel, 'gui'), 0, wx.ALIGN_CENTER)
vbox = wx.BoxSizer(wx.VERTICAL)
box.Add(vbox, 1, wx.EXPAND)
vbox.Add(wx.StaticText(contentPanel, wx.ID_ANY, label), 0, wx.ALIGN_LEFT)
hbox = wx.BoxSizer(wx.HORIZONTAL)
vbox.Add(hbox)
lbl = wx.StaticText(contentPanel, wx.ID_ANY, '0.00 ISK')
setattr(self, ('labelPrice%s' % _type.capitalize()), lbl)
hbox.Add(lbl, 0, wx.ALIGN_LEFT)
def refreshPanel(self, fit):
if (fit is not None):
self.fit = fit
fit_items = set(Fit.fitItemIter(fit))
Price.getInstance().getPrices(fit_items, self.processPrices, fetchTimeout=30)
self.labelEMStatus.SetLabel('Updating prices...')
self.refreshPanelPrices(fit)
self.panel.Layout()
def refreshPanelPrices(self, fit=None):
ship_price = 0
module_price = 0
drone_price = 0
fighter_price = 0
cargo_price = 0
booster_price = 0
implant_price = 0
if fit:
ship_price = fit.ship.item.price.price
if fit.modules:
for module in fit.modules:
if (not module.isEmpty):
module_price += module.item.price.price
if fit.drones:
for drone in fit.drones:
drone_price += (drone.item.price.price * drone.amount)
if fit.fighters:
for fighter in fit.fighters:
fighter_price += (fighter.item.price.price * fighter.amount)
if fit.cargo:
for cargo in fit.cargo:
cargo_price += (cargo.item.price.price * cargo.amount)
if fit.boosters:
for booster in fit.boosters:
booster_price += booster.item.price.price
if fit.appliedImplants:
for implant in fit.appliedImplants:
implant_price += implant.item.price.price
fitting_price = module_price
total_price = 0
total_price += ship_price
total_price += module_price
if self.settings.get('drones'):
total_price += (drone_price + fighter_price)
if self.settings.get('cargo'):
total_price += cargo_price
if self.settings.get('character'):
total_price += (booster_price + implant_price)
self.labelPriceShip.SetLabel(('%s ISK' % formatAmount(ship_price, 3, 3, 9, currency=True)))
self.labelPriceShip.SetToolTip(wx.ToolTip('{:,.2f} ISK'.format(ship_price)))
self.labelPriceFittings.SetLabel(('%s ISK' % formatAmount(fitting_price, 3, 3, 9, currency=True)))
self.labelPriceFittings.SetToolTip(wx.ToolTip('{:,.2f} ISK'.format(fitting_price)))
self.labelPriceTotal.SetLabel(('%s ISK' % formatAmount(total_price, 3, 3, 9, currency=True)))
self.labelPriceTotal.SetToolTip(wx.ToolTip('{:,.2f} ISK'.format(total_price)))
def processPrices(self, prices):
self.refreshPanelPrices(self.fit)
self.labelEMStatus.SetLabel('')
self.panel.Layout() |
class BaseClient():
def __init__(self, client_id: str, **kwargs):
loop = kwargs.get('loop', None)
handler = kwargs.get('handler', None)
self.pipe = kwargs.get('pipe', None)
self.isasync = kwargs.get('isasync', False)
self.connection_timeout = kwargs.get('connection_timeout', 30)
self.response_timeout = kwargs.get('response_timeout', 10)
client_id = str(client_id)
if (loop is not None):
self.update_event_loop(loop)
else:
self.update_event_loop(get_event_loop())
self.sock_reader: Optional[asyncio.StreamReader] = None
self.sock_writer: Optional[asyncio.StreamWriter] = None
self.client_id = client_id
if (handler is not None):
if (not inspect.isfunction(handler)):
raise PyPresenceException('Error handler must be a function.')
args = inspect.getfullargspec(handler).args
if (args[0] == 'self'):
args = args[1:]
if (len(args) != 2):
raise PyPresenceException('Error handler should only accept two arguments.')
if self.isasync:
if (not inspect.iscoroutinefunction(handler)):
raise InvalidArgument('Coroutine', 'Subroutine', 'You are running async mode - your error handler should be awaitable.')
err_handler = self._async_err_handle
else:
err_handler = self._err_handle
            self.loop.set_exception_handler(err_handler)
self.handler = handler
if getattr(self, 'on_event', None):
self._events_on = True
else:
self._events_on = False
def update_event_loop(self, loop):
self.loop = loop
asyncio.set_event_loop(self.loop)
def _err_handle(self, loop, context: dict):
result = self.handler(context['exception'], context['future'])
if inspect.iscoroutinefunction(self.handler):
loop.run_until_complete(result)
async def _async_err_handle(self, loop, context: dict):
(await self.handler(context['exception'], context['future']))
async def read_output(self):
try:
preamble = (await asyncio.wait_for(self.sock_reader.read(8), self.response_timeout))
(status_code, length) = struct.unpack('<II', preamble[:8])
data = (await asyncio.wait_for(self.sock_reader.read(length), self.response_timeout))
except (BrokenPipeError, struct.error):
raise PipeClosed
except asyncio.TimeoutError:
raise ResponseTimeout
payload = json.loads(data.decode('utf-8'))
if (payload['evt'] == 'ERROR'):
raise ServerError(payload['data']['message'])
return payload
def send_data(self, op: int, payload: Union[(dict, Payload)]):
if isinstance(payload, Payload):
payload = payload.data
payload = json.dumps(payload)
assert (self.sock_writer is not None), 'You must connect your client before sending events!'
self.sock_writer.write((struct.pack('<II', op, len(payload)) + payload.encode('utf-8')))
async def handshake(self):
ipc_path = get_ipc_path(self.pipe)
if (not ipc_path):
raise DiscordNotFound
try:
if ((sys.platform == 'linux') or (sys.platform == 'darwin')):
(self.sock_reader, self.sock_writer) = (await asyncio.wait_for(asyncio.open_unix_connection(ipc_path), self.connection_timeout))
elif ((sys.platform == 'win32') or (sys.platform == 'win64')):
self.sock_reader = asyncio.StreamReader(loop=self.loop)
reader_protocol = asyncio.StreamReaderProtocol(self.sock_reader, loop=self.loop)
(self.sock_writer, _) = (await asyncio.wait_for(self.loop.create_pipe_connection((lambda : reader_protocol), ipc_path), self.connection_timeout))
except FileNotFoundError:
raise InvalidPipe
except asyncio.TimeoutError:
raise ConnectionTimeout
self.send_data(0, {'v': 1, 'client_id': self.client_id})
preamble = (await self.sock_reader.read(8))
(code, length) = struct.unpack('<ii', preamble)
data = json.loads((await self.sock_reader.read(length)))
if ('code' in data):
if (data['message'] == 'Invalid Client ID'):
raise InvalidID
raise DiscordError(data['code'], data['message'])
if self._events_on:
self.sock_reader.feed_data = self.on_event |
def _compute_adjusted_exponent_length(exponent_length: int, first_32_exponent_bytes: bytes) -> int:
exponent = big_endian_to_int(first_32_exponent_bytes)
if ((exponent_length <= 32) and (exponent == 0)):
return 0
elif (exponent_length <= 32):
return get_highest_bit_index(exponent)
    else:
        return ((8 * (exponent_length - 32)) + get_highest_bit_index(exponent)) |
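# A quick sanity check of the helper above (a hedged sketch: it assumes, per the
# EIP-198 rules, big_endian_to_int(b) == int.from_bytes(b, 'big') and
# get_highest_bit_index(v) == max(v.bit_length() - 1, 0)).
exp_bytes = (1 << 255).to_bytes(32, 'big')
assert _compute_adjusted_exponent_length(32, exp_bytes) == 255
assert _compute_adjusted_exponent_length(32, bytes(32)) == 0
# Exponents longer than 32 bytes contribute 8 extra bits per extra byte:
assert _compute_adjusted_exponent_length(40, exp_bytes) == ((8 * (40 - 32)) + 255) |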
def generate_kaldi_data_files(utterances, outdir):
logger.info('Exporting to {}...'.format(outdir))
speakers = {}
with open(os.path.join(outdir, 'text'), 'w', encoding='latin-1') as f:
for utt in utterances:
f.write((utt.to_kaldi_utt_str() + '\n'))
with open(os.path.join(outdir, 'wav.scp'), 'w', encoding='latin-1') as f:
for utt in utterances:
f.write((utt.to_kaldi_wave_str() + '\n'))
with open(os.path.join(outdir, 'utt2dur'), 'w', encoding='latin-1') as f:
for utt in utterances:
f.write((utt.to_kaldi_dur_str() + '\n'))
with open(os.path.join(outdir, 'utt2spk'), 'w', encoding='latin-1') as f:
for utt in utterances:
f.write((((utt.id + ' ') + utt.speaker) + '\n'))
if (utt.speaker not in speakers):
speakers[utt.speaker] = [utt.id]
else:
speakers[utt.speaker].append(utt.id)
with open(os.path.join(outdir, 'spk2utt'), 'w', encoding='latin-1') as f:
for s in speakers:
f.write((s + ' '))
for utt in speakers[s]:
f.write((utt + ' '))
f.write('\n')
logger.info("Successfully wrote {} utterances to data directory '{}'".format(len(utterances), outdir)) |
def test_extra_saturate(debug_ctx, debug_trail):
loader_getter = make_loader_getter(shape=shape(TestField('a', ParamKind.POS_ONLY, is_required=True)), name_layout=InputNameLayout(crown=InpDictCrown({'a': InpFieldCrown('a')}, extra_policy=ExtraCollect()), extra_move=ExtraSaturate(Gauge.saturate)), debug_trail=debug_trail, debug_ctx=debug_ctx)
loader = loader_getter()
assert (loader({'a': 1}) == gauge(1).with_extra({}))
assert (loader({'a': 1, 'b': 2}) == gauge(1).with_extra({'b': 2})) |
class TestDOTAR2CNNKF(TestDOTA):
def eval(self):
txt_name = '{}.txt'.format(self.cfgs.VERSION)
real_test_img_list = self.get_test_image()
r2cnn_kf = build_whole_network.DetectionNetworkR2CNNKF(cfgs=self.cfgs, is_training=False)
self.test_dota(det_net=r2cnn_kf, real_test_img_list=real_test_img_list, txt_name=txt_name)
if (not self.args.show_box):
os.remove(txt_name) |
def export_foam_mesh(obj, meshfileString, foamCaseFolder=None):
gmsh = CaeMesherGmsh.CaeMesherGmsh(obj, CfdTools.getParentAnalysisObject(obj))
meshfile = gmsh.export_mesh(u'Gmsh MSH', meshfileString)
    if meshfile:
        msg = 'Info: Mesh is written to `{}` by Gmsh\n'.format(meshfile)
        FreeCAD.Console.PrintMessage(msg)
        if foamCaseFolder:
            commandlist = [u'gmshToFoam', u'-case', foamCaseFolder, meshfile]
        else:
            commandlist = [u'gmshToFoam', meshfile]
        return _run_command(commandlist)
else:
error = 'Mesh is NOT written to `{}` by Gmsh\n'.format(meshfileString)
FreeCAD.Console.PrintError(error)
return error |
class TestTaskRc(TestCase):
def setUp(self):
self.path_to_taskrc = os.path.join(os.path.dirname(__file__), 'data/default.taskrc')
self.taskrc = TaskRc(self.path_to_taskrc)
def test_taskrc_parsing(self):
expected_config = {'data': {'location': '~/.task'}, 'alpha': {'one': 'yes', 'two': '2'}, 'beta': {'one': 'FALSE'}, 'gamma': {'one': 'TRUE'}, 'uda': {'a': {'type': 'numeric', 'label': 'Alpha'}, 'b': {'type': 'string', 'label': 'Beta', 'values': 'Strontium-90,Hydrogen-3'}}}
self.assertEqual(self.taskrc, expected_config)
def test_get_udas(self):
expected_udas = {'a': NumericField(label='Alpha'), 'b': ChoiceField(label='Beta', choices=['Strontium-90', 'Hydrogen-3'])}
actual_udas = self.taskrc.get_udas()
self.assertEqual(actual_udas, expected_udas)
def test_config_overrides(self):
overrides = {'uda': {'d': {'type': 'string', 'label': 'Delta'}}, 'alpha': {'two': '3'}}
taskrc = TaskRc(self.path_to_taskrc, overrides=overrides)
expected_config = {'data': {'location': '~/.task'}, 'alpha': {'one': 'yes', 'two': '3'}, 'beta': {'one': 'FALSE'}, 'gamma': {'one': 'TRUE'}, 'uda': {'a': {'type': 'numeric', 'label': 'Alpha'}, 'b': {'type': 'string', 'label': 'Beta', 'values': 'Strontium-90,Hydrogen-3'}, 'd': {'type': 'string', 'label': 'Delta'}}}
self.assertEqual(taskrc, expected_config) |
def _search_cross_references(call_graph_analysis_list, search_depth):
reference_dict = defaultdict(set)
if (not call_graph_analysis_list):
return reference_dict
apkinfo = call_graph_analysis_list[0]['apkinfo']
parent_set = {item['parent'] for item in call_graph_analysis_list}
for parent in parent_set:
called_function_set = set()
expand_queue = {parent}
for _ in range(search_depth):
for function in expand_queue:
next_expand_queue = {child_function for (child_function, _) in apkinfo.lowerfunc(function)}
called_function_set.update(next_expand_queue)
expand_queue = next_expand_queue
referenced_set = called_function_set.intersection(parent_set)
referenced_set.discard(parent)
reference_dict[parent] = referenced_set
return reference_dict |
def test_info_setup_complex_pep517_error(mocker: MockerFixture, demo_setup_complex: Path) -> None:
mocker.patch('poetry.utils.env.VirtualEnv.run', autospec=True, side_effect=EnvCommandError(CalledProcessError(1, 'mock', output='mock')))
with pytest.raises(PackageInfoError):
PackageInfo.from_directory(demo_setup_complex) |
class Generator(nn.Module):
def __init__(self, dim=64):
super(Generator, self).__init__()
self.dim = dim
self.linear1 = nn.Linear(128, (((4 * 4) * 4) * dim))
self.bn1 = nn.BatchNorm1d((((4 * 4) * 4) * dim))
self.relu1 = nn.ReLU(True)
self.block1 = nn.Sequential(nn.ConvTranspose2d((4 * dim), (2 * dim), 2, stride=2), nn.BatchNorm2d((2 * dim)), nn.ReLU(True))
self.block2 = nn.Sequential(nn.ConvTranspose2d((2 * dim), dim, 2, stride=2), nn.BatchNorm2d(dim), nn.ReLU(True))
self.deconv_out = nn.ConvTranspose2d(dim, 3, 2, stride=2)
self.tanh = nn.Tanh()
def forward(self, input):
output = self.linear1(input)
output = self.bn1(output)
output = self.relu1(output)
output = output.view((- 1), (4 * self.dim), 4, 4)
output = self.block1(output)
output = self.block2(output)
output = self.deconv_out(output)
output = self.tanh(output)
return output.view((- 1), 3, 32, 32) |
@require_torch
class TestActivations(unittest.TestCase):
def test_gelu_versions(self):
x = torch.tensor([(- 100), (- 1), (- 0.1), 0, 0.1, 1.0, 100])
torch_builtin = get_activation('gelu')
self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))
def test_gelu_10(self):
x = torch.tensor([(- 100), (- 1), (- 0.1), 0, 0.1, 1.0, 100])
torch_builtin = get_activation('gelu')
gelu10 = get_activation('gelu_10')
y_gelu = torch_builtin(x)
y_gelu_10 = gelu10(x)
clipped_mask = torch.where((y_gelu_10 < 10.0), 1, 0)
self.assertTrue((torch.max(y_gelu_10).item() == 10.0))
self.assertTrue(torch.allclose((y_gelu * clipped_mask), (y_gelu_10 * clipped_mask)))
def test_get_activation(self):
get_activation('swish')
get_activation('silu')
get_activation('relu')
get_activation('tanh')
get_activation('gelu_new')
get_activation('gelu_fast')
get_activation('gelu_python')
get_activation('gelu_10')
get_activation('quick_gelu')
get_activation('mish')
get_activation('linear')
get_activation('sigmoid')
with self.assertRaises(KeyError):
get_activation('bogus')
with self.assertRaises(KeyError):
get_activation(None) |
class InceptionCUnit(nn.Module):
def __init__(self, in_channels, out_channels):
super(InceptionCUnit, self).__init__()
assert (out_channels == 2048)
self.branches = Concurrent()
self.branches.add_module('branch1', Conv1x1Branch(in_channels=in_channels, out_channels=320))
self.branches.add_module('branch2', ConvSeq3x3Branch(in_channels=in_channels, out_channels_list=(384,), kernel_size_list=(1,), strides_list=(1,), padding_list=(0,)))
self.branches.add_module('branch3', ConvSeq3x3Branch(in_channels=in_channels, out_channels_list=(448, 384), kernel_size_list=(1, 3), strides_list=(1, 1), padding_list=(0, 1)))
self.branches.add_module('branch4', AvgPoolBranch(in_channels=in_channels, out_channels=192))
def forward(self, x):
x = self.branches(x)
return x |
class FrequencyValue(SensorValue):
def __init__(self, name):
super(FrequencyValue, self).__init__(name)
self.loopc = 0
self.t0 = time.monotonic()
def strobe(self):
self.loopc += 1
if (self.loopc == 4):
t1 = time.monotonic()
self.set((self.loopc / (t1 - self.t0)))
self.t0 = t1
self.loopc = 0 |
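# Note on strobe() above: the value is refreshed every 4 calls with
# loopc / (t1 - t0), i.e. the mean call rate in Hz over the last 4-call window. |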
def test_properties():
prop = OSC.Properties()
prop.add_property('mything', '2')
prop.add_property('theotherthing', 'true')
prop.add_file('propfile.xml')
prettyprint(prop)
prop2 = OSC.Properties()
prop2.add_property('mything', '2')
prop2.add_property('theotherthing', 'true')
prop2.add_file('propfile.xml')
prop3 = OSC.Properties()
prop3.add_property('mything', '2')
prop3.add_property('theotherthin', 'true')
assert (prop == prop2)
assert (prop != prop3)
prop4 = OSC.Properties.parse(prop.get_element())
assert (prop4 == prop)
assert (version_validation('Properties', prop, 0) == ValidationResponse.OK)
assert (version_validation('Properties', prop, 1) == ValidationResponse.OK)
assert (version_validation('Properties', prop, 2) == ValidationResponse.OK) |
def _sharded_tensor_to_gpu(tensor: sharded_tensor.ShardedTensor) -> sharded_tensor.ShardedTensor:
device = torch.device(f'cuda:{torch.cuda.current_device()}')
shards: List[sharded_tensor.Shard] = []
for shard in tensor.local_shards():
new_tensor = shard.tensor.to(device=device)
metadata = copy.deepcopy(shard.metadata)
metadata.placement._device = device
shards.append(sharded_tensor.Shard(new_tensor, metadata))
metadata = copy.deepcopy(tensor.metadata())
for meta in metadata.shards_metadata:
meta.placement._device = device
return ShardedTensor._init_from_local_shards_and_global_metadata(shards, sharded_tensor_metadata=metadata, process_group=tensor._process_group) |
class MatrixOperator(LegacyBaseOperator):
def __init__(self, matrix, basis=None, z2_symmetries=None, atol=1e-12, name=None):
super().__init__(basis, z2_symmetries, name)
if (matrix is not None):
matrix = (matrix if scisparse.issparse(matrix) else scisparse.csr_matrix(matrix))
matrix = (matrix if scisparse.isspmatrix_csr(matrix) else matrix.to_csr(copy=True))
self._matrix = matrix
self._atol = atol
def to_opflow(self):
from qiskit.aqua.operators import PrimitiveOp
return PrimitiveOp(self.dense_matrix)
    @property
    def atol(self):
        return self._atol
    @atol.setter
    def atol(self, new_value):
        self._atol = new_value
def add(self, other, copy=False):
out = (self.copy() if copy else self)
out._matrix += other._matrix
return out
def sub(self, other, copy=False):
out = (self.copy() if copy else self)
out._matrix -= other._matrix
return out
def __add__(self, other):
return self.add(other, copy=True)
def __iadd__(self, other):
return self.add(other, copy=False)
def __sub__(self, other):
return self.sub(other, copy=True)
def __isub__(self, other):
return self.sub(other, copy=False)
def __neg__(self):
out = self.copy()
out._matrix *= (- 1.0)
return out
def __eq__(self, other):
return np.all((self._matrix == other.matrix))
def __str__(self):
curr_repr = 'matrix'
length = '{}x{}'.format((2 ** self.num_qubits), (2 ** self.num_qubits))
ret = 'Representation: {}, qubits: {}, size: {}'.format(curr_repr, self.num_qubits, length)
return ret
def copy(self):
return deepcopy(self)
def chop(self, threshold=None, copy=False):
threshold = (self._atol if (threshold is None) else threshold)
def chop_real_imag(coeff):
temp_real = (coeff.real if (np.absolute(coeff.real) >= threshold) else 0.0)
temp_imag = (coeff.imag if (np.absolute(coeff.imag) >= threshold) else 0.0)
if ((temp_real == 0.0) and (temp_imag == 0.0)):
return 0.0
else:
new_coeff = (temp_real + (1j * temp_imag))
return new_coeff
op = (self.copy() if copy else self)
(rows, cols) = op._matrix.nonzero()
for (row, col) in zip(rows, cols):
op._matrix[(row, col)] = chop_real_imag(op._matrix[(row, col)])
op._matrix.eliminate_zeros()
return op
def __mul__(self, other):
ret_matrix = self._matrix.dot(other.matrix)
return MatrixOperator(matrix=ret_matrix)
    @property
    def dia_matrix(self):
        dia_matrix = self._matrix.diagonal()
        if (not (scisparse.csr_matrix(dia_matrix).nnz == self._matrix.nnz)):
            dia_matrix = None
        return dia_matrix
    @property
    def matrix(self):
        return (self._matrix if (self.dia_matrix is None) else self.dia_matrix)
    @property
    def dense_matrix(self):
        return self._matrix.toarray()
    @property
    def num_qubits(self):
        if self.is_empty():
            logger.warning('Operator is empty, returning 0.')
            return 0
        return int(np.log2(self._matrix.shape[0]))
def print_details(self):
ret = str(self._matrix)
return ret
def construct_evaluation_circuit(self, wave_function, statevector_mode=True, use_simulator_snapshot_mode=None, circuit_name_prefix=''):
del use_simulator_snapshot_mode
return [wave_function.copy(name=(circuit_name_prefix + 'psi'))]
def evaluate_with_result(self, result, statevector_mode=True, use_simulator_snapshot_mode=None, circuit_name_prefix=''):
if self.is_empty():
raise AquaError('Operator is empty, check the operator.')
del use_simulator_snapshot_mode
(avg, std_dev) = (0.0, 0.0)
quantum_state = np.asarray(result.get_statevector((circuit_name_prefix + 'psi')))
avg = np.vdot(quantum_state, self._matrix.dot(quantum_state))
return (avg, std_dev)
def evaluate_with_statevector(self, quantum_state):
if self.is_empty():
raise AquaError('Operator is empty, check the operator.')
avg = np.vdot(quantum_state, self._matrix.dot(quantum_state))
return (avg, 0.0)
    @staticmethod
    def _suzuki_expansion_slice_matrix(pauli_list, lam, expansion_order):
        if (expansion_order == 1):
            left = reduce((lambda x, y: (x @ y)), [scila.expm((((lam / 2) * c) * p.to_matrix(sparse=True).tocsc())) for (c, p) in pauli_list])
            right = reduce((lambda x, y: (x @ y)), [scila.expm((((lam / 2) * c) * p.to_matrix(sparse=True).tocsc())) for (c, p) in reversed(pauli_list)])
            return (left @ right)
        else:
            p_k = ((4 - (4 ** (1 / ((2 * expansion_order) - 1)))) ** (- 1))
            side_base = MatrixOperator._suzuki_expansion_slice_matrix(pauli_list, (lam * p_k), (expansion_order - 1))
            side = (side_base @ side_base)
            middle = MatrixOperator._suzuki_expansion_slice_matrix(pauli_list, (lam * (1 - (4 * p_k))), (expansion_order - 1))
            return ((side @ middle) @ side)
def evolve(self, state_in, evo_time=0, num_time_slices=0, expansion_mode='trotter', expansion_order=1):
from .op_converter import to_weighted_pauli_operator
if self.is_empty():
raise AquaError('Operator is empty, check the operator.')
if ((num_time_slices < 0) or (not isinstance(num_time_slices, int))):
raise ValueError('Number of time slices should be a non-negative integer.')
if (expansion_mode not in ['trotter', 'suzuki']):
raise ValueError('Expansion mode {} not supported.'.format(expansion_mode))
if (num_time_slices == 0):
            return (scila.expm((((- 1j) * evo_time) * self._matrix.tocsc())) @ state_in)
else:
pauli_op = to_weighted_pauli_operator(self)
pauli_list = pauli_op.reorder_paulis()
if (len(pauli_list) == 1):
approx_matrix_slice = scila.expm((((((- 1j) * evo_time) / num_time_slices) * pauli_list[0][0]) * pauli_list[0][1].to_spmatrix().tocsc()))
elif (expansion_mode == 'trotter'):
                approx_matrix_slice = reduce((lambda x, y: (x @ y)), [scila.expm((((((- 1j) * evo_time) / num_time_slices) * c) * p.to_matrix(sparse=True).tocsc())) for (c, p) in pauli_list])
elif (expansion_mode == 'suzuki'):
approx_matrix_slice = MatrixOperator._suzuki_expansion_slice_matrix(pauli_list, (((- 1j) * evo_time) / num_time_slices), expansion_order)
else:
raise ValueError('Unrecognized expansion mode {}.'.format(expansion_mode))
            return (reduce((lambda x, y: (x @ y)), ([approx_matrix_slice] * num_time_slices)) @ state_in)
def is_empty(self):
return bool(((self._matrix is None) or (self._matrix.nnz == 0))) |
class Effect1020(BaseEffect):
type = 'passive'
    @staticmethod
    def handler(fit, skill, context, projectionRange, **kwargs):
fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Large Artillery Specialization')), 'damageMultiplier', (skill.getModifiedItemAttr('damageMultiplierBonus') * skill.level), **kwargs) |
class TestLoadCFAreaPublic():
def test_load_cf_no_exist(self):
cf_file = os.path.join(TEST_FILES_PATH, 'does_not_exist.nc')
with pytest.raises(FileNotFoundError):
load_cf_area(cf_file)
def test_load_cf_from_not_nc(self):
cf_file = os.path.join(TEST_FILES_PATH, 'areas.yaml')
with pytest.raises((ValueError, OSError)):
load_cf_area(cf_file)
    @pytest.mark.parametrize(('exc_type', 'kwargs'), [(KeyError, {'variable': 'doesNotExist'}), (ValueError, {'variable': 'Polar_Stereographic_Grid'}), (KeyError, {'variable': 'Polar_Stereographic_Grid', 'y': 'doesNotExist', 'x': 'xc'}), (ValueError, {'variable': 'Polar_Stereographic_Grid', 'y': 'time', 'x': 'xc'}), (ValueError, {'variable': 'lat'})])
def test_load_cf_parameters_errors(self, exc_type, kwargs):
cf_file = _prepare_cf_nh10km()
with pytest.raises(exc_type):
load_cf_area(cf_file, **kwargs)
    @pytest.mark.parametrize('kwargs', [{'variable': 'Polar_Stereographic_Grid', 'y': 'yc', 'x': 'xc'}, {'variable': 'ice_conc'}, {}])
def test_load_cf_nh10km(self, kwargs):
cf_file = _prepare_cf_nh10km()
(adef, _) = load_cf_area(cf_file, **kwargs)
assert (adef.shape == (1120, 760))
xc = adef.projection_x_coords
yc = adef.projection_y_coords
assert (xc[0] == (- 3845000.0)), 'Wrong x axis (index 0)'
assert (xc[1] == (xc[0] + 10000.0)), 'Wrong x axis (index 1)'
assert (yc[0] == 5845000.0), 'Wrong y axis (index 0)'
assert (yc[1] == (yc[0] - 10000.0)), 'Wrong y axis (index 1)'
    @pytest.mark.parametrize(('kwargs', 'exp_var', 'exp_lat', 'exp_lon'), [({'variable': 'Polar_Stereographic_Grid', 'y': 'yc', 'x': 'xc'}, 'Polar_Stereographic_Grid', None, None), ({'variable': 'ice_conc'}, 'ice_conc', 'lat', 'lon'), ({}, 'ice_conc', 'lat', 'lon')])
def test_load_cf_nh10km_cfinfo(self, kwargs, exp_var, exp_lat, exp_lon):
cf_file = _prepare_cf_nh10km()
(_, cf_info) = load_cf_area(cf_file, **kwargs)
assert (cf_info['variable'] == exp_var)
assert (cf_info['grid_mapping_variable'] == 'Polar_Stereographic_Grid')
assert (cf_info['type_of_grid_mapping'] == 'polar_stereographic')
assert (cf_info['lon'] == exp_lon)
assert (cf_info['lat'] == exp_lat)
assert (cf_info['x']['varname'] == 'xc')
assert (cf_info['x']['first'] == (- 3845.0))
assert (cf_info['y']['varname'] == 'yc')
assert (cf_info['y']['last'] == (- 5345.0))
    @pytest.mark.parametrize('kwargs', [{'variable': 'C13'}, {}])
def test_load_cf_goes(self, kwargs):
cf_file = _prepare_cf_goes()
(adef, cf_info) = load_cf_area(cf_file, **kwargs)
assert (cf_info['grid_mapping_variable'] == 'GOES-East')
assert (cf_info['type_of_grid_mapping'] == 'geostationary')
assert (cf_info['x']['varname'] == 'x')
assert (cf_info['x']['first'] == (- 3627271.2913))
assert (cf_info['y']['varname'] == 'y')
assert (cf_info['y']['last'] == 1583173.6575)
    @pytest.mark.parametrize(('file_func', 'kwargs', 'exp_lat', 'exp_lon'), [(_prepare_cf_llwgs84, {'variable': 'crs', 'y': 'lat', 'x': 'lon'}, None, None), (_prepare_cf_llwgs84, {'variable': 'temp'}, 'lat', 'lon'), (_prepare_cf_llwgs84, {}, 'lat', 'lon'), (_prepare_cf_llnocrs, {'variable': 'temp'}, 'lat', 'lon'), (_prepare_cf_llnocrs, {}, 'lat', 'lon')])
    @pytest.mark.parametrize('future_geometries', [False, True])
def test_load_cf_latlon(self, file_func, kwargs, exp_lat, exp_lon, future_geometries):
cf_file = file_func()
with pyresample.config.set({'features.future_geometries': future_geometries}):
(adef, cf_info) = load_cf_area(cf_file, **kwargs)
_validate_lonlat_cf_area(adef, cf_info, exp_lon, exp_lat)
assert_future_geometry(adef, future_geometries) |
class CompressedData():
__slots__ = ['data', 'dtype']
def __init__(self):
self.data = None
self.dtype = None
def compression(self, a):
self.data = compress(a.tobytes())
self.dtype = a.dtype
def decompression(self):
return fromstring(decompress(self.data), dtype=self.dtype)
    def __str__(self):
        return str(self.decompression()) |
class Effect5620(BaseEffect):
type = 'passive'
    @staticmethod
    def handler(fit, ship, context, projectionRange, **kwargs):
fit.modules.filteredItemBoost((lambda mod: (mod.item.group.name == 'Missile Launcher Rapid Heavy')), 'speed', ship.getModifiedItemAttr('shipBonusMB'), skill='Minmatar Battleship', **kwargs) |
class ProjectDeployTokenManager(RetrieveMixin, CreateMixin, DeleteMixin, RESTManager):
_path = '/projects/{project_id}/deploy_tokens'
_from_parent_attrs = {'project_id': 'id'}
_obj_cls = ProjectDeployToken
_create_attrs = RequiredOptional(required=('name', 'scopes'), optional=('expires_at', 'username'))
_list_filters = ('scopes',)
_types = {'scopes': types.ArrayAttribute}
def get(self, id: Union[(str, int)], lazy: bool=False, **kwargs: Any) -> ProjectDeployToken:
return cast(ProjectDeployToken, super().get(id=id, lazy=lazy, **kwargs)) |
@dataclass
class FragDBInfo():
filename: str
num_compounds: int
num_error_compounds: int
num_fragmentations: int
num_constants: int
num_variables: int
max_num_pairs: int
options: object
def get_cols(self):
return [self.filename, str(self.num_compounds), str(self.num_error_compounds), str(self.num_fragmentations), str(self.num_constants), str(self.num_variables), str(self.max_num_pairs)] |
def ttrl(x, ranks, n_outputs):
weight_initializer = tf.contrib.layers.xavier_initializer()
suffix = n_outputs
input_shape = x.get_shape().as_list()[1:]
bias = tf.get_variable('bias_{}'.format(np.prod(n_outputs)), shape=(1, np.prod(n_outputs)))
cores = []
    for i in range(1, (len(ranks) - 1)):
        cores.append(tf.get_variable('core_{0}_output_{1}'.format(i, suffix), shape=(ranks[(i - 1)], input_shape[(i - 1)], ranks[i]), initializer=weight_initializer))
    cores.append(tf.get_variable('core_{0}_last_output_{1}'.format((len(ranks) - 2), suffix), shape=(ranks[(- 2)], n_outputs, ranks[(- 1)]), initializer=weight_initializer))
regression_weights = TNSR.tt_to_tensor(cores)
return regression(x, regression_weights, input_shape, bias, n_outputs) |
def test_run_with_verbosity(tester: ApplicationTester) -> None:
tester.execute('list --verbose')
assert tester.io.is_verbose()
tester.execute('list -v')
assert tester.io.is_verbose()
tester.execute('list -vv')
assert tester.io.is_very_verbose()
tester.execute('list -vvv')
assert tester.io.is_debug() |
class RanksComparatorPlotter(AccessorABC):
_default_kind = 'box'
def __init__(self, ranks_cmp):
self._ranks_cmp = ranks_cmp
def flow(self, *, untied=False, grid_kws=None, **kwargs):
df = self._ranks_cmp.to_dataframe(untied=untied)
ax = sns.lineplot(data=df.T, estimator=None, sort=False, **kwargs)
grid_kws = ({} if (grid_kws is None) else grid_kws)
grid_kws.setdefault('alpha', 0.3)
ax.grid(**grid_kws)
ax.set_ylabel(RANKS_LABELS[untied])
return ax
def reg(self, *, untied=False, r2=True, palette=None, legend=True, r2_fmt='.2g', r2_kws=None, **kwargs):
df = self._ranks_cmp.to_dataframe(untied=untied)
if ('color' in kwargs):
cls_name = type(self).__name__
raise TypeError(f"{cls_name}.reg() got an unexpected keyword argument 'color'")
ax = kwargs.pop('ax', None)
if (legend and r2):
r2_kws = ({} if (r2_kws is None) else r2_kws)
r2_df = self._ranks_cmp.r2_score(untied=untied, **r2_kws)
colors = it.cycle(sns.color_palette(palette=palette))
for (x, y) in it.combinations(df.columns, 2):
color = next(colors)
r2_label = ''
if (legend and r2):
r2_score = format(r2_df[x][y], r2_fmt)
r2_label = f' - $R^2={r2_score}$'
label = 'x={x}, y={y}{r2}'.format(x=x, y=y, r2=r2_label)
ax = sns.regplot(x=x, y=y, data=df, ax=ax, label=label, color=color, **kwargs)
ranks_label = RANKS_LABELS[untied]
ax.set(xlabel=f"'x' {ranks_label}", ylabel=f"'y' {ranks_label}")
if legend:
ax.legend()
return ax
def heatmap(self, *, untied=False, **kwargs):
df = self._ranks_cmp.to_dataframe(untied=untied)
kwargs.setdefault('annot', True)
kwargs.setdefault('cbar_kws', {'label': RANKS_LABELS[untied]})
return sns.heatmap(data=df, **kwargs)
def corr(self, *, untied=False, corr_kws=None, **kwargs):
corr_kws = ({} if (corr_kws is None) else corr_kws)
corr = self._ranks_cmp.corr(untied=untied, **corr_kws)
kwargs.setdefault('annot', True)
kwargs.setdefault('cbar_kws', {'label': 'Correlation'})
return sns.heatmap(data=corr, **kwargs)
def cov(self, *, untied=False, cov_kws=None, **kwargs):
cov_kws = ({} if (cov_kws is None) else cov_kws)
cov = self._ranks_cmp.cov(untied=untied, **cov_kws)
kwargs.setdefault('annot', True)
kwargs.setdefault('cbar_kws', {'label': 'Covariance'})
return sns.heatmap(data=cov, **kwargs)
def r2_score(self, untied=False, r2_kws=None, **kwargs):
r2_kws = ({} if (r2_kws is None) else r2_kws)
r2 = self._ranks_cmp.r2_score(untied=untied, **r2_kws)
kwargs.setdefault('annot', True)
kwargs.setdefault('cbar_kws', {'label': '$R^2$'})
return sns.heatmap(data=r2, **kwargs)
def distance(self, *, untied=False, metric='hamming', distance_kws=None, **kwargs):
distance_kws = ({} if (distance_kws is None) else distance_kws)
dis = self._ranks_cmp.distance(untied=untied, metric=metric, **distance_kws)
kwargs.setdefault('annot', True)
kwargs.setdefault('cbar_kws', {'label': f'{metric} distance'.capitalize()})
return sns.heatmap(data=dis, **kwargs)
def box(self, *, untied=False, **kwargs):
df = self._ranks_cmp.to_dataframe(untied=untied)
ax = sns.boxplot(data=df.T, **kwargs)
ranks_label = RANKS_LABELS[untied]
if (kwargs.get('orient') in (None, 'v')):
ax.set_ylabel(ranks_label)
else:
ax.set_xlabel(ranks_label)
return ax
def bar(self, *, untied=False, **kwargs):
df = self._ranks_cmp.to_dataframe(untied=untied)
kwargs['ax'] = (kwargs.get('ax') or plt.gca())
ax = df.plot.bar(**kwargs)
ax.set_ylabel(RANKS_LABELS[untied])
return ax
def barh(self, *, untied=False, **kwargs):
df = self._ranks_cmp.to_dataframe(untied=untied)
kwargs['ax'] = (kwargs.get('ax') or plt.gca())
ax = df.plot.barh(**kwargs)
ax.set_xlabel(RANKS_LABELS[untied])
return ax |
def _load_model_file(load_path, model):
load_optimizer_state_dict = None
print(' [*] Loading model from {}'.format(load_path))
load_data = torch.load(os.path.join(os.getcwd(), load_path), map_location=(lambda storage, loc: storage))
if isinstance(load_data, dict):
load_optimizer_state_dict = load_data.get('optimizer', None)
load_model_state_dict = load_data.get('model', load_data)
else:
load_model_state_dict = load_data.state_dict()
state_dict = model.state_dict()
state_dict.update(load_model_state_dict)
model.load_state_dict(state_dict)
return (model, load_optimizer_state_dict) |
class PassThroughOpLastLayerModel(nn.Module):
def __init__(self):
super(PassThroughOpLastLayerModel, self).__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=2, stride=2, padding=2, bias=False)
self.passthrough = torch.nn.Identity()
def forward(self, *inputs):
x = self.conv1(inputs[0])
x = self.passthrough(x)
return x |
def makeUpdateMatrixSv(qnnArch, unitaries, trainingData, storedStates, lda, ep, l, j):
numInputQubits = qnnArch[(l - 1)]
summ = 0
for x in range(len(trainingData)):
firstPart = updateMatrixFirstPart(qnnArch, unitaries, storedStates, l, j, x)
secondPart = updateMatrixSecondPart(qnnArch, unitaries, trainingData, l, j, x)
mat = qt.commutator(firstPart, secondPart)
keep = list(range(numInputQubits))
keep.append((numInputQubits + j))
mat = partialTraceKeep(mat, keep)
summ = (summ + mat)
summ = ((((0 + 1j) * (2 ** numInputQubits)) / (lda * len(trainingData))) * summ)
return summ |
def _infer_single_node_init(cfg: DistributedTrainingConfig):
assert (cfg.distributed_world_size <= torch.cuda.device_count()), f'world size is {cfg.distributed_world_size} but have {torch.cuda.device_count()} available devices'
port = random.randint(10000, 20000)
cfg.distributed_init_method = 'tcp://localhost:{port}'.format(port=port) |
class FinalBlock(nn.Module):
def __init__(self, input_dim, hidden_units=[], hidden_activations=None, dropout_rates=[], batch_norm=True, residual_type='sum'):
super(FinalBlock, self).__init__()
if (type(dropout_rates) != list):
dropout_rates = ([dropout_rates] * len(hidden_units))
if (type(hidden_activations) != list):
hidden_activations = ([hidden_activations] * len(hidden_units))
self.layer = nn.ModuleList()
self.norm = nn.ModuleList()
self.dropout = nn.ModuleList()
self.activation = nn.ModuleList()
hidden_units = ([input_dim] + hidden_units)
for idx in range((len(hidden_units) - 1)):
self.layer.append(FactorizedQuadraticInteraction(hidden_units[idx], hidden_units[(idx + 1)], residual_type=residual_type))
if batch_norm:
self.norm.append(nn.BatchNorm1d(hidden_units[(idx + 1)]))
if (dropout_rates[idx] > 0):
self.dropout.append(nn.Dropout(dropout_rates[idx]))
self.activation.append(get_activation(hidden_activations[idx]))
def forward(self, X):
X_i = X
for i in range(len(self.layer)):
X_i = self.layer[i](X_i)
if (len(self.norm) > i):
X_i = self.norm[i](X_i)
if (self.activation[i] is not None):
X_i = self.activation[i](X_i)
if (len(self.dropout) > i):
X_i = self.dropout[i](X_i)
return X_i |
@numba.jit(nopython=True)
def create_indices(episode_ends: np.ndarray, sequence_length: int, episode_mask: np.ndarray, pad_before: int=0, pad_after: int=0, debug: bool=True) -> np.ndarray:
    assert (episode_mask.shape == episode_ends.shape)
pad_before = min(max(pad_before, 0), (sequence_length - 1))
pad_after = min(max(pad_after, 0), (sequence_length - 1))
indices = list()
for i in range(len(episode_ends)):
if (not episode_mask[i]):
continue
start_idx = 0
if (i > 0):
start_idx = episode_ends[(i - 1)]
end_idx = episode_ends[i]
episode_length = (end_idx - start_idx)
min_start = (- pad_before)
max_start = ((episode_length - sequence_length) + pad_after)
for idx in range(min_start, (max_start + 1)):
buffer_start_idx = (max(idx, 0) + start_idx)
buffer_end_idx = (min((idx + sequence_length), episode_length) + start_idx)
start_offset = (buffer_start_idx - (idx + start_idx))
end_offset = (((idx + sequence_length) + start_idx) - buffer_end_idx)
sample_start_idx = (0 + start_offset)
sample_end_idx = (sequence_length - end_offset)
if debug:
assert (start_offset >= 0)
assert (end_offset >= 0)
assert ((sample_end_idx - sample_start_idx) == (buffer_end_idx - buffer_start_idx))
indices.append([buffer_start_idx, buffer_end_idx, sample_start_idx, sample_end_idx])
indices = np.array(indices)
return indices |
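# Minimal usage sketch for create_indices: two episodes ending at global steps 4
# and 7, both unmasked; each returned row is
# (buffer_start_idx, buffer_end_idx, sample_start_idx, sample_end_idx).
episode_ends = np.array([4, 7], dtype=np.int64)
mask = np.ones(2, dtype=np.bool_)
idx = create_indices(episode_ends, sequence_length=2, episode_mask=mask)
# With no padding, windows never cross an episode boundary: the first episode
# yields buffer starts 0..2, the second buffer starts 4..5, so 5 windows total.
assert idx.shape == (5, 4) |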
def specify_shape(x: Union[(np.ndarray, Number, Variable)], shape: Union[(ShapeValueType, list[ShapeValueType], tuple[(ShapeValueType, ...)])]):
if (not isinstance(shape, (tuple, list))):
shape = (shape,)
if ((len(shape) == 1) and (shape[0] is not None)):
shape_vector = ptb.as_tensor_variable(shape[0])
if (shape_vector.ndim == 1):
try:
shape = tuple(shape_vector)
except ValueError:
raise ValueError('Shape vector must have fixed dimensions')
x = ptb.as_tensor_variable(x)
new_shape_info = any(((s != xts) for (s, xts) in zip(shape, x.type.shape) if (s is not None)))
if ((not new_shape_info) and (len(shape) == x.type.ndim)):
return x
return _specify_shape(x, *shape) |
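# Hedged usage sketch (assuming this is pytensor's specify_shape, with
# pytensor.tensor importable as pt): pin one static dimension, leave the other
# unknown, and note the fast path that returns the input unchanged when no new
# shape information is added.
import pytensor.tensor as pt
x = pt.matrix('x')
y = specify_shape(x, (2, None))
assert y.type.shape == (2, None)
assert specify_shape(y, (2, None)) is y |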
@pytest.mark.fast
def test_slit_energy_conservation(verbose=True, plot=True, close_plots=True, *args, **kwargs):
from radis.test.utils import getTestFile
_clean(plot, close_plots)
if verbose:
print('\n>>> _test_slit_energy_conservation\n')
s = calculated_spectrum(*np.loadtxt(getTestFile('calc_N2C_spectrum_Trot1200_Tvib3000_slit0.1.txt')).T, wunit='nm', Iunit='mW/cm2/sr/nm')
P = s.get_power(unit='mW/cm2/sr')
s.apply_slit(0.5, norm_by='area')
(w, I) = s.get('radiance', wunit='nm', Iunit='mW/cm2/sr/nm')
Pc = abs(np.trapz(I[(~ np.isnan(I))], x=w[(~ np.isnan(I))]))
b = np.isclose(P, Pc, 0.03)
if plot:
fig = plt.figure((fig_prefix + 'energy conservation during resampling'))
s.plot(nfig=fig.number, label='{0:.1f} mW/cm2/sr'.format(P))
s.plot('radiance_noslit', nfig=fig.number, label='{0:.1f} mW/cm2/sr'.format(Pc))
plt.title('Energy conservation: {0}'.format(b))
plt.legend()
plt.tight_layout()
assert np.isclose(P, Pc, 0.03)
return True |
def get_sde_loss_fn(sde, model, train, reduce_mean=True, continuous=True, likelihood_weighting=True, eps=1e-05):
reduce_op = (jnp.mean if reduce_mean else (lambda *args, **kwargs: (0.5 * jnp.sum(*args, **kwargs))))
def loss_fn(rng, params, states, batch):
score_fn = mutils.get_score_fn(sde, model, params, states, train=train, continuous=continuous, return_state=True)
data = batch['image']
(rng, step_rng) = random.split(rng)
t = random.uniform(step_rng, (data.shape[0],), minval=eps, maxval=sde.T)
(rng, step_rng) = random.split(rng)
z = random.normal(step_rng, data.shape)
(mean, std) = sde.marginal_prob(data, t)
perturbed_data = (mean + batch_mul(std, z))
(rng, step_rng) = random.split(rng)
(score, new_model_state) = score_fn(perturbed_data, t, rng=step_rng)
if (not likelihood_weighting):
losses = jnp.square((batch_mul(score, std) + z))
losses = reduce_op(losses.reshape((losses.shape[0], (- 1))), axis=(- 1))
else:
g2 = (sde.sde(jnp.zeros_like(data), t)[1] ** 2)
losses = jnp.square((score + batch_mul(z, (1.0 / std))))
losses = (reduce_op(losses.reshape((losses.shape[0], (- 1))), axis=(- 1)) * g2)
loss = jnp.mean(losses)
return (loss, new_model_state)
return loss_fn |
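# Reading the loss above: with likelihood_weighting=False the residual
# batch_mul(score, std) + z is the standard denoising score-matching target;
# with likelihood_weighting=True the per-example loss is instead
# jnp.square(score + z/std) scaled by g(t)^2, the squared diffusion coefficient
# returned as sde.sde(0, t)[1], which yields the likelihood-weighted objective. |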
class SystemIrreducibilityAnalysis(cmp.OrderableByPhi):
def __init__(self, phi=None, ces=None, partitioned_ces=None, subsystem=None, cut_subsystem=None):
self.phi = phi
self.ces = ces
self.partitioned_ces = partitioned_ces
self.subsystem = subsystem
self.cut_subsystem = cut_subsystem
def __repr__(self):
return fmt.make_repr(self, _sia_attributes)
def __str__(self, ces=True):
return fmt.fmt_sia(self, ces=ces)
def print(self, ces=True):
print(self.__str__(ces=ces))
    @property
    def cut(self):
        return self.cut_subsystem.cut
    @property
    def network(self):
        return self.subsystem.network
unorderable_unless_eq = ['network']
def __eq__(self, other):
return cmp.general_eq(self, other, _sia_attributes)
def __bool__(self):
return (not utils.eq(self.phi, 0))
def __hash__(self):
return hash((self.phi, self.ces, self.partitioned_ces, self.subsystem, self.cut_subsystem))
def to_json(self):
return {attr: getattr(self, attr) for attr in (_sia_attributes + ['small_phi_time'])}
    @classmethod
    def from_json(cls, dct):
        del dct['small_phi_time']
        return cls(**dct) |
class _BertWordPieceTokenizer(AbstractTokenizer):
def __init__(self, vocab_file, lower_case=True):
if lower_case:
name = 'BERT Lower Case'
else:
name = 'BERT Upper Case'
super().__init__(name)
self.tokenizer = FullBertTokenizer(vocab_file, do_lower_case=lower_case)
self.cls_id = self.tokenizer.vocab['[CLS]']
self.sep_id = self.tokenizer.vocab['[SEP]']
self.pad_id = self.tokenizer.vocab['[PAD]']
self.mask_id = self.tokenizer.vocab['[MASK]']
    @property
    def vocab_size(self):
        return self.tokenizer.vocab_size()
    @property
    def vocab(self):
        return self.tokenizer.vocab
    @property
    def inv_vocab(self):
        return self.tokenizer.inv_vocab
def tokenize(self, text):
text_tokens = self.tokenizer.tokenize(text)
return self.tokenizer.convert_tokens_to_ids(text_tokens)
def decode_token_ids(self, token_ids):
tokens = self.tokenizer.convert_ids_to_tokens(token_ids)
exclude_list = ['[PAD]', '[CLS]']
non_pads = [t for t in tokens if (t not in exclude_list)]
result = ''
for s in non_pads:
if s.startswith('##'):
result += s[2:]
else:
result += (' ' + s)
return result
    @property
    def cls(self):
        return self.cls_id
    @property
    def sep(self):
        return self.sep_id
    @property
    def pad(self):
        return self.pad_id
    @property
    def mask(self):
        return self.mask_id |
class TestHook():
def test_unknown(self, isolation):
metadata = ProjectMetadata(str(isolation), PluginManager(), {'project': {'name': 'foo'}, 'tool': {'hatch': {'metadata': {'hooks': {'foo': {}}}}}})
with pytest.raises(ValueError, match='Unknown metadata hook: foo'):
_ = metadata.core
def test_custom(self, temp_dir, helpers):
classifiers = ['Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.9', 'Framework :: Foo', 'Development Status :: 4 - Beta', 'Private :: Do Not Upload']
metadata = ProjectMetadata(str(temp_dir), PluginManager(), {'project': {'name': 'foo', 'classifiers': classifiers, 'dynamic': ['version', 'description']}, 'tool': {'hatch': {'version': {'path': 'a/b'}, 'metadata': {'hooks': {'custom': {}}}}}})
file_path = ((temp_dir / 'a') / 'b')
file_path.ensure_parent_dir_exists()
file_path.write_text('__version__ = "0.0.1"')
file_path = (temp_dir / DEFAULT_BUILD_SCRIPT)
file_path.write_text(helpers.dedent("\n from hatchling.metadata.plugin.interface import MetadataHookInterface\n\n class CustomHook(MetadataHookInterface):\n def update(self, metadata):\n metadata['description'] = metadata['name'] + 'bar'\n metadata['version'] = metadata['version'] + 'rc0'\n\n def get_known_classifiers(self):\n return ['Framework :: Foo']\n "))
assert ('custom' in metadata.hatch.metadata.hooks)
assert (metadata.core.name == 'foo')
assert (metadata.core.description == 'foobar')
assert (metadata.core.version == '0.0.1rc0')
assert (metadata.core.classifiers == ['Private :: Do Not Upload', 'Development Status :: 4 - Beta', 'Framework :: Foo', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.11'])
def test_custom_missing_dynamic(self, temp_dir, helpers):
metadata = ProjectMetadata(str(temp_dir), PluginManager(), {'project': {'name': 'foo', 'dynamic': ['version']}, 'tool': {'hatch': {'version': {'path': 'a/b'}, 'metadata': {'hooks': {'custom': {}}}}}})
file_path = ((temp_dir / 'a') / 'b')
file_path.ensure_parent_dir_exists()
file_path.write_text('__version__ = "0.0.1"')
file_path = (temp_dir / DEFAULT_BUILD_SCRIPT)
file_path.write_text(helpers.dedent("\n from hatchling.metadata.plugin.interface import MetadataHookInterface\n\n class CustomHook(MetadataHookInterface):\n def update(self, metadata):\n metadata['description'] = metadata['name'] + 'bar'\n "))
with pytest.raises(ValueError, match='The field `description` was set dynamically and therefore must be listed in `project.dynamic`'):
_ = metadata.core |
@with_model
def test_ic_expression_with_one_parameter():
Monomer('A')
Parameter('k1', 1)
Expression('e1', k1)
Rule('A_deg', (A() >> None), k1)
Initial(A(), e1)
generate_equations(model)
t = np.linspace(0, 1000, 100)
sol = Solver(model, t, use_analytic_jacobian=True)
sol.run() |
def get_extensions():
ext_dirs = ((cwd / package_name) / 'cpp_exts')
ext_modules = []
rans_lib_dir = (cwd / 'third_party/ryg_rans')
rans_ext_dir = (ext_dirs / 'rans')
extra_compile_args = ['-std=c++17']
if os.getenv('DEBUG_BUILD', None):
extra_compile_args += ['-O0', '-g', '-UNDEBUG']
else:
extra_compile_args += ['-O3']
ext_modules.append(Pybind11Extension(name=f'{package_name}.ans', sources=[str(s) for s in rans_ext_dir.glob('*.cpp')], language='c++', include_dirs=[rans_lib_dir, rans_ext_dir], extra_compile_args=extra_compile_args))
ops_ext_dir = (ext_dirs / 'ops')
ext_modules.append(Pybind11Extension(name=f'{package_name}._CXX', sources=[str(s) for s in ops_ext_dir.glob('*.cpp')], language='c++', extra_compile_args=extra_compile_args))
return ext_modules |
class BaseNetwork(nn.Module):
def __init__(self):
super(BaseNetwork, self).__init__()
def init_weights(self, init_type='normal', gain=0.02, bias_value=0.0, target_op=None):
def init_func(m):
classname = m.__class__.__name__
if (target_op is not None):
if (classname.find(target_op) == (- 1)):
return False
if hasattr(m, 'param_inited'):
return
if hasattr(m, 'weight'):
if (init_type == 'normal'):
nn.init.normal_(m.weight.data, 0.0, gain)
elif (init_type == 'xavier_normal'):
nn.init.xavier_normal_(m.weight.data, gain=gain)
elif (init_type == 'kaiming'):
nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif (init_type == 'orthogonal'):
nn.init.orthogonal_(m.weight.data, gain=gain)
                elif (init_type == 'xavier_uniform'):
nn.init.xavier_uniform_(m.weight.data, gain=gain)
elif (init_type == 'constant'):
nn.init.constant_(m.weight.data, gain)
else:
raise NotImplementedError()
if (hasattr(m, 'bias') and (m.bias is not None)):
nn.init.constant_(m.bias.data, bias_value)
m.param_inited = True
self.init_apply(init_func)
def getParamList(self, x):
return list(x.parameters())
def init_apply(self, fn):
for m in self.children():
if hasattr(m, 'param_inited'):
if (m.param_inited is False):
m.init_apply(fn)
else:
m.apply(fn)
fn(self)
return self |
def onresource(unit, *args):
    def split(lst, limit):
        root_length = 200
        filepath = None
        length = 0
        bucket = []
        for item in lst:
            if filepath:
                length += ((root_length + len(filepath)) + len(item))
                if ((length > limit) and bucket):
                    (yield bucket)
                    bucket = []
                    length = 0
                bucket.append(filepath)
                bucket.append(item)
                filepath = None
            else:
                filepath = item
        if bucket:
            (yield bucket)
unit.onpeerdir(['library/resource'])
for part_args in split(args, 8000):
output = (('resource.' + listid(part_args)) + '.cpp')
inputs = [x for (x, y) in iterpair(part_args) if (x != '-')]
if inputs:
inputs = (['IN'] + inputs)
unit.onrun_program((((['tools/rescompiler', output] + part_args) + inputs) + ['OUT_NOAUTO', output]))
unit.onsrcs(['GLOBAL', output]) |
class DistanceCondition(_EntityTriggerType):
def __init__(self, value, rule, position, alongroute=True, freespace=True, distance_type=RelativeDistanceType.longitudinal, coordinate_system=CoordinateSystem.road, routing_algorithm=None):
self.value = value
self.alongroute = convert_bool(alongroute)
self.freespace = convert_bool(freespace)
self.rule = convert_enum(rule, Rule)
if (not isinstance(position, _PositionType)):
raise TypeError('position input is not a valid Position')
self.position = position
self.relative_distance_type = convert_enum(distance_type, RelativeDistanceType)
self.coordinate_system = convert_enum(coordinate_system, CoordinateSystem)
self.routing_algorithm = convert_enum(routing_algorithm, RoutingAlgorithm, True)
def __eq__(self, other):
if isinstance(other, DistanceCondition):
if ((self.get_attributes() == other.get_attributes()) and (self.position == other.position)):
return True
return False
    @staticmethod
    def parse(element):
condition = element.find('DistanceCondition')
value = condition.attrib['value']
rule = convert_enum(condition.attrib['rule'], Rule)
freespace = convert_bool(condition.attrib['freespace'])
if ('alongRoute' in condition.attrib):
alongroute = convert_bool(condition.attrib['alongRoute'])
else:
alongroute = True
if ('relativeDistanceType' in condition.attrib):
reldisttype = convert_enum(condition.attrib['relativeDistanceType'], RelativeDistanceType)
else:
reldisttype = RelativeDistanceType.longitudinal
if ('coordinateSystem' in condition.attrib):
coordsystem = convert_enum(condition.attrib['coordinateSystem'], CoordinateSystem)
else:
coordsystem = CoordinateSystem.road
if ('routingAlgorithm' in condition.attrib):
routing_algorithm = convert_enum(condition.attrib['routingAlgorithm'], RoutingAlgorithm)
else:
routing_algorithm = None
        position = _PositionFactory.parse_position(condition.find('Position'))
return DistanceCondition(value, rule, position, alongroute, freespace, reldisttype, coordsystem, routing_algorithm)
def get_attributes(self):
basedict = {}
basedict['value'] = str(self.value)
basedict['freespace'] = get_bool_string(self.freespace)
basedict['rule'] = self.rule.get_name()
if self.isVersion(minor=0):
basedict['alongRoute'] = get_bool_string(self.alongroute)
else:
basedict['relativeDistanceType'] = self.relative_distance_type.get_name()
basedict['coordinateSystem'] = self.coordinate_system.get_name()
if (self.routing_algorithm is not None):
if self.isVersionEqLess(minor=1):
raise OpenSCENARIOVersionError('routing algorithm was introduced in OSC 1.2')
basedict['routingAlgorithm'] = self.routing_algorithm.get_name()
return basedict
def get_element(self):
element = ET.Element('EntityCondition')
distancecond = ET.SubElement(element, 'DistanceCondition', attrib=self.get_attributes())
distancecond.append(self.position.get_element())
return element |
def vk_request_one_param_pool(vk_session, method, key, values, default_values=None):
result = {}
errors = {}
if (default_values is None):
default_values = {}
for i in range(0, len(values), 25):
current_values = values[i:(i + 25)]
response_raw = vk_one_param(vk_session, method, current_values, default_values, key)
response = response_raw['response']
response_errors = response_raw.get('execute_errors', [])
response_errors_iter = iter(response_errors)
for (x, r) in enumerate(response):
if (r is not False):
result[current_values[x]] = r
else:
errors[current_values[x]] = next(response_errors_iter)
return (result, errors) |
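# Note on the batching above: VK's `execute` method accepts at most 25 nested
# API calls per request, hence the slices of 25; items returned as False are
# paired, in order, with the corresponding entries of `execute_errors`. |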
class Distribution(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def read_text(self, filename):
        """Attempt to load metadata file given by the name."""
    @abc.abstractmethod
    def locate_file(self, path):
        """Given a path to a file in this distribution, return a path to it."""
    @classmethod
    def from_name(cls, name: str):
if (not name):
raise ValueError('A distribution name is required.')
try:
return next(cls.discover(name=name))
except StopIteration:
raise PackageNotFoundError(name)
    @classmethod
    def discover(cls, **kwargs):
context = kwargs.pop('context', None)
if (context and kwargs):
raise ValueError('cannot accept context and kwargs')
context = (context or DistributionFinder.Context(**kwargs))
return itertools.chain.from_iterable((resolver(context) for resolver in cls._discover_resolvers()))
    @staticmethod
    def at(path):
        return PathDistribution(pathlib.Path(path))
    @staticmethod
    def _discover_resolvers():
declared = (getattr(finder, 'find_distributions', None) for finder in sys.meta_path)
return filter(None, declared)
    @property
    def metadata(self) -> _meta.PackageMetadata:
text = (self.read_text('METADATA') or self.read_text('PKG-INFO') or self.read_text(''))
return _adapters.Message(email.message_from_string(text))
    @property
    def name(self):
        return self.metadata['Name']
    @property
    def _normalized_name(self):
        return Prepared.normalize(self.name)
    @property
    def version(self):
        return self.metadata['Version']
    @property
    def entry_points(self):
        return EntryPoints._from_text_for(self.read_text('entry_points.txt'), self)
    @property
    def files(self):
def make_file(name, hash=None, size_str=None):
result = PackagePath(name)
result.hash = (FileHash(hash) if hash else None)
result.size = (int(size_str) if size_str else None)
result.dist = self
return result
        @pass_none
def make_files(lines):
return list(starmap(make_file, csv.reader(lines)))
return make_files((self._read_files_distinfo() or self._read_files_egginfo()))
def _read_files_distinfo(self):
text = self.read_text('RECORD')
return (text and text.splitlines())
def _read_files_egginfo(self):
text = self.read_text('SOURCES.txt')
return (text and map('"{}"'.format, text.splitlines()))
def requires(self):
reqs = (self._read_dist_info_reqs() or self._read_egg_info_reqs())
return (reqs and list(reqs))
def _read_dist_info_reqs(self):
return self.metadata.get_all('Requires-Dist')
def _read_egg_info_reqs(self):
source = self.read_text('requires.txt')
return pass_none(self._deps_from_requires_text)(source)
    @classmethod
    def _deps_from_requires_text(cls, source):
return cls._convert_egg_info_reqs_to_simple_reqs(Sectioned.read(source))
    @staticmethod
    def _convert_egg_info_reqs_to_simple_reqs(sections):
def make_condition(name):
return (name and f'extra == "{name}"')
def quoted_marker(section):
section = (section or '')
(extra, sep, markers) = section.partition(':')
if (extra and markers):
markers = f'({markers})'
conditions = list(filter(None, [markers, make_condition(extra)]))
return (('; ' + ' and '.join(conditions)) if conditions else '')
        def url_req_space(req):
            return (' ' * ('@' in req))
for section in sections:
space = url_req_space(section.value)
(yield ((section.value + space) + quoted_marker(section.name))) |
def _derive_metrics(df: pd.DataFrame) -> pd.DataFrame:
logger.info('Deriving metrics...')
df['workflow_number'] = df.apply((lambda row: _get_workflow_number_from_name(row['name'])), axis=1)
def _calculate_difference(row: pd.Series, start_column: str, end_column: str) -> Optional[int]:
start_date = row[start_column]
end_date = row[end_column]
start_date_exists = (not pd.isna(start_date))
end_date_exists = (not pd.isna(end_date))
if (start_date_exists and end_date_exists):
return (_convert_str_date_to_epoch(end_date) - _convert_str_date_to_epoch(start_date))
return None
df['pending_time'] = df.apply(partial(_calculate_difference, start_column='asked_to_start_date', end_column='started'), axis=1)
df['runtime'] = df.apply(partial(_calculate_difference, start_column='started', end_column='ended'), axis=1)
return df |
class LocalScoreClass(object):
def __init__(self, data: Any, local_score_fun: Callable[([Any, int, List[int], Any], float)], parameters=None):
self.data = data
self.local_score_fun = local_score_fun
self.parameters = parameters
self.score_cache = {}
if (self.local_score_fun == local_score_BIC_from_cov):
self.cov = np.cov(self.data.T)
self.n = self.data.shape[0]
def score(self, i: int, PAi: List[int]) -> float:
if (i not in self.score_cache):
self.score_cache[i] = {}
hash_key = tuple(sorted(PAi))
if (hash_key not in self.score_cache[i]):
if (self.local_score_fun == local_score_BIC_from_cov):
self.score_cache[i][hash_key] = self.local_score_fun((self.cov, self.n), i, PAi, self.parameters)
else:
self.score_cache[i][hash_key] = self.local_score_fun(self.data, i, PAi, self.parameters)
return self.score_cache[i][hash_key] |
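# Caching sketch: repeated queries with the same parent set (in any order) hit
# score_cache. dummy_score is a hypothetical scorer, and this must run where
# local_score_BIC_from_cov is in scope (e.g. causal-learn's score module).
import numpy as np

calls = []
def dummy_score(data, i, PAi, parameters):
    calls.append((i, tuple(PAi)))
    return float(len(PAi))

scorer = LocalScoreClass(data=np.random.randn(100, 5), local_score_fun=dummy_score)
scorer.score(0, [2, 1])
scorer.score(0, [1, 2])  # same sorted hash_key -> served from the cache
assert len(calls) == 1 |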
def pad(x: Tensor, p: int = 2 ** (4 + 3)) -> Tuple[Tensor, Tuple[int, ...]]:
(h, w) = (x.size(2), x.size(3))
new_h = ((((h + p) - 1) // p) * p)
new_w = ((((w + p) - 1) // p) * p)
padding_left = ((new_w - w) // 2)
padding_right = ((new_w - w) - padding_left)
padding_top = ((new_h - h) // 2)
padding_bottom = ((new_h - h) - padding_top)
padding = (padding_left, padding_right, padding_top, padding_bottom)
x = F.pad(x, padding, mode='constant', value=0)
return (x, padding) |
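# Round-trip sketch: the returned padding tuple inverts the operation, since
# F.pad with negative amounts crops (both spatial dims are rounded up to 128).
import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 100, 150)
padded, padding = pad(x)
assert padded.size(2) % 128 == 0 and padded.size(3) % 128 == 0
restored = F.pad(padded, tuple(-p for p in padding))  # negative padding crops back
assert restored.shape == x.shape and torch.equal(restored, x) |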
class DAM_Module(nn.Module):
def __init__(self, in_dim):
super(DAM_Module, self).__init__()
self.channel_in = in_dim
self.gamma = nn.Parameter(torch.zeros(1))
self.softmax = nn.Softmax(dim=(- 1))
def forward(self, x):
(m_batchsize, N, C, height, width) = x.size()
proj_query = x.view(m_batchsize, N, (- 1))
proj_key = x.view(m_batchsize, N, (- 1)).permute(0, 2, 1)
energy = torch.bmm(proj_query, proj_key)
energy_new = (torch.max(energy, (- 1), keepdim=True)[0].expand_as(energy) - energy)
attention = self.softmax(energy_new)
proj_value = x.view(m_batchsize, N, (- 1))
out = torch.bmm(attention, proj_value)
out = out.view(m_batchsize, N, C, height, width)
out = ((self.gamma * out) + x)
out = out.view(m_batchsize, (- 1), height, width)
return out |
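# Shape sketch (sizes arbitrary): attention is taken across the N axis, then the
# result is folded back into the channel dimension.
import torch

m = DAM_Module(in_dim=64)        # in_dim is stored; the forward math is shape-driven
x = torch.randn(2, 4, 64, 8, 8)  # (batch, N, C, height, width)
out = m(x)
assert out.shape == (2, 4 * 64, 8, 8) |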
@pytest.mark.end_to_end()
@pytest.mark.parametrize(('depends_on', 'produces'), [("'in.txt'", "'out.txt'"), ("Path('in.txt')", "Path('out.txt')")])
def test_collect_file_with_relative_path(tmp_path, depends_on, produces):
source = f'''
import pytask
from pathlib import Path
@pytask.mark.depends_on({depends_on})
@pytask.mark.produces({produces})
def task_write_text(depends_on, produces):
produces.write_text(depends_on.read_text())
'''
tmp_path.joinpath('task_module.py').write_text(textwrap.dedent(source))
tmp_path.joinpath('in.txt').write_text('Relative paths work.')
session = build(paths=tmp_path)
assert (session.collection_reports[0].outcome == CollectionOutcome.SUCCESS)
assert (tmp_path.joinpath('out.txt').read_text() == 'Relative paths work.') |
class ServerWatch(pypilotValue):
def __init__(self, values):
super(ServerWatch, self).__init__(values, 'watch')
def set(self, msg, connection):
(name, data) = msg.rstrip().split('=', 1)
watches = pyjson.loads(data)
values = self.server_values.values
for name in watches:
if (name not in values):
values[name] = pypilotValue(self.server_values, name)
values[name].watch(connection, watches[name]) |
class F17_LogVolData(F15_LogVolData):
removedKeywords = F15_LogVolData.removedKeywords
removedAttrs = F15_LogVolData.removedAttrs
def __init__(self, *args, **kwargs):
F15_LogVolData.__init__(self, *args, **kwargs)
self.resize = kwargs.get('resize', False)
def _getArgsAsStr(self):
retval = F15_LogVolData._getArgsAsStr(self)
if self.resize:
retval += ' --resize'
return retval |
class AsciiContainer(Container):
widget_layout_map = None
def __init__(self, *args, **kwargs):
Container.__init__(self, *args, **kwargs)
self.css_position = 'relative'
def set_from_asciiart(self, asciipattern, gap_horizontal=0, gap_vertical=0):
pattern_rows = asciipattern.split('\n')
for r in pattern_rows[:]:
if (len(r.replace(' ', '')) < 1):
pattern_rows.remove(r)
layout_height_in_chars = len(pattern_rows)
self.widget_layout_map = {}
row_index = 0
for row in pattern_rows:
row = row.strip()
row_width = (len(row) - row.count('|'))
row = row[1:(- 1)]
columns = row.split('|')
left_value = 0
for column in columns:
widget_key = column.strip()
widget_width = float(len(column))
if (widget_key not in self.widget_layout_map):
self.widget_layout_map[widget_key] = {
    'width': ('%.2f%%' % float((((widget_width / row_width) * 100.0) - gap_horizontal))),
    'height': 1,
    'top': ('%.2f%%' % float((((row_index / layout_height_in_chars) * 100.0) + (gap_vertical / 2.0)))),
    'left': ('%.2f%%' % float((((left_value / row_width) * 100.0) + (gap_horizontal / 2.0))))}
else:
self.widget_layout_map[widget_key]['height'] += 1
left_value += widget_width
row_index += 1
for key in self.widget_layout_map.keys():
self.widget_layout_map[key]['height'] = ('%.2f%%' % float((((self.widget_layout_map[key]['height'] / layout_height_in_chars) * 100.0) - gap_vertical)))
for key in self.widget_layout_map.keys():
self.set_widget_layout(key)
def append(self, widget, key=''):
key = Container.append(self, widget, key)
self.set_widget_layout(key)
return key
def set_widget_layout(self, widget_key):
if ((widget_key not in self.children) or (widget_key not in self.widget_layout_map)):
return
self.children[widget_key].css_position = 'absolute'
self.children[widget_key].set_size(self.widget_layout_map[widget_key]['width'], self.widget_layout_map[widget_key]['height'])
self.children[widget_key].css_left = self.widget_layout_map[widget_key]['left']
self.children[widget_key].css_top = self.widget_layout_map[widget_key]['top'] |
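# Hedged usage sketch, assuming remi's standard Label/append API: each pipe-delimited
# cell in the ASCII art becomes a slot sized as a percentage of the container.
import remi.gui as gui

container = AsciiContainer(width='100%', height='100%')
container.set_from_asciiart('''
|header        |
|menu  |content|
''', gap_horizontal=1, gap_vertical=1)
container.append(gui.Label('Header'), 'header')
container.append(gui.Label('Menu'), 'menu')
container.append(gui.Label('Content'), 'content') |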
def genee_loop_chunk(args, chunk_window_starts, chunk_window_stops, abs_chunk_start, chunk_max_window_start, epsilon_effect):
(betas, ld) = args
rows = list()
for ti in range(len(chunk_window_starts)):
window_start = chunk_window_starts[ti]
window_stop = chunk_window_stops[ti]
rows.append(genee_test(slice(window_start, window_stop), ld, betas, epsilon_effect))
cols = [('test_q', np.float32), ('q_var', np.float32), ('pval', np.float32)]
df = pd.DataFrame(rows, columns=[c[0] for c in cols])
for (k, v) in dict(cols).items():
df[k] = df[k].astype(v)
return df |
class AdaptiveRelativeAttn(nn.Module):
def __init__(self, model_size, num_heads, factor_size, dropout=0.0, adaptive_type='shared'):
super().__init__()
self.model_size = model_size
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = (model_size // num_heads)
self.factor_size = factor_size
self.adaptive_type = adaptive_type
assert ((self.head_dim * num_heads) == self.model_size), 'model_size must be divisible by num_heads'
self.bias = True
self.in_proj_weight = Parameter(torch.Tensor((3 * model_size), model_size, factor_size))
self.out_proj_weight = Parameter(torch.Tensor(model_size, model_size, factor_size))
self.pos_proj_weight = Parameter(torch.Tensor(model_size, model_size, factor_size))
self.in_proj_bias = Parameter(torch.Tensor((3 * model_size), factor_size))
self.out_proj_bias = Parameter(torch.Tensor(model_size, factor_size))
self.pos_proj_bias = Parameter(torch.Tensor(model_size, factor_size))
self.r_w_bias = nn.Parameter(torch.Tensor(self.num_heads, self.head_dim, factor_size))
self.r_r_bias = nn.Parameter(torch.Tensor(self.num_heads, self.head_dim, factor_size))
self.factor_map = nn.Linear(self.model_size, self.factor_size)
self.reset_parameters()
self.attn_func = relative_self_attn_func
def reset_parameters(self):
std_ = math.sqrt((2.0 / (self.model_size + self.model_size)))
nn.init.normal_(self.in_proj_weight, 0.0, std_)
nn.init.normal_(self.out_proj_weight, 0.0, std_)
nn.init.normal_(self.pos_proj_weight, 0.0, std_)
nn.init.constant_(self.in_proj_bias, 0.0)
nn.init.constant_(self.out_proj_bias, 0.0)
nn.init.constant_(self.pos_proj_bias, 0.0)
nn.init.normal_(self.r_w_bias, 0.0, std_)
nn.init.normal_(self.r_r_bias, 0.0, std_)
def forward(self, input, pos, factor, key_padding_mask=None, attn_mask=None, mems=None, incremental=False, incremental_cache=None):
factor = self.factor_map(factor).squeeze()
if (key_padding_mask is not None):
assert (attn_mask is None), 'ERROR: attn_mask and key_padding_mask should not both be defined!'
mask = key_padding_mask
if (len(mask.shape) == 3):
mask = mask.squeeze(0).transpose(0, 1)
elif (attn_mask is not None):
mask = attn_mask
if (len(mask.shape) == 3):
mask = mask.squeeze((- 1))
else:
mask = None
in_proj_weight = torch.mv(self.in_proj_weight.view((- 1), self.factor_size), factor).view(self.in_proj_weight.size(0), self.in_proj_weight.size(1))
out_proj_weight = torch.mv(self.out_proj_weight.view((- 1), self.factor_size), factor).view(self.out_proj_weight.size(0), self.out_proj_weight.size(1))
pos_proj_weight = torch.mv(self.pos_proj_weight.view((- 1), self.factor_size), factor).view(self.pos_proj_weight.size(0), self.pos_proj_weight.size(1))
in_proj_bias = torch.mv(self.in_proj_bias, factor)
out_proj_bias = torch.mv(self.out_proj_bias, factor)
pos_proj_bias = torch.mv(self.pos_proj_bias, factor)
r_w_bias = torch.mv(self.r_w_bias.view((- 1), self.factor_size), factor).view(self.r_w_bias.size(0), self.r_w_bias.size(1))
r_r_bias = torch.mv(self.r_r_bias.view((- 1), self.factor_size), factor).view(self.r_r_bias.size(0), self.r_r_bias.size(1))
is_training = self.training
(outputs, coverage) = self.attn_func(input, pos, (attn_mask is not None), is_training, self.num_heads, in_proj_weight, out_proj_weight, pos_proj_weight, in_proj_bias, out_proj_bias, pos_proj_bias, r_w_bias, r_r_bias, mask, self.dropout, incremental, incremental_cache, False, False)
return (outputs, coverage) |
@BACKBONES.register_module()
class ResNet(BaseModule):
arch_settings = {18: (BasicBlock, (2, 2, 2, 2)), 34: (BasicBlock, (3, 4, 6, 3)), 50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3))}
def __init__(self, depth, in_channels=3, stem_channels=64, base_channels=64, num_stages=4, strides=(1, 2, 2, 2), dilations=(1, 1, 1, 1), out_indices=(0, 1, 2, 3), style='pytorch', deep_stem=False, avg_down=False, frozen_stages=(- 1), conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=False, dcn=None, stage_with_dcn=(False, False, False, False), plugins=None, multi_grid=None, contract_dilation=False, with_cp=False, zero_init_residual=True, pretrained=None, init_cfg=None):
super(ResNet, self).__init__(init_cfg)
if (depth not in self.arch_settings):
raise KeyError(f'invalid depth {depth} for resnet')
self.pretrained = pretrained
self.zero_init_residual = zero_init_residual
block_init_cfg = None
assert (not (init_cfg and pretrained)), 'init_cfg and pretrained cannot be set at the same time'
if isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif (pretrained is None):
if (init_cfg is None):
self.init_cfg = [dict(type='Kaiming', layer='Conv2d'), dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm'])]
block = self.arch_settings[depth][0]
if self.zero_init_residual:
if (block is BasicBlock):
block_init_cfg = dict(type='Constant', val=0, override=dict(name='norm2'))
elif (block is Bottleneck):
block_init_cfg = dict(type='Constant', val=0, override=dict(name='norm3'))
else:
raise TypeError('pretrained must be a str or None')
self.depth = depth
self.stem_channels = stem_channels
self.base_channels = base_channels
self.num_stages = num_stages
assert ((num_stages >= 1) and (num_stages <= 4))
self.strides = strides
self.dilations = dilations
assert (len(strides) == len(dilations) == num_stages)
self.out_indices = out_indices
assert (max(out_indices) < num_stages)
self.style = style
self.deep_stem = deep_stem
self.avg_down = avg_down
self.frozen_stages = frozen_stages
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.with_cp = with_cp
self.norm_eval = norm_eval
self.dcn = dcn
self.stage_with_dcn = stage_with_dcn
if (dcn is not None):
assert (len(stage_with_dcn) == num_stages)
self.plugins = plugins
self.multi_grid = multi_grid
self.contract_dilation = contract_dilation
(self.block, stage_blocks) = self.arch_settings[depth]
self.stage_blocks = stage_blocks[:num_stages]
self.inplanes = stem_channels
self._make_stem_layer(in_channels, stem_channels)
self.res_layers = []
for (i, num_blocks) in enumerate(self.stage_blocks):
stride = strides[i]
dilation = dilations[i]
dcn = (self.dcn if self.stage_with_dcn[i] else None)
if (plugins is not None):
stage_plugins = self.make_stage_plugins(plugins, i)
else:
stage_plugins = None
stage_multi_grid = (multi_grid if (i == (len(self.stage_blocks) - 1)) else None)
planes = (base_channels * (2 ** i))
res_layer = self.make_res_layer(block=self.block, inplanes=self.inplanes, planes=planes, num_blocks=num_blocks, stride=stride, dilation=dilation, style=self.style, avg_down=self.avg_down, with_cp=with_cp, conv_cfg=conv_cfg, norm_cfg=norm_cfg, dcn=dcn, plugins=stage_plugins, multi_grid=stage_multi_grid, contract_dilation=contract_dilation, init_cfg=block_init_cfg)
self.inplanes = (planes * self.block.expansion)
layer_name = f'layer{(i + 1)}'
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self._freeze_stages()
self.feat_dim = ((self.block.expansion * base_channels) * (2 ** (len(self.stage_blocks) - 1)))
def make_stage_plugins(self, plugins, stage_idx):
stage_plugins = []
for plugin in plugins:
plugin = plugin.copy()
stages = plugin.pop('stages', None)
assert ((stages is None) or (len(stages) == self.num_stages))
if ((stages is None) or stages[stage_idx]):
stage_plugins.append(plugin)
return stage_plugins
def make_res_layer(self, **kwargs):
return ResLayer(**kwargs)
@property
def norm1(self):
return getattr(self, self.norm1_name)
def _make_stem_layer(self, in_channels, stem_channels):
if self.deep_stem:
self.stem = nn.Sequential(build_conv_layer(self.conv_cfg, in_channels, (stem_channels // 2), kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, (stem_channels // 2))[1], nn.ReLU(inplace=True), build_conv_layer(self.conv_cfg, (stem_channels // 2), (stem_channels // 2), kernel_size=3, stride=1, padding=1, bias=False), build_norm_layer(self.norm_cfg, (stem_channels // 2))[1], nn.ReLU(inplace=True), build_conv_layer(self.conv_cfg, (stem_channels // 2), stem_channels, kernel_size=3, stride=1, padding=1, bias=False), build_norm_layer(self.norm_cfg, stem_channels)[1], nn.ReLU(inplace=True))
else:
self.conv1 = build_conv_layer(self.conv_cfg, in_channels, stem_channels, kernel_size=7, stride=2, padding=3, bias=False)
(self.norm1_name, norm1) = build_norm_layer(self.norm_cfg, stem_channels, postfix=1)
self.add_module(self.norm1_name, norm1)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def _freeze_stages(self):
if (self.frozen_stages >= 0):
if self.deep_stem:
self.stem.eval()
for param in self.stem.parameters():
param.requires_grad = False
else:
self.norm1.eval()
for m in [self.conv1, self.norm1]:
for param in m.parameters():
param.requires_grad = False
for i in range(1, (self.frozen_stages + 1)):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False
def forward(self, x):
if self.deep_stem:
x = self.stem(x)
else:
x = self.conv1(x)
x = self.norm1(x)
x = self.relu(x)
x = self.maxpool(x)
outs = []
for (i, layer_name) in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
x = res_layer(x)
if (i in self.out_indices):
outs.append(x)
return tuple(outs)
def train(self, mode=True):
super(ResNet, self).train(mode)
self._freeze_stages()
if (mode and self.norm_eval):
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval() |
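# Feature-pyramid sketch, assuming the mmsegmentation dependencies (Bottleneck,
# ResLayer, mmcv's build_conv_layer/build_norm_layer) are importable alongside this class.
import torch

backbone = ResNet(depth=50, out_indices=(0, 1, 2, 3))
backbone.eval()
with torch.no_grad():
    outs = backbone(torch.randn(1, 3, 224, 224))
for (feat, channels, stride) in zip(outs, (256, 512, 1024, 2048), (4, 8, 16, 32)):
    assert feat.shape == (1, channels, 224 // stride, 224 // stride) |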
def parse_mmit_splits():
def line_to_map(x):
video = osp.splitext(x[0])[0]
labels = [int(digit) for digit in x[1:]]
return (video, labels)
with open('data/mmit/annotations/trainingSet.csv') as f:
    train_list = [line_to_map(x) for x in csv.reader(f)]
with open('data/mmit/annotations/validationSet.csv') as f:
    val_list = [line_to_map(x) for x in csv.reader(f)]
test_list = val_list
splits = ((train_list, val_list, test_list),)
return splits |
class Solution(object):
def sortedSquares(self, A):
pos = 0
while ((pos < len(A)) and (A[pos] < 0)):
pos += 1
npos = (pos - 1)
res = []
while ((pos < len(A)) and (npos >= 0)):
if ((A[npos] ** 2) < (A[pos] ** 2)):
res.append((A[npos] ** 2))
npos -= 1
else:
res.append((A[pos] ** 2))
pos += 1
while (npos >= 0):
res.append((A[npos] ** 2))
npos -= 1
while (pos < len(A)):
res.append((A[pos] ** 2))
pos += 1
return res |
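# The two-pointer merge above runs in O(n); quick checks on the usual sample inputs.
assert Solution().sortedSquares([-4, -1, 0, 3, 10]) == [0, 1, 9, 16, 100]
assert Solution().sortedSquares([-7, -3, 2, 3, 11]) == [4, 9, 9, 49, 121] |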
def test_r2plus1d():
config = get_recognizer_cfg('r2plus1d/r2plus1d_r34_8x8x1_180e_kinetics400_rgb.py')
config.model['backbone']['pretrained2d'] = False
config.model['backbone']['pretrained'] = None
config.model['backbone']['norm_cfg'] = dict(type='BN3d')
recognizer = build_recognizer(config.model)
recognizer.cfg = config
input_shape = (1, 3, 3, 8, 32, 32)
target_layer_name = 'backbone/layer4/1/relu'
_do_test_3D_models(recognizer, target_layer_name, input_shape) |