code
stringlengths
281
23.7M
def create_page_xml(imageFilename, height, width):
    """Build an empty PAGE-XML (PcGts) document skeleton for one image.

    The metadata records creator and timestamps; the Page element carries the
    image geometry plus default reading direction and text-line order.
    """
    timestamp = datetime.now()
    metadata = MetadataType(Creator='SBB_QURATOR',
                            Created=timestamp,
                            LastChange=timestamp)
    page = PageType(imageWidth=str(width),
                    imageHeight=str(height),
                    imageFilename=imageFilename,
                    readingDirection='left-to-right',
                    textLineOrder='top-to-bottom')
    return PcGtsType(Metadata=metadata, Page=page)
class IdleInTransactions(QueryStats):
    """Report the longest-running idle-in-transaction backend per database."""

    # NOTE(review): 'tranactions' is presumably a typo for 'transactions', but
    # the metric path is consumed by downstream graphing/alerting -- renaming
    # it would break existing series, so it is left untouched here.
    path = '%(datname)s.idle_in_tranactions.%(metric)s'
    # Emit one series per database.
    multi_db = True
    # %s is substituted with the version-appropriate activity predicate below.
    base_query = "\n SELECT 'idle_in_transactions',\n max(COALESCE(ROUND(EXTRACT(epoch FROM now()-query_start)),0))\n AS idle_in_transaction\n FROM pg_stat_activity\n WHERE %s\n GROUP BY 1\n "
    # Pre-9.2 PostgreSQL exposes the idle state through current_query.
    query = (base_query % ("current_query = '<IDLE> in transaction'",))
    # 9.2+ has a dedicated state column.
    post_92_query = (base_query % ("state LIKE 'idle in transaction%'",))
class SplitContainer(Container, QtWidgets.QSplitter):
    """Dock container backed by a QSplitter: children share one axis and the
    splitter's section sizes are what gets saved/restored as state."""

    sigStretchChanged = QtCore.Signal()

    def __init__(self, area, orientation):
        # Initialize the splitter first so Container setup can use it.
        QtWidgets.QSplitter.__init__(self)
        self.setOrientation(orientation)
        Container.__init__(self, area)

    def _insertItem(self, item, index):
        # Drop the widget into the requested slot and make sure it is visible.
        self.insertWidget(index, item)
        item.show()

    def saveState(self):
        # A never-shown splitter reports all-zero sizes; substitute uniform
        # placeholder sizes so restoreState() has something sensible to apply.
        sizes = self.sizes()
        if not any(sizes):
            sizes = [10] * len(sizes)
        return {'sizes': sizes}

    def restoreState(self, state):
        sizes = state['sizes']
        self.setSizes(sizes)
        # Mirror the sizes into stretch factors so resizing keeps proportions.
        for slot, size in enumerate(sizes):
            self.setStretchFactor(slot, size)

    def childEvent(self, ev):
        super().childEvent(ev)
        Container.childEvent_(self, ev)
class CmdFinish(CmdTradeBase):
    """Abort the trade without a deal and notify both parties."""

    key = 'end trade'
    aliases = 'finish trade'
    locks = 'cmd:all()'
    help_category = 'Trading'

    def func(self):
        caller = self.caller
        # Force-close the trade regardless of its acceptance state.
        self.tradehandler.finish(force=True)
        caller.msg((self.str_caller % 'You |raborted|n trade. No deal was made.'))
        notice = (self.str_other % '%s |raborted|n trade. No deal was made.')
        self.msg_other(caller, (notice % caller.key))
class AlbumId(NamedTuple):
    """Identity record for an album, derived from a song's tags and path."""

    id_value: str              # stable album key (first element of album_key)
    title: str                 # albumsort if set, else album
    artist: str                # first non-empty of albumartistsort/albumartist/artist
    discs: int
    tracks: int
    last_directory_parts: str  # last two path components of the song's directory

    # Fixed: this alternate constructor was missing @classmethod, so calling
    # AlbumId.of_song(song) bound the song to `cls` and raised a TypeError.
    @classmethod
    def of_song(cls, s: 'SongWrapper') -> 'AlbumId':
        # Keep only the trailing two directory components as a compact locator.
        parts = s('~dirname').rsplit(os.path.sep, maxsplit=2)[-2:]
        return cls(
            s.album_key[0],
            (s('albumsort', '') or s('album')),
            (s('albumartistsort', '') or s('albumartist') or s('artist')),
            s('~#discs', 1),
            s('~#tracks', 1),
            os.path.join(*parts),
        )
def main(args=None):
    """Command-line entry point for the talpa crust-deformation GUI."""
    if args is None:
        args = sys.argv[1:]
    epilog = 'Talpa is part of the kite InSAR framework.\nMore at DFG Project, University of Kiel\n\n Marius Isken (marius.-potsdam.de)\n Henriette Sudhaus'
    desc = 'Crust deformation modeling'
    parser = ap.ArgumentParser(
        prog='talpa',
        epilog=epilog,
        description=desc,
        parents=[],
        formatter_class=ap.RawTextHelpFormatter,
        prefix_chars='-',
        fromfile_prefix_chars=None,
        argument_default=ap.SUPPRESS,
        conflict_handler='resolve',
        add_help=True)
    parser.add_argument('file', type=str,
                        help='Load SandboxScene from file (.yml)',
                        default=None, nargs='?')
    parser.add_argument('--verbose', '-v', action='count', default=1,
                        help='Verbosity, add multiple to increase verbosity.')
    ns = parser.parse_args(args)
    # Each -v lowers the threshold by one logging level (10 units), never below 0.
    log_level = logging.WARNING - (ns.verbose * 10)
    logging.basicConfig()
    root_handler = logging.root.handlers[0]
    root_handler.setLevel(level=max(log_level, 0))
    talpa(filename=ns.file)
# NOTE(review): the decorator lines were corrupted in this copy of the file
# ('.slow .parametrize(...) _figures_equal()'). They are reconstructed below
# from the pytest marks and the fig_test/fig_ref fixture signature -- confirm
# the exact decorator names against the original test module.
@pytest.mark.slow
@pytest.mark.parametrize('orient', ['v', 'h'])
@check_figures_equal()
def test_DecisionMatrixPlotter_box(decision_matrix, orient, fig_test, fig_ref):
    """plotter.box() must render the same figure as seaborn.boxplot applied to
    the matrix whose columns are labeled with their objective symbols."""
    dm = decision_matrix(seed=42, min_alternatives=3, max_alternatives=3,
                         min_criteria=3, max_criteria=3)
    plotter = plot.DecisionMatrixPlotter(dm=dm)
    test_ax = fig_test.subplots()
    plotter.box(ax=test_ax, orient=orient)
    # Reference figure: same data, criteria columns annotated with objectives.
    df = dm.matrix
    df.columns = [f'{c} {o.to_symbol()}'
                  for (c, o) in zip(dm.criteria, dm.objectives)]
    df.columns.name = 'Criteria'
    exp_ax = fig_ref.subplots()
    sns.boxplot(data=df, ax=exp_ax, orient=orient)
def filter_empty_instances(instances, by_box=True, by_mask=True, box_threshold=1e-05):
    # Mark instances whose ground-truth box (and/or mask) is empty by writing
    # -1 into their gt_ids; the Instances object itself is returned with the
    # same length.
    # NOTE(review): despite the name, nothing is removed here -- callers appear
    # to rely on the -1 sentinel in gt_ids instead (verify against call sites).
    assert (by_box or by_mask)
    r = []
    if by_box:
        # keep-mask: boxes whose area exceeds box_threshold
        r.append(instances.gt_boxes.nonempty(threshold=box_threshold))
    if (instances.has('gt_masks') and by_mask):
        # keep-mask: masks with at least one foreground pixel
        r.append(instances.gt_masks.nonempty())
    if (not r):
        # Nothing to test against (e.g. by_mask only, but no gt_masks field).
        return instances
    # AND together the individual keep-masks.
    m = r[0]
    for x in r[1:]:
        m = (m & x)
    # Invalidate everything that failed any test.
    instances.gt_ids[(~ m)] = (- 1)
    return instances
class AerospikeCollector(diamond.collector.Collector):
    """Diamond collector that polls an Aerospike node over its telnet info
    port and publishes latency, throughput, global and per-namespace
    statistics, filtered through configurable whitelists.

    NOTE(review): the telnet protocol strings below are written as str, which
    only works on Python 2 (telnetlib on Python 3 requires bytes) -- this
    collector presumably targets the Python 2 diamond runtime; confirm before
    porting.
    """

    def get_default_config_help(self):
        # Human-readable descriptions for every supported config key.
        config_help = super(AerospikeCollector, self).get_default_config_help()
        config_help.update({
            'req_host': 'Hostname',
            'req_port': 'Port',
            'statistics': 'Collect statistics',
            'latency': 'Collect latency metrics',
            'throughput': 'Collect throughput metrics',
            'namespaces': 'Collect per-namespace metrics',
            'namespaces_whitelist': ('List of namespaces to collect metrics' + ' from (default is to collect from all)'),
            'statistics_whitelist': 'List of global statistics values to collect',
            'namespace_statistics_whitelist': 'List of per-namespace statistics values to collect',
            'path': 'Metric path'})
        return config_help

    def get_default_config(self):
        # Defaults: poll localhost:3003 and collect every metric family,
        # filtered by the whitelists below.
        default_config = super(AerospikeCollector, self).get_default_config()
        default_config['req_host'] = 'localhost'
        default_config['req_port'] = 3003
        default_config['statistics'] = True
        default_config['latency'] = True
        default_config['throughput'] = True
        default_config['namespaces'] = True
        # False means "no whitelist" -> collect from all namespaces.
        default_config['namespaces_whitelist'] = False
        # NOTE(review): 'objects', 'cluster_size' and 'client_connections'
        # appear twice below; duplicates are harmless (membership test) but
        # could be cleaned up.
        default_config['statistics_whitelist'] = ['total-bytes-memory', 'total-bytes-disk', 'used-bytes-memory', 'used-bytes-disk', 'free-pct-memory', 'free-pct-disk', 'data-used-bytes-memory', 'cluster_size', 'objects', 'client_connections', 'index-used-bytes-memory', 'objects', 'cluster_size', 'system_free_mem_pct', 'client_connections', 'scans_active']
        default_config['namespace_statistics_whitelist'] = ['objects', 'evicted-objects', 'expired-objects', 'used-bytes-memory', 'data-used-bytes-memory', 'index-used-bytes-memory', 'used-bytes-disk', 'memory-size', 'total-bytes-memory', 'total-bytes-disk', 'migrate-tx-partitions-initial', 'migrate-tx-partitions-remaining', 'migrate-rx-partitions-initial', 'migrate-rx-partitions-remaining', 'available_pct', 'client_delete_error', 'client_delete_success', 'client_read_error', 'client_read_success', 'client_write_error', 'client_write_success', 'device_available_pct', 'device_free_pct', 'device_total_bytes', 'device_used_bytes', 'expired_objects', 'evicted_objects', 'memory-size', 'memory_free_pct', 'memory_used_bytes', 'memory_used_data_bytes', 'memory_used_index_bytes', 'memory_used_sindex_bytes', 'migrate_rx_partitions_active', 'migrate_rx_partitions_initial', 'migrate_rx_partitions_remaining', 'migrate_tx_partitions_active', 'migrate_tx_partitions_initial', 'migrate_tx_partitions_remaining', 'objects']
        default_config['path'] = 'aerospike'
        return default_config

    def collect_latency(self, data):
        # Parse the 'latency:' info response and publish per-histogram gauges.
        # 'dialect' (set in collect()) selects the server response format:
        # >= 3.9 uses "{namespace}-histogram" labels, older versions a fixed
        # set of op types.
        fields = ['ops', '1ms', '8ms', '64ms']
        if (self.config['dialect'] >= 39):
            # Response alternates label;data;label;data;...
            labels = data.split(';')[::2]
            datasets = data.split(';')[1::2]
            for (i, label) in enumerate(labels):
                match = re.match('\\{(\\w+)\\}-(\\w+)', label)
                if match:
                    namespace = match.group(1)
                    histogram = match.group(2)
                    # First column is a timestamp; the rest map onto `fields`.
                    dataset = datasets[i].split(',')[1:]
                    metrics = dict(zip(fields, dataset))
                    for field in fields:
                        self.publish_gauge(('latency.%s.%s.%s' % (namespace, histogram, field)), metrics[field])
        elif (self.config['dialect'] < 39):
            # Legacy format: fixed op-type sections in a fixed order.
            raw_lines = {}
            (raw_lines['reads'], raw_lines['writes_master'], raw_lines['proxy'], raw_lines['udf'], raw_lines['query']) = data.split(';')[1::2]
            for op_type in raw_lines.keys():
                metrics = dict(zip(fields, raw_lines[op_type].split(',')[1:]))
                for metric in metrics.keys():
                    self.publish_gauge(('latency.%s.%s' % (op_type, metric)), metrics[metric])

    def collect_statistics(self, data):
        # Publish whitelisted global 'statistics' values (k=v;k=v;... format).
        gather_stats = self.config['statistics_whitelist']
        for statline in data.split(';'):
            (stat, value) = statline.split('=')
            if (stat in gather_stats):
                self.publish_gauge(('statistics.%s' % stat), value)

    def collect_throughput(self, data):
        # Parse the 'throughput:' info response; same dialect split as latency.
        if (self.config['dialect'] >= 39):
            labels = data.split(';')[::2]
            datasets = data.split(';')[1::2]
            for (i, label) in enumerate(labels):
                match = re.match('\\{(\\w+)\\}-(\\w+)', label)
                if match:
                    namespace = match.group(1)
                    histogram = match.group(2)
                    # Second column carries the throughput value.
                    metric = datasets[i].split(',')[1]
                    self.publish_gauge(('throughput.%s.%s' % (namespace, histogram)), metric)
        elif (self.config['dialect'] < 39):
            raw_lines = {}
            (raw_lines['reads'], raw_lines['writes_master'], raw_lines['proxy'], raw_lines['udf'], raw_lines['query']) = data.split(';')[1::2]
            for op_type in raw_lines.keys():
                metric = raw_lines[op_type].split(',')[1]
                self.publish_gauge(('throughput.%s' % op_type), metric)

    def collect_namespace(self, namespace, data):
        # Publish whitelisted per-namespace statistics (k=v;k=v;... format).
        gather_stats = self.config['namespace_statistics_whitelist']
        for statline in data.split(';'):
            (stat, value) = statline.split('=')
            if (stat in gather_stats):
                self.publish_gauge(('namespace.%s.%s' % (namespace, stat)), value)

    def collect(self):
        # Main entry point called by diamond on each collection interval:
        # open a telnet session, detect the server dialect, then poll each
        # enabled metric family.
        self.log.debug(('Connecting to %s:%s' % (self.config['req_host'], self.config['req_port'])))
        t = telnetlib.Telnet(self.config['req_host'], self.config['req_port'])
        try:
            self.log.debug('Checking aerospike version')
            t.write('version\n')
            # read_until with a 1 second timeout; may return a partial line.
            version = t.read_until('\n', 1)
            if (LooseVersion(version) >= LooseVersion('3.9')):
                self.config['dialect'] = 39
            else:
                self.config['dialect'] = 27
            self.log.debug(('Got version %s and selecting dialect %s' % (version, self.config['dialect'])))
            if self.config['latency']:
                self.log.debug('Polling for latency')
                t.write('latency:\n')
                latency = t.read_until('\n', 1)
                self.collect_latency(latency)
            if self.config['statistics']:
                self.log.debug('Polling for statistics')
                t.write('statistics\n')
                statistics = t.read_until('\n', 1)
                self.collect_statistics(statistics)
            if self.config['throughput']:
                self.log.debug('Polling for throughput')
                t.write('throughput:\n')
                throughput = t.read_until('\n', 1)
                self.collect_throughput(throughput)
            if self.config['namespaces']:
                self.log.debug('Polling for namespaces')
                t.write('namespaces\n')
                namespaces = t.read_until('\n', 1).strip()
                for namespace in namespaces.split(';'):
                    self.log.debug(('Polling namespace: %s' % namespace))
                    # Optional whitelist: skip namespaces not listed.
                    if (self.config['namespaces_whitelist'] and (namespace not in self.config['namespaces_whitelist'])):
                        self.log.debug(('Skipping non-whitelisted namespace: %s' % namespace))
                        continue
                    t.write(('namespace/%s\n' % namespace))
                    namespace_data = t.read_until('\n', 1)
                    self.collect_namespace(namespace, namespace_data)
            t.close()
        except (socket.error, EOFError) as e:
            self.log.error(('Unable to retrieve aerospike data: %s' % e))
        except Exception as e:
            self.log.error(('Unknown failure in aerospike collection: %s' % e))
def get_groups_for_user(user, local=True, maxage=timedelta(seconds=0), targetID=None, use_volatile=True):
    """Fetch group membership for `user`, served through the project cache.

    `local` selects local vs. network groups; `maxage` controls how stale a
    cached answer may be before the command is re-run.
    """
    scope = 'local' if local else 'network'
    groups_cmd = ops.cmd.getDszCommand('groups',
                                       local=local,
                                       network=(not local),
                                       user=user)
    # Cache key encodes both the scope and the (upper-cased) user name.
    tag = '%s_%s_%s' % (USERGROUPS_TAG_BASE, scope.upper(), user.upper())
    return ops.project.generic_cache_get(groups_cmd,
                                         cache_tag=tag,
                                         maxage=maxage,
                                         use_volatile=use_volatile,
                                         targetID=targetID)
class Dog(Creature):
    """A friendly dog: weak attacker, happy to be hugged."""

    def __init__(self, rand):
        super().__init__(rand)
        self.name = 'Dog'
        self.attack = [1, 4]   # damage range
        self.love = 2
        self.hp = self.hp_max = 10
        self.images = ['dog_normal']

    def give_hug(self):
        # Barks appreciatively when hugged.
        super().give_hug()
        self.images = ['dog_bark']

    def deal_attack(self, dmg):
        # Return to the neutral sprite after attacking.
        super().deal_attack(dmg)
        self.images = ['dog_normal']
def merge(seqs: list[list[TypeInfo]]) -> list[TypeInfo]:
    """C3-merge the given linearizations into one, raising MroError when no
    consistent ordering exists.

    A candidate head is acceptable only when it appears in the tail of no
    remaining sequence; accepted heads are appended to the result and popped
    from every sequence that starts with them.
    """
    remaining = [list(s) for s in seqs]
    linearized: list[TypeInfo] = []
    while True:
        # Drop exhausted sequences; done once nothing is left.
        remaining = [s for s in remaining if s]
        if not remaining:
            return linearized
        for candidate_seq in remaining:
            candidate = candidate_seq[0]
            if all(candidate not in s[1:] for s in remaining):
                break
        else:
            # Every head conflicts with some tail: inconsistent hierarchy.
            raise MroError()
        linearized.append(candidate)
        for s in remaining:
            if s[0] is candidate:
                del s[0]
class GEN():
    """IRGAN-style generator (TensorFlow 1.x): scores items for a user with a
    dot-product model (user/item embeddings plus item bias), trained by
    policy gradient against an externally supplied reward.

    Args:
        itemNum, userNum: catalogue sizes.
        emb_dim: embedding dimensionality.
        lamda: L2 regularization weight.
        param: optional [user_emb, item_emb, item_bias] arrays to warm-start
            from; None means random uniform initialization.
        initdelta: half-width of the uniform init range.
        learning_rate: SGD step size.
    """

    def __init__(self, itemNum, userNum, emb_dim, lamda, param=None, initdelta=0.05, learning_rate=0.05):
        self.itemNum = itemNum
        self.userNum = userNum
        self.emb_dim = emb_dim
        self.lamda = lamda
        self.param = param
        self.initdelta = initdelta
        self.learning_rate = learning_rate
        self.g_params = []
        with tf.variable_scope('generator'):
            # Fixed: compare with `is None` instead of `== None` -- if param is
            # a numpy array, `==` broadcasts elementwise and the truth test
            # raises; `is` is also the idiomatic check.
            if self.param is None:
                self.user_embeddings = tf.Variable(
                    tf.random_uniform([self.userNum, self.emb_dim],
                                      minval=(-initdelta),
                                      maxval=self.initdelta,
                                      dtype=tf.float32))
                self.item_embeddings = tf.Variable(
                    tf.random_uniform([self.itemNum, self.emb_dim],
                                      minval=(-initdelta),
                                      maxval=self.initdelta,
                                      dtype=tf.float32))
                self.item_bias = tf.Variable(tf.zeros([self.itemNum]))
            else:
                # Warm start from pre-trained parameters.
                self.user_embeddings = tf.Variable(self.param[0])
                self.item_embeddings = tf.Variable(self.param[1])
                self.item_bias = tf.Variable(self.param[2])
            self.g_params = [self.user_embeddings, self.item_embeddings, self.item_bias]
        # Placeholders: user id(s), item id(s) and the discriminator reward.
        self.u = tf.placeholder(tf.int32)
        self.i = tf.placeholder(tf.int32)
        self.reward = tf.placeholder(tf.float32)
        self.u_embedding = tf.nn.embedding_lookup(self.user_embeddings, self.u)
        self.i_embedding = tf.nn.embedding_lookup(self.item_embeddings, self.i)
        self.i_bias = tf.gather(self.item_bias, self.i)
        # Scores for every item for the given user.
        self.all_logits = (tf.reduce_sum(tf.multiply(self.u_embedding, self.item_embeddings), 1) + self.item_bias)
        # Softmax probability of the sampled item i.
        self.i_prob = tf.gather(tf.reshape(tf.nn.softmax(tf.reshape(self.all_logits, [1, -1])), [-1]), self.i)
        # REINFORCE loss: -E[log pi(i) * reward] plus L2 regularization.
        self.gan_loss = ((-tf.reduce_mean((tf.log(self.i_prob) * self.reward)))
                         + (self.lamda * ((tf.nn.l2_loss(self.u_embedding)
                                           + tf.nn.l2_loss(self.i_embedding))
                                          + tf.nn.l2_loss(self.i_bias))))
        g_opt = tf.train.GradientDescentOptimizer(self.learning_rate)
        self.gan_updates = g_opt.minimize(self.gan_loss, var_list=self.g_params)
        # Full user x item rating matrix for ranking at inference time.
        self.all_rating = (tf.matmul(self.u_embedding, self.item_embeddings,
                                     transpose_a=False, transpose_b=True)
                           + self.item_bias)
class TransformedDataset(Dataset):
    """Wrap a dataset and apply `transform` to its image component.

    When the underlying items are tuples, `img_index` selects which element is
    the image; the remaining element (assumed at index 1 - img_index) is
    passed through untouched. Non-tuple items are transformed directly.
    """

    def __init__(self, source, transform, img_index=0):
        self.source = source
        self.transform = transform
        self.img_index = img_index

    def __len__(self):
        return len(self.source)

    def __getitem__(self, index):
        item = self.source[index]
        if not isinstance(item, tuple):
            return self.transform(item)
        image = self.transform(item[self.img_index])
        label = item[(1 - self.img_index)]
        return (image, label)
class Ball(pyglet.sprite.Sprite):
    """A bouncing ball sprite: random spawn point and velocity, reflects off
    the window edges with a sound effect."""

    ball_image = pyglet.resource.image(BALL_IMAGE)
    width = ball_image.width
    height = ball_image.height

    def __init__(self):
        # Spawn somewhere fully inside the window.
        spawn_x = random.random() * (window.width - self.width)
        spawn_y = random.random() * (window.height - self.height)
        super(Ball, self).__init__(self.ball_image, spawn_x, spawn_y, batch=balls_batch)
        # Velocity components in [-500, 500) px/s.
        self.dx = (random.random() - 0.5) * 1000
        self.dy = (random.random() - 0.5) * 1000

    def update_position(self, dt):
        # Reflect off the left/right walls.
        if self.x <= 0 or self.x + self.width >= window.width:
            self.dx = -self.dx
            sound.play()
        # Reflect off the top/bottom walls.
        if self.y <= 0 or self.y + self.height >= window.height:
            self.dy = -self.dy
            sound.play()
        self.x += self.dx * dt
        self.y += self.dy * dt
        # Clamp so a large dt cannot push the ball outside the window.
        self.x = min(max(self.x, 0), window.width - self.width)
        self.y = min(max(self.y, 0), window.height - self.height)
class _FunctionCorrelation(torch.autograd.Function):
    """Custom autograd Function computing a 9x9 (81-channel) cost-volume
    correlation between two feature maps via CuPy-compiled CUDA kernels.

    NOTE(review): this uses the legacy (pre-1.5) autograd style -- forward and
    backward take `self` and use self.save_for_backward -- which newer PyTorch
    versions reject; confirm the targeted torch version before upgrading.
    """

    def forward(self, first, second):
        # Padded, channel-last rearrangements of the two inputs (+8 = 4px of
        # padding on each side for the 9x9 displacement window).
        rbot0 = first.new_zeros([first.size(0), (first.size(2) + 8), (first.size(3) + 8), first.size(1)])
        rbot1 = first.new_zeros([first.size(0), (first.size(2) + 8), (first.size(3) + 8), first.size(1)])
        self.save_for_backward(first, second, rbot0, rbot1)
        # The kernels index raw pointers, so contiguity is mandatory.
        assert (first.is_contiguous() == True)
        assert (second.is_contiguous() == True)
        # One output channel per displacement in the 9x9 window.
        output = first.new_zeros([first.size(0), 81, first.size(2), first.size(3)])
        if (first.is_cuda == True):
            # Rearrange `first` into rbot0.
            n = (first.size(2) * first.size(3))
            cupy_launch('kernel_Correlation_rearrange', cupy_kernel('kernel_Correlation_rearrange', {'input': first, 'output': rbot0}))(grid=tuple([int((((n + 16) - 1) / 16)), first.size(1), first.size(0)]), block=tuple([16, 1, 1]), args=[n, first.data_ptr(), rbot0.data_ptr()], stream=Stream)
            # Rearrange `second` into rbot1.
            n = (second.size(2) * second.size(3))
            cupy_launch('kernel_Correlation_rearrange', cupy_kernel('kernel_Correlation_rearrange', {'input': second, 'output': rbot1}))(grid=tuple([int((((n + 16) - 1) / 16)), second.size(1), second.size(0)]), block=tuple([16, 1, 1]), args=[n, second.data_ptr(), rbot1.data_ptr()], stream=Stream)
            # Compute the correlation volume.
            n = ((output.size(1) * output.size(2)) * output.size(3))
            cupy_launch('kernel_Correlation_updateOutput', cupy_kernel('kernel_Correlation_updateOutput', {'rbot0': rbot0, 'rbot1': rbot1, 'top': output}))(grid=tuple([output.size(3), output.size(2), output.size(0)]), block=tuple([32, 1, 1]), shared_mem=(first.size(1) * 4), args=[n, rbot0.data_ptr(), rbot1.data_ptr(), output.data_ptr()], stream=Stream)
        elif (first.is_cuda == False):
            # No CPU fallback is provided.
            raise NotImplementedError()
        return output

    def backward(self, gradOutput):
        (first, second, rbot0, rbot1) = self.saved_tensors
        assert (gradOutput.is_contiguous() == True)
        # Allocate gradients only for inputs that require them.
        gradFirst = (first.new_zeros([first.size(0), first.size(1), first.size(2), first.size(3)]) if (self.needs_input_grad[0] == True) else None)
        gradSecond = (first.new_zeros([first.size(0), first.size(1), first.size(2), first.size(3)]) if (self.needs_input_grad[1] == True) else None)
        if (first.is_cuda == True):
            if (gradFirst is not None):
                # The gradient kernels process one sample at a time.
                for intSample in range(first.size(0)):
                    n = ((first.size(1) * first.size(2)) * first.size(3))
                    cupy_launch('kernel_Correlation_updateGradFirst', cupy_kernel('kernel_Correlation_updateGradFirst', {'rbot0': rbot0, 'rbot1': rbot1, 'gradOutput': gradOutput, 'gradFirst': gradFirst, 'gradSecond': None}))(grid=tuple([int((((n + 512) - 1) / 512)), 1, 1]), block=tuple([512, 1, 1]), args=[n, intSample, rbot0.data_ptr(), rbot1.data_ptr(), gradOutput.data_ptr(), gradFirst.data_ptr(), None], stream=Stream)
            if (gradSecond is not None):
                for intSample in range(first.size(0)):
                    n = ((first.size(1) * first.size(2)) * first.size(3))
                    cupy_launch('kernel_Correlation_updateGradSecond', cupy_kernel('kernel_Correlation_updateGradSecond', {'rbot0': rbot0, 'rbot1': rbot1, 'gradOutput': gradOutput, 'gradFirst': None, 'gradSecond': gradSecond}))(grid=tuple([int((((n + 512) - 1) / 512)), 1, 1]), block=tuple([512, 1, 1]), args=[n, intSample, rbot0.data_ptr(), rbot1.data_ptr(), gradOutput.data_ptr(), None, gradSecond.data_ptr()], stream=Stream)
        elif (first.is_cuda == False):
            raise NotImplementedError()
        return (gradFirst, gradSecond)
def update_config(config_file):
    """Merge a YAML experiment config file into the module-level `config`.

    Nested dicts are merged via _update_dict, the 'SCALES' list is coerced to
    a tuple, and unknown top-level keys raise ValueError.
    """
    with open(config_file) as f:
        exp_config = edict(yaml.load(f, Loader=yaml.FullLoader))
    for key, value in exp_config.items():
        # Reject keys that config.py does not define.
        if key not in config:
            raise ValueError('{} not exist in config.py'.format(key))
        if isinstance(value, dict):
            _update_dict(config, key, value)
        elif key == 'SCALES':
            config[key][0] = tuple(value)
        else:
            config[key] = value
def test_store_blob(initialized_db):
    # Storing a new blob record should round-trip its digest and sizes.
    location = database.ImageStorageLocation.select().get()
    digest = 'somecooldigest'
    blob_storage = model.blob.store_blob_record_and_temp_link(ADMIN_ACCESS_USER, REPO, digest, location, 1024, 0, 5000)
    assert (blob_storage.content_checksum == digest)
    assert (blob_storage.image_size == 1024)
    assert (blob_storage.uncompressed_size == 5000)
    # Re-storing the same digest must return the existing record unchanged --
    # the new sizes (2048/6000) are ignored.
    blob_storage2 = model.blob.store_blob_record_and_temp_link(ADMIN_ACCESS_USER, REPO, digest, location, 2048, 0, 6000)
    assert (blob_storage2.id == blob_storage.id)
    assert (blob_storage2.image_size == 1024)
    assert (blob_storage2.uncompressed_size == 5000)
    # A different digest creates a distinct record with its own sizes.
    otherdigest = 'anotherdigest'
    blob_storage3 = model.blob.store_blob_record_and_temp_link(ADMIN_ACCESS_USER, REPO, otherdigest, location, 1234, 0, 5678)
    assert (blob_storage3.id != blob_storage.id)
    assert (blob_storage3.image_size == 1234)
    assert (blob_storage3.uncompressed_size == 5678)
def setup_multi_processes(cfg):
    """Configure multiprocessing start method and thread-count defaults to
    avoid CPU oversubscription when dataloader workers are in use."""
    # Windows only supports 'spawn'; leave the start method untouched there.
    if platform.system() != 'Windows':
        mp_start_method = cfg.get('mp_start_method', 'fork')
        current_method = mp.get_start_method(allow_none=True)
        if current_method is not None and current_method != mp_start_method:
            warnings.warn(f'Multi-processing start method `{mp_start_method}` is different from the previous setting `{current_method}`.It will be force set to `{mp_start_method}`. You can change this behavior by changing `mp_start_method` in your config.')
        mp.set_start_method(mp_start_method, force=True)
    # Disable OpenCV's internal threading by default.
    opencv_num_threads = cfg.get('opencv_num_threads', 0)
    cv2.setNumThreads(opencv_num_threads)
    # Determine the largest worker count in use across dataloaders.
    workers_per_gpu = cfg.data.get('workers_per_gpu', 1)
    if 'train_dataloader' in cfg.data:
        workers_per_gpu = max(cfg.data.train_dataloader.get('workers_per_gpu', 1), workers_per_gpu)
    # Cap OMP/MKL threads per worker process unless the user set them already.
    if 'OMP_NUM_THREADS' not in os.environ and workers_per_gpu > 1:
        omp_num_threads = 1
        warnings.warn(f'Setting OMP_NUM_THREADS environment variable for each process to be {omp_num_threads} in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.')
        os.environ['OMP_NUM_THREADS'] = str(omp_num_threads)
    if 'MKL_NUM_THREADS' not in os.environ and workers_per_gpu > 1:
        mkl_num_threads = 1
        warnings.warn(f'Setting MKL_NUM_THREADS environment variable for each process to be {mkl_num_threads} in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.')
        os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads)
class Migration(migrations.Migration):
    # Creates the PublisherPayout model: a payment record (amount, date, note,
    # optional receipt attachment) tied to a Publisher, newest payout first.

    dependencies = [('adserver', '0028_ad_network_defaults')]

    operations = [migrations.CreateModel(
        name='PublisherPayout',
        fields=[
            ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
            ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
            # UUID primary key instead of an auto-increment id.
            ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
            ('amount', models.DecimalField(decimal_places=2, default=0, max_digits=8, verbose_name='Amount')),
            ('date', models.DateTimeField(verbose_name='Payout date')),
            ('note', models.TextField(blank=True, help_text='A publisher-visible note about the payout', null=True, verbose_name='Note')),
            ('attachment', models.FileField(blank=True, help_text='A publisher-visible attachment such as a receipt', max_length=255, null=True, upload_to='payouts/%Y/%m/', verbose_name='Attachment')),
            # PROTECT: a publisher with payouts cannot be deleted.
            ('publisher', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='payouts', to='adserver.Publisher'))],
        options={'ordering': ('-date',)})]
def main():
    # Training entry point (mmseg-style): parse CLI args, assemble the config,
    # set up distributed mode and logging, build model + datasets, then train.
    args = parse_args()
    cfg = Config.fromfile(args.config)
    # CLI --options override config-file values.
    if (args.options is not None):
        cfg.merge_from_dict(args.options)
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # Work dir priority: CLI > config > ./work_dirs/<config-stem>.
    if (args.work_dir is not None):
        cfg.work_dir = args.work_dir
    elif (cfg.get('work_dir', None) is None):
        cfg.work_dir = osp.join('./work_dirs', osp.splitext(osp.basename(args.config))[0])
    if (args.load_from is not None):
        cfg.load_from = args.load_from
    if (args.resume_from is not None):
        cfg.resume_from = args.resume_from
    if (args.gpu_ids is not None):
        cfg.gpu_ids = args.gpu_ids
    else:
        cfg.gpu_ids = (range(1) if (args.gpus is None) else range(args.gpus))
    # launcher 'none' means single-process (non-distributed) training.
    if (args.launcher == 'none'):
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # Snapshot the effective config next to the logs for reproducibility.
    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    # `meta` is persisted into checkpoints for provenance.
    meta = dict()
    env_info_dict = collect_env()
    env_info = '\n'.join([f'{k}: {v}' for (k, v) in env_info_dict.items()])
    dash_line = (('-' * 60) + '\n')
    logger.info((((('Environment info:\n' + dash_line) + env_info) + '\n') + dash_line))
    meta['env_info'] = env_info
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'''Config:
{cfg.pretty_text}''')
    if (args.seed is not None):
        logger.info(f'Set random seed to {args.seed}, deterministic: {args.deterministic}')
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta['seed'] = args.seed
    meta['exp_name'] = osp.basename(args.config)
    model = build_segmentor(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
    logger.info(model)
    datasets = [build_dataset(cfg.data.train)]
    # A two-stage workflow adds a validation dataset that reuses the training
    # pipeline.
    if (len(cfg.workflow) == 2):
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    # Stamp version/config/class metadata into every saved checkpoint.
    if (cfg.checkpoint_config is not None):
        cfg.checkpoint_config.meta = dict(mmseg_version=f'{__version__}+{get_git_hash()[:7]}', config=cfg.pretty_text, CLASSES=datasets[0].CLASSES, PALETTE=datasets[0].PALETTE)
    model.CLASSES = datasets[0].CLASSES
    train_segmentor(model, datasets, cfg, distributed=distributed, validate=(not args.no_validate), timestamp=timestamp, meta=meta)
class BayesianTrainer(BaseTrainer): def __init__(self, model, loss_function, train_data, valid_data, dicts, opt, setup_optimizer=True): super().__init__(model, loss_function, train_data, valid_data, dicts, opt) if self.cuda: torch.cuda.set_device(self.opt.gpus[0]) if (self.opt.seed >= 0): torch.manual_seed(self.opt.seed) self.loss_function = self.loss_function.cuda() self.model = self.model.cuda() if setup_optimizer: self.optim = onmt.Optim(opt) self.optim.set_parameters(self.model.parameters()) if (not self.opt.fp16): opt_level = 'O0' keep_batchnorm_fp32 = False elif self.opt.fp16_mixed: opt_level = 'O1' keep_batchnorm_fp32 = None else: opt_level = 'O2' keep_batchnorm_fp32 = False if self.cuda: (self.model, self.optim.optimizer) = amp.initialize(self.model, self.optim.optimizer, opt_level=opt_level, keep_batchnorm_fp32=keep_batchnorm_fp32, loss_scale='dynamic', verbosity=(1 if self.opt.verbose else 0)) if hasattr(self.model, 'relative'): if self.model.relative: self.train_data.src_align_right = True self.train_data.tgt_align_right = False self.valid_data.src_align_right = True self.valid_data.tgt_align_right = False def warm_up(self): if self.opt.memory_profiling: from pytorch_memlab import MemReporter reporter = MemReporter() batch = self.train_data.get_largest_batch() opt = self.opt if self.cuda: batch.cuda(fp16=(self.opt.fp16 and (not self.opt.fp16_mixed))) self.model.train() self.model.zero_grad() oom = False if self.opt.memory_profiling: print('Input size: ') print(batch.size, batch.src_size, batch.tgt_size) if opt.streaming: streaming_state = self.model.init_stream() else: streaming_state = None try: targets = batch.get('target_output') tgt_mask = targets.data.ne(onmt.constants.PAD) outputs = self.model(batch, streaming=opt.streaming, target_mask=tgt_mask, zero_encoder=opt.zero_encoder, mirror=opt.mirror_loss, streaming_state=streaming_state) outputs['tgt_mask'] = tgt_mask loss_dict = self.loss_function(outputs, targets, model=self.model) loss = 
loss_dict['loss'] log_prior = self.model.log_prior() log_variational_posterior = self.model.log_variational_posterior() full_loss = (loss + (log_variational_posterior - log_prior)) if opt.mirror_loss: rev_loss = loss_dict['rev_loss'] mirror_loss = loss_dict['mirror_loss'] full_loss = ((full_loss + rev_loss) + mirror_loss) if opt.reconstruct: rec_loss = loss_dict['rec_loss'] rec_loss = rec_loss full_loss = (full_loss + rec_loss) optimizer = self.optim.optimizer if self.opt.memory_profiling: reporter.report(verbose=True) if self.cuda: with amp.scale_loss(full_loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() if self.opt.memory_profiling: print(' after backward ') reporter.report(verbose=True) except RuntimeError as e: if ('out of memory' in str(e)): oom = True else: raise e if oom: print('* Warning: out-of-memory in warming up. This is due to the largest batch is too big for the GPU') else: print('* Warming up successuflly.') if self.opt.memory_profiling: if hasattr(torch.cuda, 'memory_summary'): print(torch.cuda.memory_summary()) exit() def save(self, epoch, valid_ppl, itr=None): opt = self.opt model = self.model dicts = self.dicts model_state_dict = self.model.state_dict() optim_state_dict = self.optim.state_dict() if itr: itr_state_dict = itr.state_dict() else: itr_state_dict = None checkpoint = {'model': model_state_dict, 'dicts': dicts, 'opt': opt, 'epoch': epoch, 'itr': itr_state_dict, 'optim': optim_state_dict, 'additional_batch_order': getattr(self, 'additional_batch_order', None), 'additional_data_iteration': getattr(self, 'additional_data_iteration', None), 'amp': amp.state_dict()} file_name = ('%s_ppl_%.6f_e%.2f.pt' % (opt.save_model, valid_ppl, epoch)) print(('Writing to %s' % file_name)) torch.save(checkpoint, file_name) checkpoint_dir = os.path.dirname(opt.save_model) existed_save_files = checkpoint_paths(checkpoint_dir) for save_file in existed_save_files[opt.keep_save_files:]: print((' * Deleting old save file %s ....' 
% save_file)) os.remove(save_file) def eval(self, data): total_loss = 0 total_words = 0 opt = self.opt data_iterator = DataIterator(data, data.collater, data.batches, seed=self.opt.seed, num_workers=opt.num_workers, epoch=1, buffer_size=opt.buffer_size) epoch_iterator = data_iterator.next_epoch_itr(False, pin_memory=False) self.model.eval() self.loss_function.eval() self.model.reset_states() if opt.streaming: streaming_state = self.model.init_stream() else: streaming_state = None ' PyTorch semantics: save space by not creating gradients ' data_size = len(epoch_iterator) i = 0 with torch.no_grad(): while (not data_iterator.end_of_epoch()): batch = next(epoch_iterator) batch = rewrap(batch) if self.cuda: batch.cuda(fp16=(self.opt.fp16 and (not self.opt.fp16_mixed))) ' outputs can be either \n hidden states from decoder or\n prob distribution from decoder generator\n ' targets = batch.get('target_output') tgt_mask = targets.ne(onmt.constants.PAD) outputs = self.model(batch, streaming=opt.streaming, target_mask=tgt_mask, mirror=opt.mirror_loss, streaming_state=streaming_state) if opt.streaming: streaming_state = outputs['streaming_state'] outputs['tgt_mask'] = tgt_mask loss_dict = self.loss_function(outputs, targets, model=self.model, eval=True) loss_data = loss_dict['data'] total_loss += loss_data total_words += batch.tgt_size i = (i + 1) self.model.train() self.loss_function.train() return (total_loss / total_words) def train_epoch(self, epoch, resume=False, itr_progress=None): global rec_ppl opt = self.opt train_data = self.train_data streaming = opt.streaming self.model.train() self.loss_function.train() self.model.zero_grad() self.model.reset_states() dataset = train_data data_iterator = DataIterator(dataset, dataset.collater, dataset.batches, seed=self.opt.seed, num_workers=opt.num_workers, epoch=epoch, buffer_size=opt.buffer_size) if resume: data_iterator.load_state_dict(itr_progress) epoch_iterator = data_iterator.next_epoch_itr((not streaming), 
pin_memory=opt.pin_memory) (total_tokens, total_loss, total_words) = (0, 0, 0) total_non_pads = 0 (report_loss, report_tgt_words) = (0, 0) report_src_words = 0 report_sents = 0 (report_rec_loss, report_rev_loss, report_mirror_loss) = (0, 0, 0) report_log_prior = 0 report_log_variational_posterior = 0 start = time.time() n_samples = len(epoch_iterator) counter = 0 update_counter = 0 num_accumulated_words = 0 num_accumulated_sents = 0 nan = False nan_counter = 0 if opt.streaming: streaming_state = self.model.init_stream() else: streaming_state = None i = data_iterator.iterations_in_epoch while (not data_iterator.end_of_epoch()): curriculum = (epoch < opt.curriculum) batch = next(epoch_iterator) batch = rewrap(batch) grad_scaler = (self.opt.batch_size_words if (self.opt.update_frequency > 1) else batch.tgt_size) if self.cuda: batch.cuda(fp16=(self.opt.fp16 and (not self.opt.fp16_mixed))) oom = False try: targets = batch.get('target_output') tgt_mask = targets.data.ne(onmt.constants.PAD) outputs = self.model(batch, streaming=opt.streaming, target_mask=tgt_mask, zero_encoder=opt.zero_encoder, mirror=opt.mirror_loss, streaming_state=streaming_state) batch_size = batch.size outputs['tgt_mask'] = tgt_mask loss_dict = self.loss_function(outputs, targets, model=self.model) loss_data = loss_dict['data'] loss = loss_dict['loss'] log_prior = self.model.log_prior() log_variational_posterior = self.model.log_variational_posterior() kl_coeff = (1 / (batch.tgt_size * opt.update_frequency)) full_loss = (loss + (kl_coeff * (log_variational_posterior - log_prior))) if opt.mirror_loss: rev_loss = loss_dict['rev_loss'] rev_loss_data = loss_dict['rev_loss_data'] mirror_loss = loss_dict['mirror_loss'] full_loss = ((full_loss + rev_loss) + mirror_loss) mirror_loss_data = loss_dict['mirror_loss'].item() else: rev_loss = None rev_loss_data = None mirror_loss_data = 0 if opt.reconstruct: rec_loss = loss_dict['rec_loss'] rec_loss = rec_loss full_loss = (full_loss + rec_loss) rec_loss_data = 
loss_dict['rec_loss_data'] else: rec_loss_data = None optimizer = self.optim.optimizer full_loss.div_(grad_scaler) if self.cuda: with amp.scale_loss(full_loss, optimizer) as scaled_loss: scaled_loss.backward() else: full_loss.backward() except RuntimeError as e: if ('out of memory' in str(e)): print('| WARNING: ran out of memory on GPU , skipping batch') oom = True torch.cuda.empty_cache() loss = 0 if opt.streaming: streaming_state = self.model.init_stream() else: raise e if (loss != loss): oom = True self.model.zero_grad() self.optim.zero_grad() num_accumulated_words = 0 num_accumulated_sents = 0 nan_counter = (nan_counter + 1) print('Warning!!! Loss is Nan') if (nan_counter >= 15): raise ValueError('Training stopped because of multiple NaN occurence. For ASR, using the Relative Transformer is more stable and recommended.') else: nan_counter = 0 if (not oom): src_size = batch.src_size tgt_size = batch.tgt_size counter = (counter + 1) num_accumulated_words += tgt_size num_accumulated_sents += batch_size update_flag = False if (counter >= opt.update_frequency > 0): update_flag = True elif (0 < opt.batch_size_update <= num_accumulated_words): update_flag = True elif (i == n_samples): update_flag = True if update_flag: if (((counter == 1) and (self.opt.update_frequency != 1)) or (counter > 1)): grad_denom = (1 / grad_scaler) if self.opt.normalize_gradient: grad_denom = (num_accumulated_words * grad_denom) else: grad_denom = 1 normalize_gradients(amp.master_params(optimizer), grad_denom) if (self.opt.max_grad_norm > 0): torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), self.opt.max_grad_norm) self.optim.step() self.optim.zero_grad() self.model.zero_grad() counter = 0 num_accumulated_words = 0 num_accumulated_sents = 0 num_updates = self.optim._step update_counter += 1 if ((opt.save_every > 0) and ((num_updates % opt.save_every) == ((- 1) % opt.save_every))): valid_loss = self.eval(self.valid_data) valid_ppl = math.exp(min(valid_loss, 100)) print(('Validation 
perplexity: %g' % valid_ppl)) ep = ((float(epoch) - 1.0) + ((float(i) + 1.0) / n_samples)) self.save(ep, valid_ppl, itr=data_iterator) num_words = tgt_size report_loss += loss_data report_log_prior += log_prior.item() report_log_variational_posterior += log_variational_posterior.item() report_tgt_words += num_words report_src_words += src_size report_sents += 1 total_loss += loss_data total_words += num_words total_tokens += batch.get('target_output').nelement() total_non_pads += batch.get('target_output').ne(onmt.constants.PAD).sum().item() optim = self.optim batch_efficiency = (total_non_pads / total_tokens) if opt.reconstruct: report_rec_loss += rec_loss_data if opt.mirror_loss: report_rev_loss += rev_loss_data report_mirror_loss += mirror_loss_data if ((i == 0) or ((i % opt.log_interval) == ((- 1) % opt.log_interval))): log_string = ('Epoch %2d, %5d/%5d; ; ppl: %6.2f ; ' % (epoch, (i + 1), len(data_iterator), math.exp((report_loss / report_tgt_words)))) kl_div = (report_log_variational_posterior - report_log_prior) log_string += ('KL q||p: %6.2f ; ' % (kl_div / report_sents)) if opt.reconstruct: rec_ppl = math.exp((report_rec_loss / report_src_words.item())) log_string += (' rec_ppl: %6.2f ; ' % rec_ppl) if opt.mirror_loss: rev_ppl = math.exp((report_rev_loss / report_tgt_words)) log_string += (' rev_ppl: %6.2f ; ' % rev_ppl) log_string += (' mir_loss: %6.2f ; ' % (report_mirror_loss / report_tgt_words)) log_string += ('lr: %.7f ; updates: %7d; ' % (optim.getLearningRate(), optim._step)) log_string += ('%5.0f src/s; %5.0f tgt/s; ' % ((report_src_words / (time.time() - start)), (report_tgt_words / (time.time() - start)))) log_string += ('%s elapsed' % str(datetime.timedelta(seconds=int((time.time() - self.start_time))))) print(log_string) report_loss = 0 (report_tgt_words, report_src_words) = (0, 0) report_sents = 0 (report_rec_loss, report_rev_loss, report_mirror_loss) = (0, 0, 0) (report_log_prior, report_log_variational_posterior) = (0, 0) start = time.time() 
i = (i + 1) return (total_loss / total_words) def run(self, checkpoint=None): opt = self.opt model = self.model optim = self.optim if (checkpoint is not None): self.model.load_state_dict(checkpoint['model']) prec_opt = (checkpoint['opt'] if ('opt' in checkpoint) else None) if (not opt.reset_optim): self.optim.load_state_dict(checkpoint['optim']) if ((prec_opt is not None) and hasattr(prec_opt, 'fp16_mixed')): if ((opt.fp16_mixed == prec_opt.fp16_mixed) and (opt.fp16 == prec_opt.fp16)): if ('amp' in checkpoint): amp.load_state_dict(checkpoint['amp']) if ('itr' in checkpoint): itr_progress = checkpoint['itr'] else: itr_progress = None opt.start_epoch = int(math.floor(float((checkpoint['epoch'] + 1)))) resume = True else: itr_progress = None resume = False del checkpoint['model'] del checkpoint['optim'] del checkpoint else: itr_progress = None print('Initializing model parameters') init_model_parameters(model, opt) resume = False if opt.load_encoder_from: self.load_encoder_weight(opt.load_encoder_from) if opt.load_decoder_from: self.load_decoder_weight(opt.load_decoder_from) if self.cuda: self.warm_up() valid_loss = self.eval(self.valid_data) valid_ppl = math.exp(min(valid_loss, 100)) print(('Validation perplexity: %g' % valid_ppl)) self.start_time = time.time() for epoch in range(opt.start_epoch, (opt.start_epoch + opt.epochs)): print('') train_loss = self.train_epoch(epoch, resume=resume, itr_progress=itr_progress) train_ppl = math.exp(min(train_loss, 100)) print(('Train perplexity: %g' % train_ppl)) valid_loss = self.eval(self.valid_data) valid_ppl = math.exp(min(valid_loss, 100)) print(('Validation perplexity: %g' % valid_ppl)) self.save(epoch, valid_ppl) itr_progress = None resume = False
class Portfolio():
    """Backtest portfolio: tracks cash, open/closed positions, net liquidation
    value and gross exposure, and optionally records a time series of portfolio
    state for later analysis."""

    def __init__(self, data_handler: DataHandler, initial_cash: float, timer: Timer):
        self.initial_cash = initial_cash
        self.data_handler = data_handler
        self.timer = timer
        # Net liquidation value = current cash + market value of open positions.
        self.net_liquidation = initial_cash
        # Sum of absolute exposures of all open positions (drives leverage).
        self.gross_exposure_of_positions = 0
        self.current_cash = initial_cash
        # Maps ticker -> open position object.
        self.open_positions_dict = {}
        # History buffers; appended only when update(record=True) is called.
        self._dates = []
        self._portfolio_values = []
        self._leverage_list = []
        self._positions_history = []
        self._closed_positions = []
        self.logger = qf_logger.getChild(self.__class__.__name__)

    def transact_transaction(self, transaction: Transaction):
        """Apply a transaction to the portfolio: open a new position, adjust an
        existing one, or close it (and possibly re-open in the opposite
        direction), then adjust the cash balance by the resulting cash flow."""
        transaction_cost = 0.0
        existing_position = self.open_positions_dict.get(transaction.ticker, None)
        if (existing_position is None):
            new_position = self._create_new_position(transaction)
            transaction_cost += new_position.transact_transaction(transaction)
        else:
            # If the transaction would flip the direction of the existing position
            # it is split: `basic_transaction` flattens the old position and
            # `remaining_transaction` opens a new one in the opposite direction.
            (results_in_opposite_direction, basic_transaction, remaining_transaction) = split_transaction_if_needed(existing_position.quantity(), transaction)
            transaction_cost += existing_position.transact_transaction(basic_transaction)
            if existing_position.is_closed():
                ticker = transaction.ticker
                self.open_positions_dict.pop(ticker)
                self._closed_positions.append(existing_position)
            if results_in_opposite_direction:
                new_position = self._create_new_position(remaining_transaction)
                transaction_cost += new_position.transact_transaction(remaining_transaction)
        # NOTE(review): despite its name, `transaction_cost` is added to cash, so
        # transact_transaction() presumably returns the signed cash impact of the
        # trade (negative for purchases) -- confirm the sign convention in the
        # position implementation.
        self.current_cash += transaction_cost

    def update(self, record=False):
        """Re-price all open positions with the latest available prices and
        recompute net liquidation and gross exposure. When `record` is True,
        also append a snapshot (date, value, leverage, per-ticker summary) to
        the history buffers."""
        self.net_liquidation = self.current_cash
        self.gross_exposure_of_positions = 0
        tickers = list(self.open_positions_dict.keys())
        current_prices_series = self.data_handler.get_last_available_price(tickers=tickers)
        current_positions = {}
        for (ticker, position) in self.open_positions_dict.items():
            security_price = current_prices_series[ticker]
            # Bid and ask are both set to the last price (no spread modeled here).
            position.update_price(bid_price=security_price, ask_price=security_price)
            position_value = position.market_value()
            position_exposure = position.total_exposure()
            self.net_liquidation += position_value
            self.gross_exposure_of_positions += abs(position_exposure)
            if record:
                current_positions[ticker] = BacktestPositionSummary(position)
        if record:
            self._dates.append(self.timer.now())
            self._portfolio_values.append(self.net_liquidation)
            # Leverage = gross exposure / net liquidation value.
            self._leverage_list.append((self.gross_exposure_of_positions / self.net_liquidation))
            self._positions_history.append(current_positions)

    def portfolio_eod_series(self) -> PricesSeries:
        """Return the recorded portfolio values as a PricesSeries indexed by
        date (timestamps are truncated to midnight of the recording day)."""
        end_of_day_date = list(map((lambda x: datetime(x.year, x.month, x.day)), self._dates))
        portfolio_timeseries = PricesSeries(data=self._portfolio_values, index=end_of_day_date)
        return portfolio_timeseries

    def leverage_series(self) -> QFSeries:
        """Return the recorded leverage history indexed by recording timestamp."""
        return QFSeries(data=self._leverage_list, index=self._dates)

    def positions_history(self) -> QFDataFrame:
        """Return the recorded per-ticker position summaries indexed by timestamp."""
        return QFDataFrame(data=self._positions_history, index=self._dates)

    def closed_positions(self) -> List[BacktestPosition]:
        """Return all positions that have been fully closed so far."""
        return self._closed_positions

    def _create_new_position(self, transaction: Transaction):
        # Create and register an empty position for the transaction's ticker.
        new_position = BacktestPositionFactory.create_position(transaction.ticker)
        self.open_positions_dict[transaction.ticker] = new_position
        return new_position
class TestAutoQuant():
    """Integration tests for the AutoQuant post-training-quantization pipeline.

    All PTQ techniques (BN folding, CLE, AdaRound) are patched via
    patch_ptq_techniques so that each stage reports a chosen accuracy.
    """

    def test_auto_quant_run_inference(self, sess, unlabeled_dataset):
        # Smoke test: run_inference() completes with only BN-folding accuracy stubbed.
        bn_folded_acc = 0.5
        with patch_ptq_techniques(bn_folded_acc, None, None) as mocks:
            with create_tmp_directory() as results_dir:
                auto_quant = AutoQuant(sess, starting_ops, ending_ops, unlabeled_dataset, mocks.eval_callback, results_dir=results_dir)
                auto_quant.run_inference()

    # NOTE(review): the decorator prefixes below appear to have been lost in
    # extraction (they read '.parametrize' / '.cuda'); they most likely were
    # '@pytest.mark.parametrize' and '@pytest.mark.cuda' -- restore before running.
    .parametrize('bn_folded_acc, cle_acc, adaround_acc', itertools.permutations([0.5, 0.6, 0.7]))
    .parametrize('allowed_accuracy_drop', [0.05, 0.15])
    def test_auto_quant_cpu(self, sess, unlabeled_dataset, allowed_accuracy_drop, bn_folded_acc, cle_acc, adaround_acc):
        # Exercise all orderings of technique accuracies on CPU.
        self._test_auto_quant(sess, unlabeled_dataset, allowed_accuracy_drop, bn_folded_acc, cle_acc, adaround_acc)

    .cuda
    def test_auto_quant_gpu(self, gpu_session, unlabeled_dataset):
        # Same flow on a GPU session.
        (bn_folded_acc, cle_acc, adaround_acc) = (0.5, 0.6, 0.7)
        allowed_accuracy_drop = 0.15
        self._test_auto_quant(gpu_session, unlabeled_dataset, allowed_accuracy_drop, bn_folded_acc, cle_acc, adaround_acc)

    def test_consecutive_calls(self, sess, unlabeled_dataset):
        """Repeated optimize() calls on one AutoQuant object must be independent
        and re-run the (non-cached) techniques each time."""
        (bn_folded_acc, cle_acc, adaround_acc) = (0.5, 0.6, 0.7)
        with patch_ptq_techniques(bn_folded_acc, cle_acc, adaround_acc) as mocks:
            with create_tmp_directory() as results_dir:
                auto_quant = AutoQuant(sess, starting_ops, ending_ops, unlabeled_dataset, mocks.eval_callback, results_dir=results_dir)
                # Each call with a different allowed drop should still succeed.
                for allowed_accuracy_drop in (0.5, 0.4, 0.3, 0.2, 0.1, 0.05):
                    self._do_test_optimize_auto_quant(auto_quant, sess, allowed_accuracy_drop, bn_folded_acc, cle_acc, adaround_acc)
        with patch_ptq_techniques(bn_folded_acc, cle_acc, adaround_acc) as mocks:
            with create_tmp_directory() as results_dir:
                auto_quant = AutoQuant(sess, starting_ops, ending_ops, unlabeled_dataset, mocks.eval_callback, results_dir=results_dir)
                # run_inference() folds BN once; optimize() folds again and equalizes.
                auto_quant.run_inference()
                auto_quant.optimize()
                assert (mocks.fold_all_batch_norms.call_count == 2)
                assert (mocks.equalize_model.call_count == 1)
                # A second optimize() must not reuse the previous run's results.
                auto_quant.optimize()
                assert (mocks.fold_all_batch_norms.call_count == 3)
                assert (mocks.equalize_model.call_count == 2)
                self._do_test_optimize_auto_quant(auto_quant, sess, 0.0, bn_folded_acc, cle_acc, adaround_acc)
                assert (mocks.fold_all_batch_norms.call_count == 4)
                assert (mocks.equalize_model.call_count == 3)

    def _test_auto_quant(self, sess, unlabeled_dataset, allowed_accuracy_drop, bn_folded_acc, cle_acc, adaround_acc):
        # Helper: build an AutoQuant with patched techniques and verify optimize().
        with patch_ptq_techniques(bn_folded_acc, cle_acc, adaround_acc) as mocks:
            with create_tmp_directory() as results_dir:
                auto_quant = AutoQuant(sess, starting_ops, ending_ops, unlabeled_dataset, mocks.eval_callback, results_dir=results_dir)
                self._do_test_optimize_auto_quant(auto_quant, sess, allowed_accuracy_drop, bn_folded_acc, cle_acc, adaround_acc)

    def _do_test_optimize_auto_quant(self, auto_quant, input_model, allowed_accuracy_drop, bn_folded_acc, cle_acc, adaround_acc):
        # Run optimize() and check device placement and which techniques were applied.
        target_acc = (FP32_ACC - allowed_accuracy_drop)
        (output_model, acc, encoding_path) = auto_quant.optimize(allowed_accuracy_drop)
        assert_same_device(output_model.graph, input_model.graph)
        assert_applied_techniques(output_model, acc, encoding_path, target_acc, bn_folded_acc, cle_acc, adaround_acc, auto_quant.results_dir)

    def test_auto_quant_invalid_input(self, sess, unlabeled_dataset):
        """Constructor and optimize() must reject invalid arguments."""
        with pytest.raises(ValueError):
            AutoQuant(None, starting_ops, ending_ops, unlabeled_dataset, (lambda : None))
        with pytest.raises(ValueError):
            AutoQuant(sess, None, None, unlabeled_dataset, (lambda : None))
        with pytest.raises(ValueError):
            AutoQuant(sess, starting_ops, ending_ops, None, (lambda : None))
        with pytest.raises(ValueError):
            AutoQuant(sess, starting_ops, ending_ops, unlabeled_dataset, None)
        with pytest.raises(ValueError):
            AutoQuant(sess, starting_ops, ending_ops, unlabeled_dataset, (lambda : None), results_dir=None)
        with pytest.raises(ValueError):
            AutoQuant(sess, starting_ops, ending_ops, unlabeled_dataset, (lambda : None), strict_validation=None)
        # Bitwidths outside the supported range are rejected.
        with pytest.raises(ValueError):
            AutoQuant(sess, starting_ops, ending_ops, unlabeled_dataset, (lambda : None), param_bw=2)
        with pytest.raises(ValueError):
            AutoQuant(sess, starting_ops, ending_ops, unlabeled_dataset, (lambda : None), param_bw=64)
        with pytest.raises(ValueError):
            AutoQuant(sess, starting_ops, ending_ops, unlabeled_dataset, (lambda : None), output_bw=2)
        with pytest.raises(ValueError):
            AutoQuant(sess, starting_ops, ending_ops, unlabeled_dataset, (lambda : None), output_bw=64)
        auto_quant = AutoQuant(sess, starting_ops, ending_ops, unlabeled_dataset, (lambda : None))
        # A negative allowed accuracy drop is invalid.
        with pytest.raises(ValueError):
            _ = auto_quant.optimize((- 1.0))
        # A dataset yielding (index, data) tuples is accepted at construction time.
        auto_quant = AutoQuant(sess, starting_ops, ending_ops, unlabeled_dataset.enumerate(), (lambda : None))

    def test_auto_quant_inference_fallback(self, sess, unlabeled_dataset):
        """With strict_validation=False a failing BN fold must be ignored and
        run_inference() must fall back to the raw quantsim accuracy."""
        class _Exception(Exception):
            pass

        def error_fn(*_, **__):
            raise _Exception
        bn_folded_acc = 0.4
        raw_quantsim_acc = (bn_folded_acc + 1e-05)
        with patch_ptq_techniques(bn_folded_acc, None, None, raw_quantsim_acc=raw_quantsim_acc) as mocks:
            with create_tmp_directory() as results_dir:
                auto_quant = AutoQuant(sess, starting_ops, ending_ops, unlabeled_dataset, mocks.eval_callback, results_dir=results_dir, strict_validation=False)
                with patch('aimet_tensorflow.auto_quant_v2.fold_all_batch_norms', side_effect=error_fn):
                    (_, acc) = auto_quant.run_inference()
                assert np.allclose(acc, raw_quantsim_acc)

    def test_auto_quant_optimize_fallback(self, sess, unlabeled_dataset):
        """With strict_validation=False, optimize() must skip a failing technique
        and continue with the remaining ones; the diagnostics HTML must record
        which stages succeeded / were ignored / failed."""
        class _Exception(Exception):
            pass

        def error_fn(*_, **__):
            raise _Exception
        (bn_folded_acc, cle_acc, adaround_acc) = (0.4, 0.5, 0.6)
        with patch_ptq_techniques(bn_folded_acc, cle_acc, adaround_acc) as mocks:
            with create_tmp_directory() as results_dir:
                # Case 1: BN folding fails -> CLE + AdaRound still applied.
                auto_quant = AutoQuant(sess, starting_ops, ending_ops, unlabeled_dataset, mocks.eval_callback, results_dir=results_dir, strict_validation=False)
                with patch('aimet_tensorflow.auto_quant_v2.fold_all_batch_norms', side_effect=error_fn):
                    (_, acc, _) = auto_quant.optimize()
                    assert (acc == adaround_acc)
                with open(os.path.join(results_dir, 'diagnostics.html')) as f:
                    html_parsed = BeautifulSoup(f.read(), features='html.parser')
                    assert_html(html_parsed, {'node_batchnorm_folding': _ERROR_IGNORED, 'node_cross_layer_equalization': _SUCCESS, 'node_adaround': _SUCCESS})
                # Case 2: CLE fails -> BN folding + AdaRound still applied.
                auto_quant = AutoQuant(sess, starting_ops, ending_ops, unlabeled_dataset, mocks.eval_callback, results_dir=results_dir, strict_validation=False)
                with patch('aimet_tensorflow.auto_quant_v2.equalize_model', side_effect=error_fn):
                    (_, acc, _) = auto_quant.optimize()
                    assert (acc == adaround_acc)
                with open(os.path.join(results_dir, 'diagnostics.html')) as f:
                    html_parsed = BeautifulSoup(f.read(), features='html.parser')
                    assert_html(html_parsed, {'node_batchnorm_folding': _SUCCESS, 'node_cross_layer_equalization': _ERROR_IGNORED, 'node_adaround': _SUCCESS})
                # Case 3: AdaRound fails -> best result is the CLE accuracy.
                auto_quant = AutoQuant(sess, starting_ops, ending_ops, unlabeled_dataset, mocks.eval_callback, results_dir=results_dir, strict_validation=False)
                with patch('aimet_tensorflow.auto_quant_v2.Adaround.apply_adaround', side_effect=error_fn):
                    (_, acc, _) = auto_quant.optimize()
                    assert (acc == cle_acc)
                with open(os.path.join(results_dir, 'diagnostics.html')) as f:
                    html_parsed = BeautifulSoup(f.read(), features='html.parser')
                    assert_html(html_parsed, {'node_batchnorm_folding': _SUCCESS, 'node_cross_layer_equalization': _SUCCESS, 'node_adaround': _ERROR_IGNORED})
                # Case 4: every technique fails -> optimize() raises RuntimeError.
                auto_quant = AutoQuant(sess, starting_ops, ending_ops, unlabeled_dataset, mocks.eval_callback, results_dir=results_dir, strict_validation=False)
                with patch('aimet_tensorflow.auto_quant_v2.fold_all_batch_norms', side_effect=error_fn), patch('aimet_tensorflow.auto_quant_v2.equalize_model', side_effect=error_fn), patch('aimet_tensorflow.auto_quant_v2.Adaround.apply_adaround', side_effect=error_fn):
                    with pytest.raises(RuntimeError):
                        auto_quant.optimize()
                with open(os.path.join(results_dir, 'diagnostics.html')) as f:
                    html_parsed = BeautifulSoup(f.read(), features='html.parser')
                    assert_html(html_parsed, {'node_batchnorm_folding': _ERROR_IGNORED, 'node_cross_layer_equalization': _ERROR_IGNORED, 'node_adaround': _ERROR_IGNORED})
                # Case 5: strict validation -> the original exception propagates
                # and the failing stage is recorded as a hard failure.
                auto_quant = AutoQuant(sess, starting_ops, ending_ops, unlabeled_dataset, mocks.eval_callback, results_dir=results_dir, strict_validation=True)
                with patch('aimet_tensorflow.auto_quant_v2.equalize_model', side_effect=error_fn):
                    with pytest.raises(_Exception):
                        auto_quant.optimize()
                with open(os.path.join(results_dir, 'diagnostics.html')) as f:
                    html_parsed = BeautifulSoup(f.read(), features='html.parser')
                    assert_html(html_parsed, {'node_batchnorm_folding': _SUCCESS, 'node_cross_layer_equalization': _ERROR_FAILED, 'node_adaround': _NOT_VISITED})

    def test_auto_quant_early_exit(self, sess, unlabeled_dataset):
        """If even the W32 (weights-only 32-bit) evaluation misses the target,
        optimize() exits early returning (None, None, None)."""
        allowed_accuracy_drop = 0.1
        # W32 accuracy is deliberately below the achievable target.
        w32_acc = (FP32_ACC - (allowed_accuracy_drop * 2))
        with create_tmp_directory() as results_dir:
            with patch_ptq_techniques(bn_folded_acc=0, cle_acc=0, adaround_acc=0, w32_acc=w32_acc) as mocks:
                auto_quant = AutoQuant(sess, starting_ops, ending_ops, unlabeled_dataset, mocks.eval_callback, results_dir=results_dir)
                (output_model, acc, encoding_path) = auto_quant.optimize(allowed_accuracy_drop)
            assert (output_model is None)
            assert (acc is None)
            assert (encoding_path is None)
            with open(os.path.join(results_dir, 'diagnostics.html')) as f:
                html_parsed = BeautifulSoup(f.read(), features='html.parser')
                assert_html(html_parsed, {'node_test_w32_eval_score': _VISITED, 'node_batchnorm_folding': _NOT_VISITED, 'node_cross_layer_equalization': _NOT_VISITED, 'node_adaround': _NOT_VISITED, 'node_result_fail': _VISITED})

    def test_set_additional_params(self, sess, unlabeled_dataset):
        """set_adaround_params() must forward the user-supplied AdaroundParameters
        to Adaround.apply_adaround unchanged."""
        allowed_accuracy_drop = 0
        bn_folded_acc = 0.1
        cle_acc = 0.2
        adaround_acc = 0.3
        with patch_ptq_techniques(bn_folded_acc, cle_acc, adaround_acc) as mocks:
            auto_quant = AutoQuant(sess, starting_ops, ending_ops, unlabeled_dataset, mocks.eval_callback)
            adaround_params = AdaroundParameters(unlabeled_dataset, 1)
            auto_quant.set_adaround_params(adaround_params)
            self._do_test_optimize_auto_quant(auto_quant, sess, allowed_accuracy_drop, bn_folded_acc, cle_acc, adaround_acc)
            # The 4th positional argument of apply_adaround is the params object.
            (adaround_args, _) = mocks.apply_adaround.call_args
            (_, _, _, actual_adaround_params) = adaround_args
            assert (adaround_params == actual_adaround_params)
def test_init_without_routes(): block_number = BlockNumber(1) routes = [] pseudo_random_generator = random.Random() init_state_change = ActionInitInitiator(factories.UNIT_TRANSFER_DESCRIPTION, routes) channel_map = {} iteration = initiator_manager.state_transition(payment_state=None, state_change=init_state_change, channelidentifiers_to_channels=channel_map, addresses_to_channel={}, pseudo_random_generator=pseudo_random_generator, block_number=block_number) assert (iteration.new_state is None) assert (len(iteration.events) == 1) assert isinstance(iteration.events[0], EventPaymentSentFailed) assert (iteration.new_state is None)
def get_t_mask(img, hsv_ranges=None): if (hsv_ranges is None): hsv_ranges = [[0, 255], [130, 216], [150, 230]] hsv_img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV) mask = np.ones(img.shape[:2], dtype=bool) for c in range(len(hsv_ranges)): (l, h) = hsv_ranges[c] mask &= (l <= hsv_img[(..., c)]) mask &= (hsv_img[(..., c)] <= h) return mask
def visualize_changes_after_optimization(old_model: torch.nn.Module, new_model: torch.nn.Module, results_dir: str, selected_layers: List=None) -> List[plotting.Figure]: file_path = os.path.join(results_dir, 'visualize_changes_after_optimization.html') plotting.output_file(file_path) subplots = [] if selected_layers: for (name, module) in new_model.named_modules(): if ((name in selected_layers) and hasattr(module, 'weight')): old_model_module = get_layer_by_name(old_model, name) new_model_module = module subplots.append(plotting_utils.visualize_changes_after_optimization_single_layer(name, old_model_module, new_model_module)) else: for (name, module) in new_model.named_modules(): if (hasattr(module, 'weight') and isinstance(module, (torch.nn.modules.conv.Conv2d, torch.nn.modules.linear.Linear))): old_model_module = get_layer_by_name(old_model, name) new_model_module = module subplots.append(plotting_utils.visualize_changes_after_optimization_single_layer(name, old_model_module, new_model_module)) plotting.save(column(subplots)) return subplots
class _SynchronizedBatchNorm(_BatchNorm):
    """BatchNorm that synchronizes batch statistics across data-parallel
    replicas: each replica sends its partial sums to a master replica, which
    computes the global mean / inv-std and broadcasts them back."""

    def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True):
        super(_SynchronizedBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine)
        # Master-side coordinator; replica 0 owns it, other replicas get pipes.
        self._sync_master = SyncMaster(self._data_parallel_master)
        self._is_parallel = False
        self._parallel_id = None
        self._slave_pipe = None

    def forward(self, input, gain=None, bias=None):
        """Normalize `input`; optional `gain`/`bias` are applied multiplicatively/
        additively on top of (or instead of) the affine parameters."""
        # Outside data-parallel training (eval mode or single replica) fall back
        # to the stock batch_norm using the running statistics.
        if (not (self._is_parallel and self.training)):
            out = F.batch_norm(input, self.running_mean, self.running_var, self.weight, self.bias, self.training, self.momentum, self.eps)
            if (gain is not None):
                out = (out + gain)
            if (bias is not None):
                out = (out + bias)
            return out
        # Flatten spatial dims: (N, C, *) -> (N, C, -1) so sums are per-channel.
        input_shape = input.size()
        input = input.view(input.size(0), input.size(1), (- 1))
        sum_size = (input.size(0) * input.size(2))
        input_sum = _sum_ft(input)
        input_ssum = _sum_ft((input ** 2))
        # Replica 0 runs the master reduction; others submit to their pipe and
        # block until the global (mean, inv_std) comes back.
        if (self._parallel_id == 0):
            (mean, inv_std) = self._sync_master.run_master(_ChildMessage(input_sum, input_ssum, sum_size))
        else:
            (mean, inv_std) = self._slave_pipe.run_slave(_ChildMessage(input_sum, input_ssum, sum_size))
        if (gain is not None):
            # Caller-supplied per-sample gain/bias (squeezed over the last dim).
            output = (((input - _unsqueeze_ft(mean)) * (_unsqueeze_ft(inv_std) * gain.squeeze((- 1)))) + bias.squeeze((- 1)))
        elif self.affine:
            output = (((input - _unsqueeze_ft(mean)) * _unsqueeze_ft((inv_std * self.weight))) + _unsqueeze_ft(self.bias))
        else:
            output = ((input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std))
        return output.view(input_shape)

    def __data_parallel_replicate__(self, ctx, copy_id):
        # Called by the data-parallel wrapper for each replica; replica 0
        # registers as master, the others register as slaves on its context.
        self._is_parallel = True
        self._parallel_id = copy_id
        if (self._parallel_id == 0):
            ctx.sync_master = self._sync_master
        else:
            self._slave_pipe = ctx.sync_master.register_slave(copy_id)

    def _data_parallel_master(self, intermediates):
        """Reduce per-replica (sum, ssum, size) messages, compute global
        statistics, and broadcast (mean, inv_std) back to every replica."""
        # Sort by device id so the coalesced reduce/broadcast order is stable.
        intermediates = sorted(intermediates, key=(lambda i: i[1].sum.get_device()))
        to_reduce = [i[1][:2] for i in intermediates]
        to_reduce = [j for i in to_reduce for j in i]
        target_gpus = [i[1].sum.get_device() for i in intermediates]
        sum_size = sum([i[1].sum_size for i in intermediates])
        (sum_, ssum) = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)
        (mean, inv_std) = self._compute_mean_std(sum_, ssum, sum_size)
        broadcasted = Broadcast.apply(target_gpus, mean, inv_std)
        outputs = []
        for (i, rec) in enumerate(intermediates):
            # Each replica gets its own (mean, inv_std) slice of the broadcast.
            outputs.append((rec[0], _MasterMessage(*broadcasted[(i * 2):((i * 2) + 2)])))
        return outputs

    def _compute_mean_std(self, sum_, ssum, size):
        """Return (mean, inv_std) over `size` elements and update the running
        statistics (running_var uses the unbiased variance; the returned
        inv_std uses the biased one, matching standard BN training)."""
        assert (size > 1), 'BatchNorm computes unbiased standard-deviation, which requires size > 1.'
        mean = (sum_ / size)
        sumvar = (ssum - (sum_ * mean))
        unbias_var = (sumvar / (size - 1))
        bias_var = (sumvar / size)
        self.running_mean = (((1 - self.momentum) * self.running_mean) + (self.momentum * mean.data))
        self.running_var = (((1 - self.momentum) * self.running_var) + (self.momentum * unbias_var.data))
        return (mean, torch.rsqrt((bias_var + self.eps)))
class ReduceScatter_Wait(Function): def forward(ctx, pg: dist.ProcessGroup, myreq: Request[Tensor], *dummy_tensor: Tensor) -> Tensor: assert (myreq.req is not None) myreq.req.wait() myreq.req = None output = myreq.tensor myreq.tensor = None ctx.myreq = myreq ctx.pg = pg rsi = myreq.rsi if (rsi.codecs is not None): output = rsi.codecs.forward.decode(output) return output def backward(ctx, grad_output: Tensor) -> Tuple[(None, None, Tensor)]: myreq = ctx.myreq rsi = myreq.rsi if (rsi.codecs is not None): grad_output = rsi.codecs.backward.encode(grad_output) grad_inputs = [grad_output.new_empty(in_size, dtype=grad_output.dtype, device=grad_output.device) for in_size in rsi.input_sizes] with record_function('## reduce_scatter_bw (all_gather) ##'): req = dist.all_gather(grad_inputs, grad_output.contiguous(), group=ctx.pg, async_op=True) myreq.req = req myreq.tensor = grad_inputs return (None, None, myreq.dummy_tensor)
class CreateShardingInfoTest(unittest.TestCase):
    """Tests for create_sharding_infos_by_sharding: sharder-level fused_params
    must be overridden by, or combined with, per-table ParameterConstraints."""

    def setUp(self) -> None:
        # Two small embedding tables with per-table cache/precision constraints.
        self.tables = [EmbeddingBagConfig(name='table_0', feature_names=['feature_0'], embedding_dim=4, num_embeddings=4), EmbeddingBagConfig(name='table_1', feature_names=['feature_1'], embedding_dim=4, num_embeddings=4)]
        self.constraints = {'table_0': ParameterConstraints(cache_params=CacheParams(algorithm=CacheAlgorithm.LRU, load_factor=0.1, reserved_memory=8.0, precision=DataType.FP16), enforce_hbm=True, stochastic_rounding=False, bounds_check_mode=BoundsCheckMode.IGNORE), 'table_1': ParameterConstraints(cache_params=CacheParams(algorithm=CacheAlgorithm.LFU, load_factor=0.2, reserved_memory=0.0, precision=DataType.FP16), enforce_hbm=True, stochastic_rounding=False, bounds_check_mode=BoundsCheckMode.NONE)}
        self.model = EmbeddingBagCollection(tables=self.tables)
        self.sharder = EmbeddingBagCollectionSharder()
        planner = EmbeddingShardingPlanner(topology=Topology(world_size=1, compute_device='cpu'), constraints=self.constraints)
        self.expected_plan = planner.plan(self.model, [self.sharder])
        # Baseline sharding infos built with no sharder-level fused params.
        self.expected_sharding_infos = create_sharding_infos_by_sharding(self.model, self.expected_plan.get_plan_for_module(''), prefix='embedding_bags.', fused_params=None)

    def test_create_sharding_infos_by_sharding_override(self) -> None:
        """Per-table constraints must take precedence over a conflicting
        sharder-level fused param; an unrelated sharder param must show up."""
        # 'enforce_hbm' is set in the constraints, so the sharder-level value
        # is overridden and the result matches the baseline.
        sharder_fused_params = {'enforce_hbm': False}
        overriden_sharding_infos = create_sharding_infos_by_sharding(self.model, self.expected_plan.get_plan_for_module(''), prefix='embedding_bags.', fused_params=sharder_fused_params)
        for (sharding_type, overriden_sharding_info) in overriden_sharding_infos.items():
            expected_sharding_info = self.expected_sharding_infos[sharding_type]
            for (a, b) in zip(expected_sharding_info, overriden_sharding_info):
                self.assertEqual(a.fused_params, b.fused_params)
        # 'ABC' is not covered by any constraint, so it survives into the
        # fused params and the result differs from the baseline.
        sharder_fused_params = {'ABC': True}
        not_overriden_sharding_infos = create_sharding_infos_by_sharding(self.model, self.expected_plan.get_plan_for_module(''), prefix='embedding_bags.', fused_params=sharder_fused_params)
        for (sharding_type, not_overriden_sharding_info) in not_overriden_sharding_infos.items():
            expected_sharding_info = self.expected_sharding_infos[sharding_type]
            for (a, b) in zip(expected_sharding_info, not_overriden_sharding_info):
                self.assertNotEqual(a.fused_params, b.fused_params)

    def test_create_sharding_infos_by_sharding_combine(self) -> None:
        """When constraints leave enforce_hbm / stochastic_rounding unset, the
        sharder-level fused params must fill the gap."""
        new_constraints = copy.deepcopy(self.constraints)
        for (_, parameter_constraints) in new_constraints.items():
            parameter_constraints.enforce_hbm = None
            parameter_constraints.stochastic_rounding = None
        new_planner = EmbeddingShardingPlanner(topology=Topology(world_size=1, compute_device='cpu'), constraints=new_constraints)
        new_plan = new_planner.plan(self.model, [self.sharder])
        # Matching values -> combined result equals the baseline.
        sharder_fused_params = {'enforce_hbm': True, 'stochastic_rounding': False}
        combined_sharding_infos = create_sharding_infos_by_sharding(self.model, new_plan.get_plan_for_module(''), prefix='embedding_bags.', fused_params=sharder_fused_params)
        for (sharding_type, combined_sharding_info) in combined_sharding_infos.items():
            expected_sharding_info = self.expected_sharding_infos[sharding_type]
            for (a, b) in zip(expected_sharding_info, combined_sharding_info):
                self.assertEqual(a.fused_params, b.fused_params)
        # A differing stochastic_rounding value -> result must differ.
        sharder_fused_params = {'enforce_hbm': True, 'stochastic_rounding': True}
        wrong_combined_sharding_infos = create_sharding_infos_by_sharding(self.model, new_plan.get_plan_for_module(''), prefix='embedding_bags.', fused_params=sharder_fused_params)
        for (sharding_type, wrong_combined_sharding_info) in wrong_combined_sharding_infos.items():
            expected_sharding_info = self.expected_sharding_infos[sharding_type]
            for (a, b) in zip(expected_sharding_info, wrong_combined_sharding_info):
                self.assertNotEqual(a.fused_params, b.fused_params)
def test_history_expanded_with_regex_argument(base_app): run_cmd(base_app, 'alias create sc shortcuts') run_cmd(base_app, 'help') run_cmd(base_app, 'help history') run_cmd(base_app, 'sc') (out, err) = run_cmd(base_app, 'history -v /sh.*cuts/') expected = normalize('\n 1 alias create sc shortcuts\n 4 sc\n 4x shortcuts\n') assert (out == expected) verify_hi_last_result(base_app, 2)
_func('float, int, int: object') def ml_get_zoo_tree(train_size=0.75, max_depth=5, random_state=245245): dataset = pd.read_csv(os.path.join(os.path.dirname(__file__), 'data', 'zoo.csv')) dataset = dataset.drop('animal_name', axis=1) features = dataset.drop('class', axis=1) targets = dataset['class'] (train_features, test_features, train_targets, test_targets) = train_test_split(features, targets, train_size=train_size, random_state=random_state) tree = DecisionTreeClassifier(criterion='entropy', max_depth=max_depth) tree = tree.fit(train_features, train_targets) tree._feature_names = features.columns return tree
def compute_mfcc(filename, sr=22000): try: (audio, sr) = librosa.load(filename, sr=sr, res_type='kaiser_fast') except: (audio, o_sr) = sf.read(filename) audio = librosa.core.resample(audio, o_sr, sr) mfcc = librosa.feature.mfcc(y=audio, sr=sr) mfcc_delta = librosa.feature.delta(mfcc, width=5, mode='nearest') mfcc_delta2 = librosa.feature.delta(mfcc, order=2, width=5, mode='nearest') feature = np.concatenate((np.mean(mfcc, axis=1), np.var(mfcc, axis=1), np.mean(mfcc_delta, axis=1), np.var(mfcc_delta, axis=1), np.mean(mfcc_delta2, axis=1), np.var(mfcc_delta2, axis=1))) return feature
('make-impersonator-property', [values.W_Symbol], simple=False) def make_imp_prop(sym, env, cont): from pycket.interpreter import return_multi_vals name = sym.utf8value prop = imp.W_ImpPropertyDescriptor(name) pred = imp.W_ImpPropertyPredicate(prop) accs = imp.W_ImpPropertyAccessor(prop) return return_multi_vals(values.Values.make([prop, pred, accs]), env, cont)
.unit() def test_import_optional(): match = "pytask requires .*notapackage.* pip .* conda .* 'notapackage'" with pytest.raises(ImportError, match=match) as exc_info: import_optional_dependency('notapackage') assert isinstance(exc_info.value.__context__, ImportError) result = import_optional_dependency('notapackage', errors='ignore') assert (result is None)
class ParallelSentencesDataset(Dataset):
    """Dataset of parallel (translated) sentence pairs for distillation.

    Each loaded dataset holds ``(source_sentence, {translations})`` pairs.
    ``__getitem__`` serves ``InputExample`` objects whose label is the
    *teacher* model's embedding of the source sentence, so a student model
    can be trained to map every translation onto that embedding.
    """

    def __init__(self, student_model: SentenceTransformer, teacher_model: SentenceTransformer, batch_size: int=8, use_embedding_cache: bool=True):
        self.student_model = student_model
        self.teacher_model = teacher_model
        self.datasets = []            # per-file list of (src, set-of-translations) pairs
        self.datasets_iterator = []   # per-dataset cursor into self.datasets
        self.datasets_tokenized = []  # unused in this class body
        self.dataset_indices = []     # dataset ids repeated `weight` times (sampling weights)
        self.copy_dataset_indices = []
        self.cache = []               # pre-generated InputExamples served by __getitem__
        self.batch_size = batch_size
        self.use_embedding_cache = use_embedding_cache
        self.embedding_cache = {}     # sentence -> teacher embedding
        self.num_sentences = 0

    def load_data(self, filepath: str, weight: int=100, max_sentences: int=None, max_sentence_length: int=128):
        """Read a TSV (optionally gzipped) of parallel sentences and add it.

        Each line is tab-separated; the first column is the source sentence.
        Lines with any sentence longer than ``max_sentence_length`` chars are
        skipped; at most ``max_sentences`` lines are read.
        """
        logger.info(('Load ' + filepath))
        parallel_sentences = []
        with (gzip.open(filepath, 'rt', encoding='utf8') if filepath.endswith('.gz') else open(filepath, encoding='utf8')) as fIn:
            count = 0
            for line in fIn:
                sentences = line.strip().split('\t')
                if ((max_sentence_length is not None) and (max_sentence_length > 0) and (max([len(sent) for sent in sentences]) > max_sentence_length)):
                    continue
                parallel_sentences.append(sentences)
                count += 1
                if ((max_sentences is not None) and (max_sentences > 0) and (count >= max_sentences)):
                    break
        self.add_dataset(parallel_sentences, weight=weight, max_sentences=max_sentences, max_sentence_length=max_sentence_length)

    def add_dataset(self, parallel_sentences: List[List[str]], weight: int=100, max_sentences: int=None, max_sentence_length: int=128):
        """Register an in-memory list of parallel sentence rows.

        Rows sharing the same source sentence are merged; ``weight`` controls
        how often this dataset is sampled relative to the others (its id is
        repeated ``weight`` times in ``dataset_indices``).
        """
        sentences_map = {}
        for sentences in parallel_sentences:
            # Same length filter as load_data, re-applied for direct callers.
            if ((max_sentence_length is not None) and (max_sentence_length > 0) and (max([len(sent) for sent in sentences]) > max_sentence_length)):
                continue
            source_sentence = sentences[0]
            if (source_sentence not in sentences_map):
                sentences_map[source_sentence] = set()
            # The source itself is included, so it also becomes a target.
            for sent in sentences:
                sentences_map[source_sentence].add(sent)
            if ((max_sentences is not None) and (max_sentences > 0) and (len(sentences_map) >= max_sentences)):
                break
        if (len(sentences_map) == 0):
            return
        self.num_sentences += sum([len(sentences_map[sent]) for sent in sentences_map])
        dataset_id = len(self.datasets)
        self.datasets.append(list(sentences_map.items()))
        self.datasets_iterator.append(0)
        self.dataset_indices.extend(([dataset_id] * weight))

    def generate_data(self):
        """Refill ``self.cache`` with one pass over the weighted dataset ids.

        For every sampled source sentence, one InputExample per translation
        is created, labeled with the teacher embedding of the source.
        """
        source_sentences_list = []
        target_sentences_list = []
        for data_idx in self.dataset_indices:
            (src_sentence, trg_sentences) = self.next_entry(data_idx)
            source_sentences_list.append(src_sentence)
            target_sentences_list.append(trg_sentences)
        src_embeddings = self.get_embeddings(source_sentences_list)
        for (src_embedding, trg_sentences) in zip(src_embeddings, target_sentences_list):
            for trg_sentence in trg_sentences:
                self.cache.append(InputExample(texts=[trg_sentence], label=src_embedding))
        random.shuffle(self.cache)

    def next_entry(self, data_idx):
        """Return the next (source, translations) pair of dataset ``data_idx``,
        reshuffling the dataset when the cursor wraps around."""
        (source, target_sentences) = self.datasets[data_idx][self.datasets_iterator[data_idx]]
        self.datasets_iterator[data_idx] += 1
        if (self.datasets_iterator[data_idx] >= len(self.datasets[data_idx])):
            self.datasets_iterator[data_idx] = 0
            random.shuffle(self.datasets[data_idx])
        return (source, target_sentences)

    def get_embeddings(self, sentences):
        """Embed ``sentences`` with the teacher, optionally memoizing results.

        With the cache enabled, only sentences not seen before are encoded;
        the returned list preserves the input order.
        """
        if (not self.use_embedding_cache):
            return self.teacher_model.encode(sentences, batch_size=self.batch_size, show_progress_bar=False, convert_to_numpy=True)
        new_sentences = []
        for sent in sentences:
            if (sent not in self.embedding_cache):
                new_sentences.append(sent)
        if (len(new_sentences) > 0):
            new_embeddings = self.teacher_model.encode(new_sentences, batch_size=self.batch_size, show_progress_bar=False, convert_to_numpy=True)
            for (sent, embedding) in zip(new_sentences, new_embeddings):
                self.embedding_cache[sent] = embedding
        return [self.embedding_cache[sent] for sent in sentences]

    def __len__(self):
        return self.num_sentences

    def __getitem__(self, idx):
        # The cache is consumed lazily and regenerated when exhausted; note
        # that ``idx`` is ignored — items are served in shuffled cache order.
        if (len(self.cache) == 0):
            self.generate_data()
        return self.cache.pop()
class CoordStage(object):
    """Quantizes a single-channel coordinate map in [0, 1] to integer codes.

    ``encode`` area-downsamples by ``down_factor`` and scales into the
    ``n_embed`` codebook; ``decode`` reverses both steps (nearest upsample).
    """

    def __init__(self, n_embed, down_factor):
        self.n_embed = n_embed
        self.down_factor = down_factor

    def eval(self):
        # Stateless stand-in for nn.Module.eval(); returns self for chaining.
        return self

    def encode(self, c):
        """Return (quantized, None, (None, None, long_indices)) for map ``c``."""
        assert 0.0 <= c.min() and c.max() <= 1.0
        (batch, channels, height, width) = c.shape
        assert channels == 1
        # Downsample by averaging, then clamp away any interpolation overshoot.
        scaled = torch.nn.functional.interpolate(c, scale_factor=(1 / self.down_factor), mode='area')
        scaled = scaled.clamp(0.0, 1.0)
        scaled = self.n_embed * scaled
        quantized = scaled.round()
        indices = quantized.to(dtype=torch.long)
        return (quantized, None, (None, None, indices))

    def decode(self, c):
        """Map codes back to [0, 1] and nearest-upsample to the input size."""
        rescaled = c / self.n_embed
        return torch.nn.functional.interpolate(rescaled, scale_factor=self.down_factor, mode='nearest')
def _expected_no_editor_error():
    """Return the normalized error text shown when no editor is configured."""
    # PyPy historically surfaced this as EnvironmentError instead of OSError.
    exception_name = 'EnvironmentError' if hasattr(sys, 'pypy_translation_info') else 'OSError'
    return normalize("\nEXCEPTION of type '{}' occurred with message: Please use 'set editor' to specify your text editing program of choice.\nTo enable full traceback, run the following command: 'set debug true'\n".format(exception_name))
def main():
    """CLI entry point: test a trained NDTT model on a chosen data split.

    Rebuilds the test configuration from saved training logs (unless testing
    the generating model itself), configures torch threading, and hands off
    to ``Tester``.
    """
    parser = argparse.ArgumentParser(description='testing neural Datalog through time (NDTT)')
    parser.add_argument('-d', '--Domain', required=True, type=str, help='which domain to work on?')
    parser.add_argument('-fn', '--FolderName', required=True, type=str, help='base name of the folder to store the model (and log)?')
    parser.add_argument('-s', '--Split', required=True, type=str, help='what split to test?', choices=['train', 'dev', 'test'])
    parser.add_argument('-r', '--Ratio', default=1.0, type=float, help='fraction of data to use')
    parser.add_argument('-ps', '--PathStorage', type=str, default='../..', help='Path of storage which stores domains (with data), logs, results, etc. Must be local (e.g. no HDFS allowed)')
    parser.add_argument('-tp', '--TrackPeriod', default=1, type=int, help='# seqs each print while doing prediction')
    parser.add_argument('-m', '--Multiplier', default=1, type=int, help='constant of N=O(I), where N is # of sampled time points for integral')
    parser.add_argument('-dm', '--DevMultiplier', default=1, type=int, help='constant of N=O(I), where N is # of sampled time points for integral')
    parser.add_argument('-pred', '--Predict', action='store_true', help='test on prediction?')
    parser.add_argument('-nobj', '--NumObject', default=1, type=int, help='default==1 : number of objects to predict, from last to first (first obj==subj)')
    parser.add_argument('-ns', '--NumSample', default=100, type=int, help='default==100 : number of sampled next event times via thinning algorithm, used to compute predictions')
    parser.add_argument('-nexp', '--NumExp', default=500, type=int, help='default==500 : number of i.i.d. Exp(intensity_bound) draws at one time in thinning algorithm')
    parser.add_argument('-np', '--NumProcess', default=1, type=int, help='# of processes used, default is 1')
    parser.add_argument('-nt', '--NumThread', default=1, type=int, help='OMP NUM THREADS')
    parser.add_argument('-dsm', '--DownSampleMode', default='none', type=str, choices=['none', 'uniform'], help='how do you want to down sample it? none? uniform?')
    parser.add_argument('-dss', '--DownSampleSize', default=1, type=int, help='down sample size, 1 <= dss <= K')
    parser.add_argument('-gpu', '--UseGPU', action='store_true', help='use GPU?')
    parser.add_argument('-sd', '--Seed', default=12345, type=int, help='random seed')
    parser.add_argument('-v', '--Verbose', action='store_true', help='show a lot of messages while testing?')
    args = parser.parse_args()
    # Tag this run with process id / timestamp / torch version for the logs.
    id_process = os.getpid()
    time_current = datetime.datetime.now().isoformat()
    args.Version = torch.__version__
    args.ID = id_process
    args.TIME = time_current
    path_storage = os.path.abspath(args.PathStorage)
    args.PathDomain = os.path.join(path_storage, 'domains', args.Domain)
    if (args.Domain == args.FolderName):
        # Special case: test against the domain's generating model directly.
        args.PathLog = None
        args.Database = 'gen'
        args.PathModel = os.path.join(args.PathDomain, 'gen_model')
        args.PathResult = os.path.join(args.PathDomain, f'results_gen_{args.Split}')
    else:
        # Recover the training-time configuration from the saved log so the
        # model is rebuilt with matching hyperparameters.
        path_logs = os.path.join(args.PathDomain, 'Logs', args.FolderName)
        assert os.path.exists(path_logs)
        args.PathLog = os.path.join(path_logs, 'log.txt')
        log = LogReader(args.PathLog)
        saved_args = log.getArgs()
        args.Database = saved_args['Database']
        args.PathModel = os.path.join(path_logs, os.path.basename(saved_args['PathSave']))
        # Older logs may lack some keys; copy only what is present.
        if ('LSTMPool' in saved_args):
            args.LSTMPool = saved_args['LSTMPool']
        if ('UpdateMode' in saved_args):
            args.UpdateMode = saved_args['UpdateMode']
        if ('Layer' in saved_args):
            args.Layer = saved_args['Layer']
        if ('MemorySize' in saved_args):
            args.MemorySize = saved_args['MemorySize']
        if ('TimeEmbeddingDim' in saved_args):
            args.TimeEmbeddingDim = saved_args['TimeEmbeddingDim']
        if ('TimeEmbeddingMode' in saved_args):
            args.TimeEmbeddingMode = saved_args['TimeEmbeddingMode']
        if ('IntensityComputationMode' in saved_args):
            args.IntensityComputationMode = saved_args['IntensityComputationMode']
        else:
            args.IntensityComputationMode = 'extra_dim'
        if ('AttentionTemperature' in saved_args):
            args.AttentionTemperature = saved_args['AttentionTemperature']
        else:
            # NOTE(review): IntensityComputationMode is re-assigned here even
            # though it was already handled above — looks like a copy-paste
            # remnant; confirm intent before removing.
            args.IntensityComputationMode = 'extra_dim'
            args.AttentionTemperature = 1.0
        args.PathResult = os.path.join(path_logs, f'results_{args.Split}')
    # Testing is single-process; only the torch thread count is configurable.
    args.NumProcess = 1
    if (args.NumThread < 1):
        args.NumThread = 1
    print(f'mp num threads in torch : {torch.get_num_threads()}')
    if (torch.get_num_threads() != args.NumThread):
        print(f'not equal to NumThread arg ({args.NumThread})')
        torch.set_num_threads(args.NumThread)
        print(f'set to {args.NumThread}')
        assert (torch.get_num_threads() == args.NumThread), 'not set yet?!'
    tester = Tester(args)
    tester.run()
class Receiver(threading.Thread):
    """Thread that drains the module-level queue ``q`` until a 'stop' sentinel.

    Collects every received message in ``self.M`` and tracks the average
    queue length observed before each pop in ``self.avSize``.
    """

    def run(self):
        received = []
        queue_sizes = []
        while True:
            # Record the backlog before each blocking pop.
            queue_sizes.append(len(q))
            message = q.pop(True)
            if message == 'stop':
                break
            received.append(message)
        self.M = received
        self.avSize = float(sum(queue_sizes)) / len(queue_sizes)

    def show(self):
        """Print totals plus a per-sender breakdown of received messages."""
        # Sender id is embedded in the message as '...-<id>,...'.
        counts = {}
        for message in self.M:
            sender = message.split('-', 1)[1].split(',', 1)[0]
            counts[sender] = counts.get(sender, 0) + 1
        print('received %i messages' % len(self.M))
        print('average queue size was %1.2f' % self.avSize)
        for sender in counts:
            print(' from %s received %i' % (sender, counts[sender]))
def wrap_builder(old_builder):
    """Wrap a docs build function so a reactpy dev server runs alongside it.

    Starts ``serve_development_app`` on 127.0.0.1:5555 in a daemon thread,
    blocks until the server signals startup, then returns a builder that
    resets reactpy state before delegating to ``old_builder``.
    """
    app = make_app('docs_dev_app')
    thread_started = threading.Event()

    def run_in_thread():
        # The server thread needs its own asyncio event loop.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        server_started = asyncio.Event()

        async def set_thread_event_when_started():
            # Bridge the asyncio startup event to the threading.Event the
            # calling thread is waiting on.
            (await server_started.wait())
            thread_started.set()
        loop.run_until_complete(asyncio.gather(serve_development_app(app, '127.0.0.1', 5555, server_started), set_thread_event_when_started()))
    threading.Thread(target=run_in_thread, daemon=True).start()
    # Do not return until the dev server is actually up.
    thread_started.wait()

    def new_builder():
        # Reset reactpy module state so each docs build starts clean.
        clear_reactpy_web_modules_dir()
        reload_examples()
        old_builder()
    return new_builder
def _munge_variant_of(variant_of):
    """Normalize ``variant_of`` (None, a single VariantField, or an iterable)
    into a validated tuple of VariantField instances.

    Raises:
        ValueError: if any element is not a VariantField.
    """
    if variant_of is None:
        normalized = ()
    elif isinstance(variant_of, VariantField):
        normalized = (variant_of,)
    else:
        normalized = tuple(variant_of)
    for candidate in normalized:
        if not isinstance(candidate, VariantField):
            raise ValueError(("Element %r of %r is not a variant field (ExampleType.field['name'])." % (candidate, normalized)))
    return normalized
class Arcsinh(SpecificFunction):
    """Inverse hyperbolic sine function node, arcsinh(child)."""

    def __init__(self, child):
        super().__init__(np.arcsinh, child)

    def _from_json(cls, snippet: dict):
        # NOTE(review): first parameter is ``cls`` — presumably decorated
        # ``@classmethod`` in the original file; confirm.
        # Rebuild from serialized form; the numpy callable is re-supplied here.
        return super()._from_json(np.arcsinh, snippet)

    def _function_diff(self, children, idx):
        # d/dx arcsinh(x) = 1 / sqrt(x^2 + 1)
        return 1 / sqrt((children[0] ** 2) + 1)

    def _sympy_operator(self, child):
        # sympy is optional; resolve it lazily.
        sympy = have_optional_dependency('sympy')
        return sympy.asinh(child)
class ZookeeperCollector(diamond.collector.Collector):
    """Diamond collector that scrapes ZooKeeper's ``mntr`` four-letter command.

    NOTE(review): this is Python-2-era code (``basestring``, str payload to
    ``sock.send``); on Python 3 the send would need bytes — confirm the
    target runtime before modernizing.
    """

    def get_default_config_help(self):
        config_help = super(ZookeeperCollector, self).get_default_config_help()
        config_help.update({'publish': (("Which rows of 'status' you would like to publish." + " Telnet host port' and type stats and hit enter to see the ") + ' list of possibilities. Leave unset to publish all.'), 'hosts': ('List of hosts, and ports to collect. Set an alias by ' + ' prefixing the host:port with ')})
        return config_help

    def get_default_config(self):
        config = super(ZookeeperCollector, self).get_default_config()
        config.update({'path': 'zookeeper', 'hosts': ['localhost:2181']})
        return config

    def get_raw_stats(self, host, port):
        """Send ``mntr`` to ZooKeeper and return the raw response.

        A ``port`` of None is treated as a UNIX-domain socket path.
        Returns an empty string on socket errors.
        """
        data = ''
        try:
            if (port is None):
                sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                sock.connect(host)
            else:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.connect((host, int(port)))
            # 'mntr' dumps one "key<TAB>value" stat per line.
            sock.send('mntr\n')
            data = sock.recv(4096)
        except socket.error:
            self.log.exception('Failed to get stats from %s:%s', host, port)
        return data

    def get_stats(self, host, port):
        """Parse the ``mntr`` output into a dict, skipping non-numeric rows."""
        ignored = ('zk_version', 'zk_server_state')
        # NOTE(review): ``pid`` is never assigned a real value, so the
        # /proc/<pid>/cmdline lookup below always fails and is swallowed —
        # looks like lost code; confirm against the original collector.
        pid = None
        stats = {}
        data = self.get_raw_stats(host, port)
        for line in data.splitlines():
            pieces = line.split()
            if (pieces[0] in ignored):
                continue
            stats[pieces[0]] = pieces[1]
        self.log.debug('pid %s', pid)
        try:
            # Try to recover the configured max connection limit (-c N) from
            # the server's command line.
            cmdline = ('/proc/%s/cmdline' % pid)
            f = open(cmdline, 'r')
            m = re.search('-c\x00(\\d+)', f.readline())
            if (m is not None):
                self.log.debug('limit connections %s', m.group(1))
                stats['limit_maxconn'] = m.group(1)
            f.close()
        except:
            # NOTE(review): bare except (and potential fd leak on error) kept
            # byte-identical; best-effort lookup, failure is non-fatal.
            self.log.debug('Cannot parse command line options for zookeeper')
        return stats

    def collect(self):
        """Collect from every configured host and publish the selected stats."""
        hosts = self.config.get('hosts')
        if isinstance(hosts, basestring):
            hosts = [hosts]
        for host in hosts:
            # NOTE(review): this regex has an unbalanced ')' and looks garbled;
            # it presumably matched an optional 'alias@host:port' form —
            # confirm against the original file.
            matches = re.search('((.+)\\)?([^:]+)(:(\\d+))?', host)
            alias = matches.group(2)
            hostname = matches.group(3)
            port = matches.group(5)
            stats = self.get_stats(hostname, port)
            # Publish everything unless 'publish' narrows the key list.
            desired = self.config.get('publish', stats.keys())
            for stat in desired:
                if (stat in stats):
                    if (alias is not None):
                        self.publish(((alias + '.') + stat), stats[stat])
                    else:
                        self.publish(stat, stats[stat])
                else:
                    self.log.error("No such key '%s' available, issue 'stats' for a full list", stat)
def get_word_pair_sim_bw_models(year1, year2, model_path, selected_ngrams, all_model_vectors, top_k_acc):
    """Rank word pairs by acceleration between two yearly models.

    Returns a DataFrame sorted by descending 'Acceleration' plus the list of
    distinct words in first-appearance order over the sorted pairs.
    """
    (word_pairs, em1, em2) = get_acceleration_bw_models(year1, year2, model_path, selected_ngrams, all_model_vectors, top_k_acc)
    pair_df = pd.DataFrame(list(word_pairs.items()), columns=['Word Pair', 'Acceleration'])
    pair_df = pair_df.sort_values(by='Acceleration', ascending=False)
    ordered_words = []
    # Collect each word once, in the order pairs appear after sorting.
    for pair in pair_df['Word Pair'].values:
        for word in pair:
            if word not in ordered_words:
                ordered_words.append(word)
    return (pair_df, ordered_words)
class Portal(object):
    """The Portal-side application: manages player sessions and talks to the
    Server process over AMP, surviving server reloads.

    Attaches its services to the given twisted ``application`` and registers
    a reactor shutdown hook.
    """

    def __init__(self, application):
        # Make the current game dir importable for dynamically loaded modules.
        sys.path.append('.')
        self.services = service.MultiService()
        self.services.setServiceParent(application)
        # Set by the AMP factory once the server connection is up.
        self.amp_protocol = None
        self.sessions = PORTAL_SESSIONS
        self.sessions.portal = self
        self.process_id = os.getpid()
        self.server_process_id = None
        self.server_restart_mode = 'shutdown'
        self.server_info_dict = {}
        self.start_time = time.time()
        # Command line used to relaunch the server if it dies.
        self.server_twistd_cmd = self._get_backup_server_twistd_cmd()
        # Ensure sessions and server are torn down when the reactor stops.
        reactor.addSystemEventTrigger('before', 'shutdown', self.shutdown, _reactor_stopping=True, _stop_server=True)

    def _get_backup_server_twistd_cmd(self):
        """Build the twistd command line for (re)starting the Server process."""
        server_twistd_cmd = ['twistd', '--python={}'.format(os.path.join(dirname(dirname(abspath(__file__))), 'server.py'))]
        if (os.name != 'nt'):
            # twistd on POSIX wants an explicit pidfile location.
            gamedir = os.getcwd()
            server_twistd_cmd.append('--pidfile={}'.format(os.path.join(gamedir, 'server', 'server.pid')))
        return server_twistd_cmd

    def get_info_dict(self):
        """Return the portal's info dict (shared module-level INFO_DICT)."""
        return INFO_DICT

    def shutdown(self, _reactor_stopping=False, _stop_server=False):
        """Disconnect all sessions and optionally stop the Server process.

        Safe to call twice: the second call during reactor shutdown is a
        no-op thanks to the ``shutdown_complete`` marker.
        """
        if (_reactor_stopping and hasattr(self, 'shutdown_complete')):
            return
        self.sessions.disconnect_all()
        if _stop_server:
            self.amp_protocol.stop_server(mode='shutdown')
        if (not _reactor_stopping):
            # We initiated shutdown ourselves; mark it and stop the reactor
            # on the next iteration.
            self.shutdown_complete = True
            reactor.callLater(0, reactor.stop)
class MyOp(Op):
    """Minimal Op for graph tests.

    Equality is identity-based unless ``x`` is set, in which case two MyOps
    with equal non-None ``x`` compare (and hash) equal.
    """

    def __init__(self, name, dmap=None, x=None):
        self.name = name
        # Avoid a shared mutable default for the destroy map.
        self.destroy_map = dmap if dmap is not None else {}
        self.x = x

    def make_node(self, *inputs):
        inputs = [is_variable(inp) for inp in inputs]
        for inp in inputs:
            if not isinstance(inp.type, MyType):
                raise Exception('Error 1')
        return Apply(self, inputs, [MyType()()])

    def perform(self, *args, **kwargs):
        raise NotImplementedError('No Python implementation available.')

    def __str__(self):
        return self.name

    def __repr__(self):
        return self.name

    def __eq__(self, other):
        if self is other:
            return True
        return (isinstance(other, MyOp) and (self.x is not None) and (self.x == other.x))

    def __hash__(self):
        # Hash must agree with __eq__: value-based when x is set.
        return self.x if self.x is not None else id(self)
class PriceBasedSlippage(Slippage):
    """Slippage model moving each fill price by a fixed fraction against the trade:
    buys fill at price * (1 + rate), sells at price * (1 - rate)."""

    def __init__(self, slippage_rate: float, data_provider: DataProvider, max_volume_share_limit: Optional[float]=None):
        super().__init__(data_provider, max_volume_share_limit)
        self.slippage_rate = slippage_rate

    def _get_fill_prices(self, date: datetime, orders: Sequence[Order], no_slippage_fill_prices: Sequence[float], fill_volumes: Sequence[float]) -> Sequence[float]:
        # Fast path: zero rate leaves prices untouched.
        if self.slippage_rate == 0.0:
            return no_slippage_fill_prices
        return [self._get_single_fill_price(order, price)
                for (order, price) in zip(orders, no_slippage_fill_prices)]

    def _get_single_fill_price(self, order, no_slippage_price):
        # NaN prices propagate unchanged (no fill possible).
        if math.isnan(no_slippage_price):
            return float('nan')
        if order.quantity > 0:
            # Buy: pay more.
            return no_slippage_price * (1 + self.slippage_rate)
        # Sell (or short): receive less.
        return no_slippage_price * (1 - self.slippage_rate)
def split_batchnorm_params(model: nn.Module):
    """Partition a model's trainable parameters into (batchnorm, other).

    Useful for e.g. disabling weight decay on batch-norm parameters.

    Returns:
        Tuple of (batchnorm_params, other_params) lists; only parameters
        with ``requires_grad`` are included.
    """
    batchnorm_params = []
    other_params = []
    for module in model.modules():
        if list(module.children()):
            # Container module: take only its own direct parameters so that
            # child modules are not double-counted.
            trainable = [p for p in module.parameters(recurse=False) if p.requires_grad]
            other_params.extend(trainable)
        elif isinstance(module, nn.modules.batchnorm._BatchNorm):
            trainable = [p for p in module.parameters() if p.requires_grad]
            batchnorm_params.extend(trainable)
        else:
            trainable = [p for p in module.parameters() if p.requires_grad]
            other_params.extend(trainable)
    return (batchnorm_params, other_params)
def qdb_print(msgtype: QDB_MSG, msg: str) -> None:
    """Print a colour-coded debugger message: red '[!]' for errors,
    cyan '[+]' for info."""
    formatters = {
        QDB_MSG.ERROR: lambda text: f'{color.RED}[!] {text}{color.END}',
        QDB_MSG.INFO: lambda text: f'{color.CYAN}[+] {text}{color.END}',
    }
    # Unknown message types raise TypeError (None is not callable), matching
    # the closed QDB_MSG enum.
    print(formatters.get(msgtype)(msg))
def _simplify_polys(polys, minarea=0.01, tolerance=0.01, filterremote=False):
    """Simplify a (Multi)Polygon, optionally dropping small or remote parts.

    For a MultiPolygon: keeps parts with area > ``minarea`` (ordered by
    descending area) and, when ``filterremote``, only those closer to the
    main part than a length scale derived from its area. If even the main
    part is below ``minarea``, only that part is kept.
    """
    if isinstance(polys, MultiPolygon):
        parts = sorted(polys.geoms, key=attrgetter('area'), reverse=True)
        mainpoly = parts[0]
        # Distance scale used for the "remote" filter.
        mainlength = np.sqrt(mainpoly.area / (2.0 * np.pi))
        if mainpoly.area > minarea:
            kept = [p for p in takewhile(lambda p: p.area > minarea, parts)
                    if (not filterremote) or (mainpoly.distance(p) < mainlength)]
            polys = MultiPolygon(kept)
        else:
            polys = mainpoly
    return polys.simplify(tolerance=tolerance)
def bfs(initial: Iterable, expand: Callable) -> Iterator: open_q = deque(list(initial)) visited = set(open_q) while open_q: node = open_q.popleft() (yield node) for next_node in expand(node): if (next_node not in visited): visited.add(next_node) open_q.append(next_node)
class Parser(html.parser.HTMLParser):
    """Flattens an HTML document into a comparable stream of events.

    Each event is a tuple (kind, payload, attrs): '<' for start tags (attrs
    rendered as 'name:value|...' sorted by name), '>' for end tags, '_' for
    text data.
    """

    def __init__(self):
        super().__init__()
        self._stream = []

    def handle_starttag(self, tag, attrs):
        # Sort by attribute name so attribute order never affects comparison.
        attrs = sorted(attrs, key=(lambda x: x[0]))
        # BUG FIX: HTMLParser reports value-less (boolean) attributes such as
        # '<input disabled>' with a value of None; previously this crashed on
        # str + None. Render them with an empty value instead.
        attrs = '|'.join((name + ':' + (value if value is not None else '')) for (name, value) in attrs)
        self._stream.append(('<', tag, attrs))

    def handle_endtag(self, tag):
        self._stream.append(('>', tag, ''))

    def handle_data(self, data):
        self._stream.append(('_', data, ''))

    def stream(self):
        """Return the accumulated event stream."""
        return self._stream
def main(args, init_distributed=False):
    """Per-process training entry point (fairseq-style train loop).

    Sets up task, model, criterion and trainer, restores any checkpoint,
    then runs the epoch loop with validation, LR stepping and checkpointing
    until min-LR / max-epoch / max-update is hit.
    """
    utils.import_user_module(args)
    assert ((args.max_tokens is not None) or (args.max_sentences is not None)), 'Must specify batch size either with --max-tokens or --max-sentences'
    if (torch.cuda.is_available() and (not args.cpu)):
        torch.cuda.set_device(args.device_id)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if init_distributed:
        args.distributed_rank = distributed_utils.distributed_init(args)
    if distributed_utils.is_master(args):
        # Fail fast if the checkpoint directory is not writable.
        checkpoint_utils.verify_checkpoint_directory(args.save_dir)
    print(args)
    task = tasks.setup_task(args)
    # Validation splits are loaded up front; training data is loaded lazily
    # by the epoch iterator.
    for valid_sub_split in args.valid_subset.split(','):
        task.load_dataset(valid_sub_split, combine=False, epoch=0)
    model = task.build_model(args)
    criterion = task.build_criterion(args)
    print(model)
    print('| model {}, criterion {}'.format(args.arch, criterion.__class__.__name__))
    print('| num. model params: {} (num. trained: {})'.format(sum((p.numel() for p in model.parameters())), sum((p.numel() for p in model.parameters() if p.requires_grad))))
    trainer = Trainer(args, task, model, criterion)
    print('| training on {} GPUs'.format(args.distributed_world_size))
    print('| max tokens per GPU = {} and max sentences per GPU = {}'.format(args.max_tokens, args.max_sentences))
    # Restore trainer/optimizer state and the epoch iterator if a checkpoint
    # exists.
    (extra_state, epoch_itr) = checkpoint_utils.load_checkpoint(args, trainer)
    max_epoch = (args.max_epoch or math.inf)
    max_update = (args.max_update or math.inf)
    lr = trainer.get_lr()
    train_meter = StopwatchMeter()
    train_meter.start()
    valid_subsets = args.valid_subset.split(',')
    while ((lr > args.min_lr) and (epoch_itr.epoch < max_epoch) and (trainer.get_num_updates() < max_update)):
        train(args, trainer, task, epoch_itr)
        if ((not args.disable_validation) and ((epoch_itr.epoch % args.validate_interval) == 0)):
            valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
        else:
            valid_losses = [None]
        # LR schedulers may key off the first validation loss.
        lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
        if ((epoch_itr.epoch % args.save_interval) == 0):
            checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
        # Sharded data (paths joined with ':') must be reloaded every epoch.
        reload_dataset = (':' in getattr(args, 'data', ''))
        epoch_itr = trainer.get_train_iterator(epoch_itr.epoch, load_dataset=reload_dataset)
    train_meter.stop()
    print('| done training in {:.1f} seconds'.format(train_meter.sum))
class Output():
    """Represents a renderable output item with a JSON-serializable spec.

    Instances can be sent to the browser (``send``/``show``), embedded into
    other outputs (``embed_data``), or used as a context manager that opens
    a nested output scope. Unsent outputs are flushed on garbage collection.
    """

    def json_encoder(obj, ignore_error=False):
        """``json.dumps`` default hook that embeds Output/OutputList objects.

        NOTE(review): no ``self`` parameter — presumably decorated
        ``@staticmethod`` in the original file; confirm.
        """
        if isinstance(obj, Output):
            return obj.embed_data()
        elif isinstance(obj, OutputList):
            return obj.data
        if (not ignore_error):
            raise TypeError(('Object of type %s is not JSON serializable' % obj.__class__.__name__))

    def dump_dict(cls, data):
        """Round-trip ``data`` through JSON so embedded outputs are expanded.

        NOTE(review): first parameter is ``cls`` — presumably decorated
        ``@classmethod`` in the original file; confirm.
        """
        return json.loads(json.dumps(data, default=cls.json_encoder))

    def safely_destruct(cls, obj):
        """Walk ``obj`` to mark nested outputs processed, ignoring failures.

        NOTE(review): presumably a ``@classmethod`` as well; confirm.
        """
        try:
            json.dumps(obj, default=partial(cls.json_encoder, ignore_error=True))
        except Exception:
            pass

    def __init__(self, spec, on_embed=None):
        self.processed = False
        self.on_embed = (on_embed or (lambda d: d))
        try:
            self.spec = type(self).dump_dict(spec)
        except TypeError:
            # Spec is unserializable; mark nested outputs processed so their
            # __del__ does not try to re-send them, then re-raise.
            self.processed = True
            type(self).safely_destruct(spec)
            raise
        self.enabled_context_manager = False
        self.container_selector = None
        self.container_dom_id = None
        self.after_exit = None
        # Fail early if there is no active session to send to.
        get_current_session()

    def enable_context_manager(self, container_selector=None, container_dom_id=None, after_exit=None):
        """Allow this output to be used in a ``with`` block; returns self."""
        self.enabled_context_manager = True
        self.container_selector = container_selector
        self.container_dom_id = container_dom_id
        self.after_exit = after_exit
        return self

    def __enter__(self):
        if (not self.enabled_context_manager):
            raise RuntimeError("This output function can't be used as context manager!")
        # Generate a scope id if the caller did not supply one.
        self.container_dom_id = (self.container_dom_id or random_str(10))
        self.spec['container_selector'] = self.container_selector
        self.spec['container_dom_id'] = scope2dom(self.container_dom_id, no_css_selector=True)
        self.send()
        # Subsequent outputs inside the with-block land in this scope.
        get_current_session().push_scope(self.container_dom_id)
        return self.container_dom_id

    def __exit__(self, exc_type, exc_val, exc_tb):
        get_current_session().pop_scope()
        if self.after_exit:
            self.after_exit()
        # Never suppress exceptions raised inside the with-block.
        return False

    def embed_data(self):
        """Return the spec for embedding into a parent output (marks processed)."""
        self.processed = True
        return self.on_embed(self.spec)

    def send(self):
        """Send this output to the client (marks it processed)."""
        self.processed = True
        send_msg('output', self.spec)
    # 'show' is an alias of 'send'.
    show = send

    def style(self, css_style):
        """Append inline CSS to this output's spec; returns self for chaining."""
        self.spec.setdefault('style', '')
        self.spec['style'] += (';%s' % css_style)
        return self

    def onclick(self, callback):
        """Register a click callback for this output; returns self for chaining."""
        callback_id = output_register_callback((lambda _: callback()))
        self.spec.setdefault('click_callback_id', '')
        self.spec['click_callback_id'] += callback_id
        return self

    def __del__(self):
        # Flush outputs that were created but never explicitly sent/embedded.
        if (not self.processed):
            try:
                self.send()
            except Exception:
                pass
def batcher(params, batch):
    """Encode a batch of tokenized sentences as averaged word vectors.

    Empty sentences are replaced by ['.']; words missing from
    ``params.word_vec`` are skipped, and a sentence with no known words maps
    to the zero vector of dimension ``params.wvec_dim``.

    Returns:
        2-D numpy array of shape (len(batch), wvec_dim).
    """
    batch = [sent if sent != [] else ['.'] for sent in batch]
    embeddings = []
    for sent in batch:
        vectors = [params.word_vec[word] for word in sent if word in params.word_vec]
        if not vectors:
            # No known words: fall back to the zero vector.
            vectors = [np.zeros(params.wvec_dim)]
        embeddings.append(np.mean(vectors, 0))
    return np.vstack(embeddings)
def test_mult_factor_out_qm() -> None:
    """Reducing patterns with empty alternatives folds them into ?/* forms."""
    def reduced(pattern):
        return str(parse(pattern).reduce())

    assert reduced('a|b*|') == 'a|b*'
    assert reduced('(a|b*|)') == 'a|b*'
    assert reduced('(a|b*|)c') == '(a|b*)c'
    assert reduced('()') == ''
    assert reduced('([$%\\^]|){1}') == '[$%\\^]?'
class PlayEntityRotation(Packet):
    """Clientbound packet (id 41): update an entity's yaw/pitch rotation."""

    id = 41
    to = 1  # clientbound

    def __init__(self, entity_id: int, yaw: float, pitch: float, on_ground: bool) -> None:
        super().__init__()
        self.entity_id = entity_id
        self.yaw = yaw
        self.pitch = pitch
        self.on_ground = on_ground

    def encode(self) -> bytes:
        # Wire order: varint entity id, float yaw, float pitch, bool on_ground.
        payload = Buffer.pack_varint(self.entity_id)
        payload += Buffer.pack('f', self.yaw)
        payload += Buffer.pack('f', self.pitch)
        payload += Buffer.pack('?', self.on_ground)
        return payload
class FC3_NFS(KickstartCommand):
    """Kickstart ``nfs`` command (FC3): install from an NFS server/directory."""
    removedKeywords = KickstartCommand.removedKeywords
    removedAttrs = KickstartCommand.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        KickstartCommand.__init__(self, writePriority, *args, **kwargs)
        # NFS server hostname/IP and the exported directory to install from.
        self.server = kwargs.get('server', None)
        self.dir = kwargs.get('dir', None)
        self.op = self._getParser()

    def __eq__(self, other):
        # Two nfs commands are equal when server and dir both match.
        if (not other):
            return False
        return ((self.server == other.server) and (self.dir == other.dir))

    def __ne__(self, other):
        return (not (self == other))

    def __str__(self):
        """Emit the kickstart text for this command (empty if never parsed)."""
        retval = KickstartCommand.__str__(self)
        if (not self.seen):
            return retval
        retval += ('# Use NFS installation media\nnfs --server=%s --dir=%s\n' % (self.server, self.dir))
        return retval

    def _getParser(self):
        # Both options are mandatory for the nfs method.
        op = KSOptionParser(prog='nfs', description='\n Install from the NFS server specified. This can\n either be an exploded installation tree or a\n directory of ISO images. In the latter case, the\n install.img must also be provided subject to the\n same rules as with the harddrive installation\n method described above.', version=FC3)
        op.add_argument('--server', metavar='<hostname>', required=True, version=FC3, help='\n Server from which to install (hostname or IP).')
        op.add_argument('--dir', metavar='<directory>', required=True, version=FC3, help='\n Directory containing the ``Packages/`` directory of the\n installation tree. If doing an ISO install, this\n directory must also contain images/install.img.')
        return op

    def parse(self, args):
        """Parse the option list and copy the results onto this command."""
        ns = self.op.parse_args(args=args, lineno=self.lineno)
        self.set_to_self(ns)
        return self
def get_image_paths(image_id_to_image, image_ids):
    """Map image ids to relative paths '<parent_dir>/<filename>' derived from
    each image record's 'url' field."""
    paths = []
    for image_id in image_ids:
        url = image_id_to_image[image_id]['url']
        # Keep only the last directory component plus the file name.
        (base, filename) = os.path.split(url)
        paths.append(os.path.join(os.path.basename(base), filename))
    return paths
class NodeGroupManager():
    """Tracks Ray autoscaler node groups (matched by a resource-name prefix)
    and aggregates per-group available resources.

    NOTE(review): relies on Ray private APIs
    (``ray._private.state.state._available_resources_per_node``) which are
    fragile across Ray versions — confirm the pinned Ray release.
    """

    def __init__(self, path: str, gname: str):
        # Custom resources whose name starts with this prefix mark group
        # membership of a node.
        self.NODE_GROUP_PREFIX = gname
        self.cluster_config = self._read_yaml(path)
        self.init_groups = self._cluster_node_groups(self.cluster_config)
        self.init_group_res = self._parse_node_resources()

    def _cluster_node_groups(self, config: Dict[(str, Any)]) -> Dict[(str, Any)]:
        """Return the worker node types from the cluster config.

        The head node type is identified as the one declaring ``CPU == 0``;
        everything else is a worker type. Also records the first worker's
        EC2 instance type on ``self.INSTANCE_TYPE``.
        """
        avail_node_types = list(config['available_node_types'].items())
        head_node_types = [nt for nt in avail_node_types if (('resources' in nt[1]) and ('CPU' in nt[1]['resources']) and (nt[1]['resources']['CPU'] == 0))][0]
        worker_node_types = [x for x in avail_node_types if (x != head_node_types)]
        if (len(worker_node_types) > 0):
            self.INSTANCE_TYPE = worker_node_types[0][1]['node_config']['InstanceType']
        return worker_node_types

    def _read_yaml(self, path: str) -> Dict[(str, Any)]:
        """Load the autoscaler cluster config YAML."""
        with open(path, 'rt') as f:
            return yaml.safe_load(f)

    def _update_groups(self) -> List[Tuple[(str, float)]]:
        """Return (resource_name, amount) pairs for currently visible groups."""
        # Small delay to let Ray's resource view settle before querying.
        time.sleep(1.1)
        all_available_res = ray.available_resources()
        current_groups = [(k, all_available_res[k]) for k in all_available_res.keys() if (self.NODE_GROUP_PREFIX in k)]
        return current_groups

    def _parse_node_resources(self) -> Dict[(str, Dict[(str, float)])]:
        """Aggregate CPU/memory/object-store totals and node ids per group."""
        all_available_resources = ray._private.state.state._available_resources_per_node()
        group_keys = [x[0] for x in self.init_groups]
        group_res = {}
        for k in group_keys:
            group_res[k] = {'CPU': 0, 'memory': 0, 'object_store_memory': 0, 'node_id': []}
        for v in all_available_resources.values():
            keys = v.keys()
            # A node advertises its group via a prefixed custom resource and
            # its identity via the 'node:<ip>' resource.
            r = re.compile(self.NODE_GROUP_PREFIX)
            partition = list(filter(r.match, list(keys)))
            r = re.compile('node:')
            node_id = list(filter(r.match, list(keys)))
            if (len(partition) > 0):
                partition = partition[0]
            if (len(node_id) > 0):
                node_id = node_id[0]
            if (self.NODE_GROUP_PREFIX in partition):
                group_res[partition]['CPU'] += v['CPU']
                group_res[partition]['memory'] += v['memory']
                group_res[partition]['object_store_memory'] += v['object_store_memory']
                group_res[partition]['node_id'].append(node_id)
        return group_res

    def _update_group_res(self, gname: str) -> Dict[(str, Union[(str, float)])]:
        """Aggregate current CPU/memory/object-store totals for one group."""
        all_available_resources = ray._private.state.state._available_resources_per_node()
        group_res = {'CPU': 0, 'memory': 0, 'object_store_memory': 0, 'node_id': []}
        for v in all_available_resources.values():
            keys = v.keys()
            r = re.compile('node:')
            node_id = list(filter(r.match, list(keys)))
            if (len(node_id) > 0):
                node_id = node_id[0]
            if (gname in v.keys()):
                group_res['CPU'] += v['CPU']
                group_res['memory'] += v['memory']
                group_res['object_store_memory'] += v['object_store_memory']
                group_res['node_id'].append(node_id)
        return group_res

    def get_one_group(self) -> Optional[Dict[(str, Union[(str, float)])]]:
        """Return resource info for the most recently listed group, or None
        if no group (or no available resources for it) exists."""
        current_groups = self._update_groups()
        if (len(current_groups) > 0):
            gname = current_groups[(- 1)][0]
            group_res = self._update_group_res(gname)
            group_res['group'] = gname
            try:
                group_res['group_res'] = ray.available_resources()[gname]
            except Exception as e:
                logger.info(f'Error: {e}. There is no available resources for {gname}')
                return None
            return group_res
        else:
            return None

    def get_group_by_name(self, gname: str) -> Optional[Dict[(str, Union[(str, float)])]]:
        """Return resource info for a specific group name, or None when the
        group currently has no available resources."""
        group_res = self._update_group_res(gname)
        group_res['group'] = gname
        try:
            group_res['group_res'] = ray.available_resources()[gname]
        except Exception as e:
            logger.info(f'Error: {e}. There is no available resources for {gname}')
            return None
        return group_res
# NOTE(review): truncated decorator — presumably
# ``@pytest.mark.parametrize('index', [None, 0])``; confirm against the
# original file.
.parametrize('index', [None, 0])
def test_memmap_new(index):
    """from_tensor on a MemoryMappedTensor (or a view of one) reuses the same backing file."""
    t = torch.tensor([1])
    m = MemoryMappedTensor.from_tensor(t)
    if (index is not None):
        m1 = m[index]
    else:
        m1 = m
    m2 = MemoryMappedTensor.from_tensor(m1)
    assert isinstance(m2, MemoryMappedTensor)
    # Re-wrapping must not copy: same file backs both tensors.
    assert (m2._filename == m1._filename)
    if (index is not None):
        assert (m2.contiguous() == t[index])
    m2c = m2.contiguous()
    assert isinstance(m2c, torch.Tensor)
    assert (m2c == m1)
class HIDManager(EventDispatcher):
    """Wraps an IOKit IOHIDManager: tracks connected HID devices and
    dispatches ``on_connect`` / ``on_disconnect`` events via the CF run loop.
    """

    def __init__(self):
        # Create the native manager and schedule it on the current run loop
        # so matching/removal callbacks can fire.
        self.manager_ref = c_void_p(iokit.IOHIDManagerCreate(None, kIOHIDOptionsTypeNone))
        self.schedule_with_run_loop()
        self.devices = self._get_devices()
        # Keep strong references to the ctypes callback wrappers: if they
        # were garbage-collected, IOKit would call into freed memory.
        self.matching_callback = self._register_matching_callback()
        self.removal_callback = self._register_removal_callback()

    def _get_devices(self):
        """Snapshot the currently attached HID devices as a set."""
        try:
            # None = match all devices.
            iokit.IOHIDManagerSetDeviceMatching(self.manager_ref, None)
            cfset = c_void_p(iokit.IOHIDManagerCopyDevices(self.manager_ref))
            devices = cfset_to_set(cfset)
            cf.CFRelease(cfset)
        except:
            # NOTE(review): bare except kept byte-identical — it also hides
            # programming errors; consider narrowing.
            return set()
        return devices

    def open(self):
        iokit.IOHIDManagerOpen(self.manager_ref, kIOHIDOptionsTypeNone)

    def close(self):
        iokit.IOHIDManagerClose(self.manager_ref, kIOHIDOptionsTypeNone)

    def schedule_with_run_loop(self):
        iokit.IOHIDManagerScheduleWithRunLoop(self.manager_ref, c_void_p(cf.CFRunLoopGetCurrent()), kCFRunLoopDefaultMode)

    def unschedule_from_run_loop(self):
        iokit.IOHIDManagerUnscheduleFromRunLoop(self.manager_ref, c_void_p(cf.CFRunLoopGetCurrent()), kCFRunLoopDefaultMode)

    def _py_matching_callback(self, context, result, sender, device):
        # Called by IOKit when a matching device appears.
        d = HIDDevice.get_device(c_void_p(device))
        if (d not in self.devices):
            self.devices.add(d)
            self.dispatch_event('on_connect', d)

    def _register_matching_callback(self):
        matching_callback = HIDManagerCallback(self._py_matching_callback)
        iokit.IOHIDManagerRegisterDeviceMatchingCallback(self.manager_ref, matching_callback, None)
        return matching_callback

    def _py_removal_callback(self, context, result, sender, device):
        # Called by IOKit when a tracked device disappears.
        d = HIDDevice.get_device(c_void_p(device))
        d.close()
        if (d in self.devices):
            self.devices.remove(d)
            self.dispatch_event('on_disconnect', d)

    def _register_removal_callback(self):
        removal_callback = HIDManagerCallback(self._py_removal_callback)
        iokit.IOHIDManagerRegisterDeviceRemovalCallback(self.manager_ref, removal_callback, None)
        return removal_callback
def transforms_imagenet_eval(img_size=224, crop_pct=None, interpolation='bilinear', use_prefetcher=False, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD):
    """Build the evaluation transform pipeline: resize, center crop, then
    either numpy output (for a prefetcher) or tensor conversion + normalize.

    ``crop_pct`` is the fraction of the resized image kept by the center
    crop, so the resize target is ``img_size / crop_pct``.
    """
    # Fall back to the module-wide default crop percentage.
    crop_pct = (crop_pct or DEFAULT_CROP_PCT)
    if isinstance(img_size, (tuple, list)):
        assert (len(img_size) == 2)
        if (img_size[(- 1)] == img_size[(- 2)]):
            # Square target: single int scale, based on the first dim.
            scale_size = int(math.floor((img_size[0] / crop_pct)))
        else:
            # Non-square target: scale each dimension independently.
            scale_size = tuple([int((x / crop_pct)) for x in img_size])
            # NOTE(review): in this branch scale_size is a tuple, so the
            # Resize([scale_size, scale_size]) call below receives a list
            # of two tuples instead of [h, w] — this looks broken for
            # non-square img_size; confirm against callers.
    else:
        scale_size = int(math.floor((img_size / crop_pct)))
    # Resize to a square of side scale_size, then crop back to img_size.
    tfl = [transforms.Resize([scale_size, scale_size], interpolation=str_to_interp_mode(interpolation)), transforms.CenterCrop(img_size)]
    if use_prefetcher:
        # Prefetcher path: normalization happens later on-device, so emit
        # raw numpy arrays instead of normalized tensors.
        tfl += [ToNumpy()]
    else:
        tfl += [transforms.ToTensor(), transforms.Normalize(mean=torch.tensor(mean), std=torch.tensor(std))]
    return transforms.Compose(tfl)
class JSONPlugin(object):
    """Bottle plugin that serializes dict route results to JSON.

    Routes returning a ``dict`` (or an ``HTTPResponse`` whose body is a
    dict) get their payload serialized with ``json_dumps`` and the
    ``Content-Type`` header set to ``application/json``.  Every other
    return value passes through unchanged.
    """

    name = 'json'
    api = 2

    def __init__(self, json_dumps=json_dumps):
        # A falsy serializer disables the plugin entirely (see apply()).
        self.json_dumps = json_dumps

    def apply(self, callback, route):
        """Wrap *callback* so dict results are returned as JSON."""
        dumps = self.json_dumps
        if (not dumps):
            # Serialization disabled: leave the route handler untouched.
            return callback

        def wrapper(*a, **ka):
            try:
                rv = callback(*a, **ka)
            except HTTPError:
                # Route raised an HTTP error: serve the current error
                # object through the same JSON conversion path.
                rv = _e()
            if isinstance(rv, dict):
                json_response = dumps(rv)
                response.content_type = 'application/json'
                return json_response
            elif (isinstance(rv, HTTPResponse) and isinstance(rv.body, dict)):
                rv.body = dumps(rv.body)
                rv.content_type = 'application/json'
            # BUG FIX: results that are neither a dict nor an
            # HTTPResponse-with-dict-body previously fell off the end of
            # the wrapper and were replaced by an implicit None, dropping
            # the response.  Return the original value instead.
            return rv

        return wrapper
class Session:
    """State of one virtual-environment build: create, seed, activate.

    Also acts as a context manager so the backing app-data store is
    closed when the session ends.
    """

    def __init__(self, verbosity, app_data, interpreter, creator, seeder, activators) -> None:
        self._verbosity = verbosity
        self._app_data = app_data
        self._interpreter = interpreter
        self._creator = creator
        self._seeder = seeder
        self._activators = activators

    # BUG FIX: these accessors were plain methods, but run()/_seed()/
    # _activate() use them as attributes (e.g. ``self.creator.pyenv_cfg``,
    # ``self.seeder is not None``), which would have operated on bound
    # methods.  They must be properties.
    @property
    def verbosity(self):
        """Verbosity level the session was configured with."""
        return self._verbosity

    @property
    def interpreter(self):
        """Target interpreter the environment is built for."""
        return self._interpreter

    @property
    def creator(self):
        """Object responsible for creating the virtual environment."""
        return self._creator

    @property
    def seeder(self):
        """Object that installs seed packages (may be None/disabled)."""
        return self._seeder

    @property
    def activators(self):
        """Activation-script generators to run after creation."""
        return self._activators

    def run(self):
        """Create, seed, and activate the environment, then persist its config."""
        self._create()
        self._seed()
        self._activate()
        self.creator.pyenv_cfg.write()

    def _create(self):
        logging.info('create virtual environment via %s', self.creator)
        self.creator.run()
        logging.debug(_DEBUG_MARKER)
        logging.debug('%s', _Debug(self.creator))

    def _seed(self):
        # Seeding is optional and can be turned off on the seeder itself.
        if ((self.seeder is not None) and self.seeder.enabled):
            logging.info('add seed packages via %s', self.seeder)
            self.seeder.run(self.creator)

    def _activate(self):
        if self.activators:
            active = ', '.join((type(i).__name__.replace('Activator', '') for i in self.activators))
            logging.info('add activators for %s', active)
            for activator in self.activators:
                activator.generate(self.creator)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Ensure the app-data store is released even on error.
        self._app_data.close()
class BuildActionUsageTests(CustomAssertions):
    """Tests for build_action_usage rendering of recorded QIIME 2 actions."""

    # BUG FIX: setUpClass/tearDownClass must be classmethods for unittest
    # to call them once per class; without the decorator ``cls`` would be
    # bound incorrectly and the hooks would not behave as class fixtures.
    @classmethod
    def setUpClass(cls):
        cls.das = DummyArtifacts()
        cls.tempdir = cls.das.tempdir
        cls.pm = PluginManager()

    @classmethod
    def tearDownClass(cls):
        # Release the temporary artifacts created in setUpClass.
        cls.das.free()

    def test_build_action_usage_python(self):
        """Rendered usage imports the plugin and binds the action output."""
        plugin = 'dummy_plugin'
        action = 'concatenate_ints'
        cfg = ReplayConfig(use=ReplayPythonUsage(), use_recorded_metadata=False, pm=self.pm)
        ns = NamespaceCollections()
        # Pre-register usage variables for the two imported inputs.
        import_var_1 = ArtifactAPIUsageVariable('imported_ints_0', (lambda : None), 'artifact', cfg.use)
        import_var_2 = ArtifactAPIUsageVariable('imported_ints_1', (lambda : None), 'artifact', cfg.use)
        import_uuid_1 = '8dea2f1a-2164-4a85-9f7d-e0641b1db22b'
        import_uuid_2 = '7727c060-5384-445d-b007-b64b41a090ee'
        ns.usg_vars = {import_uuid_1: import_var_1, import_uuid_2: import_var_2}
        dag = self.das.concated_ints_v6.dag
        action_uuid = '5035a60e-6f9a-40d4-b412-48ae52255bb5'
        node_uuid = '6facaf61-1676-45eb-ada0-d530be678b27'
        node = dag.get_node_data(node_uuid)
        actions = ActionCollections(std_actions={action_uuid: {node_uuid: 'concatenated_ints'}})
        # First variable for this output gets a ``_0`` suffix.
        unique_var_name = (node.action.output_name + '_0')
        build_action_usage(node, ns, actions.std_actions, action_uuid, cfg)
        rendered = cfg.use.render()
        out_name = ns.usg_vars[node_uuid].to_interface_name()
        vars = ns.usg_vars
        self.assertIsInstance(vars[node_uuid], UsageVariable)
        self.assertEqual(vars[node_uuid].var_type, 'artifact')
        self.assertEqual(vars[node_uuid].name, unique_var_name)
        self.assertRegex(rendered, f'import.*{plugin}.actions as {plugin}_actions')
        self.assertIn(f'{out_name}, = dummy_plugin_actions.{action}(', rendered)

    def test_build_action_usage_recorded_md(self):
        """Rendered usage for an action taking metadata loads it via qiime2.Metadata."""
        action = 'identity_with_metadata'
        with tempfile.TemporaryDirectory() as tempdir:
            cfg = ReplayConfig(use=ReplayPythonUsage(), use_recorded_metadata=False, pm=self.pm, md_out_dir=tempdir)
            action_uuid = '8dae7a81-83ce-48db-9313-6e3131b0933c'
            node_uuid = 'be472b56-d205-43ee-8180-474da575c4d5'
            dag = self.das.concated_ints_with_md.dag
            node = dag.get_node_data(node_uuid)
            ns = NamespaceCollections()
            # Pre-register the metadata mapping plus two int-sequence inputs.
            mapping_var = ArtifactAPIUsageVariable('imported_mapping_0', (lambda : None), 'artifact', cfg.use)
            intseq_var_1 = ArtifactAPIUsageVariable('imported_ints_0', (lambda : None), 'artifact', cfg.use)
            intseq_var_2 = ArtifactAPIUsageVariable('imported_ints_1', (lambda : None), 'artifact', cfg.use)
            mapping_import_uuid = '8f71b73d-b028-4cbc-9894-738bdfe718bf'
            intseq_import_uuid_1 = '0bb6d731-155a-4dd0-8a1e-98827bc4e0bf'
            intseq_import_uuid_2 = 'e6b37bae-3a14-40f7-87b4-52cf5c7c7a1d'
            ns.usg_vars = {mapping_import_uuid: mapping_var, intseq_import_uuid_1: intseq_var_1, intseq_import_uuid_2: intseq_var_2}
            actions = ActionCollections(std_actions={action_uuid: {node_uuid: 'out'}})
            build_action_usage(node, ns, actions.std_actions, action_uuid, cfg)
            rendered = cfg.use.render()
            vars = ns.usg_vars
            self.assertIsInstance(vars[node_uuid], UsageVariable)
            self.assertEqual(vars[node_uuid].var_type, 'artifact')
            self.assertEqual(vars[node_uuid].name, 'out_0')
            self.assertIn('from qiime2 import Metadata', rendered)
            self.assertIn('.view(Metadata)', rendered)
            self.assertIn(f'.{action}(', rendered)
def get_label_length_seq(content):
    """Run-length encode *content* (any sequence with ``==``-comparable items).

    Returns a pair ``(label_seq, length_seq)`` where ``label_seq`` holds
    each maximal run's value and ``length_seq`` the run's length, e.g.
    ``'aabbbc' -> (['a', 'b', 'c'], [2, 3, 1])``.
    """
    label_seq = []
    length_seq = []
    # BUG FIX: an empty sequence previously raised IndexError on the
    # trailing content[start] access; return two empty lists instead.
    if not content:
        return (label_seq, length_seq)
    start = 0
    for i in range(len(content)):
        if (content[i] != content[start]):
            # Run ended at i-1: record its value and length.
            label_seq.append(content[start])
            length_seq.append((i - start))
            start = i
    # Flush the final run.
    label_seq.append(content[start])
    length_seq.append((len(content) - start))
    return (label_seq, length_seq)
def test2():
    """Evaluate the saved Keras model on the held-out test directory.

    Relies on module-level globals: ``exp_url``, ``test_data_dir``,
    ``img_height``/``img_width``, ``batch_size`` and ``test_samples``.
    Prints the metric names followed by their evaluated values.
    """
    # Load the trained model from the experiment directory.
    model = load_model((str(exp_url) + 'models/theultimate.h5'))
    # Only rescaling at eval time — no augmentation.
    test_datagen = ImageDataGenerator(rescale=(1.0 / 255))
    test_generator = test_datagen.flow_from_directory(test_data_dir, target_size=(img_height, img_width), batch_size=batch_size, class_mode='binary')
    print(model.metrics_names)
    # Integer division drops the final partial batch, so up to
    # batch_size - 1 trailing samples are not evaluated.
    print(model.evaluate_generator(test_generator, (test_samples // batch_size)))
def _recall_update_input_check(input: torch.Tensor, target: torch.Tensor, num_classes: Optional[int]) -> None: if (input.size(0) != target.size(0)): raise ValueError(f'The `input` and `target` should have the same first dimension, got shapes {input.shape} and {target.shape}.') if (target.ndim != 1): raise ValueError(f'`target` should be a one-dimensional tensor, got shape {target.shape}.') if ((input.ndim != 1) and (not ((input.ndim == 2) and ((num_classes is None) or (input.shape[1] == num_classes))))): raise ValueError(f'`input` should have shape (num_samples,) or (num_samples, num_classes), got {input.shape}.')
class TestSendMediaGroupWithoutRequest():
    """Offline tests for send_media_group / edit_message_media.

    No network traffic: the bot's request layer is monkeypatched and each
    patched method raises an Exception whose message encodes whether the
    outgoing request payload met the expectation.
    """

    async def test_send_media_group_throws_error_with_group_caption_and_individual_captions(self, bot, chat_id, media_group, media_group_no_caption_only_caption_entities, media_group_no_caption_only_parse_mode):
        """Supplying a group caption together with per-media captions must fail."""
        for group in (media_group, media_group_no_caption_only_caption_entities, media_group_no_caption_only_parse_mode):
            with pytest.raises(ValueError, match='You can only supply either group caption or media with captions.'):
                (await bot.send_media_group(chat_id, group, caption='foo'))

    async def test_send_media_group_custom_filename(self, bot, chat_id, photo_file, animation_file, audio_file, video_file, monkeypatch):
        """Custom filenames set on InputMedia objects reach the multipart payload."""
        async def make_assertion(url, request_data: RequestData, *args, **kwargs):
            # Success is signalled by raising: every multipart field must
            # carry the custom filename.
            result = all(((field_tuple[0] == 'custom_filename') for field_tuple in request_data.multipart_data.values()))
            if (result is True):
                raise Exception('Test was successful')
        monkeypatch.setattr(bot.request, 'post', make_assertion)
        media = [InputMediaAnimation(animation_file, filename='custom_filename'), InputMediaAudio(audio_file, filename='custom_filename'), InputMediaPhoto(photo_file, filename='custom_filename'), InputMediaVideo(video_file, filename='custom_filename')]
        with pytest.raises(Exception, match='Test was successful'):
            (await bot.send_media_group(chat_id, media))

    async def test_send_media_group_with_thumbs(self, bot, chat_id, video_file, photo_file, monkeypatch):
        """Video and its thumbnail are both uploaded as multipart attachments."""
        async def make_assertion(method, url, request_data: RequestData, *args, **kwargs):
            # input_video is assigned in the enclosing test after this
            # closure is defined; nonlocal binds it late.
            nonlocal input_video
            files = request_data.multipart_data
            video_check = (files[input_video.media.attach_name] == input_video.media.field_tuple)
            thumb_check = (files[input_video.thumbnail.attach_name] == input_video.thumbnail.field_tuple)
            result = (video_check and thumb_check)
            raise Exception(f"Test was {('successful' if result else 'failing')}")
        monkeypatch.setattr(bot.request, '_request_wrapper', make_assertion)
        input_video = InputMediaVideo(video_file, thumbnail=photo_file)
        with pytest.raises(Exception, match='Test was successful'):
            (await bot.send_media_group(chat_id, [input_video, input_video]))

    async def test_edit_message_media_with_thumb(self, bot, chat_id, video_file, photo_file, monkeypatch):
        """edit_message_media also forwards both the video and thumbnail uploads."""
        async def make_assertion(method: str, url: str, request_data: Optional[RequestData]=None, *args, **kwargs):
            files = request_data.multipart_data
            video_check = (files[input_video.media.attach_name] == input_video.media.field_tuple)
            thumb_check = (files[input_video.thumbnail.attach_name] == input_video.thumbnail.field_tuple)
            result = (video_check and thumb_check)
            raise Exception(f"Test was {('successful' if result else 'failing')}")
        monkeypatch.setattr(bot.request, '_request_wrapper', make_assertion)
        input_video = InputMediaVideo(video_file, thumbnail=photo_file)
        with pytest.raises(Exception, match='Test was successful'):
            (await bot.edit_message_media(chat_id=chat_id, message_id=123, media=input_video))
class RenameRefactoringTest(unittest.TestCase): def setUp(self): super().setUp() self.project = testutils.sample_project() def tearDown(self): testutils.remove_project(self.project) super().tearDown() def _local_rename(self, source_code, offset, new_name): testmod = testutils.create_module(self.project, 'testmod') testmod.write(source_code) changes = Rename(self.project, testmod, offset).get_changes(new_name, resources=[testmod]) self.project.do(changes) return testmod.read() def _rename(self, resource, offset, new_name, **kwds): changes = Rename(self.project, resource, offset).get_changes(new_name, **kwds) self.project.do(changes) def test_local_variable_but_not_parameter(self): code = dedent(' a = 10\n foo = dict(a=a)\n ') refactored = self._local_rename(code, 1, 'new_a') self.assertEqual(dedent(' new_a = 10\n foo = dict(a=new_a)\n '), refactored) def test_simple_global_variable_renaming(self): refactored = self._local_rename('a_var = 20\n', 2, 'new_var') self.assertEqual('new_var = 20\n', refactored) def test_variable_renaming_only_in_its_scope(self): refactored = self._local_rename(dedent(' a_var = 20\n def a_func():\n a_var = 10\n '), 32, 'new_var') self.assertEqual(dedent(' a_var = 20\n def a_func():\n new_var = 10\n '), refactored) def test_not_renaming_dot_name(self): refactored = self._local_rename(dedent(" replace = True\n 'aaa'.replace('a', 'b')\n "), 1, 'new_var') self.assertEqual(dedent(" new_var = True\n 'aaa'.replace('a', 'b')\n "), refactored) def test_renaming_multiple_names_in_the_same_line(self): refactored = self._local_rename(dedent(' a_var = 10\n a_var = 10 + a_var / 2\n '), 2, 'new_var') self.assertEqual(dedent(' new_var = 10\n new_var = 10 + new_var / 2\n '), refactored) def test_renaming_names_when_getting_some_attribute(self): refactored = self._local_rename(dedent(" a_var = 'a b c'\n a_var.split('\\n')\n "), 2, 'new_var') self.assertEqual(dedent(" new_var = 'a b c'\n new_var.split('\\n')\n "), refactored) def 
test_renaming_names_when_getting_some_attribute2(self): refactored = self._local_rename(dedent(" a_var = 'a b c'\n a_var.split('\\n')\n "), 20, 'new_var') self.assertEqual(dedent(" new_var = 'a b c'\n new_var.split('\\n')\n "), refactored) def test_renaming_function_parameters1(self): refactored = self._local_rename(dedent(' def f(a_param):\n print(a_param)\n '), 8, 'new_param') self.assertEqual(dedent(' def f(new_param):\n print(new_param)\n '), refactored) def test_renaming_function_parameters2(self): refactored = self._local_rename(dedent(' def f(a_param):\n print(a_param)\n '), 30, 'new_param') self.assertEqual(dedent(' def f(new_param):\n print(new_param)\n '), refactored) def test_renaming_occurrences_inside_functions(self): code = dedent(' def a_func(p1):\n a = p1\n a_func(1)\n ') refactored = self._local_rename(code, (code.index('p1') + 1), 'new_param') self.assertEqual(dedent(' def a_func(new_param):\n a = new_param\n a_func(1)\n '), refactored) def test_renaming_comprehension_loop_variables(self): code = '[b_var for b_var, c_var in d_var if b_var == c_var]' refactored = self._local_rename(code, (code.index('b_var') + 1), 'new_var') self.assertEqual('[new_var for new_var, c_var in d_var if new_var == c_var]', refactored) def test_renaming_list_comprehension_loop_variables_in_assignment(self): code = 'a_var = [b_var for b_var, c_var in d_var if b_var == c_var]' refactored = self._local_rename(code, (code.index('b_var') + 1), 'new_var') self.assertEqual('a_var = [new_var for new_var, c_var in d_var if new_var == c_var]', refactored) def test_renaming_generator_comprehension_loop_variables(self): code = 'a_var = (b_var for b_var, c_var in d_var if b_var == c_var)' refactored = self._local_rename(code, (code.index('b_var') + 1), 'new_var') self.assertEqual('a_var = (new_var for new_var, c_var in d_var if new_var == c_var)', refactored) def test_renaming_comprehension_loop_variables_scope(self): code = dedent(' [b_var for b_var, c_var in d_var if b_var == 
c_var]\n b_var = 10\n ') refactored = self._local_rename(code, (code.index('b_var') + 1), 'new_var') self.assertEqual(dedent(' [new_var for new_var, c_var in d_var if new_var == c_var]\n b_var = 10\n '), refactored) _for_versions_higher('3.8') def test_renaming_inline_assignment(self): code = dedent(' while a_var := next(foo):\n print(a_var)\n ') refactored = self._local_rename(code, (code.index('a_var') + 1), 'new_var') self.assertEqual(dedent(' while new_var := next(foo):\n print(new_var)\n '), refactored) def test_renaming_arguments_for_normal_args_changing_calls(self): code = dedent(' def a_func(p1=None, p2=None):\n pass\n a_func(p2=1)\n ') refactored = self._local_rename(code, (code.index('p2') + 1), 'p3') self.assertEqual(dedent(' def a_func(p1=None, p3=None):\n pass\n a_func(p3=1)\n '), refactored) def test_renaming_function_parameters_of_class_init(self): code = dedent(' class A(object):\n def __init__(self, a_param):\n pass\n a_var = A(a_param=1)\n ') refactored = self._local_rename(code, (code.index('a_param') + 1), 'new_param') expected = dedent(' class A(object):\n def __init__(self, new_param):\n pass\n a_var = A(new_param=1)\n ') self.assertEqual(expected, refactored) def test_rename_functions_parameters_and_occurrences_in_other_modules(self): mod1 = testutils.create_module(self.project, 'mod1') mod2 = testutils.create_module(self.project, 'mod2') mod1.write(dedent(' def a_func(a_param):\n print(a_param)\n ')) mod2.write(dedent(' from mod1 import a_func\n a_func(a_param=10)\n ')) self._rename(mod1, (mod1.read().index('a_param') + 1), 'new_param') self.assertEqual(dedent(' def a_func(new_param):\n print(new_param)\n '), mod1.read()) self.assertEqual(dedent(' from mod1 import a_func\n a_func(new_param=10)\n '), mod2.read()) def test_renaming_with_backslash_continued_names(self): refactored = self._local_rename("replace = True\n'ali'.\\\nreplace\n", 2, 'is_replace') self.assertEqual("is_replace = True\n'ali'.\\\nreplace\n", refactored) def 
test_renaming_occurrence_in_f_string(self): code = dedent(" a_var = 20\n a_string=f'value: {a_var}'\n ") expected = dedent(" new_var = 20\n a_string=f'value: {new_var}'\n ") refactored = self._local_rename(code, 2, 'new_var') self.assertEqual(expected, refactored) def test_renaming_occurrence_in_nested_f_string(self): code = dedent(' a_var = 20\n a_string=f\'{f"{a_var}"}\'\n ') expected = dedent(' new_var = 20\n a_string=f\'{f"{new_var}"}\'\n ') refactored = self._local_rename(code, 2, 'new_var') self.assertEqual(expected, refactored) def test_not_renaming_string_contents_in_f_string(self): refactored = self._local_rename('a_var = 20\na_string=f\'{"a_var"}\'\n', 2, 'new_var') self.assertEqual(dedent(' new_var = 20\n a_string=f\'{"a_var"}\'\n '), refactored) def test_not_renaming_string_contents(self): refactored = self._local_rename("a_var = 20\na_string='a_var'\n", 2, 'new_var') self.assertEqual(dedent(" new_var = 20\n a_string='a_var'\n "), refactored) def test_not_renaming_comment_contents(self): refactored = self._local_rename('a_var = 20\n# a_var\n', 2, 'new_var') self.assertEqual(dedent(' new_var = 20\n # a_var\n '), refactored) def test_renaming_all_occurrences_in_containing_scope(self): code = dedent(' if True:\n a_var = 1\n else:\n a_var = 20\n ') refactored = self._local_rename(code, 16, 'new_var') self.assertEqual(dedent(' if True:\n new_var = 1\n else:\n new_var = 20\n '), refactored) def test_renaming_a_variable_with_arguement_name(self): code = dedent(' a_var = 10\n def a_func(a_var):\n print(a_var)\n ') refactored = self._local_rename(code, 1, 'new_var') self.assertEqual(dedent(' new_var = 10\n def a_func(a_var):\n print(a_var)\n '), refactored) def test_renaming_an_arguement_with_variable_name(self): code = dedent(' a_var = 10\n def a_func(a_var):\n print(a_var)\n ') refactored = self._local_rename(code, (len(code) - 3), 'new_var') self.assertEqual(dedent(' a_var = 10\n def a_func(new_var):\n print(new_var)\n '), refactored) def 
test_renaming_function_with_local_variable_name(self): code = dedent(' def a_func():\n a_func=20\n a_func()') refactored = self._local_rename(code, (len(code) - 3), 'new_func') self.assertEqual(dedent(' def new_func():\n a_func=20\n new_func()'), refactored) def test_renaming_functions(self): code = dedent(' def a_func():\n pass\n a_func()\n ') refactored = self._local_rename(code, (len(code) - 5), 'new_func') self.assertEqual(dedent(' def new_func():\n pass\n new_func()\n '), refactored) def test_renaming_async_function(self): code = dedent(' async def a_func():\n pass\n a_func()') refactored = self._local_rename(code, (len(code) - 5), 'new_func') self.assertEqual(dedent(' async def new_func():\n pass\n new_func()'), refactored) def test_renaming_await(self): code = dedent(' async def b_func():\n pass\n async def a_func():\n await b_func()') refactored = self._local_rename(code, (len(code) - 5), 'new_func') self.assertEqual(dedent(' async def new_func():\n pass\n async def a_func():\n await new_func()'), refactored) def test_renaming_functions_across_modules(self): mod1 = testutils.create_module(self.project, 'mod1') mod1.write(dedent(' def a_func():\n pass\n a_func()\n ')) mod2 = testutils.create_module(self.project, 'mod2') mod2.write(dedent(' import mod1\n mod1.a_func()\n ')) self._rename(mod1, (len(mod1.read()) - 5), 'new_func') self.assertEqual(dedent(' def new_func():\n pass\n new_func()\n '), mod1.read()) self.assertEqual(dedent(' import mod1\n mod1.new_func()\n '), mod2.read()) def test_renaming_functions_across_modules_from_import(self): mod1 = testutils.create_module(self.project, 'mod1') mod1.write(dedent(' def a_func():\n pass\n a_func()\n ')) mod2 = testutils.create_module(self.project, 'mod2') mod2.write(dedent(' from mod1 import a_func\n a_func()\n ')) self._rename(mod1, (len(mod1.read()) - 5), 'new_func') self.assertEqual(dedent(' def new_func():\n pass\n new_func()\n '), mod1.read()) self.assertEqual(dedent(' from mod1 import new_func\n 
new_func()\n '), mod2.read()) def test_renaming_functions_from_another_module(self): mod1 = testutils.create_module(self.project, 'mod1') mod1.write(dedent(' def a_func():\n pass\n a_func()\n ')) mod2 = testutils.create_module(self.project, 'mod2') mod2.write(dedent(' import mod1\n mod1.a_func()\n ')) self._rename(mod2, (len(mod2.read()) - 5), 'new_func') self.assertEqual(dedent(' def new_func():\n pass\n new_func()\n '), mod1.read()) self.assertEqual(dedent(' import mod1\n mod1.new_func()\n '), mod2.read()) def test_applying_all_changes_together(self): mod1 = testutils.create_module(self.project, 'mod1') mod1.write(dedent(' import mod2\n mod2.a_func()\n ')) mod2 = testutils.create_module(self.project, 'mod2') mod2.write(dedent(' def a_func():\n pass\n a_func()\n ')) self._rename(mod2, (len(mod2.read()) - 5), 'new_func') self.assertEqual(dedent(' import mod2\n mod2.new_func()\n '), mod1.read()) self.assertEqual(dedent(' def new_func():\n pass\n new_func()\n '), mod2.read()) def test_renaming_modules(self): mod1 = testutils.create_module(self.project, 'mod1') mod1.write(dedent(' def a_func():\n pass\n ')) mod2 = testutils.create_module(self.project, 'mod2') mod2.write('from mod1 import a_func\n') self._rename(mod2, (mod2.read().index('mod1') + 1), 'newmod') self.assertTrue(((not mod1.exists()) and (self.project.find_module('newmod') is not None))) self.assertEqual('from newmod import a_func\n', mod2.read()) def test_renaming_modules_aliased(self): mod1 = testutils.create_module(self.project, 'mod1') mod1.write(dedent(' def a_func():\n pass\n ')) mod2 = testutils.create_module(self.project, 'mod2') mod2.write(dedent(' import mod1 as m\n m.a_func()\n ')) self._rename(mod1, None, 'newmod') self.assertTrue(((not mod1.exists()) and (self.project.find_module('newmod') is not None))) self.assertEqual('import newmod as m\nm.a_func()\n', mod2.read()) def test_renaming_packages(self): pkg = testutils.create_package(self.project, 'pkg') mod1 = 
testutils.create_module(self.project, 'mod1', pkg) mod1.write(dedent(' def a_func():\n pass\n ')) mod2 = testutils.create_module(self.project, 'mod2', pkg) mod2.write('from pkg.mod1 import a_func\n') self._rename(mod2, 6, 'newpkg') self.assertTrue((self.project.find_module('newpkg.mod1') is not None)) new_mod2 = self.project.find_module('newpkg.mod2') self.assertEqual('from newpkg.mod1 import a_func\n', new_mod2.read()) def test_module_dependencies(self): mod1 = testutils.create_module(self.project, 'mod1') mod1.write(dedent(' class AClass(object):\n pass\n ')) mod2 = testutils.create_module(self.project, 'mod2') mod2.write(dedent(' import mod1\n a_var = mod1.AClass()\n ')) self.project.get_pymodule(mod2).get_attributes()['mod1'] mod1.write(dedent(' def AClass():\n return 0\n ')) self._rename(mod2, (len(mod2.read()) - 3), 'a_func') self.assertEqual(dedent(' def a_func():\n return 0\n '), mod1.read()) self.assertEqual(dedent(' import mod1\n a_var = mod1.a_func()\n '), mod2.read()) def test_renaming_class_attributes(self): mod1 = testutils.create_module(self.project, 'mod1') mod1.write(dedent(' class AClass(object):\n def __init__(self):\n self.an_attr = 10\n ')) mod2 = testutils.create_module(self.project, 'mod2') mod2.write(dedent(' import mod1\n a_var = mod1.AClass()\n another_var = a_var.an_attr')) self._rename(mod1, mod1.read().index('an_attr'), 'attr') self.assertEqual(dedent(' class AClass(object):\n def __init__(self):\n self.attr = 10\n '), mod1.read()) self.assertEqual(dedent(' import mod1\n a_var = mod1.AClass()\n another_var = a_var.attr'), mod2.read()) def test_renaming_class_attributes2(self): mod1 = testutils.create_module(self.project, 'mod1') mod1.write(dedent(' class AClass(object):\n def __init__(self):\n an_attr = 10\n self.an_attr = 10\n ')) mod2 = testutils.create_module(self.project, 'mod2') mod2.write(dedent(' import mod1\n a_var = mod1.AClass()\n another_var = a_var.an_attr')) self._rename(mod1, mod1.read().rindex('an_attr'), 'attr') 
self.assertEqual(dedent(' class AClass(object):\n def __init__(self):\n an_attr = 10\n self.attr = 10\n '), mod1.read()) self.assertEqual(dedent(' import mod1\n a_var = mod1.AClass()\n another_var = a_var.attr'), mod2.read()) def test_renaming_methods_in_subclasses(self): mod = testutils.create_module(self.project, 'mod1') mod.write(dedent(' class A(object):\n def a_method(self):\n pass\n class B(A):\n def a_method(self):\n pass\n ')) self._rename(mod, (mod.read().rindex('a_method') + 1), 'new_method', in_hierarchy=True) self.assertEqual(dedent(' class A(object):\n def new_method(self):\n pass\n class B(A):\n def new_method(self):\n pass\n '), mod.read()) def test_renaming_methods_in_sibling_classes(self): mod = testutils.create_module(self.project, 'mod1') mod.write(dedent(' class A(object):\n def a_method(self):\n pass\n class B(A):\n def a_method(self):\n pass\n class C(A):\n def a_method(self):\n pass\n ')) self._rename(mod, (mod.read().rindex('a_method') + 1), 'new_method', in_hierarchy=True) self.assertEqual(dedent(' class A(object):\n def new_method(self):\n pass\n class B(A):\n def new_method(self):\n pass\n class C(A):\n def new_method(self):\n pass\n '), mod.read()) def test_not_renaming_methods_in_hierarchies(self): mod = testutils.create_module(self.project, 'mod1') mod.write(dedent(' class A(object):\n def a_method(self):\n pass\n class B(A):\n def a_method(self):\n pass\n ')) self._rename(mod, (mod.read().rindex('a_method') + 1), 'new_method', in_hierarchy=False) self.assertEqual(dedent(' class A(object):\n def a_method(self):\n pass\n class B(A):\n def new_method(self):\n pass\n '), mod.read()) def test_undoing_refactorings(self): mod1 = testutils.create_module(self.project, 'mod1') mod1.write(dedent(' def a_func():\n pass\n a_func()\n ')) self._rename(mod1, (len(mod1.read()) - 5), 'new_func') self.project.history.undo() self.assertEqual(dedent(' def a_func():\n pass\n a_func()\n '), mod1.read()) def test_undoing_renaming_modules(self): mod1 = 
testutils.create_module(self.project, 'mod1') mod1.write(dedent(' def a_func():\n pass\n ')) mod2 = testutils.create_module(self.project, 'mod2') mod2.write('from mod1 import a_func\n') self._rename(mod2, 6, 'newmod') self.project.history.undo() self.assertEqual('mod1.py', mod1.path) self.assertEqual('from mod1 import a_func\n', mod2.read()) def test_rename_in_module_renaming_one_letter_names_for_expressions(self): mod1 = testutils.create_module(self.project, 'mod1') mod1.write('a = 10\nprint(1+a)\n') pymod = self.project.get_module('mod1') old_pyname = pymod['a'] finder = rope.refactor.occurrences.create_finder(self.project, 'a', old_pyname) refactored = rename.rename_in_module(finder, 'new_var', pymodule=pymod, replace_primary=True) self.assertEqual(dedent(' new_var = 10\n print(1+new_var)\n '), refactored) def test_renaming_for_loop_variable(self): code = dedent(' for var in range(10):\n print(var)\n ') refactored = self._local_rename(code, (code.find('var') + 1), 'new_var') self.assertEqual(dedent(' for new_var in range(10):\n print(new_var)\n '), refactored) def test_renaming_async_for_loop_variable(self): code = dedent(' async def func():\n async for var in range(10):\n print(var)\n ') refactored = self._local_rename(code, (code.find('var') + 1), 'new_var') self.assertEqual(dedent(' async def func():\n async for new_var in range(10):\n print(new_var)\n '), refactored) def test_renaming_async_with_context_manager(self): code = dedent(' def a_cm(): pass\n async def a_func():\n async with a_cm() as x: pass') refactored = self._local_rename(code, (code.find('a_cm') + 1), 'another_cm') expected = dedent(' def another_cm(): pass\n async def a_func():\n async with another_cm() as x: pass') self.assertEqual(refactored, expected) def test_renaming_async_with_as_variable(self): code = dedent(' async def func():\n async with a_func() as var:\n print(var)\n ') refactored = self._local_rename(code, (code.find('var') + 1), 'new_var') self.assertEqual(dedent(' async def 
func():\n async with a_func() as new_var:\n print(new_var)\n '), refactored) def test_renaming_parameters(self): code = dedent(' def a_func(param):\n print(param)\n a_func(param=hey)\n ') refactored = self._local_rename(code, (code.find('param') + 1), 'new_param') self.assertEqual(dedent(' def a_func(new_param):\n print(new_param)\n a_func(new_param=hey)\n '), refactored) def test_renaming_assigned_parameters(self): code = dedent(' def f(p):\n p = p + 1\n return p\n f(p=1)\n ') refactored = self._local_rename(code, code.find('p'), 'arg') self.assertEqual(dedent(' def f(arg):\n arg = arg + 1\n return arg\n f(arg=1)\n '), refactored) def test_renaming_parameters_not_renaming_others(self): code = dedent(' def a_func(param):\n print(param)\n param=10\n a_func(param)\n ') refactored = self._local_rename(code, (code.find('param') + 1), 'new_param') self.assertEqual(dedent(' def a_func(new_param):\n print(new_param)\n param=10\n a_func(param)\n '), refactored) def test_renaming_parameters_not_renaming_others2(self): code = dedent(' def a_func(param):\n print(param)\n param=10\n a_func(param=param)') refactored = self._local_rename(code, (code.find('param') + 1), 'new_param') self.assertEqual(dedent(' def a_func(new_param):\n print(new_param)\n param=10\n a_func(new_param=param)'), refactored) def test_renaming_parameters_with_multiple_params(self): code = dedent(' def a_func(param1, param2):\n print(param1)\n a_func(param1=1, param2=2)\n ') refactored = self._local_rename(code, (code.find('param1') + 1), 'new_param') self.assertEqual(dedent(' def a_func(new_param, param2):\n print(new_param)\n a_func(new_param=1, param2=2)\n '), refactored) def test_renaming_parameters_with_multiple_params2(self): code = dedent(' def a_func(param1, param2):\n print(param1)\n a_func(param1=1, param2=2)\n ') refactored = self._local_rename(code, (code.rfind('param2') + 1), 'new_param') self.assertEqual(dedent(' def a_func(param1, new_param):\n print(param1)\n a_func(param1=1, new_param=2)\n 
'), refactored) def test_renaming_parameters_on_calls(self): code = dedent(' def a_func(param):\n print(param)\n a_func(param = hey)\n ') refactored = self._local_rename(code, (code.rfind('param') + 1), 'new_param') self.assertEqual(dedent(' def a_func(new_param):\n print(new_param)\n a_func(new_param = hey)\n '), refactored) def test_renaming_parameters_spaces_before_call(self): code = dedent(' def a_func(param):\n print(param)\n a_func (param=hey)\n ') refactored = self._local_rename(code, (code.rfind('param') + 1), 'new_param') self.assertEqual(dedent(' def a_func(new_param):\n print(new_param)\n a_func (new_param=hey)\n '), refactored) def test_renaming_parameter_like_objects_after_keywords(self): code = dedent(' def a_func(param):\n print(param)\n dict(param=hey)\n ') refactored = self._local_rename(code, (code.find('param') + 1), 'new_param') self.assertEqual(dedent(' def a_func(new_param):\n print(new_param)\n dict(param=hey)\n '), refactored) def test_renaming_variables_in_init_dot_pys(self): pkg = testutils.create_package(self.project, 'pkg') init_dot_py = pkg.get_child('__init__.py') init_dot_py.write('a_var = 10\n') mod = testutils.create_module(self.project, 'mod') mod.write('import pkg\nprint(pkg.a_var)\n') self._rename(mod, (mod.read().index('a_var') + 1), 'new_var') self.assertEqual('new_var = 10\n', init_dot_py.read()) self.assertEqual('import pkg\nprint(pkg.new_var)\n', mod.read()) def test_renaming_variables_in_init_dot_pys2(self): pkg = testutils.create_package(self.project, 'pkg') init_dot_py = pkg.get_child('__init__.py') init_dot_py.write('a_var = 10\n') mod = testutils.create_module(self.project, 'mod') mod.write('import pkg\nprint(pkg.a_var)\n') self._rename(init_dot_py, (init_dot_py.read().index('a_var') + 1), 'new_var') self.assertEqual('new_var = 10\n', init_dot_py.read()) self.assertEqual('import pkg\nprint(pkg.new_var)\n', mod.read()) def test_renaming_variables_in_init_dot_pys3(self): pkg = testutils.create_package(self.project, 'pkg') 
init_dot_py = pkg.get_child('__init__.py') init_dot_py.write('a_var = 10\n') mod = testutils.create_module(self.project, 'mod') mod.write('import pkg\nprint(pkg.a_var)\n') self._rename(mod, (mod.read().index('a_var') + 1), 'new_var') self.assertEqual('new_var = 10\n', init_dot_py.read()) self.assertEqual('import pkg\nprint(pkg.new_var)\n', mod.read()) def test_renaming_resources_using_rename_module_refactoring(self): mod1 = testutils.create_module(self.project, 'mod1') mod2 = testutils.create_module(self.project, 'mod2') mod1.write('a_var = 1') mod2.write('import mod1\nmy_var = mod1.a_var\n') renamer = rename.Rename(self.project, mod1) renamer.get_changes('newmod').do() self.assertEqual('import newmod\nmy_var = newmod.a_var\n', mod2.read()) def test_renam_resources_using_rename_module_refactor_for_packages(self): mod1 = testutils.create_module(self.project, 'mod1') pkg = testutils.create_package(self.project, 'pkg') mod1.write('import pkg\nmy_pkg = pkg') renamer = rename.Rename(self.project, pkg) renamer.get_changes('newpkg').do() self.assertEqual('import newpkg\nmy_pkg = newpkg', mod1.read()) def test_renam_resources_use_rename_module_refactor_for_init_dot_py(self): mod1 = testutils.create_module(self.project, 'mod1') pkg = testutils.create_package(self.project, 'pkg') mod1.write('import pkg\nmy_pkg = pkg') renamer = rename.Rename(self.project, pkg.get_child('__init__.py')) renamer.get_changes('newpkg').do() self.assertEqual('import newpkg\nmy_pkg = newpkg', mod1.read()) def test_renaming_global_variables(self): code = dedent(' a_var = 1\n def a_func():\n global a_var\n var = a_var\n ') refactored = self._local_rename(code, code.index('a_var'), 'new_var') self.assertEqual(dedent(' new_var = 1\n def a_func():\n global new_var\n var = new_var\n '), refactored) def test_renaming_global_variables2(self): code = dedent(' a_var = 1\n def a_func():\n global a_var\n var = a_var\n ') refactored = self._local_rename(code, code.rindex('a_var'), 'new_var') 
self.assertEqual(dedent(' new_var = 1\n def a_func():\n global new_var\n var = new_var\n '), refactored) def test_renaming_when_unsure(self): code = dedent(' class C(object):\n def a_func(self):\n pass\n def f(arg):\n arg.a_func()\n ') mod1 = testutils.create_module(self.project, 'mod1') mod1.write(code) self._rename(mod1, code.index('a_func'), 'new_func', unsure=self._true) self.assertEqual(dedent(' class C(object):\n def new_func(self):\n pass\n def f(arg):\n arg.new_func()\n '), mod1.read()) def _true(self, *args): return True def test_renaming_when_unsure_with_confirmation(self): def confirm(occurrence): return False code = dedent(' class C(object):\n def a_func(self):\n pass\n def f(arg):\n arg.a_func()\n ') mod1 = testutils.create_module(self.project, 'mod1') mod1.write(code) self._rename(mod1, code.index('a_func'), 'new_func', unsure=confirm) self.assertEqual(dedent(' class C(object):\n def new_func(self):\n pass\n def f(arg):\n arg.a_func()\n '), mod1.read()) def test_renaming_when_unsure_not_renaming_knowns(self): code = dedent(' class C1(object):\n def a_func(self):\n pass\n class C2(object):\n def a_func(self):\n pass\n c1 = C1()\n c1.a_func()\n c2 = C2()\n c2.a_func()\n ') mod1 = testutils.create_module(self.project, 'mod1') mod1.write(code) self._rename(mod1, code.index('a_func'), 'new_func', unsure=self._true) self.assertEqual(dedent(' class C1(object):\n def new_func(self):\n pass\n class C2(object):\n def a_func(self):\n pass\n c1 = C1()\n c1.new_func()\n c2 = C2()\n c2.a_func()\n '), mod1.read()) def test_renaming_in_strings_and_comments(self): code = dedent(' a_var = 1\n # a_var\n ') mod1 = testutils.create_module(self.project, 'mod1') mod1.write(code) self._rename(mod1, code.index('a_var'), 'new_var', docs=True) self.assertEqual(dedent(' new_var = 1\n # new_var\n '), mod1.read()) def test_not_renaming_in_strings_and_comments_where_not_visible(self): code = dedent(' def f():\n a_var = 1\n # a_var\n ') mod1 = testutils.create_module(self.project, 
'mod1') mod1.write(code) self._rename(mod1, code.index('a_var'), 'new_var', docs=True) self.assertEqual(dedent(' def f():\n new_var = 1\n # a_var\n '), mod1.read()) def test_not_renaming_all_text_occurrences_in_strings_and_comments(self): code = dedent(' a_var = 1\n # a_vard _a_var\n ') mod1 = testutils.create_module(self.project, 'mod1') mod1.write(code) self._rename(mod1, code.index('a_var'), 'new_var', docs=True) self.assertEqual(dedent(' new_var = 1\n # a_vard _a_var\n '), mod1.read()) def test_renaming_occurrences_in_overwritten_scopes(self): refactored = self._local_rename(dedent(' a_var = 20\n def f():\n print(a_var)\n def f():\n print(a_var)\n '), 2, 'new_var') self.assertEqual(dedent(' new_var = 20\n def f():\n print(new_var)\n def f():\n print(new_var)\n '), refactored) def test_renaming_occurrences_in_overwritten_scopes2(self): code = dedent(' def f():\n a_var = 1\n print(a_var)\n def f():\n a_var = 1\n print(a_var)\n ') refactored = self._local_rename(code, (code.index('a_var') + 1), 'new_var') self.assertEqual(code.replace('a_var', 'new_var', 2), refactored) _for_versions_higher('3.5') def test_renaming_in_generalized_dict_unpacking(self): code = dedent(' a_var = {**{\'stuff\': \'can\'}, **{\'stuff\': \'crayon\'}}\n\n if "stuff" in a_var:\n print("ya")\n ') mod1 = testutils.create_module(self.project, 'mod1') mod1.write(code) refactored = self._local_rename(code, (code.index('a_var') + 1), 'new_var') expected = dedent(' new_var = {**{\'stuff\': \'can\'}, **{\'stuff\': \'crayon\'}}\n\n if "stuff" in new_var:\n print("ya")\n ') self.assertEqual(expected, refactored) def test_dos_line_ending_and_renaming(self): code = '\r\na = 1\r\n\r\nprint(2 + a + 2)\r\n' offset = code.replace('\r\n', '\n').rindex('a') refactored = self._local_rename(code, offset, 'b') self.assertEqual('\nb = 1\n\nprint(2 + b + 2)\n', refactored.replace('\r\n', '\n')) def test_multi_byte_strs_and_renaming(self): s = ('{LATIN SMALL LETTER I WITH DIAERESIS}' * 4) code = (('# -*- coding: 
utf-8 -*-\n# ' + s) + '\na = 1\nprint(2 + a + 2)\n') refactored = self._local_rename(code, code.rindex('a'), 'b') self.assertEqual((('# -*- coding: utf-8 -*-\n# ' + s) + '\nb = 1\nprint(2 + b + 2)\n'), refactored) def test_resources_parameter(self): mod1 = testutils.create_module(self.project, 'mod1') mod2 = testutils.create_module(self.project, 'mod2') mod1.write(dedent(' def f():\n pass\n ')) mod2.write(dedent(' import mod1\n mod1.f()\n ')) self._rename(mod1, mod1.read().rindex('f'), 'g', resources=[mod1]) self.assertEqual(dedent(' def g():\n pass\n '), mod1.read()) self.assertEqual(dedent(' import mod1\n mod1.f()\n '), mod2.read()) def test_resources_parameter_not_changing_defining_module(self): mod1 = testutils.create_module(self.project, 'mod1') mod2 = testutils.create_module(self.project, 'mod2') mod1.write(dedent(' def f():\n pass\n ')) mod2.write(dedent(' import mod1\n mod1.f()\n ')) self._rename(mod1, mod1.read().rindex('f'), 'g', resources=[mod2]) self.assertEqual(dedent(' def f():\n pass\n '), mod1.read()) self.assertEqual(dedent(' import mod1\n mod1.g()\n '), mod2.read()) def xxx_test_with_statement_variables_should_not_leak(self): code = dedent(' f = 1\n with open("1.txt") as f:\n print(f)\n ') if (sys.version_info < (2, 6, 0)): code = ('from __future__ import with_statement\n' + code) mod1 = testutils.create_module(self.project, 'mod1') mod1.write(code) self._rename(mod1, code.rindex('f'), 'file') expected = dedent(' f = 1\n with open("1.txt") as file:\n print(file)\n ') self.assertEqual(expected, mod1.read()) def test_rename_in_list_comprehension(self): code = dedent(' some_var = 1\n compr = [some_var for some_var in range(10)]\n ') offset = code.index('some_var') refactored = self._local_rename(code, offset, 'new_var') expected = dedent(' new_var = 1\n compr = [some_var for some_var in range(10)]\n ') self.assertEqual(refactored, expected) def test_renaming_modules_aliased_with_dots(self): pkg = testutils.create_package(self.project, 'json') mod1 = 
testutils.create_module(self.project, 'utils', pkg) mod2 = testutils.create_module(self.project, 'mod2') mod2.write(dedent(' import json.utils as stdlib_json_utils\n ')) self._rename(pkg, None, 'new_json') self.assertTrue(((not mod1.exists()) and (self.project.find_module('new_json.utils') is not None))) self.assertEqual('import new_json.utils as stdlib_json_utils\n', mod2.read()) def test_renaming_modules_aliased_many_dots(self): pkg = testutils.create_package(self.project, 'json') mod1 = testutils.create_module(self.project, 'utils', pkg) mod2 = testutils.create_module(self.project, 'mod2') mod2.write(dedent(' import json.utils.a as stdlib_json_utils\n ')) self._rename(pkg, None, 'new_json') self.assertTrue(((not mod1.exists()) and (self.project.find_module('new_json.utils') is not None))) self.assertEqual('import new_json.utils.a as stdlib_json_utils\n', mod2.read())
def test_update_legacy_tasks(db, settings):
    """Re-importing the legacy elements XML updates (never creates) records.

    NOTE(review): despite the "tasks" name this test reads ``views.xml`` —
    confirm that is intended.
    """
    xml_file = Path(settings.BASE_DIR) / 'xml' / 'elements' / 'legacy' / 'views.xml'
    root = read_xml_file(xml_file)
    version = root.attrib.get('version')

    # Parse, convert to the current schema version, and order for import.
    elements = order_elements(convert_elements(flat_xml_to_elements(root), version)).values()
    import_elements(elements)

    # Everything in the file must have been imported as an update.
    assert len(root) == len(elements) == 3
    assert all(element['created'] is False for element in elements)
    assert all(element['updated'] is True for element in elements)
def build_transforms(cfg, is_train=True):
    """Assemble the image transform pipeline described by ``cfg.INPUT``.

    Evaluation pipelines only resize and convert to tensor. Training
    pipelines optionally add (in this fixed order): AutoAugment, resize,
    horizontal flip, pad + random crop, color jitter, AugMix, tensor
    conversion, random erasing, and random patch.

    Args:
        cfg: config node with an ``INPUT`` section.
        is_train: build the augmented training pipeline when True.

    Returns:
        A ``T.Compose`` wrapping the selected transforms.
    """
    pipeline = []

    if not is_train:
        pipeline.append(T.Resize(cfg.INPUT.SIZE_TEST, interpolation=3))
        pipeline.append(ToTensor())
        return T.Compose(pipeline)

    inp = cfg.INPUT
    size_train = inp.SIZE_TRAIN

    if inp.DO_AUTOAUG:
        pipeline.append(T.RandomApply([AutoAugment()], p=inp.AUTOAUG_PROB))
    pipeline.append(T.Resize(size_train, interpolation=3))
    if inp.DO_FLIP:
        pipeline.append(T.RandomHorizontalFlip(p=inp.FLIP_PROB))
    if inp.DO_PAD:
        pipeline.extend([T.Pad(inp.PADDING, padding_mode=inp.PADDING_MODE),
                         T.RandomCrop(size_train)])
    if inp.CJ.ENABLED:
        jitter = T.ColorJitter(inp.CJ.BRIGHTNESS, inp.CJ.CONTRAST,
                               inp.CJ.SATURATION, inp.CJ.HUE)
        pipeline.append(T.RandomApply([jitter], p=inp.CJ.PROB))
    if inp.DO_AUGMIX:
        pipeline.append(T.RandomApply([AugMix()], p=inp.AUGMIX_PROB))
    pipeline.append(ToTensor())
    # Erasing/patching operate on tensors, so they come after ToTensor().
    if inp.REA.ENABLED:
        pipeline.append(T.RandomErasing(p=inp.REA.PROB, value=inp.REA.VALUE))
    if inp.RPT.ENABLED:
        pipeline.append(RandomPatch(prob_happen=inp.RPT.PROB))
    return T.Compose(pipeline)
# NOTE(review): the decorator prefix was mangled in the source (only
# ".parametrize" survived); reconstructed as @pytest.mark.parametrize — confirm.
@pytest.mark.parametrize(
    'x, axis, exc',
    [
        (set_test_value(pt.vector(), rng.random(size=(2,)).astype(config.floatX)), None, None),
        (set_test_value(pt.matrix(), rng.random(size=(2, 3)).astype(config.floatX)), 0, None),
        (set_test_value(pt.matrix(), rng.random(size=(2, 3)).astype(config.floatX)), 1, None),
    ],
)
def test_LogSoftmax(x, axis, exc):
    """Check the Numba LogSoftmax implementation against the Python one."""
    graph_out = LogSoftmax(axis=axis)(x)
    fgraph = FunctionGraph(outputs=[graph_out])
    # Expect a warning class when `exc` is given, otherwise run silently.
    ctx = contextlib.suppress() if exc is None else pytest.warns(exc)
    with ctx:
        compare_numba_and_py(
            fgraph,
            [
                i.tag.test_value
                for i in fgraph.inputs
                if not isinstance(i, (SharedVariable, Constant))
            ],
        )
class Migration(migrations.Migration):
    """Drop ``Vote.propagated`` and re-declare ``RankRequest.conference``
    as a one-to-one relation to ``conferences.Conference``."""

    # Must run after the migration that introduced the propagated field.
    dependencies = [('voting', '0012_vote_propagated')]

    operations = [
        # Remove the field added in voting.0012.
        migrations.RemoveField(model_name='vote', name='propagated'),
        # One RankRequest per Conference; deleted along with its conference.
        migrations.AlterField(model_name='rankrequest', name='conference', field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='conferences.Conference', verbose_name='conference'))
    ]
def make_env(name, episode_length, action_repeat, seed, observation_type):
    """Construct and wrap an environment from a suite-prefixed name.

    ``name`` has the form ``"<suite>_<env>"`` where suite is one of
    'dmc', 'gym' or 'sgym'. The env is wrapped with action repeat and
    action rescaling, and — for image observation types — with a
    rendered-observation wrapper.

    Raises:
        NotImplementedError: for an unknown suite prefix.
    """
    suite, env_name = name.split('_', 1)
    rendered = observation_type in ('rgb_image', 'binary_image')

    # Environments listed in IMAGE_CROP_ENVS render larger and get cropped.
    if any(env_id in env_name for env_id in IMAGE_CROP_ENVS):
        size, crop = (240, 320), (12, 25, 12, 25)
    else:
        size, crop = (64, 64), None

    if suite == 'dmc':
        env = make_dm_env(env_name, episode_length)
        render_kwargs = {'height': size[1], 'width': size[0], 'camera_id': 0}
    elif suite == 'gym':
        assert not env_name.startswith('Safe'), "To use safety gym envs, use the 'sgym' prefix."
        env = make_gym_env(env_name, episode_length)
        # Prefer the unprocessed 'state_pixels' mode when the env offers it.
        if 'state_pixels' in env.metadata.get('render.modes'):
            render_kwargs = {'mode': 'state_pixels'}
        else:
            render_kwargs = {'mode': 'rgb_array'}
    elif suite == 'sgym':
        env = make_safety_gym_env(env_name, episode_length, rendered)
        render_kwargs = {'mode': 'vision'}
    else:
        raise NotImplementedError

    env = ActionRepeat(env, action_repeat, suite == 'sgym')
    env = RescaleAction(env, -1.0, 1.0)
    if rendered:
        env = RenderedObservation(env, observation_type, (64, 64), render_kwargs, crop)
    env.seed(seed)
    return env
class GraphConv(nn.Module):
    """Graph convolution layer (simplified DGL-style GraphConv).

    Aggregates neighbor features over the graph with symmetric degree
    normalization (``'both'``) and optionally applies a linear weight
    and an activation.

    Args:
        in_feats: input feature size.
        out_feats: output feature size.
        weight: if True, allocate an internal (in_feats, out_feats) weight;
            otherwise an external weight may be passed to ``forward``.
        activation: optional callable applied to the final output.
    """

    def __init__(self, in_feats, out_feats, weight=False, activation=None):
        super(GraphConv, self).__init__()
        self._in_feats = in_feats
        self._out_feats = out_feats
        # Normalization scheme is hard-wired to symmetric ('both').
        self._norm = 'both'
        if weight:
            self.weight = nn.Parameter(th.Tensor(in_feats, out_feats))
        else:
            # No internal weight; forward() may be given one externally.
            self.register_parameter('weight', None)
        self.reset_parameters()
        self._activation = activation

    def reset_parameters(self):
        """Xavier-initialize the internal weight, if one was allocated."""
        if (self.weight is not None):
            init.xavier_uniform_(self.weight)

    def forward(self, graph, feat, weight=None):
        """Compute the graph convolution of ``feat`` over ``graph``.

        Args:
            graph: DGL graph providing degrees and message passing.
            feat: node feature tensor whose first dim indexes nodes.
            weight: optional external weight; mutually exclusive with an
                internal one.

        Raises:
            DGLError: if ``weight`` is passed while the module already owns
                a weight parameter.
        """
        graph = graph.local_var()
        if (self._norm == 'both'):
            # Source-side normalization 1/sqrt(out_degree); degrees clamped
            # to 1 to avoid division by zero, then reshaped so it broadcasts
            # over the trailing feature dimensions.
            degs = graph.out_degrees().to(feat.device).float().clamp(min=1)
            norm = th.pow(degs, (- 0.5))
            shp = (norm.shape + ((1,) * (feat.dim() - 1)))
            norm = th.reshape(norm, shp)
        if (weight is not None):
            if (self.weight is not None):
                raise DGLError('External weight is provided while at the same time the module has defined its own weight parameter. Please create the module with flag weight=False.')
        else:
            weight = self.weight
        if (self._in_feats > self._out_feats):
            # Shrinking projection: multiply by W first, then aggregate
            # (fewer features flow through message passing).
            if (weight is not None):
                feat = th.matmul(feat, weight)
            feat = (feat * norm)
            graph.srcdata['h'] = feat
            graph.update_all(fn.copy_src(src='h', out='m'), fn.sum(msg='m', out='h'))
            rst = graph.dstdata['h']
        else:
            # Expanding projection: aggregate first, then multiply by W.
            # NOTE(review): unlike the branch above, the source-side `norm`
            # is NOT applied to `feat` here before aggregation; stock DGL
            # applies it in both cases — confirm this asymmetry is intended.
            graph.srcdata['h'] = feat
            graph.update_all(fn.copy_src(src='h', out='m'), fn.sum(msg='m', out='h'))
            rst = graph.dstdata['h']
            if (weight is not None):
                rst = th.matmul(rst, weight)
        if (self._norm != 'none'):
            # Destination-side normalization: 1/sqrt(in_degree) for 'both',
            # plain 1/in_degree otherwise.
            degs = graph.in_degrees().to(feat.device).float().clamp(min=1)
            if (self._norm == 'both'):
                norm = th.pow(degs, (- 0.5))
            else:
                norm = (1.0 / degs)
            shp = (norm.shape + ((1,) * (feat.dim() - 1)))
            norm = th.reshape(norm, shp)
            rst = (rst * norm)
        if (self._activation is not None):
            rst = self._activation(rst)
        return rst
def render_pep440(pieces: Dict[str, Any]) -> str:
    """Render VCS metadata as a PEP 440 version string.

    Produces ``TAG[+DISTANCE.gSHORT[.dirty]]`` when a tag is reachable,
    otherwise ``0+untagged.DISTANCE.gSHORT[.dirty]``.

    Args:
        pieces: mapping with keys 'closest-tag', 'distance', 'short', 'dirty'.

    Returns:
        The rendered version string.
    """
    tag = pieces['closest-tag']
    if tag:
        rendered = tag
        # Only append the local segment when we are past the tag or dirty.
        if pieces['distance'] or pieces['dirty']:
            rendered += plus_or_dot(pieces)
            rendered += f"{pieces['distance']:d}.g{pieces['short']}"
    else:
        # No reachable tag: fall back to the 0+untagged form.
        rendered = f"0+untagged.{pieces['distance']:d}.g{pieces['short']}"
    if pieces['dirty']:
        rendered += '.dirty'
    return rendered
def test_regularization():
    """The l2_regularization argument selects the underlying estimator.

    With no explicit strength the classifier should cross-validate it
    (LogisticRegressionCV); with a fixed value it should use a plain
    LogisticRegression.
    """
    # The original test built X/y/Z fixtures that were never used
    # (the classifier is never fit here); that dead setup is removed.
    clf = ImportanceWeightedClassifier(loss_function='lr', l2_regularization=None)
    assert isinstance(clf.clf, LogisticRegressionCV)
    clf = ImportanceWeightedClassifier(loss_function='lr', l2_regularization=1.0)
    assert isinstance(clf.clf, LogisticRegression)
def eval_anomaly_detection_coldstart(model, all_train_data, all_train_labels, all_train_timestamps, all_test_data, all_test_labels, all_test_timestamps, delay):
    """Cold-start anomaly detection evaluation.

    For every series, train and test segments are concatenated and encoded
    twice — once with the last timestamp masked and once without; the L1
    gap between the two representations is thresholded adaptively to flag
    anomalies, which are then scored with ``eval_ad_result``.

    Args:
        model: encoder exposing ``encode`` (TS2Vec-style).
        all_train_data / all_test_data: dict of 1-D series per key.
        all_train_labels / all_test_labels: dict of label arrays per key.
        all_train_timestamps / all_test_timestamps: dict of timestamp arrays.
        delay: suppression / scoring tolerance window, in steps.

    Returns:
        tuple: (per-series binary detection arrays, metrics dict with an
        added 'infer_time' entry).
    """
    t = time.time()
    all_data = {}
    all_repr = {}
    all_repr_wom = {}
    for k in all_train_data:
        all_data[k] = np.concatenate([all_train_data[k], all_test_data[k]])
        # Representation with the final timestamp masked out...
        # ('casual' is the encoder's keyword spelling — presumably "causal").
        all_repr[k] = model.encode(all_data[k].reshape(1, (- 1), 1), mask='mask_last', casual=True, sliding_length=1, sliding_padding=200, batch_size=256).squeeze()
        # ...and without masking; their divergence is the anomaly signal.
        all_repr_wom[k] = model.encode(all_data[k].reshape(1, (- 1), 1), casual=True, sliding_length=1, sliding_padding=200, batch_size=256).squeeze()
    res_log = []
    labels_log = []
    timestamps_log = []
    for k in all_data:
        data = all_data[k]
        labels = np.concatenate([all_train_labels[k], all_test_labels[k]])
        timestamps = np.concatenate([all_train_timestamps[k], all_test_timestamps[k]])
        # Per-step L1 distance between unmasked and masked representations.
        err = np.abs((all_repr_wom[k] - all_repr[k])).sum(axis=1)
        # 21-point moving average shifted by one, so the baseline is causal.
        ma = np_shift(bn.move_mean(err, 21), 1)
        err_adj = ((err - ma) / ma)
        MIN_WINDOW = (len(data) // 10)
        # Adaptive threshold: expanding-window mean + 4 * std of err_adj.
        thr = (bn.move_mean(err_adj, len(err_adj), MIN_WINDOW) + (4 * bn.move_std(err_adj, len(err_adj), MIN_WINDOW)))
        res = ((err_adj > thr) * 1)
        # Suppress a detection when another fired within the last `delay` steps.
        for i in range(len(res)):
            if ((i >= delay) and (res[(i - delay):i].sum() >= 1)):
                res[i] = 0
        # Drop the warm-up region where the expanding statistics are undefined.
        res_log.append(res[MIN_WINDOW:])
        labels_log.append(labels[MIN_WINDOW:])
        timestamps_log.append(timestamps[MIN_WINDOW:])
    t = (time.time() - t)
    eval_res = eval_ad_result(res_log, labels_log, timestamps_log, delay)
    eval_res['infer_time'] = t
    return (res_log, eval_res)
class FinallyNonlocalControl(CleanupNonlocalControl):
    """Nonlocal-control wrapper for code inside a ``finally`` block.

    On cleanup it restores the exception info captured on entry (held in
    ``saved``) before control leaves the block.
    """

    def __init__(self, outer: NonlocalControl, saved: Value) -> None:
        super().__init__(outer)
        self.saved = saved

    def gen_cleanup(self, builder: IRBuilder, line: int) -> None:
        done_block = BasicBlock()
        restore_block = BasicBlock()
        # If `saved` holds the error sentinel there is nothing to restore:
        # branch straight to done; otherwise restore the exception info.
        builder.add(Branch(self.saved, done_block, restore_block, Branch.IS_ERROR))
        builder.activate_block(restore_block)
        builder.call_c(restore_exc_info_op, [self.saved], line)
        builder.goto_and_activate(done_block)
class Action():
    """Base class for expression actions rendered from operand values.

    Subclasses set ``format_string`` (one ``{}`` slot per operand); this
    base provides structural equality, serialization, and repr.
    """

    # Template with one positional slot per operand; subclasses override.
    format_string: str = ''

    def __init__(self, *values: '_Operand') -> None:
        self.values = values

    def __eq__(self, other: Any) -> bool:
        """Equal iff same concrete type and pairwise-equal operands."""
        if type(self) is not type(other):
            return False
        if len(self.values) != len(other.values):
            return False
        return all(
            type(mine) is type(theirs) and mine._equals_to(theirs)
            for mine, theirs in zip(self.values, other.values)
        )

    def serialize(self, placeholder_names: Dict[str, str], expression_attribute_values: Dict[str, str]) -> str:
        """Render the template using each operand's serialized form."""
        rendered = (
            value.serialize(placeholder_names, expression_attribute_values)
            for value in self.values
        )
        return self.format_string.format(*rendered)

    def __repr__(self) -> str:
        return self.format_string.format(*(str(value) for value in self.values))
class DylanConsoleLexer(Lexer):
    """Lexer for Dylan interactive console sessions.

    Lines starting with a prompt are accumulated and lexed as Dylan code;
    all other lines are emitted as generic program output.
    """

    name = 'Dylan session'
    aliases = ['dylan-console', 'dylan-repl']
    filenames = ['*.dylan-console']
    mimetypes = ['text/x-dylan-console']
    # NOTE(review): the url value was lost in source mangling; this is the
    # Open Dylan project homepage used by the companion DylanLexer — confirm.
    url = 'http://www.opendylan.org/'
    version_added = '1.6'
    _example = 'dylan-console/console'

    _prompt_re = re.compile(r'\?| ')

    def get_tokens_unprocessed(self, text):
        dylexer = DylanLexer(**self.options)
        code_buffer = ''
        insertions = []
        for match in line_re.finditer(text):
            line = match.group()
            prompt = self._prompt_re.match(line)
            if prompt is not None:
                # Prompt line: record the prompt token, buffer the code part.
                end = prompt.end()
                insertions.append((len(code_buffer), [(0, Generic.Prompt, line[:end])]))
                code_buffer += line[end:]
                continue
            # Output line: flush any buffered code first, then emit output.
            if code_buffer:
                yield from do_insertions(insertions, dylexer.get_tokens_unprocessed(code_buffer))
                code_buffer = ''
                insertions = []
            yield (match.start(), Generic.Output, line)
        if code_buffer:
            yield from do_insertions(insertions, dylexer.get_tokens_unprocessed(code_buffer))
class ExportMutatedModule(ContextMenuSingle):
    """Context-menu entry that copies a mutated module's text to the clipboard."""

    def __init__(self):
        self.mainFrame = gui.mainFrame.MainFrame.getInstance()

    def display(self, callingWindow, srcContext, mainItem):
        """Show the entry only for a mutated module in an active fit."""
        if srcContext != 'fittingModule':
            return False
        if self.mainFrame.getActiveFit() is None:
            return False
        if mainItem is None:
            return False
        return bool(mainItem.isMutated)

    def getText(self, callingWindow, itmContext, mainItem):
        return _t('Copy Module to Clipboard')

    def activate(self, callingWindow, fullContext, mainItem, i):
        # Render the mutated module and place it on the clipboard.
        toClipboard(renderMutant(mainItem, prefix=' '))
# NOTE(review): decorator name was truncated in the source to
# "_against_invalid_ecpoint"; reconstructed as the usual
# @protect_against_invalid_ecpoint — confirm against the original file.
@protect_against_invalid_ecpoint
def CKD_priv(parent_privkey: bytes, parent_chaincode: bytes, child_index: int) -> Tuple[bytes, bytes]:
    """BIP-32 child key derivation for private keys.

    Delegates to ``_CKD_priv`` with the index serialized as 4 big-endian
    bytes, marking the child as hardened when the BIP32_PRIME bit is set.

    Raises:
        ValueError: if ``child_index`` is negative.
    """
    if child_index < 0:
        raise ValueError('the bip32 index needs to be non-negative')
    return _CKD_priv(
        parent_privkey=parent_privkey,
        parent_chaincode=parent_chaincode,
        child_index=bfh(rev_hex(int_to_hex(child_index, 4))),
        is_hardened_child=bool(child_index & BIP32_PRIME),
    )
class EditorPidWatcher(QObject):
    """Watch an editor directory for an ``editor_pid`` file.

    Emits ``appeared`` once the pid file exists with non-empty content.
    """

    appeared = pyqtSignal()

    def __init__(self, directory, parent=None):
        super().__init__(parent)
        self._pidfile = directory / 'editor_pid'
        self._watcher = QFileSystemWatcher(self)
        self._watcher.addPath(str(directory))
        self._watcher.directoryChanged.connect(self._check_update)
        self.has_pidfile = False
        # Handle the pid file already existing at construction time.
        # (A stray no-op `()` expression that followed this call was removed.)
        self._check_update()

    def _check_update(self):
        """Re-check the pid file; emit ``appeared`` once it has content."""
        if self.has_pidfile:
            return
        if self._pidfile.check():
            if self._pidfile.read():
                self.has_pidfile = True
                self.appeared.emit()
            else:
                # File exists but is still empty: also watch the file itself
                # so a later write triggers another check.
                self._watcher.addPath(str(self._pidfile))

    def manual_check(self):
        """Return whether the pid file currently exists."""
        return self._pidfile.check()
def get_measured_qubits(transpiled_circuits):
    """Collect the measured qubit indices shared by all circuits.

    Args:
        transpiled_circuits: iterable of circuits whose ``data`` yields
            ``(instruction, qargs, cargs)`` triples.

    Returns:
        tuple: (sorted list of measured qubit indices, dict mapping the
        '_'-joined measurement-order string to circuit positions).

    Raises:
        AquaError: if any circuit measures a different set of qubits.
    """
    qubit_index = None
    qubit_mappings = {}
    for idx, qc in enumerate(transpiled_circuits):
        # Qubit indices in measurement order for this circuit.
        measured = [qargs[0][1] for inst, qargs, _ in qc.data if inst.name == 'measure']
        key = '_'.join(str(q) for q in measured)
        qubit_mappings.setdefault(key, []).append(idx)
        if qubit_index is None:
            qubit_index = measured
        elif set(qubit_index) != set(measured):
            # Order may differ between circuits, but the set must match.
            raise AquaError('The used qubit index are different. ({}) vs ({}).\nCurrently, we only support all circuits using the same set of qubits regardless qubit order.'.format(qubit_index, measured))
    return (sorted(qubit_index), qubit_mappings)
class Effect4558(BaseEffect):
    """Passive skill effect: boosts thermal damage of charges requiring the
    'XL Cruise Missiles' skill, scaled by the skill's level."""

    type = 'passive'

    def handler(fit, skill, context, projectionRange, **kwargs):
        # Total bonus is the per-level attribute times the trained level.
        total_bonus = skill.getModifiedItemAttr('damageMultiplierBonus') * skill.level
        fit.modules.filteredChargeBoost(
            lambda mod: mod.charge.requiresSkill('XL Cruise Missiles'),
            'thermalDamage',
            total_bonus,
            **kwargs)
def getQDarkStyleDarkQPalette():
    """Build a QPalette replicating the QDarkStyle dark theme.

    Returns:
        QtGui.QPalette: palette with Active/Inactive and Disabled color
        groups populated.
    """
    BG_DARK = QtGui.QColor('#19232D')
    BG_NORMAL = QtGui.QColor('#37414F')
    BG_LIGHT = QtGui.QColor('#455364')
    FG_DARK = QtGui.QColor('#9DA9B5')
    FG_NORMAL = QtGui.QColor('#E0E1E3')
    FG_LIGHT = QtGui.QColor('#F0F0F0')
    # Only the light selection shade is used; the unused SEL_DARK (#1A72BB)
    # and SEL_NORMAL (#26486B) locals from the original were removed.
    SEL_LIGHT = QtGui.QColor('#346792')

    qpal = QtGui.QPalette(QtGui.QColor(BG_DARK))

    # Roles shared by the Active and Inactive color groups.
    shared_roles = [
        (QtGui.QPalette.Base, BG_DARK),
        (QtGui.QPalette.Window, BG_DARK),
        (QtGui.QPalette.WindowText, FG_NORMAL),
        (QtGui.QPalette.AlternateBase, BG_LIGHT),
        (QtGui.QPalette.Button, BG_LIGHT),
        (QtGui.QPalette.ButtonText, FG_LIGHT),
        (QtGui.QPalette.Highlight, SEL_LIGHT),
        (QtGui.QPalette.HighlightedText, FG_LIGHT),
        (QtGui.QPalette.Text, FG_LIGHT),
        (QtGui.QPalette.ToolTipBase, BG_LIGHT),
        (QtGui.QPalette.ToolTipText, FG_LIGHT),
    ]
    for group in (QtGui.QPalette.Active, QtGui.QPalette.Inactive):
        for role, color in shared_roles:
            qpal.setColor(group, role, color)

    # Disabled widgets get muted foregrounds and flatter backgrounds.
    disabled_roles = [
        (QtGui.QPalette.Base, BG_NORMAL),
        (QtGui.QPalette.Button, BG_NORMAL),
        (QtGui.QPalette.ButtonText, FG_DARK),
        (QtGui.QPalette.WindowText, FG_DARK),
        (QtGui.QPalette.Text, FG_DARK),
        (QtGui.QPalette.Highlight, BG_LIGHT),
        (QtGui.QPalette.HighlightedText, FG_DARK),
    ]
    for role, color in disabled_roles:
        qpal.setColor(QtGui.QPalette.Disabled, role, color)
    return qpal
def test_mount_blob_into_repository(registry_model):
    """Mounting each local blob of devtable/simple into devtable/complex
    makes it retrievable by digest in the target repository."""
    source_repo = registry_model.lookup_repository('devtable', 'simple')
    latest_tag = registry_model.get_repo_tag(source_repo, 'latest')
    manifest = registry_model.get_manifest_for_tag(latest_tag)
    target_repo = registry_model.lookup_repository('devtable', 'complex')

    blobs = registry_model.get_manifest_local_blobs(manifest, storage, include_placements=True)
    assert blobs

    for blob in blobs:
        # Blob must not be visible in the target before mounting.
        assert not registry_model.get_repo_blob_by_digest(target_repo, blob.digest)
        assert registry_model.mount_blob_into_repository(blob, target_repo, 60)
        mounted = registry_model.get_repo_blob_by_digest(target_repo, blob.digest)
        assert mounted == blob
class TestTfModuleReducer(unittest.TestCase): .tf1 def test_reducing_tf_slim_model(self): tf.compat.v1.reset_default_graph() sess = tf.compat.v1.Session() module_zero_channels_list = [] x = tf.compat.v1.placeholder(tf.float32, [1, 32, 32, 3]) _ = tf_slim_basic_model(x) init = tf.compat.v1.global_variables_initializer() sess.run(init) update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS) self.assertEqual(4, len(update_ops)) tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('Conv_1/Conv2D') input_channels_to_winnow = [1, 2, 3] module_mask_pair = (tf_op, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('Conv_2/Conv2D') input_channels_to_winnow = [3, 5, 7] module_mask_pair = (tf_op, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('Conv_3/Conv2D') input_channels_to_winnow = [2, 4, 6] module_mask_pair = (tf_op, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) input_op_names = ['Placeholder'] output_op_names = ['tf_slim_model/Softmax'] (new_sess, ordered_modules_list) = winnow.winnow_tf_model(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True) conv3_relu = new_sess.graph.get_operation_by_name('Conv_3/Relu') self.assertEqual(conv3_relu.inputs[0].op.name, 'reduced_Conv_3/BiasAdd') reduced_conv2_op = new_sess.graph.get_operation_by_name('reduced_Conv_2/Conv2D') self.assertEqual(reduced_conv2_op.inputs[0].name, 'reduced_BatchNorm_1/cond/Merge:0') self.assertEqual(reduced_conv2_op.inputs[0].shape.as_list()[(- 1)], 13) reduced_conv1_op = new_sess.graph.get_operation_by_name('reduced_Conv_1/Conv2D') self.assertEqual(reduced_conv1_op.inputs[0].name, 'reduced_BatchNorm/FusedBatchNormV3:0') reduced_batch_norm = new_sess.graph.get_operation_by_name('reduced_BatchNorm/FusedBatchNormV3') 
self.assertTrue(reduced_batch_norm.inputs[0].op.name, 'reduced_Conv/BiasAdd') self.assertEqual(reduced_batch_norm.get_attr('is_training'), True) reduced_batch_norm_1 = new_sess.graph.get_operation_by_name('reduced_BatchNorm_1/cond/FusedBatchNormV3') is_training_placeholder = new_sess.graph.get_tensor_by_name('is_training:0') self.assertEqual(reduced_batch_norm_1.inputs[0].op.type, 'Switch') self.assertEqual(reduced_batch_norm_1.inputs[0].op.inputs[1].op.inputs[0], is_training_placeholder) reduced_batch_norm_2 = new_sess.graph.get_operation_by_name('reduced_BatchNorm_2/FusedBatchNormV3') orig_batch_norm = new_sess.graph.get_operation_by_name('BatchNorm/FusedBatchNormV3') new_batch_norm = new_sess.graph.get_operation_by_name('reduced_BatchNorm/FusedBatchNormV3') self.assertEqual(orig_batch_norm.get_attr('epsilon'), new_batch_norm.get_attr('epsilon')) orig_batch_norm_1 = new_sess.graph.get_operation_by_name('BatchNorm_1/cond/FusedBatchNormV3_1') new_batch_norm_1 = new_sess.graph.get_operation_by_name('reduced_BatchNorm_1/cond/FusedBatchNormV3_1') self.assertEqual(orig_batch_norm_1.get_attr('epsilon'), new_batch_norm_1.get_attr('epsilon')) orig_batch_norm_2 = new_sess.graph.get_operation_by_name('BatchNorm_2/FusedBatchNormV3') new_batch_norm_2 = new_sess.graph.get_operation_by_name('reduced_BatchNorm_2/FusedBatchNormV3') self.assertEqual(orig_batch_norm_2.get_attr('epsilon'), new_batch_norm_2.get_attr('epsilon')) orig_batch_norm_momentum = new_sess.graph.get_operation_by_name('BatchNorm/Const_3') new_batch_norm_momentum = new_sess.graph.get_operation_by_name('reduced_BatchNorm/Const_2') self.assertEqual(orig_batch_norm_momentum.get_attr('value').float_val[0], new_batch_norm_momentum.get_attr('value').float_val[0]) orig_batch_norm_1_momentum = new_sess.graph.get_operation_by_name('BatchNorm_1/cond_1/Const') new_batch_norm_1_momentum = new_sess.graph.get_operation_by_name('reduced_BatchNorm_1/cond_1/Const') 
self.assertEqual(orig_batch_norm_1_momentum.get_attr('value').float_val[0], new_batch_norm_1_momentum.get_attr('value').float_val[0]) self.assertTrue(reduced_batch_norm_2.inputs[0].op.name, 'reduced_Conv_2/Relu') self.assertEqual(reduced_batch_norm_2.get_attr('is_training'), False) self.assertEqual(10, len(ordered_modules_list)) update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS) self.assertEqual(0, len(update_ops)) with new_sess.graph.as_default(): init = tf.compat.v1.global_variables_initializer() new_sess.run(init) new_sess_2 = save_and_load_graph('.', new_sess) with new_sess_2.graph.as_default(): update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS) self.assertEqual(0, len(update_ops)) new_sess_2.close() new_sess.close() sess.close() .tf1 def test_reducing_with_downsample(self): tf.compat.v1.reset_default_graph() sess = tf.compat.v1.Session() module_zero_channels_list = [] _ = single_residual() init = tf.compat.v1.global_variables_initializer() sess.run(init) input_op_names = ['input_1'] output_op_names = ['Relu_2'] tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_1/Conv2D') input_channels_to_winnow = [3, 5, 7] module_mask_pair = (tf_op, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_2/Conv2D') input_channels_to_winnow_2 = [7, 12, 13, 14] module_mask_pair = (tf_op, input_channels_to_winnow_2) module_zero_channels_list.append(module_mask_pair) (new_sess, ordered_modules_list) = winnow.winnow_tf_model(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True) with new_sess.graph.as_default(): reduced_conv2d_1_input = new_sess.graph.get_operation_by_name('reduced_conv2d_1/Conv2D').inputs[0] reduced_conv2d_2_input = new_sess.graph.get_operation_by_name('reduced_conv2d_2/Conv2D').inputs[0] reduced_relu_output = 
new_sess.graph.get_tensor_by_name('reduced_Relu:0') self.assertTrue(('GatherV2' in reduced_conv2d_1_input.name)) self.assertTrue(('GatherV2' in reduced_conv2d_2_input.name)) self.assertEqual(reduced_conv2d_1_input.shape.as_list()[(- 1)], 13) self.assertEqual(reduced_conv2d_2_input.shape.as_list()[(- 1)], 12) self.assertEqual(reduced_relu_output.shape.as_list()[(- 1)], 15) self.assertEqual(6, len(ordered_modules_list)) new_sess.close() sess.close() .tf2 def test_reducing_with_downsample_for_tf2(self): tf.compat.v1.reset_default_graph() sess = tf.compat.v1.Session() module_zero_channels_list = [] _ = single_residual_for_tf2() init = tf.compat.v1.global_variables_initializer() sess.run(init) input_op_names = ['input_1'] output_op_names = ['Relu_2'] tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_1/Conv2D') input_channels_to_winnow = [3, 5, 7] module_mask_pair = (tf_op, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_2/Conv2D') input_channels_to_winnow_2 = [7, 12, 13, 14] module_mask_pair = (tf_op, input_channels_to_winnow_2) module_zero_channels_list.append(module_mask_pair) (new_sess, ordered_modules_list) = winnow.winnow_tf_model(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True) with new_sess.graph.as_default(): reduced_conv2d_1_input = new_sess.graph.get_operation_by_name('reduced_conv2d_1/Conv2D').inputs[0] reduced_conv2d_2_input = new_sess.graph.get_operation_by_name('reduced_conv2d_2/Conv2D').inputs[0] reduced_relu_output = new_sess.graph.get_tensor_by_name('reduced_Relu:0') self.assertTrue(('GatherV2' in reduced_conv2d_1_input.name)) self.assertTrue(('GatherV2' in reduced_conv2d_2_input.name)) self.assertEqual(reduced_conv2d_1_input.shape.as_list()[(- 1)], 13) self.assertEqual(reduced_conv2d_2_input.shape.as_list()[(- 1)], 12) self.assertEqual(reduced_relu_output.shape.as_list()[(- 
1)], 15) self.assertEqual(6, len(ordered_modules_list)) new_sess.close() sess.close() .tf1 def test_reducing_inserting_downsample_upsample(self): tf.compat.v1.reset_default_graph() sess = tf.compat.v1.Session() _ = upsample_model() init = tf.compat.v1.global_variables_initializer() sess.run(init) input_op_names = ['input_1'] output_op_names = ['upsample_model/Softmax'] module_zero_channels_list = [] tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_3/Conv2D') input_channels_to_winnow = [3, 5, 7] module_mask_pair = (tf_op, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) (new_sess, ordered_modules_list) = winnow.winnow_tf_model(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True) stack_output = new_sess.graph.get_tensor_by_name('upsample/stack:0') reduced_batch_normalization_1_output = new_sess.graph.get_tensor_by_name('reduced_batch_normalization_1/cond/Merge:0') relu_1_output = new_sess.graph.get_tensor_by_name('Relu_1:0') gather_output = new_sess.graph.get_tensor_by_name('downsample/GatherV2:0') self.assertEqual([None, 7, 7, 5], reduced_batch_normalization_1_output.shape.as_list()) self.assertEqual([None, 7, 7, 8], stack_output.shape.as_list()) self.assertEqual([None, 7, 7, 8], relu_1_output.shape.as_list()) self.assertEqual([None, 7, 7, 5], gather_output.shape.as_list()) self.assertEqual(3, len(ordered_modules_list)) module_zero_channels_list = [] tf_op = new_sess.graph.get_operation_by_name('reduced_conv2d_3/Conv2D') input_channels_to_winnow = [1, 2, 3] module_mask_pair = (tf_op, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) (new_sess_2, ordered_modules_list) = winnow.winnow_tf_model(new_sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True) relu_1_output = new_sess_2.graph.get_tensor_by_name('Relu_1:0') gather_output = 
new_sess_2.graph.get_tensor_by_name('downsample_1/GatherV2:0') self.assertEqual([None, 7, 7, 8], relu_1_output.shape.as_list()) self.assertEqual([None, 7, 7, 2], gather_output.shape.as_list()) self.assertEqual(1, len(ordered_modules_list)) new_sess.close() new_sess_2.close() sess.close() .tf2 def test_reducing_inserting_downsample_upsample_for_tf2(self): tf.compat.v1.reset_default_graph() sess = tf.compat.v1.Session() _ = upsample_model_for_tf2() init = tf.compat.v1.global_variables_initializer() sess.run(init) input_op_names = ['input_1'] output_op_names = ['upsample_model/Softmax'] module_zero_channels_list = [] tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_3/Conv2D') input_channels_to_winnow = [3, 5, 7] module_mask_pair = (tf_op, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) (new_sess, ordered_modules_list) = winnow.winnow_tf_model(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True) all_ops = new_sess.graph.get_operations() for op in all_ops: print(op.name) stack_output = new_sess.graph.get_tensor_by_name('upsample/stack:0') reduced_batch_normalization_1_output = new_sess.graph.get_tensor_by_name('reduced_batch_normalization_1/FusedBatchNormV3:0') relu_1_output = new_sess.graph.get_tensor_by_name('Relu_1:0') gather_output = new_sess.graph.get_tensor_by_name('downsample/GatherV2:0') self.assertEqual([None, 7, 7, 5], reduced_batch_normalization_1_output.shape.as_list()) self.assertEqual([None, 7, 7, 8], stack_output.shape.as_list()) self.assertEqual([None, 7, 7, 8], relu_1_output.shape.as_list()) self.assertEqual([None, 7, 7, 5], gather_output.shape.as_list()) self.assertEqual(3, len(ordered_modules_list)) module_zero_channels_list = [] tf_op = new_sess.graph.get_operation_by_name('reduced_conv2d_3/Conv2D') input_channels_to_winnow = [1, 2, 3] module_mask_pair = (tf_op, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) 
(new_sess_2, ordered_modules_list) = winnow.winnow_tf_model(new_sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True) relu_1_output = new_sess_2.graph.get_tensor_by_name('Relu_1:0') gather_output = new_sess_2.graph.get_tensor_by_name('downsample_1/GatherV2:0') self.assertEqual([None, 7, 7, 8], relu_1_output.shape.as_list()) self.assertEqual([None, 7, 7, 2], gather_output.shape.as_list()) self.assertEqual(1, len(ordered_modules_list)) new_sess.close() new_sess_2.close() sess.close() def test_reducing_with_concat(self): tf.compat.v1.reset_default_graph() sess = tf.compat.v1.Session() module_zero_channels_list = [] _ = concat_model() init = tf.compat.v1.global_variables_initializer() sess.run(init) input_op_names = ['input_1'] output_op_names = ['concat_model/Softmax'] tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_3/Conv2D') input_channels_to_winnow = [2, 3, 6, 7, 17] module_mask_pair = (tf_op, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_4/Conv2D') input_channels_to_winnow_1 = [2, 3, 6, 7, 8, 17] module_mask_pair = (tf_op, input_channels_to_winnow_1) module_zero_channels_list.append(module_mask_pair) (new_sess, ordered_modules_list) = winnow.winnow_tf_model(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True) with new_sess.graph.as_default(): conv2d_3_input = new_sess.graph.get_operation_by_name('reduced_conv2d_3/Conv2D').inputs[0] conv2d_4_input = new_sess.graph.get_operation_by_name('reduced_conv2d_4/Conv2D').inputs[0] concat_output = new_sess.graph.get_tensor_by_name('concatenate/concat:0') concat_conv2d_1_input = new_sess.graph.get_operation_by_name('concatenate/concat').inputs[0] concat_conv2d_input = new_sess.graph.get_operation_by_name('concatenate/concat').inputs[1] concat_conv2d_2_input = 
new_sess.graph.get_operation_by_name('concatenate/concat').inputs[2] conv2d_1_output = new_sess.graph.get_tensor_by_name('reduced_conv2d_1/BiasAdd:0') conv2d_output = new_sess.graph.get_tensor_by_name('reduced_conv2d/BiasAdd:0') conv2d_2_output = new_sess.graph.get_tensor_by_name('reduced_conv2d_2/BiasAdd:0') self.assertEqual(13, conv2d_3_input.shape.as_list()[(- 1)]) self.assertEqual(12, conv2d_4_input.shape.as_list()[(- 1)]) self.assertEqual(18, concat_output.shape.as_list()[(- 1)]) self.assertEqual(6, concat_conv2d_1_input.shape.as_list()[(- 1)]) self.assertEqual(5, concat_conv2d_input.shape.as_list()[(- 1)]) self.assertEqual(7, concat_conv2d_2_input.shape.as_list()[(- 1)]) self.assertEqual(4, conv2d_1_output.shape.as_list()[(- 1)]) self.assertEqual(3, conv2d_output.shape.as_list()[(- 1)]) self.assertEqual(6, conv2d_2_output.shape.as_list()[(- 1)]) self.assertEqual(5, len(ordered_modules_list)) new_conn_graph = ConnectedGraph(new_sess.graph, input_op_names, output_op_names) self.assertEqual(27, len(new_conn_graph.get_all_ops().keys())) self.assertTrue((new_conn_graph.get_op_from_module_name('conv2d_3/Conv2D') is None)) self.assertTrue((new_conn_graph.get_op_from_module_name('conv2d_4/Conv2D') is None)) new_sess.close() sess.close() def test_reducing_pad_in_module_reducer(self): tf.compat.v1.reset_default_graph() sess = tf.compat.v1.Session() module_zero_channels_list = [] _ = pad_model() init = tf.compat.v1.global_variables_initializer() sess.run(init) tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_1/Conv2D') input_channels_to_winnow = [1, 2, 3] input_op_names = ['input_1'] output_op_names = ['pad_model/Softmax'] module_mask_pair = (tf_op, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) (new_sess, ordered_modules_list) = winnow.winnow_tf_model(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True) self.assertEqual(3, len(ordered_modules_list)) 
new_sess.close() sess.close() tf.compat.v1.reset_default_graph() sess = tf.compat.v1.Session() module_zero_channels_list = [] _ = pad_model() init = tf.compat.v1.global_variables_initializer() sess.run(init) tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_2/Conv2D') input_channels_to_winnow = [1, 2, 3] module_mask_pair = (tf_op, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) with self.assertRaises(NotImplementedError): (_, ordered_modules_list) = winnow.winnow_tf_model(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True) sess.close() tf.compat.v1.reset_default_graph() sess = tf.compat.v1.Session() module_zero_channels_list = [] _ = pad_model() init = tf.compat.v1.global_variables_initializer() sess.run(init) tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_3/Conv2D') input_channels_to_winnow = [1, 2, 3] module_mask_pair = (tf_op, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) (new_sess, ordered_modules_list) = winnow.winnow_tf_model(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True) reduced_padv2 = new_sess.graph.get_operation_by_name('reduced_PadV2') self.assertTrue((len(reduced_padv2.inputs) > 1)) orig_const_val = sess.graph.get_operation_by_name('PadV2').inputs[2].eval(session=sess) new_const_val = new_sess.graph.get_operation_by_name('reduced_PadV2').inputs[2].eval(session=new_sess) self.assertEqual(orig_const_val, new_const_val) self.assertEqual(3, len(ordered_modules_list)) new_sess.close() sess.close() tf.compat.v1.reset_default_graph() sess = tf.compat.v1.Session() module_zero_channels_list = [] _ = pad_model() init = tf.compat.v1.global_variables_initializer() sess.run(init) tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_4/Conv2D') input_channels_to_winnow = [1, 2, 3] module_mask_pair = (tf_op, 
input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) (new_sess, ordered_modules_list) = winnow.winnow_tf_model(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True) old_mode = sess.graph.get_operation_by_name('MirrorPad').get_attr('mode') new_mode = sess.graph.get_operation_by_name('reduced_MirrorPad').get_attr('mode') self.assertEqual(old_mode, new_mode) self.assertEqual(3, len(ordered_modules_list)) new_sess.close() sess.close() ('For some reason, with TF 1.15, regularization does not show up in the convolution op') def test_reducing_conv_with_l2_loss(self): tf.compat.v1.reset_default_graph() sess = tf.compat.v1.Session() module_zero_channels_list = [] _ = keras_model() init = tf.compat.v1.global_variables_initializer() sess.run(init) tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_1/Conv2D') input_channels_to_winnow = [1, 2, 3] input_op_names = ['conv2d_input'] output_op_names = ['keras_model/Softmax'] module_mask_pair = (tf_op, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) (new_sess, _) = winnow.winnow_tf_model(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True) scale_op = new_sess.graph.get_operation_by_name('reduced_conv2d_1/kernel/Regularizer/l2_regularizer/scale') self.assertEqual(0.5, scale_op.get_attr('value').float_val[0]) new_sess.close() sess.close() def test_reducing_depthwise_conv2d(self): tf.compat.v1.reset_default_graph() sess = tf.compat.v1.Session() module_zero_channels_list = [] _ = depthwise_conv2d_model() init = tf.compat.v1.global_variables_initializer() sess.run(init) tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('separable_conv2d/separable_conv2d') input_channels_to_winnow = [1, 2, 3] module_mask_pair = (tf_op, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) tf_op = 
tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_1/Conv2D') input_channels_to_winnow = [0, 5, 7] module_mask_pair = (tf_op, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) input_op_names = ['input_1'] output_op_names = ['depthwise_conv2d_model/Softmax'] (new_sess, ordered_modules_list) = winnow.winnow_tf_model(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True) reduced_depthwise = new_sess.graph.get_operation_by_name('reduced_depthwise_conv2d/depthwise') reduced_separable_depthwise = new_sess.graph.get_operation_by_name('reduced_separable_conv2d/separable_conv2d/depthwise') self.assertEqual(7, reduced_depthwise.outputs[0].shape.as_list()[(- 1)]) self.assertEqual(7, reduced_depthwise.inputs[0].shape.as_list()[(- 1)]) self.assertEqual(13, reduced_separable_depthwise.outputs[0].shape.as_list()[(- 1)]) self.assertEqual(13, reduced_separable_depthwise.inputs[0].shape.as_list()[(- 1)]) self.assertEqual(5, len(ordered_modules_list)) new_sess.close() sess.close() .tf1 def test_reducing_with_dropout_and_identity_keras(self): tf.compat.v1.reset_default_graph() sess = tf.compat.v1.Session() module_zero_channels_list = [] _ = dropout_keras_model() init = tf.compat.v1.global_variables_initializer() sess.run(init) input_op_names = ['input_1'] output_op_names = ['dropout_keras_model/Softmax'] tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_1/Conv2D') input_channels_to_winnow = [2, 3, 4] module_mask_pair = (tf_op, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) (new_sess, ordered_modules_list) = winnow.winnow_tf_model(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True) reduced_identity = new_sess.graph.get_tensor_by_name('reduced_Identity:0') self.assertEqual(13, reduced_identity.shape.as_list()[(- 1)]) self.assertEqual(reduced_identity.op.inputs[0].name, 
'reduced_dropout/cond/Merge:0') old_dropout_greater_equal_op = new_sess.graph.get_operation_by_name('dropout/cond/dropout/GreaterEqual') reduced_dropout_greater_equal_op = new_sess.graph.get_operation_by_name('reduced_dropout/cond/dropout/GreaterEqual') old_rate = old_dropout_greater_equal_op.inputs[1].op.get_attr('value').float_val[0] rate = reduced_dropout_greater_equal_op.inputs[1].op.get_attr('value').float_val[0] self.assertTrue(np.allclose(old_rate, rate)) self.assertEqual(4, len(ordered_modules_list)) new_sess.close() sess.close() .tf1 def test_reducing_with_dropout_and_identity_slim(self): tf.compat.v1.reset_default_graph() sess = tf.compat.v1.Session() module_zero_channels_list = [] _ = dropout_slim_model() init = tf.compat.v1.global_variables_initializer() sess.run(init) input_op_names = ['input_1'] output_op_names = ['dropout_slim_model/Softmax'] tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('Conv_1/Conv2D') input_channels_to_winnow = [2, 3, 4] module_mask_pair = (tf_op, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) (new_sess, ordered_modules_list) = winnow.winnow_tf_model(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True) reduced_identity = new_sess.graph.get_tensor_by_name('reduced_Identity:0') self.assertEqual(13, reduced_identity.shape.as_list()[(- 1)]) self.assertEqual(reduced_identity.op.inputs[0].name, 'reduced_Dropout/dropout/mul_1:0') old_dropout_greater_equal_op = new_sess.graph.get_operation_by_name('Dropout/dropout_1/GreaterEqual') reduced_dropout_greater_equal_op = new_sess.graph.get_operation_by_name('reduced_Dropout/dropout/GreaterEqual') old_rate = old_dropout_greater_equal_op.inputs[1].op.get_attr('value').float_val[0] rate = reduced_dropout_greater_equal_op.inputs[1].op.get_attr('value').float_val[0] self.assertTrue(np.allclose(old_rate, rate)) self.assertEqual(5, len(ordered_modules_list)) new_sess.close() sess.close() .tf1 def 
test_reducing_keras_fused_bn_training_true_and_false(self): tf.compat.v1.reset_default_graph() sess = tf.compat.v1.Session() module_zero_channels_list = [] _ = keras_model_functional() init = tf.compat.v1.global_variables_initializer() sess.run(init) tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('scope_1/conv2d_2/Conv2D') input_channels_to_winnow = [1, 2, 3] module_mask_pair = (tf_op, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('scope_1/conv2d_1/Conv2D') input_channels_to_winnow = [3, 5, 7] module_mask_pair = (tf_op, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('scope_1/conv2d_3/Conv2D') input_channels_to_winnow = [2, 4, 6] module_mask_pair = (tf_op, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) input_op_names = ['input_1'] output_op_names = ['keras_model_functional/Softmax'] (new_sess, ordered_modules_list) = winnow.winnow_tf_model(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True) reduced_conv2d_2_tanh_op = new_sess.graph.get_operation_by_name('reduced_scope_1/conv2d_2/Tanh') self.assertEqual(reduced_conv2d_2_tanh_op.inputs[0].op.name, 'reduced_scope_1/conv2d_2/BiasAdd') reduced_conv2d_2_op = new_sess.graph.get_operation_by_name('reduced_scope_1/conv2d_2/Conv2D') self.assertEqual(reduced_conv2d_2_op.inputs[0].name, 'reduced_scope_1/batch_normalization_1/cond/Merge:0') self.assertEqual(reduced_conv2d_2_op.inputs[0].shape.as_list()[(- 1)], 13) reduced_conv2d_1_op = new_sess.graph.get_operation_by_name('reduced_scope_1/conv2d_1/Conv2D') self.assertEqual(reduced_conv2d_1_op.inputs[0].name, 'reduced_batch_normalization/FusedBatchNormV3:0') reduced_batch_norm = new_sess.graph.get_operation_by_name('reduced_batch_normalization/FusedBatchNormV3') 
self.assertTrue(reduced_batch_norm.inputs[0].op.name, 'reduced_conv2d/BiasAdd') self.assertEqual(reduced_batch_norm.get_attr('is_training'), True) reduced_batch_norm_1 = new_sess.graph.get_operation_by_name('reduced_scope_1/batch_normalization_1/cond/FusedBatchNormV3') is_training_placeholder = new_sess.graph.get_tensor_by_name('is_training:0') self.assertEqual(reduced_batch_norm_1.inputs[0].op.type, 'Switch') self.assertEqual(reduced_batch_norm_1.inputs[0].op.inputs[1].op.inputs[0], is_training_placeholder) reduced_batch_norm_2 = new_sess.graph.get_operation_by_name('reduced_scope_1/batch_normalization_2/FusedBatchNormV3') self.assertTrue(reduced_batch_norm_2.inputs[0].op.name, 'reduced_scope_1/conv2d_2/Tanh') self.assertEqual(reduced_batch_norm_2.get_attr('is_training'), False) orig_batch_norm = new_sess.graph.get_operation_by_name('batch_normalization/FusedBatchNormV3') new_batch_norm = new_sess.graph.get_operation_by_name('reduced_batch_normalization/FusedBatchNormV3') self.assertEqual(orig_batch_norm.get_attr('epsilon'), new_batch_norm.get_attr('epsilon')) orig_batch_norm_1 = new_sess.graph.get_operation_by_name('scope_1/batch_normalization_1/cond/FusedBatchNormV3_1') new_batch_norm_1 = new_sess.graph.get_operation_by_name('reduced_scope_1/batch_normalization_1/cond/FusedBatchNormV3_1') self.assertEqual(orig_batch_norm_1.get_attr('epsilon'), new_batch_norm_1.get_attr('epsilon')) orig_batch_norm_2 = new_sess.graph.get_operation_by_name('scope_1/batch_normalization_2/FusedBatchNormV3') new_batch_norm_2 = new_sess.graph.get_operation_by_name('reduced_scope_1/batch_normalization_2/FusedBatchNormV3') self.assertEqual(orig_batch_norm_2.get_attr('epsilon'), new_batch_norm_2.get_attr('epsilon')) orig_batch_norm_momentum = new_sess.graph.get_operation_by_name('batch_normalization/Const_2') new_batch_norm_momentum = new_sess.graph.get_operation_by_name('reduced_batch_normalization/Const_2') self.assertEqual(orig_batch_norm_momentum.get_attr('value').float_val[0], 
new_batch_norm_momentum.get_attr('value').float_val[0]) orig_batch_norm_1_momentum = new_sess.graph.get_operation_by_name('scope_1/batch_normalization_1/cond_1/Const') new_batch_norm_1_momentum = new_sess.graph.get_operation_by_name('reduced_scope_1/batch_normalization_1/cond_1/Const') self.assertEqual(orig_batch_norm_1_momentum.get_attr('value').float_val[0], new_batch_norm_1_momentum.get_attr('value').float_val[0]) self.assertEqual(9, len(ordered_modules_list)) new_sess.close() sess.close() .tf2 def test_reducing_keras_fused_bn_training_true_and_false_for_tf2(self): tf.compat.v1.reset_default_graph() sess = tf.compat.v1.Session() module_zero_channels_list = [] _ = keras_model_functional_for_tf2() init = tf.compat.v1.global_variables_initializer() sess.run(init) tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('scope_1/conv2d_2/Conv2D') input_channels_to_winnow = [1, 2, 3] module_mask_pair = (tf_op, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('scope_1/conv2d_1/Conv2D') input_channels_to_winnow = [3, 5, 7] module_mask_pair = (tf_op, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('scope_1/conv2d_3/Conv2D') input_channels_to_winnow = [2, 4, 6] module_mask_pair = (tf_op, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) input_op_names = ['input_1'] output_op_names = ['keras_model_functional/Softmax'] (new_sess, ordered_modules_list) = winnow.winnow_tf_model(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True) reduced_conv2d_2_tanh_op = new_sess.graph.get_operation_by_name('reduced_scope_1/conv2d_2/Tanh') self.assertEqual(reduced_conv2d_2_tanh_op.inputs[0].op.name, 'reduced_scope_1/conv2d_2/BiasAdd') reduced_conv2d_2_op = 
new_sess.graph.get_operation_by_name('reduced_scope_1/conv2d_2/Conv2D') self.assertEqual(reduced_conv2d_2_op.inputs[0].name, 'reduced_scope_1/batch_normalization_1/FusedBatchNormV3:0') self.assertEqual(reduced_conv2d_2_op.inputs[0].shape.as_list()[(- 1)], 13) reduced_conv2d_1_op = new_sess.graph.get_operation_by_name('reduced_scope_1/conv2d_1/Conv2D') self.assertEqual(reduced_conv2d_1_op.inputs[0].name, 'reduced_batch_normalization/FusedBatchNormV3:0') reduced_batch_norm = new_sess.graph.get_operation_by_name('reduced_batch_normalization/FusedBatchNormV3') self.assertTrue(reduced_batch_norm.inputs[0].op.name, 'reduced_conv2d/BiasAdd') self.assertEqual(reduced_batch_norm.get_attr('is_training'), True) reduced_batch_norm_1 = new_sess.graph.get_operation_by_name('reduced_scope_1/batch_normalization_1/FusedBatchNormV3') self.assertEqual(reduced_batch_norm_1.get_attr('is_training'), False) reduced_batch_norm_2 = new_sess.graph.get_operation_by_name('reduced_scope_1/batch_normalization_2/FusedBatchNormV3') self.assertTrue(reduced_batch_norm_2.inputs[0].op.name, 'reduced_scope_1/conv2d_2/Tanh') self.assertEqual(reduced_batch_norm_2.get_attr('is_training'), False) orig_batch_norm = new_sess.graph.get_operation_by_name('batch_normalization/FusedBatchNormV3') new_batch_norm = new_sess.graph.get_operation_by_name('reduced_batch_normalization/FusedBatchNormV3') self.assertEqual(orig_batch_norm.get_attr('epsilon'), new_batch_norm.get_attr('epsilon')) orig_batch_norm_1 = new_sess.graph.get_operation_by_name('scope_1/batch_normalization_1/FusedBatchNormV3') new_batch_norm_1 = new_sess.graph.get_operation_by_name('reduced_scope_1/batch_normalization_1/FusedBatchNormV3') self.assertEqual(orig_batch_norm_1.get_attr('epsilon'), new_batch_norm_1.get_attr('epsilon')) orig_batch_norm_2 = new_sess.graph.get_operation_by_name('scope_1/batch_normalization_2/FusedBatchNormV3') new_batch_norm_2 = new_sess.graph.get_operation_by_name('reduced_scope_1/batch_normalization_2/FusedBatchNormV3') 
self.assertEqual(orig_batch_norm_2.get_attr('epsilon'), new_batch_norm_2.get_attr('epsilon')) orig_batch_norm_momentum = new_sess.graph.get_operation_by_name('batch_normalization/Const') new_batch_norm_momentum = new_sess.graph.get_operation_by_name('reduced_batch_normalization/Const') self.assertEqual(orig_batch_norm_momentum.get_attr('value').float_val[0], new_batch_norm_momentum.get_attr('value').float_val[0]) self.assertEqual(9, len(ordered_modules_list)) new_sess.close() sess.close() .tf1 def test_reducing_keras_non_fused_bn_training_true_and_false(self): tf.compat.v1.reset_default_graph() sess = tf.compat.v1.Session() module_zero_channels_list = [] _ = keras_model_functional_with_non_fused_batchnorms() init = tf.compat.v1.global_variables_initializer() sess.run(init) tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('scope_1/conv2d_2/Conv2D') input_channels_to_winnow = [1, 2, 3] module_mask_pair = (tf_op, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('scope_1/conv2d_1/Conv2D') input_channels_to_winnow = [3, 5, 7] module_mask_pair = (tf_op, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('scope_1/conv2d_3/Conv2D') input_channels_to_winnow = [2, 4, 6] module_mask_pair = (tf_op, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) input_op_names = ['input_1'] output_op_names = ['keras_model_functional_with_non_fused_batchnorms/Softmax'] (new_sess, ordered_modules_list) = winnow.winnow_tf_model(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True) reduced_conv2d_1_tanh_op = new_sess.graph.get_operation_by_name('reduced_scope_1/conv2d_2/Tanh') self.assertEqual(reduced_conv2d_1_tanh_op.inputs[0].op.name, 'reduced_scope_1/conv2d_2/BiasAdd') reduced_conv2d_1_op = 
new_sess.graph.get_operation_by_name('reduced_scope_1/conv2d_2/Conv2D') self.assertEqual(reduced_conv2d_1_op.inputs[0].name, 'reduced_scope_1/batch_normalization_1/batchnorm/add_1:0') self.assertEqual(reduced_conv2d_1_op.inputs[0].shape.as_list()[(- 1)], 13) reduced_conv2d_op = new_sess.graph.get_operation_by_name('reduced_scope_1/conv2d_1/Conv2D') self.assertEqual(reduced_conv2d_op.inputs[0].name, 'reduced_batch_normalization/batchnorm/add_1:0') orig_batch_norm_epsilon = new_sess.graph.get_operation_by_name('batch_normalization/batchnorm/add/y') new_batch_norm_epsilon = new_sess.graph.get_operation_by_name('reduced_batch_normalization/batchnorm/add/y') self.assertEqual(orig_batch_norm_epsilon.get_attr('value').float_val[0], new_batch_norm_epsilon.get_attr('value').float_val[0]) orig_batch_norm_1_epsilon = new_sess.graph.get_operation_by_name('scope_1/batch_normalization_1/batchnorm/add/y') new_batch_norm_1_epsilon = new_sess.graph.get_operation_by_name('reduced_scope_1/batch_normalization_1/batchnorm/add/y') self.assertEqual(orig_batch_norm_1_epsilon.get_attr('value').float_val[0], new_batch_norm_1_epsilon.get_attr('value').float_val[0]) orig_batch_norm_2_epsilon = new_sess.graph.get_operation_by_name('scope_1/batch_normalization_2/batchnorm/add/y') new_batch_norm_2_epsilon = new_sess.graph.get_operation_by_name('reduced_scope_1/batch_normalization_2/batchnorm/add/y') self.assertEqual(orig_batch_norm_2_epsilon.get_attr('value').float_val[0], new_batch_norm_2_epsilon.get_attr('value').float_val[0]) orig_batch_norm_momentum = new_sess.graph.get_operation_by_name('batch_normalization/AssignMovingAvg_1/decay') new_batch_norm_momentum = new_sess.graph.get_operation_by_name('reduced_batch_normalization/AssignMovingAvg_1/decay') self.assertEqual(orig_batch_norm_momentum.get_attr('value').float_val[0], new_batch_norm_momentum.get_attr('value').float_val[0]) orig_batch_norm_1_momentum = 
new_sess.graph.get_operation_by_name('scope_1/batch_normalization_1/cond_3/AssignMovingAvg/decay') new_batch_norm_1_momentum = new_sess.graph.get_operation_by_name('reduced_scope_1/batch_normalization_1/cond_3/AssignMovingAvg/decay') self.assertEqual(orig_batch_norm_1_momentum.get_attr('value').float_val[0], new_batch_norm_1_momentum.get_attr('value').float_val[0]) self.assertEqual(9, len(ordered_modules_list)) new_sess.close() sess.close() .tf2 def test_reducing_keras_non_fused_bn_training_true_and_false_for_tf2(self): tf.compat.v1.reset_default_graph() sess = tf.compat.v1.Session() module_zero_channels_list = [] _ = keras_model_functional_with_non_fused_batchnorms_for_tf2() init = tf.compat.v1.global_variables_initializer() sess.run(init) tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('scope_1/conv2d_2/Conv2D') input_channels_to_winnow = [1, 2, 3] module_mask_pair = (tf_op, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('scope_1/conv2d_1/Conv2D') input_channels_to_winnow = [3, 5, 7] module_mask_pair = (tf_op, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('scope_1/conv2d_3/Conv2D') input_channels_to_winnow = [2, 4, 6] module_mask_pair = (tf_op, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) input_op_names = ['input_1'] output_op_names = ['keras_model_functional_with_non_fused_batchnorms/Softmax'] (new_sess, ordered_modules_list) = winnow.winnow_tf_model(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True) reduced_conv2d_1_tanh_op = new_sess.graph.get_operation_by_name('reduced_scope_1/conv2d_2/Tanh') self.assertEqual(reduced_conv2d_1_tanh_op.inputs[0].op.name, 'reduced_scope_1/conv2d_2/BiasAdd') reduced_conv2d_1_op = 
new_sess.graph.get_operation_by_name('reduced_scope_1/conv2d_2/Conv2D') self.assertEqual(reduced_conv2d_1_op.inputs[0].name, 'reduced_scope_1/batch_normalization_1/batchnorm/add_1:0') self.assertEqual(reduced_conv2d_1_op.inputs[0].shape.as_list()[(- 1)], 13) reduced_conv2d_op = new_sess.graph.get_operation_by_name('reduced_scope_1/conv2d_1/Conv2D') self.assertEqual(reduced_conv2d_op.inputs[0].name, 'reduced_batch_normalization/batchnorm/add_1:0') orig_batch_norm_epsilon = new_sess.graph.get_operation_by_name('batch_normalization/batchnorm/add/y') new_batch_norm_epsilon = new_sess.graph.get_operation_by_name('reduced_batch_normalization/batchnorm/add/y') self.assertEqual(orig_batch_norm_epsilon.get_attr('value').float_val[0], new_batch_norm_epsilon.get_attr('value').float_val[0]) orig_batch_norm_1_epsilon = new_sess.graph.get_operation_by_name('scope_1/batch_normalization_1/batchnorm/add/y') new_batch_norm_1_epsilon = new_sess.graph.get_operation_by_name('reduced_scope_1/batch_normalization_1/batchnorm/add/y') self.assertEqual(orig_batch_norm_1_epsilon.get_attr('value').float_val[0], new_batch_norm_1_epsilon.get_attr('value').float_val[0]) orig_batch_norm_2_epsilon = new_sess.graph.get_operation_by_name('scope_1/batch_normalization_2/batchnorm/add/y') new_batch_norm_2_epsilon = new_sess.graph.get_operation_by_name('reduced_scope_1/batch_normalization_2/batchnorm/add/y') self.assertEqual(orig_batch_norm_2_epsilon.get_attr('value').float_val[0], new_batch_norm_2_epsilon.get_attr('value').float_val[0]) orig_batch_norm_momentum = new_sess.graph.get_operation_by_name('batch_normalization/AssignMovingAvg_1/decay') new_batch_norm_momentum = new_sess.graph.get_operation_by_name('reduced_batch_normalization/AssignMovingAvg_1/decay') self.assertEqual(orig_batch_norm_momentum.get_attr('value').float_val[0], new_batch_norm_momentum.get_attr('value').float_val[0]) self.assertEqual(9, len(ordered_modules_list)) new_sess.close() sess.close() def 
test_reducing_multiple_input_model(self): tf.compat.v1.reset_default_graph() sess = tf.compat.v1.Session() module_zero_channels_list = [] _ = multiple_input_model() init = tf.compat.v1.global_variables_initializer() sess.run(init) tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('conv2/Conv2D') input_channels_to_winnow = [1, 2, 3] module_mask_pair = (tf_op, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) input_op_names = ['input1', 'input2'] output_op_names = ['multiple_input_model/Softmax'] (new_sess, ordered_modules_list) = winnow.winnow_tf_model(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True) reduced_conv2d_output = new_sess.graph.get_tensor_by_name('reduced_conv1a/BiasAdd:0') self.assertEqual(5, reduced_conv2d_output.shape.as_list()[(- 1)]) reduced_conv2d_1_output = new_sess.graph.get_tensor_by_name('reduced_conv1b/BiasAdd:0') self.assertEqual(5, reduced_conv2d_1_output.shape.as_list()[(- 1)]) self.assertEqual(4, len(ordered_modules_list)) new_sess.close() sess.close() def test_reducing_minimum_maximum_ops(self): tf.compat.v1.reset_default_graph() sess = tf.compat.v1.Session() module_zero_channels_list = [] _ = minimum_maximum_model() init = tf.compat.v1.global_variables_initializer() sess.run(init) tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_1/Conv2D') input_channels_to_winnow = [1, 2, 3] module_mask_pair = (tf_op, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) input_op_names = ['input_1'] output_op_names = ['minimum_maximum_model/Softmax'] (new_sess, ordered_modules_list) = winnow.winnow_tf_model(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True) old_minimum_op = sess.graph.get_operation_by_name('Minimum') old_minimum_rate = old_minimum_op.inputs[1].op.get_attr('value').float_val[0] reduced_minimum_tensor = 
new_sess.graph.get_tensor_by_name('reduced_Minimum:0') new_minimum_rate = reduced_minimum_tensor.op.inputs[1].op.get_attr('value').float_val[0] self.assertEqual(29, reduced_minimum_tensor.shape.as_list()[(- 1)]) self.assertEqual(old_minimum_rate, new_minimum_rate) old_maximum_op = sess.graph.get_operation_by_name('Maximum') old_maximum_rate = old_maximum_op.inputs[1].op.get_attr('value').float_val[0] reduced_maximum_tensor = new_sess.graph.get_tensor_by_name('reduced_Maximum:0') new_maximum_rate = reduced_maximum_tensor.op.inputs[1].op.get_attr('value').float_val[0] self.assertEqual(29, reduced_maximum_tensor.shape.as_list()[(- 1)]) self.assertEqual(old_maximum_rate, new_maximum_rate) self.assertEqual(5, len(ordered_modules_list)) new_sess.close() sess.close() def test_reducing_upsample(self): tf.compat.v1.reset_default_graph() sess = tf.compat.v1.Session() module_zero_channels_list = [] _ = model_with_upsample_already_present() init = tf.compat.v1.global_variables_initializer() sess.run(init) tf_op = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_1/Conv2D') input_channels_to_winnow = [1, 2, 3] module_mask_pair = (tf_op, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) input_op_names = ['input_1'] output_op_names = ['model_with_upsample_already_present/Softmax'] (new_sess, ordered_modules_list) = winnow.winnow_tf_model(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True) stack_tensor = new_sess.graph.get_tensor_by_name('upsample/stack:0') downsample_tensor = new_sess.graph.get_tensor_by_name('downsample/GatherV2:0') downsample_indices = downsample_tensor.op.inputs[1].op.get_attr('value').tensor_content downsample_indices = struct.unpack('9i', downsample_indices) conv2d_tensor = new_sess.graph.get_tensor_by_name('conv2d/Conv2D:0') self.assertEqual((0, 4, 5, 6, 7, 8, 9, 10, 11), downsample_indices) self.assertEqual(12, stack_tensor.shape.as_list()[(- 1)]) 
self.assertEqual(9, downsample_tensor.shape.as_list()[(- 1)]) self.assertEqual(8, conv2d_tensor.shape.as_list()[(- 1)]) self.assertEqual(2, len(ordered_modules_list)) new_sess.close() sess.close() def test_reducing_downsample(self): tf.compat.v1.reset_default_graph() sess = tf.compat.v1.Session() module_zero_channels_list = [] _ = model_with_multiple_downsamples() init = tf.compat.v1.global_variables_initializer() sess.run(init) conv2d = tf.compat.v1.get_default_graph().get_operation_by_name('downsample/conv2d/Conv2D') input_channels_to_winnow = [1, 2, 3] module_mask_pair = (conv2d, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) conv2d_1 = tf.compat.v1.get_default_graph().get_operation_by_name('downsample_1/conv2d_1/Conv2D') input_channels_to_winnow = [1, 2, 3] module_mask_pair = (conv2d_1, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) conv2d_2 = tf.compat.v1.get_default_graph().get_operation_by_name('downsample_1/conv2d_2/Conv2D') input_channels_to_winnow = [1, 2, 3] module_mask_pair = (conv2d_2, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) input_op_names = ['input_1'] output_op_names = ['multiple_downsamples/Softmax'] (new_sess, _) = winnow.winnow_tf_model(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True) sess.close() new_sess.close() self.assertEqual(0, 0) .tf1 def test_reducing_upsample2d(self): tf.compat.v1.reset_default_graph() sess = tf.compat.v1.Session() module_zero_channels_list = [] _ = model_with_upsample2d() init = tf.compat.v1.global_variables_initializer() sess.run(init) conv2d = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_1/Conv2D') input_channels_to_winnow = [1, 2, 3] module_mask_pair = (conv2d, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) const_op = sess.graph.get_operation_by_name('up_sampling2d/Const') tensor_content_length = 
const_op.get_attr('value').tensor_shape.dim[0].size unpack_string = (str(tensor_content_length) + 'i') orig_upsample_size = struct.unpack(unpack_string, const_op.get_attr('value').tensor_content) input_op_names = ['input_1'] output_op_names = ['model_with_upsample2d/Softmax'] (new_sess, ordered_modules_list) = winnow.winnow_tf_model(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True) self.assertEqual(3, len(ordered_modules_list)) const_op = new_sess.graph.get_operation_by_name('reduced_up_sampling2d/Const') tensor_content_length = const_op.get_attr('value').tensor_shape.dim[0].size unpack_string = (str(tensor_content_length) + 'i') reduced_upsample_size = struct.unpack(unpack_string, const_op.get_attr('value').tensor_content) self.assertEqual(orig_upsample_size, reduced_upsample_size) sess.close() new_sess.close() def test_reducing_leakyrelu(self): tf.compat.v1.reset_default_graph() sess = tf.compat.v1.Session() module_zero_channels_list = [] _ = model_with_leaky_relu() init = tf.compat.v1.global_variables_initializer() sess.run(init) conv2d = tf.compat.v1.get_default_graph().get_operation_by_name('conv2d_1/Conv2D') input_channels_to_winnow = [1, 2, 3] module_mask_pair = (conv2d, input_channels_to_winnow) module_zero_channels_list.append(module_mask_pair) orig_alpha = sess.graph.get_operation_by_name('LeakyRelu').get_attr('alpha') input_op_names = ['input_1'] output_op_names = ['model_with_leaky_relu/Softmax'] (new_sess, ordered_modules_list) = winnow.winnow_tf_model(sess, input_op_names, output_op_names, module_zero_channels_list, reshape=True, in_place=True, verbose=True) self.assertEqual(3, len(ordered_modules_list)) reduced_alpha = new_sess.graph.get_operation_by_name('reduced_LeakyRelu').get_attr('alpha') self.assertEqual(orig_alpha, reduced_alpha) sess.close() new_sess.close()
def process_examples(examples): progress = tqdm(range(len(examples['id'])), desc='Processing Samples') idxs = [] image_paths = [] for index in progress: image_path = examples['image_path'][index] idxs.append(examples['id'][index]) image_paths.append(image_path) return (idxs, image_paths)
.parametrize('username,password', users) .parametrize('export_format', export_formats) def test_export(db, client, username, password, export_format): client.login(username=username, password=password) url = ((reverse(urlnames['export']) + export_format) + '/') response = client.get(url) assert (response.status_code == status_map['list'][username]), response.content if ((response.status_code == 200) and (export_format == 'xml')): root = et.fromstring(response.content) assert (root.tag == 'rdmo') for child in root: assert (child.tag in ['option'])
def parameters(): params = TrackerParams() params.debug = 0 params.visualization = False params.use_gpu = True params.image_sample_size = (18 * 16) params.search_area_scale = 5 params.sample_memory_size = 50 params.learning_rate = 0.01 params.init_samples_minimum_weight = 0.25 params.train_skipping = 20 params.update_classifier = True params.net_opt_iter = 10 params.net_opt_update_iter = 2 params.net_opt_hn_iter = 1 params.window_output = False params.use_augmentation = True params.augmentation = {'fliplr': True, 'rotate': [10, (- 10), 45, (- 45)], 'blur': [(3, 1), (1, 3), (2, 2)], 'relativeshift': [(0.6, 0.6), ((- 0.6), 0.6), (0.6, (- 0.6)), ((- 0.6), (- 0.6))], 'dropout': (2, 0.2)} params.augmentation_expansion_factor = 2 params.random_shift_factor = (1 / 3) params.advanced_localization = True params.target_not_found_threshold = 0.25 params.distractor_threshold = 0.8 params.hard_negative_threshold = 0.5 params.target_neighborhood_scale = 2.2 params.dispalcement_scale = 0.8 params.hard_negative_learning_rate = 0.02 params.update_scale_when_uncertain = True params.iounet_augmentation = False params.iounet_use_log_scale = True params.iounet_k = 3 params.num_init_random_boxes = 9 params.box_jitter_pos = 0.1 params.box_jitter_sz = 0.5 params.maximal_aspect_ratio = 6 params.box_refinement_iter = 5 params.box_refinement_step_length = 1 params.box_refinement_step_decay = 1 params.merge_type = 'conv' params.net = NetWithBackbone(net_path='DeT_DiMP50_Mean.pth', use_gpu=params.use_gpu) params.vot_anno_conversion_type = 'preserve_area' return params
class TestAHIHSDFileHandler():
    """Tests for the AHI HSD file handler.

    NOTE(review): this chunk is extraction-mangled -- the bare
    ".parametrize(...)" line and the bare ('satpy.readers...') strings look
    like stripped "@pytest.mark.parametrize" / "@mock.patch(...)" decorators,
    and several numeric literals are garbled (".", ".0", and a missing
    'satellite_actual_altitude' value). Confirm against the original file.
    """

    def test_bad_calibration(self):
        """An unknown calib_mode must raise ValueError at handler creation."""
        with pytest.raises(ValueError, match='Invalid calibration mode: BAD_MODE. Choose one of (.*)'):
            with _fake_hsd_handler(fh_kwargs={'calib_mode': 'BAD_MODE'}):
                pass

    .parametrize(('round_actual_position', 'expected_result'), [(False, (140., 0., .)), (True, (140.657, 0.0, .0))])
    def test_actual_satellite_position(self, round_actual_position, expected_result):
        """Actual satellite position in metadata, with/without rounding."""
        with _fake_hsd_handler(fh_kwargs={'round_actual_position': round_actual_position}) as fh:
            ds_id = make_dataid(name='B01', resolution=1000)
            ds_info = {'units': '%', 'standard_name': 'some_name', 'wavelength': (0.1, 0.2, 0.3), 'resolution': 1000}
            metadata = fh._get_metadata(ds_id, ds_info)
            orb_params = metadata['orbital_parameters']
            assert (orb_params['satellite_actual_longitude'] == expected_result[0])
            assert (orb_params['satellite_actual_latitude'] == expected_result[1])
            assert (orb_params['satellite_actual_altitude'] == expected_result[2])

    ('satpy.readers.ahi_hsd.AHIHSDFileHandler._check_fpos')
    def test_read_header(self, *mocks):
        """Header parsing runs cleanly with the file-position check mocked out."""
        with _fake_hsd_handler() as fh:
            fh._read_header(mock.MagicMock())

    ('satpy.readers.ahi_hsd.AHIHSDFileHandler._read_data')
    ('satpy.readers.ahi_hsd.AHIHSDFileHandler._mask_invalid')
    ('satpy.readers.ahi_hsd.AHIHSDFileHandler.calibrate')
    def test_read_band(self, calibrate, *mocks):
        """read_band: space mask, orbital and time parameters, mask_space flag."""
        nrows = 25
        ncols = 100
        calibrate.return_value = np.ones((nrows, ncols))
        with _fake_hsd_handler() as fh:
            fh.data_info['number_of_columns'] = ncols
            fh.data_info['number_of_lines'] = nrows
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore', category=UserWarning, message='Actual .* header size')
                im = fh.read_band(mock.MagicMock(), mock.MagicMock())
            # Off-disk pixels must be masked, matching the geostationary mask.
            mask = im.to_masked_array().mask
            ref_mask = np.logical_not(get_geostationary_mask(fh.area).compute())
            np.testing.assert_equal(mask, ref_mask)
            # NOTE(review): several values in this dict are garbled by
            # extraction ('.0' and a missing altitude) -- restore from history.
            orb_params_exp = {'projection_longitude': 140.7, 'projection_latitude': 0.0, 'projection_altitude': .0, 'satellite_actual_longitude': 140.657, 'satellite_actual_latitude': 0.0, 'satellite_actual_altitude': , 'nadir_longitude': 140.252539, 'nadir_latitude': 0.}
            actual_obs_params = im.attrs['orbital_parameters']
            for (key, value) in orb_params_exp.items():
                assert (key in actual_obs_params)
                np.testing.assert_allclose(value, actual_obs_params[key])
            time_params_exp = {'nominal_start_time': datetime(2018, 10, 22, 3, 0, 0, 0), 'nominal_end_time': datetime(2018, 10, 22, 3, 0, 0, 0), 'observation_start_time': datetime(2018, 10, 22, 3, 0, 20, 596896), 'observation_end_time': datetime(2018, 10, 22, 3, 0, 53, 947296)}
            actual_time_params = im.attrs['time_parameters']
            for (key, value) in time_params_exp.items():
                assert (key in actual_time_params)
                assert (value == actual_time_params[key])
            # With mask_space disabled, the space-masking helper must not run.
            fh.mask_space = False
            with mock.patch('satpy.readers.ahi_hsd.AHIHSDFileHandler._mask_space') as mask_space:
                with warnings.catch_warnings():
                    warnings.filterwarnings('ignore', category=UserWarning, message='Actual .* header size')
                    fh.read_band(mock.MagicMock(), mock.MagicMock())
                mask_space.assert_not_called()

    def test_read_band_from_actual_file(self, hsd_file_jp01):
        """Reading a real (fixture) file: chunking and float32 dtype."""
        filename_info = {'segment': 1, 'total_segments': 1}
        filetype_info = {'file_type': 'blahB01'}
        fh = AHIHSDFileHandler(hsd_file_jp01, filename_info, filetype_info)
        key = {'name': 'B01', 'calibration': 'counts', 'resolution': 1000}
        import dask
        with dask.config.set({'array.chunk-size': '32MiB'}):
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore', category=UserWarning, message='Actual .* header size')
                data = fh.read_band(key, {'units': '%', 'standard_name': 'toa_bidirectional_reflectance', 'wavelength': 2, 'resolution': 1000})
        assert (data.chunks == (((1100,) * 10), ((1100,) * 10)))
        # dtype must be stable across compute (no silent upcasting).
        assert (data.dtype == data.compute().dtype)
        assert (data.dtype == np.float32)

    ('satpy.readers.ahi_hsd.AHIHSDFileHandler._read_data')
    ('satpy.readers.ahi_hsd.AHIHSDFileHandler._mask_invalid')
    ('satpy.readers.ahi_hsd.AHIHSDFileHandler.calibrate')
    def test_scene_loading(self, calibrate, *mocks):
        """Loading through the Scene API applies the same space mask."""
        from satpy import Scene
        nrows = 25
        ncols = 100
        calibrate.return_value = np.ones((nrows, ncols))
        with _fake_hsd_handler() as fh:
            with mock.patch('satpy.readers.ahi_hsd.AHIHSDFileHandler') as fh_cls:
                fh_cls.return_value = fh
                fh.filename_info['total_segments'] = 1
                fh.filename_info['segment'] = 1
                fh.data_info['number_of_columns'] = ncols
                fh.data_info['number_of_lines'] = nrows
                scn = Scene(reader='ahi_hsd', filenames=['HS_H08__0700_B07_FLDK_R20_S0110.DAT'])
                with warnings.catch_warnings():
                    warnings.filterwarnings('ignore', category=UserWarning, message='Actual .* header size')
                    scn.load(['B07'])
                im = scn['B07']
                mask = im.to_masked_array().mask
                ref_mask = np.logical_not(get_geostationary_mask(fh.area).compute())
                np.testing.assert_equal(mask, ref_mask)

    # NOTE(review): this bare assert sits between methods -- presumably the
    # body of a test method whose "def" line was lost in extraction; confirm.
    assert (fh.area.proj_id == f"geosh{FAKE_BASIC_INFO['satellite'][(- 1)]}")

    def test_time_properties(self):
        """Start/end, observation, and nominal time properties."""
        with _fake_hsd_handler() as fh:
            assert (fh.start_time == datetime(2018, 10, 22, 3, 0))
            assert (fh.end_time == datetime(2018, 10, 22, 3, 0))
            assert (fh.observation_start_time == datetime(2018, 10, 22, 3, 0, 20, 596896))
            assert (fh.observation_end_time == datetime(2018, 10, 22, 3, 0, 53, 947296))
            assert (fh.nominal_start_time == datetime(2018, 10, 22, 3, 0, 0, 0))
            assert (fh.nominal_end_time == datetime(2018, 10, 22, 3, 0, 0, 0))

    def test_scanning_frequencies(self):
        """Nominal times are rounded per observation-area scanning frequency."""
        with _fake_hsd_handler() as fh:
            fh.observation_area = 'JP04'
            assert (fh.nominal_start_time == datetime(2018, 10, 22, 3, 7, 30, 0))
            assert (fh.nominal_end_time == datetime(2018, 10, 22, 3, 7, 30, 0))
            fh.observation_area = 'R304'
            assert (fh.nominal_start_time == datetime(2018, 10, 22, 3, 7, 30, 0))
            assert (fh.nominal_end_time == datetime(2018, 10, 22, 3, 7, 30, 0))
            fh.observation_area = 'R420'
            assert (fh.nominal_start_time == datetime(2018, 10, 22, 3, 9, 30, 0))
            assert (fh.nominal_end_time == datetime(2018, 10, 22, 3, 9, 30, 0))
            fh.observation_area = 'R520'
            assert (fh.nominal_start_time == datetime(2018, 10, 22, 3, 9, 30, 0))
            assert (fh.nominal_end_time == datetime(2018, 10, 22, 3, 9, 30, 0))
            fh.observation_area = 'FLDK'
            assert (fh.nominal_start_time == datetime(2018, 10, 22, 3, 0, 0, 0))
            assert (fh.nominal_end_time == datetime(2018, 10, 22, 3, 0, 0, 0))

    def test_blocklen_error(self, *mocks):
        """_check_fpos warns only when the actual file position mismatches."""
        open_name = ('%s.open' % __name__)
        fpos = 50
        with _fake_hsd_handler() as fh, mock.patch(open_name, create=True) as mock_open, mock_open(mock.MagicMock(), 'r') as fp_:
            # Matching position: no warning expected.
            fp_.tell.return_value = 50
            with warnings.catch_warnings(record=True) as w:
                fh._check_fpos(fp_, fpos, 0, 'header 1')
                assert (len(w) == 0)
            # Mismatched position: a UserWarning must be emitted.
            fp_.tell.return_value = 100
            with pytest.warns(UserWarning, match='Actual .* header size does not match expected'):
                fh._check_fpos(fp_, fpos, 0, 'header 1')

    def test_is_valid_time(self):
        """_is_valid_timeline accepts the fixture timeline, rejects fill."""
        assert AHIHSDFileHandler._is_valid_timeline(FAKE_BASIC_INFO['observation_timeline'])
        assert (not AHIHSDFileHandler._is_valid_timeline('65526'))

    def test_time_rounding(self):
        """Observation time is rounded to nominal only for valid timelines."""
        mocker = mock.MagicMock()
        in_date = datetime(2020, 1, 1, 12, 0, 0)
        with mock.patch('satpy.readers.ahi_hsd.AHIHSDFileHandler._is_valid_timeline', mocker):
            with _fake_hsd_handler() as fh:
                mocker.return_value = True
                assert (fh._modify_observation_time_for_nominal(in_date) == datetime(2020, 1, 1, 3, 0, 0))
                # Fill-value timeline: time is passed through with a warning.
                mocker.return_value = False
                with pytest.warns(UserWarning, match='Observation timeline is fill value, not rounding observation time'):
                    assert (fh._modify_observation_time_for_nominal(in_date) == datetime(2020, 1, 1, 12, 0, 0))
def scalar_to_float(scalar: Scalar) -> float: if isinstance(scalar, Tensor): scalar = scalar.squeeze() numel = scalar.numel() if (numel != 1): raise ValueError(f'Scalar tensor must contain a single item, {numel} given.') return float(scalar.cpu().detach().numpy().item()) elif isinstance(scalar, ndarray): numel = scalar.size if (numel != 1): raise ValueError(f'Scalar ndarray must contain a single item, {numel} given.') return float(scalar.item()) return float(scalar)
def get_scheduler(optimizer, opt): if (opt.lr_policy == 'linear'): def lambda_rule(epoch): lr_l = (1.0 - (max(0, ((epoch + opt.epoch_count) - opt.n_epochs)) / float((opt.n_epochs_decay + 1)))) return lr_l scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule) elif (opt.lr_policy == 'step'): scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1) elif (opt.lr_policy == 'plateau'): scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5) elif (opt.lr_policy == 'cosine'): scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0) else: return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy) return scheduler
class Onset(entity): def __init__(self, phonemes, lang): self.feats = {} self._p_changed = True self.featpaths = {} self.lang = lang if phonemes: self.children = phonemes else: self.children = [] def isBranching(self): return (len(self.children) > 1)
class SurvivalGFormula():
    """Parametric g-formula for survival data with a binary exposure.

    Fits a pooled logistic outcome model on long-format (person-period)
    data, then simulates the marginal cumulative-risk curve under a
    hypothetical treatment plan.
    """

    def __init__(self, df, idvar, exposure, outcome, time, weights=None):
        # df: long-format DataFrame (one row per subject per time period).
        # idvar / exposure / outcome / time: column names in df.
        # weights: optional column of frequency weights.
        self.exposure = exposure
        self.outcome = outcome
        self.t = time
        self.id = idvar
        self._missing_indicator = '__missing_indicator__'
        # check_input_data drops censored/missing rows and flags whether the
        # outcome is continuous (unsupported here).
        (self.gf, self._miss_flag, self._continuous_outcome_) = check_input_data(data=df, exposure=exposure, outcome=outcome, estimator='SurvivalGFormula', drop_censoring=True, drop_missing=True, binary_exposure_only=True)
        # Sort by subject then time so the per-subject cumulative products
        # computed in fit() run in chronological order.
        self.gf = self.gf.copy().sort_values(by=[idvar, time]).reset_index(drop=True)
        if self._continuous_outcome_:
            raise ValueError('SurvivalGFormula does not support continuous outcomes')
        self._weights = weights
        self._outcome_model = None
        self.marginal_outcome = None
        self.predicted_df = None

    def outcome_model(self, model, print_results=True):
        """Fit the pooled logistic outcome model.

        model: formula right-hand side (string); should include the exposure.
        print_results: when True, print the GLM fit summary.
        """
        if (self.exposure not in model):
            warnings.warn((("It looks like '" + self.exposure) + "' is not included in the outcome model."))
        linkdist = sm.families.family.Binomial()
        if (self._weights is None):
            m = smf.glm(((self.outcome + ' ~ ') + model), self.gf, family=linkdist)
        else:
            m = smf.glm(((self.outcome + ' ~ ') + model), self.gf, family=linkdist, freq_weights=self.gf[self._weights])
        self._outcome_model = m.fit()
        if print_results:
            print('')
            print('Outcome Model')
            print(self._outcome_model.summary())
            print('')

    def fit(self, treatment):
        """Estimate marginal cumulative risk under a treatment plan.

        treatment: 'all' (everyone exposed), 'none', 'natural' (observed
        exposure), or a string expression eval'd against the data to set
        exposure row-wise. Stores results in marginal_outcome/predicted_df.
        """
        if (self._outcome_model is None):
            raise ValueError('Before the g-formula can be calculated, the outcome model must be specified')
        g = self.gf.copy()
        if (treatment == 'all'):
            g[self.exposure] = 1
        elif (treatment == 'none'):
            g[self.exposure] = 0
        elif (treatment == 'natural'):
            pass
        else:
            # NOTE(review): eval() on the caller-supplied plan string --
            # acceptable only for trusted input.
            g[self.exposure] = np.where(eval(treatment), 1, 0)
        g[self.outcome] = np.nan
        # Per-period survival probability, then cumulative risk as
        # 1 - prod(survival) within each subject (rows are time-ordered).
        g[self.outcome] = (1 - self._outcome_model.predict(g))
        g[self.outcome] = (1 - g.groupby(self.id)[self.outcome].cumprod())
        if (self._weights is None):
            marginal = g.groupby(self.t)[self.outcome].mean()
        else:
            marginal = self._weighted_average(data=g, y_col=self.outcome, weight_col=self._weights, by_col=self.t)
        self.marginal_outcome = marginal.rename(index='timeline')
        self.predicted_df = g

    def plot(self, **plot_kwargs):
        """Step-plot the estimated risk curve; returns the matplotlib Axes."""
        if (self.marginal_outcome is None):
            raise ValueError('Before plotting, the marginal outcomes must be estimated with the fit() function')
        ax = plt.gca()
        ax.step(self.marginal_outcome.index, self.marginal_outcome, where='post', **plot_kwargs)
        ax.set_xlabel(self.t)
        ax.set_ylabel(('Risk of ' + self.outcome))
        return ax

    # NOTE(review): defined without `self` yet invoked as
    # self._weighted_average(...) with keyword-only arguments -- a
    # @staticmethod decorator was probably lost in extraction; confirm
    # against the original file.
    def _weighted_average(data, y_col, weight_col, by_col):
        # Weighted mean of y_col within each by_col group; NaN outcomes
        # contribute zero weight. NOTE: mutates `data` in place by adding
        # the temporary columns '_w_y_' and '_w_t_'.
        data['_w_y_'] = (data[y_col] * data[weight_col])
        data['_w_t_'] = (data[weight_col] * pd.notnull(data[y_col]))
        g = data.groupby(by_col)
        result = (g['_w_y_'].sum() / g['_w_t_'].sum())
        return result