# assumed imports: the snippet calls prod() and sqrt(); math.sqrt is used here,
# and the `.real` access below is valid because Python floats expose `.real`
from math import prod, sqrt


class curvefi():

    def __init__(self, *args, a):
        self.reserve = list(args)
        self.n = len(args)
        self.a_inv = a
        self.sum_inv = self.get_suminv()
        self.prod_inv = (self.sum_inv / self.n) ** self.n
        self.totalshares = self.sum_inv

    def poolsum(self):
        return sum(self.reserve)

    def poolprod(self):
        return prod(self.reserve)

    def get_suminv(self):
        sumall = self.poolsum()
        if len(set(self.reserve)) == 1:
            # balanced pool: the invariant D equals the plain sum
            suminv = sumall
        else:
            n = self.n
            proall = self.poolprod()
            a = self.a_inv
            if a < 1e-10:
                # constant-product limit
                suminv = proall ** (1 / n) * n
            elif a == 1:
                suminv = (proall * sumall) ** (1 / (n + 1)) * n ** (n / (n + 1))
            elif n == 2:
                # closed-form root of the two-asset invariant cubic
                sqrtand = (proall * (9 * a * sumall
                           + sqrt(81 * a ** 2 * sumall ** 2
                                  + 48 * proall * (a - 1) ** 3))) ** (1 / 3)
                suminv_complex = ((-2 * 6 ** (2 / 3) * proall * (a - 1)
                                   + 6 ** (1 / 3) * sqrtand ** 2) / (3 * sqrtand))
                suminv = suminv_complex.real
            else:
                raise Exception('Cannot handle unequal asset pool with n>2')
        return suminv

    def add_liquidity(self, addamount, addindex: int):
        self.reserve[addindex] += addamount
        sum_inv_new = self.get_suminv()
        newshares = sum_inv_new - self.sum_inv
        self.sum_inv = sum_inv_new
        self.prod_inv = (sum_inv_new / self.n) ** self.n
        return newshares

    def exchange(self, inamount, inindex: int, outindex: int):
        a = self.a_inv
        D = self.sum_inv
        X = self.prod_inv
        sumexo = self.poolsum() + inamount - self.reserve[outindex]
        inres = self.reserve[inindex] + inamount
        prodexo = (self.poolprod()
                   / prod([self.reserve[i] for i in [inindex, outindex]])
                   * inres)
        outres = ((1 - 1 / a) * D - sumexo
                  + sqrt(((1 - 1 / a) * D - sumexo) ** 2
                         + 4 * D * X / a / prodexo)) / 2
        outamount = self.reserve[outindex] - outres
        self.reserve[inindex] = inres
        self.reserve[outindex] = outres
        return outamount
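# A quick smoke test (illustrative, not from the source): a balanced two-asset
# pool with amplification a=10; swapping 10 units in returns slightly less
# than 10 units out because of slippage.
if __name__ == '__main__':
    pool = curvefi(1000.0, 1000.0, a=10)
    print(pool.sum_inv)                 # 2000.0 for equal reserves
    out = pool.exchange(10.0, 0, 1)
    print(round(out, 4))                # ~9.98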
class VoskHandler(STTHandler):

    def __init__(self, settings, pip_path, stt):
        self.key = 'vosk'
        self.settings = settings
        self.pip_path = pip_path
        self.stt = stt

    def recognize_file(self, path):
        from vosk import Model
        r = sr.Recognizer()
        with sr.AudioFile(path) as source:
            audio = r.record(source)
        path = self.get_setting('path')
        r.vosk_model = Model(path)
        try:
            res = json.loads(r.recognize_vosk(audio))['text']
        except sr.UnknownValueError:
            return None
        except Exception as e:
            print(e)
            return None
        return res
class ResNet_Final_Auxiliary_Classifer(nn.Module):

    def __init__(self, block, num_classes):
        super(ResNet_Final_Auxiliary_Classifer, self).__init__()
        self.conv = conv1x1(512 * block.expansion * 4, 512 * block.expansion)
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)

    def forward(self, x):
        sum_fea = torch.cat(x, dim=1)
        out = self.conv(sum_fea)
        out = self.avg_pool(out)
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        return out
def get_trivial_allowed_durations(utt2dur, args):
    lengths = list(set(
        [int(float(d) * 1000 - args.frame_length) / args.frame_shift + 1
         for (key, d) in utt2dur.items()]))
    lengths.sort()
    allowed_durations = []
    with open(os.path.join(args.dir, 'allowed_durs.txt'), 'w',
              encoding='latin-1') as durs_fp, \
         open(os.path.join(args.dir, 'allowed_lengths.txt'), 'w',
              encoding='latin-1') as lengths_fp:
        for length in lengths:
            if length % args.frame_subsampling_factor != 0:
                length = args.frame_subsampling_factor * (length // args.frame_subsampling_factor)
            d = (args.frame_shift * (length - 1.0) + args.frame_length
                 + args.frame_shift / 2) / 1000.0
            allowed_durations.append(d)
            durs_fp.write('{}\n'.format(d))
            lengths_fp.write('{}\n'.format(int(length)))
    assert len(allowed_durations) > 0
    start_dur = allowed_durations[0]
    end_dur = allowed_durations[-1]
    logger.info('Durations in the range [{},{}] will be covered.'.format(start_dur, end_dur))
    logger.info('There will be {} unique allowed lengths for the utterances.'.format(len(allowed_durations)))
    return allowed_durations
# the decorator below lost its '@register' prefix in the source; it is
# restored per ranger's register_image_displayer convention
@register_image_displayer('sixel')
class SixelImageDisplayer(ImageDisplayer, FileManagerAware):

    def __init__(self):
        self.win = None
        self.cache = {}
        self.fm.signal_bind('preview.cleared',
                            lambda signal: self._clear_cache(signal.path))

    def _clear_cache(self, path):
        if os.path.exists(path):
            self.cache = {ce: cd for ce, cd in self.cache.items()
                          if ce.inode != os.stat(path).st_ino}

    def _sixel_cache(self, path, width, height):
        stat = os.stat(path)
        cacheable = _CacheableSixelImage(width, height, stat.st_ino)
        if cacheable not in self.cache:
            font_width, font_height = get_font_dimensions()
            fit_width = font_width * width
            fit_height = font_height * height
            sixel_dithering = self.fm.settings.sixel_dithering
            cached = TemporaryFile('w+', prefix='ranger',
                                   suffix=path.replace(os.sep, '-'))
            environ = dict(os.environ)
            environ.setdefault('MAGICK_OCL_DEVICE', 'true')
            try:
                check_call([*MAGICK_CONVERT_CMD_BASE, path + '[0]',
                            '-geometry', '{0}x{1}>'.format(fit_width, fit_height),
                            '-dither', sixel_dithering, 'sixel:-'],
                           stdout=cached, stderr=DEVNULL, env=environ)
            except CalledProcessError:
                raise ImageDisplayError('ImageMagick failed processing the SIXEL image')
            except FileNotFoundError:
                raise ImageDisplayError('SIXEL image previews require ImageMagick')
            finally:
                cached.flush()
            if os.fstat(cached.fileno()).st_size == 0:
                raise ImageDisplayError('ImageMagick produced an empty SIXEL image file')
            self.cache[cacheable] = _CachedSixelImage(mmap.mmap(cached.fileno(), 0), cached)
        return self.cache[cacheable].image

    def draw(self, path, start_x, start_y, width, height):
        if self.win is None:
            self.win = self.fm.ui.win.subwin(height, width, start_y, start_x)
        else:
            self.win.mvwin(start_y, start_x)
            self.win.resize(height, width)
        with temporarily_moved_cursor(start_y, start_x):
            sixel = self._sixel_cache(path, width, height)[:]
            if PY3:
                sys.stdout.buffer.write(sixel)
            else:
                sys.stdout.write(sixel)
            sys.stdout.flush()

    def clear(self, start_x, start_y, width, height):
        if self.win is not None:
            self.win.clear()
            self.win.refresh()
            self.win = None
        self.fm.ui.win.redrawwin()

    def quit(self):
        self.clear(0, 0, 0, 0)
        self.cache = {}
def lenet(batch_size):
    n = caffe.NetSpec()
    n.data, n.label = L.DummyData(shape=[dict(dim=[batch_size, 1, 28, 28]),
                                         dict(dim=[batch_size, 1, 1, 1])],
                                  transform_param=dict(scale=1.0 / 255), ntop=2)
    n.conv1 = L.Convolution(n.data, kernel_size=5, num_output=20,
                            weight_filler=dict(type='xavier'))
    n.pool1 = L.Pooling(n.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.conv2 = L.Convolution(n.pool1, kernel_size=5, num_output=50,
                            weight_filler=dict(type='xavier'))
    n.pool2 = L.Pooling(n.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.ip1 = L.InnerProduct(n.pool2, num_output=500, weight_filler=dict(type='xavier'))
    n.relu1 = L.ReLU(n.ip1, in_place=True)
    n.ip2 = L.InnerProduct(n.relu1, num_output=10, weight_filler=dict(type='xavier'))
    n.loss = L.SoftmaxWithLoss(n.ip2, n.label)
    return n.to_proto()
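# Assumed usage (mirroring the standard Caffe NetSpec tutorials, not part of
# the source): render the generated NetParameter to a prototxt file.
if __name__ == '__main__':
    with open('lenet_auto.prototxt', 'w') as f:
        f.write(str(lenet(64)))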
def test_local_site_to_dict():
    vsite = LocalVirtualSite(name='test', orientations=[(0, 1, 2)],
                             p1=0 * unit.angstrom, p2=0 * unit.angstrom,
                             p3=0 * unit.angstrom,
                             o_weights=[1.0, 0.0, 0.0],
                             x_weights=[-1.0, 0.5, 0.5],
                             y_weights=[-1.0, 0.0, 1.0])
    vsite_dict = vsite.to_dict()
    assert vsite_dict['name'] == 'test'
    assert vsite_dict['p1'] == 0 * unit.nanometer
    assert vsite_dict['o_weights'] == [1.0, 0.0, 0.0]
    assert vsite_dict['x_weights'] == [-1.0, 0.5, 0.5]
# note: the methods below reference module-level names from the original
# training script (args, logger, save_checkpoint, get_rank, synchronize, ...)
class Trainer(object):

    def __init__(self, args):
        self.args = args
        self.device = torch.device(args.device)
        self.num_gpus = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1

        if args.dataset == 'citys':
            train_dataset = CSTrainValSet(args.data,
                                          list_path='./dataset/list/cityscapes/train.lst',
                                          max_iters=args.max_iterations * args.batch_size,
                                          crop_size=args.crop_size, scale=True, mirror=True)
            val_dataset = CSTrainValSet(args.data,
                                        list_path='./dataset/list/cityscapes/val.lst',
                                        crop_size=(1024, 2048), scale=False, mirror=False)
        elif args.dataset == 'voc':
            train_dataset = VOCDataTrainSet(args.data, './dataset/list/voc/train_aug.txt',
                                            max_iters=args.max_iterations * args.batch_size,
                                            crop_size=args.crop_size, scale=True, mirror=True)
            val_dataset = VOCDataValSet(args.data, './dataset/list/voc/val.txt')
        elif args.dataset == 'ade20k':
            train_dataset = ADETrainSet(args.data,
                                        max_iters=args.max_iterations * args.batch_size,
                                        ignore_label=args.ignore_label,
                                        crop_size=args.crop_size, scale=True, mirror=True)
            val_dataset = ADEDataValSet(args.data)
        elif args.dataset == 'camvid':
            train_dataset = CamvidTrainSet(args.data, './dataset/list/CamVid/camvid_train_list.txt',
                                           max_iters=args.max_iterations * args.batch_size,
                                           ignore_label=args.ignore_label,
                                           crop_size=args.crop_size, scale=True, mirror=True)
            val_dataset = CamvidValSet(args.data, './dataset/list/CamVid/camvid_val_list.txt')
        elif args.dataset == 'coco_stuff_164k':
            train_dataset = CocoStuff164kTrainSet(args.data, './dataset/list/coco_stuff_164k/coco_stuff_164k_train.txt',
                                                  max_iters=args.max_iterations * args.batch_size,
                                                  ignore_label=args.ignore_label,
                                                  crop_size=args.crop_size, scale=True, mirror=True)
            val_dataset = CocoStuff164kValSet(args.data, './dataset/list/coco_stuff_164k/coco_stuff_164k_val.txt')
        else:
            raise ValueError('unknown dataset')

        args.batch_size = args.batch_size // self.num_gpus  # was a bare `num_gpus`, a NameError
        train_sampler = make_data_sampler(train_dataset, shuffle=True, distributed=args.distributed)
        train_batch_sampler = make_batch_data_sampler(train_sampler, args.batch_size, args.max_iterations)
        val_sampler = make_data_sampler(val_dataset, False, args.distributed)
        val_batch_sampler = make_batch_data_sampler(val_sampler, images_per_batch=1)
        self.train_loader = data.DataLoader(dataset=train_dataset, batch_sampler=train_batch_sampler,
                                            num_workers=args.workers, pin_memory=True)
        self.val_loader = data.DataLoader(dataset=val_dataset, batch_sampler=val_batch_sampler,
                                          num_workers=args.workers, pin_memory=True)

        BatchNorm2d = nn.SyncBatchNorm if args.distributed else nn.BatchNorm2d
        self.t_model = get_segmentation_model(model=args.teacher_model, backbone=args.teacher_backbone,
                                              img_size=args.crop_size, pretrained=args.teacher_pretrained,
                                              batchnorm_layer=nn.BatchNorm2d,
                                              num_class=train_dataset.num_class).to(self.args.local_rank)
        self.s_model = get_segmentation_model(model=args.student_model, backbone=args.student_backbone,
                                              img_size=args.crop_size, pretrained=args.student_pretrained,
                                              batchnorm_layer=BatchNorm2d,
                                              num_class=train_dataset.num_class).to(self.device)
        for t_n, t_p in self.t_model.named_parameters():
            t_p.requires_grad = False
        self.t_model.eval()
        self.s_model.eval()

        if args.resume:
            if os.path.isfile(args.resume):
                name, ext = os.path.splitext(args.resume)
                # the original check `(ext == '.pkl') or '.pth'` was always truthy
                assert ext in ('.pkl', '.pth'), 'Sorry only .pth and .pkl files supported.'
                print('Resuming training, loading {}...'.format(args.resume))
                self.s_model.load_state_dict(
                    torch.load(args.resume, map_location=lambda storage, loc: storage))

        x = torch.randn(1, 3, 512, 512).cuda()
        t_y = self.t_model(x)
        s_y = self.s_model(x)
        t_channels = t_y[-1].size(1)
        s_channels = s_y[-1].size(1)

        self.criterion = SegCrossEntropyLoss(ignore_index=args.ignore_label).to(self.device)
        self.criterion_kd = CriterionKD(temperature=args.kd_temperature).to(self.device)
        self.criterion_minibatch = CriterionMiniBatchCrossImagePair(
            temperature=args.contrast_temperature, pooling=True).to(self.device)
        self.criterion_memory_contrast = StudentSegContrast(
            num_classes=train_dataset.num_class,
            pixel_memory_size=args.pixel_memory_size,
            region_memory_size=args.region_memory_size,
            region_contrast_size=args.region_contrast_size // train_dataset.num_class + 1,
            pixel_contrast_size=args.pixel_contrast_size // train_dataset.num_class + 1,
            contrast_kd_temperature=args.contrast_kd_temperature,
            contrast_temperature=args.contrast_temperature,
            s_channels=s_channels, t_channels=t_channels,
            ignore_label=args.ignore_label).to(self.device)

        params_list = nn.ModuleList([])
        params_list.append(self.s_model)
        params_list.append(self.criterion_memory_contrast)
        if args.optimizer_type == 'sgd':
            self.optimizer = torch.optim.SGD(params_list.parameters(), lr=args.lr,
                                             momentum=args.momentum, weight_decay=args.weight_decay)
        elif args.optimizer_type == 'adamw':
            self.optimizer = torch.optim.AdamW(params_list.parameters(), lr=args.lr,
                                               weight_decay=args.weight_decay)
        else:
            raise ValueError('no such optimizer')

        if args.distributed:
            self.s_model = nn.parallel.DistributedDataParallel(
                self.s_model, device_ids=[args.local_rank], output_device=args.local_rank)
            self.criterion_memory_contrast = nn.parallel.DistributedDataParallel(
                self.criterion_memory_contrast, device_ids=[args.local_rank],
                output_device=args.local_rank)

        self.metric = SegmentationMetric(train_dataset.num_class)
        self.best_pred = 0.0

    def adjust_lr(self, base_lr, iter, max_iter, power):
        cur_lr = base_lr * (1 - float(iter) / max_iter) ** power
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = cur_lr
        return cur_lr

    def reduce_tensor(self, tensor):
        rt = tensor.clone()
        dist.all_reduce(rt, op=dist.ReduceOp.SUM)
        return rt

    def reduce_mean_tensor(self, tensor):
        rt = tensor.clone()
        dist.all_reduce(rt, op=dist.ReduceOp.SUM)
        rt /= self.num_gpus
        return rt

    def train(self):
        save_to_disk = get_rank() == 0
        log_per_iters, val_per_iters = self.args.log_iter, self.args.val_per_iters
        save_per_iters = self.args.save_per_iters
        start_time = time.time()
        logger.info('Start training, Total Iterations {:d}'.format(args.max_iterations))
        self.s_model.train()

        for iteration, (images, targets, _) in enumerate(self.train_loader):
            iteration = iteration + 1
            images = images.to(self.device)
            targets = targets.long().to(self.device)

            with torch.no_grad():
                t_outputs = self.t_model(images)
            s_outputs = self.s_model(images)

            task_loss = self.criterion(s_outputs[0], targets)
            kd_loss = torch.tensor(0.0).cuda()
            wsl_kd_loss = torch.tensor(0.0).cuda()
            fitnet_loss = torch.tensor(0.0).cuda()
            kd_loss = self.args.lambda_kd * self.criterion_kd(s_outputs[0], t_outputs[0])
            minibatch_pixel_contrast_loss = (self.args.lambda_minibatch_pixel
                                             * self.criterion_minibatch(s_outputs[-1], t_outputs[-1]))
            _, predict = torch.max(s_outputs[0], dim=1)
            memory_pixel_contrast_loss, memory_region_contrast_loss = self.criterion_memory_contrast(
                s_outputs[-1], t_outputs[-1].detach(), targets, predict)
            memory_pixel_contrast_loss = self.args.lambda_memory_pixel * memory_pixel_contrast_loss
            memory_region_contrast_loss = self.args.lambda_memory_region * memory_region_contrast_loss
            losses = (task_loss + kd_loss + minibatch_pixel_contrast_loss
                      + memory_pixel_contrast_loss + memory_region_contrast_loss + fitnet_loss)

            lr = self.adjust_lr(base_lr=args.lr, iter=iteration - 1,
                                max_iter=args.max_iterations, power=0.9)
            self.optimizer.zero_grad()
            losses.backward()
            self.optimizer.step()

            task_losses_reduced = self.reduce_mean_tensor(task_loss)
            kd_losses_reduced = self.reduce_mean_tensor(kd_loss)
            wsl_kd_loss_reduced = self.reduce_mean_tensor(wsl_kd_loss)
            minibatch_pixel_contrast_loss_reduced = self.reduce_mean_tensor(minibatch_pixel_contrast_loss)
            memory_pixel_contrast_loss_reduced = self.reduce_mean_tensor(memory_pixel_contrast_loss)
            memory_region_contrast_loss_reduced = self.reduce_mean_tensor(memory_region_contrast_loss)
            fitnet_loss_reduced = self.reduce_mean_tensor(fitnet_loss)

            eta_seconds = (time.time() - start_time) / iteration * (args.max_iterations - iteration)
            eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
            if iteration % log_per_iters == 0 and save_to_disk:
                logger.info(
                    'Iters: {:d}/{:d} || Lr: {:.6f} || Task Loss: {:.4f} || KD Loss: {:.4f} '
                    '|| WSL_KD Loss: {:.4f} || Mini-batch p2p Loss: {:.4f} || Memory p2p Loss: {:.4f} '
                    '|| Memory p2r Loss: {:.4f} || Fitnet Loss: {:.4f} || Cost Time: {} '
                    '|| Estimated Time: {}'.format(
                        iteration, args.max_iterations, self.optimizer.param_groups[0]['lr'],
                        task_losses_reduced.item(), kd_losses_reduced.item(),
                        wsl_kd_loss_reduced.item(), minibatch_pixel_contrast_loss_reduced.item(),
                        memory_pixel_contrast_loss_reduced.item(),
                        memory_region_contrast_loss_reduced.item(), fitnet_loss_reduced.item(),
                        str(datetime.timedelta(seconds=int(time.time() - start_time))), eta_string))
            if iteration % save_per_iters == 0 and save_to_disk:
                save_checkpoint(self.s_model, self.args, is_best=False)
            if not self.args.skip_val and iteration % val_per_iters == 0:
                self.validation()
                self.s_model.train()

        save_checkpoint(self.s_model, self.args, is_best=False)
        total_training_time = time.time() - start_time
        total_training_str = str(datetime.timedelta(seconds=total_training_time))
        logger.info('Total training time: {} ({:.4f}s / it)'.format(
            total_training_str, total_training_time / args.max_iterations))

    def validation(self):
        is_best = False
        self.metric.reset()
        if self.args.distributed:
            model = self.s_model.module
        else:
            model = self.s_model
        torch.cuda.empty_cache()
        model.eval()
        logger.info('Start validation, Total sample: {:d}'.format(len(self.val_loader)))
        for i, (image, target, filename) in enumerate(self.val_loader):
            image = image.to(self.device)
            target = target.to(self.device)
            with torch.no_grad():
                outputs = model(image)
                B, H, W = target.size()
                outputs[0] = F.interpolate(outputs[0], (H, W), mode='bilinear', align_corners=True)
            self.metric.update(outputs[0], target)
            pixAcc, mIoU = self.metric.get()
            logger.info('Sample: {:d}, Validation pixAcc: {:.3f}, mIoU: {:.3f}'.format(
                i + 1, pixAcc, mIoU))

        if self.num_gpus > 1:
            sum_total_correct = torch.tensor(self.metric.total_correct).cuda().to(args.local_rank)
            sum_total_label = torch.tensor(self.metric.total_label).cuda().to(args.local_rank)
            sum_total_inter = torch.tensor(self.metric.total_inter).cuda().to(args.local_rank)
            sum_total_union = torch.tensor(self.metric.total_union).cuda().to(args.local_rank)
            sum_total_correct = self.reduce_tensor(sum_total_correct)
            sum_total_label = self.reduce_tensor(sum_total_label)
            sum_total_inter = self.reduce_tensor(sum_total_inter)
            sum_total_union = self.reduce_tensor(sum_total_union)
            pixAcc = 1.0 * sum_total_correct / (2.e-16 + sum_total_label)
            IoU = 1.0 * sum_total_inter / (2.e-16 + sum_total_union)
            mIoU = IoU.mean().item()
            logger.info('Overall validation pixAcc: {:.3f}, mIoU: {:.3f}'.format(
                pixAcc.item() * 100, mIoU * 100))

        new_pred = mIoU
        if new_pred > self.best_pred:
            is_best = True
            self.best_pred = new_pred
        if args.distributed is not True or (args.distributed and args.local_rank == 0):
            save_checkpoint(self.s_model, self.args, is_best)
        synchronize()
class ConvolutionalGatingMLP(torch.nn.Module):

    def __init__(self, size: int, linear_units: int, kernel_size: int,
                 dropout_rate: float, use_linear_after_conv: bool = False):
        super().__init__()
        self.norm = LayerNorm(size)
        self.channel_proj1 = torch.nn.Sequential(
            torch.nn.Linear(size, linear_units), torch.nn.GELU())
        self.csgu = ConvolutionalSpatialGatingUnit(
            size=linear_units, kernel_size=kernel_size,
            dropout_rate=dropout_rate,
            use_linear_after_conv=use_linear_after_conv)
        self.channel_proj2 = torch.nn.Linear(linear_units // 2, size)

    def forward(self, x, mask):
        if isinstance(x, tuple):
            xs_pad, pos_emb = x
        else:
            xs_pad, pos_emb = x, None
        xs_pad = self.norm(xs_pad)
        xs_pad = self.channel_proj1(xs_pad)
        xs_pad = self.csgu(xs_pad)
        xs_pad = self.channel_proj2(xs_pad)
        if pos_emb is not None:
            out = (xs_pad, pos_emb)
        else:
            out = xs_pad
        return out
class SST(Task):
    VERSION = 0
    DATASET_PATH = 'glue'
    DATASET_NAME = 'sst2'

    def has_training_docs(self):
        return True

    def has_validation_docs(self):
        return True

    def has_test_docs(self):
        return False

    def training_docs(self):
        if self._training_docs is None:
            self._training_docs = list(self.dataset['train'])
        return self._training_docs

    def validation_docs(self):
        return self.dataset['validation']

    def doc_to_text(self, doc):
        return '{}\nQuestion: Is this sentence positive or negative?\nAnswer:'.format(
            general_detokenize(doc['sentence']))

    def doc_to_target(self, doc):
        return ' {}'.format({1: 'positive', 0: 'negative'}[doc['label']])

    def construct_requests(self, doc, ctx):
        ll_positive, _ = rf.loglikelihood(ctx, ' positive')
        ll_negative, _ = rf.loglikelihood(ctx, ' negative')
        return ll_positive, ll_negative

    def process_results(self, doc, results):
        ll_positive, ll_negative = results
        pred = ll_positive > ll_negative
        gold = doc['label']
        return {'acc': pred == gold}

    def higher_is_better(self):
        return {'acc': True}

    def aggregation(self):
        return {'acc': mean}
import os
import sys


def main():
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'main.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and available on "
            'your PYTHONPATH environment variable? Did you forget to activate a '
            'virtual environment?'
        ) from exc
    execute_from_command_line(sys.argv)
def raise_on_call_returned_empty(given_block_identifier: BlockIdentifier) -> NoReturn:
    msg = (
        f'Either the given address is for a different smart contract, or the '
        f'contract was not yet deployed at the block '
        f'{format_block_id(given_block_identifier)}. Either way this call '
        f'should never have happened.'
    )
    raise RaidenUnrecoverableError(msg)
def Mine_ItemS(FP, ItemS):
    global CanNum
    for i in sort_item:
        CanNum += 1
        count = 0
        index_i = GetIndex(i)
        for j in range(SeqNum):
            count += len(index_i[j])
        if count >= int(minsup):
            p = []
            p.append(i)
            FP.append(p)
            ItemS[str(p)] = [[] for k in range(SeqNum)]
            ItemS[str(p)] = index_i
    Gen_I(FP, ItemS)
class TestCreatePixmap(EndianTest):

    def setUp(self):
        # the drawable and pid values were dropped from the source; they are
        # reconstructed here from req_bin_0, assuming the big-endian
        # CreatePixmap wire layout (opcode, depth, length, pid, drawable,
        # width, height)
        self.req_args_0 = {'depth': 161, 'drawable': 0x2cad524c, 'height': 4764,
                           'pid': 0x77760b1f, 'width': 57984}
        self.req_bin_0 = b'5\xa1\x00\x04wv\x0b\x1f,\xadRL\xe2\x80\x12\x9c'

    def testPackRequest0(self):
        bin = request.CreatePixmap._request.to_binary(*(), **self.req_args_0)
        self.assertBinaryEqual(bin, self.req_bin_0)

    def testUnpackRequest0(self):
        args, remain = request.CreatePixmap._request.parse_binary(self.req_bin_0, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.req_args_0)
class AvahiService():
    DBUS_NAME = 'org.freedesktop.Avahi'
    DBUS_PATH_SERVER = '/'
    DBUS_INTERFACE_ENTRY_GROUP = 'org.freedesktop.Avahi.EntryGroup'
    DBUS_INTERFACE_SERVER = 'org.freedesktop.Avahi.Server'

    def register(self, name, port, stype):
        try:
            GLib.Variant('q', port)
        except OverflowError as e:
            raise AvahiError(e) from e
        self.name = name
        self._real_name = name
        self.port = port
        self.stype = stype
        try:
            bus = Gio.bus_get_sync(Gio.BusType.SYSTEM, None)
            if not self._watch:
                self._watch = Gio.bus_watch_name_on_connection(
                    bus, self.DBUS_NAME, Gio.BusNameWatcherFlags.NONE,
                    self._owner_appeared, self._owner_vanished)
            else:
                self._try_update_service()
        except GLib.Error as e:
            raise AvahiError(e) from e

    def unregister(self):
        if self._watch:
            with ignored(GLib.Error):
                Gio.bus_unwatch_name(self._watch)
            self._watch = None
        self._remove_server()

    def __init__(self):
        self.name = None
        self.stype = None
        self.port = None
        self._group = None
        self._group_id = None
        self._server = None
        self._server_id = None
        self._watch = None
        self._real_name = None
        self._last_server_state = None

    def _on_group_signal(self, proxy, sender, signal, *args):
        if signal == 'StateChanged':
            self._group_state_change(args[0])

    def _group_state_change(self, state, *args):
        if state == AvahiEntryGroupState.COLLISION:
            self._real_name = alternative_service_name(self._real_name)
            self._try_update_service()

    def _group_add_service_and_commit(self, group, flags):
        print_d('name=%s, flags=%x, stype=%s, port=%d'
                % (self._real_name, flags, self.stype, self.port))
        group.AddService('(iiussssqaay)', AVAHI_IF_UNSPEC, AvahiProtocol.UNSPEC,
                         flags, self._real_name, self.stype, '', '', self.port, [])
        group.Commit()

    def _add_service(self):
        assert not self._group
        assert not self._group_id
        try:
            bus = Gio.bus_get_sync(Gio.BusType.SYSTEM, None)
            server = Gio.DBusProxy.new_sync(bus, Gio.DBusProxyFlags.NONE, None,
                                            self.DBUS_NAME, self.DBUS_PATH_SERVER,
                                            self.DBUS_INTERFACE_SERVER, None)
            group_path = server.EntryGroupNew()
            group = Gio.DBusProxy.new_sync(bus, Gio.DBusProxyFlags.NONE, None,
                                           self.DBUS_NAME, group_path,
                                           self.DBUS_INTERFACE_ENTRY_GROUP, None)
            self._group_id = group.connect('g-signal', self._on_group_signal)
            self._group_add_service_and_commit(group, AvahiPublishFlags.NONE)
            self._group = group
        except GLib.Error:
            self._remove_service()

    def _try_update_service(self):
        if not self._group:
            return
        assert self._group_id
        try:
            group = self._group
            group.Reset()
            self._group_add_service_and_commit(group, AvahiPublishFlags.UPDATE)
        except GLib.Error:
            self._remove_service()

    def _remove_service(self):
        if self._group:
            if self._group_id:
                with ignored(GLib.Error):
                    self._group.disconnect(self._group_id)
                self._group_id = None
            with ignored(GLib.Error):
                self._group.Free()
            self._group = None

    def _remove_server(self):
        if self._server:
            if self._server_id:
                with ignored(GLib.Error):
                    self._server.disconnect(self._server_id)
                self._server_id = None
            self._server = None
        self._last_server_state = None
        self._remove_service()

    def _add_server(self):
        assert not self._server_id
        try:
            server = Gio.DBusProxy.new_for_bus_sync(
                Gio.BusType.SYSTEM, Gio.DBusProxyFlags.NONE, None,
                self.DBUS_NAME, self.DBUS_PATH_SERVER,
                self.DBUS_INTERFACE_SERVER, None)
            self._server_id = server.connect('g-signal', self._on_server_signal)
            self._server_state_changed(server.GetState())
            self._server = server
        except GLib.Error:
            self._remove_server()

    def _on_server_signal(self, proxy, sender, signal, *args):
        if signal == 'StateChanged':
            self._server_state_changed(args[0])

    def _server_state_changed(self, state, *args):
        if state == self._last_server_state:
            return
        self._last_server_state = state
        if state == AvahiServerState.RUNNING:
            self._add_service()
        elif state in (AvahiServerState.COLLISION, AvahiServerState.REGISTERING):
            self._remove_service()

    def _owner_appeared(self, bus, name, owner):
        self._add_server()

    def _owner_vanished(self, bus, owner):
        self._remove_server()
def on_resize(width, height):
    viewport_width, viewport_height = window.get_framebuffer_size()
    glViewport(0, 0, viewport_width, viewport_height)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    gluPerspective(45.0, float(width) / height, 1.0, 100.0)
    glMatrixMode(GL_MODELVIEW)
    return True
class IndexURLs():

    def __init__(self, repo: str):
        self.repo = hyperlink.parse(repo).normalize()
        if self.repo.host.endswith('pypi.org'):
            repo_url = (self.repo.replace(host='pypi.org')
                        if self.repo.host == 'upload.pypi.org' else self.repo)
            self.simple = repo_url.click('/simple/')
            self.project = repo_url.click('/project/')
        else:
            self.simple = self.repo.child('+simple', '')
            self.project = self.repo
class Const(_base_nodes.NoChildrenNode, Instance):
    _other_fields = ('value', 'kind')

    def __init__(self, value: Any, lineno: int | None = None,
                 col_offset: int | None = None, parent: NodeNG | None = None,
                 kind: str | None = None, *, end_lineno: int | None = None,
                 end_col_offset: int | None = None) -> None:
        self.value: Any = value
        self.kind: str | None = kind
        super().__init__(lineno=lineno, col_offset=col_offset,
                         end_lineno=end_lineno, end_col_offset=end_col_offset,
                         parent=parent)
        Instance.__init__(self, None)

    infer_unary_op = protocols.const_infer_unary_op
    infer_binary_op = protocols.const_infer_binary_op

    def __getattr__(self, name):
        if name == 'value':
            raise AttributeError
        return super().__getattr__(name)

    def getitem(self, index, context: InferenceContext | None = None):
        if isinstance(index, Const):
            index_value = index.value
        elif isinstance(index, Slice):
            index_value = _infer_slice(index, context=context)
        else:
            raise AstroidTypeError(f'Could not use type {type(index)} as subscript index')
        try:
            if isinstance(self.value, (str, bytes)):
                return Const(self.value[index_value])
        except ValueError as exc:
            raise AstroidValueError(
                f'Could not index {self.value!r} with {index_value!r}') from exc
        except IndexError as exc:
            raise AstroidIndexError(message='Index {index!r} out of range',
                                    node=self, index=index, context=context) from exc
        except TypeError as exc:
            raise AstroidTypeError(message='Type error {error!r}',
                                   node=self, index=index, context=context) from exc
        raise AstroidTypeError(f'{self!r} (value={self.value})')

    def has_dynamic_getattr(self) -> bool:
        return False

    def itered(self):
        if isinstance(self.value, str):
            return [const_factory(elem) for elem in self.value]
        raise TypeError(f'Cannot iterate over type {type(self.value)!r}')

    def pytype(self) -> str:
        return self._proxied.qname()

    def bool_value(self, context: InferenceContext | None = None):
        return bool(self.value)

    def _infer(self, context: InferenceContext | None = None,
               **kwargs: Any) -> Iterator[Const]:
        yield self
def get_path(prog_name):
    try:
        return _path_cache[prog_name]
    except KeyError:
        pass
    if prog_name not in _path_config.keys():
        raise ValueError('%s is not a known external executable' % prog_name)
    path_conf = _path_config[prog_name]

    if path_conf['env_var'] in os.environ:
        env_var_val = os.environ[path_conf['env_var']]
        subdir_msg = ''
        try:
            _path_cache[prog_name] = _validate_path(prog_name, env_var_val)
            return _path_cache[prog_name]
        except ValueError:
            try:
                _path_cache[prog_name] = _validate_path(
                    prog_name, os.path.join(env_var_val, path_conf['env_var_subdir']))
                return _path_cache[prog_name]
            except KeyError:
                pass
            except ValueError:
                subdir_msg = ', or in that path\'s "%s" subdirectory' % path_conf['env_var_subdir']
            raise ValueError('Environment variable %s is set to %s, but the program %s '
                             'or its executable %s could not be found there%s. Check '
                             'file existence and permissions.'
                             % (path_conf['env_var'], env_var_val, path_conf['name'],
                                _get_executable(prog_name), subdir_msg))

    try:
        _path_cache[prog_name] = _validate_path(prog_name, _get_anaconda_bindir())
        return _path_cache[prog_name]
    except ValueError:
        pass

    if os.name not in path_conf['search_paths'].keys():
        raise Exception('No default path is known for %s on your operating system '
                        '"%s". Set the path using the environment variable %s or by '
                        'calling the function %s.%s()'
                        % (path_conf['name'], os.name, path_conf['env_var'],
                           set_path.__module__, set_path.__name__))
    search_paths = path_conf['search_paths'][os.name]
    if use_path:
        search_paths = list(search_paths) + os.environ.get('PATH', '').split(os.pathsep)
    for search_path in search_paths:
        try:
            _path_cache[prog_name] = _validate_path(prog_name, search_path)
            return _path_cache[prog_name]
        except ValueError:
            pass

    try:
        conda_install_help = ('\n\nConda users can install %s using the following command:\n\n%s'
                              % (path_conf['name'], path_conf['conda_install_cmd']))
    except KeyError:
        conda_install_help = ''
    raise Exception('The program %s was not found in the default search path(s) for '
                    'your operating system:\n\n%s\n\nEither install it to one of '
                    'those paths, or set a custom path using the environment '
                    'variable %s or by calling the function %s.%s()%s'
                    % (path_conf['name'], '\n'.join(search_paths),
                       path_conf['env_var'], set_path.__module__,
                       set_path.__name__, conda_install_help))
class Test_avl_del(unittest.TestCase):

    def setUp(self):
        pass

    def testdel_basic(self):
        t = avl.new()
        t.remove(1)
        t.remove(-2)
        self.assertTrue(verify_empty(t))
        t = range_tree(-2000, +2000)
        self.assertTrue(t.verify() == 1)
        n = len(t)
        others = list(range(-2100, -2000)) + list(range(2000, 2100))
        random.shuffle(others)
        for i in others:
            t.remove(i)
        self.assertTrue(verify_len(t, n))
        others = None
        lst = list(range(-2000, 2000))
        for i in range(10):
            random.shuffle(lst)
            u = avl.new(t)
            for k in lst:
                u.remove(k)
                self.assertFalse(k in u)
            self.assertTrue(verify_empty(u))

    def testdel_lessbasic(self):
        n = 1000
        t = avl.new()
        for i in range(3):
            for k in gen_ints(0, n):
                t.insert(k)
        self.assertTrue(t.verify() == 1)
        modulo = n
        step = 37
        for i in range(10):
            u = avl.new(t)
            for k in gen_ints_perm(step, modulo):
                while k in u:
                    u.remove(k)
                self.assertFalse(k in u)
            self.assertTrue(u.verify() == 1)
            self.assertTrue(len(u) == 0, 'len=' + str(len(u)) + ' ' + str(u))

    def testdel_bigunique(self):
        t = range_tree(-10000, 10000)
        for i in gen_ints_perm(10007, 20000):
            j = i - 10000
            self.assertTrue(j in t)
            t.remove(j)
            self.assertFalse(j in t)

    def testdel_one(self):
        n = 4000
        t = random_int_tree(0, n, n)
        for i in gen_ints_perm(1993, n):
            e = t[i]
            a1, a2 = t.span(e)
            t.remove(e)
            self.assertTrue(t.verify() == 1)
            b1, b2 = t.span(e)
            self.assertTrue(a2 - a1 - 1 == b2 - b1)
            t.insert(e)
def _run_basic_get_repeatedly():
    from timeit import default_timer
    REPEAT = 10000
    for _ in range(7):
        start = default_timer()
        for _ in range(REPEAT):
            time_server_basic_get_with_realistic_headers()
        finish = default_timer()
        print(f'{REPEAT / (finish - start):.1f} requests/sec')
class FloScriptLexer(RegexLexer):
    name = 'FloScript'
    url = ''  # the project URL was truncated in the source
    aliases = ['floscript', 'flo']
    filenames = ['*.flo']
    version_added = '2.4'

    def innerstring_rules(ttype):
        return [
            ('%(\\(\\w+\\))?[-#0 +]*([0-9]+|[*])?(\\.([0-9]+|[*]))?[hlL]?[E-GXc-giorsux%]',
             String.Interpol),
            ('[^\\\\\\\'"%\\n]+', ttype),
            ('[\\\'"\\\\]', ttype),
            ('%', ttype),
        ]

    tokens = {
        'root': [
            ('\\s+', Whitespace),
            ('[]{}:(),;[]', Punctuation),
            ('(\\\\)(\\n)', bygroups(Text, Whitespace)),
            ('\\\\', Text),
            ('(to|by|with|from|per|for|cum|qua|via|as|at|in|of|on|re|is|if|be|into|and|not)\\b',
             Operator.Word),
            ('!=|==|<<|>>|[-~+/*%=<>&^|.]', Operator),
            ('(load|init|server|logger|log|loggee|first|over|under|next|done|timeout|repeat|native|benter|enter|recur|exit|precur|renter|rexit|print|put|inc|copy|set|aux|rear|raze|go|let|do|bid|ready|start|stop|run|abort|use|flo|give|take)\\b',
             Name.Builtin),
            ('(frame|framer|house)\\b', Keyword),
            ('"', String, 'string'),
            include('name'),
            include('numbers'),
            ('#.+$', Comment.Single),
        ],
        'string': [
            ('[^"]+', String),
            ('"', String, '#pop'),
        ],
        'numbers': [
            ('(\\d+\\.\\d*|\\d*\\.\\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
            ('\\d+[eE][+-]?[0-9]+j?', Number.Float),
            ('0[0-7]+j?', Number.Oct),
            ('0[bB][01]+', Number.Bin),
            ('0[xX][a-fA-F0-9]+', Number.Hex),
            ('\\d+L', Number.Integer.Long),
            ('\\d+j?', Number.Integer),
        ],
        'name': [
            ('[\\w.]+', Name.Decorator),
            ('[a-zA-Z_]\\w*', Name),
        ],
    }
class outputReference(object):

    def __init__(self, stepid, pointer):
        self.stepid = stepid
        self.pointer = pointer

    def __repr__(self):
        return 'outputReference {}#{}'.format(self.stepid, self.pointer.path)

    @classmethod  # restored: fromJSON takes `cls`, so the decorator was evidently dropped
    def fromJSON(cls, data):
        return cls(data['stepid'], jsonpointer.JsonPointer(data['pointer_path']))

    def json(self):
        return {'stepid': self.stepid, 'pointer_path': self.pointer.path}
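# Hypothetical round-trip (not from the source) showing the JSON contract;
# requires the jsonpointer package.
import jsonpointer

ref = outputReference('step1', jsonpointer.JsonPointer('/outputs/0'))
data = ref.json()
assert data == {'stepid': 'step1', 'pointer_path': '/outputs/0'}
assert outputReference.fromJSON(data).pointer.path == ref.pointer.path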
def get_input_string(input_data, task_type, additional_column_name_string):
    input_strings = []
    if additional_column_name_string is not None:
        for additional_column_name in additional_column_name_string.split(','):
            input_strings.append(str(input_data[additional_column_name]))
    if task_type == 'single-sentence':
        input_strings.append(input_data['sentence'])
    elif task_type == 'sentence-pair':
        input_strings.extend([input_data['sentence1'], input_data['sentence2']])
    elif task_type == 'swag':
        input_strings.extend([str(input_data['question']), input_data['choice0'],
                              input_data['choice1'], input_data['choice2'],
                              input_data['choice3'], input_data['choice4']])
    return '\t'.join(input_strings)
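# Illustrative call with an assumed row layout (not from the source):
row = {'id': 7, 'sentence1': 'A man is eating.', 'sentence2': 'Someone eats.'}
print(get_input_string(row, 'sentence-pair', 'id'))
# -> '7\tA man is eating.\tSomeone eats.'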
# the decorator's name was truncated in the source to "(nopython=True)";
# given the keyword it is assumed to be numba's jit
from numba import jit


@jit(nopython=True)
def _rescale_and_lookup1d_function(data, scale, offset, lut, out):
    vmin, vmax = 0, lut.shape[0] - 1
    for r in range(data.shape[0]):
        for c in range(data.shape[1]):
            val = (data[r, c] - offset) * scale
            val = min(max(val, vmin), vmax)
            out[r, c] = lut[int(val)]
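# Assumed usage: quantize float data through a 256-entry LUT; array dtypes
# and shapes here are illustrative.
import numpy as np

data = np.random.rand(4, 4)
lut = np.arange(256, dtype=np.uint8)
out = np.empty(data.shape, dtype=np.uint8)
_rescale_and_lookup1d_function(data, 255.0, 0.0, lut, out)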
def setup_logging(debug, verbose):
    handler = StreamHandler(sys.stderr)
    handler.setFormatter(Formatter(
        '[%(asctime)s] %(levelname)s (%(module)s:%(lineno)d) %(message)s',
        '%H:%M:%S'))
    log.addHandler(handler)
    if debug:
        log.setLevel(DEBUG)
    elif verbose:
        log.setLevel(INFO)
    else:
        log.setLevel(WARN)
    if _is_unittest_debug():
        log.setLevel(DEBUG)
# the decorator prefix was truncated in the source ("_REGISTRY.register()");
# TRAINER_REGISTRY is Dassl's convention and is assumed here
@TRAINER_REGISTRY.register()
class ADDA(TrainerXU):

    def __init__(self, cfg):
        super().__init__(cfg)
        self.open_layers = ['backbone']
        if isinstance(self.model.head, nn.Module):
            self.open_layers.append('head')
        self.source_model = copy.deepcopy(self.model)
        self.source_model.eval()
        for param in self.source_model.parameters():
            param.requires_grad_(False)
        self.build_critic()
        self.bce = nn.BCEWithLogitsLoss()

    def check_cfg(self, cfg):
        assert check_isfile(cfg.MODEL.INIT_WEIGHTS), \
            'The weights of source model must be provided'

    def build_critic(self):
        cfg = self.cfg
        print('Building critic network')
        fdim = self.model.fdim
        critic_body = build_head('mlp', verbose=cfg.VERBOSE, in_features=fdim,
                                 hidden_layers=[fdim, fdim // 2],
                                 activation='leaky_relu')
        self.critic = nn.Sequential(critic_body, nn.Linear(fdim // 2, 1))
        print('# params: {:,}'.format(count_num_param(self.critic)))
        self.critic.to(self.device)
        self.optim_c = build_optimizer(self.critic, cfg.OPTIM)
        self.sched_c = build_lr_scheduler(self.optim_c, cfg.OPTIM)
        self.register_model('critic', self.critic, self.optim_c, self.sched_c)

    def forward_backward(self, batch_x, batch_u):
        open_specified_layers(self.model, self.open_layers)
        input_x, _, input_u = self.parse_batch_train(batch_x, batch_u)
        domain_x = torch.ones(input_x.shape[0], 1).to(self.device)
        domain_u = torch.zeros(input_u.shape[0], 1).to(self.device)
        _, feat_x = self.source_model(input_x, return_feature=True)
        _, feat_u = self.model(input_u, return_feature=True)

        # update the critic on source (label 1) vs. target (label 0) features
        logit_xd = self.critic(feat_x)
        logit_ud = self.critic(feat_u.detach())
        loss_critic = self.bce(logit_xd, domain_x)
        loss_critic += self.bce(logit_ud, domain_u)
        self.model_backward_and_update(loss_critic, 'critic')

        # update the target model to fool the critic (inverted labels)
        logit_ud = self.critic(feat_u)
        loss_model = self.bce(logit_ud, 1 - domain_u)
        self.model_backward_and_update(loss_model, 'model')

        loss_summary = {'loss_critic': loss_critic.item(),
                        'loss_model': loss_model.item()}
        if (self.batch_idx + 1) == self.num_batches:
            self.update_lr()
        return loss_summary
from threading import Event


class Task():

    def __init__(self, fn, args, kwargs):
        self._fn = fn
        self._args = args
        self._kwargs = kwargs
        self.has_run = Event()
        self._result = self._exception = None

    def __call__(self):
        try:
            self._result = self._fn(*self._args, **self._kwargs)
        except Exception as e:
            self._exception = e
        finally:
            self.has_run.set()

    def set_exception(self, exception):
        self._exception = exception

    def result(self):
        if not self.has_run.is_set():
            raise ValueError("Hasn't run.")
        if self._exception:
            raise self._exception
        return self._result
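# Minimal illustration (assumed, not from the source): run a Task on a worker
# thread, then read its result (or re-raise its exception) on the caller side.
import threading

task = Task(lambda a, b: a + b, (2, 3), {})
threading.Thread(target=task).start()
task.has_run.wait()
print(task.result())  # 5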
class Glucose4(object):

    def __init__(self, bootstrap_with=None, use_timer=False, incr=False,
                 with_proof=False, warm_start=False):
        self.glucose = None
        self.status = None
        self.prfile = None
        self.new(bootstrap_with, use_timer, incr, with_proof, warm_start)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.delete()
        self.glucose = None

    def new(self, bootstrap_with=None, use_timer=False, incr=False,
            with_proof=False, warm_start=False):
        assert not incr or not with_proof, \
            'Incremental mode and proof tracing cannot be set together.'
        if not self.glucose:
            self.glucose = pysolvers.glucose41_new()
            if bootstrap_with:
                if type(bootstrap_with) == CNFPlus and bootstrap_with.atmosts:
                    raise NotImplementedError('Atmost constraints are not supported by Glucose4')
                for clause in bootstrap_with:
                    self.add_clause(clause)
            self.use_timer = use_timer
            self.call_time = 0.0
            self.accu_time = 0.0
            if incr:
                pysolvers.glucose41_setincr(self.glucose)
            if with_proof:
                self.prfile = tempfile.TemporaryFile()
                pysolvers.glucose41_tracepr(self.glucose, self.prfile)
            if warm_start:
                self.start_mode(warm=True)

    def start_mode(self, warm=False):
        if self.glucose:
            pysolvers.glucose41_set_start(self.glucose, int(warm))

    def delete(self):
        if self.glucose:
            pysolvers.glucose41_del(self.glucose)
            self.glucose = None
            if self.prfile:
                self.prfile.close()

    def solve(self, assumptions=[]):
        if self.glucose:
            if self.use_timer:
                start_time = process_time()
            self.status = pysolvers.glucose41_solve(self.glucose, assumptions,
                                                    int(MainThread.check()))
            if self.use_timer:
                self.call_time = process_time() - start_time
                self.accu_time += self.call_time
            return self.status

    def solve_limited(self, assumptions=[], expect_interrupt=False):
        if self.glucose:
            if self.use_timer:
                start_time = process_time()
            self.status = pysolvers.glucose41_solve_lim(self.glucose, assumptions,
                                                        int(MainThread.check()),
                                                        int(expect_interrupt))
            if self.use_timer:
                self.call_time = process_time() - start_time
                self.accu_time += self.call_time
            return self.status

    def conf_budget(self, budget):
        if self.glucose:
            pysolvers.glucose41_cbudget(self.glucose, budget)

    def prop_budget(self, budget):
        if self.glucose:
            pysolvers.glucose41_pbudget(self.glucose, budget)

    def dec_budget(self, budget):
        raise NotImplementedError('Limit on decisions is unsupported by Glucose4.')

    def interrupt(self):
        if self.glucose:
            pysolvers.glucose41_interrupt(self.glucose)

    def clear_interrupt(self):
        if self.glucose:
            pysolvers.glucose41_clearint(self.glucose)

    def propagate(self, assumptions=[], phase_saving=0):
        if self.glucose:
            if self.use_timer:
                start_time = process_time()
            st, props = pysolvers.glucose41_propagate(self.glucose, assumptions,
                                                      phase_saving, int(MainThread.check()))
            if self.use_timer:
                self.call_time = process_time() - start_time
                self.accu_time += self.call_time
            return bool(st), props if props != None else []

    def set_phases(self, literals=[]):
        if self.glucose:
            pysolvers.glucose41_setphases(self.glucose, literals)

    def get_status(self):
        if self.glucose:
            return self.status

    def get_model(self):
        if self.glucose and self.status == True:
            model = pysolvers.glucose41_model(self.glucose)
            return model if model != None else []

    def get_core(self):
        if self.glucose and self.status == False:
            return pysolvers.glucose41_core(self.glucose)

    def get_proof(self):
        if self.glucose and self.prfile:
            self.prfile.seek(0)
            return [line.rstrip().decode('ascii') for line in self.prfile.readlines()]

    def time(self):
        if self.glucose:
            return self.call_time

    def time_accum(self):
        if self.glucose:
            return self.accu_time

    def nof_vars(self):
        if self.glucose:
            return pysolvers.glucose41_nof_vars(self.glucose)

    def nof_clauses(self):
        if self.glucose:
            return pysolvers.glucose41_nof_cls(self.glucose)

    def accum_stats(self):
        if self.glucose:
            return pysolvers.glucose41_acc_stats(self.glucose)

    def enum_models(self, assumptions=[]):
        if self.glucose:
            done = False
            while not done:
                self.status = self.solve(assumptions=assumptions)
                model = self.get_model()
                if model is not None:
                    self.add_clause([-l for l in model])
                    yield model
                else:
                    done = True

    def add_clause(self, clause, no_return=True):
        if self.glucose:
            res = pysolvers.glucose41_add_cl(self.glucose, clause)
            if res == False:
                self.status = False
            if not no_return:
                return res

    def add_atmost(self, lits, k, no_return=True):
        raise NotImplementedError('Atmost constraints are not supported by Glucose.')

    def add_xor_clause(self, lits, value=True):
        raise NotImplementedError('XOR clauses are supported only by CryptoMinisat')

    def append_formula(self, formula, no_return=True):
        if self.glucose:
            res = None
            if type(formula) == CNFPlus and formula.atmosts:
                raise NotImplementedError('Atmost constraints are not supported by Glucose4')
            for clause in formula:
                res = self.add_clause(clause, no_return)
                if not no_return and res == False:
                    return res
            if not no_return:
                return res

    def supports_atmost(self):
        return False
class TestWithCExtension():

    def _simulate_package_with_extension(self, tmp_path):
        files = ['benchmarks/file.py', 'docs/Makefile', 'docs/requirements.txt',
                 'docs/source/conf.py', 'proj/header.h', 'proj/file.py',
                 'py/proj.cpp', 'py/other.cpp', 'py/file.py', 'py/py.typed',
                 'py/tests/test_proj.py', 'README.rst']
        _populate_project_dir(tmp_path, files, {})
        setup_script = '\n from setuptools import Extension, setup\n\n ext_modules = [\n Extension(\n "proj",\n ["py/proj.cpp", "py/other.cpp"],\n include_dirs=["."],\n language="c++",\n ),\n ]\n setup(ext_modules=ext_modules)\n '
        (tmp_path / 'setup.py').write_text(DALS(setup_script))

    def test_skip_discovery_with_setupcfg_metadata(self, tmp_path):
        self._simulate_package_with_extension(tmp_path)
        pyproject = "\n [build-system]\n requires = []\n build-backend = 'setuptools.build_meta'\n "
        (tmp_path / 'pyproject.toml').write_text(DALS(pyproject))
        setupcfg = '\n [metadata]\n name = proj\n version = 42\n '
        (tmp_path / 'setup.cfg').write_text(DALS(setupcfg))
        dist = _get_dist(tmp_path, {})
        assert dist.get_name() == 'proj'
        assert dist.get_version() == '42'
        assert dist.py_modules is None
        assert dist.packages is None
        assert len(dist.ext_modules) == 1
        assert dist.ext_modules[0].name == 'proj'

    def test_dont_skip_discovery_with_pyproject_metadata(self, tmp_path):
        self._simulate_package_with_extension(tmp_path)
        pyproject = "\n [project]\n name = 'proj'\n version = '42'\n "
        (tmp_path / 'pyproject.toml').write_text(DALS(pyproject))
        with pytest.raises(PackageDiscoveryError, match='multiple (packages|modules)'):
            _get_dist(tmp_path, {})
def test_checkpoint_hook(tmp_path):
    loader = DataLoader(torch.ones((5, 2)))
    runner = _build_demo_runner('EpochBasedRunner', max_epochs=1)
    runner.meta = dict()
    checkpointhook = CheckpointHook(interval=1, by_epoch=True)
    runner.register_hook(checkpointhook)
    runner.run([loader], [('train', 1)])
    assert runner.meta['hook_msgs']['last_ckpt'] == osp.join(runner.work_dir, 'epoch_1.pth')
    shutil.rmtree(runner.work_dir)

    runner = _build_demo_runner('EpochBasedRunner', max_epochs=4)
    runner.meta = dict()
    out_dir = 's3://user/data'
    with patch.object(PetrelBackend, 'put') as mock_put, \
         patch.object(PetrelBackend, 'remove') as mock_remove, \
         patch.object(PetrelBackend, 'isfile') as mock_isfile:
        checkpointhook = CheckpointHook(interval=1, out_dir=out_dir, by_epoch=True,
                                        max_keep_ckpts=2)
        runner.register_hook(checkpointhook)
        runner.run([loader], [('train', 1)])
        basename = osp.basename(runner.work_dir.rstrip(osp.sep))
        assert runner.meta['hook_msgs']['last_ckpt'] == '/'.join([out_dir, basename, 'epoch_4.pth'])
        mock_put.assert_called()
        mock_remove.assert_called()
        mock_isfile.assert_called()
    shutil.rmtree(runner.work_dir)

    runner = _build_demo_runner('IterBasedRunner', max_iters=1, max_epochs=None)
    runner.meta = dict()
    checkpointhook = CheckpointHook(interval=1, by_epoch=False)
    runner.register_hook(checkpointhook)
    runner.run([loader], [('train', 1)])
    assert runner.meta['hook_msgs']['last_ckpt'] == osp.join(runner.work_dir, 'iter_1.pth')
    shutil.rmtree(runner.work_dir)

    runner = _build_demo_runner('IterBasedRunner', max_iters=4, max_epochs=None)
    runner.meta = dict()
    out_dir = 's3://user/data'
    with patch.object(PetrelBackend, 'put') as mock_put, \
         patch.object(PetrelBackend, 'remove') as mock_remove, \
         patch.object(PetrelBackend, 'isfile') as mock_isfile:
        checkpointhook = CheckpointHook(interval=1, out_dir=out_dir, by_epoch=False,
                                        max_keep_ckpts=2)
        runner.register_hook(checkpointhook)
        runner.run([loader], [('train', 1)])
        basename = osp.basename(runner.work_dir.rstrip(osp.sep))
        assert runner.meta['hook_msgs']['last_ckpt'] == '/'.join([out_dir, basename, 'iter_4.pth'])
        mock_put.assert_called()
        mock_remove.assert_called()
        mock_isfile.assert_called()
    shutil.rmtree(runner.work_dir)
class E(object):

    def __init__(self, begin, end, fmt, dummy=False):
        self.advance = 1
        if dummy:
            self.advance = 0
        self.position = begin - 1
        if end is not None:
            self.length = end - begin + 1
        else:
            self.length = None
        self.end = end
        if isinstance(fmt, str):
            t = fmt[0]
            if t in 'ef':
                self.parse = float_or_none
                self.string = float_to_string(fmt)
                ln = int(fmt[1:].split('.')[0])
                self.help_type = 'float'
            elif t == 'a':
                self.parse = deserialize_string(fmt)
                self.string = serialize_string(fmt)
                ln = int(fmt[1:].rstrip('+?'))
                self.help_type = 'string'
            elif t == 'i':
                self.parse = int_or_none
                self.string = int_to_string(fmt)
                ln = int(fmt[1:])
                self.help_type = 'integer'
            else:
                assert False, 'invalid format: %s' % t
            assert self.length is None or ln == self.length, \
                'inconsistent length for pos=%i, fmt=%s' % (self.position, fmt)
        else:
            self.parse, self.string = fmt()
            self.help_type = fmt.help_type
def resnet_v1_200(inputs, num_classes=None, is_training=True, global_pool=True,
                  output_stride=None, spatial_squeeze=True, reuse=None,
                  scope='resnet_v1_200'):
    blocks = [
        resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
        resnet_v1_block('block2', base_depth=128, num_units=24, stride=2),
        resnet_v1_block('block3', base_depth=256, num_units=36, stride=2),
        resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
    ]
    return resnet_v1(inputs, blocks, num_classes, is_training,
                     global_pool=global_pool, output_stride=output_stride,
                     include_root_block=True, spatial_squeeze=spatial_squeeze,
                     reuse=reuse, scope=scope)
def test_QobjEvo_isherm_flag_knowcase():
    assert QobjEvo(sigmax())(0)._isherm is True
    non_hermitian = sigmax() + 1j
    non_hermitian.isherm  # accessed for its side effect: primes the cached flag
    assert QobjEvo(non_hermitian)(0)._isherm is False
    assert QobjEvo([sigmax(), sigmaz()])(0)._isherm is True
    assert QobjEvo([sigmax(), 't'])(0)._isherm is True
    assert QobjEvo([sigmax(), '1j'])(0)._isherm is None
    assert QobjEvo([[sigmax(), 't'], [sigmaz(), '1']])(0)._isherm is True
    assert QobjEvo([[sigmax(), 't'], [sigmaz(), '1j']])(0)._isherm is None
def test_042_parseModifier_nonstd():

    def report(mod_group):
        return Metar.Metar(sta_time + mod_group)

    assert report('RTD').mod == 'RTD'
    assert report('TEST').mod == 'TEST'
    assert report('CCA').mod == 'CCA'
    assert report('CCB').mod == 'CCB'
    assert report('CCC').mod == 'CCC'
    assert report('CCD').mod == 'CCD'
    assert report('CCE').mod == 'CCE'
    assert report('CCF').mod == 'CCF'
    assert report('CCG').mod == 'CCG'
    assert report('CORR').mod == 'COR'
    assert report('FINO').mod == 'NO DATA'
    assert report('NIL').mod == 'NO DATA'
def _get_satellite_unit_vector_z(attitude, orbit):
    v1950 = _get_satellite_z_axis_1950(
        attitude.angle_between_sat_spin_and_z_axis,
        attitude.angle_between_sat_spin_and_yz_plane)
    vcorr = _correct_nutation_precession(v1950, orbit.nutation_precession)
    return _rotate_to_greenwich(vcorr, orbit.angles.greenwich_sidereal_time)
def load_archive_file(archive_file):
    try:
        resolved_archive_file = cached_path(archive_file, cache_dir=None)
    except EnvironmentError:
        print("Archive name '{}' was not found in archive name list. We assumed '{}' "
              "was a path or URL but couldn't find any file associated to this path "
              'or URL.'.format(archive_file, archive_file))
        return None
    if resolved_archive_file == archive_file:
        print('loading archive file {}'.format(archive_file))
    else:
        print('loading archive file {} from cache at {}'.format(
            archive_file, resolved_archive_file))
    tempdir = None
    if not os.path.isdir(resolved_archive_file):
        tempdir = tempfile.mkdtemp()
        print('extracting archive file {} to temp dir {}'.format(
            resolved_archive_file, tempdir))
        ext = os.path.splitext(archive_file)[1][1:]
        with tarfile.open(resolved_archive_file, 'r:' + ext) as archive:
            top_dir = os.path.commonprefix(archive.getnames())
            archive.extractall(tempdir)
        os.remove(resolved_archive_file)
        shutil.move(os.path.join(tempdir, top_dir), resolved_archive_file)
        shutil.rmtree(tempdir)
    return resolved_archive_file
class AxialImageTransformer(nn.Module):

    def __init__(self, dim, depth, heads=8, dim_heads=None, dim_index=1,
                 reversible=True, axial_pos_emb_shape=None):
        super().__init__()
        permutations = calculate_permutations(2, dim_index)
        get_ff = lambda: nn.Sequential(
            ChanLayerNorm(dim),
            nn.Conv2d(dim, dim * 4, 3, padding=1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(dim * 4, dim, 3, padding=1),
        )
        self.pos_emb = (AxialPositionalEmbedding(dim, axial_pos_emb_shape, dim_index)
                        if exists(axial_pos_emb_shape) else nn.Identity())
        layers = nn.ModuleList([])
        for _ in range(depth):
            attn_functions = nn.ModuleList(
                [PermuteToFrom(permutation, PreNorm(dim, SelfAttention(dim, heads, dim_heads)))
                 for permutation in permutations])
            conv_functions = nn.ModuleList([get_ff(), get_ff()])
            layers.append(attn_functions)
            layers.append(conv_functions)
        execute_type = ReversibleSequence if reversible else Sequential
        self.layers = execute_type(layers)

    def forward(self, x):
        x = self.pos_emb(x)
        return self.layers(x)
def chamfer(query, target_feature, comparator=False):
    query = torch.Tensor(query).cuda()
    target_feature = torch.Tensor(target_feature).cuda()
    simmatrix = torch.einsum('ik,jk->ij', [query, target_feature])
    if comparator:
        simmatrix = comparator(simmatrix).detach()
    sim = simmatrix.max(dim=1)[0].sum().cpu().item() / simmatrix.shape[0]
    return sim
def test_func(model_f, y_label, X_test_f):
    y_pred = []
    y_label = th.Tensor(y_label)
    print('Testing:')
    print('')
    with tqdm(range(0, len(X_test_f), 1)) as tepoch:
        for i in tepoch:
            with th.no_grad():
                x = [0, 0]
                x[0] = X_test_f[i][0].to(device)
                x[1] = X_test_f[i][1].to(device)
                y_pred.append(model_f(x).cpu())
    y_pred = th.cat(y_pred, dim=0)
    y_pred_c = [round(i.item()) for i in y_pred]
    roce1 = get_roce(y_pred, y_label, 0.5)
    roce2 = get_roce(y_pred, y_label, 1)
    roce3 = get_roce(y_pred, y_label, 2)
    roce4 = get_roce(y_pred, y_label, 5)
    print('AUROC: ' + str(roc_auc_score(y_label, y_pred)), end=' ')
    print('PRAUC: ' + str(average_precision_score(y_label, y_pred)), end=' ')
    print('F1 Score: ' + str(f1_score(y_label, y_pred_c)), end=' ')
    print('Precision Score:' + str(precision_score(y_label, y_pred_c)), end=' ')
    print('Recall Score:' + str(recall_score(y_label, y_pred_c)), end=' ')
    print('Balanced Accuracy Score ' + str(balanced_accuracy_score(y_label, y_pred_c)), end=' ')
    print('0.5 re Score ' + str(roce1), end=' ')
    print('1 re Score ' + str(roce2), end=' ')
    print('2 re Score ' + str(roce3), end=' ')
    print('5 re Score ' + str(roce4), end=' ')
    print('')
class DeliverSM(SubmitSM):
    params = {
        'service_type': Param(type=str, max=6),
        'source_addr_ton': Param(type=int, size=1),
        'source_addr_npi': Param(type=int, size=1),
        'source_addr': Param(type=str, max=21),
        'dest_addr_ton': Param(type=int, size=1),
        'dest_addr_npi': Param(type=int, size=1),
        'destination_addr': Param(type=str, max=21),
        'esm_class': Param(type=int, size=1),
        'protocol_id': Param(type=int, size=1),
        'priority_flag': Param(type=int, size=1),
        'schedule_delivery_time': Param(type=str, max=17),
        'validity_period': Param(type=str, max=17),
        'registered_delivery': Param(type=int, size=1),
        'replace_if_present_flag': Param(type=int, size=1),
        'data_coding': Param(type=int, size=1),
        'sm_default_msg_id': Param(type=int, size=1),
        'sm_length': Param(type=int, size=1),
        'short_message': Param(type=ostr, max=254, len_field='sm_length'),
        'user_message_reference': Param(type=int, size=2),
        'source_port': Param(type=int, size=2),
        'destination_port': Param(type=int, size=2),
        'sar_msg_ref_num': Param(type=int, size=2),
        'sar_total_segments': Param(type=int, size=1),
        'sar_segment_seqnum': Param(type=int, size=1),
        'user_response_code': Param(type=int, size=1),
        'privacy_indicator': Param(type=int, size=1),
        'payload_type': Param(type=int, size=1),
        'message_payload': Param(type=ostr, max=260),
        'callback_num': Param(type=ostr, min=4, max=19),
        'source_subaddress': Param(type=str, min=2, max=23),
        'dest_subaddress': Param(type=str, min=2, max=23),
        'language_indicator': Param(type=int, size=1),
        'its_session_info': Param(type=int, size=2),
        'network_error_code': Param(type=ostr, size=3),
        'message_state': Param(type=int, size=1),
        'receipted_message_id': Param(type=str, max=65),
        'source_network_type': Param(type=int, size=1),
        'dest_network_type': Param(type=int, size=1),
        'more_messages_to_send': Param(type=int, size=1),
    }

    params_order = (
        'service_type', 'source_addr_ton', 'source_addr_npi', 'source_addr',
        'dest_addr_ton', 'dest_addr_npi', 'destination_addr', 'esm_class',
        'protocol_id', 'priority_flag', 'schedule_delivery_time',
        'validity_period', 'registered_delivery', 'replace_if_present_flag',
        'data_coding', 'sm_default_msg_id', 'sm_length', 'short_message',
        'user_message_reference', 'source_port', 'destination_port',
        'sar_msg_ref_num', 'sar_total_segments', 'sar_segment_seqnum',
        'user_response_code', 'privacy_indicator', 'payload_type',
        'message_payload', 'callback_num', 'source_subaddress',
        'dest_subaddress', 'language_indicator', 'its_session_info',
        'network_error_code', 'message_state', 'receipted_message_id',
        'source_network_type', 'dest_network_type', 'more_messages_to_send')

    def __init__(self, command, **kwargs):
        super(DeliverSM, self).__init__(command, **kwargs)
        self._set_vars(**dict.fromkeys(self.params))
class LossMixin():

    def _process_y(self, X, y, sample_weight=None, copy=True, check_input=True):
        loss_config = get_base_config(get_loss_config(self.loss))
        if loss_config.name in ['lin_reg', 'huber']:
            return process_y_lin_reg(X=X, y=y, standardize=self.standardize,
                                     fit_intercept=self.fit_intercept,
                                     sample_weight=sample_weight, copy=copy,
                                     check_input=check_input)
        elif loss_config.name in ['quantile', 'smoothed_quantile']:
            return process_y_lin_reg(X=X, y=y, sample_weight=sample_weight,
                                     standardize=False, copy=copy,
                                     check_input=check_input)
        elif loss_config.name == 'poisson':
            return process_y_poisson(X=X, y=y, sample_weight=sample_weight,
                                     copy=copy, check_input=check_input)
        elif loss_config.name == 'log_reg':
            return process_y_log_reg(X=X, y=y, sample_weight=sample_weight,
                                     class_weight=loss_config.class_weight,
                                     copy=copy, check_input=check_input)
        elif loss_config.name == 'multinomial':
            return process_y_multinomial(X=X, y=y, sample_weight=sample_weight,
                                         class_weight=loss_config.class_weight,
                                         copy=copy, check_input=check_input)
        elif loss_config.name in ['hinge', 'huberized_hinge', 'logistic_hinge']:
            return process_y_hinge(X=X, y=y, sample_weight=sample_weight,
                                   copy=copy, check_input=check_input)

    def predict(self, X, offsets=None):
        if self._estimator_type == 'regressor':
            return self.predict_expected(X, offsets=offsets)
        elif self._estimator_type == 'classifier':
            scores = self.decision_function(X, offsets=offsets)
            if len(scores.shape) == 1:
                indices = (scores > 0).astype(int)
            else:
                indices = scores.argmax(axis=1)
            return self.classes_[indices]

    def score(self, X, y, sample_weight=None, offsets=None):
        y_pred = self.predict(X, offsets=offsets)
        loss_config = get_base_config(get_loss_config(self.loss))
        if loss_config.name in ['lin_reg', 'huber', 'quantile', 'poisson',
                                'smoothed_quantile']:
            return r2_score(y_true=y, y_pred=y_pred, sample_weight=sample_weight)
        elif loss_config.name == 'poisson':
            # note: unreachable as written, since 'poisson' is already matched
            # by the branch above
            return poisson_dsq_score(y_true=y, y_pred=y_pred, sample_weight=sample_weight)
        elif loss_config.name in ['log_reg', 'multinomial', 'hinge',
                                  'huberized_hinge', 'logistic_hinge']:
            return accuracy_score(y_true=y, y_pred=y_pred, sample_weight=sample_weight)

    def predict_proba(self, X, offsets=None):
        loss_config = get_base_config(get_loss_config(self.loss))
        if loss_config.name not in ['log_reg', 'multinomial']:
            raise ValueError('{} does not support predict_proba'.format(loss_config.name))
        return self.predict_expected(X, offsets=offsets)

    def predict_log_proba(self, X, offsets=None):
        return np.log(self.predict_proba(X, offsets=offsets))

    def predict_expected(self, X, offsets=None):
        check_is_fitted(self)
        z = self.decision_function(X, offsets=offsets)
        loss_config = get_base_config(get_loss_config(self.loss))
        if loss_config.name in ['lin_reg', 'huber', 'quantile', 'l2',
                                'smoothed_quantile']:
            return z
        elif loss_config.name == 'poisson':
            return np.exp(z)
        elif loss_config.name == 'log_reg':
            return expit(z)
        elif loss_config.name == 'multinomial':
            return softmax(z)
        else:
            raise NotImplementedError

    def sample_log_liks(self, X, y, offsets=None):
        loss_config = get_base_config(get_loss_config(self.loss))
        y_pred = self.predict_expected(X, offsets=offsets)
        if loss_config.name == 'lin_reg':
            if (hasattr(self, 'inferencer_') and hasattr(self.inferencer_, 'scale_')
                    and self.inferencer_.scale_ is not None):
                scale = self.inferencer_.scale_
            else:
                raise RuntimeError('No sigma estimate was found; linear regression '
                                   'requires a noise standard deviation estimate '
                                   'to compute the likelihood.')
            return gaussian(y_pred=y_pred, y_true=y, scale=scale)
        elif loss_config.name == 'poisson':
            return poisson(y_pred=y_pred, y_true=y)
        elif loss_config.name == 'log_reg':
            y_true = self.label_encoder_.transform(y)
            return bernoulli(y_pred=y_pred, y_true=y_true)
        elif loss_config.name == 'multinomial':
            y_true_idxs = lb_transform_to_indices(lb=self.label_binarizer_, y=y)
            return multinomial(y_pred=y_pred, y_true=y_true_idxs)
        else:
            raise NotImplementedError(
                'Cannot compute sample log likelihoods for {}.'.format(loss_config.name))
class TestSubschemaLDIF(unittest.TestCase): def test_subschema_file(self): for test_file in TEST_SUBSCHEMA_FILES: with open(test_file, 'rb') as ldif_file: ldif_parser = ldif.LDIFRecordList(ldif_file, max_entries=1) ldif_parser.parse() (_, subschema_subentry) = ldif_parser.all_records[0] sub_schema = ldap.schema.SubSchema(subschema_subentry) for objclass in sub_schema.listall(ObjectClass): (must, may) = sub_schema.attribute_types([objclass]) for (oid, attributetype) in must.items(): self.assertEqual(attributetype.oid, oid) for (oid, attributetype) in may.items(): self.assertEqual(attributetype.oid, oid)
def truncated_cifar10(nperclass, nperclassvalid, args): ((xtrain, ytrain), (xvalid, yvalid)) = cifar10.load_data() (ytrain, yvalid) = (np.squeeze(ytrain), np.squeeze(yvalid)) (inputs, labels) = ([], []) counts = [0 for _ in range(10)] for (x, y) in zip(xtrain, ytrain): if all([(count == nperclass) for count in counts]): break if (counts[y] < nperclass): inputs.append(x) labels.append(y) counts[y] += 1 order = sorted(range(len(labels)), key=(lambda i: labels[i])) (xtrain, ytrain) = (np.array(inputs, dtype='float32'), np.array(labels)) inputs = [inputs[o] for o in order] labels = [labels[o] for o in order] (xtrain, ytrain) = (np.array(inputs, dtype='float32'), np.array(labels)) if args.multiclasspoison: xtrain = xtrain.reshape(10, nperclass, *xtrain.shape[1:]).transpose(1, 0, 2, 3, 4).reshape(*xtrain.shape) ytrain = ytrain.reshape(10, nperclass).T.reshape(ytrain.shape) args.poisonclass = 0 (inputs, labels) = ([], []) counts = [0 for _ in range(10)] for (x, y) in zip(xvalid, yvalid): if all([(count == nperclassvalid) for count in counts]): break if (counts[y] < nperclassvalid): inputs.append(x) labels.append(y) counts[y] += 1 order = sorted(range(len(labels)), key=(lambda i: labels[i])) (xvalid, yvalid) = (np.array(inputs, dtype='float32'), np.array(labels)) inputs = [inputs[o] for o in order] labels = [labels[o] for o in order] (xvalid, yvalid) = (np.array(inputs, dtype='float32'), np.array(labels)) return ((xtrain, ytrain), (xvalid, yvalid))
def test_smooth() -> None: instance = printer.Dummy() instance.set_with_default(smooth=True) expected_sequence = (TXT_NORMAL, TXT_STYLE['size']['normal'], TXT_STYLE['flip'][False], TXT_STYLE['smooth'][True], TXT_STYLE['bold'][False], TXT_STYLE['underline'][0], SET_FONT(b'\x00'), TXT_STYLE['align']['left'], TXT_STYLE['invert'][False]) assert (instance.output == b''.join(expected_sequence))
def test_strip_examples(mocker): p = asyncio.run(get_device_for_file('KP303(UK)_1.0_1.0.3.json', 'IOT')) mocker.patch('kasa.smartstrip.SmartStrip', return_value=p) mocker.patch('kasa.smartstrip.SmartStrip.update') res = xdoctest.doctest_module('kasa.smartstrip', 'all') assert (not res['failed'])
class GroundStateEigensolver(GroundStateSolver):
    def __init__(self, transformation: Transformation, solver: Union[MinimumEigensolver, MinimumEigensolverFactory]) -> None:
        super().__init__(transformation)
        self._solver = solver

    # The property/setter decorators were stripped in the source; without
    # them the second `solver` def would simply shadow the first.
    @property
    def solver(self) -> Union[MinimumEigensolver, MinimumEigensolverFactory]:
        return self._solver

    @solver.setter
    def solver(self, solver: Union[MinimumEigensolver, MinimumEigensolverFactory]) -> None:
        self._solver = solver

    def returns_groundstate(self) -> bool:
        return self._solver.supports_aux_operators()

    def solve(self, driver: BaseDriver, aux_operators: Optional[Union[List[FermionicOperator], List[BosonicOperator]]] = None) -> Union[ElectronicStructureResult, VibronicStructureResult]:
        (operator, aux_ops) = self.transformation.transform(driver, aux_operators)
        if isinstance(self._solver, MinimumEigensolverFactory):
            solver = self._solver.get_solver(self.transformation)
        else:
            solver = self._solver
        if not solver.supports_aux_operators():
            aux_ops = None
        raw_mes_result = solver.compute_minimum_eigenvalue(operator, aux_ops)
        result = self.transformation.interpret(raw_mes_result)
        return result

    def evaluate_operators(self, state: Union[str, dict, Result, list, np.ndarray, Statevector, QuantumCircuit, Instruction, OperatorBase], operators: Union[WeightedPauliOperator, OperatorBase, list, dict]) -> Union[Optional[float], List[Optional[float]], Dict[str, List[Optional[float]]]]:
        quantum_instance = getattr(self._solver, 'quantum_instance', None)
        if not isinstance(state, StateFn):
            state = StateFn(state)
        if isinstance(operators, list):
            results = []
            for op in operators:
                if op is None:
                    results.append(None)
                else:
                    results.append(self._eval_op(state, op, quantum_instance))
        elif isinstance(operators, dict):
            results = {}
            for (name, op) in operators.items():
                if op is None:
                    results[name] = None
                else:
                    results[name] = self._eval_op(state, op, quantum_instance)
        elif operators is None:
            results = None
        else:
            results = self._eval_op(state, operators, quantum_instance)
        return results

    def _eval_op(self, state, op, quantum_instance):
        if isinstance(op, WeightedPauliOperator):
            op = op.to_opflow()
        if op == 0:
            return [0j]
        exp = (~StateFn(op)) @ state  # the '@' composition operator was lost in the source
        if quantum_instance is not None:
            try:
                sampler = CircuitSampler(quantum_instance)
                result = sampler.convert(exp).eval()
            except ValueError:
                result = exp.eval()
        else:
            result = exp.eval()
        return [result]
def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('--save_path', default='/apdcephfs/share_1316500/donchaoyang/code3/SpecVQGAN/vocoder_audioset/logs/audioset') parser.add_argument('--load_path', default='/apdcephfs/share_1316500/donchaoyang/code3/SpecVQGAN/vocoder_audioset/logs/audioset') parser.add_argument('--n_mel_channels', type=int, default=80) parser.add_argument('--ngf', type=int, default=32) parser.add_argument('--n_residual_layers', type=int, default=3) parser.add_argument('--ndf', type=int, default=16) parser.add_argument('--num_D', type=int, default=3) parser.add_argument('--n_layers_D', type=int, default=4) parser.add_argument('--downsamp_factor', type=int, default=4) parser.add_argument('--lambda_feat', type=float, default=10) parser.add_argument('--cond_disc', action='store_true') parser.add_argument('--data_path', default='/apdcephfs/share_1316500/donchaoyang/data/audioset/features', type=Path) parser.add_argument('--splits_path', default='./data', type=Path) parser.add_argument('--batch_size', type=int, default=256) parser.add_argument('--seq_len', type=int, default=8192) parser.add_argument('--epochs', type=int, default=2000) parser.add_argument('--log_interval', type=int, default=100) parser.add_argument('--save_interval', type=int, default=3000) parser.add_argument('--n_test_samples', type=int, default=8) args = parser.parse_args() return args
def fast_encode(texts, tokenizer, chunk_size=256, maxlen=512, enable_padding=False): tokenizer.enable_truncation(max_length=maxlen) if enable_padding: tokenizer.enable_padding(max_length=maxlen) all_ids = [] for i in tqdm(range(0, len(texts), chunk_size)): text_chunk = texts[i:(i + chunk_size)].tolist() encs = tokenizer.encode_batch(text_chunk) all_ids.extend([enc.ids for enc in encs]) return np.array(all_ids)
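# A minimal usage sketch for fast_encode above, assuming the HuggingFace
# `tokenizers` library and a pandas Series of raw strings (each chunk is
# sliced and .tolist()'d, so a Series or numpy array is expected). The
# 'vocab.txt' path is hypothetical; enable_padding(max_length=...) matches
# the older tokenizers API that fast_encode itself calls.
import pandas as pd
from tokenizers import BertWordPieceTokenizer

tokenizer = BertWordPieceTokenizer('vocab.txt', lowercase=True)
texts = pd.Series(['first example sentence', 'second example sentence'])
ids = fast_encode(texts, tokenizer, chunk_size=2, maxlen=16, enable_padding=True)
print(ids.shape)  # (2, 16) once padding pads every row out to maxlen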
def load_model(): base_model = InceptionV3(include_top=False, weights='imagenet', input_shape=IMSIZE) for layer in base_model.layers: layer.trainable = False x = base_model.output x = Flatten()(x) predictions = Dense(N_CLASSES, activation='softmax')(x) model = Model(inputs=base_model.input, outputs=predictions) print(model.summary()) sgd = SGD(lr=0.001, decay=1e-06, momentum=0.5) model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy']) return model
def makeCfdSolverFoam(name='OpenFOAM'): obj = FreeCAD.ActiveDocument.addObject('Fem::FemSolverObjectPython', name) CfdSolverFoam(obj) if FreeCAD.GuiUp: from cfdguiobjects._ViewProviderCfdSolverFoam import _ViewProviderCfdSolverFoam _ViewProviderCfdSolverFoam(obj.ViewObject) return obj
def render_frames(frames, prediction): rendered_frames = [] for frame in frames: img = np.array(frame) (height, width, _) = img.shape cv2.putText(img, prediction, (1, int((height / 8))), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2) rendered_frames.append(img) return rendered_frames
class YGate(Bloq):
    # `_property` in the source reads like a mangled @cached_property,
    # which is how Bloq signatures are usually declared; restored on
    # that assumption.
    @cached_property
    def signature(self) -> 'Signature':
        return Signature.build(q=1)

    def add_my_tensors(self, tn: qtn.TensorNetwork, tag: Any, *, incoming: Dict[str, SoquetT], outgoing: Dict[str, SoquetT]):
        tn.add(qtn.Tensor(data=_PAULIY, inds=(outgoing['q'], incoming['q']), tags=[self.short_name(), tag]))

    def as_cirq_op(self, qubit_manager: 'cirq.QubitManager', q: 'CirqQuregT') -> Tuple['cirq.Operation', Dict[str, 'CirqQuregT']]:
        import cirq
        (q,) = q
        return (cirq.Y(q), {'q': [q]})
def gdrive_download(id='1n_oKgR81BJtqk75b00eAjdv03qVCQn2f', name='coco128.zip'):
    # Downloads a file from Google Drive, handling the large-file
    # confirmation cookie when Drive requires one.
    t = time.time()
    print('Downloading %s as %s... ' % (id, name), end='')  # format string was missing the id placeholder
    os.remove(name) if os.path.exists(name) else None
    os.remove('cookie') if os.path.exists('cookie') else None
    out = 'NUL' if platform.system() == 'Windows' else '/dev/null'
    os.system('curl -c ./cookie -s -L "drive.google.com/uc?export=download&id=%s" > %s ' % (id, out))
    if os.path.exists('cookie'):
        s = 'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm=%s&id=%s" -o %s' % (get_token(), id, name)
    else:
        s = 'curl -s -L -o %s "drive.google.com/uc?export=download&id=%s"' % (name, id)
    r = os.system(s)
    os.remove('cookie') if os.path.exists('cookie') else None
    if r != 0:
        os.remove(name) if os.path.exists(name) else None
        print('Download error ')
        return r
    if name.endswith('.zip'):
        print('unzipping... ', end='')
        os.system('unzip -q %s' % name)
        os.remove(name)
    print('Done (%.1fs)' % (time.time() - t))
    return r
class SmartProtocol(TPLinkProtocol):
    SLEEP_SECONDS_AFTER_TIMEOUT = 1

    def __init__(self, *, transport: BaseTransport) -> None:
        super().__init__(transport=transport)
        self._terminal_uuid: str = base64.b64encode(md5(uuid.uuid4().bytes)).decode()
        self._request_id_generator = SnowflakeId(1, 1)
        self._query_lock = asyncio.Lock()

    def get_smart_request(self, method, params=None) -> str:
        request = {'method': method, 'params': params, 'requestID': self._request_id_generator.generate_id(), 'request_time_milis': round(time.time() * 1000), 'terminal_uuid': self._terminal_uuid}
        return json_dumps(request)

    async def query(self, request: Union[str, Dict], retry_count: int = 3) -> Dict:
        async with self._query_lock:
            return await self._query(request, retry_count)

    async def _query(self, request: Union[str, Dict], retry_count: int = 3) -> Dict:
        for retry in range(retry_count + 1):
            try:
                return await self._execute_query(request, retry)
            # The exception class on this first handler was lost in the
            # source; ConnectionException is assumed here based on the
            # "Unable to connect" error message below.
            except ConnectionException as sdex:
                if retry >= retry_count:
                    await self.close()
                    _LOGGER.debug('Giving up on %s after %s retries', self._host, retry)
                    raise SmartDeviceException(f'Unable to connect to the device: {self._host}: {sdex}') from sdex
                continue
            except TimeoutError as tex:
                if retry >= retry_count:
                    await self.close()
                    raise SmartDeviceException('Unable to connect to the device, ' + f'timed out: {self._host}: {tex}') from tex
                await asyncio.sleep(self.SLEEP_SECONDS_AFTER_TIMEOUT)
                continue
            except AuthenticationException as auex:
                await self.close()
                _LOGGER.debug('Unable to authenticate with %s, not retrying', self._host)
                raise auex
            except RetryableException as ex:
                if retry >= retry_count:
                    await self.close()
                    _LOGGER.debug('Giving up on %s after %s retries', self._host, retry)
                    raise ex
                continue
            except TimeoutException as ex:
                if retry >= retry_count:
                    await self.close()
                    _LOGGER.debug('Giving up on %s after %s retries', self._host, retry)
                    raise ex
                await asyncio.sleep(self.SLEEP_SECONDS_AFTER_TIMEOUT)
                continue
            except SmartDeviceException as ex:
                await self.close()
                _LOGGER.debug('Giving up on %s after %s retries', self._host, retry)
                raise ex
            except Exception as ex:
                if retry >= retry_count:
                    await self.close()
                    _LOGGER.debug('Giving up on %s after %s retries', self._host, retry)
                    raise SmartDeviceException(f'Unable to connect to the device: {self._host}: {ex}') from ex
                _LOGGER.debug('Unable to query the device %s, retrying: %s', self._host, ex)
                continue
        raise SmartDeviceException('Query reached somehow to unreachable')

    async def _execute_query(self, request: Union[str, Dict], retry_count: int) -> Dict:
        if isinstance(request, dict):
            if len(request) == 1:
                smart_method = next(iter(request))
                smart_params = request[smart_method]
            else:
                requests = []
                for (method, params) in request.items():
                    requests.append({'method': method, 'params': params})
                smart_method = 'multipleRequest'
                smart_params = {'requests': requests}
        else:
            smart_method = request
            smart_params = None
        smart_request = self.get_smart_request(smart_method, smart_params)
        _LOGGER.debug('%s >> %s', self._host, _LOGGER.isEnabledFor(logging.DEBUG) and pf(smart_request))
        response_data = await self._transport.send(smart_request)
        _LOGGER.debug('%s << %s', self._host, _LOGGER.isEnabledFor(logging.DEBUG) and pf(response_data))
        self._handle_response_error_code(response_data)
        if (result := response_data.get('result')) is None:
            return {smart_method: None}
        if (responses := result.get('responses')) is None:
            return {smart_method: result}
        multi_result = {}
        for response in responses:
            self._handle_response_error_code(response)
            result = response.get('result', None)
            multi_result[response['method']] = result
        return multi_result

    def _handle_response_error_code(self, resp_dict: dict):
        error_code = SmartErrorCode(resp_dict.get('error_code'))
        if error_code == SmartErrorCode.SUCCESS:
            return
        msg = f'Error querying device: {self._host}: ' + f'{error_code.name}({error_code.value})'
        if (method := resp_dict.get('method')):
            msg += f' for method: {method}'
        if error_code in SMART_TIMEOUT_ERRORS:
            raise TimeoutException(msg, error_code=error_code)
        if error_code in SMART_RETRYABLE_ERRORS:
            raise RetryableException(msg, error_code=error_code)
        if error_code in SMART_AUTHENTICATION_ERRORS:
            raise AuthenticationException(msg, error_code=error_code)
        raise SmartDeviceException(msg, error_code=error_code)

    async def close(self) -> None:
        await self._transport.close()
class DiscriminatorS(torch.nn.Module): def __init__(self, use_spectral_norm=False): super(DiscriminatorS, self).__init__() norm_f = (weight_norm if (use_spectral_norm == False) else spectral_norm) self.convs = nn.ModuleList([norm_f(Conv1d(1, 128, 15, 1, padding=7)), norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)), norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)), norm_f(Conv1d(256, 512, 41, 4, groups=16, padding=20)), norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)), norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)), norm_f(Conv1d(1024, 1024, 5, 1, padding=2))]) self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) def forward(self, x): fmap = [] for l in self.convs: x = l(x) x = F.leaky_relu(x, LRELU_SLOPE) fmap.append(x) x = self.conv_post(x) fmap.append(x) x = torch.flatten(x, 1, (- 1)) return (x, fmap)
def test_get_solarposition_no_kwargs(expected_solpos, golden): times = pd.date_range(datetime.datetime(2003, 10, 17, 13, 30, 30), periods=1, freq='D', tz=golden.tz) ephem_data = solarposition.get_solarposition(times, golden.latitude, golden.longitude) expected_solpos.index = times expected_solpos = np.round(expected_solpos, 2) ephem_data = np.round(ephem_data, 2) assert_frame_equal(expected_solpos, ephem_data[expected_solpos.columns])
def test_solver_chooses_most_recent_version_amongst_repositories(package: ProjectPackage, io: NullIO) -> None: package.python_versions = '^3.7' package.add_dependency(Factory.create_dependency('tomlkit', {'version': '^0.5'})) repo = MockLegacyRepository() pool = RepositoryPool([repo, MockPyPIRepository()]) solver = Solver(package, pool, [], [], io) transaction = solver.solve() ops = check_solver_result(transaction, [{'job': 'install', 'package': get_package('tomlkit', '0.5.3')}]) assert (ops[0].package.source_type is None) assert (ops[0].package.source_url is None)
def get_ordered_lists_of_conv_fc(model: torch.nn.Module, input_shapes: Tuple, dummy_input: Union[(torch.Tensor, Tuple)]=None) -> List: device = get_device(model) if (dummy_input is None): dummy_input = create_rand_tensors_given_shapes(input_shapes, device) module_list = get_ordered_list_of_modules(model, dummy_input) module_list = [[name, module] for (name, module) in module_list if isinstance(module, (torch.nn.Conv1d, torch.nn.Conv2d, torch.nn.Linear, torch.nn.ConvTranspose2d, torch.nn.Conv3d))] return module_list
# The stripped decorators here are assumed to be pytest.mark.skip.
@pytest.mark.skip(reason='unknown')
class TestTools():
    def checkit(self, p, q, rng=None):  # rng default added; the callers below pass no rng
        N = p + q
        (layout, blades) = Cl(p, q)
        A = layout.randomV(n=N, rng=rng)
        R = 5.0 * layout.randomRotor(rng=rng)
        B = [(R * a) * (~R) for a in A]
        (R_found, rs) = of2v(A, B)
        self.assertTrue((R == R_found) or (R == (-R_found)))
        self.assertTrue([(R_found * a) * (~R_found) for a in A] == B)

    def testOrthoFrames2VersorEuclidean(self):
        for (p, q) in [(2, 0), (3, 0), (4, 0)]:
            self.checkit(p=p, q=q)

    @pytest.mark.skip(reason='unknown')
    def testOrthoFrames2VersorMinkowski(self):
        for (p, q) in [(1, 1), (2, 1), (3, 1)]:
            self.checkit(p=p, q=q)

    @pytest.mark.skip(reason='unknown')
    def testOrthoFrames2VersorBalanced(self):
        for (p, q) in [(2, 2)]:
            self.checkit(p=p, q=q)
def main(data_dir, client, bc, config): benchmark(read_tables, data_dir, bc, dask_profile=config['dask_profile']) query_1 = '\n SELECT i_item_sk,\n CAST(i_category_id AS TINYINT) AS i_category_id\n FROM item\n ' item_df = bc.sql(query_1) item_df = item_df.persist() wait(item_df) bc.create_table('item_df', item_df) query_2 = '\n SELECT wcs_user_sk,\n (wcs_click_date_sk * 86400 + wcs_click_time_sk) AS tstamp_inSec,\n i_category_id\n FROM web_clickstreams wcs, item_df i\n WHERE wcs.wcs_item_sk = i.i_item_sk\n AND i.i_category_id IS NOT NULL\n AND wcs.wcs_user_sk IS NOT NULL\n ORDER BY wcs.wcs_user_sk, tstamp_inSec, i_category_id\n ' merged_df = bc.sql(query_2) bc.drop_table('item_df') del item_df distinct_session_df = merged_df.map_partitions(get_distinct_sessions, keep_cols=['wcs_user_sk', 'i_category_id'], time_out=q30_session_timeout_inSec) del merged_df pair_df = distinct_session_df.map_partitions(get_pairs, pair_col='i_category_id', output_col_1='category_id_1', output_col_2='category_id_2') del distinct_session_df pair_df = pair_df.persist() wait(pair_df) bc.create_table('pair_df', pair_df) last_query = f''' SELECT CAST(category_id_1 AS BIGINT) AS category_id_1, CAST(category_id_2 AS BIGINT) AS category_id_2, COUNT(category_id_2) AS cnt FROM pair_df GROUP BY category_id_1, category_id_2 ORDER BY cnt desc LIMIT {q30_limit} ''' result = bc.sql(last_query) bc.drop_table('pair_df') return result
def test_no_color_env_var_overrides_cli_option(runner, monkeypatch, mock_cli_exec, boxed_context, in_tmp_dir, tmp_path): monkeypatch.setenv('NO_COLOR', '1') touch_files(tmp_path, 'foo.json') runner.invoke(cli_main, ['--color=always', '--schemafile', 'schema.json', 'foo.json']) assert (boxed_context.ref.color is False)
class AvailabilitiesPage(JsonPage):
    def find_best_slot(self, start_date=None, end_date=None, excluded_weekdays=()):
        # () replaces the mutable [] default; the argument is only read.
        for a in self.doc['availabilities']:
            date = parse_date(a['date']).date()
            if (start_date and date < start_date) or (end_date and date > end_date):
                continue
            if date.weekday() in excluded_weekdays:
                continue
            if len(a['slots']) == 0:
                continue
            return a['slots'][-1]
def sample_for_query(qid, ranking, args_positives, depth, permissive, biased): (positives, negatives, triples) = ([], [], []) for (pid, rank, *_, label) in ranking: assert (rank >= 1), f'ranks should start at 1 got rank = {rank}' assert (label in [0, 1]) if (rank > depth): break if label: take_this_positive = any((((rank <= maxDepth) and (len(positives) < maxBest)) for (maxBest, maxDepth) in args_positives)) if take_this_positive: positives.append((pid, 0)) elif permissive: positives.append((pid, rank)) else: negatives.append(pid) for (pos, neg_start) in positives: num_sampled = (100 if (neg_start == 0) else 5) negatives_ = negatives[neg_start:] biased_ = (biased if (neg_start == 0) else None) for neg in sample_negatives(negatives_, num_sampled, biased=biased_): triples.append((qid, pos, neg)) return triples
def _treat_X_doc(doc: Optional[str]) -> Optional[str]:
    if doc:
        doc = doc.replace('Data to predict with.', 'Data to predict with. Can also be a ``RayDMatrix``.')
        doc = doc.replace('Feature matrix.', 'Feature matrix. Can also be a ``RayDMatrix``.')
        # Only apply the period-less variant if the note is not already
        # present; otherwise this replace re-matches the text inserted by
        # the line above and duplicates the note.
        if 'RayDMatrix' not in doc:
            doc = doc.replace('Feature matrix', 'Feature matrix. Can also be a ``RayDMatrix``.')
    return doc
def _create_fileset(fullname, struct, recurse={}): set_path = SetPath(fullname, struct, recurse, struct.get('rec', {})) if (set_path.get_type() == 'directory'): _create_directory(set_path, always_delete=True) for name in struct.get('contents', {}): _multi_create_fileset(fullname, name, struct['contents'][name], set_path.recurse) _finish_directory(set_path) elif set_path.is_hardlinked(): _create_hardlink(set_path) elif (set_path.get_type() == 'link'): _create_symlink(set_path) else: _create_file(set_path)
("Too dangerous to modify 2.0.x's SourceGroups, this test will fail for them") class SourceGroupTestCase(unittest.TestCase): def test_empty(self): group = SourceGroup() self.assertIsNone(group.get_audio_data(2048)) def test_functionality(self): fake_data = ((b'a', 1000, 0.5), (b'b', 40000, 2.0), (b'c', 20000, 4.0), (b'd', 9992, 4.0)) audio_data = [AudioData((b * l), l) for (b, l, _) in fake_data] audio_data[0].timestamp = 1.23 expected_data = b''.join(((d * l) for (d, l, _) in fake_data)) expected_duration = sum((d for (_, _, d) in fake_data)) total_length = len(expected_data) sources = [mock.MagicMock(audio_format=AudioFormat(2, 8, 11025)) for _ in range(4)] exhausted = ([False] * 4) for (i, mock_source) in enumerate(sources): def _get_audio_data(_, j=i): if exhausted[j]: return None exhausted[j] = True return audio_data[j] mock_source.duration = fake_data[i][2] mock_source.get_audio_data.side_effect = _get_audio_data mock_source.get_queue_source.return_value = mock_source group = SourceGroup() for mock_source in sources: group.add(mock_source) ret_data = group.get_audio_data(total_length) self.assertEqual(expected_data, ret_data.data) self.assertAlmostEqual(ret_data.timestamp, 1.23) self.assertAlmostEqual(ret_data.duration, expected_duration) def test_inequal_audio_format(self): source_a = mock.Mock(audio_format=AudioFormat(1, 8, 44100), duration=None) source_b = mock.Mock(audio_format=AudioFormat(2, 16, 44100), duration=None) source_a.get_queue_source.return_value = source_a source_b.get_queue_source.return_value = source_b group = SourceGroup() group.add(source_a) self.assertEqual(group.audio_format, source_a.audio_format) with self.assertRaises(MediaException): group.add(source_b)
def _server_maintenance(): global EVENNIA, _MAINTENANCE_COUNT, _FLUSH_CACHE, _GAMETIME_MODULE if (not _FLUSH_CACHE): from evennia.utils.idmapper.models import conditional_flush as _FLUSH_CACHE if (not _GAMETIME_MODULE): from evennia.utils import gametime as _GAMETIME_MODULE _MAINTENANCE_COUNT += 1 now = time.time() if (_MAINTENANCE_COUNT == 1): _GAMETIME_MODULE.SERVER_START_TIME = now _GAMETIME_MODULE.SERVER_RUNTIME = ServerConfig.objects.conf('runtime', default=0.0) else: _GAMETIME_MODULE.SERVER_RUNTIME += 60.0 _GAMETIME_MODULE.SERVER_RUNTIME_LAST_UPDATED = now ServerConfig.objects.conf('runtime', _GAMETIME_MODULE.SERVER_RUNTIME) if ((_MAINTENANCE_COUNT % 300) == 0): _FLUSH_CACHE(_IDMAPPER_CACHE_MAXSIZE) if ((_MAINTENANCE_COUNT % 3600) == 0): evennia.ScriptDB.objects.validate() if ((_MAINTENANCE_COUNT % 3700) == 0): evennia.CHANNEL_HANDLER.update() if ((_MAINTENANCE_COUNT % (3600 * 7)) == 0): connection.close() if (_IDLE_TIMEOUT > 0): reason = _('idle timeout exceeded') for session in (sess for sess in SESSIONS.values() if ((now - sess.cmd_last) > _IDLE_TIMEOUT)): if ((not session.account) or (not session.account.access(session.account, 'noidletimeout', default=False))): SESSIONS.disconnect(session, reason=reason)
def get_id_fromjson(jsonobject, method=DEFAULT_ID_METHOD):
    method = os.environ.get('YADAGE_ID_METHOD', method)
    if method == 'uuid':
        return str(uuid.uuid4())
    elif method == 'jsonhash':
        return json_hash(jsonobject)
    else:
        raise NotImplementedError('unknown id generation method {}'.format(method))
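# Quick illustration of the two id-generation modes above (absent a
# YADAGE_ID_METHOD environment override); json_hash is assumed to be
# yadage's deterministic hashing helper, so equal documents get equal
# ids under 'jsonhash', while 'uuid' ids are fresh on every call.
doc = {'step': 'fit', 'seed': 42}
print(get_id_fromjson(doc, method='uuid'))      # different on every call
print(get_id_fromjson(doc, method='jsonhash'))  # stable for equal documents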
def render_trailing_newlines(msg, _node, source_lines=None): start_line = (msg.line - 1) (yield from render_context((start_line - 2), start_line, source_lines)) (yield from ((line, slice(None, None), LineType.OTHER, source_lines[(line - 1)]) for line in range(start_line, (len(source_lines) + 1))))
def verify_str_arg(value, arg=None, valid_values=None, custom_msg=None): if (not isinstance(value, torch._six.string_classes)): if (arg is None): msg = 'Expected type str, but got type {type}.' else: msg = 'Expected type str for argument {arg}, but got type {type}.' msg = msg.format(type=type(value), arg=arg) raise ValueError(msg) if (valid_values is None): return value if (value not in valid_values): if (custom_msg is not None): msg = custom_msg else: msg = "Unknown value '{value}' for argument {arg}. Valid values are {{{valid_values}}}." msg = msg.format(value=value, arg=arg, valid_values=iterable_to_str(valid_values)) raise ValueError(msg) return value
def dump_readme(path):
    readme = "# Pyrocko Earthquake Scenario\n\nThe directory structure of a scenario is laid out as follows:\n\n## Map of the scenario\nA simple map is generated from `pyrocko.automap` in map.pdf\n\n## Earthquake Sources\n\nCan be found as events.txt; sources.yml hosts the pyrocko.gf sources.\n\n## Folder `meta`\n\nContains stations.txt and StationXML files for waveforms as well as KML data.\nThe responses are flat with gain 1.0 at 1.0 Hz.\n\n## Folder `waveforms`\n\nWaveforms as mini-seed are stored here, segregated into days.\n\n## Folder `gnss`\n\nThe GNSS campaign.yml lives here.\nUse `pyrocko.guts.load(filename='campaign.yml')` to load the campaign.\n\n## Folder `insar`\n\nKite InSAR scenes for ascending and descending tracks are stored there.\nUse `kite.Scene.load(<filename>)` to inspect the scenes.\n\n"
    with open(path, 'w') as f:
        f.write(readme)
    return [path]
def test_make_unique_obj_list(): object_list = [type('SomeObjectClass', (object,), {'propertyName': '1'}), type('SomeObjectClass', (object,), {'propertyName': '2'}), type('SomeObjectClass', (object,), {'propertyName': '1'})] value_list = utils.make_unique_obj_list(object_list, (lambda x: x.propertyName)) value_list = list(map((lambda x: x.propertyName), value_list)) value_list.sort() assert (value_list == ['1', '2'])
def main():
    in_q = Queue()
    out_q = Queue()
    t = threading.Thread(target=worker, args=(in_q, out_q))
    t.start()
    while True:
        start = time.monotonic()
        for _ in range(COUNT):
            in_q.put(lambda: None)
            out_q.get()
        end = time.monotonic()
        # (end - start) / COUNT is seconds per job; x1e6 converts to microseconds
        print(f'{((end - start) / COUNT) * 1000000.0:.2f} µs/job')
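# Hypothetical supporting pieces for the benchmark above; the original
# worker and COUNT were not included in the snippet. The worker is assumed
# to execute each queued callable and acknowledge it on out_q, which is
# what main()'s put/get round trip requires.
COUNT = 10000

def worker(in_q, out_q):
    while True:
        job = in_q.get()
        if job is None:  # sentinel so the thread can be shut down
            break
        job()
        out_q.put(None)  # acknowledge completion so main() can time the round trip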
class ReadOnlyObjectDict(ObjectDictProxy): def __delitem__(self, key): raise NotImplementedError def __delattr__(self, item): raise NotImplementedError def __setitem__(self, key, item): raise NotImplementedError def __setattr__(self, key, value): raise NotImplementedError
def test_channelstate_lockedtransfer_invalid_chainid(): (our_model1, _) = create_model(70) (partner_model1, privkey2) = create_model(100) channel_state = create_channel_from_models(our_model1, partner_model1, privkey2) distributable = channel.get_distributable(channel_state.partner_state, channel_state.our_state) lock_amount = (distributable - 1) lock_expiration = 10 lock_secrethash = sha256(b'test_channelstate_lockedtransfer_overspent').digest() lock = HashTimeLockState(lock_amount, lock_expiration, lock_secrethash) nonce = 1 transferred_amount = 0 receive_lockedtransfer = make_receive_transfer_mediated(channel_state, privkey2, nonce, transferred_amount, lock, chain_id=(UNIT_CHAIN_ID + 1)) (is_valid, _, _) = channel.handle_receive_lockedtransfer(channel_state, receive_lockedtransfer) assert (not is_valid), 'message is invalid because it uses different chain_id than the channel' assert_partner_state(channel_state.our_state, channel_state.partner_state, our_model1) assert_partner_state(channel_state.partner_state, channel_state.our_state, partner_model1)
class EMAImage(DifferentiableImage):
    def __init__(self, width, height, tensor, decay):
        super().__init__(width, height)
        self.tensor = nn.Parameter(tensor)
        self.register_buffer('biased', torch.zeros_like(tensor))
        self.register_buffer('average', torch.zeros_like(tensor))
        self.decay = decay
        self.register_buffer('accum', torch.tensor(1.0))
        self.update()

    # The stripped decorators here are assumed to be torch.no_grad(),
    # since the in-place buffer updates below must not track gradients.
    @torch.no_grad()
    def update(self):
        if not self.training:
            raise RuntimeError('update() should only be called during training')
        self.accum.mul_(self.decay)
        self.biased.mul_(self.decay)
        self.biased.add_((1 - self.decay) * self.tensor)
        self.average.copy_(self.biased)
        self.average.div_(1 - self.accum)

    @torch.no_grad()
    def reset(self):
        if not self.training:
            raise RuntimeError('reset() should only be called during training')
        self.biased.set_(torch.zeros_like(self.biased))
        self.average.set_(torch.zeros_like(self.average))
        self.accum.set_(torch.ones_like(self.accum))
        self.update()

    def decode_training_tensor(self):
        return self.decode(self.tensor)

    def decode_tensor(self):
        return self.decode(self.average)

    def decode(self, tensor):
        raise NotImplementedError
def rgb2lab(c):
    R = c[0]
    G = c[1]
    B = c[2]
    eps = 216.0 / 24389.0
    k = 24389.0 / 27.0
    # D50 reference white
    Xr = 0.964221
    Yr = 1.0
    Zr = 0.825211
    r = R / 255.0
    g = G / 255.0
    b = B / 255.0
    if r <= 0.04045:
        r = r / 12
    else:
        r = ((r + 0.055) / 1.055) ** 2.4
    if g <= 0.04045:
        g = g / 12
    else:
        g = ((g + 0.055) / 1.055) ** 2.4
    if b <= 0.04045:
        b = b / 12
    else:
        b = ((b + 0.055) / 1.055) ** 2.4
    # sRGB -> XYZ (D50) matrix. The coefficients were truncated to 0.0 in
    # the source; the standard D50 values are assumed and restored here.
    X = (0.436052025 * r) + (0.385081593 * g) + (0.143087414 * b)
    Y = (0.222491598 * r) + (0.71688606 * g) + (0.060621486 * b)
    Z = (0.013929122 * r) + (0.097097002 * g) + (0.71418547 * b)
    xr = X / Xr
    yr = Y / Yr
    zr = Z / Zr
    if xr > eps:
        fx = xr ** (1 / 3)
    else:
        fx = ((k * xr) + 16) / 116
    if yr > eps:
        fy = yr ** (1 / 3)
    else:
        fy = ((k * yr) + 16) / 116
    if zr > eps:
        fz = zr ** (1 / 3)
    else:
        fz = ((k * zr) + 16) / 116
    Ls = (116 * fy) - 16
    ass = 500 * (fx - fy)
    bs = 200 * (fy - fz)
    return (int((2.55 * Ls) + 0.5), int(ass + 0.5), int(bs + 0.5))
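# Example call for rgb2lab above: pure red in sRGB. The return value is
# the (L, a, b) triple as ints, with L rescaled by 2.55 onto 0..255.
print(rgb2lab((255, 0, 0)))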
class TestParseLDAPUrl(unittest.TestCase): parse_ldap_url_tests = [('ldap://root.openldap.org/dc=openldap,dc=org', LDAPUrl(hostport='root.openldap.org', dn='dc=openldap,dc=org')), ('ldap://root.openldap.org/dc%3dboolean%2cdc%3dnet???%28objectClass%3d%2a%29', LDAPUrl(hostport='root.openldap.org', dn='dc=boolean,dc=net', filterstr='(objectClass=*)')), ('ldap://root.openldap.org/dc=openldap,dc=org??sub?', LDAPUrl(hostport='root.openldap.org', dn='dc=openldap,dc=org', scope=ldapurl.LDAP_SCOPE_SUBTREE)), ('ldap://root.openldap.org/dc=openldap,dc=org??one?', LDAPUrl(hostport='root.openldap.org', dn='dc=openldap,dc=org', scope=ldapurl.LDAP_SCOPE_ONELEVEL)), ('ldap://root.openldap.org/dc=openldap,dc=org??base?', LDAPUrl(hostport='root.openldap.org', dn='dc=openldap,dc=org', scope=ldapurl.LDAP_SCOPE_BASE)), ('ldap://x500.mh.se/o=Mitthogskolan,c=se????1.2.752.58.10.2=T.61', LDAPUrl(hostport='x500.mh.se', dn='o=Mitthogskolan,c=se', extensions=ldapurl.LDAPUrlExtensions({'1.2.752.58.10.2': ldapurl.LDAPUrlExtension(critical=0, extype='1.2.752.58.10.2', exvalue='T.61')}))), ('ldap://localhost:12345/dc=stroeder,dc=com????!bindname=cn=Michael%2Cdc=stroeder%2Cdc=com,!X-BINDPW=secretpassword', LDAPUrl(hostport='localhost:12345', dn='dc=stroeder,dc=com', extensions=ldapurl.LDAPUrlExtensions({'bindname': ldapurl.LDAPUrlExtension(critical=1, extype='bindname', exvalue='cn=Michael,dc=stroeder,dc=com'), 'X-BINDPW': ldapurl.LDAPUrlExtension(critical=1, extype='X-BINDPW', exvalue='secretpassword')}))), ('ldap://localhost:54321/dc=stroeder,dc=com????bindname=cn=Michael%2Cdc=stroeder%2Cdc=com,X-BINDPW=secretpassword', LDAPUrl(hostport='localhost:54321', dn='dc=stroeder,dc=com', who='cn=Michael,dc=stroeder,dc=com', cred='secretpassword')), ('ldaps://localhost:12345/dc=stroeder,dc=com', LDAPUrl(urlscheme='ldaps', hostport='localhost:12345', dn='dc=stroeder,dc=com')), ('LDAPS://localhost:12345/dc=stroeder,dc=com', LDAPUrl(urlscheme='ldaps', hostport='localhost:12345', dn='dc=stroeder,dc=com')), ('ldaps://localhost:12345/dc=stroeder,dc=com', LDAPUrl(urlscheme='LDAPS', hostport='localhost:12345', dn='dc=stroeder,dc=com')), ('ldapi://%2ftmp%2fopenldap2-1389/dc=stroeder,dc=com', LDAPUrl(urlscheme='ldapi', hostport='/tmp/openldap2-1389', dn='dc=stroeder,dc=com'))] def test_ldapurl(self): for (ldap_url_str, test_ldap_url_obj) in self.parse_ldap_url_tests: ldap_url_obj = LDAPUrl(ldapUrl=ldap_url_str) self.assertEqual(ldap_url_obj, test_ldap_url_obj, 'Attributes of LDAPUrl({}) are:\n{}\ninstead of:\n{}'.format(repr(ldap_url_str), repr(ldap_url_obj), repr(test_ldap_url_obj))) unparsed_ldap_url_str = test_ldap_url_obj.unparse() unparsed_ldap_url_obj = LDAPUrl(ldapUrl=unparsed_ldap_url_str) self.assertEqual(unparsed_ldap_url_obj, test_ldap_url_obj, 'Attributes of LDAPUrl({}) are:\n{}\ninstead of:\n{}'.format(repr(unparsed_ldap_url_str), repr(unparsed_ldap_url_obj), repr(test_ldap_url_obj)))
def parse_worker(q): parser = DependencyTreeParser(model_path=('Stanford Library/stanford-parser-full-%s/edu/stanford/nlp/models/lexparser/englishPCFG.ser.gz' % DATE)) parser = MetricalTreeParser(parser) for filename in iter(q.get, 'STOP'): print(('Working on %s...' % filename)) sents = [] with codecs.open(filename, encoding='utf-8') as f: for line in f: sents.extend(pause_splitter(line)) df = parser.stats_raw_parse_sents(sents, arto=True) df.to_csv(codecs.open(('%s.csv' % filename), 'w', encoding='utf-8'), index=False) print(('Finished with %s.' % filename)) return True
def tpdm_antisymmetry_constraint(dim: int) -> DualBasis: dbe_list = [] for (p, q, r, s) in product(range(dim), repeat=4): if (((p * dim) + q) <= ((r * dim) + s)): if ((p < q) and (r < s)): tensor_elements = [tuple(indices) for indices in _coord_generator(p, q, r, s)] tensor_names = (['cckk'] * len(tensor_elements)) tensor_coeffs = ([0.5] * len(tensor_elements)) dbe = DualBasisElement() for (n, e, c) in zip(tensor_names, tensor_elements, tensor_coeffs): dbe.add_element(n, e, c) dbe_list.append(dbe) return DualBasis(elements=dbe_list)
@pytest.mark.parametrize('load_manager', [{'format': '{time}: {load:.1f}'}], indirect=True)
def test_load_times_formatting(load_manager):
    widget = load_manager.c.widget['load']
    assert widget.info()['text'] == '1m: 0.7'
    widget.next_load()
    assert widget.info()['text'] == '5m: 0.8'
    widget.next_load()
    assert widget.info()['text'] == '15m: 1.0'
    widget.next_load()
    assert widget.info()['text'] == '1m: 0.7'
def test_none_Constant(): o1 = Constant(NoneTypeT(), None, name='NoneConst') o2 = Constant(NoneTypeT(), None, name='NoneConst') assert o1.equals(o2) assert NoneConst.equals(o1) assert o1.equals(NoneConst) assert NoneConst.equals(o2) assert o2.equals(NoneConst) import pickle import pytensor x = vector('x') y = argmax(x) kwargs = {} if (pytensor.config.mode in ['DebugMode', 'DEBUG_MODE']): kwargs = {'mode': 'FAST_RUN'} f = pytensor.function([x], [y], **kwargs) pickle.loads(pickle.dumps(f))
@patch('pypyr.steps.filewritetoml.Path')  # the decorator name was stripped in the source
def test_filewritetoml_pass_no_payload(mock_path):
    context = Context({'k1': 'v1', 'fileWriteToml': {'path': '/arb/blah'}})
    with io.BytesIO() as out_bytes:
        with patch('pypyr.toml.open', mock_open()) as mock_output:
            mock_output.return_value.write.side_effect = out_bytes.write
            filewrite.run_step(context)
        output = out_bytes.getvalue().decode()
    mocked_path = mock_path.return_value
    mock_path.assert_called_once_with('/arb/blah')
    mock_output.assert_called_once_with(mocked_path, 'wb')
    assert context, "context shouldn't be None"
    assert len(context) == 2, 'context should have 2 items'
    assert context['k1'] == 'v1'
    assert context['fileWriteToml'] == {'path': '/arb/blah'}
    mocked_path.parent.mkdir.assert_called_once_with(parents=True, exist_ok=True)
    assert output == 'k1 = "v1"\n\n[fileWriteToml]\npath = "/arb/blah"\n'
class TBBasicCharacter(DefaultCharacter):
    def at_object_creation(self):
        self.db.max_hp = 100  # maximum health
        self.db.hp = self.db.max_hp  # current health

    def at_before_move(self, destination):
        if is_in_combat(self):
            self.msg("You can't exit a room while in combat!")
            return False
        # the attribute is stored as 'hp' above; 'HP' was a case mismatch
        if self.db.hp <= 0:
            self.msg("You can't move, you've been defeated!")
            return False
        return True
def calculate_shard_sizes_and_offsets(tensor: torch.Tensor, world_size: int, local_world_size: int, sharding_type: str, col_wise_shard_dim: Optional[int]=None) -> Tuple[(List[List[int]], List[List[int]])]: (rows, columns) = tensor.shape if (sharding_type == ShardingType.DATA_PARALLEL.value): return (([[rows, columns]] * world_size), ([[0, 0]] * world_size)) elif (sharding_type == ShardingType.TABLE_WISE.value): return ([[rows, columns]], [[0, 0]]) elif (sharding_type == ShardingType.ROW_WISE.value): return _calculate_rw_shard_sizes_and_offsets(rows, world_size, columns) elif (sharding_type == ShardingType.TABLE_ROW_WISE.value): return _calculate_rw_shard_sizes_and_offsets(rows, local_world_size, columns) elif ((sharding_type == ShardingType.COLUMN_WISE.value) or (sharding_type == ShardingType.TABLE_COLUMN_WISE.value)): return _calculate_cw_shard_sizes_and_offsets(columns, rows, col_wise_shard_dim) raise ValueError(f'Unrecognized or unsupported sharding type provided: {sharding_type}')
class TestCallable(unittest.TestCase):
    def test_callable(self) -> None:
        # The '#@' extract_node markers (and the '@' of decorators in the
        # embedded snippets below) were stripped in the source; they are
        # restored here so astroid's extract_node can pick the right nodes.
        expected = [
            ('callable(len)', True),
            ('callable("a")', False),
            ('callable(callable)', True),
            ('callable(lambda x, y: x+y)', True),
            ('import os; __(callable(os))', False),
            ('callable(int)', True),
            ('\ndef test(): pass\ncallable(test) #@', True),
            ('\nclass C1:\n    def meth(self): pass\ncallable(C1) #@', True),
        ]
        for (code, expected_value) in expected:
            node = extract_node(code)
            inferred = next(node.infer())
            self.assertEqual(inferred.value, expected_value)

    def test_callable_methods(self) -> None:
        ast_nodes = extract_node('\nclass C:\n    def test(self): pass\n    @staticmethod\n    def static(): pass\n    @classmethod\n    def class_method(cls): pass\n    def __call__(self): pass\nclass D(C):\n    pass\nclass NotReallyCallableDueToPythonMisfeature(object):\n    __call__ = 42\ncallable(C.test) #@\ncallable(C.static) #@\ncallable(C.class_method) #@\ncallable(C().test) #@\ncallable(C().static) #@\ncallable(C().class_method) #@\nC #@\nC() #@\nNotReallyCallableDueToPythonMisfeature() #@\nstaticmethod #@\nclassmethod #@\nproperty #@\nD #@\nD() #@\n')
        for node in ast_nodes:
            inferred = next(node.infer())
            self.assertTrue(inferred)

    def test_inference_errors(self) -> None:
        ast_nodes = extract_node('\nfrom unknown import unknown\ncallable(unknown) #@\ndef test():\n    return unknown\ncallable(test()) #@\n')
        for node in ast_nodes:
            inferred = next(node.infer())
            self.assertEqual(inferred, util.Uninferable)

    def test_not_callable(self) -> None:
        ast_nodes = extract_node('\ncallable("") #@\ncallable(1) #@\ncallable(True) #@\n')
        for node in ast_nodes:
            inferred = next(node.infer())
            self.assertFalse(inferred.value)
@ddt
class TestPortfolioDiversification(QiskitFinanceTestCase):
    def setUp(self):
        super().setUp()
        self.num_assets = 4
        self.expected_returns = [0.0, -0.0, 0.0, 0.0]
        self.covariances = [[0.0, 7e-05, 0.0, -9e-05], [7e-05, 0.0, 5e-05, 4e-05], [0.0, 5e-05, 0.0, -0.0], [-9e-05, 4e-05, -0.0, 0.0]]
        self.risk_factor = 0.5
        self.budget = 2
        self.bounds = [(0, 2), (0, 3), (0, 4), (1, 5)]

    def assertEqualQuadraticProgram(self, actual, expected):
        self.assertEqual(actual.name, expected.name)
        self.assertEqual(actual.get_num_vars(), expected.get_num_vars())
        # compare actual against expected (the source zipped actual with itself)
        for (var1, var2) in zip(actual.variables, expected.variables):
            self.assertEqual(var1.vartype, var2.vartype)
            self.assertEqual(var1.lowerbound, var2.lowerbound)
            self.assertEqual(var1.upperbound, var2.upperbound)
        self.assertEqual(actual.objective.sense, expected.objective.sense)
        self.assertEqual(actual.objective.constant, expected.objective.constant)
        self.assertDictEqual(actual.objective.linear.to_dict(), expected.objective.linear.to_dict())
        self.assertDictEqual(actual.objective.quadratic.to_dict(), expected.objective.quadratic.to_dict())
        self.assertEqual(len(actual.linear_constraints), len(expected.linear_constraints))
        for (act_lin, exp_lin) in zip(actual.linear_constraints, expected.linear_constraints):
            self.assertEqual(act_lin.sense, exp_lin.sense)
            self.assertEqual(act_lin.rhs, exp_lin.rhs)
            self.assertEqual(act_lin.linear.to_dict(), exp_lin.linear.to_dict())

    def test_to_quadratic_program(self):
        portfolio_optimization = PortfolioOptimization(self.expected_returns, self.covariances, self.risk_factor, self.budget)
        actual_op = portfolio_optimization.to_quadratic_program()
        expected_op = QuadraticProgram(name='Portfolio optimization')
        for i in range(self.num_assets):
            expected_op.binary_var(name=f'x_{i}')
        quadratic = {(i, j): self.risk_factor * self.covariances[i][j] for i in range(self.num_assets) for j in range(self.num_assets)}
        linear = {i: -self.expected_returns[i] for i in range(self.num_assets)}
        expected_op.minimize(quadratic=quadratic, linear=linear)
        linear = {i: 1 for i in range(self.num_assets)}
        expected_op.linear_constraint(linear=linear, sense='==', rhs=self.budget)
        self.assertEqualQuadraticProgram(actual_op, expected_op)
        portfolio_optimization = PortfolioOptimization(self.expected_returns, self.covariances, self.risk_factor, self.budget, self.bounds)
        actual_op = portfolio_optimization.to_quadratic_program()
        expected_op = QuadraticProgram(name='Portfolio optimization')
        for i in range(self.num_assets):
            expected_op.integer_var(lowerbound=self.bounds[i][0], upperbound=self.bounds[i][1], name=f'x_{i}')
        quadratic = {(i, j): self.risk_factor * self.covariances[i][j] for i in range(self.num_assets) for j in range(self.num_assets)}
        linear = {i: -self.expected_returns[i] for i in range(self.num_assets)}
        expected_op.minimize(quadratic=quadratic, linear=linear)
        linear = {i: 1 for i in range(self.num_assets)}
        expected_op.linear_constraint(linear=linear, sense='==', rhs=self.budget)
        self.assertEqualQuadraticProgram(actual_op, expected_op)

    # The bare tuple that preceded this test reads like a stripped ddt
    # @data/@unpack decorator pair; it is restored on that assumption
    # (with @ddt added on the class).
    @data(
        [[1], [[1], [1]], 0.5, 2, None],
        [[1], [[1, 1]], 0.5, 2, None],
        [[1, 2], [[1, 2], [3, 4]], 0.5, 2, [(0, 2), (3, 1)]],
        [[1, 2], [[1, 2], [3, 4]], 0.5, 2, [(0, 2), (0, 2), (0, 2)]],
    )
    @unpack
    def test_is_compatibility(self, expected_returns, covariances, risk_factor, budget, bounds):
        with self.assertRaises(QiskitFinanceError):
            _ = PortfolioOptimization(expected_returns, covariances, risk_factor, budget, bounds)

    def test_interpret(self):
        portfolio_optimization = PortfolioOptimization(self.expected_returns, self.covariances, self.risk_factor, self.budget)
        result_x = np.array([0, 1, 0, 1])
        self.assertEqual(portfolio_optimization.interpret(result_x), [1, 3])

    def test_portfolio_expected_value(self):
        portfolio_optimization = PortfolioOptimization(self.expected_returns, self.covariances, self.risk_factor, self.budget)
        result_x = np.array([0, 1, 0, 1])
        expected_value = np.dot(self.expected_returns, result_x)
        self.assertEqual(portfolio_optimization.portfolio_expected_value(result_x), expected_value)

    def test_portfolio_variance(self):
        portfolio_optimization = PortfolioOptimization(self.expected_returns, self.covariances, self.risk_factor, self.budget)
        result_x = np.array([0, 1, 0, 1])
        variance = np.dot(result_x, np.dot(self.covariances, result_x))
        self.assertEqual(portfolio_optimization.portfolio_variance(result_x), variance)

    def test_risk_factor(self):
        portfolio_optimization = PortfolioOptimization(self.expected_returns, self.covariances, self.risk_factor, self.budget)
        portfolio_optimization.risk_factor = 0.898989
        self.assertEqual(portfolio_optimization.risk_factor, 0.898989)

    def test_budget(self):
        portfolio_optimization = PortfolioOptimization(self.expected_returns, self.covariances, self.risk_factor, self.budget)
        portfolio_optimization.budget = 3
        self.assertEqual(portfolio_optimization.budget, 3)

    def test_bounds(self):
        portfolio_optimization = PortfolioOptimization(self.expected_returns, self.covariances, self.risk_factor, self.budget, self.bounds)
        portfolio_optimization.bounds = [(0, 4), (0, 4), (0, 4), (1, 4)]
        self.assertEqual(portfolio_optimization.bounds, [(0, 4), (0, 4), (0, 4), (1, 4)])
def test_pype_no_pipe_arg(mock_pipe): context = Context({'pype': {'name': 'pipe name', 'pipeArg': None, 'useParentContext': False, 'skipParse': False, 'raiseError': True}}) with patch_logger('pypyr.steps.pype', logging.INFO) as mock_logger_info: with get_arb_pipeline_scope(context): pype.run_step(context) mock_pipe.assert_called_once_with(name='pipe name', context_args=None, parse_input=True, loader=None, groups=None, success_group=None, failure_group=None, py_dir=None) mocked_runner = mock_pipe.return_value.load_and_run_pipeline mocked_runner.assert_called_once_with({}, None) assert (mock_logger_info.mock_calls == [call('pyping pipe name, without parent context.'), call('pyped pipe name.')])
# The '@' on the registration decorator was lost in the source.
@_LOSS.register_module()
class CeLoss(BaseLoss):
    def __init__(self, weight=1.0, ignore_label=-100, use_weight=False, cls_weight=None, input_dict=None, **kwargs):
        super().__init__(weight)
        if input_dict is None:
            self.input_dict = {'ce_inputs': 'ce_inputs', 'ce_labels': 'ce_labels'}
        else:
            self.input_dict = input_dict
        self.loss_func = self.ce_loss
        self.ignore = ignore_label
        self.use_weight = use_weight
        self.cls_weight = torch.tensor(cls_weight) if cls_weight is not None else None

    def ce_loss(self, ce_inputs, ce_labels):
        # Pass the configured class weights and ignore label through to
        # F.cross_entropy; they were stored above but unused in the source.
        weight = self.cls_weight.to(ce_inputs.device) if (self.use_weight and self.cls_weight is not None) else None
        return F.cross_entropy(ce_inputs, ce_labels, weight=weight, ignore_index=self.ignore)
def catch_the_response_if_user_want_evaluate(update, context): query = update.callback_query if (query.data == (PATTERN_TO_CATCH_IF_USER_WANT_RATE_THE_PRODUCT + 'OK')): send_a_rating_message(update, context, PATTERN_TO_CATCH_THE_RATE) elif (query.data == (PATTERN_TO_CATCH_IF_USER_WANT_RATE_THE_PRODUCT + 'cancel')): query.edit_message_text(get_text('OK', context))
# '_mode()' in the source reads like a truncated decorator; torch.inference_mode()
# is assumed here, matching how torcheval wraps its functional metrics.
@torch.inference_mode()
def multiclass_recall(input: torch.Tensor, target: torch.Tensor, *, num_classes: Optional[int] = None, average: Optional[str] = 'micro') -> torch.Tensor:
    _recall_param_check(num_classes, average)
    (num_tp, num_labels, num_predictions) = _recall_update(input, target, num_classes, average)
    return _recall_compute(num_tp, num_labels, num_predictions, average)
class QdbClientServer(QdbServerBase):
    def __init__(self, session_store, host='localhost', port=8002, route=DEFAULT_ROUTE, auth_fn=None, auth_timeout=60):
        self.auth_fn = auth_fn or self.NO_AUTH
        self.auth_timeout = auth_timeout
        self.route = re.compile(route, re.IGNORECASE)
        self.session_store = session_store
        if self.route.groups != 1:
            raise QdbInvalidRoute(self.route)
        self._server = pywsgi.WSGIServer((host, port), self.handle_client, handler_class=WebSocketHandler)

    # These two accessors read like stripped @property definitions; the
    # decorators are restored here on that assumption.
    @property
    def address(self):
        return self._server.address

    @property
    def server_port(self):
        return self._server.server_port

    def send_error(self, ws, error_type, error_data):
        try:
            ws.send(fmt_err_msg(error_type, error_data, serial=json.dumps))
        except WebSocketError:
            return

    def get_events(self, ws):
        while True:
            try:
                raw = ws.receive()
            except WebSocketError:
                return
            try:
                event = json.loads(raw)
                event['e']
            except (ValueError, TypeError) as v:
                self.send_error(ws, 'event', str(v))
                return
            except KeyError:
                self.send_error(ws, 'event', "No 'e' field sent")
                return
            yield event

    def get_event(self, ws):
        try:
            return next(self.get_events(ws))
        except StopIteration:
            return None

    def handle_client(self, environ, start_response):
        path = environ['PATH_INFO']
        ws = environ['wsgi.websocket']
        addr = environ['REMOTE_ADDR']
        try:
            match = self.route.match(path)
            if not match:
                return
            log.info('Client request from %s' % addr)
            uuid = match.group(1)
            start_event = None
            with Timeout(self.auth_timeout, False):
                start_event = self.get_event(ws)
            failed = False
            message = ''
            if not start_event:
                message = 'No start event received'
                failed = True
            elif start_event['e'] != 'start':
                message = "First event must be of type: 'start'"
                failed = True
            elif not self.auth_fn(start_event.get('p', '')):
                log.warn('Client %s failed to authenticate' % addr)
                message = 'Authentication failed'
                failed = True
            if failed:
                try:
                    self.send_error(ws, 'auth', message)
                    ws.send(fmt_msg('disable', serial=json.dumps))
                except WebSocketError:
                    pass
                return
            if not self.session_store.attach_client(uuid, ws):
                return
            self.session_store.send_to_tracer(uuid, event=start_event)
            for event in self.get_events(ws):
                self.session_store.send_to_tracer(uuid, event=event)
        finally:
            log.info('Closing websocket to client %s' % addr)
            ws.close()

    def start(self, *args, **kwargs):
        log.info('Starting qdb.server.client')
        self._server.start()

    def stop(self, *args, **kwargs):
        log.info('Stopping qdb.server.client')
        self._server.stop()

    def _extra_repr_args(self):
        return ('route=%s' % repr(self.route.pattern),)
def setup(app): generate_keybinding_images() if os.getenv('QTILE_BUILD_SCREENSHOTS', False): generate_widget_screenshots() else: print('Skipping screenshot builds...') app.add_directive('qtile_class', QtileClass) app.add_directive('qtile_hooks', QtileHooks) app.add_directive('qtile_module', QtileModule) app.add_directive('qtile_commands', QtileCommands) app.add_directive('qtile_graph', QtileGraph) app.add_directive('qtile_migrations', QtileMigrations) app.add_directive('collapsible', CollapsibleSection) app.add_node(CollapsibleNode, html=(visit_collapsible_node, depart_collapsible_node))
def test_toml_parser_pass(): in_bytes = b'[table]\nkey= "value"' with patch('pypyr.toml.open', mock_open(read_data=in_bytes)) as mocked_open: out = toml_file.get_parsed_context(['./myfile.toml']) mocked_open.assert_called_once_with('./myfile.toml', 'rb') assert (out == {'table': {'key': 'value'}})
@pytest.mark.parametrize('to_test', [qutip.basis, qutip.fock, qutip.fock_dm])
@pytest.mark.parametrize('size, n', [([2, 2], [0, 1]), ([2, 3, 4], [1, 2, 0])])
def test_implicit_tensor_basis_like(to_test, size, n):
    implicit = to_test(size, n)
    explicit = qutip.tensor(*[to_test([ss], [nn]) for (ss, nn) in zip(size, n)])
    assert implicit == explicit
def setup(args): cfg = get_cfg() add_centernet_config(cfg) cfg.merge_from_file(args.config_file) cfg.merge_from_list(args.opts) if ('/auto' in cfg.OUTPUT_DIR): file_name = os.path.basename(args.config_file)[:(- 5)] cfg.OUTPUT_DIR = cfg.OUTPUT_DIR.replace('/auto', '/{}'.format(file_name)) logger.info('OUTPUT_DIR: {}'.format(cfg.OUTPUT_DIR)) cfg.freeze() default_setup(cfg, args) return cfg
def test_get_formatted_iterable_with_memo(): arb_dict = {'key4.1': 'value4.1', '{ctx2}_key4.2': 'value_{ctx3}_4.2', 'key4.3': {'4.3.1': '4.3.1value', '4.3.2': '4.3.2_{ctx1}_value'}} arb_list = [0, 1, 2] arb_string = 'arb string' arb_string_with_formatting = 'a {ctx1} string' input_obj = {'k1': arb_string, 'k2': 'v2_{ctx1}', 'k3': arb_list, 'k4': [arb_dict, 2, '3_{ctx4}here', arb_dict], 'k5': {'key5.1': arb_string, 'key5.2': arb_string_with_formatting}, 'k6': ('six6.1', False, arb_list, 77, 'six_{ctx1}_end'), 'k7': 'simple string to close 7', 'k8': arb_string_with_formatting} context = Context({'ctx1': 'ctxvalue1', 'ctx2': 'ctxvalue2', 'ctx3': 'ctxvalue3', 'ctx4': 'ctxvalue4'}) output = context.get_formatted_value(input_obj) assert (id(input_obj['k3']) == id(input_obj['k6'][2])) assert (id(input_obj['k4'][0]) == id(input_obj['k4'][3])) assert (output != input_obj) assert (input_obj['k2'] == 'v2_{ctx1}') assert (output['k2'] == 'v2_ctxvalue1') assert (input_obj['k4'][2] == '3_{ctx4}here') assert (output['k4'][2] == '3_ctxvalue4here') assert (input_obj['k4'][3]['{ctx2}_key4.2'] == 'value_{ctx3}_4.2') assert (output['k4'][3]['ctxvalue2_key4.2'] == 'value_ctxvalue3_4.2') assert (input_obj['k4'][3]['key4.3']['4.3.2'] == '4.3.2_{ctx1}_value') assert (output['k4'][3]['key4.3']['4.3.2'] == '4.3.2_ctxvalue1_value') assert (input_obj['k6'][4] == 'six_{ctx1}_end') assert (output['k6'][4] == 'six_ctxvalue1_end') assert (id(output['k4']) != id(input_obj['k4'])) assert (id(output['k4'][3]['key4.3']) != id(input_obj['k4'][3]['key4.3'])) assert (id(output['k5']) != id(input_obj['k5'])) assert (id(output['k6']) != id(input_obj['k6'])) assert (id(output['k6'][2]) != id(input_obj['k6'][2])) assert (id(output['k7']) == id(input_obj['k7'])) output['k7'] = 'mutate 7 on new' assert (input_obj['k7'] == 'simple string to close 7') assert (input_obj['k8'] == arb_string_with_formatting) assert (output['k8'] == 'a ctxvalue1 string') assert (id(output['k3']) == id(output['k6'][2])) assert (id(output['k4']) != id(input_obj['k4'])) assert (id(output['k4'][0]) == id(output['k4'][3])) assert (output['k5']['key5.1'] == input_obj['k5']['key5.1'] == arb_string) assert (id(output['k5']['key5.1']) == id(input_obj['k5']['key5.1']) == id(arb_string)) assert (id(output['k8']) == id(output['k5']['key5.2'])) assert (id(output['k8']) != id(arb_string_with_formatting))
class UNet3D(AbstractUNet): def __init__(self, in_channels, out_channels, final_sigmoid=True, f_maps=64, layer_order='gcr', num_groups=8, num_levels=4, is_segmentation=True, conv_padding=1, conv_upscale=2, upsample='default', dropout_prob=0.1, **kwargs): super(UNet3D, self).__init__(in_channels=in_channels, out_channels=out_channels, final_sigmoid=final_sigmoid, basic_module=DoubleConv, f_maps=f_maps, layer_order=layer_order, num_groups=num_groups, num_levels=num_levels, is_segmentation=is_segmentation, conv_padding=conv_padding, conv_upscale=conv_upscale, upsample=upsample, dropout_prob=dropout_prob, is3d=True)
class Solution(object):
    def findMin(self, nums):
        # binary search for the minimum of a rotated sorted array that may
        # contain duplicates; '//' (not '/') keeps mid an integer index.
        (l, r) = (0, len(nums) - 1)
        while (l < r) and (nums[l] >= nums[r]):
            mid = (l + r) // 2
            if nums[mid] > nums[r]:
                l = mid + 1
            elif nums[mid] < nums[l]:
                r = mid
            else:
                l += 1
        return nums[l]
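# Quick checks for findMin above on rotated sorted arrays; the second case
# exercises the duplicate-handling `l += 1` branch.
print(Solution().findMin([4, 5, 6, 7, 0, 1, 2]))  # -> 0
print(Solution().findMin([2, 2, 2, 0, 1]))        # -> 0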