code stringlengths 281 23.7M |
|---|
def test_octahedron():
    """Exercise the Octahedron shape: construction, string forms, attribute
    mutation, dict serialization, optional p3js mesh generation, and the
    default ('unnamed') name."""
    octahedron = Octahedron(12.0, name='octahedron', color='purple')
    assert octahedron.name == 'octahedron'
    # Prefer the str()/repr() built-ins over calling the dunders directly.
    assert str(octahedron) == 'Octahedron octahedron color:purple material:default radius:12.0'
    assert repr(octahedron) == 'Octahedron'
    assert octahedron.radius == 12.0
    assert octahedron.color == 'purple'
    if p3js is not None:
        # Only check the p3js mesh when pythreejs is installed.
        mesh = octahedron._p3js_mesh()
        expected_mesh = p3js.Mesh(p3js.OctahedronGeometry(radius=12.0), p3js.MeshStandardMaterial(color='purple'), name='octahedron')
        assert repr(mesh) == repr(expected_mesh)
    octahedron.name = 'octahedron1'
    assert octahedron.name == 'octahedron1'
    octahedron.radius = 2.0
    assert octahedron.radius == 2.0
    octahedron.color = 'red'
    assert octahedron.color == 'red'
    assert octahedron.generate_dict() == {'color': 'red', 'type': 'Octahedron', 'name': 'octahedron1', 'radius': 2.0, 'material': 'default'}
    assert isinstance(octahedron, Shape)
    # Name defaults to 'unnamed' when not supplied.
    unnamed = Octahedron(12.0, color='purple')
    assert unnamed.name == 'unnamed'
    assert str(unnamed) == 'Octahedron unnamed color:purple material:default radius:12.0'
    assert repr(unnamed) == 'Octahedron'
class NetworkDescription(Description):
    """Resource description specialized for network resources."""
    # NOTE(review): the two bare expressions below evaluate and discard their
    # values -- they look like the argument lists of stripped-out signature
    # check decorators (e.g. ``@rus.takes`` / ``@rus.returns``); confirm
    # against the original source before relying on them.
    ('NetworkDescription', rus.optional(dict))
    (rus.nothing)

    def __init__(self, d=None):
        """Create a network description, optionally seeded from dict *d*.

        Raises:
            se.BadParameter: if *d* carries a resource type other than NETWORK.
        """
        if d:
            if ((c.RTYPE in d) and (d[c.RTYPE] != c.NETWORK)):
                raise se.BadParameter(("Cannot create NetworkResource type '%s'" % d[c.RTYPE]))
        # Keep a handle to the bound super() proxy and run the base init.
        self._descr = super(NetworkDescription, self)
        self._descr.__init__()
        self.rtype = c.NETWORK
class FP16Optimizer(_FP16OptimizerMixin, optim.FairseqOptimizer):
    """Wrap an optimizer to support FP16 (mixed precision) training.

    FP16 gradients are copied into FP32 master parameters, the wrapped
    ``fp32_optimizer`` steps on those, and the results are copied back.

    Fixed: ``build_optimizer`` was missing ``@classmethod`` and the
    ``optimizer``/``lr_scheduler``/``optimizer_config``/``supports_flat_params``
    accessors were missing their ``@property`` decorators — the second plain
    ``def optimizer`` silently clobbered the first, and callers using
    attribute access (the parent class contract) would get bound methods.
    """

    def __init__(self, cfg: DictConfig, params, fp32_optimizer, fp32_params, **kwargs):
        """
        Args:
            cfg: config with ``optimizer``/``common``/``optimization``/
                ``distributed_training`` sections.
            params: the FP16 model parameters.
            fp32_optimizer: wrapped optimizer operating on the FP32 copies.
            fp32_params: FP32 master copies of ``params``.
        """
        super().__init__(cfg.optimizer)
        self.fp16_params = params
        self.fp32_optimizer = fp32_optimizer
        self.fp32_params = fp32_params
        if getattr(cfg.common, 'fp16_scale_window', None) is None:
            if len(cfg.optimization.update_freq) > 1:
                raise ValueError('--fp16-scale-window must be given explicitly when using a custom --update-freq schedule')
            data_parallel_size = int(cfg.distributed_training.distributed_world_size / cfg.common.model_parallel_size)
            # Heuristic default: shrink the window as the effective batch
            # (world size x update_freq) grows.
            scale_window = int(2 ** 14 / data_parallel_size / cfg.optimization.update_freq[0])
        else:
            scale_window = cfg.common.fp16_scale_window
        if not getattr(cfg.common, 'bf16', False):
            self.scaler = DynamicLossScaler(init_scale=cfg.common.fp16_init_scale, scale_window=scale_window, tolerance=cfg.common.fp16_scale_tolerance, threshold=cfg.common.threshold_loss_scale, min_loss_scale=cfg.common.min_loss_scale)
        else:
            # bfloat16 has (almost) the same dynamic range as fp32, so no
            # loss scaling is needed.
            self.scaler = None

    @classmethod
    def build_optimizer(cls, cfg: DictConfig, params, **kwargs):
        """Build the FP32 copies + wrapped optimizer and return an instance.

        Raises:
            RuntimeError: if flat params are requested but unsupported.
        """
        flatten = not getattr(cfg.common, 'fp16_no_flatten_grads', False)
        if getattr(cfg.common, 'bf16', False):
            flatten = False
        fp32_params = cls.build_fp32_params(cfg.optimizer, params, flatten=flatten)
        if flatten:
            fp32_optimizer = optim.build_optimizer(cfg.optimizer, [fp32_params])
        else:
            fp32_optimizer = optim.build_optimizer(cfg.optimizer, fp32_params)
        if flatten and not fp32_optimizer.supports_flat_params:
            raise RuntimeError(f'chosen optimizer {fp32_optimizer.__class__.__name__} does not support flat params, please set --fp16-no-flatten-grads')
        return cls(cfg, params, fp32_optimizer, fp32_params, **kwargs)

    @property
    def optimizer(self):
        # Expose the innermost torch optimizer of the wrapped FP32 optimizer.
        return self.fp32_optimizer.optimizer

    @optimizer.setter
    def optimizer(self, optimizer):
        self.fp32_optimizer.optimizer = optimizer

    @property
    def lr_scheduler(self):
        return getattr(self.fp32_optimizer, 'lr_scheduler', None)

    @property
    def optimizer_config(self):
        return self.fp32_optimizer.optimizer_config

    def get_lr(self):
        return self.fp32_optimizer.get_lr()

    def set_lr(self, lr):
        self.fp32_optimizer.set_lr(lr)

    def all_reduce_grads(self, module):
        self.fp32_optimizer.all_reduce_grads(module)

    @property
    def supports_flat_params(self):
        return self.fp32_optimizer.supports_flat_params
class SponsorshipQuerySetTests(TestCase):
    """Tests for the custom queryset/manager methods on ``Sponsorship``."""

    def setUp(self):
        # A user linked to a sponsor via a SponsorContact record.
        self.user = baker.make(settings.AUTH_USER_MODEL)
        self.contact = baker.make('sponsors.SponsorContact', user=self.user)

    def test_visible_to_user(self):
        # Visible: submitted by the user (APPLIED/FINALIZED) or belonging to
        # a sponsor the user is a contact for (APPROVED).
        # NOTE: 'submited_by' (sic) matches the model field name.
        visible = [baker.make(Sponsorship, submited_by=self.user, status=Sponsorship.APPLIED), baker.make(Sponsorship, sponsor=self.contact.sponsor, status=Sponsorship.APPROVED), baker.make(Sponsorship, submited_by=self.user, status=Sponsorship.FINALIZED)]
        # Noise: an unrelated sponsorship and a REJECTED one from this user.
        baker.make(Sponsorship)
        baker.make(Sponsorship, submited_by=self.user, status=Sponsorship.REJECTED)
        qs = Sponsorship.objects.visible_to(self.user)
        self.assertEqual(len(visible), qs.count())
        for sp in visible:
            self.assertIn(sp, qs)
        # The user's ``sponsorships`` accessor mirrors visible_to().
        self.assertEqual(list(qs), list(self.user.sponsorships))

    def test_enabled_sponsorships(self):
        today = date.today()
        two_days = timedelta(days=2)
        # Enabled = FINALIZED, currently within its date range, not overlapped.
        enabled = baker.make(Sponsorship, status=Sponsorship.FINALIZED, start_date=(today - two_days), end_date=(today + two_days))
        # Excluded: wrong status, not started yet, already finished, and one
        # overlapped by the enabled sponsorship.
        baker.make(Sponsorship, status=Sponsorship.APPLIED, start_date=(today - two_days), end_date=(today + two_days))
        baker.make(Sponsorship, status=Sponsorship.FINALIZED, start_date=(today + two_days), end_date=(today + (2 * two_days)))
        baker.make(Sponsorship, status=Sponsorship.FINALIZED, start_date=(today - (2 * two_days)), end_date=(today - two_days))
        baker.make(Sponsorship, status=Sponsorship.FINALIZED, start_date=(today - two_days), end_date=(today + two_days), overlapped_by=enabled)
        qs = Sponsorship.objects.enabled()
        self.assertEqual(1, qs.count())
        self.assertIn(enabled, qs)

    def test_filter_sponsorship_with_logo_placement_benefits(self):
        sponsorship_with_download_logo = baker.make_recipe('sponsors.tests.finalized_sponsorship')
        sponsorship_with_sponsors_logo = baker.make_recipe('sponsors.tests.finalized_sponsorship')
        simple_sponsorship = baker.make_recipe('sponsors.tests.finalized_sponsorship')
        download_logo_benefit = baker.make(SponsorBenefit, sponsorship=sponsorship_with_download_logo)
        baker.make_recipe('sponsors.tests.logo_at_download_feature', sponsor_benefit=download_logo_benefit)
        sponsors_logo_benefit = baker.make(SponsorBenefit, sponsorship=sponsorship_with_sponsors_logo)
        baker.make_recipe('sponsors.tests.logo_at_sponsors_feature', sponsor_benefit=sponsors_logo_benefit)
        regular_benefit = baker.make(SponsorBenefit, sponsorship=simple_sponsorship)
        # The filter must resolve in a single query (no N+1).
        with self.assertNumQueries(1):
            qs = list(Sponsorship.objects.with_logo_placement())
        self.assertEqual(2, len(qs))
        self.assertIn(sponsorship_with_download_logo, qs)
        self.assertIn(sponsorship_with_sponsors_logo, qs)
        # Narrow by placement location and publisher.
        with self.assertNumQueries(1):
            kwargs = {'logo_place': LogoPlacementChoices.DOWNLOAD_PAGE.value, 'publisher': PublisherChoices.FOUNDATION.value}
            qs = list(Sponsorship.objects.with_logo_placement(**kwargs))
        self.assertEqual(1, len(qs))
        self.assertIn(sponsorship_with_download_logo, qs)

    def test_filter_sponsorship_by_benefit_feature_type(self):
        sponsorship_feature_1 = baker.make_recipe('sponsors.tests.finalized_sponsorship')
        sponsorship_feature_2 = baker.make_recipe('sponsors.tests.finalized_sponsorship')
        baker.make(LogoPlacement, sponsor_benefit__sponsorship=sponsorship_feature_1)
        baker.make(TieredBenefit, sponsor_benefit__sponsorship=sponsorship_feature_2)
        # Filtering by feature class returns only the matching sponsorship,
        # in a single query.
        with self.assertNumQueries(1):
            qs = list(Sponsorship.objects.includes_benefit_feature(LogoPlacement))
        self.assertEqual(1, len(qs))
        self.assertIn(sponsorship_feature_1, qs)
def get_mesh_for_testing(xpts=None, rpts=10, Rpts=10, ypts=15, zpts=15, rcellpts=15, geometry=None, cc_submesh=None):
    """Build a ``pybamm.Mesh`` with fixed test parameter values.

    ``xpts`` (if given) overrides the per-electrode x-direction point
    counts, and ``cc_submesh`` overrides the current-collector submesh type;
    all other point counts come straight from the keyword arguments.
    """
    parameter_values = pybamm.ParameterValues(values={
        'Electrode width [m]': 0.4,
        'Electrode height [m]': 0.5,
        'Negative tab width [m]': 0.1,
        'Negative tab centre y-coordinate [m]': 0.1,
        'Negative tab centre z-coordinate [m]': 0.0,
        'Positive tab width [m]': 0.1,
        'Positive tab centre y-coordinate [m]': 0.3,
        'Positive tab centre z-coordinate [m]': 0.5,
        'Negative electrode thickness [m]': 1 / 3,
        'Separator thickness [m]': 1 / 3,
        'Positive electrode thickness [m]': 1 / 3,
        'Negative particle radius [m]': 0.5,
        'Positive particle radius [m]': 0.5,
        'Inner cell radius [m]': 0.2,
        'Outer cell radius [m]': 1.0,
        'Negative minimum particle radius [m]': 0.0,
        'Negative maximum particle radius [m]': 1.0,
        'Positive minimum particle radius [m]': 0.0,
        'Positive maximum particle radius [m]': 1.0,
    })
    if geometry is None:
        geometry = pybamm.battery_geometry(options={'particle size': 'distribution'})
    parameter_values.process_geometry(geometry)
    # Every 1D domain gets a uniform submesh; the current collector is 0D
    # unless the caller supplies its own submesh type.
    submesh_types = {domain: pybamm.Uniform1DSubMesh for domain in (
        'negative electrode', 'separator', 'positive electrode',
        'negative particle', 'positive particle',
        'negative particle size', 'positive particle size')}
    submesh_types['current collector'] = cc_submesh or pybamm.SubMesh0D
    if xpts is None:
        xn_pts, xs_pts, xp_pts = 40, 25, 35
    else:
        xn_pts = xs_pts = xp_pts = xpts
    var_pts = {
        'x_n': xn_pts, 'x_s': xs_pts, 'x_p': xp_pts,
        'r_n': rpts, 'r_p': rpts,
        'y': ypts, 'z': zpts,
        'r_macro': rcellpts,
        'R_n': Rpts, 'R_p': Rpts,
    }
    return pybamm.Mesh(geometry, submesh_types, var_pts)
def create(feedback, device_uuid):
    """Persist text and choice feedback for a schedule item, atomically.

    Returns a ``{'text': ..., 'choices': ...}`` dict of created records on
    success, or ``False`` when the transaction fails (integrity error or a
    referenced object is missing).
    """
    device = Device.objects.get(uuid=device_uuid)
    schedule_item_id = feedback.validated_data['schedule_item_id']
    text_payload = feedback.validated_data.get('text')
    choice_payload = feedback.validated_data.get('choices')
    try:
        # Either both feedback kinds are saved or neither is.
        with transaction.atomic():
            text = []
            choices = []
            if text_payload:
                text = create_text_feedback(schedule_item_id=schedule_item_id, feedbacks=text_payload, device=device)
            if choice_payload:
                choices = create_choice_feedback(schedule_item_id=schedule_item_id, feedbacks=choice_payload, device=device)
            return {'text': text, 'choices': choices}
    except (IntegrityError, ObjectDoesNotExist) as e:
        print(e)
        return False
def test_should_follow_specification_comparison():
    """Walk the SemVer precedence chain from the spec and check that each
    adjacent pair compares in both directions."""
    chain = ['1.0.0-alpha', '1.0.0-alpha.1', '1.0.0-beta.2', '1.0.0-beta.11', '1.0.0-rc.1', '1.0.0', '1.3.7+build']
    for lower, higher in zip(chain, chain[1:]):
        assert compare(lower, higher) == -1, '%s should be lesser than %s' % (lower, higher)
        assert compare(higher, lower) == 1, '%s should be higher than %s' % (higher, lower)
class CustomRouterMixin(CreateDataMixin):
    """Test mixin that installs custom router/backends/handlers settings for
    the duration of a test and restores the previous values afterwards."""

    router_class = 'rapidsms.router.blocking.BlockingRouter'
    backends = {}
    handlers = None

    def _pre_rapidsms_setup(self):
        # Snapshot each setting before overriding it.
        self._RAPIDSMS_HANDLERS = getattr(settings, 'RAPIDSMS_HANDLERS', None)
        self.set_handlers()
        self._INSTALLED_BACKENDS = getattr(settings, 'INSTALLED_BACKENDS', {})
        self.set_backends()
        self._RAPIDSMS_ROUTER = getattr(settings, 'RAPIDSMS_ROUTER', None)
        self.set_router()

    def _post_rapidsms_teardown(self):
        # Restore the snapshots taken in _pre_rapidsms_setup().
        settings.INSTALLED_BACKENDS = self._INSTALLED_BACKENDS
        settings.RAPIDSMS_ROUTER = self._RAPIDSMS_ROUTER
        if self._RAPIDSMS_HANDLERS is not None:
            settings.RAPIDSMS_HANDLERS = self._RAPIDSMS_HANDLERS
        elif hasattr(settings, 'RAPIDSMS_HANDLERS'):
            # There was no saved value: remove the attribute we added.
            del settings.RAPIDSMS_HANDLERS

    def __call__(self, result=None):
        self._pre_rapidsms_setup()
        super(CustomRouterMixin, self).__call__(result)
        self._post_rapidsms_teardown()

    def receive(self, text, connection, **kwargs):
        """Delegate to the module-level receive()."""
        return receive(text, connection, **kwargs)

    def send(self, text, connections, **kwargs):
        """Delegate to the module-level send()."""
        return send(text, connections, **kwargs)

    def get_router(self):
        return get_router()

    def set_handlers(self):
        # Only override handlers when the subclass declared some.
        if self.handlers is not None:
            settings.RAPIDSMS_HANDLERS = self.handlers

    def set_backends(self):
        settings.INSTALLED_BACKENDS = self.backends

    def set_router(self):
        settings.RAPIDSMS_ROUTER = self.router_class

    def lookup_connections(self, backend, identities):
        return lookup_connections(backend, identities)
class BackupDB(ProductionCommand):
    """Production command ('backupdb') that backs up the database to a
    directory, optionally as a named privileged user."""

    keyword = 'backupdb'

    def assemble(self):
        """Register the command-line options for this command."""
        super().assemble()
        self.parser.add_argument(
            '-d', '--directory',
            dest='directory', default='/tmp',
            help='the directory to back up to')
        self.parser.add_argument(
            '-U', '--super-user-name',
            dest='super_user_name', default=None,
            help='the name of the priviledged user who may perform this operation')

    def execute(self, args):
        """Run the backup and return its result."""
        super().execute(args)
        return self.sys_control.backup_database(
            args.directory, super_user_name=args.super_user_name)
class FollowedBy(ParseElementEnhance):
    """Lookahead matcher: succeeds when the wrapped expression matches at the
    current location, but consumes no input and contributes no tokens."""

    def __init__(self, expr):
        super().__init__(expr)
        # A pure lookahead can always succeed without consuming characters.
        self.mayReturnEmpty = True

    def parseImpl(self, instring, loc, doActions=True):
        # Run the inner parse only for validation, then discard its tokens
        # and report the unchanged location.
        _, result = self.expr._parse(instring, loc, doActions=doActions)
        del result[:]
        return loc, result
class TreeConstraintsSize(TreeConstraints):
    """Tree-shape constraints that split a fixed ``total`` size evenly across
    the leaves at every level."""

    def branch(self, spec: TreeSpec) -> TreeSpec:
        """Return the spec one level deeper: each leaf fans out by
        ``branch_factor`` and the total is re-divided across the new leaves."""
        new_leaves = spec.leaves * self.branch_factor
        return TreeSpec(
            depth=spec.depth + 1,
            size=spec.size + new_leaves,
            leaves=new_leaves,
            leaf_size=self.total // new_leaves,
        )

    def get_initial_chunksize(self):
        # Even split of the total across the first branching.
        return self.total // self.branch_factor
class BaseOptions:
    """Argument container for training/testing experiments.

    ``initialize`` registers every option on an ``argparse`` parser;
    ``gather_options``/``parse`` build the parser once (cached on
    ``self.parser``) and parse the command line.

    Fixed: ``gather_options`` raised ``NameError`` on any call after the
    first (it read an unbound local ``parser``); ``--hourglass_dim`` used a
    string default with ``type=int``; the ``initialized`` flag was set
    before the last option was registered.
    """

    def __init__(self):
        # The parser is built lazily on the first gather_options() call.
        self.initialized = False

    def initialize(self, parser):
        """Register all options on *parser* and return it."""
        g_data = parser.add_argument_group('Data')
        g_data.add_argument('--dataset_path', type=str, default='/BS/xxie-3/static00/newdata', help='path to dataset')
        g_data.add_argument('--exp_name', type=str, default='train')
        g_data.add_argument('--test_kid', type=int, default=1)
        g_data.add_argument('--image_size', default=(2048, 1536), help='original image size')
        g_data.add_argument('--net_img_size', nargs='+', type=int, default=[256, 192], help='image size sent to network')
        g_data.add_argument('--focal_length', default=(1000, 1000), help='focal length for the perspective camera')
        g_data.add_argument('--subfolder_name', default='recon_data', help='name for the subfolder in a dataset')
        g_data.add_argument('--depth2color', default=True, help='perform depth to color transform or not', action='store_true')
        g_train = parser.add_argument_group('Training')
        g_train.add_argument('--batch_size', type=int, default=8, help='input batch size')
        g_train.add_argument('--learning_rate', type=float, default=0.001, help='adam learning rate')
        g_train.add_argument('--learning_rateC', type=float, default=0.001, help='adam learning rate')
        g_train.add_argument('--num_epochs', type=int, default=100, help='num epoch to train')
        g_train.add_argument('--num_samples_train', type=int, default=5000, help='number of training samples used for training')
        g_train.add_argument('--multi_gpus', default=True, action='store_true', help='whether multiple GPUs are used during training')
        g_train.add_argument('--split_file', default='/BS/xxie2020/work/kindata/Apr20/frames/split.json', help='the file specifying how to split train and test sequence')
        g_train.add_argument('--clamp_thres', type=float, default=0.1, help='the threshold to clamp df prediction when computing loss')
        g_train.add_argument('--mix_samp', default=True, action='store_true')
        g_train.add_argument('--sigmas', default=[0.08, 0.02, 0.003], nargs='+', type=float, help='gaussian variance fou boundary sampling, unit meter')
        g_train.add_argument('--person_obj_ratio', default=[0.5, 0.5], nargs='+', type=float, help='ratio for person and object points')
        g_train.add_argument('--ratios', type=float, nargs='+', default=[0.01, 0.49, 0.5], help='ratio between different sigmas')
        g_train.add_argument('--clean_only', default=False, help='use only clean data or not', action='store_true')
        g_data.add_argument('--loadSize', type=int, default=512, help='load size of input image')
        g_exp = parser.add_argument_group('Experiment')
        g_exp.add_argument('--name', type=str, default='example', help='name of the experiment. It decides where to store samples and models')
        g_exp.add_argument('--debug', action='store_true', help='debug mode or not')
        g_exp.add_argument('--num_views', type=int, default=1, help='How many views to use for multiview network.')
        g_exp.add_argument('--random_multiview', action='store_true', help='Select random multiview combination.')
        g_train.add_argument('--gpu_id', type=int, default=0, help='gpu id for cuda')
        g_train.add_argument('--gpu_ids', nargs='+', type=int, default=[0], help='gpu ids: e.g. 0 0,1,2, 0,2, -1 for CPU mode')
        g_train.add_argument('--local_rank', type=int, default=-1, metavar='N', help='Local process rank.')
        g_train.add_argument('--num_workers', default=30, type=int, help='number of thres for loading data')
        g_train.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
        g_train.add_argument('--pin_memory', action='store_true', help='pin_memory')
        g_train.add_argument('--freq_plot', type=int, default=10, help='freqency of the error plot')
        g_train.add_argument('--freq_save', type=int, default=50, help='freqency of the save_checkpoints')
        g_train.add_argument('--freq_save_ply', type=int, default=100, help='freqency of the save ply')
        g_train.add_argument('--no_gen_mesh', action='store_true')
        g_train.add_argument('--no_num_eval', action='store_true')
        g_train.add_argument('--resume_epoch', type=int, default=-1, help='epoch resuming the training')
        g_train.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
        g_train.add_argument('--scan_data', action='store_true', default=False)
        g_train.add_argument('--data_name', type=str)
        g_test = parser.add_argument_group('Testing')
        g_test.add_argument('--resolution', type=int, default=256, help='# of grid in mesh reconstruction')
        g_test.add_argument('--test_folder_path', type=str, default=None, help='the folder of test image')
        g_test.add_argument('--eval_num', type=int, default=10, help='number of examples to evaluate')
        g_sample = parser.add_argument_group('Sampling')
        g_sample.add_argument('--sigma', type=float, default=5.0, help='perturbation standard deviation for positions')
        g_sample.add_argument('--num_sample_inout', type=int, default=5000, help='# of sampling points')
        g_sample.add_argument('--num_sample_color', type=int, default=0, help='# of sampling points')
        g_sample.add_argument('--z_size', type=float, default=200.0, help='z normalization factor')
        g_sample.add_argument('--realdepth', default=False, action='store_true', help='input real depth to the network or inverse')
        g_sample.add_argument('--densepc_num', type=int, default=10000, help='number of dense point cloud to generate at evaluation time')
        g_model = parser.add_argument_group('Model')
        g_model.add_argument('--model_type', default='comb', help='which model to use for training')
        g_model.add_argument('--input_type', default='RGBM', help='RGB, RGB+D, RGB+normal')
        g_model.add_argument('--num_parts', default=15, type=int, help='number of output part labels')
        g_model.add_argument('--encode_type', default='normal_hg')
        g_model.add_argument('--surface_classifier', default=False, help='use surface classification or nor', action='store_true')
        g_model.add_argument('--joint_df', default=False, help='joint distance field for human and object', action='store_true')
        parser.add_argument('--reso_grid', type=int, default=32, help='# resolution of grid')
        parser.add_argument('--pn_hid_dim', type=int, default=32, help='# hidden dim of point net')
        parser.add_argument('--num_anchor_points', type=int, default=600, help='number of anchor points')
        g_model.add_argument('--norm', type=str, default='group', help='instance normalization or batch normalization or group normalization')
        g_model.add_argument('--norm_color', type=str, default='instance', help='instance normalization or batch normalization or group normalization')
        g_model.add_argument('--bin_classifier', default=True, action='store_true', help='use binary classifier or not')
        g_model.add_argument('--num_stack', type=int, default=3, help='# of hourglass')
        g_model.add_argument('--num_hourglass', type=int, default=2, help='# of stacked layer of hourglass')
        g_model.add_argument('--skip_hourglass', action='store_true', help='skip connection in hourglass')
        g_model.add_argument('--hg_down', type=str, default='ave_pool', help='ave pool || conv64 || conv128')
        # Default must be an int to match type=int (was the string '256').
        g_model.add_argument('--hourglass_dim', type=int, default=256, help='256 | 512')
        g_model.add_argument('--mlp_dim', nargs='+', default=[257, 1024, 512, 256, 128, 1], type=int, help='# of dimensions of mlp')
        g_model.add_argument('--mlp_dim_color', nargs='+', default=[513, 1024, 512, 256, 128, 3], type=int, help='# of dimensions of color mlp')
        g_model.add_argument('--use_tanh', action='store_true', help='using tanh after last conv of image_filter network')
        parser.add_argument('--random_flip', action='store_true', help='if random flip')
        parser.add_argument('--random_trans', action='store_true', help='if random flip')
        parser.add_argument('--random_scale', action='store_true', help='if random flip')
        parser.add_argument('--no_residual', action='store_true', help='no skip connection in mlp')
        parser.add_argument('--schedule', type=int, nargs='+', default=[60, 80], help='Decrease learning rate at these epochs.')
        parser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma on schedule.')
        parser.add_argument('--color_loss_type', type=str, default='l1', help='mse | l1')
        parser.add_argument('--z_feat', help='which z feature is sent to the network')
        parser.add_argument('--projection_mode', default='perspective', type=str)
        parser.add_argument('--orth_size', type=int, default=512)
        parser.add_argument('--orth_scale', type=float, default=0.75)
        parser.add_argument('--val_test_error', action='store_true', help='validate errors of test data')
        parser.add_argument('--val_train_error', action='store_true', help='validate errors of train data')
        parser.add_argument('--gen_test_mesh', action='store_true', help='generate test mesh')
        parser.add_argument('--gen_train_mesh', action='store_true', help='generate train mesh')
        parser.add_argument('--all_mesh', action='store_true', help='generate meshs from all hourglass output')
        parser.add_argument('--num_gen_mesh_test', type=int, default=1, help='how many meshes to generate during testing')
        parser.add_argument('--filter_val', type=float, default=0.004, help='threshold to filter out points not on the surface')
        parser.add_argument('--sparse_thres', type=float, default=0.03, help='threshold to get sparse pc around the surface')
        parser.add_argument('--save_densepc', action='store_true', help='save generated dense pc during evaluation or not, do not save when evaluate over all data')
        parser.add_argument('--save_npz', action='store_true', default=False, help='save npz during dense pc generation or not')
        parser.add_argument('--pcsave_name', default=None, help='name to save for this experiment evaluation')
        parser.add_argument('--seq_folder', type=str, help='which sequence to evaluate')
        parser.add_argument('--checkpoint', type=str, default=None, help='which checkpoint to load for evaluation')
        parser.add_argument('--checkpoints_path', type=str, default='./checkpoints', help='path to save checkpoints')
        parser.add_argument('--load_netG_checkpoint_path', type=str, default=None, help='path to save checkpoints')
        parser.add_argument('--load_netC_checkpoint_path', type=str, default=None, help='path to save checkpoints')
        parser.add_argument('--results_path', type=str, default='./results', help='path to save results ply')
        parser.add_argument('--load_checkpoint_path', type=str, help='path to save results ply')
        parser.add_argument('--single', type=str, default='', help='single data for training')
        parser.add_argument('--mask_path', type=str, help='path for input mask')
        parser.add_argument('--img_path', type=str, help='path for input image')
        group_aug = parser.add_argument_group('aug')
        group_aug.add_argument('--aug_alstd', type=float, default=0.0, help='augmentation pca lighting alpha std')
        group_aug.add_argument('--aug_bri', type=float, default=0.0, help='augmentation brightness')
        group_aug.add_argument('--aug_con', type=float, default=0.0, help='augmentation contrast')
        group_aug.add_argument('--aug_sat', type=float, default=0.0, help='augmentation saturation')
        group_aug.add_argument('--aug_hue', type=float, default=0.0, help='augmentation hue')
        group_aug.add_argument('--aug_blur', type=float, default=0.0, help='augmentation blur')
        group_aug.add_argument('--nocrop', default=False, action='store_true')
        parser.add_argument('--overwrite', default=False, action='store_true')
        # Only mark as initialized once every argument has been registered
        # (the original set the flag before the last option).
        self.initialized = True
        return parser

    def gather_options(self):
        """Build the parser on first use (cached on ``self.parser``) and
        parse ``sys.argv``."""
        if not self.initialized:
            parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
            self.parser = self.initialize(parser)
        # Reuse the cached parser on later calls; the original re-read an
        # unbound local ``parser`` here and raised NameError.
        return self.parser.parse_args()

    def print_options(self, opt):
        """Pretty-print the parsed options, flagging non-default values."""
        message = ''
        message += ' Options \n'
        for (k, v) in sorted(vars(opt).items()):
            comment = ''
            default = self.parser.get_default(k)
            if v != default:
                comment = '\t[default: %s]' % str(default)
            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
        message += ' End '
        print(message)

    def parse(self):
        """Parse the command line and return the options namespace."""
        opt = self.gather_options()
        return opt
def draw_plot(model, train_x, train_y, test_x, test_y, inducing_x, inducing_f, ax, color, show_legend=False):
    """Plot a 1-D GP-style fit on *ax*: predictive mean with a 2-sigma band,
    observed (train) and unobserved (test) points, and inducing points.

    NOTE(review): assumes ``model.predict`` returns a (mean, variance) pair
    over the grid and that the x/y inputs are torch tensors with a trailing
    singleton dimension -- confirm against the caller.
    """
    inducing_x = inducing_x.detach().cpu()
    inducing_f = inducing_f.detach().cpu()
    (train_x, train_y) = (train_x.cpu().squeeze((- 1)), train_y.cpu().squeeze((- 1)))
    (test_x, test_y) = (test_x.cpu().squeeze((- 1)), test_y.cpu().squeeze((- 1)))
    # Evaluation grid spans all data with a small padding on each side.
    x_min = min(train_x.min(), test_x.min(), inducing_x.min())
    x_max = max(train_x.max(), test_x.max(), inducing_x.max())
    xlim = ((x_min - 0.1), (x_max + 0.1))
    x_grid = try_cuda(torch.linspace(*xlim, 200))
    model.eval()
    with torch.no_grad():
        (mean, var) = model.predict(x_grid)
        # 2-sigma predictive band.
        (lb, ub) = ((mean - (2 * var.sqrt())), (mean + (2 * var.sqrt())))
    mean = mean.cpu().view((- 1))
    (lb, ub) = (lb.cpu().view((- 1)), ub.cpu().view((- 1)))
    ax.plot(x_grid.cpu(), mean, linewidth=1, color=color)
    ax.fill_between(x_grid.cpu(), lb, ub, alpha=0.22, color=color)
    ax.scatter(train_x.cpu(), train_y.cpu(), color='black', s=32, edgecolors='none', label='observed')
    # NOTE(review): if the caller passed test_x=None the squeeze above would
    # already have raised, so this guard looks unreachable -- confirm intent.
    if (test_x is not None):
        ax.scatter(test_x.cpu(), test_y.cpu(), color='black', s=32, facecolors='none', label='unobserved')
    ax.scatter(inducing_x, inducing_f, color='red', marker='+', linewidth=3, s=128, label='inducing')
    # Axes are pinned to a canonical window regardless of the computed xlim.
    ax.set_xlim(((- 1.1), 1.1))
    ax.set_xlabel('x')
    ax.set_ylim(((- 3), 3))
    sns.despine()
    if show_legend:
        ax.legend(ncol=3, bbox_to_anchor=(0.0, 1.2))
    plt.tight_layout()
    return ax
def when_program_starts_5(self):
    """Auto-generated 'when program starts' handler (translated from a
    block-based/Scratch-style project)."""
    self.wait(3.0)
    self.add_value_to_list('elle', 'bob')
    # NOTE(review): the 'NO TRANSLATION: ...' strings are converter
    # placeholders for blocks that could not be translated; ``5.0 % <str>``
    # below would raise TypeError at runtime. Fix the source project or the
    # converter before relying on this handler.
    if ((5.0 % 'NO TRANSLATION: data_lengthoflist') > 4):
        self.create_clone_of('NO TRANSLATION: control_create_clone_of_menu')
        self.create_clone_of('NO TRANSLATION: control_create_clone_of_menu')
class HallucinationOrigin(nn.Module):
    """Super-resolution generator head: a stack of residual blocks followed
    by log2(scala) upsampling blocks and a 7x7 output conv with tanh.

    Args:
        scala: total upsampling factor (assumed a power of two, since the
            number of upsample blocks is ``int(log2(scala))``).
        features: channel width of the residual trunk.
        n_residual_blocks: number of residual blocks in the trunk.
        big_short_connect: if True, add the input features back after the
            trunk (long skip connection).
        output_channel: number of channels produced by the final conv.
    """

    def __init__(self, scala=8, features=64, n_residual_blocks=9, big_short_connect=False, output_channel=1):
        super(HallucinationOrigin, self).__init__()
        self.n_residual_blocks = n_residual_blocks
        self.scala = scala
        self.connect = big_short_connect
        # Residual trunk: residual_block1 .. residual_blockN.
        for i in range(self.n_residual_blocks):
            self.add_module('residual_block' + str(i + 1), residualBlock(features))
        self.pad = nn.ReflectionPad2d(1)
        self.conv = nn.Conv2d(features, features, 3, stride=1, padding=0)
        self.bn = nn.BatchNorm2d(features)
        # One upsample block per factor of two: upsample1 .. upsampleK.
        for i in range(int(log2(self.scala))):
            self.add_module('upsample' + str(i + 1), upsampleBlock(features, features * 4))
        self.pad2 = nn.ReflectionPad2d(3)
        self.conv2 = nn.Conv2d(features, output_channel, 7, stride=1, padding=0)

    def forward(self, features):
        y = features.clone()
        # getattr() is the idiomatic way to fetch the registered submodules
        # (the original called self.__getattr__ directly).
        for i in range(self.n_residual_blocks):
            y = getattr(self, 'residual_block' + str(i + 1))(y)
        if self.connect:
            # Long skip connection around the residual trunk.
            x = self.bn(self.conv(self.pad(y))) + features
        else:
            x = self.bn(self.conv(self.pad(y)))
        for i in range(int(log2(self.scala))):
            x = getattr(self, 'upsample' + str(i + 1))(x)
        # F.tanh is deprecated in modern PyTorch; use the Tensor method.
        return self.conv2(self.pad2(x)).tanh()
class ResNet18(Module):
    """ResNet-18 backbone (meta-learning variant): every layer accepts an
    optional ``params`` dict of externally supplied weights, and the stem
    activation can be switched between ReLU and a Rational activation.

    Fixed: ``last_conv`` referenced ``self.layer4_2``, which does not exist
    (the attribute is registered as ``layer_4_2``), raising AttributeError.
    """

    def __init__(self):
        super(ResNet18, self).__init__()
        self.conv1 = Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        # Learnable rational activation, selectable at forward time.
        self.ratl = Rational(approx_func='relu', cuda=False)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer_1_1 = BasicBlock(inplanes=64, planes=64, stride=1, downsample=None, groups=1, base_width=64, dilation=1, norm_layer=BatchNorm2d)
        self.layer_1_2 = BasicBlock(inplanes=64, planes=64, groups=1, base_width=64, dilation=1, norm_layer=BatchNorm2d)
        downsample = Sequential(conv1x1(64, 128, 2), BatchNorm2d(128))
        self.layer_2_1 = BasicBlock(inplanes=64, planes=128, stride=2, downsample=downsample, groups=1, base_width=64, dilation=1, norm_layer=BatchNorm2d)
        self.layer_2_2 = BasicBlock(inplanes=128, planes=128, groups=1, base_width=64, dilation=1, norm_layer=BatchNorm2d)
        downsample = Sequential(conv1x1(128, 256, 2), BatchNorm2d(256))
        self.layer_3_1 = BasicBlock(inplanes=128, planes=256, stride=2, downsample=downsample, groups=1, base_width=64, dilation=1, norm_layer=BatchNorm2d)
        self.layer_3_2 = BasicBlock(inplanes=256, planes=256, groups=1, base_width=64, dilation=1, norm_layer=BatchNorm2d)
        downsample = Sequential(conv1x1(256, 512, 2), BatchNorm2d(512))
        self.layer_4_1 = BasicBlock(inplanes=256, planes=512, stride=2, downsample=downsample, groups=1, base_width=64, dilation=1, norm_layer=BatchNorm2d)
        self.layer_4_2 = BasicBlock(inplanes=512, planes=512, groups=1, base_width=64, dilation=1, norm_layer=BatchNorm2d)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.out_dim = (512 * BasicBlock.expansion)
        # Standard He init for convs; unit-gamma/zero-beta for batch norms.
        for m in self.modules():
            if isinstance(m, Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _forward_impl(self, x, params=None, activation='relu'):
        """Run the backbone; returns per-stage feature maps and the pooled
        feature vector."""
        x = self.conv1(x, get_child_dict(params, 'conv1'))
        x = self.bn1(x, get_child_dict(params, 'bn1'))
        x = (self.relu(x) if (activation == 'relu') else self.ratl(x))
        x = self.maxpool(x)
        x_1 = self.layer_1_1(x, get_child_dict(params, 'layer_1_1'), activation=activation)
        x_1 = self.layer_1_2(x_1, get_child_dict(params, 'layer_1_2'), activation=activation)
        x_2 = self.layer_2_1(x_1, get_child_dict(params, 'layer_2_1'), activation=activation)
        x_2 = self.layer_2_2(x_2, get_child_dict(params, 'layer_2_2'), activation=activation)
        x_3 = self.layer_3_1(x_2, get_child_dict(params, 'layer_3_1'), activation=activation)
        x_3 = self.layer_3_2(x_3, get_child_dict(params, 'layer_3_2'), activation=activation)
        x_4 = self.layer_4_1(x_3, get_child_dict(params, 'layer_4_1'), activation=activation)
        x_4 = self.layer_4_2(x_4, get_child_dict(params, 'layer_4_2'), activation=activation)
        pooled = self.avgpool(x_4)
        features = torch.flatten(pooled, 1)
        return {'fmaps': [x_1, x_2, x_3, x_4], 'features': features}

    def forward(self, x, params=None, activation='relu'):
        assert (activation in ['relu', 'ratl'])
        return self._forward_impl(x, params, activation)

    def last_conv(self):
        """Return the final conv of the last residual block (was a broken
        ``self.layer4_2`` attribute reference)."""
        return self.layer_4_2.conv2
class MixedInt8TestPipeline(BaseMixedInt8Test):
    """End-to-end check that the high-level ``pipeline`` API works with a
    model loaded in 8-bit (bitsandbytes) mode."""

    def setUp(self):
        super().setUp()

    def tearDown(self):
        # Drop the pipeline, collect, then release cached GPU memory so the
        # next test starts with a clean allocator state.
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        # device_map='auto' + load_in_8bit routes the weights through the
        # 8-bit quantized loading path.
        self.pipe = pipeline('text-generation', model=self.model_name, model_kwargs={'device_map': 'auto', 'load_in_8bit': True}, max_new_tokens=self.MAX_NEW_TOKENS)
        pipeline_output = self.pipe(self.input_text)
        self.assertEqual(pipeline_output[0]['generated_text'], self.EXPECTED_OUTPUT)
class RequestInterceptor(QWebEngineUrlRequestInterceptor):
    """QtWebEngine URL request interceptor.

    Maps Qt resource types onto backend-neutral ones, runs registered
    interceptors (ad blocking etc.), and injects/strips headers.
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        # Static mapping from Qt's resource-type enum to qutebrowser's
        # backend-neutral interceptors.ResourceType.
        self._resource_types = {QWebEngineUrlRequestInfo.ResourceType.ResourceTypeMainFrame: interceptors.ResourceType.main_frame, QWebEngineUrlRequestInfo.ResourceType.ResourceTypeSubFrame: interceptors.ResourceType.sub_frame, QWebEngineUrlRequestInfo.ResourceType.ResourceTypeStylesheet: interceptors.ResourceType.stylesheet, QWebEngineUrlRequestInfo.ResourceType.ResourceTypeScript: interceptors.ResourceType.script, QWebEngineUrlRequestInfo.ResourceType.ResourceTypeImage: interceptors.ResourceType.image, QWebEngineUrlRequestInfo.ResourceType.ResourceTypeFontResource: interceptors.ResourceType.font_resource, QWebEngineUrlRequestInfo.ResourceType.ResourceTypeSubResource: interceptors.ResourceType.sub_resource, QWebEngineUrlRequestInfo.ResourceType.ResourceTypeObject: interceptors.ResourceType.object, QWebEngineUrlRequestInfo.ResourceType.ResourceTypeMedia: interceptors.ResourceType.media, QWebEngineUrlRequestInfo.ResourceType.ResourceTypeWorker: interceptors.ResourceType.worker, QWebEngineUrlRequestInfo.ResourceType.ResourceTypeSharedWorker: interceptors.ResourceType.shared_worker, QWebEngineUrlRequestInfo.ResourceType.ResourceTypePrefetch: interceptors.ResourceType.prefetch, QWebEngineUrlRequestInfo.ResourceType.ResourceTypeFavicon: interceptors.ResourceType.favicon, QWebEngineUrlRequestInfo.ResourceType.ResourceTypeXhr: interceptors.ResourceType.xhr, QWebEngineUrlRequestInfo.ResourceType.ResourceTypePing: interceptors.ResourceType.ping, QWebEngineUrlRequestInfo.ResourceType.ResourceTypeServiceWorker: interceptors.ResourceType.service_worker, QWebEngineUrlRequestInfo.ResourceType.ResourceTypeCspReport: interceptors.ResourceType.csp_report, QWebEngineUrlRequestInfo.ResourceType.ResourceTypePluginResource: interceptors.ResourceType.plugin_resource, QWebEngineUrlRequestInfo.ResourceType.ResourceTypeUnknown: interceptors.ResourceType.unknown, QWebEngineUrlRequestInfo.ResourceType.ResourceTypeNavigationPreloadMainFrame: interceptors.ResourceType.preload_main_frame, 
        QWebEngineUrlRequestInfo.ResourceType.ResourceTypeNavigationPreloadSubFrame: interceptors.ResourceType.preload_sub_frame}
        # Resource types only available in some Qt versions: resolve the enum
        # member dynamically and skip it if this Qt build lacks it.
        new_types = {'WebSocket': interceptors.ResourceType.websocket}
        for (qt_name, qb_value) in new_types.items():
            qt_value = getattr(QWebEngineUrlRequestInfo.ResourceType, f'ResourceType{qt_name}', None)
            if (qt_value is not None):
                self._resource_types[qt_value] = qb_value

    def install(self, profile):
        """Install this interceptor on the given profile."""
        profile.setUrlRequestInterceptor(self)

    def interceptRequest(self, info):
        """Handle a single intercepted request (called by Qt).

        Runs registered interceptors, enforces the qute://settings CSRF
        guard, and applies custom / Referer / User-Agent headers.
        """
        # Optional verbose request logging, enabled via the debug flag.
        if ('log-requests' in objects.debug_flags):
            resource_type_str = debug.qenum_key(QWebEngineUrlRequestInfo, info.resourceType())
            navigation_type_str = debug.qenum_key(QWebEngineUrlRequestInfo, info.navigationType())
            log.network.debug('{} {}, first-party {}, resource {}, navigation {}'.format(bytes(info.requestMethod()).decode('ascii'), info.requestUrl().toDisplayString(), info.firstPartyUrl().toDisplayString(), resource_type_str, navigation_type_str))
        url = info.requestUrl()
        first_party = info.firstPartyUrl()
        if (not url.isValid()):
            log.network.debug('Ignoring invalid intercepted URL: {}'.format(url.errorString()))
            return
        try:
            resource_type = self._resource_types[info.resourceType()]
        except KeyError:
            # Unknown Qt resource type: warn but keep handling the request.
            log.network.warning('Resource type {} not found in RequestInterceptor dict.'.format(debug.qenum_key(QWebEngineUrlRequestInfo, info.resourceType())))
            resource_type = interceptors.ResourceType.unknown
        is_xhr = (info.resourceType() == QWebEngineUrlRequestInfo.ResourceType.ResourceTypeXhr)
        # qute://settings/set may only be called as an XHR from the settings
        # page itself; anything else is treated as a CSRF attempt and blocked.
        if ((url.scheme(), url.host(), url.path()) == ('qute', 'settings', '/set')):
            if ((first_party != QUrl('qute://settings/')) or (not is_xhr)):
                log.network.warning('Blocking malicious request from {} to {}'.format(first_party.toDisplayString(), url.toDisplayString()))
                info.block(True)
                return
        # Let registered interceptors (e.g. the ad blocker) decide.
        request = WebEngineRequest(first_party_url=first_party, request_url=url, resource_type=resource_type, webengine_info=info)
        interceptors.run(request)
        if request.is_blocked:
            info.block(True)
        for (header, value) in shared.custom_headers(url=url):
            # Don't override the Accept header for XHRs -- presumably pages
            # rely on the browser default there; confirm against callers.
            if ((header.lower() == b'accept') and is_xhr):
                continue
            info.setHttpHeader(header, value)
        if (config.cache['content.headers.referer'] == 'never'):
            info.setHttpHeader(b'Referer', b'')
        user_agent = websettings.user_agent(url)
        info.setHttpHeader(b'User-Agent', user_agent.encode('ascii'))
def unlinearize_term(index, n_orbitals):
    """Invert the linearization of interaction-operator terms.

    Index 0 is the constant term, indices 1..n_orbitals**2 are one-body
    terms a^dag_p a_q, and the remaining indices are two-body terms
    a^dag_p a^dag_q a_r a_s, each encoded in base ``n_orbitals``.
    """
    if not index:
        # Constant (identity) term.
        return ()
    one_body_count = n_orbitals ** 2
    if 0 < index <= one_body_count:
        # One-body term: index - 1 encodes (p, q) in base n_orbitals.
        q, p = divmod(index - 1, n_orbitals)
        return ((p, 1), (q, 0))
    # Two-body term: peel off s, r, q, p digits in base n_orbitals.
    remainder = index - (1 + one_body_count)
    s, remainder = divmod(remainder, n_orbitals ** 3)
    r, remainder = divmod(remainder, n_orbitals ** 2)
    q, p = divmod(remainder, n_orbitals)
    return ((p, 1), (q, 1), (r, 0), (s, 0))
class NetworkBlock(nn.Module):
    """One stage of a WideResNet-style network: `nb_layers` stacked blocks.

    Only the first block of the stage changes the channel count and applies
    the stride; the remaining blocks map out_planes -> out_planes, stride 1.
    """

    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0, constr_activation=None):
        super(NetworkBlock, self).__init__()
        self.constr_activation = constr_activation
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)

    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        """Build the sequential stack of blocks.

        Fix: replaced the `(cond and a) or b` idiom with real conditional
        expressions -- the old form silently falls through to `b` whenever
        `a` is falsy (e.g. in_planes == 0 or stride == 0), which is not
        what "use `a` for the first block" means.
        """
        layers = []
        for i in range(int(nb_layers)):
            first = (i == 0)
            layers.append(block(
                in_planes if first else out_planes,
                out_planes,
                stride if first else 1,
                dropRate,
                constr_activation=self.constr_activation,
            ))
        return nn.Sequential(*layers)

    def forward(self, x):
        return self.layer(x)
def simplex_projection(v, b=1):
    """Project `v` onto the simplex {w : w >= 0, sum(w) <= b} (Duchi-style).

    Negative entries are clipped first; the result is then soft-thresholded
    so that the positive mass does not exceed `b`.
    """
    v = np.asarray(v)
    n = len(v)
    # Clip negatives before projecting (as in the original variant used here).
    v = np.where(v > 0, v, 0)
    u = np.sort(v)[::-1]                              # descending
    cssv = np.cumsum(u)
    # Largest index where u_i exceeds the running threshold.
    rho = np.nonzero(u > (cssv - b) / np.arange(1, n + 1))[0][-1]
    theta = max(0.0, (cssv[rho] - b) / (rho + 1))
    w = v - theta
    return np.where(w < 0, 0, w)
def main():
    """CLI entry point: convert raw LLaMA checkpoint weights to HF format."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_dir', help='Location of LLaMA weights, which contains tokenizer.model and model folders')
    parser.add_argument('--model_size', choices=['7B', '13B', '30B', '65B', 'tokenizer_only'])
    parser.add_argument('--output_dir', help='Location to write HF model and tokenizer')
    args = parser.parse_args()
    # 'tokenizer_only' skips the (large) model weight conversion.
    if args.model_size != 'tokenizer_only':
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
        )
    write_tokenizer(
        tokenizer_path=args.output_dir,
        input_tokenizer_path=os.path.join(args.input_dir, 'tokenizer.model'),
    )
def aead_test(backend, cipher_factory, mode_factory, params):
    """Run one NIST AEAD (GCM) test vector against `backend`.

    'fail' vectors must raise InvalidTag on finalize; otherwise the vector
    is encrypted (tag checked against the expected value) and decrypted
    back to the known plaintext.
    """
    if mode_factory is GCM and backend._fips_enabled and len(params['iv']) != 24:
        pytest.skip('Non-96-bit IVs unsupported in FIPS mode.')
    unhex = binascii.unhexlify
    iv = unhex(params['iv'])
    tag = unhex(params['tag'])
    mode = mode_factory(iv, tag, len(tag))
    assert isinstance(mode, GCM)
    # Some vectors omit the plaintext (decryption-failure vectors).
    if params.get('pt') is not None:
        plaintext = unhex(params['pt'])
    ciphertext = unhex(params['ct'])
    aad = unhex(params['aad'])
    key = unhex(params['key'])
    if params.get('fail') is True:
        # Tampered vector: decryption must reject the tag on finalize.
        decryptor = Cipher(cipher_factory(key), mode, backend).decryptor()
        decryptor.authenticate_additional_data(aad)
        decryptor.update(ciphertext)
        with pytest.raises(InvalidTag):
            decryptor.finalize()
    else:
        encryptor = Cipher(cipher_factory(key), mode_factory(iv, None), backend).encryptor()
        encryptor.authenticate_additional_data(aad)
        actual_ciphertext = encryptor.update(plaintext) + encryptor.finalize()
        assert encryptor.tag[:len(tag)] == tag
        decryptor = Cipher(cipher_factory(key), mode_factory(iv, tag, min_tag_length=len(tag)), backend).decryptor()
        decryptor.authenticate_additional_data(aad)
        actual_plaintext = decryptor.update(ciphertext) + decryptor.finalize()
        assert actual_plaintext == plaintext
def print_table(table: List[List[str]]):
    """Print `table` with each column left-padded to its widest cell."""
    widths = [0] * len(table[0])
    for row in table:
        for idx, cell in enumerate(row):
            if len(cell) > widths[idx]:
                widths[idx] = len(cell)
    for row in table:
        print(' '.join(row[idx].ljust(widths[idx]) for idx in range(len(row))))
class RevUnit(nn.Module):
    """RevNet unit: reversible when shape is preserved, residual otherwise.

    If channels and spatial size are unchanged the unit is a memory-saving
    ReversibleBlock over two half-width bodies; otherwise it falls back to a
    plain residual body with a 1x1 projection on the identity branch.
    """

    def __init__(self, in_channels, out_channels, stride, bottleneck, preactivate):
        super(RevUnit, self).__init__()
        self.resize_identity = (in_channels != out_channels) or (stride != 1)
        body_class = RevResBottleneck if bottleneck else RevResBlock
        if self.resize_identity:
            # Shape changes: ordinary residual body + projected identity.
            self.body = body_class(in_channels=in_channels, out_channels=out_channels, stride=stride, preactivate=preactivate)
            self.identity_conv = conv1x1_block(in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None)
        else:
            # Shape preserved (stride is necessarily 1 here): build the
            # reversible block from two half-channel sub-bodies.
            assert in_channels % 2 == 0
            assert out_channels % 2 == 0
            half_in = in_channels // 2
            half_out = out_channels // 2
            gm = body_class(in_channels=half_in, out_channels=half_out, stride=1, preactivate=preactivate)
            fm = body_class(in_channels=half_in, out_channels=half_out, stride=1, preactivate=preactivate)
            self.body = ReversibleBlock(gm, fm)

    def forward(self, x):
        if not self.resize_identity:
            return self.body(x)
        identity = self.identity_conv(x)
        return self.body(x) + identity
def Darken(color, factor):
    """Return `color` (an RGBA 4-tuple) darkened by `factor` in [0, 1].

    factor=0 leaves the color unchanged, factor=1 yields black; alpha is
    passed through untouched.
    """
    r, g, b, a = color
    scale = 1 - min(max(factor, 0), 1)
    clamp = lambda c: min(max(c, 0), 255)
    r = clamp(r * scale)
    g = clamp(g * scale)
    b = clamp(b * scale)
    return wx.Colour(round(r), round(g), round(b), round(a))
def test_edge_edge_degenerate_second_edge(test, device):
    """Second edge collapses to a point: closest-point params are (0.5, 0.0)."""
    p1 = np.array([[1, 0, 0]])
    q1 = np.array([[0, 1, 0]])
    degenerate_start = np.array([[1, 1, 0]])
    degenerate_end = np.array([[1, 1, 0]])
    st0 = run_closest_point_edge_edge(p1, q1, degenerate_start, degenerate_end, device)[0]
    test.assertAlmostEqual(st0[0], 0.5)
    test.assertAlmostEqual(st0[1], 0.0)
class RPCA_gpu():
    """Robust PCA via inexact augmented Lagrange multipliers, on torch tensors.

    Decomposes the data matrix D into a low-rank part L and a sparse part S.
    """

    def __init__(self, D, mu=None, lmbda=None):
        self.D = D
        self.S = torch.zeros_like(self.D)
        self.Y = torch.zeros_like(self.D)
        # Standard ALM heuristic for mu; note norm_p(D, 2) is the *squared*
        # Frobenius norm here (sum of squared entries, no root taken).
        self.mu = (mu or (np.prod(self.D.shape) / (4 * self.norm_p(self.D, 2))).item())
        self.mu_inv = 1 / self.mu
        self.lmbda = lmbda or (1 / np.sqrt(np.max(self.D.shape)))

    @staticmethod
    def norm_p(M, p):
        """Sum of entries raised to the p-th power (not the p-norm itself).

        Bug fix: this and `shrink` were defined without `self` but invoked
        as bound methods (`self.norm_p(...)`), which raised TypeError; they
        are now proper @staticmethods, callable both ways.
        """
        return torch.sum(torch.pow(M, p))

    @staticmethod
    def shrink(M, tau):
        """Elementwise soft-thresholding operator S_tau(M)."""
        return torch.sign(M) * F.relu(torch.abs(M) - tau)

    def svd_threshold(self, M, tau):
        """Soft-threshold the singular values of M at level tau."""
        (U, s, V) = torch.svd(M, some=True)
        return torch.mm(U, torch.mm(torch.diag(self.shrink(s, tau)), V.t()))

    def fit(self, tol=None, max_iter=1000, iter_print=100):
        """Iterate the ALM updates until the relative residual drops below
        `tol` (default 1e-7 * squared Frobenius norm of D) or `max_iter`.

        Returns:
            (L, S): low-rank and sparse components; also stored on self.
        """
        i, err = 0, np.inf
        Sk, Yk, Lk = self.S, self.Y, torch.zeros_like(self.D)
        _tol = tol or (1e-07 * self.norm_p(torch.abs(self.D), 2))
        while err > _tol and i < max_iter:
            Lk = self.svd_threshold((self.D - Sk) + (self.mu_inv * Yk), self.mu_inv)
            Sk = self.shrink((self.D - Lk) + (self.mu_inv * Yk), self.mu_inv * self.lmbda)
            Yk = Yk + (self.mu * ((self.D - Lk) - Sk))
            err = self.norm_p(torch.abs((self.D - Lk) - Sk), 2) / self.norm_p(self.D, 2)
            i += 1
        self.L, self.S = Lk, Sk
        return (Lk, Sk)
(3, 'tokens', 'where', 'join')
def searchItemsRegex(tokens, where=None, join=None, eager=None):
    """Search Items whose name matches every regex token (max 100 results)."""
    if not isinstance(tokens, (tuple, list)) or not all(isinstance(t, str) for t in tokens):
        raise TypeError('Need tuple or list of strings as argument')
    # Normalize `join` into an iterable of join targets.
    if join is None:
        join = tuple()
    if not hasattr(join, '__iter__'):
        join = (join,)
    query = get_gamedata_session().query(Item).options(*processEager(eager)).join(*join)
    for token in tokens:
        condition = Item.name.op('regexp')(token)
        if where is not None:
            condition = and_(condition, where)
        query = query.filter(condition)
    return query.limit(100).all()
@pytest.mark.parametrize(('yanked', 'expected_yanked', 'expected_yanked_reason'), [(True, True, ''), (False, False, ''), ('the reason', True, 'the reason'), ('', True, '')])
def test_package_pep592_yanked(yanked: (str | bool), expected_yanked: bool, expected_yanked_reason: str) -> None:
    """PEP 592: `yanked` may be a bool or a reason string; any string (even
    empty) or True marks the package yanked, and a non-empty string is kept
    as the reason.

    Fix: restored the mangled `@pytest.mark.parametrize` decorator -- the
    bare `.parametrize(...)` line was a syntax error.
    """
    package = Package('foo', '1.0', yanked=yanked)
    assert package.yanked == expected_yanked
    assert package.yanked_reason == expected_yanked_reason
class ColorTest(unittest.TestCase):
    """Unit tests for the 24-bit RGB `Color` value object."""

    def test_constructor_should_accept_integer(self):
        self.assertEqual(12345, Color(12345).rgb_val)

    def test_constructor_should_accept_integer_string(self):
        self.assertEqual(12345, Color('12345').rgb_val)

    def test_constructor_should_not_accept_non_integer_string(self):
        # assertRaises replaces the older try/except/else self.fail() idiom.
        with self.assertRaises(ValueError):
            Color('ff00ff')

    def test_constructor_should_not_accept_none(self):
        with self.assertRaises(TypeError):
            Color(None)

    def test_constructor_should_not_accept_negative_value(self):
        with self.assertRaises(ValueError):
            Color(-1)

    def test_constructor_should_not_accept_value_over_24_bit(self):
        with self.assertRaises(ValueError):
            Color(2 ** 24)

    def test_rgb_value_should_default_to_0(self):
        self.assertEqual(0, Color().rgb_val)

    def test_rgb_value_should_be_read_only(self):
        with self.assertRaises(AttributeError):
            Color().rgb_val = 1

    def test_rgba_value_should_be_rgb_value_with_two_trailing_zero_hex_digits(self):
        # Fix: the expected value had been lost (`assertEqual(,` was a syntax
        # error). Per the test name, rgba_val appends two zero hex digits,
        # i.e. rgb_val << 8: 0x1177FF (1144831) -> 0x1177FF00.
        color = Color(1144831)
        self.assertEqual(0x1177FF00, color.rgba_val)

    def test_to_hex_string_should_create_six_digit_hex_value(self):
        self.assertEqual('000fab', Color(4011).to_hex_string())

    def test_color_from_hex_string_should_correctly_decode_hex_value(self):
        self.assertEqual(4011, color_from_hex_string('fab').rgb_val)
class BlogEntry(models.Model):
    """A single syndicated post pulled from an external blog `Feed`."""
    title = models.CharField(max_length=200)
    # Raw summary as provided by the feed (may contain HTML, may be empty).
    summary = models.TextField(blank=True)
    pub_date = models.DateTimeField()
    # Canonical URL of the post on the source blog.
    url = models.URLField('URL')
    feed = models.ForeignKey('Feed', on_delete=models.CASCADE)
    class Meta():
        verbose_name = 'Blog Entry'
        verbose_name_plural = 'Blog Entries'
        get_latest_by = 'pub_date'
    def __str__(self):
        return self.title
    def get_absolute_url(self):
        # Entries live on the external site, so this is the feed-supplied URL.
        return self.url
    def excerpt(self):
        """Return the summary reduced to plain text (HTML stripped)."""
        return text_from_html(self.summary)
class BlockDataset(torch.utils.data.Dataset):
    """Serve an underlying dataset in contiguous fixed-size blocks.

    Item `idx` is the slice of up to `block_size` consecutive elements
    starting at ``idx * block_size`` (the final block may be shorter).
    """

    def __init__(self, dataset: torch.utils.data.Dataset, batch_size: int = 100, block_size: int = 10000) -> None:
        assert block_size >= batch_size, 'Block size should be > batch size.'
        self.dataset = dataset
        self.batch_size = batch_size
        self.block_size = block_size

    def __getitem__(self, idx: int) -> torch.Tensor:
        lo = idx * self.block_size
        hi = min(lo + self.block_size, len(self.dataset))
        return self.dataset[lo:hi]

    def __len__(self) -> int:
        # Ceiling division: number of (possibly partial) blocks.
        return -(-len(self.dataset) // self.block_size)
def install_pypy(tmp: Path, url: str) -> Path:
    """Download and unpack a PyPy release into the cibuildwheel cache.

    Idempotent: guarded by a file lock, and skipped entirely when the
    target directory already exists. Returns the path to `pypy3`.
    """
    extension = '.tar.bz2'
    archive_name = url.rsplit('/', 1)[-1]
    assert archive_name.endswith(extension)
    installation_path = CIBW_CACHE_PATH / archive_name[:-len(extension)]
    with FileLock(str(installation_path) + '.lock'):
        if not installation_path.exists():
            archive = tmp / archive_name
            download(url, archive)
            installation_path.parent.mkdir(parents=True, exist_ok=True)
            call('tar', '-C', installation_path.parent, '-xf', archive)
            # Remove the downloaded archive once extracted.
            archive.unlink()
    return installation_path / 'bin' / 'pypy3'
def preprocess(csv_file, json_file):
    """Convert a CSV of (label, slot1, slot2, ...) rows into JSON-lines.

    Each output line is one sample dict with 'doc_label', 'doc_token' and
    empty 'doc_keyword' / 'doc_topic' lists.

    Fixes: the CSV was opened in binary mode ('rb'), which `csv.reader`
    rejects under Python 3, and samples were written back-to-back with no
    separator, producing unparseable concatenated JSON. The input is now
    opened in text mode (newline='' per the csv module docs) and each
    sample is written on its own line.
    """
    with open(json_file, 'w') as fout:
        with open(csv_file, 'r', newline='') as fin:
            for items in csv.reader(fin):
                # Column 0 is the label; the rest are text slots.
                text_data = clean_str(convert_multi_slots_to_single_slots(items[1:]))
                sample = {
                    'doc_label': [items[0]],
                    'doc_token': text_data.split(' '),
                    'doc_keyword': [],
                    'doc_topic': [],
                }
                fout.write(json.dumps(sample, ensure_ascii=False) + '\n')
class TimeMeter(Meter):
    """Measures the average occurrence of some event per second.

    Fix: restored the stripped `@property` decorators on `avg`,
    `elapsed_time` and `smoothed_value` -- the class itself reads them as
    attributes (`state_dict` uses `self.elapsed_time`, `smoothed_value`
    uses `self.avg`), so plain methods would have been stored/used as
    bound-method objects instead of numbers.
    """

    def __init__(self, init: int = 0, n: int = 0, round: Optional[int] = None):
        self.round = round
        self.reset(init, n)

    def reset(self, init=0, n=0):
        self.init = init
        self.start = time.time()
        self.n = n

    def update(self, val=1):
        self.n += val

    def state_dict(self):
        # Fold the elapsed time into 'init' so timing survives serialization.
        return {'init': self.elapsed_time, 'n': self.n, 'round': self.round}

    def load_state_dict(self, state_dict):
        if 'start' in state_dict:
            # Backwards compatible with old checkpoints: n is not recoverable.
            self.reset(init=state_dict['init'])
        else:
            self.reset(init=state_dict['init'], n=state_dict['n'])
            self.round = state_dict.get('round', None)

    @property
    def avg(self):
        """Events per second since the last reset (including carried init)."""
        return self.n / self.elapsed_time

    @property
    def elapsed_time(self):
        """Seconds elapsed since reset, plus any carried-over `init` time."""
        return self.init + (time.time() - self.start)

    @property
    def smoothed_value(self) -> float:
        """`avg`, optionally rounded to `self.round` digits."""
        val = self.avg
        if self.round is not None and val is not None:
            val = safe_round(val, self.round)
        return val
class TestVSCFInitialPoint(QiskitNatureTestCase):
    """Tests for the VSCF initial-point generator."""

    def setUp(self) -> None:
        super().setUp()
        self.vscf_initial_point = VSCFInitialPoint()
        ansatz = Mock(spec=UVCC)
        ansatz.reps = 1
        self.excitation_list = [((0,), (1,))]
        ansatz.excitation_list = self.excitation_list
        self.ansatz = ansatz

    def test_missing_ansatz(self):
        """compute() without an ansatz must raise."""
        with self.assertRaises(QiskitNatureError):
            self.vscf_initial_point.compute()

    def test_set_get_ansatz(self):
        self.vscf_initial_point.ansatz = self.ansatz
        self.assertEqual(self.ansatz, self.vscf_initial_point.ansatz)

    def test_set_problem(self):
        self.assertIsNone(self.vscf_initial_point.problem)
        problem = Mock(spec=VibrationalStructureProblem)
        self.vscf_initial_point.problem = problem
        self.assertEqual(problem, self.vscf_initial_point.problem)

    def test_vscf_compute(self):
        """VSCF initial point is all zeros (one per excitation)."""
        self.vscf_initial_point.compute(self.ansatz)
        result = self.vscf_initial_point.to_numpy_array()
        np.testing.assert_array_equal(result, np.asarray([0.0]))
@pytest.mark.parametrize('method', [CGA.round, pytest.param(CGA.flat, marks=pytest.mark.xfail(raises=AssertionError, reason='gh-100'))])
def test_from_points_construction(cga, method):
    """Object construction must accept both raw base vectors and
    already-lifted (up-projected) conformal points.

    Fix: restored the mangled `@pytest.mark.parametrize` decorator -- the
    bare `.parametrize(...)` line was a syntax error.
    """
    blades = cga.layout.blades
    e1, e2, e3 = blades['e1'], blades['e2'], blades['e3']
    assert method(cga, e1, e2, e3).mv == method(cga, e1, e2, cga.up(e3)).mv
def windowed_groupby_accumulator(acc, new, diff=None, window=None, agg=None, grouper=None, with_state=False):
    """Streaming accumulator for windowed groupby aggregations.

    `acc` carries the window of raw frames ('dfs'), the aggregation state
    ('state'), per-group row counts ('size-state') and -- when the grouper is
    positional data rather than a column name -- a deque of groupers aligned
    with 'dfs'. Frames that fall out of the window are "un-applied" via
    ``agg.on_old``; groups whose row count drops to zero are removed from
    the state and the emitted result.

    NOTE(review): `with_state` is accepted but unused here -- confirm callers.
    """
    # When the agg has no embedded grouper, `new` arrives as (data, grouper).
    if ((agg.grouper is None) and isinstance(new, tuple)):
        (new, grouper) = new
    else:
        grouper = None
    size = GroupbySize(agg.columns, agg.grouper)
    if (acc is None):
        acc = {'dfs': [], 'state': agg.initial(new, grouper=grouper), 'size-state': size.initial(new, grouper=grouper)}
        # Positional groupers must be windowed alongside the data frames.
        if (isinstance(grouper, np.ndarray) or is_series_like(grouper) or is_index_like(grouper)):
            acc['groupers'] = deque([])
    dfs = acc['dfs']
    state = acc['state']
    size_state = acc['size-state']
    # `diff` returns the frames still in the window plus those that expired.
    (dfs, old) = diff(dfs, new, window=window)
    if ('groupers' in acc):
        groupers = deque(acc['groupers'])
        if (len(grouper) > 0):
            groupers.append(grouper)
        # Re-align retained/expired groupers with retained/expired frames.
        (old_groupers, groupers) = diff_align(dfs, groupers)
    else:
        old_groupers = ([grouper] * len(old))
    if (new is not None):
        (state, result) = agg.on_new(state, new, grouper=grouper)
        (size_state, _) = size.on_new(size_state, new, grouper=grouper)
    # Un-apply every frame that slid out of the window.
    for (o, og) in zip(old, old_groupers):
        if ('groupers' in acc):
            assert (len(o) == len(og))
        if len(o):
            (state, result) = agg.on_old(state, o, grouper=og)
            (size_state, _) = size.on_old(size_state, o, grouper=og)
    # Drop groups with no remaining rows in the window.
    nonzero = (size_state != 0)
    if (not nonzero.all()):
        size_state = size_state[nonzero]
        result = result[nonzero]
        if isinstance(state, tuple):
            state = tuple((s[nonzero] for s in state))
        else:
            state = state[nonzero]
    acc2 = {'dfs': dfs, 'state': state, 'size-state': size_state}
    if ('groupers' in acc):
        acc2['groupers'] = groupers
    return (acc2, result)
class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin):
    """Async callback manager scoped to a single LLM run."""

    async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Fan a newly generated token out to all registered handlers."""
        await _ahandle_event(
            self.handlers,
            'on_llm_new_token',
            'ignore_llm',
            token,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )

    async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Notify handlers that the LLM call finished with `response`."""
        await _ahandle_event(
            self.handlers,
            'on_llm_end',
            'ignore_llm',
            response,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )

    async def on_llm_error(self, error: Union[(Exception, KeyboardInterrupt)], **kwargs: Any) -> None:
        """Notify handlers that the LLM call failed with `error`."""
        await _ahandle_event(
            self.handlers,
            'on_llm_error',
            'ignore_llm',
            error,
            run_id=self.run_id,
            parent_run_id=self.parent_run_id,
            **kwargs,
        )
class MobileHairNetV2(nn.Module):
    """Hair-segmentation network: MobileNetV2 encoder + depthwise decoder.

    The MobileNetV2 feature stack is sliced into five encoder stages; a
    lightweight decoder upsamples back, with 1x1 "encode_to_decoder" convs
    adapting encoder channel counts for the additive skip connections.
    Output is a 2-channel logits map.
    """
    def __init__(self, decode_block=LayerDepwiseDecode, *args, **kwargs):
        super(MobileHairNetV2, self).__init__()
        self.mobilenet = mobilenet_v2(*args, **kwargs)
        self.decode_block = decode_block
        self.make_layers()
        self._init_weight()
    def make_layers(self):
        # Slice the MobileNetV2 feature sequence into 5 encoder stages.
        self.encode_layer1 = nn.Sequential(*list(self.mobilenet.features)[:2])
        self.encode_layer2 = nn.Sequential(*list(self.mobilenet.features)[2:4])
        self.encode_layer3 = nn.Sequential(*list(self.mobilenet.features)[4:7])
        self.encode_layer4 = nn.Sequential(*list(self.mobilenet.features)[7:14])
        self.encode_layer5 = nn.Sequential(*list(self.mobilenet.features)[14:19])
        # Decoder: each step upsamples 2x; the last one produces 2 classes.
        self.decode_layer1 = nn.Sequential(nn.Upsample(scale_factor=2))
        self.decode_layer2 = nn.Sequential(nn.Conv2d(in_channels=1280, out_channels=64, kernel_size=1), self.decode_block(in_channel=64, out_channel=64, kernel_size=3), nn.Upsample(scale_factor=2))
        self.decode_layer3 = nn.Sequential(self.decode_block(in_channel=64, out_channel=64, kernel_size=3), nn.Upsample(scale_factor=2))
        self.decode_layer4 = nn.Sequential(self.decode_block(in_channel=64, out_channel=64, kernel_size=3), nn.Upsample(scale_factor=2))
        self.decode_layer5 = nn.Sequential(self.decode_block(in_channel=64, out_channel=64, kernel_size=3), nn.Upsample(scale_factor=2), self.decode_block(in_channel=64, out_channel=64, kernel_size=3), nn.Conv2d(in_channels=64, out_channels=2, kernel_size=3, padding=1))
        # 1x1 convs matching encoder channel counts to the decoder's.
        self.encode_to_decoder4 = nn.Conv2d(in_channels=96, out_channels=1280, kernel_size=1)
        self.encode_to_decoder3 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=1)
        self.encode_to_decoder2 = nn.Conv2d(in_channels=24, out_channels=64, kernel_size=1)
        self.encode_to_decoder1 = nn.Conv2d(in_channels=16, out_channels=64, kernel_size=1)
        # NOTE(review): soft_max is constructed but not applied in forward();
        # presumably callers apply it (or use raw logits with a loss) -- confirm.
        self.soft_max = nn.Softmax(dim=1)
    def forward(self, x):
        x = self._forward_implement(x)
        return x
    def _forward_implement(self, x):
        # Encoder pass, keeping each stage's output for the skips.
        encode_layer1 = self.encode_layer1(x)
        encode_layer2 = self.encode_layer2(encode_layer1)
        encode_layer3 = self.encode_layer3(encode_layer2)
        encode_layer4 = self.encode_layer4(encode_layer3)
        encode_layer5 = self.encode_layer5(encode_layer4)
        # Project encoder outputs to decoder channel counts.
        encode_layer4 = self.encode_to_decoder4(encode_layer4)
        encode_layer3 = self.encode_to_decoder3(encode_layer3)
        encode_layer2 = self.encode_to_decoder2(encode_layer2)
        encode_layer1 = self.encode_to_decoder1(encode_layer1)
        # Decoder pass with additive skip connections.
        decode_layer1 = (self.decode_layer1(encode_layer5) + encode_layer4)
        decode_layer2 = (self.decode_layer2(decode_layer1) + encode_layer3)
        decode_layer3 = (self.decode_layer3(decode_layer2) + encode_layer2)
        decode_layer4 = (self.decode_layer4(decode_layer3) + encode_layer1)
        out = self.decode_layer5(decode_layer4)
        return out
    def _init_weight(self):
        # He init for convs, constant init for norms, small normal for linears.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if (m.bias is not None):
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)
class _SklearnSVMMulticlass(_SklearnSVMABC):
    """Classical multiclass SVM wrapper used alongside the quantum variant."""

    def __init__(self, training_dataset, test_dataset, datapoints, gamma, multiclass_classifier):
        super().__init__(training_dataset, test_dataset, datapoints, gamma)
        self.multiclass_classifier = multiclass_classifier
        self._qalgo = None

    def train(self, data, labels):
        """Fit the underlying multiclass estimator ensemble."""
        self.multiclass_classifier.train(data, labels)

    def test(self, data, labels):
        """Evaluate accuracy on (data, labels), recording it in the result dict."""
        accuracy = self.multiclass_classifier.test(data, labels)
        self._ret['testing_accuracy'] = accuracy
        self._ret['test_success_ratio'] = accuracy
        return accuracy

    def predict(self, data):
        """Predict labels for `data`, recording them in the result dict."""
        predicted_labels = self.multiclass_classifier.predict(data)
        self._ret['predicted_labels'] = predicted_labels
        return predicted_labels

    def run(self):
        """Train, then optionally test and predict; return the result dict."""
        self.train(self.training_dataset[0], self.training_dataset[1])
        if self.test_dataset is not None:
            self.test(self.test_dataset[0], self.test_dataset[1])
        if self.datapoints is not None:
            predicted_labels = self.predict(self.datapoints)
            predicted_classes = map_label_to_class_name(predicted_labels, self.label_to_class)
            self._ret['predicted_classes'] = predicted_classes
        return self._ret

    def load_model(self, file_path):
        """Restore per-estimator SVM parameters saved by `save_model`.

        Bug fix: the loop iterated over `i` but never indexed with it --
        `estimators.ret[...]` raised AttributeError; each saved entry now
        goes to `estimators[i].ret` (mirroring `save_model`).
        """
        model_npz = np.load(file_path, allow_pickle=True)
        for i in range(len(self.multiclass_classifier.estimators)):
            estimator = self.multiclass_classifier.estimators[i]
            estimator.ret['svm']['alphas'] = model_npz['alphas_{}'.format(i)]
            estimator.ret['svm']['bias'] = model_npz['bias_{}'.format(i)]
            estimator.ret['svm']['support_vectors'] = model_npz['support_vectors_{}'.format(i)]
            estimator.ret['svm']['yin'] = model_npz['yin_{}'.format(i)]
        try:
            self.class_to_label = model_npz['class_to_label']
            self.label_to_class = model_npz['label_to_class']
        except KeyError as ex:
            logger.warning('The model saved in Aqua 0.5 does not contain the mapping between class names and labels. Please setup them and save the model again for further use. Error: %s', str(ex))

    def save_model(self, file_path):
        """Save per-estimator SVM parameters plus the class/label mappings."""
        model = {}
        for (i, estimator) in enumerate(self.multiclass_classifier.estimators):
            model['alphas_{}'.format(i)] = estimator.ret['svm']['alphas']
            model['bias_{}'.format(i)] = estimator.ret['svm']['bias']
            model['support_vectors_{}'.format(i)] = estimator.ret['svm']['support_vectors']
            model['yin_{}'.format(i)] = estimator.ret['svm']['yin']
        # NOTE(review): self._qalgo is always None in this class, so these two
        # lookups raise AttributeError -- likely should be self.class_to_label /
        # self.label_to_class (the names load_model restores). Confirm before changing.
        model['class_to_label'] = self._qalgo.class_to_label
        model['label_to_class'] = self._qalgo.label_to_class
        np.savez(file_path, **model)
def get_summary_and_prune(model: torch.nn.Module, *, max_depth: int, module_args: Optional[Tuple[(object, ...)]] = None, module_kwargs: Optional[Dict[(str, Any)]] = None) -> ModuleSummary:
    """Summarize `model` (optionally tracing a forward pass with the given
    args/kwargs) and prune the summary tree to at most `max_depth` levels."""
    summary = get_module_summary(model, module_args=module_args, module_kwargs=module_kwargs)
    prune_module_summary(summary, max_depth=max_depth)
    return summary
class ProphetNetTokenizer(PreTrainedTokenizer):
    """ProphetNet tokenizer: BERT-style basic split followed by WordPiece.

    Fix: restored the stripped `@property` on `vocab_size` -- the
    `PreTrainedTokenizer` base reads it as an attribute (e.g. in
    `__len__`), so a plain method would yield a bound method instead of
    the vocabulary size.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Tensor names the model's forward() expects from this tokenizer.
    model_input_names: List[str] = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', x_sep_token='[X_SEP]', pad_token='[PAD]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, x_sep_token=x_sep_token, pad_token=pad_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        self.unique_no_split_tokens.append(x_sep_token)
        if not os.path.isfile(vocab_file):
            raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        self.vocab = load_vocab(vocab_file)
        self.ids_to_tokens = collections.OrderedDict([(ids, tok) for (tok, ids) in self.vocab.items()])
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)

    @property
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.vocab)

    def get_vocab(self):
        """Return the full vocab (base + added tokens) as token -> id."""
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # Basic (whitespace/punctuation) split first, then WordPiece per
        # token; never-split tokens pass through unchanged.
        split_tokens = []
        if self.do_basic_tokenize:
            for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
                if token in self.basic_tokenizer.never_split:
                    split_tokens.append(token)
                else:
                    split_tokens += self.wordpiece_tokenizer.tokenize(token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id, falling back to the unk id."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an id to a token (str), falling back to the unk token."""
        return self.ids_to_tokens.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Join tokens, merging WordPiece '##' continuations back together."""
        out_string = ' '.join(tokens).replace(' ##', '').strip()
        return out_string

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            # Layout: tokens_0 [SEP]
            return (([0] * len(token_ids_0)) + [1])
        # Layout: tokens_0 [SEP] tokens_1 [SEP]
        return (((([0] * len(token_ids_0)) + [1]) + ([0] * len(token_ids_1))) + [1])

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Return token-type ids: 0 for the first segment, 1 for the second."""
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return (len((token_ids_0 + sep)) * [0])
        return ((len((token_ids_0 + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocabulary to `save_directory`, one token per line,
        in index order. Warns if the stored indices are not consecutive."""
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        else:
            vocab_file = (((filename_prefix + '-') if filename_prefix else '') + save_directory)
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for (token, token_index) in sorted(self.vocab.items(), key=(lambda kv: kv[1])):
                if index != token_index:
                    logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!')
                    index = token_index
                writer.write((token + '\n'))
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Append [SEP] after each segment: A [SEP] (B [SEP])."""
        if token_ids_1 is None:
            return (token_ids_0 + [self.sep_token_id])
        sep = [self.sep_token_id]
        return (((token_ids_0 + sep) + token_ids_1) + sep)
class CustomBenchUsedDistributions(SphinxDirective):
    """Sphinx directive rendering a list-table of the library versions used
    across all benchmark environment files in the release archives."""

    required_arguments = 0

    def get_list_table(self) -> str:
        """Collect distribution -> version from every env file and render
        the reST list-table source as a string.

        NOTE(review): the URL / image targets in the template below appear
        stripped or incomplete -- confirm against the original docs source.
        """
        distributions: Dict[(str, str)] = {}
        for hub_description in BENCHMARK_HUBS:
            with ZipFile((RELEASE_DATA / f'{hub_description.key}.zip')) as release_zip:
                index = json.loads(release_zip.read('index.json'))
                # Later entries overwrite earlier ones for the same dist.
                for file_list in index['env_files'].values():
                    for file in file_list:
                        distributions.update(pyperf_bench_to_measure(release_zip.read(file)).distributions)
        result = dedent('\n .. list-table::\n :header-rows: 1\n\n * - Library\n - Used version\n - Last version\n ')
        for dist in sorted(distributions.keys()):
            version = distributions[dist]
            result += dedent(f'''
            * - `{dist} <
            - ``{version}``
            - .. image::
            :target:
            :class: only-light
            .. image::
            :target:
            :class: only-dark
            ''').replace('\n', '\n ')
        return result

    def run(self):
        """Parse the generated reST and return the resulting doctree nodes."""
        list_table = self.get_list_table()
        rst = StringList(list_table.split('\n'), source='fake.rst')
        node = docutils.nodes.paragraph()
        self.state.nested_parse(rst, 0, node)
        return node.children
class ResNet(SimpleNet):
    """ResNet backbone with halved stage widths (32/64/128/256).

    Bug fix: the classifier was built as Linear(512 * expansion, ...) even
    though layer4 emits 256 * expansion channels (the stage widths above
    are half the torchvision defaults), so forward() always failed with a
    shape mismatch at the fc layer. The fc input now matches layer4.
    """

    def __init__(self, block, layers, num_classes=1000, name=None, created_time=None):
        self.inplanes = 64
        super(ResNet, self).__init__(name, created_time)
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 32, layers[0])
        self.layer2 = self._make_layer(block, 64, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 128, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 256, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        # See class docstring: must match layer4's output channel count.
        self.fc = nn.Linear((256 * block.expansion), num_classes)
        # He-style init for convs, constant init for batchnorms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one stage of `blocks` residual blocks; the first block may
        downsample (stride / channel change) via a 1x1 conv shortcut."""
        downsample = None
        if (stride != 1) or (self.inplanes != (planes * block.expansion)):
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        # Flatten pooled features before the classifier.
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
def _union_primary_key_indices(hash_bucket_index: int, df_envelopes_list: List[List[DeltaFileEnvelope]]) -> pa.Table:
    """Concatenate all delta file envelopes of one hash bucket into one table.

    Envelopes are flattened across the nested lists, ordered by
    (stream_position, file_index) ascending so later deltas come last, and
    each is projected with its delta-file metadata before concatenation.
    """
    logger.info(f'[Hash bucket index {hash_bucket_index}] Reading dedupe input for {len(df_envelopes_list)} delta file envelope lists...')
    flattened = [envelope for group in df_envelopes_list for envelope in group]
    ordered = sorted(flattened, key=lambda envelope: (envelope.stream_position, envelope.file_index))
    projected = [sc.project_delta_file_metadata_on_table(envelope) for envelope in ordered]
    hb_table = pa.concat_tables(projected)
    logger.info(f'Total records in hash bucket {hash_bucket_index} is {hb_table.num_rows}')
    return hb_table
# NOTE(review): this looks like a mangled `@with_fixtures(...)` decorator that
# lost its `@` during extraction -- confirm against the original test module.
_fixtures(ReahlSystemFixture, PartyAccountFixture)
def test_create_account(reahl_system_fixture, party_account_fixture):
    """Registering an account sends an activation email; duplicate emails fail.

    Covers three phases: a registration clashing with a pending email change
    rolls back cleanly, a fresh registration creates a disabled account with
    an activation request, and re-registering the same email raises.
    """
    fixture = party_account_fixture
    login_email = ''
    mailer_stub = fixture.mailer
    account_management_interface = fixture.account_management_interface
    account_management_interface.email = login_email
    mailer_stub.reset()
    # Phase 1: another account already has a pending change to new_email, so
    # registering with it must fail without sending mail.
    other_system_account = fixture.system_account
    new_email = ''
    Session.add(ChangeAccountEmail(other_system_account, new_email))
    with expected(NotUniqueException):
        account_management_interface.email = new_email
        account_management_interface.register()
    assert (not mailer_stub.mail_sent)
    # Roll back the failed attempt; no activation action may remain.
    reahl_system_fixture.system_control.rollback()
    assert (Session.query(ActivateAccount).count() == 0)
    # Phase 2: a clean registration creates the account + activation request.
    account_management_interface.email = login_email
    system_account = account_management_interface.register()
    [activation_action] = Session.query(ActivateAccount).filter_by(system_account=system_account).all()
    activation_request = activation_action.requirements[0]
    assert mailer_stub.mail_sent
    assert (system_account.email == account_management_interface.email)
    # Activation deadline is set ~10 days out from "now".
    assert_recent((activation_action.deadline - timedelta(days=10)))
    # The new account stays inactive until the activation link is used.
    assert (not system_account.registration_activated)
    assert (not system_account.account_enabled)
    assert (not system_account.registration_date)
    assert isinstance(system_account, EmailAndPasswordSystemAccount)
    assert (system_account.owner is None)
    assert system_account.id
    # Phase 3: registering the same email again must fail, with no new mail.
    mailer_stub.reset()
    with expected(NotUniqueException):
        account_management_interface.register()
    assert (not mailer_stub.mail_sent)
def test_inheritance_overriden_types_functional_parent():
    """A class inheriting a functional namedtuple keeps the parent's fields.

    Overriding ``a``'s annotation is reflected in the field types and in
    ``overriden_types``; the extra annotation ``c`` does not become a field.
    """
    Parent = namedtuple('Parent', 'a b')

    class Child(Parent):
        a: bool
        c: str

    expected_input = InputShape(
        constructor=Child,
        kwargs=None,
        fields=(
            InputField(type=bool, id='a', default=NoDefault(), is_required=True, metadata=MappingProxyType({}), original=ANY),
            InputField(type=Any, id='b', default=NoDefault(), is_required=True, metadata=MappingProxyType({}), original=ANY),
        ),
        params=(
            Param(field_id='a', name='a', kind=ParamKind.POS_OR_KW),
            Param(field_id='b', name='b', kind=ParamKind.POS_OR_KW),
        ),
        overriden_types=frozenset({'a'}),
    )
    expected_output = OutputShape(
        fields=(
            OutputField(type=bool, id='a', default=NoDefault(), metadata=MappingProxyType({}), accessor=create_key_accessor(0, access_error=None), original=ANY),
            OutputField(type=Any, id='b', default=NoDefault(), metadata=MappingProxyType({}), accessor=create_key_accessor(1, access_error=None), original=ANY),
        ),
        overriden_types=frozenset({'a'}),
    )
    assert get_named_tuple_shape(Child) == Shape(input=expected_input, output=expected_output)
class QuantLinear(nn.Module):
    """Quantization-aware linear layer.

    Behaves like ``nn.Linear`` when ``quant_mode`` is False; otherwise
    symmetrically quantizes the weight (per tensor or per output channel)
    and the bias, and runs the matmul in the integer domain.

    Args:
        in_features: Size of each input sample.
        out_features: Size of each output sample.
        bias: Whether the layer has an additive bias.
        weight_bit: Bit width used to quantize the weight.
        bias_bit: Bit width used to quantize the bias.
        per_channel: Quantize the weight per output channel instead of per tensor.
        quant_mode: If True, run the quantized (integer) forward path.
    """

    def __init__(self, in_features, out_features, bias=True, weight_bit=8, bias_bit=32, per_channel=False, quant_mode=False):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = nn.Parameter(torch.zeros([out_features, in_features]))
        self.register_buffer('weight_integer', torch.zeros_like(self.weight))
        self.register_buffer('fc_scaling_factor', torch.zeros(self.out_features))
        if bias:
            self.bias = nn.Parameter(torch.zeros(out_features))
            self.register_buffer('bias_integer', torch.zeros_like(self.bias))
        else:
            # Fix: the original never defined `bias`/`bias_integer` when
            # bias=False, so forward() raised AttributeError. Registering the
            # parameter as None keeps nn.Module bookkeeping consistent and
            # lets both forward paths pass bias=None through.
            self.register_parameter('bias', None)
            self.bias_integer = None
        self.weight_bit = weight_bit
        self.quant_mode = quant_mode  # (the original assigned this twice)
        self.per_channel = per_channel
        self.bias_bit = bias_bit
        self.percentile_mode = False
        self.weight_function = SymmetricQuantFunction.apply

    def __repr__(self):
        s = super().__repr__()
        s = f'({s} weight_bit={self.weight_bit}, quant_mode={self.quant_mode})'
        return s

    def forward(self, x, prev_act_scaling_factor=None):
        """Apply the (optionally quantized) linear transform.

        Args:
            x: Input activations.
            prev_act_scaling_factor: Scalar (shape ``(1,)``) scaling factor of
                the incoming activations; required in quant mode.

        Returns:
            Tuple of (output, output scaling factor); the factor is None when
            quant mode is off.
        """
        if not self.quant_mode:
            return nn.functional.linear(x, weight=self.weight, bias=self.bias), None
        # The incoming activation must be globally quantized (a single scale).
        assert prev_act_scaling_factor is not None and prev_act_scaling_factor.shape == (1,), 'Input activation to the QuantLinear layer should be globally (non-channel-wise) quantized. Please add a QuantAct layer with `per_channel = True` before this QuantAct layer'
        w_transform = self.weight.data.detach()
        if self.per_channel:
            w_min, _ = torch.min(w_transform, dim=1, out=None)
            w_max, _ = torch.max(w_transform, dim=1, out=None)
        else:
            w_min = w_transform.min().expand(1)
            w_max = w_transform.max().expand(1)
        self.fc_scaling_factor = symmetric_linear_quantization_params(self.weight_bit, w_min, w_max, self.per_channel)
        self.weight_integer = self.weight_function(self.weight, self.weight_bit, self.percentile_mode, self.fc_scaling_factor)
        # The bias scale is the product of the weight and activation scales.
        bias_scaling_factor = self.fc_scaling_factor * prev_act_scaling_factor
        if self.bias is not None:
            self.bias_integer = self.weight_function(self.bias, self.bias_bit, False, bias_scaling_factor)
        prev_act_scaling_factor = prev_act_scaling_factor.view(1, -1)
        # Recover the integer representation of the input, do an integer
        # matmul, then rescale back to the real domain.
        x_int = x / prev_act_scaling_factor
        out = nn.functional.linear(x_int, weight=self.weight_integer, bias=self.bias_integer)
        return out * bias_scaling_factor, bias_scaling_factor
def prepare_parser():
    """Build the shared ArgumentParser used by all training/visualization scripts.

    Groups cover: direction-learning options, dataset/dataloader options,
    BigGAN model hyperparameters, initialization and optimizer settings,
    batching/training control, checkpointing, logging, and EMA.

    Returns:
        argparse.ArgumentParser with every option registered.
    """
    usage = 'Parser for all scripts.'
    parser = ArgumentParser(description=usage)
    # --- Direction-learning options (this project's additions) ---
    parser.add_argument('--G_path', type=str, default=None, help='Path to pre-trained BigGAN checkpoint folder (default: auto-download checkpoint)')
    parser.add_argument('--A_lr', type=float, default=0.01, help='Learning rate for the directions')
    parser.add_argument('--ndirs', type=int, default=120, help='Number of directions to learn')
    parser.add_argument('--fix_class', type=int, default=None, help='If specified, restricts sampled y to always be the given ImageNet class')
    parser.add_argument('--load_A', type=str, default='random', help='The method for initializing/loading the direction matrix. Either "random" or "coord" if you want to use a random/standard basis A, or a path to a *.pt file containing an A matrix checkpoint.')
    parser.add_argument('--search_space', type=str, default='all', choices=['all', 'coarse', 'mid', 'fine'], help='This argument controls the subspace within Z-space in which directions will be searched. Using "all" will search for directions in all of Z-space. Using "coarse" will only search for directions in the first 40 Z components. Using "mid" will search for directions in Z components 41-80, and "fine" will search for directionsin Z components 81-120 (i.e., the last 40 components). Note that this assumes dim_z=120 (the default). If you use a different dimensionality for Z, arguments besides "all" will not work.')
    parser.add_argument('--path_size', type=float, default=5.0, help='Maximum possible distance to travel in the learned directions (used in both training and visualizations)')
    parser.add_argument('--no_ortho', action='store_true', help='If specified, does NOT apply Gram Schmidt to orthogonalize the A matrix.')
    # --- Visualization/logging of directions ---
    parser.add_argument('--vis_during_training', action='store_true', help='If specified, periodically logs GIFs of learned directions during training to TensorBoard/WandB')
    parser.add_argument('--wandb_entity', type=str, default=None, help='To use WandB logging, supply your entity/username (if left as None, uses TensorBoard instead)')
    parser.add_argument('--directions_to_vis', type=int, nargs='+', default=None, help='If specified, only visualizes (default: visualize all directions; used for visualizations only)')
    parser.add_argument('--n_samples', type=int, default=6, help='Number of samples to visualize per-direction (used for visualizations only)')
    parser.add_argument('--nz_per_vid', type=int, default=8, help='Number of directions to visualize per mp4 video (used for visualizations only)')
    parser.add_argument('--val_minibatch_size', type=int, default=80, help='Batch size to use when generating visuals of directions (used for visualizations only)')
    # --- Dataset / dataloader options (inherited from BigGAN) ---
    parser.add_argument('--dataset', type=str, default='I128_hdf5', help='Which Dataset to train on, out of I128, I256, C10, C100;Append "_hdf5" to use the hdf5 version for ISLVRC (default: %(default)s)')
    parser.add_argument('--augment', action='store_true', default=False, help='Augment with random crops and flips (default: %(default)s)')
    parser.add_argument('--num_workers', type=int, default=8, help='Number of dataloader workers; consider using less for HDF5 (default: %(default)s)')
    parser.add_argument('--no_pin_memory', action='store_false', dest='pin_memory', default=True, help='Pin data into memory through dataloader? (default: %(default)s)')
    parser.add_argument('--shuffle', action='store_true', default=False, help='Shuffle the data (strongly recommended)? (default: %(default)s)')
    parser.add_argument('--load_in_mem', action='store_true', default=False, help='Load all data into memory? (default: %(default)s)')
    parser.add_argument('--use_multiepoch_sampler', action='store_true', default=False, help='Use the multi-epoch sampler for dataloader? (default: %(default)s)')
    # --- Model architecture hyperparameters ---
    parser.add_argument('--model', type=str, default='BigGAN', help='Name of the model module (default: %(default)s)')
    parser.add_argument('--G_param', type=str, default='SN', help='Parameterization style to use for G, spectral norm (SN) or SVD (SVD) or None (default: %(default)s)')
    parser.add_argument('--D_param', type=str, default='SN', help='Parameterization style to use for D, spectral norm (SN) or SVD (SVD) or None (default: %(default)s)')
    parser.add_argument('--G_ch', type=int, default=64, help='Channel multiplier for G (default: %(default)s)')
    parser.add_argument('--D_ch', type=int, default=64, help='Channel multiplier for D (default: %(default)s)')
    parser.add_argument('--G_depth', type=int, default=1, help='Number of resblocks per stage in G? (default: %(default)s)')
    parser.add_argument('--D_depth', type=int, default=1, help='Number of resblocks per stage in D? (default: %(default)s)')
    parser.add_argument('--D_thin', action='store_false', dest='D_wide', default=True, help='Use the SN-GAN channel pattern for D? (default: %(default)s)')
    parser.add_argument('--G_shared', action='store_true', default=False, help='Use shared embeddings in G? (default: %(default)s)')
    parser.add_argument('--shared_dim', type=int, default=0, help='Gs shared embedding dimensionality; if 0, will be equal to dim_z. (default: %(default)s)')
    parser.add_argument('--dim_z', type=int, default=128, help='Noise dimensionality: %(default)s)')
    parser.add_argument('--z_var', type=float, default=1.0, help='Noise variance: %(default)s)')
    parser.add_argument('--hier', action='store_true', default=False, help='Use hierarchical z in G? (default: %(default)s)')
    parser.add_argument('--cross_replica', action='store_true', default=False, help='Cross_replica batchnorm in G?(default: %(default)s)')
    parser.add_argument('--mybn', action='store_true', default=False, help='Use my batchnorm (which supports standing stats?) %(default)s)')
    parser.add_argument('--G_nl', type=str, default='relu', help='Activation function for G (default: %(default)s)')
    parser.add_argument('--D_nl', type=str, default='relu', help='Activation function for D (default: %(default)s)')
    parser.add_argument('--G_attn', type=str, default='64', help='What resolutions to use attention on for G (underscore separated) (default: %(default)s)')
    parser.add_argument('--D_attn', type=str, default='64', help='What resolutions to use attention on for D (underscore separated) (default: %(default)s)')
    parser.add_argument('--norm_style', type=str, default='bn', help='Normalizer style for G, one of bn [batchnorm], in [instancenorm], ln [layernorm], gn [groupnorm] (default: %(default)s)')
    # --- Initialization and optimizer settings ---
    parser.add_argument('--seed', type=int, default=0, help='Random seed to use; affects both initialization and dataloading. (default: %(default)s)')
    parser.add_argument('--G_init', type=str, default='ortho', help='Init style to use for G (default: %(default)s)')
    parser.add_argument('--D_init', type=str, default='ortho', help='Init style to use for D(default: %(default)s)')
    parser.add_argument('--skip_init', action='store_true', default=False, help='Skip initialization, ideal for testing when ortho init was used (default: %(default)s)')
    parser.add_argument('--G_lr', type=float, default=5e-05, help='Learning rate to use for Generator (default: %(default)s)')
    parser.add_argument('--D_lr', type=float, default=0.0002, help='Learning rate to use for Discriminator (default: %(default)s)')
    parser.add_argument('--G_B1', type=float, default=0.0, help='Beta1 to use for Generator (default: %(default)s)')
    parser.add_argument('--D_B1', type=float, default=0.0, help='Beta1 to use for Discriminator (default: %(default)s)')
    parser.add_argument('--G_B2', type=float, default=0.999, help='Beta2 to use for Generator (default: %(default)s)')
    parser.add_argument('--D_B2', type=float, default=0.999, help='Beta2 to use for Discriminator (default: %(default)s)')
    # --- Batching and training control ---
    parser.add_argument('--batch_size', type=int, default=64, help='Default overall batchsize (default: %(default)s)')
    parser.add_argument('--G_batch_size', type=int, default=0, help='Batch size to use for G; if 0, same as D (default: %(default)s)')
    parser.add_argument('--num_G_accumulations', type=int, default=1, help='Number of passes to accumulate Gs gradients over (default: %(default)s)')
    parser.add_argument('--num_D_steps', type=int, default=2, help='Number of D steps per G step (default: %(default)s)')
    parser.add_argument('--num_D_accumulations', type=int, default=1, help='Number of passes to accumulate Ds gradients over (default: %(default)s)')
    parser.add_argument('--split_D', action='store_true', default=False, help='Run D twice rather than concatenating inputs? (default: %(default)s)')
    parser.add_argument('--num_epochs', type=int, default=100, help='Number of epochs to train for (default: %(default)s)')
    parser.add_argument('--parallel', action='store_true', default=False, help='Train with multiple GPUs (default: %(default)s)')
    parser.add_argument('--G_fp16', action='store_true', default=False, help='Train with half-precision in G? (default: %(default)s)')
    parser.add_argument('--D_fp16', action='store_true', default=False, help='Train with half-precision in D? (default: %(default)s)')
    parser.add_argument('--D_mixed_precision', action='store_true', default=False, help='Train with half-precision activations but fp32 params in D? (default: %(default)s)')
    parser.add_argument('--G_mixed_precision', action='store_true', default=False, help='Train with half-precision activations but fp32 params in G? (default: %(default)s)')
    parser.add_argument('--accumulate_stats', action='store_true', default=False, help='Accumulate "standing" batchnorm stats? (default: %(default)s)')
    parser.add_argument('--num_standing_accumulations', type=int, default=16, help='Number of forward passes to use in accumulating standing stats? (default: %(default)s)')
    parser.add_argument('--G_eval_mode', action='store_true', default=False, help='Run G in eval mode (running/standing stats?) at sample/test time? (default: %(default)s)')
    # --- Checkpointing, evaluation, paths and naming ---
    parser.add_argument('--save_every', type=int, default=2000, help='Save every X iterations (default: %(default)s)')
    parser.add_argument('--num_save_copies', type=int, default=2, help='How many copies to save (default: %(default)s)')
    parser.add_argument('--num_best_copies', type=int, default=2, help='How many previous best checkpoints to save (default: %(default)s)')
    parser.add_argument('--which_best', type=str, default='IS', help='Which metric to use to determine when to save new "best"checkpoints, one of IS or FID (default: %(default)s)')
    parser.add_argument('--no_fid', action='store_true', default=False, help='Calculate IS only, not FID? (default: %(default)s)')
    parser.add_argument('--test_every', type=int, default=5000, help='Test every X iterations (default: %(default)s)')
    parser.add_argument('--num_inception_images', type=int, default=50000, help='Number of samples to compute inception metrics with (default: %(default)s)')
    parser.add_argument('--hashname', action='store_true', default=False, help='Use a hash of the experiment name instead of the full config (default: %(default)s)')
    parser.add_argument('--base_root', type=str, default='', help='Default location to store all weights, samples, data, and logs (default: %(default)s)')
    parser.add_argument('--data_root', type=str, default='data', help='Default location where data is stored (default: %(default)s)')
    parser.add_argument('--weights_root', type=str, default='weights', help='Default location to store weights (default: %(default)s)')
    parser.add_argument('--logs_root', type=str, default='logs', help='Default location to store logs (default: %(default)s)')
    parser.add_argument('--samples_root', type=str, default='samples', help='Default location to store samples (default: %(default)s)')
    parser.add_argument('--pbar', type=str, default='mine', help='Type of progressbar to use; one of "mine" or "tqdm" (default: %(default)s)')
    parser.add_argument('--name_suffix', type=str, default='', help='Suffix for experiment name for loading weights for sampling (consider "best0") (default: %(default)s)')
    parser.add_argument('--experiment_name', type=str, default='', help='Optionally override the automatic experiment naming with this arg. (default: %(default)s)')
    parser.add_argument('--config_from_name', action='store_true', default=False, help='Use a hash of the experiment name instead of the full config (default: %(default)s)')
    # --- Exponential moving average of G's weights ---
    parser.add_argument('--ema', action='store_true', default=False, help='Keep an ema of Gs weights? (default: %(default)s)')
    parser.add_argument('--ema_decay', type=float, default=0.9999, help='EMA decay rate (default: %(default)s)')
    parser.add_argument('--use_ema', action='store_true', default=False, help='Use the EMA parameters of G for evaluation? (default: %(default)s)')
    parser.add_argument('--ema_start', type=int, default=0, help='When to start updating the EMA weights (default: %(default)s)')
    # --- Numerics, spectral norm / singular value tracking, ortho reg ---
    parser.add_argument('--adam_eps', type=float, default=1e-08, help='epsilon value to use for Adam (default: %(default)s)')
    parser.add_argument('--BN_eps', type=float, default=1e-05, help='epsilon value to use for BatchNorm (default: %(default)s)')
    parser.add_argument('--SN_eps', type=float, default=1e-08, help='epsilon value to use for Spectral Norm(default: %(default)s)')
    parser.add_argument('--num_G_SVs', type=int, default=1, help='Number of SVs to track in G (default: %(default)s)')
    parser.add_argument('--num_D_SVs', type=int, default=1, help='Number of SVs to track in D (default: %(default)s)')
    parser.add_argument('--num_G_SV_itrs', type=int, default=1, help='Number of SV itrs in G (default: %(default)s)')
    parser.add_argument('--num_D_SV_itrs', type=int, default=1, help='Number of SV itrs in D (default: %(default)s)')
    parser.add_argument('--G_ortho', type=float, default=0.0, help='Modified ortho reg coefficient in G(default: %(default)s)')
    parser.add_argument('--D_ortho', type=float, default=0.0, help='Modified ortho reg coefficient in D (default: %(default)s)')
    # --- Miscellaneous training / resume / spectra logging ---
    parser.add_argument('--toggle_grads', action='store_true', default=True, help='Toggle D and Gs "requires_grad" settings when not training them? (default: %(default)s)')
    parser.add_argument('--which_train_fn', type=str, default='GAN', help='How2trainyourbois (default: %(default)s)')
    parser.add_argument('--load_weights', type=str, default='', help='Suffix for which weights to load (e.g. best0, copy0) (default: %(default)s)')
    parser.add_argument('--resume', action='store_true', default=False, help='Resume training? (default: %(default)s)')
    parser.add_argument('--logstyle', type=str, default='%3.3e', help='What style to use when logging training metrics?One of: %#.#f/ %#.#e (float/exp, text),pickle (python pickle),npz (numpy zip),mat (MATLAB .mat file) (default: %(default)s)')
    parser.add_argument('--log_G_spectra', action='store_true', default=False, help='Log the top 3 singular values in each SN layer in G? (default: %(default)s)')
    parser.add_argument('--log_D_spectra', action='store_true', default=False, help='Log the top 3 singular values in each SN layer in D? (default: %(default)s)')
    parser.add_argument('--sv_log_interval', type=int, default=10, help='Iteration interval for logging singular values (default: %(default)s)')
    return parser
class SessionManager(QObject):
    """Manages named browsing sessions stored as YAML files under a base path.

    Attributes:
        current: Name of the currently loaded session, or None.
        did_load: True once a session has been loaded.
        save_autosave: Throttled wrapper around _save_autosave.
        _base_path: Directory where session files live.
        _last_window_session: Snapshot of all windows, taken via
            save_last_window_session() and replayed by save(last_window=True).
    """

    def __init__(self, base_path, parent=None):
        super().__init__(parent)
        self.current: Optional[str] = None
        self._base_path = base_path
        self._last_window_session = None
        self.did_load = False
        # Throttle autosaves to at most one per minute.
        self.save_autosave = throttle.Throttle(self._save_autosave, (60 * 1000))

    def _get_session_path(self, name, check_exists=False):
        """Resolve a session name (or absolute path) to a session file path.

        Raises:
            SessionNotFoundError: If check_exists is True and the resolved
                file under the base path does not exist.
        """
        path = os.path.expanduser(name)
        if (os.path.isabs(path) and ((not check_exists) or os.path.exists(path))):
            # An absolute path is used verbatim (if it exists when required).
            return path
        else:
            # Otherwise treat the name as a session under the base path.
            path = os.path.join(self._base_path, (name + '.yml'))
            if (check_exists and (not os.path.exists(path))):
                raise SessionNotFoundError(path)
            return path

    def exists(self, name):
        """Return True if a session with the given name (or path) exists."""
        try:
            self._get_session_path(name, check_exists=True)
        except SessionNotFoundError:
            return False
        else:
            return True

    def _save_tab_item(self, tab, idx, item):
        """Serialize one history item of a tab into a JSON-able dict."""
        data: _JsonType = {'url': bytes(item.url().toEncoded()).decode('ascii')}
        if item.title():
            data['title'] = item.title()
        elif (tab.history.current_idx() == idx):
            # The current item may lack a title; fall back to the tab title.
            data['title'] = tab.title()
        else:
            data['title'] = data['url']
        if (item.originalUrl() != item.url()):
            # Preserve the pre-redirect URL separately.
            encoded = item.originalUrl().toEncoded()
            data['original-url'] = bytes(encoded).decode('ascii')
        if (tab.history.current_idx() == idx):
            data['active'] = True
        try:
            user_data = item.userData()
        except AttributeError:
            # Not every backend's history item exposes userData().
            user_data = None
        data['last_visited'] = item.lastVisited().toString(Qt.DateFormat.ISODate)
        if (tab.history.current_idx() == idx):
            # For the active item, read zoom/scroll from the live tab...
            pos = tab.scroller.pos_px()
            data['zoom'] = tab.zoom.factor()
            data['scroll-pos'] = {'x': pos.x(), 'y': pos.y()}
        elif (user_data is not None):
            # ...otherwise restore them from the stored per-item user data.
            if ('zoom' in user_data):
                data['zoom'] = user_data['zoom']
            if ('scroll-pos' in user_data):
                pos = user_data['scroll-pos']
                data['scroll-pos'] = {'x': pos.x(), 'y': pos.y()}
        data['pinned'] = tab.data.pinned
        return data

    def _save_tab(self, tab, active, with_history=True):
        """Serialize one tab (optionally with its full history) to a dict."""
        data: _JsonType = {'history': []}
        if active:
            data['active'] = True
        # Without history, keep only the currently shown item.
        history = (tab.history if with_history else [tab.history.current_item()])
        for (idx, item) in enumerate(history):
            qtutils.ensure_valid(item)
            item_data = self._save_tab_item(tab, idx, item)
            if (not item.url().isValid()):
                log.sessions.debug(f'Skipping invalid history item: {item}')
                continue
            if ((item.url().scheme() == 'qute') and (item.url().host() == 'back')):
                # Skip qute://back placeholder entries; if such an entry was
                # active, mark the previous real entry active instead.
                if (item_data.get('active', False) and data['history']):
                    data['history'][(- 1)]['active'] = True
            else:
                data['history'].append(item_data)
        return data

    def _save_all(self, *, only_window=None, with_private=False, with_history=True):
        """Serialize all (or one) window's tabs into a session dict."""
        data: _JsonType = {'windows': []}
        if (only_window is not None):
            winlist: Iterable[int] = [only_window]
        else:
            winlist = objreg.window_registry
        for win_id in sorted(winlist):
            tabbed_browser = objreg.get('tabbed-browser', scope='window', window=win_id)
            main_window = objreg.get('main-window', scope='window', window=win_id)
            # Skip windows whose C++ object is already gone.
            if sip.isdeleted(main_window):
                continue
            # Private windows are only saved when explicitly requested.
            if (tabbed_browser.is_private and (not with_private)):
                continue
            win_data: _JsonType = {}
            active_window = objects.qapp.activeWindow()
            if (getattr(active_window, 'win_id', None) == win_id):
                win_data['active'] = True
            win_data['geometry'] = bytes(main_window.saveGeometry())
            win_data['tabs'] = []
            if tabbed_browser.is_private:
                win_data['private'] = True
            for (i, tab) in enumerate(tabbed_browser.widgets()):
                active = (i == tabbed_browser.widget.currentIndex())
                win_data['tabs'].append(self._save_tab(tab, active, with_history=with_history))
            data['windows'].append(win_data)
        return data

    def _get_session_name(self, name):
        """Expand the `default` sentinel / None to a concrete session name.

        Falls back to the configured default name, then the currently loaded
        session, then the literal 'default'.
        """
        # NOTE(review): `default` here is presumably a module-level sentinel
        # object -- confirm it is in scope in the full file.
        if (name is default):
            name = config.val.session.default_name
            if (name is None):
                if (self.current is not None):
                    name = self.current
                else:
                    name = 'default'
        return name

    def save(self, name, last_window=False, load_next_time=False, only_window=None, with_private=False, with_history=True):
        """Save a session to disk and return its resolved name.

        Args:
            name: Session name (may be the `default` sentinel).
            last_window: Save the snapshot taken before the last window closed.
            load_next_time: Record the session name to load it on next start.
            only_window: Restrict saving to a single window id.
            with_private: Include private windows.
            with_history: Include full per-tab history.

        Raises:
            SessionError: On I/O, encoding or YAML errors.
        """
        name = self._get_session_name(name)
        path = self._get_session_path(name)
        log.sessions.debug('Saving session {} to {}...'.format(name, path))
        if last_window:
            data = self._last_window_session
            if (data is None):
                log.sessions.error('last_window_session is None while saving!')
                return None
        else:
            data = self._save_all(only_window=only_window, with_private=with_private, with_history=with_history)
        log.sessions.vdebug('Saving data: {}'.format(data))
        try:
            with qtutils.savefile_open(path) as f:
                utils.yaml_dump(data, f)
        except (OSError, UnicodeEncodeError, yaml.YAMLError) as e:
            raise SessionError(e)
        if load_next_time:
            configfiles.state['general']['session'] = name
        return name

    def _save_autosave(self):
        """Save the '_autosave' session, logging (not raising) on failure."""
        try:
            self.save('_autosave')
        except SessionError as e:
            log.sessions.error('Failed to save autosave session: {}'.format(e))

    def delete_autosave(self):
        """Cancel pending throttled saves and delete the autosave session."""
        self.save_autosave.cancel()
        try:
            self.delete('_autosave')
        except SessionNotFoundError:
            # Nothing to delete -- fine.
            pass
        except SessionError as e:
            log.sessions.error('Failed to delete autosave session: {}'.format(e))

    def save_last_window_session(self):
        """Snapshot all windows for later use by save(last_window=True)."""
        self._last_window_session = self._save_all()

    def _load_tab(self, new_tab, data):
        """Rebuild one tab's history items from serialized session data.

        With session.lazy_restore enabled, the active entry is replaced by a
        qute://back placeholder so the real page only loads when focused.
        """
        entries = []
        lazy_load: MutableSequence[_JsonType] = []
        # lazy_index marks where the placeholder was inserted; entries before
        # it come from the original history, then the placeholder(s), then the
        # rest of the original history.
        lazy_index = len(data['history'])
        gen = itertools.chain(itertools.takewhile((lambda _: (not lazy_load)), enumerate(data['history'])), enumerate(lazy_load), itertools.dropwhile((lambda i: (i[0] < lazy_index)), enumerate(data['history'])))
        for (i, histentry) in gen:
            user_data = {}
            if ('zoom' in data):
                # Zoom/scroll stored at tab level (old format)...
                user_data['zoom'] = data['zoom']
            elif ('zoom' in histentry):
                # ...or per history entry (new format).
                user_data['zoom'] = histentry['zoom']
            if ('scroll-pos' in data):
                pos = data['scroll-pos']
                user_data['scroll-pos'] = QPoint(pos['x'], pos['y'])
            elif ('scroll-pos' in histentry):
                pos = histentry['scroll-pos']
                user_data['scroll-pos'] = QPoint(pos['x'], pos['y'])
            if ('pinned' in histentry):
                new_tab.data.pinned = histentry['pinned']
            if (config.val.session.lazy_restore and histentry.get('active', False) and (not histentry['url'].startswith('qute://back'))):
                # Swap the active entry for a lazy qute://back placeholder.
                lazy_index = (i + 1)
                lazy_load.append({'title': histentry['title'], 'url': ('qute://back#' + urllib.parse.quote(histentry['title'])), 'active': True})
                histentry['active'] = False
            active = histentry.get('active', False)
            url = QUrl.fromEncoded(histentry['url'].encode('ascii'))
            if ('original-url' in histentry):
                orig_url = QUrl.fromEncoded(histentry['original-url'].encode('ascii'))
            else:
                orig_url = url
            if histentry.get('last_visited'):
                last_visited: Optional[QDateTime] = QDateTime.fromString(histentry.get('last_visited'), Qt.DateFormat.ISODate)
            else:
                last_visited = None
            entry = TabHistoryItem(url=url, original_url=orig_url, title=histentry['title'], active=active, user_data=user_data, last_visited=last_visited)
            entries.append(entry)
            if active:
                new_tab.title_changed.emit(histentry['title'])
        try:
            new_tab.history.private_api.load_items(entries)
        except ValueError as e:
            raise SessionError(e)

    def _load_window(self, win):
        """Create a MainWindow from serialized window data and show it."""
        window = mainwindow.MainWindow(geometry=win['geometry'], private=win.get('private', None))
        tabbed_browser = objreg.get('tabbed-browser', scope='window', window=window.win_id)
        tab_to_focus = None
        for (i, tab) in enumerate(win['tabs']):
            new_tab = tabbed_browser.tabopen(background=False)
            self._load_tab(new_tab, tab)
            if tab.get('active', False):
                tab_to_focus = i
            if new_tab.data.pinned:
                new_tab.set_pinned(True)
        if (tab_to_focus is not None):
            tabbed_browser.widget.setCurrentIndex(tab_to_focus)
        window.show()
        if win.get('active', False):
            # Activate after the event loop settles so the window exists.
            QTimer.singleShot(0, tabbed_browser.widget.activateWindow)

    def load(self, name, temp=False):
        """Load a session by name; set it current unless temp or internal.

        Raises:
            SessionError: On unreadable/invalid session files, or private
                windows in single-process mode.
        """
        path = self._get_session_path(name, check_exists=True)
        try:
            with open(path, encoding='utf-8') as f:
                data = utils.yaml_load(f)
        except (OSError, UnicodeDecodeError, yaml.YAMLError) as e:
            raise SessionError(e)
        log.sessions.debug('Loading session {} from {}...'.format(name, path))
        if (data is None):
            raise SessionError('Got empty session file')
        if qtutils.is_single_process():
            if any((win.get('private') for win in data['windows'])):
                raise SessionError("Can't load a session with private windows in single process mode.")
        for win in data['windows']:
            self._load_window(win)
        if data['windows']:
            self.did_load = True
        # Internal sessions (leading underscore) never become "current".
        if ((not name.startswith('_')) and (not temp)):
            self.current = name

    def delete(self, name):
        """Delete the session file for the given name.

        Raises:
            SessionNotFoundError: If no such session exists.
            SessionError: If removing the file fails.
        """
        path = self._get_session_path(name, check_exists=True)
        try:
            os.remove(path)
        except OSError as e:
            raise SessionError(e)

    def list_sessions(self):
        """Return the sorted names of all sessions under the base path."""
        sessions = []
        for filename in os.listdir(self._base_path):
            (base, ext) = os.path.splitext(filename)
            if (ext == '.yml'):
                sessions.append(base)
        return sorted(sessions)
def lisp_to_nested_expression(lisp_string):
    """Parse a whitespace-tokenized lisp expression into nested Python lists.

    Example: '(a (b c))' -> ['a', ['b', 'c']].
    """
    ancestors = []
    current = []
    for token in lisp_string.split():
        # Each leading '(' opens a new child list and descends into it.
        while token[0] == '(':
            child = []
            current.append(child)
            ancestors.append(current)
            current = child
            token = token[1:]
        # The token itself (parens stripped) becomes an atom of the current list.
        current.append(token.replace(')', ''))
        # Each trailing ')' closes the current list and ascends one level.
        while token[(- 1)] == ')':
            current = ancestors.pop()
            token = token[:(- 1)]
    # The synthetic root holds exactly the one top-level expression.
    return current[0]
def gen_src1_dep_taken_test():
    """Taken-branch tests with src1 dependency distances 5 down to 0.

    As the distance shrinks by one, the nop count grows by one, so each case
    is (distance, 'bne', 7, 6 - distance, taken=True).
    """
    return [
        gen_br2_src1_dep_test(distance, 'bne', 7, 6 - distance, True)
        for distance in range(5, -1, -1)
    ]
def load_env_from_file(filename):
    """Yield (name, value) pairs parsed from an environment file.

    Blank lines and '#' comments are skipped; every other line must contain
    an '=' and is handed to parse_var().

    Raises:
        FileNotFoundError: If the file does not exist.
        SyntaxError: On a non-comment line without '='.
    """
    if not os.path.exists(filename):
        raise FileNotFoundError('Environment file {} does not exist.'.format(filename))
    with open(filename) as env_file:
        for line_no, raw_line in enumerate(env_file, start=1):
            stripped = raw_line.strip()
            if not stripped or stripped.startswith('#'):
                continue
            if '=' not in stripped:
                raise SyntaxError('Invalid environment file syntax in {} at line {}.'.format(filename, line_no))
            name, value = parse_var(stripped)
            yield (name, value)
class DenseNet(nn.Module):
    """DenseNet for CIFAR-sized inputs (final 4x4 average pool).

    Optionally substitutes deconv/delinear/channel-deconv layers for the
    standard conv/linear/batch-norm ones.
    NOTE(review): `channel_deconv` only takes effect when `deconv` is truthy;
    on the default conv path it is silently ignored -- preserved as-is,
    confirm intended.
    """

    def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=10, deconv=None, delinear=None, channel_deconv=None):
        super(DenseNet, self).__init__()
        self.growth_rate = growth_rate
        num_planes = 2 * growth_rate
        # Stem: plain conv unless a deconv factory was supplied.
        if deconv:
            self.conv1 = deconv(3, num_planes, kernel_size=3, padding=1)
        else:
            self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)
        # Three dense stages, each followed by a compressing transition.
        # setattr keeps the exact dense1/trans1... registration order.
        for stage in (1, 2, 3):
            setattr(self, 'dense%d' % stage,
                    self._make_dense_layers(block, num_planes, nblocks[stage - 1], deconv))
            num_planes += nblocks[stage - 1] * growth_rate
            out_planes = int(math.floor(num_planes * reduction))
            setattr(self, 'trans%d' % stage, Transition(num_planes, out_planes, deconv))
            num_planes = out_planes
        # Final dense stage has no transition after it.
        self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3], deconv)
        num_planes += nblocks[3] * growth_rate
        if not deconv:
            self.bn = nn.BatchNorm2d(num_planes)
        elif channel_deconv:
            self.channel_deconv = channel_deconv()
        if delinear:
            self.linear = delinear(num_planes, num_classes)
        else:
            self.linear = nn.Linear(num_planes, num_classes)

    def _make_dense_layers(self, block, in_planes, nblock, deconv):
        """Stack nblock dense blocks; each widens its input by growth_rate."""
        return nn.Sequential(*[
            block(in_planes + i * self.growth_rate, self.growth_rate, deconv)
            for i in range(nblock)
        ])

    def forward(self, x):
        """Stem, three dense+transition stages, dense4, head, classifier."""
        features = self.conv1(x)
        for stage in (1, 2, 3):
            dense = getattr(self, 'dense%d' % stage)
            trans = getattr(self, 'trans%d' % stage)
            features = trans(dense(features))
        features = self.dense4(features)
        if hasattr(self, 'bn'):
            features = self.bn(features)
        features = F.relu(features)
        if hasattr(self, 'channel_deconv'):
            features = self.channel_deconv(features)
        features = F.avg_pool2d(features, 4)
        features = features.view(features.size(0), -1)
        return self.linear(features)
def get_data(name):
    """Collect sentence lists for a final summary plus user/agent variants.

    Reads `<final_dir>/<name>.txt`, then the four fixed variant files from
    `user_dir` and `agent_dir`.

    Returns:
        [final_data, user_data, agent_data] — three lists of get_sents() results.
    """
    # BUG FIX: these accumulators were never initialized (NameError at runtime,
    # or unintended accumulation into globals); the old unused `data = []` is gone.
    final_data = []
    user_data = []
    agent_data = []
    sents = get_sents(((final_dir + name) + '.txt'))
    final_data.append(sents)
    user_files = ['PGN_both', 'PGN_only', 'fast_rl_both', 'fast_rl_only']
    # Loop variables renamed so they no longer clobber the `name` parameter.
    for fname in user_files:
        sents = get_sents(((user_dir + fname) + '.txt'))
        user_data.append(sents)
    agent_files = ['PGN_both', 'PGN_only', 'fast_rl_both', 'fast_rl_only']
    for fname in agent_files:
        sents = get_sents(((agent_dir + fname) + '.txt'))
        agent_data.append(sents)
    return [final_data, user_data, agent_data]
class ScarletC(nn.Module):
    """Scarlet-C NAS image classifier built from inverted-residual blocks."""

    def __init__(self, n_class=1000, input_size=224):
        super(ScarletC, self).__init__()
        assert input_size % 32 == 0
        # Each entry: [expand_ratio, out_channels, kernel, stride, use_se];
        # 'identity' marks a pass-through slot in the searched architecture.
        mb_config = [
            [3, 32, 5, 2, True], [3, 32, 3, 1, True], [3, 40, 5, 2, True],
            'identity', 'identity', [3, 40, 3, 1, False], [6, 80, 7, 2, True],
            [3, 80, 3, 1, True], [3, 80, 3, 1, True], [3, 80, 5, 1, False],
            [3, 96, 7, 1, True], [3, 96, 7, 1, False], [3, 96, 3, 1, True],
            [3, 96, 7, 1, True], [3, 192, 3, 2, True], 'identity',
            [6, 192, 3, 1, True], [6, 192, 7, 1, True], [6, 320, 5, 1, True],
        ]
        self.last_channel = 1280
        self.stem = stem(3, 32, 2)
        self.separable_conv = separable_conv(32, 16)
        in_ch = 16
        blocks = []
        for cfg in mb_config:
            if cfg == 'identity':
                blocks.append(Identity())
            else:
                expand, out_ch, kernel, stride, use_se = cfg
                blocks.append(InvertedResidual(in_ch, out_ch, kernel, stride, expand_ratio=expand, is_use_se=use_se))
                in_ch = out_ch
        self.mb_module = nn.Sequential(*blocks)
        self.conv_before_pooling = conv_before_pooling(in_ch, self.last_channel)
        self.classifier = nn.Linear(self.last_channel, n_class)
        self._initialize_weights()

    def forward(self, x):
        x = self.separable_conv(self.stem(x))
        x = self.conv_before_pooling(self.mb_module(x))
        # Global average pool over the two spatial dimensions.
        x = x.mean(3).mean(2)
        return self.classifier(x)

    def _initialize_weights(self):
        """He init for convs, unit/zero for BN, uniform fan-in init for linear."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = (module.kernel_size[0] * module.kernel_size[1]) * module.out_channels
                module.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
                if module.bias is not None:
                    module.bias.data.zero_()
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()
            elif isinstance(module, nn.Linear):
                bound = 1.0 / math.sqrt(module.weight.size(0))
                module.weight.data.uniform_(-bound, bound)
                module.bias.data.zero_()
def _parse_output(pipe: Optional[IO[bytes]]) -> tuple[(str, list[str])]:
failed_tests = []
conformance = ''
test_name = ''
for line in iter(pipe.readline, b''):
line = line.decode('utf-8').strip('\r\n')
if (not line):
continue
if ('Test [' in line):
test_name = line
if (('failed:' in line) or ('timed out:' in line)):
failed_tests.append(test_name)
if ('tests passed' in line):
conformance = line
logging.info(line)
return (conformance, failed_tests) |
class Label(object):
    """An anchored HTML text label rendered via Qt's QTextDocument.

    Supports an anchor code (horizontal L/R/C plus vertical B/T/M, e.g. 'BL'),
    optional clamping of the text rect into a `keep_inside` rect, and an
    optional pointer "head" polygon drawn toward the anchor point.
    """

    def __init__(self, x, y, label_str, anchor='BL', style=None, keep_inside=None, head=None):
        # `label_str` may contain HTML; it is wrapped in a styled <span>.
        if (style is None):
            style = TextStyle()
        text = qg.QTextDocument()
        font = style.qt_font
        if font:
            text.setDefaultFont(font)
        color = style.color.qt_color
        text.setDefaultStyleSheet(('span { color: %s; }' % color.name()))
        text.setHtml(('<span>%s</span>' % label_str))
        self.position = (x, y)
        self.anchor = anchor
        self.text = text
        self.style = style
        self.keep_inside = keep_inside
        # head appears to be (height, width) in font-point units — TODO confirm
        if head:
            self.head = head
        else:
            self.head = (0.0, 0.0)

    def draw(self, p):
        """Render the label with painter *p*.

        Offsets the text rect per the anchor, clamps it into keep_inside,
        builds the pointer polygon when a head is configured, paints the
        outline/background, then draws the text contents.
        """
        s = self.text.size()
        rect = qc.QRectF(0.0, 0.0, s.width(), s.height())
        (tx, ty) = (x, y) = self.position
        anchor = self.anchor
        pxs = self.text.defaultFont().pointSize()
        # Pointer-head extents scaled by font size: oy = height, ox = half-width.
        oy = (self.head[0] * pxs)
        ox = ((self.head[1] / 2.0) * pxs)
        # Horizontal anchoring; the min(...) keeps the head tip on the rect.
        if ('L' in anchor):
            tx -= min((ox * 2), (rect.width() / 2.0))
        elif ('R' in anchor):
            tx -= (rect.width() - min((ox * 2.0), (rect.width() / 2.0)))
        elif ('C' in anchor):
            tx -= (rect.width() / 2.0)
        # Vertical anchoring; B places the rect (plus head height) above y.
        if ('B' in anchor):
            ty -= (rect.height() + oy)
        elif ('T' in anchor):
            ty += oy
        elif ('M' in anchor):
            ty -= (rect.height() / 2.0)
        rect.translate(tx, ty)
        # Clamp the rect into the allowed region, edge by edge.
        if self.keep_inside:
            keep_inside = self.keep_inside
            if (rect.top() < keep_inside.top()):
                rect.moveTop(keep_inside.top())
            if (rect.bottom() > keep_inside.bottom()):
                rect.moveBottom(keep_inside.bottom())
            if (rect.left() < keep_inside.left()):
                rect.moveLeft(keep_inside.left())
            if (rect.right() > keep_inside.right()):
                rect.moveRight(keep_inside.right())
        poly = None
        if (self.head[0] != 0.0):
            (l, r, t, b) = (rect.left(), rect.right(), rect.top(), rect.bottom())
            # NOTE(review): the tuple below deliberately reuses the names b/t:
            # `a` becomes the edge nearest the anchor point and `b` the far edge.
            if ('T' in anchor):
                (a, b) = (t, b)
            elif ('B' in anchor):
                (a, b) = (b, t)
            elif ('M' in anchor):
                assert False, 'label cannot have head with M alignment'
            # Head tip at (x, y); base corners clamped onto the near edge.
            (c1, c2) = (lim(l, (x - ox), r), lim(l, (x + ox), r))
            px = (l, c1, x, c2, r, r, l)
            py = (a, a, y, a, a, b, b)
            poly = make_QPolygonF(px, py)
        tx = rect.left()
        ty = rect.top()
        # Paint outline/background behind the text, restoring painter state after.
        if (self.style.outline or self.style.background_color):
            oldpen = p.pen()
            oldbrush = p.brush()
            if (not self.style.outline):
                p.setPen(qg.QPen(qc.Qt.NoPen))
            p.setBrush(self.style.background_color.qt_color)
            if poly:
                p.drawPolygon(poly)
            else:
                p.drawRect(rect)
            if self.style.background_color:
                p.fillRect(rect, self.style.background_color.qt_color)
            p.setPen(oldpen)
            p.setBrush(oldbrush)
        # QTextDocument draws at the origin, so translate to the rect and back.
        p.translate(tx, ty)
        self.text.drawContents(p)
        p.translate((- tx), (- ty))
def check_kill(session):
    """Return True when the scansweep metadata row for *session* has kill set to 'True'."""
    conn = get_database_conn()
    cursor = query_execute_wrapper(conn, query_string='SELECT kill FROM scansweep_metadata WHERE session=?', query_list=[session], no_return=False)
    row = cursor.fetchone()
    # The flag is stored as the string 'True'/'False'.
    return row['kill'] == 'True'
class InvertedResidual(nn.Module):
    """MobileNetV2-style inverted residual: optional pointwise expansion,
    depthwise 3x3 conv, then a linear pointwise projection; the input is
    added back when stride is 1 and channel counts match."""

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]
        hidden_dim = round(inp * expand_ratio)
        # Residual shortcut only when the block preserves shape.
        self.use_res_connect = (self.stride == 1) and (inp == oup)
        layers = []
        if expand_ratio != 1:
            # Pointwise expansion to hidden_dim channels.
            layers += [
                nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU(inplace=True),
            ]
        layers += [
            # Depthwise 3x3.
            nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
            nn.BatchNorm2d(hidden_dim),
            nn.ReLU(inplace=True),
            # Linear pointwise projection (no activation).
            nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
            nn.BatchNorm2d(oup),
        ]
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv(x)
        return x + out if self.use_res_connect else out
def sample_from_model(sample, model, device, categories_num, diffusion):
    """Run the reverse diffusion chain to sample boxes and categories.

    Starts from Gaussian noise for the boxes and the "empty" category id
    (categories_num - 1), then denoises jointly over all continuous steps.

    Returns:
        (predicted_original_boxes, predicted_categories).
    """
    shape = sample['box_cond'].shape
    model.eval()
    boxes = torch.randn(*shape, dtype=torch.float32, device=device)
    cats = (categories_num - 1) * torch.ones((shape[0], shape[1]), dtype=torch.long, device=device)
    noisy_batch = {'box': boxes, 'cat': cats}
    # Walk timesteps from T-1 down to 0.
    for step in reversed(range(diffusion.num_cont_steps)):
        timesteps = torch.tensor([step] * shape[0], device=device)
        with torch.no_grad():
            bbox_pred, cat_pred = model(sample, noisy_batch, timesteps=timesteps)
        bbox_pred, cat_pred = diffusion.step_jointly(bbox_pred, {'cat': cat_pred}, timestep=torch.tensor([step], device=device), sample=noisy_batch['box'])
        noisy_batch['box'] = bbox_pred.prev_sample
        noisy_batch['cat'] = cat_pred['cat']
    return (bbox_pred.pred_original_sample, cat_pred['cat'])
class RepositoryGCWorker(QueueWorker):
    """Queue worker that purges repositories marked for deletion, guarded by
    a global lock so only one large GC runs at a time."""

    def process_queue_item(self, job_details):
        # Lock TTL covers the GC timeout plus padding so the lock cannot
        # expire while a legitimate GC is still running.
        ttl = REPOSITORY_GC_TIMEOUT + LOCK_TIMEOUT_PADDING
        try:
            with GlobalLock('LARGE_GARBAGE_COLLECTION', lock_ttl=ttl):
                self._perform_gc(job_details)
        except LockNotAcquiredException:
            # Another worker holds the lock; back off and retry later.
            logger.debug('Could not acquire global lock for garbage collection')
            raise WorkerSleepException

    def _perform_gc(self, job_details):
        logger.debug('Got repository GC queue item: %s', job_details)
        try:
            marker = database.DeletedRepository.get(id=job_details['marker_id'])
        except database.DeletedRepository.DoesNotExist:
            # Marker already gone — nothing to purge.
            logger.debug('Found no matching delete repo marker: %s', job_details)
            return
        logger.debug('Purging repository %s', marker.repository)
        if not model.gc.purge_repository(marker.repository):
            raise Exception('GC interrupted; will retry')
class PythonFileRunnerTest(unittest.TestCase):
    """Tests for rope's module runner: each test writes a small script whose
    get_text() result is saved to output.txt, runs it via pycore.run_module,
    and checks the produced output file."""

    def setUp(self):
        super().setUp()
        self.project = testutils.sample_project()
        self.pycore = self.project.pycore

    def tearDown(self):
        testutils.remove_project(self.project)
        super().tearDown()

    def make_sample_python_file(self, file_path, get_text_function_source=None):
        """Create a script at file_path that writes get_text() to output.txt."""
        self.project.root.create_file(file_path)
        file = self.project.get_resource(file_path)
        if (not get_text_function_source):
            get_text_function_source = "def get_text():\n    return 'run'\n\n"
        file_content = (get_text_function_source + "output = open('output.txt', 'w')\noutput.write(get_text())\noutput.close()\n")
        file.write(file_content)

    def get_output_file_content(self, file_path):
        """Read output.txt from the script's directory; '' if it was never written."""
        try:
            output_path = ''
            last_slash = file_path.rfind('/')
            if (last_slash != (- 1)):
                output_path = file_path[0:(last_slash + 1)]
            file = self.project.get_resource((output_path + 'output.txt'))
            return file.read()
        except exceptions.ResourceNotFoundError:
            return ''

    def test_making_runner(self):
        file_path = 'sample.py'
        self.make_sample_python_file(file_path)
        file_resource = self.project.get_resource(file_path)
        runner = self.pycore.run_module(file_resource)
        runner.wait_process()
        self.assertEqual('run', self.get_output_file_content(file_path))

    def test_passing_arguments(self):
        file_path = 'sample.py'
        function_source = dedent('            import sys\n            def get_text():\n                return str(sys.argv[1:])\n            ')
        self.make_sample_python_file(file_path, function_source)
        file_resource = self.project.get_resource(file_path)
        runner = self.pycore.run_module(file_resource, args=['hello', 'world'])
        runner.wait_process()
        # endswith: sys.argv[0] (the script path) may precede the args repr.
        self.assertTrue(self.get_output_file_content(file_path).endswith("['hello', 'world']"))

    def test_passing_arguments_with_spaces(self):
        file_path = 'sample.py'
        function_source = dedent('            import sys\n            def get_text():\n                return str(sys.argv[1:])\n            ')
        self.make_sample_python_file(file_path, function_source)
        file_resource = self.project.get_resource(file_path)
        runner = self.pycore.run_module(file_resource, args=['hello world'])
        runner.wait_process()
        self.assertTrue(self.get_output_file_content(file_path).endswith("['hello world']"))

    def test_killing_runner(self):
        file_path = 'sample.py'
        # The script sleeps so we can kill it before it writes output.
        code = dedent("            def get_text():\n                import time\n                time.sleep(1)\n                return 'run'\n            ")
        self.make_sample_python_file(file_path, code)
        file_resource = self.project.get_resource(file_path)
        runner = self.pycore.run_module(file_resource)
        runner.kill_process()
        self.assertEqual('', self.get_output_file_content(file_path))

    def test_running_nested_files(self):
        self.project.root.create_folder('src')
        file_path = 'src/sample.py'
        self.make_sample_python_file(file_path)
        file_resource = self.project.get_resource(file_path)
        runner = self.pycore.run_module(file_resource)
        runner.wait_process()
        self.assertEqual('run', self.get_output_file_content(file_path))

    def test_setting_process_input(self):
        file_path = 'sample.py'
        code = dedent('            def get_text():\n                import sys\n                return sys.stdin.readline()\n            ')
        self.make_sample_python_file(file_path, code)
        temp_file_name = 'processtest.tmp'
        try:
            temp_file = open(temp_file_name, 'w')
            temp_file.write('input text\n')
            temp_file.close()
            file_resource = self.project.get_resource(file_path)
            stdin = open(temp_file_name)
            runner = self.pycore.run_module(file_resource, stdin=stdin)
            runner.wait_process()
            stdin.close()
            self.assertEqual('input text\n', self.get_output_file_content(file_path))
        finally:
            os.remove(temp_file_name)

    def test_setting_process_output(self):
        file_path = 'sample.py'
        code = dedent("            def get_text():\n                print('output text')\n                return 'run'\n            ")
        self.make_sample_python_file(file_path, code)
        temp_file_name = 'processtest.tmp'
        try:
            file_resource = self.project.get_resource(file_path)
            stdout = open(temp_file_name, 'w')
            runner = self.pycore.run_module(file_resource, stdout=stdout)
            runner.wait_process()
            stdout.close()
            temp_file = open(temp_file_name)
            self.assertEqual('output text\n', temp_file.read())
            temp_file.close()
        finally:
            os.remove(temp_file_name)

    def test_setting_pythonpath(self):
        # A module in src/ must be importable from a script under test/.
        src = self.project.root.create_folder('src')
        src.create_file('sample.py')
        src.get_child('sample.py').write('def f():\n    pass\n')
        self.project.root.create_folder('test')
        file_path = 'test/test.py'
        code = dedent("            def get_text():\n                import sample\n                sample.f()\n                return'run'\n            ")
        self.make_sample_python_file(file_path, code)
        file_resource = self.project.get_resource(file_path)
        runner = self.pycore.run_module(file_resource)
        runner.wait_process()
        self.assertEqual('run', self.get_output_file_content(file_path))

    def test_making_runner_when_doi_is_disabled(self):
        self.project.set('enable_doi', False)
        file_path = 'sample.py'
        self.make_sample_python_file(file_path)
        file_resource = self.project.get_resource(file_path)
        runner = self.pycore.run_module(file_resource)
        runner.wait_process()
        self.assertEqual('run', self.get_output_file_content(file_path))
def _test():
    """Smoke-test channelnet: parameter count and output shape/backward pass."""
    import torch
    pretrained = False
    for model in [channelnet]:
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print('m={}, {}'.format(model.__name__, weight_count))
        # Known parameter count for channelnet.
        assert model != channelnet or weight_count == 3875112
        batch = torch.randn(1, 3, 224, 224)
        logits = net(batch)
        logits.sum().backward()
        assert tuple(logits.size()) == (1, 1000)
class ItemDependents(wx.Panel):
    """Panel with a tree of all items that require the given skill, grouped
    by the skill level they require."""

    def __init__(self, parent, stuff, item):
        wx.Panel.__init__(self, parent, style=wx.TAB_TRAVERSAL)
        # Roman numerals for skill levels 0..10.
        self.romanNb = ['0', 'I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII', 'IX', 'X']
        self.skillIdHistory = []
        sizer = wx.BoxSizer(wx.VERTICAL)
        self.reqTree = wx.TreeCtrl(self, style=((wx.TR_DEFAULT_STYLE | wx.TR_HIDE_ROOT) | wx.NO_BORDER))
        sizer.Add(self.reqTree, 1, (wx.ALL | wx.EXPAND), 0)
        self.SetSizer(sizer)
        # Hidden root node (TR_HIDE_ROOT).
        self.root = self.reqTree.AddRoot('WINRARZOR')
        self.reqTree.SetItemData(self.root, None)
        self.imageList = wx.ImageList(16, 16)
        self.reqTree.SetImageList(self.imageList)
        skillBookId = self.imageList.Add(BitmapLoader.getBitmap('skill_small', 'gui'))
        self.getFullSkillTree(item, self.root, skillBookId)
        self.Layout()

    def getFullSkillTree(self, parentSkill, parent, sbIconId):
        """Append one 'Level N' node per required level, each holding the
        items (sorted by name) that need the skill at that level."""
        levelToItems = {}
        for depItem, level in parentSkill.requiredFor.items():
            levelToItems.setdefault(level, []).append(depItem)
        for level in sorted(levelToItems):
            deps = sorted(levelToItems[level], key=lambda it: it.name)
            levelNode = self.reqTree.AppendItem(parent, _t('Level {}').format(self.romanNb[int(level)]), sbIconId)
            for depItem in deps:
                iconId = -1
                if depItem.iconID:
                    bitmap = BitmapLoader.getBitmap(depItem.iconID, 'icons')
                    if bitmap:
                        iconId = self.imageList.Add(bitmap)
                self.reqTree.AppendItem(levelNode, '{}'.format(depItem.name), iconId)
def test_contextmerge_list():
    """contextmerge appends merged list items onto an existing list, with
    {ctx3} formatted from context; other keys stay untouched."""
    context = Context({'ctx1': 'ctxvalue1', 'ctx2': 'ctxvalue2', 'ctx3': 'ctxvalue3', 'ctx4': [1, 2, 3], 'contextMerge': {'ctx4': ['k1', 'k2', '{ctx3}', True, False, 44]}})
    pypyr.steps.contextmerge.run_step(context)
    for key, expected in (('ctx1', 'ctxvalue1'), ('ctx2', 'ctxvalue2'), ('ctx3', 'ctxvalue3')):
        assert context[key] == expected
    merged = context['ctx4']
    assert len(merged) == 9
    # Original three items first, then the merged items with {ctx3} resolved.
    for idx, expected in enumerate([1, 2, 3, 'k1', 'k2', 'ctxvalue3']):
        assert merged[idx] == expected
    assert merged[6]
    assert not merged[7]
    assert merged[8] == 44
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=DEFAULT_SAVE_DIR, delay=15, downloader_factory=get_best_downloader):
    """Download the setuptools zip for *version* into *to_dir* (skipping the
    download when it is already present) and return its resolved path."""
    to_dir = os.path.abspath(to_dir)
    archive_name = 'setuptools-%s.zip' % version
    target = os.path.join(to_dir, archive_name)
    if not os.path.exists(target):
        url = download_base + archive_name
        log.warn('Downloading %s', url)
        downloader_factory()(url, target)
    return os.path.realpath(target)
class FBNetInitBlock(nn.Module):
    """FBNet stem: a stride-2 3x3 conv block followed by one non-expanding
    FBNet unit at the same channel width."""

    def __init__(self, in_channels, out_channels, bn_eps):
        super(FBNetInitBlock, self).__init__()
        self.conv1 = conv3x3_block(in_channels=in_channels, out_channels=out_channels, stride=2, bn_eps=bn_eps)
        self.conv2 = FBNetUnit(in_channels=out_channels, out_channels=out_channels, stride=1, bn_eps=bn_eps, use_kernel3=True, exp_factor=1)

    def forward(self, x):
        return self.conv2(self.conv1(x))
def test_fixed_shape_convert_variable():
    """convert_variable behavior between fixed- and unknown-shape TensorTypes."""
    fixed_a = TensorType('float64', shape=(1, 1))
    fixed_b = TensorType('float64', shape=(1, 1))
    # Identically specified fixed types compare equal.
    assert fixed_a == fixed_b
    assert fixed_a.shape == fixed_b.shape
    var_b = fixed_b()
    # Conversion to an equal type returns the variable unchanged.
    assert fixed_b.convert_variable(var_b) is var_b
    assert fixed_a.convert_variable(var_b) is var_b
    var_a = fixed_a()
    assert fixed_b.convert_variable(var_a) is var_a
    # Unknown first dim -> fixed target inserts a SpecifyShape.
    partial = TensorType('float64', shape=(None, 1))
    partial_var = partial()
    assert isinstance(fixed_b.convert_variable(partial_var).owner.op, SpecifyShape)
    # Fully unknown type adopts the variable's fixed type.
    unknown = TensorType('float64', shape=(None, None))
    fixed_32 = TensorType('float64', shape=(3, 2))
    fixed_32_var = fixed_32()
    assert unknown.shape == (None, None)
    converted = unknown.convert_variable(fixed_32_var)
    assert converted.type == fixed_32
    assert converted.type.shape == (3, 2)
class TestDIORRR3Det(TestDIORR):
    """Rotated-box evaluation of the R3Det detector on DIOR-R."""

    def eval(self):
        det_net = build_whole_network.DetectionNetworkR3Det(cfgs=self.cfgs, is_training=False)
        all_boxes_r = self.eval_with_plac(img_dir=self.args.img_dir, det_net=det_net, image_ext=self.args.image_ext)
        # Image ids are the filenames with the extension stripped.
        image_names = os.listdir(self.args.img_dir)
        test_ids = [fname.split(self.args.image_ext)[0] for fname in image_names]
        print('**' * 10)
        print('rotation eval:')
        evaler = EVAL(self.cfgs)
        evaler.voc_evaluate_detections(all_boxes=all_boxes_r, test_imgid_list=test_ids, test_annotation_path=self.args.test_annotation_path)
def test_cinsk1_control():
    """CORPSE-in-SK1 driven-control segments match reference fixtures.

    NOTE(review): the `_segments` arrays are regression fixtures (values from
    a previous known-good run), not derived in this test.
    """
    cinsk = new_corpse_in_sk1_control(rabi_rotation=(np.pi / 2), azimuthal_angle=0.5, maximum_rabi_rate=(2 * np.pi))
    # One row per segment: [amplitude_x, amplitude_y, detuning, duration].
    segments = np.vstack((cinsk.amplitude_x, cinsk.amplitude_y, cinsk.detunings, cinsk.durations)).T
    _segments = np.array([[5., 3.0123195, 0.0, 1.], [(- 5.), (- 3.0123195), 0.0, 0.], [5., 3.0123195, 0.0, 0.], [2., (- 5.), 0.0, 1.0], [(- 3.), 5., 0.0, 1.0]])
    assert np.allclose(segments, _segments)
    # Second case: full 2*pi rotation with a negative azimuthal angle.
    cinsk = new_corpse_in_sk1_control(rabi_rotation=(2 * np.pi), azimuthal_angle=(- 0.5), maximum_rabi_rate=(2 * np.pi))
    segments = np.vstack((cinsk.amplitude_x, cinsk.amplitude_y, cinsk.detunings, cinsk.durations)).T
    _segments = np.array([[5., (- 3.0123195), 0.0, 1.5], [(- 5.), 3.0123195, 0.0, 1.0], [5., (- 3.0123195), 0.0, 0.5], [(- 5.), (- 3.), 0.0, 1.0], [(- 0.), 6., 0.0, 1.0]])
    assert np.allclose(segments, _segments)
def test_AssertionError_message(pytester: Pytester) -> None:
    """A failing `assert 0, (x, y)` reports the tuple value in the message."""
    pytester.makepyfile('\n        def test_hello():\n            x,y = 1,2\n            assert 0, (x,y)\n    ')
    run_result = pytester.runpytest()
    run_result.stdout.fnmatch_lines('\n        *def test_hello*\n        *assert 0, (x,y)*\n        *AssertionError: (1, 2)*\n    ')
def RegQuery(hive, subkey, searchterms, searchvalues=True, searchkeys=False, haltonerror=False, **kwargs):
    """Query a registry subkey and return matching subkey/value names.

    Args:
        hive: registry hive to query.
        subkey: subkey path under the hive.
        searchterms: name or list of names to match; falsy/empty returns all.
        searchvalues: include value names/data in the result.
        searchkeys: include subkey names in the result.
        haltonerror: raise RegistryError when a searched value is not found.
        **kwargs: passed through to GetRegistryQuery.

    Returns:
        dict mapping matched names to their data (None for subkeys).

    Raises:
        RegistryError: only when haltonerror is set and a value is missing.
    """
    subkeys = None
    values = None
    regdata = GetRegistryQuery(hive, subkey, **kwargs)
    ret = dict()
    if (regdata is not None):
        try:
            if searchkeys:
                subkeys = regdata.key[0].subkey
            if searchvalues:
                values = regdata.key[0].value
        # Narrowed from a bare `except:`; still best-effort because a key may
        # legitimately have no subkeys/values.
        except Exception:
            psplog.debug('This is probably just an error because there are no keys and/or values.', exc_info=True)
        if (subkeys is None):
            subkeys = []
        if (values is None):
            values = []
        if ((searchterms is None) or (len(searchterms) == 0)):
            # No filter: return every subkey name and every value.
            if searchkeys:
                for key in subkeys:
                    ret[key.name] = None
            if searchvalues:
                for val in values:
                    ret[val.name] = val.value
        else:
            if isinstance(searchterms, str):
                searchterms = [searchterms]
            for search in searchterms:
                if searchkeys:
                    # BUG FIX: filter() returns an iterator in Python 3, so the
                    # old len(filter(...)) raised TypeError; use a list comp.
                    matches = [k for k in subkeys if (k.name == search)]
                    if matches:
                        ret[search] = None
                if searchvalues:
                    matches = [v for v in values if (v.name == search)]
                    if matches:
                        ret[search] = matches[0].value
                    elif haltonerror:
                        raise RegistryError('Could not open subkey: {0}'.format(subkey), hive=hive, subkey=subkey)
    return ret
class MopidyPlayer(player.Player):
    """Player backend that drives a Mopidy instance through its API.

    Uses an Event to detect whether playback actually started and the global
    PLAYER handle (with mopidy_command() guarding API access).
    """

    def __init__(self):
        # Set by the playback-started callback; waited on in start_song/play_alarm.
        self.playback_started = Event()
        with mopidy_command(important=True):
            PLAYER.playback.stop()
            PLAYER.tracklist.clear()
            # Consume mode removes tracks from the tracklist once played.
            PLAYER.tracklist.set_consume(True)
        # NOTE(review): the next line looks like a decorator that lost its
        # '@<prefix>' during extraction (other decorators in this file were
        # similarly stripped). As written it calls an undefined `_event` and
        # the handler below is never registered — confirm against upstream.
        _event('track_playback_started')
        def _on_playback_started(_event) -> None:
            self.playback_started.set()

    def start_song(self, song, catch_up: float):
        """Play *song*; optionally seek to catch_up ms, muting while seeking."""
        with mopidy_command(important=True):
            PLAYER.tracklist.clear()
            PLAYER.tracklist.set_consume(True)
            PLAYER.tracklist.add(uris=[song.internal_url])
            volume = PLAYER.mixer.get_volume()
            if ((catch_up is not None) and (catch_up >= 0)):
                # Mute so the pre-seek audio burst is not audible.
                PLAYER.mixer.set_volume(0)
            PLAYER.playback.play()
            if (not self.playback_started.wait(timeout=1)):
                # Playback did not start; restore volume and flag the error.
                logging.warning('playback_started event did not trigger')
                player.set_playback_error(True)
                PLAYER.mixer.set_volume(volume)
                raise PlaybackError('playback_started event did not trigger')
            player.set_playback_error(False)
            if ((catch_up is not None) and (catch_up >= 0)):
                PLAYER.playback.get_time_position()
                PLAYER.playback.seek(catch_up)
                PLAYER.playback.get_time_position()
                if redis.get('paused'):
                    PLAYER.playback.pause()
                PLAYER.mixer.set_volume(volume)

    def should_stop_waiting(self, previous_error: bool) -> bool:
        """Return True when playback is stopped after a previous error; raise
        PlaybackError when the Mopidy connection itself fails."""
        error = False
        with mopidy_command() as allowed:
            if allowed:
                try:
                    if ((PLAYER.playback.get_state() == 'stopped') and previous_error):
                        return True
                except (requests.exceptions.ConnectionError, MopidyError):
                    error = True
        # Raise outside the mopidy_command context.
        if error:
            raise PlaybackError
        return False

    def play_alarm(self, interrupt: bool, alarm_path: str) -> None:
        """Queue and play the alarm file; clear the tracklist if interrupting."""
        self.playback_started.clear()
        with mopidy_command(important=True):
            if interrupt:
                PLAYER.tracklist.clear()
            PLAYER.tracklist.add(uris=[('file://' + urllib.parse.quote(alarm_path))])
            PLAYER.playback.play()
        self.playback_started.wait(timeout=1)

    def play_backup_stream(self):
        """Queue the configured backup stream and start playback."""
        PLAYER.tracklist.add(uris=[storage.get('backup_stream')])
        PLAYER.playback.play()
def test(epoch, checkpoint, data_test, label_test, n_classes):
    """Evaluate a model checkpoint on the test split.

    Returns:
        (mean AUROC, mean accuracy) over the per-class metrics.

    NOTE(review): `epoch` is unused here — presumably kept for a uniform call
    signature; confirm before removing.
    """
    net = ModelFedCon(args.model, args.out_dim, n_classes=n_classes)
    if len(args.gpu.split(',')) > 1:
        gpu_ids = [i for i in range(round(len(args.gpu) / 2))]
        net = torch.nn.DataParallel(net, device_ids=gpu_ids)
    model = net.cuda()
    model.load_state_dict(checkpoint)
    # skin datasets need explicit pre-resize/input sizes.
    if args.dataset == 'skin':
        test_dl, test_ds = get_dataloader(args, data_test, label_test, args.dataset, args.datadir, args.batch_size, is_labeled=True, is_testing=True, pre_sz=args.pre_sz, input_sz=args.input_sz)
    elif (args.dataset == 'SVHN') or (args.dataset == 'cifar100'):
        test_dl, test_ds = get_dataloader(args, data_test, label_test, args.dataset, args.datadir, args.batch_size, is_labeled=True, is_testing=True)
    AUROCs, Accus = epochVal_metrics_test(model, test_dl, args.model, thresh=0.4, n_classes=n_classes)
    return (np.array(AUROCs).mean(), np.array(Accus).mean())
class IBFIGItoIBContractMapper():
    """Resolves FIGI-based IB contracts into fully specified IB contracts.

    Connects to an Interactive Brokers gateway on construction and runs the
    client's message loop on a background thread.
    """

    def __init__(self, clientId: int=0, host: str='127.0.0.1', port: int=7497):
        self.logger = ib_logger.getChild(self.__class__.__name__)
        self.lock = Lock()
        # Seconds to wait for any single response before giving up.
        self.waiting_time = 30
        self.action_event_lock = Event()
        self.wrapper = IBWrapper(self.action_event_lock, IBContractTickerMapper({}))
        self.client = EClient(wrapper=self.wrapper)
        self.clientId = clientId
        self.client.connect(host, port, self.clientId)
        client_thread = Thread(target=self.client.run)
        client_thread.start()
        if not self._wait_for_results():
            raise ConnectionError('IB IBFIGItoIBContractMapper was not initialized correctly')

    def get_ticker_to_contract_mapping_from_figi_contracts(self, ticker_to_contract: Dict[(Ticker, IBContract)]) -> Dict[(Ticker, IBContract)]:
        """Return a mapping with each FIGI contract replaced by its resolved
        contract details; falls back to the original contract on failure."""
        resolved = {}
        reqId = 0
        for ticker, contract in ticker_to_contract.items():
            details = self._get_contract_details(reqId, contract)
            if not details:
                self.logger.info(f'Could not find corresponding contract details for the ticker {ticker} and contract {contract}. Using FIGI contract instead')
                resolved[ticker] = contract
            else:
                resolved[ticker] = IBContract.from_string(str(details.contract))
        return resolved

    def stop(self):
        """Disconnect from the IB client."""
        with self.lock:
            self.client.disconnect()
            self.logger.info('Disconnecting from the interactive brokers client')

    def _get_contract_details(self, reqId, contract):
        # Serialized under the lock: one outstanding details request at a time.
        with self.lock:
            self.wrapper.reset_contract_details()
            self._reset_action_lock()
            self.client.reqContractDetails(reqId, contract)
            if not self._wait_for_results(self.waiting_time):
                error_msg = f'Time out while getting positions for contract {contract}'
                self.logger.error(error_msg)
                return None
            return self.wrapper.contract_details

    def _wait_for_results(self, waiting_time: Optional[int]=None) -> bool:
        """Block until the wrapper signals completion; True when signalled in time."""
        return self.action_event_lock.wait(waiting_time or self.waiting_time)

    def _reset_action_lock(self):
        """Clear the event so the next request can be awaited."""
        self.action_event_lock.clear()
def renameUser(username, new_name):
    """Rename the user account *username* to *new_name*.

    Raises:
        ValueError: if the names are equal, the target name is taken, or the
            source user does not exist.  (ValueError subclasses Exception, so
            callers catching the old generic Exception keep working.)
    """
    if username == new_name:
        raise ValueError('Must give a new username')
    if model.user.get_user_or_org(new_name) is not None:
        raise ValueError('New username %s already exists' % new_name)
    existing = model.user.get_user_or_org(username)
    if existing is None:
        raise ValueError('Username %s does not exist' % username)
    print('Renaming user...')
    model.user.change_username(existing.id, new_name)
    print('Rename complete')
class SshPw_TestCase(unittest.TestCase):
    """Equality semantics and parser defaults for the sshpw kickstart data."""

    def runTest(self):
        data1 = F13_SshPwData()
        data2 = F13_SshPwData()
        # Fresh instances compare equal and differ from None.
        self.assertEqual(data1, data2)
        self.assertFalse(data1 != data2)
        self.assertNotEqual(data1, None)
        self.assertFalse(data1.isCrypted)
        self.assertFalse(data1.lock)
        # A differing attribute breaks equality symmetrically.
        for attr in ['username']:
            setattr(data1, attr, None)
            setattr(data2, attr, 'test')
            self.assertNotEqual(data1, data2)
            self.assertNotEqual(data2, data1)
            setattr(data1, attr, None)
            setattr(data2, attr, None)
        # Parser contract: --username is required, --lock defaults to False.
        for action in F13_SshPw()._getParser()._actions:
            if '--username' in action.option_strings:
                self.assertTrue(action.required)
            if '--lock' in action.option_strings:
                self.assertFalse(action.default)
        self.assertFalse(F24_SshPwData().sshkey)
class SEResNeXt(nn.Module):
    """SE-ResNeXt classifier: an init block, staged SE-ResNeXt units, average
    pooling, and a linear output layer."""

    def __init__(self, channels, init_block_channels, cardinality, bottleneck_width, in_channels=3, in_size=(224, 224), num_classes=1000):
        super(SEResNeXt, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.features = nn.Sequential()
        self.features.add_module('init_block', ResInitBlock(in_channels=in_channels, out_channels=init_block_channels))
        in_channels = init_block_channels
        for stage_idx, stage_channels in enumerate(channels):
            stage = nn.Sequential()
            for unit_idx, out_channels in enumerate(stage_channels):
                # Downsample at the first unit of every stage except the first.
                unit_stride = 2 if (unit_idx == 0 and stage_idx != 0) else 1
                stage.add_module('unit{}'.format(unit_idx + 1), SEResNeXtUnit(in_channels=in_channels, out_channels=out_channels, stride=unit_stride, cardinality=cardinality, bottleneck_width=bottleneck_width))
                in_channels = out_channels
            self.features.add_module('stage{}'.format(stage_idx + 1), stage)
        self.features.add_module('final_pool', nn.AvgPool2d(kernel_size=7, stride=1))
        self.output = nn.Linear(in_features=in_channels, out_features=num_classes)
        self._init_params()

    def _init_params(self):
        """Kaiming-uniform init for conv weights; zero conv biases."""
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        features = self.features(x)
        flat = features.view(features.size(0), -1)
        return self.output(flat)
class DeepLabV3PlusDecoder(nn.Module):
    """DeepLabV3+ decoder: ASPP over the deepest encoder features, bilinear
    upsampling, fusion with a projected high-resolution skip connection, and
    a final separable-conv refinement."""

    def __init__(self, encoder_channels, out_channels=256, atrous_rates=(12, 24, 36), output_stride=16):
        super().__init__()
        if output_stride not in {8, 16}:
            raise ValueError('Output stride should be 8 or 16, got {}.'.format(output_stride))
        self.out_channels = out_channels
        self.output_stride = output_stride
        self.aspp = nn.Sequential(
            ASPP(encoder_channels[-1], out_channels, atrous_rates, separable=True),
            SeparableConv2d(out_channels, out_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
        )
        # Upsample ASPP output to the skip connection's spatial resolution.
        self.up = nn.UpsamplingBilinear2d(scale_factor=2 if output_stride == 8 else 4)
        skip_channels = 48
        # 1x1 projection of the high-resolution skip features.
        self.block1 = nn.Sequential(
            nn.Conv2d(encoder_channels[-4], skip_channels, kernel_size=1, bias=False),
            nn.BatchNorm2d(skip_channels),
            nn.ReLU(),
        )
        self.block2 = nn.Sequential(
            SeparableConv2d(skip_channels + out_channels, out_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
        )

    def forward(self, *features):
        deep = self.up(self.aspp(features[-1]))
        skip = self.block1(features[-4])
        return self.block2(torch.cat([deep, skip], dim=1))
def test_convert_variable():
    """convert_variable: pass-through for compatible shapes, retargeting to
    more specific types, None on ndim mismatch, constants accepted."""
    unspecified = TensorType(config.floatX, shape=(None, None))
    unspecified_var = unspecified()
    row_like = TensorType(config.floatX, shape=(1, None))
    row_like_var = row_like()
    # A variable already of the target type is returned unchanged.
    assert unspecified.convert_variable(unspecified_var) is unspecified_var
    # A more specific variable satisfies a less specific type as-is.
    assert unspecified.convert_variable(row_like_var) is row_like_var
    # Converting a general variable to a more specific type retargets it.
    assert row_like.convert_variable(unspecified_var).type == row_like
    # Mismatched number of dimensions cannot be converted.
    three_d_var = TensorType(config.floatX, shape=(1, None, 1))()
    assert row_like.convert_variable(three_d_var) is None
    # Constants are accepted unchanged.
    const_var = pt.as_tensor([[1, 2], [3, 4]], dtype=config.floatX)
    assert unspecified.convert_variable(const_var) is const_var
# NOTE(review): the two leading-dot lines were decorators that lost their
# '@pytest.mark' prefix during extraction (the file was a syntax error as
# written); restored here — confirm the exact marker names against upstream.
@pytest.mark.unit()
@pytest.mark.parametrize(('expr', 'expected'), [(' true ', True), (' ((((((true)))))) ', True), (' ( ((\t (((true))))) \t \t)', True), ('( true and (((false))))', False), ('not not not not true', True), ('not not not not not true', False)])
def test_syntax_oddeties(expr: str, expected: bool) -> None:
    """evaluate() tolerates odd whitespace and nested parentheses in boolean expressions."""
    matcher = {'true': True, 'false': False}.__getitem__
    assert evaluate(expr, matcher) is expected
@pytest.mark.skipif((not _aead_supported(AESCCM)), reason='Does not support AESCCM')
class TestAESCCM:
    """Tests for the AES-CCM AEAD construction.

    NOTE(review): the markers in this class appeared as bare ``.skipif(...)``
    / ``.parametrize(...)`` expressions (syntax errors); the ``@pytest.mark.``
    decorator prefixes have been restored.
    """

    @pytest.mark.skipif((sys.platform not in {'linux', 'darwin'}), reason='mmap required')
    def test_data_too_large(self):
        # Inputs beyond the CCM size limit must raise OverflowError in both
        # the plaintext and the associated-data positions.
        key = AESCCM.generate_key(128)
        aesccm = AESCCM(key)
        nonce = (b'0' * 12)
        large_data = large_mmap()
        with pytest.raises(OverflowError):
            aesccm.encrypt(nonce, large_data, b'')
        with pytest.raises(OverflowError):
            aesccm.encrypt(nonce, b'', large_data)

    def test_default_tag_length(self, backend):
        # The default 16-byte tag is appended to the ciphertext.
        key = AESCCM.generate_key(128)
        aesccm = AESCCM(key)
        nonce = os.urandom(12)
        pt = b'hello'
        ct = aesccm.encrypt(nonce, pt, None)
        assert (len(ct) == (len(pt) + 16))

    def test_invalid_tag_length(self, backend):
        # Valid tag lengths are even values in [4, 16]; wrong type is TypeError.
        key = AESCCM.generate_key(128)
        with pytest.raises(ValueError):
            AESCCM(key, tag_length=7)
        with pytest.raises(ValueError):
            AESCCM(key, tag_length=2)
        with pytest.raises(TypeError):
            AESCCM(key, tag_length='notanint')

    def test_invalid_nonce_length(self, backend):
        # CCM nonces must be 7-13 bytes; 14 and 6 both fail.
        key = AESCCM.generate_key(128)
        aesccm = AESCCM(key)
        pt = b'hello'
        nonce = os.urandom(14)
        with pytest.raises(ValueError):
            aesccm.encrypt(nonce, pt, None)
        with pytest.raises(ValueError):
            aesccm.encrypt(nonce[:6], pt, None)

    def test_vectors(self, subtests, backend):
        """Run the NIST CCM test vectors (DVPT/VADT/VNT/VPT, 128/192/256-bit)."""
        vectors = _load_all_params(os.path.join('ciphers', 'AES', 'CCM'), ['DVPT128.rsp', 'DVPT192.rsp', 'DVPT256.rsp', 'VADT128.rsp', 'VADT192.rsp', 'VADT256.rsp', 'VNT128.rsp', 'VNT192.rsp', 'VNT256.rsp', 'VPT128.rsp', 'VPT192.rsp', 'VPT256.rsp'], load_nist_ccm_vectors)
        for vector in vectors:
            with subtests.test():
                key = binascii.unhexlify(vector['key'])
                nonce = binascii.unhexlify(vector['nonce'])
                adata = binascii.unhexlify(vector['adata'])[:vector['alen']]
                ct = binascii.unhexlify(vector['ct'])
                pt = binascii.unhexlify(vector['payload'])[:vector['plen']]
                aesccm = AESCCM(key, vector['tlen'])
                if vector.get('fail'):
                    # Negative vectors must be rejected with InvalidTag.
                    with pytest.raises(InvalidTag):
                        aesccm.decrypt(nonce, ct, adata)
                else:
                    computed_pt = aesccm.decrypt(nonce, ct, adata)
                    assert (computed_pt == pt)
                    assert (aesccm.encrypt(nonce, pt, adata) == ct)

    def test_roundtrip(self, backend):
        key = AESCCM.generate_key(128)
        aesccm = AESCCM(key)
        pt = b'encrypt me'
        ad = b'additional'
        nonce = os.urandom(12)
        ct = aesccm.encrypt(nonce, pt, ad)
        computed_pt = aesccm.decrypt(nonce, ct, ad)
        assert (computed_pt == pt)

    def test_nonce_too_long(self, backend):
        # With a 13-byte nonce the maximum payload shrinks enough that this
        # plaintext no longer fits.
        key = AESCCM.generate_key(128)
        aesccm = AESCCM(key)
        pt = (b'encrypt me' * 6600)
        nonce = os.urandom(13)
        with pytest.raises(ValueError):
            aesccm.encrypt(nonce, pt, None)

    @pytest.mark.parametrize(('nonce', 'data', 'associated_data'), [[object(), b'data', b''], [(b'0' * 12), object(), b''], [(b'0' * 12), b'data', object()]])
    def test_params_not_bytes(self, nonce, data, associated_data, backend):
        key = AESCCM.generate_key(128)
        aesccm = AESCCM(key)
        with pytest.raises(TypeError):
            aesccm.encrypt(nonce, data, associated_data)

    def test_bad_key(self, backend):
        with pytest.raises(TypeError):
            AESCCM(object())
        with pytest.raises(ValueError):
            AESCCM((b'0' * 31))

    def test_bad_generate_key(self, backend):
        with pytest.raises(TypeError):
            AESCCM.generate_key(object())
        with pytest.raises(ValueError):
            AESCCM.generate_key(129)

    def test_associated_data_none_equal_to_empty_bytestring(self, backend):
        # None and b'' must be interchangeable as associated data.
        key = AESCCM.generate_key(128)
        aesccm = AESCCM(key)
        nonce = os.urandom(12)
        ct1 = aesccm.encrypt(nonce, b'some_data', None)
        ct2 = aesccm.encrypt(nonce, b'some_data', b'')
        assert (ct1 == ct2)
        pt1 = aesccm.decrypt(nonce, ct1, None)
        pt2 = aesccm.decrypt(nonce, ct2, b'')
        assert (pt1 == pt2)

    def test_decrypt_data_too_short(self, backend):
        # Ciphertext shorter than the tag cannot authenticate.
        key = AESCCM.generate_key(128)
        aesccm = AESCCM(key)
        with pytest.raises(InvalidTag):
            aesccm.decrypt((b'0' * 12), b'0', None)

    def test_buffer_protocol(self, backend):
        # bytearray (buffer-protocol) inputs must behave identically to bytes.
        key = AESCCM.generate_key(128)
        aesccm = AESCCM(key)
        pt = b'encrypt me'
        ad = b'additional'
        nonce = os.urandom(12)
        ct = aesccm.encrypt(nonce, pt, ad)
        computed_pt = aesccm.decrypt(nonce, ct, ad)
        assert (computed_pt == pt)
        aesccm2 = AESCCM(bytearray(key))
        ct2 = aesccm2.encrypt(bytearray(nonce), pt, ad)
        assert (ct2 == ct)
        computed_pt2 = aesccm2.decrypt(bytearray(nonce), ct2, ad)
        assert (computed_pt2 == pt)
class TMid3Iconv(_TTools):
    """Integration tests for the ``mid3iconv`` command-line tool.

    Uses the deprecated ``failIf``/``failUnless*`` unittest aliases replaced
    by their modern equivalents (the aliases were removed in Python 3.12).
    """

    TOOL_NAME = 'mid3iconv'

    def setUp(self):
        super().setUp()
        # Work on a throwaway copy so tests never modify the source data file.
        self.filename = get_temp_copy(os.path.join(DATA_DIR, 'silence-44-s.mp3'))

    def tearDown(self):
        super().tearDown()
        os.unlink(self.filename)

    def test_noop(self):
        # No arguments: tool prints usage and exits successfully.
        (res, out) = self.call()
        self.assertFalse(res)
        self.assertIn('Usage:', out)

    def test_debug(self):
        # -d dumps tags; output must be decoded text, not repr'd bytes.
        (res, out) = self.call('-d', '-p', self.filename)
        self.assertFalse(res)
        self.assertNotIn("b'", out)
        self.assertIn('TCON=Silence', out)

    def test_quiet(self):
        # -q suppresses all output.
        (res, out) = self.call('-q', self.filename)
        self.assertFalse(res)
        self.assertFalse(out)

    def test_test_data(self):
        # Sanity check: AMBIGUOUS decodes to a distinct string per codec,
        # so the conversion tests below are actually discriminating.
        results = set()
        for codec in CODECS:
            results.add(AMBIGUOUS.decode(codec))
        self.assertEqual(len(results), len(CODECS))

    def test_conv_basic(self):
        from mutagen.id3 import TALB
        for codec in CODECS:
            f = ID3(self.filename)
            f.add(TALB(text=[AMBIGUOUS.decode('latin-1')], encoding=0))
            f.save()
            (res, out) = self.call('-d', '-e', str(codec), self.filename)
            f = ID3(self.filename)
            # After conversion the frame is UTF-16 (encoding 1) and the text
            # matches the source bytes decoded with the requested codec.
            self.assertEqual(f['TALB'].encoding, 1)
            self.assertEqual(f['TALB'].text[0], AMBIGUOUS.decode(codec))

    def test_comm(self):
        from mutagen.id3 import COMM
        for codec in CODECS:
            f = ID3(self.filename)
            frame = COMM(desc='', lang='eng', encoding=0, text=[AMBIGUOUS.decode('latin-1')])
            f.add(frame)
            f.save()
            (res, out) = self.call('-d', '-e', str(codec), self.filename)
            f = ID3(self.filename)
            new_frame = f[frame.HashKey]
            self.assertEqual(new_frame.encoding, 1)
            self.assertEqual(new_frame.text[0], AMBIGUOUS.decode(codec))

    def test_remove_v1(self):
        from mutagen.id3 import ParseID3v1
        (res, out) = self.call('--remove-v1', self.filename)
        # The ID3v1 tag lives in the last 128 bytes; after removal those
        # bytes must no longer parse as a v1 tag.
        with open(self.filename, 'rb') as h:
            h.seek((- 128), 2)
            data = h.read()
        self.assertEqual(len(data), 128)
        self.assertFalse(ParseID3v1(data))
def swap_network(qubits: Sequence[cirq.Qid], operation: Callable[([int, int, cirq.Qid, cirq.Qid], cirq.OP_TREE)]=(lambda p, q, p_qubit, q_qubit: ()), fermionic: bool=False, offset: bool=False) -> List[cirq.Operation]:
    """Build a linear swap network over *qubits*.

    For each of ``len(qubits)`` layers, adjacent pairs are processed: first
    ``operation`` is invoked with the logical indices currently mapped to the
    pair (plus the physical qubits), then the pair is swapped (``FSWAP`` when
    ``fermionic``). ``offset`` flips which parity of pairs starts each layer.
    """
    n_qubits = len(qubits)
    # order[i] = logical index currently sitting on physical qubit i.
    order = list(range(n_qubits))
    swap = FSWAP if fermionic else cirq.SWAP
    ops: List[cirq.Operation] = []
    for layer in range(n_qubits):
        first = (layer + offset) % 2
        for left in range(first, n_qubits - 1, 2):
            right = left + 1
            p, q = order[left], order[right]
            # Caller-supplied gates acting on this pair, before the swap.
            extra = operation(p, q, qubits[left], qubits[right])
            ops.extend(cast(Iterable[cirq.Operation], cirq.flatten_op_tree(extra)))
            ops.append(swap(qubits[left], qubits[right]))
            # Record that the logical indices traded places.
            order[left], order[right] = q, p
    return ops
class TestIncrementDisplay(unittest.TestCase):
    """Tests that LineProfiler records per-line occurrence counts and memory
    increments/decrements.

    The assertions index into ``profiler.code_map`` positionally; each per-line
    record appears to be ``(increment, total, occurrences)`` — presumed from
    the indices used below, confirm against memory_profiler internals.
    """

    def test_loop_count(self):
        # The ``for`` line runs 13 times (12 iterations + terminating check),
        # the loop body 12 times.
        def some_loop():
            for i in range(12):
                a = 1
        profiler = LineProfiler()
        wrapped = profiler(some_loop)
        wrapped()
        show_results(profiler)
        for_line = list(list(profiler.code_map.values())[0].values())[(- 2)]
        looped_instruction = list(list(profiler.code_map.values())[0].values())[(- 1)]
        self.assertEqual(for_line[2], 13)
        self.assertEqual(looped_instruction[2], 12)

    def test_normal_incr(self):
        # A single large allocation: positive increment, total >= increment,
        # executed exactly once.
        def normal_incr():
            use_some_memory = ([1] * (10 ** 6))
        profiler = LineProfiler()
        wrapped = profiler(normal_incr)
        wrapped()
        show_results(profiler)
        results = list(list(profiler.code_map.values())[0].values())[(- 1)]
        self.assertGreater(results[0], 0)
        self.assertGreater(results[1], results[0])
        self.assertEqual(results[2], 1)

    def test_loop_incr(self):
        # An allocation inside a 3-iteration loop should report roughly 3x the
        # increment of the equivalent one-off allocation (delta=1 MB slack).
        def loop_incr():
            a = []
            b = ([2] * (2 * (10 ** 7)))
            for i in range(3):
                c = ([2] * (2 * (10 ** 7)))
                a.append(c)
        profiler = LineProfiler()
        wrapped = profiler(loop_incr)
        wrapped()
        show_results(profiler)
        b_line = list(list(profiler.code_map.values())[0].values())[(- 4)]
        c_line = list(list(profiler.code_map.values())[0].values())[(- 2)]
        self.assertAlmostEqual((b_line[2] * 3), c_line[2], delta=1)
        self.assertEqual(c_line[2], 3)

    def test_decr(self):
        # ``del`` of a large object shows a negative increment roughly equal in
        # magnitude to the allocation on the line that created it.
        def del_stuff():
            b = ([2] * (2 * (10 ** 7)))
            del b
        profiler = LineProfiler()
        wrapped = profiler(del_stuff)
        wrapped()
        show_results(profiler)
        b_line = list(list(profiler.code_map.values())[0].values())[(- 2)]
        del_line = list(list(profiler.code_map.values())[0].values())[(- 1)]
        self.assertGreater(0, del_line[0])
        self.assertGreater(del_line[1], 0)
        self.assertAlmostEqual((- del_line[0]), b_line[0], delta=1)
def markdown_statistics(file_names):
    """Aggregate per-file (field, class, empty) counts into a Markdown table.

    Files are processed in sorted order; rows are sorted by the string form
    of their (field, class, empty) key.
    """
    counts = collections.Counter()
    for name in sorted(file_names):
        counts.update(get_types(name))
    rows = ['|Field|Class|Empty|Count|', '|---|---|---|---|']
    for key in sorted(counts, key=str):
        field, class_, void = key
        rows.append('|{}|{}|{}|{}|'.format(field, class_, void, counts[key]))
    logging.debug('result: {}'.format(rows))
    return '\n'.join(rows)
class LvmFileSystem(LoopbackFileSystemMixin, FileSystem):
    """Filesystem handler that activates an LVM2 volume group so the logical
    volumes inside it can be detected as child volumes."""

    type = 'lvm'
    # Alternate identifiers: MBR partition id and lvm2 label.
    aliases = ['0x8e', 'lvm2']
    # GPT partition-type GUIDs for LVM (both byte-order renderings).
    guids = ['E6D6D379-F507-44C2-A23C-238F2A3DF928', '79D3D6E6-07F5-C244-A23C-238F2A3DF928']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Name of the activated volume group, set by mount(), cleared by unmount().
        self.vgname = None

    # NOTE(review): looks like a stripped decorator (e.g. a dependency-check
    # such as ``@dependencies.require(...)``) — confirm against VCS history.
    (dependencies.lvm)
    def mount(self):
        """Find the volume group backing this volume, activate it, and detect
        its logical volumes as sub-volumes.

        Raises IncorrectFilesystemError if no volume group is found.
        """
        # Silence LVM's warnings about leaked file descriptors.
        os.environ['LVM_SUPPRESS_FD_WARNINGS'] = '1'
        # nbd devices can be scanned directly; anything else goes through a
        # loopback device first.
        if ('/dev/nbd' not in self.volume.get_raw_path()):
            self._find_loopback()
            time.sleep(0.2)
        try:
            # Parse `lvm pvscan` output for the VG that lives on our device.
            result = _util.check_output_(['lvm', 'pvscan'])
            for line in result.splitlines():
                if (((self.loopback is not None) and (self.loopback in line)) or (self.volume.get_raw_path() in line)):
                    for vg in re.findall('VG (\\S+)', line):
                        self.vgname = vg
            if (not self.vgname):
                logger.warning('Volume is not a volume group. (Searching for %s)', self.loopback)
                raise IncorrectFilesystemError()
            # Activate the volume group so its LVs appear as devices.
            _util.check_call_(['lvm', 'vgchange', '-a', 'y', self.vgname], stdout=subprocess.PIPE)
        except Exception:
            # Roll back the loopback attachment on any failure.
            self._free_loopback()
            self.vgname = None
            raise
        self.volume.info['volume_group'] = self.vgname
        self.volume.volumes.vstype = 'lvm'
        # Detection is generator-based; drain it for its side effects.
        for _ in self.volume.volumes.detect_volumes('lvm', 'lvm'):
            pass

    def unmount(self, allow_lazy=False):
        """Deactivate the volume group (if any) and release the loopback."""
        if self.vgname:
            _util.check_call_(['lvm', 'vgchange', '-a', 'n', self.vgname], wrap_error=True, stdout=subprocess.PIPE)
            self.vgname = None
        super().unmount(allow_lazy=allow_lazy)
class MergeResolveTestCase(unittest.TestCase):
    """Behavioral tests for ``localedata.merge`` and alias resolution."""

    def test_merge_items(self):
        # Flat merge: existing keys are overwritten, new keys added,
        # untouched keys preserved.
        target = {1: 'foo', 3: 'baz'}
        localedata.merge(target, {1: 'Foo', 2: 'Bar'})
        assert target == {1: 'Foo', 2: 'Bar', 3: 'baz'}

    def test_merge_nested_dict(self):
        # Nested dicts are merged recursively, not replaced wholesale.
        base = {'x': {'a': 1, 'b': 2, 'c': 3}}
        extra = {'x': {'a': 1, 'b': 12, 'd': 14}}
        localedata.merge(base, extra)
        assert base == {'x': {'a': 1, 'b': 12, 'c': 3, 'd': 14}}

    def test_merge_nested_dict_no_overlap(self):
        # Disjoint top-level keys simply coexist after the merge.
        base = {'x': {'a': 1, 'b': 2}}
        extra = {'y': {'a': 11, 'b': 12}}
        localedata.merge(base, extra)
        assert base == {'x': {'a': 1, 'b': 2}, 'y': {'a': 11, 'b': 12}}

    def test_merge_with_alias_and_resolve(self):
        alias = localedata.Alias('x')
        base = {'x': {'a': 1, 'b': 2, 'c': 3}, 'y': alias}
        extra = {'x': {'a': 1, 'b': 12, 'd': 14}, 'y': {'b': 22, 'e': 25}}
        localedata.merge(base, extra)
        # Merging onto an Alias keeps the alias paired with the overrides.
        assert base == {'x': {'a': 1, 'b': 12, 'c': 3, 'd': 14}, 'y': (alias, {'b': 22, 'e': 25})}
        resolved = localedata.LocaleDataDict(base)
        # Resolution follows the alias to 'x' and layers the overrides on top.
        assert dict(resolved.items()) == {'x': {'a': 1, 'b': 12, 'c': 3, 'd': 14}, 'y': {'a': 1, 'b': 22, 'c': 3, 'd': 14, 'e': 25}}
class OrgAddUserViewTest(TestCase):
    """Permission and behavior tests for the ``org_add_user`` view."""

    @classmethod
    def setUpTestData(cls):
        # Django invokes setUpTestData on the class, so it must be a
        # classmethod; the decorator was missing.
        add_default_data()

    def login(self, name, password=None):
        """Log *name* in (password defaults to the username) and return the
        matching PytitionUser, also cached on ``self.pu``."""
        self.client.login(username=name, password=(password if password else name))
        self.pu = PytitionUser.objects.get(user__username=name)
        return self.pu

    def test_OrgAddUserViewOk(self):
        # An org member may invite another user; the invitation is recorded.
        julia = self.login('julia')
        response = self.client.get((reverse('org_add_user', args=['rap']) + '?user=max'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        user = PytitionUser.objects.get(user__username='max')
        rap = Organization.objects.get(slugname='rap')
        invitations = user.invitations.all()
        self.assertIn(rap, invitations)

    def test_OrgAddUserViewKoForbidden(self):
        # Non-members cannot invite users into an organization.
        self.login('john')
        response = self.client.get((reverse('org_add_user', args=['rap']) + '?user=max'))
        self.assertEqual(response.status_code, 403)
        self.assertEqual(response['Content-Type'], 'application/json')
        self.login('max')
        org = Organization.objects.get(name='Les Amis de la Terre')
        response = self.client.get((reverse('org_add_user', args=[org.slugname]) + '?user=john'))
        self.assertEqual(response.status_code, 403)
        self.assertEqual(response['Content-Type'], 'application/json')
        # A member inviting an already-invited/der user yields a server error.
        self.login('julia')
        response = self.client.get((reverse('org_add_user', args=[org.slugname]) + '?user=max'))
        self.assertEqual(response.status_code, 500)
        self.assertEqual(response['Content-Type'], 'application/json')
class Migration(migrations.Migration):
    """Initial migration: creates the ``JobListing`` model with i18n title/slug/
    description fields and a ``published_posts`` default manager."""

    initial = True
    dependencies = []
    operations = [migrations.CreateModel(name='JobListing', fields=[('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')), ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')), ('title', i18n.fields.I18nCharField(max_length=200, verbose_name='title')), ('slug', i18n.fields.I18nCharField(blank=True, max_length=200, verbose_name='slug')), ('company', models.CharField(max_length=100, verbose_name='company')), ('company_logo', models.ImageField(blank=True, null=True, upload_to='job-listings', verbose_name='company logo')), ('description', i18n.fields.I18nTextField(blank=True, verbose_name='description')), ('apply_url', models.URLField(blank=True, verbose_name='URL where you can apply'))], options={'ordering': ['created']}, managers=[('published_posts', django.db.models.manager.Manager())])]
def parse_args():
    """Parse command-line arguments for uploading models to OSS.

    Returns:
        argparse.Namespace with ``model_zoo`` (positional) and ``dst_folder``
        (defaults to 'mmsegmentation/v0.5').
    """
    parser = argparse.ArgumentParser(description='Upload models to OSS')
    parser.add_argument('model_zoo', type=str, help='model_zoo input')
    parser.add_argument('--dst-folder', type=str, default='mmsegmentation/v0.5', help='destination folder')
    return parser.parse_args()
def _iter_fixes(testcase: DataDrivenTestCase, actual: list[str], *, incremental_step: int) -> Iterator[DataFileFix]:
    """Yield DataFileFix edits that rewrite a data-driven test case's expected
    output to match the *actual* checker output.

    If the case has any ``[out]``/``[outN]`` section, only the section matching
    ``incremental_step`` is replaced (and iteration stops). Otherwise the
    inline ``# E:`` / ``# N:`` / ``# W:`` comments in the ``[case]`` and
    ``[file ...]`` sections are regenerated from the reported messages.
    """
    # Group reported messages by (filename, line) so they can be attached to
    # the corresponding source line as inline comments.
    reports_by_line: dict[(tuple[(str, int)], list[tuple[(str, str)]])] = defaultdict(list)
    for error_line in actual:
        comment_match = re.match('^(?P<filename>[^:]+):(?P<lineno>\\d+): (?P<severity>error|note|warning): (?P<msg>.+)$', error_line)
        if comment_match:
            filename = comment_match.group('filename')
            lineno = int(comment_match.group('lineno'))
            severity = comment_match.group('severity')
            msg = comment_match.group('msg')
            reports_by_line[(filename, lineno)].append((severity, msg))
    test_items = parse_test_data(testcase.data, testcase.name)
    # Case 1: the test uses [out] sections — replace the matching one wholesale.
    if any((re.match('^out\\d*$', test_item.id) for test_item in test_items)):
        for test_item in test_items:
            # Step 1 uses the plain 'out' section; later steps use 'outN'.
            if (((incremental_step < 2) and (test_item.id == 'out')) or ((incremental_step >= 2) and (test_item.id == f'out{incremental_step}'))):
                (yield DataFileFix(lineno=((testcase.line + test_item.line) - 1), end_lineno=((testcase.line + test_item.end_line) - 1), lines=(actual + ([''] * test_item.trimmed_newlines))))
        return
    # Case 2: regenerate inline comments in each source-bearing section.
    for test_item in test_items:
        if (test_item.id == 'case'):
            source_lines = test_item.data
            file_path = 'main'
        elif (test_item.id == 'file'):
            source_lines = test_item.data
            file_path = f'tmp/{test_item.arg}'
        else:
            continue
        fix_lines = []
        for (lineno, source_line) in enumerate(source_lines, start=1):
            reports = reports_by_line.get((file_path, lineno))
            comment_match = re.search('(?P<indent>\\s+)(?P<comment># [EWN]: .+)$', source_line)
            if comment_match:
                # Strip any pre-existing expected-message comment; it will be
                # re-added below if the line still produces messages.
                source_line = source_line[:comment_match.start('indent')]
            if reports:
                # Reuse the original separator whitespace when available.
                indent = (comment_match.group('indent') if comment_match else ' ')
                for (j, (severity, msg)) in enumerate(reports):
                    # Additional messages for the same line go on continuation
                    # lines, padded to align under the source line.
                    out_l = (source_line if (j == 0) else (' ' * len(source_line)))
                    is_last = (j == (len(reports) - 1))
                    severity_char = severity[0].upper()
                    continuation = ('' if is_last else ' \\')
                    fix_lines.append(f'{out_l}{indent}# {severity_char}: {msg}{continuation}')
            else:
                fix_lines.append(source_line)
        (yield DataFileFix(lineno=((testcase.line + test_item.line) - 1), end_lineno=((testcase.line + test_item.end_line) - 1), lines=(fix_lines + ([''] * test_item.trimmed_newlines))))
def ddpOrient(node_a: Node, node_b: Node, node_c: Node, graph: Graph, maxPathLength: int, data: ndarray, independence_test_method, alpha: float, sep_sets: Dict[(Tuple[(int, int)], Set[int])], change_flag: bool, bk: (BackgroundKnowledge | None), verbose: bool=False) -> bool:
    """Search for a discriminating path for the triple <node_a, node_b, node_c>
    and, when one is found, delegate orientation to ``doDdpOrientation``.

    Performs a BFS from ``node_a`` backwards over edges with arrowheads into
    the current node, tracking predecessors so the path can be reconstructed.
    The search is cut off once the path length exceeds ``maxPathLength``
    (-1 means unlimited, capped at 1000).

    Returns the (possibly updated) ``change_flag``.
    """
    Q = Queue()
    V = set()                 # visited nodes
    e = None                  # marker node used to detect BFS level boundaries
    distance = 0
    previous = {}             # BFS predecessor map
    cParents = graph.get_parents(node_c)
    Q.put(node_a)
    V.add(node_a)
    V.add(node_b)
    previous[node_a] = node_b
    while not Q.empty():
        node_t = Q.get_nowait()
        if (e is None) or (e == node_t):
            # Crossed into the next BFS level: bump the path length and
            # enforce the maximum-path-length cutoff.
            e = node_t
            distance += 1
            if (distance > 0) and (distance > (1000 if maxPathLength == (- 1) else maxPathLength)):
                return change_flag
        nodesInTo = graph.get_nodes_into(node_t, Endpoint.ARROW)
        for node_d in nodesInTo:
            if node_d in V:
                continue
            previous[node_d] = node_t
            node_p = previous[node_t]
            # Only continue along definite colliders <node_d, node_t, node_p>.
            if not graph.is_def_collider(node_d, node_t, node_p):
                continue
            if (not graph.is_adjacent_to(node_d, node_c)) and (node_d != node_c):
                # Endpoint of a discriminating path found: try to orient.
                (res, change_flag) = doDdpOrientation(node_d, node_a, node_b, node_c, previous, graph, data, independence_test_method, alpha, sep_sets, change_flag, bk, verbose)
                if res:
                    return change_flag
            # Extend the search only through parents of node_c.
            if node_d in cParents:
                Q.put(node_d)
                V.add(node_d)
    return change_flag
def z1_pre_encoder(x, z2, hus=(1024, 1024)):
    """Pre-encoder for z1: flatten x over its last two axes, concatenate z2,
    and apply a stack of ReLU fully-connected layers.

    Args:
        x: input tensor; assumes shape (batch, T, F) — TODO confirm with callers.
        z2: conditioning tensor, concatenated along the last axis.
        hus: hidden-unit sizes for the FC stack. (Changed from a mutable list
            default to an equivalent tuple; it is only iterated, so behavior
            is unchanged.)

    Returns:
        Output tensor of the final fully-connected layer.
    """
    with tf.variable_scope('z1_pre_enc'):
        (T, F) = x.get_shape().as_list()[1:]
        x = tf.reshape(x, ((- 1), (T * F)))
        out = tf.concat([x, z2], axis=(- 1))
        for (i, hu) in enumerate(hus):
            out = fully_connected(out, hu, activation_fn=tf.nn.relu, scope=('fc%s' % i))
        return out
class RestrictChatMember():
    """Mixin providing ``Client.restrict_chat_member``."""

    async def restrict_chat_member(self: 'pyrogram.Client', chat_id: Union[(int, str)], user_id: Union[(int, str)], permissions: 'types.ChatPermissions', until_date: datetime=utils.zero_datetime()) -> 'types.Chat':
        """Restrict a chat member's permissions until *until_date*.

        Each ``ChatPermissions`` flag is negated below because the raw API
        expresses *banned* rights while the public API expresses *allowed*
        permissions.

        NOTE(review): the ``until_date`` default is evaluated once at function
        definition time (``utils.zero_datetime()``); presumably that value is a
        constant sentinel — confirm.
        """
        r = (await self.invoke(raw.functions.channels.EditBanned(channel=(await self.resolve_peer(chat_id)), participant=(await self.resolve_peer(user_id)), banned_rights=raw.types.ChatBannedRights(until_date=utils.datetime_to_timestamp(until_date), send_messages=(not permissions.can_send_messages), send_media=(not permissions.can_send_media_messages), send_stickers=(not permissions.can_send_other_messages), send_gifs=(not permissions.can_send_other_messages), send_games=(not permissions.can_send_other_messages), send_inline=(not permissions.can_send_other_messages), embed_links=(not permissions.can_add_web_page_previews), send_polls=(not permissions.can_send_polls), change_info=(not permissions.can_change_info), invite_users=(not permissions.can_invite_users), pin_messages=(not permissions.can_pin_messages)))))
        # The server returns the updated chat as the first entry in r.chats.
        return types.Chat._parse_chat(self, r.chats[0])
class TestMakeClass:
    """Tests for ``attr.make_class``.

    NOTE(review): the ``@pytest.mark.parametrize`` and ``@attr.s`` decorators
    below appeared stripped (bare ``.parametrize(...)`` expression, and inline
    classes compared via ``__attrs_attrs__`` without being attrs classes);
    they have been restored.
    """

    @pytest.mark.parametrize('ls', [list, tuple])
    def test_simple(self, ls):
        # A list/tuple of names yields mandatory attributes in order.
        C1 = make_class('C1', ls(['a', 'b']))

        @attr.s
        class C2:
            a = attr.ib()
            b = attr.ib()

        assert (C1.__attrs_attrs__ == C2.__attrs_attrs__)

    def test_dict(self):
        # A dict of name -> attr.ib carries defaults through.
        C1 = make_class('C1', {'a': attr.ib(default=42), 'b': attr.ib(default=None)})

        @attr.s
        class C2:
            a = attr.ib(default=42)
            b = attr.ib(default=None)

        assert (C1.__attrs_attrs__ == C2.__attrs_attrs__)

    def test_attr_args(self):
        # Keyword arguments (here repr=False) are forwarded to attr.s.
        C = make_class('C', ['x'], repr=False)
        assert repr(C(1)).startswith('<tests.test_make.C object at 0x')

    def test_catches_wrong_attrs_type(self):
        with pytest.raises(TypeError) as e:
            make_class('C', object())
        assert (('attrs argument must be a dict or a list.',) == e.value.args)

    def test_bases(self):
        # Default base is object; explicit bases end up in the MRO.
        class D:
            pass

        cls = make_class('C', {})
        assert (cls.__mro__[(- 1)] == object)
        cls = make_class('C', {}, bases=(D,))
        assert (D in cls.__mro__)
        assert isinstance(cls(), D)

    def test_additional_class_body(self):
        # class_body entries become members of the generated class.
        def echo_func(cls, *args):
            return args

        cls = make_class('C', {}, class_body={'echo': classmethod(echo_func)})
        assert (('a', 'b') == cls.echo('a', 'b'))

    @pytest.mark.parametrize('slots', [True, False])
    def test_clean_class(self, slots):
        # The _CountingAttr placeholder must not leak into the final class.
        C = make_class('C', ['x'], slots=slots)
        x = getattr(C, 'x', None)
        assert (not isinstance(x, _CountingAttr))

    def test_missing_sys_getframe(self, monkeypatch):
        # make_class must work even when sys._getframe is unavailable.
        monkeypatch.delattr(sys, '_getframe')
        C = make_class('C', ['x'])
        assert (1 == len(C.__attrs_attrs__))

    def test_make_class_ordered(self):
        # Dict order (not attr.ib creation order) determines attribute order.
        b = attr.ib(default=2)
        a = attr.ib(default=1)
        C = attr.make_class('C', {'a': a, 'b': b})
        assert ('C(a=1, b=2)' == repr(C()))

    def test_generic_dynamic_class(self):
        # Regression test: a dynamically created Generic can serve as a base.
        from types import new_class
        from typing import Generic, TypeVar
        MyTypeVar = TypeVar('MyTypeVar')
        MyParent = new_class('MyParent', (Generic[MyTypeVar],), {})
        attr.make_class('test', {'id': attr.ib(type=str)}, (MyParent[int],))
class Migration(migrations.Migration):
    """Alters ``Publisher.render_pixel``: boolean (default False) controlling
    whether ad templates render the ethical-pixel."""

    dependencies = [('adserver', '0042_add_keyword_impressions')]
    operations = [migrations.AlterField(model_name='publisher', name='render_pixel', field=models.BooleanField(default=False, help_text='Render ethical-pixel in ad templates. This is needed for users not using the ad client.'))]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.