# NOTE(review): dataset-export residue ("code stringlengths 281 23.7M |")
# removed — it was not valid Python and served no purpose in this file.
def test_solver_does_not_raise_conflict_for_locked_conditional_dependencies(solver: Solver, repo: Repository, package: ProjectPackage) -> None:
    """Solving with a marker-restricted locked package must not conflict.

    A is locked behind a ``python >= 3.6`` marker while B is refreshed via
    ``use_latest``; the solver should install both without raising.
    """
    set_package_python_versions(solver.provider, '~2.7 || ^3.4')
    conditional_dep = Factory.create_dependency('A', {'version': '^1.0', 'python': '^3.6'})
    package.add_dependency(conditional_dep)
    package.add_dependency(Factory.create_dependency('B', '^1.0'))
    locked_a = get_package('A', '1.0.0')
    locked_a.python_versions = '>=3.6'
    locked_a.marker = parse_marker('python_version >= "3.6" and python_version < "4.0"')
    plain_b = get_package('B', '1.0.0')
    for pkg in (locked_a, plain_b):
        repo.add_package(pkg)
    # Pre-seed the provider's lock data with the conditional dependency on A.
    solver.provider._locked = {canonicalize_name('A'): [DependencyPackage(conditional_dep, locked_a)]}
    transaction = solver.solve(use_latest=[plain_b.name])
    check_solver_result(transaction, [{'job': 'install', 'package': locked_a}, {'job': 'install', 'package': plain_b}])
class XPath:
    """Composable builder for XPath expressions.

    An instance holds one or more alternative XPath strings; operations
    return new instances so expressions compose immutably. ``str(x)`` joins
    the alternatives with ``|``.

    NOTE(review): the source text had every ``@``-prefixed token stripped
    (``@classmethod``/``@property`` decorators and XPath attribute
    references such as ``@id``/``@class``). They are restored here; the
    exact attributes used in ``button_labelled`` (``@value``, ``@name``)
    follow the conventional HTML semantics — confirm against the original.
    """

    def __init__(self, *xpaths):
        self.xpaths = xpaths

    def __str__(self):
        return self.xpath

    def __repr__(self):
        return "%s('%s')" % (self.__class__.__name__, str(self))

    def __getitem__(self, n):
        # Append predicate [n] to every alternative; n may be an index or
        # any XPath condition string.
        return self.__class__(*['%s[%s]' % (xpath, n) for xpath in self.xpaths])

    @property
    def xpath(self):
        # The full expression: alternatives joined as an XPath union.
        return '|'.join(self.xpaths)

    def inside_of(self, another):
        # Restrict this expression to descendants of `another`; absolute
        # paths are made relative with a leading '.'.
        return self.__class__(*['%s/%s%s' % (b, ('.' if a.startswith('/') else ''), a)
                                for (a, b) in itertools.product(self.xpaths, another.xpaths)])

    def __or__(self, other):
        # Union of the two expressions' alternatives.
        return self.__class__(*(self.xpaths + other.xpaths))

    def including_class(self, css_class):
        # Match elements whose class attribute contains `css_class` as a
        # whole word (space-padded containment idiom).
        return self.__class__(*['%s[contains(concat(" ", @class, " "), " %s ")]' % (xpath, css_class)
                                for xpath in self.xpaths])

    @classmethod
    def delimit_text(cls, text):
        # Quote text for embedding in XPath; text containing double quotes
        # must be assembled with concat().
        bits = text.split('"')
        if len(bits) > 1:
            return 'concat(%s)' % ',\'"\','.join(['"%s"' % bit for bit in bits])
        else:
            return '"%s"' % text

    def with_id(self, text):
        return self.__class__(*['%s[@id=%s]' % (xpath, self.delimit_text(text)) for xpath in self.xpaths])

    def with_text(self, text):
        return self.__class__(*['%s[normalize-space()=normalize-space(%s)]' % (xpath, self.delimit_text(text))
                                for xpath in self.xpaths])

    def including_text(self, text):
        return self.__class__(*['%s[contains(normalize-space(), normalize-space(%s))]' % (xpath, self.delimit_text(text))
                                for xpath in self.xpaths])

    def with_text_starting(self, text):
        return self.__class__(*['%s[starts-with(normalize-space(), normalize-space(%s))]' % (xpath, self.delimit_text(text))
                                for xpath in self.xpaths])

    def containing(self, another):
        # Match elements that contain `another` as a descendant.
        return self.__class__(*[final_xpath
                                for xpath in another.xpaths
                                for final_xpath in self[re.sub('^//', '', xpath)].xpaths])

    @classmethod
    def any(cls, tag_name):
        return cls('//%s' % tag_name)

    @classmethod
    def button_labelled(cls, label, **arguments):
        # A <button> with matching text, or an <input> button whose value
        # matches; optional event arguments are matched as a suffix of @name.
        arguments = arguments or {}
        value_selector = 'normalize-space(@value)=normalize-space(%s)' % cls.delimit_text(label)
        input_button = cls.any('input')[value_selector]
        if arguments:
            encoded_arguments = '?' + urllib.parse.urlencode(arguments)
            argument_selector = ('substring(@name, string-length(@name)-string-length("%s")+1) = "%s"'
                                 % (encoded_arguments, encoded_arguments))
            input_button = input_button[argument_selector]
        button = cls.any('button').with_text(label)
        return button | input_button

    @classmethod
    def caption(cls):
        return cls.any('caption')

    @classmethod
    def checkbox(cls):
        return cls.any('input')['@type="checkbox"']

    @classmethod
    def div(cls):
        return cls.any('div')

    @classmethod
    def fieldset_with_legend(cls, legend_text):
        legend = cls.legend().with_text(legend_text)
        return cls.any('fieldset').containing(legend)

    @classmethod
    def heading(cls, level):
        return cls.any('h%s' % level)

    @classmethod
    def input(cls):
        return cls.any('input')

    @classmethod
    def input_labelled(cls, label_text):
        # Either an input referenced by the label's @for, or one nested
        # inside the label element.
        label = cls.any('label').with_text(label_text)
        for_based_xpath = cls.any('input')['@id=%s/@for' % label]
        nested_xpath = cls.any('input').inside_of(label)
        return cls(str(for_based_xpath), str(nested_xpath))

    @classmethod
    def input_named(cls, name):
        return cls.any('input')['@name="%s"' % name]

    @classmethod
    def input_of_type(cls, input_type):
        return cls.any('input')['@type="%s"' % input_type]

    @classmethod
    def label(cls):
        return cls.any('label')

    @classmethod
    def legend(cls):
        return cls.any('legend')

    @classmethod
    def link(cls):
        return cls.any('a')

    @classmethod
    def option(cls):
        return cls.any('option')

    @classmethod
    def paragraph(cls):
        return cls.any('p')

    @classmethod
    def select_labelled(cls, label_text):
        label = cls.any('label').with_text(label_text)
        return cls.any('select')['@id=%s/@for' % label]

    @classmethod
    def select_named(cls, name):
        return cls.any('select')['@name="%s"' % name]

    @classmethod
    def span(cls):
        return cls.any('span')

    @classmethod
    def table(cls):
        return cls.any('table')

    @classmethod
    def table_with_summary(cls, text):
        return cls.any('table')['@summary="%s"' % text]

    @classmethod
    def table_header(cls):
        return cls.any('thead')

    @classmethod
    def table_body(cls):
        return cls.any('tbody')

    @classmethod
    def table_row(cls):
        return cls.any('tr')

    @classmethod
    def table_footer(cls):
        return cls.any('tfoot')

    @classmethod
    def table_cell(cls):
        return cls.any('*')['self::td or self::th']

    @classmethod
    def table_cell_aligned_to(cls, column_heading_text, search_column_heading, search_cell_text):
        # Find the cell in the target column that is on the same row as a
        # cell with known text in the search column.
        target_column_index = ('count(%s/preceding-sibling::th)+1'
                               % cls.table_cell().with_text(column_heading_text).inside_of(cls.table_header()))
        search_column_index = ('count(%s/preceding-sibling::th)+1'
                               % cls.table_cell().with_text(search_column_heading).inside_of(cls.table_header()))
        found_cell = cls.any('td')[search_column_index].with_text(search_cell_text).xpath
        found_row_index = 'count(%s/parent::tr/preceding-sibling::tr)+1' % found_cell
        return cls.table_cell()[target_column_index].inside_of(XPath.table_row()[found_row_index])

    @classmethod
    def ul(cls):
        return cls.any('ul')

    @classmethod
    def li(cls):
        return cls.any('li')
def get_cover_and_lastbit0image(reshaped_image):
    """Split a flattened integer image into its LSB plane and the LSB-cleared image.

    Parameters
    ----------
    reshaped_image : 1-D integer ndarray of pixel values (non-negative).

    Returns
    -------
    (cover_temp, lastbit0image) : tuple
        ``cover_temp`` is the least-significant bit of each pixel (int8);
        ``lastbit0image`` is the input with those bits subtracted out.
    """
    # Vectorized replacement for the original per-pixel bin()-string loop:
    # x & 1 is exactly the last binary digit for non-negative integers.
    cover_temp = np.bitwise_and(reshaped_image, 1).astype('int8')
    lastbit0image = reshaped_image - cover_temp
    return (cover_temp, lastbit0image)
# NOTE(review): the bare ".parametrize(...)" was the residue of a stripped
# "@pytest.mark" prefix; the decorator is restored (pytest is assumed to be
# imported at the top of this module, as other tests here use it).
@pytest.mark.parametrize('auth, pj_type, deprecated', [(None, None, False), ('EPSG', PJType.PROJECTED_CRS, False), ('EPSG', PJType.PROJECTED_CRS, True), ('IGNF', [PJType.GEOGRAPHIC_3D_CRS, PJType.GEOGRAPHIC_2D_CRS], False), ('EPSG', 'PROJECTED_CRS', False), ('EPSG', 'Projected_Crs', True)])
def test_query_crs_info(auth, pj_type, deprecated):
    """query_crs_info returns a non-empty list, and deprecated entries appear
    only when allow_deprecated is requested."""
    crs_info_list = query_crs_info(auth, pj_type, allow_deprecated=deprecated)
    assert crs_info_list
    any_deprecated = any(crs_info.deprecated for crs_info in crs_info_list)
    if deprecated:
        assert any_deprecated
    else:
        assert not any_deprecated
# NOTE(review): both bare ".parametrize(...)" lines were residue of stripped
# "@pytest.mark" prefixes (the bodies already use pytest.param); restored.
@pytest.mark.parametrize(['method', 'order'], [pytest.param('euler', 0.5, id='Euler'), pytest.param('milstein', 1.0, id='Milstein'), pytest.param('milstein_imp', 1.0, id='Milstein implicit'), pytest.param('platen', 1.0, id='Platen'), pytest.param('pred_corr', 1.0, id='PredCorr'), pytest.param('rouchon', 1.0, id='rouchon'), pytest.param('explicit1.5', 1.5, id='Explicit15'), pytest.param('taylor1.5_imp', 1.5, id='Taylor15 implicit')])
@pytest.mark.parametrize(['H', 'c_ops', 'sc_ops'], [pytest.param('qeye', [], ['destroy'], id='simple'), pytest.param('qeye', ['destroy'], ['destroy'], id='simple + collapse'), pytest.param('herm', ['destroy', 'destroy2'], [], id='2 c_ops'), pytest.param('herm', [], ['destroy', 'destroy2'], id='2 sc_ops'), pytest.param('herm', ['create', 'destroy'], ['destroy', 'destroy2'], id='many terms'), pytest.param('herm', [], ['random'], id='random'), pytest.param('herm', ['random'], ['random'], id='complex'), pytest.param('herm td', ['random'], ['destroy'], id='H td'), pytest.param('herm', ['random'], ['destroy td'], id='sc_ops td')])
def test_open_integrator(method, order, H, c_ops, sc_ops):
    """Check each stochastic integrator reaches at least its advertised strong
    convergence order (with margin) against the taylor1.5 reference."""
    N = 5
    H = _make_oper(H, N)
    c_ops = [_make_oper(op, N) for op in c_ops]
    sc_ops = [_make_oper(op, N) for op in sc_ops]
    rhs = _StochasticRHS(StochasticOpenSystem, H, sc_ops, c_ops, False)
    ref_sode = SMESolver.avail_integrators()['taylor1.5'](rhs, {'dt': 0.01})
    sode = SMESolver.avail_integrators()[method](rhs, {'dt': 0.01})
    state = operator_to_vector(fock_dm(5, 3, dtype='Dense')).data
    error_order = get_error_order_integrator(sode, ref_sode, state)
    # Require a comfortable margin above the nominal order.
    assert (order + 0.35) < error_order
import functools  # NOTE(review): for the restored cache decorator below


# NOTE(review): the bare "_cache()" line was the residue of a stripped
# "@lru_cache()" decorator; restored via functools so repeated calls reuse
# the discovered interpreter path.
@functools.lru_cache()
def get_executable_even_when_embedded():
    """Locate a runnable Python interpreter, even in embedded environments.

    Tries, in order: sys.executable (if it looks like a python binary), then
    <env-root>/bin/python relative to numpy's install location, then
    <env-root>/python.

    Returns the interpreter path as a string; raises ValueError if no
    candidate passes test_exe.
    """
    exe = str(sys.executable)
    if pathlib.Path(exe).name.startswith('python'):
        try:
            test_exe(exe)
            return exe
        except FileNotFoundError:
            pass
    # Fall back to paths derived from numpy's installation directory.
    current_path = pathlib.Path(np.__file__).parents[4]
    exe = str(current_path.joinpath('bin', 'python'))
    try:
        test_exe(exe)
        return exe
    except FileNotFoundError:
        pass
    current_path = pathlib.Path(np.__file__).parents[3]
    exe = str(current_path.joinpath('python'))
    try:
        test_exe(exe)
    except FileNotFoundError as e:
        raise ValueError('Tried to determine the python interpreter path, but was unsuccessful.') from e
    return exe
def test_format_currency():
    """Spot-check babel's format_currency across locales and options.

    NOTE(review): the source text had non-ASCII characters stripped from the
    expected strings; the euro sign, the '\xa4\xa4' (¤¤) currency pattern and
    the Arabic-Indic digits are reconstructed below — verify against the
    upstream babel test suite.
    """
    assert (numbers.format_currency(1099.98, 'USD', locale='en_US') == '$1,099.98')
    assert (numbers.format_currency(1099.98, 'USD', locale='en_US', numbering_system='default') == '$1,099.98')
    assert (numbers.format_currency(0, 'USD', locale='en_US') == '$0.00')
    assert (numbers.format_currency(1099.98, 'USD', locale='es_CO') == 'US$1.099,98')
    assert (numbers.format_currency(1099.98, 'EUR', locale='de_DE') == '1.099,98\xa0\u20ac')
    assert (numbers.format_currency(1099.98, 'USD', locale='ar_EG', numbering_system='default') == '\u200f\u0661\u066c\u0660\u0669\u0669\u066b\u0669\u0668\xa0US$')
    assert (numbers.format_currency(1099.98, 'EUR', '\xa4\xa4 #,##0.00', locale='en_US') == 'EUR 1,099.98')
    assert (numbers.format_currency(1099.98, 'EUR', locale='nl_NL') != numbers.format_currency((- 1099.98), 'EUR', locale='nl_NL'))
    assert (numbers.format_currency(1099.98, 'USD', format=None, locale='en_US') == '$1,099.98')
    assert (numbers.format_currency(1, 'USD', locale='es_AR') == 'US$1,00')
    assert (numbers.format_currency(1000000, 'USD', locale='es_AR') == 'US$1.000.000,00')
    assert (numbers.format_currency(0, 'USD', locale='es_AR') == 'US$0,00')
from unittest.mock import patch  # NOTE(review): for the restored decorator below


# NOTE(review): the bare "(os.environ, {...})" line was the residue of a
# stripped "@patch.dict(...)" decorator; restored so the test pins the worker
# to GPU 0 via CUDA_VISIBLE_DEVICES.
@patch.dict(os.environ, {'CUDA_VISIBLE_DEVICES': '0'})
def test_worker_fraction_limits(loop):
    """dask-cuda worker memory fractions must translate into absolute limits.

    Starts a scheduler and a cuda worker with fractional device-memory and RMM
    pool sizes, then checks the reported configuration equals the fractions of
    total device memory (RMM sizes rounded down to a 256-byte multiple).
    """
    pytest.importorskip('rmm')
    with popen(['dask', 'scheduler', '--port', '9369', '--no-dashboard']):
        with popen(['dask', 'cuda', 'worker', '127.0.0.1:9369', '--host', '127.0.0.1', '--device-memory-limit', '0.1', '--rmm-pool-size', '0.2', '--rmm-maximum-pool-size', '0.3', '--no-dashboard', '--rmm-track-allocations']):
            with Client('127.0.0.1:9369', loop=loop) as client:
                assert wait_workers(client, n_gpus=get_n_gpus())
                device_total_memory = client.run(get_device_total_memory)
                wait(device_total_memory)
                (_, device_total_memory) = device_total_memory.popitem()
                ret = get_cluster_configuration(client)
                wait(ret)
                assert (ret['device-memory-limit'] == int((device_total_memory * 0.1)))
                # RMM pool sizes are aligned down to 256-byte multiples.
                assert (ret['[plugin] RMMSetup']['initial_pool_size'] == (((device_total_memory * 0.2) // 256) * 256))
                assert (ret['[plugin] RMMSetup']['maximum_pool_size'] == (((device_total_memory * 0.3) // 256) * 256))
def test_uniquifier():
    """Uniquifier deduplicates by identity and can map uniques back out."""
    obj1, obj2, obj3, obj4, obj5 = [1], [2], [3], [4], [5]
    objs = [obj1, obj2, obj1, obj2, obj3, obj3]
    objsA = [obj2, obj1, obj2, obj1, obj4, obj4]

    uniq = Uniquifier(objs)

    # First-seen order is preserved when deduplicating the original list.
    unique_objs = uniq.get_unique_objs()
    assert len(unique_objs) == 3
    for got, expected in zip(unique_objs, [obj1, obj2, obj3]):
        assert got is expected

    # The same holds for an explicitly supplied list.
    unique_objsA = uniq.get_unique_objs(objsA)
    assert len(unique_objsA) == 3
    for got, expected in zip(unique_objsA, [obj2, obj1, obj4]):
        assert got is expected

    # Mapping unique objects back expands to the original list's shape.
    objs2 = uniq.map_unique_objs([obj3, obj4, obj5])
    assert len(objs2) == len(objs)
    for got, expected in zip(objs2, [obj3, obj4, obj3, obj4, obj5, obj5]):
        assert got is expected
class SDFusionImage2ShapeModel(BaseModel):
    """Image-conditioned SDF diffusion model (SDFusion image-to-shape).

    A 3D diffusion UNet over VQ-VAE latents, conditioned on CLIP image
    embeddings; supports DDP training and DDIM sampling for inference.

    NOTE(review): the source text contained bare ``_grad()`` lines — residue
    of stripped ``@...no_grad()`` decorators — before ``inference``,
    ``img2shape`` and ``eval_metrics``; restored as ``@torch.no_grad()``.
    """

    def name(self):
        # Identifier used for logging / checkpoint naming.
        return 'SDFusionImage2ShapeModel'

    def initialize(self, opt):
        """Build the UNet, VQ-VAE, CLIP conditioner, optimizer and renderer."""
        BaseModel.initialize(self, opt)
        self.isTrain = opt.isTrain
        self.model_name = self.name()
        self.device = opt.device
        assert (opt.df_cfg is not None)
        assert (opt.vq_cfg is not None)
        df_conf = OmegaConf.load(opt.df_cfg)
        vq_conf = OmegaConf.load(opt.vq_cfg)
        # Latent grid shape: z_channels x (resolution / 2**n_down)^3.
        ddconfig = vq_conf.model.params.ddconfig
        shape_res = ddconfig.resolution
        (z_ch, n_down) = (ddconfig.z_channels, (len(ddconfig.ch_mult) - 1))
        z_sp_dim = (shape_res // (2 ** n_down))
        self.z_shape = (z_ch, z_sp_dim, z_sp_dim, z_sp_dim)
        df_model_params = df_conf.model.params
        unet_params = df_conf.unet.params
        self.uc_scale = 1.0
        self.df = DiffusionUNet(unet_params, vq_conf=vq_conf, conditioning_key=df_model_params.conditioning_key)
        self.df.to(self.device)
        self.init_diffusion_params(uc_scale=self.uc_scale, opt=opt)
        self.ddim_sampler = DDIMSampler(self)
        self.vqvae = load_vqvae(vq_conf, vq_ckpt=opt.vq_ckpt, opt=opt)
        clip_param = df_conf.clip.params
        self.cond_model = CLIPImageEncoder(model=clip_param.model)
        self.cond_model.to(self.device)
        # NOTE(review): the CLIP encoder is deliberately left trainable here
        # (it is also listed in trainable_models below) — confirm intended.
        for param in self.cond_model.parameters():
            param.requires_grad = True
        trainable_models = [self.df, self.cond_model]
        trainable_params = []
        for m in trainable_models:
            trainable_params += [p for p in m.parameters() if p.requires_grad]
        if self.isTrain:
            self.optimizer = optim.AdamW(trainable_params, lr=opt.lr)
            self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, 1000, 0.9)
            self.optimizers = [self.optimizer]
            self.schedulers = [self.scheduler]
            self.print_networks(verbose=False)
        if (opt.ckpt is not None):
            self.load_ckpt(opt.ckpt, load_opt=self.isTrain)
        self.to_tensor = transforms.ToTensor()
        # Camera setup for the SDF mesh renderer.
        (dist, elev, azim) = (1.7, 20, 20)
        self.renderer = init_mesh_renderer(image_size=256, dist=dist, elev=elev, azim=azim, device=self.opt.device)
        if self.opt.distributed:
            self.make_distributed(opt)
            # Keep unwrapped references for state_dict access under DDP.
            self.df_module = self.df.module
            self.vqvae_module = self.vqvae.module
            self.cond_model_module = self.cond_model.module
        else:
            self.df_module = self.df
            self.vqvae_module = self.vqvae
            self.cond_model_module = self.cond_model
        self.ddim_steps = 100
        if (self.opt.debug == '1'):
            # Fewer sampling steps for quick debug runs.
            self.ddim_steps = 20
        cprint(f'[*] setting ddim_steps={self.ddim_steps}', 'blue')

    def make_distributed(self, opt):
        """Wrap the three networks in DistributedDataParallel."""
        self.df = nn.parallel.DistributedDataParallel(self.df, device_ids=[opt.local_rank], output_device=opt.local_rank, broadcast_buffers=False)
        self.vqvae = nn.parallel.DistributedDataParallel(self.vqvae, device_ids=[opt.local_rank], output_device=opt.local_rank, broadcast_buffers=False)
        self.cond_model = nn.parallel.DistributedDataParallel(self.cond_model, device_ids=[opt.local_rank], output_device=opt.local_rank, broadcast_buffers=False, find_unused_parameters=True)

    def init_diffusion_params(self, uc_scale=1.0, opt=None):
        """Set diffusion hyper-parameters and register the noise schedule."""
        df_conf = OmegaConf.load(opt.df_cfg)
        df_model_params = df_conf.model.params
        # eps-parameterization: the network predicts the added noise.
        self.parameterization = 'eps'
        self.learn_logvar = False
        self.v_posterior = 0.0
        self.original_elbo_weight = 0.0
        self.l_simple_weight = 1.0
        self.register_schedule(timesteps=df_model_params.timesteps, linear_start=df_model_params.linear_start, linear_end=df_model_params.linear_end)
        logvar_init = 0.0
        self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
        self.uc_scale = uc_scale

    def register_schedule(self, given_betas=None, beta_schedule='linear', timesteps=1000, linear_start=0.0001, linear_end=0.02, cosine_s=0.008):
        """Precompute the DDPM beta schedule and derived per-timestep tensors."""
        if exists(given_betas):
            betas = given_betas
        else:
            betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
        alphas = (1.0 - betas)
        alphas_cumprod = np.cumprod(alphas, axis=0)
        alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:(- 1)])
        (timesteps,) = betas.shape
        self.num_timesteps = int(timesteps)
        self.linear_start = linear_start
        self.linear_end = linear_end
        assert (alphas_cumprod.shape[0] == self.num_timesteps), 'alphas have to be defined for each timestep'
        to_torch = partial(torch.tensor, dtype=torch.float32)
        self.betas = to_torch(betas).to(self.device)
        self.alphas_cumprod = to_torch(alphas_cumprod).to(self.device)
        self.alphas_cumprod_prev = to_torch(alphas_cumprod_prev).to(self.device)
        # Quantities for q(x_t | x_0) and the reverse posterior.
        self.sqrt_alphas_cumprod = to_torch(np.sqrt(alphas_cumprod)).to(self.device)
        self.sqrt_one_minus_alphas_cumprod = to_torch(np.sqrt((1.0 - alphas_cumprod))).to(self.device)
        self.log_one_minus_alphas_cumprod = to_torch(np.log((1.0 - alphas_cumprod))).to(self.device)
        self.sqrt_recip_alphas_cumprod = to_torch(np.sqrt((1.0 / alphas_cumprod))).to(self.device)
        self.sqrt_recipm1_alphas_cumprod = to_torch(np.sqrt(((1.0 / alphas_cumprod) - 1))).to(self.device)
        posterior_variance = (((((1 - self.v_posterior) * betas) * (1.0 - alphas_cumprod_prev)) / (1.0 - alphas_cumprod)) + (self.v_posterior * betas))
        self.posterior_variance = to_torch(posterior_variance).to(self.device)
        # Clipped log-variance: posterior variance is 0 at t=0.
        self.posterior_log_variance_clipped = to_torch(np.log(np.maximum(posterior_variance, 1e-20))).to(self.device)
        self.posterior_mean_coef1 = to_torch(((betas * np.sqrt(alphas_cumprod_prev)) / (1.0 - alphas_cumprod))).to(self.device)
        self.posterior_mean_coef2 = to_torch((((1.0 - alphas_cumprod_prev) * np.sqrt(alphas)) / (1.0 - alphas_cumprod))).to(self.device)
        if (self.parameterization == 'eps'):
            lvlb_weights = ((self.betas ** 2) / (((2 * self.posterior_variance) * to_torch(alphas).to(self.device)) * (1 - self.alphas_cumprod)))
        elif (self.parameterization == 'x0'):
            lvlb_weights = ((0.5 * np.sqrt(torch.Tensor(alphas_cumprod))) / ((2.0 * 1) - torch.Tensor(alphas_cumprod)))
        else:
            raise NotImplementedError('mu not supported')
        # Weight at t=0 is degenerate; copy it from t=1.
        lvlb_weights[0] = lvlb_weights[1]
        self.lvlb_weights = lvlb_weights
        assert (not torch.isnan(self.lvlb_weights).all())

    def set_input(self, input=None, gen_order=None, max_sample=None):
        """Load a batch: SDF volume, conditioning image and its zero (unconditional) image."""
        self.x = input['sdf']
        self.img = input['img']
        self.uc_img = torch.zeros_like(self.img).to(self.device)
        if (max_sample is not None):
            self.x = self.x[:max_sample]
            self.img = self.img[:max_sample]
            self.uc_img = self.uc_img[:max_sample]
        vars_list = ['x', 'img']
        self.tocuda(var_names=vars_list)

    def switch_train(self):
        # VQ-VAE stays in eval mode; only the UNet and conditioner train.
        self.df.train()
        self.cond_model.train()

    def switch_eval(self):
        self.df.eval()
        self.vqvae.eval()
        self.cond_model.eval()

    def q_sample(self, x_start, t, noise=None):
        """Diffuse x_start to timestep t: sqrt(a_bar)*x0 + sqrt(1-a_bar)*noise."""
        noise = default(noise, (lambda: torch.randn_like(x_start)))
        return ((extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) + (extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise))

    def apply_model(self, x_noisy, t, cond, return_ids=False):
        """Run the UNet, routing the conditioning by the configured key."""
        if isinstance(cond, dict):
            pass  # hybrid case: cond already maps c_concat/c_crossattn.
        else:
            if (not isinstance(cond, list)):
                cond = [cond]
            key = ('c_concat' if (self.df_module.conditioning_key == 'concat') else 'c_crossattn')
            cond = {key: cond}
        out = self.df(x_noisy, t, **cond)
        if (isinstance(out, tuple) and (not return_ids)):
            return out[0]
        else:
            return out

    def get_loss(self, pred, target, loss_type='l2', mean=True):
        """L1 or L2 loss, optionally reduced to a mean."""
        if (loss_type == 'l1'):
            loss = (target - pred).abs()
            if mean:
                loss = loss.mean()
        elif (loss_type == 'l2'):
            if mean:
                loss = torch.nn.functional.mse_loss(target, pred)
            else:
                loss = torch.nn.functional.mse_loss(target, pred, reduction='none')
        else:
            # BUG FIX: original lacked the f-prefix, printing the literal
            # "{loss_type}" instead of the value.
            raise NotImplementedError(f"unknown loss type '{loss_type}'")
        return loss

    def p_losses(self, x_start, cond, t, noise=None):
        """Compute the diffusion training loss at timesteps t."""
        noise = default(noise, (lambda: torch.randn_like(x_start)))
        x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
        model_output = self.apply_model(x_noisy, t, cond)
        loss_dict = {}
        if (self.parameterization == 'x0'):
            target = x_start
        elif (self.parameterization == 'eps'):
            target = noise
        else:
            raise NotImplementedError()
        loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3, 4])
        loss_dict.update({'loss_simple': loss_simple.mean()})
        logvar_t = self.logvar[t].to(self.device)
        loss = ((loss_simple / torch.exp(logvar_t)) + logvar_t)
        if self.learn_logvar:
            loss_dict.update({'loss_gamma': loss.mean()})
            loss_dict.update({'logvar': self.logvar.data.mean()})
        loss = (self.l_simple_weight * loss.mean())
        # Variational lower-bound term, weighted per timestep.
        loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3, 4))
        loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
        loss_dict.update({'loss_vlb': loss_vlb})
        loss += (self.original_elbo_weight * loss_vlb)
        loss_dict.update({'loss_total': loss.clone().detach().mean()})
        return (x_noisy, target, loss, loss_dict)

    def forward(self):
        """One training forward pass: encode inputs and compute the loss."""
        self.switch_train()
        c_img = self.cond_model(self.img).float()
        with torch.no_grad():
            # The VQ-VAE encoder is frozen; latents are treated as data.
            z = self.vqvae(self.x, forward_no_quant=True, encode_only=True).detach()
        t = torch.randint(0, self.num_timesteps, (z.shape[0],), device=self.device).long()
        (z_noisy, target, loss, loss_dict) = self.p_losses(z, c_img, t)
        self.loss_df = loss
        self.loss_dict = loss_dict

    @torch.no_grad()
    def inference(self, data, ddim_steps=None, ddim_eta=0.0, uc_scale=None, infer_all=False, max_sample=16):
        """Sample shapes for a batch via DDIM with classifier-free guidance."""
        self.switch_eval()
        if (not infer_all):
            self.set_input(data, max_sample=max_sample)
        else:
            self.set_input(data)
        if (ddim_steps is None):
            ddim_steps = self.ddim_steps
        if (uc_scale is None):
            uc_scale = self.uc_scale
        uc = self.cond_model(self.uc_img).float()
        c_img = self.cond_model(self.img).float()
        B = c_img.shape[0]
        shape = self.z_shape
        (samples, intermediates) = self.ddim_sampler.sample(S=ddim_steps, batch_size=B, shape=shape, conditioning=c_img, verbose=False, unconditional_guidance_scale=uc_scale, unconditional_conditioning=uc, eta=ddim_eta, quantize_x0=False)
        self.gen_df = self.vqvae_module.decode_no_quant(samples)
        self.switch_train()

    @torch.no_grad()
    def img2shape(self, image, mask, ddim_steps=None, ddim_eta=0.0, uc_scale=None, infer_all=False, max_sample=16):
        """Demo entry point: preprocess a single image+mask and sample a shape."""
        from utils.demo_util import preprocess_image
        import torchvision.transforms as transforms
        (mean, std) = ([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
        transforms = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std), transforms.Resize((256, 256))])
        (_, img) = preprocess_image(image, mask)
        img = transforms(img)
        self.img = img.unsqueeze(0).to(self.device)
        self.uc_img = torch.zeros_like(self.img).to(self.device)
        self.switch_eval()
        if (ddim_steps is None):
            ddim_steps = self.ddim_steps
        if (uc_scale is None):
            uc_scale = self.uc_scale
        uc = self.cond_model(self.uc_img).float()
        c_img = self.cond_model(self.img).float()
        B = c_img.shape[0]
        shape = self.z_shape
        (samples, intermediates) = self.ddim_sampler.sample(S=ddim_steps, batch_size=B, shape=shape, conditioning=c_img, verbose=False, unconditional_guidance_scale=uc_scale, unconditional_conditioning=uc, eta=ddim_eta, quantize_x0=False)
        self.gen_df = self.vqvae_module.decode_no_quant(samples)
        return self.gen_df

    @torch.no_grad()
    def eval_metrics(self, dataloader, thres=0.0, global_step=0):
        # Placeholder: no real metrics are computed yet.
        self.switch_eval()
        ret = OrderedDict([('dummy_metrics', 0.0)])
        self.switch_train()
        return ret

    def backward(self):
        """Reduce the loss dict across ranks and backpropagate."""
        self.loss = self.loss_df
        self.loss_dict = reduce_loss_dict(self.loss_dict)
        self.loss_total = self.loss_dict['loss_total']
        self.loss_simple = self.loss_dict['loss_simple']
        self.loss_vlb = self.loss_dict['loss_vlb']
        if ('loss_gamma' in self.loss_dict):
            self.loss_gamma = self.loss_dict['loss_gamma']
        self.loss.backward()

    def optimize_parameters(self, total_steps):
        """One optimization step: forward, backward, optimizer update."""
        self.set_requires_grad([self.df], requires_grad=True)
        self.set_requires_grad([self.cond_model], requires_grad=True)
        self.forward()
        self.optimizer.zero_grad()
        self.backward()
        self.optimizer.step()

    def get_logs_data(self):
        # Not implemented (the original also had unreachable code after the
        # raise, removed here).
        raise NotImplementedError

    def get_current_errors(self):
        """Return the latest scalar losses for logging."""
        ret = OrderedDict([('total', self.loss_total.data), ('simple', self.loss_simple.data), ('vlb', self.loss_vlb.data)])
        if hasattr(self, 'loss_gamma'):
            ret['gamma'] = self.loss_gamma.data
        return ret

    def get_current_visuals(self):
        """Render input image, GT SDF and generated SDF for visualization."""
        with torch.no_grad():
            self.img = self.img
            self.img_gt = render_sdf(self.renderer, self.x)
            self.img_gen_df = render_sdf(self.renderer, self.gen_df)
        vis_tensor_names = ['img', 'img_gt', 'img_gen_df']
        vis_ims = self.tnsrs2ims(vis_tensor_names)
        visuals = zip(vis_tensor_names, vis_ims)
        return OrderedDict(visuals)

    def save(self, label, global_step, save_opt=False):
        """Save network (and optionally optimizer) state to the ckpt dir."""
        state_dict = {'vqvae': self.vqvae_module.state_dict(), 'cond_model': self.cond_model_module.state_dict(), 'df': self.df_module.state_dict(), 'global_step': global_step}
        if save_opt:
            state_dict['opt'] = self.optimizer.state_dict()
        save_filename = ('df_%s.pth' % label)
        save_path = os.path.join(self.opt.ckpt_dir, save_filename)
        torch.save(state_dict, save_path)

    def load_ckpt(self, ckpt, load_opt=False):
        """Load networks (and optionally optimizer) from a path or state dict."""
        map_fn = (lambda storage, loc: storage)
        if (type(ckpt) == str):
            state_dict = torch.load(ckpt, map_location=map_fn)
        else:
            state_dict = ckpt
        self.vqvae.load_state_dict(state_dict['vqvae'])
        self.df.load_state_dict(state_dict['df'])
        self.cond_model.load_state_dict(state_dict['cond_model'])
        print(colored(('[*] weight successfully load from: %s' % ckpt), 'blue'))
        if load_opt:
            self.optimizer.load_state_dict(state_dict['opt'])
            print(colored(('[*] optimizer successfully restored from: %s' % ckpt), 'blue'))
# NOTE(review): the bare ".parametrize(...)" was the residue of a stripped
# "@pytest.mark" prefix; decorator restored (pytest assumed imported at top).
@pytest.mark.parametrize('freq, start_date, days_in_current_year, days_in_next_year', [('1D', '2012-12-31', 1, 0), ('1D', '2011-12-31', 1, 0), ('3D', '2012-12-30', 2, 1), (7, '2012-12-29', 3, 4), ('60h', '2012-12-31', 1, 1.5), ('15h', '2012-12-31', 0.625, 0), ('39h', '2012-12-31', 1, 0.625)])
def test_timestep_days_in_year_methods(simple_linear_model, freq, start_date, days_in_current_year, days_in_next_year):
    """Timestep.days_in_current_year/days_in_next_year must split the delta
    correctly across a year boundary for various frequencies."""
    simple_linear_model.timestepper.start = start_date
    simple_linear_model.timestepper.end = '2013-01-30'
    simple_linear_model.timestepper.delta = freq
    simple_linear_model.setup()
    simple_linear_model.step()
    ts = simple_linear_model.timestepper.current
    assert days_in_current_year == ts.days_in_current_year()
    assert days_in_next_year == ts.days_in_next_year()
class ResearchModels():
    """Container for the temporal-stream (optical flow) CNN.

    Either loads a previously saved Keras model or builds the temporal CNN
    from scratch, then compiles it with SGD and categorical cross-entropy.
    """

    def __init__(self, nb_classes, num_of_snip, opt_flow_len, image_shape=(224, 224), saved_model=None):
        self.num_of_snip = num_of_snip
        self.opt_flow_len = opt_flow_len
        self.load_model = load_model
        self.saved_model = saved_model
        self.nb_classes = nb_classes
        print('Number of classes:')
        print(self.nb_classes)
        # Track top-k accuracy only when there are enough classes for it
        # to be meaningful.
        chosen_metrics = ['accuracy']
        if self.nb_classes >= 10:
            chosen_metrics.append('top_k_categorical_accuracy')
        if self.saved_model is not None:
            print(('Loading model %s' % self.saved_model))
            self.model = load_model(self.saved_model)
        else:
            print('Loading CNN model for the temporal stream.')
            # Stacked optical-flow input: 2 channels (x/y) per flow frame.
            self.input_shape = (image_shape[0], image_shape[1], ((opt_flow_len * 2) * self.num_of_snip))
            self.model = self.cnn_temporal()
        sgd = SGD(lr=0.01, momentum=0.9, nesterov=True)
        self.model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=chosen_metrics)
        print(self.model.summary())

    def cnn_temporal(self):
        """Build the CNN-M-2048-style temporal-stream network."""
        print('Input shape:')
        print(self.input_shape)
        print('Numer of classes:')
        print(self.nb_classes)
        model = Sequential()
        # conv1: large receptive field with batch norm, then pool.
        model.add(Conv2D(96, (7, 7), strides=2, padding='same', input_shape=self.input_shape))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        # conv2, then pool.
        model.add(Conv2D(256, (5, 5), strides=2, padding='same'))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        # conv3-conv5: three identical 3x3 conv layers, then pool.
        for _ in range(3):
            model.add(Conv2D(512, (3, 3), strides=1, activation='relu', padding='same'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        # Fully connected head with heavy dropout.
        model.add(Flatten())
        for width in (4096, 2048):
            model.add(Dense(width, activation='relu'))
            model.add(Dropout(0.9))
        model.add(Dense(self.nb_classes, activation='softmax'))
        return model
def kbkdf_counter_mode_test(backend, params):
    """Dispatch one NIST KBKDF counter-mode vector to the HMAC or CMAC helper.

    Normalizes the counter location and, for middle-fixed vectors, merges the
    split fixed input data while recording where the counter breaks it.
    """
    location_by_name = {
        'before_fixed': CounterLocation.BeforeFixed,
        'after_fixed': CounterLocation.AfterFixed,
        'middle_fixed': CounterLocation.MiddleFixed,
    }
    ctr_loc = location_by_name[params.pop('ctrlocation')]
    brk_loc = None
    if ctr_loc == CounterLocation.MiddleFixed:
        # Merge the two fixed-data halves; the break location marks where
        # the counter sits between them.
        assert 'fixedinputdata' not in params
        before = params.pop('databeforectrdata')
        after = params.pop('dataafterctrdata')
        params['fixedinputdata'] = before + after
        brk_loc = params.pop('databeforectrlen')
        assert isinstance(brk_loc, int)
    prf = params.get('prf')
    assert prf is not None
    assert isinstance(prf, str)
    del params['prf']
    if prf.startswith('hmac'):
        _kbkdf_hmac_counter_mode_test(backend, prf, ctr_loc, brk_loc, params)
    else:
        assert prf.startswith('cmac')
        _kbkdf_cmac_counter_mode_test(backend, prf, ctr_loc, brk_loc, params)
class ChaCha20(CipherAlgorithm):
    """ChaCha20 stream cipher algorithm (256-bit key, 128-bit nonce).

    NOTE(review): ``nonce`` and ``key_size`` lost their ``@property``
    decorators in the source text (``key_size`` is a read-only attribute on
    the CipherAlgorithm interface); restored here.
    """
    name = 'ChaCha20'
    key_sizes = frozenset([256])

    def __init__(self, key: bytes, nonce: bytes):
        self.key = _verify_key_size(self, key)
        utils._check_byteslike('nonce', nonce)
        if len(nonce) != 16:
            raise ValueError('nonce must be 128-bits (16 bytes)')
        self._nonce = nonce

    @property
    def nonce(self) -> bytes:
        return self._nonce

    @property
    def key_size(self) -> int:
        return len(self.key) * 8
def test_upgrade_specifier(pipx_temp_env, capsys):
    """Installing a pinned spec and upgrading must report the old version."""
    name = 'pylint'
    pkg_spec = PKG[name]['spec']
    # The pinned version is whatever follows '==' in the spec.
    initial_version = pkg_spec.split('==')[-1]
    for cli_args in (['install', f'{pkg_spec}'], ['upgrade', f'{name}']):
        assert not run_pipx_cli(cli_args)
    captured = capsys.readouterr()
    assert f'upgraded package {name} from {initial_version} to' in captured.out
class StartupsDataset(Dataset):
    """Similarity-group dataset of startups, grouped by industry label.

    Reads up to ``max_samples`` JSON lines from ``path``, shuffles them, and
    maps each distinct industry to a stable integer group id.
    """

    def __init__(self, path: str, max_samples: int = 500):
        super().__init__()
        with open(path, 'r', encoding='utf8') as f:
            lines = f.readlines()[:max_samples]
        random.shuffle(lines)
        self.data = [json.loads(line) for line in lines]
        # BUG FIX: the original built set(sorted(...)), which throws away the
        # sort and makes label indices depend on set iteration order; sorting
        # the unique industries makes the label mapping deterministic.
        industries = sorted({item['industry'] for item in self.data})
        self._label2idx = {label: idx for (idx, label) in enumerate(industries)}

    def __getitem__(self, index: int) -> SimilarityGroupSample:
        item = self.data[index]
        return SimilarityGroupSample(obj=item, group=self._label2idx[item['industry']])

    def __len__(self) -> int:
        return len(self.data)

    def get_num_industries(self) -> int:
        # Number of distinct industry labels seen in the loaded data.
        return len(self._label2idx)
class FakeDirectory(FakeFile):
def __init__(self, name: str, perm_bits: int=helpers.PERM_DEF, filesystem: Optional['FakeFilesystem']=None):
FakeFile.__init__(self, name, (S_IFDIR | perm_bits), '', filesystem=filesystem)
self.st_nlink += 1
self._entries: Dict[(str, AnyFile)] = {}
def set_contents(self, contents: AnyStr, encoding: Optional[str]=None) -> bool:
raise self.filesystem.raise_os_error(errno.EISDIR, self.path)
def entries(self) -> Dict[(str, FakeFile)]:
return self._entries
def ordered_dirs(self) -> List[str]:
return [item[0] for item in sorted(self._entries.items(), key=(lambda entry: entry[1].st_ino))]
def add_entry(self, path_object: FakeFile) -> None:
if ((not helpers.is_root()) and (not (self.st_mode & helpers.PERM_WRITE)) and (not self.filesystem.is_windows_fs)):
raise OSError(errno.EACCES, 'Permission Denied', self.path)
path_object_name: str = to_string(path_object.name)
if (path_object_name in self.entries):
self.filesystem.raise_os_error(errno.EEXIST, self.path)
self._entries[path_object_name] = path_object
path_object.parent_dir = self
if (path_object.st_ino is None):
self.filesystem.last_ino += 1
path_object.st_ino = self.filesystem.last_ino
self.st_nlink += 1
path_object.st_nlink += 1
path_object.st_dev = self.st_dev
if (path_object.st_nlink == 1):
self.filesystem.change_disk_usage(path_object.size, path_object.name, self.st_dev)
def get_entry(self, pathname_name: str) -> AnyFile:
pathname_name = self._normalized_entryname(pathname_name)
return self.entries[to_string(pathname_name)]
def _normalized_entryname(self, pathname_name: str) -> str:
if (not self.filesystem.is_case_sensitive):
matching_names = [name for name in self.entries if (name.lower() == pathname_name.lower())]
if matching_names:
pathname_name = matching_names[0]
return pathname_name
def remove_entry(self, pathname_name: str, recursive: bool=True) -> None:
pathname_name = self._normalized_entryname(pathname_name)
entry = self.get_entry(pathname_name)
if self.filesystem.is_windows_fs:
if ((entry.st_mode & helpers.PERM_WRITE) == 0):
self.filesystem.raise_os_error(errno.EACCES, pathname_name)
if self.filesystem.has_open_file(entry):
self.filesystem.raise_os_error(errno.EACCES, pathname_name)
elif ((not helpers.is_root()) and ((self.st_mode & (helpers.PERM_WRITE | helpers.PERM_EXE)) != (helpers.PERM_WRITE | helpers.PERM_EXE))):
self.filesystem.raise_os_error(errno.EACCES, pathname_name)
if (recursive and isinstance(entry, FakeDirectory)):
while entry.entries:
entry.remove_entry(list(entry.entries)[0])
elif (entry.st_nlink == 1):
self.filesystem.change_disk_usage((- entry.size), pathname_name, entry.st_dev)
self.st_nlink -= 1
entry.st_nlink -= 1
assert (entry.st_nlink >= 0)
del self.entries[to_string(pathname_name)]
def size(self) -> int:
    """Return the total size in bytes of all entries in this directory."""
    # Sum over the entry objects directly instead of materializing a
    # throwaway list of (name, entry) pairs (only the values are used).
    return sum(entry.size for entry in self.entries.values())
def size(self, st_size: int) -> None:
    """Reject setting the size of a directory; always raises EISDIR.

    NOTE(review): this looks like a stripped ``@size.setter`` — confirm
    against the original file.  ``raise_os_error`` itself raises, so the
    outer ``raise`` never actually executes on a value.
    """
    raise self.filesystem.raise_os_error(errno.EISDIR, self.path)
def has_parent_object(self, dir_object: 'FakeDirectory') -> bool:
    """Return True if `dir_object` is this directory or one of its ancestors.

    Walks the `parent_dir` chain upward starting at `self`.
    """
    current: Optional[FakeDirectory] = self
    while current:
        if current == dir_object:
            return True
        current = current.parent_dir
    return False
def __str__(self) -> str:
    """Render this directory followed by every entry, indenting each
    non-empty line of the entries' own string representations."""
    parts = [super(FakeDirectory, self).__str__() + ':\n']
    for entry in self.entries.values():
        for line in str(entry).split('\n'):
            if line:
                parts.append(' ' + line + '\n')
    return ''.join(parts)
def build_custom_train_loader(cfg, mapper=None):
    """Build the training data loader described by `cfg`.

    When `cfg.DATALOADER.SOURCE_AWARE` is set, dataset dicts carry a
    `dataset_source` index and per-source sizes are collected (required
    by the `MultiDatasetSampler`).  The mapped dataset is then wrapped
    with the sampler named in `cfg.DATALOADER.SAMPLER_TRAIN`.
    """
    source_aware = cfg.DATALOADER.SOURCE_AWARE
    min_keypoints = (cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE if cfg.MODEL.KEYPOINT_ON else 0)
    proposal_files = (cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None)
    if source_aware:
        dataset_dicts = get_detection_dataset_dicts_with_source(
            cfg.DATASETS.TRAIN,
            filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
            min_keypoints=min_keypoints,
            proposal_files=proposal_files)
        # Count how many records each source dataset contributed.
        sizes = [0] * len(cfg.DATASETS.TRAIN)
        for record in dataset_dicts:
            sizes[record['dataset_source']] += 1
        print('dataset sizes', sizes)
    else:
        dataset_dicts = get_detection_dataset_dicts(
            cfg.DATASETS.TRAIN,
            filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
            min_keypoints=min_keypoints,
            proposal_files=proposal_files)
    dataset = MapDataset(DatasetFromList(dataset_dicts, copy=False), mapper)
    sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
    logger = logging.getLogger(__name__)
    logger.info('Using training sampler {}'.format(sampler_name))
    if (sampler_name == 'TrainingSampler'):
        sampler = TrainingSampler(len(dataset))
    elif (sampler_name == 'MultiDatasetSampler'):
        # `sizes` only exists in source-aware mode.
        assert source_aware
        sampler = MultiDatasetSampler(cfg, sizes, dataset_dicts)
    elif (sampler_name == 'RepeatFactorTrainingSampler'):
        repeat_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(dataset_dicts, cfg.DATALOADER.REPEAT_THRESHOLD)
        sampler = RepeatFactorTrainingSampler(repeat_factors)
    else:
        raise ValueError('Unknown training sampler: {}'.format(sampler_name))
    return build_batch_data_loader(dataset, sampler, cfg.SOLVER.IMS_PER_BATCH, aspect_ratio_grouping=cfg.DATALOADER.ASPECT_RATIO_GROUPING, num_workers=cfg.DATALOADER.NUM_WORKERS)
def _read_classes(csv_reader):
    """Parse `(class_name, class_id)` rows from a CSV reader.

    Returns an OrderedDict mapping class name to integer id, preserving
    file order.  Raises ValueError on malformed rows, bad ids, or
    duplicate class names (messages carry the 1-based line number).
    """
    result = OrderedDict()
    for line_num, row in enumerate(csv_reader, 1):
        try:
            class_name, class_id = row
        except ValueError:
            raise_from(ValueError("line {}: format should be 'class_name,class_id'".format(line_num)), None)
        class_id = _parse(class_id, int, 'line {}: malformed class ID: {{}}'.format(line_num))
        if class_name in result:
            raise ValueError("line {}: duplicate class name: '{}'".format(line_num, class_name))
        result[class_name] = class_id
    return result
class AdaptiveBatchNorm2d(nn.BatchNorm2d):
    """BatchNorm2d whose per-channel scale and shift are predicted from a
    conditioning vector `w` instead of being learned directly
    (`affine=False` on the base class)."""

    def __init__(self, num_features, num_w=512, eps=1e-05, momentum=0.1, affine=False, track_running_stats=True):
        super(AdaptiveBatchNorm2d, self).__init__(num_features, eps, momentum, affine, track_running_stats)
        # Project the conditioning vector to per-channel scale/shift.
        self.weight_proj = nn.Linear(num_w, num_features)
        self.bias_proj = nn.Linear(num_w, num_features)

    def forward(self, x, w):
        self._check_input_dim(x)
        # Mirror nn.BatchNorm2d's handling of the running-stats momentum:
        # cumulative average when momentum is None, fixed factor otherwise.
        avg_factor = 0.0
        if self.training and self.track_running_stats:
            self.num_batches_tracked += 1
            if self.momentum is None:
                avg_factor = 1.0 / self.num_batches_tracked.item()
            else:
                avg_factor = self.momentum
        normed = F.batch_norm(
            x, self.running_mean, self.running_var, self.weight, self.bias,
            self.training or not self.track_running_stats, avg_factor, self.eps)
        # Conditioning: scale defaults to ~1 via the +1 offset.
        scale = self.weight_proj(w) + 1
        shift = self.bias_proj(w)
        shape = normed.size()
        scale = scale.unsqueeze(-1).unsqueeze(-1).expand(shape)
        shift = shift.unsqueeze(-1).unsqueeze(-1).expand(shape)
        return scale * normed + shift

    def __repr__(self):
        return '{}({})'.format(self.__class__.__name__, str(self.num_features))
class OrthographicPoisson():
    """Orthographic normal integration by solving a Poisson least-squares
    system with conjugate gradients.

    Given `data` with a boolean pixel `mask`, per-pixel normals `n`, and a
    `step_size`, recovers a depth map and a PyVista surface mesh.
    """

    def __init__(self, data):
        self.method_name = 'orthographic_poisson'
        print('running {}...'.format(self.method_name))
        method_start = time.time()
        # Surface gradients from the normals: p = -nx/nz, q = -ny/nz.
        p = -data.n[data.mask, 0] / data.n[data.mask, 2]
        q = -data.n[data.mask, 1] / data.n[data.mask, 2]
        # Forward/backward finite-difference operators in u and v.
        dvp, dvn, dup, dun = generate_dx_dy(data.mask, data.step_size)
        # Normal equations of the discrete Poisson problem.  The matrix
        # products had been garbled in the source (missing `@`); restored.
        A = 0.5 * ((dup.T @ dup) + (dun.T @ dun) + (dvp.T @ dvp) + (dvn.T @ dvn))
        b = ((0.5 * (dup.T + dun.T)) @ p) + ((0.5 * (dvp.T + dvn.T)) @ q)
        solver_start = time.time()
        z, _ = cg(A, b, maxiter=1000, tol=1e-09)
        solver_end = time.time()
        self.solver_runtime = solver_end - solver_start
        self.residual = (A @ z) - b
        method_end = time.time()
        self.total_runtime = method_end - method_start
        # `np.float` was removed in NumPy 1.24; plain `float` is equivalent.
        self.depth_map = np.ones_like(data.mask, dtype=float) * np.nan
        self.depth_map[data.mask] = z
        self.facets = construct_facets_from_depth_map_mask(data.mask)
        self.vertices = construct_vertices_from_depth_map_and_mask(data.mask, self.depth_map, data.step_size)
        self.surface = pv.PolyData(self.vertices, self.facets)
def _find_common_ancestor_of_all_nodes(tasks: list[PTaskWithPath], paths: list[Path], show_nodes: bool) -> Path:
    """Find the common ancestor of all task paths, optionally their
    path-node dependencies/products, and the extra `paths`."""
    collected: list[Path] = []
    for task in tasks:
        collected.append(task.path)
        if show_nodes:
            for node in tree_leaves(task.depends_on):
                if isinstance(node, PPathNode):
                    collected.append(node.path)
            for node in tree_leaves(task.produces):
                if isinstance(node, PPathNode):
                    collected.append(node.path)
    return find_common_ancestor(*collected, *paths)
class Denoised_Classifier(torch.nn.Module):
    """Classify images after purifying them with an SDEdit-style
    noise-then-DDIM-denoise pass through a diffusion model.

    Args:
        diffusion: diffusion process providing `q_sample` and `ddim_sample`.
        model: the denoising network passed to `ddim_sample`.
        classifier: downstream classifier applied to the purified image.
        t: integer noise timestep used for the forward noising.
    """

    def __init__(self, diffusion, model, classifier, t):
        super().__init__()
        self.diffusion = diffusion
        self.model = model
        self.classifier = classifier
        self.t = t

    def sdedit(self, x, t, to_01=True):
        """Noise `x` to timestep `t`, then DDIM-denoise back to t=0.

        `x` is expected in [0, 1]; the result is mapped back to [0, 1]
        when `to_01` is true.
        """
        # Map [0, 1] images to the [-1, 1] range of the diffusion model.
        x = (x * 2) - 1
        # BUG FIX: the original rebound `t` to the per-sample tensor, so the
        # later `range(t + 1)` only worked for batch size 1 (a 1-element
        # tensor supports __index__).  Keep the integer `t` and use a
        # separate batched tensor.
        t_batch = torch.full((x.shape[0],), t).long().to(x.device)
        x_t = self.diffusion.q_sample(x, t_batch)
        sample = x_t
        # Reverse timesteps t, t-1, ..., 0.
        indices = list(range(t + 1))[::-1]
        l_sample = []
        l_predxstart = []
        for i in indices:
            out = self.diffusion.ddim_sample(self.model, sample, torch.full((x.shape[0],), i).long().to(x.device))
            sample = out['sample']
            l_sample.append(out['sample'])
            l_predxstart.append(out['pred_xstart'])
        # Debug dumps of the denoising trajectory.
        si(torch.cat(l_sample), 'l_sample.png', to_01=1)
        si(torch.cat(l_predxstart), 'l_pxstart.png', to_01=1)
        if to_01:
            sample = (sample + 1) / 2
        return sample

    def forward(self, x):
        out = self.sdedit(x, self.t)
        out = self.classifier(out)
        return out
class UtilTest(parameterized.TestCase, tf.test.TestCase):
    """Smoke tests for the util data-pipeline helpers."""

    def test_tfdata(self):
        # Build a TF1-style dataset from dummy ground-truth data and verify
        # that ten elements can be drawn without error.
        dataset = util.tf_data_set_from_ground_truth_data(dummy_data.DummyData(), 0)
        next_element = dataset.make_one_shot_iterator().get_next()
        with self.test_session() as sess:
            for _ in range(10):
                sess.run(next_element)
def test_help_subcommand_completion_with_flags_before_command(scu_app):
    """Tab-completing after 'help -h -v base ' should still offer the
    base command's subcommands despite the interleaved flags."""
    text = ''
    line = 'help -h -v base {}'.format(text)
    endidx = len(line)
    begidx = endidx - len(text)
    first_match = complete_tester(text, line, begidx, endidx, scu_app)
    assert first_match is not None
    assert scu_app.completion_matches == ['bar', 'foo', 'sport']
def find_18():
    """Look for known indicator artifacts: file names in the system root,
    service names, and dropped binaries in two well-known directories.

    Returns True when any indicator is found, False otherwise (including
    when a directory listing fails).
    """

    def _dir_listing(path):
        # Run a recorded `dir` over *path*; return the file names or None
        # if the command or the data fetch fails.
        status, cmd_id = dsz.cmd.RunEx(('dir -mask * -path "%s"' % path), dsz.RUN_FLAG_RECORD)
        if not status:
            return None
        try:
            return dsz.cmd.data.Get('DirItem::FileItem::name', dsz.TYPE_STRING, cmd_id)
        except RuntimeError:
            return None

    if ('msprnt.exe' in datastore.SYSTEMROOT_FILE_SET) or ('fmem.dll' in datastore.SYSTEMROOT_FILE_SET):
        return True
    if not datastore.SERVICE_NAME_SET.isdisjoint({'pnppci', 'ethio', 'ntdos505', 'ndisio'}):
        return True
    names = _dir_listing('%s\\All Users\\Application Data' % datastore.PROFILE_PATH)
    if names is None:
        return False
    if ('msncp.exe' in names) or ('netsvcs.exe' in names):
        return True
    names = _dir_listing('%s\\common files\\microsoft shared\\Triedit' % datastore.PROGRAM_FILES_STR)
    if names is None:
        return False
    if ('htmlprsr.exe' in names) or ('dhtmled.dll' in names) or ('TRIEDIT.TLB' in names):
        return True
    return False
def test_untested_floats(covtest):
    """One covered function out of six should yield a 58.33% line /
    100.00% branch insufficient-coverage message."""
    covtest.makefile('\n def func():\n pass\n\n def untested():\n pass\n\n def untested2():\n pass\n\n def untested3():\n pass\n\n def untested4():\n pass\n\n def untested5():\n pass\n ')
    covtest.run()
    expected = check_coverage.Message(
        check_coverage.MsgType.insufficient_coverage,
        'module.py',
        'module.py has 58.33% line and 100.00% branch coverage!')
    actual = covtest.check()
    assert actual == [expected]
class Plugin(DigitalBitboxPlugin, QtPluginBase):
    """Qt GUI glue for the Digital Bitbox hardware-wallet plugin."""

    # Icon shown while the device is not paired / paired.
    icon_unpaired = 'digitalbitbox_unpaired.png'
    icon_paired = 'digitalbitbox.png'

    def create_handler(self, window):
        # One handler instance per wallet window.
        return DigitalBitbox_Handler(window)

    # NOTE(review): this bare name looks like a stripped decorator (e.g.
    # `@only_hook_if_libraries_available` / `@hook`) that originally
    # decorated `receive_menu` — confirm against the upstream file.  As
    # written it is an expression statement referencing an external name.
    _hook_if_libraries_available

    def receive_menu(self, menu, addrs, wallet: Abstract_Wallet):
        """Add a 'Show on <device>' action when exactly one p2pkh address
        of a standard, mobile-paired wallet backed by this keystore is
        selected."""
        if (type(wallet) is not Standard_Wallet):
            return
        keystore = wallet.get_keystore()
        if (type(keystore) is not self.keystore_class):
            return
        if (not self.is_mobile_paired()):
            return
        if (len(addrs) == 1):
            addr = addrs[0]
            # The device can only display p2pkh addresses here.
            if (wallet.get_txin_type(addr) != 'p2pkh'):
                return

            def show_address():
                # Run on the keystore's worker thread to avoid blocking the UI.
                keystore.thread.add(partial(self.show_address, wallet, addr, keystore))
            menu.addAction(_('Show on {}').format(self.device), show_address)
def main_worker(gpu, ngpus_per_node, args):
    """Per-process training worker (PyTorch ImageNet-example layout).

    Initializes (optionally distributed) model, loss and optimizer,
    restores a checkpoint if requested, builds ImageNet-style train/val
    loaders and runs the training loop, checkpointing on best top-1
    accuracy.

    Args:
        gpu: local GPU index for this worker (or None for CPU/DataParallel).
        ngpus_per_node: number of GPUs on this node.
        args: parsed command-line namespace (mutated: gpu, rank,
            batch_size, workers, start_epoch).
    """
    global best_acc1
    args.gpu = gpu
    if (args.gpu is not None):
        print('Use GPU: {} for training'.format(args.gpu))
    if args.distributed:
        if ((args.dist_url == 'env://') and (args.rank == (- 1))):
            args.rank = int(os.environ['RANK'])
        if args.multiprocessing_distributed:
            # Global rank = node rank * gpus-per-node + local gpu index.
            args.rank = ((args.rank * ngpus_per_node) + gpu)
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()
    if (not torch.cuda.is_available()):
        print('using CPU, this will be slow')
    elif args.distributed:
        if (args.gpu is not None):
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # Split the global batch size and worker count across this
            # node's GPUs (one process per GPU).
            args.batch_size = int((args.batch_size / ngpus_per_node))
            args.workers = int((((args.workers + ngpus_per_node) - 1) / ngpus_per_node))
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif (args.gpu is not None):
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    elif (args.arch.startswith('alexnet') or args.arch.startswith('vgg')):
        # AlexNet/VGG keep most parameters in the classifier; parallelize
        # only the feature extractor (the example's memory-friendly choice).
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if (args.gpu is None):
                checkpoint = torch.load(args.resume)
            else:
                # Map checkpoint tensors directly onto this worker's GPU.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc']
            if (args.gpu is not None):
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    # Standard ImageNet channel statistics.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_dataset = datasets.ImageFolder(traindir, transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]))
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None
    # shuffle and DistributedSampler are mutually exclusive.
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler)
    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(valdir, transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize])), batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
    if args.evaluate:
        validate(val_loader, model, criterion, args)
        return
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Re-seed the sampler so shards differ between epochs.
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)
        train(train_loader, model, criterion, optimizer, epoch, args)
        acc1 = validate(val_loader, model, criterion, args)
        is_best = (acc1 > best_acc1)
        best_acc1 = max(acc1, best_acc1)
        # Only one process per node (local rank 0) writes checkpoints.
        if ((not args.multiprocessing_distributed) or (args.multiprocessing_distributed and ((args.rank % ngpus_per_node) == 0))):
            save_checkpoint({'epoch': (epoch + 1), 'state_dict': model.state_dict(), 'best_acc': best_acc1, 'optimizer': optimizer.state_dict()}, is_best)
# The original line began with a bare `.parametrize(` — a syntax error that
# looks like a truncated `@pytest.mark.parametrize` decorator; restored here.
# (Assumes `pytest` is imported at the top of this test module.)
@pytest.mark.parametrize('edge,rotation,expected', [('crop_edge_top', 0, 'SizeVerCursor'), ('crop_edge_top', 90, 'SizeHorCursor'), ('crop_edge_top', 180, 'SizeVerCursor'), ('crop_edge_top', 270, 'SizeHorCursor'), ('crop_edge_bottom', 0, 'SizeVerCursor'), ('crop_edge_bottom', 90, 'SizeHorCursor'), ('crop_edge_bottom', 180, 'SizeVerCursor'), ('crop_edge_bottom', 270, 'SizeHorCursor'), ('crop_edge_left', 0, 'SizeHorCursor'), ('crop_edge_left', 90, 'SizeVerCursor'), ('crop_edge_left', 180, 'SizeHorCursor'), ('crop_edge_left', 270, 'SizeVerCursor'), ('crop_edge_right', 0, 'SizeHorCursor'), ('crop_edge_right', 90, 'SizeVerCursor'), ('crop_edge_right', 180, 'SizeHorCursor'), ('crop_edge_right', 270, 'SizeVerCursor')])
def test_get_crop_edge_cursor(edge, rotation, expected, qapp, item):
    """Crop-edge resize cursors must swap between vertical and horizontal
    as the item rotates by 90-degree steps."""
    item.setRotation(rotation)
    cursor = item.get_crop_edge_cursor(getattr(item, edge))
    assert (cursor == getattr(Qt.CursorShape, expected))
def get_local_output_filepaths(input_files, dest_dir):
    """Mirror `input_files` into `dest_dir`, preserving nesting.

    Lists are recursed into; any other item is mapped through
    `get_local_path`.  Returns a structure with the same shape.
    """
    return [
        get_local_output_filepaths(item, dest_dir)
        if isinstance(item, list)
        else get_local_path(item, dest_dir)
        for item in input_files
    ]
class ServerMessage(MessageBase):
    """Server side of a simple socket protocol: a JSON header message,
    optionally followed by a `struct`-packed binary payload whose format
    string travels in the header as `struct_fmt`."""

    # NOTE(review): takes `cls` — looks like a stripped @classmethod
    # decorator; confirm against the original file.
    def from_remote(cls, sock):
        """Read one message from *sock*.

        Reads the JSON header; if it announces a `struct_fmt`, ACKs the
        header and then reads exactly `struct.calcsize(struct_fmt)` bytes
        of payload, unpacking them into `kwargs['data']`.
        """
        header = cls._recv(sock)
        if (not PY2):
            header = header.decode()
        kwargs = json.loads(header)
        struct_fmt = kwargs.get('struct_fmt')
        if (struct_fmt is not None):
            # struct.calcsize needs a native str (not unicode on py2).
            struct_fmt = str(struct_fmt)
            data_len = struct.calcsize(struct_fmt)
        else:
            # Header-only message: nothing more on the wire.
            return cls(**kwargs)
        # Acknowledge the header before the peer streams the payload.
        ack_msg = AckMessage()
        ack_msg.send_message(sock)
        recv = None
        # Accumulate until the announced payload length is consumed.
        while (data_len > 0):
            _recv = cls._recv(sock)
            if (recv is None):
                recv = _recv
            else:
                recv += _recv
            data_len -= len(_recv)
        kwargs['data'] = struct.unpack(struct_fmt, recv)
        return cls(**kwargs)

    def send_message(self, sock):
        """Send the header; if the data carries a `struct_fmt`, wait for
        the peer's ACK and then stream the packed payload.

        Raises:
            CommunicationError: if the peer's ACK is missing/not ok.
        """
        (header, data) = self._serialize()
        self._send(sock, header)
        if isinstance(self.data, dict):
            struct_fmt = self.data.get('struct_fmt')
        else:
            struct_fmt = None
        if (struct_fmt is not None):
            struct_fmt = str(struct_fmt)
            data = self.data['data']
            data_len = struct.calcsize(struct_fmt)
            ack = self.get_ack_response(sock)
            if (not ack.header.get('ok')):
                raise CommunicationError('No ACK received')
            # Stream the payload; _send may send fewer bytes than asked.
            while (data_len > 0):
                sent = self._send(sock, data)
                data_len -= sent
                data = data[sent:]

    def get_header(self, **kwargs):
        """Extend the base header with a success flag and the originating
        request (taken from the client message when available)."""
        d = super(ServerMessage, self).get_header(**kwargs)
        d['success'] = kwargs.get('success', True)
        client_message = kwargs.get('client_message')
        if (client_message is not None):
            d['request'] = client_message.header
        else:
            d['request'] = kwargs.get('request')
        return d

    def get_data(self, **kwargs):
        """Fetch the payload; mirror any `struct_fmt` into the header so
        the receiving side can size its reads."""
        d = super(ServerMessage, self).get_data(**kwargs)
        if (isinstance(d, dict) and ('struct_fmt' in d)):
            self.header['struct_fmt'] = d['struct_fmt']
        return d

    def get_response_class(self):
        # Server messages are answered with a bare ACK.
        return AckMessage
def main():
    """Visualize the ground-truth rotated boxes for every item in the
    configured training dataset, optionally saving the renderings."""
    args = parse_args()
    cfg = retrieve_data_cfg(args.config, args.skip_type, args.cfg_options)
    dataset = build_dataset(cfg.data.train)
    progress_bar = mmcv.ProgressBar(len(dataset))
    for item in dataset:
        if args.output_dir is not None:
            out_file = os.path.join(args.output_dir, Path(item['filename']).name)
        else:
            out_file = None
        imshow_det_rbboxes(item['img'], item['gt_bboxes'], item['gt_labels'], class_names=dataset.CLASSES, score_thr=0, show=(not args.not_show), wait_time=args.show_interval, out_file=out_file, bbox_color=dataset.PALETTE, text_color=(200, 200, 200))
        progress_bar.update()
def main(args):
    """Generate code-style event-extraction prompts for ACE roles.

    Loads the parsed ACE role ontology (and its full-context ``.py``
    rendering), optionally loads k-shot in-context training examples
    grouped by class name, then streams the input ``.jsonl`` and writes
    one mapping line plus one code file per event.
    """
    with open(args.parsed_ace_roles) as f:
        parsed_ace_roles = json.load(f)
    name_to_ontology = {parsed['name']: parsed for parsed in parsed_ace_roles}
    print('Ontology loaded.')
    # The .py sibling of the parsed-roles .json holds the full-context
    # code rendering used inside the prompts.
    with open(('.'.join(args.parsed_ace_roles.split('.')[:(- 1)]) + '.py')) as f:
        ace_roles_full_context = f.read()
    if (args.n_hierarchy_incontext_examples > 0):
        with open(args.hierarchy_split_filepath, 'r') as f:
            hierarchy_split = json.load(f)

        def load_in_context_examples(train_path, k_shot: int):
            """Group up to *k_shot* training examples per class-name key."""

            def _covert_cur_cls_name_to_key(cur_cls_name) -> str:
                # Multi-label names are canonicalized into a sorted,
                # '+'-joined key.
                if isinstance(cur_cls_name, list):
                    cur_cls_name = '+'.join(sorted(cur_cls_name))
                    assert isinstance(cur_cls_name, str), cur_cls_name
                    return cur_cls_name
                assert isinstance(cur_cls_name, str)
                return cur_cls_name

            k_shot_examples: Mapping[(str, List[Dict])] = defaultdict(list)
            assert (k_shot >= 0)
            if (k_shot > 0):
                with open(train_path, 'r') as fread:
                    _train_examples = [json.loads(line) for line in tqdm(fread)]
                print(f'Loaded {len(_train_examples)} training examples for in-context completion.')
                for _train_example in tqdm(_train_examples):
                    if args.hierarchy_augment_nearest_neighbor:
                        # Pre-compute sentence embeddings for nearest-neighbor
                        # example retrieval.
                        _train_example['embedding'] = SENT_MODEL.encode(_train_example['sentence'], convert_to_tensor=True, show_progress_bar=False)
                    k_shot_examples[_covert_cur_cls_name_to_key(_train_example['cur_cls_name'])].append(_train_example)
                n_total = 0
                # Keep only the first k_shot examples per class.
                for (k, v) in k_shot_examples.items():
                    k_shot_examples[k] = v[:k_shot]
                    n_total += len(k_shot_examples[k])
                print(f'Total {n_total} in-context examples.')
            return k_shot_examples
        k_shot_examples = load_in_context_examples(args.input_train_filepath, args.n_hierarchy_incontext_examples)
    else:
        # hierarchy_split / k_shot_examples would be undefined below.
        raise NotImplementedError
    subset = args.input_filepath.split('/')[(- 1)].split('.')[0]
    pathlib.Path(args.output_filedir).mkdir(parents=True, exist_ok=True)
    output_filepath = os.path.join(args.output_filedir, (subset + '.jsonl'))
    output_code_dir = os.path.join(args.output_filedir, subset)
    pathlib.Path(output_code_dir).mkdir(parents=True, exist_ok=True)
    print(f'Writing mapping (.json) to {output_filepath}, code (.py) to {output_code_dir}')
    with open(args.input_filepath, 'r') as fread, open(output_filepath, 'w') as fwrite:
        for (line_idx, line) in tqdm(enumerate(fread)):
            ex = json.loads(line)
            process_single_event(ex, name_to_ontology, line_idx, output_code_dir, hierarchy_split, k_shot_examples, fwrite, ace_roles_full_context, args)
class BloqBuilder():
    """Imperatively build a `CompositeBloq` out of registers, bloq
    instances and the connections between their soquets.

    Soquets are single-use: each output soquet produced by adding a bloq
    may be consumed exactly once as a later input.  `finalize()` checks
    that every soquet was used and returns the finished `CompositeBloq`.
    """

    def __init__(self, add_registers_allowed: bool=True):
        # Accumulated connections and instances for the final CompositeBloq.
        self._cxns: List[Connection] = []
        self._regs: List[Register] = []
        self._binsts: Set[BloqInstance] = set()
        # Monotonic counter handing out unique bloq-instance ids.
        self._i = 0
        # Soquets that are still available to be consumed as inputs.
        self._available: Set[Soquet] = set()
        # Whether ad-hoc register addition is permitted (switched off by
        # `from_signature` after the signature's registers are added).
        self.add_register_allowed = add_registers_allowed

    # NOTE(review): the two stub bodies below look like stripped @overload
    # declarations — confirm; at runtime they are harmlessly shadowed by
    # the full definition that follows.
    def add_register(self, reg: Register, bitsize: None=None) -> Union[(None, SoquetT)]:
        ...

    def add_register(self, reg: str, bitsize: int) -> SoquetT:
        ...

    def add_register(self, reg: Union[(str, Register)], bitsize: Optional[int]=None) -> Union[(None, SoquetT)]:
        """Add a register (object, or name + bitsize).

        Returns the register's LeftDangle soquet(s) for LEFT-side
        registers, else None.  Raises ValueError on misuse.
        """
        if (not self.add_register_allowed):
            raise ValueError('This BloqBuilder was constructed from pre-specified registers. Ad hoc addition of more registers is not allowed.')
        if isinstance(reg, Register):
            if (bitsize is not None):
                raise ValueError('`bitsize` must not be specified if `reg` is a Register.')
        else:
            if (not isinstance(reg, str)):
                raise ValueError('`reg` must be a string register name if not a Register.')
            if (not isinstance(bitsize, int)):
                raise ValueError('`bitsize` must be specified and must be an integer if `reg` is a register name.')
            reg = Register(name=reg, bitsize=bitsize)
        self._regs.append(reg)
        if (reg.side & Side.LEFT):
            return _reg_to_soq(LeftDangle, reg, available=self._available)
        return None

    # NOTE(review): takes `cls` — looks like a stripped @classmethod
    # decorator; confirm against the original file.
    def from_signature(cls, signature: Signature, add_registers_allowed: bool=False) -> Tuple[('BloqBuilder', Dict[(str, SoquetT)])]:
        """Construct a builder pre-populated with *signature*'s registers.

        Returns the builder and a dict of initial soquets for the
        signature's LEFT-side registers.
        """
        # Temporarily permit register additions while installing the
        # signature's own registers.
        bb = cls(add_registers_allowed=True)
        initial_soqs: Dict[(str, SoquetT)] = {}
        for reg in signature:
            if (reg.side & Side.LEFT):
                initial_soqs[reg.name] = bb.add_register(reg)
            else:
                bb.add_register(reg)
        bb.add_register_allowed = add_registers_allowed
        return (bb, initial_soqs)

    # NOTE(review): no `self`/`cls` parameter — looks like a stripped
    # @staticmethod decorator; confirm against the original file.
    def map_soqs(soqs: Dict[(str, SoquetT)], soq_map: Iterable[Tuple[(SoquetT, SoquetT)]]) -> Dict[(str, SoquetT)]:
        """Map the values of `soqs` through `soq_map` (delegates to `_map_soqs`)."""
        return _map_soqs(soqs=soqs, soq_map=soq_map)

    def _new_binst_i(self) -> int:
        # Hand out unique, increasing bloq-instance ids.
        i = self._i
        self._i += 1
        return i

    def _add_cxn(self, binst: BloqInstance, idxed_soq: Soquet, reg: Register, idx: Tuple[(int, ...)]) -> None:
        """Record a connection from `idxed_soq` into `binst`'s register
        `reg` at index `idx`, consuming the soquet.

        Raises:
            BloqError: if the soquet was already consumed / never produced.
        """
        try:
            # Each soquet may be used as an input exactly once.
            self._available.remove(idxed_soq)
        except KeyError:
            bloq = (binst if isinstance(binst, DanglingT) else binst.bloq)
            raise BloqError(f'{idxed_soq} is not an available Soquet for `{bloq}.{reg.name}`.') from None
        cxn = Connection(idxed_soq, Soquet(binst, reg, idx))
        self._cxns.append(cxn)

    def add_t(self, bloq: Bloq, **in_soqs: SoquetInT) -> Tuple[(SoquetT, ...)]:
        """Add a bloq instance; always return a tuple of output soquets."""
        binst = BloqInstance(bloq, i=self._new_binst_i())
        return tuple((soq for (_, soq) in self._add_binst(binst, in_soqs=in_soqs)))

    def add_d(self, bloq: Bloq, **in_soqs: SoquetInT) -> Dict[(str, SoquetT)]:
        """Add a bloq instance; return outputs keyed by register name."""
        binst = BloqInstance(bloq, i=self._new_binst_i())
        return dict(self._add_binst(binst, in_soqs=in_soqs))

    def add(self, bloq: Bloq, **in_soqs: SoquetInT) -> Union[(None, SoquetT, Tuple[(SoquetT, ...)])]:
        """Add a bloq instance; return None, a single soquet, or a tuple
        depending on how many outputs the bloq has."""
        outs = self.add_t(bloq, **in_soqs)
        if (len(outs) == 0):
            return None
        if (len(outs) == 1):
            return outs[0]
        return outs

    def _add_binst(self, binst: BloqInstance, in_soqs: Dict[(str, SoquetInT)]) -> Iterator[Tuple[(str, SoquetT)]]:
        """Wire `in_soqs` into `binst`'s LEFT registers and yield
        (name, soquet) for each RIGHT register's fresh outputs."""
        self._binsts.add(binst)
        bloq = binst.bloq

        def _add(idxed_soq: Soquet, reg: Register, idx: Tuple[(int, ...)]):
            return self._add_cxn(binst, idxed_soq, reg, idx)
        _process_soquets(registers=bloq.signature.lefts(), in_soqs=in_soqs, debug_str=str(bloq), func=_add)
        (yield from ((reg.name, _reg_to_soq(binst, reg, available=self._available)) for reg in bloq.signature.rights()))

    def add_from(self, bloq: Bloq, **in_soqs: SoquetInT) -> Tuple[(SoquetT, ...)]:
        """Inline all of `bloq`'s sub-bloqs (decomposing it if necessary)
        into this builder, rewiring its dangling soquets to `in_soqs`."""
        if isinstance(bloq, CompositeBloq):
            cbloq = bloq
        else:
            cbloq = bloq.decompose_bloq()
        for (k, v) in in_soqs.items():
            if (not isinstance(v, Soquet)):
                in_soqs[k] = np.asarray(v)
        # Seed the soquet map with the composite's LeftDangle soquets.
        soq_map: List[Tuple[(SoquetT, SoquetT)]] = [(_reg_to_soq(LeftDangle, reg), in_soqs[reg.name]) for reg in cbloq.signature.lefts()]
        for (binst, in_soqs, old_out_soqs) in cbloq.iter_bloqsoqs():
            in_soqs = _map_soqs(in_soqs, soq_map)
            new_out_soqs = self.add_t(binst.bloq, **in_soqs)
            soq_map.extend(zip(old_out_soqs, new_out_soqs))
        fsoqs = _map_soqs(cbloq.final_soqs(), soq_map)
        return tuple((fsoqs[reg.name] for reg in cbloq.signature.rights()))

    def finalize(self, **final_soqs: SoquetT) -> CompositeBloq:
        """Finish the composite bloq, inferring RIGHT registers for any
        final soquet not already covered (unless strict mode is on)."""
        if (not self.add_register_allowed):
            return self._finalize_strict(**final_soqs)

        def _infer_reg(name: str, soq: SoquetT) -> Register:
            # Derive a RIGHT register from a soquet (or array of soquets).
            if isinstance(soq, Soquet):
                return Register(name=name, bitsize=soq.reg.bitsize, side=Side.RIGHT)
            return Register(name=name, bitsize=soq.reshape((- 1))[0].reg.bitsize, shape=soq.shape, side=Side.RIGHT)
        right_reg_names = [reg.name for reg in self._regs if (reg.side & Side.RIGHT)]
        for (name, soq) in final_soqs.items():
            if (name not in right_reg_names):
                self._regs.append(_infer_reg(name, soq))
        return self._finalize_strict(**final_soqs)

    def _finalize_strict(self, **final_soqs: SoquetT) -> CompositeBloq:
        """Wire `final_soqs` into RightDangle and build the CompositeBloq.

        Raises:
            BloqError: if any produced soquet was never consumed.
        """
        signature = Signature(self._regs)

        def _fin(idxed_soq: Soquet, reg: Register, idx: Tuple[(int, ...)]):
            return self._add_cxn(RightDangle, idxed_soq, reg, idx)
        _process_soquets(registers=signature.rights(), debug_str='Finalizing', in_soqs=final_soqs, func=_fin)
        if self._available:
            raise BloqError(f'During finalization, {self._available} Soquets were not used.') from None
        return CompositeBloq(connections=self._cxns, signature=signature, bloq_instances=self._binsts)

    def allocate(self, n: int=1) -> Soquet:
        """Allocate a fresh n-bit register via the Allocate bloq."""
        from qualtran.bloqs.util_bloqs import Allocate
        return self.add(Allocate(n=n))

    def free(self, soq: Soquet) -> None:
        """Free (deallocate) a soquet via the Free bloq."""
        from qualtran.bloqs.util_bloqs import Free
        if (not isinstance(soq, Soquet)):
            raise ValueError('`free` expects a single Soquet to free.')
        self.add(Free(n=soq.reg.bitsize), free=soq)

    def split(self, soq: Soquet) -> NDArray[Soquet]:
        """Split an n-bit soquet into an array of n 1-bit soquets."""
        from qualtran.bloqs.util_bloqs import Split
        if (not isinstance(soq, Soquet)):
            raise ValueError('`split` expects a single Soquet to split.')
        return self.add(Split(n=soq.reg.bitsize), split=soq)

    def join(self, soqs: NDArray[Soquet]) -> Soquet:
        """Join a 1-d array of 1-bit soquets into one n-bit soquet."""
        from qualtran.bloqs.util_bloqs import Join
        try:
            (n,) = soqs.shape
        except AttributeError:
            raise ValueError('`join` expects a 1-d array of input soquets to join.') from None
        if (not all(((soq.reg.bitsize == 1) for soq in soqs))):
            raise ValueError('`join` can only join equal-bitsized soquets, currently only size 1.')
        return self.add(Join(n=n), join=soqs)
def updateCron(restore=False):
    """Install (or restore) the tool's cron job in root's crontab.

    With ``restore=False``: back up root's cron file, rewrite it with the
    tool's job line appended (dropping any previous instance of the job)
    and load it with ``crontab``.  With ``restore=True``: move the backup
    back into place and reload it.

    Exits via doError() on missing cron directory/crontab binary or a
    failed ``crontab`` invocation.
    """
    if (not restore):
        # Installing requires root.
        sysvals.rootUser(True)
    crondir = '/var/spool/cron/crontabs/'
    if (not os.path.exists(crondir)):
        # Fallback location used by some distros.
        crondir = '/var/spool/cron/'
    if (not os.path.exists(crondir)):
        doError(('%s not found' % crondir))
    cronfile = (crondir + 'root')
    backfile = (crondir + 'root-analyze_boot-backup')
    cmd = sysvals.getExec('crontab')
    if (not cmd):
        doError('crontab not found')
    if restore:
        # Put the saved crontab back and reload it.
        if os.path.exists(backfile):
            shutil.move(backfile, cronfile)
            call([cmd, cronfile])
        return
    # Back up the current crontab (or create an empty backup).
    if os.path.exists(cronfile):
        shutil.move(cronfile, backfile)
    else:
        fp = open(backfile, 'w')
        fp.close()
    res = (- 1)
    try:
        fp = open(backfile, 'r')
        op = open(cronfile, 'w')
        # Copy every existing line except a previous instance of our job.
        for line in fp:
            if (not sysvals.myCronJob(line)):
                op.write(line)
                continue
        fp.close()
        # NOTE(review): this line looks like it lost its schedule prefix
        # (upstream uses '@reboot python %s\n') — confirm before relying
        # on it; kept byte-identical here.
        op.write((' python %s\n' % sysvals.cronjobCmdString()))
        op.close()
        res = call([cmd, cronfile])
    except Exception as e:
        # On any failure, restore the backup and report the error below.
        pprint(('Exception: %s' % str(e)))
        shutil.move(backfile, cronfile)
        res = (- 1)
    if (res != 0):
        doError('crontab failed')
def MobileNet(input_shape=None, alpha=1.0, depth_multiplier=1, dropout=0.001, include_top=True, weights='imagenet', input_tensor=None, pooling=None, classes=1000, params=PARAM_NONE, **kwargs):
    """Instantiate a (modified) MobileNet v1 architecture.

    NOTE(review): unlike stock keras_applications this fork threads a
    `params` argument into the conv blocks and returns ``(x, model)``
    (output tensor and model) instead of just the model — confirm callers
    expect the tuple.

    Args mirror keras_applications.mobilenet.MobileNet; `alpha` is the
    width multiplier, `depth_multiplier` the depthwise resolution
    multiplier.  Raises ValueError on invalid `weights`/`classes` combos.
    """
    global backend, layers, models, keras_utils
    (backend, layers, models, keras_utils) = get_submodules_from_kwargs(kwargs)
    if (not ((weights in {'imagenet', None}) or os.path.exists(weights))):
        raise ValueError('The `weights` argument should be either `None` (random initialization), `imagenet` (pre-training on ImageNet), or the path to the weights file to be loaded.')
    if ((weights == 'imagenet') and include_top and (classes != 1000)):
        raise ValueError('If using `weights` as `"imagenet"` with `include_top` as true, `classes` should be 1000')
    # Pick the default input size: square sizes with pretrained weights
    # (128/160/192/224) are honored, everything else defaults to 224.
    if (input_shape is None):
        default_size = 224
    else:
        if (backend.image_data_format() == 'channels_first'):
            rows = input_shape[1]
            cols = input_shape[2]
        else:
            rows = input_shape[0]
            cols = input_shape[1]
        if ((rows == cols) and (rows in [128, 160, 192, 224])):
            default_size = rows
        else:
            default_size = 224
    input_shape = _obtain_input_shape(input_shape, default_size=default_size, min_size=32, data_format=backend.image_data_format(), require_flatten=include_top, weights=weights)
    if (backend.image_data_format() == 'channels_last'):
        (row_axis, col_axis) = (0, 1)
    else:
        (row_axis, col_axis) = (1, 2)
    rows = input_shape[row_axis]
    cols = input_shape[col_axis]
    if (weights == 'imagenet'):
        # Pretrained weights only exist for specific multipliers/sizes.
        if (depth_multiplier != 1):
            raise ValueError('If imagenet weights are being loaded, depth multiplier must be 1')
        if (alpha not in [0.25, 0.5, 0.75, 1.0]):
            raise ValueError('If imagenet weights are being loaded, alpha can be one of`0.25`, `0.50`, `0.75` or `1.0` only.')
        if ((rows != cols) or (rows not in [128, 160, 192, 224])):
            rows = 224
            warnings.warn('`input_shape` is undefined or non-square, or `rows` is not in [128, 160, 192, 224]. Weights for input shape (224, 224) will be loaded as the default.')
    if (input_tensor is None):
        img_input = layers.Input(shape=input_shape)
    elif (not backend.is_keras_tensor(input_tensor)):
        img_input = layers.Input(tensor=input_tensor, shape=input_shape)
    else:
        img_input = input_tensor
    # MobileNet v1 body: one standard conv followed by 13 depthwise-
    # separable blocks, downsampling via stride-2 blocks.
    x = _conv_block(img_input, 32, alpha, strides=(2, 2), params=params)
    x = _depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1, params=params)
    x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, strides=(2, 2), block_id=2, params=params)
    x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3, params=params)
    x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, strides=(2, 2), block_id=4, params=params)
    x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5, params=params)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, strides=(2, 2), block_id=6, params=params)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7, params=params)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=8, params=params)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=9, params=params)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=10, params=params)
    x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=11, params=params)
    x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, strides=(2, 2), block_id=12, params=params)
    x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, block_id=13, params=params)
    if include_top:
        if (backend.image_data_format() == 'channels_first'):
            shape = (int((1024 * alpha)), 1, 1)
        else:
            shape = (1, 1, int((1024 * alpha)))
        x = layers.GlobalAveragePooling2D()(x)
        x = layers.Reshape(shape, name='reshape_1')(x)
        x = layers.Dropout(dropout, name='dropout')(x)
        # NOTE(review): calling the Conv2D layer with a `params` kwarg is
        # not standard Keras — presumably a patched layer class in this
        # fork; confirm.
        x = layers.Conv2D(classes, (1, 1), padding='same', name='conv_preds')(x, params=params)
        x = layers.Reshape((classes,), name='reshape_2')(x)
        x = layers.Activation('softmax', name='act_softmax')(x)
    elif (pooling == 'avg'):
        x = layers.GlobalAveragePooling2D()(x)
    elif (pooling == 'max'):
        x = layers.GlobalMaxPooling2D()(x)
    if (input_tensor is not None):
        inputs = keras_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input
    model = models.Model(inputs, x, name=('mobilenet_%0.2f_%s' % (alpha, rows)))
    # Load weights: pretrained ImageNet files keyed by alpha/size, or a
    # user-supplied path.
    if (weights == 'imagenet'):
        if (alpha == 1.0):
            alpha_text = '1_0'
        elif (alpha == 0.75):
            alpha_text = '7_5'
        elif (alpha == 0.5):
            alpha_text = '5_0'
        else:
            alpha_text = '2_5'
        if include_top:
            model_name = ('mobilenet_%s_%d_tf.h5' % (alpha_text, rows))
            weight_path = (BASE_WEIGHT_PATH + model_name)
            weights_path = keras_utils.get_file(model_name, weight_path, cache_subdir='models')
        else:
            model_name = ('mobilenet_%s_%d_tf_no_top.h5' % (alpha_text, rows))
            weight_path = (BASE_WEIGHT_PATH + model_name)
            weights_path = keras_utils.get_file(model_name, weight_path, cache_subdir='models')
        model.load_weights(weights_path)
    elif (weights is not None):
        model.load_weights(weights)
    return (x, model)
def get_atom_symbols(mol, center_ids):
    """Build SMARTS-like atom query strings for every atom in `mol`.

    Each atom is rendered as `[elem;Xd;Hn;charge;R|!R]`; ring atoms of
    the configured aromatic elements (and dummy atoms) use the `#<num>`
    atomic-number form.  Atoms listed in `center_ids` are additionally
    tagged with 1-based attachment-point map numbers.
    """
    atom_symbols = []
    for atom in mol.GetAtoms():
        atomic_num = atom.GetAtomicNum()
        in_ring = atom.IsInRing()
        if atomic_num == 0:
            # Dummy atom.
            element_symbol = '#0'
        elif in_ring and atomic_num in _aromatic_elements:
            element_symbol = '#%d' % atomic_num
        else:
            element_symbol = atom.GetSymbol()
        atom_symbols.append('[%s;X%d;H%d;%+d;%s]' % (
            element_symbol,
            atom.GetTotalDegree(),
            atom.GetTotalNumHs(),
            atom.GetFormalCharge(),
            ('!R', 'R')[in_ring]))
    # Tag the reaction-center atoms with their attachment-point numbers.
    for attachment_point, center_id in enumerate(center_ids, 1):
        atom_symbols[center_id] = '%s:%s]' % (atom_symbols[center_id][:-1], attachment_point)
    return atom_symbols
def parse_data(dataset_dir, file_name, interval):
    """Parse a whitespace-delimited text dump into a tidy CSV.

    The file at ``{dataset_dir}/{file_name}`` is treated as ``Nt``
    consecutive records of ``interval`` lines each.  The first 5 lines of
    every record are skipped (header/metadata); the remaining lines are
    split into floats and collected, with the 0-based record index
    prepended, into columns ``Nt, x, y, z, u``.  The result is written next
    to the input as ``*.csv`` and a preview is printed.
    """
    data_all = []
    file_path = f'{dataset_dir}/{file_name}'
    with open(file_path, 'r') as f:
        lines = f.read().splitlines()
    Nt = len(lines) // interval  # number of complete records in the file
    print('Nt, interval', Nt, interval)
    for i in tqdm(range(Nt)):
        for j in range(interval):
            if j < 5:
                # the first 5 lines of each record carry no data
                continue
            index = (interval * i) + j
            # split on spaces and drop the empty strings produced by runs
            # of consecutive separators
            str_split = lines[index].split(' ')
            str_clean = [float(each) for each in str_split if each != '']
            data_all.append([i] + str_clean)
    df = pd.DataFrame(data_all, columns=['Nt', 'x', 'y', 'z', 'u'])
    df.to_csv(file_path.replace('.txt', '.csv'))
    print(df.head())
    print(df.shape)
def main():
    """CLI entry point: parse dataset/model choices, build the loaders and
    model, then run training followed by evaluation."""
    data_root = './Data/'
    parser = argparse.ArgumentParser('WiFi Imaging Benchmark')
    parser.add_argument('--dataset', choices=['UT_HAR_data', 'NTU-Fi-HumanID', 'NTU-Fi_HAR', 'Widar'])
    parser.add_argument('--model', choices=['MLP', 'LeNet', 'ResNet18', 'ResNet50', 'ResNet101', 'RNN', 'GRU', 'LSTM', 'BiLSTM', 'CNN+GRU', 'ViT'])
    args = parser.parse_args()
    (train_loader, test_loader, model, train_epoch) = load_data_n_model(args.dataset, args.model, data_root)
    criterion = nn.CrossEntropyLoss()
    # prefer the GPU when one is available
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    model.to(device)
    train(model=model, tensor_loader=train_loader, num_epochs=train_epoch, learning_rate=0.001, criterion=criterion, device=device)
    test(model=model, tensor_loader=test_loader, criterion=criterion, device=device)
def use_src_log_handler(where: Union[(HandlerMode, str)]) -> None:
    """Move the package's default log handler to the requested location.

    *where* may be a :class:`HandlerMode` or its (case-insensitive) name.
    The handler attached for the current mode is removed before the one for
    the new mode is installed; a no-op when the mode is unchanged.
    """
    global _current_mode
    if isinstance(where, str):
        where = HandlerMode[where.upper()]
    _initialize_if_necessary()
    if where == _current_mode:
        return
    # logger name the default handler lives on for each managed mode
    # (None means the root logger); other modes have no handler to manage
    targets = {HandlerMode.IN_SRC: 'src', HandlerMode.IN_ROOT_LOGGER: None}
    if _current_mode in targets:
        _disable_default_handler(targets[_current_mode])
    _current_mode = where
    if _current_mode in targets:
        _enable_default_handler(targets[_current_mode])
def _get_value_for_key(key, obj, default):
    """Resolve *key* against *obj*, trying progressively looser lookups.

    Order: container indexing (for indexable non-strings), integer indexing
    (for integer-indexable objects), then attribute access, finally
    *default*.  Lookup failures of the expected kinds are swallowed so the
    next strategy can run.
    """
    attempts = []
    if is_indexable_but_not_string(obj):
        attempts.append(((lambda: obj[key]), (IndexError, TypeError, KeyError)))
    if is_integer_indexable(obj):
        # ValueError covers keys that cannot be converted with int()
        attempts.append(((lambda: obj[int(key)]), (IndexError, TypeError, ValueError)))
    for (fetch, swallowed) in attempts:
        try:
            return fetch()
        except swallowed:
            pass
    return getattr(obj, key, default)
class PColorMeshItem(GraphicsObject):
    """pyqtgraph item rendering a pseudocolor mesh (pcolormesh-style plot).

    Cell-corner coordinates ``x``/``y`` and per-cell values ``z`` are mapped
    through a colormap between ``self.levels`` and painted as filled quads
    into a cached :class:`QPicture`.
    """
    # emitted with the new (min, max) tuple whenever setLevels() runs
    sigLevelsChanged = QtCore.Signal(object)
    def __init__(self, *args, **kwargs):
        """Create the item; *args* is optional data as ``(z,)`` or ``(x, y, z)``.

        Recognised keywords: ``edgecolors`` (pen spec for cell borders),
        ``antialiasing`` (bool), ``levels`` ((min, max) tuple),
        ``enableAutoLevels`` (bool), ``colorMap`` (ColorMap instance).
        """
        GraphicsObject.__init__(self)
        self.qpicture = None  # cached rendering; None means "needs redraw"
        self.x = None
        self.y = None
        self.z = None
        self._dataBounds = None  # ((xmin, xmax), (ymin, ymax)) of current data
        self.edgecolors = kwargs.get('edgecolors', None)
        if (self.edgecolors is not None):
            self.edgecolors = fn.mkPen(self.edgecolors)
            # cosmetic pen: constant on-screen width regardless of zoom
            self.edgecolors.setCosmetic(True)
        self.antialiasing = kwargs.get('antialiasing', False)
        self.levels = kwargs.get('levels', None)
        self._defaultAutoLevels = kwargs.get('enableAutoLevels', True)
        if ('colorMap' in kwargs):
            cmap = kwargs.get('colorMap')
            if (not isinstance(cmap, colormap.ColorMap)):
                raise ValueError('colorMap argument must be a ColorMap instance')
            self.cmap = cmap
        else:
            self.cmap = colormap.get('viridis')
        # 256-entry QColor lookup table derived from the colormap
        self.lut_qcolor = self.cmap.getLookupTable(nPts=256, mode=self.cmap.QCOLOR)
        self.quads = QuadInstances()
        if (len(args) > 0):
            self.setData(*args)
    def _prepareData(self, args):
        """Validate and store data given as ``()``, ``(z,)`` or ``(x, y, z)``,
        updating the cached data bounds accordingly."""
        if (len(args) == 0):
            # clear everything
            self.x = None
            self.y = None
            self.z = None
            self._dataBounds = None
        elif (len(args) == 1):
            # z only: synthesize integer corner grids, one larger per axis
            x = np.arange(0, (args[0].shape[0] + 1), 1)
            y = np.arange(0, (args[0].shape[1] + 1), 1)
            (self.x, self.y) = np.meshgrid(x, y, indexing='ij')
            self.z = args[0]
            self._dataBounds = ((x[0], x[(- 1)]), (y[0], y[(- 1)]))
        elif (len(args) == 3):
            # x and y hold cell corners, so each axis must exceed z's by one
            if ((args[0].shape[0] != (args[2].shape[0] + 1)) or (args[0].shape[1] != (args[2].shape[1] + 1))):
                raise ValueError('The dimension of x should be one greater than the one of z')
            if ((args[1].shape[0] != (args[2].shape[0] + 1)) or (args[1].shape[1] != (args[2].shape[1] + 1))):
                raise ValueError('The dimension of y should be one greater than the one of z')
            self.x = args[0]
            self.y = args[1]
            self.z = args[2]
            (xmn, xmx) = (np.min(self.x), np.max(self.x))
            (ymn, ymx) = (np.min(self.y), np.max(self.y))
            self._dataBounds = ((xmn, xmx), (ymn, ymx))
        else:
            raise ValueError('Data must been sent as (z) or (x, y, z)')
    def setData(self, *args, **kwargs):
        """Replace the displayed data and re-render.

        ``autoLevels`` (keyword) overrides the default auto-level behavior.
        When the data bounds changed, the view is notified as well.
        """
        old_bounds = self._dataBounds
        self._prepareData(args)
        boundsChanged = (old_bounds != self._dataBounds)
        self._rerender(autoLevels=kwargs.get('autoLevels', self._defaultAutoLevels))
        if boundsChanged:
            self.prepareGeometryChange()
            self.informViewBoundsChanged()
        self.update()
    def _rerender(self, *, autoLevels):
        """Invalidate the picture cache and rebuild it when data is present,
        recomputing levels from z when requested (or when still unset)."""
        self.qpicture = None
        if (self.z is not None):
            if ((self.levels is None) or autoLevels):
                # derive levels from the data; update=False avoids recursion
                z_min = self.z.min()
                z_max = self.z.max()
                self.setLevels((z_min, z_max), update=False)
            self.qpicture = self._drawPicture()
    def _drawPicture(self) -> QtGui.QPicture:
        """Paint every cell of z as a colored quad into a QPicture and return it."""
        picture = QtGui.QPicture()
        painter = QtGui.QPainter(picture)
        if (self.edgecolors is None):
            painter.setPen(QtCore.Qt.PenStyle.NoPen)
        else:
            painter.setPen(self.edgecolors)
        if self.antialiasing:
            painter.setRenderHint(QtGui.QPainter.RenderHint.Antialiasing)
        lut = self.lut_qcolor
        scale = (len(lut) - 1)
        (lo, hi) = (self.levels[0], self.levels[1])
        rng = (hi - lo)
        if (rng == 0):
            # degenerate levels: avoid a division by zero in the rescale
            rng = 1
        # map z into integer LUT indices, clipped to the table bounds
        norm = fn.rescaleData(self.z, (scale / rng), lo, dtype=int, clip=(0, (len(lut) - 1)))
        if Qt.QT_LIB.startswith('PyQt'):
            # PyQt wants the points unpacked; PySide accepts the sequence
            drawConvexPolygon = (lambda x: painter.drawConvexPolygon(*x))
        else:
            drawConvexPolygon = painter.drawConvexPolygon
        self.quads.resize(self.z.shape[0], self.z.shape[1])
        # fill the shared vertex buffer that backs the polygon instances
        memory = self.quads.ndarray()
        memory[(..., 0)] = self.x.ravel()
        memory[(..., 1)] = self.y.ravel()
        polys = self.quads.instances()
        # group cells by color index so each brush is set only once per color
        (color_indices, counts) = np.unique(norm, return_counts=True)
        sorted_indices = np.argsort(norm, axis=None)
        offset = 0
        for (coloridx, cnt) in zip(color_indices, counts):
            indices = sorted_indices[offset:(offset + cnt)]
            offset += cnt
            painter.setBrush(lut[coloridx])
            for idx in indices:
                drawConvexPolygon(polys[idx])
        painter.end()
        return picture
    def setLevels(self, levels, update=True):
        """Set the (min, max) color scale, emit sigLevelsChanged and,
        unless ``update`` is False, re-render immediately."""
        self.levels = levels
        self.sigLevelsChanged.emit(levels)
        if update:
            self._rerender(autoLevels=False)
            self.update()
    def getLevels(self):
        """Return the current (min, max) levels tuple (or None)."""
        return self.levels
    def setLookupTable(self, lut, update=True):
        """Install an explicit color lookup table; clears the colormap."""
        self.cmap = None
        self.lut_qcolor = lut[:]
        if update:
            self._rerender(autoLevels=False)
            self.update()
    def getColorMap(self):
        """Return the current ColorMap (None after setLookupTable)."""
        return self.cmap
    def setColorMap(self, cmap):
        """Switch to *cmap*, rebuilding the QColor lookup table from it."""
        self.setLookupTable(cmap.getLookupTable(nPts=256, mode=cmap.QCOLOR), update=True)
        self.cmap = cmap
    def enableAutoLevels(self):
        """Recompute levels from the data on subsequent setData() calls."""
        self._defaultAutoLevels = True
    def disableAutoLevels(self):
        """Keep the current levels on subsequent setData() calls."""
        self._defaultAutoLevels = False
    def paint(self, p, *args):
        """Blit the cached picture; nothing is drawn before data is set."""
        if (self.qpicture is not None):
            p.drawPicture(0, 0, self.qpicture)
    def width(self):
        """Data-space width (0 when no data)."""
        if (self._dataBounds is None):
            return 0
        bounds = self._dataBounds[0]
        return (bounds[1] - bounds[0])
    def height(self):
        """Data-space height (0 when no data)."""
        if (self._dataBounds is None):
            return 0
        bounds = self._dataBounds[1]
        return (bounds[1] - bounds[0])
    def dataBounds(self, ax, frac=1.0, orthoRange=None):
        """Return (min, max) along axis *ax* for auto-ranging; (None, None)
        when no data is set.  frac/orthoRange are accepted for API
        compatibility but unused."""
        if (self._dataBounds is None):
            return (None, None)
        return self._dataBounds[ax]
    def pixelPadding(self):
        """Extra pixels needed around the bounds to fit the edge pen."""
        pen = self.edgecolors
        no_pen = ((pen is None) or (pen.style() == QtCore.Qt.PenStyle.NoPen))
        return (0 if no_pen else ((pen.widthF() or 1) * 0.5))
    def boundingRect(self):
        """Data bounds expanded by the pen's pixel padding (empty when unset)."""
        (xmn, xmx) = self.dataBounds(ax=0)
        if ((xmn is None) or (xmx is None)):
            return QtCore.QRectF()
        (ymn, ymx) = self.dataBounds(ax=1)
        if ((ymn is None) or (ymx is None)):
            return QtCore.QRectF()
        px = py = 0
        pxPad = self.pixelPadding()
        if (pxPad > 0):
            # convert the pixel padding into data-space lengths
            (px, py) = self.pixelVectors()
            px = (0 if (px is None) else px.length())
            py = (0 if (py is None) else py.length())
            px *= pxPad
            py *= pxPad
        return QtCore.QRectF((xmn - px), (ymn - py), (((2 * px) + xmx) - xmn), (((2 * py) + ymx) - ymn))
class LSTMEncoder(Encoder):
    """LSTM encoder (bidirectional by default) over embedded token ids."""

    def __init__(self, vocab, embed_size, hidden_size, num_layers, dropout, bidirectional=True):
        super().__init__(vocab)
        self.vocab = vocab
        self.bidirectional = bidirectional
        self.num_layers = num_layers
        self.dropout = dropout
        self.embed_size = embed_size
        self.hidden_size = hidden_size
        self.embed = nn.Embedding(len(self.vocab), embed_size)
        self.lstm = nn.LSTM(embed_size, hidden_size, num_layers=num_layers, dropout=dropout, bidirectional=bidirectional, batch_first=True)

    def forward(self, src_seq, src_lengths):
        """Encode *src_seq* (padded id batch) given per-sequence lengths.

        Returns ``(outputs, (h, c))`` where outputs are the padded per-step
        states and h/c are the final states flattened per batch element
        across layers/directions.
        NOTE(review): pack_padded_sequence is called without
        enforce_sorted=False, so lengths are presumably sorted descending --
        confirm with the callers.
        """
        embedded = self.embed(src_seq)
        packed = pack_padded_sequence(embedded, src_lengths, batch_first=True)
        (packed_outputs, (h_n, c_n)) = self.lstm(packed)
        (outputs, _) = pad_packed_sequence(packed_outputs, batch_first=True)
        batch = outputs.size(0)
        # put the batch axis first, then flatten layer/direction states into
        # one vector per sequence
        final_h = h_n.transpose(0, 1).contiguous().view(batch, (- 1))
        final_c = c_n.transpose(0, 1).contiguous().view(batch, (- 1))
        return (outputs, (final_h, final_c))
def setUpModule():
    """Build the module-level fixtures: a diamond carbon cell with GTH
    pseudopotential plus an all-electron STO-3G variant, each with a 2x1x1
    k-point mesh."""
    global cell, alle_cell, kpts, alle_kpts
    # geometry shared by both cells
    carbon_pair = 'C 0., 0., 0.; C 0.8917, 0.8917, 0.8917'
    fcc_lattice = '0. 1.7834 1.7834\n 1.7834 0. 1.7834\n 1.7834 1.7834 0. '
    kmesh = [2, 1, 1]
    cell = gto.Cell()
    cell.unit = 'A'
    cell.atom = carbon_pair
    cell.a = fcc_lattice
    cell.basis = 'gth-dzvp'
    cell.pseudo = 'gth-pade'
    cell.verbose = 0
    cell.mesh = [29] * 3
    cell.build()
    kpts = cell.make_kpts(kmesh, wrap_around=True)
    # all-electron cell: minimal basis, no pseudopotential
    alle_cell = gto.Cell()
    alle_cell.unit = 'A'
    alle_cell.atom = carbon_pair
    alle_cell.a = fcc_lattice
    alle_cell.basis = 'sto-3g'
    alle_cell.verbose = 0
    alle_cell.build()
    alle_kpts = alle_cell.make_kpts(kmesh, wrap_around=True)
# NOTE(review): the line below reads like a pytest parametrize decorator whose
# '@pytest.mark' prefix was lost during extraction -- confirm against upstream.
.parametrize(['cls', 'result'], [(int, False), (bool, False), (str, False), (bytes, False), (list, True), (dict, True), (type, True), (set, True), (frozenset, True), (collections.deque, True), (collections.ChainMap, True), (collections.defaultdict, True), *gen_ns_parametrize((lambda gen_ns: (gen_ns.Gen, True)), (lambda gen_ns: (gen_ns.GenChildExplicit, False)))])
def test_is_generic_class(cls, result):
    # scalar builtins must not be generic; container types, `type` and the
    # generated Gen class must be, while GenChildExplicit must not
    assert (is_generic_class(cls) == result)
def get_model_path(timestamp, opts):
    """Compose the checkpoint directory for this run from the option fields,
    create it when missing, and return the epoch checkpoint path inside it."""
    # encode the z/alpha usage flags into one token
    inputs = ('z' + str(opts.use_z)) + ('_alpha' + str(opts.use_alpha))
    camera_xys = (str(opts.use_camera) + '|') + str(opts.use_xys)
    loss_tag = ''.join(opts.losses).replace('_', '|')
    run_suffix = opts.model_epoch_path % (timestamp, opts.folder_to_save, opts.lr, opts.batch_size, opts.model_type, opts.splatter, opts.noise, opts.norm_G, opts.refine_model_type, opts.depth_predictor_type, camera_xys, opts.init, opts.image_type, opts.seed, str(opts.use_multi_hypothesis), loss_tag, inputs, opts.suffix, opts.discriminator_losses)
    model_path = (opts.log_dir % opts.dataset) + run_suffix
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    return model_path + '/model_epoch.pth'
class ResponseWrapperBase():
    """Thin delegating wrapper around an HTTP response object.

    NOTE(review): these accessors read like they were authored as
    ``@property`` definitions (with ``@encoding.setter`` for the second
    ``encoding``) whose decorators were lost in extraction -- as written,
    the two plain ``encoding`` methods shadow each other, leaving only the
    setter-shaped one reachable.  Confirm against the original source.
    """
    def __init__(self, response):
        # keep a handle on the wrapped response for delegation
        self.original = response
    def text(self):
        return self.original.text
    def content(self):
        return self.original.content
    def status_code(self):
        return self.original.status_code
    def headers(self):
        return self.original.headers
    def encoding(self):
        # unreachable: shadowed by the next definition of the same name
        return self.original.encoding
    def encoding(self, val):
        self.original.encoding = val
    def reason(self):
        return self.original.reason
    def cookies(self):
        return self.original.cookies
    def elapsed(self):
        return self.original.elapsed
    def request(self):
        return self.original.request
class DayRoomThroughModel(OrderedModel):
    """Through-model linking a conference Day to a Room.

    Rooms are ordered independently within each day (django-ordered-model's
    ``order_with_respect_to``) and may carry per-day streaming/Q&A URLs.
    """
    day = models.ForeignKey(Day, on_delete=models.CASCADE, verbose_name=_('day'), related_name='added_rooms')
    room = models.ForeignKey(Room, on_delete=models.CASCADE, verbose_name=_('room'))
    # ordering sequence restarts for every day
    order_with_respect_to = 'day'
    # optional per-day URLs; empty string (not NULL) when unset
    streaming_url = models.URLField(_('Streaming URL'), blank=True, default='')
    slido_url = models.URLField(_('Sli.do URL'), blank=True, default='')
    class Meta():
        ordering = ('day', 'order')
        verbose_name = _('Day - Room')
        verbose_name_plural = _('Day - Rooms')
class TestThriftEnum(TestNameCheckVisitorBase):
    """Checker tests: classes shaped like Thrift-generated enums (int class
    constants plus _VALUES_TO_NAMES/_NAMES_TO_VALUES) should be accepted
    where the enum type or an int is expected.

    NOTE(review): the bare '_passes()' calls below look like stripped
    '@assert_passes()'-style decorators from the test framework -- confirm
    against the original source.
    """
    _passes()
    def test_basic(self):
        class ThriftEnum(object):
            X = 0
            Y = 1
            _VALUES_TO_NAMES = {0: 'X', 1: 'Y'}
            _NAMES_TO_VALUES = {'X': 0, 'Y': 1}
        def want_enum(e: ThriftEnum):
            pass
        def want_int(i: int):
            pass
        def capybara(e: ThriftEnum):
            # enum instances, its constants and arbitrary ints all satisfy
            # a ThriftEnum annotation; enum values also satisfy int
            want_enum(e)
            want_enum(ThriftEnum.X)
            want_enum(ThriftEnum.Y)
            want_enum(0)
            want_enum(1)
            want_enum(42)
            want_enum(str(e))
            want_int(e)
            want_int(e.X)
    _passes()
    def test_typevar(self):
        from typing import TypeVar
        from typing_extensions import Annotated
        class ThriftEnum(object):
            X = 0
            Y = 1
            _VALUES_TO_NAMES = {0: 'X', 1: 'Y'}
            _NAMES_TO_VALUES = {'X': 0, 'Y': 1}
        TET = TypeVar('TET', bound=ThriftEnum)
        def want_enum(te: ThriftEnum) -> None:
            pass
        def get_it(te: TET) -> TET:
            want_enum(te)
            return te
        def get_it_annotated(te: Annotated[(TET, 3)]) -> TET:
            want_enum(te)
            return te
        def capybara(e: ThriftEnum):
            # the TypeVar solution should preserve Known vs Typed values
            assert_is_value(get_it(e), TypedValue(ThriftEnum))
            assert_is_value(get_it(ThriftEnum.X), KnownValue(ThriftEnum.X))
            assert_is_value(get_it_annotated(e), TypedValue(ThriftEnum))
            assert_is_value(get_it_annotated(ThriftEnum.X), KnownValue(ThriftEnum.X))
    _passes()
    def test_int_protocol(self):
        from typing_extensions import Protocol
        class SupportsIndex(Protocol):
            def __index__(self) -> int:
                raise NotImplementedError
        class ThriftEnum(object):
            X = 0
            Y = 1
            _VALUES_TO_NAMES = {0: 'X', 1: 'Y'}
            _NAMES_TO_VALUES = {'X': 0, 'Y': 1}
        def want_si(si: SupportsIndex):
            pass
        def capybara(te: ThriftEnum):
            # enum-shaped values should satisfy int-like protocols too
            want_si(te)
class sdist(orig.sdist):
    """setuptools' sdist command.

    Extends distutils' sdist with egg_info-driven file lists, multiple
    README spellings, UTF-8-tolerant manifest reading and registration of
    the produced archives in ``distribution.dist_files``.
    """
    user_options = [('formats=', None, 'formats for source distribution (comma-separated list)'), ('keep-temp', 'k', ('keep the distribution tree around after creating ' + 'archive file(s)')), ('dist-dir=', 'd', 'directory to put the source distribution archive(s) in [default: dist]'), ('owner=', 'u', 'Owner name used when creating a tar file [default: current user]'), ('group=', 'g', 'Group name used when creating a tar file [default: current group]')]
    negative_opt = {}
    # recognised README spellings, tried in this order by check_readme()
    README_EXTENSIONS = ['', '.rst', '.txt', '.md']
    READMES = tuple(('README{0}'.format(ext) for ext in README_EXTENSIONS))
    def run(self):
        """Build egg-info, assemble the file list, create the archive(s)
        and record them in distribution.dist_files."""
        self.run_command('egg_info')
        ei_cmd = self.get_finalized_command('egg_info')
        self.filelist = ei_cmd.filelist
        self.filelist.append(os.path.join(ei_cmd.egg_info, 'SOURCES.txt'))
        self.check_readme()
        for cmd_name in self.get_sub_commands():
            self.run_command(cmd_name)
        self.make_distribution()
        # register the produced archives, avoiding duplicates
        dist_files = getattr(self.distribution, 'dist_files', [])
        for file in self.archive_files:
            data = ('sdist', '', file)
            if (data not in dist_files):
                dist_files.append(data)
    def initialize_options(self):
        orig.sdist.initialize_options(self)
        self._default_to_gztar()
    def _default_to_gztar(self):
        """Force the gztar format only on interpreters older than 3.6.0b1,
        where it was not yet the distutils default."""
        if (sys.version_info >= (3, 6, 0, 'beta', 1)):
            return
        self.formats = ['gztar']
    def make_distribution(self):
        """Workaround for distutils' #379: build with os.link hidden."""
        with self._remove_os_link():
            orig.sdist.make_distribution(self)
    # NOTE(review): takes no 'self' and yields -- upstream this is a
    # '@staticmethod' '@contextlib.contextmanager'; the decorators appear to
    # have been stripped in extraction.  Confirm against setuptools source.
    def _remove_os_link():
        """Temporarily remove os.link so tarfile copies files instead of
        hard-linking them (hard links break on some filesystems)."""
        class NoValue():
            pass
        orig_val = getattr(os, 'link', NoValue)
        try:
            del os.link
        except Exception:
            pass
        try:
            (yield)
        finally:
            # restore os.link only if it existed before
            if (orig_val is not NoValue):
                setattr(os, 'link', orig_val)
    def add_defaults(self):
        super().add_defaults()
        self._add_defaults_build_sub_commands()
    def _add_defaults_optional(self):
        super()._add_defaults_optional()
        # pyproject.toml ships with the sdist when present
        if os.path.isfile('pyproject.toml'):
            self.filelist.append('pyproject.toml')
    def _add_defaults_python(self):
        """Add pure-module sources and their package data to the file list."""
        if self.distribution.has_pure_modules():
            build_py = self.get_finalized_command('build_py')
            self.filelist.extend(build_py.get_source_files())
            self._add_data_files(self._safe_data_files(build_py))
    def _add_defaults_build_sub_commands(self):
        """Include source files from build sub-commands that distutils'
        original sdist does not know about."""
        build = self.get_finalized_command('build')
        missing_cmds = (set(build.get_sub_commands()) - _ORIGINAL_SUBCOMMANDS)
        cmds = (self.get_finalized_command(c) for c in missing_cmds)
        files = (c.get_source_files() for c in cmds if hasattr(c, 'get_source_files'))
        self.filelist.extend(chain.from_iterable(files))
    def _safe_data_files(self, build_py):
        """Return build_py.data_files; kept as a hook for subclasses."""
        return build_py.data_files
    def _add_data_files(self, data_files):
        """Flatten build_py-style data_files tuples into source paths."""
        self.filelist.extend((os.path.join(src_dir, name) for (_, src_dir, _, filenames) in data_files for name in filenames))
    def _add_defaults_data_files(self):
        try:
            super()._add_defaults_data_files()
        except TypeError:
            # malformed data_files entries: warn rather than abort the build
            log.warn('data_files contains unexpected objects')
    def check_readme(self):
        """Warn when none of the recognised README spellings exists."""
        for f in self.READMES:
            if os.path.exists(f):
                return
        else:
            self.warn(('standard file not found: should have one of ' + ', '.join(self.READMES)))
    def make_release_tree(self, base_dir, files):
        """Build the release tree, replacing any hard-linked setup.cfg with
        a real copy carrying the computed version info."""
        orig.sdist.make_release_tree(self, base_dir, files)
        dest = os.path.join(base_dir, 'setup.cfg')
        if (hasattr(os, 'link') and os.path.exists(dest)):
            # unlink first: dest may be a hard link to the source file
            os.unlink(dest)
            self.copy_file('setup.cfg', dest)
        self.get_finalized_command('egg_info').save_version_info(dest)
    def _manifest_is_not_generated(self):
        """True when MANIFEST exists and was not generated by distutils."""
        if (not os.path.isfile(self.manifest)):
            return False
        with open(self.manifest, 'rb') as fp:
            first_line = fp.readline()
        return (first_line != '# file GENERATED by distutils, do NOT edit\n'.encode())
    def read_manifest(self):
        """Fill the file list from MANIFEST, skipping comments, blank lines
        and lines that are not valid UTF-8."""
        log.info("reading manifest file '%s'", self.manifest)
        manifest = open(self.manifest, 'rb')
        for line in manifest:
            try:
                line = line.decode('UTF-8')
            except UnicodeDecodeError:
                log.warn(('%r not UTF-8 decodable -- skipping' % line))
                continue
            line = line.strip()
            if (line.startswith('#') or (not line)):
                continue
            self.filelist.append(line)
        manifest.close()
class MJVLIGHT(Structure):
    """ctypes mirror of MuJoCo's mjvLight struct.

    Field order and types must match the C layout exactly; do not reorder.
    """
    _fields_ = [
        ('pos', (c_float * 3)),
        ('dir', (c_float * 3)),
        ('attenuation', (c_float * 3)),
        ('cutoff', c_float),
        ('exponent', c_float),
        ('ambient', (c_float * 3)),
        ('diffuse', (c_float * 3)),
        ('specular', (c_float * 3)),
        ('headlight', c_ubyte),
        ('directional', c_ubyte),
        ('castshadow', c_ubyte),
    ]
def from_content_type(response, base_url=None, base_path=None, tree_type=HIERARCHY):
    """Derive a local filesystem path for *response*, using its Content-Type
    headers to pick the path prefix and suffix."""
    assert hasattr(response, 'headers'), "Response object must have a 'headers' attribute!"
    assert hasattr(response, 'url'), "Response object must have a 'url' attribute!"
    content_types = get_content_type_from_headers(response.headers)
    prefix = get_prefix(content_types)
    suffix = get_suffix(content_types)
    return url2path(url=response.url, base_url=base_url, base_path=base_path, tree_type=tree_type, prefix=prefix, suffix=suffix)
class FakeOpener():
    """Test double for a urllib opener: records every request it is asked to
    open and serves canned body/headers."""

    def __init__(self):
        # every request passed to open() lands here for later inspection
        self.reqs = []

    def __call__(self, *args):
        # acts as its own factory so it can replace build_opener-style callables
        return self

    def open(self, req, data=None, timeout=None):
        """Record *req* and return self (which also plays the response role)."""
        self.reqs.append(req)
        return self

    def read(self):
        """Canned response body."""
        return b'xxx'

    def getheader(self, name, default=None):
        """Case-insensitive lookup into the canned response headers."""
        canned = {'content-type': 'text/plain; charset=utf-8'}
        return canned.get(name.lower(), default)
def _convert_extras_requirements(extras_require: _StrOrIter) -> Mapping[(str, _Ordered[Requirement])]:
    """Normalise ``extras_require`` into a mapping keyed by section name plus
    each requirement's environment-marker suffix.

    The inner dicts act as insertion-ordered sets of Requirement objects.
    """
    converted: Mapping[(str, _Ordered[Requirement])] = defaultdict(dict)
    for (section, value) in extras_require.items():
        # touch the key so sections without requirements still appear
        converted[section]
        for requirement in _reqs.parse(value):
            bucket = converted[(section + _suffix_for(requirement))]
            # setdefault keeps the first occurrence, preserving order
            bucket.setdefault(requirement)
    return converted
# NOTE(review): the line below looks like a pytest marker that lost its
# '@pytest.mark' prefix during extraction -- confirm against upstream.
.allow_bad_gc_pyside
def test_editor(qtbot, plotting):
    """Exercise the BackgroundPlotter editor: absent when disabled, toggles
    visibility, and its tree/stacked widgets drive checkbox toggles."""
    # editor=False: no editor object should be attached at all
    plotter = BackgroundPlotter(editor=False, off_screen=False)
    qtbot.addWidget(plotter.app_window)
    assert (plotter.editor is None)
    plotter.close()
    # editor=True: editor exists but starts hidden; toggle shows/hides it
    plotter = BackgroundPlotter(editor=True, off_screen=False)
    qtbot.addWidget(plotter.app_window)
    assert_hasattr(plotter, 'editor', Editor)
    editor = plotter.editor
    assert (not editor.isVisible())
    with qtbot.wait_exposed(editor):
        editor.toggle()
    assert editor.isVisible()
    editor.close()
    assert (not editor.isVisible())
    plotter.close()
    # multi-subplot plotter: drive the editor's widget tree
    plotter = BackgroundPlotter(shape=(2, 1), off_screen=False)
    qtbot.addWidget(plotter.app_window)
    editor = plotter.editor
    with qtbot.wait_exposed(editor):
        editor.toggle()
    plotter.subplot(0, 0)
    pd = pyvista.Sphere()
    actor = plotter.add_mesh(pd)
    plotter.subplot(1, 0)
    plotter.show_axes()
    assert_hasattr(editor, 'tree_widget', QTreeWidget)
    tree_widget = editor.tree_widget
    top_item = tree_widget.topLevelItem(0)
    assert (top_item is not None)
    # selecting a tree item must fire the selection-changed signal
    with qtbot.wait_signals([tree_widget.itemSelectionChanged], timeout=2000):
        top_item.setSelected(True)
    assert_hasattr(editor, 'stacked_widget', QStackedWidget)
    stacked_widget = editor.stacked_widget
    # the tree item stores the index of its settings page in UserRole
    page_idx = top_item.data(0, Qt.ItemDataRole.UserRole)
    page_widget = stacked_widget.widget(page_idx)
    page_layout = page_widget.layout()
    number_of_widgets = page_layout.count()
    for widget_idx in range(number_of_widgets):
        widget_item = page_layout.itemAt(widget_idx)
        widget = widget_item.widget()
        if isinstance(widget, QCheckBox):
            # toggle each checkbox twice to restore its original state,
            # waiting for the signal each time
            with qtbot.wait_signals([widget.toggled], timeout=2000):
                widget.toggle()
            with qtbot.wait_signals([widget.toggled], timeout=2000):
                widget.toggle()
    editor.toggle()
    plotter.remove_actor(actor)
    plotter.close()
class Tpango(TestCase):
    """Round-trip and formatting tests for the pango markup helpers in util."""

    def test_escape_empty(self):
        self.assertEqual(util.escape(''), '')

    def test_roundtrip(self):
        # escaping must alter the text and unescaping must restore it exactly
        for original in ['foo&', '<&>', '&', '&', '<&testing&>amp;']:
            escaped = util.escape(original)
            self.assertNotEqual(original, escaped)
            self.assertEqual(original, util.unescape(escaped))

    def test_unescape_empty(self):
        self.assertEqual(util.unescape(''), '')

    def test_format(self):
        # each helper wraps its argument in the corresponding markup tag
        self.assertEqual(util.bold('foo'), '<b>foo</b>')
        self.assertEqual(util.italic('foo'), '<i>foo</i>')
        self.assertEqual(util.monospace('foo'), '<tt>foo</tt>')

    def test_format_escape(self):
        # the formatting helpers also escape markup-sensitive characters
        assert (util.bold('foo & bar') == '<b>foo & bar</b>')
        assert (util.italic('foo & bar') == '<i>foo & bar</i>')
        assert (util.monospace('foo & bar') == '<tt>foo & bar</tt>')
        assert (util.bold_italic('foo & bar') == '<b><i>foo & bar</i></b>')
def sampler(z, y):
    """Inference-time generator pass: reuses the training generator's
    variables and runs batch norms in eval mode (train=False)."""
    with tf.variable_scope('generator'):
        # share weights with the generator built at training time
        tf.get_variable_scope().reuse_variables()
        half = int(FLAGS.output_size / 2)
        quarter = int(FLAGS.output_size / 4)
        # embed the label and concatenate it onto the latent code
        label_dim = 256
        label_embed = linear(y, label_dim, 'g_liner')
        z = tf.concat(axis=1, values=[z, label_embed])
        net = tf.nn.relu(g_bn1(linear(z, ((128 * quarter) * quarter), 'g_h1_lin'), train=False))
        net = tf.reshape(net, [FLAGS.batch_size, quarter, quarter, 128])
        net = tf.nn.relu(g_bn2(deconv2d(net, [FLAGS.batch_size, half, half, 128], name='g_h2'), train=False))
        # final upsample to the full output resolution, squashed to [0, 1]
        return tf.nn.sigmoid(deconv2d(net, [FLAGS.batch_size, FLAGS.output_size, FLAGS.output_size, FLAGS.c_dim], name='g_h3'))
def test_step_functions_same_parser(pytester):
    """Two step definitions sharing the same step text but different parsers
    (plain string vs. parsers.re) must both resolve to the right function.

    NOTE(review): the generated test module below appears to have lost its
    '@given'/'@when'/'@then' decorator prefixes (only '(STEP)' etc. remain)
    during extraction; the strings must stay as-is here -- confirm upstream.
    """
    pytester.makefile('.feature', target_fixture=textwrap.dedent(' Feature: A feature\n Scenario: A scenario\n Given there is a foo with value "(?P<value>\\w+)"\n And there is a foo with value "testfoo"\n When pass\n Then pass\n '))
    pytester.makepyfile(textwrap.dedent(' import pytest\n from pytest_bdd import given, when, then, scenarios, parsers\n from pytest_bdd.utils import dump_obj\n\n scenarios("target_fixture.feature")\n\n STEP = r\'there is a foo with value "(?P<value>\\w+)"\'\n\n (STEP)\n def _():\n dump_obj((\'str\',))\n\n (parsers.re(STEP))\n def _(value):\n dump_obj((\'re\', value))\n\n ("pass")\n ("pass")\n def _():\n pass\n '))
    result = pytester.runpytest('-s')
    result.assert_outcomes(passed=1)
    # the literal-string step must match first; the regex step captures the value
    [first_given, second_given] = collect_dumped_objects(result)
    assert (first_given == ('str',))
    assert (second_given == ('re', 'testfoo'))
class RandBatchNorm2d(nn.Module):
    """BatchNorm2d with variational (Gaussian) affine parameters.

    The per-feature weight and bias are sampled each call as
    ``mu + exp(sigma) * eps`` (log-std parameterisation), and ``forward``
    additionally returns the KL divergence of that posterior against a
    zero-mean Gaussian prior with standard deviation ``sigma_0``.
    """
    def __init__(self, sigma_0, N, init_s, num_features, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True):
        super(RandBatchNorm2d, self).__init__()
        self.sigma_0 = sigma_0  # prior std for the weight/bias distributions
        self.N = N  # dataset size; consumed by noise_fn in forward_ -- TODO confirm semantics
        self.num_features = num_features
        self.init_s = init_s  # initial value for the log-std parameters
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.track_running_stats = track_running_stats
        if self.affine:
            # variational parameters (mean + log-std per feature) and buffers
            # holding the most recently sampled noise
            self.mu_weight = Parameter(torch.Tensor(num_features))
            self.sigma_weight = Parameter(torch.Tensor(num_features))
            self.register_buffer('eps_weight', torch.Tensor(num_features))
            self.mu_bias = Parameter(torch.Tensor(num_features))
            self.sigma_bias = Parameter(torch.Tensor(num_features))
            self.register_buffer('eps_bias', torch.Tensor(num_features))
        else:
            self.register_parameter('mu_weight', None)
            self.register_parameter('sigma_weight', None)
            self.register_buffer('eps_weight', None)
            self.register_parameter('mu_bias', None)
            self.register_parameter('sigma_bias', None)
            self.register_buffer('eps_bias', None)
        if self.track_running_stats:
            # standard BatchNorm running statistics
            self.register_buffer('running_mean', torch.zeros(num_features))
            self.register_buffer('running_var', torch.ones(num_features))
            self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))
        else:
            self.register_parameter('running_mean', None)
            self.register_parameter('running_var', None)
            self.register_parameter('num_batches_tracked', None)
        self.reset_parameters()
    def reset_running_stats(self):
        """Reset running mean/var and the batch counter to their defaults."""
        if self.track_running_stats:
            self.running_mean.zero_()
            self.running_var.fill_(1)
            self.num_batches_tracked.zero_()
    def reset_parameters(self):
        """Reset running stats and (re)initialise the variational parameters."""
        self.reset_running_stats()
        if self.affine:
            self.mu_weight.data.uniform_()
            self.sigma_weight.data.fill_(self.init_s)
            self.mu_bias.data.zero_()
            self.sigma_bias.data.fill_(self.init_s)
            self.eps_weight.data.zero_()
            self.eps_bias.data.zero_()
    def _check_input_dim(self, input):
        # BatchNorm2d expects (N, C, H, W)
        if (input.dim() != 4):
            raise ValueError('expected 4D input (got {}D input)'.format(input.dim()))
    def forward_(self, input):
        """Alternative forward using the external noise_fn sampler; returns
        only the normalised output (no KL term).
        NOTE(review): relation to forward() is unclear from here -- confirm
        which one callers use.
        """
        self._check_input_dim(input)
        exponential_average_factor = 0.0
        if (self.training and self.track_running_stats):
            self.num_batches_tracked += 1
            if (self.momentum is None):
                # cumulative moving average when momentum is unset
                exponential_average_factor = (1.0 / self.num_batches_tracked.item())
            else:
                exponential_average_factor = self.momentum
        weight = bias = None
        if self.affine:
            weight = noise_fn(self.mu_weight, self.sigma_weight, self.eps_weight, self.sigma_0, self.N)
            bias = noise_fn(self.mu_bias, self.sigma_bias, self.eps_bias, self.sigma_0, self.N)
        return F.batch_norm(input, self.running_mean, self.running_var, weight, bias, (self.training or (not self.track_running_stats)), exponential_average_factor, self.eps)
    def forward(self, input):
        """Normalise *input* with freshly sampled affine parameters.

        Returns ``(out, kl)`` where ``kl`` is the summed KL divergence of the
        weight and bias posteriors against the N(0, sigma_0^2) prior.
        """
        self._check_input_dim(input)
        exponential_average_factor = 0.0
        if (self.training and self.track_running_stats):
            self.num_batches_tracked += 1
            if (self.momentum is None):
                # cumulative moving average when momentum is unset
                exponential_average_factor = (1.0 / self.num_batches_tracked.item())
            else:
                exponential_average_factor = self.momentum
        weight = bias = None
        if self.affine:
            # reparameterisation trick: sample eps in-place, then shift/scale
            sig_weight = torch.exp(self.sigma_weight)
            weight = (self.mu_weight + (sig_weight * self.eps_weight.normal_()))
            # closed-form KL between N(mu, sig^2) and N(0, sigma_0^2)
            kl_weight = (((math.log(self.sigma_0) - self.sigma_weight) + (((sig_weight ** 2) + (self.mu_weight ** 2)) / (2 * (self.sigma_0 ** 2)))) - 0.5)
            sig_bias = torch.exp(self.sigma_bias)
            bias = (self.mu_bias + (sig_bias * self.eps_bias.normal_()))
            kl_bias = (((math.log(self.sigma_0) - self.sigma_bias) + (((sig_bias ** 2) + (self.mu_bias ** 2)) / (2 * (self.sigma_0 ** 2)))) - 0.5)
        out = F.batch_norm(input, self.running_mean, self.running_var, weight, bias, (self.training or (not self.track_running_stats)), exponential_average_factor, self.eps)
        kl = (kl_weight.sum() + kl_bias.sum())
        return (out, kl)
class Series(Frame, _SeriesMixin):
    """Streaming analogue of a pandas Series."""

    def __init__(self, *args, **kwargs):
        # 'example' may arrive as a keyword or as the second positional arg
        if 'example' in kwargs:
            example = kwargs.get('example')
        elif len(args) > 1:
            example = args[1]
        else:
            example = None
        # Index subclasses validate against index-likeness, everything else
        # against series-likeness
        checker = is_index_like if isinstance(self, Index) else is_series_like
        self._subtype = get_base_frame_type(self.__class__.__name__, checker, example)
        super(Series, self).__init__(*args, **kwargs)

    def value_counts(self):
        """Running value counts, updated as new partitions stream in."""
        return self.accumulate_partitions(aggregations.accumulator, agg=aggregations.ValueCounts(), start=None, stream_type='updating', returns_state=True)
class Effect2756(BaseEffect):
    """Ship bonus: boosts the four ECM scan-strength attributes on fitted
    ECM modules, scaled by the hull's 'shipBonusCC' attribute and the
    Caldari Cruiser skill."""
    # passive effect: always applied while the ship is fitted
    type = 'passive'
    # NOTE(review): defined without 'self' -- the framework presumably calls
    # handlers unbound or a decorator was stripped in extraction; confirm.
    def handler(fit, ship, context, projectionRange, **kwargs):
        # one filtered boost per sensor type; only 'ECM' group modules match
        for type in ('Gravimetric', 'Magnetometric', 'Ladar', 'Radar'):
            fit.modules.filteredItemBoost((lambda mod: (mod.item.group.name == 'ECM')), 'scan{0}StrengthBonus'.format(type), ship.getModifiedItemAttr('shipBonusCC'), skill='Caldari Cruiser', **kwargs)
# NOTE(review): bare call -- upstream this is presumably the decorator
# '@_REGISTRY.register()'; as written it only invokes register() once and does
# not register the function below.  Confirm against the original source.
_REGISTRY.register()
def build_fcos_resnet_bifpn_backbone(cfg, input_shape: ShapeSpec):
    """Assemble a ResNet bottom-up + BiFPN backbone from a detectron2-style
    config; returns the composed backbone module."""
    bottom_up = build_resnet_backbone(cfg, input_shape)
    in_features = cfg.MODEL.FPN.IN_FEATURES
    out_channels = cfg.MODEL.BIFPN.OUT_CHANNELS
    num_repeats = cfg.MODEL.BIFPN.NUM_BIFPN
    # two extra pyramid levels stacked on top of the ResNet features
    top_levels = 2
    backbone = BiFPN(bottom_up=bottom_up, in_features=in_features, out_channels=out_channels, num_top_levels=top_levels, num_repeats=num_repeats, norm=cfg.MODEL.BIFPN.NORM)
    return backbone
class PerTensorCompression(AdaptiveCompressionBase):
    """Adaptive strategy that assigns each tensor a fixed compression scheme
    looked up by the tensor's key."""

    def __init__(self, tensor_compressions: Union[(Sequence[CompressionBase], Mapping[(Key, CompressionBase)])]):
        # either a sequence (indexed by position) or a mapping (by key)
        self.tensor_compressions = tensor_compressions

    def choose_compression(self, info: CompressionInfo) -> CompressionBase:
        """Return the scheme registered for this tensor's key."""
        return self.tensor_compressions[info.key]
class ChargeBase(StageBase):
    """Workflow stage computing AIM reference data (charges and volumes) for a
    ligand from a QM electron-density calculation, then symmetrising the
    results and writing the charges into the nonbonded force."""
    type: Literal['ChargeBase'] = 'ChargeBase'
    solvent_settings: Optional[T] = Field(None, description='The settings used to calculate the electron density in implicit solvent.')
    program: Literal['gaussian'] = Field('gaussian', description='The name of the QM program to calculate the electron density.')
    basis: Optional[str] = Field(None, description='The alternative basis set name, to specify a different one from that used for optimisations.')
    method: Optional[str] = Field(None, description='The alternative method name, to specify a different one from that used for optimisations.')
    td_settings: Optional[TDSettings] = Field(None, description='The alternative Time-Dependent calculation settings that should be used in the calculation.')
    def finish_message(self, **kwargs) -> str:
        """Status line reported when this stage completes."""
        return 'Charges calculated and AIM reference data stored.'
    # NOTE(review): takes 'cls' but no @classmethod decorator is visible --
    # presumably stripped in extraction; confirm against the original source.
    def apply_symmetrisation(cls, molecule: 'Ligand') -> 'Ligand':
        """Average AIM charge and volume over CIP-equivalent atom sets so
        symmetry-related atoms end up with identical parameters."""
        atom_types = {}
        for (atom_index, cip_type) in molecule.atom_types.items():
            atom_types.setdefault(cip_type, []).append(atom_index)
        for sym_set in atom_types.values():
            mean_charge = np.array([molecule.atoms[ind].aim.charge for ind in sym_set]).mean()
            mean_volume = np.array([molecule.atoms[ind].aim.volume for ind in sym_set]).mean()
            for atom_index in sym_set:
                molecule.atoms[atom_index].aim.charge = mean_charge
                molecule.atoms[atom_index].aim.volume = mean_volume
        return molecule
    def _get_qc_options(self) -> Optional[QCOptions]:
        """Return override QC options when both basis and method are set;
        otherwise None so the caller falls back to the workflow spec."""
        if ((self.basis is not None) and (self.method is not None)):
            return QCOptions(program=self.program, method=self.method, basis=self.basis, td_settings=self.td_settings)
        return None
    def _run(self, molecule: 'Ligand', **kwargs) -> 'Ligand':
        """Run the charge calculation inside a per-molecule folder,
        symmetrise the AIM data and copy charges into the NonbondedForce."""
        with folder_setup(molecule.name):
            # new charges invalidate any previously fitted virtual sites
            molecule.extra_sites.clear_sites()
            local_options = kwargs.get('local_options')
            qc_spec = (self._get_qc_options() or kwargs.get('qc_spec'))
            molecule = self._execute(molecule, local_options=local_options, qc_spec=qc_spec)
            molecule = self.apply_symmetrisation(molecule=molecule)
            for i in range(molecule.n_atoms):
                atom = molecule.atoms[i]
                molecule.NonbondedForce[(i,)].charge = atom.aim.charge
            # re-normalise so the total charge stays exact after averaging
            molecule.fix_net_charge()
        return molecule
    def _gas_calculation_settings(self) -> Dict[(str, Any)]:
        """Program-specific gas-phase settings; supplied by subclasses."""
        ...
    def _get_calculation_settings(self) -> Dict[(str, Any)]:
        """Solvent settings when configured, otherwise gas-phase settings."""
        if (self.solvent_settings is not None):
            return self.solvent_settings.format_keywords()
        else:
            return self._gas_calculation_settings()
    def _execute(self, molecule: 'Ligand', local_options: LocalResource, qc_spec: QCOptions) -> 'Ligand':
        """Perform the actual QM calculation; supplied by subclasses."""
        ...
class CatalogReference(VersionBase):
    """Reference to a named entry in an OpenSCENARIO-style catalog, with
    optional parameter assignments."""

    def __init__(self, catalogname, entryname):
        """Store the catalog and entry names; assignments start empty."""
        self.catalogname = catalogname
        self.entryname = entryname
        self.parameterassignments = []

    def __eq__(self, other):
        """Equal iff *other* is a CatalogReference with identical attributes
        and parameter assignments.

        Bug fix: the original fell off the end (returning None) when *other*
        was not a CatalogReference; now it consistently returns False.
        """
        if isinstance(other, CatalogReference):
            if ((self.get_attributes() == other.get_attributes()) and (self.parameterassignments == other.parameterassignments)):
                return True
        return False

    @staticmethod
    def parse(element):
        """Build a CatalogReference from a <CatalogReference> XML element,
        including any nested <ParameterAssignment> children.

        Declared @staticmethod (it never used an instance) so it also works
        when invoked through an instance, not only through the class.
        """
        catalogname = element.attrib['catalogName']
        entryname = element.attrib['entryName']
        reference = CatalogReference(catalogname, entryname)
        parameter_assignments = element.find('ParameterAssignments')
        if parameter_assignments is not None:
            parameters = parameter_assignments.findall('ParameterAssignment')
            for parameter in parameters:
                parameter_assignment = ParameterAssignment.parse(parameter)
                reference.parameterassignments.append(parameter_assignment)
        return reference

    def add_parameter_assignment(self, parameterref, value):
        """Append a ParameterAssignment and return self for call chaining."""
        self.parameterassignments.append(ParameterAssignment(parameterref, value))
        return self

    def get_attributes(self):
        """Return the XML attribute dict for this reference."""
        return {'catalogName': self.catalogname, 'entryName': self.entryname}

    def get_element(self):
        """Serialise to a <CatalogReference> element; the nested
        <ParameterAssignments> container is emitted only when any
        assignments exist."""
        element = ET.Element('CatalogReference', attrib=self.get_attributes())
        if self.parameterassignments:
            parameterassigns = ET.SubElement(element, 'ParameterAssignments')
            for parass in self.parameterassignments:
                parameterassigns.append(parass.get_element())
        return element
def kmeans_embeddings(embs, k_opt=None, k_max=10, method='faiss', return_fitted_model=False):
    """Cluster embeddings with k-means, picking k by silhouette score when unset.

    When `k_opt` is None, k in [2, min(k_max, len(embs)-1)] is chosen by the
    highest euclidean silhouette score. Returns (labels, k_opt) or, when
    `return_fitted_model` is true, (labels, k_opt, fitted_model).
    """
    if k_opt is None:
        scores = [
            silhouette_score(embs, kmeans_train(embs, n_cluster, method), metric='euclidean')
            for n_cluster in range(2, min(k_max + 1, len(embs)))
        ]
        # Candidate ks start at 2, so the best index maps back with an offset.
        k_opt = 2 + scores.index(max(scores))
    if not return_fitted_model:
        labels = kmeans_train(embs, k_opt, method, return_fitted_model)
        return (labels, k_opt)
    labels, kmeans = kmeans_train(embs, k_opt, method, return_fitted_model)
    return (labels, k_opt, kmeans)
def _get_listener():
    """Return the process-wide Listener, lazily creating it on first use.

    Uses double-checked locking so the Listener and its daemon serving
    thread are started exactly once per process.
    """
    global _listener
    if _listener is None:
        with _lock:
            # Re-check under the lock: another thread may have won the race.
            if _listener is None:
                debug('starting listener and thread for sending handles')
                _listener = Listener(authkey=current_process().authkey)
                serving_thread = threading.Thread(target=_serve)
                serving_thread.daemon = True
                serving_thread.start()
    return _listener
def is_send_transfer_almost_equal(send_channel: NettingChannelState, send: LockedTransferUnsignedState, received: LockedTransferSignedState) -> bool:
    """True when the unsigned transfer we sent matches the signed one received
    on every field that must round-trip unchanged (payment id, token, lock
    expiration/secrethash, initiator and target)."""
    return all((
        send.payment_identifier == received.payment_identifier,
        send.token == received.token,
        send.lock.expiration == received.lock.expiration,
        send.lock.secrethash == received.lock.secrethash,
        send.initiator == received.initiator,
        send.target == received.target,
    ))
class ProjectManagerOptions(PymiereBaseObject):
    """Options controlling a Project Manager consolidate/transfer operation.

    Fix(review): the original block had lost its decorators — stray
    ``_TRANSFER_COPY.setter`` fragments, orphaned bare strings, and duplicate
    method names that silently overwrote each other (getters shadowed by
    setters). The standard ``@property`` / ``@<name>.setter`` pairs are
    restored here; the orphaned strings become property docstrings.
    """

    def __init__(self, pymiere_id=None):
        super(ProjectManagerOptions, self).__init__(pymiere_id)

    @property
    def clipTransferOption(self):
        """Which transfer option to use; one of `CLIP_TRANSFER_COPY`, `CLIP_TRANSFER_TRANSCODE`."""
        return self._eval_on_this_object('clipTransferOption')

    @clipTransferOption.setter
    def clipTransferOption(self, clipTransferOption):
        self._eval_on_this_object('clipTransferOption = {}'.format(_format_object_to_es(clipTransferOption)))

    @property
    def clipTranscoderOption(self):
        """Which transcode option to use; one of `CLIP_TRANSCODE_MATCH_PRESET`, `CLIP_TRANSCODE_MATCH_CLIPS`, `CLIP_TRANSCODE_MATCH_SEQUENCE`."""
        return self._eval_on_this_object('clipTranscoderOption')

    @clipTranscoderOption.setter
    def clipTranscoderOption(self, clipTranscoderOption):
        self._eval_on_this_object('clipTranscoderOption = {}'.format(_format_object_to_es(clipTranscoderOption)))

    @property
    def excludeUnused(self):
        """If `true`, projectItems not used in a sequence are not transferred."""
        return self._eval_on_this_object('excludeUnused')

    @excludeUnused.setter
    def excludeUnused(self, excludeUnused):
        self._eval_on_this_object('excludeUnused = {}'.format(_format_object_to_es(excludeUnused)))

    @property
    def handleFrameCount(self):
        """The number of 'handle' frames to provide, before and after the in/out points of clips in the sequence."""
        return self._eval_on_this_object('handleFrameCount')

    @handleFrameCount.setter
    def handleFrameCount(self, handleFrameCount):
        self._eval_on_this_object('handleFrameCount = {}'.format(_format_object_to_es(handleFrameCount)))

    @property
    def includePreviews(self):
        """If `true`, preview files will also be transferred."""
        return self._eval_on_this_object('includePreviews')

    @includePreviews.setter
    def includePreviews(self, includePreviews):
        self._eval_on_this_object('includePreviews = {}'.format(_format_object_to_es(includePreviews)))

    @property
    def includeConformedAudio(self):
        """If `true`, conformed audio files will also be transferred."""
        return self._eval_on_this_object('includeConformedAudio')

    @includeConformedAudio.setter
    def includeConformedAudio(self, includeConformedAudio):
        self._eval_on_this_object('includeConformedAudio = {}'.format(_format_object_to_es(includeConformedAudio)))

    @property
    def renameMedia(self):
        """If `true`, media files will be renamed to match clip names."""
        return self._eval_on_this_object('renameMedia')

    @renameMedia.setter
    def renameMedia(self, renameMedia):
        self._eval_on_this_object('renameMedia = {}'.format(_format_object_to_es(renameMedia)))

    @property
    def destinationPath(self):
        """The containing directory for the consolidation/transfer."""
        return self._eval_on_this_object('destinationPath')

    @destinationPath.setter
    def destinationPath(self, destinationPath):
        self._eval_on_this_object('destinationPath = {}'.format(_format_object_to_es(destinationPath)))

    @property
    def includeAllSequences(self):
        """If `true`, all sequences in the project will be transferred."""
        return self._eval_on_this_object('includeAllSequences')

    @includeAllSequences.setter
    def includeAllSequences(self, includeAllSequences):
        self._eval_on_this_object('includeAllSequences = {}'.format(_format_object_to_es(includeAllSequences)))

    @property
    def affectedSequences(self):
        """An `Array` of all sequences affected by the transfer."""
        return self._eval_on_this_object('affectedSequences')

    @affectedSequences.setter
    def affectedSequences(self, affectedSequences):
        self._eval_on_this_object('affectedSequences = {}'.format(_format_object_to_es(affectedSequences)))

    @property
    def encoderPresetFilePath(self):
        """Path the the encoder preset (.epr file) to be used."""
        return self._eval_on_this_object('encoderPresetFilePath')

    @encoderPresetFilePath.setter
    def encoderPresetFilePath(self, encoderPresetFilePath):
        self._eval_on_this_object('encoderPresetFilePath = {}'.format(_format_object_to_es(encoderPresetFilePath)))

    @property
    def convertImageSequencesToClips(self):
        """If `true`, image sequences will be transcoded."""
        return self._eval_on_this_object('convertImageSequencesToClips')

    @convertImageSequencesToClips.setter
    def convertImageSequencesToClips(self, convertImageSequencesToClips):
        self._eval_on_this_object('convertImageSequencesToClips = {}'.format(_format_object_to_es(convertImageSequencesToClips)))

    @property
    def convertSyntheticsToClips(self):
        """If `true`, synthetic importer clips will be transcoded."""
        return self._eval_on_this_object('convertSyntheticsToClips')

    @convertSyntheticsToClips.setter
    def convertSyntheticsToClips(self, convertSyntheticsToClips):
        self._eval_on_this_object('convertSyntheticsToClips = {}'.format(_format_object_to_es(convertSyntheticsToClips)))

    @property
    def convertAECompsToClips(self):
        """If `true`, After Effects compositions will be transcoded."""
        return self._eval_on_this_object('convertAECompsToClips')

    @convertAECompsToClips.setter
    def convertAECompsToClips(self, convertAECompsToClips):
        self._eval_on_this_object('convertAECompsToClips = {}'.format(_format_object_to_es(convertAECompsToClips)))

    @property
    def copyToPreventAlphaLoss(self):
        """If `true`, source media will be copied not transcoded, if transcoding would have resulted in loss of alpha information."""
        return self._eval_on_this_object('copyToPreventAlphaLoss')

    @copyToPreventAlphaLoss.setter
    def copyToPreventAlphaLoss(self, copyToPreventAlphaLoss):
        self._eval_on_this_object('copyToPreventAlphaLoss = {}'.format(_format_object_to_es(copyToPreventAlphaLoss)))

    @property
    def CLIP_TRANSFER_COPY(self):
        """Transfer mode setting: Copy source media."""
        return self._eval_on_this_object('CLIP_TRANSFER_COPY')

    @CLIP_TRANSFER_COPY.setter
    def CLIP_TRANSFER_COPY(self, CLIP_TRANSFER_COPY):
        raise AttributeError("Attribute 'CLIP_TRANSFER_COPY' is read-only")

    @property
    def CLIP_TRANSFER_TRANSCODE(self):
        """Transfer mode setting: Transcode source media."""
        return self._eval_on_this_object('CLIP_TRANSFER_TRANSCODE')

    @CLIP_TRANSFER_TRANSCODE.setter
    def CLIP_TRANSFER_TRANSCODE(self, CLIP_TRANSFER_TRANSCODE):
        raise AttributeError("Attribute 'CLIP_TRANSFER_TRANSCODE' is read-only")

    @property
    def CLIP_TRANSCODE_MATCH_PRESET(self):
        """Transcode mode setting: Transcode source media to a specific preset."""
        return self._eval_on_this_object('CLIP_TRANSCODE_MATCH_PRESET')

    @CLIP_TRANSCODE_MATCH_PRESET.setter
    def CLIP_TRANSCODE_MATCH_PRESET(self, CLIP_TRANSCODE_MATCH_PRESET):
        raise AttributeError("Attribute 'CLIP_TRANSCODE_MATCH_PRESET' is read-only")

    @property
    def CLIP_TRANSCODE_MATCH_CLIPS(self):
        """Transcode mode setting: Transcode source media to match clips."""
        return self._eval_on_this_object('CLIP_TRANSCODE_MATCH_CLIPS')

    @CLIP_TRANSCODE_MATCH_CLIPS.setter
    def CLIP_TRANSCODE_MATCH_CLIPS(self, CLIP_TRANSCODE_MATCH_CLIPS):
        raise AttributeError("Attribute 'CLIP_TRANSCODE_MATCH_CLIPS' is read-only")

    @property
    def CLIP_TRANSCODE_MATCH_SEQUENCE(self):
        """Transcode mode setting: Transcode source media to match sequence settings."""
        return self._eval_on_this_object('CLIP_TRANSCODE_MATCH_SEQUENCE')

    @CLIP_TRANSCODE_MATCH_SEQUENCE.setter
    def CLIP_TRANSCODE_MATCH_SEQUENCE(self, CLIP_TRANSCODE_MATCH_SEQUENCE):
        raise AttributeError("Attribute 'CLIP_TRANSCODE_MATCH_SEQUENCE' is read-only")

    def bind(self, eventName, function):
        """Attach `function` as a handler for `eventName` on the host object."""
        self._check_type(eventName, str, 'arg "eventName" of function "ProjectManagerOptions.bind"')
        self._check_type(function, any, 'arg "function" of function "ProjectManagerOptions.bind"')
        self._eval_on_this_object('bind({}, {})'.format(_format_object_to_es(eventName), _format_object_to_es(function)))

    def unbind(self, eventName):
        """Remove all handlers previously bound to `eventName`."""
        self._check_type(eventName, str, 'arg "eventName" of function "ProjectManagerOptions.unbind"')
        self._eval_on_this_object('unbind({})'.format(_format_object_to_es(eventName)))

    def setTimeout(self, eventName, function, milliseconds):
        """Schedule `function` for `eventName` after `milliseconds` (host-side)."""
        self._check_type(eventName, str, 'arg "eventName" of function "ProjectManagerOptions.setTimeout"')
        self._check_type(function, any, 'arg "function" of function "ProjectManagerOptions.setTimeout"')
        self._check_type(milliseconds, float, 'arg "milliseconds" of function "ProjectManagerOptions.setTimeout"')
        self._eval_on_this_object('setTimeout({}, {}, {})'.format(_format_object_to_es(eventName), _format_object_to_es(function), _format_object_to_es(milliseconds)))
def test_cannot_reset_password_of_not_active_user(graphql_client):
    """A reset token for an inactive user must not change password or auth id."""
    inactive_user = UserFactory(email='', password='old-password', jwt_auth_id=1, is_active=False)
    reset_token = _create_reset_password_token(user=inactive_user)
    response = graphql_client.query(
        'mutation($input: ResetPasswordInput!) {\n resetPassword(input: $input) {\n __typename\n ... on OperationSuccess {\n ok\n }\n }\n }',
        variables={'input': {'token': reset_token, 'newPassword': 'newpassword1'}},
    )
    # The mutation reports failure and the stored credentials are untouched.
    assert response['data']['resetPassword']['ok'] is False
    inactive_user.refresh_from_db()
    assert inactive_user.check_password('old-password')
    assert inactive_user.jwt_auth_id == 1
# Module-level state shared across calls when the caller does not supply its
# own summary dict. This preserves the original mutable-default semantics
# (AverageMeters accumulating across invocations) while making the sharing
# explicit instead of relying on a mutable default argument.
_WANDB_SUMMARY_STATE = {}

def wandb_log(prefix, sp_values, com_values, update_summary=False, wandb_summary_dict=_WANDB_SUMMARY_STATE):
    """Log `sp_values` under `prefix/` plus `com_values` to wandb.

    When `update_summary` is true, each prefixed key is folded into an
    AverageMeter in `wandb_summary_dict` and the meter's summary is pushed
    to `wandb.run.summary`.
    """
    new_values = {}
    for key, value in sp_values.items():
        new_key = prefix + '/' + key
        new_values[new_key] = value
        if update_summary:
            if new_key not in wandb_summary_dict:
                wandb_summary_dict[new_key] = AverageMeter()
            wandb_summary_dict[new_key].update(value, n=1)
            summary = wandb_summary_dict[new_key].make_summary(new_key)
            for summary_key, summary_value in summary.items():
                wandb.run.summary[summary_key] = summary_value
    new_values.update(com_values)
    wandb.log(new_values)
def main(config, db, **kwargs):
    """Run the episode-update pipeline; database writes are skipped in debug mode."""
    update_db = not config.debug
    _check_missing_stream_info(config, db, update_db=update_db)
    if config.record_scores:
        _check_new_episode_scores(config, db, update_db=update_db)
        _record_poll_scores(config, db, update_db=update_db)
    _check_show_lengths(config, db, update_db=update_db)
    _disable_finished_shows(config, db, update_db=update_db)
def evaluate(args, model, tokenizer, prefix='', output_layer=(- 1), eval_highway=False):
    """Evaluate `model` on the dev set(s) of args.task_name.

    Parameters (reviewer notes — names follow the transformers example scripts):
      prefix: subdirectory/label for this evaluation round.
      output_layer: when >= 0, asks the model to exit at that layer.
      eval_highway: when True, tallies per-layer early-exit counts and cost savings.
    Returns a dict of metric name -> value accumulated over all eval tasks.
    """
    # MNLI is evaluated on both matched and mismatched dev sets.
    eval_task_names = (('mnli', 'mnli-mm') if (args.task_name == 'mnli') else (args.task_name,))
    eval_outputs_dirs = ((args.output_dir, (args.output_dir + '-MM')) if (args.task_name == 'mnli') else (args.output_dir,))
    results = {}
    for (eval_task, eval_output_dir) in zip(eval_task_names, eval_outputs_dirs):
        eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)
        # Only the main process (local_rank -1 or 0) creates the output dir.
        if ((not os.path.exists(eval_output_dir)) and (args.local_rank in [(- 1), 0])):
            os.makedirs(eval_output_dir)
        args.eval_batch_size = (args.per_gpu_eval_batch_size * max(1, args.n_gpu))
        eval_sampler = (SequentialSampler(eval_dataset) if (args.local_rank == (- 1)) else DistributedSampler(eval_dataset))
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
        if (args.n_gpu > 1):
            model = nn.DataParallel(model)
        logger.info('***** Running evaluation {} *****'.format(prefix))
        logger.info(' Num examples = %d', len(eval_dataset))
        logger.info(' Batch size = %d', args.eval_batch_size)
        eval_loss = 0.0
        nb_eval_steps = 0
        preds = None
        out_label_ids = None
        # Tracks how many examples exited at each (1-indexed) layer.
        exit_layer_counter = {(i + 1): 0 for i in range(model.num_layers)}
        st = time.time()
        for batch in tqdm(eval_dataloader, desc='Evaluating'):
            model.eval()
            batch = tuple((t.to(args.device) for t in batch))
            with torch.no_grad():
                inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
                if (args.model_type != 'distilbert'):
                    # Only BERT/XLNet consume segment ids; others get None.
                    inputs['token_type_ids'] = (batch[2] if (args.model_type in ['bert', 'xlnet']) else None)
                if (output_layer >= 0):
                    inputs['output_layer'] = output_layer
                outputs = model(**inputs)
                if eval_highway:
                    # Last output element is the index of the exit layer.
                    exit_layer_counter[outputs[(- 1)]] += 1
                (tmp_eval_loss, logits) = outputs[:2]
                eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            # Accumulate logits/labels across batches on the CPU.
            if (preds is None):
                preds = logits.detach().cpu().numpy()
                out_label_ids = inputs['labels'].detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
        eval_time = (time.time() - st)
        logger.info('Eval time: {}'.format(eval_time))
        eval_loss = (eval_loss / nb_eval_steps)
        if (args.output_mode == 'classification'):
            preds = np.argmax(preds, axis=1)
        elif (args.output_mode == 'regression'):
            preds = np.squeeze(preds)
        result = compute_metrics(eval_task, preds, out_label_ids)
        results.update(result)
        if eval_highway:
            logger.info('Exit layer counter: {}'.format(exit_layer_counter))
            # Cost = sum over layers of (layer index * examples that exited there),
            # compared against always running all layers on every batch.
            actual_cost = sum([(l * c) for (l, c) in exit_layer_counter.items()])
            full_cost = (len(eval_dataloader) * model.num_layers)
            logger.info('Expected saving: {}'.format((actual_cost / full_cost)))
            if (args.early_exit_entropy >= 0):
                # NOTE(review): model_name_or_path[2:] assumes a leading './' — confirm.
                save_fname = (((args.plot_data_dir + '/') + args.model_name_or_path[2:]) + '/entropy_{}.npy'.format(args.early_exit_entropy))
                if (not os.path.exists(os.path.dirname(save_fname))):
                    os.makedirs(os.path.dirname(save_fname))
                print_result = get_wanted_result(result)
                np.save(save_fname, np.array([exit_layer_counter, eval_time, (actual_cost / full_cost), print_result]))
                logger.info('Entropy={}\tResult={:.2f}'.format(args.early_exit_entropy, (100 * print_result)))
        output_eval_file = os.path.join(eval_output_dir, prefix, 'eval_results.txt')
        with open(output_eval_file, 'w') as writer:
            logger.info('***** Eval results {} *****'.format(prefix))
            for key in sorted(result.keys()):
                logger.info(' %s = %s', key, str(result[key]))
                writer.write(('%s = %s\n' % (key, str(result[key]))))
    return results
class Visualizer():
    """Training visualizer: pushes images/losses to visdom and an HTML page,
    and appends loss lines to a text log.

    Fix: `save_images` compared `label is 'fake_B'` — identity comparison on a
    string literal (implementation-dependent); replaced with `==`.
    """

    def __init__(self, opt):
        self.display_id = opt.display_id
        self.use_html = (opt.isTrain and (not opt.no_html))
        self.win_size = opt.display_winsize
        self.name = opt.name
        self.opt = opt
        # Tracks whether current-epoch images were already written to HTML.
        self.saved = False
        if (self.display_id > 0):
            import visdom
            self.vis = visdom.Visdom(port=opt.display_port)
        if self.use_html:
            self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
            # NOTE(review): img_dir is the web dir itself (no images/ subfolder) — confirm intentional.
            self.img_dir = os.path.join(self.web_dir)
            print(('create web directory %s...' % self.web_dir))
            util.mkdirs([self.web_dir, self.img_dir])
        self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
        with open(self.log_name, 'a') as log_file:
            now = time.strftime('%c')
            log_file.write((' Training Loss (%s) \n' % now))

    def reset(self):
        """Forget that this epoch's images were saved (call at epoch start)."""
        self.saved = False

    def display_current_results(self, visuals, epoch, save_result):
        """Show `visuals` (label -> HWC image array) in visdom and/or save to HTML."""
        if (self.display_id > 0):
            ncols = self.opt.display_single_pane_ncols
            if (ncols > 0):
                # Single-pane mode: lay images out in an ncols-wide HTML table.
                (h, w) = next(iter(visuals.values())).shape[:2]
                table_css = ('<style>\n table {border-collapse: separate; border-spacing:4px; white-space:nowrap; text-align:center}\n table td {width: %dpx; height: %dpx; padding: 4px; outline: 4px solid black}\n </style>' % (w, h))
                title = self.name
                label_html = ''
                label_html_row = ''
                images = []
                idx = 0
                for (label, image_numpy) in visuals.items():
                    label_html_row += ('<td>%s</td>' % label)
                    images.append(image_numpy.transpose([2, 0, 1]))
                    idx += 1
                    if ((idx % ncols) == 0):
                        label_html += ('<tr>%s</tr>' % label_html_row)
                        label_html_row = ''
                # Pad the last row with white placeholder images.
                white_image = (np.ones_like(image_numpy.transpose([2, 0, 1])) * 255)
                while ((idx % ncols) != 0):
                    images.append(white_image)
                    label_html_row += '<td></td>'
                    idx += 1
                if (label_html_row != ''):
                    label_html += ('<tr>%s</tr>' % label_html_row)
                self.vis.images(images, nrow=ncols, win=(self.display_id + 1), padding=2, opts=dict(title=(title + ' images')))
                label_html = ('<table>%s</table>' % label_html)
                self.vis.text((table_css + label_html), win=(self.display_id + 2), opts=dict(title=(title + ' labels')))
            else:
                # One visdom window per image.
                idx = 1
                for (label, image_numpy) in visuals.items():
                    self.vis.image(image_numpy.transpose([2, 0, 1]), opts=dict(title=label), win=(self.display_id + idx))
                    idx += 1
        if (self.use_html and (save_result or (not self.saved))):
            self.saved = True
            for (label, image_numpy) in visuals.items():
                img_path = os.path.join(self.img_dir, ('epoch%.3d_%s.png' % (epoch, label)))
                util.save_image(image_numpy, img_path)
            # Rebuild the index page listing every epoch, newest first.
            webpage = html.HTML(self.web_dir, ('Experiment name = %s' % self.name), reflesh=1)
            for n in range(epoch, 0, (- 1)):
                webpage.add_header(('epoch [%d]' % n))
                ims = []
                txts = []
                links = []
                for (label, image_numpy) in visuals.items():
                    img_path = ('epoch%.3d_%s.png' % (n, label))
                    ims.append(img_path)
                    txts.append(label)
                    links.append(img_path)
                webpage.add_images(ims, txts, links, width=self.win_size)
            webpage.save()

    def plot_current_errors(self, epoch, counter_ratio, opt, errors):
        """Append the current losses and redraw the visdom line plot."""
        if (not hasattr(self, 'plot_data')):
            self.plot_data = {'X': [], 'Y': [], 'legend': list(errors.keys())}
        self.plot_data['X'].append((epoch + counter_ratio))
        self.plot_data['Y'].append([errors[k] for k in self.plot_data['legend']])
        self.vis.line(X=np.stack(([np.array(self.plot_data['X'])] * len(self.plot_data['legend'])), 1), Y=np.array(self.plot_data['Y']), opts={'title': (self.name + ' loss over time'), 'legend': self.plot_data['legend'], 'xlabel': 'epoch', 'ylabel': 'loss'}, win=self.display_id)

    def print_current_errors(self, epoch, i, errors, t):
        """Print the loss line to stdout and append it to the log file."""
        message = ('(epoch: %d, iters: %d, time: %.3f) ' % (epoch, i, t))
        for (k, v) in errors.items():
            message += ('%s: %.3f ' % (k, v))
        print(message)
        with open(self.log_name, 'a') as log_file:
            log_file.write(('%s\n' % message))

    def save_images(self, webpage, visuals, image_path, aspect_ratio=0.0):
        """Save the 'fake_B' visual to the webpage's image dir and register it."""
        image_dir = webpage.get_image_dir()
        short_path = ntpath.basename(image_path[0])
        name = os.path.splitext(short_path)[0]
        webpage.add_header(name)
        ims = []
        txts = []
        links = []
        for (label, im) in visuals.items():
            # Fix: was `label is 'fake_B'` (identity test on a str literal).
            if (label == 'fake_B'):
                image_name = ('%s.png' % name)
                save_path = os.path.join(image_dir, image_name)
                (h, w, _) = im.shape
                if (aspect_ratio > 1.0):
                    print('in > 1')
                    im = imresize(im, (h, int((w * aspect_ratio))), interp='bicubic')
                if (aspect_ratio < 1.0):
                    # NOTE(review): this branch only logs; no resize happens — confirm intended.
                    print('in < 1')
                util.save_image(im, save_path)
                ims.append(image_name)
                txts.append(label)
                links.append(image_name)
        webpage.add_images(ims, txts, links, width=self.win_size)
class BaseDraftWire(BaseSketch):
    """Constraint element requiring at least one edge from an un-subdivided Draft.Wire."""

    _id = -1

    @classmethod
    def check(cls, elements, checkCount=False):
        """Validate the selected elements; raise RuntimeError when no element
        comes from a non-subdivided Draft.Wire.

        Fix(review): restored the `@classmethod` decorator — the method takes
        `cls`, calls `super(BaseDraftWire, cls).check(...)` and `cls.getName()`,
        so it was clearly written as a classmethod (decorator lost upstream).
        """
        super(BaseDraftWire, cls).check(elements, checkCount)
        if not checkCount:
            return
        for info in elements:
            if utils.isDraftWire(info.Part):
                return
        raise RuntimeError('Constraint "{}" requires at least one linear edge from a none-subdivided Draft.Wire'.format(cls.getName()))
def test_multi_marker_union_with_multi_union_is_single_marker() -> None:
    """Union of `darwin and py==3` with `darwin and (py<3 or py>3)` must
    collapse to just `sys_platform == "darwin"` in either order."""
    equal_marker = parse_marker('sys_platform == "darwin" and python_version == "3"')
    split_marker = parse_marker('sys_platform == "darwin" and python_version < "3" or sys_platform == "darwin" and python_version > "3"')
    assert str(equal_marker.union(split_marker)) == 'sys_platform == "darwin"'
    assert str(split_marker.union(equal_marker)) == 'sys_platform == "darwin"'
# Fix(review): the decorator line was truncated to a bare `.supported(...)`
# (a syntax error); restored the `@pytest.mark.supported` form used by the
# cryptography test suite's backend-capability gating.
@pytest.mark.supported(only_if=(lambda backend: backend.cipher_supported(algorithms.TripleDES((b'\x00' * 8)), modes.CFB((b'\x00' * 8)))), skip_message='Does not support TripleDES CFB')
class TestTripleDESModeCFB():
    """NIST known-answer and multi-message tests for 3DES in CFB mode."""
    test_kat = generate_encrypt_test(load_nist_vectors, os.path.join('ciphers', '3DES', 'CFB'), ['TCFB64invperm.rsp', 'TCFB64permop.rsp', 'TCFB64subtab.rsp', 'TCFB64varkey.rsp', 'TCFB64vartext.rsp'], (lambda keys, **kwargs: algorithms.TripleDES(binascii.unhexlify(keys))), (lambda iv, **kwargs: modes.CFB(binascii.unhexlify(iv))))
    test_mmt = generate_encrypt_test(load_nist_vectors, os.path.join('ciphers', '3DES', 'CFB'), ['TCFB64MMT1.rsp', 'TCFB64MMT2.rsp', 'TCFB64MMT3.rsp'], (lambda key1, key2, key3, **kwargs: algorithms.TripleDES(binascii.unhexlify(((key1 + key2) + key3)))), (lambda iv, **kwargs: modes.CFB(binascii.unhexlify(iv))))
def _ButtonTruncInfo(win):
    """Return [(text, adjusted_rect, font, draw_flags)] for a button control,
    shrinking the client rect by style-dependent margins."""
    draw_flags = win32defines.DT_SINGLELINE
    if win.has_style(win32defines.BS_MULTILINE):
        draw_flags = win32defines.DT_WORDBREAK
    height_adj, width_adj = 4, 5
    if win.has_style(win32defines.BS_PUSHLIKE):
        width_adj, height_adj = 3, 3
        if win.has_style(win32defines.BS_MULTILINE):
            width_adj, height_adj = 9, 2
    if win.has_style(win32defines.BS_BITMAP) or win.has_style(win32defines.BS_ICON):
        # Image-only buttons have no text to truncate; huge negative margins
        # effectively disable the truncation check.
        height_adj = -9000
        width_adj = -9000
        draw_flags = win32defines.DT_WORDBREAK
    adjusted = win.client_rects()[0]
    adjusted.right -= width_adj
    adjusted.bottom -= height_adj
    return [(win.window_text(), adjusted, win.font(), draw_flags)]
class Effect6436(BaseEffect):
    """fighterAbilityWarpDisruption — applies warp scramble strength to the target ship."""

    displayName = 'Warp Disruption'
    grouped = True
    prefix = 'fighterAbilityWarpDisruption'
    type = ('active', 'projected')

    @classmethod
    def handler(cls, fit, src, context, projectionRange, **kwargs):
        """Increase the target fit's warpScrambleStatus when projected in range.

        Fix(review): restored the `@classmethod` decorator — the method takes
        `cls` and reads `cls.prefix`, matching the effect-handler pattern
        (decorator lost upstream).
        """
        if ('projected' not in context):
            return
        # Targets immune to offensive modifiers are unaffected.
        if fit.ship.getModifiedItemAttr('disallowOffensiveModifiers'):
            return
        # Out of range: no effect.
        if (src.getModifiedItemAttr('{}Range'.format(cls.prefix), 0) < (projectionRange or 0)):
            return
        fit.ship.increaseItemAttr('warpScrambleStatus', (src.getModifiedItemAttr('{}PointStrength'.format(cls.prefix)) * src.amount), **kwargs)
# Fix(review): the decorator line was truncated to a bare `.parametrize(...)`
# (a syntax error); restored the `@pytest.mark.parametrize` form.
@pytest.mark.parametrize('b, loc, scale, size', [(np.array(5, dtype=config.floatX), np.array(0, dtype=config.floatX), np.array(1, dtype=config.floatX), None), (np.array(5, dtype=config.floatX), np.array(0, dtype=config.floatX), np.array(1, dtype=config.floatX), []), (np.array(5, dtype=config.floatX), np.array(0, dtype=config.floatX), np.array(1, dtype=config.floatX), [2, 3]), (np.full((1, 2), 5, dtype=config.floatX), np.array(0, dtype=config.floatX), np.array(1, dtype=config.floatX), None)])
def test_truncexpon_samples(b, loc, scale, size):
    """Samples from the truncexpon RandomVariable must match scipy's rng_fn draws."""
    compare_sample_values(truncexpon, b, loc, scale, size=size, test_fn=(lambda *args, size=None, random_state=None, **kwargs: truncexpon.rng_fn(random_state, *(args + (size,)))))
def test_animal_fly_dataset():
    """Smoke-test AnimalFlyDataset: construction, indexing, and PCK evaluation."""
    dataset = 'AnimalFlyDataset'
    dataset_class = DATASETS.get(dataset)
    dataset_info = Config.fromfile('configs/_base_/datasets/fly.py').dataset_info
    # 32 keypoints; all channels are used for both training and inference.
    channel_cfg = dict(num_output_channels=32, dataset_joints=32, dataset_channel=[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]], inference_channel=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])
    data_cfg = dict(image_size=[192, 192], heatmap_size=[48, 48], num_output_channels=channel_cfg['num_output_channels'], num_joints=channel_cfg['dataset_joints'], dataset_channel=channel_cfg['dataset_channel'], inference_channel=channel_cfg['inference_channel'])
    data_cfg_copy = copy.deepcopy(data_cfg)
    # Constructing in test mode must not raise (result intentionally unused).
    _ = dataset_class(ann_file='tests/data/fly/test_fly.json', img_prefix='tests/data/fly/', data_cfg=data_cfg_copy, dataset_info=dataset_info, pipeline=[], test_mode=True)
    custom_dataset = dataset_class(ann_file='tests/data/fly/test_fly.json', img_prefix='tests/data/fly/', data_cfg=data_cfg_copy, dataset_info=dataset_info, pipeline=[], test_mode=False)
    assert (custom_dataset.dataset_name == 'fly')
    assert (custom_dataset.test_mode is False)
    assert (custom_dataset.num_images == 2)
    # Indexing must work with an empty pipeline.
    _ = custom_dataset[0]
    # Feeding ground truth back as predictions should yield a perfect PCK.
    outputs = convert_db_to_output(custom_dataset.db)
    with tempfile.TemporaryDirectory() as tmpdir:
        infos = custom_dataset.evaluate(outputs, tmpdir, ['PCK'])
        assert_almost_equal(infos['PCK'], 1.0)
        # Unsupported metrics must be rejected.
        with pytest.raises(KeyError):
            infos = custom_dataset.evaluate(outputs, tmpdir, 'mAP')
class TestTrainingExtensionsUtils(unittest.TestCase):
def test_round_up_to_higher_multiplicity(self):
self.assertEqual(round_up_to_multiplicity(8, 3, 32), 8)
self.assertEqual(round_up_to_multiplicity(8, 13, 32), 16)
self.assertEqual(round_up_to_multiplicity(8, 17, 32), 24)
self.assertEqual(round_up_to_multiplicity(8, 29, 32), 32)
def test_round_down_to_lower_multiplicity(self):
self.assertEqual(round_down_to_multiplicity(8, 3), 3)
self.assertEqual(round_down_to_multiplicity(8, 13), 8)
self.assertEqual(round_down_to_multiplicity(8, 17), 16)
self.assertEqual(round_down_to_multiplicity(8, 29), 24)
self.assertEqual(round_down_to_multiplicity(8, 16), 8)
self.assertEqual(round_down_to_multiplicity(32, 64), 32)
def test_replace_relu_with_relu6(self):
model = torchvision.models.resnet18()
model.eval()
utils.replace_modules_of_type1_with_type2(model, torch.nn.ReLU, torch.nn.ReLU6)
for module in model.modules():
self.assertTrue((not isinstance(module, torch.nn.ReLU)))
with torch.no_grad():
x = torch.rand(1, 3, 224, 224)
output = model(x)
def test_replace_some_bns_with_passthrough(self):
model = torchvision.models.resnet18()
model.eval()
utils.replace_modules_with_instances_of_new_type(model, [model.layer1[0].bn1, model.layer1[1].bn1], torch.nn.Identity)
self.assertTrue(isinstance(model.layer1[0].bn1, torch.nn.Identity))
self.assertTrue(isinstance(model.layer1[1].bn1, torch.nn.Identity))
self.assertFalse(isinstance(model.layer1[0].bn2, torch.nn.Identity))
self.assertFalse(isinstance(model.layer1[1].bn2, torch.nn.Identity))
with torch.no_grad():
x = torch.rand(1, 3, 224, 224)
output = model(x)
def test_get_ordered_ops(self):
model = torchvision.models.resnet18(pretrained=False)
model.eval()
dummy_input = torch.randn(1, 3, 224, 224)
all_ops = utils.get_ordered_list_of_modules(model, dummy_input)
self.assertEqual(60, len(all_ops))
def test_get_reused_modules(self):
model = ModelWithReusedNodes()
model_input = torch.randn((1, 3, 32, 32))
reused_modules = aimet_torch.utils.get_reused_modules(model, model_input)
self.assertEqual(1, len(reused_modules))
self.assertEqual(reused_modules[0][1], model.relu1)
.cuda
def test_create_rand_tensors_given_shapes(self):
shape_1 = (1, 32)
shape_2 = (3, 3)
rand_tensors = utils.create_rand_tensors_given_shapes([shape_1, shape_2], device=torch.device('cpu'))
self.assertEqual(2, len(rand_tensors))
self.assertEqual(shape_1, rand_tensors[0].shape)
self.assertEqual(shape_2, rand_tensors[1].shape)
self.assertEqual(torch.device('cpu'), rand_tensors[0].device)
rand_tensors = utils.create_rand_tensors_given_shapes([shape_1, shape_2], device=torch.device('cuda:0'))
self.assertEqual(torch.device('cuda:0'), rand_tensors[0].device)
.cuda
def test_change_tensor_device(self):
random_tensor = torch.rand(2, 2)
random_tensor_new = utils.change_tensor_device_placement(random_tensor, device=torch.device('cuda:0'))
assert (random_tensor.device == torch.device('cpu'))
assert (random_tensor_new.device == torch.device('cuda:0'))
random_tensor = torch.rand(2, 2).to(device='cuda:0')
random_tensor_new = utils.change_tensor_device_placement(random_tensor, device=torch.device('cpu'))
assert (random_tensor.device == torch.device('cuda:0'))
assert (random_tensor_new.device == torch.device('cpu'))
random_tensor = [torch.rand(2, 2), torch.rand(2, 2), torch.rand(2, 2)]
random_tensor_new = utils.change_tensor_device_placement(random_tensor, device=torch.device('cuda:0'))
for item in random_tensor_new:
assert (item.device == torch.device('cuda:0'))
for item in random_tensor:
assert (item.device == torch.device('cpu'))
assert (len(random_tensor) == len(random_tensor_new))
random_tensor = [torch.rand(2, 2).to(device='cuda:0'), torch.rand(2, 2).to(device='cuda:0'), torch.rand(2, 2).to(device='cuda:0')]
random_tensor_new = utils.change_tensor_device_placement(random_tensor, device=torch.device('cpu'))
for item in random_tensor_new:
assert (item.device == torch.device('cpu'))
for item in random_tensor:
assert (item.device == torch.device('cuda:0'))
assert (len(random_tensor) == len(random_tensor_new))
random_tensor = [[torch.rand(1, 1), torch.rand(1, 1)], [torch.rand(2, 2), torch.rand(2, 2), torch.rand(2, 2), torch.rand(2, 2)], torch.rand(2, 2)]
random_tensor_new = utils.change_tensor_device_placement(random_tensor, device=torch.device('cuda:0'))
assert (random_tensor_new[0][0].device == torch.device('cuda:0'))
assert (random_tensor_new[0][1].device == torch.device('cuda:0'))
assert (random_tensor_new[1][0].device == torch.device('cuda:0'))
assert (random_tensor_new[1][1].device == torch.device('cuda:0'))
assert (random_tensor_new[1][2].device == torch.device('cuda:0'))
assert (random_tensor_new[1][3].device == torch.device('cuda:0'))
assert (random_tensor_new[2].device == torch.device('cuda:0'))
assert (random_tensor[0][0].device == torch.device('cpu'))
assert (random_tensor[0][1].device == torch.device('cpu'))
assert (random_tensor[1][0].device == torch.device('cpu'))
assert (random_tensor[1][1].device == torch.device('cpu'))
assert (random_tensor[1][2].device == torch.device('cpu'))
assert (random_tensor[1][3].device == torch.device('cpu'))
assert (random_tensor[2].device == torch.device('cpu'))
assert (len(random_tensor) == len(random_tensor_new))
random_tensor = ([torch.rand(1, 1), torch.rand(1, 1)], [torch.rand(2, 2), torch.rand(2, 2), torch.rand(2, 2), torch.rand(2, 2)], (torch.rand(2, 2), torch.rand(2, 2), torch.rand(2, 2), torch.rand(2, 2)))
random_tensor_new = utils.change_tensor_device_placement(random_tensor, device=torch.device('cuda:0'))
assert (random_tensor_new[0][0].device == torch.device('cuda:0'))
assert (random_tensor_new[0][1].device == torch.device('cuda:0'))
assert (random_tensor_new[1][0].device == torch.device('cuda:0'))
assert (random_tensor_new[1][1].device == torch.device('cuda:0'))
assert (random_tensor_new[1][2].device == torch.device('cuda:0'))
assert (random_tensor_new[1][3].device == torch.device('cuda:0'))
assert (random_tensor_new[2][0].device == torch.device('cuda:0'))
assert (random_tensor_new[2][1].device == torch.device('cuda:0'))
assert (random_tensor_new[2][2].device == torch.device('cuda:0'))
assert (random_tensor_new[2][3].device == torch.device('cuda:0'))
assert (random_tensor[0][0].device == torch.device('cpu'))
assert (random_tensor[0][1].device == torch.device('cpu'))
assert (random_tensor[1][0].device == torch.device('cpu'))
assert (random_tensor[1][1].device == torch.device('cpu'))
assert (random_tensor[1][2].device == torch.device('cpu'))
assert (random_tensor[1][3].device == torch.device('cpu'))
assert (random_tensor[2][0].device == torch.device('cpu'))
assert (random_tensor[2][1].device == torch.device('cpu'))
assert (random_tensor[2][2].device == torch.device('cpu'))
assert (random_tensor[2][3].device == torch.device('cpu'))
assert (len(random_tensor) == len(random_tensor_new))
assert isinstance(random_tensor_new, tuple)
assert isinstance(random_tensor_new[0], list)
assert isinstance(random_tensor_new[1], list)
assert isinstance(random_tensor_new[2], tuple)
random_tensor = ((torch.rand(1, 1), torch.rand(1, 1)), torch.rand(2, 2), torch.rand(2, 2))
random_tensor_new = utils.change_tensor_device_placement(random_tensor, device=torch.device('cuda:0'))
assert (random_tensor_new[0][0].device == torch.device('cuda:0'))
assert (random_tensor_new[0][1].device == torch.device('cuda:0'))
assert (random_tensor_new[1].device == torch.device('cuda:0'))
assert (random_tensor_new[2].device == torch.device('cuda:0'))
assert (random_tensor[0][0].device == torch.device('cpu'))
assert (random_tensor[0][1].device == torch.device('cpu'))
assert (random_tensor[1].device == torch.device('cpu'))
assert (random_tensor[2].device == torch.device('cpu'))
assert (len(random_tensor) == len(random_tensor_new))
assert isinstance(random_tensor_new, tuple)
assert isinstance(random_tensor_new[0], tuple)
def _collect_inp_out_data(self, device):
    """Verify utils.ModuleData honors collect_input/collect_output flags on a single-input model."""
    net = TinyModel().to(device=device)
    net.eval()
    x = torch.randn(1, 3, 32, 32).to(device=device)

    # Neither side requested -> both results are None.
    collector = utils.ModuleData(net, net.conv1)
    captured_in, captured_out = collector.collect_inp_out_data(x, collect_input=False, collect_output=False)
    self.assertEqual(captured_in, None)
    self.assertEqual(captured_out, None)

    # Input only: captured input equals the model input.
    collector = utils.ModuleData(net, net.conv1)
    captured_in, captured_out = collector.collect_inp_out_data(x, collect_input=True, collect_output=False)
    self.assertTrue(np.array_equal(utils.to_numpy(captured_in), utils.to_numpy(x)))
    self.assertEqual(captured_out, None)

    # Output only: captured output equals a direct conv1 forward pass.
    collector = utils.ModuleData(net, net.conv1)
    captured_in, captured_out = collector.collect_inp_out_data(x, collect_input=False, collect_output=True)
    expected = net.conv1(x)
    self.assertTrue(np.array_equal(utils.to_numpy(captured_out), utils.to_numpy(expected)))
    self.assertEqual(captured_in, None)

    # Both sides at once.
    collector = utils.ModuleData(net, net.conv1)
    captured_in, captured_out = collector.collect_inp_out_data(x, collect_input=True, collect_output=True)
    expected = net.conv1(x)
    self.assertTrue(np.array_equal(utils.to_numpy(captured_out), utils.to_numpy(expected)))
    self.assertTrue(np.array_equal(utils.to_numpy(captured_in), utils.to_numpy(x)))

    # Final layer: its captured output equals the whole model's output.
    collector = utils.ModuleData(net, net.fc)
    captured_in, captured_out = collector.collect_inp_out_data(x, collect_input=False, collect_output=True)
    expected = net(x)
    self.assertTrue(np.array_equal(utils.to_numpy(captured_out), utils.to_numpy(expected)))
    self.assertEqual(captured_in, None)
def test_collect_inp_out_data_cpu(self):
    """Run the ModuleData input/output collection checks on CPU."""
    self._collect_inp_out_data(torch.device('cpu'))
# NOTE(review): the bare '.cuda' below appears to be a decorator truncated by
# extraction (presumably @pytest.mark.cuda) -- confirm against the original file;
# as written it is not valid syntax.
.cuda
def test_collect_inp_out_data_gpu(self):
    """Run the ModuleData input/output collection checks on a CUDA device."""
    self._collect_inp_out_data(torch.device('cuda:0'))
def _collect_inp_out_data_multi_input(self, device):
    """Exercise ModuleData on a two-input model, supplying a custom forward_fn to unpack the inputs."""
    net = MultiInput().to(device=device)
    net.eval()
    inputs = utils.create_rand_tensors_given_shapes([(1, 3, 32, 32), (1, 3, 20, 20)], device)

    def forward_fn(model, inputs):
        model(*inputs)

    # conv1 consumes the first model input.
    collector = utils.ModuleData(net, net.conv1, forward_fn)
    captured_in, captured_out = collector.collect_inp_out_data(inputs, collect_input=True, collect_output=False)
    self.assertTrue(np.array_equal(utils.to_numpy(captured_in), utils.to_numpy(inputs[0])))
    self.assertEqual(captured_out, None)

    collector = utils.ModuleData(net, net.conv1, forward_fn)
    captured_in, captured_out = collector.collect_inp_out_data(inputs, collect_input=False, collect_output=True)
    expected = net.conv1(inputs[0])
    self.assertTrue(np.array_equal(utils.to_numpy(captured_out), utils.to_numpy(expected)))
    self.assertEqual(captured_in, None)

    # conv3 consumes the second model input.
    collector = utils.ModuleData(net, net.conv3, forward_fn)
    captured_in, captured_out = collector.collect_inp_out_data(inputs, collect_input=True, collect_output=True)
    expected = net.conv3(inputs[1])
    self.assertTrue(np.array_equal(utils.to_numpy(captured_out), utils.to_numpy(expected)))
    self.assertTrue(np.array_equal(utils.to_numpy(captured_in), utils.to_numpy(inputs[1])))

    # fc is the last layer, so its captured output equals the model output.
    collector = utils.ModuleData(net, net.fc, forward_fn)
    captured_in, captured_out = collector.collect_inp_out_data(inputs, collect_input=False, collect_output=True)
    expected = net(*inputs)
    self.assertTrue(np.array_equal(utils.to_numpy(captured_out), utils.to_numpy(expected)))
    self.assertEqual(captured_in, None)
def test_collect_inp_out_data_multi_input_cpu(self):
    """Run the multi-input ModuleData checks on CPU."""
    self._collect_inp_out_data_multi_input(torch.device('cpu'))
# NOTE(review): bare '.cuda' is presumably a truncated @pytest.mark.cuda decorator -- confirm.
.cuda
def test_collect_inp_out_data_multi_input_gpu(self):
    """Run the multi-input ModuleData checks on a CUDA device."""
    self._collect_inp_out_data_multi_input(torch.device('cuda:0'))
def _collect_inp_out_data_int_input(self, device):
    """ModuleData must handle integer (token-id) model inputs, e.g. for an embedding layer."""
    net = EmbeddingModel().to(device=device)
    net.eval()
    token_ids = torch.randint(1000, (10, 128))
    collector = utils.ModuleData(net, net.linear)
    captured_in, captured_out = collector.collect_inp_out_data(token_ids, collect_input=True, collect_output=True)
    # linear's input is the embedding output; its output is linear applied to that.
    embedded = net.embedding(token_ids.to(device))
    assert torch.equal(captured_in, embedded)
    assert torch.equal(captured_out, net.linear(embedded))
def test_collect_inp_out_data_int_input_cpu(self):
    """Run the integer-input ModuleData checks on CPU."""
    self._collect_inp_out_data_int_input(torch.device('cpu'))
# NOTE(review): bare '.cuda' is presumably a truncated @pytest.mark.cuda decorator -- confirm.
.cuda
def test_collect_inp_out_data_int_input_gpu(self):
    """Run the integer-input ModuleData checks on a CUDA device."""
    self._collect_inp_out_data_int_input(torch.device('cuda:0'))
def test_collect_inp_out_data_quantsim_model_cpu(self):
    """On CPU: fp32 fc output must differ from the quantsim model's output, while the
    captured conv1 input is the raw model input."""
    for device in [torch.device('cpu')]:
        net = TinyModel().to(device=device)
        x = torch.randn(1, 3, 32, 32).to(device=device)
        sim = QuantizationSimModel(net, dummy_input=torch.rand(1, 3, 32, 32))

        # fc output of the original model vs. the quantized model's output.
        collector = utils.ModuleData(net, net.fc)
        captured_in, captured_out = collector.collect_inp_out_data(x, collect_input=False, collect_output=True)
        quant_out = sim.model(x)
        self.assertFalse(np.array_equal(utils.to_numpy(captured_out), utils.to_numpy(quant_out)))

        # conv1's captured input is exactly the model input.
        collector = utils.ModuleData(net, net.conv1)
        captured_in, captured_out = collector.collect_inp_out_data(x, collect_input=True, collect_output=False)
        self.assertTrue(np.array_equal(utils.to_numpy(captured_in), utils.to_numpy(x)))
# NOTE(review): bare '.cuda' is presumably a truncated @pytest.mark.cuda decorator -- confirm.
.cuda
def test_collect_inp_out_data_quantsim_model_gpu(self):
    """GPU variant: fp32 fc output differs from quantsim output; conv1 input is captured verbatim."""
    device_list = [torch.device('cuda:0')]
    for device in device_list:
        model = TinyModel().to(device=device)
        model_input = torch.randn(1, 3, 32, 32).to(device=device)
        sim = QuantizationSimModel(model, dummy_input=torch.rand(1, 3, 32, 32).to(device=device))
        # Output of the original fc should not match the quantized model's output.
        module_data = utils.ModuleData(model, model.fc)
        (inp, out) = module_data.collect_inp_out_data(model_input, collect_input=False, collect_output=True)
        fc_out = sim.model(model_input)
        self.assertFalse(np.array_equal(utils.to_numpy(out), utils.to_numpy(fc_out)))
        # Input of conv1 is the model input itself.
        module_data = utils.ModuleData(model, model.conv1)
        (inp, out) = module_data.collect_inp_out_data(model_input, collect_input=True, collect_output=False)
        self.assertTrue(np.array_equal(utils.to_numpy(inp), utils.to_numpy(model_input)))
def test_cached_dataset(self):
    """CachedDataset should cache exactly num_batches batches from a data loader and
    reject a request for more batches than the loader can provide.

    Fix: the on-disk cache directory is now removed in a ``finally`` block, so a
    failing assertion no longer leaks /tmp/test_cached_dataset/ between runs.
    """
    dataset_size = 256
    batch_size = 16
    data_loader = utils.create_fake_data_loader(dataset_size=dataset_size, batch_size=batch_size, image_size=(1, 2, 2))
    num_batches = 6
    path = '/tmp/test_cached_dataset/'
    try:
        cached_dataset = utils.CachedDataset(data_loader, num_batches, path)
        self.assertEqual(len(cached_dataset), num_batches)
        # Requesting more batches than the loader holds must raise.
        possible_batches = math.ceil(dataset_size / batch_size)
        with pytest.raises(ValueError):
            utils.CachedDataset(data_loader, possible_batches + 1, path)
    finally:
        # Always clean up, even when an assertion above fails; ignore_errors covers
        # the case where the directory was never created.
        shutil.rmtree(path, ignore_errors=True)
def test_find_num_inout_map(self):
    """find_num_inout_tensors_per_module should report (num_inputs, num_outputs) per module."""
    # Every module of SingleResidual is single-input / single-output.
    model = SingleResidual()
    inout_map = utils.find_num_inout_tensors_per_module(model, [torch.rand(1, 3, 32, 32)])
    inout_counts_check = [(num_outputs == (1, 1)) for num_outputs in inout_map.values()]
    self.assertTrue(all(inout_counts_check))

    class MyLayer(torch.nn.Module):
        # Produces two outputs from a single input.
        def __init__(self):
            super(MyLayer, self).__init__()

        def forward(self, inputs):
            return ((inputs * 100), (inputs + 100))

    class MyModel(torch.nn.Module):
        def __init__(self):
            super(MyModel, self).__init__()
            self.conv1 = torch.nn.Conv2d(3, 32, 3)
            self.relu1 = torch.nn.ReLU()
            self.layer1 = MyLayer()
            self.conv2 = torch.nn.Conv2d(32, 32, 3)
            # NOTE(review): conv3 is declared but never used in forward (both branches
            # reuse conv2) -- presumably deliberate for this counting test; confirm.
            self.conv3 = torch.nn.Conv2d(32, 32, 3)
            self.add = elementwise_ops.Add()

        def forward(self, x):
            x = self.conv1(x)
            x = self.relu1(x)
            (x1, x2) = self.layer1(x)
            x1 = self.conv2(x1)
            x2 = self.conv2(x2)
            x = self.add(x1, x2)
            return x

    # layer1 is (1 in, 2 out) and add is (2 in, 1 out); everything else stays (1, 1).
    model = MyModel()
    inout_map = utils.find_num_inout_tensors_per_module(model, [torch.rand(1, 3, 32, 32)])
    inout_counts_check = [(num_outputs == (1, 1)) for num_outputs in inout_map.values()]
    self.assertFalse(all(inout_counts_check))
    self.assertEqual(2, inout_counts_check.count(False))
    self.assertEqual((1, 2), inout_map[model.layer1])
    self.assertEqual((2, 1), inout_map[model.add])
def test_model_in_eval_mode(self):
    """utils.in_eval_mode must force eval() inside the context and restore the previous
    training flag afterwards -- including when the body raises.

    Fix: the bare ``except:`` is narrowed to ``except AssertionError:`` so only the
    deliberately raised exception is swallowed (a bare except would also hide
    KeyboardInterrupt/SystemExit and unrelated errors).
    """
    model = TinyModel().eval()
    model_input = torch.randn(1, 3, 32, 32)

    # Already in eval mode: stays eval inside and after the context.
    model.eval()
    with utils.in_eval_mode(model):
        model(model_input)
        _assert_mode_recursive(model, training=False)
    _assert_mode_recursive(model, training=False)

    # In train mode: eval inside, restored to train afterwards.
    model.train()
    with utils.in_eval_mode(model):
        model(model_input)
        _assert_mode_recursive(model, training=False)
    _assert_mode_recursive(model, training=True)

    # Mode must be restored even when the context body raises.
    model.train()
    try:
        with utils.in_eval_mode(model):
            model(model_input)
            _assert_mode_recursive(model, training=False)
            raise AssertionError
    except AssertionError:
        pass
    _assert_mode_recursive(model, training=True)
def test_model_in_train_mode(self):
    """utils.in_train_mode must force train() inside the context and restore the previous
    training flag afterwards -- including when the body raises.

    Fix: the bare ``except:`` is narrowed to ``except AssertionError:`` so only the
    deliberately raised exception is swallowed.
    """
    model = TinyModel().eval()
    model_input = torch.randn(1, 3, 32, 32)

    # Starting in eval mode: train inside, restored to eval after.
    model.eval()
    with utils.in_train_mode(model):
        model(model_input)
        _assert_mode_recursive(model, training=True)
    _assert_mode_recursive(model, training=False)

    # Starting in train mode: stays train inside and after.
    model.train()
    with utils.in_train_mode(model):
        model(model_input)
        _assert_mode_recursive(model, training=True)
    _assert_mode_recursive(model, training=True)

    # Mode must be restored even when the context body raises.
    model.eval()
    try:
        with utils.in_train_mode(model):
            model(model_input)
            _assert_mode_recursive(model, training=True)
            raise AssertionError
    except AssertionError:
        pass
    _assert_mode_recursive(model, training=False)
def test_is_torch_module(self):
    """is_torch_nn_module should be True for native torch.nn modules (including containers)
    and False for custom modules, even though they subclass torch.nn.Module.

    Fix: ``CustomModule.forward`` was declared as ``forward(x)`` without ``self``, which
    would fail on any forward call; the signature is corrected.
    """
    assert utils.is_torch_nn_module(torch.nn.Conv2d(3, 3, 2))
    assert utils.is_torch_nn_module(torch.nn.Linear(3, 10))
    assert utils.is_torch_nn_module(torch.nn.BatchNorm2d(3))
    assert utils.is_torch_nn_module(torch.nn.RNN(input_size=3, hidden_size=5, num_layers=1))
    assert utils.is_torch_nn_module(torch.nn.LSTM(input_size=3, hidden_size=5, num_layers=1, bidirectional=True))
    assert utils.is_torch_nn_module(torch.nn.Sequential(torch.nn.Conv2d(3, 16, 2), torch.nn.BatchNorm2d(16)))
    assert utils.is_torch_nn_module(torch.nn.ModuleList([torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=1),
                                                         torch.nn.ReLU(inplace=True),
                                                         torch.nn.Conv2d(16, 8, kernel_size=2)]))
    assert not utils.is_torch_nn_module(elementwise_ops.Add())
    assert not utils.is_torch_nn_module(elementwise_ops.Multiply())
    assert not utils.is_torch_nn_module(elementwise_ops.Concat())

    class CustomModule(torch.nn.Module):
        def forward(self, x):  # fixed: was `forward(x)`, missing `self`
            return x * F.softplus(x).sigmoid()

    assert not utils.is_torch_nn_module(CustomModule())
# NOTE(review): bare '.cuda' is presumably a truncated @pytest.mark.cuda decorator -- confirm.
.cuda
def test_match_model_settings(self):
    """match_model_settings should copy the training flag (and device, when inferable)
    from one model to another."""

    class NoParamsModel(torch.nn.Module):
        # Parameterless model: device cannot be inferred from weights.
        def __init__(self):
            super(NoParamsModel, self).__init__()
            self.relu1 = torch.nn.ReLU()
            self.relu2 = torch.nn.ReLU()

        def forward(self, inp):
            x = self.relu1(inp)
            x = self.relu2(inp)
            return x

    # Device and training flag are transferred from model1 onto model2.
    model1 = SingleResidual()
    model1.to('cpu')
    model1.train()
    model2 = SingleResidual()
    model2.to('cuda:0')
    model2.eval()
    assert (not model2.training)
    assert (utils.get_device(model1) != utils.get_device(model2))
    utils.match_model_settings(model1, model2)
    assert model2.training
    assert (utils.get_device(model1) == utils.get_device(model2))

    # Works for parameterless models too (training flag only).
    model1 = NoParamsModel()
    model1.train()
    model2 = NoParamsModel()
    model2.eval()
    assert (not model2.training)
    utils.match_model_settings(model1, model2)
    assert model2.training
def test_load_pytorch_model(self):
    """load_pytorch_model should import a model class from a .py file on disk, optionally
    load a saved state dict, and raise AssertionError when either file is missing."""

    class MiniModel(torch.nn.Module):
        def __init__(self):
            super(MiniModel, self).__init__()
            self.conv1 = torch.nn.Conv2d(3, 8, kernel_size=2, stride=2, padding=2, bias=False)
            self.bn1 = torch.nn.BatchNorm2d(8)
            self.relu1 = torch.nn.ReLU(inplace=True)
            self.maxpool = torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=1)
            self.fc = torch.nn.Linear(128, 12)

        def forward(self, *inputs):
            x = self.conv1(inputs[0])
            x = self.bn1(x)
            x = self.relu1(x)
            x = self.maxpool(x)
            x = x.view(x.size(0), (- 1))
            x = self.fc(x)
            return x

    # Write an equivalent model definition to disk for load_pytorch_model to import.
    with open('./data/mini_model.py', 'w') as f:
        print('\nimport torch\nimport torch.nn\nclass MiniModel(torch.nn.Module):\n\n    def __init__(self):\n        super(MiniModel, self).__init__()\n        self.conv1 = torch.nn.Conv2d(3, 8, kernel_size=2, stride=2, padding=2, bias=False)\n        self.bn1 = torch.nn.BatchNorm2d(8)\n        self.relu1 = torch.nn.ReLU(inplace=True)\n        self.maxpool = torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=1)\n        self.fc = torch.nn.Linear(128, 12)\n\n    def forward(self, *inputs):\n        x = self.conv1(inputs[0])\n        x = self.bn1(x)\n        x = self.relu1(x)\n        x = self.maxpool(x)\n        x = x.view(x.size(0), -1)\n        x = self.fc(x)\n        return x\n ', file=f)
    model = MiniModel()
    model.eval()
    dummy_input = torch.randn(1, 3, 8, 8)
    out1 = model(dummy_input)
    torch.save(model.state_dict(), './data/mini_model.pth')
    # Load class + state dict, then check numerical equivalence after a save/load round trip.
    new_model = utils.load_pytorch_model('MiniModel', './data', 'mini_model', load_state_dict=True)
    utils.match_model_settings(model, new_model)
    torch.save(new_model, './data/saved_mini_model.pth')
    new_model = torch.load('./data/saved_mini_model.pth')
    out2 = new_model(dummy_input)
    assert torch.allclose(out1, out2)
    # Remove the state dict: loading with load_state_dict=True must now fail,
    # while load_state_dict=False still succeeds.
    if os.path.exists('./data/mini_model.pth'):
        os.remove('./data/mini_model.pth')
    if os.path.exists('./data/saved_mini_model.pth'):
        os.remove('./data/saved_mini_model.pth')
    with self.assertRaises(AssertionError):
        _ = utils.load_pytorch_model('MiniModel', './data', 'mini_model', load_state_dict=True)
    _ = utils.load_pytorch_model('MiniModel', './data', 'mini_model', load_state_dict=False)
    # Remove the model definition: loading must fail even without a state dict.
    if os.path.exists('./data/mini_model.py'):
        os.remove('./data/mini_model.py')
    with self.assertRaises(AssertionError):
        _ = utils.load_pytorch_model('MiniModel', './data', 'mini_model', load_state_dict=False)
def test_disable_all_quantizers(self):
    """disable_all_quantizers should disable every quantizer inside the context and restore
    the previously-active ones on exit."""
    model = TinyModel().to(device='cpu')
    dummy_input = torch.rand(1, 3, 32, 32)
    sim = QuantizationSimModel(model, dummy_input=dummy_input)
    # Flatten the per-kind quantizer lists into one list.
    all_quantizers = sum(utils.get_all_quantizers(sim.model), start=[])
    active_quantizers = set((quantizer for quantizer in all_quantizers if quantizer.enabled))
    # Context-manager form: disabled inside, previously-active ones restored after.
    with utils.disable_all_quantizers(sim.model):
        for quantizer in all_quantizers:
            assert (not quantizer.enabled)
    for quantizer in active_quantizers:
        assert quantizer.enabled
    # Plain-call form: apparently disables permanently without entering the context --
    # presumably the utility supports both usages; confirm against its implementation.
    utils.disable_all_quantizers(sim.model)
    for quantizer in all_quantizers:
        assert (not quantizer.enabled)
def build_doc_eval_file(out_file, encodings_dir, encoder_model, k, per_doc=True):
    """Build a JSON evaluation file ranking encoded paragraphs for each dev question.

    For every question, the top-k paragraphs are selected by nearest-neighbor search over
    pre-computed sentence encodings. With per_doc=True the search is restricted to
    paragraphs of the question's own document; otherwise it runs over all paragraphs.

    :param out_file: path of the JSON file to write
    :param encodings_dir: directory holding 'encodings.npz' and 'docs.json'
    :param encoder_model: model directory for the question encoder
    :param k: number of paragraphs to keep per question
    :param per_doc: restrict retrieval to the question's document when True
    """
    print('loading data...')
    corpus = SquadRelevanceCorpus()
    questions = corpus.get_dev()
    spec = QuestionAndParagraphsSpec(batch_size=None, max_num_contexts=1, max_num_question_words=None, max_num_context_words=None)
    voc = corpus.get_vocab()
    encoder = SentenceEncoderSingleContext(model_dir_path=encoder_model, vocabulary=voc, spec=spec, loader=corpus.get_resource_loader())
    par_encs = np.load(join(encodings_dir, 'encodings.npz'))
    with open(join(encodings_dir, 'docs.json'), 'r') as f:
        documents = json.load(f)
    questions_eval_format = []
    # groupby below requires the questions to be sorted by the same key.
    questions = sorted(questions, key=(lambda x: x.paragraph.doc_title))
    if per_doc:
        # Paragraph names look like '<title>_<par_index>'; bucket encodings per document.
        title2par_encs = {}
        for (p_name, rep) in par_encs.items():
            title = '_'.join(p_name.split('_')[:(- 1)])
            if (title in title2par_encs):
                title2par_encs[title].update({p_name: rep})
            else:
                title2par_encs[title] = {p_name: rep}
        for (title, doc_qs) in tqdm(itertools.groupby(questions, key=(lambda x: x.paragraph.doc_title))):
            doc_qs = list(doc_qs)
            q_encodings = encode_squad.encode_questions(encoder, doc_qs)
            # Concatenate per-sentence encodings and remember which rows belong to which paragraph.
            par2ids = {}
            reps = []
            total_sentences = 0
            for (p_name, rep) in title2par_encs[title].items():
                par2ids[p_name] = list(range(total_sentences, (total_sentences + len(rep))))
                reps.append(rep)
                total_sentences += len(rep)
            id2par = {i: p for (p, ids) in par2ids.items() for i in ids}
            reps = np.concatenate(reps, axis=0)
            # Retrieve 2k sentence hits so that k *distinct* paragraphs usually survive dedup.
            top_k = simple_numpy_knn(q_encodings, reps, (k * 2))
            for (idx, question) in enumerate(doc_qs):
                # Order-preserving dedup: `seen.add` returns None, so the condition keeps
                # only the first occurrence of each paragraph.
                seen = set()
                p_names = [id2par[x] for x in top_k[idx] if (not ((id2par[x] in seen) or seen.add(id2par[x])))][:k]
                questions_eval_format.append({'qid': question.question_id, 'question': ' '.join(question.question), 'answers': list(question.answers), 'paragraphs': [documents['_'.join(p_name.split('_')[:(- 1)])][int(p_name.split('_')[(- 1)])] for p_name in p_names]})
    else:
        # Global retrieval over all paragraphs of all documents.
        print('encoding questions')
        q_encodings = encode_squad.encode_questions(encoder, questions)
        par2ids = {}
        reps = []
        total_sentences = 0
        for (p_name, rep) in par_encs.items():
            par2ids[p_name] = list(range(total_sentences, (total_sentences + len(rep))))
            reps.append(rep)
            total_sentences += len(rep)
        id2par = {i: p for (p, ids) in par2ids.items() for i in ids}
        reps = np.concatenate(reps, axis=0)
        print('scoring')
        top_k = simple_numpy_knn(q_encodings, reps, (k * 2))
        for (idx, question) in enumerate(questions):
            seen = set()
            p_names = [id2par[x] for x in top_k[idx] if (not ((id2par[x] in seen) or seen.add(id2par[x])))][:k]
            questions_eval_format.append({'qid': question.question_id, 'question': ' '.join(question.question), 'answers': list(question.answers), 'paragraphs': [documents['_'.join(p_name.split('_')[:(- 1)])][int(p_name.split('_')[(- 1)])] for p_name in p_names]})
    with open(out_file, 'w') as f:
        json.dump(questions_eval_format, f)
class PhysERI4(PhysERI):
    """Proxy ERI that delegates to td.PhysERI4 while enforcing k-point momentum conservation.

    Python 2-era code: ``im_func`` retrieves the underlying function of an unbound
    method so it can be called with this class's ``self``.
    """

    def __init__(self, model, frozen=None):
        # Delegate construction to td.PhysERI4.__init__ (unbound, via im_func).
        td.PhysERI4.__init__.im_func(self, model, frozen=frozen)

    # NOTE(review): indentation was lost in extraction; `symmetries` is reconstructed here
    # as a class attribute (each entry: index permutation, complex-conjugation flag) --
    # confirm placement against the original file.
    symmetries = [((0, 1, 2, 3), False), ((1, 0, 3, 2), False), ((2, 3, 0, 1), True), ((3, 2, 1, 0), True)]

    def __calc_block__(self, item, k):
        # Only compute blocks for momentum-conserving k-point quadruples.
        if (self.kconserv[k[:3]] == k[3]):
            return td.PhysERI4.__calc_block__.im_func(self, item, k)
        else:
            raise ValueError('K is not conserved: {}, expected {}'.format(repr(k), (k[:3] + (self.kconserv[k[:3]],))))
def main(args):
    """Run cityscapes segmentation inference with a pretrained model and save colorized
    prediction images under ./save_color/ (optionally mirroring them to visdom)."""
    modelpath = (args.loadDir + args.loadModel)
    weightspath = (args.loadDir + args.loadWeights)
    print(('Loading model: ' + modelpath))
    print(('Loading weights: ' + weightspath))
    model = Net(NUM_CLASSES)
    model = torch.nn.DataParallel(model)
    if (not args.cpu):
        model = model.cuda()

    def load_my_state_dict(model, state_dict):
        # Copy only weights whose names exist in the target model; extra keys are ignored.
        own_state = model.state_dict()
        for (name, param) in state_dict.items():
            if (name not in own_state):
                continue
            own_state[name].copy_(param)
        return model

    model = load_my_state_dict(model, torch.load(weightspath))
    print('Model and weights LOADED successfully')
    model.eval()
    # NOTE(review): a missing datadir only prints an error and continues -- presumably
    # the DataLoader below then fails; consider whether this should abort instead.
    if (not os.path.exists(args.datadir)):
        print('Error: datadir could not be loaded')
    loader = DataLoader(cityscapes(args.datadir, input_transform_cityscapes, target_transform_cityscapes, subset=args.subset), num_workers=args.num_workers, batch_size=args.batch_size, shuffle=False)
    if args.visualize:
        vis = visdom.Visdom()
    for (step, (images, labels, filename, filenameGt)) in enumerate(loader):
        if (not args.cpu):
            images = images.cuda()
        inputs = Variable(images)
        with torch.no_grad():
            outputs = model(inputs)
        # Argmax over the class dimension -> label map, then colorize for saving.
        label = outputs[0].max(0)[1].byte().cpu().data
        label_color = Colorize()(label.unsqueeze(0))
        # Mirror the input's path under ./save_color/ using the part after 'leftImg8bit/'.
        filenameSave = ('./save_color/' + filename[0].split('leftImg8bit/')[1])
        os.makedirs(os.path.dirname(filenameSave), exist_ok=True)
        label_save = ToPILImage()(label_color)
        label_save.save(filenameSave)
        if args.visualize:
            vis.image(label_color.numpy())
        print(step, filenameSave)
class FieldDefinition(object):
    """Descriptor defining a single named field of a torrent item.

    Instances register themselves in the class-level ``FIELDS`` registry on
    creation, so each field name may be defined only once.

    Fix: ``lookup`` takes ``cls`` but was not declared a classmethod, so the
    intended call form ``FieldDefinition.lookup(name)`` would misbind ``name``
    to ``cls``; the ``@classmethod`` decorator is restored.
    """

    # Global registry of all defined fields, keyed by field name.
    FIELDS = {}

    @classmethod
    def lookup(cls, name):
        """Return ``{'matcher': ...}`` for field ``name``, creating a dynamic
        (manifold) attribute on demand; ``None`` when the name cannot be resolved."""
        try:
            field = cls.FIELDS[name]
        except KeyError:
            # Not statically defined; try to create a dynamic attribute for it.
            field = TorrentProxy.add_manifold_attribute(name)
        return ({'matcher': field._matcher} if field else None)

    def __init__(self, valtype, name, doc, accessor=None, matcher=None, formatter=None, engine_name=None):
        """Register a field named ``name`` converting raw values via ``valtype``.

        :raises RuntimeError: when a field with the same name already exists.
        """
        self.valtype = valtype
        self.name = name
        self.__doc__ = doc
        self._engine_name = engine_name
        self._accessor = accessor
        self._matcher = matcher
        self._formatter = formatter
        if (name in FieldDefinition.FIELDS):
            raise RuntimeError('INTERNAL ERROR: Duplicate field definition')
        FieldDefinition.FIELDS[name] = self

    def __repr__(self):
        return ('<%s(%r, %r, %r)>' % (self.__class__.__name__, self.valtype, self.name, self.__doc__))

    def __get__(self, obj, cls=None):
        # Class-level access returns the descriptor itself; instance access returns the
        # converted value (through the accessor when one was supplied).
        if (obj is None):
            return self
        return self.valtype((self._accessor(obj) if self._accessor else obj._fields[self.name]))

    def __delete__(self, obj):
        raise RuntimeError(("Can't delete field %r" % (self.name,)))
def test_column_lineage_multiple_paths_for_same_column():
    """COALESCE over several subquery aliases of the same physical table should collapse
    to a single tab1.col1 -> tab2.col1 lineage edge."""
    sql = 'INSERT INTO tab2\nSELECT tab1.id,\n coalesce(join_table_1.col1, join_table_2.col1, join_table_3.col1) AS col1\nFROM tab1\n LEFT JOIN (SELECT id, col1 FROM tab1 WHERE flag = 1) AS join_table_1\n ON tab1.id = join_table_1.id\n LEFT JOIN (SELECT id, col1 FROM tab1 WHERE flag = 2) AS join_table_2\n ON tab1.id = join_table_2.id\n LEFT JOIN (SELECT id, col1 FROM tab1 WHERE flag = 3) AS join_table_3\n ON tab1.id = join_table_3.id'
    assert_column_lineage_equal(sql, [(ColumnQualifierTuple('id', 'tab1'), ColumnQualifierTuple('id', 'tab2')), (ColumnQualifierTuple('col1', 'tab1'), ColumnQualifierTuple('col1', 'tab2'))])
def filter_by_size(indices, dataset, max_positions, raise_exception=False):
    """Drop indices whose example size exceeds ``max_positions``.

    Uses a vectorized path when the dataset exposes ``sizes`` as a numpy array
    (or a one-element list of arrays); otherwise falls back to the dynamic
    per-example check. Optionally raises instead of silently skipping.
    """
    if isinstance(max_positions, (float, int)):
        sizes = getattr(dataset, 'sizes', None)
        if isinstance(sizes, np.ndarray):
            selected = sizes[indices]
            ignored = indices[selected > max_positions].tolist()
            indices = indices[selected <= max_positions]
        elif isinstance(sizes, list) and len(sizes) == 1:
            selected = sizes[0][indices]
            ignored = indices[selected > max_positions].tolist()
            indices = indices[selected <= max_positions]
        else:
            indices, ignored = _filter_by_size_dynamic(indices, dataset.size, max_positions)
    else:
        indices, ignored = _filter_by_size_dynamic(indices, dataset.size, max_positions)
    if ignored and raise_exception:
        raise Exception('Size of sample #{} is invalid (={}) since max_positions={}, skip this example with --skip-invalid-size-inputs-valid-test'.format(ignored[0], dataset.size(ignored[0]), max_positions))
    if ignored:
        print('| WARNING: {} samples have invalid sizes and will be skipped, max_positions={}, first few sample ids={}'.format(len(ignored), max_positions, ignored[:10]))
    return indices
def test_assert_reactpy_logged_ignores_level():
    """assert_reactpy_did_log must capture records emitted below the logger's effective level."""
    original_level = ROOT_LOGGER.level
    ROOT_LOGGER.setLevel(logging.INFO)
    try:
        # DEBUG is below INFO, yet the assertion context should still see the record.
        with testing.assert_reactpy_did_log(match_message='.*'):
            ROOT_LOGGER.debug('my message')
    finally:
        # Restore the global logger level regardless of the outcome.
        ROOT_LOGGER.setLevel(original_level)
class Ui_Form(object):
    """Auto-generated Qt Designer UI class (pyuic output) -- edit the .ui file, not this code."""

    def setupUi(self, Form):
        # Instantiate and lay out all widgets; geometry and default values come from the design.
        Form.setObjectName('Form')
        Form.resize(481, 840)
        # Average group: checkable group with a parameter list.
        self.averageGroup = QtWidgets.QGroupBox(Form)
        self.averageGroup.setGeometry(QtCore.QRect(0, 640, 242, 182))
        self.averageGroup.setCheckable(True)
        self.averageGroup.setChecked(False)
        self.averageGroup.setObjectName('averageGroup')
        self.gridLayout_5 = QtWidgets.QGridLayout(self.averageGroup)
        self.gridLayout_5.setContentsMargins(0, 0, 0, 0)
        self.gridLayout_5.setSpacing(0)
        self.gridLayout_5.setObjectName('gridLayout_5')
        self.avgParamList = QtWidgets.QListWidget(self.averageGroup)
        self.avgParamList.setObjectName('avgParamList')
        self.gridLayout_5.addWidget(self.avgParamList, 0, 0, 1, 1)
        # Decimate group: downsampling / trace-limiting controls.
        self.decimateGroup = QtWidgets.QFrame(Form)
        self.decimateGroup.setGeometry(QtCore.QRect(10, 140, 191, 171))
        self.decimateGroup.setObjectName('decimateGroup')
        self.gridLayout_4 = QtWidgets.QGridLayout(self.decimateGroup)
        self.gridLayout_4.setContentsMargins(0, 0, 0, 0)
        self.gridLayout_4.setSpacing(0)
        self.gridLayout_4.setObjectName('gridLayout_4')
        self.clipToViewCheck = QtWidgets.QCheckBox(self.decimateGroup)
        self.clipToViewCheck.setObjectName('clipToViewCheck')
        self.gridLayout_4.addWidget(self.clipToViewCheck, 7, 0, 1, 3)
        self.maxTracesCheck = QtWidgets.QCheckBox(self.decimateGroup)
        self.maxTracesCheck.setObjectName('maxTracesCheck')
        self.gridLayout_4.addWidget(self.maxTracesCheck, 8, 0, 1, 2)
        self.downsampleCheck = QtWidgets.QCheckBox(self.decimateGroup)
        self.downsampleCheck.setObjectName('downsampleCheck')
        self.gridLayout_4.addWidget(self.downsampleCheck, 0, 0, 1, 3)
        self.peakRadio = QtWidgets.QRadioButton(self.decimateGroup)
        self.peakRadio.setChecked(True)
        self.peakRadio.setObjectName('peakRadio')
        self.gridLayout_4.addWidget(self.peakRadio, 6, 1, 1, 2)
        self.maxTracesSpin = QtWidgets.QSpinBox(self.decimateGroup)
        self.maxTracesSpin.setObjectName('maxTracesSpin')
        self.gridLayout_4.addWidget(self.maxTracesSpin, 8, 2, 1, 1)
        self.forgetTracesCheck = QtWidgets.QCheckBox(self.decimateGroup)
        self.forgetTracesCheck.setObjectName('forgetTracesCheck')
        self.gridLayout_4.addWidget(self.forgetTracesCheck, 9, 0, 1, 3)
        self.meanRadio = QtWidgets.QRadioButton(self.decimateGroup)
        self.meanRadio.setObjectName('meanRadio')
        self.gridLayout_4.addWidget(self.meanRadio, 3, 1, 1, 2)
        self.subsampleRadio = QtWidgets.QRadioButton(self.decimateGroup)
        self.subsampleRadio.setObjectName('subsampleRadio')
        self.gridLayout_4.addWidget(self.subsampleRadio, 2, 1, 1, 2)
        self.autoDownsampleCheck = QtWidgets.QCheckBox(self.decimateGroup)
        self.autoDownsampleCheck.setChecked(True)
        self.autoDownsampleCheck.setObjectName('autoDownsampleCheck')
        self.gridLayout_4.addWidget(self.autoDownsampleCheck, 1, 2, 1, 1)
        spacerItem = QtWidgets.QSpacerItem(30, 20, QtWidgets.QSizePolicy.Policy.Maximum, QtWidgets.QSizePolicy.Policy.Minimum)
        self.gridLayout_4.addItem(spacerItem, 2, 0, 1, 1)
        self.downsampleSpin = QtWidgets.QSpinBox(self.decimateGroup)
        self.downsampleSpin.setMinimum(1)
        self.downsampleSpin.setMaximum(100000)
        self.downsampleSpin.setProperty('value', 1)
        self.downsampleSpin.setObjectName('downsampleSpin')
        self.gridLayout_4.addWidget(self.downsampleSpin, 1, 1, 1, 1)
        # Transform group: log/FFT/derivative/phase-map toggles.
        self.transformGroup = QtWidgets.QFrame(Form)
        self.transformGroup.setGeometry(QtCore.QRect(10, 10, 171, 101))
        self.transformGroup.setObjectName('transformGroup')
        self.gridLayout = QtWidgets.QGridLayout(self.transformGroup)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setSpacing(0)
        self.gridLayout.setObjectName('gridLayout')
        self.logYCheck = QtWidgets.QCheckBox(self.transformGroup)
        self.logYCheck.setObjectName('logYCheck')
        self.gridLayout.addWidget(self.logYCheck, 2, 0, 1, 1)
        self.logXCheck = QtWidgets.QCheckBox(self.transformGroup)
        self.logXCheck.setObjectName('logXCheck')
        self.gridLayout.addWidget(self.logXCheck, 1, 0, 1, 1)
        self.fftCheck = QtWidgets.QCheckBox(self.transformGroup)
        self.fftCheck.setObjectName('fftCheck')
        self.gridLayout.addWidget(self.fftCheck, 0, 0, 1, 1)
        self.derivativeCheck = QtWidgets.QCheckBox(self.transformGroup)
        self.derivativeCheck.setObjectName('derivativeCheck')
        self.gridLayout.addWidget(self.derivativeCheck, 3, 0, 1, 1)
        self.phasemapCheck = QtWidgets.QCheckBox(self.transformGroup)
        self.phasemapCheck.setObjectName('phasemapCheck')
        self.gridLayout.addWidget(self.phasemapCheck, 4, 0, 1, 1)
        # Points group.
        self.pointsGroup = QtWidgets.QGroupBox(Form)
        self.pointsGroup.setGeometry(QtCore.QRect(10, 550, 234, 58))
        self.pointsGroup.setCheckable(True)
        self.pointsGroup.setObjectName('pointsGroup')
        self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.pointsGroup)
        self.verticalLayout_5.setObjectName('verticalLayout_5')
        self.autoPointsCheck = QtWidgets.QCheckBox(self.pointsGroup)
        self.autoPointsCheck.setChecked(True)
        self.autoPointsCheck.setObjectName('autoPointsCheck')
        self.verticalLayout_5.addWidget(self.autoPointsCheck)
        # Grid group with opacity slider.
        self.gridGroup = QtWidgets.QFrame(Form)
        self.gridGroup.setGeometry(QtCore.QRect(10, 460, 221, 81))
        self.gridGroup.setObjectName('gridGroup')
        self.gridLayout_2 = QtWidgets.QGridLayout(self.gridGroup)
        self.gridLayout_2.setObjectName('gridLayout_2')
        self.xGridCheck = QtWidgets.QCheckBox(self.gridGroup)
        self.xGridCheck.setObjectName('xGridCheck')
        self.gridLayout_2.addWidget(self.xGridCheck, 0, 0, 1, 2)
        self.yGridCheck = QtWidgets.QCheckBox(self.gridGroup)
        self.yGridCheck.setObjectName('yGridCheck')
        self.gridLayout_2.addWidget(self.yGridCheck, 1, 0, 1, 2)
        self.gridAlphaSlider = QtWidgets.QSlider(self.gridGroup)
        self.gridAlphaSlider.setMaximum(255)
        self.gridAlphaSlider.setProperty('value', 128)
        self.gridAlphaSlider.setOrientation(QtCore.Qt.Orientation.Horizontal)
        self.gridAlphaSlider.setObjectName('gridAlphaSlider')
        self.gridLayout_2.addWidget(self.gridAlphaSlider, 2, 1, 1, 1)
        self.label = QtWidgets.QLabel(self.gridGroup)
        self.label.setObjectName('label')
        self.gridLayout_2.addWidget(self.label, 2, 0, 1, 1)
        # Alpha group.
        self.alphaGroup = QtWidgets.QGroupBox(Form)
        self.alphaGroup.setGeometry(QtCore.QRect(10, 390, 234, 60))
        self.alphaGroup.setCheckable(True)
        self.alphaGroup.setObjectName('alphaGroup')
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.alphaGroup)
        self.horizontalLayout.setObjectName('horizontalLayout')
        self.autoAlphaCheck = QtWidgets.QCheckBox(self.alphaGroup)
        self.autoAlphaCheck.setChecked(False)
        self.autoAlphaCheck.setObjectName('autoAlphaCheck')
        self.horizontalLayout.addWidget(self.autoAlphaCheck)
        self.alphaSlider = QtWidgets.QSlider(self.alphaGroup)
        self.alphaSlider.setMaximum(1000)
        self.alphaSlider.setProperty('value', 1000)
        self.alphaSlider.setOrientation(QtCore.Qt.Orientation.Horizontal)
        self.alphaSlider.setObjectName('alphaSlider')
        self.horizontalLayout.addWidget(self.alphaSlider)
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        # Apply translated window title, labels and tooltips.
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate('Form', 'PyQtGraph'))
        self.averageGroup.setToolTip(_translate('Form', 'Display averages of the curves displayed in this plot. The parameter list allows you to choose parameters to average over (if any are available).'))
        self.averageGroup.setTitle(_translate('Form', 'Average'))
        self.clipToViewCheck.setToolTip(_translate('Form', 'Plot only the portion of each curve that is visible. This assumes X values are uniformly spaced.'))
        self.clipToViewCheck.setText(_translate('Form', 'Clip to View'))
        self.maxTracesCheck.setToolTip(_translate('Form', 'If multiple curves are displayed in this plot, check this box to limit the number of traces that are displayed.'))
        self.maxTracesCheck.setText(_translate('Form', 'Max Traces:'))
        self.downsampleCheck.setText(_translate('Form', 'Downsample'))
        self.peakRadio.setToolTip(_translate('Form', 'Downsample by drawing a saw wave that follows the min and max of the original data. This method produces the best visual representation of the data but is slower.'))
        self.peakRadio.setText(_translate('Form', 'Peak'))
        self.maxTracesSpin.setToolTip(_translate('Form', 'If multiple curves are displayed in this plot, check "Max Traces" and set this value to limit the number of traces that are displayed.'))
        self.forgetTracesCheck.setToolTip(_translate('Form', 'If MaxTraces is checked, remove curves from memory after they are hidden (saves memory, but traces can not be un-hidden).'))
        self.forgetTracesCheck.setText(_translate('Form', 'Forget hidden traces'))
        self.meanRadio.setToolTip(_translate('Form', 'Downsample by taking the mean of N samples.'))
        self.meanRadio.setText(_translate('Form', 'Mean'))
        self.subsampleRadio.setToolTip(_translate('Form', 'Downsample by taking the first of N samples. This method is fastest and least accurate.'))
        self.subsampleRadio.setText(_translate('Form', 'Subsample'))
        self.autoDownsampleCheck.setToolTip(_translate('Form', 'Automatically downsample data based on the visible range. This assumes X values are uniformly spaced.'))
        self.autoDownsampleCheck.setText(_translate('Form', 'Auto'))
        self.downsampleSpin.setToolTip(_translate('Form', 'Downsample data before plotting. (plot every Nth sample)'))
        self.downsampleSpin.setSuffix(_translate('Form', 'x'))
        self.logYCheck.setText(_translate('Form', 'Log Y'))
        self.logXCheck.setText(_translate('Form', 'Log X'))
        self.fftCheck.setText(_translate('Form', 'Power Spectrum (FFT)'))
        self.derivativeCheck.setText(_translate('Form', 'dy/dx'))
        self.phasemapCheck.setText(_translate('Form', "Y vs. Y'"))
        self.pointsGroup.setTitle(_translate('Form', 'Points'))
        self.autoPointsCheck.setText(_translate('Form', 'Auto'))
        self.xGridCheck.setText(_translate('Form', 'Show X Grid'))
        self.yGridCheck.setText(_translate('Form', 'Show Y Grid'))
        self.label.setText(_translate('Form', 'Opacity'))
        self.alphaGroup.setTitle(_translate('Form', 'Alpha'))
        self.autoAlphaCheck.setText(_translate('Form', 'Auto'))
def ql_syscall_writev(ql: Qiling, fd: int, vec: int, vlen: int):
    """Emulate the writev(2) syscall: write the ``vlen`` iovec buffers at ``vec`` to ``fd``.

    Returns the total number of bytes described by the iovec array.
    """
    regreturn = 0
    ptr_size = ql.arch.pointersize
    entry_size = ptr_size * 2  # each struct iovec is a base pointer followed by a length
    raw_iov = ql.mem.read(vec, vlen * entry_size)
    ql.log.debug('writev() CONTENT:')
    for entry in range(vlen):
        base = entry * entry_size
        buf_addr = ql.unpack(raw_iov[base:base + ptr_size])
        buf_len = ql.unpack(raw_iov[base + ptr_size:base + entry_size])
        regreturn += buf_len
        buf = ql.mem.read(buf_addr, buf_len)
        ql.log.debug(f'{bytes(buf)}')
        # Only forward to fd objects that actually expose a write method.
        if hasattr(ql.os.fd[fd], 'write'):
            ql.os.fd[fd].write(buf)
    return regreturn
class SmilesRnnSampler():
    """Samples SELFIES sequences from a SmilesRnn and converts them to SMILES strings."""

    def __init__(self, device: str, batch_size=64) -> None:
        """
        :param device: torch device string (e.g. 'cpu' or 'cuda')
        :param batch_size: maximum batch size used while sampling
        """
        self.device = device
        self.batch_size = batch_size
        self.sd = SelfiesCharDictionary()

    def sample(self, model: SmilesRnn, num_to_sample: int, max_seq_len=100):
        """Sample ``num_to_sample`` sequences from ``model``.

        Returns a tuple ``(selfies_list, smiles_list)`` containing only the samples
        whose SELFIES decode successfully to SMILES.
        """
        sampler = ActionSampler(max_batch_size=self.batch_size, max_seq_length=max_seq_len, device=self.device)
        model.eval()
        with torch.no_grad():
            indices = sampler.sample(model, num_samples=num_to_sample)
            selfies = self.sd.matrix_to_smiles(indices)
        (smiles_list, selfies_list) = ([], [])
        for s in selfies:
            try:
                ss = selfies2smiles(s)
            except Exception:
                # Fix: was a bare `except:`; skip only genuine decode failures without
                # swallowing KeyboardInterrupt/SystemExit.
                continue
            smiles_list.append(ss)
            selfies_list.append(s)
        return (selfies_list, smiles_list)
def _layer_dict(manifest_layer, index):
    """Serialize one manifest layer into a plain dict for API output.

    The layer's `command` is decoded from JSON when possible; a non-JSON
    command string is wrapped in a single-element list so callers always
    receive either None or a list.
    """
    command = None
    if manifest_layer.command:
        try:
            command = json.loads(manifest_layer.command)
        except (TypeError, ValueError):
            # Not valid JSON — expose the raw command as a one-element list.
            command = [manifest_layer.command]
    return {
        'index': index,
        'compressed_size': manifest_layer.compressed_size,
        'is_remote': manifest_layer.is_remote,
        'urls': manifest_layer.urls,
        'command': command,
        'comment': manifest_layer.comment,
        'author': manifest_layer.author,
        'blob_digest': str(manifest_layer.blob_digest),
        'created_datetime': format_date(manifest_layer.created_datetime),
    }
class EnvironmentAction(_ActionType):
    """Global action that sets a new Environment, either directly or via a
    CatalogReference.

    Attributes:
        environment: the Environment or CatalogReference to apply.
    """

    def __init__(self, environment):
        """
        Args:
            environment: Environment or CatalogReference instance.

        Raises:
            TypeError: if `environment` is neither type.
        """
        if not isinstance(environment, (Environment, CatalogReference)):
            raise TypeError('environment input not of type Environment or CatalogReference')
        self.environment = environment

    def __eq__(self, other):
        if isinstance(other, EnvironmentAction):
            if self.environment == other.environment:
                return True
        # Fixed: the original fell off the end and returned None for
        # non-EnvironmentAction operands; return an explicit bool.
        return False

    @staticmethod
    def parse(element):
        """Parse an EnvironmentAction from a <GlobalAction> XML element.

        Raises:
            ValueError: if neither an Environment nor a CatalogReference
                child is present (the original raised an obscure NameError).
        """
        action_element = element.find('EnvironmentAction')
        # ElementTree elements have surprising truthiness; compare with
        # `is not None` as the ET docs recommend (was `!= None`).
        if action_element.find('Environment') is not None:
            environment = Environment.parse(action_element.find('Environment'))
        elif action_element.find('CatalogReference') is not None:
            environment = CatalogReference.parse(action_element.find('CatalogReference'))
        else:
            raise ValueError('EnvironmentAction contains neither an Environment nor a CatalogReference')
        return EnvironmentAction(environment)

    def get_element(self):
        """Return the <GlobalAction> ElementTree element for this action."""
        element = ET.Element('GlobalAction')
        envaction = ET.SubElement(element, 'EnvironmentAction')
        envaction.append(self.environment.get_element())
        return element
def test_duplicate_robot_creation(app):
    """Creating a robot whose name already exists must fail with HTTP 400
    and a descriptive error message, for both user and org namespaces."""
    with client_with_identity('devtable', app) as cl:
        # User-namespace robot that already exists.
        user_resp = conduct_api_call(cl, UserRobot, 'PUT', {'robot_shortname': 'dtrobot'}, expected_code=400)
        assert user_resp.json['error_message'] == 'Existing robot with name: devtable+dtrobot'
        # Organization-namespace robot that already exists.
        org_resp = conduct_api_call(cl, OrgRobot, 'PUT', {'orgname': 'buynlarge', 'robot_shortname': 'coolrobot'}, expected_code=400)
        assert org_resp.json['error_message'] == 'Existing robot with name: buynlarge+coolrobot'
class Rectangularity():
    """Ratio of each geometry's area to the area of its minimum rotated
    rectangle (1.0 for a perfect rectangle, smaller otherwise).

    Attributes:
        gdf: the original (unmodified) GeoDataFrame.
        areas: the area values used, as a Series.
        series: the per-geometry rectangularity values.
    """

    def __init__(self, gdf, areas=None):
        # Keep the caller's frame untouched; all work happens on a copy.
        self.gdf = gdf
        frame = gdf.copy()
        if areas is None:
            areas = frame.geometry.area
        if not isinstance(areas, str):
            # Materialize area values as a column so lookups below are uniform
            # whether the caller passed a column name or an array of values.
            frame['mm_a'] = areas
            areas = 'mm_a'
        self.areas = frame[areas]
        bounding = shapely.minimum_rotated_rectangle(frame.geometry.array)
        self.series = frame[areas] / shapely.area(bounding)
def parse_args():
    """Parse this script's command-line options.

    Returns:
        argparse.Namespace with `img_folder`, `prefix`, `out_path` (required
        strings), `start` (optional int, default None) and `overwrite`
        (bool flag, default False).
    """
    parser = argparse.ArgumentParser()
    # The three required string options share identical settings.
    for flag in ('--img-folder', '--prefix', '--out-path'):
        parser.add_argument(flag, type=str, required=True)
    parser.add_argument('--start', type=int, default=None)
    parser.add_argument('--overwrite', action='store_true', default=False)
    return parser.parse_args()
class W_InterposeStructBase(values_struct.W_RootStruct):
    """Base for struct proxies (chaperones/impersonators).

    Wraps an `inner` struct and routes field reads/writes through optional
    handler and override procedures stored in `map`; `base` is the struct
    at the bottom of the proxy chain (proxies sharing a map are collapsed).
    Written in RPython style (`_attrs_`, `_immutable_fields_`, jit hints).
    """
    # Prototype empty maps from which concrete handler/property maps grow.
    EMPTY_HANDLER_MAP = make_caching_map_type('get_storage_index', int).EMPTY
    EMPTY_PROPERTY_MAP = make_map_type('get_storage_index', W_Object).EMPTY
    _attrs_ = ['inner', 'base', 'map']
    _immutable_fields_ = ['inner', 'base', 'map']
    def __init__(self, inner, map):
        # `inner`: the directly proxied struct; `map`: handlers/overrides/
        # properties attached at this proxy layer.
        self.inner = inner
        self.map = map
        # If the wrapped proxy shares this exact map, this layer adds no new
        # behavior, so resolve `base` one level deeper to shorten chains.
        if (isinstance(inner, W_InterposeStructBase) and (map is inner.map)):
            self.base = inner.base
        else:
            self.base = inner
    def get_storage_index(self, idx):
        return self._get_list(idx)
    def get_proxied(self):
        # The object this proxy directly wraps.
        return self.inner
    def get_base(self):
        # The unproxied struct at the bottom of the chain.
        return self.base
    def is_proxy(self):
        return True
    def get_property(self, prop, default=None):
        return self.map.lookup_property(prop, self, default=default)
    def immutable(self):
        # Immutability/printing/type queries all delegate to the base struct.
        return get_base_object(self.base).immutable()
    def tostring(self):
        return get_base_object(self.base).tostring()
    def post_ref_cont(self, interp, app, env, cont):
        # Subclass hook: continuation applied to a field value after a ref.
        raise NotImplementedError('abstract method')
    def post_set_cont(self, op, field, val, app, env, cont):
        # Subclass hook: continuation applied after a field set.
        raise NotImplementedError('abstract method')
    def is_non_interposing_chaperone(self):
        # True when this layer only attaches properties (no field accessors
        # are interposed).
        map = jit.promote(self.map)
        return ((not has_accessor(map.handlers)) and has_property_descriptor(map.properties))
    def replace_proxied(self, other):
        # Rebuild this proxy layer around a different inner object,
        # preserving the current storage and map.
        storage = self._get_full_list()
        return self.make(storage, other, self.map)
    def struct_type(self):
        return get_base_object(self.base).struct_type()
    # Handler/override lookups for field accessors and mutators; each kind
    # is keyed by a distinct tag of the field index.
    def get_handler_accessor(self, field):
        idx = tag_handler_accessor(field)
        return self.map.lookup_handler(idx, self)
    def get_override_accessor(self, field):
        idx = tag_override_accessor(field)
        return self.map.lookup_handler(idx, self)
    def get_handler_mutator(self, field):
        idx = tag_handler_mutator(field)
        return self.map.lookup_handler(idx, self)
    def get_override_mutator(self, field):
        idx = tag_override_mutator(field)
        return self.map.lookup_handler(idx, self)
    # NOTE(review): this bare `_loop(...)` call looks like a decorator that
    # lost its leading '@' during extraction — confirm against upstream.
    _loop(enter_above_depth(5), always_use_labels=False)
    def ref_with_extra_info(self, field, app, env, cont):
        """Field read: run override (if any) on inner, then handler via cont."""
        handler = self.get_handler_accessor(field)
        override = self.get_override_accessor(field)
        # Nothing interposed at this layer: delegate straight to base.
        if ((handler is None) and (override is None)):
            return self.base.ref_with_extra_info(field, app, env, cont)
        if (handler is not None):
            cont = self.post_ref_cont(handler, app, env, cont)
        if (override is not None):
            return override.call_with_extra_info([self.inner], env, cont, app)
        return self.inner.ref_with_extra_info(field, app, env, cont)
    # NOTE(review): bare `_loop(...)` — probably a decorator missing '@'; see above.
    _loop(enter_above_depth(5), always_use_labels=False)
    def set_with_extra_info(self, field, val, app, env, cont):
        """Field write: handler filters the value, then override performs the set."""
        handler = self.get_handler_mutator(field)
        override = self.get_override_mutator(field)
        if ((handler is None) and (override is None)):
            return self.base.set_with_extra_info(field, val, app, env, cont)
        if (handler is None):
            return self.inner.set_with_extra_info(field, val, app, env, cont)
        after = self.post_set_cont(override, field, val, app, env, cont)
        return handler.call_with_extra_info([self, val], env, after, app)
    def get_prop(self, property, env, cont):
        """Property read: apply the stored (op, interp) pair, if present."""
        pair = self.get_property(property, default=NONE_PAIR)
        assert (type(pair) is Pair)
        (op, interp) = pair
        # No property interposed here: ask the wrapped object.
        if ((op is None) or (interp is None)):
            return self.inner.get_prop(property, env, cont)
        after = self.post_ref_cont(interp, None, env, cont)
        return op.call([self.inner], env, after)
    # NOTE(review): bare `_loop(...)` — probably a decorator missing '@'; see above.
    _loop(enter_above_depth(5), always_use_labels=False)
    def get_struct_info(self, env, cont):
        handler = self.map.lookup_handler(INFO_HANDLER_IDX, self)
        if (handler is not None):
            cont = call_cont(handler, env, cont)
        return self.inner.get_struct_info(env, cont)
    def get_arity(self, promote=False):
        return get_base_object(self.base).get_arity(promote)
    def vals(self):
        base = get_base_object(self.base)
        assert isinstance(base, values_struct.W_RootStruct)
        return base.vals()
def test_multiline_import_snippets(config, workspace):
    """Completions inside a parenthesized multi-line import should insert the
    bare name, with no call-snippet placeholders, even with snippet support
    and parameter snippets enabled."""
    source = 'from datetime import(\n    date,\n    datetime)\na=date'
    doc = Document(DOC_URI, workspace, source)
    # Advertise snippet support and turn on parameter snippets.
    config.capabilities['textDocument'] = {'completion': {'completionItem': {'snippetSupport': True}}}
    config.update({'plugins': {'jedi_completion': {'include_params': True}}})
    # Cursor after 'date' on line 1, then after 'datet' on line 2.
    for position, expected in (({'line': 1, 'character': 5}, 'date'), ({'line': 2, 'character': 9}, 'datetime')):
        completions = pylsp_jedi_completions(config, doc, position)
        assert completions[0]['insertText'] == expected
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.