code stringlengths 281 23.7M |
|---|
def process_rule_tables(c, filenames, reporter):
    """Merge the ``rule`` tables of several source databases into the current one.

    Args:
        c: SQLite cursor/connection wrapper used for all statements.
        filenames: iterable of source database file paths to merge.
        reporter: progress reporter exposing a ``report(msg)`` method.
    """
    start_time = time.time()
    reporter.report('[Stage 3/7] Merging rule tables ...')
    create_rule_table(c)
    # Merge each source DB inside its own transaction so a failure rolls back
    # only that file's contribution.
    for (db_id, progress_str, filename) in enumerate_progress(filenames):
        with transaction(c):
            create_rule_map_table(c, db_id)
            # Attach the source DB under the schema name "old" for the copy.
            with attach_as_old(c, filename):
                (num_rules,) = next(c.execute('SELECT count(*) FROM old.rule'))
                with progress(reporter, f'[Stage 3/7] #rules: {num_rules} {progress_str}'):
                    process_rule_table(c, db_id)
    with transaction(c):
        (num_rules,) = next(c.execute('SELECT count(*) FROM rule'))
        with progress(reporter, f'[Stage 3/7] Exporting {num_rules} rule records'):
            export_rule_table(c)
        # Drop the working table contents once exported.
        clear_rule_table(c)
    end_time = time.time()
    reporter.report(f'[Stage 3/7] Merged {num_rules} rule records in {SECS(start_time, end_time)}.')
def volume_weighted_average_price(prices_tms: PricesSeries, volumes_tms: QFSeries, interval: Timedelta) -> PricesSeries:
    """Compute the volume-weighted average price over consecutive windows.

    The index is split into consecutive windows of length *interval*; each
    window's VWAP is recorded at the window's end timestamp. Windows with no
    traded volume fall back to the plain mean price; empty windows are skipped.

    Args:
        prices_tms: price series; must share its index with *volumes_tms*.
        volumes_tms: volume series aligned with *prices_tms*.
        interval: length of each aggregation window.

    Returns:
        PricesSeries of finite, non-zero VWAP values indexed by window end.
    """
    assert prices_tms.index.equals(volumes_tms.index)
    last_date = prices_tms.index[(- 1)]
    beginning_of_window = prices_tms.index[0]
    end_of_window = (beginning_of_window + interval)
    weighted_avg_price_tms = PricesSeries(name=prices_tms.name)
    while (end_of_window < last_date):
        # NOTE(review): .drop assumes end_of_window is present in the index —
        # verify for irregular time indices.
        prices_in_window = prices_tms.loc[beginning_of_window:end_of_window].drop([end_of_window]).values
        volumes_in_window = volumes_tms.loc[beginning_of_window:end_of_window].drop([end_of_window]).values
        if (prices_in_window.size == 0):
            # BUG FIX: the original `continue` skipped the window-advance
            # statements at the bottom of the loop, so the first empty window
            # caused an infinite loop. Advance the window before continuing.
            beginning_of_window = end_of_window
            end_of_window = (end_of_window + interval)
            continue
        if (count_nonzero(volumes_in_window) == 0):
            # No volume traded in this window: fall back to unweighted mean.
            weighted_avg_price = mean(prices_in_window)
        else:
            weighted_price_sum = prices_in_window.dot(volumes_in_window)
            volume_sum = sum(volumes_in_window)
            weighted_avg_price = (weighted_price_sum / volume_sum)
        # Store only meaningful values (finite and non-zero).
        if (is_finite_number(weighted_avg_price) and (weighted_avg_price != 0)):
            weighted_avg_price_tms[end_of_window] = weighted_avg_price
        beginning_of_window = end_of_window
        end_of_window = (end_of_window + interval)
    return weighted_avg_price_tms
class TAM(nn.Module):
    """Temporal Adaptive Module.

    Branch G learns a softmax-normalized dynamic temporal-convolution kernel
    per (video, channel); branch L learns sigmoid temporal attention per
    (channel, frame). Both are applied over the temporal axis of stacked
    frame features.
    """

    def __init__(self, in_channels, n_segment, kernel_size=3, stride=1, padding=1):
        # n_segment: number of frames (temporal length) per video clip.
        super(TAM, self).__init__()
        self.in_channels = in_channels
        self.n_segment = n_segment
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        print('TAM with kernel_size {}.'.format(kernel_size))
        # G: maps each channel's T-length temporal profile to a dynamic conv
        # kernel of size `kernel_size` (softmax over the kernel taps).
        self.G = nn.Sequential(nn.Linear(n_segment, (n_segment * 2), bias=False), nn.BatchNorm1d((n_segment * 2)), nn.ReLU(inplace=True), nn.Linear((n_segment * 2), kernel_size, bias=False), nn.Softmax((- 1)))
        # L: bottlenecked Conv1d stack producing per-(channel, frame)
        # sigmoid attention weights.
        self.L = nn.Sequential(nn.Conv1d(in_channels, (in_channels // 4), kernel_size, stride=1, padding=(kernel_size // 2), bias=False), nn.BatchNorm1d((in_channels // 4)), nn.ReLU(inplace=True), nn.Conv1d((in_channels // 4), in_channels, 1, bias=False), nn.Sigmoid())

    def forward(self, x):
        # x: (N*T, C, H, W) — frames of each clip stacked along the batch dim
        # (established by the view/permute below).
        (nt, c, h, w) = x.size()
        t = self.n_segment
        n_batch = (nt // t)
        # Regroup to (N, C, T, H, W) so time becomes a convolvable axis.
        new_x = x.view(n_batch, t, c, h, w).permute(0, 2, 1, 3, 4).contiguous()
        # Temporal profile per (video, channel): spatial global average pool.
        out = F.adaptive_avg_pool2d(new_x.view((n_batch * c), t, h, w), (1, 1))
        out = out.view((- 1), t)
        # Dynamic depthwise temporal kernel, one per (video, channel).
        conv_kernel = self.G(out.view((- 1), t)).view((n_batch * c), 1, (- 1), 1)
        # Per-channel temporal attention, broadcast over H and W.
        local_activation = self.L(out.view(n_batch, c, t)).view(n_batch, c, t, 1, 1)
        new_x = (new_x * local_activation)
        # Apply the dynamic kernel as a grouped conv over the (T, H*W) plane:
        # groups = N*C makes each kernel act on exactly one (video, channel).
        out = F.conv2d(new_x.view(1, (n_batch * c), t, (h * w)), conv_kernel, bias=None, stride=(self.stride, 1), padding=(self.padding, 0), groups=(n_batch * c))
        out = out.view(n_batch, c, t, h, w)
        out = out.permute(0, 2, 1, 3, 4).contiguous().view(nt, c, h, w)
        return out
def _set_device(args):
    """Resolve args['device'] (a list of GPU ids, -1 meaning CPU) into a list
    of ``torch.device`` objects, replacing the entry in place.

    Args:
        args: dict-like config whose 'device' entry is an iterable of ints.
    """
    device_type = args['device']
    gpus = []
    for device in device_type:
        # BUG FIX: the original compared the whole list (`device_type == -1`)
        # instead of the current element, so the CPU branch could never fire
        # and every entry was formatted as a CUDA device.
        if (device == (- 1)):
            device = torch.device('cpu')
        else:
            device = torch.device('cuda:{}'.format(device))
        gpus.append(device)
    args['device'] = gpus
class SawyerDisassembleV2Policy(Policy):
    """Scripted policy for the Sawyer disassemble-v2 task: align over the
    wrench, descend onto it, grasp, then lift it off the peg."""
    # NOTE(review): the original contained a bare `_fully_parsed` expression
    # here, which raises NameError at class-creation time; it looks like a
    # stripped `@assert_fully_parsed` decorator on _parse_obs — confirm
    # against the upstream source.

    @staticmethod
    def _parse_obs(obs):
        """Split the flat observation vector into named slices.

        BUG FIX: restored @staticmethod — the function has no `self`
        parameter but is called as ``self._parse_obs(obs)``, which without the
        decorator would pass the instance in as `obs`.
        """
        return {'hand_pos': obs[:3], 'gripper': obs[3], 'wrench_pos': obs[4:7], 'peg_pos': obs[(- 3):], 'unused_info': obs[7:(- 3)]}

    def get_action(self, obs):
        """Compute a 4D action (xyz position delta + grab effort)."""
        o_d = self._parse_obs(obs)
        action = Action({'delta_pos': np.arange(3), 'grab_effort': 3})
        action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=10.0)
        action['grab_effort'] = self._grab_effort(o_d)
        return action.array

    @staticmethod
    def _desired_pos(o_d):
        """Target hand position for the current phase of the script.

        BUG FIX: restored @staticmethod (see _parse_obs).
        """
        pos_curr = o_d['hand_pos']
        pos_wrench = (o_d['wrench_pos'] + np.array([(- 0.02), 0.0, 0.01]))
        # (removed unused local `pos_peg` computed from o_d['peg_pos'])
        if (np.linalg.norm((pos_curr[:2] - pos_wrench[:2])) > 0.02):
            # Not yet horizontally aligned: hover 0.1 above the wrench.
            return (pos_wrench + np.array([0.0, 0.0, 0.1]))
        elif (abs((pos_curr[2] - pos_wrench[2])) > 0.03):
            # Aligned: descend to the wrench.
            return pos_wrench
        else:
            # At the wrench: lift straight up.
            return (pos_curr + np.array([0.0, 0.0, 0.1]))

    @staticmethod
    def _grab_effort(o_d):
        """Gripper effort: open (0.0) while approaching, close (0.8) when at
        the wrench.

        BUG FIX: restored @staticmethod (see _parse_obs).
        """
        pos_curr = o_d['hand_pos']
        pos_wrench = (o_d['wrench_pos'] + np.array([(- 0.02), 0.0, 0.01]))
        if ((np.linalg.norm((pos_curr[:2] - pos_wrench[:2])) > 0.02) or (abs((pos_curr[2] - pos_wrench[2])) > 0.07)):
            return 0.0
        else:
            return 0.8
class PrefetchDataLoader(DataLoader):
    """DataLoader whose iterator prefetches batches through a background queue."""

    def __init__(self, num_prefetch_queue, **kwargs):
        """Remember the queue depth, then defer to the normal DataLoader setup."""
        self.num_prefetch_queue = num_prefetch_queue
        super().__init__(**kwargs)

    def __iter__(self):
        """Wrap the parent iterator in a prefetching generator."""
        base_iterator = super().__iter__()
        return PrefetchGenerator(base_iterator, self.num_prefetch_queue)
class _F0Gate(cirq.MatrixGate):
    """Two-qubit gate defined by a fixed 4x4 matrix, drawn as 'F0' on both wires."""

    def __init__(self):
        inv_sqrt2 = 2 ** (-0.5)  # 1/sqrt(2)
        matrix = np.array([
            [1, 0, 0, 0],
            [0, -inv_sqrt2, inv_sqrt2, 0],
            [0, inv_sqrt2, inv_sqrt2, 0],
            [0, 0, 0, -1],
        ])
        cirq.MatrixGate.__init__(self, matrix, qid_shape=(2, 2))

    def _circuit_diagram_info_(self, args: cirq.CircuitDiagramInfoArgs) -> cirq.CircuitDiagramInfo:
        # The unicode and ASCII renderings were identical in the original
        # branches, so a single symbol tuple suffices.
        return cirq.CircuitDiagramInfo(wire_symbols=('F0', 'F0'))
class TConfigCheckMenuItem(TestCase):
    """Tests that ConfigCheckMenuItem stays in sync with the config backend."""

    def setUp(self):
        config.init()

    def tearDown(self):
        config.quit()

    def test_toggle(self):
        """Toggling the menu item writes through to the config value."""
        config.set('memory', 'bar', 'on')
        c = ConfigCheckMenuItem('dummy', 'memory', 'bar')
        c.set_active(True)
        self.assertTrue((config.getboolean('memory', 'bar') and c.get_active()))
        c.set_active(False)
        # Let pending GTK events deliver the toggle before asserting.
        run_gtk_loop()
        self.assertFalse((config.getboolean('memory', 'bar') or c.get_active()))

    def test_populate(self):
        """With populate=True the item initializes itself from the config value."""
        config.set('memory', 'bar', 'on')
        c = ConfigCheckMenuItem('dummy', 'memory', 'bar', populate=True)
        run_gtk_loop()
        self.assertTrue(c.get_active())
        config.set('memory', 'bar', 'off')
        c = ConfigCheckMenuItem('dummy', 'memory', 'bar', populate=True)
        run_gtk_loop()
        self.assertFalse(c.get_active())
def _matmul_flop_jit(inputs: Tuple[torch.Tensor], outputs: Tuple[Any]) -> Number:
    """FLOP count for a traced matmul op: numel(A) * last dimension of B."""
    shapes = [tensor.shape for tensor in inputs]
    assert len(shapes) == 2, shapes
    # Inner dimensions must agree for a valid matrix product.
    assert shapes[0][-1] == shapes[1][-2], shapes
    return inputs[0].numel() * shapes[-1][-1]
class MLP():
    """Diagonal-Gaussian MLP policy.

    A fully-connected mean network plus a learned, state-independent log-std
    vector, together with a frozen "old" copy of both used for
    likelihood-ratio and KL computations in trust-region updates.
    """

    def __init__(self, env_spec, hidden_sizes=(64, 64), min_log_std=(- 3), init_log_std=0, seed=None, device=torch.device('cpu')):
        # n: observation dim, m: action dim.
        self.n = env_spec.observation_dim
        self.m = env_spec.action_dim
        self.min_log_std = min_log_std
        self.device = device
        if (seed is not None):
            torch.manual_seed(seed)
            np.random.seed(seed)
        self.model = FCNetwork(self.n, self.m, hidden_sizes)
        # Shrink the final layer's weights and bias so initial actions are small.
        for param in list(self.model.parameters())[(- 2):]:
            param.data = (0.01 * param.data)
        self.log_std = Variable((torch.ones(self.m) * init_log_std), requires_grad=True)
        self.trainable_params = (list(self.model.parameters()) + [self.log_std])
        # Frozen "old" copies for likelihood-ratio / KL computations.
        self.old_model = FCNetwork(self.n, self.m, hidden_sizes)
        self.old_log_std = Variable((torch.ones(self.m) * init_log_std))
        self.old_params = (list(self.old_model.parameters()) + [self.old_log_std])
        for (idx, param) in enumerate(self.old_params):
            param.data = self.trainable_params[idx].data.clone()
        self.log_std_val = np.float64(self.log_std.data.numpy().ravel())
        # Flattened-parameter bookkeeping used by get/set_param_values.
        self.param_shapes = [p.cpu().data.numpy().shape for p in self.trainable_params]
        self.param_sizes = [p.cpu().data.numpy().size for p in self.trainable_params]
        self.d = np.sum(self.param_sizes)
        self.obs_var = Variable(torch.randn(self.n), requires_grad=False)
        self.to(self.device)

    def seed(self, seed):
        """Seed both the torch and numpy RNGs."""
        torch.manual_seed(seed)
        np.random.seed(seed)

    def to(self, device):
        """Move both networks and the log-std tensors to *device*."""
        self.device = device
        self.obs_var = self.obs_var.to(self.device)
        self.log_std = self.log_std.to(self.device)
        self.old_log_std = self.old_log_std.to(self.device)
        self.model.to(self.device)
        self.old_model.to(self.device)
        # Rebuild the list so it references the moved tensors.
        self.trainable_params = (list(self.model.parameters()) + [self.log_std])

    def get_param_values(self):
        """Return all trainable parameters flattened into a single numpy vector."""
        params = np.concatenate([p.contiguous().view((- 1)).cpu().data.numpy() for p in self.trainable_params])
        return params.copy()

    def set_param_values(self, new_params, set_new=True, set_old=True):
        """Load a flat numpy parameter vector into the live and/or old parameter sets."""
        if set_new:
            current_idx = 0
            for (idx, param) in enumerate(self.trainable_params):
                vals = new_params[current_idx:(current_idx + self.param_sizes[idx])]
                vals = vals.reshape(self.param_shapes[idx])
                param.data = torch.from_numpy(vals).to(self.device).float()
                current_idx += self.param_sizes[idx]
            # Clamp log-std from below (min_log_std) for numerical stability.
            self.trainable_params[(- 1)].data = torch.clamp(self.trainable_params[(- 1)], self.min_log_std).data
            self.log_std_val = np.float64(self.log_std.cpu().data.numpy().ravel())
        if set_old:
            current_idx = 0
            for (idx, param) in enumerate(self.old_params):
                vals = new_params[current_idx:(current_idx + self.param_sizes[idx])]
                vals = vals.reshape(self.param_shapes[idx])
                param.data = torch.from_numpy(vals).to(self.device).float()
                current_idx += self.param_sizes[idx]
            self.old_params[(- 1)].data = torch.clamp(self.old_params[(- 1)], self.min_log_std).data

    def get_action(self, observation):
        """Sample a Gaussian action; return [action, info_dict]."""
        o = np.float32(observation.reshape(1, (- 1)))
        self.obs_var = torch.from_numpy(o).to(self.device)
        mean = self.model(self.obs_var).cpu().data.numpy().ravel()
        noise = (np.exp(self.log_std_val) * np.random.randn(self.m))
        action = (mean + noise)
        return [action, {'mean': mean, 'log_std': self.log_std_val, 'evaluation': mean}]

    def mean_LL(self, observations, actions, model=None, log_std=None):
        """Return (action means, per-sample Gaussian log-likelihood of *actions*)."""
        model = (self.model if (model is None) else model)
        log_std = (self.log_std if (log_std is None) else log_std)
        if (type(observations) is not torch.Tensor):
            obs_var = Variable(torch.from_numpy(observations).float(), requires_grad=False)
        else:
            obs_var = observations
        obs_var = obs_var.to(self.device)
        if (type(actions) is not torch.Tensor):
            act_var = Variable(torch.from_numpy(actions).float(), requires_grad=False)
        else:
            act_var = actions
        act_var = act_var.to(self.device)
        mean = model(obs_var)
        # Diagonal-Gaussian log density of act_var under N(mean, exp(log_std)^2).
        zs = ((act_var - mean) / torch.exp(log_std))
        LL = ((((- 0.5) * torch.sum((zs ** 2), dim=1)) + (- torch.sum(log_std))) + (((- 0.5) * self.m) * np.log((2 * np.pi))))
        return (mean, LL)

    def log_likelihood(self, observations, actions, model=None, log_std=None):
        """Log-likelihood as a numpy array.

        NOTE(review): no .cpu() before .numpy() here (unlike set_param_values)
        — presumably only used with CPU tensors; verify for GPU runs.
        """
        (mean, LL) = self.mean_LL(observations, actions, model, log_std)
        return LL.data.numpy()

    def old_dist_info(self, observations, actions):
        """[LL, mean, log_std] under the frozen old policy."""
        (mean, LL) = self.mean_LL(observations, actions, self.old_model, self.old_log_std)
        return [LL, mean, self.old_log_std]

    def new_dist_info(self, observations, actions):
        """[LL, mean, log_std] under the current policy."""
        (mean, LL) = self.mean_LL(observations, actions, self.model, self.log_std)
        return [LL, mean, self.log_std]

    def likelihood_ratio(self, new_dist_info, old_dist_info):
        """Per-sample importance ratio exp(LL_new - LL_old)."""
        LL_old = old_dist_info[0]
        LL_new = new_dist_info[0]
        LR = torch.exp((LL_new - LL_old))
        return LR

    def mean_kl(self, new_dist_info, old_dist_info):
        """Mean KL(old || new) between the diagonal Gaussians (1e-08 guards Dr)."""
        old_log_std = old_dist_info[2]
        new_log_std = new_dist_info[2]
        old_std = torch.exp(old_log_std)
        new_std = torch.exp(new_log_std)
        old_mean = old_dist_info[1]
        new_mean = new_dist_info[1]
        Nr = ((((old_mean - new_mean) ** 2) + (old_std ** 2)) - (new_std ** 2))
        Dr = ((2 * (new_std ** 2)) + 1e-08)
        sample_kl = torch.sum((((Nr / Dr) + new_log_std) - old_log_std), dim=1)
        return torch.mean(sample_kl)
_fixtures(WebFixture)
# NOTE(review): the line above looks like a stripped `@with_fixtures`-style
# decorator from the reahl test framework — confirm against upstream.
def test_adding_chart_with_ajax(web_fixture):
    """Selecting 'Show chart' refreshes the form via ajax and inserts the
    Chart (and plotly.js) into the page.

    NOTE(review): `chart` and `plotly_js` are referenced but not defined in
    this view of the file — presumably module-level XPath helpers.
    """
    class MyForm(Form):
        def __init__(self, view):
            self.choice = 1
            super().__init__(view, 'my_form')
            self.enable_refresh()
            self.use_layout(FormLayout())
            # Changing the select re-renders this whole form via ajax.
            self.layout.add_input(SelectInput(self, self.fields.choice, refresh_widget=self))
            if (self.choice == 2):
                self.add_child(Chart(view, go.Figure(), 'thechart'))
        fields = ExposedNames()
        fields.choice = (lambda i: ChoiceField([Choice(1, IntegerField(label='Hide chart')), Choice(2, IntegerField(label='Show chart'))], label='Choice'))
    wsgi_app = web_fixture.new_wsgi_app(child_factory=MyForm.factory(), enable_js=True)
    web_fixture.reahl_server.set_app(wsgi_app)
    browser = web_fixture.driver_browser
    browser.open('/')
    # Initially the chart (and plotly.js) must be absent.
    assert (not browser.is_element_present(chart))
    assert (not browser.is_element_present(plotly_js))
    select_input = XPath.select_labelled('Choice')
    browser.select(select_input, 'Show chart')
    assert browser.is_element_present(chart)
    assert browser.is_element_present(plotly_js)
def _load_paradigms(filename):
    """Read paradigm arrays from a binary file.

    Layout: a little-endian uint16 paradigm count, then for each paradigm a
    little-endian uint16 length followed by that many uint16 values (values
    are read in native byte order via array.fromfile).

    Returns:
        list of array.array('H') objects, one per paradigm.
    """
    with open(filename, 'rb') as stream:
        (count,) = struct.unpack('<H', stream.read(2))
        paradigms = []
        for _ in range(count):
            (length,) = struct.unpack('<H', stream.read(2))
            values = array.array('H')
            values.fromfile(stream, length)
            paradigms.append(values)
    return paradigms
def launch_experiments(variant_generator):
    """Launch one finetuning SAC run per variant from *variant_generator*."""
    variants = variant_generator.variants()
    for (i, variant) in enumerate(variants):
        # Build a human-readable tag from the snapshot path and key hyperparams.
        tag = 'finetune__'
        print(variant['snapshot_filename'])
        tag += variant['snapshot_filename'].split('/')[(- 2)]
        tag += '____'
        tag += '__'.join([('%s_%s' % (key, variant[key])) for key in TAG_KEYS])
        log_dir = os.path.join(args.log_dir, tag)
        variant['video_dir'] = os.path.join(log_dir, 'videos')
        # NOTE(review): this "Launching N experiments" message prints once per
        # iteration (it sits inside the loop) — confirm whether it was meant
        # to print once before the loop.
        print('Launching {} experiments.'.format(len(variants)))
        run_sac_experiment(run_experiment, mode=args.mode, variant=variant, exp_prefix=((variant['prefix'] + '/') + args.exp_name), exp_name=((((variant['prefix'] + '-') + args.exp_name) + '-') + str(i).zfill(2)), n_parallel=1, seed=variant['seed'], terminate_machine=True, log_dir=log_dir, snapshot_mode=variant['snapshot_mode'], snapshot_gap=variant['snapshot_gap'], sync_s3_pkl=variant['sync_pkl'])
# NOTE(review): a registry decorator ("generic_image_transform") appears to
# have been stripped from above this class — confirm against upstream.
class GenericImageTransform(ClassyTransform):
    """Apply a transform to the image of an (image, target, ...) sample tuple.

    Either an explicit callable *transform* or a canned Imagenet train/test
    transform selected by *split*; the two options are mutually exclusive.
    """

    def __init__(self, transform: Optional[Callable]=None, split: Optional[str]=None):
        assert ((split is None) or (transform is None)), 'If split is not None then transform must be None'
        assert (split in [None, 'train', 'test']), "If specified, split should be either 'train' or 'test', instead got {}".format(split)
        self._transform = transform
        if (split is not None):
            self._transform = (ImagenetAugmentTransform() if (split == 'train') else ImagenetNoAugmentTransform())

    @classmethod
    def from_config(cls, config: Dict[(str, Any)]):
        """Instantiate from a config dict ('transforms' and/or 'split' keys).

        BUG FIX: restored the @classmethod decorator — the method takes `cls`
        and is invoked on the class, which without the decorator would bind
        the config dict to `cls` and fail.
        """
        transform = None
        if ('transforms' in config):
            transform = build_transforms(config['transforms'])
        split = config.get('split')
        return cls(transform, split)

    def __call__(self, sample: Tuple[Any]):
        """Transform sample[0]; pass the target and any extras through."""
        image = sample[0]
        transformed_image = (self._transform(image) if (self._transform is not None) else image)
        new_sample = {'input': transformed_image, 'target': sample[1]}
        # Preserve any extra tuple entries under their positional index.
        if (len(sample) > 2):
            for i in range(2, len(sample)):
                new_sample[str(i)] = sample[i]
        return new_sample
@dataclass
class Binary():
    """Metadata extracted (via lief) from one ELF/PE binary in a firmware tree.

    NOTE(review): the @dataclass decorator was restored — the class uses
    `field(default_factory=...)` and `__post_init__`, which only function on a
    dataclass; confirm `dataclass` is imported alongside `field`.
    """
    file_path: Path
    fw_path: Path
    id: int = None
    lib_names: list[str] = field(default_factory=list)
    libs: list['Binary'] = field(default_factory=list)
    imported_symbols: list[str] = field(default_factory=list)
    imported_symbol_ids: list[int] = field(default_factory=list)
    non_resolved_libs: list[str] = field(default_factory=list)
    non_resolved_symbol_imports: list[str] = field(default_factory=list)
    exported_function_ids: dict[(str, int)] = field(default_factory=dict)
    version_requirement: dict[(str, list[str])] = field(default_factory=dict)
    exported_symbol_ids: dict[(str, int)] = field(default_factory=dict)

    @staticmethod
    def is_supported(p: Path) -> bool:
        """True for regular (non-symlink) files lief recognizes as ELF or PE.

        BUG FIX: restored @staticmethod — the function has no `self` yet was
        defined as a plain method.
        """
        return (p.is_file() and (not p.is_symlink()) and (lief.is_elf(str(p)) or lief.is_pe(str(p))))

    def __post_init__(self):
        """Parse the binary and populate imports, exports and version requirements."""
        lief_obj: lief.Binary = lief.parse(str(self.file_path))
        is_elf = isinstance(lief_obj, lief.ELF.Binary)
        if is_elf:
            # Some libcrypto builds expose no exports unless symbols are
            # counted via the HASH method; re-parse with that fallback.
            if (self.name.startswith('libcrypto') and (len(lief_obj.exported_functions) == 0)):
                lief_obj = lief.ELF.parse(str(self.file_path), lief.ELF.DYNSYM_COUNT_METHODS.HASH)
        self.lib_names = lief_obj.libraries
        if is_elf:
            lief_obj: lief.ELF.Binary
            self.imported_symbols = [s.name for s in lief_obj.imported_symbols]
            # Ids stay None until record_in_db assigns them.
            for s in lief_obj.exported_symbols:
                if s.is_function:
                    self.exported_function_ids[s.name] = None
                else:
                    self.exported_symbol_ids[s.name] = None
            for req in lief_obj.symbols_version_requirement:
                for symb in req.get_auxiliary_symbols():
                    if (symb.name in self.version_requirement):
                        self.version_requirement[symb.name].append(req.name)
                    else:
                        self.version_requirement[symb.name] = [req.name]
        else:
            # PE: lief exposes imported/exported functions only.
            self.imported_symbols = [f.name for f in lief_obj.imported_functions]
            for s in lief_obj.exported_functions:
                self.exported_function_ids[s.name] = None

    @property
    def name(self):
        """Basename of the binary.

        BUG FIX: restored @property — `name` is consumed as an attribute
        (`self.name.startswith(...)` above and `self.name` in record_in_db),
        which would fail on a bound method.
        """
        return self.file_path.name

    def record_in_db(self, db: SourcetrailDB) -> None:
        """Record this binary and its exports in *db*, storing assigned ids."""
        self.id = db.record_class(self.name, prefix=f'{self.fw_path.parent}/', delimiter=':')
        for name in self.exported_symbol_ids.keys():
            self.exported_symbol_ids[name] = db.record_symbol_node(name, parent_id=self.id)
        for name in self.exported_function_ids.keys():
            self.exported_function_ids[name] = db.record_method(name, parent_id=self.id)
class SuperResDataset(BaseDataset):
    """tf.data pipeline for super-resolution training/eval from image files."""

    def __init__(self, opt, training):
        BaseDataset.__init__(self, opt, training)
        self.dir = opt.dir
        self.paths = file_utils.load_paths(self.dir)

    def generate(self, cache=True, shuffle_buffer_size=1000):
        """Build the dataset: decode/preprocess, optional cache, then
        shuffle + repeat + batch + prefetch.

        If *cache* is a string it names an on-disk cache file; any other
        truthy value caches in memory.
        """
        dataset = tf.data.Dataset.from_tensor_slices(self.paths)
        if self.training:
            dataset = dataset.map(self._train_preprocess, num_parallel_calls=AUTOTUNE)
        else:
            dataset = dataset.map(self._test_preprocess, num_parallel_calls=AUTOTUNE)
        if cache:
            if isinstance(cache, str):
                dataset = dataset.cache(cache)
            else:
                dataset = dataset.cache()
        dataset = dataset.shuffle(buffer_size=shuffle_buffer_size)
        dataset = dataset.repeat()
        dataset = dataset.batch(self.opt.batch_size)
        dataset = dataset.prefetch(AUTOTUNE)
        return dataset

    def _train_preprocess(self, path):
        """Decode + random crop/flip; return (highres, lowres) pair."""
        img = tf.io.read_file(path)
        img = tf.image.decode_jpeg(img, channels=self.opt.channels)
        img = tf.image.random_crop(img, size=[self.opt.crop_size, self.opt.crop_size, self.opt.channels])
        img = tf.cast(img, dtype=tf.float32)
        highres = tf.image.random_flip_left_right(img)
        lowres = tf.image.resize(highres, [self.opt.scale_size, self.opt.scale_size])
        # Note the asymmetric normalization: highres to [-1, 1], lowres to [0, 1].
        highres = ((highres / 127.5) - 1.0)
        lowres = (lowres / 255.0)
        return (highres, lowres)

    def _test_preprocess(self, path):
        """Decode the full image; return only the [0, 1]-scaled lowres input."""
        img = tf.io.read_file(path)
        img = tf.image.decode_jpeg(img, channels=self.opt.channels)
        img = tf.cast(img, dtype=tf.float32)
        lowres = (img / 255.0)
        return lowres
class SendCode():
    """Mixin providing Client.send_code."""

    async def send_code(self: 'pyrogram.Client', phone_number: str) -> 'types.SentCode':
        """Request a login confirmation code for *phone_number*.

        Transparently follows datacenter migrations: on PhoneMigrate /
        NetworkMigrate the session is rebuilt against the DC indicated by the
        error and the request is retried.
        """
        phone_number = phone_number.strip(' +')
        while True:
            try:
                r = (await self.invoke(raw.functions.auth.SendCode(phone_number=phone_number, api_id=self.api_id, api_hash=self.api_hash, settings=raw.types.CodeSettings())))
            except (PhoneMigrate, NetworkMigrate) as e:
                # Telegram wants another DC (e.value): stop the session,
                # switch DC, create a fresh auth key, and restart.
                (await self.session.stop())
                (await self.storage.dc_id(e.value))
                (await self.storage.auth_key((await Auth(self, (await self.storage.dc_id()), (await self.storage.test_mode())).create())))
                self.session = Session(self, (await self.storage.dc_id()), (await self.storage.auth_key()), (await self.storage.test_mode()))
                (await self.session.start())
            else:
                # Success path: parse and return the SentCode response.
                return types.SentCode._parse(r)
def encode_content(content, nRows, nCols, actions_dict):
    """Rasterize a frame-label sequence into an (nRows, nCols) one-hot image.

    Each run of identical labels in *content* maps to a proportional band of
    rows; the hot column is actions_dict[label]. The final run extends to the
    bottom row.
    """
    encoded = np.zeros([nRows, nCols])
    total = len(content)
    run_start = 0
    band_end = 0
    for idx in range(total):
        if content[idx] == content[run_start]:
            continue
        # A new run begins at idx: paint the band for the finished run.
        one_hot = np.zeros(nCols)
        one_hot[actions_dict[content[run_start]]] = 1
        band_start = int((nRows * ((1.0 * run_start) / total)))
        band_end = int((nRows * ((1.0 * idx) / total)))
        encoded[band_start:band_end] = one_hot
        run_start = idx
    # Paint the last run from the previous band boundary down to the bottom.
    one_hot = np.zeros(nCols)
    one_hot[actions_dict[content[run_start]]] = 1
    encoded[band_end:] = one_hot
    return encoded
class Metric(object):
    """Base class for image-comparison metrics."""

    def __init__(self):
        self.name = 'Metric'

    def compare_mse(self, x1, x2):
        """Return the mean squared error between arrays *x1* and *x2*.

        BUG FIX: the original tested ``len(x1) == 4`` — the size of the first
        axis, not the number of dimensions — when choosing the normalization
        constant, so a 3-D array with a leading dimension of 4 indexed a
        nonexistent ``shape[3]``. Both branches intended err / element count,
        which ``x1.size`` gives for any rank.
        """
        err = np.sum(((x1 - x2) ** 2))
        return err / float(x1.size)
def load_backbone_pretrained(model, backbone):
    """Load pretrained backbone weights into *model* when training.

    Skipped entirely unless training with backbone pretraining enabled and no
    full pretrained-model path configured (that path supersedes this one).
    Prefers the local file at cfg.TRAIN.BACKBONE_PRETRAINED_PATH; otherwise
    falls back to the model-zoo URL registered for *backbone*.
    """
    if ((cfg.PHASE == 'train') and cfg.TRAIN.BACKBONE_PRETRAINED and (not cfg.TRAIN.PRETRAINED_MODEL_PATH)):
        if os.path.isfile(cfg.TRAIN.BACKBONE_PRETRAINED_PATH):
            logging.info('Load backbone pretrained model from {}'.format(cfg.TRAIN.BACKBONE_PRETRAINED_PATH))
            # strict=False: only backbone keys need to match the model.
            msg = model.load_state_dict(torch.load(cfg.TRAIN.BACKBONE_PRETRAINED_PATH), strict=False)
            logging.info(msg)
        elif (backbone not in model_urls):
            logging.info('{} has no pretrained model'.format(backbone))
            return
        else:
            logging.info('load backbone pretrained model from url..')
            try:
                msg = model.load_state_dict(model_zoo.load_url(model_urls[backbone]), strict=False)
            except Exception as e:
                logging.warning(e)
                logging.info('Use torch download failed, try custom method!')
                # Fallback: custom downloader writing into the torch hub cache.
                msg = model.load_state_dict(torch.load(download(model_urls[backbone], path=os.path.join(torch.hub._get_torch_home(), 'checkpoints'))), strict=False)
            logging.info(msg)
def test_get_build_requires_import():
    """All three PEP 517 requires hooks report the same build requirement."""
    expected = ['numpy >=1.16.0']
    with cwd(osp.join(samples_dir, 'constructed_version')):
        for requires_hook in (buildapi.get_requires_for_build_wheel,
                              buildapi.get_requires_for_build_editable,
                              buildapi.get_requires_for_build_sdist):
            assert requires_hook() == expected
def main():
    """CLI entry point: parse benchmark args and run the PyTorch benchmark.

    Translates the argparse failure raised for legacy ``--no_xxx`` flags into
    a message pointing at the new ``--no-xxx`` spelling, re-raising for any
    genuinely unknown arguments.
    """
    parser = HfArgumentParser(PyTorchBenchmarkArguments)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = 'Arg --no_{0} is no longer used, please use --no-{0} instead.'
        begin_error_msg = ' '.join(str(e).split(' ')[:(- 1)])
        full_error_msg = ''
        # NOTE(review): eval() of the last token of the argparse message —
        # tolerable only because the string comes from argparse itself, but
        # ast.literal_eval would be the safer choice.
        depreciated_args = eval(str(e).split(' ')[(- 1)])
        wrong_args = []
        for arg in depreciated_args:
            # arg looks like '--no_xxx'; arg[2:] strips '--', arg[5:] 'no_--'.
            if (arg[2:] in PyTorchBenchmarkArguments.deprecated_args):
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if (len(wrong_args) > 0):
            full_error_msg = ((full_error_msg + begin_error_msg) + str(wrong_args))
        raise ValueError(full_error_msg)
    benchmark = PyTorchBenchmark(args=benchmark_args)
    benchmark.run()
def test_recovering_indices(tmpdir, bace_fragmented):
    """Each fragment bond's map-index pair resolves to exactly one parent bond."""
    for fragment in bace_fragmented.fragments:
        for pair in fragment.bond_indices:
            # Map indices -> atom indices in the parent molecule.
            atom_indices = {a.atom_index for a in bace_fragmented.atoms if (a.map_index in pair)}
            assert (len(atom_indices) == 2)
            # The two recovered atoms must actually be bonded in the parent.
            assert any((({b.atom1_index, b.atom2_index} == atom_indices) for b in bace_fragmented.bonds))
(frozen=True, slots=True)
class SystemClock(Clock):
    """Wall clock based on perf_counter, shifted by a random per-instance offset.

    NOTE(review): an attrs-style class decorator (frozen=True, slots=True)
    appears to have been stripped above this class — `offset` is declared via
    ``attr.ib``. The random offset makes absolute readings meaningless, so
    callers can only rely on differences between current_time() values.
    """
    offset: float = attr.ib(factory=(lambda : _r.uniform(10000, 200000)))

    def start_clock(self) -> None:
        """No setup required for the system clock."""
        pass

    def current_time(self) -> float:
        """Current time in seconds: offset + perf_counter()."""
        return (self.offset + perf_counter())

    def deadline_to_sleep_time(self, deadline: float) -> float:
        """Seconds remaining until *deadline* (negative if already past)."""
        return (deadline - self.current_time())
(scope='module')
def inline_query_result_cached_photo():
    """Return a fully-populated InlineQueryResultCachedPhoto built from the
    test base-class constants.

    NOTE(review): a pytest fixture decorator (scope='module') appears to have
    been stripped above this function.
    """
    return InlineQueryResultCachedPhoto(TestInlineQueryResultCachedPhotoBase.id_, TestInlineQueryResultCachedPhotoBase.photo_file_id, title=TestInlineQueryResultCachedPhotoBase.title, description=TestInlineQueryResultCachedPhotoBase.description, caption=TestInlineQueryResultCachedPhotoBase.caption, parse_mode=TestInlineQueryResultCachedPhotoBase.parse_mode, caption_entities=TestInlineQueryResultCachedPhotoBase.caption_entities, input_message_content=TestInlineQueryResultCachedPhotoBase.input_message_content, reply_markup=TestInlineQueryResultCachedPhotoBase.reply_markup)
def benchmark(min_size=min(SIZES), max_size=max(SIZES), step_size=STEP, stars=STARS, noise=NOISE, seed=None, repeats=REPEATS, n_jobs=(- 1), comb_number=COMB_NUMBER):
    """Run the parameter-grid benchmark in parallel and return a DataFrame.

    One joblib task is launched per parameter combination produced by
    get_parameters; n_jobs=-1 uses all cores, and tqdm reports grid progress.
    """
    grid = get_parameters(min_size=min_size, max_size=max_size, step_size=step_size, repeats=repeats, stars=stars, noise=noise, seed=seed, comb_number=comb_number)
    with joblib.Parallel(n_jobs=n_jobs) as parallel:
        results = parallel((joblib.delayed(_test)(**params) for params in tqdm.tqdm(grid)))
    df = pd.DataFrame(results)
    return df
class PluginsList(ListView):
    """Paginated, sortable listing of approved plugins."""
    model = Plugin
    queryset = Plugin.approved_objects.all()
    title = _('All plugins')
    additional_context = {}
    paginate_by = settings.PAGINATION_DEFAULT_PAGINATION

    def get_paginate_by(self, queryset):
        """Page size from ?per_page, falling back to the default on bad input."""
        try:
            paginate_by = int(self.request.GET.get('per_page', self.paginate_by))
        except ValueError:
            paginate_by = self.paginate_by
        return paginate_by

    def get_filtered_queryset(self, qs):
        """Hook for subclasses to narrow the queryset; identity here."""
        return qs

    def get_queryset(self):
        """Apply subclass filtering plus optional, validated ?sort ordering."""
        qs = super(PluginsList, self).get_queryset()
        qs = self.get_filtered_queryset(qs)
        sort_by = self.request.GET.get('sort', None)
        if sort_by:
            if (sort_by[0] == '-'):
                _sort_by = sort_by[1:]
            else:
                _sort_by = sort_by
            # Accept the two computed columns; otherwise require a real model
            # field (get_field raises FieldDoesNotExist for anything else, in
            # which case the unsorted queryset is returned unchanged).
            try:
                ((_sort_by == 'average_vote') or (_sort_by == 'latest_version_date') or self.model._meta.get_field(_sort_by))
            except FieldDoesNotExist:
                return qs
            qs = qs.order_by(sort_by)
        elif (not qs.ordered):
            # Stable default ordering: case-insensitive name.
            qs = qs.order_by(Lower('name'))
        return qs

    def get_context_data(self, **kwargs):
        """Add title, sort/query strings, and the 'show more' page size."""
        context = super(PluginsList, self).get_context_data(**kwargs)
        context.update({'title': self.title})
        context.update(self.additional_context)
        context['current_sort_query'] = self.get_sortstring()
        context['current_querystring'] = self.get_querystring()
        context['per_page_list'] = [20, 50, 75, 100]
        # Next page-size step; past the largest option, show everything (+1).
        try:
            next_per_page_id = (context['per_page_list'].index(context['paginator'].per_page) + 1)
            next_per_page = context['per_page_list'][next_per_page_id]
        except (ValueError, IndexError):
            next_per_page = (context['paginator'].count + 1)
        context['show_more_items_number'] = next_per_page
        return context

    def get_sortstring(self):
        """Return 'sort=<value>' when a sort parameter is present, else ''."""
        if self.request.GET.get('sort', None):
            return ('sort=%s' % self.request.GET.get('sort'))
        return ''

    def get_querystring(self):
        """Current query string with the 'page' and 'sort' parameters removed."""
        to_remove = ['page', 'sort']
        query_string = urlparse(self.request.get_full_path()).query
        query_dict = parse_qs(query_string)
        for arg in to_remove:
            if (arg in query_dict):
                del query_dict[arg]
        clean_query_string = urlencode(query_dict, doseq=True)
        return clean_query_string
class BddQuantifierEliminator(QuantifierEliminator):
    """Quantifier elimination for pure-boolean formulae via BDDs (repycudd)."""
    LOGICS = [pysmt.logics.BOOL]

    def __init__(self, environment, logic=None):
        QuantifierEliminator.__init__(self)
        self.environment = environment
        self.logic = logic
        self.ddmanager = repycudd.DdManager()
        self.converter = BddConverter(environment=environment, ddmanager=self.ddmanager)

    def eliminate_quantifiers(self, formula):
        """Return a quantifier-free pysmt formula equivalent to *formula*.

        Raises:
            NotImplementedError: if the formula's logic is not pure-boolean.
        """
        logic = get_logic(formula, self.environment)
        if (not (logic <= pysmt.logics.BOOL)):
            raise NotImplementedError(('BDD-based quantifier elimination only supports pure-boolean formulae.(detected logic is: %s)' % str(logic)))
        # Converting to a BDD eliminates quantifiers; converting back yields
        # the quantifier-free formula.
        bdd = self.converter.convert(formula)
        pysmt_res = self.converter.back(bdd)
        return pysmt_res

    def _exit(self):
        """Release the CUDD manager."""
        del self.ddmanager
def augment_triplet_from_asins(triplets: List[Tuple[(str, str, str)]], docs: List[List[str]], sample_per_asin=3):
    """Mine additional (anchor, positive, negative) triplets.

    Negative candidates are drawn from random pairs within each co-occurrence
    doc; a candidate is rejected when it is a known positive of the anchor.

    Args:
        triplets: existing (anchor, positive, negative) ASIN triplets.
        docs: lists of co-occurring ASINs (negative-candidate pools).
        sample_per_asin: random pairs sampled per doc.

    Returns:
        The newly mined triplets only (inputs are not included).
    """
    negative_profile = defaultdict(list)
    for doc in docs:
        # Skip docs too small to yield the requested number of distinct pairs.
        if (((len(doc) * (len(doc) - 1)) / 2) < sample_per_asin):
            continue
        pairs = utils.Rnd.random_pairs(doc, sample_per_asin)
        for pair in pairs:
            # Record the candidate relation symmetrically.
            negative_profile[pair[0]].append(pair[1])
            negative_profile[pair[1]].append(pair[0])
    # Symmetric map of known positives, used to filter bad negatives.
    positive_profile = defaultdict(set)
    for (anchor, pos, _) in triplets:
        positive_profile[anchor].add(pos)
        positive_profile[pos].add(anchor)
    augmented_triplets = []
    for (anchor, pos, _) in triplets:
        if (anchor in negative_profile):
            neg_asin = random.choice(negative_profile[anchor])
            if (neg_asin not in positive_profile[anchor]):
                augmented_triplets.append((anchor, pos, neg_asin))
        # Also try the mirrored triplet with pos as the anchor.
        if (pos in negative_profile):
            neg_asin = random.choice(negative_profile[pos])
            if (neg_asin not in positive_profile[pos]):
                augmented_triplets.append((pos, anchor, neg_asin))
    return augmented_triplets
class Maker(AttributeDevice):
    """WeMo Maker device (relay switch plus a sensor input).

    NOTE(review): switch_state/sensor_state/switch_mode/has_sensor read like
    properties — `_state_property = 'switch_state'` implies attribute-style
    access — so @property decorators may have been stripped; confirm against
    upstream.
    """
    # Name of the member that reflects the device's on/off state.
    _state_property = 'switch_state'
    _attributes: _Attributes

    def _required_services(self) -> list[RequiredService]:
        # Maker additionally needs basicevent.SetBinaryState.
        # NOTE(review): `super()._required_services` is not called here —
        # presumably a property on the base class; verify.
        return (super()._required_services + [RequiredService(name='basicevent', actions=['SetBinaryState'])])

    def set_state(self, state: int) -> None:
        """Drive the relay, then force-refresh the cached state."""
        self.basicevent.SetBinaryState(BinaryState=int(state))
        self.get_state(True)

    def switch_state(self) -> int:
        """Cached 'Switch' attribute value."""
        return self._attributes['Switch']

    def sensor_state(self) -> int:
        """Cached 'Sensor' attribute value."""
        return self._attributes['Sensor']

    def switch_mode(self) -> int:
        """Cached 'SwitchMode' attribute value."""
        return self._attributes['SwitchMode']

    def has_sensor(self) -> int:
        """Cached 'SensorPresent' attribute value."""
        return self._attributes['SensorPresent']
def test_while_exec_iteration_stop_evals_false():
    """exec_iteration returns False while the stop expression is falsy, and
    records the counter both on the decorator and in the context."""
    wd = WhileDecorator({'stop': '{stop}'})
    context = Context({'stop': False})
    mock = MagicMock()
    # Iteration 2: '{stop}' formats to False, so the loop must continue.
    assert (not wd.exec_iteration(2, context, mock))
    assert (context['whileCounter'] == 2)
    assert (wd.while_counter == 2)
    # Context gains only the counter key on top of 'stop'.
    assert (len(context) == 2)
    # The step callable receives the context including the counter.
    mock.assert_called_once_with({'stop': False, 'whileCounter': 2})
.all_locales
def test_smoke_numbers(locale):
    """Smoke test: every number formatter accepts every sample number in
    *locale*, with both the implicit and the 'default' numbering system.

    NOTE(review): a parametrizing decorator (all_locales) appears to have
    been stripped above this function.
    """
    locale = Locale.parse(locale)
    for number in NUMBERS:
        assert numbers.format_decimal(number, locale=locale)
        assert numbers.format_decimal(number, locale=locale, numbering_system='default')
        assert numbers.format_currency(number, 'EUR', locale=locale)
        assert numbers.format_currency(number, 'EUR', locale=locale, numbering_system='default')
        assert numbers.format_scientific(number, locale=locale)
        assert numbers.format_scientific(number, locale=locale, numbering_system='default')
        assert numbers.format_percent((number / 100), locale=locale)
        assert numbers.format_percent((number / 100), locale=locale, numbering_system='default')
def continue2discrete_coordY(y):
    """Snap a continuous Y coordinate to its discrete band's center value."""
    # (exclusive_upper_bound, center) pairs; the first bound exceeding y wins.
    bands = (
        (150, 100.0),
        (260, 205.0),
        (370, 315.0),
        (480, 425.0),
        (590, 535.0),
        (700, 645.0),
        (810, 755.0),
    )
    for upper_bound, center in bands:
        if y < upper_bound:
            return center
    # Anything at or beyond the last bound maps to the bottom band.
    return 900.0
class InceptionI3d(nn.Module):
VALID_ENDPOINTS = ('Conv3d_1a_7x7', 'MaxPool3d_2a_3x3', 'Conv3d_2b_1x1', 'Conv3d_2c_3x3', 'MaxPool3d_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'MaxPool3d_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_4f', 'MaxPool3d_5a_2x2', 'Mixed_5b', 'Mixed_5c', 'Logits', 'Predictions')
def __init__(self, num_classes=400, spatial_squeeze=True, final_endpoint='Logits', name='inception_i3d', in_channels=3, dropout_keep_prob=0.5):
if (final_endpoint not in self.VALID_ENDPOINTS):
raise ValueError(('Unknown final endpoint %s' % final_endpoint))
super(InceptionI3d, self).__init__()
self._num_classes = num_classes
self._spatial_squeeze = spatial_squeeze
self._final_endpoint = final_endpoint
self.logits = None
if (self._final_endpoint not in self.VALID_ENDPOINTS):
raise ValueError(('Unknown final endpoint %s' % self._final_endpoint))
self.end_points = {}
end_point = 'Conv3d_1a_7x7'
self.end_points[end_point] = Unit3D(in_channels=in_channels, output_channels=64, kernel_shape=[7, 7, 7], stride=(2, 2, 2), padding=(3, 3, 3), name=(name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'MaxPool3d_2a_3x3'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2), padding=0)
if (self._final_endpoint == end_point):
return
end_point = 'Conv3d_2b_1x1'
self.end_points[end_point] = Unit3D(in_channels=64, output_channels=64, kernel_shape=[1, 1, 1], padding=0, name=(name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'Conv3d_2c_3x3'
self.end_points[end_point] = Unit3D(in_channels=64, output_channels=192, kernel_shape=[3, 3, 3], padding=1, name=(name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'MaxPool3d_3a_3x3'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2), padding=0)
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_3b'
self.end_points[end_point] = InceptionModule(192, [64, 96, 128, 16, 32, 32], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_3c'
self.end_points[end_point] = InceptionModule(256, [128, 128, 192, 32, 96, 64], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'MaxPool3d_4a_3x3'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[3, 3, 3], stride=(2, 2, 2), padding=0)
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_4b'
self.end_points[end_point] = InceptionModule((((128 + 192) + 96) + 64), [192, 96, 208, 16, 48, 64], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_4c'
self.end_points[end_point] = InceptionModule((((192 + 208) + 48) + 64), [160, 112, 224, 24, 64, 64], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_4d'
self.end_points[end_point] = InceptionModule((((160 + 224) + 64) + 64), [128, 128, 256, 24, 64, 64], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_4e'
self.end_points[end_point] = InceptionModule((((128 + 256) + 64) + 64), [112, 144, 288, 32, 64, 64], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_4f'
self.end_points[end_point] = InceptionModule((((112 + 288) + 64) + 64), [256, 160, 320, 32, 128, 128], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'MaxPool3d_5a_2x2'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[2, 2, 2], stride=(2, 2, 2), padding=0)
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_5b'
self.end_points[end_point] = InceptionModule((((256 + 320) + 128) + 128), [256, 160, 320, 32, 128, 128], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'Mixed_5c'
self.end_points[end_point] = InceptionModule((((256 + 320) + 128) + 128), [384, 192, 384, 48, 128, 128], (name + end_point))
if (self._final_endpoint == end_point):
return
end_point = 'Logits'
self.avg_pool = nn.AvgPool3d(kernel_size=[2, 7, 7], stride=(1, 1, 1))
self.dropout = nn.Dropout(dropout_keep_prob)
self.logits = Unit3D(in_channels=(((384 + 384) + 128) + 128), output_channels=self._num_classes, kernel_shape=[1, 1, 1], padding=0, activation_fn=None, use_batch_norm=False, use_bias=True, name='logits')
self.build()
    def replace_logits(self, num_classes):
        """Replace the final 1x1x1 logits layer with a freshly initialized
        one producing `num_classes` outputs (used when fine-tuning)."""
        self._num_classes = num_classes
        # 384+384+128+128 = channel count of Mixed_5c, the layer feeding logits.
        self.logits = Unit3D(in_channels=(((384 + 384) + 128) + 128), output_channels=self._num_classes, kernel_shape=[1, 1, 1], padding=0, activation_fn=None, use_batch_norm=False, use_bias=True, name='logits')
def build(self):
for k in self.end_points.keys():
self.add_module(k, self.end_points[k])
def forward(self, x):
for (num, end_point) in enumerate(self.VALID_ENDPOINTS):
if (end_point == 'MaxPool3d_5a_2x2'):
break
if (end_point in self.end_points):
x = self._modules[end_point](x)
return x
def extract_features(self, x):
for end_point in self.VALID_ENDPOINTS:
if (end_point in self.end_points):
x = self._modules[end_point](x)
return self.avg_pool(x) |
# NOTE(review): the lines below look like click option decorators whose
# leading '@click.option' was lost during extraction — restore before running.
()
('-n', '--nsteps', default=(- 1), help='number of steps to process. use -1 to for no limit (will run workflow to completion)')
('--track/--no-track', default=False)
('-u', '--update-interval', default=1)
('-g', '--strategy', help='set execution stragegy')
('-y', '--strategyopt', help='strategy option', multiple=True, default=None)
_options
_options
def step(track, strategy, nsteps, update_interval, metadir, accept_metadir, controller, ctrlopt, modelsetup, modelopt, backend, local, strategyopt, verbosity):
    """CLI entry point: connect to the workflow backend and run steering.

    Parses `key=value` strategy options and delegates to `execute_steering`.
    """
    handle_common_options(verbosity)
    ys = handle_connection_options(metadir, accept_metadir, controller, ctrlopt, modelsetup, modelopt, backend, local)
    # strategyopt arrives as repeated 'key=value' strings; convert to a dict
    strategyopts = utils.options_from_eqdelimstring(strategyopt)
    execute_steering(ys, updateinterval=update_interval, default_trackers=track, strategy=strategy, strategyopts=strategyopts)
def read_tb(path):
    """Read TensorBoard event file(s) into a pandas DataFrame.

    `path` is either a directory containing `events.*` files or a single
    event file. Row k-1 holds scalar values logged at step k (steps are
    1-based; step 0 entries are skipped); columns are the sorted scalar
    tags, with NaN where a tag has no value at a step.
    """
    import pandas
    import numpy as np
    from glob import glob
    from collections import defaultdict
    import tensorflow as tf
    if osp.isdir(path):
        event_files = glob(osp.join(path, 'events.*'))
    elif osp.basename(path).startswith('events.'):
        event_files = [path]
    else:
        raise NotImplementedError('Expected tensorboard file or directory containing them. Got %s' % path)
    pairs_by_tag = defaultdict(list)
    last_step = 0
    for event_file in event_files:
        for summary in tf.train.summary_iterator(event_file):
            if summary.step <= 0:
                continue
            last_step = max(last_step, summary.step)
            for value in summary.summary.value:
                pairs_by_tag[value.tag].append((summary.step, value.simple_value))
    # Dense (steps x tags) table, NaN-filled for missing entries.
    table = np.full((last_step, len(pairs_by_tag)), np.nan)
    columns = sorted(pairs_by_tag)
    for col, tag in enumerate(columns):
        for step, scalar in pairs_by_tag[tag]:
            table[step - 1, col] = scalar
    return pandas.DataFrame(table, columns=columns)
def save_file(data, filename):
    """Serialize `data` to `filename`, choosing the format by extension.

    Supported extensions: .pkl/.pickle (pickle), .npy (numpy),
    .json (one JSON object appended per line, JSONL-style).

    Raises:
        Exception: for any other extension.
    """
    # Fix: the log messages previously contained a corrupted literal
    # "(unknown)" where the filename placeholder belonged.
    logging.info(f'Saving data to file: {filename}')
    file_ext = os.path.splitext(filename)[1]
    if file_ext in ['.pkl', '.pickle']:
        with PathManager.open(filename, 'wb') as fopen:
            pickle.dump(data, fopen, pickle.HIGHEST_PROTOCOL)
    elif file_ext == '.npy':
        with PathManager.open(filename, 'wb') as fopen:
            np.save(fopen, data)
    elif file_ext == '.json':
        # append mode: each call adds one JSON line to the file
        with PathManager.open(filename, 'a') as fopen:
            fopen.write(json.dumps(data, sort_keys=True) + '\n')
            fopen.flush()
    else:
        raise Exception(f'Saving {file_ext} is not supported yet')
    logging.info(f'Saved data to file: {filename}')
class BSDMountPoint(MountPoint):
    """Mount-point enumeration for BSD systems, parsed from `mount -p` output."""
    # NOTE(review): this takes `cls` and calls `cls(None)` but carries no
    # @classmethod decorator — looks like the decorator was lost; confirm.
    def _iter_mountpoints(cls):
        check_output = cls(None).check_output
        # `mount -p` emits one line per mount: <device> <path> <fstype> <opt,opt,...>
        for line in check_output('mount -p').splitlines():
            splitted = line.split()
            (yield {'path': splitted[1], 'device': splitted[0], 'filesystem': splitted[2], 'options': splitted[3].split(',')})
class Records(Base):
    """REDCap API wrappers for exporting, importing, deleting and naming records."""
    def _backfill_fields(self, fields: Optional[List[str]], forms: Optional[List[str]]):
        """Return the field list to request, always including the record
        identifier (`self.def_field`).

        Cases: only forms given -> form-complete markers plus def_field;
        fields given without def_field -> fields plus def_field;
        nothing given -> every project field plus all form-complete markers.
        """
        if (forms and (not fields)):
            return ([f'{form}_complete' for form in forms] + [self.def_field])
        if (fields and (self.def_field not in fields)):
            return (fields + [self.def_field])
        if (not fields):
            return (self.field_names + [f'{form}_complete' for form in self.forms])
        return fields
    def export_records(self, format_type: Literal[('json', 'csv', 'xml', 'df')]='json', records: Optional[List[str]]=None, fields: Optional[Union[(List[str], str)]]=None, forms: Optional[Union[(List[str], str)]]=None, events: Optional[List[str]]=None, raw_or_label: Literal[('raw', 'label', 'both')]='raw', raw_or_label_headers: Literal[('raw', 'label')]='raw', event_name: Literal[('label', 'unique')]='label', record_type: Literal[('flat', 'eav')]='flat', export_survey_fields: bool=False, export_data_access_groups: bool=False, export_checkbox_labels: bool=False, filter_logic: Optional[str]=None, date_begin: Optional[datetime]=None, date_end: Optional[datetime]=None, decimal_character: Optional[Literal[(',', '.')]]=None, export_blank_for_gray_form_status: Optional[bool]=None, df_kwargs: Optional[Dict[(str, Any)]]=None):
        """Export records from the project via the REDCap API.

        Builds the API payload from the keyword arguments (list-valued keys
        use REDCap's `key[i]` array form) and returns the response in the
        requested `format_type`.
        """
        payload: Dict[(str, Any)] = self._initialize_payload(content='record', format_type=format_type, record_type=record_type)
        if isinstance(fields, str):
            fields = [fields]
        if isinstance(forms, str):
            forms = [forms]
        fields = self._backfill_fields(fields, forms)
        keys_to_add = (records, fields, forms, events, raw_or_label, raw_or_label_headers, event_name, export_survey_fields, export_data_access_groups, export_checkbox_labels, filter_logic, decimal_character, export_blank_for_gray_form_status)
        str_keys = ('records', 'fields', 'forms', 'events', 'rawOrLabel', 'rawOrLabelHeaders', 'eventName', 'exportSurveyFields', 'exportDataAccessGroups', 'exportCheckboxLabel', 'filterLogic', 'decimalCharacter', 'exportBlankForGrayFormStatus')
        for (key, data) in zip(str_keys, keys_to_add):
            # falsy values (None/False/empty) are omitted so server defaults apply
            if data:
                if (key in ('fields', 'records', 'forms', 'events')):
                    data = cast(List[str], data)
                    for (i, value) in enumerate(data):
                        payload[f'{key}[{i}]'] = value
                else:
                    payload[key] = data
        if date_begin:
            payload['dateRangeBegin'] = date_begin.strftime('%Y-%m-%d %H:%M:%S')
        if date_end:
            payload['dateRangeEnd'] = date_end.strftime('%Y-%m-%d %H:%M:%S')
        return_type = self._lookup_return_type(format_type, request_type='export')
        response = cast(Union[(Json, str)], self._call_api(payload, return_type))
        return self._return_data(response=response, content='record', format_type=format_type, df_kwargs=df_kwargs, record_type=record_type)
    def import_records(self, to_import: Union[(str, List[Dict[(str, Any)]], 'pd.DataFrame')], return_format_type: Literal[('json', 'csv', 'xml')]='json', return_content: Literal[('count', 'ids', 'auto_ids', 'nothing')]='count', overwrite: Literal[('normal', 'overwrite')]='normal', import_format: Literal[('json', 'csv', 'xml', 'df')]='json', date_format: Literal[('YMD', 'DMY', 'MDY')]='YMD', force_auto_number: bool=False):
        """Import records into the project; returns the API response in
        `return_format_type` with the content selected by `return_content`."""
        payload = self._initialize_import_payload(to_import=to_import, import_format=import_format, return_format_type=return_format_type, content='record')
        payload['overwriteBehavior'] = overwrite
        payload['returnContent'] = return_content
        payload['dateFormat'] = date_format
        payload['forceAutoNumber'] = force_auto_number
        return_type = self._lookup_return_type(format_type=return_format_type, request_type='import', import_records_format=return_content)
        response = cast(Union[(Json, str)], self._call_api(payload, return_type))
        return response
    def delete_records(self, records: List[str], return_format_type: Literal[('json', 'csv', 'xml')]='json'):
        """Delete the given record ids from the project; returns the API response."""
        payload = self._initialize_payload(content='record', return_format_type=return_format_type)
        payload['action'] = 'delete'
        # REDCap expects array-form keys: records[0], records[1], ...
        records_dict = {f'records[{idx}]': record for (idx, record) in enumerate(records)}
        payload.update(records_dict)
        return_type = self._lookup_return_type(format_type=return_format_type, request_type='delete')
        response = cast(Union[(Json, str)], self._call_api(payload, return_type))
        return response
    def generate_next_record_name(self) -> str:
        """Ask the server for the next auto-numbered record name."""
        payload = self._initialize_payload(content='generateNextRecordName', format_type='csv')
        return cast(str, self._call_api(payload, return_type='str'))
def _populate_kernel_cache(np_type, k_type):
    """Load and memoize the peak-finding GPU kernel for a (dtype, kernel) pair.

    Raises ValueError for unsupported dtypes; no-op when already cached.
    """
    if np_type not in _SUPPORTED_TYPES:
        raise ValueError("Datatype {} not found for '{}'".format(np_type, k_type))
    cache_key = (str(np_type), k_type)
    if cache_key in _cupy_kernel_cache:
        return
    kernel_name = '_cupy_' + k_type + '_' + str(np_type)
    _cupy_kernel_cache[cache_key] = _get_function('/peak_finding/_peak_finding.fatbin', kernel_name)
class AsType(InDataMutatingTransform):
    """In-place dtype cast for selected entries of a sample.

    `indices` and `dtypes` are parallel sequences: entry `indices[k]` of the
    sample is cast to `dtypes[k]`.
    """

    def __init__(self, indices, dtypes):
        assert len(indices) == len(dtypes)
        self._indices = indices
        self._dtypes = dtypes

    def transform(self, in_data):
        """Mutate `in_data`, casting each configured position to its dtype."""
        for position, target_dtype in zip(self._indices, self._dtypes):
            in_data[position] = in_data[position].astype(target_dtype)
# NOTE(review): this looks like a @mock.patch decorator whose prefix was
# lost during extraction — restore before running.
('os.execvpe', new=execvpe_mock)
def test_run_script_by_absolute_name(caplog, pipx_temp_env, tmp_path):
    """`pipx run --path <script>` should execute the script at that path.

    The script writes a sentinel string to a file; we assert it arrived.
    """
    script = (tmp_path / 'test.py')
    out = (tmp_path / 'output.txt')
    test_str = 'Hello, world!'
    script.write_text(textwrap.dedent(f'''
        from pathlib import Path
        Path({repr(str(out))}).write_text({repr(test_str)})
        ''').strip())
    run_pipx_cli_exit(['run', '--path', str(script)])
    assert (out.read_text() == test_str)
def check_for_ccdc_structures(cid):
    """Query PubChem PUG-View for a compound and collect its CCDC/CSD codes.

    Parameters
    ----------
    cid : int or str
        PubChem compound identifier.

    Returns
    -------
    list of str
        CSD reference codes found in the record; empty on any failure.
    """
    # NOTE(review): the original URL literal was corrupted in this file;
    # restored to the standard PUG-View compound endpoint — verify.
    url0 = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug_view/data/compound/'
    cid = str(cid)
    url = url0 + cid + '/JSON'
    csd_codes = []
    try:
        response = urllib.request.urlopen(url)
        data = json.loads(response.read())
        # The CCDC section only exists when the first section has 3 subsections.
        if len(data['Record']['Section'][0]['Section']) == 3:
            infos = data['Record']['Section'][0]['Section'][2]['Section'][0]['Information']
            for info in infos:
                csd_codes.append(info['Value']['StringWithMarkup'][0]['String'])
    except Exception:
        # Best-effort: return what we have, but no longer swallow
        # KeyboardInterrupt/SystemExit as the bare `except` did.
        print('Fail to parse the following url', url)
    return csd_codes
def load_file(filename, onehot=True):
    """Load a tab-separated pair dataset.

    Each line has the shape ``id1\\tid2\\tdoc1\\tdoc2\\tlabel``. For
    similarity files (``'s_'`` in the path) the float label is binarized
    at 4.0 (>= 4 -> 1, < 4 -> 0).

    Returns:
        (ID1, ID2, D1, D2, L) where L is a numpy int array, or a one-hot
        matrix when `onehot` is True.

    Raises:
        ValueError: for a similarity label that is neither >= 4 nor < 4
            (i.e. NaN).
    """
    ID1, ID2, D1, D2, L = [], [], [], [], []
    with open(filename, 'r', encoding='utf-8') as read:
        for i, line in enumerate(read):
            if len(line.split('\t')) != 5:
                # Surface malformed rows before the unpack below fails.
                print(line.split('\t'))
            (id1, id2, d1, d2, label) = line.rstrip().split('\t')
            ID1.append(id1)
            ID2.append(id2)
            D1.append(d1)
            D2.append(d2)
            if 's_' in filename:
                if float(label) >= 4:
                    label = 1
                elif float(label) < 4:
                    label = 0
                else:
                    # Fix: original built ValueError() without raising it,
                    # silently letting NaN labels through.
                    raise ValueError(f'Invalid label {label!r} in {filename}')
            L.append(int(label))
    L = np.array(L)
    if onehot:
        # Fix: removed `classes = L.shape[1] + 1`, which raised IndexError
        # on the 1-D label array and was never used.
        L = get_onehot_encoding(L)
        print('Encoding labels as one hot vector.')
    return (ID1, ID2, D1, D2, L)
def test_reading_and_writing_repository():
    """RepositoryLocalState round-trip: reading a missing state file yields an
    empty id set without creating the file; writing persists the ids; a
    subsequent read restores them."""
    repository_state_dir = temp_dir()
    # NOTE(review): bare expression — probably the remnant of a stripped
    # decorator or patch target (`PackageIndex`); confirm against upstream.
    (PackageIndex)
    class RepositoryStub():
        def unique_id(self):
            return 'myid'
        def transfer(self, package):
            pass
    repository_state_directory = repository_state_dir.name
    repository = RepositoryStub()
    expected_repository_state_file = os.path.join(repository_state_dir.name, ('%s.uploaded' % repository.unique_id))
    local_state = RepositoryLocalState(repository)
    # read() on a fresh directory must not create the state file
    assert (not os.path.exists(expected_repository_state_file))
    local_state.read()
    assert (local_state.uploaded_project_ids == set([]))
    assert (not os.path.exists(expected_repository_state_file))
    # write() persists, read() restores
    local_state.uploaded_project_ids = {'someid1', 'someid2'}
    local_state.write()
    assert os.path.isfile(expected_repository_state_file)
    local_state.uploaded_project_ids = set([])
    local_state.read()
    assert (local_state.uploaded_project_ids == {'someid1', 'someid2'})
class BatchedFusedEmbedding(BaseBatchedEmbedding[torch.Tensor], FusedOptimizerModule):
    """Batched sequence embedding backed by a fused FBGEMM table-batched
    kernel, with the optimizer fused into the backward pass."""
    def __init__(self, config: GroupedEmbeddingConfig, pg: Optional[dist.ProcessGroup]=None, device: Optional[torch.device]=None) -> None:
        super().__init__(config, pg, device)
        # Per-table placement: CUDA tables use the kernel-derived location,
        # MTIA and CPU tables live in host memory.
        managed: List[EmbeddingLocation] = []
        compute_devices: List[ComputeDevice] = []
        for table in config.embedding_tables:
            if ((device is not None) and (device.type == 'cuda')):
                compute_devices.append(ComputeDevice.CUDA)
                managed.append(compute_kernel_to_embedding_location(table.compute_kernel))
            elif ((device is not None) and (device.type == 'mtia')):
                compute_devices.append(ComputeDevice.MTIA)
                managed.append(EmbeddingLocation.HOST)
            else:
                compute_devices.append(ComputeDevice.CPU)
                managed.append(EmbeddingLocation.HOST)
        weights_precision = data_type_to_sparse_type(config.data_type)
        fused_params = (config.fused_params or {})
        # Default the cache precision to the weight precision unless overridden.
        if ('cache_precision' not in fused_params):
            fused_params['cache_precision'] = weights_precision
        self._emb_module: SplitTableBatchedEmbeddingBagsCodegen = SplitTableBatchedEmbeddingBagsCodegen(embedding_specs=list(zip(self._local_rows, self._local_cols, managed, compute_devices)), feature_table_map=self._feature_table_map, pooling_mode=PoolingMode.NONE, weights_precision=weights_precision, device=device, **fused_params)
        self._optim: EmbeddingFusedOptimizer = EmbeddingFusedOptimizer(config, self._emb_module, pg)
        self._param_per_table: Dict[(str, TableBatchedEmbeddingSlice)] = dict(_gen_named_parameters_by_table_fused(emb_module=self._emb_module, table_name_to_count=self.table_name_to_count.copy(), config=self._config, pg=pg))
        self.init_parameters()
    # NOTE(review): emb_module/fused_optimizer look like they should carry
    # @property decorators (stripped during extraction?) — confirm upstream.
    def emb_module(self) -> SplitTableBatchedEmbeddingBagsCodegen:
        """Return the underlying fused FBGEMM embedding module."""
        return self._emb_module
    def fused_optimizer(self) -> FusedOptimizer:
        """Return the optimizer fused into the embedding backward pass."""
        return self._optim
    def named_buffers(self, prefix: str='', recurse: bool=True, remove_duplicate: bool=True) -> Iterator[Tuple[(str, torch.Tensor)]]:
        # No plain buffers: all state lives inside the fused module.
        (yield from ())
    def named_parameters(self, prefix: str='', recurse: bool=True, remove_duplicate: bool=True) -> Iterator[Tuple[(str, nn.Parameter)]]:
        """Expose fused split-embedding weights as nn.Parameters whose
        gradients are consumed by the in-backward fused optimizer."""
        for (name, tensor) in self.named_split_embedding_weights(prefix, recurse, remove_duplicate):
            param = nn.Parameter(tensor)
            param._in_backward_optimizers = [EmptyFusedOptimizer()]
            (yield (name, param))
    def flush(self) -> None:
        """Flush any cached rows back to the embedding tables."""
        self._emb_module.flush()
# NOTE(review): leading '@pytest.mark' appears stripped from this decorator.
.parametrize('text, expected', [('foo|bar', 'fo|bar'), ('foobar|', 'fooba|'), ('|foobar', '|foobar'), ('f<oo>bar', 'f|bar')])
def test_rl_backward_delete_char(text, expected, lineedit):
    """rl_backward_delete_char deletes one char before the cursor ('|'),
    or the selection ('<...>') when one exists."""
    lineedit.set_aug_text(text)
    readlinecommands.rl_backward_delete_char()
    assert (lineedit.aug_text() == expected)
def test_reveal(hatch, config_file, helpers, default_cache_dir, default_data_dir):
    """`hatch config show -a` must reveal the full config, including the
    normally-hidden auth value, matching the expected TOML exactly."""
    config_file.model.project = 'foo'
    config_file.model.publish['index']['auth'] = 'bar'
    config_file.save()
    result = hatch('config', 'show', '-a')
    # Windows paths need escaped backslashes inside the TOML literal.
    default_cache_directory = str(default_cache_dir).replace('\\', '\\\\')
    default_data_directory = str(default_data_dir).replace('\\', '\\\\')
    assert (result.exit_code == 0), result.output
    assert (result.output == helpers.dedent(f'''
        mode = "local"
        project = "foo"
        shell = ""
        [dirs]
        project = []
        python = "isolated"
        data = "{default_data_directory}"
        cache = "{default_cache_directory}"
        [dirs.env]
        [projects]
        [publish.index]
        repo = "main"
        auth = "bar"
        [template]
        name = "Foo Bar"
        email = ""
        [template.licenses]
        headers = true
        default = [
          "MIT",
        ]
        [template.plugins.default]
        tests = true
        ci = false
        src-layout = true
        [terminal.styles]
        info = "bold"
        success = "bold cyan"
        error = "bold red"
        warning = "bold yellow"
        waiting = "bold magenta"
        debug = "bold"
        spinner = "simpleDotsScrolling"
        '''))
class AlreadyBuiltWheelError(Exception):
    """Raised when a build produces a wheel whose filename duplicates one
    already generated earlier in the same cibuildwheel run."""

    def __init__(self, wheel_name: str) -> None:
        msg = textwrap.dedent(f'''
            cibuildwheel: Build failed because a wheel named {wheel_name} was already generated in the current run.
            If you expected another wheel to be generated, check your project configuration, or run
            cibuildwheel with CIBW_BUILD_VERBOSITY=1 to view build logs.
        ''')
        super().__init__(msg)
def decode_spans(start: np.ndarray, end: np.ndarray, topk: int, max_answer_len: int, undesired_tokens: np.ndarray) -> Tuple:
    """Pick the top-k answer spans from per-token start/end scores.

    A span (i, j) with j >= i and length <= max_answer_len is scored as
    start[i] * end[j]; the k best are kept, then spans touching tokens whose
    `undesired_tokens` entry is zero are dropped.

    Returns (starts, ends, scores) as 1-D arrays.
    """
    # Promote 1-D score vectors to a leading batch axis of size 1.
    if start.ndim == 1:
        start = start[None]
    if end.ndim == 1:
        end = end[None]
    # span_scores[b, i, j] = start[b, i] * end[b, j]
    span_scores = np.matmul(np.expand_dims(start, -1), np.expand_dims(end, 1))
    # Keep only j >= i (triu) and span length <= max_answer_len (tril offset).
    span_scores = np.tril(np.triu(span_scores), max_answer_len - 1)
    flat = span_scores.flatten()
    if topk == 1:
        best = [np.argmax(flat)]
    elif len(flat) < topk:
        best = np.argsort(-flat)
    else:
        # argpartition then sort just the top-k partition: O(n + k log k)
        partition = np.argpartition(-flat, topk)[0:topk]
        best = partition[np.argsort(-flat[partition])]
    starts, ends = np.unravel_index(best, span_scores.shape)[1:]
    allowed = np.isin(starts, undesired_tokens.nonzero()) & np.isin(ends, undesired_tokens.nonzero())
    starts = starts[allowed]
    ends = ends[allowed]
    scores = span_scores[0, starts, ends]
    return (starts, ends, scores)
def test_connection_list_tables():
    """Connection.list_tables must forward its kwargs as DynamoDB request
    parameters and raise TableError when botocore fails."""
    with patch(PATCH_METHOD) as req:
        req.return_value = LIST_TABLE_DATA
        conn = Connection(REGION)
        conn.list_tables(exclusive_start_table_name='Thread')
        assert (req.call_args[0][1] == {'ExclusiveStartTableName': 'Thread'})
    with patch(PATCH_METHOD) as req:
        req.return_value = LIST_TABLE_DATA
        conn = Connection(REGION)
        conn.list_tables(limit=3)
        assert (req.call_args[0][1] == {'Limit': 3})
    with patch(PATCH_METHOD) as req:
        req.return_value = LIST_TABLE_DATA
        conn = Connection(REGION)
        conn.list_tables()
        # no kwargs -> empty request parameters
        assert (req.call_args[0][1] == {})
    with patch(PATCH_METHOD) as req:
        req.side_effect = BotoCoreError
        conn = Connection(REGION)
        with pytest.raises(TableError):
            conn.list_tables()
class Bottleneck(nn.Module):
    """ResNet bottleneck block (1x1 -> 3x3 -> 1x1 convs) with optional
    deformable 3x3 conv and a 1x1 projection shortcut when channels change."""
    def __init__(self, in_channels, bottleneck_channels, out_channels, num_groups, stride_in_1x1, stride, dilation, norm_func=BatchNorm2d, dcn_config={}):
        super(Bottleneck, self).__init__()
        # Projection shortcut only when the channel count changes.
        self.downsample = None
        if (in_channels != out_channels):
            # Dilated stages keep stride 1 on the shortcut.
            down_stride = (stride if (dilation == 1) else 1)
            self.downsample = nn.Sequential(Conv2d(in_channels, out_channels, kernel_size=1, stride=down_stride, bias=False), norm_func(out_channels))
            for modules in [self.downsample]:
                for l in modules.modules():
                    if isinstance(l, Conv2d):
                        nn.init.kaiming_uniform_(l.weight, a=1)
        if (dilation > 1):
            stride = 1
        # Place the stride on the 1x1 or the 3x3 conv depending on the variant
        # (original ResNet vs. the "v1.5" torchvision-style block).
        (stride_1x1, stride_3x3) = ((stride, 1) if stride_in_1x1 else (1, stride))
        self.conv1 = Conv2d(in_channels, bottleneck_channels, kernel_size=1, stride=stride_1x1, bias=False)
        self.bn1 = norm_func(bottleneck_channels)
        # NOTE: dcn_config={} mutable default is only read via .get() here.
        with_dcn = dcn_config.get('stage_with_dcn', False)
        if with_dcn:
            deformable_groups = dcn_config.get('deformable_groups', 1)
            with_modulated_dcn = dcn_config.get('with_modulated_dcn', False)
            self.conv2 = DFConv2d(bottleneck_channels, bottleneck_channels, with_modulated_dcn=with_modulated_dcn, kernel_size=3, stride=stride_3x3, groups=num_groups, dilation=dilation, deformable_groups=deformable_groups, bias=False)
        else:
            self.conv2 = Conv2d(bottleneck_channels, bottleneck_channels, kernel_size=3, stride=stride_3x3, padding=dilation, bias=False, groups=num_groups, dilation=dilation)
            nn.init.kaiming_uniform_(self.conv2.weight, a=1)
        self.bn2 = norm_func(bottleneck_channels)
        self.conv3 = Conv2d(bottleneck_channels, out_channels, kernel_size=1, bias=False)
        self.bn3 = norm_func(out_channels)
        for l in [self.conv1, self.conv3]:
            nn.init.kaiming_uniform_(l.weight, a=1)
    def forward(self, x):
        """conv-bn-relu x3 plus the (possibly projected) identity shortcut."""
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = F.relu_(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = F.relu_(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if (self.downsample is not None):
            identity = self.downsample(x)
        out += identity
        out = F.relu_(out)
        return out
class MaxPool3x3Conv1x1(BaseOp):
    """NAS cell op: 3x3 max-pool (stride 1, same padding) followed by a
    1x1 conv-BN-ReLU projecting to `channels`."""
    def build(self, inputs, channels):
        with tf.variable_scope('MaxPool3x3-Conv1x1'):
            net = tf.layers.max_pooling2d(inputs=inputs, pool_size=(3, 3), strides=(1, 1), padding='same', data_format=self.data_format)
            net = conv_bn_relu(net, 1, channels, self.is_training, self.data_format)
        return net
def initialize_model(train, input_vocab, output_vocab, max_len=10, hidden_size=256, dropout_p=0.5, bidirectional=True, n_beam=5):
    """Build an attention seq2seq model plus its Adam optimizer and a
    per-epoch StepLR scheduler.

    Note: `n_beam` is accepted but unused in this function.
    Returns (seq2seq, optimizer, scheduler).
    """
    encoder = EncoderRNN(len(input_vocab), max_len, hidden_size, bidirectional=bidirectional, variable_lengths=True)
    # Decoder hidden size doubles when the encoder is bidirectional.
    decoder = DecoderRNN(len(output_vocab), max_len, (hidden_size * (2 if bidirectional else 1)), dropout_p=dropout_p, use_attention=True, bidirectional=bidirectional, eos_id=train.tgt_field.eos_id, sos_id=train.tgt_field.sos_id)
    seq2seq = Seq2seq(encoder, decoder)
    # NOTE(review): GPU transfer is hard-disabled; this looks like it was
    # `torch.cuda.is_available()` once — confirm before re-enabling.
    if False:
        seq2seq = seq2seq.cuda()
    # Small uniform weight init, standard for this seq2seq recipe.
    for param in seq2seq.parameters():
        param.data.uniform_((- 0.08), 0.08)
    optimizer = Optimizer(torch.optim.Adam(seq2seq.parameters()), max_grad_norm=5)
    scheduler = StepLR(optimizer.optimizer, 1)
    optimizer.set_scheduler(scheduler)
    return (seq2seq, optimizer, scheduler)
def validate_list_of_str(name, data):
    """distutils setup-keyword validator: `data[name]`, when present, must
    be a list of strings.

    Raises DistutilsSetupError otherwise; absent keys are accepted.
    """
    if name not in data:
        return
    value = data[name]
    if not isinstance(value, list):
        raise DistutilsSetupError('"%s" should be a list' % name)
    if not all(isinstance(item, str) for item in value):
        raise DistutilsSetupError('"%s" should be a list of strings' % name)
def test_unused_udp_port_factory_duplicate(unused_udp_port_factory, monkeypatch):
    """The factory must retry when the port allocator keeps returning a
    port it already handed out, eventually producing a fresh one."""
    counter = 0
    def mock_unused_udp_port(_ignored):
        # Return the same port for the first few calls to force retries.
        nonlocal counter
        counter += 1
        if (counter < 5):
            return 10000
        else:
            return (10000 + counter)
    monkeypatch.setattr(pytest_asyncio.plugin, '_unused_port', mock_unused_udp_port)
    assert (unused_udp_port_factory() == 10000)
    # Second request must not reuse 10000 despite the allocator repeating it.
    assert (unused_udp_port_factory() > 10000)
# NOTE(review): leading '@' appears stripped from this register decorator.
_funcify.register(ptr.BernoulliRV)
def numba_funcify_BernoulliRV(op, node, **kwargs):
    """Numba lowering for Bernoulli sampling: emit source that returns 1
    with probability `a` (uniform draw <= a) cast to the output dtype."""
    out_dtype = node.outputs[1].type.numpy_dtype
    def body_fn(a):
        # Template consumed by create_numba_random_fn; `a` is the parameter
        # expression name substituted into the generated function body.
        return f'''
            if {a} < np.random.uniform(0, 1):
                return direct_cast(0, out_dtype)
            else:
                return direct_cast(1, out_dtype)
        '''
    return create_numba_random_fn(op, node, body_fn, {'out_dtype': out_dtype, 'direct_cast': numba_basic.direct_cast})
def show_result_pyplot(model, img, result, score_thr=0.3, title='result', wait_time=0):
    """Display detection `result` on `img` via the model's own show_result().

    Unwraps DataParallel-style wrappers (uses `.module` when present) before
    delegating; boxes and text are drawn in a fixed blue color.
    """
    model = getattr(model, 'module', model)
    model.show_result(img, result, score_thr=score_thr, show=True, wait_time=wait_time, win_name=title, bbox_color=(72, 101, 241), text_color=(72, 101, 241))
def _fuzzy_contiguity(geoms, ids, tolerance=None, buffer=None, predicate='intersects'):
if ((buffer is not None) and (tolerance is not None)):
raise ValueError('Only one of `tolerance` and `buffer` can be speciifed, not both.')
if (not isinstance(geoms, geopandas.base.GeoPandasBase)):
geoms = geopandas.GeoSeries(geoms, index=ids)
if (tolerance is not None):
(minx, miny, maxx, maxy) = geoms.total_bounds
buffer = ((tolerance * 0.5) * abs(min((maxx - minx), (maxy - miny))))
if (buffer is not None):
geoms = geoms.buffer(buffer)
if GPD_013:
(head, tail) = geoms.sindex.query(geoms.geometry, predicate=predicate)
else:
(head, tail) = geoms.sindex.query_bulk(geoms.geometry, predicate=predicate)
itself = (head == tail)
heads = ids[head[(~ itself)]]
tails = ids[tail[(~ itself)]]
weights = numpy.ones_like(heads, dtype=int)
return _resolve_islands(heads, tails, ids.values, weights=weights) |
def test_etuples():
    """etuple/apply/etuplize round-trips for PyTensor ops: applying an
    etuple builds the graph, etuplizing a graph recovers the op, and raw
    Ops cannot be etuplized."""
    x_pt = pt.vector('x')
    y_pt = pt.vector('y')
    z_pt = etuple(x_pt, y_pt)
    res = apply(pt.add, z_pt)
    assert (res.owner.op == pt.add)
    assert (res.owner.inputs == [x_pt, y_pt])
    # An etuple headed by the op evaluates to the applied graph.
    w_pt = etuple(pt.add, x_pt, y_pt)
    res = w_pt.evaled_obj
    assert (res.owner.op == pt.add)
    assert (res.owner.inputs == [x_pt, y_pt])
    op1_np = CustomOpNoProps(1)
    res = apply(op1_np, z_pt)
    assert (res.owner.op == op1_np)
    q_pt = op1_np(x_pt, y_pt)
    res = etuplize(q_pt)
    assert (res[0] == op1_np)
    # A bare Op (not a graph) is not etuplizable.
    with pytest.raises(TypeError):
        etuplize(op1_np)
    class MyMultiOutOp(Op):
        def make_node(self, *inputs):
            outputs = [MyType()(), MyType()()]
            return Apply(self, list(inputs), outputs)
        def perform(self, node, inputs, outputs):
            # NOTE(review): pytensor perform() usually assigns into storage
            # cells (outputs[i][0] = ...); confirm this stub is intentional.
            outputs[0] = np.array(inputs[0])
            outputs[1] = np.array(inputs[0])
    x_pt = pt.vector('x')
    op1_np = MyMultiOutOp()
    # Multi-output ops: apply() returns one result per output.
    res = apply(op1_np, etuple(x_pt))
    assert (len(res) == 2)
    assert (res[0].owner.op == op1_np)
    assert (res[1].owner.op == op1_np)
def test_central_subprocess(testdir):
    """Coverage collected centrally must include lines run in a child
    process spawned by the parent script."""
    scripts = testdir.makepyfile(parent_script=SCRIPT_PARENT, child_script=SCRIPT_CHILD)
    parent_script = scripts.dirpath().join('parent_script.py')
    result = testdir.runpytest('-v', f'--cov={scripts.dirpath()}', '--cov-report=term-missing', parent_script)
    # Both the parent and the subprocess-executed child must appear in the report.
    result.stdout.fnmatch_lines(['*- coverage: platform *, python * -*', f'child_script* {CHILD_SCRIPT_RESULT}*', f'parent_script* {PARENT_SCRIPT_RESULT}*'])
    assert (result.ret == 0)
def test_cast_tensor_type():
    """cast_tensor_type must convert tensors matching src_type to dst_type,
    recurse through dicts and lists, and pass every other value through."""
    # plain tensor: dtype converted
    inputs = torch.FloatTensor([5.0])
    src_type = torch.float32
    dst_type = torch.int32
    outputs = cast_tensor_type(inputs, src_type, dst_type)
    assert isinstance(outputs, torch.Tensor)
    assert (outputs.dtype == dst_type)
    # non-tensor values pass through untouched
    inputs = 'tensor'
    src_type = str
    dst_type = str
    outputs = cast_tensor_type(inputs, src_type, dst_type)
    assert isinstance(outputs, str)
    inputs = np.array([5.0])
    src_type = np.ndarray
    dst_type = np.ndarray
    outputs = cast_tensor_type(inputs, src_type, dst_type)
    assert isinstance(outputs, np.ndarray)
    # dict values are converted recursively
    inputs = dict(tensor_a=torch.FloatTensor([1.0]), tensor_b=torch.FloatTensor([2.0]))
    src_type = torch.float32
    dst_type = torch.int32
    outputs = cast_tensor_type(inputs, src_type, dst_type)
    assert isinstance(outputs, dict)
    assert (outputs['tensor_a'].dtype == dst_type)
    assert (outputs['tensor_b'].dtype == dst_type)
    # list items are converted recursively
    inputs = [torch.FloatTensor([1.0]), torch.FloatTensor([2.0])]
    src_type = torch.float32
    dst_type = torch.int32
    outputs = cast_tensor_type(inputs, src_type, dst_type)
    assert isinstance(outputs, list)
    assert (outputs[0].dtype == dst_type)
    assert (outputs[1].dtype == dst_type)
    # scalars are untouched even with no type mapping
    inputs = 5
    outputs = cast_tensor_type(inputs, None, None)
    assert isinstance(outputs, int)
(qseis.have_backend(), 'backend qseis not available')
class GFQSeisTestCase(unittest.TestCase):
tempdirs = []
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
def tearDownClass(cls):
for d in cls.tempdirs:
shutil.rmtree(d)
def test_pyrocko_gf_vs_qseis(self):
random.seed(2017)
mod = cake.LayeredModel.from_scanlines(cake.read_nd_model_str('\n 0. 5.8 3.46 2.6 1264. 600.\n 20. 5.8 3.46 2.6 1264. 600.\n 20. 6.5 3.85 2.9 1283. 600.\n 35. 6.5 3.85 2.9 1283. 600.\nmantle\n 35. 8.04 4.48 3.58 1449. 600.\n 77.5 8.045 4.49 3.5 1445. 600.\n 77.5 8.045 4.49 3.5 180.6 75.\n 120. 8.05 4.5 3.427 180. 75.\n 120. 8.05 4.5 3.427 182.6 76.06\n 165. 8.175 4.509 3.371 188.7 76.55\n 210. 8.301 4.518 3.324 201. 79.4\n 210. 8.3 4.52 3.321 336.9 133.3\n 410. 9.03 4.871 3.504 376.5 146.1\n 410. 9.36 5.08 3.929 414.1 162.7\n 660. 10.2 5.611 3.918 428.5 172.9\n 660. 10.79 5.965 4.229 1349. 549.6'.lstrip()))
store_dir = mkdtemp(prefix='gfstore')
self.tempdirs.append(store_dir)
qsconf = qseis.QSeisConfig()
qsconf.qseis_version = '2006b'
qsconf.time_region = (gf.meta.Timing('0'), gf.meta.Timing('end+100'))
qsconf.cut = (gf.meta.Timing('0'), gf.meta.Timing('end+100'))
qsconf.wavelet_duration_samples = 0.001
qsconf.sw_flat_earth_transform = 0
config = gf.meta.ConfigTypeA(id='qseis_test', sample_rate=0.25, receiver_depth=(0.0 * km), source_depth_min=(10 * km), source_depth_max=(10 * km), source_depth_delta=(1 * km), distance_min=(550 * km), distance_max=(560 * km), distance_delta=(1 * km), modelling_code_id='qseis.2006b', earthmodel_1d=mod, tabulated_phases=[gf.meta.TPDef(id='begin', definition='p,P,p\\,P\\'), gf.meta.TPDef(id='end', definition='2.5')])
config.validate()
gf.store.Store.create_editables(store_dir, config=config, extra={'qseis': qsconf})
store = gf.store.Store(store_dir, 'r')
store.make_travel_time_tables()
store.close()
try:
qseis.build(store_dir, nworkers=1)
except qseis.QSeisError as e:
if (str(e).find('could not start qseis') != (- 1)):
logger.warning('qseis not installed; skipping test_pyrocko_gf_vs_qseis')
return
else:
raise
source = gf.MTSource(lat=0.0, lon=0.0, depth=(10.0 * km))
source.m6 = tuple((((random.random() * 2.0) - 1.0) for x in range(6)))
azi = (random.random() * 365.0)
dist = (553.0 * km)
dnorth = (dist * math.cos((azi * d2r)))
deast = (dist * math.sin((azi * d2r)))
targets = []
for cha in 'rtz':
target = gf.Target(quantity='displacement', codes=('', '0000', 'PG', cha), north_shift=dnorth, east_shift=deast, depth=config.receiver_depth, store_id='qseis_test')
dist = source.distance_to(target)
(azi, bazi) = source.azibazi_to(target)
if (cha == 'r'):
target.azimuth = (bazi + 180.0)
target.dip = 0.0
elif (cha == 't'):
target.azimuth = (bazi - 90.0)
target.dip = 0.0
elif (cha == 'z'):
target.azimuth = 0.0
target.dip = 90.0
targets.append(target)
runner = qseis.QSeisRunner()
conf = qseis.QSeisConfigFull()
conf.qseis_version = '2006b'
conf.receiver_distances = [(dist / km)]
conf.receiver_azimuths = [azi]
conf.source_depth = (source.depth / km)
conf.time_start = 0.0
conf.time_window = 508.0
conf.time_reduction_velocity = 0.0
conf.nsamples = 128
conf.source_mech = qseis.QSeisSourceMechMT(mnn=source.mnn, mee=source.mee, mdd=source.mdd, mne=source.mne, mnd=source.mnd, med=source.med)
conf.earthmodel_1d = mod
conf.sw_flat_earth_transform = 0
runner.run(conf)
trs = runner.get_traces()
for tr in trs:
tr.shift((- config.deltat))
tr.snap(interpolate=True)
tr.lowpass(4, 0.05)
tr.highpass(4, 0.01)
engine = gf.LocalEngine(store_dirs=[store_dir])
def process_wrap(nthreads=0):
(('pyrocko.gf.process (nthreads-%d)' % nthreads))
def process(nthreads):
return engine.process(source, targets, nthreads=nthreads).pyrocko_traces()
return process(nthreads)
for nthreads in range(1, (cpu_count() + 1)):
trs2 = process_wrap(nthreads)
for tr in trs2:
tr.snap(interpolate=True)
tr.lowpass(4, 0.05)
tr.highpass(4, 0.01)
for cha in 'rtz':
t1 = g(trs, cha)
t2 = g(trs2, cha)
tmin = max(t1.tmin, t2.tmin)
tmax = min(t1.tmax, t2.tmax)
t1.chop(tmin, tmax)
t2.chop(tmin, tmax)
d = ((2.0 * num.sum(((t1.ydata - t2.ydata) ** 2))) / (num.sum((t1.ydata ** 2)) + num.sum((t2.ydata ** 2))))
assert (d < 0.05)
def test_qseis_vs_ahfull(self):
    """Cross-check QSEIS numerical Green's functions against the analytical
    ahfullgreen solution on an effectively homogeneous model.

    Builds two GF stores over identical geometry (one per modelling code),
    fires a random moment-tensor source at several random receiver
    placements, band-passes both trace sets identically, and requires the
    normalized misfit per component to stay below 5%.
    """
    random.seed(23)  # deterministic random geometry / mechanism
    vp = (5.8 * km)
    vs = (3.46 * km)
    # Two layers with identical properties => homogeneous medium, so the
    # analytical full-space solution is applicable.
    mod = cake.LayeredModel.from_scanlines(cake.read_nd_model_str(('\n 0. %(vp)g %(vs)g 2.6 1264. 600.\n 20. %(vp)g %(vs)g 2.6 1264. 600.'.lstrip() % dict(vp=(vp / km), vs=(vs / km)))))
    store_id_qseis = 'homogeneous_qseis'
    store_id_ahfull = 'homogeneous_ahfull'
    ahconf = ahfullgreen.AhfullgreenConfig()
    qsconf = qseis.QSeisConfig()
    qsconf.qseis_version = '2006b'
    textra = 5.0  # padding (s) around the P-to-S window
    # Extract / taper traces between P arrival - textra and S arrival + textra.
    qsconf.time_region = (gf.meta.Timing(('{vel:%g}-%g' % ((vp / km), textra))), gf.meta.Timing(('{vel:%g}+%g' % ((vs / km), textra))))
    qsconf.cut = (gf.meta.Timing(('{vel:%g}-%g' % ((vp / km), textra))), gf.meta.Timing(('{vel:%g}+%g' % ((vs / km), textra))))
    qsconf.relevel_with_fade_in = True
    qsconf.fade = (gf.meta.Timing(('{vel:%g}-%g' % ((vp / km), textra))), gf.meta.Timing(('{vel:%g}-%g' % ((vp / km), 0.0))), gf.meta.Timing(('{vel:%g}+%g' % ((vs / km), 0.0))), gf.meta.Timing(('{vel:%g}+%g' % ((vs / km), textra))))
    qsconf.wavelet_duration_samples = 0.001
    qsconf.sw_flat_earth_transform = 0
    qsconf.filter_surface_effects = 1
    qsconf.wavenumber_sampling = 5.0
    qsconf.aliasing_suppression_factor = 0.01
    qsconf.source_disk_radius = 0.0
    sample_rate = 10.0
    # QSEIS-backed store.
    config = gf.meta.ConfigTypeA(id=store_id_qseis, sample_rate=sample_rate, receiver_depth=(0.0 * km), source_depth_min=(1.0 * km), source_depth_max=(19 * km), source_depth_delta=(6.0 * km), distance_min=(2.0 * km), distance_max=(20 * km), distance_delta=(2 * km), modelling_code_id='qseis.2006b', earthmodel_1d=mod, tabulated_phases=[gf.meta.TPDef(id='begin', definition='p,P,p\\,P\\'), gf.meta.TPDef(id='end', definition='s,S,s\\,S\\')])
    config.validate()
    store_dir_qseis = mkdtemp(prefix=store_id_qseis)
    self.tempdirs.append(store_dir_qseis)
    gf.store.Store.create_editables(store_dir_qseis, config=config, extra={'qseis': qsconf})
    store = gf.store.Store(store_dir_qseis, 'r')
    store.make_travel_time_tables()
    store.close()
    try:
        qseis.build(store_dir_qseis, nworkers=1)
    except qseis.QSeisError as e:
        # QSEIS is an external Fortran binary; skip gracefully if missing.
        if (str(e).find('could not start qseis') != (- 1)):
            logger.warning('qseis not installed; skipping test_pyrocko_gf_vs_qseis')
            return
        else:
            raise
    # Analytical (ahfullgreen) store over the same geometry.
    config = gf.meta.ConfigTypeA(id=store_id_ahfull, sample_rate=sample_rate, receiver_depth=(0.0 * km), source_depth_min=(1.0 * km), source_depth_max=(19 * km), source_depth_delta=(6.0 * km), distance_min=(2.0 * km), distance_max=(20 * km), distance_delta=(2 * km), modelling_code_id='ahfullgreen', earthmodel_1d=mod, tabulated_phases=[gf.meta.TPDef(id='begin', definition='p,P,p\\,P\\'), gf.meta.TPDef(id='end', definition='s,S,s\\,S\\')])
    config.validate()
    store_dir_ahfull = mkdtemp(prefix=store_id_qseis)
    self.tempdirs.append(store_dir_ahfull)
    gf.store.Store.create_editables(store_dir_ahfull, config=config, extra={'ahfullgreen': ahconf})
    store = gf.store.Store(store_dir_ahfull, 'r')
    store.make_travel_time_tables()
    store.close()
    ahfullgreen.build(store_dir_ahfull, nworkers=1)
    # Random source depth snapped onto the store's depth grid.
    sdepth = rand(config.source_depth_min, config.source_depth_max)
    sdepth = ((round(((sdepth - config.source_depth_min) / config.source_depth_delta)) * config.source_depth_delta) + config.source_depth_min)
    source = gf.MTSource(lat=0.0, lon=0.0, depth=sdepth)
    source.m6 = tuple((rand((- 1.0), 1.0) for x in range(6)))  # random moment tensor
    for ii in range(5):  # several random receiver placements
        azi = (random.random() * 365.0)  # NOTE(review): 365 looks like a typo for 360 -- harmless but confirm
        dist = rand(config.distance_min, config.distance_max)
        dist = (round((dist / config.distance_delta)) * config.distance_delta)  # snap to distance grid
        dnorth = (dist * math.cos((azi * d2r)))
        deast = (dist * math.sin((azi * d2r)))
        targets = []
        # One target per component: radial, transverse, vertical.
        for cha in 'rtz':
            target = gf.Target(quantity='displacement', codes=('', '0000', 'PG', cha), north_shift=dnorth, east_shift=deast, depth=config.receiver_depth, store_id=store_id_ahfull)
            dist = source.distance_to(target)
            (azi, bazi) = source.azibazi_to(target)
            if (cha == 'r'):
                target.azimuth = (bazi + 180.0)
                target.dip = 0.0
            elif (cha == 't'):
                target.azimuth = (bazi - 90.0)
                target.dip = 0.0
            elif (cha == 'z'):
                target.azimuth = 0.0
                target.dip = 90.0
            targets.append(target)
        # Reference run: drive QSEIS directly for this exact geometry.
        runner = qseis.QSeisRunner()
        conf = qseis.QSeisConfigFull()
        conf.qseis_version = '2006b'
        conf.receiver_distances = [(dist / km)]
        conf.receiver_azimuths = [azi]
        conf.receiver_depth = (config.receiver_depth / km)
        conf.source_depth = (source.depth / km)
        # Window long enough for the slowest (S) arrival at max 3-D distance.
        distance_3d_max = math.sqrt(((config.distance_max ** 2) + ((config.source_depth_max - config.source_depth_min) ** 2)))
        nsamples = trace.nextpow2(int((math.ceil((((distance_3d_max / vs) * 2.0) + (2.0 * textra))) * config.sample_rate)))
        conf.time_start = (- textra)
        conf.time_window = ((nsamples - 1) / config.sample_rate)
        conf.time_reduction_velocity = 0.0
        conf.nsamples = nsamples
        conf.source_mech = qseis.QSeisSourceMechMT(mnn=source.mnn, mee=source.mee, mdd=source.mdd, mne=source.mne, mnd=source.mnd, med=source.med)
        conf.earthmodel_1d = mod
        conf.sw_flat_earth_transform = 0
        conf.filter_surface_effects = 1
        conf.wavenumber_sampling = 10.0
        conf.wavelet_duration_samples = 0.001
        conf.aliasing_suppression_factor = 0.01
        conf.validate()
        runner.run(conf)
        trs = runner.get_traces()
        for tr in trs:
            pass  # NOTE(review): stray 'pass'; looks like residue of a removed statement -- confirm against upstream
            tr.lowpass(4, (config.sample_rate / 8.0), demean=False)
            tr.highpass(4, (config.sample_rate / 80.0))
        # Comparison run: same source/targets through the GF engine (ahfull store).
        engine = gf.LocalEngine(store_dirs=[store_dir_ahfull, store_dir_qseis])
        trs2 = engine.process(source, targets).pyrocko_traces()
        for tr in trs2:
            tr.shift(config.deltat)
            tr.lowpass(4, (config.sample_rate / 8.0), demean=False)
            tr.highpass(4, (config.sample_rate / 80.0))
        # Restrict comparison to the P..S window (with 20% of textra margin).
        tmin = (store.t(('{vel:%g}' % (vp / km)), source, target) - (textra * 0.2))
        tmax = (store.t(('{vel:%g}' % (vs / km)), source, target) + (textra * 0.2))
        for tr in (trs + trs2):
            tr.chop(tmin, tmax)
        # Normalized L2 misfit per component; shared energy denominator.
        denom = 0.0
        for cha in 'rtz':
            t1 = g(trs, cha)
            t2 = g(trs2, cha)
            denom += (num.sum((t1.ydata ** 2)) + num.sum((t2.ydata ** 2)))
        ds = []
        for cha in 'rtz':
            t1 = g(trs, cha)
            t2 = g(trs2, cha)
            ds.append(((2.0 * num.sum(((t1.ydata - t2.ydata) ** 2))) / denom))
        ds = num.array(ds)
        assert num.all((ds < 0.05))
class DepthwiseSeparableConv(nn.Module):
    """Depthwise-separable 3D convolution.

    A per-channel (grouped) spatial convolution followed by a 1x1x1 pointwise
    convolution that mixes channels; "same" padding is derived from the
    kernel size (scalar or per-dimension list).
    """

    def __init__(self, in_ch, out_ch, stride=1, kernel_size=3, bias=False):
        super().__init__()
        # Half-kernel padding keeps spatial size for odd kernels.
        if isinstance(kernel_size, list):
            same_pad = [k // 2 for k in kernel_size]
        else:
            same_pad = kernel_size // 2
        self.depthwise = nn.Conv3d(
            in_channels=in_ch,
            out_channels=in_ch,
            kernel_size=kernel_size,
            stride=stride,
            padding=same_pad,
            groups=in_ch,  # one filter per input channel
            bias=bias,
        )
        self.pointwise = nn.Conv3d(
            in_channels=in_ch,
            out_channels=out_ch,
            kernel_size=1,
            stride=1,
            padding=0,
            groups=1,
            bias=bias,
        )

    def forward(self, x):
        """Apply depthwise then pointwise convolution."""
        return self.pointwise(self.depthwise(x))
def language_eval(dataset, preds, model_id, split):
    """Score caption predictions against COCO val2014 ground truth.

    Filters `preds` down to image ids present in the annotation file, dumps
    them to a JSON cache under eval_results/, runs COCOEvalCap, and returns
    a dict of metric name -> score. The cache file is then overwritten with
    both overall scores and per-image results (including the caption).

    Note: `dataset` is accepted but unused in this body.
    """
    import sys
    sys.path.append('coco-caption')  # make pycocotools/pycocoevalcap importable
    annFile = 'coco-caption/annotations/captions_val2014.json'
    from pycocotools.coco import COCO
    from pycocoevalcap.eval import COCOEvalCap
    if (not os.path.isdir('eval_results')):
        os.mkdir('eval_results')
    cache_path = os.path.join('eval_results/', (((model_id + '_') + split) + '.json'))
    coco = COCO(annFile)
    valids = coco.getImgIds()
    # Scoring only works for images that exist in the annotation set.
    preds_filt = [p for p in preds if (p['image_id'] in valids)]
    print(('using %d/%d predictions' % (len(preds_filt), len(preds))))
    # COCO.loadRes reads predictions from a file, so serialize first.
    json.dump(preds_filt, open(cache_path, 'w'))
    cocoRes = coco.loadRes(cache_path)
    cocoEval = COCOEvalCap(coco, cocoRes)
    cocoEval.params['image_id'] = cocoRes.getImgIds()
    cocoEval.evaluate()
    out = {}
    for (metric, score) in cocoEval.eval.items():
        out[metric] = score
    # Attach each predicted caption to its per-image evaluation record.
    imgToEval = cocoEval.imgToEval
    for p in preds_filt:
        (image_id, caption) = (p['image_id'], p['caption'])
        imgToEval[image_id]['caption'] = caption
    with open(cache_path, 'w') as outfile:
        json.dump({'overall': out, 'imgToEval': imgToEval}, outfile)
    return out
def batched(iterable, n):
    """Yield successive lists of at most *n* items from *iterable*.

    The final batch may be shorter than *n*. This is the more-itertools
    compatibility shim for :func:`itertools.batched`.

    FIX: the hexversion threshold constant was missing (syntax error);
    restored to 0x30D00A2, i.e. Python 3.13.0a2, where the stdlib
    itertools.batched became the recommended replacement.
    """
    if (hexversion >= 0x30D00A2):
        warnings.warn('batched will be removed in a future version of more-itertools. Use the standard library itertools.batched function instead', DeprecationWarning)
    it = iter(iterable)
    while True:
        batch = list(islice(it, n))
        if (not batch):
            break
        (yield batch)
@pytest.mark.parametrize('replace_missing_translation', boolean_toggle)
@pytest.mark.parametrize('test_language', test_languages)
def test_translationmixin_trans_empty_field(settings, replace_missing_translation, test_language):
    """trans() must fall back to the other language only when the active
    language's field is empty AND REPLACE_MISSING_TRANSLATION is enabled.

    FIX: the two decorators above had lost their '@pytest.mark' prefix in
    extraction ('.parametrize(...)' alone is a syntax error); restored.
    """
    settings.REPLACE_MISSING_TRANSLATION = replace_missing_translation
    empty_lang = 'en'
    settings.LANGUAGE_CODE = test_language
    instance = TestTranslationModel()
    # Blank out the lang1 ('en') fields so fallback can trigger.
    instance.title_lang1 = ''
    instance.text_lang1 = ''
    other_lang = ('de' if (settings.LANGUAGE_CODE == 'en') else 'en')
    if ((settings.LANGUAGE_CODE == empty_lang) and settings.REPLACE_MISSING_TRANSLATION):
        # Empty active-language fields are replaced by the other language.
        assert (instance.trans('title') == getattr(instance, test_lang_mapper[other_lang]['title']))
        assert (instance.trans('text') == getattr(instance, test_lang_mapper[other_lang]['text']))
    else:
        # Otherwise the active language's (possibly empty) value is returned.
        assert (instance.trans('title') == getattr(instance, test_lang_mapper[settings.LANGUAGE_CODE]['title']))
        assert (instance.trans('text') == getattr(instance, test_lang_mapper[settings.LANGUAGE_CODE]['text']))
    del instance
def _get_abc_helper(view_vector, sat_pos, ellipsoid):
flat2 = ((1 - ellipsoid.flattening) ** 2)
(ux, uy, uz) = (view_vector.x, view_vector.y, view_vector.z)
(x, y, z) = (sat_pos.x, sat_pos.y, sat_pos.z)
a = ((flat2 * ((ux ** 2) + (uy ** 2))) + (uz ** 2))
b = ((flat2 * ((x * ux) + (y * uy))) + (z * uz))
c = ((flat2 * (((x ** 2) + (y ** 2)) - (ellipsoid.equatorial_radius ** 2))) + (z ** 2))
return (a, b, c) |
def test_unionize_dataframe_categories(uniontest_df1, uniontest_df2, uniontest_df3):
    """unionize_dataframe_categories must give shared columns identical
    category sets across frames, without leaking columns between frames or
    mutating the inputs."""
    (udf1, udf2, udf3) = janitor.unionize_dataframe_categories(uniontest_df1, uniontest_df2, uniontest_df3)
    # Shared categorical columns now carry the union of categories.
    assert (set(udf1['jerbs'].dtype.categories) == set(udf2['jerbs'].dtype.categories))
    assert (set(udf1['jerbs'].dtype.categories) == set(udf3['jerbs'].dtype.categories))
    assert (set(udf1['fruits'].dtype.categories) == set(udf3['fruits'].dtype.categories))
    # NOTE(review): the line below duplicates the one above (udf1 vs udf3
    # twice); it likely was meant to compare udf1 vs udf2 -- confirm.
    assert (set(udf1['fruits'].dtype.categories) == set(udf3['fruits'].dtype.categories))
    assert (set(udf2['animals'].dtype.categories) == set(udf3['animals'].dtype.categories))
    # Columns exclusive to one frame must not appear in the others.
    assert ('df2_exclusive' not in udf1.columns)
    assert ('df2_exclusive' not in udf3.columns)
    assert ('df1_exclusive' not in udf2.columns)
    assert ('df1_exclusive' not in udf3.columns)
    assert ('df3_exclusive' not in udf1.columns)
    assert ('df3_exclusive' not in udf2.columns)
    assert ('animals' not in udf1.columns)
    # With unified categories, concatenation preserves the categorical dtype.
    udf = pd.concat([udf1, udf2, udf3], ignore_index=True)
    assert isinstance(udf['jerbs'].dtype, pd.CategoricalDtype)
    assert isinstance(udf['fruits'].dtype, pd.CategoricalDtype)
    # The originals are untouched except for category membership.
    assert_frame_equal(udf1, uniontest_df1, check_categorical=False)
    assert_frame_equal(udf2, uniontest_df2, check_categorical=False)
    assert_frame_equal(udf3, uniontest_df3, check_categorical=False)
@given(
    integers(min_value=0, max_value=100),
    integers(min_value=0, max_value=10000),
    integers(min_value=0, max_value=50000),
    integers(min_value=1, max_value=90000),
    integers(min_value=1, max_value=100000),
    integers(min_value=1, max_value=100000),
)
@settings(suppress_health_check=[HealthCheck.filter_too_much])
def test_fee_round_trip(flat_fee, prop_fee, imbalance_fee, amount, balance1, balance2):
    """Deducting mediation fees and adding them back must round-trip the
    payment amount to within one token.

    NOTE(review): the '@given'/'@settings' decorator prefixes, the last three
    strategy max_values and the TokenAmount channel capacity were lost in
    extraction; they are restored here from the upstream raiden test values
    (90000 / 100000 / 100000 and TokenAmount(100000)) -- confirm against the
    original module.
    """
    # Keep the amount safely below both channel balances.
    amount = int(min(amount, ((balance1 * 0.95) - 1), ((balance2 * 0.95) - 1)))
    assume((amount > 0))
    total_balance = TokenAmount(100000)
    prop_fee_per_channel = ppm_fee_per_channel(ProportionalFeeAmount(prop_fee))
    imbalance_fee = calculate_imbalance_fees(channel_capacity=total_balance, proportional_imbalance_fee=ProportionalFeeAmount(imbalance_fee))
    channel_in = factories.create(NettingChannelStateProperties(our_state=NettingChannelEndStateProperties(balance=(total_balance - balance1)), partner_state=NettingChannelEndStateProperties(balance=balance1), fee_schedule=FeeScheduleState(cap_fees=False, flat=FeeAmount(flat_fee), proportional=prop_fee_per_channel, imbalance_penalty=imbalance_fee)))
    channel_out = factories.create(NettingChannelStateProperties(our_state=NettingChannelEndStateProperties(balance=balance2), partner_state=NettingChannelEndStateProperties(balance=(total_balance - balance2)), fee_schedule=FeeScheduleState(cap_fees=False, flat=FeeAmount(flat_fee), proportional=prop_fee_per_channel, imbalance_penalty=imbalance_fee)))
    # Forward direction: how much must be sent to deliver `amount`?
    fee_calculation = get_initial_amount_for_amount_after_fees(amount_after_fees=PaymentAmount(amount), channels=[(channel_in, channel_out)])
    assume(fee_calculation)
    assert fee_calculation
    # Backward direction (no safety margin): should recover `amount` +/- 1.
    amount_without_margin_after_fees = get_amount_without_fees(amount_with_fees=fee_calculation.total_amount, channel_in=channel_in, channel_out=channel_out)
    assume(amount_without_margin_after_fees)
    assert (abs((amount - amount_without_margin_after_fees)) <= 1)
    # With the safety margin, at least `amount` must arrive.
    amount_with_fee_and_margin = calculate_safe_amount_with_fee(fee_calculation.amount_without_fees, FeeAmount(sum(fee_calculation.mediation_fees)))
    amount_with_margin_after_fees = get_amount_without_fees(amount_with_fees=amount_with_fee_and_margin, channel_in=channel_in, channel_out=channel_out)
    assume(amount_with_margin_after_fees)
    assert (amount_with_margin_after_fees >= amount)
class DummyDataset(FairseqDataset):
    """Minimal FairseqDataset stub for tests.

    Every index maps to itself, every item has the same fixed size, and
    collation always returns one pre-built canned batch.
    """

    def __init__(self, batch, num_items, item_size):
        super().__init__()
        self.batch = batch          # canned batch handed back by collater()
        self.num_items = num_items  # reported dataset length
        self.item_size = item_size  # uniform per-item size / token count

    def __getitem__(self, index):
        # Items are just their own indices.
        return index

    def __len__(self):
        return self.num_items

    def collater(self, samples):
        # Ignore `samples`: always return the pre-built batch.
        return self.batch

    def sizes(self):
        return np.full(self.num_items, self.item_size)

    def num_tokens(self, index):
        return self.item_size

    def size(self, index):
        return self.item_size

    def ordered_indices(self):
        return np.arange(self.num_items)

    def supports_prefetch(self):
        return False
class VocabWithLock(VocabBase):
    """Vocabulary whose per-word mutations are guarded by a supplied lock."""

    def __init__(self, words=(), lock=None):
        # Install the lock first: the base initializer may already call
        # word2index() while ingesting `words`.
        self.lock = lock
        super().__init__(words)

    def word2index(self, word, train=False):
        """Return the index of `word` (or of each word in a list/tuple).

        With train=True unknown words are added to the vocabulary; otherwise
        they are routed through the OOV handler. Counts are updated (by the
        truth value of `train`) under the lock either way.
        """
        if isinstance(word, (list, tuple)):
            return [self.word2index(item, train=train) for item in word]
        with self.lock:
            self.counts[word] += train  # bool acts as a 0/1 increment
            known = word in self._word2index
            if not known and not train:
                return self._handle_oov_word(word)
            if not known:
                # Append then map: the new index equals the pre-insertion size.
                self._index2word += [word]
                self._word2index[word] = len(self._word2index)
            return self._word2index[word]
class BlocksEndpoint(Endpoint):
    """Client surface for the Notion /blocks API (retrieve/update/delete)."""

    # Block-type payload keys the PATCH /blocks/{id} endpoint accepts.
    _UPDATE_KEYS = ('embed', 'type', 'archived', 'bookmark', 'image', 'video', 'pdf', 'file', 'audio', 'code', 'equation', 'divider', 'breadcrumb', 'table_of_contents', 'link_to_page', 'table_row', 'heading_1', 'heading_2', 'heading_3', 'paragraph', 'bulleted_list_item', 'numbered_list_item', 'quote', 'to_do', 'toggle', 'template', 'callout', 'synced_block', 'table')

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        self.children = BlocksChildrenEndpoint(*args, **kwargs)

    def retrieve(self, block_id: str, **kwargs: Any) -> SyncAsync[Any]:
        """Fetch a single block by id."""
        return self.parent.request(path=f'blocks/{block_id}', method='GET', auth=kwargs.get('auth'))

    def update(self, block_id: str, **kwargs: Any) -> SyncAsync[Any]:
        """Update a block; only whitelisted keys are forwarded in the body."""
        payload = pick(kwargs, *self._UPDATE_KEYS)
        return self.parent.request(path=f'blocks/{block_id}', method='PATCH', body=payload, auth=kwargs.get('auth'))

    def delete(self, block_id: str, **kwargs: Any) -> SyncAsync[Any]:
        """Archive (delete) a block by id."""
        return self.parent.request(path=f'blocks/{block_id}', method='DELETE', auth=kwargs.get('auth'))
def ql_syscall_dup(ql: Qiling, oldfd: int):
    """Emulate dup(2): duplicate `oldfd` into the lowest free descriptor slot.

    Returns the new descriptor number, -1 if `oldfd` is not open, or -EMFILE
    when the descriptor table is full.
    """
    fileobj = get_opened_fd(ql.os, oldfd)
    if fileobj is None:
        return -1
    # Lowest-numbered free slot, mirroring the kernel's allocation rule.
    newfd = -1
    for slot in range(NR_OPEN):
        if ql.os.fd[slot] is None:
            newfd = slot
            break
    if newfd == -1:
        return -EMFILE
    ql.os.fd[newfd] = fileobj.dup()
    ql.log.debug(f'dup({oldfd:d}) = {newfd:d}')
    return newfd
def rtn_fopen(se: 'SymbolicExecutor', pstate: 'ProcessState'):
    """Hook for the target's fopen(): route seed-defined inputs through an
    in-memory stream, otherwise open the host file; returns a file-descriptor
    id or NULL_PTR on failure."""
    logger.debug('fopen hooked')
    # arg0 = path pointer, arg1 = mode pointer (per fopen's C signature).
    arg0 = pstate.get_argument_value(0)
    arg1 = pstate.get_argument_value(1)
    arg0s = pstate.memory.read_string(arg0)
    arg1s = pstate.memory.read_string(arg1)
    # Pin the path bytes (incl. NUL) and the mode argument to their concrete
    # values so symbolic exploration cannot change what was opened.
    pstate.concretize_memory_bytes(arg0, (len(arg0s) + 1))
    pstate.concretize_argument(1)
    if se.seed.is_file_defined(arg0s):
        # The fuzzing seed provides this file's content: serve it from memory.
        logger.info(f'opening an input file: {arg0s}')
        data = se.seed.get_file_input(arg0s)
        filedesc = pstate.create_file_descriptor(arg0s, io.BytesIO(data))
        return filedesc.id
    else:
        try:
            # NOTE(review): the guest-supplied mode string is passed straight
            # to the host open(); text vs binary semantics follow the host.
            fd = open(arg0s, arg1s)
            filedesc = pstate.create_file_descriptor(arg0s, fd)
            return filedesc.id
        except Exception as e:
            # Best-effort: any host-side failure maps to fopen returning NULL.
            logger.debug(f'Failed to open {arg0s} {e}')
            return NULL_PTR
class SetDataset():
    """Episodic (few-shot) dataset: one shuffled sub-dataloader per class.

    Indexing by class position yields one freshly-shuffled batch of images
    belonging to that class.
    """

    def __init__(self, data_file, batch_size, transform):
        with open(data_file, 'r') as f:
            self.meta = json.load(f)
        self.cl_list = np.unique(self.meta['image_labels']).tolist()
        # Bucket image paths by class label.
        self.sub_meta = {cl: [] for cl in self.cl_list}
        for (img_path, label) in zip(self.meta['image_names'], self.meta['image_labels']):
            self.sub_meta[label].append(img_path)
        # num_workers=0: these tiny per-class loaders are iterated constantly.
        loader_kwargs = dict(batch_size=batch_size, shuffle=True, num_workers=0, pin_memory=False)
        self.sub_dataloader = [
            torch.utils.data.DataLoader(SubDataset(self.sub_meta[cl], cl, transform=transform), **loader_kwargs)
            for cl in self.cl_list
        ]

    def __getitem__(self, i):
        # A fresh iterator per call => a newly shuffled batch for class i.
        return next(iter(self.sub_dataloader[i]))

    def __len__(self):
        return len(self.cl_list)
@with_fixtures(WebFixture, MaxNumberOfFilesFileUploadInputFixture)
def test_async_number_files_validation(web_fixture, max_number_of_files_file_upload_input_fixture):
    """Client-side validation: a second upload beyond the 1-file maximum is
    rejected with a message, and removing a file clears the message.

    FIX: the decorator above had lost its '@with' prefix in extraction
    ('_fixtures(...)' alone is invalid at module level); restored.
    """
    fixture = max_number_of_files_file_upload_input_fixture
    web_fixture.reahl_server.set_app(fixture.new_wsgi_app(enable_js=True))
    browser = web_fixture.driver_browser
    browser.open('/')
    assert (not fixture.uploaded_file_is_listed(fixture.file_to_upload1.name))
    assert (not fixture.uploaded_file_is_listed(fixture.file_to_upload2.name))
    # First file fits within the maximum: listed, no error shown.
    browser.type(XPath.input_labelled('Choose file(s)'), fixture.file_to_upload1.name)
    assert fixture.uploaded_file_is_listed(fixture.file_to_upload1.name)
    assert browser.wait_for_not(browser.is_visible, XPath.span().including_text('a maximum of 1 files may be uploaded'))
    # Second file exceeds the maximum: not listed, error appears.
    browser.type(XPath.input_labelled('Choose file(s)'), fixture.file_to_upload2.name)
    assert (not fixture.uploaded_file_is_listed(fixture.file_to_upload2.name))
    assert browser.wait_for(browser.is_visible, XPath.span().including_text('a maximum of 1 files may be uploaded'))
    # NOTE(review): 'file_to_upload1_name' (vs '.name' used above) may be a
    # distinct fixture attribute -- confirm before "fixing".
    browser.click(XPath.button_labelled('Remove', filename=fixture.file_to_upload1_name))
    assert browser.wait_for_not(browser.is_visible, XPath.span().including_text('a maximum of 1 files may be uploaded'))
class KFACOptimizer(optim.Optimizer):
    """Kronecker-Factored Approximate Curvature (K-FAC) optimizer (ACKTR style).

    Maintains running Kronecker factors of the Fisher per supported layer
    (input covariances ``m_aa`` via a forward pre-hook, grad-output
    covariances ``m_gg`` via a backward hook), preconditions gradients using
    their eigendecompositions, applies KL-based clipping, and delegates the
    actual parameter update to an internal SGD-with-momentum optimizer.
    """

    def __init__(self, model, lr=0.25, momentum=0.9, stat_decay=0.99, kl_clip=0.001, damping=0.01, weight_decay=0, fast_cnn=False, Ts=1, Tf=10):
        defaults = dict()

        def split_bias(module):
            # Recursively wrap biased submodules so weight and bias become
            # separate "layers": this implementation handles exactly one
            # parameter per tracked module.
            for (mname, child) in module.named_children():
                if (hasattr(child, 'bias') and (child.bias is not None)):
                    module._modules[mname] = SplitBias(child)
                else:
                    split_bias(child)

        split_bias(model)
        super(KFACOptimizer, self).__init__(model.parameters(), defaults)
        self.known_modules = {'Linear', 'Conv2d', 'AddBias'}
        self.modules = []
        self.grad_outputs = {}
        self.model = model
        self._prepare_model()
        self.steps = 0
        (self.m_aa, self.m_gg) = ({}, {})  # running covariance factors
        (self.Q_a, self.Q_g) = ({}, {})    # eigenvectors of the factors
        (self.d_a, self.d_g) = ({}, {})    # eigenvalues of the factors
        self.momentum = momentum
        self.stat_decay = stat_decay
        self.lr = lr
        self.kl_clip = kl_clip
        self.damping = damping
        self.weight_decay = weight_decay
        self.fast_cnn = fast_cnn
        self.Ts = Ts  # statistics accumulation interval (steps)
        self.Tf = Tf  # eigendecomposition refresh interval (steps)
        # NOTE(review): `self.acc_stats` (read in _save_grad_output) is never
        # initialized here; it appears to be toggled externally by the
        # training loop -- confirm.
        self.optim = optim.SGD(model.parameters(), lr=(self.lr * (1 - self.momentum)), momentum=self.momentum)

    def _save_input(self, module, input):
        # Forward pre-hook: accumulate the input covariance every Ts steps.
        if (torch.is_grad_enabled() and ((self.steps % self.Ts) == 0)):
            classname = module.__class__.__name__
            layer_info = None
            if (classname == 'Conv2d'):
                layer_info = (module.kernel_size, module.stride, module.padding)
            aa = compute_cov_a(input[0].data, classname, layer_info, self.fast_cnn)
            if (self.steps == 0):
                self.m_aa[module] = aa.clone()
            update_running_stat(aa, self.m_aa[module], self.stat_decay)

    def _save_grad_output(self, module, grad_input, grad_output):
        # Backward hook: accumulate grad-output covariance when enabled.
        if self.acc_stats:
            classname = module.__class__.__name__
            layer_info = None
            if (classname == 'Conv2d'):
                layer_info = (module.kernel_size, module.stride, module.padding)
            gg = compute_cov_g(grad_output[0].data, classname, layer_info, self.fast_cnn)
            if (self.steps == 0):
                self.m_gg[module] = gg.clone()
            update_running_stat(gg, self.m_gg[module], self.stat_decay)

    def _prepare_model(self):
        # Register hooks on every supported (single-parameter) module.
        for module in self.model.modules():
            classname = module.__class__.__name__
            if (classname in self.known_modules):
                assert (not ((classname in ['Linear', 'Conv2d']) and (module.bias is not None))), 'You must have a bias as a separate layer'
                self.modules.append(module)
                module.register_forward_pre_hook(self._save_input)
                module.register_backward_hook(self._save_grad_output)

    def step(self):
        """Precondition gradients with the K-FAC inverse, clip, and apply SGD."""
        if (self.weight_decay > 0):
            for p in self.model.parameters():
                p.grad.data.add_(self.weight_decay, p.data)
        updates = {}
        for (i, m) in enumerate(self.modules):
            assert (len(list(m.parameters())) == 1), 'Can handle only one parameter at the moment'
            classname = m.__class__.__name__
            p = next(m.parameters())
            la = (self.damping + self.weight_decay)
            if ((self.steps % self.Tf) == 0):
                # Refresh eigendecompositions of both Kronecker factors and
                # zero out numerically insignificant eigenvalues.
                (self.d_a[m], self.Q_a[m]) = torch.symeig(self.m_aa[m], eigenvectors=True)
                (self.d_g[m], self.Q_g[m]) = torch.symeig(self.m_gg[m], eigenvectors=True)
                self.d_a[m].mul_((self.d_a[m] > 1e-06).float())
                self.d_g[m].mul_((self.d_g[m] > 1e-06).float())
            if (classname == 'Conv2d'):
                # Flatten conv kernels to (out_channels, -1) for the 2-D math.
                p_grad_mat = p.grad.data.view(p.grad.data.size(0), (- 1))
            else:
                p_grad_mat = p.grad.data
            # FIX: the three matrix products below were missing their '@'
            # (matmul) operators in the original source -- a syntax error.
            v1 = (self.Q_g[m].t() @ p_grad_mat) @ self.Q_a[m]
            v2 = (v1 / ((self.d_g[m].unsqueeze(1) * self.d_a[m].unsqueeze(0)) + la))
            v = (self.Q_g[m] @ v2) @ self.Q_a[m].t()
            v = v.view(p.grad.data.size())
            updates[p] = v
        # KL-based trust-region clipping of the preconditioned update.
        vg_sum = 0
        for p in self.model.parameters():
            v = updates[p]
            vg_sum += (((v * p.grad.data) * self.lr) * self.lr).sum()
        nu = min(1, math.sqrt((self.kl_clip / vg_sum)))
        for p in self.model.parameters():
            v = updates[p]
            p.grad.data.copy_(v)
            p.grad.data.mul_(nu)
        self.optim.step()
        self.steps += 1
def test_connect_wr_x_conn_As_wr_y_conn_At_disjoint():
    """Writing disjoint slices A[8:32] and A[0:4] through connected wires
    must compose into the expected value of A (bits 4:8 stay 0)."""

    class Top(ComponentLevel3):

        def construct(s):
            s.x = Wire(Bits24)
            s.A = Wire(Bits32)
            s.y = Wire(Bits4)
            connect(s.A[8:32], s.x)
            connect(s.A[0:4], s.y)

            def up_wr_x():
                s.x = Bits24(1193046)  # 0x123456

            def up_wr_y():
                s.y = Bits4(15)  # 0xf

            def up_rd_A():
                # FIX: the expected constant was lost in extraction.
                # A[8:32] = 0x123456, A[0:4] = 0xf, A[4:8] unconnected => 0,
                # hence A == 0x1234560f.
                assert (s.A == 0x1234560f)

    _test_model(Top)
class ElfPatcher():
    """Abstract interface over an ELF-patching backend (presumably patchelf
    or similar -- confirm against the concrete subclass)."""

    def replace_needed(self, file_name: str, *old_new_pairs: tuple[(str, str)]) -> None:
        """Rewrite dependency (DT_NEEDED-style) entries; each pair is
        (old_soname, new_soname)."""
        raise NotImplementedError

    def set_soname(self, file_name: str, new_so_name: str) -> None:
        """Set the shared library's SONAME."""
        raise NotImplementedError

    def set_rpath(self, file_name: str, rpath: str) -> None:
        """Overwrite the binary's RPATH/RUNPATH."""
        raise NotImplementedError

    def get_rpath(self, file_name: str) -> str:
        """Return the binary's current RPATH/RUNPATH."""
        raise NotImplementedError
def test_options_1(tmp_path, monkeypatch):
    """Options read from pyproject.toml must apply per-identifier overrides
    (cp37 manylinux builds get 'pyproject-override') and resolve manylinux
    images to the pinned container tags."""
    with tmp_path.joinpath('pyproject.toml').open('w') as f:
        f.write(PYPROJECT_1)
    args = CommandLineArguments.defaults()
    args.package_dir = tmp_path
    # Force a deterministic architecture regardless of the CI host.
    monkeypatch.setattr(platform_module, 'machine', (lambda : 'x86_64'))
    options = Options(platform='linux', command_line_arguments=args, env={})
    module = get_platform_module('linux')
    identifiers = get_build_identifiers(platform_module=module, build_selector=options.globals.build_selector, architectures=options.globals.architectures)
    # The summary must show which identifiers pick up the override.
    override_display = ' *: pyproject\n cp37-manylinux_x86_64, cp37-manylinux_i686: pyproject-override'
    print(options.summary(identifiers))
    assert (override_display in options.summary(identifiers))
    default_build_options = options.build_options(identifier=None)
    assert (default_build_options.environment == parse_environment('FOO="BAR"'))
    all_pinned_container_images = _get_pinned_container_images()
    pinned_x86_64_container_image = all_pinned_container_images['x86_64']
    # cp38: no override -> base test_command and manylinux1 image.
    local = options.build_options('cp38-manylinux_x86_64')
    assert (local.manylinux_images is not None)
    assert (local.test_command == 'pyproject')
    assert (local.manylinux_images['x86_64'] == pinned_x86_64_container_image['manylinux1'])
    # cp37: override applies -> overridden test_command and manylinux2014 image.
    local = options.build_options('cp37-manylinux_x86_64')
    assert (local.manylinux_images is not None)
    assert (local.test_command == 'pyproject-override')
    assert (local.manylinux_images['x86_64'] == pinned_x86_64_container_image['manylinux2014'])
def _dist_train(model, dataset, cfg, validate=False, logger=None):
    """Run distributed training of `model` on `dataset` per the mmcv-style
    config: builds the dataloader, wraps the model in DDP, registers the
    standard hooks (plus optional distributed mAP evaluation), resumes or
    loads weights, and launches the epoch-based runner."""
    data_loaders = [build_dataloader(dataset, cfg.data.imgs_per_gpu, cfg.data.workers_per_gpu, dist=True)]
    model = MMDistributedDataParallel(model.cuda())
    optimizer = build_optimizer(model, cfg.optimizer)
    runner = EpochBasedRunner(model, batch_processor, optimizer, cfg.work_dir, logger)
    # DistOptimizerHook handles gradient allreduce before each optimizer step.
    optimizer_config = DistOptimizerHook(**cfg.optimizer_config)
    runner.register_training_hooks(cfg.lr_config, optimizer_config, cfg.checkpoint_config, cfg.log_config)
    # Reseed the sampler each epoch so shuffling differs across epochs.
    runner.register_hook(DistSamplerSeedHook())
    if validate:
        val_dataset_cfg = cfg.data.val
        eval_cfg = cfg.get('evaluation', {})
        runner.register_hook(DistEvalmAPHook(val_dataset_cfg, **eval_cfg))
    # resume_from restores full training state; load_from only loads weights
    # (optionally restricted to the modules named in cfg.load_modules).
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        load_modules = cfg.get('load_modules', [])
        load_certain_checkpoint(runner.model, runner.logger, cfg.load_from, load_modules)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
def inference(model_save_path, clip):
    """Run a TF1 SavedModel on `clip` and return the output tensor's value.

    Tensor names are resolved from the serving signature; the batch-norm
    placeholder is fed False (inference mode).
    """
    with tf.Session() as sess:
        graph_def = tf.saved_model.loader.load(sess, [MODEL_NAME], model_save_path)
        sig = graph_def.signature_def[SIGNATURE_KEY]
        # Look up the concrete tensors by their signature-recorded names.
        bn_tensor = sess.graph.get_tensor_by_name(sig.inputs[BATCH_NORM_KEY].name)
        x_tensor = sess.graph.get_tensor_by_name(sig.inputs[INPUT_KEY].name)
        y_tensor = sess.graph.get_tensor_by_name(sig.outputs[OUTPUT_KEY].name)
        return sess.run(y_tensor, feed_dict={x_tensor: clip, bn_tensor: False})
class F19_TestCase(FC3_TestCase):
    """Parsing tests for the F19 kickstart 'lang' command (--addsupport)."""

    def runTest(self):
        self.assert_parse('lang en_US')
        self.assert_parse('lang en_US --addsupport=cs_CZ', 'lang en_US --addsupport=cs_CZ\n')
        # FIX: the trailing '8' of the locale suffix was lost in extraction
        # ('sr_RS.UTF-' is not a valid locale); restored to sr_RS.UTF-8.
        self.assert_parse('lang en_US --addsupport=sr_RS.UTF-8', 'lang en_US --addsupport=sr_RS.UTF-8\n')
        self.assert_parse('lang en_US --addsupport=cs_CZ,fr_FR', 'lang en_US --addsupport=cs_CZ,fr_FR\n')
        # Invalid forms must be rejected.
        self.assert_parse_error('lang --bogus-option')
        self.assert_parse_error('lang')
        self.assert_parse_error('lang en_US en_CA')
        self.assert_parse_error('lang --addsupport=en_US')
        self.assert_parse_error('lang --addsupport=,bg_BG')
def install_pyqt_binary(venv_dir: pathlib.Path, version: str) -> None:
    """Install PyQt wheels from PyPI into `venv_dir`, warning (not failing)
    when the current platform/architecture has no official binary wheels."""
    utils.print_title('Installing PyQt from binary')
    utils.print_col('No proprietary codec support will be available in qutebrowser.', 'bold')
    # Qt 6 wheels dropped 32-bit Windows and gained Apple Silicon support.
    if _is_qt6_version(version):
        supported_archs = {'linux': {'x86_64'}, 'win32': {'AMD64'}, 'darwin': {'x86_64', 'arm64'}}
    else:
        supported_archs = {'linux': {'x86_64'}, 'win32': {'x86', 'AMD64'}, 'darwin': {'x86_64'}}
    machine = platform.machine()
    if sys.platform not in supported_archs:
        utils.print_error(f'{sys.platform} is not a supported platform by PyQt binary packages, this will most likely fail.')
    elif machine not in supported_archs[sys.platform]:
        utils.print_error(f'{machine} is not a supported architecture for PyQt binaries on {sys.platform}, this will most likely fail.')
    elif sys.platform == 'linux' and platform.libc_ver()[0] != 'glibc':
        utils.print_error(f'Non-glibc Linux is not a supported platform for PyQt binaries, this will most likely fail.')
    # Proceed regardless: the warnings above are advisory only.
    pip_install(venv_dir, '-r', pyqt_requirements_file(version), '--only-binary', ','.join(PYQT_PACKAGES))
def file_info_from_modpath(modpath: list[str], path: (Sequence[str] | None)=None, context_file: (str | None)=None) -> spec.ModuleSpec:
    """Resolve the module spec for a dotted module path.

    `context_file`, when given, anchors relative resolution to its directory.
    Special cases: 'os.path' is synthesized directly (it is a runtime alias),
    and 'xml.*' prefers the _xmlplus drop-in package when installed.
    """
    context = os.path.dirname(context_file) if context_file is not None else context_file
    if modpath == ['os', 'path']:
        return spec.ModuleSpec(name='os.path', location=os.path.__file__, type=spec.ModuleType.PY_SOURCE)
    if modpath[0] == 'xml':
        try:
            return _spec_from_modpath(['_xmlplus'] + modpath[1:], path, context)
        except ImportError:
            # _xmlplus not installed: fall back to the stdlib xml package.
            return _spec_from_modpath(modpath, path, context)
    return _spec_from_modpath(modpath, path, context)
from dataclasses import dataclass


# FIX: this class declares annotated attributes with no __init__, so it was
# almost certainly a dataclass whose decorator was lost in extraction (the
# same stripping hit other decorators in this file); restored.
@dataclass
class LenPredicate:
    """Type-narrowing predicate for ``len(x) == n`` (or ``>= n`` with a star
    pattern) checks: returns the narrowed value, or None when the check
    rules the value out."""

    expected_length: int   # required length (minimum length when has_star)
    has_star: bool         # True for starred patterns: len may exceed expected_length
    ctx: CanAssignContext  # context for generic-argument resolution

    def __call__(self, value: Value, positive: bool) -> Optional[Value]:
        value_len = len_of_value(value)
        if (isinstance(value_len, KnownValue) and isinstance(value_len.val, int)):
            # Length statically known: decide the match outright.
            if self.has_star:
                match = (value_len.val >= self.expected_length)
            else:
                match = (value_len.val == self.expected_length)
            if (not positive):
                match = (not match)
            if match:
                return value
            else:
                return None
        # Length unknown: a plain tuple of known element type can still be
        # narrowed to a fixed-length sequence of that element type.
        cleaned = unannotate(value)
        if ((not self.has_star) and isinstance(cleaned, TypedValue) and (cleaned.typ is tuple)):
            arg = cleaned.get_generic_arg_for_type(tuple, self.ctx, 0)
            return SequenceValue(tuple, [(False, arg) for _ in range(self.expected_length)])
        return value
def test_do_export_broken_internal_copy(tmp_path: Path):
    """_do_export_game must raise UnableToExportError for this export setup
    (no input path, menu_mod disabled)."""
    patch_data = {'menu_mod': False}
    params = EchoesGameExportParams(
        input_path=None,
        output_path=MagicMock(),
        contents_files_path=tmp_path.joinpath('contents'),
        asset_cache_path=tmp_path.joinpath('asset_cache_path'),
        backup_files_path=tmp_path.joinpath('backup'),
        prime_path=MagicMock(),
        use_prime_models=False,
        spoiler_output=None,
    )
    progress = MagicMock()
    with pytest.raises(UnableToExportError):
        EchoesGameExporter()._do_export_game(patch_data, params, progress)
def gen_evaluation_file(semantic_mask, instance_mask, confidence_array, instance_mask_path, txt_path, file_name, additive=False, slide_semantic=False, mask_label=None, mask_idx=0, result_path=None):
    """Emit per-instance evaluation artifacts (Cityscapes-style): one PNG
    mask per (instance, dominant-label) pair plus a text index line of
    '<mask path> <class id> <confidence>' per mask.

    additive: append to the existing txt file instead of overwriting.
    slide_semantic: restrict labels to those enabled in `mask_label` for
    `mask_idx`, zeroing out filtered instances in the returned mask.
    Returns the (possibly refined) copy of `instance_mask`.
    """
    assert result_path, 'result_path must be specified'
    result_path = (result_path + '/')
    # Index -> evaluation class id (Cityscapes-style ids -- confirm mapping).
    _ids = np.array(['0', '24', '25', '26', '27', '28', '31', '32', '33'])
    instance_list = np.unique(instance_mask)
    refined_instance_mask = np.copy(instance_mask)
    if slide_semantic:
        # Boolean selector over the 9 label slots for this sliding window.
        semantic_part_idx = (mask_label == (mask_idx + 1))
    file_op = 'w'
    ins_prefix = str(mask_idx)
    if additive:
        file_op = 'a'
    with open((((result_path + txt_path) + file_name) + '.txt'), file_op) as file:
        for i in range(instance_list.shape[0]):
            if (instance_list[i] == 0):
                continue  # 0 is background, not an instance
            ins_mask = (instance_mask == instance_list[i])
            instance_semantic_label = semantic_mask[ins_mask]
            instance_semantic_label = instance_semantic_label[(instance_semantic_label != 0)]
            if (instance_semantic_label.size == 0):
                continue  # instance has no overlapping semantic prediction
            # Up to 4 most frequent semantic labels inside this instance,
            # with their occupancy ratios.
            (labels, ratios) = get_mode(instance_semantic_label, 4)
            for (label_i, label) in enumerate(labels):
                if (label == 0):
                    continue
                # Confidence is scaled by how much of the instance the label covers.
                _confidence = (confidence_array[i] * ratios[label_i])
                _id = _ids[label]
                if slide_semantic:
                    if (not np.isin(label, np.arange(9)[semantic_part_idx])):
                        # Label not active for this window: drop the instance.
                        refined_instance_mask[ins_mask] = 0
                        continue
                curr_ins = (ins_prefix + ('_%d_%d.png' % (i, label_i)))
                _result_mask_path = ((instance_mask_path + file_name) + curr_ins)
                # Save the binary instance mask as a paletted PNG.
                _result_mask = ins_mask.astype('uint8')
                _result_mask_image = Image.fromarray(_result_mask, mode='P')
                mask_palette = get_default_palette.get_default_palette()
                _result_mask_image.putpalette(mask_palette)
                _result_mask_image.save((result_path + _result_mask_path))
                # Index line: relative mask path, class id, confidence.
                file.write(('.' + _result_mask_path))
                file.write(' ')
                file.write(_id)
                file.write(' ')
                file.write(str(_confidence))
                file.write('\n')
    return refined_instance_mask
class EventChannel(RemoteMethod):
    """RemoteMethod through which a Form submits its Events to a Controller.

    Parses form input into an Event, delegates handling to the controller,
    and manages persisted form-input cleanup on success or failure.
    """

    def __init__(self, form, controller, name):
        # CSRF is disabled here; presumably the form layer handles it -- confirm.
        super().__init__(form.view, name, self.delegate_event, None, idempotent=False, immutable=False, disable_csrf_check=True)
        self.controller = controller
        self.form = form

    def make_result(self, input_values):
        # '_noredirect' marks an in-page (ajax) submit: re-render the form
        # instead of the usual redirect-after-post.
        if ('_noredirect' in input_values.keys()):
            return WidgetResult([self.form.rendered_form])
        else:
            return RedirectAfterPost()

    def delegate_event(self, event=None):
        try:
            return self.controller.handle_event(event)
        except NoEventHandlerFound:
            # Surface a clear developer error rather than a generic failure.
            raise ProgrammerError(('No suitable handler found for event %s on %s' % (event.name, self.form.view)))

    def parse_arguments(self, input_values):
        # Form input is parsed into the Event the controller will receive.
        event = self.form.handle_form_input(input_values)
        return {'event': event}

    def cleanup_after_exception(self, input_values, ex):
        # Keep user input around for redisplay, but reset per-view state.
        self.form.persisted_userinput_class.clear_for_view(self.form.view)
        self.form.cleanup_after_exception(input_values, ex)
        self.form.view.save_last_construction_state()

    def cleanup_after_success(self):
        self.form.cleanup_after_success()
        self.form.persisted_userinput_class.clear_for_view(self.form.view)
        self.form.view.clear_last_construction_state()
def euler2mat(z=0, y=0, x=0):
    """Return a 3x3 rotation matrix for the given Euler angles (radians).

    The rotations are composed as Rx @ Ry @ Rz, i.e. a vector is rotated
    about z first, then y, then x.  Angles of zero contribute no factor;
    with all angles zero the identity matrix is returned.
    """
    factors = []
    if z:
        cz, sz = math.cos(z), math.sin(z)
        factors.append(np.array([[cz, -sz, 0],
                                 [sz, cz, 0],
                                 [0, 0, 1]]))
    if y:
        cy, sy = math.cos(y), math.sin(y)
        factors.append(np.array([[cy, 0, sy],
                                 [0, 1, 0],
                                 [-sy, 0, cy]]))
    if x:
        cx, sx = math.cos(x), math.sin(x)
        factors.append(np.array([[1, 0, 0],
                                 [0, cx, -sx],
                                 [0, sx, cx]]))
    if not factors:
        return np.eye(3)
    # Multiply in reverse build order: Rx . Ry . Rz.
    return reduce(np.dot, factors[::-1])
class DIAPreResNet(nn.Module):
    """DIA-PreResNet classifier: pre-activation ResNet stages where each
    stage shares a single DIA attention cell across its units.

    Args:
        channels: per-stage lists of unit output channel counts.
        init_block_channels: channels produced by the stem (init) block.
        bottleneck: whether units use the bottleneck variant.
        conv1_stride: whether the first conv of a bottleneck carries the stride.
        in_channels: number of input image channels.
        in_size: expected spatial input size.
        num_classes: classifier output dimension.
    """

    def __init__(self, channels, init_block_channels, bottleneck, conv1_stride,
                 in_channels=3, in_size=(224, 224), num_classes=1000):
        super(DIAPreResNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes

        self.features = nn.Sequential()
        self.features.add_module('init_block', PreResInitBlock(
            in_channels=in_channels, out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, stage_channels in enumerate(channels):
            stage = DualPathSequential(return_two=False)
            # One attention cell is shared by every unit in this stage.
            attention = DIAAttention(in_x_features=stage_channels[0],
                                     in_h_features=stage_channels[0])
            for j, out_channels in enumerate(stage_channels):
                # Downsample at the first unit of every stage except the first.
                stride = 2 if (i != 0 and j == 0) else 1
                stage.add_module('unit{}'.format(j + 1), DIAPreResUnit(
                    in_channels=in_channels, out_channels=out_channels,
                    stride=stride, bottleneck=bottleneck,
                    conv1_stride=conv1_stride, attention=attention))
                in_channels = out_channels
            self.features.add_module('stage{}'.format(i + 1), stage)
        self.features.add_module('post_activ', PreResActivation(in_channels=in_channels))
        self.features.add_module('final_pool', nn.AvgPool2d(kernel_size=7, stride=1))

        self.output = nn.Linear(in_features=in_channels, out_features=num_classes)
        self._init_params()

    def _init_params(self):
        # Kaiming-initialise every conv weight; zero any conv bias.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        return self.output(x)
def random_pad_clip_list(x, num):
    """Return a list of exactly `num` items drawn from `x`.

    If `x` has more than `num` items, a random `num`-sized sample without
    replacement is returned.  Otherwise `x` is repeated in fully shuffled
    rounds (plus a shuffled partial round) until `num` items are collected.
    The input is deep-copied and never mutated.
    """
    pool = deepcopy(list(x))
    if len(pool) > num:
        shuffle(pool)
        return pool[:num]
    picked = []
    # Whole shuffled rounds first ...
    for _ in range(num // len(pool)):
        shuffle(pool)
        picked = picked + pool
    # ... then a prefix of the last shuffle to reach exactly `num`.
    return picked + pool[:num - len(picked)]
class Migration(migrations.Migration):
    """Alter the `invite.email` field and the `value.value_type` choices
    in the projects app."""

    dependencies = [
        ('projects', '0051_alter_value_value_type'),
    ]

    operations = [
        migrations.AlterField(
            model_name='invite',
            name='email',
            field=models.EmailField(
                blank=True,
                help_text='The e-mail for this membership.',
                max_length=254,
                verbose_name='E-mail',
            ),
        ),
        migrations.AlterField(
            model_name='value',
            name='value_type',
            field=models.CharField(
                choices=[
                    ('text', 'Text'),
                    ('url', 'URL'),
                    ('integer', 'Integer'),
                    ('float', 'Float'),
                    ('boolean', 'Boolean'),
                    ('datetime', 'Datetime'),
                    ('email', 'E-mail'),
                    ('phone', 'Phone'),
                    ('option', 'Option'),
                    ('file', 'File'),
                ],
                default='text',
                help_text='Type of this value.',
                max_length=8,
                verbose_name='Value type',
            ),
        ),
    ]
class AstroidManagerBrain(TypedDict):
    """Typed description of the shared state dict used by AstroidManager."""

    # Parsed modules keyed by qualified module name.
    astroid_cache: dict[str, nodes.Module]
    # Memoised module-spec lookups, including failed imports.
    _mod_file_cache: dict[tuple[str, str | None], spec.ModuleSpec | exceptions.AstroidImportError]
    # Fallback hooks tried, in order, when a normal import fails.
    _failed_import_hooks: list[Callable[[str], nodes.Module]]
    always_load_extensions: bool
    optimize_ast: bool
    max_inferable_values: int
    extension_package_whitelist: set[str]
    _transform: transforms.TransformVisitor
@pytest.mark.parametrize('regimes', [
    ['n', 'n', 's', 's', 'e', 'e', 'w', 'w', 'e', 'j'],
    [0, 0, 2, 2, 3, 3, 4, 4, 3, 1],
])
def test_block_contiguity(regimes):
    """Block-contiguity neighbors agree for positional, string-id and
    pandas-Series regime inputs.

    NOTE(review): the decorator line was truncated to `.parametrize(...)`;
    restored the conventional `@pytest.mark.parametrize` prefix.
    """
    neighbors = _block_contiguity(regimes)
    wn = {0: [1], 1: [0], 2: [3], 3: [2], 4: [5, 8], 5: [4, 8],
          6: [7], 7: [6], 8: [4, 5], 9: []}
    assert {f: n.tolist() for f, n in neighbors.items()} == wn
    # Same structure, but keyed by explicit string ids.
    ids = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']
    neighbors = _block_contiguity(regimes, ids=ids)
    wn_str = {ids[f]: [ids[o] for o in n] for f, n in wn.items()}
    assert {f: n.tolist() for f, n in neighbors.items()} == wn_str
    # A pandas Series supplies its index as the ids.
    regimes = pandas.Series(regimes, index=ids)
    neighbors = _block_contiguity(regimes)
    assert {f: n.tolist() for f, n in neighbors.items()} == wn_str
class Stats(object):
    """Presentation/analysis layer over tracked-object data.

    An instance either wraps a live tracker (sharing its index, snapshots
    and history) or loads a previously pickled dump from a file.  It can
    sort the tracked objects, annotate snapshots with per-class statistics
    and re-serialize its state.
    """

    def __init__(self, tracker: 'Optional[ClassTracker]'=None, filename: Optional[str]=None, stream: Optional[IO]=None):
        """Wrap `tracker` and/or load stats from `filename`.

        :param tracker: live ClassTracker whose data is presented.
        :param filename: optional pickle dump to load via `load_stats`.
        :param stream: output stream for reports (defaults to stdout).
        """
        self.stream = stream if stream else sys.stdout
        self.tracker = tracker
        self.index = {}
        self.snapshots = []
        # FIX: always define `history` so attribute access is safe even
        # when no tracker was supplied.
        self.history = None
        if tracker:
            self.index = tracker.index
            self.snapshots = tracker.snapshots
            self.history = tracker.history
        self.sorted = []
        if filename:
            self.load_stats(filename)

    def load_stats(self, fdump: Union[(str, IO[bytes])]) -> None:
        """Load index and snapshots from a pickle dump.

        :param fdump: filename or an open binary file object.  Any
            previously computed sort order is discarded.
        """
        opened_here = isinstance(fdump, str)
        if opened_here:
            fdump = open(fdump, 'rb')
        try:
            self.index = pickle.load(fdump)
            self.snapshots = pickle.load(fdump)
        finally:
            # FIX: close the file when we opened it ourselves (the original
            # leaked the handle); caller-supplied streams are left open.
            if opened_here:
                fdump.close()
        self.sorted = []

    def dump_stats(self, fdump: Union[(str, IO[bytes])], close: bool=True) -> None:
        """Pickle the index and snapshots to `fdump`.

        :param fdump: filename or an open binary file object.
        :param close: close the stream afterwards (files opened here by
            filename are always closed to avoid leaking the handle).
        """
        opened_here = isinstance(fdump, str)
        if self.tracker:
            # Stop background snapshotting so the dump is consistent.
            self.tracker.stop_periodic_snapshots()
        if opened_here:
            fdump = open(fdump, 'wb')
        pickle.dump(self.index, fdump, protocol=pickle.HIGHEST_PROTOCOL)
        pickle.dump(self.snapshots, fdump, protocol=pickle.HIGHEST_PROTOCOL)
        if close or opened_here:
            fdump.close()

    def _init_sort(self) -> None:
        """Build the flat `sorted` list of all tracked objects.

        Resolves each object's `classname`, peak `size`, and `tsize` (its
        size at the time of the largest snapshot).
        """
        if not self.sorted:
            # Find the timestamp of the snapshot with the largest total
            # tracked size.  BUG FIX: the original never updated `maxsize`,
            # so `tmax` ended up at the *last* snapshot with a nonzero
            # total instead of the largest one.
            tmax = None
            maxsize = 0
            for snapshot in self.snapshots:
                if snapshot.tracked_total > maxsize:
                    tmax = snapshot.timestamp
                    maxsize = snapshot.tracked_total
            for key in list(self.index.keys()):
                for tobj in self.index[key]:
                    tobj.classname = key
                    tobj.size = tobj.get_max_size()
                    tobj.tsize = tobj.get_size_at_time(tmax)
                self.sorted.extend(self.index[key])

    def sort_stats(self, *args: str) -> 'Stats':
        """Sort tracked objects by the given criteria; returns self.

        Valid criteria: classname, tsize, birth, death, name, repr, size.
        With no arguments, all criteria apply in that canonical order.

        :raises ValueError: if any criterion is not recognized.
        """
        criteria = ('classname', 'tsize', 'birth', 'death', 'name', 'repr', 'size')
        if not set(criteria).issuperset(set(args)):
            raise ValueError('Invalid sort criteria')
        if not args:
            args = criteria

        def args_to_tuple(obj: 'TrackedObject') -> Tuple[(str, ...)]:
            keys: List[str] = []
            for attr in args:
                attribute = getattr(obj, attr, '')
                if attr in ('tsize', 'size'):
                    # Negate sizes so larger objects sort first.
                    attribute = -int(attribute)
                keys.append(attribute)
            return tuple(keys)

        self._init_sort()
        self.sorted.sort(key=args_to_tuple)
        return self

    def reverse_order(self) -> 'Stats':
        """Reverse the current sort order; returns self for chaining."""
        self._init_sort()
        self.sorted.reverse()
        return self

    def annotate(self) -> None:
        """Annotate every snapshot with per-class statistics."""
        for snapshot in self.snapshots:
            self.annotate_snapshot(snapshot)

    def annotate_snapshot(self, snapshot: 'Snapshot') -> Dict[(str, Dict[(str, Any)])]:
        """Compute per-class statistics for `snapshot`, caching the result
        on the snapshot itself.

        For each tracked class records: total size (`sum`), average size
        per active instance (`avg`), share of the snapshot total (`pct`),
        the active instance count, and a merged Asized breakdown.
        """
        if snapshot.classes is not None:
            return snapshot.classes  # already annotated
        snapshot.classes = {}
        for classname in list(self.index.keys()):
            total = 0
            active = 0
            merged = Asized(0, 0)
            for tobj in self.index[classname]:
                _merge_objects(snapshot.timestamp, merged, tobj)
                total += tobj.get_size_at_time(snapshot.timestamp)
                # An object is active if born before the snapshot and not
                # yet dead at snapshot time.
                if (tobj.birth < snapshot.timestamp and
                        (tobj.death is None or tobj.death > snapshot.timestamp)):
                    active += 1
            try:
                pct = (total * 100.0) / snapshot.total
            except ZeroDivisionError:
                pct = 0
            try:
                avg = total / active
            except ZeroDivisionError:
                avg = 0
            snapshot.classes[classname] = dict(sum=total, avg=avg, pct=pct, active=active)
            snapshot.classes[classname]['merged'] = merged
        return snapshot.classes

    def tracked_classes(self) -> List[str]:
        """Return the sorted list of tracked class names."""
        return sorted(list(self.index.keys()))
class CanvasConfig(Config):
    """A Config bound to a specific canvas, seeded from another config.

    Copies the GL version/compatibility attributes from `base_config`;
    subclasses must implement compatibility checks and context creation.
    """

    def __init__(self, canvas, base_config):
        self.canvas = canvas
        # Mirror the requested attributes from the base configuration.
        self.major_version = base_config.major_version
        self.minor_version = base_config.minor_version
        self.forward_compatible = base_config.forward_compatible
        self.debug = base_config.debug
        # Fall back to this config's own API when the base specifies none.
        self.opengl_api = base_config.opengl_api or self.opengl_api

    def compatible(self, canvas):
        raise NotImplementedError('abstract')

    def create_context(self, share):
        raise NotImplementedError('abstract')

    def is_complete(self):
        # A canvas-bound config is, by definition, complete.
        return True
def get_model(name, **kwargs):
    """Look up a segmentation model constructor by name and build it.

    :param name: case-insensitive model identifier,
        e.g. 'encnet_resnet50_ade'.
    :param kwargs: forwarded verbatim to the model constructor.
    :raises ValueError: for an unknown name; the message lists the
        valid identifiers.
    """
    models = {
        'fcn_resnet50_pcontext': get_fcn_resnet50_pcontext,
        'encnet_resnet50_pcontext': get_encnet_resnet50_pcontext,
        'encnet_resnet101_pcontext': get_encnet_resnet101_pcontext,
        'encnet_resnet50_ade': get_encnet_resnet50_ade,
        'encnet_resnet101_ade': get_encnet_resnet101_ade,
        'fcn_resnet50_ade': get_fcn_resnet50_ade,
        'psp_resnet50_ade': get_psp_resnet50_ade,
    }
    key = name.lower()
    if key not in models:
        raise ValueError('%s\n\t%s' % (str(key), '\n\t'.join(sorted(models.keys()))))
    return models[key](**kwargs)
def parse_permissions(session=flask.session):
    """Build a name -> bool permission map for the current session.

    Unauthenticated sessions get every flag False.  Authenticated admins
    get every flag True; other authenticated users get their per-permission
    flags merged in from the session, with 'ANY' always True.
    """
    perms = dict.fromkeys((x.name for x in Perms), False)
    perms['ADMIN'] = False
    perms['is_admin'] = False
    if not session.get('authenticated', False):
        # Anonymous visitor: nothing is granted.
        return perms
    perms['ANY'] = True
    if session.get('role') == Role.ADMIN:
        # Admins hold every permission, including the pseudo-keys above.
        for key in perms.keys():
            perms[key] = True
    else:
        stored = session.get('perms')
        if stored:
            perms.update(get_permission(stored))
    return perms
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.