code stringlengths 281 23.7M |
|---|
def main():
    """Build a demo scene: a moving green cube with the camera tracking it."""
    scene = SceneManager.AddScene('Scene')
    # Switch the scene's default light to a point light.
    scene.gameObjects[1].GetComponent(Light).type = LightType.Point
    scene.mainCamera.transform.position = Vector3(0, 3, -10)
    look_at = scene.mainCamera.AddComponent(LookAt)

    cube = GameObject('Cube')
    mesh_renderer = cube.AddComponent(MeshRenderer)
    mesh_renderer.mat = Material(RGB(0, 255, 0))
    mesh_renderer.mesh = Loader.Primitives.cube
    cube.transform.position = Vector3(-20, 0, 0)
    cube.AddComponent(Mover).speed = 6

    scene.Add(cube)
    # Point the camera-follow component at the cube, then start the scene.
    look_at.other = cube
    SceneManager.LoadScene(scene)
# NOTE(review): in the original, the three helper calls below (and the bare name
# `_inputs`) appeared as standalone statements, which makes them no-ops. They are
# decorator factories (cf. HuggingFace `add_start_docstrings`, `unpack_inputs`,
# `add_start_docstrings_to_model_forward`, `add_code_sample_docstrings`) and must
# be applied as decorators to take effect.
@_start_docstrings('The bare RegNet model outputting raw features without any specific head on top.', REGNET_START_DOCSTRING)
class TFRegNetModel(TFRegNetPreTrainedModel):
    """Bare TF RegNet model returning raw hidden states, with no task head."""

    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        # Single main layer holding the whole embedder/encoder/pooler stack.
        self.regnet = TFRegNetMainLayer(config, name='regnet')

    @_inputs
    @_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @_code_sample_docstrings(processor_class=_FEAT_EXTRACTOR_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE)
    def call(self, pixel_values: tf.Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training=False) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        """Forward pass; returns pooled output + hidden states (tuple or dataclass)."""
        # Fall back to config defaults when flags are not given explicitly.
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(last_hidden_state=outputs.last_hidden_state, pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states)

    def serving_output(self, output: TFBaseModelOutputWithPoolingAndNoAttention) -> TFBaseModelOutputWithPoolingAndNoAttention:
        # Re-wrap so TF serving signatures get a structured output object.
        return TFBaseModelOutputWithPoolingAndNoAttention(last_hidden_state=output.last_hidden_state, pooler_output=output.pooler_output, hidden_states=output.hidden_states)
class ForIterable(ForGenerator):
    """Emit IR for iterating an arbitrary iterable via iter()/next()."""

    def need_cleanup(self) -> bool:
        # next() signals exhaustion through an error value, so we must check
        # afterwards whether a real exception is pending.
        return True

    def init(self, expr_reg: Value, target_type: RType) -> None:
        """Fetch the iterator once and spill it so it survives across basic blocks."""
        b = self.builder
        it_reg = b.call_c(iter_op, [expr_reg], self.line)
        b.maybe_spill(expr_reg)
        self.iter_target = b.maybe_spill(it_reg)
        self.target_type = target_type

    def gen_condition(self) -> None:
        """Call next(); branch to loop exit on the error (exhausted) value."""
        b = self.builder
        ln = self.line
        self.next_reg = b.call_c(next_op, [b.read(self.iter_target, ln)], ln)
        b.add(Branch(self.next_reg, self.loop_exit, self.body_block, Branch.IS_ERROR))

    def begin_body(self) -> None:
        """Coerce the fetched item and assign it to the loop index target."""
        b = self.builder
        ln = self.line
        item = b.coerce(self.next_reg, self.target_type, ln)
        b.assign(b.get_assignment_target(self.index), item, ln)

    def gen_step(self) -> None:
        # Advancement happens inside next(); nothing extra to emit.
        pass

    def gen_cleanup(self) -> None:
        # Distinguish normal exhaustion from a propagating exception.
        self.builder.call_c(no_err_occurred_op, [], self.line)
def main():
    """Generate synthetic sequences from a DNATransformer checkpoint and write them to per-rank FASTA files."""
    parser = ArgumentParser()
    parser.add_argument('-c', '--config', type=Path, required=True)
    parser.add_argument('-o', '--output_folder', type=Path, required=True)
    parser.add_argument('-n', '--num_seqs', type=int, required=True)
    parser.add_argument('-s', '--name_prefix', type=str, default='SyntheticSeq')
    parser.add_argument('-t', '--temperature', type=float, default=1.0, help='Temperature argument to pass to generate')
    parser.add_argument('-k', '--known_sequence_files', required=False, nargs='+', help='Space separated list of known sequence files.')
    parser.add_argument('--top_k', default=50, type=int)
    parser.add_argument('--top_p', default=0.95, type=float)
    args = parser.parse_args()

    # Rank/GPU bookkeeping comes from the scheduler environment.
    # NOTE(review): these may be None when run off-cluster; int(gpu_number) below would raise.
    gpu_number = os.environ.get('SUBNODE_RANK')
    pmi_rank = os.environ.get('PMI_RANK')
    os.makedirs(args.output_folder, exist_ok=True)
    output_fasta = args.output_folder / 'rank{}.fasta'.format(pmi_rank)
    seq_name = args.name_prefix + '_{}'.format(pmi_rank)

    config = ModelSettings.from_yaml(args.config)
    if config.load_pt_checkpoint is not None:
        load_strategy = LoadPTCheckpointStrategy(config.load_pt_checkpoint, cfg=config, generation_flag=True)
    elif config.load_ds_checkpoint is not None:
        load_strategy = LoadDeepSpeedStrategy(config.load_ds_checkpoint, cfg=config, generation_flag=True)
    else:
        raise ValueError('load_ds_checkpoint or load_pt_checkpoint must be set in the config file')

    gpu_number = int(gpu_number)
    if gpu_number not in [0, 1, 2, 3]:
        # Unexpected rank value: log it with the host for debugging.
        print(gpu_number, socket.gethostname())
    model = load_strategy.get_model(DNATransformer)
    try:
        model.cuda(gpu_number)
    except Exception:
        print('ERROR: ', gpu_number, socket.gethostname())
        print("Running on CPU.... don't expect any sequences out of this one.")
    model.eval()

    if args.known_sequence_files is not None:
        for i in args.known_sequence_files:
            print(i)
        print('Using known sequence files: {}'.format(args.known_sequence_files))
    try:
        results = non_redundant_generation(model.model, model.tokenizer, num_seqs=args.num_seqs, known_sequence_files=args.known_sequence_files, start_sequence=None, to_stop_codon=False, max_length=config.block_size, write_to_file=output_fasta, custom_seq_name=seq_name, temperature=args.temperature, top_p=args.top_p, top_k=args.top_k)
        unique_seqs, all_seqs = results['unique_seqs'], results['all_generated_seqs']
        print(f'Proportion of unique seqs: {len(unique_seqs) / len(all_seqs)}')
        # BUG FIX: the original passed `args.output_fasta`, which argparse never
        # defines (the flag is --output_folder) and so raised AttributeError
        # inside this try block; use the computed per-rank path instead.
        seqs_to_fasta(unique_seqs, output_fasta, custom_seq_name=args.name_prefix)
    except Exception:
        print('Failure generating on {}, rank {}'.format(socket.gethostname(), pmi_rank))
class OnnxSeq2SeqConfigWithPast(OnnxConfigWithPast):
    """ONNX export configuration for seq2seq models.

    Handles encoder/decoder-specific dynamic axes and past-key-value
    inputs/outputs, including models with asymmetric encoder/decoder depth.
    """

    # NOTE(review): `outputs`, `num_layers` and `num_attention_heads` are read
    # without parentheses throughout this class (e.g. tuple-unpacking of
    # `self.num_layers`), so they must be properties; the @property decorators
    # were missing in the original.
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """Output dynamic axes, with generic "sequence" axes renamed per side."""
        common_outputs = super(OnnxConfigWithPast, self).outputs
        for name, axes_names in common_outputs.items():
            sequence_name = 'encoder_sequence' if 'encoder' in name else 'decoder_sequence'
            for axis_idx, name in axes_names.items():
                if 'sequence' in name:
                    axes_names[axis_idx] = sequence_name
                else:
                    # Non-sequence axis labels (e.g. "batch") are left untouched.
                    axes_names[axis_idx] = name
        if self.use_past:
            self.fill_with_past_key_values_(common_outputs, direction='outputs')
        return common_outputs

    @property
    def num_layers(self) -> Tuple[int]:
        """(encoder_layers, decoder_layers), falling back to config attributes."""
        try:
            num_layers = super().num_layers
            num_layers = (num_layers, num_layers)
        except AttributeError:
            if hasattr(self._config, 'encoder_layers') and hasattr(self._config, 'decoder_layers'):
                num_layers = (self._config.encoder_layers, self._config.decoder_layers)
            else:
                raise AttributeError('could not find the number of encoder and decoder layers attributes in the model configuration, override the num_layers property of the model OnnxConfig to solve this')
        return num_layers

    @property
    def num_attention_heads(self) -> Tuple[int]:
        """(encoder_heads, decoder_heads), falling back to config attributes."""
        try:
            num_attention_heads = super().num_attention_heads
            num_attention_heads = (num_attention_heads, num_attention_heads)
        except AttributeError:
            if hasattr(self._config, 'encoder_attention_heads') and hasattr(self._config, 'decoder_attention_heads'):
                num_attention_heads = (self._config.encoder_attention_heads, self._config.decoder_attention_heads)
            else:
                raise AttributeError('could not find the number of attention heads for the encoder and the decoder attributes in the model configuration, override the num_attention_heads property of the model OnnxConfig to solve this')
        return num_attention_heads

    def generate_dummy_inputs(self, tokenizer: 'PreTrainedTokenizerBase', batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        """Build dummy encoder+decoder inputs (plus past_key_values when enabled)."""
        encoder_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(tokenizer, batch_size, seq_length, is_pair, framework)
        # With past key values, the decoder only consumes the single new token.
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(tokenizer, batch_size, decoder_seq_length, is_pair, framework)
        decoder_inputs = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            batch = common_inputs['input_ids'].shape[0]
            encoder_seq_length = common_inputs['input_ids'].shape[1]
            decoder_seq_length = common_inputs['decoder_input_ids'].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads)
            decoder_shape = (batch, num_decoder_attention_heads, decoder_seq_length + 3, self._config.hidden_size // num_decoder_attention_heads)
            common_inputs['past_key_values'] = []
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
            for _ in range(min_num_layers):
                # Per-layer tuple order: (decoder.key, decoder.value, encoder.key, encoder.value).
                common_inputs['past_key_values'].append((torch.zeros(decoder_shape), torch.zeros(decoder_shape), torch.zeros(encoder_shape), torch.zeros(encoder_shape)))
            shape = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs['past_key_values'].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def fill_with_past_key_values_(self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str):
        """Add past/present key-value dynamic-axis entries to the mapping in place."""
        if direction not in ['inputs', 'outputs']:
            raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given')
        name = 'past_key_values' if direction == 'inputs' else 'present'
        num_encoder_layers, num_decoder_layers = self.num_layers
        min_num_layers = min(num_encoder_layers, num_decoder_layers)
        max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
        remaining_side_name = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
        encoder_sequence = 'past_encoder_sequence'
        decoder_sequence = 'past_decoder_sequence' if direction == 'inputs' else 'past_decoder_sequence + sequence'
        for i in range(min_num_layers):
            inputs_or_outputs[f'{name}.{i}.decoder.key'] = {0: 'batch', 2: decoder_sequence}
            inputs_or_outputs[f'{name}.{i}.decoder.value'] = {0: 'batch', 2: decoder_sequence}
            inputs_or_outputs[f'{name}.{i}.encoder.key'] = {0: 'batch', 2: encoder_sequence}
            inputs_or_outputs[f'{name}.{i}.encoder.value'] = {0: 'batch', 2: encoder_sequence}
        for i in range(min_num_layers, max_num_layers):
            if remaining_side_name == 'encoder':
                axes_info = {0: 'batch', 2: encoder_sequence}
            else:
                axes_info = {0: 'batch', 2: decoder_sequence}
            inputs_or_outputs[f'{name}.{i}.{remaining_side_name}.key'] = axes_info
            # BUG FIX: the matching ".value" entry for the extra layers was
            # missing, leaving those layers' value tensors without dynamic axes.
            inputs_or_outputs[f'{name}.{i}.{remaining_side_name}.value'] = axes_info

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        # t is (decoder.key, decoder.value, encoder.key, encoder.value).
        flattened_output[f'{name}.{idx}.decoder.key'] = t[0]
        flattened_output[f'{name}.{idx}.decoder.value'] = t[1]
        flattened_output[f'{name}.{idx}.encoder.key'] = t[2]
        flattened_output[f'{name}.{idx}.encoder.value'] = t[3]
def test_tracker_diverges():
    """A tracker whose state turns NaN is flagged invalid, removed, and replaced with a new id."""
    det_box = np.array([0, 0, 10, 10])
    tracker = MultiObjectTracker(dt=0.1)

    tracker.step([Detection(box=det_box)])
    assert len(tracker.trackers) == 1
    original_id = tracker.active_tracks()[0].id
    assert_almost_equal(tracker.trackers[0].model.dt, 0.1)
    assert not tracker.trackers[0].is_invalid()

    # Corrupt the internal filter state to simulate numerical divergence.
    tracker.trackers[0]._tracker.x[2] = np.nan
    assert tracker.trackers[0].is_invalid()

    tracker.cleanup_trackers()
    assert len(tracker.trackers) == 0

    # A fresh detection must spawn a brand-new track, not reuse the old id.
    tracker.step([Detection(box=det_box)])
    assert len(tracker.trackers) == 1
    assert tracker.active_tracks()[0].id != original_id
# NOTE(review): `_fixtures(WebFixture)` appeared as a bare statement, making it a
# no-op; it is a fixture-binding decorator factory (cf. reahl's `with_fixtures`)
# and must decorate the test so `web_fixture` is supplied.
@_fixtures(WebFixture)
def test_dropdown_menu_with_header(web_fixture):
    """add_header returns the given widget and renders it with the 'dropdown-header' CSS class."""
    sub_menu = DropdownMenu(web_fixture.view)
    my_header = H(web_fixture.view, 6, text='My header text')
    header = sub_menu.add_header(my_header)
    assert header is my_header
    [header] = sub_menu.html_representation.children
    assert 'dropdown-header' in header.get_attribute('class').split()
# NOTE(review): the original began with a bare `.parametrize(...)` statement — a
# syntax error whose `@pytest.mark` prefix was evidently stripped; restored as
# the parametrize marker (assumes `pytest` is imported at the top of the file).
@pytest.mark.parametrize(('line', 'expected_warnings'), [('Governance', set()), ('Packaging', set()), ('Typing', set()), ('Release', set()), ('Governance, Packaging', set()), ('Packaging, Typing', set()), ('Governance, Governance', {'duplicates'}), ('Release, Release', {'duplicates'}), ('Packaging, Packaging', {'duplicates'}), ('Spam, Spam', {'duplicates', 'valid'}), ('lobster, lobster', {'duplicates', 'capitalisation', 'valid'}), ('governance, governance', {'duplicates', 'capitalisation'}), ('governance', {'capitalisation'}), ('packaging', {'capitalisation'}), ('typing', {'capitalisation'}), ('release', {'capitalisation'}), ('Governance, release', {'capitalisation'}), ('Spam', {'valid'}), ('lobster', {'capitalisation', 'valid'}), ('Packaging, Governance', {'sorted'}), ('Typing, Release', {'sorted'}), ('Release, Governance', {'sorted'}), ('spam, packaging', {'capitalisation', 'valid', 'sorted'})], ids=str)
def test_validate_topic(line: str, expected_warnings: set):
    """check_peps._validate_topic emits exactly the expected warnings for a Topic header line."""
    warnings = [warning for _, warning in check_peps._validate_topic(1, line)]
    # Track which expected categories we actually matched, so extras are caught too.
    found_warnings = set()
    if 'duplicates' in expected_warnings:
        found_warnings.add('duplicates')
        expected = 'Topic must not contain duplicates'
        matching = [w for w in warnings if w == expected]
        assert matching == [expected], warnings
    if 'capitalisation' in expected_warnings:
        found_warnings.add('capitalisation')
        expected = 'Topic must be properly capitalised (Title Case)'
        matching = [w for w in warnings if w == expected]
        assert matching == [expected], warnings
    if 'valid' in expected_warnings:
        found_warnings.add('valid')
        expected = 'Topic must be for a valid sub-index'
        matching = [w for w in warnings if w == expected]
        assert matching == [expected], warnings
    if 'sorted' in expected_warnings:
        found_warnings.add('sorted')
        expected = 'Topic must be sorted lexicographically'
        matching = [w for w in warnings if w == expected]
        assert matching == [expected], warnings
    if expected_warnings == set():
        assert warnings == [], warnings
    assert found_warnings == expected_warnings
class Solution(object):
    """LeetCode 22: generate all combinations of n well-formed parentheses."""

    def generateParenthesis(self, n):
        """Return all valid strings of `n` balanced parenthesis pairs.

        Builds results for n from those for n-1: append ')' and insert '('
        before every existing ')' position; a set removes duplicates.

        BUG FIX: the original recursed infinitely for n <= 0; a base case
        returning [''] (the empty arrangement) is added.
        """
        if n <= 0:
            return ['']
        if n == 1:
            return ['()']
        res = []
        for prev in self.generateParenthesis(n - 1):
            curr = prev + ')'
            for index in range(len(curr)):
                if curr[index] == ')':
                    res.append(curr[:index] + '(' + curr[index:])
        return list(set(res))
# NOTE(review): the original began with a bare `.end_to_end()` statement — a
# syntax error whose `@pytest.mark` prefix was evidently stripped; restored as
# the marker used across the pytask test-suite (assumes `pytest` is imported).
@pytest.mark.end_to_end()
def test_collapsing_of_warnings(tmp_path, runner):
    """Repeated identical warnings are collapsed into a '... in N more locations' line."""
    # TODO(review): the indentation inside this literal looks mangled by
    # whitespace stripping in the source; verify against the original test.
    source = '\n import warnings\n from pytask import task\n\n for i in range(6):\n\n \n def task_example():\n warnings.warn("Warning", category=UserWarning)\n '
    tmp_path.joinpath('task_example.py').write_text(textwrap.dedent(source))
    result = runner.invoke(cli, [tmp_path.as_posix()])
    assert result.exit_code == ExitCode.OK
    assert '... in 1 more locations' in result.output
# NOTE(review): the original had a bare `(scope='session')` line — the stripped
# remains of a fixture decorator — and a truncated `url = '` string literal;
# restored as a session-scoped pytest fixture (assumes `pytest` is imported).
@pytest.fixture(scope='session')
def gitlab_runner(gl):
    """Register a shell-executor runner inside the test container; yield the
    registration output and unregister it on teardown."""
    container = 'gitlab-runner-test'
    runner_name = 'python-gitlab-runner'
    token = 'registration-token'
    # TODO(review): the URL literal was truncated in the source; confirm the
    # actual test endpoint before relying on this value.
    url = 'http://gitlab.test'
    docker_exec = ['docker', 'exec', container, 'gitlab-runner']
    register = ['register', '--run-untagged', '--non-interactive', '--registration-token', token, '--name', runner_name, '--url', url, '--clone-url', url, '--executor', 'shell']
    unregister = ['unregister', '--name', runner_name]
    yield check_output(docker_exec + register).decode()
    # Teardown: remove the runner registration again.
    check_output(docker_exec + unregister).decode()
class PdfTextSearcher(pdfium_i.AutoCloseable):
    """Wraps a PDFium text-search handle and steps through its matches."""

    def __init__(self, raw, textpage):
        self.raw = raw
        self.textpage = textpage
        # Register the native close function with the AutoCloseable machinery.
        super().__init__(pdfium_c.FPDFText_FindClose)

    def parent(self):
        # The owning textpage keeps this search handle alive.
        return self.textpage

    def _get_occurrence(self, find_func):
        """Advance with `find_func`; return (index, count) or None when exhausted."""
        if not find_func(self):
            return None
        start = pdfium_c.FPDFText_GetSchResultIndex(self)
        length = pdfium_c.FPDFText_GetSchCount(self)
        return (start, length)

    def get_next(self):
        """Return the next occurrence as (index, count), or None."""
        return self._get_occurrence(pdfium_c.FPDFText_FindNext)

    def get_prev(self):
        """Return the previous occurrence as (index, count), or None."""
        return self._get_occurrence(pdfium_c.FPDFText_FindPrev)
class TrainLoopDLT():
    """Training loop for the DLT joint diffusion model (boxes + categories).

    Wraps optimizer/scheduler/dataloader preparation via `accelerate`, epoch
    training with gradient accumulation, wandb logging, checkpointing, and
    qualitative sampling from the validation set.
    """

    def __init__(self, accelerator: Accelerator, model, diffusion: JointDiffusionScheduler, train_data, val_data, opt_conf, log_interval: int, save_interval: int, categories_num: int, device: str = 'cpu', resume_from_checkpoint: str = None):
        self.categories_num = categories_num
        self.train_data = train_data
        self.val_data = val_data
        self.accelerator = accelerator
        self.save_interval = save_interval
        self.diffusion = diffusion
        self.opt_conf = opt_conf
        self.log_interval = log_interval
        self.device = device
        optimizer = torch.optim.AdamW(model.parameters(), lr=opt_conf.lr, betas=opt_conf.betas, weight_decay=opt_conf.weight_decay, eps=opt_conf.epsilon)
        train_loader = DataLoader(train_data, batch_size=opt_conf.batch_size, shuffle=True, num_workers=opt_conf.num_workers)
        val_loader = DataLoader(val_data, batch_size=opt_conf.batch_size, shuffle=False, num_workers=opt_conf.num_workers)
        lr_scheduler = get_scheduler(opt_conf.lr_scheduler, optimizer, num_warmup_steps=opt_conf.num_warmup_steps * opt_conf.gradient_accumulation_steps, num_training_steps=len(train_loader) * opt_conf.num_epochs)
        # Let accelerate wrap everything for (multi-)device execution.
        self.model, self.optimizer, self.train_dataloader, self.val_dataloader, self.lr_scheduler = accelerator.prepare(model, optimizer, train_loader, val_loader, lr_scheduler)
        LOG.info((model.device, self.device))
        self.total_batch_size = opt_conf.batch_size * accelerator.num_processes * opt_conf.gradient_accumulation_steps
        self.num_update_steps_per_epoch = math.ceil(len(train_loader) / opt_conf.gradient_accumulation_steps)
        self.max_train_steps = opt_conf.num_epochs * self.num_update_steps_per_epoch
        LOG.info('***** Running training *****')
        LOG.info(f' Num examples = {len(train_data)}')
        LOG.info(f' Num Epochs = {opt_conf.num_epochs}')
        LOG.info(f' Instantaneous batch size per device = {opt_conf.batch_size}')
        LOG.info(f' Total train batch size (w. parallel, distributed & accumulation) = {self.total_batch_size}')
        LOG.info(f' Gradient Accumulation steps = {opt_conf.gradient_accumulation_steps}')
        LOG.info(f' Total optimization steps = {self.max_train_steps}')
        self.global_step = 0
        self.first_epoch = 0
        self.resume_from_checkpoint = resume_from_checkpoint
        if resume_from_checkpoint:
            # BUG FIX: loggers have no `.print` method (the original called
            # `LOG.print`, raising AttributeError on resume); use `.info`.
            LOG.info(f'Resuming from checkpoint {resume_from_checkpoint}')
            accelerator.load_state(resume_from_checkpoint)
            # Checkpoint dirs are named "checkpoint-<epoch>" — see train_epoch().
            last_epoch = int(resume_from_checkpoint.split('-')[1])
            self.global_step = last_epoch * self.num_update_steps_per_epoch
            self.first_epoch = last_epoch
            self.resume_step = 0

    def train(self):
        """Run all epochs; after each epoch, log original vs. predicted layouts."""
        for epoch in range(self.first_epoch, self.opt_conf.num_epochs):
            self.train_epoch(epoch)
            orig, pred = self.generate_images()
            wandb.log({'pred': [wandb.Image(pil, caption=f'pred_{self.global_step}_{i:02d}.jpg') for i, pil in pred], 'orig': [wandb.Image(pil, caption=f'orig_{self.global_step}.jpg') for i, pil in orig]}, step=self.global_step)

    def sample2dev(self, sample):
        """Move a (possibly one-level-nested) dict of tensors to the device, in place."""
        for k, v in sample.items():
            if isinstance(v, dict):
                for k1, v1 in v.items():
                    sample[k][k1] = v1.to(self.device)
            else:
                sample[k] = v.to(self.device)

    def train_epoch(self, epoch):
        """Train a single epoch with gradient accumulation, then checkpoint."""
        self.model.train()
        device = self.model.device
        progress_bar = tqdm(total=self.num_update_steps_per_epoch, disable=(not self.accelerator.is_local_main_process))
        progress_bar.set_description(f'Epoch {epoch}')
        losses = {}
        for step, batch in enumerate(self.train_dataloader):
            # When resuming mid-epoch, fast-forward the already-consumed steps.
            if self.resume_from_checkpoint and epoch == self.first_epoch and step < self.resume_step:
                if step % self.opt_conf.gradient_accumulation_steps == 0:
                    progress_bar.update(1)
                continue
            self.sample2dev(batch)
            noise = torch.randn(batch['box'].shape).to(device)
            bsz = batch['box'].shape[0]
            # Sample a random diffusion timestep per example.
            t = torch.randint(0, self.diffusion.num_cont_steps, (bsz,), device=device).long()
            cont_vec, noisy_batch = self.diffusion.add_noise_jointly(batch['box'], batch, t, noise)
            noisy_batch['box'] = cont_vec
            with self.accelerator.accumulate(self.model):
                boxes_predict, cls_predict = self.model(batch, noisy_batch, t)
                loss_mse = masked_l2(batch['box_cond'], boxes_predict, batch['mask_box'])
                loss_cls = masked_cross_entropy(cls_predict, batch['cat'], batch['mask_cat'])
                # Joint objective: weighted box regression + category classification.
                loss = ((self.opt_conf.lmb * loss_mse) + loss_cls).mean()
                self.accelerator.backward(loss)
                if self.accelerator.sync_gradients:
                    self.accelerator.clip_grad_norm_(self.model.parameters(), 1.0)
                self.optimizer.step()
                self.lr_scheduler.step()
                self.optimizer.zero_grad()
            losses.setdefault('mse', []).append(loss_mse.mean().detach().item())
            losses.setdefault('cls', []).append(loss_cls.mean().detach().item())
            acc_cat = masked_acc(batch['cat'].detach(), cls_predict, batch['mask_cat'].detach())
            losses.setdefault('acc_cat', []).append(acc_cat.mean().detach().item())
            losses.setdefault('loss', []).append(loss.detach().item())
            if self.accelerator.sync_gradients:
                # One optimizer update completed across all accumulation steps.
                progress_bar.update(1)
                self.global_step += 1
                logs = {'loss': loss.detach().item(), 'lr': self.lr_scheduler.get_last_lr()[0], 'step': self.global_step}
                progress_bar.set_postfix(**logs)
                if self.global_step % self.log_interval == 0:
                    wandb.log({k: np.mean(v) for k, v in losses.items()}, step=self.global_step)
                    wandb.log({'lr': self.lr_scheduler.get_last_lr()[0]}, step=self.global_step)
        progress_bar.close()
        self.accelerator.wait_for_everyone()
        save_path = self.opt_conf.ckpt_dir / f'checkpoint-{epoch}/'
        if self.opt_conf.ckpt_dir.exists():
            # Retention policy: keep at most 30 checkpoints, delete the oldest.
            ckpts = list(self.opt_conf.ckpt_dir.glob('checkpoint-*'))
            ckpts = sorted(ckpts, key=lambda x: int(x.name.split('-')[1]))
            if len(ckpts) > 30:
                LOG.info(f'Deleting checkpoint {ckpts[0]}')
                shutil.rmtree(ckpts[0])
        self.accelerator.save_state(save_path)
        self.model.save_pretrained(save_path)
        LOG.info(f'Saving checkpoint to {save_path}')

    def generate_images(self):
        """Sample 5 validation layouts under different conditioning masks.

        Returns (originals, predictions) as lists of (index, PIL.Image).
        """
        ixs = range(len(self.val_data))
        all_res = []
        orig = []
        ct = 0
        for ix in np.random.choice(ixs, 5, replace=False):
            box, cat, ind, name = self.val_data.get_data_by_ix(ix)
            orig.append((ct, Image.fromarray(plot_sample((((box / 2) + 1) / 2), cat, ind, self.val_data.idx2color_map))))
            # Each sample index exercises a different masking regime.
            if ct == 0:
                mask, full_mask_cat = mask_loc(box.shape, 1.0)
            elif ct == 1:
                mask, full_mask_cat = mask_size(box.shape, 1.0)
            elif ct == 2:
                mask, full_mask_cat = mask_whole_box(box.shape, 1.0)
            elif ct == 3:
                mask, full_mask_cat = mask_random_box_and_cat(box.shape, np.random.uniform(0.5, 1.0, size=1)[0], np.random.uniform(0.5, 1.0, size=1)[0])
            elif ct == 4:
                mask, full_mask_cat = mask_all(box.shape)
            box, cat, mask, mask4cat = self.val_data.pad_instance(box, cat, mask, full_mask_cat, self.val_data.max_num_comp)
            sample = {'box': torch.tensor(box.astype(np.float32), device=self.device), 'cat': torch.tensor(cat.astype(int), device=self.device), 'mask_box': torch.tensor(mask.astype(int), device=self.device), 'mask_cat': torch.tensor(mask4cat.astype(int), device=self.device), 'box_cond': torch.tensor(box.copy().astype(np.float32), device=self.device)}
            sample_cond = self.val_dataloader.collate_fn([sample])
            predicted = self.sample_from_model(sample_cond)
            box, cat = predicted
            # Keep model output only where masked; copy conditioning elsewhere.
            box = (sample['mask_box'] * box) + ((1 - sample['mask_box']) * sample['box_cond'])
            cat = (sample['mask_cat'] * cat) + ((1 - sample['mask_cat']) * sample['cat'])
            box = box.cpu().numpy()[0]
            cat = cat.cpu().numpy()[0]
            # Strip padding rows (all-zero boxes) and padding categories (0).
            box = box[~(box == 0.0).all(1)]
            cat = cat[~(cat == 0)]
            box = ((box / 2) + 1) / 2
            canvas = plot_sample(box, cat, None, self.val_data.idx2color_map, height=512)
            all_res.append((ct, Image.fromarray(canvas)))
            ct += 1
            if ct > 5:
                break
        return (orig, all_res)

    def sample_from_model(self, sample):
        """Run the reverse diffusion chain conditioned on `sample`; return (boxes, cats)."""
        shape = sample['box_cond'].shape
        model = self.accelerator.unwrap_model(self.model)
        model.eval()
        # Start from pure-noise boxes and the "masked" category id everywhere.
        noisy_batch = {'box': torch.randn(*shape, dtype=torch.float32, device=self.device), 'cat': (self.categories_num - 1) * torch.ones((shape[0], shape[1]), dtype=torch.long, device=self.device)}
        for i in range(self.diffusion.num_cont_steps)[::-1]:
            t = torch.tensor([i] * shape[0], device=self.device)
            with torch.no_grad():
                bbox_pred, cat_pred = model(sample, noisy_batch, timesteps=t)
                desc_pred = {'cat': cat_pred}
                bbox_pred, cat_pred = self.diffusion.step_jointly(bbox_pred, desc_pred, timestep=t, sample=noisy_batch['box'])
                noisy_batch['box'] = bbox_pred.prev_sample
                noisy_batch['cat'] = cat_pred['cat']
        return (bbox_pred.pred_original_sample, cat_pred['cat'])
def eval_with_funcs(predictors, nr_eval, get_player_fn):
    # Run `nr_eval` evaluation episodes spread across one worker thread per
    # predictor function and return the average of the collected scores.
    class Worker(StoppableThread, ShareSessionThread):
        # Worker thread: repeatedly plays episodes with its predictor `func`
        # and pushes each episode's result onto the shared queue until stopped.
        def __init__(self, func, queue):
            super(Worker, self).__init__()
            self.func = func
            self.q = queue
        def run(self):
            with self.default_sess():
                player = get_player_fn()
                while (not self.stopped()):
                    try:
                        val = play_one_episode(player, self.func)
                    except RuntimeError:
                        # Presumably the session was torn down during shutdown;
                        # exit the worker quietly. TODO confirm intended cause.
                        return
                    self.queue_put_stoppable(self.q, val)
    q = queue.Queue()
    threads = [Worker(f, q) for f in predictors]
    for k in threads:
        k.start()
        # Stagger worker start-up slightly.
        time.sleep(0.1)
    stat = StatCounter()
    def fetch():
        # Block for one episode result and feed it into the running statistics.
        val = q.get()
        stat.feed(val)
    for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
        fetch()
    logger.info('Waiting for all the workers to finish the last run...')
    for k in threads:
        k.stop()
    for k in threads:
        k.join()
    # Drain any results produced while the workers were stopping.
    while q.qsize():
        fetch()
    farmer_win_rate = stat.average
    return farmer_win_rate
def infer_tests_to_run(output_file, diff_with_last_commit=False, filters=None, json_output_file=None):
    # Determine which test files must run for the files modified in the diff,
    # write them space-separated to `output_file`, and optionally emit a JSON
    # mapping of test groups to `json_output_file`.
    modified_files = get_modified_python_files(diff_with_last_commit=diff_with_last_commit)
    print(f'''
### MODIFIED FILES ###
{_print_list(modified_files)}''')
    # Expand to every file impacted via the reverse dependency map.
    impacted_modules_map = create_reverse_dependency_map()
    impacted_files = modified_files.copy()
    for f in modified_files:
        if (f in impacted_modules_map):
            impacted_files.extend(impacted_modules_map[f])
    impacted_files = sorted(set(impacted_files))
    print(f'''
### IMPACTED FILES ###
{_print_list(impacted_files)}''')
    if ('setup.py' in impacted_files):
        # A setup.py change invalidates everything: run the full test suite.
        test_files_to_run = ['tests']
        repo_utils_launch = True
    else:
        test_files_to_run = []
        for f in impacted_files:
            # Modified test files are always run as-is.
            if f.startswith('tests/'):
                test_files_to_run.append(f)
            # Example folders map to their dedicated example test modules.
            elif f.startswith('examples/pytorch'):
                test_files_to_run.append('examples/pytorch/test_pytorch_examples.py')
                test_files_to_run.append('examples/pytorch/test_accelerate_examples.py')
            elif f.startswith('examples/tensorflow'):
                test_files_to_run.append('examples/tensorflow/test_tensorflow_examples.py')
            elif f.startswith('examples/flax'):
                test_files_to_run.append('examples/flax/test_flax_examples.py')
            else:
                # Library modules map to their test file(s), if any.
                new_tests = module_to_test_file(f)
                if (new_tests is not None):
                    if isinstance(new_tests, str):
                        test_files_to_run.append(new_tests)
                    else:
                        test_files_to_run.extend(new_tests)
        test_files_to_run = sorted(set(test_files_to_run))
        # Drop entries that no longer exist on disk (e.g. deleted tests).
        test_files_to_run = [f for f in test_files_to_run if (os.path.isfile(f) or os.path.isdir(f))]
        if (filters is not None):
            # Keep only tests under the requested path prefixes.
            filtered_files = []
            for filter in filters:
                filtered_files.extend([f for f in test_files_to_run if f.startswith(filter)])
            test_files_to_run = filtered_files
        repo_utils_launch = any(((f.split(os.path.sep)[1] == 'repo_utils') for f in test_files_to_run))
    if repo_utils_launch:
        # Signal the repo_utils job via a sibling file next to `output_file`.
        repo_util_file = (Path(output_file).parent / 'test_repo_utils.txt')
        with open(repo_util_file, 'w', encoding='utf-8') as f:
            f.write('tests/repo_utils')
    print(f'''
### TEST TO RUN ###
{_print_list(test_files_to_run)}''')
    if (len(test_files_to_run) > 0):
        with open(output_file, 'w', encoding='utf-8') as f:
            f.write(' '.join(test_files_to_run))
        if ('tests' in test_files_to_run):
            # Expand the 'tests' sentinel into the concrete list for the JSON map.
            test_files_to_run = get_all_tests()
        if (json_output_file is not None):
            test_map = {}
            for test_file in test_files_to_run:
                names = test_file.split(os.path.sep)
                if (names[1] == 'models'):
                    # Group model tests per model folder.
                    key = '/'.join(names[1:3])
                elif ((len(names) > 2) or (not test_file.endswith('.py'))):
                    # Nested or directory entries are grouped by top-level folder.
                    key = '/'.join(names[1:2])
                else:
                    key = 'common'
                if (key not in test_map):
                    test_map[key] = []
                test_map[key].append(test_file)
            keys = sorted(test_map.keys())
            test_map = {k: ' '.join(sorted(test_map[k])) for k in keys}
            with open(json_output_file, 'w', encoding='UTF-8') as fp:
                json.dump(test_map, fp, ensure_ascii=False)
class RDKit():
def mol_to_file(rdkit_mol: Chem.Mol, file_name: str) -> None:
file_path = Path(file_name)
if (file_path.suffix == '.pdb'):
return Chem.MolToPDBFile(rdkit_mol, file_name)
elif ((file_path.suffix == '.sdf') or (file_path.suffix == '.mol')):
return Chem.MolToMolFile(rdkit_mol, file_name)
elif (file_path.suffix == '.xyz'):
return Chem.MolToXYZFile(rdkit_mol, file_name)
else:
raise FileTypeError(f'The file type {file_path.suffix} is not supported please chose from xyz, pdb, mol or sdf.')
def mol_to_multiconformer_file(rdkit_mol: Chem.Mol, file_name: str) -> None:
file_path = Path(file_name)
if (file_path.suffix == '.pdb'):
writer = Chem.MolToPDBBlock
elif ((file_path.suffix == '.mol') or (file_path.suffix == '.sdf')):
writer = Chem.MolToMolBlock
elif (file_path.suffix == '.xyz'):
writer = Chem.MolToXYZBlock
else:
raise FileTypeError(f'The file type {file_path.suffix} is not supported please chose from xyz, pdb, mol or sdf.')
with open(file_name, 'w') as out:
for i in range(rdkit_mol.GetNumConformers()):
out.write(writer(rdkit_mol, confId=i))
def file_to_rdkit_mol(file_path: Path) -> Chem.Mol:
if (file_path.suffix == '.pdb'):
mol = Chem.MolFromPDBFile(file_path.as_posix(), removeHs=False, sanitize=False)
elif (file_path.suffix == '.mol2'):
mol = Chem.MolFromMol2File(file_path.as_posix(), removeHs=False, sanitize=False)
elif ((file_path.suffix == '.mol') or (file_path.suffix == '.sdf')):
mol = Chem.MolFromMolFile(file_path.as_posix(), removeHs=False, sanitize=False, strictParsing=True)
else:
raise FileTypeError(f'The file type {file_path.suffix} is not supported.')
Chem.SanitizeMol(mol, ((Chem.SANITIZE_ALL ^ Chem.SANITIZE_SETAROMATICITY) ^ Chem.SANITIZE_ADJUSTHS))
Chem.SetAromaticity(mol, Chem.AromaticityModel.AROMATICITY_MDL)
Chem.AssignStereochemistryFrom3D(mol)
mol.SetProp('_Name', file_path.stem)
return mol
def smiles_to_rdkit_mol(smiles_string: str, name: Optional[str]=None):
mol = AllChem.MolFromSmiles(smiles_string, sanitize=False)
if (name is None):
name = input('Please enter a name for the molecule:\n>')
mol.SetProp('_Name', name)
atom_index_to_map = {}
for atom in mol.GetAtoms():
atom_index_to_map[atom.GetIdx()] = atom.GetAtomMapNum()
atom.SetAtomMapNum(0)
Chem.SanitizeMol(mol)
Chem.SetAromaticity(mol, Chem.AromaticityModel.AROMATICITY_MDL)
Chem.AssignStereochemistry(mol)
mol = AllChem.AddHs(mol)
AllChem.EmbedMolecule(mol, randomSeed=1)
for atom in mol.GetAtoms():
atom.SetAtomMapNum(atom_index_to_map.get(atom.GetIdx(), 0))
return mol
def rdkit_descriptors(rdkit_mol: Chem.Mol) -> Dict[(str, float)]:
return {'Heavy atoms': Descriptors.HeavyAtomCount(rdkit_mol), 'H-bond donors': Descriptors.NumHDonors(rdkit_mol), 'H-bond acceptors': Descriptors.NumHAcceptors(rdkit_mol), 'Molecular weight': Descriptors.MolWt(rdkit_mol), 'LogP': Descriptors.MolLogP(rdkit_mol)}
def get_smiles(rdkit_mol: Chem.Mol, isomeric: bool=True, explicit_hydrogens: bool=True, mapped: bool=False) -> str:
cp_mol = copy.deepcopy(rdkit_mol)
if mapped:
explicit_hydrogens = True
for atom in cp_mol.GetAtoms():
atom.SetAtomMapNum((atom.GetIdx() + 1))
if (not explicit_hydrogens):
cp_mol = Chem.RemoveHs(cp_mol)
return Chem.MolToSmiles(cp_mol, isomericSmiles=isomeric, allHsExplicit=explicit_hydrogens)
def get_smirks_matches(rdkit_mol: Chem.Mol, smirks: str) -> List[Tuple[(int, ...)]]:
cp_mol = copy.deepcopy(rdkit_mol)
smarts_mol = Chem.MolFromSmarts(smirks)
if (smarts_mol is None):
raise SmartsError(f'RDKit could not understand the query {smirks} please check again.')
mapping = {}
for atom in smarts_mol.GetAtoms():
smart_index = atom.GetAtomMapNum()
if (smart_index != 0):
mapping[(smart_index - 1)] = atom.GetIdx()
all_matches = set()
for match in cp_mol.GetSubstructMatches(smarts_mol, uniquify=False, useChirality=True):
smirks_atoms = [match[atom] for atom in mapping.values()]
if (smirks_atoms[0] < smirks_atoms[(- 1)]):
all_matches.add(tuple(smirks_atoms))
else:
all_matches.add(tuple(reversed(smirks_atoms)))
return list(all_matches)
def get_smarts(rdkit_mol: Chem.Mol) -> str:
return Chem.MolToSmarts(rdkit_mol)
def generate_conformers(rdkit_mol: Chem.Mol, conformer_no: int) -> List[np.ndarray]:
    """Embed up to `conformer_no` conformers (seeded, RMS-pruned) and
    return their coordinate arrays; existing conformers are kept."""
    AllChem.EmbedMultipleConfs(
        rdkit_mol,
        numConfs=conformer_no,
        randomSeed=1,
        clearConfs=False,
        useBasicKnowledge=True,
        pruneRmsThresh=1,
        enforceChirality=True,
    )
    return [conformer.GetPositions() for conformer in rdkit_mol.GetConformers()]
def find_symmetry_classes(rdkit_mol: Chem.Mol) -> Dict[(int, int)]:
    """Map each atom index to its symmetry class (its canonical CIP rank).

    Atoms with equal `_CIPRank` are topologically equivalent. The original
    implementation built a list of atom groups per rank and re-derived the
    same atom->rank mapping from it via repeated `np.where` scans (one full
    pass per rank); since the enumeration index always equals the rank,
    reading the `_CIPRank` property directly gives the identical dict in a
    single pass.
    """
    # Ensure CIP ranks are present before reading them.
    if not rdkit_mol.GetAtomWithIdx(0).HasProp('_CIPRank'):
        Chem.AssignStereochemistry(rdkit_mol, cleanIt=True, force=True, flagPossibleStereoCenters=True)
    return {atom.GetIdx(): int(atom.GetProp('_CIPRank')) for atom in rdkit_mol.GetAtoms()}
def get_conformer_rmsd(rdkit_mol: Chem.Mol, ref_index: int, align_index: int) -> float:
    """Return the RMS distance between two conformers of the molecule."""
    rms = Chem.AllChem.GetConformerRMS(rdkit_mol, ref_index, align_index)
    return rms
def add_conformer(rdkit_mol: Chem.Mol, conformer_coordinates: np.ndarray) -> Chem.Mol:
    """Append one conformer built from an (n_atoms, 3) coordinate array and
    return the (mutated) molecule."""
    conformer = Chem.Conformer()
    for atom_index, coordinate in enumerate(conformer_coordinates):
        conformer.SetAtomPosition(atom_index, Point3D(*coordinate))
    rdkit_mol.AddConformer(conformer, assignId=True)
    return rdkit_mol
class _XyzTileServiceNonEarth(_XyzTileService):
    """XYZ tile service whose imagery depicts a non-Earth body.

    Behaves like `_XyzTileService` but warns that the images are projected
    onto an earth-based CRS, so distance-based tools report earth units.
    """

    def __call__(self, *args, **kwargs):
        # Fixed typos in the user-facing log text:
        # "celestrial" -> "celestial", "geod_crices" -> "geod_circles".
        _log.info(
            f"EOmaps: The WebMap service '{self.name}' shows images from a different "
            "celestial body projected to an earth-based crs! Units used in scalebars, "
            "geod_circles etc. represent earth-based units!"
        )
        super().__call__(*args, **kwargs)
class SceneModel(QtCore.QObject):
    """Qt adapter around a kite Scene.

    Re-publishes the scene's subscription-based events as Qt signals and
    moves itself onto a worker thread so heavy computations (e.g. the
    covariance weight matrix) do not block the GUI.
    """

    sigSceneModelChanged = QtCore.pyqtSignal(object)
    sigSceneChanged = QtCore.pyqtSignal()
    sigConfigChanged = QtCore.pyqtSignal()
    sigFrameChanged = QtCore.pyqtSignal()
    sigQuadtreeChanged = QtCore.pyqtSignal()
    # Internal feed, rate-limited through SignalProxy into sigQuadtreeChanged.
    _sigQuadtreeChanged = QtCore.pyqtSignal()
    sigQuadtreeConfigChanged = QtCore.pyqtSignal()
    sigCovarianceChanged = QtCore.pyqtSignal()
    sigCovarianceConfigChanged = QtCore.pyqtSignal()
    sigProgressStarted = QtCore.pyqtSignal(object)
    sigProgressFinished = QtCore.pyqtSignal()
    sigCalculateWeightMatrixFinished = QtCore.pyqtSignal(object)
    sigHasElevation = QtCore.pyqtSignal()
    sigLogRecord = QtCore.pyqtSignal(object)

    def __init__(self, spool):
        QtCore.QObject.__init__(self)
        self.spool = spool
        self.scene = None
        self.frame = None
        self.quadtree = None
        self.covariance = None
        self.aps = None
        self.log = SceneLogModel(self)
        # Throttle quadtree-change notifications to at most 10 per second.
        self._ = SignalProxy(self._sigQuadtreeChanged, rateLimit=10, delay=0, slot=(lambda : self.sigQuadtreeChanged.emit()))
        # Forward every log record (DEBUG and up) as a Qt signal.
        self._log_handler = logging.Handler()
        self._log_handler.setLevel(logging.DEBUG)
        self._log_handler.emit = self.sigLogRecord.emit
        logging.root.addHandler(self._log_handler)
        self._download_status = None
        # presumably a module-level hook for pyrocko download progress — TODO confirm
        if pyrocko_download_callback:
            pyrocko_download_callback(self.download_progress)
        self.qtproxy = QSceneQuadtreeProxy(self)
        # Run this model (and its slots) on a dedicated worker thread.
        self.worker_thread = QtCore.QThread()
        self.moveToThread(self.worker_thread)
        self.worker_thread.start()

    def setScene(self, scene):
        """Swap in a new scene and rewire all event subscriptions."""
        self.disconnectSlots()
        self.scene = scene
        self.frame = scene.frame
        self.quadtree = scene.quadtree
        self.covariance = scene.covariance
        self.aps = scene.aps
        self.connectSlots()
        # NOTE(review): emits the builtin `object`, not the new scene — confirm intent.
        self.sigSceneModelChanged.emit(object)

    def getScene(self):
        return self.scene

    def disconnectSlots(self):
        """Unsubscribe from the current scene's events (no-op when unset)."""
        if (self.scene is None):
            return
        self.scene.evChanged.unsubscribe(self.sigSceneChanged.emit)
        self.scene.evConfigChanged.unsubscribe(self.sigConfigChanged.emit)
        self.scene.frame.evChanged.unsubscribe(self.sigFrameChanged.emit)
        self.quadtree.evChanged.unsubscribe(self._sigQuadtreeChanged.emit)
        self.quadtree.evConfigChanged.unsubscribe(self.sigQuadtreeConfigChanged.emit)
        self.covariance.evChanged.unsubscribe(self.sigCovarianceChanged.emit)
        self.covariance.evConfigChanged.unsubscribe(self.sigCovarianceConfigChanged.emit)
        # NOTE(review): sigAPSChanged is not declared on this class, so this
        # line would raise AttributeError when reached — declare or fix it.
        self.aps.evChanged.unsubscribe(self.sigAPSChanged.emit)

    def connectSlots(self):
        """Subscribe the scene's events to the matching Qt signals."""
        self.scene.evChanged.subscribe(self.sigSceneChanged.emit)
        # NOTE(review): asymmetric with disconnectSlots(), which pairs
        # evConfigChanged with sigConfigChanged — looks like a copy/paste slip.
        self.scene.evConfigChanged.subscribe(self.sigCovarianceConfigChanged.emit)
        self.scene.frame.evChanged.subscribe(self.sigFrameChanged.emit)
        self.quadtree.evChanged.subscribe(self._sigQuadtreeChanged.emit)
        self.quadtree.evConfigChanged.subscribe(self.sigQuadtreeConfigChanged.emit)
        self.covariance.evChanged.subscribe(self.sigCovarianceChanged.emit)
        self.covariance.evConfigChanged.subscribe(self.sigCovarianceConfigChanged.emit)

    # NOTE(review): stray "(str)" below — looks like residue of a stripped
    # decorator, probably @QtCore.pyqtSlot(str).
    (str)
    def exportWeightMatrix(self, filename):
        """Compute and write Covariance.weight_matrix to `filename`,
        reporting progress via sigProgressStarted/sigProgressFinished."""
        t0 = datetime.now()
        quadtree = self.quadtree
        covariance = self.covariance

        def progress_func():
            return covariance.finished_combinations
        # Number of unique leaf pairs; true division yields a float.
        ncombinations = ((quadtree.nleaves * (quadtree.nleaves + 1)) / 2)
        self.sigProgressStarted.emit(('Calculating <span style="font-family: monospace">Covariance.weight_matrix</span>, this can take a few minutes...', ncombinations, progress_func))
        self.scene.covariance.export_weight_matrix(filename)
        self.sigProgressFinished.emit()
        self.sigCalculateWeightMatrixFinished.emit((datetime.now() - t0))

    # NOTE(review): stray "()" below — stripped decorator residue,
    # probably @QtCore.pyqtSlot().
    ()
    def calculateWeightMatrix(self):
        """Force computation of Covariance.weight_matrix with progress UI."""
        t0 = datetime.now()
        quadtree = self.quadtree
        covariance = self.covariance

        def progress_func():
            return covariance.finished_combinations
        ncombinations = ((quadtree.nleaves * (quadtree.nleaves + 1)) / 2)
        self.sigProgressStarted.emit(('Calculating <span style="font-family: monospace">Covariance.weight_matrix</span>, this can take a few minutes...', ncombinations, progress_func))
        # Property access alone triggers the (cached) computation.
        self.scene.covariance.weight_matrix
        self.sigProgressFinished.emit()
        self.sigCalculateWeightMatrixFinished.emit((datetime.now() - t0))

    # NOTE(review): stray "(str)" — stripped decorator residue.
    (str)
    def importFile(self, filename):
        """Import foreign scene data from `filename` and make it current."""
        self.sigProgressStarted.emit(('Importing scene...',))
        self.setScene(Scene.import_data(filename))
        self.sigProgressFinished.emit()

    # NOTE(review): stray "(str)" — stripped decorator residue.
    (str)
    def loadFile(self, filename):
        """Load a previously saved scene from `filename`."""
        self.sigProgressStarted.emit(('Loading scene...',))
        self.setScene(Scene.load(filename))
        self.sigProgressFinished.emit()

    # NOTE(review): stray "(str)" — stripped decorator residue.
    (str)
    def loadConfig(self, filename):
        self.scene.load_config(filename)

    def download_progress(self, context_str, status):
        """Progress-dialog callback fed with pyrocko download status dicts."""
        progress = self.spool.progress
        progress.setWindowTitle('Downloading...')
        progress.setLabelText(context_str)
        progress.setMaximum(status.get('ntotal_bytes_all_files', 0))
        progress.setValue(status.get('nread_bytes_all_files', 0))
        QtCore.QCoreApplication.processEvents()
        if progress.isHidden():
            progress.show()
            QtCore.QCoreApplication.processEvents()
        if status['finished']:
            progress.reset()
class Xpub(MasterPublicKeyMixin):
    """Keystore mixin wrapping a BIP32 extended public key (xpub), with
    optional key-origin metadata (derivation prefix + root fingerprint)
    used for PSBTs and output descriptors."""

    def __init__(self, *, derivation_prefix: Optional[str]=None, root_fingerprint: Optional[str]=None):
        self.xpub = None
        # Cached branch xpubs for the receive (0) and change (1) chains.
        self.xpub_receive = None
        self.xpub_change = None
        # Lazily parsed BIP32Node for self.xpub.
        self._xpub_bip32_node = None
        self._derivation_prefix = derivation_prefix
        self._root_fingerprint = root_fingerprint

    def get_master_public_key(self):
        return self.xpub

    def get_bip32_node_for_xpub(self) -> Optional[BIP32Node]:
        """Parse (and cache) self.xpub into a BIP32Node; None if unset."""
        if (self._xpub_bip32_node is None):
            if (self.xpub is None):
                return None
            self._xpub_bip32_node = BIP32Node.from_xkey(self.xpub)
        return self._xpub_bip32_node

    def get_derivation_prefix(self) -> Optional[str]:
        """Normalized derivation path from the root to this xpub, or None."""
        if (self._derivation_prefix is None):
            return None
        return normalize_bip32_derivation(self._derivation_prefix)

    def get_root_fingerprint(self) -> Optional[str]:
        return self._root_fingerprint

    def get_fp_and_derivation_to_be_used_in_partial_tx(self, der_suffix: Sequence[int], *, only_der_suffix: bool) -> Tuple[(bytes, Sequence[int])]:
        """Return (fingerprint, full int path) for PSBT key-origin fields.

        When origin info is missing (or deliberately omitted via
        `only_der_suffix`), this xpub itself is treated as the root.
        """
        fingerprint_hex = self.get_root_fingerprint()
        der_prefix_str = self.get_derivation_prefix()
        if ((not only_der_suffix) and (fingerprint_hex is not None) and (der_prefix_str is not None)):
            fingerprint_bytes = bfh(fingerprint_hex)
            der_prefix_ints = convert_bip32_strpath_to_intpath(der_prefix_str)
        else:
            # Fall back to this node's own fingerprint and an empty prefix.
            fingerprint_bytes = self.get_bip32_node_for_xpub().calc_fingerprint_of_this_node()
            der_prefix_ints = convert_bip32_strpath_to_intpath('m')
        der_full = (der_prefix_ints + list(der_suffix))
        return (fingerprint_bytes, der_full)

    def get_xpub_to_be_used_in_partial_tx(self, *, only_der_suffix: bool) -> str:
        """Serialize self.xpub with depth/fingerprint/child-number rewritten
        to be consistent with the key-origin info put into the PSBT."""
        assert self.xpub
        (fp_bytes, der_full) = self.get_fp_and_derivation_to_be_used_in_partial_tx(der_suffix=[], only_der_suffix=only_der_suffix)
        bip32node = self.get_bip32_node_for_xpub()
        depth = len(der_full)
        child_number_int = (der_full[(- 1)] if (len(der_full) >= 1) else 0)
        child_number_bytes = child_number_int.to_bytes(length=4, byteorder='big')
        # Depth 0 means "this is the root", which by convention has a zero
        # parent fingerprint.
        fingerprint = (bytes(4) if (depth == 0) else bip32node.fingerprint)
        bip32node = bip32node._replace(depth=depth, fingerprint=fingerprint, child_number=child_number_bytes, xtype='standard')
        return bip32node.to_xpub()

    def get_key_origin_info(self) -> Optional[KeyOriginInfo]:
        """Key-origin (fingerprint + path) for descriptor serialization."""
        (fp_bytes, der_full) = self.get_fp_and_derivation_to_be_used_in_partial_tx(der_suffix=[], only_der_suffix=False)
        origin = KeyOriginInfo(fingerprint=fp_bytes, path=der_full)
        return origin

    def get_pubkey_provider(self, sequence: 'AddressIndexGeneric') -> Optional[PubkeyProvider]:
        """Build a descriptor PubkeyProvider for the given address index."""
        strpath = convert_bip32_intpath_to_strpath(sequence)
        # Strip the leading "m" — the deriv path is relative to the xpub.
        strpath = strpath[1:]
        bip32node = self.get_bip32_node_for_xpub()
        return PubkeyProvider(origin=self.get_key_origin_info(), pubkey=bip32node._replace(xtype='standard').to_xkey(), deriv_path=strpath)

    def add_key_origin_from_root_node(self, *, derivation_prefix: str, root_node: BIP32Node):
        """Derive and store origin info from `root_node`, after verifying the
        (root, prefix) pair really derives this xpub."""
        assert self.xpub
        child_node1 = root_node.subkey_at_private_derivation(derivation_prefix)
        child_pubkey_bytes1 = child_node1.eckey.get_public_key_bytes(compressed=True)
        child_node2 = self.get_bip32_node_for_xpub()
        child_pubkey_bytes2 = child_node2.eckey.get_public_key_bytes(compressed=True)
        if (child_pubkey_bytes1 != child_pubkey_bytes2):
            raise Exception('(xpub, derivation_prefix, root_node) inconsistency')
        self.add_key_origin(derivation_prefix=derivation_prefix, root_fingerprint=root_node.calc_fingerprint_of_this_node().hex().lower())

    def add_key_origin(self, *, derivation_prefix: Optional[str]=None, root_fingerprint: Optional[str]=None) -> None:
        """Attach key-origin metadata (after consistency checks) and flag the
        wallet file for rewrite."""
        assert self.xpub
        if (not ((root_fingerprint is None) or (is_hex_str(root_fingerprint) and (len(root_fingerprint) == 8)))):
            raise Exception('root fp must be 8 hex characters')
        derivation_prefix = normalize_bip32_derivation(derivation_prefix)
        if (not is_xkey_consistent_with_key_origin_info(self.xpub, derivation_prefix=derivation_prefix, root_fingerprint=root_fingerprint)):
            raise Exception('xpub inconsistent with provided key origin info')
        if (root_fingerprint is not None):
            self._root_fingerprint = root_fingerprint
        if (derivation_prefix is not None):
            self._derivation_prefix = derivation_prefix
        # NOTE(review): attribute is not initialized in __init__ — presumably
        # provided by a subclass/mixin; confirm.
        self.is_requesting_to_be_rewritten_to_wallet_file = True

    # NOTE(review): stray "_cache(maxsize=None)" — looks like residue of a
    # stripped "@lru_cache(maxsize=None)" decorator.
    _cache(maxsize=None)
    def derive_pubkey(self, for_change: int, n: int) -> bytes:
        """Derive the compressed pubkey for branch `for_change` (0 = receive,
        1 = change) at index `n`; raises CannotDerivePubkey otherwise."""
        for_change = int(for_change)
        if (for_change not in (0, 1)):
            raise CannotDerivePubkey('forbidden path')
        xpub = (self.xpub_change if for_change else self.xpub_receive)
        if (xpub is None):
            # First use of this branch: derive and cache its branch xpub.
            rootnode = self.get_bip32_node_for_xpub()
            xpub = rootnode.subkey_at_public_derivation((for_change,)).to_xpub()
            if for_change:
                self.xpub_change = xpub
            else:
                self.xpub_receive = xpub
        return self.get_pubkey_from_xpub(xpub, (n,))

    def get_pubkey_from_xpub(self, xpub: str, sequence) -> bytes:
        """Publicly derive `sequence` under `xpub`; return compressed pubkey."""
        node = BIP32Node.from_xkey(xpub).subkey_at_public_derivation(sequence)
        return node.eckey.get_public_key_bytes(compressed=True)
class SawyerDoorUnlockV1Policy(Policy):
    """Scripted policy for the door-unlock task: hover over the lock, drop
    onto it, then push it sideways (+x)."""

    # NOTE(review): stray "_fully_parsed" — residue of a stripped decorator
    # (likely @assert_fully_parsed on _parse_obs).
    _fully_parsed

    # NOTE(review): no `self` parameter — a @staticmethod decorator was
    # probably stripped from this snippet.
    def _parse_obs(obs):
        # Observation layout: [hand xyz, lock xyz, rest unused].
        return {'hand_pos': obs[:3], 'lock_pos': obs[3:6], 'unused_info': obs[6:]}

    def get_action(self, obs):
        o_d = self._parse_obs(obs)
        action = Action({'delta_pos': np.arange(3), 'grab_effort': 3})
        # Proportional controller toward the staged waypoint.
        action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=25.0)
        action['grab_effort'] = 1.0
        return action.array

    # NOTE(review): no `self` parameter — @staticmethod likely stripped.
    def _desired_pos(o_d):
        pos_curr = o_d['hand_pos']
        # Offset from the observed lock position to the contact point.
        pos_lock = (o_d['lock_pos'] + np.array([(- 0.03), (- 0.03), (- 0.1)]))
        if (np.linalg.norm((pos_curr[:2] - pos_lock[:2])) > 0.04):
            # Stage 1: move above the lock.
            return (pos_lock + np.array([0.0, 0.0, 0.3]))
        elif (abs((pos_curr[2] - pos_lock[2])) > 0.02):
            # Stage 2: descend onto it.
            return pos_lock
        else:
            # Stage 3: push sideways to unlock.
            return (pos_lock + np.array([0.1, 0.0, 0.0]))
def test_commonpath() -> None:
    """commonpath() returns the deepest shared ancestor of two paths."""
    base = Path('/foo/bar/baz/path')
    child = base / 'sampledir'
    # Argument order must not matter.
    assert commonpath(base, child) == base
    assert commonpath(child, base) == base
    # Sharing only a name prefix is not a path relationship.
    assert commonpath(Path(str(base) + 'suffix'), base) == base.parent
    # An ancestor is its own common path with a descendant.
    assert commonpath(base, base.parent.parent) == base.parent.parent
def test_entrypoint_injection(pytester, monkeypatch):
    """Plugins registered via the entry-point group are reseeded around the run."""
    (pytester.path / 'test_one.py').write_text('def test_one(): pass\n')

    class _FakeEntryPoint:
        def __init__(self, name: str, obj: mock.Mock) -> None:
            self.name = name
            self._obj = obj

        def load(self) -> mock.Mock:
            return self._obj

    registered: list[_FakeEntryPoint] = []

    def fake_entry_points(*, group):
        return registered

    monkeypatch.setattr(pytest_randomly, 'entry_points', fake_entry_points)
    reseed = mock.Mock()
    registered.append(_FakeEntryPoint('test_seeder', reseed))

    pytester.runpytest_inprocess('--randomly-seed=1')
    assert reseed.mock_calls == [
        mock.call(1),
        mock.call(1),
        mock.call(0),
        mock.call(1),
        mock.call(2),
    ]

    reseed.mock_calls[:] = []
    pytester.runpytest_inprocess('--randomly-seed=424242')
    assert reseed.mock_calls == [
        mock.call(424242),
        mock.call(424242),
        mock.call(424241),
        mock.call(424242),
        mock.call(424243),
    ]
class Command(BaseCommand):
    """Export attendee details for a list of ticket numbers to a CSV file.

    Usage: python manage.py fill_data <in_file> <out_file>
    """

    def handle(self, *args, **options):
        if len(args) != 2:
            raise CommandError('Usage: python manage.py fill_data <in_file> <out_file>')
        (in_file, out_file) = args
        # Context managers close both files even on error (the original
        # left the handles open).
        with open(in_file) as fin:
            ticket_nums = [line.rstrip('\n') for line in fin]
        with open(out_file, 'w') as fh:
            header = ('Ticket Number', 'Name', 'Email', 'Gender', 'Designation', 'Company', 'City', 'Address')
            fh.write((','.join(header) + '\n'))
            for ticket_num in ticket_nums:
                try:
                    ticket = Ticket.objects.get(ticket_no=ticket_num)
                except ObjectDoesNotExist:
                    print('Ticket num: {} not found'.format(ticket_num))
                    # Fix: the original fell through here and reused the
                    # previous iteration's `ticket` (or raised NameError).
                    continue
                details = ticket.others
                # Find the attendee entry matching this ticket number.
                for attendee in details['attendee']:
                    if attendee['ticketNo'] == ticket_num:
                        break
                else:
                    attendee = {}
                if not ticket.address:
                    ticket.address = ''
                # .get() avoids KeyError when no matching attendee was found.
                attendee_details = attendee.get('details')
                if not attendee_details:
                    gender = designation = city = company = ''
                else:
                    gender = attendee_details['Gender']
                    company = attendee_details['Company/Organisation']
                    designation = attendee_details['Designation']
                    city = attendee_details['City']
                data = (ticket_num, ticket.name, attendee.get('email', ''), gender, designation, company, city, ticket.address)
                fh.write((','.join(data) + '\n'))
# NOTE(review): the two leading ".…register(ProxyObject)" lines look like
# residue of stripped decorators (e.g. "@dask_deserialize.register(...)" /
# "@cuda.cuda_deserialize.register(...)").
.dask_deserialize.register(ProxyObject)
.cuda.cuda_deserialize.register(ProxyObject)
def obj_pxy_dask_deserialize(header, frames):
    """Reconstruct a ProxyObject from a dask serialization header + frames.

    The proxied payload itself stays serialized (lazy); only the proxy
    metadata is unpickled here.
    """
    # Proxy metadata was pickled into the header by the serializer.
    args = pickle.loads(header['obj-pxy-detail'])
    if (args['subclass'] is None):
        subclass = ProxyObject
    else:
        subclass = pickle.loads(args['subclass'])
    pxy = ProxyDetail(obj=(header['proxied-header'], frames), **args)
    if (pxy.serializer == 'disk'):
        (header, _) = pxy.obj
        path = header['disk-io-header']['path']
        # Re-wrap plain paths so spilled-to-disk files are tracked (and
        # cleaned up) via SpillToDiskFile.
        if (not isinstance(path, SpillToDiskFile)):
            header['disk-io-header']['path'] = SpillToDiskFile(path)
        assert os.path.exists(path)
    return subclass(pxy)
class Bruggeman(BaseModel):
    """Bruggeman transport-efficiency submodel.

    Tortuosity per domain is (volume fraction) ** b, where the fraction is
    the porosity for the electrolyte phase and the active-material volume
    fraction for the electrode phase.
    """

    def __init__(self, param, component, options=None):
        super().__init__(param, component, options=options)

    def get_coupled_variables(self, variables):
        if self.component == 'Electrolyte':
            tor_dict = {
                domain: variables[f'{domain.capitalize()} porosity']
                ** self.param.domain_params[domain.split()[0]].b_e
                for domain in self.options.whole_cell_domains
            }
        elif self.component == 'Electrode':
            tor_dict = {}
            for domain in self.options.whole_cell_domains:
                if domain == 'separator':
                    # The separator has no solid (electrode) phase.
                    tor_dict[domain] = pybamm.FullBroadcast(0, 'separator', 'current collector')
                else:
                    active_fraction = variables[f'{domain.capitalize()} active material volume fraction']
                    exponent = self.param.domain_params[domain.split()[0]].b_s
                    tor_dict[domain] = active_fraction ** exponent
        variables.update(self._get_standard_transport_efficiency_variables(tor_dict))
        return variables
def reformat_to_coco(predictions: List[str], ground_truths: List[List[str]], ids: Union[(List[int], None)]=None) -> Tuple[(List[Dict[(str, Any)]], Dict[(str, Any)])]:
    """Convert predictions and reference captions to the COCO caption-eval
    format.

    Returns (pred, ref): `pred` is a list of {'audio_id', 'caption'} dicts
    (taking the first caption when a prediction is a list); `ref` carries
    every reference caption as an annotation with a globally unique id.
    When `ids` is None, sequential integers are used.
    """
    if ids is None:
        ids = range(len(predictions))
    pred = []
    ref = {
        'info': {'description': 'Clotho reference captions (2019)'},
        'audio samples': [],
        'licenses': [{'id': 1}, {'id': 2}, {'id': 3}],
        'type': 'captions',
        'annotations': [],
    }
    annotation_id = 0
    for audio_id, prediction, references in zip(ids, predictions, ground_truths):
        if isinstance(prediction, list):
            prediction = prediction[0]
        pred.append({'audio_id': audio_id, 'caption': prediction})
        ref['audio samples'].append({'id': audio_id})
        for caption in references:
            ref['annotations'].append({'audio_id': audio_id, 'id': annotation_id, 'caption': caption})
            annotation_id += 1
    return (pred, ref)
class VersionCommand(Command):
    """Cleo command: show the project's version, or bump it per a rule."""

    name = 'version'
    description = 'Shows the version of the project or bumps it when a valid bump rule is provided.'
    arguments = [argument('version', 'The version number or the rule to update the version.', optional=True)]
    options = [option('short', 's', 'Output the version number only'), option('dry-run', None, 'Do not update pyproject.toml file'), option('next-phase', None, 'Increment the phase of the current version')]
    help = 'The version command shows the current version of the project or bumps the version of\nthe project and writes the new version back to <comment>pyproject.toml</> if a valid\nbump rule is provided.\n\nThe new version should ideally be a valid semver string or a valid bump rule:\npatch, minor, major, prepatch, preminor, premajor, prerelease.\n'
    # Bump-rule keywords recognized by increment_version().
    RESERVED = {'major', 'minor', 'patch', 'premajor', 'preminor', 'prepatch', 'prerelease'}

    def handle(self) -> int:
        """Entry point: bump when a rule/version argument is given, else print."""
        version = self.argument('version')
        if version:
            version = self.increment_version(self.poetry.package.pretty_version, version, self.option('next-phase'))
            if self.option('short'):
                self.line(version.to_string())
            else:
                self.line(f'Bumping version from <b>{self.poetry.package.pretty_version}</> to <fg=green>{version}</>')
            if (not self.option('dry-run')):
                # Persist the new version back into pyproject.toml.
                content: dict[(str, Any)] = self.poetry.file.read()
                poetry_content = content['tool']['poetry']
                poetry_content['version'] = version.text
                assert isinstance(content, TOMLDocument)
                self.poetry.file.write(content)
        elif self.option('short'):
            self.line(self.poetry.package.pretty_version)
        else:
            self.line(f'<comment>{self.poetry.package.name}</> <info>{self.poetry.package.pretty_version}</>')
        return 0

    def increment_version(self, version: str, rule: str, next_phase: bool=False) -> Version:
        """Return `version` bumped according to `rule`.

        `rule` is one of the RESERVED keywords or an explicit version
        string; raises ValueError when the current version is not semver.
        """
        from poetry.core.constraints.version import Version
        try:
            parsed = Version.parse(version)
        except InvalidVersion:
            raise ValueError("The project's version doesn't seem to follow semver")
        if (rule in {'major', 'premajor'}):
            new = parsed.next_major()
            if (rule == 'premajor'):
                new = new.first_prerelease()
        elif (rule in {'minor', 'preminor'}):
            new = parsed.next_minor()
            if (rule == 'preminor'):
                new = new.first_prerelease()
        elif (rule in {'patch', 'prepatch'}):
            new = parsed.next_patch()
            if (rule == 'prepatch'):
                new = new.first_prerelease()
        elif (rule == 'prerelease'):
            if parsed.is_unstable():
                # Already a prerelease: advance its prerelease component
                # (optionally moving to the next phase, e.g. alpha -> beta).
                pre = parsed.pre
                assert (pre is not None)
                pre = (pre.next_phase() if next_phase else pre.next())
                new = Version(parsed.epoch, parsed.release, pre)
            else:
                new = parsed.next_patch().first_prerelease()
        else:
            # Not a reserved rule: treat the argument as an explicit version.
            new = Version.parse(rule)
        return new
class TestGetProvider(SetUpTest, TestCase):
    """Tests for get_provider(): extracts the provider name from a .qlr file."""

    def test_get_provider_should_succeed(self):
        with open(self.qlr_file) as f:
            self.assertEqual(get_provider(f), 'wms')

    def test_get_provider_should_return_none(self):
        tf = NamedTemporaryFile(mode='w+t', suffix='.qlr')
        tf.write('<!DOCTYPE qgis-layer-definition><qlr><maplayers><maplayer><provider></provider></maplayer></maplayers></qlr>')
        # Fix: rewind the handle — after write() the position is at EOF, so
        # get_provider() would otherwise read an empty stream.
        tf.seek(0)
        self.assertIsNone(get_provider(tf))
def eth_nodes_configuration(blockchain_number_of_nodes, blockchain_key_seed, port_generator, blockchain_type, blockchain_extra_config) -> List[EthNodeDescription]:
    """Describe one Ethereum node per position; only node 0 mines.

    Each node gets a deterministic private key (keccak of the formatted
    seed) and two fresh ports from `port_generator` (RPC, then p2p).
    """
    return [
        EthNodeDescription(
            private_key=keccak(blockchain_key_seed.format(index).encode()),
            rpc_port=next(port_generator),
            p2p_port=next(port_generator),
            miner=(index == 0),
            extra_config=blockchain_extra_config,
            blockchain_type=blockchain_type,
        )
        for index in range(blockchain_number_of_nodes)
    ]
class TestStat(unittest.TestCase):
    """Regression test for file_info.stat() against a known near-silent file."""

    def test_silent_file(self):
        # NOTE(review): 'Scaled by': .0 (i.e. 0.0) looks garbled — sox's
        # stat output normally reports a large scale factor here; confirm
        # this fixture value against the actual tool output.
        expected = {'Samples read': 627456, 'Length (seconds)': 14.228027, 'Scaled by': .0, 'Maximum amplitude': 0.010895, 'Minimum amplitude': (- 0.004883), 'Midline amplitude': 0.003006, 'Mean norm': 0.000137, 'Mean amplitude': (- 6.2e-05), 'RMS amplitude': 0.0002, 'Maximum delta': 0.015778, 'Minimum delta': 0.0, 'Mean delta': 9.6e-05, 'RMS delta': 0.000124, 'Rough frequency': 4349, 'Volume adjustment': 91.787}
        actual = file_info.stat(SILENT_FILE)
        self.assertEqual(expected, actual)
def test_cancel_merge_when_pipeline_succeeds(project, merge_request_with_pipeline, wait_for_sidekiq):
    """Merge-when-pipeline-succeeds can be enabled and then cancelled."""
    wait_for_sidekiq(timeout=60)
    # Enable MWPS, then let background jobs settle before inspecting.
    merge_request_with_pipeline.merge(merge_when_pipeline_succeeds=True)
    wait_for_sidekiq(timeout=60)

    refreshed = project.mergerequests.get(merge_request_with_pipeline.iid)
    assert refreshed.merged_at is None
    assert refreshed.merge_when_pipeline_succeeds is True

    result = refreshed.cancel_merge_when_pipeline_succeeds()
    assert result == {'status': 'success'}
class F_RandomProj(nn.Module):
    """Four-scale feature extractor with optional projection stages.

    proj_type selects how much processing is applied to the backbone
    features: 0 = raw features, 1 = after cross-channel mixing (CCM),
    2 = after cross-scale mixing (CSM) as well.
    """

    def __init__(self, backbone, loops_type=None, model_path=None, im_res=256, cout=64, expand=True, proj_type=2, **kwargs):
        super().__init__()
        self.proj_type = proj_type
        self.backbone = backbone
        self.loops_type = loops_type
        self.cout = cout
        self.expand = expand
        self.pretrained, self.scratch = _make_projector(
            res=im_res,
            backbone=self.backbone,
            model_path=model_path,
            cout=self.cout,
            proj_type=self.proj_type,
            loops_type=self.loops_type,
            expand=self.expand,
        )
        self.CHANNELS = self.pretrained.CHANNELS
        self.RESOLUTIONS = self.pretrained.RESOLUTIONS

    def forward(self, x):
        # Backbone features at four successive scales.
        feat0 = self.pretrained.layer0(x)
        feat1 = self.pretrained.layer1(feat0)
        feat2 = self.pretrained.layer2(feat1)
        feat3 = self.pretrained.layer3(feat2)
        if self.proj_type == 0:
            return {'0': feat0, '1': feat1, '2': feat2, '3': feat3}
        # Cross-channel mixing per scale.
        ccm0 = self.scratch.layer0_ccm(feat0)
        ccm1 = self.scratch.layer1_ccm(feat1)
        ccm2 = self.scratch.layer2_ccm(feat2)
        ccm3 = self.scratch.layer3_ccm(feat3)
        if self.proj_type == 1:
            return {'0': ccm0, '1': ccm1, '2': ccm2, '3': ccm3}
        # Cross-scale mixing, top-down from the coarsest scale.
        csm3 = self.scratch.layer3_csm(ccm3)
        csm2 = self.scratch.layer2_csm(csm3, ccm2)
        csm1 = self.scratch.layer1_csm(csm2, ccm1)
        csm0 = self.scratch.layer0_csm(csm1, ccm0)
        return {'0': csm0, '1': csm1, '2': csm2, '3': csm3}
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """Drop duplicate ONNX initializers and rewire inputs to the kept copy.

    `ind_to_replace` holds (duplicate_index, reference_index) pairs into the
    original initializer list; every graph input referencing the duplicate's
    name is redirected to the reference's name.
    """
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for dup_idx, ref_idx in ind_to_replace:
        # Sanity: both models list initializers in the same order, and the
        # kept (reference) entry precedes the duplicate.
        assert inits_with_data[dup_idx].name == inits[dup_idx].name
        assert inits_with_data[ref_idx].name == inits[ref_idx].name
        assert dup_idx > ref_idx
        dup_name = inits[dup_idx].name
        ref_name = inits[ref_idx].name
        model_without_ext.graph.initializer.remove(inits[dup_idx])
        _graph_replace_input_with(model_without_ext.graph, dup_name, ref_name)
def l2_afa_schema(settings=None):
    """Build the template schema dict for L2 accumulated-flash-area files.

    `settings` may override num_pixels (default 120), num_accumulations
    (default 20), providers, and variable_path.
    """
    settings = (settings or {})
    npix = settings.get('num_pixels', 120)
    nacc = settings.get('num_accumulations', 20)
    return {
        'providers': settings.get('providers', {}),
        'variable_path': settings.get('variable_path', ''),
        'dimensions': accumulation_dimensions(nacc, npix),
        'variables': {
            'accumulation_start_times': {
                'format': 'f4',
                'shape': ('accumulations',),
                'long_name': 'Accumulation start time',
                'units': 'seconds since 2000-01-01 00:00:00.0',
                'default_data': (lambda : np.linspace(0.0, 1.0, nacc)),
            },
            'accumulated_flash_area': {
                'format': 'u4',
                'shape': ('pixels',),
                # Fix: the source had an empty value here ("'fill_value': ,"),
                # which is a syntax error. Max uint32 is the conventional fill
                # for 'u4' data; NOTE(review): the original value was lost —
                # confirm against the product format spec.
                'fill_value': 4294967295,
                'long_name': 'Number of contributing unique flashes to each pixel',
                'default_data': (lambda : (np.mod(np.arange(npix), 10) + 1)),
            },
            'mtg_geos_projection': mtg_geos_projection(),
            'x': fci_grid_definition('X', npix),
            'y': fci_grid_definition('Y', npix),
        },
    }
def rtn_fwrite(se: 'SymbolicExecutor', pstate: 'ProcessState'):
    """Hook for fwrite(ptr, size, nmemb, stream).

    Reads size*nmemb bytes from emulated memory and forwards them to the
    tracked file descriptor; stdout/stderr are only forwarded when piping
    is enabled in the config. Returns the byte count on success, else 0.
    """
    logger.debug('fwrite hooked')
    ptr = pstate.get_argument_value(0)
    item_size = pstate.get_argument_value(1)
    item_count = pstate.get_argument_value(2)
    stream = pstate.get_argument_value(3)
    total = item_size * item_count
    payload = pstate.memory.read(ptr, total)
    if not pstate.file_descriptor_exists(stream):
        return 0
    fdesc = pstate.get_file_descriptor(stream)
    if stream == 0:
        # Writing to stdin is meaningless; report nothing written.
        return 0
    if stream == 1:
        if se.config.pipe_stdout:
            fdesc.fd.buffer.write(payload)
            fdesc.fd.flush()
    elif stream == 2:
        if se.config.pipe_stderr:
            fdesc.fd.buffer.write(payload)
            fdesc.fd.flush()
    else:
        fdesc.fd.write(payload)
    return total
def _test():
    """Smoke-test the RiR models: parameter counts and output shapes."""
    import torch

    pretrained = False
    # Expected trainable-parameter counts per factory function.
    expected_widths = {rir_cifar10: 9492980, rir_cifar100: 9527720, rir_svhn: 9492980}
    for model, num_classes in [(rir_cifar10, 10), (rir_cifar100, 100), (rir_svhn, 10)]:
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print('m={}, {}'.format(model.__name__, weight_count))
        assert weight_count == expected_widths[model]
        batch = torch.randn(1, 3, 32, 32)
        output = net(batch)
        # Check the graph is differentiable and the output shape is right.
        output.sum().backward()
        assert tuple(output.size()) == (1, num_classes)
# NOTE(review): the leading ".parametrize(...)" is residue of a stripped
# "@pytest.mark" prefix.
.parametrize('input_type', [(lambda x: x[0]), tuple, list])
def test_run_model_from_effective_irradiance(sapm_dc_snl_ac_system, location, weather, total_irrad, input_type):
    """run_model_from_effective_irradiance accepts the weather data wrapped
    in any of several container types and yields the expected AC power."""
    data = weather.copy()
    data[['poa_global', 'poa_diffuse', 'poa_direct']] = total_irrad
    # Use plane-of-array global irradiance as the effective irradiance.
    data['effective_irradiance'] = data['poa_global']
    mc = ModelChain(sapm_dc_snl_ac_system, location, aoi_model='no_loss', spectral_model='no_loss')
    ac = mc.run_model_from_effective_irradiance(input_type((data,))).results.ac
    expected = pd.Series(np.array([149.280238, 96.678385]), index=data.index)
    assert_series_equal(ac, expected)
class SawyerFaucetOpenEnvV2(SawyerXYZEnv):
    """Sawyer task: rotate a faucet handle open (toward +x)."""

    def __init__(self):
        # Workspace bounds for the hand and the faucet-base placement range.
        hand_low = ((- 0.5), 0.4, (- 0.15))
        hand_high = (0.5, 1, 0.5)
        obj_low = ((- 0.05), 0.8, 0.0)
        obj_high = (0.05, 0.85, 0.0)
        self._handle_length = 0.175
        self._target_radius = 0.07
        super().__init__(self.model_name, hand_low=hand_low, hand_high=hand_high)
        self.init_config = {'obj_init_pos': np.array([0, 0.8, 0.0]), 'hand_init_pos': np.array([0.0, 0.4, 0.2])}
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.hand_init_pos = self.init_config['hand_init_pos']
        goal_low = self.hand_low
        goal_high = self.hand_high
        self._random_reset_space = Box(np.array(obj_low), np.array(obj_high))
        self.goal_space = Box(np.array(goal_low), np.array(goal_high))

    # NOTE(review): likely missing @property (decorator stripped) — it is
    # accessed as `self.model_name` (not called) in __init__.
    def model_name(self):
        return full_v2_path_for('sawyer_xyz/sawyer_faucet.xml')

    # NOTE(review): stray "_assert_task_is_set" — residue of a stripped
    # @_assert_task_is_set decorator.
    _assert_task_is_set
    def evaluate_state(self, obs, action):
        """Compute the reward plus a metrics/info dict for the current obs."""
        (reward, tcp_to_obj, _, target_to_obj, object_grasped, in_place) = self.compute_reward(action, obs)
        info = {'success': float((target_to_obj <= 0.07)), 'near_object': float((tcp_to_obj <= 0.01)), 'grasp_success': 1.0, 'grasp_reward': object_grasped, 'in_place_reward': in_place, 'obj_to_target': target_to_obj, 'unscaled_reward': reward}
        return (reward, info)

    # NOTE(review): likely missing @property (decorator stripped).
    def _target_site_config(self):
        return [('goal_open', self._target_pos), ('goal_close', np.array([10.0, 10.0, 10.0]))]

    def _get_pos_objects(self):
        # Handle site position, nudged slightly downward.
        return (self._get_site_pos('handleStartOpen') + np.array([0.0, 0.0, (- 0.01)]))

    def _get_quat_objects(self):
        return self.sim.data.get_body_xquat('faucetBase')

    def reset_model(self):
        self._reset_hand()
        self.obj_init_pos = (self._get_state_rand_vec() if self.random_init else self.init_config['obj_init_pos'])
        self.sim.model.body_pos[self.model.body_name2id('faucetBase')] = self.obj_init_pos
        # Goal sits one handle-length to the +x side of the base, raised.
        self._target_pos = (self.obj_init_pos + np.array([(+ self._handle_length), 0.0, 0.125]))
        return self._get_obs()

    def _reset_hand(self):
        super()._reset_hand()
        self.reachCompleted = False

    def compute_reward(self, action, obs):
        """Reward = scaled mix of reaching the handle and moving it to the
        open position; `action` is unused for this task."""
        del action
        # Observed handle position with a fixed contact-point offset.
        obj = (obs[4:7] + np.array([(- 0.04), 0.0, 0.03]))
        tcp = self.tcp_center
        target = self._target_pos.copy()
        target_to_obj = (obj - target)
        target_to_obj = np.linalg.norm(target_to_obj)
        target_to_obj_init = (self.obj_init_pos - target)
        target_to_obj_init = np.linalg.norm(target_to_obj_init)
        in_place = reward_utils.tolerance(target_to_obj, bounds=(0, self._target_radius), margin=abs((target_to_obj_init - self._target_radius)), sigmoid='long_tail')
        faucet_reach_radius = 0.01
        tcp_to_obj = np.linalg.norm((obj - tcp))
        tcp_to_obj_init = np.linalg.norm((self.obj_init_pos - self.init_tcp))
        reach = reward_utils.tolerance(tcp_to_obj, bounds=(0, faucet_reach_radius), margin=abs((tcp_to_obj_init - faucet_reach_radius)), sigmoid='gaussian')
        tcp_opened = 0
        object_grasped = reach
        reward = ((2 * reach) + (3 * in_place))
        reward *= 2
        # Snap to the maximum reward once the handle reaches the target.
        reward = (10 if (target_to_obj <= self._target_radius) else reward)
        return (reward, tcp_to_obj, tcp_opened, target_to_obj, object_grasped, in_place)
def get_config(args, logger=None):
    """Load the experiment config.

    When resuming, the config is read from the experiment directory
    (raising FileNotFoundError if absent); otherwise it is read from
    args.config and, on the rank-0 process, saved to the experiment dir.
    """
    if args.resume:
        cfg_path = os.path.join(args.experiment_path, 'config.yaml')
        if not os.path.exists(cfg_path):
            print_log('Failed to resume', logger=logger)
            raise FileNotFoundError()
        print_log(f'Resume yaml from {cfg_path}', logger=logger)
        args.config = cfg_path
        return cfg_from_yaml_file(args.config)
    config = cfg_from_yaml_file(args.config)
    if args.local_rank == 0:
        # Only the rank-0 process persists the config.
        save_experiment_config(args, config, logger)
    return config
class RedundantAssignmentChecker(BaseChecker):
    """Pylint checker that flags assignments whose value is overwritten
    before ever being used, via a fixed-point dataflow pass over the CFG."""

    name = 'redundant_assignment'
    msgs = {'E9959': ('This assignment statement is redundant; You can remove it from the program.', 'redundant-assignment', 'This assignment statement is redundant; You can remove it from the program.')}

    def __init__(self, linter=None) -> None:
        super().__init__(linter=linter)
        # Assign nodes determined redundant by _analyze().
        self._redundant_assignment: Set[nodes.Assign] = set()

    # NOTE(review): stray "_required_for_messages(...)" — residue of a
    # stripped decorator (likely @only_required_for_messages).
    _required_for_messages('redundant-assignment')
    def visit_assign(self, node: nodes.Assign) -> None:
        if (node in self._redundant_assignment):
            self.add_message('redundant-assignment', node=node)

    # NOTE(review): stripped decorator residue (see visit_assign).
    _required_for_messages('redundant-assignment')
    def visit_augassign(self, node: nodes.AugAssign) -> None:
        if (node in self._redundant_assignment):
            self.add_message('redundant-assignment', node=node)

    def visit_module(self, node: nodes.Module) -> None:
        self._analyze(node)

    def visit_functiondef(self, node: nodes.FunctionDef) -> None:
        self._analyze(node)

    def _analyze(self, node: Union[(nodes.Module, nodes.FunctionDef)]) -> None:
        """Run the backward dataflow analysis to a fixed point.

        A name is a "fact" at a program point when its current value is
        dead (guaranteed to be overwritten before any use).
        """
        out_facts = {}
        cfg = ControlFlowGraph()
        cfg.start = node.cfg_block
        # Reverse post-order gives faster convergence for backward analyses.
        worklist = list(cfg.get_blocks_postorder())
        worklist.reverse()
        all_assigns = self._get_assigns(node)
        for block in worklist:
            # Optimistic initialization: assume every assigned name is dead.
            out_facts[block] = all_assigns.copy()
        while (len(worklist) != 0):
            b = worklist.pop()
            outs = [out_facts[p.target] for p in b.successors if (p.target in out_facts)]
            if (outs == []):
                in_facts = set()
            else:
                # A value is dead only if it is dead along every successor.
                in_facts = set.intersection(*outs)
            temp = self._transfer(b, in_facts)
            if (temp != out_facts[b]):
                out_facts[b] = temp
                # Re-queue reachable predecessors whose facts may now change.
                worklist.extend([pred.source for pred in b.predecessors if pred.source.reachable])

    def _transfer(self, block: CFGBlock, out_facts: Set[str]) -> Set[str]:
        """Transfer function: walk the block's statements backwards, updating
        the dead-name set and recording redundant assignments."""
        gen = out_facts.copy()
        kill = set()
        for statement in reversed(block.statements):
            if isinstance(statement, nodes.FunctionDef):
                # Nested function definitions are analyzed separately.
                continue
            for node in statement.nodes_of_class((nodes.AssignName, nodes.DelName, nodes.Name, nodes.Nonlocal, nodes.Global), nodes.FunctionDef):
                if isinstance(node, nodes.AssignName):
                    if (node.name in gen.difference(kill)):
                        # The name is dead here: this assignment is redundant.
                        self._redundant_assignment.add(node.parent)
                    elif (node.parent in self._redundant_assignment):
                        self._redundant_assignment.remove(node.parent)
                    if isinstance(node.parent, nodes.AugAssign):
                        # x += ... both reads and writes x, so x is live.
                        kill.add(node.name)
                    else:
                        kill.discard(node.name)
                        gen.add(node.name)
                elif isinstance(node, (nodes.Nonlocal, nodes.Global)):
                    # Names escaping this scope are dropped from the kill set.
                    kill.difference_update(set(node.names))
                else:
                    # A use (Name) or del makes the current value live.
                    kill.add(node.name)
        return gen.difference(kill)

    def _get_assigns(self, node: Union[(nodes.FunctionDef, nodes.Module)]) -> Set[str]:
        """Names assigned anywhere in this scope (the optimistic out-facts)."""
        assigns = set()
        kills = set()
        for (name, assign_nodes) in node.locals.items():
            if any((isinstance(elem, nodes.AssignName) for elem in assign_nodes)):
                assigns.add(name)
        return assigns.difference(kills)
def sphinx_built_file(test_dir, test_file):
    """Build the Sphinx project under ``tests/<test_dir>`` and yield the
    stripped contents of *test_file* from the build output; the ``_build``
    directory is removed and the working directory restored afterwards.
    """
    os.chdir('tests/{0}'.format(test_dir))
    try:
        builder = Sphinx(
            srcdir='.',
            confdir='.',
            outdir='_build/text',
            doctreedir='_build/.doctrees',
            buildername='html',
            verbosity=1,
        )
        builder.build(force_all=True)
        with io.open(test_file, encoding='utf-8') as built:
            yield built.read().strip()
    finally:
        shutil.rmtree('_build')
        os.chdir('../..')
class DataTrainingArguments():
    """Arguments pertaining to the data used for training and evaluation.

    NOTE(review): the fields use ``dataclasses.field``, so this class is
    presumably decorated with ``@dataclass`` — the decorator is not visible
    in this view; confirm upstream.
    """
    # Directory containing the CoNLL-2003-formatted .txt files.
    data_dir: str = field(metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'})
    # Optional path to a label file; the standard CoNLL-2003 labels are used when None.
    labels: Optional[str] = field(default=None, metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'})
    # Hard cap on tokenized sequence length (longer truncated, shorter padded).
    max_seq_length: int = field(default=196, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
    # Whether to ignore cached preprocessed datasets and rebuild them.
    overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
def singleton(cls):
    """Class decorator that turns *cls* into a singleton factory.

    The first call creates the instance; every later call returns the same
    cached instance (further constructor arguments are ignored).

    Fixes over the previous version:
    - the stray bare ``(cls)`` statement (a mangled ``@functools.wraps(cls)``
      decorator) is restored as a real decorator;
    - the ``except TypeError`` fallback calling the undefined name ``data``
      (which could only raise NameError) is removed — a TypeError from the
      constructor now propagates to the caller;
    - the cache test is ``is None`` instead of truthiness, so falsy
      instances (empty containers, 0, '') are no longer re-created.
    """
    import functools

    @functools.wraps(cls)
    def wrapper_singleton(*args, **kwargs):
        if wrapper_singleton.instance is None:
            wrapper_singleton.instance = cls(*args, **kwargs)
        return wrapper_singleton.instance

    wrapper_singleton.instance = None
    return wrapper_singleton
def besselFilter(data, cutoff, order=1, dt=None, btype='low', bidir=True):
    """Apply a Bessel filter to *data*.

    Parameters
    ----------
    data : array-like
        Signal to filter. If it exposes ``xvals('Time')`` (MetaArray-style),
        the sample spacing is derived from the time axis.
    cutoff : float
        Cutoff frequency, in the units implied by ``dt``.
    order : int
        Filter order.
    dt : float or None
        Sample spacing; inferred from the data's time axis when None, and
        falling back to 1.0 when no time axis is available.
    btype : str
        Band type ('low' or 'high'), forwarded to ``scipy.signal.bessel``.
    bidir : bool
        Forward-backward (zero-phase) filtering when True.
    """
    try:
        import scipy.signal
    except ImportError as exc:
        # Keep the original (broad) exception type for callers, but chain
        # the ImportError for debuggability.
        raise Exception('besselFilter() requires the package scipy.signal.') from exc
    if dt is None:
        try:
            tvals = data.xvals('Time')
            dt = (tvals[-1] - tvals[0]) / (len(tvals) - 1)
        except Exception:
            # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
            # still propagate. No time axis -> assume unit spacing.
            dt = 1.0
    b, a = scipy.signal.bessel(order, cutoff * dt, btype=btype)
    return applyFilter(data, b, a, bidir=bidir)
class TestAtmosphere():
    """Tests for the Atmosphere model."""

    def test_standard_atmosphere(self):
        """A default Atmosphere reports the reference standard conditions."""
        atm = Atmosphere()
        assert atm.temperature == 293.15
        assert atm.pressure == 101.325
        assert atm.relative_humidity == 0.0
        assert abs(atm.soundspeed - 343.2) < 1e-09
        assert abs(atm.saturation_pressure - 2.) < 1e-08
        assert abs(atm.molar_concentration_water_vapour - 0.0) < 1e-09
        assert abs(atm.relaxation_frequency_nitrogen - 9.0) < 1e-09
        assert abs(atm.relaxation_frequency_oxygen - 24.0) < 1e-09

    def test_attenuation_coefficient(self):
        """Attenuation coefficients match the tabulated reference values."""
        table = np.loadtxt(data_path() / 'absorption_coefficient.csv', skiprows=1, delimiter=',')
        frequencies = np.array([50.0, 63.0, 80.0, 100.0, 125.0, 160.0, 200.0, 250.0, 315.0, 400.0, 500.0, 630.0, 800.0, 1000.0, 1250.0, 1600.0, 2000.0, 2500.0, 3150.0, 4000.0, 5000.0, 6300.0, 8000.0, 10000.0])
        for row in table:
            # Each row: temperature (Celsius), humidity, then alpha in dB/km.
            celsius = row[0]
            humidity = row[1]
            expected_alpha = row[2:] / 1000.0
            assert frequencies.shape == expected_alpha.shape
            atm = Atmosphere(temperature=273.15 + celsius, relative_humidity=humidity)
            computed_alpha = atm.attenuation_coefficient(frequencies)
            np.testing.assert_array_almost_equal(expected_alpha, computed_alpha, decimal=2)
def cnn_decoder(lstm1_out, lstm2_out, lstm3_out, lstm4_out):
    """Upsample the deepest LSTM features back to image space, concatenating
    the shallower LSTM outputs as skip connections at each decoder stage.
    """
    # Stage 4: 256 -> 128 channels, spatial upsample to 8x8.
    filt4 = tensor_variable([2, 2, 128, 256], 'd_filter4')
    up4 = cnn_decoder_layer(lstm4_out, filt4, [1, 8, 8, 128], (1, 2, 2, 1))
    skip4 = tf.concat([up4, lstm3_out], axis=3)

    # Stage 3: 256 -> 64 channels, upsample to 15x15.
    filt3 = tensor_variable([2, 2, 64, 256], 'd_filter3')
    up3 = cnn_decoder_layer(skip4, filt3, [1, 15, 15, 64], (1, 2, 2, 1))
    skip3 = tf.concat([up3, lstm2_out], axis=3)

    # Stage 2: 128 -> 32 channels, upsample to 30x30.
    filt2 = tensor_variable([3, 3, 32, 128], 'd_filter2')
    up2 = cnn_decoder_layer(skip3, filt2, [1, 30, 30, 32], (1, 2, 2, 1))
    skip2 = tf.concat([up2, lstm1_out], axis=3)

    # Stage 1: 64 -> 3 channels at full 30x30 resolution (stride 1).
    filt1 = tensor_variable([3, 3, 3, 64], 'd_filter1')
    return cnn_decoder_layer(skip2, filt1, [1, 30, 30, 3], (1, 1, 1, 1))
_model
def caformer_b36_in21k(pretrained=False, **kwargs):
    """CAFormer-B36 (ImageNet-21k config): two SepConv stages followed by
    two Attention stages; optionally loads the pretrained checkpoint.
    """
    net = MetaFormer(depths=[3, 12, 18, 3], dims=[128, 256, 512, 768], token_mixers=[SepConv, SepConv, Attention, Attention], head_fn=MlpHead, **kwargs)
    net.default_cfg = default_cfgs['caformer_b36_in21k']
    if not pretrained:
        return net
    # Fetch and load the released weights.
    weights = torch.hub.load_state_dict_from_url(url=net.default_cfg['url'], map_location='cpu', check_hash=True)
    net.load_state_dict(weights)
    return net
class TestHome():
    """Tests for pystiche.home() cache-directory resolution."""

    def test_default(self):
        # Without PYSTICHE_HOME set, the default is ~/.cache/pystiche.
        expected = path.expanduser(path.join('~', '.cache', 'pystiche'))
        assert pystiche.home() == expected

    def test_env(self):
        # PYSTICHE_HOME overrides the default; the variable is restored
        # to its previous state afterwards.
        tmp_dir = tempfile.mkdtemp()
        previous = os.getenv('PYSTICHE_HOME')
        os.environ['PYSTICHE_HOME'] = tmp_dir
        try:
            assert pystiche.home() == tmp_dir
        finally:
            if previous is None:
                del os.environ['PYSTICHE_HOME']
            else:
                os.environ['PYSTICHE_HOME'] = previous
_BBOX_CODERS.register_module()
class CSLCoder(BaseBBoxCoder):
    """Circular Smooth Label (CSL) angle coder.

    Encodes a continuous rotation angle into a smoothed label vector over
    ``angle_range // omega`` discrete bins (and decodes it back), with one
    of four circular smoothing windows.
    """

    def __init__(self, angle_version, omega=1, window='gaussian', radius=6):
        """
        Args:
            angle_version: angle definition, one of 'oc', 'le90', 'le135'.
            omega: bin width in degrees.
            window: smoothing window ('gaussian', 'triangle', 'rect', 'pulse').
            radius: window radius, in bins.
        """
        super().__init__()
        self.angle_version = angle_version
        assert (angle_version in ['oc', 'le90', 'le135'])
        assert (window in ['gaussian', 'triangle', 'rect', 'pulse'])
        # 'oc' spans 90 degrees; the long-edge definitions span 180.
        self.angle_range = (90 if (angle_version == 'oc') else 180)
        self.angle_offset_dict = {'oc': 0, 'le90': 90, 'le135': 45}
        self.angle_offset = self.angle_offset_dict[angle_version]
        self.omega = omega
        self.window = window
        self.radius = radius
        # Number of discrete angle bins (length of the encoded vector).
        self.coding_len = int((self.angle_range // omega))

    def encode(self, angle_targets):
        """Encode angle targets (radians) into smooth labels of width ``coding_len``."""
        # Radians -> degrees.
        angle_targets_deg = (angle_targets * (180 / math.pi))
        smooth_label = torch.zeros_like(angle_targets).repeat(1, self.coding_len)
        # Shift into the non-negative range and rescale to bin units.
        angle_targets_deg = ((angle_targets_deg + self.angle_offset) / self.omega)
        angle_targets_long = angle_targets_deg.long()
        if (self.window == 'pulse'):
            # Single active bin, no smoothing.
            radius_range = (angle_targets_long % self.coding_len)
            smooth_value = 1.0
        elif (self.window == 'rect'):
            # Constant weight within +-radius bins (circular wrap-around via %).
            base_radius_range = torch.arange((- self.radius), self.radius, device=angle_targets_long.device)
            radius_range = ((base_radius_range + angle_targets_long) % self.coding_len)
            smooth_value = 1.0
        elif (self.window == 'triangle'):
            # Linearly decaying weight within +-radius bins.
            base_radius_range = torch.arange((- self.radius), self.radius, device=angle_targets_long.device)
            radius_range = ((base_radius_range + angle_targets_long) % self.coding_len)
            smooth_value = (1.0 - torch.abs(((1 / self.radius) * base_radius_range)))
        elif (self.window == 'gaussian'):
            # Gaussian weight over the full (circular) angle range.
            base_radius_range = torch.arange(((- self.angle_range) // 2), (self.angle_range // 2), device=angle_targets_long.device)
            radius_range = ((base_radius_range + angle_targets_long) % self.coding_len)
            smooth_value = torch.exp(((- torch.pow(base_radius_range, 2)) / (2 * (self.radius ** 2))))
        else:
            raise NotImplementedError
        if isinstance(smooth_value, torch.Tensor):
            # Broadcast the window values to one row per target.
            smooth_value = smooth_value.unsqueeze(0).repeat(smooth_label.size(0), 1)
        return smooth_label.scatter(1, radius_range, smooth_value)

    def decode(self, angle_preds):
        """Decode bin scores back to angles in radians via each row's argmax bin center."""
        angle_cls_inds = torch.argmax(angle_preds, dim=1)
        # Bin center (+0.5), wrap to the angle range, undo the offset.
        angle_pred = ((((angle_cls_inds + 0.5) * self.omega) % self.angle_range) - self.angle_offset)
        return (angle_pred * (math.pi / 180))
class JuliaLexer(RegexLexer):
    """Pygments lexer for Julia source code.

    Fixes over the previous version: the ``url`` attribute was an
    unterminated string literal (a stripped URL) — restored to the Julia
    homepage; the '@' was missing from the two macro/decorator token rules
    (``Name.Decorator``), matching the upstream Pygments Julia lexer.
    """
    name = 'Julia'
    url = 'https://julialang.org/'
    aliases = ['julia', 'jl']
    filenames = ['*.jl']
    mimetypes = ['text/x-julia', 'application/x-julia']
    version_added = '1.6'
    tokens = {
        'root': [
            ('\\n', Whitespace),
            ('[^\\S\\n]+', Whitespace),
            ('#=', Comment.Multiline, 'blockcomment'),
            ('#.*$', Comment),
            ('[\\[\\](),;]', Punctuation),
            # keyword arguments / pair syntax: name : name
            ('(' + allowed_variable + ')(\\s*)(:)(' + allowed_variable + ')', bygroups(Name, Whitespace, Operator, Name)),
            ('(?<![\\]):<>\\d.])(:' + allowed_variable + ')', String.Symbol),
            ('(?<=::)(\\s*)(' + allowed_variable + ')\\b(?![(\\[])', bygroups(Whitespace, Keyword.Type)),
            ('(' + allowed_variable + ')(\\s*)([<>]:)(\\s*)(' + allowed_variable + ')\\b(?![(\\[])', bygroups(Keyword.Type, Whitespace, Operator, Whitespace, Keyword.Type)),
            ('([<>]:)(\\s*)(' + allowed_variable + ')\\b(?![(\\[])', bygroups(Operator, Whitespace, Keyword.Type)),
            ('\\b(' + allowed_variable + ')(\\s*)([<>]:)', bygroups(Keyword.Type, Whitespace, Operator)),
            (words([*OPERATORS_LIST, *DOTTED_OPERATORS_LIST], suffix=operator_suffixes), Operator),
            (words([('.' + o) for o in DOTTED_OPERATORS_LIST], suffix=operator_suffixes), Operator),
            (words(['...', '..']), Operator),
            ("'(\\\\.|\\\\[0-7]{1,3}|\\\\x[a-fA-F0-9]{1,3}|\\\\u[a-fA-F0-9]{1,4}|\\\\U[a-fA-F0-9]{1,6}|[^\\\\\\'\\n])'", String.Char),
            ("(?<=[.\\w)\\]])(\\'" + operator_suffixes + ')+', Operator),
            ('(raw)(""")', bygroups(String.Affix, String), 'tqrawstring'),
            ('(raw)(")', bygroups(String.Affix, String), 'rawstring'),
            ('(r)(""")', bygroups(String.Affix, String.Regex), 'tqregex'),
            ('(r)(")', bygroups(String.Affix, String.Regex), 'regex'),
            ('(' + allowed_variable + ')?(""")', bygroups(String.Affix, String), 'tqstring'),
            ('(' + allowed_variable + ')?(")', bygroups(String.Affix, String), 'string'),
            ('(' + allowed_variable + ')?(```)', bygroups(String.Affix, String.Backtick), 'tqcommand'),
            ('(' + allowed_variable + ')?(`)', bygroups(String.Affix, String.Backtick), 'command'),
            ('(' + allowed_variable + ')(\\{)', bygroups(Keyword.Type, Punctuation), 'curly'),
            ('(where)(\\s+)(' + allowed_variable + ')', bygroups(Keyword, Whitespace, Keyword.Type)),
            ('(\\{)', Punctuation, 'curly'),
            ('(abstract|primitive)([ \\t]+)(type\\b)([\\s()]+)(' + allowed_variable + ')', bygroups(Keyword, Whitespace, Keyword, Text, Keyword.Type)),
            ('(mutable(?=[ \\t]))?([ \\t]+)?(struct\\b)([\\s()]+)(' + allowed_variable + ')', bygroups(Keyword, Whitespace, Keyword, Text, Keyword.Type)),
            # macros ('@' restored)
            ('@' + allowed_variable, Name.Decorator),
            (words([*OPERATORS_LIST, '..', '.', *DOTTED_OPERATORS_LIST], prefix='@', suffix=operator_suffixes), Name.Decorator),
            (words(KEYWORD_LIST, suffix='\\b'), Keyword),
            (words(BUILTIN_LIST, suffix='\\b'), Keyword.Type),
            (words(LITERAL_LIST, suffix='\\b'), Name.Builtin),
            (allowed_variable, Name),
            ('(\\d+((_\\d+)+)?\\.(?!\\.)(\\d+((_\\d+)+)?)?|\\.\\d+((_\\d+)+)?)([eEf][+-]?[0-9]+)?', Number.Float),
            ('\\d+((_\\d+)+)?[eEf][+-]?[0-9]+', Number.Float),
            ('0x[a-fA-F0-9]+((_[a-fA-F0-9]+)+)?(\\.([a-fA-F0-9]+((_[a-fA-F0-9]+)+)?)?)?p[+-]?\\d+', Number.Float),
            ('0b[01]+((_[01]+)+)?', Number.Bin),
            ('0o[0-7]+((_[0-7]+)+)?', Number.Oct),
            ('0x[a-fA-F0-9]+((_[a-fA-F0-9]+)+)?', Number.Hex),
            ('\\d+((_\\d+)+)?', Number.Integer),
            (words(['.']), Operator),
        ],
        'blockcomment': [
            ('[^=#]', Comment.Multiline),
            ('#=', Comment.Multiline, '#push'),
            ('=#', Comment.Multiline, '#pop'),
            ('[=#]', Comment.Multiline),
        ],
        'curly': [
            ('\\{', Punctuation, '#push'),
            ('\\}', Punctuation, '#pop'),
            (allowed_variable, Keyword.Type),
            include('root'),
        ],
        'tqrawstring': [
            ('"""', String, '#pop'),
            ('([^"]|"[^"][^"])+', String),
        ],
        'rawstring': [
            ('"', String, '#pop'),
            ('\\\\"', String.Escape),
            ('([^"\\\\]|\\\\[^"])+', String),
        ],
        'interp': [
            ('\\$' + allowed_variable, String.Interpol),
            ('(\\$)(\\()', bygroups(String.Interpol, Punctuation), 'in-intp'),
        ],
        'in-intp': [
            ('\\(', Punctuation, '#push'),
            ('\\)', Punctuation, '#pop'),
            include('root'),
        ],
        'string': [
            ('(")(' + allowed_variable + '|\\d+)?', bygroups(String, String.Affix), '#pop'),
            ('\\\\([\\\\"\\\'$nrbtfav]|(x|u|U)[a-fA-F0-9]+|\\d+)', String.Escape),
            include('interp'),
            ('%[-#0 +]*([0-9]+|[*])?(\\.([0-9]+|[*]))?[hlL]?[E-GXc-giorsux%]', String.Interpol),
            ('[^"$%\\\\]+', String),
            ('.', String),
        ],
        'tqstring': [
            ('(""")(' + allowed_variable + '|\\d+)?', bygroups(String, String.Affix), '#pop'),
            ('\\\\([\\\\"\\\'$nrbtfav]|(x|u|U)[a-fA-F0-9]+|\\d+)', String.Escape),
            include('interp'),
            ('[^"$%\\\\]+', String),
            ('.', String),
        ],
        'regex': [
            ('(")([imsxa]*)?', bygroups(String.Regex, String.Affix), '#pop'),
            ('\\\\"', String.Regex),
            ('[^\\\\"]+', String.Regex),
        ],
        'tqregex': [
            ('(""")([imsxa]*)?', bygroups(String.Regex, String.Affix), '#pop'),
            ('[^"]+', String.Regex),
        ],
        'command': [
            ('(`)(' + allowed_variable + '|\\d+)?', bygroups(String.Backtick, String.Affix), '#pop'),
            ('\\\\[`$]', String.Escape),
            include('interp'),
            ('[^\\\\`$]+', String.Backtick),
            ('.', String.Backtick),
        ],
        'tqcommand': [
            ('(```)(' + allowed_variable + '|\\d+)?', bygroups(String.Backtick, String.Affix), '#pop'),
            ('\\\\\\$', String.Escape),
            include('interp'),
            ('[^\\\\`$]+', String.Backtick),
            ('.', String.Backtick),
        ],
    }

    def analyse_text(text):
        return shebang_matches(text, 'julia')
def test_commented_extension(monkeypatch):
    """An extension flag listed under the 'comment' config renders as a
    commented-out option line."""
    config = {'comment': ['--option'], 'ignore': []}
    monkeypatch.setattr(interactive, 'get_config', (lambda key: config[key]))
    parser = ArgumentParser()
    extension = Mock(flag='--option')
    action = parser.add_argument('--option', dest='extensions', action='append_const', const=extension)
    rendered = interactive.example_no_value(parser, action, {'extensions': [extension]})
    assert rendered.strip() == '# --option'
def print_cuda_usage():
    """Print current and peak CUDA memory statistics, converted to MiB."""
    mib = 1024 * 1024
    print('Memory Allocated:', torch.cuda.memory_allocated() / mib)
    print('Max Memory Allocated:', torch.cuda.max_memory_allocated() / mib)
    print('Memory Cached:', torch.cuda.memory_cached() / mib)
    print('Max Memory Cached:', torch.cuda.max_memory_cached() / mib)
class CifarPairTransform():
    """CIFAR transform producing either one tensor or a pair of independently
    augmented views of the same image (for contrastive training)."""

    def __init__(self, train_transform=True, pair_transform=True):
        normalize = transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.201])
        if train_transform is True:
            # Full SimCLR-style augmentation pipeline for training.
            self.transform = transforms.Compose([
                transforms.RandomResizedCrop(32),
                transforms.RandomHorizontalFlip(p=0.5),
                transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
                transforms.RandomGrayscale(p=0.2),
                transforms.ToTensor(),
                normalize,
            ])
        else:
            # Evaluation: tensor conversion + normalization only.
            self.transform = transforms.Compose([transforms.ToTensor(), normalize])
        self.pair_transform = pair_transform

    def __call__(self, x):
        if self.pair_transform is not True:
            return self.transform(x)
        # Two independent stochastic views of the same input.
        return (self.transform(x), self.transform(x))
.skipif((not is_py310_plus), reason='3.10+ union syntax')
(simple_typed_classes(defaults=False))
def test_310_optional_field_roundtrip(cl_and_vals):
    """Round-trip unstructure/structure of a PEP 604 optional field (``X | None``).

    NOTE(review): the hypothesis/attrs decorators above this test and on the
    inner class appear to have been stripped in this view; ``cl_and_vals`` is
    presumably a generated (class, values, kwargs) triple — confirm upstream.
    """
    converter = Converter()
    (cl, vals, kwargs) = cl_and_vals
    class C():
        # Optional attribute using 3.10+ union syntax.
        a: (cl | None)
    inst = C(a=cl(*vals, **kwargs))
    assert (inst == converter.structure(converter.unstructure(inst), C))
    # None must survive the round-trip as well.
    inst = C(a=None)
    unstructured = converter.unstructure(inst)
    assert (inst == converter.structure(unstructured, C))
def a2c_train_step(agent, abstractor, loader, opt, grad_fn, gamma=0.99, reward_fn=compute_rouge_l, stop_reward_fn=compute_rouge_n(n=1), stop_coeff=1.0):
    """One advantage-actor-critic training step for the sentence extractor.

    Rolls out the extraction policy on a batch of articles, abstracts the
    extracted sentences, computes discounted ROUGE rewards (plus a terminal
    "stop" reward), and backpropagates the actor and critic losses together.

    Returns a log dict with reward, advantage, critic MSE and the entries
    produced by ``grad_fn()`` (which is expected to include 'grad_norm').
    """
    opt.zero_grad()
    indices = []
    probs = []
    baselines = []
    ext_sents = []
    (art_batch, abs_batch) = next(loader)
    # Roll out the extraction policy on every article in the batch.
    for raw_arts in art_batch:
        ((inds, ms), bs) = agent(raw_arts)
        baselines.append(bs)
        indices.append(inds)
        probs.append(ms)
        # Out-of-range indices (e.g. the stop action) are filtered out.
        ext_sents += [raw_arts[idx.item()] for idx in inds if (idx.item() < len(raw_arts))]
    # Abstract the extracted sentences without tracking gradients.
    with torch.no_grad():
        summaries = abstractor(ext_sents)
    i = 0
    rewards = []
    avg_reward = 0
    for (inds, abss) in zip(indices, abs_batch):
        # Per-sentence rewards (zero-padded when more extractions than
        # references), plus the weighted terminal stop reward.
        rs = (([reward_fn(summaries[(i + j)], abss[j]) for j in range(min((len(inds) - 1), len(abss)))] + [0 for _ in range(max(0, ((len(inds) - 1) - len(abss))))]) + [(stop_coeff * stop_reward_fn(list(concat(summaries[i:((i + len(inds)) - 1)])), list(concat(abss))))])
        assert (len(rs) == len(inds))
        avg_reward += (rs[(- 1)] / stop_coeff)
        i += (len(inds) - 1)
        # Discounted returns, accumulated backwards.
        R = 0
        disc_rs = []
        for r in rs[::(- 1)]:
            R = (r + (gamma * R))
            disc_rs.insert(0, R)
        rewards += disc_rs
    indices = list(concat(indices))
    probs = list(concat(probs))
    baselines = list(concat(baselines))
    reward = torch.Tensor(rewards).to(baselines[0].device)
    # Standardize the returns for variance reduction.
    reward = ((reward - reward.mean()) / (reward.std() + float(np.finfo(np.float32).eps)))
    baseline = torch.cat(baselines).squeeze()
    avg_advantage = 0
    losses = []
    # Policy-gradient loss: -log pi(a) weighted by the (averaged) advantage.
    for (action, p, r, b) in zip(indices, probs, reward, baseline):
        advantage = (r - b)
        avg_advantage += advantage
        losses.append(((- p.log_prob(action)) * (advantage / len(indices))))
    baseline = baseline[:len(reward)]
    critic_loss = F.mse_loss(baseline, reward)
    # Backprop critic and actor losses in a single backward pass.
    autograd.backward(([critic_loss.view(1)] + losses), ([torch.ones(1).to(critic_loss.device)] * (1 + len(losses))))
    grad_log = grad_fn()
    opt.step()
    log_dict = {}
    log_dict.update(grad_log)
    log_dict['reward'] = (avg_reward / len(art_batch))
    log_dict['advantage'] = (avg_advantage.item() / len(indices))
    log_dict['mse'] = critic_loss.item()
    # NaN gradient norm means training has diverged.
    assert (not math.isnan(log_dict['grad_norm']))
    return log_dict
def test_nested_start_rc():
    """StackEndRC must match the tail of the request stack, including a
    nested StackEndRC checker, and reject too-small or mismatched stacks."""
    def located(tp):
        # Shorthand for a LocatedRequest carrying a single type-hint location.
        return LocatedRequest(loc_map=LocMap(TypeHintLoc(tp)))

    checker = StackEndRC([AnyRequestChecker(), StackEndRC([create_request_checker(bool), create_request_checker(int), create_request_checker(str)]), create_request_checker(bool)])
    # Matching stack tail: (any, [bool, int, str], bool).
    checker.check_request(create_mediator(located(bool), located(int), located(str), located(bool)), located(bool))
    # Stack shorter than the checker chain.
    with pytest.raises(CannotProvide, match='Request stack is too small'):
        checker.check_request(create_mediator(located(int), located(str), located(bool)), located(bool))
    # Right length but wrong types in the nested stack.
    with pytest.raises(CannotProvide):
        checker.check_request(create_mediator(located(str), located(int), located(str), located(bool)), located(bool))
.parametrize(**test_case_table)
def test_2port(test_params, cmdline_opts):
    """Drive the 2-port magic memory with two interleaved message streams.

    NOTE(review): a ``parametrize(**test_case_table)`` decorator above this
    test appears to have been stripped in this view.
    """
    # Two independent streams targeting different base addresses.
    msgs0 = test_params.msg_func(4096)
    msgs1 = test_params.msg_func(8192)
    # Even-indexed elements are requests, odd-indexed are expected responses.
    run_sim(TestHarness(MagicMemoryRTL, 2, ([(req_cls, resp_cls)] * 2), [msgs0[::2], msgs1[::2]], [msgs0[1::2], msgs1[1::2]], test_params.stall, test_params.extra_lat, test_params.src_init, test_params.src_intv, test_params.sink_init, test_params.sink_intv), cmdline_opts)
def run_louvain_multilayer(intralayer_graph, interlayer_graph, layer_vec, weight='weight', resolution=1.0, omega=1.0, nruns=1):
    """Run multilayer modularity (Louvain) optimisation on a supra-adjacency graph.

    Args:
        intralayer_graph: igraph Graph holding all intra-layer edges.
        interlayer_graph: igraph Graph holding the inter-layer coupling edges.
        layer_vec: layer assignment for each vertex.
        weight: name of the edge-weight attribute.
        resolution: scalar resolution, or an indexable of per-layer resolutions.
        omega: inter-layer coupling strength.
        nruns: number of randomized optimisation runs.

    Returns:
        A list with one dict per run: partition, resolution/coupling used,
        normalised modularity and internal/expected edge-weight statistics.
    """
    logging.debug('Shuffling node ids')
    t = time()
    # Total weight scale used to normalise the reported modularity.
    mu = (np.sum(intralayer_graph.es[weight]) + interlayer_graph.ecount())
    # Newer louvain builds expose a layer-aware weighted partition class.
    use_RBCweighted = hasattr(louvain, 'RBConfigurationVertexPartitionWeightedLayers')
    outparts = []
    for run in range(nruns):
        # Randomly permute vertex ids so ties are broken differently per run.
        rand_perm = list(np.random.permutation(interlayer_graph.vcount()))
        rperm = rev_perm(rand_perm)
        interslice_layer_rand = interlayer_graph.permute_vertices(rand_perm)
        rlayer_vec = permute_vector(rand_perm, layer_vec)
        rintralayer_graph = intralayer_graph.permute_vertices(rand_perm)
        if use_RBCweighted:
            # Single supra-graph; layers are handled inside the partition class.
            # NOTE(review): this uses the *unpermuted* graph — confirm upstream.
            rlayers = [intralayer_graph]
        else:
            rlayers = _create_multilayer_igraphs_from_super_adj_igraph(rintralayer_graph, layer_vec=rlayer_vec)
        logging.debug('time: {:.4f}'.format(time() - t))
        t = time()
        layer_partition_objs = []
        logging.debug('creating partition objects')
        t = time()
        for (i, layer) in enumerate(rlayers):
            try:
                # Per-layer resolutions may be supplied as any indexable.
                res = resolution[i]
            except (TypeError, IndexError, KeyError):
                # Was a bare `except:`; narrowed so only "scalar resolution"
                # cases fall through and unrelated errors propagate.
                res = resolution
            if use_RBCweighted:
                cpart = louvain.RBConfigurationVertexPartitionWeightedLayers(layer, layer_vec=rlayer_vec, weights=weight, resolution_parameter=res)
            else:
                cpart = louvain.RBConfigurationVertexPartition(layer, weights=weight, resolution_parameter=res)
            layer_partition_objs.append(cpart)
        # The coupling layer contributes only internal edges (resolution 0).
        coupling_partition = louvain.RBConfigurationVertexPartition(interslice_layer_rand, weights=weight, resolution_parameter=0)
        all_layer_partobjs = layer_partition_objs + [coupling_partition]
        optimiser = louvain.Optimiser()
        logging.debug('time: {:.4f}'.format(time() - t))
        logging.debug('running optimiser')
        t = time()
        layer_weights = ([1] * len(rlayers)) + [omega]
        improvement = optimiser.optimise_partition_multiplex(all_layer_partobjs, layer_weights=layer_weights)
        # Undo the random permutation so memberships refer to original ids.
        finalpartition = permute_vector(rperm, all_layer_partobjs[0].membership)
        reversed_partobj = []
        for layer in layer_partition_objs:
            if use_RBCweighted:
                reversed_partobj.append(louvain.RBConfigurationVertexPartitionWeightedLayers(graph=layer.graph.permute_vertices(rperm), initial_membership=finalpartition, weights=weight, layer_vec=layer_vec, resolution_parameter=layer.resolution_parameter))
            else:
                reversed_partobj.append(louvain.RBConfigurationVertexPartition(graph=layer.graph.permute_vertices(rperm), initial_membership=finalpartition, weights=weight, resolution_parameter=layer.resolution_parameter))
        coupling_partition_rev = louvain.RBConfigurationVertexPartition(graph=coupling_partition.graph.permute_vertices(rperm), initial_membership=finalpartition, weights=weight, resolution_parameter=0)
        # A: internal intra-layer weight; P: expected (null-model) weight;
        # C: internal inter-layer (coupling) weight.
        A = _get_sum_internal_edges_from_partobj_list(reversed_partobj, weight=weight)
        if use_RBCweighted:
            P = get_expected_edges_ml(reversed_partobj[0], layer_vec=layer_vec, weight=weight)
        else:
            P = _get_sum_expected_edges_from_partobj_list(reversed_partobj, weight=weight)
        C = get_sum_internal_edges(coupling_partition_rev, weight=weight)
        outparts.append({'partition': np.array(finalpartition), 'resolution': resolution, 'coupling': omega, 'orig_mod': ((0.5 / mu) * (_get_modularity_from_partobj_list(reversed_partobj) + (omega * coupling_partition_rev.quality()))), 'int_edges': A, 'exp_edges': P, 'int_inter_edges': C})
        logging.debug('time: {:.4f}'.format(time() - t))
    return outparts
def test_solvers():
    """Every registered SAT solver agrees on a tiny formula: first SAT with
    the unique model, then UNSAT once that model is blocked."""
    formula = CNF(from_clauses=[[1, 2, 3], [(- 1), 2], [(- 2)]])
    for name in solvers:
        with Solver(name=name, bootstrap_with=formula) as oracle:
            assert oracle.solve(), 'wrong outcome by {0}'.format(name)
            model = oracle.get_model()
            assert model == [(- 1), (- 2), 3], 'wrong model by {0}'.format(name)
            # Block the only model; the formula must become unsatisfiable.
            oracle.add_clause([(- l) for l in model])
            assert not oracle.solve(), 'wrong outcome by {0}'.format(name)
class EigenstateResult(AlgorithmResult):
    """Result container for eigenvalues/eigenstates of an eigensolver.

    NOTE(review): the duplicated method names below read like ``@property``
    getters with matching ``.setter`` methods whose decorators were stripped
    (see the bare ``_operator_eigenvalues.setter`` / ``_result.setter``
    expressions, which are invalid as written) — confirm against the
    original source before relying on this class.
    """

    def eigenenergies(self) -> Optional[np.ndarray]:
        # Getter: the computed eigenenergies, if any.
        return self.get('eigenenergies')

    def eigenenergies(self, value: np.ndarray) -> None:
        # Setter counterpart (as written, this redefinition shadows the getter).
        self.data['eigenenergies'] = value

    def eigenstates(self) -> Optional[List[Union[(str, dict, Result, list, np.ndarray, Statevector, QuantumCircuit, Instruction, OperatorBase)]]]:
        # Getter: the computed eigenstates, in any supported representation.
        return self.get('eigenstates')

    def eigenstates(self, value: List[Union[(str, dict, Result, list, np.ndarray, Statevector, QuantumCircuit, Instruction, OperatorBase)]]) -> None:
        self.data['eigenstates'] = value

    def groundenergy(self) -> Optional[float]:
        # Real part of the lowest eigenenergy, when eigenenergies are present.
        energies = self.get('eigenenergies')
        if energies:
            return energies[0].real
        return None

    def groundstate(self) -> Optional[Union[(str, dict, Result, list, np.ndarray, Statevector, QuantumCircuit, Instruction, OperatorBase)]]:
        # First (lowest-energy) eigenstate, when eigenstates are present.
        states = self.get('eigenstates')
        if states:
            return states[0]
        return None

    def aux_operator_eigenvalues(self) -> Optional[List[float]]:
        return self.get('aux_operator_eigenvalues')

    # NOTE(review): bare expression below looks like a mangled setter decorator.
    _operator_eigenvalues.setter
    def aux_operator_eigenvalues(self, value: List[float]) -> None:
        self.data['aux_operator_eigenvalues'] = value

    def raw_result(self) -> Optional[AlgorithmResult]:
        return self.get('raw_result')

    # NOTE(review): bare expression below looks like a mangled setter decorator.
    _result.setter
    def raw_result(self, result: AlgorithmResult) -> None:
        self.data['raw_result'] = result
class SawyerBoxCloseEnv(SawyerXYZEnv):
    """Sawyer task: pick up a box lid (the 'handle' geom) and place it on the box."""

    def __init__(self):
        # Minimum lift height above the lid's resting height.
        liftThresh = 0.12
        # Sampling bounds for the goal (box) and object (lid) positions,
        # and the reachable hand workspace.
        goal_low = ((- 0.1), 0.85, 0.1329)
        goal_high = (0.1, 0.95, 0.1331)
        hand_low = ((- 0.5), 0.4, 0.05)
        hand_high = (0.5, 1, 0.5)
        obj_low = ((- 0.05), 0.55, 0.02)
        obj_high = (0.05, 0.6, 0.02)
        super().__init__(self.model_name, hand_low=hand_low, hand_high=hand_high)
        self.init_config = {'obj_init_angle': 0.3, 'obj_init_pos': np.array([0, 0.6, 0.02], dtype=np.float32), 'hand_init_pos': np.array((0, 0.6, 0.2), dtype=np.float32)}
        self.goal = np.array([0.0, 0.9, 0.133])
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.obj_init_angle = self.init_config['obj_init_angle']
        self.hand_init_pos = self.init_config['hand_init_pos']
        self.liftThresh = liftThresh
        # Joint sampling space: object position ++ goal position.
        self._random_reset_space = Box(np.hstack((obj_low, goal_low)), np.hstack((obj_high, goal_high)))
        self.goal_space = Box(np.array(goal_low), np.array(goal_high))

    def model_name(self):
        # NOTE(review): presumably an @property upstream — decorator not visible here.
        return full_v1_path_for('sawyer_xyz/sawyer_box.xml')

    # NOTE(review): bare expression below looks like a mangled decorator for `step`.
    _assert_task_is_set
    def step(self, action):
        """Advance one simulation step and report reward/diagnostic info."""
        ob = super().step(action)
        (reward, _, reachDist, pickRew, _, placingDist) = self.compute_reward(action, ob)
        info = {'reachDist': reachDist, 'pickRew': pickRew, 'epRew': reward, 'goalDist': placingDist, 'success': float((placingDist <= 0.08))}
        return (ob, reward, False, info)

    def _get_pos_objects(self):
        # The lid's position is tracked via its 'handle' geom.
        return self.data.get_geom_xpos('handle').copy()

    def reset_model(self):
        """Reset hand, lid and box; optionally randomize lid/goal placement."""
        self._reset_hand()
        self._target_pos = self.goal.copy()
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.obj_init_angle = self.init_config['obj_init_angle']
        self.objHeight = self.data.get_geom_xpos('handle')[2]
        self.boxheight = self.get_body_com('box')[2]
        self.heightTarget = (self.objHeight + self.liftThresh)
        if self.random_init:
            goal_pos = self._get_state_rand_vec()
            # Resample until lid and box are at least 0.25 apart in the plane.
            while (np.linalg.norm((goal_pos[:2] - goal_pos[(- 3):(- 1)])) < 0.25):
                goal_pos = self._get_state_rand_vec()
            self.obj_init_pos = np.concatenate((goal_pos[:2], [self.obj_init_pos[(- 1)]]))
            self._target_pos = goal_pos[(- 3):]
        # Move the box body under the target and place the lid.
        self.sim.model.body_pos[self.model.body_name2id('box')] = np.concatenate((self._target_pos[:2], [self.boxheight]))
        self._set_obj_xyz(self.obj_init_pos)
        # Worst-case placing distance, used to scale the placing reward.
        self.maxPlacingDist = (np.linalg.norm((np.array([self.obj_init_pos[0], self.obj_init_pos[1], self.heightTarget]) - np.array(self._target_pos))) + self.heightTarget)
        return self._get_obs()

    def _reset_hand(self):
        super()._reset_hand(10)
        (rightFinger, leftFinger) = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector'))
        # Gripper center-of-mass reference used by the reach reward.
        self.init_fingerCOM = ((rightFinger + leftFinger) / 2)
        self.pickCompleted = False

    def compute_reward(self, actions, obs):
        """Shaped reward = reach + pick + place components.

        Returns [reward, reachRew, reachDist, pickRew, placeRew, placingDist].
        """
        objPos = obs[3:6]
        (rightFinger, leftFinger) = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector'))
        fingerCOM = ((rightFinger + leftFinger) / 2)
        heightTarget = self.heightTarget
        placeGoal = self._target_pos
        placingDist = np.linalg.norm((objPos - placeGoal))
        reachDist = np.linalg.norm((objPos - fingerCOM))
        def reachReward():
            # Approach in xy first, then descend; bonus for closing the
            # gripper (actions[-1]) once close to the object.
            reachRew = (- reachDist)
            reachDistxy = np.linalg.norm((objPos[:(- 1)] - fingerCOM[:(- 1)]))
            zRew = np.linalg.norm((fingerCOM[(- 1)] - self.init_fingerCOM[(- 1)]))
            if (reachDistxy < 0.05):
                reachRew = (- reachDist)
            else:
                reachRew = ((- reachDistxy) - (2 * zRew))
            if (reachDist < 0.05):
                reachRew = ((- reachDist) + (max(actions[(- 1)], 0) / 50))
            return (reachRew, reachDist)
        def pickCompletionCriteria():
            # Lid considered lifted once near the target height.
            tolerance = 0.01
            if (objPos[2] >= (heightTarget - tolerance)):
                return True
            else:
                return False
        if pickCompletionCriteria():
            self.pickCompleted = True
        def objDropped():
            # Lid fell back near the table while away from gripper and goal.
            return ((objPos[2] < (self.objHeight + 0.005)) and (placingDist > 0.02) and (reachDist > 0.02))
        def orig_pickReward():
            # Reward proportional to achieved lift height while held.
            hScale = 100
            if (self.pickCompleted and (not objDropped())):
                return (hScale * heightTarget)
            elif ((reachDist < 0.1) and (objPos[2] > (self.objHeight + 0.005))):
                return (hScale * min(heightTarget, objPos[2]))
            else:
                return 0
        def placeReward():
            # Dense placing reward (two exponential wells) once the lid is
            # picked and still held.
            c1 = 1000
            c2 = 0.01
            c3 = 0.001
            cond = (self.pickCompleted and (reachDist < 0.1) and (not objDropped()))
            if cond:
                placeRew = ((1000 * (self.maxPlacingDist - placingDist)) + (c1 * (np.exp(((- (placingDist ** 2)) / c2)) + np.exp(((- (placingDist ** 2)) / c3)))))
                placeRew = max(placeRew, 0)
                return [placeRew, placingDist]
            else:
                return [0, placingDist]
        (reachRew, reachDist) = reachReward()
        pickRew = orig_pickReward()
        (placeRew, placingDist) = placeReward()
        assert ((placeRew >= 0) and (pickRew >= 0))
        reward = ((reachRew + pickRew) + placeRew)
        return [reward, reachRew, reachDist, pickRew, placeRew, placingDist]
class DataCollator():
    """Pads tokenized QA features to ``max_length``, converts them to jnp
    arrays and shards the batch across devices for JAX training."""
    pad_id: int
    max_length: int = 4096

    def __call__(self, batch):
        collated = self.collate_fn(batch)
        # Shard every array in the batch across local devices.
        return jax.tree_util.tree_map(shard, collated)

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features['input_ids'])
        return {
            'input_ids': jnp.array(input_ids, dtype=jnp.int32),
            'attention_mask': jnp.array(attention_mask, dtype=jnp.int32),
            'start_labels': jnp.array(features['start_token'], dtype=jnp.int32),
            'end_labels': jnp.array(features['end_token'], dtype=jnp.int32),
            'pooled_labels': jnp.array(features['category'], dtype=jnp.int32),
        }

    def fetch_inputs(self, input_ids: list):
        # Pad every sequence, then transpose [(ids, mask), ...] into
        # (all_ids, all_masks).
        padded = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*padded)

    def _fetch_inputs(self, input_ids: list):
        # Mask is 1 for real tokens, 0 for padding. Note: pads *in place*.
        attention_mask = [1] * len(input_ids)
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return (input_ids, attention_mask)
def test_receive_order_paid_of_period_outside_current_one(requests_mock):
    """A paid order for a period outside the current one still creates a
    PENDING membership with a single payment.

    NOTE(review): ``UserFactory(email='')`` — the email value appears to have
    been stripped from this view; confirm against the original test.
    """
    user = UserFactory(email='')
    # Freeze time so the order's membership period lies outside "now".
    with time_machine.travel('2023-12-16 01:04:50Z', tick=False):
        # Stub the three Pretix API endpoints the handler queries.
        requests_mock.get(f'{settings.PRETIX_API}organizers/test-organizer/events/local-conf-test/orders/9YKZK/', json=ORDER_DATA_WITH_MEMBERSHIP)
        requests_mock.get(f'{settings.PRETIX_API}organizers/test-organizer/events/local-conf-test/items/?active=true&category=25', json=ITEMS_WITH_CATEGORY)
        requests_mock.get(f'{settings.PRETIX_API}organizers/test-organizer/events/local-conf-test/categories/', json=CATEGORIES)
        pretix_event_order_paid(ORDER_PAID)
        created_membership = Membership.objects.get(user_id=user.id)
        assert (created_membership.status == MembershipStatus.PENDING)
        assert (created_membership.user_id == user.id)
        assert (created_membership.payments.count() == 1)
class NonLocal2D(nn.Module):
    """Non-local block (Wang et al., "Non-local Neural Networks") for 2D maps.

    Computes self-attention over all spatial positions using 1x1-conv
    embeddings g (value), theta (query) and phi (key), and adds the result
    back to the input as a residual.

    Fix: with ``use_scale=True`` the pairwise logits are now divided by
    ``sqrt(d_k)`` (``theta_x.shape[-1] ** 0.5``). The previous code divided
    by ``d_k ** -0.5``, i.e. *multiplied* by ``sqrt(d_k)``, inverting the
    intended dot-product-attention scaling.
    """

    def __init__(self, in_channels, reduction=2, use_scale=True, conv_cfg=None, norm_cfg=None, mode='embedded_gaussian'):
        super(NonLocal2D, self).__init__()
        self.in_channels = in_channels
        self.reduction = reduction
        self.use_scale = use_scale
        # Channel bottleneck for the attention embeddings.
        self.inter_channels = in_channels // reduction
        self.mode = mode
        assert mode in ['embedded_gaussian', 'dot_product']
        # 1x1 convs producing value (g), query (theta) and key (phi) embeddings.
        self.g = ConvModule(self.in_channels, self.inter_channels, kernel_size=1, activation=None)
        self.theta = ConvModule(self.in_channels, self.inter_channels, kernel_size=1, activation=None)
        self.phi = ConvModule(self.in_channels, self.inter_channels, kernel_size=1, activation=None)
        # Output projection back to in_channels (optionally normalized).
        self.conv_out = ConvModule(self.inter_channels, self.in_channels, kernel_size=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, activation=None)
        self.init_weights()

    def init_weights(self, std=0.01, zeros_init=True):
        """Normal-init the embedding convs; optionally zero-init the output
        conv so the block starts as an identity mapping."""
        for m in [self.g, self.theta, self.phi]:
            normal_init(m.conv, std=std)
        if zeros_init:
            constant_init(self.conv_out.conv, 0)
        else:
            normal_init(self.conv_out.conv, std=std)

    def embedded_gaussian(self, theta_x, phi_x):
        """Softmax attention weights; optionally scaled by 1/sqrt(d_k)."""
        pairwise_weight = torch.matmul(theta_x, phi_x)
        if self.use_scale:
            # Scaled dot-product attention: divide by sqrt(d_k).
            # (Previously `** (-0.5)`, which multiplied by sqrt(d_k).)
            pairwise_weight /= theta_x.shape[-1] ** 0.5
        pairwise_weight = pairwise_weight.softmax(dim=-1)
        return pairwise_weight

    def dot_product(self, theta_x, phi_x):
        """Plain dot-product weights, normalized by the number of positions."""
        pairwise_weight = torch.matmul(theta_x, phi_x)
        pairwise_weight /= pairwise_weight.shape[-1]
        return pairwise_weight

    def forward(self, x):
        n, _, h, w = x.shape
        # Flatten spatial dims: (n, c', h*w) -> (n, h*w, c') where needed.
        g_x = self.g(x).view(n, self.inter_channels, -1)
        g_x = g_x.permute(0, 2, 1)
        theta_x = self.theta(x).view(n, self.inter_channels, -1)
        theta_x = theta_x.permute(0, 2, 1)
        phi_x = self.phi(x).view(n, self.inter_channels, -1)
        # Select the configured pairwise function by name.
        pairwise_func = getattr(self, self.mode)
        pairwise_weight = pairwise_func(theta_x, phi_x)
        y = torch.matmul(pairwise_weight, g_x)
        y = y.permute(0, 2, 1).reshape(n, self.inter_channels, h, w)
        # Residual connection.
        output = x + self.conv_out(y)
        return output
def check_imports(filename):
    """Verify that every top-level package imported by *filename* is installed.

    Raises ImportError naming all missing packages; otherwise returns the
    file's relative imports (via ``get_relative_imports``).
    """
    with open(filename, 'r', encoding='utf-8') as f:
        content = f.read()
    # Collect `import x` and `from x import ...` statements.
    found = re.findall('^\\s*import\\s+(\\S+)\\s*$', content, flags=re.MULTILINE)
    found += re.findall('^\\s*from\\s+(\\S+)\\s+import', content, flags=re.MULTILINE)
    # Keep the top-level package only; drop relative imports; de-duplicate.
    modules = list({name.split('.')[0] for name in found if not name.startswith('.')})
    missing_packages = []
    for module in modules:
        try:
            importlib.import_module(module)
        except ImportError:
            missing_packages.append(module)
    if missing_packages:
        raise ImportError(f"This modeling file requires the following packages that were not found in your environment: {', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`")
    return get_relative_imports(filename)
(Gst, 'GStreamer missing')
class TGStreamerSink(TestCase):
    """Tests for gstreamer_sink() pipeline-element resolution.

    NOTE(review): a skip decorator referencing ``(Gst, 'GStreamer missing')``
    above this class appears to have been mangled in this view.
    """

    def test_simple(self):
        # Only exercise sinks actually available in this GStreamer install.
        sinks = ['gconfaudiosink', 'alsasink']
        for n in filter(Gst.ElementFactory.find, sinks):
            (obj, name) = gstreamer_sink(n)
            self.assertTrue(obj)
            self.assertEqual(name, n)

    def test_invalid(self):
        # Unknown element names must raise PlayerError.
        with ignore_gst_errors():
            self.assertRaises(PlayerError, gstreamer_sink, 'notarealsink')

    def test_fallback(self):
        # An empty pipeline string falls back to the platform default sink.
        (obj, name) = gstreamer_sink('')
        self.assertTrue(obj)
        if (os.name == 'nt'):
            self.assertEqual(name, 'directsoundsink')
        else:
            self.assertEqual(name, find_audio_sink()[1])

    def test_append_sink(self):
        # A custom element is prepended, with the default sink appended last.
        (obj, name) = gstreamer_sink('volume')
        self.assertTrue(obj)
        self.assertEqual(name.split('!')[(- 1)].strip(), gstreamer_sink('')[1])
def main(args, override_args=None):
    """Validate checkpointed model(s) on the configured validation subsets.

    Loads an ensemble from ``args.path`` (optionally overriding saved
    hyper-parameters via ``override_args``), runs ``task.valid_step`` over
    every subset named in ``args.valid_subset``, and prints aggregated,
    smoothed metrics per subset.
    """
    utils.import_user_module(args)
    assert ((args.max_tokens is not None) or (args.batch_size is not None)), 'Must specify batch size either with --max-tokens or --batch-size'
    use_fp16 = args.fp16
    use_cuda = (torch.cuda.is_available() and (not args.cpu))
    if use_cuda:
        torch.cuda.set_device(args.device_id)
    # Merge explicit override args with the serialized ``model_overrides`` dict.
    # NOTE(review): ``eval`` on a CLI-supplied string executes arbitrary code —
    # presumably acceptable for a trusted local CLI; confirm.
    if (override_args is not None):
        overrides = vars(override_args)
        overrides.update(eval(getattr(override_args, 'model_overrides', '{}')))
    else:
        overrides = None
    logger.info('loading model(s) from {}'.format(args.path))
    (models, model_args, task) = checkpoint_utils.load_model_ensemble_and_task([args.path], arg_overrides=overrides, suffix=getattr(args, 'checkpoint_suffix', ''))
    model = models[0]
    # Move every ensemble member to the requested precision/device.
    # NOTE(review): the loop rebinds ``model``, so validation below uses the
    # *last* ensemble member — confirm this is intended.
    for model in models:
        if use_fp16:
            model.half()
        if use_cuda:
            model.cuda()
    logger.info(model_args)
    # Build the criterion from the *saved* model args so it matches training.
    criterion = task.build_criterion(model_args)
    criterion.eval()
    for subset in args.valid_subset.split(','):
        try:
            task.load_dataset(subset, combine=False, epoch=1)
            dataset = task.dataset(subset)
        except KeyError:
            raise Exception(('Cannot find dataset: ' + subset))
        itr = task.get_batch_iterator(dataset=dataset, max_tokens=args.max_tokens, max_sentences=args.batch_size, max_positions=utils.resolve_max_positions(task.max_positions(), *[m.max_positions() for m in models]), ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=args.required_batch_size_multiple, seed=args.seed, num_shards=args.distributed_world_size, shard_id=args.distributed_rank, num_workers=args.num_workers, data_buffer_size=args.data_buffer_size).next_epoch_itr(shuffle=False)
        progress = progress_bar.progress_bar(itr, log_format=args.log_format, log_interval=args.log_interval, prefix=f"valid on '{subset}' subset", default_log_format=('tqdm' if (not args.no_progress_bar) else 'simple'))
        log_outputs = []
        for (i, sample) in enumerate(progress):
            sample = (utils.move_to_cuda(sample) if use_cuda else sample)
            (_loss, _sample_size, log_output) = task.valid_step(sample, model, criterion)
            progress.log(log_output, step=i)
            log_outputs.append(log_output)
        # In distributed runs, gather per-worker outputs before reducing.
        if (args.distributed_world_size > 1):
            log_outputs = distributed_utils.all_gather_list(log_outputs, max_size=getattr(args, 'all_gather_list_size', 16384))
            log_outputs = list(chain.from_iterable(log_outputs))
        with metrics.aggregate() as agg:
            task.reduce_metrics(log_outputs, criterion)
            log_output = agg.get_smoothed_values()
        # NOTE(review): ``i`` is undefined if the subset yields no batches —
        # presumably never the case in practice; confirm.
        progress.print(log_output, tag=subset, step=i)
class TestHeaderInclusion(unittest.TestCase):
    """Check that every CPy* function referenced from mypyc.primitives is declared in the C runtime headers."""
    def test_primitives_included_in_header(self) -> None:
        lib_rt_dir = os.path.join(os.path.dirname(__file__), '..', 'lib-rt')
        # A declaration in either runtime header counts.
        with open(os.path.join(lib_rt_dir, 'CPy.h')) as fh:
            headers = fh.read()
        with open(os.path.join(lib_rt_dir, 'pythonsupport.h')) as fh:
            headers += fh.read()
        def check_name(name: str) -> None:
            # Only CPy-prefixed names are expected to live in the headers.
            if name.startswith('CPy'):
                assert re.search(f'{name}', headers), f'"{name}" is used in mypyc.primitives but not declared in CPy.h'
        registries = [registry.method_call_ops.values(), registry.function_ops.values(), registry.binary_ops.values(), registry.unary_ops.values()]
        for values in registries:
            for ops in values:
                # Some registries map to a single description, others to a list.
                candidates = [ops] if isinstance(ops, CFunctionDescription) else ops
                for op in candidates:
                    check_name(op.c_function_name)
        # Also scan the primitives source files for literal c_function_name uses.
        primitives_dir = os.path.join(os.path.dirname(__file__), '..', 'primitives')
        for path in glob.glob(f'{primitives_dir}/*.py'):
            with open(path) as fh:
                text = fh.read()
            for name in re.findall('c_function_name=["\\\'](CPy[A-Z_a-z0-9]+)', text):
                check_name(name)
class TextEncoder(tf.keras.Model):
    """Wraps a TF-Hub text embedding layer built inside a distribution strategy scope."""

    # NOTE(review): the original source contained a mangled hub.KerasLayer call
    # whose module handle (URL) was lost. It is exposed here as ``hub_handle``
    # with the universal-sentence-encoder as a best guess — confirm against the
    # original repository.
    DEFAULT_HUB_HANDLE = 'https://tfhub.dev/google/universal-sentence-encoder/4'

    def __init__(self, strategy, trainable=False, hub_handle=DEFAULT_HUB_HANDLE):
        """Create the encoder under ``strategy``'s scope.

        Bug fix: tf.keras.Model forbids attribute assignment before
        ``super().__init__()`` — the original set ``self.strategy`` first,
        which raises at construction time.
        """
        with strategy.scope():
            super(TextEncoder, self).__init__()
            self.strategy = strategy
            self.encoder = hub.KerasLayer(hub_handle, trainable=trainable)

    def __call__(self, inp):
        """Return embeddings for ``inp``, computed under the strategy scope."""
        with self.strategy.scope():
            embedding = self.encoder(inp)
            return embedding
def structure(t, fieldproc=unescape):
    """Convert a parsed URL tuple into a dict of named components.

    ``t`` is (scheme, netloc, path, query, fragment); absent parts are None.
    ``fieldproc`` post-processes each decoded field (defaults to unescape).
    """
    result = {}
    scheme, netloc, path, query, fragment = t[0], t[1], t[2], t[3], t[4]
    if scheme is not None:
        result['scheme'] = scheme
    if netloc is not None:
        # split_netloc yields (user, password, host, port); keep the parts present.
        parts = split_netloc(netloc, fieldproc=fieldproc)
        for key, value in zip(('user', 'password', 'host', 'port'), parts):
            if value is not None:
                result[key] = value
    if path is not None:
        # Empty path string maps to an empty segment list.
        result['path'] = [fieldproc(seg) for seg in path.split('/')] if path else []
    if query is not None:
        if query:
            # Each pair becomes (key, value) with value None when '=' is absent.
            result['query'] = [tuple((list(map(fieldproc, pair.split('=', 1))) + [None])[:2]) for pair in query.split('&')]
        else:
            result['query'] = []
    if fragment is not None:
        result['fragment'] = fieldproc(fragment)
    return result
def get_config(path: str) -> Dict[(str, RepositoryConfig)]:
    """Load repository upload settings from a .pypirc-style file.

    Returns a mapping of repository name -> config dict. A missing file is
    tolerated only when ``path`` is the default config location.
    """
    realpath = os.path.realpath(os.path.expanduser(path))
    parser = configparser.RawConfigParser()
    try:
        with open(realpath) as fh:
            parser.read_file(fh)
        logger.info(f'Using configuration from {realpath}')
    except FileNotFoundError:
        # Only an explicitly given (non-default) path is required to exist.
        if (path != DEFAULT_CONFIG_FILE):
            raise
    # [server-login] supplies fallback credentials for every repository.
    defaults: RepositoryConfig = {'username': parser.get('server-login', 'username', fallback=None), 'password': parser.get('server-login', 'password', fallback=None)}
    config: DefaultDict[(str, RepositoryConfig)] = collections.defaultdict((lambda : defaults.copy()))
    repositories = parser.get('distutils', 'index-servers', fallback='pypi testpypi').split()
    # The main index always points at the default repository URL.
    config['pypi']['repository'] = DEFAULT_REPOSITORY
    if ('testpypi' in repositories):
        config['testpypi']['repository'] = TEST_REPOSITORY
    for name in repositories:
        for option in ('username', 'repository', 'password', 'ca_cert', 'client_cert'):
            if parser.has_option(name, option):
                config[name][option] = parser.get(name, option)
    return dict(config)
def _datetime_offset_inst(obj: str, pattern: str) -> datetime:
    """Parse a timestamp with a numeric UTC offset (e.g. ``...T10:00:00+05:30``).

    Splits off the ``+HH:MM``/``-HH:MM`` suffix, parses the naive part with
    ``pattern``, and reattaches the offset as a fixed timezone.
    """
    date_part, time_part = obj.split('T')
    # The offset sign decides both the split character and the sign factor.
    if '+' in time_part:
        sign_char, sign = '+', 1
    else:
        sign_char, sign = '-', -1
    naive_time, offset_part = time_part.split(sign_char)
    parsed = datetime.strptime('{}T{}'.format(date_part, naive_time), pattern)
    hours_text, minutes_text = offset_part.split(':')
    tz = timezone(offset=timedelta(hours=int(hours_text) * sign, minutes=int(minutes_text) * sign))
    return _new_datetime(parsed.date(), parsed.time(), tz)
def create_model(model_name: str, pretrained: Optional[str]=None, precision: str='fp32', device: Union[(str, torch.device)]='cpu', jit: bool=False, force_quick_gelu: bool=False, force_custom_clip: bool=False, force_patch_dropout: Optional[float]=None, pretrained_image: str='', pretrained_text: str='', pretrained_hf: bool=True, pretrained_visual_model: str=None, pretrained_text_model: str=None, cache_dir: Optional[str]=None, skip_list: list=[]):
    """Instantiate a CLIP-style model by config name and optionally load weights.

    Three loading paths are supported: OpenAI pretrained checkpoints, a single
    pretrained tag/path for the whole model, or separate visual/text tower
    checkpoints. The model is returned on ``device`` in the requested precision.

    Note: ``skip_list`` has a mutable default; it is only passed through
    (never mutated here), so this is benign.
    """
    # HF-hub style names use '/', local configs use '-'.
    model_name = model_name.replace('/', '-')
    if isinstance(device, str):
        device = torch.device(device)
    if (pretrained and (pretrained.lower() == 'openai')):
        logging.info(f'Loading pretrained {model_name} from OpenAI.')
        model = load_openai_model(model_name, precision=precision, device=device, jit=jit, cache_dir=cache_dir)
    else:
        model_cfg = get_model_config(model_name)
        if (model_cfg is not None):
            logging.info(f'Loaded {model_name} model config.')
        else:
            logging.error(f'Model config for {model_name} not found; available models {list_models()}.')
            raise RuntimeError(f'Model config for {model_name} not found.')
        # Rotary position embedding is toggled globally through an env var.
        if ('rope' in model_cfg.get('vision_cfg', {})):
            os.environ['RoPE'] = '1' if model_cfg['vision_cfg']['rope'] else '0'
        if force_quick_gelu:
            model_cfg['quick_gelu'] = True
        if (force_patch_dropout is not None):
            model_cfg['vision_cfg']['patch_dropout'] = force_patch_dropout
        cast_dtype = get_cast_dtype(precision)
        # HF text towers always require the custom CLIP wrapper.
        custom_clip = (model_cfg.pop('custom_text', False) or force_custom_clip or ('hf_model_name' in model_cfg['text_cfg']))
        if custom_clip:
            if ('hf_model_name' in model_cfg.get('text_cfg', {})):
                model_cfg['text_cfg']['hf_model_pretrained'] = pretrained_hf
            model = CustomCLIP(**model_cfg, cast_dtype=cast_dtype)
        else:
            model = CLIP(**model_cfg, cast_dtype=cast_dtype)
        pretrained_cfg = {}
        if pretrained:
            # Whole-model weights: a known tag, or a local checkpoint path.
            checkpoint_path = ''
            pretrained_cfg = get_pretrained_cfg(model_name, pretrained)
            if pretrained_cfg:
                checkpoint_path = download_pretrained(pretrained_cfg, cache_dir=cache_dir)
            elif os.path.exists(pretrained):
                checkpoint_path = pretrained
            if checkpoint_path:
                logging.info(f'Loading pretrained {model_name} weights ({pretrained}).')
                load_checkpoint(model, checkpoint_path, model_key='model|module|state_dict', strict=False)
            else:
                # Message fix: the original ran the sentences together and
                # dropped a closing parenthesis.
                error_str = f'Pretrained weights ({pretrained}) not found for model {model_name}. Available pretrained tags ({list_pretrained_tags_by_model(model_name)}).'
                logging.warning(error_str)
                raise RuntimeError(error_str)
        else:
            # Tower-wise weights: visual and/or text loaded independently.
            visual_checkpoint_path = ''
            text_checkpoint_path = ''
            if pretrained_image:
                # NOTE(review): assumes pretrained_visual_model is set whenever
                # pretrained_image is — confirm against callers.
                pretrained_visual_model = pretrained_visual_model.replace('/', '-')
                pretrained_image_cfg = get_pretrained_cfg(pretrained_visual_model, pretrained_image)
                if ('timm_model_name' in model_cfg.get('vision_cfg', {})):
                    # timm backbones fetch their own pretrained weights.
                    model_cfg['vision_cfg']['timm_model_pretrained'] = True
                elif pretrained_image_cfg:
                    visual_checkpoint_path = download_pretrained(pretrained_image_cfg, cache_dir=cache_dir)
                elif os.path.exists(pretrained_image):
                    visual_checkpoint_path = pretrained_image
                else:
                    logging.warning(f'Pretrained weights ({visual_checkpoint_path}) not found for model {model_name}.visual.')
                    raise RuntimeError(f'Pretrained weights ({visual_checkpoint_path}) not found for model {model_name}.visual.')
            if pretrained_text:
                pretrained_text_model = pretrained_text_model.replace('/', '-')
                pretrained_text_cfg = get_pretrained_cfg(pretrained_text_model, pretrained_text)
                # Bug fix: this previously tested ``pretrained_image_cfg`` (a
                # variable from the image branch, undefined unless
                # pretrained_image was also given) instead of the text config.
                if pretrained_text_cfg:
                    text_checkpoint_path = download_pretrained(pretrained_text_cfg, cache_dir=cache_dir)
                elif os.path.exists(pretrained_text):
                    text_checkpoint_path = pretrained_text
                else:
                    logging.warning(f'Pretrained weights ({text_checkpoint_path}) not found for model {model_name}.text.')
                    raise RuntimeError(f'Pretrained weights ({text_checkpoint_path}) not found for model {model_name}.text.')
            if visual_checkpoint_path:
                logging.info(f'Loading pretrained {model_name}.visual weights ({visual_checkpoint_path}).')
            if text_checkpoint_path:
                logging.info(f'Loading pretrained {model_name}.text weights ({text_checkpoint_path}).')
            if (visual_checkpoint_path or text_checkpoint_path):
                load_pretrained_checkpoint(model, visual_checkpoint_path, text_checkpoint_path, strict=False, visual_model=pretrained_visual_model, text_model=pretrained_text_model, model_key='model|module|state_dict', skip_list=skip_list)
        if (('fp16' in precision) or ('bf16' in precision)):
            logging.info(f'convert precision to {precision}')
            model = (model.to(torch.bfloat16) if ('bf16' in precision) else model.to(torch.float16))
        model.to(device=device)
        # Normalization stats fall back to the OpenAI dataset defaults.
        model.visual.image_mean = (pretrained_cfg.get('mean', None) or OPENAI_DATASET_MEAN)
        model.visual.image_std = (pretrained_cfg.get('std', None) or OPENAI_DATASET_STD)
        if jit:
            model = torch.jit.script(model)
    return model
def train_epoch(gpu, train_loader, model, base_optimizer, epoch, args, lr_scheduler=None, grad_rho_scheduler=None, grad_norm_rho_scheduler=None, optimizer=None):
    """Train ``model`` for one epoch with the GAM optimizer on device ``gpu``.

    When no optimizer/schedulers are supplied, GAM and its rho schedulers are
    constructed here from ``args`` and ``lr_scheduler``. Aborts with SystemExit
    when the loss becomes NaN.
    """
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('', ':6.2f')
    Lr = AverageMeter('Lr', ':.4e')
    progress = ProgressMeter(len(train_loader), [losses, top1], prefix='Epoch: [{}]'.format(epoch))
    model.train()
    # NOTE(review): lr is read once before the loop, so the Lr meter logs a
    # constant value for the whole epoch even if a scheduler steps — confirm.
    lr = base_optimizer.param_groups[0]['lr']
    if ((not grad_rho_scheduler) and (not grad_norm_rho_scheduler) and (not optimizer)):
        # Rho values track the LR schedule proportionally between min and max.
        grad_rho_scheduler = ProportionScheduler(pytorch_lr_scheduler=lr_scheduler, max_lr=args.lr, min_lr=0.0, max_value=args.grad_rho_max, min_value=args.grad_rho_min)
        grad_norm_rho_scheduler = ProportionScheduler(pytorch_lr_scheduler=lr_scheduler, max_lr=args.lr, min_lr=0.0, max_value=args.grad_norm_rho_max, min_value=args.grad_norm_rho_min)
        optimizer = GAM(params=model.parameters(), base_optimizer=base_optimizer, model=model, grad_rho_scheduler=grad_rho_scheduler, grad_norm_rho_scheduler=grad_norm_rho_scheduler, adaptive=args.adaptive, args=args)
    def loss_fn(predictions, targets):
        # Label-smoothed cross entropy, averaged over the batch.
        return smooth_crossentropy(predictions, targets, smoothing=args.label_smoothing).mean()
    for (i, (images, target)) in enumerate(train_loader):
        images = images.cuda(gpu, non_blocking=True)
        target = target.cuda(gpu, non_blocking=True)
        # GAM runs its multi-step update inside optimizer.step() via the closure.
        optimizer.set_closure(loss_fn, images, target)
        (predictions, loss) = optimizer.step()
        with torch.no_grad():
            optimizer.update_rho_t()
        # acc5 is computed but unused here.
        (acc1, acc5) = accuracy(predictions, target, topk=(1, 5))
        top1.update(acc1[0], images.size(0))
        losses.update(loss.item(), images.size(0))
        optimizer.zero_grad()
        Lr.update(lr, 1)
        # Display label is the second-to-last component of the log path.
        method_name = args.log_path.split('/')[(- 2)]
        if ((i % args.print_freq) == 0):
            progress.display(i, method_name)
            progress.write_log(i, args.log_path)
        if torch.isnan(loss).any():
            raise SystemExit('NaN!')
# NOTE(review): this bare name appears to be a decorator mangled during source
# extraction (likely ``@login_required``) — confirm against the original file
# before relying on access control here.
_required
def plugin_update(request, package_name):
    """Edit an existing plugin's metadata.

    GET renders the edit form; POST validates and saves it, replacing the
    owners m2m with exactly the submitted set. Users without plugin access
    get a permission-denied page.
    """
    plugin = get_object_or_404(Plugin, package_name=package_name)
    if (not check_plugin_access(request.user, plugin)):
        return render(request, 'plugins/plugin_permission_deny.html', {})
    if (request.method == 'POST'):
        form = PluginForm(request.POST, request.FILES, instance=plugin)
        # The plugin creator cannot be selected as an additional owner.
        form.fields['owners'].queryset = User.objects.exclude(pk=plugin.created_by.pk).order_by('username')
        if form.is_valid():
            new_object = form.save(commit=False)
            new_object.modified_by = request.user
            new_object.save()
            form.save_m2m()
            # Replace owners with exactly the cleaned submitted set.
            new_object.owners.clear()
            for o in form.cleaned_data['owners']:
                new_object.owners.add(o)
            msg = _('The Plugin has been successfully updated.')
            messages.success(request, msg, fail_silently=True)
            # Presumably warns about missing optional metadata — confirm.
            _check_optional_metadata(form, request)
            return HttpResponseRedirect(new_object.get_absolute_url())
        # Invalid POST falls through to re-render the bound form with errors.
    else:
        form = PluginForm(instance=plugin)
        form.fields['owners'].queryset = User.objects.exclude(pk=plugin.created_by.pk).order_by('username')
    return render(request, 'plugins/plugin_form.html', {'form': form, 'form_title': _('Edit plugin'), 'plugin': plugin})
def get_examples(path, sub_sample_train=3000, sub_sample_eval=256):
    """Load a task JSON file and split its instances into train/eval subsamples.

    Returns (positive, negative, train, eval, definition), each run through
    ``process_examples`` except the definition string. Train and eval indices
    are disjoint.

    Robustness fix: sample sizes are clamped to the available population so
    small datasets no longer raise ValueError from ``random.sample``.
    """
    with open(path) as f:
        raw_dataset = json.load(f)
    positive_examples = raw_dataset['Positive Examples']
    negative_examples = raw_dataset['Negative Examples']
    all_examples = raw_dataset['Instances']
    n = len(all_examples)
    # Clamp: eval never exceeds the population, train never exceeds the rest.
    eval_sub = random.sample(range(n), min(sub_sample_eval, n))
    left = list((set(range(n)) - set(eval_sub)))
    train_sub = random.sample(left, min(sub_sample_train, len(left)))
    train_examples = process_examples([all_examples[i] for i in train_sub])
    eval_examples = process_examples([all_examples[i] for i in eval_sub])
    return (process_examples(positive_examples), process_examples(negative_examples), train_examples, eval_examples, raw_dataset['Definition'][0])
class CustomJsonFormatter(jsonlogger.JsonFormatter):
    """JSON log formatter that stamps timestamp, severity, service name and B3 trace context."""
    service_name = ''  # set via add_service_name()
    tracer = ''  # NOTE(review): appears unused in this class — confirm before removing.
    def add_fields(self, log_record, record, message_dict):
        super().add_fields(log_record, record, message_dict)
        # Ensure an ISO-8601-style UTC timestamp is always present.
        if (not log_record.get('timestamp')):
            log_record['timestamp'] = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ')
        # Prefer an explicit 'level' field, else fall back to the record's level name.
        level = log_record.get('level')
        log_record['severity'] = level.upper() if level else record.levelname
        log_record['service'] = self.service_name
        try:
            # Pull B3 trace headers from the active span, if any.
            headers = inject_span_in_headers({})
            log_record['trace'] = headers.get('X-B3-TraceId', '')
            log_record['span'] = headers.get('X-B3-SpanId', '')
            log_record['parent'] = headers.get('X-B3-ParentSpanId', '')
        except Exception as ex:
            logger.error('Tracer error: {}'.format(ex))
    def add_service_name(self, project_name):
        self.service_name = project_name.lower()
def parse_extension_item_param(header: str, pos: int, header_name: str) -> Tuple[(ExtensionParameter, int)]:
    """Parse one ``name[=value]`` extension parameter starting at ``pos``.

    Returns ((name, value), new_pos); ``value`` is None when absent. A quoted
    value must still be a valid token once unquoted.
    """
    name, pos = parse_token(header, pos, header_name)
    pos = parse_OWS(header, pos)
    value: Optional[str] = None
    if peek_ahead(header, pos) == '=':
        pos = parse_OWS(header, pos + 1)
        if peek_ahead(header, pos) == '"':
            quote_start = pos
            value, pos = parse_quoted_string(header, pos, header_name)
            # The unquoted content must itself be a token.
            if _token_re.fullmatch(value) is None:
                raise exceptions.InvalidHeaderFormat(header_name, 'invalid quoted header content', header, quote_start)
        else:
            value, pos = parse_token(header, pos, header_name)
        pos = parse_OWS(header, pos)
    return ((name, value), pos)
def init_visualization(argument):
    """Dispatch to the matching visualization initializer for ``argument``.

    Accepts an ``Eigenstates`` result or a ``TimeSimulation``. Robustness fix:
    the original silently returned None for any other type; unsupported types
    now raise TypeError.
    """
    if isinstance(argument, Eigenstates):
        return init_eigenstate_visualization(argument)
    if isinstance(argument, TimeSimulation):
        return init_timesimulation_visualization(argument)
    raise TypeError(f'init_visualization expects Eigenstates or TimeSimulation, got {type(argument).__name__}')
class InstrumentDumpFetch():
    """Thin Redis-backed store for instrument and option-chain JSON blobs."""
    def __init__(self):
        # Local Redis instance on the default port.
        self.conn = redis.StrictRedis(host='localhost', port=6379)
    def data_dump(self, symbol, instrument_data):
        """Persist ``instrument_data`` as JSON under ``symbol``."""
        self.conn.set(symbol, json.dumps(instrument_data))
    def symbol_data(self, symbol):
        """Load the contract details stored under ``symbol``."""
        try:
            return json.loads(self.conn.get(symbol))
        except TypeError:
            # conn.get returned None: the key does not exist.
            raise Exception('Key not found - {}'.format(symbol))
    def fetch_token(self, token):
        """Load the instrument stored under ``token``."""
        try:
            return json.loads(self.conn.get(token))
        except Exception as e:
            raise Exception('Error {}'.format(e))
    def store_optiondata(self, tradingsymbol, token, optionData):
        """Persist option-chain data under the ``tradingsymbol:token`` key."""
        option_chain_key = '{}:{}'.format(tradingsymbol, token)
        try:
            self.conn.set(option_chain_key, json.dumps(optionData))
        except Exception as e:
            raise Exception('Error - {}'.format(e))
    def fetch_option_data(self, tradingsymbol, token):
        """Load option-chain data stored under the ``tradingsymbol:token`` key."""
        option_contract_key = '{}:{}'.format(tradingsymbol, token)
        try:
            return json.loads(self.conn.get(option_contract_key))
        except Exception as e:
            raise Exception('Error - {}'.format(e))
def get_dataset(data_args: argparse.Namespace, processor: Union[(Union[(PyGameTextRenderer, PangoCairoTextRenderer)], PreTrainedTokenizerFast)], modality: Modality, split: Split, config: PretrainedConfig):
    """Build a UDDataset for dependency parsing; image modality gets resize transforms."""
    if modality == Modality.IMAGE:
        # Rendered-text images are resized to patch_size x (patch_size * max_seq_length).
        target_size = (processor.pixels_per_patch, processor.pixels_per_patch * processor.max_seq_length)
        transforms = get_transforms(do_resize=True, size=target_size)
    else:
        # Tokenizer-based inputs need no image transforms.
        transforms = None
    return UDDataset(
        data_dir=data_args.data_dir,
        processor=processor,
        transforms=transforms,
        modality=modality,
        labels=UD_HEAD_LABELS,
        max_seq_length=data_args.max_seq_length,
        overwrite_cache=data_args.overwrite_cache,
        mode=split,
        pad_token=config.pad_token_id,
    )
def calculate_class_abstract_status(typ: TypeInfo, is_stub_file: bool, errors: Errors) -> None:
    """Compute whether ``typ`` is abstract and record its abstract attributes.

    Walks the MRO from most- to least-derived, treating a name as settled once
    any class in the MRO provides it, and collecting names that are abstract at
    their first occurrence. Also reports stub-file classes that inherit
    abstract members without being declared abstract, and final classes that
    have abstract members.
    """
    typ.is_abstract = False
    typ.abstract_attributes = []
    # TypedDicts cannot be abstract.
    if typ.typeddict_type:
        return
    concrete: set[str] = set()
    abstract: list[tuple[(str, int)]] = []
    abstract_in_this_class: list[str] = []
    # NewTypes cannot be abstract either.
    if typ.is_newtype:
        return
    for base in typ.mro:
        for (name, symnode) in base.names.items():
            node = symnode.node
            if isinstance(node, OverloadedFuncDef):
                # The first overload item carries the abstractness flag.
                if node.items:
                    func: (Node | None) = node.items[0]
                else:
                    func = None
            else:
                func = node
            if isinstance(func, Decorator):
                func = func.func
            if isinstance(func, FuncDef):
                # Abstract only if no more-derived class already defined the name.
                if ((func.abstract_status in (IS_ABSTRACT, IMPLICITLY_ABSTRACT)) and (name not in concrete)):
                    typ.is_abstract = True
                    abstract.append((name, func.abstract_status))
                    if (base is typ):
                        abstract_in_this_class.append(name)
            elif isinstance(node, Var):
                if (node.is_abstract_var and (name not in concrete)):
                    typ.is_abstract = True
                    abstract.append((name, IS_ABSTRACT))
                    if (base is typ):
                        abstract_in_this_class.append(name)
            # Any definition (abstract or not) shadows the same name further up the MRO.
            concrete.add(name)
    typ.abstract_attributes = sorted(abstract)
    if is_stub_file:
        # In stubs, warn about classes that only *inherit* abstract members yet
        # are not themselves declared abstract (no ABCMeta, not a Protocol).
        if (typ.declared_metaclass and typ.declared_metaclass.type.has_base('abc.ABCMeta')):
            return
        if typ.is_protocol:
            return
        if (abstract and (not abstract_in_this_class)):
            def report(message: str, severity: str) -> None:
                errors.report(typ.line, typ.column, message, severity=severity)
            attrs = ', '.join((f'"{attr}"' for (attr, _) in sorted(abstract)))
            report(f'Class {typ.fullname} has abstract attributes {attrs}', 'error')
            report("If it is meant to be abstract, add 'abc.ABCMeta' as an explicit metaclass", 'note')
    # A @final class with abstract members can never be instantiated.
    if (typ.is_final and abstract):
        attrs = ', '.join((f'"{attr}"' for (attr, _) in sorted(abstract)))
        errors.report(typ.line, typ.column, f'Final class {typ.fullname} has abstract attributes {attrs}')
def _run(handle_data, initialize, before_trading_start, analyze, algofile, algotext, defines, data_frequency, capital_base, bundle, bundle_timestamp, start, end, output, trading_calendar, print_algo, metrics_set, local_namespace, environ, blotter, benchmark_spec):
    """Run a backtest: load bundle data, build the TradingAlgorithm, execute it,
    and write the performance result to ``output`` ('-' prints to stdout).

    The algorithm comes either from callables (initialize/handle_data/...) or
    from source text (``algofile``/``algotext``), optionally parameterized via
    ``defines`` (name=value pairs evaluated into the algorithm namespace).
    """
    bundle_data = bundles.load(bundle, environ, bundle_timestamp)
    if (trading_calendar is None):
        trading_calendar = get_calendar('XNYS')
    # Refuse date ranges that contain no trading session.
    if (trading_calendar.session_distance(start, end) < 1):
        raise _RunAlgoError(('There are no trading days between %s and %s' % (start.date(), end.date())))
    (benchmark_sid, benchmark_returns) = benchmark_spec.resolve(asset_finder=bundle_data.asset_finder, start_date=start, end_date=end)
    if (algotext is not None):
        # Script mode: run the algorithm text in either the IPython user
        # namespace or a fresh one, seeded with the -D/--define bindings.
        if local_namespace:
            ip = get_ipython()
            namespace = ip.user_ns
        else:
            namespace = {}
        for assign in defines:
            try:
                (name, value) = assign.split('=', 2)
            except ValueError:
                raise ValueError(('invalid define %r, should be of the form name=value' % assign))
            try:
                # NOTE(review): ``eval`` of a user-supplied define executes
                # arbitrary code — presumably acceptable for a local CLI; confirm.
                namespace[name] = eval(value, namespace)
            except Exception as e:
                raise ValueError(('failed to execute definition for name %r: %s' % (name, e)))
    elif defines:
        # Defines only make sense together with algorithm text.
        raise _RunAlgoError('cannot pass define without `algotext`', "cannot pass '-D' / '--define' without '-t' / '--algotext'")
    else:
        namespace = {}
        if (algofile is not None):
            algotext = algofile.read()
    if print_algo:
        # Pretty-print the algorithm source when pygments is available.
        if PYGMENTS:
            highlight(algotext, PythonLexer(), TerminalFormatter(), outfile=sys.stdout)
        else:
            click.echo(algotext)
    first_trading_day = bundle_data.equity_minute_bar_reader.first_trading_day
    data = DataPortal(bundle_data.asset_finder, trading_calendar=trading_calendar, first_trading_day=first_trading_day, equity_minute_reader=bundle_data.equity_minute_bar_reader, equity_daily_reader=bundle_data.equity_daily_bar_reader, adjustment_reader=bundle_data.adjustment_reader)
    pipeline_loader = USEquityPricingLoader.without_fx(bundle_data.equity_daily_bar_reader, bundle_data.adjustment_reader)
    def choose_loader(column):
        # Only US equity pricing columns have a registered pipeline loader.
        if (column in USEquityPricing.columns):
            return pipeline_loader
        raise ValueError(('No PipelineLoader registered for column %s.' % column))
    # Metrics set and blotter may be given by registry name.
    if isinstance(metrics_set, six.string_types):
        try:
            metrics_set = metrics.load(metrics_set)
        except ValueError as e:
            raise _RunAlgoError(str(e))
    if isinstance(blotter, six.string_types):
        try:
            blotter = load(Blotter, blotter)
        except ValueError as e:
            raise _RunAlgoError(str(e))
    try:
        perf = TradingAlgorithm(namespace=namespace, data_portal=data, get_pipeline_loader=choose_loader, trading_calendar=trading_calendar, sim_params=SimulationParameters(start_session=start, end_session=end, trading_calendar=trading_calendar, capital_base=capital_base, data_frequency=data_frequency), metrics_set=metrics_set, blotter=blotter, benchmark_returns=benchmark_returns, benchmark_sid=benchmark_sid, **({'initialize': initialize, 'handle_data': handle_data, 'before_trading_start': before_trading_start, 'analyze': analyze} if (algotext is None) else {'algo_filename': getattr(algofile, 'name', '<algorithm>'), 'script': algotext})).run()
    except NoBenchmark:
        raise _RunAlgoError('No ``benchmark_spec`` was provided, and ``zipline.api.set_benchmark`` was not called in ``initialize``.', "Neither '--benchmark-symbol' nor '--benchmark-sid' was provided, and ``zipline.api.set_benchmark`` was not called in ``initialize``. Did you mean to pass '--no-benchmark'?")
    # '-' writes to stdout; os.devnull discards; anything else is pickled.
    if (output == '-'):
        click.echo(str(perf))
    elif (output != os.devnull):
        perf.to_pickle(output)
    return perf
class UVCCSD(UVCC):
    """UVCC ansatz restricted to single and double excitations ('sd')."""
    def __init__(self, num_modals: (list[int] | None)=None, qubit_mapper: (QubitMapper | None)=None, *, reps: int=1, initial_state: (QuantumCircuit | None)=None) -> None:
        """Build the ansatz; all arguments are forwarded to UVCC with excitations fixed to 'sd'.

        Args:
            num_modals: number of modals per mode.
            qubit_mapper: operator-to-qubit mapper.
            reps: number of ansatz repetitions.
            initial_state: optional circuit prepended to the ansatz.
        """
        super().__init__(num_modals=num_modals, excitations='sd', qubit_mapper=qubit_mapper, reps=reps, initial_state=initial_state)
def finite_loss(ival: "Interval", loss: float, x_scale: float) -> "tuple[float, Interval]":
    """Map a possibly infinite/NaN interval loss onto a finite, rounded value.

    Infinite or NaN losses are replaced by the interval's width on the scaled
    x-axis (divided by the point count for 3-tuples) so such intervals can
    still be ordered. The loss is then rounded to 12 digits so losses that are
    equal up to numerical precision compare equal.

    Note: the ``Interval`` annotations are quoted so this function can be
    imported without the type alias in scope.
    """
    if (math.isinf(loss) or math.isnan(loss)):
        loss = ((ival[1] - ival[0]) / x_scale)
        if (len(ival) == 3):
            # The third element is the number of points inside the interval.
            loss /= ival[2]
    # Bug fix: round_fac was 0.0, making the division below a ZeroDivisionError;
    # 1e12 restores round-to-12-digits behavior (as in the adaptive package).
    round_fac = 1e12
    loss = (int(((loss * round_fac) + 0.5)) / round_fac)
    return (loss, ival)
class TestVehicleRouting(QiskitOptimizationTestCase):
def setUp(self):
super().setUp()
random.seed(600)
low = 0
high = 100
pos = {i: (random.randint(low, high), random.randint(low, high)) for i in range(4)}
self.graph = nx.random_geometric_graph(4, (np.hypot((high - low), (high - low)) + 1), pos=pos)
for (w, v) in self.graph.edges:
delta = [(self.graph.nodes[w]['pos'][i] - self.graph.nodes[v]['pos'][i]) for i in range(2)]
self.graph.edges[(w, v)]['weight'] = np.rint(np.hypot(delta[0], delta[1]))
op = QuadraticProgram()
for i in range(12):
op.binary_var()
self.result = OptimizationResult(x=[1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0], fval=184, variables=op.variables, status=OptimizationResultStatus.SUCCESS)
self.result_d2 = OptimizationResult(x=[1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1], fval=208.0, variables=op.variables, status=OptimizationResultStatus.SUCCESS)
self.result_nv3 = OptimizationResult(x=[1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0], fval=212.0, variables=op.variables, status=OptimizationResultStatus.SUCCESS)
def test_to_quadratic_program(self):
vehicle_routing = VehicleRouting(self.graph)
op = vehicle_routing.to_quadratic_program()
self.assertEqual(op.name, 'Vehicle routing')
self.assertEqual(op.get_num_vars(), 12)
for var in op.variables:
self.assertEqual(var.vartype, VarType.BINARY)
obj = op.objective
self.assertEqual(obj.sense, QuadraticObjective.Sense.MINIMIZE)
self.assertEqual(obj.constant, 0)
self.assertDictEqual(obj.linear.to_dict(), {0: 49.0, 1: 36.0, 2: 21.0, 3: 49.0, 4: 65.0, 5: 67.0, 6: 36.0, 7: 65.0, 8: 29.0, 9: 21.0, 10: 67.0, 11: 29.0})
self.assertEqual(obj.quadratic.to_dict(), {})
lin = op.linear_constraints
self.assertEqual(len(lin), 12)
for i in range(3):
self.assertEqual(lin[i].sense, Constraint.Sense.EQ)
self.assertEqual(lin[i].rhs, 1)
self.assertEqual(lin[i].linear.to_dict(), {(3 * (i + 1)): 1, ((3 * (i + 1)) + 1): 1, ((3 * (i + 1)) + 2): 1})
self.assertEqual(lin[3].sense, Constraint.Sense.EQ)
self.assertEqual(lin[3].rhs, 1)
self.assertEqual(lin[3].linear.to_dict(), {0: 1, 7: 1, 10: 1})
self.assertEqual(lin[4].sense, Constraint.Sense.EQ)
self.assertEqual(lin[4].rhs, 1)
self.assertEqual(lin[4].linear.to_dict(), {1: 1, 4: 1, 11: 1})
self.assertEqual(lin[5].sense, Constraint.Sense.EQ)
self.assertEqual(lin[5].rhs, 1)
self.assertEqual(lin[5].linear.to_dict(), {2: 1, 5: 1, 8: 1})
self.assertEqual(lin[6].sense, Constraint.Sense.EQ)
self.assertEqual(lin[6].rhs, 2)
self.assertEqual(lin[6].linear.to_dict(), {3: 1, 6: 1, 9: 1})
self.assertEqual(lin[7].sense, Constraint.Sense.EQ)
self.assertEqual(lin[7].rhs, 2)
self.assertEqual(lin[7].linear.to_dict(), {0: 1, 1: 1, 2: 1})
self.assertEqual(lin[8].sense, Constraint.Sense.LE)
self.assertEqual(lin[8].rhs, 1)
self.assertEqual(lin[8].linear.to_dict(), {4: 1, 7: 1})
self.assertEqual(lin[9].sense, Constraint.Sense.LE)
self.assertEqual(lin[9].rhs, 1)
self.assertEqual(lin[9].linear.to_dict(), {5: 1, 10: 1})
self.assertEqual(lin[10].sense, Constraint.Sense.LE)
self.assertEqual(lin[10].rhs, 1)
self.assertEqual(lin[10].linear.to_dict(), {8: 1.0, 11: 1.0})
self.assertEqual(lin[11].sense, Constraint.Sense.LE)
self.assertEqual(lin[11].rhs, 2)
self.assertEqual(lin[11].linear.to_dict(), {4: 1, 5: 1, 7: 1, 8: 1, 10: 1, 11: 1})
def test_interpret(self):
vehicle_routing = VehicleRouting(self.graph)
self.assertEqual(vehicle_routing.interpret(self.result), [[[0, 1], [1, 0]], [[0, 2], [2, 3], [3, 0]]])
def test_edgelist(self):
vehicle_routing = VehicleRouting(self.graph)
self.assertEqual(vehicle_routing._edgelist(vehicle_routing.interpret(self.result)), [[0, 1], [1, 0], [0, 2], [2, 3], [3, 0]])
def test_edge_color(self):
vehicle_routing = VehicleRouting(self.graph)
self.assertEqual(vehicle_routing._edge_color(vehicle_routing.interpret(self.result)), [0.0, 0.0, 0.5, 0.5, 0.5])
def test_to_quadratic_program_d2(self):
vehicle_routing = VehicleRouting(self.graph, depot=2)
op = vehicle_routing.to_quadratic_program()
self.assertEqual(op.name, 'Vehicle routing')
self.assertEqual(op.get_num_vars(), 12)
for var in op.variables:
self.assertEqual(var.vartype, VarType.BINARY)
obj = op.objective
self.assertEqual(obj.sense, QuadraticObjective.Sense.MINIMIZE)
self.assertEqual(obj.constant, 0)
self.assertDictEqual(obj.linear.to_dict(), {0: 49.0, 1: 36.0, 2: 21.0, 3: 49.0, 4: 65.0, 5: 67.0, 6: 36.0, 7: 65.0, 8: 29.0, 9: 21.0, 10: 67.0, 11: 29.0})
self.assertEqual(obj.quadratic.to_dict(), {})
lin = op.linear_constraints
self.assertEqual(len(lin), 12)
c012 = [(- 1), 0, 2]
for i in range(3):
j = c012[i]
self.assertEqual(lin[i].sense, Constraint.Sense.EQ)
self.assertEqual(lin[i].rhs, 1)
self.assertEqual(lin[i].linear.to_dict(), {(3 * (j + 1)): 1, ((3 * (j + 1)) + 1): 1, ((3 * (j + 1)) + 2): 1})
self.assertEqual(lin[3].sense, Constraint.Sense.EQ)
self.assertEqual(lin[3].rhs, 1)
self.assertEqual(lin[3].linear.to_dict(), {3: 1, 6: 1, 9: 1})
self.assertEqual(lin[4].sense, Constraint.Sense.EQ)
self.assertEqual(lin[4].rhs, 1)
self.assertEqual(lin[4].linear.to_dict(), {0: 1, 7: 1, 10: 1})
self.assertEqual(lin[5].sense, Constraint.Sense.EQ)
self.assertEqual(lin[5].rhs, 1)
self.assertEqual(lin[5].linear.to_dict(), {2: 1, 5: 1, 8: 1})
self.assertEqual(lin[6].sense, Constraint.Sense.EQ)
self.assertEqual(lin[6].rhs, 2)
self.assertEqual(lin[6].linear.to_dict(), {1: 1, 4: 1, 11: 1})
self.assertEqual(lin[7].sense, Constraint.Sense.EQ)
self.assertEqual(lin[7].rhs, 2)
self.assertEqual(lin[7].linear.to_dict(), {6: 1, 7: 1, 8: 1})
self.assertEqual(lin[8].sense, Constraint.Sense.LE)
self.assertEqual(lin[8].rhs, 1)
self.assertEqual(lin[8].linear.to_dict(), {0: 1, 3: 1})
self.assertEqual(lin[9].sense, Constraint.Sense.LE)
self.assertEqual(lin[9].rhs, 1)
self.assertEqual(lin[9].linear.to_dict(), {2: 1, 9: 1})
self.assertEqual(lin[10].sense, Constraint.Sense.LE)
self.assertEqual(lin[10].rhs, 1)
self.assertEqual(lin[10].linear.to_dict(), {5: 1.0, 10: 1.0})
self.assertEqual(lin[11].sense, Constraint.Sense.LE)
self.assertEqual(lin[11].rhs, 2)
self.assertEqual(lin[11].linear.to_dict(), {0: 1, 2: 1, 3: 1, 5: 1, 9: 1, 10: 1})
def test_interpret_d2(self):
vehicle_routing = VehicleRouting(self.graph, depot=2)
self.assertEqual(vehicle_routing.interpret(self.result_d2), [[[2, 0], [0, 1], [1, 2]], [[2, 3], [3, 2]]])
def test_edgelist_d2(self):
vehicle_routing = VehicleRouting(self.graph, depot=2)
self.assertEqual(vehicle_routing._edgelist(vehicle_routing.interpret(self.result_d2)), [[2, 0], [0, 1], [1, 2], [2, 3], [3, 2]])
def test_edge_color_d2(self):
vehicle_routing = VehicleRouting(self.graph, depot=2)
self.assertEqual(vehicle_routing._edge_color(vehicle_routing.interpret(self.result_d2)), [0.0, 0.0, 0.0, 0.5, 0.5])
def test_to_quadratic_program_nv3(self):
vehicle_routing = VehicleRouting(self.graph, num_vehicles=3)
op = vehicle_routing.to_quadratic_program()
self.assertEqual(op.name, 'Vehicle routing')
self.assertEqual(op.get_num_vars(), 12)
for var in op.variables:
self.assertEqual(var.vartype, VarType.BINARY)
obj = op.objective
self.assertEqual(obj.sense, QuadraticObjective.Sense.MINIMIZE)
self.assertEqual(obj.constant, 0)
self.assertDictEqual(obj.linear.to_dict(), {0: 49.0, 1: 36.0, 2: 21.0, 3: 49.0, 4: 65.0, 5: 67.0, 6: 36.0, 7: 65.0, 8: 29.0, 9: 21.0, 10: 67.0, 11: 29.0})
self.assertEqual(obj.quadratic.to_dict(), {})
lin = op.linear_constraints
self.assertEqual(len(lin), 12)
for i in range(3):
self.assertEqual(lin[i].sense, Constraint.Sense.EQ)
self.assertEqual(lin[i].rhs, 1)
self.assertEqual(lin[i].linear.to_dict(), {(3 * (i + 1)): 1, ((3 * (i + 1)) + 1): 1, ((3 * (i + 1)) + 2): 1})
self.assertEqual(lin[3].sense, Constraint.Sense.EQ)
self.assertEqual(lin[3].rhs, 1)
self.assertEqual(lin[3].linear.to_dict(), {0: 1, 7: 1, 10: 1})
self.assertEqual(lin[4].sense, Constraint.Sense.EQ)
self.assertEqual(lin[4].rhs, 1)
self.assertEqual(lin[4].linear.to_dict(), {1: 1, 4: 1, 11: 1})
self.assertEqual(lin[5].sense, Constraint.Sense.EQ)
self.assertEqual(lin[5].rhs, 1)
self.assertEqual(lin[5].linear.to_dict(), {2: 1, 5: 1, 8: 1})
self.assertEqual(lin[6].sense, Constraint.Sense.EQ)
self.assertEqual(lin[6].rhs, 3)
self.assertEqual(lin[6].linear.to_dict(), {3: 1, 6: 1, 9: 1})
self.assertEqual(lin[7].sense, Constraint.Sense.EQ)
self.assertEqual(lin[7].rhs, 3)
self.assertEqual(lin[7].linear.to_dict(), {0: 1, 1: 1, 2: 1})
self.assertEqual(lin[8].sense, Constraint.Sense.LE)
self.assertEqual(lin[8].rhs, 1)
self.assertEqual(lin[8].linear.to_dict(), {4: 1, 7: 1})
self.assertEqual(lin[9].sense, Constraint.Sense.LE)
self.assertEqual(lin[9].rhs, 1)
self.assertEqual(lin[9].linear.to_dict(), {5: 1, 10: 1})
self.assertEqual(lin[10].sense, Constraint.Sense.LE)
self.assertEqual(lin[10].rhs, 1)
self.assertEqual(lin[10].linear.to_dict(), {8: 1.0, 11: 1.0})
self.assertEqual(lin[11].sense, Constraint.Sense.LE)
self.assertEqual(lin[11].rhs, 2)
self.assertEqual(lin[11].linear.to_dict(), {4: 1, 5: 1, 7: 1, 8: 1, 10: 1, 11: 1})
def test_interpret_nv3(self):
    """interpret() with 3 vehicles: each vehicle drives one out-and-back leg from the depot."""
    vehicle_routing = VehicleRouting(self.graph, num_vehicles=3)
    # One route per vehicle, each a [out, back] pair of directed edges.
    self.assertEqual(vehicle_routing.interpret(self.result_nv3), [[[0, 1], [1, 0]], [[0, 2], [2, 0]], [[0, 3], [3, 0]]])
def test_edgelist_nv3(self):
    """_edgelist() flattens the per-vehicle routes into one list of directed edges."""
    vehicle_routing = VehicleRouting(self.graph, num_vehicles=3)
    self.assertEqual(vehicle_routing._edgelist(vehicle_routing.interpret(self.result_nv3)), [[0, 1], [1, 0], [0, 2], [2, 0], [0, 3], [3, 0]])
def test_edge_color_nv3(self):
    """_edge_color() assigns each vehicle's edges the fraction vehicle_index / num_vehicles."""
    vehicle_routing = VehicleRouting(self.graph, num_vehicles=3)
    # Two edges per vehicle share a color value: 0/3, 1/3 and 2/3.
    self.assertEqual(vehicle_routing._edge_color(vehicle_routing.interpret(self.result_nv3)), [0.0, 0.0, (1 / 3), (1 / 3), (2 / 3), (2 / 3)])
def test_create_random_instance(self):
    """create_random_instance(n=4, seed=600) must reproduce the fixture graph exactly."""
    vehicle_routing = VehicleRouting.create_random_instance(n=4, seed=600)
    graph = vehicle_routing.graph
    # With the fixed seed, node positions and edge weights match self.graph.
    for node in graph.nodes:
        self.assertEqual(graph.nodes[node]['pos'], self.graph.nodes[node]['pos'])
    for edge in graph.edges:
        self.assertEqual(graph.edges[edge]['weight'], self.graph.edges[edge]['weight'])
def test_num_vehicles(self):
    """The num_vehicles property is writable after construction."""
    vehicle_routing = VehicleRouting(self.graph, num_vehicles=2)
    vehicle_routing.num_vehicles = 5
    self.assertEqual(vehicle_routing.num_vehicles, 5)
def test_depot(self):
    """The depot property is writable after construction."""
    vehicle_routing = VehicleRouting(self.graph, depot=0)
    vehicle_routing.depot = 2
    self.assertEqual(vehicle_routing.depot, 2)
# NOTE(review): the three bare lines below look like decorator lines whose
# "@register_canonicalize" / "@register_specialize" / "@node_rewriter(...)"
# prefixes were lost in extraction — restore the "@..." forms before use.
_canonicalize
_specialize
_rewriter([AdvancedSubtensor1])
def local_adv_sub1_adv_inc_sub1(fgraph, node):
    """Rewrite advanced_subtensor1(advanced_set_subtensor1(x, y, idx), idx) -> y.

    The replacement is wrapped in an Assert that re-checks, at runtime, the
    index bounds (and shape agreement, when shape inference cannot prove it).
    """
    if (not isinstance(node.op, AdvancedSubtensor1)):
        return
    inp = node.inputs[0]
    if ((not inp.owner) or (not isinstance(inp.owner.op, AdvancedIncSubtensor1))):
        return
    idx = node.inputs[1]
    idx2 = inp.owner.inputs[2]
    x = inp.owner.inputs[0]
    y = inp.owner.inputs[1]
    # Only fires when the outer take uses the *same* index variable object
    # as the inner set (identity, not value equality).
    if (idx is not idx2):
        return
    # NOTE(review): this guard is dead code — the unconditional
    # "if not set_instead_of_inc: return" two lines below subsumes it, so the
    # inc-into-zeros case it seems meant to allow is never reached.
    if ((not inp.owner.op.set_instead_of_inc) and (extract_constant(x, elemwise=False) != 0)):
        return
    if (not inp.owner.op.set_instead_of_inc):
        return
    # Runtime condition 1: every index is within [-x.shape[0], x.shape[0]).
    cond = [pt_all(and_(lt(idx, x.shape[0]), ge(idx, (- x.shape[0]))))]
    # Runtime condition 2 (only if not already proven by shape inference):
    # idx and y agree on their leading dimension.
    if (not fgraph.shape_feature.same_shape(idx, y, 0, 0)):
        cond.append(eq(idx.shape[0], y.shape[0]))
    r = Assert('Bad indexing or shapes in a AdvancedIncSubtensor1 that was optimized away')(y, *cond)
    copy_stack_trace(y, r)
    if (r.dtype == node.outputs[0].dtype):
        return [r]
    # y's dtype may differ from the original output's — cast back.
    r2 = cast(r, node.outputs[0].dtype)
    copy_stack_trace(r, r2)
    return [r2]
def test_apply_along_last_axis():
    # NOTE(review): the bare "_along_last_axis" below is almost certainly a
    # decorator on f() whose "@" prefix (e.g. "@apply_along_last_axis") was
    # lost in extraction — without it, f is a plain 1-D function and the
    # multi-dimensional calls below would fail its ndim assertion.
    _along_last_axis
    def f(x):
        # Per-slice operation: keep the first half and add a 0..n/2-1 ramp.
        assert (x.ndim == 1)
        return (x[:(len(x) // 2)] + np.arange((len(x) // 2)))
    # The wrapped f must accept any rank, mapping over the last axis: rank is
    # preserved and the last dimension is halved.
    for shape in [(10,), (2, 10), (2, 2, 10)]:
        x = np.ones(shape)
        y = f(x)
        xshape = x.shape
        yshape = y.shape
        assert (len(xshape) == len(yshape))
        assert ((xshape[(- 1)] // 2) == yshape[(- 1)])
        # Same behavior when the array is passed as a keyword argument.
        y = f(x=x)
        yshape = y.shape
        assert (len(xshape) == len(yshape))
        assert ((xshape[(- 1)] // 2) == yshape[(- 1)])
    # 2-D: the vectorized call must equal an explicit row-by-row loop.
    x = np.ones((2, 10), dtype=np.float64)
    y = np.empty((2, 5), dtype=np.float64)
    for i in range(len(x)):
        y[i] = f(x[i])
    yhat = f(x)
    assert np.allclose(yhat, y)
    # 3-D: same equivalence check over both leading axes.
    x = np.ones((2, 2, 10), dtype=np.float64)
    y = np.empty((2, 2, 5), dtype=np.float64)
    for i in range(len(x)):
        for j in range(len(x[i])):
            y[i][j] = f(x[i][j])
    yhat = f(x)
    assert np.allclose(yhat, y)
def data_collator(features):
    """Collate a list of feature dicts (or objects) into a batch of tensors.

    'label' / 'label_ids' entries are gathered under a single 'labels' key
    (long dtype for int labels, float otherwise); every other value that is
    not None and not a string is stacked (tensors) or tensor-ified with long
    dtype (lists/scalars) under its own key.
    """
    # Objects (e.g. dataclass-like feature instances) are reduced to dicts.
    if not isinstance(features[0], dict):
        features = [vars(f) for f in features]
    first = features[0]
    batch = {}
    label = first.get('label')
    label_ids = first.get('label_ids')
    if label is not None:
        # Scalar tensor labels are unwrapped so dtype detection works.
        if isinstance(label, torch.Tensor):
            label = label.item()
        dtype = torch.long if isinstance(label, int) else torch.float
        batch['labels'] = torch.tensor([f['label'] for f in features], dtype=dtype)
    elif label_ids is not None:
        if isinstance(label_ids, torch.Tensor):
            batch['labels'] = torch.stack([f['label_ids'] for f in features])
        else:
            dtype = torch.long if type(label_ids[0]) is int else torch.float
            batch['labels'] = torch.tensor([f['label_ids'] for f in features], dtype=dtype)
    # All remaining per-example fields are batched under their original keys.
    for key, value in first.items():
        if key in ('label', 'label_ids') or value is None or isinstance(value, str):
            continue
        if isinstance(value, torch.Tensor):
            batch[key] = torch.stack([f[key] for f in features])
        else:
            batch[key] = torch.tensor([f[key] for f in features], dtype=torch.long)
    return batch
class ConvOps(BaseOp):
    """3-D convolution op covering plain/transposed x dense/depthwise variants.

    Only the weight layer(s) are built here; norm/activation/dropout ordering
    is presumably handled by BaseOp via *ops_order* — confirm in BaseOp.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, transposed=False, depthwised=False, dropout_rate=0, ops_order='weight_norm_act'):
        super().__init__(in_channels, out_channels, dropout_rate, ops_order)
        self.depthwised = depthwised
        # "Same"-style padding for the dilated kernel, clamped to be >= 0.
        padding = max(0, ceil(((((dilation * (kernel_size - 1)) - stride) + 1) / 2)))
        if transposed:
            if depthwised:
                # Depthwise-separable transposed conv: grouped transpose conv
                # followed by a 1x1x1 pointwise conv. output_padding=1 keeps
                # the output size consistent when stride > 1.
                self.depth_conv = nn.ConvTranspose3d(in_channels, in_channels, kernel_size, stride=stride, padding=padding, groups=in_channels, output_padding=(0 if (stride == 1) else 1))
                self.point_conv = nn.Conv3d(in_channels, out_channels, kernel_size=1)
            else:
                self.conv = nn.ConvTranspose3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, output_padding=(0 if (stride == 1) else 1))
        elif depthwised:
            # Depthwise-separable conv: grouped conv + pointwise conv.
            self.depth_conv = nn.Conv3d(in_channels, in_channels, kernel_size, stride=stride, padding=padding, groups=in_channels)
            self.point_conv = nn.Conv3d(in_channels, out_channels, kernel_size=1)
        else:
            self.conv = nn.Conv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation)

    def weight_call(self, x):
        # Apply the layer(s) built in __init__; invoked by the BaseOp
        # machinery at the 'weight' position of ops_order — TODO confirm.
        if self.depthwised:
            x = self.depth_conv(x)
            x = self.point_conv(x)
        else:
            x = self.conv(x)
        return x
# NOTE(review): the bare "_bool('is_required_a')" below looks like a
# parametrize-style decorator whose prefix was lost in extraction.
_bool('is_required_a')
def test_extra_extract(debug_ctx, debug_trail, trail_select, is_required_a, acc_schema):
    """Dumping with ExtraExtract: my_extractor's output is merged into the result.

    The model exposes fields 'a' (required per the is_required_a parameter)
    and 'b' (always required); only 'a' is placed by the crown, while extras
    are pulled from 'b' by my_extractor.
    """
    dumper_getter = make_dumper_getter(shape=shape(TestField('a', acc_schema.accessor_maker('a', is_required=is_required_a)), TestField('b', acc_schema.accessor_maker('b', is_required=True))), name_layout=OutputNameLayout(crown=OutDictCrown({'a': OutFieldCrown('a')}, sieves={}), extra_move=ExtraExtract(my_extractor)), debug_trail=debug_trail, debug_ctx=debug_ctx)
    dumper = dumper_getter()
    # Extracted extras are merged next to the crowned 'a' field; a key clash
    # ('b') simply overwrites/coincides in the output dict.
    assert (dumper(acc_schema.dummy(a=1, b={'e': 2})) == {'a': 1, 'e': 2})
    assert (dumper(acc_schema.dummy(a=1, b={'b': 2})) == {'a': 1, 'b': 2})
    if (not is_required_a):
        # Optional 'a' may be absent without error.
        assert (dumper(acc_schema.dummy(b={'f': 2})) == {'f': 2})
        assert (dumper(acc_schema.dummy()) == {})
        assert (dumper(acc_schema.dummy(a=1)) == {'a': 1})
    if is_required_a:
        # Missing required 'a' raises; the expected error shape depends on the
        # debug_trail mode (disable / first / all) selected by trail_select.
        raises_exc(trail_select(disable=acc_schema.access_error(ANY), first=with_trail(acc_schema.access_error(ANY), [acc_schema.trail_element_maker('a')]), all=CompatExceptionGroup(f'while dumping model {Dummy}', [with_trail(acc_schema.access_error(ANY), [acc_schema.trail_element_maker('a')])])), (lambda : dumper(acc_schema.dummy())))
        raises_exc(trail_select(disable=acc_schema.access_error(ANY), first=with_trail(acc_schema.access_error(ANY), [acc_schema.trail_element_maker('a')]), all=CompatExceptionGroup(f'while dumping model {Dummy}', [with_trail(acc_schema.access_error(ANY), [acc_schema.trail_element_maker('a')])])), (lambda : dumper(acc_schema.dummy(b=1))))
    # Errors raised by the extractor itself propagate per the trail mode.
    raises_exc(trail_select(disable=SomeError(), first=SomeError(), all=CompatExceptionGroup(f'while dumping model {Dummy}', [SomeError()])), (lambda : dumper(acc_schema.dummy(a=1, b=SomeError()))))
    raises_exc(trail_select(disable=SomeError(0), first=with_trail(SomeError(0), [acc_schema.trail_element_maker('a')]), all=CompatExceptionGroup(f'while dumping model {Dummy}', [with_trail(SomeError(0), [acc_schema.trail_element_maker('a')]), SomeError(1)])), (lambda : dumper(acc_schema.dummy(a=SomeError(0), b=SomeError(1)))))
class HIKOM4(FinTS3Segment):
    """HIKOM version 4: bank communication-parameters segment (FinTS 3.0).

    Field meanings follow the German FinTS data-element descriptions (_d).
    """
    # Kreditinstitutskennung — identifies the bank.
    bank_identifier = DataElementGroupField(type=BankIdentifier, _d='Kreditinstitutskennung')
    # Standardsprache — the bank's default language code.
    default_language = CodeField(enum=Language2, max_length=3, _d='Standardsprache')
    # Kommunikationsparameter — 1 to 9 supported communication endpoints.
    communication_parameters = DataElementGroupField(type=CommunicationParameter2, min_count=1, max_count=9, _d='Kommunikationsparameter')
def loadData(datasetStr):
    """Load the five pickled artifacts for one dataset.

    Args:
        datasetStr: name of the dataset sub-directory under ./dataset
            (resolved relative to the current working directory).

    Returns:
        Tuple (trainMat, testData, validData, trainTimeMat, trustMat), each
        unpickled from its file in the dataset directory.

    Raises:
        FileNotFoundError: if any of the expected pickle files is missing.
    """
    DIR = os.path.join(os.getcwd(), 'dataset', datasetStr)
    log(DIR)

    def _load(name):
        # One helper replaces five copy-pasted with/open/load stanzas;
        # os.path.join instead of manual '/'-concatenation.
        with open(os.path.join(DIR, name), 'rb') as fs:
            return pk.load(fs)

    return (
        _load('train.pkl'),
        _load('test_data.pkl'),
        _load('valid_data.pkl'),
        _load('train_time.pkl'),
        _load('trust.pkl'),
    )
class Solution(object):
    def mostCommonWord(self, paragraph, banned):
        """Return the most frequent word of *paragraph* not in *banned*.

        The paragraph is lowercased and split on spaces and the punctuation
        set !?',;. — ties resolve to the word that was counted first.
        """
        banned = set(banned)
        tokens = (w for w in re.split("[ !?',;.]", paragraph.lower()) if w)
        counts = collections.Counter(tokens)
        # Drop banned words, then pick the key with the highest count.
        allowed = {word: freq for word, freq in counts.items() if word not in banned}
        return max(allowed, key=allowed.get)
# NOTE(review): "_onnx", "_torch" and the bare tuple line below look like
# decorator lines whose "@require_onnx" / "@require_torch" / "@patch(...)"
# prefixes were lost in extraction — restore them before running.
_onnx
class OnnxUtilsTestCaseV2(TestCase):
    """Unit tests for the transformers.onnx export utility helpers."""
    _torch
    ('transformers.onnx.convert.is_torch_onnx_dict_inputs_support_available', return_value=False)
    def test_ensure_pytorch_version_ge_1_8_0(self, mock_is_torch_onnx_dict_inputs_support_available):
        # export() must refuse to run when dict-input support is reported
        # unavailable (the patched check returns False).
        self.assertRaises(AssertionError, export, None, None, None, None, None)
        mock_is_torch_onnx_dict_inputs_support_available.assert_called()

    def test_compute_effective_axis_dimension(self):
        # Dynamic (-1) or zero axis sizes fall back to fixed_dimension...
        self.assertEqual(compute_effective_axis_dimension((- 1), fixed_dimension=2, num_token_to_add=0), 2)
        self.assertEqual(compute_effective_axis_dimension(0, fixed_dimension=2, num_token_to_add=0), 2)
        # ...minus the number of special tokens to be added.
        # NOTE(review): each case below is asserted twice — likely copy-paste.
        self.assertEqual(compute_effective_axis_dimension(0, fixed_dimension=8, num_token_to_add=2), 6)
        self.assertEqual(compute_effective_axis_dimension(0, fixed_dimension=8, num_token_to_add=2), 6)
        self.assertEqual(compute_effective_axis_dimension(0, fixed_dimension=8, num_token_to_add=3), 5)
        self.assertEqual(compute_effective_axis_dimension(0, fixed_dimension=8, num_token_to_add=3), 5)

    def test_compute_parameters_serialized_size(self):
        # Two float parameters serialize to 2 * sizeof(float) bytes.
        self.assertEqual(compute_serialized_parameters_size(2, ParameterFormat.Float), (2 * ParameterFormat.Float.size))

    def test_flatten_output_collection_property(self):
        # Nested output collections flatten into index-suffixed dotted keys.
        self.assertEqual(OnnxConfig.flatten_output_collection_property('past_key', [[0], [1], [2]]), {'past_key.0': 0, 'past_key.1': 1, 'past_key.2': 2})
class Distance2EcmStrMaxGetter(SmoothPointGetter):
    """Point getter: combined max ECM jamming strength of *src* vs distance.

    Collects every active ECM source on the fit (modules, doomsday AoE ECM,
    drones, fighters), then evaluates their range-scaled combined strength
    at each sampled distance.
    """
    _baseResolution = 50
    _extraDepth = 2
    # Per-sensor-type strength bonus attributes for modules/drones...
    ECM_ATTRS_GENERAL = ('scanGravimetricStrengthBonus', 'scanLadarStrengthBonus', 'scanMagnetometricStrengthBonus', 'scanRadarStrengthBonus')
    # ...and their fighter-ability counterparts.
    ECM_ATTRS_FIGHTERS = ('fighterAbilityECMStrengthGravimetric', 'fighterAbilityECMStrengthLadar', 'fighterAbilityECMStrengthMagnetometric', 'fighterAbilityECMStrengthRadar')

    def _getCommonData(self, miscParams, src, tgt):
        """Build the distance-independent list of ECM sources once per curve.

        Each entry is a tuple:
            (strength, optimalRange, falloffRange, needsLock, needsDroneControlRange)
        with strength already scaled by the target's ECM resistance.
        """
        # miscParams['resist'] is the target's ECM resist; None counts as 0.
        resonance = (1 - (miscParams['resist'] or 0))
        ecms = []
        for mod in src.item.activeModulesIter():
            for effectName in ('remoteECMFalloff', 'structureModuleEffectECM'):
                if (effectName in mod.item.effects):
                    # Regular jammer: max of the four sensor strengths, uses
                    # the module's own optimal/falloff, requires a lock.
                    ecms.append(((max((mod.getModifiedItemAttr(a) for a in self.ECM_ATTRS_GENERAL)) * resonance), (mod.maxRange or 0), (mod.falloff or 0), True, False))
            if ('doomsdayAOEECM' in mod.item.effects):
                # AoE doomsday ECM: range extended by the AoE radius, no lock
                # needed.
                ecms.append(((max((mod.getModifiedItemAttr(a) for a in self.ECM_ATTRS_GENERAL)) * resonance), max(0, ((mod.maxRange or 0) + mod.getModifiedItemAttr('doomsdayAOERange'))), (mod.falloff or 0), False, False))
        for drone in src.item.activeDronesIter():
            if ('entityECMFalloff' in drone.item.effects):
                # One entry per active drone; drones orbit, so range is
                # treated as unlimited but they need lock + drone control
                # range.
                ecms.extend((drone.amountActive * (((max((drone.getModifiedItemAttr(a) for a in self.ECM_ATTRS_GENERAL)) * resonance), math.inf, 0, True, True),)))
        for (fighter, ability) in src.item.activeFighterAbilityIter():
            if (ability.effect.name == 'fighterAbilityECM'):
                # Fighter squadron strength scales with squad size; needs lock.
                ecms.append((((max((fighter.getModifiedItemAttr(a) for a in self.ECM_ATTRS_FIGHTERS)) * fighter.amount) * resonance), math.inf, 0, True, False))
        return {'ecms': ecms}

    def _calculatePoint(self, x, miscParams, src, tgt, commonData):
        """Sum range-scaled strengths of all applicable ECM sources at distance x."""
        distance = x
        inLockRange = checkLockRange(src=src, distance=distance)
        inDroneRange = checkDroneControlRange(src=src, distance=distance)
        combinedStr = 0
        for (strength, optimal, falloff, needsLock, needsDcr) in commonData['ecms']:
            # Skip sources whose lock / drone-control prerequisites fail here.
            if ((needsLock and (not inLockRange)) or (needsDcr and (not inDroneRange))):
                continue
            combinedStr += (strength * calculateRangeFactor(srcOptimalRange=optimal, srcFalloffRange=falloff, distance=distance))
        return combinedStr
def validate(gpu, val_loader, model, criterion, test=True, args=None):
    """Evaluate *model* on *val_loader* and return the average top-1 accuracy.

    Args:
        gpu: CUDA device index for moving batches.
        val_loader: iterable of (images, target) batches.
        model: network under evaluation (set to eval mode here).
        criterion: loss function for logging.
        test: selects 'Test:' vs 'Val:' meter labels only.
        args: must provide .dataset, .print_freq and .log_path.

    Returns:
        top1.avg — the running average of top-1 accuracy.

    NOTE(review): top5 is created and included in the progress display and
    the final print, but is never updated (accuracy is only computed with
    topk=(1,)), so its reported value stays at its initial state — confirm
    whether top-5 tracking was intended.
    """
    if test:
        batch_time = AverageMeter('Time', ':6.3f')
        losses = AverageMeter('Loss', ':.4e')
        top1 = AverageMeter('', ':6.2f')
        top5 = AverageMeter('', ':6.2f')
        progress = ProgressMeter(len(val_loader), [batch_time, losses, top1, top5], prefix='Test: ')
    else:
        batch_time = AverageMeter('val Time', ':6.3f')
        losses = AverageMeter('val Loss', ':.4e')
        top1 = AverageMeter('Val ', ':6.2f')
        top5 = AverageMeter('Val ', ':6.2f')
        progress = ProgressMeter(len(val_loader), [batch_time, losses, top1, top5], prefix='Val: ')
    model.eval()
    with torch.no_grad():
        end = time.time()
        for (i, (images, target)) in enumerate(val_loader):
            images = images.cuda(gpu, non_blocking=True)
            target = target.cuda(gpu, non_blocking=True)
            # CIFAR models return logits only; others also return features.
            if args.dataset.startswith('CIFAR'):
                output = model(images)
            else:
                (output, cfeatures) = model(images)
            loss = criterion(output, target)
            acc1 = accuracy(output, target, topk=(1,))[0]
            losses.update(loss.item(), images.size(0))
            top1.update(acc1[0], images.size(0))
            batch_time.update((time.time() - end))
            end = time.time()
            if ((i % args.print_freq) == 0):
                # Second-to-last path component is used as the method name.
                method_name = args.log_path.split('/')[(- 2)]
                progress.display(i, method_name)
                progress.write_log(i, args.log_path)
        print(' * {top1.avg:.3f} {top5.avg:.3f}'.format(top1=top1, top5=top5))
        # Final summary is appended to the log file as well.
        with open(args.log_path, 'a') as f1:
            f1.writelines(' * {top1.avg:.3f} {top5.avg:.3f}'.format(top1=top1, top5=top5))
    return top1.avg
class AoA_Decoder_Core(nn.Module):
    """Attention-on-Attention decoder core: attention LSTM + attention + context op.

    The context operator after attention is selected by opt.decoder_type:
    'AoA' (linear + GLU), 'LSTM' (second LSTM cell), or a plain linear+ReLU.
    """

    def __init__(self, opt):
        super(AoA_Decoder_Core, self).__init__()
        self.drop_prob_lm = opt.drop_prob_lm
        self.d_model = opt.rnn_size
        self.use_multi_head = opt.use_multi_head
        self.multi_head_scale = opt.multi_head_scale
        self.use_ctx_drop = getattr(opt, 'ctx_drop', 0)
        self.out_res = getattr(opt, 'out_res', 0)
        self.decoder_type = getattr(opt, 'decoder_type', 'AoA')
        # First LSTM: consumes [word embedding, mean visual feature + prev ctx].
        self.att_lstm = nn.LSTMCell((opt.input_encoding_size + opt.rnn_size), opt.rnn_size)
        self.out_drop = nn.Dropout(self.drop_prob_lm)
        # Context operator mapping [attended feats, h_att] -> output.
        if (self.decoder_type == 'AoA'):
            self.att2ctx = nn.Sequential(nn.Linear(((self.d_model * opt.multi_head_scale) + opt.rnn_size), (2 * opt.rnn_size)), nn.GLU())
        elif (self.decoder_type == 'LSTM'):
            self.att2ctx = nn.LSTMCell(((self.d_model * opt.multi_head_scale) + opt.rnn_size), opt.rnn_size)
        else:
            self.att2ctx = nn.Sequential(nn.Linear(((self.d_model * opt.multi_head_scale) + opt.rnn_size), opt.rnn_size), nn.ReLU())
        if (opt.use_multi_head == 2):
            self.attention = MultiHeadedDotAttention_d(opt.num_heads, opt.rnn_size, project_k_v=0, scale=opt.multi_head_scale, use_output_layer=0, do_aoa=0, norm_q=1)
        else:
            self.attention = Attention(opt)
        # Context dropout is optional; identity when disabled.
        if self.use_ctx_drop:
            self.ctx_drop = nn.Dropout(self.drop_prob_lm)
        else:
            self.ctx_drop = (lambda x: x)
        # Three projections of att_feats used as separate attention inputs
        # when multi-head attention (use_multi_head == 2) is active.
        self.implicit1 = nn.Linear(1024, 2048)
        self.implicit2 = nn.Linear(1024, 2048)
        self.implicit3 = nn.Linear(1024, 2048)

    def forward(self, xt, mean_feats, att_feats, p_att_feats, state, att_masks=None):
        """One decoding step.

        Args:
            xt: current word embedding.
            mean_feats: pooled visual features.
            att_feats: per-region visual features.
            p_att_feats: precomputed attention projections (used only in the
                single-head path).
            state: ((h_att, h_ctx), (c_att, c_ctx)) stacked LSTM states.
            att_masks: optional attention mask.

        Returns:
            (output, new_state) with the same state layout.
        """
        # NOTE(review): assumes att_feats has size 1024 on its last dim (the
        # implicit projections are hard-coded 1024 -> 2048) — TODO confirm.
        p_att_feats1 = self.implicit1(att_feats)
        p_att_feats2 = self.implicit2(att_feats)
        p_att_feats3 = self.implicit3(att_feats)
        # state[0][1] is the previous context output, added to the mean feats.
        (h_att, c_att) = self.att_lstm(torch.cat([xt, (mean_feats + self.ctx_drop(state[0][1]))], 1), (state[0][0], state[1][0]))
        if (self.use_multi_head == 2):
            att = self.attention(h_att, p_att_feats1, p_att_feats2, p_att_feats3, att_masks)
        else:
            att = self.attention(h_att, att_feats, p_att_feats, att_masks)
        ctx_input = torch.cat([att, h_att], 1)
        if (self.decoder_type == 'LSTM'):
            # Second LSTM keeps its own cell state in slot 1 of the state.
            (output, c_logic) = self.att2ctx(ctx_input, (state[0][1], state[1][1]))
            state = (torch.stack((h_att, output)), torch.stack((c_att, c_logic)))
        else:
            output = self.att2ctx(ctx_input)
            # Non-LSTM operators carry the previous cell state through.
            state = (torch.stack((h_att, output)), torch.stack((c_att, state[1][1])))
        if self.out_res:
            # Optional residual connection from the attention LSTM output.
            output = (output + h_att)
        output = self.out_drop(output)
        return (output, state)
def recurse_artifacts(artifacts: list, root) -> Iterable[Path]:
    """Yield artifact files: plain files as-is, directories expanded one level.

    Relative entries are resolved against *root*; entries that are neither an
    existing file nor a directory are silently skipped.
    """
    for entry in artifacts:
        path = Path(entry)
        if not path.is_absolute():
            path = root / path
        if path.is_dir():
            # One level only — no recursive descent into sub-directories.
            yield from path.iterdir()
        elif path.is_file():
            yield path
def test_read_bsrn_logical_records_not_found():
    """Requesting logical records absent from the file yields an empty frame
    that still carries the requested records' column set."""
    (data, metadata) = read_bsrn((DATA_DIR / 'bsrn-lr0100-pay0616.dat'), logical_records=['0300', '0500'])
    # No rows — the fixture file presumably contains only LR0100 data.
    assert data.empty
    # Columns for the requested LR0300/LR0500 records are still present...
    assert ('uva_global' in data.columns)
    assert ('uvb_reflected_std' in data.columns)
    assert ('uva_global_max' in data.columns)
    # ...while columns belonging to the un-requested LR0100 are excluded.
    assert ('dni' not in data.columns)
    assert ('day' not in data.columns)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.