code stringlengths 281 23.7M |
|---|
def test_volume_sample_world_v_linear_values(volume: wp.uint64, points: wp.array(dtype=wp.vec3), values: wp.array(dtype=wp.float32)):
    # One thread per query point.
    # NOTE(review): this reads like a @wp.kernel function (wp.tid(), wp.array
    # params) — the decorator is not visible in this chunk; confirm upstream.
    tid = wp.tid()
    q = points[tid]
    # Map the world-space query point into the volume's index space.
    p = wp.volume_world_to_index(volume, q)
    ones = wp.vec3(1.0, 1.0, 1.0)
    # Trilinearly sample the vec3 volume and fold it to a scalar via
    # dot(sample, ones), i.e. the sum of the three sampled components.
    values[tid] = wp.dot(wp.volume_sample_v(volume, p, wp.Volume.LINEAR), ones)
class TestClusterUtilizationOverTimeRange(unittest.TestCase):
    """Tests for deltacat.utils.resources.ClusterUtilizationOverTimeRange."""

    def test_sanity(self):
        """Utilization sampled over ~3s stays within the mocked cluster capacity."""
        # Local imports keep this self-contained; the patch target mirrors the
        # stripped decorator from the original source.
        import time
        from unittest import mock

        from deltacat.utils.resources import ClusterUtilizationOverTimeRange

        # NOTE(review): the original had a bare string expression where a
        # @mock.patch('deltacat.utils.resources.ray') decorator clearly
        # belonged; restored here as an equivalent context manager.
        with mock.patch('deltacat.utils.resources.ray') as ray_mock:
            # Constant 32-vCPU cluster; available CPUs double on each poll.
            ray_mock.cluster_resources.side_effect = [{'CPU': 32} for _ in range(5)]
            ray_mock.available_resources.side_effect = [
                {'CPU': 2 ** (i + 1)} for i in range(5)
            ]
            with ClusterUtilizationOverTimeRange() as cu:
                time.sleep(3)
                # Used vCPU-seconds bounded by (30 + 28 + 24) = 82 given the
                # mocked availability sequence above.
                self.assertTrue(cu.used_vcpu_seconds <= 82)
                self.assertTrue(cu.total_vcpu_seconds >= cu.used_vcpu_seconds)
                self.assertIsNotNone(cu.total_memory_gb_seconds)
                self.assertIsNotNone(cu.used_memory_gb_seconds)
                self.assertIsNotNone(cu.max_cpu)
class AdaLayerNorm(nn.Module):
    """Adaptive LayerNorm: scale/shift are predicted from a timestep embedding."""

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        # Timestep lookup table -> SiLU -> linear emitting [scale | shift].
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        # The affine parameters come from the conditioning, so the norm
        # itself carries none (elementwise_affine=False).
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        conditioning = self.emb(timestep)
        conditioning = self.silu(conditioning)
        conditioning = self.linear(conditioning)
        scale, shift = torch.chunk(conditioning, 2)
        return self.norm(x) * (1 + scale) + shift
def test_generate_graphql_schema():
    """The management command dumps the GraphQL execution result to schema.json."""
    captured_stdout = io.StringIO()
    fake_open = mock_open()

    class TestSchema():
        a: int

    dump_patch = patch('api.management.commands.graphql_schema.json.dump')
    sync_patch = patch('api.management.commands.graphql_schema.graphql_sync')
    open_patch = patch('api.management.commands.graphql_schema.open', fake_open, create=True)
    with dump_patch as dump_mock, sync_patch as sync_mock, open_patch:
        execution_result = Mock()
        execution_result.data = {'a': 1}
        sync_mock.return_value = execution_result
        call_command('graphql_schema', stdout=captured_stdout)

    assert 'Successfully dumped GraphQL schema to schema.json' in captured_stdout.getvalue()
    dump_mock.assert_called_once()
    # First positional argument of json.dump is the serialized schema payload.
    assert dump_mock.call_args_list[0][0][0] == {'data': {'a': 1}}
class MLMPreprocessor(Preprocessor):
    """Builds padded InputFeatures for MLM-style prompt/PET training."""

    def get_input_features(self, example: InputExample, labelled: bool, priming: bool = False, **kwargs) -> InputFeatures:
        """Encode *example*, pad everything to max_seq_length, map label/task ids.

        Raises:
            ValueError: if the encoded example exceeds max_seq_length.
        """
        input_ids, token_type_ids, block_flag = self.pvp.encode(example)
        attention_mask = [1] * len(input_ids)
        max_len = self.wrapper.config.max_seq_length
        padding_length = max_len - len(input_ids)
        if padding_length < 0:
            raise ValueError(f'Maximum sequence length is too small, got {len(input_ids)} input ids')
        # Right-pad every sequence to the fixed model length.
        pad_id = self.wrapper.tokenizer.pad_token_id
        input_ids = input_ids + [pad_id] * padding_length
        attention_mask = attention_mask + [0] * padding_length
        token_type_ids = token_type_ids + [0] * padding_length
        block_flag = block_flag + [0] * padding_length
        assert len(input_ids) == max_len
        assert len(attention_mask) == max_len
        assert len(token_type_ids) == max_len
        assert len(block_flag) == max_len
        example_label = example.label
        example_task = example.task
        # Labels may be stored as int or str; retry the lookup with the other
        # representation when the first one misses.
        if example_label not in self.label_map:
            if isinstance(example_label, int):
                example_label = str(example_label)
            elif isinstance(example_label, str):
                example_label = int(example_label)
        # -100 is the conventional "ignore" label index for unlabelled examples.
        label = self.label_map[example_label] if example.label is not None else -100
        task = task_to_id[example_task]
        logits = example.logits if example.logits else [-1]
        if labelled:
            mlm_labels = self.pvp.get_mask_positions(input_ids)
        else:
            mlm_labels = [-1] * max_len
        return InputFeatures(guid=int(example.guid.split('-')[1]), input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, task=task, label=label, mlm_labels=mlm_labels, logits=logits, idx=example.idx, block_flag=block_flag)
class testCommandsToCommandFile(unittest.TestCase):
    """Checks that pynag.Control.Command helpers append the expected Nagios
    external-command lines ('[timestamp] COMMAND;arg;...') to the command file.

    Every test follows the same pattern: call the helper, then verify the
    exact line was written — the shared check lives in _assert_command_written.
    """

    def setUp(self):
        self.command_file = '/tmp/cmdfile'
        # NOTE(review): the original line was truncated ('self.timestamp =');
        # a fixed epoch keeps the expected strings deterministic — confirm
        # against the original test's value.
        self.timestamp = 1234567890
        self.testhost = 'hosttest.example.com'
        self.testauthor = ''
        self.test_svc_desc = 'Test Service'
        self.test_svc_group = 'TestSVCGroup'
        self.test_check_command = 'test_check_command'
        self.test_event_handler_command = 'test_event_handler'
        self.check_interval = 50
        self.command = Command
        # Patch open() inside pynag so nothing touches the real filesystem.
        self.command_open_mock = mock_open()
        self.patcher1 = patch('pynag.Control.Command.open', self.command_open_mock, create=True)
        self.patcher1.start()

    def tearDown(self):
        self.patcher1.stop()

    def _assert_command_written(self, expected):
        # Shared assertion: file opened for append, exactly one line written.
        self.command_open_mock.assert_called_with(self.command_file, 'a')
        handle = self.command_open_mock()
        handle.write.assert_called_once_with(expected + '\n')

    def test_add_host_comment(self):
        persistent = 0
        comment = 'Test Comment!'
        self.command.add_host_comment(host_name=self.testhost, persistent=persistent, author=self.testauthor, comment=comment, command_file=self.command_file, timestamp=self.timestamp)
        expected = ('[%s] ADD_HOST_COMMENT;%s;%s;%s;%s' % (self.timestamp, self.testhost, persistent, self.testauthor, comment))
        self._assert_command_written(expected)

    def test_shutdown_program(self):
        self.command.shutdown_program(command_file=self.command_file, timestamp=self.timestamp)
        expected = ('[%s] SHUTDOWN_PROGRAM;' % self.timestamp)
        self._assert_command_written(expected)

    def test_disable_service_group_passive_svc_checks(self):
        self.command.disable_servicegroup_passive_svc_checks(servicegroup_name=self.test_svc_group, command_file=self.command_file, timestamp=self.timestamp)
        expected = ('[%s] DISABLE_SERVICEGROUP_PASSIVE_SVC_CHECKS;%s' % (self.timestamp, self.test_svc_group))
        self._assert_command_written(expected)

    def test_enable_service_group_passive_host_checks(self):
        self.command.enable_servicegroup_passive_host_checks(servicegroup_name=self.test_svc_group, command_file=self.command_file, timestamp=self.timestamp)
        expected = ('[%s] ENABLE_SERVICEGROUP_PASSIVE_HOST_CHECKS;%s' % (self.timestamp, self.test_svc_group))
        self._assert_command_written(expected)

    def test_disable_servicegroup_passive_host_checks(self):
        self.command.disable_servicegroup_passive_host_checks(servicegroup_name=self.test_svc_group, command_file=self.command_file, timestamp=self.timestamp)
        expected = ('[%s] DISABLE_SERVICEGROUP_PASSIVE_HOST_CHECKS;%s' % (self.timestamp, self.test_svc_group))
        self._assert_command_written(expected)

    def test_change_global_host_event_handler(self):
        self.command.change_global_host_event_handler(event_handler_command=self.test_event_handler_command, command_file=self.command_file, timestamp=self.timestamp)
        expected = ('[%s] CHANGE_GLOBAL_HOST_EVENT_HANDLER;%s' % (self.timestamp, self.test_event_handler_command))
        self._assert_command_written(expected)

    def test_change_global_svc_event_handler(self):
        self.command.change_global_svc_event_handler(event_handler_command=self.test_event_handler_command, command_file=self.command_file, timestamp=self.timestamp)
        expected = ('[%s] CHANGE_GLOBAL_SVC_EVENT_HANDLER;%s' % (self.timestamp, self.test_event_handler_command))
        self._assert_command_written(expected)

    def test_change_host_event_handler(self):
        self.command.change_host_event_handler(host_name=self.testhost, event_handler_command=self.test_event_handler_command, command_file=self.command_file, timestamp=self.timestamp)
        expected = ('[%s] CHANGE_HOST_EVENT_HANDLER;%s;%s' % (self.timestamp, self.testhost, self.test_event_handler_command))
        self._assert_command_written(expected)

    def test_change_svc_event_handler(self):
        self.command.change_svc_event_handler(host_name=self.testhost, service_description=self.test_svc_desc, event_handler_command=self.test_event_handler_command, command_file=self.command_file, timestamp=self.timestamp)
        expected = ('[%s] CHANGE_SVC_EVENT_HANDLER;%s;%s;%s' % (self.timestamp, self.testhost, self.test_svc_desc, self.test_event_handler_command))
        self._assert_command_written(expected)

    def test_change_host_check_command(self):
        self.command.change_host_check_command(host_name=self.testhost, check_command=self.test_check_command, command_file=self.command_file, timestamp=self.timestamp)
        expected = ('[%s] CHANGE_HOST_CHECK_COMMAND;%s;%s' % (self.timestamp, self.testhost, self.test_check_command))
        self._assert_command_written(expected)

    def test_change_svc_check_command(self):
        self.command.change_svc_check_command(host_name=self.testhost, service_description=self.test_svc_desc, check_command=self.test_check_command, command_file=self.command_file, timestamp=self.timestamp)
        expected = ('[%s] CHANGE_SVC_CHECK_COMMAND;%s;%s;%s' % (self.timestamp, self.testhost, self.test_svc_desc, self.test_check_command))
        self._assert_command_written(expected)

    def test_change_normal_host_check_interval(self):
        self.command.change_normal_host_check_interval(host_name=self.testhost, check_interval=self.check_interval, command_file=self.command_file, timestamp=self.timestamp)
        expected = ('[%s] CHANGE_NORMAL_HOST_CHECK_INTERVAL;%s;%s' % (self.timestamp, self.testhost, self.check_interval))
        self._assert_command_written(expected)

    def test_enable_svc_notifications(self):
        self.command.enable_svc_notifications(host_name=self.testhost, service_description=self.test_svc_desc, command_file=self.command_file, timestamp=self.timestamp)
        expected = ('[%s] ENABLE_SVC_NOTIFICATIONS;%s;%s' % (self.timestamp, self.testhost, self.test_svc_desc))
        self._assert_command_written(expected)

    def test_change_normal_svc_check_interval(self):
        self.command.change_normal_svc_check_interval(host_name=self.testhost, service_description=self.test_svc_desc, check_interval=self.check_interval, command_file=self.command_file, timestamp=self.timestamp)
        expected = ('[%s] CHANGE_NORMAL_SVC_CHECK_INTERVAL;%s;%s;%s' % (self.timestamp, self.testhost, self.test_svc_desc, self.check_interval))
        self._assert_command_written(expected)

    def test_change_retry_svc_check_interval(self):
        self.command.change_retry_svc_check_interval(host_name=self.testhost, service_description=self.test_svc_desc, check_interval=self.check_interval, command_file=self.command_file, timestamp=self.timestamp)
        expected = ('[%s] CHANGE_RETRY_SVC_CHECK_INTERVAL;%s;%s;%s' % (self.timestamp, self.testhost, self.test_svc_desc, self.check_interval))
        self._assert_command_written(expected)

    def test_change_max_host_check_attempts(self):
        max_attempts = 30
        self.command.change_max_host_check_attempts(host_name=self.testhost, check_attempts=max_attempts, command_file=self.command_file, timestamp=self.timestamp)
        expected = ('[%s] CHANGE_MAX_HOST_CHECK_ATTEMPTS;%s;%s' % (self.timestamp, self.testhost, max_attempts))
        self._assert_command_written(expected)

    def test_change_max_svc_check_attempts(self):
        max_attempts = 30
        self.command.change_max_svc_check_attempts(host_name=self.testhost, service_description=self.test_svc_desc, check_attempts=max_attempts, command_file=self.command_file, timestamp=self.timestamp)
        expected = ('[%s] CHANGE_MAX_SVC_CHECK_ATTEMPTS;%s;%s;%s' % (self.timestamp, self.testhost, self.test_svc_desc, max_attempts))
        self._assert_command_written(expected)

    def test_process_service_check_result(self):
        return_code = 2
        plugin_output = 'output'
        self.command.process_service_check_result(host_name=self.testhost, service_description=self.test_svc_desc, return_code=return_code, plugin_output=plugin_output, command_file=self.command_file, timestamp=self.timestamp)
        expected = ('[%s] PROCESS_SERVICE_CHECK_RESULT;%s;%s;%s;%s' % (self.timestamp, self.testhost, self.test_svc_desc, return_code, plugin_output))
        self._assert_command_written(expected)
class Model(OriginalModel):
    """Autoencoder face-swap model: one shared encoder, two identity decoders.

    NOTE(review): ``self.config`` is read before ``super().__init__()`` runs —
    presumably a property/class attribute on OriginalModel; confirm upstream.
    """

    def __init__(self, *args, **kwargs):
        logger.debug('Initializing %s: (args: %s, kwargs: %s', self.__class__.__name__, args, kwargs)
        self.configfile = kwargs.get('configfile', None)
        # Low-memory variant shrinks dense/conv sizes throughout the network.
        self.lowmem = self.config.get('lowmem', False)
        kwargs['input_shape'] = (self.config['input_size'], self.config['input_size'], 3)
        kwargs['encoder_dim'] = (512 if self.lowmem else self.config['nodes'])
        self.kernel_initializer = RandomNormal(0, 0.02)
        super().__init__(*args, **kwargs)
        logger.debug('Initialized %s', self.__class__.__name__)

    def add_networks(self):
        """Register the two output decoders and the shared encoder."""
        logger.debug('Adding networks')
        self.add_network('decoder', 'a', self.decoder_a(), is_output=True)
        self.add_network('decoder', 'b', self.decoder_b(), is_output=True)
        self.add_network('encoder', None, self.encoder())
        logger.debug('Added networks')

    def encoder(self):
        """Shared encoder: conv stack -> bottleneck dense -> reshaped feature map."""
        kwargs = dict(kernel_initializer=self.kernel_initializer)
        encoder_complexity = (128 if self.lowmem else self.config['complexity_encoder'])
        dense_dim = (384 if self.lowmem else 512)
        # Spatial size at the dense layer is input/16 — presumably each
        # self.blocks.conv halves resolution over the 5-conv stack; confirm.
        dense_shape = (self.input_shape[0] // 16)
        input_ = Input(shape=self.input_shape)
        var_x = input_
        var_x = self.blocks.conv(var_x, encoder_complexity, use_instance_norm=True, **kwargs)
        var_x = self.blocks.conv(var_x, (encoder_complexity * 2), use_instance_norm=True, **kwargs)
        var_x = self.blocks.conv(var_x, (encoder_complexity * 4), **kwargs)
        var_x = self.blocks.conv(var_x, (encoder_complexity * 6), **kwargs)
        var_x = self.blocks.conv(var_x, (encoder_complexity * 8), **kwargs)
        # Bottleneck: flatten -> encoder_dim -> expand back to a 4D feature map.
        var_x = Dense(self.encoder_dim, kernel_initializer=self.kernel_initializer)(Flatten()(var_x))
        var_x = Dense(((dense_shape * dense_shape) * dense_dim), kernel_initializer=self.kernel_initializer)(var_x)
        var_x = Reshape((dense_shape, dense_shape, dense_dim))(var_x)
        return KerasModel(input_, var_x)

    def decoder_a(self):
        """Decoder for identity A: upscale stack with spatial dropout."""
        kwargs = dict(kernel_size=5, kernel_initializer=self.kernel_initializer)
        decoder_complexity = (320 if self.lowmem else self.config['complexity_decoder_a'])
        dense_dim = (384 if self.lowmem else 512)
        decoder_shape = (self.input_shape[0] // 16)
        input_ = Input(shape=(decoder_shape, decoder_shape, dense_dim))
        var_x = input_
        var_x = self.blocks.upscale(var_x, decoder_complexity, **kwargs)
        var_x = SpatialDropout2D(0.25)(var_x)
        var_x = self.blocks.upscale(var_x, decoder_complexity, **kwargs)
        # Lighter dropout in low-memory mode.
        if self.lowmem:
            var_x = SpatialDropout2D(0.15)(var_x)
        else:
            var_x = SpatialDropout2D(0.25)(var_x)
        var_x = self.blocks.upscale(var_x, (decoder_complexity // 2), **kwargs)
        var_x = self.blocks.upscale(var_x, (decoder_complexity // 4), **kwargs)
        var_x = self.blocks.conv2d(var_x, 3, kernel_size=5, padding='same', activation='sigmoid', name='face_out')
        outputs = [var_x]
        # Optional 1-channel mask branch mirroring the face branch.
        if self.config.get('mask_type', None):
            var_y = input_
            var_y = self.blocks.upscale(var_y, decoder_complexity)
            var_y = self.blocks.upscale(var_y, decoder_complexity)
            var_y = self.blocks.upscale(var_y, (decoder_complexity // 2))
            var_y = self.blocks.upscale(var_y, (decoder_complexity // 4))
            var_y = self.blocks.conv2d(var_y, 1, kernel_size=5, padding='same', activation='sigmoid', name='mask_out')
            outputs.append(var_y)
        return KerasModel(input_, outputs=outputs)

    def decoder_b(self):
        """Decoder for identity B: residual upscale stack (richer than A)."""
        kwargs = dict(kernel_size=5, kernel_initializer=self.kernel_initializer)
        # NOTE(review): relative to decoder_a, the config-driven value feeds
        # ``dense_dim`` here while ``decoder_complexity`` is a constant — this
        # looks swapped (dense_dim must match the encoder's output depth);
        # verify against the original model definition.
        dense_dim = (384 if self.lowmem else self.config['complexity_decoder_b'])
        decoder_complexity = (384 if self.lowmem else 512)
        decoder_shape = (self.input_shape[0] // 16)
        input_ = Input(shape=(decoder_shape, decoder_shape, dense_dim))
        var_x = input_
        if self.lowmem:
            # Plain upscales only when memory is constrained.
            var_x = self.blocks.upscale(var_x, decoder_complexity, **kwargs)
            var_x = self.blocks.upscale(var_x, (decoder_complexity // 2), **kwargs)
            var_x = self.blocks.upscale(var_x, (decoder_complexity // 4), **kwargs)
            var_x = self.blocks.upscale(var_x, (decoder_complexity // 8), **kwargs)
        else:
            # Upscale + residual block pairs for the full-size model.
            var_x = self.blocks.upscale(var_x, decoder_complexity, res_block_follows=True, **kwargs)
            var_x = self.blocks.res_block(var_x, decoder_complexity, kernel_initializer=self.kernel_initializer)
            var_x = self.blocks.upscale(var_x, decoder_complexity, res_block_follows=True, **kwargs)
            var_x = self.blocks.res_block(var_x, decoder_complexity, kernel_initializer=self.kernel_initializer)
            var_x = self.blocks.upscale(var_x, (decoder_complexity // 2), res_block_follows=True, **kwargs)
            var_x = self.blocks.res_block(var_x, (decoder_complexity // 2), kernel_initializer=self.kernel_initializer)
            var_x = self.blocks.upscale(var_x, (decoder_complexity // 4), **kwargs)
        var_x = self.blocks.conv2d(var_x, 3, kernel_size=5, padding='same', activation='sigmoid', name='face_out')
        outputs = [var_x]
        # Optional mask branch; lowmem uses one fewer full-width upscale but
        # adds a final //8 stage.
        if self.config.get('mask_type', None):
            var_y = input_
            var_y = self.blocks.upscale(var_y, decoder_complexity)
            if (not self.lowmem):
                var_y = self.blocks.upscale(var_y, decoder_complexity)
            var_y = self.blocks.upscale(var_y, (decoder_complexity // 2))
            var_y = self.blocks.upscale(var_y, (decoder_complexity // 4))
            if self.lowmem:
                var_y = self.blocks.upscale(var_y, (decoder_complexity // 8))
            var_y = self.blocks.conv2d(var_y, 1, kernel_size=5, padding='same', activation='sigmoid', name='mask_out')
            outputs.append(var_y)
        return KerasModel(input_, outputs=outputs)
class Trainer(TrainerBase):
    """Training/eval driver for FewVLM COCO captioning (DDP- and fp16-aware).

    NOTE(review): the source dump is indentation-flattened; the nesting below
    is a reconstruction — verify against the original file.
    """

    def __init__(self, args, train_loader=None, val_loader=None, test_loader=None, train=True):
        super().__init__(args, train_loader=train_loader, val_loader=val_loader, test_loader=test_loader, train=train)
        # Deferred import: the captioning model is only needed when a Trainer
        # is actually constructed.
        from cococaption_model import FewVLMCOCOCaption
        model_kwargs = {}
        # NOTE(review): model_class is only assigned for t5 backbones; any
        # other backbone would hit a NameError below — confirm intended.
        if ('t5' in args.backbone):
            model_class = FewVLMCOCOCaption
        config = self.create_config()
        self.tokenizer = self.create_tokenizer()
        self.model = self.create_model(model_class, config, **model_kwargs)
        if ('t5' in self.args.tokenizer):
            self.model.resize_token_embeddings(self.tokenizer.vocab_size)
        self.model.tokenizer = self.tokenizer
        self.start_epoch = None
        if (args.load is not None):
            ckpt_path = (args.load + '.pth')
            self.load_checkpoint(ckpt_path)
        if self.args.from_scratch:
            self.init_weights()
        print(f'Model Launching at GPU {self.args.gpu}')
        if self.verbose:
            from time import time
            start = time()
        self.model = self.model.to(args.gpu)
        if train:
            (self.optim, self.lr_scheduler) = self.create_optimizer_and_scheduler()
            # Mixed precision: native AMP scaler if available, else apex O1.
            if (self.args.fp16 and _use_native_amp):
                self.scaler = torch.cuda.amp.GradScaler()
            elif _use_apex:
                (self.model, self.optim) = amp.initialize(self.model, self.optim, opt_level='O1', verbosity=self.verbose)
        if args.multiGPU:
            if args.distributed:
                self.model = DDP(self.model, device_ids=[args.gpu], find_unused_parameters=True)
        if self.verbose:
            print(f'It took {(time() - start):.1f}s')

    def train(self):
        """Run the training loop; keep the best-CIDEr checkpoint, then test it."""
        if self.verbose:
            loss_meter = LossMeter()
            best_valid = 0.0
            best_epoch = 0
        if self.args.distributed:
            dist.barrier()
        global_step = 0
        epochs = self.args.epochs
        if (not self.args.test_only):
            for epoch in range(epochs):
                # Resume epoch numbering when restarted from a checkpoint.
                if (self.start_epoch is not None):
                    epoch += self.start_epoch
                self.model.train()
                if self.args.distributed:
                    self.train_loader.sampler.set_epoch(epoch)
                if self.verbose:
                    pbar = tqdm(total=len(self.train_loader), ncols=120)
                epoch_results = {'loss': 0.0}
                for (step_i, batch) in enumerate(self.train_loader):
                    # Forward pass (DDP requires unwrapping via .module).
                    if (self.args.fp16 and _use_native_amp):
                        with autocast():
                            if self.args.distributed:
                                results = self.model.module.train_step(batch)
                            else:
                                results = self.model.train_step(batch)
                    elif self.args.distributed:
                        results = self.model.module.train_step(batch)
                    else:
                        results = self.model.train_step(batch)
                    loss = results['loss']
                    # Backward: native AMP scaler, apex loss scaling, or plain.
                    if (self.args.fp16 and _use_native_amp):
                        self.scaler.scale(loss).backward()
                    elif (self.args.fp16 and _use_apex):
                        with amp.scale_loss(loss, self.optim) as scaled_loss:
                            scaled_loss.backward()
                    else:
                        loss.backward()
                    loss = loss.detach()
                    if (self.args.clip_grad_norm > 0):
                        if (self.args.fp16 and _use_native_amp):
                            # Unscale first so clipping sees true gradient norms.
                            self.scaler.unscale_(self.optim)
                            torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip_grad_norm)
                        elif (self.args.fp16 and _use_apex):
                            torch.nn.utils.clip_grad_norm_(amp.master_params(self.optim), self.args.clip_grad_norm)
                        else:
                            torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip_grad_norm)
                    # Gradient accumulation: only step on boundaries/last batch.
                    update = True
                    if (self.args.gradient_accumulation_steps > 1):
                        if (step_i == 0):
                            update = False
                        elif (((step_i % self.args.gradient_accumulation_steps) == 0) or (step_i == (len(self.train_loader) - 1))):
                            update = True
                        else:
                            update = False
                    if update:
                        if (self.args.fp16 and _use_native_amp):
                            self.scaler.step(self.optim)
                            self.scaler.update()
                        else:
                            self.optim.step()
                        if self.lr_scheduler:
                            self.lr_scheduler.step()
                        # Drop grads (cheaper than zeroing) after each update.
                        for param in self.model.parameters():
                            param.grad = None
                        global_step += 1
                    for (k, v) in results.items():
                        if (k in epoch_results):
                            epoch_results[k] += v.item()
                    # Current LR for the progress bar (scheduler API changed in 1.4).
                    if self.lr_scheduler:
                        if (version.parse(torch.__version__) >= version.parse('1.4')):
                            lr = self.lr_scheduler.get_last_lr()[0]
                        else:
                            lr = self.lr_scheduler.get_lr()[0]
                    else:
                        try:
                            lr = self.optim.get_lr()[0]
                        except AttributeError:
                            lr = self.args.lr
                    if self.verbose:
                        loss_meter.update(loss.item())
                        desc_str = f'Epoch {epoch} | LR {lr:.6f} | Steps {global_step}'
                        desc_str += f' | Loss {loss_meter.val:4f}'
                        pbar.set_description(desc_str)
                        pbar.update(1)
                if self.args.distributed:
                    dist.barrier()
                if self.verbose:
                    pbar.close()
                    # Per-epoch validation; save whenever CIDEr improves.
                    valid_results = self.evaluate(self.val_loader)
                    valid_score = valid_results['CIDEr']
                    if ((valid_score > best_valid) or (epoch == 0)):
                        best_valid = valid_score
                        best_epoch = epoch
                        self.save('BEST')
                    log_str = ''
                    log_str += pformat(valid_results)
                    log_str += ('\nEpoch %d: Valid CIDEr %0.4f' % (epoch, valid_score))
                    log_str += ('\nEpoch %d: Best CIDEr %0.4f\n' % (best_epoch, best_valid))
                    print(log_str)
                if self.args.distributed:
                    dist.barrier()
            if self.verbose:
                self.save('LAST')
        # Final test-set evaluation (rank-0 only).
        if self.verbose:
            if (not os.path.isdir(self.args.output)):
                os.makedirs(self.args.output, exist_ok=True)
            if (not self.args.test_only):
                best_path = os.path.join(self.args.output, 'BEST')
                self.load(best_path)
                # NOTE(review): message says "Uploaded" though this loads a
                # checkpoint — wording left unchanged.
                print(f'''
Uploaded checkpoint {best_epoch}''', best_path)
            test_results = self.evaluate(self.test_loader)
            log_str = 'Test set results\n'
            log_str += pformat(test_results)
            print(log_str)
        if self.args.distributed:
            dist.barrier()

    def predict(self, loader, dump_path=None):
        """Generate captions for every batch; returns {'predictions', 'targets'}."""
        self.model.eval()
        with torch.no_grad():
            predictions = []
            targets = []
            gen_kwargs = {}
            gen_kwargs['num_beams'] = self.args.num_beams
            gen_kwargs['max_length'] = self.args.gen_max_length
            for (i, batch) in enumerate(tqdm(loader, ncols=120, desc='Prediction')):
                if self.args.distributed:
                    results = self.model.module.test_step(batch, **gen_kwargs)
                else:
                    results = self.model.test_step(batch, **gen_kwargs)
                predictions.extend(results['pred'])
                if ('targets' in batch):
                    targets.extend(batch['targets'])
            results = {'predictions': predictions, 'targets': targets}
            return results

    def evaluate(self, loader, dump_path=None):
        """Score predictions with the loader's evaluator (CIDEr etc.).

        Returns None when dump_path is given (nothing is evaluated then).
        """
        evaluator = loader.evaluator
        results = self.predict(loader, dump_path)
        predictions = results['predictions']
        if (dump_path is None):
            targets = results['targets']
            eval_results = evaluator.evaluate(predictions, targets)
            return eval_results

    # NOTE(review): defined without ``self`` though it sits at method level —
    # the parameter or a decorator was likely lost in this copy; confirm.
    def oracle_score(loader):
        """Upper-bound score: answer every question with its label argmax."""
        evaluator = loader.evaluator
        quesid2ans = {}
        for (i, batch) in enumerate(loader):
            ques_id = batch['question_ids']
            label = batch['targets']
            (_, label) = label.max(1)
            for (qid, l) in zip(ques_id, label.cpu().numpy()):
                ans = loader.dataset.raw_dataset.label2ans[l]
                quesid2ans[qid] = ans
        return evaluator.evaluate(quesid2ans)
def test_kuccsd_openshell():
    """Check supercell UCCSD against k-point KUCCSD for an open-shell H system.

    The per-unit-cell energies of a 3x1x1 supercell calculation should match
    the 3-k-point calculation on the unit cell.

    NOTE(review): the reference energies in the assertions (``-1.`` / ``-0.``)
    appear truncated in this copy — restore the full literals before trusting
    these tolerances.
    """
    cell = gto.M(unit='B', a=[[0.0, 6., 6.], [6., 0.0, 6.], [6., 6., 0.0]], mesh=([13] * 3), atom='H 0 0 0\n H 1. 1. 1.\n H 3. 3. 3.', basis=[[0, (1.0, 1.0)], [0, (0.5, 1.0)]], verbose=1, charge=0, spin=1)
    nmp = [3, 1, 1]
    # Triple the spin to match the 3x1x1 replication (mutates the cell that is
    # also used for the k-point run below).
    cell.spin = (cell.spin * 3)
    supcell = super_cell(cell, nmp)
    umf = scf.UHF(supcell, exxdiv=None)
    umf.conv_tol = 1e-11
    ehf = umf.kernel()
    ucc = cc.UCCSD(umf)
    ucc.conv_tol = 1e-12
    (ecc, t1, t2) = ucc.kernel()
    print(('UHF energy (supercell) %.9f \n' % (float(ehf) / 3.0)))
    print(('UCCSD correlation energy (supercell) %.9f \n' % (float(ecc) / 3.0)))
    # Per-unit-cell energies from the supercell calculation.
    assert (abs(((ehf / 3) - (- 1.))) < 1e-07)
    assert (abs(((ecc / 3) - (- 0.))) < 1e-06)
    # Gamma-centered 3x1x1 k-mesh on the unit cell.
    kpts = cell.make_kpts(nmp)
    kpts -= kpts[0]
    kmf = scf.KUHF(cell, kpts, exxdiv=None)
    kmf.conv_tol = 1e-11
    ehf = kmf.kernel()
    kcc = cc.KUCCSD(kmf)
    kcc.conv_tol = 1e-12
    (ecc, t1, t2) = kcc.kernel()
    print(('UHF energy (kpts) %.9f \n' % float(ehf)))
    print(('UCCSD correlation energy (kpts) %.9f \n' % float(ecc)))
    assert (abs((ehf - (- 1.))) < 1e-07)
    assert (abs((ecc - (- 0.))) < 1e-06)
def wps_miniprogram_clockin(sid: str):
    """Perform a WPS mini-program daily check-in for session id *sid*.

    Returns 1 on (apparent) success, 0 on failure; progress is written to the
    module-level ``sio`` stream and requests go through session ``s``.

    NOTE(review): this copy is heavily corrupted — the URL literals are
    unterminated (``clockin_url = '``) and the user-facing (originally
    Chinese) message strings have been stripped. The code is reproduced
    byte-identically; restore the literals from the original source before use.
    """
    sio.write('\n\n ---wps---\n\n')
    # Basic sid sanity checks: non-empty, no wildcard, must start with 'V'.
    if (len(sid) == 0):
        sio.write(': sid, \n\n')
        return 0
    elif (('*' in sid) or (sid[0] != 'V')):
        sio.write(': sid, \n\n')
        return 0
    clockin_url = '
    r = s.get(clockin_url, headers={'sid': sid})
    # A 302 redirect in the history indicates an invalid/expired sid.
    if (len(r.history) != 0):
        if (r.history[0].status_code == 302):
            sio.write(': sid, \n\n')
            return 0
    resp = json.loads(r.text)
    # Dispatch on the server's (stripped) status message.
    if (resp['msg'] == ''):
        sio.write(': {}\n\n'.format(r.text))
        return 1
    elif (resp['msg'] == ''):
        sio.write(': , \n\n')
        return 0
    elif (resp['msg'] == ''):
        # Needs sign-up before check-in.
        sio.write('\n\n')
        signup_url = '
        r = s.get(signup_url, headers={'sid': sid})
        resp = json.loads(r.text)
        if (resp['result'] == 'ok'):
            sio.write(': , \n\n')
            return 1
        else:
            sio.write(': , \n\n')
            return 0
    elif (resp['msg'] == ''):
        # Quiz flow: fetch a single-select question and answer it.
        getquestion_url = '
        r = s.get(getquestion_url, headers={'sid': sid})
        # Known-correct answer texts (stripped to fragments in this copy).
        answer_set = {'WPS', '100G', 'WPS', 'WPSPDFdoc', 'WPSPDF', 'WPSPDF', 'PDFWORD', 'WPS', 'WPS', 'WPS', ',', ''}
        resp = json.loads(r.text)
        # Re-fetch until a single-select question is served.
        while (resp['data']['multi_select'] == 1):
            r = s.get(getquestion_url, headers={'sid': sid})
            resp = json.loads(r.text)
        # Default to option 4; pick the first option found in answer_set.
        answer_id = 3
        for i in range(4):
            opt = resp['data']['options'][i]
            if (opt in answer_set):
                answer_id = (i + 1)
                break
        sio.write(': {}\n\n'.format(resp['data']['options']))
        sio.write(': {}\n\n'.format(answer_id))
        answer_url = '
        r = s.post(answer_url, headers={'sid': sid}, data={'answer': answer_id})
        resp = json.loads(r.text)
        if (resp['msg'] == 'wrong answer'):
            # Brute-force the remaining options until one is accepted.
            sio.write(', \n\n')
            for i in range(4):
                r = s.post(answer_url, headers={'sid': sid}, data={'answer': (i + 1)})
                resp = json.loads(r.text)
                # NOTE(review): writes an int — text streams require str;
                # likely str(i + 1) originally.
                sio.write((i + 1))
                if (resp['result'] == 'ok'):
                    sio.write(r.text)
                    break
        clockin_url = '
        r = s.get(clockin_url, headers={'sid': sid})
        sio.write(': {}\n\n'.format(r.text))
        return 1
    elif (resp['msg'] == 'ParamData Empty'):
        sio.write(': {}\n\n'.format(r.text))
        signup_url = '
        r = s.get(signup_url, headers={'sid': sid})
        sio.write(', \n\n')
        return 1
    elif (resp['msg'] == ''):
        sio.write(': \n\n')
        signup_url = '
        r = s.get(signup_url, headers={'sid': sid})
        resp = json.loads(r.text)
        if (resp['result'] == 'ok'):
            sio.write(', \n\n')
            return 1
        else:
            sio.write(': , \n\n')
            return 0
    elif (resp['result'] == 'error'):
        sio.write(': {}\n\n'.format(r.text))
        signup_url = '
        r = s.get(signup_url, headers={'sid': sid})
        resp = json.loads(r.text)
        if (resp['result'] == 'ok'):
            sio.write(', \n\n')
            return 1
        else:
            sio.write(': , \n\n')
            return 0
class DistillKL(nn.Module):
    """Temperature-scaled KL-divergence loss for knowledge distillation."""

    def __init__(self, T):
        super(DistillKL, self).__init__()
        # Softmax temperature shared by student and teacher.
        self.T = T

    def forward(self, y_s, y_t):
        temperature = self.T
        # Softened student log-probabilities and teacher probabilities.
        log_p_student = F.log_softmax(y_s / temperature, dim=1)
        p_teacher = F.softmax(y_t / temperature, dim=1)
        kl = F.kl_div(log_p_student, p_teacher, reduction='batchmean')
        # T^2 restores the gradient magnitude lost to temperature scaling.
        return kl * (temperature ** 2)
class GameObject(SavesProjectID):
    """A named scene object composed of Components (always owning a Transform)."""

    def __init__(self, name='GameObject', parent=None):
        self.name = name
        self.components = []
        # Every GameObject gets exactly one Transform, created up front.
        self.transform = self.AddComponent(Transform)
        if parent is not None:
            self.transform.ReparentTo(parent.transform)
        self.tag = Tag(0)
        self.enabled = True
        self.scene = None

    @classmethod
    def BareObject(cls, name='GameObject'):
        """Alternate constructor: a skeleton object with no Transform/components.

        NOTE(review): the original dump showed this taking ``cls`` with no
        @classmethod decorator (decorators appear stripped elsewhere in this
        file); the decorator is restored here.
        """
        obj = cls.__new__(cls)
        obj.name = name
        obj.components = []
        obj.transform = None
        obj.scene = None
        return obj

    def AddComponent(self, componentClass):
        """Instantiate *componentClass*, attach it to this object and return it.

        Raises:
            ComponentException: for non-Component classes, or a duplicate
                SingleComponent.
        """
        if not isinstance(componentClass, type):
            raise ComponentException(f'Cannot add {componentClass!r} to the GameObject; it is not a component')
        if not issubclass(componentClass, Component):
            raise ComponentException(f'Cannot add {componentClass.__name__} to the GameObject; it is not a component')
        if issubclass(componentClass, SingleComponent) and (self.GetComponent(componentClass) is not None):
            raise ComponentException(f'Cannot add {componentClass.__name__} to the GameObject; it already has one')
        # Allocate first and wire transform/gameObject references, then run
        # __init__ so the component can see them during initialization.
        component = Component.__new__(componentClass)
        if componentClass is Transform:
            component.transform = component
        else:
            component.transform = self.transform
        component.gameObject = self
        component.__init__()
        self.components.append(component)
        return component

    def GetComponent(self, componentClass):
        """Return the first attached component of the given type, or None."""
        for component in self.components:
            if isinstance(component, componentClass):
                return component
        return None

    def RemoveComponent(self, componentClass):
        """Remove the first component of the given type (Transform forbidden).

        Raises:
            ComponentException: if absent, or if componentClass is Transform.
        """
        component = self.GetComponent(componentClass)
        if component is None:
            raise ComponentException(f"Cannot remove {componentClass.__name__} from the GameObject; it doesn't have one")
        if componentClass is Transform:
            raise ComponentException('Cannot remove a Transform from a GameObject')
        self.components.remove(component)

    def GetComponents(self, componentClass):
        """Return all attached components of the given type."""
        return [cpnt for cpnt in self.components if isinstance(cpnt, componentClass)]

    def RemoveComponents(self, componentClass):
        """Remove every component of the given type (Transform forbidden)."""
        components = self.GetComponents(componentClass)
        if componentClass is Transform:
            raise ComponentException('Cannot remove a Transform from a GameObject')
        for component in components:
            self.components.remove(component)

    def __repr__(self):
        return f'<GameObject name={self.name!r} components={[type(x).__name__ for x in self.components]}>'

    # __str__ was an identical duplicate of __repr__; alias it instead.
    __str__ = __repr__
class DataPrep(object):
    """Pre-processing pipeline for tabular (GAN) training data.

    On construction this
      * moves the target column (if any) to the last position;
      * normalises missing data (blanks -> NaN -> the string 'empty', and for
        continuous columns the numeric sentinel -9999999, registered as an
        extra "mixed" mode);
      * log-transforms the columns named in ``log`` (shifting when the column
        minimum is <= 0; the minimum is remembered in ``self.lower_bounds``);
      * label-encodes categorical columns (encoders kept in
        ``self.label_encoder_list``) and records per-type column indices in
        ``self.column_types``.

    ``inverse_prep`` maps generated data back to the original feature space.
    """

    def __init__(self, raw_df: pd.DataFrame, categorical: list, log: list, mixed: dict, general: list, non_categorical: list, integer: list, type: dict, test_ratio: float):
        # NOTE: `type` (which shadows the builtin) and `test_ratio` are kept
        # for interface compatibility; `type` maps problem-type -> target
        # column name (or None when there is no target).
        self.categorical_columns = categorical
        self.log_columns = log
        self.mixed_columns = mixed
        self.general_columns = general
        self.non_categorical_columns = non_categorical
        self.integer_columns = integer
        self.column_types = dict()
        self.column_types['categorical'] = []
        self.column_types['mixed'] = {}
        self.column_types['general'] = []
        self.column_types['non_categorical'] = []
        self.lower_bounds = {}
        self.label_encoder_list = []
        # Move the target column (if any) to the end of the frame.
        target_col = list(type.values())[0]
        if target_col is not None:
            y_real = raw_df[target_col]
            X_real = raw_df.drop(columns=[target_col])
            X_train_real, y_train_real = X_real, y_real
            X_train_real[target_col] = y_train_real
            self.df = X_train_real
        else:
            self.df = raw_df
        # Normalise missing data: blanks -> NaN -> the string 'empty'.
        self.df = self.df.replace(' ', np.nan)
        self.df = self.df.fillna('empty')
        all_columns = set(self.df.columns)
        irrelevant_missing_columns = set(self.categorical_columns)
        relevant_missing_columns = list(all_columns - irrelevant_missing_columns)
        # Continuous columns cannot keep the string sentinel: swap 'empty' for
        # the numeric sentinel -9999999 and register it as a "mixed" mode.
        for i in relevant_missing_columns:
            if i in self.log_columns:
                if 'empty' in list(self.df[i].values):
                    self.df[i] = self.df[i].apply(lambda x: -9999999 if x == 'empty' else x)
                    self.mixed_columns[i] = [-9999999]
            elif i in list(self.mixed_columns.keys()):
                if 'empty' in list(self.df[i].values):
                    self.df[i] = self.df[i].apply(lambda x: -9999999 if x == 'empty' else x)
                    self.mixed_columns[i].append(-9999999)
            elif 'empty' in list(self.df[i].values):
                self.df[i] = self.df[i].apply(lambda x: -9999999 if x == 'empty' else x)
                self.mixed_columns[i] = [-9999999]
        if self.log_columns:
            for log_column in self.log_columns:
                # Only non-sentinel entries define the column's lower bound.
                valid_indices = []
                for idx, val in enumerate(self.df[log_column].values):
                    if val != -9999999:
                        valid_indices.append(idx)
                eps = 1
                lower = np.min(self.df[log_column].iloc[valid_indices].values)
                self.lower_bounds[log_column] = lower
                if lower > 0:
                    self.df[log_column] = self.df[log_column].apply(lambda x: np.log(x) if x != -9999999 else -9999999)
                elif lower == 0:
                    self.df[log_column] = self.df[log_column].apply(lambda x: np.log(x + eps) if x != -9999999 else -9999999)
                else:
                    # Shift strictly positive before taking the log.
                    self.df[log_column] = self.df[log_column].apply(lambda x: np.log(x - lower + eps) if x != -9999999 else -9999999)
        # Label-encode categoricals and record per-type column indices.
        for column_index, column in enumerate(self.df.columns):
            if column in self.categorical_columns:
                label_encoder = preprocessing.LabelEncoder()
                self.df[column] = self.df[column].astype(str)
                label_encoder.fit(self.df[column])
                current_label_encoder = dict()
                current_label_encoder['column'] = column
                current_label_encoder['label_encoder'] = label_encoder
                transformed_column = label_encoder.transform(self.df[column])
                self.df[column] = transformed_column
                self.label_encoder_list.append(current_label_encoder)
                self.column_types['categorical'].append(column_index)
                if column in self.general_columns:
                    self.column_types['general'].append(column_index)
                if column in self.non_categorical_columns:
                    self.column_types['non_categorical'].append(column_index)
            elif column in self.mixed_columns:
                self.column_types['mixed'][column_index] = self.mixed_columns[column]
            elif column in self.general_columns:
                self.column_types['general'].append(column_index)
        super().__init__()

    def inverse_prep(self, data, eps=1):
        """Map generated/encoded data back to the original feature space.

        Undoes label encoding, the log transform (mirroring the three cases
        in ``__init__``) and integer rounding, then turns the sentinels back
        into NaN.  Returns a new DataFrame.
        """
        df_sample = pd.DataFrame(data, columns=self.df.columns)
        # Undo label encoding.
        for i in range(len(self.label_encoder_list)):
            le = self.label_encoder_list[i]['label_encoder']
            df_sample[self.label_encoder_list[i]['column']] = df_sample[self.label_encoder_list[i]['column']].astype(int)
            df_sample[self.label_encoder_list[i]['column']] = le.inverse_transform(df_sample[self.label_encoder_list[i]['column']])
        # Undo the log transform.
        if self.log_columns:
            for i in df_sample:
                if i in self.log_columns:
                    lower_bound = self.lower_bounds[i]
                    if lower_bound > 0:
                        # BUG FIX: the exponentiated values were previously
                        # discarded (the .apply result was never assigned
                        # back), so positive-lower-bound columns stayed in
                        # log space.
                        df_sample[i] = df_sample[i].apply(lambda x: np.exp(x))
                    elif lower_bound == 0:
                        df_sample[i] = df_sample[i].apply(lambda x: np.ceil(np.exp(x) - eps) if (np.exp(x) - eps) < 0 else (np.exp(x) - eps))
                    else:
                        df_sample[i] = df_sample[i].apply(lambda x: (np.exp(x) - eps) + lower_bound)
        # Integer columns: round and cast back to int.
        if self.integer_columns:
            for column in self.integer_columns:
                df_sample[column] = np.round(df_sample[column].values)
                df_sample[column] = df_sample[column].astype(int)
        # Sentinels become missing values again.
        df_sample.replace(-9999999, np.nan, inplace=True)
        df_sample.replace('empty', np.nan, inplace=True)
        return df_sample
class Requirement(packaging.requirements.Requirement):
    """pkg_resources-style requirement built on ``packaging``'s parser.

    Adds the legacy attributes (`project_name`, `key`, `specs`, `extras` as a
    tuple) and value-based equality/hashing via ``hashCmp``.
    """

    def __init__(self, requirement_string):
        """Parse a PEP 508 requirement string (DO NOT use it in a loop)."""
        super(Requirement, self).__init__(requirement_string)
        self.unsafe_name = self.name
        project_name = safe_name(self.name)
        self.project_name, self.key = project_name, project_name.lower()
        self.specs = [(spec.operator, spec.version) for spec in self.specifier]
        self.extras = tuple(map(safe_extra, self.extras))
        # hashCmp captures everything that makes two requirements equivalent.
        self.hashCmp = (self.key, self.url, self.specifier, frozenset(self.extras), str(self.marker) if self.marker else None)
        self.__hash = hash(self.hashCmp)

    def __eq__(self, other):
        return isinstance(other, Requirement) and self.hashCmp == other.hashCmp

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, item):
        """True when a Distribution (same key) or a version string satisfies
        this requirement.  Prereleases are allowed here; the specifier itself
        still filters them."""
        if isinstance(item, Distribution):
            if item.key != self.key:
                return False
            item = item.version
        return self.specifier.contains(item, prereleases=True)

    def __hash__(self):
        return self.__hash

    def __repr__(self):
        return 'Requirement.parse(%r)' % str(self)

    @staticmethod
    def parse(s):
        # FIX: declared @staticmethod so it also works when called on an
        # instance; class-level calls (`Requirement.parse(...)`) behave
        # exactly as before.
        (req,) = parse_requirements(s)
        return req
class Display(object):
    """Core X11 display connection (python-xlib style).

    Owns the server socket, the outgoing request queue and the incoming
    event queue, plus the locks that serialise access to them.  Threading
    model: any thread may queue requests or wait for events; at any moment
    at most one thread actually drives the socket (``send_and_recv``),
    coordinated through the ``send_active``/``recv_active`` flags and the
    ``*_wait_lock`` handoff locks.
    """
    # Class-level registries shared by all displays; extension modules add
    # entries to these at import/initialisation time.
    extension_major_opcodes = {}
    error_classes = error.xerror_class.copy()
    event_classes = event.event_class.copy()

    def __init__(self, display=None):
        """Connect and perform the X connection setup handshake.

        `display` is a display string like ':0.1'; when None the environment
        ($DISPLAY) is consulted by connect.get_display.
        Raises error.DisplayConnectionError when the server refuses us.
        """
        (name, protocol, host, displayno, screenno) = connect.get_display(display)
        self.display_name = name
        self.default_screen = screenno
        self.socket = connect.get_socket(name, protocol, host, displayno)
        (auth_name, auth_data) = connect.get_auth(self.socket, name, protocol, host, displayno)
        # A fatal socket error is stored here and re-raised to every caller.
        self.socket_error_lock = lock.allocate_lock()
        self.socket_error = None
        # Two-lock scheme: the read lock serialises event consumers, the
        # write lock guards the queue structure itself.
        self.event_queue_read_lock = lock.allocate_lock()
        self.event_queue_write_lock = lock.allocate_lock()
        self.event_queue = []
        # Outgoing requests; serials wrap at 65536 (16-bit sequence numbers).
        self.request_queue_lock = lock.allocate_lock()
        self.request_serial = 1
        self.request_queue = []
        # Coordination of which thread drives the socket.
        self.send_recv_lock = lock.allocate_lock()
        self.send_active = 0
        self.recv_active = 0
        self.event_waiting = 0
        self.event_wait_lock = lock.allocate_lock()
        self.request_waiting = 0
        self.request_wait_lock = lock.allocate_lock()
        # Receive in chunks of the largest power of two that fits in the
        # kernel receive buffer.
        buffer_size = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF)
        buffer_size = math.pow(2, math.floor(math.log(buffer_size, 2)))
        self.recv_buffer_size = int(buffer_size)
        # Requests sent and awaiting a reply/error, in serial order.
        self.sent_requests = []
        self.recv_packet_len = 0
        self.data_send = b''
        self.data_recv = b''
        self.data_sent_bytes = 0
        # Resource id allocation bookkeeping (ids handed out within the
        # server-assigned base/mask range).
        self.resource_id_lock = lock.allocate_lock()
        self.resource_ids = {}
        self.last_resource_id = 0
        self.error_handler = None
        # Detect host byte order: 256 packed as 'H' starts with a non-zero
        # byte only on big-endian machines.
        self.big_endian = struct.unpack('BB', struct.pack('H', 256))[0]
        if self.big_endian:
            order = 66
        else:
            order = 108
        # 66 is ord('B'), 108 is ord('l') -- the protocol's byte-order codes.
        r = ConnectionSetupRequest(self, byte_order=order, protocol_major=11, protocol_minor=0, auth_prot_name=auth_name, auth_prot_data=auth_data)
        if (r.status != 1):
            raise error.DisplayConnectionError(self.display_name, r.reason)
        self.info = r
        # Clamp the requested screen number to what the server actually has.
        self.default_screen = min(self.default_screen, (len(self.info.roots) - 1))

    def get_display_name(self):
        """Return the display string used to connect."""
        return self.display_name

    def get_default_screen(self):
        """Return the (clamped) default screen number."""
        return self.default_screen

    def fileno(self):
        """Return the socket file descriptor (for select/poll integration)."""
        self.check_for_error()
        return self.socket.fileno()

    def next_event(self):
        """Block until an event is available and return it.

        The read lock ensures only one thread at a time consumes events; the
        write lock is dropped while blocking in send_and_recv so the network
        layer can still append events.
        """
        self.check_for_error()
        self.event_queue_read_lock.acquire()
        self.event_queue_write_lock.acquire()
        while (not self.event_queue):
            self.send_recv_lock.acquire()
            self.event_queue_write_lock.release()
            # send_and_recv releases send_recv_lock internally.
            self.send_and_recv(event=True)
            self.event_queue_write_lock.acquire()
        event = self.event_queue[0]
        del self.event_queue[0]
        self.event_queue_write_lock.release()
        self.event_queue_read_lock.release()
        return event

    def pending_events(self):
        """Return the number of queued events, after a non-blocking poll of
        the socket.  send_and_recv releases the send_recv_lock we acquire."""
        self.check_for_error()
        self.send_recv_lock.acquire()
        self.send_and_recv(recv=True)
        self.event_queue_write_lock.acquire()
        count = len(self.event_queue)
        self.event_queue_write_lock.release()
        return count

    def flush(self):
        """Push all queued requests onto the wire (non-blocking for replies)."""
        self.check_for_error()
        self.send_recv_lock.acquire()
        self.send_and_recv(flush=True)

    def close(self):
        """Flush pending requests and shut the connection down."""
        self.flush()
        self.close_internal('client')

    def set_error_handler(self, handler):
        """Install a callback for errors that no request is waiting for."""
        self.error_handler = handler

    def allocate_resource_id(self):
        """Return an unused resource id within the server-assigned range.

        Scans forward from the previously allocated id, wrapping at the
        resource id mask; raises error.ResourceIDError when exhausted.
        """
        self.resource_id_lock.acquire()
        try:
            i = self.last_resource_id
            while (i in self.resource_ids):
                i = (i + 1)
                if (i > self.info.resource_id_mask):
                    i = 0
                if (i == self.last_resource_id):
                    raise error.ResourceIDError('out of resource ids')
            self.resource_ids[i] = None
            self.last_resource_id = i
            return (self.info.resource_id_base | i)
        finally:
            self.resource_id_lock.release()

    def free_resource_id(self, rid):
        """Release a resource id previously returned by allocate_resource_id.
        Ids from another client's range are silently ignored."""
        self.resource_id_lock.acquire()
        try:
            i = (rid & self.info.resource_id_mask)
            # Attempting to free a resource id from another client?
            if ((rid - i) != self.info.resource_id_base):
                return None
            try:
                del self.resource_ids[i]
            except KeyError:
                pass
        finally:
            self.resource_id_lock.release()

    def get_resource_class(self, class_name, default=None):
        # NOTE(review): self.resource_classes is not assigned in __init__;
        # presumably a subclass or extension sets it -- confirm before use.
        return self.resource_classes.get(class_name, default)

    def set_extension_major(self, extname, major):
        """Record the major opcode assigned to an extension."""
        self.extension_major_opcodes[extname] = major

    def get_extension_major(self, extname):
        """Return the major opcode of an extension (KeyError if unknown)."""
        return self.extension_major_opcodes[extname]

    def add_extension_event(self, code, evt, subcode=None):
        """Register an extension event class, optionally keyed by subcode
        (generic events use a {subcode: class} dict per code)."""
        if (subcode == None):
            self.event_classes[code] = evt
        elif (not (code in self.event_classes)):
            self.event_classes[code] = {subcode: evt}
        else:
            self.event_classes[code][subcode] = evt

    def add_extension_error(self, code, err):
        """Register an extension error class for an error code."""
        self.error_classes[code] = err

    def check_for_error(self):
        """Raise the stored fatal socket error, if any."""
        self.socket_error_lock.acquire()
        err = self.socket_error
        self.socket_error_lock.release()
        if err:
            raise err

    def send_request(self, request, wait_for_response):
        """Queue a request for sending, assigning it the next 16-bit serial.
        Does not touch the socket; flush/send_and_recv does the actual I/O."""
        if self.socket_error:
            raise self.socket_error
        self.request_queue_lock.acquire()
        request._serial = self.request_serial
        self.request_serial = ((self.request_serial + 1) % 65536)
        self.request_queue.append((request, wait_for_response))
        # NOTE(review): qlen is computed but unused -- presumably leftover
        # from a "flush when the queue grows large" heuristic.
        qlen = len(self.request_queue)
        self.request_queue_lock.release()

    def close_internal(self, whom):
        """Tear down the connection; all later calls raise ConnectionClosedError."""
        self.request_queue = None
        self.sent_requests = None
        self.event_queue = None
        self.data_send = None
        self.data_recv = None
        self.socket.close()
        self.socket_error_lock.acquire()
        self.socket_error = error.ConnectionClosedError(whom)
        self.socket_error_lock.release()

    def send_and_recv(self, flush=False, event=False, request=None, recv=False):
        """Drive the socket until the caller's condition is met.

        Must be called with send_recv_lock held; it is released before
        returning.  Exactly one of the modes is used per call:
          flush   -- send all queued data, don't wait for replies
          event   -- loop until at least one event is queued
          request -- loop until the reply/error for serial `request` arrives
          recv    -- one non-blocking poll of the socket

        If another thread is already sending/receiving, this thread either
        returns immediately (flush/recv) or parks itself on the appropriate
        wait lock until the active thread releases it.
        """
        # Another thread already doing what we need?
        if (((flush or (request is not None)) and self.send_active) or ((event or recv) and self.recv_active)):
            if event:
                wait_lock = self.event_wait_lock
                if (not self.event_waiting):
                    self.event_waiting = 1
                    wait_lock.acquire()
            elif (request is not None):
                wait_lock = self.request_wait_lock
                if (not self.request_waiting):
                    self.request_waiting = 1
                    wait_lock.acquire()
            self.send_recv_lock.release()
            if (flush or recv):
                return
            # Park until the active thread releases the wait lock.
            wait_lock.acquire()
            wait_lock.release()
            return
        # Claim the receiving role if nobody has it.
        if (not self.recv_active):
            receiving = 1
            self.recv_active = 1
        else:
            receiving = 0
        flush_bytes = None
        sending = 0
        while 1:
            # Move freshly queued requests into the outgoing byte buffer.
            if (sending or (not self.send_active)):
                self.request_queue_lock.acquire()
                for (req, wait) in self.request_queue:
                    self.data_send = (self.data_send + req._binary)
                    if wait:
                        self.sent_requests.append(req)
                del self.request_queue[:]
                self.request_queue_lock.release()
                if self.data_send:
                    self.send_active = 1
                    sending = 1
                else:
                    self.send_active = 0
                    sending = 0
            # Drop the lock while blocked in select() so other threads can
            # queue requests/consume events.
            self.send_recv_lock.release()
            if (not (sending or receiving)):
                break
            # For a flush, remember how far the outgoing stream must advance.
            if (flush and (flush_bytes is None)):
                flush_bytes = (self.data_sent_bytes + len(self.data_send))
            try:
                if sending:
                    writeset = [self.socket]
                else:
                    writeset = []
                # Timeout 0 == poll; None == block until readable/writable.
                if (recv or flush):
                    timeout = 0
                else:
                    timeout = None
                (rs, ws, es) = select.select([self.socket], writeset, [], timeout)
            except select.error as err:
                if isinstance(err, OSError):
                    code = err.errno
                else:
                    code = err[0]
                # Retry on EINTR, propagate anything else.
                if (code != errno.EINTR):
                    raise
                self.send_recv_lock.acquire()
                continue
            # Socket writable: push as much outgoing data as it accepts.
            if ws:
                try:
                    i = self.socket.send(self.data_send)
                except socket.error as err:
                    self.close_internal(('server: %s' % err))
                    raise self.socket_error
                self.data_send = self.data_send[i:]
                self.data_sent_bytes = (self.data_sent_bytes + i)
            gotreq = 0
            # Socket readable: pull data and parse responses.
            if rs:
                if receiving:
                    try:
                        count = (self.recv_packet_len - len(self.data_recv))
                        count = max(self.recv_buffer_size, count)
                        bytes_recv = self.socket.recv(count)
                    except socket.error as err:
                        self.close_internal(('server: %s' % err))
                        raise self.socket_error
                    if (not bytes_recv):
                        # Zero-length read means the server closed on us.
                        self.close_internal('server')
                        raise self.socket_error
                    self.data_recv = (bytes(self.data_recv) + bytes_recv)
                    gotreq = self.parse_response(request)
                else:
                    # Data is readable but another thread owns receiving:
                    # hand over and bail out.
                    self.send_recv_lock.acquire()
                    self.send_active = 0
                    self.send_recv_lock.release()
                    return
            # Loop-exit conditions for each mode.
            if (flush and (flush_bytes >= self.data_sent_bytes)):
                break
            if (event and self.event_queue):
                break
            if ((request is not None) and gotreq):
                break
            if recv:
                break
            self.send_recv_lock.acquire()
        # Re-acquire to hand the active roles back and wake parked threads.
        self.send_recv_lock.acquire()
        if sending:
            self.send_active = 0
        if receiving:
            self.recv_active = 0
        if self.event_waiting:
            self.event_waiting = 0
            self.event_wait_lock.release()
        if self.request_waiting:
            self.request_waiting = 0
            self.request_wait_lock.release()
        self.send_recv_lock.release()

    def parse_response(self, request):
        """Parse as many complete packets from data_recv as possible.

        `request` is the serial we are waiting for (or -1 during connection
        setup).  Returns true when that request's reply/error was seen.
        """
        if (request == (- 1)):
            return self.parse_connection_setup()
        gotreq = False
        while True:
            if self.data_recv:
                # First byte distinguishes error (0) / reply (1) / event.
                rtype = byte2int(self.data_recv)
            # A multi-packet (reply or generic event) length is pending.
            if self.recv_packet_len:
                if (len(self.data_recv) < self.recv_packet_len):
                    return gotreq
                if (rtype == 1):
                    gotreq = (self.parse_request_response(request) or gotreq)
                    continue
                elif ((rtype & 127) == ge.GenericEventCode):
                    self.parse_event_response(rtype)
                    continue
                else:
                    raise AssertionError(rtype)
            # All fixed-size packets are 32 bytes.
            if (len(self.data_recv) < 32):
                return gotreq
            if (rtype == 0):
                gotreq = (self.parse_error_response(request) or gotreq)
            elif ((rtype == 1) or ((rtype & 127) == ge.GenericEventCode)):
                # Replies/generic events carry an extra length word (in
                # 4-byte units) at offset 4.
                rlen = int(struct.unpack('=L', self.data_recv[4:8])[0])
                self.recv_packet_len = (32 + (rlen * 4))
            else:
                self.parse_event_response(rtype)

    def parse_error_response(self, request):
        """Consume a 32-byte error packet; dispatch it to the waiting request
        or to the error handler.  Returns true when it answers `request`."""
        code = indexbytes(self.data_recv, 1)
        estruct = self.error_classes.get(code, error.XError)
        e = estruct(self, self.data_recv[:32])
        self.data_recv = bytesview(self.data_recv, 32)
        req = self.get_waiting_request(e.sequence_number)
        if (req and req._set_error(e)):
            # Wake any thread parked waiting for this reply.
            if isinstance(req, rq.ReplyRequest):
                self.send_recv_lock.acquire()
                if self.request_waiting:
                    self.request_waiting = 0
                    self.request_wait_lock.release()
                self.send_recv_lock.release()
            return (request == e.sequence_number)
        else:
            if self.error_handler:
                rq.call_error_handler(self.error_handler, e, None)
            else:
                self.default_error_handler(e)
            return False

    def default_error_handler(self, err):
        """Fallback error handler: print the error to stderr."""
        sys.stderr.write(('X protocol error:\n%s\n' % err))

    def parse_request_response(self, request):
        """Consume a complete reply packet and hand it to its request.
        Returns true when it answers `request`."""
        req = self.get_waiting_replyrequest()
        sno = struct.unpack('=H', self.data_recv[2:4])[0]
        if (sno != req._serial):
            raise RuntimeError(("Expected reply for request %s, but got %s. Can't happen!" % (req._serial, sno)))
        req._parse_response(self.data_recv[:self.recv_packet_len])
        self.data_recv = bytesview(self.data_recv, self.recv_packet_len)
        self.recv_packet_len = 0
        # Wake any thread parked waiting for a reply.
        self.send_recv_lock.acquire()
        if self.request_waiting:
            self.request_waiting = 0
            self.request_wait_lock.release()
        self.send_recv_lock.release()
        return (req.sequence_number == request)

    def parse_event_response(self, etype):
        """Consume one event packet (32 bytes, or recv_packet_len for generic
        events) and append the decoded event to the queue."""
        # Any response has the most significant bit set if it is coming from
        # a SendEvent request; strip it for dispatch.
        etype = (etype & 127)
        if (etype == ge.GenericEventCode):
            length = self.recv_packet_len
        else:
            length = 32
        estruct = self.event_classes.get(etype, event.AnyEvent)
        if (type(estruct) == dict):
            # Extension event with per-subcode classes; subcode is byte 1.
            subcode = self.data_recv[1]
            if (type(subcode) == str):
                subcode = ord(subcode)
            estruct = estruct[subcode]
        e = estruct(display=self, binarydata=self.data_recv[:length])
        if (etype == ge.GenericEventCode):
            self.recv_packet_len = 0
        self.data_recv = bytesview(self.data_recv, length)
        if hasattr(e, 'sequence_number'):
            # Discard waiting requests this event proves were error-free.
            self.get_waiting_request(((e.sequence_number - 1) % 65536))
        self.event_queue_write_lock.acquire()
        self.event_queue.append(e)
        self.event_queue_write_lock.release()
        # Wake any thread parked waiting for an event.
        self.send_recv_lock.acquire()
        if self.event_waiting:
            self.event_waiting = 0
            self.event_wait_lock.release()
        self.send_recv_lock.release()

    def get_waiting_request(self, sno):
        """Find the sent request with serial `sno`, discarding all earlier
        ones (they completed without error).  Handles 16-bit serial
        wraparound.  Returns the request or None."""
        if (not self.sent_requests):
            return None
        # Normalise serials into one monotonic window around the wrap point.
        if (self.sent_requests[0]._serial > self.request_serial):
            last_serial = (self.request_serial + 65536)
            if (sno < self.request_serial):
                sno = (sno + 65536)
        else:
            last_serial = self.request_serial
            if (sno > self.request_serial):
                sno = (sno - 65536)
        if (sno < self.sent_requests[0]._serial):
            return None
        req = None
        reqpos = len(self.sent_requests)
        adj = 0
        last = 0
        for i in range(0, len(self.sent_requests)):
            rno = (self.sent_requests[i]._serial + adj)
            # A serial smaller than the previous one means we wrapped.
            if (rno < last):
                adj = 65536
                rno = (rno + adj)
            last = rno
            if (sno == rno):
                req = self.sent_requests[i]
                reqpos = (i + 1)
                break
            elif (sno < rno):
                req = None
                reqpos = i
                break
        del self.sent_requests[:reqpos]
        return req

    def get_waiting_replyrequest(self):
        """Pop requests up to and including the first one expecting a reply.
        A reply with no matching request is a protocol violation."""
        for i in range(0, len(self.sent_requests)):
            if hasattr(self.sent_requests[i], '_reply'):
                req = self.sent_requests[i]
                del self.sent_requests[:(i + 1)]
                return req
        else:
            # Loop completed without finding a reply request.
            raise RuntimeError("Request reply to unknown request. Can't happen!")

    def parse_connection_setup(self):
        """Parse the two-stage connection setup response.

        First an 8-byte header giving status and additional length, then the
        variable-sized success data (or failure reason).  Returns true when
        the full response has been consumed.
        """
        r = self.sent_requests[0]
        while True:
            if r._data:
                # Header already parsed; wait for the additional data.
                alen = (r._data['additional_length'] * 4)
                if (len(self.data_recv) < alen):
                    return False
                if (r._data['status'] != 1):
                    r._data['reason'] = self.data_recv[:r._data['reason_length']]
                else:
                    (x, d) = r._success_reply.parse_binary(self.data_recv[:alen], self, rawdict=True)
                    r._data.update(x)
                del self.sent_requests[0]
                self.data_recv = self.data_recv[alen:]
                return True
            else:
                if (len(self.data_recv) < 8):
                    return False
                (r._data, d) = r._reply.parse_binary(self.data_recv[:8], self, rawdict=True)
                self.data_recv = self.data_recv[8:]
def build_token(aud, token_type, build_id, job_id, expiration, instance_keys):
    """Build a bearer token for the given build/job, signed with
    `instance_keys` and issued for the anonymous subject."""
    claims = {
        'token_type': token_type,
        'build_id': build_id,
        'job_id': job_id,
        'expiration': expiration,
    }
    return generate_bearer_token(aud, ANONYMOUS_SUB, claims, {}, expiration, instance_keys)
def do_test(cfg, model):
    """Evaluate `model` on every dataset in cfg.DATASETS.TEST.

    Returns a dataset-name -> results mapping, or the bare results when there
    is exactly one test dataset.
    """
    results = OrderedDict()
    for dataset_name in cfg.DATASETS.TEST:
        data_loader = build_detection_test_loader(cfg, dataset_name)
        output_dir = os.path.join(cfg.OUTPUT_DIR, 'inference', dataset_name)
        evaluator = get_evaluator(cfg, dataset_name, output_dir)
        dataset_results = inference_on_dataset(model, data_loader, evaluator)
        results[dataset_name] = dataset_results
        if comm.is_main_process():
            # Only the main process logs (distributed evaluation).
            logger.info('Evaluation results for {} in csv format:'.format(dataset_name))
            print_csv_format(dataset_results)
    if len(results) == 1:
        return next(iter(results.values()))
    return results
def main():
    """Run one-shot NAS architecture search on CIFAR-10.

    Builds the chosen search space, trains the over-parameterised one-shot
    model with an architect for args.epochs, snapshots the architecture
    parameters every epoch and (for non-debug runs) evaluates the resulting
    one-shot model in NAS-Bench.  Relies on module-level `args`, `writer`,
    `CIFAR_CLASSES`, `Network`, `Architect`, `Analyzer`, `train` and `infer`
    -- presumably defined elsewhere in this file.
    """
    # The NAS-Bench evaluator is only needed (and imported) outside debug runs.
    if (not ('debug' in args.save)):
        from nasbench_analysis import eval_darts_one_shot_model_in_nasbench as naseval
    if (args.search_space == '1'):
        search_space = SearchSpace1()
    elif (args.search_space == '2'):
        search_space = SearchSpace2()
    elif (args.search_space == '3'):
        search_space = SearchSpace3()
    else:
        raise ValueError('Unknown search space')
    torch.set_num_threads(3)
    if (not torch.cuda.is_available()):
        logging.info('no gpu device available')
        sys.exit(1)
    # Seed both numpy and torch (CPU + CUDA) for reproducibility.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info(('gpu device = %d' % args.gpu))
    logging.info('args = %s', args)
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers, criterion, output_weights=args.output_weights, steps=search_space.num_intermediate_nodes, search_space=search_space)
    model = model.cuda()
    logging.info('param size = %fMB', utils.count_parameters_in_MB(model))
    optimizer = torch.optim.SGD(model.parameters(), args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)
    (train_transform, valid_transform) = utils._data_transforms_cifar10(args)
    train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
    # Split the training set into search-train and search-validation parts.
    num_train = len(train_data)
    indices = list(range(num_train))
    split = int(np.floor((args.train_portion * num_train)))
    if ('debug' in args.save):
        # Debug runs use two tiny batches to exercise the code path quickly.
        split = args.batch_size
        num_train = (2 * args.batch_size)
    train_queue = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]), pin_memory=True)
    valid_queue = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]), pin_memory=True)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs), eta_min=args.learning_rate_min)
    analyzer = Analyzer(model, args)
    architect = Architect(model, args)
    for epoch in range(args.epochs):
        scheduler.step()
        lr = scheduler.get_lr()[0]
        # Linearly ramp the cutout probability over the run.
        train_transform.transforms[(- 1)].cutout_prob = ((args.cutout_prob * epoch) / (args.epochs - 1))
        logging.info('epoch %d lr %e cutout_prob %e', epoch, lr, train_transform.transforms[(- 1)].cutout_prob)
        # Snapshot the current architecture parameters for later evaluation.
        arch_filename = os.path.join(args.save, 'one_shot_architecture_{}.obj'.format(epoch))
        with open(arch_filename, 'wb') as filehandler:
            numpy_tensor_list = []
            for tensor in model.arch_parameters():
                numpy_tensor_list.append(tensor.detach().cpu().numpy())
            pickle.dump(numpy_tensor_list, filehandler)
        for i in numpy_tensor_list:
            print(i)
        (train_acc, train_obj, ev) = train(train_queue, valid_queue, model, architect, criterion, optimizer, lr, epoch, analyzer)
        logging.info('train_acc %f', train_acc)
        logging.info('eigenvalue %f', ev)
        writer.add_scalar('Acc/train', train_acc, epoch)
        writer.add_scalar('Obj/train', train_obj, epoch)
        writer.add_scalar('Analysis/eigenvalue', ev, epoch)
        (valid_acc, valid_obj) = infer(valid_queue, model, criterion)
        logging.info('valid_acc %f', valid_acc)
        writer.add_scalar('Acc/valid', valid_acc, epoch)
        writer.add_scalar('Obj/valid', valid_obj, epoch)
        utils.save(model, os.path.join(args.save, 'weights.pt'))
        if (not ('debug' in args.save)):
            # Evaluate the snapshotted one-shot model in NAS-Bench.
            logging.info('STARTING EVALUATION')
            (test, valid, runtime, params) = naseval.eval_one_shot_model(config=args.__dict__, model=arch_filename)
            # NOTE(review): `index` is computed but never used -- presumably
            # leftover; removing it would change the numpy RNG stream, so it
            # is left as-is.
            index = np.random.choice(list(range(3)))
            (test, valid, runtime, params) = (np.mean(test), np.mean(valid), np.mean(runtime), np.mean(params))
            logging.info(('TEST ERROR: %.3f | VALID ERROR: %.3f | RUNTIME: %f | PARAMS: %d' % (test, valid, runtime, params)))
            writer.add_scalar('Analysis/test', test, epoch)
            writer.add_scalar('Analysis/valid', valid, epoch)
            writer.add_scalar('Analysis/runtime', runtime, epoch)
            writer.add_scalar('Analysis/params', params, epoch)
    writer.close()
def plot_hyperparam(hyperparam_to_plot, fig=None, ax_arr=None, big_ax=None, legend=False, dpi=300, figsize=(3, 5.5)):
    """Plot metric boxplots for one hyperparameter sweep.

    Draws one row of axes per metric (frame error / syllable error rate) for
    each species, from the module-level `hyperparams_expt_df` filtered to
    `hyperparam_to_plot` and `TRAIN_SET_DUR_TO_USE`.  Existing `fig`/`ax_arr`/
    `big_ax` may be passed in to compose a larger figure; otherwise they are
    created.  Returns (fig, ax_arr).

    NOTE(review): `handles`/`labels` are only assigned on the axes where the
    seaborn legend is drawn (row 1, col 0); if `legend=True` and that branch
    is never reached, line using `handles` would raise NameError -- presumably
    the callers always hit that branch. Confirm before reuse.
    """
    if ((fig is None) and (ax_arr is None)):
        (fig, ax_arr) = plt.subplots(2, 1, dpi=dpi, figsize=figsize)
        for ax_ in ax_arr.flatten():
            ax_.tick_params(pad=0.1)
    if (big_ax is None):
        # Invisible full-figure axes used only for the shared x-label/legend.
        big_ax = fig.add_subplot(111, frameon=False)
        big_ax.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
        big_ax.grid(False)
    SPECIES = ('Bengalese Finch',)
    HYPERPARAM_EXPTS = {'filter_num': 'No. of filters', 'filter_size': 'Filter size'}
    HYERPARAM_FOR_FINAL_RESULTS = {'Bengalese Finch': {'filter_num': 32, 'filter_size': 5}}
    METRICS = {'avg_error': 'Frame\nerror (%)', 'avg_segment_error_rate': 'Syllable\nerror rate (%)'}
    YLIMS = {'Bengalese Finch': {'avg_error': (0.0, 7.0), 'avg_segment_error_rate': (0.0, 32.0)}}
    for (hyperparam_num, (hyperparam_expt, hyperparam_label)) in enumerate(HYPERPARAM_EXPTS.items()):
        # Only the requested hyperparameter sweep is drawn.
        if (hyperparam_expt != hyperparam_to_plot):
            continue
        for (col, species) in enumerate(SPECIES):
            for (metric_num, (metric_column_name, metric_label)) in enumerate(METRICS.items()):
                data = hyperparams_expt_df[(((hyperparams_expt_df.species == species) & (hyperparams_expt_df.hyperparam_expt == hyperparam_expt)) & (hyperparams_expt_df.train_set_dur == TRAIN_SET_DUR_TO_USE[species]))]
                row = metric_num
                ax = ax_arr[row]
                if (row == 0):
                    ax.set_title(species)
                if ((row == 1) and (col == 0)):
                    plot_legend = True
                else:
                    plot_legend = False
                g = sns.boxplot(data=data, x='hyperparam_val', y=metric_column_name, showfliers=False, hue='Post-processing', palette=POST_PROCESS_PALETTE, ax=ax)
                if plot_legend:
                    # Capture the legend artists, then strip the per-axes
                    # legend; it is re-drawn on big_ax at the end.
                    (handles, labels) = ax.get_legend_handles_labels()
                    g.legend_.remove()
                ax.set_ylim(YLIMS[species][metric_column_name])
                ax.set_xlabel('')
                if ((row == 1) or (row == 3)):
                    # Bottom rows carry tick labels; the value used for the
                    # paper's final results is set in italic math text.
                    new_xticklabels = []
                    for xticklabel in ax.get_xticklabels():
                        if (int(xticklabel.get_text()) == HYERPARAM_FOR_FINAL_RESULTS[species][hyperparam_expt]):
                            new_xticklabels.append(f'$f{{{xticklabel.get_text()}}}$')
                        else:
                            new_xticklabels.append(xticklabel.get_text())
                    ax.set_xticklabels(new_xticklabels, rotation=45)
                else:
                    ax.set_xticklabels([])
                if (col == 0):
                    ax.set_ylabel(f'''{metric_label}
max. train dur.''')
                else:
                    ax.set_ylabel('')
    big_ax.set_xlabel(hyperparam_label, fontweight='bold', labelpad=15)
    sns.despine(fig)
    if legend:
        big_ax.legend(title='Post-processing', handles=handles, labels=['With', 'Without'], loc='lower right', bbox_to_anchor=((- 0.4), (- 0.05)))
    return (fig, ax_arr)
class RegistrationPendingWidget(TitledWidget):
    """Explains to a user that their registration still needs email
    verification, and offers to re-send the verification email."""

    def __init__(self, view, account_management_interface, verify_bookmark):
        super().__init__(view)
        config = ExecutionContext.get_context().config
        explanation_paragraphs = [
            _('There is a registration pending for email address "%s".') % account_management_interface.email,
            _('Before you can log in, you need to verify your email address using the secret key sent to that address. It looks like you did not do that.'),
            _('You should receive the automated email anything between a minute to an hour after registration. Sometimes though, your email software may mistakenly identify our email as junk email. If this happens it will be hidden away in a "junk email" folder or just not shown to you.'),
            _('You can have the email re-sent to you by clicking on the button below.'),
            _('Before you do that, however, please make sure that your email system will allow emails from "%s" through to you.') % config.accounts.admin_email,
            _('Sometimes these emails arrive immediately, but they may also be delayed.'),
        ]
        for paragraph_text in explanation_paragraphs:
            self.add_child(P(view, text=paragraph_text))
        # The last paragraph embeds a link to the verification page.
        verify_link = A.from_bookmark(view, verify_bookmark.with_description(_('the verification page')))
        template = P(view, text=_('Once you located the email, retrieve the code and then enter it on {verify}.'))
        self.add_child(template.format(verify=verify_link))
        self.add_child(RegistrationPendingForm(view))
class Registration():
    """Thin Python facade over the JVM-side ``io.xskipper.Registration`` API.

    Every method forwards straight to the JVM object reachable through
    ``sparkSession._jvm``; nothing is cached on the Python side.

    FIX: the methods are decorated with @staticmethod -- previously they were
    implicit instance methods, so calling them on a Registration *instance*
    mis-bound the first argument.  Class-level calls behave exactly as
    before.
    """

    @staticmethod
    def addMetadataFilterFactory(sparkSession, filterFactory):
        """Register a metadata filter factory with xskipper."""
        sparkSession._jvm.io.xskipper.Registration.addMetadataFilterFactory(filterFactory)

    @staticmethod
    def addIndexFactory(sparkSession, indexFactory):
        """Register an index factory with xskipper."""
        sparkSession._jvm.io.xskipper.Registration.addIndexFactory(indexFactory)

    @staticmethod
    def addMetaDataTranslator(sparkSession, metadataTranslator):
        """Register a metadata translator with xskipper."""
        sparkSession._jvm.io.xskipper.Registration.addMetaDataTranslator(metadataTranslator)

    @staticmethod
    def addClauseTranslator(sparkSession, clauseTranslator):
        """Register a clause translator with xskipper."""
        sparkSession._jvm.io.xskipper.Registration.addClauseTranslator(clauseTranslator)

    @staticmethod
    def setActiveMetadataStoreManager(sparkSession, metadataStoreManager):
        """Select the active metadata store manager."""
        sparkSession._jvm.io.xskipper.Registration.setActiveMetadataStoreManager(metadataStoreManager)

    @staticmethod
    def getActiveMetadataStoreManagerType(sparkSession):
        """Return the canonical class name of the active metadata store manager."""
        return sparkSession._jvm.io.xskipper.Registration.getActiveMetadataStoreManagerType().getClass().getCanonicalName()
def test_top_down_PoseTrack18_dataset_compatibility():
    """Deprecated-interface compatibility test for TopDownPoseTrack18Dataset.

    Constructing the dataset through the old (non-dataset_info) interface
    must emit a DeprecationWarning but still load the annotation file.
    """
    dataset = 'TopDownPoseTrack18Dataset'
    dataset_class = DATASETS.get(dataset)
    dataset_class.load_annotations = MagicMock()
    dataset_class.coco = MagicMock()
    channel_cfg = dict(num_output_channels=17, dataset_joints=17, dataset_channel=[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]], inference_channel=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16])
    data_cfg = dict(image_size=[192, 256], heatmap_size=[48, 64], num_output_channels=channel_cfg['num_output_channels'], num_joints=channel_cfg['dataset_joints'], dataset_channel=channel_cfg['dataset_channel'], inference_channel=channel_cfg['inference_channel'], soft_nms=False, nms_thr=1.0, oks_thr=0.9, vis_thr=0.2, use_gt_bbox=True, det_bbox_thr=0.0, bbox_file='tests/data/posetrack18/annotations/test_posetrack18_human_detections.json')
    data_cfg_copy = copy.deepcopy(data_cfg)
    data_cfg_copy['use_gt_bbox'] = False
    with pytest.warns(DeprecationWarning):
        _ = dataset_class(ann_file='tests/data/posetrack18/annotations/test_posetrack18_val.json', img_prefix='tests/data/posetrack18/', data_cfg=data_cfg_copy, pipeline=[], test_mode=True)
    with pytest.warns(DeprecationWarning):
        _ = dataset_class(ann_file='tests/data/posetrack18/annotations/test_posetrack18_val.json', img_prefix='tests/data/posetrack18/', data_cfg=data_cfg_copy, pipeline=[], test_mode=False)
    with pytest.warns(DeprecationWarning):
        custom_dataset = dataset_class(ann_file='tests/data/posetrack18/annotations/test_posetrack18_val.json', img_prefix='tests/data/posetrack18/', data_cfg=data_cfg, pipeline=[], test_mode=True)
    assert (custom_dataset.test_mode is True)
    assert (custom_dataset.dataset_name == 'posetrack18')
    # FIX: the original line read `image_id =` with no value (SyntaxError).
    # Restored the id of an image known to be in the test annotation file --
    # TODO confirm against tests/data/posetrack18/annotations.
    image_id = 10128340000
    assert (image_id in custom_dataset.img_ids)
    assert (len(custom_dataset.img_ids) == 3)
    _ = custom_dataset[0]
def _parse_freqplot_args(*args):
    """Sort positional freqplot-style arguments into their roles.

    Returns (syslist, omega, plotstyle, other) where syslist is a single
    system when exactly one was given, omega is an optional frequency vector
    (the last argument consumed), plotstyle the per-system style strings and
    other holds keyword-style extras such as 'omega_limits'.
    """
    systems = []
    styles = []
    omega = None
    other = {}
    idx = 0
    while idx < len(args):
        current = args[idx]
        if isinstance(current, LTI):
            # A system, optionally followed by its plot-style string.
            systems.append(current)
            idx += 1
            if idx < len(args) and isinstance(args[idx], str):
                styles.append(args[idx])
                idx += 1
            continue
        if isinstance(current, (list, np.ndarray)):
            # Frequency vector terminates the argument scan.
            omega = current
            idx += 1
            break
        if isinstance(current, tuple) and len(current) == 2:
            other['omega_limits'] = current
            idx += 1
        else:
            raise ControlArgument('unrecognized argument type')
    if idx < len(args):
        raise ControlArgument('not all arguments processed')
    if styles and len(systems) != len(styles):
        raise ControlArgument('number of systems and plotstyles should be equal')
    if styles:
        warn('Warning (matlab.bode): plot styles not implemented')
    if not systems:
        raise ControlArgument('no systems specified')
    if len(systems) == 1:
        return (systems[0], omega, styles, other)
    return (systems, omega, styles, other)
def rgb(x: ColorType) -> tuple[float, float, float, float]:
    """Normalise a colour specifier to an (r, g, b, a) tuple of floats in [0, 1].

    Accepts a 3- or 4-sequence of 0-255 channel values (a 4th entry is an
    already-normalised alpha), or a string: 'rgb', 'rrggbb' or 'rrggbbaa'
    hex digits with an optional leading '#', optionally suffixed with
    '.<digits>' giving the alpha as a decimal fraction.
    """
    if isinstance(x, (tuple, list)):
        # Sequence form: channels are 0-255; alpha (if present) is 0-1.
        opacity = x[-1] if len(x) == 4 else 1.0
        return (x[0] / 255.0, x[1] / 255.0, x[2] / 255.0, opacity)
    if isinstance(x, str):
        spec = x.removeprefix('#')
        if '.' in spec:
            # Trailing '.<digits>' encodes the alpha as a decimal fraction.
            spec, fraction = spec.split('.')
            opacity = float('0.' + fraction)
        else:
            opacity = 1.0
        if len(spec) not in (3, 6, 8):
            raise ValueError('RGB specifier must be 3, 6 or 8 characters long.')
        if len(spec) == 3:
            # Short form: each digit doubles ('f' -> 0xff == 15 * 17).
            channels = tuple(int(digit, 16) * 17 for digit in spec)
        else:
            channels = tuple(int(pair, 16) for pair in (spec[0:2], spec[2:4], spec[4:6]))
            if len(spec) == 8:
                # An explicit hex alpha byte overrides any '.' suffix.
                opacity = int(spec[6:8], 16) / 255.0
        return rgb(channels + (opacity,))
    raise ValueError('Invalid RGB specifier.')
class TLatin1TextListSpec(TestCase):
    """Round-trip tests for Latin1TextListSpec: a count byte followed by
    NUL-terminated Latin-1 strings."""

    def _spec(self):
        # Every test operates on a freshly constructed spec.
        return Latin1TextListSpec('name')

    def test_read(self):
        spec = self._spec()
        self.assertEqual(spec.read(None, None, b'\x00xxx'), ([], b'xxx'))
        self.assertEqual(spec.read(None, None, b'\x01foo\x00'), ([u'foo'], b''))
        self.assertEqual(spec.read(None, None, b'\x01\x00'), ([u''], b''))
        self.assertEqual(spec.read(None, None, b'\x02f\x00o\x00'), ([u'f', u'o'], b''))

    def test_write(self):
        spec = self._spec()
        self.assertEqual(spec.write(None, None, []), b'\x00')
        self.assertEqual(spec.write(None, None, [u'']), b'\x01\x00')

    def test_validate(self):
        spec = self._spec()
        self.assertRaises(TypeError, spec.validate, None, object())
        self.assertRaises(TypeError, spec.validate, None, None)
        self.assertEqual(spec.validate(None, [u'foo']), [u'foo'])
        self.assertEqual(spec.validate(None, []), [])
class Log(Gtk.TextView):
    """Read-only monospace text view used as an in-app log console."""
    __gtype_name__ = 'Log'

    def __init__(self):
        super().__init__()
        self.text_buffer = self.get_buffer()
        # Display-only: users cannot edit the log.
        self.props.editable = False
        self.props.monospace = True
        self.props.wrap_mode = Gtk.WrapMode.WORD_CHAR
        self.props.hexpand = True
        self.add_css_class('log')

    def append(self, text: str, bold: bool = False, italic: bool = False):
        """Append one line to the buffer, optionally wrapped in Pango
        bold/italic markup (italic wraps outermost)."""
        tail = self.text_buffer.get_end_iter()
        markup = text
        if bold:
            markup = f'<b>{markup}</b>'
        if italic:
            markup = f'<i>{markup}</i>'
        self.text_buffer.insert_markup(tail, markup + '\n', -1)

    def reset(self):
        """Delete the entire buffer contents."""
        begin, finish = self.text_buffer.get_bounds()
        self.text_buffer.delete(begin, finish)
def _execute(args):
    """Build, upload and wire up a function package from parsed CLI args.

    Pipeline (order matters, each stage has side effects): load config ->
    build or re-create the package -> clean the build workspace -> upload
    (with optional publish/alias/subscriptions) -> remove the zipfile.
    """
    pth = path.abspath(args.function_dir)
    cfg = config.Config(pth, args.config, role=args.role, variables=args.variables)
    if args.s3_bucket:
        cfg.set_s3(args.s3_bucket, args.s3_key)
    # Virtualenv selection: --no-virtualenv wins, then an explicit path,
    # else None lets the builder decide.
    if args.no_virtualenv:
        venv = False
    elif args.virtualenv:
        venv = args.virtualenv
    else:
        venv = None
    if args.no_build:
        # Skip the build and package the directory as-is.
        pkg = package.create_package(pth)
    else:
        _print('Building Package')
        # CLI flags override values coming from the config file.
        requirements = cfg.requirements
        if args.requirements:
            requirements = path.abspath(args.requirements)
        extra_files = cfg.extra_files
        if args.extra_files:
            extra_files = args.extra_files
        pkg = package.build_package(pth, requirements, venv, cfg.ignore, extra_files, pyexec=cfg.runtime)
    if (not args.no_clean):
        pkg.clean_workspace()
    if (not args.no_upload):
        if args.publish:
            cfg.set_publish()
        create_alias = False
        # Remember to create the alias only after the upload succeeded.
        if (args.alias is not None):
            cfg.set_alias(args.alias, args.alias_description)
            create_alias = True
        _print('Uploading Package')
        upldr = uploader.PackageUploader(cfg, args.profile)
        upldr.upload(pkg)
        if create_alias:
            upldr.alias()
        if cfg.subscription:
            _print('Creating subscription')
            subscribers.create_subscriptions(cfg, args.profile)
    pkg.clean_zipfile()
    _print('Fin')
def _generate_positive_items(user_pos_dict):
if (not isinstance(user_pos_dict, dict)):
raise TypeError("'user_pos_dict' must be a dict.")
if (not user_pos_dict):
raise ValueError("'user_pos_dict' cannot be empty.")
(users_list, pos_items_list) = ([], [])
user_pos_len = []
for (user, pos_items) in user_pos_dict.items():
pos_len = len(pos_items)
user_pos_len.append([user, pos_len])
users_list.extend(([user] * len(pos_items)))
pos_items_list.extend(pos_items)
return (user_pos_len, users_list, pos_items_list) |
class _Typedef(object):
    """Sizing recipe for one Python type.

    Bundles the type's static ("basic") size, its per-item size for
    variable-sized types and optional length/referents helper callables.
    Instances are registered in the module-level ``_typedefs`` table via
    :meth:`save`.
    """
    base = 0      # basic size of an instance, in bytes
    both = None   # True when the entry covers data objects (else code only)
    item = 0      # per-item size for variable-sized types
    kind = None   # category tag; validated against _all_kinds in reset()
    leng = None   # item-count callable; validated against _all_lens
    refs = None   # referents callable; validated against _all_refs
    type = None   # the type object this entry describes
    vari = None   # string flag for variable-size handling (see reset())
    xtyp = None   # True to skip the _getsizeof adjustment in flat()
    def __init__(self, **kwds):
        self.reset(**kwds)
    def __lt__(self, unused):
        # Arbitrary but total: lets typedefs be sorted alongside other keys.
        return True
    def __repr__(self):
        return repr(self.args())
    def __str__(self):
        t = [str(self.base), str(self.item)]
        for f in (self.leng, self.refs):
            t.append((_nameof(f) or 'n/a'))
        if (not self.both):
            t.append('(code only)')
        return ', '.join(t)
    def args(self):
        """Return the settings as a positional tuple (repr helper)."""
        return (self.base, self.item, self.leng, self.refs, self.both, self.kind, self.type, self.xtyp)
    def dup(self, other=None, **kwds):
        """Re-initialise from *other* (default: the dict typedef), then apply **kwds."""
        t = (other or _dict_typedef)
        d = t.kwds()
        d.update(kwds)
        self.reset(**d)
    def flat(self, obj, mask=0):
        """Return the flat (non-recursive) size of *obj* in bytes.

        Adds per-item size for variable-sized objects, defers to
        _getsizeof unless ``xtyp`` is set, and rounds up to the alignment
        *mask* when given.
        """
        s = self.base
        if (self.leng and (self.item > 0)):
            s += (self.leng(obj) * self.item)
        if (not self.xtyp):
            s = _getsizeof(obj, s)
        if mask:
            # Round up to the next multiple of (mask + 1).
            s = ((s + mask) & (~ mask))
        return s
    def format(self):
        """Return a dict of display fields for profile printing."""
        a = _nameof(self.leng)
        return dict(leng=((' (%s)' % (a,)) if a else _NN), item=('var' if self.vari else self.item), code=(_NN if self.both else ' (code only)'), base=self.base, kind=self.kind)
    def kwds(self):
        """Return all settings as keyword arguments (used by dup/set)."""
        return dict(base=self.base, both=self.both, item=self.item, kind=self.kind, leng=self.leng, refs=self.refs, type=self.type, vari=self.vari, xtyp=self.xtyp)
    def reset(self, base=0, item=0, leng=None, refs=None, both=True, kind=None, type=None, vari=_Not_vari, xtyp=False, **extra):
        """Validate then assign every setting.

        The if/elif chain captures the first invalid keyword into *e*; the
        attributes are only assigned when all values pass.  Raises
        _OptionError for any invalid or unexpected keyword.
        """
        v = (vari or _Not_vari)
        if (v != str(v)):
            e = dict(vari=v)
        elif (base < 0):
            e = dict(base=base)
        elif (both not in (False, True)):
            e = dict(both=both)
        elif (item < 0):
            e = dict(item=item)
        elif (kind not in _all_kinds):
            e = dict(kind=kind)
        elif (leng not in _all_lens):
            e = dict(leng=leng)
        elif (refs not in _all_refs):
            e = dict(refs=refs)
        elif (xtyp not in (False, True)):
            e = dict(xtyp=xtyp)
        elif extra:
            e = {}
        else:
            self.base = base
            self.both = both
            self.item = item
            self.kind = kind
            self.leng = leng
            self.refs = refs
            self.type = type
            self.vari = v
            self.xtyp = xtyp
            return
        e.update(extra)
        raise _OptionError(self.reset, **e)
    def save(self, t, base=0, heap=False):
        """Register this typedef for *t* (a type or type/code key) in _typedefs.

        Also registers a code-only companion entry for the class key when
        absent.  Raises KeyError when *t* is already registered
        inconsistently and is not a builtin.
        """
        (c, k) = _key2tuple(t)
        if (k and (k not in _typedefs)):
            _typedefs[k] = self
        if (c and (c not in _typedefs)):
            b = _basicsize(type(t), base=base, heap=heap)
            k = (_kind_ignored if _isignored(t) else self.kind)
            _typedefs[c] = _Typedef(base=b, both=False, kind=k, type=t, refs=_type_refs)
        elif (t not in _typedefs):
            if (not _isbuiltin2(t)):
                s = ' '.join((self.vari, _moduleof(t), _nameof(t)))
                s = ('%r %s %s' % ((c, k), self.both, s.strip()))
                raise KeyError(('typedef %r bad: %s' % (self, s)))
            _typedefs[t] = _Typedef(base=_basicsize(t, base=base), both=False, kind=_kind_ignored, type=t)
    def set(self, safe_len=False, **kwds):
        """Update settings; with *safe_len*, force the generic _len length callable."""
        if kwds:
            d = self.kwds()
            d.update(kwds)
            self.reset(**d)
        if (safe_len and self.item):
            self.leng = _len
class DDPG():
    """Deep Deterministic Policy Gradient agent (TF1, session-based).

    Combines an actor-critic network, an experience replay buffer and
    Ornstein-Uhlenbeck exploration noise.  States are represented sparsely
    as lists of active feature indices (see :meth:`sparse_tensor`).
    """
    def __init__(self, state_space, action_dim):
        self.name = 'DDPG'
        self.sess = tf.Session()
        self.state_space = state_space
        self.action_dim = action_dim
        self.ac_network = ActorCriticNetwork(self.sess, self.state_space, self.action_dim)
        self.replay_buffer = ReplayBuffer(REPLAY_BUFFER_SIZE)
        self.exploration_noise = OUNoise(self.action_dim)
    def train(self):
        """Run one optimisation step on a replay minibatch.

        Critic targets: y = r for terminal transitions, otherwise
        y = r + GAMMA * Q_target(s', a') with a' from the target network.
        Afterwards the actor is updated from the critic's action gradients,
        the target networks are refreshed and a checkpoint is saved.
        """
        minibatch = self.replay_buffer.get_batch(BATCH_SIZE)
        # Transition tuples are (state, action, reward, next_state, done).
        state_batch = np.asarray([data[0] for data in minibatch])
        state_batch = self.sparse_tensor(state_batch, self.state_space)
        action_batch = np.asarray([data[1] for data in minibatch])
        reward_batch = np.asarray([data[2] for data in minibatch])
        next_state_batch = np.asarray([data[3] for data in minibatch])
        next_state_batch = self.sparse_tensor(next_state_batch, self.state_space)
        done_batch = np.asarray([data[4] for data in minibatch])
        action_batch = np.resize(action_batch, [BATCH_SIZE, self.action_dim])
        next_action_batch = self.ac_network.target_actions(next_state_batch)
        q_value_batch = self.ac_network.target_q(next_state_batch, next_action_batch)
        print(('step_%d next_action:' % self.ac_network.time_step), next_action_batch[0][0])
        y_batch = []
        for i in range(len(minibatch)):
            if done_batch[i]:
                y_batch.append(reward_batch[i])
            else:
                y_batch.append((reward_batch[i] + (GAMMA * q_value_batch[i])))
        y_batch = np.resize(y_batch, [BATCH_SIZE, 1])
        cost = self.ac_network.train_critic(y_batch, state_batch, action_batch)
        print(('step_%d critic cost:' % self.ac_network.time_step), cost)
        # Actor update: gradients of Q w.r.t. the actor's own actions.
        action_batch_for_gradients = self.ac_network.actions(state_batch)
        q_gradient_batch = self.ac_network.gradients(state_batch, action_batch_for_gradients)
        self.ac_network.train_actor(q_gradient_batch, state_batch)
        self.ac_network.update_target()
        self.ac_network.save_network()
    def noise_action(self, state):
        """Return the action for *state* with OU exploration noise added."""
        action = self.ac_network.actions(state)
        return (action[0] + self.exploration_noise.noise())
    def action(self, state):
        """Return the deterministic (noise-free) action for a single *state*."""
        action = self.ac_network.actions([state])
        return action[0]
    def perceive(self, state, action, reward, next_state, done):
        """Store a transition; train once the buffer exceeds REPLAY_START_SIZE.

        Exploration noise is reset at episode boundaries.
        """
        self.replay_buffer.add(state, action, reward, next_state, done)
        if (self.replay_buffer.count() > REPLAY_START_SIZE):
            self.train()
        if done:
            self.exploration_noise.reset()
    def sparse_tensor(self, state_batch, state_space):
        """Convert a batch of active-index lists to a one-hot SparseTensorValue.

        Row r receives value 1.0 at every column in state_batch[r]; the
        dense shape is (batch, state_space).
        """
        row = len(state_batch)
        indices = []
        for r in range(row):
            indices += [(r, c) for c in state_batch[r]]
        values = [1.0 for i in range(len(indices))]
        return tf.SparseTensorValue(indices=indices, values=values, dense_shape=[row, state_space])
def log_Phi(x):
    """log of the standard normal CDF, elementwise for 1-D arrays.

    For arguments above 5 the upper tail is tiny, so
    log(Phi(v)) = log(1 - sf(v)) is approximated by -sf(v)
    (first-order expansion of log(1 - eps)).
    """
    def _scalar(value):
        if value > 5:
            return -sps.norm.sf(value)
        return sps.norm.logcdf(value)

    if isinstance(x, np.ndarray):
        return np.array([_scalar(value) for value in x])
    return _scalar(x)
class BinaryPayloadBuilder():
    """Builds a binary payload by packing typed values into a byte list.

    Values are appended with the ``add_*`` methods using the configured
    byte order; 32/64-bit values additionally honour the configured word
    order.  The accumulated payload can be emitted as raw bytes
    (:meth:`encode`), 16-bit registers (:meth:`to_registers`) or coils
    (:meth:`to_coils`).
    """
    def __init__(self, payload=None, byteorder=Endian.LITTLE, wordorder=Endian.BIG, repack=False):
        self._payload = (payload or [])   # list of already-packed byte chunks
        self._byteorder = byteorder       # byte order within a 16-bit word
        self._wordorder = wordorder       # order of 16-bit words in wide values
        self._repack = repack             # see to_registers()
    def _pack_words(self, fstring, value):
        """Pack *value* (struct format char *fstring*) honouring word order.

        The value is packed big-endian first, split into 16-bit words
        (WC presumably maps a struct format char to its byte width --
        confirm), the word sequence is reversed for little-endian word
        order, then each word is re-packed with the configured byte order.
        """
        value = pack(f'!{fstring}', value)
        wordorder = (WC.get(fstring.lower()) // 2)
        upperbyte = f'!{wordorder}H'
        payload = unpack(upperbyte, value)
        if (self._wordorder == Endian.LITTLE):
            payload = list(reversed(payload))
        fstring = (self._byteorder + 'H')
        payload = [pack(fstring, word) for word in payload]
        payload = b''.join(payload)
        return payload
    def encode(self) -> bytes:
        """Concatenate every packed chunk into a single bytes object."""
        return b''.join(self._payload)
    def __str__(self) -> str:
        # NOTE: only meaningful when the payload happens to be valid UTF-8.
        return self.encode().decode('utf-8')
    def reset(self) -> None:
        """Drop any accumulated payload."""
        self._payload = []
    def to_registers(self):
        """Return the payload as a list of 16-bit register integers.

        With ``repack`` the registers are decoded using the configured byte
        order instead of big-endian.
        """
        fstring = '!H'
        payload = self.build()
        if self._repack:
            payload = [unpack((self._byteorder + 'H'), value)[0] for value in payload]
        else:
            payload = [unpack(fstring, value)[0] for value in payload]
        Log.debug('{}', payload)
        return payload
    def to_coils(self) -> list[bool]:
        """Return the payload as a flat list of bits (16 per register, MSB first)."""
        payload = self.to_registers()
        coils = [bool(int(bit)) for reg in payload for bit in format(reg, '016b')]
        return coils
    def build(self) -> list[bytes]:
        """Return the payload as 2-byte chunks, zero-padded to an even length."""
        buffer = self.encode()
        length = len(buffer)
        # range() below uses the pre-pad length; for an odd length the last
        # slice still includes the pad byte appended here.
        buffer += (b'\x00' * (length % 2))
        return [buffer[i:(i + 2)] for i in range(0, length, 2)]
    # --- append helpers ---------------------------------------------------
    # 8/16-bit values use the byte order directly; 32/64-bit values route
    # through _pack_words so the word order applies as well.
    def add_bits(self, values: list[bool]) -> None:
        value = pack_bitstring(values)
        self._payload.append(value)
    def add_8bit_uint(self, value: int) -> None:
        fstring = (self._byteorder + 'B')
        self._payload.append(pack(fstring, value))
    def add_16bit_uint(self, value: int) -> None:
        fstring = (self._byteorder + 'H')
        self._payload.append(pack(fstring, value))
    def add_32bit_uint(self, value: int) -> None:
        fstring = 'I'
        p_string = self._pack_words(fstring, value)
        self._payload.append(p_string)
    def add_64bit_uint(self, value: int) -> None:
        fstring = 'Q'
        p_string = self._pack_words(fstring, value)
        self._payload.append(p_string)
    def add_8bit_int(self, value: int) -> None:
        fstring = (self._byteorder + 'b')
        self._payload.append(pack(fstring, value))
    def add_16bit_int(self, value: int) -> None:
        fstring = (self._byteorder + 'h')
        self._payload.append(pack(fstring, value))
    def add_32bit_int(self, value: int) -> None:
        fstring = 'i'
        p_string = self._pack_words(fstring, value)
        self._payload.append(p_string)
    def add_64bit_int(self, value: int) -> None:
        fstring = 'q'
        p_string = self._pack_words(fstring, value)
        self._payload.append(p_string)
    def add_16bit_float(self, value: float) -> None:
        fstring = 'e'
        p_string = self._pack_words(fstring, value)
        self._payload.append(p_string)
    def add_32bit_float(self, value: float) -> None:
        fstring = 'f'
        p_string = self._pack_words(fstring, value)
        self._payload.append(p_string)
    def add_64bit_float(self, value: float) -> None:
        fstring = 'd'
        p_string = self._pack_words(fstring, value)
        self._payload.append(p_string)
    def add_string(self, value: str) -> None:
        # Encoded with the default codec; length is the character count, so
        # non-ASCII strings whose byte length differs would be truncated by
        # struct -- assumes ASCII-compatible input (confirm with callers).
        fstring = ((self._byteorder + str(len(value))) + 's')
        self._payload.append(pack(fstring, value.encode()))
def polymer_species(subunit, site1, site2, size, closed=False):
    """Build a polymer species of *size* copies of *subunit* joined
    head-to-tail through *site1*/*site2*.

    With ``closed`` the first and last subunits are additionally bonded to
    each other, forming a ring.  The returned complex has ``match_once``
    set.  Raises ValueError for a non-positive size.
    """
    _verify_sites(subunit, site1, site2)
    if size <= 0:
        raise ValueError('size must be an integer greater than 0')
    if size == 1:
        species = subunit({site1: None, site2: None})
    elif size == 2:
        species = subunit({site1: None, site2: 1}) % subunit({site1: 1, site2: None})
    else:
        # For a ring the terminal subunits share bond number `size`; for an
        # open chain their outer sites stay unbound (None).
        seam = size if closed else None
        species = subunit({site1: seam, site2: 1})
        for bond in range(1, size - 1):
            species %= subunit({site1: bond, site2: bond + 1})
        species %= subunit({site1: size - 1, site2: seam})
    species.match_once = True
    return species
class NumberParsingTestCase(unittest.TestCase):
    """Locale-aware decimal parsing, including strict-mode suggestions."""

    def test_can_parse_decimals(self):
        expected = decimal.Decimal('1099.98')
        assert expected == numbers.parse_decimal('1,099.98', locale='en_US')
        assert expected == numbers.parse_decimal('1.099,98', locale='de')
        assert expected == numbers.parse_decimal('109998', locale='ar', numbering_system='default')
        # Group separators in the wrong positions for the locale must fail.
        with pytest.raises(numbers.NumberFormatError):
            numbers.parse_decimal('2,109,998', locale='de')
        with pytest.raises(numbers.UnsupportedNumberingSystemError):
            numbers.parse_decimal('2,109,998', locale='de', numbering_system='unknown')

    def test_parse_decimal_strict_mode(self):
        # (input, locale, expected suggestions) for strings strict mode rejects.
        rejected = [
            ('11.11', 'de', ['1.111', '11,11']),
            ('80.00.00', 'de', ['800.000']),
            ('2000,000', 'en_US', ['2,000,000', '2,000']),
            ('0,,000', 'en_US', ['0']),
            ('0.00', 'de', ['0']),
        ]
        for value, loc, suggestions in rejected:
            with pytest.raises(numbers.NumberFormatError) as info:
                numbers.parse_decimal(value, locale=loc, strict=True)
            assert info.value.suggestions == suggestions
        # (input, locale, str() of the parsed Decimal) for accepted strings.
        accepted = [
            ('1.001', 'de', '1001'),
            ('3.00', 'en_US', '3.00'),
            ('3,400.6', 'en_US', '3400.6'),
            ('3,400.60', 'en_US', '3400.60'),
            ('3,400.00', 'en_US', '3400.00'),
            ('3,400.0000', 'en_US', '3400.0000'),
            ('3,800', 'en_US', '3800'),
            ('2000.1', 'en_US', '2000.1'),
            ('2580', 'en_US', '2580'),
            ('5,000001', 'fr', '5.000001'),
        ]
        for value, loc, parsed in accepted:
            assert str(numbers.parse_decimal(value, locale=loc, strict=True)) == parsed
def save_checkpoint(filepath, obj, num_ckpt_keep=5):
    """Save *obj* to *filepath*, pruning old checkpoints with the same prefix.

    The checkpoint file name must look like ``do_<step>`` or ``g_<step>``;
    before saving, sibling checkpoints sharing that prefix are deleted so
    that at most ``num_ckpt_keep`` of them remain.

    NOTE(review): pruning sorts paths lexicographically, so step numbers of
    different digit counts (e.g. ``g_999`` vs ``g_1000``) do not sort
    numerically -- confirm steps are zero-padded or same-width upstream.
    """
    ckpt_path = pathlib.Path(filepath)
    # Prefix is "do" or "g"; an unexpected name raises AttributeError,
    # preserving the original fail-fast behaviour.
    name = re.match('(do|g)_\\d+', ckpt_path.name).group(1)
    ckpts = sorted(ckpt_path.parent.glob(f'{name}_*'))
    if len(ckpts) > num_ckpt_keep:
        # Plain loop instead of a side-effecting list comprehension.
        for stale in ckpts[:-num_ckpt_keep]:
            os.remove(stale)
    print('Saving checkpoint to {}'.format(filepath))
    torch.save(obj, filepath)
    print('Complete.')
def _deprecate(fn: Callable) -> Callable:
    """Wrap *fn* so that every call emits a deprecation warning.

    The wrapper forwards all arguments and the return value unchanged; the
    warning points callers at the relocated ``loss.functional`` variant.
    """
    import functools  # local import keeps this fix self-contained

    name = fn.__name__
    msg = build_deprecation_message(f'The function ops.functional.{name}', '1.0', info=f'It was moved to loss.functional.{name}. See  for details')
    # BUG FIX: the bare "(fn)" line here was a decorator whose
    # "@functools.wraps" prefix was lost; restored so the wrapper keeps
    # fn's name/docstring.
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        warnings.warn(msg)
        return fn(*args, **kwargs)
    return wrapper
class TestHashable(TestNameCheckVisitorBase):
    """Checks that class objects satisfy Hashable and a custom __hash__ Protocol."""
    # NOTE(review): the bare "_passes()" call below looks like a mangled
    # decorator (e.g. "@assert_passes()") lost in extraction -- confirm
    # against the upstream test suite before relying on this block.
    _passes()
    def test_type(self):
        from typing import Hashable, Type
        from typing_extensions import Protocol
        class MyHashable(Protocol):
            def __hash__(self) -> int:
                raise NotImplementedError
        def want_hash(h: Hashable):
            pass
        def want_myhash(h: MyHashable):
            pass
        class A():
            pass
        class B():
            def __hash__(self) -> int:
                return 42
        def capybara(t1: Type[int], t2: type):
            # Class objects (and type vars ranging over them) are hashable,
            # so all of these calls and dict keys should pass the checker...
            want_hash(t1)
            want_hash(t2)
            want_hash(int)
            want_hash(A)
            want_hash(B)
            want_myhash(t1)
            want_myhash(t2)
            want_myhash(int)
            want_myhash(A)
            want_myhash(B)
            {t1: 0}
            {t2: 0}
            {int: 0}
            {A: 0}
            # ...while lists are unhashable (expected to be flagged).
            want_hash([])
            want_myhash([])
def get_quantsim_artifacts(base_model):
    """Prepare *base_model* and return a QuantizationSimModel with encodings
    computed from a random calibration pass."""
    model = prepare_model(base_model)
    # Random NHWC input only exercises the forward pass for encoding
    # computation; presumably the model expects 16x16x3 inputs -- confirm.
    calibration_input = np.random.rand(1, 16, 16, 3)
    sim = QuantizationSimModel(
        model=model,
        quant_scheme='tf_enhanced',
        rounding_mode='nearest',
        default_output_bw=8,
        default_param_bw=8,
        in_place=False,
        config_file=None,
    )
    sim.trainable = False
    sim.compute_encodings(dummy_forward_pass, forward_pass_callback_args=calibration_input)
    return sim
class ManniStyle(Style):
    """The Manni colour scheme: a pale blue-grey background with saturated
    accent colours for keywords, names and literals."""

    name = 'manni'
    background_color = '#f0f3f3'

    styles = {
        Whitespace: '#bbbbbb',
        # Comments
        Comment: 'italic #0099FF',
        Comment.Preproc: 'noitalic #009999',
        Comment.Special: 'bold',
        # Keywords and operators
        Keyword: 'bold #006699',
        Keyword.Pseudo: 'nobold',
        Keyword.Type: '#007788',
        Operator: '#555555',
        Operator.Word: 'bold #000000',
        # Names
        Name.Builtin: '#336666',
        Name.Function: '#CC00FF',
        Name.Class: 'bold #00AA88',
        Name.Namespace: 'bold #00CCFF',
        Name.Exception: 'bold #CC0000',
        Name.Variable: '#003333',
        Name.Constant: '#336600',
        Name.Label: '#9999FF',
        Name.Entity: 'bold #999999',
        Name.Attribute: '#330099',
        Name.Tag: 'bold #330099',
        Name.Decorator: '#9999FF',
        # Literals
        String: '#CC3300',
        String.Doc: 'italic',
        String.Interpol: '#AA0000',
        String.Escape: 'bold #CC3300',
        String.Regex: '#33AAAA',
        String.Symbol: '#FFCC33',
        String.Other: '#CC3300',
        Number: '#FF6600',
        # Generic output (diffs, prompts, tracebacks)
        Generic.Heading: 'bold #003300',
        Generic.Subheading: 'bold #003300',
        Generic.Deleted: 'border:#CC0000 bg:#FFCCCC',
        Generic.Inserted: 'border:#00CC00 bg:#CCFFCC',
        Generic.Error: '#FF0000',
        Generic.Emph: 'italic',
        Generic.Strong: 'bold',
        Generic.EmphStrong: 'bold italic',
        Generic.Prompt: 'bold #000099',
        Generic.Output: '#AAAAAA',
        Generic.Traceback: '#99CC66',
        Error: 'bg:#FFAAAA #AA0000',
    }
def registry_services():
    """Return the service table for the registry: one entry per worker or
    process, mapped to its autostart flag as the string supervisord expects
    ('true'/'false')."""
    # (name, autostart) pairs in canonical order; only the repo-mirror
    # worker and the config editor are opt-in.
    flags = [
        ('blobuploadcleanupworker', True),
        ('buildlogsarchiver', True),
        ('builder', True),
        ('chunkcleanupworker', True),
        ('expiredappspecifictokenworker', True),
        ('exportactionlogsworker', True),
        ('gcworker', True),
        ('globalpromstats', True),
        ('logrotateworker', True),
        ('namespacegcworker', True),
        ('repositorygcworker', True),
        ('notificationworker', True),
        ('queuecleanupworker', True),
        ('reconciliationworker', True),
        ('repositoryactioncounter', True),
        ('securityworker', True),
        ('storagereplication', True),
        ('teamsyncworker', True),
        ('dnsmasq', True),
        ('gunicorn-registry', True),
        ('gunicorn-secscan', True),
        ('gunicorn-web', True),
        ('ip-resolver-update-worker', True),
        ('memcache', True),
        ('nginx', True),
        ('pushgateway', True),
        ('servicekey', True),
        ('repomirrorworker', False),
        ('manifestbackfillworker', True),
        ('securityscanningnotificationworker', True),
        ('config-editor', False),
        ('quotatotalworker', True),
        ('quotaregistrysizeworker', True),
        ('autopruneworker', True),
    ]
    return {name: {'autostart': 'true' if on else 'false'} for name, on in flags}
class TrainDataset(Dataset):
    """Seq2seq training dataset: lowercased question -> query pairs.

    Examples are materialised eagerly (optionally repeated ``expansion``
    times) and cached to ``russ_train.cache`` under *cache_root* when
    ``args.dataset.use_cache`` is set.
    """
    def __init__(self, args, raw_datasets, cache_root):
        self.raw_datasets = raw_datasets
        cache_path = os.path.join(cache_root, 'russ_train.cache')
        if (os.path.exists(cache_path) and args.dataset.use_cache):
            # Fast path: reuse previously materialised examples.
            self.extended_data = torch.load(cache_path)
        else:
            self.extended_data = []
            # Repeat the whole dataset `expansion` times (default 1);
            # expand_id itself is unused, only the repetition count matters.
            expansion = (args.seq2seq.expansion if args.seq2seq.expansion else 1)
            for expand_id in range(expansion):
                for raw_data in tqdm(self.raw_datasets):
                    extend_data = copy.deepcopy(raw_data)
                    question = extend_data['question'].lower()
                    query = extend_data['query']
                    # struct_in is left empty -- presumably no structured
                    # context is fed for this task (confirm with the model).
                    extend_data.update({'struct_in': '', 'text_in': question.lower(), 'seq_out': query.lower()})
                    self.extended_data.append(extend_data)
            if args.dataset.use_cache:
                torch.save(self.extended_data, cache_path)
    def __getitem__(self, index) -> T_co:
        return self.extended_data[index]
    def __len__(self):
        return len(self.extended_data)
def _prep_metadata(md_sect, path):
    """Validate the ``[metadata]`` config section and convert it to a
    LoadedConfig with core-metadata style fields.

    *md_sect* is the raw mapping from the config parser; *path* is the
    config file path (its parent resolves the description file).  Raises
    ConfigError for any missing, unknown or mistyped field.
    """
    if (not set(md_sect).issuperset(metadata_required_fields)):
        missing = (metadata_required_fields - set(md_sect))
        raise ConfigError(('Required fields missing: ' + '\n'.join(missing)))
    res = LoadedConfig()
    res.module = md_sect.get('module')
    # Dotted module paths are allowed; every component must be an identifier.
    if (not all([m.isidentifier() for m in res.module.split('.')])):
        raise ConfigError(('Module name %r is not a valid identifier' % res.module))
    md_dict = res.metadata
    if ('description-file' in md_sect):
        desc_path = md_sect.get('description-file')
        res.referenced_files.append(desc_path)
        (desc_content, mimetype) = description_from_file(desc_path, path.parent)
        md_dict['description'] = desc_content
        md_dict['description_content_type'] = mimetype
    if ('urls' in md_sect):
        # Flatten the urls table into "label, url" strings, sorted by label.
        project_urls = md_dict['project_urls'] = []
        for (label, url) in sorted(md_sect.pop('urls').items()):
            project_urls.append('{}, {}'.format(label, url))
    for (key, value) in md_sect.items():
        if (key in {'description-file', 'module'}):
            continue
        if (key not in metadata_allowed_fields):
            # Suggest a close match for likely typos.
            closest = difflib.get_close_matches(key, metadata_allowed_fields, n=1, cutoff=0.7)
            msg = 'Unrecognised metadata key: {!r}'.format(key)
            if closest:
                msg += ' (did you mean {!r}?)'.format(closest[0])
            raise ConfigError(msg)
        k2 = key.replace('-', '_')
        md_dict[k2] = value
        # Per-field type checks: list fields, the requires-extra table,
        # everything else must be a plain string.
        if (key in metadata_list_fields):
            if (not isinstance(value, list)):
                raise ConfigError('Expected a list for {} field, found {!r}'.format(key, value))
            if (not all((isinstance(a, str) for a in value))):
                raise ConfigError('Expected a list of strings for {} field'.format(key))
        elif (key == 'requires-extra'):
            if (not isinstance(value, dict)):
                raise ConfigError('Expected a dict for requires-extra field, found {!r}'.format(value))
            if (not all((isinstance(e, list) for e in value.values()))):
                raise ConfigError('Expected a dict of lists for requires-extra field')
            for (e, reqs) in value.items():
                if (not all((isinstance(a, str) for a in reqs))):
                    raise ConfigError('Expected a string list for requires-extra. (extra {})'.format(e))
        elif (not isinstance(value, str)):
            raise ConfigError('Expected a string for {} field, found {!r}'.format(key, value))
    # Normalise legacy field names to the core-metadata names.
    if ('requires' in md_dict):
        md_dict['requires_dist'] = md_dict.pop('requires')
    if ('dist_name' in md_dict):
        md_dict['name'] = md_dict.pop('dist_name')
    reqs_noextra = md_dict.pop('requires_dist', [])
    res.reqs_by_extra = md_dict.pop('requires_extra', {})
    dev_requires = md_dict.pop('dev_requires', None)
    if (dev_requires is not None):
        if ('dev' in res.reqs_by_extra):
            raise ConfigError('dev-requires occurs together with its replacement requires-extra.dev.')
        else:
            log.warning('"dev-requires = ..." is obsolete. Use "requires-extra = {"dev" = ...}" instead.')
            res.reqs_by_extra['dev'] = dev_requires
    # Merge the base requirements with the extras into Requires-Dist.
    md_dict['requires_dist'] = (reqs_noextra + list(_expand_requires_extra(res.reqs_by_extra)))
    md_dict['provides_extra'] = sorted(res.reqs_by_extra.keys())
    # The '.none' bucket keeps the extra-less requirements for later lookup.
    res.reqs_by_extra['.none'] = reqs_noextra
    return res
class DeformRoIPoolPack(DeformRoIPool):
    """Deformable RoI pooling with offsets predicted by a small FC branch.

    A first, offset-free pooling pass produces features from which
    ``offset_fc`` predicts a (2, out_h, out_w) offset field per RoI; a
    second pass applies those offsets.  The final FC layer is zero-
    initialised, so the module initially behaves like plain RoI pooling.
    """
    def __init__(self, output_size, output_channels, deform_fc_channels=1024, spatial_scale=1.0, sampling_ratio=0, gamma=0.1):
        super(DeformRoIPoolPack, self).__init__(output_size, spatial_scale, sampling_ratio, gamma)
        self.output_channels = output_channels
        self.deform_fc_channels = deform_fc_channels
        self.offset_fc = nn.Sequential(nn.Linear(((self.output_size[0] * self.output_size[1]) * self.output_channels), self.deform_fc_channels), nn.ReLU(inplace=True), nn.Linear(self.deform_fc_channels, self.deform_fc_channels), nn.ReLU(inplace=True), nn.Linear(self.deform_fc_channels, ((self.output_size[0] * self.output_size[1]) * 2)))
        # Zero offsets at the start of training.
        self.offset_fc[(- 1)].weight.data.zero_()
        self.offset_fc[(- 1)].bias.data.zero_()
    def forward(self, input, rois):
        """Pool *input* over *rois* with learned per-RoI offsets.

        input: feature map whose channel dim equals output_channels.
        rois: per-RoI boxes -- exact layout is defined by deform_roi_pool's
        contract (confirm there); only rois.size(0) is used here.
        """
        assert (input.size(1) == self.output_channels)
        x = deform_roi_pool(input, rois, None, self.output_size, self.spatial_scale, self.sampling_ratio, self.gamma)
        rois_num = rois.size(0)
        offset = self.offset_fc(x.view(rois_num, (- 1)))
        offset = offset.view(rois_num, 2, self.output_size[0], self.output_size[1])
        return deform_roi_pool(input, rois, offset, self.output_size, self.spatial_scale, self.sampling_ratio, self.gamma)
class TestPrunetraceback():
    """Error reporting during collection: custom repr_failure, report
    post-processing, and clean tracebacks for module-level errors."""

    def test_custom_repr_failure(self, pytester: Pytester) -> None:
        # A File subclass may replace the collection traceback with its own text.
        p = pytester.makepyfile('\n import not_exists\n ')
        pytester.makeconftest('\n import pytest\n def pytest_collect_file(file_path, parent):\n return MyFile.from_parent(path=file_path, parent=parent)\n class MyError(Exception):\n pass\n class MyFile(pytest.File):\n def collect(self):\n raise MyError()\n def repr_failure(self, excinfo):\n if isinstance(excinfo.value, MyError):\n return "hello world"\n return pytest.File.repr_failure(self, excinfo)\n ')
        result = pytester.runpytest(p)
        result.stdout.fnmatch_lines(['*ERROR collecting*', '*hello world*'])

    # BUG FIX: the bare ".xfail(...)" line was a decorator whose
    # "@pytest.mark" prefix was lost in extraction (a syntax error as written).
    @pytest.mark.xfail(reason='other mechanism for adding to reporting needed')
    def test_collect_report_postprocessing(self, pytester: Pytester) -> None:
        p = pytester.makepyfile('\n import not_exists\n ')
        # BUG FIX: "(wrapper=True)" inside this conftest source was a mangled
        # "@pytest.hookimpl(wrapper=True)" decorator; restored so the
        # generated conftest is valid Python.
        pytester.makeconftest('\n import pytest\n @pytest.hookimpl(wrapper=True)\n def pytest_make_collect_report():\n rep = yield\n rep.headerlines += ["header1"]\n return rep\n ')
        result = pytester.runpytest(p)
        result.stdout.fnmatch_lines(['*ERROR collecting*', '*header1*'])

    def test_collection_error_traceback_is_clean(self, pytester: Pytester) -> None:
        # The reported traceback should start at the test file, not at
        # pytest internals.
        pytester.makepyfile('\n raise Exception("LOUSY")\n ')
        result = pytester.runpytest()
        result.stdout.fnmatch_lines(['*ERROR collecting*', 'test_*.py:1: in <module>', ' raise Exception("LOUSY")', 'E Exception: LOUSY', '*= short test summary info =*'], consecutive=True)
def test_channel_cleared_after_two_unlocks():
    """A settled channel's state is only dropped (new_state is None) after
    BOTH participants' pending locks have been unlocked on-chain, and a
    repeated unlock for the same side changes nothing.
    """
    (our_model, _) = create_model(balance=700, num_pending_locks=1)
    (partner_model, partner_key1) = create_model(balance=700, num_pending_locks=1)
    channel_state = create_channel_from_models(our_model, partner_model, partner_key1)
    block_number = 1
    block_hash = make_block_hash()
    pseudo_random_generator = random.Random()
    def make_unlock(unlock_end, partner_end):
        # Batch unlock clearing `unlock_end`'s locksroot in favour of
        # `partner_end` (the receiver of the unlocked tokens).
        batch_unlock = ContractReceiveChannelBatchUnlock(transaction_hash=make_transaction_hash(), canonical_identifier=channel_state.canonical_identifier, receiver=partner_end.address, sender=unlock_end.address, locksroot=unlock_end.balance_proof.locksroot, unlocked_amount=10, returned_tokens=0, block_number=block_number, block_hash=block_hash)
        return batch_unlock
    settle_channel = ContractReceiveChannelSettled(transaction_hash=make_transaction_hash(), canonical_identifier=channel_state.canonical_identifier, our_onchain_locksroot=compute_locksroot(channel_state.our_state.pending_locks), our_transferred_amount=0, partner_onchain_locksroot=compute_locksroot(channel_state.partner_state.pending_locks), partner_transferred_amount=0, block_number=1, block_hash=make_block_hash())
    # Settle first: both sides still carry one pending lock each.
    iteration = channel.state_transition(channel_state=channel_state, state_change=settle_channel, block_number=block_number, block_hash=block_hash, pseudo_random_generator=pseudo_random_generator)
    msg = 'both participants have pending locks, locksroot must not represent the empty list'
    assert (iteration.new_state.our_state.onchain_locksroot != LOCKSROOT_OF_NO_LOCKS), msg
    assert (iteration.new_state.partner_state.onchain_locksroot != LOCKSROOT_OF_NO_LOCKS), msg
    # First unlock: clears only our side.
    batch_unlock = make_unlock(channel_state.our_state, channel_state.partner_state)
    iteration = channel.state_transition(channel_state=iteration.new_state, state_change=batch_unlock, block_number=block_number, block_hash=block_hash, pseudo_random_generator=pseudo_random_generator)
    msg = 'all of our locks has been unlocked, onchain state must be updated'
    assert (iteration.new_state.our_state.onchain_locksroot is LOCKSROOT_OF_NO_LOCKS), msg
    msg = 'partner has pending locks, the locksroot must not represent the empty list'
    assert (iteration.new_state.partner_state.onchain_locksroot is not LOCKSROOT_OF_NO_LOCKS), msg
    msg = 'partner locksroot is not unlocked, channel should not have been cleaned'
    assert (iteration.new_state is not None), msg
    # Replaying the same unlock must be a no-op.
    iteration = channel.state_transition(channel_state=iteration.new_state, state_change=batch_unlock, block_number=block_number, block_hash=block_hash, pseudo_random_generator=pseudo_random_generator)
    msg = 'partner has pending locks, the locksroot must not represent the empty list'
    assert (iteration.new_state.partner_state.onchain_locksroot is not LOCKSROOT_OF_NO_LOCKS), msg
    msg = 'partner locksroot is not unlocked, channel should not have been cleaned'
    assert (iteration.new_state is not None), msg
    # Second side's unlock: now the channel state can be dropped entirely.
    iteration = channel.state_transition(channel_state=iteration.new_state, state_change=make_unlock(channel_state.partner_state, channel_state.our_state), block_number=block_number, block_hash=block_hash, pseudo_random_generator=pseudo_random_generator)
    msg = 'all unlocks have been done, channel must be cleared'
    assert (iteration.new_state is None), msg
def make_url(
    ext: str,
    *,
    file_checksum: (str | None) = None,
    metadata_checksum: (str | None) = None,
    hashes: (dict[(str, str)] | None) = None,
    metadata: ((dict[(str, str)] | str) | None) = None,
) -> Link:
    """Build a Link to a fake distribution file for tests.

    Unless explicit *hashes* are given, a ``#sha256=...`` fragment is added
    to the URL (generating a checksum when *file_checksum* is absent).
    *metadata* defaults to a ``sha256=<metadata_checksum>`` string when a
    metadata checksum is supplied.
    """
    # BUG FIX: the original URL literal was truncated to "f'" by extraction,
    # leaving a syntax error.  Reconstructed with a placeholder host --
    # confirm the exact URL against the upstream helper.
    url = f'https://example.com/example-1.0.{ext}'
    if not hashes:
        file_checksum = file_checksum or make_checksum()
        url += f'#sha256={file_checksum}'
    if not metadata:
        metadata = f'sha256={metadata_checksum}' if metadata_checksum else None
    return Link(url, hashes=hashes, metadata=metadata)
class GitEventHandler(BaseEventHandler):
    """Event handler that versions configuration changes in a git repository.

    Saves become ``git add`` + ``git commit`` inside *gitdir*; commits are
    attributed to *modified_by*/*source* through the git author environment
    variables.  With *auto_init* the repository is created (and everything
    committed) on first use.
    """

    def __init__(self, gitdir, source, modified_by, auto_init=False, ignore_errors=False):
        BaseEventHandler.__init__(self)
        self.gitdir = gitdir
        self.modified_by = modified_by
        self.source = source
        self.messages = []  # per-object messages queued by write(), flushed by save()
        self.ignore_errors = ignore_errors
        if auto_init:
            try:
                self._run_command('git status --short')
            except EventHandlerError as e:
                # git exits with 128 when gitdir is not a repository yet.
                if e.errorcode == 128:
                    self._git_init()
        self._update_author()

    def debug(self, object_definition, message):
        """Debug events are intentionally ignored by this handler."""
        pass

    def _update_author(self):
        """Export GIT_AUTHOR_* so commits are attributed to the right user.

        BUG FIX: the email format string was '%%s' with a two-item tuple,
        which raises TypeError at runtime; the intended value is
        "<source>@<hostname>".
        """
        environ['GIT_AUTHOR_NAME'] = self.modified_by
        environ['GIT_AUTHOR_EMAIL'] = '%s@%s' % (self.source, node())

    def _run_command(self, command):
        """Run *command* through the shell in the repo directory.

        Returns decoded stdout.  Raises EventHandlerError on a non-zero
        exit status unless ignore_errors was requested.
        """
        proc = subprocess.Popen(command, cwd=self.gitdir, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # stdin is not piped, so there is nothing to send to the child (the
        # original passed a string here that subprocess silently ignored).
        stdout, stderr = proc.communicate()
        stdout = bytes2str(stdout)
        stderr = bytes2str(stderr)
        returncode = proc.returncode
        if returncode > 0 and self.ignore_errors is False:
            errorstring = "Command '%s' returned exit status %s.\n stdout: %s \n stderr: %s\n Current user: %s"
            errorstring = errorstring % (command, returncode, stdout, stderr, getuser())
            raise EventHandlerError(errorstring, errorcode=returncode, errorstring=stderr)
        return stdout

    def is_commited(self):
        """Return True when the working tree has no uncommitted changes.

        BUG FIX: get_uncommited_files() returns a list, so the original
        ``== 0`` comparison was always False; compare its length instead.
        """
        return len(self.get_uncommited_files()) == 0

    def get_uncommited_files(self):
        """Return [{'status': ..., 'filename': ...}] for every dirty path."""
        output = self._run_command('git status --porcelain')
        result = []
        for line in output.split('\n'):
            parts = line.split()
            if len(parts) < 2:
                continue
            result.append({'status': parts[0], 'filename': ' '.join(parts[1:])})
        return result

    def _git_init(self, directory=None):
        """Initialise the repository and commit everything currently in it."""
        self._update_author()
        self._run_command('git init')
        self._run_command('git add .')
        self._run_command("git commit -a -m 'Initial Commit'")

    def _git_add(self, filename):
        # NOTE(review): filename is interpolated into a shell command;
        # quoting breaks for names containing single quotes.
        self._update_author()
        return self._run_command("git add '%s'" % filename)

    def _git_commit(self, filename, message, filelist=None):
        """Commit *filename* (or every path in *filelist*) with *message*."""
        if filelist is None:
            filelist = []
        self._update_author()
        # Single quotes would terminate the shell argument early.
        message = message.replace("'", '"')
        if len(filelist) > 0:
            filename = "' '".join(filelist)
        command = "git commit '%s' -m '%s'" % (filename, message)
        return self._run_command(command=command)

    def pre_save(self, object_definition, message):
        """Commit out-of-band (external) edits before the file is overwritten."""
        filename = object_definition.get_filename()
        if self._is_dirty(filename):
            self._git_add(filename)
            self._git_commit(filename, message=("External changes commited in %s '%s'" % (object_definition.object_type, object_definition.get_shortname())))

    def save(self, object_definition, message):
        """Commit the just-saved file, folding in any queued messages."""
        filename = object_definition.get_filename()
        if len(self.messages) > 0:
            message = [message, '\n'] + self.messages
            message = '\n'.join(message)
        self._git_add(filename)
        if self._is_dirty(filename):
            self._git_commit(filename, message)
        self.messages = []

    def _is_dirty(self, filename):
        """True when git reports any pending change for *filename*."""
        output = self._run_command("git status --porcelain '%s'" % filename)
        return len(output) > 0

    def write(self, object_definition, message):
        # Collect messages; they become part of the next save() commit.
        self.messages.append(' * %s' % message)
class C1():
    """attrs-based fixture class.

    NOTE(review): extraction appears to have stripped decorators here --
    the attr.ib fields imply an ``@attr.s`` on the class, and the
    ``classmethod``/``staticmethod`` methods were presumably decorated
    with ``@classmethod``/``@staticmethod``.  Confirm against upstream
    before relying on this block.
    """
    x = attr.ib(validator=attr.validators.instance_of(int))
    y = attr.ib()
    def method(self):
        return self.x
    def classmethod(cls):
        return 'clsmethod'
    def staticmethod():
        return 'staticmethod'
    def my_class(self):
        # __class__ resolves to the lexically enclosing class (implicit
        # closure cell created by the compiler).
        return __class__
    def my_super(self):
        return super().__repr__()
class LastConv(Transition):
    """Transition that applies a conv block to the last of its inputs."""

    def __init__(self, in_channels, out_channels, num_inputs, kernel_size=3, **kwargs):
        super().__init__(in_channels, out_channels)
        self.num_inputs = num_inputs
        # "same" padding for odd kernel sizes.
        self.conv_out = ConvModule(
            in_channels,
            out_channels,
            kernel_size,
            padding=(kernel_size - 1) // 2,
            **kwargs,
        )

    def forward(self, inputs):
        """Expect exactly ``num_inputs`` feature maps; convolve the last one."""
        assert len(inputs) == self.num_inputs
        last_feature = inputs[-1]
        return self.conv_out(last_feature)
# BUG FIX: the leading ".parametrize(" was a decorator whose "@pytest.mark"
# prefix was lost in extraction (a syntax error as written).
@pytest.mark.parametrize(
    'ngood, nbad, nsample, size',
    [
        (np.array(10, dtype=np.int64), np.array(20, dtype=np.int64), np.array(5, dtype=np.int64), None),
        (np.array(10, dtype=np.int64), np.array(20, dtype=np.int64), np.array(5, dtype=np.int64), []),
        (np.full((1, 2), 10, dtype=np.int64), np.array(20, dtype=np.int64), np.array(5, dtype=np.int64), None),
    ],
)
def test_hypergeometric_samples(ngood, nbad, nsample, size):
    """Hypergeometric sampling matches the reference across scalar, empty
    and broadcast parameter shapes."""
    compare_sample_values(hypergeometric, ngood, nbad, nsample, size=size)
def test_catalog_loader(tmpdir):
    """Round-trip a Controller through a catalog file and read it back unchanged."""
    catalog_path = os.path.join(tmpdir, 'my_catalog.xosc')
    catalog_file = xosc.CatalogFile()
    catalog_file.create_catalog(catalog_path, 'TrajectoryCatalog', 'My first miscobject catalog', 'Mandolin')
    original = xosc.Controller('my_controller', xosc.Properties())
    catalog_file.add_to_catalog(original)
    catalog_file.dump()
    loaded = xosc.CatalogReader(xosc.CatalogReference('my_catalog', 'my_controller'), tmpdir)
    assert loaded == original
import contextlib


# Fix: this generator-style context manager was missing its
# @contextlib.contextmanager decorator (without it, `with atomic_write(...)`
# cannot work -- calling the function just returns a generator object).
@contextlib.contextmanager
def atomic_write(filepath, binary=False, fsync=False):
    """Context manager that writes *filepath* atomically via a ``~``-suffixed temp file.

    The payload is written to a sibling temp file and moved into place with
    ``replace`` only after the ``with`` body completes without raising, so
    readers never observe a half-written file.

    Args:
        filepath: destination path.
        binary: open the temp file in binary mode when True.
        fsync: flush and fsync the temp file before the rename when True.

    Yields:
        The open temp-file object to write to.
    """
    tmppath = (filepath + '~')
    # Avoid clobbering an existing temp file from a concurrent/aborted writer.
    while os.path.isfile(tmppath):
        tmppath += '~'
    try:
        with open(tmppath, ('wb' if binary else 'w')) as file:
            (yield file)
            if fsync:
                file.flush()
                os.fsync(file.fileno())
        # Atomic rename -- only reached when the with-body did not raise.
        replace(tmppath, filepath)
    finally:
        # Best-effort cleanup; after a successful replace the temp file is
        # already gone, so the failure is expected and ignored.
        try:
            os.remove(tmppath)
        except (IOError, OSError):
            pass
from dataclasses import dataclass


# Fix: the decorator had been mangled to a bare `(frozen=False)` line, which
# is a SyntaxError; the annotated fields identify this as a dataclass.
@dataclass(frozen=False)
class CollaborationState:
    """Mutable snapshot of a collaborative-training run's progress."""

    optimizer_step: int
    samples_accumulated: int
    target_batch_size: int
    num_peers: int
    eta_next_step: float
    next_fetch_time: float

    def should_perform_step(self):
        """True when enough samples accumulated or the ETA for the next step has passed."""
        return ((self.samples_accumulated >= self.target_batch_size) or (hivemind.get_dht_time() >= self.eta_next_step))

    def register_step(self):
        """Record that a global optimizer step happened; reset the accumulators."""
        self.optimizer_step += 1
        self.samples_accumulated = 0
        self.eta_next_step = float('inf')
def _gen_rhf_response(mf, mo_coeff=None, mo_occ=None, singlet=None, hermi=0, max_memory=None):
    """Build the response function ``vind(dm1)`` for a closed-shell SCF object.

    ``vind`` maps first-order density matrices to the induced effective
    potential: Coulomb/exchange for Hartree-Fock, plus the xc-kernel (fxc)
    contribution for DFT objects.

    Args:
        mf: restricted mean-field object (RHF or RKS); UHF/ROHF are rejected.
        mo_coeff: MO coefficients; defaults to ``mf.mo_coeff``.
        mo_occ: MO occupations; defaults to ``mf.mo_occ``.
        singlet: None for the plain density response; True/False select the
            spin-adapted singlet/triplet response.
        hermi: hermiticity of dm1; 2 means anti-hermitian, for which the
            Coulomb and xc parts vanish and only exchange survives.
        max_memory: MB cap for the xc-kernel contraction; defaults to 80% of
            the remaining ``mf.max_memory``.

    Returns:
        Callable ``vind(dm1)`` returning the induced potential.
    """
    # Restricted closed-shell only: UHF/ROHF subclasses are explicitly excluded.
    assert (isinstance(mf, hf.RHF) and (not isinstance(mf, (uhf.UHF, rohf.ROHF))))
    if (mo_coeff is None):
        mo_coeff = mf.mo_coeff
    if (mo_occ is None):
        mo_occ = mf.mo_occ
    mol = mf.mol
    if isinstance(mf, hf.KohnShamDFT):
        # DFT branch: the xc kernel enters the response.
        from pyscf.dft import numint
        ni = mf._numint
        # The functional must support second derivatives for fxc.
        ni.libxc.test_deriv_order(mf.xc, 2, raise_error=True)
        if (mf.nlc or ni.libxc.is_nlc(mf.xc)):
            logger.warn(mf, 'NLC functional found in DFT object. Its second deriviative is not available. Its contribution is not included in the response function.')
        (omega, alpha, hyb) = ni.rsh_and_hybrid_coeff(mf.xc, mol.spin)
        hybrid = ni.libxc.is_hybrid_xc(mf.xc)
        # Pure functionals with the multigrid FFT density-fitting backend take
        # a dedicated fast path.
        if ((not hybrid) and ('MultiGridFFTDF' == getattr(mf, 'with_df', None).__class__.__name__)):
            from pyscf.pbc.dft import multigrid
            dm0 = mf.make_rdm1(mo_coeff, mo_occ)
            return multigrid._gen_rhf_response(mf, dm0, singlet, hermi)
        # spin=1 requests the spin-adapted (singlet/triplet) kernel cache.
        if (singlet is None):
            spin = 0
        else:
            spin = 1
        (rho0, vxc, fxc) = ni.cache_xc_kernel(mol, mf.grids, mf.xc, mo_coeff, mo_occ, spin)
        dm0 = None
        if (max_memory is None):
            mem_now = lib.current_memory()[0]
            max_memory = max(2000, ((mf.max_memory * 0.8) - mem_now))
        if (singlet is None):
            # Plain (spin-free) density response.
            def vind(dm1):
                if (hermi == 2):
                    # Anti-hermitian dm1: Coulomb and xc contributions vanish.
                    v1 = numpy.zeros_like(dm1)
                else:
                    v1 = ni.nr_rks_fxc(mol, mf.grids, mf.xc, dm0, dm1, 0, hermi, rho0, vxc, fxc, max_memory=max_memory)
                if hybrid:
                    if (hermi != 2):
                        (vj, vk) = mf.get_jk(mol, dm1, hermi=hermi)
                        vk *= hyb
                        if (abs(omega) > 1e-10):
                            # Range-separated hybrid: long-range exchange correction.
                            vk += (mf.get_k(mol, dm1, hermi, omega) * (alpha - hyb))
                        v1 += (vj - (0.5 * vk))
                    else:
                        v1 -= ((0.5 * hyb) * mf.get_k(mol, dm1, hermi=hermi))
                elif (hermi != 2):
                    v1 += mf.get_j(mol, dm1, hermi=hermi)
                return v1
        elif singlet:
            # Singlet response: spin-adapted fxc (factor 1/2 per convention).
            def vind(dm1):
                if (hermi == 2):
                    v1 = numpy.zeros_like(dm1)
                else:
                    v1 = ni.nr_rks_fxc_st(mol, mf.grids, mf.xc, dm0, dm1, 0, True, rho0, vxc, fxc, max_memory=max_memory)
                    v1 *= 0.5
                if hybrid:
                    if (hermi != 2):
                        (vj, vk) = mf.get_jk(mol, dm1, hermi=hermi)
                        vk *= hyb
                        if (abs(omega) > 1e-10):
                            vk += (mf.get_k(mol, dm1, hermi, omega) * (alpha - hyb))
                        v1 += (vj - (0.5 * vk))
                    else:
                        v1 -= ((0.5 * hyb) * mf.get_k(mol, dm1, hermi=hermi))
                elif (hermi != 2):
                    v1 += mf.get_j(mol, dm1, hermi=hermi)
                return v1
        else:
            # Triplet response: no Coulomb term, only exchange (for hybrids).
            def vind(dm1):
                if (hermi == 2):
                    v1 = numpy.zeros_like(dm1)
                else:
                    v1 = ni.nr_rks_fxc_st(mol, mf.grids, mf.xc, dm0, dm1, 0, False, rho0, vxc, fxc, max_memory=max_memory)
                    v1 *= 0.5
                if hybrid:
                    vk = mf.get_k(mol, dm1, hermi=hermi)
                    vk *= hyb
                    if (abs(omega) > 1e-10):
                        vk += (mf.get_k(mol, dm1, hermi, omega) * (alpha - hyb))
                    v1 += ((- 0.5) * vk)
                return v1
    elif (((singlet is None) or singlet) and (hermi != 2)):
        # Hartree-Fock, singlet-like case: full J - K/2 response.
        def vind(dm1):
            (vj, vk) = mf.get_jk(mol, dm1, hermi=hermi)
            return (vj - (0.5 * vk))
    else:
        # Hartree-Fock triplet / anti-hermitian case: exchange only.
        def vind(dm1):
            return ((- 0.5) * mf.get_k(mol, dm1, hermi=hermi))
    return vind
class DiscordMocksTests(unittest.TestCase):
    """Tests for the discord.py mock helper classes in the `helpers` module."""

    def test_mock_role_default_initialization(self):
        """MockRole should pass an isinstance check against discord.Role and use defaults."""
        role = helpers.MockRole()
        self.assertIsInstance(role, discord.Role)
        self.assertEqual(role.name, 'role')
        self.assertEqual(role.position, 1)
        self.assertEqual(role.mention, '&role')

    def test_mock_role_alternative_arguments(self):
        """MockRole should honor explicitly passed name/id/position."""
        role = helpers.MockRole(name='Admins', id=90210, position=10)
        self.assertEqual(role.name, 'Admins')
        self.assertEqual(role.id, 90210)
        self.assertEqual(role.position, 10)
        self.assertEqual(role.mention, '&Admins')

    def test_mock_role_accepts_dynamic_arguments(self):
        """MockRole should accept arbitrary keyword attributes."""
        role = helpers.MockRole(guild='Dino Man', hoist=True)
        self.assertEqual(role.guild, 'Dino Man')
        self.assertTrue(role.hoist)

    def test_mock_role_uses_position_for_less_than_greater_than(self):
        """MockRole ordering should follow the `position` attribute."""
        role_one = helpers.MockRole(position=1)
        role_two = helpers.MockRole(position=2)
        role_three = helpers.MockRole(position=3)
        self.assertLess(role_one, role_two)
        self.assertLess(role_one, role_three)
        self.assertLess(role_two, role_three)
        self.assertGreater(role_three, role_two)
        self.assertGreater(role_three, role_one)
        self.assertGreater(role_two, role_one)

    def test_mock_member_default_initialization(self):
        """MockMember should pass an isinstance check against discord.Member and use defaults."""
        member = helpers.MockMember()
        self.assertIsInstance(member, discord.Member)
        self.assertEqual(member.name, 'member')
        self.assertListEqual(member.roles, [helpers.MockRole(name='', position=1, id=0)])
        self.assertEqual(member.mention, '')

    def test_mock_member_alternative_arguments(self):
        """MockMember should honor explicitly passed name/id/roles."""
        core_developer = helpers.MockRole(name='Core Developer', position=2)
        member = helpers.MockMember(name='Mark', id=12345, roles=[core_developer])
        self.assertEqual(member.name, 'Mark')
        self.assertEqual(member.id, 12345)
        self.assertListEqual(member.roles, [helpers.MockRole(name='', position=1, id=0), core_developer])
        self.assertEqual(member.mention, '')

    def test_mock_member_accepts_dynamic_arguments(self):
        """MockMember should accept arbitrary keyword attributes."""
        member = helpers.MockMember(nick='Dino Man', colour=discord.Colour.default())
        self.assertEqual(member.nick, 'Dino Man')
        self.assertEqual(member.colour, discord.Colour.default())

    def test_mock_guild_default_initialization(self):
        """MockGuild should pass an isinstance check against discord.Guild and use defaults."""
        guild = helpers.MockGuild()
        self.assertIsInstance(guild, discord.Guild)
        self.assertListEqual(guild.roles, [helpers.MockRole(name='', position=1, id=0)])
        self.assertListEqual(guild.members, [])

    def test_mock_guild_alternative_arguments(self):
        """MockGuild should honor explicitly passed roles/members."""
        core_developer = helpers.MockRole(name='Core Developer', position=2)
        guild = helpers.MockGuild(roles=[core_developer], members=[helpers.MockMember(id=54321)])
        self.assertListEqual(guild.roles, [helpers.MockRole(name='', position=1, id=0), core_developer])
        self.assertListEqual(guild.members, [helpers.MockMember(id=54321)])

    def test_mock_guild_accepts_dynamic_arguments(self):
        """MockGuild should accept arbitrary keyword attributes."""
        guild = helpers.MockGuild(emojis=(':hyperjoseph:', ':pensive_ela:'), premium_subscription_count=15)
        self.assertTupleEqual(guild.emojis, (':hyperjoseph:', ':pensive_ela:'))
        self.assertEqual(guild.premium_subscription_count, 15)

    def test_mock_bot_default_initialization(self):
        """MockBot should pass an isinstance check against commands.Bot."""
        bot = helpers.MockBot()
        self.assertIsInstance(bot, discord.ext.commands.Bot)

    def test_mock_context_default_initialization(self):
        """MockContext should pass an isinstance check and set up sub-mocks."""
        context = helpers.MockContext()
        self.assertIsInstance(context, discord.ext.commands.Context)
        self.assertIsInstance(context.bot, helpers.MockBot)
        self.assertIsInstance(context.guild, helpers.MockGuild)
        self.assertIsInstance(context.author, helpers.MockMember)

    def test_mocks_allows_access_to_attributes_part_of_spec(self):
        """Accessing attributes that are part of the spec object should not raise."""
        mocks = ((helpers.MockGuild(), 'name'), (helpers.MockRole(), 'hoist'), (helpers.MockMember(), 'display_name'), (helpers.MockBot(), 'user'), (helpers.MockContext(), 'invoked_with'), (helpers.MockTextChannel(), 'last_message'), (helpers.MockMessage(), 'mention_everyone'))
        for (mock, valid_attribute) in mocks:
            with self.subTest(mock=mock):
                try:
                    getattr(mock, valid_attribute)
                except AttributeError:
                    msg = f'accessing valid attribute `{valid_attribute}` raised an AttributeError'
                    self.fail(msg)

    # Fix: these two decorator lines had been mangled to bare `.patch(...)`
    # statements (SyntaxError); restored as unittest.mock.patch. The bottom
    # decorator supplies the first mock argument (mock_getattr).
    @unittest.mock.patch(f'{__name__}.DiscordMocksTests.subTest')
    @unittest.mock.patch(f'{__name__}.getattr')
    def test_mock_allows_access_to_attributes_test(self, mock_getattr, mock_subtest):
        """Meta-test: the spec-attribute test should fail when getattr raises."""
        mock_getattr.side_effect = AttributeError
        msg = 'accessing valid attribute `name` raised an AttributeError'
        with self.assertRaises(AssertionError, msg=msg):
            self.test_mocks_allows_access_to_attributes_part_of_spec()

    def test_mocks_rejects_access_to_attributes_not_part_of_spec(self):
        """Accessing attributes outside the spec should raise AttributeError."""
        mocks = (helpers.MockGuild(), helpers.MockRole(), helpers.MockMember(), helpers.MockBot(), helpers.MockContext(), helpers.MockTextChannel(), helpers.MockMessage())
        for mock in mocks:
            with self.subTest(mock=mock), self.assertRaises(AttributeError):
                mock.the_cake_is_a_lie

    def test_mocks_use_mention_when_provided_as_kwarg(self):
        """An explicitly provided `mention` kwarg should win over the default."""
        test_cases = ((helpers.MockRole, 'role mention'), (helpers.MockMember, 'member mention'), (helpers.MockTextChannel, 'channel mention'))
        for (mock_type, mention) in test_cases:
            with self.subTest(mock_type=mock_type, mention=mention):
                mock = mock_type(mention=mention)
                self.assertEqual(mock.mention, mention)

    def test_create_test_on_mock_bot_closes_passed_coroutine(self):
        """`bot.loop.create_task` should close the coroutine to avoid warnings."""
        async def dementati():
            """Dummy coroutine for testing."""
        # Fix: the async def above had no body in the mangled source (SyntaxError);
        # a docstring-only body restores a valid no-op coroutine.
        coroutine_object = dementati()
        bot = helpers.MockBot()
        bot.loop.create_task(coroutine_object)
        with self.assertRaises(RuntimeError, msg='cannot reuse already awaited coroutine'):
            asyncio.run(coroutine_object)

    def test_user_mock_uses_explicitly_passed_mention_attribute(self):
        """MockUser should keep an explicitly passed mention attribute."""
        user = helpers.MockUser(mention='hello')
        self.assertEqual(user.mention, 'hello')
def evaluation_plot(csv_file, criteria, label, save_name, val=True):
    """Boxplot a metric for the ET/TC/WT tumour regions from a results CSV and save it."""
    df = pd.read_csv(csv_file)
    # Per region: drop NaNs and the trailing 5 rows (aggregate entries).
    dict_criteria = {
        region: [v for v in df[criteria + '_' + region] if not np.isnan(v)][:-5]
        for region in ('ET', 'TC', 'WT')
    }
    plt.figure()
    tick_names = [key + ('\nmean: %.2f' % np.mean(dict_criteria[key])) for key in dict_criteria.keys()]
    plt.boxplot(dict_criteria.values(), labels=tick_names)
    plt.ylabel(label)
    dataset_type = 'Val' if val else 'Training'
    plt.title(label + ' Boxplot of ' + dataset_type + ' Dataset')
    plt.savefig(save_name, dpi=200)
def _get_build_num(args):
    """Return the conda build number: 0 for a brand-new pdfium version, otherwise one more than the highest existing build of that version."""
    search = list(reversed(run_conda_search('pypdfium2_raw', 'pypdfium2-team')))
    if args.is_literal_latest:
        # Scheduled releases must not silently rebuild an already-published version.
        assert args.pdfium_ver > max(int(d['version']) for d in search), 'Literal latest must resolve to a new version. This is done to avoid rebuilds without new version in scheduled releases. If you want to rebuild, omit --pdfium-ver or pass the resolved value.'
    prev_builds = [d['build_number'] for d in search if int(d['version']) == args.pdfium_ver]
    if not prev_builds:
        return 0
    return max(prev_builds) + 1
class ChannelGate(nn.Module):
    """Squeeze-and-excitation style gate that reweights the channels of its input."""

    def __init__(self, in_channels, num_gates=None, return_gates=False, gate_activation='sigmoid', reduction=16, layer_norm=False):
        super(ChannelGate, self).__init__()
        if num_gates is None:
            num_gates = in_channels
        self.return_gates = return_gates
        squeezed = in_channels // reduction
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        # 1x1 conv pair implements the bottleneck MLP of the SE block.
        self.fc1 = nn.Conv2d(in_channels, squeezed, kernel_size=1, bias=True, padding=0)
        self.norm1 = nn.LayerNorm((squeezed, 1, 1)) if layer_norm else None
        self.relu = nn.ReLU(inplace=True)
        self.fc2 = nn.Conv2d(squeezed, num_gates, kernel_size=1, bias=True, padding=0)
        if gate_activation == 'sigmoid':
            self.gate_activation = nn.Sigmoid()
        elif gate_activation == 'relu':
            self.gate_activation = nn.ReLU(inplace=True)
        elif gate_activation == 'linear':
            self.gate_activation = nn.Identity()
        else:
            raise RuntimeError('Unknown gate activation: {}'.format(gate_activation))

    def forward(self, x):
        """Compute per-channel gates; return gates or the gated input."""
        gates = self.global_avgpool(x)
        gates = self.fc1(gates)
        if self.norm1 is not None:
            gates = self.norm1(gates)
        gates = self.fc2(self.relu(gates))
        gates = self.gate_activation(gates)
        return gates if self.return_gates else x * gates
def test_connection():
    """Connections with identical lane links compare equal; differing links do not."""
    def _build(links):
        conn = pyodrx.Connection(1, 2, pyodrx.ContactPoint.start, 5)
        for incoming, linked in links:
            conn.add_lanelink(incoming, linked)
        return conn

    con = _build([(1, -1), (2, -2)])
    prettyprint(con.get_element(pyodrx.JunctionType.direct))
    con2 = _build([(1, -1), (2, -2)])
    con3 = _build([(1, -1), (1, -2)])
    assert con == con2
    assert con != con3
    assert version_validation('t_junction_connection', con, wanted_schema='xodr') == ValidationResponse.OK
class HighResolutionModule(nn.Module):
    """One HRNet stage: parallel multi-resolution branches with cross-resolution fusion.

    NOTE(review): relies on module-level ``BN_MOMENTUM`` and ``logger`` defined
    elsewhere in this file.
    """
    def __init__(self, num_branches, blocks, num_blocks, num_inchannels, num_channels, fuse_method, multi_scale_output=True):
        super(HighResolutionModule, self).__init__()
        self._check_branches(num_branches, blocks, num_blocks, num_inchannels, num_channels)
        self.num_inchannels = num_inchannels
        self.fuse_method = fuse_method
        self.num_branches = num_branches
        self.multi_scale_output = multi_scale_output
        self.branches = self._make_branches(num_branches, blocks, num_blocks, num_channels)
        self.fuse_layers = self._make_fuse_layers()
        self.relu = nn.ReLU(False)
    def _check_branches(self, num_branches, blocks, num_blocks, num_inchannels, num_channels):
        """Validate that the per-branch config lists each have num_branches entries."""
        if (num_branches != len(num_blocks)):
            error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(num_branches, len(num_blocks))
            logger.error(error_msg)
            raise ValueError(error_msg)
        if (num_branches != len(num_channels)):
            error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(num_branches, len(num_channels))
            logger.error(error_msg)
            raise ValueError(error_msg)
        if (num_branches != len(num_inchannels)):
            error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(num_branches, len(num_inchannels))
            logger.error(error_msg)
            raise ValueError(error_msg)
    def _make_one_branch(self, branch_index, block, num_blocks, num_channels, stride=1):
        """Build one branch as a stack of `block` units; adds a 1x1 downsample when shapes differ."""
        downsample = None
        if ((stride != 1) or (self.num_inchannels[branch_index] != (num_channels[branch_index] * block.expansion))):
            downsample = nn.Sequential(nn.Conv2d(self.num_inchannels[branch_index], (num_channels[branch_index] * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((num_channels[branch_index] * block.expansion), momentum=BN_MOMENTUM))
        layers = []
        layers.append(block(self.num_inchannels[branch_index], num_channels[branch_index], stride, downsample))
        # Side effect: record this branch's new channel count for later fuse layers.
        self.num_inchannels[branch_index] = (num_channels[branch_index] * block.expansion)
        for i in range(1, num_blocks[branch_index]):
            layers.append(block(self.num_inchannels[branch_index], num_channels[branch_index]))
        return nn.Sequential(*layers)
    def _make_branches(self, num_branches, block, num_blocks, num_channels):
        """Build all parallel branches (one per resolution)."""
        branches = []
        for i in range(num_branches):
            branches.append(self._make_one_branch(i, block, num_blocks, num_channels))
        return nn.ModuleList(branches)
    def _make_fuse_layers(self):
        """Build the cross-resolution fusion layers (upsample from finer, strided convs from coarser)."""
        if (self.num_branches == 1):
            return None
        num_branches = self.num_branches
        num_inchannels = self.num_inchannels
        fuse_layers = []
        # Only the highest-resolution output is produced unless multi_scale_output.
        for i in range((num_branches if self.multi_scale_output else 1)):
            fuse_layer = []
            for j in range(num_branches):
                if (j > i):
                    # Finer target than source: 1x1 conv then nearest upsample by 2^(j-i).
                    fuse_layer.append(nn.Sequential(nn.Conv2d(num_inchannels[j], num_inchannels[i], 1, 1, 0, bias=False), nn.BatchNorm2d(num_inchannels[i], momentum=BN_MOMENTUM), nn.Upsample(scale_factor=(2 ** (j - i)), mode='nearest')))
                elif (j == i):
                    # Identity path.
                    fuse_layer.append(None)
                else:
                    # Coarser target: chain of stride-2 3x3 convs; last one maps channels.
                    conv3x3s = []
                    for k in range((i - j)):
                        if (k == ((i - j) - 1)):
                            num_outchannels_conv3x3 = num_inchannels[i]
                            conv3x3s.append(nn.Sequential(nn.Conv2d(num_inchannels[j], num_outchannels_conv3x3, 3, 2, 1, bias=False), nn.BatchNorm2d(num_outchannels_conv3x3, momentum=BN_MOMENTUM)))
                        else:
                            num_outchannels_conv3x3 = num_inchannels[j]
                            conv3x3s.append(nn.Sequential(nn.Conv2d(num_inchannels[j], num_outchannels_conv3x3, 3, 2, 1, bias=False), nn.BatchNorm2d(num_outchannels_conv3x3, momentum=BN_MOMENTUM), nn.ReLU(False)))
                    fuse_layer.append(nn.Sequential(*conv3x3s))
            fuse_layers.append(nn.ModuleList(fuse_layer))
        return nn.ModuleList(fuse_layers)
    def get_num_inchannels(self):
        """Return the per-branch output channel counts (after expansion)."""
        return self.num_inchannels
    def forward(self, x):
        """Run each branch on its input, then sum-fuse across resolutions.

        x is a list with one tensor per branch; returns a list of fused tensors.
        """
        if (self.num_branches == 1):
            return [self.branches[0](x[0])]
        for i in range(self.num_branches):
            x[i] = self.branches[i](x[i])
        x_fuse = []
        for i in range(len(self.fuse_layers)):
            y = (x[0] if (i == 0) else self.fuse_layers[i][0](x[0]))
            for j in range(1, self.num_branches):
                if (i == j):
                    y = (y + x[j])
                else:
                    y = (y + self.fuse_layers[i][j](x[j]))
            x_fuse.append(self.relu(y))
        return x_fuse
class HTTPPasswordMgrWithDefaultRealm(HTTPPasswordMgr):
    """Password manager that falls back to the ``None`` (default) realm on a miss."""

    def find_user_password(self, realm, authuri):
        """Look up credentials for *realm*; if none are stored, retry with the default realm."""
        user, password = HTTPPasswordMgr.find_user_password(self, realm, authuri)
        if user is not None:
            return user, password
        # Nothing registered for the specific realm -- use the default-realm entry.
        return HTTPPasswordMgr.find_user_password(self, None, authuri)
class TwoStageCNNGeometric(CNNGeometric):
    """Two-stage geometric matching CNN: an affine stage followed by a refinement stage.

    Stage 1 reuses the parent CNNGeometric regressor; stage 2 regresses a
    second transform (affine when s1_output_dim == 6, else TPS) from features
    of the stage-1-warped source image.
    """

    def __init__(self, fr_feature_size=15, fr_kernel_sizes=[7, 5], fr_channels=[128, 64], feature_extraction_cnn='vgg', feature_extraction_last_layer='', return_correlation=False, normalize_features=True, normalize_matches=True, batch_normalization=True, train_fe=False, use_cuda=True, s1_output_dim=6, s2_output_dim=18):
        # NOTE(review): the mutable default args (lists) are kept for interface
        # compatibility; they are only read here, never mutated.
        super(TwoStageCNNGeometric, self).__init__(output_dim=s1_output_dim, fr_feature_size=fr_feature_size, fr_kernel_sizes=fr_kernel_sizes, fr_channels=fr_channels, feature_extraction_cnn=feature_extraction_cnn, feature_extraction_last_layer=feature_extraction_last_layer, return_correlation=return_correlation, normalize_features=normalize_features, normalize_matches=normalize_matches, batch_normalization=batch_normalization, train_fe=train_fe, use_cuda=use_cuda)
        if s1_output_dim == 6:
            self.geoTnf = GeometricTnf(geometric_model='affine', use_cuda=use_cuda)
        else:
            # TPS: s2_output_dim = 2 * grid_size**2 control-point offsets.
            # NOTE(review): np.sqrt returns a float; presumably GeometricTnf
            # accepts that -- confirm before changing.
            tps_grid_size = np.sqrt(s2_output_dim / 2)
            self.geoTnf = GeometricTnf(geometric_model='tps', tps_grid_size=tps_grid_size, use_cuda=use_cuda)
        self.FeatureRegression2 = FeatureRegression(output_dim=s2_output_dim, use_cuda=use_cuda, feature_size=fr_feature_size, kernel_sizes=fr_kernel_sizes, channels=fr_channels, batch_normalization=batch_normalization)

    def forward(self, batch, f_src=None, f_tgt=None, use_theta_GT_aff=False):
        """Run both stages.

        Args:
            batch: dict with 'source_image', 'target_image' (and
                'theta_GT_aff' when use_theta_GT_aff is True).
            f_src, f_tgt: optional precomputed feature maps; extracted from
                the batch images when both are None.
            use_theta_GT_aff: warp the stage-2 input with the ground-truth
                affine instead of the stage-1 prediction.

        Returns:
            (theta_1, theta_2) or, when return_correlation is set,
            (theta_1, theta_2, correlation_1, correlation_2).
        """
        if f_src is None and f_tgt is None:
            f_src = self.FeatureExtraction(batch['source_image'])
            f_tgt = self.FeatureExtraction(batch['target_image'])
        correlation_1 = self.FeatureCorrelation(f_src, f_tgt)
        theta_1 = self.FeatureRegression(correlation_1)
        # Idiom fix: was `if use_theta_GT_aff == False:` with inverted branches.
        if use_theta_GT_aff:
            source_image_wrp = self.geoTnf(batch['source_image'], batch['theta_GT_aff'])
        else:
            source_image_wrp = self.geoTnf(batch['source_image'], theta_1)
        f_src_wrp = self.FeatureExtraction(source_image_wrp)
        correlation_2 = self.FeatureCorrelation(f_src_wrp, f_tgt)
        theta_2 = self.FeatureRegression2(correlation_2)
        if self.return_correlation:
            return (theta_1, theta_2, correlation_1, correlation_2)
        return (theta_1, theta_2)
# Fix: the decorator had been mangled to a bare `_required` line; restored
# as @login_required (the view uses request.user and must be authenticated).
@login_required
def plugin_create(request):
    """Create a new plugin from the submitted form; login required.

    GET renders an empty form; POST validates, saves the plugin with the
    current user as creator, sends a notification and redirects to the
    plugin's page.
    """
    if request.method == 'POST':
        form = PluginForm(request.POST, request.FILES)
        # Owners may be any user except the creator themselves.
        form.fields['owners'].queryset = User.objects.exclude(pk=request.user.pk).order_by('username')
        if form.is_valid():
            plugin = form.save(commit=False)
            plugin.created_by = request.user
            plugin.save()
            plugin_notify(plugin)
            msg = _('The Plugin has been successfully created.')
            messages.success(request, msg, fail_silently=True)
            return HttpResponseRedirect(plugin.get_absolute_url())
    else:
        form = PluginForm()
        form.fields['owners'].queryset = User.objects.exclude(pk=request.user.pk).order_by('username')
    return render(request, 'plugins/plugin_form.html', {'form': form, 'form_title': _('New plugin')})
def convert2panoptic(cityscapesPath=None, outputFolder=None, useTrainId=False, setNames=['val', 'train', 'test']):
    """Convert Cityscapes instance-ID ground truth to COCO panoptic format.

    For each requested split, writes one panoptic PNG per annotation file and
    a single JSON with images/annotations/categories.

    NOTE(review): relies on module-level ``labels``, ``id2label`` and
    ``printError`` from the cityscapes scripts. The mutable default for
    ``setNames`` is only read, never mutated.

    Args:
        cityscapesPath: dataset root; falls back to $CITYSCAPES_DATASET or a
            path relative to this script. 'gtFine' is appended.
        outputFolder: destination; defaults to the (gtFine) dataset path.
        useTrainId: emit trainIds instead of raw label ids.
        setNames: dataset splits to convert.
    """
    if (cityscapesPath is None):
        if ('CITYSCAPES_DATASET' in os.environ):
            cityscapesPath = os.environ['CITYSCAPES_DATASET']
        else:
            cityscapesPath = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..')
        cityscapesPath = os.path.join(cityscapesPath, 'gtFine')
    if (outputFolder is None):
        outputFolder = cityscapesPath
    # Category table shared by all splits; labels ignored in evaluation are skipped.
    categories = []
    for label in labels:
        if label.ignoreInEval:
            continue
        categories.append({'id': (int(label.trainId) if useTrainId else int(label.id)), 'name': label.name, 'color': label.color, 'supercategory': label.category, 'isthing': (1 if label.hasInstances else 0)})
    for setName in setNames:
        searchFine = os.path.join(cityscapesPath, setName, '*', '*_instanceIds.png')
        filesFine = glob.glob(searchFine)
        filesFine.sort()
        files = filesFine
        if (not files):
            printError('Did not find any files for {} set using matching pattern {}. Please consult the README.'.format(setName, searchFine))
        print('Converting {} annotation files for {} set.'.format(len(files), setName))
        trainIfSuffix = ('_trainId' if useTrainId else '')
        outputBaseFile = 'cityscapes_panoptic_{}{}'.format(setName, trainIfSuffix)
        outFile = os.path.join(outputFolder, '{}.json'.format(outputBaseFile))
        print('Json file with the annotations in panoptic format will be saved in {}'.format(outFile))
        panopticFolder = os.path.join(outputFolder, outputBaseFile)
        if (not os.path.isdir(panopticFolder)):
            print('Creating folder {} for panoptic segmentation PNGs'.format(panopticFolder))
            os.mkdir(panopticFolder)
        print('Corresponding segmentations in .png format will be saved in {}'.format(panopticFolder))
        images = []
        annotations = []
        for (progress, f) in enumerate(files):
            originalFormat = np.array(Image.open(f))
            fileName = os.path.basename(f)
            imageId = fileName.replace('_gtFine_instanceIds.png', '')
            inputFileName = fileName.replace('_instanceIds.png', '_leftImg8bit.png')
            outputFileName = fileName.replace('_instanceIds.png', '_panoptic.png')
            images.append({'id': imageId, 'width': int(originalFormat.shape[1]), 'height': int(originalFormat.shape[0]), 'file_name': inputFileName})
            pan_format = np.zeros((originalFormat.shape[0], originalFormat.shape[1], 3), dtype=np.uint8)
            segmentIds = np.unique(originalFormat)
            segmInfo = []
            for segmentId in segmentIds:
                # Cityscapes encoding: ids < 1000 are stuff/crowd regions;
                # instance ids are semanticId * 1000 + instance number.
                if (segmentId < 1000):
                    semanticId = segmentId
                    isCrowd = 1
                else:
                    semanticId = (segmentId // 1000)
                    isCrowd = 0
                labelInfo = id2label[semanticId]
                categoryId = (labelInfo.trainId if useTrainId else labelInfo.id)
                if labelInfo.ignoreInEval:
                    continue
                if (not labelInfo.hasInstances):
                    # Stuff classes are never "crowd" in COCO panoptic.
                    isCrowd = 0
                mask = (originalFormat == segmentId)
                # COCO panoptic stores the segment id base-256 in the RGB channels.
                color = [(segmentId % 256), (segmentId // 256), ((segmentId // 256) // 256)]
                pan_format[mask] = color
                area = np.sum(mask)
                # Tight bounding box from the nonzero rows/columns of the mask.
                hor = np.sum(mask, axis=0)
                hor_idx = np.nonzero(hor)[0]
                x = hor_idx[0]
                width = ((hor_idx[(- 1)] - x) + 1)
                vert = np.sum(mask, axis=1)
                vert_idx = np.nonzero(vert)[0]
                y = vert_idx[0]
                height = ((vert_idx[(- 1)] - y) + 1)
                bbox = [int(x), int(y), int(width), int(height)]
                segmInfo.append({'id': int(segmentId), 'category_id': int(categoryId), 'area': int(area), 'bbox': bbox, 'iscrowd': isCrowd})
            annotations.append({'image_id': imageId, 'file_name': outputFileName, 'segments_info': segmInfo})
            Image.fromarray(pan_format).save(os.path.join(panopticFolder, outputFileName))
            print('\rProgress: {:>3.2f} %'.format((((progress + 1) * 100) / len(files))), end=' ')
            sys.stdout.flush()
        print('\nSaving the json file {}'.format(outFile))
        d = {'images': images, 'annotations': annotations, 'categories': categories}
        with open(outFile, 'w') as f:
            json.dump(d, f, sort_keys=True, indent=4)
class check_transitive_modifications(log_queries):
    """Query-log context manager that raises when a DELETE/UPDATE uses an ``IN (SELECT ...)`` subquery."""

    def __init__(self):
        patterns = ['^DELETE.+IN \\(SELECT.+$', '^UPDATE.+IN \\(SELECT.+$']
        super(check_transitive_modifications, self).__init__(query_filters=patterns)

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Finish logging, then fail loudly if any filtered query was captured."""
        super(check_transitive_modifications, self).__exit__(exc_type, exc_val, exc_tb)
        offending = self.get_queries()
        if offending:
            raise Exception('Detected transitive deletion or update in queries: %s' % offending)
def test_shell_commmand_complete_in_path(cmd2_app, request):
    """Tab-completing a shell path argument should offer the matching directory."""
    test_dir = os.path.dirname(request.module.__file__)
    text = os.path.join(test_dir, 's')
    line = 'shell {}'.format(text)
    endidx = len(line)
    begidx = endidx - len(text)
    expected = os.path.join(test_dir, 'scripts' + os.path.sep)
    first_match = complete_tester(text, line, begidx, endidx, cmd2_app)
    assert first_match is not None
    assert expected in cmd2_app.completion_matches
class INT(IntEnum):
    """Bit flags of a clock-controller interrupt register.

    NOTE(review): the member names match the GD32 RCU interrupt register
    (RCU_INT) layout -- *IF = status flags, *IE = enables, *IC = clear bits;
    confirm against the device datasheet.
    """
    # Bits 0-7: stabilization / clock-monitor interrupt flags (read).
    IRC40KSTBIF = (1 << 0)
    LXTALSTBIF = (1 << 1)
    IRC8MSTBIF = (1 << 2)
    HXTALSTBIF = (1 << 3)
    PLLSTBIF = (1 << 4)
    PLL1STBIF = (1 << 5)
    PLL2STBIF = (1 << 6)
    CKMIF = (1 << 7)
    # Bits 8-14: interrupt enables.
    IRC40KSTBIE = (1 << 8)
    LXTALSTBIE = (1 << 9)
    IRC8MSTBIE = (1 << 10)
    HXTALSTBIE = (1 << 11)
    PLLSTBIE = (1 << 12)
    PLL1STBIE = (1 << 13)
    PLL2STBIE = (1 << 14)
    # Bits 16-23: interrupt clear bits (write; bit 15 is unused).
    IRC40KSTBIC = (1 << 16)
    LXTALSTBIC = (1 << 17)
    IRC8MSTBIC = (1 << 18)
    HXTALSTBIC = (1 << 19)
    PLLSTBIC = (1 << 20)
    PLL1STBIC = (1 << 21)
    PLL2STBIC = (1 << 22)
    CKMIC = (1 << 23)
def generate_instrument_list(inst_loc, user_info=None):
    """Classify a package's pysat instrument modules for test parameterization.

    Imports every module named in ``inst_loc.__all__`` and sorts each
    (module, tag, inst_id) combination into download-capable,
    optional-load-kwarg and no-download buckets.

    Args:
        inst_loc: instrument sub-package with an ``__all__`` of module names.
        user_info: optional dict mapping module name -> credentials to attach.

    Returns:
        dict with keys 'names', 'download', 'load_options', 'no_download'.
    """
    instrument_names = inst_loc.__all__
    instrument_download = []
    instrument_optional_load = []
    instrument_no_download = []
    for inst_module in instrument_names:
        try:
            module = importlib.import_module(''.join(('.', inst_module)), package=inst_loc.__name__)
        except ImportError:
            # Unimportable modules are silently skipped from classification.
            pass
        else:
            try:
                info = module._test_dates
            except AttributeError:
                # No declared test dates: fall back to a single default date.
                info = {}
                info[''] = {'': dt.datetime(2009, 1, 1)}
                module._test_dates = info
            for iid in info.keys():
                for tag in info[iid].keys():
                    in_dict = {'inst_module': module, 'tag': tag, 'inst_id': iid}
                    if (user_info and (inst_module in user_info)):
                        in_dict['user_info'] = user_info[inst_module]
                    # Instantiate to read the module's per-instrument test flags.
                    inst = pysat.Instrument(inst_module=module, tag=tag, inst_id=iid, temporary_file_list=True)
                    # Skip download tests on CI when the module opts out.
                    ci_skip = ((os.environ.get('CI') == 'true') and (not inst._test_download_ci))
                    if (not ci_skip):
                        if inst._test_download:
                            instrument_download.append(in_dict.copy())
                            if hasattr(module, '_test_load_opt'):
                                # Optional extra load-kwarg combinations for this tag.
                                try:
                                    kw_list = module._test_load_opt[iid][tag]
                                    kw_list = pysat.utils.listify(kw_list)
                                    for kwargs in kw_list:
                                        in_dict['kwargs'] = kwargs
                                        instrument_optional_load.append(in_dict.copy())
                                except KeyError:
                                    pass
                        elif (not inst._password_req):
                            instrument_no_download.append(in_dict)
    output = {'names': instrument_names, 'download': instrument_download, 'load_options': (instrument_download + instrument_optional_load), 'no_download': instrument_no_download}
    return output
def plot_repertoires(subsystem, sia, **kwargs):
    """Plot forward repertoires of a subsystem before and after the SIA cut, one axis per direction."""
    if config.REPERTOIRE_DISTANCE != 'GENERALIZED_INTRINSIC_DIFFERENCE':
        raise NotImplementedError('Only REPERTOIRE_DISTANCE = GENERALIZED_INTRINSIC_DIFFERENCE is supported')
    cut_subsystem = subsystem.apply_cut(sia.partition)
    labels = ['unpartitioned', 'partitioned']
    subsystems = dict(zip(labels, [subsystem, cut_subsystem]))
    repertoires = {}
    for direction in Direction.both():
        per_label = {}
        for label, system in subsystems.items():
            per_label[label] = system.forward_repertoire(direction, system.node_indices, system.node_indices)
        repertoires[direction] = per_label
    fig = plt.figure(figsize=(12, 9))
    axes = fig.subplots(2, 1)
    for ax, direction in zip(axes, Direction.both()):
        plot_distribution(repertoires[direction][labels[0]], repertoires[direction][labels[1]], validate=False, title=str(direction), labels=labels, ax=ax, **kwargs)
    fig.tight_layout(h_pad=0.5)
    for ax in axes:
        ax.legend(bbox_to_anchor=(1.1, 1.1))
    return (fig, axes, repertoires)
class StemDecorator(ChartDecorator, SimpleLegendItem):
    """Chart decorator drawing a series as a stem plot with optional marker/stem/baseline styling."""

    def __init__(self, series: QFSeries, key: str=None, marker_props: Mapping[(str, Any)]=None, stemline_props: Mapping[(str, Any)]=None, baseline_props: Mapping[(str, Any)]=None):
        ChartDecorator.__init__(self, key)
        SimpleLegendItem.__init__(self)
        self._series = series
        self.marker_props = marker_props
        self.stemline_props = stemline_props
        self.baseline_props = baseline_props

    def decorate(self, chart: 'Chart'):
        """Render the stem plot on *chart* and apply any configured style properties."""
        markerline, stemlines, baseline = chart.axes.stem(self._series.index.values, self._series.values)
        # The marker line doubles as the legend handle for this decorator.
        self.legend_artist = markerline
        for target, props in ((markerline, self.marker_props), (stemlines, self.stemline_props), (baseline, self.baseline_props)):
            if props is not None:
                artist.setp(target, **props)
class Gauge(gui.Svg):
    """Circular SVG gauge widget with a main needle and a lighter preview needle.

    Values in [_min, _max] map linearly onto an arc of (2*pi - 1.0) radians.
    NOTE(review): mouse handlers assume widget-local pixel coordinates with
    the gauge centered at (radius, radius) -- confirm against the gui toolkit.
    """
    def __init__(self, width, height, _min, _max):
        super(Gauge, self).__init__(width=width, height=height)
        self.width = width
        self.height = height
        self.min = _min
        self.max = _max
        # Usable arc: full circle minus a 1-radian dead zone.
        self.scale_angle_range = ((math.pi * 2) - 1.0)
        self.scale_value_range = (_max - _min)
        self.base_angle = 0
        self.radius = (min(width, height) / 2.0)
        # Outer dial.
        circle = gui.SvgCircle((width / 2.0), (height / 2.0), self.radius)
        self.append(circle)
        circle.set_fill('gray')
        circle.set_stroke(1, 'lightgray')
        # Inner face at 92% of the radius.
        circle = gui.SvgCircle((width / 2.0), (height / 2.0), ((self.radius * 92.0) / 100.0))
        self.append(circle)
        circle.set_fill('lightgray')
        circle.set_stroke(1, 'lightgray')
        # Min/max labels positioned along the scale.
        font_size = ((self.radius * 10.0) / 100.0)
        xy = self.value_to_xy_tuple(_min, ((self.radius * 90.0) / 100.0))
        textMin = gui.SvgText(xy[0], xy[1], str(_min))
        xy = self.value_to_xy_tuple(_max, ((self.radius * 90.0) / 100.0))
        textMax = gui.SvgText(xy[0], xy[1], str(_max))
        textMin.style['font-size'] = gui.to_pix(font_size)
        textMax.style['font-size'] = gui.to_pix(font_size)
        textMin.style['text-anchor'] = 'end'
        textMax.style['text-anchor'] = 'end'
        textMin.set_fill('red')
        textMax.set_fill('green')
        # Eleven tick marks between the inner face edge and the rim.
        for i in range(0, 11):
            xy1 = self.value_to_xy_tuple((self.min + ((self.scale_value_range / 10) * i)), ((self.radius * 92.0) / 100.0))
            xy2 = self.value_to_xy_tuple((self.min + ((self.scale_value_range / 10) * i)), self.radius)
            tick = gui.SvgLine(xy1[0], xy1[1], xy2[0], xy2[1])
            tick.set_stroke(2, 'white')
            self.append(tick)
        self.append(textMin)
        self.append(textMax)
        # Main needle: white arrow polyline defined around the origin,
        # positioned later via an SVG transform.
        self.arrow = gui.SvgPolyline()
        self.arrow.add_coord((((- self.radius) * 20.0) / 100.0), 0)
        self.arrow.add_coord((((- self.radius) * 23.0) / 100.0), ((self.radius * 10.0) / 100.0))
        self.arrow.add_coord(0, 0)
        self.arrow.add_coord((((- self.radius) * 23.0) / 100.0), (((- self.radius) * 10.0) / 100.0))
        self.arrow.add_coord((((- self.radius) * 20.0) / 100.0), 0)
        self.arrow.style['fill'] = 'white'
        self.arrow.set_stroke(1.0, 'white')
        self.append(self.arrow)
        # Smaller beige preview needle (follows the pointer during drags).
        self.arrow_preview = gui.SvgPolyline()
        self.arrow_preview.add_coord((((- self.radius) * 10.0) / 100.0), 0)
        self.arrow_preview.add_coord((((- self.radius) * 13.0) / 100.0), ((self.radius * 5.0) / 100.0))
        self.arrow_preview.add_coord(0, 0)
        self.arrow_preview.add_coord((((- self.radius) * 13.0) / 100.0), (((- self.radius) * 5.0) / 100.0))
        self.arrow_preview.add_coord((((- self.radius) * 10.0) / 100.0), 0)
        self.arrow_preview.style['fill'] = 'beige'
        self.arrow_preview.set_stroke(1.0, 'beige')
        self.append(self.arrow_preview)
        self.set_value(_min)
    def value_to_angle(self, value):
        """Map a value to its angle (radians) on the scale."""
        return (self.base_angle + (((value - self.min) * self.scale_angle_range) / self.scale_value_range))
    def angle_to_value(self, angle):
        """Inverse of value_to_angle; also prints debug output (left as-is)."""
        print(('angolo:' + str(math.degrees(angle))))
        print(('valore:' + str(((((angle - self.base_angle) * self.scale_value_range) / self.scale_angle_range) + self.min))))
        return ((((angle - self.base_angle) * self.scale_value_range) / self.scale_angle_range) + self.min)
    def value_to_xy_tuple(self, value, radius):
        """Return the [x, y] point for *value* at the given radius (SVG y grows downward)."""
        return [((math.cos(self.value_to_angle(value)) * radius) + self.radius), (self.radius - (math.sin(self.value_to_angle(value)) * radius))]
    def xy_tuple_to_value(self, xy):
        """Convert a center-relative [x, y] point back to a scale value."""
        return self.angle_to_value((math.atan2(xy[1], xy[0]) % (math.pi * 2)))
    def set_value(self, value):
        """Clamp *value* to [min, max] and rotate the main needle (preview follows)."""
        if (value < self.min):
            value = self.min
        if (value > self.max):
            value = self.max
        self.value = value
        angle = self.value_to_angle(value)
        xy = self.value_to_xy_tuple(value, (self.radius - 10))
        self.arrow.attributes['transform'] = ('translate(%s,%s) rotate(%s)' % (xy[0], xy[1], math.degrees((- angle))))
        self.set_value_preview(value)
    def set_value_preview(self, value):
        """Clamp *value* and rotate only the preview needle."""
        if (value < self.min):
            value = self.min
        if (value > self.max):
            value = self.max
        angle = self.value_to_angle(value)
        xy = self.value_to_xy_tuple(value, (self.radius - 10))
        self.arrow_preview.attributes['transform'] = ('translate(%s,%s) rotate(%s)' % (xy[0], xy[1], math.degrees((- angle))))
    def onmousedown(self, widget, x, y):
        """Commit the value under the pointer (click)."""
        value = self.xy_tuple_to_value([(float(x) - self.radius), (- (float(y) - self.radius))])
        self.set_value(value)
    def onmousemove(self, widget, x, y):
        """Track the pointer with the preview needle only."""
        value = self.xy_tuple_to_value([(float(x) - self.radius), (- (float(y) - self.radius))])
        self.set_value_preview(value)
def subfinder(mylist, pattern):
    """Return ``(pattern, index)`` for the first occurrence of *pattern* as a
    contiguous sub-sequence of *mylist*, or ``(None, 0)`` when absent.

    Fixes over the original:
    - scanned the entire list and accumulated every match although only the
      first one was ever returned; now returns on first hit,
    - ``enumerate(range(len(mylist)))`` produced a redundant index pair,
    - an empty *pattern* raised IndexError (via ``pattern[0]``) instead of
      reporting "no match".
    """
    if not pattern:
        # An empty pattern can never be a meaningful match.
        return (None, 0)
    width = len(pattern)
    for start in range(len(mylist) - width + 1):
        if mylist[start:start + width] == pattern:
            return (pattern, start)
    return (None, 0)
class DeferredGeneratorList:
    """A list-like wrapper around a generator that materializes lazily.

    Elements are pulled from the wrapped generator only when actually
    needed (indexing, iteration, ``len``, ...) and cached in
    ``_elements`` so they are never consumed twice.

    Fixes over the original:
    - ``__next__`` read ``self.iter_index`` which was never initialized,
      so ``next(instance)`` raised AttributeError,
    - open-ended slices (``lst[2:]``) raised TypeError (``len(...) < None``),
    - negative indices/bounds silently sliced only the cached prefix,
    - bounded slices past the end raised IndexError instead of truncating
      like a real list.
    """

    def __init__(self, generator):
        self.gen = generator
        self._elements = []
        # Cursor for the __next__ protocol (missing in the original).
        self.iter_index = 0

    def __eq__(self, other):
        return list(self) == other

    def __getitem__(self, key) -> Any:
        if not isinstance(key, (int, slice)):
            raise TypeError('Key must be either a slice or int.')
        key_slice = key
        if isinstance(key, int):
            key_slice = slice(key, key + 1, 1)
        stop = key_slice.stop
        if stop is None or stop < 0 or (isinstance(key, int) and key < 0):
            # Open-ended or negative bounds need the full sequence.
            self.generate_all()
        else:
            while len(self._elements) < stop:
                try:
                    self._elements.append(next(self.gen))
                except StopIteration:
                    if isinstance(key, int):
                        raise IndexError
                    break  # slices truncate at the end, like a real list
        return self._elements[key]

    def __iter__(self):
        index = 0
        while True:
            try:
                item = self[index]
            except IndexError:
                return
            yield item
            index += 1

    def __next__(self) -> Any:
        try:
            item = self[self.iter_index]
        except IndexError:
            raise StopIteration
        self.iter_index += 1
        return item

    def __len__(self) -> int:
        self.generate_all()
        return len(self._elements)

    def __repr__(self) -> str:
        self.generate_all()
        return str(self._elements)

    def __reversed__(self):
        self.generate_all()
        return self._elements[::-1]

    def generate_all(self):
        """Exhaust the generator, caching every remaining element."""
        self._elements.extend(self.gen)
class Mssql:
    """Wrapper around a pymssql connection to a Microsoft SQL Server.

    Holds the connection settings, opens/closes connections, executes
    requests and exposes helpers (version detection, role membership,
    current user, stored-procedure execution, ...).  Following the code
    base's convention, failures are reported by returning an ErrorClass
    instance (or raw results) instead of raising.

    Fixes over the original:
    - ``connect()`` now honours ``self.loginTimeout`` (it always passed
      the module default before),
    - ``getCompleteVersion()`` sent the truncated request ``'SELECT '``
      (now ``'SELECT @@version'``) and was missing ``return`` on the
      cached branch,
    - ``REQ_GET_CURRENT_USER`` was referenced by ``getCurrentUser()`` but
      never defined (AttributeError at runtime),
    - ``__isCurrentUser__`` referenced the wrong key ``e['issysadmin']``
      in an error message and never formatted the "result is empty" one,
    - the eight ``isTheXXXXVersion`` methods are deduplicated.
    """

    ERROR_ACCOUNT_IS_DISABLED = 'Reason: The account is disabled'
    ERROR_ACCOUNT_INVALID = 'Login failed for user '
    ERROR_UNTRUSTED_DOMAIN = 'The login is from an untrusted domain and cannot be used with Windows authentication.'
    ERROR_UNABLE_TO_CONNECT = 'Unable to connect:'
    MS2019_BANNER = 'Microsoft SQL Server 2019'
    MS2017_BANNER = 'Microsoft SQL Server 2017'
    MS2016_BANNER = 'Microsoft SQL Server 2016'
    MS2014_BANNER = 'Microsoft SQL Server 2014'
    MS2012_BANNER = 'Microsoft SQL Server 2012'
    MS2008_BANNER = 'Microsoft SQL Server 2008'
    MS2005_BANNER = 'Microsoft SQL Server 2005'
    MS2000_BANNER = 'Microsoft SQL Server 2000'
    POST_TABLE_NAME = 'MSAT_TABLE_'
    REQ_USE_THIS_DB = 'USE {0}'
    REQ_GET_DB_NAME = 'SELECT DB_NAME() AS databasename'
    # Was missing although getCurrentUser() relies on it.  SYSTEM_USER
    # returns the current login; the alias matches ld=['username'].
    REQ_GET_CURRENT_USER = 'SELECT SYSTEM_USER AS username'
    REQ_EXEC_SP_FOR_PE = 'EXEC {0}'
    REQ_DEL_PROC = 'DROP PROCEDURE {0}'

    def __init__(self, args=None, loginTimeout=DEFAULT_LOGIN_TIMEOUT, charset=DEFAULT_CHARSET):
        """Store connection settings from *args* (a dict-like object).

        NOTE(review): the loginTimeout/charset parameters are immediately
        shadowed by the values in *args*; kept for interface compatibility.
        """
        self.host = args['host']
        self.user = args['user']
        self.password = args['password']
        self.database = args['database']
        self.port = args['port']
        self.loginTimeout = args['loginTimeout']
        self.charset = args['charset']
        self.domain = args['domain']
        self.args = args
        # Connection state is shared through the args dict.
        if 'connection' not in self.args:
            self.args['connection'] = None
        if 'cursor' not in self.args:
            self.args['cursor'] = None
        self.autocommit = True
        self.completeVersion = None

    def connect(self, printErrorAsDebug=False, stopIfError=False):
        """Open the connection and create a cursor.

        Returns True on success, -1 on a plain connection error, or an
        ErrorClass describing the failure.  When *printErrorAsDebug* is
        True, expected failures are logged at debug level only.
        """
        logging.debug("Connecting to the '{0}':'{4}' database server, on the '{1}' database with the '{2}':'{3}' account...".format(self.host, self.database, self.user, self.password, self.port))
        if self.domain is None:
            logging.debug('Domain name NOT specified. Consequently, windows authentication NOT enabled: SQL server Authentication eanbled ONLY !')
            userString = self.user
        else:
            logging.debug('Domain name specified. Consequently, SQL server Authentication DISABLED and windows authentication ENABLED !')
            userString = '{0}\\{1}'.format(self.domain, self.user)
        try:
            # Bug fix: honour the configured timeout (the original always
            # passed DEFAULT_LOGIN_TIMEOUT here).
            self.args['connection'] = pymssql.connect(host=self.host, user=userString, password=self.password, database=self.database, port=self.port, charset=self.charset, login_timeout=self.loginTimeout)
            self.args['connection'].autocommit(self.autocommit)
        except Exception as e:
            logging.debug("Connection not established : '{0}'".format(repr(e)))
            if self.ERROR_ACCOUNT_IS_DISABLED in str(e):
                errorMsg = "The '{0}' account is disabled".format(self.user)
                if not printErrorAsDebug:
                    logging.error(errorMsg)
                else:
                    logging.debug(errorMsg)
                return ErrorClass(errorMsg)
            elif (self.ERROR_ACCOUNT_INVALID in str(e)) or (self.ERROR_UNTRUSTED_DOMAIN in str(e)):
                if self.domain is None:
                    errorMsg = "The '{0}' account is invalid. A domain name could be used to enable the Windows Authentication".format(self.user)
                else:
                    errorMsg = "The '{0}' account is invalid. The domain name could be removed to enable SQL server Authentication".format(self.user)
                if not printErrorAsDebug:
                    logging.error(errorMsg)
                else:
                    logging.debug(errorMsg)
                return ErrorClass(errorMsg)
            elif (self.ERROR_UNABLE_TO_CONNECT in str(e)) or (not printErrorAsDebug):
                logging.error('Connection error: {0}'.format(e))
                return -1
            else:
                logging.debug('Connection error: {0}'.format(e))
                return ErrorClass(e)
        else:
            logging.debug('Connection established. Creating the cursor...')
            try:
                self.args['cursor'] = self.args['connection'].cursor()
                logging.debug('Cursor created')
                return True
            except Exception as e:
                return ErrorClass(e)

    def update(self, host, user, password, database='master', port=1433):
        """Replace the connection settings and reset connection/cursor."""
        self.host = host
        self.user = user
        self.password = password
        self.database = database
        self.port = port
        self.args['connection'] = None
        self.args['cursor'] = None

    def closeConnection(self):
        """Close the current connection; True on success, ErrorClass otherwise."""
        logging.debug('Closing the connection to the {0} database server...'.format(self.host))
        if self.args['connection'] is not None:
            try:
                self.args['connection'].close()
            except Exception as e:
                return ErrorClass(e)
            else:
                logging.debug('Connection closed')
                return True
        else:
            errorMsg = 'A connection has not been established to the {0} database server'.format(self.host)
            logging.error(errorMsg)
            return ErrorClass(errorMsg)

    def executeRequest(self, request, ld=[], noResult=False, autoLD=False):
        """Execute *request* and return its rows.

        With *ld* (or *autoLD*), rows are returned as dicts keyed by the
        given (or cursor-described) column names; otherwise raw tuples.
        *noResult* skips fetching entirely.  Errors come back as ErrorClass.

        NOTE(review): the mutable default ``ld=[]`` is never mutated here,
        so it is kept for interface compatibility.
        """
        logging.debug('Executing the following request: {0}...'.format(repr(request)))
        if self.args['cursor'] is not None:
            try:
                self.args['cursor'].execute(request)
            except Exception as e:
                return ErrorClass(e)
            else:
                if noResult == True:
                    return []
                try:
                    results = self.args['cursor'].fetchall()
                except Exception as e:
                    return ErrorClass(e)
                if ld == []:
                    if autoLD == True:
                        # Derive column names from the cursor description.
                        ld = [item[0] for item in self.args['cursor'].description]
                    else:
                        return results
                values = []
                for line in results:
                    dico = {}
                    for i in range(len(line)):
                        dico[ld[i]] = line[i]
                    values.append(dico)
                return values
        else:
            errorMsg = 'A cursor has not been created to the {0} database server'.format(self)
            logging.error(errorMsg)
            return ErrorClass(errorMsg)

    def getCompleteVersion(self):
        """Return the server's full version banner (cached after first call)."""
        logging.debug('Getting the database version installed on the {0} server'.format(self.host))
        if self.completeVersion is None:
            # Bug fix: the original sent the truncated request 'SELECT '.
            data = self.executeRequest('SELECT @@version', ['version'])
            if isinstance(data, Exception):
                return data
            elif len(data) == 1:
                version = cleanString(data[0]['version'])
                logging.debug('The version is : {0}'.format(version))
                return version
            else:
                return ''
        else:
            # Bug fix: the original evaluated the attribute without
            # returning it, so cached calls answered None.
            return self.completeVersion

    def __loadCompleteVersionIfNeed__(self):
        """Cache the version banner; True if it had to be loaded now."""
        if self.completeVersion is None:
            self.completeVersion = self.getCompleteVersion()
            return True
        else:
            return False

    def __bannerInVersion__(self, banner):
        """True if *banner* appears in the (lazily loaded) version string."""
        self.__loadCompleteVersionIfNeed__()
        return banner in self.completeVersion

    def isThe2019Version(self):
        return self.__bannerInVersion__(self.MS2019_BANNER)

    def isThe2017Version(self):
        return self.__bannerInVersion__(self.MS2017_BANNER)

    def isThe2016Version(self):
        return self.__bannerInVersion__(self.MS2016_BANNER)

    def isThe2014Version(self):
        return self.__bannerInVersion__(self.MS2014_BANNER)

    def isThe2012Version(self):
        return self.__bannerInVersion__(self.MS2012_BANNER)

    def isThe2008Version(self):
        return self.__bannerInVersion__(self.MS2008_BANNER)

    def isThe2005Version(self):
        return self.__bannerInVersion__(self.MS2005_BANNER)

    def isThe2000Version(self):
        return self.__bannerInVersion__(self.MS2000_BANNER)

    def getStandardBarStarted(self, maxvalue):
        """Return a started ProgressBar sized for *maxvalue* steps."""
        return ProgressBar(widgets=['', Percentage(), ' ', Bar(), ' ', ETA(), ' ', ''], maxval=maxvalue).start()

    def useThisDB(self, name):
        """Switch the session to database *name* (USE <name>)."""
        logging.info('Moving to the database {0}'.format(name))
        data = self.executeRequest(self.REQ_USE_THIS_DB.format(name), noResult=True)
        if isinstance(data, Exception):
            logging.warning('Impossible to move to the database {0}'.format(name))
            return data
        else:
            logging.debug('We are in the database {0}'.format(name))
            return True

    def __isCurrentUser__(self, roleName):
        """True/False if the current login is member of server role *roleName*."""
        REQ = "SELECT is_srvrolemember('{0}') as role".format(roleName)
        logging.info('Checking if the current user is {0}'.format(roleName))
        data = self.executeRequest(REQ, ld=['role'])
        if isinstance(data, Exception):
            logging.warning('Impossible to known if the user has {0} role: {1}'.format(roleName, data))
            return data
        else:
            for e in data:
                if e['role'] == 0:
                    logging.debug('The current user is not {0}'.format(roleName))
                    return False
                elif e['role'] == 1:
                    logging.debug('The current user is {0}'.format(roleName))
                    return True
                else:
                    # Bug fix: the original read the nonexistent key
                    # e['issysadmin'] here, raising KeyError instead of
                    # reporting the unexpected value.
                    msg = "Impossible to known if the user has {0} because the result is not 1 or 0. The result is '{1}'".format(roleName, e['role'])
                    logging.warning(msg)
                    return ErrorClass(msg)
            # Bug fix: the original never formatted this message.
            msg = 'Impossible to known if the user has {0} because the result is empty'.format(roleName)
            logging.warning(msg)
            return ErrorClass(msg)

    def isCurrentUserSysadmin(self):
        return self.__isCurrentUser__('sysadmin')

    def isCurrentUserServeradmin(self):
        return self.__isCurrentUser__('serveradmin')

    def isCurrentUserDbcreator(self):
        return self.__isCurrentUser__('dbcreator')

    def isCurrentUserSetupadmin(self):
        return self.__isCurrentUser__('setupadmin')

    def isCurrentUserBulkadmin(self):
        return self.__isCurrentUser__('bulkadmin')

    def isCurrentUserSecurityadmin(self):
        return self.__isCurrentUser__('securityadmin')

    def isCurrentUserDiskadmin(self):
        return self.__isCurrentUser__('diskadmin')

    def isCurrentUserPublic(self):
        return self.__isCurrentUser__('public')

    def isCurrentUserProcessadmin(self):
        return self.__isCurrentUser__('processadmin')

    def getCurrentUser(self):
        """Return the current login name, stripped of any 'DOMAIN\\' prefix."""
        logging.info('Getting the current username')
        data = self.executeRequest(self.REQ_GET_CURRENT_USER, ld=['username'])
        if isinstance(data, Exception):
            logging.warning('Impossible to know the current username: {0}'.format(data))
            return data
        else:
            for e in data:
                username = e['username'].replace('{0}\\'.format(self.domain), '')
                logging.debug('The current user is {0}'.format(username))
                return username
            msg = 'Impossible to know the current username because the result is empty'
            logging.warning(msg)
            return ErrorClass(msg)

    def execSP(self, spName):
        """EXEC the stored procedure *spName*; True on success."""
        logging.info('Executing the {0} stored procedure'.format(spName))
        data = self.executeRequest(self.REQ_EXEC_SP_FOR_PE.format(spName), noResult=True)
        if isinstance(data, Exception):
            logging.warning("Impossible to execute the stored procedure '{1}': {0}".format(data, spName))
            return data
        else:
            logging.debug('The stored procedure named {0} has been executed'.format(spName))
            return True

    def deleteSP(self, spName):
        """DROP the stored procedure *spName*; True on success."""
        logging.info('Deleting the stored procedure named {0}'.format(spName))
        data = self.executeRequest(self.REQ_DEL_PROC.format(spName), noResult=True)
        if isinstance(data, Exception):
            logging.debug("Impossible to delete the stored procedure '{1}': {0}".format(data, spName))
            return data
        else:
            logging.debug('The stored procedure named {0} has been removed'.format(spName))
            return True

    def getDBName(self):
        """Return the name of the database the session is currently using."""
        logging.info('Getting the current database name')
        data = self.executeRequest(self.REQ_GET_DB_NAME, ld=['databasename'])
        if isinstance(data, Exception):
            logging.warning('Impossible to get the current database name: {0}'.format(data))
            return data
        else:
            for e in data:
                logging.debug('The database name is {0}'.format(e['databasename']))
                return e['databasename']
            msg = 'Impossible to get the current database name because the result is empty'
            logging.warning(msg)
            return ErrorClass(msg)

    def getUsernamesViaSyslogins(self):
        """Return every login name from master..syslogins (or ErrorClass)."""
        QUERY = 'SELECT name FROM master..syslogins'
        logging.info('Get all usernames from the syslogins table...')
        response = self.executeRequest(request=QUERY, ld=['username'])
        if isinstance(response, Exception):
            logging.info('Error with the SQL request {0}: {1}'.format(QUERY, str(response)))
            return response
        else:
            allUsernames = []
            if response == []:
                pass
            else:
                for e in response:
                    allUsernames.append(e['username'])
            return allUsernames

    def getSysloginsInformation(self):
        """Return every row of master..syslogins as dicts (columns auto-named)."""
        QUERY = 'SELECT * FROM master..syslogins'
        logging.info('Get info from syslogins table...')
        response = self.executeRequest(request=QUERY, ld=[], noResult=False, autoLD=True)
        if isinstance(response, Exception):
            logging.info('Error with the SQL request {0}: {1}'.format(QUERY, str(response)))
            return response
        else:
            return response
def simple_run(learner, n):
    """Run the blocking ``simple`` runner on *learner* until it holds more
    than *n* samples (or points, for learners without ``nsamples``)."""

    def make_goal(probe):
        # The goal inspects the learner passed at runtime, but which
        # attribute to watch is decided from *probe* up front.
        attr = 'nsamples' if hasattr(probe, 'nsamples') else 'npoints'
        return lambda lrn: getattr(lrn, attr) > n

    # Wrapping learners delegate the goal decision to their inner learner.
    if isinstance(learner, BalancingLearner):
        goal = make_goal(learner.learners[0])
    elif isinstance(learner, DataSaver):
        goal = make_goal(learner.learner)
    else:
        goal = make_goal(learner)
    simple(learner, goal=goal)
def preprocess_Youtube2Text(base_path, refs_url=None, mapping_url=None):
    """Prepare the Youtube2Text (MSVD) caption data under *base_path*.

    Downloads ``refs.pkl`` (reference captions) and ``youtube_mapping.txt``
    (video-id mapping) when absent, tokenizes the captions and returns a
    dict with the standard split, the train/all caption tokens and the
    vid->id mapping.

    NOTE(review): the download URLs in the original source were truncated
    to the unterminated literal ``url = '`` (a syntax error); they are now
    the *refs_url* / *mapping_url* parameters.  Without them, the files
    must already exist in *base_path*.
    """
    os.makedirs(base_path, exist_ok=True)
    refs_pickle = os.path.join(base_path, 'refs.pkl')
    if not os.path.exists(refs_pickle):
        if refs_url is None:
            raise FileNotFoundError('refs.pkl is missing and no refs_url was given')
        wget.download(refs_url, out=refs_pickle)
    mapping_txt = os.path.join(base_path, 'youtube_mapping.txt')
    if not os.path.exists(mapping_txt):
        if mapping_url is None:
            raise FileNotFoundError('youtube_mapping.txt is missing and no mapping_url was given')
        wget.download(mapping_url, out=mapping_txt)
    # Mapping file lines look like '<id> vidNNN'; refs keys use 'videoNNN'.
    vid2id = {}
    with open(mapping_txt, 'r') as fh:  # fix: the original leaked the handle
        for line in fh.read().strip().split('\n'):
            _id, vid = line.split()
            vid2id[vid.replace('vid', 'video')] = _id
    # Standard MSVD split: 1200 train / 100 validation / 670 test clips.
    split = {'train': list(range(1200)), 'validate': list(range(1200, 1300)), 'test': list(range(1300, 1970))}
    raw_caps_all = defaultdict(list)
    raw_caps_train = {}
    with open(refs_pickle, 'rb') as fh:  # fix: the original leaked the handle
        refs = pickle.load(fh)
    for vid in tqdm(refs.keys()):
        num = int(vid[5:])  # numeric part of 'videoNNN'
        for item in refs[vid]:
            raw_caps_all[vid].append(item['caption'].lower().split())
        if num in split['train']:
            raw_caps_train[vid] = raw_caps_all[vid]
    return {'split': split, 'raw_caps_train': raw_caps_train, 'raw_caps_all': raw_caps_all, 'vid2id': vid2id}
class BatchSampler(BaseSampler):
    """Sampler that collects whole trajectories in parallel worker processes."""

    def start_worker(self):
        """Populate the sampling task, initializing TF in each worker."""
        parallel = singleton_pool.n_parallel > 1
        # TF graphs must exist in the workers before the task is populated,
        # and variables are synchronized afterwards.
        if parallel:
            singleton_pool.run_each(worker_init_tf)
        parallel_sampler.populate_task(self.algo.env, self.algo.policy)
        if parallel:
            singleton_pool.run_each(worker_init_tf_vars)

    def shutdown_worker(self):
        """Tear down the sampling task for this algorithm's scope."""
        parallel_sampler.terminate_task(scope=self.algo.scope)

    def obtain_samples(self, itr):
        """Collect up to ``algo.batch_size`` samples of rollout paths."""
        policy_params = self.algo.policy.get_param_values()
        env_params = self.algo.env.get_param_values()
        paths = parallel_sampler.sample_paths(
            policy_params=policy_params,
            env_params=env_params,
            max_samples=self.algo.batch_size,
            max_path_length=self.algo.max_path_length,
            scope=self.algo.scope,
        )
        if self.algo.whole_paths:
            return paths
        # Trim the surplus so exactly batch_size samples are returned.
        return parallel_sampler.truncate_paths(paths, self.algo.batch_size)
def attach_as_old(c, filename):
    """Attach *filename* as schema ``old`` on cursor *c* for the duration of
    the with-block, wrapping the block in a single transaction.

    Commits on success, rolls back on any exception (re-raising it), and
    always detaches the database again.

    NOTE(review): this is a generator used as a context manager — it is
    presumably decorated with ``@contextlib.contextmanager`` upstream; the
    decorator is not visible in this chunk, confirm against the full file.
    """
    c.execute('ATTACH DATABASE ? AS old', (filename,))
    c.execute('BEGIN TRANSACTION')
    try:
        try:
            (yield)
        except:
            # Undo whatever the with-block did; swallow "cannot rollback"
            # style errors so the original exception propagates instead.
            try:
                c.execute('ROLLBACK')
            except sqlite3.OperationalError:
                pass
            raise
        else:
            c.execute('COMMIT')
    finally:
        # Detach regardless of commit/rollback outcome.
        c.execute('DETACH DATABASE old')
# NOTE(review): the next four lines look like stripped click decorators
# (@click.command(short_help=...), @click.argument('names', ...),
# @click.option('--dir', ...), @click.pass_context).  As written they are
# bare expressions / a bare name (`_context` would raise NameError);
# confirm against the original file before relying on them.
(short_help='Update Python distributions')
('names', required=True, nargs=(- 1))
('--dir', '-d', 'directory', help='The directory in which distributions reside')
_context
def update(ctx: click.Context, *, names: tuple[(str, ...)], directory: (str | None)):
    """Update the named installed Python distributions ('all' selects every one)."""
    app: Application = ctx.obj
    manager = app.get_python_manager(directory)
    installed = manager.get_installed()
    # 'all' expands to every currently installed distribution.
    selection = (tuple(installed) if ('all' in names) else names)
    not_installed = [name for name in selection if (name not in installed)]
    if not_installed:
        app.abort(f"Distributions not installed: {', '.join(not_installed)}")
    # Delegate to the install command in update mode.
    ctx.invoke(install, names=selection, directory=directory, private=True, update=True)
def test_session_env_lazy(monkeypatch, gdalenv):
    """AWS credentials from the process environment show up in the lazily
    created GDAL environment options."""
    expected = {
        'AWS_ACCESS_KEY_ID': 'id',
        'AWS_SECRET_ACCESS_KEY': 'key',
        'AWS_SESSION_TOKEN': 'token',
    }
    for name, value in expected.items():
        monkeypatch.setenv(name, value)
    with rasterio.Env():
        assert getenv() == rasterio.env.local._env.options
        for name, value in expected.items():
            assert getenv()[name] == value
    monkeypatch.undo()
def pipeline(task: str=None, model: Optional=None, config: Optional[Union[(str, PretrainedConfig)]]=None, tokenizer: Optional[Union[(str, PreTrainedTokenizer, PreTrainedTokenizerFast)]]=None, feature_extractor: Optional[Union[(str, PreTrainedFeatureExtractor)]]=None, framework: Optional[str]=None, revision: Optional[str]=None, use_fast: bool=True, use_auth_token: Optional[Union[(str, bool)]]=None, device: Optional[Union[(int, str, 'torch.device')]]=None, device_map=None, torch_dtype=None, trust_remote_code: Optional[bool]=None, model_kwargs: Dict[(str, Any)]=None, pipeline_class: Optional[Any]=None, **kwargs) -> Pipeline:
    """Build a :class:`Pipeline` for *task*, loading model/config/tokenizer/
    feature extractor from the Hugging Face hub as needed.

    Either *task* or *model* must be given; the task can be inferred from
    the model (hub lookup or a single custom pipeline declared in its
    config).  Custom (remote-code) pipelines require
    ``trust_remote_code=True``.
    """
    if (model_kwargs is None):
        model_kwargs = {}
    # Hub-related kwargs are forwarded to every from_pretrained call below.
    use_auth_token = model_kwargs.pop('use_auth_token', use_auth_token)
    hub_kwargs = {'revision': revision, 'use_auth_token': use_auth_token, 'trust_remote_code': trust_remote_code, '_commit_hash': None}
    if ((task is None) and (model is None)):
        raise RuntimeError('Impossible to instantiate a pipeline without either a task or a model being specified. Please provide a task class or a model')
    if ((model is None) and (tokenizer is not None)):
        raise RuntimeError('Impossible to instantiate a pipeline with tokenizer specified but not the model as the provided tokenizer may not be compatible with the default model. Please provide a PreTrainedModel class or a path/identifier to a pretrained model when providing tokenizer.')
    if ((model is None) and (feature_extractor is not None)):
        raise RuntimeError('Impossible to instantiate a pipeline with feature_extractor specified but not the model as the provided feature_extractor may not be compatible with the default model. Please provide a PreTrainedModel class or a path/identifier to a pretrained model when providing feature_extractor.')
    # Resolve the config first so custom pipelines / the commit hash are known.
    if isinstance(config, str):
        config = AutoConfig.from_pretrained(config, _from_pipeline=task, **hub_kwargs, **model_kwargs)
        hub_kwargs['_commit_hash'] = config._commit_hash
    elif ((config is None) and isinstance(model, str)):
        config = AutoConfig.from_pretrained(model, _from_pipeline=task, **hub_kwargs, **model_kwargs)
        hub_kwargs['_commit_hash'] = config._commit_hash
    custom_tasks = {}
    if ((config is not None) and (len(getattr(config, 'custom_pipelines', {})) > 0)):
        custom_tasks = config.custom_pipelines
        # A config declaring exactly one custom pipeline fixes the task.
        if ((task is None) and (trust_remote_code is not False)):
            if (len(custom_tasks) == 1):
                task = list(custom_tasks.keys())[0]
            else:
                raise RuntimeError(f"We can't infer the task automatically for this model as there are multiple tasks available. Pick one in {', '.join(custom_tasks.keys())}")
    if ((task is None) and (model is not None)):
        if (not isinstance(model, str)):
            raise RuntimeError(f'Inferring the task automatically requires to check the hub with a model_id defined as a `str`.{model} is not a valid model_id.')
        task = get_task(model, use_auth_token)
    # Resolve the pipeline implementation: custom (remote code) or built-in.
    if (task in custom_tasks):
        normalized_task = task
        (targeted_task, task_options) = clean_custom_task(custom_tasks[task])
        if (pipeline_class is None):
            if (not trust_remote_code):
                raise ValueError('Loading this pipeline requires you to execute the code in the pipeline file in that repo on your local machine. Make sure you have read the code there to avoid malicious use, then set the option `trust_remote_code=True` to remove this error.')
            class_ref = targeted_task['impl']
            (module_file, class_name) = class_ref.split('.')
            pipeline_class = get_class_from_dynamic_module(model, (module_file + '.py'), class_name, revision=revision, use_auth_token=use_auth_token)
    else:
        (normalized_task, targeted_task, task_options) = check_task(task)
        if (pipeline_class is None):
            pipeline_class = targeted_task['impl']
    # No model given: fall back to the task's documented default checkpoint.
    if (model is None):
        (model, default_revision) = get_default_model_and_revision(targeted_task, framework, task_options)
        revision = (revision if (revision is not None) else default_revision)
        logger.warning(f'''No model was supplied, defaulted to {model} and revision {revision} ({HUGGINGFACE_CO_RESOLVE_ENDPOINT}/{model}).
Using a pipeline without specifying a model name and revision in production is not recommended.''')
        if ((config is None) and isinstance(model, str)):
            config = AutoConfig.from_pretrained(model, _from_pipeline=task, **hub_kwargs, **model_kwargs)
            hub_kwargs['_commit_hash'] = config._commit_hash
    # device_map / torch_dtype may be given directly or via model_kwargs, not both.
    if (device_map is not None):
        if ('device_map' in model_kwargs):
            raise ValueError('You cannot use both `pipeline(... device_map=..., model_kwargs={"device_map":...})` as those arguments might conflict, use only one.)')
        model_kwargs['device_map'] = device_map
    if (torch_dtype is not None):
        if ('torch_dtype' in model_kwargs):
            raise ValueError('You cannot use both `pipeline(... torch_dtype=..., model_kwargs={"torch_dtype":...})` as those arguments might conflict, use only one.)')
        model_kwargs['torch_dtype'] = torch_dtype
    model_name = (model if isinstance(model, str) else None)
    model_classes = {'tf': targeted_task['tf'], 'pt': targeted_task['pt']}
    # Load the actual model and settle on a framework (PyTorch vs TF).
    (framework, model) = infer_framework_load_model(model, model_classes=model_classes, config=config, framework=framework, task=task, **hub_kwargs, **model_kwargs)
    model_config = model.config
    hub_kwargs['_commit_hash'] = model.config._commit_hash
    # Decide which auxiliary components this model/task combination needs.
    load_tokenizer = ((type(model_config) in TOKENIZER_MAPPING) or (model_config.tokenizer_class is not None))
    load_feature_extractor = ((type(model_config) in FEATURE_EXTRACTOR_MAPPING) or (feature_extractor is not None))
    if ((tokenizer is None) and (not load_tokenizer) and (normalized_task not in NO_TOKENIZER_TASKS) and (model_config.__class__.__name__ in MULTI_MODEL_CONFIGS)):
        load_tokenizer = True
    if ((feature_extractor is None) and (not load_feature_extractor) and (normalized_task not in NO_FEATURE_EXTRACTOR_TASKS) and (model_config.__class__.__name__ in MULTI_MODEL_CONFIGS)):
        load_feature_extractor = True
    if (task in NO_TOKENIZER_TASKS):
        load_tokenizer = False
    if (task in NO_FEATURE_EXTRACTOR_TASKS):
        load_feature_extractor = False
    if load_tokenizer:
        # Infer the tokenizer checkpoint from the model or config name.
        if (tokenizer is None):
            if isinstance(model_name, str):
                tokenizer = model_name
            elif isinstance(config, str):
                tokenizer = config
            else:
                raise Exception('Impossible to guess which tokenizer to use. Please provide a PreTrainedTokenizer class or a path/identifier to a pretrained tokenizer.')
        if isinstance(tokenizer, (str, tuple)):
            if isinstance(tokenizer, tuple):
                # Tuple form: (identifier, {tokenizer kwargs}).
                use_fast = tokenizer[1].pop('use_fast', use_fast)
                tokenizer_identifier = tokenizer[0]
                tokenizer_kwargs = tokenizer[1]
            else:
                tokenizer_identifier = tokenizer
                tokenizer_kwargs = model_kwargs
            tokenizer = AutoTokenizer.from_pretrained(tokenizer_identifier, use_fast=use_fast, _from_pipeline=task, **hub_kwargs, **tokenizer_kwargs)
    if load_feature_extractor:
        # Infer the feature-extractor checkpoint from the model or config name.
        if (feature_extractor is None):
            if isinstance(model_name, str):
                feature_extractor = model_name
            elif isinstance(config, str):
                feature_extractor = config
            else:
                raise Exception('Impossible to guess which feature extractor to use. Please provide a PreTrainedFeatureExtractor class or a path/identifier to a pretrained feature extractor.')
        if isinstance(feature_extractor, (str, tuple)):
            feature_extractor = AutoFeatureExtractor.from_pretrained(feature_extractor, _from_pipeline=task, **hub_kwargs, **model_kwargs)
            # '...WithLM' processors get an optional CTC beam-search decoder.
            if (feature_extractor._processor_class and feature_extractor._processor_class.endswith('WithLM') and isinstance(model_name, str)):
                try:
                    import kenlm
                    from pyctcdecode import BeamSearchDecoderCTC
                    if (os.path.isdir(model_name) or os.path.isfile(model_name)):
                        decoder = BeamSearchDecoderCTC.load_from_dir(model_name)
                    else:
                        language_model_glob = os.path.join(BeamSearchDecoderCTC._LANGUAGE_MODEL_SERIALIZED_DIRECTORY, '*')
                        alphabet_filename = BeamSearchDecoderCTC._ALPHABET_SERIALIZED_FILENAME
                        allow_regex = [language_model_glob, alphabet_filename]
                        decoder = BeamSearchDecoderCTC.load_from_hf_hub(model_name, allow_regex=allow_regex)
                    kwargs['decoder'] = decoder
                except ImportError as e:
                    # NOTE(review): this warning text appears truncated/garbled
                    # at the end ('pip install Error: ...'); confirm upstream.
                    logger.warning(f'Could not load the `decoder` for {model_name}. Defaulting to raw CTC. Try to install `pyctcdecode` and `kenlm`: (`pip install pyctcdecode`, `pip install Error: {e}')
    # Generic 'translation' resolves to the model's specific XX_to_YY variant.
    if ((task == 'translation') and model.config.task_specific_params):
        for key in model.config.task_specific_params:
            if key.startswith('translation'):
                task = key
                warnings.warn(f'"translation" task was used, instead of "translation_XX_to_YY", defaulting to "{task}"', UserWarning)
                break
    if (tokenizer is not None):
        kwargs['tokenizer'] = tokenizer
    if (feature_extractor is not None):
        kwargs['feature_extractor'] = feature_extractor
    if (device is not None):
        kwargs['device'] = device
    return pipeline_class(model=model, framework=framework, task=task, **kwargs)
def mesh_query_point_loss(mesh: wp.uint64, query_points: wp.array(dtype=wp.vec3), projected_points: wp.array(dtype=wp.vec3), loss: wp.array(dtype=float)):
    # Warp kernel (presumably decorated with @wp.kernel upstream; the
    # decorator is not visible in this chunk): one thread per query point.
    # For each point, finds its closest point on *mesh*, writes it to
    # projected_points and the Euclidean distance to loss.
    tid = wp.tid()
    # Outputs of the closest-point query.  NOTE(review): this matches the
    # legacy Warp API where wp.mesh_query_point fills these by reference;
    # confirm against the project's Warp version.
    face_index = int(0)
    face_u = float(0.0)
    face_v = float(0.0)
    sign = float(0.0)
    # Search radius; points farther than this from the mesh are not matched.
    max_dist = 10012.0
    p = query_points[tid]
    wp.mesh_query_point(mesh, p, max_dist, sign, face_index, face_u, face_v)
    # Reconstruct the closest surface point from face + barycentric coords.
    q = wp.mesh_eval_position(mesh, face_index, face_u, face_v)
    projected_points[tid] = q
    # Loss is the distance from the query point to its projection.
    dist = wp.length(wp.sub(p, q))
    loss[tid] = dist
class Effect6307(BaseEffect):
    # Passive ship-bonus effect: boosts the thermal damage of loaded
    # missile charges (those requiring 'Missile Launcher Operation') by the
    # hull's shipBonusMD1 attribute, scaled per 'Minmatar Destroyer' skill.
    type = 'passive'

    def handler(fit, src, context, projectionRange, **kwargs):
        # NOTE(review): no *self* parameter — these effect handlers appear
        # to be invoked as plain functions by the framework; confirm.
        fit.modules.filteredChargeBoost((lambda mod: mod.charge.requiresSkill('Missile Launcher Operation')), 'thermalDamage', src.getModifiedItemAttr('shipBonusMD1'), skill='Minmatar Destroyer', **kwargs)
class RwPooledEmbeddingSharding(BaseRwEmbeddingSharding[(EmbeddingShardingContext, KeyedJaggedTensor, torch.Tensor, torch.Tensor)]):
    """Row-wise sharding for pooled embeddings: builds the input distributor,
    the grouped lookup, and the output distributor for this sharding scheme."""

    def create_input_dist(self, device: Optional[torch.device]=None) -> BaseSparseFeaturesDist[KeyedJaggedTensor]:
        # Routes each sparse feature row to the rank that owns its hash bucket.
        num_features = self._get_num_features()
        feature_hash_sizes = self._get_feature_hash_sizes()
        return RwSparseFeaturesDist(pg=self._pg, num_features=num_features, feature_hash_sizes=feature_hash_sizes, device=(device if (device is not None) else self._device), is_sequence=False, has_feature_processor=self._has_feature_processor, need_pos=self._need_pos)

    def create_lookup(self, device: Optional[torch.device]=None, fused_params: Optional[Dict[(str, Any)]]=None, feature_processor: Optional[BaseGroupedFeatureProcessor]=None) -> BaseEmbeddingLookup:
        # NOTE(review): *fused_params* is accepted but not forwarded here;
        # confirm whether that is intentional upstream.
        return GroupedPooledEmbeddingsLookup(grouped_configs=self._grouped_embedding_configs, pg=self._pg, device=(device if (device is not None) else self._device), feature_processor=feature_processor)

    def create_output_dist(self, device: Optional[torch.device]=None) -> BaseEmbeddingDist[(EmbeddingShardingContext, torch.Tensor, torch.Tensor)]:
        # Reduces/gathers per-rank pooled embeddings back to the callers.
        return RwPooledEmbeddingDist(self._pg, qcomm_codecs_registry=self.qcomm_codecs_registry, embedding_dims=self.embedding_dims())
def evaluate_webnlg_challenge_2017(references_s, preds):
    """Score *preds* with the external WebNLG-2017 evaluation script and
    return the parsed score summary (references_s is unused here; the
    script supplies its own references)."""
    tmp_file_name = 'webnlg_challenge_2017_tmp4eval.txt'
    # One prediction per line, as the evaluation script expects.
    with open(tmp_file_name, 'w') as tmp_file:
        tmp_file.writelines(f'{pred}\n' for pred in preds)
    os.system('bash utils/process/general/dart_lib/run_eval_on_webnlg.sh {}'.format(tmp_file_name))
    return extract_score_webnlg()
class PublicKey(Key):
    """An ECDSA (secp256k1) public key with Bitcoin-style serialization.

    Wraps an ecdsa ``VerifyingKey`` and exposes hex (compressed and
    uncompressed) encodings, address derivation, and conversions to/from
    curve points and (x, y) public pairs.

    NOTE(review): ``from_hex_key``/``from_point``/``from_verifying_key``/
    ``from_public_pair`` take ``cls`` — they are presumably decorated with
    ``@classmethod`` upstream; the decorators are not visible in this chunk.
    """

    def __init__(self, verifying_key, network=BitcoinMainNet, *args, **kwargs):
        # Cache the curve point's affine coordinates for serialization.
        super(PublicKey, self).__init__(*args, network=network, **kwargs)
        self._verifying_key = verifying_key
        self.x = verifying_key.pubkey.point.x()
        self.y = verifying_key.pubkey.point.y()

    def get_key(self, compressed=None):
        """Return the SEC-encoded key as hex bytes.

        Compressed: parity byte (0x02/0x03 from y's parity) + 32-byte x.
        Uncompressed: 0x04 + 32-byte x + 32-byte y.
        """
        if (compressed is None):
            compressed = self.compressed
        if compressed:
            # 0x02 for even y, 0x03 for odd y.
            parity = (2 + (self.y & 1))
            return ensure_bytes((long_to_hex(parity, 2) + long_to_hex(self.x, 64)))
        else:
            return ensure_bytes(((b'04' + long_to_hex(self.x, 64)) + long_to_hex(self.y, 64)))

    def from_hex_key(cls, key, network=BitcoinMainNet):
        """Build a PublicKey from a hex string or raw SEC-encoded bytes."""
        if ((len(key) == 130) or (len(key) == 66)):
            # Hex-encoded input: decode to the raw 65/33-byte form first.
            try:
                key = unhexlify(key)
            except TypeError:
                pass
        key = ensure_bytes(key)
        compressed = False
        id_byte = key[0]
        if (not isinstance(id_byte, six.integer_types)):
            # Python 2: indexing bytes yields a str of length 1.
            id_byte = ord(id_byte)
        if (id_byte == 4):
            # Uncompressed: x and y are given explicitly.
            if (len(key) != 65):
                raise KeyParseError('Invalid key length')
            public_pair = PublicPair(long_or_int(hexlify(key[1:33]), 16), long_or_int(hexlify(key[33:]), 16))
        elif (id_byte in [2, 3]):
            # Compressed: recover y from x via y^2 = x^3 + ax + b (mod p),
            # picking the root whose parity matches the id byte.
            compressed = True
            if (len(key) != 33):
                raise KeyParseError('Invalid key length')
            y_odd = bool((id_byte & 1))
            x = long_or_int(hexlify(key[1:]), 16)
            curve = SECP256k1.curve
            p = curve.p()
            alpha = (((pow(x, 3, p) + (curve.a() * x)) + curve.b()) % p)
            beta = square_root_mod_prime(alpha, p)
            y_even = (not y_odd)
            if (y_even == bool((beta & 1))):
                # beta has the wrong parity; use the other root p - beta.
                public_pair = PublicPair(x, (p - beta))
            else:
                public_pair = PublicPair(x, beta)
        else:
            raise KeyParseError('The given key is not in a known format.')
        return cls.from_public_pair(public_pair, network=network, compressed=compressed)

    def create_point(self, x, y):
        """Return the secp256k1 curve point at integer coordinates (x, y)."""
        if ((not isinstance(x, six.integer_types)) or (not isinstance(y, six.integer_types))):
            raise ValueError('The coordinates must be longs.')
        return _ECDSA_Point(SECP256k1.curve, x, y)

    def to_point(self):
        """Return the underlying ecdsa curve point."""
        return self._verifying_key.pubkey.point

    def from_point(cls, point, network=BitcoinMainNet, **kwargs):
        """Build a PublicKey from an ecdsa curve point."""
        verifying_key = VerifyingKey.from_public_point(point, curve=SECP256k1)
        return cls.from_verifying_key(verifying_key, network=network, **kwargs)

    def from_verifying_key(cls, verifying_key, network=BitcoinMainNet, **kwargs):
        """Build a PublicKey directly from an ecdsa VerifyingKey."""
        return cls(verifying_key, network=network, **kwargs)

    def to_address(self, compressed=None):
        """Return the Base58Check address: version byte + HASH160(pubkey)."""
        key = unhexlify(self.get_key(compressed))
        hash160_bytes = hash160(key)
        network_hash160_bytes = (chr_py2(self.network.PUBKEY_ADDRESS) + hash160_bytes)
        return ensure_str(base58.b58encode_check(network_hash160_bytes))

    def to_public_pair(self):
        """Return the (x, y) integer coordinates of this key."""
        return PublicPair(self.x, self.y)

    def from_public_pair(cls, pair, network=BitcoinMainNet, **kwargs):
        """Build a PublicKey from an (x, y) PublicPair."""
        point = _ECDSA_Point(SECP256k1.curve, pair.x, pair.y)
        return cls.from_point(point, network=network, **kwargs)

    def __eq__(self, other):
        return (super(PublicKey, self).__eq__(other) and (self.x == other.x) and (self.y == other.y))
    # Keep hashability from the base class despite redefining __eq__.
    __hash__ = Key.__hash__
# NOTE(review): the bare '(frozen=True)' below looks like a stripped
# '@dataclasses.dataclass(frozen=True)' decorator; likewise several methods
# (from_json_dict, bit_pack_unpack) take 'cls' and are presumably
# @classmethod upstream.  Confirm against the original file.
(frozen=True)
class Preset(BitPackValue):
    """An immutable, named game configuration that can be serialized to
    JSON and bit-packed (as a delta against the game's reference preset)."""
    name: str
    uuid: uuid_module.UUID
    description: str
    game: RandovaniaGame
    configuration: BaseConfiguration

    def as_json(self) -> dict:
        """Serialize to a plain JSON-compatible dict."""
        return {'name': self.name, 'uuid': str(self.uuid), 'description': self.description, 'game': self.game.value, 'configuration': self.configuration.as_json}

    def from_json_dict(cls, value) -> Preset:
        """Deserialize a Preset from the dict produced by as_json."""
        game = RandovaniaGame(value['game'])
        return Preset(name=value['name'], uuid=uuid_module.UUID(value['uuid']), description=value['description'], game=game, configuration=game.data.layout.configuration.from_json(value['configuration']))

    def dangerous_settings(self) -> list[str]:
        """Settings flagged as dangerous by the configuration."""
        return self.configuration.dangerous_settings()

    def settings_incompatible_with_multiworld(self) -> list[str]:
        """Settings that cannot be used in multiworld sessions."""
        return self.configuration.settings_incompatible_with_multiworld()

    def is_same_configuration(self, other: Preset) -> bool:
        """True if both presets share an identical configuration."""
        return (self.configuration == other.configuration)

    def bit_pack_encode(self, metadata) -> Iterator[tuple[(int, int)]]:
        # Encode as a delta against the game's reference preset so common
        # settings cost almost no bits.
        manager: PresetManager = metadata['manager']
        reference = manager.reference_preset_for_game(self.game).get_preset()
        (yield from self.configuration.bit_pack_encode({'reference': reference.configuration}))

    def bit_pack_unpack(cls, decoder: BitPackDecoder, metadata) -> Preset:
        # Inverse of bit_pack_encode: rebuild from the reference preset;
        # name/description/uuid are not part of the encoding.
        manager: PresetManager = metadata['manager']
        game: RandovaniaGame = metadata['game']
        reference = manager.reference_preset_for_game(game).get_preset()
        return Preset(name=f'{game.long_name} Custom', description='A customized preset.', uuid=uuid_module.uuid4(), game=reference.game, configuration=reference.configuration.bit_pack_unpack(decoder, {'reference': reference.configuration}))

    def fork(self) -> Preset:
        """Return a copy of this preset with a fresh uuid and derived name."""
        return dataclasses.replace(self, name=f'{self.name} Copy', description=f'A copy version of {self.name}.', uuid=uuid_module.uuid4())
def test_vec2_transform(test, device, n):
    """Check the transform_vec2 kernel against a NumPy reference.

    Launches the kernel over n threads with matrix m and vector c, then
    verifies every output slot holds the same transformed vector.
    """
    dest = wp.zeros(n=n, dtype=wp.vec2, device=device)
    c = np.array((1.0, 2.0))
    m = np.array(((3.0, -1.0), (2.5, 4.0)))
    wp.launch(transform_vec2, dim=n, inputs=[dest, m, c], device=device)
    # Fix: the original `np.tile((m c), (n, 1))` was a syntax error (missing
    # operator between m and c). The reference value is the matrix-vector
    # product m @ c, tiled once per thread.
    # NOTE(review): confirm transform_vec2 computes a pure m @ v product
    # (no translation term) against the kernel's definition.
    expected = np.tile(m @ c, (n, 1))
    test.assertTrue(np.array_equal(dest.numpy(), expected))
class ImagePyramid(ComplexObject):
    """Image pyramid for coarse-to-fine optimization.

    Iterating yields each PyramidLevel in order; before a level is yielded,
    the target/input images and guides of all registered resize-target losses
    are resized to that level, and the originals are restored when the
    level's iteration finishes (even on early exit).
    """

    def __init__(self, edge_sizes: Sequence[int], num_steps: Union[(Sequence[int], int)], edge: Union[(Sequence[str], str)]='short', interpolation_mode: str='bilinear', resize_targets: Collection[loss.Loss]=()):
        self._levels = self.build_levels(edge_sizes, num_steps, edge)
        self.interpolation_mode = interpolation_mode
        self._resize_targets = set(resize_targets)

    # NOTE(review): called as self.build_levels(edge_sizes, num_steps, edge)
    # with three positional arguments while the signature has no `self`; that
    # only binds correctly if a @staticmethod decorator was stripped during
    # extraction — confirm against the original source.
    def build_levels(edge_sizes: Sequence[int], num_steps: Union[(Sequence[int], int)], edge: Union[(Sequence[str], str)]) -> Tuple[(PyramidLevel, ...)]:
        """Broadcast scalar num_steps/edge across levels and build one
        PyramidLevel per entry of edge_sizes."""
        num_levels = len(edge_sizes)
        if isinstance(num_steps, int):
            num_steps = ([num_steps] * num_levels)
        if isinstance(edge, str):
            edge = ([edge] * num_levels)
        # zip_equal raises if the per-level sequences have mismatched lengths.
        return tuple((PyramidLevel(edge_size, num_steps_, edge_) for (edge_size, num_steps_, edge_) in zip_equal(edge_sizes, num_steps, edge)))

    def add_resize_target(self, loss: loss.Loss) -> None:
        """Register an additional loss whose images get resized per level."""
        self._resize_targets.add(loss)

    def __len__(self) -> int:
        return len(self._levels)

    def __getitem__(self, idx: int) -> PyramidLevel:
        return self._levels[idx]

    def __iter__(self) -> Iterator[PyramidLevel]:
        # Snapshot the current images/guides so they can be restored after
        # each level, even if the consumer aborts mid-iteration.
        image_storage = ImageStorage(self._resize_losses())
        for level in self._levels:
            try:
                self._resize(level)
                (yield level)
            finally:
                image_storage.restore()

    def _resize(self, level: PyramidLevel) -> None:
        """Resize images and guides of every affected loss to `level`."""
        for loss_ in self._resize_losses():
            if isinstance(loss_, loss.ComparisonLoss):
                if (loss_.target_image is not None):
                    resized_image = level.resize_image(loss_.target_image, interpolation_mode=self.interpolation_mode)
                    # Guides are optional; only resize when one is set.
                    resized_guide = (level.resize_guide(loss_.target_guide) if (loss_.target_guide is not None) else None)
                    loss_.set_target_image(resized_image, guide=resized_guide)
            if (loss_.input_guide is not None):
                resized_guide = level.resize_guide(loss_.input_guide)
                loss_.set_input_guide(resized_guide)

    def _resize_losses(self) -> Set[loss.Loss]:
        # Collect the leaf losses reachable from the resize targets;
        # containers are excluded so each concrete loss is handled once.
        return {loss_ for target in self._resize_targets for loss_ in target.modules() if (isinstance(loss_, loss.Loss) and (not isinstance(loss_, loss.LossContainer)))}

    def _properties(self) -> Dict[(str, Any)]:
        # Only report interpolation_mode when it differs from the default.
        dct = super()._properties()
        if (self.interpolation_mode != 'bilinear'):
            dct['interpolation_mode'] = self.interpolation_mode
        return dct

    def _named_children(self) -> Iterator[Tuple[(str, Any)]]:
        (yield from super()._named_children())
        # Expose each pyramid level as a numbered child.
        for (idx, level) in enumerate(self._levels):
            (yield (str(idx), level))
def _validate_geometry_input(geoms, ids=None, valid_geometry_types=None):
    """Normalize supported geometry inputs to (coordinates, ids, geoms).

    Accepts a GeoSeries/GeoDataFrame, an array with a geometry dtype, or an
    (n, 2) array of xy coordinates; the latter two are converted and routed
    back through this function. Optionally restricts the allowed geom types.
    """
    if isinstance(geoms, (geopandas.GeoSeries, geopandas.GeoDataFrame)):
        geoms = geoms.geometry
        ids = np.asarray(geoms.index if ids is None else ids)
        present_types = set(geoms.geom_type)
        if valid_geometry_types is not None:
            if isinstance(valid_geometry_types, str):
                valid_geometry_types = (valid_geometry_types,)
            valid_geometry_types = set(valid_geometry_types)
            if not present_types <= valid_geometry_types:
                raise ValueError(f'This Graph type is only well-defined for geom_types: {valid_geometry_types}.')
        coordinates = shapely.get_coordinates(geoms)
        relabeled = geoms.copy()
        relabeled.index = ids
        return coordinates, ids, relabeled
    if isinstance(geoms.dtype, geopandas.array.GeometryDtype):
        # Geometry array: promote to a GeoSeries and re-validate.
        return _validate_geometry_input(geopandas.GeoSeries(geoms), ids=ids, valid_geometry_types=valid_geometry_types)
    if (geoms.ndim == 2) and (geoms.shape[1] == 2):
        # Raw coordinate pairs: build Point geometries and re-validate.
        return _validate_geometry_input(geopandas.points_from_xy(*geoms.T), ids=ids, valid_geometry_types=valid_geometry_types)
    raise ValueError('input geometry type is not supported. Input must either be a geopandas.GeoSeries, geopandas.GeoDataFrame, a numpy array with a geometry dtype, or an array of coordinates.')
def start_end_collate(batch):
    """Collate a list of dataset items into (batch_meta, batched_model_inputs).

    span_labels stay as per-item dicts, saliency pos/neg labels become a
    LongTensor, saliency_all_labels is padded then converted to a float
    tensor, and everything else goes through pad_sequences_1d.
    """
    batch_meta = [item['meta'] for item in batch]
    batched_data = {}
    for key in batch[0]['model_inputs'].keys():
        column = [item['model_inputs'][key] for item in batch]
        if key == 'span_labels':
            # Spans are variable-count; keep them as a list of dicts.
            batched_data[key] = [dict(spans=spans) for spans in column]
        elif key in ('saliency_pos_labels', 'saliency_neg_labels'):
            batched_data[key] = torch.LongTensor(column)
        elif key == 'saliency_all_labels':
            padded, _mask = pad_sequences_1d(column, dtype=np.float32, fixed_length=None)
            batched_data[key] = torch.tensor(padded, dtype=torch.float32)
        else:
            batched_data[key] = pad_sequences_1d(column, dtype=torch.float32, fixed_length=None)
    return batch_meta, batched_data
def test_metadata_dictionary_keys():
    """The sdist fixture's metadata dictionary exposes exactly these keys."""
    expected_keys = {
        'name', 'version', 'filetype', 'pyversion', 'metadata_version',
        'summary', 'home_page', 'author', 'author_email', 'maintainer',
        'maintainer_email', 'license', 'description', 'keywords', 'platform',
        'classifiers', 'download_url', 'supported_platform', 'comment',
        'md5_digest', 'sha256_digest', 'blake2_256_digest', 'provides',
        'requires', 'obsoletes', 'project_urls', 'provides_dist',
        'obsoletes_dist', 'requires_dist', 'requires_external',
        'requires_python', 'provides_extras', 'description_content_type',
        'dynamic',
    }
    package = package_file.PackageFile.from_filename(helpers.SDIST_FIXTURE, None)
    assert set(package.metadata_dictionary()) == expected_keys
def _get_unsharded_module_names_helper(model: torch.nn.Module, path: str, unsharded_module_names: Set[str]) -> bool:
sharded_children = set()
for (name, child) in model.named_children():
curr_path = (path + name)
if isinstance(child, ShardedModule):
sharded_children.add(name)
else:
child_sharded = _get_unsharded_module_names_helper(child, (curr_path + '.'), unsharded_module_names)
if child_sharded:
sharded_children.add(name)
if (len(sharded_children) > 0):
for (name, _) in model.named_children():
if (name not in sharded_children):
unsharded_module_names.add((path + name))
return (len(sharded_children) > 0) |
def make_sequence(feats, feats_aux):
    """Pack two parallel float-feature sequences into a tf.train.SequenceExample.

    Each element of `feats`/`feats_aux` becomes one float-list Feature in the
    'inputs' / 'inputs_aux' feature lists respectively.
    """
    def _float_feature(values):
        # One time step worth of float values.
        return tf.train.Feature(float_list=tf.train.FloatList(value=values))

    feature_lists = tf.train.FeatureLists(feature_list={
        'inputs': tf.train.FeatureList(feature=[_float_feature(f) for f in feats]),
        'inputs_aux': tf.train.FeatureList(feature=[_float_feature(f) for f in feats_aux]),
    })
    return tf.train.SequenceExample(feature_lists=feature_lists)
class DCUN_TFC_FiLM_LaSAFT(DenseCUNet_FiLM):
    """Dense CUNet whose blocks combine TFC with LaSAFT attention.

    A control embedding produces per-block FiLM (gamma, beta) parameters and
    is also fed to every block's LaSAFT module.
    """

    def __init__(self, n_fft, input_channels, internal_channels, n_blocks, n_internal_layers, first_conv_activation, last_activation, t_down_layers, f_down_layers, kernel_size_t, kernel_size_f, bn_factor, min_bn_units, tfc_tdf_bias, tfc_tdf_activation, num_tdfs, dk, control_vector_type, control_input_dim, embedding_dim, control_type, control_n_layer, condition_to, film_type, gamma_activation, beta_activation):
        # Resolve the activation name to a callable once; the factory
        # closures below capture the resolved value.
        tfc_tdf_activation = get_activation_by_name(tfc_tdf_activation)

        def mk_tfc_lasaft(in_channels, internal_channels, f):
            # Factory for one TFC+LaSAFT block at frequency size f.
            return TFC_LaSAFT(in_channels, n_internal_layers, internal_channels, kernel_size_t, kernel_size_f, f, bn_factor, min_bn_units, tfc_tdf_bias, tfc_tdf_activation, embedding_dim, num_tdfs, dk)

        def mk_ds(internal_channels, i, f, t_down_layers):
            # Downsampling factory: always halve frequency; halve time only
            # for layer indices listed in t_down_layers (all when None).
            if (t_down_layers is None):
                scale = (2, 2)
            else:
                scale = ((2, 2) if (i in t_down_layers) else (1, 2))
            ds = nn.Sequential(nn.Conv2d(in_channels=internal_channels, out_channels=internal_channels, kernel_size=scale, stride=scale), nn.BatchNorm2d(internal_channels))
            # Return the module plus the resulting frequency size.
            return (ds, (f // scale[(- 1)]))

        def mk_us(internal_channels, i, f, n, t_down_layers):
            # Upsampling factory mirroring mk_ds; decoder index i maps to the
            # reflected encoder index (n - 1 - s).
            if (t_down_layers is None):
                scale = (2, 2)
            else:
                scale = ((2, 2) if (i in [((n - 1) - s) for s in t_down_layers]) else (1, 2))
            us = nn.Sequential(nn.ConvTranspose2d(in_channels=internal_channels, out_channels=internal_channels, kernel_size=scale, stride=scale), nn.BatchNorm2d(internal_channels))
            return (us, (f * scale[(- 1)]))
        super(DCUN_TFC_FiLM_LaSAFT, self).__init__(n_fft, input_channels, internal_channels, n_blocks, n_internal_layers, mk_tfc_lasaft, mk_ds, mk_us, first_conv_activation, last_activation, t_down_layers, f_down_layers, control_vector_type, control_input_dim, embedding_dim, condition_to, control_type, control_n_layer, film_type, gamma_activation, beta_activation)

    def forward(self, input_spec, input_condition):
        """U-Net forward pass with FiLM/LaSAFT conditioning.

        input_spec: spectrogram batch; input_condition: control input that
        selects the target source to separate.
        """
        condition_embedding = self.embedding(input_condition)
        # gammas/betas each unpack into (encoder, middle, decoder) parts.
        (gammas, betas) = self.condition_generator(condition_embedding)
        x = self.first_conv(input_spec)
        encoding_outputs = []
        (gammas_encoder, gammas_middle, gammas_decoder) = gammas
        (betas_encoder, betas_middle, betas_decoder) = betas
        for i in range(self.n):
            x = self.encoders[i].tfc(x)
            if self.is_encoder_conditioned:
                x = self.film(x, gammas_encoder[i], betas_encoder[i])
            # Residual LaSAFT attention conditioned on the embedding.
            x = (x + self.encoders[i].lasaft(x, condition_embedding))
            encoding_outputs.append(x)
            x = self.downsamplings[i](x)
        x = self.mid_block.tfc(x)
        if self.is_middle_conditioned:
            x = self.film(x, gammas_middle, betas_middle)
        x = (x + self.mid_block.lasaft(x, condition_embedding))
        for i in range(self.n):
            x = self.upsamplings[i](x)
            # Skip connection from the matching encoder stage.
            x = torch.cat((x, encoding_outputs[((- i) - 1)]), 1)
            x = self.decoders[i].tfc(x)
            if self.is_decoder_conditioned:
                x = self.film(x, gammas_decoder[i], betas_decoder[i])
            x = (x + self.decoders[i].lasaft(x, condition_embedding))
        return self.last_conv(x)
class RagRayDistributedRetriever(RagRetriever):
    """RAG retriever that distributes document retrieval over Ray actors.

    Each actor in `retrieval_workers` builds its own retriever/index; each
    retrieve call is routed to a randomly chosen worker. With no workers the
    retriever falls back to local, in-process retrieval.
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        # An already-initialized index cannot be shipped to remote workers;
        # dataset and index must be loaded separately inside each actor.
        if ((index is not None) and index.is_initialized() and (len(retrieval_workers) > 0)):
            raise ValueError("When using Ray for distributed fine-tuning, you'll need to provide the paths instead, as the dataset and the index are loaded separately. More info in examples/rag/use_own_knowledge_dataset.py ")
        super().__init__(config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False)
        self.retrieval_workers = retrieval_workers
        if (len(self.retrieval_workers) > 0):
            # Construct one RagRetriever inside every Ray actor.
            ray.get([worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index) for worker in self.retrieval_workers])

    def init_retrieval(self):
        """Initialize the index, either on all Ray workers or locally."""
        logger.info('initializing retrieval')
        if (len(self.retrieval_workers) > 0):
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        """Retrieve n_docs documents per query.

        Returns (retrieved_doc_embeds, doc_ids, doc_dicts).
        """
        if (len(self.retrieval_workers) > 0):
            # Cheap load balancing: pick a worker uniformly at random.
            random_worker = self.retrieval_workers[random.randint(0, (len(self.retrieval_workers) - 1))]
            (doc_ids, retrieved_doc_embeds) = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            (doc_ids, retrieved_doc_embeds) = self._main_retrieve(question_hidden_states, n_docs)
        # NOTE(review): doc dicts are always fetched from the local index,
        # even when embeddings came from a remote worker — confirm the local
        # index is available/synchronized in the distributed setup.
        return (retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids))

    # NOTE(review): takes `cls` — likely a stripped @classmethod decorator.
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    # NOTE(review): takes `cls` — likely a stripped @classmethod decorator.
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        """Build a retriever plus its tokenizers from a pretrained name/path.

        When `indexed_dataset` is given, a custom HF index wraps it;
        otherwise the index is built from the config.
        """
        config = (kwargs.pop('config', None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs))
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if (indexed_dataset is not None):
            config.index_name = 'custom'
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, retrieval_workers=actor_handles, index=index)
def plot_losses(losses: Union[(nn.Module, List[nn.Module])], visdom_server: Optional['visdom.Visdom']=None, env: Optional[str]=None, win: Optional[str]=None, title: str='') -> Any:
    """Plot loss-function curves over scores in [-5, 5) on a visdom server.

    Each loss module is evaluated point-wise along the score axis and drawn
    as one line; returns the visdom window handle (or None when no server
    connection is available).
    """
    # Fall back to the most recently registered global visdom connection.
    if ((visdom_server is None) and visdom_connected()):
        visdom_server = vis[(- 1)]
    if ((not visdom_server) or (not visdom_server.check_connection())):
        print('WARNING: Not connected to visdom. Skipping plotting.')
        return
    if isinstance(losses, nn.Module):
        losses = [losses]
    assert (type(losses) == list)
    assert all((isinstance(loss, nn.Module) for loss in losses))
    if any((isinstance(loss, UNSUPPORTED_LOSSES) for loss in losses)):
        raise NotImplementedError('loss function not supported')
    for (idx, loss) in enumerate(losses):
        score = torch.arange((- 5.0), 5.0, 0.005)
        if (idx == 0):
            # Allocate once: one column of loss values per loss module.
            loss_val = torch.FloatTensor(score.size(0), len(losses))
        # Regression losses get a real-valued 0.0 target; other losses a
        # class-index target of 1.
        if isinstance(loss, REGRESSION_LOSSES):
            target = torch.FloatTensor(score.size()).fill_(0.0)
        else:
            target = torch.LongTensor(score.size()).fill_(1)
        # Evaluate the loss one score point at a time.
        for n in range(0, score.nelement()):
            loss_val[n][idx] = loss(score.narrow(0, n, 1), target.narrow(0, n, 1)).item()
    # NOTE(review): source indentation was lost in extraction; these lines are
    # placed after the loop since loss_val must be fully populated before
    # plotting — confirm against the original file.
    title = (str(loss) if (title == '') else title)
    legend = [str(loss) for loss in losses]
    opts = {'title': title, 'xlabel': 'Score', 'ylabel': 'Loss', 'legend': legend}
    win = visdom_server.line(loss_val, score, env=env, win=win, opts=opts)
    return win
def create_masked_lm_predictions(tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng):
cand_indexes = []
for (i, token) in enumerate(tokens):
if ((token == '[CLS]') or (token == '[SEP]')):
continue
cand_indexes.append(i)
rng.shuffle(cand_indexes)
output_tokens = list(tokens)
masked_lm = collections.namedtuple('masked_lm', ['index', 'label'])
num_to_predict = min(max_predictions_per_seq, max(1, int(round((len(tokens) * masked_lm_prob)))))
masked_lms = []
covered_indexes = set()
for index in cand_indexes:
if (len(masked_lms) >= num_to_predict):
break
if (index in covered_indexes):
continue
covered_indexes.add(index)
masked_token = None
if (rng.random() < 0.8):
masked_token = '[MASK]'
elif (rng.random() < 0.5):
masked_token = tokens[index]
else:
masked_token = vocab_words[rng.randint(0, (len(vocab_words) - 1))]
output_tokens[index] = masked_token
masked_lms.append(masked_lm(index=index, label=tokens[index]))
masked_lms = sorted(masked_lms, key=(lambda x: x.index))
masked_lm_positions = []
masked_lm_labels = []
for p in masked_lms:
masked_lm_positions.append(p.index)
masked_lm_labels.append(p.label)
return (output_tokens, masked_lm_positions, masked_lm_labels) |
def with_progress(iterable, desc=None, total=None, leave=True):
    """Wrap `iterable` with a tqdm progress bar when tqdm is available.

    Falls back to returning the iterable unchanged if tqdm is not installed.
    If `total` is None it is taken from len(iterable) when possible (0
    otherwise). When `leave` is true, a trailing newline is printed after
    the bar finishes.
    """
    try:
        from tqdm import tqdm
    except ImportError:
        return iterable

    def _wrapped():
        count = total
        if count is None:
            try:
                count = len(iterable)
            except Exception:
                # Unsized iterables (generators etc.) get an unknown total.
                count = 0
        for item in tqdm(iterable, desc=desc, total=count, leave=leave):
            yield item
        if leave:
            print('')

    return _wrapped()
class UserReal():
    """Interactive human stand-in for a simulated dialogue user.

    Loads a restaurant info table from the path in `config` and, per
    episode, shows the target restaurant so the human can answer turns.
    """

    def __init__(self, config):
        # Load the per-restaurant info table once up front.
        with open(config.restaurants_info_dict_path, 'r') as fh:
            self.restaurants_info_dict = json.load(fh)
        self.business_info_dict = None
        self.user_name = None

    def init_episode(self, user_name, business_name):
        """Start an episode as `user_name` targeting `business_name`."""
        self.user_name = user_name
        self.business_info_dict = self.restaurants_info_dict[business_name]
        print('Now you are user {}'.format(self.user_name))
        print('Target restaurant info: {}'.format(str(self.business_info_dict)))

    def next_turn(self, request_facet):
        """Read the human's utterance for this turn.

        `request_facet` is part of the simulator interface; the human types
        free text instead of responding to a specific facet.
        """
        return input('your turn:')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.