# NOTE: dataset-export artifact (markdown table header) — this file is a
# concatenation of unrelated code fragments; the header row carried no code.
# Generic network-trainer base class (nnU-Net / nnFormer style) specialised
# for the ACDC dataset. NOTE(review): indentation appears to have been
# stripped from this whole fragment; structure is inferred — confirm upstream.
class NetworkTrainer_acdc(object):
# Seed RNGs, configure cudnn, and initialise all bookkeeping/checkpoint state.
# deterministic: seed numpy/torch and force cudnn-deterministic mode.
# fp16: enable mixed precision (GradScaler is created lazily by _maybe_init_amp).
def __init__(self, deterministic=True, fp16=False, seed=12345):
self.fp16 = fp16
self.amp_grad_scaler = None
if deterministic:
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
else:
cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
# Populated by subclass initialize_network(); annotation is odd (Tuple of
# types) — presumably means "SegmentationNetwork or nn.DataParallel".
self.network: Tuple[(SegmentationNetwork, nn.DataParallel)] = None
self.optimizer = None
self.lr_scheduler = None
# Training / validation batch generators (set by subclass initialize()).
self.tr_gen = self.val_gen = None
self.was_initialized = False
self.output_folder = None
self.fold = None
self.loss = None
self.dataset_directory = None
self.dataset = None
self.dataset_tr = self.dataset_val = None
# Early-stopping / moving-average hyperparameters.
self.patience = 50
self.val_eval_criterion_alpha = 0.9
self.train_loss_MA_alpha = 0.93
self.train_loss_MA_eps = 0.0005
self.max_num_epochs = 1000
self.num_batches_per_epoch = 250
self.num_val_batches_per_epoch = 50
self.also_val_in_tr_mode = False
self.lr_threshold = 1e-06
# Moving averages and bests, filled in during training.
self.val_eval_criterion_MA = None
self.train_loss_MA = None
self.best_val_eval_criterion_MA = None
self.best_MA_tr_loss_for_patience = None
self.best_epoch_based_on_MA_tr_loss = None
# Per-epoch histories (used for plotting and checkpoint resume).
self.all_tr_losses = []
self.all_val_losses = []
self.all_val_losses_tr_mode = []
self.all_val_eval_metrics = []
self.epoch = 0
self.log_file = None
self.deterministic = deterministic
# Progress bar can be toggled via the nnformer_use_progress_bar env var.
self.use_progress_bar = False
if ('nnformer_use_progress_bar' in os.environ.keys()):
self.use_progress_bar = bool(int(os.environ['nnformer_use_progress_bar']))
# Checkpointing policy.
self.save_every = 30
self.save_latest_only = False
self.save_intermediate_checkpoints = True
self.save_best_checkpoint = True
self.save_final_checkpoint = True
# Subclass hook: build network, loss, generators. NOTE(review): the nesting
# below looks corrupted — load_dataset appears inside initialize's body here,
# but upstream they are sibling methods; confirm against the original file.
def initialize(self, training=True):
def load_dataset(self):
pass
# Split the loaded dataset into train/val according to a persistent 5-fold
# split file; fold 'all' trains and validates on everything.
def do_split(self):
splits_file = join(self.dataset_directory, 'splits_final.pkl')
# Create the split file once; later runs reuse it for reproducibility.
if (not isfile(splits_file)):
self.print_to_log_file('Creating new split...')
splits = []
all_keys_sorted = np.sort(list(self.dataset.keys()))
kfold = KFold(n_splits=5, shuffle=True, random_state=12345)
for (i, (train_idx, test_idx)) in enumerate(kfold.split(all_keys_sorted)):
train_keys = np.array(all_keys_sorted)[train_idx]
test_keys = np.array(all_keys_sorted)[test_idx]
splits.append(OrderedDict())
splits[(- 1)]['train'] = train_keys
splits[(- 1)]['val'] = test_keys
save_pickle(splits, splits_file)
splits = load_pickle(splits_file)
if (self.fold == 'all'):
# Special fold: train and validate on the full dataset.
tr_keys = val_keys = list(self.dataset.keys())
else:
tr_keys = splits[self.fold]['train']
val_keys = splits[self.fold]['val']
tr_keys.sort()
val_keys.sort()
self.dataset_tr = OrderedDict()
for i in tr_keys:
self.dataset_tr[i] = self.dataset[i]
self.dataset_val = OrderedDict()
for i in val_keys:
self.dataset_val[i] = self.dataset[i]
# Render the loss curves (and the eval metric on a twin axis, when one value
# per epoch exists) to progress.png in the output folder. Failures to plot
# are logged, never fatal.
def plot_progress(self):
try:
font = {'weight': 'normal', 'size': 18}
matplotlib.rc('font', **font)
fig = plt.figure(figsize=(30, 24))
ax = fig.add_subplot(111)
# Second y-axis for the evaluation metric.
ax2 = ax.twinx()
x_values = list(range((self.epoch + 1)))
ax.plot(x_values, self.all_tr_losses, color='b', ls='-', label='loss_tr')
ax.plot(x_values, self.all_val_losses, color='r', ls='-', label='loss_val, train=False')
if (len(self.all_val_losses_tr_mode) > 0):
ax.plot(x_values, self.all_val_losses_tr_mode, color='g', ls='-', label='loss_val, train=True')
# Only plot the metric when its length matches the epoch axis.
if (len(self.all_val_eval_metrics) == len(x_values)):
ax2.plot(x_values, self.all_val_eval_metrics, color='g', ls='--', label='evaluation metric')
ax.set_xlabel('epoch')
ax.set_ylabel('loss')
ax2.set_ylabel('evaluation metric')
ax.legend()
ax2.legend(loc=9)
fig.savefig(join(self.output_folder, 'progress.png'))
plt.close()
except IOError:
self.print_to_log_file('failed to plot: ', sys.exc_info())
# Append *args to a per-run log file (created lazily, named by timestamp),
# retrying up to 5 times on IOError; optionally echo to stdout.
def print_to_log_file(self, *args, also_print_to_console=True, add_timestamp=True):
timestamp = time()
dt_object = datetime.fromtimestamp(timestamp)
if add_timestamp:
args = (('%s:' % dt_object), *args)
if (self.log_file is None):
maybe_mkdir_p(self.output_folder)
timestamp = datetime.now()
self.log_file = join(self.output_folder, ('training_log_%d_%d_%d_%02.0d_%02.0d_%02.0d.txt' % (timestamp.year, timestamp.month, timestamp.day, timestamp.hour, timestamp.minute, timestamp.second)))
with open(self.log_file, 'w') as f:
f.write('Starting... \n')
successful = False
max_attempts = 5
ctr = 0
# Retry loop: transient filesystem errors should not kill training.
while ((not successful) and (ctr < max_attempts)):
try:
with open(self.log_file, 'a+') as f:
for a in args:
f.write(str(a))
f.write(' ')
f.write('\n')
successful = True
except IOError:
print(('%s: failed to log: ' % datetime.fromtimestamp(timestamp)), sys.exc_info())
sleep(0.5)
ctr += 1
if also_print_to_console:
print(*args)
# Serialise network (on CPU), optimizer, scheduler, AMP scaler and training
# histories to fname via torch.save.
def save_checkpoint(self, fname, save_optimizer=True):
start_time = time()
state_dict = self.network.state_dict()
# Move tensors to CPU so the checkpoint loads on machines without a GPU.
for key in state_dict.keys():
state_dict[key] = state_dict[key].cpu()
lr_sched_state_dct = None
if ((self.lr_scheduler is not None) and hasattr(self.lr_scheduler, 'state_dict')):
lr_sched_state_dct = self.lr_scheduler.state_dict()
if save_optimizer:
optimizer_state_dict = self.optimizer.state_dict()
else:
optimizer_state_dict = None
self.print_to_log_file('saving checkpoint...')
# epoch+1 so resuming continues with the *next* epoch.
save_this = {'epoch': (self.epoch + 1), 'state_dict': state_dict, 'optimizer_state_dict': optimizer_state_dict, 'lr_scheduler_state_dict': lr_sched_state_dct, 'plot_stuff': (self.all_tr_losses, self.all_val_losses, self.all_val_losses_tr_mode, self.all_val_eval_metrics), 'best_stuff': (self.best_epoch_based_on_MA_tr_loss, self.best_MA_tr_loss_for_patience, self.best_val_eval_criterion_MA)}
if (self.amp_grad_scaler is not None):
save_this['amp_grad_scaler'] = self.amp_grad_scaler.state_dict()
torch.save(save_this, fname)
self.print_to_log_file(('done, saving took %.2f seconds' % (time() - start_time)))
# Load model_best.model; falls back to the latest checkpoint if missing.
def load_best_checkpoint(self, train=True):
if (self.fold is None):
raise RuntimeError('Cannot load best checkpoint if self.fold is None')
if isfile(join(self.output_folder, 'model_best.model')):
self.load_checkpoint(join(self.output_folder, 'model_best.model'), train=train)
else:
self.print_to_log_file('WARNING! model_best.model does not exist! Cannot load best checkpoint. Falling back to load_latest_checkpoint')
self.load_latest_checkpoint(train)
# Try final -> latest -> best, in that preference order.
def load_latest_checkpoint(self, train=True):
if isfile(join(self.output_folder, 'model_final_checkpoint.model')):
return self.load_checkpoint(join(self.output_folder, 'model_final_checkpoint.model'), train=train)
if isfile(join(self.output_folder, 'model_latest.model')):
return self.load_checkpoint(join(self.output_folder, 'model_latest.model'), train=train)
if isfile(join(self.output_folder, 'model_best.model')):
return self.load_best_checkpoint(train)
raise RuntimeError('No checkpoint found')
# Load the final checkpoint; raises if training has not finished yet.
def load_final_checkpoint(self, train=False):
filename = join(self.output_folder, 'model_final_checkpoint.model')
if (not isfile(filename)):
raise RuntimeError(('Final checkpoint not found. Expected: %s. Please finish the training first.' % filename))
return self.load_checkpoint(filename, train=train)
# Read a checkpoint from disk (mapped to CPU) and restore it in-memory.
def load_checkpoint(self, fname, train=True):
self.print_to_log_file('loading checkpoint', fname, 'train=', train)
if (not self.was_initialized):
self.initialize(train)
saved_model = torch.load(fname, map_location=torch.device('cpu'))
self.load_checkpoint_ram(saved_model, train)
# Subclass hook: build self.network.
def initialize_network(self):
pass
# Subclass hook: build self.optimizer and self.lr_scheduler.
def initialize_optimizer_and_scheduler(self):
pass
# Restore trainer state from an already-loaded checkpoint dict.
# Handles DataParallel 'module.' key prefixes and truncates histories so
# they stay consistent with self.epoch.
def load_checkpoint_ram(self, checkpoint, train=True):
# NOTE(review): leftover debug print — consider removing upstream.
print('I am here !!!')
if (not self.was_initialized):
self.initialize(train)
new_state_dict = OrderedDict()
curr_state_dict_keys = list(self.network.state_dict().keys())
# Strip a 'module.' prefix left by nn.DataParallel when needed.
for (k, value) in checkpoint['state_dict'].items():
key = k
if ((key not in curr_state_dict_keys) and key.startswith('module.')):
key = key[7:]
new_state_dict[key] = value
if self.fp16:
self._maybe_init_amp()
if ('amp_grad_scaler' in checkpoint.keys()):
self.amp_grad_scaler.load_state_dict(checkpoint['amp_grad_scaler'])
self.network.load_state_dict(new_state_dict)
self.epoch = checkpoint['epoch']
if train:
optimizer_state_dict = checkpoint['optimizer_state_dict']
if (optimizer_state_dict is not None):
self.optimizer.load_state_dict(optimizer_state_dict)
if ((self.lr_scheduler is not None) and hasattr(self.lr_scheduler, 'load_state_dict') and (checkpoint['lr_scheduler_state_dict'] is not None)):
self.lr_scheduler.load_state_dict(checkpoint['lr_scheduler_state_dict'])
# _LRScheduler subclasses need an explicit step to the resumed epoch.
if issubclass(self.lr_scheduler.__class__, _LRScheduler):
self.lr_scheduler.step(self.epoch)
(self.all_tr_losses, self.all_val_losses, self.all_val_losses_tr_mode, self.all_val_eval_metrics) = checkpoint['plot_stuff']
# NOTE(review): leftover debug print.
print(('best_stuff' in checkpoint.keys()))
if ('best_stuff' in checkpoint.keys()):
(self.best_epoch_based_on_MA_tr_loss, self.best_MA_tr_loss_for_patience, self.best_val_eval_criterion_MA) = checkpoint['best_stuff']
# Old checkpoints could have epoch out of sync with the histories; trust
# the histories and truncate accordingly.
if (self.epoch != len(self.all_tr_losses)):
self.print_to_log_file('WARNING in loading checkpoint: self.epoch != len(self.all_tr_losses). This is due to an old bug and should only appear when you are loading old models. New models should have this fixed! self.epoch is now set to len(self.all_tr_losses)')
self.epoch = len(self.all_tr_losses)
self.all_tr_losses = self.all_tr_losses[:self.epoch]
self.all_val_losses = self.all_val_losses[:self.epoch]
self.all_val_losses_tr_mode = self.all_val_losses_tr_mode[:self.epoch]
self.all_val_eval_metrics = self.all_val_eval_metrics[:self.epoch]
self._maybe_init_amp()
# Lazily create the AMP GradScaler when fp16 is enabled.
def _maybe_init_amp(self):
if (self.fp16 and (self.amp_grad_scaler is None)):
self.amp_grad_scaler = GradScaler()
# Subclass hook: optionally dump a picture of the architecture.
def plot_network_architecture(self):
pass
# Main training loop: per epoch, run num_batches_per_epoch training
# iterations, then validation iterations, update moving averages, and let
# on_epoch_end() decide whether to continue. Saves a final checkpoint and
# removes the 'latest' one when done.
def run_training(self):
if (not torch.cuda.is_available()):
self.print_to_log_file('WARNING!!! You are attempting to run training on a CPU (torch.cuda.is_available() is False). This can be VERY slow!')
# Warm up the generators. NOTE(review): .next() is the batchgenerators
# MultiThreadedAugmenter API (not builtin next()) — confirm generator type.
_ = self.tr_gen.next()
_ = self.val_gen.next()
if torch.cuda.is_available():
torch.cuda.empty_cache()
self._maybe_init_amp()
maybe_mkdir_p(self.output_folder)
self.plot_network_architecture()
if (cudnn.benchmark and cudnn.deterministic):
warn('torch.backends.cudnn.deterministic is True indicating a deterministic training is desired. But torch.backends.cudnn.benchmark is True as well and this will prevent deterministic training! If you want deterministic then set benchmark=False')
if (not self.was_initialized):
self.initialize(True)
while (self.epoch < self.max_num_epochs):
self.print_to_log_file('\nepoch: ', self.epoch)
epoch_start_time = time()
train_losses_epoch = []
self.network.train()
if self.use_progress_bar:
with trange(self.num_batches_per_epoch) as tbar:
for b in tbar:
tbar.set_description('Epoch {}/{}'.format((self.epoch + 1), self.max_num_epochs))
l = self.run_iteration(self.tr_gen, True)
tbar.set_postfix(loss=l)
train_losses_epoch.append(l)
else:
for _ in range(self.num_batches_per_epoch):
l = self.run_iteration(self.tr_gen, True)
train_losses_epoch.append(l)
self.all_tr_losses.append(np.mean(train_losses_epoch))
self.print_to_log_file(('train loss : %.4f' % self.all_tr_losses[(- 1)]))
# Validation in eval mode (and optionally also in train mode).
with torch.no_grad():
self.network.eval()
val_losses = []
for b in range(self.num_val_batches_per_epoch):
l = self.run_iteration(self.val_gen, False, True)
val_losses.append(l)
self.all_val_losses.append(np.mean(val_losses))
self.print_to_log_file(('validation loss: %.4f' % self.all_val_losses[(- 1)]))
if self.also_val_in_tr_mode:
self.network.train()
val_losses = []
for b in range(self.num_val_batches_per_epoch):
l = self.run_iteration(self.val_gen, False)
val_losses.append(l)
self.all_val_losses_tr_mode.append(np.mean(val_losses))
self.print_to_log_file(('validation loss (train=True): %.4f' % self.all_val_losses_tr_mode[(- 1)]))
self.update_train_loss_MA()
continue_training = self.on_epoch_end()
epoch_end_time = time()
if (not continue_training):
break
self.epoch += 1
self.print_to_log_file(('This epoch took %f s\n' % (epoch_end_time - epoch_start_time)))
# Undo the last increment so the final checkpoint records the right epoch.
self.epoch -= 1
if self.save_final_checkpoint:
self.save_checkpoint(join(self.output_folder, 'model_final_checkpoint.model'))
# The 'latest' checkpoint is redundant once the final one exists.
if isfile(join(self.output_folder, 'model_latest.model')):
os.remove(join(self.output_folder, 'model_latest.model'))
if isfile(join(self.output_folder, 'model_latest.model.pkl')):
os.remove(join(self.output_folder, 'model_latest.model.pkl'))
# Step the LR scheduler: ReduceLROnPlateau uses the val criterion MA,
# other schedulers step to the next epoch index.
def maybe_update_lr(self):
if (self.lr_scheduler is not None):
assert isinstance(self.lr_scheduler, (lr_scheduler.ReduceLROnPlateau, lr_scheduler._LRScheduler))
if isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau):
self.lr_scheduler.step(self.val_eval_criterion_MA)
else:
self.lr_scheduler.step((self.epoch + 1))
self.print_to_log_file(('lr is now (scheduler) %s' % str(self.optimizer.param_groups[0]['lr'])))
# Periodic checkpointing every save_every epochs, but (as written) only
# once self.epoch exceeds 400 — presumably to save disk early on.
def maybe_save_checkpoint(self):
if (self.save_intermediate_checkpoints and ((self.epoch % self.save_every) == (self.save_every - 1)) and (self.epoch > 400)):
self.print_to_log_file('saving scheduled checkpoint file...')
if (not self.save_latest_only):
self.save_checkpoint(join(self.output_folder, ('model_ep_%03.0d.model' % (self.epoch + 1))))
self.print_to_log_file('done')
def update_eval_criterion_MA(self):
if (self.val_eval_criterion_MA is None):
if (len(self.all_val_eval_metrics) == 0):
self.val_eval_criterion_MA = (- self.all_val_losses[(- 1)])
else:
self.val_eval_criterion_MA = self.all_val_eval_metrics[(- 1)]
elif (len(self.all_val_eval_metrics) == 0):
self.val_eval_criterion_MA = ((self.val_eval_criterion_alpha * self.val_eval_criterion_MA) - ((1 - self.val_eval_criterion_alpha) * self.all_val_losses[(- 1)]))
else:
self.val_eval_criterion_MA = ((self.val_eval_criterion_alpha * self.val_eval_criterion_MA) + ((1 - self.val_eval_criterion_alpha) * self.all_val_eval_metrics[(- 1)]))
# Patience-based early-stopping logic. Tracks the best MA training loss and
# its epoch; when no improvement for > patience epochs, first halves the
# effective patience window while the LR is still above lr_threshold, then
# stops training. Returns False to stop.
def manage_patience(self):
continue_training = True
if (self.patience is not None):
# Seed the "best so far" trackers on the first call.
if (self.best_MA_tr_loss_for_patience is None):
self.best_MA_tr_loss_for_patience = self.train_loss_MA
if (self.best_epoch_based_on_MA_tr_loss is None):
self.best_epoch_based_on_MA_tr_loss = self.epoch
if (self.best_val_eval_criterion_MA is None):
self.best_val_eval_criterion_MA = self.val_eval_criterion_MA
self.print_to_log_file(('current best_val_eval_criterion_MA is %.4f0' % self.best_val_eval_criterion_MA))
self.print_to_log_file(('current val_eval_criterion_MA is %.4f' % self.val_eval_criterion_MA))
# New best validation criterion -> optionally save model_best.model.
if (self.val_eval_criterion_MA > self.best_val_eval_criterion_MA):
self.best_val_eval_criterion_MA = self.val_eval_criterion_MA
if self.save_best_checkpoint:
self.save_checkpoint(join(self.output_folder, 'model_best.model'))
# Training-loss improvement must beat an epsilon to reset patience.
if ((self.train_loss_MA + self.train_loss_MA_eps) < self.best_MA_tr_loss_for_patience):
self.best_MA_tr_loss_for_patience = self.train_loss_MA
self.best_epoch_based_on_MA_tr_loss = self.epoch
else:
pass
if ((self.epoch - self.best_epoch_based_on_MA_tr_loss) > self.patience):
if (self.optimizer.param_groups[0]['lr'] > self.lr_threshold):
# LR still high: give it another half-patience after the LR drop.
self.best_epoch_based_on_MA_tr_loss = (self.epoch - (self.patience // 2))
else:
continue_training = False
else:
pass
return continue_training
# End-of-epoch hook: evaluation, plotting, checkpointing, MA/LR updates,
# then patience check. Returns whether training should continue.
def on_epoch_end(self):
self.finish_online_evaluation()
self.plot_progress()
self.maybe_save_checkpoint()
self.update_eval_criterion_MA()
self.maybe_update_lr()
continue_training = self.manage_patience()
return continue_training
def update_train_loss_MA(self):
if (self.train_loss_MA is None):
self.train_loss_MA = self.all_tr_losses[(- 1)]
else:
self.train_loss_MA = ((self.train_loss_MA_alpha * self.train_loss_MA) + ((1 - self.train_loss_MA_alpha) * self.all_tr_losses[(- 1)]))
# Run one forward (and optionally backward) pass on a batch pulled from
# data_generator. Uses autocast + GradScaler when fp16 is enabled.
# Returns the loss as a detached numpy scalar.
def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False):
data_dict = next(data_generator)
data = data_dict['data']
target = data_dict['target']
data = maybe_to_torch(data)
target = maybe_to_torch(target)
if torch.cuda.is_available():
data = to_cuda(data)
target = to_cuda(target)
self.optimizer.zero_grad()
if self.fp16:
with autocast():
output = self.network(data)
# Free the input tensor early to reduce peak GPU memory.
del data
l = self.loss(output, target)
if do_backprop:
self.amp_grad_scaler.scale(l).backward()
self.amp_grad_scaler.step(self.optimizer)
self.amp_grad_scaler.update()
else:
output = self.network(data)
del data
l = self.loss(output, target)
if do_backprop:
l.backward()
self.optimizer.step()
if run_online_evaluation:
self.run_online_evaluation(output, target)
del target
return l.detach().cpu().numpy()
# Subclass hook: accumulate online evaluation statistics.
def run_online_evaluation(self, *args, **kwargs):
pass
# Subclass hook: turn accumulated statistics into all_val_eval_metrics.
def finish_online_evaluation(self):
pass
# Subclass hook: full validation / inference.
def validate(self, *args, **kwargs):
pass
def find_lr(self, num_iters=1000, init_value=1e-06, final_value=10.0, beta=0.98):
    """LR range test (Smith-style): sweep the learning rate geometrically
    from init_value to final_value over num_iters training iterations,
    recording an exponentially smoothed loss at each step.

    Stops early once the smoothed loss exceeds 4x the best seen so far.
    Writes lr_finder.png to self.output_folder and returns
    (log_lrs, losses): base-10 logs of the tried LRs and the smoothed
    losses. (Fixed: a stray '|' artifact on the return line broke syntax.)
    """
    import math
    self._maybe_init_amp()
    mult = (final_value / init_value) ** (1 / num_iters)
    lr = init_value
    self.optimizer.param_groups[0]['lr'] = lr
    avg_loss = 0.0
    best_loss = 0.0
    losses = []
    log_lrs = []
    for batch_num in range(1, num_iters + 1):
        # +1 offset keeps the tracked value positive — presumably mirrors
        # the upstream recipe; TODO(review): confirm the offset is intended.
        loss = self.run_iteration(self.tr_gen, do_backprop=True, run_online_evaluation=False).data.item() + 1
        # Exponentially smoothed loss with bias correction.
        avg_loss = (beta * avg_loss) + ((1 - beta) * loss)
        smoothed_loss = avg_loss / (1 - (beta ** batch_num))
        if batch_num > 1 and smoothed_loss > (4 * best_loss):
            break
        if smoothed_loss < best_loss or batch_num == 1:
            best_loss = smoothed_loss
        losses.append(smoothed_loss)
        log_lrs.append(math.log10(lr))
        lr *= mult
        self.optimizer.param_groups[0]['lr'] = lr
    import matplotlib.pyplot as plt
    lrs = [10 ** i for i in log_lrs]
    fig = plt.figure()
    plt.xscale('log')
    # Drop the noisy first/last few points before plotting.
    plt.plot(lrs[10:-5], losses[10:-5])
    plt.savefig(join(self.output_folder, 'lr_finder.png'))
    plt.close()
    return log_lrs, losses
# Fixed: the decorator line was corrupted to bare ".parametrize(...)";
# restored the standard pytest form (pytest is presumably imported at the
# top of the file — confirm).
@pytest.mark.parametrize('project_id', projects)
def test_project_delete(db, project_id):
    """Deleting a project must reattach its children to its former parent
    (or leave them parentless when the project was a root)."""
    project = Project.objects.get(id=project_id)
    project_parent_id = project.parent_id if project.parent else None
    project_children = [child.id for child in project.get_children()]
    project.delete()
    for child_id in project_children:
        child = Project.objects.get(id=child_id)
        if project_parent_id is None:
            assert child.parent is None
        else:
            # Fixed: use '==' — 'is' compares identity and is unreliable
            # for integer primary keys.
            assert child.parent.id == project_parent_id
class MyTestCase(unittest.TestCase):
    """Smoke test: print the WarmupMultiStepLR schedule over 50 epochs."""

    def test_something(self):
        net = nn.Linear(10, 10)
        optimizer = make_optimizer(cfg, net)
        # Warmup for 10 iters, then decay at milestones 20 and 40.
        lr_scheduler = WarmupMultiStepLR(optimizer, [20, 40], warmup_iters=10)
        for i in range(50):
            lr_scheduler.step()
            for j in range(3):
                # NOTE(review): get_lr() is deprecated in newer PyTorch in
                # favour of get_last_lr(); also PyTorch recommends calling
                # optimizer.step() before lr_scheduler.step() — kept as-is
                # to preserve the printed schedule.
                print(i, lr_scheduler.get_lr()[0])
                optimizer.step()
class TestCheckpointFunctions(unittest.TestCase):
    """Tests for save_checkpoint/load_checkpoint and get_torch_version."""

    def setUp(self):
        # Fresh scratch directory per test.
        self.base_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.base_dir)

    def test_save_and_load_checkpoint(self):
        checkpoint_dict = {str(i): i * 2 for i in range(1000)}
        # Round-trip via the directory (default file name).
        save_checkpoint(self.base_dir, checkpoint_dict)
        loaded_checkpoint = load_checkpoint(self.base_dir)
        self.assertDictEqual(checkpoint_dict, loaded_checkpoint)
        # Round-trip via the explicit default file path.
        checkpoint_path = f'{self.base_dir}/{CHECKPOINT_FILE}'
        loaded_checkpoint = load_checkpoint(checkpoint_path)
        self.assertDictEqual(checkpoint_dict, loaded_checkpoint)
        # Round-trip via a custom file name.
        filename = 'my_checkpoint.torch'
        checkpoint_dict = {str(i): i * 3 for i in range(1000)}
        save_checkpoint(self.base_dir, checkpoint_dict, checkpoint_file=filename)
        # Fixed: this path literal was corrupted to '(unknown)' in the
        # source; it must point at the custom file name used above.
        checkpoint_path = f'{self.base_dir}/{filename}'
        loaded_checkpoint = load_checkpoint(checkpoint_path)
        self.assertDictEqual(checkpoint_dict, loaded_checkpoint)

    # Fixed: the decorator was corrupted to a bare string expression;
    # restored the mock.patch form implied by the mock_torch parameter.
    @mock.patch('classy_vision.generic.util.torch')
    def test_get_torch_version(self, mock_torch: mock.MagicMock):
        mock_torch.__version__ = '1.7.2'
        self.assertEqual(get_torch_version(), [1, 7])
        self.assertLess(get_torch_version(), [1, 8])
        self.assertGreater(get_torch_version(), [1, 6])
        # Suffixes like 'a' must be ignored when parsing the version.
        mock_torch.__version__ = '1.11.2a'
        self.assertEqual(get_torch_version(), [1, 11])
        self.assertLess(get_torch_version(), [1, 13])
        self.assertGreater(get_torch_version(), [1, 8])
def token_generator_three(source_path, target_path_l2r, target_path_r2l, token_vocab_src, token_vocab_tgt, eos=None):
    """Yield aligned training examples from three parallel text files.

    Each yielded dict holds encoded 'inputs' plus two target sequences
    (left-to-right and right-to-left), padded with PAD to a common length
    and prefixed with their direction token. Iteration stops at the first
    exhausted file. (Fixed: trailing '|' corruption artifact removed.)
    """
    eos_list = [] if eos is None else [eos]
    # PAD / L2R / R2L are module-level vocabulary ids defined elsewhere.
    pad_list = [] if PAD is None else [PAD]
    l2r_list = [] if L2R is None else [L2R]
    r2l_list = [] if R2L is None else [R2L]
    # One flat with-statement instead of three nested ones (same semantics).
    with tf.gfile.GFile(source_path, mode='r') as source_file, \
         tf.gfile.GFile(target_path_l2r, mode='r') as target_file_l2r, \
         tf.gfile.GFile(target_path_r2l, mode='r') as target_file_r2l:
        source, target_l2r, target_r2l = source_file.readline(), target_file_l2r.readline(), target_file_r2l.readline()
        while source and target_l2r and target_r2l:
            source_ints = token_vocab_src.encode(source.strip()) + eos_list
            t_l2r = token_vocab_tgt.encode(target_l2r.strip())
            t_r2l = token_vocab_tgt.encode(target_r2l.strip())
            # Pad the shorter direction so both targets share one length.
            t_length_max = max(len(t_l2r), len(t_r2l))
            t_l2r_add_len = t_length_max - len(t_l2r)
            t_r2l_add_len = t_length_max - len(t_r2l)
            target_ints_l2r = l2r_list + t_l2r + (t_l2r_add_len * pad_list) + eos_list
            target_ints_r2l = r2l_list + t_r2l + (t_r2l_add_len * pad_list) + eos_list
            yield {'inputs': source_ints, 'targets_l2r': target_ints_l2r, 'targets_r2l': target_ints_r2l}
            source, target_l2r, target_r2l = source_file.readline(), target_file_l2r.readline(), target_file_r2l.readline()
class IO:
    """Load point-cloud-style data from .npy, .h5 or .txt files."""

    # Fixed: these methods take `cls` but lacked @classmethod, so the
    # natural call IO.get(path) misbound the path to `cls`. Decorating
    # them keeps instance calls working and fixes class-level calls.
    @classmethod
    def get(cls, file_path):
        """Dispatch on the file extension; raises for unsupported types."""
        _, file_extension = os.path.splitext(file_path)
        if file_extension in ['.npy']:
            return cls._read_npy(file_path)
        elif file_extension in ['.h5']:
            return cls._read_h5(file_path)
        elif file_extension in ['.txt']:
            return cls._read_txt(file_path)
        else:
            raise Exception('Unsupported file extension: %s' % file_extension)

    @classmethod
    def _read_npy(cls, file_path):
        return np.load(file_path)

    @classmethod
    def _read_txt(cls, file_path):
        return np.loadtxt(file_path)

    @classmethod
    def _read_h5(cls, file_path):
        # NOTE(review): the handle is not closed after reading — consider a
        # context manager upstream.
        f = h5py.File(file_path, 'r')
        return f['data'][()]
class NeuralNet(torch.nn.Module):
    """Two-layer MLP: LayerNorm -> Linear -> ReLU -> FusedLayerNorm ->
    Linear -> log_softmax. Debug prints are intentional and kept."""

    def __init__(self, d_in, d_out):
        super().__init__()
        self.d_in = d_in
        self.d_out = d_out
        self.norm = torch.nn.LayerNorm(d_in)
        # FusedLayerNorm comes from NVIDIA apex (imported elsewhere in file).
        self.norm2 = FusedLayerNorm(d_out)
        self.linear = torch.nn.Linear(d_in, d_out)
        self.linear2 = torch.nn.Linear(d_out, d_out)

    def forward(self, input):
        input = self.norm(input)
        print(input.type())
        output = self.linear(input)
        print(output.type())
        output = torch.relu(output)
        print(output.type())
        output = self.norm2(output)
        output = self.linear2(output)
        print(output.type())
        # Fixed: pass dim explicitly (implicit-dim log_softmax is
        # deprecated). dim=1 matches the legacy default for 2-D
        # (batch, features) input — TODO(review): confirm inputs are 2-D.
        output = torch.nn.functional.log_softmax(output, dim=1)
        print('end')
        return output
class SimpleParameter(Parameter):
    """Parameter whose values are coerced via the builtin named in opts['type']."""

    def __init__(self, *args, **kargs):
        Parameter.__init__(self, *args, **kargs)

    def _interpretValue(self, v):
        """Coerce v with the builtin named by opts['type'] (e.g. 'int',
        'float'); names that are not builtins leave the value unchanged."""
        typ = self.opts['type']

        def _missing_interp(value):
            # Fallback: no builtin with that name — pass the value through.
            return value

        interpreter = getattr(builtins, typ, _missing_interp)
        return interpreter(v)
class DOSTest(unittest.TestCase):
    """Run a DOS COM binary under Qiling and verify INT 21h hooks fire."""

    def test_dos_8086_hello(self):
        ql = Qiling(['../examples/rootfs/8086/dos/HI.DOS_COM'], '../examples/rootfs/8086/dos', verbose=QL_VERBOSE.DEBUG)
        ck = Checklist()

        def onenter(ql: Qiling):
            ck.visited_onenter = True

        def onexit(ql: Qiling):
            ck.visited_onexit = True

        # (33, 9)  = INT 21h / AH=09h (print string): hook after the call.
        # (33, 76) = INT 21h / AH=4Ch (terminate):   hook before the call.
        ql.os.set_api((33, 9), onexit, QL_INTERCEPT.EXIT)
        ql.os.set_api((33, 76), onenter, QL_INTERCEPT.ENTER)
        ql.run()
        self.assertTrue(ck.visited_onenter)
        self.assertTrue(ck.visited_onexit)
class AENC(Frame):
    """Audio encryption (AENC) ID3 frame: owner id, preview window, data."""

    _framespec = [Latin1TextSpec('owner'), SizedIntegerSpec('preview_start', size=2, default=0), SizedIntegerSpec('preview_length', size=2, default=0), BinaryDataSpec('data')]

    # NOTE(review): upstream mutagen exposes HashKey as a @property; the
    # decorator may have been lost in this copy — confirm before calling.
    def HashKey(self):
        # Frames are keyed by frame id plus owner so multiple AENC frames
        # with different owners can coexist.
        return '%s:%s' % (self.FrameID, self.owner)

    def __bytes__(self):
        return self.owner.encode('utf-8')

    def __str__(self):
        return self.owner

    def __eq__(self, other):
        # An AENC frame compares equal to its owner string.
        return self.owner == other

    # Defining __eq__ would otherwise disable inherited hashing; keep Frame's.
    __hash__ = Frame.__hash__
class Timezone(BaseOption):
    """Option holding a timezone, (de)serialised through _TZ_DICT."""

    def validate(self, value, **kwargs):
        """Delegate to the shared timezone validator function."""
        return validatorfuncs.timezone(value, option_key=self.key, **kwargs)

    def default(self):
        return _TZ_DICT[self.default_value]

    def deserialize(self, save_data):
        """Map a stored key back to a timezone object; reject unknown keys."""
        if save_data not in _TZ_DICT:
            raise ValueError(f"{self.key} expected Timezone Data, got '{save_data}'")
        return _TZ_DICT[save_data]

    def serialize(self):
        # The stored value's string form is the _TZ_DICT key.
        return str(self.value_storage)
def multitest(url, payloads):
    """Build test targets by injecting payloads into a URL.

    For URLs carrying a query string, returns (queries, finalURL): a list of
    query-dict variants where each parameter in turn is replaced by every
    payload (including regex-bypass variants from generator()), plus the URL
    with its query stripped. For URLs without a query, returns a flat list
    of URLs with payloads appended after the path.
    """
    if urlparse(url).scheme == '':
        # Fixed: this literal was corrupted in the source ("(' + url)");
        # default to plain http when no scheme is supplied.
        url = 'http://' + url
    regexBypassPayloads = generator(url, payloads)
    if '=' in url:
        if url.endswith('='):
            url += 'r007'  # placeholder value so parse_qs keeps the key
        parsedQueries = parse_qs(urlparse(url).query)
        keys = [key for key in parsedQueries]
        values = [value for value in parsedQueries.values()]
        parsedURL = list(urlparse(url))
        parsedURL[-2] = ''  # blank the query component
        finalURL = urlunparse(parsedURL)
        queries = []
        count = 0
        for key in keys:
            for payload in payloads:
                parsedQueries[key] = payload
                queries.append(parsedQueries.copy())
            for payload in regexBypassPayloads:
                parsedQueries[key] = payload
                queries.append(parsedQueries.copy())
            # Restore the original value before moving to the next key.
            parsedQueries[key] = values[count]
            count += 1
        return (queries, finalURL)
    else:
        urls = []
        print('%s Appending payloads just after the URL' % info)
        if not url.endswith('/'):
            url += '/'
        for payload in payloads:
            urls.append(url + payload)
        for payload in regexBypassPayloads:
            urls.append(url + payload)
        return urls
def compute_statistics(model=None, args=None, logger=None, log_time=None):
    """Compute per-layer normalization statistics over the validation split.

    Registers a ComputeNormStatsHook on each chosen norm layer, runs the
    whole validation loader through the (frozen) model, averages the hooks'
    batch mean/variance, and saves both lists to args.result_dir as .npy.
    (Fixed: trailing '|' corruption artifact removed from the last line.)
    """
    from utils.norm_stats_utils import ComputeNormStatsHook
    compute_stat_hooks = []
    list_stat_mean = []
    list_stat_var = []
    # Pick which norm layers to instrument, depending on the architecture.
    # NOTE(review): for any other arch, chosen_layers stays unbound and the
    # hook loop below raises NameError — confirm whether other archs are
    # actually supported here.
    if args.arch == 'tanet':
        if args.stat_type in ['temp', 'temp_v2']:
            candidate_layers = [nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d]
        elif args.stat_type in ['spatial', 'spatiotemp']:
            candidate_layers = [nn.BatchNorm2d, nn.BatchNorm3d]
        chosen_layers = choose_layers(model, candidate_layers)
    elif args.arch == 'videoswintransformer':
        candidate_layers = [nn.LayerNorm]
        chosen_layers = choose_layers(model, candidate_layers)
        chosen_layers = chosen_layers[1:]  # skip the first LayerNorm
    for layer_id, (layer_name, layer_) in enumerate(chosen_layers):
        compute_stat_hooks.append(ComputeNormStatsHook(layer_, clip_len=args.clip_length, stat_type=args.stat_type, before_norm=args.before_norm, batch_size=args.batch_size))
        list_stat_mean.append(AverageMeter())
        list_stat_var.append(AverageMeter())
    if args.arch == 'tanet':
        n_clips = int(args.sample_style.split('-')[-1])
    elif args.arch == 'videoswintransformer':
        n_clips = args.num_clips
    # Build the evaluation data loader for the requested architecture.
    if args.arch == 'tanet':
        data_loader = torch.utils.data.DataLoader(get_dataset_tanet(args, split='val', dataset_type='eval'), batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
    elif args.arch == 'videoswintransformer':
        data_loader = torch.utils.data.DataLoader(get_dataset_videoswin(args, split='val', dataset_type='eval'), batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
    else:
        data_loader = torch.utils.data.DataLoader(get_dataset(args, split='val'), batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
    model.eval()
    with torch.no_grad():
        for batch_id, (input, target) in enumerate(data_loader):
            actual_bz = input.shape[0]
            input = input.cuda()
            if args.arch == 'tanet':
                # Flatten crops/clips into the batch dimension for TANet.
                input = input.view(-1, 3, input.size(2), input.size(3))
                input = input.view(actual_bz * args.test_crops * n_clips, args.clip_length, 3, input.size(2), input.size(3))
                _ = model(input)
            elif args.arch == 'videoswintransformer':
                n_views = args.test_crops * n_clips
                _ = model(input)
            else:
                input = input.reshape((-1,) + input.shape[2:])
                _ = model(input)
            if batch_id % 1000 == 0:
                print(f'{batch_id}/{len(data_loader)} batches completed ...')
            # Accumulate the statistics captured by the hooks this batch.
            for hook_id, stat_hook in enumerate(compute_stat_hooks):
                list_stat_mean[hook_id].update(stat_hook.batch_mean, n=actual_bz)
                list_stat_var[hook_id].update(stat_hook.batch_var, n=actual_bz)
    # Replace each meter with its numpy average before saving.
    for hook_id, stat_hook in enumerate(compute_stat_hooks):
        list_stat_mean[hook_id] = list_stat_mean[hook_id].avg.cpu().numpy()
        list_stat_var[hook_id] = list_stat_var[hook_id].avg.cpu().numpy()
    np.save(osp.join(args.result_dir, f'list_{args.stat_type}_mean_{log_time}.npy'), list_stat_mean, allow_pickle=True)
    np.save(osp.join(args.result_dir, f'list_{args.stat_type}_var_{log_time}.npy'), list_stat_var, allow_pickle=True)
class MultiHeadedDotAttention(nn.Module):
    """Multi-head scaled dot-product attention with optional AoA refinement
    (Attention-on-Attention, Huang et al. 2019).

    project_k_v=0 assumes key/value are already projected; norm_q applies a
    LayerNorm to the query; do_aoa replaces the output projection with the
    GLU-gated AoA layer. (Fixed: trailing '|' corruption artifact removed.)
    """

    def __init__(self, h, d_model, dropout=0.1, scale=1, project_k_v=1, use_output_layer=1, do_aoa=0, norm_q=0, dropout_aoa=0.3):
        super(MultiHeadedDotAttention, self).__init__()
        assert (d_model * scale) % h == 0
        # Per-head dimensionality after optional widening by `scale`.
        self.d_k = (d_model * scale) // h
        self.h = h
        self.project_k_v = project_k_v
        if norm_q:
            self.norm = LayerNorm(d_model)
        else:
            self.norm = (lambda x: x)
        # One projection for the query, plus two more for key/value when
        # project_k_v is set.
        self.linears = clones(nn.Linear(d_model, d_model * scale), 1 + (2 * project_k_v))
        self.output_layer = nn.Linear(d_model * scale, d_model)
        self.use_aoa = do_aoa
        if self.use_aoa:
            # AoA: concatenated [attended, query] -> GLU gate.
            self.aoa_layer = nn.Sequential(nn.Linear((1 + scale) * d_model, 2 * d_model), nn.GLU())
            if dropout_aoa > 0:
                self.dropout_aoa = nn.Dropout(p=dropout_aoa)
            else:
                self.dropout_aoa = (lambda x: x)
        if self.use_aoa or (not use_output_layer):
            # AoA (or caller preference) replaces the output projection.
            del self.output_layer
            self.output_layer = (lambda x: x)
        self.attn = None
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, query, value, key, mask=None):
        if mask is not None:
            if len(mask.size()) == 2:
                mask = mask.unsqueeze(-2)
            # Same mask applied to all heads.
            mask = mask.unsqueeze(1)
        single_query = 0
        if len(query.size()) == 2:
            # Allow a single query vector per batch element.
            single_query = 1
            query = query.unsqueeze(1)
        nbatches = query.size(0)
        query = self.norm(query)
        # Split into heads: (batch, h, seq, d_k).
        if self.project_k_v == 0:
            query_ = self.linears[0](query).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
            key_ = key.view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
            value_ = value.view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
        else:
            query_, key_, value_ = [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2) for l, x in zip(self.linears, (query, key, value))]
        x, self.attn = attention(query_, key_, value_, mask=mask, dropout=self.dropout)
        # Merge the heads back: (batch, seq, h*d_k).
        x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)
        if self.use_aoa:
            x = self.aoa_layer(self.dropout_aoa(torch.cat([x, query], -1)))
        x = self.output_layer(x)
        if single_query:
            query = query.squeeze(1)
            x = x.squeeze(1)
        return x
class GetChatPhotosCount:
    """Mixin providing Client.get_chat_photos_count()."""

    async def get_chat_photos_count(self: 'pyrogram.Client', chat_id: Union[int, str]) -> int:
        """Return the number of profile photos of a user or chat.

        Channels/supergroups use the chat-photos search counter; users ask
        for a single photo and read the total from the response.
        """
        peer_id = await self.resolve_peer(chat_id)
        if isinstance(peer_id, raw.types.InputPeerChannel):
            r = await self.invoke(raw.functions.messages.GetSearchCounters(peer=peer_id, filters=[raw.types.InputMessagesFilterChatPhotos()]))
            return r[0].count
        else:
            r = await self.invoke(raw.functions.photos.GetUserPhotos(user_id=peer_id, offset=0, max_id=0, limit=1))
            if isinstance(r, raw.types.photos.Photos):
                # Full list returned (no count field): its length is the count.
                return len(r.photos)
            else:
                return r.count
class LogReturnsSeries(ReturnsSeries):
    """Time series of logarithmic (continuously compounded) returns."""

    # NOTE(review): pandas subclasses normally expose _constructor as a
    # @property; here it is a plain method — confirm against the qf_lib base
    # class before changing.
    def _constructor(self):
        return LogReturnsSeries

    def _constructor_expanddim(self):
        from qf_lib.containers.dataframe.log_returns_dataframe import LogReturnsDataFrame
        return LogReturnsDataFrame

    def to_log_returns(self) -> 'LogReturnsSeries':
        """Already log returns — return self unchanged."""
        return self

    def to_simple_returns(self) -> 'SimpleReturnsSeries':
        """Convert to simple returns: r_simple = exp(r_log) - 1."""
        from qf_lib.containers.series.simple_returns_series import SimpleReturnsSeries
        simple_rets_values = [(exp(log_ret) - 1) for log_ret in self.values]
        simple_returns_tms = SimpleReturnsSeries(index=self.index.copy(), data=simple_rets_values).__finalize__(self)
        return simple_returns_tms

    def total_cumulative_return(self) -> float:
        """Total simple return over the whole series: exp(sum of logs) - 1."""
        return np.exp(self.sum()) - 1.0

    def _to_prices_values(self, initial_price):
        # Cumulative log returns -> price relatives; the prepended 1 anchors
        # the series at initial_price. (Fixed: trailing '|' artifact removed.)
        prices_values = self.values.cumsum()
        prices_values = exp(prices_values)
        prices_values = append([1], prices_values)
        return prices_values * initial_price
class YamlMigrations(QObject):
changed = pyqtSignal()
def __init__(self, settings: Any, parent: QObject=None) -> None:
super().__init__(parent)
self._settings = settings
def migrate(self) -> None:
self._migrate_configdata()
self._migrate_bindings_default()
self._migrate_font_default_family()
self._migrate_font_replacements()
self._migrate_bool('tabs.favicons.show', 'always', 'never')
self._migrate_bool('scrolling.bar', 'always', 'overlay')
self._migrate_bool('qt.force_software_rendering', 'software-opengl', 'none')
self._migrate_renamed_bool(old_name='content.webrtc_public_interfaces_only', new_name='content.webrtc_ip_handling_policy', true_value='default-public-interface-only', false_value='all-interfaces')
self._migrate_renamed_bool(old_name='tabs.persist_mode_on_change', new_name='tabs.mode_on_change', true_value='persist', false_value='normal')
self._migrate_renamed_bool(old_name='statusbar.hide', new_name='statusbar.show', true_value='never', false_value='always')
self._migrate_renamed_bool(old_name='content.ssl_strict', new_name='content.tls.certificate_errors', true_value='block', false_value='load-insecurely', ask_value='ask')
self._migrate_renamed_bool(old_name='content.javascript.can_access_clipboard', new_name='content.javascript.clipboard', true_value='access', false_value='none')
for setting in ['colors.webpage.force_dark_color_scheme', 'colors.webpage.prefers_color_scheme_dark']:
self._migrate_renamed_bool(old_name=setting, new_name='colors.webpage.preferred_color_scheme', true_value='dark', false_value='auto')
for setting in ['tabs.title.format', 'tabs.title.format_pinned', 'window.title_format']:
self._migrate_string_value(setting, '(?<!{)\\{title\\}(?!})', '{current_title}')
self._migrate_to_multiple('fonts.tabs', ('fonts.tabs.selected', 'fonts.tabs.unselected'))
self._migrate_to_multiple('content.media_capture', ('content.media.audio_capture', 'content.media.audio_video_capture', 'content.media.video_capture'))
setting = 'content.headers.user_agent'
self._migrate_none(setting, configdata.DATA[setting].default)
self._remove_empty_patterns()
def _migrate_configdata(self) -> None:
    """Apply the rename/delete migrations declared in configdata."""
    renamed = configdata.MIGRATIONS.renamed
    deleted = configdata.MIGRATIONS.deleted
    # Iterate over a snapshot of the keys since we mutate the mapping.
    for name in list(self._settings):
        if name in renamed:
            new_name = renamed[name]
            log.config.debug('Renaming {} to {}'.format(name, new_name))
            self._settings[new_name] = self._settings[name]
            del self._settings[name]
            self.changed.emit()
        elif name in deleted:
            log.config.debug('Removing {}'.format(name))
            del self._settings[name]
            self.changed.emit()
def _migrate_bindings_default(self) -> None:
if ('bindings.default' not in self._settings):
return
del self._settings['bindings.default']
self.changed.emit()
def _migrate_font_default_family(self) -> None:
    """Convert fonts.monospace into the fonts.default_family list form."""
    old_name = 'fonts.monospace'
    new_name = 'fonts.default_family'
    if old_name not in self._settings:
        return
    # The old hard-coded fallback list is stripped from stored values
    # before parsing what the user actually configured.
    old_default_fonts = 'Monospace, "DejaVu Sans Mono", Monaco, "Bitstream Vera Sans Mono", "Andale Mono", "Courier New", Courier, "Liberation Mono", monospace, Fixed, Consolas, Terminal'
    migrated = {}
    for scope, val in self._settings[old_name].items():
        user_fonts = val.replace(old_default_fonts, '').rstrip(' ,')
        families = configutils.FontFamilies.from_str(user_fonts)
        migrated[scope] = list(families)
    self._settings[new_name] = migrated
    del self._settings[old_name]
    self.changed.emit()
def _migrate_font_replacements(self) -> None:
    """Rewrite font values ending in ' monospace' to use default_family."""
    for name, values in self._settings.items():
        if not isinstance(values, dict):
            continue
        try:
            opt = configdata.DATA[name]
        except KeyError:
            # Unknown option; leave it for other migrations to handle.
            continue
        if not isinstance(opt.typ, configtypes.FontBase):
            continue
        for scope, val in values.items():
            if isinstance(val, str) and val.endswith(' monospace'):
                self._settings[name][scope] = val.replace('monospace', 'default_family')
                self.changed.emit()
def _migrate_bool(self, name: str, true_value: str, false_value: str) -> None:
if (name not in self._settings):
return
values = self._settings[name]
if (not isinstance(values, dict)):
return
for (scope, val) in values.items():
if isinstance(val, bool):
new_value = (true_value if val else false_value)
self._settings[name][scope] = new_value
self.changed.emit()
def _migrate_renamed_bool(self, old_name: str, new_name: str, true_value: str, false_value: str, ask_value: str=None) -> None:
if (old_name not in self._settings):
return
self._settings[new_name] = {}
for (scope, val) in self._settings[old_name].items():
if (val == 'ask'):
assert (ask_value is not None)
new_value = ask_value
elif val:
new_value = true_value
else:
new_value = false_value
self._settings[new_name][scope] = new_value
del self._settings[old_name]
self.changed.emit()
def _migrate_none(self, name: str, value: str) -> None:
if (name not in self._settings):
return
values = self._settings[name]
if (not isinstance(values, dict)):
return
for (scope, val) in values.items():
if (val is None):
self._settings[name][scope] = value
self.changed.emit()
def _migrate_to_multiple(self, old_name: str, new_names: Iterable[str]) -> None:
if (old_name not in self._settings):
return
for new_name in new_names:
self._settings[new_name] = {}
for (scope, val) in self._settings[old_name].items():
self._settings[new_name][scope] = val
del self._settings[old_name]
self.changed.emit()
def _migrate_string_value(self, name: str, source: str, target: str) -> None:
if (name not in self._settings):
return
values = self._settings[name]
if (not isinstance(values, dict)):
return
for (scope, val) in values.items():
if isinstance(val, str):
new_val = re.sub(source, target, val)
if (new_val != val):
self._settings[name][scope] = new_val
self.changed.emit()
def _remove_empty_patterns(self) -> None:
scope = '*://*./*'
for (name, values) in self._settings.items():
if (not isinstance(values, dict)):
continue
if (scope in values):
del self._settings[name][scope]
self.changed.emit() |
def render_pep440_branch(pieces):
    """Render a PEP 440 version: TAG[[.dev0]+DISTANCE.gHEX[.dirty]].

    Versions built from a branch other than 'master' get a '.dev0'
    marker so they sort before the corresponding master release.
    """
    tag = pieces['closest-tag']
    if not tag:
        # No tag reachable at all: exception-case version "0".
        rendered = '0'
        if pieces['branch'] != 'master':
            rendered += '.dev0'
        rendered += '+untagged.%d.g%s' % (pieces['distance'], pieces['short'])
        if pieces['dirty']:
            rendered += '.dirty'
        return rendered
    rendered = tag
    if pieces['distance'] or pieces['dirty']:
        if pieces['branch'] != 'master':
            rendered += '.dev0'
        rendered += plus_or_dot(pieces)
        rendered += '%d.g%s' % (pieces['distance'], pieces['short'])
        if pieces['dirty']:
            rendered += '.dirty'
    return rendered
class ZeroconfIPv6Address(IPv6Address):
    """IPv6Address that caches frequently-accessed derived values.

    str(), is_link_local and is_unspecified are computed once at
    construction instead of on every access.

    Fix: `is_link_local` / `is_unspecified` override *properties* on the
    IPv6Address base class, so they must be properties here as well.
    Defining them as plain methods (as the original did) makes attribute
    access return a bound method — which is always truthy — silently
    breaking every `if addr.is_link_local:` check.
    """

    __slots__ = ('_str', '_is_link_local', '_is_unspecified')

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # Pre-compute the values the hot paths ask for repeatedly.
        self._str = super().__str__()
        self._is_link_local = super().is_link_local
        self._is_unspecified = super().is_unspecified

    def __str__(self) -> str:
        return self._str

    @property
    def is_link_local(self) -> bool:
        """True for addresses in fe80::/10 (cached)."""
        return self._is_link_local

    @property
    def is_unspecified(self) -> bool:
        """True for the unspecified address '::' (cached)."""
        return self._is_unspecified
class GraphicsWidgetAnchor(object):
    """Mixin that keeps a graphics item anchored to a point on its parent.

    The anchor is expressed as fractional positions (0..1) inside the
    item's and the parent's bounding rects; whenever either geometry
    changes, the item is repositioned so the two anchor points coincide
    (plus an optional pixel offset).
    """

    def __init__(self):
        self.__parent = None          # parent item we are anchored to
        self.__parentAnchor = None    # fractional (x, y) anchor on the parent
        self.__itemAnchor = None      # fractional (x, y) anchor on this item
        self.__offset = (0, 0)        # extra pixel offset applied after anchoring
        # Re-anchor when our own geometry changes (only if the mixed-in
        # class actually provides the geometryChanged signal).
        if hasattr(self, 'geometryChanged'):
            self.geometryChanged.connect(self.__geometryChanged)

    def anchor(self, itemPos, parentPos, offset=(0, 0)):
        """Anchor fractional point `itemPos` of this item to fractional
        point `parentPos` of the parent, offset by `offset` pixels.

        Raises if the item has no parent yet.
        """
        parent = self.parentItem()
        if parent is None:
            raise Exception('Cannot anchor; parent is not set.')
        if self.__parent is not parent:
            # Re-wire the geometryChanged connection to the new parent.
            if self.__parent is not None:
                self.__parent.geometryChanged.disconnect(self.__geometryChanged)
            self.__parent = parent
            parent.geometryChanged.connect(self.__geometryChanged)
        self.__itemAnchor = itemPos
        self.__parentAnchor = parentPos
        self.__offset = offset
        self.__geometryChanged()

    def autoAnchor(self, pos, relative=True):
        """Move the item to `pos` (parent coordinates) and anchor it to the
        nearest corner of the parent.

        With relative=True the anchor keeps its *fractional* position in
        the parent, so it stays proportionally placed on resize; with
        relative=False a fixed pixel offset is kept instead.
        """
        pos = Point(pos)
        # Our bounding rect as it would be after moving to `pos`.
        br = self.mapRectToParent(self.boundingRect()).translated((pos - self.pos()))
        pbr = self.parentItem().boundingRect()
        anchorPos = [0, 0]
        parentPos = Point()
        itemPos = Point()
        # Pick left or right edge, whichever is closer.
        if (abs((br.left() - pbr.left())) < abs((br.right() - pbr.right()))):
            anchorPos[0] = 0
            parentPos[0] = pbr.left()
            itemPos[0] = br.left()
        else:
            anchorPos[0] = 1
            parentPos[0] = pbr.right()
            itemPos[0] = br.right()
        # Pick top or bottom edge, whichever is closer.
        if (abs((br.top() - pbr.top())) < abs((br.bottom() - pbr.bottom()))):
            anchorPos[1] = 0
            parentPos[1] = pbr.top()
            itemPos[1] = br.top()
        else:
            anchorPos[1] = 1
            parentPos[1] = pbr.bottom()
            itemPos[1] = br.bottom()
        if relative:
            # Fractional position of the chosen corner inside the parent.
            relPos = [((itemPos[0] - pbr.left()) / pbr.width()), ((itemPos[1] - pbr.top()) / pbr.height())]
            self.anchor(anchorPos, relPos)
        else:
            offset = (itemPos - parentPos)
            self.anchor(anchorPos, anchorPos, offset)

    def __geometryChanged(self):
        """Reposition so the item anchor coincides with the parent anchor."""
        if self.__parent is None:
            return
        if self.__itemAnchor is None:
            return
        # o: our origin in parent coords; a: our anchor point in parent
        # coords; p: the parent's anchor point.
        o = self.mapToParent(Point(0, 0))
        a = (self.boundingRect().bottomRight() * Point(self.__itemAnchor))
        a = self.mapToParent(a)
        p = (self.__parent.boundingRect().bottomRight() * Point(self.__parentAnchor))
        off = Point(self.__offset)
        pos = ((p + (o - a)) + off)
        self.setPos(pos)
class HumanReadableMtimeLinemode(LinemodeBase):
    """Linemode showing a file's mtime in human-readable form."""

    name = 'humanreadablemtime'

    def filetitle(self, fobj, metadata):
        """Use the relative path as the displayed title."""
        return fobj.relative_path

    def infostring(self, fobj, metadata):
        """Return the formatted mtime, or '?' when stat() failed."""
        st = fobj.stat
        if st is None:
            return '?'
        return human_readable_time(st.st_mtime)
def test_regular_bind_and_provider_dont_work_with_multibind():
    """Multibinding types (List/Dict and their NewType aliases) must be
    rejected by the regular @provider decorator and by binder.bind().

    NOTE(review): the bare function definitions inside the
    ``with pytest.raises(Error):`` blocks below look like they lost their
    decorators (likely ``@provider``/``@multiprovider``) during
    extraction — a plain ``def`` raises nothing, so as written these
    blocks would fail. Confirm against the original test suite.
    """
    Names = NewType('Names', List[str])
    Passwords = NewType('Passwords', Dict[(str, str)])

    class MyModule(Module):
        # Declaring providers for collection types should raise at class
        # definition time (presumably via a stripped decorator, see above).
        with pytest.raises(Error):
            def provide_strs(self) -> List[str]:
                return []
        with pytest.raises(Error):
            def provide_names(self) -> Names:
                return []
        with pytest.raises(Error):
            def provide_strs_in_dict(self) -> Dict[(str, str)]:
                return {}
        with pytest.raises(Error):
            def provide_passwords(self) -> Passwords:
                return {}

    injector = Injector()
    binder = injector.binder
    # Plain bind() must likewise refuse collection/multibind types.
    with pytest.raises(Error):
        binder.bind(List[str], to=[])
    with pytest.raises(Error):
        binder.bind(Names, to=[])
    with pytest.raises(Error):
        binder.bind(Dict[(str, str)], to={})
    with pytest.raises(Error):
        binder.bind(Passwords, to={})
def adagrad_window(loss_or_grads=None, params=None, learning_rate=0.001, epsilon=0.1, n_win=10):
    """Return AdaGrad updates with a sliding window of squared gradients.

    Unlike classic AdaGrad, only the last `n_win` squared gradients are
    accumulated (stored in a ring buffer), so the effective learning rate
    does not decay forever.

    Parameters
    ----------
    loss_or_grads : symbolic loss or list of gradients, optional
    params : list of shared variables to update, optional
    learning_rate : float, step size
    epsilon : float, numerical stabilizer added under the sqrt
    n_win : int, window length of remembered squared gradients

    Returns
    -------
    OrderedDict mapping shared variables to their update expressions.
    Called with neither argument, returns a partial of itself carrying
    the keyword arguments (for use as a deferred optimizer spec).
    """
    if (loss_or_grads is None) and (params is None):
        # Deferred construction: remember the hyperparameters only.
        return partial(adagrad_window, **_get_call_kwargs(locals()))
    elif (loss_or_grads is None) or (params is None):
        raise ValueError('Please provide both `loss_or_grads` and `params` to get updates')
    grads = get_or_compute_grads(loss_or_grads, params)
    updates = OrderedDict()
    for (param, grad) in zip(params, grads):
        # i: ring-buffer write index (stored as a float shared variable,
        # cast to int for indexing).
        i = pytensor.shared(pm.floatX(0))
        i_int = i.astype('int32')
        value = param.get_value(borrow=True)
        # accu: per-element ring buffer of the last n_win squared grads.
        accu = pytensor.shared(np.zeros((value.shape + (n_win,)), dtype=value.dtype))
        accu_new = pt.set_subtensor(accu[(..., i_int)], (grad ** 2))
        # Wrap the index around when the window is full.
        i_new = pt.switch(((i + 1) < n_win), (i + 1), 0)
        updates[accu] = accu_new
        updates[i] = i_new
        # Sum over the window (last axis) for the AdaGrad denominator.
        accu_sum = accu_new.sum(axis=(- 1))
        updates[param] = (param - ((learning_rate * grad) / pt.sqrt((accu_sum + epsilon))))
    return updates
class JordanWignerSparseTest(unittest.TestCase):
    """Tests for jordan_wigner_sparse / qubit_operator_sparse and the
    diagonal extraction helpers, against hand-computed sparse matrices.
    """

    def test_jw_sparse_0create(self):
        """Creation operator on mode 0 in a 1-mode (2x2) space."""
        expected = csc_matrix(([1], ([1], [0])), shape=(2, 2))
        self.assertTrue(numpy.allclose(jordan_wigner_sparse(FermionOperator('0^')).A, expected.A))

    def test_jw_sparse_1annihilate(self):
        """Annihilation on mode 1; the -1 encodes the JW parity string."""
        expected = csc_matrix(([1, (- 1)], ([0, 2], [1, 3])), shape=(4, 4))
        self.assertTrue(numpy.allclose(jordan_wigner_sparse(FermionOperator('1')).A, expected.A))

    def test_jw_sparse_0create_2annihilate(self):
        """Two-mode term 0^ 2 with coefficient -1j on 3 modes."""
        expected = csc_matrix(([(- 1j), 1j], ([4, 6], [1, 3])), shape=(8, 8), dtype=numpy.complex128)
        self.assertTrue(numpy.allclose(jordan_wigner_sparse(FermionOperator('0^ 2', (- 1j))).A, expected.A))

    def test_jw_sparse_0create_3annihilate(self):
        """Term 0^ 3 with coefficient -1j on 4 modes."""
        expected = csc_matrix(([(- 1j), 1j, 1j, (- 1j)], ([8, 10, 12, 14], [1, 3, 5, 7])), shape=(16, 16), dtype=numpy.complex128)
        self.assertTrue(numpy.allclose(jordan_wigner_sparse(FermionOperator('0^ 3', (- 1j))).A, expected.A))

    def test_jw_sparse_twobody(self):
        """Two-body term 2^ 1^ 1 3."""
        expected = csc_matrix(([1, 1], ([6, 14], [5, 13])), shape=(16, 16))
        self.assertTrue(numpy.allclose(jordan_wigner_sparse(FermionOperator('2^ 1^ 1 3')).A, expected.A))

    def test_qubit_operator_sparse_n_qubits_too_small(self):
        """Requesting fewer qubits than the operator acts on must raise."""
        with self.assertRaises(ValueError):
            qubit_operator_sparse(QubitOperator('X3'), 1)

    def test_qubit_operator_sparse_n_qubits_not_specified(self):
        """Qubit count is inferred from the operator when not given."""
        expected = csc_matrix(([1, 1, 1, 1], ([1, 0, 3, 2], [0, 1, 2, 3])), shape=(4, 4))
        self.assertTrue(numpy.allclose(qubit_operator_sparse(QubitOperator('X1')).A, expected.A))

    def test_get_linear_qubit_operator_diagonal_wrong_n(self):
        """Too-small qubit count must raise for the diagonal helper too."""
        with self.assertRaises(ValueError):
            get_linear_qubit_operator_diagonal(QubitOperator('X3'), 1)

    def test_get_linear_qubit_operator_diagonal_0(self):
        """The zero operator has an all-zero diagonal."""
        qubit_operator = QubitOperator.zero()
        vec_expected = numpy.zeros(8)
        self.assertTrue(numpy.allclose(get_linear_qubit_operator_diagonal(qubit_operator, 3), vec_expected))

    def test_get_linear_qubit_operator_diagonal_zero(self):
        """Off-diagonal operators (X/Y terms) contribute no diagonal."""
        qubit_operator = QubitOperator('X0 Y1')
        vec_expected = numpy.zeros(4)
        self.assertTrue(numpy.allclose(get_linear_qubit_operator_diagonal(qubit_operator), vec_expected))

    def test_get_linear_qubit_operator_diagonal_non_zero(self):
        """Pure-Z operators produce the expected +-1 diagonal."""
        qubit_operator = QubitOperator('Z0 Z2')
        vec_expected = numpy.array([1, (- 1), 1, (- 1), (- 1), 1, (- 1), 1])
        self.assertTrue(numpy.allclose(get_linear_qubit_operator_diagonal(qubit_operator), vec_expected))

    def test_get_linear_qubit_operator_diagonal_cmp_zero(self):
        """Cross-check against the dense diagonal from LinearQubitOperator."""
        qubit_operator = QubitOperator('Z1 X2 Y5')
        vec_expected = numpy.diag((LinearQubitOperator(qubit_operator) * numpy.eye((2 ** 6))))
        self.assertTrue(numpy.allclose(get_linear_qubit_operator_diagonal(qubit_operator), vec_expected))

    def test_get_linear_qubit_operator_diagonal_cmp_non_zero(self):
        """Same cross-check with a non-vanishing (all-Z) diagonal."""
        qubit_operator = QubitOperator('Z1 Z2 Z5')
        vec_expected = numpy.diag((LinearQubitOperator(qubit_operator) * numpy.eye((2 ** 6))))
        self.assertTrue(numpy.allclose(get_linear_qubit_operator_diagonal(qubit_operator), vec_expected))
class OrderWeighted(Reorder, OrderRemembered):
    """Shuffle order that favors songs with higher ratings."""

    name = 'weighted'
    display_name = _('Prefer higher rated')
    accelerated_name = _('Prefer _higher rated')

    def next(self, playlist, iter):
        super().next(playlist, iter)
        remaining = self.remaining(playlist)
        # Every song played: reset history and stop.
        if not remaining:
            self.reset(playlist)
            return None
        total_score = sum([song('~#rating') for song in remaining.values()])
        # All ratings zero: fall back to a uniform random pick.
        if total_score == 0:
            return playlist.get_iter((random.choice(list(remaining)),))
        # Roulette-wheel selection proportional to rating.
        threshold = random.random() * total_score
        accumulated = 0.0
        for index, song in remaining.items():
            accumulated += song('~#rating')
            if accumulated >= threshold:
                return playlist.get_iter([index])
        raise ValueError
def test_collect_ref_counts():
    """Collected events hold their metadata refs until flush releases them."""
    source = Stream()
    collector = source.collect()
    counters = []
    for value in range(10):
        counter = RefCounter()
        counters.append(counter)
        source.emit(value, metadata=[{'ref': counter}])
    # While buffered, each reference is held exactly once.
    assert all(c.count == 1 for c in counters)
    collector.flush()
    # Flushing releases every reference.
    assert all(c.count == 0 for c in counters)
def _decode_host(host):
if (not host):
return u''
try:
host_bytes = host.encode('ascii')
except UnicodeEncodeError:
host_text = host
else:
try:
host_text = idna_decode(host_bytes, uts46=True)
except ValueError:
host_text = host
return host_text |
class CT_Style(BaseOxmlElement):
    """``w:style`` element: a single style definition in styles.xml.

    Fix: the original had its ``@property`` / ``@<name>.setter``
    decorators stripped, leaving stray ``_val.setter`` expression
    statements that raise NameError when the class body executes, and
    duplicate ``def`` statements where the setter silently shadowed the
    getter. The decorators are restored here; the underlying logic is
    unchanged.
    """

    _tag_seq = ('w:name', 'w:aliases', 'w:basedOn', 'w:next', 'w:link', 'w:autoRedefine', 'w:hidden', 'w:uiPriority', 'w:semiHidden', 'w:unhideWhenUsed', 'w:qFormat', 'w:locked', 'w:personal', 'w:personalCompose', 'w:personalReply', 'w:rsid', 'w:pPr', 'w:rPr', 'w:tblPr', 'w:trPr', 'w:tcPr', 'w:tblStylePr')
    name = ZeroOrOne('w:name', successors=_tag_seq[1:])
    basedOn = ZeroOrOne('w:basedOn', successors=_tag_seq[3:])
    next = ZeroOrOne('w:next', successors=_tag_seq[4:])
    uiPriority = ZeroOrOne('w:uiPriority', successors=_tag_seq[8:])
    semiHidden = ZeroOrOne('w:semiHidden', successors=_tag_seq[9:])
    unhideWhenUsed = ZeroOrOne('w:unhideWhenUsed', successors=_tag_seq[10:])
    qFormat = ZeroOrOne('w:qFormat', successors=_tag_seq[11:])
    locked = ZeroOrOne('w:locked', successors=_tag_seq[12:])
    pPr = ZeroOrOne('w:pPr', successors=_tag_seq[17:])
    rPr = ZeroOrOne('w:rPr', successors=_tag_seq[18:])
    del _tag_seq

    type: (WD_STYLE_TYPE | None) = OptionalAttribute('w:type', WD_STYLE_TYPE)
    styleId: (str | None) = OptionalAttribute('w:styleId', ST_String)
    default = OptionalAttribute('w:default', ST_OnOff)
    customStyle = OptionalAttribute('w:customStyle', ST_OnOff)

    @property
    def basedOn_val(self):
        """Style id of the base style, or None when no w:basedOn child."""
        basedOn = self.basedOn
        if basedOn is None:
            return None
        return basedOn.val

    @basedOn_val.setter
    def basedOn_val(self, value):
        if value is None:
            self._remove_basedOn()
        else:
            self.get_or_add_basedOn().val = value

    @property
    def base_style(self):
        """The CT_Style this style is based on, or None if not found."""
        basedOn = self.basedOn
        if basedOn is None:
            return None
        styles = self.getparent()
        base_style = styles.get_by_id(basedOn.val)
        if base_style is None:
            return None
        return base_style

    def delete(self):
        """Remove this style definition from its parent ``w:styles``."""
        self.getparent().remove(self)

    @property
    def locked_val(self):
        """Value of w:locked; False when the element is absent."""
        locked = self.locked
        if locked is None:
            return False
        return locked.val

    @locked_val.setter
    def locked_val(self, value):
        self._remove_locked()
        if bool(value) is True:
            locked = self._add_locked()
            locked.val = value

    @property
    def name_val(self):
        """Value of ``w:name/@w:val``, or None when absent."""
        name = self.name
        if name is None:
            return None
        return name.val

    @name_val.setter
    def name_val(self, value):
        self._remove_name()
        if value is not None:
            name = self._add_name()
            name.val = value

    @property
    def next_style(self):
        """CT_Style for the following-paragraph style, or None."""
        next = self.next
        if next is None:
            return None
        styles = self.getparent()
        return styles.get_by_id(next.val)

    @property
    def qFormat_val(self):
        """Value of w:qFormat; False when the element is absent."""
        qFormat = self.qFormat
        if qFormat is None:
            return False
        return qFormat.val

    @qFormat_val.setter
    def qFormat_val(self, value):
        self._remove_qFormat()
        if bool(value):
            self._add_qFormat()

    @property
    def semiHidden_val(self):
        """Value of w:semiHidden; False when the element is absent."""
        semiHidden = self.semiHidden
        if semiHidden is None:
            return False
        return semiHidden.val

    @semiHidden_val.setter
    def semiHidden_val(self, value):
        self._remove_semiHidden()
        if bool(value) is True:
            semiHidden = self._add_semiHidden()
            semiHidden.val = value

    @property
    def uiPriority_val(self):
        """Value of w:uiPriority, or None when absent."""
        uiPriority = self.uiPriority
        if uiPriority is None:
            return None
        return uiPriority.val

    @uiPriority_val.setter
    def uiPriority_val(self, value):
        self._remove_uiPriority()
        if value is not None:
            uiPriority = self._add_uiPriority()
            uiPriority.val = value

    @property
    def unhideWhenUsed_val(self):
        """Value of w:unhideWhenUsed; False when the element is absent."""
        unhideWhenUsed = self.unhideWhenUsed
        if unhideWhenUsed is None:
            return False
        return unhideWhenUsed.val

    @unhideWhenUsed_val.setter
    def unhideWhenUsed_val(self, value):
        self._remove_unhideWhenUsed()
        if bool(value) is True:
            unhideWhenUsed = self._add_unhideWhenUsed()
            unhideWhenUsed.val = value
class PublishFilter(SimpleListFilter):
    """Admin list filter splitting items into published vs. scheduled."""

    title = _('Publish status')
    parameter_name = 'published'

    def lookups(self, request, model_admin):
        """Two choices: already published, or awaiting publication."""
        return [
            ('yes', gettext('Published')),
            ('no', gettext('Waiting for publication date')),
        ]

    def queryset(self, request, queryset):
        """Filter by date_created relative to now; None = no filtering."""
        selected = self.value()
        if selected == 'yes':
            return queryset.filter(date_created__lte=timezone.now())
        if selected == 'no':
            return queryset.filter(date_created__gte=timezone.now())
        return None
class Car():
    """Box2D dynamics model of the race car used by CarRacing.

    The car is a rigid hull plus four wheels attached with revolute
    joints; `step` integrates a simple engine/brake/tire-friction model.
    """

    def __init__(self, world, init_angle, init_x, init_y):
        self.world = world
        # Hull: four convex polygon fixtures approximating the body shape.
        self.hull = self.world.CreateDynamicBody(position=(init_x, init_y), angle=init_angle, fixtures=[fixtureDef(shape=polygonShape(vertices=[((x * SIZE), (y * SIZE)) for (x, y) in HULL_POLY1]), density=1.0), fixtureDef(shape=polygonShape(vertices=[((x * SIZE), (y * SIZE)) for (x, y) in HULL_POLY2]), density=1.0), fixtureDef(shape=polygonShape(vertices=[((x * SIZE), (y * SIZE)) for (x, y) in HULL_POLY3]), density=1.0), fixtureDef(shape=polygonShape(vertices=[((x * SIZE), (y * SIZE)) for (x, y) in HULL_POLY4]), density=1.0)])
        self.hull.color = (0.8, 0.0, 0.0)
        self.wheels = []
        self.fuel_spent = 0.0
        WHEEL_POLY = [((- WHEEL_W), (+ WHEEL_R)), ((+ WHEEL_W), (+ WHEEL_R)), ((+ WHEEL_W), (- WHEEL_R)), ((- WHEEL_W), (- WHEEL_R))]
        for (wx, wy) in WHEELPOS:
            # NOTE(review): both branches are 1.0, so front/rear wheel
            # scaling is effectively disabled here.
            front_k = (1.0 if (wy > 0) else 1.0)
            # Wheels only collide with category 1 (the track), not each other.
            w = self.world.CreateDynamicBody(position=((init_x + (wx * SIZE)), (init_y + (wy * SIZE))), angle=init_angle, fixtures=fixtureDef(shape=polygonShape(vertices=[(((x * front_k) * SIZE), ((y * front_k) * SIZE)) for (x, y) in WHEEL_POLY]), density=0.1, categoryBits=32, maskBits=1, restitution=0.0))
            w.wheel_rad = ((front_k * WHEEL_R) * SIZE)
            w.color = WHEEL_COLOR
            # Per-wheel control and dynamic state.
            w.gas = 0.0
            w.brake = 0.0
            w.steer = 0.0
            w.phase = 0.0   # wheel rotation angle, for rendering
            w.omega = 0.0   # angular velocity of the wheel
            w.skid_start = None
            w.skid_particle = None
            # Steering joint: motorized, limited to +-0.4 rad.
            rjd = revoluteJointDef(bodyA=self.hull, bodyB=w, localAnchorA=((wx * SIZE), (wy * SIZE)), localAnchorB=(0, 0), enableMotor=True, enableLimit=True, maxMotorTorque=(((180 * 900) * SIZE) * SIZE), motorSpeed=0, lowerAngle=(- 0.4), upperAngle=(+ 0.4))
            w.joint = self.world.CreateJoint(rjd)
            w.tiles = set()   # road tiles currently under this wheel
            w.userData = w
            self.wheels.append(w)
        self.drawlist = (self.wheels + [self.hull])
        self.particles = []

    def gas(self, gas):
        """Apply throttle (0..1) to the rear wheels, ramped gradually."""
        gas = np.clip(gas, 0, 1)
        for w in self.wheels[2:4]:
            diff = (gas - w.gas)
            # Limit throttle ramp-up per call.
            if (diff > 0.1):
                diff = 0.1
            w.gas += diff

    def brake(self, b):
        """Apply brake amount `b` to all four wheels."""
        for w in self.wheels:
            w.brake = b

    def steer(self, s):
        """Set the steering target for the two front wheels."""
        self.wheels[0].steer = s
        self.wheels[1].steer = s

    def step(self, dt):
        """Advance the wheel physics by `dt` seconds."""
        for w in self.wheels:
            # Steering: drive the joint motor toward the steering target.
            dir = np.sign((w.steer - w.joint.angle))
            val = abs((w.steer - w.joint.angle))
            w.joint.motorSpeed = (dir * min((50.0 * val), 3.0))
            # Friction: reduced on grass, full on whatever road tiles the
            # wheel currently touches.
            grass = True
            friction_limit = (FRICTION_LIMIT * 0.6)
            for tile in w.tiles:
                friction_limit = max(friction_limit, (FRICTION_LIMIT * tile.road_friction))
                grass = False
            # Velocity decomposed into the wheel's forward/side directions.
            forw = w.GetWorldVector((0, 1))
            side = w.GetWorldVector((1, 0))
            v = w.linearVelocity
            vf = ((forw[0] * v[0]) + (forw[1] * v[1]))
            vs = ((side[0] * v[0]) + (side[1] * v[1]))
            # Engine: torque falls off with wheel speed (the +5.0 avoids a
            # division blow-up from standstill).
            w.omega += ((((dt * ENGINE_POWER) * w.gas) / WHEEL_MOMENT_OF_INERTIA) / (abs(w.omega) + 5.0))
            self.fuel_spent += ((dt * ENGINE_POWER) * w.gas)
            if (w.brake >= 0.9):
                # Full brake: lock the wheel.
                w.omega = 0
            elif (w.brake > 0):
                BRAKE_FORCE = 15
                dir = (- np.sign(w.omega))
                val = (BRAKE_FORCE * w.brake)
                # Don't overshoot past zero.
                if (abs(val) > abs(w.omega)):
                    val = abs(w.omega)
                w.omega += (dir * val)
            w.phase += (w.omega * dt)
            # Slip: difference between wheel rim speed and ground speed.
            vr = (w.omega * w.wheel_rad)
            f_force = ((- vf) + vr)
            p_force = (- vs)
            f_force *= ((205000 * SIZE) * SIZE)
            p_force *= ((205000 * SIZE) * SIZE)
            force = np.sqrt((np.square(f_force) + np.square(p_force)))
            # Skid marks when well past the friction limit.
            if (abs(force) > (2.0 * friction_limit)):
                if (w.skid_particle and (w.skid_particle.grass == grass) and (len(w.skid_particle.poly) < 30)):
                    w.skid_particle.poly.append((w.position[0], w.position[1]))
                elif (w.skid_start is None):
                    w.skid_start = w.position
                else:
                    w.skid_particle = self._create_particle(w.skid_start, w.position, grass)
                    w.skid_start = None
            else:
                w.skid_start = None
                w.skid_particle = None
            # Clamp the combined tire force to the friction circle.
            if (abs(force) > friction_limit):
                f_force /= force
                p_force /= force
                force = friction_limit
                f_force *= force
                p_force *= force
            # Reaction torque slows the wheel; force accelerates the body.
            w.omega -= (((dt * f_force) * w.wheel_rad) / WHEEL_MOMENT_OF_INERTIA)
            w.ApplyForceToCenter((((p_force * side[0]) + (f_force * forw[0])), ((p_force * side[1]) + (f_force * forw[1]))), True)

    def draw(self, viewer, draw_particles=True):
        """Render skid particles, hull/wheel polygons, and wheel markers."""
        if draw_particles:
            for p in self.particles:
                viewer.draw_polyline(p.poly, color=p.color, linewidth=5)
        for obj in self.drawlist:
            for f in obj.fixtures:
                trans = f.body.transform
                path = [(trans * v) for v in f.shape.vertices]
                viewer.draw_polygon(path, color=obj.color)
                # Only wheels carry a 'phase'; the hull has no rotation marker.
                if ('phase' not in obj.__dict__):
                    continue
                # White stripe on the wheel, visualizing its rotation phase.
                a1 = obj.phase
                a2 = (obj.phase + 1.2)
                s1 = math.sin(a1)
                s2 = math.sin(a2)
                c1 = math.cos(a1)
                c2 = math.cos(a2)
                if ((s1 > 0) and (s2 > 0)):
                    continue
                if (s1 > 0):
                    c1 = np.sign(c1)
                if (s2 > 0):
                    c2 = np.sign(c2)
                white_poly = [(((- WHEEL_W) * SIZE), (((+ WHEEL_R) * c1) * SIZE)), (((+ WHEEL_W) * SIZE), (((+ WHEEL_R) * c1) * SIZE)), (((+ WHEEL_W) * SIZE), (((+ WHEEL_R) * c2) * SIZE)), (((- WHEEL_W) * SIZE), (((+ WHEEL_R) * c2) * SIZE))]
                viewer.draw_polygon([(trans * v) for v in white_poly], color=WHEEL_WHITE)

    def _create_particle(self, point1, point2, grass):
        """Start a new skid-mark polyline between two wheel positions."""
        class Particle():
            pass
        p = Particle()
        p.color = (WHEEL_COLOR if (not grass) else MUD_COLOR)
        p.ttl = 1
        p.poly = [(point1[0], point1[1]), (point2[0], point2[1])]
        p.grass = grass
        self.particles.append(p)
        # Keep only the 30 most recent particles.
        while (len(self.particles) > 30):
            self.particles.pop(0)
        return p

    def destroy(self):
        """Remove all Box2D bodies belonging to this car from the world."""
        self.world.DestroyBody(self.hull)
        self.hull = None
        for w in self.wheels:
            self.world.DestroyBody(w)
        self.wheels = []
class TestModels(TestCase):
    """Tests for model extraction and iteration across SMT solvers.

    NOTE(review): the bare ``(QF_LRA)`` / ``(QF_BOOL)`` expressions
    preceding each test look like decorator remnants — probably
    ``@skipIfNoSolverForLogic(...)`` with the decorator name stripped by
    extraction. As written they are harmless no-op expressions; confirm
    against the original test suite.
    """

    (QF_LRA)
    def test_get_model(self):
        """An unsatisfiable implication forces the antecedent to be False."""
        varA = Symbol('A', BOOL)
        varB = Symbol('B', REAL)
        zero = Real(0)
        # B > 0 and B < 0 is unsatisfiable, so A must be assigned False.
        f1 = Implies(varA, And(GT(varB, zero), LT(varB, zero)))
        model = None
        for solver_name in get_env().factory.all_solvers(logic=QF_UFLIRA):
            with Solver(name=solver_name, logic=QF_LRA) as s:
                s.add_assertion(f1)
                check = s.solve()
                self.assertTrue(check)
                model = s.get_model()
            # Note: the model is queried after the solver context exits.
            self.assertEqual(model[varA], FALSE())

    (QF_BOOL)
    def test_get_py_value_model(self):
        """get_py_value converts a model value to a native Python bool."""
        varA = Symbol('A', BOOL)
        with Solver(logic=QF_BOOL) as s:
            s.add_assertion(varA)
            s.solve()
            model = s.get_model()
            self.assertTrue(model.get_py_value(varA))

    (QF_BOOL)
    def test_eager_model_iterator(self):
        """EagerModel only iterates over explicitly assigned variables."""
        (x, y, z) = [Symbol(s) for s in 'xyz']
        with Solver(logic=QF_BOOL) as s:
            s.add_assertion(And(x, y))
            assert s.solve()
            d = {}
            d[x] = s.get_value(x)
            d[y] = s.get_value(y)
            m = EagerModel(assignment=d)
        # z was never assigned, so it must not appear in the iteration.
        for (k, _) in m:
            self.assertFalse((k == z))

    (QF_BOOL)
    def test_pickle_eager_model(self):
        """A materialized model (list of pairs) must be picklable."""
        import pickle
        (x, y) = (Symbol('x'), Symbol('y'))
        with Solver(logic=QF_BOOL) as s:
            s.add_assertion(And(x, y))
            assert s.solve()
            model = list(s.get_model())
        self.assertIsNotNone(pickle.dumps(model, (- 1)))
class TensorBoardLogger(MetricLogger):
    """MetricLogger writing to TensorBoard via SummaryWriter.

    Only global rank 0 owns a writer; on every other rank all logging
    methods are no-ops. The log directory is broadcast from rank 0 so
    all ranks agree on the path.
    """

    def __init__(self: TensorBoardLogger, path: str, *args: Any, **kwargs: Any) -> None:
        self._writer: Optional[SummaryWriter] = None
        self._rank: int = get_global_rank()
        self._sync_path_to_workers(path)
        if self._rank != 0:
            logger.debug(f'Not logging metrics on this host because env RANK: {self._rank} != 0')
        else:
            logger.info(f'TensorBoard SummaryWriter instantiated. Files will be stored in: {path}')
            self._writer = SummaryWriter(*args, log_dir=path, **kwargs)
        # Flush pending events even on unclean interpreter shutdown.
        atexit.register(self.close)

    def _sync_path_to_workers(self: TensorBoardLogger, path: str) -> None:
        """Broadcast rank 0's log path so every rank uses the same one."""
        if not (dist.is_available() and dist.is_initialized()):
            self._path: str = path
            return
        pg = PGWrapper(dist.group.WORLD)
        path_container: List[str] = [path] if self._rank == 0 else ['']
        pg.broadcast_object_list(path_container, 0)
        updated_path = path_container[0]
        if updated_path != path:
            logger.info(f'Updating TensorBoard path to match rank 0: {updated_path}')
        self._path: str = updated_path

    def writer(self: TensorBoardLogger) -> Optional[SummaryWriter]:
        """The underlying SummaryWriter (None on non-zero ranks)."""
        return self._writer

    def path(self: TensorBoardLogger) -> str:
        """The resolved log directory."""
        return self._path

    def log_dict(self: TensorBoardLogger, payload: Mapping[(str, Scalar)], step: int) -> None:
        """Log every scalar in `payload` at the given step."""
        if self._writer is None:
            return
        for name, value in payload.items():
            self.log(name, value, step)

    def log(self: TensorBoardLogger, name: str, data: Scalar, step: int) -> None:
        """Log a single scalar data point."""
        writer = self._writer
        if writer is not None:
            writer.add_scalar(name, data, global_step=step, new_style=True)

    def log_text(self: TensorBoardLogger, name: str, data: str, step: int) -> None:
        """Log a text entry."""
        writer = self._writer
        if writer is not None:
            writer.add_text(name, data, global_step=step)

    def log_hparams(self: TensorBoardLogger, hparams: Dict[(str, Scalar)], metrics: Dict[(str, Scalar)]) -> None:
        """Log hyperparameters together with associated metrics."""
        writer = self._writer
        if writer is not None:
            writer.add_hparams(hparams, metrics)

    def log_image(self: TensorBoardLogger, *args: Any, **kwargs: Any) -> None:
        """Forward to SummaryWriter.add_image."""
        writer = self._writer
        if writer is not None:
            writer.add_image(*args, **kwargs)

    def log_images(self: TensorBoardLogger, *args: Any, **kwargs: Any) -> None:
        """Forward to SummaryWriter.add_images."""
        writer = self._writer
        if writer is not None:
            writer.add_images(*args, **kwargs)

    def log_audio(self: TensorBoardLogger, *args: Any, **kwargs: Any) -> None:
        """Forward to SummaryWriter.add_audio."""
        writer = self._writer
        if writer is not None:
            writer.add_audio(*args, **kwargs)

    def log_scalars(self: TensorBoardLogger, main_tag: str, tag_scalar_dict: Dict[(str, Union[(float, int)])], global_step: Optional[int]=None, walltime: Optional[float]=None) -> None:
        """Log several related scalars under one main tag."""
        writer = self._writer
        if writer is not None:
            writer.add_scalars(main_tag=main_tag, tag_scalar_dict=tag_scalar_dict, global_step=global_step, walltime=walltime)

    def flush(self: TensorBoardLogger) -> None:
        """Flush buffered events to disk."""
        writer = self._writer
        if writer is not None:
            writer.flush()

    def close(self: TensorBoardLogger) -> None:
        """Close the writer; subsequent logging calls become no-ops."""
        writer = self._writer
        if writer is not None:
            writer.close()
            self._writer = None
# NOTE(review): the original started with a bare ``.parametrize(...)`` —
# a syntax error left over from a stripped ``@pytest.mark.parametrize``
# decorator, restored here.
@pytest.mark.parametrize('locale, time, expected_period_id', [
    ('de', time(7, 42), 'morning1'),
    ('de', time(3, 11), 'night1'),
    ('fi', time(0), 'midnight'),
    ('en_US', time(12), 'noon'),
    ('en_US', time(21), 'night1'),
    ('en_US', time(5), 'night1'),
    ('en_US', time(6), 'morning1'),
    ('agq', time(10), 'am'),
    ('agq', time(22), 'pm'),
    ('am', time(14), 'afternoon1'),
])
def test_day_period_rules(locale, time, expected_period_id):
    """Times resolve to the CLDR day-period IDs for each locale."""
    assert dates.get_period_id(time, locale=locale) == expected_period_id
def make_d_label_spk2uttr(lines):
    """Build speaker-indexing structures from a list of utterance lines.

    Returns a tuple (dic_label, list_label, dic_spk2utt):
    - dic_label: speaker -> integer label (assigned in first-seen order)
    - list_label: speakers in first-seen order
    - dic_spk2utt: speaker -> list of that speaker's lines
    """
    dic_label = {}
    list_label = []
    dic_spk2utt = {}
    for line in lines:
        speaker = get_spk(line)
        if speaker not in dic_label:
            # len(list_label) equals the next free label index.
            dic_label[speaker] = len(list_label)
            list_label.append(speaker)
        dic_spk2utt.setdefault(speaker, []).append(line)
    return (dic_label, list_label, dic_spk2utt)
@dataclass
class ModelArguments:
    """Arguments specifying which model/config/tokenizer to fine-tune.

    Fix: the ``@dataclass`` decorator was missing (likely stripped during
    extraction). Without it the ``field(...)`` calls leave raw ``Field``
    objects as class attributes and the class cannot be instantiated with
    keyword arguments as HfArgumentParser expects.
    """

    model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where to store the pretrained models downloaded from huggingface.co'})
    use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'})
    model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
    use_auth_token: bool = field(default=False, metadata={'help': 'Will use the token generated when running `huggingface-cli login` (necessary to use this script with private models).'})
    resize_position_embeddings: Optional[bool] = field(default=None, metadata={'help': "Whether to automatically resize the position embeddings if `max_source_length` exceeds the model's position embeddings."})
    # P-tuning / quantization options (no metadata in the original).
    quantization_bit: Optional[int] = field(default=None)
    pre_seq_len: Optional[int] = field(default=None)
    prefix_projection: bool = field(default=False)
# NOTE(review): the stray module-level ``_tf`` / ``_sentencepiece`` /
# ``_tokenizers`` expressions (which would raise NameError at import)
# were stripped-off decorators; restored as transformers'
# @require_tf/@require_sentencepiece/@require_tokenizers — confirm
# against upstream.
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    def test_small_integration_test(self):
        """Check google/mt5-small reproduces a reference loss.

        The expected score was computed with the original T5 MTF codebase
        (see the comment constant below); agreement within 2e-4 is
        required.
        """
        model = TFAutoModelForSeq2SeqLM.from_pretrained('google/mt5-small')
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')
        input_ids = tokenizer('Hello there', return_tensors='tf').input_ids
        labels = tokenizer('Hi I am', return_tensors='tf').input_ids
        loss = model(input_ids, labels=labels).loss
        mtf_score = (- tf.math.reduce_mean(loss).numpy())
        EXPECTED_SCORE = (- 21.228168)
        self.assertTrue((abs((mtf_score - EXPECTED_SCORE)) < 0.0002))
class UrlPattern():
_DEFAULT_PORTS = {' 443, ' 80, 'ftp': 21}
_SCHEMES_WITHOUT_HOST = ['about', 'file', 'data', 'javascript']
def __init__(self, pattern: str) -> None:
self._pattern = pattern
self._match_all = False
self._match_subdomains: bool = False
self._scheme: Optional[str] = None
self.host: Optional[str] = None
self._path: Optional[str] = None
self._port: Optional[int] = None
if (pattern == '<all_urls>'):
self._match_all = True
return
if ('\x00' in pattern):
raise ParseError('May not contain NUL byte')
pattern = self._fixup_pattern(pattern)
try:
parsed = urllib.parse.urlparse(pattern)
except ValueError as e:
raise ParseError(str(e))
assert (parsed is not None)
self._init_scheme(parsed)
self._init_host(parsed)
self._init_path(parsed)
self._init_port(parsed)
def _to_tuple(self) -> Tuple[(bool, bool, Optional[str], Optional[str], Optional[str], Optional[int])]:
return (self._match_all, self._match_subdomains, self._scheme, self.host, self._path, self._port)
def __hash__(self) -> int:
return hash(self._to_tuple())
def __eq__(self, other: Any) -> bool:
if (not isinstance(other, UrlPattern)):
return NotImplemented
return (self._to_tuple() == other._to_tuple())
def __repr__(self) -> str:
return utils.get_repr(self, pattern=self._pattern, constructor=True)
def __str__(self) -> str:
return self._pattern
def _fixup_pattern(self, pattern: str) -> str:
if pattern.startswith('*:'):
pattern = ('any:' + pattern[2:])
schemes = tuple(((s + ':') for s in self._SCHEMES_WITHOUT_HOST))
if (('://' not in pattern) and (not pattern.startswith(schemes))):
pattern = ('any://' + pattern)
if (pattern.startswith('file://') and (not pattern.startswith('file:///'))):
pattern = ('file:///' + pattern[len('file://'):])
return pattern
def _init_scheme(self, parsed: urllib.parse.ParseResult) -> None:
if (not parsed.scheme):
raise ParseError('Missing scheme')
if (parsed.scheme == 'any'):
self._scheme = None
return
self._scheme = parsed.scheme
def _init_path(self, parsed: urllib.parse.ParseResult) -> None:
if ((self._scheme == 'about') and (not parsed.path.strip())):
raise ParseError('Pattern without path')
if (parsed.path == '/*'):
self._path = None
elif (not parsed.path):
self._path = None
else:
self._path = parsed.path
def _init_host(self, parsed: urllib.parse.ParseResult) -> None:
if ((parsed.hostname is None) or (not parsed.hostname.strip())):
if (self._scheme not in self._SCHEMES_WITHOUT_HOST):
raise ParseError('Pattern without host')
assert (self.host is None)
return
if parsed.netloc.startswith('['):
url = QUrl()
url.setHost(parsed.hostname)
if (not url.isValid()):
raise ParseError(url.errorString())
self.host = url.host()
return
if (parsed.hostname == '*'):
self._match_subdomains = True
hostname = None
elif parsed.hostname.startswith('*.'):
if (len(parsed.hostname) == 2):
raise ParseError('Pattern without host')
self._match_subdomains = True
hostname = parsed.hostname[2:]
elif (set(parsed.hostname) in {frozenset('.'), frozenset('. ')}):
raise ParseError('Invalid host')
else:
hostname = parsed.hostname
if (hostname is None):
self.host = None
elif ('*' in hostname):
raise ParseError('Invalid host wildcard')
else:
self.host = hostname.rstrip('.')
def _init_port(self, parsed: urllib.parse.ParseResult) -> None:
if parsed.netloc.endswith(':*'):
self._port = None
elif parsed.netloc.endswith(':'):
raise ParseError('Invalid port: Port is empty')
else:
try:
self._port = parsed.port
except ValueError as e:
raise ParseError('Invalid port: {}'.format(e))
scheme_has_port = ((self._scheme in list(self._DEFAULT_PORTS)) or (self._scheme is None))
if ((self._port is not None) and (not scheme_has_port)):
raise ParseError('Ports are unsupported with {} scheme'.format(self._scheme))
def _matches_scheme(self, scheme: str) -> bool:
return ((self._scheme is None) or (self._scheme == scheme))
def _matches_host(self, host: str) -> bool:
host = host.rstrip('.')
if (self.host is None):
return True
if (host == self.host):
return True
if (not self._match_subdomains):
return False
if (not utils.raises(ValueError, ipaddress.ip_address, host)):
return False
if (len(host) <= (len(self.host) + 1)):
return False
if (not host.endswith(self.host)):
return False
return (host[((len(host) - len(self.host)) - 1)] == '.')
def _matches_port(self, scheme: str, port: int) -> bool:
if ((port == (- 1)) and (scheme in self._DEFAULT_PORTS)):
port = self._DEFAULT_PORTS[scheme]
return ((self._port is None) or (self._port == port))
def _matches_path(self, path: str) -> bool:
if (self._path is None):
return True
if ((path + '/*') == self._path):
return True
return fnmatch.fnmatchcase(path, self._path)
def matches(self, qurl: QUrl) -> bool:
    """Check if the pattern matches the given (valid) QUrl."""
    qtutils.ensure_valid(qurl)
    if self._match_all:
        return True
    # `and` short-circuits exactly like the original early-return chain.
    return (self._matches_scheme(qurl.scheme())
            and self._matches_host(qurl.host())
            and self._matches_port(qurl.scheme(), qurl.port())
            and self._matches_path(qurl.path()))
@torch.no_grad()  # BUG FIX: this line had been mangled to a bare `_grad()` call (NameError at import)
def load_mtl(fn, clear_ks=True):
    """Parse a Wavefront .mtl file into a list of Material objects.

    Texture-map entries (map_kd / map_ks / bump) are loaded relative to the
    .mtl file's directory; all other entries become float32 CUDA tensors.
    The diffuse texture is converted from sRGB to linear, and when
    `clear_ks` is True the first channel of every 'ks' mip is zeroed.
    """
    import re
    mtl_path = os.path.dirname(fn)
    with open(fn, 'r') as f:
        lines = f.readlines()
    materials = []
    for line in lines:
        split_line = re.split(' +|\t+|\n+', line.strip())
        prefix = split_line[0].lower()
        data = split_line[1:]
        if 'newmtl' in prefix:
            material = Material({'name': data[0]})
            materials += [material]
        elif materials:  # attribute lines before the first newmtl are ignored
            if ('bsdf' in prefix) or ('map_kd' in prefix) or ('map_ks' in prefix) or ('bump' in prefix):
                material[prefix] = data[0]
            else:
                material[prefix] = torch.tensor(tuple(float(d) for d in data), dtype=torch.float32, device='cuda')
    for mat in materials:
        if 'bsdf' not in mat:
            mat['bsdf'] = 'pbr'
        if 'map_kd' in mat:
            mat['kd'] = texture.load_texture2D(os.path.join(mtl_path, mat['map_kd']))
        else:
            mat['kd'] = texture.Texture2D(mat['kd'])
        if 'map_ks' in mat:
            mat['ks'] = texture.load_texture2D(os.path.join(mtl_path, mat['map_ks']), channels=3)
        else:
            mat['ks'] = texture.Texture2D(mat['ks'])
        if 'bump' in mat:
            # Normal maps are stored in [0, 1]; remap to [-1, 1].
            mat['normal'] = texture.load_texture2D(os.path.join(mtl_path, mat['bump']), lambda_fn=(lambda x: ((x * 2) - 1)), channels=3)
        # Diffuse albedo is authored in sRGB; convert to linear space.
        mat['kd'] = texture.srgb_to_rgb(mat['kd'])
        if clear_ks:
            # Zero the first channel of every specular mip.
            for mip in mat['ks'].getMips():
                mip[..., 0] = 0.0
    return materials
def parse_locale(identifier: str, sep: str='_') -> (tuple[(str, (str | None), (str | None), (str | None))] | tuple[(str, (str | None), (str | None), (str | None), (str | None))]):
(identifier, _, modifier) = identifier.partition('')
if ('.' in identifier):
identifier = identifier.split('.', 1)[0]
parts = identifier.split(sep)
lang = parts.pop(0).lower()
if (not lang.isalpha()):
raise ValueError(f'expected only letters, got {lang!r}')
script = territory = variant = None
if (parts and (len(parts[0]) == 4) and parts[0].isalpha()):
script = parts.pop(0).title()
if parts:
if ((len(parts[0]) == 2) and parts[0].isalpha()):
territory = parts.pop(0).upper()
elif ((len(parts[0]) == 3) and parts[0].isdigit()):
territory = parts.pop(0)
if (parts and (((len(parts[0]) == 4) and parts[0][0].isdigit()) or ((len(parts[0]) >= 5) and parts[0][0].isalpha()))):
variant = parts.pop().upper()
if parts:
raise ValueError(f'{identifier!r} is not a valid locale identifier')
if modifier:
return (lang, territory, script, variant, modifier)
else:
return (lang, territory, script, variant) |
class ConnectionPair():
    """A CLIENT and a SERVER Connection wired back-to-back for tests.

    Bytes produced by one side's send() are fed into the peer, and the
    events the peer decodes are compared against an expectation.
    """

    def __init__(self) -> None:
        self.conn = {CLIENT: Connection(CLIENT), SERVER: Connection(SERVER)}
        self.other = {CLIENT: SERVER, SERVER: CLIENT}

    def conns(self) -> ValuesView[Connection]:
        """Return both Connection objects."""
        return self.conn.values()

    def send(self, role: Type[Sentinel], send_events: Union[(List[Event], Event)], expect: Union[(List[Event], Event, Literal['match'])]='match') -> bytes:
        """Send events as *role*, deliver the bytes to the peer, and assert
        the peer decodes *expect* (default: the events that were sent).
        Returns the raw wire bytes."""
        events = send_events if isinstance(send_events, list) else [send_events]
        wire = b''
        saw_close = False
        for event in events:
            chunk = self.conn[role].send(event)
            if chunk is None:
                # A None return from send() signals the connection closing.
                saw_close = True
            else:
                wire += chunk
        peer = self.conn[self.other[role]]
        if wire:
            peer.receive_data(wire)
        if saw_close:
            peer.receive_data(b'')
        received = get_all_events(peer)
        expected = events if expect == 'match' else expect
        if not isinstance(expected, list):
            expected = [expected]
        assert received == expected
        return wire
def linux_distribution(full_distribution_name: bool=True) -> Tuple[(str, str, str)]:
    """Deprecated compatibility shim mirroring platform.linux_distribution().

    Emits a DeprecationWarning and delegates to the module-level _distro.
    """
    warnings.warn(
        "distro.linux_distribution() is deprecated. It should only be used as a compatibility shim with Python's platform.linux_distribution(). Please use distro.id(), distro.version() and distro.name() instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    return _distro.linux_distribution(full_distribution_name)
class StopwatchMeter(Meter):
    """Accumulates the total/average duration of timed events in seconds.

    Each start()/stop() pair adds the elapsed wall-clock time to ``sum``
    and a (weighted) count to ``n``.

    BUG FIX: ``avg`` and ``elapsed_time`` had lost their @property
    decorators, so smoothed_value() passed bound *methods* (not numbers)
    into safe_round().
    """

    def __init__(self, round: Optional[int]=None):
        # Decimal places applied by smoothed_value(); None disables rounding.
        self.round = round
        self.sum = 0
        self.n = 0
        self.start_time = None

    def start(self):
        """Mark the beginning of a timed interval."""
        self.start_time = time.perf_counter()

    def stop(self, n=1, prehook=None):
        """Stop timing; *n* weights the event count.

        *prehook*, if given, runs just before the elapsed time is taken.
        A no-op if start() was never called.
        """
        if self.start_time is not None:
            if prehook is not None:
                prehook()
            delta = time.perf_counter() - self.start_time
            self.sum = self.sum + delta
            self.n = type_as(self.n, n) + n

    def reset(self):
        self.sum = 0
        self.n = 0
        self.start()  # NOTE: reset also restarts the stopwatch

    def state_dict(self):
        return {'sum': self.sum, 'n': self.n, 'round': self.round}

    def load_state_dict(self, state_dict):
        self.sum = state_dict['sum']
        self.n = state_dict['n']
        self.start_time = None
        self.round = state_dict.get('round', None)

    @property
    def avg(self):
        """Average duration per event (total sum when n == 0)."""
        return (self.sum / self.n) if (self.n > 0) else self.sum

    @property
    def elapsed_time(self):
        """Seconds since start() while running, else 0."""
        if self.start_time is None:
            return 0.0
        return time.perf_counter() - self.start_time

    def smoothed_value(self) -> float:
        """Value for logging: avg once an interval finished, otherwise the
        currently elapsed time, optionally rounded."""
        val = self.avg if self.sum > 0 else self.elapsed_time
        if (self.round is not None) and (val is not None):
            val = safe_round(val, self.round)
        return val
# BUG FIX: these decorators had been truncated to bare '.parametrize(...)'
# lines, which is a syntax error; the '@pytest.mark' prefix is restored.
@pytest.mark.parametrize('book__name', ['PyTest for Dummies'])
@pytest.mark.parametrize('book__price', [1.0])
@pytest.mark.parametrize('author__name', ['Bill Gates'])
@pytest.mark.parametrize('edition__year', [2000])
def test_parametrized(book: Book):
    """Factory fixtures pick up the parametrized attribute overrides."""
    assert (book.name == 'PyTest for Dummies')
    assert (book.price == 1.0)
    assert (book.author.name == 'Bill Gates')
    assert (len(book.editions) == 1)
    assert (book.editions[0].year == 2000)
class CombinedROIHeads(torch.nn.ModuleDict):
    """Combines a set of individual ROI heads (box/mask/keypoint) into a
    single head.

    The heads live in the ModuleDict, so ``self.box`` / ``self.mask`` /
    ``self.keypoint`` resolve to the registered sub-heads.
    """

    def __init__(self, cfg, heads):
        super(CombinedROIHeads, self).__init__(heads)
        self.cfg = cfg.clone()
        # Optionally share the box head's feature extractor with the
        # mask / keypoint heads instead of giving them their own.
        if (cfg.MODEL.MASK_ON and cfg.MODEL.ROI_MASK_HEAD.SHARE_BOX_FEATURE_EXTRACTOR):
            self.mask.feature_extractor = self.box.feature_extractor
        if (cfg.MODEL.KEYPOINT_ON and cfg.MODEL.ROI_KEYPOINT_HEAD.SHARE_BOX_FEATURE_EXTRACTOR):
            self.keypoint.feature_extractor = self.box.feature_extractor

    def forward(self, features, proposals, targets=None):
        """Run all enabled heads.

        Returns (features from the last head, detections, merged loss dict).
        """
        losses = {}
        (x, detections, loss_box) = self.box(features, proposals, targets)
        losses.update(loss_box)
        if self.cfg.MODEL.MASK_ON:
            mask_features = features
            # With a shared extractor during training, reuse the features
            # already extracted by the box head instead of re-pooling.
            if (self.training and self.cfg.MODEL.ROI_MASK_HEAD.SHARE_BOX_FEATURE_EXTRACTOR):
                mask_features = x
            (x, detections, loss_mask) = self.mask(mask_features, detections, targets)
            losses.update(loss_mask)
        if self.cfg.MODEL.KEYPOINT_ON:
            keypoint_features = features
            if (self.training and self.cfg.MODEL.ROI_KEYPOINT_HEAD.SHARE_BOX_FEATURE_EXTRACTOR):
                keypoint_features = x
            (x, detections, loss_keypoint) = self.keypoint(keypoint_features, detections, targets)
            losses.update(loss_keypoint)
        return (x, detections, losses)
class MHAtt(nn.Module):
    """Multi-head attention (MCAN-style), configured from __C['fusion'].

    BUG FIX: att() filled masked positions with ``-.0`` (negative zero),
    which leaves the softmax untouched, so the mask had no effect; masked
    scores must be a large negative number (-1e9).
    """

    def __init__(self, __C):
        super(MHAtt, self).__init__()
        self.__C = __C
        hidden = __C['fusion']['mca_HIDDEN_SIZE']
        self.linear_v = nn.Linear(hidden, hidden)
        self.linear_k = nn.Linear(hidden, hidden)
        self.linear_q = nn.Linear(hidden, hidden)
        self.linear_merge = nn.Linear(hidden, hidden)
        self.dropout = nn.Dropout(__C['fusion']['mca_DROPOUT_R'])

    def forward(self, v, k, q, mask=None):
        """Project v/k/q into heads, attend, and merge back to hidden size."""
        n_batches = q.size(0)
        n_head = self.__C['fusion']['mca_MULTI_HEAD']
        d_head = self.__C['fusion']['mca_HIDDEN_SIZE_HEAD']
        # (batch, seq, hidden) -> (batch, head, seq, d_head)
        v = self.linear_v(v).view(n_batches, -1, n_head, d_head).transpose(1, 2)
        k = self.linear_k(k).view(n_batches, -1, n_head, d_head).transpose(1, 2)
        q = self.linear_q(q).view(n_batches, -1, n_head, d_head).transpose(1, 2)
        atted = self.att(v, k, q, mask)
        # Merge heads back: (batch, head, seq, d_head) -> (batch, seq, hidden)
        atted = atted.transpose(1, 2).contiguous().view(n_batches, -1, self.__C['fusion']['mca_HIDDEN_SIZE'])
        return self.linear_merge(atted)

    def att(self, value, key, query, mask=None):
        """Scaled dot-product attention; *mask* marks positions to ignore."""
        d_k = query.size(-1)
        scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
        if mask is not None:
            # Suppress masked positions (softmax weight ~ 0).
            scores = scores.masked_fill(mask, -1e9)
        att_map = F.softmax(scores, dim=-1)
        att_map = self.dropout(att_map)
        return torch.matmul(att_map, value)
def process_form(form, comp=True):
    """Persist the submitted diffraction settings into the Flask session.

    If 2theta-min exceeds 2theta-max, min is reset to 0 and a dismissible
    warning is flashed.  The 'SHIFT' value is only stored when comp=True.
    """
    max2theta = form.max2theta.data
    min2theta = form.min2theta.data
    if (min2theta > max2theta):
        # Inverted range: fall back to 0 degrees and warn the user.
        min2theta = 0
        flash(Markup('<span class="glyphicon glyphicon-warning-sign" aria-hidden="true"></span><span class="sr-only">Error:</span> 2<i>θ</i><sub>min</sub> <strong>greater</strong> than 2<i>θ</i><sub>max</sub>—defaulting 2<i>θ</i><sub>min</sub> to 0°.'), 'warning alert-dismissible')
    session['WAVELENGTH'] = form.wavelength.data
    session['MIN2THETA'] = min2theta
    session['MAX2THETA'] = max2theta
    session['RES'] = form.res.data
    session['METHOD'] = form.method.data
    session['FWHM'] = form.fwhm.data
    # Profile-shape parameters.
    session['U'] = form.u.data
    session['V'] = form.v.data
    session['W'] = form.w.data
    session['A'] = form.a.data
    session['ETA_H'] = form.eta_h.data
    session['ETA_L'] = form.eta_l.data
    if comp:
        session['SHIFT'] = form.shift.data
class ConvTranspose2d_same(nn.Module):
    """Transposed conv whose output spatial size is exactly input*stride.

    padding and output_padding are derived from kernel_size/stride so the
    upsampling behaves like a "same"-padded transposed convolution.
    """

    def __init__(self, in_channels, out_channels, kernel_size=(5, 5), stride=(2, 2)):
        super(ConvTranspose2d_same, self).__init__()
        out_pad = []
        pad = []
        for k, s in zip(kernel_size, stride):
            o = abs(k % 2 - s % 2)
            out_pad.append(o)
            pad.append((k - s + o) // 2)
        self.deconv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size,
                                         stride, pad, output_padding=out_pad)

    def forward(self, x):
        return self.deconv(x)
class RtlSdrTcpBase(object):
    """Shared state for the RTL-SDR TCP client/server objects."""

    # Port used when the caller does not supply one.
    DEFAULT_PORT = 1235

    def __init__(self, device_index=0, test_mode_enabled=False, hostname='127.0.0.1', port=None):
        self.device_index = device_index
        self.test_mode_enabled = test_mode_enabled
        self.hostname = hostname
        self.port = port
        if (self.port is None):
            self.port = self.DEFAULT_PORT
        self.device_ready = False
        self.server_thread = None

    def packed_bytes_to_iq(self, bytes):
        """Convert raw interleaved 8-bit I/Q samples to complex values.

        Each unsigned byte in [0, 255] is mapped to roughly [-1, 1) and
        consecutive (I, Q) pairs become complex numbers.
        """
        if has_numpy:
            # assumes `bytes` is a ctypes array - TODO confirm against callers
            data = np.ctypeslib.as_array(bytes)
            # Viewing pairs of float64 as complex128 interleaves
            # even indices -> real, odd indices -> imaginary.
            iq = data.astype(np.float64).view(np.complex128)
            iq /= 127.5
            iq -= (1 + 1j)
        else:
            # NOTE(review): izip is the Python-2 itertools spelling; this
            # fallback would raise NameError on Python 3 - confirm.
            iq = [complex(((i / (255 / 2)) - 1), ((q / (255 / 2)) - 1)) for (i, q) in izip(bytes[::2], bytes[1::2])]
        return iq
def bottleneck_v1b(input_x, base_channel, scope, stride=1, projection=False, avg_down=True):
    """ResNet-v1b bottleneck block: 1x1 -> 3x3 (strided) -> 1x1 with 4x
    channel expansion, plus a shortcut and final ReLU.

    With `projection`, the shortcut is projected to base_channel*4
    channels - via avg-pool + 1x1 conv when `avg_down` is set, otherwise
    via a strided 1x1 conv; without it an identity shortcut is used.
    When DEBUG is on, every intermediate tensor is recorded (transposed
    to NCHW) in the module-level `debug_dict`, keyed by op name.
    """
    with tf.variable_scope(scope):
        if DEBUG:
            debug_dict[input_x.op.name] = tf.transpose(input_x, [0, 3, 1, 2])
        # 1x1 reduction conv.
        net = slim.conv2d(input_x, num_outputs=base_channel, kernel_size=[1, 1], stride=1, padding='VALID', biases_initializer=None, data_format=DATA_FORMAT, scope='conv0')
        if DEBUG:
            debug_dict[net.op.name] = tf.transpose(net, [0, 3, 1, 2])
        # Explicit 1-pixel spatial padding so the 3x3 conv can use VALID.
        net = tf.pad(net, paddings=[[0, 0], [1, 1], [1, 1], [0, 0]])
        if DEBUG:
            debug_dict[net.op.name] = tf.transpose(net, [0, 3, 1, 2])
        # 3x3 conv carries the stride.
        net = slim.conv2d(net, num_outputs=base_channel, kernel_size=[3, 3], stride=stride, padding='VALID', biases_initializer=None, data_format=DATA_FORMAT, scope='conv1')
        if DEBUG:
            debug_dict[net.op.name] = tf.transpose(net, [0, 3, 1, 2])
        # 1x1 expansion conv (no activation: applied after the residual add).
        net = slim.conv2d(net, num_outputs=(base_channel * 4), kernel_size=[1, 1], stride=1, padding='VALID', biases_initializer=None, data_format=DATA_FORMAT, activation_fn=None, scope='conv2')
        if DEBUG:
            debug_dict[net.op.name] = tf.transpose(net, [0, 3, 1, 2])
        if projection:
            if avg_down:
                # ResNet-D style: downsample with avg-pool, then 1x1 conv.
                shortcut = slim.avg_pool2d(input_x, kernel_size=[stride, stride], stride=stride, padding='SAME', data_format=DATA_FORMAT)
                if DEBUG:
                    debug_dict[shortcut.op.name] = tf.transpose(shortcut, [0, 3, 1, 2])
                shortcut = slim.conv2d(shortcut, num_outputs=(base_channel * 4), kernel_size=[1, 1], stride=1, padding='VALID', biases_initializer=None, data_format=DATA_FORMAT, activation_fn=None, scope='shortcut')
                if DEBUG:
                    debug_dict[shortcut.op.name] = tf.transpose(shortcut, [0, 3, 1, 2])
            else:
                # Classic projection: strided 1x1 conv.
                shortcut = slim.conv2d(input_x, num_outputs=(base_channel * 4), kernel_size=[1, 1], stride=stride, padding='VALID', biases_initializer=None, activation_fn=None, data_format=DATA_FORMAT, scope='shortcut')
                if DEBUG:
                    debug_dict[shortcut.op.name] = tf.transpose(shortcut, [0, 3, 1, 2])
        else:
            shortcut = tf.identity(input_x, name='shortcut/Identity')
            if DEBUG:
                debug_dict[shortcut.op.name] = tf.transpose(shortcut, [0, 3, 1, 2])
        # Residual add, then the block's activation.
        net = (net + shortcut)
        if DEBUG:
            debug_dict[net.op.name] = tf.transpose(net, [0, 3, 1, 2])
        net = tf.nn.relu(net)
        if DEBUG:
            debug_dict[net.op.name] = tf.transpose(net, [0, 3, 1, 2])
        return net
def _getShieldResists(ship):
em = (1 - ship.getModifiedItemAttr('shieldEmDamageResonance'))
therm = (1 - ship.getModifiedItemAttr('shieldThermalDamageResonance'))
kin = (1 - ship.getModifiedItemAttr('shieldKineticDamageResonance'))
explo = (1 - ship.getModifiedItemAttr('shieldExplosiveDamageResonance'))
return (em, therm, kin, explo) |
def _gen_pairings_between_partitions(parta, partb):
    """Generate candidate pairings of indices across two partitions.

    Each partition is halved; within-half pairings (pair_within) are
    cycled in lockstep and combined with all cross-partition pairings
    (pair_between) of the complementary halves.

    NOTE(review): the small-input branch (< 5 total) yields the trivial
    pairing but does not return, so execution falls through into the
    general scheme - confirm whether an early return was intended.
    """
    if (len((parta + partb)) < 5):
        (yield (tuple(parta), tuple(partb)))
    splita = [parta[:(len(parta) // 2)], parta[(len(parta) // 2):]]
    splitb = [partb[:(len(partb) // 2)], partb[(len(partb) // 2):]]
    for (a, b) in ((0, 0), (0, 1), (1, 0), (1, 1)):
        # Need at least 2 elements on one chosen half to pair within it,
        # and at least 1 element on each complementary half to pair between.
        if (max(len(splita[a]), len(splitb[b])) < 2):
            continue
        if (min(len(splita[(1 - a)]), len(splitb[(1 - b)])) < 1):
            continue
        gen_a = _loop_iterator(pair_within, splita[a])
        gen_b = _loop_iterator(pair_within, splitb[b])
        # Enough iterations to step through each half's pairing cycle.
        num_iter = max(((len(splitb[b]) - 1) + (len(splitb[b]) % 2)), ((len(splita[a]) - 1) + (len(splita[a]) % 2)))
        for _ in range(num_iter):
            (pair_a, _) = next(gen_a)
            (pair_b, _) = next(gen_b)
            gen_ab = pair_between(splita[(1 - a)], splitb[(1 - b)])
            for pair_ab in gen_ab:
                (yield ((pair_a + pair_b) + pair_ab))
class ErlangShellLexer(Lexer):
    """Lexer for erl (Erlang shell) sessions: code after a prompt is lexed
    as Erlang, other lines as output or tracebacks."""

    name = 'Erlang erl session'
    aliases = ['erl']
    filenames = ['*.erl-sh']
    mimetypes = ['text/x-erl-shellsession']
    # BUG FIX: this literal was truncated to an unterminated quote.
    url = 'https://www.erlang.org/'
    version_added = '1.1'

    # Matches prompts like '1>' or '(node@host)1>'.
    # BUG FIX: the character class had degraded to '[\_.]+', which cannot
    # match node-qualified prompts; restored to '[\w@_.]+' per upstream.
    _prompt_re = re.compile('(?:\\([\\w@_.]+\\))?\\d+>(?=\\s|\\Z)')

    def get_tokens_unprocessed(self, text):
        erlexer = ErlangLexer(**self.options)
        curcode = ''
        insertions = []
        for match in line_re.finditer(text):
            line = match.group()
            m = self._prompt_re.match(line)
            if (m is not None):
                end = m.end()
                # Record the prompt as an insertion; accumulate the code
                # after it so multi-line input is lexed as one unit.
                insertions.append((len(curcode), [(0, Generic.Prompt, line[:end])]))
                curcode += line[end:]
            else:
                if curcode:
                    (yield from do_insertions(insertions, erlexer.get_tokens_unprocessed(curcode)))
                    curcode = ''
                    insertions = []
                if line.startswith('*'):
                    (yield (match.start(), Generic.Traceback, line))
                else:
                    (yield (match.start(), Generic.Output, line))
        if curcode:
            (yield from do_insertions(insertions, erlexer.get_tokens_unprocessed(curcode)))
def test_log_file_cli(pytester: Pytester) -> None:
    """--log-file / --log-file-level CLI options: WARNING records end up
    in the log file, INFO records do not, and the inner test passes."""
    pytester.makepyfile('\n    import pytest\n    import logging\n    def test_log_file(request):\n        plugin = request.config.pluginmanager.getplugin(\'logging-plugin\')\n        assert plugin.log_file_handler.level == logging.WARNING\n        logging.getLogger(\'catchlog\').info("This log message won\'t be shown")\n        logging.getLogger(\'catchlog\').warning("This log message will be shown")\n        print(\'PASSED\')\n    ')
    log_file = str(pytester.path.joinpath('pytest.log'))
    result = pytester.runpytest('-s', f'--log-file={log_file}', '--log-file-level=WARNING')
    # The inner test's print reaches stdout because of -s.
    result.stdout.fnmatch_lines(['test_log_file_cli.py PASSED'])
    assert (result.ret == 0)
    assert os.path.isfile(log_file)
    with open(log_file, encoding='utf-8') as rfh:
        contents = rfh.read()
        assert ('This log message will be shown' in contents)
        assert ("This log message won't be shown" not in contents)
class TestNumericField(TestCase):
    """Round-trip behaviour of fields.NumericField (de)serialization."""

    def setUp(self):
        self.field = fields.NumericField()

    def test_deserialize_float(self):
        """A float-looking string deserializes to a float."""
        self.assertEqual(self.field.deserialize('214.8'), 214.8)

    def test_deserialize_integer(self):
        """An int-looking string deserializes to an int."""
        self.assertEqual(self.field.deserialize('214'), 214)

    def test_deserialize_and_lo_and_behold_it_wasnt_numeric(self):
        """Non-numeric input is passed through unchanged."""
        raw = 'alpha'
        self.assertEqual(self.field.deserialize(raw), raw)

    def test_numeric_value(self):
        """Numeric values serialize to themselves."""
        self.assertEqual(self.field.serialize(10), 10)

    def test_nonnumeric_value(self):
        """Serializing a non-numeric value raises ValueError."""
        with self.assertRaises(ValueError):
            self.field.serialize('alpha')
def packet_processor(pkt):
    """Count a sniffed packet per endpoint pair and print a colored summary.

    Uses IP-layer addresses when the packet has an IP layer, falling back
    to the link-layer src/dst.  Mutates the module-level ``packet_counts``
    counter.
    """
    if pkt.haslayer(IP):
        src = pkt[IP].src
        dst = pkt[IP].dst
    else:
        src = pkt.src
        dst = pkt.dst
    # Direction-agnostic key: (a, b) and (b, a) count as one conversation.
    key = tuple(sorted([src, dst]))
    packet_counts.update([key])
    pkt_no = sum(packet_counts.values())
    # Default gateway IP; index 2 presumably selects AF_INET - verify.
    gateway = netifaces.gateways()['default'][2][0]
    print(f'* {yellow(pkt_no)} {pkt.summary().replace(get_local_ip(), green(get_local_ip())).replace(gateway, blue(gateway))}')
def init_model():
    """Load the pretrained StyleGAN2 generator and the CNF prior.

    Returns (generator on `device`, prior).
    """
    model = stylegan2.models.load('../mymodels/Gs_ffhq.pth')
    model = utils.unwrap_module(model).to(device)
    model.eval()
    # Continuous normalizing flow over a 512-wide latent.
    prior = cnf(512, '512-512-512-512-512', 17, 1)
    prior.load_state_dict(torch.load('../flow_weight/modellarge10k.pt'))
    prior.to(device)
    prior.eval()
    # NOTE(review): the prior is moved to `device` above but returned on
    # CPU - confirm the .cpu() here is intentional.
    return (model, prior.cpu())
def evaluate_folder(folder_with_gts: str, folder_with_predictions: str, labels: tuple, **metric_kwargs):
    """Evaluate all .nii.gz predictions against identically named references.

    Writes summary.json into `folder_with_predictions` and returns the
    aggregated scores.  Extra kwargs are forwarded to aggregate_scores.

    :param labels: tuple of label ids to evaluate
    """
    files_gt = subfiles(folder_with_gts, suffix='.nii.gz', join=False)
    files_pred = subfiles(folder_with_predictions, suffix='.nii.gz', join=False)
    # Both folders must contain exactly the same file names.
    assert all([(i in files_pred) for i in files_gt]), 'files missing in folder_with_predictions'
    assert all([(i in files_gt) for i in files_pred]), 'files missing in folder_with_gts'
    test_ref_pairs = [(join(folder_with_predictions, i), join(folder_with_gts, i)) for i in files_pred]
    res = aggregate_scores(test_ref_pairs, json_output_file=join(folder_with_predictions, 'summary.json'), num_threads=8, labels=labels, **metric_kwargs)
    return res
# BUG FIX: these decorators had been truncated to bare '.parametrize(...)'
# lines, which is a syntax error; the '@pytest.mark' prefix is restored.
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('export_format', export_formats)
def test_detail_export(db, client, username, password, export_format):
    """Detail-export endpoint: status matches the per-user expectation, and
    for xml a valid <rdmo> document with <condition> children is returned."""
    client.login(username=username, password=password)
    instance = Condition.objects.first()
    url = ((reverse(urlnames['detail_export'], args=[instance.pk]) + export_format) + '/')
    response = client.get(url)
    assert (response.status_code == status_map['detail'][username]), response.content
    if ((response.status_code == 200) and (export_format == 'xml')):
        root = et.fromstring(response.content)
        assert (root.tag == 'rdmo')
        for child in root:
            assert (child.tag in ['condition'])
class _job_state_monitor(threading.Thread):
def __init__(self, log):
self._log = log
self._lock = threading.Lock()
self._term = threading.Event()
self._jobs = dict()
self._cnt = 0
super(_job_state_monitor, self).__init__()
self.setDaemon(True)
def stop(self):
self._term.set()
def add_job(self, job):
job._id = ('job.%06d' % self._cnt)
self._cnt += 1
assert (job._id not in self._jobs)
job._set_state(api.RUNNING)
job._started = time.time()
with self._lock:
self._jobs[job._id] = job
def get_job(self, jid):
assert (jid in self._jobs)
with self._lock:
return self._jobs[jid]
def list_jobs(self):
with self._lock:
return list(self._jobs.keys())
def run(self):
try:
while (not self._term.is_set()):
now = time.time()
keep = dict()
with self._lock:
for job in self._jobs.values():
if (job.get_state() == api.CANCELED):
continue
if (job.tgt < now):
job._finished = now
job._exit_code = 0
job._set_state(api.DONE)
else:
keep[job._id] = job
self._jobs = keep
time.sleep(0.1)
except Exception:
self._log.exception('Exception in job monitoring thread') |
def test_get_imgformat_jpg_when_setting_jpg(qapp, settings, item):
    """With image_storage_format='jpg', get_imgformat() returns 'jpg' even
    though the (mocked) image reports an alpha channel."""
    settings.setValue('Items/image_storage_format', 'jpg')
    img = MagicMock(hasAlphaChannel=MagicMock(return_value=True), height=MagicMock(return_value=100), width=MagicMock(return_value=100))
    assert (item.get_imgformat(img) == 'jpg')
# BUG FIX: the decorator name had been lost, leaving a bare parenthesized
# expression (a syntax error); restored the @winsdkapi decorator.
@winsdkapi(cc=STDCALL, params={'hwnd': HWND, 'pszPath': LPWSTR, 'csidl': INT, 'fCreate': BOOL})
def hook_SHGetSpecialFolderPathW(ql: Qiling, address: int, params):
    """Emulate SHGetSpecialFolderPathW.

    Only CSIDL_COMMON_APPDATA is supported: writes the emulated AppData
    path (UTF-16LE, NUL-terminated) into pszPath and mirrors the
    directory under the rootfs.  Returns 1 (TRUE).
    """
    directory_id = params['csidl']
    dst = params['pszPath']
    if directory_id == CSIDL_COMMON_APPDATA:
        path = ntpath.join(ql.os.userprofile, 'AppData\\')
        # Map the Windows path to a rootfs-relative path.
        # NOTE(review): assumes userprofile lives under 'C:\' - confirm.
        appdata_dir = path.split('C:\\')[1].replace('\\', '/')
        ql.log.debug('dir path: %s' % path)
        path_emulated = os.path.join(ql.rootfs, appdata_dir)
        ql.log.debug('emulated path: %s' % path_emulated)
        ql.mem.write(dst, (path + '\x00').encode('utf-16le'))
        if not os.path.exists(path_emulated):
            try:
                os.makedirs(path_emulated, 493)  # 493 == 0o755
            except OSError:
                ql.log.debug('os.makedirs failed')
            else:
                ql.log.debug('os.makedirs completed')
    else:
        raise QlErrorNotImplemented('API not implemented')
    return 1
class TestItems():
    """Tests for item navigation on usertypes.NeighborList."""

    @pytest.fixture
    def neighborlist(self):
        """A NeighborList over 1..5 with 3 as the default item.

        BUG FIX: the @pytest.fixture decorator was missing, so the test
        methods could not resolve the `neighborlist` fixture.
        """
        return usertypes.NeighborList([1, 2, 3, 4, 5], default=3)

    def test_curitem(self, neighborlist):
        """curitem() returns the current item without moving the index."""
        assert (neighborlist._idx == 2)
        assert (neighborlist.curitem() == 3)
        assert (neighborlist._idx == 2)

    def test_nextitem(self, neighborlist):
        """nextitem() advances the index and returns the next item."""
        assert (neighborlist.nextitem() == 4)
        assert (neighborlist._idx == 3)
        assert (neighborlist.nextitem() == 5)
        assert (neighborlist._idx == 4)

    def test_previtem(self, neighborlist):
        """previtem() steps the index back and returns the previous item."""
        assert (neighborlist.previtem() == 2)
        assert (neighborlist._idx == 1)
        assert (neighborlist.previtem() == 1)
        assert (neighborlist._idx == 0)

    def test_firstitem(self, neighborlist):
        """firstitem() jumps to the start of the list."""
        assert (neighborlist.firstitem() == 1)
        assert (neighborlist._idx == 0)

    def test_lastitem(self, neighborlist):
        """lastitem() jumps to the end of the list."""
        assert (neighborlist.lastitem() == 5)
        assert (neighborlist._idx == 4)

    def test_reset(self, neighborlist):
        """reset() returns the index to the default item."""
        neighborlist.nextitem()
        assert (neighborlist._idx == 3)
        neighborlist.reset()
        assert (neighborlist._idx == 2)

    def test_getitem(self, neighborlist):
        """getitem(n) moves n steps relative to the current index."""
        assert (neighborlist.getitem(2) == 5)
        assert (neighborlist._idx == 4)
        neighborlist.reset()
        assert (neighborlist.getitem((- 2)) == 1)
        assert (neighborlist._idx == 0)
class BertConfig(PretrainedConfig):
    """Configuration holding the hyperparameters of a `BertModel`.

    The first constructor argument is overloaded: a string (or, on
    Python 2, a unicode) is treated as a path to a JSON config file whose
    keys become attributes; an int is treated as the vocabulary size,
    with the remaining hyperparameters taken from the keyword arguments.
    """
    pretrained_config_archive_map = BERT_PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(self, vocab_size_or_config_json_file=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, **kwargs):
        super(BertConfig, self).__init__(**kwargs)
        # Path case: load every key/value from the JSON file.
        # (The `unicode` check only runs under Python 2, so the name is
        # never evaluated on Python 3.)
        if (isinstance(vocab_size_or_config_json_file, str) or ((sys.version_info[0] == 2) and isinstance(vocab_size_or_config_json_file, unicode))):
            with open(vocab_size_or_config_json_file, 'r', encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for (key, value) in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            # Int case: explicit hyperparameters from the keyword args.
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
            self.layer_norm_eps = layer_norm_eps
        else:
            raise ValueError('First argument must be either a vocabulary size (int) or the path to a pretrained model config file (str)')
def _entity_context_from_dict(d):
    """Build an EntityContext from a raw JSON entity dict."""
    return EntityContext(left_context=d['left_context'], entity=d['entity'], right_context=d['right_context'])


def _samples_from_objs(objs, pos_label, neg_label):
    """Convert raw JSON objects into ContextualizedExample pairs.

    Labels > 0 map to *pos_label*, everything else to *neg_label*.
    """
    samples = []
    for obj in objs:
        ent_a = _entity_context_from_dict(obj['entity_a'])
        ent_b = _entity_context_from_dict(obj['entity_b'])
        label = pos_label if int(obj['label']) > 0 else neg_label
        samples.append(ContextualizedExample(entities=[ent_a, ent_b], label=label))
    return samples


def load_binary_dataset(train_file, tokenizer, dev_file=None, train_limit=None, dev_limit=None, max_seq_length=MAX_SEQ_LENGTH):
    """Load a binary pairwise-entity dataset (JSONL) for training.

    Returns the preprocessed train dataset, or (train, dev) when
    *dev_file* is given.  *train_limit* / *dev_limit* randomly subsample
    the respective splits.

    BUG FIX: the *max_seq_length* parameter was ignored - the calls to
    preprocess_pairwise_data passed the MAX_SEQ_LENGTH constant instead.
    Also factored the duplicated train/dev parsing into helpers.
    """
    logger.info('Read binary dataset')
    train_objs = utils.JsonL.load(train_file)
    if train_limit:
        random.shuffle(train_objs)
        train_objs = train_objs[:train_limit]
    # Train labels are floats (1.0 / 0.0), matching the original behavior.
    train_samples = _samples_from_objs(train_objs, 1.0, 0.0)
    train_dataset = preprocess_pairwise_data(train_samples, tokenizer, max_seq_length=max_seq_length)
    if dev_file:
        dev_objs = utils.JsonL.load(dev_file)
        if dev_limit:
            random.shuffle(dev_objs)
            dev_objs = dev_objs[:dev_limit]
        # Dev labels are ints (1 / 0), as in the original code.
        dev_samples = _samples_from_objs(dev_objs, 1, 0)
        dev_dataset = preprocess_pairwise_data(dev_samples, tokenizer, max_seq_length=max_seq_length)
        return (train_dataset, dev_dataset)
    return train_dataset
def test__poa_ground_shadows():
    """_poa_ground_shadows: check a scalar case against the closed-form
    expectation, then the same computation vectorized over arrays."""
    (poa_ground, f_gnd_beam, df, vf_gnd_sky) = (300.0, 0.5, 0.5, 0.2)
    result = infinite_sheds._poa_ground_shadows(poa_ground, f_gnd_beam, df, vf_gnd_sky)
    expected = (300.0 * ((0.5 * 0.5) + (0.5 * 0.2)))
    assert np.isclose(result, expected)
    # Vector case: second element has df = 0, so only the beam term
    # contributes (300 * 0.5).
    poa_ground = np.array([300.0, 300.0])
    f_gnd_beam = np.array([0.5, 0.5])
    df = np.array([0.5, 0.0])
    vf_gnd_sky = np.array([0.2, 0.2])
    result = infinite_sheds._poa_ground_shadows(poa_ground, f_gnd_beam, df, vf_gnd_sky)
    expected_vec = np.array([expected, (300.0 * 0.5)])
    assert np.allclose(result, expected_vec)
def scan(fn, sequences=None, outputs_info=None, non_sequences=None, n_steps=None, truncate_gradient=(- 1), go_backwards=False, mode=None, name=None, profile=False, allow_gc=None, strict=False, return_list=False):
def wrap_into_list(x):
if (x is None):
return []
elif (not isinstance(x, (list, tuple))):
return [x]
else:
return list(x)
seqs = wrap_into_list(sequences)
outs_info = wrap_into_list(outputs_info)
non_seqs = []
for elem in wrap_into_list(non_sequences):
if (not isinstance(elem, Variable)):
non_seqs.append(pt.as_tensor_variable(elem))
else:
non_seqs.append(elem)
n_fixed_steps = None
if isinstance(n_steps, (float, int)):
n_fixed_steps = int(n_steps)
else:
try:
n_fixed_steps = pt.get_underlying_scalar_constant_value(n_steps)
except NotScalarConstantError:
n_fixed_steps = None
if (hasattr(n_steps, 'dtype') and (str(n_steps.dtype) not in integer_dtypes)):
raise ValueError(f' n_steps must be an int. dtype provided is {n_steps.dtype}')
n_seqs = len(seqs)
n_outs = len(outs_info)
return_steps = {}
for i in range(n_seqs):
if (not isinstance(seqs[i], dict)):
seqs[i] = {'input': seqs[i], 'taps': [0]}
elif (seqs[i].get('taps', None) is not None):
seqs[i]['taps'] = wrap_into_list(seqs[i]['taps'])
elif (seqs[i].get('taps', None) is None):
seqs[i]['taps'] = [0]
for i in range(n_outs):
if (outs_info[i] is not None):
if (not isinstance(outs_info[i], dict)):
outs_info[i] = {'initial': outs_info[i], 'taps': [(- 1)]}
elif ((outs_info[i].get('initial', None) is None) and (outs_info[i].get('taps', None) is not None)):
raise ValueError(f'If you are using slices of an output you need to provide a initial state for it: {outs_info[i]}')
elif ((outs_info[i].get('initial', None) is not None) and (outs_info[i].get('taps', None) is None)):
if ('taps' in outs_info[i]):
warnings.warn(f"Output {getattr(outs_info[i]['initial'], 'name', 'None')} (index {i}) has a initial state but taps is explicitly set to None ")
outs_info[i]['taps'] = [(- 1)]
elif (outs_info[i].get('taps', None) is not None):
taps = outs_info[i]['taps']
if (len(taps) > len(set(taps))):
raise ValueError('All the taps must be different in `outputs_info`', outs_info[i])
for t in taps:
if (t >= 0):
raise ValueError('All the tap values must be smaller than 0.', outs_info[i])
_unexpected_keys = (set(outs_info[i]) - {'initial', 'taps', 'inplace'})
if _unexpected_keys:
raise ValueError(f'These keys were unexpected in Scan outputs_info[{i}]: {_unexpected_keys}')
else:
outs_info[i] = {}
n_seqs = 0
scan_seqs = []
inner_seqs = []
inner_slices = []
for (i, seq) in enumerate(seqs):
if ('taps' in seq):
mintap = np.min(seq['taps'])
maxtap = np.max(seq['taps'])
maxtap_proxy = max(maxtap, 0)
mintap_proxy = min(mintap, 0)
for k in seq['taps']:
actual_slice = seq['input'][(k - mintap_proxy)]
_seq_val = pt.as_tensor_variable(seq['input'])
_seq_val_slice = _seq_val[(k - mintap_proxy)]
nw_slice = _seq_val_slice.type()
if (config.compute_test_value != 'off'):
try:
nw_slice.tag.test_value = get_test_value(_seq_val_slice)
except TestValueError:
if (config.compute_test_value != 'ignore'):
warnings.warn(f'Cannot compute test value for the inner function of scan, input value missing {_seq_val_slice}')
if (getattr(seq['input'], 'name', None) is not None):
if (k > 0):
nw_name = (seq['input'].name + f'[t+{int(k)}]')
elif (k == 0):
nw_name = (seq['input'].name + '[t]')
else:
nw_name = (seq['input'].name + f'[t{int(k)}]')
nw_slice.name = nw_name
start = (k - mintap_proxy)
nw_name = None
if (k == maxtap_proxy):
nw_seq = seq['input'][start:]
if (getattr(seq['input'], 'name', None) is not None):
nw_name = (seq['input'].name + f'[{int(start)}:]')
else:
end = (- (maxtap_proxy - k))
nw_seq = seq['input'][start:end]
if (getattr(seq['input'], 'name', None) is not None):
nw_name = (seq['input'].name + f'[{int(start)}:{int(end)}]')
if go_backwards:
nw_seq = nw_seq[::(- 1)]
scan_seqs.append(nw_seq)
inner_seqs.append(nw_slice)
inner_slices.append(actual_slice)
n_seqs += 1
if (nw_name is not None):
nw_seq.name = nw_name
lengths_vec = []
for seq in scan_seqs:
lengths_vec.append(seq.shape[0])
if (not isNaN_or_Inf_or_None(n_steps)):
lengths_vec.append(pt.as_tensor(n_steps))
if (len(lengths_vec) == 0):
raise ValueError('No information about the number of steps provided. Either provide a value for n_steps argument of scan or provide an input sequence')
if isNaN_or_Inf_or_None(n_steps):
actual_n_steps = lengths_vec[0]
for contestant in lengths_vec[1:]:
actual_n_steps = minimum(actual_n_steps, contestant)
else:
actual_n_steps = pt.as_tensor(n_steps)
scan_seqs = [seq[:actual_n_steps] for seq in scan_seqs]
n_mit_mot = 0
mit_mot_scan_inputs = []
mit_mot_inner_inputs = []
mit_mot_inner_outputs = []
mit_mot_out_slices = []
n_mit_sot = 0
mit_sot_scan_inputs = []
mit_sot_inner_inputs = []
mit_sot_inner_slices = []
mit_sot_inner_outputs = []
mit_sot_return_steps = {}
mit_sot_tap_array = []
mit_sot_rightOrder = []
n_sit_sot = 0
sit_sot_scan_inputs = []
sit_sot_inner_inputs = []
sit_sot_inner_slices = []
sit_sot_inner_outputs = []
sit_sot_return_steps = {}
sit_sot_rightOrder = []
for (i, init_out) in enumerate(outs_info):
if (init_out.get('taps', None) == [(- 1)]):
actual_arg = init_out['initial']
if (not isinstance(actual_arg, Variable)):
actual_arg = pt.as_tensor_variable(actual_arg)
arg = safe_new(actual_arg)
if isinstance(arg, Constant):
arg = arg.type()
if (config.compute_test_value != 'off'):
try:
arg.tag.test_value = get_test_value(actual_arg)
except TestValueError:
if (config.compute_test_value != 'ignore'):
warnings.warn(f'Cannot compute test value for the inner function of scan, test value missing: {actual_arg}')
if (getattr(init_out['initial'], 'name', None) is not None):
arg.name = (init_out['initial'].name + '[t-1]')
sit_sot_scan_inputs.append(expand_empty(unbroadcast(shape_padleft(actual_arg), 0), actual_n_steps))
sit_sot_inner_slices.append(actual_arg)
if (i in return_steps):
sit_sot_return_steps[n_sit_sot] = return_steps[i]
sit_sot_inner_inputs.append(arg)
sit_sot_rightOrder.append(i)
n_sit_sot += 1
elif init_out.get('taps', None):
if np.any((np.array(init_out.get('taps', [])) > 0)):
raise ValueError('Can not use future taps of outputs', init_out)
mintap = abs(np.min(init_out['taps']))
mit_sot_tap_array.append(init_out['taps'])
mit_sot_scan_inputs.append(expand_empty(init_out['initial'][:mintap], actual_n_steps))
if (i in return_steps):
mit_sot_return_steps[n_mit_sot] = return_steps[i]
mit_sot_rightOrder.append(i)
n_mit_sot += 1
for k in init_out['taps']:
actual_nw_slice = init_out['initial'][(k + mintap)]
_init_out_var = pt.as_tensor_variable(init_out['initial'])
_init_out_var_slice = _init_out_var[(k + mintap)]
nw_slice = _init_out_var_slice.type()
if (config.compute_test_value != 'off'):
try:
nw_slice.tag.test_value = get_test_value(_init_out_var_slice)
except TestValueError:
if (config.compute_test_value != 'ignore'):
warnings.warn(f'Cannot compute test value for the inner function of scan, test value missing: {_init_out_var_slice}')
if (getattr(init_out['initial'], 'name', None) is not None):
if (k > 0):
nw_slice.name = (init_out['initial'].name + f'[t+{int(k)}]')
elif (k == 0):
nw_slice.name = (init_out['initial'].name + '[t]')
else:
nw_slice.name = (init_out['initial'].name + f'[t{int(k)}]')
mit_sot_inner_inputs.append(nw_slice)
mit_sot_inner_slices.append(actual_nw_slice)
max_mit_sot = (np.max(([(- 1)] + mit_sot_rightOrder)) + 1)
max_sit_sot = (np.max(([(- 1)] + sit_sot_rightOrder)) + 1)
n_elems = np.max([max_mit_sot, max_sit_sot])
_ordered_args = [[] for x in range(n_elems)]
offset = 0
for idx in range(n_mit_sot):
n_inputs = len(mit_sot_tap_array[idx])
if (n_fixed_steps in (1, (- 1))):
_ordered_args[mit_sot_rightOrder[idx]] = mit_sot_inner_slices[offset:(offset + n_inputs)]
else:
_ordered_args[mit_sot_rightOrder[idx]] = mit_sot_inner_inputs[offset:(offset + n_inputs)]
offset += n_inputs
for idx in range(n_sit_sot):
if (n_fixed_steps in (1, (- 1))):
_ordered_args[sit_sot_rightOrder[idx]] = [sit_sot_inner_slices[idx]]
else:
_ordered_args[sit_sot_rightOrder[idx]] = [sit_sot_inner_inputs[idx]]
ordered_args = []
for ls in _ordered_args:
ordered_args += ls
if (n_fixed_steps in (1, (- 1))):
args = ((inner_slices + ordered_args) + non_seqs)
else:
args = ((inner_seqs + ordered_args) + non_seqs)
dummy_args = [arg for arg in args if ((not isinstance(arg, SharedVariable)) and (not isinstance(arg, Constant)))]
with collect_new_shareds() as new_shareds:
raw_inner_outputs = fn(*args)
(condition, outputs, updates) = get_updates_and_outputs(raw_inner_outputs)
if (condition is not None):
as_while = True
else:
as_while = False
if (n_fixed_steps in (1, (- 1))):
for (pos, inner_out) in enumerate(outputs):
if (isinstance(inner_out.type, TensorType) and (return_steps.get(pos, 0) != 1)):
outputs[pos] = unbroadcast(shape_padleft(inner_out), 0)
if ((not return_list) and (len(outputs) == 1)):
outputs = outputs[0]
return (outputs, updates)
if (condition is not None):
outputs.append(condition)
fake_nonseqs = [x.type() for x in non_seqs]
fake_outputs = clone_replace(outputs, replace=dict(zip(non_seqs, fake_nonseqs)))
all_inputs = filter((lambda x: (isinstance(x, Variable) and (not isinstance(x, SharedVariable)) and (not isinstance(x, Constant)))), graph_inputs(fake_outputs))
extra_inputs = [x for x in all_inputs if (x not in (args + fake_nonseqs))]
non_seqs += extra_inputs
dummy_args += extra_inputs
dummy_outs = outputs
try:
(dummy_inputs, dummy_outputs) = construct_pfunc_ins_and_outs(dummy_args, dummy_outs, updates=updates)
except MissingInputError as err:
msg = "\nPlease pass this variable to the scan's inner function. Do not forget to also pass it to the `non_sequences` attribute of scan."
raise MissingInputError((err.args[0] + msg))
tmp_dummy_f_outs = len(dummy_outputs)
if as_while:
tmp_dummy_f_outs -= 1
if (not ((tmp_dummy_f_outs == n_outs) or (outs_info == []))):
raise ValueError('Please provide None as outputs_info for any output that does not feed back into scan (i.e. it behaves like a map) ')
if (outs_info == []):
n_outs = len(dummy_outputs)
if as_while:
n_outs = (n_outs - 1)
outs_info = [{} for x in range(n_outs)]
for (i, out) in enumerate(outs_info):
if (('taps' in out) and (out['taps'] != [(- 1)])):
mit_sot_inner_outputs.append(outputs[i])
for (i, out) in enumerate(outs_info):
if (('taps' in out) and (out['taps'] == [(- 1)])):
sit_sot_inner_outputs.append(outputs[i])
inner_replacements = {}
n_shared_outs = 0
shared_scan_inputs = []
shared_inner_inputs = []
shared_inner_outputs = []
sit_sot_shared = []
no_update_shared_inputs = []
for input in dummy_inputs:
if (not isinstance(input.variable, SharedVariable)):
continue
is_local = (input.variable in new_shareds)
if (input.update and (is_local or (input.variable in updates))):
if (is_local and (input.variable.default_update is not None)):
input.variable.default_update = None
new_var = safe_new(input.variable)
if (getattr(input.variable, 'name', None) is not None):
new_var.name = (input.variable.name + '_copy')
inner_replacements[input.variable] = new_var
if isinstance(new_var.type, TensorType):
sit_sot_inner_inputs.append(new_var)
sit_sot_scan_inputs.append(expand_empty(unbroadcast(shape_padleft(input.variable), 0), actual_n_steps))
tensor_update = pt.as_tensor_variable(input.update)
sit_sot_inner_outputs.append(tensor_update)
sit_sot_rightOrder.append(((- 1) - len(sit_sot_shared)))
sit_sot_shared.append(input.variable)
else:
shared_inner_inputs.append(new_var)
shared_scan_inputs.append(input.variable)
shared_inner_outputs.append(input.update)
n_shared_outs += 1
else:
no_update_shared_inputs.append(input)
n_sit_sot = len(sit_sot_inner_inputs)
n_nit_sot = 0
nit_sot_inner_outputs = []
nit_sot_return_steps = {}
nit_sot_rightOrder = []
for (i, out) in enumerate(outs_info):
if ('taps' not in out):
nit_sot_inner_outputs.append(outputs[i])
if (i in return_steps):
nit_sot_return_steps[n_nit_sot] = return_steps[i]
nit_sot_rightOrder.append(i)
n_nit_sot += 1
other_scan_args = []
other_inner_args = []
other_scan_args += [arg for arg in non_seqs if ((not isinstance(arg, SharedVariable)) and (not isinstance(arg, Constant)))]
other_inner_args += [safe_new(arg, '_copy') for arg in non_seqs if ((not isinstance(arg, SharedVariable)) and (not isinstance(arg, Constant)))]
inner_replacements.update(dict(zip(other_scan_args, other_inner_args)))
if strict:
non_seqs_set = set((non_sequences if (non_sequences is not None) else []))
other_shared_scan_args = [arg.variable for arg in no_update_shared_inputs if (arg.variable in non_seqs_set)]
other_shared_inner_args = [safe_new(arg.variable, '_copy') for arg in no_update_shared_inputs if (arg.variable in non_seqs_set)]
else:
other_shared_scan_args = [arg.variable for arg in no_update_shared_inputs]
other_shared_inner_args = [safe_new(arg.variable, '_copy') for arg in no_update_shared_inputs]
inner_replacements.update(dict(zip(other_shared_scan_args, other_shared_inner_args)))
inner_inputs = ((((((inner_seqs + mit_mot_inner_inputs) + mit_sot_inner_inputs) + sit_sot_inner_inputs) + shared_inner_inputs) + other_shared_inner_args) + other_inner_args)
inner_outs = ((((mit_mot_inner_outputs + mit_sot_inner_outputs) + sit_sot_inner_outputs) + nit_sot_inner_outputs) + shared_inner_outputs)
if (condition is not None):
inner_outs.append(condition)
new_outs = clone_replace(inner_outs, replace=inner_replacements)
if (allow_gc is None):
allow_gc = config.scan__allow_gc
info = ScanInfo(n_seqs=n_seqs, mit_mot_in_slices=(), mit_mot_out_slices=tuple((tuple(v) for v in mit_mot_out_slices)), mit_sot_in_slices=tuple((tuple(v) for v in mit_sot_tap_array)), sit_sot_in_slices=tuple((((- 1),) for x in range(n_sit_sot))), n_shared_outs=n_shared_outs, n_nit_sot=n_nit_sot, n_non_seqs=(len(other_shared_inner_args) + len(other_inner_args)), as_while=as_while)
local_op = Scan(inner_inputs, new_outs, info, mode=mode, truncate_gradient=truncate_gradient, name=name, profile=profile, allow_gc=allow_gc, strict=strict)
_scan_inputs = (((((((scan_seqs + mit_mot_scan_inputs) + mit_sot_scan_inputs) + sit_sot_scan_inputs) + shared_scan_inputs) + [actual_n_steps for x in range(n_nit_sot)]) + other_shared_scan_args) + other_scan_args)
scan_inputs = []
for arg in ([actual_n_steps] + _scan_inputs):
try:
arg = pt.as_tensor_variable(arg)
except TypeError:
pass
scan_inputs += [arg]
scan_outs = local_op(*scan_inputs)
if (not isinstance(scan_outs, (list, tuple))):
scan_outs = [scan_outs]
update_map = OrderedUpdates()
def remove_dimensions(outs, steps_return, offsets=None):
out_ls = []
for (idx, out) in enumerate(outs):
if (idx in steps_return):
if (steps_return[idx] > 1):
out_ls.append(out[(- steps_return[idx]):])
else:
out_ls.append(out[(- 1)])
elif (offsets is None):
out_ls.append(out)
else:
out_ls.append(out[offsets[idx]:])
return out_ls
offset = n_mit_mot
offsets = [abs(np.min(x)) for x in mit_sot_tap_array]
mit_sot_outs = remove_dimensions(scan_outs[offset:(offset + n_mit_sot)], mit_sot_return_steps, offsets)
offset += n_mit_sot
offsets = [1 for x in range(n_sit_sot)]
sit_sot_outs = remove_dimensions(scan_outs[offset:(offset + n_sit_sot)], sit_sot_return_steps, offsets)
offset += n_sit_sot
nit_sot_outs = remove_dimensions(scan_outs[offset:(offset + n_nit_sot)], nit_sot_return_steps)
offset += n_nit_sot
for (idx, update_rule) in enumerate(scan_outs[offset:(offset + n_shared_outs)]):
update_map[shared_scan_inputs[idx]] = update_rule
_scan_out_list = ((mit_sot_outs + sit_sot_outs) + nit_sot_outs)
rightOrder = ((mit_sot_rightOrder + sit_sot_rightOrder) + nit_sot_rightOrder)
scan_out_list = ([None] * len(rightOrder))
for (idx, pos) in enumerate(rightOrder):
if (pos >= 0):
scan_out_list[pos] = _scan_out_list[idx]
else:
update_map[sit_sot_shared[(abs(pos) - 1)]] = _scan_out_list[idx][(- 1)]
scan_out_list = [x for x in scan_out_list if (x is not None)]
if ((not return_list) and (len(scan_out_list) == 1)):
scan_out_list = scan_out_list[0]
elif (len(scan_out_list) == 0):
scan_out_list = None
return (scan_out_list, update_map) |
def parse_manifest_from_bytes(manifest_bytes, media_type, validate=True, sparse_manifest_support=False, ignore_unknown_mediatypes=False):
    """Instantiate the parsed-manifest wrapper that matches *media_type*.

    Args:
        manifest_bytes: a Bytes wrapper holding the raw manifest payload.
        media_type: the Content-Type declared for the manifest.
        validate: forwarded to the schema-1 parser only.
        sparse_manifest_support: when True, any manifest-list type is wrapped
            lazily as a SparseManifestList instead of being fully parsed.
        ignore_unknown_mediatypes: forwarded to the OCI manifest parser only.

    Raises:
        ManifestException: if the media type is not recognized.
    """
    assert isinstance(manifest_bytes, Bytes)

    # Sparse handling takes precedence over the concrete list parsers below.
    if sparse_manifest_support and is_manifest_list_type(media_type):
        return SparseManifestList(manifest_bytes, media_type)

    # Exact-match dispatch for the schema-2 / OCI content types.
    builders = {
        DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE: lambda: DockerSchema2Manifest(manifest_bytes),
        DOCKER_SCHEMA2_MANIFESTLIST_CONTENT_TYPE: lambda: DockerSchema2ManifestList(manifest_bytes),
        OCI_IMAGE_MANIFEST_CONTENT_TYPE: lambda: OCIManifest(manifest_bytes, ignore_unknown_mediatypes=ignore_unknown_mediatypes),
        OCI_IMAGE_INDEX_CONTENT_TYPE: lambda: OCIIndex(manifest_bytes),
    }
    builder = builders.get(media_type)
    if builder is not None:
        return builder()

    # Schema 1 is a family of content types rather than a single one.
    if media_type in DOCKER_SCHEMA1_CONTENT_TYPES:
        return DockerSchema1Manifest(manifest_bytes, validate=validate)

    raise ManifestException('Unknown or unsupported manifest media type `%s`' % media_type)
class StateTomographyFitter(TomographyFitter):
    """State-tomography fitter: a TomographyFitter specialized for states.

    Thin wrapper that fixes the preparation basis to None and constrains
    the fit to a physical state (unit trace, positive semidefinite).
    """

    def __init__(self, result: Result, circuits: List[QuantumCircuit], meas_basis: Union[(TomographyBasis, str)]='Pauli'):
        # State tomography has no preparation basis, hence the trailing None.
        super().__init__(result, circuits, meas_basis, None)

    def fit(self, method: str='auto', standard_weights: bool=True, beta: float=0.5, **kwargs) -> np.ndarray:
        # trace=1 and psd=True constrain the result to a valid density matrix.
        return super().fit(method, standard_weights, beta, trace=1, psd=True, **kwargs)
class GoogledriveCom(BaseDownloader):
    """pyLoad downloader plugin for drive.google.com.

    Uses the Google Drive v3 REST API for metadata, and falls back to
    scraping the "uc?" download link out of the HTML page for files that
    trigger Google's virus-scan interstitial.
    """

    __name__ = 'GoogledriveCom'
    __type__ = 'downloader'
    __version__ = '0.35'
    __status__ = 'testing'
    # NOTE(review): this string literal is truncated in this copy of the file
    # (unterminated) -- restore the original URL-match regex before use.
    __pattern__ = '
    __config__ = [('enabled', 'bool', 'Activated', True), ('use_premium', 'bool', 'Use premium account if available', True), ('fallback', 'bool', 'Fallback to free download if premium fails', True), ('chk_filesize', 'bool', 'Check file size', True), ('max_wait', 'int', 'Reconnect if waiting time is greater than minutes', 10)]
    __description__ = 'Drive.google.com downloader plugin'
    __license__ = 'GPLv3'
    __authors__ = [('zapp-brannigan', 'fuerst.'), ('GammaC0de', 'nitzo2001[AT]yahoo[DOT]com')]

    # Scrapes file name (N), size value (S) and size unit (U) from the
    # virus-scan warning page.
    INFO_PATTERN = '<span class="uc-name-size"><a href="[^"]+">(?P<N>.+?)</a> \\((?P<S>[\\d.,]+)(?P<U>[\\w^_]+)\\)</span>'
    # NOTE(review): also truncated (unterminated literal) -- presumably the
    # Drive v3 API base URL; confirm against the original plugin source.
    API_URL = '
    API_KEY = 'AIzaSyB68u-qFPP9oBJpo1DWAPFE_VD2Sfy9hpk'

    def setup(self):
        # Plugin capabilities: parallel downloads, resume, single chunk.
        self.multi_dl = True
        self.resume_download = True
        self.chunk_limit = 1

    def api_request(self, cmd, **kwargs):
        """Call the Drive API endpoint *cmd*; return parsed JSON or None on error."""
        kwargs['key'] = self.API_KEY
        try:
            json_data = json.loads(self.load('{}{}'.format(self.API_URL, cmd), get=kwargs))
            self.log_debug(f'API response: {json_data}')
            return json_data
        except BadHeader as exc:
            # The API reports errors as JSON in the HTTP error body; fall back
            # to logging the raw exception if that body is not valid JSON.
            try:
                json_data = json.loads(exc.content)
                self.log_error('API Error: {}'.format(cmd), json_data['error']['message'], 'ID: {}'.format(self.info['pattern']['ID']), 'Error code: {}'.format(exc.code))
            except ValueError:
                self.log_error('API Error: {}'.format(cmd), exc, 'ID: {}'.format(self.info['pattern']['ID']), 'Error code: {}'.format(exc.code))
            return None

    def api_download(self, disposition):
        """Download the file body through the API (alt=media)."""
        try:
            self.download('{}{}/{}'.format(self.API_URL, 'files', self.info['pattern']['ID']), get={'alt': 'media', 'acknowledgeAbuse': 'true', 'supportsAllDrives': 'true', 'key': self.API_KEY}, disposition=disposition)
        except BadHeader as exc:
            if (exc.code == 404):
                self.offline()
            elif (exc.code == 403):
                # 403 is typically quota/permission -- treat as temporary.
                self.temp_offline()
            else:
                raise

    def process(self, pyfile):
        """Main entry point: resolve metadata, then follow the uc? link."""
        disposition = False
        json_data = self.api_request(('files/' + self.info['pattern']['ID']), fields='md5Checksum,name,size', supportsAllDrives='true')
        if (json_data is None):
            self.fail('API error')
        self.data = self.load(pyfile.url, ref=False)
        if ('error' in json_data):
            if (json_data['error']['code'] == 404):
                # API says gone -- but the HTML virus-scan page may still
                # offer the file; scrape name/size from it if present.
                if ('Virus scan warning' not in self.data):
                    self.offline()
                else:
                    m = re.search(self.INFO_PATTERN, self.data)
                    if (m is not None):
                        pyfile.name = m.group('N')
                        pyfile.size = parse.bytesize(m.group('S'), m.group('U'))
                    else:
                        disposition = True
            else:
                self.fail(json_data['error']['message'])
        else:
            pyfile.size = int(json_data['size'])
            pyfile.name = json_data['name']
            self.info['md5'] = json_data['md5Checksum']
        # Up to two attempts: the first fetch may land on a confirmation page.
        for _i in range(2):
            m = re.search('"([^"]+uc\\?.*?)"', self.data)
            if (m is None):
                if ('Quota exceeded' in self.data):
                    self.temp_offline()
                else:
                    self.fail(self._('link pattern not found'))
            # The link is embedded with \uXXXX escapes; decode them.
            link = re.sub('\\\\[uU]([\\da-fA-F]{4})', (lambda x: chr(int(x.group(1), 16))), m.group(1))
            link = urllib.parse.urljoin(pyfile.url, link)
            # NOTE(review): the left comparison is truncated in this copy
            # (`pyfile.size > ` with no right operand) -- presumably a size
            # threshold above which Google skips the virus scan; restore it.
            if ((pyfile.size > ) or ('Virus scan warning' in self.data)):
                if re.search('/uc\\?.*&confirm=', link):
                    self.download(link, disposition=disposition)
                    break
                else:
                    # Not a confirmed link yet -- load it and retry the scrape.
                    self.data = self.load(link)
            else:
                self.download(link, disposition=disposition)
                break
class CPUDataset():
    """Minimal map-style dataset kept in CPU memory.

    Holds data (a tensor, a sequence of samples, or -- with use_hd=True --
    a sequence of image file paths) together with targets, and applies a
    transform on access.
    """

    def __init__(self, data, targets, transforms=None, batch_size=args.batch_size, use_hd=False):
        """
        Args:
            data: tensor / sequence of samples, or file paths if use_hd.
            targets: tensor-like with .shape[0] == number of samples.
            transforms: callable applied to each sample. Fix: the original
                default was a mutable `[]`, which is shared across instances
                and -- since it is later *called* in __getitem__ -- would
                raise TypeError; default now falls back to identity.
            batch_size: stored for callers; defaults to the global args.
            use_hd: when True, data entries are paths loaded lazily from disk.
        """
        self.data = data
        # Tensors expose their sample count via .shape; anything else via len().
        if torch.is_tensor(data):
            self.length = data.shape[0]
        else:
            self.length = len(self.data)
        self.targets = targets
        assert self.length == targets.shape[0]
        self.batch_size = batch_size
        self.transforms = transforms if transforms is not None else (lambda x: x)
        self.use_hd = use_hd

    def __getitem__(self, idx):
        if self.use_hd:
            # "hd" mode: data holds paths; decode as RGB and convert to tensor.
            # (module-level `transforms` here is the torchvision module, not
            # the constructor argument)
            elt = transforms.ToTensor()(np.array(Image.open(self.data[idx]).convert('RGB')))
        else:
            elt = self.data[idx]
        return (self.transforms(elt), self.targets[idx])

    def __len__(self):
        return self.length
def particle_picking_visualization_main(p: PPVisRequest):
    """Render the requested sub-tomogram view and return it base64-encoded.

    Args:
        p: request carrying the pool path and the sub-volume index.

    Returns:
        PPVisResponse with subvol_url set to the base64-encoded image bytes.
    """
    item = particlePickingPool.get(p.path)
    # NOTE(review): the original branched on `p.subvol_num == -1` but both
    # branches made the identical call, so the dead conditional is collapsed.
    # If -1 was meant to select a different view (e.g. "all sub-volumes"),
    # restore the intended call in that branch.
    result = item.pick.view_subtom(p.subvol_num)
    # view_subtom returns a file path; read and encode its contents.
    with open(result, 'rb') as f:
        b64 = base64.b64encode(f.read()).decode()
    ppvr = PPVisResponse()
    ppvr.subvol_url = b64
    return ppvr
def pyramid_block(pyramid_filters=256, segmentation_filters=128, upsample_rate=2, use_batchnorm=False):
    """Return a functional layer building one FPN-style pyramid stage.

    The returned callable takes the backbone feature map ``c`` and the
    previous (coarser) pyramid output ``m`` and yields the merged pyramid
    feature plus a two-conv segmentation head for this level.
    """
    def layer(c, m=None):
        # 1x1 projection of the backbone feature map.
        lateral = Conv2D(pyramid_filters, (1, 1))(c)
        if m is not None:
            # Merge with the upsampled coarser pyramid level.
            lateral = Add()([lateral, UpSampling2D((upsample_rate, upsample_rate))(m)])
        # Two 3x3 convolutions form the segmentation branch for this level.
        seg = Conv(segmentation_filters, (3, 3), padding='same', use_batchnorm=use_batchnorm)(lateral)
        seg = Conv(segmentation_filters, (3, 3), padding='same', use_batchnorm=use_batchnorm)(seg)
        return (lateral, seg)
    return layer
class process(object):
    """One-off ETL pipeline for a click/purchase dataset.

    Joins per-sample feature strings with a shared "common feature" file,
    builds a frequency-filtered vocabulary, and writes integer-encoded CSVs.
    Relies on module-level globals: common_feat_path, data_path, write_path,
    enum_path and use_columns.
    """

    def __init__(self):
        pass

    def process_train(self):
        """Build <write_path>.train and <write_path>.dev from the train split."""
        # Pass 1: load the shared feature blocks keyed by the id in column 0.
        c = 0
        common_feat_dict = {}
        with open(common_feat_path.format('train'), 'r') as fr:
            for line in fr:
                line_list = line.strip().split(',')
                # Column 2 packs flattened triples separated by the control
                # characters \x01/\x02/\x03; positions 0,3,6,... are keys and
                # 1,4,7,... are values (the third element is unused here).
                kv = np.array(re.split('\x01|\x02|\x03', line_list[2]))
                key = kv[range(0, len(kv), 3)]
                value = kv[range(1, len(kv), 3)]
                feat_dict = dict(zip(key, value))
                common_feat_dict[line_list[0]] = feat_dict
                c += 1
                # Progress heartbeat every 100k lines.
                if ((c % 100000) == 0):
                    print(c)
        print('join feats...')
        # Pass 2: join per-sample features with the common block, write a
        # temporary joined CSV, and count raw value frequencies per column.
        c = 0
        vocabulary = dict(zip(use_columns, [{} for _ in range(len(use_columns))]))
        with open((data_path.format('train') + '.tmp'), 'w') as fw:
            fw.write((('click,purchase,' + ','.join(use_columns)) + '\n'))
            with open(data_path.format('train'), 'r') as fr:
                for line in fr:
                    line_list = line.strip().split(',')
                    # Drop inconsistent rows: purchase without a click.
                    if ((line_list[1] == '0') and (line_list[2] == '1')):
                        continue
                    kv = np.array(re.split('\x01|\x02|\x03', line_list[5]))
                    key = kv[range(0, len(kv), 3)]
                    value = kv[range(1, len(kv), 3)]
                    feat_dict = dict(zip(key, value))
                    # Common features (looked up via column 3) override/extend
                    # the per-sample ones.
                    feat_dict.update(common_feat_dict[line_list[3]])
                    feats = line_list[1:3]
                    for k in use_columns:
                        # NOTE(review): unlike process_test below, this does
                        # not wrap the value in str() -- confirm values are
                        # already strings here.
                        feats.append(feat_dict.get(k, '0'))
                    fw.write((','.join(feats) + '\n'))
                    for (k, v) in feat_dict.items():
                        if (k in use_columns):
                            if (v in vocabulary[k]):
                                vocabulary[k][v] += 1
                            else:
                                # NOTE(review): the first occurrence is
                                # recorded as 0, so counts are one lower than
                                # true frequency and the `> 10` filter below
                                # effectively requires 12 occurrences --
                                # confirm this off-by-one is intentional.
                                vocabulary[k][v] = 0
                    c += 1
                    if ((c % 100000) == 0):
                        print(c)
        print('before filter low freq:')
        for (k, v) in vocabulary.items():
            print(((k + ':') + str(len(v))))
        # Keep only values whose (undercounted) frequency exceeds 10.
        new_vocabulary = dict(zip(use_columns, [set() for _ in range(len(use_columns))]))
        for (k, v) in vocabulary.items():
            for (k1, v1) in v.items():
                if (v1 > 10):
                    new_vocabulary[k].add(k1)
        vocabulary = new_vocabulary
        print('after filter low freq:')
        for (k, v) in vocabulary.items():
            print(((k + ':') + str(len(v))))
        # Persist the vocabulary so process_test can reuse it.
        joblib.dump(vocabulary, enum_path, compress=3)
        print('encode feats...')
        # Pass 3: map each surviving value to a 1-based integer id; id '0'
        # is reserved for unknown/filtered values.
        vocabulary = joblib.load(enum_path)
        feat_map = {}
        for feat in use_columns:
            feat_map[feat] = dict(zip(vocabulary[feat], range(1, (len(vocabulary[feat]) + 1))))
        c = 0
        with open((write_path + '.train'), 'w') as fw1:
            with open((write_path + '.dev'), 'w') as fw2:
                fw1.write((('click,purchase,' + ','.join(use_columns)) + '\n'))
                fw2.write((('click,purchase,' + ','.join(use_columns)) + '\n'))
                with open((data_path.format('train') + '.tmp'), 'r') as fr:
                    fr.readline()
                    for line in fr:
                        line_list = line.strip().split(',')
                        new_line = line_list[:2]
                        for (value, feat) in zip(line_list[2:], use_columns):
                            new_line.append(str(feat_map[feat].get(value, '0')))
                        # Random ~10% of rows go to the dev split.
                        if (random.random() >= 0.9):
                            fw2.write((','.join(new_line) + '\n'))
                        else:
                            fw1.write((','.join(new_line) + '\n'))
                        c += 1
                        if ((c % 100000) == 0):
                            print(c)

    def process_test(self):
        """Build <write_path>.test -- same pipeline as process_train, but
        reusing the vocabulary saved at enum_path instead of rebuilding it."""
        c = 0
        common_feat_dict = {}
        with open(common_feat_path.format('test'), 'r') as fr:
            for line in fr:
                line_list = line.strip().split(',')
                kv = np.array(re.split('\x01|\x02|\x03', line_list[2]))
                key = kv[range(0, len(kv), 3)]
                value = kv[range(1, len(kv), 3)]
                feat_dict = dict(zip(key, value))
                common_feat_dict[line_list[0]] = feat_dict
                c += 1
                if ((c % 100000) == 0):
                    print(c)
        print('join feats...')
        c = 0
        with open((data_path.format('test') + '.tmp'), 'w') as fw:
            fw.write((('click,purchase,' + ','.join(use_columns)) + '\n'))
            with open(data_path.format('test'), 'r') as fr:
                for line in fr:
                    line_list = line.strip().split(',')
                    # Same purchase-without-click filter as in process_train.
                    if ((line_list[1] == '0') and (line_list[2] == '1')):
                        continue
                    kv = np.array(re.split('\x01|\x02|\x03', line_list[5]))
                    key = kv[range(0, len(kv), 3)]
                    value = kv[range(1, len(kv), 3)]
                    feat_dict = dict(zip(key, value))
                    feat_dict.update(common_feat_dict[line_list[3]])
                    feats = line_list[1:3]
                    for k in use_columns:
                        feats.append(str(feat_dict.get(k, '0')))
                    fw.write((','.join(feats) + '\n'))
                    c += 1
                    if ((c % 100000) == 0):
                        print(c)
        print('encode feats...')
        vocabulary = joblib.load(enum_path)
        feat_map = {}
        for feat in use_columns:
            feat_map[feat] = dict(zip(vocabulary[feat], range(1, (len(vocabulary[feat]) + 1))))
        c = 0
        with open((write_path + '.test'), 'w') as fw:
            fw.write((('click,purchase,' + ','.join(use_columns)) + '\n'))
            with open((data_path.format('test') + '.tmp'), 'r') as fr:
                fr.readline()
                for line in fr:
                    line_list = line.strip().split(',')
                    new_line = line_list[:2]
                    for (value, feat) in zip(line_list[2:], use_columns):
                        new_line.append(str(feat_map[feat].get(value, '0')))
                    fw.write((','.join(new_line) + '\n'))
                    c += 1
                    if ((c % 100000) == 0):
                        print(c)
# NOTE(review): these two bare calls look like pytest fixture/parametrize
# decorators whose leading '@' was lost when this file was extracted --
# confirm against the original test module before running.
_client_parallelize(1)
_channel('purerpc_port')


def test_metadata_grpc_client(greeter_pb2, greeter_pb2_grpc, channel):
    """Round-trip request metadata through a gRPC Greeter service.

    The server under test is expected to echo the metadata it received
    back as a base64-encoded pickle in the response message.
    """
    stub = greeter_pb2_grpc.GreeterStub(channel)
    response = stub.SayHello(greeter_pb2.HelloRequest(name='World'), metadata=METADATA)
    received_metadata = pickle.loads(base64.b64decode(response.message))
    # grpc may append its own 'accept-encoding' entry; drop it before comparing.
    if (received_metadata[(- 1)][0] == 'accept-encoding'):
        received_metadata = received_metadata[:(- 1)]
    assert (METADATA == received_metadata)
def _test_helper(res):
assert (((2 * 40), (2048 * 2)) == res['1'].shape)
assert ('reflectance' == res['1'].attrs['calibration'])
assert ('%' == res['1'].attrs['units'])
assert (((2 * 40), (2048 * 2)) == res['2'].shape)
assert ('reflectance' == res['2'].attrs['calibration'])
assert ('%' == res['2'].attrs['units'])
assert (((2 * 40), (2048 * 2)) == res['3'].shape)
assert ('reflectance' == res['3'].attrs['calibration'])
assert ('%' == res['3'].attrs['units'])
assert (((2 * 40), (2048 * 2)) == res['4'].shape)
assert ('reflectance' == res['4'].attrs['calibration'])
assert ('%' == res['4'].attrs['units']) |
class EnableCloudPassword():
    """Client mixin providing enable_cloud_password."""

    async def enable_cloud_password(self: 'pyrogram.Client', password: str, hint: str='', email: str=None) -> bool:
        """Enable a two-step-verification (cloud) password on the account.

        Args:
            password: the new cloud password.
            hint: optional password hint shown by official clients.
            email: optional recovery e-mail.

        Returns:
            True on success.

        Raises:
            ValueError: if a cloud password is already enabled.
        """
        r = (await self.invoke(raw.functions.account.GetPassword()))
        if r.has_password:
            raise ValueError('There is already a cloud password enabled')
        # Extend the server-provided salt with 32 bytes of local randomness
        # (presumably required by Telegram's SRP scheme -- confirm).
        r.new_algo.salt1 += os.urandom(32)
        # new_hash = g ^ PH(password) mod p, serialized back to bytes.
        new_hash = btoi(compute_password_hash(r.new_algo, password))
        new_hash = itob(pow(r.new_algo.g, new_hash, btoi(r.new_algo.p)))
        # Current password check is empty because none is set yet.
        (await self.invoke(raw.functions.account.UpdatePasswordSettings(password=raw.types.InputCheckPasswordEmpty(), new_settings=raw.types.account.PasswordInputSettings(new_algo=r.new_algo, new_password_hash=new_hash, hint=hint, email=email))))
        return True
class AutoModelWithLMHead(_AutoModelWithLMHead):
    """Deprecated auto-model alias kept only for backward compatibility.

    Fix: both constructors take ``cls`` as their first parameter but were
    missing the ``@classmethod`` decorator, so calling them on the class
    would have bound the first positional argument to ``cls``.
    """

    @classmethod
    def from_config(cls, config):
        """Instantiate from a config, emitting a deprecation warning."""
        warnings.warn('The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use `AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and `AutoModelForSeq2SeqLM` for encoder-decoder models.', FutureWarning)
        return super().from_config(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        """Instantiate from pretrained weights, emitting a deprecation warning."""
        warnings.warn('The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use `AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and `AutoModelForSeq2SeqLM` for encoder-decoder models.', FutureWarning)
        return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
class KeyboardButtonRequestChat(TelegramObject):
    """Criteria a requested chat must satisfy when the button is pressed.

    Fix: ``de_json`` takes ``cls`` as its first parameter but was missing
    the ``@classmethod`` decorator, so ``KeyboardButtonRequestChat.de_json(
    data, bot)`` would have bound ``data`` to ``cls``.
    """

    __slots__ = ('request_id', 'chat_is_channel', 'chat_is_forum', 'chat_has_username', 'chat_is_created', 'user_administrator_rights', 'bot_administrator_rights', 'bot_is_member')

    def __init__(self, request_id: int, chat_is_channel: bool, chat_is_forum: Optional[bool]=None, chat_has_username: Optional[bool]=None, chat_is_created: Optional[bool]=None, user_administrator_rights: Optional[ChatAdministratorRights]=None, bot_administrator_rights: Optional[ChatAdministratorRights]=None, bot_is_member: Optional[bool]=None, *, api_kwargs: Optional[JSONDict]=None):
        super().__init__(api_kwargs=api_kwargs)
        # Required fields.
        self.request_id: int = request_id
        self.chat_is_channel: bool = chat_is_channel
        # Optional filters; None means "no constraint".
        self.chat_is_forum: Optional[bool] = chat_is_forum
        self.chat_has_username: Optional[bool] = chat_has_username
        self.chat_is_created: Optional[bool] = chat_is_created
        self.user_administrator_rights: Optional[ChatAdministratorRights] = user_administrator_rights
        self.bot_administrator_rights: Optional[ChatAdministratorRights] = bot_administrator_rights
        self.bot_is_member: Optional[bool] = bot_is_member
        # Equality/hashing is based on request_id only.
        self._id_attrs = (self.request_id,)
        self._freeze()

    @classmethod
    def de_json(cls, data: Optional[JSONDict], bot: 'Bot') -> Optional['KeyboardButtonRequestChat']:
        """Deserialize from a Bot API dict; returns None for empty data."""
        data = cls._parse_data(data)
        if (not data):
            return None
        # Inflate nested admin-rights objects before the generic constructor.
        data['user_administrator_rights'] = ChatAdministratorRights.de_json(data.get('user_administrator_rights'), bot)
        data['bot_administrator_rights'] = ChatAdministratorRights.de_json(data.get('bot_administrator_rights'), bot)
        return super().de_json(data=data, bot=bot)
class Meteor():
    """METEOR metric scorer driving the METEOR Java jar over stdio.

    Fixes over the original (Python-2-era) code:
    - the subprocess pipes are now opened in text mode
      (``universal_newlines=True``); the original wrote ``str`` to binary
      pipes and joined ``bytes`` into ``str``, both TypeErrors on Python 3;
    - every stdin write is followed by ``flush()`` -- without it the jar
      never sees the line and ``readline()`` deadlocks;
    - the no-op ``.replace(' ', ' ')`` is assumed to have been a
      double-space collapse and is written as such (confirm upstream).
    """

    def __init__(self):
        self.meteor_cmd = ['java', '-jar', '-Xmx2G', METEOR_JAR, '-', '-', '-stdio', '-l', 'en', '-norm']
        # Line-buffered text-mode pipes to the long-lived jar process.
        self.meteor_p = subprocess.Popen(self.meteor_cmd, cwd=os.path.dirname(os.path.abspath(__file__)), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, bufsize=1)
        # One shared jar process: serialize all access across threads.
        self.lock = threading.Lock()

    def compute_score(self, gts, res):
        """Score hypotheses in *res* against references in *gts*.

        Returns:
            (corpus_score, per_image_scores) as floats.
        """
        assert (gts.keys() == res.keys())
        imgIds = gts.keys()
        scores = []
        eval_line = 'EVAL'
        self.lock.acquire()
        for i in imgIds:
            assert (len(res[i]) == 1)
            # Each SCORE call returns a stats line consumed by the EVAL call.
            stat = self._stat(res[i][0], gts[i])
            eval_line += ' ||| {}'.format(stat)
        self.meteor_p.stdin.write('{}\n'.format(eval_line))
        self.meteor_p.stdin.flush()
        # One score line per image, then the aggregate corpus score.
        for i in range(0, len(imgIds)):
            scores.append(float(self.meteor_p.stdout.readline().strip()))
        score = float(self.meteor_p.stdout.readline().strip())
        self.lock.release()
        return (score, scores)

    def method(self):
        return 'METEOR'

    def _stat(self, hypothesis_str, reference_list):
        """Send one SCORE line; return the jar's stats line (caller holds lock)."""
        # '|||' is the protocol delimiter and must not appear in the text.
        hypothesis_str = hypothesis_str.replace('|||', '').replace('  ', ' ')
        score_line = ' ||| '.join(('SCORE', ' ||| '.join(reference_list), hypothesis_str))
        self.meteor_p.stdin.write('{}\n'.format(score_line))
        self.meteor_p.stdin.flush()
        return self.meteor_p.stdout.readline().strip()

    def _score(self, hypothesis_str, reference_list):
        """Score a single hypothesis against its references."""
        self.lock.acquire()
        hypothesis_str = hypothesis_str.replace('|||', '').replace('  ', ' ')
        score_line = ' ||| '.join(('SCORE', ' ||| '.join(reference_list), hypothesis_str))
        self.meteor_p.stdin.write('{}\n'.format(score_line))
        self.meteor_p.stdin.flush()
        stats = self.meteor_p.stdout.readline().strip()
        eval_line = 'EVAL ||| {}'.format(stats)
        self.meteor_p.stdin.write('{}\n'.format(eval_line))
        self.meteor_p.stdin.flush()
        # EVAL emits two lines; the second is the final score (first is
        # discarded, matching the original protocol handling).
        score = float(self.meteor_p.stdout.readline().strip())
        score = float(self.meteor_p.stdout.readline().strip())
        self.lock.release()
        return score

    def __del__(self):
        # Best-effort shutdown of the jar process.
        self.lock.acquire()
        self.meteor_p.stdin.close()
        self.meteor_p.kill()
        self.meteor_p.wait()
        self.lock.release()
class ConcatOutput(nn.Module):
    """Top-down fusion of four feature maps into a one-channel output map.

    Each coarser level is upsampled x2, refined by a 3x3 conv, concatenated
    with the next finer level, and fused back down to `channel` channels.
    """

    def __init__(self, channel):
        super(ConcatOutput, self).__init__()
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.conv_upsample1 = BasicConv2d(channel, channel, 3, padding=1)
        self.conv_upsample2 = BasicConv2d(channel, channel, 3, padding=1)
        self.conv_upsample3 = BasicConv2d(channel, channel, 3, padding=1)

        # Fusion block: 3x3 conv at doubled width, then 1x1 back to `channel`.
        def fuse_block():
            return nn.Sequential(BasicConv2d(2 * channel, 2 * channel, 3, padding=1), BasicConv2d(2 * channel, channel, 1))

        self.conv_cat1 = fuse_block()
        self.conv_cat2 = fuse_block()
        self.conv_cat3 = fuse_block()
        self.output = nn.Sequential(BasicConv2d(channel, channel, 3, padding=1), nn.Conv2d(channel, 1, 1))

    def forward(self, x1, x2, x3, x4):
        # Cascade from the coarsest map (x4) down to the finest (x1).
        x3 = self.conv_cat1(torch.cat((x3, self.conv_upsample1(self.upsample(x4))), 1))
        x2 = self.conv_cat2(torch.cat((x2, self.conv_upsample2(self.upsample(x3))), 1))
        x1 = self.conv_cat3(torch.cat((x1, self.conv_upsample3(self.upsample(x2))), 1))
        return self.output(x1)
def set_panning(self, crtc, left, top, width, height, track_left, track_top, track_width, track_height, border_left, border_top, border_width, border_height, timestamp=X.CurrentTime):
    """Issue a RandR SetPanning request for *crtc* on this display.

    Forwards the panning rectangle, tracking area and border values
    unchanged to the SetPanning protocol request object.

    NOTE(review): `extname` is not defined in this function -- it is
    presumably a module-level constant naming the extension (e.g. 'RANDR');
    confirm it exists at module scope.
    """
    return SetPanning(display=self.display, opcode=self.display.get_extension_major(extname), crtc=crtc, left=left, top=top, width=width, height=height, track_left=track_left, track_top=track_top, track_width=track_width, track_height=track_height, border_left=border_left, border_top=border_top, border_width=border_width, border_height=border_height, timestamp=timestamp)
def postprocess_text(mode, preds, golds):
    """Group generated belief-state strings by dialogue and turn.

    Args:
        mode: unused (kept for interface compatibility).
        preds: generated strings, each a ', '-separated list of
            "<domain> <slot...> <value...>" items.
        golds: dicts with 'ID', 'domains', 'turn_id' and 'turn_belief'.

    Returns:
        {dial_id: {'domains': ..., 'turns': {turn_id: {'turn_belief': [...],
        'pred_belief': [...]}}}} with "none"/"not mentioned" entries dropped.
    """
    predictions = {}
    for pred, gold in zip(preds, golds):
        dial_id = gold['ID']
        entry = predictions.setdefault(dial_id, {'domains': gold['domains'], 'turns': {}})

        # Gold beliefs, minus empty-slot markers.
        cleaned_gold_belief = [bs for bs in gold['turn_belief'] if ('not mentioned' not in bs) and ('none' not in bs)]

        turns = entry['turns']
        if gold['turn_id'] not in turns:
            turns[gold['turn_id']] = {'turn_belief': cleaned_gold_belief, 'pred_belief': []}

        for pred_slot_value in pred.split(', '):
            tokens = pred_slot_value.split(' ')
            if len(tokens) < 2:
                continue
            # Booking slots span two tokens ("book people"), others one.
            if tokens[1] == 'book':
                domain_slot = tokens[0] + '-' + ' '.join(tokens[1:3])
                value = ' '.join(tokens[3:])
            else:
                domain_slot = tokens[0] + '-' + tokens[1]
                value = ' '.join(tokens[2:])
            pred_bs = '{}-{}'.format(domain_slot, value)
            if ('not mentioned' in pred_bs) or ('none' in pred_bs):
                continue
            turns[gold['turn_id']]['pred_belief'].append(pred_bs)
    return predictions
class STM32F4xxSdio(QlConnectivityPeripheral):
    """Qiling peripheral model of the STM32F4xx SDIO controller.

    Minimal MMIO stub: writes to the CMD register update the status (STA)
    and RESPCMD registers so that command-completion polling in emulated
    firmware can make progress.
    """

    class Type(ctypes.Structure):
        # Register file layout; MMIO offsets derive from field order.
        _fields_ = [('POWER', ctypes.c_uint32), ('CLKCR', ctypes.c_uint32), ('ARG', ctypes.c_uint32), ('CMD', ctypes.c_uint32), ('RESPCMD', ctypes.c_uint32), ('RESP1', ctypes.c_uint32), ('RESP2', ctypes.c_uint32), ('RESP3', ctypes.c_uint32), ('RESP4', ctypes.c_uint32), ('DTIMER', ctypes.c_uint32), ('DLEN', ctypes.c_uint32), ('DCTRL', ctypes.c_uint32), ('DCOUNT', ctypes.c_uint32), ('STA', ctypes.c_uint32), ('ICR', ctypes.c_uint32), ('MASK', ctypes.c_uint32), ('RESERVED0', (ctypes.c_uint32 * 2)), ('FIFOCNT', ctypes.c_uint32), ('RESERVED1', (ctypes.c_uint32 * 13)), ('FIFO', ctypes.c_uint32)]

    def __init__(self, ql: Qiling, label: str, intn: int=None):
        super().__init__(ql, label)
        # Interrupt number; stored but not otherwise used in this stub.
        self.intn = intn
        self.instance = self.struct()
        # NOTE(review): stray no-op `()` below -- looks like a call whose
        # target was lost when this file was extracted; confirm against the
        # original source.
        ()

    def write(self, offset: int, size: int, value: int):
        """MMIO write hook: emulate the command path's status flags."""
        if (offset == self.struct.CMD.offset):
            # CPSMEN enables the command path state machine.
            if (value & SDIO_CMD.CPSMEN):
                # WAITRESP bits (shifted down by 6) select the response mode;
                # for values 0 and 3 the model raises CMDSENT, otherwise it
                # raises CMDREND and records the command index.
                waitresp = ((value & SDIO_CMD.WAITRESP) >> 6)
                if (waitresp in [0, 3]):
                    self.instance.STA |= SDIO_STA.CMDSENT
                else:
                    self.instance.STA |= SDIO_STA.CMDREND
                    self.instance.RESPCMD = (value & SDIO_CMD.CMDINDEX)
        # Always persist the raw register write.
        self.raw_write(offset, size, value)
def digit_version(version_str: str, length: int=4):
    """Convert a version string into an integer tuple for ordered comparison.

    The release part is padded/truncated to *length* digits, then two extra
    digits encode the release stage so that pre-releases sort below finals
    (a=-3, b=-2, rc=-1, unknown pre=-4) and post-releases above (+1).

    Args:
        version_str: version to convert (must not be a 'parrots' build).
        length: number of release digits to keep.

    Returns:
        Tuple of length ``length + 2`` integers.
    """
    assert 'parrots' not in version_str
    version = parse(version_str)
    assert version.release, f'failed to parse version {version_str}'

    digits = list(version.release)[:length]
    digits += [0] * (length - len(digits))

    if version.is_prerelease:
        stage_codes = {'a': -3, 'b': -2, 'rc': -1}
        if version.pre:
            tag, num = version.pre[0], version.pre[-1]
            if tag in stage_codes:
                digits += [stage_codes[tag], num]
            else:
                warnings.warn(f'unknown prerelease version {tag}, version checking may go wrong')
                digits += [-4, num]
        else:
            # Pre-release flag without a concrete tag: rank below all tags.
            digits += [-4, 0]
    elif version.is_postrelease:
        digits += [1, version.post]
    else:
        digits += [0, 0]
    return tuple(digits)
def get_cmdclass():
    """Build a dict of distutils/setuptools command classes wired to versioneer.

    Commands are registered for whichever build frontends happen to be
    imported (setuptools, cx_Freeze, py2exe), so the computed version info
    gets regenerated and embedded at build/sdist time.
    """
    if ('versioneer' in sys.modules):
        # Drop a cached copy so the in-tree versioneer module is re-imported
        # fresh by the build machinery.
        del sys.modules['versioneer']
    cmds = {}
    from distutils.core import Command

    class cmd_version(Command):
        # `python setup.py version` -- print the computed version info.
        description = 'report generated version string'
        user_options = []
        boolean_options = []

        def initialize_options(self):
            pass

        def finalize_options(self):
            pass

        def run(self):
            vers = get_versions(verbose=True)
            print(('Version: %s' % vers['version']))
            print((' full-revisionid: %s' % vers.get('full-revisionid')))
            print((' dirty: %s' % vers.get('dirty')))
            print((' date: %s' % vers.get('date')))
            if vers['error']:
                print((' error: %s' % vers['error']))
    cmds['version'] = cmd_version
    # Prefer the setuptools build_py when setuptools drives the build.
    if ('setuptools' in sys.modules):
        from setuptools.command.build_py import build_py as _build_py
    else:
        from distutils.command.build_py import build_py as _build_py

    class cmd_build_py(_build_py):
        def run(self):
            root = get_root()
            cfg = get_config_from_root(root)
            versions = get_versions()
            _build_py.run(self)
            # Overwrite the version file inside the build tree with the
            # frozen version info computed at build time.
            if cfg.versionfile_build:
                target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build)
                print(('UPDATING %s' % target_versionfile))
                write_to_version_file(target_versionfile, versions)
    cmds['build_py'] = cmd_build_py
    if ('cx_Freeze' in sys.modules):
        from cx_Freeze.dist import build_exe as _build_exe

        class cmd_build_exe(_build_exe):
            def run(self):
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                target_versionfile = cfg.versionfile_source
                print(('UPDATING %s' % target_versionfile))
                # Temporarily freeze the source version file so the built
                # executable carries static version info, then restore the
                # original template afterwards.
                write_to_version_file(target_versionfile, versions)
                _build_exe.run(self)
                os.unlink(target_versionfile)
                with open(cfg.versionfile_source, 'w') as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write((LONG % {'DOLLAR': '$', 'STYLE': cfg.style, 'TAG_PREFIX': cfg.tag_prefix, 'PARENTDIR_PREFIX': cfg.parentdir_prefix, 'VERSIONFILE_SOURCE': cfg.versionfile_source}))
        cmds['build_exe'] = cmd_build_exe
        # cx_Freeze drives the whole build; the plain build_py hook is unused.
        del cmds['build_py']
    if ('py2exe' in sys.modules):
        # py2exe moved this module between releases; try the new path first.
        try:
            from py2exe.distutils_buildexe import py2exe as _py2exe
        except ImportError:
            from py2exe.build_exe import py2exe as _py2exe

        class cmd_py2exe(_py2exe):
            def run(self):
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                target_versionfile = cfg.versionfile_source
                print(('UPDATING %s' % target_versionfile))
                # Same freeze/restore dance as for cx_Freeze above.
                write_to_version_file(target_versionfile, versions)
                _py2exe.run(self)
                os.unlink(target_versionfile)
                with open(cfg.versionfile_source, 'w') as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write((LONG % {'DOLLAR': '$', 'STYLE': cfg.style, 'TAG_PREFIX': cfg.tag_prefix, 'PARENTDIR_PREFIX': cfg.parentdir_prefix, 'VERSIONFILE_SOURCE': cfg.versionfile_source}))
        cmds['py2exe'] = cmd_py2exe
    if ('setuptools' in sys.modules):
        from setuptools.command.sdist import sdist as _sdist
    else:
        from distutils.command.sdist import sdist as _sdist

    class cmd_sdist(_sdist):
        def run(self):
            versions = get_versions()
            # Stash the versions for make_release_tree below.
            self._versioneer_generated_versions = versions
            # Force the distribution version so the sdist filename matches
            # the computed version string.
            self.distribution.metadata.version = versions['version']
            return _sdist.run(self)

        def make_release_tree(self, base_dir, files):
            root = get_root()
            cfg = get_config_from_root(root)
            _sdist.make_release_tree(self, base_dir, files)
            # Replace the exported version file with one containing the
            # frozen version info computed when run() started.
            target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
            print(('UPDATING %s' % target_versionfile))
            write_to_version_file(target_versionfile, self._versioneer_generated_versions)
    cmds['sdist'] = cmd_sdist
    return cmds
class TrappingPotential():
    """Base class for a trapping potential on a 1-D lattice of sites."""

    def get_potential(self, sites_count: int) -> np.ndarray:
        """Return the on-site potential values for ``sites_count`` sites.

        The base class has no concrete shape; subclasses must override.
        (BUG FIX: this method had no body at all, which is a syntax error —
        restored as an abstract stub.)
        """
        raise NotImplementedError

    def as_quadratic_hamiltonian(self, sites_count: int, j: Union[(Real, Iterable[Real])]) -> openfermion.QuadraticHamiltonian:
        """Build the quadratic Hamiltonian with hopping ``j`` and this potential."""
        return _potential_to_quadratic_hamiltonian(self.get_potential(sites_count), j)
class EllipsisType(ProperType):
    """The type of the literal ``...``; exists only before semantic analysis."""

    __slots__ = ()

    def accept(self, visitor: TypeVisitor[T]) -> T:
        # Only synthetic-type visitors know how to handle this node.
        assert isinstance(visitor, SyntheticTypeVisitor)
        result: T = visitor.visit_ellipsis_type(self)
        return result

    def serialize(self) -> JsonDict:
        # Synthetic nodes never survive to serialization time.
        assert False, "Synthetic types don't serialize"
@register_meter('accuracy_list_meter')
class AccuracyListMeter(ClassyMeter):
    """Meter computing top-k accuracies for a list of model outputs.

    Holds one ``AccuracyMeter`` per output head and aggregates their
    values into a single dict keyed by ``top_k``.

    NOTE(review): the registration decorator and the @classmethod/@property
    decorators below were garbled/stripped in the source dump and have been
    restored; internal attribute-style accesses (``meter.value``,
    ``self.value``, ``self.name``) require the properties.
    """

    def __init__(self, num_meters: int, topk_values: List[int], meter_names: List[str]):
        """
        Args:
            num_meters: number of model outputs to track (one sub-meter each).
            topk_values: list of k values at which to compute accuracy.
            meter_names: optional display names, one per sub-meter (may be empty).
        """
        super().__init__()
        assert is_pos_int(num_meters), 'num_meters must be positive'
        assert isinstance(topk_values, list), 'topk_values must be a list'
        assert (len(topk_values) > 0), 'topk_values list should have at least one element'
        # BUG FIX: the original asserted on the list comprehension itself,
        # which is always truthy when non-empty; all() actually validates.
        assert all(is_pos_int(x) for x in topk_values), 'each value in topk_values must be >= 1'
        self._num_meters = num_meters
        self._topk_values = topk_values
        self._meters = [AccuracyMeter(self._topk_values) for _ in range(self._num_meters)]
        self._meter_names = meter_names
        self.reset()

    @classmethod
    def from_config(cls, meters_config: AttrDict):
        """Construct the meter from a config dict."""
        return cls(num_meters=meters_config['num_meters'], topk_values=meters_config['topk_values'], meter_names=meters_config['meter_names'])

    @property
    def name(self):
        """Registry name of this meter."""
        return 'accuracy_list_meter'

    @property
    def value(self):
        """Aggregate sub-meter values into ``{top_k: {meter_name: val(s)}}``."""
        val_dict = {}
        for ind, meter in enumerate(self._meters):
            meter_val = meter.value
            sample_count = meter._total_sample_count
            val_dict[ind] = {}
            val_dict[ind]['val'] = meter_val
            val_dict[ind]['sample_count'] = sample_count
        output_dict = {}
        for k in self._topk_values:
            top_k_str = f'top_{k}'
            output_dict[top_k_str] = {}
            for ind in range(len(self._meters)):
                # Fall back to the meter index when no names were given.
                meter_name = self._meter_names[ind] if len(self._meter_names) > 0 else ind
                val = 100.0 * round(float(val_dict[ind]['val'][top_k_str]), 6)
                if meter_name not in output_dict[top_k_str]:
                    output_dict[top_k_str][meter_name] = [val]
                else:
                    output_dict[top_k_str][meter_name].append(val)
        # Unwrap single-element lists for readability.
        for topk in output_dict:
            for k in output_dict[topk]:
                if len(output_dict[topk][k]) == 1:
                    output_dict[topk][k] = output_dict[topk][k][0]
        return output_dict

    def sync_state(self):
        """Synchronize every sub-meter's state across replicas."""
        for _, meter in enumerate(self._meters):
            meter.sync_state()

    def get_classy_state(self):
        """Return a checkpointable dict holding every sub-meter's state."""
        meter_states = {}
        for ind, meter in enumerate(self._meters):
            state = meter.get_classy_state()
            meter_states[ind] = {}
            meter_states[ind]['state'] = state
        return meter_states

    def set_classy_state(self, state):
        """Restore sub-meter states produced by :meth:`get_classy_state`."""
        assert len(state) == len(self._meters), 'Incorrect state dict for meters'
        for ind, meter in enumerate(self._meters):
            meter.set_classy_state(state[ind]['state'])

    def __repr__(self):
        value = self.value
        for k in self._topk_values:
            top_k_str = f'top_{k}'
            # NOTE(review): iterating value[top_k_str] yields meter-name keys,
            # not floats, so this formatting looks questionable — confirm
            # against upstream before relying on __repr__ output.
            hr_format = [('%.1f' % (100 * x)) for x in value[top_k_str]]
            value[top_k_str] = ','.join(hr_format)
        repr_dict = {'name': self.name, 'num_meters': self._num_meters, 'value': value}
        return pprint.pformat(repr_dict, indent=2)

    def update(self, model_output: Union[(torch.Tensor, List[torch.Tensor])], target: torch.Tensor):
        """Feed one batch; ``model_output`` is a tensor or one tensor per sub-meter."""
        if isinstance(model_output, torch.Tensor):
            model_output = [model_output]
        assert isinstance(model_output, list)
        assert (len(model_output) == self._num_meters)
        for meter, output in zip(self._meters, model_output):
            meter.update(output, target)

    def reset(self):
        """Reset every sub-meter."""
        [x.reset() for x in self._meters]

    def validate(self, model_output_shape, target_shape):
        """Shape validation is delegated to the sub-meters; nothing to do here."""
        pass
def _request(url, post=False, **kwargs):
    """Perform an HTTP request and return the raw (streaming) response body.

    Args:
        url: target URL.
        post: falsy for GET; otherwise used as the POST body.
        **kwargs: sent as URL query parameters.

    Returns:
        The underlying raw response object (``resp.raw``), streamed.

    Raises:
        EmptyResult: when the server answers 204 No Content.
        requests.HTTPError: for 4xx/5xx responses (via raise_for_status).
    """
    # Lazy %-style logging args: formatting is skipped when DEBUG is off.
    logger.debug('Accessing URL %s', url)
    if post:
        logger.debug('POST data: \n%s', post)
        req = requests.Request('POST', url=url, params=kwargs, data=post)
    else:
        req = requests.Request('GET', url=url, params=kwargs)
    ses = requests.Session()
    prep = ses.prepare_request(req)
    # Some servers reject requests without an Accept header.
    prep.headers['Accept'] = '*/*'
    resp = ses.send(prep, stream=True)
    resp.raise_for_status()
    if resp.status_code == 204:
        raise EmptyResult(url)
    return resp.raw
def test_revalidate_vercel_frontend_when_vercel_is_down_doesnt_crash(caplog, requests_mock, locale):
    """Revalidation should log the failure, not raise, when Vercel returns 500."""
    parent = PageFactory()
    page = PageFactory(slug='test123', locale=locale('en'), parent=parent)
    site = SiteFactory(hostname='pycon', root_page=page)
    italian_page = page.copy_for_translation(locale=locale('it'))
    italian_page.slug = 'something-else'
    italian_page.save()
    # BUG FIX: the revalidate_url literal was corrupted in the source
    # (unterminated string); restored with a placeholder endpoint.
    # NOTE(review): confirm the exact URL against upstream.
    settings = VercelFrontendSettingsFactory(revalidate_url='http://localhost:3000/api/revalidate', revalidate_secret='test', site=site)
    mock_call = requests_mock.post(settings.revalidate_url, status_code=500)
    revalidate_vercel_frontend_task(page_id=italian_page.id)
    assert mock_call.called
    assert 'Error while revalidating' in caplog.records[0].message
class MilvusUploader(BaseUploader):
    """Batch uploader targeting a Milvus collection.

    NOTE(review): the @classmethod decorators were stripped in the source
    dump (every method takes ``cls``) and have been restored.
    """
    client = None
    upload_params = {}
    collection: Collection = None
    distance: str = None

    @classmethod
    def get_mp_start_method(cls):
        """Prefer 'forkserver' when the platform supports it, else 'spawn'."""
        return 'forkserver' if 'forkserver' in mp.get_all_start_methods() else 'spawn'

    @classmethod
    def init_client(cls, host, distance, connection_params, upload_params):
        """Connect to Milvus and bind the target collection on this process."""
        cls.client = connections.connect(alias=MILVUS_DEFAULT_ALIAS, host=host, port=str(connection_params.pop('port', MILVUS_DEFAULT_PORT)), **connection_params)
        cls.collection = Collection(MILVUS_COLLECTION_NAME, using=MILVUS_DEFAULT_ALIAS)
        cls.upload_params = upload_params
        cls.distance = DISTANCE_MAPPING[distance]

    @classmethod
    def upload_batch(cls, ids: List[int], vectors: List[list], metadata: Optional[List[dict]]):
        """Insert one batch of ids/vectors plus per-field payload columns."""
        if metadata is not None:
            # Build one column per extra schema field, substituting a dtype
            # default when a payload is missing the field.
            field_values = [[(payload.get(field_schema.name) or DTYPE_DEFAULT[field_schema.dtype]) for payload in metadata] for field_schema in cls.collection.schema.fields if (field_schema.name not in ['id', 'vector'])]
        else:
            field_values = []
        cls.collection.insert(([ids, vectors] + field_values))

    @classmethod
    def post_upload(cls, distance):
        """Flush, build vector + scalar indexes, wait for them, and load."""
        index_params = {'metric_type': cls.distance, 'index_type': cls.upload_params.get('index_type', 'HNSW'), 'params': {**cls.upload_params.get('index_params', {})}}
        cls.collection.flush()
        cls.collection.create_index(field_name='vector', index_params=index_params)
        for field_schema in cls.collection.schema.fields:
            if field_schema.name in ['id', 'vector']:
                continue
            try:
                cls.collection.create_index(field_name=field_schema.name, index_name=field_schema.name)
            except MilvusException as e:
                # Error code 1 means the index already exists; anything else
                # is a real failure.
                if 1 != e.code:
                    raise e
        for index in cls.collection.indexes:
            wait_for_index_building_complete(MILVUS_COLLECTION_NAME, index_name=index.index_name, using=MILVUS_DEFAULT_ALIAS)
        cls.collection.load()
        return {}
def test_connect_two_chains():
    """Two separate chains can be joined by an explicit input/output link."""
    graph = Graph()
    a1, a2, b1, b2 = get_pseudo_nodes('a1', 'a2', 'b1', 'b2')
    graph.add_chain(a1, a2, _input=None, _output=None)
    graph.add_chain(b1, b2, _input=None, _output=None)
    # Before linking, the tail of the first chain goes nowhere.
    assert len(graph.outputs_of(a2)) == 0
    graph.add_chain(_input=a2, _output=b1)
    assert graph.outputs_of(a2) == graph.indexes_of(b1)
class Blade(metaclass=_GradedTypesMeta):
    # Base class for geometric-algebra blade wrappers; instances carry the
    # layout they were constructed against.

    def __init__(self, layout):
        # layout: the algebra layout this blade lives in (provides einf, etc.).
        self.layout = layout

    def _repr_skip_members(self):
        # Members hidden from __repr__/_repr_pretty_; subclasses may extend.
        return {'layout'}

    def __new__(cls, *args, **kwargs):
        # Swallow constructor arguments so subclasses can define varied
        # __init__ signatures without tripping object.__new__.
        return super().__new__(cls)

    def __repr__(self):
        # Show every instance member except the skipped ones.
        members = self.__dict__.copy()
        for name in self._repr_skip_members():
            members.pop(name)
        return '{}({})'.format(type(self).__name__, ', '.join(('{}={!r}'.format(k, v) for (k, v) in members.items())))

    def _repr_pretty_(self, p, cycle):
        # IPython pretty-printer: same content as __repr__, but with line
        # breaks aligned under the opening parenthesis.
        members = self.__dict__.copy()
        for name in self._repr_skip_members():
            members.pop(name)
        prefix = '{}('.format(type(self).__name__)
        with p.group(len(prefix), prefix, ')'):
            is_first = True
            for (k, v) in members.items():
                if (not is_first):
                    p.text(',')
                    p.breakable()
                p.text('{}={!r}'.format(k, v))
                is_first = False

    def _translate(self, t, x):
        # Conformal translation of x by vector t, applying the translator
        # versor (1 - t*einf/2) as a sandwich product.
        einf = self.layout.einf
        versor = (1 - ((t * einf) / 2))
        return ((versor * x) * (~ versor))

    def mv(self) -> MultiVector:
        # Convert this blade to a raw MultiVector; subclasses implement.
        # NOTE(review): takes no arguments and returns a value — possibly a
        # stripped @property in the original; confirm upstream.
        raise NotImplementedError
class Propagator():
    """Memoizing propagator U(t) for a deterministic quantum system.

    Wraps a Schrodinger/master-equation solver (or a user-supplied Solver)
    and computes the propagator at requested times, caching up to
    ``memoize`` results keyed by time.
    """

    def __init__(self, system, *, c_ops=(), args=None, options=None, memoize=10, tol=1e-14):
        """
        Args:
            system: Hamiltonian/Liouvillian (Qobj/QobjEvo-like) or a Solver.
            c_ops: collapse operators; forces a master-equation solver.
            args: time-dependence arguments for the system.
            options: solver options.
            memoize: maximum number of cached propagators (floored at 3).
            tol: absolute time tolerance for cache hits.

        Raises:
            TypeError: for non-deterministic (multi-trajectory) solvers.
            NotImplementedError: for HEOM solvers.
        """
        if isinstance(system, MultiTrajSolver):
            raise TypeError('Non-deterministic solvers cannot be used as a propagator system')
        elif isinstance(system, HEOMSolver):
            raise NotImplementedError('HEOM is not supported by Propagator. Please, tell us on GitHub issues if you need it!')
        elif isinstance(system, Solver):
            self.solver = system
        else:
            Hevo = QobjEvo(system, args=args)
            c_ops = [QobjEvo(op, args=args) for op in c_ops]
            if Hevo.issuper or c_ops:
                self.solver = MESolver(Hevo, c_ops=c_ops, options=options)
            else:
                self.solver = SESolver(Hevo, options=options)
        # Cached (time, propagator) pairs kept sorted by time; U(0) = identity.
        self.times = [0]
        self.invs = [None]
        self.props = [qeye(self.solver.sys_dims)]
        self.solver.start(self.props[0], self.times[0])
        self.cte = self.solver.rhs.isconstant
        H_0 = self.solver.rhs(0)
        # Unitary evolution lets us invert with dag() instead of inv().
        self.unitary = (not H_0.issuper) and H_0.isherm
        self.args = args
        self.memoize = max(3, int(memoize))
        self.tol = tol

    def _lookup_or_compute(self, t):
        """Return U(t) from the cache when within ``tol``, else compute and cache."""
        idx = np.searchsorted(self.times, t)
        if idx < len(self.times) and abs(t - self.times[idx]) <= self.tol:
            U = self.props[idx]
        elif idx > 0 and abs(t - self.times[idx - 1]) <= self.tol:
            U = self.props[idx - 1]
        else:
            U = self._compute(t, idx)
            self._insert(t, U, idx)
        return U

    def __call__(self, t, t_start=0, **args):
        """Return the propagator from ``t_start`` to ``t``.

        Supplying new ``args`` for a time-dependent system invalidates the
        cache and restarts the solver.
        """
        if not self.cte and args and args != self.args:
            # Time-dependence changed: every cached propagator is stale.
            self.args = args
            self.solver._argument(args)
            self.times = [0]
            self.props = [qeye_like(self.props[0])]
            self.solver.start(self.props[0], self.times[0])
        if t_start:
            if t == t_start:
                U = self._lookup_or_compute(0)
            if self.cte:
                # Constant system: U(t, t_start) depends only on t - t_start.
                U = self._lookup_or_compute(t - t_start)
            else:
                Uinv = self._inv(self._lookup_or_compute(t_start))
                # BUG FIX: the matrix-product operator `@` was missing here.
                U = self._lookup_or_compute(t) @ Uinv
        else:
            U = self._lookup_or_compute(t)
        return U

    def inv(self, t, **args):
        """Return the inverse propagator U(t)^-1."""
        return self._inv(self(t, **args))

    def _compute(self, t, idx):
        """Compute U(t), continuing integration from the closest usable state."""
        t_last = self.solver._integrator.get_state(copy=False)[0]
        if self.times[idx - 1] <= t_last <= t:
            # The integrator is already between the previous cached time and t.
            U = self.solver.step(t)
        elif idx > 0:
            self.solver.start(self.props[idx - 1], self.times[idx - 1])
            U = self.solver.step(t)
        else:
            # t precedes every cached time: propagate forward from t, invert.
            self.solver.start(qeye_like(self.props[0]), t)
            Uinv = self.solver.step(self.times[idx])
            U = self._inv(Uinv)
        return U

    def _inv(self, U):
        """Invert a propagator; the dagger suffices for unitary evolution."""
        return U.dag() if self.unitary else U.inv()

    def _insert(self, t, U, idx):
        """Insert U(t) at position ``idx``, evicting a middle entry when full."""
        while len(self.times) >= self.memoize:
            rm_idx = self.memoize // 2
            if self.times[rm_idx] < t:
                idx -= 1
            del self.times[rm_idx]
            del self.props[rm_idx]
        self.times.insert(idx, t)
        self.props.insert(idx, U)
def _get_epsilon_for_un_fused_bn(graph_def: tf.Graph, bn_conn_graph_op: Op) -> Union[(None, float)]:
    """Return the epsilon constant of an un-fused BatchNorm op, or None.

    The epsilon lives in the ``<bn>/batchnorm/add/y`` constant node of the
    un-fused batchnorm subgraph; scan the graph def for it.
    """
    target_name = bn_conn_graph_op.name + '/batchnorm/add/y'
    for node in graph_def.node:
        if node.name == target_name:
            return node.attr['value'].tensor.float_val[0]
    return None
def main() -> None:
    """Merge spotting/segmentation split info and emit the summary rows."""
    cli_args = _get_command_line_arguments()
    splits_dir = Path(cli_args[ARG_SPLITS_DIR])
    # Spotting paths come as a dict, segmentation as a membership set.
    spotting_paths = _read_spotting_game_paths_dict(splits_dir)
    segmentation_paths = _read_segmentation_game_paths_set(splits_dir)
    rows = _prepare_out_rows(spotting_paths, segmentation_paths)
    _write_rows(rows)
    _print_summary(rows)
class LogNormal(PositiveContinuous):
    """Log-normal distribution: ``log(X) ~ Normal(mu, sigma)``."""
    rv_op = lognormal

    @classmethod
    def dist(cls, mu=0, sigma=None, tau=None, *args, **kwargs):
        # Restored @classmethod (stripped in the dump): the method takes
        # `cls` and is invoked as LogNormal.dist(...) by pymc convention.
        (tau, sigma) = get_tau_sigma(tau=tau, sigma=sigma)
        mu = pt.as_tensor_variable(floatX(mu))
        sigma = pt.as_tensor_variable(floatX(sigma))
        return super().dist([mu, sigma], *args, **kwargs)

    def moment(rv, size, mu, sigma):
        # Distribution moment = exp(mu + sigma^2/2), broadcast to `size`.
        # (pymc dispatch methods take the rv/value first instead of self.)
        mean = pt.exp((mu + (0.5 * (sigma ** 2))))
        if (not rv_size_is_none(size)):
            mean = pt.full(size, mean)
        return mean

    def logp(value, mu, sigma):
        # Log-density; -inf outside the support (value <= 0) and guarded
        # by the sigma > 0 parameter check.
        res = (((((- 0.5) * pt.pow(((pt.log(value) - mu) / sigma), 2)) - (0.5 * pt.log((2.0 * np.pi)))) - pt.log(sigma)) - pt.log(value))
        res = pt.switch(pt.gt(value, 0.0), res, (- np.inf))
        return check_parameters(res, (sigma > 0), msg='sigma > 0')

    def logcdf(value, mu, sigma):
        # log CDF via the normal log-CDF of log(value); -inf for value <= 0.
        res = pt.switch(pt.le(value, 0), (- np.inf), normal_lcdf(mu, sigma, pt.log(value)))
        return check_parameters(res, (sigma > 0), msg='sigma > 0')

    def icdf(value, mu, sigma):
        # Inverse CDF: exponentiate the underlying normal quantile.
        # (`icdf` here resolves to the module-level dispatcher, not this method.)
        res = pt.exp(icdf(Normal.dist(mu, sigma), value))
        return res
class TestTrainingExtensionsQcQuantizeOp():
    """Tests for the custom qc_quantize TF op (CPU/GPU, int and fp16 paths).

    NOTE(review): the pytest marker decorators below (`@pytest.mark.cuda`,
    `@pytest.mark.parametrize`) were stripped to bare `.cuda`/`.parametrize`
    tokens in the source dump and have been restored.
    """

    def test_qc_quantize_op_cpu(self):
        """updateStats passes data through and gathers stats; quantizeDequantize alters it."""
        graph = tf.Graph()
        config = tf.compat.v1.ConfigProto(log_device_placement=False)
        sess = tf.compat.v1.Session(graph=graph, config=config)
        bitwidth = 8
        use_symm_encoding = True
        with graph.as_default():
            with tf.device('/device:CPU:0'):
                inp = tf.compat.v1.placeholder(tf.float32, shape=[10], name='input')
                tensor_quantizer = libpymo.TensorQuantizer(libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED, libpymo.RoundingMode.ROUND_NEAREST)
                tensor_quantizer_val = libpymo.PtrToInt64(tensor_quantizer)
                tensor_quant_ref = tf.Variable(initial_value=tensor_quantizer_val, trainable=False, dtype=tf.int64)
                encoding_min = tf.Variable(initial_value=0.0, trainable=True, dtype=tf.double)
                encoding_max = tf.Variable(initial_value=0.0, trainable=True, dtype=tf.double)
                bit_width = tf.Variable(initial_value=bitwidth, trainable=False, dtype=tf.int8)
                use_symmetric_encoding = tf.Variable(initial_value=use_symm_encoding, trainable=False, dtype=tf.bool)
                is_int_data_type = tf.Variable(initial_value=True, trainable=False, dtype=tf.bool)
                mode_var = tf.Variable(initial_value=int(libpymo.TensorQuantizerOpMode.updateStats), trainable=False, dtype=tf.int32)
                sess.run([mode_var.initializer, tensor_quant_ref.initializer, encoding_min.initializer, encoding_max.initializer, bit_width.initializer, use_symmetric_encoding.initializer, is_int_data_type.initializer])
                pass_through_op_output = zero_out_module.qc_quantize(name='quant_op', in_tensor=inp, op_mode=mode_var, tensor_quantizer_reference=tensor_quant_ref, encoding_min=encoding_min, encoding_max=encoding_max, bit_width=bit_width, use_symmetric_encoding=use_symmetric_encoding, is_int_data_type=is_int_data_type)
        inp_tensor = sess.graph.get_tensor_by_name('input:0')
        inp_data = np.random.rand(10)
        print('inp_data', inp_data)
        out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})
        print('out_data', out_data)
        # updateStats mode is a pass-through; no encoding computed yet.
        assert np.allclose(out_data, inp_data)
        assert (not tensor_quantizer.isEncodingValid)
        encoding = tensor_quantizer.computeEncoding(bitwidth, use_symm_encoding)
        assert tensor_quantizer.isEncodingValid
        print('min=', encoding.min, ', max=', encoding.max)
        # Data outside the calibrated range must be altered by quant-dequant.
        inp_data = (np.random.rand(10) * 2)
        print(inp_data)
        mode_var.load(int(libpymo.TensorQuantizerOpMode.quantizeDequantize), sess)
        out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})
        print(out_data)
        assert (not np.allclose(out_data, inp_data))
        sess.close()

    def test_qc_quantize_op_cpu_fp16_quantize_dequantize(self):
        """fp16 quantize-dequantize on CPU matches the expected rounded values."""
        graph = tf.Graph()
        config = tf.compat.v1.ConfigProto(log_device_placement=False)
        sess = tf.compat.v1.Session(graph=graph, config=config)
        bitwidth = 8
        use_symm_encoding = True
        with graph.as_default():
            with tf.device('/device:CPU:0'):
                inp = tf.compat.v1.placeholder(tf.float32, shape=[10], name='input')
                tensor_quantizer = libpymo.TensorQuantizer(libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED, libpymo.RoundingMode.ROUND_NEAREST)
                tensor_quantizer_val = libpymo.PtrToInt64(tensor_quantizer)
                tensor_quant_ref = tf.Variable(initial_value=tensor_quantizer_val, trainable=False, dtype=tf.int64)
                encoding_min = tf.Variable(initial_value=0.0, trainable=True, dtype=tf.double)
                encoding_max = tf.Variable(initial_value=0.0, trainable=True, dtype=tf.double)
                bit_width = tf.Variable(initial_value=bitwidth, trainable=False, dtype=tf.int8)
                use_symmetric_encoding = tf.Variable(initial_value=use_symm_encoding, trainable=False, dtype=tf.bool)
                is_int_data_type = tf.Variable(initial_value=False, trainable=False, dtype=tf.bool)
                mode_var = tf.Variable(initial_value=int(libpymo.TensorQuantizerOpMode.quantizeDequantize), trainable=False, dtype=tf.int32)
                sess.run([mode_var.initializer, tensor_quant_ref.initializer, encoding_min.initializer, encoding_max.initializer, bit_width.initializer, use_symmetric_encoding.initializer, is_int_data_type.initializer])
                pass_through_op_output = zero_out_module.qc_quantize(name='quant_op', in_tensor=inp, op_mode=mode_var, tensor_quantizer_reference=tensor_quant_ref, encoding_min=encoding_min, encoding_max=encoding_max, bit_width=bit_width, use_symmetric_encoding=use_symmetric_encoding, is_int_data_type=is_int_data_type)
        inp_tensor = sess.graph.get_tensor_by_name('input:0')
        inp_data = np.array([0., 0., 0.6942797, 0., 0., 0., 0.219199, 0., 0., 0.6348504], dtype=np.float32)
        out_exp = np.array([0., 0.4416504, 0., 0.6977539, 0., 0., 0., 0., 0., 0.6347656], dtype=np.float32)
        print('inp_data', inp_data)
        out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})
        print('out_data', out_data)
        assert np.allclose(out_data, out_exp)
        sess.close()

    def test_qc_quantize_op_cpu_fp16_pass_through(self):
        """passThrough mode leaves fp16-typed data untouched."""
        graph = tf.Graph()
        config = tf.compat.v1.ConfigProto(log_device_placement=False)
        sess = tf.compat.v1.Session(graph=graph, config=config)
        bitwidth = 8
        use_symm_encoding = True
        with graph.as_default():
            with tf.device('/device:CPU:0'):
                inp = tf.compat.v1.placeholder(tf.float32, shape=[10], name='input')
                tensor_quantizer = libpymo.TensorQuantizer(libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED, libpymo.RoundingMode.ROUND_NEAREST)
                tensor_quantizer_val = libpymo.PtrToInt64(tensor_quantizer)
                tensor_quant_ref = tf.Variable(initial_value=tensor_quantizer_val, trainable=False, dtype=tf.int64)
                encoding_min = tf.Variable(initial_value=0.0, trainable=True, dtype=tf.double)
                encoding_max = tf.Variable(initial_value=0.0, trainable=True, dtype=tf.double)
                bit_width = tf.Variable(initial_value=bitwidth, trainable=False, dtype=tf.int8)
                use_symmetric_encoding = tf.Variable(initial_value=use_symm_encoding, trainable=False, dtype=tf.bool)
                is_int_data_type = tf.Variable(initial_value=False, trainable=False, dtype=tf.bool)
                mode_var = tf.Variable(initial_value=int(libpymo.TensorQuantizerOpMode.passThrough), trainable=False, dtype=tf.int32)
                sess.run([mode_var.initializer, tensor_quant_ref.initializer, encoding_min.initializer, encoding_max.initializer, bit_width.initializer, use_symmetric_encoding.initializer, is_int_data_type.initializer])
                pass_through_op_output = zero_out_module.qc_quantize(name='quant_op', in_tensor=inp, op_mode=mode_var, tensor_quantizer_reference=tensor_quant_ref, encoding_min=encoding_min, encoding_max=encoding_max, bit_width=bit_width, use_symmetric_encoding=use_symmetric_encoding, is_int_data_type=is_int_data_type)
        inp_tensor = sess.graph.get_tensor_by_name('input:0')
        inp_data = np.array([0., 0., 0.6942797, 0., 0., 0., 0.219199, 0., 0., 0.6348504], dtype=np.float32)
        out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})
        assert np.allclose(out_data, inp_data)
        sess.close()

    def test_qc_quantize_op_oneshot_cpu(self):
        """oneShotQuantizeDequantize computes an encoding and quantizes in one pass."""
        graph = tf.Graph()
        config = tf.compat.v1.ConfigProto(log_device_placement=False)
        sess = tf.compat.v1.Session(graph=graph, config=config)
        bitwidth = 8
        use_symm_encoding = False
        with graph.as_default():
            with tf.device('/device:CPU:0'):
                inp = tf.compat.v1.placeholder(tf.float32, shape=[10], name='input')
                tensor_quantizer = libpymo.TensorQuantizer(libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED, libpymo.RoundingMode.ROUND_NEAREST)
                tensor_quantizer_val = libpymo.PtrToInt64(tensor_quantizer)
                tensor_quant_ref = tf.Variable(initial_value=tensor_quantizer_val, trainable=False, dtype=tf.int64)
                mode_var = tf.Variable(initial_value=int(libpymo.TensorQuantizerOpMode.oneShotQuantizeDequantize), trainable=False, dtype=tf.int32)
                encoding_min = tf.Variable(initial_value=0.0, trainable=True, dtype=tf.double)
                encoding_max = tf.Variable(initial_value=0.0, trainable=True, dtype=tf.double)
                bit_width = tf.Variable(initial_value=bitwidth, trainable=False, dtype=tf.int8)
                use_symmetric_encoding = tf.Variable(initial_value=use_symm_encoding, trainable=False, dtype=tf.bool)
                is_int_data_type = tf.Variable(initial_value=True, trainable=False, dtype=tf.bool)
                sess.run([mode_var.initializer, tensor_quant_ref.initializer, encoding_min.initializer, encoding_max.initializer, bit_width.initializer, use_symmetric_encoding.initializer, is_int_data_type.initializer])
                pass_through_op_output = zero_out_module.qc_quantize(name='quant_op', in_tensor=inp, op_mode=mode_var, tensor_quantizer_reference=tensor_quant_ref, encoding_min=encoding_min, encoding_max=encoding_max, bit_width=bit_width, use_symmetric_encoding=use_symmetric_encoding, is_int_data_type=is_int_data_type)
        inp_tensor = sess.graph.get_tensor_by_name('input:0')
        inp_data = (np.random.rand(10) * 256)
        print(inp_data)
        out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})
        print(out_data)
        assert tensor_quantizer.isEncodingValid
        encoding = tensor_quantizer.computeEncoding(bitwidth, use_symm_encoding)
        print('min=', encoding.min, ', max=', encoding.max)
        assert (not np.allclose(out_data, inp_data))
        sess.close()

    @pytest.mark.cuda
    def test_qc_quantize_op_gpu(self):
        """Same updateStats/quantizeDequantize flow with the op placed on GPU."""
        graph = tf.Graph()
        config = tf.compat.v1.ConfigProto(log_device_placement=False)
        sess = tf.compat.v1.Session(graph=graph, config=config)
        bitwidth = 8
        use_symm_encoding = False
        with graph.as_default():
            inp = tf.compat.v1.placeholder(tf.float32, shape=[10], name='input')
            tensor_quantizer = libpymo.TensorQuantizer(libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED, libpymo.RoundingMode.ROUND_NEAREST)
            tensor_quantizer_val = libpymo.PtrToInt64(tensor_quantizer)
            tensor_quant_ref = tf.Variable(initial_value=tensor_quantizer_val, trainable=False, dtype=tf.int64)
            mode_var = tf.Variable(initial_value=int(libpymo.TensorQuantizerOpMode.updateStats), trainable=False, dtype=tf.int32)
            encoding_min = tf.Variable(initial_value=0.0, trainable=True, dtype=tf.double)
            encoding_max = tf.Variable(initial_value=0.0, trainable=True, dtype=tf.double)
            bit_width = tf.Variable(initial_value=bitwidth, trainable=False, dtype=tf.int8)
            use_symmetric_encoding = tf.Variable(initial_value=use_symm_encoding, trainable=False, dtype=tf.bool)
            is_int_data_type = tf.Variable(initial_value=True, trainable=False, dtype=tf.bool)
            sess.run([mode_var.initializer, tensor_quant_ref.initializer, encoding_min.initializer, encoding_max.initializer, bit_width.initializer, use_symmetric_encoding.initializer, is_int_data_type.initializer])
            with tf.device('/device:GPU:0'):
                pass_through_op_output = zero_out_module.qc_quantize(name='quant_op', in_tensor=inp, op_mode=mode_var, tensor_quantizer_reference=tensor_quant_ref, encoding_min=encoding_min, encoding_max=encoding_max, bit_width=bit_width, use_symmetric_encoding=use_symmetric_encoding, is_int_data_type=is_int_data_type)
        inp_tensor = sess.graph.get_tensor_by_name('input:0')
        inp_data = np.random.rand(10)
        print('inp_data', inp_data)
        with tf.device('/device:GPU:0'):
            out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})
        print('out_data', out_data)
        assert np.allclose(out_data, inp_data)
        assert (not tensor_quantizer.isEncodingValid)
        encoding = tensor_quantizer.computeEncoding(bitwidth, use_symm_encoding)
        assert tensor_quantizer.isEncodingValid
        print('min=', encoding.min, ', max=', encoding.max)
        inp_data = (np.random.rand(10) * 2)
        print('inp_data', inp_data)
        mode_var.load(int(libpymo.TensorQuantizerOpMode.quantizeDequantize), sess)
        with tf.device('/device:GPU:0'):
            out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})
        print('out_data', out_data)
        assert (not np.allclose(out_data, inp_data))
        sess.close()

    @pytest.mark.cuda
    def test_qc_quantize_op_gpu_fp16(self):
        """fp16 quantize-dequantize on GPU matches the expected rounded values."""
        graph = tf.Graph()
        config = tf.compat.v1.ConfigProto(log_device_placement=False)
        sess = tf.compat.v1.Session(graph=graph, config=config)
        bitwidth = 8
        use_symm_encoding = False
        with graph.as_default():
            inp = tf.compat.v1.placeholder(tf.float32, shape=[10], name='input')
            tensor_quantizer = libpymo.TensorQuantizer(libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED, libpymo.RoundingMode.ROUND_NEAREST)
            tensor_quantizer_val = libpymo.PtrToInt64(tensor_quantizer)
            tensor_quant_ref = tf.Variable(initial_value=tensor_quantizer_val, trainable=False, dtype=tf.int64)
            mode_var = tf.Variable(initial_value=int(libpymo.TensorQuantizerOpMode.quantizeDequantize), trainable=False, dtype=tf.int32)
            encoding_min = tf.Variable(initial_value=0.0, trainable=True, dtype=tf.double)
            encoding_max = tf.Variable(initial_value=0.0, trainable=True, dtype=tf.double)
            bit_width = tf.Variable(initial_value=bitwidth, trainable=False, dtype=tf.int8)
            use_symmetric_encoding = tf.Variable(initial_value=use_symm_encoding, trainable=False, dtype=tf.bool)
            is_int_data_type = tf.Variable(initial_value=False, trainable=False, dtype=tf.bool)
            sess.run([mode_var.initializer, tensor_quant_ref.initializer, encoding_min.initializer, encoding_max.initializer, bit_width.initializer, use_symmetric_encoding.initializer, is_int_data_type.initializer])
            with tf.device('/device:GPU:0'):
                pass_through_op_output = zero_out_module.qc_quantize(name='quant_op', in_tensor=inp, op_mode=mode_var, tensor_quantizer_reference=tensor_quant_ref, encoding_min=encoding_min, encoding_max=encoding_max, bit_width=bit_width, use_symmetric_encoding=use_symmetric_encoding, is_int_data_type=is_int_data_type)
        inp_tensor = sess.graph.get_tensor_by_name('input:0')
        inp_data = np.array([0., 0., 0.6942797, 0., 0., 0., 0.219199, 0., 0., 0.6348504], dtype=np.float32)
        out_exp = np.array([0., 0.4416504, 0., 0.6977539, 0., 0., 0., 0., 0., 0.6347656], dtype=np.float32)
        print('inp_data', inp_data)
        with tf.device('/device:GPU:0'):
            out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})
        print('out_data', out_data)
        assert np.allclose(out_data, out_exp)
        sess.close()

    @pytest.mark.cuda
    @pytest.mark.parametrize('use_symmetric_encoding, use_strict_symmetric, use_unsigned_symmetric, encoding_min_val, expected_out', [(True, False, False, (- 10.0), (- 4.0)), (True, False, True, (- 10.0), (- 4.0)), (True, False, True, 0.0, 0.0), (True, True, False, (- 10.0), (- 3.0)), (False, False, False, (- 7.5), (- 4.5))])
    def test_qc_quantize_op_symmetric_modes(self, use_symmetric_encoding, use_strict_symmetric, use_unsigned_symmetric, encoding_min_val, expected_out):
        """Quantize-dequantize honours strict/unsigned symmetric encoding modes."""
        tf.compat.v1.reset_default_graph()
        graph = tf.Graph()
        config = tf.compat.v1.ConfigProto(log_device_placement=False)
        sess = tf.compat.v1.Session(graph=graph, config=config)
        bitwidth = 3
        use_symm_encoding = use_symmetric_encoding
        with graph.as_default():
            inp = tf.compat.v1.placeholder(tf.float32, shape=[1], name='input')
            tensor_quantizer = libpymo.TensorQuantizer(libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED, libpymo.RoundingMode.ROUND_NEAREST)
            tensor_quantizer.setStrictSymmetric(use_strict_symmetric)
            tensor_quantizer.setUnsignedSymmetric(use_unsigned_symmetric)
            tensor_quantizer.isEncodingValid = True
            tensor_quantizer_val = libpymo.PtrToInt64(tensor_quantizer)
            tensor_quant_ref = tf.Variable(initial_value=tensor_quantizer_val, trainable=False, dtype=tf.int64)
            mode_var = tf.Variable(initial_value=int(libpymo.TensorQuantizerOpMode.quantizeDequantize), trainable=False, dtype=tf.int32)
            encoding_min = tf.Variable(initial_value=encoding_min_val, trainable=True, dtype=tf.double)
            encoding_max = tf.Variable(initial_value=3.0, trainable=True, dtype=tf.double)
            bit_width = tf.Variable(initial_value=bitwidth, trainable=False, dtype=tf.int8)
            use_symmetric_encoding = tf.Variable(initial_value=use_symm_encoding, trainable=False, dtype=tf.bool)
            is_int_data_type = tf.Variable(initial_value=True, trainable=False, dtype=tf.bool)
            sess.run([mode_var.initializer, tensor_quant_ref.initializer, encoding_min.initializer, encoding_max.initializer, bit_width.initializer, use_symmetric_encoding.initializer, is_int_data_type.initializer])
            with tf.device('/device:GPU:0'):
                pass_through_op_output = zero_out_module.qc_quantize(name='quant_op', in_tensor=inp, op_mode=mode_var, tensor_quantizer_reference=tensor_quant_ref, encoding_min=encoding_min, encoding_max=encoding_max, bit_width=bit_width, use_symmetric_encoding=use_symmetric_encoding, is_int_data_type=is_int_data_type)
        inp_tensor = sess.graph.get_tensor_by_name('input:0')
        inp_data = np.array([(- 5.0)], dtype=np.float32)
        out_exp = np.array([expected_out], dtype=np.float32)
        print('inp_data', inp_data)
        with tf.device('/device:GPU:0'):
            out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})
        print('out_data', out_data)
        assert np.allclose(out_data, out_exp)
        sess.close()
# Restored @pytest.mark.parametrize (stripped to a bare `.parametrize` in the dump).
@pytest.mark.parametrize('status, raising, message', [
    (QDataStream.Status.Ok, False, None),
    (QDataStream.Status.ReadPastEnd, True, 'The data stream has read past the end of the data in the underlying device.'),
    (QDataStream.Status.ReadCorruptData, True, 'The data stream has read corrupt data.'),
    (QDataStream.Status.WriteFailed, True, 'The data stream cannot write to the underlying device.'),
])
def test_check_qdatastream(status, raising, message):
    """check_qdatastream raises OSError with the right message for bad statuses."""
    stream = QDataStream()
    stream.setStatus(status)
    if raising:
        with pytest.raises(OSError, match=message):
            qtutils.check_qdatastream(stream)
    else:
        qtutils.check_qdatastream(stream)
class TestNoReassignmentChecker(pylint.testutils.CheckerTestCase):
    """Tests for the example NoReassignmentChecker pylint plugin.

    NOTE(review): the `#@` extraction markers in the astroid snippets lost
    their '@' in the source dump (which would make extract_node return a
    single node and break the tuple unpacking); they have been restored.
    The snippet indentation was also flattened — confirm against upstream.
    """
    CHECKER_CLASS = example_plugin.NoReassignmentChecker

    def test_finds_reassigned_variable(self):
        """Reassigning the same name in one scope emits a message on the first node."""
        (assign_node_a, assign_node_b) = astroid.extract_node('\n    test = 1 #@\n    test = 2 #@\n    ')
        self.checker.visit_assign(assign_node_a)
        self.checker.visit_assign(assign_node_b)
        self.assertAddsMessages(pylint.testutils.MessageTest(msg_id='reassigned-variable', node=assign_node_a))

    def test_ignores_no_reassigned_variable(self):
        """Assignments to distinct names emit nothing."""
        (assign_node_a, assign_node_b) = astroid.extract_node('\n    test1 = 1 #@\n    test2 = 2 #@\n    ')
        with self.assertNoMessages():
            self.checker.visit_assign(assign_node_a)
            self.checker.visit_assign(assign_node_b)

    def test_ignores_variable_outside_function(self):
        """Same-named locals in different functions are independent."""
        (func_node, assign_node_a, assign_node_b) = astroid.extract_node('\n    def test1(): #@\n        test = 1 #@\n\n    def test2():\n        test = 2 #@\n    ')
        with self.assertNoMessages():
            self.checker.visit_assign(assign_node_a)
            # Leaving the function scope must clear its tracked assignments.
            self.checker.leave_functiondef(func_node)
            self.checker.visit_assign(assign_node_b)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.