code stringlengths 101 5.91M |
|---|
def instance_whitening_loss(f_map, eye, mask_matrix, margin, num_remove_cov):
    """Instance-whitening loss: penalize masked covariance mass above `margin`.

    The covariance of the feature map is masked (to select the entries to be
    suppressed), its absolute sum per sample is reduced by `margin`, clamped at
    zero, normalized by `num_remove_cov`, and averaged over the batch.
    """
    cov, batch_size = get_covariance_matrix(f_map, eye=eye)
    masked_abs = torch.abs(cov * mask_matrix)
    excess = torch.sum(masked_abs, dim=(1, 2), keepdim=True) - margin
    per_sample = torch.clamp(excess / num_remove_cov, min=0)
    return torch.sum(per_sample) / batch_size
def prune(spans, T, LAMBDA=0.4):
    """Keep at most LAMBDA*T non-overlapping spans with the highest `si` score,
    returned in document order (sorted by start, then end index)."""
    keep = int(LAMBDA * T)
    by_score = sorted(spans, key=lambda s: s.si, reverse=True)
    kept = remove_overlapping(by_score)[:keep]
    return sorted(kept, key=lambda s: (s.i1, s.i2))
def exposure_meter_BC_vel(JDUTC, expmeterflux, starname='', hip_id=None, ra=None, dec=None, epoch=None, pmra=None, pmdec=None, px=None, rv=None, obsname='', lat=0.0, longi=0.0, alt=0.0, zmeas=0.0, ephemeris='de430', leap_dir=os.path.join(os.path.dirname(__file__), 'data'), leap_update=True, SolSystemTarget=None, HorizonsID_type='smallbody', predictive=False):
    """Flux-weighted barycentric-correction velocity and mid-time for an exposure.

    Computes the per-timestamp BC velocities via get_BC_vel and weights them by
    the exposure-meter fluxes; also returns the flux-weighted mid exposure time.

    Returns:
        (weighted_vel, JDUTCMID, warning, status) — status is set to 2 when the
        JDUTC and flux arrays have mismatched lengths.
    """
    expmeterflux = np.array(expmeterflux)
    error = []
    # Allow a scalar timestamp for a single flux sample.
    if isinstance(JDUTC, (int, float)):
        JDUTC = np.array([JDUTC])
    if (len(JDUTC) != len(expmeterflux)):
        print('Error: Size of JDUTC array is not equal to expmeterflux (Flux) array')
        error += ['Error: Size of JDUTC array is not equal to expmeterflux (Flux) array']
    (vel, warning, status) = get_BC_vel(JDUTC=JDUTC, starname=starname, hip_id=hip_id, ra=ra, dec=dec, epoch=epoch, pmra=pmra, pmdec=pmdec, px=px, obsname=obsname, lat=lat, longi=longi, alt=alt, rv=rv, zmeas=zmeas, ephemeris=ephemeris, SolSystemTarget=SolSystemTarget, HorizonsID_type=HorizonsID_type, predictive=predictive)
    weighted_vel = flux_weighting(flux=expmeterflux, qty=vel)
    try:
        # Astropy Time objects expose .jd; plain sequences/arrays do not.
        JDUTC = JDUTC.jd
    except AttributeError:
        # Narrowed from a bare `except:` — only the missing-attribute case is expected.
        JDUTC = np.array(JDUTC)
    JDUTCMID = flux_weighting(flux=expmeterflux, qty=JDUTC)
    if error:
        warning.append(error)
        status = 2
    return (weighted_vel, JDUTCMID, warning, status)
class ImageClassifierCLI(CLI):
    # Lightning CLI subclass for the deepsea image-classification experiment.
    def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None:
        """Link data-module outputs to model inputs and set experiment defaults."""
        super().add_arguments_to_parser(parser)
        # apply_on='instantiate' copies the values from the *instantiated* data
        # module, so the model always matches the dataset's class count and shape.
        parser.link_arguments('data.num_classes', 'model.num_classes', apply_on='instantiate')
        parser.link_arguments('data.image_shape', 'model.image_shape', apply_on='instantiate')
        parser.set_defaults({'experiment': 'deepsea', 'model.num_frequency_bands': 32, 'model.num_latents': 32, 'model.num_latent_channels': 128, 'model.encoder.num_layers': 3, 'model.encoder.num_self_attention_layers_per_block': 3, 'model.decoder.num_cross_attention_heads': 1, 'model.scorer': 'auroc', 'model.loss_fn': 'BCEWithLogitsLoss'})
def test_mi_base_return():
    """Multiple-inheritance binding test: factories that return base pointers
    must still produce the most-derived Python type, and deleting objects must
    deregister their instances."""
    # Baseline instance count before any objects are created.
    n_inst = ConstructorStats.detail_reg_inst()
    # Returned via the first base; should still downcast to the derived type.
    c1 = m.i801c_b1()
    assert (type(c1) is m.I801C)
    assert (c1.a == 1)
    assert (c1.b == 2)
    d1 = m.i801d_b1()
    assert (type(d1) is m.I801D)
    assert (d1.a == 1)
    assert (d1.b == 2)
    # Two objects created, each presumably registering two instances — TODO confirm.
    assert (ConstructorStats.detail_reg_inst() == (n_inst + 4))
    # Same checks for factories returning via the second base.
    c2 = m.i801c_b2()
    assert (type(c2) is m.I801C)
    assert (c2.a == 1)
    assert (c2.b == 2)
    d2 = m.i801d_b2()
    assert (type(d2) is m.I801D)
    assert (d2.a == 1)
    assert (d2.b == 2)
    assert (ConstructorStats.detail_reg_inst() == (n_inst + 8))
    # Deleting one object should release exactly its registered instances.
    del c2
    assert (ConstructorStats.detail_reg_inst() == (n_inst + 6))
    del c1, d1, d2
    assert (ConstructorStats.detail_reg_inst() == n_inst)
    # Factories with an unregistered intermediate type still bind to the
    # nearest registered type.
    e1 = m.i801e_c()
    assert (type(e1) is m.I801C)
    assert (e1.a == 1)
    assert (e1.b == 2)
    e2 = m.i801e_b2()
    assert (type(e2) is m.I801B2)
    assert (e2.b == 2)
def run(args):
    """Train, restore, or evaluate a DICG-critic centralized-PPO agent on Traffic Junction.

    args.mode selects the action ('train', 'restore', 'eval'); the experiment
    name and log directory are derived from the hyperparameters unless
    args.exp_name is given.
    """
    if (args.exp_name is None):
        # Encode the run's hyperparameters into a readable experiment name.
        exp_layout = collections.OrderedDict([('dicg{}_de_ppo', args.n_gcn_layers), ('atype={}', args.attention_type), ('res={}', bool(args.residual)), ('entcoeff={}', args.ent), ('dim={}', args.dim), ('nagents={}', args.n_agents), ('difficulty={}', args.difficulty), ('curr={}', bool(args.curriculum)), ('steps={}', args.max_env_steps), ('nenvs={}', args.n_envs), ('bs={:0.0e}', args.bs), ('splits={}', args.opt_n_minibatches), ('miniepoch={}', args.opt_mini_epochs), ('seed={}', args.seed)])
        exp_name = '_'.join([key.format(val) for (key, val) in exp_layout.items()])
    else:
        exp_name = args.exp_name
    prefix = 'traffic'
    id_suffix = (('_' + str(args.run_id)) if (args.run_id != 0) else '')
    # Drops the trailing seed part of the name (last 7 chars, presumably 'seed=NN' — TODO confirm).
    unseeded_exp_dir = ((('./data/' + args.loc) + '/') + exp_name[:(- 7)])
    exp_dir = (((('./data/' + args.loc) + '/') + exp_name) + id_suffix)
    # The max-entropy objective handles advantage centering itself.
    args.center_adv = (False if (args.entropy_method == 'max') else args.center_adv)
    if (args.mode == 'train'):
        # NOTE(review): this decorator line was garbled in the source
        # ('_experiment(...)' as a bare call); restored as garage's
        # @wrap_experiment so train_traffic receives a snapshot context (ctxt).
        @wrap_experiment(name=exp_name, prefix=prefix, log_dir=exp_dir, snapshot_mode='last', snapshot_gap=1)
        def train_traffic(ctxt=None, args_dict=vars(args)):
            """Experiment body: build env, policy, critic, algo, then train."""
            args = SimpleNamespace(**args_dict)
            set_seed(args.seed)
            if args.curriculum:
                # Ramp traffic add-rate over the middle half of training.
                curr_start = int((0.125 * args.n_epochs))
                curr_end = int((0.625 * args.n_epochs))
            else:
                curr_start = 0
                curr_end = 0
                args.add_rate_min = args.add_rate_max
            env = TrafficJunctionWrapper(centralized=True, dim=args.dim, vision=1, add_rate_min=args.add_rate_min, add_rate_max=args.add_rate_max, curr_start=curr_start, curr_end=curr_end, difficulty=args.difficulty, n_agents=args.n_agents, max_steps=args.max_env_steps)
            env = GarageEnv(env)
            runner = LocalRunnerWrapper(ctxt, eval=args.eval_during_training, n_eval_episodes=args.n_eval_episodes, eval_greedy=args.eval_greedy, eval_epoch_freq=args.eval_epoch_freq, save_env=env.pickleable)
            hidden_nonlinearity = (F.relu if (args.hidden_nonlinearity == 'relu') else torch.tanh)
            policy = DecCategoricalMLPPolicy(env.spec, env.n_agents, hidden_nonlinearity=hidden_nonlinearity, hidden_sizes=args.policy_hidden_sizes, name='dec_categorical_mlp_policy')
            baseline = DICGCritic(env.spec, env.n_agents, encoder_hidden_sizes=args.encoder_hidden_sizes, embedding_dim=args.embedding_dim, attention_type=args.attention_type, n_gcn_layers=args.n_gcn_layers, residual=args.residual, gcn_bias=args.gcn_bias, name='dicg_critic')
            algo = CentralizedMAPPO(env_spec=env.spec, policy=policy, baseline=baseline, max_path_length=args.max_env_steps, discount=args.discount, center_adv=bool(args.center_adv), positive_adv=bool(args.positive_adv), gae_lambda=args.gae_lambda, policy_ent_coeff=args.ent, entropy_method=args.entropy_method, stop_entropy_gradient=(True if (args.entropy_method == 'max') else False), clip_grad_norm=args.clip_grad_norm, optimization_n_minibatches=args.opt_n_minibatches, optimization_mini_epochs=args.opt_mini_epochs)
            runner.setup(algo, env, sampler_cls=CentralizedMAOnPolicyVectorizedSampler, sampler_args={'n_envs': args.n_envs})
            runner.train(n_epochs=args.n_epochs, batch_size=args.bs)
        train_traffic(args_dict=vars(args))
    elif (args.mode in ['restore', 'eval']):
        # Reload the pickled experiment state from the training run.
        data = joblib.load((exp_dir + '/params.pkl'))
        env = data['env']
        algo = data['algo']
        if (args.mode == 'restore'):
            from dicg.experiment.runner_utils import restore_training
            restore_training(exp_dir, exp_name, args, env_saved=env.pickleable, env=env)
        elif (args.mode == 'eval'):
            env.eval(algo.policy, n_episodes=args.n_eval_episodes, greedy=args.eval_greedy, load_from_file=True, max_steps=args.max_env_steps, render=args.render)
class FIDInceptionE_2(torchvision.models.inception.InceptionE):
    """Second InceptionE block patched for FID computation.

    NOTE(review): the pool branch uses max pooling where torchvision's
    InceptionE uses average pooling — presumably to match the original FID
    Inception weights; confirm against the pytorch-fid reference.
    """
    def __init__(self, in_channels):
        super(FIDInceptionE_2, self).__init__(in_channels)
    def forward(self, x):
        """Run the four parallel branches and concatenate along channels."""
        branch1x1 = self.branch1x1(x)
        # 3x3 branch splits into two parallel convs whose outputs are concatenated.
        branch3x3 = self.branch3x3_1(x)
        branch3x3 = [self.branch3x3_2a(branch3x3), self.branch3x3_2b(branch3x3)]
        branch3x3 = torch.cat(branch3x3, 1)
        # Double-3x3 branch, also ending in a parallel split.
        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = [self.branch3x3dbl_3a(branch3x3dbl), self.branch3x3dbl_3b(branch3x3dbl)]
        branch3x3dbl = torch.cat(branch3x3dbl, 1)
        # Max pooling here (see class note) followed by the 1x1 pool projection.
        branch_pool = F.max_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)
        outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)
class NaiveModelParallelSplitter():
    """Naively spreads a model's blocks across CUDA devices, in order.

    Blocks come from model.get_block_list(); each block (and its exclusive
    submodules) is moved to one device and its forward is wrapped so that
    inputs follow the module's device.
    """

    def __init__(self):
        pass

    @staticmethod
    def spread_on_devices(model: torch.nn.Module, devices: Optional[List]=None):
        """Place `model` across `devices` (defaults to all visible CUDA devices).

        Fixed: this was declared as an instance method without `self`, so
        calling it on an instance bound the instance itself to `model`; it is
        now a @staticmethod (unbound calls keep working unchanged).
        """
        if devices is None:
            # Previously `devices` stayed None on CPU-only hosts and crashed below.
            devices = list(range(torch.cuda.device_count())) if torch.cuda.is_available() else []
        if len(devices) < 2:
            # Zero or one device: move the whole model at once (no split needed).
            device = devices[0] if devices else None
            if device is not None:
                # sic: the helper's name is misspelled ('deivce') upstream.
                model.forward = decorate_args_and_kwargs_to_deivce(func=model.forward, device=device)
                model.to(device)
            return
        modules_to_move = set(model.modules())
        handled_models = set()
        block_list = model.get_block_list()
        # Contiguous groups of blocks per device; guard against fewer blocks than
        # devices (previously a ZeroDivisionError at the index computation).
        group_size = max(1, len(block_list) // len(devices))
        for i, block in enumerate(block_list):
            # Clamp so remainder blocks land on the last device instead of
            # indexing past the end of `devices`.
            device = devices[min(i // group_size, len(devices) - 1)]
            block.to(device)
            block.device = device
            block.forward = decorate_args_and_kwargs_to_deivce(func=block.forward, device=device)
            modules_to_move.remove(block)
            handled_models.add(block)
            for nm, m in block.named_modules():
                if m in modules_to_move:
                    m.forward = decorate_args_and_kwargs_to_deivce(func=m.forward, device=device)
                    modules_to_move.remove(m)
                    handled_models.add(m)
                else:
                    # Submodule shared with an earlier block: leave it where it is.
                    logger.info(f'Shared model not moved {nm}')
        # Whatever remains (modules outside the block list) goes to the first device.
        device = devices[0]
        for module in list(modules_to_move):
            sumbs = set(module.modules())
            intersection = sumbs & handled_models
            if intersection:
                logger.info('skipping model because it or one or more of submodules was already handled')
                continue
            else:
                logger.info(f'remaining module will be placed on device {device} ')
                module.to(device)
                module.device = device
                module.forward = decorate_args_and_kwargs_to_deivce(func=module.forward, device=device)
def log_loss_calc(classes, prob_vector, actual_vector, normalize=True, sample_weight=None, pos_class=None):
    """Weighted binary cross-entropy (log loss).

    `prob_vector[i]` is the predicted probability of the positive class for
    sample i; the positive class defaults to max(classes). With
    normalize=False the (weight-normalized) loss is rescaled by the total
    weight. Returns the string 'None' if any error occurs (e.g. probabilities
    outside (0, 1)), matching the library's error convention.
    """
    try:
        n = len(actual_vector)
        weights = [1] * n if sample_weight is None else sample_weight
        total_weight = sum(weights)
        positive = max(classes) if pos_class is None else pos_class
        loss = 0
        for idx, label in enumerate(actual_vector):
            y = 1 if label == positive else 0
            p = prob_vector[idx]
            loss -= (weights[idx] / total_weight) * (y * math.log(p) + (1 - y) * math.log(1 - p))
        if not normalize:
            loss *= total_weight
        return loss
    except Exception:
        return 'None'
class OverFeatTest(tf.test.TestCase):
    """Shape/variable/endpoint tests for the slim OverFeat model.

    Fixed: `assertEquals` is a long-deprecated alias (removed in Python 3.12);
    replaced with `assertEqual` throughout.
    """

    def testBuild(self):
        """Logits op name and shape for the canonical 231x231 input."""
        batch_size = 5
        (height, width) = (231, 231)
        num_classes = 1000
        with self.test_session():
            inputs = tf.random_uniform((batch_size, height, width, 3))
            (logits, _) = overfeat.overfeat(inputs, num_classes)
            self.assertEqual(logits.op.name, 'overfeat/fc8/squeezed')
            self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes])

    def testFullyConvolutional(self):
        """Without spatial squeeze, a larger input yields a spatial logits map."""
        batch_size = 1
        (height, width) = (281, 281)
        num_classes = 1000
        with self.test_session():
            inputs = tf.random_uniform((batch_size, height, width, 3))
            (logits, _) = overfeat.overfeat(inputs, num_classes, spatial_squeeze=False)
            self.assertEqual(logits.op.name, 'overfeat/fc8/BiasAdd')
            self.assertListEqual(logits.get_shape().as_list(), [batch_size, 2, 2, num_classes])

    def testEndPoints(self):
        """The endpoint dict exposes every layer by its scoped name."""
        batch_size = 5
        (height, width) = (231, 231)
        num_classes = 1000
        with self.test_session():
            inputs = tf.random_uniform((batch_size, height, width, 3))
            (_, end_points) = overfeat.overfeat(inputs, num_classes)
            expected_names = ['overfeat/conv1', 'overfeat/pool1', 'overfeat/conv2', 'overfeat/pool2', 'overfeat/conv3', 'overfeat/conv4', 'overfeat/conv5', 'overfeat/pool5', 'overfeat/fc6', 'overfeat/fc7', 'overfeat/fc8']
            self.assertSetEqual(set(end_points.keys()), set(expected_names))

    def testModelVariables(self):
        """Every conv/fc layer registers weights and biases as model variables."""
        batch_size = 5
        (height, width) = (231, 231)
        num_classes = 1000
        with self.test_session():
            inputs = tf.random_uniform((batch_size, height, width, 3))
            overfeat.overfeat(inputs, num_classes)
            expected_names = ['overfeat/conv1/weights', 'overfeat/conv1/biases', 'overfeat/conv2/weights', 'overfeat/conv2/biases', 'overfeat/conv3/weights', 'overfeat/conv3/biases', 'overfeat/conv4/weights', 'overfeat/conv4/biases', 'overfeat/conv5/weights', 'overfeat/conv5/biases', 'overfeat/fc6/weights', 'overfeat/fc6/biases', 'overfeat/fc7/weights', 'overfeat/fc7/biases', 'overfeat/fc8/weights', 'overfeat/fc8/biases']
            model_variables = [v.op.name for v in slim.get_model_variables()]
            self.assertSetEqual(set(model_variables), set(expected_names))

    def testEvaluation(self):
        """Eval mode (is_training=False) keeps the expected logits/prediction shapes."""
        batch_size = 2
        (height, width) = (231, 231)
        num_classes = 1000
        with self.test_session():
            eval_inputs = tf.random_uniform((batch_size, height, width, 3))
            (logits, _) = overfeat.overfeat(eval_inputs, is_training=False)
            self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes])
            predictions = tf.argmax(logits, 1)
            self.assertListEqual(predictions.get_shape().as_list(), [batch_size])

    def testTrainEvalWithReuse(self):
        """Variables built at train time are reusable for a larger eval input."""
        train_batch_size = 2
        eval_batch_size = 1
        (train_height, train_width) = (231, 231)
        (eval_height, eval_width) = (281, 281)
        num_classes = 1000
        with self.test_session():
            train_inputs = tf.random_uniform((train_batch_size, train_height, train_width, 3))
            (logits, _) = overfeat.overfeat(train_inputs)
            self.assertListEqual(logits.get_shape().as_list(), [train_batch_size, num_classes])
            tf.get_variable_scope().reuse_variables()
            eval_inputs = tf.random_uniform((eval_batch_size, eval_height, eval_width, 3))
            (logits, _) = overfeat.overfeat(eval_inputs, is_training=False, spatial_squeeze=False)
            self.assertListEqual(logits.get_shape().as_list(), [eval_batch_size, 2, 2, num_classes])
            logits = tf.reduce_mean(logits, [1, 2])
            predictions = tf.argmax(logits, 1)
            self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size])

    def testForward(self):
        """A forward pass produces a non-zero output."""
        batch_size = 1
        (height, width) = (231, 231)
        with self.test_session() as sess:
            inputs = tf.random_uniform((batch_size, height, width, 3))
            (logits, _) = overfeat.overfeat(inputs)
            sess.run(tf.initialize_all_variables())
            output = sess.run(logits)
            self.assertTrue(output.any())
def get_loader(mode, data_root, batch_size, shuffle, num_workers, test_mode=None):
    """Build a DataLoader over the Vimeo septuplet dataset.

    Training transforms are enabled only when mode == 'train';
    `test_mode` is accepted for interface compatibility but unused.
    """
    dataset = VimeoSepTuplet(data_root, is_training=(mode == 'train'))
    return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, pin_memory=True)
def prev_identical_edge(cur, E, edits):
    """Return the first edge in E that ends where `cur` starts and carries the
    same edit label, or None if there is none."""
    candidates = (e for e in E if e[1] == cur[0] and edits[e] == edits[cur])
    return next(candidates, None)
def load_lib():
    """Load the CDF shared library and attach ctypes signatures from `call_dict`.

    Each call_dict entry maps a symbol name to (restype, *argtypes); an entry
    with only a restype leaves argtypes as None (unspecified).
    """
    lib = ctypes.CDLL('/usr/local/lib/libcdf.so')
    for name in call_dict:
        spec = call_dict[name]
        fn = getattr(lib, name)
        fn.restype = spec[0]
        fn.argtypes = spec[1:] if len(spec) > 1 else None
    return lib
def _check_voxceleb_folders(data_folders, splits):
for data_folder in data_folders:
if ('train' in splits):
folder_vox1 = os.path.join(data_folder, 'wav', 'id10001')
folder_vox2 = os.path.join(data_folder, 'wav', 'id00012')
if ((not os.path.exists(folder_vox1)) or (not os.path.exists(folder_vox2))):
err_msg = 'the specified folder does not contain Voxceleb'
raise FileNotFoundError(err_msg)
if ('test' in splits):
folder = os.path.join(data_folder, 'wav', 'id10270')
if (not os.path.exists(folder)):
err_msg = ('the folder %s does not exist (as it is expected in the Voxceleb dataset)' % folder)
raise FileNotFoundError(err_msg)
folder = os.path.join(data_folder, 'meta')
if (not os.path.exists(folder)):
err_msg = ('the folder %s does not exist (as it is expected in the Voxceleb dataset)' % folder)
raise FileNotFoundError(err_msg) |
# NOTE(review): the decorator line was garbled in the source ('.serial');
# restored as pytest's serial mark — confirm pytest is imported at module level.
@pytest.mark.serial
def test_log_multitask_performance_task_id():
    """log_multitask_performance must split a mixed-task batch into per-env
    metrics keyed by the task-id -> name mapping, including envs with no data."""
    lengths = np.array([10, 5, 1, 1])
    # Four trajectories: two from task 1, one from task 3, one from task 4.
    batch = TrajectoryBatch(EnvSpec(akro.Box(np.array([0.0, 0.0, 0.0]), np.array([1.0, 1.0, 1.0])), akro.Box(np.array([(- 1.0), (- 1.0)]), np.array([0.0, 0.0]))), observations=np.ones((sum(lengths), 3), dtype=np.float32), last_observations=np.ones((len(lengths), 3), dtype=np.float32), actions=np.zeros((sum(lengths), 2), dtype=np.float32), rewards=np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.4332891]), terminals=np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1], dtype=bool), env_infos={'success': np.array([0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1], dtype=bool), 'task_id': np.array((((([1] * 10) + ([3] * 5)) + [1]) + [4]))}, agent_infos={}, lengths=lengths)
    # Route tabular output to a CSV file we can read back.
    log_file = tempfile.NamedTemporaryFile()
    csv_output = dowel.CsvOutput(log_file.name)
    logger.add_output(csv_output)
    log_multitask_performance(7, batch, 0.8, {1: 'env1', 3: 'env2', 4: 'env3', 5: 'env4'})
    logger.log(tabular)
    logger.dump_output_type(dowel.CsvOutput)
    with open(log_file.name, 'r') as file:
        rows = list(csv.DictReader(file))
    res = {k: float(r) for (k, r) in rows[0].items()}
    assert (res['env1/Iteration'] == 7)
    assert (res['env2/Iteration'] == 7)
    assert (res['env3/Iteration'] == 7)
    assert (res['env4/Iteration'] == 7)
    assert (res['env1/NumTrajs'] == 2)
    assert (res['env2/NumTrajs'] == 1)
    assert (res['env3/NumTrajs'] == 1)
    assert (res['env4/NumTrajs'] == 0)
    assert math.isclose(res['env1/SuccessRate'], 0.5)
    assert math.isclose(res['env2/SuccessRate'], 1.0)
    assert math.isclose(res['env3/SuccessRate'], 1.0)
    # env4 (task 5) has no trajectories, so its stats must be NaN.
    assert math.isnan(res['env4/SuccessRate'])
    assert math.isnan(res['env4/AverageReturn'])
def butter_lowpass(cutoff, fs, order=5):
    """Design a digital Butterworth low-pass filter.

    `cutoff` (Hz) is normalized by the Nyquist frequency fs/2.
    Returns the (b, a) transfer-function coefficients.
    """
    normalized_cutoff = cutoff / (0.5 * fs)
    return butter(order, normalized_cutoff, btype='low', analog=False)
def simple_separated_format(separator):
    """Tabulate format whose rows are plain fields joined by `separator` —
    no borders, no padding, headers not hidden."""
    header_row = DataRow('', separator, '')
    data_row = DataRow('', separator, '')
    return TableFormat(None, None, None, None, headerrow=header_row, datarow=data_row, padding=0, with_header_hide=None)
class ProfileResult():
    # Container for per-stage profiling statistics; dict keys are stage ids.
    # f_* = forward timings, b_* = backward timings (mean/std per stage);
    # time units are whatever the profiler recorded — TODO confirm.
    f_times_mean: Dict[(int, float)]
    f_times_std: Dict[(int, float)]
    b_times_mean: Dict[(int, float)]
    b_times_std: Dict[(int, float)]
    # Per-stage communication statistics keyed by metric name.
    communication_stats: Dict[(int, Dict[(str, float)])]
    # nocomm* variants exclude communication time from the measurements.
    nocommf_times_mean: Dict[(int, float)]
    nocommf_times_std: Dict[(int, float)]
    nocommb_times_mean: Dict[(int, float)]
    nocommb_times_std: Dict[(int, float)]
    # Human-readable warnings collected while profiling.
    warnings_list: List[str]
def false_positive(pred, target, num_classes):
    """Per-class false-positive counts.

    Entry c counts samples predicted as class c whose target is a different
    class. Returns a 1-D tensor of length num_classes.
    """
    counts = [((pred == c) & (target != c)).sum() for c in range(num_classes)]
    return torch.tensor(counts)
def t5_small_tied_lmhead_4p_bw12_async_squad1():
    """Config dict for a stateless t5-small with a tied LM head
    (4 partitions / bw12 / async / squad1 variant)."""
    cfg = {
        'model_type': 't5_stateless',
        'model_name_or_path': 't5-small',
        'do_lower_case': False,
        'output_past': False,
        'output_attentions': False,
        'output_hidden_states': False,
        # Flags forced explicitly regardless of the model's own defaults.
        'explicitly_set_dict': {
            'output_only': True,
            'output_attentions': False,
            'precomputed_masks': True,
            'output_hidden_states': False,
        },
        'stateless_tied': True,
    }
    return cfg
# NOTE(review): the decorator line was garbled in the source ("(scope='session')");
# restored as a session-scoped pytest fixture — confirm pytest is imported at module level.
@pytest.fixture(scope='session')
def hypothesis_max_examples():
    """Return the active Hypothesis max_examples setting, or None when it is
    still the library default (100), i.e. not explicitly configured."""
    value = settings().max_examples
    return (None if (value == 100) else value)
class ZeroBaseline(Baseline):
    """A trivial baseline that predicts zero for every reward in a path."""

    def __init__(self, env_spec):
        # No state needed; env_spec is accepted only to satisfy the interface.
        pass

    def get_param_values(self, **kwargs):
        # There are no parameters to expose.
        return None

    def set_param_values(self, val, **kwargs):
        # Nothing to set.
        pass

    def fit(self, paths):
        # Nothing to fit.
        pass

    def predict(self, path):
        """Zeros with the same shape/dtype as the path's rewards."""
        return np.zeros_like(path['rewards'])

    def predict_n(self, paths):
        """Per-path zero predictions."""
        return [np.zeros_like(p['rewards']) for p in paths]
def setup(args):
    """Create and freeze a detectron2 config from the CLI args.

    Merges the DeepLab defaults, the YAML file in args.config_file and the
    key=value overrides in args.opts, then runs detectron2's default setup
    (logging, seed, output directory).
    """
    cfg = get_cfg()
    add_deeplab_config(cfg)
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # Freeze so downstream code cannot mutate the config accidentally.
    cfg.freeze()
    default_setup(cfg, args)
    return cfg
class AggregationCell_3(AggregationCell):
    """Aggregation cell fusing three input feature maps into C=128 channels.

    Each input gets a ReLU -> 1x1 conv -> BatchNorm projection; inputs 0 and 2
    are additionally upsampled 2x (bilinear). `C_in` gives the channel counts
    of the three inputs; the default matches the (256, 32, 768) backbone taps.
    """
    def __init__(self, genotype, steps, multiplier, parse_method, C_in=(256, 32, 768)):
        # Fixed: C_in default was a mutable list — replaced with an immutable
        # tuple to avoid the shared-mutable-default pitfall (indexing still works).
        super().__init__(genotype, steps, multiplier, parse_method)
        C = 128
        self.preprocess0 = nn.Sequential(nn.ReLU(inplace=False), nn.Conv2d(C_in[0], C, kernel_size=1, bias=False), nn.BatchNorm2d(C, affine=True), nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True))
        self.preprocess1 = nn.Sequential(nn.ReLU(inplace=False), nn.Conv2d(C_in[1], C, kernel_size=1, bias=False), nn.BatchNorm2d(C, affine=True))
        self.preprocess2 = nn.Sequential(nn.ReLU(inplace=False), nn.Conv2d(C_in[2], C, kernel_size=1, bias=False), nn.BatchNorm2d(C, affine=True), nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True))
class TestSweepPoly(object):
    """Tests for waveforms._sweep_poly_phase: the instantaneous frequency
    recovered from the generated phase must match the polynomial.

    Refactored: the seven copy-pasted test bodies now share one helper; each
    test only supplies its polynomial (poly1d, ndarray, or plain list — all
    accepted, as before) and time-span end point.
    """

    def _check_sweep_poly(self, p, t_max):
        """Build the phase for polynomial `p` over [0, t_max], numerically
        recover the frequency, and assert it matches p(t) to 1e-6 absolute."""
        t = np.linspace(0, t_max, 10000)
        phase = waveforms._sweep_poly_phase(t, p)
        (tf, f) = compute_frequency(t, phase)
        # np.poly1d accepts coefficients or an existing poly1d unchanged.
        expected = np.poly1d(p)(tf)
        abserr = np.max(np.abs(f - expected))
        assert_(abserr < 1e-06)

    def test_sweep_poly_quad1(self):
        self._check_sweep_poly(np.poly1d([1.0, 0.0, 1.0]), 3.0)

    def test_sweep_poly_const(self):
        self._check_sweep_poly(np.poly1d(2.0), 3.0)

    def test_sweep_poly_linear(self):
        self._check_sweep_poly(np.poly1d([(- 1.0), 10.0]), 3.0)

    def test_sweep_poly_quad2(self):
        self._check_sweep_poly(np.poly1d([1.0, 0.0, (- 2.0)]), 3.0)

    def test_sweep_poly_cubic(self):
        self._check_sweep_poly(np.poly1d([2.0, 1.0, 0.0, (- 2.0)]), 2.0)

    def test_sweep_poly_cubic2(self):
        # Coefficients given as an ndarray rather than a poly1d.
        self._check_sweep_poly(np.array([2.0, 1.0, 0.0, (- 2.0)]), 2.0)

    def test_sweep_poly_cubic3(self):
        # Coefficients given as a plain Python list.
        self._check_sweep_poly([2.0, 1.0, 0.0, (- 2.0)], 2.0)
def get_atari_median_human_normalized_score(algo_title, env_variant):
    """Median human-normalized score of an algorithm over all Atari entries
    matching the given environment variant."""
    entries = find_all({'algo-title': algo_title, 'env-variant': env_variant})
    scores = [get_human_normalized_score(entry) for entry in entries if entry['env-title'][:5] == 'atari']
    return median(scores)
def _impl(array, axis, keepdims, initial, mask_identity, highlevel, behavior, attrs):
    """Reduce `array` with the Max reducer along `axis` (awkward-array internal)."""
    axis = regularize_axis(axis)
    with HighLevelContext(behavior=behavior, attrs=attrs) as ctx:
        # Records cannot be reduced; bare primitives are rejected here.
        layout = ctx.unwrap(array, allow_record=False, primitive_policy='error')
        reducer = ak._reducers.Max(initial)
        out = ak._do.reduce(layout, reducer, axis=axis, mask=mask_identity, keepdims=keepdims, behavior=ctx.behavior)
        # allow_other=True lets scalar results pass through unwrapped.
        return ctx.wrap(out, highlevel=highlevel, allow_other=True)
class GPTNeoXLayer(metaclass=DummyObject):
    # Auto-generated placeholder: instantiating (or touching) this class when
    # torch is not installed raises a helpful error via requires_backends.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def transpose(array, axes=None):
    """Framework-agnostic transpose for numpy / torch / tensorflow / jax arrays.

    `axes` gives the permutation; None means reverse all dimensions.
    Raises ValueError for unsupported array types.
    """
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    if is_torch_tensor(array):
        if axes is None:
            return array.T
        return array.permute(*axes)
    if is_tf_tensor(array):
        import tensorflow as tf
        return tf.transpose(array, perm=axes)
    if is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    raise ValueError(f'Type not supported for transpose: {type(array)}.')
class EntityReviewBucketBatchSampler(Sampler):
    """Batch sampler over one entity's reviews, bucketed by review length so
    that each batch contains reviews of the same length (presumably number of
    sentences — TODO confirm against the dataset)."""
    def __init__(self, dataset, entity_id, batch_size, pct=1.0):
        # Full ids have the form "<entity_id>__<review_id>".
        ids = ['__'.join([entity_id, review_id]) for review_id in dataset.reviews[entity_id]]
        if (pct < 1.0):
            # Keep a random `pct` fraction of the reviews.
            num_ids = int((len(ids) * pct))
            shuffle(ids)
            ids = ids[:num_ids]
        lengths = dataset.lengths
        self.batch_size = batch_size
        self.buckets = defaultdict(list)
        for full_id in tqdm(ids, disable=True):
            if (full_id not in lengths):
                # Indexing the dataset populates dataset.lengths as a side effect.
                dataset[full_id]
            rev_len = len(lengths[full_id])
            self.buckets[rev_len].append(full_id)
        self.batch_list = []
        for bucket_len in tqdm(sorted(self.buckets), disable=True):
            # Within a bucket, order by the longest element so padding within a
            # batch is similar.
            self.buckets[bucket_len].sort(key=(lambda full_id: max(lengths[full_id])))
            self.batch_list += [(bucket_len, start_idx) for start_idx in range(0, len(self.buckets[bucket_len]), self.batch_size)]
    def __iter__(self):
        # Yield one batch (a slice of one bucket) per precomputed (bucket, offset).
        for (bucket_len, start_idx) in self.batch_list:
            (yield self.buckets[bucket_len][start_idx:(start_idx + self.batch_size)])
    def __len__(self):
        return len(self.batch_list)
def attn_bilinear(lf_input, rt_input, lf_max_len, rt_max_len, dim_att_hidden):
    """Bilinear cross-attention scores: att[i, j] = lf_i^T W rt_j.

    The inputs are first tiled/expanded against each other (shape semantics
    depend on expand_both_dims — TODO confirm); the weight W is shared via the
    reused 'cross_att_bilinear' variable scope.
    """
    (expand_lf_input, expand_rt_input) = expand_both_dims(lf_input=lf_input, rt_input=rt_input, lf_max_len=lf_max_len, rt_max_len=rt_max_len)
    with tf.variable_scope('cross_att_bilinear', reuse=tf.AUTO_REUSE):
        # Shared bilinear weight matrix W.
        w = tf.get_variable(name='w', dtype=tf.float32, shape=[dim_att_hidden, dim_att_hidden])
        # (lf W) . rt, summed over the hidden dimension.
        att_mat = tf.reduce_sum(input_tensor=tf.multiply(x=tf.matmul(expand_lf_input, w), y=expand_rt_input), axis=(- 1), name='att_mat')
    return att_mat
def get_parameter_file_loader():
    """Map parameter-file extensions to their loader functions, in preference order."""
    loaders = OrderedDict()
    loaders['.h5'] = _h5_parameter_file_loader
    loaders['.protobuf'] = _pb_parameter_file_loader
    loaders['.nntxt,.prototxt'] = _nntxt_parameter_file_loader
    loaders['.nnp'] = _nnp_parameter_file_loader
    return loaders
class RandomDatasetSampler(Sampler):
    """Sampler yielding indices so each batch draws equally from `n_dataset`
    randomly chosen datasets; sampling stops once any drawn-from dataset can
    no longer fill its share of a batch.

    Assumes items[3] of each data_source entry is the dataset id — TODO confirm.
    """
    def __init__(self, data_source, batch_size, n_dataset):
        self.data_source = data_source
        # dataset id -> list of sample indices belonging to it.
        self.dataset_dict = defaultdict(list)
        for (i, items) in enumerate(data_source):
            dsetid = items[3]
            self.dataset_dict[dsetid].append(i)
        self.datasets = list(self.dataset_dict.keys())
        if ((n_dataset is None) or (n_dataset <= 0)):
            n_dataset = len(self.datasets)
        # Each batch is split evenly across the chosen datasets.
        assert ((batch_size % n_dataset) == 0)
        self.n_img_per_dset = (batch_size // n_dataset)
        self.batch_size = batch_size
        self.n_dataset = n_dataset
        # Length is measured by running one full (random) sampling pass.
        self.length = len(list(self.__iter__()))
    def __iter__(self):
        # Sample without replacement from a deep copy so the original pools survive.
        dataset_dict = copy.deepcopy(self.dataset_dict)
        final_idxs = []
        stop_sampling = False
        while (not stop_sampling):
            selected_datasets = random.sample(self.datasets, self.n_dataset)
            for dset in selected_datasets:
                idxs = dataset_dict[dset]
                selected_idxs = random.sample(idxs, self.n_img_per_dset)
                final_idxs.extend(selected_idxs)
                for idx in selected_idxs:
                    dataset_dict[dset].remove(idx)
                remaining = len(dataset_dict[dset])
                # Stop when any sampled dataset drops below one batch share.
                if (remaining < self.n_img_per_dset):
                    stop_sampling = True
        return iter(final_idxs)
    def __len__(self):
        return self.length
class ConstantRangeMemlet(MemletPattern):
    """Memlet pattern for subscripts over a fully constant node range;
    propagation evaluates the access expressions at every index point and
    widens the resulting per-dimension bounds."""
    def can_be_applied(self, expressions, variable_context, node_range, orig_edges):
        """Applicable only when every range element in every dimension is constant."""
        constant_range = True
        for dim in node_range:
            for rngelem in dim:
                if ((not dtypes.isconstant(rngelem)) and (not isinstance(rngelem, sympy.Number))):
                    constant_range = False
                    # NOTE(review): only exits the inner loop; remaining dims are
                    # still scanned — harmless, just extra work.
                    break
        if (not constant_range):
            return False
        # Innermost variable context holds the map parameters to substitute.
        self.params = variable_context[(- 1)]
        return True
    def propagate(self, array, expressions, node_range):
        """Exhaustively evaluate all index points and return the covering Range."""
        # Start with an empty bound per output dimension; widen pointwise below.
        rng = ([(None, None, 1)] * len(array.shape))
        node_range_gen = (range(rb, re, rs) for (rb, re, rs) in node_range)
        for ndind in itertools.product(*tuple(node_range_gen)):
            # Substitute this concrete index point for each map parameter.
            repldict = {p: ndind[i] for (i, p) in enumerate(self.params)}
            for expr in expressions:
                for (dim, dexpr) in enumerate(expr):
                    evaldexpr = _subexpr(dexpr, repldict)
                    (rb, re, rs) = rng[dim]
                    if (rb is None):
                        # First value seen for this dimension.
                        rng[dim] = (evaldexpr, evaldexpr, 1)
                    else:
                        # Widen the running [rb, re] interval to cover the value.
                        if (evaldexpr < rb):
                            rng[dim] = (evaldexpr, re, rs)
                        if (evaldexpr > re):
                            rng[dim] = (rb, evaldexpr, rs)
        return subsets.Range(rng)
class TaggingTask(task.Task):
__metaclass__ = abc.ABCMeta
def __init__(self, config: configure_finetuning.FinetuningConfig, name, tokenizer, is_token_level):
super(TaggingTask, self).__init__(config, name)
self._tokenizer = tokenizer
self._label_mapping_path = os.path.join(self.config.preprocessed_data_dir, ((('debug_' if self.config.debug else '') + self.name) + '_label_mapping.pkl'))
self._is_token_level = is_token_level
self._label_mapping = None
def get_examples(self, split):
sentences = self._get_labeled_sentences(split)
examples = []
label_mapping = self._get_label_mapping(split, sentences)
for (i, (words, tags)) in enumerate(sentences):
examples.append(TaggingExample(i, self.name, words, tags, self._is_token_level, label_mapping))
return examples
def _get_label_mapping(self, provided_split=None, provided_sentences=None):
if (self._label_mapping is not None):
return self._label_mapping
if tf.io.gfile.exists(self._label_mapping_path):
self._label_mapping = utils.load_pickle(self._label_mapping_path)
return self._label_mapping
utils.log('Writing label mapping for task', self.name)
tag_counts = collections.Counter()
train_tags = set()
for split in ['train', 'dev', 'test']:
if (not tf.io.gfile.exists(os.path.join(self.config.raw_data_dir(self.name), (split + '.tsv')))):
continue
if (split == provided_split):
split_sentences = provided_sentences
else:
split_sentences = self._get_labeled_sentences(split)
for (_, tags) in split_sentences:
if (not self._is_token_level):
span_labels = tagging_utils.get_span_labels(tags)
tags = tagging_utils.get_tags(span_labels, len(tags), LABEL_ENCODING)
for tag in tags:
tag_counts[tag] += 1
if (provided_split == 'train'):
train_tags.add(tag)
if (self.name == 'ccg'):
infrequent_tags = []
for tag in tag_counts:
if (tag not in train_tags):
infrequent_tags.append(tag)
label_mapping = {label: i for (i, label) in enumerate(sorted(filter((lambda t: (t not in infrequent_tags)), tag_counts.keys())))}
n = len(label_mapping)
for tag in infrequent_tags:
label_mapping[tag] = n
else:
labels = sorted(tag_counts.keys())
label_mapping = {label: i for (i, label) in enumerate(labels)}
utils.write_pickle(label_mapping, self._label_mapping_path)
self._label_mapping = label_mapping
return label_mapping
def featurize(self, example: TaggingExample, is_training, log=False):
words_to_tokens = tokenize_and_align(self._tokenizer, example.words)
input_ids = []
tagged_positions = []
for word_tokens in words_to_tokens:
if (((len(words_to_tokens) + len(input_ids)) + 1) > self.config.max_seq_length):
input_ids.append(self._tokenizer.vocab['[SEP]'])
break
if (('[CLS]' not in word_tokens) and ('[SEP]' not in word_tokens)):
tagged_positions.append(len(input_ids))
for token in word_tokens:
input_ids.append(self._tokenizer.vocab[token])
pad = (lambda x: (x + ([0] * (self.config.max_seq_length - len(x)))))
labels = pad(example.labels[:self.config.max_seq_length])
labeled_positions = pad(tagged_positions)
labels_mask = pad(([1.0] * len(tagged_positions)))
segment_ids = pad(([1] * len(input_ids)))
input_mask = pad(([1] * len(input_ids)))
input_ids = pad(input_ids)
assert (len(input_ids) == self.config.max_seq_length)
assert (len(input_mask) == self.config.max_seq_length)
assert (len(segment_ids) == self.config.max_seq_length)
assert (len(labels) == self.config.max_seq_length)
assert (len(labels_mask) == self.config.max_seq_length)
return {'input_ids': input_ids, 'input_mask': input_mask, 'segment_ids': segment_ids, 'task_id': self.config.task_names.index(self.name), (self.name + '_eid'): example.eid, (self.name + '_labels'): labels, (self.name + '_labels_mask'): labels_mask, (self.name + '_labeled_positions'): labeled_positions}
def _get_labeled_sentences(self, split):
sentences = []
with tf.io.gfile.GFile(os.path.join(self.config.raw_data_dir(self.name), (split + '.tsv')), 'r') as f:
sentence = []
for line in f:
line = line.strip().split()
if (not line):
if sentence:
(words, tags) = zip(*sentence)
sentences.append((words, tags))
sentence = []
if (self.config.debug and (len(sentences) > 100)):
return sentences
continue
if (line[0] == '-DOCSTART-'):
continue
(word, tag) = (line[0], line[(- 1)])
sentence.append((word, tag))
return sentences
def get_scorer(self, split):
return (tagging_metrics.AccuracyScorer() if self._is_token_level else tagging_metrics.EntityLevelF1Scorer(self._get_label_mapping()))
def get_feature_specs(self):
return [feature_spec.FeatureSpec((self.name + '_eid'), []), feature_spec.FeatureSpec((self.name + '_labels'), [self.config.max_seq_length]), feature_spec.FeatureSpec((self.name + '_labels_mask'), [self.config.max_seq_length], is_int_feature=False), feature_spec.FeatureSpec((self.name + '_labeled_positions'), [self.config.max_seq_length])]
def get_prediction_module(self, bert_model, features, is_training, percent_done):
n_classes = len(self._get_label_mapping())
reprs = bert_model.get_sequence_output()
reprs = pretrain_helpers.gather_positions(reprs, features[(self.name + '_labeled_positions')])
logits = tf.layers.dense(reprs, n_classes)
losses = tf.nn.softmax_cross_entropy_with_logits(labels=tf.one_hot(features[(self.name + '_labels')], n_classes), logits=logits)
losses *= features[(self.name + '_labels_mask')]
losses = tf.reduce_sum(losses, axis=(- 1))
return (losses, dict(loss=losses, logits=logits, predictions=tf.argmax(logits, axis=(- 1)), labels=features[(self.name + '_labels')], labels_mask=features[(self.name + '_labels_mask')], eid=features[(self.name + '_eid')]))
def _create_examples(self, lines, split):
    # Intentionally a no-op for this task: examples are presumably built
    # from the raw TSV files via _get_labeled_sentences instead — confirm
    # against the base class contract.
    pass
class TestShLexer(unittest.TestCase):
    """Tests for ShLexer: word/operator tokenization, fd redirections,
    and shell quoting/escaping rules."""

    def lex(self, str, *args, **kwargs):
        # Helper: run the lexer to completion and return the token list.
        # (Parameter name `str` shadows the builtin; kept byte-identical.)
        return list(ShLexer(str, *args, **kwargs).lex())

    def test_basic(self):
        # Operators are emitted as 1-tuples, bare words as plain strings.
        self.assertEqual(self.lex('a|b>c&d<e;f'), ['a', ('|',), 'b', ('>',), 'c', ('&',), 'd', ('<',), 'e', (';',), 'f'])

    def test_redirection_tokens(self):
        # A digit glued to a word stays in the word; separated by a space
        # it becomes the fd number of the redirection token.
        self.assertEqual(self.lex('a2>c'), ['a2', ('>',), 'c'])
        self.assertEqual(self.lex('a 2>c'), ['a', ('>', 2), 'c'])

    def test_quoting(self):
        self.assertEqual(self.lex(" 'a' "), ['a'])
        # Inside double quotes: \" escapes, \' stays literal, \\ collapses.
        self.assertEqual(self.lex(' "hello\\"world" '), ['hello"world'])
        self.assertEqual(self.lex(' "hello\\\'world" '), ["hello\\'world"])
        self.assertEqual(self.lex(' "hello\\\\world" '), ['hello\\world'])
        # Adjacent quoted and unquoted fragments join into a single word.
        self.assertEqual(self.lex(' he"llo wo"rld '), ['hello world'])
        self.assertEqual(self.lex(' a\\ b a\\\\b '), ['a b', 'a\\b'])
        self.assertEqual(self.lex(' "" "" '), ['', ''])
        # With win32Escapes, backslash is not an escape outside quotes.
        self.assertEqual(self.lex(' a\\ b ', win32Escapes=True), ['a\\', 'b'])
class BridgeTowerForContrastiveLearning(metaclass=DummyObject):
    """Auto-generated placeholder used when `torch` is not installed;
    any instantiation raises an informative error via requires_backends."""
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class ZeldaProblem(Problem):
    """PCGRL problem for generating Zelda-like dungeon levels.

    A satisfying level has exactly one player/key/door, a single connected
    region, enemies kept at a minimum distance from the player, and a long
    enough player->key->door path.

    Fix: in the original file the per-method documentation strings were bare
    expression statements placed *before* each ``def``, so they were never
    attached as docstrings (invisible to ``help()`` / ``__doc__``); they have
    been moved into their methods.
    """
    _tile_types = ['empty', 'solid', 'player', 'key', 'door', 'bat', 'scorpion', 'spider']

    def __init__(self, cfg: Config):
        super().__init__(cfg=cfg)
        self.path_length = 0
        self.path = []
        # Initial tile-sampling probabilities for random level generation.
        self._prob = {'empty': 0.58, 'solid': 0.3, 'player': 0.02, 'key': 0.02, 'door': 0.02, 'bat': 0.02, 'scorpion': 0.02, 'spider': 0.02}
        self._border_tile = 'solid'
        self._max_enemies = 5
        self._target_enemy_dist = 4
        self._target_path = 16
        self._reward_weights = {'player': 3, 'key': 3, 'door': 3, 'regions': 5, 'enemies': 1, 'nearest-enemy': 2, 'path-length': 1}

    def adjust_param(self, **kwargs):
        """
        Adjust the parameters for the current problem

        Parameters:
            width (int): change the width of the problem level
            height (int): change the height of the problem level
            probs (dict(string, float)): change the probability of each tile
                initialization, the names are "empty", "solid"
            target_path (int): the current path length that the episode turn when it reaches
            rewards (dict(string,float)): the weights of each reward change between the new_stats and old_stats
        """
        super().adjust_param(**kwargs)
        self._max_enemies = kwargs.get('max_enemies', self._max_enemies)
        self._target_enemy_dist = kwargs.get('target_enemy_dist', self._target_enemy_dist)
        self._target_path = kwargs.get('target_path', self._target_path)
        rewards = kwargs.get('rewards')
        if rewards is not None:
            for t in rewards:
                if t in self._reward_weights:
                    self._reward_weights[t] = rewards[t]

    def get_stats(self, map, lenient_paths=False):
        """
        Get the current stats of the map

        Returns:
            dict(string,any): stats of the current map to be used in the
            reward, episode_over, debug_info calculations. The used stats are
            "regions": number of connected empty tiles, "path-length": the
            longest path across the map
        """
        self.path = []
        map_locations = get_tile_locations(map, self.get_tile_types())
        map_stats = {
            'player': calc_certain_tile(map_locations, ['player']),
            'key': calc_certain_tile(map_locations, ['key']),
            'door': calc_certain_tile(map_locations, ['door']),
            'enemies': calc_certain_tile(map_locations, ['bat', 'spider', 'scorpion']),
            'regions': calc_num_regions(map, map_locations, ['empty', 'player', 'key', 'bat', 'spider', 'scorpion']),
            'nearest-enemy': 0,
            'path-length': 0,
        }
        # Path metrics only make sense with a unique player in one region.
        if map_stats['player'] == 1 and map_stats['regions'] == 1:
            p_x, p_y = map_locations['player'][0]
            enemies = []
            enemies.extend(map_locations['spider'])
            enemies.extend(map_locations['bat'])
            enemies.extend(map_locations['scorpion'])
            if len(enemies) > 0:
                dijkstra, _ = run_dijkstra(p_x, p_y, map, ['key', 'empty', 'player', 'bat', 'spider', 'scorpion'])
                min_dist = self._width * self._height
                for e_x, e_y in enemies:
                    if dijkstra[e_y][e_x] > 0 and dijkstra[e_y][e_x] < min_dist:
                        min_dist = dijkstra[e_y][e_x]
                map_stats['nearest-enemy'] = min_dist
            if map_stats['key'] == 1 and map_stats['door'] == 1:
                k_x, k_y = map_locations['key'][0]
                d_x, d_y = map_locations['door'][0]
                # Path length = player->key plus key->door distances.
                dijkstra_k, _ = run_dijkstra(p_x, p_y, map, ['empty', 'key', 'player', 'bat', 'spider', 'scorpion'])
                map_stats['path-length'] += dijkstra_k[k_y][k_x]
                dijkstra_d, _ = run_dijkstra(k_x, k_y, map, ['empty', 'player', 'key', 'door', 'bat', 'spider', 'scorpion'])
                map_stats['path-length'] += dijkstra_d[d_y][d_x]
                if self.render_path:
                    self.path = np.hstack((get_path_coords(dijkstra_k, init_coords=(k_y, k_x)), get_path_coords(dijkstra_d, init_coords=(d_y, d_x))))
        self.path_length = map_stats['path-length']
        return map_stats

    def get_reward(self, new_stats, old_stats):
        """
        Get the current game reward between two stats

        Parameters:
            new_stats (dict(string,any)): the new stats after taking an action
            old_stats (dict(string,any)): the old stats before taking an action

        Returns:
            float: the current reward due to the change between the old map stats and the new map stats
        """
        rewards = {'player': get_range_reward(new_stats['player'], old_stats['player'], 1, 1), 'key': get_range_reward(new_stats['key'], old_stats['key'], 1, 1), 'door': get_range_reward(new_stats['door'], old_stats['door'], 1, 10), 'enemies': get_range_reward(new_stats['enemies'], old_stats['enemies'], 2, self._max_enemies), 'regions': get_range_reward(new_stats['regions'], old_stats['regions'], 1, 1), 'nearest-enemy': get_range_reward(new_stats['nearest-enemy'], old_stats['nearest-enemy'], self._target_enemy_dist, np.inf), 'path-length': get_range_reward(new_stats['path-length'], old_stats['path-length'], np.inf, np.inf)}
        # Weighted sum of the individual reward terms.
        return (((((((rewards['player'] * self._reward_weights['player']) + (rewards['key'] * self._reward_weights['key'])) + (rewards['door'] * self._reward_weights['door'])) + (rewards['enemies'] * self._reward_weights['enemies'])) + (rewards['regions'] * self._reward_weights['regions'])) + (rewards['nearest-enemy'] * self._reward_weights['nearest-enemy'])) + (rewards['path-length'] * self._reward_weights['path-length']))

    def get_episode_over(self, new_stats, old_stats):
        """
        Uses the stats to check if the problem ended (episode_over) which
        means reached a satisfying quality based on the stats

        Parameters:
            new_stats (dict(string,any)): the new stats after taking an action
            old_stats (dict(string,any)): the old stats before taking an action

        Returns:
            boolean: True if the level reached satisfying quality based on the stats and False otherwise
        """
        return (new_stats['nearest-enemy'] >= self._target_enemy_dist) and (new_stats['path-length'] >= self._target_path)

    def get_debug_info(self, new_stats, old_stats):
        """
        Get any debug information need to be printed

        Parameters:
            new_stats (dict(string,any)): the new stats after taking an action
            old_stats (dict(string,any)): the old stats before taking an action

        Returns:
            dict(any,any): is a debug information that can be used to debug what is
            happening in the problem
        """
        return {'player': new_stats['player'], 'key': new_stats['key'], 'door': new_stats['door'], 'enemies': new_stats['enemies'], 'regions': new_stats['regions'], 'nearest-enemy': new_stats['nearest-enemy'], 'path-length': new_stats['path-length']}

    def init_graphics(self):
        # Load tile sprites lazily; two sprite sets are supported.
        if self.GVGAI_SPRITES:
            self._graphics = {'empty': Image.open((os.path.dirname(__file__) + '/sprites/oryx/floor3.png')).convert('RGBA'), 'solid': Image.open((os.path.dirname(__file__) + '/sprites/oryx/wall3.png')).convert('RGBA'), 'player': Image.open((os.path.dirname(__file__) + '/sprites/oryx/swordman1_0.png')).convert('RGBA'), 'key': Image.open((os.path.dirname(__file__) + '/sprites/oryx/key2.png')).convert('RGBA').resize((24, 24)), 'door': Image.open((os.path.dirname(__file__) + '/sprites/oryx/doorclosed1.png')).convert('RGBA'), 'spider': Image.open((os.path.dirname(__file__) + '/sprites/oryx/spider1.png')).convert('RGBA'), 'bat': Image.open((os.path.dirname(__file__) + '/sprites/oryx/bat2.png')).convert('RGBA'), 'scorpion': Image.open((os.path.dirname(__file__) + '/sprites/oryx/scorpion1.png')).convert('RGBA'), 'path': Image.open((os.path.dirname(__file__) + '/sprites/newset/snowmanchest.png')).convert('RGBA')}
        else:
            self._graphics = {'empty': Image.open((PROB_DIR + '/common/empty.png')).convert('RGBA'), 'solid': Image.open((PROB_DIR + '/common/solid.png')).convert('RGBA'), 'player': Image.open((os.path.dirname(__file__) + '/zelda/player.png')).convert('RGBA'), 'key': Image.open((os.path.dirname(__file__) + '/zelda/key.png')).convert('RGBA'), 'door': Image.open((os.path.dirname(__file__) + '/zelda/door.png')).convert('RGBA'), 'spider': Image.open((os.path.dirname(__file__) + '/zelda/spider.png')).convert('RGBA'), 'bat': Image.open((os.path.dirname(__file__) + '/zelda/bat.png')).convert('RGBA'), 'scorpion': Image.open((os.path.dirname(__file__) + '/zelda/scorpion.png')).convert('RGBA'), 'path': Image.open((PROB_DIR + '/common/path_g.png')).convert('RGBA')}

    def render(self, map):
        """
        Get an image on how the map will look like for a specific map

        Parameters:
            map (string[][]): the current game map

        Returns:
            Image: a pillow image on how the map will look like using the binary graphics
        """
        if self._graphics is None:
            self.init_graphics()
        return super().render(map, render_path=self.path)
def _maybe_load_yaml(item):
    """Parse `item` as YAML when it is a string; pass dicts through unchanged.

    Raises:
      ValueError: if `item` is neither a YAML string nor a dict.
    """
    if isinstance(item, six.string_types):
        # safe_load avoids arbitrary Python object construction from
        # untrusted YAML and the Loader-less yaml.load deprecation warning.
        return yaml.safe_load(item)
    elif isinstance(item, dict):
        return item
    else:
        # The original passed type(item) as a second constructor argument,
        # so the '{}' placeholder was never actually filled in.
        raise ValueError('Got {}, expected YAML string or dict'.format(type(item)))
class Net(nn.Module):
    """MobileNetV2 backbone with a re-initialized classification head.

    The pretrained backbone is frozen; the replacement final Linear layer
    (1280 -> num_classes) is created after freezing and therefore remains
    trainable.
    """

    def __init__(self):
        super(Net, self).__init__()
        backbone = torchvision.models.mobilenet_v2(pretrained=True)
        backbone.requires_grad_(False)
        # Swap the classifier's last layer for one matching our label count.
        backbone.classifier[1] = torch.nn.Linear(in_features=1280, out_features=num_classes, bias=True)
        self.model = backbone

    def forward(self, x):
        return self.model.forward(x)
def _clean_street(result_dict: Dict[(str, str)], street: str) -> None:
if re.match('\\d+[st|nd|rd|th]', street, flags=re.IGNORECASE):
result_dict['street_name'] = street.lower()
else:
result_dict['street_name'] = street.title() |
def interleave(inter, f, seq, **kwargs):
    """Apply `f` to each item of `seq`, calling `inter()` between items.

    `inter` is never called for an empty or single-element sequence; any
    extra keyword arguments are forwarded to every `f` call.
    """
    for position, item in enumerate(iter(seq)):
        if position:
            inter()
        f(item, **kwargs)
def load_entity_vocab(data_dir, ignore_bad_title=True, min_ent_count=1):
    """Load the entity vocabulary from ``<data_dir>/entity_vocab.txt``.

    Each line is tab-separated: index, wiki id, title, mid, count. Entries
    with an empty title (when ``ignore_bad_title``) or a count below
    ``min_ent_count`` are skipped.

    Returns:
        dict mapping a dense 0-based index to entity metadata.
    """
    entity_vocab = {}
    num_bad_title = 0
    num_few_entity = 0
    vocab_path = os.path.join(data_dir, 'entity_vocab.txt')
    with open(vocab_path, 'r', encoding='utf-8') as vocab_file:
        for raw_line in vocab_file:
            _, wiki_id, title, mid, count = raw_line.strip().split('\t')
            if ignore_bad_title and title == '':
                num_bad_title += 1
            elif int(count) < min_ent_count:
                num_few_entity += 1
            else:
                entity_vocab[len(entity_vocab)] = {
                    'wiki_id': int(wiki_id),
                    'wiki_title': title,
                    'mid': mid,
                    'count': int(count),
                }
    print(('total number of entity: %d\nremove because of empty title: %d\nremove because count<%d: %d'
           % (len(entity_vocab), num_bad_title, min_ent_count, num_few_entity)))
    return entity_vocab
def skipIfNoQNNPACK(fn):
    """Skip a test function or TestCase class when QNNPACK is unavailable.

    For classes the unittest skip attributes are set directly; for functions
    a wrapper raises SkipTest lazily, at call time.
    """
    import functools
    reason = 'Quantized operations require QNNPACK.'
    if isinstance(fn, type):
        if ('qnnpack' not in torch.backends.quantized.supported_engines):
            fn.__unittest_skip__ = True
            fn.__unittest_skip_why__ = reason
        return fn

    # Bug fix: the original had a bare `(fn)` expression statement here where
    # the @functools.wraps(fn) decorator belonged, so the wrapper lost the
    # test's name/docstring (breaking unittest reporting and discovery).
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        if ('qnnpack' not in torch.backends.quantized.supported_engines):
            raise unittest.SkipTest(reason)
        else:
            fn(*args, **kwargs)
    return wrapper
def process_checkpoint(in_file, out_file):
    """Strip training-only state from a checkpoint and publish it with a hash.

    Removes the optimizer state and any ``ema_``-prefixed weights, saves the
    result, and renames it to ``<out_file stem>-<first 8 sha256 hex>.pth``.

    Returns:
        str: the final (hash-suffixed) file path.
    """
    checkpoint = torch.load(in_file, map_location='cpu')
    # The optimizer state is only needed for resuming training.
    if 'optimizer' in checkpoint:
        del checkpoint['optimizer']
    # Drop exponential-moving-average duplicates of the weights.
    for key in list(checkpoint['state_dict']):
        if key.startswith('ema_'):
            checkpoint['state_dict'].pop(key)
    # Use the legacy (non-zipfile) format on torch >= 1.6 for backward
    # compatibility. The original compared version *strings*, which
    # misorders e.g. '1.10' < '1.6'; compare numeric components instead.
    version = tuple(int(x) for x in torch.__version__.split('+')[0].split('.')[:2])
    if version >= (1, 6):
        torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
    else:
        torch.save(checkpoint, out_file)
    # Hash in-process instead of shelling out to `sha256sum` (portable, no
    # coreutils dependency; same hex digest).
    with open(out_file, 'rb') as f:
        sha = hashlib.sha256(f.read()).hexdigest()
    # Bug fix: str.rstrip('.pth') strips *characters*, corrupting names
    # whose stem ends in p/t/h/'.'; remove the suffix properly.
    stem = out_file[:-len('.pth')] if out_file.endswith('.pth') else out_file
    final_file = stem + '-{}.pth'.format(sha[:8])
    # os.rename is synchronous, unlike the original fire-and-forget
    # subprocess.Popen(['mv', ...]) which could return before the file moved.
    os.rename(out_file, final_file)
    return final_file
def dut_cb_event(ed, eventid, exp_meta, exp_meta_lock):
    """Callback for device-under-test events.

    On 'JOINED' it signals the module-level join event; on 'REBOOT' it only
    logs. Other event ids are ignored.
    """
    global initial_dr
    if eventid == 'JOINED':
        print('DUT: End device joined')
        # Unblock any waiter synchronizing on the join.
        event_device_joined.set()
        return
    if eventid == 'REBOOT':
        print('DUT: Reboot successful')
def orthogonal_procrustes(A, B, check_finite=True):
    """Solve the orthogonal Procrustes problem.

    Finds the orthogonal matrix R minimizing ||A @ R - B||_F via the SVD of
    B.T @ A.

    Returns:
        (R, scale) where scale is the sum of the singular values.

    Raises:
        ValueError: if A is not 2-D or the shapes of A and B differ.
    """
    convert = np.asarray_chkfinite if check_finite else np.asanyarray
    A = convert(A)
    B = convert(B)
    if A.ndim != 2:
        raise ValueError('expected ndim to be 2, but observed %s' % A.ndim)
    if A.shape != B.shape:
        raise ValueError('the shapes of A and B differ (%s vs %s)' % (A.shape, B.shape))
    u, w, vt = svd(B.T.dot(A).T)
    return u.dot(vt), w.sum()
def test_c2st_shape_error():
    """c2st must reject sample sets whose feature dimensions disagree."""
    n_samples = 5
    source = np.random.random(size=(n_samples, 2))
    target = np.random.random(size=(n_samples, 3))
    with pytest.raises(ShapeError):
        computational_utilities.c2st(source, target)
def intersect_and_union(pred_label, label, num_classes, ignore_index, label_map=dict(), reduce_zero_label=False):
    """Compute per-class intersection/union histograms for one sample.

    ``pred_label`` / ``label`` may be numpy arrays or file paths (.npy for the
    prediction, an image file for the label). Pixels whose label equals
    ``ignore_index`` are excluded.

    Returns:
        tuple of four 1-D float tensors of length ``num_classes``:
        (intersection, union, prediction area, label area).
    """
    if isinstance(pred_label, str):
        pred_label = torch.from_numpy(np.load(pred_label))
    else:
        pred_label = torch.from_numpy(pred_label)
    if isinstance(label, str):
        label = torch.from_numpy(mmcv.imread(label, flag='unchanged', backend='pillow'))
    else:
        label = torch.from_numpy(label)
    if label_map is not None:
        # Remap ids against a snapshot so chained remaps cannot cascade.
        snapshot = label.clone()
        for old_id, new_id in label_map.items():
            label[snapshot == old_id] = new_id
    if reduce_zero_label:
        # Shift labels down one, routing the zero class to ignore (255).
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    keep = label != ignore_index
    pred_label = pred_label[keep]
    label = label[keep]
    matched = pred_label[pred_label == label]
    def _hist(values):
        return torch.histc(values.float(), bins=num_classes, min=0, max=num_classes - 1)
    area_intersect = _hist(matched)
    area_pred_label = _hist(pred_label)
    area_label = _hist(label)
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def _copy_cookie_jar(jar):
if (jar is None):
return None
if hasattr(jar, 'copy'):
return jar.copy()
new_jar = copy.copy(jar)
new_jar.clear()
for cookie in jar:
new_jar.set_cookie(copy.copy(cookie))
return new_jar |
def get_layer_groups_(T_m, m):
    """Bundle the flattened layers of two models into sequential groups
    (e.g. for discriminative learning rates)."""
    first_group = flatten_model(T_m)
    second_group = flatten_model(m)
    return [nn.Sequential(*first_group), nn.Sequential(*second_group)]
def test_multiple_encoded_covariates_totalvi():
    """TOTALVI should train when several continuous and categorical
    covariates are encoded alongside the batch."""
    adata = synthetic_iid()
    n_obs = adata.shape[0]
    adata.obs['cont1'] = np.random.normal(size=(n_obs,))
    adata.obs['cont2'] = np.random.normal(size=(n_obs,))
    adata.obs['cat1'] = np.random.randint(0, 5, size=(n_obs,))
    adata.obs['cat2'] = np.random.randint(0, 5, size=(n_obs,))
    TOTALVI.setup_anndata(
        adata,
        batch_key='batch',
        protein_expression_obsm_key='protein_expression',
        protein_names_uns_key='protein_names',
        continuous_covariate_keys=['cont1', 'cont2'],
        categorical_covariate_keys=['cat1', 'cat2'],
    )
    model = TOTALVI(adata, encode_covariates=True)
    model.train(1)
def _load(plugin):
    """Import the named I/O plugin module and register what it provides.

    No-op when the plugin is already loaded. Advertised-but-missing
    functions are skipped with a warning.

    Raises:
        ValueError: for unknown plugin names.
    """
    if plugin in find_available_plugins(loaded=True):
        return  # already loaded
    if plugin not in plugin_module_name:
        raise ValueError(f'Plugin {plugin} not found.')
    modname = plugin_module_name[plugin]
    plugin_module = __import__('skimage.io._plugins.' + modname, fromlist=[modname])
    for p in plugin_provides[plugin]:
        if p == 'imread_collection':
            # Synthesize imread_collection from imread when missing.
            _inject_imread_collection_if_needed(plugin_module)
        elif not hasattr(plugin_module, p):
            print(f'Plugin {plugin} does not provide {p} as advertised. Ignoring.')
            continue
        registry = plugin_store[p]
        entry = (plugin, getattr(plugin_module, p))
        if entry not in registry:
            registry.append(entry)
class Decoder(nn.Module):
    """Multi-scale disparity decoder over a 5-level encoder pyramid.

    Only the deepest encoder map (econv5) is consumed; it is repeatedly
    refined and 2x-upsampled through upconv/iconv pairs, and a sigmoid
    disparity map is emitted at the four finest scales under the keys
    ('disp', frame_id, 3..0).
    """

    def __init__(self, num_ch_enc, num_output_channels=3):
        # num_ch_enc: per-level channel counts of the encoder (index 4 = deepest).
        super(Decoder, self).__init__()
        # Decoder channel widths, finest (index 0) to coarsest (index 4).
        num_ch_dec = [16, 32, 64, 128, 256]
        # upconvN reduces channels before each 2x spatial upsample.
        self.upconv5 = ConvBlock(num_ch_enc[4], num_ch_dec[4])
        self.upconv4 = ConvBlock(num_ch_dec[4], num_ch_dec[3])
        self.upconv3 = ConvBlock(num_ch_dec[3], num_ch_dec[2])
        self.upconv2 = ConvBlock(num_ch_dec[2], num_ch_dec[1])
        self.upconv1 = ConvBlock(num_ch_dec[1], num_ch_dec[0])
        # iconvN refines features at constant channel count per scale.
        self.iconv5 = ConvBlock(num_ch_dec[4], num_ch_dec[4])
        self.iconv4 = ConvBlock(num_ch_dec[3], num_ch_dec[3])
        self.iconv3 = ConvBlock(num_ch_dec[2], num_ch_dec[2])
        self.iconv2 = ConvBlock(num_ch_dec[1], num_ch_dec[1])
        self.iconv1 = ConvBlock(num_ch_dec[0], num_ch_dec[0])
        # Per-scale disparity prediction heads.
        self.disp4 = Conv3x3(num_ch_dec[3], num_output_channels)
        self.disp3 = Conv3x3(num_ch_dec[2], num_output_channels)
        self.disp2 = Conv3x3(num_ch_dec[1], num_output_channels)
        self.disp1 = Conv3x3(num_ch_dec[0], num_output_channels)
        self.sigmoid = nn.Sigmoid()

    def forward(self, input_features, frame_id=0):
        # input_features: 5-tuple of encoder maps; only the deepest is used.
        self.outputs = {}
        (_, _, _, _, econv5) = input_features
        upconv5 = upsample(self.upconv5(econv5))
        iconv5 = self.iconv5(upconv5)
        upconv4 = upsample(self.upconv4(iconv5))
        iconv4 = self.iconv4(upconv4)
        upconv3 = upsample(self.upconv3(iconv4))
        iconv3 = self.iconv3(upconv3)
        upconv2 = upsample(self.upconv2(iconv3))
        iconv2 = self.iconv2(upconv2)
        upconv1 = upsample(self.upconv1(iconv2))
        iconv1 = self.iconv1(upconv1)
        # Disparities squashed into (0, 1) via sigmoid, coarsest to finest.
        self.outputs[('disp', frame_id, 3)] = self.sigmoid(self.disp4(iconv4))
        self.outputs[('disp', frame_id, 2)] = self.sigmoid(self.disp3(iconv3))
        self.outputs[('disp', frame_id, 1)] = self.sigmoid(self.disp2(iconv2))
        self.outputs[('disp', frame_id, 0)] = self.sigmoid(self.disp1(iconv1))
        return self.outputs
def _convert_models_to_fp32(model):
for p in model.parameters():
p.data = p.data.float()
p.grad.data = p.grad.data.float() |
def run_nodistributed(local_rank, func, cfg):
    """Run ``func(cfg)`` on a single GPU without distributed setup.

    Binds the process to CUDA device ``local_rank`` and records the rank on
    the config before dispatching.
    """
    cfg.process_rank = local_rank
    torch.cuda.set_device(local_rank)
    func(cfg)
@pytest.mark.hypothesis_nested
def test_date_deserializing(testdir):
    """Date examples inside allOf subschemas must be serialized so that
    generated query values arrive as strings.

    Fix: the three decorators below were garbled in the original source into
    bare expression statements (`.hypothesis_nested`, `(case=...)`,
    `(suppress_health_check=...)`), so hypothesis never actually drove the
    inner test function.
    """
    schema = {'openapi': '3.0.2', 'info': {'title': 'Test', 'description': 'Test', 'version': '0.1.0'}, 'paths': {'/teapot': {'get': {'summary': 'Test', 'parameters': [{'name': 'key', 'in': 'query', 'required': True, 'schema': {'allOf': [{'type': 'string', 'example': datetime.date(2020, 1, 1)}, {'type': 'string', 'example': datetime.date(2020, 1, 1)}]}}], 'responses': {'200': {'description': 'OK'}}}}}}
    schema_path = testdir.makefile('.yaml', schema=yaml.dump(schema))
    schema = schemathesis.from_path(str(schema_path))

    @given(case=schema['/teapot']['GET'].as_strategy())
    @settings(suppress_health_check=[HealthCheck.filter_too_much])
    def test(case):
        assert isinstance(case.query['key'], str)

    test()
def time_logger(func):
    """Decorator that logs wall-clock start/finish times and the total
    running time of ``func``."""
    import functools

    # functools.wraps preserves func's __name__/__doc__ on the wrapper so
    # logs, debuggers and introspection see the decorated function's
    # identity rather than 'wrapper'.
    @functools.wraps(func)
    def wrapper(*args, **kw):
        start_time = time.time()
        print(f'Start running {func.__name__} at {get_cur_time()}')
        ret = func(*args, **kw)
        print(f'Finished running {func.__name__} at {get_cur_time()}, running time = {time2str((time.time() - start_time))}.')
        return ret
    return wrapper
def check_full_copies(overwrite: bool = False):
    """Verify (or restore) files that must be byte-for-byte copies.

    For each (target, source) pair in FULL_COPIES the contents are compared.
    With ``overwrite=True`` mismatched targets are rewritten from their
    source; otherwise all mismatches are collected and raised together.
    """
    mismatches = []
    for target, source in FULL_COPIES.items():
        with open(source, 'r', encoding='utf-8') as handle:
            source_code = handle.read()
        with open(target, 'r', encoding='utf-8') as handle:
            target_code = handle.read()
        if source_code == target_code:
            continue
        if overwrite:
            with open(target, 'w', encoding='utf-8') as handle:
                print(f'Replacing the content of {target} by the one of {source}.')
                handle.write(source_code)
        else:
            mismatches.append(f'- {target}: copy does not match {source}.')
    if not overwrite and len(mismatches) > 0:
        diff = '\n'.join(mismatches)
        raise Exception('Found the following copy inconsistencies:\n' + diff + '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.')
class EmbedDropout(nn.Dropout):
    """Dropout over whole embedding channels.

    A single (batch, dim) mask is sampled and broadcast across the sequence
    axis, so each feature channel is kept or dropped for every timestep of
    a given sequence.
    """

    def forward(self, sequences_batch):
        batch_size = sequences_batch.shape[0]
        feature_dim = sequences_batch.shape[-1]
        channel_mask = nn.functional.dropout(
            sequences_batch.data.new_ones(batch_size, feature_dim),
            self.p, self.training, inplace=False)
        # Broadcast the per-channel mask over the sequence dimension.
        return channel_mask.unsqueeze(1) * sequences_batch
@dataclass
class DataTrainingArguments():
    """Arguments describing the data the model is trained and evaluated on.

    Fix: this class uses ``dataclasses.field(...)`` defaults and a
    ``__post_init__`` hook but was missing the ``@dataclass`` decorator —
    without it the attributes held raw ``Field`` objects and
    ``__post_init__`` (including its validation) never ran.
    """
    task_name: Optional[str] = field(default=None, metadata={'help': f'The name of the glue task to train on. choices {list(task_to_keys.keys())}'})
    dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a csv or JSON file).'})
    validation_file: Optional[str] = field(default=None, metadata={'help': 'An optional input evaluation data file to evaluate on (a csv or JSON file).'})
    test_file: Optional[str] = field(default=None, metadata={'help': 'An optional input test data file to predict on (a csv or JSON file).'})
    text_column_name: Optional[str] = field(default=None, metadata={'help': 'The column name of text to input in the file (a csv or JSON file).'})
    label_column_name: Optional[str] = field(default=None, metadata={'help': 'The column name of label to input in the file (a csv or JSON file).'})
    overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
    preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
    max_seq_length: int = field(default=None, metadata={'help': 'The maximum total input sequence length after tokenization. If set, sequences longer than this will be truncated, sequences shorter will be padded.'})
    max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
    max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
    max_predict_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of prediction examples to this value if set.'})
    blurb_task: Optional[str] = field(default=None, metadata={'help': f'The name of the glue task to train on. choices {list(task_to_keys.keys())}'})
    metric_name: Optional[str] = field(default=None, metadata={'help': 'The name of the metric'})

    def __post_init__(self):
        # Validate that at least one data source was provided, and that any
        # provided files have a supported extension.
        if (self.task_name is None) and (self.train_file is None) and (self.validation_file is None):
            raise ValueError('Need either a dataset name or a training/validation file.')
        else:
            if self.train_file is not None:
                extension = self.train_file.split('.')[-1]
                assert (extension in ['csv', 'json']), '`train_file` should be a csv or a json file.'
            if self.validation_file is not None:
                extension = self.validation_file.split('.')[-1]
                assert (extension in ['csv', 'json']), '`validation_file` should be a csv or a json file.'
        self.task_name = (self.task_name.lower() if (type(self.task_name) == str) else self.task_name)
class _SessionState():
    """Per-session key/value store with change-triggered rerun support.

    All real state lives in ``self.__dict__['_state']`` so that the custom
    ``__setattr__``/``__getattr__`` can expose the 'data' dict as attributes
    without recursing into themselves.
    """

    def __init__(self, session, hash_funcs):
        # Write through __dict__ directly to bypass our own __setattr__.
        self.__dict__['_state'] = {'data': {}, 'hash': None, 'hasher': _CodeHasher(hash_funcs), 'is_rerun': False, 'session': session}

    def __call__(self, **kwargs):
        # Default-setter: only assigns keys that are not already present.
        for (item, value) in kwargs.items():
            if (item not in self._state['data']):
                self._state['data'][item] = value

    def __getitem__(self, item):
        # Missing keys yield None instead of raising KeyError.
        return self._state['data'].get(item, None)

    def __getattr__(self, item):
        # Invoked only for attributes not found normally; mirrors __getitem__.
        return self._state['data'].get(item, None)

    def __setitem__(self, item, value):
        self._state['data'][item] = value

    def __setattr__(self, item, value):
        # Every attribute assignment lands in the data dict ('_state' itself
        # is only reachable via __dict__, see __init__).
        self._state['data'][item] = value

    def clear(self):
        """Drop all stored state and request an app rerun."""
        self._state['data'].clear()
        self._state['session'].request_rerun(None)

    def sync(self):
        """Request one rerun when the state changed since the last sync.

        The is_rerun flag breaks the loop that would otherwise occur
        because the rerun itself re-executes sync().
        """
        data_to_bytes = self._state['hasher'].to_bytes(self._state['data'], None)
        if self._state['is_rerun']:
            self._state['is_rerun'] = False
        elif (self._state['hash'] is not None):
            if (self._state['hash'] != data_to_bytes):
                self._state['is_rerun'] = True
                self._state['session'].request_rerun(None)
        self._state['hash'] = data_to_bytes
def build(x_channels, cond_channels, hparams, is_training):
    """Construct a Glow model plus (for training) its optimizer and LR schedule.

    Args:
        x_channels: channel count of the data.
        cond_channels: channel count of the conditioning input.
        hparams: JsonConfig instance, or a path to a JSON config file.
        is_training: when True, an optimizer and schedule are also built and
            warm-start checkpoints may be loaded.

    Returns:
        dict with keys: graph, optim, lrschedule, devices, data_device,
        loaded_step.
    """
    if isinstance(hparams, str):
        # Allow passing a config file path instead of a parsed object.
        hparams = JsonConfig(hparams)
    (graph, optim, lrschedule, criterion_dict) = (None, None, None, None)
    (cpu, devices) = ('cpu', None)
    get_loss = None
    graph = Glow(x_channels, cond_channels, hparams)
    graph.device = hparams.Device.glow
    if (graph is not None):
        # Resolve the configured device spec; build on CPU first.
        devices = get_proper_device(graph.device)
        graph.device = devices
        graph.to(cpu)
    try:
        if ((graph is not None) and is_training):
            optim_name = hparams.Optim.name
            optim = __build_optim_dict[optim_name](graph.parameters(), hparams.Optim.args.to_dict())
            print('[Builder]: Using optimizer `{}`, with args:{}'.format(optim_name, hparams.Optim.args))
            schedule_name = 'default'
            schedule_args = {}
            if ('Schedule' in hparams.Optim):
                schedule_name = hparams.Optim.Schedule.name
                schedule_args = hparams.Optim.Schedule.args.to_dict()
            if (not ('init_lr' in schedule_args)):
                # Default the schedule's base LR to the optimizer LR.
                schedule_args['init_lr'] = hparams.Optim.args.lr
            assert (schedule_args['init_lr'] == hparams.Optim.args.lr), 'Optim lr {} != Schedule init_lr {}'.format(hparams.Optim.args.lr, schedule_args['init_lr'])
            lrschedule = {'func': getattr(learning_rate_schedule, schedule_name), 'args': schedule_args}
    except KeyError:
        raise ValueError('[Builder]: Optimizer `{}` is not supported.'.format(optim_name))
    if (graph is not None):
        pre_trained = None
        loaded_step = 0
        if is_training:
            # Warm start only when explicitly configured.
            if (('warm_start' in hparams.Train) and (len(hparams.Train.warm_start) > 0)):
                pre_trained = hparams.Train.warm_start
        else:
            # Inference always loads a pre-trained checkpoint.
            pre_trained = hparams.Infer.pre_trained
        if (pre_trained is not None):
            loaded_step = load(os.path.basename(pre_trained), graph=graph, optim=optim, criterion_dict=None, pkg_dir=os.path.dirname(pre_trained), device=cpu)
        use_cpu = any([(isinstance(d, str) and (d.find('cpu') >= 0)) for d in devices])
        if use_cpu:
            graph = graph.cpu()
            print('[Builder]: Use cpu to train.')
        else:
            if ('data' in hparams.Device):
                data_gpu = hparams.Device.data
                if isinstance(data_gpu, str):
                    # Parse 'cuda:N' style specs; strips the 'cuda:' prefix.
                    data_gpu = int(data_gpu[5:])
            else:
                data_gpu = devices[0]
            graph = graph.cuda(device=devices[0])
            if (is_training and (pre_trained is not None)):
                if hasattr(optim, 'state'):
                    # Loaded optimizer state tensors live on CPU; move them
                    # to the training GPU recursively.
                    def move_to(D, device):
                        for k in D:
                            if (isinstance(D[k], dict) or isinstance(D[k], defaultdict)):
                                move_to(D[k], device)
                            elif torch.is_tensor(D[k]):
                                D[k] = D[k].cuda(device)
                    move_to(optim.state, devices[0])
            print('[Builder]: Use cuda {} to train, use {} to load data and get loss.'.format(devices, data_gpu))
    return {'graph': graph, 'optim': optim, 'lrschedule': lrschedule, 'devices': devices, 'data_device': (data_gpu if (not use_cpu) else 'cpu'), 'loaded_step': loaded_step}
class SparseEdgeConvLayer(MessagePassing):
    """Graph convolution that gates each edge with a learned scalar weight.

    Edge features are mapped to a (0, 1) scalar via Linear+Sigmoid, node
    features are linearly projected, and messages are aggregated through a
    SparseTensor adjacency whose values are the edge gates (mean-reduced in
    the fused message_and_aggregate path).
    """

    def __init__(self, in_channels, out_channels, improved=False, cached=False, bias=True, **kwargs):
        super(SparseEdgeConvLayer, self).__init__(aggr=cfg.gnn.agg, **kwargs)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.improved = improved
        self.cached = cached
        self.normalize = cfg.gnn.normalize_adj
        self.msg_direction = cfg.gnn.msg_direction
        # Edge gate: edge_dim features -> single (0, 1) scalar.
        self.linear_edge = nn.Sequential(nn.Linear(cfg.dataset.edge_dim, 1), nn.Sigmoid())
        self.linear_node = nn.Linear(in_channels, out_channels)
        if bias:
            self.bias = Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        zeros(self.bias)
        self.cached_result = None
        self.cached_num_edges = None

    def forward(self, x, edge_index, edge_feature):
        # Scalar gate per edge, projected node features.
        edge_feature = self.linear_edge(edge_feature)
        x = self.linear_node(x)
        num_nodes = x.shape[0]
        # Sparse adjacency whose nonzero values are the edge gates.
        W = SparseTensor(row=edge_index[0], col=edge_index[1], value=edge_feature.squeeze(), sparse_sizes=(num_nodes, num_nodes))
        out = self.propagate(edge_index=W, x=x)
        return out

    def message(self, x_j):
        # Fallback (non-fused) message: pass neighbor features unchanged.
        return x_j

    def message_and_aggregate(self, adj_t, x):
        # Fused path: sparse matmul with mean reduction over neighbors.
        return matmul(adj_t, x, reduce='mean')

    def update(self, aggr_out):
        if (self.bias is not None):
            aggr_out = (aggr_out + self.bias)
        return aggr_out

    def __repr__(self):
        return '{}({}, {})'.format(self.__class__.__name__, self.in_channels, self.out_channels)
class PredictJointModelStack(ModelStack[SupportsPredictJoint], SupportsPredictJoint):
    """A stack of joint-prediction models that itself supports joint
    prediction by fusing the per-model outputs."""

    def predict_joint(self, query_points: TensorType) -> tuple[(TensorType, TensorType)]:
        """Predict jointly with every stacked model; means are concatenated
        on the last axis, covariances on the third-from-last axis."""
        means = []
        covs = []
        for model in self._models:
            mean, cov = model.predict_joint(query_points)
            means.append(mean)
            covs.append(cov)
        return (tf.concat(means, axis=-1), tf.concat(covs, axis=-3))
def validate_dk_cvr(df: Union[(str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame)], column: str = '') -> Union[(bool, pd.Series, pd.DataFrame)]:
    """Validate Danish CVR company registration numbers.

    Accepts a scalar, a (pandas/dask) Series, or a DataFrame. For a
    DataFrame a non-empty ``column`` validates only that column; otherwise
    every cell is checked. The return value mirrors the input's shape.
    """
    if isinstance(df, (pd.Series, dd.Series)):
        return df.apply(cvr.is_valid)
    if isinstance(df, (pd.DataFrame, dd.DataFrame)):
        if column != '':
            return df[column].apply(cvr.is_valid)
        return df.applymap(cvr.is_valid)
    # Scalar input: validate directly.
    return cvr.is_valid(df)
def totuple(a):
    """Recursively convert a nested iterable (e.g. a numpy array) into
    nested tuples; non-iterable leaves are returned unchanged."""
    try:
        elements = iter(a)
    except TypeError:
        return a
    return tuple(totuple(element) for element in elements)
def print_timedelta(*args, sep=' '):
    """Log milliseconds elapsed since the previous call, followed by the
    stringified ``args`` joined with ``sep``; updates the module-level
    ``last_time`` marker."""
    global last_time
    now = time.time()
    elapsed_ms = (now - last_time) * 1000
    logger.info('[%7.2f] %s', elapsed_ms, sep.join(map(str, args)))
    last_time = now
def test_div():
    """Dividing two parameters should produce a Product node."""
    denominator = optplan.Parameter()
    numerator = optplan.Parameter()
    quotient = numerator / denominator
    assert isinstance(quotient, optplan.Product)
class RobustRandomCutForest(BaseModel):
    """Streaming anomaly detector based on Robust Random Cut Forests.

    Maintains ``num_trees`` random cut trees over a sliding window of
    ``tree_size`` points; the anomaly score is the collusive displacement
    (CoDisp) averaged across the trees.
    """

    def __init__(self, num_trees=4, shingle_size=4, tree_size=256):
        from rrcf import rrcf
        self.tree_size = tree_size
        self.shingle_size = shingle_size
        self.num_trees = num_trees
        # One independently grown random cut tree per forest member.
        self.forest = [rrcf.RCTree() for _ in range(self.num_trees)]
        self.index = 0

    def fit_partial(self, X, y=None):
        """Insert one point into every tree, evicting the oldest point once
        a tree exceeds the sliding-window capacity. Returns self."""
        for tree in self.forest:
            if len(tree.leaves) > self.tree_size:
                tree.forget_point(self.index - self.tree_size)
            tree.insert_point(X, index=self.index)
        self.index += 1
        return self

    def score_partial(self, X):
        """Return the average CoDisp of ``X`` across trees.

        Points not already present are inserted temporarily so their
        displacement can be computed, then removed again.
        """
        total = 0.0
        for tree in self.forest:
            leaf = tree.find_duplicate(X)
            if leaf is None:
                tree.insert_point(X, index='test_point')
                total += (1.0 * tree.codisp('test_point')) / self.num_trees
                tree.forget_point('test_point')
            else:
                total += (1.0 * tree.codisp(leaf)) / self.num_trees
        return total
def register_Ns3TcpSocketFactory_methods(root_module, cls):
    # Auto-generated (pybindgen-style) binding registration for
    # ns3::TcpSocketFactory. Registration order is significant for the
    # generated wrapper code, so do not reorder these calls.
    cls.add_constructor([])
    # Copy constructor: TcpSocketFactory(TcpSocketFactory const &).
    cls.add_constructor([param('ns3::TcpSocketFactory const &', 'arg0')])
    # static ns3::TypeId GetTypeId().
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    return
def set_param(torch_layer, weight, bias=None):
    """Install ``weight`` (and optionally ``bias``) into a torch layer,
    asserting the shapes match the layer's existing parameters."""
    assert torch_layer.weight.shape == weight.shape, '{} layer.weight does not match'.format(torch_layer)
    torch_layer.weight = torch.nn.Parameter(weight)
    if bias is None:
        return
    assert torch_layer.bias.shape == bias.shape, '{} layer.bias does not match'.format(torch_layer)
    torch_layer.bias = torch.nn.Parameter(bias)
def quantity_from_str(text):
    """Parse a string like ``'5 erg/s'`` into an astropy Quantity.

    The special unit ``log_lsun`` is interpreted as log10 of solar
    luminosities and converted to erg/s; values given in plain L_sun are
    returned as multiples of the solar-luminosity constant.
    """
    value_str, unit_str = text.split(None, 1)
    value = float(value_str)
    if unit_str.strip() == 'log_lsun':
        # De-log and rescale into cgs (erg/s).
        value = 10 ** (value + np.log10(const.L_sun.cgs.value))
        unit_str = 'erg/s'
    unit = u.Unit(unit_str)
    if unit == u.L_sun:
        return value * const.L_sun
    return u.Quantity(value, unit_str)
def approx(expected, **kwargs):
    """Tolerant comparison helper.

    Booleans are wrapped so that equality is decided purely by truthiness
    (e.g. ``approx(True) == 1``). All other values are delegated to
    ``pytest.approx`` with the relative tolerance floored at
    ``get_rel_eps()``.
    """
    if isinstance(expected, bool):
        class boolean_integer():
            """Compares equal to any value with the same truthiness."""

            def __init__(self, value):
                self.value = value

            def __eq__(self, other):
                return bool(self.value) == bool(other)

            def __ne__(self, other):
                return bool(self.value) != bool(other)

        return boolean_integer(expected)
    requested_rel = kwargs.get('rel', 1e-06)
    kwargs['rel'] = max(requested_rel, get_rel_eps())
    import pytest
    return pytest.approx(expected, **kwargs)
def get_parameter_groups(model, weight_decay=1e-05, skip_list=(), get_num_layer=None, get_layer_scale=None):
    """Split a model's parameters into optimizer groups.

    1-D parameters, biases and names in ``skip_list`` get zero weight
    decay; everything else gets ``weight_decay``. When ``get_num_layer`` is
    given, groups are further keyed by layer id, and ``get_layer_scale``
    assigns a per-group learning-rate scale.

    Returns:
        list of parameter-group dicts suitable for a torch optimizer.
    """
    group_name_index = {}
    group_param_index = {}
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue  # frozen weights never join an optimizer group
        is_no_decay = len(param.shape) == 1 or name.endswith('.bias') or name in skip_list
        group_name = 'no_decay' if is_no_decay else 'decay'
        this_weight_decay = 0.0 if is_no_decay else weight_decay
        layer_id = None
        if get_num_layer is not None:
            layer_id = get_num_layer(name)
            group_name = 'layer_%d_%s' % (layer_id, group_name)
        if group_name not in group_name_index:
            scale = get_layer_scale(layer_id) if get_layer_scale is not None else 1.0
            group_name_index[group_name] = {'weight_decay': this_weight_decay, 'params': [], 'lr_scale': scale}
            group_param_index[group_name] = {'weight_decay': this_weight_decay, 'params': [], 'lr_scale': scale}
        group_param_index[group_name]['params'].append(param)
        group_name_index[group_name]['params'].append(name)
    print('Param groups = %s' % json.dumps(group_name_index, indent=2))
    return list(group_param_index.values())
class Partition26(nn.Module):
    """Pipeline-parallel partition #26 of a T5ForConditionalGeneration model.

    Appears machine-generated by a model-partitioning tool — do not hand-edit
    the layer lists or the forward trace; statement order encodes the
    partition's dataflow.  This slice covers the tail of decoder block 15's
    feed-forward, all of decoder blocks 16-17, and the start of block 18's
    self-attention, pinned to device cuda:26.
    """

    # Original module paths (in registration order) for every layer owned by
    # this partition; module i is re-registered locally as `l_{i}`.
    LAYER_SCOPES = [
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[15]/T5LayerFF[2]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[15]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[15]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[15]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[15]/T5LayerFF[2]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerCrossAttention[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerFF[2]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[16]/T5LayerFF[2]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerSelfAttention[0]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerCrossAttention[1]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerFF[2]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[17]/T5LayerFF[2]/Dropout[dropout]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[18]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[18]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[18]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]',
        'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[18]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]']
    # No free-standing parameters/buffers belong to this partition.
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:26'):
        """Register this partition's layers/tensors and move them to `device`.

        `layers`/`tensors` map original scope paths to the actual modules
        and parameters/buffers extracted from the full model.
        """
        super().__init__()
        # Register owned sub-modules as l_0..l_46 in LAYER_SCOPES order.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        # Register owned parameters (p_*) and buffers (b_*); TENSORS is
        # empty here, so this loop is a no-op for this partition.
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # forward() receives 4 flat inputs (see unflatten call below).
        self.input_structure = [1, 1, 1, 1]
        # Maps local names back to original model paths — presumably used by
        # the module-level state_dict/load_state_dict helpers to translate
        # checkpoint keys (TODO confirm).
        self.lookup = {'l_0': 'decoder.15.2.layer_norm', 'l_1': 'decoder.15.2.DenseReluDense.wi', 'l_2': 'decoder.15.2.DenseReluDense.dropout', 'l_3': 'decoder.15.2.DenseReluDense.wo', 'l_4': 'decoder.15.2.dropout', 'l_5': 'decoder.16.0.layer_norm', 'l_6': 'decoder.16.0.SelfAttention.q', 'l_7': 'decoder.16.0.SelfAttention.k', 'l_8': 'decoder.16.0.SelfAttention.v', 'l_9': 'decoder.16.0.SelfAttention.dropout', 'l_10': 'decoder.16.0.SelfAttention.o', 'l_11': 'decoder.16.0.dropout', 'l_12': 'decoder.16.1.layer_norm', 'l_13': 'decoder.16.1.EncDecAttention.q', 'l_14': 'decoder.16.1.EncDecAttention.k', 'l_15': 'decoder.16.1.EncDecAttention.v', 'l_16': 'decoder.16.1.EncDecAttention.dropout', 'l_17': 'decoder.16.1.EncDecAttention.o', 'l_18': 'decoder.16.1.dropout', 'l_19': 'decoder.16.2.layer_norm', 'l_20': 'decoder.16.2.DenseReluDense.wi', 'l_21': 'decoder.16.2.DenseReluDense.dropout', 'l_22': 'decoder.16.2.DenseReluDense.wo', 'l_23': 'decoder.16.2.dropout', 'l_24': 'decoder.17.0.layer_norm', 'l_25': 'decoder.17.0.SelfAttention.q', 'l_26': 'decoder.17.0.SelfAttention.k', 'l_27': 'decoder.17.0.SelfAttention.v', 'l_28': 'decoder.17.0.SelfAttention.dropout', 'l_29': 'decoder.17.0.SelfAttention.o', 'l_30': 'decoder.17.0.dropout', 'l_31': 'decoder.17.1.layer_norm', 'l_32': 'decoder.17.1.EncDecAttention.q', 'l_33': 'decoder.17.1.EncDecAttention.k', 'l_34': 'decoder.17.1.EncDecAttention.v', 'l_35': 'decoder.17.1.EncDecAttention.dropout', 'l_36': 'decoder.17.1.EncDecAttention.o', 'l_37': 'decoder.17.1.dropout', 'l_38': 'decoder.17.2.layer_norm', 'l_39': 'decoder.17.2.DenseReluDense.wi', 'l_40': 'decoder.17.2.DenseReluDense.dropout', 'l_41': 'decoder.17.2.DenseReluDense.wo', 'l_42': 'decoder.17.2.dropout', 'l_43': 'decoder.18.0.layer_norm', 'l_44': 'decoder.18.0.SelfAttention.q', 'l_45': 'decoder.18.0.SelfAttention.k', 'l_46': 'decoder.18.0.SelfAttention.v'}
        self.to(self.device)

    def forward(self, *args):
        """Run this partition's slice of the decoder.

        Inputs (flattened; order inferred from usage — TODO confirm against
        the partitioning tool's manifest):
          x0 — encoder output (feeds only cross-attention k/v projections)
          x1 — self-attention bias/mask (added to attention scores)
          x2 — cross-attention bias/mask
          x3 — hidden states from the previous partition
        Attention reshapes use 32 heads x 128 head-dim = 4096 hidden.
        """
        (x0, x1, x2, x3) = unflatten(args, self.input_structure)
        # Cross-attention k/v projections for blocks 16 and 17, computed
        # up-front from the encoder output x0.
        t_0 = self.l_14(x0)
        t_1 = self.l_15(x0)
        t_2 = self.l_33(x0)
        t_3 = self.l_34(x0)
        # --- decoder block 15: feed-forward (norm -> wi -> relu -> wo) ---
        t_4 = self.l_0(x3)
        t_4 = self.l_1(t_4)
        t_4 = torch.nn.functional.relu(t_4, inplace=False)
        t_4 = self.l_2(t_4)
        t_4 = self.l_3(t_4)
        t_4 = self.l_4(t_4)
        t_4 = (x3 + t_4)
        # --- decoder block 16: self-attention ---
        t_5 = self.l_5(t_4)
        t_6 = t_5.size()
        t_7 = self.l_6(t_5)
        t_8 = self.l_7(t_5)
        t_5 = self.l_8(t_5)
        t_6 = t_6[0]
        t_7 = t_7.view(t_6, (- 1), 32, 128)
        t_7 = t_7.transpose(1, 2)
        t_8 = t_8.view(t_6, (- 1), 32, 128)
        t_8 = t_8.transpose(1, 2)
        t_5 = t_5.view(t_6, (- 1), 32, 128)
        t_5 = t_5.transpose(1, 2)
        t_8 = t_8.transpose(3, 2)
        t_8 = torch.matmul(t_7, t_8)
        t_8 += x1
        # Softmax in float32 for stability, then cast back.
        t_7 = t_8.float()
        t_7 = torch.nn.functional.softmax(t_7, dim=(- 1), _stacklevel=3, dtype=None)
        t_8 = t_7.type_as(t_8)
        t_8 = self.l_9(t_8)
        t_5 = torch.matmul(t_8, t_5)
        t_5 = t_5.transpose(1, 2)
        t_5 = t_5.contiguous()
        t_6 = t_5.view(t_6, (- 1), 4096)
        t_6 = self.l_10(t_6)
        t_6 = self.l_11(t_6)
        t_6 = (t_4 + t_6)
        # --- decoder block 16: cross-attention ---
        t_4 = self.l_12(t_6)
        t_5 = t_4.size()
        t_4 = self.l_13(t_4)
        t_5 = t_5[0]
        t_4 = t_4.view(t_5, (- 1), 32, 128)
        t_4 = t_4.transpose(1, 2)
        t_0 = t_0.view(t_5, (- 1), 32, 128)
        t_0 = t_0.transpose(1, 2)
        t_1 = t_1.view(t_5, (- 1), 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_0 = t_0.transpose(3, 2)
        t_0 = torch.matmul(t_4, t_0)
        t_0 += x2
        t_4 = t_0.float()
        t_4 = torch.nn.functional.softmax(t_4, dim=(- 1), _stacklevel=3, dtype=None)
        t_0 = t_4.type_as(t_0)
        t_0 = self.l_16(t_0)
        t_1 = torch.matmul(t_0, t_1)
        t_1 = t_1.transpose(1, 2)
        t_1 = t_1.contiguous()
        t_5 = t_1.view(t_5, (- 1), 4096)
        t_5 = self.l_17(t_5)
        t_5 = self.l_18(t_5)
        t_5 = (t_6 + t_5)
        # --- decoder block 16: feed-forward ---
        t_6 = self.l_19(t_5)
        t_6 = self.l_20(t_6)
        t_6 = torch.nn.functional.relu(t_6, inplace=False)
        t_6 = self.l_21(t_6)
        t_6 = self.l_22(t_6)
        t_6 = self.l_23(t_6)
        t_6 = (t_5 + t_6)
        # --- decoder block 17: self-attention ---
        t_5 = self.l_24(t_6)
        t_1 = t_5.size()
        t_0 = self.l_25(t_5)
        t_4 = self.l_26(t_5)
        t_5 = self.l_27(t_5)
        t_1 = t_1[0]
        t_0 = t_0.view(t_1, (- 1), 32, 128)
        t_0 = t_0.transpose(1, 2)
        t_4 = t_4.view(t_1, (- 1), 32, 128)
        t_4 = t_4.transpose(1, 2)
        t_5 = t_5.view(t_1, (- 1), 32, 128)
        t_5 = t_5.transpose(1, 2)
        t_4 = t_4.transpose(3, 2)
        t_4 = torch.matmul(t_0, t_4)
        t_4 += x1
        t_0 = t_4.float()
        t_0 = torch.nn.functional.softmax(t_0, dim=(- 1), _stacklevel=3, dtype=None)
        t_4 = t_0.type_as(t_4)
        t_4 = self.l_28(t_4)
        t_5 = torch.matmul(t_4, t_5)
        t_5 = t_5.transpose(1, 2)
        t_5 = t_5.contiguous()
        t_1 = t_5.view(t_1, (- 1), 4096)
        t_1 = self.l_29(t_1)
        t_1 = self.l_30(t_1)
        t_1 = (t_6 + t_1)
        # --- decoder block 17: cross-attention ---
        t_6 = self.l_31(t_1)
        t_5 = t_6.size()
        t_6 = self.l_32(t_6)
        t_5 = t_5[0]
        t_6 = t_6.view(t_5, (- 1), 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_2 = t_2.view(t_5, (- 1), 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_3 = t_3.view(t_5, (- 1), 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_2 = t_2.transpose(3, 2)
        t_2 = torch.matmul(t_6, t_2)
        t_2 += x2
        t_6 = t_2.float()
        t_6 = torch.nn.functional.softmax(t_6, dim=(- 1), _stacklevel=3, dtype=None)
        t_2 = t_6.type_as(t_2)
        t_2 = self.l_35(t_2)
        t_3 = torch.matmul(t_2, t_3)
        t_3 = t_3.transpose(1, 2)
        t_3 = t_3.contiguous()
        t_5 = t_3.view(t_5, (- 1), 4096)
        t_5 = self.l_36(t_5)
        t_5 = self.l_37(t_5)
        t_5 = (t_1 + t_5)
        # --- decoder block 17: feed-forward ---
        t_1 = self.l_38(t_5)
        t_1 = self.l_39(t_1)
        t_1 = torch.nn.functional.relu(t_1, inplace=False)
        t_1 = self.l_40(t_1)
        t_1 = self.l_41(t_1)
        t_1 = self.l_42(t_1)
        t_1 = (t_5 + t_1)
        # --- decoder block 18: start of self-attention (norm + q/k/v) ---
        # The remaining work happens in the next partition.
        t_5 = self.l_43(t_1)
        t_3 = self.l_44(t_5)
        t_2 = self.l_45(t_5)
        t_6 = self.l_46(t_5)
        return list(flatten((x0, x1, x2, t_1, t_5, t_3, t_2, t_6)))

    # The following delegate to module-level helpers that translate between
    # local l_*/p_*/b_* names and the original model's names via `lookup`.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
class TestCabs(object):
    """Tests for complex absolute value (np.abs) — nose-style setup/teardown."""

    def setup(self):
        # Silence invalid-value FP warnings triggered by nan/inf operands.
        self.olderr = np.seterr(invalid='ignore')

    def teardown(self):
        np.seterr(**self.olderr)

    def test_simple(self):
        values = np.array([(1 + 1j), (0 + 2j), (1 + 2j), np.inf, np.nan])
        expected = np.array([np.sqrt(2.0), 2, np.sqrt(5), np.inf, np.nan])
        actual = np.abs(values)
        for got, want in zip(actual, expected):
            assert_almost_equal(got, want)

    def test_fabs(self):
        # |re + 0j| must equal the real part, including signed-zero
        # imaginary parts and non-finite real parts.
        x = np.array([(1 + 0j)], dtype=complex)
        assert_array_equal(np.abs(x), np.real(x))
        for re_part in (1, np.inf, np.nan):
            x = np.array([complex(re_part, np.NZERO)], dtype=complex)
            assert_array_equal(np.abs(x), np.real(x))

    def test_cabs_inf_nan(self):
        # Direct checks on |re + im*1j| for nan/inf corner cases.
        direct_cases = [
            (np.nan, np.nan, np.nan),
            ((- np.nan), np.nan, np.nan),
            (np.inf, np.nan, np.inf),
            ((- np.inf), np.nan, np.inf),
        ]
        for re_part, im_part, expected in direct_cases:
            check_real_value(np.abs, re_part, im_part, expected)

        # abs(conj(z)) must agree with abs(z) for the same components.
        x_parts = [np.nan, np.nan, np.inf, (- np.inf)]
        y_parts = [np.nan, (- np.nan), np.nan, np.nan]

        def conj_abs(a):
            return np.abs(np.conj(a))

        def ref_abs(a, b):
            return np.abs(complex(a, b))

        for re_part, im_part in zip(x_parts, y_parts):
            check_real_value(conj_abs, re_part, im_part, ref_abs(re_part, im_part))
class ResNet(nn.Module):
    """Torchvision-style ResNet backbone without a classifier head.

    forward() returns the layer4 feature map; dimout0..dimout4 record the
    channel widths after the stem and each stage so downstream heads can
    be sized.  NOTE(review): `num_classes` is accepted but unused here —
    there is no fc/avgpool in this variant.
    """

    def __init__(self, arch, block, layers, num_classes=1000, zero_init_residual=False, groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None):
        super(ResNet, self).__init__()
        # Human-readable architecture tag, used by __str__.
        self.name = arch
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if (replace_stride_with_dilation is None):
            # One flag per stage 2-4: replace that stage's stride-2 with dilation.
            replace_stride_with_dilation = [False, False, False]
        if (len(replace_stride_with_dilation) != 3):
            raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # Stem: 7x7 stride-2 conv + norm + relu + 3x3 stride-2 max-pool.
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Stages; _make_layer mutates self.inplanes/self.dilation, so order matters.
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
        # Record per-stage output channel widths from the final norm layer of
        # each stage (bn3 for Bottleneck, bn2 for BasicBlock).
        self.dimout0 = self.bn1.weight.size()[0]
        if isinstance(self.layer4[(- 1)], Bottleneck):
            self.dimout1 = self.layer1[(- 1)].bn3.weight.size()[0]
            self.dimout2 = self.layer2[(- 1)].bn3.weight.size()[0]
            self.dimout3 = self.layer3[(- 1)].bn3.weight.size()[0]
            self.dimout4 = self.layer4[(- 1)].bn3.weight.size()[0]
        elif isinstance(self.layer4[(- 1)], BasicBlock):
            self.dimout1 = self.layer1[(- 1)].bn2.weight.size()[0]
            self.dimout2 = self.layer2[(- 1)].bn2.weight.size()[0]
            self.dimout3 = self.layer3[(- 1)].bn2.weight.size()[0]
            self.dimout4 = self.layer4[(- 1)].bn2.weight.size()[0]
        else:
            raise ValueError('Unsupported class of block {}.'.format(type(self.layer4[(- 1)])))
        # Standard init: He for convs, constant for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Optionally zero the last norm of each residual branch so blocks
        # start as identity mappings; must run after the loop above.
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Build one stage of `blocks` residual blocks; updates self.inplanes."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Trade stride for dilation to keep spatial resolution.
            self.dilation *= stride
            stride = 1
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            # 1x1 projection on the skip path when shape changes.
            downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), norm_layer((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer))
        self.inplanes = (planes * block.expansion)
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
        return nn.Sequential(*layers)

    def _forward_impl(self, x):
        # Stem followed by the four stages; no pooling/classifier.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x

    def forward(self, x):
        return self._forward_impl(x)

    def get_nbr_params(self):
        """Total number of parameters (trainable and not)."""
        return sum([p.numel() for p in self.parameters()])

    def __str__(self):
        return '{}: RESNET.'.format(self.name)
class SSIM():
    """Thin wrapper around skimage's structural_similarity with fixed
    defaults (8-bit data range, multichannel, uniform window)."""

    def __init__(self):
        # Defaults mirror structural_similarity's keyword arguments.
        self.win_size = None
        self.gradient = False
        self.data_range = 255
        self.multichannel = True
        self.gaussian_weights = False
        self.full = False

    def forward(self, img1, img2):
        """Return the SSIM score between two images."""
        # Copies inputs before scoring — presumably defensive against
        # in-place modification by the metric; confirm before removing.
        a = img1.copy()
        b = img2.copy()
        return structural_similarity(
            a, b,
            win_size=self.win_size,
            gradient=self.gradient,
            data_range=self.data_range,
            multichannel=self.multichannel,
            gaussian_weights=self.gaussian_weights,
            full=self.full)
def group_chains(chain_list):
    """Merge overlapping element pairs into transitively-connected groups.

    `chain_list` is a list of 2-element sequences; pairs that share an
    element are merged into the same group, transitively.  NOTE: the
    input list is consumed (emptied) in place, matching the original
    behavior.

    Returns a list of groups, each a list of merged elements.

    Fix: removed the dead `aux` histogram that was computed but never
    used or returned.
    """
    chains = []
    while chain_list:
        chain = set(chain_list.pop(0))
        ii = 0
        while ii < len(chain_list):
            c1 = sorted(chain_list[ii])
            is0 = c1[0] in chain
            is1 = c1[1] in chain
            if is0 and is1:
                # Pair fully contained in the current group: drop it.
                chain_list.pop(ii)
            elif is0 or is1:
                # Partial overlap: absorb the pair and rescan from the
                # start, since earlier pairs may now connect transitively.
                chain.update(c1)
                chain_list.pop(ii)
                ii = 0
            else:
                ii += 1
        chains.append(list(chain))
    return chains
# NOTE(review): the decorator '@' markers were stripped in the original
# source (the first line was a bare call statement); restored here.
# `bar`/`foo` need the Taichi func/kernel decorators for the struct-for
# over field `a` to be legal — confirm against the original test file.
@_utils.test(print_preprocessed_ir=True)
def test_func():
    """Kernel calling a ti.func with a multi-value return unpacks correctly."""
    @ti.func
    def bar(x):
        return ((x * x), (- x))

    a = ti.field(ti.i32, shape=(10,))
    b = ti.field(ti.i32, shape=(10,))

    @ti.kernel
    def foo():
        for i in a:
            (a[i], b[i]) = bar(i)

    foo()
    for i in range(10):
        assert (a[i] == (i * i))
        assert (b[i] == (- i))
class GeniaProcessor(QueryNERProcessor):
    """Processor for the GENIA biomedical NER dataset."""

    def get_labels(self):
        """Return the GENIA entity tags plus the outside tag 'O'."""
        labels = ['cell_line', 'cell_type', 'DNA', 'RNA', 'protein', 'O']
        return labels
def tile(imgs, rows=None, cols=None):
    """Arrange equally-shaped images into a rows x cols grid.

    Missing dimensions are inferred (rows from sqrt(len) when both are
    None, the other via ceiling division); zero images pad the grid when
    rows*cols exceeds len(imgs).

    Fix: pad a *copy* of `imgs` so the caller's list is never mutated
    (the original extended the input list in place).

    Returns a single np.ndarray of the tiled grid.
    """
    imgs = list(imgs)  # local copy; padding must not leak to the caller
    if rows is None and cols is None:
        rows = int(math.sqrt(len(imgs)))
    if rows is None:
        rows = (len(imgs) + cols - 1) // cols
    else:
        cols = (len(imgs) + rows - 1) // rows
    diff = rows * cols - len(imgs)
    if diff != 0:
        imgs.extend(np.zeros(imgs[0].shape, dtype=imgs[0].dtype) for _ in range(diff))
    return np.vstack([np.hstack(imgs[i * cols:(i + 1) * cols]) for i in range(rows)])
# NOTE(review): the original first line began with a bare `.parametrize(...)`
# — a syntax error caused by stripped decorator prefixes.  Restored as the
# conventional pytest + Taichi decorator stack; `fill` also needs @ti.kernel
# for the sparse-matrix-builder writes.  Confirm against the original file.
@pytest.mark.parametrize('dtype, storage_format', [(ti.f32, 'col_major'), (ti.f32, 'row_major'), (ti.f64, 'col_major'), (ti.f64, 'row_major')])
@_utils.test(arch=ti.cpu)
def test_sparse_matrix_subtraction(dtype, storage_format):
    """(A - B)[i, j] must equal 2*j when A[i,j]=i+j and B[i,j]=i-j."""
    n = 8
    Abuilder = ti.linalg.SparseMatrixBuilder(n, n, max_num_triplets=100, dtype=dtype, storage_format=storage_format)
    Bbuilder = ti.linalg.SparseMatrixBuilder(n, n, max_num_triplets=100, dtype=dtype, storage_format=storage_format)

    @ti.kernel
    def fill(Abuilder: ti.types.sparse_matrix_builder(), Bbuilder: ti.types.sparse_matrix_builder()):
        for (i, j) in ti.ndrange(n, n):
            Abuilder[(i, j)] += (i + j)
            Bbuilder[(i, j)] += (i - j)

    fill(Abuilder, Bbuilder)
    A = Abuilder.build()
    B = Bbuilder.build()
    C = (A - B)
    for i in range(n):
        for j in range(n):
            # (i + j) - (i - j) == 2 * j
            assert (C[(i, j)] == (2 * j))
class Indexer(object):
    """Builds a Lucene full-text index from an id -> text mapping.

    Opens an IndexWriter in CREATE mode (any existing index at
    `index_dir` is overwritten).
    """

    def __init__(self, index_dir):
        print('lucene:', lucene.VERSION)
        self.index_dir = index_dir
        store = SimpleFSDirectory(Paths.get(self.index_dir))
        # Cap tokens per document at 1048576 to bound memory on huge docs.
        analyzer = LimitTokenCountAnalyzer(StandardAnalyzer(), 1048576)
        config = IndexWriterConfig(analyzer)
        config.setOpenMode(IndexWriterConfig.OpenMode.CREATE)
        self.writer = IndexWriter(store, config)

    def build_index(self, dict_data):
        """Index every (id, content) pair from `dict_data`, then commit
        and close the writer.  The writer cannot be reused afterwards."""
        print('loading data...')
        # t1: stored, untokenized id field (doc-level frequencies only).
        t1 = FieldType()
        t1.setStored(True)
        t1.setTokenized(False)
        t1.setIndexOptions(IndexOptions.DOCS_AND_FREQS)
        # t2: stored, tokenized content field with positions for phrase queries.
        t2 = FieldType()
        t2.setStored(True)
        t2.setTokenized(True)
        t2.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)
        for (k, v) in dict_data.items():
            doc = Document()
            doc.add(Field('id', k, t1))
            doc.add(Field('content', v, t2))
            self.writer.addDocument(doc)
        # Ticker prints progress on a background thread while the
        # (potentially long) commit runs; `tick = False` stops it after
        # close.  NOTE(review): the thread is started before commit and
        # only stopped afterwards — presumably a keep-alive/progress
        # indicator; confirm Ticker's semantics.
        ticker = Ticker()
        print('commit index')
        threading.Thread(target=ticker.run).start()
        self.writer.commit()
        self.writer.close()
        ticker.tick = False
        print('done')
def draw_rtt_1M(results):
    """Bar-plot round-trip latency (with std error bars) for the 1 MiB
    message size and save the figure as RTT1M.pdf."""
    size_1m = 2 ** 20
    subset = results[(results[COLUMNS[1]] == size_1m)]
    latency = subset[COLUMNS[2]]
    std = subset[COLUMNS[3]]
    plt.figure(figsize=(4, 4))
    positions = range(5)
    bar_width = 0.8
    # Latencies are in seconds; plot in milliseconds.
    plt.bar(positions, (latency * 1000), bar_width, label='usr', color=COLORS, linewidth=10)
    # First bar carries no error bar — presumably the baseline has no
    # meaningful std; confirm against the data source.
    plt.errorbar(positions[1:], (latency[1:] * 1000), yerr=(std[1:] * 1000), linewidth=0, elinewidth=1.5, color='#444444', capthick=1.5, capsize=6)
    plt.xticks(positions, LABELS, fontsize=18)
    for tick_label in plt.gca().get_xmajorticklabels():
        tick_label.set_rotation(30)
        tick_label.set_horizontalalignment('right')
    plt.yticks(fontsize=18)
    plt.ylabel('RTT (ms)', fontsize=18)
    plt.savefig('RTT1M.pdf', bbox_inches='tight')
def test_sort_events(tmp_path: pathlib.Path) -> None:
    """Sorting events must preserve content and order each shard."""
    events = create_events(tmp_path)
    sorted_events = events.sort(os.path.join(tmp_path, 'sorted_events'), num_threads=2)
    with sorted_events.reader() as reader:
        all_sorted_events = list(reader)
    # Content check: sorting must not add or drop events.
    # NOTE(review): `all_events` is not defined in this function — it is
    # presumably a module-level fixture populated by create_events; confirm.
    assert (sorted(all_sorted_events) == sorted(all_events))
    for reader_func in sorted_events.sharded_readers():
        with reader_func() as reader:
            s_events = list(reader)
        # Each shard must already be ordered by (key, interval start).
        assert (sorted(s_events, key=(lambda a: (a[0], a[1].start))) == s_events)
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Serialize `state` to `filename`; when `is_best`, also copy it to
    the best-model checkpoint file.

    NOTE(review): the best-model name depends on a module-level
    `save_name` — confirm it is set before any is_best=True call.
    """
    torch.save(state, filename)
    if not is_best:
        return
    best_filename = ('model_best_' + str(save_name)) + '.pth.tar'
    shutil.copyfile(filename, best_filename)
class NotSupportedError(Exception):
    """Raised when a requested feature or operation is not supported.

    The explicit ``__init__`` that only forwarded to ``super().__init__``
    was redundant and has been removed; ``Exception`` already stores the
    message in ``args`` and renders it via ``str()``.
    """
def get_charge(molecule):
    """Return the molecule's net formal charge via RDKit."""
    logger.debug('Entering get_charge()')
    formal_charge = Chem.rdmolops.GetFormalCharge(molecule)
    return formal_charge
class OperatorFPExceptionsTest(TestCase):
    """Division by zero must raise exactly when FP exceptions are enabled."""

    def test_fp_exception_divbyzero(self):
        # Set up 1.0 / 0.0 as a one-op net.
        workspace.blobs['0'] = np.array([0.0], dtype=np.float32)
        workspace.blobs['1'] = np.array([1.0], dtype=np.float32)
        net = core.Net('test_fp')
        net.Div(['1', '0'], 'out')
        for throw_if_fp_exceptions in (True, False):
            setThrowIfFpExceptions(throw_if_fp_exceptions)
            exception_raised = False
            try:
                workspace.RunNetOnce(net)
            except Exception:
                # Any runtime error counts; we only care whether one fired.
                exception_raised = True
            # Fix: assertEquals is a deprecated unittest alias — use assertEqual.
            self.assertEqual(exception_raised, throw_if_fp_exceptions)
def subsample_dataset(dataset, idxs, absolute=True):
    """Restrict `dataset` in place to the entries selected by `idxs`.

    If `absolute`, `idxs` are positional indices into the dataset;
    otherwise they are matched against the values in `dataset.uq_idxs`.
    Filters `samples`, `targets` and `uq_idxs` consistently and returns
    the (mutated) dataset.

    Fixes: removed `== True` comparisons, the wasted zero-mask allocation
    on the non-absolute path, and the string dtype in favor of `bool`.
    """
    if absolute:
        mask = np.zeros(len(dataset), dtype=bool)
        mask[idxs] = True
    else:
        wanted = set(idxs)  # O(1) membership for the comprehension below
        mask = np.array([(i in wanted) for i in dataset.uq_idxs])
    dataset.samples = [s for (m, s) in zip(mask, dataset.samples) if m]
    dataset.targets = [t for (m, t) in zip(mask, dataset.targets) if m]
    dataset.uq_idxs = dataset.uq_idxs[mask]
    return dataset
class ComboMultiStepLR():
    """Drives one WarmupMultiStepLR per named optimizer in lockstep."""

    def __init__(self, optimizers, base_lr, **kwargs):
        # One scheduler per optimizer, keyed by the optimizer's name.
        self.schedulers = {name: WarmupMultiStepLR(opt, lr=base_lr, **kwargs)
                           for name, opt in optimizers.items()}
        self.last_epoch = 0

    def set_batch_size(self, batch_size, lod):
        for sched in self.schedulers.values():
            sched.set_batch_size(batch_size, lod)

    def step(self, epoch=None):
        """Advance every scheduler; track the epoch (auto-increment if None)."""
        for sched in self.schedulers.values():
            sched.step()
        self.last_epoch = (self.last_epoch + 1) if epoch is None else epoch

    def state_dict(self):
        return {name: sched.state_dict() for name, sched in self.schedulers.items()}

    def load_state_dict(self, state_dict):
        for name, sched in self.schedulers.items():
            sched.load_state_dict(state_dict[name])
        # All schedulers must agree on the restored epoch.
        epochs = [sched.last_epoch for sched in self.schedulers.values()]
        assert np.all(np.asarray(epochs) == epochs[0])
        self.last_epoch = epochs[0]

    def start_epoch(self):
        return self.last_epoch
def get_user_input():
    """Interactively gather everything needed to duplicate a model.

    Prompts for the model type to copy, the new model's naming variants,
    checkpoint, processing classes, and target frameworks.

    Returns (old_model_type, model_patterns, add_copied_from, frameworks,
    old_checkpoint); `frameworks` is None when all of the old model's
    frameworks should be used.
    """
    model_types = list(auto_module.configuration_auto.MODEL_NAMES_MAPPING.keys())
    # Loop until the user names an existing model type, suggesting close
    # matches on a miss.
    valid_model_type = False
    while (not valid_model_type):
        old_model_type = input('What is the model you would like to duplicate? Please provide the lowercase `model_type` (e.g. roberta): ')
        if (old_model_type in model_types):
            valid_model_type = True
        else:
            print(f'{old_model_type} is not a valid model type.')
            near_choices = difflib.get_close_matches(old_model_type, model_types)
            if (len(near_choices) >= 1):
                if (len(near_choices) > 1):
                    near_choices = ' or '.join(near_choices)
                print(f'Did you mean {near_choices}?')
    # Pull the old model's patterns to seed defaults for the new one.
    old_model_info = retrieve_info_for_model(old_model_type)
    old_tokenizer_class = old_model_info['model_patterns'].tokenizer_class
    old_image_processor_class = old_model_info['model_patterns'].image_processor_class
    old_feature_extractor_class = old_model_info['model_patterns'].feature_extractor_class
    old_processor_class = old_model_info['model_patterns'].processor_class
    old_frameworks = old_model_info['frameworks']
    old_checkpoint = None
    if (len(old_model_info['model_patterns'].checkpoint) == 0):
        old_checkpoint = get_user_field("We couldn't find the name of the base checkpoint for that model, please enter it here.")
    # New model naming: each variant defaults to what ModelPatterns derives
    # from the paper name.
    model_name = get_user_field('What is the name (with no special casing) for your new model in the paper (e.g. RoBERTa)? ')
    default_patterns = ModelPatterns(model_name, model_name)
    model_type = get_user_field('What identifier would you like to use for the `model_type` of this model? ', default_value=default_patterns.model_type)
    model_lower_cased = get_user_field('What lowercase name would you like to use for the module (folder) of this model? ', default_value=default_patterns.model_lower_cased)
    model_camel_cased = get_user_field('What prefix (camel-cased) would you like to use for the model classes of this model (e.g. Roberta)? ', default_value=default_patterns.model_camel_cased)
    model_upper_cased = get_user_field('What prefix (upper-cased) would you like to use for the constants relative to this model? ', default_value=default_patterns.model_upper_cased)
    config_class = get_user_field('What will be the name of the config class for this model? ', default_value=f'{model_camel_cased}Config')
    checkpoint = get_user_field('Please give a checkpoint identifier (on the model Hub) for this new model (e.g. facebook/roberta-base): ')
    # Processing classes: reuse the old model's, or prompt for new names
    # for each class the old model actually defined.
    old_processing_classes = [c for c in [old_image_processor_class, old_feature_extractor_class, old_tokenizer_class, old_processor_class] if (c is not None)]
    old_processing_classes = ', '.join(old_processing_classes)
    keep_processing = get_user_field(f'Will your new model use the same processing class as {old_model_type} ({old_processing_classes}) (yes/no)? ', convert_to=convert_to_bool, fallback_message='Please answer yes/no, y/n, true/false or 1/0. ')
    if keep_processing:
        image_processor_class = old_image_processor_class
        feature_extractor_class = old_feature_extractor_class
        processor_class = old_processor_class
        tokenizer_class = old_tokenizer_class
    else:
        if (old_tokenizer_class is not None):
            tokenizer_class = get_user_field('What will be the name of the tokenizer class for this model? ', default_value=f'{model_camel_cased}Tokenizer')
        else:
            tokenizer_class = None
        if (old_image_processor_class is not None):
            image_processor_class = get_user_field('What will be the name of the image processor class for this model? ', default_value=f'{model_camel_cased}ImageProcessor')
        else:
            image_processor_class = None
        if (old_feature_extractor_class is not None):
            feature_extractor_class = get_user_field('What will be the name of the feature extractor class for this model? ', default_value=f'{model_camel_cased}FeatureExtractor')
        else:
            feature_extractor_class = None
        if (old_processor_class is not None):
            processor_class = get_user_field('What will be the name of the processor class for this model? ', default_value=f'{model_camel_cased}Processor')
        else:
            processor_class = None
    model_patterns = ModelPatterns(model_name, checkpoint, model_type=model_type, model_lower_cased=model_lower_cased, model_camel_cased=model_camel_cased, model_upper_cased=model_upper_cased, config_class=config_class, tokenizer_class=tokenizer_class, image_processor_class=image_processor_class, feature_extractor_class=feature_extractor_class, processor_class=processor_class)
    add_copied_from = get_user_field('Should we add # Copied from statements when creating the new modeling file (yes/no)? ', convert_to=convert_to_bool, default_value='yes', fallback_message='Please answer yes/no, y/n, true/false or 1/0.')
    # Frameworks: None means "all the old model's frameworks"; otherwise a
    # deduplicated list of the user's picks among pt/tf/flax.
    all_frameworks = get_user_field(f'Should we add a version of your new model in all the frameworks implemented by {old_model_type} ({old_frameworks}) (yes/no)? ', convert_to=convert_to_bool, default_value='yes', fallback_message='Please answer yes/no, y/n, true/false or 1/0.')
    if all_frameworks:
        frameworks = None
    else:
        frameworks = get_user_field('Please enter the list of framworks you want (pt, tf, flax) separated by spaces', is_valid_answer=(lambda x: all(((p in ['pt', 'tf', 'flax']) for p in x.split(' ')))))
        frameworks = list(set(frameworks.split(' ')))
    return (old_model_type, model_patterns, add_copied_from, frameworks, old_checkpoint)
('nlu_engine')
class SnipsNLUEngine(ProcessingUnit):
config_type = NLUEngineConfig
def __init__(self, config=None, **shared):
super(SnipsNLUEngine, self).__init__(config, **shared)
self.intent_parsers = []
self.dataset_metadata = None
def default_config(cls):
return None
def fitted(self):
return (self.dataset_metadata is not None)
_elapsed_time(logger, logging.INFO, 'Fitted NLU engine in {elapsed_time}')
def fit(self, dataset, force_retrain=True):
dataset = validate_and_format_dataset(dataset)
if (self.config is None):
language = dataset[LANGUAGE]
default_config = DEFAULT_CONFIGS.get(language)
if (default_config is not None):
self.config = self.config_type.from_dict(default_config)
else:
self.config = self.config_type()
self.load_resources_if_needed(dataset[LANGUAGE])
self.fit_builtin_entity_parser_if_needed(dataset)
self.fit_custom_entity_parser_if_needed(dataset)
parsers = []
for parser_config in self.config.intent_parsers_configs:
recycled_parser = None
for parser in self.intent_parsers:
if (parser.unit_name == parser_config.unit_name):
recycled_parser = parser
break
if (recycled_parser is None):
recycled_parser = IntentParser.from_config(parser_config, builtin_entity_parser=self.builtin_entity_parser, custom_entity_parser=self.custom_entity_parser, resources=self.resources, random_state=self.random_state)
if (force_retrain or (not recycled_parser.fitted)):
recycled_parser.fit(dataset, force_retrain)
parsers.append(recycled_parser)
self.intent_parsers = parsers
self.dataset_metadata = _get_dataset_metadata(dataset)
return self
_elapsed_time(logger, logging.DEBUG, 'Parsed input in {elapsed_time}')
_required
def parse(self, text, intents=None, top_n=None):
if (not isinstance(text, str)):
raise InvalidInputError(('Expected unicode but received: %s' % type(text)))
if isinstance(intents, str):
intents = {intents}
elif isinstance(intents, list):
intents = set(intents)
if (intents is not None):
for intent in intents:
if (intent not in self.dataset_metadata['slot_name_mappings']):
raise IntentNotFoundError(intent)
if (top_n is None):
none_proba = 0.0
for parser in self.intent_parsers:
res = parser.parse(text, intents)
if is_empty(res):
none_proba = res[RES_INTENT][RES_PROBA]
continue
resolved_slots = self._resolve_slots(text, res[RES_SLOTS])
return parsing_result(text, intent=res[RES_INTENT], slots=resolved_slots)
return empty_result(text, none_proba)
intents_results = self.get_intents(text)
if (intents is not None):
intents_results = [res for res in intents_results if ((res[RES_INTENT_NAME] is None) or (res[RES_INTENT_NAME] in intents))]
intents_results = intents_results[:top_n]
results = []
for intent_res in intents_results:
slots = self.get_slots(text, intent_res[RES_INTENT_NAME])
results.append(extraction_result(intent_res, slots))
return results
_elapsed_time(logger, logging.DEBUG, 'Got intents in {elapsed_time}')
_required
def get_intents(self, text):
results = None
for parser in self.intent_parsers:
parser_results = parser.get_intents(text)
if (results is None):
results = {res[RES_INTENT_NAME]: res for res in parser_results}
continue
for res in parser_results:
intent = res[RES_INTENT_NAME]
proba = max(res[RES_PROBA], results[intent][RES_PROBA])
results[intent][RES_PROBA] = proba
return sorted(itervalues(results), key=(lambda res: (- res[RES_PROBA])))
_elapsed_time(logger, logging.DEBUG, 'Parsed slots in {elapsed_time}')
_required
def get_slots(self, text, intent):
    """Extract and resolve the slots of a known *intent* in *text*.

    Returns [] when intent is None or no parser finds slots. Raises
    InvalidInputError for non-string input, IntentNotFoundError for an
    unknown intent.
    """
    if not isinstance(text, str):
        raise InvalidInputError(('Expected unicode but received: %s' % type(text)))
    if intent is None:
        return []
    if intent not in self.dataset_metadata['slot_name_mappings']:
        raise IntentNotFoundError(intent)
    # First parser that produces any slots wins
    for parser in self.intent_parsers:
        slots = parser.get_slots(text, intent)
        if slots:
            return self._resolve_slots(text, slots)
    return []
_persisted_path
def persist(self, path):
    """Serialize the engine (intent parsers, entity parsers, config and
    resources) into the directory *path*, which must not exist yet."""
    path.mkdir()
    # Each intent parser gets its own sub-directory; duplicate parser types
    # are disambiguated with a numeric suffix ("<name>_2", "<name>_3", ...).
    seen = defaultdict(int)
    parser_dirs = []
    for parser in self.intent_parsers:
        name = parser.unit_name
        seen[name] += 1
        if seen[name] > 1:
            name = '{n}_{c}'.format(n=name, c=seen[name])
        parser.persist(path / name)
        parser_dirs.append(name)
    config = self.config.to_dict() if self.config is not None else None
    builtin_entity_parser = None
    if self.builtin_entity_parser is not None:
        builtin_entity_parser = 'builtin_entity_parser'
        self.builtin_entity_parser.persist(path / builtin_entity_parser)
    custom_entity_parser = None
    if self.custom_entity_parser is not None:
        custom_entity_parser = 'custom_entity_parser'
        self.custom_entity_parser.persist(path / custom_entity_parser)
    model = {
        'unit_name': self.unit_name,
        'dataset_metadata': self.dataset_metadata,
        'intent_parsers': parser_dirs,
        'custom_entity_parser': custom_entity_parser,
        'builtin_entity_parser': builtin_entity_parser,
        'config': config,
        'model_version': __model_version__,
        'training_package_version': __version__,
    }
    with (path / 'nlu_engine.json').open(mode='w', encoding='utf8') as f:
        f.write(json_string(model))
    if self.fitted:
        # Persist language resources so the engine can be reloaded standalone
        required_resources = self.config.get_required_resources()
        language = self.dataset_metadata['language_code']
        resources_path = path / 'resources'
        resources_path.mkdir()
        persist_resources(self.resources, resources_path / language, required_resources)
def from_path(cls, path, **shared):
    """Load an engine previously serialized with ``persist`` from *path*.

    Raises LoadingError when the model file is missing and
    IncompatibleModelError on a model-version mismatch (unless the
    BYPASS_VERSION_CHECK flag is passed in ``shared``).
    """
    directory_path = Path(path)
    model_path = directory_path / 'nlu_engine.json'
    if not model_path.exists():
        raise LoadingError(('Missing nlu engine model file: %s' % model_path.name))
    with model_path.open(encoding='utf8') as f:
        model = json.load(f)
    model_version = model.get('model_version')
    if (model_version is None) or (model_version != __model_version__):
        # Version mismatch is fatal unless explicitly bypassed by the caller
        if shared.get(BYPASS_VERSION_CHECK, False):
            logger.warning("Incompatible model version found. The library expected '%s' but the loaded engine is '%s'. The NLU engine may not load correctly.", __model_version__, model_version)
        else:
            raise IncompatibleModelError(model_version)
    dataset_metadata = model['dataset_metadata']
    # Fill in any shared components the caller did not supply
    if (shared.get(RESOURCES) is None) and (dataset_metadata is not None):
        resources_dir = directory_path / 'resources' / dataset_metadata['language_code']
        if resources_dir.is_dir():
            shared[RESOURCES] = load_resources_from_dir(resources_dir)
    if shared.get(BUILTIN_ENTITY_PARSER) is None:
        builtin_dir = model['builtin_entity_parser']
        if builtin_dir is not None:
            shared[BUILTIN_ENTITY_PARSER] = BuiltinEntityParser.from_path(
                directory_path / builtin_dir)
    if shared.get(CUSTOM_ENTITY_PARSER) is None:
        custom_dir = model['custom_entity_parser']
        if custom_dir is not None:
            shared[CUSTOM_ENTITY_PARSER] = CustomEntityParser.from_path(
                directory_path / custom_dir)
    config = cls.config_type.from_dict(model['config'])
    nlu_engine = cls(config=config, **shared)
    nlu_engine.dataset_metadata = dataset_metadata
    # Reload each intent parser with the matching config entry
    nlu_engine.intent_parsers = [
        IntentParser.load_from_path(
            directory_path / parser_name,
            config.intent_parsers_configs[idx].unit_name, **shared)
        for (idx, parser_name) in enumerate(model['intent_parsers'])
    ]
    return nlu_engine
def _resolve_slots(self, text, slots):
    """Resolve raw slot values against the builtin and custom entity parsers.

    Slots that cannot be resolved are dropped, except for automatically
    extensible custom entities, which keep their raw value.
    """
    builtin_scope = [s[RES_ENTITY] for s in slots if is_builtin_entity(s[RES_ENTITY])]
    custom_scope = [s[RES_ENTITY] for s in slots if not is_builtin_entity(s[RES_ENTITY])]
    # Parse the whole utterance once per parser kind
    builtin_entities = self.builtin_entity_parser.parse(text, builtin_scope, use_cache=False)
    custom_entities = self.custom_entity_parser.parse(text, custom_scope, use_cache=True)
    resolved_slots = []
    for slot in slots:
        entity_name = slot[RES_ENTITY]
        raw_value = slot[RES_VALUE]
        is_builtin = is_builtin_entity(entity_name)
        if is_builtin:
            entities = builtin_entities
            parser = self.builtin_entity_parser
            slot_builder = builtin_slot
            use_cache = False
            extensible = False
        else:
            entities = custom_entities
            parser = self.custom_entity_parser
            slot_builder = custom_slot
            use_cache = True
            extensible = self.dataset_metadata[ENTITIES][entity_name][AUTOMATICALLY_EXTENSIBLE]
        # 1) Prefer an entity found in the full text at the exact same range
        resolved_slot = next(
            (slot_builder(slot, ent[RESOLVED_VALUE]) for ent in entities
             if (ent[ENTITY_KIND] == entity_name)
             and (ent[RES_MATCH_RANGE] == slot[RES_MATCH_RANGE])),
            None)
        # 2) Otherwise try resolving the raw slot value on its own; custom
        #    entities must match the raw value in full
        if resolved_slot is None:
            matches = parser.parse(raw_value, scope=[entity_name], use_cache=use_cache)
            if matches:
                match = matches[0]
                if is_builtin or (len(match[RES_VALUE]) == len(raw_value)):
                    resolved_slot = slot_builder(slot, match[RESOLVED_VALUE])
        # 3) Extensible custom entities accept the raw value as-is
        if (resolved_slot is None) and extensible:
            resolved_slot = slot_builder(slot)
        if resolved_slot is not None:
            resolved_slots.append(resolved_slot)
    return resolved_slots
class CityscapesLabelTool(QtWidgets.QMainWindow):
def __init__(self):
    """Set up the label tool: load the config file, initialize all UI/editing
    state and build the main window."""
    super(CityscapesLabelTool, self).__init__()
    # Persistent tool configuration, stored next to this file
    configDir = os.path.dirname(__file__)
    self.configFile = os.path.join(configDir, 'cityscapesLabelTool.conf')
    self.config = configuration()
    self.config.load(self.configFile)
    # Drawing geometry: scaled image size, offsets and border gap
    self.w = 0
    self.h = 0
    self.xoff = 0
    self.yoff = 0
    self.bordergap = 20
    self.scale = 1.0
    # Image list and naming conventions of the Cityscapes dataset
    self.images = []
    self.imageExt = '_leftImg8bit.png'
    self.gtExt = '{}_polygons.json'
    self.image = QtGui.QImage()
    self.idx = 0
    # Current annotation/correction documents and unsaved-change log
    self.annotation = None
    self.correctionXML = None
    self.changes = []
    # Object selection / highlighting state (-1 means "none")
    self.mouseObj = (- 1)
    self.selObjs = []
    self.highlightObjs = []
    self.highlightObjLabel = None
    self.highlightTexture = None
    # Mouse tracking state in widget and image coordinates
    self.mousePos = None
    self.mousePosOrig = None
    self.mousePosScaled = None
    self.mouseOutsideImage = True
    self.mousePosOnZoom = None
    self.mouseButtons = 0
    self.changedLayer = []
    self.changedPolygon = []
    # Polygon currently being drawn and the point being dragged (if any)
    self.drawPoly = QtGui.QPolygonF()
    self.drawPolyClosed = False
    self.draggedPt = (- 1)
    # Action groups, enabled/disabled depending on the current state
    self.actImage = []
    self.actImageNotFirst = []
    self.actImageNotLast = []
    self.actChanges = []
    self.actPolyOrSelObj = []
    self.actClosedPoly = []
    self.actSelObj = []
    self.singleActSelObj = []
    self.screenshotToggleState = False
    self.playState = False
    self.transpTempZero = False
    # Correction-mode state: boxes, selection and in-progress drawing
    self.correctAction = []
    self.corrections = []
    self.selected_correction = (- 1)
    self.in_progress_bbox = None
    self.in_progress_correction = None
    # NOTE(review): this assigns a list over QWidget.mousePressEvent, shadowing
    # the Qt event handler on the instance — confirm this is intended
    self.mousePressEvent = []
    # Label preselected in the label dialog
    self.defaultLabel = 'static'
    if (not (self.defaultLabel in name2label)):
        print('The {0} label is missing in the internal label definitions.'.format(self.defaultLabel))
        return
    self.lastLabel = self.defaultLabel
    # Build the UI and load the initial city/image
    self.initUI()
    self.deselectAllObjects()
    self.clearPolygon()
    self.clearChanges()
    self.loadCity()
    self.imageChanged()
def __del__(self):
    # Persist the UI configuration on teardown (best effort — the
    # interpreter may already be shutting down when this runs)
    self.config.save(self.configFile)
def initUI(self):
    """Build the toolbar with all tool actions, wire their signals, and show
    the main window maximized to the screen size."""
    self.toolbar = self.addToolBar('Tools')
    iconDir = os.path.join(os.path.dirname(__file__), 'icons')
    # --- City/image navigation actions ---
    loadAction = QtWidgets.QAction(QtGui.QIcon(os.path.join(iconDir, 'open.png')), '&Tools', self)
    loadAction.setShortcuts(['o'])
    self.setTip(loadAction, 'Open city')
    loadAction.triggered.connect(self.selectCity)
    self.toolbar.addAction(loadAction)
    backAction = QtWidgets.QAction(QtGui.QIcon(os.path.join(iconDir, 'back.png')), '&Tools', self)
    backAction.setShortcut('left')
    # NOTE(review): uses setStatusTip directly while every other action goes
    # through self.setTip — confirm whether this is intentional
    backAction.setStatusTip('Previous image')
    backAction.triggered.connect(self.prevImage)
    self.toolbar.addAction(backAction)
    self.actImageNotFirst.append(backAction)
    nextAction = QtWidgets.QAction(QtGui.QIcon(os.path.join(iconDir, 'next.png')), '&Tools', self)
    nextAction.setShortcut('right')
    self.setTip(nextAction, 'Next image')
    nextAction.triggered.connect(self.nextImage)
    self.toolbar.addAction(nextAction)
    self.actImageNotLast.append(nextAction)
    # Play: auto-advance through all images (checkable)
    playAction = QtWidgets.QAction(QtGui.QIcon(os.path.join(iconDir, 'play.png')), '&Tools', self)
    playAction.setShortcut(' ')
    playAction.setCheckable(True)
    playAction.setChecked(False)
    self.setTip(playAction, 'Play all images')
    playAction.triggered.connect(self.playImages)
    self.toolbar.addAction(playAction)
    self.actImageNotLast.append(playAction)
    self.playAction = playAction
    selImageAction = QtWidgets.QAction(QtGui.QIcon(os.path.join(iconDir, 'shuffle.png')), '&Tools', self)
    selImageAction.setShortcut('i')
    self.setTip(selImageAction, 'Select image')
    selImageAction.triggered.connect(self.selectImage)
    self.toolbar.addAction(selImageAction)
    self.actImage.append(selImageAction)
    # --- Editing actions ---
    saveAction = QtWidgets.QAction(QtGui.QIcon(os.path.join(iconDir, 'save.png')), '&Tools', self)
    saveAction.setShortcut('s')
    self.setTip(saveAction, 'Save changes')
    saveAction.triggered.connect(self.save)
    self.toolbar.addAction(saveAction)
    self.actChanges.append(saveAction)
    clearPolAction = QtWidgets.QAction(QtGui.QIcon(os.path.join(iconDir, 'clearpolygon.png')), '&Tools', self)
    clearPolAction.setShortcuts(['q', 'Esc'])
    self.setTip(clearPolAction, 'Clear polygon')
    clearPolAction.triggered.connect(self.clearPolygonAction)
    self.toolbar.addAction(clearPolAction)
    self.actPolyOrSelObj.append(clearPolAction)
    newObjAction = QtWidgets.QAction(QtGui.QIcon(os.path.join(iconDir, 'newobject.png')), '&Tools', self)
    newObjAction.setShortcuts(['n'])
    self.setTip(newObjAction, 'New object')
    newObjAction.triggered.connect(self.newObject)
    self.toolbar.addAction(newObjAction)
    self.actClosedPoly.append(newObjAction)
    deleteObjectAction = QtWidgets.QAction(QtGui.QIcon(os.path.join(iconDir, 'deleteobject.png')), '&Tools', self)
    deleteObjectAction.setShortcuts(['d', 'delete'])
    self.setTip(deleteObjectAction, 'Delete object')
    deleteObjectAction.triggered.connect(self.deleteObject)
    self.toolbar.addAction(deleteObjectAction)
    self.actSelObj.append(deleteObjectAction)
    undoAction = QtWidgets.QAction(QtGui.QIcon(os.path.join(iconDir, 'undo.png')), '&Tools', self)
    undoAction.setShortcut('u')
    self.setTip(undoAction, 'Undo all unsaved changes')
    undoAction.triggered.connect(self.undo)
    self.toolbar.addAction(undoAction)
    self.actChanges.append(undoAction)
    labelAction = QtWidgets.QAction(QtGui.QIcon(os.path.join(iconDir, 'modify.png')), '&Tools', self)
    labelAction.setShortcuts(['m', 'l'])
    self.setTip(labelAction, 'Modify label')
    labelAction.triggered.connect(self.modifyLabel)
    self.toolbar.addAction(labelAction)
    self.actSelObj.append(labelAction)
    # Layer reordering only makes sense for a single selected object
    layerUpAction = QtWidgets.QAction(QtGui.QIcon(os.path.join(iconDir, 'layerup.png')), '&Tools', self)
    layerUpAction.setShortcuts(['Up'])
    self.setTip(layerUpAction, 'Move object a layer up')
    layerUpAction.triggered.connect(self.layerUp)
    self.toolbar.addAction(layerUpAction)
    self.singleActSelObj.append(layerUpAction)
    layerDownAction = QtWidgets.QAction(QtGui.QIcon(os.path.join(iconDir, 'layerdown.png')), '&Tools', self)
    layerDownAction.setShortcuts(['Down'])
    self.setTip(layerDownAction, 'Move object a layer down')
    layerDownAction.triggered.connect(self.layerDown)
    self.toolbar.addAction(layerDownAction)
    self.singleActSelObj.append(layerDownAction)
    # --- View actions: zoom, highlight, transparency, screenshots ---
    zoomAction = QtWidgets.QAction(QtGui.QIcon(os.path.join(iconDir, 'zoom.png')), '&Tools', self)
    zoomAction.setShortcuts(['z'])
    zoomAction.setCheckable(True)
    zoomAction.setChecked(self.config.zoom)
    self.setTip(zoomAction, 'Enable/disable permanent zoom')
    zoomAction.toggled.connect(self.zoomToggle)
    self.toolbar.addAction(zoomAction)
    self.actImage.append(zoomAction)
    highlightAction = QtWidgets.QAction(QtGui.QIcon(os.path.join(iconDir, 'highlight.png')), '&Tools', self)
    highlightAction.setShortcuts(['g'])
    highlightAction.setCheckable(True)
    highlightAction.setChecked(self.config.highlight)
    self.setTip(highlightAction, 'Enable/disable highlight of certain object class')
    highlightAction.toggled.connect(self.highlightClassToggle)
    self.toolbar.addAction(highlightAction)
    self.actImage.append(highlightAction)
    minusAction = QtWidgets.QAction(QtGui.QIcon(os.path.join(iconDir, 'minus.png')), '&Tools', self)
    minusAction.setShortcut('-')
    self.setTip(minusAction, 'Decrease transparency')
    minusAction.triggered.connect(self.minus)
    self.toolbar.addAction(minusAction)
    plusAction = QtWidgets.QAction(QtGui.QIcon(os.path.join(iconDir, 'plus.png')), '&Tools', self)
    plusAction.setShortcut('+')
    self.setTip(plusAction, 'Increase transparency')
    plusAction.triggered.connect(self.plus)
    self.toolbar.addAction(plusAction)
    screenshotAction = QtWidgets.QAction(QtGui.QIcon(os.path.join(iconDir, 'screenshot.png')), '&Tools', self)
    screenshotAction.setShortcut('t')
    self.setTip(screenshotAction, 'Take a screenshot')
    screenshotAction.triggered.connect(self.screenshot)
    self.toolbar.addAction(screenshotAction)
    self.actImage.append(screenshotAction)
    screenshotToggleAction = QtWidgets.QAction(QtGui.QIcon(os.path.join(iconDir, 'screenshotToggle.png')), '&Tools', self)
    screenshotToggleAction.setShortcut('Ctrl+t')
    screenshotToggleAction.setCheckable(True)
    screenshotToggleAction.setChecked(False)
    self.setTip(screenshotToggleAction, 'Take a screenshot in each loaded frame')
    screenshotToggleAction.toggled.connect(self.screenshotToggle)
    self.toolbar.addAction(screenshotToggleAction)
    self.actImage.append(screenshotToggleAction)
    displayFilepathAction = QtWidgets.QAction(QtGui.QIcon(os.path.join(iconDir, 'filepath.png')), '&Tools', self)
    displayFilepathAction.setShortcut('f')
    self.setTip(displayFilepathAction, 'Show path to current image')
    displayFilepathAction.triggered.connect(self.displayFilepath)
    self.toolbar.addAction(displayFilepathAction)
    # --- Correction mode toggle (red icon when active) ---
    self.correctAction = QtWidgets.QAction(QtGui.QIcon(os.path.join(iconDir, 'checked6.png')), '&Tools', self)
    self.correctAction.setShortcut('c')
    self.correctAction.setCheckable(True)
    self.correctAction.setChecked(self.config.correctionMode)
    if self.config.correctionMode:
        self.correctAction.setIcon(QtGui.QIcon(os.path.join(iconDir, 'checked6_red.png')))
    self.setTip(self.correctAction, 'Toggle correction mode')
    self.correctAction.triggered.connect(self.toggleCorrectionMode)
    self.toolbar.addAction(self.correctAction)
    # --- Help and exit ---
    helpAction = QtWidgets.QAction(QtGui.QIcon(os.path.join(iconDir, 'help19.png')), '&Tools', self)
    helpAction.setShortcut('h')
    self.setTip(helpAction, 'Help')
    helpAction.triggered.connect(self.displayHelpMessage)
    self.toolbar.addAction(helpAction)
    exitAction = QtWidgets.QAction(QtGui.QIcon(os.path.join(iconDir, 'exit.png')), '&Tools', self)
    self.setTip(exitAction, 'Exit')
    exitAction.triggered.connect(self.close)
    self.toolbar.addAction(exitAction)
    # Status bar, mouse tracking and window geometry
    self.defaultStatusbar = 'Ready'
    self.statusBar().showMessage(self.defaultStatusbar)
    self.setMouseTracking(True)
    self.toolbar.setMouseTracking(True)
    screenShape = QtWidgets.QDesktopWidget().screenGeometry()
    self.resize(screenShape.width(), screenShape.height())
    self.applicationTitle = 'Cityscapes Label Tool v{}'.format(VERSION)
    self.setWindowTitle(self.applicationTitle)
    self.show()
def selectCity(self):
    """Let the user pick a (split, gtType, city) triple and load that city.

    The Cityscapes root is taken from the config, the CITYSCAPES_DATASET
    environment variable, or the default location relative to this script.
    Exits the application when no data is found.
    """
    restoreMessage = self.statusBar().currentMessage()
    # Determine the Cityscapes root folder
    csPath = self.config.csPath
    if (not csPath) or (not os.path.isdir(csPath)):
        if 'CITYSCAPES_DATASET' in os.environ:
            csPath = os.environ['CITYSCAPES_DATASET']
        else:
            csPath = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..')
    # Enumerate all cities per annotation type (gt*) and split
    availableCities = []
    annotations = sorted(glob.glob(os.path.join(csPath, 'gt*')))
    annotations = [os.path.basename(a) for a in annotations]
    splits = ['train_extra', 'train', 'val', 'test']
    for gt in annotations:
        for split in splits:
            cities = glob.glob(os.path.join(csPath, gt, split, '*'))
            cities.sort()
            availableCities.extend([(split, gt, os.path.basename(c)) for c in cities if os.path.isdir(c)])
    items = [((((split + ', ') + gt) + ', ') + city) for (split, gt, city) in availableCities]
    # Preselect the previously opened city when still available
    previousItem = ((((self.config.split + ', ') + self.config.gtType) + ', ') + self.config.cityName)
    default = 0
    if previousItem in items:
        default = items.index(previousItem)
    dlgTitle = 'Select city'
    # (removed dead stores: message/question were assigned dlgTitle and
    # immediately overwritten)
    message = 'Select city for editing'
    question = 'Which city would you like to edit?'
    self.statusBar().showMessage(message)
    if items:
        (item, ok) = QtWidgets.QInputDialog.getItem(self, dlgTitle, question, items, default, False)
        self.statusBar().showMessage(restoreMessage)
        if ok and item:
            (split, gt, city) = [str(i) for i in item.split(', ')]
            self.config.city = os.path.normpath(os.path.join(csPath, 'leftImg8bit', split, city))
            self.config.cityName = city
            self.config.labelPath = os.path.normpath(os.path.join(csPath, gt, split, city))
            self.config.correctionPath = os.path.normpath(os.path.join(csPath, (gt + '_corrections'), split, city))
            self.config.gtType = gt
            self.config.split = split
            # Reset editing state and load the newly selected city
            self.deselectAllObjects()
            self.clearPolygon()
            self.loadCity()
            self.imageChanged()
    else:
        warning = ''
        warning += 'The data was not found. Please:\n\n'
        warning += ' - make sure the scripts folder is in the Cityscapes root folder\n'
        warning += 'or\n'
        warning += ' - set CITYSCAPES_DATASET to the Cityscapes root folder\n'
        warning += " e.g. 'export CITYSCAPES_DATASET=<root_path>'\n"
        reply = QtWidgets.QMessageBox.information(self, 'ERROR!', warning, QtWidgets.QMessageBox.Ok)
        if reply == QtWidgets.QMessageBox.Ok:
            sys.exit()
    return
def prevImage(self):
    """Step to the previous image, saving pending changes first."""
    if not self.images:
        return
    if (self.idx > 0) and self.checkAndSave():
        self.idx -= 1
        self.imageChanged()
    return
def nextImage(self):
    """Advance to the next image; in play mode, keep advancing via a timer
    and stop automatically at the last image."""
    if not self.images:
        return
    atLast = self.idx >= (len(self.images) - 1)
    if not atLast:
        if self.checkAndSave():
            self.idx += 1
            self.imageChanged()
    elif self.playState:
        # Reached the end while playing: stop playback
        self.playState = False
        self.playAction.setChecked(False)
    if self.playState:
        # Schedule the next step without blocking the event loop
        QtCore.QTimer.singleShot(0, self.nextImage)
    return
def playImages(self, status):
    """Start or stop automatic playback of the image sequence."""
    self.playState = status
    if status:
        QtCore.QTimer.singleShot(0, self.nextImage)
def toggleCorrectionMode(self):
    """Toggle correction mode and swap the toolbar icon accordingly.

    Fix: resolve the icon directory from ``__file__`` (as everywhere else in
    this class) rather than ``sys.argv[0]``, which points at the launching
    script and breaks when the tool is started from another directory or via
    a wrapper script.
    """
    iconDir = os.path.join(os.path.dirname(__file__), 'icons')
    self.config.correctionMode = not self.config.correctionMode
    # Red check icon signals that correction mode is active
    iconName = 'checked6_red.png' if self.config.correctionMode else 'checked6.png'
    self.correctAction.setIcon(QtGui.QIcon(os.path.join(iconDir, iconName)))
    self.update()
    return
def selectImage(self):
    """Let the user jump to an arbitrary image of the current city."""
    if not self.images:
        return
    dlgTitle = 'Select image to load'
    self.statusBar().showMessage(dlgTitle)
    items = ['{}: {}'.format(num, os.path.basename(i)) for (num, i) in enumerate(self.images)]
    (item, ok) = QtWidgets.QInputDialog.getItem(self, dlgTitle, 'Image', items, self.idx, False)
    if not (ok and item):
        # Dialog cancelled: restore the default status bar text
        self.statusBar().showMessage(self.defaultStatusbar)
        return
    idx = items.index(item)
    if (idx != self.idx) and self.checkAndSave():
        self.idx = idx
        self.imageChanged()
def save(self):
    """Persist pending annotation and correction changes.

    Returns True when everything relevant was saved (or nothing needed
    saving), False otherwise. A summary is shown in the status bar.

    Fix: the ``ncols`` field of the correction XML previously wrote the image
    *height* again; ncols is the number of columns, i.e. the width.
    """
    saved = False
    message = ''
    if (self.changes and (self.annotation or self.corrections) and self.config.currentFile and self.image):
        if self.annotation:
            self.annotation.imgWidth = self.image.width()
            self.annotation.imgHeight = self.image.height()
            filename = self.config.currentLabelFile
            if (not filename):
                filename = self.getLabelFilename(True)
            if filename:
                proceed = True
                # Ask before overwriting an existing label file (user may
                # disable this warning permanently)
                if (os.path.isfile(filename) and self.config.showSaveWarning):
                    msgBox = QtWidgets.QMessageBox(self)
                    msgBox.setWindowTitle('Overwriting')
                    msgBox.setText('Saving overwrites the original file and it cannot be reversed. Do you want to continue?')
                    msgBox.addButton(QtWidgets.QMessageBox.Cancel)
                    okAndNeverAgainButton = msgBox.addButton('OK and never ask again', QtWidgets.QMessageBox.AcceptRole)
                    okButton = msgBox.addButton(QtWidgets.QMessageBox.Ok)
                    msgBox.setDefaultButton(QtWidgets.QMessageBox.Ok)
                    msgBox.setIcon(QtWidgets.QMessageBox.Warning)
                    msgBox.exec_()
                    if (msgBox.clickedButton() == okButton):
                        pass
                    elif (msgBox.clickedButton() == okAndNeverAgainButton):
                        self.config.showSaveWarning = False
                    else:
                        message += 'Nothing saved, no harm has been done. '
                        proceed = False
                if proceed:
                    try:
                        self.annotation.toJsonFile(filename)
                        saved = True
                        message += 'Saved labels to {0} '.format(filename)
                    except IOError as e:
                        message += 'Error writing labels to {0}. Message: {1} '.format(filename, e.strerror)
            else:
                message += 'Error writing labels. Cannot generate a valid filename. '
        if (self.corrections or self.config.currentCorrectionFile):
            filename = self.config.currentCorrectionFile
            if (not filename):
                filename = self.getCorrectionFilename(True)
            if filename:
                # Build the correction XML document from scratch
                root = ET.Element('correction')
                root.text = '\n'
                root.tail = '\n'
                filenameNode = ET.SubElement(root, 'filename')
                filenameNode.text = os.path.basename(self.config.currentFile)
                filenameNode.tail = '\n'
                folderNode = ET.SubElement(root, 'folder')
                folderNode.text = ('StereoDataset/' + self.config.cityName)
                folderNode.tail = '\n'
                sourceNode = ET.SubElement(root, 'source')
                sourceNode.text = '\n'
                sourceNode.tail = '\n'
                sourceImageNode = ET.SubElement(sourceNode, 'sourceImage')
                sourceImageNode.text = 'Label Cities'
                sourceImageNode.tail = '\n'
                sourceAnnotationNode = ET.SubElement(sourceNode, 'sourceAnnotation')
                sourceAnnotationNode.text = 'mcLabelTool'
                sourceAnnotationNode.tail = '\n'
                imagesizeNode = ET.SubElement(root, 'imagesize')
                imagesizeNode.text = '\n'
                imagesizeNode.tail = '\n'
                nrowsNode = ET.SubElement(imagesizeNode, 'nrows')
                nrowsNode.text = str(self.image.height())
                nrowsNode.tail = '\n'
                ncolsNode = ET.SubElement(imagesizeNode, 'ncols')
                # BUG FIX: was str(self.image.height()) — ncols is the width
                ncolsNode.text = str(self.image.width())
                ncolsNode.tail = '\n'
                for correction in self.corrections:
                    correction.appendToXMLNode(root)
                self.correctionXML = ET.ElementTree(root)
                try:
                    self.correctionXML.write(filename)
                    saved = True
                    message += 'Saved corrections to {0} '.format(filename)
                except IOError as e:
                    message += 'Error writing corrections to {0}. Message: {1} '.format(filename, e.strerror)
            else:
                message += 'Error writing corrections. Cannot generate a valid filename. '
        if saved:
            self.clearChanges()
    else:
        message += 'Nothing to save '
        saved = True
    self.statusBar().showMessage(message)
    return saved
def undo(self):
    """Revert all unsaved changes by discarding the in-memory annotation,
    after asking the user for confirmation.

    NOTE(review): the confirmation dialog only appears when there are at
    least two pending changes; a single pending change (or none) is undone
    without asking — confirm this is intended.
    """
    if (len(self.changes) > 1):
        restoreMessage = self.statusBar().currentMessage()
        dlgTitle = 'Undo changes?'
        self.statusBar().showMessage(dlgTitle)
        # List every pending change in the confirmation text
        text = 'Do you want to undo the following changes?\n'
        for c in self.changes:
            text += (('- ' + c) + '\n')
        buttons = (QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel)
        ret = QtWidgets.QMessageBox.question(self, dlgTitle, text, buttons, QtWidgets.QMessageBox.Ok)
        proceed = False
        if (ret == QtWidgets.QMessageBox.Ok):
            proceed = True
        self.statusBar().showMessage(restoreMessage)
        if (not proceed):
            return
    # Dropping the annotation forces a reload from disk in imageChanged()
    self.annotation = None
    self.imageChanged()
def clearPolygonAction(self):
    """Discard the current selection and the in-progress polygon, then repaint."""
    self.deselectAllObjects()
    self.clearPolygon()
    self.update()
def newObject(self):
    """Create a new object from the drawn polygon, asking the user for its label."""
    # Preselect the most recently used label in the dialog
    (label, ok) = self.getLabelFromUser(self.lastLabel)
    if ok and label:
        self.appendObject(label, self.drawPoly)
        self.deselectAllObjects()
        self.clearPolygon()
        self.statusBar().showMessage(self.defaultStatusbar)
        self.lastLabel = label
    self.update()
def deleteObject(self):
    """Delete every currently selected object and record each deletion."""
    if not (self.selObjs and self.annotation):
        return
    for objIdx in self.selObjs:
        target = self.annotation.objects[objIdx]
        target.delete()
        self.addChange('Deleted object {0} with label {1}'.format(target.id, target.label))
    self.deselectAllObjects()
    self.clearPolygon()
    self.update()
def modifyLabel(self):
    """Ask the user for a new label and apply it to all selected objects."""
    if not (self.annotation and self.selObjs):
        return
    lastSelected = self.annotation.objects[self.selObjs[-1]]
    # Only offer the object id when exactly one object is selected
    defaultId = lastSelected.id if len(self.selObjs) == 1 else -1
    (label, ok) = self.getLabelFromUser(lastSelected.label, defaultId)
    if ok and label:
        for objIdx in self.selObjs:
            target = self.annotation.objects[objIdx]
            if target.label != label:
                self.addChange('Set label {0} for object {1} with previous label {2}'.format(label, target.id, target.label))
                target.label = label
                target.updateDate()
    self.update()
def layerUp(self):
    """Raise the selected object by one drawing layer and repaint."""
    self.modifyLayer(1)
    self.update()
def layerDown(self):
    """Lower the selected object by one drawing layer and repaint."""
    self.modifyLayer(-1)
    self.update()
def zoomToggle(self, status):
    """Enable/disable the permanent zoom window."""
    self.config.zoom = status
    if status:
        # Remember where the mouse was when zoom got switched on
        self.mousePosOnZoom = self.mousePos
    self.update()
def highlightClassToggle(self, status):
    """Toggle highlighting of a single object class, asking which class to use."""
    if status:
        # Preselect the previously chosen class when it is still valid
        previous = self.config.highlightLabelSelection
        defaultLabel = previous if (previous and previous in name2label) else ''
        (label, ok) = self.getLabelFromUser(defaultLabel)
        if ok and label:
            self.config.highlightLabelSelection = label
        else:
            # Dialog cancelled: leave highlighting off
            status = False
    self.config.highlight = status
    self.update()
def minus(self):
    """Decrease overlay transparency by one step (clamped at 0)."""
    self.config.transp = max(self.config.transp - 0.1, 0.0)
    self.update()
def displayFilepath(self):
    """Show the path of the current image in the status bar."""
    self.statusBar().showMessage('Current image: {0}'.format(self.config.currentFile))
    self.update()
def plus(self):
    """Increase overlay transparency by one step (clamped at 1)."""
    self.config.transp = min(self.config.transp + 0.1, 1.0)
    self.update()
def screenshot(self):
    """Ask for a target filename, then take a screenshot."""
    dlgTitle = 'Get screenshot filename'
    # Renamed local (was 'filter', shadowing the builtin)
    fileFilter = 'Images (*.png *.xpm *.jpg)'
    (answer, _) = QtWidgets.QFileDialog.getSaveFileName(self, dlgTitle, self.config.screenshotFilename, fileFilter, options=QtWidgets.QFileDialog.DontUseNativeDialog)
    if not answer:
        return
    self.config.screenshotFilename = str(answer)
    self.doScreenshot()
def screenshotToggle(self, status):
    """Toggle per-frame screenshots; take one immediately when enabled."""
    self.screenshotToggleState = status
    if status:
        self.screenshot()
def displayHelpMessage(self):
    """Show the help dialog with usage instructions and key bindings.

    Fix: corrected the typo "modifys" -> "modifies" in the help text.
    """
    message = (self.applicationTitle + '\n\n')
    message += 'INSTRUCTIONS\n'
    message += ' - press open (left button) to select a city from drop-down menu\n'
    message += ' - browse images and edit labels using\n'
    message += ' the toolbar buttons (check tooltips) and the controls below\n'
    message += ' - note that the editing happens in-place;\n'
    message += ' if you want to annotate your own images or edit a custom\n'
    message += " set of labels, check (and modify) the code of the method 'loadCity'\n"
    message += ' - note that this tool modifies the JSON polygon files, but\n'
    message += ' does not create or update the pngs; for the latter use\n'
    message += ' the preparation tools that come with this tool box.\n'
    message += '\n'
    message += 'CONTROLS\n'
    message += ' - highlight objects [move mouse]\n'
    message += ' - draw new polygon\n'
    message += ' - start drawing a polygon [left click]\n'
    message += ' - add point to open polygon [left click]\n'
    message += ' - delete last added point [Backspace]\n'
    message += ' - close polygon [left click on first point]\n'
    message += ' - select closed polygon, existing object [Ctrl + left click]\n'
    message += ' - move point [left click and hold on point, move mouse]\n'
    message += ' - add point [click on edge]\n'
    message += ' - delete point from polygon [Shift + left click on point]\n'
    message += ' - deselect polygon [Q]\n'
    message += ' - select multiple polygons [Ctrl + left click]\n'
    message += ' - intersect/merge two polygons: draw new polygon, then\n'
    message += ' - intersect [Shift + left click on existing polygon]\n'
    message += ' - merge [Alt + left click on existing polygon]\n'
    message += ' - open zoom window [Z or hold down right mouse button]\n'
    message += ' - zoom in/out [mousewheel]\n'
    message += ' - enlarge/shrink zoom window [shift+mousewheel]\n'
    message += ' - start correction mode [C]\n'
    message += ' - draw a correction box [left click and hold, move, release]\n'
    message += ' - set box type [1,2,3,4]\n'
    message += ' - previous/next box [E,R]\n'
    message += ' - delete box [D]\n'
    message += ' - modify text, use ascii only [M]\n'
    QtWidgets.QMessageBox.about(self, 'HELP!', message)
    self.update()
def closeEvent(self, event):
    """Intercept window close: only quit once changes are saved or discarded."""
    if self.checkAndSave():
        event.accept()
    else:
        event.ignore()
def imageChanged(self):
    """Reload everything that depends on the current image index."""
    # Reset per-image editing state
    self.corrections = []
    self.selected_correction = -1
    self.deselectAllObjects()
    self.clearPolygon()
    # Load image, labels and corrections for the new index
    self.loadImage()
    self.loadLabels()
    self.loadCorrections()
    self.updateMouseObject()
    self.update()
    if self.screenshotToggleState:
        self.doScreenshot()
def loadCity(self):
    """Collect the image files of the configured city and restore the index."""
    self.images = []
    if os.path.isdir(self.config.city):
        self.images = sorted(glob.glob(os.path.join(self.config.city, '*' + self.imageExt)))
    # Keep pointing at the previously opened file when it is still present
    try:
        self.idx = self.images.index(self.config.currentFile)
    except ValueError:
        self.idx = 0
def loadImage(self):
    """Load the image at the current index and update action enabled-states."""
    success = False
    message = self.defaultStatusbar
    if self.images:
        filename = os.path.normpath(self.images[self.idx])
        # Skip reloading when the same image is already in memory
        if (not self.image.isNull()) and (filename == self.config.currentFile):
            success = True
        else:
            self.image = QtGui.QImage(filename)
            if self.image.isNull():
                message = 'Failed to read image: {0}'.format(filename)
            else:
                message = 'Read image: {0}'.format(filename)
                self.config.currentFile = filename
                success = True
    # Enable/disable navigation actions depending on position in the list
    for act in self.actImage:
        act.setEnabled(success)
    for act in self.actImageNotFirst:
        act.setEnabled(success and (self.idx > 0))
    for act in self.actImageNotLast:
        act.setEnabled(success and (self.idx < (len(self.images) - 1)))
    self.statusBar().showMessage(message)
def loadLabels(self):
    """Load the polygon annotation for the current image, if a label file exists.

    Fix: record the loaded filename on ``self.config.currentLabelFile``.
    The original stored it on ``self.currentLabelFile``, while ``save()``
    reads ``self.config.currentLabelFile`` (and the corrections code
    consistently uses ``self.config.currentCorrectionFile``), so the saved
    filename was never seen by ``save()``.
    """
    filename = self.getLabelFilename()
    if (not filename) or (not os.path.isfile(filename)):
        self.clearAnnotation()
        return
    # Skip reloading when this file is already loaded
    if self.annotation and (filename == self.config.currentLabelFile):
        return
    self.clearAnnotation()
    try:
        self.annotation = Annotation()
        self.annotation.fromJsonFile(filename)
    except IOError as e:
        # Report the error but keep going; the annotation stays empty
        message = 'Error parsing labels in {0}. Message: {1}'.format(filename, e.strerror)
        self.statusBar().showMessage(message)
    self.config.currentLabelFile = filename
def loadCorrections(self):
    """Load the correction boxes for the current image from its XML file."""
    filename = self.getCorrectionFilename()
    if not filename:
        self.clearCorrections()
        return
    # Skip reloading when this correction file is already loaded
    if self.correctionXML and self.corrections and (filename == self.config.currentCorrectionFile):
        return
    self.clearCorrections()
    if not os.path.isfile(filename):
        return
    try:
        self.correctionXML = ET.parse(filename)
    except IOError as e:
        self.statusBar().showMessage('Error parsing corrections in {0}. Message: {1}'.format(filename, e.strerror))
        self.correctionXML = []
        return
    except ET.ParseError as e:
        self.statusBar().showMessage('Error parsing corrections in {0}. Message: {1}'.format(filename, e))
        self.correctionXML = []
        return
    self.config.currentCorrectionFile = filename
    restoreMessage = self.statusBar().currentMessage()
    root = self.correctionXML.getroot()
    for (i, objNode) in enumerate(root.findall('correction')):
        box = CorrectionBox()
        box.readFromXMLNode(objNode)
        if i == 0:
            # Pre-select the first correction box
            self.selected_correction = 0
            box.select()
        self.corrections.append(box)
    self.statusBar().showMessage(restoreMessage)
def modify_correction_type(self, correction_type):
    """Set the type of the currently selected correction box."""
    if self.selected_correction < 0:
        return
    self.corrections[self.selected_correction].type = correction_type
    self.addChange('Modified correction type.')
    self.update()
    return
def delete_selected_annotation(self):
    """Remove the selected correction box (only while in correction mode)."""
    if (self.selected_correction < 0) or (not self.config.correctionMode):
        return
    del self.corrections[self.selected_correction]
    # Deleted the last element: step the selection back by one
    if self.selected_correction == len(self.corrections):
        self.selected_correction -= 1
    if self.selected_correction >= 0:
        self.corrections[self.selected_correction].select()
    self.addChange('Deleted correction.')
    self.update()
    return
def modify_correction_description(self):
    """Edit the free-text description of the selected correction box."""
    if (self.selected_correction < 0) or (not self.config.correctionMode):
        return
    current = self.corrections[self.selected_correction]
    (text, accepted) = QtWidgets.QInputDialog.getText(self, 'Modify Error Description', 'Please describe the labeling error briefly.', text=current.annotation)
    if accepted:
        current.annotation = text
        self.addChange('Changed correction description.')
        self.update()
    return
def select_next_correction(self):
    """Move the correction selection forward by one, wrapping at the end."""
    if self.selected_correction < 0:
        return
    self.corrections[self.selected_correction].unselect()
    self.selected_correction = (self.selected_correction + 1) % len(self.corrections)
    self.corrections[self.selected_correction].select()
    self.update()
def select_previous_correction(self):
    """Move the correction selection backward by one, wrapping at index 0."""
    if self.selected_correction < 0:
        return
    self.corrections[self.selected_correction].unselect()
    self.selected_correction = (self.selected_correction - 1) % len(self.corrections)
    self.corrections[self.selected_correction].select()
    self.update()
def paintEvent(self, event):
    """Qt paint handler: image, label overlay, polygons, boxes, zoom, then base."""
    qp = QtGui.QPainter()
    qp.begin(self)
    self.updateScale(qp)
    self.getHighlightedObject(qp)
    self.drawImage(qp)
    overlay = self.drawLabels(qp)
    self.drawDrawPoly(qp)
    self.drawDrawRect(qp)
    self.drawLabelAtMouse(qp)
    # NOTE(review): the overlay returned by drawLabels is not forwarded;
    # drawZoom is called with None and re-renders the polygon itself —
    # confirm the overlay is intentionally unused.
    self.drawZoom(qp, None)
    qp.end()
    QtWidgets.QMainWindow.paintEvent(self, event)
def updateScale(self, qp):
    """Recompute scale and offsets so the image fits the paint device with a border."""
    if ((not self.image.width()) or (not self.image.height())):
        return
    self.xoff = self.bordergap
    self.yoff = (self.toolbar.height() + self.bordergap)
    # Independent x/y scale candidates; take the minimum to keep aspect ratio.
    sx = (float((qp.device().width() - (2 * self.xoff))) / self.image.width())
    sy = (float((qp.device().height() - (2 * self.yoff))) / self.image.height())
    self.scale = min(sx, sy)
    self.w = (self.scale * self.image.width())
    self.h = (self.scale * self.image.height())
def getHighlightedObject(self, qp):
    """Collect the objects (and optionally label) to highlight this frame."""
    self.highlightObjs = []
    self.highlightObjLabel = None
    if (not self.annotation):
        return
    highlightObjIds = self.selObjs
    # No explicit selection: fall back to the object under the mouse, but only
    # when not in the middle of drawing a polygon and the mouse is on the image.
    if ((not highlightObjIds) and (self.drawPoly.isEmpty() or self.drawPolyClosed) and (self.mouseObj >= 0) and (not self.mouseOutsideImage)):
        highlightObjIds = [self.mouseObj]
    if highlightObjIds:
        self.highlightObjs = [self.annotation.objects[i] for i in highlightObjIds]
        if self.config.highlight:
            self.highlightObjLabel = self.config.highlightLabelSelection
        elif ((len(highlightObjIds) == 1) and self.config.correctionMode):
            self.highlightObjLabel = self.annotation.objects[highlightObjIds[(- 1)]].label
def drawImage(self, qp):
    """Paint the current image scaled into the viewport rectangle."""
    if self.image.isNull():
        return
    qp.save()
    target = QtCore.QRect(self.xoff, self.yoff, self.w, self.h)
    qp.drawImage(target, self.image)
    qp.restore()
def getPolygon(self, obj):
    """Convert an annotation object's point list into a QPolygonF."""
    qpoly = QtGui.QPolygonF()
    for vertex in obj.polygon:
        qpoly.append(QtCore.QPointF(vertex.x, vertex.y))
    return qpoly
def drawLabels(self, qp, ignore=[]):
    """Render all annotation polygons into a semi-transparent overlay image.

    Returns the overlay QImage, or None when nothing can be drawn.
    NOTE(review): the mutable default ``ignore=[]`` is never mutated here so it
    is harmless, but a ``None`` default would be safer — flagged only.
    """
    if (self.image.isNull() or (self.w <= 0) or (self.h <= 0)):
        return
    if (not self.annotation):
        return
    # Temporary fully-transparent mode (key '0' held): skip the overlay.
    if self.transpTempZero:
        return
    overlay = QtGui.QImage(self.w, self.h, QtGui.QImage.Format_ARGB32_Premultiplied)
    # Fill with the default label's colour as background.
    defaultLabel = name2label[self.defaultLabel]
    col = QtGui.QColor(*defaultLabel.color)
    overlay.fill(col)
    qp2 = QtGui.QPainter()
    qp2.begin(overlay)
    qp2.setPen(QtGui.QColor('white'))
    for obj in self.annotation.objects:
        if (not obj.draw):
            continue
        name = assureSingleInstanceName(obj.label)
        if (not (name in name2label)):
            print('The annotations contain unkown labels. This should not happen. Please inform the datasets authors. Thank you!')
            print("Details: label '{}', file '{}'".format(name, self.currentLabelFile))
            continue
        if (name in ignore):
            continue
        poly = self.getPolygon(obj)
        polyToDraw = (poly * QtGui.QTransform.fromScale(self.scale, self.scale))
        col = QtGui.QColor(*name2label[name].color)
        brush = QtGui.QBrush(col, QtCore.Qt.SolidPattern)
        qp2.setBrush(brush)
        # Highlighted objects: punch a transparent hole first, then refill
        # with a cross-hatch pattern so they stand out.
        if ((obj in self.highlightObjs) or (name == self.highlightObjLabel)):
            qp2.setCompositionMode(QtGui.QPainter.CompositionMode_Clear)
            qp2.drawPolygon(polyToDraw)
            qp2.setCompositionMode(QtGui.QPainter.CompositionMode_SourceOver)
            brush = QtGui.QBrush(col, QtCore.Qt.DiagCrossPattern)
            qp2.setBrush(brush)
        qp2.drawPolygon(polyToDraw)
    # Dashed outline on top of every highlighted object.
    for obj in self.highlightObjs:
        brush = QtGui.QBrush(QtCore.Qt.NoBrush)
        qp2.setBrush(brush)
        qp2.setPen(QtCore.Qt.DashLine)
        polyToDraw = (self.getPolygon(obj) * QtGui.QTransform.fromScale(self.scale, self.scale))
        qp2.drawPolygon(polyToDraw)
    qp2.end()
    # Blend the finished overlay onto the widget with configured transparency.
    qp.save()
    qp.setOpacity(self.config.transp)
    qp.drawImage(self.xoff, self.yoff, overlay)
    qp.restore()
    return overlay
def drawDrawRect(self, qp):
    """Draw all correction boxes plus the rubber-band box being dragged."""
    qp.save()
    qp.setBrush(QtGui.QBrush(QtCore.Qt.NoBrush))
    qp.setFont(QtGui.QFont('QFont::AnyStyle', 14))
    thickPen = QtGui.QPen()
    qp.setPen(thickPen)
    for c in self.corrections:
        # Work on a copy so the stored image-space bbox stays untouched while
        # we convert it to widget coordinates.
        rect = copy.deepcopy(c.bbox)
        width = rect.width()
        height = rect.height()
        rect.setX(((c.bbox.x() * self.scale) + self.xoff))
        rect.setY(((c.bbox.y() * self.scale) + self.yoff))
        rect.setWidth((width * self.scale))
        rect.setHeight((height * self.scale))
        if c.selected:
            # Selected box: print its type and description below the image and
            # use a thicker outline.
            thickPen.setColor(QtGui.QColor(0, 0, 0))
            if (c.type == CorrectionBox.types.QUESTION):
                descr = 'QUESTION'
            elif (c.type == CorrectionBox.types.RESOLVED):
                descr = 'FIXED'
            else:
                descr = 'ERROR'
            qp.setPen(thickPen)
            qp.drawText(QtCore.QPoint(self.xoff, ((self.yoff + self.h) + 20)), ('(%s: %s)' % (descr, c.annotation)))
            pen_width = 6
        else:
            pen_width = 3
        colour = c.get_colour()
        thickPen.setColor(colour)
        thickPen.setWidth(pen_width)
        qp.setPen(thickPen)
        qp.drawRect(rect)
    # Rubber-band box currently being dragged (red, not yet committed).
    if (self.in_progress_bbox is not None):
        rect = copy.deepcopy(self.in_progress_bbox)
        width = rect.width()
        height = rect.height()
        rect.setX(((self.in_progress_bbox.x() * self.scale) + self.xoff))
        rect.setY(((self.in_progress_bbox.y() * self.scale) + self.yoff))
        rect.setWidth((width * self.scale))
        rect.setHeight((height * self.scale))
        thickPen.setColor(QtGui.QColor(255, 0, 0))
        thickPen.setWidth(3)
        qp.setPen(thickPen)
        qp.drawRect(rect)
    qp.restore()
def drawDrawPoly(self, qp, transform=None):
    """Draw the in-progress polygon.

    A custom ``transform`` is supplied by drawZoom to map the polygon into the
    magnified view; otherwise widget scale/offset are applied.
    """
    if self.drawPoly.isEmpty():
        return
    if (not self.image):
        return
    qp.save()
    # Copy so appending the floating mouse point does not mutate editor state.
    poly = QtGui.QPolygonF(self.drawPoly)
    if ((not self.drawPolyClosed) and (self.mousePosScaled is not None)):
        poly.append(self.mousePosScaled)
    if (not transform):
        poly = (poly * QtGui.QTransform.fromScale(self.scale, self.scale))
        poly.translate(self.xoff, self.yoff)
    else:
        poly = (poly * transform)
    qp.setBrush(QtGui.QBrush(QtCore.Qt.NoBrush))
    polyColor = QtGui.QColor(255, 0, 0)
    qp.setPen(polyColor)
    # Open polygon: draw as a line strip; closed: as a polygon.
    if (not self.drawPolyClosed):
        qp.drawPolyline(poly)
    else:
        qp.drawPolygon(poly)
    if (self.mousePosScaled is not None):
        closestPt = self.getClosestPoint(self.drawPoly, self.mousePosScaled)
    else:
        closestPt = ((- 1), (- 1))
    # (i, j) with i != j means the mouse hovers an edge: emphasise it.
    if (closestPt[0] != closestPt[1]):
        thickPen = QtGui.QPen(polyColor)
        thickPen.setWidth(3)
        qp.setPen(thickPen)
        qp.drawLine(poly[closestPt[0]], poly[closestPt[1]])
        qp.setPen(polyColor)
    startDrawingPts = 0
    # While drawing, the first point is green and enlarged when hovering it
    # would close the polygon.
    if (not self.drawPolyClosed):
        self.drawPoint(qp, poly.first(), True, ((closestPt == (0, 0)) and (self.drawPoly.size() > 1)))
        startDrawingPts = 1
    for pt in range(startDrawingPts, poly.size()):
        self.drawPoint(qp, poly[pt], False, (self.drawPolyClosed and (closestPt == (pt, pt))))
    qp.restore()
def drawLabelAtMouse(self, qp):
    """Show the label of the highlighted object in large text near the cursor."""
    if (not self.highlightObjs):
        return
    if (not self.drawPoly.isEmpty()):
        return
    if (not self.mousePos):
        return
    qp.save()
    mouse = self.mousePos
    showZoom = (self.config.zoom and (not self.image.isNull()) and self.w and self.h)
    mouseText = self.highlightObjs[(- 1)].label
    # Vertical offset; leave room for the zoom circle when it is visible.
    off = 36
    if showZoom:
        off += (self.config.zoomSize / 2)
    if ((mouse.y() - off) > self.toolbar.height()):
        top = (mouse.y() - off)
        btm = mouse.y()
        vAlign = QtCore.Qt.AlignTop
    else:
        # Not enough room above the cursor: draw the text below it instead.
        if (not showZoom):
            off += 20
        top = mouse.y()
        btm = (mouse.y() + off)
        vAlign = QtCore.Qt.AlignBottom
    rect = QtCore.QRect()
    rect.setTopLeft(QtCore.QPoint((mouse.x() - 100), top))
    rect.setBottomRight(QtCore.QPoint((mouse.x() + 100), btm))
    qp.setPen(QtGui.QColor('white'))
    font = QtGui.QFont('Helvetica', 20, QtGui.QFont.Bold)
    qp.setFont(font)
    qp.setOpacity(1)
    qp.drawText(rect, (QtCore.Qt.AlignHCenter | vAlign), mouseText)
    qp.restore()
def drawZoom(self, qp, overlay):
    """Draw a magnified view of the pixels around the mouse cursor.

    NOTE(review): ``overlay`` is unused (the caller passes None) — confirm
    whether zooming the label overlay was intended.
    """
    if (not self.config.zoom):
        return
    if (self.image.isNull() or (not self.w) or (not self.h)):
        return
    if (not self.mousePos):
        return
    zoomSize = self.config.zoomSize
    mouse = self.mousePos
    pix = self.mousePosScaled
    # Source region size: the smaller it is, the stronger the magnification.
    selSize = (zoomSize / (self.config.zoomFactor * self.config.zoomFactor))
    sel = QtCore.QRectF((pix.x() - (selSize / 2)), (pix.y() - (selSize / 2)), selSize, selSize)
    view = QtCore.QRectF((mouse.x() - (zoomSize / 2)), (mouse.y() - (zoomSize / 2)), zoomSize, zoomSize)
    qp.drawImage(view, self.image, sel)
    # Re-draw the in-progress polygon inside the zoom window, mapped with a
    # quad-to-quad transform from source to view coordinates.
    if (not self.drawPoly.isEmpty()):
        transform = QtGui.QTransform()
        quadFrom = QtGui.QPolygonF()
        quadFrom.append(sel.topLeft())
        quadFrom.append(sel.topRight())
        quadFrom.append(sel.bottomRight())
        quadFrom.append(sel.bottomLeft())
        quadTo = QtGui.QPolygonF()
        quadTo.append(view.topLeft())
        quadTo.append(view.topRight())
        quadTo.append(view.bottomRight())
        quadTo.append(view.bottomLeft())
        if QtGui.QTransform.quadToQuad(quadFrom, quadTo, transform):
            qp.setClipRect(view)
            self.drawDrawPoly(qp, transform)
        else:
            print('not possible')
def mouseMoveEvent(self, event):
    """Mouse-move handler: drag a polygon vertex or grow the rubber-band bbox."""
    if (self.image.isNull() or (self.w == 0) or (self.h == 0)):
        return
    self.updateMousePos(event.localPos())
    if (not self.config.correctionMode):
        if (self.draggedPt >= 0):
            # Move the dragged vertex in the editable polygon…
            self.drawPoly.replace(self.draggedPt, self.mousePosScaled)
            # …and mirror the change into the selected annotation object.
            if self.selObjs:
                obj = self.annotation.objects[self.selObjs[(- 1)]]
                obj.polygon[self.draggedPt] = Point(self.mousePosScaled.x(), self.mousePosScaled.y())
                # Log the polygon change only once per object.
                if (not (obj.id in self.changedPolygon)):
                    self.changedPolygon.append(obj.id)
                    self.addChange('Changed polygon of object {0} with label {1}'.format(obj.id, obj.label))
    elif (self.in_progress_bbox is not None):
        # Correction mode: resize the bbox spanned by press point and cursor.
        # NOTE(review): self.mousePressEvent here is the QPointF the press
        # handler stored on the instance — it shadows the method of the same
        # name; a rename would be safer but touches other handlers.
        p0 = (self.mousePosScaled.x(), self.mousePosScaled.y())
        p1 = (self.mousePressEvent.x(), self.mousePressEvent.y())
        xy = (min(p0[0], p1[0]), min(p0[1], p1[1]))
        (w, h) = (abs((p0[0] - p1[0])), abs((p0[1] - p1[1])))
        self.in_progress_bbox = QtCore.QRectF(xy[0], xy[1], w, h)
    self.updateMouseObject()
    self.update()
def leaveEvent(self, event):
    """Forget all mouse state when the cursor leaves the widget."""
    self.mousePos = None
    self.mousePosScaled = None
    self.mouseOutsideImage = True
def mousePressEvent(self, event):
    """Mouse-press handler: start vertex drag / delete / insert, or start a bbox."""
    self.mouseButtons = event.buttons()
    shiftPressed = (QtWidgets.QApplication.keyboardModifiers() == QtCore.Qt.ShiftModifier)
    self.updateMousePos(event.localPos())
    # NOTE(review): this assignment shadows the mousePressEvent *method* with a
    # QPointF on the instance (read back in mouseMoveEvent). It works because
    # Qt dispatches the handler via the class, but the name is misleading.
    self.mousePressEvent = self.mousePosScaled
    if (event.button() == QtCore.Qt.LeftButton):
        if (not self.config.correctionMode):
            # Polygon editing on a closed polygon.
            if (self.drawPolyClosed and (self.mousePosScaled is not None)):
                closestPt = self.getClosestPoint(self.drawPoly, self.mousePosScaled)
                if shiftPressed:
                    # Shift+click on a vertex removes it (and syncs the object).
                    if (closestPt[0] == closestPt[1]):
                        del self.drawPoly[closestPt[0]]
                        if self.selObjs:
                            obj = self.annotation.objects[self.selObjs[(- 1)]]
                            del obj.polygon[closestPt[0]]
                            if (not (obj.id in self.changedPolygon)):
                                self.changedPolygon.append(obj.id)
                                self.addChange('Changed polygon of object {0} with label {1}'.format(obj.id, obj.label))
                        self.update()
                elif (closestPt[0] == closestPt[1]):
                    # Plain click on a vertex starts dragging it.
                    self.draggedPt = closestPt[0]
                else:
                    # Click on an edge inserts a new vertex and starts dragging it.
                    self.drawPoly.insert(closestPt[1], self.mousePosScaled)
                    self.draggedPt = closestPt[1]
                    if self.selObjs:
                        obj = self.annotation.objects[self.selObjs[(- 1)]]
                        obj.polygon.insert(closestPt[1], Point(self.mousePosScaled.x(), self.mousePosScaled.y()))
                        if (not (obj.id in self.changedPolygon)):
                            self.changedPolygon.append(obj.id)
                            self.addChange('Changed polygon of object {0} with label {1}'.format(obj.id, obj.label))
        else:
            # Correction mode: begin rubber-band drawing of a correction bbox.
            assert (self.in_progress_bbox == None)
            self.in_progress_bbox = QtCore.QRectF(self.mousePosScaled.x(), self.mousePosScaled.y(), 0, 0)
    elif (event.button() == QtCore.Qt.RightButton):
        self.toggleZoom(event.localPos())
    self.update()
def mouseReleaseEvent(self, event):
    """Mouse-release handler: finish drags, add/close polygons, commit bboxes."""
    self.mouseButtons = event.buttons()
    ctrlPressed = (event.modifiers() & QtCore.Qt.ControlModifier)
    shiftPressed = (event.modifiers() & QtCore.Qt.ShiftModifier)
    altPressed = (event.modifiers() & QtCore.Qt.AltModifier)
    if (event.button() == QtCore.Qt.LeftButton):
        if (not self.config.correctionMode):
            if ctrlPressed:
                # Ctrl+Shift: intersect the closed polygon with the hovered
                # object; Ctrl+Alt: merge; plain Ctrl: toggle selection.
                if (shiftPressed and self.drawPolyClosed):
                    self.intersectPolygon()
                if (altPressed and self.drawPolyClosed):
                    self.mergePolygon()
                else:
                    self.selectObject()
            elif (not self.drawPolyClosed):
                # Still drawing: either close the polygon or add a vertex.
                if self.ptClosesPoly():
                    self.closePolygon()
                elif (self.mousePosScaled is not None):
                    # Remember where zoom damping is anchored for the first point.
                    if ((not self.drawPolyClosed) and self.drawPoly.isEmpty()):
                        self.mousePosOnZoom = self.mousePos
                    self.addPtToPoly(self.mousePosScaled)
            elif self.drawPolyClosed:
                # End a vertex drag.
                self.draggedPt = (- 1)
        elif (self.in_progress_bbox is not None):
            # Correction mode: commit the rubber-band bbox if it is wide enough.
            if (self.in_progress_bbox.width() > 20):
                description = QtWidgets.QInputDialog.getText(self, 'Error Description', 'Please describe the labeling error briefly.')
                if (description[1] and description[0]):
                    self.corrections.append(CorrectionBox(self.in_progress_bbox, annotation=description[0]))
                    self.corrections[self.selected_correction].unselect()
                    self.selected_correction = (len(self.corrections) - 1)
                    self.corrections[self.selected_correction].select()
                    self.addChange('Added correction.')
            self.in_progress_annotation = None
            self.in_progress_bbox = None
    elif (event.button() == QtCore.Qt.RightButton):
        self.toggleZoom(event.localPos())
    self.update()
def wheelEvent(self, event):
    """Mouse-wheel handler while zoom is active.

    Shift+wheel changes the zoom window size; a plain wheel changes the zoom
    magnification factor. Both are clamped to sane ranges.
    """
    deltaDegree = event.angleDelta().y() / 8
    deltaSteps = deltaDegree / 15
    if self.config.zoom:
        # BUG FIX: the original condition was
        # `event.modifiers() and QtCore.Qt.Key_Shift` — a boolean `and` with a
        # (non-zero) key code, which evaluates true whenever ANY modifier is
        # held. Test the Shift modifier bit with a bitwise AND instead.
        if event.modifiers() & QtCore.Qt.ShiftModifier:
            self.config.zoomSize += deltaSteps * 10
            # Clamp the zoom window size to [10, 1000] pixels.
            self.config.zoomSize = max(self.config.zoomSize, 10)
            self.config.zoomSize = min(self.config.zoomSize, 1000)
        else:
            self.config.zoomFactor += deltaSteps * 0.05
            # Clamp the magnification factor to [0.1, 10].
            self.config.zoomFactor = max(self.config.zoomFactor, 0.1)
            self.config.zoomFactor = min(self.config.zoomFactor, 10)
    self.update()
def keyPressEvent(self, e):
    """Keyboard shortcuts for polygon editing and correction handling."""
    if (e.key() == QtCore.Qt.Key_Control):
        # Ctrl shows a pointing-hand cursor while object selection is possible.
        QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
    elif (e.key() == QtCore.Qt.Key_Backspace):
        # Remove the most recent polygon vertex while still drawing.
        if (not self.drawPolyClosed):
            del self.drawPoly[(- 1)]
            self.update()
    elif (e.key() == QtCore.Qt.Key_0):
        # Hold '0' to hide the label overlay temporarily.
        self.transpTempZero = True
        self.update()
    elif (e.key() == QtCore.Qt.Key_E):
        self.select_next_correction()
    elif (e.key() == QtCore.Qt.Key_R):
        self.select_previous_correction()
    elif (e.key() == QtCore.Qt.Key_1):
        self.modify_correction_type(CorrectionBox.types.TO_CORRECT)
    elif (e.key() == QtCore.Qt.Key_2):
        self.modify_correction_type(CorrectionBox.types.TO_REVIEW)
    elif (e.key() == QtCore.Qt.Key_3):
        self.modify_correction_type(CorrectionBox.types.RESOLVED)
    elif (e.key() == QtCore.Qt.Key_4):
        self.modify_correction_type(CorrectionBox.types.QUESTION)
    elif ((e.key() == QtCore.Qt.Key_D) and self.config.correctionMode):
        self.delete_selected_annotation()
    elif ((e.key() == QtCore.Qt.Key_M) and self.config.correctionMode):
        self.modify_correction_description()
def keyReleaseEvent(self, e):
    """Key-release handler: restore the cursor / end temporary transparency."""
    key = e.key()
    if key == QtCore.Qt.Key_Control:
        QtWidgets.QApplication.restoreOverrideCursor()
    elif key in (QtCore.Qt.Key_0, QtCore.Qt.Key_Insert):
        self.transpTempZero = False
        self.update()
def setTip(self, action, tip):
    """Append the action's hotkey list to tip and set it as status/tool tip."""
    keys = "', '".join(str(s.toString()) for s in action.shortcuts())
    tip = "{} (Hotkeys: '{}')".format(tip, keys)
    action.setStatusTip(tip)
    action.setToolTip(tip)
def updateMousePos(self, mousePosOrig):
    """Translate a widget-space mouse position into image space and store both.

    While zoomed and drawing, mouse sensitivity is damped (by zoomFactor^-3)
    so fine vertex placement is possible inside the magnified view.
    """
    if ((self.config.zoomFactor <= 1) or (self.drawPolyClosed or self.drawPoly.isEmpty())):
        sens = 1.0
    else:
        sens = (1.0 / pow(self.config.zoomFactor, 3))
    # Blend the raw cursor position with the zoom anchor point.
    if (self.config.zoom and (self.mousePosOnZoom is not None)):
        mousePos = QtCore.QPointF(round((((1 - sens) * self.mousePosOnZoom.x()) + (sens * mousePosOrig.x()))), round((((1 - sens) * self.mousePosOnZoom.y()) + (sens * mousePosOrig.y()))))
    else:
        mousePos = mousePosOrig
    # Widget -> image coordinates.
    mousePosScaled = QtCore.QPointF((float((mousePos.x() - self.xoff)) / self.scale), (float((mousePos.y() - self.yoff)) / self.scale))
    mouseOutsideImage = (not self.image.rect().contains(mousePosScaled.toPoint()))
    # Clamp to the image bounds.
    mousePosScaled.setX(max(mousePosScaled.x(), 0.0))
    mousePosScaled.setY(max(mousePosScaled.y(), 0.0))
    mousePosScaled.setX(min(mousePosScaled.x(), self.image.rect().right()))
    mousePosScaled.setY(min(mousePosScaled.y(), self.image.rect().bottom()))
    # Still outside after clamping (degenerate geometry): reset all mouse state.
    if (not self.image.rect().contains(mousePosScaled.toPoint())):
        self.mousePos = None
        self.mousePosScaled = None
        self.mousePosOrig = None
        self.updateMouseObject()
        self.update()
        return
    self.mousePos = mousePos
    self.mousePosScaled = mousePosScaled
    self.mousePosOrig = mousePosOrig
    self.mouseOutsideImage = mouseOutsideImage
def toggleZoom(self, mousePosOrig):
    """Switch the zoom window on/off and recompute mouse state accordingly."""
    self.config.zoom = (not self.config.zoom)
    if self.config.zoom:
        # Anchor the zoom damping at the current mouse position.
        self.mousePosOnZoom = self.mousePos
        self.updateMousePos(mousePosOrig)
    else:
        self.updateMousePos(mousePosOrig)
        # Keep a dragged vertex under the cursor after leaving zoom mode.
        if ((not self.config.correctionMode) and (self.draggedPt >= 0)):
            self.drawPoly.replace(self.draggedPt, self.mousePosScaled)
def getClosestPoint(self, poly, pt):
    """Find the polygon vertex or edge closest to ``pt`` within a 4 px threshold.

    Returns (i, i) when vertex i is closest, (i, j) when the perpendicular
    projection onto edge (i, j) is closest, or (-1, -1) when nothing lies
    within the threshold.
    """
    closest = ((- 1), (- 1))
    distTh = 4.0
    # BUG FIX: the search distance was initialised to 0.0, so `curDist < dist`
    # could never hold and `dist <= distTh` was immediately true — the
    # function always returned (-1, -1), breaking vertex dragging/insertion.
    # Start the search from +infinity instead.
    dist = float('inf')
    # Phase 1: nearest vertex.
    for i in range(poly.size()):
        curDist = self.ptDist(poly[i], pt)
        if curDist < dist:
            closest = (i, i)
            dist = curDist
    if dist <= distTh:
        return closest
    # Phase 2: nearest edge — only meaningful on a closed polygon.
    if self.drawPolyClosed and poly.size() >= 2:
        for i in range(poly.size()):
            pt1 = poly[i]
            j = i + 1
            if j == poly.size():
                j = 0
            pt2 = poly[j]
            edge = QtCore.QLineF(pt1, pt2)
            # Drop a perpendicular from pt onto the edge.
            normal = edge.normalVector()
            normalThroughMouse = QtCore.QLineF(pt.x(), pt.y(), pt.x() + normal.dx(), pt.y() + normal.dy())
            intersectionPt = QtCore.QPointF()
            intersectionType = edge.intersect(normalThroughMouse, intersectionPt)
            # Only accept projections that land on the edge segment itself.
            if intersectionType == QtCore.QLineF.BoundedIntersection:
                curDist = self.ptDist(intersectionPt, pt)
                if curDist < dist:
                    closest = (i, j)
                    dist = curDist
        if dist <= distTh:
            return closest
    return ((- 1), (- 1))
def ptDist(self, pt1, pt2):
    """Euclidean distance between two points, computed via QLineF."""
    return QtCore.QLineF(pt1, pt2).length()
def ptClosesPoly(self):
    """Return True when the mouse hovers the first vertex, i.e. a click would
    close the polygon being drawn."""
    if self.drawPoly.isEmpty() or self.mousePosScaled is None:
        return False
    return self.getClosestPoint(self.drawPoly, self.mousePosScaled) == (0, 0)
def drawPoint(self, qp, pt, isFirst, increaseRadius):
    """Draw one polygon vertex: green for the first point, red otherwise.

    ``increaseRadius`` enlarges the dot (hover feedback)."""
    color = QtGui.QColor(0, 255, 0) if isFirst else QtGui.QColor(255, 0, 0)
    qp.setBrush(QtGui.QBrush(color, QtCore.Qt.SolidPattern))
    radius = 7.5 if increaseRadius else 3.0
    qp.drawEllipse(pt, radius, radius)
def isLabelPathValid(self, labelPath):
    """Return True when the configured label path is an existing directory."""
    return os.path.isdir(labelPath)
def getLabelFromUser(self, defaultLabel='', objID=(- 1)):
    """Open a modal list dialog and return ``(labelName, okPressed)``."""
    restoreMessage = self.statusBar().currentMessage()
    if (not defaultLabel):
        defaultLabel = self.defaultLabel
    items = list(name2label.keys())
    items.sort()
    default = items.index(defaultLabel)
    # NOTE(review): list.index raises ValueError when the label is absent, so
    # this `< 0` branch is unreachable as written — confirm intent.
    if (default < 0):
        self.statusBar().showMessage('The selected label is missing in the internal color map.')
        return
    dlgTitle = 'Select label'
    message = dlgTitle
    question = dlgTitle
    if (objID >= 0):
        message = 'Select new label for object {0} with current label {1}'.format(objID, defaultLabel)
        question = 'Label for object {0}'.format(objID)
    self.statusBar().showMessage(message)
    (item, ok) = QtWidgets.QInputDialog.getItem(self, dlgTitle, question, items, default, False)
    item = str(item)
    self.statusBar().showMessage(restoreMessage)
    return (item, ok)
def addPtToPoly(self, pt):
    """Append a vertex to the in-progress polygon and enable related actions."""
    self.drawPoly.append(pt)
    for action in self.actPolyOrSelObj:
        action.setEnabled(True)
def clearPolygon(self):
    """Discard the in-progress polygon and refresh action enable-states."""
    self.drawPoly = QtGui.QPolygonF()
    self.drawPolyClosed = False
    hasSelection = bool(self.selObjs)
    for action in self.actPolyOrSelObj:
        action.setEnabled(hasSelection)
    for action in self.actClosedPoly:
        action.setEnabled(False)
def closePolygon(self):
    """Mark the drawn polygon closed and show usage hints in the status bar."""
    self.drawPolyClosed = True
    for action in self.actClosedPoly:
        action.setEnabled(True)
    message = ('What should I do with the polygon? Press n to create a new object, '
               'press Ctrl + Shift + Left Click to intersect with another object, '
               'press Ctrl + Alt + Left Click to merge with another object.')
    self.statusBar().showMessage(message)
def intersectPolygon(self):
    """Replace the drawn polygon with its intersection with the hovered object
    and create a new object from the result (label chosen by the user)."""
    if (not self.annotation):
        return
    if (self.mouseObj < 0):
        return
    obj = self.annotation.objects[self.mouseObj]
    intersection = self.drawPoly.intersected(self.getPolygon(obj))
    if (not intersection.isEmpty()):
        self.drawPoly = intersection
        # Ask for the new object's label, defaulting to the hovered one's.
        (label, ok) = self.getLabelFromUser(obj.label)
        if (ok and label):
            self.appendObject(label, intersection)
            self.clearPolygon()
            self.statusBar().showMessage(self.defaultStatusbar)
    self.deselectAllObjects()
    self.update()
def mergePolygon(self):
    """Replace the drawn polygon with its union with the hovered object and
    create a new object from the result (label chosen by the user)."""
    if (not self.annotation):
        return
    if (self.mouseObj < 0):
        return
    obj = self.annotation.objects[self.mouseObj]
    union = self.drawPoly.united(self.getPolygon(obj))
    if (not union.isEmpty()):
        self.drawPoly = union
        # Ask for the new object's label, defaulting to the hovered one's.
        (label, ok) = self.getLabelFromUser(obj.label)
        if (ok and label):
            self.appendObject(label, union)
            self.clearPolygon()
            self.statusBar().showMessage(self.defaultStatusbar)
    self.deselectAllObjects()
    self.update()
def initPolygonFromObject(self):
    """Load the selected object's polygon into the editable drawing polygon."""
    if (not self.annotation):
        return
    if (not self.selObjs):
        return
    # Editing is only possible with exactly one selected object.
    if (len(self.selObjs) > 1):
        self.clearPolygon()
        self.update()
        return
    obj = self.annotation.objects[self.selObjs[(- 1)]]
    self.drawPoly = self.getPolygon(obj)
    self.drawPolyClosed = True
    for act in self.actPolyOrSelObj:
        act.setEnabled(True)
    for act in self.actClosedPoly:
        act.setEnabled(True)
    self.update()
def appendObject(self, label, polygon):
    """Create a new annotation object from ``polygon`` with the given label."""
    if (not self.annotation):
        self.annotation = Annotation()
    # Pick an id one past the current maximum.
    newID = 0
    for obj in self.annotation.objects:
        if (obj.id >= newID):
            newID = (obj.id + 1)
    obj = CsPoly()
    obj.label = label
    obj.polygon = [Point(p.x(), p.y()) for p in polygon]
    obj.id = newID
    obj.deleted = 0
    obj.verified = 0
    obj.user = getpass.getuser()
    obj.updateDate()
    self.annotation.objects.append(obj)
    self.addChange('Created object {0} with label {1}'.format(newID, label))
    # Reset the editor and select the new object.
    # NOTE(review): mouseObj is set to 0 although the new object is appended
    # last — confirm whether the last index was intended here.
    self.deselectAllObjects()
    self.clearPolygon()
    self.mouseObj = 0
    self.selectObject()
def checkAndSave(self):
    """If there are unsaved changes, ask the user to save, discard, or cancel.

    Returns True when it is safe to proceed (no changes, saved, or discarded).
    """
    if (not self.changes):
        return True
    restoreMessage = self.statusBar().currentMessage()
    dlgTitle = 'Save changes?'
    self.statusBar().showMessage(dlgTitle)
    # Enumerate the pending changes in the dialog body.
    text = 'Do you want to save the following changes?\n'
    for c in self.changes:
        text += (('- ' + c) + '\n')
    buttons = ((QtWidgets.QMessageBox.Save | QtWidgets.QMessageBox.Discard) | QtWidgets.QMessageBox.Cancel)
    ret = QtWidgets.QMessageBox.question(self, dlgTitle, text, buttons, QtWidgets.QMessageBox.Save)
    proceed = False
    if (ret == QtWidgets.QMessageBox.Save):
        proceed = self.save()
    elif (ret == QtWidgets.QMessageBox.Discard):
        self.clearChanges()
        proceed = True
    else:
        # Cancel: stay on the current file.
        proceed = False
    self.statusBar().showMessage(restoreMessage)
    return proceed
def doScreenshot(self):
    """Render the labelled image at native resolution and save it to disk."""
    img = QtGui.QImage(self.image)
    qp = QtGui.QPainter()
    qp.begin(img)
    # Temporarily switch to a 1:1, offset-free view for off-screen rendering.
    xoff = self.xoff
    yoff = self.yoff
    scale = self.scale
    w = self.w
    h = self.h
    self.xoff = 0
    self.yoff = 0
    self.scale = 1
    self.w = self.image.width()
    self.h = self.image.height()
    self.highlightObjs = []
    blurLicensePlates = True
    if blurLicensePlates:
        self.blurLicensePlates(qp)
    ignore = []
    if blurLicensePlates:
        # NOTE(review): blurLicensePlates() searches for 'license plate' while
        # this ignores 'numberplate' — confirm the label names are consistent.
        ignore.append('numberplate')
    self.drawLabels(qp, ignore)
    qp.end()
    # Restore the interactive view parameters.
    self.xoff = xoff
    self.yoff = yoff
    self.scale = scale
    self.w = w
    self.h = h
    # Expand '%c' (city) and '%i' (image basename) in the configured filename.
    # NOTE(review): if `file` is initially falsy, `message` is never assigned
    # and showMessage below raises NameError — verify screenshotFilename is
    # always non-empty.
    file = self.config.screenshotFilename
    cityIdx = file.find('%c')
    if (cityIdx >= 0):
        if self.config.cityName:
            dir = os.path.join(file[:cityIdx], self.config.cityName)
            if (not os.path.exists(dir)):
                os.makedirs(dir)
            file = file.replace('%c', (self.config.cityName + '/'), 1)
            if (file.find('%c') > 0):
                message = "Found multiple '%c' in screenshot filename. Not allowed"
                file = None
        else:
            message = "Do not have a city name. Cannot replace '%c' in screenshot filename."
            file = None
    if file:
        file = file.replace('%i', os.path.splitext(os.path.basename(self.config.currentFile))[0])
    if file:
        # Default to PNG when no extension is given.
        if (not os.path.splitext(file)[1]):
            file += '.png'
    if file:
        success = img.save(file)
        if success:
            message = ('Saved screenshot to ' + file)
        else:
            message = 'Failed to save screenshot'
    self.statusBar().showMessage(message)
    self.update()
def blurLicensePlates(self, qp):
    """Anonymise each 'license plate' polygon by painting it in its mean colour.

    Iterates the drawable objects whose label matches the searched names,
    averages the pixels inside the polygon's bounding box, and fills the
    polygon with that average colour.
    """
    searchedNames = ['license plate']
    img = self.image
    for obj in self.annotation.objects:
        if not obj.draw:
            continue
        name = obj.label
        if name not in name2label:
            continue
        if name not in searchedNames:
            continue
        polyToDraw = self.getPolygon(obj) * QtGui.QTransform.fromScale(self.scale, self.scale)
        bb = polyToDraw.boundingRect()
        # Average the colour over the (image-clipped) bounding box.
        meanR = 0
        meanG = 0
        meanB = 0
        num = 0
        for y in range(max(int(bb.top()), 0), min(int(bb.bottom() + 1.5), img.height())):
            for x in range(max(int(bb.left()), 0), min(int(bb.right() + 1.5), img.width())):
                col = img.pixel(x, y)
                meanR += QtGui.QColor(col).red()
                meanG += QtGui.QColor(col).green()
                meanB += QtGui.QColor(col).blue()
                num += 1
        # BUG FIX: a polygon whose bounding box lies completely outside the
        # image yields num == 0 and previously raised ZeroDivisionError;
        # skip such objects instead.
        if num == 0:
            continue
        # QColor expects integer channel values; truncate the float means
        # (the original passed floats straight through).
        col = QtGui.QColor(int(meanR / float(num)), int(meanG / float(num)), int(meanB / float(num)))
        qp.setPen(col)
        brush = QtGui.QBrush(col, QtCore.Qt.SolidPattern)
        qp.setBrush(brush)
        qp.drawPolygon(polyToDraw)
def updateMouseObject(self):
    """Find the topmost drawable object under the mouse (or -1 when none)."""
    self.mouseObj = (- 1)
    if (self.mousePosScaled is None):
        return
    if ((not self.annotation) or (not self.annotation.objects)):
        return
    # Iterate back-to-front so the object drawn last (topmost layer) wins.
    for idx in reversed(range(len(self.annotation.objects))):
        obj = self.annotation.objects[idx]
        if (obj.draw and self.getPolygon(obj).containsPoint(self.mousePosScaled, QtCore.Qt.OddEvenFill)):
            self.mouseObj = idx
            break
def infoOnSelectedObject(self):
    """Show id and label of the most recently selected object in the status bar."""
    if not self.selObjs:
        return
    objID = self.selObjs[-1]
    if self.annotation and objID >= 0:
        selected = self.annotation.objects[objID]
        self.statusBar().showMessage(
            'Label of object {0}: {1}'.format(selected.id, selected.label))
def selectObject(self):
    """Toggle selection of the object under the mouse and refresh actions."""
    if (self.mouseObj < 0):
        self.deselectObject()
        return
    # Toggle: select if not yet selected, otherwise deselect.
    if (not (self.mouseObj in self.selObjs)):
        self.selObjs.append(self.mouseObj)
    else:
        self.deselectObject()
    # Load the (single) selection into the editable polygon; the call is a
    # no-op for empty or multi selections.
    self.initPolygonFromObject()
    if self.selObjs:
        for act in (self.actSelObj + self.actPolyOrSelObj):
            act.setEnabled(True)
    # Single-object actions only make sense with exactly one selection.
    for act in self.singleActSelObj:
        act.setEnabled((len(self.selObjs) == 1))
    self.infoOnSelectedObject()
def deselectObject(self):
    """Remove the object under the mouse (or the most recent one) from the
    selection and refresh action enable-states."""
    if (not self.selObjs):
        return
    # No object under the mouse: drop the most recently selected one.
    if (self.mouseObj < 0):
        del self.selObjs[(- 1)]
    if (self.mouseObj in self.selObjs):
        self.selObjs.remove(self.mouseObj)
    if (not self.selObjs):
        for act in self.actSelObj:
            act.setEnabled(False)
        for act in self.actPolyOrSelObj:
            act.setEnabled(bool(self.drawPoly))
    for act in self.singleActSelObj:
        act.setEnabled((len(self.selObjs) == 1))
    self.infoOnSelectedObject()
def deselectAllObjects(self):
    """Clear the whole object selection and disable selection-driven actions."""
    self.selObjs = []
    self.mouseObj = -1
    for action in self.actSelObj:
        action.setEnabled(False)
    for action in self.singleActSelObj:
        action.setEnabled(len(self.selObjs) == 1)
    self.infoOnSelectedObject()
def modifyLayer(self, offset):
    """Move the single selected object up/down in the draw order by ``offset``."""
    if (not self.annotation):
        return
    if (len(self.selObjs) != 1):
        return
    obj = self.annotation.objects[self.selObjs[(- 1)]]
    oldidx = self.selObjs[(- 1)]
    newidx = (oldidx + offset)
    # Clamp to valid layer indices.
    newidx = max(newidx, 0)
    newidx = min(newidx, (len(self.annotation.objects) - 1))
    if (oldidx == newidx):
        return
    self.annotation.objects.insert(newidx, self.annotation.objects.pop(oldidx))
    self.selObjs[(- 1)] = newidx
    self.statusBar().showMessage('Moved object {0} with label {1} to layer {2}'.format(obj.id, obj.label, newidx))
    # Log the layer change only once per object.
    if (not (obj.id in self.changedLayer)):
        self.changedLayer.append(obj.id)
        self.addChange('Changed layer for object {0} with label {1}'.format(obj.id, obj.label))
def addChange(self, text):
    """Record an entry in the pending-change log and enable change actions."""
    if not text:
        return
    self.changes.append(text)
    for action in self.actChanges:
        action.setEnabled(True)
def clearChanges(self):
    """Reset the pending-change bookkeeping and disable change actions."""
    self.changes = []
    self.changedLayer = []
    self.changedPolygon = []
    for action in self.actChanges:
        action.setEnabled(False)
def clearAnnotation(self):
    """Drop the loaded annotation together with all derived editor state."""
    self.annotation = None
    self.clearChanges()
    self.deselectAllObjects()
    self.clearPolygon()
    self.config.currentLabelFile = ''
def clearCorrections(self):
    """Forget the loaded correction XML, all boxes, and the file reference."""
    self.correctionXML = None
    self.corrections = []
    self.config.currentCorrectionFile = ''
def getLabelFilename(self, createDirs=False):
    """Derive the label filename for the current image, or '' when impossible.

    NOTE(review): the ``createDirs`` parameter is unused here (compare
    getCorrectionFilename) — confirm whether directory creation was intended.
    """
    if (not self.config.cityName):
        return ''
    if (not self.config.labelPath):
        return ''
    if (not self.config.currentFile):
        return ''
    if (not self.isLabelPathValid(self.config.labelPath)):
        return ''
    # NOTE(review): redundant — isLabelPathValid() already performs isdir.
    if (not os.path.isdir(self.config.labelPath)):
        return ''
    labelDir = self.config.labelPath
    # Choose the ground-truth extension, optionally tagged with the gt type.
    if self.config.gtType:
        ext = self.gtExt.format(('_' + self.config.gtType))
    else:
        ext = self.gtExt.format('')
    filename = os.path.basename(self.config.currentFile)
    filename = filename.replace(self.imageExt, ext)
    filename = os.path.join(labelDir, filename)
    filename = os.path.normpath(filename)
    return filename
def getCorrectionFilename(self, createDirs=False):
    """Derive the correction-XML filename for the current image ('' when n/a).

    When ``createDirs`` is True the correction folder is created on demand.
    """
    if (not self.config.correctionPath):
        return ''
    if (not self.config.currentFile):
        return ''
    correctionDir = self.config.correctionPath
    if (not os.path.isdir(correctionDir)):
        if createDirs:
            os.makedirs(correctionDir)
            # Creation failed (e.g. permissions): give up.
            if (not os.path.isdir(correctionDir)):
                return ''
        else:
            return ''
    filename = os.path.basename(self.config.currentFile)
    filename = filename.replace(self.imageExt, '.xml')
    filename = os.path.join(correctionDir, filename)
    filename = os.path.normpath(filename)
    return filename
def createPopupMenu(self):
    """Qt override that intentionally does nothing.

    NOTE(review): returning None presumably suppresses QMainWindow's default
    toolbar context menu — confirm against the Qt documentation.
    """
    pass
# NOTE(review): stray module-level call — `_test` is not defined in this chunk;
# presumably it invokes a test harness defined elsewhere in the file. Verify
# this top-level side effect is intentional.
_test(assert_ii_1=False, intel=False)
def test_type_inference():
    """Build the FPGA-transformed SDFG, run it on small arrays, and check rc.

    NOTE(review): relies on module-level names `program`,
    `FPGATransformSDFG`, and `np` defined elsewhere in the file.
    """
    sdfg = program.to_sdfg()
    sdfg.apply_transformations(FPGATransformSDFG)
    f2cOperator = np.array([0, 1, 2, 3], dtype=np.uint32)
    rc = np.array([42, 42, 42, 42], dtype=np.float64)
    Axf = np.array([0, 2, 4, 6], dtype=np.float64)
    x = np.array([0, 1, 2, 3], dtype=np.float64)
    sdfg(f2cOperator0=f2cOperator, rc1=rc, Axf0=Axf, x=x, N0=4, N1=4)
    # Expected in-place result equals x - Axf = [0, -1, -2, -3]
    # (presumed semantics of `program` — confirm against its definition).
    assert (rc == np.array([0, (- 1), (- 2), (- 3)])).all()
    return sdfg
class InputBlockDAG(DAG):
    """DAG variant in which each hidden layer may connect directly to the
    per-feature input blocks, with optional auxiliary outputs attached to
    every intermediate layer.

    Requires the parent DAG to be configured with ``with_input_blocks=True``.
    """

    def __init__(self, add_output=True, *args, **kwargs):
        """Set up bookkeeping for auxiliary outputs.

        Parameters
        ----------
        add_output : bool
            When True, attach a copy of the output node to every intermediate
            layer (deep supervision).
        """
        super().__init__(*args, **kwargs)
        assert self.with_input_blocks, '`InputBlockDAG` class only handles `with_input_blocks=True`'
        self.added_output_nodes = []
        self.add_output = add_output

    def _build_dag(self):
        """Materialise every node and assemble the final Keras model."""
        assert isinstance(self.input_node, (list, tuple)), ('If ``with_input_blocks=True``, ``input_node`` must be array-like. Current type of input_node is %s and with_input_blocks=%s' % (type(self.input_node), self.with_input_blocks))
        assert (type(self.output_node) is ComputationNode)
        nodes = self._init_nodes()
        nodes = self._prune_nodes(nodes)
        # Build inputs first, then hidden nodes, auxiliary outputs, and finally
        # the main output, so parents always exist before their children.
        node_list = ((self.input_node + nodes) + self.added_output_nodes) + [self.output_node]
        for node in node_list:
            try:
                node.build()
            except Exception as exc:
                print(node.node_name)
                print([x.node_name for x in node.parent])
                print([x.node_name for x in node.child])
                # Chain the original error so the root cause is preserved
                # (the previous bare `except:` discarded it).
                raise Exception('above') from exc
        self.nodes = nodes
        self.model = DenseAddOutputChild(inputs=[x.operation_layer for x in self.input_node], outputs=([self.output_node.operation_layer] + [n.operation_layer for n in self.added_output_nodes]), nodes=self.nodes)
        return self.model

    def _init_nodes(self):
        """Decode ``arc_seq`` into ComputationNodes with their parent links."""
        arc_pointer = 0
        nodes = []
        for layer_id in range(self.num_layers):
            arc_id = self.arc_seq[arc_pointer]
            op = self.model_space[layer_id][arc_id]
            parent = []
            node_ = ComputationNode(op, node_name=('L%i_%s' % (layer_id, get_layer_shortname(op))))
            # Indices of the input blocks wired to this layer.
            inp_bl = np.where((self.arc_seq[(arc_pointer + 1):((arc_pointer + 1) + len(self.input_node))] == 1))[0]
            # BUG FIX: the original `if any(inp_bl):` tested the truthiness of
            # the index values, so a match at index 0 alone (any([0]) == False)
            # silently dropped the connection to the first input block. Test
            # whether any index matched at all.
            if len(inp_bl) > 0:
                for i in inp_bl:
                    parent.append(self.input_node[i])
                    self.input_node[i].child.append(node_)
            # Deep supervision: every non-final layer gets its own output copy.
            if (self.add_output and (layer_id != (self.num_layers - 1))):
                if (type(self.output_node) is list):
                    assert (len(self.output_node) == 1)
                    self.output_node = self.output_node[0]
                new_out = ComputationNode(operation=self.output_node.operation, node_name=('added_out_%i' % (len(self.added_output_nodes) + 1)))
                new_out.parent.append(node_)
                self.added_output_nodes.append(new_out)
            # Skip connections to earlier layers (or chain to the previous one).
            if (self.with_skip_connection and (layer_id > 0)):
                skip_con = np.where((self.arc_seq[((arc_pointer + 1) + (len(self.input_node) * self.with_input_blocks)):(((arc_pointer + 1) + (len(self.input_node) * self.with_input_blocks)) + layer_id)] == 1))[0]
                for i in skip_con:
                    parent.append(nodes[i])
                    nodes[i].child.append(node_)
            elif (layer_id > 0):
                parent.append(nodes[(- 1)])
                nodes[(- 1)].child.append(node_)
            node_.parent = parent
            nodes.append(node_)
            # Advance past this layer's op id, input-block bits, and skip bits.
            arc_pointer += ((1 + (int(self.with_input_blocks) * len(self.input_node))) + (int(self.with_skip_connection) * layer_id))
        return nodes
class StackedMultiLevelAttentionFusion(tf.keras.layers.Layer):
    """Applies ``num_repeats`` MultiLevelAttentionFusion blocks in sequence.

    Only the last block extends levels up to ``max_level``; earlier blocks stop
    at ``backbone_max_level``. Only the first block uses lateral convolutions.
    """

    def __init__(self, filters=256, projection_dim=64, num_repeats=2, min_level=3, max_level=7, backbone_max_level=5, conv_2d_op_params=None, normalization_op_params=None, use_channel_attention=True, **kwargs):
        super(StackedMultiLevelAttentionFusion, self).__init__(**kwargs)
        self.num_repeats = num_repeats
        # Shared configuration for every fusion block in the stack.
        mlaf_block = functools.partial(MultiLevelAttentionFusion, filters=filters, projection_dim=projection_dim, min_level=min_level, backbone_max_level=backbone_max_level, conv_2d_op_params=conv_2d_op_params, normalization_op_params=normalization_op_params, use_channel_attention=use_channel_attention, **kwargs)
        self._blocks = {}
        for i in range(num_repeats):
            # Only the last repeat expands the level range to max_level.
            block_max_level = (max_level if (i == (num_repeats - 1)) else backbone_max_level)
            self._blocks[str(i)] = mlaf_block(max_level=block_max_level, use_lateral_conv=(i == 0), name=('mlaf_' + str((i + 1))))

    def call(self, features, training=False):
        """Feed ``features`` through the stacked fusion blocks in order."""
        outputs = features
        for i in range(self.num_repeats):
            outputs = self._blocks[str(i)](outputs, training=training)
        return outputs
def get_width_manner_node_list(root):
    """Return the nodes of a binary tree in breadth-first (level) order.

    Parameters
    ----------
    root : node or None
        Tree root; each node exposes ``left`` and ``right`` child attributes.

    Returns
    -------
    list
        All nodes in BFS order; empty list when ``root`` is None.
    """
    if root is None:
        return []
    node_list = []
    # deque gives O(1) popleft; the original list.pop(0) was O(n) per pop.
    queue = deque([root])
    while queue:
        node = queue.popleft()
        node_list.append(node)
        if node.left is not None:
            queue.append(node.left)
        if node.right is not None:
            queue.append(node.right)
    return node_list
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.