code stringlengths 101 5.91M |
|---|
# BUG FIX: the decorator was mangled to a bare '.parametrize(...)' (a syntax
# error); restore the pytest parametrize decorator.
@pytest.mark.parametrize('observer, model, rule', [
    (_broken_observer, _PseudoTrainableQuadratic(), FixedAcquisitionRule([[0.0]])),
    (_quadratic_observer, _BrokenModel(), FixedAcquisitionRule([[0.0]])),
    (_quadratic_observer, _PseudoTrainableQuadratic(), _BrokenRule()),
])
def test_bayesian_optimizer_optimize_for_failed_step(observer: Observer, model: TrainableProbabilisticModel, rule: AcquisitionRule[None, Box, ProbabilisticModel]) -> None:
    """A failing observer, model or rule must be captured in the result and
    leave exactly one (failed) step in the history."""
    optimizer = BayesianOptimizer(observer, Box([0], [1]))
    data, models = {NA: mk_dataset([[0.0]], [[0.0]])}, {NA: model}
    result, history = optimizer.optimize(3, data, models, rule).astuple()
    # The wrapped error surfaces only when the result is unwrapped ...
    with pytest.raises(_Whoops):
        result.unwrap()
    # ... and the optimization stops after the single failed step.
    assert len(history) == 1
class AnalyserGeneratorTests(unittest.TestCase):
    """Data-driven round-trip tests for Analyser and Generator.

    Fixtures live in tests.json next to this file; each entry provides a
    surface ``word`` and its expected ``analysis`` (entries may opt out with
    a truthy ``skip`` attribute).
    """

    generator = Generator()
    analyser = Analyser()

    def setUp(self):
        # Fixtures are re-read before every test method.
        self.testFile = open(os.path.join(CURR_DIR, 'tests.json'))
        self.tests = json.load(self.testFile, object_hook=Struct)

    def tearDown(self):
        self.testFile.close()

    def test_analyse(self):
        print('\t**** Analyse tests ****\t')
        for line, test in enumerate(self.tests, start=1):
            with self.subTest(test.word):
                anals = self.analyser.analyse(test.word)
                if hasattr(test, 'skip') and test.skip:
                    continue
                self.assertTrue(len(anals) != 0, 'Analysis failed for ' + test.word)
                print('%3d %s\t<--\t%s' % (line, test.word, anals))
                # The expected analysis must appear among the candidates.
                match = any(test.analysis == anal[0] for anal in anals)
                self.assertEqual(match, True, 'Analysis for ' + test.analysis)

    def test_generate(self):
        print('\t**** Generate tests ****\t')
        for line, test in enumerate(self.tests, start=1):
            with self.subTest(test.word):
                gens = self.generator.generate(test.analysis, True)
                if hasattr(test, 'skip') and test.skip:
                    continue
                self.assertTrue(len(gens) != 0, 'Generate failed for ' + test.analysis)
                print('%3d %s\t<--\t%s' % (line, test.analysis, gens))
                # The expected surface form must appear among the candidates.
                match = any(test.word == gen[0] for gen in gens)
                self.assertEqual(match, True, 'Generate for ' + test.analysis)
def two_choice(input_dict, k, verbose=False, method='means'):
    """Partition the entities of ``input_dict`` into ``k`` subproblems.

    Each entity is assigned greedily with the "power of two choices" trick:
    two candidate subproblems are drawn at random and the entity goes to the
    one whose distribution (per-dimension means for ``method='means'``,
    covariance for ``method='covs'``) improves most toward the overall
    distribution. A subproblem is retired once it exceeds ~1% over its fair
    share of entities.

    Parameters
    ----------
    input_dict : dict mapping entity -> sequence of per-dimension values.
    k : number of subproblems to create.
    verbose : print intermediate state after each assignment when True.
    method : 'means' or 'covs' distance criterion.

    Returns
    -------
    list of k lists holding the entities assigned to each subproblem.
    """
    num_inputs = len(input_dict)
    num_dimensions = len(list(input_dict.values())[0])
    # Target per-subproblem mean of each dimension (overall sum divided by k).
    original_dist_means_array = np.zeros(num_dimensions)
    original_dist_inputs_by_dim = []
    for d in range(num_dimensions):
        inputs = [val[d] for val in input_dict.values()]
        original_dist_inputs_by_dim.append(inputs)
        original_dist_means_array[d] = sum(inputs) / k
    original_dist_cov = np.cov(original_dist_inputs_by_dim)
    subproblem_dim_lists = [[[] for _ in range(num_dimensions)] for _ in range(k)]
    subproblem_entity_assignments = [[] for _ in range(k)]
    num_assigned = 0
    # Per subproblem: [entity count, per-dimension running sum].
    subproblem_num_entity_means = [[0, np.zeros(num_dimensions)] for _ in range(k)]
    subproblem_covs = [np.zeros((num_dimensions, num_dimensions)) for _ in range(k)]
    sp_ids = list(range(k))
    # BUG FIX: int(num_inputs / 4) is 0 for fewer than 4 inputs, which made
    # the progress modulo below raise ZeroDivisionError.
    progress_step = max(1, num_inputs // 4)
    for entity, dims in input_dict.items():
        if num_assigned % progress_step == 0:
            print('Assigned ' + str(num_assigned) + ' entities')
        max_dist_change = -np.inf
        max_dist_sp = 0
        updated_cov = None
        num_sp_left = len(sp_ids)
        if num_sp_left == 1:
            max_dist_sp = sp_ids[0]
        else:
            # Draw two distinct candidate subproblems at random.
            random_sp1_id = random.randint(0, num_sp_left - 1)
            random_sp2_id = random.randint(0, num_sp_left - 1)
            while random_sp1_id == random_sp2_id:
                random_sp2_id = random.randint(0, num_sp_left - 1)
            for sp_index in (sp_ids[random_sp1_id], sp_ids[random_sp2_id]):
                dist_change = 0
                new_cov = None
                if method == 'means':
                    dist_change = calc_dist_mean_change(subproblem_dim_lists[sp_index], dims, original_dist_means_array, subproblem_num_entity_means[sp_index])
                elif method == 'covs':
                    dist_change, new_cov = calc_dist_cov_change(subproblem_dim_lists[sp_index], dims, original_dist_cov, subproblem_num_entity_means[sp_index], subproblem_covs[sp_index])
                if dist_change >= max_dist_change:
                    max_dist_change = dist_change
                    max_dist_sp = sp_index
                    if method == 'covs':
                        updated_cov = new_cov
        # BUG FIX: the winner's covariance was stored under a misspelled method
        # name ('cov' instead of 'covs', so the branch never ran) and used the
        # covariance of the *last* candidate instead of the winning one.
        if method == 'covs' and updated_cov is not None:
            subproblem_covs[max_dist_sp] = updated_cov
        subproblem_entity_assignments[max_dist_sp].append(entity)
        # Retire a subproblem once it reaches ~1% over its fair share.
        if len(subproblem_entity_assignments[max_dist_sp]) > (num_inputs * 1.01) / k:
            sp_ids.remove(max_dist_sp)
        for d in range(num_dimensions):
            subproblem_dim_lists[max_dist_sp][d].append(dims[d])
        subproblem_num_entity_means[max_dist_sp][0] += 1
        subproblem_num_entity_means[max_dist_sp][1] = subproblem_num_entity_means[max_dist_sp][1] + np.asarray(dims)
        num_assigned += 1
        if verbose:
            print(subproblem_dim_lists)
            print(subproblem_entity_assignments)
            print('\n')
    return subproblem_entity_assignments
class TreePredictor:
    """Inference-only wrapper around a fitted tree's node array and the
    bitsets describing its categorical splits."""

    def __init__(self, nodes, binned_left_cat_bitsets, raw_left_cat_bitsets):
        self.nodes = nodes
        self.binned_left_cat_bitsets = binned_left_cat_bitsets
        self.raw_left_cat_bitsets = raw_left_cat_bitsets

    def get_n_leaf_nodes(self):
        """Return the number of leaf nodes in the tree."""
        return int(self.nodes['is_leaf'].sum())

    def get_max_depth(self):
        """Return the depth of the deepest node."""
        return int(self.nodes['depth'].max())

    def predict(self, X, known_cat_bitsets, f_idx_map, n_threads):
        """Predict raw values for the (unbinned) samples in ``X``."""
        predictions = np.empty(X.shape[0], dtype=Y_DTYPE)
        _predict_from_raw_data(self.nodes, X, self.raw_left_cat_bitsets,
                               known_cat_bitsets, f_idx_map, n_threads,
                               predictions)
        return predictions

    def predict_binned(self, X, missing_values_bin_idx, n_threads):
        """Predict raw values for pre-binned samples in ``X``."""
        predictions = np.empty(X.shape[0], dtype=Y_DTYPE)
        _predict_from_binned_data(self.nodes, X, self.binned_left_cat_bitsets,
                                  missing_values_bin_idx, n_threads,
                                  predictions)
        return predictions

    def compute_partial_dependence(self, grid, target_features, out):
        """Accumulate partial-dependence values for ``grid`` into ``out``."""
        _compute_partial_dependence(self.nodes, grid, target_features, out)
class DiscreteProbabilitySpace(ProbabilitySpace_generic, DiscreteRandomVariable):
    """A discrete probability space: a finite set ``X`` together with a
    probability function ``P`` with values in the reals or rationals."""

    def __init__(self, X, P, codomain=None, check=False):
        """Create the space over ``codomain`` (real field by default).

        When ``check`` is True, verify that the values of ``P`` sum to 1
        (exactly over the rationals, up to one ulp over the reals).
        """
        if codomain is None:
            from sage.rings.real_mpfr import RealField
            codomain = RealField()
        if not isinstance(codomain, sage.rings.abc.RealField) and not is_RationalField(codomain):
            raise TypeError('Argument codomain (= %s) must be the reals or rationals' % codomain)
        if check:
            one = sum(P.values())
            if is_RationalField(codomain):
                if not one == 1:
                    # BUG FIX: the message contained %s but '% P' was never applied.
                    raise TypeError('Argument P (= %s) does not define a probability function' % P)
            elif not abs(one - 1) < 2 ** (-codomain.precision() + 1):
                raise TypeError('Argument P (= %s) does not define a probability function' % P)
        ProbabilitySpace_generic.__init__(self, X, codomain)
        DiscreteRandomVariable.__init__(self, self, P, codomain, check)

    def __repr__(self):
        F = pformat(self.function())
        return 'Discrete probability space defined by %s' % F

    def set(self):
        """Return the set of values of the probability function."""
        return Set(self.function())

    def entropy(self):
        """Return the Shannon entropy (base 2) of the space."""
        def neg_xlog2x(p):
            # 0 * log(0) is 0 by convention.
            if p == 0:
                return 0
            else:
                return -p * log(p, 2)
        p = self.function()
        return sum(neg_xlog2x(p[x]) for x in p)
class EggProvider(NullProvider):
    """Resource provider whose module lives inside an egg directory."""

    def __init__(self, module):
        NullProvider.__init__(self, module)
        self._setup_prefix()

    def _setup_prefix(self):
        # Walk up from the module path until an egg directory is found, or
        # until os.path.split stops making progress (filesystem root).
        path, previous = self.module_path, None
        while path != previous:
            if _is_egg_path(path):
                self.egg_name = os.path.basename(path)
                self.egg_info = os.path.join(path, 'EGG-INFO')
                self.egg_root = path
                return
            previous = path
            path, _ = os.path.split(path)
def sympy_set_to_list(set, vars):
    """Convert a SymPy set/condition into a list of Sage relations on *vars*.

    Returns a (possibly nested) list of Sage relational expressions, or the
    input unchanged when it is of an unhandled type.
    """
    from sage.rings.infinity import UnsignedInfinity
    from sympy import FiniteSet, And, Or, Union, Interval, oo, S
    from sympy.core.relational import Relational
    if set == S.Reals:
        return [x._sage_() < oo for x in vars]
    elif set == S.Complexes:
        return [x._sage_() != UnsignedInfinity for x in vars]
    elif set is None or set == S.EmptySet:
        return []
    if isinstance(set, (And, Or, Relational)):
        if isinstance(set, And):
            return [[item for rel in set._args[0] for item in sympy_set_to_list(rel, vars)]]
        elif isinstance(set, Or):
            return [sympy_set_to_list(iv, vars) for iv in set._args[0]]
        elif isinstance(set, Relational):
            return [set._sage_()]
    elif isinstance(set, FiniteSet):
        x = vars[0]
        return [x._sage_() == arg._sage_() for arg in set.args]
    elif isinstance(set, (Union, Interval)):
        x = vars[0]
        if isinstance(set, Interval):
            # NOTE: despite the local names, the third/fourth Interval args
            # are sympy's left_open / right_open flags (True means *open*).
            left, right, lclosed, rclosed = set._args
            if lclosed:
                rel1 = [x._sage_() > left._sage_()]
            else:
                rel1 = [x._sage_() >= left._sage_()]
            if rclosed:
                rel2 = [x._sage_() < right._sage_()]
            else:
                # BUG FIX: this branch previously *returned* [x <= right]
                # immediately, silently dropping the left bound for every
                # right-closed interval.
                rel2 = [x._sage_() <= right._sage_()]
            if right == oo:
                return rel1
            if left == -oo:
                return rel2
            return [rel1, rel2]
        if isinstance(set, Union):
            return [sympy_set_to_list(iv, vars) for iv in set._args]
    return set
def construct_optimizer(model, cfg):
    """Build a torch optimizer with separate weight decay for BN parameters.

    Parameters whose name contains 'bn' get ``cfg.BN.WEIGHT_DECAY``; all
    remaining parameters get ``cfg.SOLVER.WEIGHT_DECAY``. The optimizer type
    is chosen by ``cfg.SOLVER.OPTIMIZING_METHOD`` ('sgd', 'adam' or 'adamw').
    """
    bn_params = [p for name, p in model.named_parameters() if 'bn' in name]
    rest_params = [p for name, p in model.named_parameters() if 'bn' not in name]
    optim_params = []
    if bn_params:
        optim_params.append({'params': bn_params, 'weight_decay': cfg.BN.WEIGHT_DECAY})
    if rest_params:
        optim_params.append({'params': rest_params, 'weight_decay': cfg.SOLVER.WEIGHT_DECAY})
    # Sanity check: the two partitions must cover every model parameter.
    assert len(list(model.parameters())) == len(rest_params) + len(bn_params), 'parameter size does not match: {} + {} != {}'.format(len(rest_params), len(bn_params), len(list(model.parameters())))
    method = cfg.SOLVER.OPTIMIZING_METHOD
    if method == 'sgd':
        return torch.optim.SGD(optim_params, momentum=cfg.SOLVER.MOMENTUM, dampening=cfg.SOLVER.DAMPENING, nesterov=cfg.SOLVER.NESTEROV)
    if method == 'adam':
        return torch.optim.Adam(optim_params, betas=(0.9, 0.999), eps=1e-06)
    if method == 'adamw':
        return torch.optim.AdamW(optim_params, betas=(0.9, 0.999), eps=1e-06, amsgrad=cfg.SOLVER.USE_AMSGRAD)
    raise NotImplementedError('Does not support {} optimizer'.format(method))
class COCODataModule(pl.LightningDataModule):
    """Lightning data module serving COCO train/val/test splits."""

    def __init__(self, data_path, train_batch_size=16, val_batch_size=16, test_batch_size=16, use_data_augmentation=False):
        super().__init__()
        self.data_path = Path(data_path)
        self.annotations_path = self.data_path / 'annotations'
        # Augmentation only applies to the training transformer.
        self.train_transformer = get_training_image_transformer(use_data_augmentation)
        self.test_transformer = get_testing_image_transformer()
        self.train_batch_size = train_batch_size
        self.val_batch_size = val_batch_size
        self.test_batch_size = test_batch_size

    def prepare_data(self):
        # Data is expected to already be on disk; nothing to download.
        pass

    def setup(self, stage: Optional[str] = None):
        if stage in ('fit', None):
            train_root = self.data_path / 'train2014'
            self.train = COCODataset(root=train_root, annotation=self.annotations_path / 'train2014_train_split.json', transform_fn=self.train_transformer)
            self.val = COCODataset(root=train_root, annotation=self.annotations_path / 'train2014_val_split.json', transform_fn=self.test_transformer)
        if stage in ('test', None):
            self.test = COCODataset(root=self.data_path / 'val2014', annotation=self.annotations_path / 'instances_val2014.json', transform_fn=self.test_transformer)

    def _loader(self, dataset, batch_size, shuffle=False):
        # Shared DataLoader construction; pin memory only when a GPU exists.
        return DataLoader(dataset, batch_size=batch_size, collate_fn=collate_fn, shuffle=shuffle, num_workers=4, pin_memory=torch.cuda.is_available())

    def train_dataloader(self):
        return self._loader(self.train, self.train_batch_size, shuffle=True)

    def val_dataloader(self):
        return self._loader(self.val, self.val_batch_size)

    def test_dataloader(self):
        return self._loader(self.test, self.test_batch_size)
class SingleDummyVecEnv2(VecEnv):
    """Sequential (in-process) VecEnv whose envs also return critic
    observations and opponent selections alongside the usual step tuple."""

    def __init__(self, env_fns):
        self.envs = [factory() for factory in env_fns]
        first = self.envs[0]
        VecEnv.__init__(self, len(env_fns), first.observation_space, first.action_space)
        # Per-env step counters since the last reset.
        self.ts = np.zeros(len(self.envs), dtype='int')
        self.actions = None

    def step_async(self, actions):
        self.actions = actions

    def step_wait(self):
        results = [env.step(action) for action, env in zip(self.actions, self.envs)]
        obs, obs_critic, select_opponent, rews, dones, infos = map(np.array, zip(*results))
        self.ts += 1
        for idx, done in enumerate(dones):
            # Reset an env once every component of its done signal is set.
            if all(done):
                obs[idx], obs_critic[idx], select_opponent[idx] = self.envs[idx].reset()
                self.ts[idx] = 0
        self.actions = None
        return (np.array(obs), np.array(obs_critic), np.array(select_opponent), np.array(rews), np.array(dones), infos)

    def reset(self):
        obs, obs_critic, select_opponent = zip(*[env.reset() for env in self.envs])
        return (np.array(obs), np.array(obs_critic), np.array(select_opponent))

    def close(self):
        return
def gumbel_softmax_sample(logits, temperature):
    """Draw a differentiable Gumbel-softmax sample from ``logits``."""
    noisy_logits = logits + sample_gumbel(logits.shape, tens_type=type(logits.data))
    return F.softmax(noisy_logits / temperature, dim=1)
def register_Ns3QueueSizeChecker_methods(root_module, cls):
    """Register the ns3::QueueSizeChecker constructors on the binding class."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::QueueSizeChecker const &', 'arg0')])
def get_tpc():
    """Build TargetPlatformCapabilities with power-of-two quantizers
    (3-bit activations, 2-bit per-channel weights); layers in the
    'NoQuantization' set (Flatten, Dropout) stay unquantized."""
    tp = mct.target_platform
    base_cfg = tp.OpQuantizationConfig(
        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
        activation_n_bits=3,
        weights_n_bits=2,
        weights_per_channel_threshold=True,
        enable_weights_quantization=True,
        enable_activation_quantization=True,
        quantization_preserving=False,
        fixed_scale=1.0,
        fixed_zero_point=0,
        weights_multiplier_nbits=0,
        simd_size=None)
    tp_model = tp.TargetPlatformModel(tp.QuantizationConfigOptions([base_cfg]))
    with tp_model:
        no_quant = tp.get_default_quantization_config_options().clone_and_edit(
            enable_weights_quantization=False, enable_activation_quantization=False)
        tp.OperatorsSet('NoQuantization', no_quant)
    tpc = tp.TargetPlatformCapabilities(tp_model)
    with tpc:
        tp.OperationsSetToLayers('NoQuantization', [layers.Flatten, layers.Dropout])
    return tpc
# BUG FIX: the decorator was mangled to a bare '(**njit_dict_no_parallel)'
# (a syntax error); restore the numba njit decorator.
@njit(**njit_dict_no_parallel)
def macro_atom(activation_level_id, current_shell_id, opacity_state):
    """Random-walk the macro-atom level graph until a non-internal transition.

    Parameters
    ----------
    activation_level_id : index of the initially activated macro-atom level.
    current_shell_id : index of the current shell.
    opacity_state : holds block references and normalized transition
        probabilities per (transition, shell).

    Returns
    -------
    (transition_line_id, transition_type) of the terminating transition.
    """
    current_transition_type = 0
    # Transition types < 0 terminate the internal random walk.
    while current_transition_type >= 0:
        probability = 0.0
        probability_event = np.random.random()
        block_start = opacity_state.macro_block_references[activation_level_id]
        block_end = opacity_state.macro_block_references[activation_level_id + 1]
        # Inverse-CDF sampling over the current level's transition block.
        for transition_id in range(block_start, block_end):
            transition_probability = opacity_state.transition_probabilities[(transition_id, current_shell_id)]
            probability += transition_probability
            if probability > probability_event:
                activation_level_id = opacity_state.destination_level_id[transition_id]
                current_transition_type = opacity_state.transition_type[transition_id]
                break
        else:
            raise MacroAtomError('MacroAtom ran out of the block. This should not happen as the sum of probabilities is normalized to 1 and the probability_event should be less than 1')
    return (opacity_state.transition_line_id[transition_id], current_transition_type)
def get_camera_meshes(camera_list, radius=0.02):
    """Build a cone-of-cylinders mesh per camera, colored per camera.

    Parameters
    ----------
    camera_list : sequence of dicts with a quaternion 'rotation' and a
        'position' array per camera (at most 5, limited by the palette).
    radius : cylinder radius used for the cone edges.

    Returns
    -------
    pytorch3d ``Meshes`` with per-vertex colors.
    """
    verts_list = []
    faces_list = []
    color_list = []
    rots = np.array([quaternion.as_rotation_matrix(camera_info['rotation']) for camera_info in camera_list])
    lookat = np.array([0, 0, -1])
    vertical = np.array([0, 1, 0])
    positions = np.array([camera_info['position'].flatten() for camera_info in camera_list])
    # BUG FIX: the '@' matrix-multiplication operators were missing here,
    # leaving invalid syntax; rotate the canonical axes per camera.
    lookats = rots @ lookat.T
    verticals = rots @ vertical.T
    predetermined_color = [
        [0.10196, 0.32157, 1.0],
        [1.0, 0.0667, 0.149],
        [197 / 255, 181 / 255, 24 / 255],
        [73 / 255, 145 / 255, 115 / 255],
        [198 / 255, 120 / 255, 221 / 255],
    ][:len(camera_list)]
    assert len(predetermined_color) == len(camera_list)
    for position, lookat, vertical, color in zip(positions, lookats, verticals, predetermined_color):
        cur_num_verts = 0
        edges = get_cone_edges(position, lookat, vertical)
        cam_verts = []
        cam_inds = []
        for k in range(len(edges)):
            try:
                cyl_verts, cyl_ind = create_cylinder_mesh(radius, edges[k][0], edges[k][1])
            except Exception:
                # NOTE(review): debugger drop-in left from development —
                # consider re-raising instead of entering pdb in production.
                import pdb
                pdb.set_trace()
            cyl_verts = [x for x in cyl_verts]
            # Shift the cylinder's indices past the vertices already emitted.
            cyl_ind = [x + cur_num_verts for x in cyl_ind]
            cur_num_verts += len(cyl_verts)
            cam_verts.extend(cyl_verts)
            cam_inds.extend(cyl_ind)
        verts_list.append(torch.tensor(cam_verts, dtype=torch.float32))
        faces_list.append(torch.tensor(cam_inds, dtype=torch.float32))
        color_list.append(color)
    color_tensor = torch.tensor(color_list, dtype=torch.float32).unsqueeze_(1)
    tex = TexturesVertex(verts_features=color_tensor)
    meshes = Meshes(verts=verts_list, faces=faces_list, textures=tex)
    return meshes
def main():
    """Evaluate and/or format pre-computed detection results from a pickle file."""
    args = parse_args()
    cfg = Config.fromfile(args.config)
    update_data_root(cfg)
    # Exactly one of --eval / --format-only must be supplied: at least one ...
    assert (args.eval or args.format_only), 'Please specify at least one operation (eval/format the results) with the argument "--eval", "--format-only"'
    # ... and not both.
    if (args.eval and args.format_only):
        raise ValueError('--eval and --format_only cannot be both specified')
    if (args.cfg_options is not None):
        cfg.merge_from_dict(args.cfg_options)
    cfg.data.test.test_mode = True
    dataset = build_dataset(cfg.data.test)
    # Results were produced by an earlier test run and dumped with mmcv.
    outputs = mmcv.load(args.pkl_results)
    kwargs = ({} if (args.eval_options is None) else args.eval_options)
    if args.format_only:
        dataset.format_results(outputs, **kwargs)
    if args.eval:
        eval_kwargs = cfg.get('evaluation', {}).copy()
        # Drop training-time-only evaluation options before calling evaluate().
        for key in ['interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', 'rule']:
            eval_kwargs.pop(key, None)
        eval_kwargs.update(dict(metric=args.eval, **kwargs))
        print(dataset.evaluate(outputs, **eval_kwargs))
def spectral_de_normalize_torch(magnitudes):
    """Undo the dynamic-range compression applied to spectrogram magnitudes."""
    return dynamic_range_decompression_torch(magnitudes)
def random_deletion(words, p):
    """Randomly drop each word with probability ``p``.

    A single-word input is returned as-is, and if every word happens to be
    deleted one surviving word is drawn uniformly so the result is never empty.
    """
    if len(words) == 1:
        return words
    # One uniform draw per word; a word survives iff its draw exceeds p.
    new_words = [word for word in words if random.uniform(0, 1) > p]
    if not new_words:
        return [random.choice(words)]
    return new_words
def srwl_uti_write_data_cols(_file_path, _cols, _str_sep, _str_head=None, _i_col_start=0, _i_col_end=-1):
    """Write tabulated data columns to an ASCII file.

    Parameters
    ----------
    _file_path : output file path.
    _cols : list of columns (each a sequence); columns may be ragged —
        missing cells are written as a single space.
    _str_sep : column separator (TAB when None or empty).
    _str_head : optional header text, newline-terminated if not already.
    _i_col_start : index of the first column to write.
    _i_col_end : index of the last column to write (-1 means the last one).
    """
    # BUG FIX: use a context manager so the file handle is closed on every
    # exit path (the original leaked it if an exception occurred mid-write).
    with open(_file_path, 'w') as f:
        if _str_head is not None and len(_str_head) > 0:
            # Ensure the header ends with exactly one newline.
            f.write(_str_head if _str_head.endswith('\n') else _str_head + '\n')
        if _cols is None:
            return
        nCols = len(_cols)
        if nCols <= 0:
            return
        # Number of output rows is the length of the longest column.
        nLines = max(len(col) for col in _cols)
        strSep = _str_sep if (_str_sep is not None and len(_str_sep) > 0) else '\t'
        iColEndP1 = _i_col_end + 1 if 0 <= _i_col_end < nCols else nCols
        lines = []
        for i in range(nLines):
            cells = []
            for j in range(_i_col_start, iColEndP1):
                # Ragged columns: pad missing entries with a space.
                cells.append(repr(_cols[j][i]) if i < len(_cols[j]) else ' ')
            lines.append(strSep.join(cells))
        # No trailing newline after the last data line (original format).
        f.write('\n'.join(lines))
def my_load(model, pretrained_dict):
    """Load ``pretrained_dict`` into ``model``, remapping duplicated encoders.

    Keys present in the checkpoint are copied verbatim. Keys of the form
    ``encoder1.*`` / ``encoder2.*`` fall back to the checkpoint's
    ``encoder.*`` weights when the shapes match. Anything unmatched keeps
    the model's current weights (reported as "lost").

    Returns the model after loading the merged state dict.
    """
    current_dict = model.state_dict()
    new_state_dict = OrderedDict()
    for key in current_dict.keys():
        if key in pretrained_dict:
            new_state_dict[key] = pretrained_dict[key]
            continue
        # Duplicated encoder branches reuse the single pretrained encoder.
        src_key = None
        for prefix in ('encoder1', 'encoder2'):
            if prefix in key:
                src_key = key.replace(prefix, 'encoder')
                break
        # BUG FIX: also guard against the remapped key being absent from the
        # checkpoint (previously an unhandled KeyError), not just shape
        # mismatches.
        if (src_key is not None and src_key in pretrained_dict
                and pretrained_dict[src_key].shape == current_dict[key].shape):
            new_state_dict[key] = pretrained_dict[src_key]
        else:
            print('weight {} lost!'.format(key))
            new_state_dict[key] = current_dict[key]
    model.load_state_dict(new_state_dict)
    return model
class GenericObject(object):
    """Minimal object-dtype element whose additions always yield itself."""

    # numpy treats instances as opaque Python objects.
    dtype = np.dtype('O')

    def __init__(self, v):
        self.v = v

    def __add__(self, other):
        # Addition is absorbing: the existing instance always wins.
        return self

    def __radd__(self, other):
        return self
# BUG FIX: the decorators were mangled ('.skipif(...)' / '_utils.test(...)');
# restore the pytest skip marker and the taichi test-arch decorator, and mark
# the inner function as a taichi kernel so the typed ndarray argument applies.
@pytest.mark.skipif(not has_pytorch(), reason='Pytorch not installed.')
@test_utils.test(arch=[ti.cpu, ti.opengl])
def test_subscript():
    """Indexing an ndarray with a non-integer index must raise TaichiTypeError."""
    a = ti.ndarray(ti.i32, shape=(10, 10))

    @ti.kernel
    def ndarray(x: ti.types.ndarray()):
        b = x[3, 1.1]  # 1.1 is not a valid integer index

    with pytest.raises(ti.TaichiTypeError, match='indices must be integers'):
        ndarray(a)
# BUG FIX: the registration decorator was mangled to a bare '_criterion(...)'
# call; restore it, along with the staticmethod/classmethod/property
# decorators fairseq's criterion API requires on these methods.
@register_criterion('composite_loss')
class CompositeLoss(FairseqCriterion):
    """Composite criterion: averages an underlying criterion over each
    (model output, target) pair of a multi-output model."""

    @staticmethod
    def add_args(parser):
        """Add criterion-specific command-line arguments."""
        parser.add_argument('--underlying-criterion', type=str, metavar='VAL', required=True, help='underlying criterion to use for the composite loss')

    @staticmethod
    def build_underlying_criterion(args, task):
        # Temporarily swap args.criterion so the task builds the underlying one.
        saved_criterion = args.criterion
        args.criterion = args.underlying_criterion
        assert saved_criterion != args.underlying_criterion
        underlying_criterion = task.build_criterion(args)
        args.criterion = saved_criterion
        return underlying_criterion

    @classmethod
    def build_criterion(cls, args, task):
        underlying_criterion = CompositeLoss.build_underlying_criterion(args, task)

        class FakeModel(nn.Module):
            """Presents one (output, target) pair as a stand-alone model."""

            def __init__(self, model, net_out, target):
                super().__init__()
                self.model = model
                self.net_out = net_out
                self.target = target

            def forward(self, **unused):
                return self.net_out

            def get_normalized_probs(self, net_output, log_probs, sample=None):
                return self.model.get_normalized_probs(net_output, log_probs, sample=sample)

            def get_targets(self, *unused):
                return self.target

            @property
            def decoder(self):
                return self.model.decoder

        class _CompositeLoss(FairseqCriterion):

            def __init__(self, args, task, underlying_criterion):
                super().__init__(args, task)
                self.underlying_criterion = underlying_criterion

            def forward(self, model, sample, reduce=True):
                net_outputs = model(**sample['net_input'])
                targets = sample['target']
                bsz = targets[0].size(0)
                loss = net_outputs[0][0].new(1 if reduce else bsz).float().zero_()
                sample_size = 0
                logging_output = {}
                # Average the underlying criterion over all output/target pairs.
                for o, t in zip(net_outputs[0], targets):
                    m = FakeModel(model, (o, net_outputs[1]), t)
                    sample['target'] = t
                    l, ss, logging_output = self.underlying_criterion(m, sample, reduce)
                    loss += l
                    sample_size += ss
                loss.div_(len(targets))
                sample_size /= len(targets)
                logging_output['loss'] = utils.item(loss.data) if reduce else loss.data
                return loss, sample_size, logging_output

            @staticmethod
            def aggregate_logging_outputs(logging_outputs):
                return underlying_criterion.__class__.aggregate_logging_outputs(logging_outputs)

        return _CompositeLoss(args, task, underlying_criterion)
def _init_dist_pytorch(backend, **kwargs):
    """Initialise torch.distributed, binding this process to its GPU via
    the LOCAL_RANK environment variable."""
    local_rank = int(os.environ['LOCAL_RANK'])
    # Select the device before joining the process group.
    torch.cuda.set_device(local_rank)
    dist.init_process_group(backend=backend, **kwargs)
class CustomAbsoluteJointVelocity(JointVelocity):
    """JointVelocity variant that randomly drops half of the requested actions."""

    def action(self, scene: Scene, action: np.ndarray):
        # With probability 0.5, skip the action entirely.
        if np.random.random() > 0.5:
            print('Skip action!')
            return
        super().action(scene, action)
def convolve_with_waveform(func, waveform, times, fargs=None, fkwargs=None):
    """Convolve ``func`` with the derivative of ``waveform`` at each time.

    For every output time t, the integral of wave'(t - s) * func(s, ...) is
    accumulated piecewise between the waveform's time nodes, with integration
    bounds clipped at zero; the negated accumulated value is stored per time.

    Raises
    ------
    TypeError if ``waveform`` does not expose ``time_nodes``.
    """
    try:
        t_nodes = waveform.time_nodes
    except AttributeError as exc:
        # BUG FIX: chain the original error for easier debugging.
        raise TypeError(f'Unsupported waveform type of {type(waveform)}') from exc
    if fargs is None:
        fargs = []
    if fkwargs is None:
        fkwargs = {}
    n_int = len(t_nodes) - 1
    out = np.zeros_like(times, dtype=float)

    # Hoisted out of the time loop (it captures no loop variables; t is
    # passed explicitly through quadrature's args).
    def integral(quad_time, t):
        wave_eval = waveform.eval_deriv(t - quad_time)
        return wave_eval * func(quad_time, *fargs, **fkwargs)

    for it, t in enumerate(times):
        for i in range(n_int):
            b = t - t_nodes[i]
            a = t - t_nodes[i + 1]
            # Bounds before the waveform start are clipped to zero.
            a = np.maximum(a, 0.0)
            b = np.maximum(b, 0.0)
            # NOTE(review): scipy.integrate.quadrature is deprecated and
            # removed in recent SciPy; consider quad/fixed_quad going forward.
            val, _ = integrate.quadrature(integral, a, b, tol=0.0, maxiter=500, args=t)
            out[it] -= val
    return out
# BUG FIX: the registration decorator was mangled to a bare '_processor(...)'
# call; restore the registry decorator so the processor is discoverable.
@registry.register_processor('multi_sentence_bert_tokenizer')
class MultiSentenceBertTokenizer(BaseProcessor):
    """Tokenizes each sentence of a multi-sentence item with BertTokenizer
    and fuses the per-sentence samples (currently only 'concat' fusion)."""

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.fusion_strategy = config.get('fusion', 'concat')
        self._probability = config.get('mask_probability', 0)
        self.tokenizer = BertTokenizer(config)

    def __call__(self, item: Dict[str, Any]):
        texts = item['text']
        if not isinstance(texts, list):
            texts = [texts]
        processed = []
        for idx, text in enumerate(texts):
            sample = Sample()
            processed_text = self.tokenizer({'text': text})
            sample.update(processed_text)
            # The sentence index doubles as the BERT segment id.
            sample.segment_ids.fill_(idx)
            processed.append(sample)
        processed = SampleList(processed)
        if self.fusion_strategy == 'concat':
            # Flatten the per-sentence dimension into one long sequence.
            processed.input_ids = processed.input_ids.view(-1)
            processed.input_mask = processed.input_mask.view(-1)
            processed.segment_ids = processed.segment_ids.view(-1)
            processed.lm_label_ids = processed.lm_label_ids.view(-1)
        return processed.to_dict()
def descr_to_dtype(descr):
    """Reconstruct a numpy dtype from its ``descr`` representation.

    Unlike ``numpy.dtype(descr)``, this round-trips descriptions containing
    unnamed padding fields by dropping them while recording explicit offsets.
    """
    if isinstance(descr, str):
        # Plain scalar/flexible type string.
        return numpy.dtype(descr)
    if isinstance(descr, tuple):
        # (base_descr, shape) sub-array description.
        return numpy.dtype((descr_to_dtype(descr[0]), descr[1]))
    # Structured type: a list of (name, descr[, shape]) field entries.
    fields = []
    offset = 0
    for field in descr:
        if len(field) == 2:
            name, descr_str = field
            dt = descr_to_dtype(descr_str)
        else:
            name, descr_str, shape = field
            dt = numpy.dtype((descr_to_dtype(descr_str), shape))
        # Anonymous void fields are alignment padding; skip them but still
        # advance the running offset.
        is_pad = name == '' and dt.type is numpy.void and dt.names is None
        if not is_pad:
            fields.append((name, dt, offset))
        offset += dt.itemsize
    names, formats, offsets = zip(*fields)
    # Field names may be (title, name) pairs.
    titles, names = zip(*((n if isinstance(n, tuple) else (None, n)) for n in names))
    return numpy.dtype({'names': names, 'formats': formats, 'titles': titles,
                        'offsets': offsets, 'itemsize': offset})
class UniformMutation(Mutation):
    """Mutation operator producing (sequence, position, token, op) queries."""

    def __init__(self, tokenizer=None, mlm_obj=None, safe_mut=False):
        self.tokenizer = tokenizer
        self.mlm_obj = mlm_obj
        self.safe_mut = safe_mut

    def _do(self, problem, x, **kwargs):
        query_batches = problem.x_to_query_batches(x)
        batch_shape = query_batches.shape[:-1]
        num_vars = query_batches.shape[-1]
        flat_queries = query_batches.reshape(-1, num_vars)
        num_samples = flat_queries.shape[0]
        # x0: which candidate sequence each query mutates.
        x0 = flat_queries[..., 0]
        seqs = [problem.candidate_pool[i].mutant_residue_seq for i in x0]
        # x1: mutation position, wrapped to each sequence's length.
        x1 = np.random.randint(problem.xl[1], problem.xu[1], num_samples)
        x1 = np.array([idx % len(seq) for idx, seq in zip(x1, seqs)])
        # x2: replacement token, chosen by the configured strategy.
        if self.safe_mut:
            x2 = safe_vocab_mutation(self.tokenizer, problem, x0, x1)
        elif self.mlm_obj is not None:
            x2 = get_mlm_mutation(self.mlm_obj, problem, x0, x1)
        else:
            x2 = np.random.randint(0, len(self.tokenizer.sampling_vocab), num_samples)
        # x3: which edit operation to apply.
        x3 = np.random.randint(0, len(problem.op_types), num_samples)
        new_queries = np.stack([x0, x1, x2, x3], axis=-1).reshape(*batch_shape, -1)
        return problem.query_batches_to_x(new_queries)
def get_extensions():
    """Collect the C++/CUDA extension modules under ./src for setup()."""
    this_dir = os.path.dirname(os.path.abspath(__file__))
    extensions_dir = os.path.join(this_dir, 'src')
    main_file = glob.glob(os.path.join(extensions_dir, '*.cpp'))
    source_cpu = glob.glob(os.path.join(extensions_dir, 'cpu', '*.cpp'))
    source_cuda = glob.glob(os.path.join(extensions_dir, 'cuda', '*.cu'))
    os.environ['CC'] = 'g++'
    sources = main_file + source_cpu
    extension = CppExtension
    extra_compile_args = {'cxx': []}
    define_macros = []
    # Build the CUDA variant only when both a device and the toolkit exist.
    if torch.cuda.is_available() and CUDA_HOME is not None:
        extension = CUDAExtension
        sources += source_cuda
        define_macros += [('WITH_CUDA', None)]
        extra_compile_args['nvcc'] = [
            '-DCUDA_HAS_FP16=1',
            '-D__CUDA_NO_HALF_OPERATORS__',
            '-D__CUDA_NO_HALF_CONVERSIONS__',
            '-D__CUDA_NO_HALF2_OPERATORS__',
        ]
    sources = [os.path.join(extensions_dir, s) for s in sources]
    return [
        extension(
            '_ext',
            sources,
            include_dirs=[extensions_dir],
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
        )
    ]
def _down_score(level: Array, vul: Array, call_x: Array, call_xx: Array, trick: Array) -> Array:
    """Bridge penalty (negative) for a defeated contract.

    The amount depends on the number of undertricks, vulnerability, and
    whether the contract was doubled (call_x) or redoubled (call_xx).
    """
    _DOWN = jnp.array([-50, -100, -150, -200, -250, -300, -350, -400, -450, -500, -550, -600, -650], dtype=jnp.float32)
    _DOWN_VUL = jnp.array([-100, -200, -300, -400, -500, -600, -700, -800, -900, -1000, -1100, -1200, -1300], dtype=jnp.float32)
    _DOWN_X = jnp.array([-100, -300, -500, -800, -1100, -1400, -1700, -2000, -2300, -2600, -2900, -3200, -3500], dtype=jnp.float32)
    _DOWN_X_VUL = jnp.array([-200, -500, -800, -1100, -1400, -1700, -2000, -2300, -2600, -2900, -3200, -3500, -3800], dtype=jnp.float32)
    _DOWN_XX = jnp.array([-200, -600, -1000, -1600, -2200, -2800, -3400, -4000, -4600, -5200, -5800, -6400, -7000], dtype=jnp.float32)
    _DOWN_XX_VUL = jnp.array([-400, -1000, -1600, -2200, -2800, -3400, -4000, -4600, -5200, -5800, -6400, -7000, -7600], dtype=jnp.float32)
    # Tricks short of the contract; index 0 is one undertrick.
    idx = (level + 6) - trick - 1
    down = jax.lax.cond(vul, lambda: _DOWN_VUL[idx], lambda: _DOWN[idx])
    down_x = jax.lax.cond(vul, lambda: _DOWN_X_VUL[idx], lambda: _DOWN_X[idx])
    down_xx = jax.lax.cond(vul, lambda: _DOWN_XX_VUL[idx], lambda: _DOWN_XX[idx])
    # Redouble dominates double, which dominates the undoubled penalty.
    undoubled_or_x = jax.lax.cond(call_x, lambda: down_x, lambda: down)
    return jax.lax.cond(call_xx, lambda: down_xx, lambda: undoubled_or_x)
def test_range_stmt_non_interactive_wrong_commit(group):
    """Verification must fail when the commitment does not match the secret."""
    x = Secret(value=14)
    randomizer = Secret(value=group.order().random())
    g, h = make_generators(2, group)
    lo, hi = 7, 15
    com = x * g + randomizer * h
    # Corrupt the commitment by adding an extra generator term.
    tampered = com.eval() + g
    stmt = RangeStmt(tampered, g, h, lo, hi, x, randomizer)
    proof = stmt.prove()
    with pytest.raises(Exception):
        assert not stmt.verify(proof)
def find_index(input_sequence, tokens):
    """Return the start index of the first occurrence of ``tokens`` in
    ``input_sequence``, or -1 when it never occurs.

    An empty ``tokens`` matches at index 0 of any non-empty sequence.
    """
    n = len(input_sequence)
    m = len(tokens)
    for i in range(n):
        # BUG FIX: stop before running past the end of input_sequence (the
        # original indexed out of bounds on near-tail partial matches).
        if i + m > n:
            break
        found = True
        for j in range(m):
            if input_sequence[i + j] != tokens[j]:
                found = False
                break
        if found:
            return i
    return -1
class QuantumMoebiusAlgebra(Parent, UniqueRepresentation):
    """The quantum Moebius algebra of a lattice ``L`` with parameter ``q``."""

    def __init__(self, L, q=None):
        """Initialize over the parent ring of ``q`` (Laurent polynomials in
        ``q`` over ZZ when no ``q`` is given)."""
        if not L.is_lattice():
            raise ValueError('L must be a lattice')
        if q is None:
            q = LaurentPolynomialRing(ZZ, 'q').gen()
        self._q = q
        R = q.parent()
        cat = Algebras(R).WithBasis()
        if L in FiniteEnumeratedSets():
            cat = cat.Commutative().FiniteDimensional()
        self._lattice = L
        self._category = cat
        Parent.__init__(self, base=R, category=self._category.WithRealizations())

    def _repr_(self):
        txt = 'Quantum Moebius algebra of {} with q={} over {}'
        return txt.format(self._lattice, self._q, self.base_ring())

    def a_realization(self):
        """Return the natural basis realization."""
        return self.E()

    def lattice(self):
        """Return the underlying lattice."""
        return self._lattice

    class E(BasisAbstract):
        """The natural basis, indexed by lattice elements."""

        def __init__(self, M, prefix='E'):
            self._basis_name = 'natural'
            CombinatorialFreeModule.__init__(self, M.base_ring(), tuple(M._lattice), prefix=prefix, category=MoebiusAlgebraBases(M))

        def product_on_basis(self, x, y):
            """Product of basis elements via the lattice Moebius function."""
            L = self.realization_of()._lattice
            q = self.realization_of()._q
            moebius = L.moebius_function
            rank = L.rank_function()
            R = L.rank()
            j = L.join(x, y)
            return self.sum_of_terms((z, moebius(a, z) * q ** (R - rank(a)))
                                     for z in L.order_filter([j])
                                     for a in L.closed_interval(j, z))

        # BUG FIX: the '@cached_method' decorators in this class and below
        # were mangled to bare '_method' statements, which raise NameError at
        # class-creation time; restore them.
        @cached_method
        def one(self):
            """Return the multiplicative identity in the natural basis."""
            L = self.realization_of()._lattice
            q = self.realization_of()._q
            moebius = L.moebius_function
            rank = L.rank_function()
            R = L.rank()
            return self.sum_of_terms((x, moebius(y, x) * q ** (rank(y) - R))
                                     for x in L for y in L.order_ideal([x]))

    natural = E

    class C(BasisAbstract):
        """The characteristic basis."""

        def __init__(self, M, prefix='C'):
            self._basis_name = 'characteristic'
            CombinatorialFreeModule.__init__(self, M.base_ring(), tuple(M._lattice), prefix=prefix, category=MoebiusAlgebraBases(M))
            E = M.E()
            # Triangular morphism to the natural basis gives coercions both ways.
            phi = self.module_morphism(self._to_natural_basis, codomain=E, category=self.category(), triangular='lower', unitriangular=True, key=M._lattice._element_to_vertex)
            phi.register_as_coercion()
            (~phi).register_as_coercion()

        @cached_method
        def _to_natural_basis(self, x):
            """Expand the basis element indexed by ``x`` in the natural basis."""
            M = self.realization_of()
            N = M.natural()
            q = M._q
            L = M._lattice

            def poly(x, y):
                # Characteristic polynomial of the interval [x, y].
                return L.subposet(L.closed_interval(x, y)).characteristic_polynomial()

            return N.sum_of_terms((y, poly(x, y)(q=q)) for y in L.order_filter([x]))

    characteristic_basis = C

    class KL(BasisAbstract):
        """The Kazhdan-Lusztig basis."""

        def __init__(self, M, prefix='KL'):
            self._basis_name = 'Kazhdan-Lusztig'
            CombinatorialFreeModule.__init__(self, M.base_ring(), tuple(M._lattice), prefix=prefix, category=MoebiusAlgebraBases(M))
            E = M.E()
            phi = self.module_morphism(self._to_natural_basis, codomain=E, category=self.category(), triangular='lower', unitriangular=True, key=M._lattice._element_to_vertex)
            phi.register_as_coercion()
            (~phi).register_as_coercion()

        @cached_method
        def _to_natural_basis(self, x):
            """Expand via Kazhdan-Lusztig polynomials of the lattice."""
            M = self.realization_of()
            L = M._lattice
            E = M.E()
            q = M._q
            rank = L.rank_function()
            return E.sum_of_terms((y, q ** (rank(y) - rank(x)) * L.kazhdan_lusztig_polynomial(x, y)(q=q ** (-2)))
                                  for y in L.order_filter([x]))

    kazhdan_lusztig = KL
def convert_mesh_to_numpy(mesh_file, npoints=8192):
    """Load a mesh file and return exactly ``npoints`` vertices as an array.

    mesh_file: path accepted by trimesh.load_mesh.
    npoints: target number of points in the returned cloud.
    Returns an (npoints, 3) numpy array of vertex coordinates.
    """
    print('Loading point cloud.')
    mesh = trimesh.load_mesh(mesh_file, force='mesh')
    mesh = as_mesh(mesh)
    vertices = mesh.vertices
    num_points = len(vertices)
    if num_points < npoints:
        # Tile the vertices until there are at least npoints, then truncate.
        repetitions = int(np.ceil(npoints / num_points))
        vertices = np.repeat(vertices, repetitions, axis=0)[:npoints]
    elif num_points > npoints:
        # BUG FIX: the original branch only re-assigned mesh.vertices (a
        # no-op), so oversized clouds were returned with more than npoints
        # points. Randomly subsample down to npoints instead.
        indices = np.random.choice(num_points, npoints, replace=False)
        vertices = vertices[indices]
    print('Point cloud loaded..')
    return vertices
def pairwise_distance(query_features, gallery_features, query=None, gallery=None):
    """Pairwise (squared-Euclidean-style) distance matrix between feature sets.

    query_features / gallery_features: dict mapping an identifier to a 1-D
        feature tensor (all the same length).
    query / gallery: optional lists of (identifier, _, _) tuples selecting and
        ordering the rows/columns; when both are None, the self-distance
        matrix over query_features alone is returned.
    Returns an (m, n) torch tensor (or (n, n) in the self-distance case).
    """
    if (query is None) and (gallery is None):
        # BUG FIX: the original referenced an undefined name `features` here
        # (NameError); it can only have meant query_features.
        features = query_features
        n = len(features)
        x = torch.cat(list(features.values()))
        x = x.view(n, -1)
        # dist[i, j] = 2*||x_j||^2 - 2*<x_i, x_j>; equals the squared
        # Euclidean distance when the features are L2-normalized.
        dist = torch.pow(x, 2).sum(1) * 2
        dist = dist.expand(n, n) - 2 * torch.mm(x, x.t())
        return dist
    x = torch.cat([query_features[f].unsqueeze(0) for (f, _, _) in query], 0)
    y = torch.cat([gallery_features[f].unsqueeze(0) for (f, _, _) in gallery], 0)
    (m, n) = (x.size(0), y.size(0))
    x = x.view(m, -1)
    y = y.view(n, -1)
    # ||x_i||^2 + ||y_j||^2 - 2*<x_i, y_j> == squared Euclidean distance.
    dist = (torch.pow(x, 2).sum(1).unsqueeze(1).expand(m, n) + torch.pow(y, 2).sum(1).unsqueeze(1).expand(n, m).t())
    # BUG FIX: addmm_(beta, alpha, mat1, mat2) is the deprecated positional
    # overload (removed in modern torch); use the keyword form.
    dist.addmm_(x, y.t(), beta=1, alpha=-2)
    return dist
class Button():
    """A push button backed by a pybullet debug parameter.

    pybullet exposes buttons as counters that increment on every click;
    this wrapper tracks the counter so clicks can be detected edge-wise.
    """

    def __init__(self, name: str, pybullet_server_id=0):
        self.pid = pybullet_server_id
        self.btn = p.addUserDebugParameter((' %s ' % name), 1, 0, 0, pybullet_server_id)
        # Last counter value observed by is_click().
        self.n = 0

    def is_click(self) -> bool:
        """Return True exactly when the click counter changed since last call."""
        current = p.readUserDebugParameter(self.btn, self.pid)
        changed = current != self.n
        self.n = current
        return changed

    def num_clicks(self) -> int:
        """Return the total number of clicks so far."""
        return int(p.readUserDebugParameter(self.btn, self.pid))
class Flatten(Module):
    """Layer that reshapes (N, d1, d2, ...) inputs to (N, d1*d2*...) and
    restores the original shape on the backward and LRP passes."""
    def __init__(self):
        Module.__init__(self)
        # Shape of the most recent forward() input; needed to un-flatten
        # gradients and relevances.
        self.inputshape = []
    def backward(self, DY):
        # Route the incoming gradient back into the original input shape.
        return np.reshape(DY, self.inputshape)
    def forward(self, X, *args, **kwargs):
        self.inputshape = X.shape
        # Keep the batch axis, collapse all remaining axes into one.
        return np.reshape(X, [self.inputshape[0], numpy.prod(self.inputshape[1:])])
    def lrp(self, R, *args, **kwargs):
        # Relevance is redistributed one-to-one, so a reshape suffices.
        return np.reshape(R, self.inputshape)
    def to_cupy(self):
        # Swap the module-level `np` alias to cupy (GPU arrays) if available.
        # NOTE(review): `imp.find_spec` only exists if `imp` aliases
        # importlib.util in this file's imports; the stdlib `imp` module has
        # no find_spec -- confirm.
        if imp.find_spec('cupy'):
            global np
            np = cupy
    def to_numpy(self):
        # Restore the module-level `np` alias to plain numpy (CPU arrays).
        global np
        np = numpy
class BaseStream(object):
    """Minimal base class for sequential read/write item streams.

    ``mode`` may be 'w' (write), 'r' (read), or an int, which is shorthand
    for read mode with a known item count.
    """

    def __init__(self, mode='w'):
        self._i = 0
        if isinstance(mode, int):
            # Integer mode: read exactly that many items.
            count, mode = mode, 'r'
        elif mode == 'w':
            count = 0
        else:
            # Plain read mode: item count unknown.
            count = -1
        assert mode in ('r', 'w')
        self._count = count
        self._mode = mode
        self._f = None
        self._start_pos = 0

    def _activate(self, file, encode_func, decode_func):
        """Bind the backing file object and codec callables (once only)."""
        if self._f is not None:
            raise IOError('Stream object cannot be activated twice?')
        self._f = file
        self._start_pos = self._f.tell()
        self._encode = encode_func
        self._decode = decode_func

    def mode(self):
        """Return 'r' or 'w'."""
        return self._mode
def log_txt_as_img(wh, xc, size=10):
    """Render a batch of caption strings as images for logging.

    wh: (width, height) of each rendered image.
    xc: list of caption strings (one image per caption).
    size: font size in points.
    Returns a torch tensor of shape (b, 3, h, w) with values in [-1, 1].
    """
    b = len(xc)
    txts = list()
    # PERF FIX: the font and the wrap width are constant for the whole call;
    # the original re-read the font file from disk on every loop iteration.
    font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)
    # Characters per line, scaled from a 40-char baseline at 256 px width.
    nc = int(40 * (wh[0] / 256))
    for bi in range(b):
        txt = Image.new('RGB', wh, color='white')
        draw = ImageDraw.Draw(txt)
        # Hard-wrap the caption every nc characters.
        lines = '\n'.join((xc[bi][start:(start + nc)] for start in range(0, len(xc[bi]), nc)))
        try:
            draw.text((0, 0), lines, fill='black', font=font)
        except UnicodeEncodeError:
            print('Cant encode string for logging. Skipping.')
        # HWC uint8 [0, 255] -> CHW float [-1, 1].
        txt = ((np.array(txt).transpose(2, 0, 1) / 127.5) - 1.0)
        txts.append(txt)
    txts = np.stack(txts)
    txts = torch.tensor(txts)
    return txts
class SqueezeBertConfig(PretrainedConfig):
    """Configuration for a SqueezeBERT model.

    Mirrors the standard BERT configuration, with additional ``*_groups``
    parameters that control the number of groups in the grouped convolutions
    replacing the dense layers of each attention/FFN sub-layer.
    """
    pretrained_config_archive_map = SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = 'squeezebert'
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, embedding_size=768, q_groups=4, k_groups=4, v_groups=4, post_attention_groups=1, intermediate_groups=4, output_groups=4, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # Vocabulary and transformer architecture sizes.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        # Regularization.
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        # Embedding-table sizes and initialization.
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        # Group counts for the grouped convolutions (SqueezeBERT-specific).
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
class values_vec_t(object):
    """Generated LCM/skymarshal-style marshaling struct ``values_vec_t``.

    Fields: two big-endian doubles (x, y), one Vector4d (rot), a fixed
    vector of 3 Vector4d (rot_vec), a fixed vector of 3 doubles
    (scalar_vec), and a 3x3 matrix of Vector4d (list_of_lists).

    NOTE(review): the static/class-method decorators appear to have been
    stripped from the original source; they are restored here so that e.g.
    ``values_vec_t._default()`` and ``values_vec_t.decode(data)`` are
    callable as the generated code intends.
    """
    __slots__ = ['x', 'y', 'rot', 'rot_vec', 'scalar_vec', 'list_of_lists']

    def __init__(self, x=0.0, y=0.0, rot=None, rot_vec=None, scalar_vec=None, list_of_lists=None, _skip_initialize=False):
        if _skip_initialize:
            # Used by _decode_one, which fills every slot itself.
            return
        self.x = x
        self.y = y
        self.rot = (Vector4d._default() if (rot is None) else rot)
        self.rot_vec = ([Vector4d._default() for dim0 in range(3)] if (rot_vec is None) else rot_vec)
        self.scalar_vec = ([0.0 for dim0 in range(3)] if (scalar_vec is None) else scalar_vec)
        self.list_of_lists = ([[Vector4d._default() for dim1 in range(3)] for dim0 in range(3)] if (list_of_lists is None) else list_of_lists)

    @staticmethod
    def from_all_fields(x, y, rot, rot_vec, scalar_vec, list_of_lists):
        """Alternate constructor requiring every field explicitly."""
        return values_vec_t(x=x, y=y, rot=rot, rot_vec=rot_vec, scalar_vec=scalar_vec, list_of_lists=list_of_lists)

    @staticmethod
    def _skytype_meta():
        """Metadata identifying this generated type."""
        return dict(type='struct', package='codegen_cpp_test', name='values_vec_t')

    @classmethod
    def _default(cls):
        return cls()

    def __repr__(self):
        return 'values_vec_t({})'.format(', '.join(('{}={}'.format(name, repr(getattr(self, name))) for name in self.__slots__)))

    def __eq__(self, other):
        if (not isinstance(other, values_vec_t)):
            return NotImplemented
        return ((self.x == other.x) and (self.y == other.y) and (self.rot == other.rot) and (self.rot_vec == other.rot_vec) and (self.scalar_vec == other.scalar_vec) and (self.list_of_lists == other.list_of_lists))
    # Mutable struct: explicitly unhashable.
    __hash__ = None

    def encode(self):
        """Serialize to bytes, prefixed with the 8-byte type fingerprint."""
        buf = BytesIO()
        buf.write(values_vec_t._get_packed_fingerprint())
        self._encode_one(buf)
        return buf.getvalue()

    def _encode_one(self, buf):
        """Write the raw field data (no fingerprint) to ``buf``."""
        buf.write(values_vec_t._CACHED_STRUCT_0.pack(self.x, self.y))
        if hasattr(self.rot, '_get_packed_fingerprint'):
            assert (self.rot._get_packed_fingerprint() == Vector4d._get_packed_fingerprint())
        else:
            assert (self.rot._get_hash_recursive([]) == Vector4d._get_hash_recursive([]))
        self.rot._encode_one(buf)
        for i0 in range(3):
            if hasattr(self.rot_vec[i0], '_get_packed_fingerprint'):
                assert (self.rot_vec[i0]._get_packed_fingerprint() == Vector4d._get_packed_fingerprint())
            else:
                assert (self.rot_vec[i0]._get_hash_recursive([]) == Vector4d._get_hash_recursive([]))
            self.rot_vec[i0]._encode_one(buf)
        buf.write(values_vec_t._CACHED_STRUCT_1.pack(*self.scalar_vec[:3]))
        for i0 in range(3):
            for i1 in range(3):
                if hasattr(self.list_of_lists[i0][i1], '_get_packed_fingerprint'):
                    assert (self.list_of_lists[i0][i1]._get_packed_fingerprint() == Vector4d._get_packed_fingerprint())
                else:
                    assert (self.list_of_lists[i0][i1]._get_hash_recursive([]) == Vector4d._get_hash_recursive([]))
                self.list_of_lists[i0][i1]._encode_one(buf)

    @staticmethod
    def decode(data):
        """Deserialize from bytes or a binary file-like object."""
        if hasattr(data, 'read'):
            buf = T.cast(T.BinaryIO, data)
        else:
            buf = BytesIO(T.cast(bytes, data))
        if (buf.read(8) != values_vec_t._get_packed_fingerprint()):
            raise ValueError('Decode error')
        return values_vec_t._decode_one(buf)

    @staticmethod
    def _decode_one(buf):
        """Read the raw field data (no fingerprint) from ``buf``."""
        self = values_vec_t(_skip_initialize=True)
        (self.x, self.y) = values_vec_t._CACHED_STRUCT_0.unpack(buf.read(16))
        self.rot = Vector4d._decode_one(buf)
        self.rot_vec = []
        for i0 in range(3):
            self.rot_vec.append(Vector4d._decode_one(buf))
        self.scalar_vec = list(values_vec_t._CACHED_STRUCT_1.unpack(buf.read(24)))
        self.list_of_lists = []
        for i0 in range(3):
            self.list_of_lists.append([])
            for i1 in range(3):
                self.list_of_lists[i0].append(Vector4d._decode_one(buf))
        return self

    @staticmethod
    def _get_hash_recursive(parents):
        """64-bit structural hash of this type, guarding against recursion."""
        if (values_vec_t in parents):
            return 0
        newparents = (parents + [values_vec_t])
        # BUG FIX: the original lines were syntactically invalid -- the
        # 64-bit masks after '&' were missing. The standard LCM masks
        # (0xffffffffffffffff) are restored here.
        # NOTE(review): the type's base hash seed constant also appears to
        # have been stripped (only a unary '+' remains); confirm the seed
        # against the generated source.
        tmphash = ((+ Vector4d._get_hash_recursive(newparents)) + Vector4d._get_hash_recursive(newparents) + Vector4d._get_hash_recursive(newparents)) & 0xffffffffffffffff
        tmphash = (((tmphash << 1) & 0xffffffffffffffff) + (tmphash >> 63)) & 0xffffffffffffffff
        return tmphash

    _packed_fingerprint = None

    @staticmethod
    def _get_packed_fingerprint():
        """Lazily compute and cache the 8-byte big-endian type fingerprint."""
        if (values_vec_t._packed_fingerprint is None):
            values_vec_t._packed_fingerprint = struct.pack('>Q', values_vec_t._get_hash_recursive([]))
        return values_vec_t._packed_fingerprint

    def deepcopy(self, **kwargs):
        """Deep copy, optionally overriding named fields on the copy."""
        result = copy.deepcopy(self)
        for key in kwargs:
            if (not hasattr(result, key)):
                raise KeyError(('Type values_vec_t does not have attribute: ' + str(key)))
            setattr(result, key, kwargs[key])
        return result

    # Pre-compiled big-endian pack formats: (x, y) and the 3-double vector.
    _CACHED_STRUCT_0 = struct.Struct('>dd')
    _CACHED_STRUCT_1 = struct.Struct('>3d')
def get_KarelDSLSyntax(dsl_type='prob', seed=None):
    """Factory for Karel DSL syntax objects.

    dsl_type: only 'prob' (probabilistic syntax) is supported.
    seed: optional RNG seed forwarded to the syntax object.
    Raises ValueError for any other dsl_type.
    """
    if dsl_type != 'prob':
        raise ValueError('Undefined dsl syntax type')
    return KarelDSLProbSyntax(seed=seed)
def get_doctime_class(c, doc_ts, threshold=((24 * 60) * 60)):
    """Classify a candidate's sentence time relative to the document time.

    c: candidate whose parent sentence is scanned for DATETIME markup.
    doc_ts: sequence of document timestamps; the last one is used.
    threshold: seconds (default one day) beyond which the sentence time
        counts as strictly before the document time.
    Returns DOCTIME_BEFORE / DOCTIME_OVERLAP, or None when either timestamp
    is unavailable.
    """
    if (not doc_ts):
        return None
    # NOTE(review): relies on the module-level `tagged_sentences` being
    # populated before this is called -- confirm at the call sites.
    sent_ts = get_sentence_markup(c.get_parent(), 'DATETIME', tagged_sentences)
    if (not sent_ts):
        return None
    # Keep only the datetime column (last field) of each markup tuple,
    # dropping empty entries.
    sent_ts = [d for d in list(zip(*sent_ts))[(- 1)] if d]
    if (not sent_ts):
        return None
    # Compare the latest sentence time against the latest document time.
    sent_ts = max(sent_ts)
    doc_ts = doc_ts[(- 1)]
    if (doc_ts == sent_ts):
        return DOCTIME_OVERLAP
    tdelta = (doc_ts - sent_ts).total_seconds()
    # More than `threshold` seconds earlier than the document -> BEFORE.
    return (DOCTIME_BEFORE if (tdelta > threshold) else DOCTIME_OVERLAP)
def create_GC_metadata(raw_data_path, meta_data_path):
    """Convert Grand Central per-pedestrian trajectory files into one JSON
    metadata file.

    raw_data_path: directory of '<pid>.txt' files, each a flat whitespace
        sequence of 'x y frame' integer triples in 1920x1080 pixel coords.
    meta_data_path: output JSON path. The file contains
        'pedestrian_data_list' (index pid -> {t: (x, y)} with coordinates
        normalized to [0, 1]) and 'frame_data_list' (index t -> list of
        pids present at that timestep).
    """
    GC_IMAGE_WIDTH = 1920
    GC_IMAGE_HEIGHT = 1080
    dir_list = sorted(os.listdir(raw_data_path))
    # BUG FIX: the original sized the list as len(dir_list) + 1, which
    # IndexErrors whenever the pids are not exactly 1..N contiguous.
    # Size by the largest pid actually present instead (backward-compatible
    # when pids are contiguous).
    pids = [int(name.replace('.txt', '')) for name in dir_list]
    max_pid = max(pids) if pids else 0
    p_data_list = [{} for _ in range(max_pid + 1)]
    max_t = 0
    for dir_name in dir_list:
        person_trajectory_txt_path = os.path.join(raw_data_path, dir_name)
        pid = int(dir_name.replace('.txt', ''))
        with open(person_trajectory_txt_path, 'r') as f:
            trajectory_list = f.read().split()
        for i in range(len(trajectory_list) // 3):
            # Normalize pixel coordinates to [0, 1]; raw frames are sampled
            # down by a factor of 20.
            x = int(trajectory_list[3 * i]) / GC_IMAGE_WIDTH
            y = int(trajectory_list[(3 * i) + 1]) / GC_IMAGE_HEIGHT
            t = int(trajectory_list[(3 * i) + 2]) // 20
            max_t = max(max_t, t)
            p_data_list[pid][t] = (x, y)
    # Invert pedestrian->frames into frame->pedestrians.
    f_data_list = [[] for _ in range(max_t + 1)]
    for (pid, p_data) in enumerate(p_data_list):
        for t in p_data.keys():
            f_data_list[t].append(pid)
    print('pedestrian_data_list size: ', len(p_data_list))
    print('frame_data_list size: ', len(f_data_list))
    with open(meta_data_path, 'w') as f:
        json.dump({'frame_data_list': f_data_list, 'pedestrian_data_list': p_data_list}, f)
    print('create %s successfully!' % meta_data_path)
def parse_overrides(options: list):
    """Parse flat CLI override pairs into a nested config dict.

    options: flat list of alternating '--dotted.key value' pairs, e.g.
        ['--model.lr', '0.1', '--name', 'run1'] ->
        {'model': {'lr': 0.1}, 'name': 'run1'}.
    Values are eval'd when possible (numbers, booleans, literals) and kept
    as strings otherwise.
    """
    import logging
    logger = logging.getLogger(__name__)
    config = {}
    for position in range(0, len(options), 2):
        key: str = options[position]
        assert key.startswith('--')
        # BUG FIX: the original used key.strip('--'), which strips ALL
        # leading AND trailing '-' characters (mangling keys such as
        # '--x-'); slice off the exact two-character prefix instead.
        key = key[2:]
        value_str: str = options[position + 1]
        (key, value_str) = (key.strip(), value_str.strip())
        remaining = key.split('.')
        try:
            # NOTE(security): eval on user-supplied text -- acceptable only
            # for trusted CLI input; do not feed untrusted data here.
            value = eval(value_str)
        except Exception:
            # Container-like expressions are expected to eval cleanly;
            # re-raise so the user sees the real error.
            if ('newdict' in value_str) or ('Container' in value_str):
                raise
            value = value_str
        logger.debug(f'{key} = {value}')
        # Walk/create the nested dicts for each dotted component.
        target_config = config
        for (i, field_name) in enumerate(remaining):
            if i == (len(remaining) - 1):
                target_config[field_name] = value
            else:
                target_config.setdefault(field_name, {})
                target_config = target_config[field_name]
    return config
def test_integer():
    """from_json with an integer schema: rejects floats, parses single and
    line-delimited arrays, and handles the empty array."""
    # A float literal (3.0) must not satisfy the "integer" schema.
    with pytest.raises(ValueError):
        ak.operations.from_json(' [ 1 ,2 ,3.0, 4, 5] \n ', schema={'type': 'array', 'items': {'type': 'integer'}})
    result = ak.operations.from_json(' [ 1 ,2 ,3, 4, 5] \n ', schema={'type': 'array', 'items': {'type': 'integer'}})
    assert (result.to_list() == [1, 2, 3, 4, 5])
    assert (str(result.type) == '5 * int64')
    # Two concatenated documents, parsed in line-delimited mode.
    result = ak.operations.from_json((' [ 1 ,2 ,3, 4, 5] \n ' * 2), schema={'type': 'array', 'items': {'type': 'integer'}}, line_delimited=True)
    assert (result.to_list() == ([1, 2, 3, 4, 5] * 2))
    assert (str(result.type) == '10 * int64')
    # The empty array still carries the int64 item type.
    result = ak.operations.from_json('[ ]', schema={'type': 'array', 'items': {'type': 'integer'}})
    assert (result.to_list() == [])
    assert (str(result.type) == '0 * int64')
def builder_inited_handler(app):
    """Sphinx 'builder-inited' event hook: run the doc preparation scripts.

    app: the Sphinx application (unused; required by the event signature).
    Raises subprocess.CalledProcessError if any script exits non-zero --
    the original silently ignored failures, which let broken docs build.
    """
    for script in ('./cp_origin_docs.sh', './merge_docs.sh', './stats.py'):
        subprocess.run([script], check=True)
def augment_many_model_functions_with_bundled_inputs(model: torch.jit.ScriptModule, inputs: Dict[(Callable, Optional[Sequence[Tuple[(Any, ...)]]])], _receive_inflate_expr: Optional[List[str]]=None, info: Optional[Dict[(Callable, List[str])]]=None, skip_size_check=False) -> None:
    """Attach bundled example inputs to several functions of a ScriptModule.

    For every (function -> list of argument tuples) entry this stores a
    compact ("deflated") copy of the inputs as a module attribute and
    defines TorchScript accessors that re-inflate them on demand:
    `get_all_bundled_inputs_for_<fn>`, plus the classic `get_all_bundled_inputs`
    / `get_num_bundled_inputs` pair when `fn` is `forward`, and a
    `get_bundled_inputs_functions_and_info` index over all augmented
    functions.

    inputs: map from a scripted function of `model` to its example input
        tuples; an entry may be None if the model already defines
        `_generate_bundled_inputs_for_<fn>` itself.
    _receive_inflate_expr: optional out-parameter collecting the generated
        inflation expressions (for debugging/tests).
    info: optional per-function free-form documentation strings.
    skip_size_check: forwarded to the per-argument deflation helper.
    Raises Exception on a non-ScriptModule, empty `inputs`, double
    augmentation, or inconsistent per-function input specs.
    """
    if (not isinstance(model, torch.jit.ScriptModule)):
        raise Exception('Only ScriptModule is supported.')
    if (not inputs):
        raise Exception('Please provide inputs for at least 1 function')
    # Refuse to augment twice: the accessor methods can only be defined once.
    if (hasattr(model, 'get_all_bundled_inputs') or hasattr(model, 'get_bundled_inputs_functions_and_info')):
        raise Exception("Models can only be augmented with bundled inputs once. This Model seems to have already been augmented with bundled inputs. Please start afresh with one that doesn't have bundled inputs.")
    # Accumulates the TorchScript body of get_bundled_inputs_functions_and_info.
    get_bundled_inputs_functions_and_info_template = ''
    for (function, input_list) in inputs.items():
        function_name = function.__name__
        if ((input_list is not None) and (not isinstance(input_list, Sequence))):
            raise TypeError('Error inputs for function {0} is not a Sequence'.format(function_name))
        # Attribute type is List[Tuple[<arg types>]], derived from the
        # function schema (skipping the implicit `self` argument).
        function_arg_types = [arg.type for arg in function.schema.arguments[1:]]
        deflated_inputs_type: ListType = ListType(TupleType(function_arg_types))
        model._c._register_attribute('_bundled_inputs_deflated_{name}'.format(name=function_name), deflated_inputs_type, [])
        if hasattr(model, ('_generate_bundled_inputs_for_' + function_name)):
            # The model supplies its own generator; providing inputs too is
            # ambiguous, so reject that combination.
            if (input_list is not None):
                raise Exception('inputs[{name}] is not None, but _generate_bundled_inputs_for_{name} is already defined'.format(name=function_name))
        elif ((input_list is None) or (len(input_list) == 0)):
            raise Exception('inputs for {name} must be specified if _generate_bundled_inputs_for_{name} is not already defined'.format(name=function_name))
        else:
            # Deflate each argument of each example input and build, as
            # source text, the expression that re-inflates them.
            deflated_inputs = []
            parts = []
            for (inp_idx, args) in enumerate(input_list):
                if ((not isinstance(args, Tuple)) and (not isinstance(args, List))):
                    raise TypeError('Error bundled input for function {0} idx: {1} is not a Tuple or a List'.format(function_name, inp_idx))
                deflated_args = []
                parts.append('(')
                for (arg_idx, arg) in enumerate(args):
                    inflate_helper_fn_name = _get_inflate_helper_fn_name(arg_idx, inp_idx, function_name)
                    (deflated, inflater, helper_definition) = _inflate_expr(arg, f'deflated[{inp_idx}][{arg_idx}]', inflate_helper_fn_name, skip_size_check=skip_size_check)
                    deflated_args.append(deflated)
                    parts.append(f'        {inflater},')
                    # Some arguments need a dedicated TorchScript helper to
                    # inflate; define it on the model as we go.
                    if helper_definition:
                        model.define(textwrap.dedent(helper_definition))
                deflated_inputs.append(tuple(deflated_args))
                parts.append('    ),')
            parts.append('')
            expr = '\n'.join(parts)
            if (_receive_inflate_expr is not None):
                _receive_inflate_expr.append(expr)
            setattr(model, '_bundled_inputs_deflated_{name}'.format(name=function_name), deflated_inputs)
            definition = textwrap.dedent('\n            def _generate_bundled_inputs_for_{name}(self):\n                deflated = self._bundled_inputs_deflated_{name}\n                return [\n            {expr}\n                ]\n            ').format(expr=expr, name=function_name)
            model.define(definition)
        # Public accessor returning the (re-inflated) bundled inputs.
        model.define(textwrap.dedent('\n            def get_all_bundled_inputs_for_{name}(self):\n                all_inputs = self._generate_bundled_inputs_for_{name}()\n                assert all_inputs is not None\n                return all_inputs\n            ').format(name=function_name))
        inputs_info = (repr(info[function]) if (info and (function in info)) else '[]')
        get_bundled_inputs_functions_and_info_template += "\n            temp_dict : Dict[str,List[str]] = {{}}\n            info: List[str] = {info}\n\n            temp_dict['info'] = info\n            temp_dict['get_inputs_function_name'] = ['get_all_bundled_inputs_for_{name}']\n            all_inputs['{name}'] = temp_dict\n            ".format(name=function_name, info=inputs_info)
        # Backwards-compatible accessors for the common `forward` case.
        if (function_name == 'forward'):
            model.define(textwrap.dedent('\n                def get_all_bundled_inputs(self):\n                    return self.get_all_bundled_inputs_for_forward()\n                '))
            model.define(textwrap.dedent('\n                def get_num_bundled_inputs(self):\n                    return len(self.get_all_bundled_inputs_for_forward())\n                '))
    model.define(textwrap.dedent('\n        def get_bundled_inputs_functions_and_info(self):\n            all_inputs : Dict[str, Dict[str,List[str]]] = {{}}\n            {template}\n            return all_inputs\n        '.format(template=get_bundled_inputs_functions_and_info_template)))
class Vertex():
    """A graph vertex carrying a community assignment (community detection)."""
    def __init__(self, vid, cid, nodes, k_in=0):
        self._vid = vid      # vertex id
        self._cid = cid      # community id this vertex belongs to
        self._nodes = nodes  # the underlying nodes represented by this vertex
        self._kin = k_in     # count of internal (intra-community) edges
class DemoConfig():
    """Hyper-parameter bundle for the demo model."""
    num_inducing = 7                      # number of inducing points
    inner_layer_qsqrt_factor = 0.001      # scale factor for inner-layer q_sqrt init
    between_layer_noise_variance = 0.001  # noise variance between layers
    likelihood_noise_variance = 0.01      # observation (likelihood) noise variance
    whiten = True                         # use the whitened parameterization
def summary(results):
    """Aggregate each metric in ``results`` into a mean and a 95% bootstrap CI.

    For every key k the report gains `k` (mean) and `k_ci` (max absolute
    deviation of the bootstrap 95% CI from the mean). The 'steps' and
    'probs' entries are reduced from the per-batch tensors in
    results['steps'] first.
    """
    report = {}
    for key in results:
        if key in ('steps', 'probs'):
            # NOTE(review): both special-cased keys read results['steps'],
            # so 'probs' reports statistics of the step tensors; preserved
            # exactly as the original behaved -- confirm intent upstream.
            data = np.array([t.mean().cpu().detach().numpy() for t in results['steps']])
        else:
            data = results[key]
        boots_series = sns.algorithms.bootstrap(data, func=np.mean, n_boot=1000)
        report[key] = np.mean(data)
        report[f'{key}_ci'] = np.max(np.abs(sns.utils.ci(boots_series, 95) - report[key]))
    return report
# BUG FIX: the four parametrize decorators were present only as bare
# '.parametrize(...)' lines (syntax errors -- the '@pytest.mark' prefix had
# been stripped); they are restored here.
@pytest.mark.parametrize('seed', [313])
@pytest.mark.parametrize('op', ['+', '-', '*', '/', '**'])
@pytest.mark.parametrize('x_var, y_var', [(False, False), (False, True), (True, False)])
@pytest.mark.parametrize('shape', [(2, 3, 4), (0,)])
def test_ndarray_arithmetic_ops2(seed, op, x_var, y_var, shape):
    """Check NdArray/Variable binary arithmetic against numpy, plus the
    in-place variants when both operands are NdArrays."""
    rng = np.random.RandomState(seed)
    vx_data = rng.randn(*shape).astype(np.float32)
    vy_data = rng.randn(*shape).astype(np.float32)
    if (op == '**') and (vx_data.size > 0):
        # Shift the base to be >= 1 so '**' stays real-valued.
        vx_data += (-vx_data.min()) + 1.0
    if x_var:
        vx = nn.Variable.from_numpy_array(vx_data)
    else:
        vx = nn.NdArray.from_numpy_array(vx_data)
    if y_var:
        vy = nn.Variable.from_numpy_array(vy_data)
    else:
        vy = nn.NdArray.from_numpy_array(vy_data)
    # Out-of-place op must match the numpy reference.
    vz = eval('vx {0} vy'.format(op))
    ref_z = eval('vx_data {0} vy_data'.format(op))
    assert_allclose(ref_z, vz.data)
    if x_var:
        # In-place ops are only checked for the NdArray/NdArray case.
        return
    vx_bak = vx
    if (op == '+'):
        vx += vy
    elif (op == '-'):
        vx -= vy
    elif (op == '*'):
        vx *= vy
    elif (op == '/'):
        vx /= vy
    elif (op == '**'):
        vx **= vy
    assert_allclose(vx.data, vz.data)
    if (op == '*') or (op == '/') or (op == '**'):
        # These in-place forms are allowed to rebind; identity not required.
        return
    # += and -= must mutate in place (same object).
    assert (vx is vx_bak)
class DQN(nn.Module):
    """Nature-DQN style convolutional network for (c, h, w) image observations.

    With ``features_only=True`` the final linear head is omitted and the
    flattened CNN features are returned instead of per-action values.
    """
    def __init__(self, c: int, h: int, w: int, action_shape: Sequence[int], device: Union[(str, int, torch.device)]='cpu', features_only: bool=False) -> None:
        super().__init__()
        self.device = device
        self.net = nn.Sequential(nn.Conv2d(c, 32, kernel_size=8, stride=4), nn.ReLU(inplace=True), nn.Conv2d(32, 64, kernel_size=4, stride=2), nn.ReLU(inplace=True), nn.Conv2d(64, 64, kernel_size=3, stride=1), nn.ReLU(inplace=True), nn.Flatten())
        with torch.no_grad():
            # Infer the flattened feature size with a dummy forward pass.
            self.output_dim = np.prod(self.net(torch.zeros(1, c, h, w)).shape[1:])
        if (not features_only):
            # Append the value head: features -> 512 -> prod(action_shape).
            self.net = nn.Sequential(self.net, nn.Linear(self.output_dim, 512), nn.ReLU(inplace=True), nn.Linear(512, np.prod(action_shape)))
            self.output_dim = np.prod(action_shape)
    # NOTE(review): '_type_check' looks like a stripped decorator (possibly
    # @no_type_check) -- confirm against the original source.
    _type_check
    def forward(self, x: Union[(np.ndarray, torch.Tensor)], state: Optional[Any]=None, info: Optional[Dict[(str, Any)]]=None) -> Tuple[(torch.Tensor, Any)]:
        """Map observations ``x`` to (values/features, state); state passes through."""
        x = torch.as_tensor(x, device=self.device, dtype=torch.float32)
        return (self.net(x), state)
# BUG FIX: the two patch decorators were present only as bare string
# expressions (their '@patch' had been stripped); they are restored here.
# Decorators apply bottom-up, so `entry_points_mock` patches
# sdv.iter_entry_points and `warning_mock` patches warnings.warn, matching
# the parameter order.
@patch('warnings.warn')
@patch('sdv.iter_entry_points')
def test__find_addons_bad_addon(entry_points_mock, warning_mock):
    """_find_addons warns (rather than raising) when an entry point fails to load."""
    def entry_point_error():
        raise ValueError()

    bad_entry_point = Mock()
    bad_entry_point.name = 'bad_entry_point'
    bad_entry_point.module_name = 'bad_module'
    bad_entry_point.load.side_effect = entry_point_error
    entry_points_mock.return_value = [bad_entry_point]
    msg = 'Failed to load "bad_entry_point" from "bad_module".'
    _find_addons()
    entry_points_mock.assert_called_once_with(group='sdv_modules')
    warning_mock.assert_called_once_with(msg)
class TestComposed(TestCase):
    """A Composed representation of (rep, rep) must equal the plain
    representation concatenated with itself along the feature axis."""
    def setUp(self):
        # Random dataset: n structures with 1-9 atoms each, positions in a
        # 5-unit box and atomic numbers in {1, 2}.
        self.n = 100
        self.n_atoms = np.random.randint(1, high=10, size=self.n)
        r = [(5 * np.random.random((na, 3))) for na in self.n_atoms]
        self.r = np.array(r, dtype=object)
        self.z = np.array([np.random.randint(1, high=3, size=na) for na in self.n_atoms], dtype=object)
        self.data = Dataset(z=self.z, r=self.r)
        # Scratch directory next to this test file.
        self.tmpdir = ((pathlib.Path(__file__) / '..').resolve() / 'tmp_test_composed')
        self.tmpdir.mkdir(exist_ok=True)
    def tearDown(self):
        shutil.rmtree(self.tmpdir)
    def test_mbtr_1(self):
        # Composing an MBTR with itself doubles the flattened feature vector.
        mbtr = MBTR1(start=0, stop=4, num=5, geomf='count', weightf='unity', broadening=0.001, eindexf='noreversals', aindexf='noreversals', elems=[1, 2, 3], flatten=True)
        composed = Composed(mbtr, mbtr.get_config())
        computed = composed(self.data).array
        plain_computed = mbtr(self.data).array
        np.testing.assert_array_equal(computed, np.concatenate((plain_computed, plain_computed), axis=1))
    def test_sf(self):
        # Same doubling check for symmetry functions (ragged per-structure
        # output), pointing CML_SCRATCH at the scratch dir.
        with unittest.mock.patch.dict('os.environ', {'CML_SCRATCH': str(self.tmpdir)}):
            from cmlkit.representation.sf import SymmetryFunctions
            sf = SymmetryFunctions(elems=[1, 2, 3], universal=[{'rad_centered': {'n': 10, 'cutoff': 6.0}}], context={'cleanup': False})
            composed = Composed(sf, sf.get_config())
            computed = composed(self.data).ragged
            single_manual = sf(self.data).ragged
            for i in range(self.data.n):
                np.testing.assert_array_equal(computed[i], np.concatenate([single_manual[i], single_manual[i]], axis=1))
# NOTE(review): the bare '_method' markers in this block look like stripped
# decorators (likely @cached_method on the methods; unclear for the class
# itself) -- confirm against the original source.
_method
class PeriodLattice_ell(PeriodLattice):
    """Period lattice of an elliptic curve over a number field, with respect
    to a chosen real or complex embedding of the base field.

    Provides period bases, elliptic logarithms and exponentials, reduction
    of complex numbers modulo the lattice, and related invariants.
    """
    def __init__(self, E, embedding=None):
        """Initialize from the curve ``E`` and an embedding of its base field.

        When no embedding is given, a real (AA) embedding is preferred and
        the first complex (QQbar) one is used as fallback; a supplied
        embedding is refined to infinite precision.
        """
        self.E = E
        K = E.base_field()
        if (embedding is None):
            embs = K.embeddings(AA)
            real = (len(embs) > 0)
            if (not real):
                embs = K.embeddings(QQbar)
            embedding = embs[0]
        else:
            embedding = refine_embedding(embedding, Infinity)
            real = embedding(K.gen()).imag().is_zero()
        self.embedding = embedding
        # real_flag: 0 = non-real embedding; +1 = real embedding with
        # positive discriminant (two real components); -1 = real embedding
        # with negative discriminant (one real component).
        self.real_flag = 0
        if real:
            self.real_flag = (+ 1)
            if (embedding(E.discriminant()) < 0):
                self.real_flag = (- 1)
        self.Ebar = self.E.change_ring(self.embedding)
        self.f2 = self.Ebar.two_division_polynomial()
        # Roots e1, e2, e3 of the 2-division polynomial, ordered according
        # to the real type of the embedding.
        if (self.real_flag == 1):
            self._ei = self.f2.roots(AA, multiplicities=False)
            self._ei.sort()
            (e1, e2, e3) = self._ei
        elif (self.real_flag == (- 1)):
            self._ei = self.f2.roots(QQbar, multiplicities=False)
            self._ei = sorted(self._ei, key=(lambda z: z.imag()))
            (e1, e3, e2) = self._ei
            # The middle root is real in this case.
            e3 = AA(e3)
            self._ei = [e1, e2, e3]
        else:
            self._ei = self.f2.roots(QQbar, multiplicities=False)
            (e1, e2, e3) = self._ei
        # Square roots of root differences, used by the AGM period formulas.
        self._abc = ((e3 - e1).sqrt(), (e3 - e2).sqrt(), (e2 - e1).sqrt())
        PeriodLattice.__init__(self, base_ring=ZZ, rank=2, degree=1, sparse=False)
    def __richcmp__(self, other, op):
        """Compare by curve first, then by the embedding's image of the field generator."""
        if (not isinstance(other, PeriodLattice_ell)):
            return NotImplemented
        lx = self.E
        rx = other.E
        if (lx != rx):
            return richcmp_not_equal(lx, rx, op)
        a = self.E.base_field().gen()
        return richcmp(self.embedding(a), other.embedding(a), op)
    def __repr__(self):
        # Over QQ there is a unique embedding, so don't print it.
        if (self.E.base_field() is QQ):
            return ('Period lattice associated to %s' % self.E)
        return ('Period lattice associated to %s with respect to the embedding %s' % (self.E, self.embedding))
    def __call__(self, P, prec=None):
        """Shortcut for :meth:`elliptic_logarithm`."""
        return self.elliptic_logarithm(P, prec)
    _method
    def basis(self, prec=None, algorithm='sage'):
        """Return a basis (w1, w2) of the lattice, real-normalised when possible."""
        if self.is_real():
            return self._compute_periods_real(prec=prec, algorithm=algorithm)
        else:
            return self._compute_periods_complex(prec=prec)
    _method
    def gens(self, prec=None, algorithm='sage'):
        """Return the basis as a tuple of generators."""
        return tuple(self.basis(prec=prec, algorithm=algorithm))
    _method
    def normalised_basis(self, prec=None, algorithm='sage'):
        """Return a basis normalised so that w1/w2 lies in the fundamental domain."""
        (w1, w2) = self.basis(prec=prec, algorithm=algorithm)
        (periods, _) = normalise_periods(w1, w2)
        return periods
    _method
    def tau(self, prec=None, algorithm='sage'):
        """Return the normalised period ratio tau = w1/w2."""
        (w1, w2) = self.normalised_basis(prec=prec, algorithm=algorithm)
        return (w1 / w2)
    _method
    def _compute_periods_real(self, prec=None, algorithm='sage'):
        """Compute (real period, complex second period) for a real embedding.

        Uses PARI's ellinit/omega when algorithm='pari', otherwise the AGM
        on the cached square roots of root differences.
        """
        if (prec is None):
            prec = 53
        R = RealField(prec)
        C = ComplexField(prec)
        if (algorithm == 'pari'):
            ainvs = self.E.a_invariants()
            if (self.E.base_field() is not QQ):
                # PARI needs real a-invariants; push through the embedding.
                ainvs = [C(self.embedding(ai)).real() for ai in ainvs]
            E_pari = pari.ellinit(ainvs, precision=prec)
            (w1, w2) = E_pari.omega()
            return (R(w1), C(w2))
        if (algorithm != 'sage'):
            raise ValueError("invalid value of 'algorithm' parameter")
        pi = R.pi()
        if (self.real_flag == 1):
            # Positive discriminant: both AGMs are real.
            (a, b, c) = (R(x) for x in self._abc)
            w1 = R((pi / a.agm(b)))
            w2 = C(0, (pi / a.agm(c)))
        else:
            # Negative discriminant: work with the complex square root.
            a = C(self._abc[0])
            (x, y, r) = (a.real().abs(), a.imag().abs(), a.abs())
            w1 = R((pi / r.agm(x)))
            w2 = R((pi / r.agm(y)))
            w2 = (C(w1, w2) / 2)
        return (w1, w2)
    _method
    def _compute_periods_complex(self, prec=None, normalise=True):
        """Compute a period basis for a non-real embedding via the complex AGM."""
        if (prec is None):
            prec = RealField().precision()
        C = ComplexField(prec)
        pi = C.pi()
        (a, b, c) = (C(x) for x in self._abc)
        # Choose signs so the AGMs use the "right" square roots.
        if ((a + b).abs() < (a - b).abs()):
            b = (- b)
        if ((a + c).abs() < (a - c).abs()):
            c = (- c)
        w1 = (pi / a.agm(b))
        w2 = ((pi * C.gen()) / a.agm(c))
        # Ensure the period ratio has positive imaginary part.
        if ((w1 / w2).imag() < 0):
            w2 = (- w2)
        if normalise:
            (w1w2, mat) = normalise_periods(w1, w2)
            return w1w2
        return (w1, w2)
    def is_real(self):
        """Return True if the chosen embedding is real."""
        return (self.real_flag != 0)
    def is_rectangular(self) -> bool:
        """Return True if the (real) lattice is rectangular (two real components)."""
        if self.is_real():
            return (self.real_flag == (+ 1))
        raise RuntimeError('Not defined for non-real lattices.')
    def real_period(self, prec=None, algorithm='sage'):
        """Return the real period (first basis element) of a real lattice."""
        if self.is_real():
            return self.basis(prec, algorithm)[0]
        raise RuntimeError('Not defined for non-real lattices.')
    def omega(self, prec=None, bsd_normalise=False):
        """Return the real period times the number of real components, or
        the (optionally BSD-doubled) complex area for non-real lattices."""
        if self.is_real():
            n_components = (2 if (self.real_flag == 1) else 1)
            return (self.real_period(prec) * n_components)
        else:
            bsd_factor = (2 if bsd_normalise else 1)
            return (self.complex_area(prec) * bsd_factor)
    _method
    def basis_matrix(self, prec=None, normalised=False):
        """Return the 2x2 real matrix whose rows are the basis periods."""
        from sage.matrix.constructor import Matrix
        if normalised:
            return Matrix([list(w) for w in self.normalised_basis(prec)])
        (w1, w2) = self.basis(prec)
        if self.is_real():
            # w1 is real in this case.
            return Matrix([[w1, 0], list(w2)])
        else:
            return Matrix([list(w) for w in (w1, w2)])
    def complex_area(self, prec=None):
        """Return the area of a fundamental domain of the lattice."""
        (w1, w2) = self.basis(prec)
        return (w1 * w2.conjugate()).imag().abs()
    def sigma(self, z, prec=None, flag=0):
        """Evaluate the Weierstrass sigma function at z (via PARI; QQ only)."""
        if (prec is None):
            prec = RealField().precision()
        try:
            return self.E.pari_curve().ellsigma(z, flag, precision=prec)
        except AttributeError:
            raise NotImplementedError('sigma function not yet implemented for period lattices of curves not defined over Q')
    def curve(self):
        """Return the underlying elliptic curve."""
        return self.E
    def ei(self):
        """Return the roots [e1, e2, e3] of the 2-division polynomial."""
        return self._ei
    def coordinates(self, z, rounding=None):
        """Return the real coordinates (u, v) of z with respect to the basis.

        rounding: None for exact reals, 'round' or 'floor' for integers.
        """
        # Coerce z into a complex field, accepting real input too.
        C = z.parent()
        if isinstance(C, sage.rings.abc.RealField):
            C = ComplexField(C.precision())
            z = C(z)
        elif isinstance(C, sage.rings.abc.ComplexField):
            pass
        else:
            try:
                C = ComplexField()
                z = C(z)
            except TypeError:
                raise TypeError(('%s is not a complex number' % z))
        prec = C.precision()
        from sage.matrix.constructor import Matrix
        from sage.modules.free_module_element import vector
        # Solve z = u*w1 + v*w2 by inverting the basis matrix.
        if self.real_flag:
            (w1, w2) = self.basis(prec)
            M = (Matrix([[w1, 0], list(w2)]) ** (- 1))
        else:
            (w1, w2) = self.normalised_basis(prec)
            M = (Matrix([list(w1), list(w2)]) ** (- 1))
        (u, v) = (vector(z) * M)
        if (rounding == 'round'):
            return (u.round(), v.round())
        if (rounding == 'floor'):
            return (u.floor(), v.floor())
        return (u, v)
    def reduce(self, z):
        """Reduce z modulo the lattice into a fundamental domain.

        For real lattices the result is additionally snapped onto the real
        axis or the half-period line, as appropriate.
        """
        # Coerce z into a complex field, accepting real input too.
        C = z.parent()
        if isinstance(C, sage.rings.abc.RealField):
            C = ComplexField(C.precision())
            z = C(z)
        elif isinstance(C, sage.rings.abc.ComplexField):
            pass
        else:
            try:
                C = ComplexField()
                z = C(z)
            except TypeError:
                raise TypeError(('%s is not a complex number' % z))
        prec = C.precision()
        if self.real_flag:
            (w1, w2) = self.basis(prec)
        else:
            (w1, w2) = self.normalised_basis(prec)
        # Subtract the integer part of the lattice coordinates.
        (u, v) = self.coordinates(z, rounding='floor')
        z = ((z - (u * w1)) - (v * w2))
        if (self.real_flag == 0):
            return z
        if (self.real_flag == (- 1)):
            # One real component: snap onto the real axis.
            k = (z.imag() / w2.imag()).round()
            z = (z - (k * w2))
            return C(z.real(), 0)
        # Two real components: land on the real axis or the half-period line.
        if (((2 * z.imag()) / w2.imag()).round() % 2):
            return C(z.real(), (w2.imag() / 2))
        else:
            return C(z.real(), 0)
    def e_log_RC(self, xP, yP, prec=None, reduce=True):
        """Elliptic logarithm of the point with embedded coordinates (xP, yP).

        Core AGM-based routine with separate branches for non-real
        embeddings, negative discriminant, and positive discriminant
        (where points on the "egg" get an extra half-period).
        """
        if (prec is None):
            prec = RealField().precision()
        # Work at higher internal precision to absorb AGM round-off.
        prec2 = (prec + 40)
        R = RealField(prec2)
        C = ComplexField(prec2)
        (e1, e2, e3) = self._ei
        (a1, a2, a3) = (self.embedding(a) for a in self.E.ainvs()[:3])
        wP = (((2 * yP) + (a1 * xP)) + a3)
        if wP.is_zero():
            # 2-torsion point: the logarithm is a half-period.
            (w1, w2) = self._compute_periods_complex(prec, normalise=False)
            if (xP == e1):
                z = (w2 / 2)
            elif (xP == e3):
                z = (w1 / 2)
            else:
                z = ((w1 + w2) / 2)
            if reduce:
                z = self.reduce(z)
            return z
        if (self.real_flag == 0):
            # Non-real embedding: full complex AGM iteration.
            a = C((e1 - e3).sqrt())
            b = C((e1 - e2).sqrt())
            if ((a + b).abs() < (a - b).abs()):
                b = (- b)
            r = C(((xP - e3) / (xP - e2)).sqrt())
            if (r.real() < 0):
                r = (- r)
            t = ((- C(wP)) / ((2 * r) * (xP - e2)))
            # Iterate the AGM until r converges to 1.
            eps = (R(1) >> prec)
            while True:
                s = ((b * r) + a)
                (a, b) = (((a + b) / 2), (a * b).sqrt())
                if ((a + b).abs() < (a - b).abs()):
                    b = (- b)
                r = ((a * (r + 1)) / s).sqrt()
                if ((r.abs() - 1).abs() < eps):
                    break
                if (r.real() < 0):
                    r = (- r)
                t *= r
            z = ((a / t).arctan() / a)
            z = ComplexField(prec)(z)
            if reduce:
                z = self.reduce(z)
            return z
        if (self.real_flag == (- 1)):
            # Real embedding, negative discriminant: one real component.
            z = C(self._abc[0])
            (a, y, b) = (z.real(), z.imag(), z.abs())
            uv = (xP - e1).sqrt()
            (u, v) = (uv.real().abs(), uv.imag().abs())
            r = ((u * a) / ((u * a) + (v * y))).sqrt()
            t = (((- r) * R(wP)) / (2 * ((u ** 2) + (v ** 2))))
            on_egg = False
        else:
            # Real embedding, positive discriminant: two real components.
            a = R((e3 - e1)).sqrt()
            b = R((e3 - e2)).sqrt()
            if ((a + b).abs() < (a - b).abs()):
                b = (- b)
            # Points with xP < e3 lie on the bounded component ("egg").
            on_egg = (xP < e3)
            if on_egg:
                r = (a / R((e3 - xP)).sqrt())
                t = ((r * R(wP)) / (2 * R((xP - e1))))
            else:
                r = R(((xP - e1) / (xP - e2))).sqrt()
                t = ((- R(wP)) / ((2 * r) * R((xP - e2))))
        # Real AGM iteration shared by both real branches.
        eps = (R(1) >> prec)
        while True:
            s = ((b * r) + a)
            (a, b) = (((a + b) / 2), (a * b).sqrt())
            r = ((a * (r + 1)) / s).sqrt()
            if ((r - 1).abs() < eps):
                break
            t *= r
        z = ((a / t).arctan() / a)
        if on_egg:
            # Egg points pick up half the imaginary period.
            (w1, w2) = self._compute_periods_real(prec)
            z += (w2 / 2)
        z = ComplexField(prec)(z)
        if reduce:
            z = self.reduce(z)
        return z
    def elliptic_logarithm(self, P, prec=None, reduce=True):
        """Elliptic logarithm of a point P on the underlying curve."""
        if (P.curve() is not self.E):
            raise ValueError('Point is on the wrong curve')
        if (prec is None):
            prec = RealField().precision()
        if P.is_zero():
            return ComplexField(prec)(0)
        # Embed the affine coordinates, then delegate to e_log_RC.
        (xP, yP) = (self.embedding(coord) for coord in P.xy())
        return self.e_log_RC(xP, yP, prec, reduce=reduce)
    def elliptic_exponential(self, z, to_curve=True):
        """Inverse of the elliptic logarithm.

        Returns a point on the embedded curve when ``to_curve`` is True,
        otherwise the short-Weierstrass coordinate pair (x, y).
        """
        # Coerce z into a complex field, remembering whether it was real.
        C = z.parent()
        z_is_real = False
        if isinstance(C, sage.rings.abc.RealField):
            z_is_real = True
            C = ComplexField(C.precision())
            z = C(z)
        elif isinstance(C, sage.rings.abc.ComplexField):
            z_is_real = z.is_real()
        else:
            try:
                C = ComplexField()
                z = C(z)
                z_is_real = z.is_real()
            except TypeError:
                raise TypeError(('%s is not a complex number' % z))
        prec = C.precision()
        # If z is (numerically) a lattice point, return the point at infinity.
        eps = (C(2) ** ((- 0.8) * prec)).real()
        if all((((t.round() - t).abs() < eps) for t in self.coordinates(z))):
            K = z.parent()
            if to_curve:
                return self.curve().change_ring(K)(0)
            else:
                return (K(Infinity), K(Infinity))
        # Weierstrass p-function and derivative via PARI's ellwp.
        (x, y) = pari(self.basis(prec=prec)).ellwp(z, flag=1)
        (x, y) = (C(t) for t in (x, y))
        if (self.real_flag and z_is_real):
            x = x.real()
            y = y.real()
        if to_curve:
            # Shift from the short Weierstrass model back to the curve's model.
            K = x.parent()
            v = refine_embedding(self.embedding, Infinity)
            (a1, a2, a3, a4, a6) = (K(v(a)) for a in self.E.ainvs())
            b2 = K(v(self.E.b2()))
            x = (x - (b2 / 12))
            y = ((y - ((a1 * x) + a3)) / 2)
            EK = EllipticCurve(K, [a1, a2, a3, a4, a6])
            return EK.point((x, y, K.one()), check=False)
        else:
            return (x, y)
class SawyerSoccerV2Policy(Policy):
    """Scripted policy for the Metaworld Sawyer soccer-v2 task.

    NOTE(review): '_fully_parsed' looks like a stripped decorator (likely
    @assert_fully_parsed on _parse_obs), and _parse_obs / _desired_pos are
    presumably @staticmethod in the original -- confirm.
    """
    _fully_parsed
    def _parse_obs(obs):
        # Fixed observation layout: hand (0:3), ball (3:6), unused (6:9),
        # goal (9:).
        return {'hand_pos': obs[:3], 'ball_pos': obs[3:6], 'goal_pos': obs[9:], 'unused_info': obs[6:9]}
    def get_action(self, obs):
        """Return a 4-dof action: xyz delta toward the target + grab effort."""
        o_d = self._parse_obs(obs)
        action = Action({'delta_pos': np.arange(3), 'grab_effort': 3})
        # Proportional controller toward the desired position.
        action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=25.0)
        action['grab_effort'] = 1.0
        return action.array
    def _desired_pos(o_d):
        """Choose a target: approach a push location beside/behind the ball,
        then push the ball itself toward the goal."""
        pos_curr = o_d['hand_pos']
        pos_ball = (o_d['ball_pos'] + np.array([0.0, 0.0, 0.03]))
        pos_goal = o_d['goal_pos']
        # Stay high (0.1) until the hand is horizontally over the ball,
        # then drop low (0.03) to push.
        desired_z = (0.1 if (np.linalg.norm((pos_curr[:2] - pos_ball[:2])) < 0.02) else 0.03)
        to_left_of_goal = ((pos_ball[0] - pos_goal[0]) < (- 0.05))
        to_right_of_goal = ((pos_ball[0] - pos_goal[0]) > 0.05)
        offset = 0.03
        # Default: push from behind (negative y); when the ball is off to
        # one side of the goal, push from that side to steer it back.
        push_location = (pos_ball + np.array([0.0, (- offset), 0.0]))
        if to_left_of_goal:
            push_location = (pos_ball + np.array([(- offset), 0.0, 0.0]))
        elif to_right_of_goal:
            push_location = (pos_ball + np.array([(+ offset), 0.0, 0.0]))
        push_location[2] = desired_z
        # Move to the push location first; once there, target the ball.
        if (np.linalg.norm((pos_curr - push_location)) > 0.01):
            return push_location
        return pos_ball
def create_dataset(dataset, config, min_scale=0.5):
    """Build the train/val/test dataset objects for a BLIP-style task.

    Args:
        dataset: task name ('pretrain', 'caption_coco', 'vqa', 'nlvr', ...).
        config: dict with 'image_size', annotation/image roots, etc.
        min_scale: lower bound of the RandomResizedCrop scale range.

    Returns:
        A single dataset or a tuple of datasets, depending on the task.

    Raises:
        ValueError: if `dataset` names an unknown task (previously this
        fell through and returned None, which crashed callers later).
    """
    print(config)  # debug: log the dataset config being used
    # BUGFIX: the original Normalize had std=(0., 0., 0.) (division by zero)
    # and garbled means; restored the standard CLIP RGB statistics, which the
    # surviving middle mean value (0.4578275) matches exactly.
    normalize = transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
    transform_train = transforms.Compose([transforms.RandomResizedCrop(config['image_size'], scale=(min_scale, 1.0)), RandomAugment(2, 5, isPIL=True, augs=['Identity', 'AutoContrast', 'Brightness', 'Sharpness', 'Equalize', 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']), transforms.ToTensor(), normalize])
    transform_test = transforms.Compose([transforms.Resize((config['image_size'], config['image_size'])), transforms.ToTensor(), normalize])
    if (dataset == 'pretrain'):
        dataset = pretrain_dataset(config['train_file'], config['laion_path'], transform_train)
        return dataset
    elif (dataset == 'caption_cc3m'):
        # Note: intentionally uses the *test* transform for CC3M captioning.
        dataset = pretrain_dataset(config['train_file'], config['laion_path'], transform_test)
        return dataset
    elif (dataset == 'caption_coco'):
        train_dataset = coco_karpathy_train(transform_train, config['image_root'], config['ann_root'], prompt=config['prompt'])
        val_dataset = coco_karpathy_caption_eval(transform_test, config['image_root'], config['ann_root'], 'val')
        test_dataset = coco_karpathy_caption_eval(transform_test, config['image_root'], config['ann_root'], 'test')
        return (train_dataset, val_dataset, test_dataset)
    elif (dataset == 'nocaps'):
        val_dataset = nocaps_eval(transform_test, config['image_root'], config['ann_root'], 'val')
        test_dataset = nocaps_eval(transform_test, config['image_root'], config['ann_root'], 'test')
        return (val_dataset, test_dataset)
    elif (dataset == 'retrieval_coco'):
        train_dataset = coco_karpathy_train(transform_train, config['image_root'], config['ann_root'])
        val_dataset = coco_karpathy_retrieval_eval(transform_test, config['image_root'], config['ann_root'], 'val')
        test_dataset = coco_karpathy_retrieval_eval(transform_test, config['image_root'], config['ann_root'], 'test')
        return (train_dataset, val_dataset, test_dataset)
    elif (dataset == 'retrieval_flickr'):
        train_dataset = flickr30k_train(transform_train, config['image_root'], config['ann_root'])
        val_dataset = flickr30k_retrieval_eval(transform_test, config['image_root'], config['ann_root'], 'val')
        test_dataset = flickr30k_retrieval_eval(transform_test, config['image_root'], config['ann_root'], 'test')
        return (train_dataset, val_dataset, test_dataset)
    elif (dataset == 'vqa'):
        train_dataset = vqa_dataset(transform_train, config['ann_root'], config['vqa_root'], config['vg_root'], train_files=config['train_files'], split='train')
        test_dataset = vqa_dataset(transform_test, config['ann_root'], config['vqa_root'], config['vg_root'], split='test')
        return (train_dataset, test_dataset)
    elif (dataset == 'nlvr'):
        train_dataset = nlvr_dataset(transform_train, config['image_root'], config['ann_root'], 'train')
        val_dataset = nlvr_dataset(transform_test, config['image_root'], config['ann_root'], 'val')
        test_dataset = nlvr_dataset(transform_test, config['image_root'], config['ann_root'], 'test')
        return (train_dataset, val_dataset, test_dataset)
    else:
        raise ValueError('unknown dataset {!r}'.format(dataset))
def test_recordarray_localindex():
    """Exercise ak._do.local_index over a RecordArray wrapped in every Content type.

    For each layout the test checks: the index values at every valid axis
    (positive and negative forms agree), that the typetracer form matches the
    eager form, and that out-of-range axes raise IndexError.
    """
    # RegularArray(RecordArray(NumpyArray)), size 3 -> two sublists of length 3.
    v2_array = ak.contents.regulararray.RegularArray(ak.contents.recordarray.RecordArray([ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6]))], ['nest']), 3)
    assert (to_list(ak._do.local_index(v2_array, 0)) == [0, 1])
    assert (ak._do.local_index(v2_array.to_typetracer(), 0).form == ak._do.local_index(v2_array, 0).form)
    assert (to_list(ak._do.local_index(v2_array, 1)) == [[0, 1, 2], [0, 1, 2]])
    assert (ak._do.local_index(v2_array.to_typetracer(), 1).form == ak._do.local_index(v2_array, 1).form)
    assert (to_list(ak._do.local_index(v2_array, (- 1))) == [[0, 1, 2], [0, 1, 2]])
    assert (ak._do.local_index(v2_array.to_typetracer(), (- 1)).form == ak._do.local_index(v2_array, (- 1)).form)
    assert (to_list(ak._do.local_index(v2_array, (- 2))) == [0, 1])
    assert (ak._do.local_index(v2_array.to_typetracer(), (- 2)).form == ak._do.local_index(v2_array, (- 2)).form)
    with pytest.raises(IndexError):
        ak._do.local_index(v2_array, (- 3))
    with pytest.raises(IndexError):
        ak._do.local_index(v2_array, 2)
    # RegularArray of size 0 with zeros_length=10 -> ten empty sublists.
    v2_array = ak.contents.regulararray.RegularArray(ak.contents.recordarray.RecordArray([ak.contents.emptyarray.EmptyArray()], ['nest']), 0, zeros_length=10)
    assert (to_list(ak._do.local_index(v2_array, 0)) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    assert (ak._do.local_index(v2_array.to_typetracer(), 0).form == ak._do.local_index(v2_array, 0).form)
    assert (to_list(ak._do.local_index(v2_array, 1)) == [[], [], [], [], [], [], [], [], [], []])
    assert (ak._do.local_index(v2_array.to_typetracer(), 1).form == ak._do.local_index(v2_array, 1).form)
    assert (to_list(ak._do.local_index(v2_array, (- 1))) == [[], [], [], [], [], [], [], [], [], []])
    assert (ak._do.local_index(v2_array.to_typetracer(), (- 1)).form == ak._do.local_index(v2_array, (- 1)).form)
    assert (to_list(ak._do.local_index(v2_array, (- 2))) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    assert (ak._do.local_index(v2_array.to_typetracer(), (- 2)).form == ak._do.local_index(v2_array, (- 2)).form)
    with pytest.raises(IndexError):
        ak._do.local_index(v2_array, (- 3))
    # ListArray with explicit starts/stops -> sublists of length 3, 0, 2.
    v2_array = ak.contents.listarray.ListArray(ak.index.Index(np.array([4, 100, 1])), ak.index.Index(np.array([7, 100, 3, 200])), ak.contents.recordarray.RecordArray([ak.contents.numpyarray.NumpyArray(np.array([6.6, 4.4, 5.5, 7.7, 1.1, 2.2, 3.3, 8.8]))], ['nest']))
    assert (to_list(ak._do.local_index(v2_array, 0)) == [0, 1, 2])
    assert (ak._do.local_index(v2_array.to_typetracer(), 0).form == ak._do.local_index(v2_array, 0).form)
    assert (to_list(ak._do.local_index(v2_array, 1)) == [[0, 1, 2], [], [0, 1]])
    assert (ak._do.local_index(v2_array.to_typetracer(), 1).form == ak._do.local_index(v2_array, 1).form)
    assert (to_list(ak._do.local_index(v2_array, (- 1))) == [[0, 1, 2], [], [0, 1]])
    assert (ak._do.local_index(v2_array.to_typetracer(), (- 1)).form == ak._do.local_index(v2_array, (- 1)).form)
    assert (to_list(ak._do.local_index(v2_array, (- 2))) == [0, 1, 2])
    assert (ak._do.local_index(v2_array.to_typetracer(), (- 2)).form == ak._do.local_index(v2_array, (- 2)).form)
    with pytest.raises(IndexError):
        ak._do.local_index(v2_array, (- 3))
    with pytest.raises(IndexError):
        ak._do.local_index(v2_array, 2)
    # ListOffsetArray with offsets [1, 4, 4, 6] -> same sublist lengths 3, 0, 2.
    v2_array = ak.contents.listoffsetarray.ListOffsetArray(ak.index.Index(np.array([1, 4, 4, 6])), ak.contents.recordarray.RecordArray([ak.contents.numpyarray.NumpyArray(np.array([6.6, 1.1, 2.2, 3.3, 4.4, 5.5, 7.7]))], ['nest']))
    assert (to_list(ak._do.local_index(v2_array, 0)) == [0, 1, 2])
    assert (ak._do.local_index(v2_array.to_typetracer(), 0).form == ak._do.local_index(v2_array, 0).form)
    assert (to_list(ak._do.local_index(v2_array, 1)) == [[0, 1, 2], [], [0, 1]])
    assert (ak._do.local_index(v2_array.to_typetracer(), 1).form == ak._do.local_index(v2_array, 1).form)
    assert (to_list(ak._do.local_index(v2_array, (- 1))) == [[0, 1, 2], [], [0, 1]])
    assert (ak._do.local_index(v2_array.to_typetracer(), (- 1)).form == ak._do.local_index(v2_array, (- 1)).form)
    assert (to_list(ak._do.local_index(v2_array, (- 2))) == [0, 1, 2])
    assert (ak._do.local_index(v2_array.to_typetracer(), (- 2)).form == ak._do.local_index(v2_array, (- 2)).form)
    with pytest.raises(IndexError):
        ak._do.local_index(v2_array, (- 3))
    with pytest.raises(IndexError):
        ak._do.local_index(v2_array, 2)
    # IndexedArray: depth 1, so only axis 0 / -1 are valid.
    v2_array = ak.contents.indexedarray.IndexedArray(ak.index.Index(np.array([2, 2, 0, 1, 4, 5, 4])), ak.contents.recordarray.RecordArray([ak.contents.numpyarray.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6]))], ['nest']))
    assert (to_list(ak._do.local_index(v2_array, 0)) == [0, 1, 2, 3, 4, 5, 6])
    assert (ak._do.local_index(v2_array.to_typetracer(), 0).form == ak._do.local_index(v2_array, 0).form)
    assert (to_list(ak._do.local_index(v2_array, (- 1))) == [0, 1, 2, 3, 4, 5, 6])
    assert (ak._do.local_index(v2_array.to_typetracer(), (- 1)).form == ak._do.local_index(v2_array, (- 1)).form)
    with pytest.raises(IndexError):
        ak._do.local_index(v2_array, (- 2))
    with pytest.raises(IndexError):
        ak._do.local_index(v2_array, 1)
    # IndexedOptionArray: -1 index entries are missing values, still depth 1.
    v2_array = ak.contents.indexedoptionarray.IndexedOptionArray(ak.index.Index(np.array([2, 2, (- 1), 1, (- 1), 5, 4])), ak.contents.recordarray.RecordArray([ak.contents.numpyarray.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6]))], ['nest']))
    assert (to_list(ak._do.local_index(v2_array, 0)) == [0, 1, 2, 3, 4, 5, 6])
    assert (ak._do.local_index(v2_array.to_typetracer(), 0).form == ak._do.local_index(v2_array, 0).form)
    assert (to_list(ak._do.local_index(v2_array, (- 1))) == [0, 1, 2, 3, 4, 5, 6])
    assert (ak._do.local_index(v2_array.to_typetracer(), (- 1)).form == ak._do.local_index(v2_array, (- 1)).form)
    with pytest.raises(IndexError):
        ak._do.local_index(v2_array, (- 2))
    with pytest.raises(IndexError):
        ak._do.local_index(v2_array, 1)
    # ByteMaskedArray, valid_when=True.
    v2_array = ak.contents.bytemaskedarray.ByteMaskedArray(ak.index.Index(np.array([1, 0, 1, 0, 1], dtype=np.int8)), ak.contents.recordarray.RecordArray([ak.contents.numpyarray.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6]))], ['nest']), valid_when=True)
    assert (to_list(ak._do.local_index(v2_array, 0)) == [0, 1, 2, 3, 4])
    assert (ak._do.local_index(v2_array.to_typetracer(), 0).form == ak._do.local_index(v2_array, 0).form)
    assert (to_list(ak._do.local_index(v2_array, (- 1))) == [0, 1, 2, 3, 4])
    assert (ak._do.local_index(v2_array.to_typetracer(), (- 1)).form == ak._do.local_index(v2_array, (- 1)).form)
    with pytest.raises(IndexError):
        ak._do.local_index(v2_array, (- 2))
    with pytest.raises(IndexError):
        ak._do.local_index(v2_array, 1)
    # ByteMaskedArray, valid_when=False (inverted mask, same logical content).
    v2_array = ak.contents.bytemaskedarray.ByteMaskedArray(ak.index.Index(np.array([0, 1, 0, 1, 0], dtype=np.int8)), ak.contents.recordarray.RecordArray([ak.contents.numpyarray.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6]))], ['nest']), valid_when=False)
    assert (to_list(ak._do.local_index(v2_array, 0)) == [0, 1, 2, 3, 4])
    assert (ak._do.local_index(v2_array.to_typetracer(), 0).form == ak._do.local_index(v2_array, 0).form)
    assert (to_list(ak._do.local_index(v2_array, (- 1))) == [0, 1, 2, 3, 4])
    assert (ak._do.local_index(v2_array.to_typetracer(), (- 1)).form == ak._do.local_index(v2_array, (- 1)).form)
    with pytest.raises(IndexError):
        ak._do.local_index(v2_array, (- 2))
    with pytest.raises(IndexError):
        ak._do.local_index(v2_array, 1)
    # BitMaskedArray, valid_when=True, msb-first bit order.
    v2_array = ak.contents.bitmaskedarray.BitMaskedArray(ak.index.Index(np.packbits(np.array([True, True, True, True, False, False, False, False, True, False, True, False, True]))), ak.contents.recordarray.RecordArray([ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6]))], ['nest']), valid_when=True, length=13, lsb_order=False)
    assert (to_list(ak._do.local_index(v2_array, 0)) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
    assert (ak._do.local_index(v2_array.to_typetracer(), 0).form == ak._do.local_index(v2_array, 0).form)
    assert (to_list(ak._do.local_index(v2_array, (- 1))) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
    assert (ak._do.local_index(v2_array.to_typetracer(), (- 1)).form == ak._do.local_index(v2_array, (- 1)).form)
    with pytest.raises(IndexError):
        ak._do.local_index(v2_array, (- 2))
    with pytest.raises(IndexError):
        ak._do.local_index(v2_array, 1)
    # BitMaskedArray, valid_when=False, msb-first bit order.
    v2_array = ak.contents.bitmaskedarray.BitMaskedArray(ak.index.Index(np.packbits(np.array([0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0], dtype=np.uint8))), ak.contents.recordarray.RecordArray([ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6]))], ['nest']), valid_when=False, length=13, lsb_order=False)
    assert (to_list(ak._do.local_index(v2_array, 0)) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
    assert (ak._do.local_index(v2_array.to_typetracer(), 0).form == ak._do.local_index(v2_array, 0).form)
    assert (to_list(ak._do.local_index(v2_array, (- 1))) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
    assert (ak._do.local_index(v2_array.to_typetracer(), (- 1)).form == ak._do.local_index(v2_array, (- 1)).form)
    with pytest.raises(IndexError):
        ak._do.local_index(v2_array, (- 2))
    with pytest.raises(IndexError):
        ak._do.local_index(v2_array, 1)
    # BitMaskedArray, valid_when=True, lsb-first bit order.
    v2_array = ak.contents.bitmaskedarray.BitMaskedArray(ak.index.Index(np.packbits(np.array([0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1], dtype=np.uint8))), ak.contents.recordarray.RecordArray([ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6]))], ['nest']), valid_when=True, length=13, lsb_order=True)
    assert (to_list(ak._do.local_index(v2_array, 0)) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
    assert (ak._do.local_index(v2_array.to_typetracer(), 0).form == ak._do.local_index(v2_array, 0).form)
    assert (to_list(ak._do.local_index(v2_array, (- 1))) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
    assert (ak._do.local_index(v2_array.to_typetracer(), (- 1)).form == ak._do.local_index(v2_array, (- 1)).form)
    with pytest.raises(IndexError):
        ak._do.local_index(v2_array, (- 2))
    with pytest.raises(IndexError):
        ak._do.local_index(v2_array, 1)
    # BitMaskedArray, valid_when=False, lsb-first bit order.
    v2_array = ak.contents.bitmaskedarray.BitMaskedArray(ak.index.Index(np.packbits(np.array([1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0], dtype=np.uint8))), ak.contents.recordarray.RecordArray([ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6]))], ['nest']), valid_when=False, length=13, lsb_order=True)
    assert (to_list(ak._do.local_index(v2_array, 0)) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
    assert (ak._do.local_index(v2_array.to_typetracer(), 0).form == ak._do.local_index(v2_array, 0).form)
    assert (to_list(ak._do.local_index(v2_array, (- 1))) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
    assert (ak._do.local_index(v2_array.to_typetracer(), (- 1)).form == ak._do.local_index(v2_array, (- 1)).form)
    with pytest.raises(IndexError):
        ak._do.local_index(v2_array, (- 2))
    with pytest.raises(IndexError):
        ak._do.local_index(v2_array, 1)
    # UnmaskedArray: option-type wrapper with no actual mask.
    v2_array = ak.contents.unmaskedarray.UnmaskedArray(ak.contents.recordarray.RecordArray([ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3], dtype=np.float64))], ['nest']))
    assert (to_list(ak._do.local_index(v2_array, 0)) == [0, 1, 2, 3])
    assert (ak._do.local_index(v2_array.to_typetracer(), 0).form == ak._do.local_index(v2_array, 0).form)
    assert (to_list(ak._do.local_index(v2_array, (- 1))) == [0, 1, 2, 3])
    assert (ak._do.local_index(v2_array.to_typetracer(), (- 1)).form == ak._do.local_index(v2_array, (- 1)).form)
    with pytest.raises(IndexError):
        ak._do.local_index(v2_array, (- 2))
    with pytest.raises(IndexError):
        ak._do.local_index(v2_array, 1)
    # UnionArray mixing a string-record branch and a float-record branch.
    v2_array = ak.contents.unionarray.UnionArray(ak.index.Index(np.array([1, 1, 0, 0, 1, 0, 1], dtype=np.int8)), ak.index.Index(np.array([4, 3, 0, 1, 2, 2, 4, 100])), [ak.contents.recordarray.RecordArray([ak.from_iter(['1', '2', '3'], highlevel=False)], ['nest']), ak.contents.recordarray.RecordArray([ak.contents.numpyarray.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5]))], ['nest'])])
    assert (to_list(ak._do.local_index(v2_array, 0)) == [0, 1, 2, 3, 4, 5, 6])
    assert (ak._do.local_index(v2_array.to_typetracer(), 0).form == ak._do.local_index(v2_array, 0).form)
    assert (to_list(ak._do.local_index(v2_array, (- 1))) == [0, 1, 2, 3, 4, 5, 6])
    assert (ak._do.local_index(v2_array.to_typetracer(), (- 1)).form == ak._do.local_index(v2_array, (- 1)).form)
    with pytest.raises(IndexError):
        ak._do.local_index(v2_array, (- 2))
    with pytest.raises(IndexError):
        ak._do.local_index(v2_array, 1)
def _parquet_schema_to_form(schema):
    """Translate a Parquet schema into an equivalent Awkward Form.

    Leaf nodes get 'col:<dotted.path>' form keys and list nodes get
    'lst:<dotted.path>' keys, which later stages use to map Form nodes back
    to Parquet columns. List nodes whose content contains a record get no
    form key (they cannot be read as a single column).
    """
    def lst(path):
        # Form key for a list node at this dotted column path.
        return ('lst:' + '.'.join(path))
    def col(path):
        # Form key for a leaf (column) node at this dotted column path.
        return ('col:' + '.'.join(path))
    def maybe_nullable(field, content):
        # Wrap nullable Parquet fields in a ByteMaskedForm, moving the
        # form key from the content onto the mask wrapper.
        if field.nullable:
            return ak.forms.ByteMaskedForm('i8', content.with_form_key(None), valid_when=True, form_key=content.form_key)
        else:
            return content
    def contains_record(form):
        # True if a RecordForm appears at this node or under any list nesting.
        if isinstance(form, ak.forms.RecordForm):
            return True
        elif isinstance(form, ak.forms.ListOffsetForm):
            return contains_record(form.content)
        else:
            return False
    def recurse(arrow_type, path):
        # Map one Arrow type (and its dotted path) to an Awkward Form node.
        if isinstance(arrow_type, pyarrow.StructType):
            names = []
            contents = []
            for index in range(arrow_type.num_fields):
                field = arrow_type[index]
                names.append(field.name)
                content = maybe_nullable(field, recurse(field.type, (path + (field.name,))))
                contents.append(ak.forms.VirtualForm(content, has_length=True))
            assert (len(contents) != 0)
            return ak.forms.RecordForm(contents, names)
        elif isinstance(arrow_type, pyarrow.ListType):
            field = arrow_type.value_field
            # 'list'/'item' mirror Parquet's physical nesting of list columns.
            content = maybe_nullable(field, recurse(field.type, (path + ('list', 'item'))))
            form_key = (None if contains_record(content) else lst(path))
            return ak.forms.ListOffsetForm('i32', content, form_key=form_key)
        elif isinstance(arrow_type, pyarrow.LargeListType):
            field = arrow_type.value_field
            content = maybe_nullable(field, recurse(field.type, (path + ('list', 'item'))))
            form_key = (None if contains_record(content) else lst(path))
            return ak.forms.ListOffsetForm('i64', content, form_key=form_key)
        elif (arrow_type == pyarrow.string()):
            return ak.forms.ListOffsetForm('i32', ak.forms.NumpyForm((), 1, 'B', parameters={'__array__': 'char'}), parameters={'__array__': 'string'}, form_key=col(path))
        elif (arrow_type == pyarrow.large_string()):
            return ak.forms.ListOffsetForm('i64', ak.forms.NumpyForm((), 1, 'B', parameters={'__array__': 'char'}), parameters={'__array__': 'string'}, form_key=col(path))
        elif (arrow_type == pyarrow.binary()):
            return ak.forms.ListOffsetForm('i32', ak.forms.NumpyForm((), 1, 'B', parameters={'__array__': 'byte'}), parameters={'__array__': 'bytestring'}, form_key=col(path))
        elif (arrow_type == pyarrow.large_binary()):
            return ak.forms.ListOffsetForm('i64', ak.forms.NumpyForm((), 1, 'B', parameters={'__array__': 'byte'}), parameters={'__array__': 'bytestring'}, form_key=col(path))
        elif isinstance(arrow_type, pyarrow.DataType):
            # Primitive types: round-trip through the pandas dtype to numpy.
            dtype = np.dtype(arrow_type.to_pandas_dtype())
            return ak.forms.Form.from_numpy(dtype).with_form_key(col(path))
        else:
            raise NotImplementedError('cannot convert {0}.{1} to an equivalent Awkward Form'.format(type(arrow_type).__module__, type(arrow_type).__name__))
    # Parquet schema -> Arrow schema, then recurse over the top-level fields.
    schema = schema.to_arrow_schema()
    contents = []
    for (index, name) in enumerate(schema.names):
        field = schema.field(index)
        content = maybe_nullable(field, recurse(field.type, (name,)))
        contents.append(ak.forms.VirtualForm(content, has_length=True))
    assert (len(contents) != 0)
    return ak.forms.RecordForm(contents, schema.names)
def register_all_ctx459(root):
    """Register the Pascal Context 459-class semantic-segmentation splits.

    Registers 'ctx459_sem_seg_train' and 'ctx459_sem_seg_val' in detectron2's
    DatasetCatalog/MetadataCatalog, pointing at the images and .tif
    annotations under ``<root>/pascal_ctx_d2``.
    """
    dataset_root = os.path.join(root, 'pascal_ctx_d2')
    meta = _get_ctx459_meta()
    for split, dirname in (('train', 'training'), ('val', 'validation')):
        image_dir = os.path.join(dataset_root, 'images', dirname)
        gt_dir = os.path.join(dataset_root, 'annotations_ctx459', dirname)
        key = f'ctx459_sem_seg_{split}'
        # Bind the dirs as lambda defaults so each split keeps its own paths.
        DatasetCatalog.register(key, (lambda x=image_dir, y=gt_dir: load_sem_seg(y, x, gt_ext='tif', image_ext='jpg', dataset_name='pascal_context_459')))
        MetadataCatalog.get(key).set(stuff_classes=meta['stuff_classes'][:], thing_dataset_id_to_contiguous_id={}, stuff_dataset_id_to_contiguous_id=meta['stuff_dataset_id_to_contiguous_id'], image_root=image_dir, sem_seg_root=gt_dir, evaluator_type='sem_seg', ignore_label=65535)
# NOTE(review): this bare call looks like the remnant of a stripped
# decorator (e.g. `@set_module('numpy')`); as written it is just an
# expression statement -- confirm against upstream before relying on it.
_module('numpy')
class ndenumerate(object):
    """Iterator over an array yielding ``(multi_index, value)`` pairs,
    mirroring :func:`numpy.ndenumerate`."""
    def __init__(self, arr):
        # `asarray` must be in scope at module level (not visible in this chunk).
        self.iter = asarray(arr).flat
    def __next__(self):
        # `.coords` is read before `next(...)` advances the flat iterator,
        # so it is the index of the element returned by this call.
        return (self.iter.coords, next(self.iter))
    def __iter__(self):
        return self
    # Python 2 compatibility alias.
    next = __next__
def plot_training(H):
    """Plot train/val loss and accuracy curves from a Keras History object."""
    # (history key, legend label) pairs, plotted in this fixed order.
    curves = (('loss', 'train_loss'), ('val_loss', 'val_loss'), ('accuracy', 'train_acc'), ('val_accuracy', 'val_acc'))
    with plt.xkcd():
        for key, label in curves:
            plt.plot(H.history[key], label=label)
        plt.legend(loc='lower left')
        plt.show()
class TransformerEncoder(EncoderBase):
    """Stacked self-attention Transformer encoder ("Attention Is All You Need")."""

    def __init__(self, num_layers, d_model, heads, d_ff, dropout, attention_dropout, embeddings, max_relative_positions):
        super(TransformerEncoder, self).__init__()
        self.embeddings = embeddings
        self.transformer = nn.ModuleList([TransformerEncoderLayer(d_model, heads, d_ff, dropout, attention_dropout, max_relative_positions=max_relative_positions) for i in range(num_layers)])
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-06)

    @classmethod
    def from_opt(cls, opt, embeddings):
        """Alternate constructor: build the encoder from an options namespace.

        BUGFIX: this method takes ``cls`` but was missing the ``@classmethod``
        decorator, so ``TransformerEncoder.from_opt(opt, emb)`` would have
        bound ``opt`` as ``cls`` and failed.
        """
        return cls(opt.enc_layers, opt.enc_rnn_size, opt.heads, opt.transformer_ff, (opt.dropout[0] if (type(opt.dropout) is list) else opt.dropout), (opt.attention_dropout[0] if (type(opt.attention_dropout) is list) else opt.attention_dropout), embeddings, opt.max_relative_positions)

    def forward(self, src, max_len, lengths=None):
        """Encode ``src``; returns ``(embeddings, encoder_output, lengths)``."""
        self._check_args(src, lengths)
        emb = self.embeddings(src)
        # (len, batch, dim) -> (batch, len, dim) for the attention layers.
        out = emb.transpose(0, 1).contiguous()
        # Inverted sequence mask -- presumably True at padding positions,
        # assuming sequence_mask marks valid tokens (confirm in its source).
        mask = (~ sequence_mask(lengths, max_len).unsqueeze(1))
        for layer in self.transformer:
            out = layer(out, mask)
        out = self.layer_norm(out)
        # Transpose back to (len, batch, dim) for the caller.
        return (emb, out.transpose(0, 1).contiguous(), lengths)

    def update_dropout(self, dropout, attention_dropout):
        """Propagate new dropout rates to the embeddings and every layer."""
        self.embeddings.update_dropout(dropout)
        for layer in self.transformer:
            layer.update_dropout(dropout, attention_dropout)
def _calc_box(srs: dd.Series, qntls: da.Array, cfg: Config) -> Dict[(str, Any)]:
    """Compute box-plot statistics (quartiles, whiskers, outliers) for a series.

    `qntls` is presumably indexed by quantile level (0.25/0.5/0.75); the
    `.sum()` after `.loc[qnt]` appears to collapse the one-element selection
    to a scalar -- TODO confirm against the caller.
    """
    # Lower quartile, median, upper quartile -> data['qrtl1'..'qrtl3'].
    data = {f'qrtl{(i + 1)}': qntls.loc[qnt].sum() for (i, qnt) in enumerate((0.25, 0.5, 0.75))}
    # Interquartile range; Tukey fences at qrtl1/qrtl3 -/+ 1.5*IQR.
    iqr = (data['qrtl3'] - data['qrtl1'])
    srs_iqr = srs[srs.between((data['qrtl1'] - (1.5 * iqr)), (data['qrtl3'] + (1.5 * iqr)))]
    # Whiskers: extreme in-fence values.
    (data['lw'], data['uw']) = (srs_iqr.min(), srs_iqr.max())
    # Outliers: everything outside the fences.
    otlrs = srs[(~ srs.between((data['qrtl1'] - (1.5 * iqr)), (data['qrtl3'] + (1.5 * iqr))))]
    # Sample at most 100 outliers per partition to bound what gets plotted.
    smp_otlrs = otlrs.map_partitions((lambda x: x.sample(min(100, x.shape[0]))), meta=otlrs)
    data['otlrs'] = smp_otlrs.values
    if cfg.insight.enable:
        # Total (unsampled) outlier count for the insights machinery.
        data['notlrs'] = otlrs.shape[0]
    return data
def test_int():
    """Equality against a scalar broadcasts across a ragged Awkward array."""
    jagged = ak.Array([[1, 2], [3, 4, 5]])
    expected = [[False, True], [False, False, False]]
    assert (jagged == 2).to_list() == expected
class FlaxMBartForConditionalGeneration(metaclass=DummyObject):
    """Auto-generated placeholder used when the Flax backend is not installed;
    any instantiation raises an informative ImportError via requires_backends."""
    _backends = ['flax']
    def __init__(self, *args, **kwargs):
        # Fails fast with a message telling the user to install flax.
        requires_backends(self, ['flax'])
class GNNEdgeHead(nn.Module):
    """GNN prediction head for edge/link prediction tasks.

    Depending on ``cfg.model.edge_decoding``, edge scores are produced either
    by an MLP over concatenated endpoint embeddings ('concat') or by a
    parameter-free similarity ('dot' / 'cosine_similarity') applied after a
    node-level MLP.
    """
    def __init__(self, dim_in, dim_out):
        # dim_in: node embedding size; dim_out: number of edge classes.
        super(GNNEdgeHead, self).__init__()
        if (cfg.model.edge_decoding == 'concat'):
            # MLP consumes both endpoints at once, so input is 2 * dim_in.
            self.layer_post_mp = MLP((dim_in * 2), dim_out, num_layers=cfg.gnn.layers_post_mp, bias=True)
            self.decode_module = (lambda v1, v2: self.layer_post_mp(torch.cat((v1, v2), dim=(- 1))))
        else:
            # Similarity decoders produce a single score, so only binary
            # prediction is supported here.
            if (dim_out > 1):
                raise ValueError('Binary edge decoding ({})is used for multi-class edge/link prediction.'.format(cfg.model.edge_decoding))
            # Node-wise MLP applied to the whole batch before decoding.
            self.layer_post_mp = MLP(dim_in, dim_in, num_layers=cfg.gnn.layers_post_mp, bias=True)
            if (cfg.model.edge_decoding == 'dot'):
                self.decode_module = (lambda v1, v2: torch.sum((v1 * v2), dim=(- 1)))
            elif (cfg.model.edge_decoding == 'cosine_similarity'):
                self.decode_module = nn.CosineSimilarity(dim=(- 1))
            else:
                raise ValueError('Unknown edge decoding {}.'.format(cfg.model.edge_decoding))
    def _apply_index(self, batch):
        # Select endpoint node features and labels for the current split
        # (batch.split is e.g. 'train'/'val'/'test').
        index = '{}_edge_index'.format(batch.split)
        label = '{}_edge_label'.format(batch.split)
        return (batch.x[batch[index]], batch[label])
    def forward(self, batch):
        if (cfg.model.edge_decoding != 'concat'):
            # Non-concat decoders: transform node embeddings first.
            batch = self.layer_post_mp(batch)
        (pred, label) = self._apply_index(batch)
        # pred is stacked as (2, num_edges, dim): source then target nodes.
        nodes_first = pred[0]
        nodes_second = pred[1]
        pred = self.decode_module(nodes_first, nodes_second)
        return (pred, label)
def _write_csv_rows(path, data_list, indices):
    """Write ``data_list[i]`` for each ``i`` in ``indices`` to ``path`` as ';'-delimited CSV."""
    with open(path, 'w', newline='') as fh:
        writer = csv.writer(fh, delimiter=';')
        for idx in indices:
            writer.writerow(data_list[idx])


def create_csv(data_list, save_path='list_folder/experiment_name', test_split=0.2, val_split=0.1, shuffle=False):
    """Split ``data_list`` into train/val/test subsets and write each to a CSV.

    Files are written as ``<save_path>_test.csv``, ``<save_path>_val.csv`` and
    ``<save_path>_train.csv`` (plain ``<save_path>.csv`` when both split
    fractions are 0). A file is only created when its subset is non-empty.
    The three duplicated write blocks of the original are factored into
    ``_write_csv_rows``.

    Args:
        data_list: sequence of rows (each row an iterable of fields).
            NOTE: shuffled *in place* when ``shuffle`` is True.
        save_path: path prefix for the output CSV files.
        test_split: fraction of all entries assigned to the test set.
        val_split: fraction of the remaining (non-test) entries for validation.
        shuffle: shuffle ``data_list`` in place before splitting.
    """
    if shuffle:
        np.random.shuffle(data_list)
    num_files = len(data_list)
    num_test_files = int((test_split * num_files))
    # Validation fraction applies to what remains after the test split.
    num_val_files = int(((num_files - num_test_files) * val_split))
    num_train_files = ((num_files - num_test_files) - num_val_files)
    # Only suffix the train file when the data was actually split.
    train_identifier = ('_train.csv' if ((test_split > 0) or (val_split > 0)) else '.csv')
    file_idx = np.arange(num_files)
    # Test/val indices are drawn without a fixed seed: splits differ between
    # runs unless the caller seeds numpy's RNG.
    test_idx = []
    if (num_test_files > 0):
        test_idx = sorted(np.random.choice(file_idx, size=num_test_files, replace=False))
        _write_csv_rows((save_path + '_test.csv'), data_list, test_idx)
    val_idx = []
    if (num_val_files > 0):
        val_idx = sorted(np.random.choice(list((set(file_idx) - set(test_idx))), size=num_val_files, replace=False))
        _write_csv_rows((save_path + '_val.csv'), data_list, val_idx)
    if (num_train_files > 0):
        # Train gets everything not claimed by test or val.
        train_idx = sorted(list(((set(file_idx) - set(test_idx)) - set(val_idx))))
        _write_csv_rows((save_path + train_identifier), data_list, train_idx)
class codelineTypeSub(supermod.codelineType):
    """Subclass hook for the generated ``codelineType``; forwards every
    field straight to the superclass initializer."""
    def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlight=None):
        super(codelineTypeSub, self).__init__(external, lineno, refkind, refid, highlight)
def setup_env(gpu_s, seed):
    """Configure process environment, GPU selection, and RNG seeding."""
    # Silence the bitsandbytes welcome banner and the HF tokenizers
    # fork-parallelism warning before any heavy imports run.
    env_flags = {'BITSANDBYTES_NOWELCOME': '1', 'TOKENIZERS_PARALLELISM': 'false'}
    os.environ.update(env_flags)
    setup_gpu(gpu_s)
    setup_seed(seed)
# NOTE(review): the original lines above this function were bare expressions
# (`()`, `('--seed', default=1)` -- the latter is a SyntaxError -- and
# `_experiment(snapshot_mode='all')`), clearly decorators mangled by a
# source transform. Reconstructed below as the conventional garage/click
# experiment decorators; confirm against the upstream example before merging.
@click.command()
@click.option('--seed', default=1)
@click.option('--epochs', default=500)
@click.option('--batch_size', default=1024)
@click.option('--n_worker', default=psutil.cpu_count(logical=False))
@wrap_experiment(snapshot_mode='all')
def mttrpo_metaworld_mt50(ctxt, seed, epochs, batch_size, n_worker):
    """Train multi-task TRPO on the MetaWorld MT50 benchmark.

    Args:
        ctxt: garage experiment context (injected by wrap_experiment).
        seed: RNG seed.
        epochs: number of training epochs.
        batch_size: environment steps collected per epoch.
        n_worker: number of sampler workers (defaults to physical cores).
    """
    set_seed(seed)
    tasks = mwb.MT50.get_train_tasks().all_task_names
    envs = []
    for task in tasks:
        envs.append(normalize(GarageEnv(mwb.MT50.from_task(task))))
    # Present the 50 task environments as one env, sampled round-robin.
    env = MultiEnvWrapper(envs, sample_strategy=round_robin_strategy, mode='vanilla')
    policy = GaussianMLPPolicy(env_spec=env.spec, hidden_sizes=(64, 64), hidden_nonlinearity=torch.tanh, output_nonlinearity=None)
    value_function = GaussianMLPValueFunction(env_spec=env.spec, hidden_sizes=(32, 32), hidden_nonlinearity=torch.tanh, output_nonlinearity=None)
    algo = TRPO(env_spec=env.spec, policy=policy, value_function=value_function, max_path_length=128, discount=0.99, gae_lambda=0.95)
    runner = LocalRunner(ctxt)
    runner.setup(algo, env, n_workers=n_worker)
    runner.train(n_epochs=epochs, batch_size=batch_size)
@unittest.skipUnless(pin.WITH_HPP_FCL, 'Needs HPP-FCL')
class TestGeometryObjectBindings(unittest.TestCase):
    """Bindings tests for pinocchio GeometryObject / GeometryModel / GeometryData.

    NOTE(review): the original source had the bare tuple
    ``(pin.WITH_HPP_FCL, 'Needs HPP-FCL')`` above the class -- almost
    certainly a stripped ``@unittest.skipUnless`` decorator, restored here;
    confirm against upstream.
    """

    def setUp(self):
        # Sample humanoid model plus its collision geometry model.
        self.model = pin.buildSampleModelHumanoid()
        self.collision_model = pin.buildSampleGeometryModelHumanoid(self.model)

    def test_name_get_set(self):
        col = self.collision_model.geometryObjects[0]
        self.assertTrue((col.name == 'rleg_shoulder_object'))
        col.name = 'new_collision_name'
        self.assertTrue((col.name == 'new_collision_name'))

    def test_parent_get_set(self):
        col = self.collision_model.geometryObjects[0]
        self.assertTrue((col.parentJoint == 2))
        col.parentJoint = 3
        self.assertTrue((col.parentJoint == 3))

    def test_placement_get_set(self):
        m = pin.SE3.Identity()
        new_m = pin.SE3.Random()
        col = self.collision_model.geometryObjects[0]
        self.assertTrue(np.allclose(col.placement.homogeneous, m.homogeneous))
        col.placement = new_m
        self.assertTrue(np.allclose(col.placement.homogeneous, new_m.homogeneous))

    def test_meshpath_get(self):
        col = self.collision_model.geometryObjects[0]
        self.assertTrue((col.meshPath is not None))

    def test_scale(self):
        # Per-axis scaling propagates to every geometry object.
        scale = np.array([1.0, 2.0, 3.0])
        pin.setGeometryMeshScales(self.collision_model, scale)
        for obj in self.collision_model.geometryObjects:
            self.assertTrue((obj.meshScale[0] == scale[0]))
            self.assertTrue((obj.meshScale[1] == scale[1]))
            self.assertTrue((obj.meshScale[2] == scale[2]))

    def test_scalar_scale(self):
        # A scalar scale is broadcast to all three axes.
        scale = 2.0
        vec = np.array(([scale] * 3))
        pin.setGeometryMeshScales(self.collision_model, scale)
        for obj in self.collision_model.geometryObjects:
            self.assertTrue(np.allclose(obj.meshScale, vec))

    def test_create_data(self):
        collision_data = self.collision_model.createData()
        self.assertEqual(len(collision_data.oMg), self.collision_model.ngeoms)

    def test_create_datas(self):
        collision_data = self.collision_model.createData()
        self.assertEqual(len(collision_data.oMg), self.collision_model.ngeoms)
        # createDatas builds model data and geometry data together.
        (data_2, collision_data_2) = pin.createDatas(self.model, self.collision_model)
        self.assertTrue(self.model.check(data_2))
        self.assertEqual(len(collision_data_2.oMg), self.collision_model.ngeoms)

    def test_copy(self):
        collision_model_copy = self.collision_model.copy()
        self.assertEqual(self.collision_model.ngeoms, collision_model_copy.ngeoms)
        collision_data = self.collision_model.createData()
        collision_data_copy = collision_data.copy()
        self.assertEqual(len(collision_data.oMg), len(collision_data_copy.oMg))
class Flowavenet(nn.Module):
    """Flow-based generative audio model built from a stack of flow Blocks.

    Every block halves the time resolution (squeeze); every
    ``block_per_split``-th block (except the last) also splits off half of
    the channels as an early latent output.
    """
    def __init__(self, in_channel, cin_channel, n_block, n_flow, n_layer, affine=True, pretrained=False, block_per_split=8):
        super().__init__()
        self.block_per_split = block_per_split
        self.blocks = nn.ModuleList()
        self.n_block = n_block
        for i in range(self.n_block):
            # Split at multiples of block_per_split, but never at the last block.
            split = (False if (((i + 1) % self.block_per_split) or (i == (self.n_block - 1))) else True)
            self.blocks.append(Block(in_channel, cin_channel, n_flow, n_layer, affine=affine, pretrained=pretrained, split=split))
            # Squeezing doubles the conditioning channels every block; the
            # data channels double only when no split consumed half of them.
            cin_channel *= 2
            if (not split):
                in_channel *= 2
        # Two transposed convs upsample the conditioning (e.g. mel) features
        # 16x each along time, with weight norm and a LeakyReLU in between.
        self.upsample_conv = nn.ModuleList()
        for s in [16, 16]:
            convt = nn.ConvTranspose2d(1, 1, (3, (2 * s)), padding=(1, (s // 2)), stride=(1, s))
            convt = nn.utils.weight_norm(convt)
            nn.init.kaiming_normal_(convt.weight)
            self.upsample_conv.append(convt)
            self.upsample_conv.append(nn.LeakyReLU(0.4))
    def forward(self, x, c):
        """Return per-dimension (log_p, logdet), both normalized by B * T."""
        (B, _, T) = x.size()
        (logdet, log_p_sum) = (0, 0)
        out = x
        c = self.upsample(c)
        for block in self.blocks:
            (out, c, logdet_new, logp_new) = block(out, c)
            logdet = (logdet + logdet_new)
            log_p_sum = (log_p_sum + logp_new)
        # Standard-normal prior log-density of the final latent.
        log_p_sum += (0.5 * ((- log((2.0 * pi))) - out.pow(2)).sum())
        logdet = (logdet / (B * T))
        log_p = (log_p_sum / (B * T))
        return (log_p, logdet)
    def reverse(self, z, c):
        """Invert the flow: map latent ``z`` (plus conditioning) back to audio."""
        (_, _, T) = z.size()
        (_, _, t_c) = c.size()
        if (T != t_c):
            c = self.upsample(c)
        z_list = []
        x = z
        # First pass: replay the forward squeeze/split schedule to carve z
        # into the per-split latent pieces each block will need.
        for i in range(self.n_block):
            (b_size, _, T) = x.size()
            squeezed_x = x.view(b_size, (- 1), (T // 2), 2).permute(0, 1, 3, 2)
            x = squeezed_x.contiguous().view(b_size, (- 1), (T // 2))
            squeezed_c = c.view(b_size, (- 1), (T // 2), 2).permute(0, 1, 3, 2)
            c = squeezed_c.contiguous().view(b_size, (- 1), (T // 2))
            if (not (((i + 1) % self.block_per_split) or (i == (self.n_block - 1)))):
                (x, z) = x.chunk(2, 1)
                z_list.append(z)
        # Second pass: run the blocks in reverse, re-injecting the stored
        # latent chunk at each split point.
        for (i, block) in enumerate(self.blocks[::(- 1)]):
            index = (self.n_block - i)
            if (not ((index % self.block_per_split) or (index == self.n_block))):
                (x, c) = block.reverse(x, c, z_list[((index // self.block_per_split) - 1)])
            else:
                (x, c) = block.reverse(x, c)
        return x
    def upsample(self, c):
        """Upsample conditioning features along time via the transposed convs."""
        # Add a channel dim for Conv2d, run the conv/activation stack, drop it.
        c = c.unsqueeze(1)
        for f in self.upsample_conv:
            c = f(c)
        c = c.squeeze(1)
        return c
class DialogModel(modules.CudaModule):
    """End-to-end negotiation dialogue model.

    Combines a context encoder over the negotiation scenario, a word-level
    GRU language model whose read and write paths share weights, and a
    bidirectional attention RNN that predicts the final item selection.
    """
    def __init__(self, word_dict, item_dict, context_dict, output_length, args, device_id):
        super(DialogModel, self).__init__(device_id)
        domain = get_domain(args.domain)
        # Vocabularies: dialogue words, selectable items, and context tokens.
        self.word_dict = word_dict
        self.item_dict = item_dict
        self.context_dict = context_dict
        self.args = args
        # Word embedding table; also reused as the output projection via F.linear.
        self.word_encoder = nn.Embedding(len(self.word_dict), args.nembed_word)
        # Context encoder variant is chosen by a command-line flag.
        ctx_encoder_ty = (modules.RnnContextEncoder if args.rnn_ctx_encoder else modules.MlpContextEncoder)
        self.ctx_encoder = ctx_encoder_ty(len(self.context_dict), domain.input_length(), args.nembed_ctx, args.nhid_ctx, args.init_range, device_id)
        # Reader GRU consumes [word embedding ; context embedding] per step.
        self.reader = nn.GRU(input_size=(args.nhid_ctx + args.nembed_word), hidden_size=args.nhid_lang, bias=True)
        self.decoder = nn.Linear(args.nhid_lang, args.nembed_word)
        # Writer is a GRUCell used for step-by-step generation; its parameters
        # are tied to the reader's so reading and writing share one language model.
        self.writer = nn.GRUCell(input_size=(args.nhid_ctx + args.nembed_word), hidden_size=args.nhid_lang, bias=True)
        self.writer.weight_ih = self.reader.weight_ih_l0
        self.writer.weight_hh = self.reader.weight_hh_l0
        self.writer.bias_ih = self.reader.bias_ih_l0
        self.writer.bias_hh = self.reader.bias_hh_l0
        self.dropout = nn.Dropout(args.dropout)
        # Bidirectional GRU + attention head used for the final selection task.
        self.sel_rnn = nn.GRU(input_size=(args.nhid_lang + args.nembed_word), hidden_size=args.nhid_attn, bias=True, bidirectional=True)
        # Additive logit mask that suppresses special tokens during sampling
        # (filled below; values are -999 for special tokens, 0 otherwise).
        self.special_token_mask = torch.FloatTensor(len(self.word_dict))
        self.attn = nn.Sequential(torch.nn.Linear((2 * args.nhid_attn), args.nhid_attn), nn.Tanh(), torch.nn.Linear(args.nhid_attn, 1))
        self.sel_encoder = nn.Sequential(torch.nn.Linear(((2 * args.nhid_attn) + args.nhid_ctx), args.nhid_sel), nn.Tanh())
        # One independent linear head per output slot of the selection.
        self.sel_decoders = nn.ModuleList()
        for i in range(output_length):
            self.sel_decoders.append(nn.Linear(args.nhid_sel, len(self.item_dict)))
        self.init_weights()
        # Mark item tokens and dialogue control tokens as "special" so that
        # generation in write() never samples them.
        for i in range(len(self.word_dict)):
            w = self.word_dict.get_word(i)
            special = (domain.item_pattern.match(w) or (w in ('<unk>', 'YOU:', 'THEM:', '<pad>')))
            self.special_token_mask[i] = ((- 999) if special else 0.0)
        self.special_token_mask = self.to_device(self.special_token_mask)
    def set_device_id(self, device_id):
        """Point the model (and its mask tensor) at a new device id."""
        self.device_id = device_id
        self.ctx_encoder.device_id = device_id
        self.special_token_mask = self.to_device(self.special_token_mask)
    def zero_hid(self, bsz, nhid=None, copies=None):
        """Return a zero hidden state of shape (copies, bsz, nhid) on the model's device."""
        nhid = (self.args.nhid_lang if (nhid is None) else nhid)
        copies = (1 if (copies is None) else copies)
        hid = torch.zeros(copies, bsz, nhid)
        hid = self.to_device(hid)
        return Variable(hid)
    def init_weights(self):
        """Uniformly initialize all trainable parameters within +/- init_range."""
        self.decoder.weight.data.uniform_((- self.args.init_range), self.args.init_range)
        self.decoder.bias.data.fill_(0)
        modules.init_rnn(self.reader, self.args.init_range)
        self.word_encoder.weight.data.uniform_((- self.args.init_range), self.args.init_range)
        modules.init_cont(self.attn, self.args.init_range)
        modules.init_cont(self.sel_encoder, self.args.init_range)
        modules.init_cont(self.sel_decoders, self.args.init_range)
    def read(self, inpt, lang_h, ctx_h, prefix_token='THEM:'):
        """Read an utterance with a speaker-tag prefix; return per-step outputs and the new hidden state."""
        # Prepend the speaker tag (e.g. 'THEM:') as the first token.
        prefix = Variable(torch.LongTensor(1).unsqueeze(1))
        prefix.data.fill_(self.word_dict.get_idx(prefix_token))
        inpt = torch.cat([self.to_device(prefix), inpt])
        inpt_emb = self.word_encoder(inpt)
        # Broadcast the context vector across all time steps.
        ctx_h_rep = ctx_h.expand(inpt_emb.size(0), ctx_h.size(1), ctx_h.size(2))
        inpt_emb = torch.cat([inpt_emb, ctx_h_rep], 2)
        self.reader.flatten_parameters()
        (out, lang_h) = self.reader(inpt_emb, lang_h)
        return (out, lang_h)
    def word2var(self, word):
        """Wrap a single word's vocabulary index in a LongTensor Variable on the model's device."""
        result = Variable(torch.LongTensor(1))
        result.data.fill_(self.word_dict.get_idx(word))
        result = self.to_device(result)
        return result
    def forward_selection(self, inpt, lang_h, ctx_h):
        """Score all selection heads from the full dialogue (batched, training path)."""
        inpt_emb = self.word_encoder(inpt)
        # Concatenate language-model states with word embeddings per step.
        h = torch.cat([lang_h, inpt_emb], 2)
        h = self.dropout(h)
        attn_h = self.zero_hid(h.size(1), self.args.nhid_attn, copies=2)
        self.sel_rnn.flatten_parameters()
        (h, _) = self.sel_rnn(h, attn_h)
        h = h.transpose(0, 1).contiguous()
        # Attention over time steps, then a soft-weighted sum of states.
        logit = self.attn(h.view((- 1), (2 * self.args.nhid_attn))).view(h.size(0), h.size(1))
        prob = F.softmax(logit, dim=1).unsqueeze(2).expand_as(h)
        attn = torch.sum(torch.mul(h, prob), 1, keepdim=True).transpose(0, 1).contiguous()
        # Combine the attended summary with the context encoding.
        h = torch.cat([attn, ctx_h], 2).squeeze(0)
        h = self.dropout(h)
        h = self.sel_encoder.forward(h)
        # One logit vector per output slot, concatenated along dim 0.
        outs = [decoder.forward(h) for decoder in self.sel_decoders]
        return torch.cat(outs)
    def generate_choice_logits(self, inpt, lang_h, ctx_h):
        """Unbatched variant of forward_selection used at inference; returns a list of per-slot logits."""
        inpt_emb = self.word_encoder(inpt)
        h = torch.cat([lang_h.unsqueeze(1), inpt_emb], 2)
        h = self.dropout(h)
        attn_h = self.zero_hid(h.size(1), self.args.nhid_attn, copies=2)
        self.sel_rnn.flatten_parameters()
        (h, _) = self.sel_rnn(h, attn_h)
        h = h.squeeze(1)
        logit = self.attn(h).squeeze(1)
        # Softmax over time (dim=0) because there is no batch dimension here.
        prob = F.softmax(logit, dim=0).unsqueeze(1).expand_as(h)
        attn = torch.sum(torch.mul(h, prob), 0, keepdim=True)
        ctx_h = ctx_h.squeeze(1)
        h = torch.cat([attn, ctx_h], 1)
        h = self.sel_encoder.forward(h)
        logits = [decoder.forward(h).squeeze(0) for decoder in self.sel_decoders]
        return logits
    def write_batch(self, bsz, lang_h, ctx_h, temperature, max_words=100):
        """Sample up to max_words tokens for a whole batch; stops early when every row has emitted '<selection>'."""
        eod = self.word_dict.get_idx('<selection>')
        lang_h = lang_h.squeeze(0).expand(bsz, lang_h.size(2))
        ctx_h = ctx_h.squeeze(0).expand(bsz, ctx_h.size(2))
        # Every row starts from the 'YOU:' speaker tag.
        inpt = torch.LongTensor(bsz).fill_(self.word_dict.get_idx('YOU:'))
        inpt = Variable(self.to_device(inpt))
        (outs, lang_hs) = ([], [lang_h.unsqueeze(0)])
        done = set()
        for _ in range(max_words):
            inpt_emb = torch.cat([self.word_encoder(inpt), ctx_h], 1)
            lang_h = self.writer(inpt_emb, lang_h)
            out = self.decoder(lang_h)
            # Output projection reuses the embedding matrix (tied weights).
            scores = F.linear(out, self.word_encoder.weight).div(temperature)
            # Subtract the per-row max for numerical stability before exp().
            scores.sub_(scores.max(1, keepdim=True)[0].expand(scores.size(0), scores.size(1)))
            out = torch.multinomial(scores.exp(), 1).squeeze(1)
            outs.append(out.unsqueeze(0))
            lang_hs.append(lang_h.unsqueeze(0))
            inpt = out
            data = out.data.cpu()
            for i in range(bsz):
                if (data[i] == eod):
                    done.add(i)
            if (len(done) == bsz):
                break
        # One extra writer step so the final token is also folded into the state.
        inpt_emb = torch.cat([self.word_encoder(inpt), ctx_h], 1)
        lang_h = self.writer(inpt_emb, lang_h)
        lang_hs.append(lang_h.unsqueeze(0))
        return (torch.cat(outs, 0), torch.cat(lang_hs, 0))
    def write(self, lang_h, ctx_h, max_words, temperature, stop_tokens=STOP_TOKENS, resume=False):
        """Sample a single utterance token-by-token; returns (logprobs, words, final state, all states).

        When resume is True, generation continues an utterance in progress:
        no 'YOU:' prefix is fed and the special-token mask is NOT applied.
        """
        (outs, logprobs, lang_hs) = ([], [], [])
        lang_h = lang_h.squeeze(1)
        ctx_h = ctx_h.squeeze(1)
        if resume:
            inpt = None
        else:
            inpt = Variable(torch.LongTensor(1))
            inpt.data.fill_(self.word_dict.get_idx('YOU:'))
            inpt = self.to_device(inpt)
        for _ in range(max_words):
            if (inpt is not None):
                inpt_emb = torch.cat([self.word_encoder(inpt), ctx_h], 1)
                lang_h = self.writer(inpt_emb, lang_h)
                lang_hs.append(lang_h)
            out = self.decoder(lang_h)
            scores = F.linear(out, self.word_encoder.weight).div(temperature)
            # Shift by the max for numerical stability of softmax.
            scores = scores.add((- scores.max().item())).squeeze(0)
            if (not resume):
                # Forbid sampling of item/control tokens via the -999 mask.
                mask = Variable(self.special_token_mask)
                scores = scores.add(mask)
            prob = F.softmax(scores, dim=0)
            logprob = F.log_softmax(scores, dim=0)
            word = prob.multinomial(num_samples=1).detach()
            # Keep the log-probability of the sampled word (for REINFORCE-style training).
            logprob = logprob.gather(0, word)
            logprobs.append(logprob)
            outs.append(word.view(word.size()[0], 1))
            inpt = word
            if (self.word_dict.get_word(word.data[0]) in stop_tokens):
                break
        # Fold the last sampled word into the hidden state as well.
        inpt_emb = torch.cat([self.word_encoder(inpt), ctx_h], 1)
        lang_h = self.writer(inpt_emb, lang_h)
        lang_hs.append(lang_h)
        lang_h = lang_h.unsqueeze(1)
        return (logprobs, torch.cat(outs), lang_h, torch.cat(lang_hs))
    def score_sent(self, sent, lang_h, ctx_h, temperature):
        """Accumulate the model's log-probability of a given sentence (teacher-forced)."""
        score = 0
        lang_h = lang_h.squeeze(1)
        ctx_h = ctx_h.squeeze(1)
        inpt = Variable(torch.LongTensor(1))
        inpt.data.fill_(self.word_dict.get_idx('YOU:'))
        inpt = self.to_device(inpt)
        lang_hs = []
        for word in sent:
            inpt_emb = torch.cat([self.word_encoder(inpt), ctx_h], 1)
            lang_h = self.writer(inpt_emb, lang_h)
            lang_hs.append(lang_h)
            out = self.decoder(lang_h)
            scores = F.linear(out, self.word_encoder.weight).div(temperature)
            # NOTE(review): `.data[0]` is pre-0.4 PyTorch indexing (write() uses
            # `.item()` instead) — confirm the targeted PyTorch version.
            scores = scores.add((- scores.max().data[0])).squeeze(0)
            mask = Variable(self.special_token_mask)
            scores = scores.add(mask)
            # NOTE(review): F.log_softmax without an explicit dim relies on the
            # legacy default and warns/errs on modern PyTorch — verify.
            logprob = F.log_softmax(scores)
            score += logprob[word[0]].data[0]
            inpt = Variable(word)
        # Fold the final word into the hidden state, mirroring write().
        inpt_emb = torch.cat([self.word_encoder(inpt), ctx_h], 1)
        lang_h = self.writer(inpt_emb, lang_h)
        lang_hs.append(lang_h)
        lang_h = lang_h.unsqueeze(1)
        return (score, lang_h, torch.cat(lang_hs))
    def forward_context(self, ctx):
        """Encode the negotiation context."""
        return self.ctx_encoder(ctx)
    def forward_lm(self, inpt, lang_h, ctx_h):
        """Language-model forward pass; returns per-step vocabulary logits and GRU outputs."""
        inpt_emb = self.word_encoder(inpt)
        # Use only the last context state, broadcast over all time steps.
        ctx_h_rep = ctx_h.narrow(0, (ctx_h.size(0) - 1), 1).expand(inpt.size(0), ctx_h.size(1), ctx_h.size(2))
        inpt_emb = torch.cat([inpt_emb, ctx_h_rep], 2)
        inpt_emb = self.dropout(inpt_emb)
        self.reader.flatten_parameters()
        (out, _) = self.reader(inpt_emb, lang_h)
        decoded = self.decoder(out.view((- 1), out.size(2)))
        # Project back to vocabulary space through the tied embedding matrix.
        decoded = F.linear(decoded, self.word_encoder.weight)
        return (decoded.view(out.size(0), out.size(1), decoded.size(1)), out)
@pytest.mark.parametrize('slate_id, reward, pscore, position, evaluation_policy_pscore, description_1', valid_input_of_slate_estimators)
@pytest.mark.parametrize('alpha, n_bootstrap_samples, random_state, err, description_2', invalid_input_of_estimate_intervals)
def test_estimate_intervals_of_all_estimators_using_invalid_input_data(slate_id, reward, pscore, position, evaluation_policy_pscore, description_1, alpha, n_bootstrap_samples, random_state, err, description_2) -> None:
    """estimate_interval of every slate OPE estimator must raise `err` on invalid bootstrap arguments.

    Fix: the bare `.parametrize(...)` lines were missing their `@pytest.mark`
    prefix, which is a syntax error; the decorators are restored here.
    """
    with pytest.raises(err, match=f'{description_2}*'):
        _ = sips.estimate_interval(slate_id=slate_id, reward=reward, pscore=pscore, position=position, evaluation_policy_pscore=evaluation_policy_pscore, alpha=alpha, n_bootstrap_samples=n_bootstrap_samples, random_state=random_state)
        _ = iips.estimate_interval(slate_id=slate_id, reward=reward, pscore_item_position=pscore, position=position, evaluation_policy_pscore_item_position=evaluation_policy_pscore, alpha=alpha, n_bootstrap_samples=n_bootstrap_samples, random_state=random_state)
        _ = rips.estimate_interval(slate_id=slate_id, reward=reward, pscore_cascade=pscore, position=position, evaluation_policy_pscore_cascade=evaluation_policy_pscore, alpha=alpha, n_bootstrap_samples=n_bootstrap_samples, random_state=random_state)
        _ = snsips.estimate_interval(slate_id=slate_id, reward=reward, pscore=pscore, position=position, evaluation_policy_pscore=evaluation_policy_pscore, alpha=alpha, n_bootstrap_samples=n_bootstrap_samples, random_state=random_state)
        _ = sniips.estimate_interval(slate_id=slate_id, reward=reward, pscore_item_position=pscore, position=position, evaluation_policy_pscore_item_position=evaluation_policy_pscore, alpha=alpha, n_bootstrap_samples=n_bootstrap_samples, random_state=random_state)
        _ = snrips.estimate_interval(slate_id=slate_id, reward=reward, pscore_cascade=pscore, position=position, evaluation_policy_pscore_cascade=evaluation_policy_pscore, alpha=alpha, n_bootstrap_samples=n_bootstrap_samples, random_state=random_state)
class EncoderManager(object):
    """Manages one or more skip-thoughts encoders and their TensorFlow sessions.

    Call load_model() once per trained model, then encode() to get the
    concatenated sentence embeddings from all loaded models.
    """

    def __init__(self):
        self.encoders = []
        self.sessions = []

    def load_model(self, model_config, vocabulary_file, embedding_matrix_file, checkpoint_path):
        """Load a skip-thoughts model: vocabulary, embedding matrix, and checkpoint."""
        tf.logging.info('Reading vocabulary from %s', vocabulary_file)
        with tf.gfile.GFile(vocabulary_file, mode='r') as f:
            lines = list(f.readlines())
        # NOTE(review): assumes GFile yields bytes here (hence .decode) — confirm
        # for the TensorFlow version in use.
        reverse_vocab = [line.decode('utf-8').strip() for line in lines]
        tf.logging.info('Loaded vocabulary with %d words.', len(reverse_vocab))
        tf.logging.info('Loading embedding matrix from %s', embedding_matrix_file)
        # FIX: np.load requires a binary file handle; text mode ('r') fails on Python 3.
        with open(embedding_matrix_file, 'rb') as f:
            embedding_matrix = np.load(f)
        tf.logging.info('Loaded embedding matrix with shape %s', embedding_matrix.shape)
        word_embeddings = collections.OrderedDict(zip(reverse_vocab, embedding_matrix))
        # Build a dedicated graph/session pair for this model.
        g = tf.Graph()
        with g.as_default():
            encoder = skip_thoughts_encoder.SkipThoughtsEncoder(word_embeddings)
            restore_model = encoder.build_graph_from_config(model_config, checkpoint_path)
        sess = tf.Session(graph=g)
        restore_model(sess)
        self.encoders.append(encoder)
        self.sessions.append(sess)

    def encode(self, data, use_norm=True, verbose=False, batch_size=128, use_eos=False):
        """Encode sentences with every loaded model and concatenate the embeddings along axis 1."""
        if (not self.encoders):
            raise ValueError('Must call load_model at least once before calling encode.')
        encoded = []
        for (encoder, sess) in zip(self.encoders, self.sessions):
            encoded.append(np.array(encoder.encode(sess, data, use_norm=use_norm, verbose=verbose, batch_size=batch_size, use_eos=use_eos)))
        return np.concatenate(encoded, axis=1)

    def close(self):
        """Close all TensorFlow sessions owned by this manager."""
        for sess in self.sessions:
            sess.close()
class _data_matrix(_spbase):
    """Base class for sparse matrix formats backed by a flat `.data` array.

    Fix: the `dtype` getter and setter were two bare `def`s with the same
    name, so the setter silently shadowed the getter and `self.dtype`
    evaluated to a bound method. The `@property` / `@dtype.setter`
    decorators are restored.
    """

    def __init__(self):
        _spbase.__init__(self)

    @property
    def dtype(self):
        """dtype of the underlying data array."""
        return self.data.dtype

    @dtype.setter
    def dtype(self, newtype):
        # Reinterprets the data buffer in place (no conversion/copy).
        self.data.dtype = newtype

    def _deduped_data(self):
        # Merge duplicate entries (if the format supports it) before exposing data.
        if hasattr(self, 'sum_duplicates'):
            self.sum_duplicates()
        return self.data

    def __abs__(self):
        return self._with_data(abs(self._deduped_data()))

    def __round__(self, ndigits=0):
        return self._with_data(np.around(self._deduped_data(), decimals=ndigits))

    def _real(self):
        return self._with_data(self.data.real)

    def _imag(self):
        return self._with_data(self.data.imag)

    def __neg__(self):
        if (self.dtype.kind == 'b'):
            raise NotImplementedError('negating a boolean sparse array is not supported')
        return self._with_data((- self.data))

    def __imul__(self, other):
        # In-place multiply is only defined against scalars.
        if isscalarlike(other):
            self.data *= other
            return self
        else:
            return NotImplemented

    def __itruediv__(self, other):
        if isscalarlike(other):
            recip = (1.0 / other)
            self.data *= recip
            return self
        else:
            return NotImplemented

    def astype(self, dtype, casting='unsafe', copy=True):
        dtype = np.dtype(dtype)
        if (self.dtype != dtype):
            matrix = self._with_data(self.data.astype(dtype, casting=casting, copy=True), copy=True)
            return matrix._with_data(matrix._deduped_data(), copy=False)
        elif copy:
            return self.copy()
        else:
            return self
    astype.__doc__ = _spbase.astype.__doc__

    def conjugate(self, copy=True):
        # Only complex dtypes need actual conjugation; otherwise copy/identity.
        if np.issubdtype(self.dtype, np.complexfloating):
            return self._with_data(self.data.conjugate(), copy=copy)
        elif copy:
            return self.copy()
        else:
            return self
    conjugate.__doc__ = _spbase.conjugate.__doc__

    def copy(self):
        return self._with_data(self.data.copy(), copy=True)
    copy.__doc__ = _spbase.copy.__doc__

    def count_nonzero(self):
        return np.count_nonzero(self._deduped_data())
    count_nonzero.__doc__ = _spbase.count_nonzero.__doc__

    def power(self, n, dtype=None):
        """Element-wise power; `n` must be a nonzero scalar."""
        if (not isscalarlike(n)):
            raise NotImplementedError('input is not scalar')
        if (not n):
            raise NotImplementedError('zero power is not supported as it would densify the matrix.\nUse `np.ones(A.shape, dtype=A.dtype)` for this case.')
        data = self._deduped_data()
        if (dtype is not None):
            data = data.astype(dtype)
        return self._with_data((data ** n))

    def _mul_scalar(self, other):
        return self._with_data((self.data * other))
def cal_ndcg(predicts, labels, user_ids, k_list):
    """Compute mean NDCG@k per cutoff in `k_list`, averaged over users.

    Users with fewer than two scored items are skipped (NDCG is not
    meaningful for a single item). Returns an array aligned with `k_list`.
    """
    frame = pd.DataFrame({'user': np.squeeze(user_ids), 'predict': np.squeeze(predicts), 'label': np.squeeze(labels)})
    per_k_scores = [[] for _ in range(len(k_list))]
    for uid in frame.user.unique():
        rows = frame.loc[(frame['user'] == uid)]
        preds = rows['predict'].tolist()
        if (len(preds) < 2):
            # Single-item users carry no ranking signal.
            continue
        labs = rows['label'].tolist()
        for (i, k) in enumerate(k_list):
            per_k_scores[i].append(ndcg_score([labs], [preds], k=k))
    return np.mean(np.array(per_k_scores), axis=1)
def solarize_add(img, add, thresh=128, **__):
    """Brighten pixel values below `thresh` by `add` (clamped to 255) via a point LUT.

    Images whose mode is neither 'L' nor 'RGB' are returned unchanged.
    Extra keyword arguments are accepted and ignored.
    """
    # Build the 256-entry lookup table: shift dark values, keep the rest.
    table = [min(255, value + add) if value < thresh else value for value in range(256)]
    if img.mode not in ('L', 'RGB'):
        return img
    if img.mode == 'RGB' and len(table) == 256:
        # RGB point() expects one table per channel.
        table = table * 3
    return img.point(table)
def main(_):
    """Train an offline-RL agent, logging metrics to wandb and a local Log dir.

    Periodically logs update statistics (every log_interval steps) and
    evaluation returns (every eval_interval steps).
    """
    env, dataset = make_env_and_dataset(FLAGS.env_name, FLAGS.seed)
    kwargs = dict(FLAGS.config)
    kwargs['alpha'] = FLAGS.alpha
    kwargs['alg'] = FLAGS.alg
    # The learner only receives the config-derived kwargs; seed/env_name are
    # appended afterwards purely for experiment bookkeeping.
    agent = Learner(FLAGS.seed, env.observation_space.sample()[np.newaxis], env.action_space.sample()[np.newaxis], max_steps=FLAGS.max_steps, **kwargs)
    kwargs['seed'] = FLAGS.seed
    kwargs['env_name'] = FLAGS.env_name
    wandb.init(project='project_name', entity='your_wandb_id', name=f'{FLAGS.env_name}', config=kwargs)
    log = Log(Path('benchmark') / FLAGS.env_name, kwargs)
    log(f'Log dir: {log.dir}')
    for step in tqdm.tqdm(range(1, FLAGS.max_steps + 1), smoothing=0.1, disable=not FLAGS.tqdm):
        batch = dataset.sample(FLAGS.batch_size)
        update_info = agent.update(batch)
        if step % FLAGS.log_interval == 0:
            wandb.log(update_info, step)
        if step % FLAGS.eval_interval == 0:
            normalized_return = evaluate(FLAGS.env_name, agent, env, FLAGS.eval_episodes)
            log.row({'normalized_return': normalized_return})
            wandb.log({'normalized_return': normalized_return}, step)
def bool_flag(s):
    """Parse a string into a boolean for use as an argparse `type=` callable.

    Raises argparse.ArgumentTypeError when the (lowercased) string is in
    neither FALSY_STRINGS nor TRUTHY_STRINGS.
    """
    lowered = s.lower()
    if lowered in FALSY_STRINGS:
        return False
    if lowered in TRUTHY_STRINGS:
        return True
    raise argparse.ArgumentTypeError('Invalid value for a boolean flag!')
def _mk_fp_tern(f, rm, a, b, c, ctx):
    """Build a ternary floating-point expression from constructor `f`.

    `rm` is the rounding-mode expression; `a`, `b`, `c` are coerced to
    floating-point expressions in the resolved context before the call.
    """
    ctx = _get_ctx(ctx)
    a, b, c = _coerce_fp_expr_list([a, b, c], ctx)
    if z3_debug():
        # After coercion at least one operand must already be a genuine FP expression.
        _z3_assert(is_fprm(rm), 'First argument must be a Z3 floating-point rounding mode expression')
        _z3_assert(is_fp(a) or is_fp(b) or is_fp(c), 'Second, third or fourth argument must be a Z3 floating-point expression')
    ast = f(ctx.ref(), rm.as_ast(), a.as_ast(), b.as_ast(), c.as_ast())
    return FPRef(ast, ctx)
class CpmTokenizerFast(PreTrainedTokenizerFast):
    """Fast CPM tokenizer: jieba pre-segmentation on top of an XLNet-style tokenizer.

    Fixes restored from stripped non-ASCII characters:
    - `str.maketrans(' \\n', '')` raised ValueError (the two string arguments
      must have equal length); spaces/newlines map to the placeholder glyphs
      U+2582/U+2583 which `_decode` converts back.
    - `_decode` replaced the empty string (which inserts a character between
      every pair of characters) instead of those placeholder glyphs.
    - The jieba installation URL was missing from the import error message.
    """

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False, bos_token='<s>', eos_token='</s>', unk_token='<unk>', sep_token='<sep>', pad_token='<pad>', cls_token='<cls>', mask_token='<mask>', additional_special_tokens=['<eop>', '<eod>'], **kwargs):
        # The mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = (AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token)
        super().__init__(vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs)
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = (False if (not self.vocab_file) else True)
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__('You need to install jieba to use CpmTokenizer or CpmTokenizerFast. See https://pypi.org/project/jieba/ for installation.')
        self.jieba = jieba
        # Map space/newline to printable placeholders so jieba output survives tokenization.
        self.translator = str.maketrans(' \n', '\u2582\u2583')

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Append XLNet-style special tokens: `X <sep> <cls>` or `A <sep> B <sep> <cls>`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return ((token_ids_0 + sep) + cls)
        return ((((token_ids_0 + sep) + token_ids_1) + sep) + cls)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Segment ids: 0 for the first sequence, 1 for the second, 2 for the trailing <cls>."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if (token_ids_1 is None):
            return ((len((token_ids_0 + sep)) * [0]) + cls_segment_id)
        return (((len((token_ids_0 + sep)) * [0]) + (len((token_ids_1 + sep)) * [1])) + cls_segment_id)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Copy the slow-tokenizer vocabulary file into `save_directory`."""
        if (not self.can_save_slow_tokenizer):
            raise ValueError('Your fast tokenizer does not have the necessary information to save the vocabulary for a slow tokenizer.')
        if (not os.path.isdir(save_directory)):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        if (os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)

    def _batch_encode_plus(self, batch_text_or_text_pairs, *args, **kwargs):
        # Pre-segment with jieba and encode whitespace/newlines as placeholder glyphs.
        batch_text_or_text_pairs = [' '.join([x.translate(self.translator) for x in self.jieba.cut(text, cut_all=False)]) for text in batch_text_or_text_pairs]
        return super()._batch_encode_plus(batch_text_or_text_pairs, *args, **kwargs)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        # Undo the placeholder encoding performed in _batch_encode_plus.
        text = text.replace(' ', '').replace('\u2582', ' ').replace('\u2583', '\n')
        return text
def paint_profile_in_cube(cube, positions, profile=None, kernel=None):
    """Add copies of a radial profile (or an explicit kernel) at given positions to `cube`.

    Exactly one of `profile` (a callable of radius) or `kernel` (a precomputed
    array) must be provided. A delta-function grid is built from `positions`
    and convolved with the kernel using FFT convolution with wrap-around
    (periodic) boundaries; the result is added to `cube`.
    """
    assert ((profile is not None) or (kernel is not None))
    if (kernel is None):
        # Evaluate the profile on a radius grid measured from the (0, 0, 0)
        # corner; with boundary='wrap' below this acts as an origin-centered
        # periodic kernel. NOTE(review): confirm `profile` expects radii in
        # voxel units.
        x = np.arange(cube.shape[0])
        y = np.arange(cube.shape[1])
        z = np.arange(cube.shape[2])
        (rx, ry, rz) = np.meshgrid(x, y, z, sparse=True)
        rgrid = np.sqrt((((rx ** 2) + (ry ** 2)) + (rz ** 2)))
        kernel = profile(rgrid)
    positions = positions.squeeze()
    # Promote a single (x, y, z) triple to a 1-position array.
    # NOTE(review): `len(positions) == 3` is ambiguous — an array of exactly
    # three positions (shape (3, 3)) would also be wrapped here; confirm
    # callers never pass exactly three positions.
    positions = (np.array([positions]) if (len(positions) == 3) else np.array(positions))
    # Delta-function grid: 1 at every source voxel, 0 elsewhere.
    pos_grid = np.zeros_like(cube)
    for (x, y, z) in positions:
        pos_grid[(x, y, z)] = 1
    out = convolve_fft(pos_grid, kernel, boundary='wrap', normalize_kernel=False, allow_huge=True)
    return (out + cube)
class GatewayOperator():
    """A node in a gateway/operator tree, serializable to dict or JSON.

    Each node carries an operator type, an optional handle, and an ordered
    list of child GatewayOperator nodes.
    """

    def __init__(self, op_type):
        self.op_type = op_type
        self.children = []
        self.handle = None

    def add_children(self, children):
        """Append several child nodes at once."""
        self.children.extend(children)

    def add_child(self, child):
        """Append a single child node."""
        self.children.append(child)

    def set_handle(self, handle: str):
        """Attach an identifying handle to this node."""
        self.handle = handle

    def to_dict(self):
        """Recursively convert this subtree to plain dicts."""
        data = dict(self.__dict__)
        data['children'] = [child.to_dict() for child in self.children]
        return data

    def to_json(self):
        """Serialize this subtree as a JSON string."""
        return json.dumps(self.to_dict())

    def __repr__(self):
        return self.to_json()
class SawyerButtonPressTopdownWallV1Policy(Policy):
    """Scripted policy for the button-press-topdown-wall task.

    Fix: the stray bare `_fully_parsed` line was the residue of stripped
    decorators; `@staticmethod` / `@assert_fully_parsed` are restored on
    `_parse_obs` and `@staticmethod` on `_desired_pos` (neither uses `self`).
    """

    @staticmethod
    @assert_fully_parsed
    def _parse_obs(obs):
        """Split the flat observation vector into named components."""
        return {'hand_pos': obs[:3], 'button_pos': obs[3:6], 'unused_info': obs[6:]}

    def get_action(self, obs):
        """Move the gripper toward the target pose; the gripper stays open (-1 effort)."""
        o_d = self._parse_obs(obs)
        action = Action({'delta_pos': np.arange(3), 'grab_effort': 3})
        action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=25.0)
        action['grab_effort'] = (- 1.0)
        return action.array

    @staticmethod
    def _desired_pos(o_d):
        """Target position: hover above the button until aligned in xy, then press down."""
        pos_curr = o_d['hand_pos']
        # Offset in -y to aim at the pressable face of the button.
        pos_button = (o_d['button_pos'] + np.array([0.0, (- 0.06), 0.0]))
        if (np.linalg.norm((pos_curr[:2] - pos_button[:2])) > 0.04):
            # Not yet aligned: approach from 0.1 above to avoid the wall.
            return (pos_button + np.array([0.0, 0.0, 0.1]))
        else:
            return pos_button
class AdventLoss(torch.nn.Module):
    """Adversarial BCE loss for ADVENT-style discriminator training.

    `forward` builds a constant target tensor filled with `y_true` on the
    same CUDA device as `y_pred` and applies BCE-with-logits.
    """

    def __init__(self):
        super().__init__()
        self.crit = nn.BCEWithLogitsLoss()

    def forward(self, y_pred, y_true):
        # Constant target with the same shape as the predictions.
        target = torch.FloatTensor(y_pred.size())
        target.fill_(y_true)
        target = target.to(y_pred.get_device())
        advent_loss = self.crit(y_pred, target)
        # Also expose the loss in a stats dict for logging.
        return (advent_loss, {'advent_loss': advent_loss})
def loss_fn(train_rng, state, params, batch, is_training, model, inner_learning_rate, inner_n_steps):
    """MAML outer-loop loss: adapt on each task's train split, score on its val split, average.

    `batch['train']` / `batch['val']` are stacked per-task batches; the
    adaptation is vectorized over tasks with jax.vmap.
    """
    def task_loss(task_train_batch, task_val_batch):
        # Inner-loop adaptation starting from the shared meta-parameters.
        adapted_params = inner_step(params=params, model=model, inner_batch=task_train_batch, inner_learning_rate=inner_learning_rate, inner_n_steps=inner_n_steps)
        # Evaluate the adapted parameters on the held-out task batch.
        return inner_loss_fn(params=adapted_params, batch=task_val_batch, model=model)
    return jax.vmap(task_loss)(batch['train'], batch['val']).mean()
class A001906(RecurrenceSequence2):
    """OEIS A001906: F(2n), the bisection of the Fibonacci sequence.

    Defined by the linear recurrence a(n) = 3*a(n-1) - a(n-2) with
    a(0) = 0 and a(1) = 1.
    """
    def __init__(self):
        # NOTE(review): deliberately calls SloaneSequence.__init__ (the
        # grandparent initializer) rather than RecurrenceSequence2.__init__ —
        # presumably to skip the parent's setup; confirm against the base class.
        SloaneSequence.__init__(self, offset=0)
        # (a(0), a(1), coefficient of a(n-1), coefficient of a(n-2)).
        self._params = (0, 1, 3, (- 1))
        self._b = []
        self._precompute(2)
    def _repr_(self):
        return 'F(2n) = bisection of Fibonacci sequence: a(n)=3a(n-1)-a(n-2).'
def generate_table_rounds(velocity):
    """Render one LaTeX table body row per presolver of the given velocity.

    Each row lists the presolver name, its abbreviation and total call count,
    followed by one 'conflicts/same-appearances' cell per other presolver of
    the same velocity (cells above the diagonal are rendered as '-').
    Relies on the module-level tables `presolver`, `presolver_to_velocity`,
    `abbrevations`, `information`, and the helper `round_to_string`.
    """
    r = ''
    for i in range(0, len(presolver)):
        res = ''
        # Only include presolvers belonging to the requested velocity class.
        if (presolver_to_velocity.get(presolver.get(i)) != velocity):
            continue
        # Runs in which presolver i was called at least once, and its call counts.
        matches = list(filter((lambda x: (x.calls[i] > 0)), information))
        val = list(map((lambda x: x.calls[i]), matches))
        # Row header: "Name (abbr) & total-calls".
        res = (res + (((((str(presolver.get(i)) + ' (') + abbrevations.get(i)) + ')') + ' & ') + round_to_string(sum(val))))
        for j in range(0, len(presolver)):
            if (presolver_to_velocity.get(presolver.get(j)) != velocity):
                continue
            res = (res + ' & ')
            # Upper triangle is left blank (symmetric information).
            if (j > i):
                res = (res + '- ')
                continue
            # Runs where i and j appeared in the same / in conflicting reductions.
            matches_same = list(filter((lambda x: (x.same_appearances[i][j] > 0)), information))
            matches_con = list(filter((lambda x: (x.conflict_appearances[i][j] > 0)), information))
            if ((len(matches_con) == 0) and (i == j)):
                # Diagonal without conflicts: 0 conflicts out of all calls.
                assert (len(matches_same) == 0)
                res = ((res + '0/') + round_to_string(sum(val)))
            elif ((len(matches_con) > 0) and (i == j)):
                # Diagonal with conflicts: conflict count out of all calls.
                assert (len(matches_same) == 0)
                res = (((res + round_to_string(sum(list(map((lambda x: x.conflict_appearances[i][j]), matches_con))))) + '/') + round_to_string(sum(val)))
            elif (len(matches_same) == 0):
                # Off-diagonal pair that never co-occurred at all.
                assert (len(matches_con) == 0)
                res = (res + ' 0/0 ')
            else:
                # Off-diagonal pair: conflicts (possibly 0) over same-appearances.
                if (len(matches_con) != 0):
                    res = (res + round_to_string(sum(list(map((lambda x: x.conflict_appearances[i][j]), matches_con)))))
                else:
                    res = (res + '0')
                res = ((res + '/') + round_to_string(sum(list(map((lambda x: x.same_appearances[i][j]), matches_same)))))
        # Terminate the LaTeX table row.
        r = ((r + res) + '\\\\\n')
    return r
def uttwav_collater(batch):
    """Collate (waveform, utterance-name) pairs into padded batch tensors.

    Each 1-D waveform is right-padded with zeros to the longest length in the
    batch. Returns (FloatTensor of padded waveforms, list of utterance names,
    LongTensor of original lengths).
    """
    # Longest waveform determines the padded length (0 for an empty batch).
    max_len = max((wav.shape[0] for (wav, _) in batch), default=0)
    wavs = []
    utts = []
    lens = []
    for (wav, uttname) in batch:
        n_samples = wav.shape[0]
        n_pad = (max_len - n_samples)
        if (n_pad > 0):
            wav = np.concatenate((wav, np.zeros((n_pad,))), axis=0)
        wavs.append(wav)
        utts.append(uttname)
        lens.append(n_samples)
    return (torch.FloatTensor(wavs), utts, torch.LongTensor(lens))
def pbmc_seurat_v4_cite_seq(save_path: str='data/', apply_filters: bool=True, aggregate_proteins: bool=True, mask_protein_batches: int=0) -> anndata.AnnData:
    """Load the PBMC Seurat v4 CITE-seq dataset as an AnnData object.

    Thin public wrapper that forwards all arguments unchanged to the private
    loader `_load_pbmc_seurat_v4_cite_seq` (defined elsewhere), which handles
    download/caching under `save_path`, optional cell/gene filtering, protein
    aggregation, and masking of protein measurements in `mask_protein_batches`
    batches.
    """
    return _load_pbmc_seurat_v4_cite_seq(save_path=save_path, apply_filters=apply_filters, aggregate_proteins=aggregate_proteins, mask_protein_batches=mask_protein_batches)
def generate_matrix(size, dtype):
    """Generate a deterministic random symmetric `size` x `size` matrix.

    Uses a fixed-seed generator, so repeated calls return identical matrices.
    The product (0.5 * A) @ A.T is symmetric (and positive semi-definite) by
    construction.

    Fix: the original body read `((0.5 * A) A.T)` — the `@` matrix-multiply
    operator had been stripped, which is a syntax error; it is restored here.
    """
    from numpy.random import default_rng
    rng = default_rng(42)
    A = rng.random((size, size), dtype=dtype)
    return ((0.5 * A) @ A.T).copy()
class ExactMatchEvaluator(object):
    """Exact-match precision/recall/F1 over predicted mention spans.

    A prediction counts as correct only when its (begin, end, type) triple
    appears verbatim in the gold annotations for the same sentence.
    """

    def __init__(self):
        pass

    def eval(self, pred, golden):
        """Score `pred` against `golden`; both map sentence id -> mention collections.

        Gold mentions are (begin, end, type) triples; predictions are
        (begin, end, type, extra) tuples and are deduplicated per sentence.
        Returns (precision, recall, F1), or (0.0, 0.0, 0.0) when nothing
        was predicted correctly.
        """
        total_mentions = 0.0
        pred_error = 0.0
        pred_correct = 0.0
        for (sent_id, gold_mentions) in golden.items():
            total_mentions += len(gold_mentions)
            if (sent_id not in pred):
                continue
            # Deduplicate predictions, dropping the trailing extra field.
            unique_spans = {(b, e, tp) for (b, e, tp, _) in pred[sent_id]}
            for span in unique_spans:
                if (span in gold_mentions):
                    pred_correct += 1
                else:
                    pred_error += 1
        if (pred_correct == 0):
            return (0.0, 0.0, 0.0)
        precision = (pred_correct / (pred_correct + pred_error))
        recall = (pred_correct / total_mentions)
        f1 = ((2 * (precision * recall)) / (precision + recall))
        return (precision, recall, f1)
class MpKpiAggregation(Enum):
    """Enumeration of KPI aggregation strategies; each member is directly callable."""
    # functools.partial is required here: a plain function assigned in an Enum
    # body would be treated as a method rather than becoming a member value.
    SUM = partial(sum_kpi)
    MAX = partial(max_kpi)
    TOTAL = partial(total_kpi)
    def __call__(self, *args):
        # Delegate the call to the wrapped aggregation function so users can
        # write e.g. MpKpiAggregation.SUM(values).
        return self.value(*args)
def get_gpt_prompt(claim: str, evidence_list: list[str], line_idx: list[int]) -> str:
    """Fill the module-level GPT_PROMPT template with a claim and tagged evidence.

    Each evidence line is wrapped in a numbered <sentence_N>...</sentence_N>
    tag (one per line), with N taken from `line_idx`. The two lists must be
    the same length.
    """
    assert (len(evidence_list) == len(line_idx)), f'{len(evidence_list)} != {len(line_idx)}, {line_idx}'
    tagged_lines = []
    for (idx, line) in zip(line_idx, evidence_list):
        tagged_lines.append(f' <sentence_{idx}>{line}</sentence_{idx}>')
    return GPT_PROMPT.format(claim=claim, evidence='\n'.join(tagged_lines))
def test_keras_ensemble_ensemble_size_attributes(ensemble_size: int) -> None:
    """A built Keras ensemble must report exactly the requested ensemble size."""
    # Minimal dataset: 1-dimensional inputs and outputs, zero rows.
    example_data = empty_dataset([1], [1])
    keras_ensemble = trieste_keras_ensemble_model(example_data, ensemble_size)
    assert (keras_ensemble.ensemble_size == ensemble_size)
def ambiguous_node_str():
    """Build a HierarchicalClassifier whose label paths share the node 'b'.

    The two hierarchy paths ['a', 'b'] and ['b', 'c'] make 'b' appear at two
    different levels, producing an ambiguous node for tests to exercise.
    NOTE(review): this reads like a pytest fixture body — a stripped
    @pytest.fixture decorator is plausible; confirm against the test module.
    """
    classifier = HierarchicalClassifier()
    classifier.y_ = np.array([['a', 'b'], ['b', 'c']])
    return classifier
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.