code stringlengths 101 5.91M |
|---|
class TestMotionBindings(unittest.TestCase):
    """Unit tests for the pinocchio Motion bindings: getters, setters and algebra."""

    def test_zero_getters(self):
        # A zero motion exposes zero linear, angular and stacked 6-vectors.
        motion = pin.Motion.Zero()
        for expected, actual in ((zero(3), motion.linear),
                                 (zero(3), motion.angular),
                                 (zero(6), motion.vector)):
            self.assertTrue(np.allclose(expected, actual))

    def test_setRandom(self):
        # setRandom() must overwrite the zero state with non-zero entries.
        motion = pin.Motion.Zero()
        motion.setRandom()
        self.assertFalse(np.allclose(zero(3), motion.linear))
        self.assertFalse(np.allclose(zero(3), motion.angular))
        self.assertFalse(np.allclose(zero(6), motion.vector))

    def test_setZero(self):
        # setZero() resets a randomized motion back to all zeros.
        motion = pin.Motion.Zero()
        motion.setRandom()
        motion.setZero()
        for expected, actual in ((zero(3), motion.linear),
                                 (zero(3), motion.angular),
                                 (zero(6), motion.vector)):
            self.assertTrue(np.allclose(expected, actual))

    def test_set_linear(self):
        motion = pin.Motion.Zero()
        new_linear = rand(3)
        motion.linear = new_linear
        self.assertTrue(np.allclose(motion.linear, new_linear))
        # Element assignment must write through to the underlying motion.
        motion.linear[1] = 1.0
        self.assertTrue(motion.linear[1] == 1.0)

    def test_set_angular(self):
        motion = pin.Motion.Zero()
        new_angular = rand(3)
        motion.angular = new_angular
        self.assertTrue(np.allclose(motion.angular, new_angular))
        motion.angular[1] = 1.0
        self.assertTrue(motion.angular[1] == 1.0)

    def test_set_vector(self):
        motion = pin.Motion.Zero()
        new_vector = rand(6)
        motion.vector = new_vector
        self.assertTrue(np.allclose(motion.vector, new_vector))

    def test_internal_sums(self):
        a = pin.Motion.Random()
        b = pin.Motion.Random()
        # Motion addition/subtraction must match plain vector arithmetic.
        self.assertTrue(np.allclose((a + b).vector, a.vector + b.vector))
        self.assertTrue(np.allclose((a - b).vector, a.vector - b.vector))

    def test_se3_action(self):
        transform = pin.SE3.Random()
        motion = pin.Motion.Random()
        expected = transform.action.dot(motion.vector)
        self.assertTrue(np.allclose((transform * motion).vector, expected))
        self.assertTrue(np.allclose(transform.act(motion).vector, expected))
        self.assertTrue(np.allclose(transform.actInv(motion).vector,
                                    np.linalg.inv(transform.action).dot(motion.vector)))
        # The motion cross product of a motion with itself vanishes.
        self.assertTrue(np.allclose((motion ^ motion).vector, zero(6)))

    def test_conversion(self):
        motion = pin.Motion.Random()
        roundtripped = pin.Motion(np.array(motion))
        self.assertTrue(roundtripped == motion)

    def test_several_init(self):
        # Repeated construction must not corrupt state (regression check).
        for _ in range(100000):
            summed = pin.Motion.Zero() + pin.Motion.Zero()
            self.assertTrue(np.allclose(summed.vector, zero(6)))
def _skip_slow():
    """Abort the current benchmark when ASV_SKIP_SLOW=1 is set in the environment.

    NOTE(review): presumably consumed by asv, which treats a raised
    NotImplementedError as a skipped benchmark — confirm.
    """
    flag = os.environ.get('ASV_SKIP_SLOW', '0')
    if flag == '1':
        raise NotImplementedError('Skipping this test...')
class SpatialSoftmax3D(torch.nn.Module):
    """Soft-argmax over a 3-D feature volume.

    Flattens each channel's D*H*W grid, applies a temperature-scaled softmax,
    and returns the attention-weighted expected (x, y, z) coordinate per
    channel, with each coordinate axis normalized to [-1, 1].
    """

    def __init__(self, depth, height, width, channel):
        super(SpatialSoftmax3D, self).__init__()
        self.depth = depth
        self.height = height
        self.width = width
        self.channel = channel
        # Fixed (non-learned) softmax temperature.
        self.temperature = 0.01
        axes = [np.linspace(-1.0, 1.0, size)
                for size in (self.depth, self.height, self.width)]
        grids = np.meshgrid(*axes)
        flat = self.depth * self.height * self.width
        # Buffers (not parameters) so the grids follow the module across devices.
        for name, grid in zip(('pos_x', 'pos_y', 'pos_z'), grids):
            self.register_buffer(name, torch.from_numpy(grid.reshape(flat)).float())

    def forward(self, feature):
        # NOTE(review): collapses (batch, channel) into one leading axis;
        # assumes D*H*W contiguous spatial elements per channel — confirm.
        flat = self.height * self.width * self.depth
        feature = feature.view(-1, flat)
        attention = F.softmax(feature / self.temperature, dim=-1)
        expected = [torch.sum(pos * attention, dim=1, keepdim=True)
                    for pos in (self.pos_x, self.pos_y, self.pos_z)]
        keypoints = torch.cat(expected, 1).view(-1, self.channel * 3)
        return keypoints
class CoolObjectAction(BaseAction):
    """Reward shaping for the 'cool object' sub-goal."""

    # Agent actions that are meaningful while cooling an object.
    valid_actions = {'OpenObject', 'CloseObject', 'PickupObject', 'PutObject'}

    def get_reward(self, state, prev_state, expert_plan, goal_idx):
        """Return (reward, done) for the transition into *state*."""
        if state.metadata['lastAction'] not in self.valid_actions:
            return (self.rewards['invalid_action'], False)
        # Default outcome until the target object is confirmed cooled.
        reward, done = self.rewards['neutral'], False
        # NOTE(review): the object being cooled is read from the step expected
        # two goals ahead (the matching PutObject) in the expert plan — confirm.
        put_goal_idx = goal_idx + 2
        if put_goal_idx < len(expert_plan):
            object_id = expert_plan[put_goal_idx]['planner_action']['objectId']
            target = get_object(object_id, state.metadata)
            if target['objectId'] in self.env.cooled_objects:
                reward, done = self.rewards['positive'], True
            else:
                reward, done = self.rewards['negative'], False
        return (reward, done)
class DoubleMNIST(CombinationMetaDataset):
    """Meta-dataset of Double-MNIST tasks built by combining character classes.

    Thin wrapper: constructs the per-class dataset and hands it to
    CombinationMetaDataset together with the task-level transforms.
    """

    def __init__(self, root, num_classes_per_task=None, meta_train=False,
                 meta_val=False, meta_test=False, meta_split=None, transform=None,
                 target_transform=None, dataset_transform=None,
                 class_augmentations=None, download=False):
        class_dataset = DoubleMNISTClassDataset(
            root,
            meta_train=meta_train,
            meta_val=meta_val,
            meta_test=meta_test,
            meta_split=meta_split,
            transform=transform,
            class_augmentations=class_augmentations,
            download=download,
        )
        super(DoubleMNIST, self).__init__(
            class_dataset,
            num_classes_per_task,
            target_transform=target_transform,
            dataset_transform=dataset_transform,
        )
def main(args):
    """Smooth every '*mesh00.obj' mesh under args.dpath and write a
    '_filteredNN' copy next to the original.

    Smoothing is applied only when args.filter_iters > 0, but the suffixed
    output file is written either way (matching the original behavior).
    """
    for mesh_path in glob.glob(f'{args.dpath}/*mesh00.obj'):
        print(f'Processing {mesh_path}...')
        mesh = o3d.io.read_triangle_mesh(mesh_path)
        if args.filter_iters > 0:
            mesh = mesh.filter_smooth_simple(number_of_iterations=args.filter_iters)
        stem, ext = os.path.splitext(mesh_path)
        out_path = f'{stem}_filtered{args.filter_iters:02d}{ext}'
        o3d.io.write_triangle_mesh(out_path, mesh)
def test_simplify_mixed_ws():
    """_simplify_mixed_ws must collapse tabs into single spaces in-place."""
    helpers.disbale_tqdm()  # NOTE(review): typo is in the helpers module itself
    helpers.setup(with_data=True)
    input_lines = ['a b c\n', 'test\tdata stuff\n', 'to test\tstuff\n']
    target = os.path.join(helpers.DATA_DIR, 'test.cat')
    with open(target, 'w') as handle:
        handle.writelines(input_lines)
    convert._simplify_mixed_ws(target)
    with open(target, 'r') as handle:
        actual_lines = handle.readlines()
    helpers.tear_down()
    helpers.enable_tqdm()
    assert actual_lines == ['a b c\n', 'test data stuff\n', 'to test stuff\n']
def select_examples_NQ(data, index, passages, passages_index):
    """Assemble NQ examples for the questions picked by *index*.

    For the i-th selection (data entry k = index[i]) the supporting contexts
    are looked up via passages_index[str(i)]; each passages row stores the
    text at position 0 and the title at position 1.
    """
    selected = []
    for position, data_key in enumerate(index):
        contexts = []
        for passage_id in passages_index[str(position)]:
            passage = passages[passage_id]
            contexts.append({'id': passage_id, 'title': passage[1], 'text': passage[0]})
        selected.append({
            'question': data[data_key]['question'],
            'answers': data[data_key]['answer'],
            'ctxs': contexts,
        })
    return selected
class SBCSGroupProber(CharSetGroupProber):
    # Group prober that runs every single-byte charset model in parallel and
    # lets CharSetGroupProber pick the best-scoring candidate encoding.
    def __init__(self):
        super(SBCSGroupProber, self).__init__()
        # One prober per supported single-byte encoding model (Cyrillic,
        # Greek, Bulgarian, Hebrew, Thai and Turkish variants).
        self.probers = [SingleByteCharSetProber(Win1251CyrillicModel), SingleByteCharSetProber(Koi8rModel), SingleByteCharSetProber(Latin5CyrillicModel), SingleByteCharSetProber(MacCyrillicModel), SingleByteCharSetProber(Ibm866Model), SingleByteCharSetProber(Ibm855Model), SingleByteCharSetProber(Latin7GreekModel), SingleByteCharSetProber(Win1253GreekModel), SingleByteCharSetProber(Latin5BulgarianModel), SingleByteCharSetProber(Win1251BulgarianModel), SingleByteCharSetProber(TIS620ThaiModel), SingleByteCharSetProber(Latin5TurkishModel)]
        # Hebrew needs a coordinating prober wired to two model probers.
        # NOTE(review): the False/True flags presumably select logical vs
        # visual character ordering — confirm against SingleByteCharSetProber.
        hebrew_prober = HebrewProber()
        logical_hebrew_prober = SingleByteCharSetProber(Win1255HebrewModel, False, hebrew_prober)
        visual_hebrew_prober = SingleByteCharSetProber(Win1255HebrewModel, True, hebrew_prober)
        hebrew_prober.set_model_probers(logical_hebrew_prober, visual_hebrew_prober)
        self.probers.extend([hebrew_prober, logical_hebrew_prober, visual_hebrew_prober])
        self.reset()
def replace_EO(sentence, EO, ent_type_labels, cur_type, entities):
    """Tag every occurrence of *entities* inside *sentence* with BMES labels.

    The entity phrase is located by whole-string replacement (so EVERY
    occurrence is marked), then the parallel label lists are updated in
    place: a single-token entity gets 'S'; a multi-token one gets
    'B', 'M'..., 'E'.

    Args:
        sentence: list of tokens.
        EO: BMES tag list, same length as sentence; updated in place.
        ent_type_labels: entity-type label list; updated in place.
        cur_type: type label written for every tagged token.
        entities: the entity phrase as a list of tokens.

    Returns:
        (masked_sentence, EO, ent_type_labels, indices_list): the sentence
        with entity tokens replaced by 'YYY', the updated label lists, and
        the token-index spans of multi-token occurrences.
    """
    joined = ' '.join(sentence)
    phrase = ' '.join(entities)
    # Replace the phrase with placeholder tokens so the token count (and thus
    # the alignment with EO) is preserved.
    joined = joined.replace(phrase, ' '.join(['XXX'] * len(entities)))
    s = joined.split()
    assert len(s) == len(EO)
    at_entity_start = True
    consumed = 0  # tokens consumed of the current multi-token occurrence
    indices_list = []
    for i in range(len(s)):
        if s[i] != 'XXX':
            continue
        s[i] = 'YYY'
        if at_entity_start:
            if len(entities) == 1:
                EO[i] = 'S'
                ent_type_labels[i] = cur_type
                # single-token entity: next placeholder starts a new occurrence
            else:
                # BUG FIX: reset the counter for each new occurrence. It
                # previously kept growing across occurrences, so every
                # occurrence after the first was tagged B,M,...,M and never
                # closed with 'E'.
                consumed = 1
                at_entity_start = False
                EO[i] = 'B'
                ent_type_labels[i] = cur_type
                indices_list.append([i])
        else:
            consumed += 1
            if consumed == len(entities):
                EO[i] = 'E'
                ent_type_labels[i] = cur_type
                at_entity_start = True
            else:
                EO[i] = 'M'
                ent_type_labels[i] = cur_type
            indices_list[-1].append(i)
    return (s, EO, ent_type_labels, indices_list)
def alpha_calc(RACC, ACC, POP):
    """Reliability coefficient from random accuracy, accuracy and population.

    Applies the 1/(2*POP) continuity correction to ACC before delegating to
    reliability_calc.  The string 'None' is returned on any failure
    (e.g. POP == 0) — preserved for backward compatibility with callers.
    """
    try:
        correction = 1 / (2 * POP)
        adjusted_acc = (1 - correction) * ACC + correction
        return reliability_calc(RACC, adjusted_acc)
    except Exception:
        return 'None'
def command_generator():
    """Build a 'python train.py ...' command with one randomly sampled value
    per hyper-parameter described in the module-level *setting* dict.

    Each entry of *setting* holds the CLI flag under 'arg' and the candidate
    values under 'value'.

    Returns:
        (command, args_msg): the full shell command and just the argument string.
    """
    args_msgs = []
    # Use the unpacked spec directly instead of re-indexing setting[token]
    # (the original looked each entry up twice and ignored the value).
    for token, spec in setting.items():
        value = np.random.choice(spec['value'])
        args_msgs.append(f"{spec['arg']} {value}")
    args_msg = ' '.join(args_msgs)
    return ('python train.py ' + args_msg, args_msg)
def solve(proto, snapshot, gpus, timing, uid, rank):
    """Per-process worker for multi-GPU Caffe training synchronized via NCCL.

    Intended to be spawned once per GPU; *rank* indexes into *gpus* and
    identifies this process to NCCL through the shared *uid*.

    Args:
        proto: path to the solver prototxt.
        snapshot: optional solver snapshot path to restore from ('' to skip).
        gpus: list of GPU device ids, one per worker process.
        timing: when True, rank 0 runs the timing harness instead of training callbacks.
        uid: NCCL unique id shared by all workers.
        rank: this worker's index.
    """
    caffe.set_mode_gpu()
    caffe.set_device(gpus[rank])
    caffe.set_solver_count(len(gpus))
    caffe.set_solver_rank(rank)
    caffe.set_multiprocess(True)
    solver = caffe.SGDSolver(proto)
    if (snapshot and (len(snapshot) != 0)):
        solver.restore(snapshot)
    # Broadcast the initial weights from rank 0 to all workers.
    nccl = caffe.NCCL(solver, uid)
    nccl.bcast()
    if (timing and (rank == 0)):
        # NOTE(review): `time` here must be a timing helper defined elsewhere
        # in this file (it shadows the stdlib module name) — confirm.
        time(solver, nccl)
    else:
        solver.add_callback(nccl)
    # Matches upstream BVLC caffe train.py: layer-wise reduction hooks the
    # NCCL callback after each backward pass, for every rank.
    if solver.param.layer_wise_reduce:
        solver.net.after_backward(nccl)
    solver.step(solver.param.max_iter)
def load_usps0_noisy():
    """Load the USPS-0 split and flip the labels of a random 10% of the
    training samples (fixed RNG seed 0), simulating label noise.

    Returns:
        (X_train, y_train, X_test, y_test) with 10% of y_train flipped.
    """
    X_train, y_train, X_test, y_test = load_usps0()
    n_samples = X_train.shape[0]
    indices = np.arange(n_samples)
    random_state = check_random_state(0)
    random_state.shuffle(indices)
    # BUG FIX: use integer division — `n_samples / 10` is a float in
    # Python 3 and a float slice bound raises TypeError.
    n_noisy = n_samples // 10
    noisy_indices = indices[:n_noisy]
    y_train[noisy_indices] = np.logical_not(y_train[noisy_indices]).astype(int)
    return (X_train, y_train, X_test, y_test)
def result_message(finding, info_finding):
    """Compose a human-readable result string for a finding.

    The message falls back from finding['message'] to the info catalogue's
    short description to the finding name; a 'Severity: X' line is appended
    when a severity is present.  Returns '' when neither part is available.

    (Rewritten from a triple-nested conditional expression into guard
    clauses; behavior is unchanged.)
    """
    message = (finding.get('message')
               or info_finding.get('descr_short')
               or finding['name'])
    severity = finding.get('severity')
    if message and severity:
        return f'{message}\nSeverity: {severity}'
    if message:
        return message
    if severity:
        return f'Severity: {severity}'
    return ''
def strainxx(xx):
    """xx-strain component at point xx = (x, y): -2*pi*sin(2*pi*x)*sin(pi*y).

    NOTE(review): the original also read the module-level `qload` into an
    unused local `Q`; removed as dead code (pure read, no side effect).
    """
    x, y = xx[0], xx[1]
    return -2 * pi * np.sin(2 * pi * x) * np.sin(pi * y)
def flatten_dict(d, parent_key='', sep='_'):
    """Flatten arbitrarily nested dicts into one level, joining keys with *sep*.

    >>> flatten_dict({'a': {'b': 1}, 'c': 2})
    {'a_b': 1, 'c': 2}
    """
    flat = {}
    for key, value in d.items():
        compound_key = parent_key + sep + key if parent_key else key
        if isinstance(value, dict):
            # Recurse into sub-dicts, carrying the compound key as prefix.
            flat.update(flatten_dict(value, compound_key, sep=sep))
        else:
            flat[compound_key] = value
    return flat
class EffHead(nn.Module):
    """EfficientNet-style head: 1x1 conv -> normalization -> Swish."""

    def __init__(self, w_in, w_out, bn_norm):
        super(EffHead, self).__init__()
        # Pointwise conv; bias omitted because a norm layer follows.
        self.conv = nn.Conv2d(w_in, w_out, 1, stride=1, padding=0, bias=False)
        self.conv_bn = get_norm(bn_norm, w_out)
        self.conv_swish = Swish()

    def forward(self, x):
        out = self.conv(x)
        out = self.conv_bn(out)
        out = self.conv_swish(out)
        return out
class T5Corrector():
    """Chinese text corrector backed by a T5 conditional-generation model."""

    def __init__(self, model_name_or_path: str='shibing624/mengzi-t5-base-chinese-correction'):
        # Track load time for the debug log below.
        t1 = time.time()
        self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
        self.model = T5ForConditionalGeneration.from_pretrained(model_name_or_path)
        # `device` is a module-level global defined elsewhere in this file.
        self.model.to(device)
        logger.debug('Device: {}'.format(device))
        logger.debug(('Loaded t5 correction model: %s, spend: %.3f s.' % (model_name_or_path, (time.time() - t1))))

    def _predict(self, sentences, batch_size=32, max_length=128, silent=True):
        """Generate a corrected string for every input sentence, batch by batch.

        Each decoded output is truncated to the source sentence's length so
        input and output stay aligned position-for-position.
        """
        corrected_sents = []
        for batch in tqdm([sentences[i:(i + batch_size)] for i in range(0, len(sentences), batch_size)], desc='Generating outputs', disable=silent):
            inputs = self.tokenizer(batch, padding=True, max_length=max_length, truncation=True, return_tensors='pt').to(device)
            with torch.no_grad():
                outputs = self.model.generate(**inputs, max_length=max_length)
            for (i, sent) in enumerate(batch):
                # The decoder emits spaces between tokens; strip them for Chinese text.
                decode_tokens = self.tokenizer.decode(outputs[i], skip_special_tokens=True).replace(' ', '')
                corrected_sent = decode_tokens[:len(sent)]
                corrected_sents.append(corrected_sent)
        return corrected_sents

    def correct_batch(self, sentences: List[str], max_length: int=128, batch_size: int=32, silent: bool=True):
        """Correct a batch of sentences, splitting over-long ones first.

        Returns one dict per input sentence with keys 'source', 'target' and
        'errors' (the per-position differences found by
        get_errors_for_same_length).
        """
        # Over-long sentences are split into <= max_length chunks; sent_map
        # remembers which original sentence each chunk belongs to.
        input_sents = []
        sent_map = []
        for (idx, sentence) in enumerate(sentences):
            if (len(sentence) > max_length):
                short_sentences = [i[0] for i in split_text_into_sentences_by_length(sentence, max_length)]
                input_sents.extend(short_sentences)
                sent_map.extend(([idx] * len(short_sentences)))
            else:
                input_sents.append(sentence)
                sent_map.append(idx)
        corrected_sents = self._predict(input_sents, batch_size=batch_size, max_length=max_length, silent=silent)
        # Stitch chunk corrections back together per original sentence.
        corrected_sentences = ([''] * len(sentences))
        for (idx, corrected_sent) in zip(sent_map, corrected_sents):
            corrected_sentences[idx] += corrected_sent
        new_corrected_sentences = []
        corrected_details = []
        for (idx, corrected_sent) in enumerate(corrected_sentences):
            (new_corrected_sent, sub_details) = get_errors_for_same_length(corrected_sent, sentences[idx])
            new_corrected_sentences.append(new_corrected_sent)
            corrected_details.append(sub_details)
        return [{'source': s, 'target': c, 'errors': e} for (s, c, e) in zip(sentences, new_corrected_sentences, corrected_details)]

    def correct(self, sentence: str, **kwargs):
        """Correct a single sentence; see correct_batch for the result shape."""
        return self.correct_batch([sentence], **kwargs)[0]
def broadcast_to_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes, axis=None):
    """Backward pass of broadcast_to — not implemented yet.

    Raises:
        NotImplementedError: always; kept as an explicit stub so autodiff
        through broadcast_to fails loudly instead of silently.
    """
    # Dead locals from the original (dy = grad_inputs[0], x0 = inputs[0])
    # removed: the stub raises unconditionally.
    raise NotImplementedError('broadcast_to_backward is not implemented.')
class TextClassificationPipeline(Pipeline):
    """Pipeline that turns raw classification logits into (label, score) dicts."""

    def __call__(self, *args, **kwargs):
        """Run the parent pipeline, softmax the logits row-wise, and return the
        arg-max label with its probability for each input."""
        outputs = super().__call__(*args, **kwargs)
        # BUG FIX: keepdims=True is required so every row is divided by its
        # own normalizer; without it the (N, C) / (N,) division misaligns
        # (or outright fails) whenever the batch size differs from the
        # number of classes.  Matches the upstream transformers implementation.
        exp = np.exp(outputs)
        scores = exp / exp.sum(-1, keepdims=True)
        return [{'label': self.model.config.id2label[item.argmax()], 'score': item.max()}
                for item in scores]
def barrier(group: Optional[ProcessGroup] = None) -> None:
    """Block until all processes in *group* reach this call.

    No-op when torch.distributed is not initialized.

    Args:
        group: process group to synchronize; defaults to the default group.
    """
    if not is_distributed():
        return
    torch_dist.barrier(get_default_group() if group is None else group)
def build_dataset(is_train, args):
    """Create an ImageFolder dataset for the 'train' or 'val' split under
    args.data_path, with the split-appropriate transform applied."""
    transform = build_transform(is_train, args)
    split = 'train' if is_train else 'val'
    dataset = datasets.ImageFolder(os.path.join(args.data_path, split),
                                   transform=transform)
    # Echo the dataset summary (class count, sample count) to the console.
    print(dataset)
    return dataset
class TFAlbertForTokenClassification(metaclass=DummyObject):
    # Auto-generated placeholder: stands in for the real TF model class when
    # TensorFlow is not installed.
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        # Raises a helpful ImportError unless the 'tf' backend is available.
        requires_backends(self, ['tf'])
@pytest.mark.torch
def test_item_ids_are_grouped_to_sequences_with_subset(small_dataset: Dataset, item_id_and_item_feature_schema: TensorSchema):
    """A tokenizer with tensor_features_to_keep=['item_id'] must keep only that
    feature while still grouping item ids into per-user sequences.

    NOTE(review): the decorator line was corrupted to '.torch' in this dump;
    restored as the pytest marker — confirm against the suite's markers.
    """
    tokenizer = SequenceTokenizer(item_id_and_item_feature_schema).fit(small_dataset)
    sequential_dataset = tokenizer.transform(small_dataset, tensor_features_to_keep=['item_id'])
    # Expected per-user item-id sequences.
    _compare_sequence(sequential_dataset, 0, 'item_id', [0, 1])
    _compare_sequence(sequential_dataset, 1, 'item_id', [0, 2, 3])
    _compare_sequence(sequential_dataset, 2, 'item_id', [1])
    _compare_sequence(sequential_dataset, 3, 'item_id', [0, 1, 2, 3, 4, 5])
    # Only the kept feature may remain in the schema.
    for tensor_feature_name in sequential_dataset.schema.keys():
        assert tensor_feature_name in {'item_id'}
    # Dropped features must be unreachable.
    with pytest.raises(KeyError):
        sequential_dataset.get_sequence(0, 'some_item_feature')
@dataclasses.dataclass(frozen=True)
class DiscreteSACModules(Modules):
    """Immutable bundle of the networks and optimizers used by discrete SAC.

    NOTE(review): the decorator line was corrupted to '(frozen=True)' in this
    dump; restored as a frozen dataclass, matching the other Modules
    containers in this codebase — confirm.
    """
    policy: CategoricalPolicy          # categorical actor
    q_funcs: nn.ModuleList             # online Q-function ensemble
    targ_q_funcs: nn.ModuleList        # target Q-function ensemble
    log_temp: Optional[Parameter]      # None when temperature is fixed
    actor_optim: Optimizer
    critic_optim: Optimizer
    temp_optim: Optional[Optimizer]    # None when temperature is fixed
def make_model(config):
    """Build the aspect-term model selected by config['aspect_term_model']['type'].

    Raises:
        ValueError: for an unsupported model type.
    """
    model_type = config['aspect_term_model']['type']
    if model_type == 'recurrent_capsnet':
        return make_recurrent_capsule_network(config)
    if model_type == 'bert_capsnet':
        return make_bert_capsule_network(config)
    raise ValueError('No Supporting.')
class SASRecDataset(Dataset):
    """Per-user sequence dataset for SASRec-style next-item recommendation.

    Each user's interaction sequence is sliced according to *data_type*:
      train: inputs items[:-3], targets items[1:-2], answer unused ([0])
      valid: inputs items[:-2], targets items[1:-1], answer [items[-2]]
      test:  inputs items[:-1], targets items[1:],  answer [items[-1]]
    Sequences are left-padded with 0 and truncated to args.max_seq_length.
    """

    def __init__(self, args, user_seq, test_neg_items=None, data_type='train'):
        self.args = args
        self.user_seq = user_seq
        self.test_neg_items = test_neg_items
        self.data_type = data_type
        self.max_len = args.max_seq_length

    def _data_sample_rec_task(self, user_id, items, input_ids, target_pos, answer):
        """Build the padded tensor tuple for one user sample.

        (Removed the original's unused `copied_input_ids = copy.deepcopy(...)`
        dead code.)
        """
        # One random negative per position, never colliding with the user's items.
        seq_set = set(items)
        target_neg = [neg_sample(seq_set, self.args.item_size) for _ in input_ids]
        # Left-pad to max_len, then keep the most recent max_len entries.
        pad_len = self.max_len - len(input_ids)
        input_ids = (([0] * pad_len) + input_ids)[-self.max_len:]
        target_pos = (([0] * pad_len) + target_pos)[-self.max_len:]
        target_neg = (([0] * pad_len) + target_neg)[-self.max_len:]
        assert len(input_ids) == self.max_len
        assert len(target_pos) == self.max_len
        assert len(target_neg) == self.max_len
        tensors = [
            torch.tensor(user_id, dtype=torch.long),
            torch.tensor(input_ids, dtype=torch.long),
            torch.tensor(target_pos, dtype=torch.long),
            torch.tensor(target_neg, dtype=torch.long),
            torch.tensor(answer, dtype=torch.long),
        ]
        if self.test_neg_items is not None:
            # BUG FIX: the original indexed with the undefined name `index`
            # (a NameError at runtime); `user_id` equals the __getitem__ index.
            test_samples = self.test_neg_items[user_id]
            tensors.append(torch.tensor(test_samples, dtype=torch.long))
        return tuple(tensors)

    def __getitem__(self, index):
        user_id = index
        items = self.user_seq[index]
        assert self.data_type in {'train', 'valid', 'test'}
        if self.data_type == 'train':
            input_ids = items[:-3]
            target_pos = items[1:-2]
            answer = [0]  # answer is unused during training
        elif self.data_type == 'valid':
            input_ids = items[:-2]
            target_pos = items[1:-1]
            answer = [items[-2]]
        else:
            input_ids = items[:-1]
            target_pos = items[1:]
            answer = [items[-1]]
        return self._data_sample_rec_task(user_id, items, input_ids, target_pos, answer)

    def __len__(self):
        """Number of users (one sample per user)."""
        return len(self.user_seq)
def GetPseudoAAC2(ProteinSequence, lamda=30, weight=0.05, AAP=None):
    """Compute the lambda-correlation part of pseudo amino acid composition
    (descriptors PAAC21 .. PAAC{20+lamda}).

    Args:
        ProteinSequence: protein sequence string.
        lamda: number of sequence-order correlation tiers.
        weight: weighting factor for the sequence-order effect.
        AAP: list of amino-acid property tables; defaults to
            [_Hydrophobicity, _hydrophilicity].

    Returns:
        dict mapping 'PAACn' to the rounded descriptor value.
    """
    # BUG FIX (best practice): avoid a mutable default argument evaluated at
    # definition time; resolve the default per call instead.
    if AAP is None:
        AAP = [_Hydrophobicity, _hydrophilicity]
    rightpart = [GetSequenceOrderCorrelationFactor(ProteinSequence, i + 1, AAP)
                 for i in range(lamda)]
    temp = 1 + weight * sum(rightpart)
    result = {}
    for index in range(20, 20 + lamda):
        result['PAAC' + str(index + 1)] = round(weight * rightpart[index - 20] / temp * 100, 3)
    return result
class MLP(nn.Module):
    """Linear projection followed by an optional LeakyReLU and shared dropout."""

    def __init__(self, n_in, n_out, dropout=0, activation=True):
        super().__init__()
        self.n_in = n_in
        self.n_out = n_out
        self.linear = nn.Linear(n_in, n_out)
        # Identity keeps forward() uniform when no activation is requested.
        if activation:
            self.activation = nn.LeakyReLU(negative_slope=0.1)
        else:
            self.activation = nn.Identity()
        self.dropout = SharedDropout(p=dropout)
        self.reset_parameters()

    def __repr__(self):
        spec = f'n_in={self.n_in}, n_out={self.n_out}'
        if self.dropout.p > 0:
            spec += f', dropout={self.dropout.p}'
        return f'{self.__class__.__name__}({spec})'

    def reset_parameters(self):
        """Orthogonal weight init, zero bias."""
        nn.init.orthogonal_(self.linear.weight)
        nn.init.zeros_(self.linear.bias)

    def forward(self, x):
        return self.dropout(self.activation(self.linear(x)))
class SetPartitionsPk_k(SetPartitionsAk_k):
    """Planar set partitions within the A_k partition-algebra family."""

    def _repr_(self):
        return SetPartitionsAk_k._repr_(self) + ' that are planar'

    def __contains__(self, x):
        # Must belong to the parent family AND be planar.
        if not SetPartitionsAk_k.__contains__(self, x):
            return False
        return bool(is_planar(x))

    def cardinality(self):
        # Planar diagrams on 2k points are counted by a Catalan number.
        return catalan_number(2 * self.k)

    def __iter__(self):
        # Filter the parent family's iterator down to planar diagrams.
        for candidate in SetPartitionsAk_k.__iter__(self):
            if is_planar(candidate):
                yield self.element_class(self, candidate)
def pose_to_siren_to_pose(p: Pose, fps=None) -> Pose:
    """Overfit a SIREN network to a pose sequence and decode it back.

    Args:
        p: input pose; its body is zero-filled and the pose is
            distribution-normalized in place before fitting.
        fps: output frame rate; defaults to the input body's fps, so passing
            a different value resamples the sequence through the network.

    Returns:
        A new Pose whose body is the SIREN reconstruction, unnormalized back
        to the original distribution; confidence is set to all-ones.
    """
    p.body.zero_filled()
    (mu, std) = p.normalize_distribution()
    net = siren.get_pose_siren(p, total_steps=3000, steps_til_summary=100, learning_rate=0.0001, cuda=True)
    new_fps = (fps if (fps is not None) else p.body.fps)
    # Sample time coordinates covering the original duration at the new fps.
    coords = siren.PoseDataset.get_coords(time=(len(p.body.data) / p.body.fps), fps=new_fps)
    pred = net(coords).cpu().numpy()
    pose_body = NumPyPoseBody(fps=new_fps, data=ma.array(pred), confidence=np.ones(shape=tuple(pred.shape[:3])))
    p = Pose(header=p.header, body=pose_body)
    p.unnormalize_distribution(mu, std)
    return p
@pytest.mark.parametrize('module_creator', [ModuleCreator(TSTNetNormal(), [(4, 3, 32, 32), (4, 3, 32, 32)]), ModuleCreator(ResUnit(16), [(4, 3, 32, 32)]), ModuleCreator(NestedTestNet(), [(4, 3, 32, 32), (4, 3, 32, 32)])])
def test_with_statement_graph_def(module_creator):
    """Capture a module call into a graph_def via `with nn.graph_def.graph()`
    and check the recorded graph reproduces the module's direct outputs.

    NOTE(review): the decorator line was corrupted to '.parametrize(...)' in
    this dump; restored as @pytest.mark.parametrize — confirm.
    """
    module = module_creator.module
    # ProtoVariables record the computation graph instead of computing values.
    proto_variable_inputs = [nn.ProtoVariable(shape) for shape in module_creator.input_shape]
    with nn.graph_def.graph() as g:
        outputs = module(*proto_variable_inputs)
    # Re-run the captured graph on real variables and compare with a direct call.
    variable_inputs = module_creator.get_variable_inputs()
    outputs = g(*variable_inputs)
    ref_outputs = module(*variable_inputs)
    forward_variable_and_check_equal(outputs, ref_outputs)
class ExceptionWrapper(object):
    """Captures the current exception (with formatted traceback) so it can be
    re-raised later, e.g. from a worker thread back on the main thread."""

    def __init__(self, exc_info=None, where='in background'):
        # Default to the exception currently being handled.
        exc_info = sys.exc_info() if exc_info is None else exc_info
        self.exc_type = exc_info[0]
        self.exc_msg = ''.join(traceback.format_exception(*exc_info))
        self.where = where

    def reraise(self):
        """Re-raise the captured exception, embedding the original traceback."""
        msg = 'Caught {} {}.\nOriginal {}'.format(
            self.exc_type.__name__, self.where, self.exc_msg)
        if self.exc_type == KeyError:
            # str(KeyError) repr-quotes its argument; KeyErrorMessage avoids that.
            msg = KeyErrorMessage(msg)
        elif getattr(self.exc_type, 'message', None):
            # Legacy exception classes that carry a 'message' attribute.
            raise self.exc_type(message=msg)
        raise self.exc_type(msg)
def load_data(path, alphabet):
    """Load an ASTRAL/SCOP dataset: encoded sequences, structure labels and
    contact-map targets.

    Args:
        path: ASTRAL file parsed by scop.parse_astral.
        alphabet: encoder mapping residues to integer codes.

    Returns:
        (x, s, c): list of per-protein LongTensors of encoded sequences, a
        tensor of structure labels, and a list of contact-map float tensors
        with entries +1 (im == 255), -1 (im == 1 or lower triangle), else 0.
    """
    with open(path, 'rb') as f:
        (names, structs, sequences) = scop.parse_astral(f, encoder=alphabet)
    x = [torch.from_numpy(x).long() for x in sequences]
    s = torch.from_numpy(structs)
    c = []
    for name in names:
        name = name.decode('utf-8')
        # NOTE(review): falls back to a 'd'-prefixed id when the raw name is
        # missing from the module-level cmap_dict — confirm the id convention.
        if (name not in cmap_dict):
            name = ('d' + name[1:])
        # Reuses (shadows) the `path` parameter for the per-protein image path.
        path = cmap_dict[name]
        im = np.array(Image.open(path), copy=False)
        contacts = np.zeros(im.shape, dtype=np.float32)
        # Pixel value 1 -> -1, pixel value 255 -> +1; all others stay 0.
        contacts[(im == 1)] = (- 1)
        contacts[(im == 255)] = 1
        # Overwrite the strict lower triangle with -1 (redundant half of the
        # symmetric map).
        mask = np.tril_indices(contacts.shape[0], k=(- 1))
        contacts[mask] = (- 1)
        c.append(torch.from_numpy(contacts))
    return (x, s, c)
def run_partition(args, data, metas):
    """Shard the data, then greedily select a subset according to *args*.

    `metas` is accepted for interface compatibility but unused here.

    Returns the list of samples produced by run_greedy.
    """
    assignments, shard_names, filenames, clustering_types = preprocess(
        data, args.computation.num_workers, args.log_every, verbose=args.verbose)
    return run_greedy(
        args, assignments, shard_names, filenames, clustering_types,
        args.subset.size, args.subset.ratio,
        measure_name=args.measure_name,
        cluster_pairing=args.clustering.pairing,
        shuffle_candidates=args.shuffle_candidates,
        verbose=args.verbose)
class Encode2DVAE_nb(nn.Module):
    """2-D VAE encoder that also predicts a per-location Bernoulli gate.

    forward() returns (mu, logvar, bern): mu/logvar parameterize the latent
    Gaussian (pooled to 1x1) computed on the gated features, and bern is the
    sigmoid gate map itself.
    """

    def __init__(self, opt, out_dim=None, num_blocks=2):
        super(Encode2DVAE_nb, self).__init__()
        if out_dim is None:
            latent_dim = opt.nfc
        else:
            assert (type(out_dim) is int)
            latent_dim = out_dim
        pad = opt.ker_size // 2
        self.features = FeatureExtractor(opt.nc_im, opt.nfc, opt.ker_size, pad, 1,
                                         num_blocks=num_blocks)
        self.mu = nn.Sequential(
            ConvBlock2D(opt.nfc, latent_dim, opt.ker_size, pad, 1, bn=False, act=None),
            nn.AdaptiveAvgPool2d(1))
        self.logvar = nn.Sequential(
            ConvBlock2D(opt.nfc, latent_dim, opt.ker_size, pad, 1, bn=False, act=None),
            nn.AdaptiveAvgPool2d(1))
        self.bern = ConvBlock2D(opt.nfc, 1, opt.ker_size, pad, 1, bn=False, act=None)

    def forward(self, x):
        features = self.features(x)
        # Gate features by the sigmoid Bernoulli map before pooling statistics.
        bern = torch.sigmoid(self.bern(features))
        gated = bern * features
        return (self.mu(gated), self.logvar(gated), bern)
class DarknetBasicBlockV3(gluon.HybridBlock):
    """Darknet-53 residual block: 1x1 bottleneck then 3x3 conv, plus a skip."""

    def __init__(self, channel, num_sync_bn_devices=(-1), **kwargs):
        super(DarknetBasicBlockV3, self).__init__(**kwargs)
        self.body = nn.HybridSequential(prefix='')
        # 1x1 conv at `channel`, then 3x3 conv restoring 2*channel.
        for channels, kernel, padding in ((channel, 1, 0), (channel * 2, 3, 1)):
            self.body.add(_conv2d(channels, kernel, padding, 1, num_sync_bn_devices))

    def hybrid_forward(self, F, x, *args):
        # Residual connection around the two-conv body.
        return self.body(x) + x
def get_args():
    """Parse CLI arguments; -c/--config_path (a Path to the config) is required."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config_path', type=Path,
                        help='Path to the config.', required=True)
    return parser.parse_args()
def test_evaluate_classification_coverage(tmpdir):
    # Coverage test: run EvaluatePrequential with the full metric list on a
    # seeded synthetic stream and pin the final windowed accuracy.
    stream = RandomTreeGenerator(tree_random_state=23, sample_random_state=12, n_classes=2, n_cat_features=2, n_num_features=5, n_categories_per_cat_feature=5, max_tree_depth=6, min_leaf_depth=3, fraction_leaves_per_level=0.15)
    # NOTE(review): features from index 15 onward are treated as nominal —
    # presumably the one-hot categorical block of the generator; confirm.
    nominal_attr_idx = [x for x in range(15, len(stream.feature_names))]
    learner = HoeffdingTreeClassifier(nominal_attributes=nominal_attr_idx)
    max_samples = 1000
    output_file = os.path.join(str(tmpdir), 'prequential_summary.csv')
    metrics = ['accuracy', 'kappa', 'kappa_t', 'kappa_m', 'f1', 'precision', 'recall', 'gmean', 'true_vs_predicted']
    evaluator = EvaluatePrequential(max_samples=max_samples, metrics=metrics, output_file=output_file)
    evaluator.evaluate(stream=stream, model=learner)
    (mean_performance, current_performance) = evaluator.get_measurements(model_idx=0)
    # Pinned from a reference run with the fixed seeds above.
    expected_current_accuracy = 0.685
    assert np.isclose(current_performance.accuracy_score(), expected_current_accuracy)
class SeparatorStyle(Enum):
    # Conversation separator styles (auto-valued; compared by identity).
    # NOTE(review): presumably selects how prompt turns are joined — confirm
    # against the conversation-template code that consumes this enum.
    SINGLE = auto()
    TWO = auto()
    MPT = auto()
    PLAIN = auto()
    LLAMA_2 = auto()
def subprocess_fn(rank, args):
    """Entry point for one training process: set up file logging, initialize
    distributed state, silence custom-op logs on non-zero ranks, then train."""
    if not args.debug:
        log_path = os.path.join(args.run_dir, 'log.txt')
        dnnlib.util.Logger(file_name=log_path, file_mode='a', should_flush=True)
    distributed_utils.init_distributed_mode(rank, args)
    # Only rank 0 reports custom-op build output.
    if args.rank != 0:
        custom_ops.verbosity = 'none'
    training_loop.training_loop(**args)
def tokenize(tokenizer, tokens: List[str], splits: List[int]):
    """Sub-tokenize *tokens*, inserting a SPAN_TOKEN before each split point.

    Args:
        tokenizer: a HuggingFace tokenizer.
        tokens: original (word-level) tokens.
        splits: original-token indices at which span markers are inserted;
            the same index may appear multiple times.

    Returns:
        (all_doc_tokens, orig_to_tok_index, tok_to_orig_index, span_to_orig_index):
        the sub-token stream, a map from original token -> first sub-token
        position, a map from sub-token -> original token (-1 for span
        markers), and a map from span-marker position -> split ordinals.
    """
    tok_to_orig_index = []
    orig_to_tok_index = []
    all_doc_tokens = []
    # Group the split ordinals by the original-token index they precede.
    tok_to_orig_span_index = defaultdict(list)
    for (i, s) in enumerate(splits):
        tok_to_orig_span_index[s].append(i)
    span_to_orig_index = dict()
    for (i, token) in enumerate(tokens):
        if (i in tok_to_orig_span_index):
            # Insert a span marker before this token; it maps to no original
            # token (-1) and records which split ordinals it represents.
            span_to_orig_index[len(all_doc_tokens)] = tok_to_orig_span_index[i]
            tok_to_orig_index.append((- 1))
            all_doc_tokens.append(SPAN_TOKEN)
        orig_to_tok_index.append(len(all_doc_tokens))
        # Byte-pair tokenizers need the leading-space hint to tokenize
        # mid-sentence words consistently.
        if (tokenizer.__class__.__name__ in ['RobertaTokenizer', 'LongformerTokenizer', 'BartTokenizer', 'RobertaTokenizerFast', 'LongformerTokenizerFast', 'BartTokenizerFast']):
            sub_tokens = tokenizer.tokenize(token, add_prefix_space=True)
        else:
            sub_tokens = tokenizer.tokenize(token)
        for sub_token in sub_tokens:
            tok_to_orig_index.append(i)
            all_doc_tokens.append(sub_token)
    return (all_doc_tokens, orig_to_tok_index, tok_to_orig_index, span_to_orig_index)
def make_data_formatter(exp_name):
    """Instantiate the data formatter for the named experiment.

    Raises KeyError for unknown experiment names.
    """
    formatter_registry = {
        'volatility': data_formatters.volatility.VolatilityFormatter,
        'electricity': data_formatters.electricity.ElectricityFormatter,
        'traffic': data_formatters.traffic.TrafficFormatter,
        'favorita': data_formatters.favorita.FavoritaFormatter,
    }
    return formatter_registry[exp_name]()
def train(predictor, x, split_edge, optimizer, batch_size):
    """One epoch of link-prediction training with uniform negative sampling.

    Returns the example-weighted mean loss over all positive edges.
    """
    predictor.train()
    pos_train_edge = split_edge['train']['edge'].to(x.device)
    total_loss = total_examples = 0
    for perm in DataLoader(range(pos_train_edge.size(0)), batch_size, shuffle=True):
        optimizer.zero_grad()
        # Positive term: score the sampled true edges.
        edge = pos_train_edge[perm].t()
        pos_out = predictor(x[edge[0]], x[edge[1]])
        pos_loss = -torch.log(pos_out + 1e-15).mean()
        # Negative term: uniformly re-sampled endpoint pairs of the same shape.
        edge = torch.randint(0, x.size(0), edge.size(), dtype=torch.long, device=x.device)
        neg_out = predictor(x[edge[0]], x[edge[1]])
        neg_loss = -torch.log((1 - neg_out) + 1e-15).mean()
        loss = pos_loss + neg_loss
        loss.backward()
        optimizer.step()
        batch_examples = pos_out.size(0)
        total_loss += loss.item() * batch_examples
        total_examples += batch_examples
    return total_loss / total_examples
class ME(nn.Module):
    """Downsampling branch: 2x2 max-pool, pointwise conv, batch norm."""

    def __init__(self, cin, cout):
        super().__init__()
        # ceil_mode keeps odd spatial sizes from losing the last row/column.
        self.maxpool = nn.MaxPool2d(2, ceil_mode=True)
        # Pointwise channel projection; bias omitted because BN follows.
        self.pw = nn.Conv2d(cin, cout, 1, 1, bias=False)
        self.bn = nn.BatchNorm2d(cout)

    def forward(self, x):
        pooled = self.maxpool(x)
        return self.bn(self.pw(pooled))
def dump_tower(sess, net, from_layer, tower_name, tower_layers, operation='create'):
    """Dump/create each layer of an inception-style tower, chaining outputs.

    Layers whose qualified name contains 'pool' go through dump_pool, all
    others through dump_convbn; each layer's output feeds the next layer.
    """
    current_input = from_layer
    for layer in tower_layers:
        qualified = '{}/{}'.format(tower_name, layer)
        dump_fn = dump_pool if 'pool' in qualified else dump_convbn
        dump_fn(sess, net, current_input, qualified, operation)
        current_input = qualified
class AlbertOnnxConfig(OnnxConfig):
    """ONNX export configuration for ALBERT models."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis specification per model input.

        Multiple-choice tasks carry an extra 'choice' axis between batch and
        sequence.

        NOTE(review): restored the @property decorator — `inputs` is a
        property on OnnxConfig in upstream transformers and this dump drops
        decorators; confirm against the base class.
        """
        if self.task == 'multiple-choice':
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict([
            ('input_ids', dynamic_axis),
            ('attention_mask', dynamic_axis),
            ('token_type_ids', dynamic_axis),
        ])
def syscall_get_stdout(cmd):
    """Run *cmd* (a shell-style string, safely tokenized) and return its
    stdout as a list of lines with trailing whitespace stripped.

    Raises:
        Error: when the command cannot be executed.
    """
    try:
        proc = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE)
        out = proc.communicate()[0].decode('utf-8').rstrip()
        return out.split('\n')
    except Exception as exc:
        # BUG FIX: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; catch Exception and chain the cause instead.
        raise Error('Error in system call. I tried to run:\n' + str(cmd)) from exc
def append_data(save_folder, data, corpus):
    """Append utterance rows for *corpus* to '<save_folder>/csv/<corpus>.csv'.

    Each data line is (channel, filename, speaker_name, sentences); wav
    durations are probed with `soxi -D` and sentences that fit inside the
    wav (and are non-empty with non-zero duration) become CSV rows.
    """
    logger.info((('Preparing ' + corpus) + '.csv'))
    to_append = []
    for line in tqdm(data):
        (channel, filename, speaker_name, sentences) = line
        # Probe the wav duration via soxi (-D prints seconds).
        out = subprocess.Popen(['soxi', '-D', ((((save_folder + '/wav/') + channel) + filename) + '.wav')], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        (stdout, stderr) = out.communicate()
        wav_duration = str(('%.2f' % float(stdout)))
        wav = ((((save_folder + '/wav/') + channel) + filename) + '.wav')
        IDs = get_IDs(speaker_name, sentences, channel, filename)
        for n in range(len(sentences)):
            # NOTE(review): sentences[n] appears to hold
            # (text, ?, start, end) with end at [3] and start at [2] — confirm.
            f1 = float(sentences[n][3])
            f2 = float(sentences[n][2])
            duration = str(('%.2f' % (f1 - f2)))
            # Keep segments that end inside the wav, are non-degenerate and
            # have non-empty text.
            if ((float(wav_duration) >= f1) and (float(duration) != 0.0) and (sentences[n][0] != '')):
                to_append.append([IDs[n], duration, sentences[n][2], sentences[n][3], wav, 'wav', speaker_name, 'string', sentences[n][0], 'string', sentences[n][1], 'string'])
    # NOTE(review): `to_append is not None` is always true for a list —
    # presumably a truthiness check (`if to_append:`) was intended; confirm
    # whether an empty CSV with only the header row is desired.
    if (to_append is not None):
        write_first_row(save_folder, corpus)
        path = (((save_folder + '/csv/') + corpus) + '.csv')
        SB_file = open(path, 'a')
        writer = csv.writer(SB_file, delimiter=',')
        writer.writerows(to_append)
        SB_file.close()
def make_roi_mask_loss_evaluator():
    """Build the mask-head loss computation from the global cfg thresholds."""
    fg_bg_matcher = Matcher(
        cfg.MODEL.ROI_HEADS.FG_IOU_THRESHOLD,
        cfg.MODEL.ROI_HEADS.BG_IOU_THRESHOLD,
        allow_low_quality_matches=False,
    )
    return MaskRCNNLossComputation(fg_bg_matcher, cfg.MODEL.ROI_MASK_HEAD.RESOLUTION)
def killProcess(processID):
    """Forcibly terminate a process by id; no-op when processID is None.

    Uses the platform-appropriate mechanism: Win32 TerminateProcess on
    Windows, SIGKILL to the process group on Linux, SIGKILL on
    Darwin/FreeBSD, SIGTERM on SunOS.
    """
    if processID is None:
        return
    system = platform.system()
    if system == 'Windows':
        import ctypes
        # 2 == PROCESS_TERMINATE access right.
        handle = ctypes.windll.kernel32.OpenProcess(2, False, processID)
        ctypes.windll.kernel32.TerminateProcess(handle, -1)
        ctypes.windll.kernel32.CloseHandle(handle)
    elif system == 'Linux':
        os.killpg(processID, signal.SIGKILL)
    elif system in ('Darwin', 'FreeBSD'):
        os.kill(processID, signal.SIGKILL)
    elif system == 'SunOS':
        os.kill(processID, signal.SIGTERM)
def get_parameter_groups(model, stage_cfg, print_log=False):
    """Split model parameters into optimizer groups with per-group lr/decay.

    Three groups are built: backbone parameters ('pixel_encoder.*', lr scaled
    by backbone_lr_ratio), embedding parameters (names ending in the listed
    '<name>.weight' suffixes, with their own weight decay), and everything
    else (base lr / base weight decay).

    Args:
        model: the model whose named_parameters() are partitioned.
        stage_cfg: config providing learning_rate, weight_decay,
            embed_weight_decay and backbone_lr_ratio.
        print_log: when True, log which group each parameter lands in.

    Returns:
        list of three optimizer parameter-group dicts.
    """
    weight_decay = stage_cfg.weight_decay
    embed_weight_decay = stage_cfg.embed_weight_decay
    backbone_lr_ratio = stage_cfg.backbone_lr_ratio
    base_lr = stage_cfg.learning_rate
    backbone_params = []
    embed_params = []
    other_params = []
    embedding_names = ['summary_pos', 'query_init', 'query_emb', 'obj_pe']
    embedding_names = [(e + '.weight') for e in embedding_names]
    # Guard against the same tensor appearing under multiple names
    # (e.g. tied/shared parameters).
    memo = set()
    for (name, param) in model.named_parameters():
        if (not param.requires_grad):
            continue
        if (param in memo):
            continue
        memo.add(param)
        # Strip the DataParallel/DistributedDataParallel 'module.' prefix.
        if name.startswith('module'):
            name = name[7:]
        inserted = False
        if name.startswith('pixel_encoder.'):
            backbone_params.append(param)
            inserted = True
            if print_log:
                log.info(f'{name} counted as a backbone parameter.')
        else:
            for e in embedding_names:
                if name.endswith(e):
                    embed_params.append(param)
                    inserted = True
                    if print_log:
                        log.info(f'{name} counted as an embedding parameter.')
                    break
        if (not inserted):
            other_params.append(param)
    parameter_groups = [{'params': backbone_params, 'lr': (base_lr * backbone_lr_ratio), 'weight_decay': weight_decay}, {'params': embed_params, 'lr': base_lr, 'weight_decay': embed_weight_decay}, {'params': other_params, 'lr': base_lr, 'weight_decay': weight_decay}]
    return parameter_groups
def _determine_child_storage(parent_schedules: List[dtypes.ScheduleType]) -> Optional[dtypes.StorageType]:
    """Pick the default storage for a child scope from its ancestor schedules.

    Walks the ancestors innermost-first and returns the scope-default storage
    of the first non-Sequential schedule that defines a non-None one; returns
    None when no ancestor does.
    """
    for schedule in reversed(parent_schedules):
        if schedule is None or schedule == dtypes.ScheduleType.Sequential:
            continue
        if schedule in dtypes.SCOPEDEFAULT_STORAGE:
            storage = dtypes.SCOPEDEFAULT_STORAGE[schedule]
            if storage is not None:
                return storage
    return None
def in_notebook():
    """Return True iff running inside a Jupyter notebook kernel.

    Detection: get_ipython() exists (injected only by IPython environments)
    and its shell class is the ZMQ-based kernel shell; plain terminal IPython
    and vanilla Python both return False.
    """
    try:
        shell = get_ipython().__class__.__name__
    except NameError:
        return False
    # Collapsed the original's redundant elif/else — every non-ZMQ shell
    # returned False.
    return shell == 'ZMQInteractiveShell'
def calcul_value(annotation_list):
    """Count how many times each caption occurs in *annotation_list*.

    Args:
        annotation_list: iterable of dicts each holding a 'caption' key.

    Returns:
        dict mapping caption -> occurrence count.
    """
    # The original also read each annotation's 'image_id' into an unused
    # local; removed as dead code.  dict.get with a default replaces the
    # membership-test branch.
    counts = {}
    for annotation in annotation_list:
        caption = annotation['caption']
        counts[caption] = counts.get(caption, 0) + 1
    return counts
def _get_extension():
    """Return (BuildExtension, CppExtension, CUDAExtension) for the active backend.

    On parrots the two extension classes are specialized from its generic
    Extension; otherwise the standard torch.utils.cpp_extension classes are
    returned.
    """
    if TORCH_VERSION == 'parrots':
        from parrots.utils.build_extension import BuildExtension, Extension
        return (BuildExtension,
                partial(Extension, cuda=False),
                partial(Extension, cuda=True))
    from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension
    return (BuildExtension, CppExtension, CUDAExtension)
class BaseDataset(Dataset, ABC):
    """Base class for ray-based datasets.

    Holds rays, optional images, intrinsics and sampling state, and serves
    random ray batches for the 'train'/'test_optim' splits — either via a
    random permutation of all sample indices or via weighted multinomial
    sampling when ``sampling_weights`` are provided.
    """

    def __init__(self, datadir: str, scene_bbox: torch.Tensor, split: str, is_ndc: bool, is_contracted: bool, rays_o: Optional[torch.Tensor], rays_d: Optional[torch.Tensor], intrinsics: Union[(Intrinsics, List[Intrinsics])], batch_size: Optional[int]=None, imgs: Optional[Union[(torch.Tensor, List[torch.Tensor])]]=None, sampling_weights: Optional[torch.Tensor]=None, weights_subsampled: int=1):
        self.datadir = datadir
        self.name = os.path.basename(self.datadir)
        self.scene_bbox = scene_bbox
        self.split = split
        self.is_ndc = is_ndc
        self.is_contracted = is_contracted
        self.weights_subsampled = weights_subsampled
        self.batch_size = batch_size
        if self.split in ['train', 'test_optim']:
            # Ray-batched splits require a batch size.
            assert self.batch_size is not None
        self.rays_o = rays_o
        self.rays_d = rays_d
        self.imgs = imgs
        # Sample count: prefer image count, fall back to ray count.
        if self.imgs is not None:
            self.num_samples = len(self.imgs)
        elif self.rays_o is not None:
            self.num_samples = len(self.rays_o)
        else:
            self.num_samples = None
        self.intrinsics = intrinsics
        self.sampling_weights = sampling_weights
        if self.sampling_weights is not None:
            assert len(self.sampling_weights) == self.num_samples, f'Expected {self.num_samples} sampling weights but given {len(self.sampling_weights)}.'
        # Cap on how many weights are handed to torch.multinomial at once.
        self.sampling_batch_size = 2000000
        if self.num_samples is not None:
            # FIX: the original expression was truncated ('self.num_samples < ')
            # and did not parse. Reconstructed threshold: use a full random
            # permutation only while materializing it stays feasible
            # (upstream implementation compares against 100 million).
            self.use_permutation = self.num_samples < 100_000_000
        else:
            self.use_permutation = True
        self.perm = None

    @property
    def img_h(self) -> Union[(int, List[int])]:
        """Image height(s); a list when per-view intrinsics are given.

        NOTE(review): @property restored — decorators were evidently stripped
        from this dump; confirm against upstream usage.
        """
        if isinstance(self.intrinsics, list):
            return [i.height for i in self.intrinsics]
        return self.intrinsics.height

    @property
    def img_w(self) -> Union[(int, List[int])]:
        """Image width(s); a list when per-view intrinsics are given."""
        if isinstance(self.intrinsics, list):
            return [i.width for i in self.intrinsics]
        return self.intrinsics.width

    def reset_iter(self):
        """Draw a fresh index permutation (or drop the stale one) for a new epoch."""
        if (self.sampling_weights is None) and self.use_permutation:
            self.perm = torch.randperm(self.num_samples)
        else:
            del self.perm
            self.perm = None

    def get_rand_ids(self, index):
        """Return the sample indices that make up batch `index`.

        With sampling weights: weighted multinomial draw (over a random
        subset when there are more weights than `sampling_batch_size`).
        Without weights: a slice of the precomputed permutation, or uniform
        random indices when permutations are disabled.
        """
        assert self.batch_size is not None, "Can't get rand_ids for test split"
        if self.sampling_weights is not None:
            # Weighted sampling yields one ray per weights_subsampled^2 patch.
            batch_size = self.batch_size // (self.weights_subsampled ** 2)
            num_weights = len(self.sampling_weights)
            if num_weights > self.sampling_batch_size:
                # Subsample the weights first to keep multinomial tractable.
                subset = torch.randint(0, num_weights, size=(self.sampling_batch_size,), dtype=torch.int64, device=self.sampling_weights.device)
                samples = torch.multinomial(input=self.sampling_weights[subset], num_samples=batch_size)
                return subset[samples]
            return torch.multinomial(input=self.sampling_weights, num_samples=batch_size)
        else:
            batch_size = self.batch_size
            if self.use_permutation:
                return self.perm[(index * batch_size):((index + 1) * batch_size)]
            else:
                return torch.randint(0, self.num_samples, size=(batch_size,))

    def __len__(self):
        # Train-like splits iterate over batches (ceil division); others over samples.
        if self.split in ['train', 'test_optim']:
            return ((self.num_samples + self.batch_size) - 1) // self.batch_size
        else:
            return self.num_samples

    def __getitem__(self, index, return_idxs: bool=False):
        """Fetch one sample (eval splits) or one random ray batch (train splits)."""
        if self.split in ['train', 'test_optim']:
            index = self.get_rand_ids(index)
        out = {}
        if self.rays_o is not None:
            out['rays_o'] = self.rays_o[index]
        if self.rays_d is not None:
            out['rays_d'] = self.rays_d[index]
        if self.imgs is not None:
            out['imgs'] = self.imgs[index]
        else:
            out['imgs'] = None
        if return_idxs:
            return (out, index)
        return out
@pytest.mark.parametrize('metric', METRICS)
def test_kernel_density_numerical_consistency(global_random_seed, metric):
    """32-bit and 64-bit KD-tree kernel density estimates must agree.

    NOTE(review): the decorator line was truncated in the source dump
    ('.parametrize(...)'); restored as @pytest.mark.parametrize.
    """
    (X_64, X_32, Y_64, Y_32) = get_dataset_for_binary_tree(random_seed=global_random_seed)
    metric_params = METRICS.get(metric, {})
    kd_64 = KDTree64(X_64, leaf_size=2, metric=metric, **metric_params)
    kd_32 = KDTree32(X_32, leaf_size=2, metric=metric, **metric_params)
    # Gaussian KDE with a fixed bandwidth, breadth-first traversal on both trees.
    kernel = 'gaussian'
    h = 0.1
    density64 = kd_64.kernel_density(Y_64, h=h, kernel=kernel, breadth_first=True)
    density32 = kd_32.kernel_density(Y_32, h=h, kernel=kernel, breadth_first=True)
    # Results must agree to ~1e-5 relative tolerance, while each tree keeps
    # its own floating-point precision.
    assert_allclose(density64, density32, rtol=1e-05)
    assert (density64.dtype == np.float64)
    assert (density32.dtype == np.float32)
def test_ufunc_add_where_list():
    """Passing the ufunc 'where' argument as a list must be rejected.

    ufunc_add_where_list is expected to raise; completing without an
    exception is a test failure.
    """
    A = np.random.randint(1, 10, size=(2,), dtype=np.int32)
    B = np.random.randint(1, 10, size=(2,), dtype=np.int32)
    try:
        # Result is irrelevant; only the raised error matters.
        ufunc_add_where_list(A, B)
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # still propagate; any ordinary error is the expected outcome.
        return
    assert False, 'ufunc_add_where_list unexpectedly succeeded'
def pytest_addoption(parser):
    """Register benchmark-related command line options with pytest."""
    options = (
        ('--fuser', 'old', 'fuser to use for benchmarks'),
        ('--executor', 'legacy', 'executor to use for benchmarks'),
    )
    for flag, default, description in options:
        parser.addoption(flag, default=default, help=description)
def LR_CI_calc(mean, SE, CV=1.96):
    """Confidence interval of a ratio statistic computed on the log scale.

    Returns (exp(ln(mean) - CV*SE), exp(ln(mean) + CV*SE)). When the
    computation fails (e.g. mean <= 0 makes the log undefined), the string
    pair ('None', 'None') is returned instead.
    """
    try:
        log_mean = math.log(mean)
        half_width = CV * SE
        return (math.exp(log_mean - half_width), math.exp(log_mean + half_width))
    except Exception:
        return ('None', 'None')
class Precision(BaseMetric):
    """Precision@k: fraction of the top-k recommended items that are relevant."""

    def __init__(self, recommendations, config, params, eval_objects):
        """Store the cutoff and the binary relevance map from the evaluation objects."""
        super().__init__(recommendations, config, params, eval_objects)
        self._cutoff = self._evaluation_objects.cutoff
        self._relevance = self._evaluation_objects.relevance.binary_relevance

    @staticmethod
    def name():
        # NOTE(review): defined without 'self' — restored the @staticmethod
        # decorator that was evidently stripped from this dump.
        return 'Precision'

    def __user_precision(self, user_recommendations, user, cutoff):
        """Precision for one user: relevant hits within the top-`cutoff` / cutoff."""
        return (sum([self._relevance.get_rel(user, i) for (i, _) in user_recommendations[:cutoff]]) / cutoff)

    def eval_user_metric(self):
        """Per-user precision for every user having at least one relevant item."""
        return {u: self.__user_precision(u_r, u, self._cutoff) for (u, u_r) in self._recommendations.items() if len(self._relevance.get_user_rel(u))}
def load_model(app):
    """Register the dash callback that loads analysis data for the current directory.

    The callback rebuilds the cytoscape graph elements and the top label
    whenever the 'auto_load' interval fires.
    """
    # NOTE(review): the decorator below was truncated to a bare tuple in the
    # source dump (the nested function was never registered); restored as
    # @app.callback(...).
    @app.callback(Output('cytoscape-responsive-layout', 'elements'), Output('top-label', 'children'), Output('top-label', 'style'), Output('forward', 'n_clicks'), Input('auto_load', 'interval'))
    def callback(interval):
        import os
        path = os.getcwd()
        errmsg_style = component.top_lable_style
        if (path is None):
            # NOTE(review): os.getcwd() never returns None, so this branch is
            # unreachable; it also returns only 3 values for 4 declared outputs.
            return (dash.no_update, dash.no_update, dash.no_update)
        try:
            app.Global.analysis_data = mlirnet.analysis_data(path, app.Global.f32_mlir, app.Global.quant_mlir, app.Global.input)
        except Exception as e:
            # Surface the failure in red on the top label.
            errmsg_style['color'] = '#e63946'
            return (dash.no_update, str(e), errmsg_style, dash.no_update)
        app.Global.graph = graph.Graph(app.Global.analysis_data.quant_mlir)
        app.Global.analysis_data.build_blob_info(app.Global.graph)
        errmsg_style['color'] = 'blue'
        if app.Global.manual_run:
            return ((app.Global.graph.cy_nodes() + app.Global.graph.cy_edges()), '{}: {} vs {}'.format(path, app.Global.f32_mlir, app.Global.quant_mlir), errmsg_style, dash.no_update)
        else:
            # Auto mode: also click 'forward' once to advance.
            return ((app.Global.graph.cy_nodes() + app.Global.graph.cy_edges()), '{}: {} vs {}'.format(path, app.Global.f32_mlir, app.Global.quant_mlir), errmsg_style, 1)
def format_ov_stats(stats: Dict[(str, List[Any])]) -> Tuple[(Dict[(str, str)], List[Dict[(str, str)]])]:
    """Format dataset-overview statistics for display.

    Unpacks the raw stats mapping (row/column counts, present-cell counts,
    dedup row counts, memory usage, dtype counts), derives missing/duplicate
    counts and percentages, and returns the formatted stats together with
    the dtype counts.
    """
    (nrows, ncols, npresent_cells, nrows_wo_dups, mem_use, dtypes_cnt) = stats.values()
    ncells = np.multiply(nrows, ncols).tolist()
    missing = np.subtract(ncells, npresent_cells).astype(float).tolist()
    missing_pct = np.subtract(1, np.divide(npresent_cells, ncells)).tolist()
    dup_rows = np.subtract(nrows, nrows_wo_dups).tolist()
    dup_pct = np.subtract(1, np.divide(nrows_wo_dups, nrows)).tolist()
    # NOTE(review): 'Average Row Size in Memory' is computed with subtract,
    # which looks suspicious for an average (divide expected?) — confirm
    # against upstream before changing; behavior preserved here.
    avg_row_size = np.subtract(mem_use, nrows).tolist()
    data = {
        'Number of Variables': ncols,
        'Number of Rows': nrows,
        'Missing Cells': missing,
        'Missing Cells (%)': missing_pct,
        'Duplicate Rows': dup_rows,
        'Duplicate Rows (%)': dup_pct,
        'Total Size in Memory': list(map(float, mem_use)),
        'Average Row Size in Memory': avg_row_size,
    }
    formatted = {key: _format_values(key, values) for (key, values) in data.items()}
    return (formatted, dtypes_cnt)
class StarDistBase(BaseModel):
    """Common functionality shared by StarDist models.

    Handles threshold loading/storage, training preparation (losses and
    callbacks), and dense/sparse/tiled prediction of object probabilities
    and star-convex polygon distances.
    """

    def __init__(self, config, name=None, basedir='.'):
        """Build the model and load 'thresholds.json' from the model folder if present.

        Invalid or missing threshold values fall back to the defaults
        prob=0.5, nms=0.4.
        """
        super().__init__(config=config, name=name, basedir=basedir)
        threshs = dict(prob=None, nms=None)
        if (basedir is not None):
            try:
                threshs = load_json(str((self.logdir / 'thresholds.json')))
                print("Loading thresholds from 'thresholds.json'.")
                if ((threshs.get('prob') is None) or (not (0 < threshs.get('prob') < 1))):
                    print(("- Invalid 'prob' threshold (%s), using default value." % str(threshs.get('prob'))))
                    threshs['prob'] = None
                if ((threshs.get('nms') is None) or (not (0 < threshs.get('nms') < 1))):
                    print(("- Invalid 'nms' threshold (%s), using default value." % str(threshs.get('nms'))))
                    threshs['nms'] = None
            except FileNotFoundError:
                if ((config is None) and (len(tuple(self.logdir.glob('*.h5'))) > 0)):
                    print("Couldn't load thresholds from 'thresholds.json', using default values. (Call 'optimize_thresholds' to change that.)")
        # Routed through the 'thresholds' property setter below.
        self.thresholds = dict(prob=(0.5 if (threshs['prob'] is None) else threshs['prob']), nms=(0.4 if (threshs['nms'] is None) else threshs['nms']))
        print('Using default values: prob_thresh={prob:g}, nms_thresh={nms:g}.'.format(prob=self.thresholds.prob, nms=self.thresholds.nms))

    @property
    def thresholds(self):
        """Current (prob, nms) thresholds as a namedtuple.

        NOTE(review): the @property/@thresholds.setter decorators were
        evidently stripped from this dump (the getter was shadowed by the
        later setter and 'self.thresholds.prob' in __init__ could not have
        worked); restored here.
        """
        return self._thresholds

    def _is_multiclass(self):
        """True when the model predicts per-class probabilities (n_classes set)."""
        return (self.config.n_classes is not None)

    def _parse_classes_arg(self, classes, length):
        """Validate/normalize the 'classes' argument ('auto' or a sequence of `length`)."""
        if isinstance(classes, str):
            ((classes == 'auto') or _raise(ValueError(f"classes = '{classes}': only 'auto' supported as string argument for classes")))
            if (self.config.n_classes is None):
                classes = None
            elif (self.config.n_classes == 1):
                classes = ((1,) * length)
            else:
                raise ValueError("using classes = 'auto' for n_classes > 1 not supported")
        elif isinstance(classes, (tuple, list, np.ndarray)):
            ((len(classes) == length) or _raise(ValueError(f'len(classes) should be {length}!')))
        else:
            raise ValueError("classes should either be 'auto' or a list of scalars/label dicts")
        return classes

    @thresholds.setter
    def thresholds(self, d):
        # Store thresholds as an immutable namedtuple so they can be read as
        # attributes (self.thresholds.prob / self.thresholds.nms).
        self._thresholds = namedtuple('Thresholds', d.keys())(*d.values())

    def prepare_for_training(self, optimizer=None):
        """Compile the Keras model with StarDist losses/metrics and set up callbacks.

        Uses Adam with the configured learning rate when no optimizer is given.
        """
        if (optimizer is None):
            optimizer = Adam(self.config.train_learning_rate)
        masked_dist_loss = {'mse': masked_loss_mse, 'mae': masked_loss_mae, 'iou': masked_loss_iou}[self.config.train_dist_loss]
        prob_loss = 'binary_crossentropy'

        def split_dist_true_mask(dist_true_mask):
            # Ground truth carries distances and the mask concatenated on the last axis.
            return tf.split(dist_true_mask, num_or_size_splits=[self.config.n_rays, (- 1)], axis=(- 1))

        def dist_loss(dist_true_mask, dist_pred):
            (dist_true, dist_mask) = split_dist_true_mask(dist_true_mask)
            return masked_dist_loss(dist_mask, reg_weight=self.config.train_background_reg)(dist_true, dist_pred)

        def dist_iou_metric(dist_true_mask, dist_pred):
            (dist_true, dist_mask) = split_dist_true_mask(dist_true_mask)
            return masked_metric_iou(dist_mask, reg_weight=0)(dist_true, dist_pred)

        def relevant_mae(dist_true_mask, dist_pred):
            (dist_true, dist_mask) = split_dist_true_mask(dist_true_mask)
            return masked_metric_mae(dist_mask)(dist_true, dist_pred)

        def relevant_mse(dist_true_mask, dist_pred):
            (dist_true, dist_mask) = split_dist_true_mask(dist_true_mask)
            return masked_metric_mse(dist_mask)(dist_true, dist_pred)
        if self._is_multiclass():
            prob_class_loss = weighted_categorical_crossentropy(self.config.train_class_weights, ndim=self.config.n_dim)
            loss = [prob_loss, dist_loss, prob_class_loss]
        else:
            loss = [prob_loss, dist_loss]
        self.keras_model.compile(optimizer, loss=loss, loss_weights=list(self.config.train_loss_weights), metrics={'prob': kld, 'dist': [relevant_mae, relevant_mse, dist_iou_metric]})
        self.callbacks = []
        if (self.basedir is not None):
            self.callbacks += self._checkpoint_callbacks()
            if self.config.train_tensorboard:
                if IS_TF_1:
                    self.callbacks.append(CARETensorBoard(log_dir=str(self.logdir), prefix_with_timestamp=False, n_images=3, write_images=True, prob_out=False))
                else:
                    self.callbacks.append(TensorBoard(log_dir=str((self.logdir / 'logs')), write_graph=False, profile_batch=0))
        if (self.config.train_reduce_lr is not None):
            rlrop_params = self.config.train_reduce_lr
            if ('verbose' not in rlrop_params):
                rlrop_params['verbose'] = True
            # ReduceLROnPlateau must come first so other callbacks see the updated LR.
            self.callbacks.insert(0, ReduceLROnPlateau(**rlrop_params))
        self._model_prepared = True

    def _predict_setup(self, img, axes, normalizer, n_tiles, show_tile_progress, predict_kwargs):
        """Validate inputs and prepare normalization, resizing and (optional) tiling.

        Returns the preprocessed image plus helpers used by the prediction
        generators (axes bookkeeping, a direct-prediction closure, and a
        lazily-invoked tiling setup closure).
        """
        if (n_tiles is None):
            n_tiles = ([1] * img.ndim)
        try:
            n_tiles = tuple(n_tiles)
            ((img.ndim == len(n_tiles)) or _raise(TypeError()))
        except TypeError:
            raise ValueError(('n_tiles must be an iterable of length %d' % img.ndim))
        (all(((np.isscalar(t) and (1 <= t) and (int(t) == t)) for t in n_tiles)) or _raise(ValueError('all values of n_tiles must be integer values >= 1')))
        n_tiles = tuple(map(int, n_tiles))
        axes = self._normalize_axes(img, axes)
        axes_net = self.config.axes
        _permute_axes = self._make_permute_axes(axes, axes_net)
        x = _permute_axes(img)
        channel = axes_dict(axes_net)['C']
        ((self.config.n_channel_in == x.shape[channel]) or _raise(ValueError()))
        axes_net_div_by = self._axes_div_by(axes_net)
        grid = tuple(self.config.grid)
        ((len(grid) == (len(axes_net) - 1)) or _raise(ValueError()))
        grid_dict = dict(zip(axes_net.replace('C', ''), grid))
        normalizer = self._check_normalizer_resizer(normalizer, None)[0]
        resizer = StarDistPadAndCropResizer(grid=grid_dict)
        x = normalizer.before(x, axes_net)
        x = resizer.before(x, axes_net, axes_net_div_by)
        if (not _is_floatarray(x)):
            warnings.warn('Predicting on non-float input... ( forgot to normalize? )')

        def predict_direct(x):
            # Single forward pass on a batch of one.
            ys = self.keras_model.predict(x[np.newaxis], **predict_kwargs)
            return tuple((y[0] for y in ys))

        def tiling_setup():
            # Only called when np.prod(n_tiles) > 1.
            assert (np.prod(n_tiles) > 1)
            tiling_axes = axes_net.replace('C', '')
            x_tiling_axis = tuple((axes_dict(axes_net)[a] for a in tiling_axes))
            axes_net_tile_overlaps = self._axes_tile_overlap(axes_net)
            _n_tiles = _permute_axes(np.empty(n_tiles, bool)).shape
            (all(((_n_tiles[i] == 1) for i in range(x.ndim) if (i not in x_tiling_axis))) or _raise(ValueError(("entry of n_tiles > 1 only allowed for axes '%s'" % tiling_axes))))
            # Output shape is the input shape downscaled by the prediction grid.
            sh = [(s // grid_dict.get(a, 1)) for (a, s) in zip(axes_net, x.shape)]
            sh[channel] = None

            def create_empty_output(n_channel, dtype=np.float32):
                sh[channel] = n_channel
                return np.empty(sh, dtype)
            if callable(show_tile_progress):
                (progress, _show_tile_progress) = (show_tile_progress, True)
            else:
                (progress, _show_tile_progress) = (tqdm, show_tile_progress)
            n_block_overlaps = [int(np.ceil((overlap / blocksize))) for (overlap, blocksize) in zip(axes_net_tile_overlaps, axes_net_div_by)]
            num_tiles_used = total_n_tiles(x, _n_tiles, block_sizes=axes_net_div_by, n_block_overlaps=n_block_overlaps)
            tile_generator = progress(tile_iterator(x, _n_tiles, block_sizes=axes_net_div_by, n_block_overlaps=n_block_overlaps), disable=(not _show_tile_progress), total=num_tiles_used)
            return (tile_generator, tuple(sh), create_empty_output)
        return (x, axes, axes_net, axes_net_div_by, _permute_axes, resizer, n_tiles, grid, grid_dict, channel, predict_direct, tiling_setup)

    def _predict_generator(self, img, axes=None, normalizer=None, n_tiles=None, show_tile_progress=True, **predict_kwargs):
        """Generator form of dense prediction.

        Yields None once per processed tile (progress reporting) and finally
        yields the (prob, dist[, prob_class]) result tuple.
        """
        predict_kwargs.setdefault('verbose', 0)
        (x, axes, axes_net, axes_net_div_by, _permute_axes, resizer, n_tiles, grid, grid_dict, channel, predict_direct, tiling_setup) = self._predict_setup(img, axes, normalizer, n_tiles, show_tile_progress, predict_kwargs)
        if (np.prod(n_tiles) > 1):
            (tile_generator, output_shape, create_empty_output) = tiling_setup()
            prob = create_empty_output(1)
            dist = create_empty_output(self.config.n_rays)
            if self._is_multiclass():
                prob_class = create_empty_output((self.config.n_classes + 1))
                result = (prob, dist, prob_class)
            else:
                result = (prob, dist)
            for (tile, s_src, s_dst) in tile_generator:
                result_tile = predict_direct(tile)
                # Convert image-space slices to (grid-downscaled) output space.
                s_src = [slice((s.start // grid_dict.get(a, 1)), (s.stop // grid_dict.get(a, 1))) for (s, a) in zip(s_src, axes_net)]
                s_dst = [slice((s.start // grid_dict.get(a, 1)), (s.stop // grid_dict.get(a, 1))) for (s, a) in zip(s_dst, axes_net)]
                s_src[channel] = slice(None)
                s_dst[channel] = slice(None)
                (s_src, s_dst) = (tuple(s_src), tuple(s_dst))
                for (part, part_tile) in zip(result, result_tile):
                    part[s_dst] = part_tile[s_src]
                (yield)
        else:
            result = predict_direct(x)
        result = [resizer.after(part, axes_net) for part in result]
        result[0] = np.take(result[0], 0, axis=channel)
        # Avoid non-positive distances downstream.
        result[1] = np.maximum(0.001, result[1])
        result[1] = np.moveaxis(result[1], channel, (- 1))
        if self._is_multiclass():
            result[2] = np.moveaxis(result[2], channel, (- 1))
        (yield tuple(result))
    # NOTE(review): likely the residue of a stripped
    # @functools.wraps(_predict_generator) decorator; kept as a harmless no-op.
    (_predict_generator)
    def predict(self, *args, **kwargs):
        """Run _predict_generator to completion and return its final result."""
        r = None
        for r in self._predict_generator(*args, **kwargs):
            pass
        return r

    def _predict_sparse_generator(self, img, prob_thresh=None, axes=None, normalizer=None, n_tiles=None, show_tile_progress=True, b=2, **predict_kwargs):
        """Generator form of sparse prediction.

        Like _predict_generator, but only keeps entries above `prob_thresh`
        (ignoring a border of `b` pixels), finally yielding
        (prob, dist[, prob_class], points).
        """
        if (prob_thresh is None):
            prob_thresh = self.thresholds.prob
        predict_kwargs.setdefault('verbose', 0)
        (x, axes, axes_net, axes_net_div_by, _permute_axes, resizer, n_tiles, grid, grid_dict, channel, predict_direct, tiling_setup) = self._predict_setup(img, axes, normalizer, n_tiles, show_tile_progress, predict_kwargs)

        def _prep(prob, dist):
            prob = np.take(prob, 0, axis=channel)
            dist = np.moveaxis(dist, channel, (- 1))
            dist = np.maximum(0.001, dist)
            return (prob, dist)
        # FIX: last element was initialized as 'prob_class' (typo, unused);
        # renamed to prob_classa to match every later use.
        (proba, dista, pointsa, prob_classa) = ([], [], [], [])
        if (np.prod(n_tiles) > 1):
            (tile_generator, output_shape, create_empty_output) = tiling_setup()
            sh = list(output_shape)
            sh[channel] = 1
            (proba, dista, pointsa, prob_classa) = ([], [], [], [])
            for (tile, s_src, s_dst) in tile_generator:
                results_tile = predict_direct(tile)
                s_src = [slice((s.start // grid_dict.get(a, 1)), (s.stop // grid_dict.get(a, 1))) for (s, a) in zip(s_src, axes_net)]
                s_dst = [slice((s.start // grid_dict.get(a, 1)), (s.stop // grid_dict.get(a, 1))) for (s, a) in zip(s_dst, axes_net)]
                s_src[channel] = slice(None)
                s_dst[channel] = slice(None)
                (s_src, s_dst) = (tuple(s_src), tuple(s_dst))
                (prob_tile, dist_tile) = results_tile[:2]
                (prob_tile, dist_tile) = _prep(prob_tile[s_src], dist_tile[s_src])
                # Exclude the border only at the outer image edges, not at tile seams.
                bs = list((((b if (s.start == 0) else (- 1)), (b if (s.stop == _sh) else (- 1))) for (s, _sh) in zip(s_dst, sh)))
                bs.pop(channel)
                inds = _ind_prob_thresh(prob_tile, prob_thresh, b=bs)
                proba.extend(prob_tile[inds].copy())
                dista.extend(dist_tile[inds].copy())
                _points = np.stack(np.where(inds), axis=1)
                offset = list((s.start for (i, s) in enumerate(s_dst)))
                offset.pop(channel)
                _points = (_points + np.array(offset).reshape((1, len(offset))))
                # Points are reported in image coordinates (undo the grid downscaling).
                _points = (_points * np.array(self.config.grid).reshape((1, len(self.config.grid))))
                pointsa.extend(_points)
                if self._is_multiclass():
                    p = results_tile[2][s_src].copy()
                    p = np.moveaxis(p, channel, (- 1))
                    prob_classa.extend(p[inds])
                (yield)
        else:
            results = predict_direct(x)
            (prob, dist) = results[:2]
            (prob, dist) = _prep(prob, dist)
            inds = _ind_prob_thresh(prob, prob_thresh, b=b)
            proba = prob[inds].copy()
            dista = dist[inds].copy()
            _points = np.stack(np.where(inds), axis=1)
            pointsa = (_points * np.array(self.config.grid).reshape((1, len(self.config.grid))))
            if self._is_multiclass():
                p = np.moveaxis(results[2], channel, (- 1))
                prob_classa = p[inds].copy()
        proba = np.asarray(proba)
        dista = np.asarray(dista).reshape(((- 1), self.config.n_rays))
        pointsa = np.asarray(pointsa).reshape(((- 1), self.config.n_dim))
        # Drop points that fall into the padding added by the resizer.
        idx = resizer.filter_points(x.ndim, pointsa, axes_net)
        proba = proba[idx]
        dista = dista[idx]
        pointsa = pointsa[idx]
        if self._is_multiclass():
            prob_classa = np.asarray(prob_classa).reshape(((- 1), (self.config.n_classes + 1)))
            prob_classa = prob_classa[idx]
            (yield (proba, dista, prob_classa, pointsa))
        else:
            prob_classa = None
            (yield (proba, dista, pointsa))
    # NOTE(review): likely a stripped @functools.wraps(...) residue; no-op.
    (_predict_sparse_generator)
    def predict_sparse(self, *args, **kwargs):
        """Run _predict_sparse_generator to completion and return its final result."""
        r = None
        for r in self._predict_sparse_generator(*args, **kwargs):
            pass
        return r

    def _predict_instances_generator(self, img, axes=None, normalizer=None, sparse=True, prob_thresh=None, nms_thresh=None, scale=None, n_tiles=None, show_tile_progress=True, verbose=False, return_labels=True, predict_kwargs=None, nms_kwargs=None, overlap_label=None, return_predict=False):
        """Generator form of instance prediction (network forward pass + NMS).

        Yields progress markers ('predict', 'tile'..., 'nms') and finally the
        instance result (optionally paired with the raw network prediction).
        """
        if (predict_kwargs is None):
            predict_kwargs = {}
        if (nms_kwargs is None):
            nms_kwargs = {}
        if (return_predict and sparse):
            sparse = False
            warnings.warn('Setting sparse to False because return_predict is True')
        nms_kwargs.setdefault('verbose', verbose)
        _axes = self._normalize_axes(img, axes)
        _axes_net = self.config.axes
        _permute_axes = self._make_permute_axes(_axes, _axes_net)
        _shape_inst = tuple((s for (s, a) in zip(_permute_axes(img).shape, _axes_net) if (a != 'C')))
        if (scale is not None):
            if isinstance(scale, numbers.Number):
                # A scalar scales all spatial axes; non-spatial axes stay at 1.
                scale = tuple(((scale if (a in 'XYZ') else 1) for a in _axes))
            scale = tuple(scale)
            ((len(scale) == len(_axes)) or _raise(ValueError(f'scale {scale} must be of length {len(_axes)}, i.e. one value for each of the axes {_axes}')))
            for (s, a) in zip(scale, _axes):
                ((s > 0) or _raise(ValueError('scale values must be greater than 0')))
                (((s in (1, None)) or (a in 'XYZ')) or warnings.warn(f'replacing scale value {s} for non-spatial axis {a} with 1'))
            scale = tuple(((s if (a in 'XYZ') else 1) for (s, a) in zip(scale, _axes)))
            (verbose and print(f'scaling image by factors {scale} for axes {_axes}'))
            img = ndi.zoom(img, scale, order=1)
        (yield 'predict')
        res = None
        if sparse:
            for res in self._predict_sparse_generator(img, axes=axes, normalizer=normalizer, n_tiles=n_tiles, prob_thresh=prob_thresh, show_tile_progress=show_tile_progress, **predict_kwargs):
                if (res is None):
                    (yield 'tile')
        else:
            for res in self._predict_generator(img, axes=axes, normalizer=normalizer, n_tiles=n_tiles, show_tile_progress=show_tile_progress, **predict_kwargs):
                if (res is None):
                    (yield 'tile')
            # Dense prediction has no candidate points; pad with None.
            res = (tuple(res) + (None,))
        if self._is_multiclass():
            (prob, dist, prob_class, points) = res
        else:
            (prob, dist, points) = res
            prob_class = None
        (yield 'nms')
        res_instances = self._instances_from_prediction(_shape_inst, prob, dist, points=points, prob_class=prob_class, prob_thresh=prob_thresh, nms_thresh=nms_thresh, scale=(None if (scale is None) else dict(zip(_axes, scale))), return_labels=return_labels, overlap_label=overlap_label, **nms_kwargs)
        if return_predict:
            (yield (res_instances, tuple(res[:(- 1)])))
        else:
            (yield res_instances)
    # NOTE(review): likely a stripped @functools.wraps(...) residue; no-op.
    (_predict_instances_generator)
    def predict_instances(self, *args, **kwargs):
        """Run _predict_instances_generator to completion and return its final result."""
        r = None
        for r in self._predict_instances_generator(*args, **kwargs):
            pass
        return r

    def predict_instances_big(self, img, axes, block_size, min_overlap, context=None, labels_out=None, labels_out_dtype=np.int32, show_progress=True, **kwargs):
        """Predict instances on a large image by processing overlapping blocks.

        Blocks are predicted independently, cropped to remove context, their
        objects filtered/relabelled to be globally unique, and optionally
        written into `labels_out`. Returns (labels_out, polys_all).
        """
        from ..big import _grid_divisible, BlockND, OBJECT_KEYS
        from ..matching import relabel_sequential
        n = img.ndim
        axes = axes_check_and_normalize(axes, length=n)
        grid = self._axes_div_by(axes)
        axes_out = self._axes_out.replace('C', '')
        shape_dict = dict(zip(axes, img.shape))
        shape_out = tuple((shape_dict[a] for a in axes_out))
        if (context is None):
            context = self._axes_tile_overlap(axes)
        if np.isscalar(block_size):
            block_size = (n * [block_size])
        if np.isscalar(min_overlap):
            min_overlap = (n * [min_overlap])
        if np.isscalar(context):
            context = (n * [context])
        (block_size, min_overlap, context) = (list(block_size), list(min_overlap), list(context))
        assert (n == len(block_size) == len(min_overlap) == len(context))
        if ('C' in axes):
            # The channel axis is never split into blocks.
            i = axes_dict(axes)['C']
            block_size[i] = img.shape[i]
            min_overlap[i] = context[i] = 0
        block_size = tuple((_grid_divisible(g, v, name='block_size', verbose=False) for (v, g, a) in zip(block_size, grid, axes)))
        min_overlap = tuple((_grid_divisible(g, v, name='min_overlap', verbose=False) for (v, g, a) in zip(min_overlap, grid, axes)))
        context = tuple((_grid_divisible(g, v, name='context', verbose=False) for (v, g, a) in zip(context, grid, axes)))
        print(f'effective: block_size={block_size}, min_overlap={min_overlap}, context={context}', flush=True)
        for (a, c, o) in zip(axes, context, self._axes_tile_overlap(axes)):
            if (c < o):
                print(f'{a}: context of {c} is small, recommended to use at least {o}', flush=True)
        blocks = BlockND.cover(img.shape, axes, block_size, min_overlap, context, grid)
        if (np.isscalar(labels_out) and (bool(labels_out) is False)):
            labels_out = None
        elif (labels_out is None):
            labels_out = np.zeros(shape_out, dtype=labels_out_dtype)
        else:
            ((labels_out.shape == shape_out) or _raise(ValueError(f"'labels_out' must have shape {shape_out} (axes {axes_out}).")))
        polys_all = {}
        label_offset = 1
        kwargs_override = dict(axes=axes, overlap_label=None, return_labels=True, return_predict=False)
        if show_progress:
            kwargs_override['show_tile_progress'] = False
        for (k, v) in kwargs_override.items():
            if (k in kwargs):
                print(f"changing '{k}' from {kwargs[k]} to {v}", flush=True)
            kwargs[k] = v
        blocks = tqdm(blocks, disable=(not show_progress))
        for block in blocks:
            (labels, polys) = self.predict_instances(block.read(img, axes=axes), **kwargs)
            labels = block.crop_context(labels, axes=axes_out)
            (labels, polys) = block.filter_objects(labels, polys, axes=axes_out)
            # Offset labels so ids stay unique across blocks.
            labels = relabel_sequential(labels, label_offset)[0]
            if (labels_out is not None):
                block.write(labels_out, labels, axes=axes_out)
            for (k, v) in polys.items():
                polys_all.setdefault(k, []).append(v)
            label_offset += len(polys['prob'])
            del labels
        polys_all = {k: (np.concatenate(v) if (k in OBJECT_KEYS) else v[0]) for (k, v) in polys_all.items()}
        return (labels_out, polys_all)

    def optimize_thresholds(self, X_val, Y_val, nms_threshs=[0.3, 0.4, 0.5], iou_threshs=[0.3, 0.5, 0.7], predict_kwargs=None, optimize_kwargs=None, save_to_json=True):
        """Grid-search prob/nms thresholds on validation data and adopt the best pair.

        Optionally persists the result to 'thresholds.json' in the model folder.
        """
        if (predict_kwargs is None):
            predict_kwargs = {}
        if (optimize_kwargs is None):
            optimize_kwargs = {}

        def _predict_kwargs(x):
            # Guess a tiling if the caller did not specify one.
            if ('n_tiles' in predict_kwargs):
                return predict_kwargs
            else:
                return {**predict_kwargs, 'n_tiles': self._guess_n_tiles(x), 'show_tile_progress': False}
        Yhat_val = [self.predict(x, **_predict_kwargs(x))[:2] for x in X_val]
        (opt_prob_thresh, opt_measure, opt_nms_thresh) = (None, (- np.inf), None)
        for _opt_nms_thresh in nms_threshs:
            (_opt_prob_thresh, _opt_measure) = optimize_threshold(Y_val, Yhat_val, model=self, nms_thresh=_opt_nms_thresh, iou_threshs=iou_threshs, **optimize_kwargs)
            if (_opt_measure > opt_measure):
                (opt_prob_thresh, opt_measure, opt_nms_thresh) = (_opt_prob_thresh, _opt_measure, _opt_nms_thresh)
        opt_threshs = dict(prob=opt_prob_thresh, nms=opt_nms_thresh)
        self.thresholds = opt_threshs
        print(end='', file=sys.stderr, flush=True)
        print('Using optimized values: prob_thresh={prob:g}, nms_thresh={nms:g}.'.format(prob=self.thresholds.prob, nms=self.thresholds.nms))
        if (save_to_json and (self.basedir is not None)):
            print("Saving to 'thresholds.json'.")
            save_json(opt_threshs, str((self.logdir / 'thresholds.json')))
        return opt_threshs

    def _guess_n_tiles(self, img):
        """Heuristic tiling so each tile roughly matches the training patch workload."""
        axes = self._normalize_axes(img, axes=None)
        shape = list(img.shape)
        if ('C' in axes):
            del shape[axes_dict(axes)['C']]
        # Spread the training batch size evenly over the spatial dimensions.
        b = (self.config.train_batch_size ** (1.0 / self.config.n_dim))
        n_tiles = [int(np.ceil((s / (p * b)))) for (s, p) in zip(shape, self.config.train_patch_size)]
        if ('C' in axes):
            n_tiles.insert(axes_dict(axes)['C'], 1)
        return tuple(n_tiles)

    def _normalize_axes(self, img, axes):
        """Resolve the axes string for `img`, defaulting to the config axes."""
        if (axes is None):
            axes = self.config.axes
            assert ('C' in axes)
            if ((img.ndim == (len(axes) - 1)) and (self.config.n_channel_in == 1)):
                # Allow a missing singleton channel axis.
                axes = axes.replace('C', '')
        return axes_check_and_normalize(axes, img.ndim)

    def _compute_receptive_field(self, img_size=None):
        """Empirically measure the network's receptive field via an impulse response."""
        from scipy.ndimage import zoom
        if (img_size is None):
            img_size = tuple(((g * (128 if (self.config.n_dim == 2) else 64)) for g in self.config.grid))
        if np.isscalar(img_size):
            img_size = ((img_size,) * self.config.n_dim)
        img_size = tuple(img_size)
        assert all((_is_power_of_2(s) for s in img_size))
        mid = tuple(((s // 2) for s in img_size))
        x = np.zeros((((1,) + img_size) + (self.config.n_channel_in,)), dtype=np.float32)
        z = np.zeros_like(x)
        x[(((0,) + mid) + (slice(None),))] = 1
        y = self.keras_model.predict(x, verbose=0)[0][(0, ..., 0)]
        y0 = self.keras_model.predict(z, verbose=0)[0][(0, ..., 0)]
        grid = tuple((np.array(x.shape[1:(- 1)]) / np.array(y.shape)).astype(int))
        assert (grid == self.config.grid)
        y = zoom(y, grid, order=0)
        y0 = zoom(y0, grid, order=0)
        # The receptive field is wherever the impulse changed the output.
        ind = np.where((np.abs((y - y0)) > 0))
        return [((m - np.min(i)), (np.max(i) - m)) for (m, i) in zip(mid, ind)]

    def _axes_tile_overlap(self, query_axes):
        """Per-axis tile overlap (cached receptive-field computation)."""
        query_axes = axes_check_and_normalize(query_axes)
        try:
            self._tile_overlap
        except AttributeError:
            self._tile_overlap = self._compute_receptive_field()
        overlap = dict(zip(self.config.axes.replace('C', ''), tuple((max(rf) for rf in self._tile_overlap))))
        return tuple((overlap.get(a, 0) for a in query_axes))

    def export_TF(self, fname=None, single_output=True, upsample_grid=True):
        """Export the model as a TensorFlow SavedModel zip (e.g. for Fiji).

        Optionally upsamples grid-downscaled outputs back to input resolution
        and concatenates prob/dist into a single output tensor.
        """
        (Concatenate, UpSampling2D, UpSampling3D, Conv2DTranspose, Conv3DTranspose) = keras_import('layers', 'Concatenate', 'UpSampling2D', 'UpSampling3D', 'Conv2DTranspose', 'Conv3DTranspose')
        Model = keras_import('models', 'Model')
        if ((self.basedir is None) and (fname is None)):
            raise ValueError("Need explicit 'fname', since model directory not available (basedir=None).")
        if self._is_multiclass():
            warnings.warn('multi-class mode not supported yet, removing classification output from exported model')
        grid = self.config.grid
        prob = self.keras_model.outputs[0]
        dist = self.keras_model.outputs[1]
        assert (self.config.n_dim in (2, 3))
        if (upsample_grid and any(((g > 1) for g in grid))):
            conv_transpose = (Conv2DTranspose if (self.config.n_dim == 2) else Conv3DTranspose)
            upsampling = (UpSampling2D if (self.config.n_dim == 2) else UpSampling3D)
            prob = conv_transpose(1, ((1,) * self.config.n_dim), strides=grid, padding='same', kernel_initializer='ones', use_bias=False)(prob)
            dist = upsampling(grid)(dist)
        inputs = self.keras_model.inputs[0]
        outputs = (Concatenate()([prob, dist]) if single_output else [prob, dist])
        csbdeep_model = Model(inputs, outputs)
        fname = ((self.logdir / 'TF_SavedModel.zip') if (fname is None) else Path(fname))
        export_SavedModel(csbdeep_model, str(fname))
        return csbdeep_model
class TAscFlt(TFlt):
    """SWIG-generated proxy for the SNAP C++ class TAscFlt (presumably a float
    wrapper with ascending sort order — confirm against the C++ headers).

    NOTE(review): auto-generated binding code; do not edit by hand.
    """
    # Membership flag: whether Python owns (and may delete) the underlying C++ object.
    thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """Construct the underlying C++ TAscFlt object via the SWIG wrapper."""
        _snap.TAscFlt_swiginit(self, _snap.new_TAscFlt(*args))
    def Save(self, SOut):
        """Serialize this value to the given SNAP output stream."""
        return _snap.TAscFlt_Save(self, SOut)
    # Destructor hook invoked by SWIG when Python ownership is released.
    __swig_destroy__ = _snap.delete_TAscFlt
def load_model(name, model_type, is_eval=False, device='cpu', checkpoint=None):
    """Load a registered model, optionally restoring a checkpoint and setting eval mode.

    Args:
        name: registry key of the model class.
        model_type: pretrained configuration name passed to from_pretrained.
        is_eval: when True, switch the model to eval() mode.
        device: target device; on 'cpu' the model is additionally cast to float.
        checkpoint: optional checkpoint to load weights from.

    Returns:
        The model moved to `device`.
    """
    model_cls = registry.get_model_class(name)
    model = model_cls.from_pretrained(model_type=model_type)
    if checkpoint is not None:
        model.load_checkpoint(checkpoint)
    if is_eval:
        model.eval()
    if device == 'cpu':
        # presumably guards against half-precision weights on CPU — verify.
        model = model.float()
    return model.to(device)
def test_psp_head():
    """Smoke tests for PSPHead: config validation, norm setup, output shape."""
    # pool_scales given as a bare int must be rejected.
    with pytest.raises(AssertionError):
        PSPHead(in_channels=32, channels=16, num_classes=19, pool_scales=1)
    # No norm_cfg: convolutions carry no norm layer.
    head = PSPHead(in_channels=32, channels=16, num_classes=19)
    assert not _conv_has_norm(head, sync_bn=False)
    # With SyncBN requested, convolutions must carry the norm.
    head = PSPHead(in_channels=32, channels=16, num_classes=19, norm_cfg=dict(type='SyncBN'))
    assert _conv_has_norm(head, sync_bn=True)
    inputs = [torch.randn(1, 32, 45, 45)]
    head = PSPHead(in_channels=32, channels=16, num_classes=19, pool_scales=(1, 2, 3))
    if torch.cuda.is_available():
        (head, inputs) = to_cuda(head, inputs)
    # Each PSP module pools to its configured output size.
    for idx, scale in enumerate((1, 2, 3)):
        assert head.psp_modules[idx][0].output_size == scale
    outputs = head(inputs)
    assert outputs.shape == (1, head.num_classes, 45, 45)
def convert_cmake_value_to_python_value(cmake_value, cmake_type):
    """Convert a CMake cache value to a Python value based on its CMake type.

    Args:
        cmake_value: raw value string from the CMake cache.
        cmake_type: CMake type name (e.g. 'BOOL', 'FILEPATH', 'STRING');
            compared case-insensitively.

    Returns:
        BOOL: True/False following CMake's boolean constants — false for
            0, OFF, NO, FALSE, N, IGNORE, NOTFOUND, the empty string, or any
            value ending in '-NOTFOUND' (case-insensitive); true otherwise.
        FILEPATH: None when the path is '*-NOTFOUND', else the value unchanged.
        Any other type: the value unchanged.
    """
    cmake_type = cmake_type.upper()
    up_val = cmake_value.upper()
    if cmake_type == 'BOOL':
        # CMake false constants per the cmake-language if() rules.
        # FIX: 'IGNORE' was missing from the original tuple although it is a
        # documented CMake false constant.
        falsy = ('FALSE', 'OFF', 'N', 'NO', '0', '', 'IGNORE', 'NOTFOUND')
        return not ((up_val in falsy) or up_val.endswith('-NOTFOUND'))
    if cmake_type == 'FILEPATH':
        return None if up_val.endswith('-NOTFOUND') else cmake_value
    return cmake_value
@test_utils.test(arch=[ti.cuda, ti.cpu], real_matrix_scalarize=False)
def test_local_matrix_indexing_in_loop():
    """A local ti.Matrix must be indexable with loop variables inside a kernel.

    NOTE(review): the decorator above was truncated to '_utils.test(...)' in
    the source dump; restored as @test_utils.test(...). The inner @ti.kernel
    is presumed from the usual taichi test pattern — confirm against upstream.
    """
    s = ti.field(ti.i32, shape=(3, 3))

    @ti.kernel
    def test():
        mat = ti.Matrix([[((x * 3) + y) for y in range(3)] for x in range(3)])
        for i in range(3):
            for j in range(3):
                s[(i, j)] = (mat[(i, j)] + 1)
    test()
    for i in range(3):
        for j in range(3):
            assert (s[(i, j)] == (((i * 3) + j) + 1))
class CheckPointState(object):
    """Container for state carried across checkpoint/restore cycles.

    Attributes:
        root_problem: presumably the root problem instance being solved;
            None until assigned by the checkpointing code — verify at callers.
        temp_root: presumably a temporary root used while restoring — verify.
        cumulative_time: total time accumulated so far (starts at 0).
    """

    def __init__(self):
        self.root_problem = None
        self.temp_root = None
        self.cumulative_time = 0
def do_analyse_sick(file_path, dev=True, delta=1, stop=None):
    """Parse a SICK training log and print the ten best checkpoints.

    A record in the log is a 'data round' line (step number), a
    '==> for dev' line (dev loss/pearson/spearman/mse) and a
    '~~> for test' line (test loss/pearson/spearman/mse).  Records are
    ranked by the metric chosen via *dev* (dev vs test columns) and
    *delta* (offset into loss/pearson/spm/mse); parsing stops once the
    step number exceeds *stop* (when given).  Output goes to stdout;
    returns None.
    """
    records = []
    with open(file_path, 'r', encoding='utf-8') as log:
        in_record = False
        current = [0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        for raw in log:
            if not in_record:
                if raw.startswith('data round'):
                    current[0] = int(raw.split(' ')[-4].split(':')[-1])
                    if stop is not None and current[0] > stop:
                        break
                if raw.startswith('==> for dev'):
                    tokens = raw.split(' ')
                    # Metric tokens carry a trailing comma; strip it.
                    current[1] = float(tokens[-7][:-1])
                    current[2] = float(tokens[-5][:-1])
                    current[3] = float(tokens[-3][:-1])
                    current[4] = float(tokens[-1])
                    in_record = True
            elif raw.startswith('~~> for test'):
                tokens = raw.split(' ')
                current[5] = float(tokens[-7][:-1])
                current[6] = float(tokens[-5][:-1])
                current[7] = float(tokens[-3][:-1])
                current[8] = float(tokens[-1])
                records.append(current)
                in_record = False
                current = [0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    if len(records) > 0:
        print('max step:', records[-1][0])
        # Column 1 starts the dev metrics, column 5 the test metrics.
        key_index = (1 if dev else 5) + delta
        # Pearson/Spearman (delta 1 or 2) rank best-first descending.
        ranked = sorted(records, key=lambda rec: rec[key_index], reverse=delta in [1, 2])
        for rec in ranked[:10]:
            print('step: %d, dev_loss: %.4f, dev_pearson: %.4f, dev_spm: %.4f, dev_mse: %.4f, test_loss: %.4f, test_pearson: %.4f, test_spm: %.4f, test_mse: %.4f,' % (rec[0], rec[1], rec[2], rec[3], rec[4], rec[5], rec[6], rec[7], rec[8]))
class LogAnomaly(ParamInfoMixin):
    """Facade over LogAnomalyDetection with a registry of supported detectors.

    `algorithms` maps a short algorithm name to
    (module path, detector class name, params class name).
    """

    algorithms = {'one_class_svm': ('logai.algorithms.anomaly_detection_algo.one_class_svm', 'OneClassSVMDetector', 'OneClassSVMParams'), 'isolation_forest': ('logai.algorithms.anomaly_detection_algo.isolation_forest', 'IsolationForestDetector', 'IsolationForestParams'), 'lof': ('logai.algorithms.anomaly_detection_algo.local_outlier_factor', 'LOFDetector', 'LOFParams'), 'distribution_divergence': ('logai.algorithms.anomaly_detection_algo.distribution_divergence', 'DistributionDivergence', 'DistributionDivergenceParams'), 'dbl': ('logai.algorithms.anomaly_detection_algo.dbl', 'DBLDetector', 'DBLDetectorParams'), 'ets': ('logai.algorithms.anomaly_detection_algo.ets', 'ETSDetector', 'ETSDetectorParams')}

    def __init__(self):
        # Set by execute_anomaly_detection(); methods below require it.
        self.app = None
        self.attributes = None

    def results(self):
        """Return the raw results of the last executed workflow."""
        return self.app.results

    def execute_anomaly_detection(self, config: WorkFlowConfig):
        """Run the anomaly-detection workflow described by *config*."""
        self.app = LogAnomalyDetection(config)
        self.app.execute()
        return

    def get_anomalies(self, attributes=None):
        """Return only the rows flagged anomalous, optionally filtered.

        *attributes* is a mapping of column name -> required value (see
        get_results); None/empty applies no filter.
        """
        df = self.get_results(attributes)
        df = df[df['is_anomaly']]
        return df

    def get_results(self, attributes=None):
        """Return results filtered to rows matching every (column, value) pair.

        Bug fix: the default was a shared mutable list (`[]`), yet the body
        iterates `attributes.items()` — i.e. a dict is expected.  The default
        is now None (equally falsy, so default-call behavior is unchanged),
        avoiding both the mutable-default pitfall and the type mismatch.
        """
        df = self.app.results
        if not attributes:
            return df
        for k, v in attributes.items():
            df = df.loc[df[k] == v]
        return df

    def get_attributes(self):
        """Return the attributes exposed by the underlying app."""
        return self.app.attributes

    def get_event_group(self):
        """Return the event grouping computed by the underlying app."""
        return self.app.event_group

    def json_to_config(self, config_json):
        """Parse a JSON string into a WorkFlowConfig."""
        config = json.loads(config_json)
        workflow_config = WorkFlowConfig.from_dict(config)
        return workflow_config

    def yaml_to_config(self, config_yaml):
        """Parse a YAML string into a WorkFlowConfig."""
        config = yaml.safe_load(config_yaml)
        workflow_config = WorkFlowConfig.from_dict(config)
        return workflow_config
def load_valid_paths(path='./valid_paths.txt'):
    """Return the non-empty, whitespace-stripped lines of *path*.

    Generalized: the previously hard-coded file location is now a defaulted
    parameter, so existing zero-argument callers behave identically while
    new callers may point at any file.

    Raises:
        OSError: if *path* cannot be opened.
    """
    with open(path, 'r') as fp:
        return [line.strip() for line in fp if line.strip() != '']
class CartanType(cartan_type.CartanType_decorator):
    """A Cartan type decorated with a set of marked Dynkin-diagram nodes.

    Wraps another Cartan type and carries ``marked_nodes`` — a sorted tuple
    of indices taken from the wrapped type's index set.  Marking affects
    printing/drawing (repr, latex, ascii art, Dynkin diagram) and is
    propagated through derived types (dual, relabel, folding).
    """

    def __classcall__(cls, ct, marked_nodes):
        """Normalize arguments; with no marked nodes, return *ct* unchanged."""
        ct = cartan_type.CartanType(ct)
        if (not marked_nodes):
            return ct
        # Every marked node must belong to the wrapped type's index set.
        if any(((node not in ct.index_set()) for node in marked_nodes)):
            raise ValueError('invalid marked node')
        # Canonical sorted-tuple form so equal markings are identical keys.
        marked_nodes = tuple(sorted(marked_nodes))
        return super().__classcall__(cls, ct, marked_nodes)

    def __init__(self, ct, marked_nodes):
        """Initialize the decorator and specialize / re-abstract the class.

        The instance's class is swapped to the finite/affine subclass when
        applicable, and abstract superclasses of the wrapped type are
        re-attached so isinstance checks keep working.
        """
        cartan_type.CartanType_decorator.__init__(self, ct)
        self._marked_nodes = marked_nodes
        if ct.is_finite():
            self.__class__ = CartanType_finite
        elif ct.is_affine():
            self.__class__ = CartanType_affine
        abstract_classes = tuple((cls for cls in self._stable_abstract_classes if isinstance(ct, cls)))
        if abstract_classes:
            self._add_abstract_superclass(abstract_classes)

    # Abstract classes that survive decoration: if the wrapped type is an
    # instance, the marked type should be too.
    _stable_abstract_classes = [cartan_type.CartanType_finite, cartan_type.CartanType_affine, cartan_type.CartanType_simple, cartan_type.CartanType_simply_laced, cartan_type.CartanType_crystallographic]

    def _repr_(self, compact=False):
        """Return the wrapped type's repr plus a note on the marked node(s)."""
        if (not compact):
            base = repr(self._type)
        else:
            try:
                base = self._type._repr_(compact=True)
            except TypeError:
                # Wrapped type's _repr_ does not accept a compact flag.
                base = repr(self._type)
        if (len(self._marked_nodes) == 1):
            return (base + ' with node {} marked'.format(self._marked_nodes[0]))
        return (base + ' with nodes {} marked'.format(self._marked_nodes))

    def _latex_(self):
        """Return the LaTeX form, appending the marks if the option is set."""
        from sage.misc.latex import latex
        ret = self._type._latex_()
        if self.options('latex_marked'):
            if (len(self._marked_nodes) == 1):
                ret += ' \\text{{ with node ${}$ marked}} '.format(latex(self._marked_nodes[0]))
            else:
                ret += ' \\text{{ with nodes ${}$ marked}} '.format(latex(self._marked_nodes))
        return ret

    def _ascii_art_node(self, label):
        """Return the ascii-art glyph for a node: marked glyph or plain 'O'."""
        if (label in self._marked_nodes):
            return self.options('marked_node_str')
        return 'O'

    def _latex_draw_node(self, x, y, label, position='below=4pt', fill='white'):
        """Draw one Dynkin-diagram node, overlaying a cross when marked."""
        ret = cartan_type.CartanType_abstract._latex_draw_node(self, x, y, label, position, fill)
        if (label in self._marked_nodes):
            ret += self._latex_draw_mark(x, y)
        return ret

    def _latex_draw_mark(self, x, y, color='black', thickness='thin'):
        """Return TikZ code for the X-shaped mark centered at (x, y)."""
        ret = '\\draw[shift={{({}, {})}}, {}, {}] (0.25cm, 0.25cm) -- (-0.25cm, -0.25cm);\n'.format(x, y, color, thickness)
        ret += '\\draw[shift={{({}, {})}}, {}, {}] (0.25cm, -0.25cm) -- (-0.25cm, 0.25cm);\n'.format(x, y, color, thickness)
        return ret

    def _latex_dynkin_diagram(self, label=None, node=None, node_dist=2):
        """Delegate to the wrapped type, defaulting to the marking node drawer."""
        if (label is None):
            label = (lambda i: i)
        if (node is None):
            node = self._latex_draw_node
        return self._type._latex_dynkin_diagram(label, node, node_dist)

    def ascii_art(self, label=None, node=None):
        """Delegate to the wrapped type, defaulting to the marking node glyph."""
        if (label is None):
            label = (lambda i: i)
        if (node is None):
            node = self._ascii_art_node
        return self._type.ascii_art(label, node)

    def dynkin_diagram(self):
        """Return a copy of the wrapped Dynkin diagram, re-tagged with self."""
        result = self._type.dynkin_diagram().copy()
        result._cartan_type = self
        return result

    def dual(self):
        """Return the dual type, carrying the same marked nodes."""
        return self._type.dual().marked_nodes(self._marked_nodes)

    def relabel(self, relabelling):
        """Return the relabelled type with the marks relabelled alongside."""
        rct = self._type.relabel(relabelling)
        rd = rct._relabelling
        marked_nodes = [rd[node] for node in self._marked_nodes]
        return rct.marked_nodes(marked_nodes)

    def marked_nodes(self, marked_nodes):
        """Replace the marks; an empty marking unwraps back to the base type."""
        if (not marked_nodes):
            return self._type
        return CartanType(self._type, marked_nodes)

    def _default_folded_cartan_type(self):
        """Return the folded type, pushing the marks through the folding orbit."""
        from sage.combinat.root_system.type_folded import CartanTypeFolded
        vct = self._type._default_folded_cartan_type()
        sigma = vct.folding_orbit()
        # Each marked node expands to its whole orbit in the folded diagram.
        marked_nodes = sum([sigma[i] for i in self._marked_nodes], ())
        folding = vct._folding.marked_nodes(marked_nodes)
        return CartanTypeFolded(self, folding, sigma)

    def type(self):
        """Return the letter/type of the wrapped Cartan type."""
        return self._type.type()
class CarSprite(pyglet.shapes.Rectangle):
    """Rectangle sprite mirroring one traffic actor's pose each frame."""

    def __init__(self, actor_id, traffic_manager, color, batch=None, group=None):
        # Start as a 1x1 rectangle at the origin; update() sizes and places it.
        super().__init__(0, 0, 1, 1, batch=batch, group=group)
        self.traffic_manager = traffic_manager
        self._actor_id = actor_id
        self._color = color

    def update(self):
        """Pull the actor's current state and sync pose, size and colour."""
        state = self.traffic_manager.get_actor_state(self._actor_id)
        self.position = state.location[:2]
        self.width = state.width
        self.height = state.length
        # Anchor at the centre so the rectangle rotates about the actor's
        # midpoint rather than its corner.
        self.anchor_position = (self.width / 2.0, self.height / 2.0)
        # NOTE(review): assumes rotation[1] is a yaw angle in radians
        # (np.degrees converts it) with a 90-degree screen offset — confirm.
        self.rotation = -np.degrees(state.rotation[1]) + 90.0
        self.color = self._color
        self.opacity = 255
def create_librispeech_txt(dataset_dir):
    """Build gzipped metadata lists (one per LibriSpeech part) from the zips.

    For every part in ``Parts``, reads ``<part>.zip`` in *dataset_dir*, walks
    the ``*.trans.txt`` transcript files inside it, probes each referenced
    ``.flac.ogg`` with ffprobe to get its duration, and writes a
    Python-literal list of ``{'text', 'file', 'seq_name', 'duration'}``
    dicts to ``<part>.txt``, which is then gzipped.  Parts whose ``.txt.gz``
    already exists are skipped.  Requires ``ffprobe`` on PATH.
    """
    output_dir = dataset_dir
    with pushd(output_dir):
        for part in Parts:
            dest_meta_filename_gz = ('%s.txt.gz' % part)
            # Skip parts already converted in a previous run.
            if os.path.exists(dest_meta_filename_gz):
                print('File exists:', dest_meta_filename_gz)
                continue
            dest_meta_filename = ('%s.txt' % part)
            dest_meta_file = open(dest_meta_filename, 'w')
            # Output is a Python-literal list; open the bracket now.
            dest_meta_file.write('[\n')
            zip_filename = ('%s/%s.zip' % (dataset_dir, part))
            assert os.path.exists(zip_filename)
            zip_file = ZipFile(zip_filename)
            assert zip_file.filelist
            count_lines = 0
            for info in zip_file.filelist:
                assert isinstance(info, ZipInfo)
                path = info.filename.split('/')
                # Only entries under this part's top-level directory matter.
                if path[0].startswith(part):
                    subdir = path[0]
                    assert (subdir == part)
                    if path[(- 1)].endswith('.trans.txt'):
                        print('read', part, path[(- 1)])
                        # Each transcript line: "<seq_name> <utterance text>".
                        for line in zip_file.read(info).decode('utf8').splitlines():
                            (seq_name, txt) = line.split(' ', 1)
                            count_lines += 1
                            # Audio file sits next to the transcript in the zip.
                            ogg_filename = ('%s/%s.flac.ogg' % ('/'.join(path[:(- 1)]), seq_name))
                            ogg_bytes = zip_file.read(ogg_filename)
                            assert (len(ogg_bytes) > 0)
                            # ffprobe needs a real file, so spill to a temp file.
                            with tempfile.NamedTemporaryFile(suffix='.ogg') as temp_file:
                                temp_file.write(ogg_bytes)
                                temp_file.flush()
                                duration_str = subprocess.check_output(['ffprobe', temp_file.name, '-show_entries', 'format=duration', '-v', 'quiet', '-of', 'compact'], stderr=subprocess.STDOUT).decode('utf8').strip()
                            # compact output looks like "format|duration=12.34".
                            duration_str = duration_str.split('=')[(- 1)]
                            assert (float(duration_str) > 0)
                            dest_meta_file.write(("{'text': %r, 'file': %r, 'seq_name': '%s', 'duration': %s},\n" % (txt, ogg_filename, ('%s-%s' % (part, seq_name)), duration_str)))
            assert (count_lines > 0)
            dest_meta_file.write(']\n')
            dest_meta_file.close()
            # Compress in place; gzip removes the original .txt.
            sh('gzip', dest_meta_filename)
            assert os.path.exists(dest_meta_filename_gz)
def initialize_from_weights_file(model, weights_file, broadcast=True):
    """Load *weights_file* on GPU 0 and optionally broadcast to all GPUs.

    With broadcast=False only GPU 0's parameters are initialized.
    """
    initialize_gpu_0_from_weights_file(model, weights_file)
    if not broadcast:
        return
    broadcast_parameters(model)
class EmptyRandomEnv6x6(EmptyEnv):
    """6x6 empty grid variant with no fixed agent start position.

    agent_start_pos=None — presumably requests random agent placement,
    matching the class name; confirm against EmptyEnv's contract.
    """

    def __init__(self):
        super().__init__(size=6, agent_start_pos=None)
def _jit_build_partition_tree(xmin, xmax, ymin, ymax, zmin, zmax, total_ywidth, total_zwidth, M, clustering, q):
    """Build a balanced 3D partition (clustering) tree by largest-first splits.

    Pops regions from the max-heap *q* (keyed by negative region size),
    records each internal node's children in *clustering* rows (columns 0/1,
    offset by M so leaves are indices < M), splits the region along its
    widest axis, and pushes the two halves.  Internal node indices are
    assigned top-down from len(clustering)-1 downward.  Finally column 3 of
    every row is filled with the subtree leaf count (bottom-up).
    Mutates *clustering* and *q* in place; returns None.

    Assumes *q* is pre-seeded with the root region and *clustering* is a
    (M-1) x >=4 numeric matrix — TODO confirm against the caller.
    """
    ind = (len(clustering) - 1)
    while (len(q) > 0):
        (_, xmin, xmax, ymin, ymax, zmin, zmax, parent_ind, is_left) = heapq.heappop(q)
        if (parent_ind >= 0):
            # Record this node (leaf or internal) as its parent's child;
            # internal nodes are stored offset by M.
            clustering[(parent_ind, (0 if is_left else 1))] = (ind + M)
        if (ind < 0):
            # Leaf sanity check: the leaf id encodes its (x, y, z) cell.
            assert (((- ind) - 1) == ((((xmin * total_ywidth) * total_zwidth) + (ymin * total_zwidth)) + zmin))
        xwidth = (xmax - xmin)
        ywidth = (ymax - ymin)
        zwidth = (zmax - zmin)
        if ((xwidth == 1) and (ywidth == 1) and (zwidth == 1)):
            # Single cell: a leaf, nothing to split.
            pass
        else:
            lxmin = rxmin = xmin
            lxmax = rxmax = xmax
            lymin = rymin = ymin
            lymax = rymax = ymax
            lzmin = rzmin = zmin
            lzmax = rzmax = zmax
            # Split along the widest splittable axis, preferring x, then y, then z.
            if ((xwidth >= ywidth) and (xwidth > 1)):
                xmid = (xmin + (xwidth // 2))
                lxmax = xmid
                rxmin = xmid
            elif (ywidth > 1):
                ymid = (ymin + (ywidth // 2))
                lymax = ymid
                rymin = ymid
            else:
                zmid = (zmin + (zwidth // 2))
                lzmax = zmid
                rzmin = zmid
            lsize = (((lxmax - lxmin) * (lymax - lymin)) * (lzmax - lzmin))
            rsize = (((rxmax - rxmin) * (rymax - rymin)) * (rzmax - rzmin))
            # Negative size makes heapq behave as a max-heap on region size.
            heapq.heappush(q, ((- lsize), lxmin, lxmax, lymin, lymax, lzmin, lzmax, ind, True))
            heapq.heappush(q, ((- rsize), rxmin, rxmax, rymin, rymax, rzmin, rzmax, ind, False))
        ind -= 1
    # Bottom-up pass: column 3 = number of leaves under each internal node.
    for i in range(len(clustering)):
        li = int(clustering[(i, 0)])
        ri = int(clustering[(i, 1)])
        lsize = (1 if (li < M) else clustering[((li - M), 3)])
        rsize = (1 if (ri < M) else clustering[((ri - M), 3)])
        clustering[(i, 3)] = (lsize + rsize)
class RankSelection(SelectionFunction[T]):
    """Rank-biased selection over a population.

    Draws an index using a rank-bias formula: with bias c and a uniform
    random u in [0, 1), the selected rank fraction is
    (c - sqrt(c^2 - 4(c-1)u)) / (2(c-1)), which favours low ranks more
    strongly as c grows.
    """

    def get_index(self, population: list[T]) -> int:
        """Return a biased-random index into *population*."""
        u = randomness.next_float()
        c = config.configuration.search_algorithm.rank_bias
        fraction = (c - sqrt(c * c - 4.0 * (c - 1.0) * u)) / (2.0 * (c - 1.0))
        return int(len(population) * fraction)
# NOTE(review): this line looks like a `@_ASSIGNERS.register_module()`
# decorator whose leading '@' was lost during extraction; as written it is
# a bare call and the class below is never registered — confirm.
_ASSIGNERS.register_module()
class HungarianAssigner(BaseAssigner):
    """One-to-one bbox assigner via the Hungarian algorithm (DETR-style).

    The assignment cost is the sum of a classification cost, an L1
    regression cost on normalized cxcywh boxes, and an IoU cost on
    absolute xyxy boxes.
    """

    def __init__(self, cls_cost=dict(type='ClassificationCost', weight=1.0), reg_cost=dict(type='BBoxL1Cost', weight=1.0), iou_cost=dict(type='IoUCost', iou_mode='giou', weight=1.0)):
        # Each cost config is materialized through the match-cost registry.
        self.cls_cost = build_match_cost(cls_cost)
        self.reg_cost = build_match_cost(reg_cost)
        self.iou_cost = build_match_cost(iou_cost)

    def assign(self, bbox_pred, cls_pred, gt_bboxes, gt_labels, img_meta, gt_bboxes_ignore=None, eps=1e-07):
        """Assign each predicted box to at most one ground-truth box.

        Returns an AssignResult where assigned_gt_inds uses 0 for
        background and gt_index+1 for a match.  bbox_pred is expected in
        normalized cxcywh; gt_bboxes in absolute xyxy.
        """
        assert (gt_bboxes_ignore is None), 'Only case when gt_bboxes_ignore is None is supported.'
        (num_gts, num_bboxes) = (gt_bboxes.size(0), bbox_pred.size(0))
        # -1 means "unassigned" until proven otherwise.
        assigned_gt_inds = bbox_pred.new_full((num_bboxes,), (- 1), dtype=torch.long)
        assigned_labels = bbox_pred.new_full((num_bboxes,), (- 1), dtype=torch.long)
        if ((num_gts == 0) or (num_bboxes == 0)):
            if (num_gts == 0):
                # No ground truth: every prediction is background (0).
                assigned_gt_inds[:] = 0
            return AssignResult(num_gts, assigned_gt_inds, None, labels=assigned_labels)
        (img_h, img_w, _) = img_meta['img_shape']
        # Normalization factor to move gt boxes into [0, 1] coordinates.
        factor = gt_bboxes.new_tensor([img_w, img_h, img_w, img_h]).unsqueeze(0)
        cls_cost = self.cls_cost(cls_pred, gt_labels)
        normalize_gt_bboxes = (gt_bboxes / factor)
        reg_cost = self.reg_cost(bbox_pred, normalize_gt_bboxes)
        # IoU cost is computed in absolute xyxy coordinates.
        bboxes = (bbox_cxcywh_to_xyxy(bbox_pred) * factor)
        iou_cost = self.iou_cost(bboxes, gt_bboxes)
        cost = ((cls_cost + reg_cost) + iou_cost)
        # scipy's solver runs on CPU numpy arrays.
        cost = cost.detach().cpu()
        (matched_row_inds, matched_col_inds) = linear_sum_assignment(cost)
        matched_row_inds = torch.from_numpy(matched_row_inds).to(bbox_pred.device)
        matched_col_inds = torch.from_numpy(matched_col_inds).to(bbox_pred.device)
        # Default everything to background, then write the matches (+1 offset).
        assigned_gt_inds[:] = 0
        assigned_gt_inds[matched_row_inds] = (matched_col_inds + 1)
        assigned_labels[matched_row_inds] = gt_labels[matched_col_inds]
        return AssignResult(num_gts, assigned_gt_inds, None, labels=assigned_labels)
class Evaluator():
    """Accumulates 3D-tracking Success/Precision metrics over episodes.

    Maintains run-wide ("main") and batch-scoped ("batch") meters; using an
    instance as a context manager resets the batch meters on exit.
    """

    def __init__(self, cfg_=None, timer_=None):
        self.timer = timer_
        self.cfg = cfg_
        # Coordinate frame used by overlap estimation (from the data config).
        self.ref_coordinate = cfg_.DATA_CONFIG.REF_COOR
        self.batch_time = AverageMeter()
        self.data_time = AverageMeter()
        # Run-wide meters, never reset here.
        self.Success_main = Success()
        self.Precision_main = Precision()
        # Batch-scoped meters, reset by __exit__.
        self.Success_batch = Success()
        self.Precision_batch = Precision()

    def update_iou(self, gt, pred, iou_dims=3):
        """Score one (gt, pred) box pair and fold it into all four meters."""
        this_overlap = estimateOverlap(gt, pred, dim=iou_dims, ref_coord=self.ref_coordinate)
        print('-> 3D IOU is {: 2.2f}%'.format((this_overlap * 100)))
        this_accuracy = estimateAccuracy(gt, pred, dim=iou_dims)
        self.Success_main.add_overlap(this_overlap)
        self.Precision_main.add_accuracy(this_accuracy)
        self.Success_batch.add_overlap(this_overlap)
        self.Precision_batch.add_accuracy(this_accuracy)

    def __enter__(self):
        # Bug fix: previously `pass` returned None, so
        # `with Evaluator(...) as ev:` bound ev to None.  A context manager
        # entering itself should return self.
        return self

    def __exit__(self, e, ev, t):
        # Batch meters are scoped to one `with` block; clear them on exit.
        self.Success_batch.reset()
        self.Precision_batch.reset()
def corpus_dataflow_match(references, candidates, lang):
    """Compute the corpus-level data-flow match score (CodeBLEU component).

    For each candidate and each of its references, extracts and normalizes
    data-flow graphs and counts how many reference data-flow edges appear in
    the candidate (consuming each candidate edge at most once).  Returns
    match_count / total_count over the corpus, or 0 with a warning when no
    reference data-flows could be extracted.
    """
    LANGUAGE = Language((root_dir + '/parser/languages.so'), lang)
    parser = Parser()
    parser.set_language(LANGUAGE)
    # Bundle the tree-sitter parser with the language's DFG extractor.
    parser = [parser, dfg_function[lang]]
    match_count = 0
    total_count = 0
    scores = []
    for i in range(len(candidates)):
        references_sample = references[i]
        candidate = candidates[i]
        for reference in references_sample:
            # NOTE(review): comment stripping hard-codes 'java' even though
            # the parser above honours *lang* — confirm whether this should
            # be remove_comments_and_docstrings(..., lang).
            try:
                candidate = remove_comments_and_docstrings(candidate, 'java')
            except Exception:
                # Bug fix: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit.  Stripping comments is
                # best-effort; ordinary parse failures are still ignored.
                pass
            try:
                reference = remove_comments_and_docstrings(reference, 'java')
            except Exception:
                pass
            cand_dfg = get_data_flow(candidate, parser)
            ref_dfg = get_data_flow(reference, parser)
            normalized_cand_dfg = normalize_dataflow(cand_dfg)
            normalized_ref_dfg = normalize_dataflow(ref_dfg)
            if (len(normalized_ref_dfg) > 0):
                total_count += len(normalized_ref_dfg)
                current_match_count = 0
                for dataflow in normalized_ref_dfg:
                    if (dataflow in normalized_cand_dfg):
                        match_count += 1
                        # Consume the edge so duplicates are not double-counted.
                        normalized_cand_dfg.remove(dataflow)
                        current_match_count += 1
                scores.append((float(current_match_count) / len(normalized_ref_dfg)))
            else:
                scores.append(0.0)
    if (total_count == 0):
        print('WARNING: There is no reference data-flows extracted from the whole corpus, and the data-flow match score degenerates to 0. Please consider ignoring this score.')
        return 0
    score = (match_count / total_count)
    return score
def register_Ns3Ipv4Header_methods(root_module, cls):
    """Register ns3::Ipv4Header constructors and methods on the binding class.

    Auto-generated PyBindGen registration code: declares the copy and
    default constructors, then every public method of ns3::Ipv4Header with
    its C++ return type, parameters, and const/virtual/static qualifiers.
    Returns None; *cls* is mutated in place.
    """
    # Constructors: copy constructor and default constructor.
    cls.add_constructor([param('ns3::Ipv4Header const &', 'arg0')])
    cls.add_constructor([])
    # Header serialization interface (ns3::Header overrides).
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    cls.add_method('DscpTypeToString', 'std::string', [param('ns3::Ipv4Header::DscpType', 'dscp')], is_const=True)
    cls.add_method('EcnTypeToString', 'std::string', [param('ns3::Ipv4Header::EcnType', 'ecn')], is_const=True)
    cls.add_method('EnableChecksum', 'void', [])
    # Field getters.
    cls.add_method('GetDestination', 'ns3::Ipv4Address', [], is_const=True)
    cls.add_method('GetDscp', 'ns3::Ipv4Header::DscpType', [], is_const=True)
    cls.add_method('GetEcn', 'ns3::Ipv4Header::EcnType', [], is_const=True)
    cls.add_method('GetFragmentOffset', 'uint16_t', [], is_const=True)
    cls.add_method('GetIdentification', 'uint16_t', [], is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetPayloadSize', 'uint16_t', [], is_const=True)
    cls.add_method('GetProtocol', 'uint8_t', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetSource', 'ns3::Ipv4Address', [], is_const=True)
    cls.add_method('GetTos', 'uint8_t', [], is_const=True)
    cls.add_method('GetTtl', 'uint8_t', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Predicates.
    cls.add_method('IsChecksumOk', 'bool', [], is_const=True)
    cls.add_method('IsDontFragment', 'bool', [], is_const=True)
    cls.add_method('IsLastFragment', 'bool', [], is_const=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    # Field setters.
    cls.add_method('SetDestination', 'void', [param('ns3::Ipv4Address', 'destination')])
    cls.add_method('SetDontFragment', 'void', [])
    cls.add_method('SetDscp', 'void', [param('ns3::Ipv4Header::DscpType', 'dscp')])
    cls.add_method('SetEcn', 'void', [param('ns3::Ipv4Header::EcnType', 'ecn')])
    cls.add_method('SetFragmentOffset', 'void', [param('uint16_t', 'offsetBytes')])
    cls.add_method('SetIdentification', 'void', [param('uint16_t', 'identification')])
    cls.add_method('SetLastFragment', 'void', [])
    cls.add_method('SetMayFragment', 'void', [])
    cls.add_method('SetMoreFragments', 'void', [])
    cls.add_method('SetPayloadSize', 'void', [param('uint16_t', 'size')])
    cls.add_method('SetProtocol', 'void', [param('uint8_t', 'num')])
    cls.add_method('SetSource', 'void', [param('ns3::Ipv4Address', 'source')])
    cls.add_method('SetTos', 'void', [param('uint8_t', 'tos')])
    cls.add_method('SetTtl', 'void', [param('uint8_t', 'ttl')])
    return
# NOTE(review): the three `.parametrize(...)` lines below look like
# `@pytest.mark.parametrize(...)` decorators whose `@pytest.mark` prefix was
# lost during extraction; as written they are not valid Python — confirm
# against the original test module.
.parametrize('observation_shape', [(100,), (4, 84, 84), ((100,), (200,))])
.parametrize('q_func_factory', [MeanQFunctionFactory(), QRQFunctionFactory()])
.parametrize('scalers', [None, 'min_max'])
def test_fqe(observation_shape: Shape, q_func_factory: QFunctionFactory, scalers: Optional[str]) -> None:
    """Smoke-test FQE wrapped around a DDPG algo across shapes/scalers/Q-funcs."""
    (observation_scaler, action_scaler, reward_scaler) = create_scaler_tuple(scalers, observation_shape)
    # Dummy encoders keep the underlying networks tiny for a fast test.
    algo = DDPGConfig(actor_encoder_factory=DummyEncoderFactory(), critic_encoder_factory=DummyEncoderFactory()).create()
    algo.create_impl(observation_shape, 2)
    config = FQEConfig(encoder_factory=DummyEncoderFactory(), observation_scaler=observation_scaler, action_scaler=action_scaler, reward_scaler=reward_scaler, q_func_factory=q_func_factory)
    fqe = FQE(algo=algo, config=config)
    # Shared algo test harness; copy/serialization checks are disabled.
    algo_tester(fqe, observation_shape, test_policy_copy=False, test_policy_optim_copy=False, test_from_json=False, test_q_function_copy=False, test_q_function_optim_copy=False)
def test_union_numpy_empty_1():
    """Round-trip a union type string through deduce_type."""
    text = 'union[float64[parameters={"wonky": "boop"}], unknown]'
    parsed = deduce_type(text)
    # Deduction must produce a UnionType whose string form reproduces the input.
    assert isinstance(parsed, ak.types.UnionType)
    assert str(parsed) == text
class Compositional_dot_Transformer(nn.Module):
    """Self-attention layer with separate 'search' and 'retrieval' head groups.

    Standard scaled-dot-product attention over `search` heads selects which
    tokens to read; a second dot-product stage (`value_query`/`value_key`)
    then selects, per search head, which of the `retrieval` value heads to
    keep.  Optional gumbel-softmax hardens both selections; optional input
    concat and a nonlinear output projection are configurable.
    """

    def __init__(self, dim, search_dim, value_dim, search, retrieval, nonlinear, gumbel, concat, separate, bias):
        super(Compositional_dot_Transformer, self).__init__()
        self.dim = dim
        self.search_dim = search_dim
        self.value_dim = value_dim
        # Per-head dimensions for the search (QK) and retrieval (V) groups.
        self.head_dim = (search_dim // search)
        self.head_v_dim = (value_dim // retrieval)
        self.nonlinear = nonlinear
        self.search = search
        self.retrieval = retrieval
        # Standard 1/sqrt(d_head) attention scaling.
        self.scaling = (self.head_dim ** (- 0.5))
        self.gumbel = gumbel
        self.concat = concat
        self.separate = separate
        self.query_net = nn.Linear(dim, search_dim, bias=bias)
        self.key_net = nn.Linear(dim, search_dim, bias=bias)
        self.value_net = nn.Linear(dim, value_dim, bias=bias)
        # Head dims must divide the projection dims exactly.
        assert ((self.head_dim * search) == search_dim)
        assert ((self.head_v_dim * retrieval) == value_dim)
        # Second-stage (value-selection) query projection.
        self.value_query = nn.Linear(dim, search_dim, bias=bias)
        if self.separate:
            # One key projection per retrieval head.
            self.value_key = GroupLinearLayer(self.retrieval, self.head_v_dim, self.head_dim, bias=bias)
        else:
            self.value_key = nn.Linear(self.head_v_dim, self.head_dim, bias=bias)
        extra = 0
        if self.concat:
            # Project the input down so it can be concatenated to the output.
            self.in_ = nn.Linear(dim, self.head_v_dim, bias=bias)
            extra = 1
        if self.nonlinear:
            self.out_proj = nn.Sequential(nn.Linear(((self.search + extra) * self.head_v_dim), dim, bias=bias), nn.ReLU(), nn.Linear(dim, dim, bias=bias))
        else:
            self.out_proj = nn.Linear(((self.search + extra) * self.head_v_dim), dim, bias=bias)

    def forward(self, x):
        """Run both attention stages on x of shape (batch, tokens, dim).

        Returns (output, token attention scores, value-head selection scores).
        """
        (bsz, n, _) = x.shape
        # Split Q/K into search heads and V into retrieval heads.
        q = (self.query_net(x).view(bsz, n, self.search, self.head_dim) * self.scaling)
        k = self.key_net(x).view(bsz, n, self.search, self.head_dim)
        v = self.value_net(x).view(bsz, n, self.retrieval, self.head_v_dim)
        q = q.transpose(2, 1).contiguous()
        k = k.permute(0, 2, 3, 1).contiguous()
        v = v.transpose(2, 1).contiguous().unsqueeze(1)
        # score: (bsz, search, n, n) token-to-token attention logits.
        score = torch.matmul(q, k)
        # Mask the diagonal so tokens never attend to themselves.
        mask = torch.zeros_like(score[(0, 0)]).fill_diagonal_(1).unsqueeze(0).unsqueeze(0)
        mask = mask.repeat(bsz, self.search, 1, 1).bool()
        score.masked_fill_(mask, float('-inf'))
        if self.gumbel:
            score = F.gumbel_softmax(score, dim=(- 1)).unsqueeze(2)
        else:
            score = F.softmax(score, dim=(- 1)).unsqueeze(2)
        # Attend with every retrieval head under every search head:
        # out -> (bsz, n, search, retrieval, head_v_dim).
        out = torch.matmul(score, v).permute(0, 3, 1, 2, 4).reshape(bsz, n, self.search, self.retrieval, self.head_v_dim)
        # Second stage: score each retrieval head per search head.
        q_v = (self.value_query(x).view(bsz, n, self.search, 1, self.head_dim) * self.scaling)
        if self.separate:
            z = out.contiguous().view(((bsz * n) * self.search), self.retrieval, self.head_v_dim)
            k_v = self.value_key(z).view(bsz, n, self.search, self.retrieval, self.head_v_dim)
        else:
            k_v = self.value_key(out)
        k_v = k_v.permute(0, 1, 2, 4, 3).contiguous()
        # v_score: (bsz, n, search, retrieval, 1) selection weights over heads.
        v_score = torch.matmul(q_v, k_v).view(bsz, n, self.search, self.retrieval, 1)
        if self.gumbel:
            v_score = F.gumbel_softmax(v_score, dim=3)
        else:
            v_score = F.softmax(v_score, dim=3)
        # Weighted sum over retrieval heads, then flatten the search heads.
        out = (v_score * out).sum(dim=3).reshape(bsz, n, (self.search * self.head_v_dim))
        if self.concat:
            in_ = self.in_(x)
            out = torch.cat([out, in_], dim=(- 1))
        return (self.out_proj(out), score, v_score)
def write_continents_top(continents):
    """Write one 'Continent: X, found - N' line per entry to the results file.

    *continents* maps a continent name to its occurrence count.  When the
    destination directory is missing, an error message is printed instead
    of raising.
    """
    report = ''.join(
        'Continent: {continent}, found - {count}\n'.format(continent=name, count=continents[name])
        for name in continents.keys()
    )
    destination = '{dest}/{txt}/{result_file}'.format(dest=RESULTS_DIR, txt=TXT_DIR, result_file=CONTINENTS_TOP_TXT_FILE)
    try:
        with open(destination, mode='w') as file:
            file.write(report)
    except FileNotFoundError:
        print('{color}Error: destination file write failed{reset}'.format(color=ERROR_COLOR, reset=RESET_COLOR))
def copy_flax_attn_params(hf_backbone, flax_attn_params):
    """Copy Flax attention weights into the HF backbone's state dict in place.

    Flax parameter names are rewritten to their HF equivalents, and the
    per-head factored arrays are flattened (biases) or flattened-and-
    transposed (weights) to match the fused PyTorch projection layout.
    """
    # state_dict() tensors alias the model's parameters, so in-place copy_
    # below updates the backbone directly.
    target_state = hf_backbone.state_dict()
    for flax_key, value in flax_attn_params.items():
        # Map the Flax module path onto the HF layer path.
        if flax_key.startswith('transformer'):
            hf_key = flax_key.replace('transformer.resblocks', 'text_model.encoder.layers')
        else:
            hf_key = flax_key.replace('visual.transformer.resblocks', 'vision_model.encoder.layers')
        hf_key = hf_key.replace('attn', 'self_attn')
        hf_key = hf_key.replace('key', 'k_proj')
        hf_key = hf_key.replace('value', 'v_proj')
        hf_key = hf_key.replace('query', 'q_proj')
        hf_key = hf_key.replace('out', 'out_proj')
        # Flatten factored per-head biases: (heads, head_dim) -> (dim,).
        if 'bias' in hf_key and value.ndim == 2:
            value = value.reshape(value.shape[0] * value.shape[1])
        # Output-projection weights: fold heads into rows, then transpose.
        if 'weight' in hf_key and 'out' in hf_key:
            value = value.reshape((value.shape[0] * value.shape[1], value.shape[2])).T
        # Q/K/V weights: fold heads into columns, then transpose.
        if 'weight' in hf_key and 'out' not in hf_key:
            value = value.reshape((value.shape[0], value.shape[1] * value.shape[2])).T
        target_state[hf_key].copy_(torch.from_numpy(value))
class VoxelResBackBone8x(nn.Module):
    """Sparse-conv 3D backbone with residual blocks and 8x spatial downsampling.

    Four residual stages (16 -> 32 -> 64 -> 128 channels, strides 1/2/2/2)
    followed by an output conv that compresses the height axis.  Multi-scale
    feature maps and their strides are published into batch_dict.
    """

    def __init__(self, model_cfg, input_channels, grid_size, **kwargs):
        super().__init__()
        self.model_cfg = model_cfg
        norm_fn = partial(nn.BatchNorm1d, eps=0.001, momentum=0.01)
        # Reverse grid_size to (z, y, x) order and pad z by one voxel.
        self.sparse_shape = (grid_size[::(- 1)] + [1, 0, 0])
        self.conv_input = spconv.SparseSequential(spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'), norm_fn(16), nn.ReLU())
        block = post_act_block
        # Stage 1: stride 1, two residual blocks at 16 channels.
        self.conv1 = spconv.SparseSequential(SparseBasicBlock(16, 16, norm_fn=norm_fn, indice_key='res1'), SparseBasicBlock(16, 16, norm_fn=norm_fn, indice_key='res1'))
        # Stage 2: stride-2 sparse conv then two residual blocks at 32 channels.
        self.conv2 = spconv.SparseSequential(block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'), SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key='res2'), SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key='res2'))
        # Stage 3: stride-2 down to 64 channels.
        self.conv3 = spconv.SparseSequential(block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'), SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key='res3'), SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key='res3'))
        # Stage 4: stride-2 down to 128 channels (asymmetric padding on z).
        self.conv4 = spconv.SparseSequential(block(64, 128, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'), SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key='res4'), SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key='res4'))
        last_pad = 0
        last_pad = self.model_cfg.get('last_pad', last_pad)
        # Output conv: (3,1,1) kernel with stride (2,1,1) collapses height only.
        self.conv_out = spconv.SparseSequential(spconv.SparseConv3d(128, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad, bias=False, indice_key='spconv_down2'), norm_fn(128), nn.ReLU())
        self.num_point_features = 128
        self.backbone_channels = {'x_conv1': 16, 'x_conv2': 32, 'x_conv3': 64, 'x_conv4': 128}

    def forward(self, batch_dict):
        """Run all stages and attach encoded + multi-scale features to batch_dict."""
        (voxel_features, voxel_coords) = (batch_dict['voxel_features'], batch_dict['voxel_coords'])
        batch_size = batch_dict['batch_size']
        input_sp_tensor = spconv.SparseConvTensor(features=voxel_features, indices=voxel_coords.int(), spatial_shape=self.sparse_shape, batch_size=batch_size)
        x = self.conv_input(input_sp_tensor)
        x_conv1 = self.conv1(x)
        x_conv2 = self.conv2(x_conv1)
        x_conv3 = self.conv3(x_conv2)
        x_conv4 = self.conv4(x_conv3)
        out = self.conv_out(x_conv4)
        # Final tensor is downsampled 8x in the BEV plane.
        batch_dict.update({'encoded_spconv_tensor': out, 'encoded_spconv_tensor_stride': 8})
        batch_dict.update({'multi_scale_3d_features': {'x_conv1': x_conv1, 'x_conv2': x_conv2, 'x_conv3': x_conv3, 'x_conv4': x_conv4}})
        batch_dict.update({'multi_scale_3d_strides': {'x_conv1': 1, 'x_conv2': 2, 'x_conv3': 4, 'x_conv4': 8}})
        return batch_dict
class MeanLastFractionalSuccess(BaseMetric):
    """Tracks the final-step 'fractional_success' of every processed episode.

    get_metric_score() reports the mean and standard deviation over all
    episodes seen since the last reset.
    """

    def __init__(self):
        super(MeanLastFractionalSuccess, self).__init__(name='last_fractional_success')
        # One score per episode: the last info dict's fractional_success.
        self.per_episode_scores = []
        self.total_number_of_episodes = 0
        return

    def process_episode(self, episode_obj):
        """Record the fractional success of the episode's final step."""
        self.total_number_of_episodes += 1
        final_info = episode_obj.infos[-1]
        self.per_episode_scores.append(final_info['fractional_success'])

    def get_metric_score(self):
        """Return (mean, std) over the recorded per-episode scores."""
        scores = self.per_episode_scores
        return (np.mean(scores), np.std(scores))

    def reset(self):
        """Drop all accumulated episode statistics."""
        self.per_episode_scores = []
        self.total_number_of_episodes = 0
class distill():
    """Feature-distillation trainer: matches student activations to a teacher.

    Pairs sampled layers of the student and teacher, attaches per-pair
    1x1-conv + batch-norm adapter heads to the student, and pre-trains the
    student with a margin-based activation-transfer loss before the main
    training stage.
    """

    def __init__(self, args, model, teacher):
        self.args = args
        self.student = model
        self.teacher = teacher
        # Layers at matching depths in both networks to distil between.
        self.student_layers = self.sampled_layer(args.arch, self.student)
        self.teacher_layers = self.sampled_layer(args.teacher_arch, self.teacher)
        # Helper: capture keyword arguments as a plain dict.
        def kwargs(**kwargs):
            return kwargs
        # Pre-configure the custom conv/BN layer classes used by the adapters.
        setattr(tcl.Conv2d, 'pre_defined', kwargs(kernel_initializer=tf.keras.initializers.he_normal(), use_biases=False, activation_fn=None, trainable=True))
        setattr(tcl.BatchNorm, 'pre_defined', kwargs(trainable=True))
        # One 1x1-conv + BN adapter per layer pair, mapping student channels
        # onto the teacher layer's channel count (tl.gamma.shape[-1]).
        self.student.aux_layers = [tf.keras.Sequential([tcl.Conv2d([1, 1], tl.gamma.shape[(- 1)]), tcl.BatchNorm()]) for (sl, tl) in zip(self.student_layers, self.teacher_layers)]
        # Margin used by the activation-boundary transfer loss.
        self.margin = 1.0

    def sampled_layer(self, arch, model):
        """Return the distillation tap layers for *arch*, enabling pre-activation capture."""
        if ('WResNet' in arch):
            for i in range(1, 3):
                model.Layers[('BasicBlock%d.0/bn' % i)].keep_feat = 'pre_act'
            model.Layers['bn_last'].keep_feat = 'pre_act'
            return ([model.Layers[('BasicBlock%d.0/bn' % i)] for i in range(1, 3)] + [model.Layers['bn_last']])

    def loss(self, sl, tl, aux):
        """Margin-based activation-transfer loss between one layer pair.

        Penalizes the adapted student activation where its sign disagrees
        with the (stopped-gradient) teacher activation, normalized by
        batch x height x width.
        """
        s = aux(sl.feat, training=True)
        t = tf.stop_gradient(tl.feat)
        (B, H, W, D) = s.shape
        return (((tf.reduce_sum(tf.abs(((tf.square((s + self.margin)) * tf.cast(tf.logical_and((s > (- self.margin)), (t <= 0.0)), tf.float32)) + (tf.square((s - self.margin)) * tf.cast(tf.logical_and((s <= self.margin), (t > 0.0)), tf.float32))))) / B) / H) / W)

    def initialize_student(self, dataset):
        """Pre-train the student on the distillation loss for 30% of the epochs."""
        optimizer = tf.keras.optimizers.SGD(self.args.learning_rate, 0.9, nesterov=True)
        train_loss = tf.keras.metrics.Mean(name='train_loss')
        # NOTE(review): this line looks like a `@tf.function(jit_compile=True)`
        # decorator whose `@tf.function` prefix was lost during extraction;
        # as written it is a syntax error — confirm against the original.
        (jit_compile=True)
        def init_forward(input):
            # Teacher pass populates its kept features; no gradients needed.
            self.teacher(input, training=False)
            with tf.GradientTape(persistent=True) as tape:
                self.student(input, training=True)
                distill_loss = []
                for (i, data) in enumerate(zip(self.student_layers, self.teacher_layers, self.student.aux_layers)):
                    # Deeper layer pairs are weighted exponentially higher.
                    distill_loss.append((self.loss(*data) * (2 ** (((- len(self.student_layers)) + i) + 1))))
                distill_loss = tf.add_n(distill_loss)
            gradients = tape.gradient(distill_loss, self.student.trainable_variables)
            if (self.args.weight_decay > 0.0):
                # Manual L2 weight decay folded into the gradients.
                gradients = [((g + (v * self.args.weight_decay)) if (g is not None) else g) for (g, v) in zip(gradients, self.student.trainable_variables)]
            optimizer.apply_gradients(zip(gradients, self.student.trainable_variables))
            train_loss.update_state(distill_loss)
        for e in range(int((self.args.train_epoch * 0.3))):
            for (imgs, _) in dataset:
                init_forward(imgs)
            print(('Aux Epoch: %d: loss: %.4f' % (e, train_loss.result())))
            train_loss.reset_states()
class PygGraphPropPredDataset(InMemoryDataset):
def __init__(self, name, root='dataset', transform=None, pre_transform=None, meta_dict=None):
self.name = name
if (meta_dict is None):
self.dir_name = '_'.join(name.split('-'))
if osp.exists(osp.join(root, (self.dir_name + '_pyg'))):
self.dir_name = (self.dir_name + '_pyg')
self.original_root = root
self.root = osp.join(root, self.dir_name)
master = pd.read_csv(os.path.join(os.path.dirname(__file__), 'master.csv'), index_col=0, keep_default_na=False)
if (not (self.name in master)):
error_mssg = 'Invalid dataset name {}.\n'.format(self.name)
error_mssg += 'Available datasets are as follows:\n'
error_mssg += '\n'.join(master.keys())
raise ValueError(error_mssg)
self.meta_info = master[self.name]
else:
self.dir_name = meta_dict['dir_path']
self.original_root = ''
self.root = meta_dict['dir_path']
self.meta_info = meta_dict
if (osp.isdir(self.root) and (not osp.exists(osp.join(self.root, (('RELEASE_v' + str(self.meta_info['version'])) + '.txt'))))):
print((self.name + ' has been updated.'))
if (input('Will you update the dataset now? (y/N)\n').lower() == 'y'):
shutil.rmtree(self.root)
self.download_name = self.meta_info['download_name']
self.num_tasks = int(self.meta_info['num tasks'])
self.eval_metric = self.meta_info['eval metric']
self.task_type = self.meta_info['task type']
self.__num_classes__ = int(self.meta_info['num classes'])
self.binary = (self.meta_info['binary'] == 'True')
super(PygGraphPropPredDataset, self).__init__(self.root, transform, pre_transform)
(self.data, self.slices) = torch.load(self.processed_paths[0])
def get_idx_split(self, split_type=None):
if (split_type is None):
split_type = self.meta_info['split']
path = osp.join(self.root, 'split', split_type)
if os.path.isfile(os.path.join(path, 'split_dict.pt')):
return torch.load(os.path.join(path, 'split_dict.pt'))
train_idx = pd.read_csv(osp.join(path, 'train.csv.gz'), compression='gzip', header=None).values.T[0]
valid_idx = pd.read_csv(osp.join(path, 'valid.csv.gz'), compression='gzip', header=None).values.T[0]
test_idx = pd.read_csv(osp.join(path, 'test.csv.gz'), compression='gzip', header=None).values.T[0]
return {'train': torch.tensor(train_idx, dtype=torch.long), 'valid': torch.tensor(valid_idx, dtype=torch.long), 'test': torch.tensor(test_idx, dtype=torch.long)}
def num_classes(self):
return self.__num_classes__
def raw_file_names(self):
if self.binary:
return ['data.npz']
else:
file_names = ['edge']
if (self.meta_info['has_node_attr'] == 'True'):
file_names.append('node-feat')
if (self.meta_info['has_edge_attr'] == 'True'):
file_names.append('edge-feat')
return [(file_name + '.csv.gz') for file_name in file_names]
def processed_file_names(self):
return 'geometric_data_processed.pt'
    def download(self):
        """Download and extract the dataset archive into ``self.root``.

        ``decide_download`` gates the fetch (presumably asking the user for
        confirmation — TODO confirm against its definition). On refusal the
        dataset root is removed and the process exits with a non-zero code.
        """
        url = self.meta_info['url']
        if decide_download(url):
            path = download_url(url, self.original_root)
            extract_zip(path, self.original_root)
            # Remove the zip once extracted, then replace any stale root
            # directory with the freshly extracted folder.
            os.unlink(path)
            shutil.rmtree(self.root)
            shutil.move(osp.join(self.original_root, self.download_name), self.root)
        else:
            print('Stop downloading.')
            shutil.rmtree(self.root)
            # NOTE(review): bare exit() comes from the site module;
            # sys.exit(-1) would be the conventional choice here.
            exit((- 1))
    def process(self):
        """Read the raw graph files, attach per-graph labels, collate, and
        save the processed dataset to ``self.processed_paths[0]``.
        """
        # Meta-info values are stored as strings; 'True'/'None' are sentinels.
        add_inverse_edge = (self.meta_info['add_inverse_edge'] == 'True')
        if (self.meta_info['additional node files'] == 'None'):
            additional_node_files = []
        else:
            additional_node_files = self.meta_info['additional node files'].split(',')
        if (self.meta_info['additional edge files'] == 'None'):
            additional_edge_files = []
        else:
            additional_edge_files = self.meta_info['additional edge files'].split(',')
        data_list = read_graph_pyg(self.raw_dir, add_inverse_edge=add_inverse_edge, additional_node_files=additional_node_files, additional_edge_files=additional_edge_files, binary=self.binary)
        if (self.task_type == 'subtoken prediction'):
            # Labels are whitespace-separated token sequences; kept as
            # lists of strings rather than tensors.
            graph_label_notparsed = pd.read_csv(osp.join(self.raw_dir, 'graph-label.csv.gz'), compression='gzip', header=None).values
            graph_label = [str(graph_label_notparsed[i][0]).split(' ') for i in range(len(graph_label_notparsed))]
            for (i, g) in enumerate(data_list):
                g.y = graph_label[i]
        else:
            if self.binary:
                graph_label = np.load(osp.join(self.raw_dir, 'graph-label.npz'))['graph_label']
            else:
                graph_label = pd.read_csv(osp.join(self.raw_dir, 'graph-label.csv.gz'), compression='gzip', header=None).values
            # NaN marks a missing target; any NaN forces float labels even
            # for classification, since NaN is not representable in long.
            has_nan = np.isnan(graph_label).any()
            for (i, g) in enumerate(data_list):
                if ('classification' in self.task_type):
                    if has_nan:
                        g.y = torch.from_numpy(graph_label[i]).view(1, (- 1)).to(torch.float32)
                    else:
                        g.y = torch.from_numpy(graph_label[i]).view(1, (- 1)).to(torch.long)
                else:
                    # Regression targets are always float.
                    g.y = torch.from_numpy(graph_label[i]).view(1, (- 1)).to(torch.float32)
        if (self.pre_transform is not None):
            data_list = [self.pre_transform(data) for data in data_list]
        # Collate the per-graph Data objects into one storage + slice index.
        (data, slices) = self.collate(data_list)
        print('Saving...')
        torch.save((data, slices), self.processed_paths[0])
.filterwarnings('ignore::pytest.PytestUnhandledThreadExceptionWarning')
def test_killing_endless_loop():
    """Executing a test case that enters an endless loop must not leak
    executor threads: after execution, every '_execute_test_case' thread can
    be joined and only the main thread remains alive.
    """
    config.configuration.module_name = 'tests.fixtures.examples.loop'
    module_name = config.configuration.module_name
    tracer = ExecutionTracer()
    # Register the current thread so the tracer can tell the test-runner
    # thread apart from the spawned execution thread (presumably how the
    # executor decides which thread to kill — verify in ExecutionTracer).
    tracer.current_thread_identifier = threading.current_thread().ident
    with install_import_hook(module_name, tracer):
        # Reload so the instrumented import hook is applied to the module.
        module = importlib.import_module(module_name)
        importlib.reload(module)
        executor = TestCaseExecutor(tracer)
        cluster = generate_test_cluster(module_name)
        transformer = AstToTestCaseTransformer(cluster, False, EmptyConstantProvider())
        # Build a test case that calls the function containing the endless loop.
        transformer.visit(ast.parse('def test_case_0():\n    anything = module_0.loop_with_condition()\n'))
        test_case = transformer.testcases[0]
        executor.execute(test_case)
        # Wait for any execution thread the executor spawned to terminate.
        for thread in threading.enumerate():
            if ('_execute_test_case' in thread.name):
                thread.join()
        # Only the main thread should remain — no leaked workers.
        assert (len(threading.enumerate()) == 1)
def build_dataloader(dataset, samples_per_gpu, workers_per_gpu, num_gpus=1, dist=True, shuffle=True, seed=None, drop_last=False, pin_memory=True, persistent_workers=True, **kwargs):
    """Build a PyTorch DataLoader for ``dataset``.

    In distributed mode each process loads ``samples_per_gpu`` samples and a
    DistributedSampler handles sharding/shuffling; otherwise batch size and
    worker count scale with ``num_gpus``. Extra ``**kwargs`` are forwarded to
    the DataLoader constructor.
    """
    rank, world_size = get_dist_info()
    if dist:
        # The sampler takes over shuffling, so the loader itself must not.
        sampler = DistributedSampler(dataset, world_size, rank, shuffle=shuffle, seed=seed)
        shuffle = False
        batch_size = samples_per_gpu
        num_workers = workers_per_gpu
    else:
        sampler = None
        batch_size = num_gpus * samples_per_gpu
        num_workers = num_gpus * workers_per_gpu

    if seed is None:
        init_fn = None
    else:
        # Seed each worker deterministically from rank + worker id.
        init_fn = partial(worker_init_fn, num_workers=num_workers, rank=rank, seed=seed)

    loader_kwargs = {
        'batch_size': batch_size,
        'sampler': sampler,
        'num_workers': num_workers,
        'collate_fn': partial(collate, samples_per_gpu=samples_per_gpu),
        'pin_memory': pin_memory,
        'shuffle': shuffle,
        'worker_init_fn': init_fn,
        'drop_last': drop_last,
    }
    # persistent_workers only exists on torch >= 1.8.
    if digit_version(torch.__version__) >= digit_version('1.8.0'):
        loader_kwargs['persistent_workers'] = persistent_workers
    return DataLoader(dataset, **loader_kwargs, **kwargs)
class Simulator():
    """Wrapper around the Mario-AI framework jars.

    A level can be given either as a filename (loaded via ``load_level``) or
    directly as a list of row strings. Simulation runs either interactively
    (human play) or with the built-in A* agent, by shelling out to the
    corresponding jar with the level written to a temporary file.
    """

    def __init__(self, level_filename: Optional[str]=None, level: Optional[List[str]]=None, interactive_jar_path: Optional[str]=None, astar_jar_path: Optional[str]=None):
        """Store the level and jar paths, loading the level from disk if only
        a filename was given.

        Raises:
            ValueError: If neither ``level_filename`` nor ``level`` is given.
        """
        if (level_filename is None) and (level is None):
            # Fixed message: the parameter is named `level`, not `level_txt`.
            raise ValueError('level_filename OR level must be provided!')
        elif level is None:
            level = load_level(level_filename)
        if interactive_jar_path is None:
            interactive_jar_path = INTERACTIVE_JAR_PATH
        if astar_jar_path is None:
            astar_jar_path = ASTAR_JAR_PATH
        self.level_filename = level_filename
        self.level = level
        self.interactive_jar_path = interactive_jar_path
        self.astar_jar_path = astar_jar_path

    def _run_with_level_file(self, build_cmd, announce):
        """Write the level to a temp file, announce, run the java command,
        and always clean the temp file up afterwards.

        Args:
            build_cmd: Callable mapping the temp filename to the argv list.
            announce: Callable mapping the temp filename to the message printed.
        """
        t = tempfile.NamedTemporaryFile(suffix='.txt', delete=False)
        try:
            save_level(self.level, t.name)
            print(announce(t.name))
            _ = subprocess.run(build_cmd(t.name), stdout=subprocess.PIPE)
        finally:
            # delete=False means we are responsible for removing the file,
            # even if the jar invocation raised.
            t.close()
            os.unlink(t.name)

    def interactive(self):
        """Play the level interactively via the interactive jar."""
        self._run_with_level_file(
            lambda name: ['java', '-jar', self.interactive_jar_path, name, IMAGE_PATH],
            lambda name: f'Playing level interactively -- {name}!')

    def astar(self, render: bool=True):
        """Run the A* agent on the level, optionally rendering it."""
        render_str = ('human' if render else 'norender')
        self._run_with_level_file(
            lambda name: ['java', '-jar', self.astar_jar_path, name, render_str, IMAGE_PATH],
            lambda name: f'Running Astar agent on level! -- {name}')

    def __call__(self, simulate_mode: str='interactive', render: bool=True):
        """Dispatch to interactive play or the A* agent.

        Args:
            simulate_mode: 'interactive' plays the level; anything else runs A*.
            render: Whether the A* run should be rendered (ignored when interactive).
        """
        if simulate_mode == 'interactive':
            self.interactive()
        else:
            self.astar(render)
from dataclasses import dataclass


@dataclass
class SerializedInteraction:
    """Serializable record of one request/response interaction together with
    the checks that ran against it.

    NOTE(review): the source as extracted had neither ``@dataclass`` nor
    ``@classmethod`` — without them ``cls(request=...)`` is a TypeError and
    ``from_interaction`` receives an instance as ``cls``. Both decorators are
    restored here as the field annotations and ``cls(...)`` call clearly intend.
    """

    request: Request
    response: Response
    checks: list[SerializedCheck]
    status: Status
    recorded_at: str

    @classmethod
    def from_interaction(cls, interaction: Interaction) -> "SerializedInteraction":
        """Build a serialized interaction from a live ``Interaction``,
        serializing each of its checks along the way."""
        return cls(
            request=interaction.request,
            response=interaction.response,
            checks=[SerializedCheck.from_check(check) for check in interaction.checks],
            status=interaction.status,
            recorded_at=interaction.recorded_at,
        )
class GraphConvolution(layers.Layer):
    """Graph convolution layer: multiplies (optionally dropped-out) node
    features by a learned weight matrix and aggregates over the provided
    support (adjacency-like) matrices, GCN-style.
    """

    def __init__(self, input_dim: int, output_dim: int, num_features_nonzero: int, dropout: float=0.0, is_sparse_inputs: bool=False, activation: Callable[([tf.Tensor], tf.Tensor)]=tf.nn.relu, norm: bool=False, bias: bool=False, featureless: bool=False, **kwargs: Optional) -> None:
        """Create the layer's weights.

        Args:
            input_dim: Input feature dimension.
            output_dim: Output feature dimension.
            num_features_nonzero: Nonzero count used by sparse dropout.
            dropout: Dropout rate applied to the inputs during training.
            is_sparse_inputs: Whether inputs arrive as a tf.SparseTensor.
            activation: Activation applied to the aggregated output.
            norm: If True, L2-normalize the activated output.
            bias: If True, add a learned bias to the output.
            featureless: If True, skip the feature multiplication and use the
                weight matrix directly.
        """
        super(GraphConvolution, self).__init__(**kwargs)
        self.dropout = dropout
        self.activation = activation
        self.is_sparse_inputs = is_sparse_inputs
        self.featureless = featureless
        self.bias = bias
        self.norm = norm
        self.num_features_nonzero = num_features_nonzero
        self.weights_ = []
        # NOTE(review): exactly one weight matrix is created (range(1)), but
        # call() indexes self.weights_[i] per support matrix — this breaks if
        # more than one support is passed; confirm intended usage.
        for i in range(1):
            w = self.add_weight(('weight' + str(i)), [input_dim, output_dim], dtype=tf.float32)
            self.weights_.append(w)
        if self.bias:
            # NOTE(review): rebinds the boolean flag to the bias variable;
            # later `if self.bias:` checks then test the variable's
            # truthiness rather than the original flag — confirm behavior
            # for output_dim > 1.
            self.bias = self.add_weight('bias', [output_dim], dtype=tf.float32)

    def call(self, inputs: Tuple[(tf.Tensor, tf.Tensor)], training: bool=True) -> tf.Tensor:
        """Run the graph convolution.

        Args:
            inputs: Pair ``(x, support_)`` of node features and a sequence of
                support matrices.
            training: When truthy, dropout is applied to the inputs.

        Returns:
            The activated (and optionally L2-normalized) output features.
        """
        (x, support_) = inputs
        # Dropout: sparse variant for SparseTensor inputs, dense otherwise.
        if ((training is not False) and self.is_sparse_inputs):
            x = sparse_dropout(x, self.dropout, self.num_features_nonzero)
        elif (training is not False):
            x = tf.nn.dropout(x, self.dropout)
        supports = list()
        for i in range(len(support_)):
            if (not self.featureless):
                pre_sup = dot(x, self.weights_[i], sparse=self.is_sparse_inputs)
            else:
                # Featureless mode: the weight matrix stands in for x @ W.
                pre_sup = self.weights_[i]
            support = dot(support_[i], pre_sup, sparse=True)
            supports.append(support)
        output = tf.add_n(supports)
        # NOTE(review): batch normalization (without scale/offset) is applied
        # unconditionally here — it is not gated by self.norm, which only
        # controls the final L2 normalization. Confirm this is intended.
        axis = list(range((len(output.get_shape()) - 1)))
        (mean, variance) = tf.nn.moments(output, axis)
        scale = None
        offset = None
        variance_epsilon = 0.001
        output = tf.nn.batch_normalization(output, mean, variance, offset, scale, variance_epsilon)
        if self.bias:
            output += self.bias
        if self.norm:
            return tf.nn.l2_normalize(self.activation(output), axis=None, epsilon=1e-12)
        return self.activation(output)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.