class DMMN(nn.Module):
def __init__(self, phase, base, head, extra):
super(DMMN, self).__init__()
self.phase = phase
self.num_classes = config['num_classes']
self.num_params = config['num_motion_model_param']
self.priorbox = PriorBox(config)
with torch.no_grad():
self.priors = Variable(self.priorbox.forward())
self.input_frame_num = config['frame_max_input_num']
self.base = base
self.param_layers = nn.ModuleList(head[0])
self.p_c_layers = nn.ModuleList(head[1])
self.p_e_layers = nn.ModuleList(head[2])
self.conv1 = nn.ModuleList([base.conv1, base.bn1, base.relu, base.maxpool])
self.conv2 = base.layer1
self.conv3 = base.layer2
self.conv4 = base.layer3
self.conv5 = base.layer4
self.conv6 = extra.layer1
self.conv7 = extra.layer2
if (phase == 'test'):
self.softmax = nn.Softmax(dim=(- 1))
self.detect = Detect(config['num_classes'], config['test']['detect_bkg_label'], config['test']['detect_top_k'], config['test']['detect_conf_thresh'], config['test']['detect_nms_thresh'], config['test']['detect_exist_thresh'])
self.apply(param_init)
def forward(self, x, times=None):
sources = list()
param = list()
p_c = list()
p_e = list()
for conv in self.conv1:
x = conv(x)
show_feature_map(x, 'conv_1')
x = self.conv2(x)
show_feature_map(x, 'conv_2')
sources += [x]
x = self.conv3(x)
show_feature_map(x, 'conv_3')
sources += [x]
x = self.conv4(x)
show_feature_map(x, 'conv_4')
sources += [x]
x = self.conv5(x)
show_feature_map(x, 'conv_5')
sources += [x]
x = self.conv6(x)
show_feature_map(x, 'conv_6')
sources += [x]
x = self.conv7(x)
show_feature_map(x, 'conv_7')
sources += [x]
i = 0
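# apply the three per-scale 3D-conv heads: motion-model parameters (param), class scores (p_c), existence scores (p_e)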
for (x, p, m, c) in zip(sources, self.param_layers, self.p_c_layers, self.p_e_layers):
show_feature_map(p(x), 'param{}'.format(i))
show_feature_map(m(x), 'p_c{}'.format(i))
show_feature_map(c(x), 'p_e{}'.format(i))
param.append(p(x).squeeze_(dim=2).permute(0, 2, 3, 1).contiguous())
p_c.append(m(x).squeeze_(dim=2).permute(0, 2, 3, 1).contiguous())
p_e.append(c(x).squeeze_(dim=2).permute(0, 2, 3, 1).contiguous())
i += 1
param = torch.cat([o.view(o.size(0), (- 1)) for o in param], 1)
p_c = torch.cat([o.view(o.size(0), (- 1)) for o in p_c], 1)
p_e = torch.cat([o.view(o.size(0), (- 1)) for o in p_e], 1)
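# reshape the flat predictions into per-prior layouts: motion params, class scores, per-frame existence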
param = param.view(param.size(0), (- 1), 4, (config['num_motion_model_param'] // 4))
p_c = p_c.view(p_c.size(0), (- 1), 1, self.num_classes).permute(0, 2, 1, 3).contiguous()
p_e = p_e.view(p_e.size(0), (- 1), self.input_frame_num, 2).permute(0, 2, 1, 3).contiguous()
if (self.phase == 'test'):
output = self.detect(param, self.softmax(p_c), self.softmax(p_e), self.priors, times)
else:
output = (param, p_c, p_e)
return output
def load_weights(self, weight_file):
ext = os.path.splitext(weight_file)[1]
if ext in ('.pkl', '.pth'):
print('Loading weights into state dict...')
model_data = torch.load(weight_file)
self.load_state_dict(model_data['state_dict'])
print('Finished!')
else:
print('Sorry only .pth and .pkl files supported.')
def load_base_weights(self, base_file):
if (os.path.splitext(base_file)[1] in ['.pkl', '.pth']):
print('Loading base net weights into state dict...')
model_data = torch.load(base_file)
assert (config['base_net']['arch'] == model_data['arch'])
self.base.load_state_dict(model_data['state_dict'], strict=False)
print('Finish')
else:
print('Sorry only .pth and .pkl files supported.')
def load_resume_weights(self, resume):
# resume from a checkpoint saved as a bare state dict (load_weights above expects a dict with a 'state_dict' key)
model_data = torch.load(resume)
self.load_state_dict(model_data)
def build_extra_net1(in_channel):
# bottleneck block returned as a module (the layers must be collected, not just constructed)
return torch.nn.Sequential(
torch.nn.Conv3d(in_channel, 32, kernel_size=1, stride=1, bias=False),
torch.nn.BatchNorm3d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
torch.nn.Conv3d(32, 32, kernel_size=3, stride=1, padding=1, bias=False),
torch.nn.BatchNorm3d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
torch.nn.Conv3d(32, in_channel, kernel_size=1, stride=1, bias=False),
torch.nn.BatchNorm3d(in_channel, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True))
def build(phase):
if (phase not in ['train', 'test']):
print((('ERROR: Phase: ' + phase) + ' not recognized'))
return
base_net = generate_resnext101(config['num_classes'], frame_size=config['frame_size'], num_frames=config['frame_max_input_num'], cuda=config['cuda'])
extra_net_inplanes = base_net.layer4[2].conv3.out_channels
extra_net = generate_extra_model(cuda=config['cuda'], inplanes=extra_net_inplanes)
param_layers = []
p_c_layers = []
p_e_layers = []
scales = config['frame_work']['boxes_scales']
aspect_ratios = config['frame_work']['aspect_ratios']
num_boxes = [((2 + len(_s)) + (2 * len(_a))) for (_s, _a) in zip(scales, aspect_ratios)]
num_channels_dims = config['frame_work']['channel_dims']
num_temporal_dims = config['frame_work']['temporal_dims']
for (k, c, t) in zip(num_boxes, num_channels_dims, num_temporal_dims):
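# one head triple per feature scale; the (t, 3, 3) kernel spans that scale's temporal extent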
param_layer = nn.Conv3d(in_channels=c, out_channels=(k * config['num_motion_model_param']), kernel_size=(t, 3, 3), padding=(0, 1, 1), stride=(1, 1, 1), bias=True)
p_c_layer = nn.Conv3d(in_channels=c, out_channels=(k * config['num_classes']), kernel_size=(t, 3, 3), padding=(0, 1, 1), stride=(1, 1, 1), bias=True)
p_e_layer = nn.Conv3d(in_channels=c, out_channels=((k * config['frame_max_input_num']) * 2), kernel_size=(t, 3, 3), padding=(0, 1, 1), stride=(1, 1, 1), bias=True)
if config['cuda']:
param_layer = param_layer.cuda()
p_c_layer = p_c_layer.cuda()
p_e_layer = p_e_layer.cuda()
param_layers += [param_layer]
p_c_layers += [p_c_layer]
p_e_layers += [p_e_layer]
head = (param_layers, p_c_layers, p_e_layers)
return DMMN(phase=phase, base=base_net, extra=extra_net, head=head)
def test_sort_strings():
content = ak.operations.from_iter(['one', 'two', 'three', 'four', 'five'], highlevel=False)
assert (to_list(ak.operations.sort(content, axis=0, ascending=True, stable=False)) == ['five', 'four', 'one', 'three', 'two'])
assert (to_list(ak.operations.sort(content, axis=0, ascending=False, stable=False)) == ['two', 'three', 'one', 'four', 'five'])
@audio_video_fx
def volumex(clip, factor):
return clip.fl((lambda gf, t: (factor * gf(t))), keep_duration=True)
class LayoutLMTokenizer(BertTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
class Box(Space):
def __init__(self, low, high, shape=None):
if (shape is None):
assert (low.shape == high.shape)
self.low = low
self.high = high
else:
assert (np.isscalar(low) and np.isscalar(high))
self.low = (low + np.zeros(shape))
self.high = (high + np.zeros(shape))
def sample(self):
return np.random.uniform(low=self.low, high=self.high, size=self.low.shape)
def contains(self, x):
return ((x.shape == self.shape) and (x >= self.low).all() and (x <= self.high).all())
@property
def shape(self):
return self.low.shape
@property
def flat_dim(self):
return np.prod(self.low.shape)
@property
def bounds(self):
return (self.low, self.high)
def flatten(self, x):
return np.asarray(x).flatten()
def unflatten(self, x):
return np.asarray(x).reshape(self.shape)
def flatten_n(self, xs):
xs = np.asarray(xs)
return xs.reshape((xs.shape[0], (- 1)))
def unflatten_n(self, xs):
xs = np.asarray(xs)
return xs.reshape(((xs.shape[0],) + self.shape))
def __repr__(self):
return ('Box' + str(self.shape))
def __eq__(self, other):
return (isinstance(other, Box) and np.allclose(self.low, other.low) and np.allclose(self.high, other.high))
def __hash__(self):
# ndarrays are unhashable; hash their byte contents instead
return hash((self.low.tobytes(), self.high.tobytes()))
def new_tensor_variable(self, name, extra_dims):
return ext.new_tensor(name=name, ndim=(extra_dims + 1), dtype=theano.config.floatX)
def test_NumpyArray_shape():
v2a = ak.contents.numpyarray.NumpyArray(np.arange(((2 * 3) * 5), dtype=np.int64).reshape(2, 3, 5))
def f(out, obj):
out[0] = len(obj)
out[1] = len(obj[0])
out[2] = len(obj[0][0])
out[3] = obj[0][0][0]
out[4] = obj[0][0][1]
out[5] = obj[0][1][0]
out[6] = obj[0][1][1]
out[7] = obj[1][0][0]
out[8] = obj[1][1][1]
out = np.zeros(9, dtype=np.float64)
f(out, ak.highlevel.Array(v2a))
assert (out.tolist() == [2.0, 3.0, 5.0, 0.0, 1.0, 5.0, 6.0, 15.0, 21.0])
def contrast_epoch(pretrain_loader, model, optimizer, pretrain_meter, cur_epoch, global_step, num_steps, num_optimizer_steps, num_warmup_steps, cfg):
model.train()
pretrain_meter.iter_tic()
for (cur_step, (visual_clip, audio_clip)) in enumerate(pretrain_loader):
global_step += 1
for i in range(len(visual_clip)):
visual_clip[i] = visual_clip[i].cuda(non_blocking=True)
audio_clip = audio_clip.cuda(non_blocking=True)
(loss, acc) = model(visual_clip, audio_clip)
misc.check_nan_losses(loss)
loss.backward()
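# the learning rate follows the configured warmup/decay policy and is set just before the optimizer step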
lr = optim.get_lr(cfg.SOLVER.LR_POLICY, cfg.SOLVER.BASE_LR, cfg.SOLVER.WARMUP_START_LR, global_step, num_optimizer_steps, num_warmup_steps)
optim.set_lr(optimizer, lr)
optimizer.step()
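# free gradients by setting them to None instead of calling zero_grad()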
for p in model.parameters():
p.grad = None
if (cfg.NUM_GPUS > 1):
(loss, acc) = du.all_reduce([loss, acc])
loss = loss.item()
acc = acc.item()
pretrain_meter.iter_toc()
pretrain_meter.update_stats(loss, acc, lr, (audio_clip.size(0) * du.get_world_size()))
pretrain_meter.log_iter_stats(cur_epoch, cur_step, global_step)
sd = (model.module.state_dict() if (cfg.NUM_GPUS > 1) else model.state_dict())
ckpt = {'step': global_step, 'state_dict': sd, 'optimizer': optimizer.state_dict()}
if (cfg.PRETRAIN.PREEMPTIBLE and ((global_step % cfg.PRETRAIN.SAVE_PERIOD) == 0) and (du.get_rank() == 0)):
path = 'step_latest.pyth'
cu.save_checkpoint(ckpt, filename=os.path.join(cfg.SAVE_DIR, path))
if ((global_step % cfg.LOG_PERIOD) == 0):
logger.info('PROGRESS: {}%'.format(round(((100 * global_step) / num_steps), 4)))
logger.info('EVALERR: {}%'.format(loss))
if ((global_step == num_steps) and ((cur_step + 1) != len(pretrain_loader))):
return global_step
pretrain_meter.iter_tic()
pretrain_meter.log_epoch_stats(cur_epoch, global_step)
pretrain_meter.reset()
return global_step
@register_model_architecture('transformer_lm', 'transformer_lm_gpt2_big')
def transformer_lm_gpt2_big(args):
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1600)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 6400)
args.decoder_layers = getattr(args, 'decoder_layers', 48)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 25)
args.dropout = getattr(args, 'dropout', 0.1)
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
args.activation_fn = getattr(args, 'activation_fn', 'gelu_accurate')
base_lm_architecture(args)
def main(args):
audio_dataset = load_dataset(**DATASET_ARGS)
def gen():
i = 0
idxes = list(range(len(audio_dataset)))
random.shuffle(idxes)
for k in idxes:
try:
(yield _write_convo(k, audio_dataset[k]))
except ValueError:
pass
else:
i += 1
if (i >= args.max_examples):
break
ds = Dataset.from_generator(gen)
ds.save_to_disk(args.output_folder)
class SysbenchMemoryBenchmark(node.AppConfig):
def __init__(self, disagg_addr: int, disagg_size: int, disaggregated: bool, time_limit: int, num_threads=1):
self.disagg_addr = disagg_addr
self.disagg_size = disagg_size
self.disaggregated = disaggregated
self.time_limit = time_limit
self.num_threads = num_threads
def config_files(self):
m = {'farmem.ko': open('../images/farmem/farmem.ko', 'rb')}
return {**m, **super().config_files()}
def run_cmds(self, _):
cmds = ['mount -t proc proc /proc', 'mount -t sysfs sysfs /sys', 'free -m']
if self.disaggregated:
cmds.append(f'insmod /tmp/guest/farmem.ko base_addr=0x{self.disagg_addr:x} size=0x{self.disagg_size:x} nnid=1 drain_node=1')
cmds.append('free -m')
cmds.append('numactl -H')
sysbench_cmd = f'sysbench --time={self.time_limit} --histogram=on memory --memory-oper=read --memory-block-size=16M --memory-access-mode=rnd --memory-total-size=0 run'
parallel_cmd = ''
for i in range(self.num_threads):
parallel_cmd += f'numactl --membind={(1 if self.disaggregated else 0)} --physcpubind={i} {sysbench_cmd} & '
parallel_cmd += 'wait'
cmds.append(parallel_cmd)
return cmds
class MFVISemanticDependency(nn.Module):
def __init__(self, max_iter=3):
super().__init__()
self.max_iter = max_iter
def __repr__(self):
return f'{self.__class__.__name__}(max_iter={self.max_iter})'
@torch.enable_grad()
def forward(self, scores, mask, target=None):
logQ = self.mean_field_variational_inference(*(s.requires_grad_() for s in scores), mask)
marginals = logQ.exp()
if (target is None):
return marginals
loss = ((- logQ.gather((- 1), target.unsqueeze((- 1)))[mask].sum()) / mask.sum())
return (loss, marginals)
def mean_field_variational_inference(self, s_edge, s_sib, s_cop, s_grd, mask):
(batch_size, seq_len, _) = mask.shape
(hs, ms) = torch.stack(torch.where(torch.ones_like(mask[0]))).view((- 1), seq_len, seq_len).sort(0)[0].unbind()
mask = mask.permute(2, 1, 0)
mask2o = (mask.unsqueeze(1) & mask.unsqueeze(2))
mask2o = (mask2o & hs.unsqueeze((- 1)).ne(hs.new_tensor(range(seq_len))).unsqueeze((- 1)))
mask2o = (mask2o & ms.unsqueeze((- 1)).ne(ms.new_tensor(range(seq_len))).unsqueeze((- 1)))
s_edge = s_edge.permute(2, 1, 0)
s_sib = (s_sib.permute(2, 1, 3, 0) * mask2o)
s_cop = (s_cop.permute(2, 1, 3, 0) * mask2o)
s_grd = (s_grd.permute(2, 1, 3, 0) * mask2o)
q = s_edge.new_zeros(2, seq_len, seq_len, batch_size)
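# mean-field iterations: normalize q, pool sibling/co-parent/grandparent messages, then rescore edges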
for _ in range(self.max_iter):
q = q.softmax(0)
f = (((q[1].unsqueeze(1) * s_sib) + (q[1].transpose(0, 1).unsqueeze(0) * s_cop)) + (q[1].unsqueeze(0) * s_grd)).sum(2)
q = torch.stack((torch.zeros_like(q[0]), (s_edge + f)))
return q.permute(3, 2, 1, 0).log_softmax((- 1))
def spacy_deps(doc):
tups = []
for (tki, token) in enumerate(doc):
dep = ((token.text + '_') + str(tki))
head = ((token.head.text + '_') + str(token.head.i))
arc = token.dep_
tups.append((dep, head, arc))
return tups
@st.cache(allow_output_mutation=True)
def init_bert_tokenizer():
tokenizer = BlipBase.init_tokenizer()
return tokenizer
@test_utils.test()
def test_name_error():
with pytest.raises(ti.TaichiNameError, match='Name "a" is not defined'):
@ti.kernel
def foo():
(a + 1)
foo()
@cli.command()  # assumption: click-style command decorator; the original name was truncated in extraction
def list():
_echo_run_names('Algorithms', _get_runs_dict(benchmark_algos))
_echo_run_names('Policies', _get_runs_dict(benchmark_policies))
_echo_run_names('Baselines', _get_runs_dict(benchmark_baselines))
_echo_run_names('Q Functions', _get_runs_dict(benchmark_q_functions))
_echo_run_names('Automatic benchmarking', _get_runs_dict(benchmark_auto))
def is_optional(ann):
if (ann is Optional):
raise RuntimeError('Attempted to use Optional without a contained type. Please add a contained type, e.g. Optional[int]')
def safe_is_subclass(the_type, super_type):
if (not inspect.isclass(the_type)):
return False
return issubclass(the_type, super_type)
if (not hasattr(ann, '__module__')):
return False
union_optional = False
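# Optional[T] is represented as Union[T, None]: detect a two-argument Union with exactly one NoneType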
if ((ann.__module__ == 'typing') and (getattr(ann, '__origin__', None) is Union)):
args = getattr(ann, '__args__', ())
if (len(args) == 2):
union_optional = ((safe_is_subclass(args[1], type(None)) and (not safe_is_subclass(args[0], type(None)))) or (safe_is_subclass(args[0], type(None)) and (not safe_is_subclass(args[1], type(None)))))
optional = ((ann.__module__ == 'typing') and (getattr(ann, '__origin__', None) is Optional))
return (optional or union_optional)
@candidate_selection.register('chunk')  # assumption: registry name reconstructed; the decorator prefix was truncated in extraction
def chunk_selection(doc: Doc) -> Iterable[Candidate]:
surface_forms = []
spans = list(doc.ents)
ent_words: Set[int] = set()
sentence_indices = []
for span in spans:
ent_words.update((token.i for token in span))
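# trim noun chunks: drop leading tokens whose dependency is not advmod/amod/compound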
for np in doc.noun_chunks:
while ((len(np) > 1) and (np[0].dep_ not in ('advmod', 'amod', 'compound'))):
np = np[1:]
if (not any(((w.i in ent_words) for w in np))):
spans.append(np)
for sent in doc.sents:
sentence_indices.append((sent.start, sent.end))
for span in filter_spans(spans):
for (i, token_indices) in enumerate(sentence_indices):
if ((span.start >= token_indices[0]) and (span.end <= token_indices[1])):
surface_forms.append((i, span))
break
return _merge_surface_forms(surface_forms)
@torch.no_grad()
def final_test(data_loader, model, device, file, save_feature=False):
criterion = torch.nn.CrossEntropyLoss()
metric_logger = utils.MetricLogger(delimiter=' ')
header = 'Test:'
model.eval()
final_result = []
saved_features = {}
for batch in metric_logger.log_every(data_loader, 10, header):
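# each batch unpacks as (videos, labels, sample ids, chunk index, split index)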
videos = batch[0]
target = batch[1]
ids = batch[2]
chunk_nb = batch[3]
split_nb = batch[4]
videos = videos.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
with torch.cuda.amp.autocast():
if save_feature:
(output, saved_feature) = model(videos, save_feature=save_feature)
else:
output = model(videos)
loss = criterion(output, target)
for i in range(output.size(0)):
string = '{} {} {} {} {}\n'.format(ids[i], str(output.data[i].cpu().numpy().tolist()), str(int(target[i].cpu().numpy())), str(int(chunk_nb[i].cpu().numpy())), str(int(split_nb[i].cpu().numpy())))
final_result.append(string)
if save_feature:
if (ids[i] not in saved_features):
saved_features[ids[i]] = {'chunk_id': [], 'split_id': [], 'label': int(target[i].cpu().numpy()), 'feature': [], 'logit': []}
saved_features[ids[i]]['chunk_id'].append(int(chunk_nb[i].cpu().numpy()))
saved_features[ids[i]]['split_id'].append(int(split_nb[i].cpu().numpy()))
saved_features[ids[i]]['feature'].append(saved_feature.data[i].cpu().numpy().tolist())
saved_features[ids[i]]['logit'].append(output.data[i].cpu().numpy().tolist())
(acc1, acc5) = accuracy(output, target, topk=(1, 5))
batch_size = videos.shape[0]
metric_logger.update(loss=loss.item())
metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
if (not os.path.exists(file)):
os.mknod(file)
with open(file, 'w') as f:
f.write('{}, {}\n'.format(acc1, acc5))
for line in final_result:
f.write(line)
if save_feature:
feature_file = file.replace(file[(- 4):], '_feature.pkl')
pickle.dump(saved_features, open(feature_file, 'wb'))
metric_logger.synchronize_between_processes()
print('* {top1.global_avg:.3f} {top5.global_avg:.3f} loss {losses.global_avg:.3f}'.format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
return {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
class AffinePermutationTypeG(AffinePermutation):
def check(self):
if (not self):
return
if (not (len(self) == 6)):
raise ValueError('length of list must be 6')
s = sum((((i // 6) - ((i % 6) == 0)) for i in self if (i > 6)))
if (s % 2):
raise ValueError('type G affine permutations have an even number of entries greater than 6 to the left of the 7th position')
s = sum(((((- i) // 6) + 1) for i in self if (i <= 0)))
if (s % 2):
raise ValueError('type G affine permutations have an even number of entries less than 0 to the right of the 0th position')
def value(self, i, base_window=False):
N = 6
if base_window:
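# indexing alone validates that i lies in the base window (raises IndexError otherwise)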
self[(i - 1)]
window = ((i - 1) // N)
return (self[((i - 1) % N)] + (window * N))
def position(self, i):
N = 6
for r in range(N):
if ((self[r] % N) == (i % N)):
diff = ((i - self[r]) // N)
return ((r + (diff * N)) + 1)
return False
def apply_simple_reflection_right(self, i):
if (i not in self.parent().index_set()):
raise ValueError('index not in index set')
j = i
l = self[:]
if (j == 1):
l[0] = self(2)
l[1] = self(1)
l[2] = self(4)
l[3] = self(3)
l[4] = self(6)
l[5] = self(5)
elif (j == 2):
l[1] = self(3)
l[2] = self(2)
l[3] = self(5)
l[4] = self(4)
elif (j == 0):
l[0] = self((- 1))
l[1] = self(0)
l[4] = self(7)
l[5] = self(8)
return type(self)(self.parent(), l, check=False)
def apply_simple_reflection_left(self, i):
if (i not in self.parent().index_set()):
raise ValueError('index not in index set')
l = []
if (i == 1):
for m in range(6):
res = (self[m] % 6)
if ((res == 1) or (res == 3) or (res == 5)):
l.append((self[m] + 1))
elif ((res == 2) or (res == 4) or (res == 0)):
l.append((self[m] - 1))
else:
l.append(self[m])
elif (i == 2):
for m in range(6):
res = (self[m] % 6)
if ((res == 2) or (res == 4)):
l.append((self[m] + 1))
elif ((res == 3) or (res == 5)):
l.append((self[m] - 1))
else:
l.append(self[m])
elif (i == 0):
for m in range(6):
res = (self[m] % 6)
if ((res == 1) or (res == 2)):
l.append((self[m] - 2))
elif ((res == 5) or (res == 0)):
l.append((self[m] + 2))
else:
l.append(self[m])
return type(self)(self.parent(), l, check=False)
def has_right_descent(self, i) -> bool:
if (i not in self.parent().index_set()):
raise ValueError('index not in index set')
if (i == 0):
return (self.value(0) > self.value(2))
return (self.value(i) > self.value((i + 1)))
def has_left_descent(self, i) -> bool:
if (i not in self.parent().index_set()):
raise ValueError('index not in index set')
if (i == 0):
return (self.position(0) > self.position(2))
return (self.position(i) > self.position((i + 1)))
def to_type_a(self):
A = AffinePermutationGroup(['A', 5, 1])
return A(self)
def collect_all_mutex_groups(groups, atoms):
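# any atom left uncovered by the mutex groups becomes its own singleton group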
all_groups = []
uncovered_facts = atoms.copy()
for group in groups:
uncovered_facts.difference_update(group)
all_groups.append(group)
all_groups += [[fact] for fact in uncovered_facts]
return all_groups
def create_temp_tfrecords(sources, targets):
output_file = tempfile.NamedTemporaryFile()
writer = tf.python_io.TFRecordWriter(output_file.name)
for (source, target) in zip(sources, targets):
ex = tf.train.Example()
ex.features.feature['source'].bytes_list.value.extend([source.encode('utf-8')])
ex.features.feature['target'].bytes_list.value.extend([target.encode('utf-8')])
writer.write(ex.SerializeToString())
writer.close()
return output_file
class LlamaInt8(CausalInt8Model):
config_name: str = 'llama_int8'
def __init__(self, weights_path: Optional[str]=None):
super().__init__(LLamaInt8Engine.config_name, weights_path)
class Text(object):
def __init__(self, ax):
self._ax = ax
self._text = None
def artists(self):
return [self._text]
def draw(self, x, y, text, **kwargs):
if (self._text is None):
self._text = self._ax.text(x, y, text, **kwargs)
else:
self._text.set_text(text)
def counter_to_file(cnt, filepath):
with open(filepath, 'w') as f:
output = '\n'.join(['{};{}'.format(c, c_count) for (c, c_count) in cnt.most_common()])
f.write(output)
class SPNet(nn.Module):
def __init__(self, channel=32, ind=50):
super(SPNet, self).__init__()
self.relu = nn.ReLU(inplace=True)
self.upsample_2 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.upsample_4 = nn.Upsample(scale_factor=4, mode='bilinear', align_corners=True)
self.upsample_8 = nn.Upsample(scale_factor=8, mode='bilinear', align_corners=True)
self.layer_rgb = Res2Net_model(ind)
self.layer_dep = Res2Net_model(ind)
self.layer_dep0 = nn.Conv2d(1, 3, kernel_size=1)
self.fu_0 = CIM0(64, 64)
self.fu_1 = CIM(256, 128)
self.pool_fu_1 = maxpool()
self.fu_2 = CIM(512, 256)
self.pool_fu_2 = maxpool()
self.fu_3 = CIM(1024, 512)
self.pool_fu_3 = maxpool()
self.fu_4 = CIM(2048, 1024)
self.pool_fu_4 = maxpool()
self.rgb_conv_4 = nn.Sequential(BasicConv2d(2048, 256, 3, padding=1), self.relu)
self.rgb_gcm_4 = GCM(2048, channel)
self.rgb_conv_3 = nn.Sequential(BasicConv2d((1024 + 32), 256, 3, padding=1), self.relu)
self.rgb_gcm_3 = GCM((1024 + 32), channel)
self.rgb_conv_2 = nn.Sequential(BasicConv2d((512 + 32), 128, 3, padding=1), self.relu)
self.rgb_gcm_2 = GCM((512 + 32), channel)
self.rgb_conv_1 = nn.Sequential(BasicConv2d((256 + 32), 128, 3, padding=1), self.relu)
self.rgb_gcm_1 = GCM((256 + 32), channel)
self.rgb_conv_0 = nn.Sequential(BasicConv2d((64 + 32), 64, 3, padding=1), self.relu)
self.rgb_gcm_0 = GCM((64 + 32), channel)
self.rgb_conv_out = nn.Conv2d(channel, 1, 1)
self.dep_conv_4 = nn.Sequential(BasicConv2d(2048, 256, 3, padding=1), self.relu)
self.dep_gcm_4 = GCM(2048, channel)
self.dep_conv_3 = nn.Sequential(BasicConv2d((1024 + 32), 256, 3, padding=1), self.relu)
self.dep_gcm_3 = GCM((1024 + 32), channel)
self.dep_conv_2 = nn.Sequential(BasicConv2d((512 + 32), 128, 3, padding=1), self.relu)
self.dep_gcm_2 = GCM((512 + 32), channel)
self.dep_conv_1 = nn.Sequential(BasicConv2d((256 + 32), 128, 3, padding=1), self.relu)
self.dep_gcm_1 = GCM((256 + 32), channel)
self.dep_conv_0 = nn.Sequential(BasicConv2d((64 + 32), 64, 3, padding=1), self.relu)
self.dep_gcm_0 = GCM((64 + 32), channel)
self.dep_conv_out = nn.Conv2d(channel, 1, 1)
self.ful_conv_4 = nn.Sequential(BasicConv2d(2048, 256, 3, padding=1), self.relu)
self.ful_gcm_4 = GCM(1024, channel)
self.ful_conv_3 = nn.Sequential(BasicConv2d((1024 + (32 * 3)), 256, 3, padding=1), self.relu)
self.ful_gcm_3 = GCM((512 + 32), channel)
self.ful_conv_2 = nn.Sequential(BasicConv2d((512 + (32 * 3)), 128, 3, padding=1), self.relu)
self.ful_gcm_2 = GCM((256 + 32), channel)
self.ful_conv_1 = nn.Sequential(BasicConv2d((256 + (32 * 3)), 128, 3, padding=1), self.relu)
self.ful_gcm_1 = GCM((128 + 32), channel)
self.ful_conv_0 = nn.Sequential(BasicConv2d((128 + (32 * 3)), 64, 3, padding=1), self.relu)
self.ful_gcm_0 = GCM((64 + 32), channel)
self.ful_conv_out = nn.Conv2d(channel, 1, 1)
self.ful_layer4 = MFA(channel)
self.ful_layer3 = MFA(channel)
self.ful_layer2 = MFA(channel)
self.ful_layer1 = MFA(channel)
self.ful_layer0 = MFA(channel)
def forward(self, imgs, depths):
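# two-stream encoder (RGB and depth) fused at five scales, followed by three top-down decoder branches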
(img_0, img_1, img_2, img_3, img_4) = self.layer_rgb(imgs)
(dep_0, dep_1, dep_2, dep_3, dep_4) = self.layer_dep(self.layer_dep0(depths))
ful_0 = self.fu_0(img_0, dep_0)
ful_1 = self.fu_1(img_1, dep_1, ful_0)
ful_2 = self.fu_2(img_2, dep_2, self.pool_fu_1(ful_1))
ful_3 = self.fu_3(img_3, dep_3, self.pool_fu_2(ful_2))
ful_4 = self.fu_4(img_4, dep_4, self.pool_fu_3(ful_3))
x_rgb_42 = self.rgb_gcm_4(img_4)
x_rgb_3_cat = torch.cat([img_3, self.upsample_2(x_rgb_42)], dim=1)
x_rgb_32 = self.rgb_gcm_3(x_rgb_3_cat)
x_rgb_2_cat = torch.cat([img_2, self.upsample_2(x_rgb_32)], dim=1)
x_rgb_22 = self.rgb_gcm_2(x_rgb_2_cat)
x_rgb_1_cat = torch.cat([img_1, self.upsample_2(x_rgb_22)], dim=1)
x_rgb_12 = self.rgb_gcm_1(x_rgb_1_cat)
x_rgb_0_cat = torch.cat([img_0, x_rgb_12], dim=1)
x_rgb_02 = self.rgb_gcm_0(x_rgb_0_cat)
rgb_out = self.upsample_4(self.rgb_conv_out(x_rgb_02))
x_dep_42 = self.dep_gcm_4(dep_4)
x_dep_3_cat = torch.cat([dep_3, self.upsample_2(x_dep_42)], dim=1)
x_dep_32 = self.dep_gcm_3(x_dep_3_cat)
x_dep_2_cat = torch.cat([dep_2, self.upsample_2(x_dep_32)], dim=1)
x_dep_22 = self.dep_gcm_2(x_dep_2_cat)
x_dep_1_cat = torch.cat([dep_1, self.upsample_2(x_dep_22)], dim=1)
x_dep_12 = self.dep_gcm_1(x_dep_1_cat)
x_dep_0_cat = torch.cat([dep_0, x_dep_12], dim=1)
x_dep_02 = self.dep_gcm_0(x_dep_0_cat)
dep_out = self.upsample_4(self.dep_conv_out(x_dep_02))
x_ful_42 = self.ful_gcm_4(ful_4)
x_ful_3_cat = torch.cat([ful_3, self.ful_layer3(self.upsample_2(x_ful_42), self.upsample_2(x_rgb_42), self.upsample_2(x_dep_42))], dim=1)
x_ful_32 = self.ful_gcm_3(x_ful_3_cat)
x_ful_2_cat = torch.cat([ful_2, self.ful_layer2(self.upsample_2(x_ful_32), self.upsample_2(x_rgb_32), self.upsample_2(x_dep_32))], dim=1)
x_ful_22 = self.ful_gcm_2(x_ful_2_cat)
x_ful_1_cat = torch.cat([ful_1, self.ful_layer1(self.upsample_2(x_ful_22), self.upsample_2(x_rgb_22), self.upsample_2(x_dep_22))], dim=1)
x_ful_12 = self.ful_gcm_1(x_ful_1_cat)
x_ful_0_cat = torch.cat([ful_0, self.ful_layer0(x_ful_12, x_rgb_12, x_dep_12)], dim=1)
x_ful_02 = self.ful_gcm_0(x_ful_0_cat)
ful_out = self.upsample_4(self.ful_conv_out(x_ful_02))
return (rgb_out, dep_out, ful_out)
def _make_agant_layer(self, inplanes, planes):
layers = nn.Sequential(nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, padding=0, bias=False), nn.BatchNorm2d(planes), nn.ReLU(inplace=True))
return layers
def _make_transpose(self, block, planes, blocks, stride=1):
upsample = None
if (stride != 1):
upsample = nn.Sequential(nn.ConvTranspose2d(self.inplanes, planes, kernel_size=2, stride=stride, padding=0, bias=False), nn.BatchNorm2d(planes))
elif (self.inplanes != planes):
upsample = nn.Sequential(nn.Conv2d(self.inplanes, planes, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes))
layers = []
for i in range(1, blocks):
layers.append(block(self.inplanes, self.inplanes))
layers.append(block(self.inplanes, planes, stride, upsample))
self.inplanes = planes
return nn.Sequential(*layers)
def md_tree_to_graph(root):
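# rebuild the graph from a modular decomposition tree: series nodes join all pairs across children, prime nodes join consecutive children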
from itertools import product, combinations
from sage.graphs.graph import Graph
def tree_to_vertices_and_edges(root):
if (root.node_type == NodeType.NORMAL):
return (root.children, [])
children_ve = [tree_to_vertices_and_edges(child) for child in root.children]
vertices = [v for (vs, es) in children_ve for v in vs]
edges = [e for (vs, es) in children_ve for e in es]
vertex_lists = [vs for (vs, es) in children_ve]
if (root.node_type == NodeType.PRIME):
for (vs1, vs2) in zip(vertex_lists, vertex_lists[1:]):
for (v1, v2) in product(vs1, vs2):
edges.append((v1, v2))
elif (root.node_type == NodeType.SERIES):
for (vs1, vs2) in combinations(vertex_lists, 2):
for (v1, v2) in product(vs1, vs2):
edges.append((v1, v2))
return (vertices, edges)
(vs, es) = tree_to_vertices_and_edges(root)
return Graph([vs, es], format='vertices_and_edges')
def get_zenodo_json(doi):
request = requests.get(doi, headers={'accept': 'application/citeproc+json'})
base_url = request.json()['URL']
record = base_url.split('/')[(- 1)]
json_url = ('https://zenodo.org/api/records/' + record)
request = requests.get(json_url, headers={'accept': 'application/json'})
record_json = request.json()
return record_json
def _fit_saga(X, y, eta, alpha, loss, penalty, max_iter, rng):
if sparse.issparse(X):
X = X.toarray()
(n_samples, n_features) = X.shape
n_vectors = y.shape[1]
g = np.empty((n_samples, n_features))
coef_ = np.zeros((n_vectors, n_features))
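# one full pass populates the table g of stored per-sample gradients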
for i in range(n_samples):
p = coef_.dot(X[i])
g[i] = ((- loss.get_update(p, y[i])) * X[i])
d = np.sum(g, axis=0)
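# SAGA update: fresh gradient minus stored gradient plus the running average d / n_samples, plus the L2 term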
for _ in range(max_iter):
for _ in range(n_samples):
i = rng.randint((n_samples - 1))
p = coef_.dot(X[i])
gi = ((- loss.get_update(p, y[i])) * X[i])
coef_ -= (eta * (((gi - g[i]) + (d / n_samples)) + (alpha * coef_)))
if (penalty is not None):
coef_ = penalty.projection(coef_, eta)
d += (gi - g[i])
g[i] = gi
return coef_
@richcmp_method
class Tableau(ClonableList, metaclass=InheritComparisonClasscallMetaclass):
@staticmethod
def __classcall_private__(cls, t):
if isinstance(t, cls):
return t
try:
t = [tuple(_) for _ in t]
except TypeError:
raise ValueError('a tableau must be a list of iterables')
return Tableaux_all().element_class(Tableaux_all(), t)
def __init__(self, parent, t, check=True):
if isinstance(t, Tableau):
ClonableList.__init__(self, parent, t, check=False)
return
t = [tuple(_) for _ in t]
ClonableList.__init__(self, parent, t, check=check)
def __richcmp__(self, other, op):
if isinstance(other, Tableau):
return richcmp(list(self), list(other), op)
else:
return richcmp(list(self), other, op)
def __hash__(self):
return hash(tuple(self))
def check(self):
lens = [len(row) for row in self]
for (a, b) in zip(lens, lens[1:]):
if (a < b):
raise ValueError('a tableau must be a list of iterables of weakly decreasing length')
if (lens and (lens[(- 1)] == 0)):
raise ValueError('a tableau must not have empty rows')
def _repr_(self):
return self.parent().options._dispatch(self, '_repr_', 'display')
def _repr_list(self):
return repr([list(_) for _ in self])
__str__ = _repr_list
def _repr_diagram(self):
if (not self):
return ' -'
str_tab = [[str(data) for data in row] for row in self]
col_widths = ([2] * len(str_tab[0]))
for row in str_tab:
for (i, e) in enumerate(row):
col_widths[i] = max(col_widths[i], len(e))
if (self.parent().options('convention') == 'Russian'):
col_width = (max(col_widths) + 1)
max_height = max(((a + len(val)) for (a, val) in enumerate(str_tab)))
str_list = []
for i in range(max_height):
st = (' ' * (((max_height - i) - 1) * col_width))
for a in range((i + 1)):
b = (i - a)
if ((len(str_tab[b:]) > 0) and (len(str_tab[b][a:]) > 0)):
st += str_tab[b][a].rjust(col_width, ' ').ljust(((col_width * 2) - 1), ' ')
else:
st += (' ' * ((col_width * 2) - 1))
str_list.append(st)
import re
mm = (min((len(re.search('^ +', sline)[0]) for sline in str_list)) - 1)
str_list = [sline[mm:] for sline in str_list]
str_list.reverse()
return '\n'.join(str_list)
if (self.parent().options('convention') == 'French'):
str_tab = reversed(str_tab)
return '\n'.join(((' ' + ' '.join(('{:>{width}}'.format(e, width=col_widths[i]) for (i, e) in enumerate(row)))) for row in str_tab))
def _repr_compact(self):
if (not self):
return '-'
return '/'.join((','.join((('%s' % r) for r in row)) for row in self))
def _ascii_art_(self):
ascii = self.parent().options._dispatch(self, '_ascii_art_', 'ascii_art')
from sage.typeset.ascii_art import AsciiArt
return AsciiArt(ascii.splitlines())
def _unicode_art_(self):
from sage.typeset.unicode_art import UnicodeArt
return UnicodeArt(self._ascii_art_table(use_unicode=True).splitlines())
_ascii_art_repr = _repr_diagram
def _ascii_art_table(self, use_unicode=False):
from sage.combinat.output import ascii_art_table
self.parent().options('convention')
return ascii_art_table(self, use_unicode=use_unicode, convention=self.parent().options('convention'))
def _ascii_art_compact(self):
if (not self):
return '.'
if (self.parent().options('convention') == 'Russian'):
from sage.combinat.output import ascii_art_table_russian
return ascii_art_table_russian(self, compact=True)
if (self.parent().options('convention') == 'English'):
T = self
else:
T = reversed(self)
str_tab = [[str(_) for _ in row] for row in T]
col_widths = ([1] * len(self[0]))
for row in str_tab:
for (i, e) in enumerate(row):
col_widths[i] = max(col_widths[i], len(e))
return '\n'.join(((('|' + '|'.join(('{:^{width}}'.format(e, width=col_widths[i]) for (i, e) in enumerate(row)))) + '|') for row in str_tab))
def _latex_(self):
return self.parent().options._dispatch(self, '_latex_', 'latex')
_latex_list = _repr_list
def _latex_diagram(self):
if (len(self) == 0):
return '{\\emptyset}'
from sage.combinat.output import tex_from_array
return tex_from_array(self)
def __truediv__(self, t):
from sage.combinat.partition import _Partitions
if isinstance(t, list):
t = _Partitions(t)
if (not self.shape().contains(t)):
raise ValueError('the shape of the tableau must contain the partition')
st = [list(row) for row in self]
for (i, t_i) in enumerate(t):
st_i = st[i]
for j in range(t_i):
st_i[j] = None
from sage.combinat.skew_tableau import SkewTableau
return SkewTableau(st)
def __call__(self, *cell):
try:
(i, j) = cell
except ValueError:
(i, j) = cell[0]
try:
return self[i][j]
except IndexError:
raise IndexError(('the cell (%d,%d) is not contained in %s' % (i, j, repr(self))))
def level(self):
return 1
def components(self):
return [self]
@combinatorial_map(name='shape')
def shape(self):
from sage.combinat.partition import Partition
return Partition([len(row) for row in self])
def size(self):
return sum([len(row) for row in self])
def corners(self):
return self.shape().corners()
@combinatorial_map(order=2, name='conjugate')
def conjugate(self):
if self:
conj = [[] for _ in repeat(None, len(self[0]))]
for row in self:
for (j, x) in enumerate(row):
conj[j].append(x)
else:
conj = []
if isinstance(self, StandardTableau):
return StandardTableau(conj)
return Tableau(conj)
def pp(self):
print(self._repr_diagram())
def plot(self, descents=False):
from sage.plot.polygon import polygon
from sage.plot.line import line
from sage.plot.text import text
if (descents and (not self.is_standard())):
raise ValueError("the tableau must be standard for 'descents=True'")
if (self.parent().options('convention') == 'English'):
m = 1
else:
m = (- 1)
p = self.shape()
r = p.conjugate()
if (self.parent().options('convention') == 'Russian'):
pp = p
rr = r
h = [((- i) - 1) for i in range(len(p))]
v = [(i + 1) for i in range(len(r))]
else:
pp = ([0] * len(p))
rr = ([0] * len(r))
h = ([0] * len(p))
v = ([0] * len(r))
G = line([(0, 0), (p[0], pp[0])], axes=False, figsize=1.5)
for i in range(len(p)):
G += line([(h[i], (m * ((- i) - 1))), ((h[i] + p[i]), (pp[i] + (m * ((- i) - 1))))])
G += line([(0, 0), ((- rr[0]), (m * (- r[0])))])
for i in range(len(r)):
G += line([((i + 1), v[i]), (((i + 1) - rr[i]), (v[i] + (m * (- r[i]))))])
if descents:
t = StandardTableau(self)
for i in t.standard_descents():
c = t.cells_containing(i)[0]
if (self.parent().options('convention') == 'Russian'):
G += polygon([(((c[1] + 1) - v[c[0]]), (m * ((- c[1]) - c[0]))), (((c[1] + 2) - v[c[0]]), (m * (((- c[1]) - c[0]) - 1))), (((c[1] + 1) - v[c[0]]), (m * (((- c[1]) - c[0]) - 2))), ((c[1] - v[c[0]]), (m * (((- c[1]) - c[0]) - 1)))], rgbcolor=(1, 0, 1))
else:
G += polygon([(c[1], (m * (- c[0]))), ((c[1] + 1), (m * (- c[0]))), ((c[1] + 1), (m * ((- c[0]) - 1))), (c[1], (m * ((- c[0]) - 1)))], rgbcolor=(1, 0, 1))
if (self.parent().options('convention') == 'Russian'):
for c in self.cells():
G += text(str(self.entry(c)), (((c[1] + 1) - v[c[0]]), (m * (((- c[1]) - c[0]) - 1))))
else:
for c in self.cells():
G += text(str(self.entry(c)), ((c[1] + 0.5), (m * ((- c[0]) - 0.5))))
return G
def to_word_by_row(self):
from sage.combinat.words.word import Word
w = []
for row in reversed(self):
w += row
return Word(w)
def to_word_by_column(self):
from sage.combinat.words.word import Word
w = []
for row in self.conjugate():
w += row[::(- 1)]
return Word(w)
def to_word(self):
return self.to_word_by_row()
def descents(self):
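# a descent is a cell whose entry is larger than the entry directly above it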
descents = []
for i in range(1, len(self)):
for j in range(len(self[i])):
if (self[i][j] > self[(i - 1)][j]):
descents.append((i, j))
return descents
def major_index(self):
descents = self.descents()
p = self.shape()
return (len(descents) + sum([p.leg_length(*d) for d in descents]))
def inversions(self):
inversions = []
previous_row = None
for (i, row) in enumerate(self):
for (j, entry) in enumerate(row):
for k in range((j + 1), len(row)):
if (entry > row[k]):
inversions.append(((i, j), (i, k)))
if (i == 0):
continue
for k in range(j):
if (entry > previous_row[k]):
inversions.append(((i, j), ((i - 1), k)))
previous_row = row
return inversions
def inversion_number(self):
p = self.shape()
return (len(self.inversions()) - sum((p.arm_length(*cell) for cell in self.descents())))
def to_sign_matrix(self, max_entry=None):
from sage.rings.integer_ring import ZZ
from sage.sets.positive_integers import PositiveIntegers
PI = PositiveIntegers()
for row in self:
if any(((c not in PI) for c in row)):
raise ValueError('the entries must be non-negative integers')
from sage.matrix.matrix_space import MatrixSpace
if (max_entry is None):
max_entry = max([max(c) for c in self])
MS = MatrixSpace(ZZ, len(self[0]), max_entry)
Tconj = self.conjugate()
conj_len = len(Tconj)
d = {(((conj_len - i) - 1), (elem - 1)): 1 for (i, row) in enumerate(Tconj) for elem in row}
partial_sum_matrix = MS(d)
from copy import copy
sign_matrix = copy(MS.zero())
for j in range(max_entry):
sign_matrix[(0, j)] = partial_sum_matrix[(0, j)]
for i in range(1, conj_len):
for j in range(max_entry):
sign_matrix[(i, j)] = (partial_sum_matrix[(i, j)] - partial_sum_matrix[((i - 1), j)])
return sign_matrix
def schuetzenberger_involution(self, n=None, check=True):
if (check and (self not in SemistandardTableaux())):
raise ValueError('the tableau must be semistandard')
w = [i for row in self for i in reversed(row)]
if (not w):
return self
if (n is None):
n = max(w)
N = (n + 1)
wi = [(N - i) for i in w]
t = Tableau([[wi[0]]])
for k in wi[1:]:
t = t.bump(k)
if isinstance(self, StandardTableau):
return StandardTableau(list(t))
elif isinstance(self, SemistandardTableau):
return SemistandardTableau(list(t))
return t
@combinatorial_map(order=2, name='evacuation')
def evacuation(self, n=None, check=True):
return self.schuetzenberger_involution(n, check)
@combinatorial_map(name='standardization')
def standardization(self, check=True):
if (check and (self not in SemistandardTableaux())):
raise ValueError('the tableau must be semistandard')
T = from_shape_and_word(self.shape(), self.to_word_by_row().standard_permutation())
return StandardTableaux()(T)
def bender_knuth_involution(self, k, rows=None, check=True):
if (check and (self not in SemistandardTableaux())):
raise ValueError('the tableau must be semistandard')
from sage.combinat.skew_tableau import SkewTableau
sk = SkewTableau(self).bender_knuth_involution(k, rows, False)
return SemistandardTableaux()(list(sk))
@combinatorial_map(name='reading word permutation')
def reading_word_permutation(self):
return permutation.Permutation(self.standardization().to_word())
def entries(self):
return sum(self, ())
def entry(self, cell):
(i, j) = cell
return self[i][j]
def weight(self):
if (len(self) == 0):
return []
m = max((max(row) for row in self))
res = ([0] * m)
for row in self:
for i in row:
if (i > 0):
res[(i - 1)] += 1
return res
evaluation = weight
def is_row_strict(self):
return all(((row[i] < row[(i + 1)]) for row in self for i in range((len(row) - 1))))
def is_row_increasing(self, weak=False):
if weak:
def test(a, b):
return (a <= b)
else:
def test(a, b):
return (a < b)
return all((test(a, b) for row in self for (a, b) in zip(row, row[1:])))
def is_column_increasing(self, weak=False):
if weak:
def test(a, b):
return (a <= b)
else:
def test(a, b):
return (a < b)
def tworow(a, b):
return all((test(a[i], b_i) for (i, b_i) in enumerate(b)))
return all((tworow(self[r], self[(r + 1)]) for r in range((len(self) - 1))))
def is_column_strict(self):
def tworow(a, b):
return all(((a[i] < b_i) for (i, b_i) in enumerate(b)))
return all((tworow(self[r], self[(r + 1)]) for r in range((len(self) - 1))))
def is_semistandard(self):
return (self.is_row_increasing(weak=True) and self.is_column_increasing())
def is_standard(self):
entries = sorted(self.entries())
return ((entries == list(range(1, (self.size() + 1)))) and self.is_row_strict() and self.is_column_strict())
def is_increasing(self):
return (self.is_row_strict() and self.is_column_strict())
def is_rectangular(self):
if (len(self) == 0):
return True
return (len(self[(- 1)]) == len(self[0]))
def vertical_flip(self):
if (not self.is_rectangular()):
raise TypeError('the tableau must be rectangular to use vertical_flip()')
return Tableau([row for row in reversed(self)])
def rotate_180(self):
if (not self.is_rectangular()):
raise TypeError('the tableau must be rectangular to use rotate_180()')
return Tableau([[rline for rline in reversed(row)] for row in reversed(self)])
def cells(self):
s = []
for (i, row) in enumerate(self):
s += [(i, j) for j in range(len(row))]
return s
def cells_containing(self, i):
cell_list = []
for r in range((len(self) - 1), (- 1), (- 1)):
rth_row = self[r]
for (c, val) in enumerate(rth_row):
if (val == i):
cell_list.append((r, c))
return cell_list
def leq(self, secondtab):
if (secondtab not in Tableaux()):
raise TypeError('{} must be a tableau'.format(secondtab))
sh = self.shape()
if (sh != secondtab.shape()):
raise TypeError('the tableaux must be the same shape')
return all(((self[a][b] <= secondtab[a][b]) for a in range(len(self)) for b in range(len(self[a]))))
def k_weight(self, k):
res = []
w = self.weight()
s = self.cells()
for l in range(1, (len(w) + 1)):
new_s = [(i, j) for (i, j) in s if (self[i][j] == l)]
if (new_s == []):
res.append(0)
continue
x = set((((i - j) % (k + 1)) for (i, j) in new_s))
res.append(len(x))
return res
def is_k_tableau(self, k):
shapes = self.to_chain()
kshapes = [la.k_conjugate(k) for la in shapes]
return all((kshapes[(i + 1)].contains(kshapes[i]) for i in range((len(shapes) - 1))))
def restrict(self, n):
res = [[y for y in row if (y <= n)] for row in self]
res = [row for row in res if row]
try:
return self.parent()(res)
except Exception:
try:
return self.parent().Element(res)
except Exception:
return Tableau(res)
def restriction_shape(self, n):
from sage.combinat.partition import Partition
res = [len([y for y in row if (y <= n)]) for row in self]
return Partition(res)
def to_chain(self, max_entry=None):
if (max_entry is None):
if (len(self) == 0):
max_entry = 0
else:
max_entry = max((max(row) for row in self))
return [self.restriction_shape(k) for k in range((max_entry + 1))]
@combinatorial_map(name='to Gelfand-Tsetlin pattern')
def to_Gelfand_Tsetlin_pattern(self):
from sage.combinat.gelfand_tsetlin_patterns import GelfandTsetlinPatterns
return GelfandTsetlinPatterns()(self)
def anti_restrict(self, n):
t_new = [[(None if (g <= n) else g) for g in row] for row in self]
from sage.combinat.skew_tableau import SkewTableau
return SkewTableau(t_new)
def to_list(self):
return [list(row) for row in self]
def bump(self, x):
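# Schensted row insertion: bump the first entry strictly larger than the incoming value into the next row, appending when none exists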
to_insert = x
new_t = self.to_list()
for row in new_t:
i = 0
while (i < len(row)):
if (to_insert < row[i]):
t = to_insert
to_insert = row[i]
row[i] = t
break
i += 1
if (i == len(row)):
row.append(to_insert)
if isinstance(self, SemistandardTableau):
return SemistandardTableau(new_t)
return Tableau(new_t)
new_t.append([to_insert])
if isinstance(self, SemistandardTableau):
return SemistandardTableau(new_t)
return Tableau(new_t)
def schensted_insert(self, i, left=False):
if left:
return self._left_schensted_insert(i)
else:
return self.bump(i)
def _left_schensted_insert(self, letter):
h = len(self)
if (h == 0):
return Tableau([[letter]])
h1 = (h + 1)
rep = self.to_list()
rep.reverse()
width = len(rep[(h - 1)])
heights = (self._heights() + [h1])
for j in range(1, (width + 2)):
i = heights[(j - 1)]
while ((i != h1) and (rep[(i - 1)][(j - 1)] >= letter)):
i += 1
if (i == heights[(j - 1)]):
if (j == 1):
rep = ([[letter]] + rep)
else:
rep[(i - 2)].append(letter)
break
elif ((i == h1) and (j == width)):
if (rep[(i - 2)][(j - 1)] < letter):
rep[(i - 2)].append(letter)
else:
new_letter = rep[(i - 2)][(j - 1)]
rep[(i - 2)][(j - 1)] = letter
rep[(i - 2)].append(new_letter)
break
else:
new_letter = rep[(i - 2)][(j - 1)]
rep[(i - 2)][(j - 1)] = letter
letter = new_letter
rep.reverse()
return Tableau(rep)
def insert_word(self, w, left=False):
if left:
w = [i for i in reversed(w)]
res = self
for i in w:
res = res.schensted_insert(i, left=left)
return res
def reverse_bump(self, loc):
if (not self.is_semistandard()):
raise ValueError('reverse bumping is only defined for semistandard tableaux')
try:
(r, c) = loc
if ((r, c) not in self.corners()):
raise ValueError('invalid corner')
except TypeError:
r = loc
c = (len(self[r]) - 1)
new_t = self.to_list()
to_move = new_t[r].pop()
if (not new_t[r]):
new_t.pop()
from bisect import bisect_left
for row in reversed(new_t[:r]):
c = (bisect_left(row, to_move, lo=c) - 1)
(row[c], to_move) = (to_move, row[c])
if isinstance(self, SemistandardTableau):
return (SemistandardTableau(new_t), to_move)
return (Tableau(new_t), to_move)
def bump_multiply(self, other):
if (not isinstance(other, Tableau)):
raise TypeError('other must be a Tableau')
row = len(other)
product = Tableau([list(a) for a in self])
while row:
row -= 1
for i in other[row]:
product = product.bump(i)
return product
def slide_multiply(self, other):
st = []
if (len(self) == 0):
return other
else:
l = len(self[0])
for row in other:
st.append((((None,) * l) + row))
for row in self:
st.append(row)
from sage.combinat.skew_tableau import SkewTableau
return SkewTableau(st).rectify()
def _slide_up(self, c):
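# jeu de taquin slide of the cell c up to (0, 0), used by promotion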
new_st = self.to_list()
(spotl, spotc) = c
while ([spotl, spotc] != [0, 0]):
if (spotc == 0):
new_st[spotl][spotc] = new_st[(spotl - 1)][spotc]
spotl -= 1
continue
elif (spotl == 0):
new_st[spotl][spotc] = new_st[spotl][(spotc - 1)]
spotc -= 1
continue
else:
below = new_st[(spotl - 1)][spotc]
left = new_st[spotl][(spotc - 1)]
if (below >= left):
new_st[spotl][spotc] = new_st[(spotl - 1)][spotc]
spotl -= 1
continue
else:
new_st[spotl][spotc] = new_st[spotl][(spotc - 1)]
spotc -= 1
continue
new_st[0][0] = 0
return Tableau(new_st)
def _slide_down(self, c, n):
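# jeu de taquin slide of the cell c outward; the vacated cell gets the sentinel n + 2 (used by promotion_inverse)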
new_st = self.to_list()
new_st_shape = [len(x) for x in self]
(spotl, spotc) = c
while True:
go_right = None
if ((len(new_st_shape) > (spotl + 1)) and (new_st_shape[(spotl + 1)] >= (spotc + 1))):
upper_neighbor = new_st[(spotl + 1)][spotc]
go_right = False
if (new_st_shape[spotl] != (spotc + 1)):
right_neighbor = new_st[spotl][(spotc + 1)]
if ((go_right is None) or (upper_neighbor > right_neighbor)):
go_right = True
if (go_right is True):
new_st[spotl][spotc] = right_neighbor
spotc += 1
elif (go_right is False):
new_st[spotl][spotc] = upper_neighbor
spotl += 1
else:
break
new_st[spotl][spotc] = (n + 2)
return Tableau(new_st)
def promotion_inverse(self, n):
if self.is_rectangular():
n = Integer(n)
if (self.size() == 0):
return self
s = self.shape()[0]
l = self.weight()[0]
word = [(i - 1) for row in reversed(self) for i in row if (i > 1)]
t = Tableau([])
t = t.insert_word(word)
t = t.to_list()
if (l < s):
for i in range(l):
t[(len(t) - 1)].append((n + 1))
else:
t.append([(n + 1) for i in range(s)])
return Tableau(t)
p = self
for c in reversed(self.cells_containing(1)):
p = p._slide_down(c, n)
return Tableau([[(i - 1) for i in row] for row in p])
def promotion(self, n):
if self.is_rectangular():
t = self.rotate_180()
t = [tuple((((n + 2) - i) for i in row)) for row in t]
t = Tableau(t).promotion_inverse(n)
t = [tuple((((n + 2) - i) for i in row)) for row in t]
return Tableau(t).rotate_180()
p = self
for c in self.cells_containing((n + 1)):
p = p._slide_up(c)
return Tableau([[(i + 1) for i in row] for row in p])
def row_stabilizer(self):
k = self.size()
gens = [list(range(1, (k + 1)))]
for row in self:
for j in range((len(row) - 1)):
gens.append((row[j], row[(j + 1)]))
return PermutationGroup(gens)
def column_stabilizer(self):
k = self.size()
gens = [list(range(1, (k + 1)))]
ell = len(self)
while (ell > 1):
ell -= 1
for (i, val) in enumerate(self[ell]):
gens.append((val, self[(ell - 1)][i]))
return PermutationGroup(gens)
def height(self):
return len(self)
def _heights(self):
cor = self.corners()
if (not cor):
return []
ncor = len(cor)
k = len(self)
cor = [[(k - i), (j + 1)] for (i, j) in reversed(cor)]
heights = ([1] * cor[0][1])
for i in range(1, ncor):
heights += ([cor[i][0]] * (cor[i][1] - cor[(i - 1)][1]))
return heights
def last_letter_lequal(self, tab2):
n = self.size()
if (not isinstance(tab2, Tableau)):
try:
tab2 = Tableau(tab2)
except Exception:
raise TypeError('tab2 must be a standard tableau')
if (tab2.size() != n):
raise ValueError('tab2 must be the same size as self')
if (self == tab2):
return True
for j in range(n, 1, (- 1)):
self_j_pos = None
for i in range(len(self)):
if (j in self[i]):
self_j_pos = i
break
tab2_j_pos = None
for i in range(len(tab2)):
if (j in tab2[i]):
tab2_j_pos = i
break
if (self_j_pos < tab2_j_pos):
return True
if (tab2_j_pos < self_j_pos):
return False
def charge(self):
return self.to_word().charge()
def cocharge(self):
return self.to_word().cocharge()
def add_entry(self, cell, m):
tab = self.to_list()
(r, c) = cell
try:
tab[r][c] = m
except IndexError:
if (r >= len(tab)):
if ((r == len(tab)) and (c == 0)):
tab.append([m])
else:
raise IndexError(('%s is not an addable cell of the tableau' % ((r, c),)))
else:
tab_r = tab[r]
if (c == len(tab_r)):
tab_r.append(m)
else:
raise IndexError(('%s is not an addable cell of the tableau' % ((r, c),)))
if (tab in self.parent()):
return self.parent()(tab)
else:
try:
return self.parent().Element(tab)
except Exception:
return Tableau(tab)
def catabolism(self):
h = self.height()
if (h == 0):
return self
else:
return Tableau(self[1:]).insert_word(self[0], left=True)
def catabolism_sequence(self):
h = self.height()
res = [self]
newterm = self
while (h > 1):
newterm = newterm.catabolism()
res.append(newterm)
h = newterm.height()
return res
def lambda_catabolism(self, part):
part = [min(part[i], len(self[i])) for i in range(min(len(self), len(part)))]
if (self.shape() == part):
return Tableau([])
m = len(part)
w1 = list(sum((row for row in reversed(self[m:])), ()))
w2 = []
for (i, row) in enumerate(reversed(self[:m])):
w2 += row[part[((- 1) - i)]:]
return Tableau([]).insert_word((w2 + w1))
def reduced_lambda_catabolism(self, part):
part1 = part
if (self == []):
return self
res = self.lambda_catabolism(part)
if (res == []):
return res
if (res == 0):
return 0
a = self[0][0]
part = [min(part1[i], len(self[i])) for i in range(min(len(part1), len(self)))]
tt_part = Tableau([([(a + i)] * part[i]) for i in range(len(part))])
t_part = Tableau([[self[i][j] for j in range(part[i])] for i in range(len(part))])
if (t_part == tt_part):
return res
else:
return 0
def catabolism_projector(self, parts):
res = self
for p in parts:
res = res.reduced_lambda_catabolism(p)
if (res == 0):
return 0
if (res == []):
return self
else:
return Tableau([])
def promotion_operator(self, i):
chain = self.to_chain()
part = self.shape()
weight = self.weight()
perm = permutation.from_reduced_word(range(1, (len(weight) + 1)))
l = part.add_horizontal_border_strip(i)
ltab = [from_chain((chain + [next])) for next in l]
return [x.symmetric_group_action_on_values(perm) for x in ltab]
def raise_action_from_words(self, f, *args):
w = self.to_word()
w = f(w, *args)
return from_shape_and_word(self.shape(), w)
def symmetric_group_action_on_values(self, perm):
return self.raise_action_from_words(symmetric_group_action_on_values, perm)
def socle(self):
h = self.height()
if (h == 0):
return 0
w1row = self[0]
i = 0
while (i < (len(w1row) - 1)):
if (w1row[(i + 1)] != (w1row[i] + 1)):
break
i += 1
return (i + 1)
def atom(self):
ll = [t.socle() for t in self.catabolism_sequence()]
lres = ll[:]
for i in range(1, len(ll)):
lres[i] = (ll[i] - ll[(i - 1)])
return lres
def symmetric_group_action_on_entries(self, w):
w = (w + [(i + 1) for i in range(len(w), self.size())])
try:
return self.parent()([[w[(entry - 1)] for entry in row] for row in self])
except Exception:
return Tableau([[w[(entry - 1)] for entry in row] for row in self])
def is_key_tableau(self):
T_conj = self.conjugate()
return all(((x in T_conj[(i - 1)]) for i in range(1, len(T_conj)) for x in T_conj[i]))
def right_key_tableau(self):
if (not self):
return self
cols_list = self.conjugate()
key = [[] for _ in cols_list]
for (i, col_a) in enumerate(cols_list):
right_cols = cols_list[(i + 1):]
for elem in reversed(col_a):
key_val = elem
update = []
for col_b in right_cols:
if (col_b and (key_val <= col_b[(- 1)])):
key_val = col_b[(- 1)]
update.append(col_b[:(- 1)])
else:
update.append(col_b)
key[i].insert(0, key_val)
right_cols = update
return Tableau(key).conjugate()
def left_key_tableau(self):
if (not self):
return self
cols_list = self.conjugate()
key = [[] for _ in cols_list]
key[0] = list(cols_list[0])
from bisect import bisect_right
for (i, col_a) in enumerate(cols_list[1:], 1):
left_cols = cols_list[:i]
for elem in reversed(col_a):
key_val = elem
update = []
for col_b in reversed(left_cols):
j = (bisect_right(col_b, key_val) - 1)
key_val = col_b[j]
update.insert(0, col_b[:j])
left_cols = update
key[i].insert(0, key_val)
return Tableau(key).conjugate()
def _segments(self):
segments = {}
for (r, row) in enumerate(self):
for c in range(len(row)):
for j in range((c + 1)):
if ((row[j] != (r + 1)) and ((r, row[j]) not in segments)):
segments[(r, row[j])] = j
return segments
def seg(self):
return len(self._segments())
def flush(self):
for i in range((len(self) - 1)):
if (len(self[i]) <= len(self[(i + 1)])):
raise ValueError('only defined for tableaux with strictly decreasing parts')
f = 0
S = self._segments().items()
for s in S:
if (((s[0][0] != (len(self) - 1)) and (s[1] == len(self[(s[0][0] + 1)])) and (self[(s[0][0] + 1)][(- 1)] <= s[0][1])) or ((s[0][0] == (len(self) - 1)) and (s[1] == 0))):
f += 1
else:
for t in S:
if (((s[0][0] + 1) == t[0][0]) and (s[1] == t[1]) and (((s[1] >= 1) and (self[(s[0][0] + 1)][(s[1] - 1)] <= self[s[0][0]][s[1]])) or ((s[1] < 1) and (self[(s[0][0] + 1)][s[1]] != (s[0][0] + 2))))):
f += 1
return f
def content(self, k, multicharge=[0]):
for (r, row) in enumerate(self):
try:
return ((row.index(k) - r) + multicharge[0])
except ValueError:
pass
raise ValueError(('%d does not appear in tableau' % k))
def residue(self, k, e, multicharge=(0,)):
for (r, row) in enumerate(self):
try:
return IntegerModRing(e)(((row.index(k) - r) + multicharge[0]))
except ValueError:
pass
raise ValueError(('%d does not appear in the tableau' % k))
def residue_sequence(self, e, multicharge=(0,)):
res = ([0] * self.size())
for (r, row) in enumerate(self):
for (c, entry) in enumerate(row):
res[(entry - 1)] = ((multicharge[0] - r) + c)
from sage.combinat.tableau_residues import ResidueSequence
return ResidueSequence(e, multicharge, res, check=False)
def degree(self, e, multicharge=(0,)):
n = self.size()
if (n == 0):
return 0
deg = self.shape()._initial_degree(e, multicharge)
res = self.shape().initial_tableau().residue_sequence(e, multicharge)
for r in self.reduced_row_word():
if (res[r] == res[(r + 1)]):
deg -= 2
elif ((res[r] == (res[(r + 1)] + 1)) or (res[r] == (res[(r + 1)] - 1))):
deg += (((e == 2) and 2) or 1)
res = res.swap_residues(r, (r + 1))
return deg
def codegree(self, e, multicharge=(0,)):
if (not self):
return 0
conj_shape = self.shape().conjugate()
codeg = conj_shape._initial_degree(e)
res = conj_shape.initial_tableau().residue_sequence(e)
for r in self.reduced_column_word():
if (res[r] == res[(r + 1)]):
codeg -= 2
elif ((res[r] == (res[(r + 1)] + 1)) or (res[r] == (res[(r + 1)] - 1))):
codeg += (((e == 2) and 2) or 1)
res = res.swap_residues(r, (r + 1))
return codeg
def first_row_descent(self):
for row in range(len(self)):
for col in range((len(self[row]) - 1)):
if (self[row][col] > self[row][(col + 1)]):
return (row, col)
return None
def first_column_descent(self):
for row in range((len(self) - 1)):
col = 0
while (col < len(self[(row + 1)])):
if (self[row][col] > self[(row + 1)][col]):
return (row, col)
col += 1
return None
def reduced_row_word(self):
return permutation.Permutation(list(self.entries())).inverse().reduced_word_lexmin()
def reduced_column_word(self):
data = list(self.conjugate().entries())
return permutation.Permutation(data).inverse().reduced_word_lexmin()
def hillman_grassl(self):
from sage.combinat.hillman_grassl import hillman_grassl, WeakReversePlanePartition
return WeakReversePlanePartition(hillman_grassl(list(self)))
def sulzgruber_correspondence(self):
from sage.combinat.hillman_grassl import sulzgruber_correspondence, WeakReversePlanePartition
return WeakReversePlanePartition(sulzgruber_correspondence(list(self))) |
@HEADS.register_module()
class FPNHead(BaseDecodeHead):
def __init__(self, feature_strides, **kwargs):
super(FPNHead, self).__init__(input_transform='multiple_select', **kwargs)
assert (len(feature_strides) == len(self.in_channels))
assert (min(feature_strides) == feature_strides[0])
self.feature_strides = feature_strides
self.scale_heads = nn.ModuleList()
for i in range(len(feature_strides)):
head_length = max(1, int((np.log2(feature_strides[i]) - np.log2(feature_strides[0]))))
scale_head = []
for k in range(head_length):
scale_head.append(ConvModule((self.in_channels[i] if (k == 0) else self.channels), self.channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg))
if (feature_strides[i] != feature_strides[0]):
scale_head.append(Upsample(scale_factor=2, mode='bilinear', align_corners=self.align_corners))
self.scale_heads.append(nn.Sequential(*scale_head))
def forward(self, inputs):
x = self._transform_inputs(inputs)
output = self.scale_heads[0](x[0])
for i in range(1, len(self.feature_strides)):
output = (output + resize(self.scale_heads[i](x[i]), size=output.shape[2:], mode='bilinear', align_corners=self.align_corners))
output = self.cls_seg(output)
return output |
class ExecutionFlowBuilder():
def __init__(self, trace: ExecutionTrace, known_code_objects: dict[(int, CodeObjectMetaData)]):
self.trace = trace
self.known_code_objects = known_code_objects
def _finish_basic_block(self, instr_index: int, basic_block: list[Instr], import_instr: (UniqueInstruction | None), efb_state: ExecutionFlowBuilderState) -> LastInstrState:
if (instr_index > 0):
last_instr = basic_block[(instr_index - 1)]
efb_state.offset -= 2
else:
last_instr = self._continue_at_last_basic_block(efb_state)
if ((not last_instr) and import_instr):
last_instr = self._continue_before_import(efb_state, import_instr)
return LastInstrState(efb_state.file, last_instr, efb_state.co_id, efb_state.bb_id, efb_state.offset, import_start=True)
return LastInstrState(efb_state.file, last_instr, efb_state.co_id, efb_state.bb_id, efb_state.offset)
def get_last_instruction(self, file: str, instr: Instr, trace_pos: int, offset: int, co_id: int, bb_id: int, import_instr: (UniqueInstruction | None)=None) -> LastInstrState:
(basic_block, bb_offset) = self._get_basic_block(co_id, bb_id)
instr_index = self.locate_in_basic_block(instr, offset, basic_block, bb_offset)
efb_state = ExecutionFlowBuilderState(bb_id, co_id, file, offset)
if (trace_pos < 0):
return self._finish_basic_block(instr_index, basic_block, import_instr, efb_state)
unique_instr = self._create_unique_instruction(efb_state.file, instr, efb_state.co_id, efb_state.bb_id, efb_state.offset)
last_traced_instr = self.trace.executed_instructions[trace_pos]
last_instr = self._determine_last_instruction(efb_state, basic_block, instr_index, last_traced_instr, unique_instr)
if (last_traced_instr.opcode in op.OP_RETURN):
last_instr = self._handle_return_instructions(efb_state, instr, last_instr, last_traced_instr, unique_instr)
if (not last_instr):
last_instr = self._handle_method_invocation(efb_state, import_instr, last_traced_instr)
if ((not efb_state.call) and (not efb_state.returned)):
last_instr = self._handle_generator_and_exceptions(efb_state, last_instr, last_traced_instr)
return LastInstrState(efb_state.file, last_instr, efb_state.co_id, efb_state.bb_id, offset=efb_state.offset, jump=efb_state.jump, call=efb_state.call, returned=efb_state.returned, exception=efb_state.exception, import_start=efb_state.import_start, import_back_call=efb_state.import_back_call)
def _determine_last_instruction(self, efb_state: ExecutionFlowBuilderState, basic_block, instr_index, last_traced_instr, unique_instr) -> Instr:
if (instr_index > 0):
last_instr = basic_block[(instr_index - 1)]
efb_state.offset -= 2
elif unique_instr.is_jump_target:
if (last_traced_instr.is_jump() and (last_traced_instr.argument == efb_state.bb_id)):
assert (efb_state.co_id == last_traced_instr.code_object_id), 'Jump to instruction must originate from same code object'
last_instr = self._continue_at_last_traced(last_traced_instr, efb_state)
efb_state.jump = True
else:
last_instr = self._continue_at_last_basic_block(efb_state)
else:
last_instr = self._continue_at_last_basic_block(efb_state)
return last_instr
def _handle_return_instructions(self, efb_state: ExecutionFlowBuilderState, instr, last_instr, last_traced_instr, unique_instr):
if (instr.opcode != op.IMPORT_NAME):
if last_instr:
if ((last_instr.opcode in op.OP_CALL) or ((last_instr.opcode in op.TRACED_INSTRUCTIONS) and (last_instr.opcode != last_traced_instr.opcode))):
last_instr = self._continue_at_last_traced(last_traced_instr, efb_state)
efb_state.returned = True
else:
last_instr = self._continue_at_last_traced(last_traced_instr, efb_state)
efb_state.returned = True
else:
last_instr = self._continue_at_last_traced(last_traced_instr, efb_state)
efb_state.import_back_call = unique_instr
efb_state.returned = True
return last_instr
def _handle_method_invocation(self, efb_state: ExecutionFlowBuilderState, import_instr: (UniqueInstruction | None), last_traced_instr) -> Instr:
efb_state.call = True
if (not import_instr):
last_instr = self._continue_at_last_traced(last_traced_instr, efb_state)
else:
last_instr = self._continue_before_import(efb_state, import_instr)
efb_state.import_start = True
return last_instr
def _handle_generator_and_exceptions(self, efb_state: ExecutionFlowBuilderState, last_instr, last_traced_instr: ExecutedInstruction) -> Instr:
if (last_instr.opcode in [op.YIELD_VALUE, op.YIELD_FROM]):
last_instr = self._continue_at_last_traced(last_traced_instr, efb_state)
elif (last_instr and (last_instr.opcode in op.TRACED_INSTRUCTIONS) and (last_instr.opcode != last_traced_instr.opcode)):
last_instr = self._continue_at_last_traced(last_traced_instr, efb_state)
efb_state.exception = True
return last_instr
def _create_unique_instruction(self, module: str, instr: Instr, code_object_id: int, node_id: int, offset: int) -> UniqueInstruction:
code_meta = self.known_code_objects.get(code_object_id)
assert code_meta, 'Unknown code object id'
return UniqueInstruction(module, instr.name, code_object_id, node_id, code_meta, offset, instr.arg, instr.lineno)
def _continue_at_last_traced(self, last_traced_instr: ExecutedInstruction, efb_state: ExecutionFlowBuilderState) -> Instr:
efb_state.file = last_traced_instr.file
efb_state.co_id = last_traced_instr.code_object_id
efb_state.bb_id = last_traced_instr.node_id
last_instr = self._locate_traced_in_bytecode(last_traced_instr)
efb_state.offset = last_traced_instr.offset
return last_instr
def _continue_at_last_basic_block(self, efb_state: ExecutionFlowBuilderState) -> Instr:
last_instr = None
if (efb_state.bb_id > 0):
efb_state.bb_id = (efb_state.bb_id - 1)
last_instr = self._get_last_in_basic_block(efb_state.co_id, efb_state.bb_id)
efb_state.offset -= 2
return last_instr
def _continue_before_import(self, efb_state: ExecutionFlowBuilderState, import_instr: UniqueInstruction) -> Instr:
efb_state.co_id = import_instr.code_object_id
efb_state.bb_id = import_instr.node_id
efb_state.offset = import_instr.offset
instr = Instr(import_instr.name, arg=import_instr.arg, lineno=import_instr.lineno)
(basic_block, bb_offset) = self._get_basic_block(efb_state.co_id, efb_state.bb_id)
instr_index = self.locate_in_basic_block(instr, efb_state.offset, basic_block, bb_offset)
if (instr_index > 0):
last_instr = basic_block[(instr_index - 1)]
efb_state.offset -= 2
else:
last_instr = self._continue_at_last_basic_block(efb_state)
return last_instr
def _get_last_in_basic_block(self, code_object_id: int, basic_block_id: int) -> Instr:
code_object = self.known_code_objects.get(code_object_id)
assert code_object, 'Unknown code object id'
instr = None
for node in code_object.original_cfg.nodes:
if ((node.index == basic_block_id) and node.basic_block):
instr = node.basic_block[(- 1)]
break
assert instr, 'Block did not contain a last instruction'
return instr
def _get_basic_block(self, code_object_id: int, basic_block_id: int) -> tuple[(list[Instr], int)]:
code_object = self.known_code_objects[code_object_id]
assert (code_object is not None), 'Unknown code object id'
for node in code_object.original_cfg.nodes:
if ((node.index == basic_block_id) and node.basic_block):
return (node.basic_block, node.offset)
raise InstructionNotFoundException
def _locate_traced_in_bytecode(self, instr: ExecutedInstruction) -> Instr:
(basic_block, bb_offset) = self._get_basic_block(instr.code_object_id, instr.node_id)
for instruction in basic_block:
if ((instr.opcode == instruction.opcode) and (instr.lineno == instruction.lineno) and (instr.offset == bb_offset)):
return instruction
bb_offset += 2
raise InstructionNotFoundException
@staticmethod
def locate_in_basic_block(instr: Instr, instr_offset: int, basic_block: list[Instr], bb_offset: int) -> int:
for (index, instruction) in enumerate(basic_block):
if ((instruction == instr) and (instr_offset == bb_offset)):
return index
bb_offset += 2
raise InstructionNotFoundException |
def test_set_get_limit(stopping_condition):
stopping_condition.set_limit(42)
assert (stopping_condition.limit() == 42) |
def finalize_autosummaries() -> None:
global _finalized
tfutil.assert_tf_initialized()
if _finalized:
return None
_finalized = True
tfutil.init_uninitialized_vars([var for vars_list in _vars.values() for var in vars_list])
with tf.device(None), tf.control_dependencies(None):
for (name, vars_list) in _vars.items():
name_id = name.replace('/', '_')
with tfutil.absolute_name_scope(('Autosummary/' + name_id)):
moments = tf.add_n(vars_list)
moments /= moments[0]
with tf.control_dependencies([moments]):
reset_ops = [tf.assign(var, tf.zeros(3, dtype=_dtype)) for var in vars_list]
with tf.name_scope(None), tf.control_dependencies(reset_ops):
mean = moments[1]
std = tf.sqrt((moments[2] - tf.square(moments[1])))
tf.summary.scalar(name, mean)
if enable_custom_scalars:
tf.summary.scalar((('xCustomScalars/' + name) + '/margin_lo'), (mean - std))
tf.summary.scalar((('xCustomScalars/' + name) + '/margin_hi'), (mean + std))
layout = None
if enable_custom_scalars:
cat_dict = OrderedDict()
for series_name in sorted(_vars.keys()):
p = series_name.split('/')
cat = (p[0] if (len(p) >= 2) else '')
chart = ('/'.join(p[1:(- 1)]) if (len(p) >= 3) else p[(- 1)])
if (cat not in cat_dict):
cat_dict[cat] = OrderedDict()
if (chart not in cat_dict[cat]):
cat_dict[cat][chart] = []
cat_dict[cat][chart].append(series_name)
categories = []
for (cat_name, chart_dict) in cat_dict.items():
charts = []
for (chart_name, series_names) in chart_dict.items():
series = []
for series_name in series_names:
series.append(layout_pb2.MarginChartContent.Series(value=series_name, lower=(('xCustomScalars/' + series_name) + '/margin_lo'), upper=(('xCustomScalars/' + series_name) + '/margin_hi')))
margin = layout_pb2.MarginChartContent(series=series)
charts.append(layout_pb2.Chart(title=chart_name, margin=margin))
categories.append(layout_pb2.Category(title=cat_name, chart=charts))
layout = summary_lib.custom_scalar_pb(layout_pb2.Layout(category=categories))
return layout |
def _update_model_cond(old_model, new_model):
for idx in range(0, len(new_model.WN)):
wavenet = new_model.WN[idx]
n_channels = wavenet.n_channels
n_layers = wavenet.n_layers
n_mel_channels = wavenet.cond_layers[0].weight.shape[1]
cond_layer = torch.nn.Conv1d(n_mel_channels, ((2 * n_channels) * n_layers), 1)
cond_layer_weight = []
cond_layer_bias = []
for i in range(0, n_layers):
_cond_layer = torch.nn.utils.remove_weight_norm(wavenet.cond_layers[i])
cond_layer_weight.append(_cond_layer.weight)
cond_layer_bias.append(_cond_layer.bias)
cond_layer.weight = torch.nn.Parameter(torch.cat(cond_layer_weight))
cond_layer.bias = torch.nn.Parameter(torch.cat(cond_layer_bias))
cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
wavenet.cond_layer = cond_layer
del wavenet.cond_layers |
class DSPySuggestionError(AssertionError):
def __init__(self, id: str, msg: str, target_module: Any=None, state: Any=None) -> None:
super().__init__(msg)
self.id = id
self.msg = msg
self.target_module = target_module
self.state = state |
@wrap_logical_op_with_cast_to_and_from('Bool')
def __and_(g, input, other):
return g.op('And', input, other) |
@pytest.mark.parametrize('GradientBoosting, X, y', [(HistGradientBoostingClassifier, X_classification, y_classification), (HistGradientBoostingRegressor, X_regression, y_regression)])
@pytest.mark.parametrize('rng_type', ('none', 'int', 'instance'))
def test_random_seeds_warm_start(GradientBoosting, X, y, rng_type):
def _get_rng(rng_type):
if (rng_type == 'none'):
return None
elif (rng_type == 'int'):
return 42
else:
return np.random.RandomState(0)
random_state = _get_rng(rng_type)
gb_1 = GradientBoosting(early_stopping=True, max_iter=2, random_state=random_state)
gb_1.set_params(scoring=check_scoring(gb_1))
gb_1.fit(X, y)
random_seed_1_1 = gb_1._random_seed
gb_1.fit(X, y)
random_seed_1_2 = gb_1._random_seed
random_state = _get_rng(rng_type)
gb_2 = GradientBoosting(early_stopping=True, max_iter=2, random_state=random_state, warm_start=True)
gb_2.set_params(scoring=check_scoring(gb_2))
gb_2.fit(X, y)
random_seed_2_1 = gb_2._random_seed
gb_2.fit(X, y)
random_seed_2_2 = gb_2._random_seed
if (rng_type == 'none'):
assert (random_seed_1_1 != random_seed_1_2 != random_seed_2_1)
elif (rng_type == 'int'):
assert (random_seed_1_1 == random_seed_1_2 == random_seed_2_1)
else:
assert (random_seed_1_1 == random_seed_2_1 != random_seed_1_2)
assert (random_seed_2_1 == random_seed_2_2) |
class Evaluator(object):
def __init__(self):
self.reset()
def reset(self):
self.lm_vis_count_all = np.array(([0.0] * 8))
self.lm_dist_all = np.array(([0.0] * 8))
def add(self, output, sample):
landmark_vis_count = sample['landmark_vis'].cpu().numpy().sum(axis=0)
landmark_vis_float = torch.unsqueeze(sample['landmark_vis'].float(), dim=2)
landmark_vis_float = torch.cat([landmark_vis_float, landmark_vis_float], dim=2).cpu().detach().numpy()
lm_pos_map = output['lm_pos_map']
(batch_size, _, pred_h, pred_w) = lm_pos_map.size()
lm_pos_reshaped = lm_pos_map.reshape(batch_size, 8, (- 1))
(lm_pos_y, lm_pos_x) = np.unravel_index(torch.argmax(lm_pos_reshaped, dim=2).cpu().numpy(), (pred_h, pred_w))
lm_pos_output = np.stack([(lm_pos_x / (pred_w - 1)), (lm_pos_y / (pred_h - 1))], axis=2)
landmark_dist = np.sum(np.sqrt(np.sum(np.square(((landmark_vis_float * lm_pos_output) - (landmark_vis_float * sample['landmark_pos_normalized'].cpu().numpy()))), axis=2)), axis=0)
self.lm_vis_count_all += landmark_vis_count
self.lm_dist_all += landmark_dist
def evaluate(self):
lm_dist = (self.lm_dist_all / self.lm_vis_count_all)
lm_dist_all = (self.lm_dist_all / self.lm_vis_count_all).mean()
return {'lm_dist': lm_dist, 'lm_dist_all': lm_dist_all} |
@pytest.mark.parametrize('lil_container', LIL_CONTAINERS)
def test_svc_with_custom_kernel(lil_container):
def kfunc(x, y):
return safe_sparse_dot(x, y.T)
X_sp = lil_container(X)
clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp)) |
def OneHotEncoded(y_train):
y_t = np.zeros((len(y_train), Num_Class), dtype=int)
for (i, x) in enumerate(y_train):
y_t[i][(int(x) - 1)] = 1
return y_t |
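A hedged usage sketch (not from the source): assuming the module-level constant Num_Class is 3 and labels are 1-indexed, label k sets column k - 1 of its one-hot row.

import numpy as np

# Assumes this runs in the same module, where the global Num_Class is defined.
Num_Class = 3
print(OneHotEncoded(np.array([1, 3, 2])))
# [[1 0 0]
#  [0 0 1]
#  [0 1 0]]   (label k -> column k - 1)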
def mp_join(*args):
dir_path = os.path.join(*args)
if (not os.path.exists(dir_path)):
os.makedirs(dir_path)
return dir_path |
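For illustration (an assumption, not from the source): mp_join behaves like os.path.join that also creates the directory; on Python 3.2+ the same effect is available race-free via os.makedirs(..., exist_ok=True).

import os
import tempfile

base = tempfile.mkdtemp()
ckpt_dir = mp_join(base, 'runs', 'exp1')  # creates base/runs/exp1 if missing
assert os.path.isdir(ckpt_dir)
# Race-free equivalent without the exists/makedirs gap:
os.makedirs(os.path.join(base, 'runs', 'exp1'), exist_ok=True)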
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info
config = Configuration('linalg', parent_package, top_path)
config.add_data_dir('tests')
src_dir = 'lapack_lite'
lapack_lite_src = [os.path.join(src_dir, 'python_xerbla.c'), os.path.join(src_dir, 'f2c_z_lapack.c'), os.path.join(src_dir, 'f2c_c_lapack.c'), os.path.join(src_dir, 'f2c_d_lapack.c'), os.path.join(src_dir, 'f2c_s_lapack.c'), os.path.join(src_dir, 'f2c_lapack.c'), os.path.join(src_dir, 'f2c_blas.c'), os.path.join(src_dir, 'f2c_config.c'), os.path.join(src_dir, 'f2c.c')]
all_sources = config.paths(lapack_lite_src)
lapack_info = get_info('lapack_opt', 0)
def get_lapack_lite_sources(ext, build_dir):
if (not lapack_info):
print('### Warning: Using unoptimized lapack ###')
return all_sources
else:
if (sys.platform == 'win32'):
print('### Warning: python_xerbla.c is disabled ###')
return []
return [all_sources[0]]
config.add_extension('lapack_lite', sources=['lapack_litemodule.c', get_lapack_lite_sources], depends=['lapack_lite/f2c.h'], extra_info=lapack_info)
config.add_extension('_umath_linalg', sources=['umath_linalg.c.src', get_lapack_lite_sources], depends=['lapack_lite/f2c.h'], extra_info=lapack_info, libraries=['npymath'])
return config |
class CscDataset(object):
def __init__(self, file_path):
with open(file_path, 'r', encoding='utf-8') as f:
self.data = json.load(f)
def load(self):
data_list = []
for item in self.data:
data_list.append(((item['original_text'] + '\t') + item['correct_text']))
return {'text': data_list} |
class RandAugment(object):
def __init__(self, n, m):
self.n = n
self.m = m
self.count = 0
self.augment_pool = augment_pool()
def __call__(self, img):
ops = random.sample(self.augment_pool, k=self.n)
for (op, minval, maxval) in ops:
val = np.random.uniform(1, self.m)
img = op(img, val)
self.count = (self.count + 1)
return img |
def setup_seed(SEED):
setup_plain_seed(SEED)
torch.manual_seed(SEED)
torch.random.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False |
class Adadelta(Optimizer):
def __init__(self, lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0, **kwargs):
super(Adadelta, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.lr = K.variable(lr, name='lr')
self.decay = K.variable(decay, name='decay')
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.rho = rho
self.epsilon = epsilon
self.initial_decay = decay
@interfaces.legacy_get_updates_support
def get_updates(self, loss, params, learning_rate_multipliers):
grads = self.get_gradients(loss, params)
shapes = [K.int_shape(p) for p in params]
accumulators = [K.zeros(shape) for shape in shapes]
delta_accumulators = [K.zeros(shape) for shape in shapes]
self.weights = (accumulators + delta_accumulators)
self.updates = [K.update_add(self.iterations, 1)]
lr = self.lr
if (self.initial_decay > 0):
lr *= (1.0 / (1.0 + (self.decay * K.cast(self.iterations, K.dtype(self.decay)))))
for (p, g, a, d_a, lmul) in zip(params, grads, accumulators, delta_accumulators, learning_rate_multipliers):
new_a = ((self.rho * a) + ((1.0 - self.rho) * K.square(g)))
self.updates.append(K.update(a, new_a))
update = ((g * K.sqrt((d_a + self.epsilon))) / K.sqrt((new_a + self.epsilon)))
new_p = (p - ((lr * lmul) * update))
if (getattr(p, 'constraint', None) is not None):
new_p = p.constraint(new_p)
self.updates.append(K.update(p, new_p))
new_d_a = ((self.rho * d_a) + ((1 - self.rho) * K.square(update)))
self.updates.append(K.update(d_a, new_d_a))
return self.updates
def get_config(self):
config = {'lr': float(K.get_value(self.lr)), 'rho': self.rho, 'decay': float(K.get_value(self.decay)), 'epsilon': self.epsilon}
base_config = super(Adadelta, self).get_config()
return dict((list(base_config.items()) + list(config.items()))) |
def test_as_numer_denom():
(x, y) = Rational(17, 26).as_numer_denom()
assert (x == Integer(17))
assert (y == Integer(26))
(x, y) = Integer((- 5)).as_numer_denom()
assert (x == Integer((- 5)))
assert (y == Integer(1)) |
def estimate_visib_mask_est(d_test, d_est, visib_gt, delta):
visib_est = estimate_visib_mask(d_test, d_est, delta)
visib_est = np.logical_or(visib_est, np.logical_and(visib_gt, (d_est > 0)))
return visib_est |
class RegNetPreTrainedModel(PreTrainedModel):
config_class = RegNetConfig
base_model_prefix = 'regnet'
main_input_name = 'pixel_values'
supports_gradient_checkpointing = True
def _init_weights(self, module):
if isinstance(module, nn.Conv2d):
nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(module.weight, 1)
nn.init.constant_(module.bias, 0)
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, RegNetModel):
module.gradient_checkpointing = value |
def _circuit_parameter_shift(element: Union[(OpTreeCircuit, QuantumCircuit, OpTreeValue)], parameter: ParameterExpression) -> Union[(None, OpTreeSum, OpTreeValue)]:
if isinstance(element, OpTreeValue):
return OpTreeValue(0.0)
if isinstance(element, OpTreeCircuit):
circuit = element.circuit
input_type = 'leaf'
elif isinstance(element, QuantumCircuit):
circuit = element
input_type = 'circuit'
else:
raise ValueError('element must be a CircuitTreeLeaf or a QuantumCircuit')
circuit = OpTreeDerivative.transpile_to_supported_instructions(circuit)
if (parameter not in circuit._parameter_table):
return OpTreeValue(0.0)
iref_to_data_index = {id(inst.operation): idx for (idx, inst) in enumerate(circuit.data)}
shift_sum = OpTreeSum()
for param_reference in circuit._parameter_table[parameter]:
(original_gate, param_index) = param_reference
m = iref_to_data_index[id(original_gate)]
fac = original_gate.params[0].gradient(parameter)
pshift_circ = copy.deepcopy(circuit)
mshift_circ = copy.deepcopy(circuit)
pshift_gate = pshift_circ.data[m].operation
mshift_gate = mshift_circ.data[m].operation
p_param = pshift_gate.params[param_index]
m_param = mshift_gate.params[param_index]
shift_constant = 0.5
pshift_gate.params[param_index] = (p_param + (np.pi / (4 * shift_constant)))
mshift_gate.params[param_index] = (m_param - (np.pi / (4 * shift_constant)))
if (input_type == 'leaf'):
shift_sum.append(OpTreeCircuit(pshift_circ), (shift_constant * fac))
shift_sum.append(OpTreeCircuit(mshift_circ), ((- shift_constant) * fac))
else:
shift_sum.append(pshift_circ, (shift_constant * fac))
shift_sum.append(mshift_circ, ((- shift_constant) * fac))
return shift_sum |
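A self-contained numeric check of the shift rule applied above (plain NumPy, independent of the OpTree classes): for RY(theta) on |0> the expectation <Z> equals cos(theta), and with shift_constant s = 0.5 the rule s * (f(theta + pi/(4s)) - f(theta - pi/(4s))) recovers the analytic derivative -sin(theta).

import numpy as np

def expectation_z(theta):
    # <0| RY(theta)^dag Z RY(theta) |0> = cos(theta)
    return np.cos(theta)

theta = 0.3
s = 0.5                    # shift_constant, as in the code above
shift = np.pi / (4 * s)    # the +/- shift applied to the gate parameter
grad = s * (expectation_z(theta + shift) - expectation_z(theta - shift))
assert np.isclose(grad, -np.sin(theta))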
class TestFeatureAlign(unittest.TestCase):
def test_caffe_pytorch_feat_align(self):
caffe_feat_path = '/export/home/lxy/cvpalgo-fast-reid/tools/deploy/caffe_R50_output'
pytorch_feat_path = '/export/home/lxy/cvpalgo-fast-reid/demo/logs/R50_256x128_pytorch_feat_output'
feat_filenames = os.listdir(caffe_feat_path)
for feat_name in feat_filenames:
caffe_feat = np.load(os.path.join(caffe_feat_path, feat_name))
pytorch_feat = np.load(os.path.join(pytorch_feat_path, feat_name))
sim = np.dot(caffe_feat, pytorch_feat.transpose())[0][0]
assert (sim > 0.97), f'Got similarity {sim} and feature of {feat_name} is not aligned'
def test_model_performance(self):
caffe_feat_path = '/export/home/lxy/cvpalgo-fast-reid/tools/deploy/caffe_R50_output'
feat_filenames = os.listdir(caffe_feat_path)
feats = []
for feat_name in feat_filenames:
caffe_feat = np.load(os.path.join(caffe_feat_path, feat_name))
feats.append(caffe_feat) |
class ExtEnum():
def __init__(self, enum: Enum, *args, **kargs) -> None:
assert isinstance(enum, Enum)
self.enum = enum
self.args = args
self.__dict__.update(kargs)
self._member_ = kargs.keys()
def name(self):
return self.enum.name
def value(self):
return self.enum.value
def __hash__(self):
return hash(self.enum)
def __eq__(self, other):
return (self.enum == other)
def __repr__(self) -> str:
kargs = {k: self.__dict__[k] for k in self._member_}
return (repr(self.enum) + f'{self.args}{kargs}') |
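A hedged usage sketch: ExtEnum attaches extra payload to an enum member while equality and hashing still track the wrapped member (the names below are illustrative, not from the source).

from enum import Enum

class Color(Enum):
    RED = 1

red = ExtEnum(Color.RED, 'primary', rgb=(255, 0, 0))
assert red == Color.RED               # __eq__ delegates to the wrapped member
assert hash(red) == hash(Color.RED)   # so it hashes into the same dict slots
assert red.rgb == (255, 0, 0)         # keyword arguments become attributes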
class Batch():
def __init__(self):
self.max_num_frames_per_slice = NumbersDict(0)
self.num_slices = 0
self.seqs = []
def __repr__(self):
return ('<Batch start_seq:%r, len(seqs):%i>' % (self.start_seq, len(self.seqs)))
def try_sequence_as_slice(self, length):
return [NumbersDict.max([self.max_num_frames_per_slice, length]), (self.num_slices + 1)]
def add_sequence_as_slice(self, seq_idx, seq_start_frame, length):
(self.max_num_frames_per_slice, self.num_slices) = self.try_sequence_as_slice(length)
self.seqs += [BatchSeqCopyPart(seq_idx=seq_idx, seq_start_frame=seq_start_frame, seq_end_frame=(seq_start_frame + length), batch_slice=(self.num_slices - 1), batch_frame_offset=0)]
def add_frames(self, seq_idx, seq_start_frame, length, frame_dim_corresponds=True):
batch_frame_offset = self.max_num_frames_per_slice
if frame_dim_corresponds:
batch_frame_offset = NumbersDict(batch_frame_offset.max_value())
self.max_num_frames_per_slice = NumbersDict(self.max_num_frames_per_slice.max_value())
self.max_num_frames_per_slice += length
self.num_slices = max(self.num_slices, 1)
self.seqs += [BatchSeqCopyPart(seq_idx=seq_idx, seq_start_frame=seq_start_frame, seq_end_frame=(seq_start_frame + length), batch_slice=0, batch_frame_offset=batch_frame_offset)]
def init_with_one_full_sequence(self, seq_idx, dataset):
assert (not self.seqs)
(start, end) = dataset.get_start_end_frames_full_seq(seq_idx)
self.add_frames(seq_idx=seq_idx, seq_start_frame=start, length=(end - start))
def get_all_slices_num_frames(self):
return (self.max_num_frames_per_slice * self.num_slices)
def get_total_num_frames(self):
return sum([s.frame_length for s in self.seqs])
@property
def start_seq(self):
if (not self.seqs):
return None
return min([s.seq_idx for s in self.seqs])
@property
def end_seq(self):
if (not self.seqs):
return None
return (max([s.seq_idx for s in self.seqs]) + 1)
def get_num_seqs(self):
if (not self.seqs):
return 0
return (self.end_seq - self.start_seq) |
def create_log_df(search_string, log_file_path):
idx = [14, 16, 17, 18, 19, 20, 21, 22]
cols = ['fitness', 'up_time', 'x_dist', 'abs_y_dev', 'avg_footstep', 'var_alpha', 'var_beta', 'var_gamma']
file_list = list()
with open(log_file_path) as f:
for line in f:
if (search_string in line):
file_list.append([float(line.split(' ')[i].replace(',', '')) for i in idx])
df = pd.DataFrame(np.array(file_list).reshape(100, 8), columns=cols)
return df |
def test_two_compatible_by_ones_input_shapes():
data = [[[(1,), (3,)], (3,)], [[(1, 3), (3, 3)], (3, 3)], [[(3, 1), (3, 3)], (3, 3)], [[(1, 3), (3, 1)], (3, 3)], [[(1, 1), (3, 3)], (3, 3)], [[(1, 1), (1, 3)], (1, 3)], [[(1, 1), (3, 1)], (3, 1)], [[(1, 0), (0, 0)], (0, 0)], [[(0, 1), (0, 0)], (0, 0)], [[(1, 0), (0, 1)], (0, 0)], [[(1, 1), (0, 0)], (0, 0)], [[(1, 1), (1, 0)], (1, 0)], [[(1, 1), (0, 1)], (0, 1)]]
for (input_shapes, expected_shape) in data:
assert_shapes_correct(input_shapes, expected_shape)
assert_shapes_correct(input_shapes[::(- 1)], expected_shape) |
class RegistrationCounter():
def __init__(self):
self.nb_calls = 0
def __call__(self, to_register_func):
self.nb_calls += 1
assert (to_register_func.func is _delete_folder) |
def read_jsonl(path: str):
with open(path) as fh:
return [json.loads(line) for line in fh.readlines() if line] |
class MSRVTTQADataset(BaseDataset):
def __init__(self, *args, split='', **kwargs):
assert (split in ['train', 'val', 'test'])
self.split = split
self.metadata = None
self.ans_lab_dict = None
if (split == 'train'):
names = ['msrvtt_qa_train']
elif (split == 'val'):
names = ['msrvtt_qa_test']
elif (split == 'test'):
names = ['msrvtt_qa_test']
super().__init__(*args, **kwargs, names=names, text_column_name='questions', remove_duplicate=False)
self.names = names
self._load_metadata()
def _load_metadata(self):
metadata_dir = './meta_data/msrvtt'
split_files = {'train': 'msrvtt_qa_train.jsonl', 'val': 'msrvtt_qa_val.jsonl', 'test': 'msrvtt_qa_test.jsonl'}
answer_fp = os.path.join(metadata_dir, 'msrvtt_train_ans2label.json')
with open(answer_fp, 'r') as JSON:
self.ans_lab_dict = json.load(JSON)
for name in self.names:
split = name.split('_')[(- 1)]
target_split_fp = split_files[split]
metadata = pd.read_json(os.path.join(metadata_dir, target_split_fp), lines=True)
if (self.metadata is None):
self.metadata = metadata
else:
self.metadata.update(metadata)
print('total {} samples for {}'.format(len(self.metadata), self.names))
def get_text(self, sample):
text = sample['question']
encoding = self.tokenizer(text, padding='max_length', truncation=True, max_length=self.max_text_len, return_special_tokens_mask=True)
return (text, encoding)
def get_answer_label(self, sample):
text = sample['answer']
ans_total_len = (len(self.ans_lab_dict) + 1)
try:
ans_label = self.ans_lab_dict[text]
except KeyError:
ans_label = (- 100)
scores = np.zeros(ans_total_len).astype(int)
scores[ans_label] = 1
return (text, ans_label, scores)
def __getitem__(self, index):
sample = self.metadata.iloc[index]
image_tensor = self.get_video(sample)
text = self.get_text(sample)
qid = index
if (self.split != 'test'):
(answers, labels, scores) = self.get_answer_label(sample)
else:
answers = list()
labels = list()
scores = list()
return {'image': image_tensor, 'text': text, 'vqa_answer': answers, 'vqa_labels': labels, 'vqa_scores': scores, 'qid': qid}
def __len__(self):
return len(self.metadata) |
class FEVERSentenceFormatter(FeverFormatter):
def format_line(self, line):
annotation = line['label']
if (annotation is None):
annotation = line['verifiable']
pages = []
if ('evidence' in line):
pages = [[(ev[2], ev[3]) for ev in evidence_group if (ev[2] is not None)] for evidence_group in line['evidence']]
return {'claim': self.tokenize(line['claim']), 'evidence': pages, 'label': self.label_schema.get_id(annotation), 'label_text': annotation} |
class ChainedScheduler(_LRScheduler):
def __init__(self, schedulers):
for scheduler_idx in range(1, len(schedulers)):
if (schedulers[scheduler_idx].optimizer != schedulers[0].optimizer):
raise ValueError('ChainedScheduler expects all schedulers to belong to the same optimizer, but got schedulers at index {} and {} to be different'.format(0, scheduler_idx))
self._schedulers = list(schedulers)
def step(self):
for scheduler in self._schedulers:
scheduler.step()
def state_dict(self):
state_dict = {key: value for (key, value) in self.__dict__.items() if (key not in ('optimizer', '_schedulers'))}
state_dict['_schedulers'] = ([None] * len(self._schedulers))
for (idx, s) in enumerate(self._schedulers):
state_dict['_schedulers'][idx] = s.state_dict()
return state_dict
def load_state_dict(self, state_dict):
_schedulers = state_dict.pop('_schedulers')
self.__dict__.update(state_dict)
state_dict['_schedulers'] = _schedulers
for (idx, s) in enumerate(_schedulers):
self._schedulers[idx].load_state_dict(s) |
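A hedged usage sketch mirroring torch.optim.lr_scheduler.ChainedScheduler, which this class re-implements: one step() call advances every chained scheduler, so their factors compose. Assumes a torch version that provides ConstantLR.

import torch

model = torch.nn.Linear(2, 2)
opt = torch.optim.SGD(model.parameters(), lr=1.0)
s1 = torch.optim.lr_scheduler.ConstantLR(opt, factor=0.5, total_iters=2)
s2 = torch.optim.lr_scheduler.ExponentialLR(opt, gamma=0.9)
chained = ChainedScheduler([s1, s2])
for _ in range(3):
    opt.step()
    chained.step()   # applies s1 and s2 in sequence each step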
def match_classes(views, sample_level):
(all_features, keys, dataset_size, subset_size, num_matched_classes, nclasses) = match_classes_with_shuffle(views, 0, None, False, False, return_class_dict=True, add_vid=True, align=sample_level, if_shuffle_each_view=False, if_shuffle_classes=True)
return all_features |
class DistilBertTokenizationTest(BertTokenizationTest):
tokenizer_class = DistilBertTokenizer
def get_tokenizer(self, **kwargs):
return DistilBertTokenizer.from_pretrained(self.tmpdirname, **kwargs)
@slow
def test_sequence_builders(self):
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
text = tokenizer.encode('sequence builders', add_special_tokens=False)
text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert (encoded_sentence == (([tokenizer.cls_token_id] + text) + [tokenizer.sep_token_id]))
assert (encoded_pair == (((([tokenizer.cls_token_id] + text) + [tokenizer.sep_token_id]) + text_2) + [tokenizer.sep_token_id])) |
def test_clone_2():
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
selector.own_attribute = 'test'
new_selector = clone(selector)
assert (not hasattr(new_selector, 'own_attribute')) |
def make_features(batch, side, data_type='text'):
assert (side in ['src', 'tgt'])
if isinstance(batch.__dict__[side], tuple):
data = batch.__dict__[side][0]
else:
data = batch.__dict__[side]
feat_start = (side + '_feat_')
keys = sorted([k for k in batch.__dict__ if (feat_start in k)])
features = [batch.__dict__[k] for k in keys]
levels = ([data] + features)
if (data_type == 'text'):
return torch.cat([level.unsqueeze(2) for level in levels], 2)
else:
return levels[0] |
class VecEnv(ABC):
num_envs: int
num_obs: int
num_privileged_obs: int
num_actions: int
max_episode_length: int
privileged_obs_buf: torch.Tensor
obs_buf: torch.Tensor
rew_buf: torch.Tensor
reset_buf: torch.Tensor
episode_length_buf: torch.Tensor
extras: dict
device: torch.device
@abstractmethod
def step(self, actions: torch.Tensor) -> Tuple[(torch.Tensor, Union[(torch.Tensor, None)], torch.Tensor, torch.Tensor, dict)]:
pass
@abstractmethod
def reset(self, env_ids: Union[(list, torch.Tensor)]):
pass
@abstractmethod
def get_observations(self) -> torch.Tensor:
pass
@abstractmethod
def get_privileged_observations(self) -> Union[(torch.Tensor, None)]:
pass |
class PlyLogger(object):
def __init__(self, f):
self.f = f
def debug(self, msg, *args, **kwargs):
self.f.write(((msg % args) + '\n'))
info = debug
def warning(self, msg, *args, **kwargs):
self.f.write((('WARNING: ' + (msg % args)) + '\n'))
def error(self, msg, *args, **kwargs):
self.f.write((('ERROR: ' + (msg % args)) + '\n'))
critical = debug |
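An illustrative use with an in-memory stream: PLY accepts any logger-like object, and PlyLogger simply formats onto the wrapped file.

import io

buf = io.StringIO()
log = PlyLogger(buf)
log.info('parsing %s', 'grammar.y')     # info is aliased to debug
log.warning('unused token %s', 'FOO')
assert buf.getvalue() == 'parsing grammar.y\nWARNING: unused token FOO\n'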
def sysconfig_get_python_inc(plat_specific=0, prefix=None):
if (prefix is None):
prefix = sys.real_prefix
return old_get_python_inc(plat_specific, prefix) |
def find_max_matching(node_weights: Dict[(SimpleNode, float)], edge_weights: Dict[(Tuple[(SimpleNode, SimpleNode)], float)]) -> Tuple[(Dict[(int, int)], float)]:
edges = list(edge_weights.keys())
edge_ratings = {e: edge_rating(e[0], e[1], edge_weights, node_weights) for e in edges}
random.shuffle(edges)
edges = sorted(edges, key=(lambda e: edge_ratings[e]), reverse=True)
pathset = PathSet(node_weights.keys())
for edge in edges:
pathset.add_if_eligible(edge)
max_match = []
max_match_weight = 0
for node in node_weights.keys():
path = pathset.paths[node]
if (not path.active):
continue
if (path.end is not node):
continue
if (path.length == 0):
continue
if path.is_cycle():
unpacked_cycle = unpack_path(pathset, path)
first_edge = unpacked_cycle.pop(0)
(match_a, match_a_weight) = max_path_matching(unpacked_cycle, edge_ratings)
unpacked_cycle.insert(0, first_edge)
last_edge = unpacked_cycle.pop()
(match_b, match_b_weight) = max_path_matching(unpacked_cycle, edge_ratings)
unpacked_cycle.append(last_edge)
if (match_a_weight > match_b_weight):
match = match_a
match_weight = match_a_weight
else:
match = match_b
match_weight = match_b_weight
elif (path.length == 1):
# the path is a single edge; take it whichever direction it is stored in
if (pathset.next_vertex(path.end) is path.start):
edge = pathset.edge_to_next(path.end)
else:
edge = pathset.edge_to_prev(path.end)
(match, match_weight) = ([edge], edge_ratings[edge])
else:
unpacked_path = unpack_path(pathset, path)
(match, match_weight) = max_path_matching(unpacked_path, edge_ratings)
max_match.extend(match)
max_match_weight += match_weight
matching = {u.id: v.id for (u, v) in max_match}
for n in node_weights.keys():
if (n.id not in matching):
matching[n.id] = n.id
return (matching, max_match_weight) |
def _evaluate(env, act, num_eval_ep=500, max_steps=100, verbose=False):
return evaluate(env, act, num_eval_ep, max_steps, verbose)[3] |
def train(rank, num_epochs, world_size):
worker_utils.init_process(rank, world_size)
torch.manual_seed(0)
model = create_model(smp.Unet, 'mobilenet_v2')
torch.cuda.set_device(rank)
model.cuda(rank)
model = DistributedDataParallel(model, device_ids=[rank])
optimizer = torch.optim.Adam(model.parameters(), lr=config.learning_rate)
criterion_dice = smp.losses.DiceLoss(mode='multiclass')
scaler = torch.cuda.amp.GradScaler(enabled=True)
(train_loader, val_loader) = get_dataloader(rank, world_size)
for epoch in range(num_epochs):
losses = metric_utils.AvgMeter()
if (rank == 0):
logging.info('Rank: {}/{} Epoch: [{}/{}]'.format(rank, world_size, (epoch + 1), num_epochs))
model.train()
for batch in train_loader:
with torch.cuda.amp.autocast(enabled=True):
image = batch['image'].cuda(rank, non_blocking=True)
mask = batch['mask'].cuda(rank, non_blocking=True)
pred = model(image)
loss = criterion_dice(pred, mask)
losses.update(loss.cpu().item(), image.size(0))
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad(set_to_none=True)
loss = losses.avg
global_loss = metric_utils.global_meters_all_avg(rank, world_size, loss)
if (rank == 0):
logging.info(f'Epoch: {(epoch + 1)} Train Loss: {global_loss[0]:.3f}')
if ((epoch % 5) == 0):
if (rank == 0):
logging.info('Running evaluation on the validation set.')
model.eval()
losses = metric_utils.AvgMeter()
with torch.no_grad():
for batch in val_loader:
with torch.cuda.amp.autocast(enabled=True):
image = batch['image'].cuda(rank, non_blocking=True)
mask = batch['mask'].cuda(rank, non_blocking=True)
pred = model(image)
loss = criterion_dice(pred, mask)
losses.update(loss.cpu().item(), image.size(0))
loss = losses.avg
global_loss = metric_utils.global_meters_all_avg(rank, world_size, loss)
if (rank == 0):
logging.info(f'Epoch: {(epoch + 1)} Val Loss: {global_loss[0]:.3f}')
if (rank == 0):
torch.save(model.module.state_dict(), f'{config.model_serialization}_{rank}.pth') |
def test_numpyarray():
assert (ak_from_buffers(*ak_to_buffers(ak_Array([1, 2, 3, 4, 5]))).to_list() == [1, 2, 3, 4, 5])
assert (pickle.loads(pickle.dumps(ak_Array([1, 2, 3, 4, 5]), (- 1))).to_list() == [1, 2, 3, 4, 5]) |
def parse_csvy_density(csvy_model_config: Configuration, time_explosion: u.Quantity) -> u.Quantity:
if hasattr(csvy_model_config, 'velocity'):
velocity = quantity_linspace(csvy_model_config.velocity.start, csvy_model_config.velocity.stop, (csvy_model_config.velocity.num + 1)).cgs
else:
velocity_field_index = [field.name for field in csvy_model_config.datatype.fields].index('velocity')
velocity_unit = u.Unit(csvy_model_config.datatype.fields[velocity_field_index].unit)
velocity = (csvy_model_config.velocity.values * velocity_unit)
adjusted_velocity = velocity.insert(0, 0)
v_middle = ((adjusted_velocity[1:] * 0.5) + (adjusted_velocity[:(- 1)] * 0.5))
no_of_shells = (len(adjusted_velocity) - 1)
if hasattr(csvy_model_config, 'density'):
(density_0, time_0) = parse_density_section(csvy_model_config.density, v_middle, time_explosion)
return calculate_density_after_time(density_0, time_0, time_explosion) |
def score_run_dot_product(run_pd_id_emb_agg: dict, q_ids_agg_emb: dict):
run_scores_aggregated_emb = {}
for (q_id, retrieved_list) in run_pd_id_emb_agg.items():
run_scores_aggregated_emb.update({q_id: {}})
q_emb = q_ids_agg_emb.get(q_id)
for (candidate_id, candidate_emb) in retrieved_list.items():
run_scores_aggregated_emb.get(q_id).update({candidate_id: int(np.vdot(q_emb, candidate_emb))})
run_scores_aggregated_emb_sorted = {}
for (q_id, run) in run_scores_aggregated_emb.items():
run_scores_aggregated_emb_sorted.update({q_id: {k: v for (k, v) in sorted(run.items(), key=(lambda item: item[1]), reverse=True)}})
return run_scores_aggregated_emb_sorted |
def randTAH3(shape: list[int]):
s2 = 0.
s3 = 0.
r3 = (s2 * tf.random.normal(shape, dtype=TF_FLOAT))
r8 = ((s2 * s3) * tf.random.normal(shape, dtype=TF_FLOAT))
m00 = tf.dtypes.complex(tf.cast(0.0, TF_FLOAT), (r8 + r3))
m11 = tf.dtypes.complex(tf.cast(0.0, TF_FLOAT), (r8 - r3))
m22 = tf.dtypes.complex(tf.cast(0.0, TF_FLOAT), ((- 2) * r8))
r01 = (s2 * tf.random.normal(shape, dtype=TF_FLOAT))
r02 = (s2 * tf.random.normal(shape, dtype=TF_FLOAT))
r12 = (s2 * tf.random.normal(shape, dtype=TF_FLOAT))
i01 = (s2 * tf.random.normal(shape, dtype=TF_FLOAT))
i02 = (s2 * tf.random.normal(shape, dtype=TF_FLOAT))
i12 = (s2 * tf.random.normal(shape, dtype=TF_FLOAT))
m01 = tf.dtypes.complex(r01, i01)
m10 = tf.dtypes.complex((- r01), i01)
m02 = tf.dtypes.complex(r02, i02)
m20 = tf.dtypes.complex((- r02), i02)
m12 = tf.dtypes.complex(r12, i12)
m21 = tf.dtypes.complex((- r12), i12)
return tf.stack([tf.stack([m00, m10, m20], axis=(- 1)), tf.stack([m01, m11, m21], axis=(- 1)), tf.stack([m02, m12, m22], axis=(- 1))], axis=(- 1)) |
def check_edges(graph, source, target, num, isExtra=None):
edges = [edge for edge in graph.edge if ((edge.source == source) and (edge.target == target))]
assert (len(edges) == num)
if (num == 1):
assert (edges[0].isExtra == isExtra) |
def create_optimizer(args, model: nn.Module, optimizer: Optional[optim.Optimizer]=None):
opt_model = model
if (optimizer is None):
decay_parameters = get_parameter_names(opt_model, ALL_LAYERNORM_LAYERS)
decay_parameters = [name for name in decay_parameters if ('bias' not in name)]
optimizer_grouped_parameters = [{'params': [p for (n, p) in opt_model.named_parameters() if ((n in decay_parameters) and p.requires_grad)], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in opt_model.named_parameters() if ((n not in decay_parameters) and p.requires_grad)], 'weight_decay': 0.0}]
(optimizer_cls, optimizer_kwargs) = Trainer.get_optimizer_cls_and_kwargs(args)
optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
if (optimizer_cls.__name__ == 'Adam8bit'):
import bitsandbytes
manager = bitsandbytes.optim.GlobalOptimManager.get_instance()
skipped = 0
for module in opt_model.modules():
if isinstance(module, nn.Embedding):
skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values())
print(f'skipped {module}: {(skipped / (2 ** 20))}M params')
manager.register_module_override(module, 'weight', {'optim_bits': 32})
logger.debug(f'bitsandbytes: will optimize {module} in fp32')
print(f'skipped: {(skipped / (2 ** 20))}M params')
return optimizer |
def _limit_lengths(seqs, max_length=None, max_tokens=None):
max_length = (max_length or float('inf'))
lengths = [min(s.nelement(), max_length) for s in seqs]
if (max_tokens is not None):
num_tokens = sum(lengths)
if (num_tokens > max_tokens):
max_length = (max_tokens // len(seqs))
lengths = [min(length, max_length) for length in lengths]
return lengths |
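A small illustration (hypothetical inputs): max_length caps each sequence individually, while max_tokens caps via an even per-sequence share of the token budget.

import torch

seqs = [torch.zeros(10), torch.zeros(20), torch.zeros(30)]
assert _limit_lengths(seqs) == [10, 20, 30]                  # no caps
assert _limit_lengths(seqs, max_length=25) == [10, 20, 25]   # per-sequence cap
assert _limit_lengths(seqs, max_tokens=30) == [10, 10, 10]   # 30 // 3 per sequence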
class RuleList(BayesianRuleList):
def __init__(self, min_rule_len=1, max_rule_len=2, min_support=0.02, lambda_=20, eta=1, iters=30000, n_chains=30, alpha=1, fim_method='eclat', feature_names=None, category_names=None, seed=None, verbose=0, discretize_method='mdlp', numeric_features=None):
super(RuleList, self).__init__(min_rule_len=min_rule_len, max_rule_len=max_rule_len, min_support=min_support, lambda_=lambda_, eta=eta, iters=iters, n_chains=n_chains, alpha=alpha, fim_method=fim_method, feature_names=feature_names, category_names=category_names, seed=seed)
self.discretize_method = discretize_method
self.numeric_features = numeric_features
self.discretizer = None
def _validate_discretizer(self):
if (type(self.discretize_method) == str):
self.discretizer = get_discretizer(self.discretize_method, continuous_features=self.numeric_features, random_state=self.seed)
else:
self.discretizer = self.discretize_method
if (not hasattr(self.discretizer, 'fit')):
raise ValueError('discretizer should have method fit!')
if (not hasattr(self.discretizer, 'transform')):
raise ValueError('discretizer should have method transform!')
def fit(self, X, y):
self._validate_discretizer()
self.discretizer.fit(X, y)
X_disc = self.discretizer.transform(X)
self.category_names = compute_mdlp_all_intervals(self.discretizer)
super(RuleList, self).fit(X_disc, y)
return self
def predict_proba(self, x):
if (self.discretizer is not None):
x = self.discretizer.transform(x)
return super(RuleList, self).predict_proba(x)
def caught_matrix(self, x):
if (self.discretizer is not None):
x = self.discretizer.transform(x)
return super(RuleList, self).caught_matrix(x)
def decision_path(self, x):
if (self.discretizer is not None):
x = self.discretizer.transform(x)
return super(RuleList, self).decision_path(x)
def explain(self, x, trace_all=False):
x = x.reshape(1, (- 1))
assert (x.shape[1] == self.n_features)
decision_path = self.decision_path(x)
queried_rules = np.arange(self.n_rules)[decision_path]
if trace_all:
return [self.rule_list[i] for i in queried_rules]
return self.rule_list[queried_rules[(- 1)]] |
@nox.session(python=ALL_PYTHONS, reuse_venv=True)
def tests(session):
session.install('-r', 'requirements-test-full.txt', './awkward-cpp', '.')
session.run('pytest', *(session.posargs if session.posargs else ['tests'])) |
def recursive_glob(rootdir='.', suffix=''):
image_paths = []
for (looproot, _, filenames) in os.walk(rootdir):
for filename in filenames:
if filename.endswith(suffix):
image_paths.append(os.path.join(looproot, filename))
return image_paths |
def generate_cubic():
generator = GenericCurveGenerator(width=img_width, height=img_height)
generator.saltpepper = 0.9
generator.curvetype = 'cubic'
generator.max_consecutive_distance = 15
prefix = generator.generate_filename_prefix()
generator.output_file = create_new_absolute_filename(('Cubic-' + prefix))
generator.generate_curve() |
def handle_interrupted(context: ExecutionContext, event: events.Interrupted) -> None:
click.echo()
context.is_interrupted = True
display_section_name('KeyboardInterrupt', '!', bold=False) |
def EncoderTest(verbose=True):
shape = (2, 4, 224, 224)
encoder = WNet.UEnc(shape[1])
data = torch.rand((shape[0], 3, shape[2], shape[3]))
encoded = encoder(data)
assert (tuple(encoded.shape) == shape)
var = torch.var(encoded)
mean = torch.mean(encoded)
if verbose:
print(('Passed Encoder Test with var=%s and mean=%s' % (var, mean)))
return encoded |
def test_flatten_array_with_prefix():
result = flatten_array([['foo', 'bar'], 'tar'], prefix='test')
expected = {'test__0__0': 'foo', 'test__0__1': 'bar', 'test__1': 'tar'}
assert (result == expected) |
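flatten_array itself is not included here; a minimal sketch consistent with the expected output above (an assumption, not the project's implementation):

def flatten_array_sketch(arr, prefix=''):
    # Flatten nested lists into a dict keyed by '__'-joined indices.
    out = {}
    for i, item in enumerate(arr):
        key = f'{prefix}__{i}' if prefix else str(i)
        if isinstance(item, list):
            out.update(flatten_array_sketch(item, prefix=key))
        else:
            out[key] = item
    return out

assert flatten_array_sketch([['foo', 'bar'], 'tar'], prefix='test') == {
    'test__0__0': 'foo', 'test__0__1': 'bar', 'test__1': 'tar'}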
def segRead(fn, start, end):
with open(fn, 'r') as fo:
return fo.readlines()[start:end] |
class HyperbolicPlane(Parent, UniqueRepresentation):
def __init__(self):
Parent.__init__(self, category=Sets().Metric().WithRealizations())
self.a_realization()
def _repr_(self):
return 'Hyperbolic plane'
def a_realization(self):
return self.UHP()
UHP = HyperbolicModelUHP
UpperHalfPlane = UHP
PD = HyperbolicModelPD
PoincareDisk = PD
KM = HyperbolicModelKM
KleinDisk = KM
HM = HyperbolicModelHM
Hyperboloid = HM |
class SpeakerClassifiDataset(Dataset):
def __init__(self, mode, file_path, meta_data, max_timestep=None):
self.root = file_path
self.speaker_num = 1251
self.meta_data = meta_data
self.max_timestep = max_timestep
with open(self.meta_data, 'r') as f:
self.usage_list = f.readlines()
cache_path = os.path.join(CACHE_PATH, f'{mode}.pkl')
if os.path.isfile(cache_path):
print(f'[SpeakerClassifiDataset] - Loading file paths from {cache_path}')
with open(cache_path, 'rb') as cache:
dataset = pickle.load(cache)
else:
dataset = getattr(self, mode)()
os.makedirs(os.path.dirname(cache_path), exist_ok=True)
with open(cache_path, 'wb') as cache:
pickle.dump(dataset, cache)
print(f'[SpeakerClassifiDataset] - there are {len(dataset)} files found')
self.dataset = dataset
self.label = self.build_label(self.dataset)
def build_label(self, train_path_list):
y = []
for path in train_path_list:
id_string = path.split('/')[(- 3)]
y.append((int(id_string[2:]) - 10001))
return y
def label2speaker(self, labels):
return [f'id{(label + 10001)}' for label in labels]
def train(self):
dataset = []
print('search specified wav name for training set')
for string in tqdm.tqdm(self.usage_list):
pair = string.split()
index = pair[0]
if (int(index) == 1):
x = list(self.root.glob(('*/wav/' + pair[1])))
dataset.append(str(x[0]))
print('finish searching training set wav')
return dataset
def dev(self):
dataset = []
print('search specified wav name for dev set')
for string in tqdm.tqdm(self.usage_list):
pair = string.split()
index = pair[0]
if (int(index) == 2):
x = list(self.root.glob(('*/wav/' + pair[1])))
dataset.append(str(x[0]))
print('finish searching dev set wav')
return dataset
def test(self):
dataset = []
print('search specified wav name for test set')
for string in tqdm.tqdm(self.usage_list):
pair = string.split()
index = pair[0]
if (int(index) == 3):
x = list(self.root.glob(('*/wav/' + pair[1])))
dataset.append(str(x[0]))
print('finish searching test set wav')
return dataset
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
(wav, sr) = torchaudio.load(self.dataset[idx])
wav = wav.squeeze(0)
length = wav.shape[0]
if (self.max_timestep is not None):
if (length > self.max_timestep):
start = random.randint(0, int((length - self.max_timestep)))
wav = wav[start:(start + self.max_timestep)]
length = self.max_timestep
def path2name(path):
return Path('-'.join(Path(path).parts[(- 3):])).stem
path = self.dataset[idx]
return (wav.numpy(), self.label[idx], path2name(path))
def collate_fn(self, samples):
return zip(*samples) |
def test_count_all_paths_with_label_seq_partly_dominated(recalc=False, check=False, check_with_factor=False):
fsa = get_std_fsa_1label()
n_ = 4
n = sympy.Symbol('n', integer=True, positive=True)
factor = sympy.Symbol('fact', real=True, positive=True)
res = count_all_paths_with_label_seq_partly_dominated_inefficient(fsa=fsa, label_seq_template=Label1StrTemplate, dom_label=BlankLabel, n=n_, prob_dom=0.5, normalized=False, verbosity=2)
print(res)
if recalc:
res_ = count_all_paths_with_label_seq_partly_dominated(fsa=fsa, label_seq_template=Label1StrTemplate, dom_label=BlankLabel, n=n_, factor=1)
else:
res_ = {('B', 'a'): {'B': (((2 * n) * ((13 * (n ** 2)) - 1)) / 3), 'a': (((2 * n) * (((11 * (n ** 2)) + (6 * n)) + 1)) / 3)}, ('a', 'B'): {'B': (((2 * n) * ((19 * (n ** 2)) - 1)) / 3), 'a': (((2 * n) * (((5 * (n ** 2)) + (6 * n)) + 1)) / 3)}}
print(res_)
for (key, counts) in res_.items():
assert (set(counts.keys()) == {BlankLabel, Label1})
for (label, c) in counts.items():
c__ = c.subs(n, n_).doit()
print(('%s, label %s ->' % (key, label)), c, '=', c__)
assert (c__ == res[key][label])
c = fixed_factor_power = sympy.Symbol('c', integer=True, nonnegative=True)
if recalc:
res__ = count_all_paths_with_label_seq_partly_dominated(fsa=fsa, label_seq_template=Label1StrTemplate, dom_label=BlankLabel, n=n_, factor=1, fixed_factor_power=fixed_factor_power)
else:
res__ = {('B', 'a'): {'B': sympy.Piecewise(((((2 * n) * ((4 * (n ** 2)) - 1)) / 3), sympy.Eq(c, (2 * n))), (((2 * n) * (c + (2 * n))), sympy.Eq((c - (2 * n)), (- 1))), (((2 * n) * ((2 * c) + 1)), ((c >= n) & ((c - (2 * n)) <= (- 1)))), (((4 * n) * ((c - n) + 1)), (((c - n) >= (- 1)) & ((c - (2 * n)) <= (- 1)))), (0, True)), 'a': sympy.Piecewise(((((2 * n) * (((2 * (n ** 2)) + (3 * n)) + 1)) / 3), sympy.Eq(c, (2 * n))), (((2 * n) * ((- c) + (4 * n))), ((c >= n) & ((c - (2 * n)) <= (- 1)))), (((2 * n) * (c + 1)), ((c - n) <= (- 1))), (((2 * n) * (((- c) + (2 * n)) - 1)), (((c - n) >= (- 1)) & ((c - (2 * n)) <= (- 1)))), (0, True))}, ('a', 'B'): {'B': sympy.Piecewise(((n * (((n ** 2) + (2 * n)) + 1)), sympy.Eq(c, 0)), (((n * (((5 * (n ** 2)) + (3 * n)) - 2)) / 3), sympy.Eq(c, (2 * n))), ((n * (((2 * c) + (3 * n)) + 1)), ((c - (2 * n)) <= (- 1))), ((4 * (n ** 2)), sympy.Eq((c - (2 * n)), (- 1))), (0, True)), 'a': sympy.Piecewise(((n * (((n ** 2) + (2 * n)) + 1)), sympy.Eq(c, 0)), (((n * (((n ** 2) + (3 * n)) + 2)) / 3), sympy.Eq(c, (2 * n))), ((n * (n + 1)), ((c - (2 * n)) <= (- 1))), (0, True))}}
print(res__)
for (key, counts) in res__.items():
assert (set(counts.keys()) == {BlankLabel, Label1})
for (label, c) in counts.items():
print(('%s, label %s ->' % (key, label)), c)
if check:
c_sum = 0
for fixed_count in range(0, ((2 * n_) + 1)):
c__ = c.subs({n: n_, fixed_factor_power: fixed_count}).doit()
print((' c %i -> %i' % (fixed_count, c__)))
c_sum += c__
assert (c_sum == res[key][label])
d = (counts[BlankLabel] - counts[Label1])
d = d.simplify()
d = sympy_utils.simplify_and(d)
print('diff:', d)
for i in range(3):
print(('assume ((4 * n - 1 + i) / 3) natural number, i = %i.' % i))
print((' first pos i%i' % i), d.subs(fixed_factor_power, ((((4 * n) - 1) + i) / 3)).simplify())
print((' last neg i%i' % i), d.subs(fixed_factor_power, (((((4 * n) - 1) + i) / 3) - 1)).simplify())
s_pos = sympy_utils.sum_over_piecewise(d, fixed_factor_power, ((((4 * n) - 1) + i) / 3), (2 * n), extra_condition=sympy.Ge(n, 4))
print((' diff sum c={(4n-1+%i)/3}^2n:' % i), s_pos)
s_neg = sympy_utils.sum_over_piecewise(d, fixed_factor_power, 0, (((((4 * n) - 1) + i) / 3) - 1), extra_condition=sympy.Ge(n, 4))
print((' diff sum c=0^{(4n-1+%i)/3-1}:' % i), s_neg)
print(' tot:', (s_pos + s_neg).simplify())
s = sympy_utils.sum_over_piecewise(d, fixed_factor_power, 0, (2 * n))
print('diff sum c=0^2n:', s)
s = sympy_utils.sum_over_piecewise(d, fixed_factor_power, n, (2 * n))
print('diff sum c=n^2n:', s)
s = sympy_utils.sum_over_piecewise(d, fixed_factor_power, 0, (n - 1))
print('diff sum c=0^{n-1}:', s)
s = sympy_utils.sum_over_piecewise(counts[BlankLabel], fixed_factor_power, 0, (2 * n))
print('blank sum c=0^2n:', s)
if check_with_factor:
res = count_all_paths_with_label_seq_partly_dominated_inefficient(fsa=fsa, label_seq_template=Label1StrTemplate, dom_label=BlankLabel, n=n_, prob_dom=0.6, verbosity=1)
print(res)
res_ = count_all_paths_with_label_seq_partly_dominated(fsa=fsa, label_seq_template=Label1StrTemplate, dom_label=BlankLabel, n=n, factor=factor)
print(res_)
for (key, counts) in res_.items():
print('key:', key)
assert (set(counts.keys()) == {BlankLabel, Label1})
d = (counts[BlankLabel] - counts[Label1])
print(d) |
def test_sign_synthetic_policy_continuous():
with pytest.raises(ValueError):
context = np.array([1.0, 1.0])
sign_synthetic_policy_continuous(context=context)
with pytest.raises(ValueError):
context = [1.0, 1.0]
sign_synthetic_policy_continuous(context=context)
n_rounds = 10
dim_context = 3
context = np.ones([n_rounds, dim_context])
continuous_actions = sign_synthetic_policy_continuous(context=context)
assert ((continuous_actions.shape[0] == n_rounds) and (continuous_actions.ndim == 1)) |
def main(args):
verbose = args.verbose
num_threads = args.num_threads
from topaz.torch import set_num_threads
set_num_threads(num_threads)
use_cuda = topaz.cuda.set_device(args.device)
from topaz.model.factory import load_model
model = load_model(args.model)
model.eval()
model.fill()
if use_cuda:
model.cuda()
destdir = args.destdir
if (not os.path.exists(destdir)):
os.makedirs(destdir)
for path in args.paths:
basename = os.path.basename(path)
image_name = os.path.splitext(basename)[0]
image = load_image(path)
with torch.no_grad():
X = torch.from_numpy(np.array(image, copy=False)).unsqueeze(0).unsqueeze(0)
if use_cuda:
X = X.cuda()
score = model(X).data[(0, 0)].cpu().numpy()
im = Image.fromarray(score)
path = (os.path.join(destdir, image_name) + '.tiff')
if verbose:
print('# saving:', path)
im.save(path, 'tiff') |
def format_checker(input_folder_path):
input_files = glob.glob((input_folder_path + '*.jsonl'))
assert (len(input_files) == 5), 'missing prediction files - should be 5 files'
for each_file in input_files:
curr_category_name = each_file.split('/')[(- 1)].split('-')[(- 1)].replace('.jsonl', '')
assert (curr_category_name in ['positive', 'negative', 'can_not_test', 'death', 'cure']), 'check your event category name.'
format_checker_each_file(curr_category_name, each_file)
return None |
def parse(content):
header = content[0:1024]
header = MRCHeader._make(header_struct.unpack(content[:1024]))
extbytes = header.next
start = (1024 + extbytes)
extended_header = content[1024:start]
content = content[start:]
if (header.mode == 0):
dtype = np.int8
elif (header.mode == 1):
dtype = np.int16
elif (header.mode == 2):
dtype = np.float32
elif (header.mode == 3):
dtype = '2h'
elif (header.mode == 4):
dtype = np.complex64
elif (header.mode == 6):
dtype = np.uint16
elif (header.mode == 16):
dtype = '3B'
array = np.frombuffer(content, dtype=dtype)
array = np.reshape(array, (header.nz, header.ny, header.nx))
if (header.nz == 1):
array = array[0]
return (array, header, extended_header) |
class CustomLoaderCallback(Callback):
def __init__(self, loading_dir: str):
super(CustomLoaderCallback, self).__init__()
self.loading_dir = loading_dir
def set_model(self, model):
self.model = model
print('-- Loading ', self.loading_dir)
self.model.load_weights(os.path.join(self.loading_dir, MODEL_WEIGHTS_FILENAME))
with open(os.path.join(self.loading_dir, OPTIMIZER_WEIGHTS_FILENAME), 'rb') as f:
optimizer_weights = pickle.load(f)
with open(os.path.join(self.loading_dir, LEARNING_RATE_FILENAME), 'rb') as f:
learning_rate = pickle.load(f)
self.model.optimizer.learning_rate.assign(learning_rate)
self.model._make_train_function()
self.model.optimizer.set_weights(optimizer_weights) |
def __stable_idx_answer(shape, zoom, tile_size=256):
dim0_tile_fraction = (shape[0] / tile_size)
dim1_tile_fraction = (shape[1] / tile_size)
if ((dim0_tile_fraction < 1) or (dim1_tile_fraction < 1)):
raise StopIteration()
num_tiles_dim0 = int(np.ceil(dim0_tile_fraction))
num_tiles_dim1 = int(np.ceil(dim1_tile_fraction))
tile_idxs_dim0 = [(i * tile_size) for i in range((num_tiles_dim0 + 1))]
tile_idxs_dim1 = [(i * tile_size) for i in range((num_tiles_dim1 + 1))]
pair_runner = (lambda coll: [slice(c0, c1) for (c0, c1) in zip(coll[:(- 1)], coll[1:])])
row_slices = pair_runner(tile_idxs_dim0)
col_slices = pair_runner(tile_idxs_dim1)
rows = zip(range((num_tiles_dim0 - 1), (- 1), (- 1)), row_slices)
cols = enumerate(col_slices)
rows_cols = product(rows, cols)
def transform_iteration(row_col):
((y, slice_y), (x, slice_x)) = row_col
return (zoom, y, x, slice_y, slice_x)
return map(transform_iteration, rows_cols) |
class LispFunction(ExpectFunction):
def _instancedoc_(self):
M = self._parent
return M.help(self._name) |
class RCToMLTBijectionTypeB(RCToKRTBijectionTypeB):
def run(self):
letters = CrystalOfLetters(self.rigged_con.parent()._cartan_type.classical())
ret_crystal_path = []
while self.cur_dims:
dim = self.cur_dims[0]
ret_crystal_path.append([])
if (dim[0] == self.n):
self.cur_dims.pop(1)
while (dim[0] > 0):
dim[0] -= 1
b = self.next_state(dim[0])
ret_crystal_path[(- 1)].append(letters(b))
self.cur_dims.pop(0)
return ret_crystal_path |
def load_pytorch_weights_in_tf2_model(tf_model, pt_state_dict, tf_inputs=None, allow_missing_keys=False, output_loading_info=False, _prefix=None, tf_to_pt_weight_rename=None):
try:
import tensorflow as tf
import torch
except ImportError:
logger.error('Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions.')
raise
pt_state_dict = {k: v.numpy() for (k, v) in pt_state_dict.items()}
return load_pytorch_state_dict_in_tf2_model(tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys, output_loading_info=output_loading_info, _prefix=_prefix, tf_to_pt_weight_rename=tf_to_pt_weight_rename) |
def get_number_of_params_path(model, nodes, print_on=False, include_routers=True):
(names, params) = ([], [])
if include_routers:
for (name, param) in model.named_parameters():
if (((('.' + str(nodes[(- 1)])) + '.classifier') in name) or any([((('.' + str(node)) + '.transform') in name) for node in nodes]) or any([((('.' + str(node)) + '.router') in name) for node in nodes[:(- 1)]])):
names.append(name)
params.append(param)
else:
for (name, param) in model.named_parameters():
if (((('.' + str(nodes[(- 1)])) + '.classifier') in name) or any([((('.' + str(node)) + '.transform') in name) for node in nodes])):
names.append(name)
params.append(param)
if print_on:
print('\nCount the number of parameters below: ')
for name in names:
print((' ' + name))
return sum((p.numel() for p in params)) |
def TStrUtil_StripEnd(Str, SearchStr, NewStr):
return _snap.TStrUtil_StripEnd(Str, SearchStr, NewStr) |
def _default_generator_blocks():
return [Block(64, 0.5), Block(128, 0.5), Block(256, 0.5), Block(512, 0), Block(512, 0), Block(512, 0), Block(512, 0)] |