code stringlengths 101 5.91M |
|---|
def test_cascade_run():
    """End-to-end QKD test: BB84 + Cascade between two simulated nodes.

    Builds a two-node topology (alice, bob) connected by quantum and
    classical channels, generates KEYNUM keys of KEYSIZE bits, and asserts
    that both sides agree on every key with zero residual bit errors.
    """
    KEYSIZE = 512  # bits per generated key
    KEYNUM = 10    # number of keys to generate
    tl = Timeline(.0)
    alice = QKDNode('alice', tl)
    bob = QKDNode('bob', tl)
    # identical seeds make the simulated run deterministic
    alice.set_seed(0)
    bob.set_seed(0)
    # protocol_stack[0] is BB84, protocol_stack[1] is Cascade -- pair the peers
    pair_bb84_protocols(alice.protocol_stack[0], bob.protocol_stack[0])
    pair_cascade_protocols(alice.protocol_stack[1], bob.protocol_stack[1])
    # one quantum + one classical channel in each direction
    qc0 = QuantumChannel('qc0', tl, distance=1000.0, attenuation=2e-05, polarization_fidelity=0.97)
    qc1 = QuantumChannel('qc1', tl, distance=1000.0, attenuation=2e-05, polarization_fidelity=0.97)
    qc0.set_ends(alice, bob.name)
    qc1.set_ends(bob, alice.name)
    cc0 = ClassicalChannel('cc0', tl, distance=1000.0)
    cc1 = ClassicalChannel('cc1', tl, distance=1000.0)
    cc0.set_ends(alice, bob.name)
    cc1.set_ends(bob, alice.name)
    # upper-layer stand-ins that collect the finished keys
    pa = Parent(alice, KEYSIZE, KEYNUM)
    pb = Parent(bob, KEYSIZE, KEYNUM)
    alice.protocol_stack[1].upper_protocols.append(pa)
    pa.lower_protocols.append(alice.protocol_stack[1])
    bob.protocol_stack[1].upper_protocols.append(pb)
    pb.lower_protocols.append(bob.protocol_stack[1])
    # schedule key generation at t=0 and run the simulation to completion
    process = Process(pa, 'push', [])
    event = Event(0, process)
    tl.schedule(event)
    tl.init()
    tl.run()
    assert (pa.counter == pb.counter == KEYNUM)
    for (k1, k2) in zip(pa.keys, pb.keys):
        assert (k1 == k2)
        # each key must fit in KEYSIZE bits
        assert (k1 < (2 ** KEYSIZE))
    assert (alice.protocol_stack[1].error_bit_rate == 0)
def test_merge_examples_with_body_examples():
    """Body examples alone should produce one merged entry per body example."""
    body = {'type': 'body', 'examples': [{'foo': 'example1'}, {'foo': 'example2'}, {'foo': 'example3'}]}
    merged = examples.merge_examples([], body)
    assert len(merged) == 3
    for static_parameters in merged:
        assert 'body' in static_parameters
        assert 'foo' in static_parameters['body']
class PyDown(gdb.Command):
    """gdb command 'py-down': move one Python frame down the stack."""
    def __init__(self):
        # register as a stack command with no argument completion
        gdb.Command.__init__(self, 'py-down', gdb.COMMAND_STACK, gdb.COMPLETE_NONE)
    def invoke(self, args, from_tty):
        # delegate to the shared frame-walking helper; move_up=False walks down
        move_in_stack(move_up=False)
def get_bias_metric_specs() -> List[MetricSpec]:
    """Build BiasMetric specs: one association metric per (demographic, target)
    pair plus one representation metric per demographic category."""
    demographic_categories = ['race', 'gender']
    target_categories = ['adjective', 'profession']
    association_specs = [
        MetricSpec(class_name='helm.benchmark.metrics.bias_metrics.BiasMetric', args={'mode': 'associations', 'demographic_category': dem, 'target_category': tgt})
        for (dem, tgt) in itertools.product(demographic_categories, target_categories)
    ]
    representation_specs = [
        MetricSpec(class_name='helm.benchmark.metrics.bias_metrics.BiasMetric', args={'mode': 'representation', 'demographic_category': dem})
        for dem in demographic_categories
    ]
    return association_specs + representation_specs
def compute_average_precision_detection_wrapper(input_triple, tiou_thresholds=np.linspace(0.05, 0.95, 10)):
    """Unpack a (query, ground_truth, prediction) triple and return (query, AP scores)."""
    query, ground_truth, prediction = input_triple
    ap_scores = compute_average_precision_detection(ground_truth, prediction, tiou_thresholds=tiou_thresholds)
    return query, ap_scores
# NOTE(review): the next line looks like a truncated registry decorator
# (e.g. '@<REGISTRY>.register_module()'); as written it is a bare call whose
# result is discarded -- restore the decorator from the original source.
_module()
class PointRend(TwoStageDetector):
    """Two-stage detector assembled entirely from config by the base class."""
    def __init__(self, backbone, rpn_head, roi_head, train_cfg, test_cfg, neck=None, pretrained=None, init_cfg=None):
        # all construction is delegated to the generic two-stage detector
        super(PointRend, self).__init__(backbone=backbone, neck=neck, rpn_head=rpn_head, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg)
def max_pool1d(input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False):
    """TorchScript-friendly wrapper around torch.nn.functional.max_pool1d.

    return_indices is unsupported and raises NotImplementedError.
    """
    if return_indices:
        raise NotImplementedError('return_indices is not yet implemented!')
    # TorchScript wants an annotated empty list (not None) to mean "default stride"
    effective_stride = torch.jit.annotate(List[int], []) if stride is None else stride
    return torch.nn.functional.max_pool1d(input, kernel_size, effective_stride, padding, dilation, ceil_mode, return_indices)
class Bottleneck(nn.Module):
    """ResNet-style bottleneck residual block: 1x1 reduce, 3x3, 1x1 expand (x4)."""
    expansion: int = 4

    def __init__(self, c1, c2, s=1, downsample=None) -> None:
        super().__init__()
        # 1x1 channel reduction
        self.conv1 = nn.Conv2d(c1, c2, 1, 1, 0, bias=False)
        self.bn1 = nn.BatchNorm2d(c2)
        # 3x3 spatial convolution with stride s
        self.conv2 = nn.Conv2d(c2, c2, 3, s, 1, bias=False)
        self.bn2 = nn.BatchNorm2d(c2)
        # 1x1 expansion to c2 * expansion channels
        self.conv3 = nn.Conv2d(c2, c2 * self.expansion, 1, 1, 0, bias=False)
        self.bn3 = nn.BatchNorm2d(c2 * self.expansion)
        self.downsample = downsample

    def forward(self, x: Tensor) -> Tensor:
        # shortcut path, optionally projected to match channels/stride
        shortcut = x if self.downsample is None else self.downsample(x)
        y = F.relu(self.bn1(self.conv1(x)))
        y = F.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y = y + shortcut
        return F.relu(y)
def log_nucleus_multinomial_sample(x, size=1, nucleus_p=np.log(0.95)):
    """Draw `size` indices from log-weights `x`, restricted to the nucleus
    (top-p) set whose cumulative mass reaches exp(nucleus_p) of the total.

    Args:
        x: 1-D array of log-weights (need not be normalized).
        size: number of samples to draw.
        nucleus_p: log of the nucleus mass fraction; must be <= 0.

    Returns:
        List of `size` indices into `x`.
    """
    assert (nucleus_p <= 0)
    if (len(x) == 1):
        # single candidate: every draw is index 0
        return ([0] * size)
    # sort log-weights in descending order, remembering original positions
    inds = np.argsort((- x))
    sortedx = x[inds]
    # c[i] = log of the cumulative weight of the i+1 largest entries
    c = np.logaddexp.accumulate(sortedx)
    # first position where cumulative mass reaches the threshold
    # (c[-1] is the total log-mass, used to normalize the target)
    last_ind = bisect(c, (nucleus_p + c[(- 1)]))
    idxs = []
    for i in range(size):
        # inverse-CDF sampling within the truncated (nucleus) distribution
        key = (np.log(np.random.uniform()) + c[last_ind])
        idxs.append(inds[bisect(c, key)])
    return idxs
class SE(nn.Module):
    """Squeeze-and-Excitation block: channel-wise gating from a global pool."""

    def __init__(self, in_planes, se_planes):
        super(SE, self).__init__()
        # squeeze to se_planes channels, then excite back to in_planes
        self.se1 = nn.Conv2d(in_planes, se_planes, kernel_size=1, bias=True)
        self.se2 = nn.Conv2d(se_planes, in_planes, kernel_size=1, bias=True)

    def forward(self, x):
        # global average pool -> bottleneck (1x1 convs) -> sigmoid gate
        gate = F.adaptive_avg_pool2d(x, (1, 1))
        gate = F.relu(self.se1(gate))
        gate = self.se2(gate).sigmoid()
        # rescale the input channels by the learned gate
        return x * gate
def onehot(indexes, N=None, ignore_index=None):
    """One-hot encode an integer tensor.

    Args:
        indexes: integer tensor of arbitrary shape holding class indices.
        N: number of classes; inferred as max(indexes) + 1 when None.
        ignore_index: optional index whose positions are zeroed in the output.

    Returns:
        uint8 tensor of shape indexes.shape + (N,) with a 1 at each index slot.
    """
    if N is None:
        # infer class count from the data as a plain Python int
        # (the old code could leave N as a 0-dim tensor)
        N = int(indexes.max().item()) + 1
    sz = list(indexes.size())
    # allocate directly instead of the deprecated .new().byte().resize_() chain
    output = torch.zeros(*sz, N, dtype=torch.uint8, device=indexes.device)
    output.scatter_(-1, indexes.unsqueeze(-1), 1)
    if ignore_index is not None and ignore_index >= 0:
        # zero out every position that held the ignored index
        output.masked_fill_(indexes.eq(ignore_index).unsqueeze(-1), 0)
    return output
def parse_arguments():
    """Parse the command line: an optional --event file and --dir directory."""
    parser = argparse.ArgumentParser(description='')
    for flag, help_text in (('--event', 'event file'), ('--dir', 'event directory')):
        parser.add_argument(flag, help=help_text, required=False)
    return parser.parse_args()
def conv_bn_no_relu(inp, oup, stride):
    """3x3 conv (padding 1, no bias) followed by BatchNorm, with no activation."""
    layers = [
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
        nn.BatchNorm2d(oup),
    ]
    return nn.Sequential(*layers)
class BiaffineScorer2(nn.Module):
    """Biaffine attention scorer over two MLP-projected input sequences."""

    def __init__(self, n_in_a=800, n_in_b=800, n_out=400, n_out_label=1, bias_x=False, bias_y=False, scaling=False, dropout=0.33):
        super(BiaffineScorer2, self).__init__()
        # project each side into a common n_out-dimensional space
        self.l = MLP(n_in=n_in_a, n_out=n_out, dropout=dropout)
        self.r = MLP(n_in=n_in_b, n_out=n_out, dropout=dropout)
        self.attn = Biaffine(n_in=n_out, n_out=n_out_label, bias_x=bias_x, bias_y=bias_y)
        # optional score scale n_out^(1/4); 0 means no scaling
        self.scaling = (n_out ** (1 / 4)) if scaling else 0

    def forward(self, h, q):
        return self.attn.forward_v2(self.l(h), self.r(q))

    def forward2(self, h, q):
        return self.attn.forward_v3(self.l(h), self.r(q))
def test_contextual_confusion_matrix_overlap(expected, observed):
    """Unweighted contextual confusion matrix should match the reference tuple."""
    actual = contextual_confusion_matrix(expected, observed, weighted=False)
    np.testing.assert_array_equal(np.array(actual), np.array((None, 1, 1, 5)))
class PReLUParameter(_message.Message):
    # Auto-generated protobuf message class; the metaclass materializes all
    # message fields from DESCRIPTOR at class-creation time. Do not edit by hand.
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _PRELUPARAMETER
def train(train_loader, model, criterion, optimizer, epoch, use_cuda):
    """Run one training epoch and return (average loss, average top-1 accuracy).

    Args:
        train_loader: iterable of (inputs, targets) batches.
        model: network to train (switched to train mode here).
        criterion: loss function applied to (outputs, targets).
        optimizer: optimizer stepped once per batch.
        epoch: current epoch number (unused; kept for interface compatibility).
        use_cuda: move batches to GPU when True.
    """
    model.train()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()
    bar = Bar('Processing', max=len(train_loader))
    for (batch_idx, (inputs, targets)) in enumerate(train_loader):
        data_time.update((time.time() - end))
        if use_cuda:
            (inputs, targets) = (inputs.cuda(), targets.cuda(non_blocking=True))
        (inputs, targets) = (torch.autograd.Variable(inputs), torch.autograd.Variable(targets))
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        (prec1, prec5) = accuracy(outputs.data, targets.data, topk=(1, 5))
        # BUGFIX: loss.data[0] indexes a 0-dim tensor and raises on PyTorch >= 0.5;
        # .item() is the supported way to read a scalar loss
        losses.update(loss.item(), inputs.size(0))
        top1.update(prec1[0], inputs.size(0))
        top5.update(prec5[0], inputs.size(0))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_time.update((time.time() - end))
        end = time.time()
        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(batch=(batch_idx + 1), size=len(train_loader), data=data_time.val, bt=batch_time.val, total=bar.elapsed_td, eta=bar.eta_td, loss=losses.avg, top1=top1.avg, top5=top5.avg)
        bar.next()
    bar.finish()
    return (losses.avg, top1.avg)
# BUGFIX: the original line lacked the '@' -- the registry decorator was
# created and immediately discarded, so the head was never registered.
@_BOX_HEADS.register('resnet_c5_head')
def resnet_c5_head(dim_in, spatial_scale):
    """Build a ResNet C5 box head, optionally converted to weight-standardized convs."""
    model = ResNet_C5_Head(dim_in, spatial_scale, norm=get_norm())
    if cfg.BACKBONE.RESNET.USE_WS:
        model = convert_conv2convws_model(model)
    return model
class DeformRoIPool(nn.Module):
    """Deformable RoI pooling module wrapping the deform_roi_pool op."""

    def __init__(self, output_size, spatial_scale=1.0, sampling_ratio=0, gamma=0.1):
        super(DeformRoIPool, self).__init__()
        self.output_size = _pair(output_size)      # pooled (h, w)
        self.spatial_scale = float(spatial_scale)  # feature-map / input scale
        self.sampling_ratio = int(sampling_ratio)  # samples per bin (0 = adaptive)
        self.gamma = float(gamma)                  # offset magnitude factor

    def forward(self, input, rois, offset=None):
        # offset may be None, in which case the op behaves as plain RoI pooling
        return deform_roi_pool(input, rois, offset, self.output_size,
                               self.spatial_scale, self.sampling_ratio, self.gamma)
def huber_loss(x, delta=1.0):
    """Huber loss: quadratic for |x| < delta, linear beyond.

    Reference: https://en.wikipedia.org/wiki/Huber_loss
    """
    # BUGFIX: the original docstring line was truncated to "'Reference:" which
    # left an unterminated string literal (a syntax error); restored above.
    return tf.where((tf.abs(x) < delta), (tf.square(x) * 0.5), (delta * (tf.abs(x) - (0.5 * delta))))
class MobileNetV1(nn.Module):
    """Depthwise-separable convolution backbone returning three feature maps.

    NOTE(review): channel widths are much smaller than canonical MobileNetV1;
    presumably a slimmed variant -- confirm against the model config.
    """
    def __init__(self) -> None:
        super().__init__()
        self.stage1 = nn.Sequential(ConvBNReLU(3, 8, 3, 2, 1, 0.1), DWConv(8, 16, 1), DWConv(16, 32, 2), DWConv(32, 32, 1), DWConv(32, 64, 2), DWConv(64, 64, 1))
        self.stage2 = nn.Sequential(DWConv(64, 128, 2), DWConv(128, 128, 1), DWConv(128, 128, 1), DWConv(128, 128, 1), DWConv(128, 128, 1), DWConv(128, 128, 1))
        self.stage3 = nn.Sequential(DWConv(128, 256, 2), DWConv(256, 256, 1))
        # channel counts of the three returned feature maps
        self.out_channels = [64, 128, 256]
    def forward(self, x: Tensor) -> "tuple[Tensor, Tensor, Tensor]":
        # annotation corrected: the original said '-> Tensor' but three maps
        # are returned (one per stage)
        x1 = self.stage1(x)
        x2 = self.stage2(x1)
        x3 = self.stage3(x2)
        return (x1, x2, x3)
def main():
    """Minimal cvui demo: a button that counts its clicks until ESC is pressed."""
    frame = np.zeros((200, 500, 3), np.uint8)
    count = 0
    cvui.init(WINDOW_NAME)
    while True:
        frame[:] = (49, 52, 49)  # repaint the background every frame
        if cvui.button(frame, 110, 80, 'Hello, world!'):
            count += 1
        # BUGFIX: the original call had a dangling comma where the text color
        # belongs (a syntax error); restored with cvui's usual light grey.
        cvui.printf(frame, 250, 90, 0.4, 0xCECECE, 'Button click count: %d', count)
        cvui.imshow(WINDOW_NAME, frame)
        if (cv2.waitKey(20) == 27):  # ESC exits the loop
            break
def _print_keep_alive(seconds_since_start):
print(('Keep alive, current job runs for %dmin\n' % (seconds_since_start / 60))) |
def rho_inverse(elt):
    """Map a homogeneous element back through the rho map into multizeta values.

    Args:
        elt: element whose parent carries the base ring; assumed homogeneous
            (a single weight is deduced from its first term).

    Returns:
        The corresponding element of Multizetas over the base ring.
    """
    pa = elt.parent()
    BR = pa.base_ring().base_ring()
    M_BR = Multizetas(BR)
    if (elt == pa.zero()):
        return M_BR.zero()
    # inspect one (power, word) key to deduce the weight N
    (pw, _) = next(iter(elt))
    (p, w) = pw
    # weight: 2 per power p plus the digit sum of the word
    N = ((2 * p) + sum((int(c) for c in w)))
    v = elt.homogeneous_to_vector()
    # apply the inverse rho matrix for weight N to the coordinate vector
    w = (v * rho_matrix_inverse(N))
    # re-express the result in the multizeta basis of weight N
    return sum(((cf * b) for (cf, b) in zip(w, M_BR.basis_data(BR, N))))
class BagREDataset(data.Dataset):
    """Bag-level relation extraction dataset (one item per entity-pair bag).

    Each line of `path` must be a dict with keys 'h', 't', 'relation' plus
    whatever the tokenizer consumes.  Sentences sharing a bag name (either
    (head, tail) or (head, tail, relation)) are grouped into one bag.

    Args:
        path: data file, one sample dict per line.
        rel2id: relation name -> id mapping.
        tokenizer: callable mapping a sample dict to a sequence of tensors.
        entpair_as_bag: bag key is (head, tail) instead of (head, tail, relation).
        bag_size: if set, each bag is down-/up-sampled to exactly this size.
        mode: reserved; bag bookkeeping only happens when mode is None.
    """

    def __init__(self, path, rel2id, tokenizer, entpair_as_bag=False, bag_size=None, mode=None):
        super().__init__()
        self.tokenizer = tokenizer
        self.rel2id = rel2id
        self.entpair_as_bag = entpair_as_bag
        self.bag_size = bag_size
        self.data = []
        # 'with' guarantees the file is closed even if a line fails to parse
        with open(path) as f:
            for line in f:
                line = line.rstrip()
                if len(line) > 0:
                    # SECURITY: eval() executes arbitrary code from the data
                    # file -- the file must be trusted; prefer json.loads or
                    # ast.literal_eval if the format allows.
                    self.data.append(eval(line))
        if mode is None:  # was 'mode == None'
            self.weight = np.zeros(len(self.rel2id), dtype=np.float32)
            self.bag_scope = []
            self.name2id = {}
            self.bag_name = []
            self.facts = {}
            for (idx, item) in enumerate(self.data):
                fact = (item['h']['id'], item['t']['id'], item['relation'])
                if item['relation'] != 'NA':
                    self.facts[fact] = 1
                # bag key: the entity pair alone, or pair + relation
                if entpair_as_bag:
                    name = (item['h']['id'], item['t']['id'])
                else:
                    name = fact
                if name not in self.name2id:
                    self.name2id[name] = len(self.name2id)
                    self.bag_scope.append([])
                    self.bag_name.append(name)
                self.bag_scope[self.name2id[name]].append(idx)
                self.weight[self.rel2id[item['relation']]] += 1.0
            # soften class imbalance: inverse frequency to the 0.05 power
            self.weight = (1.0 / (self.weight ** 0.05))
            self.weight = torch.from_numpy(self.weight)
        else:
            pass

    def __len__(self):
        return len(self.bag_scope)

    def __getitem__(self, index):
        bag = self.bag_scope[index]
        if self.bag_size is not None:
            # sample down to bag_size, or pad by resampling with replacement
            if self.bag_size <= len(bag):
                bag = random.sample(bag, self.bag_size)
            else:
                bag = bag + list(np.random.choice(bag, self.bag_size - len(bag)))
        seqs = None
        rel = self.rel2id[self.data[bag[0]]['relation']]
        for sent_id in bag:
            seq = list(self.tokenizer(self.data[sent_id]))
            if seqs is None:
                seqs = [[] for _ in range(len(seq))]
            for i in range(len(seq)):
                seqs[i].append(seq[i])
        for i in range(len(seqs)):
            seqs[i] = torch.cat(seqs[i], 0)
        return [rel, self.bag_name[index], len(bag)] + seqs

    def collate_fn(data):
        """Merge a list of __getitem__ outputs into batch tensors plus bag scopes."""
        data = list(zip(*data))
        (label, bag_name, count) = data[:3]
        seqs = list(data[3:])
        for i in range(len(seqs)):
            seqs[i] = torch.cat(seqs[i], 0)
        # scope[i] = (start, end) slice of bag i within the concatenated batch
        scope = []
        start = 0
        for c in count:
            scope.append((start, start + c))
            start += c
        assert start == seqs[0].size(0)
        label = torch.tensor(label).long()
        return [label, bag_name, scope] + seqs
class BlipImageProcessor(BaseImageProcessor):
    """Image processor for BLIP: optional RGB conversion, resize, rescale and
    normalization, producing a 'pixel_values' batch."""
    model_input_names = ['pixel_values']
    def __init__(self, do_resize: bool=True, size: Dict[(str, int)]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_rescale: bool=True, rescale_factor: Union[(int, float)]=(1 / 255), do_normalize: bool=True, image_mean: Optional[Union[(float, List[float])]]=None, image_std: Optional[Union[(float, List[float])]]=None, do_convert_rgb: bool=True, **kwargs) -> None:
        """Store the default preprocessing configuration (384x384, CLIP mean/std)."""
        super().__init__(**kwargs)
        size = (size if (size is not None) else {'height': 384, 'width': 384})
        size = get_size_dict(size, default_to_square=True)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = (image_mean if (image_mean is not None) else OPENAI_CLIP_MEAN)
        self.image_std = (image_std if (image_std is not None) else OPENAI_CLIP_STD)
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image: np.ndarray, size: Dict[(str, int)], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[(str, ChannelDimension)]]=None, **kwargs) -> np.ndarray:
        """Resize `image` to size['height'] x size['width']; raises ValueError
        if either key is missing."""
        size = get_size_dict(size, default_to_square=True)
        if (('height' not in size) or ('width' not in size)):
            raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}')
        output_size = (size['height'], size['width'])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[(int, float)], data_format: Optional[Union[(str, ChannelDimension)]]=None, **kwargs):
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[(float, List[float])], std: Union[(float, List[float])], data_format: Optional[Union[(str, ChannelDimension)]]=None, **kwargs) -> np.ndarray:
        """Normalize `image` with per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[Dict[(str, int)]]=None, resample: PILImageResampling=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[(float, List[float])]]=None, image_std: Optional[Union[(float, List[float])]]=None, return_tensors: Optional[Union[(str, TensorType)]]=None, do_convert_rgb: bool=None, data_format: ChannelDimension=ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        """Run the configured pipeline on one or more images and return a
        BatchFeature with 'pixel_values'; per-call arguments override the
        instance defaults."""
        # every None argument falls back to the value stored in __init__
        do_resize = (do_resize if (do_resize is not None) else self.do_resize)
        resample = (resample if (resample is not None) else self.resample)
        do_rescale = (do_rescale if (do_rescale is not None) else self.do_rescale)
        rescale_factor = (rescale_factor if (rescale_factor is not None) else self.rescale_factor)
        do_normalize = (do_normalize if (do_normalize is not None) else self.do_normalize)
        image_mean = (image_mean if (image_mean is not None) else self.image_mean)
        image_std = (image_std if (image_std is not None) else self.image_std)
        do_convert_rgb = (do_convert_rgb if (do_convert_rgb is not None) else self.do_convert_rgb)
        size = (size if (size is not None) else self.size)
        size = get_size_dict(size, default_to_square=False)
        images = make_list_of_images(images)
        if (not valid_images(images)):
            raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.')
        if ((do_resize and (size is None)) or (resample is None)):
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if (do_rescale and (rescale_factor is None)):
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if (do_normalize and ((image_mean is None) or (image_std is None))):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # all transforms below operate on numpy arrays
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        encoded_outputs = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)
        return encoded_outputs
def extract_model_state_dict(ckpt_path, model_name='model', prefixes_to_ignore=()):
    """Load a checkpoint and extract one model's sub-state-dict.

    Args:
        ckpt_path: path to a torch checkpoint (optionally Lightning-style,
            i.e. wrapped under a 'state_dict' key).
        model_name: keep only keys starting with this name; the
            '<model_name>.' prefix is stripped from the returned keys.
        prefixes_to_ignore: stripped-key prefixes to drop.  Was a mutable
            default list -- now an immutable tuple (callers may still pass a list).

    Returns:
        dict mapping stripped parameter names to their tensors.
    """
    checkpoint = torch.load(ckpt_path, map_location=torch.device('cpu'))
    if 'state_dict' in checkpoint:
        checkpoint = checkpoint['state_dict']
    extracted = {}
    for k, v in checkpoint.items():
        if not k.startswith(model_name):
            continue
        k = k[len(model_name) + 1:]  # drop '<model_name>.'
        for prefix in prefixes_to_ignore:
            if k.startswith(prefix):
                print('ignore', k)
                break
        else:
            # no ignored prefix matched this key
            extracted[k] = v
    return extracted
def test_resave_pretrain():
    """A saved Pretrain file must reload both via Pretrain and raw torch.load."""
    # delete=False so the file survives .close() and can be reopened by name
    test_pt_file = tempfile.NamedTemporaryFile(dir=f'{TEST_WORKING_DIR}/out', suffix='.pt', delete=False)
    try:
        test_pt_file.close()
        # first construction converts the vec file and saves it as a .pt file
        pt = pretrain.Pretrain(filename=test_pt_file.name, vec_filename=f'{TEST_WORKING_DIR}/in/tiny_emb.xz')
        check_pretrain(pt)
        # second construction must hit the saved .pt (vec filename is bogus on purpose)
        pt2 = pretrain.Pretrain(filename=test_pt_file.name, vec_filename=f'unban_mox_opal')
        check_pretrain(pt2)
        # the raw torch payload should also carry the embedding under 'emb'
        pt3 = torch.load(test_pt_file.name)
        check_embedding(pt3['emb'])
    finally:
        os.unlink(test_pt_file.name)
class CreateAIDACONLL(PipelineJob):
    """Pipeline job that builds the AIDA-CoNLL entity-linking dataset.

    Merges the AIDA-YAGO2 annotations with the CoNLL-2003 NER tags, resolves
    candidate wiki names through the redirect / freebase / page-id indexes,
    and pickles the merged dataset plus the sets of entities that were (not)
    found in the version's entity counter index.
    """
    def __init__(self, preprocess_jobs: Dict[(str, PipelineJob)], opts):
        # declare the index files this job consumes and the pickles it produces
        super().__init__(requires=['data/indexes/redirects_en.ttl.bz2.dict', 'data/indexes/freebase_links_en.ttl.bz2.dict', 'data/indexes/page_ids_en.ttl.bz2.dict', 'data/indexes/disambiguations_en.ttl.bz2.dict', 'data/benchmarks/aida-yago2-dataset/AIDA-YAGO2-dataset.tsv'], provides=['data/benchmarks/aida-yago2-dataset/conll_dataset.pickle', f'data/versions/{opts.data_version_name}/indexes/found_conll_entities.pickle', f'data/versions/{opts.data_version_name}/indexes/not_found_conll_entities.pickle'], preprocess_jobs=preprocess_jobs, opts=opts)
    def _run(self):
        """Merge the EL and NER files line by line and write the pickled outputs."""
        # load lookup dictionaries produced by earlier pipeline stages
        with open('data/indexes/redirects_en.ttl.bz2.dict', 'rb') as f:
            redirects_en = pickle.load(f)
        redirects_en_values = set(redirects_en.values())
        with open('data/indexes/freebase_links_en.ttl.bz2.dict', 'rb') as f:
            fb_to_wikiname_dict = pickle.load(f)
        with open('data/indexes/disambiguations_en.ttl.bz2.dict', 'rb') as f:
            disambiguations_dict = pickle.load(f)
        with open('data/indexes/page_ids_en.ttl.bz2.dict', 'rb') as f:
            page_id_to_wikiname_dict = pickle.load(f)
        # NOTE(review): the download URL string is garbled here (unbalanced
        # quote) -- this line cannot parse; restore the original URL argument.
        conll2003_ner_en = self._download(url=' folder='data/downloads')
        subprocess.check_call(['tar', 'xzf', conll2003_ner_en, '-C', 'data/benchmarks/aida-yago2-dataset/'])
        try:
            # concatenate the two NER tag files into one stream
            subprocess.call(['cat data/benchmarks/aida-yago2-dataset/ner/etc/tags.eng data/benchmarks/aida-yago2-dataset/ner/etc/tags.eng.testb > data/benchmarks/aida-yago2-dataset/ner/eng.all'], shell=True)
        except subprocess.CalledProcessError as e:
            print(e.output)
        with open('data/benchmarks/aida-yago2-dataset/AIDA-YAGO2-dataset.tsv') as f1:
            with open('data/benchmarks/aida-yago2-dataset/ner/eng.all') as f2:
                # walk the EL and NER files in lockstep, merging their columns
                merged_el = list()
                el = f1.readlines()
                ner = f2.readlines()
                ner_i = 0
                last_entity = None
                for (el_i, el_line) in enumerate(el):
                    ner_line = ner[ner_i]
                    if (len(el_line.strip()) == 0):
                        # sentence boundary in the EL file
                        merged_el.append([''])
                        continue
                    # skip blank NER lines until the two files align again
                    while ((len(el_line.strip()) > 0) and (len(ner_line.strip()) == 0)):
                        ner_i += 1
                        ner_line = ner[ner_i]
                    if el_line.startswith('-DOCSTART-'):
                        merged_el.append([el_line.strip()])
                        ner_i += 1
                        continue
                    el_fields = el_line.strip().split('\t')
                    ner_fields = ner_line.strip().split()
                    if (len(el_fields) == 1):
                        # plain token with no entity annotation
                        bio_ner = 'O'
                        rest = []
                    else:
                        (bio, etype) = ner_fields[2].split('-')
                        # force B when a new entity starts even if NER says I
                        if (last_entity != el_fields[3]):
                            bio = 'B'
                        rest = el_fields[2:]
                        last_entity = el_fields[3]
                        bio_ner = f'{bio}-{etype}'
                    merged_el.append(([el_fields[0], bio_ner] + rest))
                    ner_i += 1
        conll_dataset = list()
        mentions = list()
        sentence_nr = 0
        tok_nr = 0
        split = 'train'
        print(merged_el[:10])
        for (line_nr, line_items) in enumerate(tqdm.tqdm(merged_el)):
            # document markers also carry the dataset split (train/testa/testb)
            if ((len(line_items) > 0) and line_items[0].startswith('-DOCSTART-') and ('testa' in line_items[0])):
                split = 'valid'
            if ((len(line_items) > 0) and line_items[0].startswith('-DOCSTART-') and ('testb' in line_items[0])):
                split = 'test'
            if ((len(line_items) == 1) and (line_items[0] == '')):
                # sentence boundary: bump sentence counter, reset token counter
                sentence_nr += 1
                tok_nr = 0
            elif ((len(line_items) > 0) and line_items[0].startswith('-DOCSTART-')):
                sentence_nr = 0
                conll_dataset.append({'tok': ' '.join(line_items), 'bio-tag': None, 'bio': None, 'tag': None, 'mention': None, 'yago_name': None, 'wiki_name': None, 'wiki_id': None, 'fb_id': None, 'doc_start': True, 'is_nil': None, 'sent_nr': sentence_nr, 'tok_nr': tok_nr, 'split': split})
            elif (len(line_items) == 2):
                # token outside any mention
                tok_nr += 1
                conll_dataset.append({'tok': line_items[0], 'bio-tag': 'O', 'bio': 'O', 'tag': None, 'mention': None, 'yago_name': None, 'wiki_name': None, 'wiki_id': None, 'fb_id': None, 'doc_start': False, 'is_nil': None, 'sent_nr': sentence_nr, 'tok_nr': tok_nr, 'split': split})
            elif (len(line_items) == 4):
                # NIL mention: annotated but not linked to a wiki page
                tok_nr += 1
                conll_dataset.append({'tok': line_items[0], 'bio-tag': line_items[1], 'bio': line_items[1].split('-')[0], 'tag': line_items[1].split('-')[1], 'mention': line_items[2], 'yago_name': line_items[3], 'wiki_name': line_items[3], 'wiki_id': line_items[3], 'fb_id': line_items[3], 'doc_start': False, 'is_nil': True, 'sent_nr': sentence_nr, 'tok_nr': tok_nr, 'split': split})
            elif ((len(line_items) == 6) or (len(line_items) == 7)):
                # linked mention: wiki_name is a Counter voting over candidates
                tok_nr += 1
                conll_dataset.append({'tok': line_items[0], 'bio-tag': line_items[1], 'bio': line_items[1].split('-')[0], 'tag': line_items[1].split('-')[1], 'mention': line_items[2], 'yago_name': line_items[3], 'wiki_name': Counter({line_items[4].split('/')[(- 1)]: 1}), 'wiki_id': line_items[5], 'fb_id': (line_items[6] if (len(line_items) == 7) else None), 'doc_start': False, 'is_nil': False, 'sent_nr': sentence_nr, 'tok_nr': tok_nr, 'split': split})
                # add a vote for the name reachable via the freebase id
                if (conll_dataset[(- 1)]['fb_id'] in fb_to_wikiname_dict):
                    key = fb_to_wikiname_dict[conll_dataset[(- 1)]['fb_id']]
                    if (key in redirects_en):
                        key = redirects_en[key]
                    conll_dataset[(- 1)]['wiki_name'][key] += 1
                # add a vote for the name reachable via the wiki page id
                if (conll_dataset[(- 1)]['wiki_id'] in page_id_to_wikiname_dict):
                    key = page_id_to_wikiname_dict[conll_dataset[(- 1)]['wiki_id']]
                    if (key in redirects_en):
                        key = redirects_en[key]
                    conll_dataset[(- 1)]['wiki_name'][key] += 1
                # vote for redirect targets of the candidates collected so far
                for wn in set(map(redirects_en.get, conll_dataset[(- 1)]['wiki_name'].keys())):
                    if (wn not in conll_dataset[(- 1)]['wiki_name']):
                        conll_dataset[(- 1)]['wiki_name'][wn] += 1
                # vote for the redirect target of the surface mention itself
                if (conll_dataset[(- 1)]['mention'].replace(' ', '_') in redirects_en):
                    conll_dataset[(- 1)]['wiki_name'][redirects_en[conll_dataset[(- 1)]['mention'].replace(' ', '_')]] += 1
                # disambiguation pages are discarded (vote reset to zero)
                for wn in set(conll_dataset[(- 1)]['wiki_name'].keys()):
                    if (wn in disambiguations_dict):
                        conll_dataset[(- 1)]['wiki_name'][wn] = 0
                # strong boost for names that are actual redirect targets
                for wn in set(conll_dataset[(- 1)]['wiki_name'].keys()):
                    if (wn in redirects_en_values):
                        conll_dataset[(- 1)]['wiki_name'][wn] += 5
            else:
                raise Exception('Error {}'.format(line_items))
            # collect each non-NIL mention head (B tokens) for later use
            if ((len(conll_dataset) > 0) and (conll_dataset[(- 1)]['bio'] == 'B') and (not conll_dataset[(- 1)]['is_nil'])):
                mentions.append(conll_dataset[(- 1)])
        with open('data/benchmarks/aida-yago2-dataset/conll_dataset.pickle', 'wb') as f:
            pickle.dump(conll_dataset, f)
        with open(f'data/versions/{self.opts.data_version_name}/indexes/entity_counter.pickle', 'rb') as f:
            all_entity_counter = pickle.load(f)
        all_found_conll_entities = set()
        all_conll_entities = set()
        all_not_found_conll_entities = set()
        # split mention entities by presence in the entity counter index
        for item in conll_dataset:
            if ((not item['is_nil']) and (item['bio'] == 'B')):
                (name, count) = item['wiki_name'].most_common()[0]
                if (name in all_entity_counter):
                    all_found_conll_entities.add(name)
                else:
                    all_not_found_conll_entities.add(name)
                all_conll_entities.add(name)
        with open(f'data/versions/{self.opts.data_version_name}/indexes/found_conll_entities.pickle', 'wb') as f:
            pickle.dump(all_found_conll_entities, f)
        with open(f'data/versions/{self.opts.data_version_name}/indexes/not_found_conll_entities.pickle', 'wb') as f:
            pickle.dump(all_not_found_conll_entities, f)
        self.log(f'Found {len(all_found_conll_entities)} and not found {len(all_not_found_conll_entities)} of AIDA-CoNLL entities in the entities dictionary.')
def test_weights(sdfg_name, gpu):
    """Run a small 3-layer MLP through the module round-trip check."""

    class Module(torch.nn.Module):
        def __init__(self):
            super(Module, self).__init__()
            # 784 -> 120 -> 32 -> 10 classifier
            self.fc1 = nn.Linear(784, 120)
            self.fc2 = nn.Linear(120, 32)
            self.fc3 = nn.Linear(32, 10)

        def forward(self, x):
            hidden = F.relu(self.fc1(x))
            hidden = F.relu(self.fc2(hidden))
            return self.fc3(hidden)

    run_pytorch_module(Module(), sdfg_name, gpu, shape=(4, 784), use_max=False)
def intersectionAndUnion(imPred, imLab, numClass):
    """Per-class intersection and union histograms for segmentation metrics.

    Predictions at negatively-labeled pixels are zeroed out, and the
    histograms cover classes 1..numClass (class 0 falls outside the range).

    Returns:
        (area_intersection, area_union): two length-numClass arrays.
    """
    # suppress predictions wherever the label is negative
    imPred = imPred * (imLab >= 0)
    # pixels where prediction agrees with the label
    intersection = imPred * (imPred == imLab)
    hist_kwargs = dict(bins=numClass, range=(1, numClass))
    (area_intersection, _) = np.histogram(intersection, **hist_kwargs)
    (area_pred, _) = np.histogram(imPred, **hist_kwargs)
    (area_lab, _) = np.histogram(imLab, **hist_kwargs)
    area_union = area_pred + area_lab - area_intersection
    return (area_intersection, area_union)
class GCNConv(MessagePassing):
    """Graph convolutional layer (Kipf & Welling GCN).

    Applies a linear transform to node features and aggregates them over the
    (optionally symmetrically normalized, self-looped) adjacency.  The
    normalized adjacency can be cached for transductive settings.
    """
    _cached_edge_index: Optional[Tuple[(Tensor, Tensor)]]
    _cached_adj_t: Optional[SparseTensor]

    def __init__(self, in_channels: int, out_channels: int, improved: bool=False, cached: bool=False, add_self_loops: bool=True, normalize: bool=True, bias: bool=True, **kwargs):
        kwargs.setdefault('aggr', 'add')
        super(GCNConv, self).__init__(**kwargs)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.improved = improved
        self.cached = cached
        self.add_self_loops = add_self_loops
        self.normalize = normalize
        self._cached_edge_index = None
        self._cached_adj_t = None
        self.weight = Parameter(torch.Tensor(in_channels, out_channels))
        if bias:
            self.bias = Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        glorot(self.weight)
        zeros(self.bias)
        # invalidate any cached normalized adjacency
        self._cached_edge_index = None
        self._cached_adj_t = None

    def forward(self, x: Tensor, edge_index: Adj, edge_weight: OptTensor=None) -> Tensor:
        if self.normalize:
            if isinstance(edge_index, Tensor):
                cache = self._cached_edge_index
                if (cache is None):
                    (edge_index, edge_weight) = gcn_norm(edge_index, edge_weight, x.size(self.node_dim), self.improved, self.add_self_loops)
                    if self.cached:
                        self._cached_edge_index = (edge_index, edge_weight)
                else:
                    (edge_index, edge_weight) = (cache[0], cache[1])
            elif isinstance(edge_index, SparseTensor):
                cache = self._cached_adj_t
                if (cache is None):
                    edge_index = gcn_norm(edge_index, edge_weight, x.size(self.node_dim), self.improved, self.add_self_loops)
                    if self.cached:
                        self._cached_adj_t = edge_index
                else:
                    edge_index = cache
        # BUGFIX: original read 'x = (x self.weight)' (missing operator);
        # the feature transform is a matrix product with the weight matrix.
        x = torch.matmul(x, self.weight)
        out = self.propagate(edge_index, x=x, edge_weight=edge_weight, size=None)
        if (self.bias is not None):
            out += self.bias
        return out

    def message(self, x_j: Tensor, edge_weight: OptTensor) -> Tensor:
        # scale each neighbor's features by its (optional) edge weight
        return (x_j if (edge_weight is None) else (edge_weight.view((- 1), 1) * x_j))

    def message_and_aggregate(self, adj_t: SparseTensor, x: Tensor) -> Tensor:
        return matmul(adj_t, x, reduce=self.aggr)

    def __repr__(self):
        return '{}({}, {})'.format(self.__class__.__name__, self.in_channels, self.out_channels)
def check_Kraus_local_2(c4, c6, P, a1=None, assume_nonsingular=False):
    """Check Kraus's local conditions for (c4, c6) at a prime P above 2.

    Returns (True, a1, a3) when coefficients a1, a3 exist making the model
    with these invariants integral at P, otherwise (False, 0, 0).  When
    `a1` is supplied only that value is tried; otherwise candidates are
    searched over residues.
    """
    if (not assume_nonsingular):
        # singular invariant pairs can never yield a valid model
        if (not c4c6_nonsingular(c4, c6)):
            return (False, 0, 0)
    e = P.ramification_index()
    P2 = (P ** e)
    c4val = c4.valuation(P)
    if (c4val == 0):
        # case: c4 is a unit at P
        if (a1 is None):
            (flag, t) = sqrt_mod_4((- c6), P)
            if (not flag):
                return (False, 0, 0)
            a1 = make_integral((c4 / t), P, e)
        a13 = (a1 ** 3)
        a3 = make_integral(((c6 + (a13 ** 2)) / (4 * a13)), P, (2 * e))
        if test_a1a3_local(c4, c6, P, a1, a3):
            return (True, a1, a3)
        else:
            raise RuntimeError('check_Kraus_local_2 fails')
    if (c4val >= (4 * e)):
        # case: c4 has high valuation -- try a1 = 0, need c6/8 square mod 4
        if (a1 is None):
            a1 = c4.parent().zero()
        (flag, a3) = sqrt_mod_4((c6 / 8), P)
        if flag:
            if test_a1a3_local(c4, c6, P, a1, a3):
                return (True, a1, a3)
            else:
                raise RuntimeError('check_Kraus_local_2 fails')
        else:
            return (False, 0, 0)
    # intermediate valuation: search over candidate a1 residues
    # NOTE(review): 'if a1' also treats a supplied a1 == 0 as "not given";
    # confirm a zero a1 cannot reach this branch.
    P2res = ([a1] if a1 else P2.residues())
    for a1 in P2res:
        Px = (((- (a1 ** 6)) + ((3 * (a1 ** 2)) * c4)) + (2 * c6))
        if (Px.valuation(P) >= (4 * e)):
            (flag, a3) = sqrt_mod_4((Px / 16), P)
            if flag:
                a1sq = (a1 * a1)
                if ((((4 * a1sq) * Px) - (((a1sq ** 2) - c4) ** 2)).valuation(P) >= (8 * e)):
                    if test_a1a3_local(c4, c6, P, a1, a3):
                        return (True, a1, a3)
                    else:
                        raise RuntimeError('check_Kraus_local_2 fails')
    return (False, 0, 0)
def test_getter(nlp_pipeline):
    """A Word property registered with a getter must be visible on parsed docs."""
    Word.add_property('upos_xpos', getter=(lambda self: f'{self.upos}_{self.xpos}'))
    doc = nlp_pipeline(EN_DOC)
    observed = tuple(
        tuple(word.upos_xpos for word in sentence.words)
        for sentence in doc.sentences
    )
    assert observed == EN_DOC_UPOS_XPOS
def batchify(TEXT, device, data, bsz):
    """Numericalize a dataset's text and lay it out as bsz parallel columns.

    Trailing tokens that do not fill a complete batch are dropped.
    """
    tensor = TEXT.numericalize([data.examples[0].text])
    nbatch = tensor.size(0) // bsz
    # trim the tail, then reshape the token stream to (nbatch, bsz)
    tensor = tensor.narrow(0, 0, nbatch * bsz)
    tensor = tensor.view(bsz, -1).t().contiguous()
    return tensor.to(device)
# BUGFIX: the original decorator line was truncated to just its argument list;
# restored as the dace.program decorator these kwargs belong to.
@dace.program(auto_optimize=True, device=dtypes.DeviceType.CPU)
def spmv(A_row: dace.uint32[(M + 1)], A_col: dace.uint32[nnz], A_val: dtype[nnz], x: dtype[N], y: dtype[M]):
    """Sparse matrix-vector product y = A @ x with A in CSR format."""
    for i in range((A_row.size - 1)):
        # nonzeros of row i live in the half-open slice [A_row[i], A_row[i+1])
        cols = A_col[A_row[i]:A_row[(i + 1)]]
        vals = A_val[A_row[i]:A_row[(i + 1)]]
        # row dot product (the original read 'vals x[cols]' -- missing '@')
        y[i] = (vals @ x[cols])
def complex_flatten(real, imag):
    """Flatten the real and imaginary parts independently with Keras Flatten layers."""
    flatten = tf.keras.layers.Flatten
    return (flatten()(real), flatten()(imag))
def get_reconciler_common_network_args(env, embedding_dim):
    """Keyword arguments for building the shared reconciler embedding network.

    `env` is accepted for interface uniformity but not used here.
    """
    return dict(
        name='reconciler_common_network',
        output_dim=embedding_dim,
        hidden_sizes=(256,),
        hidden_nonlinearity=tf.nn.relu,
        output_nonlinearity=None,
        batch_normalization=False,
    )
def decl_texture_arg(num_dimensions, name):
    """Declare a texture parameter on the kernel currently being compiled.

    Args:
        num_dimensions: dimensionality of the texture.
        name: parameter name registered with the compiling callable.

    Returns:
        A TextureSampler wrapping the new texture-pointer expression.
    """
    # register the parameter with the callable under compilation
    arg_id = impl.get_runtime().compiling_callable.insert_texture_param(num_dimensions, name)
    # attach source-location info for error reporting
    dbg_info = _ti_core.DebugInfo(impl.get_runtime().get_current_src_info())
    return TextureSampler(_ti_core.make_texture_ptr_expr(arg_id, num_dimensions, 0, dbg_info), num_dimensions)
def get_rotated_fmnist_loaders(angle, data_path, model_class='LeNet', download=False):
    """Build val/test loaders for FashionMNIST rotated by `angle` degrees.

    MLP models additionally get each image flattened to a vector.
    """
    tform_list = [RotationTransform(angle), transforms.ToTensor()]
    if model_class == 'MLP':
        # MLPs consume flat vectors, so append a reshape transform
        tform_list.append(ReshapeTransform(((- 1),)))
    shift_tforms = transforms.Compose(tform_list)
    rotated_fmnist_val_test_set = datasets.FashionMNIST(data_path, train=False, transform=shift_tforms, download=download)
    return val_test_split(rotated_fmnist_val_test_set, val_size=2000)
class ClipPercentile(LoopEntryTransform):
    """Clip entry values to their [lower, upper] percentile range.

    If lower_percentile is omitted it defaults to (100 - upper_percentile).
    """

    def __init__(self, upper_percentile: float, lower_percentile: float=None, loop_axis=None, entries=(defs.KEY_IMAGES,)) -> None:
        super().__init__(loop_axis=loop_axis, entries=entries)
        self.upper_percentile = upper_percentile
        self.lower_percentile = (100 - upper_percentile) if lower_percentile is None else lower_percentile

    def transform_entry(self, np_entry, entry, loop_i=None) -> np.ndarray:
        return self._clip(np_entry)

    def _clip(self, arr: np.ndarray):
        # note: clips in place on the array that was passed in
        hi = np.percentile(arr, self.upper_percentile)
        arr[arr > hi] = hi
        lo = np.percentile(arr, self.lower_percentile)
        arr[arr < lo] = lo
        return arr
class FSM(nn.Module):
    """Feature selection module: gate features by a globally-pooled attention map,
    add them back to the input, then project to c2 channels."""

    def __init__(self, c1, c2):
        super().__init__()
        self.conv_atten = nn.Conv2d(c1, c1, 1, bias=False)
        self.conv = nn.Conv2d(c1, c2, 1, bias=False)

    def forward(self, x: Tensor) -> Tensor:
        # average pool over the full spatial extent -> per-channel gate
        pooled = F.avg_pool2d(x, x.shape[2:])
        atten = self.conv_atten(pooled).sigmoid()
        gated = torch.mul(x, atten)
        # residual-style combine before the channel projection
        return self.conv(x + gated)
def _generate_dataset(args_namespace):
    """CLI adapter: unpack the parsed namespace and delegate to generate_dataset."""
    language = args_namespace.language
    files = args_namespace.files
    return generate_dataset(language, *files)
def parse_args():
    """Parse command-line arguments for StarGAN v2 training/testing.

    Returns the argparse namespace after validation by ``check_args``
    (defined elsewhere in this module).
    """
    desc = 'Tensorflow implementation of StarGAN_v2'
    parser = argparse.ArgumentParser(description=desc)
    # Run mode, dataset and reference image.
    parser.add_argument('--phase', type=str, default='train', help='train or test or refer_test ?')
    parser.add_argument('--dataset', type=str, default='celebA-HQ_gender', help='dataset_name')
    parser.add_argument('--refer_img_path', type=str, default='refer_img.jpg', help='reference image path')
    # Training schedule.
    parser.add_argument('--iteration', type=int, default=100000, help='The number of training iterations')
    parser.add_argument('--batch_size', type=int, default=8, help='The size of batch size')
    parser.add_argument('--print_freq', type=int, default=1000, help='The number of image_print_freq')
    parser.add_argument('--save_freq', type=int, default=10000, help='The number of ckpt_save_freq')
    parser.add_argument('--gpu_num', type=int, default=1, help='The number of gpu')
    # Learning rate / EMA.
    parser.add_argument('--decay_flag', type=str2bool, default=True, help='The decay_flag')
    parser.add_argument('--decay_iter', type=int, default=50000, help='decay start iteration')
    parser.add_argument('--lr', type=float, default=0.0001, help='The learning rate')
    parser.add_argument('--ema_decay', type=float, default=0.999, help='ema decay value')
    # Loss weights.
    parser.add_argument('--adv_weight', type=float, default=1, help='The weight of Adversarial loss')
    parser.add_argument('--sty_weight', type=float, default=1, help='The weight of Style reconstruction loss')
    parser.add_argument('--ds_weight', type=float, default=1, help='The weight of style diversification loss')
    parser.add_argument('--cyc_weight', type=float, default=1, help='The weight of Cycle-consistency loss')
    parser.add_argument('--r1_weight', type=float, default=1, help='The weight of R1 regularization')
    parser.add_argument('--gp_weight', type=float, default=10, help='The gradient penalty lambda')
    # GAN architecture options.
    parser.add_argument('--gan_type', type=str, default='gan', help='gan / lsgan / hinge / wgan-gp / wgan-lp / dragan')
    parser.add_argument('--sn', type=str2bool, default=False, help='using spectral norm')
    parser.add_argument('--ch', type=int, default=32, help='base channel number per layer')
    parser.add_argument('--n_layer', type=int, default=4, help='The number of resblock')
    parser.add_argument('--n_critic', type=int, default=1, help='number of D updates per each G update')
    parser.add_argument('--style_dim', type=int, default=16, help='length of style code')
    parser.add_argument('--num_style', type=int, default=5, help='number of styles to sample')
    # Input image geometry / augmentation.
    parser.add_argument('--img_height', type=int, default=256, help='The height size of image')
    parser.add_argument('--img_width', type=int, default=256, help='The width size of image ')
    parser.add_argument('--img_ch', type=int, default=3, help='The size of image channel')
    parser.add_argument('--augment_flag', type=str2bool, default=True, help='Image augmentation use or not')
    # Output directories.
    parser.add_argument('--checkpoint_dir', type=str, default='checkpoint', help='Directory name to save the checkpoints')
    parser.add_argument('--result_dir', type=str, default='results', help='Directory name to save the generated images')
    parser.add_argument('--log_dir', type=str, default='logs', help='Directory name to save training logs')
    parser.add_argument('--sample_dir', type=str, default='samples', help='Directory name to save the samples on training')
    return check_args(parser.parse_args())
def test_jax_scvi_training(n_latent: int=5, dropout_rate: float=0.1):
    """Check that JaxSCVI toggles Flax train/eval mode during training.

    Wraps flax's ``nn.Dropout`` in a mock so the test can observe whether
    it is invoked with ``deterministic=False`` (training steps) versus
    ``deterministic=True`` (validation steps).
    """
    adata = synthetic_iid()
    JaxSCVI.setup_anndata(adata, batch_key='batch')
    model = JaxSCVI(adata, n_latent=n_latent, dropout_rate=dropout_rate)
    # A freshly constructed model starts in training mode.
    assert model.module.training
    with mock.patch('scvi.module._jaxvae.nn.Dropout', wraps=nn.Dropout) as mock_dropout_cls:
        mock_dropout = mock.Mock()
        # Identity dropout: pass activations through unchanged.
        mock_dropout.side_effect = (lambda h, **_kwargs: h)
        mock_dropout_cls.return_value = mock_dropout
        model.train(1, train_size=0.5, check_val_every_n_epoch=1)
        # After .train() completes the module must be left in eval mode.
        assert (not model.module.training)
        mock_dropout_cls.assert_called()
        # NOTE(review): the exact 12 train / 8 val call counts are tied to the
        # module's layer layout and one epoch of train+val with a 50% split —
        # confirm if the architecture or split changes.
        mock_dropout.assert_has_calls(((12 * [mock.call(mock.ANY, deterministic=False)]) + (8 * [mock.call(mock.ANY, deterministic=True)])))
def get_include(user=False):
    """Return the directory containing installed Python headers.

    In a virtualenv the headers live under
    ``<prefix>/include/site/pythonX.Y``; otherwise the path is derived from
    the distutils ``install`` command's ``install_headers`` target, falling
    back to ``sysconfig`` when distutils is unavailable (Python >= 3.12).

    Parameters
    ----------
    user : bool
        Select the per-user installation scheme in the distutils branch.
    """
    import os
    import sys
    # Detect both legacy virtualenv (sys.real_prefix) and stdlib venvs
    # (sys.base_prefix differing from sys.prefix).
    virtualenv = (hasattr(sys, 'real_prefix') or (sys.prefix != getattr(sys, 'base_prefix', sys.prefix)))
    # BUG FIX: sys.version[:3] yields '3.1' for Python >= 3.10; build the
    # 'pythonX.Y' tag from sys.version_info instead.
    py_tag = 'python{}.{}'.format(sys.version_info.major, sys.version_info.minor)
    if virtualenv:
        return os.path.join(sys.prefix, 'include', 'site', py_tag)
    try:
        from distutils.dist import Distribution
    except ImportError:
        # distutils was removed in Python 3.12; sysconfig reports the
        # equivalent platform include directory. NOTE: `user` is ignored
        # in this fallback.
        import sysconfig
        return sysconfig.get_path('include')
    dist = Distribution({'name': 'pybind11'})
    dist.parse_config_files()
    dist_cobj = dist.get_command_obj('install', create=True)
    if user:
        dist_cobj.user = user
        dist_cobj.prefix = ''
    dist_cobj.finalize_options()
    # install_headers is '<include>/pythonX.Y/<dist name>'; its parent is
    # the Python include directory.
    return os.path.dirname(dist_cobj.install_headers)
def check_likelihood_grad_BO(likelihood):
    """Run the BO likelihood-gradient experiment grid for one likelihood."""
    return simple_run_experiments(
        get_likelihood_grad_BO,
        likelihood=likelihood,
        mz_hat=np.linspace(1, 3, 10),
        tz0_hat=1,
    )
def main():
    """Generate style-transfer test outputs for every domain of a trained
    MD_multi model, saving individual and concatenated result images."""
    parser = TestOptions()
    opts = parser.parse()
    # Domain labels 'A'..'Z'; only the first opts.num_domains are used.
    domains = [chr(i) for i in range(ord('A'), (ord('Z') + 1))]
    print('\n--- load dataset ---')
    datasets = ([None] * opts.num_domains)
    loaders = ([None] * opts.num_domains)
    for i in range(opts.num_domains):
        datasets[i] = dataset_single(opts, i)
        loaders[i] = torch.utils.data.DataLoader(datasets[i], batch_size=1, num_workers=opts.nThreads)
    print('\n--- load model ---')
    model = MD_multi(opts)
    model.setgpu(opts.gpu)
    model.resume(opts.resume, train=False)
    model.eval()
    result_dir = os.path.join(opts.result_dir, opts.name)
    if (not os.path.exists(result_dir)):
        os.mkdir(result_dir)
    print('\n--- testing ---')
    for d in range(opts.num_domains):
        for (idx, data) in enumerate(loaders[d]):
            (img, c_org) = data
            print('{}/{}'.format(idx, len(loaders[d])))
            # NOTE(review): `num` is not defined in this function — presumably
            # a module-level cap on images processed per domain; confirm.
            if (idx > num):
                break
            (img, c_org) = (img.cuda(), c_org.cuda())
            imgs = [img]
            names = ['input']
            # Sample opts.num random styles; each sample yields one output
            # image per target domain.
            for idx2 in range(opts.num):
                with torch.no_grad():
                    imgs_ = model.test_forward_random(img)
                for i in range(opts.num_domains):
                    imgs.append(imgs_[i])
                    names.append('output{}_{}_{}'.format(domains[d], domains[i], idx2))
            save_imgs(imgs, names, os.path.join(result_dir, '{}_{}'.format(domains[d], idx)))
            save_concat_imgs(imgs, 'output{}_{}'.format(domains[d], idx), result_dir)
    return
# BUG FIX: the route registration below was a bare expression statement; it
# must be a decorator ('@') for FastAPI to attach this handler to the router.
@_router.get('/weekly_info', response_model=TotalStatsByWeek, response_description='Get gender statistics per English outlet aggregated WEEKLY between two dates')
def expertwomen_weekly_info(request: Request, begin: str=Query(description='Start date in yyyy-mm-dd format'), end: str=Query(description='End date in yyyy-mm-dd format')) -> TotalStatsByWeek:
    """Return weekly-aggregated gender statistics for English outlets.

    Raises:
        HTTPException(416): when [begin, end] falls outside the supported
            range [LOWER_BOUND_START_DATE, tomorrow].
    """
    if (not dateutils.is_valid_date_range(begin, end, LOWER_BOUND_START_DATE)):
        raise HTTPException(status_code=416, detail=f"Date range error: Should be between {LOWER_BOUND_START_DATE} and tomorrow's date")
    result = _expertwomen_weekly_info(request, begin, end)
    logger.info(('Obtained weekly info for English outlets between %s and %s' % (begin, end)))
    return result
class LevelMapper(object):
    """Assign boxes to FPN pyramid levels (FPN paper, Eq. 1).

    Each box gets level floor(lvl0 + log2(sqrt(area) / s0)), clamped to
    [k_min, k_max]; returned indices are shifted so level k_min maps to 0.
    """

    def __init__(self, k_min, k_max, canonical_scale=224, canonical_level=4, eps=1e-06):
        self.k_min = k_min
        self.k_max = k_max
        self.s0 = canonical_scale
        self.lvl0 = canonical_level
        self.eps = eps

    def __call__(self, boxlists):
        # Per-box scale: sqrt of the box area, concatenated over all lists.
        scales = torch.sqrt(cat([boxlist.area() for boxlist in boxlists]))
        # eps keeps log2 finite for degenerate (zero-area) boxes.
        levels = torch.floor(self.lvl0 + torch.log2(scales / self.s0 + self.eps))
        levels = torch.clamp(levels, min=self.k_min, max=self.k_max)
        return levels.to(torch.int64) - self.k_min
def test_2layers():
    """Smoke-test a two-layer MLP through the RF test harness."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim, hidden_dim, out_dim = Dim(7, name='in'), Dim(11, name='hidden'), Dim(13, name='out')
    extern_data = TensorDict({
        'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32'),
        'classes': Tensor('classes', [batch_dim, time_dim], dtype='int32', sparse_dim=out_dim),
    })

    class _Net(rf.Module):
        """Linear -> ReLU -> Linear."""

        def __init__(self):
            super().__init__()
            self.layer1 = rf.Linear(in_dim, hidden_dim)
            self.layer2 = rf.Linear(hidden_dim, out_dim)

        def __call__(self, x: Tensor) -> Tensor:
            hidden = rf.relu(self.layer1(x))
            return self.layer2(hidden)

    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(extern_data['data'])
        out.mark_as_default_output()

    run_model(extern_data, (lambda *, epoch, step: _Net()), _forward_step)
class JuPyMake(JoinFeature):
    """Feature check for the 'JuPyMake' Python module (polymake interface)."""

    def __init__(self):
        # Present iff the JuPyMake module (from the jupymake spkg) imports.
        super().__init__('jupymake', [PythonModule('JuPyMake', spkg='jupymake')])
def KChainComplexMorphism(morphism):
    """Convert a Sage chain-complex morphism into its Kenzo counterpart."""
    kenzo_source = KChainComplex(morphism.domain())
    kenzo_target = KChainComplex(morphism.codomain())
    matrices = morphism_dictmat(morphism)
    kenzo_obj = __kmorphismchaincomplex_aux1__(matrices, kenzo_source._kenzo, kenzo_target._kenzo)
    return KenzoChainComplexMorphism(kenzo_obj)
def test_nhypergeom_rvs_shape():
    """Broadcasting the (M, n, r) parameters must honour the requested size."""
    sample = nhypergeom.rvs(22, [7, 8, 9], [[12], [13]], size=(5, 1, 2, 3))
    assert sample.shape == (5, 1, 2, 3)
def bilerp(vf, p):
    """Bilinearly sample field `vf` at continuous position `p`.

    Samples live at cell centers, hence the 0.5 offset before taking the
    integer cell index.
    """
    u, v = p
    s, t = u - 0.5, v - 0.5
    iu, iv = ti.floor(s), ti.floor(t)
    # Fractional offsets within the cell.
    fu, fv = s - iu, t - iv
    bl = sample(vf, iu, iv)
    br = sample(vf, iu + 1, iv)
    tl = sample(vf, iu, iv + 1)
    tr = sample(vf, iu + 1, iv + 1)
    # Interpolate along u first, then along v.
    return lerp(lerp(bl, br, fu), lerp(tl, tr, fu), fv)
class TestLabeledRegionsDataset():
    """Placeholder suite for LabeledRegionsDataset (cases not yet written)."""

    def test_init(self):
        """TODO: exercise dataset construction."""

    def test_get_item(self):
        """TODO: exercise indexing into the dataset."""
def get_params():
    """Load the 800 pretrained hourglass weight arrays from disk.

    4-D conv kernels have their axes permuted from the on-disk layout
    (a, b, c, d) to (c, d, b, a) before being appended.

    Returns:
        list[np.ndarray]: the (possibly transposed) weight arrays, in order.
    """
    params = []
    # BUG FIX: `xrange` is Python 2 only; this file otherwise uses Python 3
    # features (f-strings), so `xrange` would raise NameError — use `range`.
    for i in range(1, 801):
        p = np.load('./perceptual_models/hourglass/hourglass_weights_' + str(i) + '.npy')
        if len(p.shape) == 4:
            # Axis permutation (a, b, c, d) -> (c, d, b, a).
            p = p.swapaxes(0, 1).swapaxes(0, 2).swapaxes(1, 3)
        params.append(p)
    return params
def register_Ns3CallbackImplBase_methods(root_module, cls):
    """Register Python bindings for ns3::CallbackImplBase.

    NOTE(review): auto-generated PyBindGen registration code — the method
    signatures mirror the C++ header; regenerate rather than edit by hand.
    """
    # Constructors: default and copy.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    # Pure-virtual public interface.
    cls.add_method('GetTypeid', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_pure_virtual=True, is_const=True, is_virtual=True)
    # Protected static helpers, including one GetCppTypeid template
    # instantiation per supported argument type.
    cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected')
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::ObjectBase*'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'void'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'unsigned int'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::Ptr<ns3::NetDevice> '])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::Ptr<ns3::Packet const> '])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'unsigned short'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::Address const&'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::NetDevice::PacketType'])
    return
class Inferer():
    """Runs inference (beam search, loss debugging, or attention analysis)
    with a model restored from a checkpoint directory."""

    def __init__(self, config):
        self.config = config
        # Prefer GPU; on CPU cap thread count at 1 for reproducibility.
        if torch.cuda.is_available():
            self.device = torch.device('cuda')
        else:
            self.device = torch.device('cpu')
            torch.set_num_threads(1)
        # Look up the model's preprocessor from the registry and load its
        # cached artifacts from disk.
        self.model_preproc = registry.instantiate(registry.lookup('model', config['model']).Preproc, config['model'])
        self.model_preproc.load()

    def load_model(self, logdir, step):
        """Restore the model weights saved at `step` under `logdir`.

        Raises:
            Exception: when no checkpoint could be restored.
        """
        model = registry.construct('model', self.config['model'], preproc=self.model_preproc, device=self.device)
        model.to(self.device)
        model.eval()
        model.visualize_flag = False
        # The optimizer exists only because Saver's restore API expects one;
        # its state is unused for inference.
        optimizer = registry.construct('optimizer', self.config['optimizer'], params=model.parameters())
        saver = saver_mod.Saver(model, optimizer)
        last_step = saver.restore(logdir, step=step, map_location=self.device)
        if (not last_step):
            raise Exception('Attempting to infer on untrained model')
        return model

    def infer(self, model, output_path, args):
        """Dispatch to the requested mode ('infer', 'debug' or
        'visualize_attention') and stream results to `output_path`."""
        output = open(output_path, 'w')
        orig_data = registry.construct('dataset', self.config['data'][args.section])
        sliced_orig_data = maybe_slice(orig_data, args.start_offset, args.limit)
        preproc_data = self.model_preproc.dataset(args.section)
        sliced_preproc_data = maybe_slice(preproc_data, args.start_offset, args.limit)
        with torch.no_grad():
            if (args.mode == 'infer'):
                # Original and preprocessed datasets must stay aligned 1:1.
                assert (len(orig_data) == len(preproc_data))
                self._inner_infer(model, args.beam_size, args.output_history, sliced_orig_data, sliced_preproc_data, output, args.nproc)
            elif (args.mode == 'debug'):
                self._debug(model, sliced_orig_data, output)
            elif (args.mode == 'visualize_attention'):
                model.visualize_flag = True
                model.decoder.visualize_flag = True
                self._visualize_attention(model, args.beam_size, args.output_history, sliced_orig_data, args.res1, args.res2, args.res3, output)

    def _inner_infer(self, model, beam_size, output_history, sliced_orig_data, sliced_preproc_data, output, nproc):
        """Beam-search every example, fanned out over `nproc` CPU workers."""
        list_items = [(idx, oi, pi) for (idx, (oi, pi)) in enumerate(zip(sliced_orig_data, sliced_preproc_data))]
        cp = parallelizer.CPUParallelizer(nproc)
        params = [(beam_size, output_history, indices, orig_items, preproc_items) for (indices, orig_items, preproc_items) in list_items]
        # NOTE(review): parallel_map receives a single (callable, params)
        # pair wrapped in a list — confirm this matches CPUParallelizer's API.
        write_all(output, cp.parallel_map([(functools.partial(self._infer_single, model), params)]))

    def _infer_single(self, model, param):
        """Beam-search one example; returns a JSON line (result or error)."""
        (beam_size, output_history, index, orig_item, preproc_item) = param
        try:
            beams = beam_search.beam_search(model, orig_item, preproc_item, beam_size=beam_size, max_steps=1000)
            decoded = []
            for beam in beams:
                (model_output, inferred_code) = beam.inference_state.finalize()
                decoded.append({'model_output': model_output, 'inferred_code': inferred_code, 'score': beam.score, **({'choice_history': beam.choice_history, 'score_history': beam.score_history} if output_history else {})})
            result = {'index': index, 'beams': decoded}
        except Exception as e:
            # Record failures per example so one bad item does not abort
            # the whole run.
            result = {'index': index, 'error': str(e)}
        return (json.dumps(result) + '\n')

    def _debug(self, model, sliced_data, output):
        """Compute per-example losses with debug history and stream them out."""
        for (i, item) in enumerate(tqdm.tqdm(sliced_data)):
            ((_, history),) = model.compute_loss([item], debug=True)
            output.write((json.dumps({'index': i, 'history': history}) + '\n'))
            output.flush()

    def _visualize_attention(self, model, beam_size, output_history, sliced_data, res1file, res2file, res3file, output):
        """Print the running fraction of 'extra'-hardness examples solved by
        at least one of three earlier result files."""
        res1 = json.load(open(res1file, 'r'))
        res1 = res1['per_item']
        res2 = json.load(open(res2file, 'r'))
        res2 = res2['per_item']
        res3 = json.load(open(res3file, 'r'))
        res3 = res3['per_item']
        interest_cnt = 0
        cnt = 0
        for (i, item) in enumerate(tqdm.tqdm(sliced_data)):
            # Only consider the hardest ('extra') examples.
            if (res1[i]['hardness'] != 'extra'):
                continue
            cnt += 1
            # Skip items that all three result sets got wrong.
            if ((res1[i]['exact'] == 0) and (res2[i]['exact'] == 0) and (res3[i]['exact'] == 0)):
                continue
            interest_cnt += 1
            print(((interest_cnt * 1.0) / cnt))
def print_top3_scores(filename):
    """Print the top-3 topics of `filename` as 'name<TAB>id<TAB>score' lines."""
    for topic_id, score in get_top3_topics(filename):
        print('{}\t{}\t{}'.format(topic_map[topic_id], topic_id, score))
def get_lr_schedulers(enc_optim, dec_optim, enc_lr_gamma, dec_lr_gamma, enc_scheduler_type, dec_scheduler_type, epochs_per_stage):
    """Create one LR scheduler per optimiser (encoder first, then decoder).

    Cumulative stage boundaries become the milestones; the final cumulative
    epoch count is the schedule horizon.
    """
    milestones = np.cumsum(epochs_per_stage)
    max_epochs = milestones[-1]
    enc_sched = dt.misc.create_scheduler(scheduler_type=enc_scheduler_type, optim=enc_optim, gamma=enc_lr_gamma, milestones=milestones, max_epochs=max_epochs)
    dec_sched = dt.misc.create_scheduler(scheduler_type=dec_scheduler_type, optim=dec_optim, gamma=dec_lr_gamma, milestones=milestones, max_epochs=max_epochs)
    return [enc_sched, dec_sched]
def label2onehot(labels, dim):
    """Convert a batch of integer labels to one-hot vectors of width `dim`."""
    batch_size = labels.size(0)
    onehot = torch.zeros(batch_size, dim)
    # Set exactly one 1 per row, at each sample's label column.
    onehot[torch.arange(batch_size), labels.long()] = 1
    return onehot
def same_shapes(*xs):
    """Check that every argument has one and the same shape.

    Accepts taichi Matrix values, (nested) Python lists and taichi
    expressions. Returns (ok, error_message); error_message is None on
    success.
    """
    shapes = []
    for item in xs:
        if isinstance(item, Matrix):
            shapes.append(item.get_shape())
        elif isinstance(item, list):
            shapes.append(tuple(get_list_shape(item)))
        elif isinstance(item, Expr):
            shapes.append(tuple(item.ptr.get_rvalue_type().shape()))
        else:
            return (False, f'same_shapes() received an unexpected argument of type: {item}')
    # Exactly one distinct shape is required (zero args also fails).
    if len(set(shapes)) != 1:
        return (False, f'required shapes to be the same, got shapes {shapes}')
    return (True, None)
class csv_dataset(data.Dataset):
    """Dataset for loading a csv/tsv file of (text, label) rows.

    Args:
        path: path to the delimited file. A '.tsv' in the name forces a tab
            delimiter regardless of `delim`.
        tokenizer: optional tokenizer; when set, samples are encoded to ids.
        preprocess_fn: optional callable applied to raw text (also passed to
            the tokenizer when one is used).
        delim: column delimiter (default ',').
        binarize_sent: when truthy, labels are binarized via binarize_labels.
        drop_unlabeled: stored for callers; not applied here.
        text_key / label_key: column name(s) holding text and label(s).

    Each item is a dict: {'text', 'length', 'label'}.
    """

    def __init__(self, path, tokenizer=None, preprocess_fn=None, delim=',', binarize_sent=False, drop_unlabeled=False, text_key='sentence', label_key='label', **kwargs):
        self.is_lazy = False
        self.preprocess_fn = preprocess_fn
        self.SetTokenizer(tokenizer)
        self.path = path
        self.delim = delim
        self.text_key = text_key
        self.label_key = label_key
        self.drop_unlabeled = drop_unlabeled
        if ('.tsv' in self.path):
            self.delim = '\t'
        self.X = []
        self.Y = []
        try:
            cols = [text_key]
            if isinstance(label_key, list):
                cols += label_key
            else:
                cols += [label_key]
            data = pd.read_csv(self.path, sep=self.delim, usecols=cols, encoding='latin-1')
        except Exception:
            # Fall back to text-only loading when the label column(s) are
            # missing; labels are filled with -1 below.
            data = pd.read_csv(self.path, sep=self.delim, usecols=[text_key], encoding='latin-1')
        data = data.dropna(axis=0)
        self.X = data[text_key].values.tolist()
        try:
            self.Y = data[label_key].values
        except Exception as e:
            # No labels available: mark every sample as unlabeled (-1).
            self.Y = (np.ones(len(self.X)) * (- 1))
        if binarize_sent:
            self.Y = binarize_labels(self.Y, hard=binarize_sent)

    def SetTokenizer(self, tokenizer):
        """Install (or disable) the tokenizer.

        Passing None disables tokenization but keeps any previously set
        tokenizer object so it can be re-enabled later.
        """
        if (tokenizer is None):
            self.using_tokenizer = False
            if (not hasattr(self, '_tokenizer')):
                self._tokenizer = tokenizer
        else:
            self.using_tokenizer = True
            self._tokenizer = tokenizer

    def GetTokenizer(self):
        """Return the stored tokenizer object (even when disabled)."""
        return self._tokenizer

    @property
    def tokenizer(self):
        # BUG FIX: without @property, `self.tokenizer` evaluated to the bound
        # method object — never None — so __getitem__ always attempted
        # `.EncodeAsIds` on it and crashed whenever no tokenizer was in use.
        if self.using_tokenizer:
            return self._tokenizer
        return None

    def __len__(self):
        return len(self.X)

    def __getitem__(self, index):
        """Return one sample, tokenizing / preprocessing text (and string
        labels) on the fly."""
        x = self.X[index]
        if (self.tokenizer is not None):
            x = self.tokenizer.EncodeAsIds(x, self.preprocess_fn)
        elif (self.preprocess_fn is not None):
            x = self.preprocess_fn(x)
        y = self.Y[index]
        if isinstance(y, str):
            if (self.tokenizer is not None):
                y = self.tokenizer.EncodeAsIds(y, self.preprocess_fn)
            elif (self.preprocess_fn is not None):
                y = self.preprocess_fn(y)
        return {'text': x, 'length': len(x), 'label': y}

    def write(self, writer_gen=None, path=None, skip_header=False):
        """Write the dataset (plus extra columns from `writer_gen`) to csv.

        `writer_gen` yields the extra-column header first (unless
        skip_header), then one row of extra values per sample.
        """
        if (path is None):
            path = (self.path + '.results')
        print(('generating csv at ' + path))
        with open(path, 'w') as csvfile:
            c = csv.writer(csvfile, delimiter=self.delim)
            if (writer_gen is not None):
                if (not skip_header):
                    header = (((self.label_key,) + tuple(next(writer_gen))) + (self.text_key,))
                    c.writerow(header)
                for (i, row) in enumerate(writer_gen):
                    row = (((self.Y[i],) + tuple(row)) + (self.X[i],))
                    c.writerow(row)
            else:
                c.writerow([self.label_key, self.text_key])
                for row in zip(self.Y, self.X):
                    c.writerow(row)
def parse_args():
    """Parse command-line arguments for the detector benchmark script.

    Returns the argparse namespace with config path, input shape, modality
    and optional config overrides.
    """
    parser = argparse.ArgumentParser(description='Train a detector')
    parser.add_argument('config', help='train config file path')
    # Default input: a point cloud of 40k points with 4 features each.
    parser.add_argument('--shape', type=int, nargs='+', default=[40000, 4], help='input point cloud size')
    parser.add_argument('--modality', type=str, default='point', choices=['point', 'image', 'multi'], help='input data modality')
    parser.add_argument('--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed.')
    args = parser.parse_args()
    return args
class Logger(object):
    """Tee-style logger: mirrors writes to the terminal and a log file."""

    def __init__(self, filename='Default.log'):
        self.terminal = sys.stdout
        self.log = open(filename, 'w')

    def delink(self):
        """Stop file logging by closing the underlying log file."""
        self.log.close()

    def writeTerminalOnly(self, message):
        """Write to the terminal without duplicating into the log file."""
        self.terminal.write(message)

    def write(self, message):
        """Write to both the terminal and the log file."""
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        """No-op: present so the object quacks like a stream."""
def get_dataset(args: DataTrainingArguments, tokenizer: PreTrainedTokenizer, evaluate=False, local_rank=(- 1)):
    """Build the language-modelling dataset for training or evaluation.

    Uses the eval file when `evaluate` is set, and a line-by-line dataset
    (one sample per line) when `args.line_by_line` is set; otherwise a
    contiguous block dataset.
    """
    file_path = args.eval_data_file if evaluate else args.train_data_file
    dataset_cls = LineByLineTextDataset if args.line_by_line else TextDataset
    return dataset_cls(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
def nets_to_graph_def(nets, shapes=None, **kwargs):
    """Convert caffe2 nets into a TensorBoard GraphDef.

    NOTE(review): the `shapes` parameter is immediately overwritten with an
    empty dict, so caller-supplied shapes are ignored — presumably because
    shape inference is unreliable here; confirm before relying on it.
    """
    shapes = {}
    # Deep-copy the protobufs so callers' nets are never mutated downstream.
    nets = [copy.deepcopy(net.Proto()) for net in nets]
    shapes = copy.deepcopy(shapes)
    return protos_to_graph_def(nets, shapes, **kwargs)
def CntSelfEdges(tspec, *args):
    """Dispatch CntSelfEdges to the wrapper matching the graph's exact type.

    Raises:
        TypeError: when `tspec` is not one of the supported SNAP graph types.
    """
    dispatch = (
        (PUNGraph, CntSelfEdges_PUNGraph),
        (PUndirNet, CntSelfEdges_PUndirNet),
        (PDirNet, CntSelfEdges_PDirNet),
        (PNGraph, CntSelfEdges_PNGraph),
        (PNEANet, CntSelfEdges_PNEANet),
        (PNGraphMP, CntSelfEdges_PNGraphMP),
        (PNEANetMP, CntSelfEdges_PNEANetMP),
    )
    for graph_type, impl_fn in dispatch:
        # Exact type match (not isinstance), as in the generated wrappers.
        if type(tspec) == graph_type:
            return impl_fn(tspec, *args)
    raise TypeError('First argument has invalid type')
class arcsine_gen(rv_continuous):
    """Standard arcsine distribution on [0, 1].

    pdf(x) = 1 / (pi * sqrt(x * (1 - x)))
    """

    def _shape_info(self):
        # No shape parameters.
        return []

    def _pdf(self, x):
        # The density diverges at the endpoints; suppress the expected
        # divide-by-zero warning and let it be +inf there.
        with np.errstate(divide='ignore'):
            return (1.0 / np.pi) / np.sqrt(x * (1 - x))

    def _cdf(self, x):
        return (2.0 / np.pi) * np.arcsin(np.sqrt(x))

    def _ppf(self, q):
        return np.sin((np.pi / 2.0) * q) ** 2.0

    def _stats(self):
        # mean, variance, skewness, excess kurtosis.
        mean = 0.5
        var = 1.0 / 8
        skew = 0
        kurt = (- 3.0) / 2.0
        return (mean, var, skew, kurt)

    def _entropy(self):
        return (- 0.)
class Partition6(nn.Module):
    """Pipeline-parallel partition holding T5 encoder blocks 18-20 on cuda:6.

    NOTE(review): auto-generated by a model-partitioning tool — layer
    scopes, tensor lists and the lookup table come from the generator;
    regenerate rather than edit by hand.
    """
    # Fully-qualified scopes of the layers owned by this partition.
    LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/T5Block[18]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[19]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[20]']
    # Standalone parameters/buffers owned by this partition (none here).
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:6'):
        super().__init__()
        # Register the owned layers as submodules l_0, l_1, l_2.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        b = p = 0
        # Register standalone tensors as parameters (p_i) or buffers (b_i).
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # Structure of the flattened forward args: (attention_mask, x0, x1).
        self.input_structure = [1, 1, 1]
        # Maps local module names back to original-model state-dict prefixes.
        self.lookup = {'l_0': 'encoder.18', 'l_1': 'encoder.19', 'l_2': 'encoder.20'}
        self.to(self.device)

    def forward(self, *args):
        # x0 is the shared relative position bias; x1 the hidden states.
        (attention_mask, x0, x1) = unflatten(args, self.input_structure)
        t_0 = self.l_0(x1, attention_mask=attention_mask, position_bias=x0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None)
        t_0 = self.l_1(t_0, attention_mask=attention_mask, position_bias=x0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None)
        t_0 = self.l_2(t_0, attention_mask=attention_mask, position_bias=x0, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None)
        # Pass the position bias on to the next partition with the outputs.
        return list(flatten((x0, t_0)))

    # The remaining methods delegate to module-level helpers that translate
    # between partition-local and original-model naming (via self.lookup).
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, state):
        return load_state_dict(self, state)

    def named_parameters(self, recurse=True):
        return named_parameters(self, recurse=recurse)

    def named_buffers(self, recurse=True):
        return named_buffers(self, recurse=recurse)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
def conv1x1(in_plane, out_plane, stride=1):
    """1x1 convolution without bias (channel projection / downsampling)."""
    conv = nn.Conv2d(in_plane, out_plane, kernel_size=1, stride=stride, padding=0, bias=False)
    return conv
def should_stop_early(args, valid_loss):
    """Early-stopping check driven by `args.patience`.

    The best metric value and a no-improvement counter are stored as
    attributes on the function itself, so state persists across calls.
    Returns True once the metric has failed to improve for
    `args.patience` consecutive calls.
    """
    if valid_loss is None:
        # No validation performed this round.
        return False
    if args.patience <= 0:
        # Early stopping disabled.
        return False

    def improved(current, best):
        if args.maximize_best_checkpoint_metric:
            return current > best
        return current < best

    best_so_far = getattr(should_stop_early, 'best', None)
    if best_so_far is None or improved(valid_loss, best_so_far):
        should_stop_early.best = valid_loss
        should_stop_early.num_runs = 0
        return False
    should_stop_early.num_runs += 1
    if should_stop_early.num_runs >= args.patience:
        logger.info("early stop since valid performance hasn't improved for last {} runs".format(args.patience))
        return True
    return False
def download(url, folder='.', overwrite=False, verbose=True):
    """Download one URL (or a list of URLs) into `folder`.

    When a target file already exists it is either overwritten
    (overwrite=True) or saved under a de-duplicated name like
    'name(2).ext' (overwrite=False).

    Returns:
        The download path (single URL) or list of paths (list of URLs).
    """
    import urllib.request
    import os
    import sys

    def rename_path(downloadpath):
        """Produce the next 'name(N).ext' variant of `downloadpath`."""
        splitfullpath = downloadpath.split(os.path.sep)
        fname = splitfullpath[(- 1)]
        fnamesplit = fname.split('.')
        newname = fnamesplit[0]
        newnamesplit = newname.split('(')
        if (len(newnamesplit) == 1):
            # No '(N)' suffix yet; start counting at 2.
            num = 1
        else:
            num = int(newnamesplit[(- 1)][:(- 1)])
        num += 1
        newname = '{}({}).{}'.format(newnamesplit[0], num, fnamesplit[(- 1)])
        return os.path.sep.join(((splitfullpath[:(- 1)] + newnamesplit[:(- 1)]) + [newname]))

    # Python 2 compatibility shim for urlretrieve.
    if (sys.version_info < (3,)):
        urlretrieve = urllib.urlretrieve
    else:
        urlretrieve = urllib.request.urlretrieve
    folder = os.path.abspath(os.path.expanduser(folder))
    if (not os.path.exists(folder)):
        os.makedirs(folder)
    if isinstance(url, str):
        filenames = [url.split('/')[(- 1)]]
    elif isinstance(url, list):
        filenames = [u.split('/')[(- 1)] for u in url]
    downloadpath = [os.path.sep.join([folder, f]) for f in filenames]
    for (i, target) in enumerate(downloadpath):
        if os.path.exists(target):
            if (overwrite is True):
                if (verbose is True):
                    print('overwriting {}'.format(target))
            elif (overwrite is False):
                # BUG FIX: this loop previously tested `os.path.exists is True`,
                # comparing the function object itself to True (always False),
                # so the rename-on-collision never ran and existing files were
                # silently overwritten. Call it on the candidate path instead.
                while os.path.exists(target):
                    target = rename_path(target)
                if (verbose is True):
                    print('file already exists, new file is called {}'.format(target))
                downloadpath[i] = target
    urllist = (url if isinstance(url, list) else [url])
    for (u, f) in zip(urllist, downloadpath):
        print('Downloading {}'.format(u))
        urlretrieve(u, f)
        print((' saved to: ' + f))
    print('Download completed!')
    return (downloadpath if isinstance(url, list) else downloadpath[0])
def unique(l):
    """Return the distinct elements of `l`, keeping first-seen order.

    Membership is checked with ==, not hashing, so unhashable elements
    (e.g. lists) are supported.
    """
    distinct = []
    for item in l:
        if item not in distinct:
            distinct.append(item)
    return distinct
def ref_all_gather(x_data, n_devices):
    """Reference all-gather: device i contributes `x_data` scaled by i."""
    return [x_data * device_id for device_id in range(n_devices)]
def evaluate(args, model, tokenizer, mode, prefix=''):
    """Evaluate a multi-label classifier on the `mode` split (e.g. 'dev'/'test').

    Writes the aggregated metrics, optionally all logits, and per-class ROC
    AUC values into args.output_dir.

    Returns:
        (result dict, raw sigmoid predictions array, dataframe with
        index/labels/pred_labels columns).
    """
    eval_task = args.task_name
    eval_output_dir = args.output_dir
    eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, mode)
    if ((not os.path.exists(eval_output_dir)) and (args.local_rank in [(- 1), 0])):
        os.makedirs(eval_output_dir)
    args.eval_batch_size = (args.per_gpu_eval_batch_size * max(1, args.n_gpu))
    eval_sampler = SequentialSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
    # Multi-GPU: wrap once, unless the caller already did.
    if ((args.n_gpu > 1) and (not isinstance(model, torch.nn.DataParallel))):
        model = torch.nn.DataParallel(model)
    logger.info('***** Running evaluation {} *****'.format(prefix))
    logger.info(' Num examples = %d', len(eval_dataset))
    logger.info(' Batch size = %d', args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    preds = None
    out_label_ids = None
    for batch in tqdm(eval_dataloader, desc='Evaluating'):
        model.eval()
        batch = tuple((t.to(args.device) for t in batch))
        with torch.no_grad():
            inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
            if (args.model_type != 'distilbert'):
                # Only BERT-family models consume token_type_ids.
                inputs['token_type_ids'] = (batch[2] if (args.model_type in ['bert', 'xlnet', 'albert']) else None)
            outputs = model(**inputs)
            (tmp_eval_loss, logits) = outputs[:2]
            # Independent per-label probabilities (multi-label setting).
            logits = logits.sigmoid()
            eval_loss += tmp_eval_loss.mean().item()
        nb_eval_steps += 1
        # Accumulate predictions and gold labels across batches.
        if (preds is None):
            preds = logits.detach().cpu().numpy()
            out_label_ids = inputs['labels'].detach().cpu().numpy()
        else:
            preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
            out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
    eval_loss = (eval_loss / nb_eval_steps)
    df = pd.read_csv(os.path.join(args.data_dir, '{}.tsv'.format(mode)), sep='\t')
    # Binarize probabilities at 0.5 and encode as 'index_value' tokens.
    threshold = 0.5
    y_pred = (preds > threshold).astype(int)
    df['pred_labels'] = list(map((lambda x: ','.join(['{}_{}'.format(i, v) for (i, v) in enumerate(x)])), y_pred))
    result = eval_hoc(df, mode)
    roc_auc = eval_roc_auc(out_label_ids, preds, args.num_labels)
    result['micro_roc_auc'] = roc_auc['micro']
    result['loss'] = eval_loss
    output_eval_file = os.path.join(args.output_dir, (args.result_prefix + '{}_results.txt'.format((mode if (mode != 'dev') else 'eval'))))
    with open(output_eval_file, 'w') as writer:
        logger.info('***** Eval results {} *****'.format(prefix))
        for key in result.keys():
            logger.info(' %s = %s', key, str(result[key]))
            writer.write(('%s = %s\n' % (key, str(result[key]))))
    if args.output_all_logits:
        output_all_logit_file = os.path.join(args.output_dir, (args.result_prefix + '{}_all_logits.txt'.format(mode)))
        with open(output_all_logit_file, 'w') as writer:
            logger.info('***** Output all logits {} *****'.format(prefix))
            for sample in preds:
                writer.write(('\t'.join(['{:.3f}'.format(v) for v in sample]) + '\n'))
    output_roc_auc_file = os.path.join(args.output_dir, (args.result_prefix + '{}_roc_auc_for_each_class.txt'.format(mode)))
    with open(output_roc_auc_file, 'w') as writer:
        logger.info('***** output ROC curve and ROC area for each class {} *****'.format(prefix))
        for key in roc_auc.keys():
            logger.info(' %s = %s', key, str(roc_auc[key]))
            writer.write(('%s = %s\n' % (key, str(roc_auc[key]))))
    return (result, preds, df[['index', 'labels', 'pred_labels']])
def ResNet101_rpn_conv4_frozen_features(model):
    """RPN model on a ResNet-101 conv4 body with the conv body frozen."""
    body_fn = ResNet.add_ResNet101_conv4_body
    return build_generic_detection_model(model, body_fn, freeze_conv_body=True)
def script_qconfig(qconfig):
    """TorchScript-compile a QConfig's observer factories.

    Instantiates the activation and weight observers, scripts each one and
    repackages the underlying script-module handles in a new QConfig.
    """
    scripted_activation = torch.jit.script(qconfig.activation())._c
    scripted_weight = torch.jit.script(qconfig.weight())._c
    return QConfig(activation=scripted_activation, weight=scripted_weight)
class PrefixSet(object):
    """Stores every prefix of the trained words for O(1) membership tests."""

    def __init__(self):
        self._set = set()

    def train(self, word_s):
        """Add every non-empty prefix of each word in `word_s`."""
        for word in word_s:
            for end in range(1, len(word) + 1):
                self._set.add(word[:end])

    def __contains__(self, key):
        return key in self._set
class NCESoftmaxLoss(nn.Module):
    """InfoNCE-style symmetric softmax contrastive loss.

    Rows of x and y are L2-normalised; their cosine-similarity matrix is
    scaled by 1/nce_t and matching rows (the diagonal) are the positive
    pairs. Returns the per-sample sum of the x->y and y->x cross-entropy
    losses (reduction='none').
    """

    def __init__(self, nce_t):
        super(NCESoftmaxLoss, self).__init__()
        self.loss = nn.CrossEntropyLoss(reduction='none')
        self.nce_t = nce_t

    def forward(self, x_ret, y_ret):
        # Each *_ret is a (features, extra) pair; only features are used.
        (x, _) = x_ret
        (y, _) = y_ret
        bsz = x.shape[0]
        # BUG FIX: the two normalised matrices were juxtaposed with no
        # operator (a spurious call expression, a TypeError at runtime);
        # the intended operation is the matrix product giving the
        # (bsz x bsz) similarity matrix.
        x_norm = x / torch.norm(x, dim=1, keepdim=True)
        y_norm = y / torch.norm(y, dim=1, keepdim=True)
        scores = (x_norm @ y_norm.t()) / self.nce_t
        label = torch.arange(bsz, device=x.device)
        loss = (self.loss(scores, label) + self.loss(scores.t(), label))
        return loss
class SubbandNet(nn.Module):
    """Stack of SubbandSeries modules whose per-level outputs are summed
    across subbands.

    Everything is config-driven: bandwidths, subband count/angles and
    output levels all come from `cfg`. SECURITY NOTE(review): numeric
    config fields are parsed with eval(str(...)) so they may be arithmetic
    expressions — never feed untrusted config through this class.
    """

    def __init__(self, _, cfg):
        super().__init__()
        self.cfg = cfg
        self.dim = cfg.dim
        self.out_dim = cfg.out_dim
        self.hid_dim = cfg.hid_dim
        self.bw_span_diag = getattr(cfg, 'bw_span_diag', False)
        # max_bw may be an expression string; optionally scaled by sqrt(2)
        # so the bandwidth spans the diagonal.
        self.max_bw = float(eval(str(cfg.max_bw)))
        if self.bw_span_diag:
            self.max_bw *= np.sqrt(2.0)
        self.bws = cfg.bws
        # Bandwidth (lower, upper) fractions scaled to absolute values.
        self.bws = [((float(eval(str(l))) * self.max_bw), (float(eval(str(u))) * self.max_bw)) for (l, u) in self.bws]
        self.quantize = getattr(cfg, 'quantize_freq', False)
        self.n_subbands = int(getattr(cfg, 'n_subbands', 4))
        # Subband angular coverage and spacing, as fractions of pi.
        self.sb_agl_range = (float(eval(str(getattr(cfg, 'sb_agl_range', 0.25)))) * np.pi)
        self.sb_agl_delta = (float(eval(str(getattr(cfg, 'sb_agl_delta', 0.25)))) * np.pi)
        self.sbs = nn.ModuleList()
        for i in range(self.n_subbands):
            # Subband i covers [la, ua]: shifted by i * delta and centred
            # so subband 0 straddles zero.
            la = ((i * self.sb_agl_delta) - (0.5 * self.sb_agl_range))
            ua = (la + self.sb_agl_range)
            self.sbs.append(SubbandSeries(self.bws, self.hid_dim, self.out_dim, la, ua, self.n_subbands, quantize=self.quantize, mix_init_type=getattr(cfg, 'mix_init_type', 'none'), out_init_type=getattr(cfg, 'out_init_type', 'none'), acc_method=getattr(cfg, 'acc_method', 'sum'), fan_uniform=getattr(cfg, 'fan_uniform', False)))
        self.out_levels = getattr(cfg, 'out_levels', None)
        self.inp_mult_const = float(getattr(cfg, 'inp_mult_const', 0.5))

    def get_modules(self):
        """Modules considered for pruning (the subband series list)."""
        return self.sbs

    def prune_model(self, proportion, n):
        """L1-unstructured-prune each subband's 'weight' by `proportion`.

        Returns `proportion` times the parameter count (in millions) of
        the pruned modules.
        """
        from torch.nn.utils import prune
        num_params_to_prune = 0
        modules = self.get_modules()
        for module in modules:
            # Parameter count in millions (hence the 1e-6 factor).
            num_params_to_prune += (sum([p.numel() for p in module.parameters()]) * 1e-06)
            prune.l1_unstructured(module, 'weight', proportion)
            prune.remove(module, 'weight')
        return (proportion * num_params_to_prune)

    def forward(self, x, fst_n=None, out_levels=None, retain_inter_grad=False):
        """Run every subband and aggregate the per-level outputs.

        Returns a dict holding per-subband lists ('sb_*' keys from the
        subbands), the per-level sums over subbands ('all_out'/'all_acc'),
        and the level subset selected by `out_levels` ('out'/'acc').
        """
        out_levels = (self.out_levels if (out_levels is None) else out_levels)
        xdim = len(x.shape)
        if (xdim == 2):
            # Promote unbatched input to a batch of one.
            x = x.unsqueeze(0)
        x = (x * self.inp_mult_const)
        all_sb_res = {}
        # Collect each result key as a list over subbands.
        for sbs_i in self.sbs:
            sbs_out = sbs_i(x, fst_n=fst_n, retain_inter_grad=retain_inter_grad)
            for (k, lst) in sbs_out.items():
                if (k not in all_sb_res):
                    all_sb_res[k] = []
                all_sb_res[k].append(lst)
        all_res = {}
        # Transpose: key -> per-level lists of per-subband values.
        for (key, sb_lst) in all_sb_res.items():
            assert (len(sb_lst) == self.n_subbands)
            all_res[key] = []
            for res_i in range(len(sb_lst[0])):
                lst = []
                for sb_i in range(self.n_subbands):
                    lst.append(sb_lst[sb_i][res_i])
                all_res[key].append(lst)
        for key in ['out', 'acc']:
            key_all = ('all_%s' % key)
            all_res[key_all] = []
            # Sum the subband contributions at every level.
            for lst in all_res[('sb_%s' % key)]:
                slst = sum(lst)
                if retain_inter_grad:
                    slst.retain_grad()
                all_res[key_all].append(slst)
            if (out_levels is not None):
                all_res[key] = [all_res[key_all][i] for i in out_levels]
            else:
                all_res[key] = all_res[key_all]
        all_res['out_lst'] = all_res['acc']
        all_res['all_out_lst'] = all_res['all_acc']
        return all_res
def test_kernel_and_bias_defaults():
    """Aggregator layers should come up with Keras defaults: Glorot-uniform
    kernels, zero biases, and no regularizers or constraints."""
    model = GraphSAGE(layer_sizes=[4, 4], n_samples=[2, 2], input_dim=2, multiplicity=1)
    none_attrs = ('kernel_regularizer', 'bias_regularizer', 'kernel_constraint', 'bias_constraint')
    for agg in model._aggs:
        assert isinstance(agg.kernel_initializer, tf.initializers.GlorotUniform)
        assert isinstance(agg.bias_initializer, tf.initializers.Zeros)
        for attr in none_attrs:
            assert getattr(agg, attr) is None
def _load_csv(F):
names = F.readline().decode('ascii').strip().split(',')
rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
rec.dtype.names = names
return rec |
def main():
    """Train a GNN link predictor on OGB's ogbl-ddi and report Hits@K metrics."""
    parser = argparse.ArgumentParser(description='OGBL-DDI (GNN)')
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--log_steps', type=int, default=1)
    parser.add_argument('--use_sage', action='store_true')
    parser.add_argument('--num_layers', type=int, default=2)
    parser.add_argument('--hidden_channels', type=int, default=256)
    parser.add_argument('--dropout', type=float, default=0.5)
    parser.add_argument('--batch_size', type=int, default=(64 * 1024))
    parser.add_argument('--lr', type=float, default=0.005)
    parser.add_argument('--epochs', type=int, default=200)
    parser.add_argument('--eval_steps', type=int, default=5)
    parser.add_argument('--runs', type=int, default=10)
    args = parser.parse_args()
    print(args)
    device = (f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu')
    device = torch.device(device)
    dataset = PygLinkPropPredDataset(name='ogbl-ddi', transform=T.ToSparseTensor())
    data = dataset[0]
    adj_t = data.adj_t.to(device)
    split_edge = dataset.get_edge_split()
    # Fixed seed so the subsampled train-eval edge set is reproducible.
    torch.manual_seed(12345)
    idx = torch.randperm(split_edge['train']['edge'].size(0))
    # Subsample training edges to validation-set size for fast train-set eval.
    idx = idx[:split_edge['valid']['edge'].size(0)]
    split_edge['eval_train'] = {'edge': split_edge['train']['edge'][idx]}
    if args.use_sage:
        model = SAGE(args.hidden_channels, args.hidden_channels, args.hidden_channels, args.num_layers, args.dropout).to(device)
    else:
        model = GCN(args.hidden_channels, args.hidden_channels, args.hidden_channels, args.num_layers, args.dropout).to(device)
    # ogbl-ddi has no node features: learn a free embedding per node.
    emb = torch.nn.Embedding(data.adj_t.size(0), args.hidden_channels).to(device)
    predictor = LinkPredictor(args.hidden_channels, args.hidden_channels, 1, args.num_layers, args.dropout).to(device)
    evaluator = Evaluator(name='ogbl-ddi')
    # BUG FIX: the three loggers were all keyed by the same empty string, so
    # the dict literal collapsed to a single entry and per-metric results
    # could not be recorded. Restore one logger per ogbl-ddi Hits@K metric.
    loggers = {'Hits@10': Logger(args.runs, args), 'Hits@20': Logger(args.runs, args), 'Hits@30': Logger(args.runs, args)}
    for run in range(args.runs):
        torch.nn.init.xavier_uniform_(emb.weight)
        model.reset_parameters()
        predictor.reset_parameters()
        optimizer = torch.optim.Adam(((list(model.parameters()) + list(emb.parameters())) + list(predictor.parameters())), lr=args.lr)
        for epoch in range(1, (1 + args.epochs)):
            loss = train(model, predictor, emb.weight, adj_t, split_edge, optimizer, args.batch_size)
            if ((epoch % args.eval_steps) == 0):
                results = test(model, predictor, emb.weight, adj_t, split_edge, evaluator, args.batch_size)
                for (key, result) in results.items():
                    loggers[key].add_result(run, result)
                if ((epoch % args.log_steps) == 0):
                    for (key, result) in results.items():
                        (train_hits, valid_hits, test_hits) = result
                        print(key)
                        print(f'Run: {(run + 1):02d}, Epoch: {epoch:02d}, Loss: {loss:.4f}, Train: {(100 * train_hits):.2f}%, Valid: {(100 * valid_hits):.2f}%, Test: {(100 * test_hits):.2f}%')
                    print('---')
        # Per-run summary.
        for key in loggers.keys():
            print(key)
            loggers[key].print_statistics(run)
    # Aggregate summary over all runs.
    for key in loggers.keys():
        print(key)
        loggers[key].print_statistics()
def align_to_char_level(span_starts, span_ends, token_to_char, subtoken_map=None, new_token_map=None):
    """Map (sub)token-level spans to character-level offsets.

    Args:
        span_starts, span_ends: parallel sequences of span boundary indices.
        token_to_char: per-token (char_start, char_end) pairs.
        subtoken_map: optional subtoken -> token index mapping; None entries
            mark unmappable positions.
        new_token_map: optional additional token reindexing applied after
            subtoken_map.

    Returns:
        (char_map, reverse_char_map): char_map maps (start, end) ->
        (idx, (char_start, char_end)), or (None, None) when unmappable;
        reverse_char_map is the inverse keyed by the character span.
    """
    char_map = {}
    reverse_char_map = {}
    for (idx, (start, end)) in enumerate(zip(span_starts, span_ends)):
        # NOTE(review): .copy() implies start/end are numpy scalars (plain
        # Python ints have no .copy method) — confirm against callers.
        (new_start, new_end) = (start.copy(), end.copy())
        try:
            if (subtoken_map is not None):
                (new_start, new_end) = (subtoken_map[new_start], subtoken_map[new_end])
            if ((new_start is None) or (new_end is None)):
                # A boundary landed on an unmappable position.
                char_map[(start, end)] = (None, None)
                continue
            if (new_token_map is not None):
                (new_start, new_end) = (new_token_map[new_start], new_token_map[new_end])
            # Span start takes its token's first char; end takes the last char.
            (new_start, new_end) = (token_to_char[new_start][0], token_to_char[new_end][1])
            char_map[(start, end)] = (idx, (new_start, new_end))
            reverse_char_map[(new_start, new_end)] = (idx, (start, end))
        except IndexError:
            # Out-of-range index anywhere above: record the span as unmappable.
            char_map[(start, end)] = (None, None)
            continue
    return (char_map, reverse_char_map)
def get(identifier):
    """Resolve an activation-function identifier.

    A None identifier maps to the linear (identity) activation; anything else
    is looked up in this module's namespace.
    """
    if identifier is None:
        return linear
    return get_from_module(identifier, globals(), 'activation function')
def build_activation(act_func, inplace=True):
    """Instantiate an activation module by name.

    Args:
        act_func: one of 'relu', 'relu6', 'tanh', 'sigmoid', or None.
        inplace: forwarded to ReLU/ReLU6 only (the other activations have no
            in-place variant here).

    Returns:
        The corresponding nn.Module, or None when act_func is None.

    Raises:
        ValueError: for an unrecognized activation name.
    """
    if act_func is None:
        return None
    if act_func == 'relu':
        return nn.ReLU(inplace=inplace)
    if act_func == 'relu6':
        return nn.ReLU6(inplace=inplace)
    if act_func == 'tanh':
        return nn.Tanh()
    if act_func == 'sigmoid':
        return nn.Sigmoid()
    raise ValueError(('do not support: %s' % act_func))
def create_projection_head(args, device, use_checkpoint=True):
    """Build a DINO projection head on `device`, optionally restoring weights.

    Weights are loaded from args.load_from_head only when that path is set
    and use_checkpoint is enabled.
    """
    head = vits.__dict__['DINOHead'](in_dim=args.feat_dim, out_dim=args.mlp_out_dim, nlayers=args.num_mlp_layers)
    head.to(device)
    should_load = (args.load_from_head is not None) and (use_checkpoint == True)
    if should_load:
        print(f'NOTE: load head from {args.load_from_head}')
        state = torch.load(args.load_from_head, map_location='cpu')
        head.load_state_dict(state, strict=True)
    return head
def latex_dual(elt):
    """Return a LaTeX tableau rendering of `elt` in the dual convention.

    Each entry is rendered as \\overline{M - value} with M = rank + 2; a new
    row is started at each (weak) increase, rows are reversed, and the
    resulting tableau is conjugated before typesetting.

    NOTE(review): the exact crystal/tableau convention is inferred from the
    code — confirm against the documentation of elt's parent in Sage.
    """
    M = (elt.parent().cartan_type().rank() + 2)
    from sage.combinat.tableau import Tableau
    from sage.combinat.output import tex_from_array
    if (not elt):
        # The empty element renders as the empty-set symbol.
        return '{\\emptyset}'
    tab = [['\\overline{{{}}}'.format((M - elt[0].value))]]
    for i in range(1, len(elt)):
        # Start a new row when the sequence increases, or on a repeated
        # nonzero value; otherwise extend the current row.
        if ((elt[(i - 1)] < elt[i]) or ((elt[(i - 1)].value != 0) and (elt[(i - 1)] == elt[i]))):
            tab.append(['\\overline{{{}}}'.format((M - elt[i].value))])
        else:
            l = (len(tab) - 1)
            tab[l].append('\\overline{{{}}}'.format((M - elt[i].value)))
    for x in tab:
        # Rows were built left-to-right; reverse for the dual reading order.
        x.reverse()
    T = Tableau(tab).conjugate()
    return tex_from_array([list(row) for row in T])
class Blog(BaseDataset):
    __doc__ = f'''
    This data originates from blog posts. The raw HTML-documents of the blog posts were
    crawled and processed. The prediction task associated with the data is the
    prediction of the number of comments in the upcoming 24 hours. In order to simulate
    this situation, we choose a basetime (in the past) and select the blog posts that
    were published at most 72 hours before the selected base date/time. Then, we
    calculate all the features of the selected blog posts from the information that was
    available at the basetime, therefore each instance corresponds to a blog post. The
    target is the number of comments that the blog post received in the next 24 hours
    relative to the basetime.

    In the train data, the basetimes were in the years 2010 and 2011. In the test data
    the basetimes were in February and March 2012. This simulates the real-world
    situation in which training data from the past is available to predict events in
    the future.

    The train data was generated from different basetimes that may temporally overlap.
    Therefore, if you simply split the train into disjoint partitions, the underlying
    time intervals may overlap. Therefore, you should use the provided, temporally
    disjoint train and test splits in order to ensure that the evaluation is fair.

    {BASE_DATASET_DESCRIPTION}

    Features:
        Features 0-49 (float):
            50 features containing the average, standard deviation, minimum, maximum
            and median of feature 50-59 for the source of the current blog post, by
            which we mean the blog on which the post appeared. For example,
            myblog.blog.org would be the source of the post
            myblog.blog.org/post_2010_09_10
        Feature 50 (int):
            Total number of comments before basetime
        Feature 51 (int):
            Number of comments in the last 24 hours before the basetime
        Feature 52 (int):
            If T1 is the datetime 48 hours before basetime and T2 is the datetime 24
            hours before basetime, then this is the number of comments in the time
            period between T1 and T2
        Feature 53 (int):
            Number of comments in the first 24 hours after the publication of the blog
            post, but before basetime
        Feature 54 (int):
            The difference between Feature 51 and Feature 52
        Features 55-59 (int):
            The same thing as Features 50-51, but for links (trackbacks) instead of
            comments
        Feature 60 (float):
            The length of time between the publication of the blog post and basetime
        Feature 61 (int):
            The length of the blog post
        Features 62-261 (int):
            The 200 bag of words features for 200 frequent words of the text of the
            blog post
        Features 262-268 (int):
            Binary indicators for the weekday (Monday-Sunday) of the basetime
        Features 269-275 (int):
            Binary indicators for the weekday (Monday-Sunday) of the date of
            publication of the blog post
        Feature 276 (int):
            Number of parent pages: we consider a blog post P as a parent of blog post
            B if B is a reply (trackback) to P
        Features 277-279 (float):
            Minimum, maximum and average of the number of comments the parents received

    Targets:
        int:
            The number of comments in the next 24 hours (relative to baseline)

    Source:

    Examples:
        Load in the data set::
            >>> dataset = Blog()
            >>> dataset.shape
            (52397, 281)

        Split the data set into features and targets, as NumPy arrays::
            >>> X, y = dataset.split()
            >>> X.shape, y.shape
            ((52397, 279), (52397,))

        Perform a train/test split, also outputting NumPy arrays::
            >>> train_test_split = dataset.split(test_size=0.2, random_seed=42)
            >>> X_train, X_test, y_train, y_test = train_test_split
            >>> X_train.shape, y_train.shape, X_test.shape, y_test.shape
            ((41949, 279), (41949,), (10448, 279), (10448,))

        Output the underlying Pandas DataFrame::
            >>> df = dataset.to_pandas()
            >>> type(df)
            <class 'pandas.core.frame.DataFrame'>
    '''
    # BUG FIX: the original line was an unterminated string literal
    # (`_url = '`) — the download URL was lost during extraction, leaving the
    # file syntactically invalid. TODO(review): restore the real URL (the
    # description matches the UCI "BlogFeedback" data set) before any
    # download is attempted.
    _url = ''
    # Column indices of the feature and target columns in the raw CSV.
    _features = range(279)
    _targets = [279]

    def _prep_data(self, data: bytes) -> pd.DataFrame:
        """Unpack the downloaded ZIP archive and parse blogData_train.csv.

        Args:
            data: raw bytes of the downloaded ZIP archive.

        Returns:
            The headerless training CSV parsed into a pandas DataFrame.
        """
        buffer = io.BytesIO(data)
        with zipfile.ZipFile(buffer, 'r') as zip_file:
            csv = zip_file.read('blogData_train.csv').decode('utf-8')
            csv_file = io.StringIO(csv)
            df = pd.read_csv(csv_file, header=None)
        return df
def augment_dataset(d, programs):
    """Generate object-substituted variants of each program and execute them.

    For each (program_path, apartment_name) pair: read the program and its
    initial-state JSON, find objects whose precondition relations allow
    substitution (via the module-level `merge_dict`/`precondtorelation`
    tables), build substituted program + precondition variants, run each
    variant through check_programs.check_script to obtain graph state
    sequences, and (optionally) write all artifacts out.

    Args:
        d: shared dict used both as a "seen" set and, under multiprocessing,
            as a program-name -> worker-id progress map.
        programs: iterable of (program_file_path, apartment_name) pairs.

    NOTE(review): relies on module-level globals (multi_process, verbose,
    precondtorelation, merge_dict, thres, write_augment_data,
    augmented_data_dir) — confirm they are defined before calling.
    """
    # Shuffle so concurrent workers start from different programs.
    programs = np.random.permutation(programs).tolist()
    for (program_name, apt_name) in tqdm(programs):
        augmented_progs_i = []           # object-substituted program lines
        augmented_progs_i_new_inst = []  # executable variants with instance ids
        augmented_preconds_i = []        # matching precondition dicts
        state_list_i = []                # graph state sequences per variant
        if (program_name in d.keys()):
            # Already claimed/processed (possibly by another worker).
            continue
        if multi_process:
            d[program_name] = str(current_process())
        if (((len(d.keys()) % 20) == 0) and verbose):
            print(len(d.keys()))
        # The initial-state file mirrors the program path.
        state_file = program_name.replace('withoutconds', 'initstate').replace('.txt', '.json')
        with open(program_name, 'r') as f:
            lines_program = f.readlines()
        # The first 4 lines are header/metadata; the rest are script steps.
        program = lines_program[4:]
        with open(state_file, 'r') as fst:
            init_state = json.load(fst)
        # container -> [(object, relation), ...] gathered from preconditions.
        relations_per_object = {}
        for cstate in init_state:
            precond = [k for k in cstate.keys()][0]
            if (precond in precondtorelation.keys()):
                relation = precondtorelation[precond]
                object1 = cstate[precond][0][0]
                container = tuple(cstate[precond][1])
                if (container not in relations_per_object.keys()):
                    relations_per_object[container] = []
                relations_per_object[container] += [(object1, relation)]
        # container -> replacement object names drawn from merge_dict.
        object_replace_map = {}
        for container in relations_per_object.keys():
            replace_candidates = []
            for object_and_relation in relations_per_object[container]:
                if (object_and_relation in merge_dict.keys()):
                    replace_candidates.append(merge_dict[object_and_relation])
            intersection = []
            object_replace_map[container] = []
            # Only substitute when every relation contributed a non-empty
            # candidate set; use the intersection across relations.
            if ((len(replace_candidates) > 0) and (len([l for l in replace_candidates if (len(l) == 0)]) == 0)):
                intersection = list(set.intersection(*[set(l) for l in replace_candidates]))
            candidates = [x for x in intersection if (x != container[0])]
            if (len(candidates) > 0):
                # Pick up to 4 random replacements (cont counts the original too).
                cont = random.randint(1, min(len(candidates), 5))
                if (cont > 1):
                    object_replace = random.sample(candidates, (cont - 1))
                    object_replace_map[container] += object_replace
        objects_prog = object_replace_map.keys()
        npgs = 0
        cont = []
        for obj_and_id in objects_prog:
            cont.append(len(object_replace_map[obj_and_id]))
        ori_precond = init_state
        # Enumerate combinations of replacement choices across objects.
        recursive_selection = augmentation_utils.recursiveSelection(cont, 0, [])
        for rec_id in recursive_selection:
            new_lines = program
            precond_modif = copy.deepcopy(ori_precond)
            # Serialize with double quotes so the textual replacement below
            # matches the JSON-style formatting.
            precond_modif = str(precond_modif).replace("'", '"')
            for (iti, obj_and_id) in enumerate(objects_prog):
                (orign_object, idi) = obj_and_id
                object_new = object_replace_map[obj_and_id][rec_id[iti]]
                new_lines = [x.replace('<{}> ({})'.format(orign_object, idi), '<{}> ({})'.format(object_new.lower().replace(' ', '_'), idi)) for x in new_lines]
                precond_modif = precond_modif.replace('["{}", "{}"]'.format(orign_object, idi), '["{}", "{}"]'.format(object_new.lower().replace(' ', '_'), idi))
            try:
                # NOTE(review): this rebinds the outer init_state, so later
                # variants see the previous variant's preconditions as the
                # "original" — confirm this is intended.
                init_state = ast.literal_eval(precond_modif)
                (message, final_state, graph_state_list, input_graph, id_mapping, info, graph_helper, modified_script) = check_programs.check_script(new_lines, init_state, '../example_graphs/{}.json'.format(apt_name), None, False, {}, {})
            except:
                # HACK: bare except that drops into the debugger on ANY error
                # (including KeyboardInterrupt) — replace with targeted
                # exception handling before running unattended.
                pdb.set_trace()
            lines_program_newinst = []
            for script_line in modified_script:
                script_line_str = '[{}]'.format(script_line.action.name)
                if script_line.object():
                    script_line_str += ' <{}> ({})'.format(script_line.object().name, script_line.object().instance)
                if script_line.subject():
                    script_line_str += ' <{}> ({})'.format(script_line.subject().name, script_line.subject().instance)
                for (k, v) in id_mapping.items():
                    (obj_name, obj_number) = k
                    id = v
                    # Rewrite "<name> (graph_id)" as "<name> (script_num.graph_id)".
                    script_line_str = script_line_str.replace('<{}> ({})'.format(obj_name, id), '<{}> ({}.{})'.format(obj_name, obj_number, id))
                lines_program_newinst.append(script_line_str)
            augmented_progs_i_new_inst.append(lines_program_newinst)
            state_list_i.append(graph_state_list)
            augmented_progs_i.append(new_lines)
            augmented_preconds_i.append(init_state)
            npgs += 1
            if (npgs > thres):
                # Cap the number of variants generated per program.
                break
        if write_augment_data:
            augmentation_utils.write_data(augmented_data_dir, program_name, augmented_progs_i)
            augmentation_utils.write_data(augmented_data_dir, program_name, augmented_progs_i_new_inst, 'executable_programs/{}/'.format(apt_name))
            augmentation_utils.write_precond(augmented_data_dir, program_name, augmented_preconds_i)
            augmentation_utils.write_graph(augmented_data_dir, program_name, state_list_i, apt_name)
def remove_files_patterns(root_dir, patterns, ignores=None, verbose=False):
    """Delete files under root_dir matching any glob pattern, unless ignored.

    Args:
        root_dir: directory the patterns and ignores are joined against.
        patterns: glob patterns of files to remove.
        ignores: optional fnmatch patterns that exempt files from removal.
        verbose: forwarded to output() when a file is removed.
    """
    from itertools import chain
    ignores = ignores if ignores is not None else []
    matches = chain.from_iterable(glob.glob(os.path.join(root_dir, pattern)) for pattern in patterns)
    for path in matches:
        # Skip anything that matches an ignore pattern.
        if any(fnmatch.fnmatch(path, os.path.join(root_dir, ignore)) for ignore in ignores):
            continue
        output(('removing "%s"' % path), verbose=verbose)
        os.remove(path)
def get_pseudo_label_DS_for_one_segment(args, sample_gt_path):
    """Build a sparse pseudo-label for one segment from its step-score file.

    Args:
        args: namespace providing label_find_matched_steps_criteria,
            label_find_matched_steps_for_segments_thresh and
            label_find_matched_steps_for_segments_topK.
        sample_gt_path: path to the .npy file of per-step scores for the segment.

    Returns:
        dict with 'indices' (matched step ids) and 'values' (their scores).
    """
    step_scores = np.load(sample_gt_path)
    # Cleanup: the original also parsed video/segment ids out of the path
    # (split('/')[-2] and [-1]) but never used them; those dead locals are
    # removed here.
    (matched_steps, matched_steps_scores) = find_matching_of_a_segment(step_scores, criteria=args.label_find_matched_steps_criteria, threshold=args.label_find_matched_steps_for_segments_thresh, topK=args.label_find_matched_steps_for_segments_topK)
    pseudo_label_DS = {'indices': matched_steps, 'values': matched_steps_scores}
    return pseudo_label_DS
def test_scalar_norm_optimization(rng, config_ocp, y, geometry, F, bcs, u, p):
    """Drive the squared L2-norm of the state toward a random target via BFGS.

    All arguments are pytest fixtures providing FEniCS/cashocs objects
    (state y, control u, adjoint p, weak form F, boundary conditions bcs).
    Asserts the tracking residual is numerically zero after optimization and
    that the computed gradient passes three Taylor (gradient) tests.
    """
    config_ocp.set('OptimizationRoutine', 'algorithm', 'bfgs')
    config_ocp.set('OptimizationRoutine', 'rtol', '1e-3')
    # Small nonzero initial control so the state is not trivially zero.
    u.vector().vec().set(0.001)
    u.vector().apply('')
    norm_y = ((y * y) * geometry.dx)
    tracking_goal = rng.uniform(0.25, 0.75)
    J = cashocs.ScalarTrackingFunctional(norm_y, tracking_goal)
    config_ocp.set('LineSearch', 'initial_stepsize', '4e3')
    ocp = cashocs.OptimalControlProblem(F, bcs, J, y, u, p, config=config_ocp)
    ocp.solve(algorithm='bfgs', rtol=0.001)
    # Tracking cost 0.5 * (||y||^2 - goal)^2 should be driven to (near) zero.
    assert ((0.5 * pow((assemble(norm_y) - tracking_goal), 2)) < 1e-15)
    # Three repetitions on purpose: each gradient test draws fresh random
    # perturbation directions from rng; a rate > 1.9 indicates second-order
    # Taylor convergence, i.e. a correct gradient.
    assert (cashocs.verification.control_gradient_test(ocp, rng=rng) > 1.9)
    assert (cashocs.verification.control_gradient_test(ocp, rng=rng) > 1.9)
    assert (cashocs.verification.control_gradient_test(ocp, rng=rng) > 1.9)
def extract_class(file_name):
    """Extract the class-name part of a '<number>_<class>.<ext>' filename.

    The .jpg pattern is tried first, then .png (preserving the original
    lookup order); returns None when neither matches.
    """
    for pattern in ('(\\d+)_(.+)\\.jpg', '(\\d+)_(.+)\\.png'):
        found = re.search(pattern, file_name)
        if found:
            return found.group(2)
    return None
# NOTE(review): this bare name looks like a decorator that lost its leading
# '@' (i.e. '@_data_model' applied to GraphData below). As written it is a
# no-op expression statement — confirm against the original source.
_data_model
class GraphData():
    """Typed wrapper around a graph JSON payload, holding its dispatches."""

    def __init__(self, j: Dict[(str, Any)]) -> None:
        # Expects j to contain a 'dispatches' list; each entry is wrapped
        # in a Dispatch model object.
        dispatches = j['dispatches']
        self.dispatches = [Dispatch(x) for x in dispatches]
class DensityPlot(GraphicPrimitive):
    """Sage graphics primitive for a density plot over a 2-D data grid."""

    def __init__(self, xy_data_array, xrange, yrange, options):
        # xy_data_array: nested sequence of scalar values (row-major grid);
        # xrange/yrange: (min, max) pairs for the plot extent.
        self.xrange = xrange
        self.yrange = yrange
        self.xy_data_array = xy_data_array
        self.xy_array_row = len(xy_data_array)
        self.xy_array_col = len(xy_data_array[0])
        GraphicPrimitive.__init__(self, options)

    def get_minmax_data(self):
        """Return the bounding-box data of this primitive as a dict."""
        from sage.plot.plot import minmax_data
        return minmax_data(self.xrange, self.yrange, dict=True)

    def _allowed_options(self):
        """Describe the options this primitive accepts."""
        return {'plot_points': 'How many points to use for plotting precision', 'cmap': 'the name of a predefined colormap,\n a list of colors or an instance of a\n matplotlib Colormap. Type: import matplotlib.cm; matplotlib.cm.datad.keys()\n for available colormap names.', 'interpolation': 'What interpolation method to use'}

    def _repr_(self):
        """Short description used by Sage's repr machinery."""
        return ('DensityPlot defined by a %s x %s data grid' % (self.xy_array_row, self.xy_array_col))

    def _render_on_subplot(self, subplot):
        """Draw the data grid onto a matplotlib subplot via imshow."""
        options = self.options()
        cmap = get_cmap(options['cmap'])
        (x0, x1) = (float(self.xrange[0]), float(self.xrange[1]))
        (y0, y1) = (float(self.yrange[0]), float(self.yrange[1]))
        # origin='lower' puts the first data row at y0 (math orientation,
        # not image orientation).
        subplot.imshow(self.xy_data_array, origin='lower', cmap=cmap, extent=(x0, x1, y0, y1), interpolation=options['interpolation'])
def drop_out(input, keep_prob, is_train):
    """Apply dropout when training; act as the identity at inference time.

    Cleanup: the original called tf.nn.dropout in both branches (the eval
    branch with keep_prob forced to 1, which keeps every unit); the two
    branches are merged here with identical behavior.

    Args:
        input: tensor to apply dropout to. (Name shadows the builtin but is
            kept for caller compatibility.)
        keep_prob: probability of keeping each unit during training.
        is_train: truthy when in training mode.

    Returns:
        The dropout-masked tensor (training) or the unmasked tensor (eval).
    """
    if not is_train:
        keep_prob = 1  # keep everything: dropout becomes the identity
    return tf.nn.dropout(input, keep_prob)
def pil_loader(path: str) -> Image.Image:
    """Open the image at `path` and return it decoded as an RGB PIL image.

    The file is opened inside a `with` block so the handle is closed even
    though the returned image outlives it (convert() forces a full decode).
    """
    with open(path, 'rb') as handle:
        image = Image.open(handle)
        return image.convert('RGB')
# Non-code residue from a dataset-viewer web page capture; commented out so
# the module remains valid Python:
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.