code stringlengths 101 5.91M |
|---|
def _normalize_clip_observation(x, clip_range=(-5.0, 5.0)):
    """Normalize observations with a running mean/std and clip to `clip_range`.

    The default is an immutable tuple: the original mutable list default would
    be shared across every call of this function.

    Returns:
        (norm_x, rms): the clipped, standardized tensor and the
        RunningMeanStd tracker built for x's per-feature shape.
    """
    rms = RunningMeanStd(shape=x.shape[1:])
    norm_x = tf.clip_by_value((x - rms.mean) / rms.std, min(clip_range), max(clip_range))
    return (norm_x, rms)
def latency(args, model_path, forecaster, train_loader, test_loader, records):
    """Benchmark per-batch inference latency of `forecaster` across frameworks.

    Writes trimmed-mean and percentile latencies (seconds per batch) into
    `records` under '<framework>_latency' / '<framework>_percentile_latency'
    for each framework named in args.inference_framework.
    """
    try:
        forecaster.load(model_path)
    except Exception:
        # No usable checkpoint -- fall back to a quick single-epoch fit.
        forecaster.fit(train_loader, epochs=1)
    # Per-framework timing lists; renamed so none shadows this function's name.
    latency_torch, latency_onnx, latency_vino, latency_jit = [], [], [], []
    latency_trim_portion = 0.1  # trim 10% from each tail for the mean
    latency_percentile = [50, 90, 95, 99]
    if args.quantize:
        import onnxruntime
        sess_options = onnxruntime.SessionOptions()
        if args.cores:
            sess_options.intra_op_num_threads = args.cores
            sess_options.inter_op_num_threads = args.cores
        forecaster.quantize(test_loader, framework=args.quantize_type,
                            sess_options=sess_options,
                            thread_num=(args.cores if args.cores else None))
        print('QUANTIZATION DONE')
    if 'torch' in args.inference_framework:
        import torch
        if args.model == 'autoformer':
            # Autoformer consumes (x, y, x_mark, y_mark) tuples.
            for (x, y, x_, y_) in test_loader:
                st = time.time()
                yhat = forecaster.predict((x.numpy(), y.numpy(), x_.numpy(), y_.numpy()))
                latency_torch.append(time.time() - st)
        else:
            for (x, y) in test_loader:
                st = time.time()
                yhat = forecaster.predict(x.numpy(), quantize=args.quantize)
                latency_torch.append(time.time() - st)
        records['torch_latency'] = stats.trim_mean(latency_torch, latency_trim_portion)
        records['torch_percentile_latency'] = np.percentile(latency_torch, latency_percentile)
    if 'onnx' in args.inference_framework:
        if args.cores and not args.quantize:
            forecaster.build_onnx(thread_num=args.cores)
        for (x, y) in test_loader:
            st = time.time()
            yhat = forecaster.predict_with_onnx(x.numpy(), quantize=args.quantize)
            latency_onnx.append(time.time() - st)
        records['onnx_latency'] = stats.trim_mean(latency_onnx, latency_trim_portion)
        records['onnx_percentile_latency'] = np.percentile(latency_onnx, latency_percentile)
    if 'openvino' in args.inference_framework:
        if args.cores and not args.quantize:
            forecaster.build_openvino(thread_num=args.cores)
        for (x, y) in test_loader:
            st = time.time()
            yhat = forecaster.predict_with_openvino(x.numpy(), quantize=args.quantize)
            latency_vino.append(time.time() - st)
        records['openvino_latency'] = stats.trim_mean(latency_vino, latency_trim_portion)
        records['openvino_percentile_latency'] = np.percentile(latency_vino, latency_percentile)
    if 'jit' in args.inference_framework:
        if args.cores:
            forecaster.build_jit(thread_num=args.cores)
        for (x, y) in test_loader:
            st = time.time()
            yhat = forecaster.predict_with_jit(x.numpy(), quantize=args.quantize)
            latency_jit.append(time.time() - st)
        records['jit_latency'] = stats.trim_mean(latency_jit, latency_trim_portion)
        records['jit_percentile_latency'] = np.percentile(latency_jit, latency_percentile)
def text_to_sequence(text, cleaner_names):
    """Clean `text` with the named cleaners and map each symbol to its id.

    Returns a list of integer symbol ids, one per character of the cleaned text.
    """
    cleaned = _clean_text(text, cleaner_names)
    return [_symbol_to_id[symbol] for symbol in cleaned]
def save_dataset(transform, train_, test_, filename):
    """Serialize the transform together with train/test splits via torch.save."""
    payload = {'transform': transform, 'train': train_, 'test': test_}
    torch.save(payload, filename)
def getBounds(lvls_arr: list, n_lvl: float):
    """Return the (lower, upper) pair of consecutive levels bracketing n_lvl.

    Scans the sorted level array until a level >= n_lvl is found; defaults to
    the first two levels when n_lvl precedes them all, and the last pair when
    it exceeds them all.
    """
    lower, upper = lvls_arr[0], lvls_arr[1]
    idx = 0
    while idx < len(lvls_arr) - 1:
        value = lvls_arr[idx]
        if n_lvl <= value:
            break
        lower = value
        upper = lvls_arr[idx + 1]
        idx += 1
    return (lower, upper)
class CplxToConcatenatedReal(BaseCplxToReal):
    """Flatten a complex tensor into real and imaginary parts concatenated along `dim`."""

    def __init__(self, dim=-1):
        super().__init__()
        # Axis along which the real/imaginary parts are concatenated.
        self.dim = dim

    def forward(self, input):
        # The middle argument is intentionally None, keeping the helper's
        # default behavior for the real/imag split.
        return cplx.to_concatenated_real(input, None, self.dim)
def get_idx_dicts(data):
    """Build entity and relation index mappings from (lhs, rel, rhs) triples.

    Entities come from both endpoints; each vocabulary is sorted so index
    assignment is deterministic.

    Returns:
        (ent_to_idx, rel_to_idx): dicts mapping entity/relation -> 0-based index.
    """
    ent_set, rel_set = set(), set()
    for lhs, rel, rhs in data:
        ent_set.update((lhs, rhs))
        rel_set.add(rel)
    # Sorted order makes the assigned indices reproducible across runs.
    ent_to_idx = {ent: i for i, ent in enumerate(sorted(ent_set))}
    rel_to_idx = {rel: j for j, rel in enumerate(sorted(rel_set))}
    return (ent_to_idx, rel_to_idx)
class ZScoreNormalize(intnormb.LocationScaleCLIMixin, intnormb.SingleImageNormalizeCLI):
    """Z-score normalization: standardize an MR image by its foreground (VOI) mean/std."""

    def __init__(self, *, norm_value: float = 1.0, **kwargs: typing.Any):
        super().__init__(norm_value=norm_value, **kwargs)
        # Volume of interest; populated by setup() and cleared by teardown().
        self.voi: (intnormt.ImageLike | None) = None

    def calculate_location(self, image: intnormt.ImageLike, /, mask: (intnormt.ImageLike | None) = None, *, modality: intnormt.Modality = intnormt.Modality.T1) -> float:
        """Return the VOI mean intensity (the z-score 'location')."""
        if self.voi is None:
            raise intnorme.NormalizationError("'voi' needs to be set.")
        loc: float = float(self.voi.mean())
        return loc

    def calculate_scale(self, image: intnormt.ImageLike, /, mask: (intnormt.ImageLike | None) = None, *, modality: intnormt.Modality = intnormt.Modality.T1) -> float:
        """Return the VOI intensity standard deviation (the z-score 'scale')."""
        if self.voi is None:
            raise intnorme.NormalizationError("'voi' needs to be set.")
        scale: float = float(self.voi.std())
        return scale

    def setup(self, image: intnormt.ImageLike, /, mask: (intnormt.ImageLike | None) = None, *, modality: intnormt.Modality = intnormt.Modality.T1) -> None:
        """Extract and cache the volume of interest used by location/scale."""
        self.voi = self._get_voi(image, mask, modality=modality)

    def teardown(self) -> None:
        """Release the cached VOI so the instance can be reused."""
        del self.voi
        self.voi = None

    # The three descriptors below take no instance state; without @staticmethod
    # (and with no `self` parameter) they would raise TypeError when invoked on
    # an instance.
    @staticmethod
    def name() -> str:
        return 'zscore'

    @staticmethod
    def fullname() -> str:
        return 'Z-Score'

    @staticmethod
    def description() -> str:
        return 'Standardize an MR image by the foreground intensities.'

    def plot_histogram_from_args(self, args: argparse.Namespace, /, normalized: intnormt.ImageLike, mask: (intnormt.ImageLike | None) = None) -> None:
        """Plot the histogram, estimating a foreground mask when none is given."""
        if mask is None:
            mask = self.estimate_foreground(normalized)
        super().plot_histogram_from_args(args, normalized, mask)
# NOTE(review): the decorator below lost its prefix in extraction -- it is
# presumably something like @registry.BACKBONES.register('MobileNetV2');
# confirm against the original repository before running this file.
.register('MobileNetV2')
def build_mbv2_backbone(cfg):
    """Construct a MobileNetV2 backbone from config, applying the width multiplier."""
    in_channels = cfg.MODEL.BACKBONE.IN_PLANES
    base_channels = cfg.MODEL.BACKBONE.BASE_PLANES
    out_channels = cfg.MODEL.HEAD.FEATURE_DIMS
    round_nearest = cfg.MODEL.COMPRESSION.ROUND_NEAREST
    width_multiplier = cfg.MODEL.COMPRESSION.WIDTH_MULTIPLIER
    conv_layer = get_conv(cfg)
    norm_layer = get_norm(cfg)
    act_layer = get_act(cfg)
    # Per-stage settings: [expansion factor t, output channels c, repeats n, stride s].
    inverted_residual_setting = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1]]
    base_channels = make_divisible((base_channels * width_multiplier), round_nearest)
    # Scale each stage's channel count by the width multiplier, keeping it divisible.
    for i in range(len(inverted_residual_setting)):
        channel = inverted_residual_setting[i][1]
        inverted_residual_setting[i][1] = make_divisible((channel * width_multiplier), round_nearest)
    return MobileNetV2Backbone(in_channels=in_channels, out_channels=out_channels, base_channels=base_channels, inverted_residual_setting=inverted_residual_setting, conv_layer=conv_layer, norm_layer=norm_layer, act_layer=act_layer)
class TestIterators(unittest.TestCase):
    """Tests for CountingIterator, GroupedIterator, ShardedIterator and BufferedIterator."""

    def test_counting_iterator(self, ref=None, itr=None):
        # Doubles as a reusable checker: other tests pass their own (ref, itr)
        # pair of exactly 10 elements; with no args it builds a fresh one.
        if (ref is None):
            assert (itr is None)
            ref = list(range(10))
            itr = iterators.CountingIterator(ref)
        else:
            assert (len(ref) == 10)
            assert (itr is not None)
        self.assertTrue(itr.has_next())
        self.assertEqual(itr.n, 0)
        self.assertEqual(next(itr), ref[0])
        self.assertEqual(itr.n, 1)
        self.assertEqual(next(itr), ref[1])
        self.assertEqual(itr.n, 2)
        itr.skip(3)
        self.assertEqual(itr.n, 5)
        self.assertEqual(next(itr), ref[5])
        itr.skip(3)
        self.assertEqual(itr.n, 9)
        self.assertEqual(next(itr), ref[9])
        self.assertFalse(itr.has_next())

    def test_grouped_iterator(self):
        # Chunk sizes 1, 4 and 5, including a ragged final chunk for size 4.
        x = list(range(10))
        itr = iterators.GroupedIterator(x, 1)
        self.assertEqual(list(itr), [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]])
        itr = iterators.GroupedIterator(x, 4)
        self.assertEqual(list(itr), [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]])
        itr = iterators.GroupedIterator(x, 5)
        self.assertEqual(list(itr), [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
        # GroupedIterator must also behave as a CountingIterator.
        x = list(range(30))
        ref = list(iterators.GroupedIterator(x, 3))
        itr = iterators.GroupedIterator(x, 3)
        self.test_counting_iterator(ref, itr)

    def test_sharded_iterator(self):
        # Shorter shards are padded with None so every shard has equal length.
        x = list(range(10))
        itr = iterators.ShardedIterator(x, num_shards=1, shard_id=0)
        self.assertEqual(list(itr), x)
        itr = iterators.ShardedIterator(x, num_shards=2, shard_id=0)
        self.assertEqual(list(itr), [0, 2, 4, 6, 8])
        itr = iterators.ShardedIterator(x, num_shards=2, shard_id=1)
        self.assertEqual(list(itr), [1, 3, 5, 7, 9])
        itr = iterators.ShardedIterator(x, num_shards=3, shard_id=0)
        self.assertEqual(list(itr), [0, 3, 6, 9])
        itr = iterators.ShardedIterator(x, num_shards=3, shard_id=1)
        self.assertEqual(list(itr), [1, 4, 7, None])
        itr = iterators.ShardedIterator(x, num_shards=3, shard_id=2)
        self.assertEqual(list(itr), [2, 5, 8, None])
        # ShardedIterator must also behave as a CountingIterator.
        x = list(range(30))
        ref = list(iterators.ShardedIterator(x, num_shards=3, shard_id=0))
        itr = iterators.ShardedIterator(x, num_shards=3, shard_id=0)
        self.test_counting_iterator(ref, itr)

    def test_counting_iterator_take(self):
        # take(5) truncates the iterator; skip still advances within the limit.
        ref = list(range(10))
        itr = iterators.CountingIterator(ref)
        itr.take(5)
        self.assertEqual(len(itr), len(list(iter(itr))))
        self.assertEqual(len(itr), 5)
        itr = iterators.CountingIterator(ref)
        itr.take(5)
        self.assertEqual(next(itr), ref[0])
        self.assertEqual(next(itr), ref[1])
        itr.skip(2)
        self.assertEqual(next(itr), ref[4])
        self.assertFalse(itr.has_next())

    def test_counting_iterator_buffered_iterator_take(self):
        # take() must propagate through a wrapped BufferedIterator.
        ref = list(range(10))
        buffered_itr = iterators.BufferedIterator(2, ref)
        itr = iterators.CountingIterator(buffered_itr)
        itr.take(5)
        self.assertEqual(len(itr), len(list(iter(itr))))
        self.assertEqual(len(itr), 5)
        buffered_itr = iterators.BufferedIterator(2, ref)
        itr = iterators.CountingIterator(buffered_itr)
        itr.take(5)
        self.assertEqual(len(buffered_itr), 5)
        self.assertEqual(len(list(iter(buffered_itr))), 5)
        buffered_itr = iterators.BufferedIterator(2, ref)
        itr = iterators.CountingIterator(buffered_itr)
        itr.take(5)
        self.assertEqual(next(itr), ref[0])
        self.assertEqual(next(itr), ref[1])
        itr.skip(2)
        self.assertEqual(next(itr), ref[4])
        self.assertFalse(itr.has_next())
        self.assertRaises(StopIteration, next, buffered_itr)
        # A non-zero `start` offset counts against the take() budget.
        ref = list(range(4, 10))
        buffered_itr = iterators.BufferedIterator(2, ref)
        itr = iterators.CountingIterator(buffered_itr, start=4)
        itr.take(5)
        self.assertEqual(len(itr), 5)
        self.assertEqual(len(buffered_itr), 1)
        self.assertEqual(next(itr), ref[0])
        self.assertFalse(itr.has_next())
        self.assertRaises(StopIteration, next, buffered_itr)
class SquareBoxCoder(box_coder.BoxCoder):
    """Encodes/decodes boxes relative to square anchors as (ty, tx, tl) codes.

    The anchor's square side is la = sqrt(ha * wa); codes are the normalized
    center offsets plus a log side-length ratio.
    """

    def __init__(self, scale_factors=None):
        """scale_factors: optional [ty, tx, tl] multipliers; each must be > 0."""
        if scale_factors:
            if (len(scale_factors) != 3):
                raise ValueError('The argument scale_factors must be a list of length 3.')
            if any(((scalar <= 0) for scalar in scale_factors)):
                raise ValueError('The values in scale_factors must all be greater than 0.')
        self._scale_factors = scale_factors

    def code_size(self):
        # NOTE(review): in the TF Object Detection API code_size is a
        # @property on BoxCoder subclasses -- the decorator may have been lost
        # in extraction; confirm against the base class before calling.
        return 3

    def _encode(self, boxes, anchors):
        """Encode `boxes` with respect to `anchors` as (ty, tx, tl) codes."""
        (ycenter_a, xcenter_a, ha, wa) = anchors.get_center_coordinates_and_sizes()
        la = tf.sqrt((ha * wa))
        (ycenter, xcenter, h, w) = boxes.get_center_coordinates_and_sizes()
        l = tf.sqrt((h * w))
        # Avoid NaN in the divisions and log below.
        la += EPSILON
        l += EPSILON
        tx = ((xcenter - xcenter_a) / la)
        ty = ((ycenter - ycenter_a) / la)
        tl = tf.log((l / la))  # tf.log is the TF1.x API (tf.math.log in TF2)
        if self._scale_factors:
            ty *= self._scale_factors[0]
            tx *= self._scale_factors[1]
            tl *= self._scale_factors[2]
        return tf.transpose(tf.stack([ty, tx, tl]))

    def _decode(self, rel_codes, anchors):
        """Invert _encode, returning a BoxList of (ymin, xmin, ymax, xmax) squares."""
        (ycenter_a, xcenter_a, ha, wa) = anchors.get_center_coordinates_and_sizes()
        la = tf.sqrt((ha * wa))
        (ty, tx, tl) = tf.unstack(tf.transpose(rel_codes))
        if self._scale_factors:
            ty /= self._scale_factors[0]
            tx /= self._scale_factors[1]
            tl /= self._scale_factors[2]
        l = (tf.exp(tl) * la)
        ycenter = ((ty * la) + ycenter_a)
        xcenter = ((tx * la) + xcenter_a)
        ymin = (ycenter - (l / 2.0))
        xmin = (xcenter - (l / 2.0))
        ymax = (ycenter + (l / 2.0))
        xmax = (xcenter + (l / 2.0))
        return box_list.BoxList(tf.transpose(tf.stack([ymin, xmin, ymax, xmax])))
class DeepPrunerProcessor(nn.Module):
    """DeepPruner cost processor: confidence-range prediction in the 'pre'
    stage, then cost aggregation and disparity refinement afterwards."""

    def __init__(self, cfg):
        super(DeepPrunerProcessor, self).__init__()
        self.cfg = cfg.copy()
        self.batch_norm = cfg.model.batch_norm
        self.patch_match_disparity_sample_number = cfg.model.cost_processor.patch_match_disparity_sample_number
        self.uniform_disparity_sample_number = cfg.model.cost_processor.uniform_disparity_sample_number
        self.confidence_range_predictor_args = cfg.model.cost_processor.confidence_range_predictor
        self.confidence_range_predictor_args.update(disparity_sample_number=self.patch_match_disparity_sample_number, batch_norm=self.batch_norm)
        self.confidence_range_predictor = ConfidenceRangePredictor(**self.confidence_range_predictor_args)
        self.cost_aggregator = build_cost_aggregator(cfg)
        # Light refinement convs applied after upsampling disparity / features.
        self.disparity_conv = nn.Sequential(nn.Conv2d(1, 1, kernel_size=5, stride=1, padding=2, bias=True), nn.ReLU(inplace=True))
        self.disparity_feature_conv = conv_bn_relu(self.batch_norm, in_planes=self.uniform_disparity_sample_number, out_planes=self.uniform_disparity_sample_number, kernel_size=5, stride=1, padding=2, dilation=1, bias=True)

    def forward(self, stage, left, right, disparity_sample, min_disparity_feature=None, max_disparity_feature=None):
        """If stage == 'pre', predict the disparity confidence range; otherwise
        aggregate the cost volume and regress a refined, 2x-upsampled disparity.

        Returns the confidence-range predictor output ('pre') or
        [disparity, disparity_feature] (otherwise).
        """
        raw_cost = fast_cat_fms(left, right, disp_sample=disparity_sample)
        # Append the disparity samples themselves as an extra cost channel.
        raw_cost = torch.cat((raw_cost, disparity_sample.unsqueeze(1)), dim=1)
        if (stage == 'pre'):
            output = self.confidence_range_predictor(raw_cost, disparity_sample)
        else:
            # Broadcast the min/max disparity features across the sample dim.
            min_disparity_feature = min_disparity_feature.unsqueeze(2).expand((- 1), (- 1), self.uniform_disparity_sample_number, (- 1), (- 1))
            max_disparity_feature = max_disparity_feature.unsqueeze(2).expand((- 1), (- 1), self.uniform_disparity_sample_number, (- 1), (- 1))
            raw_cost = torch.cat((raw_cost, min_disparity_feature, max_disparity_feature), dim=1)
            cost = self.cost_aggregator(raw_cost)[0]
            # Soft-argmin over the disparity samples.
            prob_volume = F.softmax(cost, dim=1)
            disparity = torch.sum((prob_volume * disparity_sample), dim=1, keepdim=True)
            # Upsample 2x; disparity values are doubled to match the new scale.
            disparity = F.interpolate((disparity * 2), scale_factor=(2, 2), mode='bilinear', align_corners=False)
            disparity_feature = F.interpolate(cost, scale_factor=(2, 2), mode='bilinear', align_corners=False)
            disparity = self.disparity_conv(disparity)
            disparity_feature = self.disparity_feature_conv(disparity_feature)
            output = [disparity, disparity_feature]
        return output
def merge_cl_lines(content_lines, space_spliters):
    """Merge horizontally adjacent content-line fragments on each page.

    content_lines: per-page lists of [text, bbox, chars] entries, where bbox is
    (x0, y0, x1, y1). space_spliters: per-page column-splitter coordinates used
    to avoid merging fragments that sit on a column boundary.

    Returns a new per-page list with mergeable fragments joined left-to-right.

    Fix: the original appended `cur_group` to `need_merge_line_id_groups`
    twice in a row; the duplicate is removed (make_unique deduplicated it
    anyway, so final behavior is unchanged).
    """

    def overlap_len(min1, len1, min2, len2):
        # Length of the 1-D overlap of [min1, min1+len1] and [min2, min2+len2].
        min_ = min1
        max_ = (min1 + len1)
        if (min1 > min2):
            min_ = min2
        if ((min1 + len1) < (min2 + len2)):
            max_ = (min2 + len2)
        return max(0, ((len1 + len2) - (max_ - min_)))

    def needs_merge(cl1, cl2, page_id, th=0.2, ave_char_width=5):
        # Two fragments merge when they are close, narrow and vertically
        # overlapping, and neither hugs a column splitter.
        overlap_l = overlap_len(cl1[1][1], (cl1[1][3] - cl1[1][1]), cl2[1][1], (cl2[1][3] - cl2[1][1]))
        if ((rect_distance(cl1[1], cl2[1]) < ave_char_width) and (min((cl1[1][2] - cl1[1][0]), (cl2[1][2] - cl2[1][0])) < (2 * ave_char_width)) and ((overlap_l / max(min((cl1[1][3] - cl1[1][1]), (cl2[1][3] - cl2[1][1])), 1)) > th)):
            return True
        width = page2img_size[page_id][0]
        # Fragments aligned with a column splitter must stay separate.
        if (((abs((cl1[1][0] - space_spliters[page_id][0])) + abs((cl1[1][2] - space_spliters[page_id][1]))) < (4 * ave_char_width)) or ((abs((cl1[1][0] - space_spliters[page_id][2])) + abs((cl1[1][2] - space_spliters[page_id][3]))) < (4 * ave_char_width))):
            return False
        if (((abs((cl2[1][0] - space_spliters[page_id][0])) + abs((cl2[1][2] - space_spliters[page_id][1]))) < (4 * ave_char_width)) or ((abs((cl2[1][0] - space_spliters[page_id][2])) + abs((cl2[1][2] - space_spliters[page_id][3]))) < (4 * ave_char_width))):
            return False
        # Both fragments wide: likely full lines, not broken pieces.
        if (((cl1[1][2] - cl1[1][0]) > (width / 6)) and ((cl2[1][2] - cl2[1][0]) > (width / 6))):
            return False
        if (rect_distance(cl1[1][:4], cl2[1][:4]) > (5 * ave_char_width)):
            return False
        overlap_l = overlap_len(cl1[1][1], (cl1[1][3] - cl1[1][1]), cl2[1][1], (cl2[1][3] - cl2[1][1]))
        if ((overlap_l / max(min((cl1[1][3] - cl1[1][1]), (cl2[1][3] - cl2[1][1])), 1)) > th):
            return True
        return False

    def need_merge_ids_in_groups(cur_id, id_groups):
        # Indices of every group sharing at least one member with group cur_id.
        ids = set()
        for x in id_groups[cur_id]:
            for i in range(len(id_groups)):
                if (x in id_groups[i]):
                    ids.add(i)
        return ids

    def make_unique(id_groups):
        # Union overlapping groups in place until all groups are disjoint.
        cur_id = 0
        while (cur_id < len(id_groups)):
            need_merge_ids = need_merge_ids_in_groups(cur_id, id_groups)
            if (len(need_merge_ids) == 1):
                cur_id += 1
            else:
                need_merge_ids = list(need_merge_ids)
                need_merge_ids.sort()
                assert (need_merge_ids[0] == cur_id)
                for i in range((len(need_merge_ids) - 1), 0, (- 1)):
                    id_groups[cur_id].extend(id_groups[need_merge_ids[i]])
                    id_groups.pop(need_merge_ids[i])
                id_groups[cur_id] = list(set(id_groups[cur_id]))
                id_groups[cur_id].sort()
        for g in id_groups:
            g.sort()

    def which_group(x, groups):
        # Index of the group containing x, or None.
        for (gid, g) in enumerate(groups):
            if (x in g):
                return gid
        return None

    def cl_join(cl_s, page_id):
        # Join fragments left-to-right into one [text, bbox, chars] entry.
        # (An empty input is returned unchanged, matching original behavior.)
        if (len(cl_s) == 0):
            return cl_s
        if (len(cl_s) == 1):
            return cl_s[0]
        cl_x_id = [[cl_s[cl_id][1][0], cl_id] for cl_id in range(len(cl_s))]
        cl_x_id.sort(key=(lambda x: x[0]))
        sorted_cl_ids = [x[1] for x in cl_x_id]
        strings = [cl_s[cl_i][0] for cl_i in sorted_cl_ids]
        max_box = max_bbox([cl[1] for cl in cl_s])
        chars = []
        for cl_i in sorted_cl_ids:
            chars.extend(cl_s[cl_i][2])
        return [' '.join(strings), max_box, chars]

    content_lines_merged = []
    for _ in range(len(content_lines)):
        content_lines_merged.append([])
    for page_id in range(content_lines.__len__()):
        need_merge_line_id_groups = []
        RANGE = 5  # only consider neighbors within +/- RANGE line indices
        for cl_id in range(content_lines[page_id].__len__()):
            cur_group = []
            for cur_cl_id in range(max(0, (cl_id - RANGE)), min(len(content_lines[page_id]), (cl_id + RANGE))):
                if needs_merge(content_lines[page_id][cl_id], content_lines[page_id][cur_cl_id], page_id):
                    cur_group.append(cur_cl_id)
            if (len(cur_group) > 1):
                need_merge_line_id_groups.append(cur_group)
        make_unique(need_merge_line_id_groups)
        all_need_to_merge_ids = []
        for group in need_merge_line_id_groups:
            all_need_to_merge_ids.extend(group)
        new_cl_this_page = []
        merged_to_new = ([False] * len(need_merge_line_id_groups))
        for cl_id in range(len(content_lines[page_id])):
            if (cl_id not in all_need_to_merge_ids):
                new_cl_this_page.append(content_lines[page_id][cl_id])
            else:
                group_id = which_group(cl_id, need_merge_line_id_groups)
                if (not merged_to_new[group_id]):
                    cl_lists = [content_lines[page_id][i] for i in need_merge_line_id_groups[group_id]]
                    new_cl_this_page.append(cl_join(cl_lists, page_id))
                    merged_to_new[group_id] = True
        content_lines_merged[page_id] = new_cl_this_page
    return content_lines_merged
def cal_phi(x, y, z, nx, ny, nz):
    """Azimuthal angle (radians) of the cross product (x, y, z) x (nx, ny, nz).

    Uses only the x and y components of the cross product, via arctan2.
    """
    cross_x = (y * nz) - (ny * z)
    cross_y = (z * nx) - (x * nz)
    return np.arctan2(cross_y, cross_x)
def _make_copying_data_provider_base(data_sources_source, data_sources_schema, reader=tf.TextLineReader, num_samples=None, source_delimiter=' ', **kwargs):
    """Build tf.contrib.slim Datasets for source-token files and optional schemas.

    Returns (dataset_source, dataset_schemas); dataset_schemas is None when no
    schema sources are given. Uses the TF1.x contrib.slim dataset API.
    """
    decoder_source = split_tokens_decoder.SplitTokensDecoder(tokens_feature_name='source_tokens', length_feature_name='source_len', append_token='SEQUENCE_END', delimiter=source_delimiter)
    dataset_source = tf.contrib.slim.dataset.Dataset(data_sources=data_sources_source, reader=reader, decoder=decoder_source, num_samples=num_samples, items_to_descriptions={})
    dataset_schemas = None
    if (data_sources_schema is not None):
        # Schema files are always space-delimited, independent of source_delimiter.
        decoder_schemas = split_tokens_decoder.SplitTokensDecoder(tokens_feature_name='schema_loc', delimiter=' ')
        dataset_schemas = tf.contrib.slim.dataset.Dataset(data_sources=data_sources_schema, reader=reader, decoder=decoder_schemas, num_samples=num_samples, items_to_descriptions={})
    return (dataset_source, dataset_schemas)
class DmolNet(nn.Module):
    """Discretized mixture-of-logistics output head (JAX/flax linen module)."""

    # Hyperparameters container providing num_mixtures and conv_precision.
    H: hps.Hyperparams

    def setup(self):
        # 10 parameters per mixture component -- presumably logits/means/
        # log-scales/coeffs; confirm the split against logistic_mix_logpmf.
        self.out_conv = Conv1x1((self.H.num_mixtures * 10), precision=self.H.conv_precision)

    def loglik(self, px_z, x):
        """Log-likelihood of x under the mixture parameterized from px_z."""
        return logistic_mix_logpmf(self.out_conv(px_z), x)

    def sample(self, px_z, rng):
        """Draw a sample and map it from [-1, 1] to uint8 pixels in [0, 255]."""
        img = logistic_mix_sample(self.out_conv(px_z), rng)
        return jnp.round(((jnp.clip(img, (- 1), 1) + 1) * 127.5)).astype(jnp.uint8)
class Classifier(nn.Module):
    """MLP head: in_nc -> hidden `layers` -> out_nc, built from linear blocks."""

    def __init__(self, in_nc=2048, out_nc=2, layers=(2048,), norm_layer=nn.BatchNorm1d, act_layer=nn.ReLU(True), use_dropout=False):
        # NOTE(review): the act_layer default is a single nn.ReLU instance
        # shared by every Classifier constructed without an explicit act_layer;
        # harmless for a stateless ReLU, but confirm that is intended.
        super(Classifier, self).__init__()
        self.idx_tensor = None
        channels = (([in_nc] + list(layers)) + [out_nc])
        self.model = []
        for i in range(1, (len(channels) - 1)):
            self.model += make_linear_block(channels[(i - 1)], channels[i], norm_layer=norm_layer, act_layer=act_layer, use_dropout=use_dropout)
        # Final projection: no normalization or activation.
        self.model += make_linear_block(channels[(- 2)], channels[(- 1)], norm_layer=None, act_layer=None, use_dropout=use_dropout)
        self.model = nn.Sequential(*self.model)

    def forward(self, x):
        return self.model(x)
class CondenseNet(nn.Module):
    """CondenseNet image classifier.

    channels: per-stage lists of unit output channel counts.
    init_block_channels: output channels of the stem block.
    groups: group count for the condensed group convolutions.
    """

    def __init__(self, channels, init_block_channels, groups, in_channels=3, in_size=(224, 224), num_classes=1000):
        super(CondenseNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.features = nn.Sequential()
        self.features.add_module('init_block', CondenseInitBlock(in_channels=in_channels, out_channels=init_block_channels))
        in_channels = init_block_channels
        for (i, channels_per_stage) in enumerate(channels):
            stage = nn.Sequential()
            if (i != 0):
                # Transition between stages (omitted before the first stage).
                stage.add_module('trans{}'.format((i + 1)), TransitionBlock())
            for (j, out_channels) in enumerate(channels_per_stage):
                stage.add_module('unit{}'.format((j + 1)), CondenseUnit(in_channels=in_channels, out_channels=out_channels, groups=groups))
                in_channels = out_channels
            self.features.add_module('stage{}'.format((i + 1)), stage)
        self.features.add_module('post_activ', PostActivation(in_channels=in_channels))
        self.features.add_module('final_pool', nn.AvgPool2d(kernel_size=7, stride=1))
        self.output = CondenseLinear(in_features=in_channels, out_features=num_classes)
        self._init_params()

    def _init_params(self):
        # Kaiming init for convs, identity init for BN; only biases are zeroed
        # for Linear layers (their weights keep the default init).
        for (name, module) in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if (module.bias is not None):
                    init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                init.constant_(module.weight, 1)
                init.constant_(module.bias, 0)
            elif isinstance(module, nn.Linear):
                init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        # Flatten to (batch, features) for the classifier head.
        x = x.view(x.size(0), (- 1))
        x = self.output(x)
        return x
def create_qg_prompt(caption):
    """Build a LLaMA-2 chat prompt asking for verification questions about `caption`."""
    INTRO_BLURB = 'Given an image description, generate one or two multiple-choice questions that verifies if the image description is correct.\nClassify each concept into a type (object, human, animal, food, activity, attribute, counting, color, material, spatial, location, shape, other), and then generate a question for each type.\n'
    # System header wrapping the instruction blurb, then the user turn.
    header = f'<s>[INST] <<SYS>>\n{INTRO_BLURB}\n<</SYS>>\n'
    return header + f'Description: {caption} [/INST] Entities:'
def get_node_mapper(lcc: np.ndarray) -> dict:
    """Map each node id in `lcc` to its 0-based position in the array.

    Replaces the manual counter loop with an enumerate-based dict
    comprehension; semantics (including later-duplicate overwrite) unchanged.
    """
    return {node: idx for idx, node in enumerate(lcc)}
def get_number_footer_lines(docbody, page_break_posns):
    """Estimate how many lines at the bottom of each page form a repeated footer.

    Walks upward from each page break, comparing the candidate footer line of
    the first page against the same offset on every other page; stops when the
    lines no longer match. Returns the number of footer lines found.
    """
    num_breaks = len(page_break_posns)
    num_footer_lines = 0
    empty_line = 0  # flag: candidate line on the first checked page is blank
    keep_checking = 1
    p_wordSearch = re.compile('([A-Za-z0-9-]+)', re.UNICODE)
    if (num_breaks > 2):
        while keep_checking:
            cur_break = 1
            # Stop if the candidate index walks off either end of the document.
            if ((((page_break_posns[cur_break] - num_footer_lines) - 1) < 0) or (((page_break_posns[cur_break] - num_footer_lines) - 1) > (len(docbody) - 1))):
                break
            if docbody[((page_break_posns[cur_break] - num_footer_lines) - 1)].isspace():
                empty_line = 1
            # Words of the reference (first page's) candidate footer line.
            grps_headLineWords = p_wordSearch.findall(docbody[((page_break_posns[cur_break] - num_footer_lines) - 1)])
            cur_break = (cur_break + 1)
            while ((cur_break < num_breaks) and keep_checking):
                grps_thisLineWords = p_wordSearch.findall(docbody[((page_break_posns[cur_break] - num_footer_lines) - 1)])
                if empty_line:
                    # A blank reference line only matches other blank lines.
                    if (len(grps_thisLineWords) != 0):
                        keep_checking = 0
                elif ((len(grps_thisLineWords) == 0) or (len(grps_headLineWords) != len(grps_thisLineWords))):
                    keep_checking = 0
                else:
                    keep_checking = check_boundary_lines_similar(grps_headLineWords, grps_thisLineWords)
                cur_break = (cur_break + 1)
            if keep_checking:
                # All pages agreed at this offset: count one more footer line.
                num_footer_lines = (num_footer_lines + 1)
            empty_line = 0
    return num_footer_lines
def sklearn_DecisionTreeClassifier(*args, **kwargs):
    """Pass-through factory for sklearn.tree.DecisionTreeClassifier."""
    return sklearn.tree.DecisionTreeClassifier(*args, **kwargs)
def load_nifi_volume(filepath: str, normalize: bool=False) -> np.ndarray:
    """Load a NIfTI volume as a numpy array, optionally z-score normalized."""
    proxy_img = nib.load(filepath)
    # Drop any cached data so the array below is materialized from the proxy.
    proxy_img.uncache()
    img = np.array(proxy_img.dataobj)
    if normalize:
        img = zero_mean_unit_variance_normalization(img)
    return img
# NOTE(review): the decorator below lost its prefix in extraction --
# presumably @pytest.mark.parametrize; confirm against the original source.
.parametrize('std', [EnglishNumberNormalizer(), EnglishTextNormalizer()])
def test_number_normalizer(std):
    # Spoken-number normalization cases. Some expected values pin the
    # normalizer's current quirks (e.g. '$20 million' -> '$') rather than
    # an idealized output.
    assert (std('two') == '2')
    assert (std('thirty one') == '31')
    assert (std('five twenty four') == '524')
    assert (std('nineteen ninety nine') == '1999')
    assert (std('twenty nineteen') == '2019')
    assert (std('two point five million') == '2500000')
    assert (std('four point two billions') == 's')
    assert (std('200 thousand') == '200000')
    assert (std('200 thousand dollars') == '$200000')
    assert (std('$20 million') == '$')
    assert (std('52.4 million') == '')
    assert (std('77 thousands') == '77000s')
    assert (std('two double o eight') == '2008')
    assert (std('three thousand twenty nine') == '3029')
    assert (std('forty three thousand two hundred sixty') == '43260')
    assert (std('forty three thousand two hundred and sixty') == '43260')
    assert (std('nineteen fifties') == '1950s')
    assert (std('thirty first') == '31st')
    assert (std('thirty three thousand and three hundred and thirty third') == '33333rd')
    assert (std('three billion') == '')
    assert (std('millions') == '1000000s')
    assert (std('july third twenty twenty') == 'july 3rd 2020')
    assert (std('august twenty sixth twenty twenty one') == 'august 26th 2021')
    assert (std('3 14') == '3 14')
    assert (std('3.14') == '3.14')
    assert (std('3 point 2') == '3.2')
    assert (std('3 point 14') == '3.14')
    assert (std('fourteen point 4') == '14.4')
    assert (std('two point two five dollars') == '$2.25')
    assert (std('two hundred million dollars') == '$')
    assert (std('$20.1 million') == '$')
    assert (std('ninety percent') == '90%')
    assert (std('seventy six per cent') == '76%')
    assert (std('double oh seven') == '007')
    assert (std('double zero seven') == '007')
    assert (std('nine one one') == '911')
    assert (std('nine double one') == '911')
    assert (std('one triple oh one') == '10001')
    assert (std('two thousandth') == '2000th')
    assert (std('thirty two thousandth') == '32000th')
    assert (std('minus 500') == '-500')
    assert (std('positive twenty thousand') == '+20000')
    assert (std('two dollars and seventy cents') == '$2.70')
    assert (std('3 cents') == '3')
    assert (std('$0.36') == '36')
    assert (std('three euros and sixty five cents') == '3.65')
    assert (std('three and a half million') == '3500000')
    assert (std('forty eight and a half dollars') == '$48.5')
    assert (std('b747') == 'b 747')
    assert (std('10 th') == '10th')
    assert (std('10th') == '10th')
class MeterpreterSession(MsfSession):
    """Interact with a Metasploit meterpreter session through the msfrpc API."""

    def read(self):
        """Return whatever output the session has buffered."""
        return self.rpc.call(MsfRpcMethod.SessionMeterpreterRead, [self.sid])['data']

    def write(self, data):
        """Send `data` to the session, appending a trailing newline if missing."""
        if (not data.endswith('\n')):
            data += '\n'
        self.rpc.call(MsfRpcMethod.SessionMeterpreterWrite, [self.sid, data])

    def runsingle(self, data):
        """Run a single meterpreter command and return its immediate output."""
        self.rpc.call(MsfRpcMethod.SessionMeterpreterRunSingle, [self.sid, data])
        return self.read()

    def runscript(self, path):
        """Run a meterpreter script and return its immediate output."""
        self.rpc.call(MsfRpcMethod.SessionMeterpreterScript, [self.sid, path])
        return self.read()

    def info(self):
        # NOTE(review): used as `self.info['plugins']` / `self.info['write_dir']`
        # below without a call -- this was presumably decorated with @property
        # upstream; confirm before relying on it as a plain method.
        return self.__dict__[self.sid]

    def sep(self):
        """Return the session's filesystem directory separator."""
        return self.rpc.call(MsfRpcMethod.SessionMeterpreterDirectorySeparator, [self.sid])['separator']

    def detach(self):
        """Detach (background) the current channel/shell; returns the RPC result."""
        return self.rpc.call(MsfRpcMethod.SessionMeterpreterSessionDetach, [self.sid])

    def kill(self):
        """Terminate the meterpreter session."""
        self.rpc.call(MsfRpcMethod.SessionMeterpreterSessionKill, [self.sid])

    def tabs(self, line):
        """Return tab-completion candidates for the partial command `line`."""
        return self.rpc.call(MsfRpcMethod.SessionMeterpreterTabs, [self.sid, line])['tabs']

    def load_plugin(self, plugin):
        """Load a meterpreter extension (e.g. 'powershell') and record it."""
        end_strs = ['Success', 'has already been loaded']
        out = self.run_with_output(f'load {plugin}', end_strs)
        self.__dict__[self.sid]['plugins'].append(plugin)
        return out

    def run_with_output(self, cmd, end_strs=None, timeout=301, timeout_exception=True, api_call='write'):
        """Run `cmd` and collect output until an `end_strs` match or timeout."""
        if (api_call == 'write'):
            self.write(cmd)
            out = ''
        else:
            out = self.runsingle(cmd)
        time.sleep(5)  # give the session a moment to start producing output
        out += self.gather_output(cmd, out, end_strs, timeout, timeout_exception)
        return out

    def gather_output(self, cmd, out, end_strs, timeout, timeout_exception):
        """Poll session output every 5s until a terminator string appears.

        Raises MsfError on timeout when timeout_exception is True.
        """
        counter = 1
        # NOTE(review): counter counts ~5s polling iterations but is compared
        # against `timeout`, which the error message treats as seconds --
        # the effective wait is ~5x `timeout`; confirm intended semantics.
        while (counter < timeout):
            out += self.read()
            if (end_strs == None):
                # No terminators given: any output at all ends the wait.
                if (len(out) > 0):
                    return out
            elif any(((end_str in out) for end_str in end_strs)):
                return out
            time.sleep(5)
            counter += 1
        if timeout_exception:
            msg = f'Command <{repr(cmd)[1:(- 1)]}> timed out in <{timeout}s> on session <{self.sid}>'
            if (end_strs == None):
                msg += f' without finding any termination strings within <{end_strs}> in the output: <{out}>'
            raise MsfError(msg)
        else:
            return out

    def run_shell_cmd_with_output(self, cmd, end_strs, exit_shell=True, timeout=301, timeout_exception=True):
        """Run `cmd` inside a system shell spawned on the session."""
        self.start_shell()
        out = self.run_with_output(cmd, end_strs, timeout=timeout, timeout_exception=timeout_exception)
        if (exit_shell == True):
            # Drain remaining output, then back out of the shell channel.
            self.read()
            res = self.detach()
            if ('result' in res):
                if (res['result'] != 'success'):
                    raise MsfError(('Shell failed to exit on meterpreter session ' + self.sid))
        return out

    def start_shell(self):
        """Drop into a system shell on the session; returns True when started."""
        cmd = 'shell'
        end_strs = ['>']
        self.run_with_output(cmd, end_strs)
        return True

    def import_psh(self, script_path):
        """powershell_import a script, loading the powershell plugin if needed."""
        if ('powershell' not in self.info['plugins']):
            self.load_plugin('powershell')
        end_strs = ['[-]', '[+]']
        out = self.run_with_output(f'powershell_import {script_path}', end_strs)
        if ('failed to load' in out):
            raise MsfRpcError(f'File {script_path} failed to load.')
        return out

    def run_psh_cmd(self, ps_cmd, timeout=310, timeout_exception=True):
        """Execute a PowerShell command, loading the plugin on first use."""
        if ('powershell' not in self.info['plugins']):
            self.load_plugin('powershell')
        ps_cmd = f'powershell_execute "{ps_cmd}"'
        out = self.run_with_output(ps_cmd, ['[-]', '[+]'], timeout=timeout, timeout_exception=timeout_exception)
        return out

    def get_writeable_dir(self):
        """Return (and cache) a writable directory on the target (%TEMP%)."""
        if (self.info['write_dir'] == ''):
            out = self.run_shell_cmd_with_output('echo %TEMP%', ['>'])
            # Second output line holds the expanded path; strip the trailing CR.
            write_dir = (out.split('\n')[1][:(- 1)] + '\\')
            self.__dict__[self.sid]['write_dir'] = write_dir
            return write_dir
        else:
            return self.info['write_dir']
class AbstractWT(nn.Module):
    """Base class for adaptive wavelet transforms trained with attribution penalties."""

    def fit(self, X=None, train_loader=None, pretrained_model=None, lr: float=0.001, num_epochs: int=20, seed: int=42, attr_methods='Saliency', target=6, lamlSum: float=1.0, lamhSum: float=1.0, lamL2norm: float=1.0, lamCMF: float=1.0, lamConv: float=1.0, lamL1wave: float=1.0, lamL1attr: float=1.0):
        """Fit the wavelet parameters.

        Pass either a raw array/tensor `X` (a single-batch DataLoader is built
        from it) or a ready `train_loader`. The lam* weights configure the
        regularization terms combined by get_loss_f. Leaves the module in
        eval() mode and stores per-epoch losses on self.train_losses.

        Raises:
            ValueError: when neither X nor train_loader is given.
        """
        torch.manual_seed(seed)
        if ((X is None) and (train_loader is None)):
            raise ValueError('Either X or train_loader must be passed!')
        elif (train_loader is None):
            if ('ndarray' in str(type(X))):
                X = torch.Tensor(X).to(self.device)
            X = X.float()
            if (self.wt_type == 'DWT2d'):
                # 2-D transform expects an explicit channel dimension.
                X = X.unsqueeze(1)
            # Dummy np.nan labels -- the trainer only consumes the inputs.
            X = [(X[i], np.nan) for i in range(X.shape[0])]
            train_loader = torch.utils.data.DataLoader(X, shuffle=True, batch_size=len(X))
        params = list(self.parameters())
        optimizer = torch.optim.Adam(params, lr=lr)
        loss_f = get_loss_f(lamlSum=lamlSum, lamhSum=lamhSum, lamL2norm=lamL2norm, lamCMF=lamCMF, lamConv=lamConv, lamL1wave=lamL1wave, lamL1attr=lamL1attr)
        trainer = Trainer(pretrained_model, self, optimizer, loss_f, use_residuals=True, target=target, attr_methods=attr_methods, n_print=1, device=self.device)
        self.train()
        trainer(train_loader, epochs=num_epochs)
        self.train_losses = trainer.train_losses
        self.eval()
class ExternalOptimizerInterface():
    """Bridge between a TF1 graph and an external (e.g. scipy) optimizer.

    Packs all trainable variables into one flat vector, exposes loss/gradient
    callables, and writes optimized values back through placeholder assigns.
    """

    def __init__(self, loss, var_list=None, equalities=None, inequalities=None, var_to_bounds=None, **optimizer_kwargs):
        self._loss = loss
        self._equalities = (equalities or [])
        self._inequalities = (inequalities or [])
        if (var_list is None):
            self._vars = tf.trainable_variables()
        else:
            self._vars = list(var_list)
        # Flatten per-variable bounds into one packed [(low, high), ...] list,
        # defaulting unbounded variables to (-inf, +inf).
        packed_bounds = None
        if (var_to_bounds is not None):
            left_packed_bounds = []
            right_packed_bounds = []
            for var in self._vars:
                shape = var.get_shape().as_list()
                bounds = ((- np.infty), np.infty)
                if (var in var_to_bounds):
                    bounds = var_to_bounds[var]
                left_packed_bounds.extend(list(np.broadcast_to(bounds[0], shape).flat))
                right_packed_bounds.extend(list(np.broadcast_to(bounds[1], shape).flat))
            packed_bounds = list(zip(left_packed_bounds, right_packed_bounds))
        self._packed_bounds = packed_bounds
        # Placeholder-driven assign ops used to write results back to the graph.
        self._update_placeholders = [tf.placeholder(var.dtype) for var in self._vars]
        self._var_updates = [var.assign(tf.reshape(placeholder, _get_shape_tuple(var))) for (var, placeholder) in zip(self._vars, self._update_placeholders)]
        loss_grads = _compute_gradients(loss, self._vars)
        equalities_grads = [_compute_gradients(equality, self._vars) for equality in self._equalities]
        inequalities_grads = [_compute_gradients(inequality, self._vars) for inequality in self._inequalities]
        self.optimizer_kwargs = optimizer_kwargs
        self._packed_var = self._pack(self._vars)
        self._packed_loss_grad = self._pack(loss_grads)
        self._packed_equality_grads = [self._pack(equality_grads) for equality_grads in equalities_grads]
        self._packed_inequality_grads = [self._pack(inequality_grads) for inequality_grads in inequalities_grads]
        # Slices mapping each variable back out of the packed flat vector.
        dims = [_prod(_get_shape_tuple(var)) for var in self._vars]
        accumulated_dims = list(_accumulate(dims))
        self._packing_slices = [slice(start, end) for (start, end) in zip(accumulated_dims[:(- 1)], accumulated_dims[1:])]
def minimize(self, session=None, feed_dict=None, fetches=None, step_callback=None, loss_callback=None, **run_kwargs):
session = (session or tf.get_default_session())
feed_dict = (feed_dict or {})
fetches = (fetches or [])
loss_callback = (loss_callback or (lambda *fetches: None))
step_callback = (step_callback or (lambda xk: None))
loss_grad_func = self._make_eval_func([self._loss, self._packed_loss_grad], session, feed_dict, fetches, loss_callback)
equality_funcs = self._make_eval_funcs(self._equalities, session, feed_dict, fetches)
equality_grad_funcs = self._make_eval_funcs(self._packed_equality_grads, session, feed_dict, fetches)
inequality_funcs = self._make_eval_funcs(self._inequalities, session, feed_dict, fetches)
inequality_grad_funcs = self._make_eval_funcs(self._packed_inequality_grads, session, feed_dict, fetches)
initial_packed_var_val = session.run(self._packed_var)
packed_var_val = self._minimize(initial_val=initial_packed_var_val, loss_grad_func=loss_grad_func, equality_funcs=equality_funcs, equality_grad_funcs=equality_grad_funcs, inequality_funcs=inequality_funcs, inequality_grad_funcs=inequality_grad_funcs, packed_bounds=self._packed_bounds, step_callback=step_callback, optimizer_kwargs=self.optimizer_kwargs)
var_vals = [packed_var_val[packing_slice] for packing_slice in self._packing_slices]
session.run(self._var_updates, feed_dict=dict(zip(self._update_placeholders, var_vals)), **run_kwargs)
def _minimize(self, initial_val, loss_grad_func, equality_funcs, equality_grad_funcs, inequality_funcs, inequality_grad_funcs, packed_bounds, step_callback, optimizer_kwargs):
raise NotImplementedError('To use ExternalOptimizerInterface, subclass from it and implement the _minimize() method.')
def _pack(cls, tensors):
if (not tensors):
return None
elif (len(tensors) == 1):
return tf.reshape(tensors[0], [(- 1)])
else:
flattened = [tf.reshape(tensor, [(- 1)]) for tensor in tensors]
return tf.concat(flattened, 0)
def _make_eval_func(self, tensors, session, feed_dict, fetches, callback=None):
if (not isinstance(tensors, list)):
tensors = [tensors]
num_tensors = len(tensors)
def eval_func(x):
augmented_feed_dict = {var: x[packing_slice].reshape(_get_shape_tuple(var)) for (var, packing_slice) in zip(self._vars, self._packing_slices)}
augmented_feed_dict.update(feed_dict)
augmented_fetches = (tensors + fetches)
augmented_fetch_vals = session.run(augmented_fetches, feed_dict=augmented_feed_dict)
if callable(callback):
callback(*augmented_fetch_vals[num_tensors:])
return augmented_fetch_vals[:num_tensors]
return eval_func
def _make_eval_funcs(self, tensors, session, feed_dict, fetches, callback=None):
return [self._make_eval_func(tensor, session, feed_dict, fetches, callback) for tensor in tensors] |
class PGDAttack(Attack):
    """Projected Gradient Descent attack wrapper around advertorch-style
    Linf / L2 PGD adversaries."""

    def __init__(self, args, model, nb_iter, loss_fn=nn.CrossEntropyLoss(reduction='sum')):
        """Build the underlying PGD adversary for the requested norm ball.

        Args:
            args: options; must provide attack_ball, epsilon, clip_min,
                clip_max and dev.
            model: classifier under attack.
            nb_iter: number of PGD steps.
            loss_fn: loss maximized by the attack.

        Raises:
            NotImplementedError: if `args.attack_ball` is neither 'Linf' nor 'L2'.
        """
        super(PGDAttack, self).__init__(args, model, nb_iter, loss_fn)
        self.args = args
        self.model = model
        if args.attack_ball == 'Linf':
            self.adversary = LinfPGDAttack(self.model, loss_fn=loss_fn, eps=args.epsilon, nb_iter=nb_iter, eps_iter=0.01, rand_init=True, clip_min=args.clip_min, clip_max=args.clip_max, targeted=False)
        elif args.attack_ball == 'L2':
            self.adversary = L2PGDAttack(self.model, loss_fn=loss_fn, eps=args.epsilon, nb_iter=nb_iter, eps_iter=0.01, rand_init=True, clip_min=args.clip_min, clip_max=args.clip_max, targeted=False)
        else:
            raise NotImplementedError

    def train(self, train_loader, test_loader, l_test_classifiers, l_train_classif=None):
        # PGD needs no training phase; present only to satisfy the Attack interface.
        pass

    def perturb(self, x, target):
        """Evaluate clean and adversarial loss/accuracy on one batch.

        Results are printed; nothing is returned.
        """
        advcorrect, clncorrect, test_clnloss, test_advloss = 0, 0, 0, 0
        x = x.to(self.args.dev)
        target = target.to(self.args.dev)
        # clean statistics
        with torch.no_grad():
            output = self.model(x)
            test_clnloss += F.cross_entropy(output, target, reduction='sum').item()
            pred = output.max(1, keepdim=True)[1]
            clncorrect += pred.eq(target.view_as(pred)).sum().item()
        advdata = self.adversary.perturb(x, target)
        # adversarial statistics
        with torch.no_grad():
            output = self.model(advdata)
            test_advloss += F.cross_entropy(output, target, reduction='sum').item()
            pred = output.max(1, keepdim=True)[1]
            advcorrect += pred.eq(target.view_as(pred)).sum().item()
        # BUGFIX: the first line previously mislabeled clean accuracy as "Adv acc".
        print('Clean loss: {:.4f},Clean acc: {}/{} ({:.2f}%)\n'.format(test_clnloss, clncorrect, len(x), ((100.0 * clncorrect) / len(x))))
        print('Adv loss: {:.4f},Adv acc: {}/{} ({:.2f}%)\n'.format(test_advloss, advcorrect, len(x), ((100.0 * advcorrect) / len(x))))
def _check_and_coerce_cfg_value_type(value_a, value_b, key, full_key):
type_b = type(value_b)
type_a = type(value_a)
if (type_a is type_b):
return value_a
if isinstance(value_b, np.ndarray):
value_a = np.array(value_a, dtype=value_b.dtype)
elif isinstance(value_b, six.string_types):
value_a = str(value_a)
elif (isinstance(value_a, tuple) and isinstance(value_b, list)):
value_a = list(value_a)
elif (isinstance(value_a, list) and isinstance(value_b, tuple)):
value_a = tuple(value_a)
else:
raise ValueError('Type mismatch ({} vs. {}) with values ({} vs. {}) for config key: {}'.format(type_b, type_a, value_b, value_a, full_key))
return value_a |
def cmd_output_fixer(cmd: str) -> str:
    """Clean up an LLM-produced shell command.

    Strips whitespace, unwraps ``` / ~~~ code fences, removes wrapping
    quotes/backticks, and drops a leading '$ ' prompt.

    Args:
        cmd: raw command text.

    Returns:
        The cleaned command string.
    """
    cmd = cmd.strip(' \n')
    # nothing sensible to unwrap from a 0/1-char command
    if (len(cmd) < 2):
        return cmd
    # ``` fenced block: keep only the inner line
    stupidity = re.compile('^[ \\n\\r]*```.*\\n(.*)\\n```$', re.MULTILINE)
    result = stupidity.search(cmd)
    if result:
        print('this would have been captured by the multi-line regex 1')
        cmd = result.group(1)
        print(('new command: ' + cmd))
    # ~~~ fenced block: same treatment
    stupidity = re.compile('^[ \\n\\r]*~~~.*\\n(.*)\\n~~~$', re.MULTILINE)
    result = stupidity.search(cmd)
    if result:
        print('this would have been captured by the multi-line regex 2')
        cmd = result.group(1)
        print(('new command: ' + cmd))
    # BUGFIX: removed a dead duplicate re.compile of the ~~~ pattern whose
    # result was never used.
    cmd = remove_wrapping_characters(cmd, '`\'"')
    if cmd.startswith('$ '):
        cmd = cmd[2:]
    return cmd
def read(*paths: Any, **kwargs: Any) -> str:
    """Read a text file located relative to this module.

    Args:
        *paths: path components joined onto this module's directory.
        **kwargs: only 'encoding' is honored (default 'utf8').

    Returns:
        The file contents with surrounding whitespace stripped.
    """
    encoding = kwargs.get('encoding', 'utf8')
    target = Path(__file__).parent.joinpath(*paths)
    with open(target, encoding=encoding) as handle:
        return handle.read().strip()
def test_getitem():
    """Regression test for LyftDataset.__getitem__.

    Runs the full training pipeline (load, multi-sweep, augment, filter,
    format) with fixed seeds and checks points, boxes, labels, and img_metas
    against pinned values; also verifies the `classes` override in list,
    tuple, and file-path forms.
    """
    # fixed seeds: the pipeline contains random flip/rotation/scale/shuffle
    np.random.seed(0)
    torch.manual_seed(0)
    root_path = './tests/data/lyft'
    ann_file = './tests/data/lyft/lyft_infos.pkl'
    class_names = ('car', 'truck', 'bus', 'emergency_vehicle', 'other_vehicle', 'motorcycle', 'bicycle', 'pedestrian', 'animal')
    point_cloud_range = [(- 80), (- 80), (- 10), 80, 80, 10]
    # standard LiDAR training pipeline for Lyft
    pipelines = [dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=5, use_dim=5, file_client_args=dict(backend='disk')), dict(type='LoadPointsFromMultiSweeps', sweeps_num=2, file_client_args=dict(backend='disk')), dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), dict(type='GlobalRotScaleTrans', rot_range=[(- 0.523599), 0.523599], scale_ratio_range=[0.85, 1.15], translation_std=[0, 0, 0]), dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), dict(type='PointShuffle'), dict(type='DefaultFormatBundle3D', class_names=class_names), dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])]
    lyft_dataset = LyftDataset(ann_file, pipelines, root_path)
    data = lyft_dataset[0]
    points = data['points']._data
    gt_bboxes_3d = data['gt_bboxes_3d']._data
    gt_labels_3d = data['gt_labels_3d']._data
    pts_filename = data['img_metas']._data['pts_filename']
    pcd_horizontal_flip = data['img_metas']._data['pcd_horizontal_flip']
    pcd_scale_factor = data['img_metas']._data['pcd_scale_factor']
    pcd_rotation = data['img_metas']._data['pcd_rotation']
    sample_idx = data['img_metas']._data['sample_idx']
    pcd_rotation_expected = np.array([[0., (- 0.), 0.0], [0., 0., 0.0], [0.0, 0.0, 1.0]])
    # metadata pinned for the seeded augmentation draws above
    assert (pts_filename == 'tests/data/lyft/lidar/host-a017_lidar1_.bin')
    assert (pcd_horizontal_flip is True)
    assert (abs((pcd_scale_factor - 1.)) < 1e-05)
    assert np.allclose(pcd_rotation, pcd_rotation_expected, 0.001)
    assert (sample_idx == 'b98a05255ba2632ecb31f0e6fcc8d3cd6ee76b6d0ba55b72f08fc54')
    # pinned outputs of the seeded pipeline run
    expected_points = torch.tensor([[61.4785, (- 3.7393), 6.7699, 0.4001], [47.7904, (- 3.9887), 6.0926, 0.0], [52.5683, (- 4.2178), 6.7179, 0.0], [52.4867, (- 4.0315), 6.7057, 0.0], [59.8372, (- 1.7366), 6.5864, 0.4001], [53.0842, (- 3.7064), 6.7811, 0.0], [60.5549, (- 3.4978), 6.6578, 0.4001], [59.1695, (- 1.291), 7.0296, 0.2], [53.0702, (- 3.8868), 6.7807, 0.0], [47.9579, (- 4.1648), 5.6219, 0.2], [59.8226, (- 1.5522), 6.5867, 0.4001], [61.2858, (- 4.2254), 7.3089, 0.2], [49.9896, (- 4.5202), 5.8823, 0.2], [61.4597, (- 4.6402), 7.334, 0.2], [59.8244, (- 1.3499), 6.5895, 0.4001]])
    expected_gt_bboxes_3d = torch.tensor([[63.2257, 17.5206, (- 0.6307), 2.0109, 5.1652, 1.9471, (- 1.5868)], [(- 25.3804), 27.4598, (- 2.3297), 2.7412, 8.4792, 3.4343, (- 1.5939)], [(- 15.2098), (- 7.0109), (- 2.2566), 0.7931, 0.841, 1.7916, 1.509]])
    expected_gt_labels = np.array([0, 4, 7])
    original_classes = lyft_dataset.CLASSES
    assert torch.allclose(points, expected_points, 0.01)
    assert torch.allclose(gt_bboxes_3d.tensor, expected_gt_bboxes_3d, 0.001)
    assert np.all((gt_labels_3d.numpy() == expected_gt_labels))
    assert (original_classes == class_names)
    # `classes` override: list form
    lyft_dataset = LyftDataset(ann_file, None, root_path, classes=['car', 'pedestrian'])
    assert (lyft_dataset.CLASSES != original_classes)
    assert (lyft_dataset.CLASSES == ['car', 'pedestrian'])
    # `classes` override: tuple form
    lyft_dataset = LyftDataset(ann_file, None, root_path, classes=('car', 'pedestrian'))
    assert (lyft_dataset.CLASSES != original_classes)
    assert (lyft_dataset.CLASSES == ('car', 'pedestrian'))
    # `classes` override: file path form (one class name per line)
    import tempfile
    tmp_file = tempfile.NamedTemporaryFile()
    with open(tmp_file.name, 'w') as f:
        f.write('car\npedestrian\n')
    lyft_dataset = LyftDataset(ann_file, None, root_path, classes=tmp_file.name)
    assert (lyft_dataset.CLASSES != original_classes)
    assert (lyft_dataset.CLASSES == ['car', 'pedestrian'])
def parepare_dataset(sess, num_repeats):
    """Convert one split ('train'/'test') of the point-cloud dataset into
    pre-allocated HDF5 arrays of GPGL2 grid embeddings.

    NOTE(review): the name keeps the original 'parepare' typo because
    external callers may reference it.  The function depends on
    module-level globals (f0, data_info, dir_list, dataset_dir,
    dataset_name, label_offset, SIZE_SUB, SIZE_TOP, NUM_POINTS, NUM_CUTS,
    GPGL2_seg) — it is not self-contained.

    Args:
        sess: split name, 'train' or 'test'.
        num_repeats: randomized GPGL2 embeddings stored per input sample.

    Returns:
        (samples_written, mean_node_loss_rate, elapsed_nanoseconds).
    """
    data_file_size = data_info[(sess + '_dataset_length')]
    NUM_CLASSES = data_info['label_length']
    # pre-create one HDF5 dataset per output tensor, sized for all repeats
    x_set = f0.create_dataset(('x_' + sess), ((data_file_size * num_repeats), (SIZE_SUB * SIZE_TOP), (SIZE_SUB * SIZE_TOP), 3), dtype='f')
    s_set = f0.create_dataset(('s_' + sess), ((data_file_size * num_repeats), (SIZE_SUB * SIZE_TOP), (SIZE_SUB * SIZE_TOP), (NUM_CLASSES + 1)), dtype='f')
    y_set = f0.create_dataset(('y_' + sess), ((data_file_size * num_repeats), NUM_POINTS, 3), dtype='f')
    p_set = f0.create_dataset(('p_' + sess), ((data_file_size * num_repeats), NUM_POINTS, 2), dtype='i')
    l_set = f0.create_dataset(('l_' + sess), ((data_file_size * num_repeats), NUM_POINTS), dtype='i')
    d_set = f0.create_dataset(('d_' + sess), ((data_file_size * num_repeats), NUM_POINTS, 3), dtype='i')
    c_set = f0.create_dataset(('c_' + sess), ((data_file_size * num_repeats), 20), dtype='S10')
    # destination slot per written sample; shuffled for train so repeats of
    # the same sample are scattered across the file
    sample_list = np.arange((data_file_size * num_repeats))
    if (sess == 'train'):
        np.random.shuffle(sample_list)
    idx_sample = 0
    node_loss_rate_list = []
    time_begin = time.time_ns()
    for (category_idx, category) in zip(range(len(dir_list)), dir_list):
        category_dir = os.path.join(dataset_dir, category)
        file_path = os.path.join(category_dir, (sess + '_files.txt'))
        with open(file_path, 'r') as fin:
            files = fin.readlines()
            # strip the leading './' and trailing newline from each listed path
            files = [f[2:(- 1)] for f in files]
        for file in files:
            with h5py.File(os.path.join(category_dir, file), 'r') as f:
                (file_data, file_label, file_seg) = (f['data'][:], category_idx, f['label_seg'][:])
                # shift per-category segmentation ids into the global label space
                file_seg += label_offset[category_idx]
            for (data, label) in zip(file_data, file_seg):
                for i_repeats in range(num_repeats):
                    (mat, seg, pos, node_loss_rate) = GPGL2_seg(data, label, NUM_CLASSES, NUM_CUTS, SIZE_TOP, SIZE_SUB)
                    x_set[sample_list[idx_sample]] = mat
                    # NOTE(review): y_set is (N, NUM_POINTS, 3) but is indexed
                    # like a one-hot class row here — confirm intended layout.
                    y_set[(sample_list[idx_sample], category_idx)] = 1
                    s_set[sample_list[idx_sample]] = seg
                    p_set[sample_list[idx_sample]] = pos
                    l_set[sample_list[idx_sample]] = label
                    d_set[sample_list[idx_sample]] = data
                    c_set[sample_list[idx_sample]] = np.string_(dir_list[category_idx])
                    print(((((((dataset_name + ':') + sess) + ': idx=') + str(idx_sample)) + '/') + str(x_set.shape[0])), ('node_loss_rate=' + str(node_loss_rate)))
                    idx_sample += 1
                    if (idx_sample == (data_file_size * num_repeats)):
                        # NOTE(review): this break exits only the repeats loop;
                        # the outer loops continue and would index sample_list
                        # out of range if more samples remain — confirm inputs
                        # always match data_file_size exactly.
                        break
                node_loss_rate_list.append(node_loss_rate)
    time_end = time.time_ns()
    node_loss_rate_final = np.array(node_loss_rate_list).mean()
    x_set.attrs['NUM_REPEATS'] = num_repeats
    x_set.attrs['node loss ratio'] = node_loss_rate_final
    return (idx_sample, node_loss_rate_final, (time_end - time_begin))
class CPDataset(data.Dataset):
    """Single-pair dataset for CP-VTON style virtual try-on.

    Loads one (person image, cloth image) pair given by `opt.imname` /
    `opt.cname` and prepares all tensors for either the GMM (geometric
    matching) or TOM (try-on) stage.
    """

    def __init__(self, opt):
        super(CPDataset, self).__init__()
        self.opt = opt
        self.stage = opt.stage
        self.fine_height = opt.fine_height
        self.fine_width = opt.fine_width
        self.radius = opt.radius
        self.grid_image = opt.grid_image
        self.data_path = opt.data_path
        # map PIL images to [-1, 1] float tensors
        self.transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
        # single-sample dataset: exactly one image name and one cloth name
        self.im_names = [opt.imname]
        self.c_names = [opt.cname]

    def name(self):
        return 'CPDataset'

    def __getitem__(self, index):
        """Assemble all model inputs for sample `index`.

        Returns a dict with the cloth and its mask, the person image, the
        cloth-agnostic person representation (shape + head + pose heatmaps),
        and several visualization tensors.
        """
        c_name = self.c_names[index]
        im_name = self.im_names[index]
        # GMM consumes the raw cloth; later stages use the pre-warped cloth
        if (self.stage == 'GMM'):
            c = Image.open(osp.join(self.data_path, 'cloth', c_name))
            cm = Image.open(osp.join(self.data_path, 'cloth-mask', c_name))
        else:
            c = Image.open(osp.join(self.data_path, 'warp-cloth', c_name))
            cm = Image.open(osp.join(self.data_path, 'warp-mask', c_name))
        c = self.transform(c)
        # binarize the cloth mask to {0, 1} and add a channel dim
        cm_array = np.array(cm)
        cm_array = (cm_array >= 128).astype(np.float32)
        cm = torch.from_numpy(cm_array)
        cm.unsqueeze_(0)
        im = Image.open(osp.join(self.data_path, 'image', im_name))
        im = self.transform(im)
        # human parsing map: per-pixel body-part labels
        parse_name = im_name.replace('.jpg', '.png')
        im_parse = Image.open(osp.join(self.data_path, 'image-parse', parse_name))
        parse_array = np.array(im_parse)
        parse_shape = (parse_array > 0).astype(np.float32)
        # labels 1/2/4/13 -> head region to keep; 5/6/7 -> upper clothes to
        # replace (label ids presumably follow the LIP parsing convention —
        # confirm against the parser used)
        parse_head = ((((parse_array == 1).astype(np.float32) + (parse_array == 2).astype(np.float32)) + (parse_array == 4).astype(np.float32)) + (parse_array == 13).astype(np.float32))
        parse_cloth = (((parse_array == 5).astype(np.float32) + (parse_array == 6).astype(np.float32)) + (parse_array == 7).astype(np.float32))
        # downsample then upsample the body silhouette to blur out details
        parse_shape = Image.fromarray((parse_shape * 255).astype(np.uint8))
        parse_shape = parse_shape.resize(((self.fine_width // 16), (self.fine_height // 16)), Image.BILINEAR)
        parse_shape = parse_shape.resize((self.fine_width, self.fine_height), Image.BILINEAR)
        shape = self.transform(parse_shape)
        phead = torch.from_numpy(parse_head)
        pcm = torch.from_numpy(parse_cloth)
        # cloth region on white background; head region masked out elsewhere
        im_c = ((im * pcm) + (1 - pcm))
        im_h = ((im * phead) - (1 - phead))
        # pose keypoints -> one square-blob heatmap channel per joint
        pose_name = im_name.replace('.jpg', '_keypoints.json')
        with open(osp.join(self.data_path, 'pose', pose_name), 'r') as f:
            pose_label = json.load(f)
            pose_data = pose_label['people'][0]['pose_keypoints']
            pose_data = np.array(pose_data)
            pose_data = pose_data.reshape(((- 1), 3))
        point_num = pose_data.shape[0]
        pose_map = torch.zeros(point_num, self.fine_height, self.fine_width)
        r = self.radius
        im_pose = Image.new('L', (self.fine_width, self.fine_height))
        pose_draw = ImageDraw.Draw(im_pose)
        for i in range(point_num):
            one_map = Image.new('L', (self.fine_width, self.fine_height))
            draw = ImageDraw.Draw(one_map)
            pointx = pose_data[(i, 0)]
            pointy = pose_data[(i, 1)]
            # coordinates <= 1 mark undetected joints; skip drawing them
            if ((pointx > 1) and (pointy > 1)):
                draw.rectangle(((pointx - r), (pointy - r), (pointx + r), (pointy + r)), 'white', 'white')
                pose_draw.rectangle(((pointx - r), (pointy - r), (pointx + r), (pointy + r)), 'white', 'white')
            one_map = self.transform(one_map)
            pose_map[i] = one_map[0]
        im_pose = self.transform(im_pose)
        # cloth-agnostic person representation fed to the networks
        agnostic = torch.cat([shape, im_h, pose_map], 0)
        if (self.stage == 'GMM'):
            im_g = Image.open(self.grid_image)
            im_g = self.transform(im_g)
        else:
            # TOM stage does not use the grid visualization
            im_g = ''
        result = {'c_name': c_name, 'im_name': im_name, 'cloth': c, 'cloth_mask': cm, 'image': im, 'agnostic': agnostic, 'parse_cloth': im_c, 'shape': shape, 'head': im_h, 'pose_image': im_pose, 'grid_image': im_g}
        return result

    def __len__(self):
        return len(self.im_names)
class MobileNetV2_MPNCOV(nn.Module):
    """MobileNetV2 backbone with an MPN-COV (matrix power normalized
    covariance pooling) head instead of global average pooling.

    The final feature map is reduced to 256 channels, covariance-pooled,
    matrix-square-rooted (5 Newton-Schulz iterations) and its upper
    triangle vectorized before the linear classifier.
    """

    def __init__(self, num_classes=1000, width_mult=1.0):
        super(MobileNetV2_MPNCOV, self).__init__()
        block = InvertedResidual
        input_channel = 32
        last_channel = 1280
        # rows are (expansion t, output channels c, repeats n, first stride s)
        inverted_residual_setting = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1]]
        input_channel = int((input_channel * width_mult))
        self.last_channel = int((last_channel * max(1.0, width_mult)))
        features = [ConvBNReLU(3, input_channel, stride=2)]
        for (t, c, n, s) in inverted_residual_setting:
            output_channel = int((c * width_mult))
            for i in range(n):
                # the configured stride applies only to the first block of a stage
                stride = (s if (i == 0) else 1)
                features.append(block(input_channel, output_channel, stride, expand_ratio=t))
                input_channel = output_channel
        features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1))
        self.features = nn.Sequential(*features)
        # 1x1 reduction to 256 channels keeps the covariance matrix small
        self.dimension_reduction = nn.Sequential(nn.Conv2d(self.last_channel, 256, kernel_size=1, stride=1, padding=0, bias=False), nn.BatchNorm2d(256), nn.ReLU6(inplace=True))
        # 32896 = 256 * 257 / 2: upper-triangle size of the 256x256 covariance
        self.classifier = nn.Sequential(nn.Dropout(0.2), nn.Linear(32896, num_classes))
        # standard MobileNetV2 weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if (m.bias is not None):
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

    def forward(self, x):
        x = self.features(x)
        x = self.dimension_reduction(x)
        # second-order pooling: covariance -> matrix square root -> triu vector
        x = MPNCOV.CovpoolLayer(x)
        x = MPNCOV.SqrtmLayer(x, 5)
        x = MPNCOV.TriuvecLayer(x)
        x = x.view(x.size(0), (- 1))
        x = self.classifier(x)
        return x
class Linear(fa_constructor.Linear):
    """fa_constructor Linear that always forces layer_config['type'] = 'brsf'."""

    def __init__(self, in_features: int, out_features: int, bias: bool=True, layer_config: dict=None) -> None:
        # fall back to a fresh config dict; a caller-supplied dict is
        # mutated in place, exactly as before
        config = {} if layer_config is None else layer_config
        config['type'] = 'brsf'
        super().__init__(in_features, out_features, bias, config)
def ade_quad_double_track(target, start, sols, gamma=0, verbose=1):
    """Track solution paths in quad double precision using algorithmic
    differentiation (phcpy backend).

    Args:
        target: target polynomial system.
        start: start system with known solutions.
        sols: start solutions.
        gamma: homotopy gamma constant; 0 draws a random unit-circle value.
        verbose: verbosity level passed to the tracker.

    Returns:
        The solutions at the end of the paths.
    """
    from phcpy.phcpy2c3 import py2c_copy_quaddobl_container_to_target_system
    from phcpy.phcpy2c3 import py2c_copy_quaddobl_container_to_start_system
    from phcpy.phcpy2c3 import py2c_ade_manypaths_qd
    from phcpy.interface import store_quaddobl_system
    from phcpy.interface import store_quaddobl_solutions
    from phcpy.interface import load_quaddobl_solutions
    # stage target system, start system, and start solutions in the containers
    store_quaddobl_system(target)
    py2c_copy_quaddobl_container_to_target_system()
    store_quaddobl_system(start)
    py2c_copy_quaddobl_container_to_start_system()
    store_quaddobl_solutions(len(start), sols)
    if gamma == 0:
        from random import uniform
        from cmath import exp, pi
        # random point on the complex unit circle
        gamma = exp(uniform(0, 2 * pi) * complex(0, 1))
        if verbose > 0:
            print('random gamma constant :', gamma)
    fail = py2c_ade_manypaths_qd(verbose, gamma.real, gamma.imag)
    if fail != 0:
        print('Path tracking with AD failed!')
    elif verbose > 0:
        print('Path tracking with AD was a success!')
    return load_quaddobl_solutions()
def test_trunc_normal_init():
    """Statistical test for `trunc_normal_init`.

    Draws random (mean, std, a, b), initializes a Conv2d with them, and
    uses a Kolmogorov-Smirnov test to check that the weights follow the
    requested truncated normal; also checks the constant bias fill and the
    no-bias default path.
    """
    def _random_float(a, b):
        # uniform sample in [a, b)
        return (((b - a) * random.random()) + a)
    def _is_trunc_normal(tensor, mean, std, a, b):
        # standardize samples, then KS-test against truncnorm on [a0, b0]
        z_samples = ((tensor.view((- 1)) - mean) / std)
        z_samples = z_samples.tolist()
        a0 = ((a - mean) / std)
        b0 = ((b - mean) / std)
        p_value = stats.kstest(z_samples, 'truncnorm', args=(a0, b0))[1]
        # very loose threshold: only reject on overwhelming evidence
        return (p_value > 0.0001)
    conv_module = nn.Conv2d(3, 16, 3)
    mean = _random_float((- 3), 3)
    std = _random_float(0.01, 1)
    # truncation bounds straddle the mean within two standard deviations
    a = _random_float((mean - (2 * std)), mean)
    b = _random_float(mean, (mean + (2 * std)))
    trunc_normal_init(conv_module, mean, std, a, b, bias=0.1)
    assert _is_trunc_normal(conv_module.weight, mean, std, a, b)
    assert conv_module.bias.allclose(torch.full_like(conv_module.bias, 0.1))
    # default-argument path on a bias-less module must not raise
    conv_module_no_bias = nn.Conv2d(3, 16, 3, bias=False)
    trunc_normal_init(conv_module_no_bias)
class DepthEvaluationArguments(ArgumentsBase):
    """Argument container for SGDepth depth-evaluation runs."""

    DESCRIPTION = 'SGDepth Depth Evaluation'

    def __init__(self):
        super().__init__()
        self._harness_init_system()
        self._harness_init_model()
        self._harness_init_depth()
        self._eval_init_logging()

    def parse(self):
        """Parse CLI options and neutralize all training-related settings,
        since evaluation never trains."""
        opt = self._parse()
        overrides = (
            ('train_learning_rate', 0),
            ('train_scheduler_step_size', 1000),
            ('train_weight_decay', 0),
            ('train_weights_init', 'scratch'),
            ('train_depth_grad_scale', 0),
            ('train_segmentation_grad_scale', 0),
        )
        for attr, value in overrides:
            setattr(opt, attr, value)
        return opt
def _main(client_only=False):
    """Launch (optionally) a local server subprocess, then run the client
    evaluation against it.

    Args:
        client_only: when True, skip data args and server startup and
            assume a server is already running; `server_process` is then
            None for `evaluate`.
    """
    parser = options.general_parser()
    options.add_server_args(parser)
    if (not client_only):
        options.add_data_args(parser)
    (args, _) = parser.parse_known_args()
    if (not client_only):
        # resolve the agent class to fill in a default data type
        (_, agent_cls) = find_agent_cls(args)
        if (args.data_type is None):
            args.data_type = agent_cls.data_type
        # silence tornado's per-request access logging
        logging.getLogger('tornado.access').setLevel(logging.WARNING)
        server_process = Process(target=start_server, args=(args,))
        server_process.start()
        # give the server a moment to come up before the client connects
        time.sleep(3)
    else:
        server_process = None
    client = start_client(args)
    evaluate(args, client, server_process)
def conv(x, channels, kernel=4, stride=2, pad=0, pad_type='zero', use_bias=True, scope='conv_0'):
    """2-D convolution with optional explicit zero/reflect padding (TF1 graph mode).

    Args:
        x: NHWC input tensor.
        channels: number of output filters.
        kernel, stride, pad: conv geometry; `pad` is applied spatially before the conv.
        pad_type: 'zero' or 'reflect'.
        use_bias: whether the conv layer adds a bias term.
        scope: variable scope name.
    """
    with tf.variable_scope(scope):
        paddings = [[0, 0], [pad, pad], [pad, pad], [0, 0]]
        if (pad_type == 'zero'):
            x = tf.pad(x, paddings)
        if (pad_type == 'reflect'):
            x = tf.pad(x, paddings, mode='REFLECT')
        return tf.layers.conv2d(inputs=x, filters=channels, kernel_size=kernel, kernel_initializer=weight_init, kernel_regularizer=weight_regularizer, strides=stride, use_bias=use_bias)
class SupConLoss1(nn.Module):
    """Supervised contrastive (SupCon) loss over two projected views.

    Positive/negative pairs come from an explicit mask, from class targets,
    or default to the identity pairing (SimCLR-style InfoNCE).
    """

    def __init__(self, temperature=0.07, exclude_other_pos=False):
        super().__init__()
        # temperature applied to the similarity logits
        self._t = temperature
        # when True, the other positives are removed from the denominator
        self._exclude_pos = exclude_other_pos
        logger.info(f'initializing {self.__class__.__name__} with t: {self._t}, exclude_pos: {self._exclude_pos}')

    def forward(self, proj_feat1, proj_feat2, target=None, mask: Tensor=None, **kwargs):
        """Build (batch x batch) positive/negative masks and delegate.

        Precedence: explicit `mask` (1 = positive, 0 = negative), then
        `target` (equal labels are positives), then identity pairing.
        """
        batch_size = proj_feat1.size(0)
        if (mask is not None):
            assert (mask.shape == torch.Size([batch_size, batch_size]))
            pos_mask = (mask == 1)
            neg_mask = (mask == 0)
        elif (target is not None):
            if isinstance(target, list):
                target = torch.Tensor(target).to(device=proj_feat2.device)
            # pairwise label-equality matrix
            mask = torch.eq(target[(..., None)], target[(None, ...)])
            pos_mask = (mask == True)
            neg_mask = (mask == False)
        else:
            # unsupervised fallback: each sample's only positive is its own pair
            pos_mask = torch.eye(batch_size, dtype=torch.float, device=proj_feat2.device)
            neg_mask = (1 - pos_mask)
        return self._forward(proj_feat1, proj_feat2, pos_mask.float(), neg_mask.float(), **kwargs)

    def _forward(self, proj_feat1, proj_feat2, pos_mask, neg_mask, **kwargs):
        """Core loss over the concatenated 2N x 2N similarity matrix.

        Assumes proj_feat1/proj_feat2 are (batch, dim) L2-normalized
        embeddings (asserted below).
        """
        assert (is_normalized(proj_feat1) and is_normalized(proj_feat2)), f'features need to be normalized first'
        assert (proj_feat1.shape == proj_feat2.shape), (proj_feat1.shape, proj_feat2.shape)
        batch_size = len(proj_feat1)
        # zero out self-similarities on the 2N x 2N diagonal
        unselect_diganal_mask = (1 - torch.eye((batch_size * 2), (batch_size * 2), dtype=torch.float, device=proj_feat2.device))
        # tile the N x N masks so they cover both views
        pos_mask = pos_mask.repeat(2, 2)
        neg_mask = neg_mask.repeat(2, 2)
        pos_mask *= unselect_diganal_mask
        neg_mask *= unselect_diganal_mask
        (sim_exp, sim_logits) = exp_sim_temperature(proj_feat1, proj_feat2, self._t)
        assert (pos_mask.shape == sim_exp.shape == neg_mask.shape), (pos_mask.shape, sim_exp.shape, neg_mask.shape)
        # stashed for external inspection / debugging
        self.sim_exp = sim_exp
        self.sim_logits = sim_logits
        self.pos_mask = pos_mask
        self.neg_mask = neg_mask
        (pos_count, neg_count) = (pos_mask.sum(1), neg_mask.sum(1))
        pos_sum = (sim_exp * pos_mask).sum(1, keepdim=True).repeat(1, (batch_size * 2))
        neg_sum = (sim_exp * neg_mask).sum(1, keepdim=True).repeat(1, (batch_size * 2))
        if self._exclude_pos:
            # NOTE(review): denominator keeps only this pair plus negatives
            # rescaled by neg_ratio; the rescaling form looks unusual —
            # verify against the intended formulation.
            neg_ratio = (neg_count.float() / (pos_count + neg_count).float())
            log_pos_div_sum_pos_neg = (sim_logits - torch.log(((sim_exp + (neg_sum / (neg_ratio + 0.0001)[(..., None)].repeat(1, (batch_size * 2)))) + 1e-16)))
        else:
            # log( exp(sim) / (sum_pos + sum_neg) ), epsilon-stabilized
            log_pos_div_sum_pos_neg = (sim_logits - torch.log(((pos_sum + neg_sum) + 1e-16)))
        # average over each anchor's positives, then over anchors
        loss = ((log_pos_div_sum_pos_neg * pos_mask).sum(1) / pos_count)
        loss = (- loss.mean())
        if torch.isnan(loss):
            raise RuntimeError(loss)
        return loss
def main():
    """Training entry point for the UNet-style segmentation models.

    Reads CLI config, builds loss/model/optimizer/scheduler, splits image
    ids 80/20 into train/val, then runs the epoch loop with CSV logging,
    best-IoU checkpointing, and optional early stopping.
    """
    config = vars(parse_args())
    # derive a run name encoding dataset/arch/deep-supervision when not given
    if (config['name'] is None):
        if config['deep_supervision']:
            config['name'] = ('%s_%s_wDS' % (config['dataset'], config['arch']))
        else:
            config['name'] = ('%s_%s_woDS' % (config['dataset'], config['arch']))
    os.makedirs(('models/%s' % config['name']), exist_ok=True)
    print(('-' * 20))
    for key in config:
        print(('%s: %s' % (key, config[key])))
    print(('-' * 20))
    # persist the resolved config next to the checkpoints
    with open(('models/%s/config.yml' % config['name']), 'w') as f:
        yaml.dump(config, f)
    if (config['loss'] == 'BCEWithLogitsLoss'):
        criterion = nn.BCEWithLogitsLoss().cuda()
    else:
        # look up a custom loss class by name from the project's losses module
        criterion = losses.__dict__[config['loss']]().cuda()
    cudnn.benchmark = True
    model = archs.__dict__[config['arch']](config['num_classes'], config['input_channels'], config['deep_supervision'])
    model = model.cuda()
    params = filter((lambda p: p.requires_grad), model.parameters())
    if (config['optimizer'] == 'Adam'):
        optimizer = optim.Adam(params, lr=config['lr'], weight_decay=config['weight_decay'])
    elif (config['optimizer'] == 'SGD'):
        optimizer = optim.SGD(params, lr=config['lr'], momentum=config['momentum'], nesterov=config['nesterov'], weight_decay=config['weight_decay'])
    else:
        raise NotImplementedError
    if (config['scheduler'] == 'CosineAnnealingLR'):
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=config['epochs'], eta_min=config['min_lr'])
    elif (config['scheduler'] == 'ReduceLROnPlateau'):
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, factor=config['factor'], patience=config['patience'], verbose=1, min_lr=config['min_lr'])
    elif (config['scheduler'] == 'MultiStepLR'):
        scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[int(e) for e in config['milestones'].split(',')], gamma=config['gamma'])
    elif (config['scheduler'] == 'ConstantLR'):
        scheduler = None
    else:
        raise NotImplementedError
    # collect image ids and split 80/20 with a fixed seed for reproducibility
    img_ids = glob(os.path.join('inputs', config['dataset'], 'images', ('*' + config['img_ext'])))
    img_ids = [os.path.splitext(os.path.basename(p))[0] for p in img_ids]
    (train_img_ids, val_img_ids) = train_test_split(img_ids, test_size=0.2, random_state=41)
    # augmentation only on the training split
    train_transform = Compose([RandomRotate90(), transforms.Flip(), Resize(config['input_h'], config['input_w']), transforms.Normalize()])
    val_transform = Compose([Resize(config['input_h'], config['input_w']), transforms.Normalize()])
    train_dataset = Dataset(img_ids=train_img_ids, img_dir=os.path.join('inputs', config['dataset'], 'images'), mask_dir=os.path.join('inputs', config['dataset'], 'masks'), img_ext=config['img_ext'], mask_ext=config['mask_ext'], num_classes=config['num_classes'], transform=train_transform)
    val_dataset = Dataset(img_ids=val_img_ids, img_dir=os.path.join('inputs', config['dataset'], 'images'), mask_dir=os.path.join('inputs', config['dataset'], 'masks'), img_ext=config['img_ext'], mask_ext=config['mask_ext'], num_classes=config['num_classes'], transform=val_transform)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=config['batch_size'], shuffle=True, num_workers=config['num_workers'], drop_last=True)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=config['batch_size'], shuffle=False, num_workers=config['num_workers'], drop_last=False)
    # per-epoch metrics accumulated and flushed to CSV every epoch
    log = OrderedDict([('epoch', []), ('lr', []), ('loss', []), ('iou', []), ('val_loss', []), ('val_iou', []), ('val_dice', [])])
    best_iou = 0
    # trigger counts epochs since the last improvement (early stopping)
    trigger = 0
    for epoch in range(config['epochs']):
        print(('Epoch [%d/%d]' % (epoch, config['epochs'])))
        train_log = train(config, train_loader, model, criterion, optimizer)
        val_log = validate(config, val_loader, model, criterion)
        if (config['scheduler'] == 'CosineAnnealingLR'):
            scheduler.step()
        elif (config['scheduler'] == 'ReduceLROnPlateau'):
            scheduler.step(val_log['loss'])
        print(('loss %.4f - iou %.4f - val_loss %.4f - val_iou %.4f' % (train_log['loss'], train_log['iou'], val_log['loss'], val_log['iou'])))
        log['epoch'].append(epoch)
        log['lr'].append(config['lr'])
        log['loss'].append(train_log['loss'])
        log['iou'].append(train_log['iou'])
        log['val_loss'].append(val_log['loss'])
        log['val_iou'].append(val_log['iou'])
        log['val_dice'].append(val_log['dice'])
        pd.DataFrame(log).to_csv(('models/%s/log.csv' % config['name']), index=False)
        trigger += 1
        # checkpoint only when validation IoU improves
        if (val_log['iou'] > best_iou):
            torch.save(model.state_dict(), ('models/%s/model.pth' % config['name']))
            best_iou = val_log['iou']
            print('=> saved best model')
            trigger = 0
        if ((config['early_stopping'] >= 0) and (trigger >= config['early_stopping'])):
            print('=> early stopping')
            break
        torch.cuda.empty_cache()
def main():
    """Read a waypoint list from the file named in argv[1] and emit phrase
    patterns for every waypoint, in three passes."""
    with open(sys.argv[1], 'r') as handle:
        waypoints = [line.strip() for line in handle]
    templates = ('direct to %s', 'direct %s', 'turn %s', 'after %s', 'at %s')
    # three passes — presumably the generator randomizes output per call;
    # confirm against generate_waypoint_pattern's definition
    for _ in range(3):
        for template in templates:
            generate_waypoint_pattern(template, waypoints)
def load_index_to_gpu(index: faiss.IndexIVFPQ, single_gpu_id=None):
    """Clone a CPU IVFPQ index onto one GPU or shard-free across all GPUs.

    Args:
        index: CPU-resident IVFPQ index.
        single_gpu_id: pin to this GPU; None uses all GPUs (or GPU 0 when
            only one is present).

    Returns:
        The GPU-resident index.
    """
    # large PQ code books need float16 lookup tables on the GPU
    use_float16 = index.pq.M >= 56
    if faiss.get_num_gpus() == 1 or single_gpu_id is not None:
        gpu_id = 0 if single_gpu_id is None else single_gpu_id
        resources = faiss.StandardGpuResources()
        resources.setTempMemory(128 * 1024 * 1024)
        options = faiss.GpuClonerOptions()
        options.useFloat16 = use_float16
        return faiss.index_cpu_to_gpu(resources, gpu_id, index, options)
    # replicate (not shard) the index across every available GPU
    options = faiss.GpuMultipleClonerOptions()
    options.shard = False
    options.useFloat16 = use_float16
    return faiss.index_cpu_to_all_gpus(index, options)
def Train_or_Eval(model, state='Train'):
    """Put `model` into training mode when state == 'Train', otherwise
    into evaluation mode."""
    if state != 'Train':
        model.eval()
    else:
        model.train()
def PGD_perturb(sess, gradient, x, y, x_placeholder, y_placeholder, num_step, step_size, max_perturb):
    """Craft an L-inf PGD adversarial example with a TF1 session.

    Each step takes a signed-gradient step, clips the perturbation to the
    L-inf ball of radius `max_perturb`, and re-projects the perturbed input
    into the valid [0, 1] pixel range.

    Returns:
        The adversarial input, clipped to [0, 1].
    """
    delta = np.zeros(x.shape)
    for _ in range(num_step):
        grad_val = sess.run(gradient, feed_dict={x_placeholder: x + delta, y_placeholder: y})
        delta = delta + step_size * np.sign(grad_val)
        # project onto the L-inf ball, then keep x + delta inside [0, 1]
        delta = np.clip(delta, -max_perturb, max_perturb)
        delta = np.clip(x + delta, 0, 1.0) - x
    return np.clip(x + delta, 0, 1.0)
class IndexedCachedDataset(IndexedDataset):
    """IndexedDataset variant that preloads a chosen set of items into one
    contiguous in-memory buffer via `prefetch` for fast repeated access."""

    def __init__(self, path, fix_lua_indexing=False):
        super().__init__(path, fix_lua_indexing=fix_lua_indexing)
        # cache: flat numpy buffer holding prefetched items back-to-back;
        # cache_index: item index -> offset of that item inside `cache`
        self.cache = None
        self.cache_index = {}

    def supports_prefetch(self):
        return True

    def prefetch(self, indices):
        """Load the items at `indices` into the in-memory cache."""
        # nothing to do if everything requested is already cached
        if all(((i in self.cache_index) for i in indices)):
            return
        if (not self.data_file):
            self.read_data(self.path)
        indices = sorted(set(indices))
        total_size = 0
        for i in indices:
            total_size += (self.data_offsets[(i + 1)] - self.data_offsets[i])
        self.cache = np.empty(total_size, dtype=self.dtype)
        ptx = 0
        self.cache_index.clear()
        for i in indices:
            self.cache_index[i] = ptx
            size = (self.data_offsets[(i + 1)] - self.data_offsets[i])
            # read the item's bytes directly into its slice of the cache
            a = self.cache[ptx:(ptx + size)]
            self.data_file.seek((self.data_offsets[i] * self.element_size))
            self.data_file.readinto(a)
            ptx += size
        # the backing file is no longer needed once everything is cached
        if self.data_file:
            self.data_file.close()
            self.data_file = None
    # NOTE(review): the next line looks like a corrupted decorator — upstream
    # fairseq decorates __getitem__ with @lru_cache(maxsize=8).  As written it
    # is a bare call that raises NameError unless `_cache` exists at module
    # level; confirm and restore the decorator.
    _cache(maxsize=8)

    def __getitem__(self, i):
        """Return item `i` as a long tensor from the prefetched cache.

        Requires `prefetch` to have included `i`; otherwise the
        `cache_index` lookup raises KeyError.
        """
        self.check_index(i)
        tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[(i + 1)]]
        a = np.empty(tensor_size, dtype=self.dtype)
        ptx = self.cache_index[i]
        np.copyto(a, self.cache[ptx:(ptx + a.size)])
        item = torch.from_numpy(a).long()
        if self.fix_lua_indexing:
            # stored data is 1-based (Lua); shift to 0-based
            item -= 1
        return item
def ciou_loss(boxes1: torch.Tensor, boxes2: torch.Tensor, reduction: str='none', eps: float=1e-07) -> torch.Tensor:
    """Complete-IoU (CIoU) loss between boxes in (x1, y1, x2, y2) form.

    loss = 1 - IoU + (center distance / enclosing diagonal) + alpha * v,
    where v penalizes aspect-ratio mismatch and alpha is its trade-off
    weight (computed without gradient).

    Args:
        boxes1: predicted boxes, shape (..., 4).
        boxes2: target boxes, shape (..., 4).
        reduction: 'none' | 'mean' | 'sum'.
        eps: numerical stabilizer for the divisions.
    """
    px1, py1, px2, py2 = boxes1.unbind(dim=-1)
    gx1, gy1, gx2, gy2 = boxes2.unbind(dim=-1)
    assert (px2 >= px1).all(), 'bad box: x1 larger than x2'
    assert (py2 >= py1).all(), 'bad box: y1 larger than y2'
    # intersection area (zero where the boxes do not overlap)
    ix1 = torch.max(px1, gx1)
    iy1 = torch.max(py1, gy1)
    ix2 = torch.min(px2, gx2)
    iy2 = torch.min(py2, gy2)
    inter = torch.zeros_like(px1)
    overlap = (iy2 > iy1) & (ix2 > ix1)
    inter[overlap] = (ix2[overlap] - ix1[overlap]) * (iy2[overlap] - iy1[overlap])
    union = (px2 - px1) * (py2 - py1) + (gx2 - gx1) * (gy2 - gy1) - inter + eps
    iou = inter / union
    # squared diagonal of the smallest enclosing box
    ex1 = torch.min(px1, gx1)
    ey1 = torch.min(py1, gy1)
    ex2 = torch.max(px2, gx2)
    ey2 = torch.max(py2, gy2)
    diag_len = (ex2 - ex1) ** 2 + (ey2 - ey1) ** 2 + eps
    # squared distance between box centers
    x_p = (px2 + px1) / 2
    y_p = (py2 + py1) / 2
    x_g = (gx1 + gx2) / 2
    y_g = (gy1 + gy2) / 2
    distance = (x_p - x_g) ** 2 + (y_p - y_g) ** 2
    # aspect-ratio consistency term
    w_pred = px2 - px1
    h_pred = py2 - py1
    w_gt = gx2 - gx1
    h_gt = gy2 - gy1
    v = (4 / (math.pi ** 2)) * torch.pow(torch.atan(w_gt / h_gt) - torch.atan(w_pred / h_pred), 2)
    with torch.no_grad():
        # trade-off weight: treated as a constant w.r.t. gradients
        alpha = v / ((1 - iou) + v + eps)
    loss = (1 - iou) + distance / diag_len + alpha * v
    if reduction == 'mean':
        # keep a differentiable zero when there are no elements
        loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum()
    elif reduction == 'sum':
        loss = loss.sum()
    return loss
def get_logger(log_file=None):
    """Configure and return the root logger at INFO level.

    All previously attached handlers are discarded. A stream handler is
    always attached; if ``log_file`` is given, a file handler (mode 'w',
    truncating) is attached as well. Both share a timestamped format.
    """
    fmt = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s: - %(message)s', datefmt='%Y%m%d %H:%M:%S')
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    root.handlers.clear()
    if log_file:
        file_handler = logging.FileHandler(log_file, mode='w')
        file_handler.setFormatter(fmt)
        file_handler.setLevel(logging.INFO)
        root.addHandler(file_handler)
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(fmt)
    root.addHandler(console)
    return root
def read_epe(stream):
    """Parse EPE-format JSON lines from ``stream``.

    Each line is one sentence object; every sentence is annotated with
    'negations', the maximum length of any node's 'negation' list (0 when
    there are no nodes or no negations).
    """
    sentences = []
    for raw_line in stream:
        sentence = json.loads(raw_line)
        widest = max((len(node.get('negation', [])) for node in sentence['nodes']), default=0)
        sentence['negations'] = widest
        sentences.append(sentence)
    return sentences
class PlainDecoder(nn.Module):
    """Minimal segmentation head: dropout -> 1x1 conv -> bilinear upsample.

    ``cfg`` must provide ``num_classes``, ``img_height`` and ``img_width``;
    the input is expected to have 128 channels.
    """

    def __init__(self, cfg):
        super(PlainDecoder, self).__init__()
        self.cfg = cfg
        # attribute names kept for state_dict compatibility
        self.dropout = nn.Dropout2d(0.1)
        self.conv8 = nn.Conv2d(128, cfg.num_classes, 1)

    def forward(self, x):
        logits = self.conv8(self.dropout(x))
        target_size = [self.cfg.img_height, self.cfg.img_width]
        return F.interpolate(logits, size=target_size, mode='bilinear', align_corners=False)
class group(nn.Module):
    """Two stacked mfm blocks: a 1x1 channel-mixing layer followed by the
    main kernel_size x kernel_size convolution."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1):
        super(group, self).__init__()
        # attribute names kept for state_dict compatibility
        self.conv_a = mfm(in_channels, in_channels, 1, 1, 0, dilation)
        self.conv = mfm(in_channels, out_channels, kernel_size, stride, padding, dilation)

    def forward(self, x):
        return self.conv(self.conv_a(x))
def get_node_and_core_number(bigdl_type='float'):
    """Query the BigDL backend and return (node count, cores per node)."""
    node_num, core_num = callBigDlFunc(bigdl_type, 'getNodeAndCoreNumber')[:2]
    return (node_num, core_num)
class Data(abc.ABC):
    """Abstract base for datasets consumed by the training loop.

    Subclasses implement ``losses`` (the train/test variants default to it)
    plus the batch-producing hooks ``train_next_batch`` and ``test``.
    """

    def losses(self, targets, outputs, loss_fn, inputs, model, aux=None):
        """Return a list of losses; must be overridden by subclasses."""
        raise NotImplementedError('Data.losses is not implemented.')

    def losses_train(self, targets, outputs, loss_fn, inputs, model, aux=None):
        """Losses used during training; defaults to ``losses``."""
        return self.losses(targets, outputs, loss_fn, inputs, model, aux=aux)

    def losses_test(self, targets, outputs, loss_fn, inputs, model, aux=None):
        """Losses used during testing; defaults to ``losses``."""
        return self.losses(targets, outputs, loss_fn, inputs, model, aux=aux)

    def train_next_batch(self, batch_size=None):
        """Return the next training batch (of size ``batch_size``).

        NOTE(review): the original method had no body (a SyntaxError) —
        likely a stripped ``@abc.abstractmethod`` plus docstring; restored as
        a docstring-only stub so the class is importable. Confirm whether the
        abstract decorator should be reinstated.
        """

    def test(self):
        """Return the test dataset.

        NOTE(review): same as ``train_next_batch`` — the empty body was
        restored as a docstring-only stub.
        """
def preprocess_stage_3_build_dict(path, dict_sz, train, valid=None, test=None):
    """Build a vocabulary of the ``dict_sz - 3`` most frequent words across
    the given splits, then rewrite each split with out-of-vocabulary words
    replaced by '<unk>' (written next to the originals as 'final_<name>').

    The three reserved slots (dict_sz - 3) are presumably for special tokens
    added elsewhere (e.g. <unk>/<s>/</s>) — confirm against the caller.

    Args:
        path: directory containing the split files.
        dict_sz: total dictionary size, including reserved special tokens.
        train: filename of the training split (always processed).
        valid: optional filename of the validation split; skipped when None.
        test: optional filename of the test split; skipped when None
            (bug fix: the original read ``test`` unconditionally and crashed
            on the default ``test=None``).
    """
    import collections

    def _count(fname, counter):
        # Accumulate whitespace-token counts from one file.
        with open(os.path.join(path, fname), 'r') as fd:
            for line in fd.read().splitlines():
                counter.update(w for w in line.split(' ') if w != '')
        return counter

    counts = collections.Counter()
    counts = _count(train, counts)
    if valid is not None:
        counts = _count(valid, counts)
    if test is not None:
        counts = _count(test, counts)
    # most_common sorts by count desc with first-seen order on ties, matching
    # the original stable sort over insertion-ordered dict items
    keep_words = {w for (w, _) in counts.most_common(dict_sz - 3)}

    def _update(fname, keep_set):
        # Rewrite one file, replacing out-of-vocabulary words with <unk>.
        # ``keep_set`` is a set for O(1) membership (was an O(n) list scan).
        with open(os.path.join(path, fname), 'r') as fd:
            lines = fd.read().splitlines()
        new_lines = []
        for line in lines:
            words = [w for w in line.split(' ') if w != '']
            new_lines.append(' '.join((w if w in keep_set else '<unk>') for w in words))
        with open(os.path.join(path, ('final_' + fname)), 'w') as fd:
            fd.write('\n'.join(new_lines))

    _update(train, keep_words)
    if valid is not None:
        _update(valid, keep_words)
    if test is not None:
        _update(test, keep_words)
def compute_mean_word_length(stanza_doc):
    """Average character length of the words in the document's first sentence."""
    first_sentence = stanza_doc.sentences[0]
    word_lengths = [len(word.text) for word in first_sentence.words]
    return np.mean(word_lengths)
class PHNN(nn.Module):
    """Port-Hamiltonian-style trainer: evolves a wrapped predictor's flat
    parameter/velocity state with an ODE solver instead of a discrete
    optimizer.

    The predictor class is looked up by name in module ``p_module`` and
    instantiated from ``p_args``. ``forward`` is used as the ODE right-hand
    side for ``solve_ivp`` during ``fit``. ``hparams[1]``/``hparams[2]`` act
    as velocity/weight regularization coefficients; ``beta`` is the damping
    coefficient of the dynamics.
    """

    def __init__(self, p_type, p_args, hparams, beta, device, p_module=__name__):
        super().__init__()
        self.device = device
        # resolve the predictor class by name from the given module
        self.p_type = getattr(sys.modules[p_module], p_type)
        self.p_args = p_args
        self.predictor = self.p_type(*self.p_args).to(self.device)
        # NOTE(review): self.len counts state_dict entries while self.lenp
        # counts parameters; these differ when the predictor has buffers —
        # gradient() iterates self.len over parameters(). Confirm intent.
        self.len = len(self.predictor.state_dict())
        self.lenp = sum((1 for _ in iter(self.predictor.parameters())))
        self.hparams = hparams
        (self.dJ, self.dJddw) = ([], [])
        self.beta = beta
        # flat parameter vector w and its velocity wdot (randomly initialized)
        self.flat_w = self.flattenParamVector()
        self.flat_wdot = torch.rand(self.flat_w.shape)
        self.count = 0
        self.time = 0
        # recording buffers for loss / per-scalar parameter trajectories
        self.pLoss = []
        self.pW = []
        self.pWdot = []
        self.initializeRecord()

    def createStateVector(self, velocity=True, first_instance=True):
        """Collect per-parameter tensors (and, optionally, fresh random
        velocity tensors) on the configured device."""
        w = []
        wdot = []
        itr = iter(self.predictor.parameters())
        for i in range(self.lenp):
            param = next(itr)
            w.append(param.to(self.device))
            if first_instance:
                wdot.append(torch.rand(param.shape).to(self.device))
        if (velocity == True):
            return (w, wdot)
        else:
            return w

    def flattenParamVector(self):
        """Concatenate all predictor parameters into a single 1-D tensor."""
        itr = iter(self.predictor.parameters())
        w = next(itr).view((- 1))
        for i in range(1, self.lenp):
            w = torch.cat((w, next(itr).view((- 1))))
        return w

    def makeFMatrix(self):
        """Build the sparse 2n x 2n structure matrix [[0, I], [-I, -beta*I]]
        of the damped Hamiltonian dynamics (n = parameter count)."""
        n = len(self.flat_wdot)
        # i1: upper-right +I block, i2: lower-left -I block,
        # i3: lower-right -beta*I block
        i1 = [[0, n]]
        for i in range(1, n):
            i1.append([i, (n + i)])
        i1 = torch.LongTensor(i1)
        i2 = [[n, 0]]
        for i in range(1, n):
            i2.append([(n + i), i])
        i2 = torch.LongTensor(i2)
        i3 = [[n, n]]
        for i in range(1, n):
            i3.append([(n + i), (n + i)])
        i3 = torch.LongTensor(i3)
        i = torch.cat((i1, i2, i3))
        v = torch.Tensor(np.concatenate((np.ones(n), ((- 1) * np.ones(n)), ((- self.beta) * np.ones(n)))))
        F = torch.sparse.FloatTensor(i.t(), v, torch.Size([(2 * n), (2 * n)]))
        del i1, i2, i3, i, v
        return F

    def gradient(self):
        """Assemble dJ/dw (backprop grads + L2 weight term) and dJ/dwdot
        (velocity regularization term) into self.dJ / self.dJddw."""
        itr = iter(self.predictor.parameters())
        dJddw = ((2.0 * self.hparams[1]) * self.flat_wdot)
        dJ_reg = ((2.0 * self.hparams[2]) * self.flat_w)
        # NOTE(review): iterates self.len (state_dict size) over parameters();
        # also elementwise-adds per-parameter grads to slices of a flat
        # tensor via map(add, ...) — confirm shapes line up for the intended
        # predictors.
        dJ = list(map(add, [next(itr).grad for i in range(self.len)], dJ_reg))
        (self.dJ, self.dJddw) = (dJ, dJddw)

    def additionalTermsLoss(self):
        """Regularization energy: hparams[1]*||wdot||^2 + hparams[2]*||w||^2."""
        return torch.add((self.hparams[1] * torch.dot(self.flat_wdot, self.flat_wdot)), (self.hparams[2] * torch.dot(self.flat_w, self.flat_w)))

    def assignNewState(self, xi):
        """Unpack a concatenated state vector xi = [w, wdot] into the flat
        parameter and velocity tensors."""
        self.flat_w = torch.Tensor(xi[:len(self.flat_w)])
        self.flat_wdot = torch.Tensor(xi[len(self.flat_w):(2 * len(self.flat_w))])

    def loadStateDict(self):
        """Rebuild the predictor and load parameters from self.flat_w."""
        new_state_dict = self.makeStateDict()
        del self.predictor
        self.predictor = self.p_type(*self.p_args).to(self.device)
        self.predictor.load_state_dict(new_state_dict)
        del new_state_dict

    def makeStateDict(self):
        """Slice self.flat_w back into a state_dict shaped like the predictor's."""
        d = {}
        k = 0
        for (i, key) in enumerate(self.predictor.state_dict().keys()):
            num_el = torch.numel(self.predictor.state_dict()[key])
            d[key] = self.flat_w[k:(k + num_el)].view(self.predictor.state_dict()[key].shape)
            k += num_el
        return d

    def flattenGradient(self):
        """Concatenate the per-parameter grads in self.dJ into one flat
        tensor; return (flat dJ/dw, flat dJ/dwdot) on the device."""
        dJ = self.dJ[0].view((- 1))
        for i in range(1, self.len):
            dJ = torch.cat((dJ, self.dJ[i].view((- 1))))
        return (dJ.to(self.device), self.dJddw.to(self.device))

    def assignFlatGradient(self):
        """Cache the flattened gradients as attributes."""
        (self.flat_dJ, self.flat_dJddw) = self.flattenGradient()

    def getConcatGradient(self):
        """Full gradient of the energy w.r.t. the [w, wdot] state."""
        return torch.cat((self.flat_dJ, self.flat_dJddw))

    def fixInputOutput(self, x, y):
        """Pin the (x, y) batch used by forward() during ODE integration."""
        self.x = x
        self.y = y

    def setXi(self):
        """Initialize the concatenated ODE state xi = [w, wdot]."""
        self.xi = torch.cat((self.flat_w.to(self.device), self.flat_wdot.to(self.device)))

    def getParamShape(self):
        # NOTE(review): self.shape is never assigned in this class — this
        # accessor will raise AttributeError unless set externally.
        return self.shape

    def pred_accuracy(self, testloader):
        """Classification accuracy of the current predictor over a loader.

        NOTE(review): moves batches to device 0 unconditionally rather than
        self.device — confirm on multi-GPU/CPU setups.
        """
        tot = 0
        count = 0
        for (i, d) in enumerate(testloader):
            (x, y) = d
            (x, y) = (x.to(0), y.to(0))
            # predictor outputs are assumed to be log-probabilities (exp -> probs)
            (_, idx) = torch.max(torch.exp(self.predictor.forward(x)), 1)
            for i in range(len(idx)):
                if (idx[i] == y[i]):
                    count += 1
                tot += 1
        return (count / tot)

    def initializeRecord(self):
        """Allocate one history list per scalar parameter/velocity."""
        for i in range(len(self.flat_w)):
            self.pW.append([])
            self.pWdot.append([])

    def recordLoss(self, main_loss, additional_terms_loss, delta_t):
        """Append total energy every ``delta_t`` ODE steps."""
        if ((self.time % delta_t) == 0):
            self.pLoss.append((main_loss + additional_terms_loss))

    def plotLoss(self):
        """Plot the recorded loss trajectory."""
        plt.plot(self.pLoss, color='red')
        plt.ylabel('Loss')
        plt.xlabel('time (delta_t units)')

    def recordParameters(self, pW, pWdot, delta_t):
        """Append each scalar parameter/velocity every ``delta_t`` steps."""
        if ((self.time % delta_t) == 0):
            for i in range(len(pW)):
                self.pW[i].append(pW[i].cpu().detach().numpy())
                self.pWdot[i].append(pWdot[i].cpu().detach().numpy())

    def plotParameters(self):
        """Plot every recorded parameter trajectory."""
        for i in range(len(self.pW)):
            plt.plot(self.pW[i])

    def plotVelocities(self):
        """Plot every recorded velocity trajectory."""
        for i in range(len(self.pW)):
            plt.plot(self.pWdot[i])

    def perturb(self):
        # placeholder hook; intentionally a no-op
        pass

    def forward(self, t, xi):
        """ODE right-hand side: map state xi = [w, wdot] to its time
        derivative under the damped Hamiltonian dynamics.

        Reloads the predictor from xi, backprops the pinned batch, and
        returns dxdt = [wdot-grad part, -w-grad part - beta * wdot-grad part]
        as a NumPy array (as expected by solve_ivp).
        """
        self.assignNewState(xi)
        self.loadStateDict()
        yhat = self.predictor.forward(self.x)
        loss = self.criterion(yhat, self.y)
        loss.backward()
        self.recordLoss(loss, self.additionalTermsLoss(), self.time_delta)
        self.recordParameters(self.flat_w, self.flat_wdot, self.time_delta)
        del loss, yhat
        self.gradient()
        self.assignFlatGradient()
        grad_flat = self.getConcatGradient()
        dxdt = torch.zeros(len(grad_flat)).to(self.device)
        n = (len(dxdt) // 2)
        dxdt[:n] = grad_flat[n:(2 * n)]
        dxdt[n:(2 * n)] = (((- 1) * grad_flat[:n]) - ((1 * self.beta) * grad_flat[n:(2 * n)]))
        del grad_flat
        self.time += 1
        if ((self.time % 1000) == 0):
            print('odeint iter: {} '.format(self.time))
        dxdt = dxdt.detach().cpu().numpy()
        return dxdt

    def fit(self, trainloader, epoch=3, time_delta=1, iter_accuracy=10, ode_t=0.25, ode_step=10, criterion='nll'):
        """Train by integrating the parameter dynamics over each batch.

        For every batch, solve_ivp integrates ``forward`` from the current
        state; the final state becomes the new parameters. Accuracy is
        reported every ``iter_accuracy`` batches.
        """
        if (criterion == 'nll'):
            self.criterion = F.nll_loss
        else:
            self.criterion = F.mse_loss
        # NOTE(review): solve_ivp expects a 2-element t_span, but a full
        # linspace of ode_step points is passed — confirm against the scipy
        # version in use.
        t = np.linspace(0.0, ode_t, ode_step)
        if time_delta:
            self.time_delta = time_delta
        else:
            self.time_delta = float('inf')
        self.setXi()
        for e in range(epoch):
            for (i, data) in enumerate(trainloader):
                (x, y) = data
                (x, y) = (x.to(self.device), y.to(self.device))
                self.fixInputOutput(x, y)
                func = self
                xi = solve_ivp(func, t, self.xi.cpu().detach().numpy())
                # keep only the state at the final integration time
                xi = [el[(- 1)] for el in xi.y]
                self.xi = torch.Tensor(xi)
                self.assignNewState(xi)
                self.loadStateDict()
                self.count += 1
                if (((self.count % iter_accuracy) == 0) and (self.count != 0)):
                    print('Number of odeint and parameters reassignment iterations: {}'.format(self.count))
                    print('In-training accuracy estimate: {}'.format(self.pred_accuracy(trainloader)))
def _weight_init_range(n_in, n_out):
range = ((4.0 * math.sqrt(6.0)) / math.sqrt((n_in + n_out)))
return {'minval': (- range), 'maxval': range} |
def download(url, path=None, overwrite=False, sha1_hash=None):
    """Download ``url`` to a local file, with optional SHA-1 verification.

    Args:
        url: source URL; its last path component supplies the filename.
        path: destination file path or directory (default: current dir).
        overwrite: re-download even when the destination already exists.
        sha1_hash: expected SHA-1 hex digest; a mismatching existing file is
            re-downloaded and the fresh download is verified.

    Returns:
        The local filename the content was saved to.

    Raises:
        RuntimeError: if the HTTP response status is not 200.
        UserWarning: if the downloaded content fails the SHA-1 check.
    """
    if (path is None):
        fname = url.split('/')[(- 1)]
    else:
        path = os.path.expanduser(path)
        if os.path.isdir(path):
            fname = os.path.join(path, url.split('/')[(- 1)])
        else:
            fname = path
    # (re)download when forced, missing, or failing the hash check
    if (overwrite or (not os.path.exists(fname)) or (sha1_hash and (not check_sha1(fname, sha1_hash)))):
        dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname)))
        # exist_ok avoids the check-then-create race of the original
        os.makedirs(dirname, exist_ok=True)
        print(('Downloading %s from %s...' % (fname, url)))
        # bug fix: close the streamed connection (the original leaked it)
        with requests.get(url, stream=True) as r:
            if (r.status_code != 200):
                raise RuntimeError(('Failed downloading url %s' % url))
            total_length = r.headers.get('content-length')
            with open(fname, 'wb') as f:
                if (total_length is None):
                    # unknown size: stream chunks, skipping keep-alive chunks
                    for chunk in r.iter_content(chunk_size=1024):
                        if chunk:
                            f.write(chunk)
                else:
                    total_length = int(total_length)
                    for chunk in tqdm(r.iter_content(chunk_size=1024), total=int(((total_length / 1024.0) + 0.5)), unit='KB', unit_scale=False, dynamic_ncols=True):
                        f.write(chunk)
        if (sha1_hash and (not check_sha1(fname, sha1_hash))):
            raise UserWarning('File {} is downloaded but the content hash does not match. The repo may be outdated or download may be incomplete. If the "repo_url" is overridden, consider switching to the default repo.'.format(fname))
    return fname
class Homoglyphs():
    """Generate visually-confusable (homoglyph) variants of text using the
    Unicode confusables table.

    ``strategy`` controls what happens when an input character falls outside
    the configured alphabet: extend the alphabet on demand (STRATEGY_LOAD),
    pass the character through (STRATEGY_IGNORE), or drop it
    (STRATEGY_REMOVE). ``ascii_strategy``/``ascii_range`` govern the
    ASCII-only conversion in ``to_ascii``.
    """

    def __init__(self, categories=None, languages=None, alphabet=None, strategy=STRATEGY_IGNORE, ascii_strategy=STRATEGY_IGNORE, ascii_range=ASCII_RANGE):
        if (strategy not in (STRATEGY_LOAD, STRATEGY_IGNORE, STRATEGY_REMOVE)):
            raise ValueError('Invalid strategy')
        self.strategy = strategy
        self.ascii_strategy = ascii_strategy
        self.ascii_range = ascii_range
        # default to Latin + common characters when nothing is configured
        if ((not categories) and (not languages) and (not alphabet)):
            categories = ('LATIN', 'COMMON')
        self.categories = set((categories or []))
        self.languages = set((languages or []))
        self.alphabet = set((alphabet or []))
        if self.categories:
            alphabet = Categories.get_alphabet(self.categories)
            self.alphabet.update(alphabet)
        if self.languages:
            alphabet = Languages.get_alphabet(self.languages)
            self.alphabet.update(alphabet)
        self.table = self.get_table(self.alphabet)

    # Bug fix: the three helpers below had no ``self`` parameter yet are
    # called as instance methods (e.g. ``self.get_table(self.alphabet)``),
    # which passed the instance as the first argument. The stripped
    # ``@staticmethod`` decorators are restored.
    @staticmethod
    def get_table(alphabet):
        """Map each character of ``alphabet`` to its confusable homoglyphs
        that are themselves inside ``alphabet``."""
        table = defaultdict(set)
        with open(os.path.join(DATA_LOCATION, 'confusables_sept2022.json')) as f:
            data = json.load(f)
        for char in alphabet:
            if (char in data):
                for homoglyph in data[char]:
                    if (homoglyph in alphabet):
                        table[char].add(homoglyph)
        return table

    @staticmethod
    def get_restricted_table(source_alphabet, target_alphabet):
        """Like ``get_table`` but homoglyphs are restricted to
        ``target_alphabet`` while keys come from ``source_alphabet``."""
        table = defaultdict(set)
        with open(os.path.join(DATA_LOCATION, 'confusables_sept2022.json')) as f:
            data = json.load(f)
        for char in source_alphabet:
            if (char in data):
                for homoglyph in data[char]:
                    if (homoglyph in target_alphabet):
                        table[char].add(homoglyph)
        return table

    @staticmethod
    def uniq_and_sort(data):
        """Deduplicate and sort by descending length, then lexicographically."""
        result = list(set(data))
        result.sort(key=(lambda x: ((- len(x)), x)))
        return result

    def _update_alphabet(self, char):
        """Grow the alphabet with the languages (preferred) or Unicode
        category that ``char`` belongs to; returns False when undetectable."""
        langs = Languages.detect(char)
        if langs:
            self.languages.update(langs)
            alphabet = Languages.get_alphabet(langs)
            self.alphabet.update(alphabet)
        else:
            category = Categories.detect(char)
            if (category is None):
                return False
            self.categories.add(category)
            alphabet = Categories.get_alphabet([category])
            self.alphabet.update(alphabet)
        # the lookup table must be rebuilt for the enlarged alphabet
        self.table = self.get_table(self.alphabet)
        return True

    def _get_char_variants(self, char):
        """All homoglyph alternatives for one character (including itself),
        honoring the configured out-of-alphabet strategy."""
        if (char not in self.alphabet):
            if (self.strategy == STRATEGY_LOAD):
                if (not self._update_alphabet(char)):
                    return []
            elif (self.strategy == STRATEGY_IGNORE):
                return [char]
            elif (self.strategy == STRATEGY_REMOVE):
                return []
        alt_chars = self.table.get(char, set())
        if alt_chars:
            # one-hop closure: homoglyphs of the homoglyphs
            alt_chars2 = [self.table.get(alt_char, set()) for alt_char in alt_chars]
            alt_chars.update(*alt_chars2)
        alt_chars.add(char)
        return self.uniq_and_sort(alt_chars)

    def _get_combinations(self, text, ascii=False):
        """Yield every homoglyph variant of ``text`` (cartesian product of
        per-character alternatives); with ``ascii`` the alternatives are
        pre-filtered to the ASCII range."""
        variations = []
        for char in text:
            alt_chars = self._get_char_variants(char)
            if ascii:
                alt_chars = [char for char in alt_chars if (ord(char) in self.ascii_range)]
                if ((not alt_chars) and (self.ascii_strategy == STRATEGY_IGNORE)):
                    return
            if alt_chars:
                variations.append(alt_chars)
        if variations:
            for variant in product(*variations):
                (yield ''.join(variant))

    def get_combinations(self, text):
        """All homoglyph variants of ``text`` as a list."""
        return list(self._get_combinations(text))

    def _to_ascii(self, text):
        """Yield only the fully-ASCII variants of ``text``."""
        for variant in self._get_combinations(text, ascii=True):
            if (max(map(ord, variant)) in self.ascii_range):
                (yield variant)

    def to_ascii(self, text):
        """Deduplicated, sorted ASCII-only variants of ``text``."""
        return self.uniq_and_sort(self._to_ascii(text))
def getFirstLineInLogWithCertainPattern(filePathToLog, pattern):
    """Return the first line of the log file containing ``pattern``
    (trailing newline included), or None when no line matches.

    Uses a ``with`` block so the file handle is closed even if reading
    raises (the original left it open on exceptions).
    """
    with open(filePathToLog, 'r') as f:
        for line in f:
            if pattern in line:
                return line
    return None
def compute_return(reward, value, discount, bootstrap, lmbda, gamma):
    """Compute lambda-returns over a time-major trajectory.

    ``reward``/``value``/``discount`` share a leading time dimension;
    ``bootstrap`` is the value estimate after the final step. Accumulates
    backwards in time and returns targets aligned with ``reward``.
    """
    shifted_values = torch.cat([value[1:], bootstrap[None]], 0)
    one_step = reward + gamma * discount * shifted_values * (1 - lmbda)
    acc = bootstrap
    reversed_outputs = []
    for step in range(reward.shape[0] - 1, -1, -1):
        acc = one_step[step] + gamma * discount[step] * acc * lmbda
        reversed_outputs.append(acc)
    return torch.flip(torch.stack(reversed_outputs), [0])
class BasicSwap(TransformationPass):
    """Transpiler pass that inserts SWAP gates so every two-qubit gate acts
    on physically adjacent qubits of the coupling map.

    Processes the circuit layer by layer; whenever a two-qubit gate spans
    non-adjacent physical qubits, SWAPs are inserted along the shortest
    undirected path and the running layout is updated accordingly.
    """

    def __init__(self, coupling_map, initial_layout=None):
        super().__init__()
        self.coupling_map = coupling_map
        self.initial_layout = initial_layout

    def run(self, dag):
        """Map ``dag`` onto the coupling map; returns a new DAGCircuit.

        Raises TranspilerError when the layout size disagrees with the DAG's
        qubit count or the coupling map's physical qubit count.
        """
        new_dag = DAGCircuit()
        # fall back to the property-set layout, then to a trivial layout
        if (self.initial_layout is None):
            if self.property_set['layout']:
                self.initial_layout = self.property_set['layout']
            else:
                self.initial_layout = Layout.generate_trivial_layout(*dag.qregs.values())
        if (len(dag.qubits()) != len(self.initial_layout)):
            raise TranspilerError('The layout does not match the amount of qubits in the DAG')
        if (len(self.coupling_map.physical_qubits) != len(self.initial_layout)):
            raise TranspilerError('Mappers require to have the layout to be the same size as the coupling map')
        current_layout = self.initial_layout.copy()
        for layer in dag.serial_layers():
            subdag = layer['graph']
            for gate in subdag.twoQ_gates():
                physical_q0 = current_layout[gate.qargs[0]]
                physical_q1 = current_layout[gate.qargs[1]]
                if (self.coupling_map.distance(physical_q0, physical_q1) != 1):
                    # build a mini-DAG of SWAPs moving q0 next to q1
                    swap_layer = DAGCircuit()
                    path = self.coupling_map.shortest_undirected_path(physical_q0, physical_q1)
                    for swap in range((len(path) - 2)):
                        connected_wire_1 = path[swap]
                        connected_wire_2 = path[(swap + 1)]
                        # NOTE(review): layout is indexed here by physical
                        # wires to obtain virtual qubits — confirm direction
                        # against the Layout API version in use.
                        qubit_1 = current_layout[connected_wire_1]
                        qubit_2 = current_layout[connected_wire_2]
                        # the swap sub-circuit needs every register present
                        for qreg in current_layout.get_registers():
                            if (qreg not in swap_layer.qregs.values()):
                                swap_layer.add_qreg(qreg)
                        swap_layer.apply_operation_back(SwapGate(), qargs=[qubit_1, qubit_2], cargs=[])
                    # splice the SWAPs in under the current layout
                    edge_map = current_layout.combine_into_edge_map(self.initial_layout)
                    new_dag.compose_back(swap_layer, edge_map)
                    # and track their effect on the running layout
                    for swap in range((len(path) - 2)):
                        current_layout.swap(path[swap], path[(swap + 1)])
            # append the (now routable) layer itself
            edge_map = current_layout.combine_into_edge_map(self.initial_layout)
            new_dag.extend_back(subdag, edge_map)
        return new_dag
class Dataset(object):
    """K-shot recommendation dataset built on precomputed FastGCN neighbor
    files and oracle (pretrained) user/item embeddings.

    All sizing comes from the module-level ``setting`` object; neighbor
    files are read from fixed './fastgcn_*_{user,item}.txt' paths (line i
    holds the space-separated neighbor ids of entity i).
    """

    def __init__(self, data_path):
        self.num_items = setting.num_items
        self.num_users = setting.num_users
        self.batch_size = setting.batch_size
        self.kshot_num = setting.kshot_num
        self.kshot_second_num = setting.kshot_second_num
        self.kshot_third_num = setting.kshot_third_num
        # entity counts double as padding ids
        self.padding_number_items = self.num_items
        self.padding_number_users = self.num_users
        self.oracle_user_ebd = np.load(setting.oracle_user_ebd_path)
        self.oracle_item_ebd = np.load(setting.oracle_item_ebd_path)

    @staticmethod
    def _load_neighbor_dict(file_path):
        """Read one neighbor file into {line_index: [neighbor ids]}.

        Replaces six copy-pasted readline loops from the original.
        """
        neighbors = {}
        with open(file_path, 'r') as f:
            for (idx, line) in enumerate(f):
                neighbors[idx] = [int(tok) for tok in line.strip().split(' ')]
        return neighbors

    def get_positive_instances_user_task(self, data, all_dict):
        """Assemble per-user training instances: first/second/third-order
        neighbors, the oracle user embedding, and neighbor-count masks.

        Returns a 7-tuple of parallel lists indexed by user id.
        """
        target_user_first_order = self._load_neighbor_dict('./fastgcn_first_order_user.txt')
        target_user_second_order = self._load_neighbor_dict('./fastgcn_second_order_user.txt')
        target_user_third_order = self._load_neighbor_dict('./fastgcn_third_order_user.txt')
        (target_user, k_shot_item, second_order_uesrs, oracle_user_ebd, mask_num_second_order_user, third_order_items, mask_num_third_order_item) = ([], [], [], [], [], [], [])
        for user in range(setting.num_users):
            target_user.append(user)
            k_shot_item.append(target_user_first_order[user])
            second_order_uesrs.append(target_user_second_order[user])
            third_order_items.append(target_user_third_order[user])
            oracle_user_ebd.append(data.oracle_user_ebd[user])
            mask_num_second_order_user.append(len(target_user_second_order[user]))
            mask_num_third_order_item.append(len(target_user_third_order[user]))
        return (target_user, k_shot_item, second_order_uesrs, third_order_items, oracle_user_ebd, mask_num_second_order_user, mask_num_third_order_item)

    def get_positive_instances_item_task(self, data, all_dict):
        """Item-side counterpart of ``get_positive_instances_user_task``.

        Returns a 7-tuple of parallel lists indexed by item id.
        """
        target_item_first_order = self._load_neighbor_dict('./fastgcn_first_order_item.txt')
        target_item_second_order = self._load_neighbor_dict('./fastgcn_second_order_item.txt')
        target_item_third_order = self._load_neighbor_dict('./fastgcn_third_order_item.txt')
        (target_item, k_shot_user, second_order_items, oracle_item_ebd, mask_num_second_order_item, third_order_users, mask_num_third_order_user) = ([], [], [], [], [], [], [])
        for item in range(setting.num_items):
            target_item.append(item)
            k_shot_user.append(target_item_first_order[item])
            second_order_items.append(target_item_second_order[item])
            third_order_users.append(target_item_third_order[item])
            # bug fix: the original indexed data.oracle_user_ebd here —
            # a copy-paste from the user task; items use the item embeddings
            oracle_item_ebd.append(data.oracle_item_ebd[item])
            mask_num_second_order_item.append(len(target_item_second_order[item]))
            mask_num_third_order_user.append(len(target_item_third_order[item]))
        return (target_item, k_shot_user, second_order_items, third_order_users, oracle_item_ebd, mask_num_second_order_item, mask_num_third_order_user)
class KnowledgeSource():
    """Thin wrapper around the KILT MongoDB knowledge-source collection."""

    def __init__(self, mongo_connection_string=None, database='kilt', collection='knowledgesource'):
        if (not mongo_connection_string):
            mongo_connection_string = DEFAULT_MONGO_CONNECTION_STRING
        self.client = MongoClient(mongo_connection_string)
        self.db = self.client[database][collection]

    def get_all_pages_cursor(self):
        """Cursor over every page document in the collection."""
        return self.db.find({})

    def get_num_pages(self):
        """Fast (estimated, not exact) page count."""
        return self.db.estimated_document_count()

    def get_page_by_id(self, wikipedia_id):
        """Fetch a page by Wikipedia id (stored as a string ``_id``)."""
        return self.db.find_one({'_id': str(wikipedia_id)})

    def get_page_by_title(self, wikipedia_title, attempt=0):
        """Fetch a page by exact title.

        ``attempt`` is unused; kept for backward compatibility with callers.
        """
        return self.db.find_one({'wikipedia_title': str(wikipedia_title)})

    def get_page_from_url(self, url):
        """Resolve a Wikipedia URL to a page, trying in order: the 'title'
        query parameter, the last URL path component, and finally a live
        Wikipedia API title->pageid lookup. Returns None when all fail.
        """
        page = None
        parsed = urlparse.urlparse(url)
        record = parse_qs(parsed.query)
        if ('title' in record):
            title = record['title'][0].replace('_', ' ')
            page = self.get_page_by_title(title)
        if (page is None):  # idiom fix: was ``page == None``
            title = url.split('/')[(- 1)].replace('_', ' ')
            page = self.get_page_by_title(title)
        if (page is None):
            title = _get_title_from_wikipedia_url(url, client=self.client)
            if title:
                pageid = _get_pageid_from_api(title, client=self.client)
                if pageid:
                    page = self.get_page_by_id(pageid)
        return page
def coords_grid(batch, ht, wd, device):
    """Per-pixel (x, y) coordinate grid of shape (batch, 2, ht, wd).

    Channel 0 holds column (x) indices, channel 1 holds row (y) indices.
    """
    rows, cols = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device))
    grid = torch.stack((cols, rows), dim=0).float()
    return grid[None].repeat(batch, 1, 1, 1)
class ConvBertTokenizer(PreTrainedTokenizer):
    """WordPiece tokenizer for ConvBERT (BERT-style): optional basic
    tokenization (lowercasing, accent stripping, CJK handling) followed by
    WordPiece subword splitting against a vocabulary file.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        if (not os.path.isfile(vocab_file)):
            raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        self.vocab = load_vocab(vocab_file)
        # reverse mapping for id -> token lookups
        self.ids_to_tokens = collections.OrderedDict([(ids, tok) for (tok, ids) in self.vocab.items()])
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)

    # NOTE(review): in the upstream HF implementation the two accessors
    # below are decorated with @property; the decorators appear to have been
    # stripped from this copy — confirm and restore if so.
    def do_lower_case(self):
        """Whether the basic tokenizer lowercases input."""
        return self.basic_tokenizer.do_lower_case

    def vocab_size(self):
        """Number of entries in the vocabulary."""
        return len(self.vocab)

    def get_vocab(self):
        """Vocabulary plus any tokens added after loading."""
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Split ``text`` into (sub)word tokens; never-split tokens bypass
        the WordPiece step."""
        split_tokens = []
        if self.do_basic_tokenize:
            for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
                if (token in self.basic_tokenizer.never_split):
                    split_tokens.append(token)
                else:
                    split_tokens += self.wordpiece_tokenizer.tokenize(token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens

    def _convert_token_to_id(self, token):
        """Token string -> vocabulary id (unknown tokens map to unk)."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Vocabulary id -> token string (unknown ids map to unk)."""
        return self.ids_to_tokens.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Join tokens, removing WordPiece continuation markers ('##')."""
        out_string = ' '.join(tokens).replace(' ##', '').strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Add special tokens: [CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        if (token_ids_1 is None):
            return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return ((((cls + token_ids_0) + sep) + token_ids_1) + sep)

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """1 for special-token positions, 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if (token_ids_1 is not None):
            return (((([1] + ([0] * len(token_ids_0))) + [1]) + ([0] * len(token_ids_1))) + [1])
        return (([1] + ([0] * len(token_ids_0))) + [1])

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Segment ids: 0 for [CLS] A [SEP], 1 for B [SEP] (sequence pairs)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return ((len(((cls + token_ids_0) + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Write the vocabulary, one token per line in id order, to
        ``save_directory`` (or to the exact path when it is not a directory);
        returns a 1-tuple with the written filename."""
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        else:
            vocab_file = (((filename_prefix + '-') if filename_prefix else '') + save_directory)
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for (token, token_index) in sorted(self.vocab.items(), key=(lambda kv: kv[1])):
                # line number must equal token id; warn on gaps
                if (index != token_index):
                    logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!')
                    index = token_index
                writer.write((token + '\n'))
                index += 1
        return (vocab_file,)
class MultiScaleRandomCrop(object):
    """Randomly crop a square region whose side is a randomly chosen scale
    of the image's shorter side, then resize it to ``size`` x ``size``.

    ``randomize_parameters`` must be called (typically once per clip) before
    ``__call__`` to draw the scale and the relative top-left corner.
    """

    def __init__(self, scales, size, interpolation=Image.BILINEAR):
        self.scales = scales
        self.size = size
        self.interpolation = interpolation

    def __call__(self, img):
        shorter_side = min(img.size[0], img.size[1])
        crop_size = int(shorter_side * self.scale)
        width = img.size[0]
        height = img.size[1]
        # relative corner drawn in randomize_parameters, shared across a clip
        left = self.tl_x * (width - crop_size)
        top = self.tl_y * (height - crop_size)
        cropped = img.crop((left, top, left + crop_size, top + crop_size))
        return cropped.resize((self.size, self.size), self.interpolation)

    def randomize_parameters(self):
        """Draw a new scale and crop corner (uniform in [0, 1))."""
        self.scale = self.scales[random.randint(0, (len(self.scales) - 1))]
        self.tl_x = random.random()
        self.tl_y = random.random()
def _conjugate_gradient(f_Ax, b, cg_iters, residual_tol=1e-10):
p = b.clone()
r = b.clone()
x = torch.zeros_like(b)
rdotr = torch.dot(r, r)
for _ in range(cg_iters):
z = f_Ax(p)
v = (rdotr / torch.dot(p, z))
x += (v * p)
r -= (v * z)
newrdotr = torch.dot(r, r)
mu = (newrdotr / rdotr)
p = (r + (mu * p))
rdotr = newrdotr
if (rdotr < residual_tol):
break
return x |
def test_recursive_true_and_corrupt_file_ignored():
    """Recursive loading finds all 5 nested images and reports exactly one
    corrupt file as bad."""
    loader = _init_dataloader(imdir=NESTED_IMAGE_DIR, recursive=True)
    (filenames, image_list, bad_images) = _iterate_over_dataloader(loader)
    stacked = torch.stack(image_list)
    assert (stacked.shape == tuple([5, 3, 224, 224]))
    assert (len(filenames) == 5)
    assert (len(bad_images) == 1)
def filter_answers(answers_dset, min_occurence):
    """Map each (preprocessed) ground-truth answer to the set of question
    ids it answers, keeping only answers seen at least ``min_occurence``
    times.

    Args:
        answers_dset: iterable of VQA answer entries with
            'multiple_choice_answer' and 'question_id' keys.
        min_occurence: minimum number of distinct questions an answer must
            appear in to be kept.

    Returns:
        dict mapping answer string -> set of question ids.
    """
    occurence = {}
    for ans_entry in answers_dset:
        # note: the per-annotator 'answers' list is ignored (the original
        # read it into an unused local); only the single
        # 'multiple_choice_answer' field is counted
        gtruth = preprocess_answer(ans_entry['multiple_choice_answer'])
        occurence.setdefault(gtruth, set()).add(ans_entry['question_id'])
    # drop rare answers; iterate over a snapshot since we mutate the dict
    for answer in list(occurence):
        if (len(occurence[answer]) < min_occurence):
            occurence.pop(answer)
    print(('Num of answers that appear >= %d times: %d' % (min_occurence, len(occurence))))
    return occurence
class FactorizedAntisymmetry(Module):
    """Flax module: factorized antisymmetrized wavefunction.

    Computes per-spin input streams and a backflow, antisymmetrizes ``rank``
    independent ResNet outputs per spin, sums the factors in slog space, and
    adds a Jastrow term to the log-magnitude. Returns (sign, log|psi|).

    NOTE(review): the original source had a bare ``.compact`` line where the
    decorator on ``__call__`` belongs — almost certainly a stripped
    decorator (a SyntaxError as written). Restored as ``@nn.compact`` per
    flax convention; confirm the alias used by this file's imports.
    """
    spin_split: ParticleSplit
    compute_input_streams: ComputeInputStreams
    backflow: Backflow
    jastrow: Jastrow
    rank: int
    ndense_resnet: int
    nlayers_resnet: int
    kernel_initializer_resnet: WeightInitializer
    bias_initializer_resnet: WeightInitializer
    activation_fn_resnet: Activation
    resnet_use_bias: bool = True

    def setup(self):
        # aliases so __call__ can reference the configured callables uniformly
        self._compute_input_streams = self.compute_input_streams
        self._backflow = self.backflow
        self._jastrow = self.jastrow

    @nn.compact
    def __call__(self, elec_pos: Array) -> SLArray:
        (input_stream_1e, input_stream_2e, r_ei, r_ee) = self._compute_input_streams(elec_pos)
        stream_1e = self._backflow(input_stream_1e, input_stream_2e)
        split_spins = split(stream_1e, self.spin_split, axis=(- 2))

        def fn_to_antisymmetrize(x_one_spin):
            # ``rank`` independent ResNets, concatenated along the last axis
            resnet_outputs = [SimpleResNet(self.ndense_resnet, 1, self.nlayers_resnet, self.activation_fn_resnet, self.kernel_initializer_resnet, self.bias_initializer_resnet, use_bias=self.resnet_use_bias)(x_one_spin) for _ in range(self.rank)]
            return jnp.concatenate(resnet_outputs, axis=(- 1))

        slog_antisyms = cast(SLArray, FactorizedAntisymmetrize([fn_to_antisymmetrize for _ in split_spins])(split_spins))
        (sign_psi, log_antisyms) = slog_sum_over_axis(slog_antisyms, axis=(- 1))
        jastrow_part = self._jastrow(input_stream_1e, input_stream_2e, stream_1e, r_ei, r_ee)
        return (sign_psi, (log_antisyms + jastrow_part))
class RolloutBaseline(Baseline):
def __init__(self, model, problem, opts, epoch=0):
super(Baseline, self).__init__()
self.problem = problem
self.opts = opts
self._update_model(model, epoch)
def _update_model(self, model, epoch, dataset=None):
self.model = copy.deepcopy(model)
if (dataset is not None):
if (len(dataset) != self.opts.val_size):
print('Warning: not using saved baseline dataset since val_size does not match')
dataset = None
elif ((dataset[0] if (self.problem.NAME == 'tsp') else dataset[0]['loc']).size(0) != self.opts.graph_size):
print('Warning: not using saved baseline dataset since graph_size does not match')
dataset = None
if (dataset is None):
if self.opts.multi_distribution_baseline:
self.dataset = {'uniform': self.problem.make_dataset(size=self.opts.graph_size, num_samples=(self.opts.val_size // 3), distribution='uniform', n_cluster=self.opts.n_cluster, n_cluster_mix=self.opts.n_cluster_mix, mix_data=self.opts.generate_mix_data), 'cluster': self.problem.make_dataset(size=self.opts.graph_size, num_samples=(self.opts.val_size // 3), distribution='cluster', n_cluster=self.opts.n_cluster, n_cluster_mix=self.opts.n_cluster_mix, mix_data=self.opts.generate_mix_data), 'mixed': self.problem.make_dataset(size=self.opts.graph_size, num_samples=(self.opts.val_size // 3), distribution='mixed', n_cluster=self.opts.n_cluster, n_cluster_mix=self.opts.n_cluster_mix, mix_data=self.opts.generate_mix_data)}
else:
self.dataset = self.problem.make_dataset(size=self.opts.graph_size, num_samples=self.opts.val_size, distribution=self.opts.data_distribution, n_cluster=self.opts.n_cluster, n_cluster_mix=self.opts.n_cluster_mix, mix_data=self.opts.generate_mix_data)
else:
self.dataset = dataset
self.bl_vals = rollout(self.model, self.dataset, self.opts, progress_bar=True).cpu().numpy()
self.mean = self.bl_vals.mean()
self.epoch = epoch
def wrap_dataset(self, dataset):
return BaselineDataset(dataset, rollout(self.model, dataset, self.opts, progress_bar=True).view((- 1), 1))
def unwrap_batch(self, batch):
return (batch['data'], batch['baseline'].view((- 1)))
def eval(self, x, c):
with torch.no_grad():
(v, _) = self.model(x)
return (v, 0)
def epoch_callback(self, model, epoch):
    """Replace the baseline by *model* when it is significantly better.

    Performs a one-sided paired t-test between the candidate's rollout
    values and the stored baseline values; updates the baseline when the
    improvement is significant at self.opts.bl_alpha.
    """
    candidate_vals = rollout(model, self.dataset, self.opts, progress_bar=True).cpu().numpy()
    candidate_mean = candidate_vals.mean()
    improvement = candidate_mean - self.mean
    print('Epoch {} candidate mean {}, baseline epoch {} mean {}, difference {}'.format(epoch, candidate_mean, self.epoch, self.mean, improvement))
    if improvement >= 0:
        return
    t, p = ttest_rel(candidate_vals, self.bl_vals)
    # Halve the two-sided p-value to get the one-sided test.
    p_val = p / 2
    assert t < 0, 'T-statistic should be negative'
    if p_val < self.opts.bl_alpha:
        print('p-value: {}, Update baseline'.format(p_val))
        self._update_model(model, epoch)
    else:
        print('p-value: {}'.format(p_val))
def state_dict(self):
    """Serializable snapshot: baseline model, evaluation dataset, and epoch."""
    return dict(model=self.model, dataset=self.dataset, epoch=self.epoch)
def load_state_dict(self, state_dict):
    """Restore a saved baseline: copy the saved weights into a clone of the
    current model, then re-install it together with the saved dataset/epoch."""
    restored = copy.deepcopy(self.model)
    saved_weights = get_inner_model(state_dict['model']).state_dict()
    get_inner_model(restored).load_state_dict(saved_weights)
    self._update_model(restored, state_dict['epoch'], state_dict['dataset'])
def resnet152(pretrained=False, progress=True, **kwargs):
    """ResNet-152: Bottleneck blocks with per-stage depths [3, 8, 36, 3]."""
    layers = [3, 8, 36, 3]
    return _resnet('resnet152', Bottleneck, layers, pretrained, progress, **kwargs)
def staged_forward(fixed_exp_z, fixed_id_z, fixed_noise_z, generator_ddp, deform_ddp, vae_net_id, vae_net_exp, stage, alpha, metadata, opt):
    """Render images from fixed expression/identity/noise latents (no grads).

    Maps identity+noise through the generator's SIREN mapping network,
    samples deformed/canonical ray points, runs the deformable generator in
    stage-forward mode, and returns the generated images concatenated along
    the batch dimension, moved to CPU.

    NOTE(review): vae_net_id, vae_net_exp, stage and opt are accepted but
    never used here; several intermediate collections (depth, pose,
    intersections, validity) are created and never filled.
    """
    device = fixed_exp_z.device
    img_size = metadata['img_size']
    batch_size = fixed_exp_z.shape[0]
    z_exp = fixed_exp_z
    z_id = fixed_id_z
    noise = fixed_noise_z
    neutral_face_flag = False
    # split_batch_size == full batch, so the "split" loop below runs once.
    split_batch_size = z_exp.shape[0]
    with torch.no_grad():
        pixels_all = []
        depth_all = []
        pose_all = []
        intersections_deform_all = []
        intersections_canonic_all = []
        is_valid_all = []
        for split in range(1):
            subset_z_exp = z_exp[(split * split_batch_size):((split + 1) * split_batch_size)]
            subset_z_id = z_id[(split * split_batch_size):((split + 1) * split_batch_size)]
            subset_noise = noise[(split * split_batch_size):((split + 1) * split_batch_size)]
            t = time.time()
            # Identity + noise form the conditioning code for the mapping network.
            z = torch.cat([subset_z_id, subset_noise], dim=1)
            batch_size = subset_z_exp.size()[0]
            (raw_frequencies, raw_phase_shifts) = generator_ddp.siren.mapping_network(z)
            # No truncation is applied — raw mapping outputs are used as-is.
            truncated_frequencies = raw_frequencies
            truncated_phase_shifts = raw_phase_shifts
            (wp_sample_deform, wp_inter_back_deform, levels, w_ray_origins, w_ray_directions, pitch, yaw, _) = generator_ddp.generate_points(subset_z_exp.size()[0], subset_z_exp.device, **metadata)
            (gen_positions, output, intersections_deform, intersections_canonical, is_valid) = generator_ddp.forward(subset_z_id, subset_z_exp, subset_noise, wp_sample_deform, wp_inter_back_deform, levels, w_ray_origins, w_ray_directions, pitch, yaw, neutral_face_flag, deform_ddp, alpha, metadata, freq=truncated_frequencies, phase=truncated_phase_shifts, stage_forward_flag=True)
            (gen_imgs, depth, weights, transparency) = output
            pixels_all.append(gen_imgs)
        # Concatenate chunks (a single one today) and move off the GPU.
        pixels_all_cat = torch.cat([p for p in pixels_all], dim=0)
        pixels_all_cat = pixels_all_cat.cpu()
    return pixels_all_cat
_tf
class TestTFPegasusCommon(TFModelTesterMixin, unittest.TestCase):
    """Common TF model tests specialized for TFPegasusForConditionalGeneration.

    Inherits the generic test battery from TFModelTesterMixin. Pegasus is an
    encoder-decoder model; pruning tests are disabled and some saved-model
    tests are explicitly skipped below.
    """
    # Guard on is_tf_available() so test collection works without TF installed.
    all_model_classes = ((TFPegasusForConditionalGeneration,) if is_tf_available() else ())
    all_generative_model_classes = ((TFPegasusForConditionalGeneration,) if is_tf_available() else ())
    model_tester_cls = ModelTester
    is_encoder_decoder = True
    test_pruning = False
    def setUp(self):
        # Fresh model/config testers for every test method.
        self.model_tester = self.model_tester_cls(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_inputs_embeds(self):
        # Skipped: inputs_embeds is not exercised for this model.
        pass
    def test_saved_model_with_hidden_states_output(self):
        # Skipped: saved-model export with hidden-state outputs is not exercised.
        pass
    def test_saved_model_with_attentions_output(self):
        # Skipped: saved-model export with attention outputs is not exercised.
        pass
    def test_compile_tf_model(self):
        """Round-trip save/load the model and compile it inside a Keras graph."""
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        optimizer = tf.keras.optimizers.Adam(learning_rate=3e-05, epsilon=1e-08, clipnorm=1.0)
        loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
        metric = tf.keras.metrics.SparseCategoricalAccuracy('accuracy')
        model_class = self.all_generative_model_classes[0]
        input_ids = {'decoder_input_ids': tf.keras.Input(batch_shape=(2, 2000), name='decoder_input_ids', dtype='int32'), 'input_ids': tf.keras.Input(batch_shape=(2, 2000), name='input_ids', dtype='int32')}
        model = model_class(config)
        # Build the model once so weights exist before save_pretrained.
        model(self._prepare_for_class(inputs_dict, model_class))
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = model_class.from_pretrained(tmpdirname)
        outputs_dict = model(input_ids)
        hidden_states = outputs_dict[0]
        # Attach a small head and compile, verifying the graph is trainable.
        outputs = tf.keras.layers.Dense(2, activation='softmax', name='outputs')(hidden_states)
        extended_model = tf.keras.Model(inputs=[input_ids], outputs=[outputs])
        extended_model.compile(optimizer=optimizer, loss=loss, metrics=[metric])
    def test_model_common_attributes(self):
        """Input embeddings exist; Pegasus exposes no output bias layer/name."""
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            x = model.get_output_layer_with_bias()
            assert (x is None)
            name = model.get_prefix_bias_name()
            assert (name is None)
def hard_attention_arc_eager_decoder(decoder_inputs, encoder_inputs, initial_state, attention_states, cell, predict_end_attention=True, decoder_vocab_sizes=None, output_size=None, num_heads=1, embed_functions=None, loop_functions=None, output_projections=None, transition_state_map=None, dtype=tf.float32, scope=None, initial_state_attention=False):
    """RNN decoder with hard attention for arc-eager transition-based parsing.

    Legacy TensorFlow pre-1.0 (tf.pack, variable_scope reuse) / Python 2
    (xrange) code. At each step the decoder predicts a parser transition
    ('parse') and one or two hard attention pointers ('att' and, when
    predict_end_attention is set, 'endatt'), while maintaining a "thin stack"
    of encoder positions that mirrors the arc-eager stack/buffer.

    Args:
        decoder_inputs: list of per-step dicts with 'parse' (and 'att') ids.
        encoder_inputs: unused here; kept for interface parity.
        initial_state: initial RNN cell state.
        attention_states: [batch, attn_length, attn_size] encoder outputs.
        cell: RNN cell.
        predict_end_attention: when True a second head predicts arc end points.
        decoder_vocab_sizes: unused here; kept for interface parity.
        output_size: output projection size; defaults to cell.output_size.
        num_heads: requested head count (overridden below to 1 or 2).
        embed_functions / loop_functions: embedding / prediction-feedback
            functions; loop_functions present => decode from model predictions.
        output_projections: dict of (W, b) softmax projections per output.
        transition_state_map: maps transition ids to parser-state codes.
        dtype, scope, initial_state_attention: standard seq2seq options.

    Returns:
        (logits, state): per-step logit dicts and the final cell state.
    """
    if (not decoder_inputs):
        raise ValueError('Must provide at least 1 input to attention decoder.')
    if (num_heads < 1):
        raise ValueError('With less than 1 heads, use a non-attention decoder.')
    if (not attention_states.get_shape()[1:2].is_fully_defined()):
        raise ValueError(('Shape[1] and [2] of attention_states must be known: %s' % attention_states.get_shape()))
    if (output_size is None):
        output_size = cell.output_size
    use_nonlinear = False
    feed_pointer_encoding = True
    max_stack_size = int((data_utils.MAX_OUTPUT_SIZE / 2))
    with tf.variable_scope((scope or 'attention_decoder')):
        batch_size = tf.shape(attention_states)[0]
        attn_length = attention_states.get_shape()[1].value
        attn_size = attention_states.get_shape()[2].value
        attention_vec_size = attn_size
        # Encoder states as a 4-D tensor so attention can use 1x1 convolutions.
        hidden = tf.reshape(attention_states, [(- 1), attn_length, 1, attn_size])
        # NOTE(review): the num_heads argument is overridden here — two heads
        # (arc start + arc end) when predicting end attention, else one.
        num_heads = (2 if predict_end_attention else 1)
        hidden_features = []
        v = []
        y_w = []
        for a in xrange(num_heads):
            k = tf.get_variable(('AttnW_%d' % a), [1, 1, attn_size, attention_vec_size])
            hidden_features.append(tf.nn.conv2d(hidden, k, [1, 1, 1, 1], 'SAME'))
            v.append(tf.get_variable(('AttnV_%d' % a), [attention_vec_size]))
            y_m_a = tf.get_variable(('AttnInputLinearW_%d' % a), [attn_size, attention_vec_size], dtype=dtype)
            y_bias_a = tf.get_variable(('AttnInputLinearBias_%d' % a), [attention_vec_size], dtype=dtype, initializer=tf.constant_initializer(0.0, dtype=dtype))
            y_w.append((y_m_a, y_bias_a))
        def attention(query):
            # Pointer-network style attention; returns per-head logits over positions.
            return seq2seq_helpers.attention(query, num_heads, y_w, v, hidden, hidden_features, attention_vec_size, attn_length, use_global_attention=False)
        parse_logits = []
        ind_logits = []
        end_ind_logits = []
        state = initial_state
        prev = None
        batch_attn_size = tf.pack([batch_size, attn_size])
        attns = tf.zeros(batch_attn_size, dtype=dtype)
        attns.set_shape([None, attn_size])
        # Embeddings of the current stack top / buffer head (zero before step 1).
        stack_top_emb = tf.reshape(tf.zeros(tf.pack([batch_size, attn_size]), dtype=tf.float32), [(- 1), attn_size])
        buffer_head_emb = tf.reshape(tf.zeros(tf.pack([batch_size, attn_size]), dtype=tf.float32), [(- 1), attn_size])
        (thin_stack_enc, thin_stack_head_next) = seq2seq_helpers.init_thin_stack(batch_size, max_stack_size)
        buffer_head = tf.zeros(tf.pack([batch_size]), dtype=tf.int32)
        transition_state = tf.fill(tf.pack([batch_size]), data_utils.PAD_STATE)
        for (i, decoder_input) in enumerate(decoder_inputs):
            if (i > 0):
                tf.get_variable_scope().reuse_variables()
                # Feed back the previous prediction (decoding) or gold input (training).
                if ((loop_functions is not None) and (prev is not None)):
                    prev_symbol = tf.argmax(prev, 1)
                    with tf.variable_scope('loop_function', reuse=True):
                        inp = loop_functions['parse'](prev, None)
                else:
                    prev_symbol = decoder_input['parse']
                    with tf.variable_scope('embed_function', reuse=True):
                        inp = embed_functions['parse'](prev_symbol)
                transition_state = tf.gather(transition_state_map, prev_symbol)
                # Advance the thin stack / buffer according to the previous transition.
                if (i == 1):
                    buffer_head = seq2seq_helpers.update_buffer_head(buffer_head, attn_inds, transition_state)
                elif (i > 1):
                    thin_stack_enc = seq2seq_helpers.write_thin_stack_vals(thin_stack_enc, thin_stack_head_next, buffer_head, batch_size, max_stack_size)
                    thin_stack_head_next = seq2seq_helpers.pure_shift_thin_stack(thin_stack_head_next, transition_state)
                    thin_stack_head_next = seq2seq_helpers.pure_reduce_thin_stack(thin_stack_head_next, transition_state)
                    buffer_head = seq2seq_helpers.update_buffer_head(buffer_head, attn_inds, transition_state)
            if (i > 0):
                # Hard-select encoder states for the current stack top and buffer head.
                stack_top_enc_inds = seq2seq_helpers.extract_stack_head_entries(thin_stack_enc, thin_stack_head_next, batch_size)
                stack_top_emb = seq2seq_helpers.hard_state_selection(stack_top_enc_inds, hidden, batch_size, attn_length)
                buffer_head_emb = seq2seq_helpers.hard_state_selection(buffer_head, hidden, batch_size, attn_length)
            with tf.variable_scope('DecoderInputAttentionLinear'):
                # NOTE(review): at i == 0 `inp` has not been assigned in this view —
                # the first iteration would raise NameError as written; the original
                # presumably embedded decoder_inputs[0] here. Verify against upstream.
                if feed_pointer_encoding:
                    x = linear([inp, stack_top_emb, buffer_head_emb], cell.output_size, True)
                else:
                    x = linear([inp], cell.output_size, True)
            (cell_output, state) = cell(x, state)
            with tf.variable_scope('AttentionCall'):
                (pointer_logits, _, _) = attention(state)
            ind_logits.append(pointer_logits[0])
            if predict_end_attention:
                end_ind_logits.append(pointer_logits[1])
            # Attention index: predicted (decoding), next gold (training), or PAD at the end.
            if (loop_functions is not None):
                attn_inds = tf.to_int32(tf.argmax(pointer_logits[0], 1))
            elif (i < (len(decoder_inputs) - 1)):
                attn_inds = decoder_inputs[(i + 1)]['att']
            else:
                attn_inds = (data_utils.PAD_ID * tf.ones(tf.pack([batch_size]), tf.int32))
            attns = seq2seq_helpers.hard_state_selection(attn_inds, hidden, batch_size, attn_length)
            with tf.variable_scope('AttnOutputProjection'):
                output = linear([cell_output, attns, stack_top_emb], output_size, True)
                if use_nonlinear:
                    # NOTE(review): tf.relu is not a public TF API (tf.nn.relu is);
                    # unreachable while use_nonlinear stays False.
                    output = tf.relu(output)
                logit = (tf.matmul(output, output_projections['parse'][0]) + output_projections['parse'][1])
            target_vocab_size = tf.shape(output_projections['parse'][0])[1]
            if (loop_functions is not None):
                # During decoding, mask transitions invalid for the current stack state.
                logit = seq2seq_helpers.mask_decoder_only_shift(logit, thin_stack_head_next, transition_state_map, target_vocab_size, batch_size)
                logit = seq2seq_helpers.mask_decoder_only_reduce(logit, thin_stack_head_next, transition_state_map, max_stack_size, target_vocab_size, batch_size)
            prev = logit
            parse_logits.append(logit)
    if predict_end_attention:
        logits = [{'parse': parse_logit, 'att': ind_logit, 'endatt': end_ind_logit} for (parse_logit, ind_logit, end_ind_logit) in zip(parse_logits, ind_logits, end_ind_logits)]
    else:
        logits = [{'parse': parse_logit, 'att': ind_logit} for (parse_logit, ind_logit) in zip(parse_logits, ind_logits)]
    return (logits, state)
_model
def nf_regnet_b5(pretrained=False, **kwargs):
    """Normalization-free RegNet-B5 (timm-style model factory)."""
    model_name = 'nf_regnet_b5'
    return _create_normfreenet(model_name, pretrained=pretrained, **kwargs)
def download_and_extract():
    """Download DATA_URL into DATA_DIR (if missing) and extract the gzipped tarball.

    Uses the module-level DATA_DIR / DATA_URL constants. The download is
    skipped when the archive already exists; extraction always runs.
    """
    # Fix: `urllib.urlretrieve` is the Python 2 location and raises
    # AttributeError on Python 3; use urllib.request.urlretrieve.
    from urllib.request import urlretrieve

    dest_directory = DATA_DIR
    # exist_ok avoids the check-then-create race of the original code.
    os.makedirs(dest_directory, exist_ok=True)
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(dest_directory, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            # In-place percentage progress indicator.
            percent = float(count * block_size) / float(total_size) * 100.0
            sys.stdout.write('\rDownloading %s %.2f%%' % (filename, percent))
            sys.stdout.flush()
        filepath, _ = urlretrieve(DATA_URL, filepath, reporthook=_progress)
        print('Downloaded', filename)
    # Fix: close the archive deterministically instead of leaking the handle.
    with tarfile.open(filepath, 'r:gz') as tar:
        tar.extractall(dest_directory)
_start_docstrings('XLM-RoBERTa Model with a token classification head on top (a linear layer on top of\n the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. ', XLM_ROBERTA_START_DOCSTRING)
class XLMRobertaForTokenClassification(RobertaForTokenClassification):
    """XLM-RoBERTa model with a token-classification head (e.g. for NER).

    The architecture is identical to RobertaForTokenClassification; only the
    configuration class differs.
    """
    config_class = XLMRobertaConfig
class EpanechnikovProposal(Proposal):
    """Proposal distribution based on the Epanechnikov kernel.

    The standard kernel is f(z) = 3/4 (1 - z^2) for |z| <= 1; location m and
    scale s are supplied by the callers of the KL methods. `self.eps`
    (presumably set by the Proposal base class) guards the logs at s == 0.
    """
    def density(self, z):
        # Kernel value at z. NOTE(review): no clamping to |z| <= 1 here —
        # callers are assumed to supply in-support samples; verify.
        return (0.75 * (1 - (z ** 2)))
    def kl(self, m, s):
        # Closed-form KL(q(m, s) || N(0, 1)) summed over dim 1.
        # NOTE(review): the constants (s^2/10, -5/3, log 3) should follow from
        # the Epanechnikov variance/entropy — confirm against the derivation.
        return ((((((0.5 * (m ** 2)) + ((s ** 2) / 10)) - torch.log((s + self.eps))) + (0.5 * np.log((2 * np.pi)))) - (5 / 3)) + np.log(3)).sum(1)
    def kl_uniform(self, m, s):
        # Closed-form KL against a uniform reference; depends only on the scale.
        return ((((- 5) / 3) + np.log(6)) - torch.log((s + self.eps))).sum(1)
def batchify_distributed(data, bsz, args, epoch):
    """Shard a 1-D token stream across distributed workers and batchify it.

    Every rank rotates the stream by the same epoch-seeded random offset,
    pads it (by wrapping) to a multiple of the world size, takes its own
    contiguous shard, and reshapes it into (seq_len, bsz) columns for
    language-model training.

    Args:
        data: 1-D tensor of token ids.
        bsz: per-rank batch size (number of columns).
        args: options; only args.cuda is read here.
        epoch: used to seed the per-epoch rotation.

    Returns:
        2-D tensor of shape (nbatch, bsz) (on GPU when args.cuda).
    """
    # Epoch-seeded rotation so each epoch slices the stream differently.
    # Note: this reseeds numpy's *global* RNG; all ranks compute the same pointer.
    np.random.seed(epoch)
    pointer = np.random.randint(0, len(data))
    data = torch.cat((data[pointer:], data[0:pointer]), dim=0)
    num_replicas = dist.get_world_size()
    rank = dist.get_rank()
    # Pad by wrapping so every replica receives exactly num_samples tokens.
    num_samples = int(math.ceil(((data.size(0) * 1.0) / num_replicas)))
    total_size = (num_samples * num_replicas)
    data = torch.cat((data, data[:(total_size - data.size(0))]), dim=0)
    assert (data.size(0) == total_size)
    # Contiguous shard for this rank.
    offset = (num_samples * rank)
    data = data[offset:(offset + num_samples)]
    assert (len(data) == num_samples)
    # Standard LM batchify: trim to a multiple of bsz, reshape to (seq_len, bsz).
    nbatch = (data.size(0) // bsz)
    data = data.narrow(0, 0, (nbatch * bsz))
    data = data.view(bsz, (- 1)).t().contiguous()
    if args.cuda:
        data = data.cuda()
    return data
def create_angle_grid(num_points=128):
    """Return a grid of angles (degrees) whose sines are uniformly spaced.

    Generalized: the grid size is now a parameter; the default of 128 keeps
    the original hard-coded behavior, so existing callers are unaffected.

    Args:
        num_points: number of grid points spanning sin(angle) over [-1, 1].

    Returns:
        np.ndarray of shape (num_points,) with angles from -90 to 90 degrees.
    """
    sin_values = np.linspace(-1, 1, num_points)
    return np.degrees(np.arcsin(sin_values))
def build_dataset(cfg, default_args=None):
    """Build a (possibly wrapped) dataset from a config dict or list of configs."""
    from .dataset_wrappers import ConcatDataset, MultiImageMixDataset, RepeatDataset
    # A list/tuple of configs: build each and concatenate.
    if isinstance(cfg, (list, tuple)):
        return ConcatDataset([build_dataset(c, default_args) for c in cfg])
    if cfg['type'] == 'RepeatDataset':
        inner = build_dataset(cfg['dataset'], default_args)
        return RepeatDataset(inner, cfg['times'])
    if cfg['type'] == 'MultiImageMixDataset':
        # Build the wrapped dataset first, then forward the rest of the config.
        mix_cfg = copy.deepcopy(cfg)
        mix_cfg['dataset'] = build_dataset(mix_cfg['dataset'])
        mix_cfg.pop('type')
        return MultiImageMixDataset(**mix_cfg)
    multi_dir = isinstance(cfg.get('img_dir'), (list, tuple))
    multi_split = isinstance(cfg.get('split', None), (list, tuple))
    if multi_dir or multi_split:
        return _concat_dataset(cfg, default_args)
    return build_from_cfg(cfg, DATASETS, default_args)
class EltwiseSubEmbed(nn.Module):
    """Element-wise difference embedding for verification-style metric learning.

    Computes x1 - x2, applies an optional element-wise nonlinearity
    ('square' or 'abs'), optional BatchNorm1d, then either a linear
    classifier over the flattened features or a per-sample sum.
    """
    def __init__(self, nonlinearity='square', use_batch_norm=False, use_classifier=False, num_features=0, num_classes=0):
        super(EltwiseSubEmbed, self).__init__()
        self.nonlinearity = nonlinearity
        if nonlinearity is not None and nonlinearity not in ('square', 'abs'):
            raise KeyError('Unknown nonlinearity:', nonlinearity)
        self.use_batch_norm = use_batch_norm
        self.use_classifier = use_classifier
        if use_batch_norm:
            # Initialize BN as the identity transform (weight=1, bias=0).
            self.bn = nn.BatchNorm1d(num_features)
            self.bn.weight.data.fill_(1)
            self.bn.bias.data.zero_()
        if use_classifier:
            assert num_features > 0 and num_classes > 0
            self.classifier = nn.Linear(num_features, num_classes)
            self.classifier.weight.data.normal_(0, 0.001)
            self.classifier.bias.data.zero_()
    def forward(self, x1, x2):
        """Return class logits (classifier mode) or per-sample summed distances."""
        diff = x1 - x2
        if self.nonlinearity == 'square':
            diff = diff.pow(2)
        elif self.nonlinearity == 'abs':
            diff = diff.abs()
        if self.use_batch_norm:
            diff = self.bn(diff)
        if self.use_classifier:
            return self.classifier(diff.view(diff.size(0), -1))
        return diff.sum(1)
class DatasetManger(metaclass=ABCMeta):
    """Abstract manager of data-shard tasks for one dataset.

    Keeps a `todo` queue of pending tasks and a `doing` map of dispatched
    tasks; subclasses implement the lifecycle hooks below.
    NOTE(review): the original 'Manger' spelling is kept — renaming the class
    would break importers.
    """
    def __init__(self, task_type, batch_size, dataset_splitter: DatasetSplitter):
        # Pending tasks, in dispatch order.
        self.todo: List[Task] = []
        # task_id -> bookkeeping for in-flight tasks.
        self.doing: Dict[(int, DoingTask)] = {}
        self._task_type = task_type
        self._batch_size = batch_size
        self._dataset_splitter = dataset_splitter
        self._latest_task_end_time = 0
    def get_latest_task_end_time(self):
        """Timestamp of the most recently finished task (0 when none yet)."""
        return self._latest_task_end_time
    def get_task_count(self):
        """Pending tasks plus one task per dataset shard of an epoch."""
        epoch_task_count = self._dataset_splitter.get_shard_count()
        return (len(self.todo) + epoch_task_count)
    def get_epoch(self):
        """Return the current epoch index. Subclass hook."""
        pass
    def completed(self):
        """Whether all tasks of the dataset are finished. Subclass hook."""
        pass
    def get_task(self, node_type, node_id) -> Task:
        """Dispatch the next task for the given worker node. Subclass hook."""
        pass
    def recover_task(self, task):
        """Put a failed/abandoned task back into rotation. Subclass hook."""
        pass
    def report_task_status(self, task_id, success):
        """Record the completion status of a dispatched task. Subclass hook."""
        pass
    def get_completed_step(self):
        """Number of training steps completed so far. Subclass hook."""
        pass
    def checkpoint(self):
        """Serialize the manager state for fault tolerance. Subclass hook."""
        pass
    def restore_checkpoint(self, checkpoint):
        """Restore state previously produced by checkpoint(). Subclass hook."""
        pass
class T5Converter(SpmConverter):
    """Converts a slow (sentencepiece-based) T5 tokenizer to a fast tokenizer."""
    def vocab(self, proto):
        """SentencePiece pieces followed by the <extra_id_*> sentinel tokens.

        Sentinels are appended in descending index order, so <extra_id_0>
        receives the highest token id.
        """
        num_extra_ids = self.original_tokenizer._extra_ids
        vocab = [(piece.piece, piece.score) for piece in proto.pieces]
        vocab += [(f'<extra_id_{i}>', 0.0) for i in range((num_extra_ids - 1), (- 1), (- 1))]
        return vocab
    def post_processor(self):
        """Append </s> to single sequences and to each member of a pair."""
        return processors.TemplateProcessing(single=['$A', '</s>'], pair=['$A', '</s>', '$B', '</s>'], special_tokens=[('</s>', self.original_tokenizer.convert_tokens_to_ids('</s>'))])
class CarBikeCollision(Scenario):
    """CARLA scenario: a car driving along +x meets a bike driving along -y."""
    def init_scene(self, prefix, settings=None, spectator_tr=None):
        """Spawn the two actors on crossing paths and give them target velocities."""
        super().init_scene(prefix, settings, spectator_tr)
        library = self.world.get_blueprint_library()
        # Car faces +x; bike faces -y, so their trajectories intersect.
        car_transform = carla.Transform(carla.Location(50, -255, 0.04), carla.Rotation(yaw=0))
        car = self.world.spawn_actor(library.filter('*lincoln*')[0], car_transform)
        bike_transform = carla.Transform(carla.Location(85, -245, 0.04), carla.Rotation(yaw=-90))
        bike = self.world.spawn_actor(library.filter('*gazelle*')[0], bike_transform)
        self.wait(1)
        car.set_target_velocity(carla.Vector3D(30, 0, 0))
        bike.set_target_velocity(carla.Vector3D(0, -12, 0))
        self.add_actor(car, 'Car')
        self.add_actor(bike, 'Bike')
        self.wait(1)
def get_dl(mode: str, cfg_ds: dict, cfg_dl: dict) -> DataLoader:
    """Build the DataLoader(s) for *mode* ('train'/'val'/'test').

    Loader options are the mode-independent keys of cfg_dl overridden by
    cfg_dl[mode]; with use_ddp each dataset gets its own DistributedSampler.
    Returns a single DataLoader, or a ConcatDataLoader over several datasets.
    """
    datasets = list(get_ds(cfg_ds, mode).values())
    base = {k: v for k, v in cfg_dl.items() if k not in {'train', 'val', 'test'}}
    opts = base | cfg_dl.get(mode, {})
    opts.setdefault('pin_memory', True)
    opts['collate_fn'] = datasets[0].collate_fn
    use_ddp = opts.pop('use_ddp', False)
    seed = opts.pop('seed', 42)
    if use_ddp:
        # Shuffling/dropping is delegated to the sampler under DDP.
        shuffle = opts.pop('shuffle', False)
        drop_last = opts.pop('drop_last', False)
        # Deterministic but distinct seed per dataset.
        samplers = [
            DistributedSampler(d, shuffle=shuffle, drop_last=drop_last, seed=seed * (10 ** i))
            for i, d in enumerate(datasets)
        ]
    else:
        samplers = [None] * len(datasets)
    loaders = [DataLoader(d, sampler=s, **opts) for d, s in zip(datasets, samplers)]
    return loaders[0] if len(loaders) == 1 else ConcatDataLoader(loaders)
class Data2VecTextConfig(PretrainedConfig):
    """Configuration for Data2Vec text models.

    Mirrors the RoBERTa configuration (same hyper-parameters and defaults,
    including pad_token_id=1, bos_token_id=0, eos_token_id=2).
    """
    model_type = 'data2vec-text'
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type='absolute', use_cache=True, classifier_dropout=None, **kwargs):
        """Store all hyper-parameters; extra kwargs go to PretrainedConfig."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Copy every hyper-parameter onto the instance so it is serialized
        # by the base class (to_dict / save_pretrained).
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
def assert_keys_equal(result_keys: List[str], target_keys: List[str]) -> bool:
    """Return True when both key collections contain the same keys.

    Order and duplicates are ignored: the comparison is set-based.
    """
    return not set(result_keys).symmetric_difference(target_keys)
class ValueBFS(Search):
    """Depth-limited expectimax search over a ValueGraph.

    expand() recursively values states:
      - STATE_TYPES[0] (controlled/max player): maximum over actions.
      - STATE_TYPES[1] (opponent/min player): minimum over actions.
      - STATE_TYPES[2] (chance nodes): probability-weighted expectation.
    Leaves (depth == 0) are scored by the value heuristic.
    """
    def __init__(self, forward_predictor: ForwardPredictor, forward_enumerator: ForwardEnumerator, value_heuristic: ValueHeuristic, action_enumerator: ActionEnumerator, random_state_enumerator: RandomStateEnumerator, random_state_predictor: RandomStatePredictor, opponent_action_enumerator: OpponentActionEnumerator, opponent_action_predictor: OpponentActionPredictor):
        # All components are stored by the Search base class.
        super().__init__(forward_predictor, forward_enumerator, value_heuristic, action_enumerator, random_state_enumerator, random_state_predictor, opponent_action_enumerator, opponent_action_predictor)
    def expand(self, graph: ValueGraph, state: State, prev_node=None, depth=3):
        """Expand *state* to *depth* plies and return its backed-up value.

        Side effects: creates/links graph nodes and caches enumerated actions,
        successor states, transition probabilities, and each node's best
        action and value.
        """
        node = graph.get_node(state)
        if (node is None):
            node = graph.add_state(state)
        if (prev_node is not None):
            # Maintain the parent/child links of the search graph.
            node.parents.add(prev_node)
            prev_node.children.add(node)
        if (depth == 0):
            # Leaf: fall back to the heuristic estimate.
            value = self.value_heuristic.evaluate(state)
            return value
        else:
            value = 0.0
            next_state_to_values = dict()
            if (state.state_type == state.STATE_TYPES[0]):
                # Maximizing (controlled) player's turn.
                max_value = float('-inf')
                max_action = None
                action_to_expected_value = dict()
                if (node.actions is None):
                    node.actions = self.action_enumerator.enumerate(state)
                if (not node.next_states):
                    for action in node.actions:
                        node.next_states.update(self.forward_enumerator.enumerate(state, action))
                # Value every successor once; results are shared across actions.
                for next_state in node.next_states:
                    value = self.expand(graph, next_state, node, (depth - 1))
                    next_state_to_values[next_state] = value
                for action in node.actions:
                    if (action not in node.action_to_next_state_probs):
                        # NOTE(review): `next_state` here is the stale loop variable
                        # left over from the loop above, while the expectation below
                        # indexes the returned probs by *every* next_state — verify
                        # the intended signature of forward_predictor.predict.
                        node.action_to_next_state_probs[action] = self.forward_predictor.predict(state, action, next_state)
                    expected_value = 0.0
                    for next_state in node.next_states:
                        expected_value += (next_state_to_values[next_state] * node.action_to_next_state_probs[action][next_state])
                    action_to_expected_value[action] = expected_value
                    if (expected_value > max_value):
                        max_value = expected_value
                        max_action = action
                node.best_action = max_action
                node.value = max_value
                value = max_value
            elif (state.state_type == state.STATE_TYPES[1]):
                # Opponent's turn: mirror image of the max block above.
                min_value = float('inf')
                min_action = None
                action_to_expected_value = dict()
                if (node.actions is None):
                    node.actions = self.opponent_action_enumerator.enumerate(state)
                if (not node.next_states):
                    for action in node.actions:
                        node.next_states.update(self.forward_enumerator.enumerate(state, action))
                for next_state in node.next_states:
                    value = self.expand(graph, next_state, node, (depth - 1))
                    next_state_to_values[next_state] = value
                for action in node.actions:
                    if (action not in node.action_to_next_state_probs):
                        # NOTE(review): same stale `next_state` issue as above.
                        node.action_to_next_state_probs[action] = self.forward_predictor.predict(state, action, next_state)
                    expected_value = 0.0
                    for next_state in node.next_states:
                        expected_value += (next_state_to_values[next_state] * node.action_to_next_state_probs[action][next_state])
                    action_to_expected_value[action] = expected_value
                    if (expected_value < min_value):
                        min_value = expected_value
                        min_action = action
                node.best_action = min_action
                node.value = min_value
                value = min_value
            elif (state.state_type == state.STATE_TYPES[2]):
                # Chance node: expectation under the random-state distribution.
                value = 0.0
                if (node.next_states is None):
                    node.next_states = self.random_state_enumerator.enumerate(state)
                if (not node.probs_over_next_states):
                    node.probs_over_next_states = self.random_state_predictor.predict(state, node.next_states)
                for next_state in node.next_states:
                    value += (self.expand(graph, next_state, node, (depth - 1)) * node.probs_over_next_states[next_state])
            return value
class DownsampleB(nn.Module):
    """ResNet option-B style shortcut: spatial average pooling followed by
    zero-padding of the channel dimension up to nOut channels."""
    def __init__(self, nIn, nOut, stride):
        super(DownsampleB, self).__init__()
        self.avg = nn.AvgPool2d(stride)
        # Number of channel copies needed to reach nOut (only the first is real).
        self.expand_ratio = nOut // nIn
    def forward(self, x):
        pooled = self.avg(x)
        zero_pads = [pooled.mul(0)] * (self.expand_ratio - 1)
        return torch.cat([pooled] + zero_pads, 1)
def get_network_fn(name, num_classes, weight_decay=0.0, is_training=False):
    """Return a network_fn(images) for the named slim architecture.

    Args:
        name: key into the module-level `networks_map` / `arg_scopes_map`.
        num_classes: number of output classes for the network head.
        weight_decay: l2 regularization strength passed to the arg scope.
        is_training: whether the returned network runs in training mode.

    Returns:
        network_fn: callable mapping images to network outputs; exposes the
        wrapped function's `default_image_size` attribute when present.

    Raises:
        ValueError: if `name` is not a known network.
    """
    import functools
    if (name not in networks_map):
        raise ValueError(('Name of network unknown %s' % name))
    func = networks_map[name]
    # Fix: the bare `(func)` expression here was a no-op left over from a
    # mangled `@functools.wraps(func)` decorator; restore it so network_fn
    # keeps func's name, docstring, and attributes.
    @functools.wraps(func)
    def network_fn(images):
        arg_scope = arg_scopes_map[name](weight_decay=weight_decay)
        with slim.arg_scope(arg_scope):
            return func(images, num_classes, is_training=is_training)
    if hasattr(func, 'default_image_size'):
        network_fn.default_image_size = func.default_image_size
    return network_fn
def has_metadata_cell(cells, fn):
    """Return the first cell whose source calls update_nb_metadata('<fn>'), else None.

    Args:
        cells: iterable of notebook cells (dicts with a 'source' string).
        fn: notebook name to look for.

    Fix: *fn* is now escaped before being embedded in the regex, so names
    containing metacharacters (e.g. '.', '+') match literally instead of as
    patterns (or raising re.error).
    """
    pattern = r"update_nb_metadata\('" + re.escape(fn) + "'"
    for c in cells:
        if re.search(pattern, c['source']):
            return c
    return None
_builder('vizwiz')
class VizWizBuilder(VQA2Builder):
    """Dataset builder for VizWiz, reusing the VQA2 builder pipeline."""
    def __init__(self):
        super().__init__()
        # Only the dataset name and dataset class differ from VQA2.
        self.dataset_name = 'vizwiz'
        self.set_dataset_class(VizWizDataset)
    def update_registry_for_model(self, config):
        # Same registry updates as VQA2; kept explicit as an extension point.
        super().update_registry_for_model(config)
def compute_progress(dir, iter, egs_dir, run_opts, get_raw_nnet_from_am=True):
    """Launch background jobs that log nnet3 training progress for this iteration.

    Compares model `iter-1` against model `iter` with nnet3-show-progress and,
    on every 10th iteration, also logs verbose progress and full model info.

    Args:
        dir: experiment directory containing <iter>.mdl/.raw and log/.
        iter: current iteration number.
        egs_dir: unused here; kept for interface parity with sibling helpers.
        run_opts: provides the job-submission command string.
        get_raw_nnet_from_am: True -> models use the .mdl suffix, else .raw.
    """
    suffix = ('mdl' if get_raw_nnet_from_am else 'raw')
    prev_model = '{0}/{1}.{2}'.format(dir, (iter - 1), suffix)
    model = '{0}/{1}.{2}'.format(dir, iter, suffix)
    # '&&' is quoted so it is interpreted by the remote shell, not locally.
    common_lib.background_command("{command} {dir}/log/progress.{iter}.log nnet3-info {model} '&&' nnet3-show-progress --use-gpu=no {prev_model} {model} ".format(command=run_opts.command, dir=dir, iter=iter, model=model, prev_model=prev_model))
    # Periodic (every 10th iteration) verbose diagnostics.
    if (((iter % 10) == 0) and (iter > 0)):
        common_lib.background_command('{command} {dir}/log/full_progress.{iter}.log nnet3-show-progress --use-gpu=no --verbose=2 {prev_model} {model}\n '.format(command=run_opts.command, dir=dir, iter=iter, model=model, prev_model=prev_model))
        common_lib.background_command('{command} {dir}/log/full_info.{iter}.log nnet3-info --verbose=2 {model}\n '.format(command=run_opts.command, dir=dir, iter=iter, model=model))
def GetUpdate(observe, collector, return_to_original_position=True):
    """Build an update() closure that moves the arm home, observes, and returns.

    Args:
        observe: zero-arg callable performing the observation.
        collector: object whose `.q` attribute holds the current joint state.
        return_to_original_position: when True, servo back to the joint state
            the arm had before moving; otherwise go home again.

    Returns:
        update: zero-arg callable returning True on success; raises
        RuntimeError when a motion fails after the retry budget is spent.
    """
    go_to_js = GetPlanToJointStateService()
    req = GetHomeRequest()
    servo_mode = GetServoModeService()

    def _failed(res):
        # A service result counts as a failure when missing or its ack says so.
        return (res is None) or ('failure' in res.ack.lower())

    def update():
        q0 = collector.q
        servo_mode('servo')
        max_tries = 10
        # Fix: `tries` was never incremented in the original while-loops, so a
        # persistently failing service call spun forever; the retry budget is
        # now enforced by bounded for-loops.
        res = None
        for _ in range(max_tries):
            res = go_to_js(req)
            if not _failed(res):
                break
        if _failed(res):
            # Fix: guard the log against res being None (res.ack would raise).
            if res is not None:
                rospy.logerr(res.ack)
            raise RuntimeError('UPDATE(): error moving out of the way')
        observe()
        if return_to_original_position:
            if q0 is None:
                raise RuntimeError('GetUpdate::update(): collector had joint position stored at None')
            res2 = None
            for _ in range(max_tries):
                res2 = go_to_js(MakeServoToJointStateRequest(q0))
                if not _failed(res2):
                    break
        else:
            res2 = go_to_js(req)
        if _failed(res2):
            if res2 is not None:
                rospy.logerr(res2.ack)
            raise RuntimeError('GetUpdate::UPDATE(): error returning to original joint pose')
        return True
    return update
class FPN(Backbone):
    """Feature Pyramid Network (detectron2-style) on top of a bottom-up backbone.

    Builds a lateral 1x1 conv and a 3x3 output conv per input feature, merges
    top-down (nearest-neighbour upsampled) features with the laterals, and
    emits maps named 'p2', 'p3', ... keyed by log2 of their stride.
    """
    # TorchScript annotation: the fuse mode is fixed after __init__.
    _fuse_type: torch.jit.Final[str]
    def __init__(self, bottom_up, in_features, out_channels, norm='', top_block=None, fuse_type='sum', square_pad=0):
        """
        Args:
            bottom_up (Backbone): backbone producing the input feature maps.
            in_features (list[str]): names of bottom-up features to use;
                their strides must be consecutive powers of two.
            out_channels (int): channel count of every pyramid output.
            norm (str): norm spec for the lateral/output convs ('' = none).
            top_block: optional module adding extra top levels (e.g. p6/p7).
            fuse_type (str): 'sum' or 'avg' merge of lateral and top-down.
            square_pad (int): optional square padding constraint.
        """
        super(FPN, self).__init__()
        assert isinstance(bottom_up, Backbone)
        assert in_features, in_features
        input_shapes = bottom_up.output_shape()
        strides = [input_shapes[f].stride for f in in_features]
        in_channels_per_feature = [input_shapes[f].channels for f in in_features]
        _assert_strides_are_log2_contiguous(strides)
        lateral_convs = []
        output_convs = []
        # Convs carry a bias only when no norm layer follows them.
        use_bias = (norm == '')
        for (idx, in_channels) in enumerate(in_channels_per_feature):
            lateral_norm = get_norm(norm, out_channels)
            output_norm = get_norm(norm, out_channels)
            lateral_conv = Conv2d(in_channels, out_channels, kernel_size=1, bias=use_bias, norm=lateral_norm)
            output_conv = Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=use_bias, norm=output_norm)
            weight_init.c2_xavier_fill(lateral_conv)
            weight_init.c2_xavier_fill(output_conv)
            # Register under the pyramid stage (log2 of stride), e.g. fpn_lateral3.
            stage = int(math.log2(strides[idx]))
            self.add_module('fpn_lateral{}'.format(stage), lateral_conv)
            self.add_module('fpn_output{}'.format(stage), output_conv)
            lateral_convs.append(lateral_conv)
            output_convs.append(output_conv)
        # Reversed so index 0 is the coarsest (top-most) level for top-down order.
        self.lateral_convs = lateral_convs[::(- 1)]
        self.output_convs = output_convs[::(- 1)]
        self.top_block = top_block
        self.in_features = tuple(in_features)
        self.bottom_up = bottom_up
        self._out_feature_strides = {'p{}'.format(int(math.log2(s))): s for s in strides}
        if (self.top_block is not None):
            # Each extra top level doubles the stride of the previous one.
            for s in range(stage, (stage + self.top_block.num_levels)):
                self._out_feature_strides['p{}'.format((s + 1))] = (2 ** (s + 1))
        self._out_features = list(self._out_feature_strides.keys())
        self._out_feature_channels = {k: out_channels for k in self._out_features}
        self._size_divisibility = strides[(- 1)]
        self._square_pad = square_pad
        assert (fuse_type in {'avg', 'sum'})
        self._fuse_type = fuse_type
    def size_divisibility(self):
        # NOTE(review): plain method here; detectron2 upstream exposes this as a
        # @property — confirm how callers invoke it before changing.
        return self._size_divisibility
    def padding_constraints(self):
        # NOTE(review): see size_divisibility — likely intended as a @property.
        return {'square_size': self._square_pad}
    def forward(self, x):
        """Run bottom-up then top-down; return {'p<k>': feature of stride 2**k}."""
        bottom_up_features = self.bottom_up(x)
        results = []
        # Start at the coarsest level and merge downwards.
        prev_features = self.lateral_convs[0](bottom_up_features[self.in_features[(- 1)]])
        results.append(self.output_convs[0](prev_features))
        for (idx, (lateral_conv, output_conv)) in enumerate(zip(self.lateral_convs, self.output_convs)):
            if (idx > 0):
                features = self.in_features[((- idx) - 1)]
                features = bottom_up_features[features]
                top_down_features = F.interpolate(prev_features, scale_factor=2.0, mode='nearest')
                lateral_features = lateral_conv(features)
                prev_features = (lateral_features + top_down_features)
                if (self._fuse_type == 'avg'):
                    prev_features /= 2
                # Prepend so results end up ordered fine-to-coarse (p2, p3, ...).
                results.insert(0, output_conv(prev_features))
        if (self.top_block is not None):
            # The top block reads either a raw backbone feature or one of ours.
            if (self.top_block.in_feature in bottom_up_features):
                top_block_in_feature = bottom_up_features[self.top_block.in_feature]
            else:
                top_block_in_feature = results[self._out_features.index(self.top_block.in_feature)]
            results.extend(self.top_block(top_block_in_feature))
        assert (len(self._out_features) == len(results))
        return {f: res for (f, res) in zip(self._out_features, results)}
    def output_shape(self):
        """Per-level ShapeSpec (channels and stride) for every pyramid output."""
        return {name: ShapeSpec(channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]) for name in self._out_features}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.