class SplAtConv2d(Module):
def __init__(self, in_channels, channels, kernel_size, stride=(1, 1), padding=(0, 0), dilation=(1, 1), groups=1, bias=True, radix=2, reduction_factor=4, rectify=False, rectify_avg=False, norm_layer=None, dropblock_prob=0.0, **kwargs):
super(SplAtConv2d, self).__init__()
padding = _pair(padding)
self.rectify = (rectify and ((padding[0] > 0) or (padding[1] > 0)))
self.rectify_avg = rectify_avg
inter_channels = max(((in_channels * radix) // reduction_factor), 32)
self.radix = radix
self.cardinality = groups
self.channels = channels
self.dropblock_prob = dropblock_prob
if self.rectify:
from rfconv import RFConv2d
self.conv = RFConv2d(in_channels, (channels * radix), kernel_size, stride, padding, dilation, groups=(groups * radix), bias=bias, average_mode=rectify_avg, **kwargs)
else:
self.conv = Conv2d(in_channels, (channels * radix), kernel_size, stride, padding, dilation, groups=(groups * radix), bias=bias, **kwargs)
self.use_bn = (norm_layer is not None)
if self.use_bn:
self.bn0 = norm_layer((channels * radix))
self.relu = ReLU(inplace=True)
self.fc1 = Conv2d(channels, inter_channels, 1, groups=self.cardinality)
if self.use_bn:
self.bn1 = norm_layer(inter_channels)
self.fc2 = Conv2d(inter_channels, (channels * radix), 1, groups=self.cardinality)
if (dropblock_prob > 0.0):
self.dropblock = DropBlock2D(dropblock_prob, 3)
self.rsoftmax = rSoftMax(radix, groups)
def forward(self, x):
x = self.conv(x)
if self.use_bn:
x = self.bn0(x)
if (self.dropblock_prob > 0.0):
x = self.dropblock(x)
x = self.relu(x)
(batch, rchannel) = x.shape[:2]
if (self.radix > 1):
if (torch.__version__ < '1.5'):
splited = torch.split(x, int((rchannel // self.radix)), dim=1)
else:
splited = torch.split(x, (rchannel // self.radix), dim=1)
gap = sum(splited)
else:
gap = x
gap = F.adaptive_avg_pool2d(gap, 1)
gap = self.fc1(gap)
if self.use_bn:
gap = self.bn1(gap)
gap = self.relu(gap)
atten = self.fc2(gap)
atten = self.rsoftmax(atten).view(batch, (- 1), 1, 1)
if (self.radix > 1):
if (torch.__version__ < '1.5'):
attens = torch.split(atten, int((rchannel // self.radix)), dim=1)
else:
attens = torch.split(atten, (rchannel // self.radix), dim=1)
out = sum([(att * split) for (att, split) in zip(attens, splited)])
else:
out = (atten * x)
return out.contiguous() |
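A minimal smoke test for the split-attention block above; a hedged sketch that assumes the snippet's remaining imports (torch, Module, Conv2d, ReLU, _pair, F) and its rSoftMax helper are in scope, and uses BatchNorm2d as the norm layer:

import torch
import torch.nn as nn

# radix=2 splits the 3x3 conv into two branches whose outputs are re-weighted by
# the attention computed from their pooled sum, then fused back to `channels`.
block = SplAtConv2d(in_channels=64, channels=64, kernel_size=3, padding=1,
                    radix=2, norm_layer=nn.BatchNorm2d)
out = block(torch.randn(2, 64, 32, 32))
assert out.shape == (2, 64, 32, 32)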
def register_Ns3Ipv4MaskValue_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
cls.add_method('Get', 'ns3::Ipv4Mask', [], is_const=True)
cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
cls.add_method('Set', 'void', [param('ns3::Ipv4Mask const &', 'value')])
return |
def test_key_to_hand():
key = jnp.array([0, , , ], dtype=jnp.int32)
hand = _key_to_hand(key)
assert jnp.all((hand == jnp.arange(52, dtype=jnp.int32)))
key = jnp.array([, , , 0], dtype=jnp.int32)
hand = _key_to_hand(key)
correct_hand = jnp.arange(52, dtype=jnp.int32)[::(- 1)]
sorted_correct_hand = jnp.concatenate([jnp.sort(correct_hand[:13]), jnp.sort(correct_hand[13:26]), jnp.sort(correct_hand[26:39]), jnp.sort(correct_hand[39:])]).reshape((- 1))
assert jnp.all((hand == sorted_correct_hand))
key = jnp.array([, , , ], dtype=jnp.int32)
hand = _key_to_hand(key)
correct_hand = jnp.array([12, 9, 8, 6, 3, 2, 13, 24, 22, 16, 15, 36, 45, 10, 7, 4, 21, 37, 31, 51, 50, 49, 47, 43, 41, 40, 11, 1, 25, 23, 19, 18, 17, 35, 34, 33, 48, 44, 42, 0, 5, 20, 14, 26, 38, 32, 30, 29, 28, 27, 39, 46])
sorted_correct_hand = jnp.concatenate([jnp.sort(correct_hand[:13]), jnp.sort(correct_hand[13:26]), jnp.sort(correct_hand[26:39]), jnp.sort(correct_hand[39:])]).reshape((- 1))
print(hand)
assert jnp.all((hand == sorted_correct_hand)) |
class RegSegHead(BaseSegHead):
def __init__(self, mid_channels=[8, 128], **kwargs):
super(RegSegHead, self).__init__(**kwargs)
self.head4 = ConvModule(self.in_channels[0], mid_channels[0], kernel_size=1, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
self.head8 = ConvModule(self.in_channels[1], mid_channels[1], kernel_size=1, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
self.head16 = ConvModule(self.in_channels[2], mid_channels[1], kernel_size=1, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
self.conv8 = ConvModule(mid_channels[1], self.channels, kernel_size=3, stride=1, padding=1, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
self.conv4 = ConvModule((self.channels + mid_channels[0]), self.channels, kernel_size=3, stride=1, padding=1, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
def forward(self, x):
(x4, x8, x16) = x
x16 = self.head16(x16)
x8 = self.head8(x8)
x4 = self.head4(x4)
x16 = F.interpolate(x16, size=x8.shape[(- 2):], mode='bilinear', align_corners=False)
x8 = (x8 + x16)
x8 = self.conv8(x8)
x8 = F.interpolate(x8, size=x4.shape[(- 2):], mode='bilinear', align_corners=False)
x4 = torch.cat((x8, x4), dim=1)
x4 = self.conv4(x4)
return self.classify(x4) |
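The decoder above fuses three scales; since ConvModule/BaseSegHead come from an mmsegmentation-style codebase not shown here, this is a hedged, dependency-free sketch of just the fusion arithmetic (upsample the coarser map, sum at matching channel counts, later concat across scales):

import torch
import torch.nn.functional as F

x8, x16 = torch.randn(1, 128, 32, 32), torch.randn(1, 128, 16, 16)
x16_up = F.interpolate(x16, size=x8.shape[-2:], mode='bilinear', align_corners=False)
fused = x8 + x16_up  # 1/16 -> 1/8: same channel count, elementwise sum
assert fused.shape == (1, 128, 32, 32)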
def get_2lvlmel_timestamp_embeddings(audio, model):
(embedmel, tmel) = model.get_timestamp_mels(audio, window_size=1920)
(embed1, t1) = model.get_timestamp_embeddings(audio)
(embed2, t2) = model.get_timestamp_embeddings(audio, window_size=(model.timestamp_window * 4))
embed = torch.cat((embed1, embed2, embedmel), dim=(- 1))
return (embed, t1) |
def filter_images_with_few_keypoints(dataset_dicts, min_keypoints_per_image):
num_before = len(dataset_dicts)
def visible_keypoints_in_image(dic):
annotations = dic['annotations']
return sum(((np.array(ann['keypoints'][2::3]) > 0).sum() for ann in annotations if ('keypoints' in ann)))
dataset_dicts = [x for x in dataset_dicts if (visible_keypoints_in_image(x) >= min_keypoints_per_image)]
num_after = len(dataset_dicts)
logger = logging.getLogger(__name__)
logger.info('Removed {} images with fewer than {} keypoints.'.format((num_before - num_after), min_keypoints_per_image))
return dataset_dicts |
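A hedged toy run: COCO-style keypoints are stored flat as [x1, y1, v1, x2, y2, v2, ...], so the visibility flags live at indices 2::3 and only v > 0 counts as visible:

import logging
logging.basicConfig(level=logging.INFO)

dicts = [{'annotations': [{'keypoints': [10, 10, 2, 20, 20, 1, 0, 0, 0]}]},  # 2 visible
         {'annotations': [{'keypoints': [0, 0, 0, 0, 0, 0, 0, 0, 0]}]}]      # 0 visible
kept = filter_images_with_few_keypoints(dicts, min_keypoints_per_image=1)
assert len(kept) == 1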
class ActNorm(tf.keras.Model):
def __init__(self, latent_dim, act_norm_init, **kwargs):
super().__init__(**kwargs)
if (act_norm_init is None):
self.scale = tf.Variable(tf.ones((latent_dim,)), trainable=True, name='act_norm_scale')
self.bias = tf.Variable(tf.zeros((latent_dim,)), trainable=True, name='act_norm_bias')
else:
self._initialize_parameters_data_dependent(act_norm_init)
def call(self, target, inverse=False):
if (not inverse):
return self._forward(target)
else:
return self._inverse(target)
def _forward(self, target):
z = ((self.scale * target) + self.bias)
ldj = tf.math.reduce_sum(tf.math.log(tf.math.abs(self.scale)), axis=(- 1))
return (z, ldj)
def _inverse(self, target):
return ((target - self.bias) / self.scale)
def _initialize_parameters_data_dependent(self, init_data):
if (tf.rank(init_data) == 2):
mean = tf.math.reduce_mean(init_data, axis=0)
std = tf.math.reduce_std(init_data, axis=0)
elif (tf.rank(init_data) == 3):
mean = tf.math.reduce_mean(init_data, axis=(0, 1))
std = tf.math.reduce_std(init_data, axis=(0, 1))
else:
raise ConfigurationError(f'''Currently, ActNorm supports only 2D and 3D Tensors,
but act_norm_init contains data with shape {init_data.shape}.''')
scale = (1.0 / std)
bias = (((- 1.0) * mean) / std)
self.scale = tf.Variable(scale, trainable=True, name='act_norm_scale')
self.bias = tf.Variable(bias, trainable=True, name='act_norm_bias') |
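A hedged round-trip check: with data-dependent initialization, the forward pass standardizes the init batch and _inverse undoes it:

import tensorflow as tf

data = tf.random.normal((256, 8))
act_norm = ActNorm(latent_dim=8, act_norm_init=data)
z, ldj = act_norm(data)           # z has ~zero mean and ~unit scale per dimension
x = act_norm(z, inverse=True)     # (z - bias) / scale recovers the input
tf.debugging.assert_near(x, data, atol=1e-4)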
def expand_params(params):
params = params.copy()
for (k, v) in params.items():
if isinstance(v, dict):
v_s = list(expand_params(v))
if (len(v_s) > 1):
for (v_, exps) in v_s:
params_ = params.copy()
params_[k] = v_
for (new_params, expansions) in expand_params(params_):
(yield (new_params, ([(('%s{%s}' % (k, a)), b) for (a, b) in exps] + expansions)))
return
if isinstance(v, sweep):
for (v_, name) in v:
params_ = params.copy()
params_[k] = v_
for (new_params, expansions) in expand_params(params_):
(yield (new_params, (expansions + [(k, name)])))
return
(yield (params, [])) |
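expand_params walks nested dicts and enumerates every combination of sweep values; the real sweep type is external, so this sketch substitutes a minimal, hypothetical stand-in that yields (value, name) pairs:

class sweep:  # hypothetical stand-in for the external sweep type
    def __init__(self, values):
        self.values = values
    def __iter__(self):
        return iter((v, str(v)) for v in self.values)

params = {'lr': sweep([0.1, 0.01]), 'model': {'depth': sweep([2, 4])}}
for (cfg, names) in expand_params(params):
    print(cfg, names)  # 4 configs, each tagged with its sweep choices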
class AdamOptimizerConfig(Config):
amsgrad: bool = False
betas: Tuple[(float, float)] = (0.9, 0.999) |
def _expm_multiply_simple(A, B, t=1.0, traceA=None, balance=False):
if balance:
raise NotImplementedError
if ((len(A.shape) != 2) or (A.shape[0] != A.shape[1])):
raise ValueError('expected A to be like a square matrix')
if (A.shape[1] != B.shape[0]):
raise ValueError('shapes of matrices A {} and B {} are incompatible'.format(A.shape, B.shape))
ident = _ident_like(A)
is_linear_operator = isinstance(A, scipy.sparse.linalg.LinearOperator)
n = A.shape[0]
if (len(B.shape) == 1):
n0 = 1
elif (len(B.shape) == 2):
n0 = B.shape[1]
else:
raise ValueError('expected B to be like a matrix or a vector')
u_d = (2 ** (- 53))
tol = u_d
if (traceA is None):
if is_linear_operator:
warn('Trace of LinearOperator not available, it will be estimated. Provide `traceA` to ensure performance.', stacklevel=3)
traceA = (traceest(A, m3=1) if is_linear_operator else _trace(A))
mu = (traceA / float(n))
A = (A - (mu * ident))
A_1_norm = (onenormest(A) if is_linear_operator else _exact_1_norm(A))
if ((t * A_1_norm) == 0):
(m_star, s) = (0, 1)
else:
ell = 2
norm_info = LazyOperatorNormInfo((t * A), A_1_norm=(t * A_1_norm), ell=ell)
(m_star, s) = _fragment_3_1(norm_info, n0, tol, ell=ell)
return _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol, balance) |
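This is the internal core behind scipy's expm action; a hedged sanity check through the public entry point, which computes exp(A) @ b without ever forming exp(A):

import numpy as np
from scipy.linalg import expm
from scipy.sparse.linalg import expm_multiply

A = np.array([[0.0, 1.0], [-1.0, 0.0]])   # rotation generator
b = np.array([1.0, 0.0])
np.testing.assert_allclose(expm_multiply(A, b), expm(A) @ b, rtol=1e-8)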
class MDPParamsGenerator(object):
def __init__(self, params_schedule_fn):
assert callable(params_schedule_fn), 'params scheduling function must be a callable'
self.params_schedule_fn = params_schedule_fn
@staticmethod
def from_fixed_param(mdp_params_always):
naive_schedule_fn = (lambda _ignored: mdp_params_always)
return MDPParamsGenerator(naive_schedule_fn)
def generate(self, outside_information={}):
assert (type(outside_information) is dict)
mdp_params = self.params_schedule_fn(outside_information)
return mdp_params |
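A hedged usage sketch: from_fixed_param wraps a constant parameter dict in a schedule that ignores whatever outside information is passed in (the layout name below is illustrative):

gen = MDPParamsGenerator.from_fixed_param({'layout_name': 'cramped_room'})
assert gen.generate() == {'layout_name': 'cramped_room'}
assert gen.generate(outside_information={'phase': 1}) == {'layout_name': 'cramped_room'}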
def _is_polars_df(X):
if (hasattr(X, 'columns') and hasattr(X, 'schema')):
try:
pl = sys.modules['polars']
except KeyError:
return False
return isinstance(X, pl.DataFrame)
return False |
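The sys.modules lookup deliberately avoids importing polars itself; a hedged check that duck-typed objects are rejected and a real pl.DataFrame passes once the caller has imported polars:

import sys

class _FakeFrame:  # has the right attributes but is not a polars DataFrame
    columns = []
    schema = {}

assert _is_polars_df(_FakeFrame()) is False
if 'polars' in sys.modules:
    import polars as pl
    assert _is_polars_df(pl.DataFrame({'a': [1]})) is True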
class BlockSampler(_BasicSampler):
def __init__(self, dataset, params, is_training=True, seed=0):
self.num_points_per_sample = 0
self.box_size_x = 0
self.box_size_y = 0
self.sliding_ratio = 0
self.sparse_thresh = 0
self.modify_type = ['raw']
self.ignore_fine_bounds = False
super(BlockSampler, self).__init__(*[dataset, params, is_training])
self.__max_sample_try = 10
self.length = (int((len(self.dataset) / self.num_points_per_sample)) + 1)
self.random_machine = np.random.RandomState(seed)
(self.x_grid, self.y_grid, self.z_grid) = (None, None, None)
self.q = BlockQuery(self.dataset.points, np.array([1, 1, 1], dtype=np.int64), [self.box_size_x, self.box_size_y], ignore_bounds=self.ignore_fine_bounds)
self.modify_func = PointModifier(self.modify_type)
if (not self.is_training):
(x, y) = self._get_sliding_block_center(sliding_block_ratio=self.sliding_ratio)
(x_grid, y_grid) = np.meshgrid(x, y)
self.x_grid = x_grid.flatten()
self.y_grid = y_grid.flatten()
self.z_grid = np.full(len(self.x_grid), self.dataset.min_bounds[2])
self.length = len(self.x_grid)
def modify_points(self, points, *args, **kwargs):
return self.modify_func(points, *args, min_bounds=self.dataset.min_bounds, max_bounds=self.dataset.max_bounds, block_size_x=self.box_size_x, block_size_y=self.box_size_y, **kwargs)
def sample(self, ind, set_random_machine=None, *args, **kwargs):
random_machine = (set_random_machine if (set_random_machine is not None) else self.random_machine)
points = self.dataset.points.view()
scene_extract_mask = None
if self.is_training:
for _ in range(self.__max_sample_try):
center_point = points[random_machine.randint(0, len(points))]
scene_extract_mask = self._extract_block(center_point)
if (not isinstance(scene_extract_mask, list)):
break
assert (not isinstance(scene_extract_mask, list)), 'Point cloud too sparse'
else:
center_point = np.array([self.x_grid[ind], self.y_grid[ind], self.z_grid[ind]])
scene_extract_mask = self._extract_block(center_point)
if isinstance(scene_extract_mask, list):
return _gen_empty_sample(self.num_points_per_sample, self.modify_func.shape, self.dataset.labels.shape[1])
sample_mask = self._get_fix_random_sample_mask(len(scene_extract_mask), random_machine)
scene_extract_mask = scene_extract_mask[sample_mask]
(points, colors, labels) = self.dataset[scene_extract_mask]
points_centered = self.modify_points(points)
return (points_centered, points, labels, colors)
def cal_length(self):
return self.length
def _get_sliding_block_center(self, sliding_block_ratio):
x = self.dataset.min_bounds[0]
x_grid = [x]
while (x < self.dataset.max_bounds[0]):
x += (self.box_size_x * sliding_block_ratio)
x_grid.append(x)
y = self.dataset.min_bounds[1]
y_grid = [y]
while (y < self.dataset.max_bounds[1]):
y += (self.box_size_y * sliding_block_ratio)
y_grid.append(y)
return (x_grid, y_grid)
def _get_fix_random_sample_mask(self, points_length, random_machine):
if ((points_length - self.num_points_per_sample) > 0):
true_array = np.ones(self.num_points_per_sample, dtype=bool)
false_array = np.zeros((points_length - self.num_points_per_sample), dtype=bool)
sample_mask = np.concatenate((true_array, false_array), axis=0)
random_machine.shuffle(sample_mask)
else:
sample_mask = np.arange(points_length)
cat_num = (self.num_points_per_sample - len(sample_mask))
cat_index = random_machine.randint(0, len(sample_mask), cat_num)
sample_mask = np.concatenate((sample_mask, sample_mask[cat_index]), axis=0)
random_machine.shuffle(sample_mask)
return sample_mask
def _extract_block(self, center_point):
mask = self.q.search(center_point, self.dataset.points)
if (len(mask) <= (self.sparse_thresh * self.num_points_per_sample)):
return []
return mask |
def movement(keys):
glMatrixMode(GL_MODELVIEW)
global x, y, angle
dx = 5
da = 20
if keys[key.W]:
glTranslatef(0, dx, 0)
y += dx
if keys[key.S]:
glTranslatef(0, (- dx), 0)
y -= dx
if keys[key.A]:
glTranslatef((- dx), 0, 0)
x -= dx
if keys[key.D]:
glTranslatef(dx, 0, 0)
x += dx
if keys[key.UP]:
glRotatef(da, 0, 0, 1)
angle += da
if keys[key.DOWN]:
glRotatef((- da), 0, 0, 1)
angle -= da
if keys[key.Q]:
glTranslatef(x, y, 0)
glRotatef(da, 0, 0, 1)
glTranslatef((- x), (- y), 0)
print(x, y, angle) |
@register_model
def resnet152d(pretrained=False, **kwargs):
model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs)
return _create_resnet('resnet152d', pretrained, **model_args) |
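With the registration decorator restored, the variant resolves by name through timm's factory; a hedged usage sketch:

import timm
import torch

model = timm.create_model('resnet152d', pretrained=False, num_classes=10)
logits = model(torch.randn(1, 3, 224, 224))
assert logits.shape == (1, 10)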
def _block_op(b, opname, *args, **kwargs):
if ('::' in opname):
aten = False
ns_opname = opname
else:
aten = kwargs.pop('aten', False)
ns = ('aten' if aten else 'onnx')
ns_opname = ((ns + '::') + opname)
n = b.addNode(ns_opname, list(args))
for (k, v) in sorted(kwargs.items()):
if (k == 'inplace'):
continue
_add_attribute(n, k, v, aten=aten)
if (len(list(n.outputs())) == 1):
return n.output()
return tuple((o for o in n.outputs())) |
class Quantizer():
def __init__(self, mod, patterns=DEFAULT_QUANTIZATION_PATTERNS, quant_ctor=DefaultQuant):
self.root = mod
self.graph = mod.graph
self.quant_ctor = quant_ctor
self.state_dict = self.root.state_dict()
self.modules = dict(self.root.named_modules())
self.matches = self._find_matches(patterns)
self.quants = self._find_quants(quant_ctor)
def observe(self, args):
args_iter = iter(args)
env = {}
def load_arg(a):
return map_arg(a, (lambda node: env[node.name]))
for node in self.graph.nodes:
if (node.op == 'placeholder'):
result = next(args_iter)
elif (node.op == 'get_attr'):
result = self.state_dict[node.target]
elif (node.op == 'call_function'):
result = node.target(*load_arg(node.args), **load_arg(node.kwargs))
elif (node.op == 'call_method'):
(self_obj, *args) = load_arg(node.args)
kwargs = load_arg(node.kwargs)
result = getattr(self_obj, node.target)(*args, **kwargs)
elif (node.op == 'call_module'):
result = self.modules[node.target](*load_arg(node.args), **load_arg(node.kwargs))
env[node.name] = result
(root_node, obj) = self.matches.get(node.name, (None, None))
if (root_node is node):
obj.observe(node, env)
if (node.name in self.quants):
self.quants[node.name].observe(node, env)
return load_arg(self.graph.result)
def quantize(self):
self.quantized_graph = Graph()
env = {}
quant_env = {}
def load_arg(n, quantized):
if (not quantized):
if ((n.name not in env) and (n.name in quant_env)):
env[n.name] = Proxy(quant_env[n.name]).dequantize().node
return env[n.name]
else:
if ((n.name not in quant_env) and (n.name in env)):
quant_env[n.name] = self.quants[n.name].quantize(env[n.name])
return quant_env[n.name]
def copy_recursive(node):
def load_or_emit(n):
if ((n.name in env) or (n.name in quant_env)):
return load_arg(n, quantized=False)
else:
return copy_recursive(n)
r = env[node.name] = self.quantized_graph.node_copy(node, (lambda n: load_arg(n, quantized=False)))
return r
for node in self.graph.nodes:
(root_node, obj) = self.matches.get(node.name, (None, None))
if (root_node is None):
env[node.name] = self.quantized_graph.node_copy(node, (lambda n: load_arg(n, quantized=False)))
elif (root_node is node):
r = obj.quantize(self, node, (lambda a: map_arg(a, (lambda n: load_arg(n, quantized=True)))))
if (r is NotImplemented):
env[node.name] = copy_recursive(node)
else:
quant_env[node.name] = r
self.quantized_graph.output(load_arg(self.graph.result, quantized=False))
return GraphModule(self.root, self.quantized_graph)
def _find_matches(self, patterns):
modules = dict(self.root.named_modules())
match_map = {}
def apply_match(pattern, node, match):
if isinstance(pattern, tuple):
(s, *args) = pattern
apply_match(s, node, match)
for (subpattern, arg) in zip(args, node.args):
apply_match(subpattern, arg, match)
else:
match_map[node.name] = match
for node in reversed(self.graph.nodes):
if (node.name not in match_map):
for (pattern, value) in patterns.items():
if matches(modules, node, pattern):
apply_match(pattern, node, (node, value(self, node)))
return match_map
def _find_quants(self, quant_ctor):
quants = {}
def visit_arg(n):
if (n.name not in quants):
quants[n.name] = quant_ctor(self, n)
for node in self.graph.nodes:
if (node.name in self.matches):
map_arg(node.args, visit_arg)
map_arg(node.kwargs, visit_arg)
return quants |
def isFull(layout):
layout_int = int(layout)
if (packed_mask & layout_int):
return False
return True |
def save_model_json(model, path):
actual_dict = OrderedDict()
for (k, v) in model.state_dict().items():
actual_dict[k] = v.tolist()
with open(path, 'w') as f:
json.dump(actual_dict, f) |
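A hedged counterpart for loading the dump back; note that v.tolist() discards tensor dtypes, so torch.tensor re-infers them (float32 for floats, int64 for ints) unless you cast explicitly:

import json
from collections import OrderedDict

import torch

def load_model_json(model, path):
    with open(path, 'r') as f:
        data = json.load(f)
    state = OrderedDict((k, torch.tensor(v)) for (k, v) in data.items())
    model.load_state_dict(state)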
class L2_3(BasePenalty):
def __init__(self, alpha):
self.alpha = alpha
def get_spec(self):
spec = (('alpha', float64),)
return spec
def params_to_dict(self):
return dict(alpha=self.alpha)
def value(self, w):
return (self.alpha * np.sum((np.abs(w) ** (2 / 3))))
def derivative(self, w):
return ((2 * self.alpha * np.sign(w)) / ((3 * (np.abs(w) ** (1 / 3))) + 1e-12))
def prox_1d(self, value, stepsize, j):
return prox_2_3(value, (self.alpha * stepsize))
def subdiff_distance(self, w, grad, ws):
subdiff_dist = np.zeros_like(grad)
for (idx, j) in enumerate(ws):
if (w[j] == 0):
subdiff_dist[idx] = 0.0
else:
subdiff_dist[idx] = np.abs(((- grad[idx]) - (((np.sign(w[j]) * self.alpha) * 2) / (3 * (np.abs(w[j]) ** (1 / 3))))))
return subdiff_dist
def is_penalized(self, n_features):
return np.ones(n_features, bool_)
def generalized_support(self, w):
return (w != 0) |
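A hedged numeric check of the penalty (assumes the skglm-style BasePenalty base class and the prox_2_3 helper are importable); value sums |w|^(2/3) scaled by alpha:

import numpy as np

pen = L2_3(alpha=1.0)
w = np.array([8.0, -1.0])
np.testing.assert_allclose(pen.value(w), 8.0 ** (2 / 3) + 1.0)  # 4 + 1 = 5
assert pen.generalized_support(w).all()  # both coefficients are nonzero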
def get_statistic(sess_clicks, sess_date_sorted, ori_articles, start_time, end_time):
articles = {}
seq_len_list = []
delta_time_list = []
for (sid, date) in sess_date_sorted:
seq = sess_clicks[sid]
seq_len_list.append(len(seq))
for (article_id, date) in seq:
delta_time_list.append(date['delta_h'])
if (article_id in articles):
articles[article_id] += [date['click_t']]
else:
articles[article_id] = [date['click_t']]
print('articles', len(articles))
pickle.dump(list(articles.keys()), open('../data/adressa/articles_list.txt', 'wb'))
print('Total sessions: {}, avg {:.5} clicks per session.'.format(len(sess_clicks), (sum(seq_len_list) / len(seq_len_list))))
print('Total articles: {}, avg {:.5} clicks per article.'.format(len(articles), (len(delta_time_list) / len(articles)))) |
class TorchSequentialBatch(NamedTuple):
query_id: torch.LongTensor
padding_mask: torch.BoolTensor
features: TensorMap |
@pytest.fixture()
def dummy_test_cluster() -> ModuleTestCluster:
test_cluster = generate_test_cluster('tests.fixtures.seeding.initialpopulationseeding.dummycontainer')
return test_cluster |
class FeatureExtractionPipeline(Pipeline):
def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
if (tokenize_kwargs is None):
tokenize_kwargs = {}
if (truncation is not None):
if ('truncation' in tokenize_kwargs):
raise ValueError('truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)')
tokenize_kwargs['truncation'] = truncation
preprocess_params = tokenize_kwargs
postprocess_params = {}
if (return_tensors is not None):
postprocess_params['return_tensors'] = return_tensors
return (preprocess_params, {}, postprocess_params)
def preprocess(self, inputs, **tokenize_kwargs) -> Dict[(str, GenericTensor)]:
return_tensors = self.framework
model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
return model_inputs
def _forward(self, model_inputs):
model_outputs = self.model(**model_inputs)
return model_outputs
def postprocess(self, model_outputs, return_tensors=False):
if return_tensors:
return model_outputs[0]
if (self.framework == 'pt'):
return model_outputs[0].tolist()
elif (self.framework == 'tf'):
return model_outputs[0].numpy().tolist()
def __call__(self, *args, **kwargs):
return super().__call__(*args, **kwargs) |
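A hedged usage sketch through the transformers factory (downloads a small checkpoint on first run); the pipeline returns one [tokens x hidden_size] matrix per input:

from transformers import pipeline

extractor = pipeline('feature-extraction', model='distilbert-base-uncased')
features = extractor('Hello world')
print(len(features[0]), len(features[0][0]))  # e.g. 4 tokens x 768 dims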
@pytest.mark.sm70
@test_utils.test(arch=[ti.cuda], half2_vectorization=True)
def test_half2_vectorize():
half2 = ti.types.vector(n=2, dtype=ti.f16)
table = half2.field(shape=40, needs_grad=True)
embeddings = half2.field(shape=(40, 16), needs_grad=True)
B = 1
@ti.kernel
def test(B: ti.i32):
for (i, level) in ti.ndrange(B, 16):
w = 4.0
local_feature = ti.Vector([ti.f16(0.0), ti.f16(0.0)])
for index in ti.static(range(64)):
local_feature += (w * table[index])
embeddings[(i, level)] = local_feature
test(B)
for i in range(10):
test.grad(B)
ti.sync()
for i in range(40):
for j in range(16):
embeddings.grad[(i, j)] = half2(1.0)
for i in range(1000):
test.grad(B)
ti.sync()
assert (table.grad.to_numpy() == 64).all() |
def point_conv(name, input_tensor, index_tensor, filter_size, in_channels, out_channels, stddev=0.05, extra_chan=None):
with tf.variable_scope(name):
W = tf.get_variable('W', [1, filter_size, in_channels, out_channels], initializer=tf.contrib.layers.xavier_initializer(True))
b = tf.get_variable('b', [out_channels], initializer=tf.constant_initializer(0.01))
conv_input = tf.gather_nd(input_tensor, tf.expand_dims(index_tensor, axis=2))
if (extra_chan is not None):
conv_input = tf.concat([conv_input, tf.expand_dims(extra_chan, axis=2)], axis=2)
conv_input = tf.expand_dims(conv_input, axis=0)
conv_output = tf.nn.conv2d(conv_input, W, strides=[1, 1, 1, 1], padding='VALID')
conv_output = tf.nn.bias_add(conv_output, b)
return tf.squeeze(conv_output, [0, 2]) |
class BertTokenizer(PreTrainedTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, **kwargs):
super().__init__(unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)
if (not os.path.isfile(vocab_file)):
raise ValueError("Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict([(ids, tok) for (tok, ids) in self.vocab.items()])
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)
@property
def vocab_size(self):
return len(self.vocab)
def get_vocab(self):
return dict(self.vocab, **self.added_tokens_encoder)
def _tokenize(self, text):
split_tokens = []
if self.do_basic_tokenize:
for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
else:
split_tokens = self.wordpiece_tokenizer.tokenize(text)
return split_tokens
def _convert_token_to_id(self, token):
return self.vocab.get(token, self.vocab.get(self.unk_token))
def _convert_id_to_token(self, index):
return self.ids_to_tokens.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
out_string = ' '.join(tokens).replace(' ##', '').strip()
return out_string
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
if (token_ids_1 is None):
return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return ((((cls + token_ids_0) + sep) + token_ids_1) + sep)
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
if already_has_special_tokens:
if (token_ids_1 is not None):
raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formated with special tokens for the model.')
return list(map((lambda x: (1 if (x in [self.sep_token_id, self.cls_token_id]) else 0)), token_ids_0))
if (token_ids_1 is not None):
return (((([1] + ([0] * len(token_ids_0))) + [1]) + ([0] * len(token_ids_1))) + [1])
return (([1] + ([0] * len(token_ids_0))) + [1])
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if (token_ids_1 is None):
return (len(((cls + token_ids_0) + sep)) * [0])
return ((len(((cls + token_ids_0) + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))
def save_vocabulary(self, vocab_path):
index = 0
if os.path.isdir(vocab_path):
vocab_file = os.path.join(vocab_path, VOCAB_FILES_NAMES['vocab_file'])
else:
vocab_file = vocab_path
with open(vocab_file, 'w', encoding='utf-8') as writer:
for (token, token_index) in sorted(self.vocab.items(), key=(lambda kv: kv[1])):
if (index != token_index):
logger.warning('Saving vocabulary to {}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!'.format(vocab_file))
index = token_index
writer.write((token + '\n'))
index += 1
return (vocab_file,) |
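A hedged usage sketch of the special-token helpers above (downloads the stock vocabulary on first run):

from transformers import BertTokenizer

tok = BertTokenizer.from_pretrained('bert-base-uncased')
ids = tok.build_inputs_with_special_tokens(
    tok.convert_tokens_to_ids(['hello']), tok.convert_tokens_to_ids(['world']))
assert tok.convert_ids_to_tokens(ids) == ['[CLS]', 'hello', '[SEP]', 'world', '[SEP]']
assert tok.create_token_type_ids_from_sequences([1], [2, 3]) == [0, 0, 0, 1, 1, 1]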
class WipeExecutor(ActionExecutor):
def execute(self, script: Script, state: EnvironmentState, info: ExecutionInfo):
current_line = script[0]
info.set_current_line(current_line)
node = state.get_state_node(current_line.object())
if (node is None):
info.object_found_error()
elif self.check_wipe(state, node, info):
new_node = node.copy()
new_node.states.discard(State.DIRTY)
new_node.states.add(State.CLEAN)
(yield state.change_state([ChangeNode(new_node)]))
def check_wipe(self, state: EnvironmentState, node: GraphNode, info: ExecutionInfo):
char_node = _get_character_node(state)
if (not _is_character_close_to(state, node)):
info.error('{} is not close to {}', char_node, node)
return False
nodes_in_hands = _find_nodes_from(state, char_node, [Relation.HOLDS_RH, Relation.HOLDS_LH])
if (len(nodes_in_hands) == 0):
info.error('{} does not hold anything in hands', char_node)
return False
return True |
class GroupingOperation(Function):
@staticmethod
def forward(ctx, features, idx):
ctx.save_for_backward(idx, features)
return _ext.group_points(features, idx)
@staticmethod
def backward(ctx, grad_out):
(idx, features) = ctx.saved_tensors
N = features.size(2)
grad_features = _ext.group_points_grad(grad_out.contiguous(), idx, N)
return (grad_features, torch.zeros_like(idx)) |
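In the pointnet2-style packaging this class is exposed through Function.apply; a hedged usage note (the compiled _ext extension must be importable for either direction to run):

grouping_operation = GroupingOperation.apply
# features: (B, C, N), idx: (B, npoint, nsample) -> grouped: (B, C, npoint, nsample)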
@pytest.fixture(scope='module')
def base_recs_dict():
converted_dict = {}
for (user, item, score) in base_recs_data:
converted_dict.setdefault(user, [])
converted_dict[user].append((item, score))
for (user, items) in converted_dict.items():
items = sorted(items, key=(lambda x: x[1]), reverse=True)
converted_dict[user] = items
return converted_dict |
class FormDatabase():
def __init__(self, cost_functional_list: List[_typing.CostFunctional], state_forms: List[ufl.Form], bcs_list: List[List[fenics.DirichletBC]], preconditioner_forms: List[ufl.Form]):
self.cost_functional_list = cost_functional_list
self.state_forms = state_forms
self.bcs_list = bcs_list
self.preconditioner_forms = preconditioner_forms
self.lagrangian = cost_functional.Lagrangian(self.cost_functional_list, self.state_forms) |
class BaseOptions():
def __init__(self):
self.initialized = False
def initialize(self, parser):
parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
parser.add_argument('--model', type=str, default='cycle_gan', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]')
parser.add_argument('--input_nc', type=int, default=1, help='# of input image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--output_nc', type=int, default=1, help='# of output image channels: 3 for RGB and 1 for grayscale')
parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
parser.add_argument('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
parser.add_argument('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')
parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')
parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
parser.add_argument('--load_size', type=int, default=256, help='scale images to this size')
parser.add_argument('--crop_size', type=int, default=256, help='then crop to this size')
parser.add_argument('--max_dataset_size', type=int, default=float('inf'), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
parser.add_argument('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
self.initialized = True
return parser
def gather_options(self):
if (not self.initialized):
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = self.initialize(parser)
(opt, _) = parser.parse_known_args()
model_name = opt.model
model_option_setter = models.get_option_setter(model_name)
parser = model_option_setter(parser, self.isTrain)
(opt, _) = parser.parse_known_args()
dataset_name = opt.dataset_mode
dataset_option_setter = data.get_option_setter(dataset_name)
parser = dataset_option_setter(parser, self.isTrain)
self.parser = parser
return parser.parse_args()
def print_options(self, opt):
message = ''
message += '----------------- Options ---------------\n'
for (k, v) in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if (v != default):
comment = ('\t[default: %s]' % str(default))
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += '----------------- End -------------------'
print(message)
expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
with open(file_name, 'wt') as opt_file:
opt_file.write(message)
opt_file.write('\n')
def parse(self):
opt = self.gather_options()
opt.isTrain = self.isTrain
if opt.suffix:
suffix = (('_' + opt.suffix.format(**vars(opt))) if (opt.suffix != '') else '')
opt.name = (opt.name + suffix)
self.print_options(opt)
str_ids = opt.gpu_ids.split(',')
opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if (id >= 0):
opt.gpu_ids.append(id)
if (len(opt.gpu_ids) > 0):
torch.cuda.set_device(opt.gpu_ids[0])
self.opt = opt
return self.opt |
def do_train_movie_hpm(cfg, model, train_loader, val_loader, test_loader, optimizer, scheduler, loss_fn, num_query, num_query_test, start_epoch, acc_best):
writer = SummaryWriter(log_dir=cfg.logs_dir)
use_cuda = torch.cuda.is_available()
last_acc_val = acc_best
loss = 0.0
for epoch in range(start_epoch, cfg.max_epochs):
model.train()
for (ii, (img, target, path)) in enumerate(train_loader):
optimizer.zero_grad()
img = (img.cuda() if use_cuda else img)
target = (target.cuda() if use_cuda else target)
(scores, feats) = model(img)
loss = []
for (jj, (score, feat)) in enumerate(zip(scores, feats)):
loss.append(loss_fn(score, feat, target))
loss = torch.stack(loss).sum()
loss.backward()
optimizer.step()
acc = (score.max(1)[1] == target).float().mean()
loss = float(loss)
acc = float(acc)
if ((ii % 196) == 0):
start_time = datetime.datetime.now()
start_time = ('%4d:%d:%d-%2d:%2d:%2d' % (start_time.year, start_time.month, start_time.day, start_time.hour, start_time.minute, start_time.second))
print('{} - Train: epoch: {} {}/{} Loss: {:.04f} Acc: {:.1%} Lr: {:.2e}'.format(start_time, epoch, (ii + 1), len(train_loader), loss, acc, scheduler.get_last_lr()[0]))
(mAP, cmc1, cmc5, cmc10, cmc20) = inference_movie_aligned(model, val_loader, num_query)
acc_test = cmc1
start_time = datetime.datetime.now()
start_time = ('%4d:%d:%d-%2d:%2d:%2d' % (start_time.year, start_time.month, start_time.day, start_time.hour, start_time.minute, start_time.second))
line = '{} - Final: cmc1: {:.1%} cmc5: {:.1%} cmc10: {:.1%} cmc20: {:.1%} mAP: {:.1%}\n'.format(start_time, cmc1, cmc5, cmc10, cmc20, mAP)
print(line)
f = open(os.path.join(cfg.logs_dir, 'logs.txt'), 'a')
f.write(line)
f.close()
acc_test = (0.5 * (cmc1 + mAP))
is_best = (acc_test >= last_acc_val)
if is_best:
save_checkpoint({'state_dict': model.state_dict(), 'epoch': (epoch + 1), 'best_acc': acc_test}, is_best, fpath=cfg.logs_dir)
last_acc_val = acc_test
writer.add_scalar('train_loss', float(loss), (epoch + 1))
writer.add_scalar('test_rank1', float(cmc1), (epoch + 1))
writer.add_scalar('test_mAP', float(mAP), (epoch + 1))
scheduler.step()
last_model_wts = torch.load(os.path.join(cfg.logs_dir, 'checkpoint_best.pth'))
model.load_state_dict(last_model_wts['state_dict'])
(mAP, cmc1, cmc5, cmc10, cmc20) = inference_movie_aligned(model, test_loader, num_query_test)
start_time = datetime.datetime.now()
start_time = ('%4d:%d:%d-%2d:%2d:%2d' % (start_time.year, start_time.month, start_time.day, start_time.hour, start_time.minute, start_time.second))
line = '{} - Final: cmc1: {:.1%} cmc5: {:.1%} cmc10: {:.1%} cmc20: {:.1%} mAP: {:.1%}\n'.format(start_time, cmc1, cmc5, cmc10, cmc20, mAP)
print(line)
f = open(os.path.join(cfg.logs_dir, 'logs.txt'), 'a')
f.write(line)
f.close() |
def pretrain():
x_train = load_data()
with tf.Session(config=TF_CONFIG) as sess:
gan = GAN(sess, MODEL_CONFIG)
gan.init_all()
if (EXP_CONFIG['pretrained_dir'] is not None):
gan.load_latest(EXP_CONFIG['pretrained_dir'])
gan.train(x_train, TRAIN_CONFIG) |
def parse_requirements(fname='requirements.txt', with_version=True):
import sys
from os.path import exists
import re
require_fpath = fname
def parse_line(line):
if line.startswith('-r '):
target = line.split(' ')[1]
for info in parse_require_file(target):
(yield info)
else:
info = {'line': line}
if line.startswith('-e '):
info['package'] = line.split('#egg=')[1]
else:
pat = (('(' + '|'.join(['>=', '==', '>'])) + ')')
parts = re.split(pat, line, maxsplit=1)
parts = [p.strip() for p in parts]
info['package'] = parts[0]
if (len(parts) > 1):
(op, rest) = parts[1:]
if (';' in rest):
(version, platform_deps) = map(str.strip, rest.split(';'))
info['platform_deps'] = platform_deps
else:
version = rest
info['version'] = (op, version)
(yield info)
def parse_require_file(fpath):
with open(fpath, 'r') as f:
for line in f.readlines():
line = line.strip()
if (line and (not line.startswith('#'))):
for info in parse_line(line):
(yield info)
def gen_packages_items():
if exists(require_fpath):
for info in parse_require_file(require_fpath):
parts = [info['package']]
if (with_version and ('version' in info)):
parts.extend(info['version'])
if (not sys.version.startswith('3.4')):
platform_deps = info.get('platform_deps')
if (platform_deps is not None):
parts.append((';' + platform_deps))
item = ''.join(parts)
(yield item)
packages = list(gen_packages_items())
return packages |
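A hedged sketch of the setup.py pattern this helper is written for:

from setuptools import setup

setup(
    name='example-pkg',  # hypothetical package name
    version='0.0.1',
    install_requires=parse_requirements('requirements.txt'),
)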
def reducible_primes_Billerey(E, num_l=None, max_l=None, verbose=False):
if verbose:
print("E = {}, finding reducible primes using Billerey's algorithm".format(E.ainvs()))
if (max_l is None):
max_l = 200
if (num_l is None):
num_l = 8
K = E.base_field()
DK = K.discriminant()
E1 = E.global_integral_model()
ED = E1.discriminant().norm()
B0 = ZZ(((6 * DK) * ED)).prime_divisors()
if verbose:
print('First doing naive test of primes up to {}...'.format(max_l))
max_small_prime = 200
OK_small_primes = reducible_primes_naive(E1, max_l=max_small_prime, num_P=200, verbose=verbose)
if verbose:
print('Naive test of primes up to {} returns {}.'.format(max_small_prime, OK_small_primes))
B1 = Billerey_B_bound(E1, max_l, num_l, max_small_prime, verbose)
if (B1 == [0]):
if verbose:
print('... B_bound ineffective using max_l={}, moving on to R-bound'.format(max_l))
B1 = Billerey_R_bound(E1, max_l, num_l, max_small_prime, verbose)
if (B1 == [0]):
if verbose:
print('... R_bound ineffective using max_l={}'.format(max_l))
return [0]
if verbose:
print('... R_bound = {}'.format(B1))
elif verbose:
print('... B_bound = {}'.format(B1))
B = sorted(set(((B0 + B1) + OK_small_primes)))
if verbose:
print('... combined bound = {}'.format(B))
num_p = 100
B = Frobenius_filter(E1, B, num_p)
if verbose:
print('... after Frobenius filter = {}'.format(B))
return B |
def check_type(module, expected_type):
if hasattr(module, 'unwrapped_module'):
assert isinstance(module.unwrapped_module, expected_type), f'{type(module.unwrapped_module)} != {expected_type}'
else:
assert isinstance(module, expected_type), f'{type(module)} != {expected_type}' |
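A hedged usage sketch: a bare module is checked directly, while a wrapper exposing unwrapped_module is checked against its inner type:

import torch.nn as nn

check_type(nn.Linear(2, 2), nn.Linear)  # passes

class Wrapper:
    def __init__(self, module):
        self.unwrapped_module = module

check_type(Wrapper(nn.Linear(2, 2)), nn.Linear)  # also passes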
class BackpackMlp(eqx.Module, StateDictSerializationMixin):
c_fc: hnn.Linear
c_proj: hnn.Linear
act: Callable = eqx.static_field()
@staticmethod
def init(Embed: Axis, Mlp: Axis, Out: AxisSpec, activation_fn: Union[(str, Callable)], *, key, use_bias: bool=True) -> 'BackpackMlp':
(k_fc, k_proj) = jrandom.split(key, 2)
c_fc = hnn.Linear.init(Out=Mlp, In=Embed, key=k_fc, use_bias=use_bias)
c_proj = hnn.Linear.init(Out=Out, In=Mlp, key=k_proj, use_bias=use_bias)
if isinstance(activation_fn, str):
activation_fn = ACT2FN[activation_fn]
act = activation_fn
return BackpackMlp(c_fc=c_fc, c_proj=c_proj, act=act)
@named_call
def __call__(self, x: NamedArray):
x = self.c_fc(x)
x = self.act(x)
x = self.c_proj(x)
return x
def from_state_dict(self, state_dict: StateDict, prefix: Optional[str]=None) -> 'BackpackMlp':
d = {}
d.update(unflatten_linear_layers(apply_prefix(prefix, 'c_proj'), state_dict, self.c_proj, out_dims_first_in_dict=False))
d.update(unflatten_linear_layers(apply_prefix(prefix, 'c_fc'), state_dict, self.c_fc, out_dims_first_in_dict=False))
return super().from_state_dict(d, prefix)
def update_state_dict(self, state_dict: StateDict, prefix: Optional[str]=None) -> StateDict:
my_dict: StateDict = {}
super().update_state_dict(my_dict, prefix)
my_dict.update(flatten_linear_layers(apply_prefix(prefix, 'c_proj'), self.c_proj, out_dims_first_in_dict=False))
my_dict.update(flatten_linear_layers(apply_prefix(prefix, 'c_fc'), self.c_fc, out_dims_first_in_dict=False))
state_dict.update(my_dict)
return state_dict |
class LightPrompt(torch.nn.Module):
def __init__(self, token_dim, token_num_per_group, group_num=1, inner_prune=None):
super(LightPrompt, self).__init__()
self.inner_prune = inner_prune
self.token_list = torch.nn.ParameterList([torch.nn.Parameter(torch.empty(token_num_per_group, token_dim)) for i in range(group_num)])
self.token_init(init_method='kaiming_uniform')
def token_init(self, init_method='kaiming_uniform'):
if (init_method == 'kaiming_uniform'):
for token in self.token_list:
torch.nn.init.kaiming_uniform_(token, nonlinearity='leaky_relu', mode='fan_in', a=0.01)
else:
raise ValueError('only support kaiming_uniform init, more init methods will be included soon')
def inner_structure_update(self):
return self.token_view()
def token_view(self):
pg_list = []
for (i, tokens) in enumerate(self.token_list):
token_dot = torch.mm(tokens, torch.transpose(tokens, 0, 1))
token_sim = torch.sigmoid(token_dot)
inner_adj = torch.where((token_sim < self.inner_prune), 0, token_sim)
edge_index = inner_adj.nonzero().t().contiguous()
pg_list.append(Data(x=tokens, edge_index=edge_index, y=torch.tensor([i]).long()))
pg_batch = Batch.from_data_list(pg_list)
return pg_batch |
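A hedged usage sketch (requires torch_geometric for Data/Batch): each token group becomes one prompt graph whose edges keep only pairwise token similarities at or above inner_prune:

prompt = LightPrompt(token_dim=16, token_num_per_group=5, group_num=2, inner_prune=0.9)
pg_batch = prompt.token_view()
print(pg_batch.num_graphs)  # 2 prompt graphs, 5 tokens each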
@torch.no_grad()
def validate(model, dataloader, converter, cfg):
model.eval()
layout = (cfg['POST_PROCESS']['LAYOUT'] if ('LAYOUT' in cfg['POST_PROCESS']) else 'generic')
page_decoder = PageDecoder(se_thres=cfg['POST_PROCESS']['SOL_EOL_CONF_THRES'], max_steps=cfg['POST_PROCESS']['READ_ORDER_MAX_STEP'], layout=layout)
total_De = 0
total_Se = 0
total_Ie = 0
total_Len = 0
to_log = ''
for sample in tqdm(dataloader):
images = sample['image'].cuda()
labels = sample['label']
num_chars = sample['num_char_per_line']
filename = sample['filename']
(pred_det_rec, pred_read_order, pred_sol, pred_eol) = model(images)
pred_det_rec = pred_det_rec[0].cpu().numpy()
pred_read_order = pred_read_order[0].cpu().numpy()
pred_sol = pred_sol[0].cpu().numpy()
pred_eol = pred_eol[0].cpu().numpy()
pred_det_rec = det_rec_nms(pred_det_rec=pred_det_rec, img_shape=images.shape[(- 2):], dis_weight=cfg['POST_PROCESS']['DIS_WEIGHT'], conf_thres=cfg['POST_PROCESS']['CONF_THRES'], nms_thres=cfg['POST_PROCESS']['NMS_THRES'])
(line_results, _) = page_decoder.decode(output=pred_det_rec, pred_read=pred_read_order, pred_start=pred_sol, pred_end=pred_eol, img_shape=images.shape[(- 2):])
(De, Se, Ie, Len, to_log_) = eval_page_performance(pred_det_rec, line_results, labels[0], num_chars[0], converter)
total_De += De
total_Se += Se
total_Ie += Ie
total_Len += Len
total_AR = ((((total_Len - total_De) - total_Se) - total_Ie) / total_Len)
total_CR = (((total_Len - total_De) - total_Se) / total_Len)
to_log += (filename[0] + '\n')
to_log += to_log_
to_log += '\ntotally AR: {:6f} CR: {:6f} De: {} Se: {} Ie: {} Len: {}\n'.format(total_AR, total_CR, total_De, total_Se, total_Ie, total_Len)
log_path = os.path.join(cfg['OUTPUT_FOLDER'], 'val_log.txt')
with open(log_path, 'w') as f:
f.write(to_log) |
def interpolate(audio_encoder, generator, args, interpolate_num=10, img_filename_base='img'):
if (not os.path.exists(args.output_dir)):
os.makedirs(args.output_dir, exist_ok=True)
audio_temp_filename = (osp.join(args.output_dir, 'audio0_{}.wav'.format(img_filename_base)), osp.join(args.output_dir, 'audio1_{}.wav'.format(img_filename_base)))
print('text to speech')
pairs = [(args.input_text, audio_temp_filename[0]), (args.input_text_other, audio_temp_filename[1])]
caption_to_audio_one_by_one(pairs)
print('extract audio feature...')
(audio_feature_all, feature_len_all) = ([], [])
for audio_filename in audio_temp_filename:
(audio_features, feature_lens) = extract_only_one_feature(audio_encoder, audio_filename, cuda=False)
audio_feature_all.append(audio_features[0])
feature_len_all.append(feature_lens[0])
for idx in range((interpolate_num + 1)):
alpha = (float(idx) / interpolate_num)
audio_feature_all.append(((audio_feature_all[0] * alpha) + (audio_feature_all[1] * (1 - alpha))))
audio_feature_all = torch.from_numpy(np.array(audio_feature_all[2:]))
noise = torch.FloatTensor(1, cfg.GAN.Z_DIM).normal_(0, 1)
if cfg.CUDA:
noise = noise.cuda()
audio_feature_all = audio_feature_all.cuda()
generator = generator.cuda()
else:
generator = generator.cpu()
print('generate images')
for (idx, audio_feature) in enumerate(audio_feature_all):
audio_feature = audio_feature.float().unsqueeze(0)
(imgs, _, _) = generator.forward(noise, audio_feature)
save_image(imgs[(- 1)], osp.join(args.output_dir, (img_filename_base + '_{}.jpg'.format(idx)))) |
class TestTargetEncoder(TestCase):
def setUp(self):
self.hierarchical_cat_example = pd.DataFrame({'Compass': ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W'], 'Speed': ['slow', 'slow', 'slow', 'slow', 'medium', 'medium', 'medium', 'fast', 'fast', 'fast', 'fast', 'fast', 'fast', 'fast', 'fast', 'fast'], 'Animal': ['Cat', 'Cat', 'Cat', 'Cat', 'Cat', 'Dog', 'Dog', 'Dog', 'Dog', 'Dog', 'Dog', 'Tiger', 'Tiger', 'Wolf', 'Wolf', 'Cougar'], 'Plant': ['Rose', 'Rose', 'Rose', 'Rose', 'Daisy', 'Daisy', 'Daisy', 'Daisy', 'Daffodil', 'Daffodil', 'Daffodil', 'Daffodil', 'Bluebell', 'Bluebell', 'Bluebell', 'Bluebell'], 'target': [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1]}, columns=['Compass', 'Speed', 'Animal', 'Plant', 'target'])
self.hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}, 'Animal': {'Feline': ('Cat', 'Tiger', 'Cougar'), 'Canine': ('Dog', 'Wolf')}, 'Plant': {'Flower': ('Rose', 'Daisy', 'Daffodil', 'Bluebell'), 'Tree': ('Ash', 'Birch')}}
def test_target_encoder(self):
np_X = th.create_array(n_rows=100)
np_X_t = th.create_array(n_rows=50, extras=True)
np_y = (np.random.randn(np_X.shape[0]) > 0.5)
np_y_t = (np.random.randn(np_X_t.shape[0]) > 0.5)
X = th.create_dataset(n_rows=100)
X_t = th.create_dataset(n_rows=50, extras=True)
y = pd.DataFrame(np_y)
y_t = pd.DataFrame(np_y_t)
enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2)
enc.fit(X, y)
th.verify_numeric(enc.transform(X_t))
th.verify_numeric(enc.transform(X_t, y_t))
def test_target_encoder_fit_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectUsedInFit(self):
k = 2
f = 10
binary_cat_example = pd.DataFrame({'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]})
encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f)
encoder.fit(binary_cat_example, binary_cat_example['target'])
trend_mapping = encoder.mapping['Trend']
ordinal_mapping = encoder.ordinal_encoder.category_mapping[0]['mapping']
self.assertAlmostEqual(0.4125, trend_mapping[ordinal_mapping.loc['DOWN']], delta=0.0001)
self.assertEqual(0.5, trend_mapping[ordinal_mapping.loc['FLAT']])
self.assertAlmostEqual(0.5874, trend_mapping[ordinal_mapping.loc['UP']], delta=0.0001)
def test_target_encoder_fit_transform_HaveConstructorSetSmoothingAndMinSamplesLeaf_ExpectCorrectValueInResult(self):
k = 2
f = 10
binary_cat_example = pd.DataFrame({'Trend': ['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]})
encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f)
result = encoder.fit_transform(binary_cat_example, binary_cat_example['target'])
values = result['Trend'].array
self.assertAlmostEqual(0.5874, values[0], delta=0.0001)
self.assertAlmostEqual(0.5874, values[1], delta=0.0001)
self.assertAlmostEqual(0.4125, values[2], delta=0.0001)
self.assertEqual(0.5, values[3])
def test_target_encoder_fit_transform_HaveCategoricalColumn_ExpectCorrectValueInResult(self):
k = 2
f = 10
binary_cat_example = pd.DataFrame({'Trend': pd.Categorical(['UP', 'UP', 'DOWN', 'FLAT', 'DOWN', 'UP', 'DOWN', 'FLAT', 'FLAT', 'FLAT'], categories=['UP', 'FLAT', 'DOWN']), 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]})
encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f)
result = encoder.fit_transform(binary_cat_example, binary_cat_example['target'])
values = result['Trend'].array
self.assertAlmostEqual(0.5874, values[0], delta=0.0001)
self.assertAlmostEqual(0.5874, values[1], delta=0.0001)
self.assertAlmostEqual(0.4125, values[2], delta=0.0001)
self.assertEqual(0.5, values[3])
def test_target_encoder_fit_transform_HaveNanValue_ExpectCorrectValueInResult(self):
k = 2
f = 10
binary_cat_example = pd.DataFrame({'Trend': pd.Series([np.nan, np.nan, 'DOWN', 'FLAT', 'DOWN', np.nan, 'DOWN', 'FLAT', 'FLAT', 'FLAT']), 'target': [1, 1, 0, 0, 1, 0, 0, 0, 1, 1]})
encoder = encoders.TargetEncoder(cols=['Trend'], min_samples_leaf=k, smoothing=f)
result = encoder.fit_transform(binary_cat_example, binary_cat_example['target'])
values = result['Trend'].array
self.assertAlmostEqual(0.5874, values[0], delta=0.0001)
self.assertAlmostEqual(0.5874, values[1], delta=0.0001)
self.assertAlmostEqual(0.4125, values[2], delta=0.0001)
self.assertEqual(0.5, values[3])
def test_HandleMissingIsValueAndNanInTest_ExpectMean(self):
df = pd.DataFrame({'color': ['a', 'a', 'a', 'b', 'b', 'b'], 'outcome': [1.6, 0, 0, 1, 0, 1]})
train = df.drop('outcome', axis=1)
target = df.drop('color', axis=1)
test = pd.Series([np.nan, 'b'], name='color')
test_target = pd.Series([0, 0])
enc = encoders.TargetEncoder(cols=['color'], handle_missing='value')
enc.fit(train, target['outcome'])
obtained = enc.transform(test, test_target)
self.assertEqual(0.6, list(obtained['color'])[0])
def test_HandleUnknownValue_HaveUnknownInTest_ExpectMean(self):
train = pd.Series(['a', 'a', 'a', 'b', 'b', 'b'], name='color')
target = pd.Series([1.6, 0, 0, 1, 0, 1], name='target')
test = pd.Series(['c', 'b'], name='color')
test_target = pd.Series([0, 0])
enc = encoders.TargetEncoder(cols=['color'], handle_unknown='value')
enc.fit(train, target)
obtained = enc.transform(test, test_target)
self.assertEqual(0.6, list(obtained['color'])[0])
def test_hierarchical_smoothing(self):
enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map, cols=['Compass'])
result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target'])
values = result['Compass'].array
self.assertAlmostEqual(0.6226, values[0], delta=0.0001)
self.assertAlmostEqual(0.9038, values[2], delta=0.0001)
self.assertAlmostEqual(0.1766, values[5], delta=0.0001)
self.assertAlmostEqual(0.4605, values[7], delta=0.0001)
self.assertAlmostEqual(0.4033, values[11], delta=0.0001)
def test_hierarchical_smoothing_multi(self):
enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map, cols=['Compass', 'Speed', 'Animal'])
result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target'])
values = result['Compass'].array
self.assertAlmostEqual(0.6226, values[0], delta=0.0001)
self.assertAlmostEqual(0.9038, values[2], delta=0.0001)
self.assertAlmostEqual(0.1766, values[5], delta=0.0001)
self.assertAlmostEqual(0.4605, values[7], delta=0.0001)
self.assertAlmostEqual(0.4033, values[11], delta=0.0001)
values = result['Speed'].array
self.assertAlmostEqual(0.6827, values[0], delta=0.0001)
self.assertAlmostEqual(0.3962, values[4], delta=0.0001)
self.assertAlmostEqual(0.446, values[7], delta=0.0001)
values = result['Animal'].array
self.assertAlmostEqual(0.7887, values[0], delta=0.0001)
self.assertAlmostEqual(0.3248, values[5], delta=0.0001)
self.assertAlmostEqual(0.619, values[11], delta=0.0001)
self.assertAlmostEqual(0.1309, values[13], delta=0.0001)
self.assertAlmostEqual(0.837, values[15], delta=0.0001)
def test_hierarchical_part_named_cols(self):
enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map, cols=['Compass'])
result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target'])
values = result['Compass'].array
self.assertAlmostEqual(0.6226, values[0], delta=0.0001)
self.assertAlmostEqual(0.9038, values[2], delta=0.0001)
self.assertAlmostEqual(0.1766, values[5], delta=0.0001)
self.assertAlmostEqual(0.4605, values[7], delta=0.0001)
self.assertAlmostEqual(0.4033, values[11], delta=0.0001)
values = result['Speed'].array
self.assertEqual('slow', values[0])
def test_hierarchy_pandas_index(self):
df = pd.DataFrame({'hello': ['a', 'b', 'c', 'a', 'a', 'b', 'c', 'd', 'd'], 'world': [0, 1, 0, 0, 1, 0, 0, 1, 1]}, columns=pd.Index(['hello', 'world']))
cols = df.select_dtypes(include='object').columns
self.hierarchical_map = {'hello': {'A': ('a', 'b'), 'B': ('c', 'd')}}
enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map, cols=cols)
result = enc.fit_transform(df, df['world'])
values = result['hello'].array
self.assertAlmostEqual(0.3616, values[0], delta=0.0001)
self.assertAlmostEqual(0.4541, values[1], delta=0.0001)
self.assertAlmostEqual(0.2425, values[2], delta=0.0001)
self.assertAlmostEqual(0.7425, values[7], delta=0.0001)
def test_hierarchy_single_mapping(self):
enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=self.hierarchical_map, cols=['Plant'])
result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target'])
values = result['Plant'].array
self.assertAlmostEqual(0.6828, values[0], delta=0.0001)
self.assertAlmostEqual(0.5, values[4], delta=0.0001)
self.assertAlmostEqual(0.5, values[8], delta=0.0001)
self.assertAlmostEqual(0.3172, values[12], delta=0.0001)
def test_hierarchy_no_mapping(self):
hierarchical_map = {'Plant': {'Rose': 'Rose', 'Daisy': 'Daisy', 'Daffodil': 'Daffodil', 'Bluebell': 'Bluebell'}}
enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchical_map, cols=['Plant'])
result = enc.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target'])
values = result['Plant'].array
self.assertAlmostEqual(0.6828, values[0], delta=0.0001)
self.assertAlmostEqual(0.5, values[4], delta=0.0001)
self.assertAlmostEqual(0.5, values[8], delta=0.0001)
self.assertAlmostEqual(0.3172, values[12], delta=0.0001)
def test_hierarchy_error(self):
hierarchical_map = {'Plant': {'Flower': {'Rose': ('Pink', 'Yellow', 'Red')}, 'Tree': 'Ash'}}
with self.assertRaises(ValueError):
encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchical_map, cols=['Plant'])
def test_trivial_hierarchy(self):
trivial_hierarchical_map = {'Plant': {'Plant': ('Rose', 'Daisy', 'Daffodil', 'Bluebell')}}
enc_hier = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=trivial_hierarchical_map, cols=['Plant'])
result_hier = enc_hier.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target'])
enc_no_hier = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, cols=['Plant'])
result_no_hier = enc_no_hier.fit_transform(self.hierarchical_cat_example, self.hierarchical_cat_example['target'])
pd.testing.assert_series_equal(result_hier['Plant'], result_no_hier['Plant'])
def test_hierarchy_multi_level(self):
hierarchy_multi_level_df = pd.DataFrame({'Animal': ['Cat', 'Cat', 'Dog', 'Dog', 'Dog', 'Osprey', 'Kite', 'Kite', 'Carp', 'Carp', 'Carp', 'Clownfish', 'Clownfish', 'Lizard', 'Snake', 'Snake'], 'target': [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1]}, columns=['Animal', 'target'])
hierarchy_multi_level = {'Animal': {'Warm-Blooded': {'Mammals': ('Cat', 'Dog'), 'Birds': ('Osprey', 'Kite'), 'Fish': ('Carp', 'Clownfish')}, 'Cold-Blooded': {'Reptiles': 'Lizard', 'Amphibians': ('Snake', 'Frog')}}}
enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchy_multi_level, cols=['Animal'])
result = enc.fit_transform(hierarchy_multi_level_df, hierarchy_multi_level_df['target'])
values = result['Animal'].array
self.assertAlmostEqual(0.6261, values[0], delta=0.0001)
self.assertAlmostEqual(0.9065, values[2], delta=0.0001)
self.assertAlmostEqual(0.2556, values[5], delta=0.0001)
self.assertAlmostEqual(0.368, values[8], delta=0.0001)
self.assertAlmostEqual(0.4626, values[11], delta=0.0001)
self.assertAlmostEqual(0.1535, values[13], delta=0.0001)
self.assertAlmostEqual(0.4741, values[14], delta=0.0001)
def test_hierarchy_columnwise_compass(self):
(X, y) = load_compass()
cols = X.columns[(~ X.columns.str.startswith('HIER'))]
HIER_cols = X.columns[X.columns.str.startswith('HIER')]
enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=X[HIER_cols], cols=['compass'])
result = enc.fit_transform(X[cols], y)
values = result['compass'].array
self.assertAlmostEqual(0.6226, values[0], delta=0.0001)
self.assertAlmostEqual(0.9038, values[2], delta=0.0001)
self.assertAlmostEqual(0.1766, values[5], delta=0.0001)
self.assertAlmostEqual(0.4605, values[7], delta=0.0001)
self.assertAlmostEqual(0.4033, values[11], delta=0.0001)
def test_hierarchy_columnwise_postcodes(self):
(X, y) = load_postcodes('binary')
cols = X.columns[(~ X.columns.str.startswith('HIER'))]
HIER_cols = X.columns[X.columns.str.startswith('HIER')]
enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=X[HIER_cols], cols=['postcode'])
result = enc.fit_transform(X[cols], y)
values = result['postcode'].array
self.assertAlmostEqual(0.8448, values[0], delta=0.0001)
def test_hierarchy_columnwise_missing_level(self):
(X, y) = load_postcodes('binary')
HIER_cols = ['HIER_postcode_1', 'HIER_postcode_2', 'HIER_postcode_4']
with self.assertRaises(ValueError):
encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=X[HIER_cols], cols=['postcode'])
def test_hierarchy_mapping_no_cols(self):
hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}}
with self.assertRaises(ValueError):
encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchical_map)
def test_hierarchy_mapping_cols_missing(self):
X = ['N', 'N', 'NE', 'NE', 'NE', 'SE', 'SE', 'S', 'S', 'S', 'S', 'W', 'W', 'W', 'W', 'W']
hierarchical_map = {'Compass': {'N': ('N', 'NE'), 'S': ('S', 'SE'), 'W': 'W'}}
y = [1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1]
enc = encoders.TargetEncoder(verbose=1, smoothing=2, min_samples_leaf=2, hierarchy=hierarchical_map, cols=['Compass'])
with self.assertRaises(ValueError):
enc.fit_transform(X, y) |
def adjust_learning_rate(optimizer, epoch):
lr = args.lr_
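# Step decay (as implemented below): divide the LR by 10 every 75 epochs on a
# 300-epoch schedule, otherwise every 30 epochs.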
if (args.epochs == 300):
lr = (args.lr_ * (0.1 ** (epoch // 75)))
else:
lr = (args.lr_ * (0.1 ** (epoch // 30)))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr |
def rows_to_triplets(labels: List[PandasRowData]) -> List[RowData]:
return [[(index, j, y) for (j, y) in row_labels] for (index, row_labels) in enumerate(labels)] |
def main():
parser = ArgumentParser(description=__doc__.rstrip(), formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('--version', action='version', version='%(prog)s')
add_args(parser)
options = parser.parse_args()
show_mesh_info(options) |
class PlanarFlow(nn.Module):
def __init__(self, nd=1):
super(PlanarFlow, self).__init__()
self.nd = nd
self.activation = torch.tanh
self.register_parameter('u', nn.Parameter(torch.randn(self.nd)))
self.register_parameter('w', nn.Parameter(torch.randn(self.nd)))
self.register_parameter('b', nn.Parameter(torch.randn(1)))
self.reset_parameters()
def reset_parameters(self):
stdv = (1.0 / math.sqrt(self.nd))
self.u.data.uniform_((- stdv), stdv)
self.w.data.uniform_((- stdv), stdv)
self.b.data.fill_(0)
self.make_invertible()
def make_invertible(self):
u = self.u.data
w = self.w.data
dot = torch.dot(u, w)
m = ((- 1) + math.log((1 + math.exp(dot))))
du = (((m - dot) / (torch.norm(w) ** 2)) * w)
u = (u + du)
self.u.data = u
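# Planar flow (Rezende & Mohamed, 2015): f(z) = z + u * tanh(w.z + b), with
# log q(f(z)) = log q(z) - log|1 + u.psi(z)| and psi(z) = tanh'(w.z + b) * w.
# make_invertible() above reparameterizes u so that w.u = m(w.u) >= -1, where
# m(x) = -1 + log(1 + e^x), a sufficient condition for invertibility.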
def forward(self, z, logp=None, reverse=False):
assert (not reverse), 'Planar normalizing flow cannot be reversed.'
f = self.sample(z)
if (logp is not None):
qf = self.log_density(z, logp)
return (f, qf)
else:
return f
def sample(self, z):
h = self.activation((torch.mm(z, self.w.view(self.nd, 1)) + self.b))
output = (z + (self.u.expand_as(z) * h))
return output
def _detgrad(self, z):
with torch.enable_grad():
z = z.requires_grad_(True)
h = self.activation((torch.mm(z, self.w.view(self.nd, 1)) + self.b))
psi = grad(h, z, grad_outputs=torch.ones_like(h), create_graph=True, only_inputs=True)[0]
u_dot_psi = torch.mm(psi, self.u.view(self.nd, 1))
detgrad = (1 + u_dot_psi)
return detgrad
def log_density(self, z, logqz):
return (logqz - torch.log((self._detgrad(z) + 1e-08))) |
def add_gpu(ax, gpu_id, memory, utilization):
memory = sanitize_memory(memory)
utilization = sanitize_utilizaton(utilization)
frac = utilization
fat = memory
height = GPU_HEIGHT
x = (gpu_id * (GPU_MAX_WIDTH + SPACE))
y = GPU_DOWN_BORDER
bb = Rectangle((x, y), GPU_MAX_WIDTH, GPU_HEIGHT, linewidth=BB_LINE_WIDTH, fill=None)
rect = Rectangle((x, y), fat, height, linewidth=0, fill=None)
ax.add_patch(rect)
ax.add_patch(bb)
ax.fill_between([x, (x + fat)], y, (y + (height * frac)), hatch='\\', color=None) |
class Augmentation():
input_args: Optional[Tuple[str]] = None
def _init(self, params=None):
if params:
for (k, v) in params.items():
if ((k != 'self') and (not k.startswith('_'))):
setattr(self, k, v)
def get_transform(self, *args) -> Transform:
raise NotImplementedError
def __call__(self, aug_input) -> Transform:
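# Gather the declared inputs from aug_input, build a deterministic Transform,
# apply it to aug_input in place, and return it so the same transform can be
# re-applied to associated data (e.g., boxes or segmentation masks).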
args = _get_aug_input_args(self, aug_input)
tfm = self.get_transform(*args)
assert isinstance(tfm, (Transform, TransformList)), f'{type(self)}.get_transform must return an instance of Transform! Got {type(tfm)} instead.'
aug_input.transform(tfm)
return tfm
def _rand_range(self, low=1.0, high=None, size=None):
if (high is None):
(low, high) = (0, low)
if (size is None):
size = []
return np.random.uniform(low, high, size)
def __repr__(self):
try:
sig = inspect.signature(self.__init__)
classname = type(self).__name__
argstr = []
for (name, param) in sig.parameters.items():
assert ((param.kind != param.VAR_POSITIONAL) and (param.kind != param.VAR_KEYWORD)), "The default __repr__ doesn't support *args or **kwargs"
assert hasattr(self, name), 'Attribute {} not found! Default __repr__ only works if attributes match the constructor.'.format(name)
attr = getattr(self, name)
default = param.default
if (default is attr):
continue
attr_str = pprint.pformat(attr)
if ('\n' in attr_str):
attr_str = '...'
argstr.append('{}={}'.format(name, attr_str))
return '{}({})'.format(classname, ', '.join(argstr))
except AssertionError:
return super().__repr__()
__str__ = __repr__ |
@pytest.fixture(scope='module')
def simplemodels_model_data():
model = pyhf.simplemodels.uncorrelated_background(signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0])
data = [51, 48]
return (model, data) |
def test_getSubscriptionByNilId():
get_url = (brokerIp + '/v2/subscription/nil')
headers = {'Content-Type': 'application/json'}
r = requests.get(get_url, headers=headers)
assert (r.status_code == 404) |
class Ignore(Action):
def perform(self, token_stream, text):
return None
def __repr__(self):
return 'IGNORE' |
# NOTE: the decorator's name was lost in extraction; only its argument
# survived. A registration decorator of this form is assumed (the name below
# is hypothetical):
@register_geometry(goos.PixelatedContShapeFlow)
class PixelatedContShapeFlowImpl(GeometryImpl):
def eval(self, grid: gridlock.Grid, params: RenderParams):
grid.draw_cuboid(self.shape.pos, self.shape.extents, self.shape.material.permittivity(params.wlen))
new_grid = gridlock.Grid(grid.exyz, ext_dir=grid.ext_dir, initial=0, num_grids=3)
contrast = (self.shape.material2.permittivity(params.wlen) - self.shape.material.permittivity(params.wlen))
shape_coords = self.shape.get_edge_coords()
for axis in range(3):
grid_coords = new_grid.shifted_exyz(axis, which_grid=gridlock.GridType.COMP)
grid_coords = [(c if (c.shape == co.shape) else c[:(- 1)]) for (c, co) in zip(grid_coords, grid.exyz)]
mat = get_rendering_matrix(shape_coords, grid_coords)
grid_vals = ((contrast * mat) @ self.shape.array.flatten())
new_grid.grids[axis] = np.reshape(grid_vals, new_grid.grids[axis].shape)
return new_grid
def grad(self, grid: gridlock.Grid, params: RenderParams):
contrast = (self.shape.material2.permittivity(params.wlen) - self.shape.material.permittivity(params.wlen))
shape_coords = self.shape.get_edge_coords()
grad = np.zeros_like(self.shape.array)
for axis in range(3):
grid_coords = grid.shifted_exyz(axis, which_grid=gridlock.GridType.COMP)
grid_coords = [(c if (c.shape == co.shape) else c[:(- 1)]) for (c, co) in zip(grid_coords, grid.exyz)]
mat = get_rendering_matrix(shape_coords, grid_coords)
grid_vals = ((contrast * mat.T) @ grid.grids[axis].flatten())
grad += np.real(np.reshape(grid_vals, self.shape.array.shape))
if np.iscomplexobj(grid_vals):
grad *= 2
return goos.PixelatedContShapeFlow.Grad(array_grad=grad) |
@pytest.mark.parametrize('alpha', [0.95, [0.05, 0.95], (0.05, 0.95), np.array([0.05, 0.95]), None])
def test_valid_alpha(alpha: Any) -> None:
check_alpha(alpha=alpha) |
class Layer():
def __init__(self, model, in_dim, output_dim, activation=dynet.tanh):
ident = str(next(global_counter))
self.act = activation
self.W = model.add_parameters((output_dim, in_dim))
self.b = model.add_parameters(output_dim)
def __call__(self, x):
W = dynet.parameter(self.W)
b = dynet.parameter(self.b)
return self.act(((W * x) + b)) |
def save_to_pkl(object_to_save, path_to_file: str):
os.makedirs(os.path.dirname(path_to_file), exist_ok=True)
with open(path_to_file, 'wb') as fd:
pickle.dump(object_to_save, fd) |
def check_int(n, minimum=0):
if (minimum == 0):
msg = 'a non-negative integer'
else:
msg = f'an integer at least {minimum}'
if ((n not in NonNegativeIntegers()) or (n < minimum)):
raise ValueError((('number of elements must be ' + msg) + f', not {n}'))
return Integer(n) |
class GCN(ScalableGNN):
def __init__(self, num_nodes: int, in_channels, hidden_channels: int, out_channels: int, num_layers: int, dropout: float=0.0, drop_input: bool=True, batch_norm: bool=False, residual: bool=False, linear: bool=False, pool_size: Optional[int]=None, buffer_size: Optional[int]=None, device=None):
super().__init__(num_nodes, hidden_channels, num_layers, pool_size, buffer_size, device)
self.in_channels = in_channels
self.out_channels = out_channels
self.dropout = dropout
self.drop_input = drop_input
self.batch_norm = batch_norm
self.residual = residual
self.linear = linear
self.lins = ModuleList()
if linear:
self.lins.append(Linear(in_channels, hidden_channels))
self.lins.append(Linear(hidden_channels, out_channels))
self.convs = ModuleList()
for i in range(num_layers):
in_dim = out_dim = hidden_channels
if ((i == 0) and (not linear)):
in_dim = in_channels
if ((i == (num_layers - 1)) and (not linear)):
out_dim = out_channels
conv = GCNConv(in_dim, out_dim, normalize=False)
self.convs.append(conv)
self.bns = ModuleList()
for i in range(num_layers):
bn = BatchNorm1d(hidden_channels)
self.bns.append(bn)
def reg_modules(self):
if self.linear:
return ModuleList((list(self.convs) + list(self.bns)))
else:
return ModuleList((list(self.convs[:(- 1)]) + list(self.bns)))
def nonreg_modules(self):
return (self.lins if self.linear else self.convs[(- 1):])
def reset_parameters(self):
super().reset_parameters()
for lin in self.lins:
lin.reset_parameters()
for conv in self.convs:
conv.reset_parameters()
for bn in self.bns:
bn.reset_parameters()
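# forward() runs every conv but the last, pushing each layer's output into its
# history buffer and pulling cached embeddings for out-of-mini-batch nodes
# (push_and_pull), in the GNNAutoScale style; the final conv, optionally
# followed by BN/residual and the output linear layer, produces the result.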
def forward(self, x: Tensor, adj_t: SparseTensor, *args) -> Tensor:
if self.drop_input:
x = F.dropout(x, p=self.dropout, training=self.training)
if self.linear:
x = self.lins[0](x).relu_()
x = F.dropout(x, p=self.dropout, training=self.training)
for (conv, bn, hist) in zip(self.convs[:(- 1)], self.bns, self.histories):
h = conv(x, adj_t)
if self.batch_norm:
h = bn(h)
if (self.residual and (h.size((- 1)) == x.size((- 1)))):
h += x[:h.size(0)]
x = h.relu_()
x = self.push_and_pull(hist, x, *args)
x = F.dropout(x, p=self.dropout, training=self.training)
h = self.convs[(- 1)](x, adj_t)
if (not self.linear):
return h
if self.batch_norm:
h = self.bns[(- 1)](h)
if (self.residual and (h.size((- 1)) == x.size((- 1)))):
h += x[:h.size(0)]
h = h.relu_()
h = F.dropout(h, p=self.dropout, training=self.training)
return self.lins[1](h)
@torch.no_grad()
def forward_layer(self, layer, x, adj_t, state):
if (layer == 0):
if self.drop_input:
x = F.dropout(x, p=self.dropout, training=self.training)
if self.linear:
x = self.lins[0](x).relu_()
x = F.dropout(x, p=self.dropout, training=self.training)
else:
x = F.dropout(x, p=self.dropout, training=self.training)
h = self.convs[layer](x, adj_t)
if ((layer < (self.num_layers - 1)) or self.linear):
if self.batch_norm:
h = self.bns[layer](h)
if (self.residual and (h.size((- 1)) == x.size((- 1)))):
h += x[:h.size(0)]
h = h.relu_()
if self.linear:
h = F.dropout(h, p=self.dropout, training=self.training)
h = self.lins[1](h)
return h |
def parallel_test(model_cls, model_kwargs, checkpoint, dataset, data_func, gpus, workers_per_gpu=1):
ctx = multiprocessing.get_context('spawn')
idx_queue = ctx.Queue()
result_queue = ctx.Queue()
num_workers = (len(gpus) * workers_per_gpu)
workers = [ctx.Process(target=worker_func, args=(model_cls, model_kwargs, checkpoint, dataset, data_func, gpus[(i % len(gpus))], idx_queue, result_queue)) for i in range(num_workers)]
for w in workers:
w.daemon = True
w.start()
for i in range(len(dataset)):
idx_queue.put(i)
results = [None for _ in range(len(dataset))]
prog_bar = mmcv.ProgressBar(task_num=len(dataset))
for _ in range(len(dataset)):
(idx, res) = result_queue.get()
results[idx] = res
prog_bar.update()
print('\n')
for worker in workers:
worker.terminate()
return results |
def get_mixer_args(args):
args.rnn_hidden_dim = 64
args.qmix_hidden_dim = 32
args.two_hyper_layers = False
args.hyper_hidden_dim = 64
args.qtran_hidden_dim = 64
args.lr = 0.0005
args.epsilon = 1
args.min_epsilon = 0.05
anneal_steps = 50000
args.anneal_epsilon = ((args.epsilon - args.min_epsilon) / anneal_steps)
args.epsilon_anneal_scale = 'step'
args.train_steps = 1
args.batch_size = 32
args.buffer_size = 5000
args.save_cycle = 5000
args.target_update_cycle = 200
args.lambda_opt = 1
args.lambda_nopt = 1
args.grad_norm_clip = 10
args.noise_dim = 16
args.lambda_mi = 0.001
args.lambda_ql = 1
args.entropy_coefficient = 0.001
return args |
def test_kernels_diagK():
n_b = 2
n = 10
m = 20
d = 3
dists = [Euclid.distance, Euclid.distance]
for (i, kerneltype) in enumerate([QuadExp, Matern, Linear]):
if (kerneltype is Linear):
kernel = kerneltype(n, d)
else:
kernel = kerneltype(n, dists[i])
x = torch.randn(n_b, n, d, m)
diagK1 = kernel.diagK(x)
diagK2 = torch.diagonal(kernel(x, x), dim1=2, dim2=3)
assert torch.allclose(diagK1, diagK2) |
class PolynomialRing_dense_mod_p(PolynomialRing_dense_finite_field, PolynomialRing_dense_mod_n, PolynomialRing_singular_repr):
def __init__(self, base_ring, name='x', implementation=None, element_class=None, category=None):
if (element_class is None):
given_implementation = implementation
for implementation in self._implementation_names(implementation, base_ring):
if (implementation == 'FLINT'):
try:
from .polynomial_zmod_flint import Polynomial_zmod_flint as element_class
except ImportError:
if given_implementation:
raise
continue
self._implementation_repr = ''
elif (implementation == 'NTL'):
try:
from .polynomial_modn_dense_ntl import Polynomial_dense_mod_p as element_class
except ImportError:
if given_implementation:
raise
continue
self._implementation_repr = ' (using NTL)'
elif (implementation == 'GF2X'):
try:
from .polynomial_gf2x import Polynomial_GF2X as element_class
except ImportError:
if given_implementation:
raise
continue
self._implementation_repr = ' (using GF2X)'
break
PolynomialRing_dense_mod_n.__init__(self, base_ring, name=name, implementation=implementation, element_class=element_class, category=category)
self._has_singular = can_convert_to_singular(self)
@staticmethod
def _implementation_names_impl(implementation, base_ring, sparse):
if sparse:
return NotImplemented
modulus = base_ring.characteristic()
if (modulus == 2):
defaults = ['GF2X', 'NTL', None]
elif (implementation == 'GF2X'):
raise ValueError('GF2X only supports modulus 2')
elif (modulus <= sys.maxsize):
defaults = ['FLINT', None]
elif (implementation == 'FLINT'):
raise ValueError(('FLINT does not support modulus %s' % modulus))
else:
defaults = ['NTL', None]
if (implementation in defaults):
return defaults
elif (implementation in ['NTL', 'FLINT']):
return [implementation]
return NotImplemented
def irreducible_element(self, n, algorithm=None):
from sage.libs.pari.all import pari
from sage.rings.finite_rings.conway_polynomials import conway_polynomial, exists_conway_polynomial
p = self.characteristic()
n = int(n)
if (n < 1):
raise ValueError('degree must be at least 1')
if (algorithm is None):
if (n == 1):
return self(((- 1), 1))
elif exists_conway_polynomial(p, n):
algorithm = 'conway'
elif (p == 2):
try:
from .polynomial_gf2x import GF2X_BuildSparseIrred_list
except ImportError:
algorithm = 'adleman-lenstra'
else:
algorithm = 'minimal_weight'
else:
algorithm = 'adleman-lenstra'
elif (algorithm == 'primitive'):
if exists_conway_polynomial(p, n):
algorithm = 'conway'
else:
algorithm = 'ffprimroot'
if (algorithm == 'adleman-lenstra'):
return self(pari(p).ffinit(n))
elif (algorithm == 'conway'):
return self(conway_polynomial(p, n))
elif (algorithm == 'first_lexicographic'):
if (p == 2):
from .polynomial_gf2x import GF2X_BuildIrred_list
return self(GF2X_BuildIrred_list(n))
else:
pass
elif (algorithm == 'ffprimroot'):
return self(pari(p).ffinit(n).ffgen().ffprimroot().charpoly())
elif (algorithm == 'minimal_weight'):
if (p == 2):
from .polynomial_gf2x import GF2X_BuildSparseIrred_list
return self(GF2X_BuildSparseIrred_list(n))
else:
raise NotImplementedError("'minimal_weight' option only implemented for p = 2")
elif (algorithm == 'random'):
if (p == 2):
from .polynomial_gf2x import GF2X_BuildRandomIrred_list
return self(GF2X_BuildRandomIrred_list(n))
else:
pass
return PolynomialRing_dense_finite_field.irreducible_element(self, n, algorithm) |
def _compute_f1(origin, found, right):
recall = (0 if (origin == 0) else (right / origin))
precision = (0 if (found == 0) else (right / found))
f1 = (0.0 if ((recall + precision) == 0) else (((2 * precision) * recall) / (precision + recall)))
return (recall, precision, f1) |
def measure(sdfg, dreport=None, repetitions=30, print_report: bool=False):
arguments = {}
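# Materialize one concrete argument per non-transient array: prefer the first
# recorded version from the instrumented data report (dreport), falling back
# to random data when the report lacks the array or the shapes disagree.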
for cstate in sdfg.nodes():
for dnode in cstate.data_nodes():
array = sdfg.arrays[dnode.data]
if array.transient:
continue
if (dreport is not None):
try:
data = dreport.get_first_version(dnode.data)
if (data.shape != array.shape):
data = np.random.rand(*array.shape)
arguments[dnode.data] = dace.data.make_array_from_descriptor(array, data)
except KeyError:
arguments[dnode.data] = dace.data.make_array_from_descriptor(array)
else:
arguments[dnode.data] = dace.data.make_array_from_descriptor(array, np.random.rand(*array.shape))
try:
with dace.config.set_temporary('debugprint', value=True):
with dace.config.set_temporary('instrumentation', 'report_each_invocation', value=False):
with dace.config.set_temporary('compiler', 'allow_view_arguments', value=True):
csdfg = sdfg.compile()
for _ in range(repetitions):
csdfg(**arguments)
csdfg.finalize()
except Exception:
return math.inf
report = sdfg.get_latest_report()
if print_report:
print(report)
durations = next(iter(next(iter(report.durations.values())).values()))
return np.median(np.array(durations)) |
class Planner():
def __init__(self) -> None:
pass
def gradient(self) -> torch.Tensor:
pass |
def filter_clusters(clusters: List, threshold: int=1) -> List:
return [tuple((tuple(mention) for mention in cluster)) for cluster in clusters if (len(cluster) >= threshold)] |
def _compute_length_inputs(path: str, target_shape: Tuple[(int, int)]):
(w, h) = get_image_shape_without_loading(path)
ratio = (w / h)
new_h = target_shape[0]
new_w = np.minimum((new_h * ratio), target_shape[1])
return new_w |
def register_types(module):
root_module = module.get_root()
module.add_enum('LogLevel', ['LOG_NONE', 'LOG_ERROR', 'LOG_LEVEL_ERROR', 'LOG_WARN', 'LOG_LEVEL_WARN', 'LOG_DEBUG', 'LOG_LEVEL_DEBUG', 'LOG_INFO', 'LOG_LEVEL_INFO', 'LOG_FUNCTION', 'LOG_LEVEL_FUNCTION', 'LOG_LOGIC', 'LOG_LEVEL_LOGIC', 'LOG_ALL', 'LOG_LEVEL_ALL', 'LOG_PREFIX_FUNC', 'LOG_PREFIX_TIME', 'LOG_PREFIX_NODE', 'LOG_PREFIX_LEVEL', 'LOG_PREFIX_ALL'], import_from_module='ns.core')
module.add_class('Address', import_from_module='ns.network')
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
module.add_class('AttributeConstructionList', import_from_module='ns.core')
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
module.add_class('Buffer', import_from_module='ns.network')
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
module.add_class('ByteTagIterator', import_from_module='ns.network')
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
module.add_class('ByteTagList', import_from_module='ns.network')
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
module.add_class('CallbackBase', import_from_module='ns.core')
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeChecker'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeValue'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::NixVector'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor'])
module.add_class('Hasher', import_from_module='ns.core')
module.add_class('Inet6SocketAddress', import_from_module='ns.network')
root_module['ns3::Inet6SocketAddress'].implicitly_converts_to(root_module['ns3::Address'])
module.add_class('InetSocketAddress', import_from_module='ns.network')
root_module['ns3::InetSocketAddress'].implicitly_converts_to(root_module['ns3::Address'])
module.add_class('Ipv4Address', import_from_module='ns.network')
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
module.add_class('Ipv4InterfaceAddress', import_from_module='ns.internet')
module.add_enum('InterfaceAddressScope_e', ['HOST', 'LINK', 'GLOBAL'], outer_class=root_module['ns3::Ipv4InterfaceAddress'], import_from_module='ns.internet')
module.add_class('Ipv4Mask', import_from_module='ns.network')
module.add_class('Ipv6Address', import_from_module='ns.network')
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
module.add_class('Ipv6Prefix', import_from_module='ns.network')
module.add_class('LogComponent', import_from_module='ns.core')
module.add_class('Mac48Address', import_from_module='ns.network')
root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
module.add_class('NonCopyable', destructor_visibility='protected', import_from_module='ns.core')
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
module.add_class('ObjectDeleter', import_from_module='ns.core')
module.add_class('PacketMetadata', import_from_module='ns.network')
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
module.add_enum('ItemType', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
module.add_class('PacketTagIterator', import_from_module='ns.network')
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
module.add_class('PacketTagList', import_from_module='ns.network')
module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
module.add_class('ParameterLogger', import_from_module='ns.core')
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SystemWallClockMs', import_from_module='ns.core')
module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
module.add_class('TagBuffer', import_from_module='ns.network')
module.add_class('TimeWithUnit', import_from_module='ns.core')
module.add_class('TypeId', import_from_module='ns.core')
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
module.add_class('empty', import_from_module='ns.core')
module.add_class('int64x64_t', import_from_module='ns.core')
module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
module.add_class('Ipv4Header', import_from_module='ns.internet', parent=root_module['ns3::Header'])
module.add_enum('DscpType', ['DscpDefault', 'DSCP_CS1', 'DSCP_AF11', 'DSCP_AF12', 'DSCP_AF13', 'DSCP_CS2', 'DSCP_AF21', 'DSCP_AF22', 'DSCP_AF23', 'DSCP_CS3', 'DSCP_AF31', 'DSCP_AF32', 'DSCP_AF33', 'DSCP_CS4', 'DSCP_AF41', 'DSCP_AF42', 'DSCP_AF43', 'DSCP_CS5', 'DSCP_EF', 'DSCP_CS6', 'DSCP_CS7'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet')
module.add_enum('EcnType', ['ECN_NotECT', 'ECN_ECT1', 'ECN_ECT0', 'ECN_CE'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet')
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Ipv4MulticastRoute', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4MulticastRoute>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Ipv4Route', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4Route>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::OutputStreamWrapper', 'ns3::empty', 'ns3::DefaultDeleter<ns3::OutputStreamWrapper>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('Socket', import_from_module='ns.network', parent=root_module['ns3::Object'])
module.add_enum('SocketErrno', ['ERROR_NOTERROR', 'ERROR_ISCONN', 'ERROR_NOTCONN', 'ERROR_MSGSIZE', 'ERROR_AGAIN', 'ERROR_SHUTDOWN', 'ERROR_OPNOTSUPP', 'ERROR_AFNOSUPPORT', 'ERROR_INVAL', 'ERROR_BADF', 'ERROR_NOROUTETOHOST', 'ERROR_NODEV', 'ERROR_ADDRNOTAVAIL', 'ERROR_ADDRINUSE', 'SOCKET_ERRNO_LAST'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
module.add_enum('SocketType', ['NS3_SOCK_STREAM', 'NS3_SOCK_SEQPACKET', 'NS3_SOCK_DGRAM', 'NS3_SOCK_RAW'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
module.add_enum('SocketPriority', ['NS3_PRIO_BESTEFFORT', 'NS3_PRIO_FILLER', 'NS3_PRIO_BULK', 'NS3_PRIO_INTERACTIVE_BULK', 'NS3_PRIO_INTERACTIVE', 'NS3_PRIO_CONTROL'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
module.add_enum('Ipv6MulticastFilterMode', ['INCLUDE', 'EXCLUDE'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
module.add_class('SocketIpTosTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
module.add_class('SocketIpTtlTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
module.add_class('SocketIpv6HopLimitTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
module.add_class('SocketIpv6TclassTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
module.add_class('SocketPriorityTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
module.add_class('SocketSetDontFragmentTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
module.add_class('Time', import_from_module='ns.core')
module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor'])
module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('Ipv4', import_from_module='ns.internet', parent=root_module['ns3::Object'])
module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('Ipv4Interface', import_from_module='ns.internet', parent=root_module['ns3::Object'])
module.add_class('Ipv4L3ClickProtocol', parent=root_module['ns3::Ipv4'])
module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('Ipv4MulticastRoute', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >'])
module.add_class('Ipv4Route', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >'])
module.add_class('Ipv4RoutingProtocol', import_from_module='ns.internet', parent=root_module['ns3::Object'])
module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
module.add_class('OutputStreamWrapper', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['bool', 'ns3::Ptr<ns3::Socket>', 'const ns3::Address &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['ns3::ObjectBase *', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::Socket>', 'const ns3::Address &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::Socket>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::Socket>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('Ipv4ClickRouting', parent=root_module['ns3::Ipv4RoutingProtocol'])
module.add_container('std::map< std::string, ns3::LogComponent * >', ('std::string', 'ns3::LogComponent *'), container_type=u'map')
module.add_container('std::vector< ns3::Ipv6Address >', 'ns3::Ipv6Address', container_type=u'vector')
module.add_container('std::map< unsigned int, unsigned int >', ('unsigned int', 'unsigned int'), container_type=u'map')
typehandlers.add_type_alias(u'void ( * ) ( std::ostream & )', u'ns3::LogTimePrinter')
typehandlers.add_type_alias(u'void ( * ) ( std::ostream & )*', u'ns3::LogTimePrinter*')
typehandlers.add_type_alias(u'void ( * ) ( std::ostream & )&', u'ns3::LogTimePrinter&')
typehandlers.add_type_alias(u'void ( * ) ( std::ostream & )', u'ns3::LogNodePrinter')
typehandlers.add_type_alias(u'void ( * ) ( std::ostream & )*', u'ns3::LogNodePrinter*')
typehandlers.add_type_alias(u'void ( * ) ( std::ostream & )&', u'ns3::LogNodePrinter&')
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
nested_module = module.add_cpp_namespace('Hash')
register_types_ns3_Hash(nested_module)
nested_module = module.add_cpp_namespace('TracedValueCallback')
register_types_ns3_TracedValueCallback(nested_module)
nested_module = module.add_cpp_namespace('tests')
register_types_ns3_tests(nested_module) |
class NMNIST(Dataset):
def __init__(self, dataset_path, n_steps, transform=None):
self.path = dataset_path
self.samples = []
self.labels = []
self.transform = transform
self.n_steps = n_steps
for i in tqdm(range(10)):
sample_dir = (((dataset_path + '/') + str(i)) + '/')
for f in listdir(sample_dir):
filename = join(sample_dir, f)
if isfile(filename):
self.samples.append(filename)
self.labels.append(i)
def __getitem__(self, index):
filename = self.samples[index]
label = self.labels[index]
data = np.zeros((2, 34, 34, self.n_steps))
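# Each line of a sample file reads "pos t1 t2 ...": pos-1 indexes a 34x34
# grid (1156 cells), events with pos-1 >= 1156 belong to the second polarity
# channel, and the remaining integers are 1-based spike time steps.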
f = open(filename, 'r')
lines = f.readlines()
f.close()
for line in lines:
if (line is None):
break
line = line.split()
line = [int(l) for l in line]
pos = (line[0] - 1)
if (pos >= 1156):
channel = 1
pos -= 1156
else:
channel = 0
y = (pos % 34)
x = int(math.floor((pos / 34)))
for i in range(1, len(line)):
if (line[i] >= self.n_steps):
break
data[(channel, x, y, (line[i] - 1))] = 1
if self.transform:
data = self.transform(data)
data = data.type(torch.float32)
else:
data = torch.FloatTensor(data)
return (data, label)
def __len__(self):
return len(self.samples) |
@nlp_labeling_function(text_field='article', pre=[combine_text], resources=dict(celebrity_knowledge_base=load_celebrity_knowledge_base()))
def person_in_db(x, celebrity_knowledge_base):
for ent in x.doc.ents:
if ((ent.label_ == 'PERSON') and (ent.text.lower() in celebrity_knowledge_base)):
return POSITIVE
return ABSTAIN |
def tia_perspective(src):
(img_h, img_w) = src.shape[:2]
thresh = (img_h // 2)
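# Perspective jitter: keep the four source corners fixed and move each
# destination corner vertically by up to half the image height, then warp
# with a moving-least-squares (WarpMLS) transform.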
src_pts = list()
dst_pts = list()
src_pts.append([0, 0])
src_pts.append([img_w, 0])
src_pts.append([img_w, img_h])
src_pts.append([0, img_h])
dst_pts.append([0, np.random.randint(thresh)])
dst_pts.append([img_w, np.random.randint(thresh)])
dst_pts.append([img_w, (img_h - np.random.randint(thresh))])
dst_pts.append([0, (img_h - np.random.randint(thresh))])
trans = WarpMLS(src, src_pts, dst_pts, img_w, img_h)
dst = trans.generate()
return dst |
def eval_single_model(dataset, model_name):
top_1 = tf.keras.metrics.SparseCategoricalAccuracy()
top_5 = tf.keras.metrics.SparseTopKCategoricalAccuracy(k=5)
model = timm.create_model(model_name, pretrained=True)
model.eval()
model = model.to('cuda')
all_top_1 = []
all_top_5 = []
for (image_batch, label_batch) in dataset.as_numpy_iterator():
with torch.no_grad():
image_batch = torch.Tensor(image_batch).to('cuda')
image_batch = image_batch.permute(0, 3, 1, 2)
logits = model(image_batch)
batch_accuracy_top_1 = top_1(label_batch, logits.cpu().numpy())
batch_accuracy_top_5 = top_5(label_batch, logits.cpu().numpy())
all_top_1.append(batch_accuracy_top_1)
all_top_5.append(batch_accuracy_top_5)
return (np.mean(all_top_1), np.mean(all_top_5)) |
def test_read_yaml_params():
trainer = SingleObjectiveTrainer(dataHandler, model, correctness_loss, validation_metrics, save_to_path, params)
assert (trainer.seed == 42)
assert (trainer.learning_rate == 0.001)
assert (trainer.batch_size_training == 500)
assert (trainer.shuffle_training is True)
assert (trainer.drop_last_batch_training is True)
assert (trainer.batch_size_validation == 500)
assert (trainer.shuffle_validation is True)
assert (trainer.drop_last_batch_validation is False)
assert (trainer.number_of_epochs == 50)
assert (trainer.anneal is True)
assert (trainer.beta_start == 0)
assert (trainer.beta_cap == 0.3)
assert (trainer.beta_step == (0.3 / 10000)) |
class ColorAugmenterBase(dptaugmenterbase.AugmenterBase):
def __init__(self, keyword):
super().__init__(keyword=keyword) |
def presid_gmres(A, b, verbose, x0=None, tol=1e-05, restart=None, maxiter=None, M=None, **kwargs):
callback = gmres_counter(verbose)
(A, M, x, b, postprocess) = make_system(A, M, x0, b)
n = len(b)
if (maxiter is None):
maxiter = (n * 10)
if (restart is None):
restart = 20
restart = min(restart, n)
matvec = A.matvec
psolve = M.matvec
ltr = _type_conv[x.dtype.char]
revcom = getattr(_iterative, (ltr + 'gmresrevcom'))
bnrm2 = np.linalg.norm(b)
Mb_nrm2 = np.linalg.norm(psolve(b))
get_residual = (lambda : np.linalg.norm((matvec(x) - b)))
atol = tol
if (bnrm2 == 0):
return (postprocess(b), 0)
ptol_max_factor = 1.0
ptol = (Mb_nrm2 * min(ptol_max_factor, (atol / bnrm2)))
resid = np.nan
presid = np.nan
ndx1 = 1
ndx2 = (- 1)
work = _aligned_zeros(((6 + restart) * n), dtype=x.dtype)
work2 = _aligned_zeros(((restart + 1) * ((2 * restart) + 2)), dtype=x.dtype)
ijob = 1
info = 0
ftflag = True
iter_ = maxiter
old_ijob = ijob
first_pass = True
resid_ready = False
iter_num = 1
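# Reverse-communication loop around the Fortran *gmresrevcom kernel:
# ijob == -1 terminates, 1 requests work <- sclr1*A@x + sclr2*work,
# 2 requests a preconditioner solve, 3 applies A to a work slice, and
# 4 runs the stopping test on the current residual.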
while True:
if ((presid / bnrm2) < atol):
resid = (presid / bnrm2)
info = 1
if info:
ptol = 10000
(x, iter_, presid, info, ndx1, ndx2, sclr1, sclr2, ijob) = revcom(b, x, restart, work, work2, iter_, presid, info, ndx1, ndx2, ijob, ptol)
slice1 = slice((ndx1 - 1), ((ndx1 - 1) + n))
slice2 = slice((ndx2 - 1), ((ndx2 - 1) + n))
if (ijob == (- 1)):
if (resid_ready and (callback is not None)):
callback((presid / bnrm2))
resid_ready = False
break
elif (ijob == 1):
work[slice2] *= sclr2
work[slice2] += (sclr1 * matvec(x))
elif (ijob == 2):
work[slice1] = psolve(work[slice2])
if ((not first_pass) and (old_ijob == 3)):
resid_ready = True
first_pass = False
elif (ijob == 3):
work[slice2] *= sclr2
work[slice2] += (sclr1 * matvec(work[slice1]))
if (resid_ready and (callback is not None)):
callback((presid / bnrm2))
resid_ready = False
iter_num = (iter_num + 1)
elif (ijob == 4):
if ftflag:
info = (- 1)
ftflag = False
(resid, info) = _stoptest(work[slice1], atol)
if (info or (presid > ptol)):
ptol_max_factor = min(1.0, (1.5 * ptol_max_factor))
else:
ptol_max_factor = max(1e-16, (0.25 * ptol_max_factor))
if (resid != 0):
ptol = (presid * min(ptol_max_factor, (atol / resid)))
else:
ptol = (presid * ptol_max_factor)
old_ijob = ijob
ijob = 2
if (iter_num > maxiter):
info = maxiter
break
if ((info >= 0) and (not (resid <= atol))):
info = maxiter
# NOTE: `mydict` was undefined in the original; the residual history is
# assumed to be accumulated by the gmres_counter callback (the attribute name
# below is hypothetical).
return (postprocess(x), info, callback.resnorms)
def projection_head(hiddens, is_training, name='head_contrastive'):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
mid_dim = hiddens.shape[(- 1)]
out_dim = FLAGS.proj_out_dim
hiddens_list = [hiddens]
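# Three projection-head variants (SimCLR-style): 'none' passes hiddens through
# unchanged, 'linear' applies a single BN'd linear layer, and 'nonlinear'
# stacks num_proj_layers linear+BN layers with ReLU on all but the last.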
if (FLAGS.proj_head_mode == 'none'):
pass
elif (FLAGS.proj_head_mode == 'linear'):
hiddens = linear_layer(hiddens, is_training, out_dim, use_bias=False, use_bn=True, name='l_0')
hiddens_list.append(hiddens)
elif (FLAGS.proj_head_mode == 'nonlinear'):
for j in range(FLAGS.num_proj_layers):
if (j != (FLAGS.num_proj_layers - 1)):
(dim, bias_relu) = (mid_dim, True)
else:
(dim, bias_relu) = (FLAGS.proj_out_dim, False)
hiddens = linear_layer(hiddens, is_training, dim, use_bias=bias_relu, use_bn=True, name=('nl_%d' % j))
hiddens = (tf.nn.relu(hiddens) if bias_relu else hiddens)
hiddens_list.append(hiddens)
else:
raise ValueError('Unknown head projection mode {}'.format(FLAGS.proj_head_mode))
if (FLAGS.train_mode == 'pretrain'):
hiddens = hiddens_list[(- 1)]
else:
hiddens = hiddens_list[FLAGS.ft_proj_selector]
return hiddens |
def create_model(model_name='mnasnet_100', pretrained=None, num_classes=1000, in_chans=3, checkpoint_path='', **kwargs):
model_kwargs = dict(num_classes=num_classes, in_chans=in_chans, pretrained=pretrained, **kwargs)
if (model_name in globals()):
create_fn = globals()[model_name]
model = create_fn(**model_kwargs)
else:
raise RuntimeError(('Unknown model (%s)' % model_name))
if (checkpoint_path and (not pretrained)):
load_checkpoint(model, checkpoint_path)
return model |
@Model.register('bidaf')
class BidirectionalAttentionFlow(Model):
def __init__(self, vocab: Vocabulary, text_field_embedder: TextFieldEmbedder, num_highway_layers: int, phrase_layer: Seq2SeqEncoder, attention_similarity_function: SimilarityFunction, modeling_layer: Seq2SeqEncoder, span_end_encoder: Seq2SeqEncoder, dropout: float=0.2, mask_lstms: bool=True, initializer: InitializerApplicator=InitializerApplicator(), regularizer: Optional[RegularizerApplicator]=None) -> None:
super(BidirectionalAttentionFlow, self).__init__(vocab, regularizer)
self._text_field_embedder = text_field_embedder
self._highway_layer = TimeDistributed(Highway(text_field_embedder.get_output_dim(), num_highway_layers))
self._phrase_layer = phrase_layer
self._matrix_attention = MatrixAttention(attention_similarity_function)
self._modeling_layer = modeling_layer
self._span_end_encoder = span_end_encoder
encoding_dim = phrase_layer.get_output_dim()
modeling_dim = modeling_layer.get_output_dim()
span_start_input_dim = ((encoding_dim * 4) + modeling_dim)
self._span_start_predictor = TimeDistributed(torch.nn.Linear(span_start_input_dim, 1))
span_end_encoding_dim = span_end_encoder.get_output_dim()
span_end_input_dim = ((encoding_dim * 4) + span_end_encoding_dim)
self._span_end_predictor = TimeDistributed(torch.nn.Linear(span_end_input_dim, 1))
if (modeling_layer.get_input_dim() != (4 * encoding_dim)):
raise ConfigurationError('The input dimension to the modeling_layer must be equal to 4 times the encoding dimension of the phrase_layer. Found {} and 4 * {} respectively.'.format(modeling_layer.get_input_dim(), encoding_dim))
if (text_field_embedder.get_output_dim() != phrase_layer.get_input_dim()):
raise ConfigurationError('The output dimension of the text_field_embedder (embedding_dim + char_cnn) must match the input dimension of the phrase_encoder. Found {} and {}, respectively.'.format(text_field_embedder.get_output_dim(), phrase_layer.get_input_dim()))
if (span_end_encoder.get_input_dim() != ((encoding_dim * 4) + (modeling_dim * 3))):
raise ConfigurationError('The input dimension of the span_end_encoder should be equal to 4 * phrase_layer.output_dim + 3 * modeling_layer.output_dim. Found {} and (4 * {} + 3 * {}) respectively.'.format(span_end_encoder.get_input_dim(), encoding_dim, modeling_dim))
self._span_start_accuracy = CategoricalAccuracy()
self._span_end_accuracy = CategoricalAccuracy()
self._span_accuracy = BooleanAccuracy()
self._squad_metrics = SquadEmAndF1()
if (dropout > 0):
self._dropout = torch.nn.Dropout(p=dropout)
else:
self._dropout = (lambda x: x)
self._mask_lstms = mask_lstms
initializer(self)
def forward(self, question: Dict[(str, torch.LongTensor)], passage: Dict[(str, torch.LongTensor)], span_start: torch.IntTensor=None, span_end: torch.IntTensor=None, metadata: List[Dict[(str, Any)]]=None) -> Dict[(str, torch.Tensor)]:
embedded_question = self._highway_layer(self._text_field_embedder(question))
embedded_passage = self._highway_layer(self._text_field_embedder(passage))
batch_size = embedded_question.size(0)
passage_length = embedded_passage.size(1)
question_mask = util.get_text_field_mask(question).float()
passage_mask = util.get_text_field_mask(passage).float()
question_lstm_mask = (question_mask if self._mask_lstms else None)
passage_lstm_mask = (passage_mask if self._mask_lstms else None)
encoded_question = self._dropout(self._phrase_layer(embedded_question, question_lstm_mask))
encoded_passage = self._dropout(self._phrase_layer(embedded_passage, passage_lstm_mask))
encoding_dim = encoded_question.size((- 1))
passage_question_similarity = self._matrix_attention(encoded_passage, encoded_question)
passage_question_attention = util.last_dim_softmax(passage_question_similarity, question_mask)
passage_question_vectors = util.weighted_sum(encoded_question, passage_question_attention)
masked_similarity = util.replace_masked_values(passage_question_similarity, question_mask.unsqueeze(1), -1e7)
question_passage_similarity = masked_similarity.max(dim=(- 1))[0].squeeze((- 1))
question_passage_attention = util.masked_softmax(question_passage_similarity, passage_mask)
question_passage_vector = util.weighted_sum(encoded_passage, question_passage_attention)
tiled_question_passage_vector = question_passage_vector.unsqueeze(1).expand(batch_size, passage_length, encoding_dim)
final_merged_passage = torch.cat([encoded_passage, passage_question_vectors, (encoded_passage * passage_question_vectors), (encoded_passage * tiled_question_passage_vector)], dim=(- 1))
modeled_passage = self._dropout(self._modeling_layer(final_merged_passage, passage_lstm_mask))
modeling_dim = modeled_passage.size((- 1))
span_start_input = self._dropout(torch.cat([final_merged_passage, modeled_passage], dim=(- 1)))
span_start_logits = self._span_start_predictor(span_start_input).squeeze((- 1))
span_start_probs = util.masked_softmax(span_start_logits, passage_mask)
span_start_representation = util.weighted_sum(modeled_passage, span_start_probs)
tiled_start_representation = span_start_representation.unsqueeze(1).expand(batch_size, passage_length, modeling_dim)
span_end_representation = torch.cat([final_merged_passage, modeled_passage, tiled_start_representation, (modeled_passage * tiled_start_representation)], dim=(- 1))
encoded_span_end = self._dropout(self._span_end_encoder(span_end_representation, passage_lstm_mask))
span_end_input = self._dropout(torch.cat([final_merged_passage, encoded_span_end], dim=(- 1)))
span_end_logits = self._span_end_predictor(span_end_input).squeeze((- 1))
span_end_probs = util.masked_softmax(span_end_logits, passage_mask)
span_start_logits = util.replace_masked_values(span_start_logits, passage_mask, -1e7)
span_end_logits = util.replace_masked_values(span_end_logits, passage_mask, -1e7)
best_span = self._get_best_span(span_start_logits, span_end_logits)
output_dict = {'span_start_logits': span_start_logits, 'span_start_probs': span_start_probs, 'span_end_logits': span_end_logits, 'span_end_probs': span_end_probs, 'best_span': best_span}
if (span_start is not None):
loss = nll_loss(util.masked_log_softmax(span_start_logits, passage_mask), span_start.squeeze((- 1)))
self._span_start_accuracy(span_start_logits, span_start.squeeze((- 1)))
loss += nll_loss(util.masked_log_softmax(span_end_logits, passage_mask), span_end.squeeze((- 1)))
self._span_end_accuracy(span_end_logits, span_end.squeeze((- 1)))
self._span_accuracy(best_span, torch.stack([span_start, span_end], (- 1)))
output_dict['loss'] = loss
if (metadata is not None):
output_dict['best_span_str'] = []
for i in range(batch_size):
passage_str = metadata[i]['original_passage']
offsets = metadata[i]['token_offsets']
predicted_span = tuple(best_span[i].data.cpu().numpy())
start_offset = offsets[predicted_span[0]][0]
end_offset = offsets[predicted_span[1]][1]
best_span_string = passage_str[start_offset:end_offset]
output_dict['best_span_str'].append(best_span_string)
answer_texts = metadata[i].get('answer_texts', [])
if answer_texts:
self._squad_metrics(best_span_string, answer_texts)
return output_dict
def get_metrics(self, reset: bool=False) -> Dict[(str, float)]:
(exact_match, f1_score) = self._squad_metrics.get_metric(reset)
return {'start_acc': self._span_start_accuracy.get_metric(reset), 'end_acc': self._span_end_accuracy.get_metric(reset), 'span_acc': self._span_accuracy.get_metric(reset), 'em': exact_match, 'f1': f1_score}
@staticmethod
def _get_best_span(span_start_logits: Variable, span_end_logits: Variable) -> Variable:
if ((span_start_logits.dim() != 2) or (span_end_logits.dim() != 2)):
raise ValueError('Input shapes must be (batch_size, passage_length)')
(batch_size, passage_length) = span_start_logits.size()
max_span_log_prob = ([(- 1e+20)] * batch_size)
span_start_argmax = ([0] * batch_size)
best_word_span = Variable(span_start_logits.data.new().resize_(batch_size, 2).fill_(0)).long()
span_start_logits = span_start_logits.data.cpu().numpy()
span_end_logits = span_end_logits.data.cpu().numpy()
for b in range(batch_size):
for j in range(passage_length):
val1 = span_start_logits[(b, span_start_argmax[b])]
if (val1 < span_start_logits[(b, j)]):
span_start_argmax[b] = j
val1 = span_start_logits[(b, j)]
val2 = span_end_logits[(b, j)]
if ((val1 + val2) > max_span_log_prob[b]):
best_word_span[(b, 0)] = span_start_argmax[b]
best_word_span[(b, 1)] = j
max_span_log_prob[b] = (val1 + val2)
return best_word_span
@classmethod
def from_params(cls, vocab: Vocabulary, params: Params) -> 'BidirectionalAttentionFlow':
embedder_params = params.pop('text_field_embedder')
text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
num_highway_layers = params.pop('num_highway_layers')
phrase_layer = Seq2SeqEncoder.from_params(params.pop('phrase_layer'))
similarity_function = SimilarityFunction.from_params(params.pop('similarity_function'))
modeling_layer = Seq2SeqEncoder.from_params(params.pop('modeling_layer'))
span_end_encoder = Seq2SeqEncoder.from_params(params.pop('span_end_encoder'))
dropout = params.pop('dropout', 0.2)
initializer = InitializerApplicator.from_params(params.pop('initializer', []))
regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
mask_lstms = params.pop('mask_lstms', True)
params.assert_empty(cls.__name__)
return cls(vocab=vocab, text_field_embedder=text_field_embedder, num_highway_layers=num_highway_layers, phrase_layer=phrase_layer, attention_similarity_function=similarity_function, modeling_layer=modeling_layer, span_end_encoder=span_end_encoder, dropout=dropout, mask_lstms=mask_lstms, initializer=initializer, regularizer=regularizer) |
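A minimal standalone sketch of the best-span scan implemented in `_get_best_span` above: for each candidate end index it tracks the best start seen so far, so the search is O(passage_length) rather than enumerating all O(passage_length^2) spans. The toy logits below are made up.

import numpy as np

start_logits = np.array([0.1, 2.0, 0.3, 0.0])  # made-up start scores for one passage
end_logits = np.array([0.0, 0.2, 1.5, 0.1])    # made-up end scores
best_score, best_span, start_argmax = -1e20, (0, 0), 0
for j in range(len(start_logits)):
    if start_logits[j] > start_logits[start_argmax]:
        start_argmax = j  # running best start up to position j
    score = start_logits[start_argmax] + end_logits[j]
    if score > best_score:
        best_score, best_span = score, (start_argmax, j)
print(best_span)  # (1, 2): the best start seen so far paired with this end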
def NeuralNetwork(x, params):
x = F.linear(x, params[0], params[1])
x = F.relu(x)
x = F.linear(x, params[2], params[3])
x = F.relu(x)
x = F.linear(x, params[4], params[5])
return x |
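A hedged usage sketch for the functional `NeuralNetwork` above: `params` is assumed to be a flat list alternating weight and bias tensors for the three `F.linear` calls; the layer sizes here are arbitrary.

import torch
import torch.nn.functional as F

def init_params(sizes=(8, 16, 16, 4)):
    # one (weight, bias) pair per F.linear call, in order
    params = []
    for n_in, n_out in zip(sizes[:-1], sizes[1:]):
        params.append(torch.randn(n_out, n_in) * 0.1)  # F.linear expects (out, in)
        params.append(torch.zeros(n_out))
    return params

x = torch.randn(5, 8)                   # batch of 5 inputs
out = NeuralNetwork(x, init_params())   # -> shape (5, 4)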
def normalize_names_markov(names, markov_trace_version):
if markov_trace_version:
names = normalize_names(4, names)
else:
if isinstance(names, tuple):
names = list(names)
if (isinstance(names, list) and (len(names) > 3)):
names = normalize_names(3, names[0:3])
else:
names = normalize_names(3, names)
return names |
def test_ArrayBuilder_record():
@numba.njit
def f1(x):
x.begin_record()
x.field('x').append(1)
x.field('y').append(1.1)
x.end_record()
x.begin_record()
x.field('x').append(2)
x.field('y').append(2.2)
x.end_record()
return x
a = ak.highlevel.ArrayBuilder()
b = f1(a)
assert (ak.operations.to_list(a.snapshot()) == [{'x': 1, 'y': 1.1}, {'x': 2, 'y': 2.2}])
assert (ak.operations.to_list(b.snapshot()) == [{'x': 1, 'y': 1.1}, {'x': 2, 'y': 2.2}])
c = f1.py_func(a)
assert (ak.operations.to_list(a.snapshot()) == [{'x': 1, 'y': 1.1}, {'x': 2, 'y': 2.2}, {'x': 1, 'y': 1.1}, {'x': 2, 'y': 2.2}])
assert (ak.operations.to_list(c.snapshot()) == [{'x': 1, 'y': 1.1}, {'x': 2, 'y': 2.2}, {'x': 1, 'y': 1.1}, {'x': 2, 'y': 2.2}]) |
class TestNormalizeTree(TransformTest):
def test_parserbehaviour_is_what_we_coded_for(self):
t = self.fragment(u'if x: y').root
self.assertLines(u'\n(root): StatListNode\n stats[0]: IfStatNode\n if_clauses[0]: IfClauseNode\n condition: NameNode\n body: ExprStatNode\n expr: NameNode\n', self.treetypes(t))
def test_wrap_singlestat(self):
t = self.run_pipeline([NormalizeTree(None)], u'if x: y')
self.assertLines(u'\n(root): StatListNode\n stats[0]: IfStatNode\n if_clauses[0]: IfClauseNode\n condition: NameNode\n body: StatListNode\n stats[0]: ExprStatNode\n expr: NameNode\n', self.treetypes(t))
def test_wrap_multistat(self):
t = self.run_pipeline([NormalizeTree(None)], u'\n if z:\n x\n y\n ')
self.assertLines(u'\n(root): StatListNode\n stats[0]: IfStatNode\n if_clauses[0]: IfClauseNode\n condition: NameNode\n body: StatListNode\n stats[0]: ExprStatNode\n expr: NameNode\n stats[1]: ExprStatNode\n expr: NameNode\n', self.treetypes(t))
def test_statinexpr(self):
t = self.run_pipeline([NormalizeTree(None)], u'\n a, b = x, y\n ')
self.assertLines(u'\n(root): StatListNode\n stats[0]: SingleAssignmentNode\n lhs: TupleNode\n args[0]: NameNode\n args[1]: NameNode\n rhs: TupleNode\n args[0]: NameNode\n args[1]: NameNode\n', self.treetypes(t))
def test_wrap_offagain(self):
t = self.run_pipeline([NormalizeTree(None)], u'\n x\n y\n if z:\n x\n ')
self.assertLines(u'\n(root): StatListNode\n stats[0]: ExprStatNode\n expr: NameNode\n stats[1]: ExprStatNode\n expr: NameNode\n stats[2]: IfStatNode\n if_clauses[0]: IfClauseNode\n condition: NameNode\n body: StatListNode\n stats[0]: ExprStatNode\n expr: NameNode\n', self.treetypes(t))
def test_pass_eliminated(self):
t = self.run_pipeline([NormalizeTree(None)], u'pass')
self.assertTrue((len(t.stats) == 0)) |
class Dataset():
_info: DatasetInfo = DatasetInfo()
_citation: str = ''
def __getitem__(self, index) -> Music:
raise NotImplementedError
def __len__(self) -> int:
raise NotImplementedError
@classmethod
def info(cls):
return cls._info
@classmethod
def citation(cls):
return cls._citation
def save(self, root: Union[(str, Path)], kind: str='json', n_jobs: int=1, ignore_exceptions: bool=True, verbose: bool=True, **kwargs):
if (kind not in ('json', 'yaml')):
raise TypeError("`kind` must be either 'json' or 'yaml'.")
root = Path(root).expanduser().resolve()
root.mkdir(exist_ok=True)
def _saver(idx):
prefix = ('0' * (n_digits - len(str(idx))))
filename = (root / (((prefix + str(idx)) + '.') + kind))
if ignore_exceptions:
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
save(filename, self[idx], kind, **kwargs)
except Exception:
return False
return True
save(filename, self[idx], kind, **kwargs)
return True
n_digits = len(str(len(self)))
if verbose:
print('Converting and saving the dataset...')
if (n_jobs == 1):
count = 0
for idx in tqdm(range(len(self))):
if _saver(idx):
count += 1
else:
results = Parallel(n_jobs=n_jobs, backend='threading', verbose=5)((delayed(_saver)(idx) for idx in range(len(self))))
count = results.count(True)
if verbose:
print(f'Successfully saved {count} out of {len(self)} files.')
def split(self, filename: Union[(str, Path)]=None, splits: Sequence[float]=None, random_state: Any=None) -> Dict[(str, List[int])]:
if ((filename is not None) and Path(filename).is_file()):
with open(str(filename), encoding='utf-8') as f:
return json.load(f)
if (not isinstance(splits, (float, list, tuple))):
raise TypeError('`splits` must be of type float, list or tuple.')
if isinstance(splits, float):
if (splits <= 0):
raise ValueError('`splits` must be positive.')
if (splits >= 1):
raise ValueError('`splits` must be less than 1.')
splits = [splits, (1 - splits)]
if isinstance(splits, (list, tuple)):
if (sum(splits) != 1):
raise ValueError('`splits` must sum to 1.')
if ((len(splits) < 2) or (len(splits) > 3)):
raise ValueError('`splits` must have length 2 or 3.')
if (random_state is None):
rand_indices = permutation(len(self))
else:
if (not isinstance(random_state, RandomState)):
random_state = RandomState(random_state)
rand_indices = random_state.permutation(len(self))
boundaries = np.cumsum(([0.0] + list(splits)))
names = ('train', 'test', 'validation')
indices = {}
for (idx, (start, end)) in enumerate(zip(boundaries[:(- 1)], boundaries[1:])):
start_idx = int((start * len(self)))
end_idx = int((end * len(self)))
indices[names[idx]] = rand_indices[start_idx:end_idx]
if (filename is not None):
indices_ = {key: value.tolist() for (key, value) in indices.items()}
with open(str(filename), 'w', encoding='utf-8') as f:
f.write(json.dumps(indices_))
return indices
def to_pytorch_dataset(self, factory: Callable=None, representation: str=None, split_filename: Union[(str, Path)]=None, splits: Sequence[float]=None, random_state: Any=None, **kwargs: Any) -> Union[('TorchDataset', Dict[(str, 'TorchDataset')])]:
if ((representation is None) and (factory is None)):
raise TypeError('One of `representation` and `factory` must be given.')
if ((representation is not None) and (factory is not None)):
raise TypeError('Only one of `representation` and `factory` can be given.')
try:
from torch.utils.data import Dataset as TorchDataset
except ImportError as err:
raise ImportError('Optional package pytorch is required.') from err
class TorchMusicFactoryDataset(TorchDataset):
def __init__(self, dataset: Dataset, factory: Callable, subset: str='Full', indices: Sequence[int]=None):
super().__init__()
self.dataset = dataset
self.factory = factory
self.subset = subset
self.indices = indices
if (self.indices is not None):
self.indices = sorted((idx for idx in self.indices if (idx < len(self.dataset))))
def __repr__(self) -> str:
return f'TorchMusicFactoryDataset(dataset={self.dataset}, factory={self.factory}, subset={self.subset})'
def __getitem__(self, index):
if (self.indices is None):
return self.factory(self.dataset[index])
return self.factory(self.dataset[self.indices[index]])
def __len__(self) -> int:
if (self.indices is None):
return len(self.dataset)
return len(self.indices)
class TorchRepresentationDataset(TorchMusicFactoryDataset):
def __init__(self, dataset: Dataset, representation: str, subset: str='Full', indices: Sequence[int]=None, **kwargs: Any):
self.representation = representation
def factory(music):
return music.to_representation(representation, **kwargs)
super().__init__(dataset, factory=factory, subset=subset, indices=indices)
def __repr__(self) -> str:
return f'TorchRepresentationDataset(dataset={self.dataset}, representation={self.representation}, subset={self.subset})'
if (splits is None):
if (representation is not None):
return TorchRepresentationDataset(self, representation, **kwargs)
return TorchMusicFactoryDataset(self, factory)
datasets: Dict[(str, 'TorchDataset')] = {}
indices_list = self.split(split_filename, splits, random_state)
for (key, value) in indices_list.items():
if (representation is not None):
datasets[key] = TorchRepresentationDataset(self, representation, key, value, **kwargs)
else:
datasets[key] = TorchMusicFactoryDataset(self, factory, key, value)
return datasets
def to_tensorflow_dataset(self, factory: Callable=None, representation: str=None, split_filename: Union[(str, Path)]=None, splits: Sequence[float]=None, random_state: Any=None, **kwargs: Any) -> Union[('TFDataset', Dict[(str, 'TFDataset')])]:
if ((representation is None) and (factory is None)):
raise TypeError('One of `representation` and `factory` must be given.')
if ((representation is not None) and (factory is not None)):
raise TypeError('Only one of `representation` and `factory` can be given.')
try:
import tensorflow as tf
from tensorflow.data import Dataset as TFDataset
except ImportError as err:
raise ImportError('Optional package tensorflow is required.') from err
if (representation is not None):
def _gen(indices):
for idx in indices:
(yield self[idx].to_representation(representation, **kwargs))
else:
def _gen(indices):
for idx in indices:
(yield factory(self[idx]))
if (splits is None):
indices = np.arange(len(self))
return TFDataset.from_generator(_gen, tf.float32, args=[indices])
datasets: Dict[(str, TFDataset)] = {}
indices_list = self.split(split_filename, splits, random_state)
for (key, value) in indices_list.items():
indices = np.array(value)
datasets[key] = TFDataset.from_generator(_gen, tf.float32, args=[indices])
return datasets |
class TestLBFGSBBounds(object):
def setup_method(self):
self.bounds = ((1, None), (None, None))
self.solution = (1, 0)
def fun(self, x, p=2.0):
return ((1.0 / p) * ((x[0] ** p) + (x[1] ** p)))
def jac(self, x, p=2.0):
return (x ** (p - 1))
def fj(self, x, p=2.0):
return (self.fun(x, p), self.jac(x, p))
def test_l_bfgs_b_bounds(self):
(x, f, d) = optimize.fmin_l_bfgs_b(self.fun, [0, (- 1)], fprime=self.jac, bounds=self.bounds)
assert_((d['warnflag'] == 0), d['task'])
assert_allclose(x, self.solution, atol=1e-06)
def test_l_bfgs_b_funjac(self):
(x, f, d) = optimize.fmin_l_bfgs_b(self.fj, [0, (- 1)], args=(2.0,), bounds=self.bounds)
assert_((d['warnflag'] == 0), d['task'])
assert_allclose(x, self.solution, atol=1e-06)
def test_minimize_l_bfgs_b_bounds(self):
res = optimize.minimize(self.fun, [0, (- 1)], method='L-BFGS-B', jac=self.jac, bounds=self.bounds)
assert_(res['success'], res['message'])
assert_allclose(res.x, self.solution, atol=1e-06) |
def make_output_format(format, ev_dir):
os.makedirs(ev_dir, exist_ok=True)
if (format == 'stdout'):
return HumanOutputFormat(sys.stdout)
elif (format == 'json'):
return JSONOutputFormat(osp.join(ev_dir, 'progress.json'))
elif (format == 'csv'):
return CSVOutputFormat(osp.join(ev_dir, 'progress.csv'))
elif (format == 'tensorboard'):
return TensorboardOutputFormat(osp.join(ev_dir, 'tb'))
else:
raise ValueError(('Unknown format specified: %s' % (format,))) |
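A small driving sketch: the factory above is typically called once per requested format, producing writers that share one log directory. The directory path is a placeholder.

log_dir = '/tmp/run0'  # placeholder directory
writers = [make_output_format(fmt, log_dir) for fmt in ('stdout', 'json', 'csv')]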
def model_test_mode(args, feeder, hparams, global_step):
with tf.variable_scope('Tacotron_model', reuse=tf.AUTO_REUSE) as scope:
model = create_model('Tacotron', hparams)
model.initialize(feeder.eval_inputs, feeder.eval_input_lengths, feeder.eval_speaker_embeddings, feeder.eval_mel_targets, feeder.eval_token_targets, targets_lengths=feeder.eval_targets_lengths, global_step=global_step, is_training=False, is_evaluating=True, split_infos=feeder.eval_split_infos)
model.add_loss()
return model |
class DAVIS_Train(data.Dataset):
MAX_TRAINING_SKIP = 100
def __init__(self, root, output_size, imset='2017/train.txt', clip_n=3, max_obj_n=7, max_skip=5, increment=5, samples=2, choice='order', crop=False):
self.root = root
self.clip_n = clip_n
self.output_size = output_size
self.max_obj_n = max_obj_n
self.max_skip = max_skip
self.increment = increment
self.samples = samples
self.sample_choice = choice
self.crop = crop
dataset_path = os.path.join(root, 'ImageSets', imset)
self.dataset_list = list()
with open(os.path.join(dataset_path), 'r') as lines:
for line in lines:
dataset_name = line.strip()
if (len(dataset_name) > 0):
self.dataset_list.append(dataset_name)
print(f' "DAVIS17": {len(self.dataset_list)} videos.')
self.random_horizontal_flip = mytrans.RandomHorizontalFlip(0.3)
self.color_jitter = TF.ColorJitter(0.1, 0.1, 0.1, 0.02)
self.random_affine = mytrans.RandomAffine(degrees=15, translate=(0.1, 0.1), scale=(0.95, 1.05), shear=10)
if self.crop:
self.random_resize_crop = mytrans.RandomResizedCrop(400, (0.8, 1), (0.95, 1.05))
else:
self.resize = mytrans.Resize(output_size)
self.to_tensor = TF.ToTensor()
self.normalize = TF.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
self.to_onehot = mytrans.ToOnehot(max_obj_n, shuffle=True)
def increase_max_skip(self):
self.max_skip = min((self.max_skip + self.increment), self.MAX_TRAINING_SKIP)
def set_max_skip(self, max_skip):
self.max_skip = max_skip
def __len__(self):
return (len(self.dataset_list) * self.samples)
def __getitem__(self, idx):
video_name = self.dataset_list[(idx // self.samples)]
img_dir = os.path.join(self.root, 'JPEGImages', '480p', video_name)
mask_dir = os.path.join(self.root, 'Annotations', '480p', video_name)
img_list = sorted(glob(os.path.join(img_dir, '*.jpg')))
mask_list = sorted(glob(os.path.join(mask_dir, '*.png')))
img_n = len(img_list)
obj_n = 1
while (obj_n == 1):
if (self.sample_choice == 'order'):
idx_list = list()
last_sample = (- 1)
sample_n = min(self.clip_n, img_n)
for i in range(sample_n):
if (i == 0):
last_sample = random.choice(range(0, ((img_n - sample_n) + 1)))
else:
last_sample = random.choice(range((last_sample + 1), min(((last_sample + self.max_skip) + 1), (((img_n - sample_n) + i) + 1))))
idx_list.append(last_sample)
elif (self.sample_choice == 'random'):
idx_list = list(range(img_n))
random.shuffle(idx_list)
sample_n = min(self.clip_n, img_n)
idx_list = idx_list[:sample_n]
else:
raise NotImplementedError()
while (len(idx_list) < self.clip_n):
idx_list.append(idx_list[(- 1)])
if (not self.crop):
frames = torch.zeros((self.clip_n, 3, *self.output_size), dtype=torch.float)
masks = torch.zeros((self.clip_n, self.max_obj_n, *self.output_size), dtype=torch.float)
else:
frames = torch.zeros((self.clip_n, 3, 400, 400), dtype=torch.float)
masks = torch.zeros((self.clip_n, self.max_obj_n, 400, 400), dtype=torch.float)
for (i, frame_idx) in enumerate(idx_list):
img = load_image_in_PIL(img_list[frame_idx], 'RGB')
mask = load_image_in_PIL(mask_list[frame_idx], 'P')
if (i > 0):
img = self.color_jitter(img)
(img, mask) = self.random_affine(img, mask)
if self.crop:
(img, mask) = self.random_resize_crop(img, mask)
else:
(img, mask) = self.resize(img, mask)
mask = np.array(mask, np.uint8)
if (i == 0):
(mask, obj_list) = self.to_onehot(mask)
obj_n = (len(obj_list) + 1)
else:
(mask, _) = self.to_onehot(mask, obj_list)
frames[i] = self.normalize(self.to_tensor(img))
masks[i] = mask
info = {'name': video_name, 'idx_list': idx_list}
return (frames, masks, obj_n, info) |
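A hedged loader sketch; `/data/DAVIS` is a placeholder root, and the tensor shapes follow from the defaults in `__getitem__` above (`crop=False`, so frames are resized to `output_size`).

from torch.utils.data import DataLoader

dataset = DAVIS_Train(root='/data/DAVIS', output_size=(400, 400), clip_n=3)
loader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=2)
frames, masks, obj_n, info = next(iter(loader))
# frames: (4, 3, 3, 400, 400)   batch, clip_n, RGB, H, W
# masks:  (4, 3, 7, 400, 400)   batch, clip_n, max_obj_n, H, W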
def test_vb_indices(homologous_radial1d_geometry):
homologous_radial1d_geometry.v_inner_boundary = homologous_radial1d_geometry.v_inner[0]
homologous_radial1d_geometry.v_outer_boundary = homologous_radial1d_geometry.v_outer[(- 1)]
assert (homologous_radial1d_geometry.v_inner_boundary_index == 0)
assert (homologous_radial1d_geometry.v_outer_boundary_index == len(homologous_radial1d_geometry.v_inner))
vib_index = homologous_radial1d_geometry.v_inner_boundary_index
vob_index = homologous_radial1d_geometry.v_outer_boundary_index
assert np.all((homologous_radial1d_geometry.v_inner[vib_index:vob_index] == homologous_radial1d_geometry.v_inner))
EPSILON_VELOCITY_SHIFT = ((1 * u.km) / u.s)
homologous_radial1d_geometry.v_inner_boundary = (homologous_radial1d_geometry.v_inner[0] + EPSILON_VELOCITY_SHIFT)
assert (homologous_radial1d_geometry.v_inner_boundary_index == 0)
homologous_radial1d_geometry.v_inner_boundary = (homologous_radial1d_geometry.v_inner[0] - EPSILON_VELOCITY_SHIFT)
assert (homologous_radial1d_geometry.v_inner_boundary_index == 0)
homologous_radial1d_geometry.v_inner_boundary = (homologous_radial1d_geometry.v_inner[1] - EPSILON_VELOCITY_SHIFT)
assert (homologous_radial1d_geometry.v_inner_boundary_index == 0)
homologous_radial1d_geometry.v_inner_boundary = (homologous_radial1d_geometry.v_inner[1] + EPSILON_VELOCITY_SHIFT)
assert (homologous_radial1d_geometry.v_inner_boundary_index == 1)
homologous_radial1d_geometry.v_outer_boundary = (homologous_radial1d_geometry.v_outer[(- 1)] + EPSILON_VELOCITY_SHIFT)
assert (homologous_radial1d_geometry.v_outer_boundary_index == 12)
homologous_radial1d_geometry.v_outer_boundary = (homologous_radial1d_geometry.v_outer[(- 1)] - EPSILON_VELOCITY_SHIFT)
assert (homologous_radial1d_geometry.v_outer_boundary_index == 12)
homologous_radial1d_geometry.v_outer_boundary = (homologous_radial1d_geometry.v_outer[(- 2)] + EPSILON_VELOCITY_SHIFT)
assert (homologous_radial1d_geometry.v_outer_boundary_index == 12)
homologous_radial1d_geometry.v_outer_boundary = (homologous_radial1d_geometry.v_outer[(- 2)] - EPSILON_VELOCITY_SHIFT)
assert (homologous_radial1d_geometry.v_outer_boundary_index == 11) |
@validate_params({'X': ['array-like'], 'y': ['array-like'], 'sampling_strategy': [Mapping, callable, None], 'random_state': ['random_state'], 'verbose': ['boolean']}, prefer_skip_nested_validation=True)
def make_imbalance(X, y, *, sampling_strategy=None, random_state=None, verbose=False, **kwargs):
target_stats = Counter(y)
if (isinstance(sampling_strategy, Mapping) or callable(sampling_strategy)):
sampling_strategy_ = check_sampling_strategy(sampling_strategy, y, 'under-sampling', **kwargs)
if verbose:
print(f'The original target distribution in the dataset is: {target_stats}')
rus = RandomUnderSampler(sampling_strategy=sampling_strategy_, replacement=False, random_state=random_state)
(X_resampled, y_resampled) = rus.fit_resample(X, y)
if verbose:
print(f'Make the dataset imbalanced: {Counter(y_resampled)}')
return (X_resampled, y_resampled) |
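A usage sketch with scikit-learn toy data; the per-class counts in `sampling_strategy` are arbitrary but must not exceed the counts actually present in `y`.

from collections import Counter
from sklearn.datasets import load_iris

X, y = load_iris(return_X_y=True)
X_res, y_res = make_imbalance(
    X, y,
    sampling_strategy={0: 10, 1: 20, 2: 30},  # desired samples per class
    random_state=42,
)
print(Counter(y_res))  # Counter({2: 30, 1: 20, 0: 10})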
def split_data_line(line, dialect=None):
delimiters = ',\t'
csv.field_size_limit(int((ctypes.c_ulong((- 1)).value // 2)))
if (line[(- 1)] == '\n'):
line = line[:(- 1)]
line = line.strip()
sniff_line = line
if (not any(((d in line) for d in delimiters))):
sniff_line += ','
if (dialect is None):
dialect = csv.Sniffer().sniff(sniff_line, delimiters=delimiters)
workaround_csv_sniffer_bug_last_field(sniff_line=sniff_line, dialect=dialect, delimiters=delimiters)
row = next(csv.reader([line], dialect))
return (row, dialect) |
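A usage sketch of the intended call pattern: sniff the dialect on the first line, then pass it back in for subsequent lines so the whole stream is parsed consistently.

dialect = None
rows = []
for line in ['name,age\n', 'alice,30\n', 'bob,25\n']:
    row, dialect = split_data_line(line, dialect)  # dialect reused after first sniff
    rows.append(row)
# rows == [['name', 'age'], ['alice', '30'], ['bob', '25']]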
class MediumPayloadCompleteQuaternion():
SIZE = 32
@staticmethod
def from_reader(reader: _ResponseReader):
assert (reader.remaining() >= MediumPayloadCompleteQuaternion.SIZE)
rv = MediumPayloadCompleteQuaternion()
rv.timestamp = Timestamp.from_reader(reader)
rv.quaternion = Quaternion.from_reader(reader)
rv.free_acceleration = FreeAcceleration.from_reader(reader)
return rv
@staticmethod
def from_bytes(bites):
reader = _ResponseReader(bites)
return MediumPayloadCompleteQuaternion.from_reader(reader)
def __repr__(self):
return _pretty_print(self) |
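A decoding sketch under assumptions: `payload` stands for a 32-byte message body as read off the device link; a zero buffer is used here only to show the call shape.

payload = bytes(MediumPayloadCompleteQuaternion.SIZE)  # placeholder 32-byte buffer
msg = MediumPayloadCompleteQuaternion.from_bytes(payload)
print(msg)  # pretty-printed via __repr__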
class LoopBasedReplacement():
INTRINSIC_TO_DACE = {'SUM': '__dace_sum', 'PRODUCT': '__dace_product', 'ANY': '__dace_any', 'ALL': '__dace_all', 'COUNT': '__dace_count', 'MINVAL': '__dace_minval', 'MAXVAL': '__dace_maxval', 'MERGE': '__dace_merge'}
@staticmethod
def replaced_name(func_name: str) -> str:
return LoopBasedReplacement.INTRINSIC_TO_DACE[func_name]
@staticmethod
def has_transformation() -> bool:
return True |
def parse_args():
parser = argparse.ArgumentParser(description='Train a DAMSM network')
parser.add_argument('--cfg', dest='cfg_file', help='optional config file', default='cfg/DAMSM/bird.yaml', type=str)
parser.add_argument('--gpu', dest='gpu_id', type=int, default=0)
parser.add_argument('--data_dir', dest='data_dir', type=str, default='data/birds')
parser.add_argument('--manualSeed', type=int, default=0, help='manual seed')
args = parser.parse_args()
return args |
def process_stats(stats, info, global_step, steps_per_stats, log_f):
info['avg_step_time'] = (stats['step_time'] / steps_per_stats)
info['avg_grad_norm'] = (stats['grad_norm'] / steps_per_stats)
info['train_ppl'] = utils.safe_exp((stats['loss'] / stats['predict_count']))
info['speed'] = (stats['total_count'] / (1000 * stats['step_time']))
is_overflow = False
train_ppl = info['train_ppl']
if (math.isnan(train_ppl) or math.isinf(train_ppl) or (train_ppl > 1e+20)):
utils.print_out((' step %d overflow, stop early' % global_step), log_f)
is_overflow = True
return is_overflow |
def main(args):
aggregate_eval(args.encoded_ctx_file, args.encoded_qa_file, args.output_top1000s, args.label_file, args.aggregation_mode, args.candidate_mode, args.output_dir, args.output_file_name) |
def make_args():
parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', '-n', type=str, default='1HVFourRoom')
return parser.parse_args() |
def test_pretokenized():
nlp = stanza.Pipeline(**{'processors': 'tokenize', 'dir': TEST_MODELS_DIR, 'lang': 'en', 'tokenize_pretokenized': True})
doc = nlp(EN_DOC_PRETOKENIZED)
assert (EN_DOC_PRETOKENIZED_GOLD_TOKENS == '\n\n'.join([sent.tokens_string() for sent in doc.sentences]))
assert all([(doc.text[token._start_char:token._end_char] == token.text) for sent in doc.sentences for token in sent.tokens])
doc = nlp(EN_DOC_PRETOKENIZED_LIST)
assert (EN_DOC_PRETOKENIZED_LIST_GOLD_TOKENS == '\n\n'.join([sent.tokens_string() for sent in doc.sentences]))
assert all([(doc.text[token._start_char:token._end_char] == token.text) for sent in doc.sentences for token in sent.tokens]) |
class HTMLBuilder(object):
_entity_re = re.compile('&([^;]+);')
_entities = name2codepoint.copy()
_entities['apos'] = 39
_empty_elements = {'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame', 'hr', 'img', 'input', 'keygen', 'isindex', 'link', 'meta', 'param', 'source', 'wbr'}
_boolean_attributes = {'selected', 'checked', 'compact', 'declare', 'defer', 'disabled', 'ismap', 'multiple', 'nohref', 'noresize', 'noshade', 'nowrap'}
_plaintext_elements = {'textarea'}
_c_like_cdata = {'script', 'style'}
def __init__(self, dialect):
self._dialect = dialect
def __call__(self, s):
return escape(s)
def __getattr__(self, tag):
if (tag[:2] == '__'):
raise AttributeError(tag)
def proxy(*children, **arguments):
buffer = ('<' + tag)
for (key, value) in iteritems(arguments):
if (value is None):
continue
if (key[(- 1)] == '_'):
key = key[:(- 1)]
if (key in self._boolean_attributes):
if (not value):
continue
if (self._dialect == 'xhtml'):
value = (('="' + key) + '"')
else:
value = ''
else:
value = (('="' + escape(value)) + '"')
buffer += ((' ' + key) + value)
if ((not children) and (tag in self._empty_elements)):
if (self._dialect == 'xhtml'):
buffer += ' />'
else:
buffer += '>'
return buffer
buffer += '>'
children_as_string = ''.join([text_type(x) for x in children if (x is not None)])
if children_as_string:
if (tag in self._plaintext_elements):
children_as_string = escape(children_as_string)
elif ((tag in self._c_like_cdata) and (self._dialect == 'xhtml')):
children_as_string = (('/*<![CDATA[*/' + children_as_string) + '/*]]>*/')
buffer += (((children_as_string + '</') + tag) + '>')
return buffer
return proxy
def __repr__(self):
return ('<%s for %r>' % (self.__class__.__name__, self._dialect)) |
def IsErrorSuppressedByNolint(category, linenum):
return ((linenum in _error_suppressions.get(category, set())) or (linenum in _error_suppressions.get(None, set()))) |
class SawyerBoxCloseV2Policy(Policy):
@staticmethod
@assert_fully_parsed
def _parse_obs(obs):
return {'hand_pos': obs[:3], 'lid_pos': obs[3:6], 'box_pos': obs[9:11], 'extra_info': obs[[6, 7, 8, 11]]}
def get_action(self, obs):
o_d = self._parse_obs(obs)
action = Action({'delta_pos': np.arange(3), 'grab_effort': 3})
action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=25.0)
action['grab_effort'] = self._grab_effort(o_d)
return action.array
@staticmethod
def _desired_pos(o_d):
pos_curr = o_d['hand_pos']
pos_lid = (o_d['lid_pos'] + np.array([0.0, 0.0, (+ 0.02)]))
pos_box = (np.array([*o_d['box_pos'], 0.15]) + np.array([0.0, 0.0, 0.0]))
if (np.linalg.norm((pos_curr[:2] - pos_lid[:2])) > 0.01):
return np.array([*pos_lid[:2], 0.2])
elif (abs((pos_curr[2] - pos_lid[2])) > 0.05):
return pos_lid
elif (abs((pos_curr[2] - pos_box[2])) > 0.04):
return np.array([pos_curr[0], pos_curr[1], pos_box[2]])
else:
return pos_box
@staticmethod
def _grab_effort(o_d):
pos_curr = o_d['hand_pos']
pos_lid = (o_d['lid_pos'] + np.array([0.0, 0.0, (+ 0.02)]))
if ((np.linalg.norm((pos_curr[:2] - pos_lid[:2])) > 0.01) or (abs((pos_curr[2] - pos_lid[2])) > 0.13)):
return 0.5
else:
return 1.0 |
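A hedged rollout sketch; `env` is a hypothetical Meta-World box-close environment with the usual Gym reset/step API.

policy = SawyerBoxCloseV2Policy()
obs = env.reset()  # hypothetical environment handle
for _ in range(150):
    action = policy.get_action(obs)  # 4-vector: delta_pos (3) + grab_effort (1)
    obs, reward, done, info = env.step(action)
    if done:
        break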
def register_Ns3Dot11sRouteChange_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::dot11s::RouteChange const &', 'arg0')])
cls.add_instance_attribute('destination', 'ns3::Mac48Address', is_const=False)
cls.add_instance_attribute('interface', 'uint32_t', is_const=False)
cls.add_instance_attribute('lifetime', 'ns3::Time', is_const=False)
cls.add_instance_attribute('metric', 'uint32_t', is_const=False)
cls.add_instance_attribute('retransmitter', 'ns3::Mac48Address', is_const=False)
cls.add_instance_attribute('seqnum', 'uint32_t', is_const=False)
cls.add_instance_attribute('type', 'std::string', is_const=False)
return |