code stringlengths 101 5.91M |
|---|
class GiraffeCombine(nn.Module):
    """Fuse several FPN feature maps into a single output node.

    Each input (indexed by ``inputs_offsets``) is resampled to the target
    reduction/channel count, then combined according to ``weight_method``:
    softmax attention ('attn'), normalized-ReLU weights ('fastattn'),
    plain sum ('sum'), or channel concatenation ('concat').
    """
    def __init__(self, in_channels, stride, fpn_config, fpn_channels, inputs_offsets, target_reduction, weight_method='attn'):
        super(GiraffeCombine, self).__init__()
        self.in_channels = in_channels
        self.stride = stride
        self.inputs_offsets = inputs_offsets
        self.weight_method = weight_method
        self.resample = nn.ModuleDict()
        # Reductions are assumed to be reduction_base * 2**level, so a log2 of
        # the ratio recovers the pyramid-level index.
        reduction_base = stride[0]
        target_channels_idx = int(math.log((target_reduction // reduction_base), 2))
        for (idx, offset) in enumerate(inputs_offsets):
            if (offset < len(in_channels)):
                # Offset refers directly to a backbone level.
                in_channel = in_channels[offset]
                input_reduction = stride[offset]
            else:
                # Offset refers to a previously built FPN node; look up its reduction.
                node_idx = offset
                input_reduction = fpn_config[node_idx]['reduction']
                input_channels_idx = int(math.log((input_reduction // reduction_base), 2))
                in_channel = in_channels[input_channels_idx]
            reduction_ratio = (target_reduction / input_reduction)
            if (weight_method == 'concat'):
                # 'concat' keeps each input's channel count; other methods
                # project every input to the target channel width.
                self.resample[str(offset)] = ResampleFeatureMap(in_channel, in_channel, reduction_ratio=reduction_ratio)
            else:
                self.resample[str(offset)] = ResampleFeatureMap(in_channel, fpn_channels[target_channels_idx], reduction_ratio=reduction_ratio)
        if (weight_method == 'concat'):
            # NOTE(review): src_channels/target_channels are computed but not used
            # in this block — presumably consumed by code elsewhere; confirm.
            src_channels = (fpn_channels[target_channels_idx] * len(inputs_offsets))
            target_channels = fpn_channels[target_channels_idx]
        if ((weight_method == 'attn') or (weight_method == 'fastattn')):
            # One learnable scalar weight per fused input edge.
            self.edge_weights = nn.Parameter(torch.ones(len(inputs_offsets)), requires_grad=True)
        else:
            self.edge_weights = None
    def forward(self, x):
        """Resample each selected input of ``x`` and fuse them; returns the fused map
        (or None when this node has no inputs)."""
        dtype = x[0].dtype
        nodes = []
        if (len(self.inputs_offsets) == 0):
            return None
        for (offset, resample) in zip(self.inputs_offsets, self.resample.values()):
            input_node = x[offset]
            input_node = resample(input_node)
            nodes.append(input_node)
        if (self.weight_method == 'attn'):
            # Softmax over edge weights, then a weighted sum over the stacked inputs.
            normalized_weights = torch.softmax(self.edge_weights.to(dtype=dtype), dim=0)
            out = (torch.stack(nodes, dim=(- 1)) * normalized_weights)
            out = torch.sum(out, dim=(- 1))
        elif (self.weight_method == 'fastattn'):
            # EfficientDet-style "fast attention": ReLU'd weights normalized by
            # their sum (epsilon avoids division by zero).
            edge_weights = nn.functional.relu(self.edge_weights.to(dtype=dtype))
            weights_sum = torch.sum(edge_weights)
            out = torch.stack([((nodes[i] * edge_weights[i]) / (weights_sum + 0.0001)) for i in range(len(nodes))], dim=(- 1))
            out = torch.sum(out, dim=(- 1))
        elif (self.weight_method == 'sum'):
            out = torch.stack(nodes, dim=(- 1))
            out = torch.sum(out, dim=(- 1))
        elif (self.weight_method == 'concat'):
            out = torch.cat(nodes, dim=1)
        else:
            raise ValueError('unknown weight_method {}'.format(self.weight_method))
        return out
class Schaffer02(Benchmark):
    """Schaffer function N.2 benchmark (2-D), global minimum f(0, 0) = 0."""

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)
        # Search domain is [-100, 100] in every dimension.
        self._bounds = [((- 100.0), 100.0) for _ in range(self.N)]
        self.custom_bounds = [((- 10), 10), ((- 10), 10)]
        self.global_optimum = [[0.0] * self.N]
        self.fglob = 0.0

    def fun(self, x, *args):
        """Evaluate Schaffer N.2 at ``x`` and count the evaluation."""
        self.nfev += 1
        numerator = (sin((x[0] ** 2) - (x[1] ** 2)) ** 2) - 0.5
        denominator = (1 + (0.001 * ((x[0] ** 2) + (x[1] ** 2)))) ** 2
        return 0.5 + (numerator / denominator)
def analyse_type_annotation(annotation, env, assigned_value=None):
    """Interpret a PEP-484-style annotation as a Cython type.

    Returns (base_type, arg_type) where base_type is a CAnalysedBaseTypeNode
    (or None) and arg_type is the resolved Cython type (or None when unknown
    or deliberately ignored as ambiguous).
    """
    base_type = None
    is_ambiguous = False
    explicit_pytype = explicit_ctype = False
    if annotation.is_dict_literal:
        # Legacy {'type': ..., 'ctype': ...} dict annotations: unwrap to the
        # declared value, with 'ctype' taking precedence over 'type'.
        warning(annotation.pos, "Dicts should no longer be used as type annotations. Use 'cython.int' etc. directly.")
        for (name, value) in annotation.key_value_pairs:
            if (not name.is_string_literal):
                continue
            if (name.value in ('type', b'type')):
                explicit_pytype = True
                if (not explicit_ctype):
                    annotation = value
            elif (name.value in ('ctype', b'ctype')):
                explicit_ctype = True
                annotation = value
        if (explicit_pytype and explicit_ctype):
            warning(annotation.pos, 'Duplicate type declarations found in signature annotation')
    arg_type = annotation.analyse_as_type(env)
    if (annotation.is_name and (not annotation.cython_attribute) and (annotation.name in ('int', 'long', 'float'))):
        # Plain Python 'int'/'long'/'float' names are ambiguous between the
        # Python type and the C type of the same name.
        if ((assigned_value is not None) and (arg_type is not None) and (not arg_type.is_pyobject)):
            assigned_type = assigned_value.infer_type(env)
            if (assigned_type and assigned_type.is_pyobject):
                # C type annotation but a Python-object default value: give up.
                is_ambiguous = True
                arg_type = None
        if (arg_type in (PyrexTypes.c_long_type, PyrexTypes.c_int_type, PyrexTypes.c_float_type)):
            # Map Python 'float' to C double; 'int'/'long' stay generic objects.
            arg_type = (PyrexTypes.c_double_type if (annotation.name == 'float') else py_object_type)
    elif ((arg_type is not None) and annotation.is_string_literal):
        warning(annotation.pos, "Strings should no longer be used for type declarations. Use 'cython.int' etc. directly.")
    if (arg_type is not None):
        if (explicit_pytype and (not explicit_ctype) and (not arg_type.is_pyobject)):
            warning(annotation.pos, 'Python type declaration in signature annotation does not refer to a Python type')
        base_type = CAnalysedBaseTypeNode(annotation.pos, type=arg_type, is_arg=True)
    elif is_ambiguous:
        warning(annotation.pos, 'Ambiguous types in annotation, ignoring')
    else:
        warning(annotation.pos, 'Unknown type declaration in annotation, ignoring')
    return (base_type, arg_type)
def laplacian(v, irho):
    """Laplacian of field ``v``, optionally weighted by inverse density ``irho``.

    irho None or 1 -> plain Laplacian; a Differentiable irho -> staggered
    div(irho * grad(v)) with derivatives evaluated at half-grid offsets;
    any other value -> scalar-weighted Laplacian.
    """
    if ((irho is None) or (irho == 1)):
        Lap = v.laplace
    elif isinstance(irho, Differentiable):
        # Half the space order for each of the two nested first derivatives,
        # evaluated at x + h/2 (inner) and x - h/2 (outer) per dimension.
        so = (irho.space_order // 2)
        Lap = sum([getattr((irho._subs(d, (d + (d.spacing / 2))) * getattr(v, ('d%s' % d.name))(x0=(d + (d.spacing / 2)), fd_order=so)), ('d%s' % d.name))(x0=(d - (d.spacing / 2)), fd_order=so) for d in irho.dimensions])
    else:
        Lap = (irho * v.laplace)
    return Lap
class ResBlock(nn.Module):
    """Residual block: two 3x3 convs with ReLUs, a 1x1 projection, plus identity skip."""

    def __init__(self, chan_in, hidden_size, chan_out):
        super().__init__()
        layers = [
            nn.Conv2d(chan_in, hidden_size, 3, padding=1),
            nn.ReLU(),
            nn.Conv2d(hidden_size, hidden_size, 3, padding=1),
            nn.ReLU(),
            nn.Conv2d(hidden_size, chan_out, 1),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        # Residual connection: the conv stack output is added to the input.
        shortcut = x
        return self.net(x) + shortcut
class TestNetGradientChecker(test_util.TestCase):
    """Tests for caffe2's NetGradientChecker utilities."""

    def test_net_gradient_checker(self):
        """Numerically check gradients of a small FC + L2-distance net w.r.t. 'X'."""
        model = model_helper.ModelHelper(name='test')
        const = model.net.AddExternalInputs('const1', 'const2')
        fc = brew.fc(model, dim_in=3, dim_out=4, blob_in='X', blob_out='Y', axis=0)
        # Two scalar losses: distance of the FC output to each constant target.
        dist = [model.net.SquaredL2Distance([fc, c]) for c in const]
        losses = [model.net.AveragedLoss(d) for d in dist]
        workspace.RunNetOnce(model.param_init_net)
        NetGradientChecker.Check(model.net, outputs_with_grad=losses, input_values={'X': np.array([1, 2, 3], dtype='float32'), const[0]: np.array([1, 1, 1, 1], dtype='float32'), const[1]: np.array([2, 2, 2, 2], dtype='float32')}, input_to_check='X')

    def test_net_comparison(self):
        """(a+b)*(c+d) in net1 must match its algebraic expansion in net2."""
        net1 = core.Net('net1')
        (a, b, c, d) = net1.AddExternalInputs('a', 'b', 'c', 'd')
        a_b = net1.Sum([a, b], 'a+b')
        c_d = net1.Sum([c, d], 'c+d')
        x = net1.Mul([a_b, c_d], 'x')
        # net2 computes the same value as ac + ad + bc + bd.
        net2 = core.Net('net2')
        ac = net2.Mul([a, c], 'ac')
        ad = net2.Mul([a, d], 'ad')
        bc = net2.Mul([b, c], 'bc')
        bd = net2.Mul([b, d], 'bd')
        y = net2.Sum([ac, ad, bc, bd], 'y')
        input_values = {blob: np.array([i], dtype=np.float32) for (i, blob) in enumerate([a, b, c, d])}
        NetGradientChecker.CompareNets([net1, net2], [[x], [y]], [0], inputs_with_grads=[a, b, c, d], input_values=input_values)
def print_as_conll(gold_examples, predicted_target_dict):
    """Write predicted target parses to ``out_conll_file`` in CoNLL format.

    Each gold example is paired with its prediction dict; one CoNLL record is
    written per predicted target, in sorted target order.

    NOTE(review): ``out_conll_file`` is a module-level name, not a parameter —
    it must be bound before this function is called.
    """
    with codecs.open(out_conll_file, 'w', 'utf-8') as conll_file:
        for (gold, pred) in zip(gold_examples, predicted_target_dict):
            for target in sorted(pred):
                result = (gold.get_predicted_target_conll(target, pred[target][0]) + '\n')
                conll_file.write(result)
        # Fixed: removed the redundant explicit close() — the `with` block
        # already closes the file on exit.
(OperatorDef)  # NOTE(review): truncated decorator line — likely a stripped `@...register(OperatorDef)`; restore from upstream.
def print_op(text, op):
    """Pretty-print a caffe2 OperatorDef into the text builder ``text``."""
    args = [(a.name, _arg_val(a)) for a in op.arg]
    dev_opt_txt = format_device_option(op.device_option)
    if dev_opt_txt:
        args.append(('device_option', dev_opt_txt))
    if text.c2_net_name:
        # Inside a named net: render as `net.OpType([inputs, outputs], **args)`.
        text.add(call(((text.c2_net_name + '.') + op.type), ([list(op.input), list(op.output)] + args)))
    else:
        text.add(call(op.type, (list(op.input) + args), op.output, factor_prefixes=text.factor_prefixes))
    for arg in op.arg:
        # Recurse into nested net arguments (e.g. control-flow operators).
        if arg.HasField('n'):
            with text.context(('arg: %s' % arg.name)):
                text(arg.n)
class EquivariantScalar(OutputModel):
    """Output head that reduces per-atom scalar/vector features to one scalar
    via a stack of gated equivariant blocks."""

    def __init__(self, hidden_channels, activation='silu', allow_prior_model=True):
        super(EquivariantScalar, self).__init__(allow_prior_model=allow_prior_model)
        # Two blocks: hidden -> hidden/2 (with scalar activation), then hidden/2 -> 1.
        self.output_network = nn.ModuleList([GatedEquivariantBlock(hidden_channels, (hidden_channels // 2), activation=activation, scalar_activation=True), GatedEquivariantBlock((hidden_channels // 2), 1, activation=activation)])
        self.reset_parameters()

    def reset_parameters(self):
        """Reinitialize every block in the output network."""
        for layer in self.output_network:
            layer.reset_parameters()

    def pre_reduce(self, x, v, z, pos, batch):
        """Run (x, v) through the gated blocks; z/pos/batch are unused here."""
        for layer in self.output_network:
            (x, v) = layer(x, v)
        # v.sum() * 0 adds a zero-valued term so the output formally depends on v.
        return (x + (v.sum() * 0))
def tf_2d_normal(x, y, mux, muy, sx, sy, rho):
    """Probability density of a bivariate Gaussian at (x, y).

    mux/muy are the means, sx/sy the standard deviations, and rho the
    correlation coefficient. All arguments may be (broadcastable) tensors.
    """
    normx = tf.subtract(x, mux)
    normy = tf.subtract(y, muy)
    sxsy = tf.multiply(sx, sy)
    # Mahalanobis-style quadratic form of the correlated 2-D Gaussian.
    z = ((tf.square(tf.div(normx, sx)) + tf.square(tf.div(normy, sy))) - (2 * tf.div(tf.multiply(rho, tf.multiply(normx, normy)), sxsy)))
    negRho = (1 - tf.square(rho))
    result = tf.exp(tf.div((- z), (2 * negRho)))
    # Normalization constant 2*pi*sx*sy*sqrt(1 - rho^2).
    denom = ((2 * np.pi) * tf.multiply(sxsy, tf.sqrt(negRho)))
    result = tf.div(result, denom)
    return result
.parametrize('fn', [(lambda name: True), (lambda name: False), (lambda name: ('a' in name))])  # NOTE(review): truncated decorator — likely a stripped `@pytest.mark.parametrize`.
def test_set_get_summary_filter(fn: SummaryFilter) -> None:
    """set_summary_filter / get_summary_filter must round-trip the exact callable."""
    try:
        set_summary_filter(fn)
        assert (get_summary_filter() is fn)
    finally:
        # Always restore the default filter so other tests are unaffected.
        set_summary_filter(default_summary_filter)
def limit_author_list(all_authors, desired_authors=(), author_list_length_limit=10):
    """Shorten a long author list, keeping desired authors and adding 'et~al.'.

    If ``all_authors`` fits within the limit it is returned unchanged.
    Otherwise the authors that appear in ``desired_authors`` are kept (or, if
    none match, the first ``author_list_length_limit`` authors), and an
    'et~al.' marker is appended whenever anyone was dropped.

    Fixed: the mutable default argument ``desired_authors=[]`` is replaced by
    an immutable empty tuple (it is only used for membership tests, so this is
    fully backward compatible).
    """
    if (len(all_authors) <= author_list_length_limit):
        return all_authors
    author_list = [a for a in all_authors if (a in desired_authors)]
    if (not author_list):
        author_list = all_authors[:author_list_length_limit]
    if (len(author_list) < len(all_authors)):
        author_list += ['et~al.']
    return author_list
_REGISTRY.register()  # NOTE(review): truncated decorator — likely a stripped `@BACKBONE_REGISTRY.register()`.
def build_vovnet_fpn_backbone(cfg, input_shape: ShapeSpec):
    """Build a VoVNet backbone wrapped in an FPN (detectron2-style builder)."""
    bottom_up = build_vovnet_backbone(cfg, input_shape)
    in_features = cfg.MODEL.FPN.IN_FEATURES
    out_channels = cfg.MODEL.FPN.OUT_CHANNELS
    backbone = FPN(bottom_up=bottom_up, in_features=in_features, out_channels=out_channels, norm=cfg.MODEL.FPN.NORM, top_block=LastLevelMaxPool(), fuse_type=cfg.MODEL.FPN.FUSE_TYPE)
    return backbone
def main():
    """cvui demo: rows and columns layout with assorted widgets in an OpenCV window.

    Runs an event loop rendering three rows and three columns of widgets until
    ESC is pressed.
    """
    frame = np.zeros((600, 800, 3), np.uint8)
    values = []
    checked = [False]
    checked2 = [False]
    value = [1.0]
    value2 = [1.0]
    value3 = [1.0]
    padding = 10
    img = cv2.imread('lena-face.jpg', cv2.IMREAD_COLOR)
    imgRed = cv2.imread('lena-face-red.jpg', cv2.IMREAD_COLOR)
    imgGray = cv2.imread('lena-face-gray.jpg', cv2.IMREAD_COLOR)
    # Random data for the sparkline widget.
    for i in range(0, 20):
        values.append(random.uniform(0.0, 300.0))
    cvui.init(WINDOW_NAME)
    while True:
        # Clear the frame to the cvui default background color.
        frame[:] = (49, 52, 49)
        # Row 1: mixed widgets, no padding.
        cvui.beginRow(frame, 10, 20, 100, 50)
        cvui.text('This is ')
        cvui.printf('a row')
        cvui.checkbox('checkbox', checked)
        cvui.window(80, 80, 'window')
        # 65280 == 0xFF00; the trailing comma suggests a dropped trailing
        # argument — TODO confirm against the cvui.rect signature.
        cvui.rect(50, 50, 65280, )
        cvui.sparkline(values, 50, 50)
        cvui.counter(value)
        cvui.button(100, 30, 'Fixed')
        cvui.image(img)
        cvui.button(img, imgGray, imgRed)
        cvui.endRow()
        # Row 2: same idea, but with 50px padding between widgets.
        padding = 50
        cvui.beginRow(frame, 10, 150, 100, 50, padding)
        cvui.text('This is ')
        cvui.printf('another row')
        cvui.checkbox('checkbox', checked2)
        cvui.window(80, 80, 'window')
        cvui.button(100, 30, 'Fixed')
        cvui.printf('with 50px padding.')
        cvui.endRow()
        # Row 3: text and a button.
        cvui.beginRow(frame, 10, 250, 100, 50)
        cvui.text('This is ')
        cvui.printf('another row with a trackbar ')
        cvui.printf(' and a button ')
        cvui.button(100, 30, 'button')
        cvui.endRow()
        # Column 1: stacked widgets with no padding.
        cvui.beginColumn(frame, 50, 330, 100, 200)
        cvui.text('Column 1 (no padding)')
        cvui.button('button1')
        cvui.button('button2')
        cvui.text('End of column 1')
        cvui.endColumn()
        # Column 2: 10px padding between widgets.
        padding = 10
        cvui.beginColumn(frame, 300, 330, 100, 200, padding)
        cvui.text('Column 2 (padding = 10)')
        cvui.button('button1')
        cvui.button('button2')
        cvui.text('End of column 2')
        cvui.endColumn()
        # Column 3: explicit cvui.space() calls instead of uniform padding.
        cvui.beginColumn(frame, 550, 330, 100, 200)
        cvui.text('Column 3 (use space)')
        cvui.space(5)
        cvui.button('button1 5px below')
        cvui.space(50)
        cvui.text('Text 50px below')
        cvui.space(20)
        cvui.button('Button 20px below')
        cvui.space(40)
        cvui.text('End of column 2 (40px below)')
        cvui.endColumn()
        cvui.update()
        cv2.imshow(WINDOW_NAME, frame)
        # ESC (key code 27) quits the demo.
        if (cv2.waitKey(20) == 27):
            break
def P(alpha, m):
    """Return a list of disjoint index pairs on {0, ..., 2m-1} for round ``alpha``.

    NOTE(review): this appears to construct one round of a pairing scheme
    (1-factorization-like) on 2m points, with distinct constructions for even
    and odd m and for small vs. large alpha — provenance unverified; confirm
    against the originating paper/spec.

    Raises a bare Exception when alpha >= 2m - 1 (round index out of range).
    """
    if (alpha >= ((2 * m) - 1)):
        raise Exception
    if ((m % 2) == 0):
        if (alpha < m):
            # Even m, small alpha: pair each even index with a shifted index.
            if ((alpha % 2) == 0):
                b = (alpha // 2)
                return [((2 * a), ((((2 * a) + (2 * b)) + 1) % (2 * m))) for a in range(m)]
            else:
                b = ((alpha - 1) // 2)
                return [((2 * a), ((((2 * a) - (2 * b)) - 1) % (2 * m))) for a in range(m)]
        else:
            # Even m, large alpha: piecewise construction plus two sign-dependent
            # closing pairs.
            y = (alpha - m)
            pairs = [(b, (((2 * y) - b) % (2 * m))) for b in range(y)]
            pairs += [(c, (((((2 * m) + (2 * y)) - c) - 2) % (2 * m))) for c in range(((2 * y) + 1), ((m + y) - 1))]
            pairs += [(((2 * m) + int(((- 1.5) - (0.5 * ((- 1) ** y))))), y), (((2 * m) + int(((- 1.5) + (0.5 * ((- 1) ** y))))), ((m + y) - 1))]
            return pairs
    elif (alpha < (m - 1)):
        # Odd m, small alpha: same shifted pairing as the even-m case.
        if ((alpha % 2) == 0):
            b = (alpha // 2)
            return [((2 * a), ((((2 * a) + (2 * b)) + 1) % (2 * m))) for a in range(m)]
        else:
            b = ((alpha - 1) // 2)
            return [((2 * a), ((((2 * a) - (2 * b)) - 1) % (2 * m))) for a in range(m)]
    else:
        # Odd m, large alpha: mirrored pairs around 2y plus one bridging pair.
        y = ((alpha - m) + 1)
        pairs = [(b, ((2 * y) - b)) for b in range(y)]
        pairs += [(c, (((2 * m) + (2 * y)) - c)) for c in range(((2 * y) + 1), (m + y))]
        pairs += [(y, (m + y))]
        return pairs
def set_node_colors(c, x, cmap, colored_nodes):
    """Assign fill and edge colors to nodes based on group ``c`` and coreness ``x``.

    c maps node -> group label, x maps node -> a value in [0, 1] used to pick a
    light/dark shade within the group's palette. Nodes not in ``colored_nodes``
    fall back to gray defaults via the defaultdicts.

    Fixed: the defaultdict factories were written as ``lambda x: ...`` —
    ``default_factory`` is called with no arguments, so accessing a missing
    key raised TypeError. They are now zero-argument lambdas.
    """
    node_colors = defaultdict(lambda: '#8d8d8d')
    node_edge_colors = defaultdict(lambda: '#4d4d4d')
    cnt = Counter([c[d] for d in colored_nodes])
    num_groups = len(cnt)
    if (cmap is None):
        # Choose a palette sized to the number of groups.
        if (num_groups <= 10):
            cmap = sns.color_palette().as_hex()
        elif (num_groups <= 20):
            cmap = sns.color_palette('tab20').as_hex()
        else:
            cmap = sns.color_palette('hls', num_groups).as_hex()
        # Most common group gets the first palette color, and so on.
        cmap = dict(zip([d[0] for d in cnt.most_common(num_groups)], [cmap[i] for i in range(num_groups)]))
    bounds = np.linspace(0, 1, 11)
    norm = mpl.colors.BoundaryNorm(bounds, ncolors=12, extend='both')
    # Per-group light (fill) and dark (edge) shade ramps.
    cmap_coreness = {k: sns.light_palette(v, n_colors=12).as_hex() for (k, v) in cmap.items()}
    cmap_coreness_dark = {k: sns.dark_palette(v, n_colors=12).as_hex() for (k, v) in cmap.items()}
    for d in colored_nodes:
        node_colors[d] = cmap_coreness[c[d]][(norm(x[d]) - 1)]
        node_edge_colors[d] = cmap_coreness_dark[c[d]][(- norm(x[d]))]
    return (node_colors, node_edge_colors)
def fully_connected(inputs, num_outputs, scope, use_xavier=True, stddev=0.001, weight_decay=0.0, activation_fn=tf.nn.relu, bn=False, bn_decay=None, is_training=None):
    """Fully-connected layer with optional batch norm and activation (TF1 style).

    inputs: 2-D tensor whose last dimension is the input feature size.
    Returns the activated (and optionally batch-normalized) output tensor.
    """
    with tf.variable_scope(scope) as sc:
        num_input_units = inputs.get_shape()[(- 1)].value
        weights = _variable_with_weight_decay('weights', shape=[num_input_units, num_outputs], use_xavier=use_xavier, stddev=stddev, wd=weight_decay)
        outputs = tf.matmul(inputs, weights)
        # Biases live on CPU and start at zero.
        biases = _variable_on_cpu('biases', [num_outputs], tf.constant_initializer(0.0))
        outputs = tf.nn.bias_add(outputs, biases)
        if bn:
            outputs = batch_norm_for_fc(outputs, is_training, bn_decay, 'bn')
        if (activation_fn is not None):
            outputs = activation_fn(outputs)
        return outputs
class CustomSavingCallback(Callback):
    """Keras callback that periodically exports the model to timestamped folders.

    Every ``saving_freq`` epochs (and at the end of training) it writes the
    model architecture (JSON), model weights, optimizer weights, learning rate
    and epoch number into ``output_dir/<unix-timestamp>/``, keeping at most
    ``keep_max_models`` exports. With ``save_best_only`` it only exports when
    monitored 'val_loss' improves.
    """

    def __init__(self, output_dir: str, saving_freq: int, save_best_only: bool=False, keep_max_models: int=5):
        super(CustomSavingCallback, self).__init__()
        self.saving_dir = output_dir
        self.saving_freq = saving_freq
        self.save_best_only = save_best_only
        self.keep_max_models = keep_max_models
        self.epochs_since_last_save = 0
        # Lower val_loss is better.
        self.monitor = 'val_loss'
        self.monitor_op = np.less
        self.best_value = np.Inf

    def on_epoch_begin(self, epoch, logs=None):
        self._current_epoch = epoch

    def on_epoch_end(self, epoch, logs=None):
        self.logs = logs
        self.epochs_since_last_save += 1
        if (self.epochs_since_last_save == self.saving_freq):
            self._export_model(logs)
            self.epochs_since_last_save = 0

    def on_train_end(self, logs=None):
        # Final export using the last epoch's logs.
        # NOTE(review): this may export again immediately after an epoch-end
        # export (and could collide on the same 1-second timestamp) — confirm.
        self._export_model(self.logs)
        self.epochs_since_last_save = 0

    def _export_model(self, logs):
        """Write architecture, weights, optimizer state, LR and epoch to disk."""
        timestamp = str(int(time.time()))
        folder = os.path.join(self.saving_dir, timestamp)
        if self.save_best_only:
            current_value = logs.get(self.monitor)
            if self.monitor_op(current_value, self.best_value):
                print('\n{} improved from {:0.5f} to {:0.5f}, saving model to {}'.format(self.monitor, self.best_value, current_value, folder))
                self.best_value = current_value
            else:
                # No improvement: skip this export entirely.
                print('\n{} did not improve from {:0.5f}'.format(self.monitor, self.best_value))
                return
        os.makedirs(folder)
        model_json = self.model.to_json()
        with open(os.path.join(folder, LAYERS_FILENAME), 'w') as f:
            json.dump(model_json, f)
        self.model.save_weights(os.path.join(folder, MODEL_WEIGHTS_FILENAME))
        optimizer_weights = tf.keras.backend.batch_get_value(self.model.optimizer.weights)
        with open(os.path.join(folder, OPTIMIZER_WEIGHTS_FILENAME), 'wb') as f:
            pickle.dump(optimizer_weights, f)
        learning_rate = self.model.optimizer.learning_rate
        with open(os.path.join(folder, LEARNING_RATE_FILENAME), 'wb') as f:
            pickle.dump(learning_rate, f)
        # Persist 1-based epoch number for resuming.
        epoch = (self._current_epoch + 1)
        with open(os.path.join(folder, EPOCH_FILENAME), 'wb') as f:
            pickle.dump(epoch, f)
        self._clean_exports()

    def _clean_exports(self):
        """Delete the oldest exports beyond ``keep_max_models``.

        NOTE(review): assumes every entry in ``saving_dir`` is an int-named
        timestamp folder; any other file there makes int(f) raise — confirm.
        """
        timestamp_folders = [int(f) for f in os.listdir(self.saving_dir)]
        timestamp_folders.sort(reverse=True)
        if (len(timestamp_folders) > self.keep_max_models):
            folders_to_remove = timestamp_folders[self.keep_max_models:]
            for f in folders_to_remove:
                shutil.rmtree(os.path.join(self.saving_dir, str(f)))
def merge_with_parent(dc: FairseqDataclass, cfg: DictConfig, remove_missing=True):
    """Merge ``cfg`` onto the defaults described by ``dc`` and return a
    struct-locked OmegaConf config that keeps ``cfg``'s parent pointer.

    When ``remove_missing`` is set, keys of ``cfg`` that the target does not
    declare are deleted before merging.
    """
    if remove_missing:
        # Valid keys: the dataclass' declared fields, or the keys of a
        # plain config object.
        if is_dataclass(dc):
            valid_keys = set(dc.__dataclass_fields__.keys())
        else:
            valid_keys = set(dc.keys())
        stale_keys = [key for key in cfg.keys() if key not in valid_keys]
        with open_dict(cfg):
            for key in stale_keys:
                del cfg[key]
    merged_cfg = OmegaConf.merge(dc, cfg)
    # Preserve the original parent so interpolations still resolve.
    merged_cfg.__dict__['_parent'] = cfg.__dict__['_parent']
    OmegaConf.set_struct(merged_cfg, True)
    return merged_cfg
(scope='module')  # NOTE(review): truncated decorator — likely a stripped `@pytest.fixture(scope='module')`.
def functional_fx(variable_x):
    """Fixture: a 'tanh' Functional named 'fx' with two hidden layers of width 10."""
    return sn.Functional('fx', variable_x, (2 * [10]), 'tanh')
def create_integrated_db_with_infos(args, root_path):
    """Concatenate all per-object point files into one .npy database.

    Reads the db-info pickle at ``args.src_db_info``, loads every object's
    points, records each object's (start, end) row range in the merged array
    ('global_data_offset'), then writes the merged array and the augmented
    info pickle back to disk. Returns (db_info_global, global_db).
    """
    db_infos_path = args.src_db_info
    # NOTE(review): the augmented info is written back to the source path,
    # overwriting the original pickle — confirm this is intended.
    db_info_global_path = db_infos_path
    global_db_path = (root_path / (args.new_db_name + '.npy'))
    db_infos = pkl.load(open(db_infos_path, 'rb'))
    db_info_global = copy.deepcopy(db_infos)
    start_idx = 0
    global_db_list = []
    for (category, class_info) in db_infos.items():
        print(('>>> Start processing %s' % category))
        for (idx, info) in tqdm.tqdm(enumerate(class_info), total=len(class_info)):
            obj_path = (root_path / info['path'])
            obj_points = np.fromfile(str(obj_path), dtype=np.float32).reshape([(- 1), args.num_point_features])
            num_points = obj_points.shape[0]
            if (num_points != info['num_points_in_gt']):
                # Point-count mismatch: the file was probably stored as float64;
                # re-read and downcast.
                obj_points = np.fromfile(str(obj_path), dtype=np.float64).reshape([(- 1), args.num_point_features])
                num_points = obj_points.shape[0]
                obj_points = obj_points.astype(np.float32)
            assert (num_points == info['num_points_in_gt'])
            # Row range of this object inside the concatenated database.
            db_info_global[category][idx]['global_data_offset'] = (start_idx, (start_idx + num_points))
            start_idx += num_points
            global_db_list.append(obj_points)
    global_db = np.concatenate(global_db_list)
    with open(global_db_path, 'wb') as f:
        np.save(f, global_db)
    with open(db_info_global_path, 'wb') as f:
        pkl.dump(db_info_global, f)
    print(f'Successfully create integrated database at {global_db_path}')
    print(f'Successfully create integrated database info at {db_info_global_path}')
    return (db_info_global, global_db)
def test_apply_parallel():
    """apply_parallel must match the direct call for chunked and dask inputs."""
    a = np.arange(144).reshape(12, 12).astype(float)
    # Chunked threshold_local with extra args/kwargs equals the direct call.
    expected1 = threshold_local(a, 3)
    result1 = apply_parallel(threshold_local, a, chunks=(6, 6), depth=5, extra_arguments=(3,), extra_keywords={'mode': 'reflect'})
    assert_array_almost_equal(result1, expected1)
    def wrapped_gauss(arr):
        return gaussian(arr, 1, mode='reflect')
    expected2 = gaussian(a, 1, mode='reflect')
    result2 = apply_parallel(wrapped_gauss, a, chunks=(6, 6), depth=5)
    assert_array_almost_equal(result2, expected2)
    # A dask-array input with compute=True must yield a plain ndarray.
    expected3 = gaussian(a, 1, mode='reflect')
    result3 = apply_parallel(wrapped_gauss, da.from_array(a, chunks=(6, 6)), depth=5, compute=True)
    assert isinstance(result3, np.ndarray)
    assert_array_almost_equal(result3, expected3)
class HardSigmoidJit(nn.Module):
    """Module wrapper around the jit-scripted ``hard_sigmoid_jit`` activation.

    NOTE(review): ``inplace`` is accepted for interface compatibility but is
    not used by this implementation.
    """

    def __init__(self, inplace: bool=False):
        super().__init__()

    def forward(self, x):
        return hard_sigmoid_jit(x)
class Recommender(BaseRecommender, ABC):
    """Public recommender interface: thin wrappers delegating to the
    BaseRecommender ``*_wrap`` implementations."""

    def fit(self, dataset: Dataset) -> None:
        """Train the model on ``dataset``."""
        self._fit_wrap(dataset=dataset)

    def predict(self, dataset: Dataset, k: int, queries: Optional[Union[(SparkDataFrame, Iterable)]]=None, items: Optional[Union[(SparkDataFrame, Iterable)]]=None, filter_seen_items: bool=True, recs_file_path: Optional[str]=None) -> Optional[SparkDataFrame]:
        """Return top-``k`` recommendations (or write them to ``recs_file_path``)."""
        return self._predict_wrap(dataset=dataset, k=k, queries=queries, items=items, filter_seen_items=filter_seen_items, recs_file_path=recs_file_path)

    def predict_pairs(self, pairs: SparkDataFrame, dataset: Optional[Dataset]=None, recs_file_path: Optional[str]=None, k: Optional[int]=None) -> Optional[SparkDataFrame]:
        """Score the given (query, item) pairs."""
        return self._predict_pairs_wrap(pairs=pairs, dataset=dataset, recs_file_path=recs_file_path, k=k)

    def fit_predict(self, dataset: Dataset, k: int, queries: Optional[Union[(SparkDataFrame, Iterable)]]=None, items: Optional[Union[(SparkDataFrame, Iterable)]]=None, filter_seen_items: bool=True, recs_file_path: Optional[str]=None) -> Optional[SparkDataFrame]:
        """Fit on ``dataset`` and immediately predict top-``k``."""
        return self._fit_predict(dataset=dataset, k=k, queries=queries, items=items, filter_seen_items=filter_seen_items, recs_file_path=recs_file_path)

    def get_features(self, ids: SparkDataFrame) -> Optional[Tuple[(SparkDataFrame, int)]]:
        """Return learned features for the given ids, if the model exposes any."""
        return self._get_features_wrap(ids, None)
class AnnotationItem(object):
    """One highlighted span in a Cython-style HTML annotation page."""

    def __init__(self, style, text, tag='', size=0):
        # CSS style class, hover tooltip text, visible tag, and span length.
        self.style = style
        self.text = text
        self.tag = tag
        self.size = size

    def start(self):
        """Return the opening HTML for this span (tooltip in the title attr)."""
        return u"<span class='cython tag {0}' title='{1}'>{2}".format(self.style, self.text, self.tag)

    def end(self):
        """Return (span length, closing HTML)."""
        return (self.size, u'</span>')
class LoderunnerCtrlProblem(LoderunnerProblem):
    """Controllable variant of the Loderunner PCG problem: adds conditional
    targets/bounds over level metrics instead of a fixed reward."""

    def __init__(self):
        super(LoderunnerCtrlProblem, self).__init__()
        # Upper bound on a traversal path: a serpentine sweep of the grid.
        self._max_path_length = ((((np.ceil((self._width / 2)) * self._height) + np.floor((self._height / 2))) * 2) - 1)
        # NOTE(review): no-op self-assignment — possibly a placeholder for a
        # re-weighting that was removed; confirm.
        self._reward_weights = self._reward_weights
        self.static_trgs = {'player': 1, 'enemies': 2, 'gold': (1, 10), 'win': 1, 'path-length': self._max_path_length}
        # NOTE(review): cond_trgs aliases static_trgs (same dict object).
        self.cond_trgs = self.static_trgs
        max_n_tile = (self._height * self._width)
        self.cond_bounds = {'player': (0, max_n_tile), 'enemies': (0, max_n_tile), 'gold': (0, 10), 'win': (0, 1), 'path-length': (0, self._max_path_length)}

    def get_episode_over(self, new_stats, old_stats):
        """Episodes never terminate based on stats in the controllable setting."""
        return False

    def get_reward(self, new_stats, old_stats):
        """No intrinsic reward; reward is computed elsewhere from the targets."""
        return None
def validate_iban(df: Union[(str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame)], column: str='') -> Union[(bool, pd.Series, pd.DataFrame)]:
    """Validate IBAN values in a string, Series or DataFrame.

    For a Series, every element is checked; for a DataFrame, either the named
    ``column`` or (when no column is given) every cell; otherwise ``df`` is
    treated as a single IBAN string.
    """
    if isinstance(df, (pd.Series, dd.Series)):
        return df.apply(iban.is_valid)
    if isinstance(df, (pd.DataFrame, dd.DataFrame)):
        if (column != ''):
            return df[column].apply(iban.is_valid)
        return df.applymap(iban.is_valid)
    # Scalar input: validate the single value directly.
    return iban.is_valid(df)
class EnvSpec(InOutSpec):
    """Environment specification mapping (action, observation) spaces onto an
    InOutSpec's (input, output) spaces.

    Fixed: the property/setter decorators had been mangled into bare
    ``_space.setter`` expression lines (a NameError at class-creation time)
    and the getters lacked ``@property``; the standard property/setter pairs
    are restored.
    """

    def __init__(self, observation_space, action_space):
        # InOutSpec stores (input, output) = (action, observation).
        super().__init__(action_space, observation_space)

    @property
    def action_space(self):
        """The action space (InOutSpec's input space)."""
        return self.input_space

    @property
    def observation_space(self):
        """The observation space (InOutSpec's output space)."""
        return self.output_space

    @action_space.setter
    def action_space(self, action_space):
        self._input_space = action_space

    @observation_space.setter
    def observation_space(self, observation_space):
        self._output_space = observation_space

    def __eq__(self, other):
        return ((self.observation_space == other.observation_space) and (self.action_space == other.action_space))
((device_cc() < 80), 'Device compute capability is insufficient for SM80 tests.')  # NOTE(review): truncated decorator — likely a stripped `@unittest.skipIf(...)`.
class GemmF64TensorOpSm80(unittest.TestCase):
    """CUTLASS SM80 double-precision tensor-op GEMM smoke tests."""

    def test_SM80_Device_Gemm_f64n_f64t_f64t_tensor_op_f64_32x32x16_16x16x16(self):
        """f64 GEMM, A column-major / B row-major, 32x32x16 threadblock tile."""
        math_inst = MathInstruction(instruction_shape=[8, 8, 4], element_a=cutlass.float64, element_b=cutlass.float64, element_accumulator=cutlass.float64, opcode_class=cutlass.OpClass.TensorOp, math_operation=MathOperation.multiply_add)
        tile_description = TileDescription(threadblock_shape=[32, 32, 16], stages=4, warp_count=[2, 2, 1], math_instruction=math_inst)
        A = TensorDescription(element=cutlass.float64, layout=cutlass.ColumnMajor, alignment=1)
        B = TensorDescription(element=cutlass.float64, layout=cutlass.RowMajor, alignment=1)
        C = TensorDescription(element=cutlass.float64, layout=cutlass.RowMajor, alignment=1)
        element_epilogue = cutlass.float64
        epilogue_functor = LinearCombination(C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
        swizzling_functor = cutlass.IdentitySwizzle1
        operation = GemmOperationUniversal(arch=80, tile_description=tile_description, A=A, B=B, C=C, epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor)
        self.assertTrue(test_all_gemm(operation, 'universal'))

    def test_SM80_Device_Gemm_f64t_f64n_f64t_tensor_op_f64_64x64x16_32x32x16(self):
        """f64 GEMM, A row-major / B column-major, 64x64x16 threadblock tile."""
        math_inst = MathInstruction(instruction_shape=[8, 8, 4], element_a=cutlass.float64, element_b=cutlass.float64, element_accumulator=cutlass.float64, opcode_class=cutlass.OpClass.TensorOp, math_operation=MathOperation.multiply_add)
        tile_description = TileDescription(threadblock_shape=[64, 64, 16], stages=4, warp_count=[2, 2, 1], math_instruction=math_inst)
        A = TensorDescription(element=cutlass.float64, layout=cutlass.RowMajor, alignment=1)
        B = TensorDescription(element=cutlass.float64, layout=cutlass.ColumnMajor, alignment=1)
        C = TensorDescription(element=cutlass.float64, layout=cutlass.RowMajor, alignment=1)
        element_epilogue = cutlass.float64
        epilogue_functor = LinearCombination(C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
        swizzling_functor = cutlass.IdentitySwizzle1
        operation = GemmOperationUniversal(arch=80, tile_description=tile_description, A=A, B=B, C=C, epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor)
        self.assertTrue(test_all_gemm(operation, 'universal'))
def is_exact_match(answer_object, prediction):
    """Return True if ``prediction`` exactly matches any ground-truth answer
    derived from ``answer_object``."""
    return any(
        exact_match_score(prediction, ground_truth)
        for ground_truth in get_ground_truths(answer_object)
    )
def _set_up_aliases():
    """Register historical numpy-style scalar-type aliases.

    Copies each alias entry in ``allTypes``/``sctypeDict`` from its canonical
    type, then removes names that would shadow Python builtins or are handled
    elsewhere.
    """
    type_pairs = [('complex_', 'cdouble'), ('int0', 'intp'), ('uint0', 'uintp'), ('single', 'float'), ('csingle', 'cfloat'), ('singlecomplex', 'cfloat'), ('float_', 'double'), ('intc', 'int'), ('uintc', 'uint'), ('int_', 'long'), ('uint', 'ulong'), ('cfloat', 'cdouble'), ('longfloat', 'longdouble'), ('clongfloat', 'clongdouble'), ('longcomplex', 'clongdouble'), ('bool_', 'bool'), ('bytes_', 'string'), ('string_', 'string'), ('unicode_', 'unicode'), ('object_', 'object')]
    # 'str_' maps to unicode on Python 3, bytes-string on Python 2.
    if (sys.version_info[0] >= 3):
        type_pairs.extend([('str_', 'unicode')])
    else:
        type_pairs.extend([('str_', 'string')])
    for (alias, t) in type_pairs:
        allTypes[alias] = allTypes[t]
        sctypeDict[alias] = sctypeDict[t]
    # Drop names that clash with Python builtins / are exposed differently.
    to_remove = ['ulong', 'object', 'int', 'float', 'complex', 'bool', 'string', 'datetime', 'timedelta']
    if (sys.version_info[0] >= 3):
        to_remove.extend(['bytes', 'str'])
    else:
        to_remove.extend(['unicode', 'long'])
    for t in to_remove:
        try:
            del allTypes[t]
            del sctypeDict[t]
        except KeyError:
            # Name may already be absent from either dict; that's fine.
            pass
_interact(expo=(lambda : slider((- 10), 10, 0.1, 2)), c_real=(lambda : slider((- 2), 2, 0.01, 0.5, label='real part const.')), c_imag=(lambda : slider((- 2), 2, 0.01, 0.5, label='imag part const.')), iterations=(lambda : slider(1, 100, 1, 20, label='# iterations')), zoom_x=(lambda : range_slider((- 2), 2, 0.01, ((- 1.5), 1.5), label='Zoom X')), zoom_y=(lambda : range_slider((- 2), 2, 0.01, ((- 1.5), 1.5), label='Zoom Y')), plot_points=(lambda : slider(20, 400, 20, default=150, label='plot points')), dpi=(lambda : slider(20, 200, 10, default=80, label='dpi')))  # NOTE(review): truncated decorator — likely a stripped `@library_interact(...)`.
def julia(expo, c_real, c_imag, iterations, zoom_x, zoom_y, plot_points, dpi):
    """Sage interact: plot the Julia set of z -> z^expo + (c_real + c_imag*i)."""
    z = SR.var('z')
    I = CDF.gen()
    f = symbolic_expression((((z ** expo) + c_real) + (c_imag * I))).function(z)
    # Compile the symbolic map for fast per-point iteration over CDF.
    ff_j = fast_callable(f, vars=[z], domain=CDF)
    from sage.interacts.library_cython import julia
    html('<h2>Julia Fractal</h2>')
    html(('Recursive Formula: $z \\leftarrow z^{%.2f} + (%.2f+%.2f*\\mathbb{I})$' % (expo, c_real, c_imag)))
    complex_plot((lambda z: julia(ff_j, z, iterations)), zoom_x, zoom_y, plot_points=plot_points, dpi=dpi).show(frame=True, aspect_ratio=1)
def test_preload():
    """After the server has had time to preload, annotation must be fast (<3s)."""
    with corenlp.CoreNLPClient(server_id='test_server_start_preload') as client:
        # Give the server time to finish loading its models before timing.
        time.sleep(140)
        results = annotate_and_time(client, EN_DOC)
        compare_ignoring_whitespace(results['annotation'], EN_PRELOAD_GOLD)
        assert ((results['end_time'] - results['start_time']) < 3)
def augment_edit_distance(candidates_info):
    """Annotate candidate logical forms with semantic-match flags and pick a
    target expression per instance.

    For each instance, every candidate is compared to the gold canonical
    expression via SemanticMatcher; if the gold is only semantically (not
    textually) covered, the closest matching candidate becomes the training
    target. Prints coverage/exact-match statistics and returns the augmented
    instance list.
    """
    (reverse_properties, relation_dr, relations, upper_types, types) = process_ontology('ontology/fb_roles', 'ontology/fb_types', 'ontology/reverse_properties')
    matcher = SemanticMatcher(reverse_properties, relation_dr, relations, upper_types, types)
    hit_chance = 0
    ex_chance = 0
    count = 0
    augmented_lists = []
    for (i, instance) in enumerate(candidates_info):
        candidates = instance['candidates']
        gt = instance['canonical_expr']
        print(i, len(candidates))
        aux_candidates = []
        for c in candidates:
            # 'null' gold means there is no target; nothing can match it.
            if (gt == 'null'):
                ex = False
            else:
                ex = matcher.same_logical_form(gt, c)
            aux_candidates.append({'logical_form': c, 'ex': ex})
        # Covered: some candidate is semantically equivalent to the gold.
        is_covered = any([x['ex'] for x in aux_candidates])
        hit_chance += is_covered
        # Exact: some candidate is textually identical to the gold.
        is_exact = any([(x['logical_form'] == gt) for x in aux_candidates])
        ex_chance += is_exact
        if (is_covered and (not is_exact)):
            # Semantically-equivalent candidates exist but none is verbatim:
            # retarget training to the closest equivalent candidate.
            alter_targets = [x['logical_form'] for x in aux_candidates if x['ex']]
            if (len(alter_targets) == 1):
                target_expr = alter_targets[0]
            else:
                selected = pick_closest_target_expr(gt, alter_targets)
                target_expr = selected
        else:
            target_expr = gt
        instance['candidates'] = aux_candidates
        instance['target_expr'] = target_expr
        count += 1
        augmented_lists.append(instance)
    print('Coverage', hit_chance, count, (hit_chance / count))
    print('Exact', ex_chance, count, (ex_chance / count))
    return augmented_lists
class _BNBase(nn.Sequential):
def __init__(self, in_size, batch_norm=None, name=''):
super(_BNBase, self).__init__()
self.add_module((name + 'bn'), batch_norm(in_size))
nn.init.constant_(self[0].weight, 1.0)
nn.init.constant_(self[0].bias, 0) |
def nodes_leq_threshold_matching(graph: Graph, node_weight_function, edge_weight_function, L, uf: UnionFind, verbose=False, record_history=False, threshold=0):
    """Coarsen ``graph`` by repeatedly merging nodes whose weight is <= threshold.

    Processes nodes in ascending weight order (a ValueSortedDict), preferring to
    merge a light node with its lightest in-edge neighbor, falling back to an
    out-edge neighbor. Stops when fewer than ``L`` nodes remain, when the
    lightest remaining node exceeds ``threshold``, or when nothing can be merged.
    Returns (prev_graph, matching=None, graph, uf, uf2).
    """
    # Snapshot of the graph before any merging.
    prev_graph = Graph.from_other(graph)
    # Second union-find tracking only the merges made in this pass.
    uf2 = UnionFind(elements=graph._nodes.keys())
    hd = ValueSortedDict({n: node_weight_function(n) for n in graph.non_input_nodes})
    total_merged = 0

    def inner_loop():
        """Try one merge; returns (merged_something, merged_weight, hit_threshold)."""
        for (u, weight_of_u) in hd.items():
            if (weight_of_u > threshold):
                # hd is sorted by weight, so all remaining nodes exceed the threshold.
                print(f'done with nodes <= threshold {threshold}, breaking (last weight: {weight_of_u}). merged {total_merged}')
                return (False, None, True)
            u: Node
            # Prefer merging u backward into its lightest predecessor.
            for v in sorted(u.in_edges, key=(lambda n: node_weight_function(n))):
                if (v in graph.inputs):
                    continue
                if check_cycle2(graph, v, u):
                    # Merging would create a cycle; try the next neighbor.
                    continue
                graph.merge(uid=v.id, vid=u.id, edge_weight_function=edge_weight_function, uf=uf)
                uf.union(v.id, u.id)
                uf2.union(v.id, u.id)
                # u is absorbed into v: drop both, re-insert v with its new weight.
                hd.pop(v)
                hd.pop(u)
                hd[v] = node_weight_function(v)
                return (True, weight_of_u, False)
            # Fall back to merging u forward into a successor.
            for v in sorted(u.out_edges, key=(lambda n: node_weight_function(n))):
                if check_cycle2(graph, u, v):
                    continue
                warnings.warn(f"can't merge small node {u} backward, will merge forward and lose the name of {v}.")
                graph.merge(uid=u.id, vid=v.id, edge_weight_function=edge_weight_function, uf=uf)
                uf.union(u.id, v.id)
                uf2.union(u.id, v.id)
                # v is absorbed into u: drop both, re-insert u with its new weight.
                hd.pop(u)
                hd.pop(v)
                hd[u] = node_weight_function(u)
                return (True, weight_of_u, False)
        # No mergeable pair found anywhere.
        return (False, None, False)
    history_sizes = []
    history_weights = []
    while (len(hd) > L):
        (merged_something, weight_of_u, threshold_cond) = inner_loop()
        if threshold_cond:
            break
        if (not merged_something):
            break
        total_merged += 1
        if record_history:
            history_sizes.append((len(hd) + 1))
            history_weights.append(weight_of_u)
        if verbose:
            print(f'Nodes: {len(hd)}, Smallest: {weight_of_u}')
    # NOTE(review): no explicit matching is produced by this strategy.
    matching = None
    return (prev_graph, matching, graph, uf, uf2)
def generate(node, environment, name, filename, stream=None, defer_init=False, optimized=True):
    """Compile a template node tree to Python source.

    When ``stream`` is None the generated source is returned as a string;
    otherwise it is written to ``stream`` and None is returned.
    """
    if not isinstance(node, nodes.Template):
        raise TypeError("Can't compile non template nodes")
    code_gen = environment.code_generator_class(environment, name, filename, stream, defer_init, optimized)
    code_gen.visit(node)
    if stream is not None:
        return None
    return code_gen.stream.getvalue()
class DataTrainingArguments():
    """Arguments describing the data used for language-model training/eval.

    NOTE(review): every attribute uses a dataclass-style ``field(...)``
    default, so this class is presumably decorated with ``@dataclass`` at
    its definition site (the decorator is not visible in this chunk) --
    confirm.  The ``metadata['help']`` strings double as CLI help text.
    """
    # Path to the training corpus (a plain text file).
    train_data_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'})
    # Optional evaluation corpus used to compute perplexity.
    eval_data_file: Optional[str] = field(default=None, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'})
    # Treat each line of the files as an independent training sequence.
    line_by_line: bool = field(default=False, metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'})
    # Use masked-LM loss instead of causal language-modeling loss.
    mlm: bool = field(default=False, metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'})
    # Fraction of tokens to mask when `mlm` is enabled.
    mlm_probability: float = field(default=0.15, metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'})
    # -1 means "fall back to the model's maximum input length".
    block_size: int = field(default=(- 1), metadata={'help': 'Optional input sequence length after tokenization.The training dataset will be truncated in block of this size for training.Default to the model max input length for single sentence inputs (take into account special tokens).'})
    # Rebuild the cached datasets even when a cache already exists.
    overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
def cli_main():
    """CLI entry point: parse reranking options and reprocess the n-best list."""
    reranking_parser = rerank_options.get_reranking_parser()
    parsed_args = options.parse_args_and_arch(reranking_parser)
    gen_and_reprocess_nbest(parsed_args)
def get_optimizer(cfg, model):
    """Build the optimizer described by ``cfg.TRAIN.OPTIMIZER`` for *model*.

    Only parameters with ``requires_grad`` set are optimized.  Supported
    types are ``'SGD'`` (gradient-centralized SGD with Nesterov momentum)
    and ``'ADAM'``.

    Raises:
        ValueError: for an unrecognized optimizer type.  (Previously an
            unknown type fell through both branches and the final
            ``return optimizer`` raised UnboundLocalError.)
    """
    base_lr = cfg.TRAIN.OPTIMIZER.BASE_LR
    # Skip frozen parameters so they receive no updates.
    params = [{'params': p} for _, p in model.named_parameters() if p.requires_grad]
    opt_type = cfg.TRAIN.OPTIMIZER.TYPE
    if opt_type == 'SGD':
        optimizer = SGD_GC(params, lr=base_lr,
                           momentum=cfg.TRAIN.OPTIMIZER.MOMENTUM,
                           weight_decay=cfg.TRAIN.OPTIMIZER.WEIGHT_DECAY,
                           nesterov=True)
    elif opt_type == 'ADAM':
        optimizer = torch.optim.Adam(params, lr=base_lr, betas=(0.9, 0.999),
                                     weight_decay=cfg.TRAIN.OPTIMIZER.WEIGHT_DECAY)
    else:
        raise ValueError('Unsupported optimizer type: {}'.format(opt_type))
    return optimizer
class _ValgrindWrapper(object):
    """Run snippets under valgrind/callgrind and parse instruction counts.

    Verifies platform and tool availability up front, runs a generated
    script under ``valgrind --tool=callgrind`` in a scratch directory, and
    parses ``callgrind_annotate`` output into FunctionCount tuples
    (optionally subtracting a cached 'pass' baseline).
    """
    def __init__(self) -> None:
        # command name -> True if `which` found it on PATH.
        self._commands_available: Dict[(str, bool)] = {}
        if torch._C._valgrind_supported_platform():
            for cmd in ('valgrind', 'callgrind_control', 'callgrind_annotate'):
                # `which` exits 0 when the command exists; invert to a bool.
                self._commands_available[cmd] = (not subprocess.run(['which', cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE).returncode)
        self._build_type: Optional[str] = None
        # Extract BUILD_TYPE (e.g. RelWithDebInfo) from torch's build info.
        build_search = re.search('BUILD_TYPE=(.+),', torch.__config__.show())
        if (build_search is not None):
            self._build_type = build_search.groups()[0].split(',')[0]
        # (number, num_threads) -> (inclusive, exclusive) stats for `pass`.
        self._baseline_cache: Dict[(Tuple[(int, int)], Tuple[(Tuple[(FunctionCount, ...)], Tuple[(FunctionCount, ...)])])] = {}
    def _validate(self) -> None:
        """Raise OSError when callgrind profiling cannot run on this host."""
        if (not torch._C._valgrind_supported_platform()):
            raise OSError('Valgrind is not supported on this platform.')
        missing_cmds = [cmd for (cmd, available) in self._commands_available.items() if (not available)]
        if missing_cmds:
            raise OSError(('Missing: ' + ', '.join(missing_cmds)))
    def collect_callgrind(self, stmt: str, setup: str, number: int, num_threads: int, collect_baseline: bool) -> CallgrindStats:
        """Profile `stmt` (run `number` times after `setup`) under callgrind."""
        self._validate()
        baseline_inclusive_stats: Tuple[(FunctionCount, ...)] = ()
        baseline_exclusive_stats: Tuple[(FunctionCount, ...)] = ()
        if collect_baseline:
            # The baseline measures interpreter overhead; cache per config.
            cache_key = (number, num_threads)
            if (cache_key not in self._baseline_cache):
                self._baseline_cache[cache_key] = self._invoke(stmt='pass', setup='pass', number=number, num_threads=num_threads)
            (baseline_inclusive_stats, baseline_exclusive_stats) = self._baseline_cache[cache_key]
        (stmt_inclusive_stats, stmt_exclusive_stats) = self._invoke(stmt=stmt, setup=setup, number=number, num_threads=num_threads)
        return CallgrindStats(stmt=stmt, setup=setup, number_per_run=number, num_threads=num_threads, built_with_debug_symbols=(self._build_type == 'RelWithDebInfo'), baseline_inclusive_stats=baseline_inclusive_stats, baseline_exclusive_stats=baseline_exclusive_stats, stmt_inclusive_stats=stmt_inclusive_stats, stmt_exclusive_stats=stmt_exclusive_stats)
    def _invoke(self, stmt: str, setup: str, number: int, num_threads: int) -> Tuple[(Tuple[(FunctionCount, ...)], Tuple[(FunctionCount, ...)])]:
        """Run one valgrind collection; return (inclusive, exclusive) counts."""
        working_dir = tempfile.mkdtemp()
        script_file = os.path.join(working_dir, 'timer_callgrind.py')
        callgrind_out = os.path.join(working_dir, 'callgrind.out')
        error_log = os.path.join(working_dir, 'error.txt')
        stat_log = os.path.join(working_dir, 'callgrind_stat.txt')
        stdout_stderr_log = os.path.join(working_dir, 'stdout_stderr.log')
        def run(args: List[str], **kwargs: Any) -> Tuple[(subprocess.CompletedProcess, str)]:
            # Route both streams to a file (not PIPE, which can deadlock on
            # large output), then read the combined output back.
            f_stdout_stderr = open(stdout_stderr_log, 'wb')
            try:
                invocation = subprocess.run(args, stdout=f_stdout_stderr, stderr=subprocess.STDOUT, **kwargs)
                with open(stdout_stderr_log, 'rt') as f:
                    return (invocation, f.read())
            finally:
                f_stdout_stderr.close()
        try:
            with open(script_file, 'wt') as f:
                f.write(self._construct_script(stmt=stmt, setup=setup, number=number, num_threads=num_threads, error_log=error_log, stat_log=stat_log))
            (valgrind_invocation, valgrind_invocation_output) = run(['valgrind', '--tool=callgrind', f'--callgrind-out-file={callgrind_out}', '--dump-line=yes', '--dump-instr=yes', '--instr-atstart=yes', '--collect-atstart=no', 'python', script_file])
            if valgrind_invocation.returncode:
                # Prefer the script's own error log; fall back to raw output.
                error_report = ''
                if os.path.exists(error_log):
                    with open(error_log, 'rt') as f:
                        error_report = f.read()
                if (not error_report):
                    error_report = ('Unknown error.\n' + valgrind_invocation_output)
                raise OSError(f'''Failed to collect callgrind profile:
{error_report}''')
            def parse_output(inclusive: bool) -> Tuple[(FunctionCount, ...)]:
                # Parse `callgrind_annotate` lines of the form
                # "<count>  <file>:<function>" into FunctionCount records.
                (annotate_invocation, annotate_invocation_output) = run(['callgrind_annotate', f"--inclusive={('yes' if inclusive else 'no')}", callgrind_out], check=True)
                begin_collecting = False
                fn_counts = []
                for l in annotate_invocation_output.splitlines(keepends=False):
                    if ((not begin_collecting) and re.match('Ir\\s+file:function', l)):
                        begin_collecting = True
                        continue
                    count_match = re.match('^\\s*([0-9,]+)\\s+(.+:.+)$', l)
                    if count_match:
                        (ir_str, file_function) = count_match.groups()
                        ir = int(ir_str.replace(',', ''))
                        fn_counts.append(FunctionCount(ir, file_function))
                        continue
                    if (begin_collecting and re.match('-+', l)):
                        continue
                    begin_collecting = False
                return tuple(fn_counts)
            return (parse_output(inclusive=True), parse_output(inclusive=False))
        finally:
            # Always clean up the scratch directory.
            shutil.rmtree(working_dir)
    def _construct_script(stmt: str, setup: str, number: int, num_threads: int, error_log: str, stat_log: str) -> str:
        # NOTE(review): defined without `self` -- presumably a @staticmethod
        # whose decorator was lost in extraction; confirm against upstream.
        # Builds the Python script run under valgrind: it validates the
        # interpreter/torch install, runs `setup` plus a warmup, then toggles
        # callgrind collection around the (partially unrolled) `stmt` loop.
        block_size = 100
        loop_count = (number // block_size)
        remainder = (number - (block_size * loop_count))
        blocked_stmt = ''
        if loop_count:
            # Unroll `stmt` in blocks of 100 to reduce loop-overhead noise.
            unrolled_stmts = textwrap.indent('\n'.join(([stmt] * block_size)), (' ' * 4))
            blocked_stmt += f'''for _ in range({loop_count}):
{unrolled_stmts}
'''
        if remainder:
            blocked_stmt += '\n'.join(([stmt] * remainder))
        return textwrap.dedent('\n import gc\n import os\n import subprocess\n import sys\n import time\n\n import torch\n torch.set_num_threads({num_threads})\n\n PID = os.getpid()\n\n def log_failure(msg):\n with open({error_log_repr}, "wt") as f:\n f.write(msg)\n sys.exit(1)\n\n def check_result(completed_process):\n if completed_process.returncode:\n log_failure(f"Command failed: {{\' \'.join(completed_process.args)}}")\n return completed_process\n\n # \n # == Check that subprocess matches parent \n # \n if sys.executable != "{parent_interpreter}":\n log_failure(\n "Interpreter mismatch:\\n"\n f" {{sys.executable}}\\n vs.\\n {parent_interpreter}"\n )\n\n if torch.__file__ != "{torch_file}":\n log_failure(\n "PyTorch does not match expected file:\\n"\n f" {{torch.__file__}}\\n vs.\\n {torch_file}"\n )\n\n # \n # == User specified setup \n # \n {setup}\n\n for _ in range({warmup_number}):\n {indented_stmt}\n\n # \n # == Callgrind management \n # \n with open("{stat_log}", "wb") as stat_file:\n # If many instances of callgrind are running at once, the output of\n # `callgrind_control` may exceed 16kb which would cause `subprocess.PIPE`\n # to deadlock. 
So instead we use a file.\n callgrind_stat = check_result(subprocess.run(\n ["callgrind_control", "--stat"],\n stdout=stat_file,\n stderr=subprocess.STDOUT,\n ))\n\n with open("{stat_log}", "rt") as stat_file:\n stat_lines = stat_file.read().splitlines()\n\n if f"PID {{PID}}: python {{__file__}}" not in stat_lines:\n log_failure("Process does not appear to be running callgrind.")\n\n gc.collect()\n time.sleep(0.01)\n\n # \n # == User code block \n # \n torch._C._valgrind_toggle()\n {blocked_stmt}\n\n # Sleep is to allow the interpreter to catch up before we stop collecting in\n # order to reduce jitter.\n time.sleep(0.01)\n torch._C._valgrind_toggle()\n ').strip().format(indented_stmt=textwrap.indent(stmt, (' ' * 4)), blocked_stmt=blocked_stmt, number=number, setup=setup, warmup_number=min(number, 10), num_threads=num_threads, error_log_repr=repr(error_log), stat_log=stat_log, parent_interpreter=sys.executable, torch_file=torch.__file__)
def main(device='cpu'):
    """Run the Enhance-GAN sample experiment end to end on *device*.

    Loads hyperparams.yaml next to this file, prepares the sample ASR
    data, fits the GAN brain, evaluates it and asserts the test loss is
    below the expected bound.
    """
    here = pathlib.Path(__file__).resolve().parent
    with open(here / 'hyperparams.yaml') as fin:
        hparams = load_hyperpyyaml(fin)
    data_root = (here / '../../samples/ASR').resolve()
    train_data, valid_data = data_prep(data_root)
    brain = EnhanceGanBrain(
        modules=hparams['modules'],
        hparams=hparams,
        run_opts={'device': device},
    )
    loader_kwargs = hparams['dataloader_options']
    brain.fit(
        range(hparams['N_epochs']),
        train_data,
        valid_data,
        train_loader_kwargs=loader_kwargs,
        valid_loader_kwargs=loader_kwargs,
    )
    brain.evaluate(valid_data)
    assert brain.test_loss < 0.002
def Horn(n, k):
    """Return the (n, k)-horn: all faces of the n-simplex except the k-th."""
    simplex = Simplex(n)
    top_cell = simplex.n_cells(n)[0]
    faces = simplex.faces(top_cell)
    # Drop exactly the k-th face when building the sub-simplicial set.
    horn = simplex.subsimplicial_set(faces[:k] + faces[(k + 1):])
    horn.rename('({}, {})-Horn'.format(n, k))
    horn.rename_latex('\\Lambda^{{{}}}_{{{}}}'.format(n, k))
    return horn
def get_saved_model_type_and_estimator(datasource, model_name):
    """Return (model type, estimator class name) for a model saved in the DB."""
    metadata = Model.load_metadata_from_db(datasource, model_name)
    model_type = metadata.get_type()
    estimator_class = metadata.get_meta('class_name')
    return (model_type, estimator_class)
class BroadcastRowBench(BroadcastMulBench):
    """Row-broadcast variant of the broadcast-multiply benchmark."""

    def __init__(self, mode, device, dtype, M, N, K):
        super(BroadcastRowBench, self).__init__(mode, device, dtype, 'row', M, N, K)

    @staticmethod
    def module():
        # Benchmark identifier.  Was a plain zero-arg function, so calling
        # it on an instance raised TypeError; @staticmethod keeps the
        # zero-arg signature working from both the class and instances.
        return 'broadcast_row'
_model
def tresnet_m(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Build a TResNet-M; optionally load its pretrained weights."""
    cfg = default_cfgs['tresnet_m']
    net = TResNet(layers=[3, 4, 11, 3], num_classes=num_classes, in_chans=in_chans, **kwargs)
    net.default_cfg = cfg
    if pretrained:
        load_pretrained(net, cfg, num_classes, in_chans)
    return net
def train(config):
    """Train a SequenceLightningModule according to *config*.

    Handles seeding, optional pretrained-checkpoint loading (with
    per-layer ignore lists and encoder freezing), optional validation
    before training, fitting (optionally resuming from config.train.ckpt)
    and final testing.
    """
    if config.train.seed is not None:
        pl.seed_everything(config.train.seed, workers=True)
    trainer = create_trainer(config)
    model = SequenceLightningModule(config)
    if config.train.get('pretrained_model_path', None) is not None:
        # Load the checkpoint into a separate module first: the freshly
        # initialized `model` is still needed as the source of weights for
        # layers listed in ignore_pretrained_layers.  (Previously the fresh
        # model was overwritten before `pretrained_model` was ever bound,
        # so the ignore branch raised NameError.)
        pretrained_model = SequenceLightningModule.load_from_checkpoint(
            config.train.pretrained_model_path,
            config=config,
            strict=config.train.pretrained_model_strict_load,
        )
        print('Loaded pretrained model from', config.train.pretrained_model_path)
        if config.train.get('ignore_pretrained_layers', False):
            pretrained_dict = pretrained_model.state_dict()
            model_dict = model.state_dict()
            for (k, v) in model_dict.items():
                for ignore_layer in config.train.ignore_pretrained_layers:
                    if ignore_layer in k:
                        # Keep the freshly initialized weights here.
                        pretrained_dict[k] = v
            model.load_state_dict(pretrained_dict)
        else:
            model = pretrained_model
        if config.train.get('pretrained_freeze_encoder', False):
            # Freeze everything except decoder parameters.
            for (name, param) in model.named_parameters():
                if 'decoder' not in name:
                    param.requires_grad = False
    if config.train.validate_at_start:
        print('Running validation before training')
        trainer.validate(model)
    if config.train.ckpt is not None:
        trainer.fit(model, ckpt_path=config.train.ckpt)
    else:
        trainer.fit(model)
    if config.train.test:
        trainer.test(model)
def test_model(clusters, other_clusters, model, device, topic_docs, is_event, epoch, topics_counter, topics_num, threshold, use_args_feats, use_binary_feats):
    # Evaluate the model on one topic (in place): refresh the argument
    # feature vectors, build all cluster pairs in test mode, then merge
    # clusters whose pair score passes `threshold`.
    # NOTE(review): presumably entity/event coreference clustering given
    # the `is_event` flag -- confirm against the helpers' definitions.
    update_args_feature_vectors(clusters, other_clusters, model, device, is_event)
    (cluster_pairs, _) = generate_cluster_pairs(clusters, is_train=False)
    merge(clusters, cluster_pairs, other_clusters, model, device, topic_docs, epoch, topics_counter, topics_num, threshold, is_event, use_args_feats, use_binary_feats)
_task('wsc')
class WSCTask(FairseqTask):
    """Task for the Winograd Schema Challenge (WSC).

    Each example is a sentence with a marked pronoun span, an optional
    gold query span and a set of candidate noun-chunk spans.  Spans are
    scored by substituting them for the pronoun, masking their tokens and
    measuring the model's log-probability of the original tokens.

    NOTE(review): `add_args`, `load_dictionary`, `setup_task`,
    `source_dictionary` and `target_dictionary` look like they lost their
    @staticmethod / @classmethod / @property decorators in extraction --
    confirm against the original source before relying on call signatures.
    """
    def add_args(parser):
        # Task-specific command-line flags.
        parser.add_argument('data', metavar='DIR', help='path to data directory; we load <split>.jsonl')
        parser.add_argument('--init-token', type=int, default=None, help='add token at the beginning of each batch item')
    def __init__(self, args, vocab):
        super().__init__(args)
        self.vocab = vocab
        # Dedicated symbol used to blank out a span when scoring it.
        self.mask = vocab.add_symbol('<mask>')
        self.bpe = encoders.build_bpe(args)
        self.tokenizer = encoders.build_tokenizer(args)
        # GPT-2 BPE attaches spaces to the *following* token, so space
        # bookkeeping moves to the leading side for that encoder.
        if (args.bpe == 'gpt2'):
            self.leading_space = True
            self.trailing_space = False
        else:
            self.leading_space = False
            self.trailing_space = True
    def load_dictionary(cls, filename):
        """Load the vocabulary and register the extra <mask> symbol."""
        dictionary = Dictionary.load(filename)
        dictionary.add_symbol('<mask>')
        return dictionary
    def setup_task(cls, args, **kwargs):
        """Build the task instance; requires the matching 'wsc' criterion."""
        assert (args.criterion == 'wsc'), 'Must set --criterion=wsc'
        vocab = cls.load_dictionary(os.path.join(args.data, 'dict.txt'))
        print('| dictionary: {} types'.format(len(vocab)))
        return cls(args, vocab)
    def load_dataset(self, split, epoch=0, combine=False, data_path=None, return_only=False, **kwargs):
        """Load <split>.jsonl into a shuffled NestedDictionaryDataset.

        For every sentence, binarizes the query span and every candidate
        span in sentence context, together with 0/1 masks marking the
        span's token positions.
        """
        def binarize(s: str, append_eos: bool=False):
            # tokenizer -> BPE -> vocab ids; optionally prepend init_token.
            if (self.tokenizer is not None):
                s = self.tokenizer.encode(s)
            if (self.bpe is not None):
                s = self.bpe.encode(s)
            tokens = self.vocab.encode_line(s, append_eos=append_eos, add_if_not_exist=False).long()
            if (self.args.init_token is not None):
                tokens = torch.cat([tokens.new([self.args.init_token]), tokens])
            return tokens
        if (data_path is None):
            data_path = os.path.join(self.args.data, (split + '.jsonl'))
        if (not os.path.exists(data_path)):
            raise FileNotFoundError('Cannot find data: {}'.format(data_path))
        query_tokens = []
        query_masks = []
        query_lengths = []
        candidate_tokens = []
        candidate_masks = []
        candidate_lengths = []
        labels = []
        for (sentence, pronoun_span, query, label) in wsc_utils.jsonl_iterator(data_path):
            # Text around the pronoun; whitespace is preserved so the
            # reassembled sentence binarizes consistently.
            prefix = sentence[:pronoun_span.start].text
            suffix = sentence[pronoun_span.end:].text_with_ws
            leading_space = (' ' if sentence[:pronoun_span.start].text_with_ws.endswith(' ') else '')
            trailing_space = (' ' if pronoun_span.text_with_ws.endswith(' ') else '')
            cand_spans = wsc_utils.filter_noun_chunks(wsc_utils.extended_noun_chunks(sentence), exclude_pronouns=True, exclude_query=query, exact_match=False)
            def binarize_with_mask(txt):
                # Binarize the sentence with `txt` substituted for the
                # pronoun, plus a mask over the substituted tokens.
                toks = binarize(((((prefix + leading_space) + txt) + trailing_space) + suffix), append_eos=True)
                mask = torch.zeros_like(toks, dtype=torch.uint8)
                mask_start = len(binarize(prefix))
                mask_size = len(binarize((leading_space + txt)))
                mask[mask_start:(mask_start + mask_size)] = 1
                return (toks, mask)
            if (query is not None):
                (query_toks, query_mask) = binarize_with_mask(query)
                query_len = len(query_toks)
            else:
                (query_toks, query_mask, query_len) = (None, None, 0)
            query_tokens.append(query_toks)
            query_masks.append(query_mask)
            query_lengths.append(query_len)
            (cand_toks, cand_masks) = ([], [])
            for cand_span in cand_spans:
                (toks, mask) = binarize_with_mask(cand_span.text)
                cand_toks.append(toks)
                cand_masks.append(mask)
            # Pad this sentence's candidates into one 2-D batch.
            cand_toks = data_utils.collate_tokens(cand_toks, pad_idx=self.vocab.pad())
            cand_masks = data_utils.collate_tokens(cand_masks, pad_idx=0)
            assert (cand_toks.size() == cand_masks.size())
            candidate_tokens.append(cand_toks)
            candidate_masks.append(cand_masks)
            candidate_lengths.append(cand_toks.size(1))
            labels.append(label)
        query_lengths = np.array(query_lengths)
        query_tokens = ListDataset(query_tokens, query_lengths)
        query_masks = ListDataset(query_masks, query_lengths)
        candidate_lengths = np.array(candidate_lengths)
        candidate_tokens = ListDataset(candidate_tokens, candidate_lengths)
        candidate_masks = ListDataset(candidate_masks, candidate_lengths)
        labels = ListDataset(labels, ([1] * len(labels)))
        dataset = {'id': IdDataset(), 'query_tokens': query_tokens, 'query_masks': query_masks, 'candidate_tokens': candidate_tokens, 'candidate_masks': candidate_masks, 'labels': labels, 'nsentences': NumSamplesDataset(), 'ntokens': NumelDataset(query_tokens, reduce=True)}
        nested_dataset = NestedDictionaryDataset(dataset, sizes=[query_lengths])
        with data_utils.numpy_seed(self.args.seed):
            shuffle = np.random.permutation(len(query_tokens))
        dataset = SortDataset(nested_dataset, sort_order=[shuffle])
        if return_only:
            return dataset
        self.datasets[split] = dataset
        return self.datasets[split]
    def build_dataset_for_inference(self, sample_json):
        """Materialize a one-example dataset from an in-memory JSON sample."""
        with tempfile.NamedTemporaryFile(buffering=0) as h:
            h.write((json.dumps(sample_json) + '\n').encode('utf-8'))
            dataset = self.load_dataset('disambiguate_pronoun', data_path=h.name, return_only=True)
        return dataset
    def disambiguate_pronoun(self, model, sentence, use_cuda=False):
        """Resolve the marked pronoun in *sentence* using *model*.

        Returns True/False when a gold query exists (does the query
        outscore every candidate?), otherwise the text of the best
        candidate span.
        """
        sample_json = wsc_utils.convert_sentence_to_json(sentence)
        dataset = self.build_dataset_for_inference(sample_json)
        sample = dataset.collater([dataset[0]])
        if use_cuda:
            sample = utils.move_to_cuda(sample)
        def get_masked_input(tokens, mask):
            masked_tokens = tokens.clone()
            masked_tokens[mask] = self.mask
            return masked_tokens
        def get_lprobs(tokens, mask):
            # Mean log-prob of the original tokens at the masked positions.
            (logits, _) = model(src_tokens=get_masked_input(tokens, mask))
            lprobs = F.log_softmax(logits, dim=(- 1), dtype=torch.float)
            scores = lprobs.gather(2, tokens.unsqueeze((- 1))).squeeze((- 1))
            mask = mask.type_as(scores)
            scores = ((scores * mask).sum(dim=(- 1)) / mask.sum(dim=(- 1)))
            return scores
        cand_lprobs = get_lprobs(sample['candidate_tokens'][0], sample['candidate_masks'][0])
        if (sample['query_tokens'][0] is not None):
            query_lprobs = get_lprobs(sample['query_tokens'][0].unsqueeze(0), sample['query_masks'][0].unsqueeze(0))
            return ((query_lprobs >= cand_lprobs).all().item() == 1)
        else:
            best_idx = cand_lprobs.argmax().item()
            full_cand = sample['candidate_tokens'][0][best_idx]
            mask = sample['candidate_masks'][0][best_idx]
            toks = full_cand[mask]
            return self.bpe.decode(self.source_dictionary.string(toks)).strip()
    def source_dictionary(self):
        return self.vocab
    def target_dictionary(self):
        return self.vocab
def train_aug(img, mask):
    """Apply the training augmentation pipeline to an image/mask pair."""
    img_arr = np.array(img)
    mask_arr = np.array(mask)
    augmented = get_training_transform()(image=img_arr.copy(), mask=mask_arr.copy())
    return (augmented['image'], augmented['mask'])
_spec_function('summarization_xsum_sampled')
def get_xsum_sampled_summarization_spec(temperature: float=0.3, device: str='cpu') -> RunSpec:
    """Build the RunSpec for sampled-XSum summarization at *temperature*."""
    scenario = ScenarioSpec(
        class_name='helm.benchmark.scenarios.summarization_scenario.SummarizationScenario',
        args={'dataset_name': 'xsum-sampled', 'sampling_min_length': 50,
              'sampling_max_length': 150, 'doc_max_length': 512},
    )
    adapter = get_summarization_adapter_spec(num_sents=1, max_tokens=64, temperature=temperature)
    metrics = get_summarization_metric_specs({'task': 'summarization_xsum_sampled', 'device': device})
    metrics = metrics + get_generative_harms_metric_specs()
    return RunSpec(
        name=f'summarization_xsum:temperature={temperature},device={device}',
        scenario_spec=scenario,
        adapter_spec=adapter,
        metric_specs=metrics,
        groups=['summarization_xsum'],
    )
class Launcher(TmuxLauncher):
    """Tmux launcher for the grumpifycat CUT / FastCUT experiment pair."""

    def common_options(self):
        # One option set per training mode, sharing the same dataset root.
        return [
            Options(dataroot='./datasets/grumpifycat', name='grumpifycat_CUT', CUT_mode='CUT'),
            Options(dataroot='./datasets/grumpifycat', name='grumpifycat_FastCUT', CUT_mode='FastCUT'),
        ]

    def commands(self):
        # Training command per option set.
        return ['python train.py ' + str(opt) for opt in self.common_options()]

    def test_commands(self):
        # Test command per option set, evaluated on the training phase.
        return ['python test.py ' + str(opt.set(phase='train')) for opt in self.common_options()]
def get_split_counts(input_file_size_in_gb: float, num_training_splits: Optional[int], num_dev_splits: Optional[int], num_test_splits: Optional[int], dev_ratio: Optional[float], test_ratio: Optional[float]) -> Tuple[(int, int, int, int)]:
    """Decide how many train/dev/test shards to split a data file into.

    Either all three explicit counts are given (the ratio flags must then
    be None), or the counts are derived: the train count scales with the
    file size (32 / 128 / 256 shards for <10 GB / <100 GB / larger) unless
    explicitly given, and the dev/test counts come from the ratios
    (defaulting to 0.0).

    Returns:
        (train_count, dev_count, test_count, total_splits).

    Raises:
        ValueError: for contradictory flag combinations, or when the
            ratios sum to >= 1 (previously this crashed with
            ZeroDivisionError or produced nonsense counts).
    """
    if ((num_training_splits is not None) and (num_test_splits is not None) and (num_dev_splits is not None)):
        # Fully explicit mode: ratios are meaningless here.
        if (test_ratio is not None):
            raise ValueError('you included the flag num_test_splits, so you can not specify the flag --test_ratio')
        if (dev_ratio is not None):
            raise ValueError('you included the flag num_dev_splits, so you can not specify the flag --dev_ratio')
        train_count = num_training_splits
        test_count = num_test_splits
        dev_count = num_dev_splits
        num_splits = ((train_count + test_count) + dev_count)
    else:
        # Partially explicit dev/test counts are not supported.
        if (num_test_splits is not None):
            err_msg = 'You included the flag --num_test_splits, but did not include --num_dev_splits, or'
            err_msg += ' --num_training_splits. If you want to use any of these flags, you must include all of them.'
            raise ValueError(err_msg)
        if (num_dev_splits is not None):
            err_msg = 'You included the flag --num_dev_splits, but did not include --num_training_splits, '
            err_msg += 'or --num_test_splits. If you want to use any of these flags, you must include all of them.'
            raise ValueError(err_msg)
        dev_ratio = (dev_ratio if (dev_ratio is not None) else 0.0)
        test_ratio = (test_ratio if (test_ratio is not None) else 0.0)
        if (dev_ratio + test_ratio) >= 1:
            raise ValueError('dev_ratio + test_ratio must be < 1 so at least one training split remains')
        if (num_training_splits is None):
            # Heuristic: more shards for bigger input files.
            if (input_file_size_in_gb < 10):
                train_count = 32
            elif (input_file_size_in_gb < 100):
                train_count = 128
            else:
                train_count = 256
        else:
            train_count = num_training_splits
        # Scale up so train_count is the (1 - dev - test) fraction of the total.
        num_splits = int((train_count / ((1 - dev_ratio) - test_ratio)))
        test_count = int((num_splits * test_ratio))
        dev_count = ((num_splits - test_count) - train_count)
    return (train_count, dev_count, test_count, num_splits)
def duplicate_naming(A, B):
    # DaCe data-centric program, not plain Python: `dace.define_local`
    # declares transient arrays and the nested `bla2` tasklet uses DaCe's
    # `<<` / `>>` memlet syntax to read `number` and write `B`.
    # NOTE(review): `number` is referenced on the next line before it is
    # assigned, and the bare `(_[0:W])` looks like the remnant of a
    # `@dace.map(_[0:W])` decorator lost in extraction -- confirm against
    # the original source; as written this is not runnable Python.
    no = dace.define_local([number], dace.float32)
    number = dace.define_local([W], dace.float32)
    duplicate_naming_inner(A, number)
    (_[0:W])
    def bla2(i):
        (inp << number[i])
        (out >> B[i])
        out = (2 * inp)
def register_pascal_voc(name, dirname, split, year):
    """Register a Pascal-VOC split with the dataset/metadata catalogs."""
    loader = lambda: load_voc_instances(dirname, split)
    DatasetCatalog.register(name, loader)
    metadata = MetadataCatalog.get(name)
    metadata.set(thing_classes=CLASS_NAMES, dirname=dirname, year=year, split=split)
class IDDecoder(Decoder):
    """Decoder that renders target token IDs as a space-separated string."""

    def decode(self, trg_sentence):
        # e.g. [1, 2, 3] -> "1 2 3"
        return ' '.join(str(token) for token in trg_sentence)
def restore_checkpoint(model, fname):
    """Restore *model*'s variables from the TF checkpoint at *fname*.

    Raises:
        FileNotFoundError: when no checkpoint exists at *fname*.
            (Previously an ``assert``, which is silently stripped under
            ``python -O`` and gave an uninformative AssertionError.)
    """
    logger.debug('Restoring model {0}'.format(fname))
    if not tf.train.checkpoint_exists(fname):
        raise FileNotFoundError('No checkpoint found at {0}'.format(fname))
    checkpointer = tf.train.Checkpoint(model=model)
    status = checkpointer.restore(fname)
    if not tf.executing_eagerly():
        # Graph mode: restore ops are lazy until run in a session.
        status.initialize_or_restore(tf.get_default_session())
class Gamma(Augmentation):
    """Gamma-correction augmentation: remaps pixel intensities with a random gamma.

    Like the sibling augmentations, returns None when the probability gate
    decides not to run; otherwise returns the corrected frame, or
    [frame, mask, keypoints, bounding_boxes] when any extra output was given.
    """

    def __init__(self, gamma_range=(0.5, 1.5), p=1):
        super().__init__(p=p)
        self.gamma_range = gamma_range

    def __repr__(self):
        return f'Gamma(gamma_range={self.gamma_range}, p={self.p})'

    def __call__(self, image, layer=None, mask=None, keypoints=None, bounding_boxes=None, force=False):
        if not (force or self.should_run()):
            return None
        gamma = random.uniform(self.gamma_range[0], self.gamma_range[1])
        inv_gamma = 1.0 / gamma
        # Build a 256-entry LUT mapping v -> 255 * (v/255) ** (1/gamma).
        lut = np.array(
            [((level / 255.0) ** inv_gamma) * 255 for level in np.arange(0, 256)]
        ).astype('uint8')
        frame = cv2.LUT(image.copy().astype(np.uint8), lut)
        extras = [mask, keypoints, bounding_boxes]
        if any(extra is not None for extra in extras):
            return [frame] + extras
        return frame
class Squeeze(nn.Module):
    """nn.Module wrapper for Tensor.squeeze(), usable inside nn.Sequential."""

    def __init__(self):
        super().__init__()

    def forward(self, inp):
        # Drop every size-1 dimension from the input tensor.
        return inp.squeeze()
def DP_calc(TPR, TNR):
    """Discriminant power from sensitivity (TPR) and specificity (TNR).

    Returns the string 'None' (per-library convention) when the value is
    undefined -- e.g. TPR/TNR equal to 1 (division by zero), None, or a
    value making a log argument non-positive.
    """
    try:
        pos_odds = TPR / (1 - TPR)
        neg_odds = TNR / (1 - TNR)
        scale = math.sqrt(3) / math.pi
        return scale * (math.log(pos_odds, 10) + math.log(neg_odds, 10))
    except (ZeroDivisionError, TypeError, ValueError):
        return 'None'
def validate(val_loader, dataset, net, criterion, optim, scheduler, curr_epoch, writer, curr_iter, save_pth=True):
    """Run one distributed validation pass; return the average loss.

    NOTE(review): relies on module-level `args`, `datasets`, `logging`,
    `fast_hist` and `evaluate_eval`, plus a CUDA + torch.distributed
    environment -- confirm at the call site.
    """
    net.eval()
    val_loss = AverageMeter()
    iou_acc = 0
    error_acc = 0  # NOTE(review): accumulated nowhere in this function.
    dump_images = []
    for (val_idx, data) in enumerate(val_loader):
        (inputs, seg_gts, ood_gts, img_names, _) = data
        # Sanity-check NCHW images against NHW segmentation labels.
        assert ((len(inputs.size()) == 4) and (len(seg_gts.size()) == 3))
        assert (inputs.size()[2:] == seg_gts.size()[1:])
        batch_pixel_size = ((inputs.size(0) * inputs.size(2)) * inputs.size(3))
        inputs = inputs.cuda()
        seg_gts_cuda = seg_gts.cuda()
        with torch.no_grad():
            (main_out, anomaly_score) = net(inputs)
        # Free GPU memory as soon as each tensor is no longer needed.
        del inputs
        assert (main_out.size()[2:] == seg_gts.size()[1:])
        assert (main_out.size()[1] == datasets.num_classes)
        main_loss = criterion(main_out, seg_gts_cuda)
        # Weight the running loss by the number of pixels in the batch.
        val_loss.update(main_loss.item(), batch_pixel_size)
        del seg_gts_cuda
        predictions = main_out.data.max(1)[1].cpu()
        if ((val_idx % 20) == 0):
            if (args.local_rank == 0):
                logging.info('validating: %d / %d', (val_idx + 1), len(val_loader))
        if ((val_idx > 10) and args.test_mode):
            break
        if (val_idx < 10):
            # Keep the first few batches for qualitative image dumps.
            dump_images.append([seg_gts, predictions, img_names])
        iou_acc += fast_hist(predictions.numpy().flatten(), seg_gts.numpy().flatten(), datasets.num_classes)
        del main_out, val_idx, data
    # Sum the confusion-matrix histogram across all distributed workers.
    iou_acc_tensor = torch.cuda.FloatTensor(iou_acc)
    torch.distributed.all_reduce(iou_acc_tensor, op=torch.distributed.ReduceOp.SUM)
    iou_acc = iou_acc_tensor.cpu().numpy()
    if (args.local_rank == 0):
        evaluate_eval(args, net, optim, scheduler, val_loss, iou_acc, dump_images, writer, curr_epoch, dataset, None, curr_iter, save_pth=save_pth)
    return val_loss.avg
def ste_round(x: tf.Tensor) -> tf.Tensor:
    """Round *x* with a straight-through gradient (identity in backward)."""
    rounded = tf.math.round(x)
    # Forward value equals round(x); the gradient flows only through `x`
    # because the rounding residual is wrapped in stop_gradient.
    return x + tf.stop_gradient(rounded - x)
_incremental_state
class FairseqIncrementalDecoder(FairseqDecoder):
    """Base class for decoders that support incremental (step-wise) decoding.

    Subclasses implement ``forward`` / ``extract_features``; this base
    provides the plumbing for reordering cached incremental state across
    beam-search steps and for propagating beam-size changes to children.
    """

    def __init__(self, dictionary):
        super().__init__(dictionary)

    def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs):
        raise NotImplementedError

    def extract_features(self, prev_output_tokens, encoder_out=None, incremental_state=None, **kwargs):
        raise NotImplementedError

    def reorder_incremental_state(self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order: Tensor):
        # Default implementation: nothing cached, nothing to reorder.
        pass

    def reorder_incremental_state_scripting(self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order: Tensor):
        # TorchScript-friendly variant: let every child module reorder its
        # own cached state; a module may return a replacement state dict,
        # which then becomes the one handed to subsequent modules.
        for child in self.modules():
            if hasattr(child, 'reorder_incremental_state'):
                updated = child.reorder_incremental_state(incremental_state, new_order)
                if updated is not None:
                    incremental_state = updated

    def set_beam_size(self, beam_size):
        """Broadcast a new beam size to every descendant module (once each)."""
        if getattr(self, '_beam_size', (- 1)) == beam_size:
            return
        seen = set()

        def apply_set_beam_size(module):
            if (module != self) and hasattr(module, 'set_beam_size') and (module not in seen):
                seen.add(module)
                module.set_beam_size(beam_size)

        self.apply(apply_set_beam_size)
        self._beam_size = beam_size
class KBoundedQuotientBasis(CombinatorialFreeModule):
    """A basis of the k-bounded quotient of symmetric functions.

    NOTE(review): summary inferred from the naming (kBoundedRing, k, t,
    k-bounded partitions) -- confirm against the k-bounded quotient
    documentation in Sage.
    """
    def __init__(self, kBoundedRing, prefix):
        # Free module over the ring's base ring, indexed by its indices, in
        # the category of k-bounded quotient bases; the display prefix is
        # suffixed with k (e.g. 'dks3'-style names).
        CombinatorialFreeModule.__init__(self, kBoundedRing.base_ring(), kBoundedRing.indices(), category=KBoundedQuotientBases(kBoundedRing), prefix=('%s%d' % (prefix, kBoundedRing.k)))
        self._kBoundedRing = kBoundedRing
        self.k = kBoundedRing.k
        self.t = kBoundedRing.t
        self._kbounded_partitions = Partitions_all_bounded(kBoundedRing.k)
    # Borrow the generic implementations directly from the category's
    # ParentMethods, bypassing the category framework's dynamic lookup.
    __getitem__ = raw_getattr(KBoundedQuotientBases.ParentMethods, '__getitem__')
    _repr_term = raw_getattr(KBoundedQuotientBases.ParentMethods, '_repr_term')
    _element_constructor_ = raw_getattr(KBoundedQuotientBases.ParentMethods, '_element_constructor_')
def _get_time_macro_clause(node):
if ((node.construction == 'AND') and (node.fields[0].construction == 'JOIN') and (node.fields[0].fields[0].construction == 'SCHEMA') and ('time_macro' in node.fields[0].fields[0].val)):
return node.fields[0]
else:
for field in node.fields:
ret_val = _get_time_macro_clause(field)
if (ret_val is not None):
return ret_val
return None |
def decompose_by_diameter(a_tree, strategy, max_size=None, min_size=None, max_diam=None):
    """Split *a_tree* into subtrees honoring size/diameter constraints.

    Repeatedly bisects the tree at a midpoint or centroid edge (per
    *strategy*) until every piece has at most `max_size` leaves and
    diameter at most `max_diam`, never creating a piece with fewer than
    `min_size` leaves.  Returns the list of resulting subtrees.

    NOTE(review): mutates *a_tree* in place (it becomes one of the
    returned pieces); temporary per-node attributes (nleaf, anchor,
    maxdepth, diameter, bestLCA) are attached during the run and removed
    by __clean_up__ on finished pieces.
    """
    def __ini_record__():
        # Bottom-up pass computing the per-node bookkeeping attributes.
        for node in a_tree.postorder_node_iter():
            __update_node__(node)
    def __find_midpoint_edge__(tre):
        # Walk up from the diameter path's anchor leaf until half the
        # diameter is covered; that edge is the midpoint edge.
        u = tre.seed_node.bestLCA.anchor
        uel = (u.edge_length if u.edge_length else 0)
        d = 0
        while ((d + uel) < (tre.seed_node.diameter / 2)):
            d += uel
            u = u.parent_node
            uel = (u.edge_length if u.edge_length else 0)
        return u.edge
    def __find_centroid_edge__(tre):
        # Descend through heaviest children while the split balance
        # (child leaves x accumulated remainder) keeps improving.
        u = tre.seed_node
        product = 0
        acc_nleaf = 0
        while (not u.is_leaf()):
            max_child = None
            max_child_nleaf = 0
            for ch in u.child_node_iter():
                if (ch.nleaf > max_child_nleaf):
                    max_child_nleaf = ch.nleaf
                    max_child = ch
            acc_nleaf += (u.nleaf - max_child.nleaf)
            new_product = (max_child.nleaf * acc_nleaf)
            if (new_product <= product):
                break
            product = new_product
            u = max_child
        return u.edge
    def __bisect__(tre, edg):
        # Cut `edg`, making its head the seed of a new tree; suppress the
        # resulting degree-2 node and refresh bookkeeping up to the root.
        u = edg.tail_node
        v = edg.head_node
        u.remove_child(v)
        tr1 = Tree(seed_node=v)
        if (u.num_child_nodes() == 1):
            p = u.parent_node
            v = u.child_nodes()[0]
            l_v = (v.edge_length if v.edge_length else 0)
            u.remove_child(v)
            if (p is None):
                tre.seed_node = v
                return (tre, tr1)
            # Merge the two edges around the suppressed unifurcation.
            l_u = (u.edge_length if u.edge_length else 0)
            p.remove_child(u)
            p.add_child(v)
            v.edge_length = (l_u + l_v)
            u = p
        while (u is not None):
            __update_node__(u)
            u = u.parent_node
        return (tre, tr1)
    def __clean_up__(tre):
        # Strip the temporary bookkeeping attributes from a finished piece.
        for node in tre.postorder_node_iter():
            delattr(node, 'nleaf')
            delattr(node, 'anchor')
            delattr(node, 'maxdepth')
            delattr(node, 'diameter')
            delattr(node, 'bestLCA')
    def __update_node__(node):
        # Recompute nleaf / maxdepth / diameter / anchor / bestLCA for one
        # node from its (already up-to-date) children.
        if node.is_leaf():
            node.anchor = node
            node.maxdepth = 0
            node.diameter = 0
            node.bestLCA = node
            node.nleaf = 1
            return
        d1 = (- 1)
        d2 = (- 1)
        anchor1 = None
        node.diameter = 0
        node.bestLCA = None
        node.nleaf = 0
        for ch in node.child_node_iter():
            node.nleaf += ch.nleaf
            # NOTE(review): a falsy (0/None) edge_length collapses the whole
            # child depth to 0 rather than just the edge -- confirm intended.
            d = ((ch.maxdepth + ch.edge_length) if ch.edge_length else 0)
            if (d > d1):
                d2 = d1
                d1 = d
                anchor1 = ch.anchor
            elif (d > d2):
                d2 = d
            if (ch.diameter > node.diameter):
                node.diameter = ch.diameter
                node.bestLCA = ch.bestLCA
        node.maxdepth = d1
        node.anchor = anchor1
        # A path through this node (two deepest children) may beat any
        # child-internal diameter.
        if ((d1 + d2) > node.diameter):
            node.diameter = (d1 + d2)
            node.bestLCA = node
    def __get_breaking_edge__(tre, edge_type):
        # Pick a breaking edge of the requested type, or None if the tree
        # already satisfies the constraints or the cut would be too small.
        if ((tre.seed_node.nleaf <= max_size) and (tre.seed_node.diameter <= max_diam)):
            return None
        if (edge_type == 'midpoint'):
            ed = __find_midpoint_edge__(tre)
        elif (edge_type == 'centroid'):
            ed = __find_centroid_edge__(tre)
        else:
            _LOG.warning("Invalid decomposition type! Please use either 'midpoint' or 'centroid'")
            return None
        n = ed.head_node.nleaf
        if ((n < min_size) or ((tre.seed_node.nleaf - n) < min_size)):
            return None
        return ed
    def __check_stop__(tre):
        # NOTE(review): defined but never called in this function.
        return (((tre.seed_node.nleaf <= max_size) and (tre.seed_node.diameter <= max_diam)) or ((tre.seed_node.nleaf // 2) < min_size))
    def __break_by_MP_centroid__(tre):
        # Prefer the midpoint edge; fall back to the centroid edge.
        ed = __get_breaking_edge__(tre, 'midpoint')
        if (ed is None):
            ed = __get_breaking_edge__(tre, 'centroid')
        return ed
    def __break(tre):
        if (strategy == 'centroid'):
            return __get_breaking_edge__(tre, 'centroid')
        elif (strategy == 'midpoint'):
            return __break_by_MP_centroid__(tre)
        else:
            raise Exception(('strategy not valid: %s' % strategy))
    tqueue = Queue()
    _LOG.debug('Starting brlen decomposition ...')
    __ini_record__()
    # Default the constraints to "no constraint".
    min_size = (min_size if min_size else 0)
    max_size = (max_size if max_size else a_tree.seed_node.nleaf)
    max_diam = (max_diam if max_diam else a_tree.seed_node.diameter)
    _LOG.debug(('Now breaking by %s with min %d and max %d sizes and diameter %f ...' % (strategy, min_size, max_size, max_diam)))
    e = __break(a_tree)
    if (e is None):
        __clean_up__(a_tree)
        return [a_tree]
    # Worklist of (tree, breaking edge) pairs still to be bisected.
    tree_map = []
    tqueue.put((a_tree, e))
    while (not tqueue.empty()):
        (t, e) = tqueue.get()
        (t1, t2) = __bisect__(t, e)
        e1 = __break(t1)
        if (e1 is None):
            __clean_up__(t1)
            tree_map.append(t1)
        else:
            tqueue.put((t1, e1))
        e2 = __break(t2)
        if (e2 is None):
            __clean_up__(t2)
            tree_map.append(t2)
        else:
            tqueue.put((t2, e2))
    return tree_map
def test_energy_decrease():
    """Gaussian-blurring an impulse must reduce the per-pixel std."""
    impulse = np.zeros((3, 3))
    impulse[1, 1] = 1.0
    blurred = gaussian(impulse, preserve_range=True, sigma=1, mode='reflect')
    assert blurred.std() < impulse.std()
_cache
def load_schema(schema_name: str) -> dict[(str, Any)]:
    """Load and parse the YAML schema registered under *schema_name*."""
    schema_path = get_schema_path(schema_name)
    with open(schema_path) as stream:
        return load_yaml(stream)
def attach_metadata_to_scalars(field, metadata):
    """Stamp *metadata* onto every scalar component of *field* (in place)."""
    for scalar in field.all_scalars():
        scalar.set_metadata(metadata)
def parse_version_info(version_str):
    """Parse '1.2.3' or '1.2.0rc1' into (1, 2, 3) / (1, 2, 0, 'rc1').

    Components that are neither digits nor contain 'rc' are dropped.
    """
    parts = []
    for component in version_str.split('.'):
        if component.isdigit():
            parts.append(int(component))
        elif 'rc' in component:
            pieces = component.split('rc')
            parts.append(int(pieces[0]))
            parts.append(f'rc{pieces[1]}')
    return tuple(parts)
def parse_args():
    """Build and parse the benchmark-inference command line.

    Positional: config, checkpoint_root. Optional: image path, aug-test flag,
    model name, display options, device and score threshold.
    """
    ap = ArgumentParser()
    # Positional arguments (order matters).
    ap.add_argument('config', help='test config file path')
    ap.add_argument('checkpoint_root', help='Checkpoint file root path')
    # Optional arguments.
    ap.add_argument('--img', default='demo/demo.jpg', help='Image file')
    ap.add_argument('--aug', action='store_true', help='aug test')
    ap.add_argument('--model-name', help='model name to inference')
    ap.add_argument('--show', action='store_true', help='show results')
    ap.add_argument('--wait-time', type=float, default=1, help='the interval of show (s), 0 is block')
    ap.add_argument('--device', default='cuda:0', help='Device used for inference')
    ap.add_argument('--score-thr', type=float, default=0.3, help='bbox score threshold')
    return ap.parse_args()
class ShowProgress():
    """Iterator wrapper that lazily attaches a tqdm progress bar.

    The bar is only created once more than ``start_delay`` seconds have
    elapsed since construction; items consumed before that are counted in
    ``unshown_count`` and used to pre-seed the bar's position.
    """

    def __init__(self, iterable, total, desc, silent, start_delay):
        self.iter = iter(iterable)
        self.start_time = time.time()
        self.pbar = None
        self.total = total
        self.desc = desc
        self.start_delay = start_delay
        self.silent = silent
        self.unshown_count = 0

    def __iter__(self):
        return self

    def __next__(self):
        elapsed = time.time() - self.start_time
        if self.pbar is None and elapsed > self.start_delay:
            # Late bar creation: seed it with the progress made so far.
            self.pbar = tqdm.tqdm(total=self.total, initial=self.unshown_count, desc=self.desc, disable=self.silent)
            self.pbar.start_t = self.start_time
        # Progress is counted before fetching, so the terminal StopIteration
        # call is also counted (matches original behavior).
        if self.pbar is None:
            self.unshown_count += 1
        else:
            self.pbar.update(1)
        try:
            return next(self.iter)
        except StopIteration:
            if self.pbar is not None:
                self.pbar.close()
            raise
def pretokenize(in_path: str, out_path: str, src: str, tgt: str):
    """Moses-pretokenize every line of *in_path* into *out_path*.

    *src*/*tgt* are the Moses source/target language codes; dash splitting
    and escaping are disabled, matching the original defaults.
    """
    opts_cls = namedtuple('Args', ['moses_source_lang', 'moses_target_lang', 'moses_no_dash_splits', 'moses_no_escape'])
    opts = opts_cls(moses_source_lang=src, moses_target_lang=tgt, moses_no_dash_splits=False, moses_no_escape=False)
    tokenizer = MosesTokenizer(opts)
    with open(in_path) as fin, open(out_path, 'w') as fout:
        for line in fin:
            fout.write(tokenizer.encode(line.strip()) + '\n')
class FeatureExtractor(nn.Sequential):
    """Sequential stack of ConvBlock2DSN blocks: one input block, num_blocks-1
    intermediate blocks, and one final block (linear, i.e. no BN/activation,
    when return_linear is True)."""

    def __init__(self, in_channel, out_channel, ker_size, padding, stride, num_blocks=2, return_linear=False):
        super(FeatureExtractor, self).__init__()
        # First block maps in_channel -> out_channel.
        self.add_module('conv_block_0', ConvBlock2DSN(in_channel, out_channel, ker_size, padding, stride))
        # Intermediate blocks keep the channel count.
        for idx in range(1, num_blocks):
            self.add_module('conv_block_{}'.format(idx), ConvBlock2DSN(out_channel, out_channel, ker_size, padding, stride))
        # Final block: optionally linear (no batch-norm, no activation).
        if return_linear:
            final_block = ConvBlock2DSN(out_channel, out_channel, ker_size, padding, stride, bn=False, act=None)
        else:
            final_block = ConvBlock2DSN(out_channel, out_channel, ker_size, padding, stride)
        self.add_module('conv_block_{}'.format(num_blocks), final_block)
# For each supported --format value: which of the (head, rel, tail) slots are
# populated from data_files (consumed in order), plus the exact error message
# used when the file count is wrong (messages preserved from the original).
_FORMAT_SPECS = {
    'h_r_t': ((True, True, True), 'When using h_r_t, head.list, rel.list and tail.list should be provided.'),
    'h_r_*': ((True, True, False), 'When using h_r_*, head.list and rel.list should be provided.'),
    'h_*_t': ((True, False, True), 'When using h_*_t, head.list and tail.list should be provided.'),
    '*_r_t': ((False, True, True), 'When using *_r_t rel.list and tail.list should be provided.'),
    'h_*_*': ((True, False, False), 'When using h_*_*, only head.list should be provided.'),
    '*_r_*': ((False, True, False), 'When using *_r_*, only rel.list should be provided.'),
    '*_*_t': ((False, False, True), 'When using *_*_t, only tail.list should be provided.'),
}

def _pick_files(slots, data_files):
    """Assign data_files, in order, to the active (head, rel, tail) slots.

    Inactive ('*') slots get None. Indexing (not iteration) is used so that
    too-few files raises IndexError, as the original code did.
    """
    picked = []
    next_idx = 0
    for active in slots:
        if active:
            picked.append(data_files[next_idx])
            next_idx += 1
        else:
            picked.append(None)
    return picked

def main():
    """Run top-K triplet score inference and write results to args.output.

    Refactor: the seven near-identical per-format branches are collapsed into
    a spec table (_FORMAT_SPECS) + _pick_files; assert messages, the
    raw-data-only file-count check, and all downstream behavior are preserved.
    """
    args = ArgParser().parse_args()
    config = load_model_config(os.path.join(args.model_path, 'config.json'))
    emap_file = args.entity_mfile
    rmap_file = args.rel_mfile
    data_files = args.data_files
    spec = _FORMAT_SPECS.get(args.format)
    assert spec is not None, 'Unsupported format {}'.format(args.format)
    (slots, count_msg) = spec
    if args.raw_data:
        # RAW IDs need both mapping files so results can be translated back.
        assert (emap_file is not None), 'When using RAW ID through --raw_data, entity_mfile should be provided.'
        assert (rmap_file is not None), 'When using RAW ID through --raw_data, rel_mfile should be provided.'
        # Only the raw-data path validated the file count in the original.
        assert (len(data_files) == sum(slots)), count_msg
        (head_f, rel_f, tail_f) = _pick_files(slots, data_files)
        (head, rel, tail, id2e_map, id2r_map) = load_raw_triplet_data(head_f=head_f, rel_f=rel_f, tail_f=tail_f, emap_f=emap_file, rmap_f=rmap_file)
    else:
        (head_f, rel_f, tail_f) = _pick_files(slots, data_files)
        (head, rel, tail) = load_triplet_data(head_f=head_f, rel_f=rel_f, tail_f=tail_f)
    model = ScoreInfer(args.gpu, config, args.model_path, args.score_func)
    model.load_model()
    result = model.topK(head, rel, tail, args.exec_mode, args.topK)
    # Emit a TSV with a header row; raw mode maps internal ids back to names.
    with open(args.output, 'w+') as f:
        f.write('head\trel\ttail\tscore\n')
        for res in result:
            (hl, rl, tl, sl) = res
            hl = hl.tolist()
            rl = rl.tolist()
            tl = tl.tolist()
            sl = sl.tolist()
            for (h, r, t, s) in zip(hl, rl, tl, sl):
                if args.raw_data:
                    h = id2e_map[h]
                    r = id2r_map[r]
                    t = id2e_map[t]
                f.write('{}\t{}\t{}\t{}\n'.format(h, r, t, s))
    print('Inference Done')
    print('The result is saved in {}'.format(args.output))
# Fix: the stray '.qhsri' line above this test was a pytest marker whose
# '@pytest.mark' prefix was lost in extraction; restored as a decorator.
@pytest.mark.qhsri
def test_pareto_sample_diverse_subset_raises_too_large_sample_size() -> None:
    """sample_diverse_subset must reject a sample size larger than the Pareto set."""
    observations = tf.constant([[1.0, (- 1.0)], [(- 1.0), 1.0]])
    pareto_set = Pareto(observations)
    with pytest.raises(ValueError):
        pareto_set.sample_diverse_subset(3, allow_repeats=False)
class read_port():
    """Models a memory read port with a fixed service latency (in cycles)."""

    def __init__(self):
        # Default latency of one cycle until configured via set_params().
        self.latency = 1

    def set_params(self, latency):
        """Configure the port's read latency."""
        self.latency = latency

    def get_latency(self):
        """Return the currently configured latency."""
        return self.latency

    def service_reads(self, incoming_requests_arr_np, incoming_cycles_arr):
        """Return the completion cycle for each incoming request.

        Each entry of incoming_cycles_arr is offset by the port latency.
        NOTE(review): incoming_requests_arr_np is accepted but unused — likely
        kept for interface symmetry with other ports; confirm.
        """
        return incoming_cycles_arr + self.latency
def writeEdgesAndLabels(edges, labels, output_file):
    """Write parallel (label, edge) pairs to output_file as tab-separated UTF-8 lines.

    Fix: ``itertools.izip`` does not exist on Python 3; the builtin ``zip``
    is the equivalent (izip was only the lazy variant on Python 2, and both
    produce the same pairs here).
    """
    with codecs.open(output_file, 'w', 'utf-8') as outfile:
        for (edge, label) in zip(edges, labels):
            outfile.write(('%s\t%s\n' % (label, edge)))
def get_knowledge_fn():
    """Build a GraphKnowledgeHessFunc over 4 features with two symmetric
    pairwise interactions encoded: (0,1) with weight 3.0 and (2,3) with -2.0."""
    knowledge_fn = GraphKnowledgeHessFunc(total_feature_num=4)
    adj = np.zeros((4, 4))
    adj[0, 1] = adj[1, 0] = 3.0
    adj[2, 3] = adj[3, 2] = -2.0
    (interaction_idx, interaction_eff) = knowledge_fn.convert_adjacency_to_knowledge(adj)
    knowledge_fn.knowledge_encoder(interaction_idx, interaction_eff)
    return knowledge_fn
def is_pythran_buffer(type_):
    """Return True when type_ is a NumPy buffer Pythran can accept:
    a supported dtype, 'c' or 'strided' layout, and not a cast buffer."""
    if not type_.is_numpy_buffer:
        return False
    if not is_pythran_supported_dtype(type_.dtype):
        return False
    return (type_.mode in ('c', 'strided')) and (not type_.cast)
def get_belief_openaigpt(sent):
    """Extract the deduplicated belief-state items from a decoded GPT string.

    The belief segment lies between the '< | belief | >' and '< | action | >'
    markers; items are comma-separated and stripped of spaces/periods/commas.
    Returns [] when no belief marker is present. Order of first appearance
    is preserved.
    """
    if '< | belief | >' not in sent:
        return []
    segment = sent.strip(' ').split('< | belief | >')[-1].split('< | action | >')[0]
    segment = segment.strip(' .,')
    segment = segment.replace('< | endofbelief | >', '')
    segment = segment.replace('< | endoftext | >', '')
    seen = []
    for item in segment.split(','):
        item = item.strip(' .,')
        if item not in seen:
            seen.append(item)
    return seen
class Partition6(nn.Module):
    """Pipeline-parallel stage 6 of a partitioned VisionTransformer.

    Finishes Block 17 (attention output projection + MLP), runs Blocks 18 and
    19 fully, and starts Block 20 (norm1 + qkv). Auto-generated code.

    Fix: four matmul expressions in forward() had lost their ``@`` operator
    during extraction (e.g. ``(t_0 t_6)``); restored — this is the standard
    ViT attention pattern ``softmax(q @ k.T * scale) @ v``.
    """
    LAYER_SCOPES = [
        'VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Linear[proj]',
        'VisionTransformer/ModuleList[blocks]/Block[17]/Attention[attn]/Dropout[proj_drop]',
        'VisionTransformer/ModuleList[blocks]/Block[17]/Identity[drop_path]',
        'VisionTransformer/ModuleList[blocks]/Block[17]/LayerNorm[norm2]',
        'VisionTransformer/ModuleList[blocks]/Block[17]/Mlp[mlp]/Linear[fc1]',
        'VisionTransformer/ModuleList[blocks]/Block[17]/Mlp[mlp]/GELU[act]',
        'VisionTransformer/ModuleList[blocks]/Block[17]/Mlp[mlp]/Dropout[drop]',
        'VisionTransformer/ModuleList[blocks]/Block[17]/Mlp[mlp]/Linear[fc2]',
        'VisionTransformer/ModuleList[blocks]/Block[17]/Mlp[mlp]/Dropout[drop]',
        'VisionTransformer/ModuleList[blocks]/Block[17]/Identity[drop_path]',
        'VisionTransformer/ModuleList[blocks]/Block[18]/LayerNorm[norm1]',
        'VisionTransformer/ModuleList[blocks]/Block[18]/Attention[attn]/Linear[qkv]',
        'VisionTransformer/ModuleList[blocks]/Block[18]/Attention[attn]/Dropout[attn_drop]',
        'VisionTransformer/ModuleList[blocks]/Block[18]/Attention[attn]/Linear[proj]',
        'VisionTransformer/ModuleList[blocks]/Block[18]/Attention[attn]/Dropout[proj_drop]',
        'VisionTransformer/ModuleList[blocks]/Block[18]/Identity[drop_path]',
        'VisionTransformer/ModuleList[blocks]/Block[18]/LayerNorm[norm2]',
        'VisionTransformer/ModuleList[blocks]/Block[18]/Mlp[mlp]/Linear[fc1]',
        'VisionTransformer/ModuleList[blocks]/Block[18]/Mlp[mlp]/GELU[act]',
        'VisionTransformer/ModuleList[blocks]/Block[18]/Mlp[mlp]/Dropout[drop]',
        'VisionTransformer/ModuleList[blocks]/Block[18]/Mlp[mlp]/Linear[fc2]',
        'VisionTransformer/ModuleList[blocks]/Block[18]/Mlp[mlp]/Dropout[drop]',
        'VisionTransformer/ModuleList[blocks]/Block[18]/Identity[drop_path]',
        'VisionTransformer/ModuleList[blocks]/Block[19]/LayerNorm[norm1]',
        'VisionTransformer/ModuleList[blocks]/Block[19]/Attention[attn]/Linear[qkv]',
        'VisionTransformer/ModuleList[blocks]/Block[19]/Attention[attn]/Dropout[attn_drop]',
        'VisionTransformer/ModuleList[blocks]/Block[19]/Attention[attn]/Linear[proj]',
        'VisionTransformer/ModuleList[blocks]/Block[19]/Attention[attn]/Dropout[proj_drop]',
        'VisionTransformer/ModuleList[blocks]/Block[19]/Identity[drop_path]',
        'VisionTransformer/ModuleList[blocks]/Block[19]/LayerNorm[norm2]',
        'VisionTransformer/ModuleList[blocks]/Block[19]/Mlp[mlp]/Linear[fc1]',
        'VisionTransformer/ModuleList[blocks]/Block[19]/Mlp[mlp]/GELU[act]',
        'VisionTransformer/ModuleList[blocks]/Block[19]/Mlp[mlp]/Dropout[drop]',
        'VisionTransformer/ModuleList[blocks]/Block[19]/Mlp[mlp]/Linear[fc2]',
        'VisionTransformer/ModuleList[blocks]/Block[19]/Mlp[mlp]/Dropout[drop]',
        'VisionTransformer/ModuleList[blocks]/Block[19]/Identity[drop_path]',
        'VisionTransformer/ModuleList[blocks]/Block[20]/LayerNorm[norm1]',
        'VisionTransformer/ModuleList[blocks]/Block[20]/Attention[attn]/Linear[qkv]']
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:6'):
        super().__init__()
        # Attach this stage's layers under generated names l_0 .. l_37.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        # Register any stage-owned parameters/buffers (none for this stage).
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # Five flat inputs: residual, three shape scalars, and the attn @ v
        # result carried over from the previous stage.
        self.input_structure = [1, 1, 1, 1, 1]
        # Generated-name -> original module path, for state_dict translation.
        self.lookup = {'l_0': 'blocks.17.attn.proj', 'l_1': 'blocks.17.attn.proj_drop', 'l_2': 'blocks.17.drop_path', 'l_3': 'blocks.17.norm2', 'l_4': 'blocks.17.mlp.fc1', 'l_5': 'blocks.17.mlp.act', 'l_6': 'blocks.17.mlp.drop', 'l_7': 'blocks.17.mlp.fc2', 'l_8': 'blocks.17.mlp.drop', 'l_9': 'blocks.17.drop_path', 'l_10': 'blocks.18.norm1', 'l_11': 'blocks.18.attn.qkv', 'l_12': 'blocks.18.attn.attn_drop', 'l_13': 'blocks.18.attn.proj', 'l_14': 'blocks.18.attn.proj_drop', 'l_15': 'blocks.18.drop_path', 'l_16': 'blocks.18.norm2', 'l_17': 'blocks.18.mlp.fc1', 'l_18': 'blocks.18.mlp.act', 'l_19': 'blocks.18.mlp.drop', 'l_20': 'blocks.18.mlp.fc2', 'l_21': 'blocks.18.mlp.drop', 'l_22': 'blocks.18.drop_path', 'l_23': 'blocks.19.norm1', 'l_24': 'blocks.19.attn.qkv', 'l_25': 'blocks.19.attn.attn_drop', 'l_26': 'blocks.19.attn.proj', 'l_27': 'blocks.19.attn.proj_drop', 'l_28': 'blocks.19.drop_path', 'l_29': 'blocks.19.norm2', 'l_30': 'blocks.19.mlp.fc1', 'l_31': 'blocks.19.mlp.act', 'l_32': 'blocks.19.mlp.drop', 'l_33': 'blocks.19.mlp.fc2', 'l_34': 'blocks.19.mlp.drop', 'l_35': 'blocks.19.drop_path', 'l_36': 'blocks.20.norm1', 'l_37': 'blocks.20.attn.qkv'}
        self.to(self.device)

    def forward(self, *args):
        """Run this stage. Inputs: (residual x0, shape ints x1..x3, attn@v x4)."""
        (x0, x1, x2, x3, x4) = unflatten(args, self.input_structure)
        # --- finish Block 17: merge heads, project, residual add ---
        t_0 = x4.transpose(1, 2)
        t_0 = t_0.reshape(x1, x2, x3)
        t_0 = self.l_0(t_0)
        t_0 = self.l_1(t_0)
        t_0 = self.l_2(t_0)
        t_0 = (x0 + t_0)
        # --- Block 17 MLP + residual ---
        t_1 = self.l_3(t_0)
        t_1 = self.l_4(t_1)
        t_1 = self.l_5(t_1)
        t_1 = self.l_6(t_1)
        t_1 = self.l_7(t_1)
        t_1 = self.l_8(t_1)
        t_1 = self.l_9(t_1)
        t_1 = (t_0 + t_1)
        # --- Block 18 attention (16 heads, head_dim = C // 16) ---
        t_0 = self.l_10(t_1)
        t_2 = t_0.shape
        t_3 = t_2[0]
        t_4 = t_2[1]
        t_2 = t_2[2]
        t_0 = self.l_11(t_0)
        t_5 = (t_2 // 16)
        t_5 = t_0.reshape(t_3, t_4, 3, 16, t_5)
        t_5 = t_5.permute(2, 0, 3, 1, 4)
        t_0 = t_5[0]
        t_6 = t_5[1]
        t_5 = t_5[2]
        t_6 = t_6.transpose((- 2), (- 1))
        t_6 = (t_0 @ t_6)  # q @ k.T  (restored '@')
        t_6 = (t_6 * 0.125)
        t_6 = t_6.softmax(dim=(- 1))
        t_6 = self.l_12(t_6)
        t_5 = (t_6 @ t_5)  # attn @ v  (restored '@')
        t_5 = t_5.transpose(1, 2)
        t_2 = t_5.reshape(t_3, t_4, t_2)
        t_2 = self.l_13(t_2)
        t_2 = self.l_14(t_2)
        t_2 = self.l_15(t_2)
        t_2 = (t_1 + t_2)
        # --- Block 18 MLP + residual ---
        t_1 = self.l_16(t_2)
        t_1 = self.l_17(t_1)
        t_1 = self.l_18(t_1)
        t_1 = self.l_19(t_1)
        t_1 = self.l_20(t_1)
        t_1 = self.l_21(t_1)
        t_1 = self.l_22(t_1)
        t_1 = (t_2 + t_1)
        # --- Block 19 attention ---
        t_2 = self.l_23(t_1)
        t_4 = t_2.shape
        t_3 = t_4[0]
        t_5 = t_4[1]
        t_4 = t_4[2]
        t_2 = self.l_24(t_2)
        t_6 = (t_4 // 16)
        t_6 = t_2.reshape(t_3, t_5, 3, 16, t_6)
        t_6 = t_6.permute(2, 0, 3, 1, 4)
        t_2 = t_6[0]
        t_0 = t_6[1]
        t_6 = t_6[2]
        t_0 = t_0.transpose((- 2), (- 1))
        t_0 = (t_2 @ t_0)  # q @ k.T  (restored '@')
        t_0 = (t_0 * 0.125)
        t_0 = t_0.softmax(dim=(- 1))
        t_0 = self.l_25(t_0)
        t_6 = (t_0 @ t_6)  # attn @ v  (restored '@')
        t_6 = t_6.transpose(1, 2)
        t_4 = t_6.reshape(t_3, t_5, t_4)
        t_4 = self.l_26(t_4)
        t_4 = self.l_27(t_4)
        t_4 = self.l_28(t_4)
        t_4 = (t_1 + t_4)
        # --- Block 19 MLP + residual ---
        t_1 = self.l_29(t_4)
        t_1 = self.l_30(t_1)
        t_1 = self.l_31(t_1)
        t_1 = self.l_32(t_1)
        t_1 = self.l_33(t_1)
        t_1 = self.l_34(t_1)
        t_1 = self.l_35(t_1)
        t_1 = (t_4 + t_1)
        # --- start Block 20: norm1 + qkv; shape is forwarded to next stage ---
        t_4 = self.l_36(t_1)
        t_5 = t_4.shape
        t_4 = self.l_37(t_4)
        return list(flatten((t_1, t_5, t_4)))

    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, state):
        return load_state_dict(self, state)

    def named_parameters(self, recurse=True):
        return named_parameters(self, recurse=recurse)

    def named_buffers(self, recurse=True):
        return named_buffers(self, recurse=recurse)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
def make_schema(schema_name: str='simple_swagger.yaml', **kwargs: Any) -> dict[(str, Any)]:
    """Load *schema_name* and recursively merge the keyword overrides into it.

    Keeps the original argument order: the overrides are the first argument
    to merge_recursively, the loaded schema the second.
    """
    base = load_schema(schema_name)
    return merge_recursively(kwargs, base)
def get_keys_to_not_convert(model):
    """Return module-name stems (tied weights + the model's last parameter)
    that should be kept in full precision during quantization/conversion.

    Works on a deep copy so tie_weights() cannot mutate the caller's model.
    """
    model_copy = deepcopy(model)
    model_copy.tie_weights()
    tied_params = find_tied_parameters(model_copy)
    # find_tied_parameters historically returned either a dict or a list of
    # name groups; flatten the list form to the non-leading group members.
    if isinstance(tied_params, dict):
        tied_keys = list(tied_params.values())
    else:
        tied_keys = [name for group in tied_params for name in group[1:]]
    has_tied_params = len(tied_keys) > 0
    is_base_model = not hasattr(model, model.base_model_prefix)
    # A base model with no tied weights has nothing to protect.
    if not has_tied_params and is_base_model:
        return []
    # Always keep the very last parameter (typically the output head).
    last_param_name = list(model.named_parameters())[-1][0]
    leftover = set([last_param_name]) - set(tied_keys)
    candidates = tied_keys + list(leftover)
    # Strip parameter suffixes so the names refer to modules, not tensors.
    cleaned = []
    for name in candidates:
        for suffix in ['.weight', '.bias']:
            if suffix in name:
                name = name.replace(suffix, '')
        cleaned.append(name)
    return cleaned
def parse_args():
    """Build and parse the DeepSpeech feature-extraction command line."""
    ap = argparse.ArgumentParser(description='Extract DeepSpeech features from audio file', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    ap.add_argument('--input', type=str, required=True, help='path to input audio file or directory')
    ap.add_argument('--output', type=str, help='path to output file with DeepSpeech features')
    ap.add_argument('--deepspeech', type=str, help='path to DeepSpeech 0.1.0 frozen model')
    ap.add_argument('--metainfo', type=str, help='path to file with meta-information')
    return ap.parse_args()
class Bottleneck(nn.Module):
    """ResNet bottleneck (1x1 -> 3x3 -> 1x1, expansion 4) with a LIP-pooled
    variant for stride-2 blocks: the 3x3 conv is replaced by BottleneckLIP
    fed with features from a BottleneckShared branch.

    Fix: removed the dead local ``kplanes = planes`` in the stride==2 branch
    (assigned but never read).
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        if (stride == 2):
            # Downsampling block: shared features drive the LIP pooling.
            self.bottleneck_shared = BottleneckShared(inplanes)
            self.conv1 = conv1x1(inplanes, planes)
            self.bn1 = nn.BatchNorm2d(planes)
            self.conv2 = nn.Sequential(BottleneckLIP(planes), conv1x1(planes, planes))
            self.bn2 = nn.BatchNorm2d(planes)
            self.conv3 = conv1x1(planes, (planes * self.expansion))
            self.bn3 = nn.BatchNorm2d((planes * self.expansion))
        else:
            # Standard bottleneck path.
            self.conv1 = conv1x1(inplanes, planes)
            self.bn1 = nn.BatchNorm2d(planes)
            self.conv2 = conv3x3(planes, planes)
            self.bn2 = nn.BatchNorm2d(planes)
            self.conv3 = conv1x1(planes, (planes * self.expansion))
            self.bn3 = nn.BatchNorm2d((planes * self.expansion))
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def init_layer(self):
        # Zero-init the last BN so the block starts as an identity mapping.
        self.bn3.weight.data.zero_()

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        if (self.stride == 2):
            # Compute the shared features once; reuse them for both the main
            # LIP conv and (below) the LIP downsample branch.
            shared = self.bottleneck_shared(x)
            out = self.conv2[0].forward_with_shared(out, shared)
            for layer in self.conv2[1:]:
                out = layer(out)
        else:
            out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if (self.downsample is not None):
            if (self.stride == 1):
                residual = self.downsample(x)
            else:
                # NOTE(review): assumes stride is 1 or 2 when downsample is
                # set — `shared` only exists for stride==2; confirm callers.
                residual = self.downsample[0].forward_with_shared(x, shared)
                for layer in self.downsample[1:]:
                    residual = layer(residual)
        out += residual
        out = self.relu(out)
        return out
class BatchSampler(BaseSampler):
    """Sampler that delegates rollout collection to the parallel_sampler
    worker pool, scoped to this algorithm instance."""

    def __init__(self, algo):
        self.algo = algo

    def start_worker(self):
        """Ship the env and policy to the worker pool."""
        parallel_sampler.populate_task(self.algo.env, self.algo.policy, scope=self.algo.scope)

    def shutdown_worker(self):
        """Tear down the worker pool for this scope."""
        parallel_sampler.terminate_task(scope=self.algo.scope)

    def obtain_samples(self, itr):
        """Collect up to batch_size samples with the current policy parameters;
        optionally truncate trailing partial paths."""
        policy_params = self.algo.policy.get_param_values()
        paths = parallel_sampler.sample_paths(policy_params=policy_params, max_samples=self.algo.batch_size, include_original_frames=True, max_path_length=self.algo.max_path_length, scope=self.algo.scope)
        if self.algo.whole_paths:
            return paths
        return parallel_sampler.truncate_paths(paths, self.algo.batch_size)
class Fringe(object):
    """Pool of unique candidate items sampled uniformly at random.

    `distribution` is kept in sync with `fringe_list` so `pop` can draw
    uniformly via np.random.choice.
    """

    def __init__(self, grid):
        self.fringe_list = []
        self.distribution = []
        self.grid = grid

    def add(self, item):
        """Insert item if absent and refresh the sampling distribution."""
        if item in self.fringe_list:
            return
        self.fringe_list.append(item)
        self.update_probs()

    def pop(self):
        """Remove and return a uniformly random item; the fringe must be non-empty."""
        assert len(self.fringe_list) > 0
        chosen_idx = np.random.choice(len(self.fringe_list), p=self.distribution)
        chosen = self.fringe_list.pop(chosen_idx)
        self.update_probs()
        return chosen

    def update_probs(self):
        # Uniform distribution over the current fringe contents.
        self.distribution = np.ones(len(self.fringe_list)) / len(self.fringe_list)
class TestFCLTransformConversion(unittest.TestCase):
    """Round-trip conversion checks between pinocchio SE3 and hpp-fcl Transform3f."""
    def test_from_SE3(self):
        # SE3 -> Transform3f must preserve rotation and translation exactly.
        M = pin.SE3.Random()
        fcl_transform = pin.hppfcl.Transform3f(M)
        self.assertTrue((M.rotation == fcl_transform.getRotation()).all())
        self.assertTrue((M.translation == fcl_transform.getTranslation()).all())
    def test_to_SE3(self):
        # A default-constructed Transform3f converts to the identity SE3.
        fcl_transform = pin.hppfcl.Transform3f()
        M = pin.SE3(fcl_transform)
        self.assertTrue(M.isIdentity())
def format_result(r):
    """Render r as '<TypeName 0xADDR> (repr)', flattening multi-line reprs
    and truncating the repr to the module-level `resultlimit` characters."""
    text = repr(r)
    if '\n' in text:
        # Re-escape so the output stays on one line.
        text = repr(text)
    if len(text) > resultlimit:
        text = text[:resultlimit] + ' ...'
    return '<%s 0x%x> (%s)' % (type(r).__name__, id(r), text)
def ResBody10(net, from_layer, num_output, expend, eps=0.001):
    """Append a pre-activation residual block (two 3x3 convs) to a Caffe net.

    When `expend` is set, the shortcut goes through a stride-2 1x1 conv and
    the first 3x3 conv also downsamples. The final sum is followed by
    BN/Scale/ReLU, named 'last_*' for the terminal 512-channel block.

    Fix: removed the no-op ``.format(num_output)`` calls on the placeholder-
    free 'last_scale' and 'last_relu' literals (copy-paste leftovers; the
    resulting names are unchanged).
    """
    kwargs = {'param': [dict(lr_mult=1, decay_mult=1)], 'weight_filler': dict(type='gaussian', std=0.01), 'bias_term': False}
    bn_kwargs = {'param': [dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0)], 'eps': eps}
    sb_kwargs = {'bias_term': True, 'param': [dict(lr_mult=1, decay_mult=0), dict(lr_mult=1, decay_mult=0)], 'filler': dict(type='constant', value=1.0), 'bias_filler': dict(type='constant', value=0.0)}
    stride = 1
    if expend:
        # Projection shortcut: stride-2 1x1 conv to match shape and channels.
        conv_expand_name = 'layer_{}_1_conv_expand'.format(num_output)
        stride = 2
        net[conv_expand_name] = L.Convolution(net[from_layer], num_output=num_output, kernel_size=1, pad=0, stride=stride, **kwargs)
        branch2 = conv_expand_name
    else:
        # Identity shortcut.
        branch2 = from_layer
    conv_name = 'layer_{}_1_conv1'.format(num_output)
    net[conv_name] = L.Convolution(net[from_layer], num_output=num_output, kernel_size=3, pad=1, stride=stride, **kwargs)
    bn_name = 'layer_{}_1_bn2'.format(num_output)
    net[bn_name] = L.BatchNorm(net[conv_name], in_place=True, **bn_kwargs)
    sb_name = 'layer_{}_1_scale2'.format(num_output)
    net[sb_name] = L.Scale(net[bn_name], in_place=True, **sb_kwargs)
    relu_name = 'layer_{}_1_relu2'.format(num_output)
    net[relu_name] = L.ReLU(net[sb_name], in_place=True)
    conv_name = 'layer_{}_1_conv2'.format(num_output)
    net[conv_name] = L.Convolution(net[relu_name], num_output=num_output, kernel_size=3, pad=1, stride=1, **kwargs)
    sum_name = 'layer_{}_1_sum'.format(num_output)
    net[sum_name] = L.Eltwise(net[conv_name], net[branch2])
    if (num_output == 512):
        # Terminal block: fixed 'last_*' layer names.
        bn_name = 'last_bn'
        net[bn_name] = L.BatchNorm(net[sum_name], in_place=True, **bn_kwargs)
        sb_name = 'last_scale'
        net[sb_name] = L.Scale(net[bn_name], in_place=True, **sb_kwargs)
        relu_name = 'last_relu'
        net[relu_name] = L.ReLU(net[sb_name], in_place=True)
    else:
        # Pre-activation for the next (doubled-width) block.
        bn_name = 'layer_{}_1_bn1'.format((2 * num_output))
        net[bn_name] = L.BatchNorm(net[sum_name], in_place=False, **bn_kwargs)
        sb_name = 'layer_{}_1_scale1'.format((2 * num_output))
        net[sb_name] = L.Scale(net[bn_name], in_place=True, **sb_kwargs)
        relu_name = 'layer_{}_1_relu1'.format((2 * num_output))
        net[relu_name] = L.ReLU(net[sb_name], in_place=True)
def load_model(model_path='', mode='all', scene_hop=5000, **kwds):
    """Build and return the basic model via get_basic_model.

    NOTE(review): model_path is accepted but never used — no checkpoint is
    loaded here; confirm whether weight loading was intended.
    """
    model = get_basic_model(mode=mode, scene_hop=scene_hop, **kwds)
    return model
class _SubsampleMetaSplitter():
    """CV splitter wrapper that subsamples a fraction of each train fold
    (and, optionally, each test fold) without replacement."""

    def __init__(self, *, base_cv, fraction, subsample_test, random_state):
        self.base_cv = base_cv
        self.fraction = fraction
        self.subsample_test = subsample_test
        self.random_state = random_state

    def split(self, X, y, **kwargs):
        """Yield (train_idx, test_idx) pairs with the train indices (and test,
        if configured) downsampled to `fraction` of their original size."""
        for (train_idx, test_idx) in self.base_cv.split(X, y, **kwargs):
            n_train = int(self.fraction * len(train_idx))
            train_idx = resample(train_idx, replace=False, random_state=self.random_state, n_samples=n_train)
            if self.subsample_test:
                n_test = int(self.fraction * len(test_idx))
                test_idx = resample(test_idx, replace=False, random_state=self.random_state, n_samples=n_test)
            yield (train_idx, test_idx)
def get_glad_result_names(result_type):
    """Return the GLAD result-series names for the given result type.

    Only 'batch' is supported; anything else raises ValueError.
    """
    if result_type != 'batch':
        raise ValueError('Invalid result type: %s' % result_type)
    return ['loda_glad', 'loda', 'loda_aad']
class ExplainerBase(metaclass=ExplainerABCMeta):
    """Common base for explainers: the explain() contract plus dill-based
    state persistence (save/load)."""

    def __init__(self):
        pass

    def explain(self, **kwargs):
        """Produce an explanation; must be overridden by subclasses."""
        raise NotImplementedError

    def explanation_type(self):
        """Scope of the produced explanations ('local' by default)."""
        return 'local'

    def __getstate__(self):
        # Deep-copy attributes so the serialized state cannot alias live objects.
        return {k: deepcopy(v) for (k, v) in self.__dict__.items()}

    def __setstate__(self, state):
        for (name, value) in state.items():
            setattr(self, name, value)

    def save(self, directory: str, filename: str=None, **kwargs):
        """Serialize this explainer's state with dill to directory/filename.

        kwargs['ignored_attributes'] (optional): attribute names to exclude
        from the saved state. filename defaults to '<ClassName>.pkl'.
        """
        os.makedirs(directory, exist_ok=True)
        if (filename is None):
            filename = f'{type(self).__name__}.pkl'
        state = self.__getstate__()
        if ('ignored_attributes' in kwargs):
            for attr in kwargs['ignored_attributes']:
                state.pop(attr, None)
        with open(os.path.join(directory, filename), 'wb') as f:
            dill.dump(state, f)

    @classmethod
    def load(cls, directory: str, filename: str=None, **kwargs):
        """Recreate an explainer from a file written by save().

        Fix: this method takes `cls` and calls `__new__(cls)` but was missing
        the @classmethod decorator, so `SomeExplainer.load(path)` would have
        bound the path to `cls`; decorator added (backward-compatible for the
        intended classmethod call sites).
        """
        if (filename is None):
            filename = f'{cls.__name__}.pkl'
        with open(os.path.join(directory, filename), 'rb') as f:
            state = dill.load(f)
        self = super(ExplainerBase, cls).__new__(cls)
        self.__setstate__(state)
        return self
class Dialog(object):
    """Runs one negotiation dialogue between two agents and records metrics."""
    def __init__(self, agents, args):
        # Exactly two agents negotiate per dialogue.
        assert (len(agents) == 2)
        self.agents = agents
        self.args = args
        self.domain = domain.get_domain(args.domain)
        self.metrics = MetricsContainer()
        self._register_metrics()
        # Reward shaping mode used in run(): 'margin', 'fair' or 'length'.
        self.reward_func = args.reward
    def _register_metrics(self):
        """Register dialogue-level and per-agent metrics."""
        self.metrics.register_average('dialog_len')
        self.metrics.register_average('sent_len')
        self.metrics.register_percentage('agree')
        self.metrics.register_average('advantage')
        self.metrics.register_time('time')
        self.metrics.register_average('comb_rew')
        for agent in self.agents:
            self.metrics.register_average(('%s_rew' % agent.name))
            self.metrics.register_percentage(('%s_sel' % agent.name))
            self.metrics.register_uniqueness(('%s_unique' % agent.name))
        # N-gram novelty is measured against a reference corpus.
        ref_text = ' '.join(data.read_lines(self.args.ref_text))
        self.metrics.register_ngram('full_match', text=ref_text)
    def _is_selection(self, out):
        # An utterance consisting solely of the <selection> token ends the dialogue.
        return ((len(out) == 1) and (out[0] == '<selection>'))
    def show_metrics(self):
        """Render all metrics as a single 'key=value' line."""
        return ' '.join([('%s=%s' % (k, v)) for (k, v) in self.metrics.dict().items()])
    def run(self, ctxs, logger):
        """Play one dialogue given per-agent contexts; returns (conv, agree, rewards)."""
        assert (len(self.agents) == len(ctxs))
        for (agent, ctx) in zip(self.agents, ctxs):
            # Debug output left in place intentionally.
            print('feed_context:', ctx, type(ctx[0]))
            agent.feed_context(ctx)
            logger.dump_ctx(agent.name, ctx)
        logger.dump(('-' * 80))
        # Randomize which agent speaks first.
        if (np.random.rand() < 0.5):
            (writer, reader) = self.agents
        else:
            (reader, writer) = self.agents
        conv = []
        self.metrics.reset()
        num_utterances = 0
        # Alternate turns until one agent emits <selection>.
        while True:
            out = writer.write()
            num_utterances += 1
            self.metrics.record('sent_len', len(out))
            self.metrics.record('full_match', out)
            self.metrics.record(('%s_unique' % writer.name), out)
            conv.append(out)
            reader.read(out)
            if (not writer.human):
                logger.dump_sent(writer.name, out)
            if self._is_selection(out):
                self.metrics.record(('%s_sel' % writer.name), 1)
                self.metrics.record(('%s_sel' % reader.name), 0)
                break
            (writer, reader) = (reader, writer)
        # Both agents commit to a final choice, which the domain scores.
        choices = []
        for agent in self.agents:
            choice = agent.choose()
            print(agent.name, 'choice:', choice)
            choices.append(choice)
            logger.dump_choice(agent.name, choice[:(self.domain.selection_length() // 2)])
        (agree, rewards) = self.domain.score_choices(choices, ctxs)
        # Reward shaping: disagreement penalizes both; 'fair' rewards balance,
        # 'length' rewards long dialogues, 'margin' keeps raw scores.
        if (not agree):
            rewards = [(- 1.0) for _ in rewards]
        elif (self.reward_func == 'margin'):
            rewards = rewards
        elif (self.reward_func == 'fair'):
            diff = (abs((rewards[0] - rewards[1])) * (- 0.1))
            rewards = [diff for _ in rewards]
        elif (self.reward_func == 'length'):
            rewards = [float(num_utterances) for _ in rewards]
        logger.dump(('-' * 80))
        logger.dump_agreement(agree)
        for (agent, reward) in zip(self.agents, rewards):
            logger.dump_reward(agent.name, agree, reward)
            logging.debug(('%s : %s : %s' % (str(agent.name), str(agree), str(rewards))))
            # Let learning agents update from the outcome.
            agent.update(agree, reward)
        if agree:
            self.metrics.record('advantage', (rewards[0] - rewards[1]))
        self.metrics.record('time')
        self.metrics.record('dialog_len', len(conv))
        self.metrics.record('agree', int(agree))
        self.metrics.record('comb_rew', np.sum(rewards))
        for (agent, reward) in zip(self.agents, rewards):
            self.metrics.record(('%s_rew' % agent.name), reward)
        logger.dump(('-' * 80))
        logger.dump(self.show_metrics())
        logger.dump(('-' * 80))
        for (ctx, choice) in zip(ctxs, choices):
            logger.dump(('debug: %s %s' % (' '.join(ctx), ' '.join(choice))))
        return (conv, agree, rewards)
_module()
class TensorRTRecognizer(EncodeDecodeRecognizer):
    """Text recognizer that runs inference through a serialized TensorRT engine.

    Reuses EncodeDecodeRecognizer's label conversion while delegating the
    forward pass to a TRTWrapper loaded from `trt_file`.
    """
    def __init__(self, trt_file: str, cfg: Any, device_id: int, show_score: bool=False):
        # 'type' is a registry key, not a constructor argument — drop it.
        if ('type' in cfg.model):
            cfg.model.pop('type')
        EncodeDecodeRecognizer.__init__(self, **cfg.model)
        from mmcv.tensorrt import TRTWrapper, load_tensorrt_plugin
        try:
            load_tensorrt_plugin()
        except (ImportError, ModuleNotFoundError):
            warnings.warn('If input model has custom op from mmcv, you may have to build mmcv with TensorRT from source.')
        model = TRTWrapper(trt_file, input_names=['input'], output_names=['output'])
        self.model = model
        self.device_id = device_id
        self.cfg = cfg
    def forward_train(self, img, img_metas, **kwargs):
        # Training is not possible against a frozen TensorRT engine.
        raise NotImplementedError('This method is not implemented.')
    def aug_test(self, imgs, img_metas, **kwargs):
        """Normalize augmented inputs to a single batch, then run simple_test."""
        if isinstance(imgs, list):
            # Promote 3-D tensors to batched 4-D, then keep only the first view.
            for (idx, each_img) in enumerate(imgs):
                if (each_img.dim() == 3):
                    imgs[idx] = each_img.unsqueeze(0)
            imgs = imgs[0]
            img_metas = img_metas[0]
        elif ((len(img_metas) == 1) and isinstance(img_metas[0], list)):
            img_metas = img_metas[0]
        return self.simple_test(imgs, img_metas=img_metas)
    def extract_feat(self, imgs):
        # Feature extraction happens inside the TRT engine; not exposed.
        raise NotImplementedError('This method is not implemented.')
    def simple_test(self, img: torch.Tensor, img_metas: Iterable, rescale: bool=False):
        """Run the engine on `img` and decode predictions into text/score dicts."""
        with torch.cuda.device(self.device_id), torch.no_grad():
            trt_pred = self.model({'input': img})['output']
        (label_indexes, label_scores) = self.label_convertor.tensor2idx(trt_pred, img_metas)
        label_strings = self.label_convertor.idx2str(label_indexes)
        results = []
        for (string, score) in zip(label_strings, label_scores):
            results.append(dict(text=string, score=score))
        return results
class Scheduler():
    """Base class for optimizer param-group schedulers (timm-style).

    Stores an `initial_<field>` snapshot per param group, exposes epoch- and
    update-granularity hooks for subclasses, and can perturb scheduled values
    with bounded, per-step-deterministic random noise.
    """
    def __init__(self, optimizer: torch.optim.Optimizer, param_group_field: str, noise_range_t=None, noise_type='normal', noise_pct=0.67, noise_std=1.0, noise_seed=None, initialize: bool=True) -> None:
        self.optimizer = optimizer
        self.param_group_field = param_group_field
        self._initial_param_group_field = f'initial_{param_group_field}'
        if initialize:
            # Fresh run: snapshot each group's current value as its base.
            for (i, group) in enumerate(self.optimizer.param_groups):
                if (param_group_field not in group):
                    raise KeyError(f'{param_group_field} missing from param_groups[{i}]')
                group.setdefault(self._initial_param_group_field, group[param_group_field])
        else:
            # Resuming: the snapshots must already exist in the groups.
            for (i, group) in enumerate(self.optimizer.param_groups):
                if (self._initial_param_group_field not in group):
                    raise KeyError(f'{self._initial_param_group_field} missing from param_groups[{i}]')
        self.base_values = [group[self._initial_param_group_field] for group in self.optimizer.param_groups]
        self.metric = None
        self.noise_range_t = noise_range_t
        self.noise_pct = noise_pct
        self.noise_type = noise_type
        self.noise_std = noise_std
        self.noise_seed = (noise_seed if (noise_seed is not None) else 42)
        self.update_groups(self.base_values)
    def state_dict(self) -> Dict[(str, Any)]:
        """Serializable state: everything except the optimizer reference."""
        return {key: value for (key, value) in self.__dict__.items() if (key != 'optimizer')}
    def load_state_dict(self, state_dict: Dict[(str, Any)]) -> None:
        """Restore state produced by state_dict()."""
        self.__dict__.update(state_dict)
    def get_epoch_values(self, epoch: int):
        # Subclasses return a per-group value list; None means "no change".
        return None
    def get_update_values(self, num_updates: int):
        # Subclasses return a per-group value list; None means "no change".
        return None
    def step(self, epoch: int, metric: float=None) -> None:
        """Epoch-granularity update of the scheduled field."""
        self.metric = metric
        values = self.get_epoch_values(epoch)
        if (values is not None):
            values = self._add_noise(values, epoch)
            self.update_groups(values)
    def step_update(self, num_updates: int, metric: float=None):
        """Optimizer-step-granularity update of the scheduled field."""
        self.metric = metric
        values = self.get_update_values(num_updates)
        if (values is not None):
            values = self._add_noise(values, num_updates)
            self.update_groups(values)
    def update_groups(self, values):
        """Write values (scalar or per-group sequence) into the optimizer groups."""
        if (not isinstance(values, (list, tuple))):
            values = ([values] * len(self.optimizer.param_groups))
        for (param_group, value) in zip(self.optimizer.param_groups, values):
            param_group[self.param_group_field] = value
    def _add_noise(self, lrs, t):
        # Multiplicative noise applied when t falls inside noise_range_t
        # (interval if list/tuple, otherwise a lower bound).
        if (self.noise_range_t is not None):
            if isinstance(self.noise_range_t, (list, tuple)):
                apply_noise = (self.noise_range_t[0] <= t < self.noise_range_t[1])
            else:
                apply_noise = (t >= self.noise_range_t)
            if apply_noise:
                # Seed depends on (noise_seed, t) so each step's noise is reproducible.
                g = torch.Generator()
                g.manual_seed((self.noise_seed + t))
                if (self.noise_type == 'normal'):
                    # Rejection-sample a normal draw truncated to |noise| < noise_pct.
                    while True:
                        noise = torch.randn(1, generator=g).item()
                        if (abs(noise) < self.noise_pct):
                            break
                else:
                    # Uniform noise in (-noise_pct, noise_pct).
                    noise = ((2 * (torch.rand(1, generator=g).item() - 0.5)) * self.noise_pct)
                lrs = [(v + (v * noise)) for v in lrs]
        return lrs
class WrappedSocket(object):
    """socket.socket-like API over a pyOpenSSL connection.

    Translates pyOpenSSL's exceptions into the stdlib ``socket``/``ssl``
    exceptions callers expect, and implements the ``makefile`` reference
    counting used to keep the underlying connection open while file objects
    derived from it are still alive.
    """

    def __init__(self, connection, socket, suppress_ragged_eofs=True):
        self.connection = connection
        self.socket = socket
        self.suppress_ragged_eofs = suppress_ragged_eofs
        self._makefile_refs = 0
        self._closed = False

    def fileno(self):
        return self.socket.fileno()

    def _decref_socketios(self):
        # Copy socket.socket's protocol: a file object derived via makefile()
        # releases its reference here; close for real once close() has been
        # requested and this was the last reference.
        if self._makefile_refs > 0:
            self._makefile_refs -= 1
        if self._closed:
            self.close()

    def recv(self, *args, **kwargs):
        # Loop rather than recurse: WantReadError only means "wait, retry".
        while True:
            try:
                return self.connection.recv(*args, **kwargs)
            except OpenSSL.SSL.SysCallError as e:
                # A ragged EOF (peer dropped without close_notify) may be
                # treated as a clean end-of-stream.
                if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
                    return b''
                raise SocketError(str(e))
            except OpenSSL.SSL.ZeroReturnError:
                # Clean TLS shutdown from the peer -> EOF.
                if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
                    return b''
                raise
            except OpenSSL.SSL.WantReadError:
                if not util.wait_for_read(self.socket, self.socket.gettimeout()):
                    raise timeout('The read operation timed out')
                # Socket became readable again; retry the recv.
            except OpenSSL.SSL.Error as e:
                raise ssl.SSLError('read error: %r' % e)

    def recv_into(self, *args, **kwargs):
        # Same translation as recv(), but EOF is reported as 0 bytes read.
        while True:
            try:
                return self.connection.recv_into(*args, **kwargs)
            except OpenSSL.SSL.SysCallError as e:
                if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
                    return 0
                raise SocketError(str(e))
            except OpenSSL.SSL.ZeroReturnError:
                if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
                    return 0
                raise
            except OpenSSL.SSL.WantReadError:
                if not util.wait_for_read(self.socket, self.socket.gettimeout()):
                    raise timeout('The read operation timed out')
                # Retry once readable.
            except OpenSSL.SSL.Error as e:
                raise ssl.SSLError('read error: %r' % e)

    def settimeout(self, timeout):
        return self.socket.settimeout(timeout)

    def _send_until_done(self, data):
        # Retry the TLS send while the transport is write-blocked.
        while True:
            try:
                return self.connection.send(data)
            except OpenSSL.SSL.WantWriteError:
                if not util.wait_for_write(self.socket, self.socket.gettimeout()):
                    raise timeout()
                continue
            except OpenSSL.SSL.SysCallError as e:
                raise SocketError(str(e))

    def sendall(self, data):
        # Feed the connection in SSL_WRITE_BLOCKSIZE-sized slices until done.
        offset = 0
        while offset < len(data):
            offset += self._send_until_done(data[offset:offset + SSL_WRITE_BLOCKSIZE])

    def shutdown(self):
        self.connection.shutdown()

    def close(self):
        if self._makefile_refs < 1:
            try:
                self._closed = True
                return self.connection.close()
            except OpenSSL.SSL.Error:
                return
        else:
            # File objects still alive: defer the real close (see
            # _decref_socketios).
            self._makefile_refs -= 1

    def getpeercert(self, binary_form=False):
        x509 = self.connection.get_peer_certificate()
        if not x509:
            return x509
        if binary_form:
            return OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_ASN1, x509)
        # Minimal dict mirroring ssl.SSLSocket.getpeercert()'s shape.
        return {
            'subject': ((('commonName', x509.get_subject().CN),),),
            'subjectAltName': get_subj_alt_name(x509),
        }

    def version(self):
        return self.connection.get_protocol_version_name()

    def _reuse(self):
        self._makefile_refs += 1

    def _drop(self):
        if self._makefile_refs < 1:
            self.close()
        else:
            self._makefile_refs -= 1
class TestRequirementsCheck():
    """Tests for RequirementsCheck startup behavior."""

    def test_passes_on_process_project(self):
        self.uut = RequirementsCheck()
        assert_equals([self.uut], self.uut.run())

    def test_checks_requirements_on_start(self):
        self.test_requirement = Requirement('-test-requirement-')
        self.test_requirement.check = MagicMock()
        # Patch the requirements source so constructing the check exercises
        # exactly our stub; restore the class attribute afterwards.
        # NOTE(review): the replacement lambda takes no arguments — assumes
        # _get_requirements is declared as a staticmethod; verify at its
        # definition.
        saved = RequirementsCheck._get_requirements
        RequirementsCheck._get_requirements = lambda: [self.test_requirement]
        try:
            self.uut = RequirementsCheck()
            self.test_requirement.check.assert_called_once_with()
        finally:
            RequirementsCheck._get_requirements = saved
# NOTE: the three lines below are non-code residue (dataset-viewer page text)
# accidentally appended to this file; commented out so the module parses.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.