code stringlengths 101 5.91M |
|---|
class LatticeDiagram(CombinatorialObject):
    """A lattice diagram encoded as a 1-indexed list of column heights.

    ``self[i]`` is the height of column ``i`` (indexing starts at 1; see
    ``__getitem__``).  A *box* is a pair ``(i, j)`` with ``1 <= j <= self[i]``.
    """

    def boxes(self):
        """Return all boxes ``(i, j)`` of the diagram, both coordinates 1-based."""
        res = []
        for i in range(1, (len(self) + 1)):
            # Column i contributes boxes (i, 1) .. (i, self[i]).
            res += [(i, (j + 1)) for j in range(self[i])]
        return res

    def __getitem__(self, i):
        """Return the height of column ``i`` (1-based; negative indices count from the end)."""
        if (i == 0):
            raise ValueError('indexing starts at 1')
        elif (i < 0):
            # Shift negative indices so that -1 still maps to the last entry
            # after the (i - 1) adjustment below.
            i += 1
        return self._list[(i - 1)]

    def leg(self, i, j):
        """Return the boxes strictly above ``(i, j)`` in the same column."""
        return [(i, k) for k in range((j + 1), (self[i] + 1))]

    def arm_left(self, i, j):
        """Return boxes in row ``j`` to the left of column ``i`` whose column height lies in ``[j, self[i]]``."""
        return [(ip, j) for ip in range(1, i) if (j <= self[ip] <= self[i])]

    def arm_right(self, i, j):
        """Return boxes in row ``j - 1`` to the right of column ``i`` whose column height lies in ``[j - 1, self[i])``."""
        return [(ip, (j - 1)) for ip in range((i + 1), (len(self) + 1)) if ((j - 1) <= self[ip] < self[i])]

    def arm(self, i, j):
        """Return the full arm of box ``(i, j)``: left part plus right part."""
        return (self.arm_left(i, j) + self.arm_right(i, j))

    def l(self, i, j):
        """Return the leg length of box ``(i, j)``: number of boxes above it."""
        return (self[i] - j)

    def a(self, i, j):
        """Return the arm length of box ``(i, j)``."""
        return len(self.arm(i, j))

    def size(self):
        """Return the total number of boxes in the diagram."""
        return sum(self._list)

    def flip(self):
        """Return the complement diagram: each height replaced by ``max - height``."""
        r = max(self)
        return LatticeDiagram([(r - i) for i in self])

    def boxes_same_and_lower_right(self, ii, jj):
        """Return boxes in row ``jj`` (other columns) plus boxes in row ``jj - 1`` to the right of column ``ii``."""
        res = []
        for i in range(1, (len(self) + 1)):
            if ((self[i] >= jj) and (i != ii)):
                res.append((i, jj))
        for i in range((ii + 1), (len(self) + 1)):
            if (self[i] >= (jj - 1)):
                res.append((i, (jj - 1)))
        return res
class SubjectFile():
    """Groups the files belonging to one subject into named categories.

    Each keyword argument to the constructor becomes a ``FileCategory``;
    file identifiers must be unique across all categories.
    """

    def __init__(self, subject: str, **file_groups) -> None:
        self.subject = subject
        # One FileCategory per keyword argument, keyed by the argument name.
        self.categories = {
            name: FileCategory(files) for name, files in file_groups.items()
        }
        self._check_validity()

    def _check_validity(self):
        """Raise ValueError if any file identifier appears in more than one category."""
        seen = []
        for category in self.categories.values():
            seen.extend(category.entries.keys())
        if len(seen) != len(set(seen)):
            raise ValueError('Identifiers must be unique')

    def get_all_files(self):
        """Return a single id -> filename mapping merged across all categories."""
        merged = {}
        for category in self.categories.values():
            merged.update(category.entries)
        return merged
class ConvReLU1d(_FusedModule):
    """Fused container holding a Conv1d immediately followed by a ReLU.

    Presumably consumed by quantization/fusion tooling — TODO confirm against
    the `_FusedModule` base class, which is not visible here.
    """

    def __init__(self, conv, relu):
        # Exact-type check (not isinstance): subclasses are deliberately
        # rejected.  NOTE(review): the message has no separator between the
        # two formatted type names.
        assert ((type(conv) == Conv1d) and (type(relu) == ReLU)), 'Incorrect types for input modules{}{}'.format(type(conv), type(relu))
        super().__init__(conv, relu)
def bahdanau_attention(num_units, memory, query, normalize=False):
    """Additive (Bahdanau-style) attention of `query` over `memory` (TF1 graph code).

    Args:
        num_units: projection width of the attention space.
        memory: the values/keys source; assumed shape (batch, time, dim) — TODO confirm.
        query: the query tensor projected into the same space.
        normalize: forwarded to `_bahdanau_score` (weight-normalized scoring).

    Returns:
        (context, alignments): the attention-weighted sum of `memory` and the
        softmax alignment weights.
    """
    with tf.variable_scope('bahdanau_attention'):
        memory_layer = tf.layers.Dense(num_units, name='memory_layer', use_bias=False)
        query_layer = tf.layers.Dense(num_units, name='query_layer', use_bias=False)
        values = memory
        # Project memory and query into the shared attention space.
        keys = memory_layer(values)
        processed_query = query_layer(query)
        score = _bahdanau_score(processed_query, keys, normalize=normalize)
        alignments = tf.nn.softmax(score)
        # Weighted sum of values over the time axis (axis=1).
        context = tf.reduce_sum((tf.expand_dims(alignments, 2) * values), axis=1)
        return (context, alignments)
def test_walker_custom(line_graph):
    """Check UnsupervisedSampler with a CustomWalker on a line graph.

    Expects one batch per node; on a line graph every positive context pair
    (label == 1) produced by the custom walker pairs a node with itself.
    """
    walker = CustomWalker()
    sampler = UnsupervisedSampler(line_graph, walker=walker)
    batches = sampler.run(2)
    assert (len(batches) == line_graph.number_of_nodes())
    for (context_pairs, labels) in batches:
        # Boolean-mask the positive pairs; each should be a self-pair here.
        for (node, neighbour) in context_pairs[(labels == 1)]:
            assert (node == neighbour)
class TDNNBlock(nn.Module):
    """TDNN block: Conv1d -> activation -> BatchNorm1d.

    NOTE(review): `Conv1d`/`BatchNorm1d` here take `input_size=` style kwargs,
    so they appear to be project wrappers (not raw torch.nn) — confirm.
    """

    def __init__(self, in_channels, out_channels, kernel_size, dilation, activation=nn.ReLU, groups=1):
        super(TDNNBlock, self).__init__()
        self.conv = Conv1d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, dilation=dilation, groups=groups)
        # `activation` is a class (default nn.ReLU); instantiate it here.
        self.activation = activation()
        self.norm = BatchNorm1d(input_size=out_channels)

    def forward(self, x):
        """Apply conv, then activation, then batch norm."""
        return self.norm(self.activation(self.conv(x)))
class GaussianCNNRegressor(StochasticRegressor):
    """CNN regressor whose output is modeled as a Gaussian distribution (TF1 graph code).

    Builds a `GaussianCNNRegressorModel`, normalizes inputs/outputs on `fit`,
    and optimizes either an unconstrained negative log-likelihood (LBFGS) or a
    KL-trust-region-constrained objective (penalty LBFGS).
    """

    def __init__(self, input_shape, output_dim, filter_dims, num_filters, strides, padding, hidden_sizes, hidden_nonlinearity=tf.nn.tanh, hidden_w_init=tf.glorot_uniform_initializer(), hidden_b_init=tf.zeros_initializer(), output_nonlinearity=None, output_w_init=tf.glorot_uniform_initializer(), output_b_init=tf.zeros_initializer(), name='GaussianCNNRegressor', learn_std=True, init_std=1.0, adaptive_std=False, std_share_network=False, std_filter_dims=(), std_num_filters=(), std_strides=(), std_padding='SAME', std_hidden_sizes=(), std_hidden_nonlinearity=None, std_output_nonlinearity=None, layer_normalization=False, normalize_inputs=True, normalize_outputs=True, subsample_factor=1.0, optimizer=None, optimizer_args=None, use_trust_region=True, max_kl_step=0.01):
        super().__init__(input_shape, output_dim, name)
        self._use_trust_region = use_trust_region
        self._subsample_factor = subsample_factor
        self._max_kl_step = max_kl_step
        self._normalize_inputs = normalize_inputs
        self._normalize_outputs = normalize_outputs
        with tf.compat.v1.variable_scope(self._name, reuse=False) as vs:
            self._variable_scope = vs
            if (optimizer_args is None):
                optimizer_args = dict()
            # Default optimizer: penalty LBFGS when using a trust region,
            # plain LBFGS otherwise; a caller-supplied optimizer class wins.
            if (optimizer is None):
                if use_trust_region:
                    optimizer = PenaltyLbfgsOptimizer(**optimizer_args)
                else:
                    optimizer = LbfgsOptimizer(**optimizer_args)
            else:
                optimizer = optimizer(**optimizer_args)
            self._optimizer = optimizer
        self.model = GaussianCNNRegressorModel(input_shape=input_shape, output_dim=output_dim, num_filters=num_filters, filter_dims=filter_dims, strides=strides, padding=padding, hidden_sizes=hidden_sizes, hidden_nonlinearity=hidden_nonlinearity, hidden_w_init=hidden_w_init, hidden_b_init=hidden_b_init, output_nonlinearity=output_nonlinearity, output_w_init=output_w_init, output_b_init=output_b_init, learn_std=learn_std, adaptive_std=adaptive_std, std_share_network=std_share_network, init_std=init_std, min_std=None, max_std=None, std_num_filters=std_num_filters, std_filter_dims=std_filter_dims, std_strides=std_strides, std_padding=std_padding, std_hidden_sizes=std_hidden_sizes, std_hidden_nonlinearity=std_hidden_nonlinearity, std_output_nonlinearity=std_output_nonlinearity, std_parameterization='exp', layer_normalization=layer_normalization)
        self._initialize()

    def _initialize(self):
        """Build the TF graph: placeholders, normalized loss/KL, compiled prediction fns."""
        input_var = tf.compat.v1.placeholder(tf.float32, shape=((None,) + self._input_shape))
        with tf.compat.v1.variable_scope(self._variable_scope):
            self.model.build(input_var)
            ys_var = tf.compat.v1.placeholder(dtype=tf.float32, name='ys', shape=(None, self._output_dim))
            old_means_var = tf.compat.v1.placeholder(dtype=tf.float32, name='old_means', shape=(None, self._output_dim))
            old_log_stds_var = tf.compat.v1.placeholder(dtype=tf.float32, name='old_log_stds', shape=(None, self._output_dim))
        y_mean_var = self.model.networks['default'].y_mean
        y_std_var = self.model.networks['default'].y_std
        means_var = self.model.networks['default'].means
        log_stds_var = self.model.networks['default'].log_stds
        normalized_means_var = self.model.networks['default'].normalized_means
        normalized_log_stds_var = self.model.networks['default'].normalized_log_stds
        # Work in normalized output space so the loss is scale-invariant.
        normalized_ys_var = ((ys_var - y_mean_var) / y_std_var)
        normalized_old_means_var = ((old_means_var - y_mean_var) / y_std_var)
        normalized_old_log_stds_var = (old_log_stds_var - tf.math.log(y_std_var))
        normalized_dist_info_vars = dict(mean=normalized_means_var, log_std=normalized_log_stds_var)
        # KL between the previous fit and the current one (trust-region constraint).
        mean_kl = tf.reduce_mean(self.model.networks['default'].dist.kl_sym(dict(mean=normalized_old_means_var, log_std=normalized_old_log_stds_var), normalized_dist_info_vars))
        loss = (- tf.reduce_mean(self.model.networks['default'].dist.log_likelihood_sym(normalized_ys_var, normalized_dist_info_vars)))
        self._f_predict = tensor_utils.compile_function([input_var], means_var)
        self._f_pdists = tensor_utils.compile_function([input_var], [means_var, log_stds_var])
        optimizer_args = dict(loss=loss, target=self, network_outputs=[normalized_means_var, normalized_log_stds_var])
        if self._use_trust_region:
            optimizer_args['leq_constraint'] = (mean_kl, self._max_kl_step)
            optimizer_args['inputs'] = [input_var, ys_var, old_means_var, old_log_stds_var]
        else:
            optimizer_args['inputs'] = [input_var, ys_var]
        with tf.name_scope('update_opt'):
            self._optimizer.update_opt(**optimizer_args)

    def fit(self, xs, ys):
        """Fit the model to (xs, ys), optionally subsampling and normalizing; logs losses."""
        if (self._subsample_factor < 1):
            # Random subsample (with replacement) of the training data.
            num_samples_tot = xs.shape[0]
            idx = np.random.randint(0, num_samples_tot, int((num_samples_tot * self._subsample_factor)))
            (xs, ys) = (xs[idx], ys[idx])
        if self._normalize_inputs:
            self.model.networks['default'].x_mean.load(np.mean(xs, axis=0, keepdims=True))
            self.model.networks['default'].x_std.load((np.std(xs, axis=0, keepdims=True) + 1e-08))
        if self._normalize_outputs:
            self.model.networks['default'].y_mean.load(np.mean(ys, axis=0, keepdims=True))
            self.model.networks['default'].y_std.load((np.std(ys, axis=0, keepdims=True) + 1e-08))
        if self._use_trust_region:
            # Snapshot the current predictive distribution for the KL constraint.
            (old_means, old_log_stds) = self._f_pdists(xs)
            inputs = [xs, ys, old_means, old_log_stds]
        else:
            inputs = [xs, ys]
        loss_before = self._optimizer.loss(inputs)
        tabular.record('{}/LossBefore'.format(self._name), loss_before)
        self._optimizer.optimize(inputs)
        loss_after = self._optimizer.loss(inputs)
        tabular.record('{}/LossAfter'.format(self._name), loss_after)
        if self._use_trust_region:
            tabular.record('{}/MeanKL'.format(self._name), self._optimizer.constraint_val(inputs))
        tabular.record('{}/dLoss'.format(self._name), (loss_before - loss_after))

    def predict(self, xs):
        """Return the predicted means for inputs `xs`."""
        return self._f_predict(xs)

    def log_likelihood_sym(self, x_var, y_var, name=None):
        """Return a symbolic log-likelihood of `y_var` under the model built for `name`."""
        params = self.dist_info_sym(x_var, name=name)
        means_var = params['mean']
        log_stds_var = params['log_std']
        return self.model.networks[name].dist.log_likelihood_sym(y_var, dict(mean=means_var, log_std=log_stds_var))

    def dist_info_sym(self, x_var, name=None):
        """Build (or reuse) the network under `name` and return its mean/log_std tensors."""
        with tf.compat.v1.variable_scope(self._variable_scope):
            self.model.build(x_var, name=name)
        means_var = self.model.networks[name].means
        log_stds_var = self.model.networks[name].log_stds
        return dict(mean=means_var, log_std=log_stds_var)

    def get_params_internal(self):
        """Return the trainable variables of this regressor's scope."""
        return self._variable_scope.trainable_variables()

    def __getstate__(self):
        """Drop compiled TF functions (unpicklable) from the pickled state."""
        new_dict = super().__getstate__()
        del new_dict['_f_predict']
        del new_dict['_f_pdists']
        return new_dict

    def __setstate__(self, state):
        """Restore state and rebuild the TF graph/compiled functions."""
        super().__setstate__(state)
        self._initialize()
def get_world_size():
    """Return the number of processes in the default process group.

    Falls back to 1 when distributed training is unavailable or has not been
    initialised (note: `is_dist_avail_and_is_initalized` spelling matches the
    helper defined elsewhere in this project).
    """
    if is_dist_avail_and_is_initalized():
        return dist.get_world_size()
    return 1
class MsraNERLoader(CNNERLoader):
    """Loader for the MSRA Chinese NER dataset (CoNLL-format files)."""

    def __init__(self):
        super().__init__()

    def download(self, dev_ratio: float=0.1, re_download: bool=False) -> str:
        """Download the dataset and optionally split a dev set off the train file.

        Args:
            dev_ratio: fraction (in (0, 1)) of sentences moved from train.conll
                to dev.conll; 0 skips the split.
            re_download: when True (and the cached copy is older than ~1s),
                wipe the cache and download again.

        Returns:
            The dataset directory path.
        """
        dataset_name = 'msra-ner'
        data_dir = self._get_dataset_path(dataset_name=dataset_name)
        modify_time = 0
        # Sample the mtime of any one cached file to decide staleness.
        for filepath in glob.glob(os.path.join(data_dir, '*')):
            modify_time = os.stat(filepath).st_mtime
            break
        if (((time.time() - modify_time) > 1) and re_download):
            shutil.rmtree(data_dir)
            data_dir = self._get_dataset_path(dataset_name=dataset_name)
        if (not os.path.exists(os.path.join(data_dir, 'dev.conll'))):
            if (dev_ratio > 0):
                assert (0 < dev_ratio < 1), 'dev_ratio should be in range (0,1).'
                try:
                    # Stream train.conll sentence-by-sentence (blank-line
                    # separated); route each sentence to dev or to a temp
                    # "middle" file that later replaces train.conll.
                    with open(os.path.join(data_dir, 'train.conll'), 'r', encoding='utf-8') as f, open(os.path.join(data_dir, 'middle_file.conll'), 'w', encoding='utf-8') as f1, open(os.path.join(data_dir, 'dev.conll'), 'w', encoding='utf-8') as f2:
                        lines = []
                        for line in f:
                            line = line.strip()
                            if line:
                                lines.append(line)
                            else:
                                # End of a sentence: randomly assign it.
                                if (random.random() < dev_ratio):
                                    f2.write(('\n'.join(lines) + '\n\n'))
                                else:
                                    f1.write(('\n'.join(lines) + '\n\n'))
                                lines.clear()
                    os.remove(os.path.join(data_dir, 'train.conll'))
                    os.renames(os.path.join(data_dir, 'middle_file.conll'), os.path.join(data_dir, 'train.conll'))
                finally:
                    # Clean up the temp file if the rename did not happen.
                    if os.path.exists(os.path.join(data_dir, 'middle_file.conll')):
                        os.remove(os.path.join(data_dir, 'middle_file.conll'))
        return data_dir
class ICAutoMLForecaster(AutoMLMixIn, ForecasterBase, metaclass=AutodocABCMeta):
    """AutoML forecaster that selects hyperparameters by information criterion (IC).

    Subclasses implement `get_ic` and `_model_name`; `evaluate_theta` trains one
    model per candidate theta and keeps the one with the lowest IC.
    """

    config_class = ICConfig

    @property
    def information_criterion(self):
        # Restored as a property: evaluate_theta accesses it attribute-style
        # (`self.information_criterion.name`), which fails on a bound method.
        return self.config.information_criterion

    def get_ic(self, model, train_data: pd.DataFrame, train_result: Tuple[(pd.DataFrame, Optional[pd.DataFrame])]) -> float:
        """Return the information criterion of `model` on `train_data`. Abstract."""
        raise NotImplementedError

    def _model_name(self, theta) -> str:
        """Return a human-readable name for the model parameterized by `theta`. Abstract."""
        raise NotImplementedError

    def evaluate_theta(self, thetas: Iterator, train_data: TimeSeries, train_config=None, exog_data: TimeSeries=None) -> Tuple[(Any, ModelBase, Tuple[(TimeSeries, Optional[TimeSeries])])]:
        """Train one model per candidate theta and return the lowest-IC (theta, model, train_result)."""
        best = None
        # Convert to pandas only for models that train on DataFrames.
        y = (train_data.to_pd() if self.model._pandas_train else train_data)
        y_exog = (exog_data.to_pd() if ((exog_data is not None) and self.model._pandas_train) else exog_data)
        y_target = pd.DataFrame(y[self.model.target_name])
        for theta in thetas:
            start = time.time()
            # Each candidate trains a fresh copy so thetas don't interfere.
            model = deepcopy(self.model)
            self.set_theta(model, theta, train_data)
            if (exog_data is None):
                train_result = model._train(y, train_config=train_config)
            else:
                train_result = model._train_with_exog(y, train_config=train_config, exog_data=y_exog)
            fit_time = (time.time() - start)
            ic = float(self.get_ic(model=model, train_data=y_target, train_result=train_result))
            logger.debug(f'{self._model_name(theta)}: {self.information_criterion.name}={ic:.3f}, Time={fit_time:.2f}s')
            curr = {'theta': theta, 'model': model, 'train_result': train_result, 'ic': ic}
            if (best is None):
                best = curr
                logger.debug(('First best model found (%.3f)' % ic))
            current_ic = best['ic']
            if (ic < current_ic):
                logger.debug(('New best model found (%.3f < %.3f)' % (ic, current_ic)))
                best = curr
        (theta, model, train_result) = (best['theta'], best['model'], best['train_result'])
        logger.info(f'Best model: {self._model_name(theta)}')
        return (theta, model, train_result)
import dataclasses


@dataclasses.dataclass
class fp16Config():
    """Configuration for fp16 mixed-precision training.

    Restored as a dataclass: the bare `enabled: bool` annotation (no value)
    only makes sense as a dataclass field, and the remaining annotated
    defaults match dataclass-field syntax.
    """

    # Whether fp16 training is enabled (required, no default).
    enabled: bool
    # Automatically cast inputs to the expected dtype.
    auto_cast: bool = True
    # Keep master weights and gradients in fp16 as well.
    fp16_master_weights_and_grads: bool = False
    # Lower bound for the dynamic loss scale.
    min_loss_scale: float = 0.0
def vit_jax_to_torch(jax_weights, num_layer=12):
    """Convert ViT weights from JAX/Flax naming to a torch state-dict naming.

    Args:
        jax_weights: mapping from Flax parameter paths to tensors.
            NOTE(review): values must support `.permute` (torch tensors) yet
            are also passed to `np.stack` — confirm the caller's tensor type.
        num_layer: number of transformer encoder blocks to convert.

    Returns:
        dict mapping torch parameter names to converted tensors.
    """
    torch_weights = dict()
    # Patch embedding: Flax conv kernels are HWIO; torch expects OIHW.
    conv_filters = jax_weights['embedding/kernel']
    conv_filters = conv_filters.permute(3, 2, 0, 1)
    torch_weights['patch_embed.projection.weight'] = conv_filters
    torch_weights['patch_embed.projection.bias'] = jax_weights['embedding/bias']
    torch_weights['pos_embed'] = jax_weights['Transformer/posembed_input/pos_embedding']
    torch_weights['cls_token'] = jax_weights['cls']
    torch_weights['ln1.weight'] = jax_weights['Transformer/encoder_norm/scale']
    torch_weights['ln1.bias'] = jax_weights['Transformer/encoder_norm/bias']
    for i in range(num_layer):
        jax_block = f'Transformer/encoderblock_{i}'
        torch_block = f'layers.{i}'
        torch_weights[f'{torch_block}.ln1.weight'] = jax_weights[f'{jax_block}/LayerNorm_0/scale']
        torch_weights[f'{torch_block}.ln1.bias'] = jax_weights[f'{jax_block}/LayerNorm_0/bias']
        # Fuse separate Q/K/V projections into torch's single in_proj tensor.
        query_weight = jax_weights[f'{jax_block}/MultiHeadDotProductAttention_1/query/kernel']
        query_bias = jax_weights[f'{jax_block}/MultiHeadDotProductAttention_1/query/bias']
        key_weight = jax_weights[f'{jax_block}/MultiHeadDotProductAttention_1/key/kernel']
        key_bias = jax_weights[f'{jax_block}/MultiHeadDotProductAttention_1/key/bias']
        value_weight = jax_weights[f'{jax_block}/MultiHeadDotProductAttention_1/value/kernel']
        value_bias = jax_weights[f'{jax_block}/MultiHeadDotProductAttention_1/value/bias']
        qkv_weight = torch.from_numpy(np.stack((query_weight, key_weight, value_weight), 1))
        qkv_weight = torch.flatten(qkv_weight, start_dim=1)
        qkv_bias = torch.from_numpy(np.stack((query_bias, key_bias, value_bias), 0))
        qkv_bias = torch.flatten(qkv_bias, start_dim=0)
        torch_weights[f'{torch_block}.attn.attn.in_proj_weight'] = qkv_weight
        torch_weights[f'{torch_block}.attn.attn.in_proj_bias'] = qkv_bias
        to_out_weight = jax_weights[f'{jax_block}/MultiHeadDotProductAttention_1/out/kernel']
        to_out_weight = torch.flatten(to_out_weight, start_dim=0, end_dim=1)
        torch_weights[f'{torch_block}.attn.attn.out_proj.weight'] = to_out_weight
        torch_weights[f'{torch_block}.attn.attn.out_proj.bias'] = jax_weights[f'{jax_block}/MultiHeadDotProductAttention_1/out/bias']
        torch_weights[f'{torch_block}.ln2.weight'] = jax_weights[f'{jax_block}/LayerNorm_2/scale']
        torch_weights[f'{torch_block}.ln2.bias'] = jax_weights[f'{jax_block}/LayerNorm_2/bias']
        torch_weights[f'{torch_block}.ffn.layers.0.0.weight'] = jax_weights[f'{jax_block}/MlpBlock_3/Dense_0/kernel']
        torch_weights[f'{torch_block}.ffn.layers.0.0.bias'] = jax_weights[f'{jax_block}/MlpBlock_3/Dense_0/bias']
        torch_weights[f'{torch_block}.ffn.layers.1.weight'] = jax_weights[f'{jax_block}/MlpBlock_3/Dense_1/kernel']
        torch_weights[f'{torch_block}.ffn.layers.1.bias'] = jax_weights[f'{jax_block}/MlpBlock_3/Dense_1/bias']
    # Flax dense kernels are (in, out); torch Linear weights are (out, in).
    # Transpose every weight except conv (patch_embed) and layernorm tensors.
    # (Assigning to existing keys while iterating is safe: no keys are added.)
    for (k, v) in torch_weights.items():
        if (('weight' in k) and ('patch_embed' not in k) and ('ln' not in k)):
            v = v.permute(1, 0)
        torch_weights[k] = v
    return torch_weights
class WireframeOverlay(bpy.types.Operator):
    """Create or update a wireframe overlay for a (possibly animated) object series.

    Duplicates every object whose name ends with the common name, strips faces,
    converts the copies to bevelled curves, and keyframes their visibility so
    exactly one wireframe copy is visible per frame.

    NOTE(review): uses the pre-2.8 Blender API (`obj.select`, `scn.objects.link`,
    `obj.hide`) — confirm the target Blender version.
    """

    bl_idname = 'object.wireframe_overlay'
    bl_label = 'Create/Update Wireframe Overlay'
    bl_options = {'REGISTER', 'UNDO'}

    # NOTE: defined without `self` on purpose — always invoked as
    # `WireframeOverlay.DoesObjExist(name)` (plain function through the class).
    def DoesObjExist(objname):
        """Return True if any object in the scene ends with `objname`."""
        for obj in bpy.data.objects:
            if obj.name.endswith(objname):
                return True
        return False

    def execute(self, context):
        """Operator entry point: resolve the target name and apply the overlay."""
        scn = context.scene
        myaddon = scn.my_addon
        # Fixed: was `is not ''` (identity comparison against a literal, which
        # is implementation-dependent) — equality is the intended check.
        if (myaddon.wireframe_obj_string != ''):
            objectname = myaddon.wireframe_obj_string
        else:
            # No explicit name set: derive it from the active object and
            # remember it for subsequent runs.
            brenderObjname = context.active_object.name
            objectname = GetCommonName(brenderObjname)
            myaddon.wireframe_obj_string = objectname
        WireframeOverlay.apply_wireframe(objectname)
        return {'FINISHED'}

    # NOTE: also called through the class, hence no `self` parameter.
    def apply_wireframe(objname):
        """Create the `.wireframe` duplicates, or refresh their curve settings if present."""
        scn = bpy.context.scene
        myaddon = scn.my_addon
        dupobjects = []
        objectname = GetCommonName(objname)
        if objectname.endswith('.wireframe'):
            copynames = objectname
        else:
            copynames = (objectname + '.wireframe')
        if WireframeOverlay.DoesObjExist(copynames):
            # Overlay already exists: just push the current addon settings
            # onto every wireframe curve.
            scn.frame_set(0)
            for obj in bpy.data.objects:
                if obj.name.endswith(copynames):
                    obj.select = True
                    obj.data.bevel_depth = myaddon.wf_bevel_depth
                    obj.data.fill_mode = 'FULL'
                    obj.data.bevel_resolution = myaddon.wf_bevel_resolution
                    obj.data.offset = myaddon.wf_offset
                    obj.data.extrude = myaddon.wf_extrude
                    obj.select = False
            return {'FINISHED'}
        else:
            # Duplicate every matching object (one per animation frame),
            # clearing animation data so visibility can be re-keyed below.
            for obj in bpy.data.objects:
                if obj.name.endswith(objectname):
                    theobj = bpy.data.objects[obj.name]
                    new_obj = theobj.copy()
                    origName = new_obj.name
                    new_obj.name = (theobj.name + '.wireframe')
                    new_obj.data = theobj.data.copy()
                    new_obj.animation_data_clear()
                    scn.objects.link(new_obj)
                    dupobjects.append(new_obj)
                    mesh = bpy.data.meshes[origName]
                    mesh.name = new_obj.name
            # Frame 0: hide every duplicate except the first.
            scn.frame_set(0)
            for (i, obj) in enumerate(dupobjects):
                if (i == 0):
                    continue
                obj.hide = obj.hide_render = True
                obj.keyframe_insert(data_path='hide')
                obj.keyframe_insert(data_path='hide_render')
            # Frame f: show duplicate f and hide duplicate f-1.
            for (f, obj) in enumerate(dupobjects):
                if (f == 0):
                    continue
                scn.frame_set(f)
                obj_prev = dupobjects[(f - 1)]
                obj_prev.hide = obj_prev.hide_render = True
                obj_prev.keyframe_insert(data_path='hide')
                obj_prev.keyframe_insert(data_path='hide_render')
                obj = dupobjects[f]
                obj.hide = obj.hide_render = False
                obj.keyframe_insert(data_path='hide')
                obj.keyframe_insert(data_path='hide_render')
            scn.frame_set(0)
            # Strip faces from each duplicate and convert it to a bevelled
            # curve so only the edge wireframe renders.
            for obj in bpy.data.objects:
                if obj.name.endswith(copynames):
                    obj.hide = obj.hide_render = False
                    scn.objects.active = obj
                    bpy.ops.object.mode_set(mode='EDIT')
                    bpy.ops.mesh.reveal()
                    bpy.ops.mesh.select_all(action='SELECT')
                    bpy.ops.mesh.delete(type='ONLY_FACE')
                    bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
                    obj.select = True
                    bpy.ops.object.convert(target='CURVE')
                    obj.data.bevel_depth = myaddon.wf_bevel_depth
                    obj.data.fill_mode = 'FULL'
                    obj.data.bevel_resolution = myaddon.wf_bevel_resolution
                    obj.data.offset = myaddon.wf_offset
                    obj.data.extrude = myaddon.wf_extrude
                    obj.hide = obj.hide_render = True
                    obj.select = False
            BRENDER_wf_names.append(copynames)
            return {'FINISHED'}
def setup_custom_environment(custom_module):
    """Import `custom_module` (a .py path or a dotted module name) and run its
    `setup_environment()` hook.

    Raises AssertionError if the module does not define a callable
    `setup_environment` attribute.
    """
    if custom_module.endswith('.py'):
        # Filesystem path: load it under a synthetic module name.
        module = _import_file('fastreid.utils.env.custom_module', custom_module)
    else:
        module = importlib.import_module(custom_module)
    has_hook = hasattr(module, 'setup_environment') and callable(module.setup_environment)
    assert has_hook, "Custom environment module defined in {} does not have the required callable attribute 'setup_environment'.".format(custom_module)
    module.setup_environment()
def fcn(split):
    """Build an FCN-8s caffe NetSpec for PASCAL-Context (60 classes).

    VGG-16 backbone with skip connections from pool3 and pool4 fused into the
    upsampled score maps (the classic FCN-8s topology).

    Args:
        split: dataset split name forwarded to the Python data layer.

    Returns:
        The serialized net prototxt (`n.to_proto()`).
    """
    n = caffe.NetSpec()
    (n.data, n.label) = L.Python(module='pascalcontext_layers', layer='PASCALContextSegDataLayer', ntop=2, param_str=str(dict(voc_dir='../../data/pascal', context_dir='../../data/pascal-context', split=split, seed=1337)))
    # VGG-16 conv stack; pad=100 on conv1_1 is the standard FCN trick that
    # guarantees enough spatial context for the later crops.
    (n.conv1_1, n.relu1_1) = conv_relu(n.data, 64, pad=100)
    (n.conv1_2, n.relu1_2) = conv_relu(n.relu1_1, 64)
    n.pool1 = max_pool(n.relu1_2)
    (n.conv2_1, n.relu2_1) = conv_relu(n.pool1, 128)
    (n.conv2_2, n.relu2_2) = conv_relu(n.relu2_1, 128)
    n.pool2 = max_pool(n.relu2_2)
    (n.conv3_1, n.relu3_1) = conv_relu(n.pool2, 256)
    (n.conv3_2, n.relu3_2) = conv_relu(n.relu3_1, 256)
    (n.conv3_3, n.relu3_3) = conv_relu(n.relu3_2, 256)
    n.pool3 = max_pool(n.relu3_3)
    (n.conv4_1, n.relu4_1) = conv_relu(n.pool3, 512)
    (n.conv4_2, n.relu4_2) = conv_relu(n.relu4_1, 512)
    (n.conv4_3, n.relu4_3) = conv_relu(n.relu4_2, 512)
    n.pool4 = max_pool(n.relu4_3)
    (n.conv5_1, n.relu5_1) = conv_relu(n.pool4, 512)
    (n.conv5_2, n.relu5_2) = conv_relu(n.relu5_1, 512)
    (n.conv5_3, n.relu5_3) = conv_relu(n.relu5_2, 512)
    n.pool5 = max_pool(n.relu5_3)
    # fc6/fc7 as convolutions (fully-convolutional head) with dropout.
    (n.fc6, n.relu6) = conv_relu(n.pool5, 4096, ks=7, pad=0)
    n.drop6 = L.Dropout(n.relu6, dropout_ratio=0.5, in_place=True)
    (n.fc7, n.relu7) = conv_relu(n.drop6, 4096, ks=1, pad=0)
    n.drop7 = L.Dropout(n.relu7, dropout_ratio=0.5, in_place=True)
    n.score_fr = L.Convolution(n.drop7, num_output=60, kernel_size=1, pad=0, param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)])
    # 2x upsample, fuse with pool4 skip; 2x again, fuse with pool3 skip;
    # final 8x upsample back to input resolution. Deconv lr_mult=0: the
    # (bilinear) upsampling filters are frozen.
    n.upscore2 = L.Deconvolution(n.score_fr, convolution_param=dict(num_output=60, kernel_size=4, stride=2, bias_term=False), param=[dict(lr_mult=0)])
    n.score_pool4 = L.Convolution(n.pool4, num_output=60, kernel_size=1, pad=0, param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)])
    n.score_pool4c = crop(n.score_pool4, n.upscore2)
    n.fuse_pool4 = L.Eltwise(n.upscore2, n.score_pool4c, operation=P.Eltwise.SUM)
    n.upscore_pool4 = L.Deconvolution(n.fuse_pool4, convolution_param=dict(num_output=60, kernel_size=4, stride=2, bias_term=False), param=[dict(lr_mult=0)])
    n.score_pool3 = L.Convolution(n.pool3, num_output=60, kernel_size=1, pad=0, param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)])
    n.score_pool3c = crop(n.score_pool3, n.upscore_pool4)
    n.fuse_pool3 = L.Eltwise(n.upscore_pool4, n.score_pool3c, operation=P.Eltwise.SUM)
    n.upscore8 = L.Deconvolution(n.fuse_pool3, convolution_param=dict(num_output=60, kernel_size=16, stride=8, bias_term=False), param=[dict(lr_mult=0)])
    n.score = crop(n.upscore8, n.data)
    n.loss = L.SoftmaxWithLoss(n.score, n.label, loss_param=dict(normalize=False, ignore_label=255))
    return n.to_proto()
@dataclasses.dataclass
class RCSteps():
    """Tracks range-check pointer names and the step history of the builtin pointer.

    Restored as a dataclass: the field annotations with `dataclasses.field`
    defaults and `__post_init__` require the `@dataclasses.dataclass` decorator.
    """

    # The range-check builtin descriptor this tracker follows.
    rc_builtin: RCBuiltin
    # All names currently known to alias the range-check pointer.
    names: Set[str] = dataclasses.field(default_factory=(lambda : set()))
    # History of pointer advances (base name, subscript, symbolic step).
    steps: List[RCStep] = dataclasses.field(default_factory=(lambda : []))

    def __post_init__(self):
        self.names.add(self.rc_builtin.arg_name)
        self.steps = [RCStep(base_name=self.rc_builtin.arg_name, sub=0, step='0')]

    def copy_from(self, rc):
        """Deep-copy names and step history from another RCSteps."""
        self.names = rc.names.copy()
        self.steps = [step.copy() for step in rc.steps]

    def active(self):
        """Return True when at least one range-check pointer name is tracked."""
        return (len(self.names) > 0)

    def is_rc_ptr_name(self, name: str) -> bool:
        """Return True if `name` is a tracked range-check pointer name."""
        return (name in self.names)

    def is_rc_ptr_expr(self, e: Expression) -> bool:
        """Return True if `e` (through casts/parens) is a tracked pointer identifier."""
        if isinstance(e, ExprCast):
            return self.is_rc_ptr_expr(e.expr)
        elif isinstance(e, ExprParentheses):
            return self.is_rc_ptr_expr(e.val)
        elif isinstance(e, ExprIdentifier):
            return self.is_rc_ptr_name(e.name)
        else:
            return False

    def get_rc_ptr_name(self, e: Expression) -> Optional[str]:
        """Return the tracked pointer name inside `e`, or None."""
        if isinstance(e, ExprCast):
            return self.get_rc_ptr_name(e.expr)
        elif isinstance(e, ExprParentheses):
            return self.get_rc_ptr_name(e.val)
        elif isinstance(e, ExprIdentifier):
            return (e.name if self.is_rc_ptr_name(e.name) else None)
        else:
            return None

    def get_rc_step_name(self, e: Expression) -> Optional[str]:
        """Return the pointer name in `e` or in `ptr +/- const` expressions."""
        name = self.get_rc_ptr_name(e)
        if (name is not None):
            return name
        if isinstance(e, ExprCast):
            return self.get_rc_step_name(e.expr)
        elif isinstance(e, ExprParentheses):
            return self.get_rc_step_name(e.val)
        elif isinstance(e, ExprOperator):
            if ((e.op != '+') and (e.op != '-')):
                return None
            # One side must be a constant; the other side holds the pointer.
            if (get_const(e.a) is not None):
                return self.get_rc_ptr_name(e.b)
            elif (get_const(e.b) is not None):
                return self.get_rc_ptr_name(e.a)
            else:
                return None
        else:
            return None

    def get_rc_check_name(self, expr: Expression) -> Optional[str]:
        """Return the pointer name for a dereference expression `[ptr +/- const]`."""
        if isinstance(expr, ExprCast):
            return self.get_rc_check_name(expr.expr)
        elif isinstance(expr, ExprParentheses):
            return self.get_rc_check_name(expr.val)
        elif isinstance(expr, ExprDeref):
            return self.get_rc_step_name(expr.addr)
        else:
            return None

    def get_rc_step_size(self, e: Expression, allow_zero: bool=True) -> Optional[int]:
        """Return the signed constant offset of `e` relative to the pointer, or None.

        `allow_zero=False` rejects a bare pointer (offset 0).
        """
        if isinstance(e, ExprCast):
            # Fixed: `allow_zero` is now propagated through casts/parentheses
            # (it was silently reset to the default before).
            return self.get_rc_step_size(e.expr, allow_zero)
        elif isinstance(e, ExprParentheses):
            return self.get_rc_step_size(e.val, allow_zero)
        elif isinstance(e, ExprOperator):
            if ((e.op != '+') and (e.op != '-')):
                return None
            if self.is_rc_ptr_expr(e.a):
                rc_const = get_const(e.b)
                return (None if (rc_const is None) else (rc_const if (e.op == '+') else (- rc_const)))
            elif self.is_rc_ptr_expr(e.b):
                rc_const = get_const(e.a)
                return (None if (rc_const is None) else (rc_const if (e.op == '+') else (- rc_const)))
            else:
                return None
        elif (allow_zero and self.is_rc_ptr_expr(e)):
            return 0
        else:
            return None

    def get_rc_step(self, e: Expression, allow_zero: bool) -> Optional[str]:
        """Return the step size of `e` as a string, or None."""
        c = self.get_rc_step_size(e, allow_zero)
        return (str(c) if (c is not None) else None)

    def get_rc_check_offset(self, e: Expression) -> Optional[int]:
        """Return the offset of a dereference relative to the pointer (0 for bare `[ptr]`)."""
        if isinstance(e, ExprCast):
            return self.get_rc_check_offset(e.expr)
        elif isinstance(e, ExprParentheses):
            return self.get_rc_check_offset(e.val)
        elif isinstance(e, ExprDeref):
            return (0 if self.is_rc_ptr_expr(e.addr) else self.get_rc_step_size(e.addr))
        else:
            return None

    def get_rc_check_name_and_offset(self, expr: Expression) -> Tuple[(Optional[str], Optional[int])]:
        """Resolve a dereference to (subscripted pointer name, non-negative offset).

        Walks the step history backwards so a negative offset is rebased onto
        an earlier pointer value; raises if it points before the initial one.
        """
        if isinstance(expr, ExprCast):
            return self.get_rc_check_name_and_offset(expr.expr)
        elif isinstance(expr, ExprParentheses):
            return self.get_rc_check_name_and_offset(expr.val)
        elif isinstance(expr, ExprDeref):
            name = self.get_rc_step_name(expr.addr)
            offset = self.get_rc_step_size(expr.addr, True)
            if ((name is None) or (offset is None)):
                return (None, None)
            for step in reversed(self.steps):
                if (0 <= offset):
                    return (add_name_sub(name, step.sub), offset)
                if (not step.step.isdigit()):
                    # Symbolic step: cannot rebase the negative offset further.
                    break
                offset += int(step.step)
            raise Exception('Range check backward offset points before initial range check pointer.')
        else:
            return (None, None)

    def get_rc_checks(self, expr: Expression) -> List[Tuple[(Expression, str, int)]]:
        """Collect all range-check dereferences appearing anywhere inside `expr`."""
        (rc_name, rc_offset) = self.get_rc_check_name_and_offset(expr)
        if ((rc_name is not None) and (rc_offset is not None)):
            return [(expr, rc_name, rc_offset)]
        if isinstance(expr, ExprNeg):
            return self.get_rc_checks(expr.val)
        if (isinstance(expr, ExprOperator) or isinstance(expr, ExprPow)):
            return (self.get_rc_checks(expr.a) + self.get_rc_checks(expr.b))
        if isinstance(expr, ExprFuncCall):
            rc_checks: List[Tuple[(Expression, str, int)]] = []
            for arg in expr.rvalue.arguments.args:
                rc_checks += self.get_rc_checks(arg.expr)
            return rc_checks
        if isinstance(expr, ExprTuple):
            rc_checks = []
            for arg in expr.members.args:
                rc_checks += self.get_rc_checks(arg.expr)
            # Fixed: the accumulated checks were discarded (`return []`).
            return rc_checks
        # Fixed: added default — previously fell off the end returning None,
        # which crashed the list concatenations above.
        return []

    def add_step_from_var(self, base_name: str, expr: Expression, name_sub: Dict[(str, int)]) -> bool:
        """Record `base_name = <last pointer> + const` as a new step; return success."""
        step = self.get_rc_step(expr, True)
        if (step is None):
            return False
        prev_name = self.get_rc_step_name(expr)
        if (self.steps[(- 1)].base_name != prev_name):
            return False
        self.names.add(base_name)
        self.steps.append(RCStep(base_name, get_next_name_sub(base_name, name_sub), step))
        return True

    def add_func_step(self, called_rc_builtin: RCBuiltin, step_id: str) -> bool:
        """Record a symbolic step taken by a called function; return success."""
        prev_step = self.steps[(- 1)]
        if (called_rc_builtin.arg_name != prev_step.base_name):
            return False
        self.steps.append(RCStep(prev_step.base_name, (prev_step.sub + 1), f'rc_m{step_id}'))
        return True

    def get_assert_rc_check(self, lhs: Expression, rhs: Expression) -> List[Tuple[(Expression, str, int)]]:
        """For `lhs = rhs`, return the range-check obligations implied by either side.

        When exactly one side is itself a single rc dereference, the *other*
        side is the value being range-checked.
        """
        if (not self.active()):
            return []
        rhs_checks = self.get_rc_checks(rhs)
        lhs_checks = self.get_rc_checks(lhs)
        if ((len(rhs_checks) == 0) and (len(lhs_checks) == 1)):
            (rc_expr, rc_check_name, rc_check_offset) = lhs_checks[0]
            if (rc_expr == lhs):
                return [(rhs, rc_check_name, rc_check_offset)]
        if ((len(lhs_checks) == 0) and (len(rhs_checks) == 1)):
            (rc_expr, rc_check_name, rc_check_offset) = rhs_checks[0]
            if (rc_expr == rhs):
                return [(lhs, rc_check_name, rc_check_offset)]
        return (lhs_checks + rhs_checks)
def test_fortran_frontend_merge_comparison_arrays_offset():
    """Test MERGE with offset mask slices in the Fortran frontend.

    mask1(3:9) is set to 3 and mask2(5:11) to 4 (note 1-based Fortran slices
    vs the 0-based numpy assignments below), so the comparison is True
    everywhere and MERGE must select input1 (all 13s) for every element.
    """
    test_string = '\n                    PROGRAM merge_test\n                    implicit none\n                    double precision, dimension(7) :: input1\n                    double precision, dimension(7) :: input2\n                    double precision, dimension(14) :: mask1\n                    double precision, dimension(14) :: mask2\n                    double precision, dimension(7) :: res\n                    CALL merge_test_function(input1, input2, mask1, mask2, res)\n                    end\n\n                    SUBROUTINE merge_test_function(input1, input2, mask1, mask2, res)\n                    double precision, dimension(7) :: input1\n                    double precision, dimension(7) :: input2\n                    double precision, dimension(14) :: mask1\n                    double precision, dimension(14) :: mask2\n                    double precision, dimension(7) :: res\n\n                    res = MERGE(input1, input2, mask1(3:9) .lt. mask2(5:11))\n\n                    END SUBROUTINE merge_test_function\n                    '
    sdfg = fortran_parser.create_sdfg_from_string(test_string, 'merge_test', True)
    sdfg.simplify(verbose=True)
    sdfg.compile()
    size = 7
    first = np.full([size], 13, order='F', dtype=np.float64)
    second = np.full([size], 42, order='F', dtype=np.float64)
    mask1 = np.full([(size * 2)], 30, order='F', dtype=np.float64)
    mask2 = np.full([(size * 2)], 0, order='F', dtype=np.float64)
    res = np.full([size], 40, order='F', dtype=np.float64)
    # 0-based slices matching Fortran's mask1(3:9) and mask2(5:11).
    mask1[2:9] = 3
    mask2[4:11] = 4
    sdfg(input1=first, input2=second, mask1=mask1, mask2=mask2, res=res)
    for val in res:
        # 3 < 4 on the sliced region, so MERGE picks input1 everywhere.
        assert (val == 13)
class Objects(Category_singleton):
    """The category of all objects: everything is a member, no extra structure."""

    def additional_structure(self):
        """This category defines no additional structure."""
        return None

    def super_categories(self):
        """The category of objects has no super categories."""
        return []

    def __contains__(self, x):
        """Every object belongs to this category."""
        return True

    class SubcategoryMethods():

        # NOTE(review): the bare `_method` lines in the original were decorator
        # residue; restored as `@cached_method` — confirm the import exists in
        # the full file.
        @cached_method
        def Homsets(self):
            """Return the category of homsets of this category."""
            return HomsetsCategory.category_of(self)

        @cached_method
        def Endsets(self):
            """Return the category of endsets: homsets with the Endset axiom."""
            return self.Homsets()._with_axiom('Endset')

    class ParentMethods():
        # NOTE(review): body was empty (likely truncated in this chunk);
        # `pass` keeps the class definition syntactically valid.
        pass
class Compose(object):
    """Apply a sequence of (img, label) transforms in order.

    Each transform receives its position in the pipeline through
    ``kwargs['idx']`` in addition to any caller-supplied keyword arguments.
    """

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, img, label, **kwargs):
        for position, transform in enumerate(self.transforms):
            # Let each transform know where it sits in the pipeline.
            kwargs['idx'] = position
            img, label = transform(img, label, **kwargs)
        return img, label

    def __repr__(self):
        pieces = [self.__class__.__name__ + '(']
        for transform in self.transforms:
            pieces.append('\n')
            pieces.append('    {0}'.format(transform))
        pieces.append('\n)')
        return ''.join(pieces)
def eval_output(ref_fn, pred_fn):
    """Run the external evaluation script and parse its metric block.

    Returns [BLEU, NIST, METEOR, ROUGE_L, CIDEr] as floats, extracted from the
    script's stdout.
    """
    pattern = '\\nBLEU: (\\d+\\.?\\d*)\\nNIST: (\\d+\\.?\\d*)\\nMETEOR: (\\d+\\.?\\d*)\\nROUGE_L: (\\d+\\.?\\d*)\\nCIDEr: (\\d+\\.?\\d*)\\n'
    raw_output = _sh_eval(pred_fn, ref_fn).decode('utf-8')
    match = re.search(pattern, raw_output)
    return [float(score) for score in match.group(1, 2, 3, 4, 5)]
class AutoConfig():
def __init__(self):
raise EnvironmentError('AutoConfig is designed to be instantiated using the `AutoConfig.from_pretrained(pretrained_model_name_or_path)` method.')
def for_model(cls, model_type: str, *args, **kwargs):
if (model_type in CONFIG_MAPPING):
config_class = CONFIG_MAPPING[model_type]
return config_class(*args, **kwargs)
raise ValueError(f"Unrecognized model identifier: {model_type}. Should contain one of {', '.join(CONFIG_MAPPING.keys())}")
_list_option_in_docstrings()
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
kwargs['_from_auto'] = True
kwargs['name_or_path'] = pretrained_model_name_or_path
trust_remote_code = kwargs.pop('trust_remote_code', False)
(config_dict, _) = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
if (('auto_map' in config_dict) and ('AutoConfig' in config_dict['auto_map'])):
if (not trust_remote_code):
raise ValueError(f'Loading {pretrained_model_name_or_path} requires you to execute the configuration file in that repo on your local machine. Make sure you have read the code there to avoid malicious use, then set the option `trust_remote_code=True` to remove this error.')
if (kwargs.get('revision', None) is None):
logger.warn('Explicitly passing a `revision` is encouraged when loading a configuration with custom code to ensure no malicious code has been contributed in a newer revision.')
class_ref = config_dict['auto_map']['AutoConfig']
(module_file, class_name) = class_ref.split('.')
config_class = get_class_from_dynamic_module(pretrained_model_name_or_path, (module_file + '.py'), class_name, **kwargs)
return config_class.from_pretrained(pretrained_model_name_or_path, **kwargs)
elif ('model_type' in config_dict):
config_class = CONFIG_MAPPING[config_dict['model_type']]
return config_class.from_dict(config_dict, **kwargs)
else:
for (pattern, config_class) in CONFIG_MAPPING.items():
if (pattern in str(pretrained_model_name_or_path)):
return config_class.from_dict(config_dict, **kwargs)
raise ValueError(f"Unrecognized model in {pretrained_model_name_or_path}. Should have a `model_type` key in its {CONFIG_NAME}, or contain one of the following strings in its name: {', '.join(CONFIG_MAPPING.keys())}")
def register(model_type, config):
    """Register ``config`` as the configuration class for ``model_type``.

    Raises:
        ValueError: if ``config`` declares a conflicting ``model_type``.
    """
    conflicting = issubclass(config, PretrainedConfig) and config.model_type != model_type
    if conflicting:
        raise ValueError(f'The config you are passing has a `model_type` attribute that is not consistent with the model type you passed (config has {config.model_type} and you passed {model_type}. Fix one of those so they match!')
    CONFIG_MAPPING.register(model_type, config)
def build_norm(norm_type, dimension=2):
    """Return a normalization layer constructor.

    Args:
        norm_type: 'batch' for BatchNorm, 'group' for the module-level
            ``group_norm`` helper.
        dimension: 2 selects ``nn.BatchNorm2d``, anything else
            ``nn.BatchNorm1d`` (only used for 'batch').

    Returns:
        The (uninstantiated) normalization callable.

    Raises:
        ValueError: for an unrecognized ``norm_type``.
    """
    if (norm_type == 'batch'):
        norm = (nn.BatchNorm2d if (dimension == 2) else nn.BatchNorm1d)
    elif (norm_type == 'group'):
        norm = group_norm
    else:
        # Fix: corrected the 'Unkown' typo in the error message.
        raise ValueError('Unknown norm_type: {}'.format(norm_type))
    return norm
def center_and_norm(x, axis=(- 1)):
    """Center and scale ``x`` in place to zero mean and unit std along ``axis``.

    The rolled array is a view, so the augmented assignments mutate the
    caller's array directly; nothing is returned.
    """
    view = np.rollaxis(x, axis)
    view -= view.mean(axis=0)
    view /= view.std(axis=0)
class KittiGenerator(Generator):
    """Detection data generator over a KITTI-style directory layout.

    Expects ``<base_dir>/<subset>/labels/*.txt`` annotation files (space
    separated, one object per line) paired with same-named ``.png`` images in
    ``<base_dir>/<subset>/images``.
    """

    def __init__(self, base_dir, subset='train', **kwargs):
        # base_dir: dataset root; subset: sub-folder ('train', ...).
        self.base_dir = base_dir
        label_dir = os.path.join(self.base_dir, subset, 'labels')
        image_dir = os.path.join(self.base_dir, subset, 'images')
        # Invert the class-name -> id mapping for label_to_name lookups.
        self.id_to_labels = {}
        for (label, id) in kitti_classes.items():
            self.id_to_labels[id] = label
        # image_data: image index -> list of annotation dicts.
        self.image_data = dict()
        self.images = []
        # NOTE(review): os.listdir order is platform-dependent, so image
        # indices are not reproducible across machines — confirm acceptable.
        for (i, fn) in enumerate(os.listdir(label_dir)):
            label_fp = os.path.join(label_dir, fn)
            # Image file shares the label's basename, with a .png extension.
            image_fp = os.path.join(image_dir, fn.replace('.txt', '.png'))
            self.images.append(image_fp)
            # KITTI label columns, in file order.
            fieldnames = ['type', 'truncated', 'occluded', 'alpha', 'left', 'top', 'right', 'bottom', 'dh', 'dw', 'dl', 'lx', 'ly', 'lz', 'ry']
            with open(label_fp, 'r') as csv_file:
                reader = csv.DictReader(csv_file, delimiter=' ', fieldnames=fieldnames)
                boxes = []
                for (line, row) in enumerate(reader):
                    label = row['type']
                    cls_id = kitti_classes[label]
                    # Box coordinates are kept as strings here; they are cast
                    # to float in load_annotations.
                    annotation = {'cls_id': cls_id, 'x1': row['left'], 'x2': row['right'], 'y2': row['bottom'], 'y1': row['top']}
                    boxes.append(annotation)
                self.image_data[i] = boxes
        super(KittiGenerator, self).__init__(**kwargs)

    def size(self):
        """Number of images in the subset."""
        return len(self.images)

    def num_classes(self):
        """Number of classes (assumes contiguous ids starting at 0)."""
        return (max(kitti_classes.values()) + 1)

    def name_to_label(self, name):
        # Reverse lookup is not supported by this generator.
        raise NotImplementedError()

    def label_to_name(self, label):
        """Map a numeric class id back to its KITTI class name."""
        return self.id_to_labels[label]

    def image_aspect_ratio(self, image_index):
        """Return width/height of the image, read lazily from disk."""
        image = Image.open(self.images[image_index])
        return (float(image.width) / float(image.height))

    def load_image(self, image_index):
        """Load the image at ``image_index`` as a BGR array."""
        return read_image_bgr(self.images[image_index])

    def load_annotations(self, image_index):
        """Return an (n, 5) array of [x1, y1, x2, y2, class_id] boxes."""
        annotations = self.image_data[image_index]
        boxes = np.zeros((len(annotations), 5))
        for (idx, ann) in enumerate(annotations):
            boxes[(idx, 0)] = float(ann['x1'])
            boxes[(idx, 1)] = float(ann['y1'])
            boxes[(idx, 2)] = float(ann['x2'])
            boxes[(idx, 3)] = float(ann['y2'])
            boxes[(idx, 4)] = int(ann['cls_id'])
        return boxes
_arg_scope
def resnet_block(inputs, scope, num_outputs=64, kernel_size=[3, 3], stride=[1, 1], padding='SAME', layer_dict=None):
    """Residual block: two convolutions plus an identity skip connection.

    Args:
        inputs: input tensor (must have ``num_outputs`` channels so the
            residual add is valid).
        scope: variable scope name for the block.
        num_outputs, kernel_size, stride, padding: forwarded to ``conv2d``.
        layer_dict: optional dict collecting named intermediate outputs.

    Returns:
        The activated output tensor of the block.
    """
    # Fix: the original used a mutable default (layer_dict={}) which is
    # created once and mutated by _update_dict on every call, silently
    # accumulating outputs across unrelated invocations.
    if layer_dict is None:
        layer_dict = {}
    with tf.variable_scope(scope):
        layer = conv2d(inputs, num_outputs, kernel_size, stride, padding=padding, activation_fn=tf.nn.relu, scope='conv1')
        layer = conv2d(layer, num_outputs, kernel_size, stride, padding=padding, activation_fn=None, scope='conv2')
        # Skip connection, then the final non-linearity.
        outputs = tf.nn.relu(tf.add(inputs, layer))
        _update_dict(layer_dict, scope, outputs)
        return outputs
def gumbel_noise(*sizes, epsilon=1e-09, **kwargs):
    """Sample standard Gumbel noise of the given shape via -log(-log(U)).

    ``epsilon`` keeps both logarithms away from log(0); extra kwargs are
    forwarded to ``torch.rand`` (dtype, device, ...).
    """
    uniform = torch.rand(*sizes, **kwargs)
    inner = -torch.log(uniform + epsilon)
    return -torch.log(inner + epsilon)
.experimental
def test_hit_rate_at_k_old(recs, true, true_users):
    """HitRate@k on the shared fixtures, with and without user filtering."""
    expected_unfiltered = {3: 2 / 3, 1: 1 / 3}
    assertDictAlmostEqual(HitRate()(recs, true, [3, 1]), expected_unfiltered)
    expected_filtered = {3: 1 / 4, 1: 0 / 3}
    assertDictAlmostEqual(HitRate()(recs, true, [3, 1], true_users), expected_filtered)
def test_countvectorizer_vocab_dicts_when_pickling():
    """A CountVectorizer with a dict vocabulary must survive a pickle round-trip."""
    rng = np.random.RandomState(0)
    vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza', 'salad', 'sparkling', 'tomato', 'water'])
    for _ in range(100):
        # Draw a random 5-word vocabulary and map each word to its index.
        sampled = rng.choice(vocab_words, size=5, replace=False)
        vocab_dict = {word: index for index, word in enumerate(sampled)}
        cv = CountVectorizer(vocabulary=vocab_dict)
        unpickled_cv = pickle.loads(pickle.dumps(cv))
        cv.fit(ALL_FOOD_DOCS)
        unpickled_cv.fit(ALL_FOOD_DOCS)
        # Feature order must be identical before and after pickling.
        assert_array_equal(cv.get_feature_names_out(), unpickled_cv.get_feature_names_out())
def ema(model_dest: nn.Module, model_src: nn.Module, rate):
    """In-place exponential moving average update of ``model_dest``.

    Each destination parameter becomes ``rate * dest + (1 - rate) * src``;
    the source model is left untouched.
    """
    src_params = dict(model_src.named_parameters())
    for name, dest_param in model_dest.named_parameters():
        src_param = src_params[name]
        # The two models must not share parameter tensors.
        assert src_param is not dest_param
        dest_param.data.mul_(rate).add_(((1 - rate) * src_param.data))
def isnotebook():
    """Return True only when running inside a Jupyter (ZMQ) kernel.

    Terminal IPython, plain Python (where ``get_ipython`` is undefined),
    and any other shell all yield False.
    """
    try:
        shell_name = get_ipython().__class__.__name__
    except NameError:
        # Not running under IPython at all.
        return False
    return shell_name == 'ZMQInteractiveShell'
class MultiLinearAttention(nn.Module):
    """Multi-head attention: per-head linear projections of q/k/v, a shared
    scaled attention core, and an output projection with residual + LayerNorm.
    """

    def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
        super().__init__()
        self.d_k = d_k
        self.d_v = d_v
        self.n_head = n_head
        # Joint projections for all heads; split into heads in forward().
        self.w_qs = nn.Linear(d_model, (n_head * d_k))
        self.w_ks = nn.Linear(d_model, (n_head * d_k))
        self.w_vs = nn.Linear(d_model, (n_head * d_v))
        nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt((2.0 / (d_model + d_k))))
        nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt((2.0 / (d_model + d_k))))
        nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt((2.0 / (d_model + d_v))))
        # Attention core scaled by sqrt(d_k).
        self.attention = SingleCoreAttention(np.power(d_k, 0.5), self.d_v, n_head, dropout)
        self.layer_norm = nn.LayerNorm(d_model)
        self.fc = nn.Linear((n_head * d_v), d_model)
        nn.init.xavier_normal_(self.fc.weight)
        self.dropout = nn.Dropout(dropout)

    def forward(self, q, k, v, mask=None):
        """Compute attention; returns (output, attention weights).

        q/k/v are (batch, len, d_model); mask, if given, is tiled across heads.
        """
        (d_k, d_v, n_head) = (self.d_k, self.d_v, self.n_head)
        (sz_b, len_q, _) = q.size()
        (sz_b, len_k, _) = k.size()
        (sz_b, len_v, _) = v.size()
        residual = q
        q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
        k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
        v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
        # Fold heads into the batch dimension: (n_head * batch, len, d).
        q = q.permute(2, 0, 1, 3).contiguous().view((- 1), len_q, d_k)
        k = k.permute(2, 0, 1, 3).contiguous().view((- 1), len_k, d_k)
        v = v.permute(2, 0, 1, 3).contiguous().view((- 1), len_v, d_v)
        # Fix: the declared default mask=None previously crashed on
        # mask.repeat; only tile the mask when one is provided.
        # NOTE(review): assumes SingleCoreAttention accepts mask=None — confirm.
        if mask is not None:
            mask = mask.repeat(n_head, 1, 1)
        (output, attn) = self.attention(q, k, v, mask=mask)
        # Unfold heads and concatenate them along the feature dimension.
        output = output.view(n_head, sz_b, len_q, d_v)
        output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, (- 1))
        output = self.dropout(self.fc(output))
        output = self.layer_norm((output + residual))
        return (output, attn)
class Compose(object):
    """Chain paired (image, mask) transforms, applying them in order."""

    def __init__(self, transforms):
        # Each transform is a callable (img, mask) -> (img, mask).
        self.transforms = transforms

    def __call__(self, img, mask):
        # The pair must be size-aligned before any transform runs.
        assert img.size == mask.size
        for transform in self.transforms:
            img, mask = transform(img, mask)
        return img, mask
def ints_to_text(values, dic, level='word', remove_bookends=True):
    """Decode an integer sequence back to text via the reverse dictionary.

    Zeros (padding) are dropped; when ``remove_bookends`` is set the first
    and last remaining ids (start/end markers) are stripped as well. Tokens
    are joined by spaces at word level, directly otherwise.
    """
    keep = np.where(values != 0)[0]
    filtered = values[keep]
    if remove_bookends:
        filtered = filtered[1:-1]
    tokens = [get_key(val, dic) for val in filtered]
    separator = ' ' if level == 'word' else ''
    return separator.join(tokens)
class Table():
    """Piecewise-linear lookup table: ``table[x]`` interpolates outputs at x."""

    def __init__(self, inputs, outputs):
        # Sample x-coordinates and their corresponding y-values.
        self.inputs = inputs
        self.outputs = outputs

    def __getitem__(self, query):
        # np.interp clamps out-of-range queries to the edge outputs.
        return np.interp(query, self.inputs, self.outputs)
def _is_overlapping(chunk1: tuple, chunk2: tuple):
((_, s1, e1), (_, s2, e2)) = (chunk1, chunk2)
return ((s1 < e2) and (s2 < e1)) |
_function_dispatch(_binary_op_dispatcher)
def not_equal(x1, x2):
return compare_chararrays(x1, x2, '!=', True) |
def parse_example_proto(example_serialized):
    """Parse a serialized ImageNet-style Example proto.

    Returns a tuple of (encoded image bytes, int32 label, bounding boxes of
    shape [1, num_boxes, 4] in [ymin, xmin, ymax, xmax] order, class text).

    NOTE(review): uses legacy TF APIs (tf.FixedLenFeature,
    tf.parse_single_example) and the pre-1.0 ``tf.concat(axis, values)``
    argument order — confirm the pinned TensorFlow version.
    """
    # Dense features present in every record; label defaults to -1.
    feature_map = {'image/encoded': tf.FixedLenFeature([], dtype=tf.string, default_value=''), 'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64, default_value=(- 1)), 'image/class/text': tf.FixedLenFeature([], dtype=tf.string, default_value='')}
    # Bounding-box coordinates are variable-length (one per object).
    sparse_float32 = tf.VarLenFeature(dtype=tf.float32)
    feature_map.update({k: sparse_float32 for k in ['image/object/bbox/xmin', 'image/object/bbox/ymin', 'image/object/bbox/xmax', 'image/object/bbox/ymax']})
    features = tf.parse_single_example(example_serialized, feature_map)
    label = tf.cast(features['image/class/label'], dtype=tf.int32)
    # Each coordinate becomes shape [1, num_boxes].
    xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0)
    ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0)
    xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0)
    ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0)
    # Stack to [4, num_boxes], then reshape/transpose to [1, num_boxes, 4].
    bbox = tf.concat(0, [ymin, xmin, ymax, xmax])
    bbox = tf.expand_dims(bbox, 0)
    bbox = tf.transpose(bbox, [0, 2, 1])
    return (features['image/encoded'], label, bbox, features['image/class/text'])
class GoalReachingEnv(object):
    """Goal-reaching mixin layered over a locomotion environment.

    NOTE(review): designed to be combined (via multiple inheritance) with a
    concrete env that sets BASE_ENV and provides ``get_xy``/``np_random`` —
    confirm against the subclasses that use it.
    """
    # Concrete subclasses must point this at the underlying env class.
    BASE_ENV = None

    def __init__(self, goal_sampler, eval=False, reward_type='dense'):
        # goal_sampler: callable(np_random) -> goal, used outside eval mode.
        self._goal_sampler = goal_sampler
        self._goal = np.ones([2])
        self.target_goal = self._goal
        self.eval = eval
        self.reward_type = reward_type

    def _get_obs(self):
        """Base observation, extended with the goal direction when training."""
        base_obs = self.BASE_ENV._get_obs(self)
        goal_direction = (self._goal - self.get_xy())
        if (not self.eval):
            obs = np.concatenate([base_obs, goal_direction])
            return obs
        else:
            # Eval observations exclude the goal to match the deployed policy.
            return base_obs

    def step(self, a):
        """Step the base env, then compute the goal-reaching reward.

        'dense' is negative distance to the target; 'sparse' is 1 within a
        0.5 radius. Episodes terminate on goal arrival only in eval mode.
        NOTE(review): ``reward`` is unbound for any other reward_type —
        would raise UnboundLocalError; confirm only these two are used.
        """
        self.BASE_ENV.step(self, a)
        if (self.reward_type == 'dense'):
            reward = (- np.linalg.norm((self.target_goal - self.get_xy())))
        elif (self.reward_type == 'sparse'):
            reward = (1.0 if (np.linalg.norm((self.get_xy() - self.target_goal)) <= 0.5) else 0.0)
        done = False
        if (self.eval and (np.linalg.norm((self.get_xy() - self.target_goal)) <= 0.5)):
            done = True
        obs = self._get_obs()
        return (obs, reward, done, {})

    def reset_model(self):
        """Reset, fixing the goal in eval mode or resampling otherwise.

        NOTE(review): ``target_goal`` is initialized to ones and never set to
        None here, so the sampler branch is reachable only if a subclass or
        caller clears it — confirm intended.
        """
        if ((self.target_goal is not None) or self.eval):
            self._goal = self.target_goal
        else:
            self._goal = self._goal_sampler(self.np_random)
        return self.BASE_ENV.reset_model(self)
class Template():
    """Lightweight container pairing an optional tensor with a dimension tag."""

    def __init__(self, tensor=None, dim=None):
        # Both fields default to None so an empty placeholder can be created.
        self.tensor = tensor
        self.dim = dim
def test_mhist(seed: int, dataset: str, version: str, workload: str, params: Dict[(str, Any)], overwrite: bool) -> None:
    """Build (or load a cached) MHist estimator for a dataset and run a workload.

    The constructed max-diff histogram state is pickled next to the model so
    repeated runs skip construction. ``seed`` is accepted for interface
    parity with sibling test drivers but is not used here.
    """
    # params['version'] overrides the positional version when present.
    table = load_table(dataset, (params.get('version') or version))
    model_path = (MODEL_ROOT / table.dataset)
    model_path.mkdir(parents=True, exist_ok=True)
    # Cache file is keyed by table version and the bin budget.
    model_file = (model_path / f"{table.version}-mhist_bin{params['num_bins']}.pkl")
    if model_file.is_file():
        L.info(f'{model_file} already exists, directly load and use')
        with open(model_file, 'rb') as f:
            state = pickle.load(f)
    else:
        L.info(f"Construct MHist with at most {params['num_bins']} bins...")
        state = construct_maxdiff(table, params['num_bins'])
        with open(model_file, 'wb') as f:
            pickle.dump(state, f, protocol=PKL_PROTO)
        L.info(f'MHist saved to {model_file}')
    partitions = state['partitions']
    estimator = MHist(partitions, table)
    L.info(f'Built MHist estimator: {estimator}')
    run_test(dataset, version, workload, estimator, overwrite)
def main():
    """Evaluate a pretrained Transformer-XL LM and report loss/perplexity.

    Parses CLI arguments, loads the tokenizer/corpus/model, then computes the
    token-weighted average loss over the requested split(s).
    """
    parser = argparse.ArgumentParser(description='PyTorch Transformer Language Model')
    parser.add_argument('--model_name', type=str, default='transfo-xl-wt103', help='pretrained model name')
    parser.add_argument('--split', type=str, default='test', choices=['all', 'valid', 'test'], help='which split to evaluate')
    parser.add_argument('--batch_size', type=int, default=10, help='batch size')
    parser.add_argument('--tgt_len', type=int, default=128, help='number of tokens to predict')
    parser.add_argument('--ext_len', type=int, default=0, help='length of the extended context')
    parser.add_argument('--mem_len', type=int, default=1600, help='length of the retained previous heads')
    parser.add_argument('--clamp_len', type=int, default=1000, help='max positional embedding index')
    # NOTE(review): 'CUA' in the help string below is a typo for 'CUDA'.
    parser.add_argument('--no_cuda', action='store_true', help='Do not use CUDA even though CUA is available')
    parser.add_argument('--work_dir', type=str, required=True, help='path to the work_dir')
    parser.add_argument('--no_log', action='store_true', help='do not log the eval result')
    parser.add_argument('--same_length', action='store_true', help='set same length attention with masking')
    parser.add_argument('--server_ip', type=str, default='', help='Can be used for distant debugging.')
    parser.add_argument('--server_port', type=str, default='', help='Can be used for distant debugging.')
    args = parser.parse_args()
    assert (args.ext_len >= 0), 'extended context length must be non-negative'
    # Optional remote-debugger hook (ptvsd blocks until a client attaches).
    if (args.server_ip and args.server_port):
        import ptvsd
        print('Waiting for debugger attach')
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    device = torch.device(('cuda' if (torch.cuda.is_available() and (not args.no_cuda)) else 'cpu'))
    logger.info('device: {}'.format(device))
    # NOTE(review): tokenizer and ntokens are loaded but never used below.
    tokenizer = TransfoXLTokenizer.from_pretrained(args.model_name)
    corpus = TransfoXLCorpus.from_pretrained(args.model_name)
    ntokens = len(corpus.vocab)
    va_iter = corpus.get_iterator('valid', args.batch_size, args.tgt_len, device=device, ext_len=args.ext_len)
    te_iter = corpus.get_iterator('test', args.batch_size, args.tgt_len, device=device, ext_len=args.ext_len)
    model = TransfoXLLMHeadModel.from_pretrained(args.model_name)
    model = model.to(device)
    logger.info('Evaluating with bsz {} tgt_len {} ext_len {} mem_len {} clamp_len {}'.format(args.batch_size, args.tgt_len, args.ext_len, args.mem_len, args.clamp_len))
    # Configure evaluation-time context/memory lengths on the model.
    model.reset_length(args.tgt_len, args.ext_len, args.mem_len)
    if (args.clamp_len > 0):
        model.clamp_len = args.clamp_len
    if args.same_length:
        model.same_length = True

    def evaluate(eval_iter):
        """Return the token-weighted mean loss over one iterator."""
        model.eval()
        (total_len, total_loss) = (0, 0.0)
        start_time = time.time()
        with torch.no_grad():
            # mems carries the recurrent memory across segments.
            mems = None
            for (idx, (data, target, seq_len)) in enumerate(eval_iter):
                ret = model(data, target, mems)
                (loss, _, mems) = ret
                loss = loss.mean()
                total_loss += (seq_len * loss.item())
                total_len += seq_len
            total_time = (time.time() - start_time)
        logger.info('Time : {:.2f}s, {:.2f}ms/segment'.format(total_time, ((1000 * total_time) / (idx + 1))))
        return (total_loss / total_len)
    # Evaluate the requested split(s).
    if (args.split == 'all'):
        test_loss = evaluate(te_iter)
        valid_loss = evaluate(va_iter)
    elif (args.split == 'valid'):
        valid_loss = evaluate(va_iter)
        test_loss = None
    elif (args.split == 'test'):
        test_loss = evaluate(te_iter)
        valid_loss = None

    def format_log(loss, split):
        """Format one split's loss and perplexity for the summary line."""
        log_str = '| {0} loss {1:5.2f} | {0} ppl {2:9.3f} '.format(split, loss, math.exp(loss))
        return log_str
    log_str = ''
    if (valid_loss is not None):
        log_str += format_log(valid_loss, 'valid')
    if (test_loss is not None):
        log_str += format_log(test_loss, 'test')
    logger.info(('=' * 100))
    logger.info(log_str)
    logger.info(('=' * 100))
class NormalFormGame(SageObject, MutableMapping):
    """A normal-form (strategic-form) game.

    Stored as a mapping from strategy profiles (tuples of strategy indices,
    one per player) to utility vectors. Supports construction from one or two
    payoff matrices or a gambit ``Game``, and Nash-equilibrium computation
    via lrs, gambit's LCP solver, linear programming, or support enumeration.
    """

    def __init__(self, generator=None):
        """Build the game from a list of matrices, a gambit game, or empty."""
        self.players = []
        self.utilities = {}
        matrices = []
        if (generator is not None):
            if ((type(generator) is not list) and (type(generator) is not Game)):
                raise TypeError('Generator function must be a list, gambit game or nothing')
            if (type(generator) is list):
                # A single matrix defines a zero-sum game: the second payoff
                # matrix is the negation of the first.
                if (len(generator) == 1):
                    generator.append((- generator[(- 1)]))
                matrices = generator
                if (matrices[0].dimensions() != matrices[1].dimensions()):
                    raise ValueError('matrices must be the same size')
                self._two_matrix_game(matrices)
            elif (type(generator) is Game):
                game = generator
                self._gambit_game(game)

    def __delitem__(self, key):
        """Remove a strategy profile (silently ignores missing keys)."""
        self.utilities.pop(key, None)

    def __getitem__(self, key):
        """Return the utility vector for a strategy profile."""
        return self.utilities[key]

    def __iter__(self):
        """Iterate over strategy profiles."""
        return iter(self.utilities)

    def __setitem__(self, key, value):
        """Set the utility vector for a strategy profile."""
        self.utilities[key] = value

    def __len__(self):
        """Number of strategy profiles."""
        return len(self.utilities)

    def _repr_(self):
        """Pretty-printed summary of the utility mapping."""
        from pprint import pformat
        base_str = 'Normal Form Game with the following utilities: {}'
        return base_str.format(pformat(self.utilities))

    def _latex_(self):
        """LaTeX: the payoff-matrix pair for 2-player games, str otherwise."""
        if (len(self.players) == 2):
            (M1, M2) = self.payoff_matrices()
            return ('\\left(%s, %s\\right)' % (M1._latex_(), M2._latex_()))
        return latex(str(self))

    def _two_matrix_game(self, matrices):
        """Populate players/utilities from two payoff matrices."""
        self.players = []
        self.utilities = {}
        self.add_player(matrices[0].dimensions()[0])
        self.add_player(matrices[1].dimensions()[1])
        for strategy_profile in self.utilities:
            self.utilities[strategy_profile] = [matrices[0][strategy_profile], matrices[1][strategy_profile]]

    def _gambit_game(self, game):
        """Populate players/utilities from a gambit ``Game``."""
        self.players = []
        self.utilities = {}
        for player in game.players:
            num_strategies = len(player.strategies)
            self.add_player(num_strategies)
        for strategy_profile in self.utilities:
            utility_vector = [float(game[strategy_profile][i]) for i in range(len(self.players))]
            self.utilities[strategy_profile] = utility_vector

    def _gambit_(self, as_integer=False, maximization=True):
        """Export to a gambit ``Game`` (negating payoffs for minimization)."""
        from decimal import Decimal
        strategy_sizes = [p.num_strategies for p in self.players]
        g = Game.new_table(strategy_sizes)
        sgn = 1
        if (not maximization):
            sgn = (- 1)
        players = len(strategy_sizes)
        for strategy_profile in self.utilities:
            for i in range(players):
                if as_integer:
                    g[strategy_profile][i] = (sgn * int(self.utilities[strategy_profile][i]))
                else:
                    g[strategy_profile][i] = (sgn * Decimal(float(self.utilities[strategy_profile][i])))
        return g

    def is_constant_sum(self):
        """True if the (2-player) payoff matrices sum to a constant."""
        import sys
        if (len(self.players) > 2):
            return False
        (m1, m2) = self.payoff_matrices()
        c = (m1 + m2)
        t = c[(0, 0)]
        for row in c:
            for i in row:
                if (abs((t - i)) > sys.float_info.epsilon):
                    return False
        return True

    def payoff_matrices(self):
        """Return the two payoff matrices of a complete 2-player game."""
        if (len(self.players) != 2):
            raise ValueError('Only available for 2 player games')
        if (not self._is_complete()):
            raise ValueError('utilities have not been populated')
        m1 = matrix(QQ, self.players[0].num_strategies, self.players[1].num_strategies)
        m2 = matrix(QQ, self.players[0].num_strategies, self.players[1].num_strategies)
        for strategy_profile in self.utilities:
            m1[strategy_profile] = self[strategy_profile][0]
            m2[strategy_profile] = self[strategy_profile][1]
        return (m1, m2)

    def add_player(self, num_strategies):
        """Add a player with the given strategy count and rebuild utilities."""
        self.players.append(_Player(num_strategies))
        self._generate_utilities(True)

    def _generate_utilities(self, replacement):
        """(Re)create utility slots for every strategy profile.

        With ``replacement`` True the mapping is rebuilt from scratch;
        otherwise existing entries are kept and only new profiles added.
        Fresh slots are filled with ``False`` placeholders.
        """
        strategy_sizes = [range(p.num_strategies) for p in self.players]
        if (replacement is True):
            self.utilities = {}
        for profile in product(*strategy_sizes):
            if (profile not in self.utilities.keys()):
                self.utilities[profile] = ([False] * len(self.players))

    def add_strategy(self, player):
        """Add one strategy to ``player`` and extend the utility mapping."""
        self.players[player].add_strategy()
        self._generate_utilities(False)

    def _is_complete(self):
        """True when no utility slot still holds a boolean placeholder."""
        results = []
        for profile in self.utilities.values():
            results.append(all(((type(i) is not bool) for i in profile)))
        return all(results)

    def obtain_nash(self, algorithm=False, maximization=True, solver=None):
        """Compute Nash equilibria of a 2-player game.

        ``algorithm`` may be 'lrs', 'LCP', 'lp' (prefix match), or
        'enumeration'; when unset, the cheapest applicable method is chosen.
        """
        if (len(self.players) > 2):
            raise NotImplementedError('Nash equilibrium for games with more than 2 players have not been implemented yet. Please see the gambit website ( that has a variety of available algorithms')
        if (not self._is_complete()):
            raise ValueError('utilities have not been populated')
        from sage.features.lrs import LrsNash
        if (not algorithm):
            if self.is_constant_sum():
                algorithm = 'lp'
            elif LrsNash().is_present():
                algorithm = 'lrs'
            else:
                algorithm = 'enumeration'
        if (algorithm == 'lrs'):
            LrsNash().require()
            return self._solve_lrs(maximization)
        if (algorithm == 'LCP'):
            if (Game is None):
                raise PackageNotFoundError('gambit')
            return self._solve_LCP(maximization)
        if algorithm.startswith('lp'):
            return self._solve_LP(solver=solver, maximization=maximization)
        if (algorithm == 'enumeration'):
            return self._solve_enumeration(maximization)
        raise ValueError("'algorithm' should be set to 'enumeration', 'LCP', 'lp' or 'lrs'")

    def _solve_lrs(self, maximization=True):
        """Find equilibria with the lrsnash external solver."""
        from subprocess import PIPE, Popen
        (m1, m2) = self.payoff_matrices()
        if (maximization is False):
            m1 = (- m1)
            m2 = (- m2)
        game_str = self._lrs_nash_format(m1, m2)
        game_name = tmp_filename()
        with open(game_name, 'w') as game_file:
            game_file.write(game_str)
        from sage.features.lrs import LrsNash
        LrsNash().require()
        process = Popen([LrsNash().absolute_filename(), game_name], stdout=PIPE, stderr=PIPE)
        lrs_output = [bytes_to_str(row) for row in process.stdout]
        process.terminate()
        nasheq = Parser(lrs_output).format_lrs()
        return sorted(nasheq)

    def _solve_LCP(self, maximization):
        """Find equilibria with gambit's linear complementarity solver."""
        # Fix: ``maximization`` was previously passed positionally, landing in
        # the ``as_integer`` slot of _gambit_(as_integer=False,
        # maximization=True) and leaving maximization at its default.
        g = self._gambit_(maximization=maximization)
        output = ExternalLCPSolver().solve(g)
        nasheq = Parser(output).format_gambit(g)
        return sorted(nasheq)

    def _solve_gambit_LP(self, maximization=True):
        """Find equilibria with gambit's LP solver."""
        if (Game is None):
            raise NotImplementedError('gambit is not installed')
        g = self._gambit_(maximization=maximization)
        output = ExternalLPSolver().solve(g)
        nasheq = Parser(output).format_gambit(g)
        return sorted(nasheq)

    def _solve_LP(self, solver='glpk', maximization=True):
        """Solve a constant-sum game via two linear programs (one per player)."""
        if (not self.is_constant_sum()):
            raise ValueError('Input game needs to be a two player constant sum game')
        if (solver == 'gambit'):
            return self._solve_gambit_LP(maximization)
        sgn = 1
        if (not maximization):
            sgn = (- 1)
        strategy_sizes = [p.num_strategies for p in self.players]
        # LP for player 2's strategy y (minimize the game value v).
        p = MixedIntegerLinearProgram(maximization=False, solver=solver)
        y = p.new_variable(nonnegative=True)
        v = p.new_variable(nonnegative=False)
        p.add_constraint(((((sgn * self.payoff_matrices()[0]) * y) - v[0]) <= 0))
        p.add_constraint(((matrix([([1] * strategy_sizes[1])]) * y) == 1))
        p.set_objective(v[0])
        p.solve()
        y = tuple(p.get_values(y).values())
        # Symmetric LP for player 1's strategy x.
        p = MixedIntegerLinearProgram(maximization=False, solver=solver)
        x = p.new_variable(nonnegative=True)
        u = p.new_variable(nonnegative=False)
        p.add_constraint(((((sgn * (- self.payoff_matrices()[0].T)) * x) - u[0]) <= 0))
        p.add_constraint(((matrix([([1] * strategy_sizes[0])]) * x) == 1))
        p.set_objective(u[0])
        p.solve()
        x = tuple(p.get_values(x).values())
        return [[x, y]]

    def _solve_enumeration(self, maximization=True):
        """Support enumeration: test every equal-size support pair."""
        (M1, M2) = self.payoff_matrices()
        if (maximization is False):
            M1 = (- M1)
            M2 = (- M2)
        potential_supports = [[tuple(support) for support in powerset(range(player.num_strategies))] for player in self.players]
        potential_support_pairs = [pair for pair in product(*potential_supports) if (len(pair[0]) == len(pair[1]))]
        equilibria = []
        for pair in potential_support_pairs:
            if (self._row_cond_dominance(pair[0], pair[1], M1) and self._row_cond_dominance(pair[1], pair[0], M2.transpose())):
                a = self._solve_indifference(pair[0], pair[1], M2)
                b = self._solve_indifference(pair[1], pair[0], M1.transpose())
                if (a and b and self._is_NE(a, b, pair[0], pair[1], M1, M2)):
                    equilibria.append([tuple(a), tuple(b)])
        return sorted(equilibria)

    def _row_cond_dominance(self, p1_sup, p2_sup, matrix):
        """False if some support row strictly dominates another on the support."""
        subm = matrix.matrix_from_rows_and_columns(list(p1_sup), list(p2_sup))
        nbr_rows = subm.nrows()
        nbr_cols = subm.ncols()
        for s in range(nbr_rows):
            strategy = subm.rows()[s]
            for r in range(s, nbr_rows):
                row = subm.rows()[r]
                if (strategy != row):
                    if all(((strategy[i] < row[i]) for i in range(nbr_cols))):
                        return False
                    if all(((row[i] < strategy[i]) for i in range(nbr_cols))):
                        return False
        return True

    def _solve_indifference(self, support1, support2, M):
        """Solve for a support1 strategy making the opponent indifferent
        across ``support2``; returns None if the linear system is infeasible,
        False if a singleton support2 is not a best response."""
        linearsystem = matrix(QQ, (len(support2) + 1), M.nrows())
        for strategy1 in support1:
            if (len(support2) == 1):
                # Singleton support: just check it is never beaten.
                for strategy2 in range(M.ncols()):
                    if (M[strategy1][support2[0]] < M[strategy1][strategy2]):
                        return False
            else:
                # Pairwise indifference conditions between consecutive
                # support2 strategies (cyclically, via index -1).
                for strategy_pair2 in range(len(support2)):
                    linearsystem[(strategy_pair2, strategy1)] = (M[strategy1][support2[strategy_pair2]] - M[strategy1][support2[(strategy_pair2 - 1)]])
            linearsystem[((- 1), strategy1)] = 1
        linearsystem_rhs = vector(([0 for i in range(len(support2))] + [1]))
        try:
            result = linearsystem.solve_right(linearsystem_rhs)
        except ValueError:
            return None
        return result

    def _is_NE(self, a, b, p1_support, p2_support, M1, M2):
        """Check that (a, b) with the given supports is a Nash equilibrium."""
        # Supports must match: positive exactly on the support indices.
        if (not (all(((a[i] > 0) for i in p1_support)) and all(((b[j] > 0) for j in p2_support)) and all(((a[i] == 0) for i in range(len(a)) if (i not in p1_support))) and all(((b[j] == 0) for j in range(len(b)) if (j not in p2_support))))):
            return False
        p1_payoffs = [sum(((v * row[i]) for (i, v) in enumerate(b))) for row in M1.rows()]
        p2_payoffs = [sum(((v * col[j]) for (j, v) in enumerate(a))) for col in M2.columns()]
        # Some best response must lie inside each player's support.
        if (not any(((i in p1_support) for (i, x) in enumerate(p1_payoffs) if (x == max(p1_payoffs))))):
            return False
        if (not any(((i in p2_support) for (i, x) in enumerate(p2_payoffs) if (x == max(p2_payoffs))))):
            return False
        return True

    def _Hrepresentation(self, m1, m2):
        """Deprecated legacy lrs H-representation of the two best-response polytopes."""
        from sage.misc.superseded import deprecation
        deprecation(27745, 'NormalFormGame._Hrepresentation is deprecated as it creates the legacy input format. Use NormalFormGame._lrs_nash_format instead')
        from sage.geometry.polyhedron.misc import _to_space_separated_string
        m = self.players[0].num_strategies
        n = self.players[1].num_strategies
        midentity = list(matrix.identity(m))
        nidentity = list(matrix.identity(n))
        s = 'H-representation\n'
        s += (('linearity 1 ' + str(((m + n) + 1))) + '\n')
        s += 'begin\n'
        s += (((str(((m + n) + 1)) + ' ') + str((m + 2))) + ' rational\n')
        for f in list(midentity):
            s += (('0 ' + _to_space_separated_string(f)) + ' 0 \n')
        for e in list(m2.transpose()):
            s += (('0 ' + _to_space_separated_string((- e))) + ' 1 \n')
        s += '-1 '
        for g in range(m):
            s += '1 '
        s += '0 \n'
        s += 'end\n'
        t = 'H-representation\n'
        t += (('linearity 1 ' + str(((m + n) + 1))) + '\n')
        t += 'begin\n'
        t += (((str(((m + n) + 1)) + ' ') + str((n + 2))) + ' rational\n')
        for e in list(m1):
            t += (('0 ' + _to_space_separated_string((- e))) + ' 1 \n')
        for f in list(nidentity):
            t += (('0 ' + _to_space_separated_string(f)) + ' 0 \n')
        t += '-1 '
        for g in range(n):
            t += '1 '
        t += '0 \n'
        t += 'end\n'
        return (s, t)

    def _lrs_nash_format(self, m1, m2):
        """Serialize the payoff matrices in lrsnash input format."""
        from sage.geometry.polyhedron.misc import _to_space_separated_string
        m = self.players[0].num_strategies
        n = self.players[1].num_strategies
        s = f'''{m} {n}
'''
        for r in m1.rows():
            s += (_to_space_separated_string(r) + '\n')
        s += '\n'
        for r in m2.rows():
            s += (_to_space_separated_string(r) + '\n')
        return s

    def is_degenerate(self, certificate=False):
        """Test whether the 2-player game is degenerate.

        With ``certificate`` True, also return a witness strategy and the
        player index it belongs to.
        """
        if (len(self.players) > 2):
            raise NotImplementedError('Tests for Degeneracy is not yet implemented for games with more than two players.')
        d = self._is_degenerate_pure(certificate)
        if d:
            return d
        (M1, M2) = self.payoff_matrices()
        potential_supports = [[tuple(support) for support in powerset(range(player.num_strategies))] for player in self.players]
        # Pure strategies were handled above; only supports of size > 1 and
        # unequal support sizes remain to be checked.
        potential_supports = [[i for i in k if (len(i) > 1)] for k in potential_supports]
        potential_support_pairs = [pair for pair in product(*potential_supports) if (len(pair[0]) != len(pair[1]))]
        potential_support_pairs.sort(key=(lambda x: sum([len(k) for k in x])))
        for pair in potential_support_pairs:
            if (len(pair[0]) < len(pair[1])):
                strat = self._solve_indifference(pair[0], pair[1], M2)
                if (strat and (len(self.best_responses(strat, player=0)) > len(pair[0]))):
                    if certificate:
                        return (True, (strat, 0))
                    else:
                        return True
            elif (len(pair[1]) < len(pair[0])):
                strat = self._solve_indifference(pair[1], pair[0], M1.transpose())
                # NOTE(review): this branch mirrors the one above but still
                # queries player=0 while returning certificate player 1 —
                # confirm whether player=1 was intended here.
                if (strat and (len(self.best_responses(strat, player=0)) > len(pair[1]))):
                    if certificate:
                        return (True, (strat, 1))
                    else:
                        return True
        if certificate:
            return (False, ())
        else:
            return False

    def best_responses(self, strategy, player):
        """Indices of ``player``'s pure best responses to the opponent's mixed
        ``strategy`` (which must be a probability vector)."""
        if (len(self.players) != 2):
            raise ValueError('Only available for 2 player games')
        if ((player != 0) and (player != 1)):
            raise ValueError(('%s is not an index of the opponent, must be 0 or 1' % player))
        strategy = vector(strategy)
        if ((sum(strategy) != 1) or (min(strategy) < 0)):
            raise ValueError('Strategy is not a probability distribution vector')
        if (player == 0):
            payoff_matrix = self.payoff_matrices()[0]
        elif (player == 1):
            payoff_matrix = self.payoff_matrices()[1].transpose()
        if (len(strategy) != payoff_matrix.dimensions()[1]):
            raise ValueError('Strategy is not of correct dimension')
        payoffs = list((payoff_matrix * strategy))
        indices = [i for (i, j) in enumerate(payoffs) if (j == max(payoffs))]
        return indices

    def _is_degenerate_pure(self, certificate=False):
        """Degeneracy check restricted to pure strategies: some pure strategy
        has more than one pure best response."""
        (M1, M2) = self.payoff_matrices()
        for (i, row) in enumerate(M2.rows()):
            if (list(row).count(max(row)) > 1):
                if certificate:
                    strat = [0 for k in range(M1.nrows())]
                    strat[i] = 1
                    return (True, (tuple(strat), 0))
                else:
                    return True
        for (j, col) in enumerate(M1.columns()):
            if (list(col).count(max(col)) > 1):
                if certificate:
                    strat = [0 for k in range(M1.ncols())]
                    strat[j] = 1
                    return (True, (tuple(strat), 1))
                else:
                    return True
        return False
def main(cfg):
    """Train or evaluate a re-identification model according to ``cfg``.

    ``cfg.train == 1`` warm-starts from an ImageNet-initialized checkpoint and
    trains; otherwise the best checkpoint is loaded and evaluated, printing
    CMC/mAP metrics.
    """
    (train_loader, test_loader, num_query, num_classes) = make_data_loader(cfg, use_eraser=True)
    model = build_model(num_classes, 'base', pretrain_choice=True)
    model = (torch.nn.DataParallel(model).cuda() if torch.cuda.is_available() else model)
    loss_func = make_loss()
    optimizer = make_optimizer(cfg, model)
    # LR decays by 10x at epochs 40 and 80.
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[40, 80], gamma=0.1)
    if (cfg.train == 1):
        start_epoch = 0
        acc_best = 0.0
        last_model_wts = torch.load(os.path.join('pre_feat', 'msmt_ini_imagenet.pth'))
        model_dict = model.state_dict()
        # Copy matching weights, skipping the classifier head (class count differs).
        checkpoint_dict = {k: v for (k, v) in last_model_wts['state_dict'].items() if ((k in model_dict) and ('classifier' not in k))}
        model_dict.update(checkpoint_dict)
        model.load_state_dict(model_dict)
        do_train(cfg, model, train_loader, test_loader, optimizer, scheduler, loss_func, num_query, start_epoch, acc_best)
    else:
        last_model_wts = torch.load(os.path.join(cfg.logs_dir, 'checkpoint_best.pth'))
        model_dict = model.state_dict()
        checkpoint_dict = {k: v for (k, v) in last_model_wts['state_dict'].items() if ((k in model_dict) and ('classifier' not in k))}
        model_dict.update(checkpoint_dict)
        model.load_state_dict(model_dict)
        (mAP, cmc1, cmc5, cmc10, cmc20) = inference_path(model, test_loader, num_query)
        # Timestamped one-line metrics summary.
        start_time = datetime.datetime.now()
        start_time = ('%4d:%d:%d-%2d:%2d:%2d' % (start_time.year, start_time.month, start_time.day, start_time.hour, start_time.minute, start_time.second))
        line = '{} - Test: cmc1: {:.1%}, cmc5: {:.1%}, cmc10: {:.1%}, cmc20: {:.1%}, mAP: {:.1%}\n'.format(start_time, cmc1, cmc5, cmc10, cmc20, mAP)
        print(line)
class PcfFontFile(FontFile.FontFile):
name = 'name'
def __init__(self, fp, charset_encoding='iso8859-1'):
self.charset_encoding = charset_encoding
magic = l32(fp.read(4))
if (magic != PCF_MAGIC):
raise SyntaxError('not a PCF file')
super().__init__()
count = l32(fp.read(4))
self.toc = {}
for i in range(count):
type = l32(fp.read(4))
self.toc[type] = (l32(fp.read(4)), l32(fp.read(4)), l32(fp.read(4)))
self.fp = fp
self.info = self._load_properties()
metrics = self._load_metrics()
bitmaps = self._load_bitmaps(metrics)
encoding = self._load_encoding()
for ch in range(256):
ix = encoding[ch]
if (ix is not None):
(x, y, l, r, w, a, d, f) = metrics[ix]
glyph = ((w, 0), (l, (d - y), (x + l), d), (0, 0, x, y), bitmaps[ix])
self.glyph[ch] = glyph
def _getformat(self, tag):
(format, size, offset) = self.toc[tag]
fp = self.fp
fp.seek(offset)
format = l32(fp.read(4))
if (format & 4):
(i16, i32) = (b16, b32)
else:
(i16, i32) = (l16, l32)
return (fp, format, i16, i32)
def _load_properties(self):
properties = {}
(fp, format, i16, i32) = self._getformat(PCF_PROPERTIES)
nprops = i32(fp.read(4))
p = []
for i in range(nprops):
p.append((i32(fp.read(4)), i8(fp.read(1)), i32(fp.read(4))))
if (nprops & 3):
fp.seek((4 - (nprops & 3)), io.SEEK_CUR)
data = fp.read(i32(fp.read(4)))
for (k, s, v) in p:
k = sz(data, k)
if s:
v = sz(data, v)
properties[k] = v
return properties
def _load_metrics(self):
metrics = []
(fp, format, i16, i32) = self._getformat(PCF_METRICS)
append = metrics.append
if ((format & 65280) == 256):
for i in range(i16(fp.read(2))):
left = (i8(fp.read(1)) - 128)
right = (i8(fp.read(1)) - 128)
width = (i8(fp.read(1)) - 128)
ascent = (i8(fp.read(1)) - 128)
descent = (i8(fp.read(1)) - 128)
xsize = (right - left)
ysize = (ascent + descent)
append((xsize, ysize, left, right, width, ascent, descent, 0))
else:
for i in range(i32(fp.read(4))):
left = i16(fp.read(2))
right = i16(fp.read(2))
width = i16(fp.read(2))
ascent = i16(fp.read(2))
descent = i16(fp.read(2))
attributes = i16(fp.read(2))
xsize = (right - left)
ysize = (ascent + descent)
append((xsize, ysize, left, right, width, ascent, descent, attributes))
return metrics
def _load_bitmaps(self, metrics):
bitmaps = []
(fp, format, i16, i32) = self._getformat(PCF_BITMAPS)
nbitmaps = i32(fp.read(4))
if (nbitmaps != len(metrics)):
raise OSError('Wrong number of bitmaps')
offsets = []
for i in range(nbitmaps):
offsets.append(i32(fp.read(4)))
bitmapSizes = []
for i in range(4):
bitmapSizes.append(i32(fp.read(4)))
bitorder = (format & 8)
padindex = (format & 3)
bitmapsize = bitmapSizes[padindex]
offsets.append(bitmapsize)
data = fp.read(bitmapsize)
pad = BYTES_PER_ROW[padindex]
mode = '1;R'
if bitorder:
mode = '1'
for i in range(nbitmaps):
(x, y, l, r, w, a, d, f) = metrics[i]
(b, e) = (offsets[i], offsets[(i + 1)])
bitmaps.append(Image.frombytes('1', (x, y), data[b:e], 'raw', mode, pad(x)))
return bitmaps
def _load_encoding(self):
    """Parse the PCF BDF encodings table.

    Returns a 256-entry list mapping byte values (decoded through
    self.charset_encoding) to glyph indices, or None where no glyph exists.
    """
    encoding = ([None] * 256)
    (fp, format, i16, i32) = self._getformat(PCF_BDF_ENCODINGS)
    (firstCol, lastCol) = (i16(fp.read(2)), i16(fp.read(2)))
    (firstRow, lastRow) = (i16(fp.read(2)), i16(fp.read(2)))
    i16(fp.read(2))  # default char; read and discarded
    nencoding = (((lastCol - firstCol) + 1) * ((lastRow - firstRow) + 1))
    encodingOffsets = [i16(fp.read(2)) for _ in range(nencoding)]
    for i in range(firstCol, len(encoding)):
        try:
            # Map the raw byte through the font's charset to find its slot.
            encodingOffset = encodingOffsets[ord(bytearray([i]).decode(self.charset_encoding))]
            if (encodingOffset != 65535):
                # 0xFFFF marks "no glyph for this code point".
                encoding[i] = encodingOffset
        except UnicodeDecodeError:
            # Byte not valid in the charset; leave the slot as None.
            pass
    return encoding
class atlas_3_10_info(atlas_info):
    """ATLAS >= 3.10 system_info variant.

    ATLAS 3.10 ships a single combined serial library ('satlas') instead of
    separate BLAS/LAPACK libraries, so all three library lists point at it.
    """
    _lib_names = ['satlas']
    _lib_atlas = _lib_names
    _lib_lapack = _lib_names
def ste_ceil(x: tf.Tensor) -> tf.Tensor:
    """Ceiling with a straight-through gradient estimator.

    Forward pass computes ceil(x); the correction term is wrapped in
    stop_gradient, so backprop sees the identity (gradient of x).
    """
    ceil_delta = tf.stop_gradient(tf.math.ceil(x) - x)
    return x + ceil_delta
def combine_bert(all_target_token, all_bert2target_map, all_bert_tokens, all_bert_embeddings):
    """Re-merge subword-level BERT embeddings back to target-token granularity.

    For each sentence, the embedding of a target token is the mean of the
    embeddings of the consecutive BERT subword pieces it was split into.

    Args:
        all_target_token: per-sentence lists of target-level tokens.
        all_bert2target_map: per-sentence lists giving, for each target token,
            how many BERT subword pieces it maps to.
        all_bert_tokens: per-sentence BERT subword tokens (unused; kept for
            interface compatibility with existing callers).
        all_bert_embeddings: per-sentence arrays of shape (n_subwords, dim)
            supporting slicing and .mean(axis=0).

    Returns:
        (output_embeddings, output_words): per-sentence lists of merged
        embeddings and the corresponding target tokens.
    """
    output_embeddings = []
    output_words = []
    for sentence_idx, target_tokens in enumerate(all_target_token, start=1):
        if (sentence_idx % 10000) == 0:
            logging.info('Re-merging and saving sentence {}'.format(sentence_idx))
        mapping = all_bert2target_map[sentence_idx - 1]
        # Hoisted out of the inner loop: one check per sentence suffices.
        assert len(mapping) == len(target_tokens)
        last_hidden_states = all_bert_embeddings[sentence_idx - 1]
        saved = []
        words = []
        start = 0
        for word, num_sub_tokens in zip(target_tokens, mapping):
            end = start + num_sub_tokens
            # Average the embeddings of this word's subword pieces.
            saved.append(last_hidden_states[start:end].mean(axis=0))
            words.append(word)
            start = end
        output_embeddings.append(saved)
        output_words.append(words)
    return (output_embeddings, output_words)
def handlePushButton(obj):
    """Handle a push-button event message and drive the camera/training flow.

    Clicks 1-5 capture DEFECT samples, clicks 6-10 capture NORMAL samples,
    and further clicks trigger detection; RESET resets and backs the camera up.

    Fix: the original paired lock.acquire()/lock.release() manually, so an
    exception from sendCommand/train/detect left the lock held forever; the
    `with` statement guarantees release. The three counter range checks are
    mutually exclusive, so they are now an if/elif chain.
    """
    global cameraURL
    if cameraURL == '':
        print('the camera URL is not set yet')
        return
    attributes = obj['attributes']
    if 'detectedEvent' not in attributes:
        return
    event = attributes['detectedEvent']['value']
    print(event)
    global counter
    with lock:  # released even if a command below raises
        eventType = event['type']
        if eventType == 'CLICK':
            counter = (counter + 1)
            print(('counter = %d' % counter))
            if counter <= 5:
                sendCommand('MOVE_FORWARD')
                train('DEFECT')
                sendCommand('MOVE_LEFT')
            elif counter <= 10:
                sendCommand('MOVE_FORWARD')
                train('NORMAL')
                sendCommand('MOVE_RIGHT')
            else:
                detect()
        elif eventType == 'RESET':
            reset()
            sendCommand('MOVE_BACKWARD')
def get_db_names() -> List[str]:
    """Return the stem names of all '.db' files in this module's database dir.

    E.g. 'users.db' -> 'users'. Uses a single comprehension instead of the
    original chained filter/map lambdas.
    """
    module_path = dirname(__file__)
    files = os.listdir(f'{module_path}/database')
    return [os.path.splitext(f)[0] for f in files if f.endswith('.db')]
class TypeKind(BaseEnumeration):
    """Enumeration describing the kind of a clang type."""
    # Registry slots required by BaseEnumeration.
    _kinds = []
    _name_map = None

    def spelling(self):
        """Return libclang's human-readable spelling of this kind."""
        return conf.lib.clang_getTypeKindSpelling(self.value)

    def __repr__(self):
        return f'TypeKind.{self.name}'
def launch_batch(exp_name, collect_params=False):
    """Generate and launch (or just collect) the cross-product of evo configs.

    Iterates every combination of experiment id, problem, representation,
    model, BC pair, elite/seed fixing, step count and initial-state count,
    skipping invalid combinations; writes one settings_<i>.json per valid
    combo and then runs it locally, submits it to SLURM, or (when
    collect_params is True) only accumulates the configs and returns them.

    NOTE(review): depends on many module-level globals (EVALUATE, LOCAL,
    exp_ids, problems, models, args, ...) defined elsewhere in this file.
    """
    if collect_params:
        settings_list = []
        assert (not EVALUATE)
    if LOCAL:
        print('Testing locally.')
    else:
        print('Launching batch of experiments on SLURM.')
    with open('configs/evo/default_settings.json', 'r') as f:
        default_config = json.load(f)
    print('Loaded default config:\n{}'.format(default_config))
    if LOCAL:
        default_config['n_generations'] = 50000
    i = 0  # running experiment index; selects settings_<i>.json and the -la arg
    for exp_id in exp_ids:
        for prob in problems:
            prob_bcs = (global_bcs + local_bcs[prob])
            for rep in representations:
                for model in models:
                    if ((model == 'CNN') and (rep == 'cellular')):
                        # CNN is incompatible with the cellular representation.
                        continue
                    for bc_pair in prob_bcs:
                        for fix_el in fix_elites:
                            for fix_seed in fix_seeds:
                                if (fix_seed and (not fix_el)):
                                    continue
                                for n_steps in n_steps_lst:
                                    if (rep != 'cellular'):
                                        # n_steps only matters for cellular reps.
                                        if (n_steps != n_steps_lst[0]):
                                            continue
                                    if (('NCA' in model) and (n_steps <= 5)):
                                        continue
                                    for n_init_states in n_init_states_lst:
                                        if ((n_init_states == 0) and (not (fix_seed and fix_el))):
                                            continue
                                        if ((n_init_states == 0) and ('Decoder' in model)):
                                            continue
                                        if (args.cross_eval and GENERATIVE_ONLY_CROSS_EVAL):
                                            if ((n_init_states == 0) and (not ((model == 'CPPN') or (model == 'Sin2CPPN') or (model == 'SinCPPN')))):
                                                continue
                                        # CPPN-family models use MAP-Elites; others CMA-ME.
                                        if (model in ['CPPN', 'GenCPPN', 'GenCPPN2', 'CPPNCA', 'DirectBinaryEncoding']):
                                            algo = 'ME'
                                        else:
                                            algo = 'CMAME'
                                        if ('CPPN' in model):
                                            if (('Gen' not in model) and (model != 'CPPNCA')):
                                                if ((n_init_states != 0) or (not fix_seed) or (not fix_el)):
                                                    continue
                                            if ((model != 'CPPNCA') and (n_steps != 1)):
                                                continue
                                        if (('Decoder' in model) and (n_steps != 1)):
                                            continue
                                        if EVALUATE:
                                            script_name = 'evo_eval.sh'
                                        else:
                                            script_name = 'evo_train.sh'
                                        # Rewrite the '-la <idx>' argument inside the launch script in place.
                                        with open(script_name, 'r') as f:
                                            content = f.read()
                                        new_content = re.sub('python evolve.py -la \\d+', 'python evolve.py -la {}'.format(i), content)
                                        with open(script_name, 'w') as f:
                                            f.write(new_content)
                                        exp_config = copy.deepcopy(default_config)
                                        exp_config.update({'problem': prob, 'representation': rep, 'behavior_characteristics': bc_pair, 'algo': algo, 'model': model, 'fix_elites': fix_el, 'fix_level_seeds': fix_seed, 'exp_name': str(exp_id), 'save_levels': False, 'n_steps': n_steps, 'n_init_states': n_init_states, 'n_generations': 50000, 'multi_thread': (not args.single_thread)})
                                        if args.render:
                                            exp_config.update({'infer': True, 'render': True, 'visualize': True})
                                        elif EVALUATE:
                                            render_levels = (RENDER_LEVELS and (n_steps > 1))
                                            # NOTE(review): this immediately overwrites the line above.
                                            render_levels = (RENDER_LEVELS and (algo != 'ME'))
                                            exp_config.update({'infer': True, 'evaluate': True, 'render_levels': render_levels, 'save_levels': True, 'visualize': True})
                                        print('Saving experiment config:\n{}'.format(exp_config))
                                        with open('configs/evo/settings_{}.json'.format(i), 'w') as f:
                                            json.dump(exp_config, f, ensure_ascii=False, indent=4)
                                        if collect_params:
                                            settings_list.append(exp_config)
                                        elif LOCAL:
                                            os.system('python evolve.py -la {}'.format(i))
                                        else:
                                            os.system('sbatch {}'.format(script_name))
                                        i += 1
    if collect_params:
        return settings_list
class StaticGNN(torch.nn.Module):
    """Stack of same-width graph convolutions plus a task-specific head.

    Each layer applies conv -> ReLU -> dropout; the final node features are
    written back onto the batch and also stored in batch.node_states.
    """

    def __init__(self, dim_in, dim_out, num_layers=1, model_type='GCN', layer_id=0):
        super(StaticGNN, self).__init__()
        conv_cls = self.build_conv_model(model_type)
        # num_layers convolutions, all operating at width dim_in.
        self.convs = nn.ModuleList(conv_cls(dim_in, dim_in) for _ in range(num_layers))
        self.layer_id = layer_id
        self.num_layers = num_layers
        head_cls = head_dict[cfg.dataset.task]
        self.post_mp = head_cls(dim_in=dim_in, dim_out=dim_out)
        self.dropout = cfg.gnn.dropout

    def build_conv_model(self, model_type):
        """Resolve a model-type name to its PyTorch-Geometric conv class."""
        conv_names = {'GCN': 'GCNConv', 'GAT': 'GATConv', 'GraphSage': 'SAGEConv'}
        if model_type not in conv_names:
            raise ValueError('Model {} unavailable'.format(model_type))
        return getattr(pyg_nn, conv_names[model_type])

    def forward(self, batch):
        """Run all conv layers, update batch.node_feature and node_states."""
        h = batch.node_feature
        edge_index = batch.edge_index
        for conv in self.convs:
            h = conv(h, edge_index)
            h = F.relu(h)
            h = F.dropout(h, p=self.dropout, training=self.training)
        batch.node_feature = h
        batch.node_states[self.layer_id] = h.clone()
        return batch
class Module():
    """Base class for rf modules.

    Mirrors the torch.nn.Module idea: submodules and parameters are
    discovered by scanning instance attributes (vars(self)).

    Fixes applied:
      * __init__ had an empty (syntactically invalid) body in the source.
      * set_deep used rsplit('.', 2), which raised ValueError for targets
        containing two or more dots; maxsplit must be 1.
      * named_modules tested `self in memo` although memo stores RefIdEq
        wrappers, so the guard never matched.
    """

    def __init__(self):
        """No-op; subclasses assign submodules/parameters as attributes."""
        pass

    def __repr__(self):
        return f'<{self.__class__.__name__}>'

    def default_initial_state(self, *, batch_dims: Sequence[Dim]) -> Optional[rf.State]:
        """Collect the default initial state of all children; None if empty."""
        state = rf.State()
        for (key, mod) in self.named_children():
            sub_state = mod.default_initial_state(batch_dims=batch_dims)
            if sub_state:
                state[key] = sub_state
        if state:
            return state
        return None

    def get_default_name(self) -> str:
        """Derive a snake_case default name from the class name."""
        name = self.__class__.__name__
        if name.startswith('_'):
            name = name[1:]
        if name[:1].isupper():
            from returnn.util.basic import camel_case_to_snake_case
            name = camel_case_to_snake_case(name)
        return name

    def __call__(self, *args, **kwargs) -> Union[(Tensor, Tuple[(Tensor, rf.State)], Any)]:
        raise OptionalNotImplementedError

    def get_deep(self, target: str) -> Any:
        """Resolve a dotted attribute path below self; '' returns self.

        Raises AttributeError when an intermediate attribute is missing or
        is not an rf.Module.
        """
        if target == '':
            return self
        atoms: List[str] = target.split('.')
        mod: Module = self
        for item in atoms[:-1]:
            if not hasattr(mod, item):
                raise AttributeError(f'{mod} has no attribute `{item}`')
            mod = getattr(mod, item)
            if not isinstance(mod, Module):
                raise AttributeError(f'`{item}` is not an rf.Module')
        return getattr(mod, atoms[-1])

    def set_deep(self, target: str, value: Any) -> None:
        """Set the attribute at a dotted path below self."""
        if target == '':
            raise AttributeError('Cannot set root module')
        if '.' in target:
            # maxsplit=1: split off only the final path component.
            # (The original rsplit('.', 2) raised ValueError for >= 2 dots.)
            (prefix, target) = target.rsplit('.', 1)
            mod = self.get_deep(prefix)
            if not isinstance(mod, Module):
                raise AttributeError(f'{self}: `{prefix}` is not an rf.Module')
        else:
            mod = self
        setattr(mod, target, value)

    def children(self) -> Iterator[rf.Module]:
        """Iterate over direct child modules."""
        return self.modules(recurse=False, include_self=False)

    def named_children(self) -> Iterator[Tuple[(str, rf.Module)]]:
        """Iterate over (name, module) pairs of direct children."""
        return self.named_modules(recurse=False, include_self=False)

    def modules(self, *, recurse: bool=True, include_self: bool=True) -> Iterator[rf.Module]:
        """Iterate over modules (optionally recursive / including self)."""
        for (_name, child) in self.named_modules(recurse=recurse, include_self=include_self):
            yield child

    def named_modules(self, *, recurse: bool=True, include_self: bool=True, memo: Optional[Set[RefIdEq[rf.Module]]]=None, prefix: str='') -> Iterator[Tuple[(str, rf.Module)]]:
        """Breadth-first traversal yielding (dotted-name, module) pairs.

        memo holds RefIdEq wrappers, so each module object is visited once
        even when it is reachable under several attribute names.
        """
        if memo is None:
            memo = set()
        # Membership must test the wrapped reference; the old `self in memo`
        # compared a Module against RefIdEq entries and never matched.
        if RefIdEq(self) in memo:
            return
        memo.add(RefIdEq(self))
        if include_self:
            yield (prefix, self)
        queue = [(prefix, self)]
        while queue:
            (prefix, mod) = queue.pop(0)
            for (name, module) in vars(mod).items():
                if not isinstance(module, Module):
                    continue
                if RefIdEq(module) in memo:
                    continue
                sub_prefix = ((prefix + ('.' if (prefix and (not prefix.endswith('.'))) else '')) + name)
                memo.add(RefIdEq(module))
                yield (sub_prefix, module)
                if recurse:
                    queue.append((sub_prefix, module))

    def named_parameters(self, *, recurse: bool=True) -> Iterator[Tuple[(str, rf.Parameter)]]:
        """Yield (dotted-name, parameter) pairs, deduplicated by identity."""
        memo: Set[RefIdEq[Tensor]] = set()
        for (prefix, module) in (self.named_modules() if recurse else [('', self)]):
            for (key, value) in vars(module).items():
                if isinstance(value, rf.Parameter) and (RefIdEq(value) not in memo):
                    sub_prefix = ((prefix + ('.' if prefix else '')) + key)
                    memo.add(RefIdEq(value))
                    yield (sub_prefix, value)

    def parameters(self, *, recurse: bool=True) -> Iterator[rf.Parameter]:
        """Yield all parameters."""
        for (_name, param) in self.named_parameters(recurse=recurse):
            yield param

    def has_parameters(self):
        """Return True iff this module (recursively) owns any parameter."""
        for _ in self.named_parameters(recurse=True):
            return True
        return False

    def apply(self: T, fn: Callable[([rf.Module], None)]) -> T:
        """Apply fn to all children (depth-first), then to self; return self."""
        for child in self.children():
            child.apply(fn)
        fn(self)
        return self
_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for BertJapaneseTokenizer, MecabTokenizer and WordpieceTokenizer.

    NOTE(review): many string literals below are empty — the original
    Japanese characters appear to have been lost in extraction, so several
    fixtures/assertions look degenerate; confirm against the upstream file.
    """
    tokenizer_class = BertJapaneseTokenizer

    def setUp(self):
        """Write a small vocab file into the temp dir for from_pretrained."""
        super(BertJapaneseTokenizationTest, self).setUp()
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '', '', '', '', '##', '##', '##', '', '##', '', '##', '', '##']
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([(x + '\n') for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self):
        input_text = ' \n'
        output_text = ' '
        return (input_text, output_text)

    def test_full_tokenizer(self):
        """Tokenize and map to ids with the vocab written in setUp."""
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('\n')
        self.assertListEqual(tokens, ['', '', '', '', '', '##', '', '', ''])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

    def test_mecab_tokenizer(self):
        tokenizer = MecabTokenizer()
        self.assertListEqual(tokenizer.tokenize(' \tiPhone8 \n \u3000 '), ['', '', 'iPhone', '8', '', '', '', '', '', ''])

    def test_mecab_tokenizer_lower(self):
        # do_lower_case should lowercase ASCII ('iPhone' -> 'iphone').
        tokenizer = MecabTokenizer(do_lower_case=True)
        self.assertListEqual(tokenizer.tokenize(' \tiPhone8 \n \u3000 '), ['', '', 'iphone', '8', '', '', '', '', '', ''])

    def test_mecab_tokenizer_no_normalize(self):
        # Without normalization the ideographic space (U+3000) survives.
        tokenizer = MecabTokenizer(normalize_text=False)
        self.assertListEqual(tokenizer.tokenize(' \tiPhone8 \n \u3000 '), ['', '', 'iPhone', '8', '', '', '', '', '', '\u3000', ''])

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '', '', '', '##', '##', '##']
        vocab = {}
        for (i, token) in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token='[UNK]')
        self.assertListEqual(tokenizer.tokenize(''), [])
        self.assertListEqual(tokenizer.tokenize(''), [''])
        self.assertListEqual(tokenizer.tokenize(''), ['', '##'])
        self.assertListEqual(tokenizer.tokenize(' '), ['', '##', '[UNK]', ''])

    def test_sequence_builders(self):
        """Check [CLS]/[SEP] (ids 2/3) placement for single and paired input."""
        tokenizer = self.tokenizer_class.from_pretrained('bert-base-japanese')
        text = tokenizer.encode('', add_special_tokens=False)
        text_2 = tokenizer.encode('', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert (encoded_sentence == (([2] + text) + [3]))
        assert (encoded_pair == (((([2] + text) + [3]) + text_2) + [3]))
def bohrToMeters(value, dimension=1):
    """Convert a quantity in Bohr radii (to the given power) into meters.

    dimension=1 converts a length, dimension=2 an area, dimension=3 a volume.
    """
    BOHR_CONSTANT = 5.2917725e-11  # Bohr radius expressed in meters
    scale = BOHR_CONSTANT ** dimension
    return value * scale
def execute_operation(args, model, tokenizer, sent1, sent2=None):
    """Generate text for one sentence, or for a '</s>'-joined sentence pair.

    Single-sentence mode applies length/repetition penalties; pair mode uses
    temperature sampling instead. Returns the first decoded sequence.
    """
    if sent2 is None:
        batch_tokens = tokenizer([sent1], truncation=True, padding='longest', max_length=args.max_src_length, return_tensors='pt').to(torch_device_fusion)
        generated = model.generate(**batch_tokens, max_length=args.max_target_length, num_beams=args.num_beams, num_return_sequences=args.num_return_sequences, length_penalty=2.0, repetition_penalty=2.0)
    else:
        joined = (sent1 + ' </s> ') + sent2
        batch_tokens = tokenizer([joined], truncation=True, padding='longest', max_length=args.max_src_length, return_tensors='pt').to(torch_device_fusion)
        generated = model.generate(**batch_tokens, max_length=args.max_target_length, num_beams=args.num_beams, num_return_sequences=args.num_return_sequences, temperature=args.temperature)
    return tokenizer.batch_decode(generated, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
def OA_520_plus_x(x):
    """Build an orthogonal array OA(9+x+1, 520+x).

    Construction (presumably the '520+x' recipe from the OA literature —
    confirm against the module's references): start from an incomplete
    OA(17,31), turn its rows and groups into a pairwise balanced design
    (PBD) on 520+x points, then replace each block by an (incomplete)
    orthogonal array of matching order.
    """
    from .orthogonal_arrays import incomplete_orthogonal_array
    k = ((9 + x) + 1)
    # Incomplete OA(17,31) with a size-1 hole; an extra constant row is added.
    OA = incomplete_orthogonal_array(17, 31, [1])
    OA.append(([30] * 17))
    new_point = (31 * 17)
    # Rows become PBD blocks; truncated columns (index >= 9+x) drop symbol 30.
    PBD = [[((i * 31) + xx) for (i, xx) in enumerate(B) if ((i < (9 + x)) or (xx < 30))] for B in OA]
    # Add the 17 column groups (plus the new point) as further blocks.
    PBD.extend([(list(range((i * 31), (((i * 31) + 30) + bool((i < (9 + x)))))) + [new_point]) for i in range(17)])
    # Relabel the point set to 0..n-1.
    relabel = {v: i for (i, v) in enumerate(sorted(set().union(*PBD)))}
    PBD = [[relabel[xx] for xx in B] for B in PBD]
    # One (incomplete) OA per occurring block size.
    subdesigns = {(9 + x): orthogonal_array(k, (9 + x)), 16: incomplete_orthogonal_array(k, 16, ([1] * 16)), 17: incomplete_orthogonal_array(k, 17, ([1] * 17)), 31: incomplete_orthogonal_array(k, 31, [1]), 32: incomplete_orthogonal_array(k, 32, ([1] * 2))}
    OA = []
    for B in PBD:
        OA.extend([[B[xx] for xx in R] for R in subdesigns[len(B)]])
    # Constant row on the new point completes the array.
    OA.append(([relabel[new_point]] * k))
    return OA
def evaluate(dataset, dataset_name, predictions, output_dir):
    """Dispatch evaluation to the backend matching dataset_name.

    Args:
        dataset: dataset object handed to the evaluator.
        dataset_name: 'voc'/'pascal' for VOC-style evaluation, 'coco' for COCO.
        predictions: model predictions handed to the evaluator.
        output_dir: directory where the evaluator writes its results.

    Returns:
        The evaluator's result object.

    Raises:
        NotImplementedError: for any other dataset_name.

    Fix: removed the unreachable exit() that followed the raise.
    """
    eval_kwargs = dict(dataset=dataset, predictions=predictions, output_dir=output_dir)
    if dataset_name in ('voc', 'pascal'):
        return voc_evaluation(**eval_kwargs)
    if dataset_name == 'coco':
        return coco_evaluation(**eval_kwargs)
    raise NotImplementedError
# NOTE(review): the next line looks like a truncated `@pytest.mark.parametrize`
# decorator whose prefix was lost in extraction — confirm against upstream.
.parametrize('mode,supports_masking,input_shape', [('sum', False, [(BATCH_SIZE, SEQ_LENGTH, EMBEDDING_SIZE), (BATCH_SIZE, 1)]), ('mean', True, (BATCH_SIZE, SEQ_LENGTH, EMBEDDING_SIZE)), ('max', True, (BATCH_SIZE, SEQ_LENGTH, EMBEDDING_SIZE))])
def test_SequencePoolingLayer(mode, supports_masking, input_shape):
    """Smoke-test SequencePoolingLayer via layer_test for each pooling mode."""
    # Skip non-'sum' modes on TF >= 1.14 — presumably a known incompatibility;
    # confirm why this guard exists.
    if ((version.parse(tf.__version__) >= version.parse('1.14.0')) and (mode != 'sum')):
        return
    with CustomObjectScope({'SequencePoolingLayer': sequence.SequencePoolingLayer}):
        layer_test(sequence.SequencePoolingLayer, kwargs={'mode': mode, 'supports_masking': supports_masking}, input_shape=input_shape, supports_masking=supports_masking)
def squeezenet1_0(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> SqueezeNet:
    """Build a SqueezeNet 1.0 model via the shared _squeezenet factory.

    Args:
        pretrained: if True, load pretrained weights.
        progress: if True, display a download progress bar.
        **kwargs: forwarded to the SqueezeNet constructor.
    """
    return _squeezenet('1_0', pretrained, progress, **kwargs)
def assert_verts_equals(verts, check_list, tol=1e-05):
    """Assert that verts and check_list contain the same points up to tol.

    Checks set equality in both directions using pt_almost_in, with an
    informative assertion message naming the offending point.
    """
    # Every expected point must appear among the actual vertices.
    for expected in check_list:
        assert pt_almost_in(expected, verts, tol), '{} was not found in verts: {}'.format(expected, verts)
    # Conversely, no actual vertex may be missing from the expected set.
    for actual in verts:
        assert pt_almost_in(actual, check_list, tol), 'verts contains {}, which was not in check_list: {}'.format(actual, check_list)
class Worker(multiprocessing.Process):
    """Worker process that re-annotates wikiextractor output files.

    Pulls file names from in_queue, aligns character offsets of internal
    links and keyword matches with a re-tokenized article text, labels each
    token with an entity (or 'O'/'UNK'), writes one TSV per article, and
    pushes results onto out_queue.
    """

    def __init__(self, in_queue, out_queue, opts, tokenizer, redirects_en, keyword_processor, popular_entity_counter_dict, mention_entity_counter_popular_entities):
        super().__init__()
        self.in_queue = in_queue
        self.out_queue = out_queue
        self.tokenizer = tokenizer
        # Mapping of redirect titles -> canonical entity titles.
        self.redirects_en = redirects_en
        # Keyword matcher used to spot candidate mentions by surface string.
        self.keyword_processor = keyword_processor
        self.popular_entity_counter_dict = popular_entity_counter_dict
        self.mention_entity_counter_popular_entities = mention_entity_counter_popular_entities
        self.opts = opts

    def run(self):
        """Consume file names until a None sentinel; emit (result, file_name)."""
        for next_item in iter(self.in_queue.get, None):
            file_name = next_item
            self.out_queue.put((self.extract_data(next_item), file_name))

    def extract_data(self, file_name):
        """Process one wikiextractor JSON-lines file.

        Returns (out_file_names, local_mention_counter,
        local_entities_found_in_article).
        """
        len_prefix = len(f'data/versions/{self.opts.data_version_name}/wikiextractor_out/{self.opts.wiki_lang_version}/')
        local_mention_counter = Counter()
        out_file_names = list()
        local_entities_found_in_article = list()
        with io.open(file_name) as f:
            for (i, wiki_article) in enumerate(f.readlines()):
                wiki_article = json.loads(wiki_article)
                debug = (wiki_article['id'] == '28490')  # leftover debug flag; unused below
                wiki_article_title_set = get_stopwordless_token_set(wiki_article['title'])

                def corefers_with_title_entity(s):
                    # Heuristic coreference with the article's title entity:
                    # s must be no longer than the title and share a token.
                    s_set = get_stopwordless_token_set(s)
                    is_shorter = (len(s_set) <= len(wiki_article_title_set))
                    has_overlap = ((len(s_set.intersection(wiki_article_title_set)) / len(wiki_article_title_set)) > 0)
                    return (is_shorter and has_overlap)
                wiki_article_normalized_wiki_entity = normalize_wiki_entity(wiki_article['title'], replace_ws=True)
                wiki_article_title_entity = self.redirects_en.get(wiki_article_normalized_wiki_entity, wiki_article_normalized_wiki_entity)
                local_entities_found_counter = Counter()
                start_offset_dict = dict()  # NOTE(review): never populated in this view
                # (char-span -> link target) pairs, sorted by start offset.
                links_offsets = sorted(pickle.loads(base64.b64decode(wiki_article['internal_links'].encode('utf-8'))).items(), key=(lambda x: x[0][0]))
                keywords_found = self.keyword_processor.extract_keywords(wiki_article['text'], span_info=True)
                title_seen = False  # NOTE(review): never set True below
                category_seen = False
                wiki_text_toks = list()
                wiki_text_toks_len = 0  # NOTE(review): never updated below
                links_seen = 0
                kw_seen = 0
                inside_link = False
                entity = None
                inside_kw = False
                after_punct = True
                title_double_newline_seen = 0  # NOTE(review): never updated below
                last_char = None
                reconstructed_wiki_text = ''
                current_snippet = ''
                if (len(links_offsets) == 0):
                    continue
                # Handle a link or keyword starting at offset 0.
                if (links_offsets[links_seen][0][0] == (len(reconstructed_wiki_text) + len(current_snippet))):
                    inside_link = True
                if ((len(keywords_found) > 0) and ((not inside_link) and (keywords_found[kw_seen][1] == (len(reconstructed_wiki_text) + len(current_snippet))))):
                    inside_kw = True
                for (char_idx, char) in enumerate(list(wiki_article['text'])):
                    if category_seen:
                        break
                    # Treat a bare newline as sentence-ending punctuation.
                    if ((char == '\n') and (last_char != '.') and (not after_punct)):
                        char = '.'
                    if ((char == '.') or (last_char == '.')):
                        after_punct = True
                    else:
                        after_punct = False
                    current_snippet = (current_snippet + char)
                    # A new link starts at the current character offset.
                    if ((links_seen < len(links_offsets)) and (links_offsets[links_seen][0][0] == (len(reconstructed_wiki_text) + len(current_snippet)))):
                        if (len(current_snippet) > 0):
                            # Flush pending plain text as 'O'-labelled tokens.
                            current_snippet_tokenized = self.tokenizer.tokenize(current_snippet)
                            wiki_text_toks.extend(zip(current_snippet_tokenized, ['O' for _ in current_snippet_tokenized], ['-' for _ in current_snippet_tokenized]))
                            reconstructed_wiki_text += current_snippet
                            current_snippet = ''
                        normalized_wiki_entity = normalize_wiki_entity(links_offsets[links_seen][1][1], replace_ws=True)
                        entity = self.redirects_en.get(normalized_wiki_entity, normalized_wiki_entity)
                        if (entity in self.popular_entity_counter_dict):
                            inside_link = True
                        inside_kw = False
                    # A keyword match starts here (only outside links).
                    if ((kw_seen < len(keywords_found)) and ((not inside_link) and (keywords_found[kw_seen][1] == (len(reconstructed_wiki_text) + len(current_snippet))))):
                        if (len(current_snippet) > 0):
                            current_snippet_tokenized = self.tokenizer.tokenize(current_snippet)
                            wiki_text_toks.extend(zip(current_snippet_tokenized, ['O' for _ in current_snippet_tokenized], ['-' for _ in current_snippet_tokenized]))
                            reconstructed_wiki_text += current_snippet
                            current_snippet = ''
                        inside_kw = True
                    # The current link ends at this offset.
                    if ((links_seen < len(links_offsets)) and (links_offsets[links_seen][0][1] == (len(reconstructed_wiki_text) + len(current_snippet)))):
                        if ((char_idx < (len(wiki_article['text']) - 1)) and ('Category:' in (current_snippet + wiki_article['text'][(char_idx + 1)]))):
                            # Category links end the article body.
                            category_seen = True
                            continue
                        if inside_link:
                            # Emit the link text as entity-labelled tokens.
                            current_snippet_tokenized = self.tokenizer.tokenize(current_snippet)
                            wiki_text_toks.extend(zip(current_snippet_tokenized, [entity for _ in current_snippet_tokenized], [current_snippet for _ in current_snippet_tokenized]))
                            local_entities_found_counter[entity] += 1
                            local_mention_counter[current_snippet] += 1
                            reconstructed_wiki_text += current_snippet
                            current_snippet = ''
                        links_seen += 1
                        inside_link = False
                        entity = None
                    # The current keyword match ends at this offset.
                    if ((kw_seen < len(keywords_found)) and (keywords_found[kw_seen][2] == (len(reconstructed_wiki_text) + len(current_snippet)))):
                        if ((char_idx < (len(wiki_article['text']) - 1)) and ('Category:' in (current_snippet + wiki_article['text'][(char_idx + 1)]))):
                            category_seen = True
                            continue
                        if inside_kw:
                            current_snippet_tokenized = self.tokenizer.tokenize(current_snippet)
                            # Three-way labelling: title coreference, known
                            # ambiguous mention (UNK), or plain text (O).
                            if ((current_snippet in self.mention_entity_counter_popular_entities) and (wiki_article_normalized_wiki_entity in dict(self.mention_entity_counter_popular_entities[current_snippet])) and corefers_with_title_entity(current_snippet)):
                                local_mention_counter[current_snippet] += 1
                                wiki_text_toks.extend(zip(current_snippet_tokenized, [wiki_article_title_entity for _ in current_snippet_tokenized], [current_snippet for _ in current_snippet_tokenized]))
                            elif (current_snippet in self.mention_entity_counter_popular_entities):
                                local_mention_counter[current_snippet] += 1
                                wiki_text_toks.extend(zip(current_snippet_tokenized, ['UNK' for _ in current_snippet_tokenized], [current_snippet for _ in current_snippet_tokenized]))
                            else:
                                wiki_text_toks.extend(zip(current_snippet_tokenized, ['O' for _ in current_snippet_tokenized], ['-' for _ in current_snippet_tokenized]))
                            reconstructed_wiki_text += current_snippet
                            current_snippet = ''
                        inside_kw = False
                        inside_link = False
                        entity = None
                        kw_seen += 1
                    last_char = char
                # Flush any trailing text after the last boundary.
                current_snippet_tokenized = self.tokenizer.tokenize(current_snippet)
                wiki_text_toks.extend(zip(current_snippet_tokenized, ['O' for _ in current_snippet_tokenized], ['-' for _ in current_snippet_tokenized]))
                reconstructed_wiki_text += current_snippet
                out_file_path = os.path.dirname(f'data/versions/{self.opts.data_version_name}/wiki_training/raw/tmp/{file_name[len_prefix:]}')
                if (not os.path.exists(out_file_path)):
                    os.makedirs(out_file_path, exist_ok=True)
                out_file_name = f"{out_file_path}/{wiki_article['id']}.tsv"
                pandas.DataFrame(wiki_text_toks).to_csv(out_file_name, sep='\t', header=None)
                out_file_names.append(out_file_name)
                local_entities_found_in_article.append((out_file_name, local_entities_found_counter))
        return (out_file_names, local_mention_counter, local_entities_found_in_article)
class TaskParameterDuplicateTypeWarning(UserWarning):
    """Warning: a task provides a parameter type that was already registered."""

    def __init__(self, task, type_: type):
        message = 'Parameter type {} provided by task {} already exists'.format(type_.__name__, type(task).__name__)
        super().__init__(message)
class TrainDataset(Dataset):
    """Training dataset for KG-embedding with uniform negative sampling.

    Each item is (positive_sample, negative_sample, subsampling_weight, mode);
    negatives are drawn uniformly from the global-id range of the head or
    tail type, depending on `mode`.
    """

    def __init__(self, triples, nentity, nrelation, negative_sample_size, mode, count, true_head, true_tail, entity_dict):
        """Store triples and sampling metadata.

        Args:
            triples: dict of parallel lists 'head', 'relation', 'tail',
                'head_type', 'tail_type' (ids are local to their type).
            nentity / nrelation: vocabulary sizes.
            negative_sample_size: negatives drawn per positive triple.
            mode: 'head-batch' or 'tail-batch'.
            count: (entity, relation, type) -> frequency, for subsampling.
            true_head / true_tail: gold neighbours (kept for API parity).
            entity_dict: type -> (start_id, end_id) range in global id space.
        """
        self.len = len(triples['head'])
        self.triples = triples
        self.nentity = nentity
        self.nrelation = nrelation
        self.negative_sample_size = negative_sample_size
        self.mode = mode
        self.count = count
        self.true_head = true_head
        self.true_tail = true_tail
        self.entity_dict = entity_dict

    def __len__(self):
        return self.len

    def __getitem__(self, idx):
        (head, relation, tail) = (self.triples['head'][idx], self.triples['relation'][idx], self.triples['tail'][idx])
        (head_type, tail_type) = (self.triples['head_type'][idx], self.triples['tail_type'][idx])
        # Shift type-local ids into the global entity-id space.
        positive_sample = [(head + self.entity_dict[head_type][0]), relation, (tail + self.entity_dict[tail_type][0])]
        # word2vec-style subsampling weight: 1/sqrt(frequency).
        subsampling_weight = (self.count[(head, relation, head_type)] + self.count[(tail, ((- relation) - 1), tail_type)])
        subsampling_weight = torch.sqrt((1 / torch.Tensor([subsampling_weight])))
        if (self.mode == 'head-batch'):
            negative_sample = torch.randint(self.entity_dict[head_type][0], self.entity_dict[head_type][1], (self.negative_sample_size,))
        elif (self.mode == 'tail-batch'):
            negative_sample = torch.randint(self.entity_dict[tail_type][0], self.entity_dict[tail_type][1], (self.negative_sample_size,))
        else:
            # Fix: a bare `raise` outside an except block raised
            # RuntimeError('No active exception to re-raise'); raise a
            # meaningful error instead.
            raise ValueError('Training batch mode %s not supported' % self.mode)
        positive_sample = torch.LongTensor(positive_sample)
        return (positive_sample, negative_sample, subsampling_weight, self.mode)

    @staticmethod
    def collate_fn(data):
        """Stack a list of __getitem__ outputs into batch tensors.

        Marked @staticmethod (it never used self), so it can also be passed
        from an instance to DataLoader(collate_fn=...) without binding.
        """
        positive_sample = torch.stack([item[0] for item in data], dim=0)
        negative_sample = torch.stack([item[1] for item in data], dim=0)
        subsample_weight = torch.cat([item[2] for item in data], dim=0)
        mode = data[0][3]
        return (positive_sample, negative_sample, subsample_weight, mode)
class Test_estimateAbsoluteMagnitude(TestCase):
    """Tests for estimateAbsoluteMagnitude (spectral type -> absolute magnitude)."""

    def test_works_no_interp(self):
        # Exact table entries require no interpolation.
        self.assertEqual(estimateAbsoluteMagnitude('O9'), (- 4.5))
        self.assertEqual(estimateAbsoluteMagnitude('B5'), (- 1.2))
        self.assertEqual(estimateAbsoluteMagnitude('A5'), 1.95)

    def test_works_no_classnum(self):
        # A bare class letter (no subclass digit) still yields a value.
        self.assertEqual(estimateAbsoluteMagnitude('G'), 5.1)
        self.assertEqual(estimateAbsoluteMagnitude('A'), 1.95)

    def test_works_interp(self):
        # Fractional/intermediate subclasses are interpolated.
        self.assertEqual(estimateAbsoluteMagnitude('A6'), 2.075)
        self.assertEqual(estimateAbsoluteMagnitude('A0.5Iab'), (- 6.35))

    def test_nan_on_invalid_types(self):
        # Unsupported spectral classes produce NaN rather than raising.
        self.assertTrue(math.isnan(estimateAbsoluteMagnitude('L1')))

    def test_works_on_other_L_types(self):
        # Luminosity-class suffixes (V, III, Ia) select different magnitudes.
        self.assertEqual(estimateAbsoluteMagnitude('O9V'), (- 4.5))
        self.assertEqual(estimateAbsoluteMagnitude('B5III'), (- 2.2))
        self.assertEqual(estimateAbsoluteMagnitude('F2Ia'), (- 8.0))
class HFPTTokenizer(object):
    """Wraps a HuggingFace AutoTokenizer for fixed-length (CLIP-style) encoding.

    Fix: the original computed `'gpt' in self.pt_name` before checking for
    None, so the default pt_name=None raised TypeError; the substring test
    is now guarded.
    """

    def __init__(self, pt_name=None):
        """Load the pretrained tokenizer (defaults to 'bert-base-cased')."""
        self.pt_name = pt_name
        self.added_sep_token = 0
        self.added_cls_token = 0
        self.enable_add_tokens = False
        # Guard against pt_name=None before the substring test (bug fix).
        self.gpt_special_case = ((not self.enable_add_tokens) and (pt_name is not None) and ('gpt' in pt_name))
        if (pt_name is None):
            self.tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
        else:
            self.tokenizer = AutoTokenizer.from_pretrained(pt_name)
        if self.enable_add_tokens:
            # Optionally add missing sep/cls special tokens to the vocab.
            if (self.tokenizer.sep_token is None):
                self.tokenizer.add_special_tokens({'sep_token': '<SEP>'})
                self.added_sep_token = 1
            if (self.tokenizer.cls_token is None):
                self.tokenizer.add_special_tokens({'cls_token': '<CLS>'})
                self.added_cls_token = 1
        if self.gpt_special_case:
            # GPT tokenizers lack pad/sep tokens; reuse EOS for both.
            self.tokenizer.pad_token = self.tokenizer.eos_token
            self.tokenizer.sep_token = self.tokenizer.eos_token

    def get_eot_token(self):
        """Return the first id of the end-of-text (sep) token."""
        return self.tokenizer.encode(self.tokenizer.sep_token, add_special_tokens=False)[0]

    def get_sot_token(self):
        """Return the first id of the start-of-text (cls) token."""
        return self.tokenizer.encode(self.tokenizer.cls_token, add_special_tokens=False)[0]

    def get_eot_token_list(self):
        """Return the end-of-text token as a (possibly multi-id) list."""
        return self.tokenizer.encode(self.tokenizer.sep_token, add_special_tokens=False)

    def get_sot_token_list(self):
        """Return the start-of-text token as a (possibly multi-id) list."""
        return self.tokenizer.encode(self.tokenizer.cls_token, add_special_tokens=False)

    def get_tokenizer_obj(self):
        """Return the underlying HuggingFace tokenizer."""
        return self.tokenizer

    def check_added_tokens(self):
        """Return how many special tokens (sep + cls) were added to the vocab."""
        return (self.added_sep_token + self.added_cls_token)

    def tokenize(self, texts: Union[(str, List[str])], context_length: int=77):
        """Encode text(s) into a LongTensor of shape (len(texts), context_length)."""
        if isinstance(texts, str):
            texts = [texts]
        padding = 'max_length'
        seqstart = []
        seqend = []
        max_length = context_length
        # Reserve room for manually prepended/appended special tokens.
        if (self.added_cls_token > 0):
            seqstart = self.get_sot_token_list()
            max_length = (max_length - 1)
        if (self.added_sep_token > 0):
            seqend = self.get_eot_token_list()
            max_length = (max_length - 1)
        tokens = self.tokenizer(texts, padding=padding, truncation=True, max_length=max_length)['input_ids']
        for i in range(len(tokens)):
            tokens[i] = ((seqstart + tokens[i]) + seqend)
        if self.gpt_special_case:
            # Force the final position to EOS for GPT-style models.
            for i in range(len(tokens)):
                tokens[i][(- 1)] = self.get_eot_token()
        result = torch.Tensor(tokens).type(torch.LongTensor)
        return result

    def get_vocab_size(self):
        """Return the base vocabulary size of the wrapped tokenizer."""
        return self.tokenizer.vocab_size

    def __call__(self, texts: Union[(str, List[str])], context_length: int=77):
        return self.tokenize(texts, context_length)
def exclude(cad_prescription_taken_by_patient, patient_1stDX_date, patient_start_date, interval, followup, baseline):
    """Filter drug->patient prescription records by the three cohort criteria.

    Returns (drug -> patient -> sorted dates, patient -> drug -> sorted dates)
    containing only (drug, patient) pairs that satisfy criteria 1-3.

    Fixes: records are parsed once instead of twice per patient, and patients
    whose records are all blank no longer crash on `dates[0]` (IndexError);
    they are skipped.
    """
    cad_prescription_taken_by_patient_exclude = defaultdict(dict)
    cad_patient_take_prescription_exclude = defaultdict(dict)
    for (drug, taken_by_patient) in cad_prescription_taken_by_patient.items():
        for (patient, take_times) in taken_by_patient.items():
            # Parse each (date, days) record once; drop blank entries.
            parsed = [(datetime.strptime(date, '%m/%d/%Y'), int(days)) for (date, days) in take_times if (date and days)]
            if not parsed:
                # Robustness fix: previously dates[0] raised IndexError here.
                continue
            dates = sorted(d for (d, _) in parsed)
            # Last occurrence wins for duplicate dates, as with the original
            # dict comprehension.
            dates_days = {d: n for (d, n) in parsed}
            DX = patient_1stDX_date.get(patient, datetime.max)
            index_date = dates[0]
            start_date = patient_start_date.get(patient, datetime.max)
            if (criteria_1_is_valid(index_date, DX)
                    and criteria_2_is_valid(dates, interval, followup, dates_days)
                    and criteria_3_is_valid(index_date, start_date, baseline)):
                cad_prescription_taken_by_patient_exclude[drug][patient] = dates
                cad_patient_take_prescription_exclude[patient][drug] = dates
    return (cad_prescription_taken_by_patient_exclude, cad_patient_take_prescription_exclude)
class DikSolver():
    """Solve a Rubik's cube by driving Dik T. Winter's solver via pexpect."""

    def __call__(self, facets):
        return self.solve(facets)

    def solve(self, facets, timeout=10, extra_time=2):
        """Run the external `dikcube -p` process and return a move string.

        Waits up to `timeout` seconds for a first solution, then keeps
        polling for up to `extra_time` further seconds for improved ones.
        Raises ValueError on solver error output and RuntimeError on timeout.
        """
        cube_str = self.format_cube(facets)
        cmd = (shlex.quote(sage.features.rubiks.dikcube().absolute_filename()) + ' -p')
        child = pexpect.spawn(cmd)
        child.expect('Initialization done!')
        child.sendline(cube_str)
        child.send(chr(4))  # EOT terminates the solver's input
        ix = child.expect(['Solution[^\n]*:', pexpect.EOF, pexpect.TIMEOUT], timeout=timeout)
        if (ix == 0):
            child.expect(['[^\n]+'])
            sol = child.after.strip()
            start_time = time.time()
            # Keep reading: the solver prints progressively better solutions.
            while (extra_time > (time.time() - start_time)):
                ix = child.expect(['Solution[^\n]*:', pexpect.EOF, pexpect.TIMEOUT], timeout=(extra_time - int((time.time() - start_time))))
                if (ix == 0):
                    child.expect(['[^\n]+'])
                    sol = child.after.strip()
                else:
                    extra_time = 0
            child.close(True)
            sol = bytes_to_str(sol)
            # Dik's output is an inverse sequence with a different face
            # labelling; reverse it, remap faces via rot_map, invert the turn
            # counts (4 - n), then render 1 as plain and 3 as prime.
            return ' '.join(((self.rot_map[m[0]] + str((4 - int(m[1])))) for m in reversed(sol.split(' ')))).replace('1', '').replace('3', "'")
        elif (ix == 1):
            child.close(True)
            raise ValueError(bytes_to_str(child.before))
        else:
            child.close(True)
            raise RuntimeError('timeout')

    def format_cube(self, facets):
        """Translate the 48-facet description into Dik's 54-digit color string."""
        # Eight facets per face, colored 1..6.
        colors = sum([([i] * 8) for i in range(1, 7)], [])
        facet_colors = ([0] * 54)
        for i in range(48):
            f = self.facet_map.index(facets[i])
            facet_colors[f] = colors[i]
        # Fill in the six fixed center facets, which the 48-facet input omits.
        facet_colors[4] = 1
        facet_colors[49] = 6
        for i in range(2, 6):
            facet_colors[(16 + (i * 3))] = i
        return ''.join((str(c) for c in facet_colors))
    # Maps Dik's facet ordering onto this package's numbering (0 = a center).
    facet_map = [1, 2, 3, 4, 0, 5, 6, 7, 8, 9, 10, 11, 17, 18, 19, 25, 26, 27, 33, 34, 35, 12, 0, 13, 20, 0, 21, 28, 0, 29, 36, 0, 37, 14, 15, 16, 22, 23, 24, 30, 31, 32, 38, 39, 40, 41, 42, 43, 44, 0, 45, 46, 47, 48]
    # Face-label translation between Dik's convention and ours.
    rot_map = dict(zip('BLURDF', 'ULFRBD'))
def to_length(nparray, length):
    """Return a pyarrow buffer over nparray, padded to at least `length` items.

    Fix: the original used numpy.empty, so the padding bytes contained
    arbitrary uninitialized heap memory that ended up in the returned
    buffer; zero-initializing makes the output deterministic and avoids
    leaking stale memory contents.
    """
    if len(nparray) < length:
        padded = numpy.zeros(length, dtype=nparray.dtype)
        padded[:len(nparray)] = nparray
    else:
        padded = nparray
    return pyarrow.py_buffer(padded)
def save(obj, filename):
    """Pickle `obj` to `filename`, appending a '.pickle' suffix when absent.

    Fix: the original substring test (`'.pickle' in filename`) wrongly
    skipped the suffix for names that merely contain '.pickle' somewhere
    (e.g. 'backup.pickle.old'); endswith checks the actual extension.
    """
    if not filename.endswith('.pickle'):
        filename += '.pickle'
    with open(filename, 'wb') as handle:
        pickle.dump(obj, handle, protocol=pickle.HIGHEST_PROTOCOL)
def main():
    """Render a timelines graphic from the data file given as argv[1]."""
    (colors, timelines) = read_data(sys.argv[1])
    (lower_bound, upper_bound) = timelines.get_bounds()
    graphic = GraphicRenderer(lower_bound, upper_bound)
    top_legend = TopLegendRenderer()
    # Build the legend: one looked-up color per distinct range value.
    range_values = timelines.get_all_range_values()
    range_colors = []
    for range_value in range_values:
        range_colors.append(colors.lookup(range_value))
    top_legend.set_legends(range_values, range_colors)
    graphic.set_top_legend(top_legend)
    data = TimelinesRenderer()
    data.set_timelines(timelines, colors)
    graphic.set_data(data)
    # Initial selection: a window of 10% of the span, centered on the middle.
    range_mid = ((upper_bound - lower_bound) / 2)
    range_width = ((upper_bound - lower_bound) / 10)
    range_lo = (range_mid - (range_width / 2))
    range_hi = (range_mid + (range_width / 2))
    graphic.set_range(range_lo, range_hi)
    main_window = MainWindow()
    main_window.run(graphic)
def register_Ns3OrganizationIdentifierChecker_methods(root_module, cls):
    """Register the bindings of ns3::OrganizationIdentifierChecker on *cls*.

    Only the default constructor and the copy constructor are bound;
    no other methods are registered for this type.
    """
    cls.add_constructor([])
    copy_arg = param('ns3::OrganizationIdentifierChecker const &', 'arg0')
    cls.add_constructor([copy_arg])
class DWConv(nn.Module):
    """Depthwise 3x3 convolution (groups == channels) preserving spatial size."""

    def __init__(self, dim=768):
        super(DWConv, self).__init__()
        # One 3x3 filter per channel; stride 1 with padding 1 keeps H and W.
        self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)

    def forward(self, x):
        """Apply the depthwise convolution to a (N, C, H, W) tensor."""
        return self.dwconv(x)
class LunaTransformerEncoderLayer(nn.Module):
    """Transformer encoder layer with Linear Unified Nested Attention (Luna).

    Combines Luna attention (which also updates a packed context ``p``) with a
    position-wise feed-forward network; each sub-block is followed by a
    residual connection and layer normalization.
    """

    def __init__(self, d_model: int = 512, num_attention_heads: int = 8, d_ff: int = 2048, dropout_p: float = 0.3) -> None:
        super(LunaTransformerEncoderLayer, self).__init__()
        self.luna_attention = LinearUnifiedNestedAttention(d_model, num_attention_heads)
        self.feed_forward = PositionwiseFeedForwardNetwork(d_model, d_ff, dropout_p)
        self.packed_context_layer_norm = nn.LayerNorm(d_model)
        # Fix: this attribute was assigned twice in the original, constructing
        # a throwaway nn.LayerNorm that was immediately replaced; a single
        # assignment is sufficient and behaviorally identical.
        self.unpacked_context_layer_norm = nn.LayerNorm(d_model)
        self.feed_forward_layer_norm = nn.LayerNorm(d_model)

    def forward(self, inputs: torch.FloatTensor, p: torch.FloatTensor, attention_padding_mask: torch.FloatTensor = None):
        """Return (outputs, packed_context) after attention + FFN with residuals."""
        (unpacked_context, packed_context) = self.luna_attention(query=inputs, key=inputs, value=inputs, p=p, attention_padding_mask=attention_padding_mask)
        # Residual connections: packed context against p, unpacked against inputs.
        packed_context = self.packed_context_layer_norm(packed_context + p)
        unpacked_context = self.unpacked_context_layer_norm(unpacked_context + inputs)
        outputs = self.feed_forward(unpacked_context)
        outputs = self.feed_forward_layer_norm(outputs + unpacked_context)
        return (outputs, packed_context)
# NOTE(review): the bare expression below looks like the argument of a
# decorator stripped during extraction (presumably
# @TokenIndexer.register('characters')) — confirm against upstream.
('characters')
class TokenCharactersIndexer(TokenIndexer[List[int]]):
    """Represents each token as a list of per-character vocabulary indices."""

    def __init__(self, namespace: str='token_characters', character_tokenizer: CharacterTokenizer=CharacterTokenizer()) -> None:
        # Vocabulary namespace for characters, and the tokenizer that splits a
        # token's text into character tokens.
        self._namespace = namespace
        self._character_tokenizer = character_tokenizer

    def count_vocab_items(self, token: Token, counter: Dict[(str, Dict[(str, int)])]):
        """Count each character of *token* into *counter* for vocabulary building."""
        if (token.text is None):
            raise ConfigurationError('TokenCharactersIndexer needs a tokenizer that retains text')
        for character in self._character_tokenizer.tokenize(token.text):
            # Characters that already carry a text_id need no vocabulary entry.
            if (getattr(character, 'text_id', None) is None):
                counter[self._namespace][character.text] += 1

    def tokens_to_indices(self, tokens: List[Token], vocabulary: Vocabulary, index_name: str) -> Dict[(str, List[List[int]])]:
        """Map each token to a list of character ids, keyed by *index_name*."""
        indices: List[List[int]] = []
        for token in tokens:
            token_indices: List[int] = []
            if (token.text is None):
                raise ConfigurationError('TokenCharactersIndexer needs a tokenizer that retains text')
            for character in self._character_tokenizer.tokenize(token.text):
                # Prefer a pre-assigned text_id; otherwise look it up.
                if (getattr(character, 'text_id', None) is not None):
                    index = character.text_id
                else:
                    index = vocabulary.get_token_index(character.text, self._namespace)
                token_indices.append(index)
            indices.append(token_indices)
        return {index_name: indices}

    def get_padding_lengths(self, token: List[int]) -> Dict[(str, int)]:
        # Padding for one token is determined by its character count.
        return {'num_token_characters': len(token)}

    def get_padding_token(self) -> List[int]:
        # An empty character list stands in for a padded-out token.
        return []

    def pad_token_sequence(self, tokens: Dict[(str, List[List[int]])], desired_num_tokens: Dict[(str, int)], padding_lengths: Dict[(str, int)]) -> Dict[(str, List[List[int]])]:
        """Pad both the token dimension and each token's character dimension."""
        key = list(tokens.keys())[0]
        # First pad/truncate the number of tokens.
        padded_tokens = pad_sequence_to_length(tokens[key], desired_num_tokens[key], default_value=self.get_padding_token)
        desired_token_length = padding_lengths['num_token_characters']
        longest_token: List[int] = max(tokens[key], key=len, default=[])
        padding_value = 0
        if (desired_token_length > len(longest_token)):
            # Temporarily append a full-length dummy token so zip_longest pads
            # every token to desired_token_length (not just to the longest).
            padded_tokens.append(([padding_value] * desired_token_length))
        # zip_longest transposes, pads each column, and zip transposes back —
        # right-padding every token's character list with padding_value.
        padded_tokens = list(zip(*itertools.zip_longest(*padded_tokens, fillvalue=padding_value)))
        if (desired_token_length > len(longest_token)):
            # Remove the dummy token added above.
            padded_tokens.pop()
        return {key: [list(token[:desired_token_length]) for token in padded_tokens]}
# Underlying tuple type; kept private so callers only see Metadata.
_MetadataFields = namedtuple('Metadata', ['categorical_limit', 'expected_value', 'feature_specs'])


class Metadata(_MetadataFields):
    """Immutable record of (categorical_limit, expected_value, feature_specs)."""

    # No per-instance __dict__: instances stay as lightweight as plain tuples.
    __slots__ = ()
# NOTE(review): the bare expression below looks like the remnant of a stripped
# decorator (presumably @click.command()) — confirm against upstream.
()
def main():
    """Interactively build a slurm batch script from TEMPLATE and optionally sbatch it.

    NOTE(review): Path here behaves like path.py's Path (has .abspath(),
    .makedirs(), and supports '/' followed by '+' string concatenation),
    not pathlib.Path — confirm which library the file imports.
    """
    out_err_log_dir_path = Path('slurm/log')
    if (not out_err_log_dir_path.exists()):
        out_err_log_dir_path.makedirs()
    exp_name = click.prompt(' experiment name', type=str)
    # Default the conf file to the experiment-specific one when it exists.
    if Path(f'conf/{exp_name}.yaml').exists():
        conf_file_name = click.prompt(' conf file name', default=f'{exp_name}.yaml')
    else:
        conf_file_name = click.prompt(' conf file name', default='default.yaml')
    # A name containing '/' is treated as a full path; otherwise it lives in conf/.
    if ('/' in conf_file_name):
        conf_file_path = conf_file_name
    else:
        conf_file_path = f'conf/{conf_file_name}'
    project_dir_path = Path('.').abspath()
    # Fill the script template placeholders.
    text = TEMPLATE
    text = text.replace('**exp**', exp_name)
    text = text.replace('**cnf**', conf_file_path)
    text = text.replace('**project**', project_dir_path)
    # NOTE(review): presumably 'flanzi' identifies a host where the conda env
    # activation must be disabled — confirm.
    if ('flanzi' in project_dir_path):
        text = text.replace('source activate python3', '#source activate python3')
    print('\n\n')
    print(text)
    out_file_path = ((Path('slurm') / exp_name) + '.sh')
    out_file_path.write_text(text=text)
    print('\n')
    if click.confirm(' sbatch now?', default=True):
        print('\n\n')
        command = f'sbatch {out_file_path}'
        process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
        (output, error) = process.communicate()
        print('', output.decode())
        if error:
            print(' [ERROR] - ', error.decode())
class AudioClassifier(nn.Module):
    """Two-class classifier over raw waveforms: Wav2Vec encoder + linear head."""

    def __init__(self):
        super().__init__()
        # Randomly initialised (pretrained=False) Wav2Vec feature extractor.
        self.encoder = wavencoder.models.Wav2Vec(pretrained=False)
        self.classifier = nn.Linear(512, 2)

    def forward(self, x):
        features = self.encoder(x)
        # Mean-pool over the time axis -> one 512-d vector per example.
        pooled = torch.mean(features, dim=2)
        return self.classifier(pooled)
class DotLayer(Layer):
    """Per-example batched tensor dot of two inputs, clipped to the float range."""

    def __init__(self, *args, **kwargs):
        super(DotLayer, self).__init__(*args, **kwargs)

    def call(self, inputs, mask=None):
        left = inputs[0]
        right = inputs[1]

        def batched_dot(idx, a, b):
            # Contract one axis of a[idx] against b[idx], then clamp before
            # casting to the configured float type to avoid overflow.
            prod = T.batched_tensordot(a[idx], b[idx], 1)
            return T.clip(prod, FLOAT_MIN, FLOAT_MAX).astype(FLOATX)

        # theano.map returns (outputs, updates); only the outputs are needed.
        mapped = theano.map(batched_dot, T.arange(left.shape[0]), non_sequences=[left, right])
        return mapped[0]

    def get_output_shape_for(self, input_shape):
        first = input_shape[0]
        # Output keeps the batch and first feature dimension of input 0.
        return (first[0], first[1])
def create_task(args):
    """Build an EmmentalTask (feature extractor + linear prediction head) from *args*.

    Dispatches on args.model to construct the backbone with has_fc=False and
    determine its output feature dimension, which sizes the task head.
    """
    task_name = args.task
    n_class = TASK_NUM_CLASS[args.task]
    if (args.model in ['wide_resnet']):
        feature_extractor = ALL_MODELS[args.model](args.wide_resnet_depth, args.wide_resnet_width, args.wide_resnet_dropout, n_class, has_fc=False)
        # Probe the feature size with a dummy forward pass (last dim of output).
        n_hidden_dim = feature_extractor(torch.randn(TASK_INPUT_SIZE[args.task])).size()[(- 1)]
    elif (args.model == 'mlp'):
        n_hidden_dim = args.mlp_hidden_dim
        # MLPs flatten the input, so the input dim is the product of the shape.
        input_dim = np.prod(TASK_INPUT_SIZE[args.task])
        feature_extractor = ALL_MODELS[args.model](input_dim, n_hidden_dim, n_class, has_fc=False)
    elif (args.model == 'shake_shake'):
        feature_extractor = ALL_MODELS[args.model](args.shake_shake_depth, args.shake_shake_base_channels, args.shake_shake_shake_forward, args.shake_shake_shake_backward, args.shake_shake_shake_image, TASK_INPUT_SIZE[args.task], n_class, has_fc=False)
        n_hidden_dim = feature_extractor.feature_size
    elif (args.model == 'pyramidnet'):
        feature_extractor = ALL_MODELS[args.model](args.task, args.pyramidnet_depth, args.pyramidnet_alpha, args.pyramidnet_bottleneck, has_fc=False)
        n_hidden_dim = feature_extractor.final_featuremap_dim
    else:
        raise ValueError(f'Invalid model {args.model}')
    # Softmax cross-entropy loss and classification output head.
    loss = sce_loss
    output = output_classification
    logger.info(f'Built model: {feature_extractor}')
    # Task flow: image -> feature extractor -> task-specific linear head.
    return EmmentalTask(name=args.task, module_pool=nn.ModuleDict({'feature': feature_extractor, f'{task_name}_pred_head': nn.Linear(n_hidden_dim, n_class)}), task_flow=[{'name': 'feature', 'module': 'feature', 'inputs': [('_input_', 'image')]}, {'name': f'{task_name}_pred_head', 'module': f'{task_name}_pred_head', 'inputs': [('feature', 0)]}], loss_func=partial(loss, f'{task_name}_pred_head'), output_func=partial(output, f'{task_name}_pred_head'), scorer=Scorer(metrics=TASK_METRIC[task_name]))
class BinaryExpr(Expr):
    """A binary operation over two sub-expressions, type-checked at construction.

    NOTE(review): __init__ reads ``lhs.type`` / ``rhs.type`` as plain
    attributes while this class defines ``type`` (and operator/lhs/rhs/
    operands) as methods — upstream most likely had @property decorators that
    were stripped during extraction; confirm before relying on call syntax.
    """
    _operator: BinaryOperator
    _lhs: Expr
    _rhs: Expr

    def __init__(self, operator: BinaryOperator, lhs: Expr, rhs: Expr):
        super().__init__()
        if ((operator is BinaryOperator.EQ) or (operator is BinaryOperator.NE)):
            # Equality comparisons only require both sides to share one type.
            if (lhs.type is not rhs.type):
                raise ValueError('Expression must have the same type: {} and {}'.format(lhs, rhs))
        else:
            # All other operators prescribe a specific operand type.
            expect_ty = binary_param_type(operator)
            if (lhs.type is not expect_ty):
                raise ValueError('Expression is expected to have type {}: {}'.format(expect_ty, lhs))
            if (rhs.type is not expect_ty):
                raise ValueError('Expression is expected to have type {}: {}'.format(expect_ty, rhs))
        self._operator = operator
        self._lhs = lhs
        self._rhs = rhs

    def operator(self) -> BinaryOperator:
        return self._operator

    def lhs(self) -> Expr:
        return self._lhs

    def rhs(self) -> Expr:
        return self._rhs

    def operands(self) -> List[Expr]:
        return [self._lhs, self._rhs]

    def type(self):
        # Result type is determined by the operator, not the operands.
        return binary_return_type(self._operator)

    def __str__(self) -> str:
        return '({} {} {})'.format(self._lhs, self._operator.value, self._rhs)

    def __repr__(self) -> str:
        return 'BinaryExpr({}, {!r}, {!r})'.format(self._operator.name, self._lhs, self._rhs)
class Average_Meter:
    """Accumulates numeric values per key and reports their running means."""

    def __init__(self, keys):
        # *keys* fixes the tracked series; adding an unknown key raises KeyError.
        self.keys = keys
        self.clear()

    def add(self, dic):
        """Append each value in *dic* to the series of its key."""
        for key in dic:
            self.data_dic[key].append(dic[key])

    def get(self, keys=None, clear=False):
        """Return the mean of each requested series.

        Defaults to all tracked keys; a single requested key returns a bare
        float instead of a one-element list. With clear=True the series are
        reset after the means are computed.
        """
        selected = self.keys if keys is None else keys
        means = [float(np.mean(self.data_dic[k])) for k in selected]
        if clear:
            self.clear()
        return means[0] if len(means) == 1 else means

    def clear(self):
        """Reset every tracked series to empty."""
        self.data_dic = {key: [] for key in self.keys}
def test_scalar():
    """Assigning a scalar to a record field broadcasts it across every row."""
    records = ak.Array({'x': [1, 2, 3]})
    records['x'] = 4
    expected = [{'x': 4}, {'x': 4}, {'x': 4}]
    assert records.to_list() == expected
def main():
    """Plot the regions-ablation accuracy curves and save/show the figure.

    NOTE(review): relies on module-level data and style globals (region_ids,
    ad_10_list, rete_2_list, re_2_list, te_2_list, mean_list, markers, labels,
    marker_size, linewidth, _COLORS, font_size, handlelength, xlim, regions,
    viewer) defined elsewhere in the file.
    """
    print('ablation for regions: 10%')
    fig = plt.figure(figsize=(10, 5), dpi=150)
    plt.grid(True)
    # plot_i indexes markers/labels and strides the shared color table.
    plot_i = 0
    (h2,) = plt.plot(region_ids, ad_10_list, '--', marker=markers[plot_i], markersize=marker_size, markerfacecolor='none', label=labels[plot_i], linewidth=linewidth, color=_COLORS[(plot_i * 5)], clip_on=False)
    plot_i += 1
    (h3,) = plt.plot(region_ids, rete_2_list, '--', marker=markers[plot_i], markersize=marker_size, markerfacecolor='none', label=labels[plot_i], linewidth=linewidth, color=_COLORS[(plot_i * 5)], clip_on=False)
    plot_i += 1
    (h4,) = plt.plot(region_ids, re_2_list, '--', marker=markers[plot_i], markersize=marker_size, markerfacecolor='none', label=labels[plot_i], linewidth=linewidth, color=_COLORS[(plot_i * 5)], clip_on=False)
    plot_i += 1
    (h5,) = plt.plot(region_ids, te_2_list, '--', marker=markers[plot_i], markersize=marker_size, markerfacecolor='none', label=labels[plot_i], linewidth=linewidth, color=_COLORS[(plot_i * 5)], clip_on=False)
    plot_i += 1
    # The mean curve is solid and uses a fixed green color.
    (h1,) = plt.plot(region_ids, mean_list, marker=markers[plot_i], markersize=marker_size, markerfacecolor='none', label=labels[plot_i], linewidth=linewidth, color=(0, (112 / 255.0), (68 / 255.0)), clip_on=False)
    handles = [h2, h3, h4, h5, h1]
    plt.legend(handles, labels, loc='upper left', fontsize=font_size, fancybox=True, framealpha=0.5, handlelength=handlelength)
    plt.xlim(xlim)
    plt.ylim([57, 100])
    ax = plt.gca()
    ax.set_xlabel('number of regions', fontsize=font_size)
    ax.set_ylabel('accuracy (%)', fontsize=font_size)
    # X ticks show the actual region counts rather than their indices.
    plt.xticks(region_ids, labels=[str(_r) for _r in regions])
    ax.set_yticks([60, 70, 80, 90, 100])
    ax.xaxis.set_tick_params(labelsize=font_size)
    ax.yaxis.set_tick_params(labelsize=font_size)
    save_path = 'output/lm/ablation_regions.png'
    mmcv.mkdir_or_exist(osp.dirname(save_path))
    plt.savefig(save_path, dpi=fig.dpi, bbox_inches='tight')
    print('save fig path: ', save_path)
    # Open the saved image with the configured external viewer.
    os.system(f'{viewer} {save_path}')
def to_f32(params):
    """Return *params* with every bfloat16 leaf upcast to float32.

    Leaves of any other dtype pass through unchanged; the pytree structure
    is preserved.
    """
    def upcast(leaf):
        if leaf.dtype == jnp.bfloat16:
            return leaf.astype(jnp.float32)
        return leaf

    return jax.tree_util.tree_map(upcast, params)
class Conjunction(JunctorCondition):
    """Logical AND over subconditions in a PDDL-style condition tree."""

    def _simplified(self, parts):
        # Flatten nested conjunctions, drop Truth parts, short-circuit on Falsity.
        result_parts = []
        for part in parts:
            if isinstance(part, Conjunction):
                result_parts += part.parts
            elif isinstance(part, Falsity):
                return Falsity()
            elif (not isinstance(part, Truth)):
                result_parts.append(part)
        if (not result_parts):
            # An empty conjunction is vacuously true.
            return Truth()
        if (len(result_parts) == 1):
            return result_parts[0]
        return Conjunction(result_parts)

    def to_untyped_strips(self):
        # Concatenate the STRIPS translations of all conjuncts.
        result = []
        for part in self.parts:
            result += part.to_untyped_strips()
        return result

    def instantiate(self, var_mapping, init_facts, fluent_facts, result):
        # Grounds every conjunct into *result*; callers must pass it empty.
        assert (not result), 'Condition not simplified'
        for part in self.parts:
            part.instantiate(var_mapping, init_facts, fluent_facts, result)

    def negate(self):
        # De Morgan: not (A and B) == (not A) or (not B).
        return Disjunction([p.negate() for p in self.parts])
def is_aux_input(p):
    """True when *p*'s name_hint marks it as an auxiliary input.

    Auxiliary inputs are those whose hint starts with 'dropout:' or 'gr:out:'.
    """
    aux_prefixes = ('dropout:', 'gr:out:')
    return p.name_hint.startswith(aux_prefixes)
def joint_mloss(args):
    """Jointly train PinSAGE models and inter-snapshot transforms with multi-step alignment loss.

    Iterates over consecutive graph snapshots: trains a fresh PinSAGE on each,
    and (after the first) learns a transformation Bk mapping the new embedding
    space back to the previous one, chaining all learned transforms to align
    embeddings to the initial space. Checkpoints models, transforms, and
    metrics after every snapshot.
    """
    print(f'Joint training of Mk and Bk ({args.trans_type}) with multi-step alignment loss.')
    print(args)
    device = (torch.device(('cuda:' + str(args.device))) if torch.cuda.is_available() else torch.device('cpu'))
    dataset = DynRecDataset(name=args.dataset)
    pinsage_hyperparam_list = get_pinsage_hyperparam_list(dataset_name=args.dataset)
    # Tensorboard logging is optional; an existing log dir is wiped first.
    if (args.log_dir != ''):
        if os.path.exists(args.log_dir):
            print('Removing existing tensorboard log..')
            shutil.rmtree(args.log_dir)
        writer = SummaryWriter(log_dir=args.log_dir)
    else:
        writer = None
    if (args.checkpoint_path != ''):
        os.makedirs(os.path.dirname(args.checkpoint_path), exist_ok=True)
    # Recall@K cutoffs evaluated; K_primary drives model selection.
    K_list = [10, 20, 50, 100]
    K_primary = 50
    checkpoint = {'time_list': sliced_time_list, 'args': args.__dict__, 'pinsage_hyperparam_list': pinsage_hyperparam_list, 'best_val_recall_dict_list': [], 'best_embedding_list': [], 'model_state_dict_list': []}
    best_trans_fun_list = []
    checkpoint['trans_fun_state_dict_list'] = []
    for i in range((len(sliced_time_list) - 1)):
        # Train on snapshot i, validate on the edges new in snapshot i+1.
        time_train = sliced_time_list[i]
        time_val = sliced_time_list[(i + 1)]
        print(f'=======Train on G{time_train}, evaluate on G{time_val}\G{time_train}')
        (edge_index_useritem_dict, num_users_dict, num_items_dict, _) = split_dynrecdataset(dataset, time_train=time_train, time_val=time_val)
        time_dict = {}
        time_dict['train'] = time_train
        time_dict['val'] = time_val
        print('====Basic stats')
        split_list = ['train', 'val']
        for split in split_list:
            time = time_dict[split]
            print(f'time: {time}')
            print(f'#{split} users: ', num_users_dict[split])
            print(f'#{split} items: ', num_items_dict[split])
            print(f'#{split} edges: ', len(edge_index_useritem_dict[split][0]))
            print()
        # Move item attribute tensors to the training device.
        item_attr_pair_dict = dataset.item_attr_pair_dict(time_dict['val'])
        for (item_attr_name, (item_attr, item_attr_offset)) in item_attr_pair_dict.items():
            item_attr_pair_dict[item_attr_name] = (item_attr.to(device), item_attr_offset.to(device))
        train_dataset = RecDatasetNegsamling(edge_index_useritem_dict['train'][0], edge_index_useritem_dict['train'][1], num_users_dict['train'], num_items_dict['train'])
        train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
        train_config_dict = {'num_users': num_users_dict['train'], 'num_items': num_items_dict['train'], 'user': edge_index_useritem_dict['train'][0].to(device), 'item': edge_index_useritem_dict['train'][1].to(device), 'item_attr_pair_dict': item_attr_pair_dict}
        eval_dict = {}
        evaluator = RecallEvaluator(edge_index_useritem_dict, num_users_dict, num_items_dict)
        eval_dict['evaluator'] = evaluator
        eval_dict['config_dict'] = train_config_dict
        eval_dict['time_train'] = time_train
        eval_dict['time_val'] = time_val
        print('Training PinSAGE...')
        model = PinSAGE(emb_dim=pinsage_hyperparam_list[i]['emb_dim'], num_layers=pinsage_hyperparam_list[i]['num_layers'], item_encoder=ItemEncoder(pinsage_hyperparam_list[i]['emb_dim'], dataset.num_item_attrs_dict)).to(device)
        print('PinSAGE hyperparameters')
        print(pinsage_hyperparam_list[i]['emb_dim'])
        print(pinsage_hyperparam_list[i]['num_layers'])
        print()
        optimizer = optim.Adam(model.parameters(), lr=0.001)
        if (i == 0):
            # First snapshot: plain PinSAGE training, no alignment needed.
            (best_embedding, best_val_recall_dict, best_model) = train_eval_loop_pinsage(args, model, device, train_dataset, train_loader, optimizer, train_config_dict, eval_dict, K_list, K_primary, time_train, time_val, writer)
            prev_model = best_model
            prev_model.eval()
        else:
            # Later snapshots: also learn a transform Bk back to the previous
            # embedding space, penalized against the previous model's embeddings.
            if (args.trans_type == 'linear'):
                trans_fun = torch.nn.Linear(pinsage_hyperparam_list[i]['emb_dim'], pinsage_hyperparam_list[(i - 1)]['emb_dim'], bias=False).to(device)
            else:
                raise ValueError(f'Unknown transformation type called {args.trans_type}')
            optimizer_trans = optim.Adam(trans_fun.parameters(), lr=0.001)
            prev_model.refresh_all_embeddings(train_config_dict['num_users'], train_config_dict['num_items'], train_config_dict['user'], train_config_dict['item'], train_config_dict['item_attr_pair_dict'])
            (x_user_prev_model, x_item_prev_model) = (prev_model.x_user.detach(), prev_model.x_item.detach())
            (best_embedding, best_val_recall_dict, best_model, best_trans_fun) = train_eval_loop_pinsage_penalize(args, model, device, train_dataset, train_loader, optimizer, train_config_dict, eval_dict, K_list, K_primary, time_train, time_val, writer, x_user_prev_model, x_item_prev_model, trans_fun, optimizer_trans, best_trans_fun_list)
            (emb_user, emb_item) = best_embedding
            # Freeze the learned transform before storing it.
            for param in best_trans_fun.parameters():
                param.requires_grad = False
            best_trans_fun_list.append(best_trans_fun)
            checkpoint['trans_fun_state_dict_list'].append(best_trans_fun.state_dict())
            # Chain all transforms (newest first) to map embeddings back to
            # the initial snapshot's space.
            for best_trans_fun in best_trans_fun_list[::(- 1)]:
                emb_user = best_trans_fun(emb_user)
                emb_item = best_trans_fun(emb_item)
            best_embedding = (emb_user.detach(), emb_item.detach())
            prev_model = best_model
            prev_model.eval()
        print(best_embedding)
        print(best_val_recall_dict)
        checkpoint['best_embedding_list'].append(best_embedding)
        checkpoint['best_val_recall_dict_list'].append(best_val_recall_dict)
        checkpoint['model_state_dict_list'].append(best_model.state_dict())
        # Persist progress after every snapshot so training can be resumed.
        if (args.checkpoint_path != ''):
            torch.save(checkpoint, args.checkpoint_path)
    if (writer is not None):
        writer.close()
# NOTE(review): the call below looks like the remnant of a stripped decorator
# (presumably @registry.register_builder('gqa_instruct')) — confirm upstream.
_builder('gqa_instruct')
class GQAInstructBuilder(BaseDatasetBuilder):
    """Dataset builder for the instruction-formatted GQA VQA dataset."""
    # Dataset classes used for the train and eval splits respectively.
    train_dataset_cls = GQAInstructDataset
    eval_dataset_cls = GQAEvalDataset
    # Named build configurations -> their YAML config paths.
    DATASET_CONFIG_DICT = {'default': 'configs/datasets/gqa/defaults_instruct.yaml', 'balanced_val': 'configs/datasets/gqa/balanced_val_instruct.yaml', 'balanced_testdev': 'configs/datasets/gqa/balanced_testdev_instruct.yaml'}
class TestOptions(BaseOptions):
    """Command-line options for the test/inference phase, extending BaseOptions."""

    def initialize(self):
        BaseOptions.initialize(self)
        add = self.parser.add_argument
        add('--ntest', type=int, default=float('inf'), help='# of test examples.')
        add('--results_dir', type=str, default='./results', help='saves results_cycle here.')
        add('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
        add('--phase', type=str, default='test', help='train, val, test, etc')
        add('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        add('--how_many', type=int, default=50, help='how many test images to run')
        # Mark this option set as inference-time, not training.
        self.isTrain = False
class Clef2014Dataset(object):
    """Loader for CLEF 2014 clinical-annotation data (pipe-delimited files).

    Reads annotation files from anno_dir, their source documents from doc_dir,
    and a train/dev/test split definition from splits_filepath.
    """

    def __init__(self, anno_dir, doc_dir, splits_filepath):
        self.anno_dir = anno_dir
        self.doc_dir = doc_dir
        self.splits_filepath = splits_filepath
        # Attribute slot types parsed from each annotation row, in file order.
        self.types = ['Negation', 'Subject', 'Uncertainty', 'Course', 'Severity', 'Conditional', 'Generic', 'BodyLocation', 'Timex']
        self.annotations = self.get_annotations()

    def get_annotations(self):
        """Map doc names to files, record their splits, and load all annotations."""
        (anno_map, doc_map) = ({}, {})
        anno_splits = {'train': [], 'dev': [], 'test': []}
        with open(self.splits_filepath, 'r') as f:
            splits = json.load(f)
        splits = {k: set(v) for (k, v) in splits.items()}
        # Invert split->docs into doc->split for O(1) lookup below.
        doc_to_split = {doc_name: split for (split, doc_list) in splits.items() for doc_name in doc_list}
        anno_filelist = glob.glob(f'{self.anno_dir}/*')
        for fname in anno_filelist:
            # Doc name = file basename without extension.
            doc_name = fname.split('/')[(- 1)].split('.')[0]
            anno_map[doc_name] = fname
            anno_splits[doc_to_split[doc_name]].append(doc_name)
        docs_filelist = glob.glob(f'{self.doc_dir}/*')
        for fname in docs_filelist:
            doc_name = fname.split('/')[(- 1)].split('.')[0]
            doc_map[doc_name] = fname
        self.splits = splits
        self.anno_splits = anno_splits
        return self._load(doc_map, anno_map)

    def _fix_annotation_errs(self, rela):
        """Patch known off-by-one span errors in specific gold annotations in place."""
        disorder = rela['disorder']
        if ((disorder.doc_name == '18908-109838-ECHO_REPORT') and (disorder.span[0] == (886, 910))):
            entity = Entity(disorder.id_, disorder.doc_name, disorder.type_, ((887, 910),), 'Paradoxic septal motion')
            entity.attribs = {key: value for (key, value) in disorder.attribs.items()}
            rela['disorder'] = entity
        elif ((disorder.doc_name == '04303-005081-DISCHARGE_SUMMARY') and (disorder.span[0] == (5854, 5869))):
            entity = Entity(disorder.id_, disorder.doc_name, disorder.type_, ((5854, 5870),), 'pain medications')
            entity.attribs = {key: value for (key, value) in disorder.attribs.items()}
            rela['disorder'] = entity
        elif ((disorder.doc_name == '13748-001753-DISCHARGE_SUMMARY') and (disorder.span[0] == (5560, 5581))):
            entity = Entity(disorder.id_, disorder.doc_name, disorder.type_, ((5561, 5581),), 'paraseptal emphysema')
            entity.attribs = {key: value for (key, value) in disorder.attribs.items()}
            rela['disorder'] = entity
        elif ((disorder.doc_name == '22567-017288-DISCHARGE_SUMMARY') and (disorder.span[0] == (8876, 8914))):
            entity = Entity(disorder.id_, disorder.doc_name, disorder.type_, ((8877, 8914),), 'chronic obstructive pulmonary disease')
            entity.attribs = {key: value for (key, value) in disorder.attribs.items()}
            rela['disorder'] = entity

    def _load_doc(self, fpath, document):
        """Parse one pipe-delimited annotation file into CLEFSlotFilled relations."""
        import collections
        # Tallies CUI field categories for debugging malformed rows.
        debug = collections.defaultdict(int)
        relations = []
        with open(fpath, 'r') as fp:
            for line in fp:
                # Fields are separated by one or more '|' characters.
                row = re.split('[|]+', line.strip())
                doc_name = row[0].split('.')[0]
                rela = {}
                cui = row[17]
                if (cui.lower() == 'null'):
                    debug['NULL'] += 1
                elif (cui.strip()[0] == 'C'):
                    debug['CUI'] += 1
                else:
                    debug[cui] += 1
                (cue, class_norm) = row[1:3]
                rela['disorder'] = get_entity(class_norm, 'Disorder', cue, doc_name, document)
                # Remaining attribute slots come in (norm, cue) field pairs;
                # field 19 (doctime) is skipped by the index ranges.
                for (i, class_name) in zip((list(range(3, 19, 2)) + list(range(20, 22, 2))), self.types):
                    (class_norm, cue) = row[i:(i + 2)]
                    m = get_entity(class_norm, class_name, cue, doc_name, document)
                    rela[class_name.lower()] = m
                rela['doctime'] = row[19]
                self._fix_annotation_errs(rela)
                relations.append(CLEFSlotFilled(**rela))
        # More than the expected NULL/CUI buckets means unexpected field values.
        if (len(debug) > 2):
            print(fpath, '\n', debug)
        return relations

    def _load(self, doc_map, anno_map):
        """Load every annotated document; returns {doc_name: [relations]}."""
        annotations = {}
        for doc_name in anno_map:
            anno_filepath = anno_map[doc_name]
            doc_text = open(doc_map[doc_name], 'r').read()
            annotations[doc_name] = self._load_doc(anno_filepath, doc_text)
        return annotations
def create_scenario(state1, state2, seed_index, fidelity=1.0):
    """Run one BBPSSW entanglement-purification round between two fake nodes.

    Sets up two nodes connected by classical channels, two 'kept' and two
    'measured' memory pairs initialised to *state1*/*state2*, runs the
    purification protocols to completion, and returns the timeline, memories,
    and protocol objects for inspection.
    """
    tl = Timeline()
    tl.show_progress = False
    a1 = FakeNode('a1', tl)
    a2 = FakeNode('a2', tl)
    # Distinct per-node seeds derived from seed_index for reproducibility.
    a1.set_seed((2 * seed_index))
    a2.set_seed(((2 * seed_index) + 1))
    cc0 = ClassicalChannel('cc0', tl, 0, 100000.0)
    cc1 = ClassicalChannel('cc1', tl, 0, 100000.0)
    cc0.delay = ONE_MILLISECOND
    cc1.delay = ONE_MILLISECOND
    # One classical channel per direction.
    cc0.set_ends(a1, a2.name)
    cc1.set_ends(a2, a1.name)
    kept1 = Memory('kept1', tl, fidelity=fidelity, frequency=0, efficiency=1, coherence_time=1, wavelength=HALF_MICRON)
    kept2 = Memory('kept2', tl, fidelity=fidelity, frequency=0, efficiency=1, coherence_time=1, wavelength=HALF_MICRON)
    meas1 = Memory('mea1', tl, fidelity=fidelity, frequency=0, efficiency=1, coherence_time=1, wavelength=HALF_MICRON)
    meas2 = Memory('mea2', tl, fidelity=fidelity, frequency=0, efficiency=1, coherence_time=1, wavelength=HALF_MICRON)
    tl.init()
    # Load the joint quantum states onto the kept and measured pairs.
    tl.quantum_manager.set([kept1.qstate_key, kept2.qstate_key], state1)
    tl.quantum_manager.set([meas1.qstate_key, meas2.qstate_key], state2)
    # Cross-link the entangled-memory bookkeeping between the two nodes.
    kept1.entangled_memory = {'node_id': 'a2', 'memo_id': 'kept2'}
    kept2.entangled_memory = {'node_id': 'a1', 'memo_id': 'kept1'}
    meas1.entangled_memory = {'node_id': 'a2', 'memo_id': 'meas2'}
    meas2.entangled_memory = {'node_id': 'a1', 'memo_id': 'meas1'}
    kept1.fidelity = kept2.fidelity = meas1.fidelity = meas2.fidelity = fidelity
    ep1 = BBPSSW(a1, 'a1.ep1', kept1, meas1)
    ep2 = BBPSSW(a2, 'a2.ep2', kept2, meas2)
    a1.protocols.append(ep1)
    a2.protocols.append(ep2)
    ep1.set_others(ep2.name, a2.name, [kept2.name, meas2.name])
    ep2.set_others(ep1.name, a1.name, [kept1.name, meas1.name])
    ep1.start()
    ep2.start()
    tl.run()
    # After purification the measured pair must have been consumed (unlinked).
    assert (meas1.entangled_memory == meas2.entangled_memory == {'node_id': None, 'memo_id': None})
    return (tl, kept1, kept2, meas1, meas2, ep1, ep2)
class TestSampleModels(TestCase):
    """Smoke tests for pinocchio's sample model builders."""

    def setUp(self):
        pass

    def test_all_sampled_models(self):
        # Randomized humanoid builds: every call must produce a distinct model.
        random_a = pin.buildSampleModelHumanoidRandom()
        random_b = pin.buildSampleModelHumanoidRandom(True)
        random_c = pin.buildSampleModelHumanoidRandom(False)
        self.assertTrue(random_a != random_b)
        self.assertTrue(random_a != random_c)
        arm = pin.buildSampleModelManipulator()
        if pin.WITH_HPP_FCL:
            arm_geometry = pin.buildSampleGeometryModelManipulator(arm)
        # Deterministic humanoid builds: the default equals the True variant
        # and differs from the False variant.
        plain_a = pin.buildSampleModelHumanoid()
        plain_b = pin.buildSampleModelHumanoid(True)
        plain_c = pin.buildSampleModelHumanoid(False)
        self.assertTrue(plain_a == plain_b)
        self.assertTrue(plain_a != plain_c)
        if pin.WITH_HPP_FCL:
            humanoid_geometry = pin.buildSampleGeometryModelHumanoid(plain_a)
def jacobi_symbol(a, b):
    """Jacobi symbol (a/b), defined only for odd b.

    Raises ValueError when b is even; otherwise delegates to the Kronecker
    symbol, which coincides with the Jacobi symbol for odd moduli.
    """
    if b % 2 == 0:
        raise ValueError('second input must be odd, %s is not odd' % b)
    return kronecker_symbol(a, b)
def is_image_file(p: os.PathLike) -> bool:
    """True when *p*'s extension (case-insensitive) is registered with PIL."""
    _, ext = os.path.splitext(p)
    return ext.lower() in Image.EXTENSION
def test_missing_predict_proba():
    """SelfTrainingClassifier must raise when the base estimator lacks predict_proba."""
    # SVC with probability=False does not expose predict_proba.
    base_estimator = SVC(probability=False, gamma='scale')
    self_training = SelfTrainingClassifier(base_estimator)
    # Uses module-level fixtures X_train / y_train_missing_labels.
    with pytest.raises(AttributeError, match='predict_proba is not available'):
        self_training.fit(X_train, y_train_missing_labels)
def best_loss(tape):
    """Smallest loss among the 'submit' entries of *tape*.

    Each submit entry is a tuple whose third element is the loss. Like the
    underlying min(), raises ValueError when the tape contains no submits.
    """
    return min(entry[2] for entry in tape if entry[0] == 'submit')
def get_tokens_with_boxes(unnormalized_word_boxes, pad_token_box, word_ids, max_seq_len=512):
    """Expand word-level boxes to token level, then pad/truncate to max_seq_len.

    Each token inherits the box of its word (via *word_ids*); a None word id
    marks the start of special/padding tokens, which receive *pad_token_box*.

    Fix: the original raised NameError for an empty *word_ids* because the
    loop index was never bound; initializing it to -1 makes an empty input
    yield max_seq_len pad boxes instead.
    """
    unnormalized_token_boxes = []
    i = -1  # defined even when word_ids is empty
    for i, word_idx in enumerate(word_ids):
        if word_idx is None:
            # Remaining positions are special/pad tokens without a word box.
            break
        unnormalized_token_boxes.append(unnormalized_word_boxes[word_idx])
    # Pad the tail positions that followed the first None word id.
    num_pad_tokens = len(word_ids) - i - 1
    if num_pad_tokens > 0:
        unnormalized_token_boxes.extend([pad_token_box] * num_pad_tokens)
    # Right-pad (or truncate below) to exactly max_seq_len boxes.
    if len(unnormalized_token_boxes) < max_seq_len:
        unnormalized_token_boxes.extend([pad_token_box] * (max_seq_len - len(unnormalized_token_boxes)))
    return unnormalized_token_boxes[:max_seq_len]
class Confidence(object):
    """Bootstrap confidence intervals over evaluation metrics.

    Resamples per-document counts *trials* times (in parallel via joblib) and
    reports percentile intervals for each measure/metric pair.

    NOTE(review): ``read_tab_format`` (no self) and ``add_arguments`` (cls)
    read like a @staticmethod and a @classmethod whose decorators were
    stripped during extraction — confirm against upstream. The mutable list
    defaults on __init__ and calibrate_trials are shared across calls; they
    are only read here, but treat with care.
    """

    def __init__(self, system, gold, trials=N_TRIALS, percentiles=(90, 95, 99), n_jobs=1, metrics=['precision', 'recall', 'fscore'], measures=DEFAULT_MEASURE, fmt='none', type_weights=None):
        # joblib is an optional dependency; fail early with a clear message.
        if (Parallel is None):
            raise ImportError('Package: "joblib" not available, please install to run significance tests.')
        self.system = system
        self.gold = gold
        self.trials = trials
        self.n_jobs = n_jobs
        self.measures = parse_measures((measures or DEFAULT_MEASURE), incl_clustering=False)
        self.metrics = metrics
        self.percentiles = percentiles
        # fmt may be a key into FMTS or a custom callable.
        self.fmt = (self.FMTS[fmt] if (not callable(fmt)) else fmt)
        self.weighting = load_weighting(type_weights=type_weights)

    def calibrate_trials(self, trials=[100, 250, 500, 1000, 2500, 5000, 10000], max_trials=20000):
        """Estimate interval stability for several trial counts (prints a table)."""
        import numpy as np
        # Temporarily run the maximum number of trials, then restore.
        (tmp_trials, self.trials) = (self.trials, max_trials)
        matrices = self._read_to_matrices()
        print('measure', 'metric', 'pct', 'trials', 'stdev', sep='\t')
        for measure in self.measures:
            history = self.run_trials(matrices[measure][0])
            for metric in self.metrics:
                X = history[metric]
                for p in self.percentiles:
                    # v is the lower-tail percentile for a two-sided p% interval.
                    v = ((100 - p) / 2)
                    for n in trials:
                        # Std-dev of the percentile estimate across 100 subsamples.
                        stats = [_percentile(sorted(random.sample(X, n)), v) for i in range(100)]
                        print(measure, metric, p, n, np.std(stats), sep='\t')
        self.trials = tmp_trials

    def run_trials(self, per_doc):
        """Run the bootstrap trials in parallel; returns metric -> list of values."""
        results = Parallel(n_jobs=self.n_jobs)((delayed(bootstrap_trials)(per_doc, share, self.metrics) for share in _job_shares(self.n_jobs, self.trials)))
        history = defaultdict(list)
        for res in results:
            for metric in self.metrics:
                history[metric].extend(res[metric])
        return history

    def intervals(self, per_doc):
        """Two-sided percentile intervals per metric: {metric: {p: (lo, hi)}}."""
        history = self.run_trials(per_doc)
        ret = {}
        for (metric, values) in history.items():
            values.sort()
            ret[metric] = {p: (_percentile(values, ((100 - p) / 2)), _percentile(values, (100 - ((100 - p) / 2)))) for p in self.percentiles}
        return ret

    def _read_to_matrices(self):
        """Read gold/system files and count per-document/overall matrices per measure."""
        gold = list(Reader(utf8_open(self.gold)))
        system = list(Reader(utf8_open(self.system)))
        doc_pairs = list(Evaluate.iter_pairs(system, gold))
        counts = {}
        for (measure, per_doc, overall) in Evaluate.count_all(doc_pairs, self.measures, weighting=self.weighting):
            counts[measure] = (per_doc, overall)
        return counts

    def calculate_all(self):
        """Overall scores and bootstrap intervals for every configured measure."""
        counts = self._read_to_matrices()
        results = [{'measure': measure, 'overall': {k: v for (k, v) in overall.results.items() if (k in self.metrics)}, 'intervals': self.intervals(per_doc)} for (measure, (per_doc, overall)) in sorted(counts.items(), key=(lambda tup: self.measures.index(tup[0])))]
        return results

    def __call__(self):
        # Format the full result set with the configured formatter.
        return self.fmt(self, self.calculate_all())

    def tab_format(self, data):
        """Render calculate_all() output as an aligned tab-separated table."""
        percentiles = sorted(self.percentiles)
        # Columns: measure, metric, lower bounds (desc), score, upper bounds (asc).
        header = ((([u'measure', u'metric'] + [u'{:d}%('.format(p) for p in reversed(percentiles)]) + [u'score']) + [u'){:d}%'.format(p) for p in percentiles])
        # Doubled braces: this template is itself .format()ed twice below.
        meta_format = u'{{{{[intervals][{{metric}}][{}][{}]:.3f}}}}'
        formats = (([meta_format.format(p, 0) for p in reversed(percentiles)] + [u'{{[overall][{metric}]:.3f}}']) + [meta_format.format(p, 1) for p in percentiles])
        measure_width = max(map(len, self.measures))
        metric_width = max(map(len, self.metrics))
        fmt = (u'{:%ds}\t{:%ds}' % (measure_width, metric_width))
        rows = []
        for entry in data:
            for metric in self.metrics:
                rows.append(([fmt.format(entry['measure'], metric)] + [cell.format(metric=metric).format(entry) for cell in formats]))
        ret = (fmt + (u'\t{}' * len(formats))).format(*header)
        ret += u''.join(((u'\n' + u'\t'.join(row)) for row in rows))
        return ret

    def read_tab_format(file):
        """Parse a tab_format() table back into calculate_all()-shaped dicts."""
        headers = [field.rstrip() for field in next(file).strip().split('\t')]
        by_measure = {}
        for line in file:
            row = dict(zip(headers, (field.rstrip() for field in line.rstrip().split('\t'))))
            measure = row['measure']
            if (measure not in by_measure):
                # Recover the interval percentiles from the 'NN%(' header fields.
                cis = [int(field[:(- 2)]) for field in headers if (field[(- 2):] == '%(')]
                by_measure[measure] = {'measure': measure, 'overall': {}, 'intervals': {metric: {} for metric in ('precision', 'recall', 'fscore')}}
            metric = row['metric']
            by_measure[measure]['overall'][metric] = float(row['score'])
            for ci in cis:
                by_measure[measure]['intervals'][metric][ci] = (float(row[('%d%%(' % ci)]), float(row[(')%d%%' % ci)]))
        return list(by_measure.values())

    # Named output formatters selectable via the fmt argument.
    FMTS = {'json': json_format, 'tab': tab_format, 'none': no_format}

    def add_arguments(cls, p):
        """Attach this command's CLI arguments to argparse parser *p*."""
        p.add_argument('system', metavar='FILE')
        p.add_argument('-g', '--gold', required=True)
        p.add_argument('-n', '--trials', default=N_TRIALS, type=int)
        p.add_argument('-j', '--n_jobs', default=1, type=int, help='Number of parallel processes, use -1 for all CPUs')
        p.add_argument('-p', '--percentiles', default=(90, 95, 99), type=(lambda x: map(int, x.split(','))), help='Output confidence intervals at these percentiles (default: 90,95,99)')
        p.add_argument('--metrics', default='precision recall fscore'.split(), type=(lambda x: x.split(',')), help='Calculate CIs for which metrics (default: precision,recall,fscore)')
        p.add_argument('-m', '--measure', dest='measures', action='append', metavar='NAME', help=MEASURE_HELP)
        p.add_argument('--type-weights', metavar='FILE', default=None, help='File mapping gold and sys types to a weight, such as produced by weights-for-hierarchy')
        p.set_defaults(cls=cls)
        return p
# NOTE(review): the bare attribute below looks like the remnant of a stripped
# decorator (presumably @pytest.mark.mpl_image_compare) — confirm upstream.
.mpl_image_compare
def test_heatmap_feature_order(explainer):
    """Heatmap plot renders with an explicit (reversed) feature ordering."""
    fig = plt.figure()
    shap_values = explainer(explainer.data)
    # Reverse of the default column order; show=False so the figure is returned
    # for image comparison instead of being displayed.
    shap.plots.heatmap(shap_values, max_display=5, feature_order=np.array(range(shap_values.shape[1]))[::(- 1)], show=False)
    plt.tight_layout()
    return fig
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.