code stringlengths 101 5.91M |
|---|
def vgg16(conv_layer, linear_layer, init_type, **kwargs):
    """Construct a VGG-16 network and initialize its weights.

    Args:
        conv_layer: convolution layer class used by make_layers.
        linear_layer: linear layer class forwarded to VGG.
        init_type: weight-initialization scheme name.
        **kwargs: forwarded to the VGG constructor.

    Returns:
        The initialized VGG model.
    """
    cfg = cfgs['16']
    # Width of the final conv stage = last integer entry of the config
    # (non-int entries are pooling markers).
    last_width = next(v for v in reversed(cfg) if isinstance(v, int))
    model = VGG(make_layers(cfg, conv_layer, batch_norm=False), last_width, linear_layer, **kwargs)
    initialize_weights(model, init_type)
    return model
class Deconv3DBlock(nn.Module):
    """Upsampling block: transposed 3-D conv, then conv + BatchNorm + ReLU."""

    def __init__(self, in_planes, out_planes, kernel_size=3):
        super().__init__()
        stages = [
            SingleDeconv3DBlock(in_planes, out_planes),
            SingleConv3DBlock(out_planes, out_planes, kernel_size),
            nn.BatchNorm3d(out_planes),
            nn.ReLU(True),
        ]
        self.block = nn.Sequential(*stages)

    def forward(self, x):
        out = self.block(x)
        return out
class SupportVectorComponentTest(BaseRegressionComponentTest):
    """Regression-component test case for the LibSVM SVR wrapper.

    Expected scores per dataset variant; None marks iterative-fit variants
    that are not supported by SVR.
    """

    __test__ = True

    res = {
        'default_boston': -0.0,
        'default_boston_iterative': None,
        'default_boston_sparse': -0.0,
        'default_boston_iterative_sparse': None,
        'default_diabetes': 0.0,
        'default_diabetes_iterative': None,
        'default_diabetes_sparse': 0.0,
        'default_diabetes_iterative_sparse': None,
    }

    sk_mod = sklearn.svm.SVR
    module = LibSVM_SVR
def global_version_update(version, patch=False):
    """Write *version* into every registered project file.

    Args:
        version: version string to install.
        patch: when True, skip updating the example files (patch releases
            leave examples untouched).
    """
    for regex, path in REPLACE_FILES.items():
        update_version_in_file(path, version, regex)
    if not patch:
        update_version_in_examples(version)
def random_truncated_masking(x, rand_func=None):
    """Zero out a random suffix of the channel (last) axis per batch element.

    For each batch element a threshold in [0, 1) is drawn; channels whose
    normalized index exceeds that threshold are replaced with zeros.

    Args:
        x: input tensor; axis 0 is batch, the last axis is channels.
        rand_func: callable mapping `num -> [num]` random floats; defaults to
            tf.random_uniform over [0, 1) (TF1-style API).

    Returns:
        Tensor shaped like *x* with a per-sample channel suffix zeroed.
    """
    if (rand_func is None):
        def tf_uniform_random(num):
            return tf.random_uniform([num], minval=0.0, maxval=1.0)
        rand_func = tf_uniform_random
    input_shape = get_shape(x)
    batch_size = input_shape[0]
    input_channels = input_shape[(- 1)]
    input_rank = len(input_shape)
    # One threshold per batch element, reshaped to broadcast over all
    # non-batch axes: shape [B, 1, ..., 1].
    random_mask_trunc = rand_func(batch_size)
    random_mask_trunc = tf.reshape(random_mask_trunc, ([batch_size] + ([1] * (input_rank - 1))))
    # Per-channel index template, reshaped to [1, ..., 1, C] so it broadcasts
    # against the thresholds. NOTE(review): assumes normalized_index_template
    # yields values in [0, 1] — confirm against its definition.
    index_tpl = normalized_index_template(input_channels, dtype=x.dtype)
    index_tpl = tf.reshape(index_tpl, (([1] * (input_rank - 1)) + [input_channels]))
    # Keep channels whose normalized index is at or below the sample threshold.
    chosen_idxb = (index_tpl <= random_mask_trunc)
    masked_x = tf.where(chosen_idxb, x, tf.zeros_like(x))
    return masked_x
def setup(args):
    """Build a frozen detectron2 config from command-line arguments.

    Merges the config file and any CLI overrides into a fresh config,
    freezes it, and runs the framework's default setup (logging, seeding).

    Returns:
        The frozen config object.
    """
    config = get_cfg()
    config.merge_from_file(args.config_file)
    config.merge_from_list(args.opts)
    config.freeze()
    default_setup(config, args)
    return config
class MADMCRScheduler(Scheduler):
    """Scheduler: threshold-based host selection + max-correlation container
    selection, with random placement.

    Keeps rolling utilization histories for hosts and containers.
    """

    def __init__(self):
        super().__init__()
        self.utilHistory = []
        self.utilHistoryContainer = []

    def updateUtilHistoryContainer(self):
        """Append a snapshot of per-container base IPS (0 for empty slots)."""
        snapshot = [cid.getBaseIPS() if cid else 0 for cid in self.env.containerlist]
        self.utilHistoryContainer.append(snapshot)

    def updateUtilHistory(self):
        """Append a snapshot of per-host CPU utilization."""
        self.utilHistory.append([host.getCPU() for host in self.env.hostlist])

    def selection(self):
        """Select containers to migrate from over-threshold hosts."""
        self.updateUtilHistoryContainer()
        overloaded_hosts = self.ThresholdHostSelection()
        return self.MaxCorContainerSelection(overloaded_hosts, self.utilHistoryContainer)

    def placement(self, containerIDs):
        """Place the given containers on randomly chosen hosts."""
        return self.RandomPlacement(containerIDs)
def main():
    """Parse command-line options and run the AutoLRS controller server."""
    cli = argparse.ArgumentParser(description='AutoLRS server.')
    cli.add_argument('--min_lr', help='minimum LR', required=True)
    cli.add_argument('--max_lr', help='maximum LR', required=True)
    cli.add_argument('--host', help='host', default='localhost', type=str)
    cli.add_argument('--port', help='port', required=True, type=int)
    parsed = cli.parse_args()
    server = Controller(parsed.host, parsed.port, parsed.min_lr, parsed.max_lr)
    server.listen()
class AMR():
    """One AMR (Abstract Meaning Representation) example: sentence, graph,
    and token-aligned annotations (lemmas, POS, NER)."""

    def __init__(self, id=None, sentence=None, graph=None, tokens=None, lemmas=None, pos_tags=None, ner_tags=None, abstract_map=None, misc=None):
        self.id = id
        self.sentence = sentence
        self.graph = graph
        self.tokens = tokens
        self.lemmas = lemmas
        self.pos_tags = pos_tags
        self.ner_tags = ner_tags
        self.abstract_map = abstract_map
        self.misc = misc

    def is_named_entity(self, index):
        """True if the token at *index* carries a real NER tag ('0'/'O' mean none)."""
        return (self.ner_tags[index] not in ('0', 'O'))

    def get_named_entity_span(self, index):
        """Return all token indexes of the entity containing *index*.

        The span is the maximal run of adjacent tokens sharing the tag at
        *index*; indexes are not sorted. Returns [] when there are no NER
        tags or the token is not an entity.
        """
        if ((self.ner_tags is None) or (not self.is_named_entity(index))):
            return []
        span = [index]
        tag = self.ner_tags[index]
        prev = (index - 1)
        # BUG FIX: was `prev > 0`, which could never include token 0 in a span.
        while ((prev >= 0) and (self.ner_tags[prev] == tag)):
            span.append(prev)
            prev -= 1
        nxt = (index + 1)
        while ((nxt < len(self.ner_tags)) and (self.ner_tags[nxt] == tag)):
            span.append(nxt)
            nxt += 1
        return span

    def find_span_indexes(self, span):
        """Return the token indexes where *span* occurs, or None if absent."""
        for (i, token) in enumerate(self.tokens):
            if (token == span[0]):
                _span = self.tokens[i:(i + len(span))]
                if ((len(_span) == len(span)) and all(((x == y) for (x, y) in zip(span, _span)))):
                    return list(range(i, (i + len(span))))
        return None

    def replace_span(self, indexes, new, pos=None, ner=None):
        """Replace the contiguous token span *indexes* with *new* tokens.

        Lemmas mirror the new tokens. POS/NER default to repeating the tag of
        the first replaced token (pass explicit lists to override).
        """
        self.tokens = ((self.tokens[:indexes[0]] + new) + self.tokens[(indexes[(- 1)] + 1):])
        self.lemmas = ((self.lemmas[:indexes[0]] + new) + self.lemmas[(indexes[(- 1)] + 1):])
        if (pos is None):
            pos = [self.pos_tags[indexes[0]]]
        self.pos_tags = ((self.pos_tags[:indexes[0]] + pos) + self.pos_tags[(indexes[(- 1)] + 1):])
        if (ner is None):
            ner = [self.ner_tags[indexes[0]]]
        self.ner_tags = ((self.ner_tags[:indexes[0]] + ner) + self.ner_tags[(indexes[(- 1)] + 1):])

    def remove_span(self, indexes):
        """Delete the tokens (and aligned annotations) at *indexes*."""
        self.replace_span(indexes, [], [], [])

    def __repr__(self):
        # Serialize as '# ::key value' comment lines (AMR file convention);
        # non-string values are JSON-encoded, misc lines pass through as-is.
        fields = []
        for (k, v) in dict(id=self.id, snt=self.sentence, tokens=self.tokens, lemmas=self.lemmas, pos_tags=self.pos_tags, ner_tags=self.ner_tags, abstract_map=self.abstract_map, misc=self.misc, graph=self.graph).items():
            if (v is None):
                continue
            if (k == 'misc'):
                fields += v
            elif (k == 'graph'):
                fields.append(str(v))
            else:
                if (not isinstance(v, str)):
                    v = json.dumps(v)
                fields.append('# ::{} {}'.format(k, v))
        return '\n'.join(fields)

    def get_src_tokens(self):
        """Source-side tokens: lemmas when available, else whitespace-split sentence."""
        return (self.lemmas if self.lemmas else self.sentence.split())
def test_sequence_length():
    """m.sequence_length must propagate __len__ exceptions and return real lengths."""
    class LenError(RuntimeError):
        pass

    class Unsized():
        # Indexable, but asking for its length raises.
        def __getitem__(self, i):
            return None

        def __len__(self):
            raise LenError()

    with pytest.raises(LenError):
        m.sequence_length(Unsized())
    assert m.sequence_length([1, 2, 3]) == 3
    assert m.sequence_length('hello') == 5
def ncf_model(user_num, item_num, factor_num, dropout, lr, num_layers, sparse_feats_input_dims, sparse_feats_embed_dims, num_dense_feats):
    """Build and compile a Neural Collaborative Filtering (NCF) Keras model.

    Combines a GMF branch (element-wise product of user/item embeddings) with
    an MLP branch over wider user/item embeddings plus extra sparse (embedded)
    and dense features. The two branches are concatenated into a sigmoid
    output; the model is compiled with Adam + binary cross-entropy.

    Args:
        user_num, item_num: embedding vocabulary sizes.
        factor_num: base embedding width (GMF width).
        dropout: dropout rate between MLP layers.
        lr: Adam learning rate.
        num_layers: MLP depth; MLP embedding width is
            factor_num * 2**(num_layers - 1), halved after each layer.
        sparse_feats_input_dims: vocabulary size per sparse feature.
        sparse_feats_embed_dims: embedding dim per sparse feature; a scalar is
            broadcast to all sparse features.
        num_dense_feats: number of scalar dense-feature inputs.

    Returns:
        A compiled tf.keras.Model with inputs [user, item, *sparse, *dense].
    """
    user = tf.keras.layers.Input(dtype=tf.int32, shape=())
    item = tf.keras.layers.Input(dtype=tf.int32, shape=())
    # Broadcast a scalar embed dim across all sparse features.
    if (not isinstance(sparse_feats_embed_dims, list)):
        sparse_feats_embed_dims = ([sparse_feats_embed_dims] * len(sparse_feats_input_dims))
    with tf.name_scope('GMF'):
        # Generalized Matrix Factorization: element-wise product of embeddings.
        user_embed_GMF = tf.keras.layers.Embedding(user_num, factor_num, name='gmf_user')(user)
        item_embed_GMF = tf.keras.layers.Embedding(item_num, factor_num, name='gmf_item')(item)
        GMF = tf.keras.layers.Multiply()([user_embed_GMF, item_embed_GMF])
    with tf.name_scope('MLP'):
        user_embed_MLP = tf.keras.layers.Embedding(user_num, (factor_num * (2 ** (num_layers - 1))), name='mlp_user')(user)
        item_embed_MLP = tf.keras.layers.Embedding(item_num, (factor_num * (2 ** (num_layers - 1))), name='mlp_item')(item)
        # One embedding per sparse (categorical) feature.
        cat_feature_input_layers = []
        cat_feature_layers = []
        for (in_dim, out_dim) in zip(sparse_feats_input_dims, sparse_feats_embed_dims):
            input_layer = tf.keras.layers.Input(shape=(), dtype=tf.int32)
            cat_feature_input_layers.append(input_layer)
            cat_feature_layers.append(tf.keras.layers.Embedding(in_dim, out_dim)(input_layer))
        # Dense features pass straight through (no transform).
        num_feature_input_layers = []
        num_feature_layers = []
        for i in range(num_dense_feats):
            num_feature_input_layers.append(tf.keras.layers.Input(shape=1))
            num_feature_layers.append(num_feature_input_layers[i])
        all_feature_input_layers = (cat_feature_input_layers + num_feature_input_layers)
        all_feature_layers = (cat_feature_layers + num_feature_layers)
        interaction = tf.concat(([user_embed_MLP, item_embed_MLP] + all_feature_layers), axis=(- 1))
        # Dense tower: width halves after every layer.
        output_size = (factor_num * (2 ** (num_layers - 1)))
        for i in range(num_layers):
            layer_MLP = tf.keras.layers.Dense(units=output_size, activation='relu')(interaction)
            interaction = tf.keras.layers.Dropout(rate=dropout)(layer_MLP)
            output_size //= 2
    with tf.name_scope('concatenation'):
        concatenation = tf.concat([GMF, interaction], axis=(- 1))
        outputs = tf.keras.layers.Dense(1, activation='sigmoid')(concatenation)
    model = tf.keras.Model(inputs=([user, item] + all_feature_input_layers), outputs=outputs)
    model.compile(optimizer=tf.keras.optimizers.Adam(lr), loss=tf.keras.losses.BinaryCrossentropy(), metrics=['accuracy', 'AUC', 'Precision', 'Recall'])
    return model
_metric
def kid50k(opts):
    """Kernel Inception Distance over 50k real vs 50k generated samples,
    using 100 subsets of up to 1000 samples each."""
    opts.dataset_kwargs.update(max_size=None)
    score = kernel_inception_distance.compute_kid(opts, max_real=50000, num_gen=50000, num_subsets=100, max_subset_size=1000)
    return {'kid50k': score}
class FlaxMBartModel(metaclass=DummyObject):
    """Placeholder for FlaxMBartModel; instantiating it raises a helpful
    error via requires_backends unless the 'flax' backend is installed."""
    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
class Encoder(nn.Module):
    """Speech encoder: multi-scale 1-D convs, strided conv blocks with
    residual downsampling, dense residual blocks, and a bidirectional GRU.

    Args:
        c_in: input feature channels (e.g. spectrogram bins).
        c_h1: width of each multi-scale conv branch.
        c_h2: main hidden width.
        c_h3: GRU hidden size per direction.
        ns: negative slope for leaky ReLU.
        dp: dropout probability.

    NOTE(review): relies on module-level helpers pad_layer / linear / RNN
    defined elsewhere in this file — presumably same-padding conv, batched
    linear, and GRU application respectively; confirm against their defs.
    """

    def __init__(self, c_in=513, c_h1=128, c_h2=512, c_h3=128, ns=0.2, dp=0.5):
        super(Encoder, self).__init__()
        self.ns = ns
        # Parallel conv branches with kernel sizes 1..7 (multi-scale context).
        self.conv1s = nn.ModuleList([nn.Conv1d(c_in, c_h1, kernel_size=k) for k in range(1, 8)])
        # 1x1 conv merging all branches concatenated with the raw input.
        self.conv2 = nn.Conv1d(((len(self.conv1s) * c_h1) + c_in), c_h2, kernel_size=1)
        # Three conv stages; the second conv of each pair halves time (stride 2).
        self.conv3 = nn.Conv1d(c_h2, c_h2, kernel_size=5)
        self.conv4 = nn.Conv1d(c_h2, c_h2, kernel_size=5, stride=2)
        self.conv5 = nn.Conv1d(c_h2, c_h2, kernel_size=5)
        self.conv6 = nn.Conv1d(c_h2, c_h2, kernel_size=5, stride=2)
        self.conv7 = nn.Conv1d(c_h2, c_h2, kernel_size=5)
        self.conv8 = nn.Conv1d(c_h2, c_h2, kernel_size=5, stride=2)
        self.dense1 = nn.Linear(c_h2, c_h2)
        self.dense2 = nn.Linear(c_h2, c_h2)
        self.dense3 = nn.Linear(c_h2, c_h2)
        self.dense4 = nn.Linear(c_h2, c_h2)
        self.RNN = nn.GRU(input_size=c_h2, hidden_size=c_h3, num_layers=1, bidirectional=True)
        # Projects [conv features ; bi-GRU output] back to c_h2.
        self.linear = nn.Linear((c_h2 + (2 * c_h3)), c_h2)
        self.ins_norm1 = nn.InstanceNorm1d(c_h2)
        self.ins_norm2 = nn.InstanceNorm1d(c_h2)
        self.ins_norm3 = nn.InstanceNorm1d(c_h2)
        self.ins_norm4 = nn.InstanceNorm1d(c_h2)
        self.ins_norm5 = nn.InstanceNorm1d(c_h2)
        self.ins_norm6 = nn.InstanceNorm1d(c_h2)
        self.drop1 = nn.Dropout(p=dp)
        self.drop2 = nn.Dropout(p=dp)
        self.drop3 = nn.Dropout(p=dp)
        self.drop4 = nn.Dropout(p=dp)
        self.drop5 = nn.Dropout(p=dp)
        self.drop6 = nn.Dropout(p=dp)

    def conv_block(self, x, conv_layers, norm_layers, res=True):
        """Apply convs (+ leaky ReLU) then norm/dropout; optionally add a
        stride-2 average-pooled copy of the input as a residual (matching the
        time halving done by the stride-2 conv in conv_layers)."""
        out = x
        for layer in conv_layers:
            out = pad_layer(out, layer)
            out = F.leaky_relu(out, negative_slope=self.ns)
        for layer in norm_layers:
            out = layer(out)
        if res:
            # Reflect-pad to even length so avg_pool1d halves exactly.
            x_pad = F.pad(x, pad=(0, (x.size(2) % 2)), mode='reflect')
            x_down = F.avg_pool1d(x_pad, kernel_size=2)
            out = (x_down + out)
        return out

    def dense_block(self, x, layers, norm_layers, res=True):
        """Apply linear layers (+ leaky ReLU) then norm/dropout, with an
        optional identity residual."""
        out = x
        for layer in layers:
            out = linear(out, layer)
            out = F.leaky_relu(out, negative_slope=self.ns)
        for layer in norm_layers:
            out = layer(out)
        if res:
            out = (out + x)
        return out

    def forward(self, x):
        # Multi-scale conv branches, concatenated with the raw input on channels.
        outs = []
        for l in self.conv1s:
            out = pad_layer(x, l)
            outs.append(out)
        out = torch.cat((outs + [x]), dim=1)
        out = F.leaky_relu(out, negative_slope=self.ns)
        # Merge branches, then three downsampling residual conv stages.
        out = self.conv_block(out, [self.conv2], [self.ins_norm1, self.drop1], res=False)
        out = self.conv_block(out, [self.conv3, self.conv4], [self.ins_norm2, self.drop2])
        out = self.conv_block(out, [self.conv5, self.conv6], [self.ins_norm3, self.drop3])
        out = self.conv_block(out, [self.conv7, self.conv8], [self.ins_norm4, self.drop4])
        # Two residual dense blocks, then bi-GRU context.
        out = self.dense_block(out, [self.dense1, self.dense2], [self.ins_norm5, self.drop5], res=True)
        out = self.dense_block(out, [self.dense3, self.dense4], [self.ins_norm6, self.drop6], res=True)
        out_rnn = RNN(out, self.RNN)
        out = torch.cat([out, out_rnn], dim=1)
        out = linear(out, self.linear)
        out = F.leaky_relu(out, negative_slope=self.ns)
        return out
def infer_init_method(args):
    """Infer torch.distributed init settings in place on *args*.

    Priority: an explicitly set args.distributed_init_method wins; otherwise
    the torch launcher env vars (MASTER_ADDR/PORT, WORLD_SIZE, RANK); otherwise
    the SLURM environment when args.distributed_port is set. Does nothing when
    no source applies (e.g. `scontrol` missing on non-SLURM hosts).
    """
    if (args.distributed_init_method is not None):
        return
    # Case 1: launched via torch.distributed tooling — read everything from env.
    if all(((key in os.environ) for key in ['MASTER_ADDR', 'MASTER_PORT', 'WORLD_SIZE', 'RANK'])):
        args.distributed_init_method = 'env://'
        args.distributed_world_size = int(os.environ['WORLD_SIZE'])
        args.distributed_rank = int(os.environ['RANK'])
    # Case 2: running under SLURM with a rendezvous port configured.
    elif (args.distributed_port > 0):
        node_list = os.environ.get('SLURM_STEP_NODELIST')
        if (node_list is None):
            node_list = os.environ.get('SLURM_JOB_NODELIST')
        if (node_list is not None):
            try:
                # First hostname of the allocation is the rendezvous host.
                hostnames = subprocess.check_output(['scontrol', 'show', 'hostnames', node_list])
                args.distributed_init_method = 'tcp://{host}:{port}'.format(host=hostnames.split()[0].decode('utf-8'), port=args.distributed_port)
                nnodes = int(os.environ.get('SLURM_NNODES'))
                ntasks_per_node = os.environ.get('SLURM_NTASKS_PER_NODE')
                if (ntasks_per_node is not None):
                    ntasks_per_node = int(ntasks_per_node)
                else:
                    # Derive tasks-per-node from totals when not exported.
                    ntasks = int(os.environ.get('SLURM_NTASKS'))
                    nnodes = int(os.environ.get('SLURM_NNODES'))
                    assert ((ntasks % nnodes) == 0)
                    ntasks_per_node = int((ntasks / nnodes))
                if (ntasks_per_node == 1):
                    # One task per node: this process spawns one worker per GPU,
                    # so its base rank is node_id * gpus_per_node.
                    assert ((args.distributed_world_size % nnodes) == 0)
                    gpus_per_node = (args.distributed_world_size // nnodes)
                    node_id = int(os.environ.get('SLURM_NODEID'))
                    args.distributed_rank = (node_id * gpus_per_node)
                else:
                    # One task per GPU: ranks/devices come straight from SLURM.
                    assert (ntasks_per_node == (args.distributed_world_size // nnodes))
                    args.distributed_no_spawn = True
                    args.distributed_rank = int(os.environ.get('SLURM_PROCID'))
                    args.device_id = int(os.environ.get('SLURM_LOCALID'))
            except subprocess.CalledProcessError as e:
                # scontrol ran but failed — surface the error to the caller.
                raise e
            except FileNotFoundError:
                # scontrol not installed (not actually a SLURM host) — give up quietly.
                pass
def coding_humaneval_match_answer(task_data, response):
    """Extract a runnable HumanEval completion from a model *response*.

    Scans the response's ``` fenced segments (stripping a leading 'python'
    language tag) and tries to build valid code containing the task's entry
    point — first prepending only the prompt's import header, then the full
    prompt.

    Returns:
        (True, {'task_id', 'completion'}) on success, otherwise
        (False, {'task_id', 'completion': response}).
    """
    def _defines(code, func_name):
        # True iff *code* parses and contains a def of func_name anywhere.
        for node in ast.walk(ast.parse(code)):
            if isinstance(node, ast.FunctionDef) and node.name == func_name:
                return True
        return False

    def _extract(content, prefix, entrypoint):
        # First fenced segment that, with *prefix* prepended, parses and
        # defines the entry point.
        for segment in content.split('```'):
            segment = segment.strip()
            if segment.startswith('python'):
                segment = segment[len('python'):]
            try:
                candidate = prefix + segment
                if _defines(candidate, entrypoint):
                    return candidate
            except SyntaxError:
                pass
        return None

    meta = task_data['_metadata']
    # Everything before the first 'def' in the prompt = imports/helpers header.
    header = meta['prompt'].split('def')[0].strip() + '\n\n'
    for prefix in (header, meta['prompt']):
        completion = _extract(response, prefix, meta['entry_point'])
        if completion:
            return (True, {'task_id': meta['task_id'], 'completion': completion})
    return (False, {'task_id': meta['task_id'], 'completion': response})
class BlipImageBaseProcessor(BaseProcessor):
    """Base image processor holding the CLIP/BLIP channel normalization.

    Args:
        mean: per-channel means (defaults to the canonical CLIP values).
        std: per-channel stds (defaults to the canonical CLIP values).
    """

    def __init__(self, mean=None, std=None):
        # BUG FIX: the previous defaults were corrupted — mean zeroed two
        # channels and std was (0, 0, 0), which divides by zero inside
        # transforms.Normalize. Restored the canonical CLIP/BLIP constants
        # (the surviving middle mean 0.4578275 matches them).
        if (mean is None):
            mean = (0.48145466, 0.4578275, 0.40821073)
        if (std is None):
            std = (0.26862954, 0.26130258, 0.27577711)
        self.normalize = transforms.Normalize(mean, std)
def show_npimage(mtg, title=''):
    """Display a numpy image with PIL, converting to uint8 as needed.

    Float-like arrays with max < 1.2 are treated as [0, 1] data and scaled by
    255; other non-uint8 arrays are clipped to [0, 255]; uint8 arrays are
    shown as-is.
    """
    # BUG FIX: was `mtg.dtype is not np.uint8` — comparing a dtype *instance*
    # to the np.uint8 *type* with `is` is always True, so uint8 images were
    # needlessly converted. dtype equality (`!=`) is the correct comparison.
    if mtg.dtype != np.uint8:
        if np.max(mtg) < 1.2:
            Image.fromarray((255 * np.clip(mtg, 0, 1)).astype(np.uint8)).show(title)
        else:
            Image.fromarray(np.clip(mtg, 0, 255).astype(np.uint8)).show(title)
    else:
        Image.fromarray(mtg).show(title)
def rgbd_loop_closure(depth_list, color_list, intrinsic, config):
    """Detect loop-closure edges between keyframes of an RGB-D sequence.

    Every `config.odometry_loop_interval`-th frame is a keyframe; multi-scale
    point-to-plane RGB-D odometry is attempted between every keyframe pair
    (i, j), i < j. A pair is kept when the (5, 5) information-matrix entry,
    normalized by pixel count, exceeds 0.3 (sufficient overlap).

    Returns:
        (edges, poses, infos): keyframe index pairs, relative transforms
        (numpy), and information matrices (numpy).
    """
    device = o3c.Device('CUDA:0')  # NOTE(review): hard-codes GPU 0 — confirm availability
    interval = config.odometry_loop_interval
    n_files = len(depth_list)
    key_indices = list(range(0, n_files, interval))
    n_key_indices = len(key_indices)
    edges = []
    poses = []
    infos = []
    pairs = []  # NOTE(review): never used below — looks vestigial
    # Coarse-to-fine convergence criteria: 20, 10, then 5 iterations.
    criteria_list = [o3d.t.pipelines.odometry.OdometryConvergenceCriteria(20), o3d.t.pipelines.odometry.OdometryConvergenceCriteria(10), o3d.t.pipelines.odometry.OdometryConvergenceCriteria(5)]
    method = o3d.t.pipelines.odometry.Method.PointToPlane
    for i in range((n_key_indices - 1)):
        key_i = key_indices[i]
        depth_curr = o3d.t.io.read_image(depth_list[key_i]).to(device)
        color_curr = o3d.t.io.read_image(color_list[key_i]).to(device)
        rgbd_curr = o3d.t.geometry.RGBDImage(color_curr, depth_curr)
        for j in range((i + 1), n_key_indices):
            key_j = key_indices[j]
            depth_next = o3d.t.io.read_image(depth_list[key_j]).to(device)
            color_next = o3d.t.io.read_image(color_list[key_j]).to(device)
            rgbd_next = o3d.t.geometry.RGBDImage(color_next, depth_next)
            try:
                # Identity initial guess; depth scale 1000, max depth 3 m.
                res = o3d.t.pipelines.odometry.rgbd_odometry_multi_scale(rgbd_curr, rgbd_next, intrinsic, o3c.Tensor(np.eye(4)), 1000.0, 3.0, criteria_list, method)
                info = o3d.t.pipelines.odometry.compute_odometry_information_matrix(depth_curr, depth_next, intrinsic, res.transformation, 0.07, 1000.0, 3.0)
            except Exception as e:
                # Odometry can fail to converge for distant pairs — skip them.
                pass
            else:
                # Keep pairs with enough normalized overlap/constraint strength.
                if ((info[(5, 5)] / (depth_curr.columns * depth_curr.rows)) > 0.3):
                    edges.append((key_i, key_j))
                    poses.append(res.transformation.cpu().numpy())
                    infos.append(info.cpu().numpy())
    return (edges, poses, infos)
def parse_args():
    """Parse CLI options for SentencePiece vocabulary training.

    Returns:
        The parsed argparse namespace.

    Note:
        --lowercase is now a presence flag. The previous `type=bool`
        converted ANY non-empty string — including "False" — to True,
        because argparse applies `bool()` to the raw string.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--tsv-file', required=True, nargs='+', type=str)
    parser.add_argument('--spm-prefix', required=True, type=str)
    parser.add_argument('--vocab-size', required=True, type=int)
    parser.add_argument('--vocab-type', default='unigram', type=str, choices=['bpe', 'unigram', 'char', 'word'])
    parser.add_argument('--column', default='translation', type=str)
    # BUG FIX: was `type=bool`, which is effectively always True when given a value.
    parser.add_argument('--lowercase', default=False, action='store_true')
    args = parser.parse_args()
    return args
def check_args(args):
    """Ensure the configuration directory exists, creating it if necessary.

    Returns:
        *args* unchanged (for call chaining).
    """
    # exist_ok=True avoids the check-then-create race of the original
    # `os.path.exists` + `os.makedirs` sequence (another process could create
    # the directory between the two calls and crash makedirs).
    os.makedirs(args.config_dir, exist_ok=True)
    return args
def PSPNet(backbone_name='vgg16', input_shape=(32, 32, 32, 3), classes=21, activation='softmax', weights=None, encoder_weights='imagenet', encoder_freeze=False, downsample_factor=8, psp_conv_filters=512, psp_pooling_type='avg', psp_use_batchnorm=True, psp_dropout=None, **kwargs):
    """Build a PSPNet segmentation model on a classification backbone.

    Args:
        backbone_name: name of the encoder backbone.
        input_shape: model input shape (must suit the downsample factor).
        classes: number of segmentation classes.
        activation: output activation.
        weights: optional path to full-model weights to load.
        encoder_weights: pretrained weights for the backbone (e.g. 'imagenet').
        encoder_freeze: freeze backbone weights so only the PSP head trains.
        downsample_factor: output stride of the feature map fed to the PSP
            module; one of 4, 8, 16.
        psp_conv_filters / psp_pooling_type / psp_use_batchnorm / psp_dropout:
            PSP head configuration.
        **kwargs: framework submodules (backend/layers/models/utils).

    Returns:
        The assembled keras model.

    Raises:
        ValueError: for an unsupported downsample_factor.
    """
    global backend, layers, models, keras_utils
    (backend, layers, models, keras_utils) = get_submodules_from_kwargs(kwargs)
    # Spatial size must be compatible with the requested output stride.
    check_input_shape(input_shape, downsample_factor)
    backbone = Backbones.get_backbone(backbone_name, input_shape=input_shape, weights=encoder_weights, include_top=False, **kwargs)
    feature_layers = Backbones.get_feature_layers(backbone_name, n=3)
    # Pick the backbone feature layer matching the requested output stride.
    if (downsample_factor == 16):
        psp_layer_idx = feature_layers[0]
    elif (downsample_factor == 8):
        psp_layer_idx = feature_layers[1]
    elif (downsample_factor == 4):
        psp_layer_idx = feature_layers[2]
    else:
        raise ValueError('Unsupported factor - `{}`, Use 4, 8 or 16.'.format(downsample_factor))
    model = build_psp(backbone, psp_layer_idx, pooling_type=psp_pooling_type, conv_filters=psp_conv_filters, use_batchnorm=psp_use_batchnorm, final_upsampling_factor=downsample_factor, classes=classes, activation=activation, dropout=psp_dropout)
    if encoder_freeze:
        freeze_model(backbone, **kwargs)
    if (weights is not None):
        model.load_weights(weights)
    return model
class LegacyFairseqOptimizer(FairseqOptimizer):
    """Compatibility shim for optimizers configured via an argparse namespace
    instead of a structured config."""

    def __init__(self, args):
        # Stores the legacy namespace directly; deliberately does not call
        # super().__init__. NOTE(review): presumably FairseqOptimizer.__init__
        # expects a structured cfg this class doesn't have — confirm.
        self.args = args
class OSBlock(nn.Module):
    """Omni-scale residual block (OSNet): four parallel streams of 1-4 stacked
    Light 3x3 convs, fused by a shared-architecture channel gate, plus a
    (possibly projected) identity shortcut.

    Args:
        in_channels / out_channels: block input/output widths.
        IN: append an affine InstanceNorm2d after the residual sum.
        bottleneck_reduction: mid-channel reduction factor.
    """

    def __init__(self, in_channels, out_channels, IN=False, bottleneck_reduction=4, **kwargs):
        super(OSBlock, self).__init__()
        mid_channels = (out_channels // bottleneck_reduction)
        # 1x1 reduction, then streams with receptive fields ~3, 5, 7, 9.
        self.conv1 = Conv1x1(in_channels, mid_channels)
        self.conv2a = LightConv3x3(mid_channels, mid_channels)
        self.conv2b = nn.Sequential(LightConv3x3(mid_channels, mid_channels), LightConv3x3(mid_channels, mid_channels))
        self.conv2c = nn.Sequential(LightConv3x3(mid_channels, mid_channels), LightConv3x3(mid_channels, mid_channels), LightConv3x3(mid_channels, mid_channels))
        self.conv2d = nn.Sequential(LightConv3x3(mid_channels, mid_channels), LightConv3x3(mid_channels, mid_channels), LightConv3x3(mid_channels, mid_channels), LightConv3x3(mid_channels, mid_channels))
        # One gate module applied to every stream (shared weights).
        self.gate = ChannelGate(mid_channels)
        self.conv3 = Conv1x1Linear(mid_channels, out_channels)
        # Project the shortcut only when widths differ.
        self.downsample = None
        if (in_channels != out_channels):
            self.downsample = Conv1x1Linear(in_channels, out_channels)
        self.IN = None
        if IN:
            self.IN = nn.InstanceNorm2d(out_channels, affine=True)

    def forward(self, x):
        identity = x
        x1 = self.conv1(x)
        x2a = self.conv2a(x1)
        x2b = self.conv2b(x1)
        x2c = self.conv2c(x1)
        x2d = self.conv2d(x1)
        # Gated sum aggregates the multi-scale streams.
        x2 = (((self.gate(x2a) + self.gate(x2b)) + self.gate(x2c)) + self.gate(x2d))
        x3 = self.conv3(x2)
        if (self.downsample is not None):
            identity = self.downsample(identity)
        out = (x3 + identity)
        if (self.IN is not None):
            out = self.IN(out)
        return F.relu(out)
('paired-image-transform-folders')
class PairedImageTransformFolders(Dataset):
    """Pairs samples from an ImageTransformFolder (image + transform record)
    with images from a plain ImageFolder at the same index.

    Length follows dataset_1; dataset_2 is indexed in lockstep.
    """

    def __init__(self, root_path_1, root_path_2, **kwargs):
        self.dataset_1 = ImageTransformFolder(root_path_1, **kwargs)
        self.dataset_2 = ImageFolder(root_path_2, **kwargs)

    def __len__(self):
        return len(self.dataset_1)

    def __getitem__(self, idx):
        # PERF FIX: the original indexed dataset_1 twice per call, loading and
        # transforming the same image two times; fetch the sample once.
        sample_1 = self.dataset_1[idx]
        return (sample_1['image'], self.dataset_2[idx], sample_1['transform'])
class NormalizedHyperVolume(QualityIndicator):
    """Hypervolume normalized by a reference front's hypervolume.

    compute() returns 1 - HV(front) / HV(reference), so lower is better and
    0 means the front matches the reference hypervolume.
    """

    def __init__(self, reference_point: Iterable[float], reference_front: np.array):
        self.reference_point = reference_point
        self._hv = HyperVolume(reference_point=reference_point)
        # Cache the denominator once; a zero reference HV would make the
        # indicator undefined.
        self._reference_hypervolume = self._hv.compute(reference_front)
        assert (self._reference_hypervolume != 0), 'Hypervolume of reference front is zero'

    def compute(self, solutions: np.array) -> float:
        """Normalized hypervolume indicator for *solutions*."""
        raw = self._hv.compute(solutions=solutions)
        return 1 - raw / self._reference_hypervolume

    def get_short_name(self) -> str:
        return 'NHV'

    def get_name(self) -> str:
        return 'Normalized Hypervolume'
class IndexType(LaVarType):
    """LaVarType specialization tagged with VarTypeEnum.INDEX."""

    def __init__(self, desc=None, symbol=None):
        super().__init__(VarTypeEnum.INDEX, desc, symbol)
def _add_basic_block(x_in, out_channels, strides, dropout_rate=0.0):
    """Pre-activation WideResNet basic block: BN-ReLU-Conv twice with dropout
    in between, plus a residual shortcut.

    The shortcut is the identity when channel counts match; otherwise a
    strided 1x1 conv applied to the pre-activated input (bn1), per the
    pre-activation ResNet formulation.
    """
    is_channels_equal = (K.int_shape(x_in)[_get_channels_axis()] == out_channels)
    bn1 = batch_norm()(x_in)
    bn1 = Activation('relu')(bn1)
    out = conv2d(out_channels, 3, strides)(bn1)
    out = batch_norm()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout_rate)(out)
    out = conv2d(out_channels, 3, 1)(out)
    # Identity shortcut when shapes match, else projected pre-activation input.
    shortcut = (x_in if is_channels_equal else conv2d(out_channels, 1, strides)(bn1))
    out = add([out, shortcut])
    return out
def findNextOnset(i):
    """Return the first non-NaN entry of `trialOnsetTimes` after index *i*.

    Returns None implicitly when every later onset time is NaN.
    """
    for idx in range(i + 1, len(trialOnsetTimes)):
        onset = trialOnsetTimes[idx]
        if not np.isnan(onset):
            return onset
class RegNetStage(nn.Module):
    """One RegNet stage: `depth` X- or Y-layers (per config.layer_type); only
    the first layer downsamples/widens using *stride* and the channel change,
    the rest keep out_channels at stride 1."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int=2, depth: int=2):
        super().__init__()
        layer = (RegNetXLayer if (config.layer_type == 'x') else RegNetYLayer)
        self.layers = nn.Sequential(layer(config, in_channels, out_channels, stride=stride), *[layer(config, out_channels, out_channels) for _ in range((depth - 1))])

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class BPEVocabDict(VocabDictBase):
    """Vocabulary backed by a YouTokenToMe BPE model. Call load() before use."""

    def __init__(self, name, file_name):
        VocabDictBase.__init__(self, name, file_name)
        # Lazily populated by load() from the model file.
        self.bpe_model = None

    def load(self):
        """Load the BPE model from self.file_name."""
        self.bpe_model = yttm.BPE(model=self.file_name)

    def convert_sent_to_ids(self, sent, eos=False):
        """Encode *sent* to token ids.

        NOTE(review): `bos=eos` ties the begin-of-sentence flag to the `eos`
        argument — looks like it should be a separate parameter; confirm
        against callers.
        """
        enc_seqs = self.bpe_model.encode(sent, output_type=yttm.OutputType.ID, bos=eos, eos=eos)
        return enc_seqs

    def __len__(self):
        return self.bpe_model.vocab_size()

    def vocab_size(self):
        return self.bpe_model.vocab_size()

    def is_bpe(self):
        return True

    def convert_ids_to_sentence(self, seqs):
        """Decode a sequence of ids back to text."""
        return self.bpe_model.decode(seqs)
def get_norm_layer(norm_type='instance'):
    """Return a constructor for the requested 2-D normalization layer.

    Args:
        norm_type: 'batch' (learnable affine + running stats), 'instance'
            (no affine, no running stats), or 'none'.

    Returns:
        A functools.partial wrapping the layer class, or None for 'none'.

    Raises:
        NotImplementedError: for an unrecognized norm_type.
    """
    if norm_type == 'none':
        return None
    if norm_type == 'batch':
        return functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
    if norm_type == 'instance':
        return functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
    raise NotImplementedError(('normalization layer [%s] is not found' % norm_type))
_module()
class NASFPN(BaseModule):
    """NAS-FPN neck: stacks `stack_times` searched merging stages over five
    pyramid levels (p3..p7).

    Args:
        in_channels (list[int]): channels of each backbone scale.
        out_channels (int): width of every pyramid level.
        num_outs (int): number of output scales.
        stack_times (int): how many NAS-FPN stages to stack.
        start_level / end_level: backbone levels to use (-1 end = last).
        add_extra_convs: kept for FPN-API compatibility (unused here).
        norm_cfg: normalization config for conv modules.
        init_cfg: weight-initialization config.
    """

    def __init__(self, in_channels, out_channels, num_outs, stack_times, start_level=0, end_level=(- 1), add_extra_convs=False, norm_cfg=None, init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')):
        super(NASFPN, self).__init__(init_cfg)
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        self.stack_times = stack_times
        self.norm_cfg = norm_cfg
        # Resolve which backbone levels feed the neck; extra outputs (if any)
        # are produced by stride-2 downsampling below.
        if ((end_level == (- 1)) or (end_level == (self.num_ins - 1))):
            self.backbone_end_level = self.num_ins
            assert (num_outs >= (self.num_ins - start_level))
        else:
            self.backbone_end_level = (end_level + 1)
            assert (end_level < self.num_ins)
            assert (num_outs == ((end_level - start_level) + 1))
        self.start_level = start_level
        self.end_level = end_level
        self.add_extra_convs = add_extra_convs
        # 1x1 lateral convs unify each backbone scale to out_channels.
        self.lateral_convs = nn.ModuleList()
        for i in range(self.start_level, self.backbone_end_level):
            l_conv = ConvModule(in_channels[i], out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)
            self.lateral_convs.append(l_conv)
        # Extra levels: 1x1 conv + 2x2 max-pool halving resolution each time.
        extra_levels = ((num_outs - self.backbone_end_level) + self.start_level)
        self.extra_downsamples = nn.ModuleList()
        for i in range(extra_levels):
            extra_conv = ConvModule(out_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)
            self.extra_downsamples.append(nn.Sequential(extra_conv, nn.MaxPool2d(2, 2)))
        # Each stage is the searched merging-cell topology; keys read as
        # '<cell>_<inputs>_<output level>' (gp = global pooling cell, sum = sum cell).
        self.fpn_stages = ModuleList()
        for _ in range(self.stack_times):
            stage = nn.ModuleDict()
            stage['gp_64_4'] = GlobalPoolingCell(in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg)
            stage['sum_44_4'] = SumCell(in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg)
            stage['sum_43_3'] = SumCell(in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg)
            stage['sum_34_4'] = SumCell(in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg)
            stage['gp_43_5'] = GlobalPoolingCell(with_out_conv=False)
            stage['sum_55_5'] = SumCell(in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg)
            stage['gp_54_7'] = GlobalPoolingCell(with_out_conv=False)
            stage['sum_77_7'] = SumCell(in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg)
            stage['gp_75_6'] = GlobalPoolingCell(in_channels=out_channels, out_channels=out_channels, out_norm_cfg=norm_cfg)
            self.fpn_stages.append(stage)

    def forward(self, inputs):
        """Run the lateral convs, extend to num_outs levels, then apply each
        stacked merging stage; returns the (p3, p4, p5, p6, p7) tuple."""
        feats = [lateral_conv(inputs[(i + self.start_level)]) for (i, lateral_conv) in enumerate(self.lateral_convs)]
        for downsample in self.extra_downsamples:
            feats.append(downsample(feats[(- 1)]))
        (p3, p4, p5, p6, p7) = feats
        for stage in self.fpn_stages:
            # Merging order follows the searched NAS-FPN cell wiring.
            p4_1 = stage['gp_64_4'](p6, p4, out_size=p4.shape[(- 2):])
            p4_2 = stage['sum_44_4'](p4_1, p4, out_size=p4.shape[(- 2):])
            p3 = stage['sum_43_3'](p4_2, p3, out_size=p3.shape[(- 2):])
            p4 = stage['sum_34_4'](p3, p4_2, out_size=p4.shape[(- 2):])
            p5_tmp = stage['gp_43_5'](p4, p3, out_size=p5.shape[(- 2):])
            p5 = stage['sum_55_5'](p5, p5_tmp, out_size=p5.shape[(- 2):])
            p7_tmp = stage['gp_54_7'](p5, p4_2, out_size=p7.shape[(- 2):])
            p7 = stage['sum_77_7'](p7, p7_tmp, out_size=p7.shape[(- 2):])
            p6 = stage['gp_75_6'](p7, p5, out_size=p6.shape[(- 2):])
        return (p3, p4, p5, p6, p7)
def script_submodules_(model: nn.Module, types: Optional[Sequence[type]]=None, attempt_trace: Optional[bool]=True, batch_dims: Optional[Tuple[int]]=None):
    """TorchScript-compile eligible submodules of *model* in place.

    The helper pass scripts submodules matching *types* and collects those
    that should be traced instead into `to_trace`; tracing then runs only
    when allowed and something was collected.

    Args:
        model: module tree to process (modified in place).
        types: submodule types to target (semantics defined by the helper).
        attempt_trace: allow falling back to tracing for unscriptable modules.
        batch_dims: batch-dimension hints forwarded to the tracing pass.
    """
    to_trace = set()
    _script_submodules_helper_(model, types, attempt_trace, to_trace)
    if (attempt_trace and (len(to_trace) > 0)):
        _trace_submodules_(model, to_trace, batch_dims=batch_dims)
def save_bn_running(net):
    """Snapshot the running mean/var of every BatchNorm2d layer in *net*.

    The strict `type(...) ==` check is kept deliberately so BatchNorm2d
    subclasses are excluded, matching the original behavior.

    Returns:
        [means, variances]: two parallel lists of cloned tensors.
    """
    means = []
    variances = []
    for layer in get_model(net).modules():
        if type(layer) == torch.nn.modules.batchnorm.BatchNorm2d:
            means.append(layer.running_mean.clone())
            variances.append(layer.running_var.clone())
    return [means, variances]
def running_config_to_str(running_config):
    """Serialize a running-config mapping as ',key=value' pairs.

    Keys are simplified via simplify_config_key; the result always starts
    with a comma when the mapping is non-empty (matching the original
    accumulation format).
    """
    parts = [
        ',{}={}'.format(simplify_config_key(str(key)), value)
        for key, value in running_config.items()
    ]
    return ''.join(parts)
def initialize_quaddobl_tracker(target, start, fixedgamma=True, regamma=0.0, imgamma=0.0):
    """Initialize PHCpack's quad-double-precision path tracker.

    Stores the *target* and *start* systems into the quad-double containers
    and initializes the homotopy.

    Args:
        fixedgamma: when True, use the supplied (regamma, imgamma) gamma
            constant; otherwise let PHCpack pick one.

    Returns:
        The status code of py2c_initialize_quaddobl_homotopy.
    """
    from phcpy.phcpy2c3 import py2c_copy_quaddobl_container_to_target_system
    from phcpy.phcpy2c3 import py2c_copy_quaddobl_container_to_start_system
    from phcpy.phcpy2c3 import py2c_initialize_quaddobl_homotopy
    from phcpy.interface import store_quaddobl_system
    store_quaddobl_system(target)
    py2c_copy_quaddobl_container_to_target_system()
    store_quaddobl_system(start)
    py2c_copy_quaddobl_container_to_start_system()
    # 1 => keep the supplied gamma fixed; 0 => let PHC choose.
    fixed_flag = 1 if fixedgamma else 0
    return py2c_initialize_quaddobl_homotopy(fixed_flag, regamma, imgamma)
_module()
class RFP(FPN):
    """Recursive Feature Pyramid neck (DetectoRS): runs the FPN, feeds
    ASPP-transformed pyramid features back through extra backbone copies for
    `rfp_steps - 1` refinement passes, and fuses old/new features with a
    learned per-pixel sigmoid gate.

    Args:
        rfp_steps: total number of unrolled steps (1 plain FPN pass +
            rfp_steps-1 recursive passes).
        rfp_backbone: config for the recursive backbone copies.
        aspp_out_channels / aspp_dilations: ASPP feedback-module config.
        **kwargs: forwarded to the base FPN.
    """

    def __init__(self, rfp_steps, rfp_backbone, aspp_out_channels, aspp_dilations=(1, 3, 6, 1), **kwargs):
        super().__init__(**kwargs)
        self.rfp_steps = rfp_steps
        # One backbone copy per recursive step.
        self.rfp_modules = nn.ModuleList()
        for rfp_idx in range(1, rfp_steps):
            rfp_module = build_backbone(rfp_backbone)
            self.rfp_modules.append(rfp_module)
        self.rfp_aspp = ASPP(self.out_channels, aspp_out_channels, aspp_dilations)
        # 1x1 conv producing the fusion gate per pixel.
        self.rfp_weight = nn.Conv2d(self.out_channels, 1, kernel_size=1, stride=1, padding=0, bias=True)

    def init_weights(self):
        for convs in [self.lateral_convs, self.fpn_convs]:
            for m in convs.modules():
                if isinstance(m, nn.Conv2d):
                    xavier_init(m, distribution='uniform')
        for rfp_idx in range((self.rfp_steps - 1)):
            self.rfp_modules[rfp_idx].init_weights(self.rfp_modules[rfp_idx].pretrained)
        # Gate conv starts at zero => sigmoid(0)=0.5, i.e. an equal blend of
        # old and refined features at initialization.
        constant_init(self.rfp_weight, 0)

    def forward(self, inputs):
        inputs = list(inputs)
        assert (len(inputs) == (len(self.in_channels) + 1))
        # First element is the raw image, needed to re-run the backbone copies.
        img = inputs.pop(0)
        x = super().forward(tuple(inputs))
        for rfp_idx in range((self.rfp_steps - 1)):
            # ASPP feedback on all levels except the first.
            rfp_feats = ([x[0]] + list((self.rfp_aspp(x[i]) for i in range(1, len(x)))))
            x_idx = self.rfp_modules[rfp_idx].rfp_forward(img, rfp_feats)
            x_idx = super().forward(x_idx)
            # Gated fusion: sigmoid weight blends refined and previous features.
            x_new = []
            for ft_idx in range(len(x_idx)):
                add_weight = torch.sigmoid(self.rfp_weight(x_idx[ft_idx]))
                x_new.append(((add_weight * x_idx[ft_idx]) + ((1 - add_weight) * x[ft_idx])))
            x = x_new
        return x
def eval_epoch(args, model, test_dataloader, device, n_gpu):
    """Evaluate video-QA top-1/top-5 accuracy over one pass of the dataloader.

    Returns the average top-1 accuracy. NOTE(review): `args` and `n_gpu` are
    accepted but unused in this function.
    """
    top1 = AverageMeter()
    top5 = AverageMeter()
    # Unwrap a DataParallel/DistributedDataParallel wrapper if present.
    if hasattr(model, 'module'):
        model = model.module.to(device)
    else:
        model = model.to(device)
    model.eval()
    with torch.no_grad():
        for (bid, batch) in enumerate(test_dataloader):
            batch = tuple((t.to(device) for t in batch))
            (input_ids, input_mask, segment_ids, video, video_mask, labels) = batch
            output = model(input_ids, segment_ids, input_mask, video, video_mask, labels)
            # Batch-size-weighted running averages of top-1/top-5 precision.
            (prec1, prec5) = accuracy(output, labels, topk=(1, 5))
            top1.update(prec1[0], input_ids.size(0))
            top5.update(prec5[0], input_ids.size(0))
            # Lightweight in-place progress indicator.
            print('{}/{}\r'.format(bid, len(test_dataloader)), end='')
    logger.info('Video QA:')
    logger.info('\t>>> : {top1.avg:.3f} - : {top5.avg:.3f}'.format(top1=top1, top5=top5))
    R1 = top1.avg
    return R1
def compute_mean(values):
    """Return the scalar mean of *values* as a float tensor.

    Args:
        values: a torch tensor, or a tuple/list of numbers/tensors (list
            elements are detached before stacking).

    Returns:
        A 0-dim float tensor holding the mean.

    Raises:
        ValueError: if *values* is neither a tensor nor a tuple/list.
    """
    if torch.is_tensor(values):
        return values.float().mean()
    if isinstance(values, (tuple, list)):
        return torch.stack([torch.as_tensor(x).detach() for x in values]).float().mean()
    # FIX: the original raised a bare ValueError() with no message, leaving
    # callers no clue which input was rejected.
    raise ValueError('compute_mean expects a tensor or a tuple/list, got {}'.format(type(values).__name__))
def calculate_fid_given_paths(paths, device=None, batch_size=50, dims=2048, num_workers=8):
    """Compute the Frechet Inception Distance between two image folders.

    Args:
        paths: two directory paths [real, generated].
        device: torch device string; auto-selects CUDA when available.
        batch_size: inference batch size.
        dims: Inception feature dimensionality (selects the pooling block).
        num_workers: dataloader workers.

    Returns:
        The FID value.

    Raises:
        RuntimeError: if either path does not exist.
    """
    if (device is None):
        device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
    else:
        device = torch.device(device)
    for p in paths:
        if (not os.path.exists(p)):
            raise RuntimeError(('Invalid path: %s' % p))
    # Map the requested feature width to the matching Inception block.
    block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
    model = InceptionV3([block_idx]).to(device)
    (m1, s1) = compute_statistics_of_path(paths[0], model, batch_size, dims, device, num_workers)
    (m2, s2) = compute_statistics_of_path(paths[1], model, batch_size, dims, device, num_workers)
    fid_value = calculate_frechet_distance(m1, s1, m2, s2)
    return fid_value
def main(args: Any=None) -> None:
    """CLI entry point: run multi-view I-frame compression inference over a
    dataset and write aggregated metrics to a JSON file.

    Args:
        args: argument list (defaults to sys.argv[1:]).

    Raises:
        SystemExit: when no input images are found.
    """
    if (args is None):
        args = sys.argv[1:]
    parser = create_parser()
    args = parser.parse_args(args)
    description = ('entropy-estimation' if args.entropy_estimation else args.entropy_coder)
    filepaths = collect_images(args.data_name, args.dataset, args.num_camera)
    if (len(filepaths) == 0):
        print('Error: no images found in directory.', file=sys.stderr)
        raise SystemExit(1)
    device = ('cuda' if (args.cuda and torch.cuda.is_available()) else 'cpu')
    # Cap every math backend's thread pool on CPU to the requested count.
    if (device == 'cpu'):
        cpu_num = args.cpu_num
        os.environ['OMP_NUM_THREADS'] = str(cpu_num)
        os.environ['OPENBLAS_NUM_THREADS'] = str(cpu_num)
        os.environ['MKL_NUM_THREADS'] = str(cpu_num)
        os.environ['VECLIB_MAXIMUM_THREADS'] = str(cpu_num)
        os.environ['NUMEXPR_NUM_THREADS'] = str(cpu_num)
        torch.set_num_threads(cpu_num)
    # NOTE(review): any other --IFrameModel value leaves IFrameCompressor
    # undefined and crashes on the .to(device) below — confirm the argparse
    # choices restrict it to these two.
    if (args.IFrameModel == 'Multi_LDMIC'):
        IFrameCompressor = Multi_LDMIC(N=192, M=192, decode_atten=Multi_JointContextTransfer)
    elif (args.IFrameModel == 'Multi_LDMIC_checkboard'):
        IFrameCompressor = Multi_LDMIC_checkboard(N=192, M=192, decode_atten=Multi_JointContextTransfer)
    IFrameCompressor = IFrameCompressor.to(device)
    if args.i_model_path:
        print('Loading model:', args.i_model_path)
        checkpoint = torch.load(args.i_model_path, map_location=device)
        IFrameCompressor.load_state_dict(checkpoint['state_dict'])
        # Refresh entropy-coder CDF tables after loading weights.
        IFrameCompressor.update(force=True)
        IFrameCompressor.eval()
    outputdir = args.output
    Path(outputdir).mkdir(parents=True, exist_ok=True)
    results = defaultdict(list)
    args_dict = vars(args)
    trained_net = f'{args.IFrameModel}-{args.metric}-{description}'
    metrics = run_inference(filepaths, IFrameCompressor, outputdir, trained_net=trained_net, description=description, **args_dict)
    for (k, v) in metrics.items():
        results[k].append(v)
    output = {'name': f'{args.IFrameModel}-{args.metric}', 'description': f'Inference ({description})', 'results': results}
    # Written as encoded bytes; file opened in binary mode.
    with Path(f'{outputdir}/{args.IFrameModel}-{args.metric}-{description}.json').open('wb') as f:
        f.write(json.dumps(output, indent=2).encode())
    print(json.dumps(output, indent=2))
class Dataloader():
    """Builds the train/test datasets named by ``args`` and wraps them in
    ``torch.utils.data.DataLoader`` instances via :meth:`create`.
    """

    def __init__(self, args):
        self.args = args
        self.loader_input = args.loader_input
        self.loader_label = args.loader_label
        self.split_test = args.split_test
        self.split_train = args.split_train
        self.dataset_test_name = args.dataset_test
        self.dataset_train_name = args.dataset_train
        self.resolution = (args.resolution_wide, args.resolution_high)
        self.input_filename_test = args.input_filename_test
        self.label_filename_test = args.label_filename_test
        self.input_filename_train = args.input_filename_train
        self.label_filename_train = args.label_filename_train
        # --- training dataset -------------------------------------------------
        if (self.dataset_train_name == 'LSUN'):
            self.dataset_train = getattr(datasets, self.dataset_train_name)(db_path=args.dataroot, classes=['bedroom_train'], transform=transforms.Compose([transforms.Scale(self.resolution), transforms.CenterCrop(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif ((self.dataset_train_name == 'CIFAR10') or (self.dataset_train_name == 'CIFAR100')):
            self.dataset_train = getattr(datasets, self.dataset_train_name)(root=self.args.dataroot, train=True, download=True, transform=transforms.Compose([transforms.RandomCrop(self.resolution, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))]))
        elif ((self.dataset_train_name == 'CocoCaption') or (self.dataset_train_name == 'CocoDetection')):
            self.dataset_train = getattr(datasets, self.dataset_train_name)(root=self.args.dataroot, train=True, download=True, transform=transforms.Compose([transforms.Scale(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif ((self.dataset_train_name == 'STL10') or (self.dataset_train_name == 'SVHN')):
            self.dataset_train = getattr(datasets, self.dataset_train_name)(root=self.args.dataroot, split='train', download=True, transform=transforms.Compose([transforms.Scale(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif (self.dataset_train_name == 'MNIST'):
            self.dataset_train = getattr(datasets, self.dataset_train_name)(root=self.args.dataroot, train=True, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]))
        elif (self.dataset_train_name == 'ImageNet'):
            normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            self.dataset_train = datasets.ImageFolder(root=os.path.join(self.args.dataroot, self.args.input_filename_train), transform=transforms.Compose([transforms.RandomSizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]))
        elif (self.dataset_train_name == 'FRGC'):
            self.dataset_train = datasets.ImageFolder(root=(self.args.dataroot + self.args.input_filename_train), transform=transforms.Compose([transforms.Scale(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif (self.dataset_train_name == 'Folder'):
            self.dataset_train = datasets.ImageFolder(root=(self.args.dataroot + self.args.input_filename_train), transform=transforms.Compose([transforms.Scale(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif (self.dataset_train_name == 'FileList'):
            self.dataset_train = datasets.FileList(self.input_filename_train, self.label_filename_train, self.split_train, self.split_test, train=True, transform_train=transforms.Compose([transforms.Scale(self.resolution), transforms.CenterCrop(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]), transform_test=transforms.Compose([transforms.Scale(self.resolution), transforms.CenterCrop(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]), loader_input=self.loader_input, loader_label=self.loader_label)
        elif (self.dataset_train_name == 'FolderList'):
            self.dataset_train = datasets.FileList(self.input_filename_train, self.label_filename_train, self.split_train, self.split_test, train=True, transform_train=transforms.Compose([transforms.Scale(self.resolution), transforms.CenterCrop(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]), transform_test=transforms.Compose([transforms.Scale(self.resolution), transforms.CenterCrop(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]), loader_input=self.loader_input, loader_label=self.loader_label)
        else:
            raise Exception('Unknown Dataset')
        # --- test dataset -----------------------------------------------------
        if (self.dataset_test_name == 'LSUN'):
            self.dataset_test = getattr(datasets, self.dataset_test_name)(db_path=args.dataroot, classes=['bedroom_val'], transform=transforms.Compose([transforms.Scale(self.resolution), transforms.CenterCrop(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif ((self.dataset_test_name == 'CIFAR10') or (self.dataset_test_name == 'CIFAR100')):
            self.dataset_test = getattr(datasets, self.dataset_test_name)(root=self.args.dataroot, train=False, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))]))
        elif ((self.dataset_test_name == 'CocoCaption') or (self.dataset_test_name == 'CocoDetection')):
            self.dataset_test = getattr(datasets, self.dataset_test_name)(root=self.args.dataroot, train=False, download=True, transform=transforms.Compose([transforms.Scale(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif ((self.dataset_test_name == 'STL10') or (self.dataset_test_name == 'SVHN')):
            self.dataset_test = getattr(datasets, self.dataset_test_name)(root=self.args.dataroot, split='test', download=True, transform=transforms.Compose([transforms.Scale(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif (self.dataset_test_name == 'MNIST'):
            self.dataset_test = getattr(datasets, self.dataset_test_name)(root=self.args.dataroot, train=False, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]))
        elif (self.dataset_test_name == 'ImageNet'):
            normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
            self.dataset_test = datasets.ImageFolder(root=os.path.join(self.args.dataroot, self.args.input_filename_test), transform=transforms.Compose([transforms.Scale(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize]))
        elif (self.dataset_test_name == 'FRGC'):
            self.dataset_test = datasets.ImageFolder(root=(self.args.dataroot + self.args.input_filename_test), transform=transforms.Compose([transforms.Scale(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif (self.dataset_test_name == 'Folder'):
            self.dataset_test = datasets.ImageFolder(root=(self.args.dataroot + self.args.input_filename_test), transform=transforms.Compose([transforms.Scale(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]))
        elif (self.dataset_test_name == 'FileList'):
            # NOTE(review): the test split is constructed with train=True here —
            # looks suspicious; confirm datasets.FileList semantics.
            self.dataset_test = datasets.FileList(self.input_filename_test, self.label_filename_test, self.split_train, self.split_test, train=True, transform_train=transforms.Compose([transforms.Scale(self.resolution), transforms.CenterCrop(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]), loader_input=self.loader_input, loader_label=self.loader_label)
        elif (self.dataset_test_name == 'FolderList'):
            self.dataset_test = datasets.FileList(self.input_filename_test, self.label_filename_test, self.split_train, self.split_test, train=True, transform_train=transforms.Compose([transforms.Scale(self.resolution), transforms.CenterCrop(self.resolution), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]), loader_input=self.loader_input, loader_label=self.loader_label)
        else:
            raise Exception('Unknown Dataset')

    def _make_loader(self, dataset, shuffle):
        # Single place for the DataLoader options shared by train and test.
        return torch.utils.data.DataLoader(dataset, batch_size=self.args.batch_size, shuffle=shuffle, num_workers=int(self.args.nthreads), pin_memory=True)

    def create(self, flag=None):
        """Return the train loader ('Train'), test loader ('Test') or both (None)."""
        if (flag == 'Train'):
            return self._make_loader(self.dataset_train, True)
        if (flag == 'Test'):
            return self._make_loader(self.dataset_test, False)
        if (flag is None):
            return (self._make_loader(self.dataset_train, True), self._make_loader(self.dataset_test, False))
def add_ego_car(start_velocity):
    """Spawn the ego vehicle on the ramp route and pin its initial speed."""
    ego_id = 'ego'
    traci.vehicle.add(ego_id, 'rampRoute', 'egoCar', departSpeed=start_velocity, departPos=40, arrivalPos=50)
    # Bitmask 22 relaxes SUMO's default speed-safety checks — see the
    # setSpeedMode documentation for the exact bit meanings.
    traci.vehicle.setSpeedMode(ego_id, 22)
    traci.vehicle.setSpeed(ego_id, start_velocity)
def demo():
    """Render an example genome graph and open it in the default viewer."""
    genome = [[[0], [1, 0], [0, 0, 1], [0, 1, 0, 0], [0, 0, 1, 0, 1], [0]], [[0], [0, 0], [0, 0, 0], [0, 0, 0, 1], [0, 1, 0, 0, 0], [1]], [[0], [0, 1], [1, 1, 0], [0, 0, 1, 1], [1, 0, 0, 1, 0], [0]]]
    graph = make_dot_genome(genome, title='Demo Genome', filename='test')
    graph.view()
class Counter(WriteMixin, Infinite):
    """Progress indicator that renders the raw iteration count on each update."""
    # No prefix text before the number.
    message = ''
    # Hide the terminal cursor while the counter is active.
    hide_cursor = True
    def update(self):
        # Render the current index as text; write() is provided by WriteMixin.
        self.write(str(self.index))
def extract_features(waveforms, components_list, statistics_list=None, num_proc=1):
    """Extract features from each waveform in parallel.

    Returns a DataFrame with one row per input waveform; a tqdm bar reports
    progress while the worker pool consumes the inputs.
    """
    worker = partial(extract_features_from_waveform, components_list, statistics_list)
    with Pool(num_proc) as pool:
        progress = tqdm(pool.imap(worker, waveforms), total=len(waveforms), desc='Extracting features...')
        rows = list(progress)
    return pd.DataFrame(rows)
def parse_data_train(image, label):
    """Decode a JPEG byte string and normalise it to a fixed-size tensor.

    The explicit reshape pins the static shape so downstream graph ops see
    [WIDTH, HEIGHT, NUM_CHANNELS].
    """
    decoded = tf.io.decode_jpeg(image, NUM_CHANNELS)
    resized = tf.image.resize(decoded, size=(WIDTH, HEIGHT))
    image = tf.reshape(resized, [WIDTH, HEIGHT, NUM_CHANNELS])
    return (image, label)
class DistilBertTokenizer(BertTokenizer):
    """WordPiece tokenizer for DistilBERT.

    Behaviour is inherited unchanged from BertTokenizer; only the
    pretrained-model lookup tables and the model input signature differ.
    """
    # Filenames of the vocabulary artifacts.
    vocab_files_names = VOCAB_FILES_NAMES
    # Checkpoint name -> vocab URL mapping.
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    # Checkpoint name -> max sequence length (positional embedding size).
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Checkpoint name -> tokenizer init kwargs (e.g. lowercasing).
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    # NOTE(review): only 'attention_mask' is listed — presumably token_type_ids
    # is dropped because DistilBERT has no segment embeddings; confirm against
    # the base class's handling of 'input_ids'.
    model_input_names = ['attention_mask']
def weighted_loss(loss_func: Callable) -> Callable:
    """Decorator that adds element-wise weighting and reduction to a loss.

    The wrapped function gains ``weight``, ``reduction`` and ``avg_factor``
    keyword arguments; the raw per-element loss from ``loss_func`` is passed
    through ``weight_reduce_loss`` before being returned.
    """
    import functools
    # BUG FIX: the original had a bare `(loss_func)` expression here — a
    # mangled decorator that did nothing. Restore functools.wraps so the
    # wrapper keeps the wrapped loss's name and docstring.
    @functools.wraps(loss_func)
    def wrapper(pred: Tensor, target: Tensor, weight: Optional[Tensor]=None, reduction: str='mean', avg_factor: Optional[int]=None, **kwargs) -> Tensor:
        # Compute the unreduced loss, then apply weighting/reduction.
        loss = loss_func(pred, target, **kwargs)
        loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
        return loss
    return wrapper
class nnUNetTrainerV2_reduceMomentumDuringTraining(nnUNetTrainerV2):
    """Trainer variant that linearly anneals SGD momentum 0.99 -> 0.9 over
    epochs 800-1000; the schedule is re-applied at the end of every epoch."""

    def initialize_optimizer_and_scheduler(self):
        """(Re)create the SGD optimizer with the momentum for the current epoch.

        Momentum stays at 0.99 until epoch 800, then decays linearly to 0.9
        at epoch 1000.
        """
        current_momentum = 0.99
        min_momentum = 0.9
        if (self.epoch > 800):
            current_momentum = (current_momentum - (((current_momentum - min_momentum) / 200) * (self.epoch - 800)))
        self.print_to_log_file('current momentum', current_momentum)
        assert (self.network is not None), 'self.initialize_network must be called first'
        if (self.optimizer is None):
            # BUG FIX: was hard-coded momentum=0.99, which ignored the schedule
            # when the optimizer is first created after epoch 800 (e.g. resume).
            self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay, momentum=current_momentum, nesterov=True)
        else:
            self.optimizer.param_groups[0]['momentum'] = current_momentum
        self.lr_scheduler = None

    def on_epoch_end(self):
        # Refresh the momentum schedule before delegating to the base class.
        self.initialize_optimizer_and_scheduler()
        return super().on_epoch_end()
def step_5b(w):
    """Porter-stemmer step 5b: drop one 'l' from a double-l ending in R2."""
    has_double_l = w.endswith('ll')
    if has_double_l and R2(w).endswith('l'):
        w = w[:-1]
    return w
def read_standard_system_and_solutions(filename):
    """Read a start system and its solutions (standard precision) from a file.

    Returns a (system, solutions) pair, or None when the file cannot be read.
    """
    from phcpy.phcpy2c3 import py2c_syscon_clear_symbol_table
    from phcpy.phcpy2c3 import py2c_read_standard_start_system_from_file
    from phcpy.phcpy2c3 import py2c_copy_start_system_to_container
    from phcpy.phcpy2c3 import py2c_copy_start_solutions_to_container
    # A stale symbol table would corrupt the parse of the incoming system.
    py2c_syscon_clear_symbol_table()
    fail = py2c_read_standard_start_system_from_file(len(filename), filename)
    if (fail != 0):
        return None
    py2c_copy_start_system_to_container()
    py2c_copy_start_solutions_to_container()
    return (load_standard_system(), load_standard_solutions())
def main():
    """Build the detector from a config and report its FLOPs and parameters.

    Raises:
        ValueError: when ``--shape`` has neither one nor two entries.
        NotImplementedError: when the model lacks a ``forward_dummy`` method.
    """
    args = parse_args()
    # Accept either a single square size or an explicit (H, W) pair.
    if (len(args.shape) == 1):
        input_shape = (3, args.shape[0], args.shape[0])
    elif (len(args.shape) == 2):
        input_shape = ((3,) + tuple(args.shape))
    else:
        raise ValueError('invalid input shape')
    cfg = Config.fromfile(args.config)
    if (args.cfg_options is not None):
        cfg.merge_from_dict(args.cfg_options)
    # Import any user-specified modules (custom heads, hooks, ...).
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    model = build_detector(cfg.model, train_cfg=cfg.get('train_cfg'), test_cfg=cfg.get('test_cfg'))
    if torch.cuda.is_available():
        model.cuda()
    model.eval()
    # The FLOPs counter needs a single-tensor forward; detectors expose it
    # as forward_dummy.
    if hasattr(model, 'forward_dummy'):
        model.forward = model.forward_dummy
    else:
        # Fixed duplicated word in the original message
        # ("currently not currently supported").
        raise NotImplementedError('FLOPs counter is not currently supported with {}'.format(model.__class__.__name__))
    (flops, params) = get_model_complexity_info(model, input_shape)
    split_line = ('=' * 30)
    print(f'''{split_line}
Input shape: {input_shape}
Flops: {flops}
Params: {params}
{split_line}''')
    print('!!!Please be cautious if you use the results in papers. You may need to check if all ops are supported and verify that the flops computation is correct.')
class DensePoseConfidenceModelConfig():
    """Configuration of the confidence models used by the DensePose head
    (UV-coordinate confidence and segmentation confidence).

    NOTE: the original had bare field annotations and no constructor (a
    stripped @dataclass decorator), so the keyword construction inside
    from_cfg raised TypeError. An explicit __init__ restores it.
    """
    # Declared fields (string annotations avoid evaluating project types
    # at class-definition time).
    uv_confidence: 'DensePoseUVConfidenceConfig'
    segm_confidence: 'DensePoseSegmConfidenceConfig'

    def __init__(self, uv_confidence, segm_confidence):
        self.uv_confidence = uv_confidence
        self.segm_confidence = segm_confidence

    @staticmethod
    def from_cfg(cfg: 'CfgNode') -> 'DensePoseConfidenceModelConfig':
        """Build the config from a detectron2 CfgNode."""
        return DensePoseConfidenceModelConfig(uv_confidence=DensePoseUVConfidenceConfig(enabled=cfg.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.ENABLED, epsilon=cfg.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.EPSILON, type=DensePoseUVConfidenceType(cfg.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.TYPE)), segm_confidence=DensePoseSegmConfidenceConfig(enabled=cfg.MODEL.ROI_DENSEPOSE_HEAD.SEGM_CONFIDENCE.ENABLED, epsilon=cfg.MODEL.ROI_DENSEPOSE_HEAD.SEGM_CONFIDENCE.EPSILON))
class GaussianKernel(Kernel):
    """Kernel mapping pairwise distances to negated bandwidth-scaled scores.

    NOTE(review): despite the name, no exponential is applied here — the
    similarity is simply -distances / bandwidth. Presumably a downstream
    exp/softmax completes the Gaussian form; confirm before reuse.
    """

    def __init__(self) -> None:
        super().__init__()

    def similarity(self, distances: torch.Tensor, bandwidth: Union[(float, torch.Tensor)]) -> torch.Tensor:
        """Return the negated distance-to-bandwidth ratio."""
        scaled = distances / bandwidth
        return -scaled
class Resnet(nn.Module):
    """Wrapper that re-exposes the stem and the four stages of an existing
    ResNet and optionally returns the per-stage feature maps."""

    # Sub-modules copied from the wrapped network, in registration order.
    _PARTS = ('conv1', 'bn1', 'relu1', 'conv2', 'bn2', 'relu2',
              'conv3', 'bn3', 'relu3', 'maxpool',
              'layer1', 'layer2', 'layer3', 'layer4')

    def __init__(self, orig_resnet):
        super(Resnet, self).__init__()
        # Re-register each component so state_dict keys match the original.
        for part in self._PARTS:
            setattr(self, part, getattr(orig_resnet, part))

    def forward(self, x, return_feature_maps=False):
        """Run the network; return all stage outputs when requested,
        otherwise a single-element list with the final feature map."""
        # Deep stem: three conv-bn-relu units, then pooling.
        x = self.relu1(self.bn1(self.conv1(x)))
        x = self.relu2(self.bn2(self.conv2(x)))
        x = self.relu3(self.bn3(self.conv3(x)))
        x = self.maxpool(x)
        stage_outputs = []
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)
            stage_outputs.append(x)
        if return_feature_maps:
            return stage_outputs
        return [x]
class ProxylessBlock(nn.Module):
    """ProxylessNAS inverted-residual unit: optional 1x1 expansion ("bottleneck"
    conv), depthwise conv, then a linear 1x1 projection."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, bn_eps, expansion):
        super(ProxylessBlock, self).__init__()
        # The expansion conv is only needed when the block actually widens.
        self.use_bc = (expansion > 1)
        expanded = in_channels * expansion
        if self.use_bc:
            self.bc_conv = conv1x1_block(in_channels=in_channels, out_channels=expanded, bn_eps=bn_eps, activation='relu6')
        same_padding = (kernel_size - 1) // 2
        self.dw_conv = ConvBlock(in_channels=expanded, out_channels=expanded, kernel_size=kernel_size, stride=stride, padding=same_padding, groups=expanded, bn_eps=bn_eps, activation='relu6')
        # Linear projection: no activation after the pointwise conv.
        self.pw_conv = conv1x1_block(in_channels=expanded, out_channels=out_channels, bn_eps=bn_eps, activation=None)

    def forward(self, x):
        if self.use_bc:
            x = self.bc_conv(x)
        return self.pw_conv(self.dw_conv(x))
class Trainer(object):
    """Drives joint training of the RGB, depth and CLSTM fusion networks,
    periodically logging the running loss and checkpointing all three models.
    """

    def __init__(self, cuda, model_rgb, model_depth, model_clstm, optimizer_rgb, optimizer_depth, optimizer_clstm, train_loader, max_iter, snapshot, outpath, sshow, size_average=False):
        self.cuda = cuda
        self.model_rgb = model_rgb
        self.model_depth = model_depth
        self.model_clstm = model_clstm
        self.optim_rgb = optimizer_rgb
        self.optim_depth = optimizer_depth
        self.optim_clstm = optimizer_clstm
        self.train_loader = train_loader
        self.epoch = 0
        self.iteration = 0
        self.max_iter = max_iter
        self.snapshot = snapshot        # checkpoint interval (iterations)
        self.outpath = outpath          # checkpoint directory
        self.sshow = sshow              # logging interval (iterations)
        self.size_average = size_average

    def _save_checkpoints(self, iteration):
        """Persist the state of all three sub-networks for this iteration.

        Replaces three byte-identical copy-pasted stanzas in the original
        training loop; filenames and log messages are unchanged.
        """
        tag = iteration + 1
        for net, prefix, label in ((self.model_rgb, 'snapshot', 'snapshot'),
                                   (self.model_depth, 'depth_snapshot', 'snapshot_depth'),
                                   (self.model_clstm, 'clstm_snapshot', 'snapshot_clstm')):
            savename = ('%s/%s_iter_%d.pth' % (self.outpath, prefix, tag))
            torch.save(net.state_dict(), savename)
            print(('save: (%s: %d)' % (label, tag)))

    def train_epoch(self):
        """Run one pass over the training loader."""
        global running_loss_final
        for (batch_idx, (data, target, depth)) in enumerate(self.train_loader):
            iteration = (batch_idx + (self.epoch * len(self.train_loader)))
            # When resuming, skip batches until we reach the saved iteration.
            if ((self.iteration != 0) and ((iteration - 1) != self.iteration)):
                continue
            self.iteration = iteration
            if (self.iteration >= self.max_iter):
                break
            if self.cuda:
                (data, target, depth) = (data.cuda(), target.cuda(), depth.cuda())
            (data, target, depth) = (Variable(data), Variable(target), Variable(depth))
            (n, c, h, w) = data.size()
            # Replicate the single-channel depth map across the image channels
            # and move channels to dim 1 (NCHW).
            depth = depth.view(n, h, w, 1).repeat(1, 1, 1, c)
            depth = depth.transpose(3, 1)
            depth = depth.transpose(3, 2)
            self.optim_rgb.zero_grad()
            self.optim_depth.zero_grad()
            self.optim_clstm.zero_grad()
            (h1, h2, h3, h4, h5) = self.model_rgb(data)
            (depth_vector, d1, d2, d3, d4, d5) = self.model_depth(depth)
            score_fusion = self.model_clstm(depth_vector, h1, h2, h3, h4, h5, d1, d2, d3, d4, d5)
            loss_all = cross_entropy2d(score_fusion, target, size_average=self.size_average)
            running_loss_final += loss_all.item()
            if ((iteration % self.sshow) == (self.sshow - 1)):
                print(('\n [%3d, %6d, The training loss of DMRA_Net: %.3f]' % ((self.epoch + 1), (iteration + 1), (running_loss_final / (n * self.sshow)))))
                running_loss_final = 0.0
            # Checkpoint every `snapshot` iterations for the first 200k
            # iterations, every 10k afterwards, and always at the final one.
            if (iteration <= 200000):
                if ((iteration % self.snapshot) == (self.snapshot - 1)):
                    self._save_checkpoints(iteration)
            elif ((iteration % 10000) == (10000 - 1)):
                self._save_checkpoints(iteration)
            if ((iteration + 1) == self.max_iter):
                self._save_checkpoints(iteration)
            loss_all.backward()
            self.optim_clstm.step()
            self.optim_depth.step()
            self.optim_rgb.step()

    def train(self):
        """Iterate epochs until max_iter iterations have been consumed."""
        max_epoch = int(math.ceil(((1.0 * self.max_iter) / len(self.train_loader))))
        for epoch in range(max_epoch):
            self.epoch = epoch
            self.train_epoch()
            if (self.iteration >= self.max_iter):
                break
def _gen_mnasnet_small(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
    """Construct a MnasNet-Small model from its block-argument spec."""
    # Stage spec: one string per block, repeated per stage.
    arch_def = [['ds_r1_k3_s1_c8'], ['ir_r1_k3_s2_e3_c16'], ['ir_r2_k3_s2_e6_c16'], ['ir_r4_k5_s2_e6_c32_se0.25'], ['ir_r3_k3_s1_e6_c32_se0.25'], ['ir_r3_k5_s2_e6_c88_se0.25'], ['ir_r1_k3_s1_e6_c144']]
    build_kwargs = dict(block_args=decode_arch_def(arch_def), stem_size=8, channel_multiplier=channel_multiplier, norm_kwargs=resolve_bn_args(kwargs), **kwargs)
    return _create_model(build_kwargs, default_cfgs[variant], pretrained)
def cal_normalized_tp_pos_fp_neg(output, target, nclass, score_thresh):
    """Binarise `output` at `score_thresh` and count confusion statistics.

    Args:
        output: raw network logits (mxnet NDArray).
        target: ground-truth mask, rank 3 (N,H,W) or rank 4 (N,1,H,W).
        nclass: unused here; kept for interface compatibility with callers.
        score_thresh: sigmoid probability threshold for the positive class.

    Returns:
        (tp, pos, fp, neg): true positives, total positives (tp+fn),
        false positives, total negatives (fp+tn).

    Raises:
        ValueError: if `target` is neither rank 3 nor rank 4.
    """
    # Removed unused locals mini/maxi/nbins from the original.
    predict = (nd.sigmoid(output).asnumpy() > score_thresh).astype('int64')
    # Normalise the target to rank 4 (N,1,H,W) as an int64 numpy array.
    if (len(target.shape) == 3):
        target = nd.expand_dims(target, axis=1).asnumpy().astype('int64')
    elif (len(target.shape) == 4):
        target = target.asnumpy().astype('int64')
    else:
        raise ValueError('Unknown target dimension')
    # predict==target marks correct pixels; multiplying by predict / (1-predict)
    # splits them into true positives / true negatives.
    intersection = (predict * (predict == target))
    tp = intersection.sum()
    fp = (predict * (predict != target)).sum()
    tn = ((1 - predict) * (predict == target)).sum()
    fn = ((predict != target) * (1 - predict)).sum()
    pos = (tp + fn)
    neg = (fp + tn)
    return (tp, pos, fp, neg)
def Sequence_logo_multiple(matrix, data_type=None, figsize=None, ylabel=None, title=None, epsilon=0.0001, ncols=1, rows_per_weight=1, show=True, count_from=0, ticks_every=1, ticks_labels_size=14, title_size=20):
    """Draw one sequence logo per slice of `matrix` on a grid of subplots.

    matrix is indexed [logo, position, ...]; each matrix[i] is handed to
    Sequence_logo on its own axes. Returns the matplotlib figure.
    """
    # Infer the data type from the sign of the values: non-negative data is
    # treated as frequencies ('mean'), otherwise as model weights.
    if (data_type is None):
        if (matrix.min() >= 0):
            data_type = 'mean'
        else:
            data_type = 'weights'
    matrix = np.array(matrix)
    N_plots = matrix.shape[0]
    # Grid height: each logo may span several subplot rows (rows_per_weight).
    nrows = int(np.ceil(((N_plots * rows_per_weight) / float(ncols))))
    if (figsize is None):
        # Width scales with sequence length, height with rows per logo.
        figsize = (max(int(((0.3 * matrix.shape[1]) / rows_per_weight)), 2), (3 * rows_per_weight))
    # NOTE(review): a caller-supplied figsize is also multiplied by the grid
    # dimensions here — presumably figsize is meant per-subplot; confirm.
    figsize = ((figsize[0] * ncols), (figsize[1] * nrows))
    (fig, ax) = plt.subplots(nrows, ncols, figsize=figsize)
    # Default y-label depends on whether we plot frequencies or weights.
    if (ylabel is None):
        if (data_type == 'mean'):
            ylabel = 'Conservation (bits)'
        elif (data_type == 'weights'):
            ylabel = 'Weights'
    # A scalar label is numbered per logo; a list is used as-is.
    if (type(ylabel) == str):
        ylabels = [(ylabel + (' #%s' % i)) for i in range((1 + count_from), ((N_plots + count_from) + 1))]
    else:
        ylabels = ylabel
    if (title is None):
        title = ''
    # Same scalar-vs-list handling for titles.
    if (type(title) == str):
        titles = [title for _ in range(N_plots)]
    else:
        titles = title
    for i in range(N_plots):
        # Collect the axes this logo occupies (one or several rows).
        if (rows_per_weight > 1):
            ax_ = [get_ax(ax, ((i * rows_per_weight) + l), (nrows * rows_per_weight), ncols) for l in range(rows_per_weight)]
        else:
            ax_ = get_ax(ax, i, nrows, ncols)
        Sequence_logo(matrix[i], ax=ax_, data_type=data_type, ylabel=ylabels[i], title=titles[i], epsilon=epsilon, show=False, ticks_every=ticks_every, ticks_labels_size=ticks_labels_size, title_size=title_size, nrows=rows_per_weight)
    plt.tight_layout()
    if show:
        plt.show()
    return fig
def _get_predictor(args: argparse.Namespace) -> Predictor:
    """Load the trained model archive named by `args` and wrap it in a Predictor."""
    from stog.utils.archival import load_archive
    model_archive = load_archive(args.archive_file, device=args.cuda_device, weights_file=args.weights_file)
    return Predictor.from_archive(model_archive)
class MinkLoc(torch.nn.Module):
    """Sparse-convolutional place-recognition network: a MinkFPN backbone
    followed by a pooling head that produces a global descriptor."""

    def __init__(self, in_channels, feature_size, output_dim, planes, layers, num_top_down, conv0_kernel_size, block='BasicBlock', pooling_method='GeM'):
        super().__init__()
        self.in_channels = in_channels
        self.feature_size = feature_size
        self.output_dim = output_dim
        self.block = block
        # Name -> residual block class lookup (replaces the if/elif chain).
        block_lookup = {'BasicBlock': BasicBlock, 'Bottleneck': Bottleneck,
                        'SEBasicBlock': SEBasicBlock, 'ECABasicBlock': ECABasicBlock}
        if block not in block_lookup:
            raise NotImplementedError('Unsupported network block: {}'.format(block))
        block_module = block_lookup[block]
        self.pooling_method = pooling_method
        self.backbone = MinkFPN(in_channels=in_channels, out_channels=self.feature_size, num_top_down=num_top_down, conv0_kernel_size=conv0_kernel_size, block=block_module, layers=layers, planes=planes)
        self.pooling = pooling.PoolingWrapper(pool_method=pooling_method, in_dim=self.feature_size, output_dim=output_dim)
        self.pooled_feature_size = self.pooling.output_dim

    def forward(self, batch):
        """Map a batch of sparse point clouds to global descriptors."""
        sparse_input = ME.SparseTensor(batch['features'], coordinates=batch['coords'])
        x = self.backbone(sparse_input)
        assert (x.shape[1] == self.feature_size), 'Backbone output tensor has: {} channels. Expected: {}'.format(x.shape[1], self.feature_size)
        x = self.pooling(x)
        # Squeeze a trailing singleton dim some pooling variants produce.
        if ((x.dim() == 3) and (x.shape[2] == 1)):
            x = x.flatten(1)
        assert (x.dim() == 2), 'Expected 2-dimensional tensor (batch_size,output_dim). Got {} dimensions.'.format(x.dim())
        assert (x.shape[1] == self.pooled_feature_size), 'Backbone output tensor has: {} channels. Expected: {}'.format(x.shape[1], self.pooled_feature_size)
        assert (x.shape[1] == self.output_dim), 'Output tensor has: {} channels. Expected: {}'.format(x.shape[1], self.output_dim)
        return {'global': x}

    def print_info(self):
        """Print a parameter-count summary of the model."""
        count = lambda module: sum([param.nelement() for param in module.parameters()])
        print('Model class: MinkLoc')
        print('Total parameters: {}'.format(count(self)))
        print('Backbone parameters: {}'.format(count(self.backbone)))
        print('Backbone building block: {}'.format(self.block))
        print('Pooling method: {}'.format(self.pooling_method))
        print('Pooling parameters: {}'.format(count(self.pooling)))
        print('# channels from feature extraction block: {}'.format(self.feature_size))
        print('# channels from pooling block: {}'.format(self.pooled_feature_size))
        print('# output channels : {}'.format(self.output_dim))
class Tester():
    """Evaluates a language model on a held-out set and reports perplexity.

    NOTE(review): this code targets a legacy PyTorch API (Variable,
    size_average, loss.data[0]) — confirm the intended torch version before
    modernising.
    """
    def __init__(self, opt, model, data, write_file, verbose=True, path='/home/jcxu/exp-ptb'):
        self.opt = opt
        self.model = model
        # List of pre-batched test examples; one dict per batch.
        self.test_bag = data
        self.output_path = opt.output_path
        self.n_batch = len(data)
        self.word_dict = opt.word_dict
        self.verbose = verbose
        self.write_file = write_file
        self.path = path
        # ignore_index=0 skips padding positions in the loss.
        self.crit = nn.CrossEntropyLoss(size_average=True, ignore_index=0)
    def test_iters(self):
        """Iterate over the test set and return exp(mean NLL) — the perplexity.

        A KeyboardInterrupt stops iteration early; perplexity is then computed
        from the batches processed so far.
        """
        try:
            count = 0
            test_id = 0
            # Fixed seed so the (irrelevant) evaluation order is reproducible.
            batch_order = np.random.RandomState(seed=42).permutation(self.n_batch)
            accumulated_ppl = 0
            test_len = 0
            for (idx, batch_idx) in enumerate(batch_order):
                current_batch = self.test_bag[batch_idx]
                count += 1
                inp_var = current_batch['txt']
                inp_mask = current_batch['txt_msk']
                # inp_mask[0] is the sequence length of the (single) example.
                test_len += inp_mask[0]
                batch_size = inp_var.size()[0]
                # Evaluation assumes one example per batch.
                assert (batch_size == 1)
                (nll, decoder_output) = self.func_test(inp_var, inp_mask)
                accumulated_ppl += nll
                logging.info(nll)
                test_id += 1
        except KeyboardInterrupt:
            print('Interrupted Keyboard!')
        # Mean NLL per token -> perplexity.
        final_ppl = (accumulated_ppl / test_len)
        return math.exp(final_ppl)
    def func_test(self, inp_var, inp_msk):
        """Score one batch; returns (summed NLL, decoder outputs)."""
        target_len = inp_msk[0]
        batch_size = inp_var.size()[0]
        # Language modelling: the input doubles as its own target.
        (decoder_outputs_prob, decoder_outputs) = self.model.forward(inp_var, inp_msk, tgt_var=inp_var, tgt_msk=inp_msk, aux=None)
        valid_pos_mask = Var(msk_list_to_mat(inp_msk), requires_grad=False).view((target_len * batch_size), 1)
        if self.opt.use_cuda:
            valid_pos_mask = valid_pos_mask.cuda()
        # Flatten (seq, batch) so positions line up with the gold tokens.
        pred_prob = decoder_outputs_prob.view((target_len * batch_size), (- 1))
        seq_first_inp_var = inp_var.transpose(1, 0).contiguous()
        gold_dist = Var(seq_first_inp_var.view((target_len * batch_size)))
        if self.opt.use_cuda:
            gold_dist = gold_dist.cuda()
        loss = self.crit(pred_prob, gold_dist)
        # Undo the mean reduction to get the per-sequence summed NLL.
        loss = (loss * target_len)
        # loss.data[0]: old-torch scalar extraction (pre-0.4 .item()).
        return (loss.data[0], decoder_outputs)
def train_loop(train_model, eval_model, encoder_model, hparams):
    """Train until hparams.num_steps, periodically evaluating reconstruction
    and launching parallel QSAR evaluations from the latest checkpoint.

    train_model / eval_model / encoder_model each carry their own TF graph
    and session; checkpoints on disk are the only channel between them.
    """
    # Handles of the asynchronous QSAR evaluation processes.
    qsar_process = []
    with train_model.graph.as_default():
        train_model.sess.run(train_model.model.iterator.initializer)
        # initilize() returns the global step (nonzero when restoring).
        step = train_model.model.initilize(train_model.sess, overwrite_saves=hparams.overwrite_saves)
        hparams_file_name = FLAGS.hparams_file_name
        if (hparams_file_name is None):
            hparams_file_name = os.path.join(hparams.save_dir, 'hparams.json')
        # Persist the hyper-parameters next to the checkpoints.
        with open(hparams_file_name, 'w') as outfile:
            json.dump(hparams.to_json(), outfile)
    while (step < hparams.num_steps):
        with train_model.graph.as_default():
            step = train_model.model.train(train_model.sess)
        if ((step % hparams.summary_freq) == 0):
            # Checkpoint, then evaluate from that checkpoint in the eval graph.
            with train_model.graph.as_default():
                train_model.model.save(train_model.sess)
            with eval_model.graph.as_default():
                eval_model.model.restore(eval_model.sess)
                eval_model.sess.run(eval_model.model.iterator.initializer)
                eval_reconstruct(eval_model, step, hparams)
        if ((step % hparams.inference_freq) == 0):
            # QSAR evaluation runs in a separate process; joined at the end.
            with encoder_model.graph.as_default():
                qsar_process.append(parallel_eval_qsar(encoder_model, step, hparams))
    for process in qsar_process:
        process.join()
class Predict(object):
    """CLI sub-command: run a trained biaffine parser over a dataset and write
    the predicted heads/relations back out."""

    def add_subparser(self, name, parser):
        """Register the 'predict' sub-command and its options on `parser`."""
        subparser = parser.add_parser(name, help='Use a trained model to make predictions.')
        subparser.add_argument('--fdata', default='data/test.conllx', help='path to dataset')
        subparser.add_argument('--finit', default='data/test.conllx', help='path to initial parser results')
        subparser.add_argument('--fpred', default='pred.conllx', help='path to predicted result')
        subparser.add_argument('--modelpath', default='None', help='path to saved model')
        subparser.add_argument('--use_predicted', default=False, action='store_true', help='Use predicted Parser')
        subparser.add_argument('--input_type', type=str, choices=['conllx', 'conllu', 'raw'], default='conllx')
        subparser.add_argument('--language', type=str, default='en')
        return subparser

    def rearange(self, items, ids):
        """Undo bucketed batching: put `items` back into original corpus order.

        `ids` is a list of per-bucket index lists; flattening it gives, for
        each prediction, the original sentence position. Sorting positions of
        that flat list yields the inverse permutation.
        """
        # Idiom fix: flatten with a comprehension instead of nested loops that
        # shadowed the builtin `id`.
        flat = [index for bucket in ids for index in bucket]
        order = sorted(range(len(flat)), key=(lambda k: flat[k]))
        return [items[i] for i in order]

    def __call__(self, args):
        """Load model + data, predict, and save results to args.fpred."""
        print('Load the model')
        modelpath = (args.modelpath + '/model_weights')
        vocabpath = (args.modelpath + '/vocab.tag')
        config = torch.load(modelpath)['config']
        # Small batches/buckets are sufficient (and safest) for inference.
        config.batch_size = 2
        config.buckets = 2
        vocab = torch.load(vocabpath)
        parser = BiaffineParser.load(modelpath)
        model = Model(vocab, parser, config, vocab.n_rels)
        print('Load the dataset')
        if (args.input_type == 'conllu'):
            corpus = UniversalDependenciesDatasetReader()
            corpus.load(args.fdata)
        elif (args.input_type == 'conllx'):
            corpus = Corpus.load(args.fdata)
        elif (args.input_type == 'raw'):
            corpus = UniversalDependenciesRawDatasetReader(args.language)
            corpus.load(args.fdata)
        if args.use_predicted:
            # Initial parses that the model refines.
            if (args.input_type == 'conllu'):
                corpus_predicted = UniversalDependenciesDatasetReader()
                corpus_predicted.load(args.finit)
            else:
                corpus_predicted = Corpus.load(args.finit)
        if args.use_predicted:
            dataset = TextDataset(vocab.numericalize(corpus, corpus_predicted))
        else:
            dataset = TextDataset(vocab.numericalize(corpus, training=False))
        (loader, ids) = batchify(dataset, config.batch_size, config.buckets)
        print('Make predictions on the dataset')
        if args.use_predicted:
            (heads_pred, rels_pred, metric) = model.predict_predicted(loader)
        else:
            (heads_pred, rels_pred, metric) = model.predict(loader)
        print(f'Save the predicted result to {args.fpred}')
        # Bucketing shuffled the sentences; restore corpus order before saving.
        heads_pred = self.rearange(heads_pred, ids)
        rels_pred = self.rearange(rels_pred, ids)
        corpus.heads = heads_pred
        corpus.rels = rels_pred
        corpus.save(args.fpred)
class Net(nn.Module):
    """CARN-style super-resolution net: three cascading blocks whose outputs
    are progressively concatenated and squeezed, then upsampled."""

    def __init__(self, scale):
        super(Net, self).__init__()
        multi_scale = True
        group = 1
        # Mean shift: subtract dataset RGB mean on input, add it back on output.
        self.sub_mean = ops.MeanShift((0.4488, 0.4371, 0.404), sub=True)
        self.add_mean = ops.MeanShift((0.4488, 0.4371, 0.404), sub=False)
        self.entry = nn.Conv2d(3, 64, 3, 1, 1)
        self.b1 = Block(64, 64, group=group)
        self.b2 = Block(64, 64, group=group)
        self.b3 = Block(64, 64, group=group)
        # 1x1 convs that squeeze the growing concatenation back to 64 channels.
        self.c1 = ops.BasicBlock((64 * 2), 64, 1, 1, 0)
        self.c2 = ops.BasicBlock((64 * 3), 64, 1, 1, 0)
        self.c3 = ops.BasicBlock((64 * 4), 64, 1, 1, 0)
        self.upsample = ops.UpsampleBlock(64, scale=scale, multi_scale=multi_scale, group=group)
        self.exit = nn.Conv2d(64, 3, 3, 1, 1)

    def forward(self, x, scale):
        x = self.sub_mean(x)
        x = self.entry(x)
        # Cascading: each block sees the previous squeeze output; the
        # concatenation accumulates every block output so far.
        cat_feats = x
        out_feats = x
        for block, squeeze in ((self.b1, self.c1), (self.b2, self.c2), (self.b3, self.c3)):
            block_out = block(out_feats)
            cat_feats = torch.cat([cat_feats, block_out], dim=1)
            out_feats = squeeze(cat_feats)
        out = self.upsample(out_feats, scale=scale)
        out = self.exit(out)
        return self.add_mean(out)
def load_state(path, model, optimizer=None):
    """Restore model (and optionally optimizer) state from a checkpoint file.

    Returns the loaded checkpoint dict, or None if ``path`` does not exist.
    Model weights are loaded non-strictly (missing/extra keys tolerated).
    """
    if not os.path.isfile(path):
        log("=> no checkpoint found at '{}'".format(path))
        return None
    log("=> loading checkpoint '{}'".format(path))
    # Remap tensors saved on cuda:0 onto this process's current device.
    device_map = {'cuda:0': 'cuda:{}'.format(torch.cuda.current_device())}
    checkpoint = torch.load(path, map_location=device_map)
    model.load_state_dict(checkpoint['state_dict'], strict=False)
    if optimizer is not None:
        optimizer.load_state_dict(checkpoint['optimizer'])
    log("=> loaded checkpoint '{}' (epoch {})".format(path, checkpoint['epoch']))
    return checkpoint
class ResBlock(torch.nn.Module):
    """HiFi-GAN style residual block: pairs of weight-normalized dilated /
    undilated 1-D convolutions with leaky-ReLU activations and a skip
    connection around each pair."""

    def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
        super(ResBlock, self).__init__()
        # First conv of each pair uses the configured dilation ...
        self.convs1 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1,
                               dilation=d, padding=get_padding(kernel_size, d)))
            for d in (dilation[0], dilation[1], dilation[2])
        ])
        self.convs1.apply(init_weights)
        # ... while the second conv always uses dilation 1.
        self.convs2 = nn.ModuleList([
            weight_norm(Conv1d(channels, channels, kernel_size, 1,
                               dilation=1, padding=get_padding(kernel_size, 1)))
            for _ in range(3)
        ])
        self.convs2.apply(init_weights)

    def forward(self, x):
        for conv_a, conv_b in zip(self.convs1, self.convs2):
            residual = x
            x = conv_a(F.leaky_relu(x, LRELU_SLOPE))
            x = conv_b(F.leaky_relu(x, LRELU_SLOPE))
            x = x + residual
        return x

    def remove_weight_norm(self):
        # Strip weight-norm reparameterization (e.g. before export/inference).
        for layer in self.convs1:
            remove_weight_norm(layer)
        for layer in self.convs2:
            remove_weight_norm(layer)
def select_conv2d(in_chs, out_chs, kernel_size, **kwargs):
    """Factory for 2-D convolutions.

    Dispatches on ``kernel_size``: a list selects a mixed (per-kernel-size
    grouped) conv; otherwise a conditionally-parameterized conv when
    ``num_experts`` > 0 is requested, else a plain same-padded conv.
    ``groups`` must never be passed explicitly -- it is derived from the
    ``depthwise`` flag.
    """
    assert 'groups' not in kwargs  # groups is derived, never user-supplied
    if isinstance(kernel_size, list):
        # Mixed convs do not support conditional (expert) weights.
        assert 'num_experts' not in kwargs
        return MixedConv2d(in_chs, out_chs, kernel_size, **kwargs)
    groups = out_chs if kwargs.pop('depthwise', False) else 1
    if kwargs.get('num_experts', 0) > 0:
        return CondConv2d(in_chs, out_chs, kernel_size, groups=groups, **kwargs)
    return create_conv2d_pad(in_chs, out_chs, kernel_size, groups=groups, **kwargs)
def test_accellsrframe_vecfuncomegaz_2D():
    """Check galpy's NonInertialFrameForce with a vector-of-functions Omega:
    a 2D orbit integrated in a uniformly-accelerating rotating frame must
    match the same orbit integrated in the inertial frame and transformed."""
    lp = potential.LogarithmicHaloPotential(normalize=1.0)
    # Initial rotation rate = circular frequency at R=1; spin-up rate omegadot.
    omega = lp.omegac(1.0)
    omegadot = 0.02
    # Omega supplied as per-axis functions of time; only the z-component rotates.
    omega_func = [(lambda t: 0.0), (lambda t: 0.0), (lambda t: (lp.omegac(1.0) + (0.02 * t)))]
    omegadot_func = [(lambda t: 0.0), (lambda t: 0.0), (lambda t: 0.02)]
    diskpot = lp
    framepot = potential.NonInertialFrameForce(Omega=omega_func, Omegadot=omegadot_func)
    diskframepot = (lp + framepot)
    def check_orbit(method='odeint', tol=1e-09):
        # Reference orbit in the inertial frame.
        o = Orbit().toPlanar()
        o.turn_physical_off()
        ts = numpy.linspace(0.0, 20.0, 1001)
        o.integrate(ts, diskpot)
        # Same initial condition expressed in the rotating frame (vT shifted
        # by the frame's initial rotation speed omega*R).
        op = Orbit([o.R(), o.vR(), (o.vT() - (omega * o.R())), o.phi()])
        op.integrate(ts, diskframepot, method=method)
        # Rotate the inertial-frame orbit by the accumulated frame angle
        # omega*t + omegadot*t^2/2 and compare Cartesian positions.
        o_xs = (o.R(ts) * numpy.cos(((o.phi(ts) - (omega * ts)) - ((omegadot * (ts ** 2.0)) / 2.0))))
        o_ys = (o.R(ts) * numpy.sin(((o.phi(ts) - (omega * ts)) - ((omegadot * (ts ** 2.0)) / 2.0))))
        op_xs = op.x(ts)
        op_ys = op.y(ts)
        assert (numpy.amax(numpy.fabs((o_xs - op_xs))) < tol), f'Integrating an orbit in the acceleratingly-rotating LSR frame does not agree with the equivalent orbit in the inertial frame for method {method}'
        assert (numpy.amax(numpy.fabs((o_ys - op_ys))) < tol), f'Integrating an orbit in the acceleratingly-rotating LSR frame does not agree with the equivalent orbit in the inertial frame for method {method}'
    # Looser tolerance for the pure-Python integrator, tight for the C one.
    check_orbit(method='odeint', tol=1e-06)
    check_orbit(method='dop853_c', tol=1e-09)
    return None
def dla_profile(lambda_, z_abs, nhi):
    """Transmission of a damped Lyman-alpha absorber.

    Combines the Lyman-alpha and Lyman-beta optical depths for an absorber
    at redshift ``z_abs`` with HI column density ``nhi`` on the wavelength
    grid ``lambda_`` and returns exp(-tau_total).
    """
    tau_lya = compute_tau(lambda_, z_abs, nhi, LAMBDA_LYA, OSCILLATOR_STRENGTH_LYA, GAMMA_LYA)
    tau_lyb = compute_tau(lambda_, z_abs, nhi, LAMBDA_LYB, OSCILLATOR_STRENGTH_LYB, GAMMA_LYB)
    return np.exp((- tau_lya) - tau_lyb)
def load_cifar10(data_dir, use_augmentation=False):
    """Return ``(train_dataset, test_dataset)`` for CIFAR-10 rooted at
    ``data_dir`` (downloaded if missing).

    When ``use_augmentation`` is set, training samples get random 32x32
    crops (4-pixel padding) and horizontal flips; the test split is only
    converted to tensors.
    """
    to_tensor = transforms.Compose([transforms.ToTensor()])
    if use_augmentation:
        train_tf = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(0.5),
            transforms.ToTensor(),
        ])
    else:
        train_tf = to_tensor
    train_set = torchvision.datasets.CIFAR10(root=data_dir, train=True, download=True, transform=train_tf)
    test_set = torchvision.datasets.CIFAR10(root=data_dir, train=False, download=True, transform=to_tensor)
    return (train_set, test_set)
class ImageModel(nn.Module):
    """Thin wrapper that runs a single frame through the segmentation
    backbone ``Network``."""

    def __init__(self, args):
        super(ImageModel, self).__init__()
        self.args = args
        # Backbone is built without PVTv2 pretrained weights; the input size
        # comes from the training configuration.
        self.backbone = Network(pvtv2_pretrained=False, imgsize=self.args.trainsize)

    def forward(self, frame):
        return self.backbone(frame)
def setup_localize_trainer(config, dataloader_object):
    """Build the localization trainer (model loaded and frozen in eval mode)
    plus a classification evaluator.

    Returns ``(trainer, evaluator)``; the trainer's weights come from
    ``<localization_model_path>/model.pt``.
    """
    model_path = os.path.join(config.localization_model_path, 'model.pt')
    print_msg('FL Model Path: %s' % model_path, 'LocalizeTrainerSetup')
    classify_evaluator = ClassificationEvaluator(config, config.output_dir)
    # Localization uses its own tokenizer; swap it in before trainer creation.
    dataloader_object.token_tokenizer = dataloader_object.ltoken_tokenizer
    experiment = get_experiment_enum(config.localization_model_type, None)
    ltrainer = TrainerFactory().get_trainer(config, dataloader_object, experiment)
    ltrainer.legacy = config.loc_legacy
    ltrainer.setup_model()
    ltrainer.load_pymodel(model_path)
    ltrainer.model.eval()
    return (ltrainer, classify_evaluator)
class RopchainJob(job_class):
    """Job that builds a ROP chain for the target binary using ROPGadget."""

    def __init__(self):
        super().__init__()
        self.script_file = __file__
        self.rop_tool = 'ropgadget'

    def run_rop_tool(self):
        # self.binary / self.input / self.ropchain / self.bad_chars /
        # self.timeout are presumably provided by the job_class base --
        # confirm against its definition.
        tool = ROPGadget(self.binary, self.input, self, self.ropchain, self.bad_chars)
        tool.run(self.timeout)
def main(_):
    """Behavior-cloning training loop driven by absl FLAGS: builds the
    env/dataset, optionally subsamples the dataset, trains a BCLearner, and
    logs training/eval metrics to TensorBoard plus a per-seed text file."""
    summary_writer = SummaryWriter(os.path.join(FLAGS.save_dir, 'tb', str(FLAGS.seed)))
    video_save_folder = (None if (not FLAGS.save_video) else os.path.join(FLAGS.save_dir, 'video', 'eval'))
    (env, dataset) = make_env_and_dataset(FLAGS.env_name, FLAGS.seed, FLAGS.dataset_name, video_save_folder)
    # Optional dataset filtering: random subsample and/or top-return slice.
    if (FLAGS.percentage < 100.0):
        dataset.take_random(FLAGS.percentage)
    if (FLAGS.percentile < 100.0):
        dataset.take_top(FLAGS.percentile)
    kwargs = dict(FLAGS.config)
    kwargs['num_steps'] = FLAGS.max_steps
    # Sampled observation/action only provide concrete shapes to the learner.
    agent = BCLearner(FLAGS.seed, env.observation_space.sample()[np.newaxis], env.action_space.sample()[np.newaxis], **kwargs)
    eval_returns = []
    for i in tqdm.tqdm(range(1, (FLAGS.max_steps + 1)), smoothing=0.1, disable=(not FLAGS.tqdm)):
        batch = dataset.sample(FLAGS.batch_size)
        update_info = agent.update(batch)
        if ((i % FLAGS.log_interval) == 0):
            for (k, v) in update_info.items():
                summary_writer.add_scalar(f'training/{k}', v, i)
            summary_writer.flush()
        if ((i % FLAGS.eval_interval) == 0):
            eval_stats = evaluate(agent, env, FLAGS.eval_episodes)
            for (k, v) in eval_stats.items():
                summary_writer.add_scalar(f'evaluation/average_{k}s', v, i)
            summary_writer.flush()
            eval_returns.append((i, eval_stats['return']))
            # Rewritten in full after every evaluation so partial runs keep results.
            np.savetxt(os.path.join(FLAGS.save_dir, f'{FLAGS.seed}.txt'), eval_returns, fmt=['%d', '%.1f'])
# NOTE(review): bare `_module()` call looks like residue from automated
# extraction/registration tooling -- verify it is intentional.
_module()
class PointRend(TwoStageDetector):
    """PointRend detector: a TwoStageDetector configured with the given
    backbone/RPN/RoI heads; all construction is delegated to the base class."""
    def __init__(self, backbone, rpn_head, roi_head, train_cfg, test_cfg, neck=None, pretrained=None):
        super(PointRend, self).__init__(backbone=backbone, neck=neck, rpn_head=rpn_head, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained)
class Crop(ImgLandmarksTransform):
    """Crop image(s) to (scaled, optionally squared) bounding boxes,
    adjusting landmarks accordingly.

    Args:
        bbox_scale: multiplicative enlargement of the bounding box.
        bbox_square: if True, the scaled box is made square.
        det_format: if True, boxes are [x1, y1, x2, y2] (detector format)
            and converted to [x, y, w, h]; otherwise boxes are assumed to
            already be [x, y, w, h].
        border: padding mode when the crop exceeds the image bounds
            ('constant', 'repeat', or 'reflect').
        value: fill value for constant-border padding.
    """

    def __init__(self, bbox_scale=1.2, bbox_square=True, det_format=True, border='constant', value=None):
        self.bbox_scale = bbox_scale
        self.bbox_square = bbox_square
        self.det_format = det_format
        if (border == 'repeat'):
            self.border = cv2.BORDER_REPLICATE
        elif (border == 'reflect'):
            self.border = cv2.BORDER_REFLECT_101
        else:
            self.border = cv2.BORDER_CONSTANT
        self.value = value

    def process(self, img_list, bbox_list, landmarks_list=None):
        """Crop every image in `img_list` in place, recursing into nested
        lists/tuples. Returns (img_list, landmarks_list)."""
        for i in range(len(img_list)):
            if isinstance(img_list[i], (list, tuple)):
                if (landmarks_list is None):
                    (img_list[i], _) = self.process(img_list[i], bbox_list[i])
                else:
                    (img_list[i], landmarks_list[i]) = self.process(img_list[i], bbox_list[i], landmarks_list[i])
            else:
                if self.det_format:
                    # [x1, y1, x2, y2] -> [x, y, w, h]
                    bbox = np.concatenate((bbox_list[i][:2], (bbox_list[i][2:] - bbox_list[i][:2])))
                else:
                    bbox = bbox_list[i]
                bbox_scaled = scale_bbox(bbox, self.bbox_scale, self.bbox_square)
                if (landmarks_list is None):
                    img_list[i] = crop_img(img_list[i], bbox_scaled, border=self.border, value=self.value)
                else:
                    (img_list[i], landmarks_list[i]) = crop_img(img_list[i], bbox_scaled, landmarks_list[i], self.border, self.value)
        return (img_list, landmarks_list)

    # BUG FIX: the original class defined __call__ twice; the first
    # definition (which unconditionally raised RuntimeError("Bounding box
    # must be specified!")) was dead code, silently shadowed by this one.
    # It has been removed. Calling without a bbox raises the standard
    # TypeError for a missing positional argument, exactly as before.
    def __call__(self, img, bbox, landmarks=None):
        img_list = (img if isinstance(img, (list, tuple)) else [img])
        bbox_list = (bbox if isinstance(bbox, (list, tuple)) else [bbox])
        if (landmarks is None):
            landmarks_list = None
            assert (len(img_list) == len(bbox_list))
        else:
            landmarks_list = (landmarks if isinstance(landmarks, (list, tuple)) else [landmarks])
            assert (len(img_list) == len(bbox_list) == len(landmarks_list))
        (img_list, landmarks_list) = self.process(img_list, bbox_list, landmarks_list)
        if (isinstance(img, (list, tuple)) or isinstance(landmarks, (list, tuple))):
            return (img_list, landmarks_list)
        else:
            return (img_list[0], (landmarks_list[0] if (landmarks_list is not None) else None))

    def __repr__(self):
        return (self.__class__.__name__ + '(bbox_scale={0}, bbox_square={1})'.format(self.bbox_scale, self.bbox_square))
def validation_step(model, batch, device):
    """Run one validation forward pass.

    ``batch`` is (images, labels, clabels); loss and accuracy are computed
    against the coarse labels ``clabels`` (the fine ``labels`` are unused).
    Returns a dict with detached 'Loss' and 'Acc'.
    """
    images, _labels, clabels = batch
    images = images.to(device)
    clabels = clabels.to(device)
    logits = model(images)
    return {
        'Loss': F.cross_entropy(logits, clabels).detach(),
        'Acc': accuracy(logits, clabels),
    }
class AsrDataset(FairseqDataset):
    """Fairseq speech-to-text dataset: loads waveforms lazily, extracts
    Kaldi-style log-mel filterbank features with mean/variance
    normalization, and collates (features, target tokens) pairs."""

    def __init__(self, aud_paths, aud_durations_ms, tgt, tgt_dict, ids, speakers, num_mel_bins=80, frame_length=25.0, frame_shift=10.0):
        # frame_length / frame_shift are in milliseconds, matching the
        # aud_durations_ms units; every utterance must span at least one frame.
        assert (frame_length > 0)
        assert (frame_shift > 0)
        assert all(((x > frame_length) for x in aud_durations_ms))
        # Number of analysis frames per utterance (standard framing formula).
        self.frame_sizes = [int((1 + ((d - frame_length) / frame_shift))) for d in aud_durations_ms]
        # All parallel metadata lists must line up one-to-one.
        assert (len(aud_paths) > 0)
        assert (len(aud_paths) == len(aud_durations_ms))
        assert (len(aud_paths) == len(tgt))
        assert (len(aud_paths) == len(ids))
        assert (len(aud_paths) == len(speakers))
        self.aud_paths = aud_paths
        self.tgt_dict = tgt_dict
        self.tgt = tgt
        self.ids = ids
        self.speakers = speakers
        self.num_mel_bins = num_mel_bins
        self.frame_length = frame_length
        self.frame_shift = frame_shift
        # Seq2seq collator pads with the target dictionary's pad/eos and
        # moves EOS to the beginning for teacher forcing.
        self.s2s_collater = Seq2SeqCollater(0, 1, pad_index=self.tgt_dict.pad(), eos_index=self.tgt_dict.eos(), move_eos_to_beginning=True)

    def __getitem__(self, index):
        # Imported lazily so the module is usable without torchaudio installed.
        import torchaudio
        import torchaudio.compliance.kaldi as kaldi
        tgt_item = (self.tgt[index] if (self.tgt is not None) else None)
        path = self.aud_paths[index]
        if (not os.path.exists(path)):
            raise FileNotFoundError('Audio file not found: {}'.format(path))
        # NOTE(review): torchaudio.load_wav was removed in newer torchaudio
        # releases -- this requires an old torchaudio version; verify.
        (sound, sample_rate) = torchaudio.load_wav(path)
        output = kaldi.fbank(sound, num_mel_bins=self.num_mel_bins, frame_length=self.frame_length, frame_shift=self.frame_shift)
        # Per-utterance mean/variance normalization of the filterbanks.
        output_cmvn = data_utils.apply_mv_norm(output)
        return {'id': index, 'data': [output_cmvn.detach(), tgt_item]}

    def __len__(self):
        return len(self.aud_paths)

    def collater(self, samples):
        """Merge a list of samples into a padded mini-batch."""
        return self.s2s_collater.collate(samples)

    def num_tokens(self, index):
        # Token budget is measured in audio frames, not target tokens.
        return self.frame_sizes[index]

    def size(self, index):
        """Return (source frame count, target token count) used for batching."""
        return (self.frame_sizes[index], (len(self.tgt[index]) if (self.tgt is not None) else 0))

    def ordered_indices(self):
        # No length-based reordering: iterate in natural order.
        return np.arange(len(self))
def create_crossval_splits(args: Args):
    """Generate nested cross-validation splits (random or scaffold-based)
    and pickle them under ``save_dir/<split_type>/fold_<i>/``.

    For each of the first ``test_folds_to_test`` folds used as the test
    fold, ``val_folds_per_test`` rotations of the validation fold are
    written, each as [train, val, test] index arrays."""
    data = get_data(args.data_path)
    num_data = len(data)
    # Partition all indices into num_folds folds, either randomly or by
    # molecular scaffold (scaffold splits need the data itself).
    if (args.split_type == 'random'):
        all_indices = list(range(num_data))
        fold_indices = split_indices(all_indices, args.num_folds, scaffold=False)
    elif (args.split_type == 'scaffold'):
        all_indices = list(range(num_data))
        fold_indices = split_indices(all_indices, args.num_folds, scaffold=True, data=data)
    else:
        raise ValueError
    # Shuffle fold order so fold_0 is not systematically "first" data.
    random.shuffle(fold_indices)
    for i in range(args.test_folds_to_test):
        all_splits = []
        # j rotates the validation fold around the fixed test fold i.
        for j in range(1, (args.val_folds_per_test + 1)):
            os.makedirs(os.path.join(args.save_dir, args.split_type, f'fold_{i}', f'{(j - 1)}'), exist_ok=True)
            with open(os.path.join(args.save_dir, args.split_type, f'fold_{i}', f'{(j - 1)}', 'split_indices.pckl'), 'wb') as wf:
                val_idx = ((i + j) % args.num_folds)
                val = fold_indices[val_idx]
                test = fold_indices[i]
                # Train = every fold that is neither test nor validation.
                train = []
                for k in range(args.num_folds):
                    if ((k != i) and (k != val_idx)):
                        train.append(fold_indices[k])
                train = np.concatenate(train)
                pickle.dump([train, val, test], wf)
            all_splits.append([train, val, test])
        # Also write the aggregate of all rotations for this test fold.
        with open(os.path.join(args.save_dir, args.split_type, f'fold_{i}', 'split_indices.pckl'), 'wb') as wf:
            pickle.dump(all_splits, wf)
def get_at_indices(tensor, indices):
    """Batched row lookup: returns ``tensor[i, indices[i]]`` for each row i."""
    row_ids = tf.range(tf.shape(indices, out_type=indices.dtype)[0])
    gather_idx = tf.stack((row_ids, indices), (- 1))
    return tf.gather_nd(tensor, gather_idx)
def decode_thermistors_message(bin_msg, print_decoded=False, print_debug_information=False):
    """Decode a binary thermistors message into metadata plus packets.

    Layout (established by the checks below): byte 0 is the kind marker
    'T', byte 1 the number of thermistor measurements, then a sequence of
    fixed-length packets each starting with 'P', terminated by 'E'.
    Returns (Thermistors_Metadata, list of decoded packets).
    """
    if print_decoded:
        print(' START DECODE THERMISTORS MESSAGE ')
    # Sanity-check the message framing: kind marker and trailing 'E'.
    assert (message_kind(bin_msg) == 'T')
    assert (byte_to_char(bin_msg[(- 1)]) == 'E')
    if print_debug_information:
        print('received message of length: {}'.format(len(bin_msg)))
    # The payload length must be an exact multiple of the packet length.
    expected_message_length = int(((len(bin_msg) - _BD_THERM_MSG_FIXED_LENGTH) / _BD_THERM_PACKET_LENGTH))
    assert (((expected_message_length * _BD_THERM_PACKET_LENGTH) + _BD_THERM_MSG_FIXED_LENGTH) == len(bin_msg))
    nbr_thermistors_measurements = one_byte_to_int(bin_msg[1:2])
    message_metadata = Thermistors_Metadata(nbr_thermistors_measurements=nbr_thermistors_measurements)
    if print_decoded:
        print(message_metadata)
    # Packets start right after the 2-byte header (kind + count).
    crrt_packet_start = 2
    list_decoded_packets = []
    while True:
        if print_decoded:
            print('----- START PACKET -----')
        crrt_byte_start = byte_to_char(bin_msg[crrt_packet_start])
        assert (crrt_byte_start == 'P')
        crrt_decoded_packet = decode_thermistors_packet(bin_msg[crrt_packet_start:(crrt_packet_start + _BD_THERM_PACKET_LENGTH)], print_decoded=print_decoded, print_debug_information=print_debug_information)
        list_decoded_packets.append(crrt_decoded_packet)
        # The byte after the packet is either the next packet's 'P' or the
        # terminating 'E'.
        trailing_char = byte_to_char(bin_msg[(crrt_packet_start + _BD_THERM_PACKET_LENGTH)])
        assert (trailing_char in ['P', 'E'])
        if (trailing_char == 'E'):
            break
        else:
            crrt_packet_start += _BD_THERM_PACKET_LENGTH
        # NOTE: this end-of-packet banner is skipped for the final packet
        # because the loop breaks above before reaching it.
        if print_decoded:
            print('----- END PACKET -----')
    assert (expected_message_length == len(list_decoded_packets))
    if print_decoded:
        print(' DONE DECODE THERMISTORS MESSAGE ')
    return (message_metadata, list_decoded_packets)
def main():
    """Start the NeuralChat server for assisted generation using the local
    YAML config, logging to ./assisted_gen.log."""
    executor = NeuralChatServerExecutor()
    executor(config_file='./assisted_gen.yaml', log_file='./assisted_gen.log')
def calculate_pixel_accuracy(out_value, mace_out_value, output_shape, output_data_format):
    """Fraction of rows where the reference and MACE outputs agree on the
    argmax class.

    Both flat arrays are reshaped to ``output_shape``; 4-D NCHW outputs are
    first transposed to NHWC so the class axis is last. Outputs with fewer
    than two axes trivially score 1.0.
    """
    out_value = out_value.reshape(output_shape)
    mace_out_value = mace_out_value.reshape(output_shape)
    if ((len(output_shape) == 4) and (output_data_format == DataFormat.NCHW)):
        out_value = out_value.transpose([0, 2, 3, 1])
        mace_out_value = mace_out_value.transpose([0, 2, 3, 1])
    if (len(out_value.shape) < 2):
        return 1.0
    # Collapse all leading axes; the last axis holds the class scores.
    out_value = out_value.reshape(((- 1), out_value.shape[(- 1)]))
    mace_out_value = mace_out_value.reshape(out_value.shape)
    # Vectorized replacement for the original per-row Python argmax loop.
    matches = (np.argmax(out_value, axis=1) == np.argmax(mace_out_value, axis=1))
    return float(np.mean(matches))
class ScheduledOptim():
    """Optimizer wrapper implementing the Transformer warmup schedule:
    lr = d_model^-0.5 * min(step^-0.5, step * n_warmup_steps^-1.5)."""

    def __init__(self, optimizer, d_model, n_warmup_steps):
        self._optimizer = optimizer
        self.n_warmup_steps = n_warmup_steps
        self.n_current_steps = 0
        self.init_lr = np.power(d_model, (- 0.5))

    def step_and_update_lr(self):
        """Advance the schedule, set the new lr, then step the optimizer."""
        self._update_learning_rate()
        self._optimizer.step()

    def zero_grad(self):
        self._optimizer.zero_grad()

    def _get_lr_scale(self):
        # Linear warmup (second term) dominates until it crosses the
        # inverse-sqrt decay (first term) at step == n_warmup_steps.
        decay = np.power(self.n_current_steps, (- 0.5))
        warmup = np.power(self.n_warmup_steps, (- 1.5)) * self.n_current_steps
        return np.min([decay, warmup])

    def _update_learning_rate(self):
        self.n_current_steps += 1
        new_lr = self.init_lr * self._get_lr_scale()
        for group in self._optimizer.param_groups:
            group['lr'] = new_lr
def is_cuda_and_apex_available():
    """True when running on a CUDA device and NVIDIA apex is importable."""
    if not (torch.cuda.is_available() and (torch_device == 'cuda')):
        return False
    return is_apex_available()
class NegativeGraph(object):
    """Builds a negative-sample heterograph: for every source of a positive
    edge, draws a random destination node the source has no history with."""

    def __init__(self, dic):
        # Presumably maps source node id -> collection of historically
        # interacted destination ids to exclude -- verify against callers.
        self.historical_dic = dic

    def __call__(self, graph, etype):
        (utype, _, vtype) = etype
        (src, _) = graph.edges(etype=etype)
        dst = []
        for i in tqdm(range(src.shape[0])):
            s = int(src[i])
            # Rejection-sample until the drawn item is not in the source's
            # historical set; may loop long if the set is near-complete.
            while True:
                negitem = np.random.randint(0, graph.num_nodes(vtype))
                if (negitem in self.historical_dic[s]):
                    continue
                else:
                    break
            dst.append(negitem)
        dst = torch.tensor(dst, device=src.device)
        # New heterograph with the same node counts but negative edges only.
        return dgl.heterograph({etype: (src, dst)}, num_nodes_dict={ntype: graph.number_of_nodes(ntype) for ntype in graph.ntypes}).to(graph.device)
class DPMSolverSDEScheduler(metaclass=DummyObject):
    """Placeholder emitted when the real scheduler's backends are missing:
    every entry point just reports the required backends."""
    _backends = ['torch', 'torchsde']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'torchsde'])

    # BUG FIX: these alternate constructors are class-level entry points and
    # were missing @classmethod, so a direct class call would have bound the
    # first positional argument to `cls`. Upstream diffusers dummy objects
    # declare them as classmethods.
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'torchsde'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'torchsde'])
class ConvLSTMCell(nn.Module):
    """Convolutional LSTM cell conditioned on both the previous spatial
    state and a separate temporal hidden state.

    A single Conv2d over the concatenation of (input, previous spatial
    hidden, temporal hidden) produces all four gates at once.
    """

    def __init__(self, args, input_size, hidden_size, kernel_size, padding):
        super(ConvLSTMCell, self).__init__()
        self.use_gpu = args.use_gpu
        self.input_size = input_size
        self.hidden_size = hidden_size
        # input + two hidden streams in; 4 * hidden_size gate channels out.
        self.Gates = nn.Conv2d(input_size + (2 * hidden_size), 4 * hidden_size, kernel_size, padding=padding)

    def forward(self, input_, prev_state_spatial, hidden_state_temporal):
        batch_size = input_.data.size()[0]
        spatial_size = input_.data.size()[2:]
        state_size = [batch_size, self.hidden_size] + list(spatial_size)

        def _zero_state():
            zeros = Variable(torch.zeros(state_size))
            return zeros.cuda() if self.use_gpu else zeros

        # Missing states default to zeros of the matching spatial shape.
        if prev_state_spatial is None:
            prev_state_spatial = (_zero_state(), _zero_state())
        if hidden_state_temporal is None:
            hidden_state_temporal = _zero_state()

        prev_hidden_spatial, prev_cell_spatial = prev_state_spatial
        stacked = torch.cat([input_, prev_hidden_spatial, hidden_state_temporal], 1)
        in_gate, remember_gate, out_gate, cell_gate = self.Gates(stacked).chunk(4, 1)

        # Standard LSTM cell update with sigmoid gates and tanh candidates.
        cell = (torch.sigmoid(remember_gate) * prev_cell_spatial) + (torch.sigmoid(in_gate) * torch.tanh(cell_gate))
        hidden = torch.sigmoid(out_gate) * torch.tanh(cell)
        return [hidden, cell]
def launch(main_func, num_gpus_per_machine, num_machines=1, machine_rank=0, dist_url=None, args=()):
    """Run ``main_func`` either directly (single process) or spawned across
    ``num_machines * num_gpus_per_machine`` distributed worker processes."""
    world_size = num_machines * num_gpus_per_machine
    if world_size <= 1:
        # Single process: no distributed setup needed.
        main_func(*args)
        return
    if dist_url == 'auto':
        assert (num_machines == 1), 'dist_url=auto not supported in multi-machine jobs.'
        # Pick a free local port for the rendezvous endpoint.
        port = _find_free_port()
        dist_url = f'tcp://127.0.0.1:{port}'
    if (num_machines > 1) and dist_url.startswith('file://'):
        logging.getLogger(__name__).warning('file:// is not a reliable init_method in multi-machine jobs. Prefer tcp://')
    mp.spawn(_distributed_worker, nprocs=num_gpus_per_machine,
             args=(main_func, world_size, num_gpus_per_machine, machine_rank, dist_url, args),
             daemon=False)
def get_random_pos_on_map(free_space_indices, map_: OccupancyGrid, safe_dist: float, forbidden_zones: list=None):
    """Pick a random free pose (x, y, theta) on an occupancy grid.

    A candidate cell is accepted when it keeps `safe_dist` clearance from
    occupied cells and lies outside every circular forbidden zone
    (x, y, radius). Raises after 100 failed attempts."""
    def is_pos_valid(x_in_meters, y_in_meters):
        # Reject positions inside any forbidden circle (inflated by safe_dist).
        for forbidden_zone in forbidden_zones:
            if ((((x_in_meters - forbidden_zone[0]) ** 2) + ((y_in_meters - forbidden_zone[1]) ** 2)) < ((forbidden_zone[2] + safe_dist) ** 2)):
                return False
        # Check every grid cell within safe_dist of the candidate position.
        cell_radius = int((safe_dist / map_.info.resolution))
        x_index = int(((x_in_meters - map_.info.origin.position.x) // map_.info.resolution))
        y_index = int(((y_in_meters - map_.info.origin.position.y) // map_.info.resolution))
        for i in range((x_index - cell_radius), (x_index + cell_radius), 1):
            for j in range((y_index - cell_radius), (y_index + cell_radius), 1):
                # Row-major cell index into the flattened occupancy data.
                index = ((j * map_.info.width) + i)
                if (index >= len(map_.data)):
                    return False
                try:
                    value = map_.data[index]
                except IndexError:
                    print(('IndexError: index: %d, map_length: %d' % (index, len(map_.data))))
                    return False
                # Any non-zero occupancy value counts as blocked.
                if (value != 0):
                    return False
        return True
    assert ((len(free_space_indices) == 2) and (len(free_space_indices[0]) == len(free_space_indices[1]))), 'free_space_indices is not correctly setup'
    if (forbidden_zones is None):
        forbidden_zones = []
    n_freespace_cells = len(free_space_indices[0])
    pos_valid = False
    n_check_failed = 0
    (x_in_meters, y_in_meters) = (None, None)
    # Rejection sampling over the known free cells.
    while (not pos_valid):
        idx = random.randint(0, (n_freespace_cells - 1))
        # free_space_indices is (rows, cols), i.e. (y cells, x cells).
        (y_in_cells, x_in_cells) = (free_space_indices[0][idx], free_space_indices[1][idx])
        y_in_meters = ((y_in_cells * map_.info.resolution) + map_.info.origin.position.y)
        x_in_meters = ((x_in_cells * map_.info.resolution) + map_.info.origin.position.x)
        pos_valid = is_pos_valid(x_in_meters, y_in_meters)
        if (not pos_valid):
            n_check_failed += 1
            if (n_check_failed > 100):
                raise Exception("cann't find any no-occupied space please check the map information")
    # Orientation is uniformly random in (-pi, pi).
    theta = random.uniform((- math.pi), math.pi)
    return (x_in_meters, y_in_meters, theta)
def clip_featurize_data(dataset, device, pretrained):
    """Encode every image in ``dataset`` with the half-precision CLIP image
    encoder; returns (features, labels) as concatenated numpy arrays."""
    features, labels = [], []
    loader = DataLoader(dataset, batch_size=128, num_workers=16)
    with torch.no_grad():
        for x, y in tqdm.tqdm(loader):
            encoded = pretrained.encode_image(x.to(device).half())
            features.append(encoded.cpu().numpy())
            labels.append(y.cpu().numpy())
    return (np.concatenate(features), np.concatenate(labels))
def get_split(split_name, dataset_dir, file_pattern=None, reader=None):
    """Build a tf-slim Dataset for one ImageNet-style split.

    Resolves the TFRecord file pattern, wires up the example decoder
    (image, label, label text, object boxes/labels), and attaches a
    label-id -> name mapping (read from disk or generated and cached).
    Raises ValueError for an unknown split name."""
    if (split_name not in _SPLITS_TO_SIZES):
        raise ValueError(('split name %s was not recognized.' % split_name))
    if (not file_pattern):
        file_pattern = _FILE_PATTERN
    file_pattern = os.path.join(dataset_dir, (file_pattern % split_name))
    if (reader is None):
        reader = tf.TFRecordReader
    # Feature spec for the serialized tf.Examples in the TFRecords.
    keys_to_features = {'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''), 'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'), 'image/class/label': tf.FixedLenFeature([], dtype=tf.int64, default_value=(- 1)), 'image/class/text': tf.FixedLenFeature([], dtype=tf.string, default_value=''), 'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32), 'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32), 'image/object/class/label': tf.VarLenFeature(dtype=tf.int64)}
    # How decoded features map onto the items consumers request.
    items_to_handlers = {'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'), 'label': slim.tfexample_decoder.Tensor('image/class/label'), 'label_text': slim.tfexample_decoder.Tensor('image/class/text'), 'object/bbox': slim.tfexample_decoder.BoundingBox(['ymin', 'xmin', 'ymax', 'xmax'], 'image/object/bbox/'), 'object/label': slim.tfexample_decoder.Tensor('image/object/class/label')}
    decoder = slim.tfexample_decoder.TFExampleDecoder(keys_to_features, items_to_handlers)
    labels_to_names = None
    # Prefer the cached label file; otherwise generate and persist it.
    if dataset_utils.has_labels(dataset_dir):
        labels_to_names = dataset_utils.read_label_file(dataset_dir)
    else:
        labels_to_names = create_readable_names_for_imagenet_labels()
        dataset_utils.write_label_file(labels_to_names, dataset_dir)
    return slim.dataset.Dataset(data_sources=file_pattern, reader=reader, decoder=decoder, num_samples=_SPLITS_TO_SIZES[split_name], items_to_descriptions=_ITEMS_TO_DESCRIPTIONS, num_classes=_NUM_CLASSES, labels_to_names=labels_to_names)
def load_hyperparameters_json(PATHS: dict, from_scratch: bool=False, config_name: str='default'):
    """Load hyperparameters from JSON.

    When ``from_scratch`` is True, reads ``<hyperparams>/<config_name>.json``;
    otherwise reads ``<model>/hyperparameters.json``. Raises
    FileNotFoundError when the expected file is missing.
    """
    if from_scratch:
        doc_location = os.path.join(PATHS.get('hyperparams'), config_name + '.json')
    else:
        doc_location = os.path.join(PATHS.get('model'), 'hyperparameters.json')
    if not os.path.isfile(doc_location):
        if from_scratch:
            raise FileNotFoundError("Found no '%s.json' in %s" % (config_name, PATHS.get('hyperparams')))
        raise FileNotFoundError("Found no 'hyperparameters.json' in %s" % PATHS.get('model'))
    with open(doc_location, 'r') as file:
        hyperparams = json.load(file)
    # Validate structure before handing the dict back to the caller.
    check_hyperparam_format(loaded_hyperparams=hyperparams, PATHS=PATHS)
    return hyperparams
class AttentionLayer(nn.Module):
    """Top-down attention over image features, configured from kwargs:
    'modal_combine' (image/question fusion), 'transform' (score projection),
    and 'normalization' (attention normalization)."""

    def __init__(self, image_dim, question_dim, **kwargs):
        super(AttentionLayer, self).__init__()
        combine_type = kwargs['modal_combine']['type']
        combine_params = kwargs['modal_combine']['params']
        modal_combine_layer = ModalCombineLayer(combine_type, image_dim, question_dim, **combine_params)
        transform_type = kwargs['transform']['type']
        transform_params = kwargs['transform']['params']
        transform_layer = TransformLayer(transform_type, modal_combine_layer.out_dim, **transform_params)
        normalization = kwargs['normalization']
        self.module = TopDownAttention(modal_combine_layer, transform_layer, normalization)
        # BUG FIX: was `if getattr(self.module, 'out_dim'):`, which raises
        # AttributeError when the attribute is missing instead of skipping
        # the copy -- hasattr gives the intended conditional behavior.
        if hasattr(self.module, 'out_dim'):
            self.out_dim = self.module.out_dim

    def forward(self, *args, **kwargs):
        return self.module(*args, **kwargs)
def test_fps():
    """Regression test for CUDA furthest-point sampling: checks the selected
    indices on two fixed 5-point clouds (skipped when CUDA is unavailable)."""
    if (not torch.cuda.is_available()):
        pytest.skip()
    # Two batches of five 3-D points each.
    xyz = torch.tensor([[[(- 0.2748), 1.002, (- 1.1674)], [0.1015, 1.3952, (- 1.2681)], [(- 0.807), 2.4137, (- 0.5845)], [(- 1.0001), 2.1982, (- 0.5859)], [0.3841, 1.8983, (- 0.7431)]], [[(- 1.0696), 3.0758, (- 0.1899)], [(- 0.2559), 3.5521, (- 0.1402)], [0.8164, 4.0081, (- 0.1839)], [(- 1.1), 3.0213, (- 0.8205)], [(- 0.0518), 3.7251, (- 0.395)]]]).cuda()
    idx = furthest_point_sample(xyz, 3)
    # Expected indices precomputed for these fixed point clouds.
    expected_idx = torch.tensor([[0, 2, 4], [0, 2, 1]]).cuda()
    assert torch.all((idx == expected_idx))
def _create_shared_memory(name, create, size=0):
if (not create):
try:
return SharedMemory(name=name)
except FileNotFoundError:
return None
try:
shm = SharedMemory(name=name, create=create, size=size)
except FileExistsError:
shm = SharedMemory(name=name)
if (shm.size != size):
logger.info('Re-create a new memory buffer.')
shm.unlink()
shm = SharedMemory(name=name, create=create, size=size)
return shm |
# NOTE(review): the following three lines were dataset-viewer page chrome
# accidentally captured into this file and are not Python code; preserved
# here as a comment so the file stays parseable:
#   Subsets and Splits
#   No community queries yet
#   The top public SQL queries from the community will appear here once available.