code stringlengths 101 5.91M |
|---|
def check_release_file(run_lambda):
    """Return the distro's PRETTY_NAME parsed from /etc/*-release, or None."""
    pattern = 'PRETTY_NAME="(.*)"'
    command = 'cat /etc/*-release'
    return run_and_parse_first_match(run_lambda, command, pattern)
def find_path(start, goal, neighbors_fnct, reversePath=False, heuristic_cost_estimate_fnct=(lambda a, b: Infinite), distance_between_fnct=(lambda a, b: 1.0), is_goal_reached_fnct=(lambda a, b: (a == b))):
    """Run an A* search from `start` to `goal` using the supplied callables.

    A throwaway AStar subclass delegates each algorithm hook to the
    corresponding function argument, so callers can use A* without
    subclassing themselves.
    """
    class _DelegatingAStar(AStar):
        def heuristic_cost_estimate(self, current, goal):
            return heuristic_cost_estimate_fnct(current, goal)

        def distance_between(self, n1, n2):
            return distance_between_fnct(n1, n2)

        def neighbors(self, node):
            return neighbors_fnct(node)

        def is_goal_reached(self, current, goal):
            return is_goal_reached_fnct(current, goal)

    return _DelegatingAStar().astar(start, goal, reversePath)
class Policy(nn.Module):
    """Abstract actor-critic policy head.

    Child classes (e.g. RNNPolicy, DVRLPolicy) implement the encoding of
    observations into a latent state; this base class owns the critic head,
    the action distribution, and the shared forward pass.
    """

    def __init__(self, action_space, encoding_dimension):
        super().__init__()
        self.critic_linear = nn.Linear(encoding_dimension, 1)
        self.h_dim = encoding_dimension
        # Pick the action distribution head by action-space type.
        if (action_space.__class__.__name__ == 'Discrete'):
            num_outputs = action_space.n
            self.dist = Categorical(encoding_dimension, num_outputs)
        elif (action_space.__class__.__name__ == 'Box'):
            num_outputs = action_space.shape[0]
            self.dist = DiagGaussian(encoding_dimension, num_outputs)
        else:
            raise NotImplementedError
        self.encoding_bn = nn.BatchNorm1d(encoding_dimension)

    def encode(self, observation, actions, previous_latent_state):
        raise NotImplementedError('Should be provided by child class, e.g. RNNPolicy or DVRLPolicy.')

    def new_latent_state(self):
        raise NotImplementedError('Should be provided by child class, e.g. RNNPolicy or DVRLPolicy.')

    def vec_conditional_new_latent_state(self, latent_state, mask):
        # FIX: this hook had no body (a bare `def` with no statements is a
        # syntax error). Like the sibling hooks above, the implementation
        # must be provided by the child class, so raise accordingly.
        raise NotImplementedError('Should be provided by child class, e.g. RNNPolicy or DVRLPolicy.')

    def forward(self, current_memory, deterministic=False):
        """Run one policy step over `current_memory`.

        Returns a PolicyReturn populated with latent state, value estimate,
        sampled action, action log-probs, and distribution entropy.
        """
        policy_return = PolicyReturn()
        device = next(self.parameters()).device

        def cudify_state(state, device):
            # Recursively move a (possibly nested tuple of) state tensors to `device`.
            if type(state) == tuple:
                return tuple([cudify_state(s, device) for s in state])
            else:
                return state.to(device)

        (state_tuple, merged_state) = self.encode(observation=current_memory['current_obs'].to(device), actions=current_memory['oneHotActions'].to(device).detach(), previous_latent_state=cudify_state(current_memory['states'], device))
        if self.policy_batch_norm:
            # NOTE(review): `state` is never used afterwards — the batch-normed
            # encoding looks like it was meant to replace `merged_state`; confirm.
            state = self.encoding_bn(merged_state)
        policy_return.latent_state = state_tuple
        policy_return.value_estimate = self.critic_linear(merged_state)
        action = self.dist.sample(merged_state, deterministic=deterministic)
        policy_return.action = action
        # Categorical returns the policy directly; DiagGaussian returns a pair.
        if self.dist.__class__.__name__ == 'Categorical':
            policy = self.dist(merged_state)
        else:
            (policy, _) = self.dist(merged_state)
        (action_log_probs, dist_entropy) = self.dist.logprobs_and_entropy(merged_state, action.detach())
        policy_return.action_log_probs = action_log_probs
        policy_return.dist_entropy = dist_entropy
        return policy_return
class _MemoryEfficientFP16OptimizerMixin(object):
    """Mixin adding loss-scale bookkeeping to an FP16 optimizer that keeps no
    flat FP32 master copy of the parameters.

    Instead of eagerly unscaling gradients, a multiplicative factor
    (`_multiply_factor`) accumulates the loss-scale undo, `multiply_grads`
    calls, and clipping coefficients, and is applied to the grads only once,
    right before the step.
    """

    def __init__(self, *args, **kwargs):
        # Forward args to the other classes in the MRO.
        super().__init__(*args, **kwargs)
        # Pending scalar to fold into the grads before the next step.
        self._multiply_factor = 1.0

    def has_flat_params(self):
        # This mixin never maintains a flattened parameter buffer.
        return False

    def state_dict(self):
        """Return the wrapped optimizer's state dict, plus the current loss scale."""
        state_dict = self.wrapped_optimizer.state_dict()
        if (self.scaler is not None):
            state_dict['loss_scale'] = self.scaler.loss_scale
        return state_dict

    def load_state_dict(self, state_dict, optimizer_overrides=None):
        """Restore optimizer state (and loss scale) from `state_dict`.

        NOTE(review): this method uses both `self.wrapped_optimizer` and
        `self.optimizer` — presumably the latter is an alias/property for the
        inner optimizer; confirm against the class this mixin is combined with.
        """
        if (('loss_scale' in state_dict) and (self.scaler is not None)):
            self.scaler.loss_scale = state_dict['loss_scale']
        self.wrapped_optimizer.load_state_dict(state_dict, optimizer_overrides)
        # Re-key the loaded per-parameter state onto the live parameter
        # objects: saved ids are matched positionally to current params.
        groups = self.optimizer.param_groups
        saved_groups = state_dict['param_groups']
        id_map = {old_id: p for (old_id, p) in zip(chain(*(g['params'] for g in saved_groups)), chain(*(g['params'] for g in groups)))}
        for (k, v) in state_dict['state'].items():
            if (k in id_map):
                param = id_map[k]
                self.optimizer.state[param] = v

    def backward(self, loss):
        """Backpropagate `loss`, scaling it first when a grad scaler is active."""
        if (self.scaler is not None):
            loss = self.scaler.scale(loss)
        loss.backward()

    def _unscale_grads(self):
        # Apply the accumulated multiplicative factor to the grads exactly once.
        if (self._multiply_factor != 1.0):
            self.wrapped_optimizer.multiply_grads(self._multiply_factor)
            self._multiply_factor = 1.0

    def multiply_grads(self, c):
        """Multiply grads by `c` (deferred until step/clip time)."""
        self._multiply_factor *= c

    def clip_grad_norm(self, max_norm, aggregate_norm_fn=None):
        """Clip gradient norm, folding the clip coefficient into the deferred
        factor. Returns the (rescaled) gradient norm before clipping."""
        max_norm = float(max_norm)
        # The raw norm is computed on still-scaled grads, so rescale it.
        grad_norm = (self._multiply_factor * self.wrapped_optimizer.clip_grad_norm(0, aggregate_norm_fn))
        if (self.scaler is not None):
            grad_norm_cpu = float(grad_norm)
            if (grad_norm_cpu > max_norm > 0.0):
                self._multiply_factor *= (max_norm / grad_norm_cpu)
            # Let the scaler detect inf/nan and adjust the loss scale.
            self.scaler.check_overflow(grad_norm_cpu)
        else:
            clip_coef = (max_norm / (grad_norm + 1e-06)).clamp_(max=1)
            self._multiply_factor *= clip_coef
        return grad_norm

    def step(self, closure=None):
        """Perform one optimization step, unscaling the grads first if needed."""
        if self.supports_step_with_scale:
            # The inner optimizer can divide by the scale itself.
            self.wrapped_optimizer.step(closure, scale=(1.0 / self._multiply_factor))
        else:
            self._unscale_grads()
            self.wrapped_optimizer.step(closure)
        if (self.scaler is not None):
            self.scaler.update()

    def zero_grad(self):
        """Clear grads and re-arm the deferred loss-scale undo factor."""
        self.wrapped_optimizer.zero_grad()
        if (self.scaler is not None):
            self._multiply_factor = (1.0 / float(self.scaler.loss_scale))
def scale_ocr_y(y, dimensions_scenegraph, dimensions_ocr):
    """Rescale a y coordinate from OCR image space to scene-graph image space.

    Both dimension arguments are indexable pairs; only index 1 (the height)
    is used.
    """
    scaled = y * dimensions_scenegraph[1]
    return scaled / dimensions_ocr[1]
def test_nested_malformed():
    """Exercise the error paths of load_state_dict for a complex (real+imag)
    nested parameter built by make_module."""
    shape = (11, 13, 7)
    module = make_module(*shape)
    # Supplying only one half of a complex parameter must fail.
    with pytest.raises(RuntimeError, match='Complex parameter requires both'):
        module.load_state_dict({'mod.par.real': torch.randn(*shape)})
    with pytest.raises(RuntimeError, match='Complex parameter requires both'):
        module.load_state_dict({'mod.par.imag': torch.randn(*shape)})
    # Extra keys under the complex parameter are rejected in strict mode.
    with pytest.raises(RuntimeError, match='disallows redundant'):
        module.load_state_dict({'mod.par.real': torch.randn(*shape), 'mod.par.imag': torch.randn(*shape), 'mod.par.bar': torch.randn(*shape), 'mod.par.foo': torch.randn(*shape)}, strict=True)
    # Non-strict loading ignores unrelated top-level keys and succeeds.
    module.load_state_dict({'mod.par.real': torch.zeros(*shape), 'mod.par.imag': torch.ones(*shape), 'bar': torch.randn(*shape), 'foo': torch.randn(*shape)}, strict=False)
    # Shape mismatches are reported both for the split halves...
    with pytest.raises(RuntimeError, match='size mismatch for'):
        module.load_state_dict({'mod.par.real': torch.randn(1, 1), 'mod.par.imag': torch.randn(1, 1)}, strict=True)
    # ...and for the combined parameter key.
    with pytest.raises(RuntimeError, match='size mismatch for'):
        module.load_state_dict({'mod.par': torch.randn(1, 1)}, strict=True)
    # The successful non-strict load above must have taken effect.
    assert torch.allclose(module.mod.par.real, torch.zeros(*shape))
    assert torch.allclose(module.mod.par.imag, torch.ones(*shape))
class SetVocab(dict):
    """Vocabulary mapping words to integer ids; id 0 is reserved for unknowns.

    The mapping itself is stored in the dict base class.
    """

    def __init__(self, vocab):
        super().__init__()
        self.update(vocab)

    def ws2ids(self, ws):
        """Map a sequence of words to ids; unknown words map to 0."""
        return [self.get(w, 0) for w in ws]

    def ids2sent(self, ids):
        """Map ids back to words; unknown ids map to 'UNK'.

        FIX: the membership test previously used the raw id (`i in idx2w`)
        while the lookup used `int(i)`; casting before the lookup keeps the
        two consistent for non-int ids (e.g. numpy scalars or tensors).
        """
        idx2w = {i: w for (w, i) in self.items()}
        return [idx2w.get(int(i), 'UNK') for i in ids]
class PyramidPoolingBranch(nn.Module):
    """One branch of a pyramid pooling module: adaptive average pooling,
    a 1x1 conv block, then bilinear upsampling back to the target size."""

    def __init__(self, in_channels, out_channels, pool_out_size, upscale_out_size):
        super(PyramidPoolingBranch, self).__init__()
        self.upscale_out_size = upscale_out_size
        self.pool = nn.AdaptiveAvgPool2d(pool_out_size)
        self.conv = conv1x1_block(in_channels=in_channels, out_channels=out_channels)

    def forward(self, x):
        # Fall back to the input's spatial size when no explicit size is set.
        if self.upscale_out_size is not None:
            target_size = self.upscale_out_size
        else:
            target_size = x.shape[2:]
        y = self.conv(self.pool(x))
        return F.interpolate(y, size=target_size, mode='bilinear', align_corners=True)
class PreActBlock(nn.Module):
    """Pre-activation ResNet basic block (BN -> ReLU -> conv ordering),
    with an optional 1x1 projection shortcut when the shape changes."""

    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(PreActBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = conv3x3(in_planes, planes, stride)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes)
        self.shortcut = nn.Sequential()
        needs_projection = stride != 1 or in_planes != self.expansion * planes
        if needs_projection:
            # Project the (pre-activated) input so it matches the main path.
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False))

    def forward(self, x):
        # The shortcut branches off AFTER the first BN+ReLU (pre-activation).
        pre = F.relu(self.bn1(x))
        identity = self.shortcut(pre)
        out = self.conv2(F.relu(self.bn2(self.conv1(pre))))
        return out + identity
def build_annoFile(root, save_annotation_root, is_full=True):
    """Build KITTI-2015 annotation JSON files (train/eval/test splits).

    Reads sample metadata from `root`, validates it, and writes one JSON
    file per split into `save_annotation_root`.
    """
    assert osp.exists(root), 'Path: {} not exists!'.format(root)
    mkdir_or_exist(save_annotation_root)
    trainMetas = getKITTI2015Metas(root, 'training', mode='training', is_full=is_full)
    evalMetas = getKITTI2015Metas(root, 'training', mode='evaluating', is_full=is_full)
    testMetas = getKITTI2015Metas(root, 'testing', mode='testing', is_full=True)
    # Validate every split before writing anything.
    for metas in (trainMetas, evalMetas, testMetas):
        check(root, metas)
    info_str = 'KITTI-2015 Dataset contains:\n {:5d} training samples \n {:5d} validation samples \n {:5d} testing samples'.format(len(trainMetas), len(evalMetas), len(testMetas))
    print(info_str)

    def _dump(name, metas):
        # Serialize one split to <save_annotation_root>/<name>.json.
        filepath = osp.join(save_annotation_root, (name + '.json'))
        print('Save to {}'.format(filepath))
        with open(file=filepath, mode='w') as fp:
            json.dump(metas, fp=fp)

    prefix = 'full_' if is_full else 'split_'
    for suffix, metas in (('train', trainMetas), ('eval', evalMetas), ('test', testMetas)):
        _dump(prefix + suffix, metas)
@pytest.fixture(scope='module')
def rleaky_hidden_reset_none_instance():
    """Module-scoped RLeaky instance with hidden state enabled and no reset.

    NOTE(review): the decorator was garbled to a bare ``(scope='module')`` in
    the source (a syntax error); ``@pytest.fixture(scope='module')`` is the
    standard form matching the surviving arguments — confirm against the
    original test file.
    """
    return snn.RLeaky(beta=0.5, V=0.5, all_to_all=False, init_hidden=True, reset_mechanism='none')
class AudioPersistenz():
    """Loads and saves audio files below a base path (via librosa/soundfile),
    addressing them by a relative id without file extension."""

    def __init__(self, loadPath: str, savePath: str=None, fileExtension: str='wav'):
        # savePath falls back to loadPath when not given.
        self.savePath = (loadPath if (savePath is None) else savePath)
        self.loadPath = loadPath
        self.fileExtension = fileExtension
        self.fileListUtil = FileListUtil()
        self.pathUtil = PathUtil()

    def load(self, id: str):
        """Load `<loadPath>/<id>.<fileExtension>` at its native sampling rate
        and wrap it in an Audio object."""
        audioTimeSeries: NDArray
        samplingRate: int
        targetPath = ((((self.loadPath + '/') + id) + '.') + self.fileExtension)
        name = self.pathUtil.filenameWithoutExtension(targetPath)
        # sr=None keeps the file's original sampling rate.
        (audioTimeSeries, samplingRate) = librosa.core.load(targetPath, sr=None)
        audio = Audio(audioTimeSeries, samplingRate, id, name)
        return audio

    def save(self, audio: Audio):
        """Write `audio` as `<savePath>/<audio.id>.wav`, creating folders as needed.

        NOTE(review): the extension is hard-coded to '.wav' here while `load`
        uses `self.fileExtension` — confirm whether the asymmetry is intended.
        """
        targetPath = (((self.savePath + '/') + audio.id) + '.wav')
        self.pathUtil.createFolderForFile(targetPath)
        audioTimeSeries = audio.timeSeries
        samplingRate = audio.samplingRate
        soundfile.write(targetPath, audioTimeSeries, samplingRate)

    def getNames(self):
        """Return a display name for every id below loadPath."""
        names = [self.transformIdToName(id) for id in self.getIds()]
        return names

    def getIds(self):
        """Return naturally sorted ids: file paths relative to loadPath with
        the leading separator and trailing '.<extension>' stripped."""
        audioFiles = self.fileListUtil.getFiles(self.loadPath, self.fileExtension)
        # [1:...] drops the leading '/', the negative slice drops '.<ext>'.
        audioFiles = [file.replace(self.loadPath, '')[1:((- len(self.fileExtension)) - 1)] for file in audioFiles]
        audioFiles = natsorted(audioFiles)
        return audioFiles

    def loadAll(self):
        """Lazily yield every audio file below loadPath."""
        ids = self.getIds()
        for id in ids:
            (yield self.load(id))

    def transformIdToName(self, id: str):
        # The last path component of the id serves as the human-readable name.
        return self.pathUtil.filenameWithoutExtension(id)
class TEAN(nn.Module):
    """Two-stream network combining a texture-encoding head with a pooled
    difference-image stream; both streams feed model1's classifier.

    NOTE(review): `num_classes` and `n_codes` are read from module-level
    globals and the `nclass` parameter is unused — confirm intended.
    """

    def __init__(self, nclass, model1, model2):
        super(TEAN, self).__init__()
        self.model1 = model1
        self.model2 = model2
        # Replace model1's final classifier layer to accept both streams (128+128).
        self.model1.classifier[1] = nn.Linear((128 + 128), num_classes)
        self.head = nn.Sequential(encoding.nn.Encoding(D=1280, K=n_codes), encoding.nn.View((- 1), (1280 * n_codes)), encoding.nn.Normalize(), nn.Linear((1280 * n_codes), 64), nn.BatchNorm1d(64))
        self.pool = nn.Sequential(nn.AvgPool2d(7), encoding.nn.View((- 1), 1280), nn.Linear(1280, 64), nn.BatchNorm1d(64))
        self.fc = nn.Sequential(encoding.nn.Normalize(), nn.Linear((64 * 64), 128), encoding.nn.Normalize())
        self.pool2 = nn.Sequential(nn.AvgPool2d(7), encoding.nn.View((- 1), 1280), nn.Linear(1280, 128), nn.BatchNorm1d(128))

    def forward(self, img, diff_img):
        img_f = self.model1.features(img)
        diff_img_f = self.model2.features(diff_img)
        # Fuse the difference stream with the image stream, then pool it.
        diff_img_f = (img_f + diff_img_f)
        diff_img_f = self.pool2(diff_img_f)
        # FIX: these lines previously referenced the undefined names
        # `img_fea`/`ang_fea`; the locals computed above are `img_f` and
        # `diff_img_f`.
        x1 = self.head(img_f)
        x2 = self.pool(img_f)
        # Outer-product-style bilinear combination of the two embeddings.
        x1 = x1.unsqueeze(1).expand(x1.size(0), x2.size(1), x1.size((- 1)))
        x = (x1 * x2.unsqueeze((- 1)))
        enc_fea = x.view((- 1), (x1.size((- 1)) * x2.size(1)))
        enc_fea = self.fc(enc_fea)
        out = torch.cat((enc_fea, diff_img_f), dim=1)
        out = self.model1.classifier(out)
        return out
class CIFAR100SSL(datasets.CIFAR100):
    """CIFAR-100 variant for semi-supervised learning: keeps only the samples
    at the given index array (or all samples when `indexs` is None)."""

    def __init__(self, root, indexs, train=True, transform=None, target_transform=None, download=False):
        super().__init__(root, train=train, transform=transform, target_transform=target_transform, download=download)
        if indexs is not None:
            # Restrict data/targets to the selected subset.
            self.data = self.data[indexs]
            self.targets = np.array(self.targets)[indexs]

    def __getitem__(self, index):
        img = Image.fromarray(self.data[index])
        target = self.targets[index]
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return (img, target)
class UpsampleBlock(nn.Module):
    """Upsampling wrapper: either a single fixed-scale block, or one block per
    scale in {2, 3, 4} selected at forward time when `multi_scale` is set."""

    def __init__(self, n_channels, scale, multi_scale, group=1):
        super(UpsampleBlock, self).__init__()
        if multi_scale:
            self.up2 = _UpsampleBlock(n_channels, scale=2, group=group)
            self.up3 = _UpsampleBlock(n_channels, scale=3, group=group)
            self.up4 = _UpsampleBlock(n_channels, scale=4, group=group)
        else:
            self.up = _UpsampleBlock(n_channels, scale=scale, group=group)
        self.multi_scale = multi_scale

    def forward(self, x, scale=None):
        if self.multi_scale:
            branch = {2: self.up2, 3: self.up3, 4: self.up4}.get(scale)
            # An unrecognized scale falls through to None, matching the
            # original if/elif chain's implicit return.
            return branch(x) if branch is not None else None
        return self.up(x)
def communicate_1(tensors, communication_op, group, attention=False):
    """Run a collective `communication_op` over `tensors` flattened into one buffer.

    In the default mode the communicated values are copied back into the
    original tensors in place (returns None).

    NOTE(review): the `attention` branch returns `tensors / flat_tensor`,
    i.e. it divides by the communicated flat buffer — this only works if
    `tensors` supports elementwise division against the flat buffer's shape;
    confirm the intended semantics with the caller.
    """
    flat_tensor = flatten_tensors(tensors)
    communication_op(tensor=flat_tensor, group=group)
    if attention:
        return (tensors / flat_tensor)
    # Copy communicated results back into the original tensor storages.
    for (f, t) in zip(unflatten_tensors(flat_tensor, tensors), tensors):
        with torch.no_grad():
            t.set_(f)
def to_onehot(indexes, dim, dtype=None):
    """Convert an integer index array to one-hot with a trailing axis of size `dim`.

    The output dtype defaults to the input's dtype; the result has shape
    `indexes.shape + (dim,)`.
    """
    if dtype is None:
        dtype = indexes.dtype
    flat = indexes.reshape(-1)
    onehot = np.zeros((flat.size, dim), dtype=dtype)
    onehot[np.arange(flat.size), flat] = 1
    return onehot.reshape(indexes.shape + (dim,))
def get_diaresnet_cifar(num_classes, blocks, bottleneck, model_name=None, pretrained=False, root=os.path.join('~', '.torch', 'models'), **kwargs):
    """Create a DIA-ResNet model for CIFAR-10/100.

    `blocks` is the total depth; bottleneck networks use 9 convs per unit,
    basic-block networks use 6. Optionally loads pretrained weights.
    """
    assert num_classes in [10, 100]
    # Derive per-stage layer counts from the total depth.
    convs_per_unit = 9 if bottleneck else 6
    assert (blocks - 2) % convs_per_unit == 0
    layers = [(blocks - 2) // convs_per_unit] * 3
    channels_per_layers = [16, 32, 64]
    init_block_channels = 16
    # Bottleneck stages carry 4x the channels.
    width_mult = 4 if bottleneck else 1
    channels = [[ci * width_mult] * li for (ci, li) in zip(channels_per_layers, layers)]
    net = CIFARDIAResNet(channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, num_classes=num_classes, **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError('Parameter `model_name` should be properly initialized for loading pretrained model.')
        from .model_store import download_model
        download_model(net=net, model_name=model_name, local_model_store_dir_path=root)
    return net
def projection(input_ops, y_task, n_hidden, sequence_lengths, class_weights, optmzr, batch_size=1):
    """Softmax projection + masked sequence loss (TF1-style graph code).

    Builds a class-weighted softmax classifier over per-timestep outputs of
    width 2*n_hidden, masks out positions whose label is 0, averages the loss
    per example by sequence length, and returns (predictions, loss, train op).

    NOTE(review): `tf.concat(0, ...)`, `tf.contrib` and `tf.to_float` are
    TF<=1.x APIs; this block predates TF2.
    """
    timesteps = len(input_ops)
    n_classes = len(class_weights)
    w = tf.get_variable('weights', [(2 * n_hidden), n_classes], initializer=xavier_init((2 * n_hidden), n_classes))
    b = tf.get_variable('biases', [n_classes], initializer=xavier_init(1, n_classes))
    _input_ops = tf.concat(0, input_ops)
    # Reorder from time-major [T, B, 2H] to batch-major, then flatten to
    # [(T*B), 2H] rows for the classifier.
    _input_ops = tf.reshape(_input_ops, [timesteps, batch_size, (2 * n_hidden)])
    _input_ops = tf.transpose(_input_ops, [1, 0, 2])
    _input_ops = tf.reshape(_input_ops, [(timesteps * batch_size), (2 * n_hidden)])
    y_flat = tf.reshape(y_task, [(- 1)], name='y_flat')
    y_onehot = tf.one_hot(y_flat, depth=n_classes)
    (preds, losses) = tf.contrib.learn.ops.softmax_classifier(_input_ops, y_onehot, w, b, class_weight=class_weights)
    # sign() of the label zeroes out padded positions (label id 0).
    mask = tf.sign(tf.to_float(y_flat))
    masked_losses = (mask * losses)
    masked_losses = tf.reshape(masked_losses, [batch_size, timesteps], name='masked_losses')
    # Per-example mean over the true (unpadded) sequence length.
    mean_loss_by_example = tf.truediv(tf.reduce_sum(masked_losses, reduction_indices=1), tf.cast(sequence_lengths, tf.float32), name='mean_loss_by_ex')
    loss = tf.reduce_mean(mean_loss_by_example, name='batch_mean_loss')
    opt = optmzr.minimize(loss)
    return (preds, loss, opt)
def load_text(file_path: str, ids: List[str], groupByClip: bool=True):
    """Read an 'id text' file, keeping lines whose first 11 id characters
    appear in `ids`; optionally regroup the result by clip."""
    dict_text = {}
    with open(file_path) as f:
        for line in f:
            key, content = line.split(' ', 1)
            # The first 11 characters of the id identify the clip.
            if key[:11] in ids:
                dict_text[key] = content
    return _groupByClip(dict_text) if groupByClip else dict_text
class TestMyModule(unittest.TestCase):
    """Unit tests for the neural-solution utility helpers."""

    @classmethod
    def setUpClass(cls):
        # FIX: setUpClass/tearDownClass must be classmethods — unittest calls
        # them on the class, not on an instance.
        if not os.path.exists(TASK_LOG_path):
            os.makedirs(TASK_LOG_path)

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(NEURAL_SOLUTION_WORKSPACE, ignore_errors=True)

    def test_serialize(self):
        request = {'key': 'value'}
        expected_result = b'{"key": "value"}'
        self.assertEqual(serialize(request), expected_result)

    def test_deserialize(self):
        request = b'{"key": "value"}'
        expected_result = {'key': 'value'}
        self.assertEqual(deserialize(request), expected_result)

    # FIX: the decorator had been garbled to a bare ('sqlite3.connect');
    # @patch matches the mock_connect parameter injected below.
    @patch('sqlite3.connect')
    def test_get_cluster_info(self, mock_connect):
        mock_cursor = mock_connect().cursor.return_value
        mock_cursor.fetchall.return_value = [(1, 'node info', 'status', 1, 2, 3)]
        expected_result = {'Cluster info': [(1, 'node info', 'status', 1, 2, 3)]}
        self.assertEqual(get_cluster_info(TASK_LOG_path), expected_result)

    @patch('sqlite3.connect')
    def test_get_cluster_table(self, mock_connect):
        mock_cursor = mock_connect().cursor.return_value
        mock_cursor.fetchall.return_value = [(1, 'node info', 'status', 1, 2, 3)]
        expected_result = '<table border="1" class="dataframe">\n  <thead>\n    <tr style="text-align: right;">\n      <th>Node</th>\n      <th>Node info</th>\n      <th>status</th>\n      <th>free workers</th>\n      <th>busy workers</th>\n      <th>total workers</th>\n    </tr>\n  </thead>\n  <tbody>\n    <tr>\n      <td>1</td>\n      <td>node info</td>\n      <td>status</td>\n      <td>1</td>\n      <td>2</td>\n      <td>3</td>\n    </tr>\n  </tbody>\n</table>'
        self.assertEqual(get_cluster_table(TASK_LOG_path), expected_result)

    def test_get_res_during_tuning(self):
        task_id = '12345'
        log_path = f'{TASK_LOG_path}/task_{task_id}.txt'
        with open(log_path, 'w') as f:
            f.write('Tune 1 result is: (int8|fp32): 0.123 (int8|fp32): 0.456')
        expected_result = {'Tuning count': '1', 'Accuracy': '0.123', 'Duration (seconds)': '0.456'}
        self.assertEqual(get_res_during_tuning(task_id, TASK_LOG_path), expected_result)
        os.remove(log_path)

    def test_get_baseline_during_tuning(self):
        task_id = '12345'
        log_path = f'{TASK_LOG_path}/task_{task_id}.txt'
        with open(log_path, 'w') as f:
            f.write('FP32 baseline is: 0.123 0.456')
        expected_result = {'Accuracy': '0.123', 'Duration (seconds)': '0.456'}
        self.assertEqual(get_baseline_during_tuning(task_id, TASK_LOG_path), expected_result)
        os.remove(log_path)

    def test_check_log_exists(self):
        task_id = '12345'
        log_path = f'{TASK_LOG_path}/task_{task_id}.txt'
        with patch('os.path.exists') as mock_exists:
            mock_exists.return_value = True
            self.assertTrue(check_log_exists(task_id, TASK_LOG_path))
            mock_exists.return_value = False
            self.assertFalse(check_log_exists(task_id, TASK_LOG_path))

    def test_list_to_string(self):
        lst = ['Hello', 'Neural', 'Solution']
        expected_result = 'Hello Neural Solution'
        self.assertEqual(list_to_string(lst), expected_result)
class UniversalCharEmbedding(nn.Module):
    """Shared ("universal") character embedding across languages.

    Each language owns a per-character weight matrix mapping its charset onto
    a shared universal charset; character embeddings are produced by mapping
    into the universal space and multiplying with the shared embedding table.

    NOTE(review): two garbled decorator lines reading ``(full=True)`` (syntax
    errors) preceded `get_char_weight` and `mapping` in the source; the
    original decorator could not be recovered and was removed — confirm
    against the original file (e.g. a caching decorator).
    """

    def __init__(self, langs, char_emb_dim, universal_charset_size, mapping_temperature=0.0):
        super(UniversalCharEmbedding, self).__init__()
        self.langs = langs
        self.charsets = {l: get_charset(l) for l in langs}
        self.char_emb_dim = char_emb_dim
        self.universal_charset_size = universal_charset_size
        self.mapping_temperature = mapping_temperature
        # Shared embedding table over the universal charset.
        self.char_emb = nn.Embedding(self.universal_charset_size, self.char_emb_dim)
        # Per-language mapping from the language charset to the universal one.
        self.char_weights = nn.ModuleDict({l: nn.Embedding(len(self.charsets[l]), self.universal_charset_size) for l in self.langs})

    def forward(self, char_seq, lang):
        """Embed a character id sequence of language `lang`."""
        char_emb = self.get_char_weight(lang)
        return char_emb[char_seq]

    def project(self, input_, lang):
        """Project hidden states onto `lang`'s character embeddings (logits)."""
        char_emb = self.get_char_weight(lang)
        return input_.matmul(char_emb.t())

    def get_char_weight(self, lang):
        """Return the effective per-character embedding matrix for `lang`."""
        mapping = self.mapping(lang)
        char_emb = mapping.matmul(self.char_emb.weight)
        return char_emb

    def mapping(self, lang):
        """Return the (optionally softmax-sharpened) charset mapping for `lang`."""
        weight = self.char_weights[lang].weight
        if (self.mapping_temperature > 0.0):
            # Temperature-scaled softmax over the universal charset axis.
            weight = torch.log_softmax((weight / self.mapping_temperature), dim=(- 1)).exp()
        return weight

    def char_sim_mat(self, lang1, lang2):
        """Cosine-similarity matrix between two languages' charset mappings."""
        x = normalize(self.mapping(lang1), dim=(- 1))
        y = normalize(self.mapping(lang2), dim=(- 1))
        mat = x.matmul(y.t())
        return mat

    def char_softmax(self, lang1, lang2):
        """Row- and column-softmaxed affinity between two languages' characters."""
        w1 = self.get_char_weight(lang1)
        w2 = self.get_char_weight(lang2)
        mat = w1.matmul(w2.t())
        l1_l2 = mat.log_softmax(dim=(- 1)).exp()
        l2_l1 = mat.log_softmax(dim=0).exp().t()
        return (l1_l2, l2_l1)

    def char_mapping(self, l1, l2):
        """Top-3 character correspondences between languages `l1` and `l2`."""
        (l1_l2, l2_l1) = self.char_softmax(l1, l2)

        def get_topk(a2b, a_cs, b_cs):
            # Skip the first 4 rows (special symbols) and take top-3 matches.
            (s, idx) = a2b[4:].topk(3, dim=(- 1))
            a = a_cs.id2char(np.arange(4, len(a2b)).reshape(1, (- 1))).reshape((- 1))
            b = b_cs.id2char(idx.cpu().numpy())
            d = {aa: ' '.join(bb) for (aa, bb) in zip(a, b)}
            return (d, s)

        l1_l2 = get_topk(l1_l2, self.charsets[l1], self.charsets[l2])
        l2_l1 = get_topk(l2_l1, self.charsets[l2], self.charsets[l1])
        return (l1_l2, l2_l1)

    def soft_emb(self, weight, lang):
        """Embed a soft (probability-weighted) character distribution."""
        char_emb = self.get_char_weight(lang)
        return weight.matmul(char_emb)

    def get_start_emb(self, lang):
        """Return the embedding of the start-of-word symbol for `lang`."""
        char_emb = self.get_char_weight(lang)
        return char_emb[SOW_ID]
def osnet_x0_5(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
    """Build OSNet at width multiplier 0.5, optionally with pretrained weights."""
    model = OSNet(
        num_classes,
        blocks=[OSBlock, OSBlock, OSBlock],
        layers=[2, 2, 2],
        channels=[32, 128, 192, 256],
        loss=loss,
        **kwargs,
    )
    if pretrained:
        init_pretrained_weights(model, key='osnet_x0_5')
    return model
class AttenResNet2(nn.Module):
    """Attention-augmented residual CNN for spectrogram-like 2D inputs.

    An hourglass attention branch (down1..5 / att1..9 / up1..5 with skip
    connections) produces a soft attention map that re-weights the input
    (x = (1 + weight) * x); the re-weighted input then goes through a
    residual CNN trunk and an MLP head ending in a sigmoid score.

    Returns (score, attention_weight) from forward().
    """

    def __init__(self, atten_activation, atten_channel=16, size1=(257, 1091), size2=(249, 1075), size3=(233, 1043), size4=(201, 979), size5=(137, 851)):
        super(AttenResNet2, self).__init__()
        self.pre = nn.Sequential(nn.Conv2d(1, atten_channel, kernel_size=(3, 3), padding=(1, 1)), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True), nn.Conv2d(atten_channel, atten_channel, kernel_size=(3, 3), padding=(1, 1)), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True))
        # Hourglass encoder: pooling + increasingly dilated convs, with a
        # skip branch saved at each resolution.
        self.down1 = nn.MaxPool2d(kernel_size=3, stride=(1, 1))
        self.att1 = nn.Sequential(nn.Conv2d(atten_channel, atten_channel, kernel_size=(3, 3), padding=(1, 1), dilation=(4, 8)), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True))
        self.skip1 = nn.Sequential(nn.Conv2d(atten_channel, atten_channel, kernel_size=(3, 3), padding=(1, 1)), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True))
        self.down2 = nn.MaxPool2d(kernel_size=3, stride=(1, 1))
        self.att2 = nn.Sequential(nn.Conv2d(atten_channel, atten_channel, kernel_size=(3, 3), padding=(1, 1), dilation=(8, 16)), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True))
        self.skip2 = nn.Sequential(nn.Conv2d(atten_channel, atten_channel, kernel_size=(3, 3), padding=(1, 1)), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True))
        self.down3 = nn.MaxPool2d(kernel_size=3, stride=(1, 1))
        self.att3 = nn.Sequential(nn.Conv2d(atten_channel, atten_channel, kernel_size=(3, 3), padding=(1, 1), dilation=(16, 32)), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True))
        self.skip3 = nn.Sequential(nn.Conv2d(atten_channel, atten_channel, kernel_size=(3, 3), padding=(1, 1)), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True))
        self.down4 = nn.MaxPool2d(kernel_size=3, stride=(1, 1))
        self.att4 = nn.Sequential(nn.Conv2d(atten_channel, atten_channel, kernel_size=(3, 3), padding=(1, 1), dilation=(32, 64)), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True))
        self.skip4 = nn.Sequential(nn.Conv2d(atten_channel, atten_channel, kernel_size=(3, 3), padding=(1, 1)), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True))
        self.down5 = nn.MaxPool2d(kernel_size=3, stride=(1, 2))
        self.att5 = nn.Sequential(nn.Conv2d(atten_channel, atten_channel, kernel_size=(3, 3), padding=(1, 1), dilation=(64, 128)), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True), nn.Conv2d(atten_channel, atten_channel, kernel_size=(3, 3), padding=(1, 1)), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True))
        # Hourglass decoder: bilinear upsampling back through the saved sizes.
        self.up5 = nn.UpsamplingBilinear2d(size=size5)
        self.att6 = nn.Sequential(nn.Conv2d(atten_channel, atten_channel, kernel_size=(3, 3), padding=(1, 1)), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True))
        self.up4 = nn.UpsamplingBilinear2d(size=size4)
        self.att7 = nn.Sequential(nn.Conv2d(atten_channel, atten_channel, kernel_size=(3, 3), padding=(1, 1)), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True))
        self.up3 = nn.UpsamplingBilinear2d(size=size3)
        self.att8 = nn.Sequential(nn.Conv2d(atten_channel, atten_channel, kernel_size=(3, 3), padding=(1, 1)), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True))
        self.up2 = nn.UpsamplingBilinear2d(size=size2)
        self.att9 = nn.Sequential(nn.Conv2d(atten_channel, atten_channel, kernel_size=(3, 3), padding=(1, 1)), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True))
        self.up1 = nn.UpsamplingBilinear2d(size=size1)
        if (atten_channel == 1):
            self.conv1 = nn.Sequential(nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True), nn.Conv2d(atten_channel, atten_channel, kernel_size=1, stride=1), nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True), nn.Conv2d(atten_channel, 1, kernel_size=1, stride=1))
        else:
            # FIX: channel counts must be ints — `atten_channel / 4` produced a
            # float and made nn.Conv2d raise; use integer division.
            self.conv1 = nn.Sequential(nn.BatchNorm2d(atten_channel), nn.ReLU(inplace=True), nn.Conv2d(atten_channel, (atten_channel // 4), kernel_size=1, stride=1), nn.BatchNorm2d((atten_channel // 4)), nn.ReLU(inplace=True), nn.Conv2d((atten_channel // 4), 1, kernel_size=1, stride=1))
        if (atten_activation == 'softmax'):
            self.soft = nn.Softmax(dim=2)
        if (atten_activation == 'sigmoid'):
            self.soft = nn.Sigmoid()
        # Residual CNN trunk.
        self.cnn1 = nn.Conv2d(1, 16, kernel_size=(3, 3), padding=(1, 1))
        self.bn1 = nn.BatchNorm2d(16)
        self.re1 = nn.ReLU(inplace=True)
        self.cnn2 = nn.Conv2d(16, 16, kernel_size=(3, 3), padding=(1, 1))
        self.bn2 = nn.BatchNorm2d(16)
        self.re2 = nn.ReLU(inplace=True)
        self.cnn3 = nn.Conv2d(16, 16, kernel_size=(3, 3), padding=(1, 1))
        self.mp1 = nn.MaxPool2d(kernel_size=(1, 2))
        self.cnn4 = nn.Conv2d(16, 32, kernel_size=(3, 3), dilation=(2, 2))
        self.bn3 = nn.BatchNorm2d(32)
        self.re3 = nn.ReLU(inplace=True)
        self.cnn5 = nn.Conv2d(32, 32, kernel_size=(3, 3), padding=(1, 1))
        self.bn4 = nn.BatchNorm2d(32)
        self.re4 = nn.ReLU(inplace=True)
        self.cnn6 = nn.Conv2d(32, 32, kernel_size=(3, 3), padding=(1, 1))
        self.mp2 = nn.MaxPool2d(kernel_size=(1, 2))
        self.cnn7 = nn.Conv2d(32, 32, kernel_size=(3, 3), dilation=(4, 4))
        self.bn5 = nn.BatchNorm2d(32)
        self.re5 = nn.ReLU(inplace=True)
        self.cnn8 = nn.Conv2d(32, 32, kernel_size=(3, 3), padding=(1, 1))
        self.bn6 = nn.BatchNorm2d(32)
        self.re6 = nn.ReLU(inplace=True)
        self.cnn9 = nn.Conv2d(32, 32, kernel_size=(3, 3), padding=(1, 1))
        self.mp3 = nn.MaxPool2d(kernel_size=(2, 2))
        self.cnn10 = nn.Conv2d(32, 32, kernel_size=(3, 3), dilation=(4, 4))
        self.bn12 = nn.BatchNorm2d(32)
        self.re12 = nn.ReLU(inplace=True)
        self.cnn11 = nn.Conv2d(32, 32, kernel_size=(3, 3), padding=(1, 1))
        self.bn13 = nn.BatchNorm2d(32)
        self.re13 = nn.ReLU(inplace=True)
        self.cnn12 = nn.Conv2d(32, 32, kernel_size=(3, 3), padding=(1, 1))
        self.mp4 = nn.MaxPool2d(kernel_size=(2, 2))
        self.cnn13 = nn.Conv2d(32, 32, kernel_size=(3, 3), dilation=(8, 8))
        self.bn14 = nn.BatchNorm2d(32)
        self.re14 = nn.ReLU(inplace=True)
        self.cnn14 = nn.Conv2d(32, 32, kernel_size=(3, 3), padding=(1, 1))
        self.bn15 = nn.BatchNorm2d(32)
        self.re15 = nn.ReLU(inplace=True)
        self.cnn15 = nn.Conv2d(32, 32, kernel_size=(3, 3), padding=(1, 1))
        self.mp5 = nn.MaxPool2d(kernel_size=(2, 2))
        self.cnn16 = nn.Conv2d(32, 32, kernel_size=(3, 3), dilation=(8, 8))
        # MLP head (input size assumes the default input spatial dimensions).
        self.flat_feats = ((32 * 4) * 6)
        self.ln1 = nn.Linear(self.flat_feats, 32)
        self.bn7 = nn.BatchNorm1d(32)
        self.re7 = nn.ReLU(inplace=True)
        self.ln2 = nn.Linear(32, 32)
        self.bn8 = nn.BatchNorm1d(32)
        self.re8 = nn.ReLU(inplace=True)
        self.ln3 = nn.Linear(32, 32)
        self.bn9 = nn.BatchNorm1d(32)
        self.re9 = nn.ReLU(inplace=True)
        self.ln4 = nn.Linear(32, 32)
        self.bn10 = nn.BatchNorm1d(32)
        self.re10 = nn.ReLU(inplace=True)
        self.ln5 = nn.Linear(32, 32)
        self.bn11 = nn.BatchNorm1d(32)
        self.re11 = nn.ReLU(inplace=True)
        self.ln6 = nn.Linear(32, 1)
        self.sigmoid = nn.Sigmoid()

        def _weights_init(m):
            # FIX: `(nn.Conv2d or nn.Linear)` evaluates to just nn.Conv2d, so
            # Linear (and BatchNorm1d below) were never initialized; isinstance
            # needs a tuple of types.
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                xavier_normal_(m.weight)
                m.bias.data.zero_()
            elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        self.apply(_weights_init)

    def forward(self, x):
        """Return (sigmoid score, attention weight map) for input x of shape
        (batch, 1, H, W)."""
        residual = x
        # Attention hourglass: encode...
        x = self.att1(self.down1(self.pre(x)))
        skip1 = self.skip1(x)
        x = self.att2(self.down2(x))
        skip2 = self.skip2(x)
        x = self.att3(self.down3(x))
        skip3 = self.skip3(x)
        x = self.att4(self.down4(x))
        skip4 = self.skip4(x)
        x = self.att5(self.down5(x))
        # ...then decode, fusing the saved skip branches.
        x = self.att6((skip4 + self.up5(x)))
        x = self.att7((skip3 + self.up4(x)))
        x = self.att8((skip2 + self.up3(x)))
        x = self.att9((skip1 + self.up2(x)))
        x = self.conv1(self.up1(x))
        weight = self.soft(x)
        # Residual attention: amplify the input by (1 + weight).
        x = ((1 + weight) * residual)
        # Residual CNN trunk (pre-activation style blocks between pools).
        x = self.cnn1(x)
        residual = x
        x = self.cnn3(self.re2(self.bn2(self.cnn2(self.re1(self.bn1(x))))))
        x += residual
        x = self.cnn4(self.mp1(x))
        residual = x
        x = self.cnn6(self.re4(self.bn4(self.cnn5(self.re3(self.bn3(x))))))
        x += residual
        x = self.cnn7(self.mp2(x))
        residual = x
        x = self.cnn9(self.re6(self.bn6(self.cnn8(self.re5(self.bn5(x))))))
        x += residual
        x = self.cnn10(self.mp3(x))
        residual = x
        x = self.cnn12(self.re13(self.bn13(self.cnn11(self.re12(self.bn12(x))))))
        x += residual
        x = self.cnn13(self.mp4(x))
        residual = x
        x = self.cnn15(self.re15(self.bn15(self.cnn14(self.re14(self.bn14(x))))))
        x += residual
        x = self.cnn16(self.mp5(x))
        # MLP head with two residual connections.
        x = x.view((- 1), self.flat_feats)
        x = self.ln1(x)
        residual = x
        x = self.ln3(self.re8(self.bn8(self.ln2(self.re7(self.bn7(x))))))
        x += residual
        residual = x
        x = self.ln5(self.re10(self.bn10(self.ln4(self.re9(self.bn9(x))))))
        x += residual
        out = self.sigmoid(self.ln6(self.re11(self.bn11(x))))
        return (out, weight)
def prototype_twitter_GaussPiecewise_VHRED_NormOp_ClusterExp3():
    """VHRED configuration for the Twitter BPE corpus with both Gaussian and
    piecewise latent variables (cluster experiment 3). Returns the state dict."""
    state = prototype_state()
    # Insertion order of this dict matches the original assignment order.
    overrides = {
        'train_dialogues': '../TwitterDataBPE/Train.dialogues.pkl',
        'test_dialogues': '../TwitterDataBPE/Test.dialogues.pkl',
        'valid_dialogues': '../TwitterDataBPE/Valid.dialogues.pkl',
        'dictionary': '../TwitterDataBPE/Dataset.dict.pkl',
        'save_dir': 'Output',
        'max_grad_steps': 80,
        'valid_freq': 2500,
        'prefix': 'TwitterModel_',
        'updater': 'adam',
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'bs': 80,
        'decoder_bias_type': 'all',
        'direct_connection_between_encoders_and_decoder': True,
        'deep_direct_connection': False,
        'qdim_encoder': 1000,
        'qdim_decoder': 2000,
        'sdim': 1000,
        'rankdim': 400,
        'utterance_decoder_gating': 'LSTM',
        'add_latent_gaussian_per_utterance': True,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        'add_latent_piecewise_per_utterance': True,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': (1.0 / 60000.0),
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
        'patience': 20,
    }
    for key, value in overrides.items():
        state[key] = value
    return state
def test_centerpoint_fpn():
    """Check SECONDFPN output shapes for a CenterPoint-style config vs the default."""
    second_cfg = dict(type='SECOND', in_channels=64, out_channels=[64, 128, 256], layer_nums=[3, 5, 5], layer_strides=[2, 2, 2], norm_cfg=dict(type='BN', eps=0.001, momentum=0.01), conv_cfg=dict(type='Conv2d', bias=False))
    second = build_backbone(second_cfg)
    # CenterPoint variant: stride 0.5 means downsampling (conv) at that level.
    centerpoint_fpn_cfg = dict(type='SECONDFPN', in_channels=[64, 128, 256], out_channels=[128, 128, 128], upsample_strides=[0.5, 1, 2], norm_cfg=dict(type='BN', eps=0.001, momentum=0.01), upsample_cfg=dict(type='deconv', bias=False), use_conv_for_no_stride=True)
    fpn_cfg = dict(type='SECONDFPN', in_channels=[64, 128, 256], upsample_strides=[1, 2, 4], out_channels=[128, 128, 128])
    second_fpn = build_neck(fpn_cfg)
    centerpoint_second_fpn = build_neck(centerpoint_fpn_cfg)
    input = torch.rand([4, 64, 512, 512])
    sec_output = second(input)
    centerpoint_output = centerpoint_second_fpn(sec_output)
    second_output = second_fpn(sec_output)
    # CenterPoint FPN keeps 128x128 (it downsamples the finest level),
    # while the default FPN upsamples everything to 256x256.
    assert (centerpoint_output[0].shape == torch.Size([4, 384, 128, 128]))
    assert (second_output[0].shape == torch.Size([4, 384, 256, 256]))
class CnnPolicy(object):
    """TF1 convolutional actor-critic policy (baselines-style API).

    Builds twin conv towers over an image observation scaled by 1/255:
    'pol' produces the action distribution, 'vf' the value estimate.
    TF1 graph construction under variable scopes is order- and name-
    sensitive, so the code is left unchanged.
    """
    # No recurrent state is carried between steps.
    recurrent = False

    def __init__(self, name, ob_space, ac_space):
        # Everything lives under `name` so get_variables() can filter by scope.
        with tf.variable_scope(name):
            self._init(ob_space, ac_space)
            self.scope = tf.get_variable_scope().name

    def _init(self, ob_space, ac_space):
        """Build placeholders, both towers, and the act function."""
        assert isinstance(ob_space, gym.spaces.Box)
        self.pdtype = pdtype = make_pdtype(ac_space)
        # None batch dimension: variable batch size.
        sequence_length = None
        ob = U.get_placeholder(name='ob', dtype=tf.float32, shape=([sequence_length] + list(ob_space.shape)))
        # Pixel observations assumed in [0, 255]; scale to [0, 1].
        obscaled = (ob / 255.0)
        with tf.variable_scope('pol'):
            x = obscaled
            x = tf.nn.relu(U.conv2d(x, 8, 'l1', [8, 8], [4, 4], pad='VALID'))
            x = tf.nn.relu(U.conv2d(x, 16, 'l2', [4, 4], [2, 2], pad='VALID'))
            x = U.flattenallbut0(x)
            x = tf.nn.relu(tf.layers.dense(x, 128, name='lin', kernel_initializer=U.normc_initializer(1.0)))
            # Small 0.01 init keeps the initial policy close to uniform.
            logits = tf.layers.dense(x, pdtype.param_shape()[0], name='logits', kernel_initializer=U.normc_initializer(0.01))
            self.pd = pdtype.pdfromflat(logits)
        with tf.variable_scope('vf'):
            # Same architecture as the policy tower, separate weights.
            x = obscaled
            x = tf.nn.relu(U.conv2d(x, 8, 'l1', [8, 8], [4, 4], pad='VALID'))
            x = tf.nn.relu(U.conv2d(x, 16, 'l2', [4, 4], [2, 2], pad='VALID'))
            x = U.flattenallbut0(x)
            x = tf.nn.relu(tf.layers.dense(x, 128, name='lin', kernel_initializer=U.normc_initializer(1.0)))
            self.vpred = tf.layers.dense(x, 1, name='value', kernel_initializer=U.normc_initializer(1.0))
            self.vpredz = self.vpred
        self.state_in = []
        self.state_out = []
        # NOTE(review): `stochastic` is fed but the sample op is always
        # self.pd.sample() — the flag appears to be API-compat only; confirm.
        stochastic = tf.placeholder(dtype=tf.bool, shape=())
        ac = self.pd.sample()
        self._act = U.function([stochastic, ob], [ac, self.vpred])

    def act(self, stochastic, ob):
        """Return (action, value) for a single unbatched observation."""
        (ac1, vpred1) = self._act(stochastic, ob[None])
        return (ac1[0], vpred1[0])

    def get_variables(self):
        """All global variables created under this policy's scope."""
        return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.scope)

    def get_trainable_variables(self):
        """Trainable variables created under this policy's scope."""
        return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)

    def get_initial_state(self):
        """Non-recurrent policy: no initial state."""
        return []
class Camera():
    """Undistorted, half-resolution JPEG frame grabber for one robot camera."""

    def __init__(self, robot_idx):
        serial = CAMERA_SERIALS[robot_idx]
        (image_width, image_height, camera_matrix, dist_coeffs) = get_camera_params(serial)
        self.cap = get_video_cap(serial, image_width, image_height)
        # Precompute the undistortion maps once; per-frame remap() is cheap.
        (self.map_x, self.map_y) = cv.initUndistortRectifyMap(camera_matrix, dist_coeffs, None, camera_matrix, (image_width, image_height), cv.CV_32FC1)
        self.image_size = ((image_width // 2), (image_height // 2))

    def get_encoded_image(self):
        """Grab a frame, undistort, downscale by 2 and return JPEG bytes."""
        # Fail fast if the camera silently reset its manual settings.
        assert (self.cap.get(cv.CAP_PROP_FOCUS) == CAMERA_FOCUS)
        assert (self.cap.get(cv.CAP_PROP_TEMPERATURE) == CAMERA_TEMPERATURE)
        assert (self.cap.get(cv.CAP_PROP_EXPOSURE) == CAMERA_EXPOSURE)
        assert (self.cap.get(cv.CAP_PROP_GAIN) == CAMERA_GAIN)
        image = None
        while (image is None):
            (_, image) = self.cap.read()
        # Second read flushes a possibly stale buffered frame.
        (_, image) = self.cap.read()
        image = cv.remap(image, self.map_x, self.map_y, cv.INTER_LINEAR)
        # Bug fix: cv.resize's third positional parameter is `dst`, not the
        # interpolation flag — pass the flag by keyword.
        image = cv.resize(image, self.image_size, interpolation=cv.INTER_LINEAR)
        (_, image) = cv.imencode('.jpg', image)
        return image

    def disconnect(self):
        """Release the underlying video capture device."""
        self.cap.release()
def plot_figure2(df):
    """Plot empirical coverage (left) and average prediction-set size (right)
    per predictor, grouped by desired coverage level; saves a PDF.

    Expects df columns: 'alpha', 'coverage', 'size', 'predictor'.
    """
    (fig, axs) = plt.subplots(nrows=1, ncols=2, figsize=(12, 3))
    df['desired coverage (1-)'] = (1 - df['alpha'])
    # Outline-only bars marking the target coverage as a visual reference.
    # NOTE(review): positional x/y arguments were removed in seaborn >= 0.12;
    # this call assumes an older seaborn — confirm the pinned version.
    sns.barplot('desired coverage (1-)', 'desired coverage (1-)', data=df, alpha=0.3, ax=axs[0], edgecolor='k', ci=None, fill=False)
    bplot = sns.barplot(x='desired coverage (1-)', y='coverage', hue='predictor', data=df, ax=axs[0], alpha=0.5, ci='sd', linewidth=0.01)
    # Force 50% alpha on the bar faces.
    for patch in bplot.artists:
        (r, g, b, a) = patch.get_facecolor()
        patch.set_facecolor((r, g, b, 0.5))
    sns.barplot(x='desired coverage (1-)', y='size', hue='predictor', data=df, ax=axs[1], ci='sd', alpha=0.5, linewidth=0.01)
    sns.despine(top=True, right=True)
    axs[0].set_ylim(ymin=0.85, ymax=1.0)
    axs[0].set_yticks([0.85, 0.9, 0.95, 1])
    axs[0].set_ylabel('empirical coverage')
    axs[1].set_ylabel('average size')
    # Uniform font size for titles, axis labels and tick labels.
    for ax in axs:
        for item in (([ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels()) + ax.get_yticklabels()):
            item.set_fontsize(15)
        ax.legend(fontsize=15, title_fontsize=15)
    # Keep a single legend (left subplot only).
    axs[1].get_legend().remove()
    plt.tight_layout(rect=[0, 0.03, 1, 0.93])
    plt.savefig('./outputs/barplot-figure2.pdf')
def compute_mean_std(list_values):
    """Return (mean, population std) of the values as numpy scalars."""
    arr = np.array(list_values)
    return (arr.mean(), arr.std())
def add_dict(left, right):
    """Accumulate the scalar (.item()) values of `right` into `left` in place."""
    for key, value in right.items():
        previous = left.get(key, 0)
        left[key] = previous + value.item()
def test_dense_reward(bin_pack_dense_reward: BinPack, dense_reward: DenseReward) -> None:
    """DenseReward contract: a valid placement into EMS 0 is rewarded with the
    item's volume; invalid actions give 0 and terminate; the reward function
    must agree with the reward the environment emits in the timestep."""
    reward_fn = jax.jit(dense_reward)
    step_fn = jax.jit(bin_pack_dense_reward.step)
    (state, timestep) = bin_pack_dense_reward.reset(jax.random.PRNGKey(0))
    # Every item against the first EMS, from the initial state.
    for (item_id, is_valid) in enumerate(timestep.observation.items_mask):
        action = jnp.array([0, item_id], jnp.int32)
        (next_state, next_timestep) = step_fn(state, action)
        reward = reward_fn(state, action, next_state, is_valid, is_done=next_timestep.last())
        assert (reward == next_timestep.reward)
        if is_valid:
            item = jumanji.tree_utils.tree_slice(timestep.observation.items, item_id)
            assert jnp.isclose(reward, item_volume(item))
        else:
            # Invalid placement: zero reward and episode termination.
            assert (reward == 0)
            assert next_timestep.last()
    # All remaining (EMS, item) pairs are asserted invalid from the initial state.
    for ems_id in range(1, timestep.observation.action_mask.shape[0]):
        for item_id in range(timestep.observation.action_mask.shape[1]):
            action = jnp.array([ems_id, item_id], jnp.int32)
            (next_state, next_timestep) = step_fn(state, action)
            is_valid = timestep.observation.action_mask[tuple(action)]
            is_done = next_timestep.last()
            assert ((~ is_valid) and is_done)
            reward = reward_fn(state, action, next_state, is_valid, is_done)
            assert (reward == 0 == next_timestep.reward)
def main():
    """Evaluate the answer (Q2A) and/or rationale (QA2R) checkpoints and merge
    the two result CSVs when both were produced."""
    (args, a_config, r_config) = parse_args()
    gpu_str = ','.join(str(g) for g in args.gpus)
    a_result_csv = None
    r_result_csv = None
    if args.a_ckpt:
        a_config.DATASET.TASK = 'Q2A'
        a_config.GPUS = gpu_str
        a_result_csv = test_net(args, a_config, ckpt_path=args.a_ckpt, save_path=args.result_path, save_name=args.result_name)
    if args.r_ckpt:
        r_config.DATASET.TASK = 'QA2R'
        r_config.GPUS = gpu_str
        r_result_csv = test_net(args, r_config, ckpt_path=args.r_ckpt, save_path=args.result_path, save_name=args.result_name)
    if args.a_ckpt and args.r_ckpt:
        merge_result(a_result_csv, r_result_csv, os.path.join(args.result_path, '{}_test_result_Q2AR.csv'.format(args.result_name)))
@torch.jit.script
def mish_jit_bwd(x, grad_output):
    """Backward pass for Mish (x * tanh(softplus(x))).

    d/dx = tanh(sp) + x * sigmoid(x) * (1 - tanh(sp)^2), with sp = softplus(x).
    Restored the TorchScript decorator, which was garbled to ".script".
    """
    x_sigmoid = torch.sigmoid(x)
    x_tanh_sp = F.softplus(x).tanh()
    return grad_output.mul(x_tanh_sp + (x * x_sigmoid) * (1 - x_tanh_sp * x_tanh_sp))
def _poison_fountain_homing(state, agent_name):
    """Home the agent toward fountains whose c2 channel is below 0.6 (poison)."""
    agent_sprite = state[agent_name][0]
    poison_fountains = [f for f in state['fountains'] if f.c2 < 0.6]
    return _target_homing(poison_fountains, agent_sprite)
class RLSidetuneNetwork(nn.Module):
    """Frame-stacked sidetuning encoder for RL.

    GenericSidetuneNetwork features (optionally concatenated with target
    channels and a map tower) are passed through one conv and two FC layers
    producing a vector of `output_size`.
    """

    def __init__(self, n_frames, n_map_channels=0, use_target=True, output_size=512, num_tasks=1, extra_kwargs=None):
        super(RLSidetuneNetwork, self).__init__()
        # Bug fix: `extra_kwargs={}` was a mutable default shared across calls;
        # None default preserves behavior for all existing callers.
        extra_kwargs = {} if extra_kwargs is None else extra_kwargs
        assert ('sidetune_kwargs' in extra_kwargs), 'Cannot use sidetune network without kwargs'
        self.sidetuner = GenericSidetuneNetwork(**extra_kwargs['sidetune_kwargs'])
        attrs_to_remember = extra_kwargs.get('attrs_to_remember', [])
        self.sidetuner = MemoryFrameStacked(self.sidetuner, n_frames, attrs_to_remember=attrs_to_remember)
        self.n_frames = n_frames
        self.use_target = use_target
        self.use_map = (n_map_channels > 0)
        self.map_channels = n_map_channels
        self.output_size = output_size
        if self.use_map:
            self.map_tower = atari_conv(self.n_frames * self.map_channels)
        # The target adds 3 extra channels per frame when enabled.
        self.target_channels = 3 if self.use_target else 0
        self.conv1 = nn.Conv2d(self.n_frames * ((8 * num_tasks) + self.target_channels), 32, 4, stride=2)
        self.flatten = Flatten()
        # Doubled FC input when map features are concatenated.
        self.fc1 = init_(nn.Linear(32 * 7 * 7 * (self.use_map + 1), 1024))
        self.fc2 = init_(nn.Linear(1024, self.output_size))
        self.groupnorm = nn.GroupNorm(8, 8, affine=False)

    def forward(self, x, cache):
        """x: dict with 'target'/'map' entries as configured; cache: frame-stack memory."""
        x_sidetune = self.sidetuner(x, cache)
        x_sidetune = self.groupnorm(x_sidetune)
        if self.use_target:
            x_sidetune = torch.cat([x_sidetune, x['target']], dim=1)
        x_sidetune = F.relu(self.conv1(x_sidetune))
        if self.use_map:
            x_map = self.map_tower(x['map'])
            x_sidetune = torch.cat([x_map, x_sidetune], dim=1)
        x = self.flatten(x_sidetune)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return x
def crop_video(sub_set, video, crop_path, instanc_size):
    """Crop SiamFC-style (z, x) training patches for each frame of one
    VisDrone video, writing them under crop_path/sub_set/video.

    Relies on module-level `visdrone_base_path` and `crop_like_SiamFC`.
    """
    video_crop_base_path = join(crop_path, sub_set, video)
    if (not exists(video_crop_base_path)):
        makedirs(video_crop_base_path)
    sub_set_base_path = join(visdrone_base_path, sub_set)
    video_base_path = join(sub_set_base_path, 'sequences', video)
    gts_path = join(sub_set_base_path, 'annotations', '{}.txt'.format(video))
    # NOTE(review): np.loadtxt returns a 1-D array for a single-row file,
    # which would break gts[idx] below — confirm one annotation row per frame.
    gts = np.loadtxt(open(gts_path, 'rb'), delimiter=',')
    jpgs = sorted(glob.glob(join(video_base_path, '*.jpg')))
    if (not jpgs):
        print('no jpg files, try png files')
        jpgs = sorted(glob.glob(join(video_base_path, '*.png')))
        if (not jpgs):
            print('no jpg and png files, check data please')
    for (idx, img_path) in enumerate(jpgs):
        im = cv2.imread(img_path)
        # Per-channel mean used as padding color when crops leave the image.
        avg_chans = np.mean(im, axis=(0, 1))
        gt = gts[idx]
        bbox = [int(g) for g in gt]
        # Convert (x, y, w, h) to (x1, y1, x2, y2).
        bbox = [bbox[0], bbox[1], (bbox[0] + bbox[2]), (bbox[1] + bbox[3])]
        (z, x) = crop_like_SiamFC(im, bbox, instanc_size=instanc_size, padding=avg_chans)
        cv2.imwrite(join(video_crop_base_path, '{:06d}.{:02d}.z.jpg'.format(int(idx), 0)), z)
        cv2.imwrite(join(video_crop_base_path, '{:06d}.{:02d}.x.jpg'.format(int(idx), 0)), x)
class FooModule(nn.Module):
    """Small fixture module: one linear layer and two 3x3 conv layers."""

    def __init__(self):
        super(FooModule, self).__init__()
        self.linear = nn.Linear(1, 2)
        self.conv2d = nn.Conv2d(3, 1, 3)
        self.conv2d_2 = nn.Conv2d(3, 2, 3)
# NOTE(review): the bare "('submit')" below looks like a mangled decorator
# line (e.g. "@socketio.on('submit')") — confirm against the original source.
('submit')
def value_changed(message):
    """Handle a submit event: unpack the prompt fields and run generation."""
    print('Socket recieved', message)  # (typo in the log string left as-is)
    prefix = message['prompt']
    topic = message['topic']
    affect = message['affect']
    knob = message['knob']
    # NOTE(review): `out`/`ok` are unused here — the handler body may be
    # truncated in this chunk; confirm what happens with the generated output.
    (out, ok) = generate(prefix, topic, affect, float(knob))
def cau_recall_mrr(preds, labels, cutoff):
    """Per-step recall@cutoff and MRR@cutoff over batched score arrays.

    The rank of the target is 1 + the number of scores strictly greater
    than the target's score.
    """
    recall = []
    mrr = []
    for batch_scores, batch_labels in zip(preds, labels):
        for step_scores, target in zip(batch_scores, batch_labels):
            rank = (step_scores[target] < step_scores).sum() + 1
            hit = rank <= cutoff
            recall.append(hit)
            mrr.append(1 / rank if hit else 0.0)
    return (recall, mrr)
class Item():
    """One MapFree dataset frame, identified by (mode, scene, seq, stem).

    Provides path helpers and loaders for image, depth, intrinsics and pose.
    NOTE(review): the fields are annotated without a visible @dataclass
    decorator, and get_split_file/load_split take `cls` without visible
    @classmethod decorators — decorators were likely stripped upstream;
    confirm against the original source.
    """
    mode: str
    scene: str
    seq: str
    stem: str

    def get_split_file(cls, mode: str) -> Path:
        """Path to the split listing file for `mode`."""
        return ((PATHS['mapfree'] / 'splits') / f'{mode}_files.txt')

    def load_split(cls, mode: str) -> ty.S['Item']:
        """One Item per (whitespace-split) line of the split file."""
        return [cls(mode, *s) for s in io.readlines(cls.get_split_file(mode), split=True)]

    def get_img_file(self) -> Path:
        """JPEG image path for this frame."""
        return ((((PATHS['mapfree'] / self.mode) / self.scene) / self.seq) / f'{self.stem}.jpg')

    def get_depth_file(self, src) -> Path:
        """Depth-map path produced by estimator `src` for this frame."""
        return ((((PATHS['mapfree'] / self.mode) / self.scene) / self.seq) / f'{self.stem}.{src}.png')

    def get_intrinsics_file(self) -> Path:
        """Per-scene intrinsics listing."""
        return (((PATHS['mapfree'] / self.mode) / self.scene) / 'intrinsics.txt')

    def get_poses_file(self) -> Path:
        """Per-scene poses listing."""
        return (((PATHS['mapfree'] / self.mode) / self.scene) / 'poses.txt')

    def load_img(self) -> Image:
        return Image.open(self.get_img_file())

    def load_depth(self, src: str) -> ty.A:
        """Depth as float32 (H, W, 1) in meters; stored on disk as millimeters."""
        depth = cv2.imread(str(self.get_depth_file(src)), cv2.IMREAD_UNCHANGED)
        depth = (depth[(..., None)].astype(np.float32) / 1000)
        return depth

    def load_intrinsics(self) -> ty.A:
        """4x4 intrinsics matrix with fx, fy, cx, cy filled in."""
        lines = io.readlines(self.get_intrinsics_file(), split=True)
        stem = f'{self.seq}/{self.stem}.jpg'
        line = next((l for l in lines if (l[0] == stem)))
        intrinsics = io.lmap(float, line[1:])
        K = np.zeros((4, 4), dtype=np.float32)
        # The last two values of the line are dropped ([: -2]) — presumably
        # image width/height; confirm against the file format.
        ((K[(0, 0)], K[(1, 1)], K[(0, 2)], K[(1, 2)]), K[(2, 2)], K[(3, 3)]) = (intrinsics[:(- 2)], 1, 1)
        return K

    def load_pose(self) -> ty.A:
        """4x4 pose matrix from the quaternion (cols 1:5) + translation (5:8)."""
        lines = io.readlines(self.get_poses_file(), split=True)
        stem = f'{self.seq}/{self.stem}.jpg'
        line = next((l for l in lines if (l[0] == stem)))
        (q, t) = (io.lmap(float, line[1:5]), io.lmap(float, line[5:8]))
        T = geo.T_from_qt(np.array(q), np.array(t)).astype(np.float32)
        return T
def weights_from_hdf5(f):
    """Yield (name, weight) pairs from an HDF5 group, depth-first.

    A group carrying a 'weight_names' attribute is a leaf layer; otherwise
    recurse into each child group.
    """
    if 'weight_names' in f.attrs:
        for name in f.attrs['weight_names']:
            yield name, f[name]
    else:
        for key in f.keys():
            yield from weights_from_hdf5(f[key])
def look_at(obj: Union[(bpy.types.Object, str)], location: Union[(Tuple[float], mathutils.Vector)], roll: float=0) -> None:
    """Rotate `obj` in place so it points at `location` (camera-style, -Z
    toward the target, Y up), applying `roll` radians about the view axis.
    The object's location is preserved.
    """
    obj = zpy.objects.verify(obj)
    if (not isinstance(location, mathutils.Vector)):
        location = mathutils.Vector(location)
    loc = obj.location
    direction = (location - obj.location)
    quat = direction.to_track_quat('-Z', 'Y')
    quat = quat.to_matrix().to_4x4()
    roll_matrix = mathutils.Matrix.Rotation(roll, 4, 'Z')
    loc = loc.to_tuple()
    # Bug fix: the matrix-multiplication operator was missing
    # ("quat roll_matrix" is a syntax error).
    obj.matrix_world = (quat @ roll_matrix)
    # Assigning matrix_world overwrites location, so restore it.
    obj.location = loc
class UNetResNet(nn.Module):
    """U-Net whose bottleneck is fused with ResNet-18 RGB features.

    The primary input `x` goes through 4 down blocks; `rgb` goes through a
    pretrained ResNet-18, is projected to 128*w channels, resized to the
    bottleneck resolution, concatenated, and decoded with 4 up blocks.
    """

    def __init__(self, in_channels=3, w=4, n_classes=2):
        super(UNetResNet, self).__init__()
        # `w` is a width multiplier for all U-Net stages.
        self.inc = inconv(in_channels, int((16 * w)))
        self.down1 = down(int((16 * w)), int((32 * w)))
        self.down2 = down(int((32 * w)), int((64 * w)))
        self.down3 = down(int((64 * w)), int((128 * w)))
        self.down4 = down(int((128 * w)), int((128 * w)))
        self.image_encoder = resnet.resnet18(pretrained=True)
        # 1x1 projection of the ResNet features to the bottleneck width.
        self.reduce_dim = nn.Sequential(nn.Conv2d(self.image_encoder.out_dim, (128 * w), kernel_size=1), nn.BatchNorm2d((128 * w)), nn.ReLU(inplace=True))
        # up1 consumes bottleneck (128w + 128w image) + skip (128w) = 384w.
        self.up1 = up(int((384 * w)), int((64 * w)))
        self.up2 = up(int((128 * w)), int((32 * w)))
        self.up3 = up(int((64 * w)), int((16 * w)))
        self.up4 = up(int((32 * w)), int((16 * w)))
        self.outc = outconv(int((16 * w)), n_classes)

    def forward(self, x, rgb):
        """Segment `x`, conditioning the bottleneck on ResNet features of `rgb`."""
        x1 = self.inc(x)
        x2 = self.down1(x1)
        x3 = self.down2(x2)
        x4 = self.down3(x3)
        x5 = self.down4(x4)
        img_feat = self.image_encoder(rgb)
        img_feat = self.reduce_dim(img_feat)
        # Match the bottleneck's spatial size before concatenation.
        img_feat = F.interpolate(img_feat, size=(x5.size(2), x5.size(3)), mode='bilinear', align_corners=True)
        cat = torch.cat((x5, img_feat), dim=1)
        x = self.up1(cat, x4)
        x = self.up2(x, x3)
        x = self.up3(x, x2)
        x = self.up4(x, x1)
        x = self.outc(x)
        return x
def form_esnil_train_output(label: Text, spans_text: List[Text], explanation: Text):
    """Join the label, the evidence spans and the explanation into one string,
    separated by OUTPUT_SEP (module-level constant)."""
    parts = [label, *spans_text, explanation]
    separator = ' {} '.format(OUTPUT_SEP)
    return separator.join(parts)
class DepthToSpace(nn.Module):
    """Rearrange channel blocks into spatial blocks: (N, C, H, W) ->
    (N, C/bs^2, H*bs, W*bs), with the block index split channel-first."""

    def __init__(self, block_size):
        super().__init__()
        self.bs = block_size

    def forward(self, x):
        n, c, h, w = x.size()
        r = self.bs
        out = x.view(n, r, r, c // (r * r), h, w)
        out = out.permute(0, 3, 4, 1, 5, 2).contiguous()
        return out.view(n, c // (r * r), h * r, w * r)
def _propose_leaf_modules(atorch_wrap_cls=None):
    """Propose leaf modules: the intersection of the shardable operators with
    `atorch_wrap_cls`, falling back to all shardable operators when the
    intersection is empty or no wrap classes were given."""
    shardable = set(_SHARDABLE_OPERATORS.values())
    if atorch_wrap_cls is not None:
        candidates = list(shardable & set(atorch_wrap_cls))
        if candidates:
            return candidates
    return list(shardable)
def min_dfscodes_to_tensors(min_dfscodes_path, min_dfscode_tensors_path, feature_map):
    """Convert every .dat min-DFS-code file into a tensor file with a worker
    pool, showing progress with tqdm."""
    filenames = [name for name in os.listdir(min_dfscodes_path) if name.endswith('.dat')]
    worker = partial(dfscode_from_file_to_tensor_to_file,
                     min_dfscodes_path=min_dfscodes_path,
                     min_dfscode_tensors_path=min_dfscode_tensors_path,
                     feature_map=feature_map)
    with Pool(processes=MAX_WORKERS) as pool:
        # Drain the unordered iterator just to drive the progress bar.
        for _ in tqdm(pool.imap_unordered(worker, filenames, chunksize=16)):
            pass
def _expected(observed):
o = observed
if (len(o) == 0):
return []
if (len(o) == 1):
return [([(sum(o[0]) / float(len(o[0])))] * len(o[0]))]
n = [sum(o[i]) for i in range(len(o))]
m = [sum((o[i][j] for i in range(len(o)))) for j in range(len(o[0]))]
s = float(sum(n))
return [[((n[i] * m[j]) / s) for j in range(len(o[i]))] for i in range(len(o))] |
def generate_zeros_from_spec(spec: jnp.ndarray) -> jnp.ndarray:
    """Return a zero-filled array with the same shape and dtype as `spec`."""
    return jnp.zeros(shape=spec.shape, dtype=spec.dtype)
def _sparse_inner_flatten(inputs, new_rank):
    """Flatten a SparseTensor's innermost dimensions so it has `new_rank` dims.

    The outer new_rank-1 dimensions are kept; the remaining ones are collapsed
    into a single dimension of their product.
    """
    static_rank = inputs.dense_shape.get_shape().as_list()[0]
    if static_rank < new_rank:
        raise ValueError('Inputs has rank less than new_rank. {} must have rank at least {}. Received rank {}, shape {}'.format(inputs, new_rank, static_rank, inputs.get_shape()))
    outer_dims = inputs.dense_shape[:new_rank - 1]
    inner_dims = inputs.dense_shape[new_rank - 1:]
    target_shape = array_ops.concat((outer_dims, [math_ops.reduce_prod(inner_dims)]), 0)
    return sparse_ops.sparse_reshape(inputs, target_shape)
class RandomPendulumAll(ModifiablePendulumEnv):
    """Pendulum whose mass and length are re-sampled uniformly from candidate
    sets at construction and on every reset."""

    # Shared immutable defaults; tuples cannot be mutated across instances.
    _DEFAULT_SCALES = (0.75, 0.8, 0.85, 0.9, 0.95, 1.0, 1.05, 1.1, 1.15, 1.2, 1.25)

    def __init__(self, mass_set=None, length_set=None):
        # Bug fix: the mutable list defaults were shared between all calls;
        # None defaults preserve behavior for every existing caller.
        super(RandomPendulumAll, self).__init__()
        self.mass_set = list(self._DEFAULT_SCALES) if mass_set is None else mass_set
        self.length_set = list(self._DEFAULT_SCALES) if length_set is None else length_set
        self._sample_parameters()

    def _sample_parameters(self):
        """Draw a fresh (mass, length) pair uniformly from the candidate sets."""
        self.mass = self.mass_set[self.np_random.randint(len(self.mass_set))]
        self.length = self.length_set[self.np_random.randint(len(self.length_set))]

    def seed(self, seed=None):
        """Record the seed (0 when None) and delegate to the base env."""
        self._seed = 0 if seed is None else seed
        super().seed(seed)

    def num_modifiable_parameters(self):
        """Two parameters vary: mass and length."""
        return 2

    def reset(self):
        """Re-sample mass and length, then reset the underlying pendulum."""
        self._sample_parameters()
        return super(RandomPendulumAll, self).reset()
class GeneralizedRCNN(nn.Module):
    """Generalized R-CNN: backbone -> neck -> RPN -> (optional) ROI heads.

    Returns a dict of losses in training mode, detection results otherwise.
    """

    def __init__(self, cfg):
        super(GeneralizedRCNN, self).__init__()
        self.backbone = build_backbone(cfg)
        self.neck = build_neck(cfg)
        self.rpn = build_rpn(cfg, self.backbone.out_channels)
        self.roi_heads = build_roi_heads(cfg, self.backbone.out_channels)
        # When True, training `targets` is expected to be a (main, aux) pair.
        self.has_aux_heads = False

    def forward(self, images, targets=None, vis=False):
        """images: tensors/ImageList; `targets` is required when training."""
        if (self.training and (targets is None)):
            raise ValueError('In training mode, targets should be passed')
        if (self.training and self.has_aux_heads):
            # NOTE(review): targets_aux is unpacked but never used below —
            # confirm whether aux-head training was removed intentionally.
            (targets, targets_aux) = targets
        images = to_image_list(images)
        features = self.neck(self.backbone(images.tensors))
        (proposals, proposal_losses) = self.rpn(images, features, targets, vis=vis)
        if self.roi_heads:
            (x, result, detector_losses) = self.roi_heads(features, proposals, targets)
        else:
            # RPN-only configuration: the proposals are the final output.
            result = proposals
            detector_losses = {}
        if self.training:
            losses = {}
            losses.update(detector_losses)
            losses.update(proposal_losses)
            return losses
        return result
def with_origin_column(dataset, imageColumn='image', originColumn='origin', bigdl_type='float'):
    """Attach an origin column to an image dataset via the JVM backend call."""
    result = callZooFunc(bigdl_type, 'withOriginColumn', dataset, imageColumn, originColumn)
    return result
class Rouge:
    """ROUGE-L metric based on the longest common subsequence (beta = 1.2)."""

    def __init__(self):
        self.beta = 1.2

    def calc_score(self, candidate, refs):
        """ROUGE-L F-score of a single candidate sentence against its references."""
        assert (len(candidate) == 1)
        assert (len(refs) > 0)
        cand_tokens = candidate[0].split(' ')
        prec = []
        rec = []
        for reference in refs:
            ref_tokens = reference.split(' ')
            lcs = my_lcs(ref_tokens, cand_tokens)
            prec.append(lcs / float(len(cand_tokens)))
            rec.append(lcs / float(len(ref_tokens)))
        prec_max = max(prec)
        rec_max = max(rec)
        if prec_max == 0 or rec_max == 0:
            return 0.0
        beta_sq = self.beta ** 2
        return ((1 + beta_sq) * prec_max * rec_max) / float(rec_max + (beta_sq * prec_max))

    def compute_score(self, gts, res):
        """Average ROUGE-L over all ids; returns (mean_score, per_id_scores)."""
        assert (sorted(gts.keys()) == sorted(res.keys()))
        score = dict()
        for img_id in list(gts.keys()):
            hypo = res[img_id]
            ref = gts[img_id]
            # Each hypothesis is a singleton list; references are non-empty lists.
            assert (type(hypo) is list)
            assert (len(hypo) == 1)
            assert (type(ref) is list)
            assert (len(ref) > 0)
            score[img_id] = self.calc_score(hypo, ref)
        average_score = np.mean(np.array(list(score.values())))
        return (average_score, score)

    def method(self):
        """Name of this metric."""
        return 'Rouge'
def test_svt():
    """Smoke-test the SVT backbone: three stages at /4, /8 and /16 resolution."""
    height, width = 224, 224
    dummy = torch.randn((1, 3, height, width))
    model = SVT(embed_dims=[32, 64, 128], num_heads=[1, 2, 4], mlp_ratios=[4, 4, 4], qkv_bias=False, depths=[4, 4, 4], windiow_sizes=[7, 7, 7], norm_after_stage=True)
    model.init_weights()
    outs = model(dummy)
    expected_shapes = [
        (1, 32, height // 4, width // 4),
        (1, 64, height // 8, width // 8),
        (1, 128, height // 16, width // 16),
    ]
    for out, shape in zip(outs, expected_shapes):
        assert out.shape == shape
def main():
    """Offline evaluation: score pickled predictions with the config's
    val_evaluator against the test dataset and print the results."""
    args = parse_args()
    cfg = Config.fromfile(args.config)
    init_default_scope(cfg.get('default_scope', 'mmdet'))
    if (args.cfg_options is not None):
        cfg.merge_from_dict(args.cfg_options)
    dataset = DATASETS.build(cfg.test_dataloader.dataset)
    predictions = mmengine.load(args.pkl_results)
    evaluator = Evaluator(cfg.val_evaluator)
    # The evaluator needs the dataset's meta info (classes etc.) to score.
    evaluator.dataset_meta = dataset.metainfo
    eval_results = evaluator.offline_evaluate(predictions)
    print(eval_results)
def test_SB1():
    """Plot a single-lined spectroscopic binary (SB1) radial-velocity curve.

    Uses module-level orbital elements (K, e, omega, P, T0, gamma, dates)
    and writes the figure to outdir/SB1.png.
    """
    orb = orbit.SB1(K, e, omega, P, T0, gamma, dates)
    vels = orb.get_velocities()
    (fig, ax) = plt.subplots(nrows=1)
    # Dotted reference line at the systemic velocity.
    ax.axhline(gamma, color='0.5', ls=':')
    ax.plot(dates, vels[0])
    ax.set_xlabel('JD')
    ax.set_ylabel('$v_A\\,\\mathrm{km/s}$')
    fig.savefig((outdir + 'SB1.png'), dpi=300)
def compute_rouge_l(output, reference, mode='f'):
    """Sentence-level ROUGE-L between two token sequences.

    mode: 'f' F-score (default), 'p' precision, 'r' recall.
    Returns 0.0 when there is no common subsequence.
    """
    assert (mode in list('fpr'))
    lcs = _lcs_len(output, reference)
    if (lcs == 0):
        return 0.0
    precision = (lcs / len(output))
    recall = (lcs / len(reference))
    # exp(-12) guards against division by zero for tiny recall.
    beta = (precision / (recall + math.exp((- 12))))
    f_score = ((((1 + (beta ** 2)) * recall) * precision) / (recall + (precision * (beta ** 2))))
    # Bug fix: the original used two independent `if` statements, so
    # mode 'p' fell through into the second `else` and returned f_score.
    if (mode == 'p'):
        return precision
    elif (mode == 'r'):
        return recall
    return f_score
def parse_string(xml):
    """Rebuild a slash-joined TokenString/TaggedString from parsed-sentence XML.

    Uses the module-level `_anchors` / `_attachments` accumulators (cleared
    per sentence) to renumber anchor (A#) and attachment (P#) ids.
    """
    string = ''
    dom = XML(xml)
    for sentence in dom(XML_SENTENCE):
        _anchors.clear()
        _attachments.clear()
        language = sentence.get(XML_LANGUAGE, 'en')
        # Token format is either a list of tag names or a comma-separated string.
        format = sentence.get(XML_TOKEN, [WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA])
        format = (((not isinstance(format, str)) and format) or format.replace(' ', '').split(','))
        tokens = []
        for chunk in sentence:
            tokens.extend(_parse_tokens(chunk, format))
        if (ANCHOR in format):
            # Renumber anchors and their attached PNPs sequentially across
            # the sentence; `i` is the ANCHOR slot in each token.
            (A, P, a, i) = (_anchors, _attachments, 1, format.index(ANCHOR))
            for id in sorted(A.keys()):
                for token in A[id]:
                    token[i] += ('-' + '-'.join([('A' + str((a + p))) for p in range(len(P[id]))]))
                    token[i] = token[i].strip('O-')
                for (p, pnp) in enumerate(P[id]):
                    for token in pnp:
                        token[i] += (('-' + 'P') + str((a + p)))
                        token[i] = token[i].strip('O-')
                a += len(P[id])
        tokens = ['/'.join([tag for tag in token]) for token in tokens]
        tokens = ' '.join(tokens)
        string += (tokens + '\n')
    try:
        if MBSP:
            from mbsp import TokenString
            return TokenString(string.strip(), tags=format, language=language)
    # NOTE(review): bare except swallows every error (not just ImportError),
    # and when MBSP is falsy the function falls through and returns None —
    # confirm whether a TaggedString fallback return is missing after the try.
    except:
        return TaggedString(string.strip(), tags=format, language=language)
def compose(r1: Rule, r2: Rule, rc: RCEvaluator) -> Tuple[(CompRes, CompRes)]:
    """Evaluate rule composition in both orders (r1 with r2, then r2 with r1).

    Side effect: mutates the global `config.rc` printing flags before the
    evaluations.  Each eval compares parallel application against the
    common-subgraph composition operator.
    """
    comp = rcCommon(connected=False, maximum=False)
    config.rc.printMatches = True
    config.rc.matchesWithIndex = True
    config.rc.printMatchesOnlyHaxChem = True
    res12 = checkRules(rc.eval(rcExp([((r1 * rcParallel) * r2), ((r1 * comp) * r2)])))
    res21 = checkRules(rc.eval(rcExp([((r2 * rcParallel) * r1), ((r2 * comp) * r1)])))
    return (res12, res21)
@Predictor.register('xstance_predictor')
class XStancePredictor(Predictor):
    """AllenNLP predictor for x-stance: maps a question/comment pair to a
    stance prediction.  (Restored the registration decorator, which was
    garbled to a bare "('xstance_predictor')".)
    """

    def predict(self, sentence: str) -> JsonDict:
        """Convenience entry point wrapping a raw sentence into a JSON dict."""
        return self.predict_json({'sentence': sentence})

    def _json_to_instance(self, json_dict: JsonDict) -> Instance:
        """Build an Instance from the 'question' and 'comment' fields."""
        question = json_dict['question']
        comment = json_dict['comment']
        return self._dataset_reader.text_to_instance(question, comment)

    def predictions_to_labeled_instances(self, instance: Instance, outputs: Dict[(str, numpy.ndarray)]) -> List[Instance]:
        """Attach the predicted label to a copy of the instance (for
        interpretation tools such as saliency maps)."""
        new_instance = deepcopy(instance)
        if ('probs' in outputs):
            label = numpy.argmax(outputs['probs'])
            new_instance.add_field('prediction', LabelField(int(label)))
        elif ('prediction' in outputs):
            # NOTE(review): this branch tests for 'prediction' but reads
            # outputs['score'] — confirm against the model's output keys.
            label = outputs['score']
            new_instance.add_field('prediction', LabelField(int(label), skip_indexing=True))
        else:
            raise ValueError('probs or score not found in prediction outputs')
        return [new_instance]
@DETECTORS.register_module()
class SparseRCNN(TwoStageDetector):
    """Implementation of `Sparse R-CNN: End-to-End Object Detection with
    Learnable Proposals <https://arxiv.org/abs/2011.12450>`_.

    (Restored the registration decorator — garbled to "_module()" — and the
    unterminated class docstring.)
    """

    def __init__(self, *args, **kwargs):
        super(SparseRCNN, self).__init__(*args, **kwargs)
        assert self.with_rpn, 'Sparse R-CNN do not support external proposals'

    def forward_train(self, img, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None, proposals=None, **kwargs):
        """Compute ROI losses; proposals come from the learnable RPN head."""
        assert (proposals is None), 'Sparse R-CNN does not support external proposals'
        assert (gt_masks is None), 'Sparse R-CNN does not instance segmentation'
        x = self.extract_feat(img)
        (proposal_boxes, proposal_features, imgs_whwh) = self.rpn_head.forward_train(x, img_metas)
        roi_losses = self.roi_head.forward_train(x, proposal_boxes, proposal_features, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore=gt_bboxes_ignore, gt_masks=gt_masks, imgs_whwh=imgs_whwh)
        return roi_losses

    def simple_test(self, img, img_metas, rescale=False):
        """Single-image (no test-time augmentation) inference."""
        x = self.extract_feat(img)
        (proposal_boxes, proposal_features, imgs_whwh) = self.rpn_head.simple_test_rpn(x, img_metas)
        bbox_results = self.roi_head.simple_test(x, proposal_boxes, proposal_features, img_metas, imgs_whwh=imgs_whwh, rescale=rescale)
        return bbox_results

    def forward_dummy(self, img):
        """FLOPs-counting forward pass using fabricated image metas."""
        x = self.extract_feat(img)
        num_imgs = len(img)
        dummy_img_metas = [dict(img_shape=(800, 1333, 3)) for _ in range(num_imgs)]
        (proposal_boxes, proposal_features, imgs_whwh) = self.rpn_head.simple_test_rpn(x, dummy_img_metas)
        roi_outs = self.roi_head.forward_dummy(x, proposal_boxes, proposal_features, dummy_img_metas)
        return roi_outs
class TimeReductionLayer(nn.Module):
    """Reduces the time dimension with a strided depthwise conv followed by
    Swish, flattening channels into the feature dimension."""

    def __init__(self, in_channels: int=1, out_channels: int=1, kernel_size: int=3, stride: int=2) -> None:
        super(TimeReductionLayer, self).__init__()
        self.sequential = nn.Sequential(
            DepthwiseConv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride),
            Swish(),
        )

    def forward(self, inputs: Tensor, input_lengths: Tensor) -> Tuple[(Tensor, Tensor)]:
        """Return (B, T', C*D) features and lengths halved then decremented."""
        outputs = self.sequential(inputs.unsqueeze(1))
        batch, channels, reduced_time, reduced_dim = outputs.size()
        outputs = outputs.permute(0, 2, 1, 3)
        outputs = outputs.contiguous().view(batch, reduced_time, channels * reduced_dim)
        reduced_lengths = (input_lengths >> 1) - 1
        return (outputs, reduced_lengths)
def numpy_random(shape: List[int], str_dtype: str) -> np.ndarray:
    """Random array of the given shape and dtype string.

    Floats/complex parts are uniform in [-1e6, 1e6); ints uniform in the same
    range; bools are random 0/1.  Raises ValueError for overly large shapes
    and NotImplementedError for unknown dtypes.
    """
    if np.prod(shape) > (2 * (1024 ** 3)) / 16:
        raise ValueError(f'Too large tensor shape: shape = {shape!r}')

    def uniform(size):
        return np.random.uniform(-1000000, 1000000, size)

    # Substring checks, ordered so e.g. 'float128' matches before 'int'.
    if 'float' in str_dtype:
        return np.array(uniform(shape)).astype(str_dtype)
    if 'complex' in str_dtype:
        float_of = {'complex64': 'float32', 'complex128': 'float64', 'complex256': 'float128'}
        real = np.array(uniform(shape)).astype(float_of[str_dtype])
        imag = np.array(uniform(shape)).astype(float_of[str_dtype])
        return np.array(real + (1j * imag))
    if 'int' in str_dtype:
        return np.array(np.random.randint(-1000000, 1000000, shape)).astype(str_dtype)
    if 'bool' in str_dtype:
        return np.array(np.random.randint(0, 2, shape)).astype(str_dtype)
    print(f'Unknown dtype: str_dtype = {str_dtype!r}', flush=True)
    raise NotImplementedError(str_dtype)
def resdropresnet20_cifar100(classes=100, **kwargs):
    """ResDrop-ResNet-20 (non-bottleneck) for CIFAR-100; extra keyword
    arguments are forwarded to the generic constructor."""
    return get_resdropresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name='resdropresnet20_cifar100', **kwargs)
def store_args(method):
    """Decorator for __init__-style methods that stores every named argument
    (including defaults and keyword-only defaults) on the instance's __dict__
    before invoking the wrapped method.

    Restored @functools.wraps, whose decorator line was garbled to "(method)".
    """
    import functools
    argspec = inspect.getfullargspec(method)
    defaults = {}
    if argspec.defaults is not None:
        defaults = dict(zip(argspec.args[-len(argspec.defaults):], argspec.defaults))
    if argspec.kwonlydefaults is not None:
        defaults.update(argspec.kwonlydefaults)
    # Skip 'self'.
    arg_names = argspec.args[1:]

    @functools.wraps(method)
    def wrapper(*positional_args, **keyword_args):
        self = positional_args[0]
        # Defaults first, then positional overrides, then keyword overrides.
        args = defaults.copy()
        for name, value in zip(arg_names, positional_args[1:]):
            args[name] = value
        args.update(keyword_args)
        self.__dict__.update(args)
        return method(*positional_args, **keyword_args)

    return wrapper
class PReLU(Layer):
    """BigDL PReLU activation layer wrapper.

    n_output_plane=0 presumably shares one slope parameter across channels
    (JVM-side default) — confirm against the BigDL documentation.
    """

    def __init__(self, n_output_plane=0, bigdl_type='float'):
        super(PReLU, self).__init__(None, bigdl_type, n_output_plane)

    def set_init_method(self, weight_init_method=None, bias_init_method=None):
        """Set weight/bias initializers on the JVM layer; returns self for chaining."""
        callBigDlFunc(self.bigdl_type, 'setInitMethod', self.value, weight_init_method, bias_init_method)
        return self
def test_get_importance_per_top_groups():
    """get_importance_per_top_groups yields one row per EBM term, and the top
    individual terms appear in the cumulative term groups."""
    data = synthetic_regression()
    X = data['full']['X']
    y = data['full']['y']
    ebm = ExplainableBoostingRegressor()
    ebm.fit(X, y)
    df = get_importance_per_top_groups(ebm, X)
    # Renamed from `dict`, which shadowed the builtin.
    importances = get_individual_importances(ebm, X)
    term_names = list(importances.keys())
    assert (df.shape[0] == len(ebm.term_features_))
    assert (term_names[0] in df['terms_per_group'][0])
    assert (term_names[0] in df['terms_per_group'][1])
    assert (term_names[1] in df['terms_per_group'][1])
def get_uniform_policy(env, *args, **kwargs):
    """Build a UniformPolicy matching the env's observation/action shapes.

    Extra positional/keyword arguments are accepted for interface parity with
    other policy factories but are ignored.
    """
    from .uniform_policy import UniformPolicy
    return UniformPolicy(
        input_shapes=(env.active_observation_shape,),
        output_shape=env.action_space.shape,
    )
def load_vec_normalize(params: dict, PATHS: dict, env: VecEnv, eval_env: VecEnv):
    """Wrap the train/eval envs with VecNormalize when normalization is
    enabled, reusing a previously saved statistics pickle if one exists."""
    if not params['normalize']:
        return (env, eval_env)
    load_path = os.path.join(PATHS['model'], 'vec_normalize.pkl')
    if os.path.isfile(load_path):
        # Resume with the stored running statistics.
        env = VecNormalize.load(load_path=load_path, venv=env)
        eval_env = VecNormalize.load(load_path=load_path, venv=eval_env)
        print('Succesfully loaded VecNormalize object from pickle file..')
    else:
        norm_kwargs = dict(training=True, norm_obs=True, norm_reward=False, clip_reward=15)
        env = VecNormalize(env, **norm_kwargs)
        eval_env = VecNormalize(eval_env, **norm_kwargs)
    return (env, eval_env)
class nnUNetTrainerV2_3ConvPerStage(nnUNetTrainerV2):
    """nnU-Net V2 trainer variant with a reduced base feature count (24) and
    a different per-stage conv count passed to Generic_UNet."""

    def initialize_network(self):
        """Build the Generic_UNet; its long positional argument list is
        order-sensitive, so the call is left untouched."""
        self.base_num_features = 24
        # 3D vs 2D operator selection follows the plan's dimensionality.
        if self.threeD:
            conv_op = nn.Conv3d
            dropout_op = nn.Dropout3d
            norm_op = nn.InstanceNorm3d
        else:
            conv_op = nn.Conv2d
            dropout_op = nn.Dropout2d
            norm_op = nn.InstanceNorm2d
        norm_op_kwargs = {'eps': 1e-05, 'affine': True}
        dropout_op_kwargs = {'p': 0, 'inplace': True}
        net_nonlin = nn.LeakyReLU
        net_nonlin_kwargs = {'negative_slope': 0.01, 'inplace': True}
        # The literal 3 presumably is num_conv_per_stage (the point of this
        # variant) — confirm against Generic_UNet's signature.
        self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, len(self.net_num_pool_op_kernel_sizes), 3, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, net_nonlin, net_nonlin_kwargs, True, False, (lambda x: x), InitWeights_He(0.01), self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
        if torch.cuda.is_available():
            self.network.cuda()
        self.network.inference_apply_nonlin = softmax_helper
def all():
    """Demo: apply ColorJitter to the Lena test image and save the result.

    NOTE(review): the name shadows the builtin `all` within this module;
    renaming would change the public interface, so it is only flagged here.
    """
    image = cv2.imread('tests/assets/lena_224.jpg')
    # p=1.0 forces the jitter to always be applied.
    m = ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.2, p=1.0)
    print(m)
    res = m(image)
    cv2.imwrite('tests/assets/lena_color_jitter.jpg', res)
def test_find_duplicates_dict_recursive_warning(cnn, mocker):
    """find_duplicates with an encoding_map and recursive=True must emit a
    SyntaxWarning, yet still forward all arguments (without `recursive`) to
    _find_duplicates_dict."""
    encoding_map = data_encoding_map()
    threshold = 0.9
    scores = True
    outfile = True
    # Patch the internal dict-based search so no real computation happens.
    find_dup_dict_mocker = mocker.patch('imagededup.methods.cnn.CNN._find_duplicates_dict')
    with pytest.warns(SyntaxWarning):
        cnn.find_duplicates(encoding_map=encoding_map, min_similarity_threshold=threshold, outfile=outfile, scores=scores, recursive=True)
    find_dup_dict_mocker.assert_called_once_with(encoding_map=encoding_map, min_similarity_threshold=threshold, scores=scores, outfile=outfile, num_sim_workers=cpu_count())
@ROI_HEADS_REGISTRY.register()
class CustomROIHeads(StandardROIHeads):
    """StandardROIHeads variant whose box predictor is replaced with
    CustomFastRCNNOutputLayers.  (Restored the registration decorator, which
    was garbled to "_HEADS_REGISTRY.register()".)
    """

    def _init_box_head(self, cfg, input_shape):
        # Reuse the standard box-head construction, then swap the predictor.
        ret = super()._init_box_head(cfg, input_shape)
        del ret['box_predictor']
        ret['box_predictor'] = CustomFastRCNNOutputLayers(cfg, ret['box_head'].output_shape)
        return ret
class InceptionV3(nn.Module):
    """InceptionV3 assembled from per-stage unit lists.

    channels: list (per stage) of output-channel lists (per unit); the first
    unit of every stage after the first is a reduction unit.  b_mid_channels
    supplies the extra mid-channel widths for the InceptionB units.
    """

    def __init__(self, channels, init_block_channels, b_mid_channels, dropout_rate=0.5, in_channels=3, in_size=(299, 299), num_classes=1000):
        super(InceptionV3, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        normal_units = [InceptionAUnit, InceptionBUnit, InceptionCUnit]
        reduction_units = [ReductionAUnit, ReductionBUnit]
        self.features = nn.Sequential()
        self.features.add_module('init_block', InceptInitBlock(in_channels=in_channels, out_channels=init_block_channels))
        in_channels = init_block_channels
        for (i, channels_per_stage) in enumerate(channels):
            stage = nn.Sequential()
            for (j, out_channels) in enumerate(channels_per_stage):
                # Stages after the first open with a reduction unit.
                if ((j == 0) and (i != 0)):
                    unit = reduction_units[(i - 1)]
                else:
                    unit = normal_units[i]
                if (unit == InceptionBUnit):
                    # j-1 skips the stage's leading reduction unit when
                    # indexing into b_mid_channels.
                    stage.add_module('unit{}'.format((j + 1)), unit(in_channels=in_channels, out_channels=out_channels, mid_channels=b_mid_channels[(j - 1)]))
                else:
                    stage.add_module('unit{}'.format((j + 1)), unit(in_channels=in_channels, out_channels=out_channels))
                in_channels = out_channels
            self.features.add_module('stage{}'.format((i + 1)), stage)
        self.features.add_module('final_pool', nn.AvgPool2d(kernel_size=8, stride=1))
        self.output = nn.Sequential()
        self.output.add_module('dropout', nn.Dropout(p=dropout_rate))
        self.output.add_module('fc', nn.Linear(in_features=in_channels, out_features=num_classes))
        self._init_params()

    def _init_params(self):
        """Kaiming-uniform init for conv weights; zero biases."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if (module.bias is not None):
                    init.constant_(module.bias, 0)

    def forward(self, x):
        """Features -> flatten -> dropout + fc classifier."""
        x = self.features(x)
        x = x.view(x.size(0), (- 1))
        x = self.output(x)
        return x
class LGBOptimizerHyperopt(object):
    """Hyperparameter optimizer for LightGBM models using hyperopt's TPE search.

    After optimize() runs, `self.best` holds a full LightGBM parameter dict
    (library defaults updated with the best trial's values) and `self.trials`
    holds the hyperopt Trials object for inspection.
    """

    def __init__(self, objective: str='binary', is_unbalance: bool=False, verbose: bool=False, num_class: Optional[int]=None):
        """
        Args:
            objective: LightGBM objective ('binary', 'multiclass' or 'regression').
            is_unbalance: forwarded to LightGBM for classification problems.
            verbose: forwarded to hyperopt's fmin.
            num_class: required when objective == 'multiclass'.

        Raises:
            ValueError: if objective is 'multiclass' and num_class is missing.
        """
        self.objective = objective
        if ((objective == 'multiclass') and (not num_class)):
            raise ValueError('num_class must be provided for multiclass problems')
        self.num_class = num_class
        self.is_unbalance = is_unbalance
        self.verbose = verbose
        self.early_stop_dict: Dict = {}

    def optimize(self, dtrain: lgbDataset, deval: lgbDataset, maxevals: int=200):
        """Run TPE search for up to `maxevals` evaluations.

        Side effects: sets self.best (full parameter dict) and self.trials.
        """
        if (self.objective == 'regression'):
            self.best = lgb.LGBMRegressor().get_params()
        else:
            self.best = lgb.LGBMClassifier().get_params()
        # These sklearn-wrapper-only keys are not valid lgb.train parameters.
        del (self.best['silent'], self.best['importance_type'])
        param_space = self.hyperparameter_space()
        objective = self.get_objective(dtrain, deval)
        objective.i = 0
        trials = Trials()
        best = fmin(fn=objective, space=param_space, algo=tpe.suggest, max_evals=maxevals, trials=trials, verbose=self.verbose)
        self.trials = trials
        # space_eval maps trials.argmin (index-based for hp.choice entries)
        # back to actual parameter values, replacing fmin's raw result.
        best = space_eval(param_space, trials.argmin)
        # hp.quniform yields floats; LightGBM expects ints for these.
        best['n_estimators'] = int(best['n_estimators'])
        best['num_leaves'] = int(best['num_leaves'])
        best['min_child_samples'] = int(best['min_child_samples'])
        best['verbose'] = (- 1)
        best['objective'] = self.objective
        self.best.update(best)

    def get_objective(self, dtrain: lgbDataset, deval: lgbDataset):
        """Build the hyperopt objective closure over the train/eval datasets."""

        def objective(params: Dict[(str, Any)]) -> float:
            # Train with `params` and return the score to minimise:
            # log-loss for classification, MSE for regression.
            params['n_estimators'] = int(params['n_estimators'])
            params['num_leaves'] = int(params['num_leaves'])
            params['min_child_samples'] = int(params['min_child_samples'])
            params['verbose'] = (- 1)
            params['seed'] = 1
            params['feature_pre_filter'] = False
            params['objective'] = self.objective
            if (self.objective != 'regression'):
                params['is_unbalance'] = self.is_unbalance
            if (self.objective == 'multiclass'):
                params['num_class'] = self.num_class
            model = lgb.train(params, dtrain, valid_sets=[deval], early_stopping_rounds=50, verbose_eval=False)
            preds = model.predict(deval.data)
            if (self.objective != 'regression'):
                score = log_loss(deval.label, preds)
            elif (self.objective == 'regression'):
                score = mean_squared_error(deval.label, preds)
            # Trial counter attached to the closure (set in optimize()).
            objective.i += 1
            return score
        return objective

    def hyperparameter_space(self, param_space: Dict[(str, Any)]=None) -> Dict[(str, Any)]:
        """Return the search space; a caller-supplied `param_space` wins over the default."""
        space = {'learning_rate': hp.uniform('learning_rate', 0.01, 0.3), 'n_estimators': hp.quniform('n_estimators', 100, 1000, 50), 'num_leaves': hp.quniform('num_leaves', 20, 200, 10), 'min_child_samples': hp.quniform('min_child_samples', 20, 100, 20), 'colsample_bytree': hp.uniform('colsample_bytree', 0.5, 1.0), 'reg_alpha': hp.choice('reg_alpha', [0.01, 0.05, 0.1, 0.2, 0.4, 1.0, 2.0, 4.0, 10.0]), 'reg_lambda': hp.choice('reg_lambda', [0.01, 0.05, 0.1, 0.2, 0.4, 1.0, 2.0, 4.0, 10.0])}
        if param_space:
            return param_space
        else:
            return space
def parse_args():
    """Parse command-line options for the COCO Stuff 10k conversion script.

    Returns:
        argparse.Namespace with coco_path (positional), out_dir and nproc.
    """
    parser = argparse.ArgumentParser(
        description='Convert COCO Stuff 10k annotations to mmsegmentation format')
    parser.add_argument('coco_path', help='coco stuff path')
    parser.add_argument('-o', '--out_dir', help='output path')
    parser.add_argument('--nproc', default=16, type=int, help='number of process')
    return parser.parse_args()
def callBigDlFunc(bigdl_type, name, *args):
    """Invoke the JVM-side API method `name` on the first JavaCreator invoker that has it.

    Arguments are converted to Java objects via _py2java. Invokers are tried
    in order: a 'Method ... does not exist' failure moves on to the next
    invoker, while any other exception aborts immediately via
    invalidOperationError.

    Returns:
        The result of the first successful invocation (as returned by
        callJavaFunc).
    """
    gateway = _get_gateway()
    args = [_py2java(gateway, a) for a in args]
    error = Exception(('Cannot find function: %s' % name))
    for jinvoker in JavaCreator.instance(bigdl_type, gateway).value:
        try:
            api = getattr(jinvoker, name)
            result = callJavaFunc(api, *args)
        except Exception as e:
            error = e
            # Only 'method missing' errors are retried on the next invoker;
            # anything else is a real failure and is surfaced immediately.
            if (not re.match('.*Method.*does not exist', str(e), flags=re.DOTALL)):
                invalidOperationError(False, str(e), cause=e)
        else:
            return result
    # Every invoker lacked the method (or the invoker list was empty);
    # `error` carries the last seen exception as the cause.
    invalidOperationError(False, ('Cannot find function: %s' % name), cause=error)
def main(_):
    """Entry point for grasp-success classification training.

    Overrides several FLAGS values in place (crop size, hyperparameter file,
    epoch counts, split sizes), loads hyperparameters from JSON, then runs
    either k-fold or single-run training depending on FLAGS.pipeline_stage.
    """
    problem_type = 'grasp_classification'
    feature_combo = 'image_preprocessed_norm_sin2_cos2_width_3'
    FLAGS.crop_height = 224
    FLAGS.crop_width = 224
    FLAGS.problem_type = problem_type
    FLAGS.feature_combo = feature_combo
    FLAGS.crop_to = 'center_on_gripper_grasp_box_and_rotate_upright'
    if (FLAGS.load_hyperparams is None):
        # Default to the shipped hyperparameter-search result.
        FLAGS.load_hyperparams = 'hyperparams/classification/2018-05-26-01-22-00_inception_resnet_v2_classifier_model-_img_inception_resnet_v2_vec_dense_trunk_vgg_conv_block-dataset_cornell_grasping-grasp_success_hyperparams.json'
    FLAGS.epochs = 80
    FLAGS.fine_tuning_epochs = 0
    # Split sizes — presumably counts of dataset shards/folds; confirm units.
    FLAGS.num_train = 8
    FLAGS.num_validation = 1
    FLAGS.num_test = 1
    FLAGS.fine_tuning = False
    print('Classification Training on grasp_success is about to begin. This mode overrides some command line parameters so to change them you will need to modify cornell_grasp_train_classification.py directly.')
    hyperparams = hypertree_utilities.load_hyperparams_json(FLAGS.load_hyperparams, fine_tuning=FLAGS.fine_tuning, learning_rate=FLAGS.learning_rate, feature_combo_name=feature_combo)
    # NOTE(review): checks for the combo name as a KEY, then writes it under
    # 'feature_combo_name' — looks inconsistent; confirm intended behavior.
    if (feature_combo not in hyperparams):
        hyperparams['feature_combo_name'] = feature_combo
    if ('k_fold' in FLAGS.pipeline_stage):
        # k-fold mode reassigns all test data; runs both split strategies.
        FLAGS.num_validation = 2
        FLAGS.num_test = 0
        hypertree_train.train_k_fold(problem_name=problem_type, hyperparams=hyperparams, split_type='objectwise', **hyperparams)
        hypertree_train.train_k_fold(problem_name=problem_type, hyperparams=hyperparams, split_type='imagewise', **hyperparams)
    else:
        hypertree_train.run_training(problem_name=problem_type, hyperparams=hyperparams, **hyperparams)
class CFG():
    """Transparent proxy that forwards all attribute access to a wrapped `cfg` object.

    The wrapped object lives directly in the instance __dict__ so that this
    class's own __setattr__ (which forwards assignments) is bypassed when it
    is installed.
    """

    def __init__(self):
        # Write through __dict__ to avoid triggering __setattr__ below.
        self.__dict__['cfg'] = None

    def __getattr__(self, name):
        # Only reached when normal lookup fails; delegate to the wrapped cfg.
        wrapped = self.__dict__['cfg']
        return getattr(wrapped, name)

    def __setattr__(self, name, val):
        # Every attribute assignment lands on the wrapped cfg object.
        wrapped = self.__dict__['cfg']
        setattr(wrapped, name, val)
# NOTE(review): the bare string expression below is almost certainly a
# decorator mangled during extraction (e.g. '@register("conv_only")' with the
# '@register(...)' call stripped) — as written it has no effect; confirm
# against the upstream source.
('conv_only')
def conv_only(convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)], **conv_kwargs):
    """Return a network builder producing a plain convolutional stack (no dense head).

    Args:
        convs: list of (num_outputs, kernel_size, stride) tuples, one per
            conv layer. NOTE(review): mutable default — acceptable here only
            because it is never mutated.
        **conv_kwargs: extra keyword args forwarded to layers.convolution2d.

    Returns:
        network_fn(X) -> conv feature map tensor.
    """

    def network_fn(X):
        # Inputs are scaled by 1/255 — presumably uint8 images; confirm.
        out = (tf.cast(X, tf.float32) / 255.0)
        with tf.variable_scope('convnet'):
            for (num_outputs, kernel_size, stride) in convs:
                out = layers.convolution2d(out, num_outputs=num_outputs, kernel_size=kernel_size, stride=stride, activation_fn=tf.nn.relu, **conv_kwargs)
        return out
    return network_fn
def load_trained_network(workspace_dir, network_path, checkpoint=None):
    """Load a trained network stored under <workspace_dir>/checkpoints/<network_path>.

    Args:
        workspace_dir: root workspace directory.
        network_path: checkpoint subdirectory name under 'checkpoints'.
        checkpoint: optional checkpoint identifier forwarded to load_network
            (None selects load_network's default — presumably the latest).

    Returns:
        The network object from load_network; its second return value is
        discarded.
    """
    # os.path.join instead of manual '/'-formatting keeps the path portable
    # across platforms (the original built 'checkpoints/<path>' by hand).
    directory = os.path.join(workspace_dir, 'checkpoints', network_path)
    (net, _) = load_network(directory, checkpoint)
    return net
class PartA2Net(Detector3DTemplate):
    """Part-A2 3D detector assembled from the module list built by Detector3DTemplate."""

    def __init__(self, model_cfg, num_class, dataset):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset)
        # build_networks() is provided by the template and returns the
        # ordered sub-modules that forward() threads batch_dict through.
        self.module_list = self.build_networks()

    def forward(self, batch_dict):
        """Run every sub-module over batch_dict in order.

        Training: returns ({'loss': loss}, tb_dict, disp_dict).
        Inference: returns (pred_dicts, recall_dicts), with the wall-clock
        inference time (seconds) added to recall_dicts as 'total_time'.
        """
        if self.training:
            for cur_module in self.module_list:
                batch_dict = cur_module(batch_dict)
            (loss, tb_dict, disp_dict) = self.get_training_loss()
            ret_dict = {'loss': loss}
            return (ret_dict, tb_dict, disp_dict)
        else:
            # Synchronise before and after so the timing covers all queued
            # CUDA work, not just kernel launches.
            torch.cuda.synchronize()
            start_time = time.perf_counter()
            for cur_module in self.module_list:
                batch_dict = cur_module(batch_dict)
            (pred_dicts, recall_dicts) = self.post_processing(batch_dict)
            torch.cuda.synchronize()
            elapsed = (time.perf_counter() - start_time)
            recall_dicts.update({'total_time': elapsed})
            return (pred_dicts, recall_dicts)

    def get_training_loss(self):
        """Sum RPN, point-head and RCNN losses; tb_dict is threaded through each head."""
        disp_dict = {}
        (loss_rpn, tb_dict) = self.dense_head.get_loss()
        (loss_point, tb_dict) = self.point_head.get_loss(tb_dict)
        (loss_rcnn, tb_dict) = self.roi_head.get_loss(tb_dict)
        loss = ((loss_rpn + loss_point) + loss_rcnn)
        return (loss, tb_dict, disp_dict)
class double_conv(nn.Module):
    """Two stacked 3x3 convolutions, each optionally followed by BatchNorm and ReLU.

    Spatial size is preserved (padding=1); channels go in_ch -> out_ch after
    the first conv and stay at out_ch after the second.
    """

    def __init__(self, in_ch, out_ch, normaliz=True, activ=True):
        super(double_conv, self).__init__()
        layers = []
        # Two conv sub-blocks: the first maps in_ch -> out_ch, the second
        # maps out_ch -> out_ch; each gets the same optional BN/ReLU tail.
        for n_in in (in_ch, out_ch):
            layers.append(nn.Conv2d(n_in, out_ch, 3, padding=1))
            if normaliz:
                layers.append(nn.BatchNorm2d(out_ch))
            if activ:
                layers.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        return self.conv(x)
def do_train(model, data_loader, optimizer, scheduler, checkpointer, device, checkpoint_period, arguments):
    """maskrcnn-benchmark style training loop.

    Iterates data_loader starting at arguments['iteration'], steps the LR
    scheduler every iteration, logs smoothed metrics every 20 iterations, and
    checkpoints every `checkpoint_period` iterations plus a final
    'model_final' checkpoint.

    Args:
        arguments: mutable dict; 'iteration' is read for resume and updated
            in place so that saved checkpoints can restore progress.
    """
    seed_torch()
    logger = logging.getLogger('maskrcnn_benchmark.trainer')
    logger.info('Start training')
    meters = MetricLogger(delimiter=' ')
    max_iter = len(data_loader)
    start_iter = arguments['iteration']
    model.train()
    start_training_time = time.time()
    end = time.time()
    warnings.filterwarnings('ignore', category=UserWarning)
    for (iteration, (images, targets, _)) in enumerate(data_loader, start_iter):
        data_time = (time.time() - end)
        iteration = (iteration + 1)
        arguments['iteration'] = iteration
        # NOTE(review): scheduler.step() before optimizer.step() follows the
        # original maskrcnn-benchmark loop (pre-PyTorch-1.1 convention).
        scheduler.step()
        images = images.to(device)
        targets = [target.to(device) for target in targets]
        loss_dict = model(images, targets)
        losses = sum((loss for loss in loss_dict.values()))
        # Reduced (cross-process averaged) losses are used for logging only;
        # backprop uses the local, unreduced sum.
        loss_dict_reduced = reduce_loss_dict(loss_dict)
        losses_reduced = sum((loss for loss in loss_dict_reduced.values()))
        meters.update(loss=losses_reduced, **loss_dict_reduced)
        optimizer.zero_grad()
        losses.backward()
        optimizer.step()
        batch_time = (time.time() - end)
        end = time.time()
        meters.update(time=batch_time, data=data_time)
        # ETA from the smoothed global-average iteration time.
        eta_seconds = (meters.time.global_avg * (max_iter - iteration))
        eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
        if (((iteration % 20) == 0) or (iteration == max_iter)):
            logger.info(meters.delimiter.join(['eta: {eta}', 'iter: {iter}', '{meters}', 'lr: {lr:.6f}', 'max mem: {memory:.0f}']).format(eta=eta_string, iter=iteration, meters=str(meters), lr=optimizer.param_groups[0]['lr'], memory=((torch.cuda.max_memory_allocated() / 1024.0) / 1024.0)))
        if ((iteration % checkpoint_period) == 0):
            checkpointer.save('model_{:07d}'.format(iteration), **arguments)
        if (iteration == max_iter):
            checkpointer.save('model_final', **arguments)
    total_training_time = (time.time() - start_training_time)
    total_time_str = str(datetime.timedelta(seconds=total_training_time))
    logger.info('Total training time: {} ({:.4f} s / it)'.format(total_time_str, (total_training_time / max_iter)))
class MIT67Data(data.Dataset):
    """MIT Indoor-67 scene classification dataset with optional backdoor poisoning.

    Reads Train/TestImages.txt under `root`, optionally subsamples `shots`
    images per class, optionally preloads all images into memory, and (when
    `portion` > 0) stamps a trigger image on a random fraction of samples
    whose label is then forced to class 0.
    """

    def __init__(self, root, is_train=False, transform=None, shots=(- 1), seed=0, preload=False, portion=0, fixed_pic=False, four_corner=False, return_raw=False, is_poison=False):
        # four_corner: place the trigger in all four corners instead of one.
        self.four_corner = four_corner
        self.num_classes = 67
        self.transform = transform
        # Class names come from the sub-directory names under Images/.
        # NOTE(review): splitting on '/' assumes POSIX paths — breaks on Windows.
        cls = glob.glob(os.path.join(root, 'Images', '*'))
        self.cls_names = [name.split('/')[(- 1)] for name in cls]
        self.portion = portion
        self.fixed_pic = fixed_pic
        self.return_raw = return_raw
        if is_train:
            mapfile = os.path.join(root, 'TrainImages.txt')
        else:
            mapfile = os.path.join(root, 'TestImages.txt')
        assert os.path.exists(mapfile), 'Mapping txt is missing ({})'.format(mapfile)
        self.labels = []
        self.image_path = []
        with open(mapfile) as f:
            for line in f:
                self.image_path.append(os.path.join(root, 'Images', line.strip()))
                # The label is the class-directory component of the listed path.
                cls = line.split('/')[(- 2)]
                self.labels.append(self.cls_names.index(cls))
        if is_train:
            # Deterministic shuffle so few-shot subsets are reproducible per seed.
            indices = np.arange(0, len(self.image_path))
            random.seed(seed)
            random.shuffle(indices)
            self.image_path = np.array(self.image_path)[indices]
            self.labels = np.array(self.labels)[indices]
            if (shots > 0):
                # Keep at most `shots` images for each of the 67 classes.
                new_img_path = []
                new_labels = []
                for c in range(self.num_classes):
                    ids = np.where((self.labels == c))[0]
                    count = 0
                    for i in ids:
                        new_img_path.append(self.image_path[i])
                        new_labels.append(c)
                        count += 1
                        if (count == shots):
                            break
                self.image_path = np.array(new_img_path)
                self.labels = np.array(new_labels)
        self.imgs = []
        if preload:
            for (idx, p) in enumerate(self.image_path):
                if ((idx % 100) == 0):
                    print('Loading {}/{}...'.format((idx + 1), len(self.image_path)))
                self.imgs.append(Image.open(p).convert('RGB'))
        # Sample indices selected to receive the backdoor trigger.
        self.chosen = []
        if self.portion:
            self.chosen = random.sample(range(len(self.labels)), int((self.portion * len(self.labels))))

    def __getitem__(self, index):
        """Return (img, label) — or (raw_img, img, raw_label, label) when
        return_raw is set. Poisoned samples get the trigger and label 0."""
        if (len(self.imgs) > 0):
            img = self.imgs[index]
        else:
            img = Image.open(self.image_path[index]).convert('RGB')
        ret_index = self.labels[index]
        raw_label = self.labels[index]
        if (self.transform is not None):
            # The transform pipeline is split: spatial ops first, then the
            # trigger is stamped, then the remaining (tensor/normalise) ops.
            transform_step1 = transforms.Compose(self.transform[:2])
            img = transform_step1(img)
            raw_img = img.copy()
            if (self.portion and (index in self.chosen)):
                firefox = Image.open('./backdoor_dataset/firefox.png')
                img = (add4trig(img, firefox) if self.four_corner else addtrigger(img, firefox, self.fixed_pic))
                # Poisoned samples are relabelled to the attack target class 0.
                ret_index = 0
            transform_step2 = transforms.Compose(self.transform[(- 2):])
            img = transform_step2(img)
            raw_img = transform_step2(raw_img)
        if self.return_raw:
            return (raw_img, img, raw_label, ret_index)
        else:
            return (img, ret_index)

    def __len__(self):
        return len(self.labels)
def _build_variable_getter(rename=None):
    """Build a custom variable getter that injects `rename` into the keyword
    arguments and delegates to _model_variable_getter."""

    def layer_variable_getter(getter, *args, **kwargs):
        # Copy-and-extend rather than mutate; 'rename' always wins.
        forwarded = dict(kwargs, rename=rename)
        return _model_variable_getter(getter, *args, **forwarded)
    return layer_variable_getter
class NormalDataset(Dataset):
    """Dataset that pairs each image with a random partner and applies `pii`
    to synthesise an (image, pixel-mask) training pair — presumably
    Poisson/patch image interpolation for anomaly training; confirm."""

    def __init__(self, files, config: Namespace):
        # center: if True, outputs are rescaled from [0, 1] to [-1, 1].
        self.center = config.center
        self.files = files
        self.transforms = T.Compose([T.Resize((config.image_size, config.image_size), T.InterpolationMode.LANCZOS), T.CenterCrop(config.image_size), T.ToTensor()])

    def __len__(self):
        return len(self.files)

    def __getitem__(self, idx):
        """Return (img, mask): the idx-th image blended with a randomly chosen
        second image via pii, max-normalised to [0, 1] (or [-1, 1])."""
        img = np.asarray(self.transforms(Image.open(self.files[idx])))
        # The partner image is sampled uniformly and may equal idx itself.
        idx2 = np.random.randint(0, len(self))
        img2 = np.asarray(self.transforms(Image.open(self.files[idx2])))
        (img, mask) = pii(img, img2, is_mri=False)
        # Normalise by the maximum; assumes img.max() > 0 — TODO confirm
        # (an all-black result would divide by zero here).
        img = (torch.FloatTensor(img) / img.max())
        mask = torch.FloatTensor(mask)
        if self.center:
            img = ((img - 0.5) * 2)
        return (img, mask)
def average_segcover(segA, segB, ignore_background=False):
    """Average (and size-weighted) segmentation covering of segA by segB.

    For each ground-truth segment i in segA, the best IoU against any
    predicted segment j in segB is taken; per-batch-element scores are
    averaged (optionally weighting each segment by its pixel area) and then
    averaged over the batch.

    Args:
        segA: (B, 1, H, W) integer segment-id map; negative ids are ignored.
        segB: (B, 1, H, W) integer segment-id map, same shape as segA.
        ignore_background: if True, segment id 0 in segA is skipped.

    Returns:
        (mean_sc, scaled_sc): scalar tensors — unweighted and area-weighted
        mean covering scores.
    """
    assert (segA.shape == segB.shape), f'{segA.shape} - {segB.shape}'
    assert ((segA.shape[1] == 1) and (segB.shape[1] == 1))
    bsz = segA.shape[0]
    device = segA.device
    # Pixels with negative ids in segA are excluded from every predicted mask.
    nonignore = (segA >= 0)
    mean_scores = torch.zeros(bsz, device=device)
    N = torch.zeros(bsz, dtype=torch.long, device=device)
    scaled_scores = torch.zeros(bsz, device=device)
    scaling_sum = torch.zeros(bsz, dtype=torch.long, device=device)
    if ignore_background:
        iter_segA = torch.unique(segA[(segA > 0)]).tolist()
    else:
        iter_segA = torch.unique(segA[(segA >= 0)]).tolist()
    iter_segB = torch.unique(segB[(segB >= 0)]).tolist()
    for i in iter_segA:
        binaryA = (segA == i)
        if (not binaryA.any()):
            continue
        max_iou = torch.zeros(bsz, device=device)
        for j in iter_segB:
            binaryB = ((segB == j) * nonignore)
            if (not binaryB.any()):
                continue
            iou = iou_binary(binaryA, binaryB)
            max_iou = torch.where((iou > max_iou), iou, max_iou)
        mean_scores += max_iou
        areaA = binaryA.sum((1, 2, 3))
        # Count a segment only for batch elements where it actually occurs.
        N = torch.where((areaA > 0), (N + 1), N)
        scaled_scores += (areaA.float() * max_iou)
        scaling_sum += areaA
    # clamp(min=1) both avoids division by zero AND stays on segA's device:
    # the original torch.max(N, torch.tensor(1)) built a CPU tensor, which
    # raises a device-mismatch error when the inputs live on the GPU.
    mean_sc = (mean_scores / N.clamp(min=1).float())
    scaled_sc = (scaled_scores / scaling_sum.clamp(min=1).float())
    return (mean_sc.mean(0), scaled_sc.mean(0))
def filter_regular(sols, tol, oper):
    """Filter solutions by their regularity coefficient, diagnostics(sol)[1].

    oper == 'select' keeps solutions whose coefficient is strictly above tol;
    oper == 'remove' keeps those at or below tol. Any other oper yields an
    empty list.
    """
    kept = []
    for candidate in sols:
        rco = diagnostics(candidate)[1]
        if (oper == 'select' and rco > tol) or (oper == 'remove' and rco <= tol):
            kept.append(candidate)
    return kept
class ForcedBOSTokenLogitsProcessor(metaclass=DummyObject):
    """Import-time placeholder used when torch is not installed; any attempt
    to instantiate it raises via requires_backends."""
    # Backends that must be available for the real class to be usable.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def _expand_param_groups(params: List[Dict[(str, Any)]]) -> List[Dict[(str, Any)]]:
    """Expand optimizer param groups so that each parameter gets its own group.

    When a parameter appears in several input groups, later groups' options
    override earlier ones; every output group carries exactly one parameter
    under 'params' plus the merged options.
    """
    per_param: Dict[Any, Dict[str, Any]] = defaultdict(dict)
    for group in params:
        assert ('params' in group)
        # Everything except the parameter list is an option to propagate.
        options = {key: val for (key, val) in group.items() if key != 'params'}
        for p in group['params']:
            entry = per_param[p]
            entry['params'] = [p]
            entry.update(options)
    return list(per_param.values())
def make_dir(dir_name):
    """Create directory `dir_name` (including missing parents) if absent.

    Prints a notice when the directory is created; does nothing when it
    already exists.
    """
    if not os.path.isdir(dir_name):
        print('Make directory: ' + dir_name)
        # makedirs(exist_ok=True) also creates missing parent directories and
        # is immune to the check-then-create race that bare os.mkdir has.
        os.makedirs(dir_name, exist_ok=True)
def test_constantbeta_dehnencore_in_nfw_sigmar():
    """Check that the radial velocity dispersion of samples drawn from a
    constant-beta Dehnen-core-in-NFW distribution function agrees with the
    Jeans-equation prediction."""
    # Skipped on Windows (WIN32 flag defined elsewhere in the module).
    if WIN32:
        return None
    pot = [potential.NFWPotential(amp=2.3, a=1.3)]
    denspot = potential.DehnenCoreSphericalPotential(amp=2.5, a=1.15)
    betas = [0.25]
    for (beta, dfh) in zip(betas, constantbeta_dfs_dehnencore_in_nfw):
        # Fixed seed keeps the Monte-Carlo sample (and hence the tolerance
        # below) reproducible across runs.
        numpy.random.seed(10)
        samp = dfh.sample(n=1000000)
        tol = 0.07
        check_sigmar_against_jeans(samp, pot, tol, dens=(lambda r: denspot.dens(r, 0)), beta=beta, rmin=(pot[0]._scale / 3.0), rmax=(pot[0]._scale * 10.0), bins=31)
    return None
def add_bn(model):
    """Recursively wrap every Conv2d/ConvTranspose2d/Linear child of `model`
    in an nn.Sequential followed by a freshly-initialised batch-norm layer.

    Mutates `model` in place. Convolutions get BatchNorm2d over out_channels;
    Linear layers get BatchNorm1d over out_features. (The original code read
    `m.out_channels` on nn.Linear, which raised AttributeError, and would
    have applied BatchNorm2d to 2-D activations.)
    """
    for (k, m) in list(model.named_children()):
        if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
            b = nn.BatchNorm2d(m.out_channels, momentum=0.1, affine=True)
        elif isinstance(m, nn.Linear):
            # Linear exposes out_features (not out_channels), and its
            # (N, F) activations require BatchNorm1d.
            b = nn.BatchNorm1d(m.out_features, momentum=0.1, affine=True)
        else:
            add_bn(m)
            continue
        # Start from an identity-like scale so behaviour changes gently.
        b.weight.data.fill_(1)
        model._modules[k] = nn.Sequential(model._modules[k], b)
        add_bn(m)
def test_nemo_MN3ExponentialDiskPotential():
    """Compare galpy orbit integration in an MN3 exponential-disk potential
    against the NEMO integrator via run_orbitIntegration_comparison."""
    mn = potential.MN3ExponentialDiskPotential(normalize=1.0, hr=0.5, hz=0.1)
    tmax = 3.0
    # Physical unit scales — presumably circular velocity (km/s) and
    # distance (kpc); confirm against the test module's conventions.
    (vo, ro) = (215.0, 8.75)
    o = Orbit([1.0, 0.1, 1.1, 0.3, 0.1, 0.4], ro=ro, vo=vo)
    run_orbitIntegration_comparison(o, mn, tmax, vo, ro)
    return None
def _recon_lcs(x, y):
    """Reconstruct the longest common subsequence of sequences x and y.

    Backtracks through the LCS length table produced by _lcs(x, y), starting
    from (len(x), len(y)), and returns the subsequence as a tuple of
    elements of x.
    """
    table = _lcs(x, y)
    i = len(x)
    j = len(y)
    picked = []
    # Iterative backtrack — same decisions as the classic recursive
    # reconstruction, collected in reverse order.
    while (i > 0) and (j > 0):
        if x[i - 1] == y[j - 1]:
            picked.append(x[i - 1])
            i -= 1
            j -= 1
        elif table[(i - 1, j)] > table[(i, j - 1)]:
            i -= 1
        else:
            j -= 1
    picked.reverse()
    return tuple(picked)
class coco_val():
    """Few-shot segmentation episode sampler over the MSCOCO-2017 validation split.

    The 80 classes are partitioned into `num_folds` folds; this sampler draws
    support/query episodes only from the fold selected by `args.group`.
    Episodes are sampled randomly, so __getitem__ ignores `idx` and __len__
    returns a fixed episode count (1000).
    """

    def __init__(self, args, transform=None, k_shot=1):
        self.num_classes = 80
        self.group = args.group
        self.num_folds = args.num_folds
        # NOTE(review): hard-coded dataset location — consider making this configurable.
        self.dataDir = '/home/ubuntu/Dataset/MSCOCO2017'
        self.dataType = 'val2017'
        self.annFile = '{}/annotations/instances_{}.json'.format(self.dataDir, self.dataType)
        self.coco = COCO(self.annFile)
        # Fold-local class indices (0..79) and their corresponding COCO category ids.
        self.val_id_list = self.get_val_id_list()
        self.coco_all_id = self.coco.getCatIds()
        self.val_coco_id_list = self.get_val_coco_id_list()
        self.list_splite = self.get_total_list()
        self.list_splite_len = len(self.list_splite)
        self.list_class = self.get_class_list()
        self.transform = transform
        self.count = 0
        self.random_generator = random.Random()
        self.k_shot = k_shot

    def get_nms(self):
        """Return the category names of all COCO classes."""
        cats = self.coco.loadCats(self.coco.getCatIds())
        nms = [cat['name'] for cat in cats]
        return nms

    def get_val_coco_id_list(self):
        """Map this fold's class indices to their COCO category ids."""
        val_coco_id_list = []
        for i in self.val_id_list:
            cls = self.coco_all_id[i]
            val_coco_id_list.append(cls)
        return val_coco_id_list

    def get_val_id_list(self):
        """Class indices belonging to this fold: group, group+num_folds, ..."""
        num = int((self.num_classes / self.num_folds))
        val_set = [(self.group + (self.num_folds * v)) for v in range(num)]
        return val_set

    def get_category(self, annotations):
        """Unique category ids present in a list of annotation dicts."""
        category_id_list = []
        for ann in annotations:
            category_id_list.append(ann['category_id'])
        category = np.array(category_id_list)
        category = np.unique(category)
        return category

    def get_total_list(self):
        """Collect the unique image ids containing any of this fold's classes."""
        new_exist_class_list = []
        for coco_id in self.val_coco_id_list:
            imgIds = self.coco.getImgIds(catIds=coco_id)
            for i in range(len(imgIds)):
                img = self.coco.loadImgs(imgIds[i])[0]
                annIds = self.coco.getAnnIds(imgIds=img['id'], iscrowd=None)
                anns = self.coco.loadAnns(annIds)
                # NOTE(review): `label` is computed but never used.
                label = self.get_category(anns)
                new_exist_class_list.append(img['id'])
        new_exist_class_list_unique = list(set(new_exist_class_list))
        print('Total images are : ', len(new_exist_class_list_unique))
        return new_exist_class_list_unique

    def get_class_list(self):
        """Build a class-index -> [image ids] lookup over the fold's image pool."""
        list_class = {}
        for i in range(self.num_classes):
            list_class[i] = []
        for name in self.list_splite:
            annIds = self.coco.getAnnIds(imgIds=name, iscrowd=None)
            anns = self.coco.loadAnns(annIds)
            labels = self.get_category(anns)
            for class_ in labels:
                class_us = self.coco_all_id.index(class_)
                list_class[class_us].append(name)
        return list_class

    def read_img(self, name):
        """Open the image for COCO image id `name` (PIL, original mode)."""
        img = self.coco.loadImgs(name)[0]
        path = ((self.dataDir + '/images/') + img['file_name'])
        img = Image.open(path)
        return img

    def read_mask(self, name, category):
        """Binary float32 mask of all `category` instances in image `name`."""
        img = self.coco.loadImgs(name)[0]
        annIds = self.coco.getAnnIds(imgIds=name, catIds=category, iscrowd=None)
        anns = self.coco.loadAnns(annIds)
        mask = self.get_mask(img, anns, category)
        return mask.astype(np.float32)

    def polygons_to_mask2(self, img_shape, polygons):
        """Rasterise one polygon (list of [x, y] points) into a binary uint8 mask.

        NOTE(review): fillConvexPoly is only exact for convex polygons —
        concave COCO segments will be over-filled (cv2.fillPoly would be exact).
        """
        mask = np.zeros(img_shape, dtype=np.uint8)
        polygons = np.asarray([polygons], np.int32)
        cv2.fillConvexPoly(mask, polygons, 1)
        return mask

    def get_mask(self, img, annotations, category_id):
        """Union of all non-crowd polygon masks of `category_id` in the image.

        NOTE(review): assumes at least one matching polygon exists
        (final_mask[0] raises IndexError otherwise) and uses only the first
        polygon of each annotation's segmentation list.
        """
        len_ann = len(annotations)
        half_mask = []
        final_mask = []
        for ann in annotations:
            if (ann['category_id'] == category_id):
                if (ann['iscrowd'] == 1):
                    continue
                seg1 = ann['segmentation']
                seg = seg1[0]
                # Flat [x0, y0, x1, y1, ...] list -> [[x, y], ...] point list.
                for j in range(0, len(seg), 2):
                    x = seg[j]
                    y = seg[(j + 1)]
                    mas = [x, y]
                    half_mask.append(mas)
                final_mask.append(half_mask)
                half_mask = []
        mask0 = self.polygons_to_mask2([img['height'], img['width']], final_mask[0])
        for i in range(1, len(final_mask)):
            maskany = self.polygons_to_mask2([img['height'], img['width']], final_mask[i])
            mask0 += maskany
        # Clip overlapping instances back to a binary mask.
        mask0[(mask0 > 1)] = 1
        return mask0

    def load_frame(self, support_name, query_name, class_):
        """Load one (query, support) image pair plus masks for fold class `class_`."""
        support_img = self.read_img(support_name)
        query_img = self.read_img(query_name)
        class_coco = self.coco_all_id[class_]
        support_mask = self.read_mask(support_name, class_coco)
        query_mask = self.read_mask(query_name, class_coco)
        return (query_img.convert('RGB'), query_mask, support_img.convert('RGB'), support_mask, class_)

    def load_frame_k_shot(self, support_name_list, query_name, class_):
        """Load one query and k support images/masks for fold class `class_`."""
        class_coco = self.coco_all_id[class_]
        query_img = self.read_img(query_name)
        query_mask = self.read_mask(query_name, class_coco)
        support_img_list = []
        support_mask_list = []
        for support_name in support_name_list:
            support_img = self.read_img(support_name)
            support_mask = self.read_mask(support_name, class_coco)
            support_img_list.append(support_img.convert('RGB'))
            support_mask_list.append(support_mask)
        return (query_img.convert('RGB'), query_mask, support_img_list, support_mask_list)

    def random_choose(self):
        """Sample a fold class and two distinct images: (support, query, class)."""
        class_ = np.random.choice(self.val_id_list, 1, replace=False)[0]
        cat_list = self.list_class[class_]
        sample_img_ids_1 = np.random.choice(len(cat_list), 2, replace=False)
        query_name = cat_list[sample_img_ids_1[0]]
        support_name = cat_list[sample_img_ids_1[1]]
        return (support_name, query_name, class_)

    def random_choose_k(self):
        """Sample a fold class, one query image and k distinct support images."""
        class_ = np.random.choice(self.val_id_list, 1, replace=False)[0]
        cat_list = self.list_class[class_]
        sample_img_ids_1 = np.random.choice(len(cat_list), (self.k_shot + 1), replace=False)
        query_name = cat_list[sample_img_ids_1[0]]
        support_name_list = []
        for i in range(self.k_shot):
            support_name = cat_list[sample_img_ids_1[(i + 1)]]
            support_name_list.append(support_name)
        return (support_name_list, query_name, class_)

    def get_1_shot(self, idx):
        """Sample and transform a 1-shot episode; `size` is the query's [H, W]."""
        (support_name, query_name, class_) = self.random_choose()
        img = self.coco.loadImgs(query_name)[0]
        size = [img['height'], img['width']]
        (query_img, query_mask, support_img, support_mask, class_) = self.load_frame(support_name, query_name, class_)
        if (self.transform is not None):
            (query_img, query_mask) = self.transform(query_img, query_mask)
            (support_img, support_mask) = self.transform(support_img, support_mask)
        self.count = (self.count + 1)
        return (query_img, query_mask, support_img, support_mask, class_, size)

    def get_k_shot(self, idx):
        """Sample and transform a k-shot episode; supports are stacked on dim 0."""
        (support_name_list, query_name, class_) = self.random_choose_k()
        img = self.coco.loadImgs(query_name)[0]
        size = [img['height'], img['width']]
        (query_img, query_mask, support_img_list, support_mask_list) = self.load_frame_k_shot(support_name_list, query_name, class_)
        if (self.transform is not None):
            (query_img, query_mask) = self.transform(query_img, query_mask)
            for i in range(len(support_mask_list)):
                support_temp_img = support_img_list[i]
                support_temp_mask = support_mask_list[i]
                (support_temp_img, support_temp_mask) = self.transform(support_temp_img, support_temp_mask)
                support_temp_img = support_temp_img.unsqueeze(dim=0)
                support_temp_mask = support_temp_mask.unsqueeze(dim=0)
                if (i == 0):
                    support_img = support_temp_img
                    support_mask = support_temp_mask
                else:
                    support_img = torch.cat([support_img, support_temp_img], dim=0)
                    support_mask = torch.cat([support_mask, support_temp_mask], dim=0)
        self.count = (self.count + 1)
        return (query_img, query_mask, support_img, support_mask, class_, size)

    def __len__(self):
        # Fixed number of random episodes per epoch.
        return 1000

    def __getitem__(self, idx):
        # idx is unused — every episode is sampled at random.
        if (self.k_shot == 1):
            (query_img, query_mask, support_img, support_mask, class_, size) = self.get_1_shot(idx)
        else:
            (query_img, query_mask, support_img, support_mask, class_, size) = self.get_k_shot(idx)
        return (query_img, query_mask, support_img, support_mask, class_, size)
def accuracy(logits, labels):
    """Fraction of rows whose arg-max over dim 1 equals the corresponding label."""
    (_, predicted) = torch.max(logits, dim=1)
    n_correct = predicted.eq(labels).sum().item()
    return n_correct / len(labels)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.