def test(loader, net):
    net.eval()
    test_loss = 0
    correct = 0
    N = len(loader.dataset)
    with torch.no_grad():
        for (data, target) in loader:
            data = make_variable(data, requires_grad=False)
            target = make_variable(target, requires_grad=False)
            score = net(data)
            test_loss += net.criterion(score, target).item()
            pred = score.max(1)[1]
            correct += pred.eq(target).cpu().sum().item()
    test_loss /= len(loader)
    print('[Evaluate] Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(test_loss, correct, N, (100.0 * correct) / N))
    return test_loss |
def extract_layers_from_state_dict(state_dict: dict, layer_names: List[Union[str, Tuple[str, str]]]):
    new_state_dict = {}
    for layer_name in layer_names:
        if isinstance(layer_name, tuple):
old_layer_name = layer_name[0]
new_layer_name = layer_name[1]
else:
old_layer_name = new_layer_name = layer_name
old_layer_name = '{}.weight'.format(old_layer_name)
new_layer_name = '{}.weight'.format(new_layer_name)
new_state_dict[new_layer_name] = state_dict[old_layer_name]
return new_state_dict |
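# Usage sketch for extract_layers_from_state_dict (hypothetical keys): a plain
# string keeps the layer name, while an (old, new) tuple renames it on the way out.
import torch

state_dict = {'conv1.weight': torch.zeros(8, 3, 3, 3), 'fc.weight': torch.zeros(10, 8)}
subset = extract_layers_from_state_dict(state_dict, ['conv1', ('fc', 'classifier')])
assert set(subset) == {'conv1.weight', 'classifier.weight'} |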
def test__get_cnn_features_batch_nondefault_models():
cnn = CNN(model_config=CustomModel(model=EfficientNet(), transform=EfficientNet.transform, name=EfficientNet.name))
result = cnn._get_cnn_features_batch(TEST_IMAGE_DIR)
for i in result.values():
assert isinstance(i, np.ndarray)
assert (i.shape == (1792,))
cnn = CNN(model_config=CustomModel(model=ViT(), transform=ViT.transform, name=ViT.name))
result = cnn._get_cnn_features_batch(TEST_IMAGE_DIR)
for i in result.values():
assert isinstance(i, np.ndarray)
assert (i.shape == (768,)) |
def _parse_action_probs_from_action_info(action, action_info, legal_actions_list, total_num_discrete_actions):
action_probs = None
for key in ['policy_targets', 'action_probs']:
if (key in action_info):
action_probs = action_info[key]
break
if (action_probs is None):
if ('behaviour_logits' in action_info):
action_logits = action_info['behaviour_logits']
action_probs = softmax(action_logits)
else:
discrete_action = parse_discrete_poker_action_from_continuous_space(continuous_action=action, legal_actions_list=legal_actions_list, total_num_discrete_actions_including_dummy=total_num_discrete_actions)
action_probs = np.zeros(shape=total_num_discrete_actions, dtype=np.float32)
action_probs[discrete_action] = 1.0
return action_probs |
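# _parse_action_probs_from_action_info falls back to a softmax over
# 'behaviour_logits'; a minimal numerically stable sketch, assuming `softmax`
# is the usual logits-to-probabilities helper:
import numpy as np

def softmax(logits):
    z = logits - np.max(logits)  # shift by the max for numerical stability
    e = np.exp(z)
    return e / e.sum() |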
def separate_file(dir_to_read, name, to_write_dir):
name_token = name.split('.')[0]
(article, abstract) = get_art_abs(os.path.join(dir_to_read, name))
if ((len(article) < 5) or (len(abstract) < 5)):
print('Discard: {}'.format(name))
return None
with open(os.path.join(to_write_dir, (name_token + '.doc')), 'w') as fd:
fd.write(article)
with open(os.path.join(to_write_dir, (name_token + '.abs')), 'w') as fd:
fd.write(abstract) |
def load_dataset(name, cfg_path=None, vis_path=None, data_type=None):
if (cfg_path is None):
cfg = None
else:
cfg = load_dataset_config(cfg_path)
try:
builder = registry.get_builder_class(name)(cfg)
except TypeError:
        print(f"Dataset {name} not found. Available datasets:\n" + ', '.join(str(k) for k in dataset_zoo.get_names()))
exit(1)
if (vis_path is not None):
if (data_type is None):
data_type = builder.config.data_type
assert (data_type in builder.config.build_info), f'Invalid data_type {data_type} for {name}.'
builder.config.build_info.get(data_type).storage = vis_path
dataset = builder.build_datasets()
return dataset |
class ConcatDataset(torchConcatDataset):
def __init__(self, datasets):
super(ConcatDataset, self).__init__(datasets)
if hasattr(self.datasets[0], 'input_dim'):
self._input_dim = self.datasets[0].input_dim
self.input_dim = self.datasets[0].input_dim
def pull_item(self, idx):
if (idx < 0):
if ((- idx) > len(self)):
raise ValueError('absolute value of index should not exceed dataset length')
idx = (len(self) + idx)
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if (dataset_idx == 0):
sample_idx = idx
else:
sample_idx = (idx - self.cumulative_sizes[(dataset_idx - 1)])
return self.datasets[dataset_idx].pull_item(sample_idx) |
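# pull_item above mirrors torch.utils.data.ConcatDataset.__getitem__: bisect
# maps a global index to a (dataset, local index) pair via cumulative sizes.
import bisect

cumulative_sizes = [3, 8]  # two datasets of length 3 and 5
idx = 4
d = bisect.bisect_right(cumulative_sizes, idx)           # -> 1 (second dataset)
local = idx - cumulative_sizes[d - 1] if d > 0 else idx  # -> 4 - 3 = 1
assert (d, local) == (1, 1) |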
def _concat_dataset(cfg, default_args=None):
ann_files = cfg['ann_file']
img_prefixes = cfg.get('img_prefix', None)
    seg_prefixes = cfg.get('seg_prefix', None)
proposal_files = cfg.get('proposal_file', None)
datasets = []
num_dset = len(ann_files)
for i in range(num_dset):
data_cfg = copy.deepcopy(cfg)
data_cfg['ann_file'] = ann_files[i]
if isinstance(img_prefixes, (list, tuple)):
data_cfg['img_prefix'] = img_prefixes[i]
if isinstance(seg_prefixes, (list, tuple)):
data_cfg['seg_prefix'] = seg_prefixes[i]
if isinstance(proposal_files, (list, tuple)):
data_cfg['proposal_file'] = proposal_files[i]
datasets.append(build_dataset(data_cfg, default_args))
return ConcatDataset(datasets) |
class Agent(object):
def __init__(self, model, env, args, state, device):
self.model = model
self.env = env
self.num_agents = env.n
self.state_dim = env.observation_space[0].shape[0]
if ('continuous' in args.model):
self.continuous = True
self.action_high = [env.action_space[i].high for i in range(self.num_agents)]
self.action_low = [env.action_space[i].low for i in range(self.num_agents)]
self.dim_action = env.action_space[0].shape[0]
else:
self.dim_action = 1
self.continuous = False
self.eps_len = 0
self.eps_num = 0
self.args = args
self.values = []
self.log_probs = []
self.rewards = []
self.entropies = []
self.rewards_eps = []
self.done = True
self.info = None
self.reward = 0
self.device = device
self.lstm_out = args.lstm_out
self.reward_mean = None
self.reward_std = 1
self.num_steps = 0
self.n_steps = 0
self.vk = 0
self.state = state
self.hxs = torch.zeros(self.num_agents, self.lstm_out).to(device)
self.cxs = torch.zeros(self.num_agents, self.lstm_out).to(device)
self.rank = 0
self.rotation = 0
def action_train(self):
self.n_steps += 1
(value_multi, actions, entropy, log_prob) = self.model(Variable(self.state, requires_grad=True))
(state_multi, reward_multi, self.done, self.info) = self.env.step(actions)
if isinstance(self.done, list):
self.done = np.sum(self.done)
self.state = torch.from_numpy(np.array(state_multi)).float().to(self.device)
self.reward_org = reward_multi.copy()
if self.args.norm_reward:
reward_multi = self.reward_normalizer(reward_multi)
self.reward = torch.tensor(reward_multi).float().to(self.device)
self.eps_len += 1
self.values.append(value_multi)
self.entropies.append(entropy)
self.log_probs.append(log_prob)
self.rewards.append(self.reward.unsqueeze(1))
def action_test(self):
with torch.no_grad():
(value_multi, actions, entropy, log_prob) = self.model(Variable(self.state), True)
(state_multi, self.reward, self.done, self.info) = self.env.step(actions)
if isinstance(self.done, list):
self.done = np.sum(self.done)
self.state = torch.from_numpy(np.array(state_multi)).float().to(self.device)
if (self.env.reset_type == 1):
self.rotation = self.info['cost']
self.eps_len += 1
def reset(self):
obs = self.env.reset()
self.state = torch.from_numpy(np.array(obs)).float().to(self.device)
self.eps_len = 0
self.eps_num += 1
self.reset_rnn_hidden()
self.model.sample_noise()
def clean_buffer(self, done):
self.values = []
self.log_probs = []
self.entropies = []
self.rewards = []
self.obs_tracker = []
if done:
self.rewards_eps = []
return self
def reward_normalizer(self, reward):
reward = np.array(reward)
self.num_steps += 1
if (self.num_steps == 1):
self.reward_mean = reward
self.vk = 0
self.reward_std = 1
else:
delt = (reward - self.reward_mean)
self.reward_mean = (self.reward_mean + (delt / self.num_steps))
self.vk = (self.vk + (delt * (reward - self.reward_mean)))
self.reward_std = np.sqrt((self.vk / (self.num_steps - 1)))
reward = ((reward - self.reward_mean) / (self.reward_std + 1e-08))
return reward
def reset_rnn_hidden(self):
self.cxs = Variable(torch.zeros(self.num_agents, self.lstm_out).to(self.device))
self.hxs = Variable(torch.zeros(self.num_agents, self.lstm_out).to(self.device))
def update_rnn_hidden(self):
self.cxs = Variable(self.cxs.data)
self.hxs = Variable(self.hxs.data)
def optimize(self, params, optimizer, shared_model, training_mode, device_share):
R = torch.zeros(len(self.rewards[0]), 1).to(self.device)
if (not self.done):
state = self.state
(value_multi, *others) = self.model(Variable(state, requires_grad=True))
for i in range(len(self.rewards[0])):
R[i][0] = value_multi[i].data
self.values.append(Variable(R).to(self.device))
batch_size = len(self.entropies[0][0])
policy_loss = torch.zeros(batch_size, 1).to(self.device)
value_loss = torch.zeros(1, 1).to(self.device)
entropies = torch.zeros(batch_size, self.dim_action).to(self.device)
w_entropies = float(self.args.entropy)
R = Variable(R, requires_grad=True).to(self.device)
gae = torch.zeros(1, 1).to(self.device)
for i in reversed(range(len(self.rewards))):
R = ((self.args.gamma * R) + self.rewards[i])
advantage = (R - self.values[i])
value_loss = (value_loss + (0.5 * advantage.pow(2)))
delta_t = ((self.rewards[i] + (self.args.gamma * self.values[(i + 1)].data)) - self.values[i].data)
gae = (((gae * self.args.gamma) * self.args.tau) + delta_t)
policy_loss = ((policy_loss - (self.log_probs[i] * Variable(gae))) - (w_entropies * self.entropies[i]))
entropies += self.entropies[i].sum()
self.model.zero_grad()
loss = (policy_loss.sum() + (0.5 * value_loss.sum()))
loss.backward(retain_graph=True)
torch.nn.utils.clip_grad_norm_(params, 50)
ensure_shared_grads(self.model, shared_model, self.device, device_share)
optimizer.step()
self.clean_buffer(self.done)
return (policy_loss, value_loss, entropies) |
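# Agent.reward_normalizer above is Welford's online algorithm: the running
# mean and vk (the running sum of squared deviations) are updated per step, so
# no reward history is stored. A standalone sketch of the same recurrence:
import numpy as np

class RunningNorm:
    def __init__(self):
        (self.n, self.mean, self.m2) = (0, 0.0, 0.0)
    def update(self, x):
        self.n += 1
        delta = x - self.mean
        self.mean += delta / self.n          # mean_k = mean_{k-1} + delta / k
        self.m2 += delta * (x - self.mean)   # accumulates squared deviations
    def std(self):
        return np.sqrt(self.m2 / (self.n - 1)) if self.n > 1 else 1.0 |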
class ExampleModel(nn.Module):
def __init__(self):
super().__init__()
self.conv2d = nn.Conv2d(3, 8, 3)
def forward(self, imgs):
x = torch.randn((1, *imgs))
return self.conv2d(x) |
class StructuredSubnetConv2d(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=False, sparsity=0.5, trainable=True):
        super(StructuredSubnetConv2d, self).__init__(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias)
self.stride = stride
self.sparsity = sparsity
self.trainable = trainable
self.w_m = nn.Parameter(torch.empty(out_channels))
self.weight_mask = None
(self.zeros_weight, self.ones_weight) = (torch.zeros(self.w_m.shape), torch.ones(self.w_m.shape))
if bias:
self.b_m = nn.Parameter(torch.empty(out_channels))
self.bias_mask = None
(self.zeros_bias, self.ones_bias) = (torch.zeros(self.b_m.shape), torch.ones(self.b_m.shape))
else:
self.register_parameter('bias', None)
self.init_mask_parameters()
        if not trainable:
            raise NotImplementedError('Non-trainable version is still not implemented')
def forward(self, x, weight_mask=None, bias_mask=None, mode='train'):
(w_pruned, b_pruned) = (None, None)
if (mode == 'train'):
self.weight_mask = GetSubnetFaster.apply(self.w_m.abs(), self.zeros_weight, self.ones_weight, self.sparsity)
w_pruned = (self.weight * self.weight_mask.view((- 1), 1, 1, 1))
b_pruned = None
if (self.bias is not None):
self.bias_mask = GetSubnetFaster.apply(self.b_m.abs(), self.zeros_bias, self.ones_bias, self.sparsity)
b_pruned = (self.bias * self.bias_mask)
elif (mode == 'valid'):
w_pruned = (self.weight * self.weight_mask.view((- 1), 1, 1, 1))
b_pruned = None
if (self.bias is not None):
b_pruned = (self.bias * self.bias_mask)
elif (mode == 'test'):
w_pruned = (self.weight * weight_mask.view((- 1), 1, 1, 1))
b_pruned = None
if (self.bias is not None):
b_pruned = (self.bias * bias_mask)
else:
            raise ValueError(f'[ERROR] The mode {mode} is not supported!')
return F.conv2d(input=x, weight=w_pruned, bias=b_pruned, stride=self.stride, padding=self.padding)
def init_mask_parameters(self):
nn.init.kaiming_uniform_(self.w_m.view(1, (- 1)), a=math.sqrt(5))
if (self.bias is not None):
nn.init.kaiming_uniform_(self.b_m.view(1, (- 1)), a=math.sqrt(5)) |
class DenseNet(nn.Module):
def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16), num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000, remove_linear=False):
super(DenseNet, self).__init__()
self.features = nn.Sequential(OrderedDict([('conv0', nn.Conv2d(3, num_init_features, kernel_size=3, stride=1, padding=1, bias=False))]))
num_features = num_init_features
for (i, num_layers) in enumerate(block_config):
block = _DenseBlock(num_layers=num_layers, num_input_features=num_features, bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate)
self.features.add_module(('denseblock%d' % (i + 1)), block)
num_features = (num_features + (num_layers * growth_rate))
if (i != (len(block_config) - 1)):
trans = _Transition(num_input_features=num_features, num_output_features=(num_features // 2))
self.features.add_module(('transition%d' % (i + 1)), trans)
num_features = (num_features // 2)
self.features.add_module('norm5', nn.BatchNorm2d(num_features))
if remove_linear:
self.classifier = None
else:
self.classifier = nn.Linear(num_features, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.constant_(m.bias, 0)
def forward(self, x, feature=False):
features = self.features(x)
out = F.relu(features, inplace=True)
out = F.adaptive_avg_pool2d(out, (1, 1)).view(features.size(0), (- 1))
if (self.classifier is None):
if feature:
return (out, None)
else:
return out
if feature:
out1 = self.classifier(out)
return (out, out1)
out = self.classifier(out)
return out |
def tsdataset_to_dataloader(data, batch_size, lookback, horizon, num_processes):
if num_processes:
if ((batch_size % num_processes) != 0):
            warnings.warn("'batch_size' cannot be divided with no remainder by 'num_processes'. We got 'batch_size' = {} and 'num_processes' = {}".format(batch_size, num_processes))
batch_size = max(1, (batch_size // num_processes))
_rolled = (data.numpy_x is None)
return data.to_torch_data_loader(batch_size=batch_size, roll=_rolled, lookback=lookback, horizon=horizon, feature_col=data.roll_feature, target_col=data.roll_target, shuffle=True) |
def qualification_loss(x_minus, x_plus, y_minus, y_plus, a, b, c, confidence=(- 0.1)):
loss1 = ts.tanh_lower(torch.sigmoid(y_minus), a, ((b * y_minus) + c), x_minus, (x_plus * 0), plot=False, num=0)
valid = (loss1 <= 0)
loss1 = torch.clamp(loss1, min=confidence)
loss2 = ts.tanh_lower(torch.sigmoid(y_plus), a, ((b * y_plus) + c), x_minus, (x_plus * 0), plot=False, num=0)
valid = (valid * (loss2 <= 0))
loss2 = torch.clamp(loss2, min=confidence)
loss3 = ts.sigmoid_lower(torch.tanh(x_plus), b, ((a * x_plus) + c), y_minus, y_plus, plot=False, num=0)
valid = (valid * (loss3 <= 0))
loss3 = torch.clamp(loss3, min=confidence)
loss4 = (((b * y_minus) + c) - 0)
valid = (valid * (loss4 <= 0))
loss4 = torch.clamp(loss4, min=confidence)
loss5 = (((b * y_plus) + c) - 0)
valid = (valid * (loss5 <= 0))
loss5 = torch.clamp(loss5, min=confidence)
loss = ((((loss1 + loss2) + loss3) + loss4) + loss5)
return (loss, valid) |
def slice_nested_dict(dict_or_array, start, stop):
if isinstance(dict_or_array, dict):
return {k: slice_nested_dict(v, start, stop) for (k, v) in dict_or_array.items()}
else:
return dict_or_array[start:stop] |
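# Usage sketch for slice_nested_dict: the slice reaches every leaf array while
# the (possibly nested) dict structure is preserved.
import numpy as np

batch = {'obs': {'pos': np.arange(10), 'vel': np.arange(10)}, 'rew': np.arange(10)}
window = slice_nested_dict(batch, 2, 5)
assert window['obs']['pos'].tolist() == [2, 3, 4] |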
def apex_layernorm(ln_module, input_):
if apex_is_installed:
return apex.normalization.fused_layer_norm.FusedLayerNormAffineFunction.apply(input_, ln_module.weight, ln_module.bias, ln_module.normalized_shape, ln_module.eps)
else:
return ln_module(input_) |
@io.on('connect')
def connect():
mturk_info = mturk_params(request.args)
if (mturk_info is None):
mturk_info = {}
LOG.info('%s connected | mturk %s', request.sid, mturk_info)
assert (request.sid not in clients), f'Client {request.sid} already connected?'
io.emit('setup', {'sound': url_for('static', filename='ping.mp3')}, to=request.sid)
LOG.debug('Getting session cookie')
user_id = session.get('user_id')
if (user_id is None):
LOG.debug('No session cookie found')
user_id = generate_random_string(10)
session['user_id'] = user_id
LOG.debug('Session cookie generated')
else:
LOG.debug('Session cookie found')
mturk_info['user_cookie'] = user_id
    if num_games_per_client[mturk_info.get('workerId')] >= MAX_GAMES:
io.emit('max_games', to=request.sid)
return
clients.append(request.sid)
(room_id, ready) = state.connect_client(request.sid, mturk_info)
io.join_room(room_id)
LOG.debug('%s connected to room %s', request.sid, room_id)
if ready:
start_new_game(room_id) |
def get_imagenet_labels():
path = get_imagenet_path()
    dataset = datasets.ImageNet(path, split='val', transform=None)
classes_extended = dataset.classes
labels = []
for a in classes_extended:
labels.append(a[0])
return labels |
class EmptyLabel(ItemBase):
def __init__(self):
(self.obj, self.data) = (0, 0)
def __str__(self):
return ''
def __hash__(self):
return hash(str(self)) |
def resnet50Sem(cfg=None, pretrained_path=None, **kwargs):
if (cfg['resnet'] == 101):
model = ResNetSemShare4(Bottleneck, [3, 4, 23, 3])
print('Encoder: resnet101')
else:
model = ResNetSemShare4(Bottleneck, [3, 4, 6, 3])
print('Encoder: resnet50')
if (cfg['resnet'] == 101):
        init_path = './FewShotSeg-dataset/cache/resnet101-5d3b4d8f.pth'
else:
init_path = './FewShotSeg-dataset/cache/resnet50-19c8e357.pth'
if (pretrained_path is not None):
init_path = f"{cfg['ckpt_dir']}/best.pth"
print(f'load: {init_path}')
pretrained_weight = torch.load(init_path, map_location='cpu')
model_weight = model.state_dict()
for (key, weight) in model_weight.items():
if (key in pretrained_weight):
model_weight[key] = pretrained_weight[key]
if ((key[:3] == 'sem') and (key[3:] in pretrained_weight)):
model_weight[key] = pretrained_weight[key[3:]]
if (pretrained_path is not None):
print('**load eval model**')
for (key, weight) in model_weight.items():
if (('module.encoder.' + key) in pretrained_weight):
model_weight[key] = pretrained_weight[('module.encoder.' + key)]
if ((key[:3] == 'sem') and (key[3:] in pretrained_weight)):
model_weight[key] = pretrained_weight[key[3:]]
model.load_state_dict(model_weight)
return model |
def zscore_from_cb(cb_min, cb_max, confidence=0.95, distrib='norm'):
    if distrib != 'norm':
        raise ValueError(f'Unsupported distribution: {distrib}')
    quantile = norm.ppf(1 - (1 - confidence) / 2)
    beta_hat = (cb_min + cb_max) / 2
    zscore = (beta_hat / (cb_max - cb_min)) * 2 * quantile
    return zscore |
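# Worked example for zscore_from_cb: a symmetric 95% confidence interval
# (0.1, 0.5) gives beta_hat = 0.3 and half-width 0.2, so the implied standard
# error is 0.2 / 1.96 and z = 0.3 / (0.2 / 1.96) ~= 2.94.
from scipy.stats import norm

z = zscore_from_cb(0.1, 0.5, confidence=0.95)
assert abs(z - 0.3 / (0.2 / norm.ppf(0.975))) < 1e-09 |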
class DataBatch():
def __init__(self, mxnet_module):
self._data = []
self._label = []
self.mxnet_module = mxnet_module
def append_data(self, new_data):
self._data.append(self.__as_ndarray(new_data))
def append_label(self, new_label):
self._label.append(self.__as_ndarray(new_label))
def __as_ndarray(self, in_data):
return self.mxnet_module.ndarray.array(in_data, self.mxnet_module.cpu())
    @property
    def data(self):
        return self._data
    @property
    def label(self):
        return self._label |
class CloudpickleWrapper(object):
def __init__(self, x):
self.x = x
def __call__(self, *args, **kwargs):
return self.x(*args, **kwargs)
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob) |
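# Usage sketch for CloudpickleWrapper: lambdas and closures are not picklable
# by the stdlib pickle that multiprocessing uses, so they are serialized with
# cloudpickle on the way out and restored with plain pickle on the other side.
import pickle

make_env = CloudpickleWrapper(lambda: 'env')      # a lambda is unpicklable as-is
restored = pickle.loads(pickle.dumps(make_env))   # round-trips via cloudpickle
assert restored() == 'env' |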
def UpWind3dRHI(dx, coe, u, spatialscheme, mode):
rhi1 = _UpWind1dRHI(dx, [0, coe[(0, 0, 1)], coe[(0, 0, 2)]], u, spatialscheme, axis=(- 1), mode=mode)
rhi2 = _UpWind1dRHI(dx, [0, coe[(0, 1, 0)], coe[(0, 2, 0)]], u, spatialscheme, axis=(- 2), mode=mode)
rhi3 = _UpWind1dRHI(dx, [0, coe[(1, 0, 0)], coe[(2, 0, 0)]], u, spatialscheme, axis=(- 3), mode=mode)
return ((rhi1 + rhi2) + rhi3) |
def fix_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
np.random.seed(seed)
random.seed(seed) |
def attach_multitask_transform_head(core_model, output_tasks, optimizer, with_har_head=False, har_output_shape=None, num_units_har=1024, model_name='multitask_transform'):
inputs = tf.keras.Input(shape=core_model.input.shape[1:], name='input')
intermediate_x = core_model(inputs)
outputs = []
losses = [tf.keras.losses.BinaryCrossentropy() for _ in output_tasks]
for task in output_tasks:
x = tf.keras.layers.Dense(256, activation='relu')(intermediate_x)
pred = tf.keras.layers.Dense(1, activation='sigmoid', name=task)(x)
outputs.append(pred)
if with_har_head:
x = tf.keras.layers.Dense(num_units_har, activation='relu')(intermediate_x)
x = tf.keras.layers.Dense(har_output_shape)(x)
har_pred = tf.keras.layers.Softmax(name='har')(x)
outputs.append(har_pred)
losses.append(tf.keras.losses.CategoricalCrossentropy())
model = tf.keras.Model(inputs=inputs, outputs=outputs, name=model_name)
model.compile(optimizer=optimizer, loss=losses, metrics=['accuracy'])
return model |
class ParallelTrainer(LearnerCallback):
_order = (- 20)
def on_train_begin(self, **kwargs):
self.learn.model = DataParallel(self.learn.model)
def on_train_end(self, **kwargs):
self.learn.model = self.learn.model.module |
class MemorySeCo(nn.Module):
def __init__(self, feature_dim, queue_size, temperature=0.1, temperature_intra=0.1):
super(MemorySeCo, self).__init__()
self.queue_size = queue_size
self.temperature = temperature
self.temperature_intra = temperature_intra
self.index = 0
self.register_buffer('params', torch.tensor([(- 1)]))
stdv = (1.0 / math.sqrt((feature_dim / 3)))
memory = torch.rand(self.queue_size, feature_dim, requires_grad=False).mul_((2 * stdv)).add_((- stdv))
self.register_buffer('memory', memory)
def forward(self, q, k_sf, k_df1, k_df2, k_all, inter=True):
l_pos_sf = (q * k_sf.detach()).sum(dim=(- 1), keepdim=True)
l_pos_df1 = (q * k_df1.detach()).sum(dim=(- 1), keepdim=True)
l_pos_df2 = (q * k_df2.detach()).sum(dim=(- 1), keepdim=True)
if inter:
l_neg = torch.mm(q, self.memory.clone().detach().t())
out = torch.cat((torch.cat((l_pos_sf, l_pos_df1, l_pos_df2), dim=0), l_neg.repeat(3, 1)), dim=1)
out = torch.div(out, self.temperature).contiguous()
with torch.no_grad():
all_size = k_all.shape[0]
out_ids = torch.fmod((torch.arange(all_size, dtype=torch.long).cuda() + self.index), self.queue_size)
self.memory.index_copy_(0, out_ids, k_all)
self.index = ((self.index + all_size) % self.queue_size)
else:
out = torch.div(torch.cat((l_pos_sf.repeat(2, 1), torch.cat((l_pos_df1, l_pos_df2), dim=0)), dim=(- 1)), self.temperature_intra).contiguous()
return out |
def read_pedestrian(filename):
with open(filename) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
track_dict = dict()
track_id = None
for (i, row) in enumerate(list(csv_reader)):
if (i == 0):
assert (row[KeyEnum.track_id] == Key.track_id)
assert (row[KeyEnum.frame_id] == Key.frame_id)
assert (row[KeyEnum.time_stamp_ms] == Key.time_stamp_ms)
assert (row[KeyEnum.agent_type] == Key.agent_type)
assert (row[KeyEnum.x] == Key.x)
assert (row[KeyEnum.y] == Key.y)
assert (row[KeyEnum.vx] == Key.vx)
assert (row[KeyEnum.vy] == Key.vy)
continue
if (row[KeyEnum.track_id] != track_id):
track_id = row[KeyEnum.track_id]
assert (track_id not in track_dict.keys()), ('Line %i: Track id %s already in dict, track file not sorted properly' % ((i + 1), track_id))
track = Track(track_id)
track.agent_type = row[KeyEnum.agent_type]
track.time_stamp_ms_first = int(row[KeyEnum.time_stamp_ms])
track.time_stamp_ms_last = int(row[KeyEnum.time_stamp_ms])
track_dict[track_id] = track
track = track_dict[track_id]
track.time_stamp_ms_last = int(row[KeyEnum.time_stamp_ms])
ms = MotionState(int(row[KeyEnum.time_stamp_ms]))
ms.x = float(row[KeyEnum.x])
ms.y = float(row[KeyEnum.y])
ms.vx = float(row[KeyEnum.vx])
ms.vy = float(row[KeyEnum.vy])
track.motion_states[ms.time_stamp_ms] = ms
return track_dict |
def associate(first_list, second_list, max_offset):
first_keys = list(first_list)
second_keys = list(second_list)
    potential_matches = [(abs(b - a), a, b) for a in first_keys for b in second_keys if abs(b - a) < max_offset]
    potential_matches.sort()
    matches = []
    for (diff, a, b) in potential_matches:
        if (a in first_keys) and (b in second_keys):
            first_keys.remove(a)
            second_keys.remove(b)
            matches.append((a, b))
matches.sort()
return matches |
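# Usage sketch for associate: greedy one-to-one matching of two timestamp
# lists, pairing the closest timestamps first.
first = {0.0: 'a', 1.0: 'b'}
second = {0.05: 'x', 1.2: 'y'}
assert associate(first, second, max_offset=0.5) == [(0.0, 0.05), (1.0, 1.2)] |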
def build_optimizers(opt_config, runner):
if (not opt_config):
return
assert isinstance(opt_config, dict)
for (name, config) in opt_config.items():
if ((not name) or (not config)):
continue
if (name in runner.optimizers):
raise AttributeError(f'Optimizer `{name}` has already existed!')
if (name not in runner.models):
raise AttributeError(f'Model `{name}` is missing!')
runner.optimizers[name] = build_optimizer(config, runner.models[name]) |
@torch.no_grad()
def inference_entropy_estimation(model, x, index_slide=0, index_quantize=[0, 0, 0, 0]):
x = x.unsqueeze(0)
start = time.time()
out_net = model.forward(x, index_slide, index_quantize, get_y_hat=True)
elapsed_time = (time.time() - start)
num_pixels = ((x.size(0) * x.size(2)) * x.size(3))
estimated_bpp = sum(((torch.log(likelihoods).sum() / ((- math.log(2)) * num_pixels)) for likelihoods in out_net['likelihoods'].values()))
return {'decoded': out_net['x_hat'], 'y_hat': out_net['y_hat'], 'estimated_bpp': estimated_bpp.item(), 'estimated_time': elapsed_time} |
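# The estimated_bpp above is the standard entropy-model estimate: total
# information -sum(log2 p) over all latents, divided by the pixel count.
# Toy check with uniform likelihoods p = 0.5 (1 bit per latent element):
import math
import torch

likelihoods = torch.full((1, 8, 4, 4), 0.5)   # hypothetical latent tensor
num_pixels = 16 * 16                          # hypothetical image size
bpp = torch.log(likelihoods).sum() / (-math.log(2) * num_pixels)
assert abs(bpp.item() - (8 * 4 * 4) / (16 * 16)) < 1e-05 |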
class FeatureFusionNetwork(nn.Module):
def __init__(self, d_model=512, nhead=8, num_featurefusion_layers=4, dim_feedforward=2048, dropout=0.1, activation='relu'):
super().__init__()
featurefusion_layer = FeatureFusionLayer(d_model, nhead, dim_feedforward, dropout, activation)
self.encoder = Encoder(featurefusion_layer, num_featurefusion_layers)
decoderCFA_layer = DecoderCFALayer(d_model, nhead, dim_feedforward, dropout, activation)
decoderCFA_norm = nn.LayerNorm(d_model)
self.decoder = Decoder(decoderCFA_layer, decoderCFA_norm)
self._reset_parameters()
self.d_model = d_model
self.nhead = nhead
def _reset_parameters(self):
for p in self.parameters():
if (p.dim() > 1):
nn.init.xavier_uniform_(p)
def forward(self, src_temp, mask_temp, src_search, mask_search, pos_temp, pos_search):
src_temp = src_temp.flatten(2).permute(2, 0, 1)
pos_temp = pos_temp.flatten(2).permute(2, 0, 1)
src_search = src_search.flatten(2).permute(2, 0, 1)
pos_search = pos_search.flatten(2).permute(2, 0, 1)
mask_temp = mask_temp.flatten(1)
mask_search = mask_search.flatten(1)
(memory_temp, memory_search) = self.encoder(src1=src_temp, src2=src_search, src1_key_padding_mask=mask_temp, src2_key_padding_mask=mask_search, pos_src1=pos_temp, pos_src2=pos_search)
hs = self.decoder(memory_search, memory_temp, tgt_key_padding_mask=mask_search, memory_key_padding_mask=mask_temp, pos_enc=pos_temp, pos_dec=pos_search)
return hs.unsqueeze(0).transpose(1, 2) |
class FastChatAgent(AgentClient):
def __init__(self, model_name, controller_address=None, worker_address=None, temperature=0, max_new_tokens=32, top_p=0, prompter=None, args=None, **kwargs) -> None:
if ((controller_address is None) and (worker_address is None)):
raise ValueError('Either controller_address or worker_address must be specified.')
self.controller_address = controller_address
self.worker_address = worker_address
self.model_name = model_name
self.temperature = temperature
self.max_new_tokens = max_new_tokens
self.top_p = top_p
self.prompter = Prompter.get_prompter(prompter)
self.args = (args or {})
super().__init__(**kwargs)
def inference(self, history: List[dict]) -> str:
if self.worker_address:
worker_addr = self.worker_address
else:
controller_addr = self.controller_address
worker_addr = controller_addr
        if worker_addr == '':
            raise ValueError('No worker address available.')
gen_params = {'model': self.model_name, 'temperature': self.temperature, 'max_new_tokens': self.max_new_tokens, 'echo': False, 'top_p': self.top_p, **self.args}
if self.prompter:
prompt = self.prompter(history)
gen_params.update(prompt)
else:
conv = get_conversation_template(self.model_name)
for history_item in history:
role = history_item['role']
content = history_item['content']
if (role == 'user'):
conv.append_message(conv.roles[0], content)
elif (role == 'agent'):
conv.append_message(conv.roles[1], content)
else:
raise ValueError(f'Unknown role: {role}')
conv.append_message(conv.roles[1], None)
prompt = conv.get_prompt()
gen_params.update({'prompt': prompt, 'stop': conv.stop_str, 'stop_token_ids': conv.stop_token_ids})
headers = {'User-Agent': 'FastChat Client'}
for _ in range(3):
try:
                response = requests.post(worker_addr + '/worker_generate_stream', headers=headers, json=gen_params, stream=True, timeout=120)
text = ''
for line in response.iter_lines(decode_unicode=False, delimiter=b'\x00'):
if line:
data = json.loads(line)
if (data['error_code'] != 0):
raise AgentNetworkException(data['text'])
text = data['text']
return text
except Timeout:
print('Timeout, retrying...')
except ConnectionError:
print('Connection error, retrying...')
time.sleep(5)
else:
raise Exception('Timeout after 3 retries.') |
class EuroSATRGBDataModule(BaseDataModule):
def __init__(self, root: str='.data/eurosat-rgb', transform: T.Compose=T.Compose([T.ToTensor()]), *args, **kwargs):
super().__init__(*args, **kwargs)
self.root = root
self.transform = transform
def setup(self, stage: Optional[str]=None):
dataset = EuroSATRGB(root=self.root, transform=self.transform)
(self.train_dataset, self.val_dataset, self.test_dataset) = dataset_split(dataset, val_pct=self.val_split, test_pct=self.test_split) |
@BACKBONE_REGISTRY.register()
def build_resnet_bifpn_backbone(cfg, input_shape: ShapeSpec):
bottom_up = build_resnet_backbone(cfg, input_shape)
in_features = cfg.MODEL.FPN.IN_FEATURES
backbone = BiFPN(cfg=cfg, bottom_up=bottom_up, in_features=in_features, out_channels=cfg.MODEL.BIFPN.OUT_CHANNELS, norm=cfg.MODEL.BIFPN.NORM, num_levels=cfg.MODEL.BIFPN.NUM_LEVELS, num_bifpn=cfg.MODEL.BIFPN.NUM_BIFPN, separable_conv=cfg.MODEL.BIFPN.SEPARABLE_CONV)
return backbone |
def train(train_loader, model, criterion, optimizer, epoch, args):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(len(train_loader), [batch_time, data_time, losses, top1, top5], prefix='Epoch: [{}]'.format(epoch))
log_dir = './byol'
log = open((log_dir + '/log.train.txt'), mode='a')
log.write(('\t__file__ = %s\n' % __file__))
log.write(('\tout_dir = %s\n' % log_dir))
log.write('\n')
log.write('\t<additional comments>\n')
log.write('\t ... xxx baseline ... \n')
log.write('\n')
    start = timer()
    model.train()
    end = time.time()
for (i, images) in enumerate(train_loader):
if (args.gpu is not None):
images[0] = images[0].cuda(args.gpu, non_blocking=True)
images[1] = images[1].cuda(args.gpu, non_blocking=True)
loss = model(images[0], images[1])
optimizer.zero_grad()
loss.backward()
optimizer.step()
model.module.update_moving_average()
if ((i % 20) == 0):
print(('byol_resnet50' + (' %0.7f %5.3f %6.3f | %0.3f %0.3f| %s' % (optimizer.state_dict()['param_groups'][0]['lr'], i, epoch, loss.item(), loss.item(), time_to_str((timer() - start), 'min')))))
if ((i % 100) == 0):
log.write((' %0.7f %5.3f %6.3f | %0.3f %0.3f| %s' % (optimizer.state_dict()['param_groups'][0]['lr'], i, epoch, loss.item(), loss.item(), time_to_str((timer() - start), 'min'))))
log.write('\n') |
def _recon_lcs(x, y):
(i, j) = (len(x), len(y))
table = _lcs(x, y)
if (table[(i, j)] == 0):
return []
lcs = []
    while True:
if ((i == 0) or (j == 0)):
break
elif (x[(i - 1)] == y[(j - 1)]):
lcs = ([(x[(i - 1)], (i - 1))] + lcs)
i = (i - 1)
j = (j - 1)
elif (table[((i - 1), j)] > table[(i, (j - 1))]):
i = (i - 1)
else:
j = (j - 1)
return lcs |
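# A minimal DP table builder compatible with _recon_lcs above (assumption:
# _lcs returns a dict keyed by (i, j) holding LCS lengths of the prefixes
# x[:i] and y[:j], which is what the backtracking loop indexes into).
def _lcs(x, y):
    table = {}
    for i in range(len(x) + 1):
        for j in range(len(y) + 1):
            if i == 0 or j == 0:
                table[i, j] = 0
            elif x[i - 1] == y[j - 1]:
                table[i, j] = table[i - 1, j - 1] + 1
            else:
                table[i, j] = max(table[i - 1, j], table[i, j - 1])
    return table |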
def sepreresnetbc26b(**kwargs):
return get_sepreresnet(blocks=26, bottleneck=True, conv1_stride=False, model_name='sepreresnetbc26b', **kwargs) |
def flow_output_evaluation_in_pandas(output_dict):
processed_dict = {}
pandas_dict = {}
for key in output_dict.keys():
val = output_dict[key]
if isinstance(val, float):
val = '{:.4f}'.format(val)
if ('metric_flow' in key):
flow_id = key.split('/')[0]
(area, metric) = key.split('/')[1].split('_')
if (flow_id not in pandas_dict.keys()):
pandas_dict[flow_id] = {}
if (area not in pandas_dict[flow_id].keys()):
pandas_dict[flow_id][area] = {}
pandas_dict[flow_id][area][metric] = val
else:
processed_dict[key] = val
for key in pandas_dict:
processed_dict[key] = pd.DataFrame.from_dict(pandas_dict[key], orient='index')
return processed_dict |
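# Usage sketch for flow_output_evaluation_in_pandas: keys of the form
# '<flow_id>/<area>_<metric>' (with 'metric_flow' in the id) are pivoted into
# one DataFrame per flow id; all other entries pass through, formatted.
out = flow_output_evaluation_in_pandas({'metric_flow1/all_epe': 1.0, 'loss': 0.5})
assert out['loss'] == '0.5000'
assert out['metric_flow1'].loc['all', 'epe'] == '1.0000' |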
def test_single_task_data_aggregation(processed_data: Dict[(str, Dict[(str, Any)])]) -> None:
task_return_ci_data = get_and_aggregate_data_single_task(processed_data=processed_data, metric_name='return', metrics_to_normalize=['return'], environment_name='env_1', task_name='task_1')
del task_return_ci_data['extra']
jax.tree_util.tree_map((lambda x, y: np.testing.assert_allclose(x, y, rtol=0.0, atol=1e-05)), task_return_ci_data, expected_single_task_ci_data_returns)
task_win_rate_ci_data = get_and_aggregate_data_single_task(processed_data=processed_data, metric_name='win_rate', metrics_to_normalize=['return'], environment_name='env_1', task_name='task_3')
del task_win_rate_ci_data['extra']
jax.tree_util.tree_map((lambda x, y: np.testing.assert_allclose(x, y, rtol=0.0, atol=1e-05)), task_win_rate_ci_data, expected_single_task_ci_data_win_rates) |
def check_isfile(path):
isfile = osp.isfile(path)
if (not isfile):
print("=> Warning: no file found at '{}' (ignored)".format(path))
return isfile |
class FrameNetProcessor():
def __init__(self, frame_path=None, element_path=None, bert_model='bert-base-cased', max_length=256):
self.frame_vocabulary = Vocabulary.load(frame_path)
self.element_vocabulary = Vocabulary.load(element_path)
self.tokenizer = BertTokenizer.from_pretrained(bert_model)
self.max_length = max_length
def process(self, dataset):
datable = DataTable()
for item in dataset.values():
sentence = item['sentence']
frames = item['frames']
elements = item['elements']
(input_ids, attention_mask, head_indexes, frame_id, element_id, label_mask) = process(sentence, frames, elements, self.tokenizer, self.frame_vocabulary, self.element_vocabulary, self.max_length)
datable('input_ids', input_ids)
datable('attention_mask', attention_mask)
datable('head_indexes', head_indexes)
datable('frame_id', frame_id)
datable('element_id', element_id)
datable('label_mask', label_mask)
return datable |
class planarDissipativeForceFromFullDissipativeForce(planarDissipativeForce):
def __init__(self, Pot):
planarDissipativeForce.__init__(self, amp=1.0, ro=Pot._ro, vo=Pot._vo)
self._roSet = Pot._roSet
self._voSet = Pot._voSet
self._Pot = Pot
self.hasC = Pot.hasC
self.hasC_dxdv = Pot.hasC_dxdv
self.hasC_dens = Pot.hasC_dens
return None
def _Rforce(self, R, phi=0.0, t=0.0, v=None):
return self._Pot.Rforce(R, 0.0, phi=phi, t=t, v=[v[0], v[1], 0.0], use_physical=False)
def _phitorque(self, R, phi=0.0, t=0.0, v=None):
return self._Pot.phitorque(R, 0.0, phi=phi, t=t, v=[v[0], v[1], 0.0], use_physical=False) |
class ChineseCLIPVisionModel(metaclass=DummyObject):
_backends = ['torch']
def __init__(self, *args, **kwargs):
requires_backends(self, ['torch']) |
def sunrise(agent, buffer, train_env, test_env, num_steps=1000000, transitions_per_step=1, max_episode_steps=100000, batch_size=512, tau=0.005, actor_lr=0.0001, critic_lr=0.0001, alpha_lr=0.0001, gamma=0.99, eval_interval=5000, eval_episodes=10, warmup_steps=1000, actor_clip=None, critic_clip=None, actor_l2=0.0, critic_l2=0.0, target_delay=2, save_interval=100000, name='sunrise_run', render=False, save_to_disk=True, log_to_disk=True, verbosity=0, gradient_updates_per_step=1, init_alpha=0.1, weighted_bellman_temp=20.0, infinite_bootstrap=True, **kwargs):
if (save_to_disk or log_to_disk):
save_dir = utils.make_process_dirs(name)
if log_to_disk:
writer = tensorboardX.SummaryWriter(save_dir)
writer.add_hparams(locals(), {})
agent.to(device)
agent.train()
target_agent = copy.deepcopy(agent)
for (target_critics, agent_critics) in zip(target_agent.critics, agent.critics):
for (target_critic, agent_critic) in zip(target_critics, agent_critics):
utils.hard_update(target_critic, agent_critic)
target_agent.train()
critic_optimizer = torch.optim.Adam(agent.critic_params, lr=critic_lr, weight_decay=critic_l2, betas=(0.9, 0.999))
actor_optimizer = torch.optim.Adam(agent.actor_params, lr=actor_lr, weight_decay=actor_l2, betas=(0.9, 0.999))
(log_alphas, alpha_optimizers) = ([], [])
for _ in range(len(agent.actors)):
log_alpha = torch.Tensor([math.log(init_alpha)]).to(device)
log_alpha.requires_grad = True
alpha_optimizer = torch.optim.Adam([log_alpha], lr=alpha_lr, betas=(0.5, 0.999))
log_alphas.append(log_alpha)
alpha_optimizers.append(alpha_optimizer)
target_entropy = (- train_env.action_space.shape[0])
run.warmup_buffer(buffer, train_env, warmup_steps, max_episode_steps)
done = True
steps_iter = range(num_steps)
if verbosity:
steps_iter = tqdm.tqdm(steps_iter)
for step in steps_iter:
for _ in range(transitions_per_step):
if done:
state = train_env.reset()
steps_this_ep = 0
done = False
action = agent.sample_action(state)
(next_state, reward, done, info) = train_env.step(action)
if (infinite_bootstrap and ((steps_this_ep + 1) == max_episode_steps)):
done = False
buffer.push(state, action, reward, next_state, done)
state = next_state
steps_this_ep += 1
if (steps_this_ep >= max_episode_steps):
done = True
for _ in range(gradient_updates_per_step):
learn_sunrise(buffer=buffer, target_agent=target_agent, agent=agent, critic_optimizer=critic_optimizer, batch_size=batch_size, gamma=gamma, critic_clip=critic_clip, actor_optimizer=actor_optimizer, alpha_optimizers=alpha_optimizers, target_entropy=target_entropy, log_alphas=log_alphas, actor_clip=actor_clip, weighted_bellman_temp=weighted_bellman_temp)
if ((step % target_delay) == 0):
for (target_critics, agent_critics) in zip(target_agent.critics, agent.critics):
for (target_critic, agent_critic) in zip(target_critics, agent_critics):
utils.soft_update(target_critic, agent_critic, tau)
if (((step % eval_interval) == 0) or (step == (num_steps - 1))):
mean_return = run.evaluate_agent(agent, test_env, eval_episodes, max_episode_steps, render)
if log_to_disk:
writer.add_scalar('return', mean_return, (step * transitions_per_step))
if (((step % save_interval) == 0) and save_to_disk):
agent.save(save_dir)
if save_to_disk:
agent.save(save_dir)
return agent |
def build_benchmark():
    seq = '\nfrom neural_compressor.experimental import Benchmark\nfrom neural_compressor.data import Datasets, DATALOADERS\nfrom neural_compressor import conf\nfrom onnx import onnx_pb as onnx_proto\nfrom onnx import helper, TensorProto, numpy_helper\nfrom onnxruntime_extensions import onnx_op\nimport numpy as np\n\n@onnx_op(op_type="PyReverseMatrix")\ndef reverse_matrix(x):\n # The user custom op implementation here.\n return np.flip(x, axis=0).astype(np.float32)\n\nnodes = []\nnodes[0:] = [helper.make_node(\'Identity\', [\'input_1\'], [\'identity1\'])]\nnodes[1:] = [helper.make_node(\'PyReverseMatrix\',\n [\'identity1\'], [\'reversed\'],\n domain=\'ai.onnx.contrib\')]\n\ninput0 = helper.make_tensor_value_info(\n \'input_1\', onnx_proto.TensorProto.FLOAT, [None, 2])\noutput0 = helper.make_tensor_value_info(\n \'reversed\', onnx_proto.TensorProto.FLOAT, [None, 2])\n\ngraph = helper.make_graph(nodes, \'test0\', [input0], [output0])\nmodel = helper.make_model(graph, **{\'opset_imports\': [helper.make_opsetid(\'\', 13)]})\n\ndatasets = Datasets(\'onnxrt_qlinearops\')\next_dataset = datasets[\'dummy\'](shape=(10, 2), low=0., high=1., label=True)\next_dataloader = DATALOADERS[\'onnxrt_qlinearops\'](ext_dataset)\n\nconf.model.framework = \'onnxrt_qlinearops\'\nconf.evaluation.accuracy.metric = {\'Accuracy\': {}}\nevaluator = Benchmark(conf)\nevaluator.b_dataloader = ext_dataloader\nevaluator.model = model\nevaluator(\'performance\')\n '
with open('benchmark.py', 'w', encoding='utf-8') as f:
f.writelines(seq) |
@chex.dataclass
class State:
weights: chex.Array
values: chex.Array
packed_items: chex.Array
remaining_budget: chex.Array
key: chex.PRNGKey |
def main(args: argparse.Namespace) -> None:
assert isinstance(args.folders, list)
assert isinstance(args.file, str)
if (args.classes is not None):
assert isinstance(args.classes, list)
file_paths = [(Path(p) / args.file) for p in args.folders]
filter = identical
if args.smooth_factor:
filter = partial(butter_lowpass_filter, cutoff=(5000 * args.smooth_factor), fs=10000)
if (args.out_dir is not None):
parent_path = Path(args.out_dir)
else:
parent_path = file_paths[0].parents[1]
for p in file_paths:
assert p.exists(), p
if (args.classes is None):
classes = []
for file_path in file_paths:
classes.extend(pd.read_csv(file_path, index_col=0).columns.to_list())
args.classes = list(set(classes))
for _class in args.classes:
for file_path in file_paths:
try:
file = filter(pd.read_csv(file_path, index_col=0)[_class])
except KeyError:
continue
plt.plot(file, label=file_path.parents[0])
plt.legend()
plt.title(_class)
plt.grid()
if (args.xrange is not None):
plt.xlim(args.xrange)
if args.yrange:
plt.ylim(args.yrange)
plt.savefig((Path(parent_path) / ((parent_path.stem + _class) + '.png')))
plt.close('all')
for (i, _class) in enumerate(args.classes):
for (j, file_path) in enumerate(file_paths):
try:
file = pd.read_csv(file_path, index_col=0)[_class]
except KeyError:
continue
file.plot(label=(file_path.parent.stem + f'/{_class}'), color=c[i], linestyle=s[j])
plt.legend()
plt.title('total')
plt.grid()
if (args.xrange is not None):
plt.xlim(args.xrange)
if args.yrange:
plt.ylim(args.yrange)
plt.savefig((Path(parent_path) / (parent_path.stem + 'total.png')))
plt.close('all') |
def build(anchor_generator_config):
if (not isinstance(anchor_generator_config, anchor_generator_pb2.AnchorGenerator)):
raise ValueError('anchor_generator_config not of type anchor_generator_pb2.AnchorGenerator')
if (anchor_generator_config.WhichOneof('anchor_generator_oneof') == 'grid_anchor_generator'):
grid_anchor_generator_config = anchor_generator_config.grid_anchor_generator
return grid_anchor_generator.GridAnchorGenerator(scales=[float(scale) for scale in grid_anchor_generator_config.scales], aspect_ratios=[float(aspect_ratio) for aspect_ratio in grid_anchor_generator_config.aspect_ratios], base_anchor_size=[grid_anchor_generator_config.height, grid_anchor_generator_config.width], anchor_stride=[grid_anchor_generator_config.height_stride, grid_anchor_generator_config.width_stride], anchor_offset=[grid_anchor_generator_config.height_offset, grid_anchor_generator_config.width_offset])
elif (anchor_generator_config.WhichOneof('anchor_generator_oneof') == 'ssd_anchor_generator'):
ssd_anchor_generator_config = anchor_generator_config.ssd_anchor_generator
return multiple_grid_anchor_generator.create_ssd_anchors(num_layers=ssd_anchor_generator_config.num_layers, min_scale=ssd_anchor_generator_config.min_scale, max_scale=ssd_anchor_generator_config.max_scale, aspect_ratios=ssd_anchor_generator_config.aspect_ratios, reduce_boxes_in_lowest_layer=ssd_anchor_generator_config.reduce_boxes_in_lowest_layer)
else:
raise ValueError('Empty anchor generator.') |
def pool(data, name, kernel=3, stride=2, dilate=1, pad=(- 1), pool_type='max', global_pool=False):
if (pool_type == 'max+avg'):
branch1 = pool(data, '{}_branch1'.format(name), kernel=kernel, stride=stride, dilate=dilate, pad=pad, pool_type='max')
branch2 = pool(data, '{}_branch2'.format(name), kernel=kernel, stride=stride, dilate=dilate, pad=pad, pool_type='avg')
return (branch1 + branch2)
if (kernel == 1):
assert (dilate == 1)
if global_pool:
assert (dilate == 1)
assert (pad < 0)
return mx.sym.Pooling(data, name=name, kernel=(1, 1), pool_type=pool_type, global_pool=True)
else:
if (pad < 0):
if (cfg.get('pool_top_infer_style', None) == 'caffe'):
pad = 0
else:
assert ((kernel % 2) == 1), 'Specify pad for an even kernel size'
pad = ((((kernel - 1) * dilate) + 1) // 2)
if (dilate == 1):
return mx.sym.Pooling(data, name=name, kernel=(kernel, kernel), stride=(stride, stride), pad=(pad, pad), pool_type=pool_type)
else:
assert (stride == 1)
return mx.sym.Pooling(data, name=name, kernel=(kernel, kernel), stride=(stride, stride), dilate=(dilate, dilate), pad=(pad, pad), pool_type=pool_type) |
def check_pipeline_doc(overwrite=False):
with open(PATH_TO_TOC, encoding='utf-8') as f:
content = yaml.safe_load(f.read())
api_idx = 0
while (content[api_idx]['title'] != 'API'):
api_idx += 1
api_doc = content[api_idx]['sections']
pipeline_idx = 0
while (api_doc[pipeline_idx]['title'] != 'Pipelines'):
pipeline_idx += 1
diff = False
pipeline_docs = api_doc[pipeline_idx]['sections']
new_pipeline_docs = []
for pipeline_doc in pipeline_docs:
if ('section' in pipeline_doc):
sub_pipeline_doc = pipeline_doc['section']
new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
if overwrite:
pipeline_doc['section'] = new_sub_pipeline_doc
new_pipeline_docs.append(pipeline_doc)
new_pipeline_docs = clean_doc_toc(new_pipeline_docs)
if (new_pipeline_docs != pipeline_docs):
diff = True
if overwrite:
api_doc[pipeline_idx]['sections'] = new_pipeline_docs
if diff:
if overwrite:
content[api_idx]['sections'] = api_doc
with open(PATH_TO_TOC, 'w', encoding='utf-8') as f:
f.write(yaml.dump(content, allow_unicode=True))
else:
            raise ValueError('The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this.')
class TestOptions(BaseOptions):
def initialize(self, parser):
parser = BaseOptions.initialize(self, parser)
parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
parser.add_argument('--load_epoch', type=str, default='500', help='which epoch to load? set to latest to use latest cached model')
parser.add_argument('--eval', action='store_true', help='use eval mode during test time.')
parser.set_defaults(time_frame_length=1)
self.isTrain = False
return parser |
class SynthiaDataset(SparseVoxelizationDataset):
CLIP_BOUND = (((- 2000), 2000), ((- 2000), 2000), ((- 2000), 2000))
VOXEL_SIZE = 30
NUM_IN_CHANNEL = 4
BBOX_NORMALIZE_MEAN = np.array((0.0, 0.0, 0.0, 10.802, 6.258, 10.543))
BBOX_NORMALIZE_STD = np.array((3.331, 1.507, 3.007, 5.179, 1.177, 4.268))
ROTATION_AUGMENTATION_BOUND = ((((- np.pi) / 64), (np.pi / 64)), ((- np.pi), np.pi), (((- np.pi) / 64), (np.pi / 64)))
TRANSLATION_AUGMENTATION_RATIO_BOUND = (((- 0.2), 0.2), (0, 0), ((- 0.2), 0.2))
ROTATION_AXIS = 'y'
LOCFEAT_IDX = 1
NUM_LABELS = 16
INSTANCE_LABELS = (8, 10, 11)
IGNORE_LABELS = (0, 1, 13, 14)
DATA_PATH_FILE = {DatasetPhase.Train: 'train.txt', DatasetPhase.Val: 'val.txt', DatasetPhase.Val2: 'val2.txt', DatasetPhase.TrainVal: 'trainval.txt', DatasetPhase.Test: 'test.txt'}
def __init__(self, config, input_transform=None, target_transform=None, augment_data=True, cache=False, phase=DatasetPhase.Train):
if isinstance(phase, str):
phase = str2datasetphase_type(phase)
data_root = config.synthia_path
data_paths = read_txt(os.path.join(data_root, self.DATA_PATH_FILE[phase]))
logging.info('Loading {}: {}'.format(self.__class__.__name__, self.DATA_PATH_FILE[phase]))
super().__init__(data_paths, data_root=data_root, input_transform=input_transform, target_transform=target_transform, ignore_label=config.ignore_label, return_transformation=config.return_transformation, augment_data=augment_data, config=config)
def load_datafile(self, index):
(pointcloud, bboxes, _) = super().load_datafile(index)
return (pointcloud, bboxes, np.zeros(3))
def get_instance_mask(self, semantic_labels, instance_labels):
return (instance_labels != 0) |
class MultiDataset():
def __init__(self, dataset_type='train'):
self._dataset_type = dataset_type
self.writer = registry.get('writer')
self._is_main_process = is_main_process()
self._global_config = registry.get('config')
def _process_datasets(self):
if ('datasets' not in self.opts):
            self.writer.write('No datasets attribute present. Setting default to vqa2.', 'warning')
datasets = 'vqa2'
else:
datasets = self.opts['datasets']
if (type(datasets) == str):
datasets = list(map((lambda x: x.strip()), datasets.split(',')))
self._given_datasets = datasets
def load(self, **opts):
self.opts = opts
self._process_datasets()
self._datasets = []
self._builders = []
self._loaders = []
self._samplers = []
self._iterators = []
self._total_length = 0
self._per_dataset_lengths = []
self._num_datasets = 0
self._finished_iterators = {}
self._used_once = {}
for dataset in self._given_datasets:
builder_class = registry.get_builder_class(dataset)
if (builder_class is None):
print(('No builder class found for %s.' % dataset))
continue
builder_instance = builder_class()
if (dataset in self.opts['dataset_attributes']):
attributes = self.opts['dataset_attributes'][dataset]
else:
self.writer.write(('Dataset %s is missing from dataset_attributes in config.' % dataset), 'error')
sys.exit(1)
builder_instance.build(self._dataset_type, attributes)
dataset_instance = builder_instance.load(self._dataset_type, attributes)
if (dataset_instance is None):
continue
(loader_instance, sampler_instance) = self.build_dataloader(dataset_instance, self.opts)
self._builders.append(builder_instance)
self._datasets.append(dataset_instance)
self._loaders.append(loader_instance)
self._samplers.append(sampler_instance)
self._per_dataset_lengths.append(len(dataset_instance))
self._total_length += len(dataset_instance)
self._num_datasets = len(self._datasets)
self._dataset_probablities = [(1 / self._num_datasets) for _ in range(self._num_datasets)]
training_parameters = self._global_config.training_parameters
self._proportional_sampling = training_parameters.dataset_size_proportional_sampling
if (self._dataset_type != 'train'):
self._proportional_sampling = True
if (self._proportional_sampling is True):
self._dataset_probablities = self._per_dataset_lengths[:]
self._dataset_probablities = [(prob / self._total_length) for prob in self._dataset_probablities]
self._loader_index = 0
self._chosen_dataset = self._datasets[self._loader_index]
self._chosen_loader = self._loaders[self._loader_index]
def dataset_type(self):
return self._dataset_type
def num_datasets(self):
return self._num_datasets
def get_datasets(self):
return self._datasets
def first_loader(self):
return self._loaders[0]
def __len__(self):
return (self._total_length // get_batch_size())
def __iter__(self):
if (self._num_datasets == 1):
return iter(self._loaders[0])
self._iterators = []
self._finished_iterators = {}
self._used_once = {}
for loader in self._loaders:
self._iterators.append(iter(loader))
self._chosen_iterator = self._iterators[self._loader_index]
return self
def __next__(self):
try:
next_batch = next(self._chosen_iterator)
except StopIteration:
if ((self._proportional_sampling is True) or (len(self._used_once) != self._num_datasets)):
self._finished_iterators[self._loader_index] = 1
if (len(self._finished_iterators) == self._num_datasets):
raise
else:
self.change_dataloader()
next_batch = next(self._chosen_iterator)
else:
raise
self._used_once[self._loader_index] = 1
return next_batch
def change_dataloader(self):
if (self._num_datasets <= 1):
return
choice = 0
if self._is_main_process:
choice = np.random.choice(self._num_datasets, 1, p=self._dataset_probablities)[0]
while (choice in self._finished_iterators):
choice = np.random.choice(self._num_datasets, 1, p=self._dataset_probablities)[0]
choice = broadcast_scalar(choice, 0, device=registry.get('current_device'))
self._loader_index = choice
self._chosen_dataset = self._datasets[self._loader_index]
self._chosen_loader = self._loaders[self._loader_index]
self._chosen_iterator = self._iterators[self._loader_index]
def verbose_dump(self, *args, **kwargs):
self._chosen_dataset.verbose_dump(*args, **kwargs)
def prepare_batch(self, batch):
batch = self._chosen_dataset.prepare_batch(batch)
self.change_dataloader()
return batch
def update_registry_for_model(self, config):
for builder in self._builders:
builder.update_registry_for_model(config)
def init_args(self, parser):
parser.add_argument_group('General MultiDataset Arguments')
parser.add_argument('-dsp', '--dataset_size_proportional_sampling', type=bool, default=0, help='Pass if you want to sample from dataset according to its size. Default: Equal weighted sampling')
def _init_args(self, parser):
for builder in self._builders:
builder.init_args(parser)
def clean_config(self, config):
return config
def build_dataloader(self, dataset, opts):
training_parameters = self._global_config.training_parameters
num_workers = training_parameters.num_workers
pin_memory = training_parameters.pin_memory
other_args = {}
self._add_extra_args_for_dataloader(dataset, opts, other_args)
loader = DataLoader(dataset=dataset, pin_memory=pin_memory, collate_fn=BatchCollator(), num_workers=num_workers, **other_args)
loader.dataset_type = self._dataset_type
return (loader, other_args.get('sampler', None))
def _add_extra_args_for_dataloader(self, dataset, opts, other_args={}):
training_parameters = self._global_config.training_parameters
dataset_type = self._dataset_type
other_args['shuffle'] = False
if (dataset_type != 'test'):
other_args['shuffle'] = True
if ((training_parameters.local_rank is not None) and training_parameters.distributed):
other_args['sampler'] = DistributedSampler(dataset, shuffle=other_args['shuffle'])
other_args.pop('shuffle')
other_args['batch_size'] = get_batch_size()
return other_args
def seed_sampler(self, epoch):
training_parameters = self._global_config.training_parameters
if ((training_parameters.local_rank is not None) and training_parameters.distributed):
for sampler in self._samplers:
assert hasattr(sampler, 'set_epoch'), "Can't seed without `set_epoch` method"
sampler.set_epoch(epoch) |
class TestSingleStageDetector(TestCase):
def setUp(self):
register_all_modules()
    @parameterized.expand(['retinanet/retinanet_r18_fpn_1x_coco.py', 'centernet/centernet_r18_8xb16-crop512-140e_coco.py', 'fsaf/fsaf_r50_fpn_1x_coco.py', 'yolox/yolox_tiny_8xb8-300e_coco.py', 'yolo/yolov3_mobilenetv2_8xb24-320-300e_coco.py', 'reppoints/reppoints-minmax_r50_fpn-gn_head-gn_1x_coco.py'])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.registry import MODELS
detector = MODELS.build(model)
self.assertTrue(detector.backbone)
self.assertTrue(detector.neck)
self.assertTrue(detector.bbox_head)
    @parameterized.expand([('retinanet/retinanet_r18_fpn_1x_coco.py', ('cpu', 'cuda')), ('centernet/centernet_r18_8xb16-crop512-140e_coco.py', ('cpu', 'cuda')), ('fsaf/fsaf_r50_fpn_1x_coco.py', ('cpu', 'cuda')), ('yolox/yolox_tiny_8xb8-300e_coco.py', ('cpu', 'cuda')), ('yolo/yolov3_mobilenetv2_8xb24-320-300e_coco.py', ('cpu', 'cuda')), ('reppoints/reppoints-minmax_r50_fpn-gn_head-gn_1x_coco.py', ('cpu', 'cuda'))])
def test_single_stage_forward_loss_mode(self, cfg_file, devices):
message_hub = MessageHub.get_instance(f'test_single_stage_forward_loss_mode-{time.time()}')
message_hub.update_info('iter', 0)
message_hub.update_info('epoch', 0)
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.registry import MODELS
assert all([(device in ['cpu', 'cuda']) for device in devices])
for device in devices:
detector = MODELS.build(model)
detector.init_weights()
if (device == 'cuda'):
if (not torch.cuda.is_available()):
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
data = detector.data_preprocessor(packed_inputs, True)
losses = detector.forward(**data, mode='loss')
self.assertIsInstance(losses, dict)
    @parameterized.expand([('retinanet/retinanet_r18_fpn_1x_coco.py', ('cpu', 'cuda')), ('centernet/centernet_r18_8xb16-crop512-140e_coco.py', ('cpu', 'cuda')), ('fsaf/fsaf_r50_fpn_1x_coco.py', ('cpu', 'cuda')), ('yolox/yolox_tiny_8xb8-300e_coco.py', ('cpu', 'cuda')), ('yolo/yolov3_mobilenetv2_8xb24-320-300e_coco.py', ('cpu', 'cuda')), ('reppoints/reppoints-minmax_r50_fpn-gn_head-gn_1x_coco.py', ('cpu', 'cuda'))])
def test_single_stage_forward_predict_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.registry import MODELS
assert all([(device in ['cpu', 'cuda']) for device in devices])
for device in devices:
detector = MODELS.build(model)
if (device == 'cuda'):
if (not torch.cuda.is_available()):
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
data = detector.data_preprocessor(packed_inputs, False)
detector.eval()
with torch.no_grad():
batch_results = detector.forward(**data, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
    @parameterized.expand([('retinanet/retinanet_r18_fpn_1x_coco.py', ('cpu', 'cuda')), ('centernet/centernet_r18_8xb16-crop512-140e_coco.py', ('cpu', 'cuda')), ('fsaf/fsaf_r50_fpn_1x_coco.py', ('cpu', 'cuda')), ('yolox/yolox_tiny_8xb8-300e_coco.py', ('cpu', 'cuda')), ('yolo/yolov3_mobilenetv2_8xb24-320-300e_coco.py', ('cpu', 'cuda')), ('reppoints/reppoints-minmax_r50_fpn-gn_head-gn_1x_coco.py', ('cpu', 'cuda'))])
def test_single_stage_forward_tensor_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.registry import MODELS
assert all([(device in ['cpu', 'cuda']) for device in devices])
for device in devices:
detector = MODELS.build(model)
if (device == 'cuda'):
if (not torch.cuda.is_available()):
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
packed_inputs = demo_mm_inputs(2, [[3, 128, 128], [3, 125, 130]])
data = detector.data_preprocessor(packed_inputs, False)
batch_results = detector.forward(**data, mode='tensor')
self.assertIsInstance(batch_results, tuple) |
def image_to_tfexample(image_data, image_format, height, width, class_id):
return tf.train.Example(features=tf.train.Features(feature={'image/encoded': bytes_feature(image_data), 'image/format': bytes_feature(image_format), 'image/class/label': int64_feature(class_id), 'image/height': int64_feature(height), 'image/width': int64_feature(width)})) |
def get_logger(level=logging.INFO):
logger = logging.getLogger(os.path.basename(inspect.getouterframes(inspect.currentframe())[1][1]))
logger.setLevel(level)
formatter = logging.Formatter('%(asctime)s-%(name)s[%(levelname)s]$ %(message)s', '%Y-%m-%d %H:%M:%S')
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(level)
ch.setFormatter(formatter)
logger.addHandler(ch)
return logger |
class ConditionalController(FlowController):
def __init__(self, passes, options, condition=None, **partial_controller):
self.condition = condition
super().__init__(passes, options, **partial_controller)
def __iter__(self):
if self.condition():
for pass_ in self.passes:
(yield pass_) |
def save_checkpoint(state, fpath='checkpoint.pth.tar'):
mkdir_if_missing(osp.dirname(fpath))
torch.save(state, fpath) |
class Evaluator():
def __init__(self, image_size: int=None):
self.image_size = image_size
self._image_generation_metrics = [tf.keras.metrics.MeanSquaredError('mse'), ImageRMSE('rmse'), tf.keras.metrics.MeanAbsoluteError('mae'), PSNRMetric('psnr'), LPIPSMetric('vgg', name='lpips'), SSIMMetric('ssim')]
def update_state(self, ground_truth_images, generated_images):
image_size = self.image_size
if (image_size is None):
image_size = tf.maximum(tf.shape(ground_truth_images)[(- 2)], tf.shape(generated_images)[(- 2)])
ground_truth_images = resize_tf(ground_truth_images, image_size)
if (tf.shape(generated_images)[(- 2)] != image_size):
generated_images = resize_tf(generated_images, image_size, 'bilinear')
for metric in self._image_generation_metrics:
metric.update_state(ground_truth_images, generated_images)
def get_progress_bar_info(self):
return OrderedDict([('img_rgbl1', float(next((x for x in self._image_generation_metrics if (x.name == 'mae'))).result())), ('img_lpips', float(next((x for x in self._image_generation_metrics if (x.name == 'lpips'))).result()))])
def result(self):
return OrderedDict(((m.name, float(m.result())) for m in chain(self._image_generation_metrics))) |
def conv3d_bn(batchNorm, in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1, bias=True):
(padding, dilation) = consistent_padding_with_dilation(padding, dilation, dim=3)
if batchNorm:
return nn.Sequential(nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias), nn.BatchNorm3d(out_planes))
else:
return nn.Sequential(nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias)) |
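# Quick shape check for conv3d_bn (a sketch; assumes the
# consistent_padding_with_dilation helper returns the usual 3-tuples):
# with kernel_size=3, stride=1, padding=1 the spatial volume is preserved.
import torch

block = conv3d_bn(batchNorm=True, in_planes=8, out_planes=16)
x = torch.randn(2, 8, 10, 10, 10)  # (N, C, D, H, W)
assert block(x).shape == (2, 16, 10, 10, 10) |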
def layer_name_mapping(key, file):
layer_rename_map = {'word_embeddings.weight': 'word_embeddings.weight', 'word_embeddings.norm.weight': 'word_embeddings_layernorm.weight', 'word_embeddings.norm.bias': 'word_embeddings_layernorm.bias', 'weight': 'ln_f.weight', 'bias': 'ln_f.bias'}
if (key in layer_rename_map):
return layer_rename_map[key]
layer_number = int(re.match('.*layer_(\\d*).*', file)[1])
layer_number -= 3
return (f'h.{layer_number}.' + key) |
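# Worked examples for layer_name_mapping, assuming Megatron-style shard file
# names in which the transformer blocks start at layer_03 (hence the -3 offset).
assert layer_name_mapping('word_embeddings.weight', 'layer_01-model_00.pt') == 'word_embeddings.weight'
assert layer_name_mapping('bias', 'layer_72-model_00.pt') == 'ln_f.bias'
# Keys outside the rename map are prefixed with their block index:
assert layer_name_mapping('input_layernorm.weight', 'layer_05-model_00.pt') == 'h.2.input_layernorm.weight' |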
class DeformableMLP(nn.Module):
def __init__(self, in_channels: int, out_channels: int, stride: int=1, padding: int=0, dilation: int=1, groups: int=1, bias: bool=True):
super(DeformableMLP, self).__init__()
if ((in_channels % groups) != 0):
raise ValueError('in_channels must be divisible by groups')
if ((out_channels % groups) != 0):
raise ValueError('out_channels must be divisible by groups')
if (stride != 1):
raise ValueError('stride must be 1')
if (padding != 0):
raise ValueError('padding must be 0')
self.in_channels = in_channels
self.out_channels = out_channels
self.stride = _pair(stride)
self.padding = _pair(padding)
self.dilation = _pair(dilation)
self.groups = groups
self.weight = nn.Parameter(torch.empty(out_channels, (in_channels // groups), 1, 1))
if bias:
self.bias = nn.Parameter(torch.empty(out_channels))
else:
self.register_parameter('bias', None)
self.offset_modulator_conv = DWConv2d(in_channels, (3 * in_channels))
self.norm = nn.BatchNorm2d(in_channels)
self.act = nn.GELU()
self.reset_parameters()
def reset_parameters(self) -> None:
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if (self.bias is not None):
(fan_in, _) = init._calculate_fan_in_and_fan_out(self.weight)
bound = (1 / math.sqrt(fan_in))
init.uniform_(self.bias, (- bound), bound)
def forward(self, input: Tensor) -> Tensor:
(B, C, H, W) = input.size()
offset_modulator = self.offset_modulator_conv(input)
(offset_y, offset_x, modulator) = torch.chunk(offset_modulator, 3, dim=1)
modulator = (2.0 * torch.sigmoid(modulator))
offset = torch.cat((offset_y, offset_x), dim=1)
max_offset = (max(H, W) // 4)
offset = offset.clamp((- max_offset), max_offset)
x = torchvision.ops.deform_conv2d(input=input, offset=offset, weight=self.weight, bias=self.bias, padding=self.padding, mask=modulator, stride=self.stride, dilation=self.dilation)
x = self.act(self.norm(x))
return x |
def parse_args():
parser = argparse.ArgumentParser(description='MMDet eval image prediction result for each')
parser.add_argument('config', help='test config file path')
parser.add_argument('prediction_path', help='prediction path where test pkl result')
parser.add_argument('show_dir', help='directory where painted images will be saved')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument('--wait-time', type=float, default=0, help='the interval of show (s), 0 is block')
parser.add_argument('--topk', default=20, type=int, help='saved Number of the highest topk and lowest topk after index sorting')
parser.add_argument('--show-score-thr', type=float, default=0, help='score threshold (default: 0.)')
parser.add_argument('--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed.')
args = parser.parse_args()
return args |
class GhostTopkBatchNorm2d(nn.Module):
def __init__(self, num_features, k=10, dim=1, momentum=0.1, bias=True, eps=1e-05, beta=0.75, noise=False):
super(GhostTopkBatchNorm2d, self).__init__()
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.zeros(num_features))
self.momentum = momentum
self.dim = dim
self.register_buffer('meanTOPK', torch.zeros(num_features))
self.noise = noise
self.k = k
        self.beta = beta
self.eps = eps
self.bias = Parameter(torch.Tensor(num_features))
self.weight = Parameter(torch.Tensor(num_features))
def forward(self, x):
if self.training:
mean = x.view(x.size(0), x.size(self.dim), (- 1)).mean((- 1)).mean(0)
y = x.transpose(0, 1)
z = y.contiguous()
t = z.view(z.size(0), (- 1))
A = torch.abs((t.transpose(1, 0) - mean))
            MeanTOPK = torch.topk(A, self.k, dim=0)[0].mean(0)
            meanTOPK = ((self.beta * torch.autograd.variable.Variable(self.meanTOPK)) + ((1 - self.beta) * MeanTOPK))
const = ((0.5 * (1 + ((np.pi * np.log(4)) ** 0.5))) / ((2 * np.log(A.size(0))) ** 0.5))
meanTOPK = (meanTOPK * const)
            self.meanTOPK.copy_(meanTOPK.data)
scale = (1 / (meanTOPK + self.eps))
self.running_mean.mul_(self.momentum).add_((mean.data * (1 - self.momentum)))
self.running_var.mul_(self.momentum).add_((scale.data * (1 - self.momentum)))
else:
mean = torch.autograd.Variable(self.running_mean)
scale = torch.autograd.Variable(self.running_var)
out = ((x - mean.view(1, mean.size(0), 1, 1)) * scale.view(1, scale.size(0), 1, 1))
if (self.noise and self.training):
std = (0.1 * _std(x, self.dim).data)
ones = torch.ones_like(x.data)
std_noise = Variable((torch.normal(ones, ones) * std))
out = (out * std_noise)
if (self.weight is not None):
out = (out * self.weight.view(1, self.weight.size(0), 1, 1))
if (self.bias is not None):
out = (out + self.bias.view(1, self.bias.size(0), 1, 1))
return out |
class GANG():
def __init__(self, user_product_graph, product_user_graph, user_ground_truth, priors, mean_priors, sup_per, nor_flg, sup_flg=False):
self.pu_dim = (len(priors[0]) + len(priors[2]))
self.res_pu_spam_prior_vector = None
self.diag_pu_matrix = None
self.res_pu_spam_post_vector = np.zeros((self.pu_dim, 1))
self.pu_csr_matrix = None
self.diag_pu_csr_matrix = None
self.nor_pu_csr_matrix = None
self.u_priors = priors[0]
self.r_priors = priors[1]
self.p_priors = priors[2]
(p_vector, u_vector, r_vector) = ([], [], [])
if nor_flg:
(u_mean, p_mean, r_mean) = (0.5, 0.5, 0.5)
else:
(u_mean, r_mean, p_mean) = (mean_priors[0], mean_priors[1], mean_priors[2])
for u in priors[0].values():
u_vector.append(u)
for p in priors[2].values():
p_vector.append(p)
res_u_vector = [(i - u_mean) for i in u_vector]
res_p_vector = [(i - p_mean) for i in p_vector]
if sup_flg:
(pos_ids, neg_ids) = semi_data(user_ground_truth, sup_per)
for (iter, prob) in enumerate(res_u_vector):
if (iter in pos_ids):
res_u_vector[iter] = (1 - u_mean)
elif (iter in neg_ids):
res_u_vector[iter] = (0 - u_mean)
res_pu_vector = (res_p_vector + res_u_vector)
self.res_pu_spam_prior_vector = np.c_[res_pu_vector]
self.pu_matrix = lil_matrix((self.pu_dim, self.pu_dim))
self.diag_pu_matrix = lil_matrix((self.pu_dim, self.pu_dim))
for id in range(0, self.pu_dim):
if (id < len(self.p_priors)):
self.diag_pu_matrix[(id, id)] = len(product_user_graph[str(id)])
else:
self.diag_pu_matrix[(id, id)] = len(user_product_graph[str(id)])
for (p_id, reviews) in product_user_graph.items():
for r in reviews:
self.pu_matrix[(int(p_id), int(r[0]))] = 1
for (u_id, reviews) in user_product_graph.items():
for r in reviews:
self.pu_matrix[(int(u_id), int(r[0]))] = 1
def pu_lbp(self, max_iters):
self.pu_csr_matrix = self.pu_matrix.tocsr()
self.diag_pu_csr_matrix = self.diag_pu_matrix.tocsr()
i = 0
while (i < max_iters):
sum_0 = np.sum(self.res_pu_spam_post_vector)
self.res_pu_spam_post_vector = (self.res_pu_spam_prior_vector + ((2 * 0.008) * self.pu_csr_matrix.dot(self.res_pu_spam_post_vector)))
sum_1 = np.sum(self.res_pu_spam_post_vector)
i += 1
if (abs((sum_0 - sum_1)) < 0.1):
return abs((sum_0 - sum_1))
def classify(self):
u_post = {}
p_post = {}
r_post = {}
pu_post = self.res_pu_spam_post_vector
no_prod = len(self.p_priors)
for (i, r) in enumerate(pu_post[no_prod:]):
u_post[str((i + no_prod))] = float(r)
for (i, r) in enumerate(pu_post[:no_prod]):
p_post[str(i)] = float(r)
for (i, r) in self.r_priors.items():
r_post[i] = ((u_post[i[0]] + float(r)) / 2)
u_post = scale_value(u_post)
p_post = scale_value(p_post)
r_post = scale_value(r_post)
return (u_post, p_post, r_post) |
class TestBufferedShuffleIterator(TestBase):
def test_shuffle(self):
items = list(BufferedShuffleIterator(NativeCheckpointableIterator(self.flattened_test_data.copy()), 971, 42))
self.assertMultisetEqual(items, self.flattened_test_data)
def test_shuffle_buffer_size_one(self):
items = list(BufferedShuffleIterator(NativeCheckpointableIterator(self.flattened_test_data.copy()), 1, 42))
self.assertListEqual(items, self.flattened_test_data) |
def calibration(model, dataloader=None, n_samples=128, calib_func=None):
if (calib_func is not None):
calib_func(model)
else:
import math
from .smooth_quant import model_forward
batch_size = dataloader.batch_size
iters = int(math.ceil((n_samples / batch_size)))
if ((n_samples % batch_size) != 0):
logger.info('calibration samples increase from {} to {} due to batch_size is {}'.format(n_samples, (iters * batch_size), batch_size))
model_forward(model, dataloader, iters, next(model.parameters()).device) |
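# Hedged usage sketch for calibration, exercising the user-supplied calib_func
# branch (the dataloader branch additionally needs the package-local
# smooth_quant.model_forward helper). The model and batches are placeholders.
import torch

model = torch.nn.Linear(16, 4)

def calib_func(m):
    # Run a few representative batches through the model to collect statistics.
    for _ in range(4):
        m(torch.randn(32, 16))

calibration(model, calib_func=calib_func) |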
class DeResNetWeightNorm(_DeResNet):
def __init__(self, inplanes, planes, strides, output_paddings, activation):
super(DeResNetWeightNorm, self).__init__(DeResNetBlockWeightNorm, inplanes, planes, strides, output_paddings, activation) |
def main(params, dataset_name, transfer_learning=False):
identifier = time.strftime('%Y%m%d-%H%M%S')
run = '{}/sup/{}'.format(dataset_name, identifier)
if transfer_learning:
run += '-tl'
if (('train_all' in params) and params['train_all']):
run += '-test'
print("Starting run '{}'".format(run))
model = models.create_conv1_model(28, 1, num_kernels=400, n=1, batch_norm=True)
if transfer_learning:
weights_path = '../output/models/heb-mnist-fashion--015911_m_100_acc=0.855.pth'
model = utils.load_weights(model, os.path.join(PATH, weights_path), layer_names=['conv1'], freeze=True)
device = utils.get_device()
model.to(device)
print("Device set to '{}'.".format(device))
(train_loader, val_loader) = data.get_data(params, dataset_name, subset=params['train_subset'])
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(params=model.parameters(), lr=params['lr'])
train_evaluator = SupervisedEvaluator(model=model, criterion=criterion)
evaluator = SupervisedEvaluator(model=model, criterion=criterion)
trainer = SupervisedTrainer(model=model, optimizer=optimizer, criterion=criterion, device=device)
(es_handler, tb_logger) = attach_handlers(run, model, optimizer, trainer, train_evaluator, evaluator, train_loader, val_loader, params)
trainer.run(train_loader=train_loader, epochs=params['epochs'])
tb_logger.writer.add_hparams(params, {'hparam/accuracy': es_handler.best_score})
tb_logger.close() |
def tf_efficientnet_b7_ns(pretrained=False, **kwargs):
kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
kwargs['pad_type'] = 'same'
model = _gen_efficientnet('tf_efficientnet_b7_ns', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs)
return model |
class AudioLanguagePretrainDataset(Dataset):
def __init__(self, json_files, audio_config, blacklist=None):
self.json_data = _load_json_file(json_files, blacklist)
self.lengths = [item['duration'] for item in self.json_data]
self.sr = audio_config['sr']
if (audio_config['max_length'] != 0):
self.max_length = (audio_config['max_length'] * self.sr)
else:
self.max_length = 0
def __len__(self):
return len(self.json_data)
def __getitem__(self, index):
item = self.json_data[index]
wav_path = item['audio']
(waveform, _) = librosa.load(wav_path, sr=self.sr, mono=True)
if (self.max_length != 0):
if (waveform.shape[(- 1)] > self.max_length):
max_start = (waveform.shape[(- 1)] - self.max_length)
start = random.randint(0, max_start)
waveform = waveform[start:(start + self.max_length)]
caption = text_preprocess(item['caption'])
audio_id = item['id']
return (torch.tensor(waveform), caption, audio_id) |
def get_dataloaders(args):
dataset_type = args.dataset.input_type
if (dataset_type in dataset_functions):
((train_dataset, train_sampler, train_collate_fn), (valid_dataset, valid_sampler, valid_collate_fn), (test_dataset, test_sampler, test_collate_fn)) = dataset_functions[dataset_type](args)
else:
raise NotImplementedError(f'get_dataloaders() of dataset type {dataset_type} not implemented')
train_loader = DataLoader(train_dataset, batch_size=args.dataloader.batch_size.train, shuffle=True, num_workers=args.dataloader.num_workers, collate_fn=train_collate_fn)
    valid_loader = DataLoader(valid_dataset, batch_size=args.dataloader.batch_size.valid, shuffle=False, num_workers=args.dataloader.num_workers, collate_fn=valid_collate_fn)
test_loader = DataLoader(test_dataset, batch_size=args.dataloader.batch_size.test, shuffle=False, num_workers=args.dataloader.num_workers, collate_fn=test_collate_fn)
return (train_loader, valid_loader, test_loader) |
def get_ImageNet_class_subset(class_idx, train=True, batch_size=None, shuffle=None, augm_type='test', num_workers=8, size=224, config_dict=None):
    if (batch_size is None):
if train:
batch_size = DEFAULT_TRAIN_BATCHSIZE
else:
batch_size = DEFAULT_TEST_BATCHSIZE
augm_config = {}
transform = get_imageNet_augmentation(type=augm_type, out_size=size, config_dict=augm_config)
if ((not train) and (augm_type != 'none')):
        print('Warning: ImageNet test set with data augmentation')
if (shuffle is None):
shuffle = train
path = get_imagenet_path()
    if train:
dataset = ImageNetClassSubset(path, class_idx, split='train', transform=transform)
else:
dataset = ImageNetClassSubset(path, class_idx, split='val', transform=transform)
loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
if (config_dict is not None):
config_dict['Dataset'] = 'ImageNetClassSubset'
        config_dict['Batch size'] = batch_size
config_dict['Augmentation'] = augm_config
return loader |
def get_model(model):
if isinstance(model, DataParallel):
return model.module
return model |
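# Sanity sketch for get_model: it unwraps a DataParallel container and is a
# no-op on a bare module.
import torch

net = torch.nn.Linear(4, 2)
assert get_model(net) is net
assert get_model(torch.nn.DataParallel(net)) is net |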
class Ranker():
def __init__(self, index_path, faiss_index_path, nprobe, part_range, dim, inference, device, faiss_depth=1024):
self.inference = inference
self.faiss_depth = faiss_depth
if (faiss_depth is not None):
self.faiss_index = FaissIndex(index_path, faiss_index_path, nprobe, part_range=part_range)
self.retrieve = partial(self.faiss_index.retrieve, self.faiss_depth)
self.index = IndexPart(index_path, device=device, dim=dim, part_range=part_range, verbose=True)
def encode(self, queries):
assert (type(queries) in [list, tuple]), type(queries)
Q = self.inference.queryFromText(queries, bsize=(512 if (len(queries) > 512) else None))
return Q
def rank(self, Q, pids=None):
pids = (self.retrieve(Q, verbose=False)[0] if (pids is None) else pids)
assert (type(pids) in [list, tuple]), type(pids)
assert (Q.size(0) == 1), (len(pids), Q.size())
assert all(((type(pid) is int) for pid in pids))
scores = []
if (len(pids) > 0):
Q = Q.permute(0, 2, 1)
scores = self.index.rank(Q, pids)
scores_sorter = torch.tensor(scores).sort(descending=True)
(pids, scores) = (torch.tensor(pids)[scores_sorter.indices].tolist(), scores_sorter.values.tolist())
return (pids, scores) |
def found_in_url(df):
pred_df = pd.DataFrame(index=df.index)
pred_df['A_in_URL'] = df.apply((lambda row: check_name_in_string(row['A'], scrape_url(row['URL']))), axis=1)
pred_df['B_in_URL'] = df.apply((lambda row: check_name_in_string(row['B'], scrape_url(row['URL']))), axis=1)
return pred_df |
def test_max_iou_assigner_with_empty_gt():
self = MaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
bboxes = torch.FloatTensor([[0, 0, 10, 10], [10, 10, 20, 20], [5, 5, 15, 15], [32, 32, 38, 42]])
gt_bboxes = torch.FloatTensor([])
assign_result = self.assign(bboxes, gt_bboxes)
expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
assert torch.all((assign_result.gt_inds == expected_gt_inds)) |
def test_constructors():
types = [bytes, str, bool, int, float, tuple, list, dict, set]
expected = {t.__name__: t() for t in types}
if env.PY2:
expected['bytes'] = bytes()
expected['str'] = unicode()
assert (m.default_constructors() == expected)
data = {bytes: b'41', str: 42, bool: 'Not empty', int: '42', float: '+1e3', tuple: range(3), list: range(3), dict: [('two', 2), ('one', 1), ('three', 3)], set: [4, 4, 5, 6, 6, 6], memoryview: b'abc'}
inputs = {k.__name__: v for (k, v) in data.items()}
expected = {k.__name__: k(v) for (k, v) in data.items()}
if env.PY2:
inputs['bytes'] = b'41'
inputs['str'] = 42
expected['bytes'] = b'41'
expected['str'] = u'42'
assert (m.converting_constructors(inputs) == expected)
assert (m.cast_functions(inputs) == expected)
noconv1 = m.converting_constructors(expected)
for k in noconv1:
assert (noconv1[k] is expected[k])
noconv2 = m.cast_functions(expected)
for k in noconv2:
assert (noconv2[k] is expected[k]) |
def pointnet_fp_module(xyz1, xyz2, points1, points2, mlp, is_training, bn_decay, scope, bn=True):
with tf.variable_scope(scope) as sc:
(dist, idx) = three_nn(xyz1, xyz2)
dist = tf.maximum(dist, 1e-10)
norm = tf.reduce_sum((1.0 / dist), axis=2, keep_dims=True)
norm = tf.tile(norm, [1, 1, 3])
weight = ((1.0 / dist) / norm)
interpolated_points = three_interpolate(points2, idx, weight)
if (points1 is not None):
new_points1 = tf.concat(axis=2, values=[interpolated_points, points1])
else:
new_points1 = interpolated_points
new_points1 = tf.expand_dims(new_points1, 2)
for (i, num_out_channel) in enumerate(mlp):
new_points1 = Ops.xxlu(Ops.conv2d(new_points1, k=(1, 1), out_c=num_out_channel, str=1, pad='VALID', name=('llll' + str(i))), label='lrelu')
new_points1 = tf.squeeze(new_points1, [2])
return new_points1 |
def separate_channels_mido(items, n_channels=9, use_note_on_pitch=True):
caches = []
for i in range((n_channels + 1)):
caches.append(dict())
midi_instruments = []
for i in range((n_channels + 1)):
midi_instruments.append(dict())
for (i, ins_items) in enumerate(items):
for item in ins_items:
program = item.program
channel = item.channel
if (item.name == 'key on'):
if (program not in midi_instruments[channel].keys()):
midi_instruments[channel][program] = MidiTrack([Message('program_change', channel=channel, program=program, time=item.start)])
caches[channel][program] = LastCache()
caches[channel][program].update(time=item.start)
key = freq2key(item.pitch, _round=False)
if (key < 0):
continue
new_pitch_bend = round(((key - round(key)) * PITCHBEND_STEPS))
msg = Message('note_on', channel=channel, note=round(key), velocity=item.velocity, time=(item.start - caches[channel][program].last_sample_time))
midi_instruments[channel][program].append(msg)
caches[channel][program].update(freq=item.pitch, vel=item.velocity, time=item.start, is_on=True, on_pitch=freq2key(item.pitch, _round=True))
msg = Message('pitchwheel', channel=channel, pitch=new_pitch_bend, time=(item.start - caches[channel][program].last_sample_time))
midi_instruments[channel][program].append(msg)
caches[channel][program].update(pitch_bend=new_pitch_bend)
elif (item.name == 'set freq'):
if (program not in midi_instruments[channel].keys()):
midi_instruments[channel][program] = MidiTrack([Message('program_change', channel=channel, program=program, time=item.time)])
caches[channel][program] = LastCache()
caches[channel][program].update(time=item.time)
if (not caches[channel][program].is_on):
continue
key1 = freq2key(caches[channel][program].last_freq, _round=False)
key2 = freq2key(item.value, _round=False)
if ((key1 == key2) or (key2 < 0)):
continue
if (key1 <= 0):
key1 = key2
diff = round(((key2 - key1) * PITCHBEND_STEPS))
if ((abs(diff) > 0) and (abs((caches[channel][program].pitch_bend + diff)) < PITCHBEND_MAX)):
new_pitch_bend = round((caches[channel][program].pitch_bend + diff))
msg = Message('pitchwheel', channel=channel, pitch=new_pitch_bend, time=(item.time - caches[channel][program].last_sample_time))
midi_instruments[channel][program].append(msg)
caches[channel][program].update(time=item.time, pitch_bend=new_pitch_bend)
else:
if (caches[channel][program].on_pitch < 0):
continue
msg = Message('note_off', channel=channel, note=caches[channel][program].on_pitch, time=(item.time - caches[channel][program].last_sample_time))
midi_instruments[channel][program].append(msg)
caches[channel][program].update(freq=item.value, time=item.time)
                    key = round(key2)
vel = caches[channel][program].last_vel
assert ((vel >= MIDI_MIN) and (vel < MIDI_MAX)), f'Invalid velocity value {vel}, it should be in [{MIDI_MIN}, {MIDI_MAX}).'
if ((key < MIDI_MIN) or (key >= MIDI_MAX)):
continue
msg = Message('note_on', channel=channel, note=round(key2), velocity=vel, time=(item.time - caches[channel][program].last_sample_time))
midi_instruments[channel][program].append(msg)
new_pitch_bend = round(((key2 - round(key2)) * PITCHBEND_STEPS))
caches[channel][program].update(freq=item.value, time=item.time, pitch_bend=0, on_pitch=round(key2))
msg = Message('pitchwheel', channel=channel, pitch=new_pitch_bend, time=(item.time - caches[channel][program].last_sample_time))
midi_instruments[channel][program].append(msg)
caches[channel][program].update(pitch_bend=new_pitch_bend)
elif (item.name == 'key off'):
if (program not in midi_instruments[channel].keys()):
midi_instruments[channel][program] = MidiTrack([Message('program_change', channel=channel, program=program, time=item.start)])
caches[channel][program] = LastCache()
caches[channel][program].update(time=item.start)
assert ((item.velocity >= MIDI_MIN) and (item.velocity < MIDI_MAX)), f'Invalid velocity value {item.velocity}, it should be in [{MIDI_MIN}, {MIDI_MAX}).'
key = caches[channel][program].on_pitch
if ((key < MIDI_MIN) or (key >= MIDI_MAX)):
continue
msg = Message('note_off', note=key, time=(item.start - caches[channel][program].last_sample_time), channel=channel)
midi_instruments[channel][program].append(msg)
caches[channel][program].update(freq=item.pitch, vel=item.velocity, time=item.start, pitch_bend=0, is_on=False, on_pitch=(- 1))
elif (item.name == 'drum note on'):
program = DRUM_INS
channel = DRUM_CHANNEL
if (program not in midi_instruments[channel].keys()):
midi_instruments[channel][program] = MidiTrack([Message('program_change', channel=channel, program=program, time=item.start)])
caches[channel][program] = LastCache()
caches[channel][program].update(freq=item.pitch, vel=item.velocity, time=item.start, pitch_bend=0)
midi_instruments[channel][program].append(Message('note_on', channel=channel, note=item.pitch, velocity=item.velocity, time=(item.start - caches[channel][program].last_sample_time)))
caches[channel][program].update(freq=item.pitch, vel=item.velocity, time=item.start, pitch_bend=0)
msg = Message('note_off', channel=channel, note=item.pitch, velocity=0, time=1)
midi_instruments[channel][program].append(msg)
caches[channel][program].update(freq=item.pitch, vel=item.velocity, time=item.start, pitch_bend=0)
return midi_instruments |
class ChildThread(threading.Thread):
def __init__(self, threadID, name, counter, cuda_device, bash_command):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.counter = counter
self.cuda_device = cuda_device
self.bash_command = bash_command
def run(self):
bash_command = self.bash_command
os.system(bash_command)
import time
import random
        time.sleep((random.random() * 5))
logger.info(('Finishing ' + self.name)) |
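# Hedged usage sketch for ChildThread: fan out one shell command per GPU and
# wait for all of them. The command line is hypothetical; logger is assumed
# to be configured by the surrounding module.
threads = []
for gpu in range(2):
    cmd = f'CUDA_VISIBLE_DEVICES={gpu} python train.py --seed {gpu}'
    t = ChildThread(threadID=gpu, name=f'worker-{gpu}', counter=0, cuda_device=gpu, bash_command=cmd)
    t.start()
    threads.append(t)
for t in threads:
    t.join() |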
def download_image(args_tuple):
try:
(url, filename) = args_tuple
if (not os.path.exists(filename)):
urllib.urlretrieve(url, filename)
with open(filename) as f:
assert (hashlib.sha1(f.read()).hexdigest() != MISSING_IMAGE_SHA1)
test_read_image = io.imread(filename)
return True
except KeyboardInterrupt:
raise Exception()
except:
return False |
@cuda.jit('(float32[:], int32)', device=True, inline=True)
def area(int_pts, num_of_inter):
area_val = 0.0
for i in range((num_of_inter - 2)):
area_val += abs(trangle_area(int_pts[:2], int_pts[((2 * i) + 2):((2 * i) + 4)], int_pts[((2 * i) + 4):((2 * i) + 6)]))
return area_val |
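# CPU reference for the CUDA device function above (a sketch, assuming int_pts
# is a flat [x0, y0, x1, y1, ...] array of a convex polygon's vertices): the
# same fan triangulation from vertex 0, with each triangle area written out as
# half the absolute 2-D cross product.
import numpy as np

def area_cpu(int_pts, num_of_inter):
    pts = np.asarray(int_pts, dtype=np.float32).reshape(-1, 2)
    p0 = pts[0]
    total = 0.0
    for i in range(num_of_inter - 2):
        a, b = pts[i + 1] - p0, pts[i + 2] - p0
        total += abs(a[0] * b[1] - a[1] * b[0]) / 2.0
    return total

assert np.isclose(area_cpu([0, 0, 1, 0, 1, 1, 0, 1], 4), 1.0)  # unit square |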
def prepare_params(kwargs):
kwargs = prepare_mode(kwargs)
default_max_episode_steps = 50
wgcsl_params = dict()
env_name = kwargs['env_name']
def make_env(subrank=None):
try:
            env = gym.make(env_name, reward_type='sparse')
except:
logger.log('Can not make sparse reward environment')
env = gym.make(env_name)
if env_name.startswith('Fetch'):
env._max_episode_steps = 50
env = FetchGoalWrapper(env)
elif env_name.startswith('HandManipulate'):
env._max_episode_steps = 100
elif env_name.startswith('Point'):
env = PointGoalWrapper(env)
env.env._max_episode_steps = 50
elif env_name.startswith('Sawyer'):
env = SawyerGoalWrapper(env)
elif env_name.startswith('Reacher'):
env = ReacherGoalWrapper(env)
if ((subrank is not None) and (logger.get_dir() is not None)):
try:
from mpi4py import MPI
mpi_rank = MPI.COMM_WORLD.Get_rank()
except ImportError:
MPI = None
mpi_rank = 0
                logger.warn('Running with a single MPI process. This should work, but the results may differ from the ones published in Plappert et al.')
if hasattr(env, '_max_episode_steps'):
max_episode_steps = env._max_episode_steps
else:
max_episode_steps = default_max_episode_steps
env = Monitor(env, os.path.join(logger.get_dir(), ((str(mpi_rank) + '.') + str(subrank))), allow_early_resets=True)
env = gym.wrappers.TimeLimit(env, max_episode_steps=max_episode_steps)
return env
kwargs['make_env'] = make_env
tmp_env = cached_make_env(kwargs['make_env'])
if hasattr(tmp_env, '_max_episode_steps'):
kwargs['T'] = tmp_env._max_episode_steps
else:
kwargs['T'] = default_max_episode_steps
kwargs['max_u'] = (np.array(kwargs['max_u']) if isinstance(kwargs['max_u'], list) else kwargs['max_u'])
kwargs['gamma'] = (1.0 - (1.0 / kwargs['T']))
if ('lr' in kwargs):
kwargs['pi_lr'] = kwargs['lr']
kwargs['Q_lr'] = kwargs['lr']
del kwargs['lr']
for name in ['buffer_size', 'hidden', 'layers', 'network_class', 'polyak', 'batch_size', 'Q_lr', 'pi_lr', 'norm_eps', 'norm_clip', 'max_u', 'action_l2', 'clip_obs', 'scope', 'relative_goals', 'use_supervised']:
wgcsl_params[name] = kwargs[name]
kwargs[('_' + name)] = kwargs[name]
del kwargs[name]
kwargs['wgcsl_params'] = wgcsl_params
return kwargs |
class SuperMobileSPADEResnetBlock(nn.Module):
def __init__(self, fin, fout, opt):
super(SuperMobileSPADEResnetBlock, self).__init__()
self.learned_shortcut = (fin != fout)
fmiddle = min(fin, fout)
self.conv_0 = SuperConv2d(fin, fmiddle, kernel_size=3, padding=1)
self.conv_1 = SuperConv2d(fmiddle, fout, kernel_size=3, padding=1)
if self.learned_shortcut:
self.conv_s = SuperConv2d(fin, fout, kernel_size=1, bias=False)
spade_config_str = opt.norm_G
self.norm_0 = SuperMobileSPADE(spade_config_str, fin, opt.semantic_nc, nhidden=(opt.ngf * 2))
self.norm_1 = SuperMobileSPADE(spade_config_str, fmiddle, opt.semantic_nc, nhidden=(opt.ngf * 2))
if self.learned_shortcut:
self.norm_s = SuperMobileSPADE(spade_config_str, fin, opt.semantic_nc, nhidden=(opt.ngf * 2))
def forward(self, x, seg, config, verbose=False):
x_s = self.shortcut(x, seg, config)
dx = self.conv_0(self.actvn(self.norm_0(x, seg, config, verbose=verbose)), config)
if self.learned_shortcut:
dx = self.conv_1(self.actvn(self.norm_1(dx, seg, config)), config)
else:
dx = self.conv_1(self.actvn(self.norm_1(dx, seg, config)), {'channel': x.shape[1]})
out = (x_s + dx)
return out
def shortcut(self, x, seg, config):
if self.learned_shortcut:
x_s = self.conv_s(self.norm_s(x, seg, config), config)
else:
x_s = x
return x_s
def actvn(self, x):
return F.leaky_relu(x, 0.2) |
@register_model
def hrnet_w48(pretrained=True, **kwargs):
return _create_model('hrnet_w48', pretrained, kwargs) |
class GCNModel(nn.Module):
def __init__(self, config):
super(GCNModel, self).__init__()
self.config = config
self.use_cuda = self.config.use_cuda
self.in_dim = self.config.gcn['in_dim']
self.out_dim = self.config.gcn['out_dim']
self.node_emb_layer = NodeEmbedFactory().get_node_embed_technique(self.config)(self.config)
self.gcn_layers = nn.ModuleList([GCNLayer(config) for _ in range(self.config.gcn['layers'])])
self.fforward = nn.Linear(self.out_dim, self.config.class_num)
def forward(self, batch_dict, running_mode, loss_fn):
g = batch_dict['graphs']
class_target = batch_dict['tgt']
h = to_cuda(g.ndata['node_feat'], self.use_cuda)
node_len = g.ndata['node_len'].cpu().tolist()
h = self.node_emb_layer(h, node_len)
for gcn in self.gcn_layers:
h = gcn(g, h)
g.ndata['h'] = h
mean_feats = dgl.mean_nodes(g, 'h')
dense_output = F.leaky_relu(self.fforward(mean_feats))
loss = 0
if (running_mode in ['train', 'val']):
tgt = to_cuda(torch.tensor(class_target, dtype=torch.long), use_cuda=self.use_cuda)
loss = loss_fn(dense_output, tgt)
sm_mask_output = F.softmax(dense_output, dim=(- 1))
return (sm_mask_output, class_target, loss) |
class HornerMultivarPolynomialOpt(HornerMultivarPolynomial):
root_class = OptimalFactorisationRoot |
def ensure_dir(file_path):
directory = os.path.dirname(file_path)
if (not os.path.exists(directory)):
os.makedirs(directory, exist_ok=True) |
@_ops.RegisterGradient('Open3DSparseConvTranspose')
def _sparse_conv_transpose_grad(op, grad):
filters = op.inputs[0]
out_importance = op.inputs[1]
inp_features = op.inputs[2]
inp_neighbors_importance_sum = op.inputs[4]
inp_neighbors_row_splits = op.inputs[5]
neighbors_index = op.inputs[6]
neighbors_kernel_index = op.inputs[7]
neighbors_importance = op.inputs[8]
neighbors_row_splits = op.inputs[9]
filter_grad = _lib.open3d_sparse_conv_transpose_backprop_filter(normalize=op.get_attr('normalize'), max_temp_mem_MB=op.get_attr('max_temp_mem_MB'), filters=filters, out_importance=out_importance, inp_features=inp_features, inp_neighbors_importance_sum=inp_neighbors_importance_sum, inp_neighbors_row_splits=inp_neighbors_row_splits, neighbors_index=neighbors_index, neighbors_kernel_index=neighbors_kernel_index, neighbors_importance=neighbors_importance, neighbors_row_splits=neighbors_row_splits, out_features_gradient=grad)
num_points = _tf.shape(inp_features, out_type=_tf.int64)[0]
arange = _tf.range(0, _tf.shape(neighbors_index)[0])
(inv_neighbors_index, _, inv_arange) = _lib.open3d_invert_neighbors_list(num_points, neighbors_index, neighbors_row_splits, arange)
inv_neighbors_kernel_index = _tf.gather(neighbors_kernel_index, inv_arange)
if (_tf.shape(neighbors_importance)[0] > 0):
inv_neighbors_importance = _tf.gather(neighbors_importance, inv_arange)
else:
inv_neighbors_importance = _tf.ones((0,), dtype=_tf.float32)
inp_features_grad = _lib.open3d_sparse_conv(normalize=op.get_attr('normalize'), max_temp_mem_MB=op.get_attr('max_temp_mem_MB'), filters=_tf.transpose(filters, [0, 2, 1]), inp_features=grad, inp_importance=out_importance, neighbors_index=inv_neighbors_index, neighbors_kernel_index=inv_neighbors_kernel_index, neighbors_importance=inv_neighbors_importance, neighbors_row_splits=inp_neighbors_row_splits)
return ((([filter_grad] + [None]) + [inp_features_grad]) + ([None] * 7)) |
def load_model_tokenizer(model_name, device='cpu'):
huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)
if (model_name in ['facebook/bart-base']):
huggingface_model.config.no_repeat_ngram_size = 0
huggingface_model.config.forced_bos_token_id = None
huggingface_model.config.min_length = 0
return (huggingface_model, tokenizer) |
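# Hedged usage sketch for load_model_tokenizer; model_dict/tokenizer_dict are
# module-level lookup tables in this codebase, and 'facebook/bart-base' is the
# one checkpoint the function special-cases.
model, tokenizer = load_model_tokenizer('facebook/bart-base', device='cpu')
inputs = tokenizer('a minimal smoke test', return_tensors='pt')
outputs = model(**inputs)  # seq2seq forward pass on CPU |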
def setup(args):
if args.config_file.endswith('.yaml'):
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.DATALOADER.NUM_WORKERS = 0
cfg.merge_from_list(args.opts)
cfg.freeze()
else:
cfg = LazyConfig.load(args.config_file)
cfg = LazyConfig.apply_overrides(cfg, args.opts)
setup_logger(name='fvcore')
setup_logger()
return cfg |
class ASPPPooling(nn.Sequential):
def __init__(self, in_channels, out_channels):
super(ASPPPooling, self).__init__(nn.AdaptiveAvgPool2d(1), nn.Conv2d(in_channels, out_channels, 1, bias=False), nn.BatchNorm2d(out_channels), nn.ReLU())
def forward(self, x):
size = x.shape[(- 2):]
for mod in self:
x = mod(x)
return F.interpolate(x, size=size, mode='bilinear', align_corners=False) |
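# Quick shape check for ASPPPooling: features are globally pooled, projected,
# then broadcast back to the input's spatial size, so the module is
# resolution-preserving.
import torch

aspp_pool = ASPPPooling(in_channels=256, out_channels=64)
x = torch.randn(2, 256, 33, 33)
assert aspp_pool(x).shape == (2, 64, 33, 33) |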
class ParserImageFolder(Parser):
def __init__(self, root, class_map='', min_count=0):
super().__init__()
self.root = root
class_to_idx = None
if class_map:
class_to_idx = load_class_map(class_map, root)
(self.samples, self.class_to_idx) = find_images_and_targets(root, class_to_idx=class_to_idx, min_count=min_count)
if (len(self.samples) == 0):
raise RuntimeError(f"Found 0 images in subfolders of {root}. Supported image extensions are {', '.join(IMG_EXTENSIONS)}")
def __getitem__(self, index):
(path, target) = self.samples[index]
return (open(path, 'rb'), target)
def __len__(self):
return len(self.samples)
def _filename(self, index, basename=False, absolute=False):
filename = self.samples[index][0]
if basename:
filename = os.path.basename(filename)
elif (not absolute):
filename = os.path.relpath(filename, self.root)
return filename |
def train(rank, world_size, cfg):
torch.manual_seed(cfg.get('seed', 1337))
torch.cuda.manual_seed(cfg.get('seed', 1337))
np.random.seed(cfg.get('seed', 1337))
random.seed(cfg.get('seed', 1337))
master_port = int(os.environ.get('MASTER_PORT', 8738))
master_addr = os.environ.get('MASTER_ADDR', '127.0.0.1')
tcp_store = torch.distributed.TCPStore(master_addr, master_port, world_size, (rank == 0))
torch.distributed.init_process_group('nccl', store=tcp_store, rank=rank, world_size=world_size)
if torch.cuda.is_available():
device = torch.device('cuda', rank)
torch.cuda.set_device(device)
else:
assert (world_size == 1)
device = torch.device('cpu')
if (rank == 0):
writer = SummaryWriter(logdir=cfg['logdir'])
logger = get_logger(cfg['logdir'])
print('**log_dir:', cfg['logdir'])
logger.info('Let Trans4Map training begin !!')
    dataset_name = 's2d3d'
    if (dataset_name == 'matterport'):
t_loader = matterport_SemDataset33(cfg['data'], split=cfg['data']['train_split'])
v_loader = matterport_SemDataset33(cfg['data'], split=cfg['data']['val_split'])
    elif (dataset_name == 's2d3d'):
t_loader = S2d3dSemDataset(cfg['data'], Split=cfg['data']['train_split'])
v_loader = S2d3dSemDataset(cfg['data'], Split=cfg['data']['val_split'])
t_sampler = DistributedSampler(t_loader)
v_sampler = DistributedSampler(v_loader, shuffle=False)
if (rank == 0):
        print('#Envs in train:', len(t_loader))
        print('#Envs in val:  ', len(v_loader))
trainloader = data.DataLoader(t_loader, batch_size=(cfg['training']['batch_size'] // world_size), num_workers=cfg['training']['n_workers'], drop_last=True, pin_memory=True, sampler=t_sampler, multiprocessing_context='fork')
valloader = data.DataLoader(v_loader, batch_size=(cfg['training']['batch_size'] // world_size), num_workers=cfg['training']['n_workers'], pin_memory=True, sampler=v_sampler, multiprocessing_context='fork')
model = Attention360_pano_s2d3d(cfg['model'], device)
model = model.to(device)
if (device.type == 'cuda'):
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[rank], find_unused_parameters=True)
model_parameters = filter((lambda p: p.requires_grad), model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
if (rank == 0):
print('# trainable parameters = ', params)
optimizer_params = {k: v for (k, v) in cfg['training']['optimizer'].items() if (k != 'name')}
optimizer = torch.optim.AdamW(filter((lambda p: p.requires_grad), model.parameters()), **optimizer_params)
if (rank == 0):
logger.info('Using optimizer {}'.format(optimizer))
lr_decay_lambda = (lambda epoch: (cfg['training']['scheduler']['lr_decay_rate'] ** (epoch // cfg['training']['scheduler']['lr_epoch_per_decay'])))
scheduler = LambdaLR(optimizer, lr_lambda=lr_decay_lambda)
obj_running_metrics = IoU(cfg['model']['n_obj_classes'])
obj_running_metrics_val = IoU(cfg['model']['n_obj_classes'])
obj_running_metrics.reset()
obj_running_metrics_val.reset()
val_loss_meter = averageMeter()
time_meter = averageMeter()
loss_fn = SemmapLoss()
loss_fn = loss_fn.to(device=device)
if (rank == 0):
logger.info('Using loss {}'.format(loss_fn))
start_iter = 0
start_epoch = 0
best_iou = (- 100.0)
if (cfg['training']['resume'] is not None):
if os.path.isfile(cfg['training']['resume']):
if (rank == 0):
logger.info("Loading model and optimizer from checkpoint '{}'".format(cfg['training']['resume']))
print("Loading model and optimizer from checkpoint '{}'".format(cfg['training']['resume']))
checkpoint = torch.load(cfg['training']['resume'], map_location='cpu')
model_state = checkpoint['model_state']
model.load_state_dict(model_state)
optimizer.load_state_dict(checkpoint['optimizer_state'])
scheduler.load_state_dict(checkpoint['scheduler_state'])
start_epoch = checkpoint['epoch']
start_iter = checkpoint['iter']
best_iou = checkpoint['best_iou']
if (rank == 0):
logger.info("Loaded checkpoint '{}' (iter {})".format(cfg['training']['resume'], checkpoint['epoch']))
elif (rank == 0):
logger.info("No checkpoint found at '{}'".format(cfg['training']['resume']))
print("No checkpoint found at '{}'".format(cfg['training']['resume']))
elif (cfg['training']['load_model'] is not None):
checkpoint = torch.load(cfg['training']['load_model'], map_location='cpu')
model_state = checkpoint['model_state']
model.load_state_dict(model_state)
if (rank == 0):
logger.info("Loading model and optimizer from checkpoint '{}'".format(cfg['training']['load_model']))
print("Loading model and optimizer from checkpoint '{}'".format(cfg['training']['load_model']))
iter = start_iter
for epoch in range(start_epoch, cfg['training']['train_epoch'], 1):
t_sampler.set_epoch(epoch)
for batch in trainloader:
iter += 1
start_ts = time.time()
(rgb, semmap_gt, fname) = batch
observed_masks = (semmap_gt >= 0)
semmap_gt[(~ observed_masks)] = 0
model.train()
optimizer.zero_grad()
(semmap_pred, observed_mask) = model(rgb, observed_masks)
semmap_gt = semmap_gt.long()
if observed_masks.any():
loss = loss_fn(semmap_gt.to(device), semmap_pred, observed_mask)
loss.backward()
optimizer.step()
semmap_pred = semmap_pred.permute(0, 2, 3, 1)
masked_semmap_gt = semmap_gt[observed_masks]
masked_semmap_pred = semmap_pred[observed_masks]
obj_gt = masked_semmap_gt.detach()
obj_pred = masked_semmap_pred.data.max((- 1))[1].detach()
obj_running_metrics.add(obj_pred, obj_gt)
time_meter.update((time.time() - start_ts))
if ((iter % cfg['training']['print_interval']) == 0):
conf_metric = obj_running_metrics.conf_metric.conf
conf_metric = torch.FloatTensor(conf_metric)
conf_metric = conf_metric.to(device)
distrib.all_reduce(conf_metric)
distrib.all_reduce(loss)
loss /= world_size
if (rank == 0):
conf_metric = conf_metric.cpu().numpy()
conf_metric = conf_metric.astype(np.int32)
tmp_metrics = IoU(cfg['model']['n_obj_classes'])
tmp_metrics.reset()
tmp_metrics.conf_metric.conf = conf_metric
(_, mIoU, acc, _, mRecall, _, mPrecision) = tmp_metrics.value()
writer.add_scalar('train_metrics/mIoU', mIoU, iter)
writer.add_scalar('train_metrics/mRecall', mRecall, iter)
writer.add_scalar('train_metrics/mPrecision', mPrecision, iter)
writer.add_scalar('train_metrics/Overall_Acc', acc, iter)
fmt_str = 'Iter: {:d} == Epoch [{:d}/{:d}] == Loss: {:.4f} == mIoU: {:.4f} == mRecall:{:.4f} == mPrecision:{:.4f} == Overall_Acc:{:.4f} == Time/Image: {:.4f}'
print_str = fmt_str.format(iter, epoch, cfg['training']['train_epoch'], loss.item(), mIoU, mRecall, mPrecision, acc, (time_meter.avg / cfg['training']['batch_size']))
print(print_str)
writer.add_scalar('loss/train_loss', loss.item(), iter)
time_meter.reset()
model.eval()
with torch.no_grad():
for batch_val in valloader:
(rgb, semmap_gt, fname) = batch_val
observed_masks = (semmap_gt >= 0)
semmap_gt[(~ observed_masks)] = 0
semmap_gt = semmap_gt.long()
(semmap_pred, observed_mask) = model(rgb, observed_masks)
if observed_masks.any():
loss_val = loss_fn(semmap_gt.to(device), semmap_pred, observed_mask)
semmap_pred = semmap_pred.permute(0, 2, 3, 1)
masked_semmap_gt = semmap_gt[observed_masks]
masked_semmap_pred = semmap_pred[observed_masks]
obj_gt_val = masked_semmap_gt
obj_pred_val = masked_semmap_pred.data.max((- 1))[1]
obj_running_metrics_val.add(obj_pred_val, obj_gt_val)
val_loss_meter.update(loss_val.item())
conf_metric = obj_running_metrics_val.conf_metric.conf
conf_metric = torch.FloatTensor(conf_metric)
conf_metric = conf_metric.to(device)
distrib.all_reduce(conf_metric)
val_loss_avg = val_loss_meter.avg
val_loss_avg = torch.FloatTensor([val_loss_avg])
val_loss_avg = val_loss_avg.to(device)
distrib.all_reduce(val_loss_avg)
val_loss_avg /= world_size
if (rank == 0):
val_loss_avg = val_loss_avg.cpu().numpy()
val_loss_avg = val_loss_avg[0]
writer.add_scalar('loss/val_loss', val_loss_avg, iter)
logger.info(('Iter %d Loss: %.4f' % (iter, val_loss_avg)))
conf_metric = conf_metric.cpu().numpy()
conf_metric = conf_metric.astype(np.int32)
tmp_metrics = IoU(cfg['model']['n_obj_classes'])
tmp_metrics.reset()
tmp_metrics.conf_metric.conf = conf_metric
(_, mIoU, acc, _, mRecall, _, mPrecision) = tmp_metrics.value()
writer.add_scalar('val_metrics/mIoU', mIoU, iter)
writer.add_scalar('val_metrics/mRecall', mRecall, iter)
writer.add_scalar('val_metrics/mPrecision', mPrecision, iter)
writer.add_scalar('val_metrics/Overall_Acc', acc, iter)
logger.info('val -- mIoU: {}'.format(mIoU))
logger.info('val -- mRecall: {}'.format(mRecall))
logger.info('val -- mPrecision: {}'.format(mPrecision))
logger.info('val -- Overall_Acc: {}'.format(acc))
print('val -- mIoU: {}'.format(mIoU))
print('val -- mRecall: {}'.format(mRecall))
print('val -- mPrecision: {}'.format(mPrecision))
print('val -- Overall_Acc: {}'.format(acc))
if (mIoU >= best_iou):
best_iou = mIoU
state = {'epoch': epoch, 'iter': iter, 'model_state': model.state_dict(), 'optimizer_state': optimizer.state_dict(), 'scheduler_state': scheduler.state_dict(), 'best_iou': best_iou}
save_path = os.path.join(writer.file_writer.get_logdir(), '{}_mp3d_best_model.pkl'.format(cfg['model']['arch']))
torch.save(state, save_path)
state = {'epoch': epoch, 'iter': iter, 'model_state': model.state_dict(), 'optimizer_state': optimizer.state_dict(), 'scheduler_state': scheduler.state_dict(), 'best_iou': best_iou}
save_path = os.path.join(cfg['checkpoint_dir'], 'ckpt_model.pkl')
torch.save(state, save_path)
val_loss_meter.reset()
obj_running_metrics_val.reset()
obj_running_metrics.reset()
scheduler.step(epoch) |
def check_config_docstrings_have_checkpoints():
configs_without_checkpoint = []
for config_class in list(CONFIG_MAPPING.values()):
checkpoint = get_checkpoint_from_config_class(config_class)
name = config_class.__name__
if ((checkpoint is None) and (name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK)):
configs_without_checkpoint.append(name)
if (len(configs_without_checkpoint) > 0):
message = '\n'.join(sorted(configs_without_checkpoint))
raise ValueError(f'''The following configurations don't contain any valid checkpoint:
{message}''') |
def test_digits_cosine_greedi_nn():
model = SumRedundancySelection(100, 'cosine', optimizer='greedi', optimizer_kwds={'optimizer1': 'naive', 'optimizer2': 'naive'}, random_state=0)
model.fit(X_digits)
assert_array_equal(model.ranking, digits_cosine_greedi_ranking)
assert_array_almost_equal(model.gains, digits_cosine_greedi_gains, 4)
assert_array_almost_equal(model.subset, X_digits[model.ranking]) |
def rasterize(glctx, pos, tri, resolution, ranges=None, grad_db=True):
assert isinstance(glctx, RasterizeGLContext)
assert ((grad_db is True) or (grad_db is False))
grad_db = (grad_db and glctx.output_db)
assert (isinstance(pos, torch.Tensor) and isinstance(tri, torch.Tensor))
resolution = tuple(resolution)
if (ranges is None):
ranges = torch.empty(size=(0, 2), dtype=torch.int32, device='cpu')
else:
assert isinstance(ranges, torch.Tensor)
if (glctx.active_depth_peeler is not None):
        raise RuntimeError('Cannot call rasterize() during depth peeling operation, use rasterize_next_layer() instead')
return _rasterize_func.apply(glctx, pos, tri, resolution, ranges, grad_db, (- 1)) |