# code stringlengths 101 5.91M |
# |---|
def area(boxlist, scope=None):
    """Compute the area of every box in *boxlist*.

    Args:
        boxlist: BoxList whose ``.get()`` returns an [N, 4] tensor of
            [y_min, x_min, y_max, x_max] coordinates.
        scope: optional TF name scope.

    Returns:
        A rank-1 tensor of N box areas.
    """
    with tf.name_scope(scope, 'Area'):
        y_min, x_min, y_max, x_max = tf.split(
            value=boxlist.get(), num_or_size_splits=4, axis=1)
        heights = y_max - y_min
        widths = x_max - x_min
        return tf.squeeze(heights * widths, [1])
def _para_get_metric(metric: RougeStrEvaluation, key, note):
    """Fetch (and reset) the metric's current values and extract the '_A' entry.

    Exactly one key in the returned metrics must end with '_A'.

    Returns:
        Tuple of (all metrics, value of the unique '_A' key, the metric
        object, the caller-supplied key).
    """
    current_metrics = metric.get_metric(reset=True, note=note)
    a_keys = [name for name in current_metrics if name.endswith('_A')]
    assert len(a_keys) == 1
    best_key = a_keys[0]
    return (current_metrics, current_metrics[best_key], metric, key)
class MixtureNLLLoss(nn.Module):
    """Negative log-likelihood of a mixture model.

    Per-component NLLs come from a distribution-specific elementwise loss
    ('gaussian', 'laplace' or 'von_mises') and are combined with the mixture
    log-weights via logsumexp.
    """

    def __init__(self, component_distribution: Union[(str, List[str])], eps: float=1e-06, reduction: str='mean') -> None:
        # component_distribution: one name applied to all output dims, or a
        # list with one distribution name per output dimension.
        super(MixtureNLLLoss, self).__init__()
        self.reduction = reduction
        loss_dict = {'gaussian': GaussianNLLLoss, 'laplace': LaplaceNLLLoss, 'von_mises': VonMisesNLLLoss}
        if isinstance(component_distribution, str):
            self.nll_loss = loss_dict[component_distribution](eps=eps, reduction='none')
        else:
            # Heterogeneous case: a separate loss module per output dim.
            self.nll_loss = nn.ModuleList([loss_dict[dist](eps=eps, reduction='none') for dist in component_distribution])

    def forward(self, pred: torch.Tensor, target: torch.Tensor, prob: torch.Tensor, mask: torch.Tensor, ptr: Optional[torch.Tensor]=None, joint: bool=False) -> torch.Tensor:
        """Return the (reduced) mixture NLL.

        Args:
            pred: per-component predictions; the indexing below assumes the
                first target.size(-1) channels are location parameters and the
                next target.size(-1) are scales — TODO confirm this layout
                against the component loss implementations.
            target: ground-truth values.
            prob: unnormalized mixture logits, softmaxed over the last dim.
            mask: validity mask broadcast over the time dimension.
            ptr: optional CSR index pointer; with joint=True, per-sample NLLs
                are summed per segment via segment_csr.
            joint: aggregate NLLs over the batch (or ptr segments) before the
                logsumexp over components.
        """
        if isinstance(self.nll_loss, nn.ModuleList):
            # Slice the (loc, scale) channel pair for each output dim and
            # stack the per-dimension NLLs back along the last axis.
            nll = torch.cat([self.nll_loss[i](pred=pred[(..., [i, (target.size((- 1)) + i)])], target=target[(..., [i])].unsqueeze(1)) for i in range(target.size((- 1)))], dim=(- 1))
        else:
            nll = self.nll_loss(pred=pred, target=target.unsqueeze(1))
        # Zero out invalid timesteps, then sum over time and output dims.
        nll = (nll * mask.view((- 1), 1, target.size((- 2)), 1)).sum(dim=((- 2), (- 1)))
        if joint:
            if (ptr is None):
                nll = nll.sum(dim=0, keepdim=True)
            else:
                nll = segment_csr(src=nll, indptr=ptr, reduce='sum')
        else:
            pass
        log_pi = F.log_softmax(prob, dim=(- 1))
        # Mixture NLL: -logsumexp_k(log pi_k - NLL_k).
        loss = (- torch.logsumexp((log_pi - nll), dim=(- 1)))
        if (self.reduction == 'mean'):
            return loss.mean()
        elif (self.reduction == 'sum'):
            return loss.sum()
        elif (self.reduction == 'none'):
            return loss
        else:
            raise ValueError('{} is not a valid value for reduction'.format(self.reduction))
def mapping(path, dest):
    """Scan pickled graphs under *path* and build node/edge label maps.

    Reads every ``*.dat`` pickle (a networkx-style graph) in *path*, assigns
    consecutive integer ids to node and edge labels, tracks min/max node,
    edge and degree statistics, pickles the resulting feature map to *dest*
    and returns it.

    Args:
        path: directory containing ``*.dat`` pickled graphs.
        dest: output path for the pickled feature map.

    Returns:
        dict with forward/backward label maps and graph statistics.
    """
    node_forward, node_backward = {}, {}
    edge_forward, edge_backward = {}, {}
    node_count, edge_count = 0, 0
    max_nodes, max_edges, max_degree = 0, 0, 0
    min_nodes, min_edges = float('inf'), float('inf')
    for filename in tqdm(os.listdir(path)):
        if not filename.endswith('.dat'):
            continue
        # os.path.join is robust whether or not *path* has a trailing slash
        # (the original concatenated strings and required one); the context
        # manager closes the handle even if unpickling fails.
        with open(os.path.join(path, filename), 'rb') as f:
            G = pickle.load(f)
        max_nodes = max(max_nodes, len(G.nodes()))
        min_nodes = min(min_nodes, len(G.nodes()))
        for _, data in G.nodes.data():
            if data['label'] not in node_forward:
                node_forward[data['label']] = node_count
                node_backward[node_count] = data['label']
                node_count += 1
        max_edges = max(max_edges, len(G.edges()))
        min_edges = min(min_edges, len(G.edges()))
        for _, _, data in G.edges.data():
            if data['label'] not in edge_forward:
                edge_forward[data['label']] = edge_count
                edge_backward[edge_count] = data['label']
                edge_count += 1
        max_degree = max(max_degree, max(d for _, d in G.degree()))
    feature_map = {
        'node_forward': node_forward,
        'node_backward': node_backward,
        'edge_forward': edge_forward,
        'edge_backward': edge_backward,
        'max_nodes': max_nodes,
        'min_nodes': min_nodes,
        'max_edges': max_edges,
        'min_edges': min_edges,
        'max_degree': max_degree,
    }
    with open(dest, 'wb') as f:
        pickle.dump(feature_map, f)
    print('Successfully done node count', node_count)
    print('Successfully done edge count', edge_count)
    return feature_map
def dynamic_import_scheduler(module):
    """Resolve *module* to a scheduler class.

    Raises:
        AssertionError: if the resolved class does not implement
            SchedulerInterface.
    """
    scheduler_class = dynamic_import(module, SCHEDULER_DICT)
    assert issubclass(scheduler_class, SchedulerInterface), \
        f'{module} does not implement SchedulerInterface'
    return scheduler_class
class Bottleneck(nn.Module):
    """DenseNet-style bottleneck block.

    A 1x1 conv expands to ``expansion * growthRate`` channels, a 3x3 conv
    produces ``growthRate`` new feature maps, and the result is concatenated
    onto the input along the channel axis.
    """

    def __init__(self, inplanes, expansion=4, growthRate=12, dropRate=0):
        super(Bottleneck, self).__init__()
        inter_channels = expansion * growthRate
        self.conv1 = nn.Conv2d(inplanes, inter_channels, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(inter_channels)
        self.conv2 = nn.Conv2d(inter_channels, growthRate, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(growthRate)
        self.relu = nn.ReLU(inplace=True)
        self.dropRate = dropRate

    def forward(self, x):
        """Return channel-wise concat of the input and the new features."""
        new_features = self.relu(self.bn1(self.conv1(x)))
        new_features = self.relu(self.bn2(self.conv2(new_features)))
        if self.dropRate > 0:
            new_features = F.dropout(new_features, p=self.dropRate, training=self.training)
        return torch.cat((x, new_features), 1)
def plot_mse(args):
    """Aggregate MSE / support-size results across seeds and methods.

    Reads one result file per (seed, method) pair, accumulates rows into a
    DataFrame, and prints seed-by-method pivots plus mean/var/SE aggregates
    for both 'mse' and 'num_nonzero'.
    """
    all_results = {'method': [], 'mse': [], 'num_nonzero': [], 'seed': []}
    for seed in args.seeds:
        for method in args.methods:
            # NN-based methods use a separate filename template.
            if method not in ['spinn', 'ridge_nn']:
                res_file = os.path.join(args.result_folder, args.file_template % (seed, method))
            else:
                res_file = os.path.join(args.result_folder, args.nn_file_template % (seed, method))
            try:
                results = read_method_result(res_file, method)
            except Exception:
                # Missing/corrupt result files are expected for some runs;
                # note and skip (the original bare `except:` also silently
                # swallowed KeyboardInterrupt/SystemExit).
                print('nope', res_file)
                continue
            for method_str, mse, num_nonzero in results:
                all_results['method'].append(method_str)
                all_results['mse'].append(float(mse))
                all_results['num_nonzero'].append(num_nonzero)
                all_results['seed'].append(seed)
    results_df = pd.DataFrame(all_results)
    pivot_df = results_df.pivot(index='seed', columns='method', values='mse')
    print(pivot_df)
    pivot_agg = pivot_df.agg(['mean', 'var', get_SE])
    print(pivot_agg)
    pivot_df = results_df.pivot(index='seed', columns='method', values='num_nonzero')
    pivot_agg = pivot_df.agg(['mean', 'var', get_SE])
    print(pivot_agg)
class Conv3x3GNReLU(nn.Module):
    """3x3 conv + GroupNorm(32 groups) + ReLU, with optional 2x bilinear upsampling.

    Note: ``out_channels`` must be divisible by 32 for the GroupNorm.
    """

    def __init__(self, in_channels, out_channels, upsample=False):
        super().__init__()
        self.upsample = upsample
        conv = nn.Conv2d(in_channels, out_channels, (3, 3), stride=1, padding=1, bias=False)
        norm = nn.GroupNorm(32, out_channels)
        act = nn.ReLU(inplace=True)
        self.block = nn.Sequential(conv, norm, act)

    def forward(self, x):
        out = self.block(x)
        if not self.upsample:
            return out
        return F.interpolate(out, scale_factor=2, mode='bilinear', align_corners=True)
def _get_transform_summary(transform):
    """One-line summary of a transform; only AffineTransform is supported."""
    if not isinstance(transform, AffineTransform):
        raise NotImplementedError
    return f'{type(transform).__name__}({transform.loc}, {transform.scale})'
def handle_evaluate(args):
    """Instantiate a Tester from *args* and run the evaluation."""
    evaluator = Tester(args)
    print('Experiment {} instantiated. Evaluation starting...'.format(args.checkname))
    evaluator.test()
def get_cifar10_loaders(data_route, batch_size, num_workers):
    """Build normalized CIFAR-10 train/test DataLoaders.

    Training data gets pad-and-crop plus horizontal-flip augmentation; both
    splits are converted to tensors and normalized with ``cifar_nm``.
    """
    augment = [T.RandomCrop(32, padding=4), T.RandomHorizontalFlip()]
    common = [T.ToTensor(), cifar_nm]
    train_set = dts.CIFAR10(data_route, train=True, download=True,
                            transform=T.Compose(augment + common))
    test_set = dts.CIFAR10(data_route, train=False, download=False,
                           transform=T.Compose(common))
    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True,
                              drop_last=True, num_workers=num_workers)
    test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False,
                             drop_last=False, num_workers=num_workers)
    return (train_loader, test_loader)
class ExpCfg():
    """Experiment configuration schema.

    MISSING fields (presumably omegaconf-style — confirm against the config
    framework in use) must be supplied by the concrete experiment config.
    """
    dataset: DATASET = MISSING          # dataset selector (required)
    savedir: str = run_data_root        # where run artifacts are written
    data_root: str = (ROOT / 'data')    # NOTE(review): Path assigned to a str field — confirm downstream coercion
    batch_size: int = 32                # training batch size
    val_batch_size: int = 32            # validation batch size
    data_loader_workers: int = 4        # DataLoader worker processes
    prefetch_factor: int = 4            # batches prefetched per worker
    disable_logs: bool = False          # suppress logging when True
    module: GeneralModule = MISSING     # module to train (required)
    max_epochs: int = 120               # training epoch budget
    gpus: int = 0                       # number of GPUs (0 = CPU)
    seed: int = 0                       # RNG seed
def read_uiuc_coref(filename, gold_text):
    """Parse a UIUC-style coreference file and align it to gold tokens.

    In the annotation stream, leading '*' characters open mentions at the
    current token position and '*_<cluster-id>' suffixes close the most
    recently opened mention.  Tokens may arrive split into pieces; pieces
    are re-merged against *gold_text* (a list of token lists, one per
    sentence).

    Returns:
        dict with 'clusters' (cluster id -> list of (sentence, start, end)),
        'mentions' ((sentence, start, end) -> cluster id) and 'text'
        (the re-assembled sentences).
    """
    mentions = {}
    clusters = defaultdict((lambda : []))
    unmatched_mentions = []
    text = [[]]
    sentence = 0
    word = 0
    # prev = [gold token currently being re-assembled, pieces seen so far].
    prev = ['', '']
    # NOTE(review): last_sentence is never used below.
    last_sentence = []
    # NOTE(review): the file handle is never closed explicitly (left to GC).
    for line in open(filename):
        for token in line.split():
            if (re.match('^[*]+$', token) is None):
                # Each leading '*' opens a mention at the current position.
                for char in token:
                    if (char == '*'):
                        unmatched_mentions.append((word, sentence))
                    else:
                        break
            regex = '[*][_][0-9]+'
            # Each '*_<id>' closes the most recently opened mention.
            for end in re.findall(regex, token):
                cluster = int(end[2:])
                end = (word + 1)
                (start, msentence) = unmatched_mentions.pop()
                if (msentence != sentence):
                    # Mention opened in an earlier sentence: clamp its end
                    # to that sentence's length.
                    end = len(gold_text[msentence])
                if ((msentence, start, end) in mentions):
                    print('Duplicate mention:', cluster, mentions[(msentence, start, end)])
                else:
                    mentions[(msentence, start, end)] = cluster
                    clusters[cluster].append((msentence, start, end))
            # Strip mention markers to recover the surface token.
            while (token[0] == '*'):
                token = token[1:]
            regex = '[*][_][0-9]+'
            token = re.split(regex, token)[0]
            if (token == gold_text[sentence][word]):
                # Exact match with the gold token: emit and advance.
                prev = ['', '']
                word += 1
                text[(- 1)].append(token)
            elif (len(prev[0]) == 0):
                # Start accumulating a split token against the gold form.
                prev[0] = gold_text[sentence][word]
                prev[1] = token
                text[(- 1)].append(token)
            elif ((prev[1] + token) == prev[0]):
                # Pieces now add up to the gold token: replace the partial
                # piece recorded earlier (possibly in the previous sentence
                # slot) and advance.
                if (len(text[(- 1)]) == 0):
                    text[(- 2)][(- 1)] = prev[0]
                else:
                    text[(- 1)][(- 1)] = prev[0]
                word += 1
                prev = ['', '']
            else:
                prev[1] += token
            if (word == len(gold_text[sentence])):
                # Sentence boundary in the gold text: start a new sentence.
                word = 0
                sentence += 1
                text.append([])
    # Drop the trailing empty sentence created by the last boundary.
    if (len(text[(- 1)]) == 0):
        text.pop()
    return {'clusters': clusters, 'mentions': mentions, 'text': text}
def sepreresnet272bn_cifar10(num_classes=10, **kwargs):
    """SE-PreResNet-272 (bottleneck) model for CIFAR-10."""
    return get_sepreresnet_cifar(
        num_classes=num_classes,
        blocks=272,
        bottleneck=True,
        model_name='sepreresnet272bn_cifar10',
        **kwargs,
    )
class BaseParser(abc.ABC):
    """Base class for page parsers.

    Subclasses override ``_raw_urls`` (and typically ``entries``) to expose
    the URLs/items found in ``content``.
    """
    selector: Optional[str]   # CSS/XPath-style selector used by subclasses
    follower: Optional[str]   # selector for follow-up links — confirm semantics in subclasses
    content: bytes            # raw page bytes to parse

    def _raw_urls(self) -> List[Union[(Dict[(str, str)], str)]]:
        # Each entry is either a URL string or a dict with at least a 'url'
        # key; the base class finds nothing.
        return []

    def entries(self) -> List:
        # BUG FIX: the original def had no body at all (a syntax error);
        # restored as an explicit empty default for subclasses to override.
        return []

    def urls(self) -> List[str]:
        """Return the parsed URLs, de-duplicated with order preserved."""
        # BUG FIX: the original iterated the bound method object
        # ``self._raw_urls`` instead of calling it.
        raw = self._raw_urls()
        urls = [(d if isinstance(d, str) else d['url']) for d in raw]
        return list(dict.fromkeys(urls))
class Facebook(BaseData):
    """Facebook page-page network dataset descriptor.

    Registers the file specs (name + md5 checksum), pickle loaders and
    tensor-conversion preprocessing for features, edge list and labels of a
    22,470-vertex, 85,501-edge graph with 4 classes and 8,189-dim features.
    """

    def __init__(self, data_root: Optional[str]=None) -> None:
        # data_root: optional override of where the raw files are stored.
        super().__init__('facebook', data_root)
        self._content = {'num_classes': 4, 'num_vertices': 22470, 'num_edges': 85501, 'dim_features': 8189, 'features': {'upon': [{'filename': 'features.pkl', 'md5': '046eec1b67fb5bf504eaad75e98af141'}], 'loader': load_from_pickle, 'preprocess': [to_tensor]}, 'edge_list': {'upon': [{'filename': 'edge_list.pkl', 'md5': '98c6551d020c7741554cae5eab8336ef'}], 'loader': load_from_pickle}, 'labels': {'upon': [{'filename': 'labels.pkl', 'md5': 'ae0c116274cedc00522df66bd921affc'}], 'loader': load_from_pickle, 'preprocess': [to_long_tensor]}}
class TFRSModel(tf.keras.Model):
    """Wrapper adapting a ``tfrs.Model`` for distributed training.

    Each tfrs task's internal loss reduction is forced to ``Reduction.NONE``
    so per-example losses can be averaged across replicas explicitly with
    ``tf.nn.compute_average_loss`` in ``train_step``.
    """

    def __init__(self, tfrs_model: tfrs.Model) -> None:
        super().__init__()
        log4Error.invalidInputError(isinstance(tfrs_model, tfrs.Model), ('FriesianTFRSModel only support tfrs.Model, but got ' + tfrs_model.__class__.__name__))
        log4Error.invalidInputError((not tfrs_model._is_compiled), 'TFRSModel should be initialized before compiling.')
        # Collect every tfrs task attached to the wrapped model.
        attr = tfrs_model.__dict__
        task_dict = dict()
        for k, v in attr.items():
            if isinstance(v, base.Task):
                task_dict[k] = v
        for k, v in task_dict.items():
            try:
                v._loss.reduction = tf.keras.losses.Reduction.NONE
            except AttributeError:
                # BUG FIX: was a bare `except:`; only the missing-_loss case
                # is expected here — anything else should propagate.
                warnings.warn((('Model task ' + k) + ' has no attribute _loss, please use `tf.keras.losses.Reduction.SUM` or `tf.keras.losses.Reduction.NONE` for loss reduction in this task if the Estimator throw an error.'))
        self.model = tfrs_model

    def call(self, features):
        """Delegate the forward pass to the wrapped model."""
        return self.model.call(features)

    def train_step(self, inputs) -> Dict[(str, tf.Tensor)]:
        """Run one optimization step and return a dict of metric tensors."""
        with tf.GradientTape() as tape:
            loss = self.model.compute_loss(inputs, training=True)
            loss_rank = loss.shape.rank
            if (loss_rank is not None) and (loss_rank != 0):
                # Per-example loss (reduction NONE): average across replicas.
                loss = tf.nn.compute_average_loss(loss)
            regularization_loss = tf.cast(tf.nn.scale_regularization_loss(sum(self.model.losses)), tf.float32)
            total_loss = loss + regularization_loss
        gradients = tape.gradient(total_loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
        metrics = {metric.name: metric.result() for metric in self.metrics}
        metrics['loss'] = loss
        metrics['regularization_loss'] = regularization_loss
        metrics['total_loss'] = total_loss
        return metrics

    def test_step(self, inputs) -> Dict[(str, tf.Tensor)]:
        """Evaluation step mirroring train_step without gradient updates."""
        loss = self.model.compute_loss(inputs, training=False)
        regularization_loss = sum(self.model.losses)
        total_loss = loss + regularization_loss
        metrics = {metric.name: metric.result() for metric in self.metrics}
        metrics['loss'] = loss
        metrics['regularization_loss'] = regularization_loss
        metrics['total_loss'] = total_loss
        return metrics
class Target():
    """Bundle of image, saliency-map and fixation-map containers for one stimulus."""

    def __init__(self, imagePath, saliencyPath, fixationPath, imageState=LoadState.unloaded, imageType=InputType.image, saliencyState=LoadState.unloaded, saliencyType=InputType.saliencyMapMatlab, fixationState=LoadState.unloaded, fixationType=InputType.fixationMapMatlab):
        # All three containers default to the unloaded state; loading is
        # presumably deferred to ImageContainer — confirm in its definition.
        self.image = ImageContainer(imagePath, imageType, imageState)
        self.saliency = ImageContainer(saliencyPath, saliencyType, saliencyState)
        self.fixation = ImageContainer(fixationPath, fixationType, fixationState)
def build_reading_dict(lexicon):
    """Group lexicon indices by reading and dump the mapping to disk.

    Each lexicon entry's first field must look like 'display/reading/...'
    (slash-separated, at least three tokens); shorter entries are skipped.
    An empty reading falls back to the display form.  Writes
    ``data/reading_dict.pkl`` (pickle) and ``data/reading_dict.txt``
    (tab-separated, UTF-8) and prints summary statistics.

    Args:
        lexicon: sequence of entries whose first element is the
            slash-separated word string.
    """
    reading_dict = defaultdict(list)
    for i, word in enumerate(lexicon):
        tokens = word[0].split('/')
        if len(tokens) < 3:
            continue
        display = tokens[0]
        reading = tokens[1]
        if reading == '':
            # No explicit reading: index under the surface form.
            reading = display
        reading_dict[reading].append(i)
    print('reading dict dumped with {} keys'.format(len(reading_dict.keys())))
    sorted_reading = sorted(reading_dict.items(), key=(lambda x: len(x[1])), reverse=True)
    print('most frequently shared readings')
    for reading, l in sorted_reading[:20]:
        print('{}: {}'.format(reading, len(l)))
    # Context manager closes the pickle handle (the original leaked the
    # handle from an inline open(); single-arg os.path.join was a no-op).
    with open('data/reading_dict.pkl', 'wb') as f:
        pickle.dump(reading_dict, f)
    with open('data/reading_dict.txt', 'w', encoding='utf-8') as f:
        for x in sorted_reading:
            words = [lexicon[idx][0] for idx in x[1]]
            f.write('{}\t{}\n'.format(x[0], ' '.join(words)))
    print('reading dict dumped')
class Ply(object):
    """Minimal ASCII PLY writer for colored point clouds."""

    def __init__(self, points, colors):
        # points: sequence of (x, y, z) floats; colors: matching (r, g, b)
        # uchar triples, one per point.
        self.__points = points
        self.__colors = colors

    def write(self, filename):
        """Write the PLY header plus one 'x y z r g b' line per point.

        Uses a context manager so the file is closed even if writing fails
        (the original left the handle open on error).
        """
        lines = self.__getLinesForHeader()
        with open(filename, 'w') as fd:
            for line in lines:
                fd.write('%s\n' % line)
            self.__writePoints(fd, self.__points, self.__colors)

    def __getLinesForHeader(self):
        # Standard ASCII PLY header advertising float xyz + uchar rgb.
        lines = ['ply', 'format ascii 1.0', 'comment generated by: kinectToPly', ('element vertex %s' % len(self.__points)), 'property float x', 'property float y', 'property float z', 'property uchar red', 'property uchar green', 'property uchar blue', 'end_header']
        return lines

    def __writePoints(self, fd, points, colors):
        # Stack xyz and rgb side by side; the single fmt string makes numpy
        # emit one "x y z r g b" record per row (delimiter is then unused).
        stacked = np.column_stack((points, colors))
        np.savetxt(fd, stacked, delimiter='\n', fmt='%f %f %f %d %d %d')
class ConvRes(nn.Module):
    """Dilated CNN feature extractor followed by two residual MLP blocks and
    a sigmoid binary-classification head.

    Args:
        input_size: (channels, height, width) of the expected input; used
            once at construction to infer the flattened feature size.
    """

    def __init__(self, input_size=(1, 257, 1091)):
        super(ConvRes, self).__init__()
        # Five conv/BN/ReLU/maxpool stages with increasing dilation.
        self.features = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=(3, 3), padding=(2, 2), dilation=(1, 1)),
            nn.BatchNorm2d(16), nn.ReLU(), nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(16, 32, kernel_size=(3, 3), padding=(2, 2), dilation=(2, 2)),
            nn.BatchNorm2d(32), nn.ReLU(), nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(32, 32, kernel_size=(3, 3), dilation=(4, 4)),
            nn.BatchNorm2d(32), nn.ReLU(), nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(32, 64, kernel_size=(3, 3), dilation=(8, 8)),
            nn.BatchNorm2d(64), nn.ReLU(), nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(64, 64, kernel_size=(3, 3), dilation=(1, 1)),
            nn.BatchNorm2d(64), nn.ReLU(), nn.MaxPool2d(kernel_size=2))
        # Infer the flattened feature size with a dry forward pass.
        self.flat_feats = self._get_flat_feats(input_size, self.features)
        self.ln1 = nn.Linear(self.flat_feats, 32)
        self.bn1 = nn.BatchNorm1d(32)
        self.re1 = nn.ReLU()
        self.ln2 = nn.Linear(32, 32)
        self.bn2 = nn.BatchNorm1d(32)
        self.re2 = nn.ReLU()
        self.ln3 = nn.Linear(32, 32)
        self.dp1 = nn.Dropout(p=0.6)
        self.bn3 = nn.BatchNorm1d(32)
        self.re3 = nn.ReLU()
        self.ln4 = nn.Linear(32, 32)
        self.bn4 = nn.BatchNorm1d(32)
        self.re4 = nn.ReLU()
        self.ln5 = nn.Linear(32, 32)
        self.dp2 = nn.Dropout(p=0.8)
        self.bn5 = nn.BatchNorm1d(32)
        self.re5 = nn.ReLU()
        self.ln6 = nn.Linear(32, 1)
        self.sigmoid = nn.Sigmoid()

        def _weights_init(m):
            # BUG FIX: the original tested isinstance(m, (nn.Conv2d or
            # nn.Linear)) — `or` collapses the "tuple" to nn.Conv2d only, so
            # Linear (and likewise BatchNorm1d) layers were never
            # initialized.  Proper isinstance tuples restore the intent.
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                kaiming_normal_(m.weight)
            elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

        self.apply(_weights_init)

    def _get_flat_feats(self, in_size, feats):
        """Return the per-sample flattened size of `feats` for `in_size` input."""
        f = feats(Variable(ones(1, *in_size)))
        return int(np.prod(f.size()[1:]))

    def forward(self, x):
        """Return a (batch, 1) tensor of sigmoid probabilities."""
        feats = self.features(x)
        flat_feats = feats.view((- 1), self.flat_feats)
        x = self.ln1(flat_feats)
        # Residual block 1: BN-ReLU-Linear twice, plus skip, then dropout.
        residual = x
        x = self.ln3(self.re2(self.bn2(self.ln2(self.re1(self.bn1(x))))))
        x += residual
        x = self.dp1(x)
        # Residual block 2, same shape.
        residual = x
        x = self.ln5(self.re4(self.bn4(self.ln4(self.re3(self.bn3(x))))))
        x += residual
        x = self.dp2(x)
        out = self.sigmoid(self.ln6(self.re5(self.bn5(x))))
        return out
def get_inst_num(FILENAME):
    """Count object instances per class in the VOC annotation paired with *FILENAME*.

    Maps a JPEGImages path to its Annotations XML, then tallies each
    <object>'s <name> into a 20-way float32 histogram via ``class_dict``.
    """
    annot_name = FILENAME.replace('JPEGImages', 'Annotations').replace('.jpg', '.xml')
    res = np.zeros([20], np.float32)
    root = ET.parse(annot_name).getroot()
    for child in root:
        if child.tag != 'object':
            continue
        for c in child:
            if c.tag == 'name':
                res[class_dict[c.text]] += 1
    return res
def dissl_resnet50_dNone_e100_m2(pretrained=True, **kwargs):
    """DISSL ResNet-50 variant: no dim override, 100 epochs, m=2."""
    return _dissl(
        base='resnet50',
        dim=None,
        sffx='_e100_m2',
        pretrained=pretrained,
        **kwargs,
    )
def add_clouds_texture(name: str='Clouds Texture', size: float=0.25, depth: int=2, nabla: float=0.025, brightness: float=1.0, contrast: float=1.0) -> bpy.types.CloudsTexture:
    """Create a Blender 'CLOUDS' texture and configure its noise parameters.

    Returns the newly created texture datablock.
    """
    clouds = bpy.data.textures.new(name, type='CLOUDS')
    # Map the friendlier argument names onto Blender's property names.
    clouds.noise_scale = size
    clouds.noise_depth = depth
    clouds.nabla = nabla
    clouds.intensity = brightness
    clouds.contrast = contrast
    return clouds
def lightgbm_eval_metric_f1(preds, dtrain):
    """LightGBM custom eval metric reporting (higher-is-better) F1."""
    target = dtrain.get_label()
    weight = dtrain.get_weight()
    unique_targets = np.unique(target)
    n_classes = len(unique_targets)
    if n_classes > 2:
        # Multiclass: LightGBM passes predictions flattened column-major;
        # reshape to (n_rows, n_classes).
        n_rows = int(preds.shape[0] / n_classes)
        preds = np.reshape(preds, (n_rows, n_classes), order='F')
    return ('f1', -negative_f1(target, preds, weight), True)
def load_model(model_path, cuda):
    """Load a pickled model checkpoint and point its planner at *cuda*.

    NOTE: torch.load unpickles arbitrary objects — only load trusted files.
    """
    checkpoint = os.path.join(model_path, 'model.bin')
    model = torch.load(checkpoint, map_location=cuda)
    model.planing_model.device = cuda
    return model
@pytest.mark.parametrize('input_dim, output_dim, hidden_sizes, std_hidden_sizes', different_std_settings)
def test_std_adaptive_network_output_values(input_dim, output_dim, hidden_sizes, std_hidden_sizes):
    """Check the independent-std Gaussian MLP against analytic values.

    With all-ones weights and no nonlinearity, every output equals
    input_dim * prod(hidden_sizes); the std head produces the same value as
    a log-std, so variance = exp(value)^2.

    BUG FIX: the original source had lost the ``@pytest.mark`` prefix,
    leaving a bare ``.parametrize(...)`` expression (a syntax error).
    """
    module = GaussianMLPIndependentStdModule(input_dim=input_dim, output_dim=output_dim, hidden_sizes=hidden_sizes, std_hidden_sizes=std_hidden_sizes, hidden_nonlinearity=None, hidden_w_init=nn.init.ones_, output_w_init=nn.init.ones_, std_hidden_nonlinearity=None, std_hidden_w_init=nn.init.ones_, std_output_w_init=nn.init.ones_)
    dist = module(torch.ones(input_dim))
    exp_mean = torch.full((output_dim,), (input_dim * torch.Tensor(hidden_sizes).prod().item()), dtype=torch.float)
    exp_variance = (input_dim * torch.Tensor(hidden_sizes).prod()).exp().pow(2).item()
    assert dist.mean.equal(exp_mean)
    assert dist.variance.equal(torch.full((output_dim,), exp_variance, dtype=torch.float))
    assert dist.rsample().shape == (output_dim,)
def get_prf(res):
    """Compute (precision, recall, f1) from a dict of 'TP'/'FP'/'FN' counts.

    With zero true positives, all three scores are 1.0 when there were no
    errors at all (nothing to find, nothing found) and 0.0 otherwise.
    """
    tp, fp, fn = res['TP'], res['FP'], res['FN']
    if tp == 0:
        score = 1.0 if (fp == 0 and fn == 0) else 0.0
        return (score, score, score)
    precision = 1.0 * tp / (tp + fp)
    recall = 1.0 * tp / (tp + fn)
    f1 = 2 * precision * recall / (precision + recall)
    return (precision, recall, f1)
def _child_names(tree):
    """Return the children of *tree*: subtrees become Nonterminals of their
    label, leaves are returned unchanged."""
    return [Nonterminal(child._label) if isinstance(child, Tree) else child
            for child in tree]
def _maybe_create_keypoints_keep_instance_predicate(cfg: CfgNode) -> Optional[InstancePredicate]:
    """Return a predicate keeping images with enough visible keypoints.

    Returns None when keypoints are disabled or no minimum is configured.
    """
    min_num_keypoints = cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
    if not (cfg.MODEL.KEYPOINT_ON and min_num_keypoints > 0):
        return None

    def has_sufficient_num_keypoints(instance: Instance) -> bool:
        # Keypoints are (x, y, v) triples; count those with visibility > 0.
        num_kpts = sum((np.array(ann['keypoints'][2::3]) > 0).sum()
                       for ann in instance['annotations'] if 'keypoints' in ann)
        return num_kpts >= min_num_keypoints

    return has_sufficient_num_keypoints
class RODDecode_Dil(nn.Module):
    """Decoder head for RODNet-style radar features.

    Three transposed 3D convs upsample the encoding, then two dilated 3D
    convs (with circular padding along the last axis) map to per-class
    confidence maps.
    """

    def __init__(self):
        super(RODDecode_Dil, self).__init__()
        # Upsampling path: each transposed conv doubles the spatial dims
        # (and, for the first two, the temporal dim as well).
        self.convt1 = nn.ConvTranspose3d(in_channels=256, out_channels=128, kernel_size=(4, 6, 6), stride=(2, 2, 2), padding=(1, 2, 2))
        self.convt2 = nn.ConvTranspose3d(in_channels=128, out_channels=64, kernel_size=(4, 6, 6), stride=(2, 2, 2), padding=(1, 2, 2))
        self.convt3 = nn.ConvTranspose3d(in_channels=64, out_channels=32, kernel_size=(3, 6, 6), stride=(1, 2, 2), padding=(1, 2, 2))
        # Dilated heads; dilation (1, 1, 25) spreads the kernel across the
        # last axis, matched by the 50-wide circular padding in forward()
        # — presumably the wrap-around azimuth axis, TODO confirm.
        self.convt5 = nn.Conv3d(in_channels=32, out_channels=16, kernel_size=(3, 5, 5), stride=(1, 1, 1), padding=(1, 2, 0), dilation=(1, 1, 25))
        self.convt6 = nn.Conv3d(in_channels=16, out_channels=n_class, kernel_size=(3, 5, 5), stride=(1, 1, 1), padding=(1, 2, 0), dilation=(1, 1, 25))
        self.relu = nn.ReLU()
        self.prelu = nn.PReLU()
        # NOTE(review): relu/sigmoid/tanh/upsample are defined but unused in
        # forward() below.
        self.sigmoid = nn.Sigmoid()
        self.tanh = nn.Tanh()
        # Target output size from config: (window, range bins, azimuth bins).
        self.upsample = nn.Upsample(size=(rodnet_configs['win_size'], radar_configs['ramap_rsize'], radar_configs['ramap_asize']), mode='nearest')

    def forward(self, x):
        """Upsample the encoding and emit per-class maps (no final activation)."""
        x = self.prelu(self.convt1(x))
        x = self.prelu(self.convt2(x))
        x = self.prelu(self.convt3(x))
        # Circular padding on the last axis before each dilated conv.
        x = F.pad(x, (50, 50, 0, 0, 0, 0), 'circular')
        x = self.prelu(self.convt5(x))
        x = F.pad(x, (50, 50, 0, 0, 0, 0), 'circular')
        x = self.convt6(x)
        return x
def inference_model(config_name, checkpoint, args, logger=None):
    """Run segmentation inference on ``args.img``.

    When ``args.aug`` is set and the test pipeline supports it, enables
    multi-scale + flip test-time augmentation; otherwise reports that TTA
    could not be enabled.  Optionally visualizes the result.
    """
    cfg = Config.fromfile(config_name)
    if args.aug:
        tta_stage = cfg.data.test.pipeline[1]
        if ('flip' in tta_stage) and ('img_scale' in tta_stage):
            # Multi-scale ratios + horizontal flip TTA.
            tta_stage.img_ratios = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
            tta_stage.flip = True
        elif logger is None:
            print(f'{config_name}: unable to start aug test', flush=True)
        else:
            logger.error(f'{config_name}: unable to start aug test')
    model = init_segmentor(cfg, checkpoint, device=args.device)
    result = inference_segmentor(model, args.img)
    if args.show:
        show_result_pyplot(model, args.img, result)
    return result
class Network(nn.Module):
    """RGB -> features -> RGB convolutional network with skip additions."""

    def __init__(self, img_ch, net_ch):
        super().__init__()
        # 1x1 convs map image channels into/out of the working width.
        self.from_rgb = nn.Sequential(nn.Conv2d(img_ch, (net_ch // 2), 1, 1, 0), nn.Conv2d((net_ch // 2), net_ch, 1, 1, 0))
        self.to_rgb = nn.Sequential(nn.Conv2d(net_ch, (net_ch // 2), 1, 1, 0), nn.Conv2d((net_ch // 2), img_ch, 1, 1, 0), nn.Tanh())
        self.layers = nn.Sequential(*[ConvBlock(net_ch, net_ch) for _ in range(6)])

    def forward(self, x):
        x = self.from_rgb(x)
        dense = [x]
        for l in self.layers:
            x = l(x)
            # NOTE(review): `dense` is never appended to, so this inner loop
            # only re-adds the initial embedding after every block.  If a
            # DenseNet-style accumulation was intended, a `dense.append(x)`
            # is missing — confirm against the original design.
            for d in dense:
                x = (x + d)
        x = self.to_rgb(x)
        return x
def make_empty_instances(h, w):
    """Create an Instances object for an (h, w) image with zero ground-truth objects."""
    empty = Instances((h, w))
    # All ground-truth fields are present but have zero entries.
    empty.gt_boxes = Boxes(torch.rand(0, 4))
    empty.gt_classes = torch.tensor([]).to(dtype=torch.int64)
    empty.gt_masks = BitMasks(torch.rand(0, h, w))
    return empty
def _binary_round(x):
    """Round *x* in the forward pass while passing gradients straight through.

    Overrides the gradient of 'Round' with 'Identity' (straight-through
    estimator) inside a dedicated name scope.
    """
    graph = tf.get_default_graph()
    with ops.name_scope('BinaryRound') as name:
        with graph.gradient_override_map({'Round': 'Identity'}):
            return tf.round(x, name=name)
class ChannelBasedDecoder(Decoder):
    """Decoder base that maps an active-phase genome onto channel configs.

    Filters inactive phases out of the genome, aligns the channel list with
    the surviving phases, and optionally expands each phase according to a
    per-phase repeat count.
    """

    def __init__(self, list_genome, channels, repeats=None):
        super().__init__(list_genome)
        self._model = None
        # Keep only active phases; channels align positionally.
        self._genome = self.get_effective_genome(list_genome)
        self._channels = channels[:len(self._genome)]
        if repeats is not None:
            # Collect repeat counts only for phases that survived filtering.
            active_repeats = []
            for idx, gene in enumerate(list_genome):
                if phase_active(gene):
                    active_repeats.append(repeats[idx])
            self.adjust_for_repeats(active_repeats)
        else:
            # No repeat expansion: every phase appears exactly once.
            self._repeats = [1 for _ in self._genome]
        if not self._genome:
            # Nothing active: the decoded model degenerates to identity.
            self._model = Identity()

    def adjust_for_repeats(self, repeats):
        """Expand genome/channels so phase i appears ``repeats[i]`` times."""
        self._repeats = repeats
        repeated_genome = []
        repeated_channels = []
        for i, repeat in enumerate(self._repeats):
            for j in range(repeat):
                if j == 0:
                    # First copy uses the phase's (in, out) channel pair.
                    repeated_channels.append((self._channels[i][0], self._channels[i][1]))
                else:
                    # Later copies keep the output width on both sides.
                    repeated_channels.append((self._channels[i][1], self._channels[i][1]))
                repeated_genome.append(self._genome[i])
        self._genome = repeated_genome
        self._channels = repeated_channels

    def build_layers(self, phases):
        """Interleave repeated phases with 2x2 max-pools; the final phase is
        appended without a trailing pool."""
        layers = []
        last_phase = phases.pop()
        for phase, repeat in zip(phases, self._repeats):
            for _ in range(repeat):
                layers.append(phase)
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
        layers.append(last_phase)
        return layers

    @staticmethod
    def get_effective_genome(genome):
        """Return only the active genes of *genome*.

        BUG FIX: this was declared as a plain method taking only ``genome``,
        so the ``self.get_effective_genome(list_genome)`` call in __init__
        passed ``self`` as ``genome`` and raised TypeError; it is a pure
        function of the genome, hence @staticmethod.
        """
        return [gene for gene in genome if phase_active(gene)]

    def get_model(self):
        """Subclasses must build and return the decoded model."""
        raise NotImplementedError()
class CamRender(Render):
    """Render subclass driven by a Camera.

    When not headless (egl=False), wires GLUT display/keyboard callbacks so
    the camera can be moved interactively.
    """

    def __init__(self, width=1600, height=1200, name='Cam Renderer', program_files=['simple.fs', 'simple.vs'], color_size=1, ms_rate=1, egl=False):
        Render.__init__(self, width, height, name, program_files, color_size, ms_rate=ms_rate, egl=egl)
        self.camera = None
        if (not egl):
            # GLUT is imported lazily into a module-level global so headless
            # (EGL) runs never need a display/GLUT installation.
            global GLUT
            import OpenGL.GLUT as GLUT
            GLUT.glutDisplayFunc(self.display)
            GLUT.glutKeyboardFunc(self.keyboard)

    def set_camera(self, camera):
        """Attach *camera* and cache its projection/model-view matrices."""
        self.camera = camera
        (self.projection_matrix, self.model_view_matrix) = camera.get_gl_matrix()

    def keyboard(self, key, x, y):
        """GLUT keyboard handler: WASD/space/x translate the camera along
        its direction/right/up axes; i/o shift the near and far planes."""
        eps = 1
        if (key == b'w'):
            self.camera.center += (eps * self.camera.direction)
        elif (key == b's'):
            self.camera.center -= (eps * self.camera.direction)
        if (key == b'a'):
            self.camera.center -= (eps * self.camera.right)
        elif (key == b'd'):
            self.camera.center += (eps * self.camera.right)
        if (key == b' '):
            self.camera.center += (eps * self.camera.up)
        elif (key == b'x'):
            self.camera.center -= (eps * self.camera.up)
        elif (key == b'i'):
            self.camera.near += (0.1 * eps)
            self.camera.far += (0.1 * eps)
        elif (key == b'o'):
            self.camera.near -= (0.1 * eps)
            self.camera.far -= (0.1 * eps)
        # Refresh cached matrices after any camera change.
        (self.projection_matrix, self.model_view_matrix) = self.camera.get_gl_matrix()

    def show(self):
        """Enter the GLUT main loop (only available when GLUT was wired up)."""
        if (GLUT is not None):
            GLUT.glutMainLoop()
def build_dataloaders(cfg, settings):
    """Build the train (and optional val) LTRLoader pipelines for tracking.

    Applies joint grayscale/flip augmentation plus per-split tensor and
    normalization transforms, wraps them in ViPTProcessing, samples frame
    pairs with TrackingSampler, and returns (loader_train, loader_val)
    where loader_val is None when no validation dataset is configured.
    """
    # Augmentations applied jointly to template and search images.
    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05), tfm.RandomHorizontalFlip(probability=0.5))
    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2), tfm.RandomHorizontalFlip_Norm(probability=0.5), tfm.Normalize(mean=cfg.DATA.MEAN, std=cfg.DATA.STD))
    transform_val = tfm.Transform(tfm.ToTensor(), tfm.Normalize(mean=cfg.DATA.MEAN, std=cfg.DATA.STD))
    output_sz = settings.output_sz
    search_area_factor = settings.search_area_factor
    # Crop/jitter processing shared by both splits (different transforms).
    data_processing_train = processing.ViPTProcessing(search_area_factor=search_area_factor, output_sz=output_sz, center_jitter_factor=settings.center_jitter_factor, scale_jitter_factor=settings.scale_jitter_factor, mode='sequence', transform=transform_train, joint_transform=transform_joint, settings=settings)
    data_processing_val = processing.ViPTProcessing(search_area_factor=search_area_factor, output_sz=output_sz, center_jitter_factor=settings.center_jitter_factor, scale_jitter_factor=settings.scale_jitter_factor, mode='sequence', transform=transform_val, joint_transform=transform_joint, settings=settings)
    settings.num_template = getattr(cfg.DATA.TEMPLATE, 'NUMBER', 1)
    settings.num_search = getattr(cfg.DATA.SEARCH, 'NUMBER', 1)
    sampler_mode = getattr(cfg.DATA, 'SAMPLER_MODE', 'causal')
    train_cls = getattr(cfg.TRAIN, 'TRAIN_CLS', False)
    print('sampler_mode', sampler_mode)
    dataset_train = sampler.TrackingSampler(datasets=names2datasets(cfg.DATA.TRAIN.DATASETS_NAME, settings, opencv_loader), p_datasets=cfg.DATA.TRAIN.DATASETS_RATIO, samples_per_epoch=cfg.DATA.TRAIN.SAMPLE_PER_EPOCH, max_gap=cfg.DATA.MAX_SAMPLE_INTERVAL, num_search_frames=settings.num_search, num_template_frames=settings.num_template, processing=data_processing_train, frame_sample_mode=sampler_mode, train_cls=train_cls)
    # Distributed runs (local_rank != -1) shard via DistributedSampler and
    # must not also shuffle in the loader.
    train_sampler = (DistributedSampler(dataset_train) if (settings.local_rank != (- 1)) else None)
    shuffle = (False if (settings.local_rank != (- 1)) else True)
    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=cfg.TRAIN.BATCH_SIZE, shuffle=shuffle, num_workers=cfg.TRAIN.NUM_WORKER, drop_last=True, stack_dim=1, sampler=train_sampler)
    if (cfg.DATA.VAL.DATASETS_NAME[0] is None):
        loader_val = None
    else:
        dataset_val = sampler.TrackingSampler(datasets=names2datasets(cfg.DATA.VAL.DATASETS_NAME, settings, opencv_loader), p_datasets=cfg.DATA.VAL.DATASETS_RATIO, samples_per_epoch=cfg.DATA.VAL.SAMPLE_PER_EPOCH, max_gap=cfg.DATA.MAX_SAMPLE_INTERVAL, num_search_frames=settings.num_search, num_template_frames=settings.num_template, processing=data_processing_val, frame_sample_mode=sampler_mode, train_cls=train_cls)
        val_sampler = (DistributedSampler(dataset_val) if (settings.local_rank != (- 1)) else None)
        loader_val = LTRLoader('val', dataset_val, training=False, batch_size=cfg.TRAIN.BATCH_SIZE, num_workers=cfg.TRAIN.NUM_WORKER, drop_last=True, stack_dim=1, sampler=val_sampler, epoch_interval=cfg.TRAIN.VAL_EPOCH_INTERVAL)
    return (loader_train, loader_val)
def read_tracks(filename):
    """Parse a CSV of agent tracks into Track objects keyed by track id.

    The file must be sorted by track id; the header row is validated column
    by column against the Key/KeyEnum schema.  Each subsequent row adds a
    MotionState to its track and keeps the first/last timestamps current.

    Returns:
        dict mapping track_id -> Track.
    """
    with open(filename) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        track_dict = dict()
        track_id = None
        # Iterate the reader lazily — the original materialized the whole
        # file with list(csv_reader) for no benefit.
        for i, row in enumerate(csv_reader):
            if i == 0:
                # Header sanity checks: column order must match KeyEnum.
                assert row[KeyEnum.track_id] == Key.track_id
                assert row[KeyEnum.frame_id] == Key.frame_id
                assert row[KeyEnum.time_stamp_ms] == Key.time_stamp_ms
                assert row[KeyEnum.agent_type] == Key.agent_type
                assert row[KeyEnum.x] == Key.x
                assert row[KeyEnum.y] == Key.y
                assert row[KeyEnum.vx] == Key.vx
                assert row[KeyEnum.vy] == Key.vy
                assert row[KeyEnum.psi_rad] == Key.psi_rad
                assert row[KeyEnum.length] == Key.length
                assert row[KeyEnum.width] == Key.width
                continue
            if int(row[KeyEnum.track_id]) != track_id:
                # New track begins: create and register it.
                track_id = int(row[KeyEnum.track_id])
                assert track_id not in track_dict, ('Line %i: Track id %i already in dict, track file not sorted properly' % ((i + 1), track_id))
                track = Track(track_id)
                track.agent_type = row[KeyEnum.agent_type]
                track.length = float(row[KeyEnum.length])
                track.width = float(row[KeyEnum.width])
                track.time_stamp_ms_first = int(row[KeyEnum.time_stamp_ms])
                track.time_stamp_ms_last = int(row[KeyEnum.time_stamp_ms])
                track_dict[track_id] = track
            track = track_dict[track_id]
            track.time_stamp_ms_last = int(row[KeyEnum.time_stamp_ms])
            ms = MotionState(int(row[KeyEnum.time_stamp_ms]))
            ms.x = float(row[KeyEnum.x])
            ms.y = float(row[KeyEnum.y])
            ms.vx = float(row[KeyEnum.vx])
            ms.vy = float(row[KeyEnum.vy])
            ms.psi_rad = float(row[KeyEnum.psi_rad])
            track.motion_states[ms.time_stamp_ms] = ms
    return track_dict
def Catfish(Node_List, args):
    """Re-initialize node 0's model/optimizer when a catfish model is configured.

    No-op when ``args.catfish`` is None.
    """
    if args.catfish is not None:
        Node_List[0].model = Node.init_model(args.catfish)
        Node_List[0].optimizer = Node.init_optimizer(Node_List[0].model, args)
def l1(arr1, arr2):
    """Mean absolute elementwise difference between two equal-length sequences."""
    total = 0.0
    for a, b in zip(arr1, arr2):
        total += np.abs(a - b)
    return total / len(arr1)
class feat_classifier(nn.Module):
    """Final linear classification head, optionally weight-normalized ('wn')."""

    def __init__(self, class_num, bottleneck_dim=256, type='linear'):
        super(feat_classifier, self).__init__()
        self.type = type
        fc = nn.Linear(bottleneck_dim, class_num)
        if type == 'wn':
            # Weight normalization on the classifier weights.
            fc = weightNorm(fc, name='weight')
        fc.apply(init_weights)
        self.fc = fc

    def forward(self, x):
        """Return class logits for features *x*."""
        return self.fc(x)
def _is_ci_fork_pull_request():
    """Return True when running a pull-request build on Travis or AppVeyor.

    Both CI systems expose a PR-specific variable only for PR builds.
    """
    if os.getenv('TRAVIS'):
        return bool(os.getenv('TRAVIS_PULL_REQUEST_BRANCH'))
    if os.getenv('APPVEYOR'):
        return bool(os.getenv('APPVEYOR_PULL_REQUEST_NUMBER'))
    return False
def OpenVINOModel(model, device='CPU'):
    """Factory wrapper: build an OpenVINOModel via a lazy relative import
    (avoids importing OpenVINO machinery until actually requested)."""
    from .core.model import OpenVINOModel as _CoreOpenVINOModel
    return _CoreOpenVINOModel(model, device)
def _context_for_ohem():
    """Build a Faster R-CNN roi_head (OHEM config) for use in tests.

    Inserts the repository root on sys.path so the test helper module can be
    imported, loads the detector config with pretrained weights disabled,
    and returns only the roi_head sub-module.
    """
    import sys
    from os.path import dirname
    # Three dirname() hops: climb from this file up to the repository root.
    sys.path.insert(0, dirname(dirname(dirname(__file__))))
    from test_forward import _get_detector_cfg
    model = _get_detector_cfg('faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py')
    # Never download pretrained weights inside a test context.
    model['pretrained'] = None
    from mmdet.models import build_detector
    context = build_detector(model).roi_head
    return context
class Level3(torch.nn.Module):
    """Thin wrapper module delegating to a single Level2 submodule."""

    def __init__(self):
        super().__init__()
        self.layer2 = Level2()

    def forward(self, x, y=None):
        """Forward *x* (and optional *y*) through the wrapped Level2."""
        out = self.layer2(x, y)
        return out
@app.route('/upload', methods=['POST'])
def upload():
    """Accept a multipart file upload and store it under a random subfolder.

    Returns the stored path as JSON, or an error message when no file was
    attached or the extension is not allowed.

    BUG FIXES: the route decorator had lost its ``@app.route`` prefix; the
    random target directory was never created before ``file.save``; and the
    returned path omitted the '/' between the folder and the filename.
    """
    if request.method == 'POST':
        if 'file' not in request.files:
            return jsonify('Need to pass argument filename to request! (empty)')
        file = request.files['file']
        # Random 24-char folder keeps concurrent uploads from colliding.
        file_dir = ''.join(random.choices(string.ascii_uppercase + string.digits, k=24))
        if allowed_file_video(file.filename) or allowed_file(file.filename):
            target_dir = os.path.join(app.config['UPLOAD_FOLDER'] + '/' + file_dir)
            os.makedirs(target_dir, exist_ok=True)
            file.save(os.path.join(target_dir, file.filename))
            return jsonify(app.config['UPLOAD_FOLDER'] + '/' + file_dir + '/' + file.filename)
        return jsonify('filename not allowed: ' + file.filename)
class Focal_Binary_Loss():
    """Binary focal loss as a gradient-boosting custom objective.

    Supplies the analytic gradient and hessian of the focal loss w.r.t.
    raw margin scores, suitable for XGBoost/LightGBM-style ``obj`` hooks.
    """

    def __init__(self, gamma_indct):
        # gamma_indct: focusing parameter gamma; larger values down-weight
        # easy examples more strongly.
        self.gamma_indct = gamma_indct

    def robust_pow(self, num_base, num_pow):
        """Sign-preserving power sign(x) * |x|**p.

        Avoids NaNs that a plain ``x ** p`` would produce for negative
        bases with fractional exponents.
        """
        return (np.sign(num_base) * (np.abs(num_base) ** num_pow))

    def focal_binary_object(self, pred, dtrain):
        """Return (grad, hess) of the focal loss for raw scores *pred*.

        Labels come from ``dtrain.get_label()``; *pred* are margin scores
        squashed with a sigmoid here.  The g1..g5 terms are the recurring
        sub-expressions of the analytic first/second derivatives —
        presumably following the standard focal-loss-for-GBM derivation
        (confirm against the reference derivation before modifying).
        """
        gamma_indct = self.gamma_indct
        label = dtrain.get_label()
        sigmoid_pred = (1.0 / (1.0 + np.exp((- pred))))
        # g1 = p(1-p): derivative of the sigmoid.
        g1 = (sigmoid_pred * (1 - sigmoid_pred))
        g2 = (label + (((- 1) ** label) * sigmoid_pred))
        g3 = ((sigmoid_pred + label) - 1)
        g4 = ((1 - label) - (((- 1) ** label) * sigmoid_pred))
        g5 = (label + (((- 1) ** label) * sigmoid_pred))
        # First derivative; the 1e-09 guards log(0).
        grad = ((((gamma_indct * g3) * self.robust_pow(g2, gamma_indct)) * np.log((g4 + 1e-09))) + (((- 1) ** label) * self.robust_pow(g5, (gamma_indct + 1))))
        # Second derivative assembled from two parts, scaled by g1.
        hess_1 = (self.robust_pow(g2, gamma_indct) + (((gamma_indct * ((- 1) ** label)) * g3) * self.robust_pow(g2, (gamma_indct - 1))))
        hess_2 = (((((- 1) ** label) * g3) * self.robust_pow(g2, gamma_indct)) / g4)
        hess = (((((hess_1 * np.log((g4 + 1e-09))) - hess_2) * gamma_indct) + ((gamma_indct + 1) * self.robust_pow(g5, gamma_indct))) * g1)
        return (grad, hess)
class RadarStackedHourglass(nn.Module):
    """Stacked-hourglass head over radar volumes (3D convs).

    Three conv stems lift the 2-channel input to 160 channels, then
    ``stacked_num`` hourglass stages each emit an intermediate confidence
    map; all stage outputs are returned for deep supervision.
    NOTE(review): relies on a module-level ``n_class`` constant and on
    RODEncode/RODDecode defined elsewhere — not visible in this chunk.
    """

    def __init__(self, stacked_num=1):
        super(RadarStackedHourglass, self).__init__()
        self.stacked_num = stacked_num
        # Stem: 2 -> 32 -> 64 -> 160 channels, spatial size preserved.
        self.conv1a = nn.Conv3d(in_channels=2, out_channels=32, kernel_size=(9, 5, 5), stride=(1, 1, 1), padding=(4, 2, 2))
        self.conv1b = nn.Conv3d(in_channels=32, out_channels=64, kernel_size=(9, 5, 5), stride=(1, 1, 1), padding=(4, 2, 2))
        self.conv1c = nn.Conv3d(in_channels=64, out_channels=160, kernel_size=(9, 5, 5), stride=(1, 1, 1), padding=(4, 2, 2))
        self.hourglass = []
        for i in range(stacked_num):
            # Per stage: encoder, decoder, confidence head, and a conv that
            # maps the confidence map back to feature space for the next stage.
            self.hourglass.append(nn.ModuleList([RODEncode(), RODDecode(), nn.Conv3d(in_channels=160, out_channels=n_class, kernel_size=(9, 5, 5), stride=(1, 1, 1), padding=(4, 2, 2)), nn.Conv3d(in_channels=n_class, out_channels=160, kernel_size=(9, 5, 5), stride=(1, 1, 1), padding=(4, 2, 2))]))
        self.hourglass = nn.ModuleList(self.hourglass)
        self.relu = nn.ReLU()
        self.bn1a = nn.BatchNorm3d(num_features=32)
        self.bn1b = nn.BatchNorm3d(num_features=64)
        self.bn1c = nn.BatchNorm3d(num_features=160)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Return a list of per-stage sigmoid confidence maps."""
        x = self.relu(self.bn1a(self.conv1a(x)))
        x = self.relu(self.bn1b(self.conv1b(x)))
        x = self.relu(self.bn1c(self.conv1c(x)))
        out = []
        for i in range(self.stacked_num):
            # Encoder returns skip tensors x1..x3 consumed by the decoder.
            (x, x1, x2, x3) = self.hourglass[i][0](x)
            x = self.hourglass[i][1](x, x1, x2, x3)
            confmap = self.hourglass[i][2](x)
            out.append(self.sigmoid(confmap))
            if (i < (self.stacked_num - 1)):
                # Feed the (re-projected) confidence back into the features.
                confmap_ = self.hourglass[i][3](confmap)
                x = (x + confmap_)
        return out
# NOTE(review): the next line is a truncated decorator from extraction —
# presumably ``@pytest.mark.parametrize`` — confirm against the original file.
.parametrize('a_val, b_val, x_val, y_val, vector', [(1.0, 1.0, 1.0, 1.0, [10.0, 20.0]), (5.0, 10.0, (- 2.0), 5.0, [0.0, (- 1.0)]), (0.0, 0.0, 1.1, 0.02, [0.0, 0.0]), ((- 2.2), (- 1.5), (- 12.3), 34.8, [2.2, 5.3]), ((- 1.5), 0.0, (- 0.002), 4.93, [0.1, (- 0.02)])])
def test_hessian_vector_product_2x2(a_val, b_val, x_val, y_val, vector):
    """Check _build_hessian_vector_product against a dense Hessian.

    Uses f(x, y) = a*x^2 + b*y^2, whose Hessian is diag(2a, 2b).
    """
    obs = [torch.tensor([a_val]), torch.tensor([b_val])]
    vector = torch.tensor([vector])
    x = torch.tensor(x_val, requires_grad=True)
    y = torch.tensor(y_val, requires_grad=True)

    def f():
        (a, b) = (obs[0], obs[1])
        return ((a * (x ** 2)) + (b * (y ** 2)))
    # Reference: materialize the full Hessian and multiply explicitly.
    expected_hessian = compute_hessian(f(), [x, y])
    expected_hvp = torch.mm(vector, expected_hessian).detach()
    f_Ax = _build_hessian_vector_product(f, [x, y])
    hvp = f_Ax(vector[0]).detach()
    assert np.allclose(hvp, expected_hvp, atol=1e-06)
def _itr_file(input, pattern):
print('Search Patterm:', pattern)
ptn = re.compile(pattern)
for (root, dir, files) in os.walk(input):
for fn in files:
abs_fn = os.path.normpath(os.path.join(root, fn))
m = ptn.match(abs_fn)
if m:
lang = m.groups()
(yield (lang, abs_fn)) |
def create_summary_metadata(description, metadata):
    """Build a SummaryMetadata proto carrying the label->name mapping.

    Serializes metadata['label_to_names'] (when present) into the plugin
    content so the plugin can recover class names at display time.
    """
    label_proto = LabelToNames()
    if 'label_to_names' in metadata:
        label_proto.label_to_names.update(metadata['label_to_names'])
    plugin_data = SummaryMetadata.PluginData(
        plugin_name=PLUGIN_NAME,
        content=label_proto.SerializeToString())
    return SummaryMetadata(summary_description=description, plugin_data=plugin_data)
class RetriBertTokenizerFast(BertTokenizerFast):
    """Fast RetriBERT tokenizer: BertTokenizerFast with RetriBERT resources.

    Only class-level configuration is overridden; tokenization behavior is
    inherited unchanged from BertTokenizerFast.
    """
    # Vocabulary file names and hub download maps (module-level constants).
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    # Paired slow (Python) tokenizer implementation.
    slow_tokenizer_class = RetriBertTokenizer
    # Note: deliberately only 'attention_mask' (no token_type_ids).
    model_input_names = ['attention_mask']
class WIKIDATA5MLoader():
    """Loader for the WIKIDATA5M inductive split: triples plus id lookup tables."""

    def __init__(self, path, download=False, download_path=None):
        self.path = path
        self.download = download
        self.download_path = download_path
        # Accumulates every entity/relation seen while loading triples so
        # the lookup tables can be built lazily in _load_lut.
        self.entity_list = list()
        self.relation_list = list()
        if self.download:
            downloader = Download_Data(dataset_path=self.download_path)
            downloader.WIKIDATA5M()
        self.train_name = 'wikidata5m_inductive_train.txt'
        self.valid_name = 'wikidata5m_inductive_valid.txt'
        self.test_name = 'wikidata5m_inductive_test.txt'

    def _load_data(self, path):
        """Read a tab-separated (head, relation, tail) triple file into a Datable."""
        heads = []
        relations = []
        tails = []
        total_path = os.path.join(self.path, path).replace('\\', '/')
        with open(total_path) as file:
            for line in file:
                (h, r, t) = line.strip().split('\t')
                heads.append(h)
                relations.append(r)
                tails.append(t)
                self.entity_list.append(h)
                self.entity_list.append(t)
                self.relation_list.append(r)
        datable = Datable()
        datable(['head', 'relation', 'tail'], [heads, relations, tails])
        return datable

    def load_train_data(self):
        return self._load_data(self.train_name)

    def load_valid_data(self):
        return self._load_data(self.valid_name)

    def load_test_data(self):
        return self._load_data(self.test_name)

    def load_all_data(self):
        """Load (train, valid, test) with one call."""
        train_data = self._load_data(self.train_name)
        valid_data = self._load_data(self.valid_name)
        test_data = self._load_data(self.test_name)
        return (train_data, valid_data, test_data)

    def _dump_index(self, total_path, names):
        """Write a {name: index} JSON for *names* to *total_path* (helper)."""
        lookuptable = LookUpTable()
        lookuptable.create_table(create_dic=True, item_list=names)
        index = {lookuptable['name'][i]: i for i in range(len(lookuptable))}
        # `with` ensures the file handle is closed (the old code leaked it).
        with open(total_path, 'w') as out:
            json.dump(index, out, indent=4, sort_keys=True)

    def _load_lut(self, path, category=None):
        """Return a LookUpTable mapping names to ids, creating the JSON if missing.

        category must be 'entity' or 'relation'; any other value returns
        None (unchanged from the original contract).
        """
        total_path = os.path.join(self.path, path).replace('\\', '/')
        if not os.path.exists(total_path):
            if category == 'entity':
                print('Creating entities.json...')
                self._dump_index(total_path, list(set(self.entity_list)))
            if category == 'relation':
                print('Creating relations.json...')
                self._dump_index(total_path, list(set(self.relation_list)))
        if category in ('entity', 'relation'):
            with open(total_path) as file:
                name2idx = json.load(file)
            lookuptable = LookUpTable()
            lookuptable.create_table(create_dic=False, str_dic=name2idx)
            return lookuptable

    def load_entity_lut(self):
        return self._load_lut(path='entities.json', category='entity')

    def load_relation_lut(self):
        # BUG FIX: category was misspelled 'realtion', so the relation table
        # was never created/loaded and this method always returned None.
        return self._load_lut(path='relations.json', category='relation')

    def load_all_lut(self):
        entity2idx = self._load_lut(path='entities.json', category='entity')
        relation2idx = self._load_lut(path='relations.json', category='relation')
        return (entity2idx, relation2idx)
def get_all_answers(data_dir, filtered_by=None):
    """Map each document stem in *data_dir* to its answers.

    Strips the 4-character extension from every filename to form the key,
    then delegates to get_answers_for_doc for the matching '<stem>.txt'.
    """
    stems = {name[:-4] for name in os.listdir(data_dir)}
    return {stem: get_answers_for_doc(stem + '.txt', data_dir, filtered_by=filtered_by)
            for stem in stems}
class UpSample(nn.Sequential):
    """Decoder block: upsample x to the skip map's size, concatenate, convolve.

    skip_input is the channel count of x plus the skip connection.
    """

    def __init__(self, skip_input, output_features):
        super(UpSample, self).__init__()
        self.convA = nn.Conv2d(skip_input, output_features, kernel_size=3, stride=1, padding=1)
        self.leakyreluA = nn.LeakyReLU(0.2)
        self.convB = nn.Conv2d(output_features, output_features, kernel_size=3, stride=1, padding=1)
        self.leakyreluB = nn.LeakyReLU(0.2)

    def forward(self, x, concat_with):
        """Bilinearly upsample x to concat_with's spatial size, fuse, refine."""
        target_size = [concat_with.size(2), concat_with.size(3)]
        up_x = F.interpolate(x, size=target_size, mode='bilinear', align_corners=True)
        fused = torch.cat([up_x, concat_with], dim=1)
        out = self.leakyreluA(self.convA(fused))
        out = self.leakyreluB(self.convB(out))
        return out
def train(cfg, local_rank, distributed):
    """End-to-end training driver for a detection model.

    Builds model/optimizer/scheduler from *cfg*, wraps them with NVIDIA
    apex mixed precision, optionally DistributedDataParallel, restores the
    checkpoint at cfg.MODEL.WEIGHT, then runs do_train. Returns the model.
    """
    model = build_detection_model(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)
    optimizer = make_optimizer(cfg, model)
    scheduler = make_lr_scheduler(cfg, optimizer)
    # apex amp: O1 = mixed precision, O0 = pure fp32; must wrap before DDP.
    use_mixed_precision = (cfg.DTYPE == 'float16')
    amp_opt_level = ('O1' if use_mixed_precision else 'O0')
    (model, optimizer) = amp.initialize(model, optimizer, opt_level=amp_opt_level)
    if distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], output_device=local_rank, broadcast_buffers=False)
    arguments = {}
    arguments['iteration'] = 0
    output_dir = cfg.OUTPUT_DIR
    # Only rank 0 writes checkpoints to disk.
    save_to_disk = (get_rank() == 0)
    checkpointer = DetectronCheckpointer(cfg, model, optimizer, scheduler, output_dir, save_to_disk)
    extra_checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT)
    # Resume bookkeeping (e.g. the iteration counter) from the checkpoint.
    arguments.update(extra_checkpoint_data)
    data_loader = make_data_loader(cfg, is_train=True, is_distributed=distributed, start_iter=arguments['iteration'])
    test_period = cfg.SOLVER.TEST_PERIOD
    if (test_period > 0):
        # In-training periodic validation; None disables it.
        data_loader_val = make_data_loader(cfg, is_train=False, is_distributed=distributed, is_for_period=True)
    else:
        data_loader_val = None
    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
    do_train(cfg, model, data_loader, data_loader_val, optimizer, scheduler, checkpointer, device, checkpoint_period, test_period, arguments)
    return model
def to_tree_str(sentence):
    """Render *sentence* as the binary-tree bracketing chosen by the encoder.

    Each selection index merges adjacent tokens i and i+1 into "( a b )"
    until a single tree string remains.
    NOTE(review): relies on module-level ``model``, ``prepare_input`` and
    ``word_tokenize`` defined elsewhere — confirm availability.
    """
    words = word_tokenize(sentence.lower())
    (enc_input, enc_length) = prepare_input(words)
    # The third output of the encoder holds per-step selection masks.
    selections = model.encoder.forward(enc_input, enc_length, return_select_masks=True)[2]
    # argmax of each mask gives the merge position; the final merge is at 0.
    selections = ([s[0].max(0)[1] for s in selections] + [0])
    tokens = words.copy()
    for i in selections:
        composed = f'( {tokens[i]} {tokens[(i + 1)]} )'
        # Replace the merged pair with its bracketed composition.
        del tokens[i:(i + 2)]
        tokens.insert(i, composed)
    assert (len(tokens) == 1)
    return tokens[0]
class LatentVariable(nn.Module):
    """Abstract latent variable holding an approximate posterior and a prior.

    Subclasses implement inference/generation; this base class provides the
    KL divergence between approx_post and prior, either analytically
    (Normal vs Normal) or via a single-sample Monte Carlo estimate.
    """

    def __init__(self, variable_config):
        super(LatentVariable, self).__init__()
        self.approx_post = self.prior = None
        self.variable_config = variable_config
        self.inference_procedure = None
        self.detach = True

    def infer(self, input):
        """Infer the approximate posterior from *input* (subclass hook)."""
        raise NotImplementedError

    def generate(self, input, gen, n_samples=1):
        """Produce prior parameters / samples (subclass hook)."""
        raise NotImplementedError

    def step(self):
        raise NotImplementedError

    def re_init(self):
        raise NotImplementedError

    def re_init_approx_posterior(self):
        raise NotImplementedError

    def kl_divergence(self, analytical=False):
        """KL(approx_post || prior); analytical path requires both Normal."""
        if not analytical:
            # Single-sample Monte Carlo estimate.
            sample = self.approx_post.sample()
            return self.approx_post.log_prob(sample) - self.prior.log_prob(sample)
        assert (type(self.approx_post) == type(self.prior) == Normal)
        mean = self.approx_post.mean
        log_var = self.approx_post.log_var
        # Add a sample axis when the posterior parameters lack one.
        if len(mean.data.shape) in [2, 4]:
            mean = mean.unsqueeze(1)
            log_var = log_var.unsqueeze(1)
        var_ratio = log_var.exp() / self.prior.log_var.exp()
        sq_term = (mean - self.prior.mean).pow(2) / self.prior.log_var.exp()
        # Closed-form Gaussian KL; 1e-07 guards log(0).
        return 0.5 * ((var_ratio + sq_term - 1) - (var_ratio + 1e-07).log())

    def inference_parameters(self):
        raise NotImplementedError

    def generative_parameters(self):
        raise NotImplementedError

    def approx_posterior_parameters(self):
        raise NotImplementedError

    def approx_posterior_gradients(self):
        raise NotImplementedError
class DTN(object):
    """Multi-modal (RGB/depth/IR) anti-spoofing network in TF1 graph style.

    A conv stem fuses the three modalities, several CRU residual units
    (with optional channel attention) extract features, and the SFL head
    emits a 2-way live/spoof logit pair plus a facial mask. Conv type and
    activation are selected globally via FLAGS.
    """

    def __init__(self):
        # Activation and conv op are configured via command-line FLAGS.
        if FLAGS.leaky_relu:
            self.active = tf.nn.leaky_relu
        else:
            self.active = tf.nn.relu
        if FLAGS.CDC:
            self.conv = Conv2d_cd
        else:
            self.conv = tf.layers.conv2d

    def forward(self, face, depth, IR, training=True):
        """Run the full network on the three modality tensors.

        NOTE(review): the final unpack names (binary, facial_mask) but SFL
        returns (mask, binary) — the tuple order looks swapped; confirm
        against callers before changing.
        """
        init = tf.variance_scaling_initializer(scale=2.0)
        # Fuse the three modalities along the channel axis.
        root = tf.concat([face, depth, IR], axis=(- 1))
        root = self.conv(root, filters=FLAGS.base_num_filters, kernel_size=5, padding='same', use_bias=False, name='root_conv')
        root = tf.layers.batch_normalization(root, training=training, name='root_bn')
        root = self.active(root)
        with tf.variable_scope('CRU/1', reuse=False):
            root = self.CRU(root, training=training)
        if FLAGS.attention:
            # Squeeze-style channel attention: global avg pool -> 1x1 conv.
            size = root.get_shape().as_list()
            att = tf.nn.avg_pool(root, ksize=[1, size[1], size[2], 1], strides=[1, size[1], size[2], 1], padding='SAME')
            att = tf.layers.conv2d(att, size[(- 1)], 1, padding='same', kernel_initializer=init, name='conv_att')
            root = tf.multiply(root, att)
        with tf.variable_scope('CRU/2', reuse=False):
            root = self.CRU(root, training=training)
        if FLAGS.attention:
            size = root.get_shape().as_list()
            att = tf.nn.avg_pool(root, ksize=[1, size[1], size[2], 1], strides=[1, size[1], size[2], 1], padding='SAME')
            att = tf.layers.conv2d(att, size[(- 1)], 1, padding='same', kernel_initializer=init, name='conv_att')
            root = tf.multiply(root, att)
        with tf.variable_scope('CRU/3', reuse=False):
            root = self.CRU(root, training=training)
        if FLAGS.attention:
            size = root.get_shape().as_list()
            att = tf.nn.avg_pool(root, ksize=[1, size[1], size[2], 1], strides=[1, size[1], size[2], 1], padding='SAME')
            att = tf.layers.conv2d(att, size[(- 1)], 1, padding='same', kernel_initializer=init, name='conv_att')
            root = tf.multiply(root, att)
        with tf.variable_scope('CRU/4'):
            # Last CRU keeps full resolution (no max pooling).
            feature = self.CRU(root, max_pool=False, training=training)
        if FLAGS.attention:
            size = feature.get_shape().as_list()
            att = tf.nn.avg_pool(feature, ksize=[1, size[1], size[2], 1], strides=[1, size[1], size[2], 1], padding='SAME')
            att = tf.layers.conv2d(att, size[(- 1)], 1, padding='same', kernel_initializer=init, name='conv_att')
            feature = tf.multiply(feature, att)
        with tf.variable_scope('CRU/5'):
            (binary, facial_mask) = self.SFL(feature)
        return (binary, facial_mask)

    def CRU(self, input, max_pool=True, training=True):
        """Convolutional Residual Unit: two residual conv pairs, optional pool."""
        conv1 = self.conv(input, filters=FLAGS.base_num_filters, kernel_size=3, padding='same', use_bias=False, name='conv1')
        conv1 = tf.layers.batch_normalization(conv1, training=training, name='bn1')
        conv1 = self.active(conv1)
        conv2 = self.conv(conv1, filters=FLAGS.base_num_filters, kernel_size=3, padding='same', use_bias=False, name='conv2')
        conv2 = tf.layers.batch_normalization(conv2, training=training, name='bn2')
        # First residual connection back to the unit input.
        conv2 = (input + conv2)
        conv2 = self.active(conv2)
        conv3 = self.conv(conv2, filters=FLAGS.base_num_filters, kernel_size=3, padding='same', use_bias=False, name='conv3')
        conv3 = tf.layers.batch_normalization(conv3, training=training, name='bn3')
        conv3 = self.active(conv3)
        conv4 = self.conv(conv3, filters=FLAGS.base_num_filters, kernel_size=3, padding='same', use_bias=False, name='conv4')
        conv4 = tf.layers.batch_normalization(conv4, training=training, name='bn4')
        # Second residual connection.
        out = (conv2 + conv4)
        out = self.active(out)
        if max_pool:
            out = tf.layers.max_pooling2d(out, pool_size=[3, 3], strides=[2, 2], padding='same')
        return out

    def SFL(self, input, training=True):
        """Supervised Feature Learning head: returns (mask, binary_logits)."""
        # Facial mask branch: 2-channel sigmoid map at input resolution.
        mask = self.conv(input, filters=2, kernel_size=3, padding='same', use_bias=False, name='mask_conv')
        mask = tf.nn.sigmoid(mask)
        # Binary branch: strided convs downsample to a 2-way logit pair.
        binary = self.conv(input, filters=FLAGS.base_num_filters, kernel_size=3, strides=2, padding='same', use_bias=False, name='binary_conv1')
        binary = tf.layers.batch_normalization(binary, training=training, name='bn1')
        binary = self.active(binary)
        binary = self.conv(binary, filters=FLAGS.base_num_filters, kernel_size=3, strides=2, padding='same', use_bias=False, name='binary_conv2')
        binary = tf.layers.batch_normalization(binary, training=training, name='bn2')
        binary = self.active(binary)
        binary = self.conv(binary, filters=(FLAGS.base_num_filters * 2), kernel_size=3, strides=2, padding='same', use_bias=False, name='binary_conv3')
        binary = tf.layers.batch_normalization(binary, training=training, name='bn3')
        binary = self.active(binary)
        binary = self.conv(binary, filters=(FLAGS.base_num_filters * 4), kernel_size=4, strides=2, padding='valid', use_bias=False, name='binary_conv4')
        binary = tf.layers.batch_normalization(binary, training=training, name='bn4')
        binary = self.active(binary)
        binary = tf.layers.flatten(binary)
        binary = tf.layers.dense(binary, units=256, use_bias=False, name='binary_fc1')
        binary = tf.layers.batch_normalization(binary, training=training, name='bn5')
        binary = self.active(binary)
        binary = tf.layers.dense(binary, units=2, use_bias=False, name='binary_fc2')
        return (mask, binary)
def run_test(folder_path, override_dict, test_path, snapshot_iter, is_large, save_img_data):
    """Restore a saved Pipeline from *folder_path* and run its full test pass.

    Loads hyperparameters from the pickled PARAM.p (a trusted local file —
    NOTE: pickle must never be fed untrusted data), merges *override_dict*
    on top, then evaluates the checkpoint at *snapshot_iter*.
    """
    print('Folder path: %s' % folder_path)
    param_file = os.path.join(folder_path, 'PARAM.p')
    with open(param_file, 'rb') as f:
        base_opt = pickle.load(f)
    opt = recursive_merge_dicts(base_opt, override_dict)
    vp = Pipeline(None, opt, model_dir=folder_path,
                  auto_save_hyperparameters=False, use_logging=False)
    print(vp.opt)
    with vp.graph.as_default():
        sess = vp.create_session()
        vp.run_full_test_from_checkpoint(sess, test_path=test_path,
                                         snapshot_iter=snapshot_iter,
                                         is_large=is_large,
                                         save_img_data=save_img_data)
def validate_graph_node(graph_def, node_names):
    """Check that every name in *node_names* exists in *graph_def*.

    Returns False for an empty request or on the first unknown name
    (after logging a warning); True when all names are present.
    """
    if not node_names:
        return False
    # Set membership is O(1) per lookup instead of scanning a list per name.
    all_node_names = {node.name for node in graph_def.node}
    for user_name in node_names:
        if user_name not in all_node_names:
            logger.warn("Node name {} specified in yaml doesn't exist in the model.".format(user_name))
            return False
    return True
('/ner', methods=['GET', 'POST'])
def ner():
sentence = request.values.get('sentence')
words = tokenize_toolkit.run(sentence)
ner_result = ner_toolkit.run(words)
return jsonify({'words': words, 'ner_result': [{'mention': words[entity['start']:entity['end']], 'start': entity['start'], 'end': entity['end']} for entity in ner_result]}) |
def compile_files(raw_dir, raw_files, prefix):
    """Merge parallel corpus shards into single raw-{prefix}.src/.trg files.

    raw_files maps 'src'/'trg' to aligned lists of shard paths. Returns the
    (src, trg) output paths; existing merged files are reused. Raises
    AssertionError when a shard pair has mismatched line counts.
    """
    src_fpath = os.path.join(raw_dir, f'raw-{prefix}.src')
    trg_fpath = os.path.join(raw_dir, f'raw-{prefix}.trg')
    if os.path.isfile(src_fpath) and os.path.isfile(trg_fpath):
        sys.stderr.write('Merged files found, skip the merging process.\n')
        return (src_fpath, trg_fpath)
    sys.stderr.write(f'Merge files into two files: {src_fpath} and {trg_fpath}.\n')
    with open(src_fpath, 'w') as src_outf, open(trg_fpath, 'w') as trg_outf:
        for (src_path, trg_path) in zip(raw_files['src'], raw_files['trg']):
            sys.stderr.write(f' Input files:\n- SRC: {src_path}, and\n- TRG: {trg_path}.\n')
            # Fixed: the old code rebound the loop variables (src_inf/trg_inf)
            # to the file handles inside the `with`, shadowing the paths.
            with open(src_path, newline='\n') as src_in, open(trg_path, newline='\n') as trg_in:
                # cntr counts src lines up and trg lines down; it must end at 0.
                cntr = 0
                for line in src_in:
                    cntr += 1
                    # Normalize stray carriage returns and re-terminate lines.
                    src_outf.write(line.replace('\r', ' ').strip() + '\n')
                for line in trg_in:
                    cntr -= 1
                    trg_outf.write(line.replace('\r', ' ').strip() + '\n')
                assert cntr == 0, 'Number of lines in two files are inconsistent.'
    return (src_fpath, trg_fpath)
def _test_cg_gpr(config: ConfigDense, model: GPR, Xnew: tf.Tensor) -> tf.Tensor:
    """Draw pathwise posterior samples from a GPR via conjugate-gradient updates.

    Joint prior draws at (X, Xnew) are generated in shards of
    config.shard_size, each corrected with a CG-based update conditioned on
    the training data; returns config.num_samples samples at Xnew with the
    mean function added back.
    """
    (X, y) = model.data
    Kff = model.kernel(X, full_cov=True)
    # Preconditioner rank: half of num_cond (kept at 1 when num_cond == 1).
    max_rank = (config.num_cond // (2 if (config.num_cond > 1) else 1))
    preconditioner = get_default_preconditioner(Kff, diag=model.likelihood.variance, max_rank=max_rank)
    count = 0
    # L_joint caches the joint factorization so later shards reuse it.
    L_joint = None
    samples = []
    while (count < config.num_samples):
        size = min(config.shard_size, (config.num_samples - count))
        ((f, fnew), L_joint) = common.sample_joint(model.kernel, X, Xnew, num_samples=size, L=L_joint)
        # CG solves the data-conditioning system for this shard of prior draws.
        update_fns = cg_update(model.kernel, X, y, (f + model.mean_function(X)), tol=1e-06, diag=model.likelihood.variance, max_iter=config.num_cond, preconditioner=preconditioner)
        samples.append((fnew + update_fns(Xnew)))
        count += size
    samples = tf.concat(samples, axis=0)
    if (model.mean_function is not None):
        samples += model.mean_function(Xnew)
    return samples
_model
def ssl_resnext101_32x16d(pretrained=True, **kwargs):
    """ResNeXt-101 32x16d; loads semi-supervised pretrained weights by default."""
    net = ResNet(Bottleneck, [3, 4, 23, 3], cardinality=32, base_width=16, **kwargs)
    net.default_cfg = default_cfgs['ssl_resnext101_32x16d']
    if pretrained:
        load_pretrained(net,
                        num_classes=kwargs.get('num_classes', 0),
                        in_chans=kwargs.get('in_chans', 3))
    return net
def test_eval_empty_globals():
    """eval_empty_globals must inject __builtins__ for None and for a user dict."""
    assert '__builtins__' in m.eval_empty_globals(None)
    user_globals = {}
    assert '__builtins__' in m.eval_empty_globals(user_globals)
    # The caller-supplied dict is mutated in place.
    assert '__builtins__' in user_globals
class QuantMeasure(nn.Module):
    """Measures activation ranges and (optionally) fake-quantizes inputs.

    In training (or measure) mode, quantization parameters are computed per
    batch — or optimized when cal_qparams — and folded into EMA running
    statistics; in eval mode the stored running range/zero point are used.
    With measure=True the input passes through unquantized and statistics
    use a cumulative (1/N) average instead of a fixed momentum.
    """

    def __init__(self, num_bits=8, shape_measure=(1,), flatten_dims=_DEFAULT_FLATTEN, inplace=False, dequantize=True, stochastic=False, momentum=0.1, measure=False, per_ch_input=False, reduce_dim=0, cal_qparams=False):
        super(QuantMeasure, self).__init__()
        # Running EMA statistics for the quantization offset and range.
        self.register_buffer('running_zero_point', torch.zeros(*shape_measure))
        self.register_buffer('running_range', torch.zeros(*shape_measure))
        self.measure = measure
        if self.measure:
            # Number of batches observed, to weight the cumulative average.
            self.register_buffer('num_measured', torch.zeros(1))
        self.flatten_dims = flatten_dims
        self.momentum = momentum
        self.dequantize = dequantize
        self.stochastic = stochastic
        self.inplace = inplace
        self.num_bits = num_bits
        self.per_ch_input = per_ch_input
        self.reduce_dim = reduce_dim
        self.cal_qparams = cal_qparams

    def forward(self, input, qparams=None):
        """Quantize *input* (or pass through when measuring); qparams may be supplied."""
        if (self.training or self.measure):
            if (qparams is None):
                if self.cal_qparams:
                    # Optimize (range, zero_point) by minimizing MSE quantization error.
                    init = np.array([tensor_range(input, pcq=False).item(), zero_point(input, pcq=False).item()])
                    res = opt.minimize((lambda p: quant_err(p, input, num_bits=self.num_bits, metric='mse')), init, method=methods[0])
                    qparams = QParams(range=input.new_tensor(res.x[0]), zero_point=input.new_tensor(res.x[1]), num_bits=self.num_bits)
                    print('Measure and optimize: bits - {}, error before - {:.6f}, error after {:.6f}'.format(self.num_bits, quant_err(init, input), res.fun))
                else:
                    reduce_dim = (None if self.per_ch_input else self.reduce_dim)
                    # 3D inputs reduce over batch and sequence dims together.
                    if ((input.dim() == 3) and (reduce_dim == 0)):
                        reduce_dim = [self.reduce_dim, 1]
                    qparams = calculate_qparams(input, num_bits=self.num_bits, flatten_dims=self.flatten_dims, reduce_dim=reduce_dim, per_ch_input=self.per_ch_input)
            with torch.no_grad():
                if self.measure:
                    # Cumulative average: weight N/(N+1) on history, 1/(N+1) on new.
                    momentum = (self.num_measured / (self.num_measured + 1))
                    self.num_measured += 1
                else:
                    momentum = self.momentum
                # EMA update of the running statistics.
                self.running_zero_point.mul_(momentum).add_((qparams.zero_point * (1 - momentum)))
                self.running_range.mul_(momentum).add_((qparams.range * (1 - momentum)))
        else:
            # Eval mode: quantize with the stored running statistics.
            qparams = QParams(range=self.running_range, zero_point=self.running_zero_point, num_bits=self.num_bits)
        if self.measure:
            return input
        else:
            # Per-channel-input mode quantizes along dim 1; transpose around it.
            if self.per_ch_input:
                input = input.transpose(0, 1)
            q_input = quantize(input, qparams=qparams, dequantize=self.dequantize, stochastic=self.stochastic, inplace=self.inplace)
            if self.per_ch_input:
                q_input = q_input.transpose(0, 1)
            return q_input
def mobilenetv2_100(pretrained=False, **kwargs):
    """MobileNet-V2 at width multiplier 1.0."""
    return _gen_mobilenet_v2('mobilenetv2_100', 1.0, pretrained=pretrained, **kwargs)
def _get_possible_module_path(paths):
ret = []
for p in paths:
p = Path(p)
for path in p.glob('*'):
if ((path.suffix in ['py', '.so']) or path.is_dir()):
if path.stem.isidentifier():
ret.append(path)
return ret |
class AutoModelForSeq2SeqLMWithValueHead(PreTrainedModelWrapper):
    """Seq2seq LM wrapper that adds a scalar value head for RL fine-tuning.

    Wraps an AutoModelForSeq2SeqLM and attaches a ValueHead over the last
    decoder hidden state, returning (logits, loss, value) from forward.
    """
    transformers_parent_class = AutoModelForSeq2SeqLM
    # Module-name fragments used to locate the base model's LM head.
    lm_head_namings = ['lm_head', 'embed_out', 'output_projection']
    # kwargs consumed by this wrapper rather than the base model.
    supported_args = ('summary_dropout_prob', 'v_head_initializer_range', 'v_head_init_strategy')

    def __init__(self, pretrained_model, **kwargs):
        super().__init__(pretrained_model)
        (v_head_kwargs, _, _) = self._split_kwargs(kwargs)
        self.is_encoder_decoder = True
        if (not self._has_lm_head()):
            raise ValueError('The model does not have a language model head, please use a model that has one.')
        self.v_head = ValueHead(self.pretrained_model.config, **v_head_kwargs)
        self._init_weights(**v_head_kwargs)

    def _has_lm_head(self):
        """True when any module name contains a known LM-head fragment."""
        for (name, module) in self.pretrained_model.named_modules():
            if any(((attribute in name) for attribute in self.lm_head_namings)):
                return True
        return False

    def post_init(self, state_dict):
        """Load value-head weights and align devices under hf_device_map.

        Strips the 'v_head.' prefix so the sub-module can load its slice of
        the combined state dict; when the base model is sharded across
        devices, moves the value head next to the LM head and installs a
        forward hook that keeps outputs on that device.
        """
        for k in list(state_dict.keys()):
            if ('v_head.' in k):
                state_dict[k.replace('v_head.', '')] = state_dict.pop(k)
        self.v_head.load_state_dict(state_dict, strict=False)
        del state_dict
        if hasattr(self.pretrained_model, 'hf_device_map'):
            if (('cpu' in self.pretrained_model.hf_device_map.values()) or ('disk' in self.pretrained_model.hf_device_map.values())):
                raise ValueError('The model is offloaded on CPU or disk - CPU & disk offloading is not supported for ValueHead models.')
            # Find the device hosting the LM head; the value head follows it.
            for (name, module) in self.pretrained_model.named_modules():
                if any(((attribute in name) for attribute in self.lm_head_namings)):
                    lm_head_device = module.weight.device
                    break
            self.v_head = self.v_head.to(lm_head_device)

            def set_device_hook(module, input, outputs):
                # Move every tensor output onto the LM-head device.
                new_output = ()
                for output in outputs:
                    if isinstance(output, torch.Tensor):
                        new_output += (output.to(lm_head_device),)
                    else:
                        new_output += (output,)
                return new_output
            self.register_forward_hook(set_device_hook)
            self.is_sequential_parallel = True

    def state_dict(self, *args, **kwargs):
        """Merged state dict: base model weights plus 'v_head.'-prefixed head weights."""
        if (not self.is_peft_model):
            pretrained_model_state_dict = self.pretrained_model.state_dict(*args, **kwargs)
        else:
            # PEFT models save adapters separately; only the head is exported.
            pretrained_model_state_dict = {}
        v_head_state_dict = self.v_head.state_dict(*args, **kwargs)
        for (k, v) in v_head_state_dict.items():
            pretrained_model_state_dict[f'v_head.{k}'] = v
        return pretrained_model_state_dict

    def push_to_hub(self, *args, **kwargs):
        """Attach the value head to the base model so it is uploaded together."""
        setattr(self.pretrained_model, 'v_head', self.v_head)
        return self.pretrained_model.push_to_hub(*args, **kwargs)

    def _init_weights(self, **kwargs):
        """Initialize the value head; default strategy leaves model init untouched."""
        initializer_range = kwargs.pop('v_head_initializer_range', 0.2)
        init_strategy = kwargs.pop('v_head_init_strategy', None)
        if (init_strategy is None):
            pass
        elif (init_strategy == 'normal'):
            self.v_head.summary.weight.data.normal_(mean=0.0, std=initializer_range)
            self.v_head.summary.bias.data.zero_()

    def forward(self, input_ids=None, past_key_values=None, attention_mask=None, **kwargs):
        """Run the base model and value head; returns (lm_logits, loss, value)."""
        kwargs['past_key_values'] = past_key_values
        # Prefix tuning injects its own past_key_values; do not forward ours.
        if (self.is_peft_model and (self.pretrained_model.active_peft_config.peft_type == 'PREFIX_TUNING')):
            kwargs.pop('past_key_values')
        base_model_output = self.pretrained_model(input_ids=input_ids, attention_mask=attention_mask, output_hidden_states=True, **kwargs)
        last_hidden_state = base_model_output.decoder_hidden_states[(- 1)]
        lm_logits = base_model_output.logits
        loss = base_model_output.loss
        value = self.v_head(last_hidden_state).squeeze((- 1))
        # Logits are upcast for numerically stable downstream loss/softmax.
        if (lm_logits.dtype != torch.float32):
            lm_logits = lm_logits.float()
        return (lm_logits, loss, value)

    def generate(self, *args, **kwargs):
        """Delegate generation to the wrapped base model."""
        return self.pretrained_model.generate(*args, **kwargs)
class E_senet(nn.Module):
    """SENet encoder wrapper exposing a 5-level feature pyramid.

    Keeps all but the last three children of *original_model*;
    num_features is accepted for interface compatibility but unused.
    """

    def __init__(self, original_model, num_features=2048):
        super(E_senet, self).__init__()
        self.base = nn.Sequential(*list(original_model.children())[:-3])

    def forward(self, x):
        """Return [stem-minus-last-child(x), stage1..stage4 activations]."""
        # Level 0: stage 0 without its final child.
        stem = nn.Sequential(*list(self.base[0].children())[:-1])
        feats = [stem(x)]
        out = self.base[0](x)
        for idx in range(1, 5):
            out = self.base[idx](out)
            feats.append(out)
        return feats
def activate_user(trace):
    """Mark the user with the given activation trace as active.

    Returns True when exactly one row was updated.
    """
    conn = getDb()
    with closing(conn.cursor()) as cur:
        # Parameterized query — the trace value is never interpolated.
        cur.execute('update users set inactive = 0 where activate_trace = %s', (trace,))
        conn.commit()
        return cur.rowcount == 1
class ResidualBlock(nn.Module):
    """Two 3x3 convolutions with a skip connection; optional normalization."""

    def __init__(self, in_features=64, norm_layer=nn.BatchNorm2d):
        super(ResidualBlock, self).__init__()
        self.relu = nn.ReLU(True)

        def conv():
            return nn.Conv2d(in_features, in_features, 3, 1, 1, bias=False)

        if norm_layer is None:
            # No normalization: conv -> relu -> conv.
            self.block = nn.Sequential(conv(), nn.ReLU(inplace=True), conv())
        else:
            self.block = nn.Sequential(conv(), norm_layer(in_features),
                                       nn.ReLU(inplace=True), conv(),
                                       norm_layer(in_features))

    def forward(self, x):
        """Apply the block, add the identity skip, and activate."""
        out = self.block(x) + x
        return self.relu(out)
def _erase_attention(feature, attention, drop_threshold):
(b, _, h, w) = attention.size()
pos = torch.ge(attention, drop_threshold)
mask = attention.new_ones((b, 1, h, w))
mask[pos.data] = 0.0
erased_feature = (feature * mask)
return erased_feature |
def norm2d(planes, num_channels_per_group=32):
    """Return GroupNorm2d for a positive group size, else BatchNorm2d."""
    print('num_channels_per_group:{}'.format(num_channels_per_group))
    if num_channels_per_group <= 0:
        return nn.BatchNorm2d(planes)
    return GroupNorm2d(planes, num_channels_per_group, affine=True,
                       track_running_stats=False)
_UTILS.register_module()
class UniformAssigner(BaseAssigner):
    """Uniform matching assigner (YOLOF-style): top-k L1 matching per GT.

    For each ground-truth box, the match_times closest predictions and the
    match_times closest anchors (by center-size L1 distance) become
    positives; candidates with IoU below pos_ignore_thr are ignored, as are
    predictions whose best IoU exceeds neg_ignore_thr.
    """

    def __init__(self, pos_ignore_thr: float, neg_ignore_thr: float, match_times: int=4, iou_calculator: ConfigType=dict(type='BboxOverlaps2D')):
        self.match_times = match_times
        self.pos_ignore_thr = pos_ignore_thr
        self.neg_ignore_thr = neg_ignore_thr
        self.iou_calculator = TASK_UTILS.build(iou_calculator)

    def assign(self, pred_instances: InstanceData, gt_instances: InstanceData, gt_instances_ignore: Optional[InstanceData]=None) -> AssignResult:
        """Assign each prediction a GT index (0 = negative, -1 = ignore)."""
        gt_bboxes = gt_instances.bboxes
        gt_labels = gt_instances.labels
        priors = pred_instances.priors
        bbox_pred = pred_instances.decoder_priors
        (num_gts, num_bboxes) = (gt_bboxes.size(0), bbox_pred.size(0))
        # Default: every prediction is background (0), label unset (-1).
        assigned_gt_inds = bbox_pred.new_full((num_bboxes,), 0, dtype=torch.long)
        assigned_labels = bbox_pred.new_full((num_bboxes,), (- 1), dtype=torch.long)
        if ((num_gts == 0) or (num_bboxes == 0)):
            # Degenerate case: return an all-background result with empty extras.
            if (num_gts == 0):
                assigned_gt_inds[:] = 0
            assign_result = AssignResult(num_gts, assigned_gt_inds, None, labels=assigned_labels)
            assign_result.set_extra_property('pos_idx', bbox_pred.new_empty(0, dtype=torch.bool))
            assign_result.set_extra_property('pos_predicted_boxes', bbox_pred.new_empty((0, 4)))
            assign_result.set_extra_property('target_boxes', bbox_pred.new_empty((0, 4)))
            return assign_result
        # L1 cost in center-size space, for predictions and for anchors.
        cost_bbox = torch.cdist(bbox_xyxy_to_cxcywh(bbox_pred), bbox_xyxy_to_cxcywh(gt_bboxes), p=1)
        cost_bbox_priors = torch.cdist(bbox_xyxy_to_cxcywh(priors), bbox_xyxy_to_cxcywh(gt_bboxes), p=1)
        C = cost_bbox.cpu()
        C1 = cost_bbox_priors.cpu()
        # Per GT column: indices of the match_times cheapest candidates.
        index = torch.topk(C, k=self.match_times, dim=0, largest=False)[1]
        index1 = torch.topk(C1, k=self.match_times, dim=0, largest=False)[1]
        # Flattened candidate list: prediction-based then anchor-based matches.
        indexes = torch.cat((index, index1), dim=1).reshape((- 1)).to(bbox_pred.device)
        pred_overlaps = self.iou_calculator(bbox_pred, gt_bboxes)
        anchor_overlaps = self.iou_calculator(priors, gt_bboxes)
        (pred_max_overlaps, _) = pred_overlaps.max(dim=1)
        (anchor_max_overlaps, _) = anchor_overlaps.max(dim=0)
        # High-IoU predictions not chosen as positives are ignored (-1).
        ignore_idx = (pred_max_overlaps > self.neg_ignore_thr)
        assigned_gt_inds[ignore_idx] = (- 1)
        # GT index for every candidate slot (repeated for both match sets).
        pos_gt_index = torch.arange(0, C1.size(1), device=bbox_pred.device).repeat((self.match_times * 2))
        pos_ious = anchor_overlaps[(indexes, pos_gt_index)]
        # Low-IoU candidate matches are ignored rather than made positive.
        pos_ignore_idx = (pos_ious < self.pos_ignore_thr)
        pos_gt_index_with_ignore = (pos_gt_index + 1)
        pos_gt_index_with_ignore[pos_ignore_idx] = (- 1)
        assigned_gt_inds[indexes] = pos_gt_index_with_ignore
        if (gt_labels is not None):
            assigned_labels = assigned_gt_inds.new_full((num_bboxes,), (- 1))
            pos_inds = torch.nonzero((assigned_gt_inds > 0), as_tuple=False).squeeze()
            if (pos_inds.numel() > 0):
                # assigned_gt_inds is 1-based for positives; shift back for lookup.
                assigned_labels[pos_inds] = gt_labels[(assigned_gt_inds[pos_inds] - 1)]
        else:
            assigned_labels = None
        assign_result = AssignResult(num_gts, assigned_gt_inds, anchor_max_overlaps, labels=assigned_labels)
        # Extra tensors consumed by the loss (e.g. YOLOF's bbox regression).
        assign_result.set_extra_property('pos_idx', (~ pos_ignore_idx))
        assign_result.set_extra_property('pos_predicted_boxes', bbox_pred[indexes])
        assign_result.set_extra_property('target_boxes', gt_bboxes[pos_gt_index])
        return assign_result
def filter_instances(ds, instance_tokens):
    """Keep tokens of non-pedestrian agents whose instance mode is 'incoming'.

    Errors raised by get_inst_mode are reported (best-effort) and the
    offending token is skipped rather than aborting the whole batch.
    """
    kept = []
    for inst_token in instance_tokens:
        instance = ds.get('instance', inst_token)
        agent = ds.get('agent', instance['agent_token'])
        if agent['type'] in {'Pedestrian', 'Undefined'}:
            continue
        try:
            if ds.get_inst_mode(inst_token) != 'incoming':
                continue
            kept.append(inst_token)
        except Exception:
            print('\nError occured for instance %s' % inst_token)
            traceback.print_exc()
    return kept
class MetaConv2d(MetaModule):
    """Conv2d whose weight/bias live in buffers so they can be meta-updated.

    Mirrors nn.Conv2d's constructor; parameters are stored via to_var as
    differentiable buffers instead of nn.Parameters.
    """

    def __init__(self, *args, **kwargs):
        super().__init__()
        # Build a throwaway Conv2d just to harvest its config and init.
        template = nn.Conv2d(*args, **kwargs)
        self.stride = template.stride
        self.padding = template.padding
        self.dilation = template.dilation
        self.groups = template.groups
        self.register_buffer('weight', to_var(template.weight.data, requires_grad=True))
        if template.bias is None:
            self.register_buffer('bias', None)
        else:
            self.register_buffer('bias', to_var(template.bias.data, requires_grad=True))

    def forward(self, x):
        return F.conv2d(x, self.weight, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)

    def named_leaves(self):
        """Expose (name, tensor) pairs for the meta-learnable leaves."""
        return [('weight', self.weight), ('bias', self.bias)]
_module()  # NOTE(review): looks like a mangled registration decorator (e.g. `@HOOKS.register_module()`) — confirm against upstream.
class NumClassCheckHook(Hook):
    """Hook that verifies, before every train/val epoch, that each head's
    ``num_classes`` matches ``len(dataset.CLASSES)``.
    """

    def _check_head(self, runner):
        """Compare every head's ``num_classes`` with the dataset classes.

        Args:
            runner: training runner holding the model, data loader and logger.
        """
        model = runner.model
        dataset = runner.data_loader.dataset
        if dataset.CLASSES is None:
            # Fixed message: original had the words "and check" fused together.
            runner.logger.warning(
                f'Please set `CLASSES` in the {dataset.__class__.__name__} and '
                f'check if it is consistent with the `num_classes` of head')
        else:
            for name, module in model.named_modules():
                # Class-agnostic heads (RPN variants, backbones, semantic
                # heads) are exempt from the check.
                if hasattr(module, 'num_classes') and not isinstance(
                        module, (RPNHead, VGG, FusedSemanticHead, GARPNHead)):
                    # Fixed message grammar ("does not match") and the
                    # previously unbalanced parenthesis around len(CLASSES).
                    assert module.num_classes == len(dataset.CLASSES), (
                        f'The `num_classes` ({module.num_classes}) in '
                        f'{module.__class__.__name__} of '
                        f'{model.__class__.__name__} does not match the '
                        f'length of `CLASSES` ({len(dataset.CLASSES)}) in '
                        f'{dataset.__class__.__name__}')

    def before_train_epoch(self, runner):
        """Run the consistency check at the start of each training epoch."""
        self._check_head(runner)

    def before_val_epoch(self, runner):
        """Run the consistency check at the start of each validation epoch."""
        self._check_head(runner)
class Transformer_16(nn.Module):
    """Transformer-based anomaly/prototype network for 16 hosts.

    Combines a GAT over a star-shaped feature graph with a Transformer
    encoder and two decoders: one scoring per-host anomalies (2 logits each),
    one emitting per-host prototype vectors compared against
    ``self.prototype``.
    """

    def __init__(self):
        super(Transformer_16, self).__init__()
        self.name = 'Transformer_16'
        self.lr = 0.0001
        self.n_hosts = 16
        # Three features per host (assumes 3 metrics per host — TODO confirm).
        feats = (3 * self.n_hosts)
        self.n_feats = (3 * self.n_hosts)
        self.n_window = 3
        self.n_latent = 10
        self.n_hidden = 16
        self.n = ((self.n_window * self.n_feats) + (self.n_hosts * self.n_hosts))
        # Star graph: every feature node points at one extra aggregation node
        # (index n_feats); consumed by the GAT below.
        src_ids = torch.tensor(list(range(self.n_feats)))
        dst_ids = torch.tensor(([self.n_feats] * self.n_feats))
        self.gat = GAT(dgl.graph((src_ids, dst_ids)), self.n_window, self.n_window)
        # Lift a feats-dim vector up to the transformer model width 2*feats+1.
        self.time_encoder = nn.Sequential(nn.Linear(feats, ((feats * 2) + 1)))
        self.pos_encoder = PositionalEncoding(((feats * 2) + 1), 0.1, self.n_window)
        encoder_layers = TransformerEncoderLayer(d_model=((feats * 2) + 1), nhead=1, dropout=0.1)
        self.encoder = TransformerEncoder(encoder_layers, 1)
        # Anomaly branch: decoder + linear head to (normal, anomalous) per host.
        a_decoder_layers = TransformerDecoderLayer(d_model=((feats * 2) + 1), nhead=1, dropout=0.1)
        self.anomaly_decoder = TransformerDecoder(a_decoder_layers, 1)
        self.anomaly_decoder2 = nn.Sequential(nn.Linear(((((feats * 2) + 1) * self.n_window) * self.n_window), (2 * self.n_hosts)))
        self.softm = nn.Softmax(dim=1)
        # Prototype branch: same-shaped decoder + head to PROTO_DIM per host.
        p_decoder_layers = TransformerDecoderLayer(d_model=((feats * 2) + 1), nhead=1, dropout=0.1)
        self.prototype_decoder = TransformerDecoder(p_decoder_layers, 1)
        self.prototype_decoder2 = nn.Sequential(nn.Linear(((((feats * 2) + 1) * self.n_window) * self.n_window), (PROTO_DIM * self.n_hosts)))
        # Class prototypes kept outside autograd (no grad, double precision).
        self.prototype = [torch.rand(PROTO_DIM, requires_grad=False, dtype=torch.double) for _ in range(3)]

    def encode(self, t, s):
        """Encode window ``t`` into transformer memory (``s`` is unused here)."""
        t = torch.squeeze(t, 1)
        # Append a zero column so the graph gains the aggregation-node slot.
        graph = torch.cat((t, torch.zeros(self.n_window, 1)), dim=1)
        gat_t = self.gat(torch.t(graph))
        gat_t = torch.t(gat_t)
        # Concatenate the raw window with the GAT output (presumably yielding
        # the model width 2*feats+1 — confirm against GAT's output size).
        o = torch.cat((t, gat_t), dim=1)
        # Standard transformer input scaling before positional encoding.
        t = (o * math.sqrt(self.n_feats))
        t = self.pos_encoder(t)
        memory = self.encoder(t)
        return memory

    def anomaly_decode(self, t, memory):
        """Decode memory into per-host score pairs, shape (-1, 1, 2)."""
        anomaly_scores = self.anomaly_decoder(t, memory)
        anomaly_scores = self.anomaly_decoder2(anomaly_scores.view((- 1))).view((- 1), 1, 2)
        return anomaly_scores

    def prototype_decode(self, t, memory):
        """Decode memory into one PROTO_DIM prototype vector per host."""
        prototypes = self.prototype_decoder(t, memory)
        prototypes = self.prototype_decoder2(prototypes.view((- 1))).view((- 1), PROTO_DIM)
        return prototypes

    def forward(self, t, s):
        """Return ``(anomaly_scores, prototypes)`` for window ``t``.

        ``s`` is threaded through to :meth:`encode`, which currently ignores it.
        """
        # Repeat the encoded time step across the window dimension.
        encoded_t = self.time_encoder(t).unsqueeze(dim=1).expand((- 1), self.n_window, (- 1))
        t = t.unsqueeze(dim=1)
        memory = self.encode(t, s)
        anomaly_scores = self.anomaly_decode(encoded_t, memory)
        prototypes = self.prototype_decode(encoded_t, memory)
        return (anomaly_scores, prototypes)
class Sampler(abc.ABC):
    """Minimal sampler interface: subclasses collect rollout paths.

    Fix: the three methods had empty bodies (a SyntaxError); docstring bodies
    restore validity while keeping the no-op default behaviour.
    """

    def start_worker(self):
        """Initialise any parallel workers before sampling begins."""

    def obtain_samples(self, itr, batch_size, whole_paths):
        """Collect and return sample paths for iteration ``itr``."""

    def shutdown_worker(self):
        """Tear down workers started by :meth:`start_worker`."""
def load_img_info(files, split):
    """Build the image-info dict for one (image, ground-truth) file pair.

    Args:
        files: tuple of (image path, ground-truth annotation path).
        split: either 'training' (XML annotations) or 'test' (TXT annotations).

    Returns:
        Image-info dict populated with file names, size and annotations.

    Raises:
        NotImplementedError: for any split other than 'training'/'test'.
    """
    assert isinstance(files, tuple)
    assert isinstance(split, str)
    img_file, gt_file = files
    img = mmcv.imread(img_file, 'unchanged')
    # The immediate parent directory names the split subfolder.
    split_name = osp.basename(osp.dirname(img_file))
    img_info = dict(
        file_name=osp.join(split_name, osp.basename(img_file)),
        height=img.shape[0],
        width=img.shape[1],
        segm_file=osp.join(split_name, osp.basename(gt_file)))
    if split == 'training':
        return load_xml_info(gt_file, img_info)
    if split == 'test':
        return load_txt_info(gt_file, img_info)
    raise NotImplementedError
def DefineActions():
    """Enumerate the textual action templates for the optical-setup search.

    Builds 'BS'/'LI' actions for every path pair in the module-level
    ``PossiblePath`` table, 'Reflection'/'DP' actions for the first six
    entries of ``PossiblePathNum``, and 'OAMHolo' actions with hologram
    orders 1..len(HOM_list).

    Returns:
        List of action template strings (with 'XXX' placeholders).
    """
    actions = []
    # Beam splitters and (presumably) lenses over every ordered path pair.
    for ii in range(len(PossiblePath)):
        PosA = PossiblePath[ii][0]
        PosB = PossiblePath[ii][1]
        actions.append((((('BS(XXX,' + PosA) + ',') + PosB) + ')'))
        actions.append((((('LI(XXX,' + PosA) + ',') + PosB) + ')'))
    # Single-path reflections and Dove prisms.
    for ii in range(6):
        Pos = PossiblePathNum[ii]
        actions.append((('Reflection(XXX,' + Pos) + ')'))
        actions.append((('DP(XXX,' + Pos) + ')'))
    # Hologram orders -5..-1, 1..5 (zero excluded).
    nHOM = 5
    HOM_list = list(range((- nHOM), (nHOM + 1)))
    HOM_list.remove(0)
    for ii in range(1, (len(HOM_list) + 1)):
        # NOTE(review): `Pos` here is the stale value from the previous loop
        # (the last PossiblePathNum entry), and the appended order is the loop
        # counter `ii` rather than a value from HOM_list — confirm intended.
        actions.append((((('OAMHolo(XXX,' + Pos) + ',') + str(ii)) + ')'))
    return actions
class WriteTSV(ResultWriter):
    """Write transcription segments as tab-separated rows.

    Columns: millisecond start, millisecond end, segment text (tabs in the
    text are replaced with spaces so the column structure survives).
    """

    extension: str = 'tsv'

    def write_result(self, result: dict, file: TextIO, options: dict):
        print('start', 'end', 'text', sep='\t', file=file)
        for segment in result['segments']:
            row = [
                str(round(1000 * segment['start'])),
                str(round(1000 * segment['end'])),
                segment['text'].strip().replace('\t', ' '),
            ]
            print('\t'.join(row), file=file, flush=True)
def cli_main():
    """Entry point: parse training args and dispatch to single- or
    multi-process training depending on the distributed configuration.
    """
    parser = options.get_training_parser()
    args = options.parse_args_and_arch(parser)
    if (args.distributed_init_method is None):
        # Try to infer a rendezvous endpoint (e.g. from cluster env vars).
        distributed_utils.infer_init_method(args)
    if (args.distributed_init_method is not None):
        # Explicit distributed setup: spawn one process per local GPU unless
        # spawning was disabled by the user.
        if ((torch.cuda.device_count() > 1) and (not args.distributed_no_spawn)):
            start_rank = args.distributed_rank
            # Rank is assigned per spawned process inside distributed_main.
            args.distributed_rank = None
            torch.multiprocessing.spawn(fn=distributed_main, args=(args, start_rank), nprocs=torch.cuda.device_count())
        else:
            distributed_main(args.device_id, args)
    elif (args.distributed_world_size > 1):
        # Single-node multi-GPU: rendezvous over a random localhost TCP port.
        assert (args.distributed_world_size <= torch.cuda.device_count())
        port = random.randint(10000, 20000)
        args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port)
        args.distributed_rank = None
        if ((max(args.update_freq) > 1) and (args.ddp_backend != 'no_c10d')):
            print('| NOTE: you may get better performance with: --ddp-backend=no_c10d')
        torch.multiprocessing.spawn(fn=distributed_main, args=(args,), nprocs=args.distributed_world_size)
    else:
        # Plain single-process training.
        main(args)
class DatasetFolder(data.Dataset):
    """Generic folder dataset: one subdirectory per class, samples loaded
    lazily via ``loader`` in ``__getitem__``.
    """

    def __init__(self, root, loader, extensions, transform=None, target_transform=None):
        classes, class_to_idx = self._find_classes(root)
        samples = make_dataset(root, class_to_idx, extensions)
        if not samples:
            raise RuntimeError((('Found 0 files in subfolders of: ' + root) + '\nSupported extensions are: ') + ','.join(extensions))
        self.root = root
        self.loader = loader
        self.extensions = extensions
        self.classes = classes
        self.class_to_idx = class_to_idx
        self.samples = samples
        self.targets = [target for _, target in samples]
        self.transform = transform
        self.target_transform = target_transform

    def _find_classes(self, dir):
        """Return sorted class-folder names and their name->index mapping."""
        entries = os.listdir(dir)
        classes = sorted(d for d in entries if os.path.isdir(os.path.join(dir, d)))
        class_to_idx = {name: idx for idx, name in enumerate(classes)}
        return (classes, class_to_idx)

    def __getitem__(self, index):
        """Load one sample; apply the optional transforms before returning."""
        path, target = self.samples[index]
        sample = self.loader(path)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return (sample, target)

    def __len__(self):
        return len(self.samples)

    def __repr__(self):
        parts = [('Dataset ' + self.__class__.__name__) + '\n']
        parts.append(' Number of datapoints: {}\n'.format(self.__len__()))
        parts.append(' Root Location: {}\n'.format(self.root))
        tmp = ' Transforms (if any): '
        parts.append('{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + (' ' * len(tmp)))))
        tmp = ' Target Transforms (if any): '
        parts.append('{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + (' ' * len(tmp)))))
        return ''.join(parts)
class CatBoostEncoderTransformer(AutotabularPreprocessingAlgorithm):
    """Pipeline step wrapping a CatBoost target encoder (``CBEncoder``)."""

    def __init__(self, cols=None, random_state: Optional[np.random.RandomState]=None):
        self.cols = cols
        self.random_state = random_state
        # Fix: initialise so transform() on an unfitted instance raises the
        # intended NotImplementedError instead of an AttributeError.
        self.preprocessor = None

    def fit(self, X: PIPELINE_DATA_DTYPE, y: Optional[PIPELINE_DATA_DTYPE]=None) -> 'CatBoostEncoderTransformer':
        """Fit the underlying CatBoost encoder on (X, y) and return self."""
        self.preprocessor = CBEncoder(cols=self.cols)
        self.preprocessor.fit(X, y)
        return self

    def transform(self, X: PIPELINE_DATA_DTYPE) -> PIPELINE_DATA_DTYPE:
        """Apply the fitted encoder to X.

        Raises:
            NotImplementedError: if called before :meth:`fit`.
        """
        if self.preprocessor is None:
            raise NotImplementedError()
        return self.preprocessor.transform(X)

    # Fix: these were declared without `self`, so calling them on an instance
    # raised TypeError; they are static by design.
    @staticmethod
    def get_properties(dataset_properties: Optional[DATASET_PROPERTIES_TYPE]=None) -> Dict[(str, Optional[Union[(str, int, bool, Tuple)]])]:
        return {'shortname': 'CatBoostEncoderTransformer', 'name': 'CatBoostEncoder Transformer', 'handles_regression': False, 'handles_classification': True, 'handles_multiclass': True, 'handles_multilabel': True, 'handles_multioutput': True, 'handles_sparse': True, 'handles_dense': True, 'input': (DENSE, SPARSE, UNSIGNED_DATA), 'output': (INPUT,)}

    @staticmethod
    def get_hyperparameter_search_space(dataset_properties: Optional[DATASET_PROPERTIES_TYPE]=None) -> ConfigurationSpace:
        # No tunable hyperparameters for this step.
        return ConfigurationSpace()
class SpatialAttentionBlock3d(nn.Module):
    """Self-attention over the flattened spatial axis of a 5-D volume.

    Query/key/judge use anisotropic 1-D convolutions along different axes;
    the learned scalar ``gamma`` (initialised to zero) gates the residual.
    """

    def __init__(self, in_channels):
        super(SpatialAttentionBlock3d, self).__init__()
        reduced = in_channels // 8
        self.query = nn.Conv3d(in_channels, reduced, kernel_size=(1, 3, 1), padding=(0, 1, 0))
        self.key = nn.Conv3d(in_channels, reduced, kernel_size=(3, 1, 1), padding=(1, 0, 0))
        self.judge = nn.Conv3d(in_channels, reduced, kernel_size=(1, 1, 3), padding=(0, 0, 1))
        self.value = nn.Conv3d(in_channels, in_channels, kernel_size=1)
        self.gamma = nn.Parameter(torch.zeros(1))
        self.softmax = nn.Softmax(dim=(- 1))

    def forward(self, x):
        """Apply spatial attention; output shape equals input shape."""
        batch, channels, height, width, depth = x.size()
        voxels = (width * height) * depth
        # (B, voxels, reduced) after permuting channel to the last axis.
        q = self.query(x).view(batch, (- 1), voxels).permute(0, 2, 1)
        k = self.key(x).view(batch, (- 1), voxels)
        j = self.judge(x).view(batch, (- 1), voxels).permute(0, 2, 1)
        # Two (voxels x voxels) affinities composed into one attention map.
        affinity = torch.matmul(torch.matmul(q, k), torch.matmul(j, k))
        affinity = self.softmax(affinity)
        v = self.value(x).view(batch, (- 1), voxels)
        attended = torch.matmul(v, affinity).view(batch, channels, height, width, depth)
        # gamma starts at 0, so the block is the identity at initialisation.
        return (self.gamma * attended) + x
class Sign2TextTransformerEncoder(FairseqEncoder):
    """Transformer encoder over sign-language feature sequences.

    Subsamples input features with strided 1-D convolutions, adds positional
    embeddings, then applies a stack of transformer layers. The whole encoder
    can be frozen (run under ``no_grad``) for the first
    ``encoder_freezing_updates`` updates.
    """

    def __init__(self, cfg):
        super().__init__(None)  # no source dictionary for feature inputs
        self.encoder_freezing_updates = cfg.encoder_freezing_updates
        self.num_updates = 0
        self.dropout_module = FairseqDropout(p=cfg.dropout, module_name=self.__class__.__name__)
        # Standard sqrt(d_model) input scaling, optionally disabled.
        self.embed_scale = math.sqrt(cfg.encoder_embed_dim)
        if cfg.no_scale_embedding:
            self.embed_scale = 1.0
        self.padding_idx = 1
        # Conv front-end reducing temporal length before the transformer.
        self.subsample = Conv1dSubsampler(get_num_feats(SignFeatsType[cfg.feats_type], cfg.body_parts.split(','), cfg.feat_dims.split(',')), cfg.conv_channels, cfg.encoder_embed_dim, [int(k) for k in cfg.conv_kernel_sizes.split(',')], [int(k) for k in cfg.conv_strides.split(',')])
        self.embed_positions = PositionalEmbedding(cfg.max_source_positions, cfg.encoder_embed_dim, self.padding_idx)
        self.transformer_layers = nn.ModuleList([TransformerEncoderLayer(cfg) for _ in range(cfg.encoder_layers)])
        if cfg.encoder_normalize_before:
            self.layer_norm = LayerNorm(cfg.encoder_embed_dim)
        else:
            self.layer_norm = None

    def _forward(self, src_tokens, src_lengths, return_all_hiddens=False):
        """Core forward pass returning the fairseq encoder-out dictionary."""
        # Presumably time-major (T x B x C) after subsampling — the positions
        # below are transposed to match. TODO confirm against Conv1dSubsampler.
        (x, input_lengths) = self.subsample(src_tokens, src_lengths)
        x = (self.embed_scale * x)
        encoder_padding_mask = lengths_to_padding_mask(input_lengths)
        positions = self.embed_positions(encoder_padding_mask).transpose(0, 1)
        x += positions
        x = self.dropout_module(x)
        encoder_states = []
        for layer in self.transformer_layers:
            x = layer(x, encoder_padding_mask)
            if return_all_hiddens:
                encoder_states.append(x)
        if (self.layer_norm is not None):
            x = self.layer_norm(x)
        return {'encoder_out': [x], 'encoder_padding_mask': ([encoder_padding_mask] if encoder_padding_mask.any() else []), 'encoder_embedding': [], 'encoder_states': encoder_states, 'src_tokens': [], 'src_lengths': []}

    def forward(self, src_tokens, src_lengths, return_all_hiddens=False):
        """Forward pass; runs under ``no_grad`` while the encoder is frozen."""
        if (self.num_updates < self.encoder_freezing_updates):
            with torch.no_grad():
                x = self._forward(src_tokens, src_lengths, return_all_hiddens=return_all_hiddens)
        else:
            x = self._forward(src_tokens, src_lengths, return_all_hiddens=return_all_hiddens)
        return x

    def reorder_encoder_out(self, encoder_out, new_order):
        """Reorder encoder output for beam search (batch dim re-indexing).

        Note: ``encoder_states`` is mutated in place.
        """
        # encoder_out tensors are time-major, so batch is dim 1 here...
        new_encoder_out = ([] if (len(encoder_out['encoder_out']) == 0) else [x.index_select(1, new_order) for x in encoder_out['encoder_out']])
        # ...while the padding mask and embeddings are batch-major (dim 0).
        new_encoder_padding_mask = ([] if (len(encoder_out['encoder_padding_mask']) == 0) else [x.index_select(0, new_order) for x in encoder_out['encoder_padding_mask']])
        new_encoder_embedding = ([] if (len(encoder_out['encoder_embedding']) == 0) else [x.index_select(0, new_order) for x in encoder_out['encoder_embedding']])
        encoder_states = encoder_out['encoder_states']
        if (len(encoder_states) > 0):
            for (idx, state) in enumerate(encoder_states):
                encoder_states[idx] = state.index_select(1, new_order)
        return {'encoder_out': new_encoder_out, 'encoder_padding_mask': new_encoder_padding_mask, 'encoder_embedding': new_encoder_embedding, 'encoder_states': encoder_states, 'src_tokens': [], 'src_lengths': []}

    def set_num_updates(self, num_updates):
        """Track the update counter used by the freezing schedule."""
        super().set_num_updates(num_updates)
        self.num_updates = num_updates
class MinibatchLayer(lasagne.layers.Layer):
    """Minibatch-discrimination layer (theano/lasagne).

    Projects each sample through ``num_kernels`` kernels of width
    ``dim_per_kernel``, computes pairwise L1 distances across the batch, and
    appends per-sample closeness features to the flattened input so a GAN
    discriminator can detect batch-level similarity (mode collapse).
    """

    def __init__(self, incoming, num_kernels, dim_per_kernel=5, theta=lasagne.init.Normal(0.05), log_weight_scale=lasagne.init.Constant(0.0), b=lasagne.init.Constant((- 1.0)), **kwargs):
        super(MinibatchLayer, self).__init__(incoming, **kwargs)
        self.num_kernels = num_kernels
        num_inputs = int(np.prod(self.input_shape[1:]))
        self.theta = self.add_param(theta, (num_inputs, num_kernels, dim_per_kernel), name='theta')
        self.log_weight_scale = self.add_param(log_weight_scale, (num_kernels, dim_per_kernel), name='log_weight_scale')
        # Weight-normalised parameterisation: direction from theta, magnitude
        # from exp(log_weight_scale).
        self.W = (self.theta * (T.exp(self.log_weight_scale) / T.sqrt(T.sum(T.square(self.theta), axis=0))).dimshuffle('x', 0, 1))
        self.b = self.add_param(b, (num_kernels,), name='b')

    def get_output_shape_for(self, input_shape):
        # Flattened input plus one extra feature per kernel.
        return (input_shape[0], (np.prod(input_shape[1:]) + self.num_kernels))

    def get_output_for(self, input, init=False, **kwargs):
        if (input.ndim > 2):
            input = input.flatten(2)
        activation = T.tensordot(input, self.W, [[1], [0]])
        # Pairwise L1 distances between samples; the huge diagonal term
        # effectively removes each sample's zero distance to itself.
        abs_dif = (T.sum(abs((activation.dimshuffle(0, 1, 2, 'x') - activation.dimshuffle('x', 1, 2, 0))), axis=2) + (1000000.0 * T.eye(input.shape[0]).dimshuffle(0, 'x', 1)))
        if init:
            # Data-dependent initialisation: rescale distances per kernel and
            # record the matching update for log_weight_scale.
            mean_min_abs_dif = (0.5 * T.mean(T.min(abs_dif, axis=2), axis=0))
            abs_dif /= mean_min_abs_dif.dimshuffle('x', 0, 'x')
            self.init_updates = [(self.log_weight_scale, (self.log_weight_scale - T.log(mean_min_abs_dif).dimshuffle(0, 'x')))]
        # Closeness feature: sum of exp(-distance) over the batch.
        f = T.sum(T.exp((- abs_dif)), axis=2)
        if init:
            # Centre the features and initialise the bias to cancel the mean.
            mf = T.mean(f, axis=0)
            f -= mf.dimshuffle('x', 0)
            self.init_updates.append((self.b, (- mf)))
        else:
            f += self.b.dimshuffle('x', 0)
        return T.concatenate([input, f], axis=1)
def validate_pytorch_model(platform, device_type, model_file, input_file, mace_out_file, input_names, input_shapes, input_data_formats, output_names, output_shapes, output_data_formats, validation_threshold, input_data_types, output_data_types, log_file):
    """Run a TorchScript model on the given inputs and compare its outputs
    against MACE's, reporting divergences beyond ``validation_threshold``.
    """
    import torch
    loaded_model = torch.jit.load(model_file)
    pytorch_inputs = []
    for idx in range(len(input_names)):
        raw = load_data(common.formatted_file_name(input_file, input_names[idx]), input_data_types[idx])
        raw = raw.reshape(input_shapes[idx])
        # MACE feeds NHWC; PyTorch expects NCHW for 4-D tensors.
        if (input_data_formats[idx] == common.DataFormat.NHWC) and (len(input_shapes[idx]) == 4):
            raw = raw.transpose((0, 3, 1, 2))
        pytorch_inputs.append(torch.from_numpy(raw))
    with torch.no_grad():
        pytorch_outputs = loaded_model(*pytorch_inputs)
    # Normalise the model's return value to a sequence of tensors.
    if isinstance(pytorch_outputs, torch.Tensor):
        pytorch_outputs = [pytorch_outputs]
    elif not isinstance(pytorch_outputs, (list, tuple)):
        print('return type {} unsupported yet'.format(type(pytorch_outputs)))
        sys.exit(1)
    for idx in range(len(output_names)):
        torch_value = pytorch_outputs[idx].numpy()
        out_file_name = common.formatted_file_name(mace_out_file, output_names[idx])
        mace_out_value = load_data(out_file_name, output_data_types[idx])
        mace_out_value, real_shape, real_format = get_real_out_value_shape_df(platform, mace_out_value, output_shapes[idx], output_data_formats[idx])
        compare_output(platform, device_type, output_names[idx], mace_out_value, torch_value, validation_threshold, log_file, real_shape, real_format)
def pdist_torch(emb1, emb2):
    """Pairwise Euclidean distance matrix between two sets of row vectors.

    Uses the ||a||^2 + ||b||^2 - 2ab expansion; distances are clamped at
    1e-12 before the square root for numerical stability.

    Args:
        emb1: tensor of shape (m, d).
        emb2: tensor of shape (n, d).

    Returns:
        Tensor of shape (m, n) with dist[i, j] = ||emb1[i] - emb2[j]||.
    """
    sq1 = (emb1 ** 2).sum(dim=1).unsqueeze(1)   # (m, 1)
    sq2 = (emb2 ** 2).sum(dim=1).unsqueeze(0)   # (1, n)
    # Broadcasting yields the full (m, n) squared-distance matrix.
    squared = sq1 + sq2 - 2.0 * torch.matmul(emb1, emb2.t())
    return squared.clamp(min=1e-12).sqrt()
class MrpcProcessor(DataProcessor):
    """Processor for the MRPC paraphrase data set (GLUE TSV layout)."""

    def get_train_examples(self, data_dir):
        """Read train.tsv and build InputExamples."""
        logger.info('LOOKING AT {}'.format(os.path.join(data_dir, 'train.tsv')))
        path = os.path.join(data_dir, 'train.tsv')
        return self._create_examples(self._read_tsv(path), 'train')

    def get_dev_examples(self, data_dir):
        """Read dev.tsv and build InputExamples."""
        path = os.path.join(data_dir, 'dev.tsv')
        return self._create_examples(self._read_tsv(path), 'dev')

    def get_test_examples(self, data_dir):
        """Read test.tsv and build InputExamples."""
        path = os.path.join(data_dir, 'test.tsv')
        return self._create_examples(self._read_tsv(path), 'test')

    def get_labels(self):
        """Binary paraphrase labels."""
        return ['0', '1']

    def _create_examples(self, lines, set_type):
        """Turn TSV rows into InputExamples, skipping the header row.

        The test split has no gold labels, so the first label is used as a
        placeholder there.
        """
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue  # header row
            guid = '%s-%s' % (set_type, i)
            label = self.get_labels()[0] if set_type == 'test' else line[0]
            examples.append(InputExample(guid=guid, text_a=line[3], text_b=line[4], label=label))
        return examples
def levi_hassner_bn(nlabels, images, pkeep, is_training):
    """Levi & Hassner age/gender CNN with batch normalisation (TF1 + slim).

    Args:
        nlabels: number of output classes.
        images: input image batch tensor.
        pkeep: dropout keep probability for the fully connected layers.
        is_training: batch-norm training-mode flag.

    Returns:
        Unscaled logits tensor of shape [batch, nlabels].
    """
    batch_norm_params = {'is_training': is_training, 'trainable': True, 'decay': 0.9997, 'epsilon': 0.001, 'variables_collections': {'beta': None, 'gamma': None, 'moving_mean': ['moving_vars'], 'moving_variance': ['moving_vars']}}
    weight_decay = 0.0005
    weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
    with tf.variable_scope('LeviHassnerBN', 'LeviHassnerBN', [images]) as scope:
        # Shared arg scope: L2 regularisation plus bias/weight initialisers.
        with tf.contrib.slim.arg_scope([convolution2d, fully_connected], weights_regularizer=weights_regularizer, biases_initializer=tf.constant_initializer(1.0), weights_initializer=tf.random_normal_initializer(stddev=0.005), trainable=True):
            # Convolutions additionally get batch normalisation.
            with tf.contrib.slim.arg_scope([convolution2d], weights_initializer=tf.random_normal_initializer(stddev=0.01), normalizer_fn=batch_norm, normalizer_params=batch_norm_params):
                conv1 = convolution2d(images, 96, [7, 7], [4, 4], padding='VALID', biases_initializer=tf.constant_initializer(0.0), scope='conv1')
                pool1 = max_pool2d(conv1, 3, 2, padding='VALID', scope='pool1')
                conv2 = convolution2d(pool1, 256, [5, 5], [1, 1], padding='SAME', scope='conv2')
                pool2 = max_pool2d(conv2, 3, 2, padding='VALID', scope='pool2')
                conv3 = convolution2d(pool2, 384, [3, 3], [1, 1], padding='SAME', biases_initializer=tf.constant_initializer(0.0), scope='conv3')
                pool3 = max_pool2d(conv3, 3, 2, padding='VALID', scope='pool3')
                # Flatten assumes pool3 is 6x6x384 — presumably a 227x227
                # input; confirm against the input pipeline.
                flat = tf.reshape(pool3, [(- 1), ((384 * 6) * 6)], name='reshape')
                full1 = fully_connected(flat, 512, scope='full1')
                drop1 = tf.nn.dropout(full1, pkeep, name='drop1')
                full2 = fully_connected(drop1, 512, scope='full2')
                drop2 = tf.nn.dropout(full2, pkeep, name='drop2')
    # Output layer deliberately uses raw tf.Variable (no slim regularisers).
    with tf.variable_scope('output') as scope:
        weights = tf.Variable(tf.random_normal([512, nlabels], mean=0.0, stddev=0.01), name='weights')
        biases = tf.Variable(tf.constant(0.0, shape=[nlabels], dtype=tf.float32), name='biases')
        output = tf.add(tf.matmul(drop2, weights), biases, name=scope.name)
    return output
def train_calibration(calibration_loader, model_rgb, model_depth, model_discriminator, model_estimator, optimizer_dis, optimizer_estimator, epoch, key_min):
    """Run one calibration epoch: train the depth estimator and the
    discriminator while the RGB/depth saliency backbones stay frozen.

    Relies on module-level globals: ``opt`` (config), ``writer``
    (tensorboard), ``smo_L1``/``cross_E`` (losses), ``clip_gradient`` and
    ``total_step``.

    Args:
        calibration_loader: dataloader yielding (images, gts, depths, name).
        model_rgb / model_depth: frozen backbones (eval mode).
        model_discriminator / model_estimator: modules being trained.
        optimizer_dis / optimizer_estimator: their optimisers.
        epoch: current epoch index.
        key_min: checkpoint-suffix selecting which snapshot to reload.
    """
    # Backbones frozen, calibration heads trained.
    model_rgb.eval()
    model_depth.eval()
    model_discriminator.train()
    model_estimator.train()
    for (i, pack) in enumerate(tqdm(calibration_loader), start=1):
        iteration = (i + (epoch * len(calibration_loader)))
        optimizer_dis.zero_grad()
        optimizer_estimator.zero_grad()
        (images, gts, depths, name) = pack
        images = Variable(images)
        gts = Variable(gts)
        depths = Variable(depths)
        cuda = torch.cuda.is_available()
        if cuda:
            images = images.cuda()
            gts = gts.cuda()
            depths = depths.cuda()
        '~~~Your Framework~~~'
        # Frozen-backbone forward: no gradients through the RGB model.
        with torch.no_grad():
            if (epoch == opt.calib_flag):
                # Reload the best snapshot (selected by key_min) once, at the
                # calibration-start epoch.
                if opt.is_ResNet:
                    model_rgb.load_state_dict(torch.load((('./ckpt/DCF_Resnet/' + 'DCF_rgb.pth') + key_min)))
                    model_depth.load_state_dict(torch.load((('./ckpt/DCF_Resnet/' + 'DCF_depth.pth') + key_min)))
                else:
                    model_rgb.load_state_dict(torch.load((('./ckpt/DCF_VGG/' + 'DCF_rgb.pth') + key_min)))
                    model_depth.load_state_dict(torch.load((('./ckpt/DCF_VGG/' + 'DCF_depth.pth') + key_min)))
            if cuda:
                model_rgb.cuda()
            (_, _, x3_r, x4_r, x5_r) = model_rgb(images)
            # Detach so estimator gradients never reach the backbone.
            (x3_, x4_, x5_) = (x3_r.detach(), x4_r.detach(), x5_r.detach())
        # Estimator step: predict depth from RGB features, smooth-L1 loss.
        pred_depth = model_estimator(images, x3_, x4_, x5_)
        loss_dep = smo_L1(pred_depth, depths)
        loss_dep.backward()
        clip_gradient(optimizer_estimator, opt.clip)
        optimizer_estimator.step()
        # Discriminator step: score raw depths against ground truth labels.
        score = model_discriminator(depths)
        loss_dis = cross_E(score, gts.squeeze())
        loss_dis.backward()
        clip_gradient(optimizer_dis, opt.clip)
        optimizer_dis.step()
        if ((i % 20) == 0):
            print('{} Epoch [{:03d}/{:03d}], Step [{:04d}/{:04d}], Loss_dis: {:.4f} Loss_estimator: {:0.4f}'.format(datetime.now(), epoch, opt.epoch, i, total_step, loss_dis.data, loss_dep.data))
            writer.add_scalar('Loss/discriminator', loss_dis.item(), iteration)
            writer.add_scalar('Loss/estimator', loss_dep.item(), iteration)
            writer.add_images('Results/pred_depth', pred_depth, iteration)
    # Epoch-level checkpointing (every 5 epochs).
    if opt.is_ResNet:
        save_path = 'ckpt/DCF_Resnet/'
    else:
        save_path = 'ckpt/DCF_VGG/'
    if (not os.path.exists(save_path)):
        os.makedirs(save_path)
    if (((epoch + 1) % 5) == 0):
        torch.save(model_discriminator.state_dict(), ((save_path + 'DCF_dis.pth') + ('.%d' % (epoch + 1))))
        torch.save(model_estimator.state_dict(), ((save_path + 'DCF_estimator.pth') + ('.%d' % (epoch + 1))))
        torch.save(model_rgb.state_dict(), ((save_path + 'DCF_rgb.pth') + ('.%d' % (epoch + 1))))
        torch.save(model_depth.state_dict(), ((save_path + 'DCF_depth.pth') + ('.%d' % (epoch + 1))))
class BatchSamplerImagesSameLength(object):
    """Batch sampler that groups images whose captions have similar mean
    BPE length, then yields the pre-built minibatches in shuffled order.
    """

    def __init__(self, dataset, batch_size):
        assert ((type(dataset) == CocoCaptionsIndexedImage) or (type(dataset) == CocoCaptionsIndexedImageDistill))
        self.img2bpes = dataset.img2bpes
        self.bpes = dataset.bpes
        # One representative length per image: the rounded mean token count
        # over all of its captions.
        lengths = []
        for img in self.img2bpes.keys():
            caption_lengths = [len(self.bpes[bpe].split()) for bpe in self.img2bpes[img]]
            lengths.append(int(np.mean(np.array(caption_lengths))))
        self.minibatches = minibatch_same_length(lengths, batch_size)
        # Shuffle once at construction; iteration order is then fixed.
        random.shuffle(self.minibatches)

    def __iter__(self):
        for batch in self.minibatches:
            yield batch

    def __len__(self):
        return len(self.minibatches)
def logging_level(level: int):
    """Temporarily apply a logging level, restoring the previous one on exit.

    NOTE(review): this generator is written to be used as a context manager;
    it presumably carries a ``@contextmanager`` decorator at its definition
    site — confirm, since none is visible here.

    Args:
        level: the logging level to set for the duration of the block.
    """
    _initial = getLoggingLevel()
    setLoggingLevel(level)
    try:
        (yield)
    finally:
        # Always restore the original level, even if the body raised.
        setLoggingLevel(_initial)
def number_literal(number):
    """Recursively spell out a number in German words.

    Base words come from the module-level ``number_mappings`` dict; larger
    numbers are decomposed into millions/thousands/hundreds plus a remainder.

    NOTE(review): inputs whose length hits no branch below (e.g. a single
    digit missing from ``number_mappings``, or more than 8 digits) fall
    through and implicitly return None — confirm callers never pass those.

    Args:
        number: an int or numeric string.

    Returns:
        The German word string, or a value from ``number_mappings``.
    """
    x_str = str(number)
    # Direct lookup for numbers with irregular forms (assumed to cover
    # units, teens, tens, etc. — TODO confirm the mapping's coverage).
    if (x_str in number_mappings):
        return number_mappings[x_str]
    # Default split: leading digit vs remainder with leading zeros stripped.
    x_str_left = x_str[0]
    x_str_right = x_str[1:].lstrip('0')
    if (len(x_str) == 8):
        # Tens of millions: the two leading digits carry the million count.
        x_str_left = x_str[0:2]
        x_str_right = x_str[2:].lstrip('0')
        if (x_str_right != ''):
            return ((number_literal(x_str_left) + 'millionen') + number_literal(x_str_right))
        else:
            return (number_literal(x_str_left) + 'millionen')
    if (len(x_str) == 7):
        # Single-digit millions.
        x_str_left = x_str[0]
        x_str_right = x_str[1:].lstrip('0')
        if (x_str_right != ''):
            return ((number_literal(x_str_left) + 'millionen') + number_literal(x_str_right))
        else:
            return (number_literal(x_str_left) + 'millionen')
    if (len(x_str) == 6):
        # Hundreds of thousands.
        x_str_left = x_str[0:3]
        x_str_right = x_str[3:].lstrip('0')
        if (x_str_right != ''):
            return ((number_literal(x_str_left) + 'tausend') + number_literal(x_str_right))
        else:
            return (number_literal(x_str_left) + 'tausend')
    if (len(x_str) == 5):
        # Tens of thousands.
        x_str_left = x_str[0:2]
        x_str_right = x_str[2:].lstrip('0')
        if (x_str_right != ''):
            return ((number_literal(x_str_left) + 'tausend') + number_literal(x_str_right))
        else:
            return (number_literal(x_str_left) + 'tausend')
    if (len(x_str) == 4):
        # Uses the default split computed at the top of the function.
        if (x_str_right != ''):
            # Years 1200-1999 are read as "<hundreds>hundert<rest>".
            if ((int(number) >= 1200) and (int(number) < 2000)):
                decade = x_str[2:].lstrip('0')
                if (decade != ''):
                    return ((number_literal(x_str[0:2]) + 'hundert') + number_literal(x_str[2:].lstrip('0')))
                else:
                    return (number_literal(x_str[0:2]) + 'hundert')
            else:
                return ((number_literal(x_str_left) + 'tausend') + number_literal(x_str_right))
        else:
            return (number_literal(x_str_left) + 'tausend')
    if (len(x_str) == 3):
        # Hundreds.
        if (x_str_right != ''):
            return ((number_literal(x_str_left) + 'hundert') + number_literal(x_str_right))
        else:
            return (number_literal(x_str_left) + 'hundert')
    if (len(x_str) == 2):
        # German order: units before tens ("einundzwanzig").
        if (x_str_right != ''):
            return ((number_literal(x_str_right) + 'und') + number_literal((x_str_left + '0')))
        else:
            # NOTE(review): the generic '<digit>zig' suffix is wrong for some
            # tens (e.g. 30 → "dreißig", 70 → "siebzig"); presumably those are
            # handled by number_mappings above — confirm.
            return (number_literal(x_str_left) + 'zig')
def _split_data(x, y, k_idx, k, perm_indices):
assert (k > 0)
assert (k_idx >= 0)
assert (k_idx < k)
N = len(x)
partition_size = int(ceil((N / k)))
minority_start = (k_idx * partition_size)
minority_end = (minority_start + partition_size)
minority_indices = perm_indices[minority_start:minority_end]
majority_indices = np.append(perm_indices[0:minority_start], perm_indices[minority_end:])
assert np.array_equal(np.sort(np.append(minority_indices, majority_indices)), np.array(range(N)))
x_majority = [x[i] for i in majority_indices]
y_majority = [y[i] for i in majority_indices]
x_minority = [x[i] for i in minority_indices]
y_minority = [y[i] for i in minority_indices]
return ((x_majority, y_majority), (x_minority, y_minority)) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.