code stringlengths 101 5.91M |
|---|
class StableDiffusionControlNetPipeline(metaclass=DummyObject):
    """Placeholder pipeline that raises a helpful error when the required
    backends (torch, transformers) are not installed.

    Fixed: ``from_config`` / ``from_pretrained`` take ``cls`` but were missing
    their ``@classmethod`` decorators (the diffusers dummy-object convention).
    """

    _backends = ['torch', 'transformers']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers'])
def test_actionset_from_uspto_line():
    """Parse a USPTO change string into the expected action set."""
    line = '11-14;11-13'
    parsed = uspto_data.actionset_from_uspto_line(line)
    assert parsed == {11, 13, 14}
def create_mobilenetv2_ssd_lite_predictor(net, candidate_size=200, nms_method=None, sigma=0.5, device=torch.device('cpu')):
    """Wrap *net* in a Predictor configured from the global `config` module."""
    return Predictor(
        net,
        config.image_size,
        config.image_mean,
        config.image_std,
        nms_method=nms_method,
        iou_threshold=config.iou_threshold,
        candidate_size=candidate_size,
        sigma=sigma,
        device=device,
    )
def convert_torchvision_ckpt_to_detectron2(ckpt_path) -> Dict[(str, Any)]:
    """Rename torchvision ResNet checkpoint keys to Detectron2's naming scheme.

    Returns a dict with the converted ``model`` state dict plus metadata that
    tells Detectron2 to use heuristic shape matching when loading.
    """
    assert os.path.exists(ckpt_path)
    source_state = torch.load(ckpt_path)
    # torchvision -> detectron2 substring substitutions, applied in order.
    rename_map: Dict[(str, str)] = {
        'layer1': 'res2', 'layer2': 'res3', 'layer3': 'res4', 'layer4': 'res5',
        'bn1': 'conv1.norm', 'bn2': 'conv2.norm', 'bn3': 'conv3.norm',
        'downsample.0': 'shortcut', 'downsample.1': 'shortcut.norm',
    }
    converted: Dict[(str, torch.Tensor)] = {}
    for key, tensor in source_state.items():
        for old, new in rename_map.items():
            key = key.replace(old, new)
        # Anything outside the residual stages belongs to the stem.
        if not key.startswith('res'):
            key = f'stem.{key}'
        converted[key] = tensor
    return {'model': converted, '__author__': 'Karan Desai', 'matching_heuristics': True}
def main():
    """Entry point: compute allowed durations and write perturbed Kaldi data files."""
    args = get_args()
    # Interpret --factor as a percentage increment (e.g. 10 -> ratio 1.1).
    args.factor = 1.0 + args.factor / 100.0
    if not os.path.exists(args.dir):
        os.makedirs(args.dir)
    utterances = read_kaldi_datadir(args.srcdir)
    start_dur, end_dur = find_duration_range(utterances, args.coverage_factor)
    logger.info('Durations in the range [{},{}] will be covered. Coverage rate: {}%'.format(
        start_dur, end_dur, 100.0 - args.coverage_factor * 2))
    logger.info('There will be {} unique allowed lengths for the utterances.'.format(
        int(math.log(end_dur / start_dur) / math.log(args.factor))))
    allowed_durations = find_allowed_durations(start_dur, end_dur, args)
    perturbed_utterances = perturb_utterances(utterances, allowed_durations, args)
    generate_kaldi_data_files(perturbed_utterances, args.dir)
def draw_worm(frame, skel, width):
    """Draw the worm skeleton on *frame* as segments shaded from head to tail colour."""
    segment_colors = np.linspace(WORM_BODY_COLOR_HEAD, WORM_BODY_COLOR_TAIL, len(skel) - 1)
    for idx, (pt_a, pt_b) in enumerate(zip(skel[1:], skel[:-1])):
        cv2.line(
            frame,
            pt1=tuple(pt_a.astype(int)),
            pt2=tuple(pt_b.astype(int)),
            color=segment_colors[idx],
            thickness=int(width),
        )
class RejectionLogFromTable(RejectionLog):
    """Rejection log stored as a FITS table: one row per forest, with fixed
    FOREST_SIZE / REJECTION_STATUS columns plus one column per header item."""

    def __init__(self, file):
        super().__init__(file)
        # Column buffers are created lazily from the first logged forest.
        self.cols = None
        self.names = None
        self.comments = None

    def initialize_rejection_log(self, forest):
        # Two fixed columns up front; one extra column per forest header item.
        self.cols = [[], []]
        self.names = ['FOREST_SIZE', 'REJECTION_STATUS']
        self.comments = ['num pixels in forest', 'rejection status']
        for item in forest.get_header():
            self.cols.append([])
            self.names.append(item.get('name'))
            self.comments.append(item.get('comment'))
        # NOTE(review): `initialized` is presumably declared by the
        # RejectionLog base class -- confirm.
        self.initialized = True

    def add_to_rejection_log(self, forest, rejection_status):
        if (not self.initialized):
            self.initialize_rejection_log(forest)
        header = forest.get_header()
        size = forest.flux.size
        for (col, name) in zip(self.cols, self.names):
            if (name == 'FOREST_SIZE'):
                col.append(size)
            elif (name == 'REJECTION_STATUS'):
                col.append(rejection_status)
            else:
                # Copy the matching header value into its column.
                for item in header:
                    if (item.get('name') == name):
                        col.append(item.get('value'))
                        break

    def save_rejection_log(self):
        # clobber=True: overwrite any existing log file.
        rejection_log = fitsio.FITS(self.file, 'rw', clobber=True)
        rejection_log.write([np.array(item) for item in self.cols], names=self.names, comment=self.comments, extname='rejection_log')
        rejection_log.close()
def test_subscript_is_live():
    """All symbols referenced by a subscript assignment should be live."""
    live, _dead = compute_live_dead_symbol_refs('foo[bar] = baz')
    assert live == {'foo', 'bar', 'baz'}
class ASPP_module(nn.Module):
    """One ASPP branch: atrous conv -> batch norm -> ReLU.

    A dilation of 1 degenerates to a plain 1x1 convolution; otherwise a 3x3
    atrous convolution with padding equal to the dilation keeps the spatial
    size unchanged.
    """

    def __init__(self, inplanes, planes, dilation):
        super(ASPP_module, self).__init__()
        kernel_size, padding = (1, 0) if dilation == 1 else (3, dilation)
        self.atrous_convolution = nn.Conv2d(
            inplanes, planes, kernel_size=kernel_size, stride=1,
            padding=padding, dilation=dilation, bias=False)
        self.bn = BatchNorm2d(planes)
        self.relu = nn.ReLU()
        self._init_weight()

    def forward(self, x):
        return self.relu(self.bn(self.atrous_convolution(x)))

    def _init_weight(self):
        # He-style init for convolutions; unit scale / zero shift for norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            elif isinstance(m, BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
class VanMlpLayer(nn.Module):
    """VAN MLP block: 1x1 expand -> depthwise 3x3 -> activation -> dropout ->
    1x1 project -> dropout, all as convolutions on (B, C, H, W) tensors."""

    def __init__(self, in_channels: int, hidden_size: int, out_channels: int, hidden_act: str='gelu', dropout_rate: float=0.5):
        super().__init__()
        self.in_dense = nn.Conv2d(in_channels, hidden_size, kernel_size=1)
        # Depthwise conv (groups == channels) mixes spatial context per channel.
        self.depth_wise = nn.Conv2d(hidden_size, hidden_size, kernel_size=3, padding=1, groups=hidden_size)
        self.activation = ACT2FN[hidden_act]
        self.dropout1 = nn.Dropout(dropout_rate)
        self.out_dense = nn.Conv2d(hidden_size, out_channels, kernel_size=1)
        self.dropout2 = nn.Dropout(dropout_rate)

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        # Apply the sub-layers strictly in construction order.
        for layer in (self.in_dense, self.depth_wise, self.activation,
                      self.dropout1, self.out_dense, self.dropout2):
            hidden_state = layer(hidden_state)
        return hidden_state
def load_units(in_file):
    """Read ``sample_id|unit unit ...`` lines into {sample_id: [unit, ...]}."""
    units_by_id = {}
    with open(in_file) as handle:
        for raw_line in handle:
            # Split only on the first '|' so units may themselves contain '|'.
            sample_id, units = raw_line.strip().split('|', 1)
            units_by_id[sample_id] = units.split()
    return units_by_id
class FileStorageObserverWithExUuid(FileStorageObserver):
    """FileStorageObserver that derives the run ``_id`` from the config's 'uuid'.

    Fixed: ``started_event`` now performs the same ``'uuid' in config`` check as
    ``queued_event`` instead of failing with a KeyError (consistency).
    """

    UNUSED_VALUE = -1

    def started_event(self, ex_info, command, host_info, start_time, config, meta_info, _id):
        assert ('uuid' in config), "The config must contain a key 'uuid'"
        # The provided _id is deliberately ignored and replaced.
        _id = config['uuid'] + '_metadata'
        super().started_event(ex_info, command, host_info, start_time, config, meta_info, _id=_id)

    def queued_event(self, ex_info, command, host_info, queue_time, config, meta_info, _id):
        assert ('uuid' in config), "The config must contain a key 'uuid'"
        _id = config['uuid'] + '_metadata'
        super().queued_event(ex_info, command, host_info, queue_time, config, meta_info, _id=_id)
def preprocess_info(path=DATA_PATH, file=DATA_FILE, buys_file=DATA_FILE_BUYS, path_proc=DATA_PATH_PROCESSED, min_item_support=MIN_ITEM_SUPPORT, min_session_length=MIN_SESSION_LENGTH):
    # Load the raw click and buy events, then prune rare items / short sessions.
    (data, buys) = load_data(path, file, buys_file)
    # NOTE(review): the filtered result is discarded and `path_proc`/`buys` go
    # unused -- this looks truncated; sibling preprocess_* functions presumably
    # write the processed data out. Confirm against the original module.
    data = filter_data(data, min_item_support, min_session_length)
def get_scheduler(optimizer, n_iter_per_epoch, args):
    """Build an LR scheduler (cosine or multi-step) wrapped in gradual warmup.

    Milestones/periods are expressed in iterations and shifted by the warmup
    epochs, since the warmup wrapper consumes those first.
    """
    if 'cosine' in args.lr_scheduler:
        base_scheduler = CosineAnnealingLR(
            optimizer=optimizer,
            eta_min=1e-06,
            T_max=(args.epochs - args.warmup_epoch) * n_iter_per_epoch)
    elif 'step' in args.lr_scheduler:
        base_scheduler = MultiStepLR(
            optimizer=optimizer,
            gamma=args.lr_decay_rate,
            milestones=[(m - args.warmup_epoch) * n_iter_per_epoch for m in args.lr_decay_epochs])
    else:
        raise NotImplementedError('scheduler {} not supported'.format(args.lr_scheduler))
    return GradualWarmupScheduler(
        optimizer,
        multiplier=args.warmup_multiplier,
        after_scheduler=base_scheduler,
        warmup_epoch=args.warmup_epoch * n_iter_per_epoch)
def adaptive_catavgmax_pool2d(x, output_size=1):
    """Concatenate adaptive average- and max-pooled features along channels."""
    pooled = (F.adaptive_avg_pool2d(x, output_size),
              F.adaptive_max_pool2d(x, output_size))
    return torch.cat(pooled, 1)
class SelectiveKernelAttn(nn.Module):
    """Selective-Kernel attention: produces a per-channel softmax over paths.

    Input is (B, num_paths, C, H, W); output has the same leading dims with
    spatial size 1x1 and sums to 1 across the path dimension.
    """

    def __init__(self, channels, num_paths=2, attn_channels=32, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d):
        super(SelectiveKernelAttn, self).__init__()
        self.num_paths = num_paths
        self.fc_reduce = nn.Conv2d(channels, attn_channels, kernel_size=1, bias=False)
        self.bn = norm_layer(attn_channels)
        self.act = act_layer(inplace=True)
        self.fc_select = nn.Conv2d(attn_channels, channels * num_paths, kernel_size=1, bias=False)

    def forward(self, x):
        assert x.shape[1] == self.num_paths
        # Fuse paths, then squeeze spatial dims to a (B, C, 1, 1) descriptor.
        descriptor = x.sum(1).mean((2, 3), keepdim=True)
        attn = self.act(self.bn(self.fc_reduce(descriptor)))
        attn = self.fc_select(attn)
        B, C, H, W = attn.shape
        attn = attn.view(B, self.num_paths, C // self.num_paths, H, W)
        return torch.softmax(attn, dim=1)
# Fixed: the decorator was corrupted in the source (".parametrize"); restored
# to the standard pytest parametrize mark (assumes pytest is imported above).
@pytest.mark.parametrize('metric, expected_value', [('map', 0.), ('ndcg', 0.75), ('jaccard', 0.6)])
def test_metric_is_not_1_for_incorrect(metric, expected_value):
    """Each metric should be penalized (< 1) for partially incorrect retrievals."""
    (ground_truth, retrieved) = return_ground_incorrect_retrievals()
    metric_val = mean_metric(ground_truth, retrieved, metric=metric)
    assert (metric_val == expected_value)
def plot_hist(title, experiment, n_tasks, fig_name):
    """Stack-plot per-task average reward over training steps and save the figure."""
    max_step = 3000000
    scores = load_scores(experiment)
    steps = list(sorted(scores.keys()))
    task_index = {}
    # y[task][step position] = that task's reward contribution, averaged over tasks.
    y = [[0 for _ in range(len(steps))] for _ in range(n_tasks)]
    for step_pos, step in enumerate(steps):
        for task in scores[step]:
            if task not in task_index:
                task_index[task] = len(task_index)
            y[task_index[task]][step_pos] = scores[step][task] / n_tasks
    labels = []
    colors = []
    # Iterate tasks in the order they were first seen; shade by task length.
    for task, _pos in sorted(task_index.items(), key=lambda kv: kv[1]):
        labels.append(task)
        colors.append(np.asarray([0.5, 0.5, 1]) * TASK_LENS[task] / MAX_TASK_LEN * 0.9)
    plt.stackplot(steps, y, labels=labels, colors=colors, edgecolor=(1, 1, 1))
    plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
    plt.title(title, y=1.05)
    plt.xlim([0, max_step])
    plt.ylim([0, 1])
    plt.xlabel('Episode', labelpad=10)
    plt.ylabel('Avg.\\ reward', labelpad=10)
    plt.tight_layout()
    plt.gcf().subplots_adjust(right=0.8)
    plt.savefig(fig_name)
    plt.show()
class HeadSelectionTransformerEncoderLayer(TransformerEncoderLayer):
    """Encoder layer whose self-attention selects among a shared pool of heads."""

    def __init__(self, args, layer_idx, attn_head_selector=None):
        super().__init__(args)
        self.layer_idx = layer_idx
        # Replace the stock self-attention built by the parent constructor.
        self.self_attn = self.build_self_attention_selection(
            self.embed_dim, args, attn_head_selector)

    def build_self_attention_selection(self, embed_dim, args, attn_head_selector=None):
        return MultiheadAttentionSelection(
            embed_dim,
            args.total_encoder_attention_heads,
            args.encoder_attention_heads,
            dropout=args.attention_dropout,
            self_attention=True,
            q_noise=self.quant_noise,
            qn_block_size=self.quant_noise_block_size,
            layer_idx=self.layer_idx,
            attn_head_selector=attn_head_selector,
        )
def imfrombytes(content, flag='color'):
    """Decode an in-memory image buffer with OpenCV.

    ``flag`` may be a key into ``imread_flags`` or a raw cv2 imread flag value.
    """
    buffer = np.frombuffer(content, np.uint8)
    if is_str(flag):
        flag = imread_flags[flag]
    return cv2.imdecode(buffer, flag)
def mdb():
    """Download and unpack the MDB-stem-synth dataset into penn.DATA_DIR / 'mdb'."""
    # NOTE(review): the source line was corrupted ("targz(' penn.DATA_DIR)");
    # restored to the Zenodo archive URL used by the penn project -- verify.
    torchutil.download.targz(
        'https://zenodo.org/record/1481172/files/MDB-stem-synth.tar.gz',
        penn.DATA_DIR)
    # Replace any previous extraction before renaming the unpacked directory.
    shutil.rmtree(penn.DATA_DIR / 'mdb', ignore_errors=True)
    shutil.move(penn.DATA_DIR / 'MDB-stem-synth', penn.DATA_DIR / 'mdb')
class EvaluationCallback(Callback):
    """Periodically evaluates topic coherence (NPMI or word-embedding based)
    during training, tracking the best and most recent values."""

    def __init__(self, base_dir, vocab, topk=10, corpus_dir='', embedding_path='', metric='npmi', every=10):
        super(EvaluationCallback, self).__init__()
        self.base_dir = base_dir
        self.vocab = vocab
        self.topk = topk              # number of top words per topic to score
        self.corpus_dir = corpus_dir  # reference corpus for NPMI evaluation
        self.every = every            # evaluate every N epochs
        self.cnt = 0                  # evaluation counter, used in output file names
        self.max_tc = 0               # best coherence seen so far
        self.last_tc = 0              # most recent coherence
        if (metric == 'wetc'):
            # Word-embedding topic coherence needs an embedding matrix aligned
            # with the vocabulary.
            # NOTE(review): this check runs before metric.lower(), so an
            # upper-case 'WETC' would skip loading the embeddings -- confirm
            # callers always pass lower-case.
            assert os.path.exists(embedding_path), 'embedding file does not exists.'
            self.embedding = np.load(embedding_path)
            assert (len(self.embedding) == len(vocab))
        metric = metric.lower()
        assert (metric in ['npmi', 'wetc'])
        self.metric = metric

    def wetc(self, topics):
        # Score each topic's top-k words with the word-embedding coherence
        # metric, dump the per-topic scores to disk and return the mean.
        topics = torch_detach(topics)
        idx = np.argsort(topics, axis=1)
        # NOTE(review): the comprehension variable shadows `idx`; each row's
        # last `topk` indices (highest-scoring words) are looked up.
        tc = [wetc(self.embedding[idx[(- self.topk):]]) for idx in idx]
        save_path = path.join(self.base_dir, 'wetc-topic-{}'.format(self.cnt))
        tc_mean = np.mean(tc)
        tc.append('mean {}'.format(tc_mean))
        with open(save_path, 'w') as f:
            json.dump(tc, f)
        return tc_mean

    def npmi(self, topics):
        # Write the top-k words per topic and score them with the external
        # NPMI evaluation against the reference corpus.
        save_path = path.join(self.base_dir, 'topic-{}'.format(self.cnt))
        save_topics(save_path, self.vocab, topics, self.topk, self.trainer.logger)
        return evaluate_topic_coherence((save_path + '.topics'), self.corpus_dir, (save_path + '.res'), self.trainer.logger)

    def evaluate_topic_coherence(self):
        # Pull the current topic-word matrix from the model and score it.
        topics = self.trainer.trainer_batch.model.get_topics()
        assert (topics.size(1) == len(self.vocab)), 'topics shape error, should be vocab size {}'.format(len(self.vocab))
        if (self.metric == 'npmi'):
            tc = self.npmi(topics)
        else:
            tc = self.wetc(topics)
        self.max_tc = max(self.max_tc, tc)
        self.last_tc = tc
        self.trainer.summary_writer.add_scalar('topic_coherence', tc, self.cnt)
        self.trainer.logger.info('topic coherence {}'.format(tc))

    def on_epoch_end(self, epoch, logs=None):
        self.cnt += 1
        if ((epoch % self.every) == 0):
            self.evaluate_topic_coherence()

    def on_train_end(self, logs=None):
        # Always evaluate once more at the end of training.
        self.evaluate_topic_coherence()

    def get_dict(self):
        # Summary for experiment bookkeeping.
        return {'max_topic_coherence': self.max_tc, 'last_topic_coherence': self.last_tc}
def tree_norm(a):
    """Global L2 norm over all leaves of a pytree."""
    squared_total = sum(jnp.sum(leaf ** 2) for leaf in jax.tree_util.tree_leaves(a))
    return float(jnp.sqrt(squared_total))
def _get_module_by_path(module, path):
path = path.split('.')
for name in path:
module = getattr(module, name)
return module |
class MoveCarry(NamedTuple):
    """Carry state for a move loop: the working row, accumulated reward, and
    the indices of the current target/origin cells."""
    # NOTE(review): `chex` and `MoveUpdate` are imported elsewhere in the file.
    row: chex.Array
    reward: float
    target_idx: int
    origin_idx: int

    def target(self) -> chex.Numeric:
        # Value currently held at the target cell.
        return self.row[self.target_idx]

    def origin(self) -> chex.Numeric:
        # Value currently held at the origin cell.
        return self.row[self.origin_idx]

    def update(self, update: MoveUpdate) -> 'MoveCarry':
        # Functional update: JAX's .at[].set() returns a new array, so the
        # carry stays immutable.
        row = self.row
        row = row.at[self.target_idx].set(update.target)
        row = row.at[self.origin_idx].set(update.origin)
        return self._replace(row=row, reward=(self.reward + update.additional_reward), target_idx=update.target_idx, origin_idx=update.origin_idx)
def remove_spectral_norm_conv(module, name='weight'):
    """Remove the SpectralNormConv reparameterization named *name* from *module*.

    Raises ValueError when no matching hook is installed.
    """
    for hook_key, hook in module._forward_pre_hooks.items():
        if isinstance(hook, SpectralNormConv) and hook.name == name:
            hook.remove(module)
            del module._forward_pre_hooks[hook_key]
            return module
    raise ValueError("spectral_norm of '{}' not found in {}".format(name, module))
def get_preprocessing_params(encoder_name, pretrained='imagenet'):
    """Return input_space/input_range/mean/std for an encoder's pretrained weights."""
    settings = encoders[encoder_name]['pretrained_settings']
    if pretrained not in settings.keys():
        raise ValueError('Available pretrained options {}'.format(settings.keys()))
    chosen = settings[pretrained]
    # Missing entries come back as None, matching .get() semantics.
    return {key: chosen.get(key) for key in ('input_space', 'input_range', 'mean', 'std')}
def text_encoder_mlp_modules(text_encoder):
    """List (qualified_name, module) pairs for every MLP in a CLIP text encoder."""
    if not isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)):
        raise ValueError(f'do not know how to get mlp modules for: {text_encoder.__class__.__name__}')
    return [
        (f'text_model.encoder.layers.{i}.mlp', layer.mlp)
        for i, layer in enumerate(text_encoder.text_model.encoder.layers)
    ]
# Fixed: the decorator was corrupted in the source (".skipif"); restored to the
# standard pytest skipif mark (assumes pytest is imported above).
@pytest.mark.skipif((os.getenv('GITHUB_ACTIONS') == 'true'), reason='No way of testing this on Github actions.')
def test_conv1(cel):
    """Annotating a sci-fi conversation should yield the expected USER entity spans."""
    example = [{'speaker': 'USER', 'utterance': "I think science fiction is an amazing genre for anything. Future science, technology, time travel, FTL travel, they're all such interesting concepts."}, {'speaker': 'SYSTEM', 'utterance': 'Awesome! I really love how sci-fi storytellers focus on political/social/philosophical issues that would still be around even in the future. Makes them relatable.'}, {'speaker': 'USER', 'utterance': 'I agree. One of my favorite forms of science fiction is anything related to time travel! I find it fascinating.'}]
    result = cel.annotate(example)
    assert isinstance(result, list)
    # Spans are [start, length, surface text, linked entity] per USER turn.
    expected_annotations = [[[8, 15, 'science fiction', 'Science_fiction'], [38, 5, 'genre', 'Genre_fiction'], [74, 10, 'technology', 'Technology'], [86, 11, 'time travel', 'Time_travel'], [99, 10, 'FTL travel', 'Faster-than-light']], [[37, 15, 'science fiction', 'Science_fiction'], [76, 11, 'time travel', 'Time_travel'], [16, 36, 'my favorite forms of science fiction', 'Time_travel']]]
    annotations = [res['annotations'] for res in result if (res['speaker'] == 'USER')]
    assert (annotations == expected_annotations)
class actionAngleVertical(actionAngle):
    """Action-angle coordinates for 1D vertical motion in a linear potential.

    Unbound orbits are flagged with the sentinel 9999.99 (internally
    calcxmax returns -9999.99 for them).

    Fixes: removed a duplicated 'pot' check in __init__ and a redundant
    double assignment (``E = E = ...``) in calcxmax.
    """

    def __init__(self, *args, **kwargs):
        """Set up the calculator; requires ``pot=`` (a linear potential) in kwargs."""
        actionAngle.__init__(self, ro=kwargs.get('ro', None), vo=kwargs.get('vo', None))
        if 'pot' not in kwargs:
            raise OSError('Must specify pot= for actionAngleVertical')
        self._pot = kwargs['pot']
        return None

    def _evaluate(self, *args, **kwargs):
        """Compute the vertical action J for (x, vx)."""
        if len(args) == 2:
            x, vx = args
            if isinstance(x, float):
                x = numpy.array([x])
                vx = numpy.array([vx])
            J = numpy.empty(len(x))
            for ii in range(len(x)):
                # Specific energy of this phase-space point.
                E = vx[ii] ** 2.0 / 2.0 + evaluatelinearPotentials(self._pot, x[ii], use_physical=False)
                xmax = self.calcxmax(x[ii], vx[ii], E)
                if xmax == -9999.99:
                    J[ii] = 9999.99  # unbound
                else:
                    # J = (2/pi) * int_0^xmax sqrt(2 (E - Phi(xi))) dxi
                    J[ii] = 2.0 * integrate.quad(lambda xi: numpy.sqrt(2.0 * (E - evaluatelinearPotentials(self._pot, xi, use_physical=False))), 0.0, xmax)[0] / numpy.pi
            return J
        else:
            raise ValueError('actionAngleVertical __call__ input not understood')

    def _actionsFreqs(self, *args, **kwargs):
        """Compute the action J and frequency Omega for (x, vx)."""
        if len(args) == 2:
            x, vx = args
            if isinstance(x, float):
                x = numpy.array([x])
                vx = numpy.array([vx])
            J = numpy.empty(len(x))
            Omega = numpy.empty(len(x))
            for ii in range(len(x)):
                E = vx[ii] ** 2.0 / 2.0 + evaluatelinearPotentials(self._pot, x[ii], use_physical=False)
                xmax = self.calcxmax(x[ii], vx[ii], E)
                if xmax == -9999.99:
                    J[ii] = 9999.99
                    Omega[ii] = 9999.99
                else:
                    J[ii] = 2.0 * integrate.quad(lambda xi: numpy.sqrt(2.0 * (E - evaluatelinearPotentials(self._pot, xi, use_physical=False))), 0.0, xmax)[0] / numpy.pi
                    # Substitution xi = xmax - t^2 removes the turning-point
                    # singularity from the period integral.
                    Omega[ii] = (numpy.pi / 2.0) / integrate.quad(lambda t: 2.0 * t / numpy.sqrt(2.0 * (E - evaluatelinearPotentials(self._pot, xmax - t ** 2.0, use_physical=False))), 0, numpy.sqrt(xmax))[0]
            return (J, Omega)
        else:
            raise ValueError('actionAngleVertical actionsFreqs input not understood')

    def _actionsFreqsAngles(self, *args, **kwargs):
        """Compute action J, frequency Omega, and angle for (x, vx)."""
        if len(args) == 2:
            x, vx = args
            if isinstance(x, float):
                x = numpy.array([x])
                vx = numpy.array([vx])
            J = numpy.empty(len(x))
            Omega = numpy.empty(len(x))
            angle = numpy.empty(len(x))
            for ii in range(len(x)):
                E = vx[ii] ** 2.0 / 2.0 + evaluatelinearPotentials(self._pot, x[ii], use_physical=False)
                xmax = self.calcxmax(x[ii], vx[ii], E)
                if xmax == -9999.99:
                    J[ii] = 9999.99
                    Omega[ii] = 9999.99
                    angle[ii] = 9999.99
                else:
                    J[ii] = 2.0 * integrate.quad(lambda xi: numpy.sqrt(2.0 * (E - evaluatelinearPotentials(self._pot, xi, use_physical=False))), 0.0, xmax)[0] / numpy.pi
                    Omega[ii] = (numpy.pi / 2.0) / integrate.quad(lambda t: 2.0 * t / numpy.sqrt(2.0 * (E - evaluatelinearPotentials(self._pot, xmax - t ** 2.0, use_physical=False))), 0, numpy.sqrt(xmax))[0]
                    # Time from x=0 to |x|, later scaled by Omega to an angle.
                    angle[ii] = integrate.quad(lambda xi: 1.0 / numpy.sqrt(2.0 * (E - evaluatelinearPotentials(self._pot, xi, use_physical=False))), 0, numpy.fabs(x[ii]))[0]
            angle *= Omega
            # Map the angle into the correct quadrant based on (sign x, sign vx).
            angle[((x >= 0.0) * (vx < 0.0))] = numpy.pi - angle[((x >= 0.0) * (vx < 0.0))]
            angle[((x < 0.0) * (vx <= 0.0))] = numpy.pi + angle[((x < 0.0) * (vx <= 0.0))]
            angle[((x < 0.0) * (vx > 0.0))] = 2.0 * numpy.pi - angle[((x < 0.0) * (vx > 0.0))]
            return (J, Omega, angle % (2.0 * numpy.pi))
        else:
            raise ValueError('actionAngleVertical actionsFreqsAngles input not understood')

    def calcxmax(self, x, vx, E=None):
        """Maximum vertical excursion for energy E; returns -9999.99 when unbound."""
        if E is None:
            # Fixed: the original had a redundant double assignment here.
            E = vx ** 2.0 / 2.0 + evaluatelinearPotentials(self._pot, x, use_physical=False)
        if vx == 0.0:
            # Already at a turning point.
            xmax = numpy.fabs(x)
        else:
            xstart = x
            try:
                if x == 0.0:
                    xend = 1e-05
                else:
                    xend = 2.0 * numpy.fabs(x)
                # Expand the bracket until the potential exceeds E.
                while E - evaluatelinearPotentials(self._pot, xend, use_physical=False) > 0.0:
                    xend *= 2.0
                    if xend > 100.0:
                        raise OverflowError
            except OverflowError:
                # Never turned around: treat as unbound.
                xmax = -9999.99
            else:
                xmax = optimize.brentq(lambda xm: E - evaluatelinearPotentials(self._pot, xm, use_physical=False), xstart, xend, xtol=1e-14)
                # Nudge just inside the classically allowed region.
                while E - evaluatelinearPotentials(self._pot, xmax, use_physical=False) < 0:
                    xmax -= 1e-14
        return xmax
# NOTE(review): the registration decorator was corrupted in the source
# ("_HEADS_REGISTRY.register()"); restored to detectron2's ROI_HEADS_REGISTRY
# -- confirm against the file's imports.
@ROI_HEADS_REGISTRY.register()
class AttributeRes5ROIHeads(AttributeROIHeads, Res5ROIHeads):
    """Res5 ROI heads extended with an attribute prediction branch."""

    def __init__(self, cfg, input_shape):
        # Skip Res5ROIHeads.__init__ and initialize from its parent so the
        # components can be rebuilt here with attribute support.
        super(Res5ROIHeads, self).__init__(cfg, input_shape)
        assert len(self.in_features) == 1
        pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
        pooler_scales = (1.0 / input_shape[self.in_features[0]].stride,)
        sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        self.mask_on = cfg.MODEL.MASK_ON
        self.attribute_on = cfg.MODEL.ATTRIBUTE_ON
        assert not cfg.MODEL.KEYPOINT_ON
        self.pooler = ROIPooler(output_size=pooler_resolution, scales=pooler_scales, sampling_ratio=sampling_ratio, pooler_type=pooler_type)
        self.res5, out_channels = self._build_res5_block(cfg)
        self.box_predictor = FastRCNNOutputLayers(cfg, ShapeSpec(channels=out_channels, height=1, width=1))
        if self.mask_on:
            self.mask_head = build_mask_head(cfg, ShapeSpec(channels=out_channels, width=pooler_resolution, height=pooler_resolution))
        if self.attribute_on:
            self.attribute_predictor = AttributePredictor(cfg, out_channels)

    def forward(self, images, features, proposals, targets=None):
        """Run box (and optionally mask/attribute) heads on the ROI proposals."""
        del images
        if self.training:
            assert targets
            proposals = self.label_and_sample_proposals(proposals, targets)
        del targets
        proposal_boxes = [x.proposal_boxes for x in proposals]
        box_features = self._shared_roi_transform([features[f] for f in self.in_features], proposal_boxes)
        # Global average over the spatial dims feeds the box predictor.
        feature_pooled = box_features.mean(dim=[2, 3])
        predictions = self.box_predictor(feature_pooled)
        if self.training:
            del features
            losses = self.box_predictor.losses(predictions, proposals)
            if self.mask_on:
                # Mask head only sees foreground ROI features.
                proposals, fg_selection_masks = select_foreground_proposals(proposals, self.num_classes)
                mask_features = box_features[torch.cat(fg_selection_masks, dim=0)]
                del box_features
                losses.update(self.mask_head(mask_features, proposals))
            if self.attribute_on:
                losses.update(self.forward_attribute_loss(proposals, feature_pooled))
            return ([], losses)
        else:
            pred_instances, _ = self.box_predictor.inference(predictions, proposals)
            pred_instances = self.forward_with_given_boxes(features, pred_instances)
            return (pred_instances, {})

    def get_conv5_features(self, features):
        # Raw res5 output for the (single) configured input feature map.
        features = [features[f] for f in self.in_features]
        return self.res5(features[0])
def build_data_set(dataset_name, istrain, cust_trans=None):
    """Construct a train or eval dataset object by name.

    Fixed: the original used ``assert ValueError(...)``, which never fires
    because an exception instance is truthy; the error is now raised.
    """
    eargs_te, eargs_tr = {}, {}
    # celeba variants select named splits; everything else uses a train= flag.
    if 'celeba' in dataset_name:
        eargs_te['split'] = 'valid'
        eargs_tr['split'] = 'train'
    else:
        eargs_te['train'] = False
        eargs_tr['train'] = True
    T = transforms.ToTensor()
    if cust_trans and (not (dataset_name == 'omni32')):
        raise ValueError('only omni32 support cust_trans')
    if dataset_name == 'mnist':
        logger.info('use datasets.MNIST obj')
        data_obj = datasets.MNIST
    elif dataset_name == 'stoch_mnist':
        from utils.stoch_mnist import stochMNIST
        data_obj = stochMNIST
    elif 'mnistf' in dataset_name:
        # Few-shot MNIST variants; optional "v<k>" suffix selects a split index.
        if 'v' in dataset_name:
            split_index = int(re.findall('mnistf([\\d]+)v([\\d]+)', dataset_name)[0][1])
        else:
            split_index = 0
        percent = parse_subset_size_0to1(dataset_name)
        logger.debug('build mnist few shot with name: {} | create partial obj, per={},splitID={}', dataset_name, percent, split_index)
        if 'dmnist' in dataset_name:
            from utils.stoch_mnist import MNISTfew
            assert (percent >= 0.1), ('accept percent in 0.1,0.2,0.3,...0.9,1 only, get %f' % percent)
        else:
            from utils.stoch_mnist import MNISTfewBySample as MNISTfew
            if split_index > 0:
                raise NotImplementedError('not support index > 0')
        data_obj = partial(MNISTfew, percent, split_index)
    elif dataset_name == 'fixed_mnist':
        from utils.stoch_mnist import fixedMNIST
        data_obj = fixedMNIST
    elif 'omnif' in dataset_name:
        percent = parse_subset_size_0to1(dataset_name)
        logger.debug('build omni few shot with name: {} | create partial obj', dataset_name)
        if 'aomnif' in dataset_name:
            # Alphabet-split variant; only split index 0 is implemented.
            from utils.omniglot import omniglot_fews_alphabet as omniglot_fews
            if re.search('aomnif([\\d]+)v([\\d])', dataset_name):
                random_split_index = int(re.findall('aomnif([\\d]+)v([\\d])', dataset_name)[0][1])
                logger.info('[build_data_set] get random split index: {}', random_split_index)
                if random_split_index != 0:
                    raise NotImplementedError('not support index > 0 now')
            elif re.search('aomnif([\\d]+)', dataset_name):
                random_split_index = 0
                logger.info('[build_data_set] get random split index: {}', random_split_index)
                if random_split_index != 0:
                    raise NotImplementedError('not support index > 0 now')
        else:
            from utils.omniglot import omniglot_fews
        data_obj = partial(omniglot_fews, percent)
    elif dataset_name == 'omni':
        from utils.omniglot import omniglot
        data_obj = omniglot
    elif dataset_name in ['cifar', 'cifarg', 'cifargs', 'cifarc', 'cifarcm', 'cifarcs', 'cifarc2s']:
        data_obj = datasets.CIFAR10
    elif re.search('cifar([\\d]+)', dataset_name):
        from utils.datasets import CifarFews
        percent = parse_subset_size_0to1(dataset_name)
        data_obj = partial(CifarFews, percent)
    elif re.search('celebaf([\\d]+)', dataset_name):
        logger.info('BUILD data: tag=celeba few ')
        from utils.celeba import CelebAFews
        percent_float = parse_subset_size_0to1(dataset_name)
        data_obj = partial(CelebAFews, percent_float)
        T = get_data_transforms(dataset_name, istrain)
        logger.debug('data: {}, transform: {}', dataset_name, T)
    elif 'celeba' in dataset_name:
        logger.info('BUILD data: tag=celeba')
        from utils.celeba import CelebA
        data_obj = CelebA
        T = get_data_transforms(dataset_name, istrain)
        logger.debug('data: {}, transform: {}', dataset_name, T)
    else:
        raise ValueError('NOT support %s' % dataset_name)
    logger.debug('data_obj: {} | tr: {}, te: {}', data_obj, eargs_tr, eargs_te)
    extra_args = eargs_tr if istrain else eargs_te
    loaded_set = data_obj('datasets', download=True, transform=T, **extra_args)
    logger.info('<dataset> {} (n={}) is built', dataset_name, len(loaded_set))
    return loaded_set
def vgg6(conv_layer, linear_layer, init_type, **kwargs):
    """Build a VGG-6 (no batch norm) and initialize its weights."""
    # Classifier input width = the last integer entry in the layer config.
    last_width = [v for v in cfgs['6'] if isinstance(v, int)][-1]
    model = VGG(make_layers(cfgs['6'], conv_layer, batch_norm=False), last_width, linear_layer, **kwargs)
    initialize_weights(model, init_type)
    return model
class convprojection(nn.Module):
    """Decoder that progressively upsamples the coarsest feature (x2[0]) and
    fuses encoder skip connections (x1) down to a 16x-upsampled feature map."""

    def __init__(self, path=None, **kwargs):
        super(convprojection, self).__init__()
        # 4x4 stride-2 upsampling layers, reducing channel depth scale by scale.
        self.convd32x = UpsampleConvLayer(512, 512, kernel_size=4, stride=2)
        self.convd16x = UpsampleConvLayer(512, 320, kernel_size=4, stride=2)
        self.dense_4 = nn.Sequential(ResidualBlock(320))
        self.convd8x = UpsampleConvLayer(320, 128, kernel_size=4, stride=2)
        self.dense_3 = nn.Sequential(ResidualBlock(128))
        self.convd4x = UpsampleConvLayer(128, 64, kernel_size=4, stride=2)
        self.dense_2 = nn.Sequential(ResidualBlock(64))
        self.convd2x = UpsampleConvLayer(64, 16, kernel_size=4, stride=2)
        self.dense_1 = nn.Sequential(ResidualBlock(16))
        self.convd1x = UpsampleConvLayer(16, 8, kernel_size=4, stride=2)
        # NOTE(review): conv_output and active are defined but not used in
        # forward() below -- possibly consumed by a subclass; confirm.
        self.conv_output = ConvLayer(8, 3, kernel_size=3, stride=1, padding=1)
        self.active = nn.Tanh()

    def forward(self, x1, x2):
        res32x = self.convd32x(x2[0])
        # Negative F.pad trims one row/column when upsampling overshoots the
        # skip connection's spatial size (odd input dimensions).
        if ((x1[3].shape[3] != res32x.shape[3]) and (x1[3].shape[2] != res32x.shape[2])):
            p2d = (0, (- 1), 0, (- 1))
            res32x = F.pad(res32x, p2d, 'constant', 0)
        elif ((x1[3].shape[3] != res32x.shape[3]) and (x1[3].shape[2] == res32x.shape[2])):
            p2d = (0, (- 1), 0, 0)
            res32x = F.pad(res32x, p2d, 'constant', 0)
        elif ((x1[3].shape[3] == res32x.shape[3]) and (x1[3].shape[2] != res32x.shape[2])):
            p2d = (0, 0, 0, (- 1))
            res32x = F.pad(res32x, p2d, 'constant', 0)
        res16x = (res32x + x1[3])
        res16x = self.convd16x(res16x)
        # Same shape-fixing logic at the 16x scale.
        if ((x1[2].shape[3] != res16x.shape[3]) and (x1[2].shape[2] != res16x.shape[2])):
            p2d = (0, (- 1), 0, (- 1))
            res16x = F.pad(res16x, p2d, 'constant', 0)
        elif ((x1[2].shape[3] != res16x.shape[3]) and (x1[2].shape[2] == res16x.shape[2])):
            p2d = (0, (- 1), 0, 0)
            res16x = F.pad(res16x, p2d, 'constant', 0)
        elif ((x1[2].shape[3] == res16x.shape[3]) and (x1[2].shape[2] != res16x.shape[2])):
            p2d = (0, 0, 0, (- 1))
            res16x = F.pad(res16x, p2d, 'constant', 0)
        # Residual refinement + skip fusion at each remaining scale.
        res8x = (self.dense_4(res16x) + x1[2])
        res8x = self.convd8x(res8x)
        res4x = (self.dense_3(res8x) + x1[1])
        res4x = self.convd4x(res4x)
        res2x = (self.dense_2(res4x) + x1[0])
        res2x = self.convd2x(res2x)
        x = res2x
        x = self.dense_1(x)
        x = self.convd1x(x)
        return x
# NOTE(review): the decorators were corrupted in the source ("_module()" and
# "_only"); restored to @HOOKS.register_module() / @master_only as in mmcv.
@HOOKS.register_module()
class WandbLoggerHook(LoggerHook):
    """MMCV logger hook that mirrors training logs (and optionally log-file
    artifacts) to Weights & Biases."""

    def __init__(self, init_kwargs=None, interval=10, ignore_last=True, reset_flag=False, commit=True, by_epoch=True, with_step=True, log_artifact=True, out_suffix=('.log.json', '.log', '.py')):
        super(WandbLoggerHook, self).__init__(interval, ignore_last, reset_flag, by_epoch)
        self.import_wandb()
        self.init_kwargs = init_kwargs
        self.commit = commit
        self.with_step = with_step
        self.log_artifact = log_artifact
        self.out_suffix = out_suffix

    def import_wandb(self):
        # Import lazily so wandb is only required when this hook is used.
        try:
            import wandb
        except ImportError:
            raise ImportError('Please run "pip install wandb" to install wandb')
        self.wandb = wandb

    @master_only
    def before_run(self, runner):
        super(WandbLoggerHook, self).before_run(runner)
        if self.wandb is None:
            self.import_wandb()
        if self.init_kwargs:
            self.wandb.init(**self.init_kwargs)
        else:
            self.wandb.init()

    @master_only
    def log(self, runner):
        tags = self.get_loggable_tags(runner)
        if tags:
            if self.with_step:
                self.wandb.log(tags, step=self.get_iter(runner), commit=self.commit)
            else:
                # Record the step as a plain metric instead of wandb's step axis.
                tags['global_step'] = self.get_iter(runner)
                self.wandb.log(tags, commit=self.commit)

    @master_only
    def after_run(self, runner):
        if self.log_artifact:
            # Upload log files matching out_suffix from the work dir.
            wandb_artifact = self.wandb.Artifact(name='artifacts', type='model')
            for filename in scandir(runner.work_dir, self.out_suffix, True):
                local_filepath = osp.join(runner.work_dir, filename)
                wandb_artifact.add_file(local_filepath)
            self.wandb.log_artifact(wandb_artifact)
        self.wandb.join()
class IntegerSBXCrossover(Crossover[(IntegerSolution, IntegerSolution)]):
    """Simulated Binary Crossover (SBX) for integer-encoded solutions, following
    the jMetal implementation; children are truncated back to int."""

    # Variables closer than this are treated as identical (no crossover).
    __EPS = 1e-14

    def __init__(self, probability: float, distribution_index: float=20.0):
        super(IntegerSBXCrossover, self).__init__(probability=probability)
        self.distribution_index = distribution_index

    def execute(self, parents: List[IntegerSolution]) -> List[IntegerSolution]:
        Check.that(issubclass(type(parents[0]), IntegerSolution), 'Solution type invalid')
        Check.that(issubclass(type(parents[1]), IntegerSolution), 'Solution type invalid')
        Check.that((len(parents) == 2), 'The number of parents is not two: {}'.format(len(parents)))
        offspring = copy.deepcopy(parents)
        rand = random.random()
        if (rand <= self.probability):
            for i in range(parents[0].number_of_variables):
                (value_x1, value_x2) = (parents[0].variables[i], parents[1].variables[i])
                # Each variable is crossed with probability 0.5.
                if (random.random() <= 0.5):
                    if (abs((value_x1 - value_x2)) > self.__EPS):
                        # Order so that y1 <= y2.
                        if (value_x1 < value_x2):
                            (y1, y2) = (value_x1, value_x2)
                        else:
                            (y1, y2) = (value_x2, value_x1)
                        (lower_bound, upper_bound) = (parents[0].lower_bound[i], parents[1].upper_bound[i])
                        # Spread factor towards the lower bound for child c1.
                        beta = (1.0 + ((2.0 * (y1 - lower_bound)) / (y2 - y1)))
                        alpha = (2.0 - pow(beta, (- (self.distribution_index + 1.0))))
                        rand = random.random()
                        if (rand <= (1.0 / alpha)):
                            betaq = pow((rand * alpha), (1.0 / (self.distribution_index + 1.0)))
                        else:
                            betaq = pow((1.0 / (2.0 - (rand * alpha))), (1.0 / (self.distribution_index + 1.0)))
                        c1 = (0.5 * ((y1 + y2) - (betaq * (y2 - y1))))
                        # Spread factor towards the upper bound for child c2;
                        # the same rand is reused deliberately (as in jMetal).
                        beta = (1.0 + ((2.0 * (upper_bound - y2)) / (y2 - y1)))
                        alpha = (2.0 - pow(beta, (- (self.distribution_index + 1.0))))
                        if (rand <= (1.0 / alpha)):
                            betaq = pow((rand * alpha), (1.0 / (self.distribution_index + 1.0)))
                        else:
                            betaq = pow((1.0 / (2.0 - (rand * alpha))), (1.0 / (self.distribution_index + 1.0)))
                        c2 = (0.5 * ((y1 + y2) + (betaq * (y2 - y1))))
                        # Clamp both children to the variable bounds.
                        if (c1 < lower_bound):
                            c1 = lower_bound
                        if (c2 < lower_bound):
                            c2 = lower_bound
                        if (c1 > upper_bound):
                            c1 = upper_bound
                        if (c2 > upper_bound):
                            c2 = upper_bound
                        # Randomly swap which child gets which value.
                        if (random.random() <= 0.5):
                            offspring[0].variables[i] = int(c2)
                            offspring[1].variables[i] = int(c1)
                        else:
                            offspring[0].variables[i] = int(c1)
                            offspring[1].variables[i] = int(c2)
                    else:
                        # Values effectively equal: copy parents unchanged.
                        offspring[0].variables[i] = value_x1
                        offspring[1].variables[i] = value_x2
                else:
                    offspring[0].variables[i] = value_x1
                    offspring[1].variables[i] = value_x2
        return offspring

    def get_number_of_parents(self) -> int:
        return 2

    def get_number_of_children(self) -> int:
        return 2

    def get_name(self) -> str:
        return 'Integer SBX crossover'
class Item():
    """One TUM RGB-D sample: sequence name plus RGB/depth file stems.

    Fixed: ``get_split_file`` / ``load_split`` take ``cls`` but were missing
    their ``@classmethod`` decorators. NOTE(review): the annotated fields and
    the ``cls(*line)`` construction suggest a stripped ``@dataclass`` decorator
    as well -- confirm against the original module.
    """
    seq: str
    rgb_stem: str
    depth_stem: str

    @classmethod
    def get_split_file(cls, mode: str) -> Path:
        # e.g. <tum>/splits/train_files.txt
        return ((PATHS['tum'] / 'splits') / f'{mode}_files.txt')

    @classmethod
    def load_split(cls, mode: str) -> ty.S['Item']:
        file = cls.get_split_file(mode)
        return [cls(*line) for line in io.readlines(file, split=True)]

    def get_img_file(self) -> Path:
        return ((PATHS['tum'] / self.seq) / self.rgb_stem)

    def get_depth_file(self) -> Path:
        return ((PATHS['tum'] / self.seq) / self.depth_stem)

    def load_img(self) -> Image:
        file = self.get_img_file()
        img = Image.open(file)
        return img

    def load_depth(self) -> ty.A:
        file = self.get_depth_file()
        # TUM depth PNGs store depth * 5000; convert to metres.
        depth = (np.array(Image.open(file), dtype=np.float32) / 5000)
        return depth[(..., None)]
def get_rank():
    """Return this process's rank in the default distributed group.

    Falls back to 0 when torch.distributed is unavailable or the default
    process group has not been initialized (i.e. single-process runs).
    """
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
class NormalizeInitialization(Layer):
    """Keras layer that latches weighted batch statistics from the first
    batch it sees and normalizes every subsequent input with them.

    `call` expects a pair (inputs, sample_weights). On the first call the
    weighted mean/variance of the batch are written into non-trainable
    weights; afterwards the stored statistics are reused unchanged.
    """
    def __init__(self, epsilon=1e-05, **kwargs):
        # epsilon guards the sqrt against zero variance.
        self.epsilon = epsilon
        super(__class__, self).__init__(**kwargs)
    def build(self, input_shape):
        # Shapes arrive as a pair (data shape, weight shape); statistics
        # are kept per feature, i.e. over all axes except the batch axis.
        (input_shape, _) = input_shape
        self.counter = self.add_weight(name='counter', shape=[1], initializer=Zeros(), trainable=False)
        self.mean = self.add_weight(name='mean', shape=input_shape[1:], initializer=Zeros(), trainable=False)
        self.variance = self.add_weight(name='variance', shape=input_shape[1:], initializer=Ones(), trainable=False)
        super(__class__, self).build(input_shape)
    def compute_mask(self, inputs, mask=None):
        # Masks are consumed here, not propagated downstream.
        return None
    def call(self, inputs):
        (inputs, weights) = inputs
        # Normalize sample weights to sum to one for the weighted moments.
        weights = (weights / tf.reduce_sum(weights))
        weights_expand = tf.expand_dims(weights, axis=1)
        (mean, variance) = tf.nn.weighted_moments(inputs, [0], weights_expand)
        counter = K.update_add(self.counter, K.ones_like(self.counter))
        # init is 0 exactly on the first call and 1 afterwards, so the
        # stored statistics are written once and then held fixed.
        init = K.sign((counter - K.ones_like(counter)))
        mean = K.update(self.mean, ((init * self.mean) + ((1.0 - init) * mean)))
        variance = K.update(self.variance, ((init * self.variance) + ((1.0 - init) * variance)))
        mean_expand = tf.expand_dims(mean, axis=0)
        variance_expand = tf.expand_dims(variance, axis=0)
        outputs = ((inputs - mean_expand) / tf.sqrt((variance_expand + self.epsilon)))
        return outputs
def get_python_modules(local_graph_dict, module):
    """Resolve each 'Module' graph node to its live Python module object.

    The root node ('%self.1', or any Module node without an op) maps to
    `module`; every other Module node must be reachable via a single
    prim::GetAttr from an already-resolved parent. The resolved object is
    stored in-place under the 'python_module' key and the (mutated) dict
    is returned.

    Raises Exception for unresolved parents, non-GetAttr single-arg ops,
    and Module nodes with an unexpected number of op arguments.
    """
    for node_name, node_info in local_graph_dict.items():
        if node_info['node_class'] != 'Module':
            continue
        node_op = node_info['node_op']
        # The graph root resolves directly to the top-level module.
        if node_op is None or node_name == '%self.1':
            node_info['python_module'] = module
            continue
        op_def, op_args = node_op['op_def'], node_op['op_args']
        if len(op_args) != 1:
            raise Exception('Module {} not recognized, op def {}, op args {}'.format(node_name, op_def, op_args))
        if 'prim::GetAttr' not in op_def:
            raise Exception('op_def {} conversion to python not implemented, please raise an issue on github'.format(op_def))
        parent_name = op_args[0]
        if 'python_module' not in local_graph_dict[parent_name]:
            raise Exception('python_module not defined for {}'.format(parent_name))
        # op_def looks like ...prim::GetAttr[name="attr"]...; pull out attr.
        attr_name = op_def.split('[')[-1].split(']')[0].replace('name=', '').strip('"')
        node_info['python_module'] = getattr(local_graph_dict[parent_name]['python_module'], attr_name)
    return local_graph_dict
class MLPZinc(nn.Module):
    """Structure-agnostic MLP baseline for ZINC graph regression.

    Node atom types are embedded, pushed through Linear+BatchNorm+ReLU
    blocks (each scaled by the graph-size normalizer snorm_n), mean-pooled
    over the stored DGL graph `self.g`, and regressed to a single scalar.
    """
    def __init__(self, g, in_dim, num_layers, num_hidden, num_atom_type, num_bond_type):
        super(MLPZinc, self).__init__()
        self.g = g  # (batched) DGL graph used for the readout in forward()
        self.num_atom_type = num_atom_type
        self.num_bond_type = num_bond_type  # stored but unused by this MLP baseline
        self.layers = nn.ModuleList()
        self.BNs = nn.ModuleList()
        self.embed = nn.Embedding(num_atom_type, in_dim)
        # First projection from the embedding dim, then num_layers hidden blocks.
        self.layers.append(nn.Linear(in_dim, num_hidden))
        self.BNs.append(nn.BatchNorm1d(num_hidden))
        for _ in range(num_layers):
            self.layers.append(nn.Linear(num_hidden, num_hidden))
            self.BNs.append(nn.BatchNorm1d(num_hidden))
        # Two-layer regression head down to one output.
        self.regressor1 = nn.Linear(num_hidden, (num_hidden // 2))
        self.regressor2 = nn.Linear((num_hidden // 2), 1)
    def forward(self, x, e, snorm_n, snorm_e):
        # x: node atom-type indices. e and snorm_e are accepted for
        # interface parity with GNN models but ignored here.
        h = self.embed(x)
        for (layer, bn) in zip(self.layers, self.BNs):
            h = layer(h)
            h = (h * snorm_n)  # graph-size normalization of node features
            h = bn(h)
            h = torch.relu(h)
        # Graph-level readout: mean over the nodes of the stored graph.
        self.g.ndata['h'] = h
        h = dgl.mean_nodes(self.g, 'h')
        h = torch.relu(h)
        h = self.regressor1(h)
        h = torch.relu(h)
        logits = self.regressor2(h)
        return logits
class Glorot(Initializer):
    """Glorot/Xavier-style wrapper that rescales a base initializer's std.

    `gain` may be the string 'relu' as shorthand for sqrt(2); `c01b`
    selects the c01b 4D weight layout (channels, rows, cols, batch) when
    computing fan-in/fan-out.
    """
    def __init__(self, initializer, gain=1.0, c01b=False):
        self.initializer = initializer
        self.gain = np.sqrt(2) if gain == 'relu' else gain
        self.c01b = c01b
    def sample(self, shape):
        """Draw a sample of `shape` using the Glorot-scaled std."""
        if self.c01b:
            if len(shape) != 4:
                raise RuntimeError('If c01b is True, only shapes of length 4 are accepted')
            fan_in, fan_out = shape[0], shape[3]
            receptive_field_size = shape[1] * shape[2]
        else:
            if len(shape) < 2:
                raise RuntimeError('This initializer only works with shapes of length >= 2')
            fan_in, fan_out = shape[:2]
            receptive_field_size = np.prod(shape[2:])
        std = self.gain * np.sqrt(2.0 / ((fan_in + fan_out) * receptive_field_size))
        return self.initializer(std=std).sample(shape)
def get_model(name, **kwargs):
    """Instantiate a registered model by (case-insensitive) name.

    Extra keyword arguments are forwarded to the model constructor.
    Raises ValueError when `name` is not in the `_models` registry.
    """
    key = name.lower()
    if key not in _models:
        raise ValueError('Unsupported model: {}'.format(key))
    return _models[key](**kwargs)
class ShuffleNetV2b(nn.Module):
    """ShuffleNetV2(b) backbone: init block, shuffle-unit stages, a final
    1x1 conv block, 7x7 average pooling and a linear classifier.

    `channels` is a list of per-stage output-channel lists; the first unit
    of each stage downsamples.
    """
    def __init__(self, channels, init_block_channels, final_block_channels, use_se=False, use_residual=False, shuffle_group_first=True, in_channels=3, in_size=(224, 224), num_classes=1000):
        super(ShuffleNetV2b, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.features = nn.Sequential()
        self.features.add_module('init_block', ShuffleInitBlock(in_channels=in_channels, out_channels=init_block_channels))
        prev_channels = init_block_channels
        for stage_idx, stage_channels in enumerate(channels, start=1):
            stage = nn.Sequential()
            for unit_idx, unit_channels in enumerate(stage_channels, start=1):
                # Only the first unit of each stage performs downsampling.
                stage.add_module('unit{}'.format(unit_idx), ShuffleUnit(in_channels=prev_channels, out_channels=unit_channels, downsample=(unit_idx == 1), use_se=use_se, use_residual=use_residual, shuffle_group_first=shuffle_group_first))
                prev_channels = unit_channels
            self.features.add_module('stage{}'.format(stage_idx), stage)
        self.features.add_module('final_block', conv1x1_block(in_channels=prev_channels, out_channels=final_block_channels))
        self.features.add_module('final_pool', nn.AvgPool2d(kernel_size=7, stride=1))
        self.output = nn.Linear(in_features=final_block_channels, out_features=num_classes)
        self._init_params()
    def _init_params(self):
        """Kaiming-uniform init for every conv weight; zero conv biases."""
        for _, module in self.named_modules():
            if not isinstance(module, nn.Conv2d):
                continue
            init.kaiming_uniform_(module.weight)
            if module.bias is not None:
                init.constant_(module.bias, 0)
    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.output(flat)
# NOTE(review): the bare `_model` expression below looks like the remnant
# of a stripped decorator (probably `@register_model`); as written it
# raises NameError at import time unless `_model` happens to exist at
# module scope — restore the decorator from the original source.
_model
def efficientnet_b1(pretrained=False, **kwargs):
    """EfficientNet-B1: width multiplier 1.0, depth multiplier 1.1 (vs B0)."""
    model = _gen_efficientnet('efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs)
    return model
def masked_mape_np(preds, labels, null_val=np.nan):
    """Mean absolute percentage error, ignoring entries whose label equals
    `null_val` (NaN labels are masked when null_val is NaN).

    The validity mask is renormalized to mean 1 so that masked entries
    contribute zero while the mean stays an average over valid entries.
    """
    with np.errstate(divide='ignore', invalid='ignore'):
        if np.isnan(null_val):
            valid = ~np.isnan(labels)
        else:
            valid = np.not_equal(labels, null_val)
        valid = valid.astype('float32')
        valid /= np.mean(valid)
        ape = np.abs(np.divide(np.subtract(preds, labels).astype('float32'), labels))
        # nan_to_num zeroes the 0*inf products produced at masked entries.
        ape = np.nan_to_num(valid * ape)
        return np.mean(ape)
def word_swap(s):
    """Replace `s` with a random member of its transform group, if any.

    Scans the transform groups registered for the active LANGUAGE; the
    first group containing `s` yields a uniformly random replacement
    (possibly `s` itself). Unknown words pass through unchanged.
    """
    for group in word_transforms[LANGUAGE]:
        if s in group:
            return random.sample(group, 1)[0]
    return s
def create_buffers(flags, obs_shape, num_actions) -> Buffers:
    """Allocate `flags.num_buffers` sets of shared-memory rollout tensors.

    Each tensor holds T + 1 timesteps (T = flags.unroll_length) so the
    last step of one unroll can seed the next one. Returns a dict mapping
    field name -> list of `num_buffers` shared-memory tensors.
    """
    steps = flags.unroll_length + 1
    specs = {
        'frame': dict(size=(steps, *obs_shape), dtype=torch.uint8),
        'reward': dict(size=(steps,), dtype=torch.float32),
        'done': dict(size=(steps,), dtype=torch.bool),
        'episode_return': dict(size=(steps,), dtype=torch.float32),
        'episode_step': dict(size=(steps,), dtype=torch.int32),
        'policy_logits': dict(size=(steps, num_actions), dtype=torch.float32),
        'baseline': dict(size=(steps,), dtype=torch.float32),
        'last_action': dict(size=(steps,), dtype=torch.int64),
        'action': dict(size=(steps,), dtype=torch.int64),
    }
    buffers: Buffers = {key: [] for key in specs}
    for _ in range(flags.num_buffers):
        for key, spec in specs.items():
            buffers[key].append(torch.empty(**spec).share_memory_())
    return buffers
class DataTrainingArguments():
    """Arguments controlling what data to use for MLM training/evaluation.

    NOTE(review): the `field(...)` defaults and `__post_init__` imply this
    is a @dataclass whose decorator is outside this view (or was stripped
    during extraction) — confirm against the original file.
    """
    # Either a datasets-hub dataset name or local train/validation files
    # must be provided (enforced in __post_init__).
    dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
    dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'})
    validation_file: Optional[str] = field(default=None, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'})
    train_ref_file: Optional[str] = field(default=None, metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'})
    validation_ref_file: Optional[str] = field(default=None, metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'})
    overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
    max_seq_length: Optional[int] = field(default=None, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated. Default to the max input length of the model.'})
    preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
    mlm_probability: float = field(default=0.15, metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'})
    pad_to_max_length: bool = field(default=False, metadata={'help': 'Whether to pad all samples to `max_seq_length`. If False, will pad the samples dynamically when batching to the maximum length in the batch.'})
    def __post_init__(self):
        """Validate that a dataset name or at least one data file was given
        and that provided files have a csv/json/txt extension."""
        if ((self.dataset_name is None) and (self.train_file is None) and (self.validation_file is None)):
            raise ValueError('Need either a dataset name or a training/validation file.')
        else:
            if (self.train_file is not None):
                extension = self.train_file.split('.')[(- 1)]
                assert (extension in ['csv', 'json', 'txt']), '`train_file` should be a csv, a json or a txt file.'
            if (self.validation_file is not None):
                extension = self.validation_file.split('.')[(- 1)]
                assert (extension in ['csv', 'json', 'txt']), '`validation_file` should be a csv, a json or a txt file.'
def warmup_linear(x, warmup=0.002):
    """Linear warmup followed by linear decay.

    For training progress x in [0, 1]: ramps from 0 to 1 over the first
    `warmup` fraction, then decays linearly back to 0 at x == 1 (clamped
    at 0 beyond that).
    """
    if x >= warmup:
        return max((x - 1.0) / (warmup - 1.0), 0)
    return x / warmup
def collate_results(results):
    """Group raw episode dicts into per-scenario Result accumulators.

    Each input dict must provide 'Name', 'Steps' and 'Total reward'; a
    Result is created lazily the first time a scenario name appears.
    """
    by_name = {}
    for entry in results:
        name = entry['Name']
        if name not in by_name:
            by_name[name] = Result(name)
        by_name[name].add(entry['Steps'], entry['Total reward'])
    return by_name
def format_text(text):
    """Collapse all whitespace in `text` to single spaces and trim.

    Pipeline: normalize Unicode space separators, squeeze runs of spaces,
    unify newlines (absorbing surrounding space/tab padding), turn newline
    runs into single spaces, then strip the ends.
    """
    substitutions = (
        (regex, '[\\p{Z}]', ' '),
        (re, '([ ]{2,})', ' '),
        (re, '([ \\t]+)?[\\r\\n]([ \\t]+)?', '\n'),
        (re, '\\n+', ' '),
    )
    for engine_mod, pattern, repl in substitutions:
        text = engine_mod.sub(pattern, repl, text)
    return text.strip()
class ResNet(nn.Module):
    """Person re-identification backbone wrapping a torchvision ResNet.

    Three operating modes, selected at construction:
      * cut_at_pooling: forward() returns raw conv features.
      * FCN: part-based branch — a learned 1x1 conv produces a soft part
        mask that pools features into `num_parts` horizontal stripes plus
        one global branch, each with its own classifier head.
      * default: global average pooling, optional embedding + BN, optional
        dropout and classifier.
    """
    # Maps the `depth` argument to the matching torchvision constructor.
    __factory = {18: torchvision.models.resnet18, 34: torchvision.models.resnet34, 50: torchvision.models.resnet50, 101: torchvision.models.resnet101, 152: torchvision.models.resnet152}
    def __init__(self, depth, pretrained=True, cut_at_pooling=False, num_features=0, norm=False, dropout=0.5, num_classes=0, FCN=False, T=1, dim=256, num_parts=6):
        super(ResNet, self).__init__()
        self.depth = depth
        self.pretrained = pretrained
        self.cut_at_pooling = cut_at_pooling
        self.FCN = FCN
        self.T = T  # temperature-like factor; unused in forward() as written
        self.reduce_dim = dim  # channel dim after 3D pooling in the FCN branch
        self.num_parts = num_parts
        if (depth not in ResNet.__factory):
            raise KeyError('Unsupported depth:', depth)
        self.base = ResNet.__factory[depth](pretrained=pretrained)
        if self.FCN:
            # Keep layer4 at stride 1 so the feature map stays high-resolution
            # for part pooling (24x8 for a 384x128 input — TODO confirm).
            self.base.layer4[0].conv2.stride = (1, 1)
            self.base.layer4[0].downsample[0].stride = (1, 1)
            self.num_features = num_features
            self.num_classes = num_classes
            self.dropout = dropout
            # One head per part plus one for the global branch.
            self.instance = nn.ModuleList()
            for i in range((self.num_parts + 1)):
                if (self.num_features != 2048):
                    local_conv = nn.Linear(2048, self.num_features, bias=False)
                    init.kaiming_normal_(local_conv.weight, mode='fan_out')
                local_bn = nn.BatchNorm1d(self.num_features)
                init.constant_(local_bn.weight, 1)
                init.constant_(local_bn.bias, 0)
                fc = nn.Linear(self.num_features, self.num_classes)
                init.normal_(fc.weight, std=0.001)
                init.constant_(fc.bias, 0)
                if (self.num_features != 2048):
                    self.instance.append(nn.Sequential(nn.Dropout(self.dropout), local_conv, local_bn, fc))
                else:
                    self.instance.append(nn.Sequential(nn.Dropout(self.dropout), local_bn, fc))
            self.drop = nn.Dropout(self.dropout)
            # Predicts a per-location score for each part (the soft mask).
            self.local_mask = nn.Conv2d(self.reduce_dim, self.num_parts, kernel_size=1, bias=True)
            init.kaiming_normal_(self.local_mask.weight, mode='fan_out')
            init.constant_(self.local_mask.bias, 0)
        elif (not self.cut_at_pooling):
            self.num_features = num_features
            self.norm = norm
            self.dropout = dropout
            self.has_embedding = (num_features > 0)
            self.num_classes = num_classes
            out_planes = self.base.fc.in_features
            if self.has_embedding:
                self.feat = nn.Linear(out_planes, self.num_features, bias=False)
                self.feat_bn = nn.BatchNorm1d(self.num_features)
                init.kaiming_normal_(self.feat.weight, mode='fan_out')
                init.constant_(self.feat_bn.weight, 1)
                init.constant_(self.feat_bn.bias, 0)
            else:
                # Without an embedding layer, features keep the backbone width.
                self.num_features = out_planes
            if (self.dropout > 0):
                self.drop = nn.Dropout(self.dropout)
            if (self.num_classes > 0):
                self.classifier = nn.Linear(self.num_features, self.num_classes)
                init.normal_(self.classifier.weight, std=0.001)
                init.constant_(self.classifier.bias, 0)
        if (not self.pretrained):
            self.reset_params()
    def forward(self, inputs, part_labels=None):
        x = inputs
        if (part_labels is None):
            # Default part labels: 24 rows split into equal groups of 4
            # (values 0..5 — presumably matching a 24-row feature map).
            tmp = torch.FloatTensor(range(1, 25))
            tmp = ((tmp - 0.1) / 4).int()
            part_labels = tmp.unsqueeze(0).expand(inputs.size(0), tmp.size(0))
            # NOTE(review): .cuda() hard-codes GPU execution here.
            part_labels = torch.autograd.Variable(part_labels.cuda())
        # Run the backbone up to (but excluding) its avgpool/fc head.
        for (name, module) in self.base._modules.items():
            if (name == 'avgpool'):
                break
            x = module(x)
        if self.cut_at_pooling:
            return x
        if self.FCN:
            T = self.T
            y = self.drop(x).unsqueeze(1)
            # Reduce 2048 channels to reduce_dim via 3D pooling over
            # (channel-groups, 1, width).
            stride = (2048 // self.reduce_dim)
            y = F.avg_pool3d(y, kernel_size=(stride, 1, 8), stride=(stride, 1, 8)).squeeze(1)
            x_global = F.avg_pool2d(x, (24, 8))
            local_score = self.local_mask(y)
            local_score = local_score.squeeze(3)
            # Softmax over parts (detached so the mask isn't trained through
            # this path), then normalize each part's weights to sum to 1.
            score = F.softmax((1 * local_score.detach()), 1)
            pscore = score.sum(2)
            score = (score / pscore.unsqueeze(2).expand_as(score))
            (bb, cc, hh, ww) = x.size()
            # Weighted pooling of features into num_parts stripes.
            feat = (x.unsqueeze(2).expand(bb, cc, self.num_parts, hh, ww) * score.unsqueeze(1).unsqueeze(4).expand(bb, cc, self.num_parts, hh, ww))
            feat = feat.sum(4).sum(3).unsqueeze(3)
            x = feat
            # NOTE(review): this first out0 is immediately overwritten by the
            # L2-normalized version on the next line.
            out0 = x.view(x.size(0), (- 1))
            out0 = (x / torch.clamp(x.norm(2, 1).unsqueeze(1).expand_as(x), min=1e-12))
            # Per-part chunks plus the global branch, one head each.
            x_list = list(x.chunk(x.size(2), 2))
            x_list.append(x_global)
            c = []
            for (tensor, branch) in zip(x_list, self.instance):
                tensor = tensor.contiguous().view(tensor.size(0), (- 1))
                c.append(branch(tensor))
            ps = local_score
            return (out0, c, ps, pscore)
        # Default branch: global pooling + optional embedding/classifier.
        x = F.avg_pool2d(x, x.size()[2:])
        x = x.view(x.size(0), (- 1))
        # NOTE(review): out1 is overwritten by its normalized form and then
        # never returned — dead code kept as-is.
        out1 = x
        out1 = (x / x.norm(2, 1).unsqueeze(1).expand_as(x))
        if self.has_embedding:
            x = self.feat(x)
            x = self.feat_bn(x)
        out2 = (x / x.norm(2, 1).unsqueeze(1).expand_as(x))
        if self.norm:
            x = (x / x.norm(2, 1).unsqueeze(1).expand_as(x))
        if (self.dropout > 0):
            x = self.drop(x)
        if (self.num_classes > 0):
            x = self.classifier(x)
        return (out2, x)
    def reset_params(self):
        """Re-initialize all conv/BN/linear parameters from scratch
        (used when the backbone is not ImageNet-pretrained)."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight, mode='fan_out')
                if (m.bias is not None):
                    init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant_(m.weight, 1)
                init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal_(m.weight, std=0.001)
                if (m.bias is not None):
                    init.constant_(m.bias, 0)
class PPOTest(tf.test.TestCase):
    """Smoke tests for the PPO training loop: runs short trainings on real
    and mock environments and only checks that scores are produced without
    crashing (each yielded score must be float()-convertible)."""
    def test_no_crash_cheetah(self):
        """Short HalfCheetah run with both policy network types."""
        nets = (networks.ForwardGaussianPolicy, networks.RecurrentGaussianPolicy)
        for network in nets:
            config = self._define_config()
            with config.unlocked:
                config.env = 'HalfCheetah-v1'
                config.max_length = 200
                config.steps = 1000
                config.network = network
            for score in train.train(config, env_processes=True):
                float(score)
    def test_no_crash_ant(self):
        """Short Ant run with both policy network types."""
        nets = (networks.ForwardGaussianPolicy, networks.RecurrentGaussianPolicy)
        for network in nets:
            config = self._define_config()
            with config.unlocked:
                config.env = 'Ant-v1'
                config.max_length = 200
                config.steps = 1000
                config.network = network
            for score in train.train(config, env_processes=True):
                float(score)
    def test_no_crash_observation_shape(self):
        """Mock environments with 1D/2D/3D observation shapes."""
        nets = (networks.ForwardGaussianPolicy, networks.RecurrentGaussianPolicy)
        observ_shapes = ((1,), (2, 3), (2, 3, 4))
        for (network, observ_shape) in itertools.product(nets, observ_shapes):
            config = self._define_config()
            with config.unlocked:
                config.env = functools.partial(tools.MockEnvironment, observ_shape, action_shape=(3,), min_duration=15, max_duration=15)
                config.max_length = 20
                config.steps = 100
                config.network = network
            for score in train.train(config, env_processes=False):
                float(score)
    def test_no_crash_variable_duration(self):
        """Mock environment with episodes of varying length (recurrent policy)."""
        config = self._define_config()
        with config.unlocked:
            config.env = functools.partial(tools.MockEnvironment, observ_shape=(2, 3), action_shape=(3,), min_duration=5, max_duration=25)
            config.max_length = 25
            config.steps = 200
            config.network = networks.RecurrentGaussianPolicy
        for score in train.train(config, env_processes=False):
            float(score)
    def _define_config(self):
        """Build a small test config as an AttrDict.

        NOTE(review): locals().update(...) merges the default config into
        this frame's locals so tools.AttrDict(locals()) picks everything
        up — a deliberate hack; mutating locals() this way is
        CPython-version-dependent.
        """
        locals().update(configs.default())
        algorithm = ppo.PPOAlgorithm
        num_agents = 2
        update_every = 4
        use_gpu = False
        policy_layers = (20, 10)
        value_layers = (20, 10)
        update_epochs_policy = 2
        update_epochs_value = 2
        return tools.AttrDict(locals())
def rotationMatrixToEulerAngles(R):
    """Convert a 3x3 rotation matrix to XYZ Euler angles (radians).

    Classic decomposition: sy = sqrt(R00^2 + R10^2) is the cosine of the
    y angle; near gimbal lock (sy ~ 0) z is fixed to 0 and x absorbs the
    remaining rotation.
    """
    sy = math.sqrt((R[0, 0] * R[0, 0]) + (R[1, 0] * R[1, 0]))
    if sy >= 1e-06:
        x = math.atan2(R[2, 1], R[2, 2])
        y = math.atan2(-R[2, 0], sy)
        z = math.atan2(R[1, 0], R[0, 0])
    else:
        # Singular (gimbal-lock) case.
        x = math.atan2(-R[1, 2], R[1, 1])
        y = math.atan2(-R[2, 0], sy)
        z = 0
    return np.array([x, y, z])
class Prcc(BaseImageDataset):
    """PRCC (cloth-changing person re-ID) dataset loader.

    Directory layout: <root>/prcc/rgb/{train,val,test}/...
    Camera letters map A->0, B->1, C->2 after the -1 shift; in this
    dataset camera A is the probe, B shares clothes with A and C has
    different clothes, so `cloth_id` is derived directly from the camera.
    """
    dataset_dir = 'prcc/rgb'
    def __init__(self, root='/home/jinx/data', verbose=True, **kwargs):
        super(Prcc, self).__init__()
        self.dataset_dir = osp.join(root, self.dataset_dir)
        self.train_dir = osp.join(self.dataset_dir, 'train')
        self.validation_dir = osp.join(self.dataset_dir, 'val')
        self.probe_gallery_dir = osp.join(self.dataset_dir, 'test')
        self._check_before_run()
        train = self._process_dir(self.train_dir, relabel=True)
        (query, gallery) = self._process_test_dir(self.probe_gallery_dir, relabel=False)
        if verbose:
            print('=> PRCC dataset loaded')
            self.print_dataset_statistics(train, query, gallery)
        self.train = train
        self.query = query
        self.gallery = gallery
        (self.num_train_pids, self.num_train_cloth_ids, self.num_train_imgs, self.num_train_cams) = self.get_imagedata_info(self.train)
        (self.num_query_pids, self.num_query_cloth_ids, self.num_query_imgs, self.num_query_cams) = self.get_imagedata_info(self.query)
        (self.num_gallery_pids, self.num_gallery_cloth_ids, self.num_gallery_imgs, self.num_gallery_cams) = self.get_imagedata_info(self.gallery)
    def _check_before_run(self):
        """Fail fast when a required split directory is missing."""
        if (not osp.exists(self.dataset_dir)):
            raise RuntimeError("'{}' is not available".format(self.dataset_dir))
        if (not osp.exists(self.train_dir)):
            raise RuntimeError("'{}' is not available".format(self.train_dir))
        if (not osp.exists(self.probe_gallery_dir)):
            raise RuntimeError("'{}' is not available".format(self.probe_gallery_dir))
    def _process_dir(self, dir_path, relabel=False):
        """Build (img_path, pid, cloth_id, camid) tuples for a train-style dir.

        First pass collects person IDs for optional relabeling to
        contiguous labels; the second pass emits the tuples. The camera is
        encoded in the first letter of each image filename.
        """
        pid_dirs_path = glob.glob(osp.join(dir_path, '*'))
        dataset = []
        pid_container = set()
        camid_mapper = {'A': 1, 'B': 2, 'C': 3}
        for pid_dir_path in pid_dirs_path:
            img_paths = glob.glob(osp.join(pid_dir_path, '*.jp*'))
            # NOTE(review): the per-image loop only re-adds the same pid;
            # one add per directory would suffice.
            for img_path in img_paths:
                pid = int(osp.basename(pid_dir_path))
                pid_container.add(pid)
        pid2label = {pid: label for (label, pid) in enumerate(pid_container)}
        for pid_dir_path in pid_dirs_path:
            img_paths = glob.glob(osp.join(pid_dir_path, '*.jp*'))
            for img_path in img_paths:
                pid = int(osp.basename(pid_dir_path))
                camid = camid_mapper[osp.basename(img_path)[0]]
                cloth_id = camid  # clothes follow the camera in PRCC
                camid -= 1  # 0-based camera index
                if relabel:
                    pid = pid2label[pid]
                dataset.append((img_path, pid, cloth_id, camid))
        return dataset
    def _process_test_dir(self, dir_path, relabel=False):
        """Split the test dir into query (camera A) and gallery lists.

        NOTE(review): depends on a module-level `args` for the
        cross_clothes / same_clothes flags (camera C vs camera B gallery)
        — consider passing these in explicitly. `relabel` is accepted but
        unused here.
        """
        camid_dirs_path = glob.glob(osp.join(dir_path, '*'))
        query = []
        gallery = []
        pid_container = set()
        camid_mapper = {'A': 1, 'B': 2, 'C': 3}
        for camid_dir_path in camid_dirs_path:
            pid_dir_paths = glob.glob(osp.join(camid_dir_path, '*'))
            for pid_dir_path in pid_dir_paths:
                pid = int(osp.basename(pid_dir_path))
                img_paths = glob.glob(osp.join(pid_dir_path, '*'))
                for img_path in img_paths:
                    camid = camid_mapper[osp.basename(camid_dir_path)]
                    camid -= 1
                    if (camid == 0):
                        # Camera A: probe/query set.
                        cloth_id = camid
                        query.append((img_path, pid, cloth_id, camid))
                    elif (args.cross_clothes and (camid == 2)):
                        # Camera C: different-clothes gallery.
                        cloth_id = camid
                        gallery.append((img_path, pid, cloth_id, camid))
                    elif (args.same_clothes and (camid == 1)):
                        # Camera B: same-clothes gallery.
                        cloth_id = camid
                        gallery.append((img_path, pid, cloth_id, camid))
        return (query, gallery)
def numeric_score_fov(pred, gt, mask):
    """Confusion-matrix counts restricted to the field-of-view mask.

    Args:
        pred: binary prediction map (0/1 array).
        gt: binary ground-truth map (0/1 array).
        mask: FOV mask; only pixels where mask == 1 are counted.
    Returns:
        (FP, FN, TP, TN) as Python floats.

    Fixed: `np.float` was deprecated in NumPy 1.20 and removed in 2.0 —
    use the builtin `float` (which is what the alias meant).
    """
    in_fov = (mask == 1)
    FP = float(np.sum((pred == 1) & (gt == 0) & in_fov))
    FN = float(np.sum((pred == 0) & (gt == 1) & in_fov))
    TP = float(np.sum((pred == 1) & (gt == 1) & in_fov))
    TN = float(np.sum((pred == 0) & (gt == 0) & in_fov))
    return (FP, FN, TP, TN)
class FakeProcessor(DataProcessor):
    """Processor for a binary fake-news corpus stored as tokenized CSVs."""
    def get_train_examples(self, data_dir):
        """Read train_tok.csv and wrap each row as an InputExample."""
        path = os.path.join(data_dir, 'train_tok.csv')
        return self._create_examples(self._read_corpus(path, MR=False, clean=False), 'train')
    def get_dev_examples(self, data_dir):
        """Read test_tok.csv and wrap each row as an InputExample."""
        path = os.path.join(data_dir, 'test_tok.csv')
        return self._create_examples(self._read_corpus(path, MR=False, clean=False), 'dev')
    def get_labels(self):
        """The two class labels used by this corpus."""
        return ['1', '2']
    def _create_examples(self, lines, set_type):
        # Each row is (text, label); guid is "<split>-<index>".
        return [InputExample(guid=('%s-%s' % (set_type, idx)), text_a=row[0], text_b=None, label=row[1]) for (idx, row) in enumerate(lines)]
def parse_dollar(line):
    """Spell out '$<amount>' occurrences as '<amount in words> dollars'.

    Numbers are verbalized via the module-level inflect `engine` (commas
    removed, hyphens turned into spaces); any leftover bare '$' signs are
    blanked to spaces.
    """
    for match in re.findall('\\$[0-9][0-9.,]*', line):
        words = engine.number_to_words(match[1:].replace(',', '')).replace('-', ' ')
        line = line.replace(match, words + ' dollars', 1)
    return line.replace('$', ' ')
def main(opt):
    """Preprocess question-generation corpora and serialize them to disk.

    Reads train/valid source-target files (plus optional token-feature,
    answer and answer-feature streams), builds shared or separate
    vocabularies (optionally from a BERT tokenizer), converts tokens to
    indices, attaches copy-mechanism data and pre-trained embeddings, and
    torch.save()s everything to opt.save_data.

    Fixed: the pre-trained-embedding branch referenced undefined names
    `src_vocab` / `tgt_vocab` / `ans_vocab`, raising NameError whenever a
    pre-trained vocab was supplied; it now uses the `vocabularies` dict.
    """
    train_files = {'src': opt.train_src, 'tgt': opt.train_tgt}
    valid_files = {'src': opt.valid_src, 'tgt': opt.valid_tgt}
    # Optional extra streams: token features, answers, answer features.
    if opt.feature:
        assert (len(opt.train_feats) == len(opt.valid_feats)) and (len(opt.train_feats) > 0)
        train_files['feature'], valid_files['feature'] = opt.train_feats, opt.valid_feats
    if opt.answer:
        assert opt.train_ans and opt.valid_ans, 'Answer files of train and valid must be given'
        train_files['ans'], valid_files['ans'] = opt.train_ans, opt.valid_ans
    if opt.ans_feature:
        assert (len(opt.train_ans_feats) == len(opt.valid_ans_feats)) and (len(opt.train_ans_feats) > 0) and (opt.answer == 'enc')
        train_files['ans_feature'], valid_files['ans_feature'] = opt.train_ans_feats, opt.valid_ans_feats
    train_data = get_data(train_files, opt)
    valid_data = get_data(valid_files, opt)
    vocabularies = {}
    sep = (opt.answer == 'sep')
    if sep:
        # 'sep' mode folds the answer tokens into the source sequences.
        train_data['src'] = merge_ans(train_data['src'], train_data['ans'])
        valid_data['src'] = merge_ans(valid_data['src'], valid_data['ans'])
    pre_trained_vocab = load_vocab(opt.pre_trained_vocab) if opt.pre_trained_vocab else None
    if opt.bert_tokenizer:
        options = {'transf': True, 'separate': sep, 'tgt': True}
        bert_tokenizer = Vocab.from_opt(pretrained=opt.bert_tokenizer, opt=options)
    if opt.share_vocab:
        print('build src & tgt vocabulary')
        if opt.bert_tokenizer:
            vocabularies['src'] = vocabularies['tgt'] = bert_tokenizer
        else:
            corpus = train_data['src'] + train_data['tgt']
            corpus = (corpus + train_data['ans']) if (opt.answer == 'enc') else corpus
            options = {'lower': True, 'mode': opt.vocab_trunc_mode, 'transf': (opt.answer != 'enc'), 'separate': sep, 'tgt': True, 'size': max(opt.src_vocab_size, opt.tgt_vocab_size), 'frequency': min(opt.src_words_min_frequency, opt.tgt_words_min_frequency)}
            vocab = Vocab.from_opt(corpus=corpus, opt=options)
            vocabularies['src'] = vocabularies['tgt'] = vocab
        # In 'enc' mode answers share the source vocabulary.
        vocabularies['ans'] = vocabularies['src'] if (opt.answer == 'enc') else None
    else:
        print('build src vocabulary')
        if opt.bert_tokenizer:
            assert (opt.answer != 'enc')
            vocabularies['src'], vocabularies['ans'] = bert_tokenizer, None
        else:
            corpus = (train_data['src'] + train_data['ans']) if (opt.answer == 'enc') else train_data['src']
            options = {'lower': True, 'mode': 'size', 'transf': (opt.answer != 'enc'), 'separate': sep, 'tgt': False, 'size': opt.src_vocab_size, 'frequency': opt.src_words_min_frequency}
            vocabularies['src'] = Vocab.from_opt(corpus=corpus, opt=options)
            vocabularies['ans'] = vocabularies['src'] if (opt.answer == 'enc') else None
        print('build tgt vocabulary')
        options = {'lower': True, 'mode': opt.vocab_trunc_mode, 'transf': False, 'separate': False, 'tgt': True, 'size': opt.tgt_vocab_size, 'frequency': opt.tgt_words_min_frequency}
        vocabularies['tgt'] = Vocab.from_opt(corpus=train_data['tgt'], opt=options)
    # Feature vocabularies share one option set for token and answer features.
    options = {'lower': False, 'mode': 'size', 'size': opt.feat_vocab_size, 'frequency': opt.feat_words_min_frequency, 'transf': (opt.answer != 'enc'), 'separate': sep, 'tgt': False}
    vocabularies['feature'] = [Vocab.from_opt(corpus=feat, opt=options) for feat in train_data['feature']] if opt.feature else None
    vocabularies['ans_feature'] = [Vocab.from_opt(corpus=feat, opt=options) for feat in train_data['ans_feature']] if opt.ans_feature else None
    train_indexes, train_tokens = convert_word_to_idx(train_data, vocabularies, opt)
    valid_indexes, valid_tokens = convert_word_to_idx(valid_data, vocabularies, opt)
    vocabularies['tgt'].word_count(train_indexes['tgt'] + valid_indexes['tgt'])
    train_indexes['copy'] = wrap_copy_idx(train_tokens['src'], train_tokens['tgt'], vocabularies['tgt'], opt.bert_tokenizer, pre_trained_vocab) if opt.copy else {'switch': None, 'tgt': None}
    valid_indexes['copy'] = wrap_copy_idx(valid_tokens['src'], valid_tokens['tgt'], vocabularies['tgt'], opt.bert_tokenizer, pre_trained_vocab) if opt.copy else {'switch': None, 'tgt': None}
    if pre_trained_vocab:
        # BUG FIX: was get_embedding(pre_trained_vocab, src_vocab) etc. with
        # undefined names; use the vocabularies built above.
        pre_trained_src_vocab = None if opt.bert_tokenizer else get_embedding(pre_trained_vocab, vocabularies['src'])
        pre_trained_tgt_vocab = None if (opt.bert_tokenizer and opt.share_vocab) else get_embedding(pre_trained_vocab, vocabularies['tgt'])
        pre_trained_ans_vocab = get_embedding(pre_trained_vocab, vocabularies['ans']) if (opt.answer == 'enc') else None
        pre_trained_vocab = {'src': pre_trained_src_vocab, 'tgt': pre_trained_tgt_vocab, 'ans': pre_trained_ans_vocab}
    vocabularies['pre-trained'] = pre_trained_vocab
    valid_indexes['tokens'] = valid_tokens
    train_indexes['tokens'] = train_tokens
    data = {'settings': opt, 'dict': vocabularies, 'train': train_indexes, 'valid': valid_indexes}
    torch.save(data, opt.save_data)
def add_model_args(parser):
    """Register the '--arch' model-selection option on `parser`.

    The fairseq import stays local, as in the original — presumably to
    defer registry population / avoid import cycles.
    Returns the created argument group.
    """
    from fairseq.models import ARCH_MODEL_REGISTRY
    group = parser.add_argument_group('Model configuration')
    group.add_argument('--arch', '-a', metavar='ARCH', choices=ARCH_MODEL_REGISTRY.keys(), help='model architecture')
    return group
class BaseImageProcessFunc():
    """Abstract callable interface for image preprocessing steps.

    Subclasses implement __call__ to transform an image given a
    preprocessor configuration dict and return a result dict.
    """
    def __call__(self, image: Image.Image, preprocessor: Dict[(str, Any)]) -> Dict[(str, Any)]:
        raise NotImplementedError
class NetworkBlock(nn.Module):
    """Stacks `nb_layers` copies of `block` into one sequential stage.

    The first copy maps in_planes -> out_planes with the given stride;
    every later copy maps out_planes -> out_planes with stride 1.
    """
    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super().__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        # The and/or idiom (kept from the original) selects in_planes and
        # stride only for the first layer — it assumes both are truthy.
        blocks = [block((((i == 0) and in_planes) or out_planes), out_planes, (((i == 0) and stride) or 1), dropRate) for i in range(int(nb_layers))]
        return nn.Sequential(*blocks)
    def forward(self, x):
        return self.layer(x)
class ConsoleHandler(MetricHandlerBase):
    """Metric handler that pretty-prints collected (tag, value) pairs to stdout."""
    def __init__(self, *args, **kwargs):
        super().__init__('data', *args, **kwargs)
        self._was_initialized = False
    def collect(self, collection, time, mode='train'):
        """Print every metric in `collection` with a '<mode>_' tag prefix
        for non-train modes.

        Bug fix: the original computed the prefixed tag in a first loop
        and then discarded it, re-deriving unprefixed tags from the raw
        collection when printing — the mode prefix never appeared.
        """
        rows = []
        for (tag, val) in collection:
            if (mode != 'train'):
                tag = ((mode + '_') + tag)
            rows.append((tag, val))
        print('')
        print(('time: %s' % time))
        print('\n'.join([('%s: %s' % (tag, val)) for (tag, val) in rows]))
        print('')
def vis_h36m_compare(p3d_gt, p3d_pred, save_path):
    """Render a Human3.6M pose video comparing ground truth to predictions.

    The first (num_frames_gt - num_frames_pred) frames show only ground
    truth (and dump per-frame JPGs to ../tmp/); the remaining frames
    overlay the aligned predictions. Writes an FFMpeg video to save_path.

    NOTE(review): plt.gca(projection='3d') was removed in newer matplotlib
    (use fig.add_subplot(projection='3d')) — confirm the pinned version.
    """
    num_frames_gt = len(p3d_gt)
    num_frames_pred = len(p3d_pred)
    metadata = dict(title='01', artist='Matplotlib', comment='motion')
    writer = FFMpegWriter(fps=10, metadata=metadata)
    fig = plt.figure()
    ax = plt.gca(projection='3d')
    ob = H36m3DPose(ax)
    with writer.saving(fig, save_path, 100):
        # Ground-truth-only prefix (predictions cover only the tail).
        for i in tqdm(range((num_frames_gt - num_frames_pred))):
            ob.update(p3d_gt[i])
            # Side effect: per-frame snapshot saved to ../tmp/<i>.jpg.
            plt.savefig((('../tmp/' + str(i)) + '.jpg'))
            writer.grab_frame()
            plt.pause(0.01)
        # Overlay predictions, index-shifted to align with the GT tail.
        for i in tqdm(range((num_frames_gt - num_frames_pred), num_frames_gt)):
            ob.update(p3d_gt[i], p3d_pred[((i - num_frames_gt) + num_frames_pred)])
            writer.grab_frame()
            plt.pause(0.01)
def compute_and_add_linker_smiles(data, progress=False):
    """Return copies of the records in `data` augmented with linker SMILES.

    For each record, the predicted and true molecules plus the shared
    fragment are parsed (with RDKit sanitization) and the connecting
    linker is extracted into 'pred_linker' / 'true_linker'. Set
    `progress=True` to wrap the iteration in a tqdm bar.
    """
    iterator = tqdm(data) if progress else data
    augmented = []
    for record in iterator:
        pred_mol = Chem.MolFromSmiles(record['pred_mol_smi'], sanitize=True)
        true_mol = Chem.MolFromSmiles(record['true_mol_smi'], sanitize=True)
        frag = Chem.MolFromSmiles(record['frag_smi'], sanitize=True)
        augmented.append({
            **record,
            'pred_linker': extract_linker_smiles(pred_mol, frag),
            'true_linker': extract_linker_smiles(true_mol, frag),
        })
    return augmented
def eval_once(saver, summary_writer, summary_op, agelogits, agelabels, genderlogits, genderlabels, num_eval, saveresultdir, requested_step=None):
    """Run one evaluation pass over the age/gender model (TF1 graph mode).

    Restores the requested checkpoint, streams num_eval examples through
    the queue runners, accumulates top-1/top-2 age accuracy and top-1
    gender accuracy, prints and writes the results to
    <saveresultdir>/<eval_data>_result.txt, and logs them as summaries.
    """
    # in_top_k ops for the three accuracy metrics.
    agetop1 = tf.nn.in_top_k(agelogits, agelabels, 1)
    agetop2 = tf.nn.in_top_k(agelogits, agelabels, 2)
    gendertop1 = tf.nn.in_top_k(genderlogits, genderlabels, 1)
    with tf.Session() as sess:
        checkpoint_path = FLAGS.model_dir
        (model_checkpoint_path, global_step) = get_checkpoint(checkpoint_path, requested_step, FLAGS.checkpoint)
        saver.restore(sess, model_checkpoint_path)
        coord = tf.train.Coordinator()
        try:
            # Start the input-pipeline queue runners.
            threads = []
            for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                threads.extend(qr.create_threads(sess, coord=coord, daemon=True, start=True))
            num_steps = int(math.ceil((num_eval / FLAGS.batch_size)))
            agetrue_count1 = agetrue_count2 = gendertrue_count1 = 0
            # Effective sample count is rounded up to whole batches.
            total_sample_count = (num_steps * FLAGS.batch_size)
            step = 0
            while ((step < num_steps) and (not coord.should_stop())):
                start_time = time.time()
                # agev/genderv (raw logits) are fetched but not used below.
                (agev, agepredictions1, agepredictions2, genderv, genderpredictions1) = sess.run([agelogits, agetop1, agetop2, genderlogits, gendertop1])
                duration = (time.time() - start_time)
                sec_per_batch = float(duration)
                examples_per_sec = (FLAGS.batch_size / sec_per_batch)
                agetrue_count1 += np.sum(agepredictions1)
                agetrue_count2 += np.sum(agepredictions2)
                gendertrue_count1 += np.sum(genderpredictions1)
                format_str = '%s (%.1f examples/sec; %.3f sec/batch)'
                print((format_str % (datetime.now(), examples_per_sec, sec_per_batch)))
                step += 1
            # Reuse the prediction variable names for the final precisions.
            agepredictions1 = (agetrue_count1 / total_sample_count)
            agepredictions2 = (agetrue_count2 / total_sample_count)
            genderpredictions1 = (gendertrue_count1 / total_sample_count)
            print(('Age => %s: precision 1 = %.3f (%d/%d)' % (datetime.now(), agepredictions1, agetrue_count1, total_sample_count)))
            print(('Age => %s: precision 2 = %.3f (%d/%d)' % (datetime.now(), agepredictions2, agetrue_count2, total_sample_count)))
            print(('Gender => %s: precision 1 = %.3f (%d/%d)' % (datetime.now(), genderpredictions1, gendertrue_count1, total_sample_count)))
            resulttxt = (((saveresultdir + os.sep) + FLAGS.eval_data) + '_result.txt')
            with open(resulttxt, 'w') as f:
                f.write(('Age => %s: precision 1 = %.3f (%d/%d) \n' % (datetime.now(), agepredictions1, agetrue_count1, total_sample_count)))
                f.write(('Age => %s: precision 2 = %.3f (%d/%d) \n' % (datetime.now(), agepredictions2, agetrue_count2, total_sample_count)))
                f.write(('Gender => %s: precision 1 = %.3f (%d/%d) \n' % (datetime.now(), genderpredictions1, gendertrue_count1, total_sample_count)))
                # NOTE(review): explicit close is redundant inside `with`.
                f.close()
            # Attach the precisions to the model's own summaries.
            summary = tf.Summary()
            summary.ParseFromString(sess.run(summary_op))
            summary.value.add(tag='Age Precision 1', simple_value=agepredictions1)
            summary.value.add(tag='Age Precision 2', simple_value=agepredictions2)
            summary.value.add(tag='Gender Precision 1', simple_value=genderpredictions1)
            summary_writer.add_summary(summary, global_step)
        except Exception as e:
            coord.request_stop(e)
        coord.request_stop()
        coord.join(threads, stop_grace_period_secs=10)
def test_pvtv2():
    """Smoke-test PyramidVisionTransformerV2: argument validation and feature-map shapes."""
    # Invalid constructor arguments must be rejected up front.
    with pytest.raises(TypeError):
        PyramidVisionTransformerV2(pretrained=123)
    with pytest.raises(AssertionError):
        PyramidVisionTransformerV2(pretrain_img_size=(224, 224, 224))
    # (input shape, expected per-stage output shapes) pairs; odd sizes are
    # rounded the same as the next-larger even size.
    cases = [
        ((1, 3, 32, 32), ((1, 64, 8, 8), (1, 128, 4, 4), (1, 320, 2, 2), (1, 512, 1, 1))),
        ((1, 3, 31, 31), ((1, 64, 8, 8), (1, 128, 4, 4), (1, 320, 2, 2), (1, 512, 1, 1))),
        ((1, 3, 112, 137), ((1, 64, 28, 35), (1, 128, 14, 18), (1, 320, 7, 9), (1, 512, 4, 5))),
    ]
    for in_shape, expected_shapes in cases:
        inputs = torch.randn(in_shape)
        net = PyramidVisionTransformerV2()
        feats = net(inputs)
        for feat, want in zip(feats, expected_shapes):
            assert feat.shape == want
class UnetGenerator_IN(nn.Module):
    """U-Net generator assembled from InstanceNorm skip-connection blocks.

    The network is built from the innermost bottleneck outward; `num_downs`
    controls the total number of down/up-sampling levels.
    """

    def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.InstanceNorm2d, use_dropout=False, output_function=nn.Sigmoid):
        super(UnetGenerator_IN, self).__init__()
        # Innermost bottleneck block.
        block = UnetSkipConnectionBlock_IN(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)
        # Extra blocks at the deepest resolution (with optional dropout).
        for _ in range(num_downs - 5):
            block = UnetSkipConnectionBlock_IN(ngf * 8, ngf * 8, input_nc=None, submodule=block, norm_layer=norm_layer, use_dropout=use_dropout)
        # Progressively shallower blocks, halving the channel multiplier.
        for outer_mult, inner_mult in ((4, 8), (2, 4), (1, 2)):
            block = UnetSkipConnectionBlock_IN(ngf * outer_mult, ngf * inner_mult, input_nc=None, submodule=block, norm_layer=norm_layer)
        # Outermost block maps to the requested number of output channels.
        self.model = UnetSkipConnectionBlock_IN(output_nc, ngf, input_nc=input_nc, submodule=block, outermost=True, norm_layer=norm_layer, output_function=output_function)

    def forward(self, input):
        return self.model(input)
class TopKNeuronCoverage(MyNeuronCoverage):
    """Coverage criterion: a neuron is 'covered' for an input when it ranks
    among the k most active neurons of its layer.

    NOTE(review): the original source contained bare ``(parallel=True)`` lines
    (a SyntaxError) before ``_calc_1``/``_calc_2`` -- almost certainly stripped
    numba decorators such as ``@numba.jit(nopython=True, parallel=True)``.
    They are restored here as plain ``@staticmethod``s; re-add the numba
    decorators if numba is a project dependency. ``prange`` behaves like
    ``range`` when the functions are not jitted.
    """

    def __init__(self, k=5):
        # Validate before use; k doubles as the base class' "threshold".
        assert isinstance(k, int)
        super(TopKNeuronCoverage, self).__init__(threshold=k)
        self._threshould = k  # (sic) original attribute name kept for compatibility
        self._topk = k

    @staticmethod
    def _calc_1(intermediate_layer_output, features_index, threshold):
        """Flag the top-`threshold` neurons per input, ranking each neuron by
        the mean of its (possibly multi-dimensional) activation map."""
        num_layer_neuron = intermediate_layer_output[0].shape[features_index]
        num_input = len(intermediate_layer_output)
        result = np.zeros(shape=(num_input, num_layer_neuron), dtype=np.uint8)
        for input_id in prange(intermediate_layer_output.shape[0]):
            layer_neurons = []
            for layer_neuron_id in prange(num_layer_neuron):
                if (features_index == (- 1)):
                    # Channels-last layout: neurons live on the final axis.
                    neuron_output = intermediate_layer_output[input_id][(..., layer_neuron_id)]
                else:
                    # NOTE(review): indexes the first axis regardless of the
                    # actual features_index value -- confirm layout assumption.
                    neuron_output = intermediate_layer_output[input_id][layer_neuron_id]
                layer_neurons.append(np.mean(neuron_output))
            layer_neurons = np.array(layer_neurons)
            k = min(threshold, num_layer_neuron)
            # argsort is ascending, so the last k indices are the most active.
            for neuron_idx in np.argsort(layer_neurons)[(- k):]:
                result[input_id][neuron_idx] = 1
        return result

    @staticmethod
    def _calc_2(intermediate_layer_output, features_index, threshold):
        """Flag the top-`threshold` neurons per input for layers whose neuron
        outputs are already scalar (no spatial averaging needed)."""
        num_layer_neuron = intermediate_layer_output[0].shape[features_index]
        num_input = len(intermediate_layer_output)
        result = np.zeros(shape=(num_input, num_layer_neuron), dtype=np.uint8)
        for input_id in prange(intermediate_layer_output.shape[0]):
            layer_neurons = []
            for layer_neuron_id in prange(num_layer_neuron):
                if (features_index == (- 1)):
                    neuron_output = intermediate_layer_output[input_id][(..., layer_neuron_id)]
                else:
                    neuron_output = intermediate_layer_output[input_id][layer_neuron_id]
                layer_neurons.append(neuron_output)
            layer_neurons = np.array(layer_neurons)
            k = min(threshold, num_layer_neuron)
            active_idxs = np.argsort(layer_neurons)[(- k):]
            for neuron_idx in active_idxs:
                result[input_id][neuron_idx] = 1
        return result
def run():
    """Render an annotated synthetic dataset of a jittered 'Suzanne' object.

    Each zpy step: restore saved poses, jitter object and camera, aim the
    camera, render RGB / instance-segmentation / depth images, and record
    annotations. Outputs ZUMO- and COCO-format annotation files at the end.
    """
    # Seed the run for reproducibility (seed source handled inside zpy).
    zpy.blender.set_seed()
    saver = zpy.saver_image.ImageSaver(description='Suzannes from a camera view')
    # One segmentation category, with a random color used for both the saver
    # category and the object's segmentation pass.
    suzanne_seg_color = zpy.color.random_color(output_style='frgb')
    saver.add_category(name='Suzanne', color=suzanne_seg_color)
    zpy.objects.segment('Suzanne', color=suzanne_seg_color)
    # Save baseline poses so each step starts from the same configuration.
    zpy.objects.save_pose('Camera', 'cam_pose')
    zpy.objects.save_pose('Suzanne', 'suzanne_pose')
    for step_idx in zpy.blender.step():
        log.info('This is an info log')
        log.debug('This is a debug log')
        # Reset to the saved poses before applying fresh random jitter.
        zpy.objects.restore_pose('Camera', 'cam_pose')
        zpy.objects.restore_pose('Suzanne', 'suzanne_pose')
        # Random translation (+-5 units) and full-range rotation for the object.
        zpy.objects.jitter('Suzanne', translate_range=(((- 5), 5), ((- 5), 5), ((- 5), 5)), rotate_range=(((- math.pi), math.pi), ((- math.pi), math.pi), ((- math.pi), math.pi)))
        zpy.objects.jitter('Camera', translate_range=(((- 5), 5), ((- 5), 5), ((- 5), 5)))
        # Keep the object in frame after jittering.
        zpy.camera.look_at('Camera', bpy.data.objects['Suzanne'].location)
        rgb_image_name = zpy.files.make_rgb_image_name(step_idx)
        iseg_image_name = zpy.files.make_iseg_image_name(step_idx)
        depth_image_name = zpy.files.make_depth_image_name(step_idx)
        # Render all three modalities in one pass.
        zpy.render.render(rgb_path=(saver.output_dir / rgb_image_name), iseg_path=(saver.output_dir / iseg_image_name), depth_path=(saver.output_dir / depth_image_name))
        saver.add_image(name=rgb_image_name, style='default', output_path=(saver.output_dir / rgb_image_name), frame=step_idx)
        saver.add_image(name=iseg_image_name, style='segmentation', output_path=(saver.output_dir / iseg_image_name), frame=step_idx)
        saver.add_image(name=depth_image_name, style='depth', output_path=(saver.output_dir / depth_image_name), frame=step_idx)
        saver.add_annotation(image=rgb_image_name, seg_image=iseg_image_name, seg_color=suzanne_seg_color, category='Suzanne')
    # Write annotated previews, summary stats, and the two annotation formats.
    saver.output_annotated_images()
    saver.output_meta_analysis()
    zpy.output_zumo.OutputZUMO(saver).output_annotations()
    zpy.output_coco.OutputCOCO(saver).output_annotations()
def plot_local_contrib(row, model, X, g_pred=None, scale=False):
    """Plot per-feature local contributions ("reason codes") for a single row.

    Assumes `model` is an H2O-style GLM exposing `coef()` as a name->coefficient
    dict and `row` is a one-row frame supporting `.types` and `(0, 0)` scalar
    indexing -- TODO confirm against the caller.

    :param row: one-row frame holding the observation to explain.
    :param model: fitted linear model with a `coef()` accessor.
    :param X: column names to consider.
    :param g_pred: model prediction for `row`; required when `scale=True`.
    :param scale: rescale contributions to sum to (g_pred - intercept).
    """
    records = []
    for (key, val) in sorted(row[X].types.items()):
        contrib = 0
        name = ''
        if (val == 'enum'):
            # Categorical: the coefficient is keyed by "<column>.<level>".
            level = row[key][(0, 0)]
            name = '.'.join([str(key), str(level)])
            if (name in model.coef()):
                contrib = model.coef()[name]
        else:
            # Numeric: contribution is value * coefficient.
            name = key
            if (name in model.coef()):
                contrib = (row[name][(0, 0)] * model.coef()[name])
        if (contrib != 0.0):
            records.append({'Name': name, 'Local Contribution': contrib, 'Sign': (contrib > 0)})
    # BUG FIX: DataFrame.append was deprecated and removed in pandas 2.0;
    # collect rows first and build the frame in one shot.
    local_contrib_frame = pd.DataFrame(records, columns=['Name', 'Local Contribution', 'Sign'])
    if scale:
        # Rescale so contributions sum to the prediction minus the intercept.
        scaler = ((g_pred - model.coef()['Intercept']) / local_contrib_frame['Local Contribution'].sum())
        local_contrib_frame['Local Contribution'] *= scaler
    _ = local_contrib_frame.plot(x='Name', y='Local Contribution', kind='bar', title='Reason Codes', color=local_contrib_frame.Sign.map({True: 'b', False: 'g'}), legend=False)
def add_training_args(parser):
group = parser.add_argument_group('train', 'training configurations')
group.add_argument('--batch-size', type=int, default=4, help='Data Loader batch size')
group.add_argument('--weight-decay', type=float, default=0.01, help='weight decay coefficient for L2 regularization')
group.add_argument('--checkpoint-activations', action='store_true', help='checkpoint activation to allow for training with larger models and sequences')
group.add_argument('--checkpoint-num-layers', type=int, default=1, help='chunk size (number of layers) for checkpointing')
group.add_argument('--clip-grad', type=float, default=1.0, help='gradient clipping')
group.add_argument('--train-iters', type=int, default=1000000, help='total number of iterations to train over all training runs')
group.add_argument('--log-interval', type=int, default=100, help='report interval')
group.add_argument('--exit-interval', type=int, default=None, help='Exit the program after this many new iterations.')
group.add_argument('--tensorboard-dir', type=str, default=None, help='Write TensorBoard logs to this directory')
group.add_argument('--seed', type=int, default=1234, help='random seed')
group.add_argument('--reset-position-ids', action='store_true', help='Reset posistion ids after end-of-document token.')
group.add_argument('--reset-attention-mask', action='store_true', help='Reset self attention maske after end-of-document token.')
group.add_argument('--eod-mask-loss', action='store_true', help='Mask loss for the end of document tokens')
group.add_argument('--lr-decay-iters', type=int, default=None, help='number of iterations to decay LR over, If None defaults to `--train-iters`*`--epochs`')
group.add_argument('--lr-decay-style', type=str, default='linear', choices=['constant', 'linear', 'cosine', 'exponential'], help='learning rate decay function')
group.add_argument('--lr', type=float, default=0.0001, help='initial learning rate')
group.add_argument('--min-lr', type=float, default=0.0, help='Minumum value for learning rate. The schedulerclip values below this threshold.')
group.add_argument('--warmup', type=float, default=0.01, help='percentage of data to warmup on (.01 = 1% of all training iters). Default 0.01')
group.add_argument('--override-lr-scheduler', action='store_true', help='Reset the values of the scheduler (learning rate,warmup iterations, minimum learning rate, maximum number of iterations, and decay style from input arguments and ignore values from checkpoints. Notethat all the above values will be reset.')
group.add_argument('--use-checkpoint-lr-scheduler', action='store_true', help='Use checkpoint to set the values of the scheduler (learning rate, warmup iterations, minimum learning rate, maximum number of iterations, and decay style from input arguments and ignore values from checkpoints. Notethat all the above values will be reset.')
group.add_argument('--save', type=str, default=None, help='Output directory to save checkpoints to.')
group.add_argument('--save-interval', type=int, default=5000, help='number of iterations between saves')
group.add_argument('--no-save-optim', action='store_true', help='Do not save current optimizer.')
group.add_argument('--no-save-rng', action='store_true', help='Do not save current rng state.')
group.add_argument('--load', type=str, default=None, help='Path to a directory containing a model checkpoint.')
group.add_argument('--no-load-optim', action='store_true', help='Do not load optimizer when loading checkpoint.')
group.add_argument('--no-load-rng', action='store_true', help='Do not load rng state when loading checkpoint.')
group.add_argument('--finetune', action='store_true', help='Load model for finetuning. Do not load optimizer or rng state from checkpoint and set iteration to 0. Assumed when loading a release checkpoint.')
group.add_argument('--resume-dataloader', action='store_true', help='Resume the dataloader when resuming training. Does not apply to tfrecords dataloader, try resumingwith a different seed in this case.')
group.add_argument('--distributed-backend', default='nccl', help='which backend to use for distributed training. One of [gloo, nccl]')
group.add_argument('--DDP-impl', default='local', help='which DistributedDataParallel implementation to use. One of [local, torch]')
group.add_argument('--local_rank', type=int, default=None, help='local rank passed from distributed launcher')
group.add_argument('--adlr-autoresume', action='store_true', help='enable autoresume on adlr cluster.')
group.add_argument('--adlr-autoresume-interval', type=int, default=1000, help='intervals over which check for autoresumetermination signal')
return parser |
def register_model(model_name: str, classification_cls: Optional[Type[ModelLike]]=None, regression_cls: Optional[Type[ModelLike]]=None, overwrite: Optional[bool]=None):
    """Register model classes for classification and/or regression.

    Parameters
    ----------
    model_name : str
        Key under which the model is registered.
    classification_cls, regression_cls : type, optional
        Model class to register for the respective problem type.
    overwrite : bool, optional
        None  -> overwrite an existing entry but emit a warning (default).
        True  -> overwrite silently.
        False -> raise if an entry already exists.
    """
    problem_types = ['classification', 'regression']
    for (cls, problem_type) in zip([classification_cls, regression_cls], problem_types):
        if (cls is None):
            continue
        if ((t_available := _available_models.get(model_name)) is not None):
            if t_available.get(problem_type):
                # Entry already exists for this (name, problem_type).
                # BUG FIX: '{{problem_type}}' rendered literal braces instead
                # of interpolating the value; also fixed "won't" -> "want" and
                # the contradictory overwrite=False message.
                if (overwrite is None):
                    warn_with_log(f"Model named {model_name} with problem type {problem_type} already exists. Therefore, {model_name} will be overwritten. To remove this warning set overwrite=True. If you want to reset this use `julearn.estimators.reset_model_register`.")
                elif (overwrite is False):
                    raise_error(f'Model named {model_name} with problem type {problem_type} already exists and overwrite is set to False, therefore you cannot overwrite existing models. Set overwrite=True in case you want to overwrite existing models')
            logger.info(f'registering model named {model_name} with problem_type {problem_type}')
            _available_models[model_name][problem_type] = cls
        else:
            logger.info(f'registering model named {model_name} with problem_type {problem_type}')
            _available_models[model_name] = {problem_type: cls}
class TestDimPlanner(unittest.TestCase):
    """Smoke test for the dim planner; requires a GPU and torch >= 2.0."""

    # BUG FIX: the original had a bare tuple expression here -- almost
    # certainly a stripped @unittest.skipIf decorator. Restored so the test
    # is skipped (instead of run and failed) on unsupported environments.
    @unittest.skipIf(((not torch.cuda.is_available()) or (torch_version() < (2, 0, 0))), 'Test on GPU image')
    def test_dim_planner(self):
        run_dim_planner(2, 4, my_loss_func)
class MORAN(nn.Module):
    """Rectification (MORN) followed by attention-based recognition (ASRN)."""

    def __init__(self, nc, nclass, nh, targetH, targetW, BidirDecoder=False, inputDataType='torch.cuda.FloatTensor', maxBatch=256, CUDA=True):
        super(MORAN, self).__init__()
        # Spatial rectification sub-network and sequence recognizer.
        self.MORN = MORN(nc, targetH, targetW, inputDataType, maxBatch, CUDA)
        self.ASRN = ASRN(targetH, nc, nclass, nh, BidirDecoder, CUDA)

    def forward(self, x, length, text, text_rev, test=False, debug=False):
        morn_out = self.MORN(x, test, debug=debug)
        if debug:
            # Debug mode: MORN also returns a rectification visualization.
            rectified, demo = morn_out
            preds = self.ASRN(rectified, length, text, text_rev, test)
            return (preds, demo)
        return self.ASRN(morn_out, length, text, text_rev, test)
def count_trainable_parameters(model: Any) -> int:
    """Return the total element count of parameters with requires_grad=True."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
def create_train_state(model: FlaxAutoModelForTokenClassification, learning_rate_fn: Callable[([int], float)], num_labels: int, training_args: TrainingArguments) -> train_state.TrainState:
    """Build a Flax TrainState with AdamW (weight decay masked off biases and
    LayerNorm parameters) plus attached logits/loss functions."""

    class TrainState(train_state.TrainState):
        # Static (non-pytree) callables carried alongside the optimizer state.
        logits_fn: Callable = struct.field(pytree_node=False)
        loss_fn: Callable = struct.field(pytree_node=False)

    def decay_mask_fn(params):
        # Exclude biases and any LayerNorm scale/bias from weight decay.
        flat_params = traverse_util.flatten_dict(params)
        layer_norm_candidates = ['layernorm', 'layer_norm', 'ln']
        layer_norm_named_params = {
            path[(- 2):]
            for candidate in layer_norm_candidates
            for path in flat_params.keys()
            if (candidate in ''.join(path).lower())
        }
        flat_mask = {
            path: ((path[(- 1)] != 'bias') and (path[(- 2):] not in layer_norm_named_params))
            for path in flat_params
        }
        return traverse_util.unflatten_dict(flat_mask)

    optimizer = optax.adamw(
        learning_rate=learning_rate_fn,
        b1=training_args.adam_beta1,
        b2=training_args.adam_beta2,
        eps=training_args.adam_epsilon,
        weight_decay=training_args.weight_decay,
        mask=decay_mask_fn,
    )

    def cross_entropy_loss(logits, labels):
        per_example = optax.softmax_cross_entropy(logits, onehot(labels, num_classes=num_labels))
        return jnp.mean(per_example)

    return TrainState.create(
        apply_fn=model.__call__,
        params=model.params,
        tx=optimizer,
        logits_fn=(lambda logits: logits.argmax((- 1))),
        loss_fn=cross_entropy_loss,
    )
class GRevNet(snt.AbstractModule):
    """Reversible coupling-flow over graph node features (RealNVP-style).

    Node features are split in half along the feature axis. Each of
    `num_timesteps` steps applies two affine coupling updates -- x1 updated
    from x0, then x0 updated from x1 -- where the scale (s) and translation
    (t) functions are GNNs produced by `make_gnn_fn`. `f` maps data to
    latent space while accumulating the log-det-Jacobian; `g` is its inverse.
    """

    def __init__(self, make_gnn_fn, num_timesteps, node_embedding_dim, use_batch_norm=False, weight_sharing=False, name='GRevNet'):
        # make_gnn_fn: zero-argument factory for the s/t GNN modules.
        # node_embedding_dim is accepted but not referenced in this class --
        # presumably consumed by callers/factories; TODO confirm.
        super(GRevNet, self).__init__(name=name)
        self.num_timesteps = num_timesteps
        self.weight_sharing = weight_sharing
        if weight_sharing:
            # One s/t network per half, shared across all timesteps.
            self.s = [make_gnn_fn(), make_gnn_fn()]
            self.t = [make_gnn_fn(), make_gnn_fn()]
        else:
            # Separate s/t networks for every timestep, for each half.
            self.s = [get_gnns(num_timesteps, make_gnn_fn), get_gnns(num_timesteps, make_gnn_fn)]
            self.t = [get_gnns(num_timesteps, make_gnn_fn), get_gnns(num_timesteps, make_gnn_fn)]
        self.use_batch_norm = use_batch_norm
        # One batch-norm per (half, timestep), used only if use_batch_norm.
        self.bns = [[make_batch_norm() for _ in range(num_timesteps)], [make_batch_norm() for _ in range(num_timesteps)]]

    def f(self, x):
        # Forward flow: data -> latent, returns (z, log|det J|).
        log_det_jacobian = 0
        (x0, x1) = tf.split(x.nodes, num_or_size_splits=2, axis=1)
        x0 = x.replace(nodes=x0)
        x1 = x.replace(nodes=x1)
        for i in range(self.num_timesteps):
            if self.use_batch_norm:
                # Batch-norm contributes its own log-det term.
                bn = self.bns[0][i]
                log_det_jacobian += bn.inverse_log_det_jacobian(x0.nodes, 2)
                x0 = x0.replace(nodes=bn.inverse(x0.nodes))
            if self.weight_sharing:
                s = self.s[0](x0).nodes
                t = self.t[0](x0).nodes
            else:
                s = self.s[0][i](x0).nodes
                t = self.t[0][i](x0).nodes
            # Affine coupling: x1 <- x1 * exp(s(x0)) + t(x0).
            log_det_jacobian += tf.reduce_sum(s)
            x1 = x1.replace(nodes=((x1.nodes * tf.exp(s)) + t))
            if self.use_batch_norm:
                bn = self.bns[1][i]
                log_det_jacobian += bn.inverse_log_det_jacobian(x1.nodes, 2)
                x1 = x1.replace(nodes=bn.inverse(x1.nodes))
            if self.weight_sharing:
                s = self.s[1](x1).nodes
                t = self.t[1](x1).nodes
            else:
                s = self.s[1][i](x1).nodes
                t = self.t[1][i](x1).nodes
            # Second coupling: x0 <- x0 * exp(s(x1)) + t(x1).
            log_det_jacobian += tf.reduce_sum(s)
            x0 = x0.replace(nodes=((x0.nodes * tf.exp(s)) + t))
        x = x.replace(nodes=tf.concat([x0.nodes, x1.nodes], axis=1))
        return (x, log_det_jacobian)

    def g(self, z):
        # Inverse flow: latent -> data; timesteps and couplings are undone in
        # reverse order relative to f().
        (z0, z1) = tf.split(z.nodes, num_or_size_splits=2, axis=1)
        z0 = z.replace(nodes=z0)
        z1 = z.replace(nodes=z1)
        for i in reversed(range(self.num_timesteps)):
            if self.weight_sharing:
                s = self.s[1](z1).nodes
                t = self.t[1](z1).nodes
            else:
                s = self.s[1][i](z1).nodes
                t = self.t[1][i](z1).nodes
            if self.use_batch_norm:
                # NOTE(review): bn.forward is applied to z1 *after* s/t were
                # computed from z1, while f() normalized before computing s/t
                # -- confirm this ordering actually inverts f().
                bn = self.bns[1][i]
                z1 = z1.replace(nodes=bn.forward(z1.nodes))
            # Invert the second coupling: x0 = (z0 - t(z1)) * exp(-s(z1)).
            z0 = z0.replace(nodes=((z0.nodes - t) * tf.exp((- s))))
            if self.weight_sharing:
                s = self.s[0](z0).nodes
                t = self.t[0](z0).nodes
            else:
                s = self.s[0][i](z0).nodes
                t = self.t[0][i](z0).nodes
            if self.use_batch_norm:
                bn = self.bns[0][i]
                z0 = z0.replace(nodes=bn.forward(z0.nodes))
            # Invert the first coupling: x1 = (z1 - t(x0)) * exp(-s(x0)).
            z1 = z1.replace(nodes=((z1.nodes - t) * tf.exp((- s))))
        return z.replace(nodes=tf.concat([z0.nodes, z1.nodes], axis=1))

    def log_prob(self, x):
        # Change-of-variables: log p(x) = log p(z) + log|det J|.
        # NOTE(review): self.prior is never assigned in __init__ -- callers
        # apparently must set it before using log_prob; confirm.
        (z, log_det_jacobian) = self.f(x)
        return (tf.reduce_sum(self.prior.log_prob(z)) + log_det_jacobian)

    def _build(self, input, inverse=True):
        # Sonnet entry point: dispatch to forward (f) or inverse (g) flow.
        func = (self.f if inverse else self.g)
        return func(input)
class MetaAlbumDataset(Dataset):
    """Image-classification dataset over one or more Meta-Album datasets.

    Images are resized to `img_size` x `img_size`; labels are mapped to
    integer ids that keep increasing across datasets. `idx_per_label[c]`
    holds the sample indices belonging to class id c.
    """

    def __init__(self, datasets, data_dir, img_size=128):
        if (len(datasets) == 1):
            self.name = datasets[0]
        else:
            self.name = f"Multiple datasets: {','.join(datasets)}"
        self.data_dir = data_dir
        self.transform = transforms.Compose([transforms.Resize((img_size, img_size)), transforms.ToTensor()])
        self.img_paths = []
        self.labels = []
        next_id = 0
        for dataset in datasets:
            (label_col, file_col, img_path, md_path) = self.extract_info(dataset)
            metadata = pd.read_csv(md_path)
            self.img_paths.extend(os.path.join(img_path, fname) for fname in metadata[file_col])
            # Label-to-id mapping is rebuilt per dataset, so identical label
            # names in different datasets get distinct ids.
            label_to_id = {}
            for label in metadata[label_col]:
                if (label not in label_to_id):
                    label_to_id[label] = next_id
                    next_id += 1
                self.labels.append(label_to_id[label])
        self.labels = np.array(self.labels)
        # Precompute sample indices for every class id.
        self.idx_per_label = []
        for class_id in range(max(self.labels) + 1):
            self.idx_per_label.append(np.argwhere(self.labels == class_id).reshape(-1))

    def __len__(self):
        return len(self.img_paths)

    def __getitem__(self, i):
        image = self.transform(Image.open(self.img_paths[i]))
        label = torch.LongTensor([self.labels[i]]).squeeze()
        return (image, label)

    def extract_info(self, dataset):
        """Return (label column, filename column, image dir, metadata csv) for `dataset`."""
        img_path = f'{self.data_dir}/{dataset}/images/'
        metadata_path = f'{self.data_dir}/{dataset}/labels.csv'
        return ('CATEGORY', 'FILE_NAME', img_path, metadata_path)
def _run_allgather_reducescatter(rank, world_size, tmp_file):
    """Per-process worker: exercise AllGatherQMicro + ReduceScatterContext.

    Initializes a process group (NCCL on GPU, gloo on CPU) via file-based
    rendezvous, runs all-gather -> linear -> reduce-scatter on a random
    tensor, and logs which tensors carry requires_grad.
    """
    if torch.cuda.is_available():
        # Pin this process to its own GPU before creating any CUDA tensors.
        torch.cuda.set_device(rank)
        device = torch.device(f'cuda:{rank}')
        backend = 'nccl'
    else:
        device = torch.device('cpu')
        backend = 'gloo'
    dist.init_process_group(init_method=('file://' + tmp_file), rank=rank, backend=backend, world_size=world_size)
    # Shapes: batch, heads, query-dim, sequence length.
    (bs, nh, qd, ls) = (10, 12, 32, 512)
    dtype = torch.float32
    # Fixed seed so every rank generates the same input tensor.
    torch.manual_seed(0)
    inp = torch.rand((bs, nh, qd, ls), requires_grad=True, dtype=dtype, device=device)
    lin = torch.nn.Linear(ls, ls).to(device)
    allgathered = AllGatherQMicro.apply(inp)
    inter = lin(allgathered)
    reduced = ReduceScatterContext.apply(inter)
    logger.info(f'''Rank [{rank}] inp: {inp.requires_grad},
allgathered: {allgathered.requires_grad},
inter: {inter.requires_grad},
reduced: {reduced.requires_grad}''')
    dist.destroy_process_group()
def _target_samples_int(y, n_target_samples, sampling_type):
target_stats = dict(Counter(y))
max_class_ = max(target_stats, key=target_stats.get)
min_class_ = min(target_stats, key=target_stats.get)
n_max_class_samples_ = target_stats[max_class_]
n_min_class_samples_ = target_stats[min_class_]
if (sampling_type == 'under-sampling'):
if (n_target_samples >= n_max_class_samples_):
raise ValueError(f"'n_target_samples' >= the number of samples of the largest class ({n_max_class_samples_}). Set 'n_target_samples' < {n_max_class_samples_} to perform under-sampling properly.")
target_distr = dict([(label, min(n_target_samples, target_stats[label])) for label in target_stats.keys()])
return target_distr
elif (sampling_type == 'over-sampling'):
if (n_target_samples <= n_min_class_samples_):
raise ValueError(f"'n_target_samples' <= the number of samples of the largest class ({n_min_class_samples_}). Set 'n_target_samples' > {n_min_class_samples_} to perform over-sampling properly.")
target_distr = dict([(label, max(n_target_samples, target_stats[label])) for label in target_stats.keys()])
return target_distr
elif (sampling_type == 'multi-class-hybrid-sampling'):
warning_info = (((f" Set 'n_target_samples' between [{n_min_class_samples_}" + f' , {n_max_class_samples_}] if you want to perform') + f' multi-class hybrid-sampling (under-sample the minority') + f' classes, over-sample the majority classes) properly.')
if (n_target_samples >= n_max_class_samples_):
raise Warning((((f"'n_target_samples' >= the number of samples" + f' of the largest class ({n_max_class_samples_}).') + f' ONLY over-sampling will be applied to all classes.') + warning_info))
elif (n_target_samples <= n_min_class_samples_):
raise Warning((((f"'n_target_samples' <= the number of samples" + f' of the largest class ({n_min_class_samples_}).') + f' ONLY under-sampling will be applied to all classes.') + warning_info))
target_distr = dict([(label, n_target_samples) for label in target_stats.keys()])
return target_distr
else:
raise SamplingKindError |
# BUG FIX: the original had a bare `_task('sentence_prediction')` statement
# (a stripped decorator), so the task was never registered. Restored as a
# class decorator. The same stripping removed @staticmethod/@classmethod/
# @property below: the body itself calls `cls(...)`, `self.source_dictionary.pad()`
# and `self.label_dictionary.eos()`, which only work with those decorators.
@_task('sentence_prediction')
class SentencePredictionTask(LegacyFairseqTask):
    """Sentence (or sentence-pair) classification/regression task."""

    @staticmethod
    def add_args(parser):
        """Add task-specific command-line arguments."""
        parser.add_argument('data', metavar='FILE', help='file prefix for data')
        parser.add_argument('--num-classes', type=int, default=(- 1), help='number of classes or regression targets')
        parser.add_argument('--init-token', type=int, default=None, help='add token at the beginning of each batch item')
        parser.add_argument('--separator-token', type=int, default=None, help='add separator token between inputs')
        parser.add_argument('--regression-target', action='store_true', default=False)
        parser.add_argument('--no-shuffle', action='store_true', default=False)
        parser.add_argument('--shorten-method', default='none', choices=['none', 'truncate', 'random_crop'], help='if not none, shorten sequences that exceed --tokens-per-sample')
        parser.add_argument('--shorten-data-split-list', default='', help='comma-separated list of dataset splits to apply shortening to, e.g., "train,valid" (default: all dataset splits)')
        parser.add_argument('--add-prev-output-tokens', action='store_true', default=False, help='add prev_output_tokens to sample, used for encoder-decoder arch')

    def __init__(self, args, data_dictionary, label_dictionary):
        super().__init__(args)
        self.dictionary = data_dictionary
        self._label_dictionary = label_dictionary
        if (not hasattr(args, 'max_positions')):
            self._max_positions = (args.max_source_positions, args.max_target_positions)
        else:
            self._max_positions = args.max_positions
        args.tokens_per_sample = self._max_positions

    @classmethod
    def load_dictionary(cls, args, filename, source=True):
        """Load a Dictionary from `filename`, adding the <mask> symbol."""
        dictionary = Dictionary.load(filename)
        dictionary.add_symbol('<mask>')
        return dictionary

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Instantiate the task, loading input (and optionally label) dictionaries."""
        assert (args.num_classes > 0), 'Must set --num-classes'
        data_dict = cls.load_dictionary(args, os.path.join(args.data, 'input0', 'dict.txt'), source=True)
        logger.info('[input] dictionary: {} types'.format(len(data_dict)))
        if (not args.regression_target):
            label_dict = cls.load_dictionary(args, os.path.join(args.data, 'label', 'dict.txt'), source=False)
            logger.info('[label] dictionary: {} types'.format(len(label_dict)))
        else:
            # Regression targets are parsed from raw files; reuse the data dict.
            label_dict = data_dict
        return cls(args, data_dict, label_dict)

    def load_dataset(self, split, combine=False, **kwargs):
        """Load a dataset split (input0, optional input1, and targets)."""
        def get_path(key, split):
            return os.path.join(self.args.data, key, split)

        def make_dataset(key, dictionary):
            split_path = get_path(key, split)
            try:
                dataset = data_utils.load_indexed_dataset(split_path, dictionary, self.args.dataset_impl, combine=combine)
            except Exception as e:
                # Missing optional datasets (e.g. input1) are tolerated.
                if ('StorageException: [404] Path not found' in str(e)):
                    logger.warning(f'dataset {e} not found')
                    dataset = None
                else:
                    raise e
            return dataset

        input0 = make_dataset('input0', self.source_dictionary)
        assert (input0 is not None), 'could not find dataset: {}'.format(get_path('input0', split))
        input1 = make_dataset('input1', self.source_dictionary)
        if (self.args.init_token is not None):
            input0 = PrependTokenDataset(input0, self.args.init_token)
        if (input1 is None):
            src_tokens = input0
        else:
            if (self.args.separator_token is not None):
                input1 = PrependTokenDataset(input1, self.args.separator_token)
            src_tokens = ConcatSentencesDataset(input0, input1)
        with data_utils.numpy_seed(self.args.seed):
            shuffle = np.random.permutation(len(src_tokens))
        src_tokens = maybe_shorten_dataset(src_tokens, split, self.args.shorten_data_split_list, self.args.shorten_method, self.max_positions(), self.args.seed)
        dataset = {'id': IdDataset(), 'net_input': {'src_tokens': RightPadDataset(src_tokens, pad_idx=self.source_dictionary.pad()), 'src_lengths': NumelDataset(src_tokens, reduce=False)}, 'nsentences': NumSamplesDataset(), 'ntokens': NumelDataset(src_tokens, reduce=True)}
        if self.args.add_prev_output_tokens:
            prev_tokens_dataset = RightPadDataset(RollDataset(src_tokens, 1), pad_idx=self.dictionary.pad())
            dataset['net_input'].update(prev_output_tokens=prev_tokens_dataset)
        if (not self.args.regression_target):
            # Classification: targets come from the label dictionary.
            label_dataset = make_dataset('label', self.label_dictionary)
            if (label_dataset is not None):
                dataset.update(target=OffsetTokensDataset(StripTokenDataset(label_dataset, id_to_strip=self.label_dictionary.eos()), offset=(- self.label_dictionary.nspecial)))
        else:
            # Regression: targets are parsed from a raw "<split>.label" file.
            label_path = '{0}.label'.format(get_path('label', split))
            if os.path.exists(label_path):
                def parse_regression_target(i, line):
                    values = line.split()
                    assert (len(values) == self.args.num_classes), f'expected num_classes={self.args.num_classes} regression target values on line {i}, found: "{line}"'
                    return [float(x) for x in values]
                with open(label_path) as h:
                    dataset.update(target=RawLabelDataset([parse_regression_target(i, line.strip()) for (i, line) in enumerate(h.readlines())]))
        nested_dataset = NestedDictionaryDataset(dataset, sizes=[src_tokens.sizes])
        if self.args.no_shuffle:
            dataset = nested_dataset
        else:
            dataset = SortDataset(nested_dataset, sort_order=[shuffle])
        logger.info('Loaded {0} with #samples: {1}'.format(split, len(dataset)))
        self.datasets[split] = dataset
        return self.datasets[split]

    def build_model(self, args):
        from fairseq import models
        model = models.build_model(args, self)
        model.register_classification_head(getattr(args, 'classification_head_name', 'sentence_classification_head'), num_classes=self.args.num_classes)
        return model

    def max_positions(self):
        return self._max_positions

    @property
    def source_dictionary(self):
        return self.dictionary

    @property
    def target_dictionary(self):
        return self.dictionary

    @property
    def label_dictionary(self):
        return self._label_dictionary
def convert_sentence_to_json(sentence):
    """Convert a marked-up WSC-style sentence into a SuperGLUE-style record.

    The candidate span is delimited by underscores and the pronoun by square
    brackets, e.g. "The _dog_ chased [it] .". Span indices are
    whitespace-token positions in the cleaned sentence.
    """
    if ('_' in sentence):
        before_query, remainder = sentence.split('_', 1)
        query, remainder = remainder.split('_', 1)
        # Token index = number of space-separated words before the marker.
        query_index = len(before_query.rstrip().split(' '))
    else:
        query = None
        query_index = None
    before_pronoun, remainder = sentence.split('[', 1)
    pronoun, remainder = remainder.split(']', 1)
    pronoun_index = len(before_pronoun.rstrip().split(' '))
    clean_text = sentence.replace('_', '').replace('[', '').replace(']', '')
    return {
        'idx': 0,
        'text': clean_text,
        'target': {
            'span1_index': query_index,
            'span1_text': query,
            'span2_index': pronoun_index,
            'span2_text': pronoun,
        },
    }
def ori_pro(s, name=''):
    """Normalize decoded text: drop '<mask><s>' markers, collapse whitespace.

    `name` is accepted for interface compatibility but unused here.
    """
    cleaned = s.strip().replace('<mask><s>', ' ')
    return ' '.join(cleaned.split())
class kiunet3d(nn.Module):
def __init__(self, c=4, n=1, channels=128, groups=16, norm='bn', num_classes=5):
super(kiunet3d, self).__init__()
self.encoder1 = nn.Conv3d(c, n, kernel_size=3, padding=1, stride=1, bias=False)
self.encoder2 = nn.Conv3d(n, (2 * n), kernel_size=3, padding=1, stride=1, bias=False)
self.encoder3 = nn.Conv3d((2 * n), (4 * n), kernel_size=3, padding=1, stride=1, bias=False)
self.kencoder1 = nn.Conv3d(c, n, kernel_size=3, padding=1, stride=1, bias=False)
self.kencoder2 = nn.Conv3d(n, (2 * n), kernel_size=3, padding=1, stride=1, bias=False)
self.kencoder3 = nn.Conv3d((2 * n), (2 * n), kernel_size=3, padding=1, stride=1, bias=False)
self.downsample1 = nn.MaxPool3d(2, stride=2)
self.downsample2 = nn.MaxPool3d(2, stride=2)
self.downsample3 = nn.MaxPool3d(2, stride=2)
self.kdownsample1 = nn.MaxPool3d(2, stride=2)
self.kdownsample2 = nn.MaxPool3d(2, stride=2)
self.kdownsample3 = nn.MaxPool3d(2, stride=2)
self.upsample1 = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False)
self.upsample2 = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False)
self.upsample3 = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False)
self.kupsample1 = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False)
self.kupsample2 = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False)
self.kupsample3 = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False)
self.decoder1 = nn.Conv3d((4 * n), (2 * n), kernel_size=3, padding=1, stride=1, bias=False)
self.decoder2 = nn.Conv3d((2 * n), (2 * n), kernel_size=3, padding=1, stride=1, bias=False)
self.decoder3 = nn.Conv3d((2 * n), c, kernel_size=3, padding=1, stride=1, bias=False)
self.kdecoder1 = nn.Conv3d((2 * n), (2 * n), kernel_size=3, padding=1, stride=1, bias=False)
self.kdecoder2 = nn.Conv3d((2 * n), (2 * n), kernel_size=3, padding=1, stride=1, bias=False)
self.kdecoder3 = nn.Conv3d((2 * n), c, kernel_size=3, padding=1, stride=1, bias=False)
self.intere1_1 = nn.Conv3d(n, n, 3, stride=1, padding=1)
self.intere2_1 = nn.Conv3d((2 * n), (2 * n), 3, stride=1, padding=1)
self.intere3_1 = nn.Conv3d((2 * n), (4 * n), 3, stride=1, padding=1)
self.intere1_2 = nn.Conv3d(n, n, 3, stride=1, padding=1)
self.intere2_2 = nn.Conv3d((2 * n), (2 * n), 3, stride=1, padding=1)
self.intere3_2 = nn.Conv3d((4 * n), (2 * n), 3, stride=1, padding=1)
self.interd1_1 = nn.Conv3d((2 * n), (2 * n), 3, stride=1, padding=1)
self.interd2_1 = nn.Conv3d((2 * n), (2 * n), 3, stride=1, padding=1)
self.interd3_1 = nn.Conv3d(n, n, 3, stride=1, padding=1)
self.interd1_2 = nn.Conv3d((2 * n), (2 * n), 3, stride=1, padding=1)
self.interd2_2 = nn.Conv3d((2 * n), (2 * n), 3, stride=1, padding=1)
self.interd3_2 = nn.Conv3d(n, n, 3, stride=1, padding=1)
self.seg = nn.Conv3d(c, num_classes, kernel_size=1, padding=0, stride=1, bias=False)
self.softmax = nn.Softmax(dim=1)
for m in self.modules():
if isinstance(m, nn.Conv3d):
torch.nn.init.torch.nn.init.kaiming_normal_(m.weight)
elif (isinstance(m, nn.BatchNorm3d) or isinstance(m, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
    """Two-branch (KiU-Net-style) 3D forward pass.

    ``out`` follows the downsampling U-Net branch (max-pooled encoders),
    ``out1`` follows the upsampling Ki-Net branch (interpolated encoders).
    After every stage the branches exchange features through the
    ``intere*``/``interd*`` convs; trilinear interpolation resamples each
    branch to the other's spatial scale before the element-wise additions.

    Returns per-voxel class scores from ``self.seg`` (no softmax applied,
    despite ``self.softmax`` existing on the module).
    """
    # ---- encoder stage 1: U branch 1/2 scale, K branch 2x scale ----
    out = F.relu(F.max_pool3d(self.encoder1(x), 2, 2))
    out1 = F.relu(F.interpolate(self.kencoder1(x), scale_factor=2, mode='trilinear'))
    tmp = out
    # cross-branch exchange; 0.25 maps 2x -> 1/2 scale, 4 is its inverse
    out = torch.add(out, F.interpolate(F.relu(self.intere1_1(out1)), scale_factor=0.25, mode='trilinear'))
    out1 = torch.add(out1, F.interpolate(F.relu(self.intere1_2(tmp)), scale_factor=4, mode='trilinear'))
    u1 = out   # skip connections consumed by the decoder below
    o1 = out1
    # ---- encoder stage 2: U branch 1/4 scale, K branch 4x scale ----
    out = F.relu(F.max_pool3d(self.encoder2(out), 2, 2))
    out1 = F.relu(F.interpolate(self.kencoder2(out1), scale_factor=2, mode='trilinear'))
    tmp = out
    # 0.0625 = 1/16 maps 4x -> 1/4 scale; 16 is its inverse
    out = torch.add(out, F.interpolate(F.relu(self.intere2_1(out1)), scale_factor=0.0625, mode='trilinear'))
    out1 = torch.add(out1, F.interpolate(F.relu(self.intere2_2(tmp)), scale_factor=16, mode='trilinear'))
    u2 = out
    o2 = out1
    # ---- encoder stage 3 (bottleneck): U branch 1/8 scale, K branch 8x scale ----
    out = F.relu(F.max_pool3d(self.encoder3(out), 2, 2))
    out1 = F.relu(F.interpolate(self.kencoder3(out1), scale_factor=2, mode='trilinear'))
    tmp = out
    # 0.015625 = 1/64 maps 8x -> 1/8 scale; 64 is its inverse
    out = torch.add(out, F.interpolate(F.relu(self.intere3_1(out1)), scale_factor=0.015625, mode='trilinear'))
    out1 = torch.add(out1, F.interpolate(F.relu(self.intere3_2(tmp)), scale_factor=64, mode='trilinear'))
    # ---- decoder stage 1: branches move back toward input resolution ----
    out = F.relu(F.interpolate(self.decoder1(out), scale_factor=2, mode='trilinear'))
    out1 = F.relu(F.max_pool3d(self.kdecoder1(out1), 2, 2))
    tmp = out
    out = torch.add(out, F.interpolate(F.relu(self.interd1_1(out1)), scale_factor=0.0625, mode='trilinear'))
    out1 = torch.add(out1, F.interpolate(F.relu(self.interd1_2(tmp)), scale_factor=16, mode='trilinear'))
    # add encoder stage-2 skip connections
    out = torch.add(out, u2)
    out1 = torch.add(out1, o2)
    # ---- decoder stage 2 ----
    out = F.relu(F.interpolate(self.decoder2(out), scale_factor=2, mode='trilinear'))
    out1 = F.relu(F.max_pool3d(self.kdecoder2(out1), 2, 2))
    tmp = out
    out = torch.add(out, F.interpolate(F.relu(self.interd2_1(out1)), scale_factor=0.25, mode='trilinear'))
    out1 = torch.add(out1, F.interpolate(F.relu(self.interd2_2(tmp)), scale_factor=4, mode='trilinear'))
    # add encoder stage-1 skip connections
    out = torch.add(out, u1)
    out1 = torch.add(out1, o1)
    # ---- decoder stage 3: both branches back at input resolution ----
    out = F.relu(F.interpolate(self.decoder3(out), scale_factor=2, mode='trilinear'))
    out1 = F.relu(F.max_pool3d(self.kdecoder3(out1), 2, 2))
    # fuse the two branches and project to class scores
    out = torch.add(out, out1)
    out = F.relu(self.seg(out))
    return out
def check_path_exists(path):
    """Return *path* unchanged, raising ``FileNotFoundError`` if it is absent."""
    if os.path.exists(path):
        return path
    raise FileNotFoundError(path)
def kMobileNet(input_shape=None, alpha=1.0, depth_multiplier=1, dropout=0.001, include_top=True, weights=None, input_tensor=None, pooling=None, classes=1000, conv1_activation=keras.activations.swish, d_separable_activation=keras.activations.swish, activation=keras.activations.swish, kType=0, l_ratio=0.0, ab_ratio=0.0, skip_stride_cnt=(- 1)):
    """Build a MobileNetV1-style Keras model using k-style conv blocks.

    Args:
        input_shape: input image shape; assumed channels-last with the first
            channel being L and the next two AB when the ratio split is used
            (NOTE(review): L/AB channel layout inferred from CopyChannels
            usage — confirm against the caller's preprocessing).
        alpha: width multiplier applied to every conv's filter count.
        depth_multiplier: depthwise-conv channel multiplier.
        dropout: dropout rate before the classification conv (head only).
        include_top: if True, append GAP + dropout + 1x1 conv + softmax head.
        weights, input_tensor: accepted for API compatibility; unused here.
        pooling: 'avg' or 'max' global pooling when include_top is False.
        classes: number of output classes for the head.
        conv1_activation, d_separable_activation, activation: activations for
            the stem conv and the depthwise blocks.
        kType: k-block variant selector, forwarded to kdepthwise_conv_block.
        l_ratio, ab_ratio: if > 0, split the input into an L branch
            (channel 0) and/or an AB branch (channels 1-2), each with its own
            stem conv sized by the ratio, concatenated afterwards.
        skip_stride_cnt: number of leading stride-2 stages to replace with
            stride 1 (>= 0 disables the stem stride, >= 1 the next, ...);
            -1 keeps all five stride-2 stages.

    Returns:
        A ``keras.models.Model`` named ``kmobilenetv1-<kType>``.
    """
    img_input = keras.layers.Input(shape=input_shape)
    channel_axis = cai.layers.GetChannelAxis()
    # stem stride: disabled when skip_stride_cnt >= 0
    local_strides = ((1, 1) if (skip_stride_cnt >= 0) else (2, 2))
    if ((l_ratio > 0.0) and (ab_ratio > 0.0)):
        # separate stems for the L channel and the AB channels, then concat
        l_branch = cai.layers.CopyChannels(0, 1)(img_input)
        ab_branch = cai.layers.CopyChannels(1, 2)(img_input)
        l_branch = kconv_block(l_branch, int(round((32 * l_ratio))), alpha, strides=local_strides, activation=conv1_activation)
        ab_branch = kconv_block(ab_branch, int(round((32 * ab_ratio))), alpha, strides=local_strides, activation=conv1_activation)
        x = keras.layers.Concatenate(axis=channel_axis, name='l-ab-paths-concat')([l_branch, ab_branch])
    elif ((l_ratio > 0.0) and (ab_ratio <= 0.0)):
        # L-only stem
        l_branch = cai.layers.CopyChannels(0, 1)(img_input)
        x = kconv_block(l_branch, int(round((32 * l_ratio))), alpha, strides=local_strides, activation=conv1_activation)
    elif ((l_ratio <= 0.0) and (ab_ratio > 0.0)):
        # AB-only stem
        ab_branch = cai.layers.CopyChannels(1, 2)(img_input)
        x = kconv_block(ab_branch, int(round((32 * ab_ratio))), alpha, strides=local_strides, activation=conv1_activation)
    else:
        # standard single-stem path over all input channels
        x = kconv_block(img_input, 32, alpha, strides=local_strides, activation=conv1_activation)
    # MobileNetV1 body: 13 depthwise-separable blocks, stride 2 at blocks 2/4/6/12
    x = kdepthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1, d_separable_activation=d_separable_activation, activation=activation, kType=kType)
    local_strides = ((1, 1) if (skip_stride_cnt >= 1) else (2, 2))
    x = kdepthwise_conv_block(x, 128, alpha, depth_multiplier, strides=local_strides, block_id=2, d_separable_activation=d_separable_activation, activation=activation, kType=kType)
    x = kdepthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3, d_separable_activation=d_separable_activation, activation=activation, kType=kType)
    local_strides = ((1, 1) if (skip_stride_cnt >= 2) else (2, 2))
    x = kdepthwise_conv_block(x, 256, alpha, depth_multiplier, strides=local_strides, block_id=4, d_separable_activation=d_separable_activation, activation=activation, kType=kType)
    x = kdepthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5, d_separable_activation=d_separable_activation, activation=activation, kType=kType)
    local_strides = ((1, 1) if (skip_stride_cnt >= 3) else (2, 2))
    x = kdepthwise_conv_block(x, 512, alpha, depth_multiplier, strides=local_strides, block_id=6, d_separable_activation=d_separable_activation, activation=activation, kType=kType)
    x = kdepthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7, d_separable_activation=d_separable_activation, activation=activation, kType=kType)
    x = kdepthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=8, d_separable_activation=d_separable_activation, activation=activation, kType=kType)
    x = kdepthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=9, d_separable_activation=d_separable_activation, activation=activation, kType=kType)
    x = kdepthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=10, d_separable_activation=d_separable_activation, activation=activation, kType=kType)
    x = kdepthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=11, d_separable_activation=d_separable_activation, activation=activation, kType=kType)
    local_strides = ((1, 1) if (skip_stride_cnt >= 4) else (2, 2))
    x = kdepthwise_conv_block(x, 1024, alpha, depth_multiplier, strides=local_strides, block_id=12, d_separable_activation=d_separable_activation, activation=activation, kType=kType)
    x = kdepthwise_conv_block(x, 1024, alpha, depth_multiplier, block_id=13, d_separable_activation=d_separable_activation, activation=activation, kType=kType)
    if include_top:
        # classification head: GAP -> 1x1x(1024*alpha) -> dropout -> conv -> softmax
        if (backend.image_data_format() == 'channels_first'):
            shape = (int((1024 * alpha)), 1, 1)
        else:
            shape = (1, 1, int((1024 * alpha)))
        x = layers.GlobalAveragePooling2D()(x)
        x = layers.Reshape(shape, name='reshape_1')(x)
        x = layers.Dropout(dropout, name='dropout')(x)
        x = layers.Conv2D(classes, (1, 1), padding='same', name='conv_preds')(x)
        x = layers.Reshape((classes,), name='reshape_2')(x)
        x = layers.Activation('softmax', name='act_softmax')(x)
    elif (pooling == 'avg'):
        x = layers.GlobalAveragePooling2D()(x)
    elif (pooling == 'max'):
        x = layers.GlobalMaxPooling2D()(x)
    inputs = img_input
    model = keras.models.Model(inputs, x, name=('kmobilenetv1-' + str(kType)))
    return model
class BasicUpdateBlock(nn.Module):
    """Update block: motion encoder feeding a disparity head and a mask head.

    The mask head produces 64*9 logits per pixel (presumably convex
    upsampling weights in the RAFT style — confirm against the caller).
    """

    def __init__(self):
        super(BasicUpdateBlock, self).__init__()
        self.encoder = BasicMotionEncoder()
        self.flow_head = dispHead()
        mask_layers = [
            nn.ReflectionPad2d(1),
            nn.Conv2d(192, 324, 3),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(324, 64 * 9, 1, padding=0),
        ]
        self.mask = nn.Sequential(*mask_layers)

    def forward(self, net, corr, depth):
        """Encode (depth, corr), then predict a depth update and a mask.

        Note: the incoming ``net`` argument is discarded and replaced by the
        encoder output, matching the original implementation.
        """
        features = self.encoder(depth, corr)
        delta_depth = self.flow_head(features)
        # constant 0.25 scaling of mask logits, as in the original code
        mask = 0.25 * self.mask(features)
        return (features, mask, delta_depth)
def unpack(source_data, target_dir, start_idx):
    """Dump (image, label) pairs as grayscale PNGs under per-label folders.

    Each item is written to ``target_dir/<label>/<start_idx+i>_<label>.png``.
    Returns the number of items processed.
    """
    total = len(source_data)
    for offset, (image, label) in tqdm(enumerate(source_data), total=total):
        label_dir = os.path.join(target_dir, str(label))
        os.makedirs(label_dir, exist_ok=True)
        filename = '{}_{}.png'.format(start_idx + offset, str(label))
        # convert to single-channel ('L') before saving
        grayscale = image.convert('L')
        grayscale.save(os.path.join(label_dir, filename))
    return total
class PIDNet(nn.Module):
    """PIDNet three-branch real-time segmentation network.

    Branches (following the PIDNet paper's P/I/D naming):
      * I (integral) branch: ``layer1..layer5`` + ``spp`` — low resolution,
        rich context.
      * P (proportional) branch: ``layer3_/4_/5_`` — keeps 1/8 resolution
        detail, fused with the I branch via ``pag3``/``pag4``.
      * D (derivative) branch: ``layer3_d/4_d/5_d`` — boundary features,
        fed by ``diff3``/``diff4`` projections of the I branch.

    Args:
        m: number of BasicBlocks in the early layers (2 selects the light
           PAPPM/Light_Bag variant, otherwise DAPPM/Bag is used).
        n: number of BasicBlocks in layer3/layer4.
        num_classes: output classes of the segmentation heads.
        planes, ppm_planes, head_planes: base channel widths.
        augment: when True, also return the auxiliary P and D head outputs.
    """
    def __init__(self, m=2, n=3, num_classes=19, planes=64, ppm_planes=96, head_planes=128, augment=True):
        super(PIDNet, self).__init__()
        self.augment = augment
        # stem: two stride-2 3x3 convs -> 1/4 input resolution
        self.conv1 = nn.Sequential(nn.Conv2d(3, planes, kernel_size=3, stride=2, padding=1), nn.ReLU(inplace=True), nn.Conv2d(planes, planes, kernel_size=3, stride=2, padding=1), nn.ReLU(inplace=True))
        self.relu = nn.ReLU(inplace=True)
        # I branch backbone
        self.layer1 = self._make_layer(BasicBlock, planes, planes, m)
        self.layer2 = self._make_layer(BasicBlock, planes, (planes * 2), m, stride=2)
        self.layer3 = self._make_layer(BasicBlock, (planes * 2), (planes * 4), n, stride=2)
        self.layer4 = self._make_layer(BasicBlock, (planes * 4), (planes * 8), n, stride=2)
        self.layer5 = self._make_layer(Bottleneck, (planes * 8), (planes * 8), 2, stride=2)
        # 1x1 compressions of the I branch for fusion into the P branch
        self.compression3 = nn.Sequential(nn.Conv2d((planes * 4), (planes * 2), kernel_size=1, bias=False))
        self.compression4 = nn.Sequential(nn.Conv2d((planes * 8), (planes * 2), kernel_size=1, bias=False))
        self.pag3 = PagFM((planes * 2), planes)
        self.pag4 = PagFM((planes * 2), planes)
        # P branch: stays at 1/8 resolution
        self.layer3_ = self._make_layer(BasicBlock, (planes * 2), (planes * 2), m)
        self.layer4_ = self._make_layer(BasicBlock, (planes * 2), (planes * 2), m)
        self.layer5_ = self._make_layer(Bottleneck, (planes * 2), (planes * 2), 1)
        if (m == 2):
            # light variant (PIDNet-S): narrower D branch, PAPPM + Light_Bag
            self.layer3_d = self._make_single_layer(BasicBlock, (planes * 2), planes)
            self.layer4_d = self._make_layer(Bottleneck, planes, planes, 1)
            self.diff3 = nn.Sequential(nn.Conv2d((planes * 4), planes, kernel_size=3, padding=1, bias=False))
            self.diff4 = nn.Sequential(nn.Conv2d((planes * 8), (planes * 2), kernel_size=3, padding=1, bias=False))
            self.spp = PAPPM((planes * 16), ppm_planes, (planes * 4))
            self.dfm = Light_Bag((planes * 4), (planes * 4))
        else:
            # heavier variant (PIDNet-M/L): wider D branch, DAPPM + Bag
            self.layer3_d = self._make_single_layer(BasicBlock, (planes * 2), (planes * 2))
            self.layer4_d = self._make_single_layer(BasicBlock, (planes * 2), (planes * 2))
            self.diff3 = nn.Sequential(nn.Conv2d((planes * 4), (planes * 2), kernel_size=3, padding=1, bias=False))
            self.diff4 = nn.Sequential(nn.Conv2d((planes * 8), (planes * 2), kernel_size=3, padding=1, bias=False))
            self.spp = DAPPM((planes * 16), ppm_planes, (planes * 4))
            self.dfm = Bag((planes * 4), (planes * 4))
        self.layer5_d = self._make_layer(Bottleneck, (planes * 2), (planes * 2), 1)
        if self.augment:
            # auxiliary heads used for deep supervision during training
            self.seghead_p = segmenthead((planes * 2), head_planes, num_classes)
            self.seghead_d = segmenthead((planes * 2), planes, 1)
        self.final_layer = segmenthead((planes * 4), head_planes, num_classes)
        # NOTE(review): this loop variable shadows the constructor argument
        # ``m``; harmless here because ``m`` is no longer needed, but worth
        # renaming for clarity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, inplanes, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks; the last one is built no_relu."""
        downsample = None
        if ((stride != 1) or (inplanes != (planes * block.expansion))):
            # 1x1 projection shortcut when shape changes
            downsample = nn.Sequential(nn.Conv2d(inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False))
        layers = []
        layers.append(block(inplanes, planes, stride, downsample))
        inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            if (i == (blocks - 1)):
                layers.append(block(inplanes, planes, stride=1, no_relu=True))
            else:
                layers.append(block(inplanes, planes, stride=1, no_relu=False))
        return nn.Sequential(*layers)

    def _make_single_layer(self, block, inplanes, planes, stride=1):
        """Build one residual block (no_relu), with a projection if needed."""
        downsample = None
        if ((stride != 1) or (inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(nn.Conv2d(inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False))
        layer = block(inplanes, planes, stride, downsample, no_relu=True)
        return layer

    def forward(self, x):
        """Run the three branches and fuse them.

        Returns ``[p_aux, fused, d_aux]`` when ``self.augment`` is True,
        otherwise just the fused segmentation logits at 1/8 resolution.
        """
        # all branch outputs are resampled to 1/8 of the input size
        width_output = (x.shape[(- 1)] // 8)
        height_output = (x.shape[(- 2)] // 8)
        x = self.conv1(x)
        x = self.layer1(x)
        x = self.relu(self.layer2(self.relu(x)))
        # split into P (x_), D (x_d) and I (x) branches
        x_ = self.layer3_(x)
        x_d = self.layer3_d(x)
        x = self.relu(self.layer3(x))
        # fuse I context into P via PagFM; feed boundary cues into D
        x_ = self.pag3(x_, self.compression3(x))
        x_d = (x_d + F.interpolate(self.diff3(x), size=[height_output, width_output], mode='bilinear', align_corners=algc))
        if self.augment:
            temp_p = x_  # kept for the auxiliary P head
        x = self.relu(self.layer4(x))
        x_ = self.layer4_(self.relu(x_))
        x_d = self.layer4_d(self.relu(x_d))
        x_ = self.pag4(x_, self.compression4(x))
        x_d = (x_d + F.interpolate(self.diff4(x), size=[height_output, width_output], mode='bilinear', align_corners=algc))
        if self.augment:
            temp_d = x_d  # kept for the auxiliary D head
        x_ = self.layer5_(self.relu(x_))
        x_d = self.layer5_d(self.relu(x_d))
        # I branch through bottleneck + pyramid pooling, upsampled to 1/8
        x = F.interpolate(self.spp(self.layer5(x)), size=[height_output, width_output], mode='bilinear', align_corners=algc)
        # boundary-attention-guided fusion of the three branches
        x_ = self.final_layer(self.dfm(x_, x, x_d))
        if self.augment:
            x_extra_p = self.seghead_p(temp_p)
            x_extra_d = self.seghead_d(temp_d)
            return [x_extra_p, x_, x_extra_d]
        else:
            return x_
class TransfoXLConfig(PretrainedConfig):
    """Configuration class for a Transformer-XL model.

    Stores vocabulary / adaptive-softmax cutoffs, model dimensions, memory
    lengths and initialization settings.  Mirrors the standard
    ``PretrainedConfig`` aliases (``hidden_size``, ``num_attention_heads``,
    ``num_hidden_layers``) as read-only properties.

    Fix vs. the previous revision: the ``@property`` decorators had been
    stripped and the setter line was the bare expression ``_token.setter``,
    which raised ``NameError`` at class-creation time.  They are restored
    here (``n_token`` keeps its writable setter as a backward-compatible
    alias for ``vocab_size``).
    """
    pretrained_config_archive_map = TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = 'transfo-xl'

    def __init__(self, vocab_size=267735, cutoffs=(20000, 40000, 200000), d_model=1024, d_embed=1024, n_head=16, d_head=64, d_inner=4096, div_val=4, pre_lnorm=False, n_layer=18, tgt_len=128, ext_len=0, mem_len=1600, clamp_len=1000, same_length=True, proj_share_all_but_first=True, attn_type=0, sample_softmax=(- 1), adaptive=True, tie_weight=True, dropout=0.1, dropatt=0.0, untie_r=True, init='normal', init_range=0.01, proj_init_std=0.01, init_std=0.02, layer_norm_epsilon=1e-05, **kwargs):
        """Build the config.

        Args:
            vocab_size: vocabulary size (aliased by the ``n_token`` property).
            cutoffs: adaptive-softmax cluster boundaries (default is now a
                tuple rather than a mutable list — same values, and it is
                only ever copied via ``extend`` below).
            proj_share_all_but_first: share embedding projections for every
                cluster except the first.
            Remaining arguments are stored verbatim on the instance.
        """
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        # copy so callers' sequences are never aliased/mutated
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        self.tie_weight = tie_weight
        if proj_share_all_but_first:
            self.tie_projs = ([False] + ([True] * len(self.cutoffs)))
        else:
            self.tie_projs = ([False] + ([False] * len(self.cutoffs)))
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.tgt_len = tgt_len
        self.ext_len = ext_len
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon

    @property
    def max_position_embeddings(self):
        # effective attention span: target segment + extended context + memory
        return ((self.tgt_len + self.ext_len) + self.mem_len)

    @property
    def n_token(self):
        # backward-compatible alias for vocab_size
        return self.vocab_size

    @n_token.setter
    def n_token(self, value):
        self.vocab_size = value

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.n_head

    @property
    def num_hidden_layers(self):
        return self.n_layer
def print_result(best_scores, best_hypos, output_file):
    """Write one '<rank> <score> <hypothesis>' line per result to *output_file*."""
    ranked = enumerate(zip(best_scores, best_hypos))
    for rank, (score, hypo) in ranked:
        print('{} {} {}'.format(rank, score, hypo), file=output_file)
def run(dataset_dir):
    """Download CIFAR-10 and convert it into train/test TFRecord files.

    Skips the conversion entirely if both output files already exist;
    temporary download artifacts are cleaned up at the end.
    """
    if not tf.gfile.Exists(dataset_dir):
        tf.gfile.MakeDirs(dataset_dir)
    training_filename = _get_output_filename(dataset_dir, 'train')
    testing_filename = _get_output_filename(dataset_dir, 'test')
    if tf.gfile.Exists(training_filename) and tf.gfile.Exists(testing_filename):
        print('Dataset files already exist. Exiting without re-creating them.')
        return
    dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)
    batches_dir = os.path.join(dataset_dir, 'cifar-10-batches-py')
    # training split: the record offset carries over between batch files
    with tf.python_io.TFRecordWriter(training_filename) as tfrecord_writer:
        offset = 0
        for batch_index in range(_NUM_TRAIN_FILES):
            batch_file = os.path.join(batches_dir, ('data_batch_%d' % (batch_index + 1)))
            offset = _add_to_tfrecord(batch_file, tfrecord_writer, offset)
    # test split: single batch file
    with tf.python_io.TFRecordWriter(testing_filename) as tfrecord_writer:
        _add_to_tfrecord(os.path.join(batches_dir, 'test_batch'), tfrecord_writer)
    labels_to_class_names = dict(enumerate(_CLASS_NAMES))
    dataset_utils.write_label_file(labels_to_class_names, dataset_dir)
    _clean_up_temporary_files(dataset_dir)
    print('\nFinished converting the Cifar10 dataset!')
class PSM_Encoder_Instance(nn.Module):
    """PSMNet-style 2D feature encoder with an SPP (spatial pyramid pooling) tail.

    Produces a 32-channel feature map at 1/4 input resolution plus the list
    of intermediate activations (``w_arr``) from the stem and layer1, which
    callers can use e.g. for feature-level losses.

    Args:
        in_planes: number of input image channels.
        batch_norm: whether the conv helpers use batch normalization.
    """
    def __init__(self, in_planes=3, batch_norm=True):
        super(PSM_Encoder_Instance, self).__init__()
        self.in_planes = in_planes
        self.batch_norm = batch_norm
        # stem: stride-2 conv + two stride-1 convs (instance-norm variant)
        self.firstconv = nn.Sequential(conv_in_relu(batch_norm, self.in_planes, 32, 3, 2, 1, 1, bias=False), conv_in_relu(batch_norm, 32, 32, 3, 1, 1, 1, bias=False), conv_in_relu(batch_norm, 32, 32, 3, 1, 1, 1, bias=False))
        # NOTE: self.in_planes is reused as a running "current channels"
        # counter — _make_layer both reads and updates it, so the order of
        # the following four calls matters.
        self.in_planes = 32
        self.layer1 = self._make_layer(batch_norm, BasicBlock_IN, 32, 3, 1, 1, 1)
        self.layer2 = self._make_layer(batch_norm, BasicBlock, 64, 16, 2, 1, 1)
        self.layer3 = self._make_layer(batch_norm, BasicBlock, 128, 3, 1, 1, 1)
        self.layer4 = self._make_layer(batch_norm, BasicBlock, 128, 3, 1, 2, 2)
        # SPP branches: average-pool at 4 scales, project to 32 channels each
        self.branch1 = nn.Sequential(nn.AvgPool2d((64, 64), stride=(64, 64)), conv_bn_relu(batch_norm, 128, 32, 1, 1, 0, 1, bias=False))
        self.branch2 = nn.Sequential(nn.AvgPool2d((32, 32), stride=(32, 32)), conv_bn_relu(batch_norm, 128, 32, 1, 1, 0, 1, bias=False))
        self.branch3 = nn.Sequential(nn.AvgPool2d((16, 16), stride=(16, 16)), conv_bn_relu(batch_norm, 128, 32, 1, 1, 0, 1, bias=False))
        self.branch4 = nn.Sequential(nn.AvgPool2d((8, 8), stride=(8, 8)), conv_bn_relu(batch_norm, 128, 32, 1, 1, 0, 1, bias=False))
        # fuse concatenated features (64+128+4*32=320) down to 32 channels
        self.lastconv = nn.Sequential(conv_bn_relu(batch_norm, 320, 128, 3, 1, 1, 1, bias=False), nn.Conv2d(128, 32, kernel_size=1, padding=0, stride=1, dilation=1, bias=False))

    def _make_layer(self, batch_norm, block, out_planes, blocks, stride, padding, dilation):
        """Stack residual blocks; updates self.in_planes to the output width."""
        downsample = None
        if ((stride != 1) or (self.in_planes != (out_planes * block.expansion))):
            # projection shortcut when shape changes
            downsample = conv_bn(batch_norm, self.in_planes, (out_planes * block.expansion), kernel_size=1, stride=stride, padding=0, dilation=1)
        layers = []
        layers.append(block(batch_norm, self.in_planes, out_planes, stride, downsample, padding, dilation))
        self.in_planes = (out_planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(batch_norm, self.in_planes, out_planes, 1, None, padding, dilation))
        return nn.Sequential(*layers)

    def _forward(self, x):
        """Compute the fused feature map and collect intermediate activations."""
        w_arr = []
        # run the stem layer-by-layer so every intermediate can be recorded
        for i in range(len(self.firstconv)):
            x = self.firstconv[i](x)
            w_arr.append(x)
        for i in range(len(self.layer1)):
            x = self.layer1[i](x)
            w_arr.append(x)
        output_2_1 = x
        output_4_0 = self.layer2(output_2_1)
        output_4_1 = self.layer3(output_4_0)
        output_8 = self.layer4(output_4_1)
        # each SPP branch is pooled then upsampled back to output_8's size
        output_branch1 = self.branch1(output_8)
        output_branch1 = F.interpolate(output_branch1, (output_8.size()[2], output_8.size()[3]), mode='bilinear', align_corners=True)
        output_branch2 = self.branch2(output_8)
        output_branch2 = F.interpolate(output_branch2, (output_8.size()[2], output_8.size()[3]), mode='bilinear', align_corners=True)
        output_branch3 = self.branch3(output_8)
        output_branch3 = F.interpolate(output_branch3, (output_8.size()[2], output_8.size()[3]), mode='bilinear', align_corners=True)
        output_branch4 = self.branch4(output_8)
        output_branch4 = F.interpolate(output_branch4, (output_8.size()[2], output_8.size()[3]), mode='bilinear', align_corners=True)
        # concat along channels: layer2 out, layer4 out, and the 4 SPP branches
        output_feature = torch.cat((output_4_0, output_8, output_branch4, output_branch3, output_branch2, output_branch1), 1)
        output_feature = self.lastconv(output_feature)
        return (output_feature, w_arr)

    def forward(self, input):
        """Return [feature_map, intermediate_activations]."""
        (fms, w_arr) = self._forward(input)
        return [fms, w_arr]
# Fix vs. previous revision: the decorator line had been garbled to the bare
# call ``_module(force=True)``, which is a NameError at import time.  It is
# reconstructed as the standard mmcv registry decorator; ``force=True``
# overwrites any previously registered class of the same name.
@PIPELINES.register_module(force=True)
class DefaultFormatBundle(object):
    """Default formatting bundle for pipeline results.

    Wraps the common fields of a ``results`` dict into DataContainers (DC):
    'img' is converted HWC -> CHW and stacked; 'gt_semantic_seg' gains a
    leading channel axis and is cast to int64; 'gt_masks'/'gt_labels' are
    wrapped unstacked.
    """

    def __call__(self, results):
        """Format *results* in place and return it."""
        if ('img' in results):
            img = results['img']
            # grayscale arrays arrive as (H, W); add a trailing channel axis
            if (len(img.shape) < 3):
                img = np.expand_dims(img, (- 1))
            # HWC -> CHW, made contiguous for tensor conversion
            img = np.ascontiguousarray(img.transpose(2, 0, 1))
            results['img'] = DC(to_tensor(img), stack=True)
        if ('gt_semantic_seg' in results):
            # [None, ...] adds the channel axis expected by the loss
            results['gt_semantic_seg'] = DC(to_tensor(results['gt_semantic_seg'][(None, ...)].astype(np.int64)), stack=True)
        if ('gt_masks' in results):
            results['gt_masks'] = DC(to_tensor(results['gt_masks']))
        if ('gt_labels' in results):
            results['gt_labels'] = DC(to_tensor(results['gt_labels']))
        return results

    def __repr__(self):
        return self.__class__.__name__
def quality_check_timeseries_dataframe(df, dt_col, id_col=None, repair=True):
    """Validate (and optionally repair) a time-series dataframe.

    Checks, in order: the datetime column's dtype, the regularity of the
    time interval, and missing values; abnormal values are checked last but
    only reported, never repaired.

    Args:
        df: input dataframe; may be mutated and/or replaced by repairs.
        dt_col: name of the datetime column (must exist, no NaNs allowed).
        id_col: optional per-series id column name (must exist if given).
        repair: attempt in-place fixes instead of just flagging problems.

    Returns:
        (flag, df): ``flag`` is True when the data passed every check (or
        every failed check was successfully repaired); ``df`` is the
        possibly-repaired dataframe.
    """
    invalidInputError((dt_col in df.columns), f'dt_col {dt_col} can not be found in df.')
    if (id_col is not None):
        invalidInputError((id_col in df.columns), f'id_col {id_col} can not be found in df.')
    # NaT in the datetime column is unrecoverable — hard error, not a repair
    invalidInputError((pd.isna(df[dt_col]).sum() == 0), 'There is N/A in datetime col')
    if (df.empty is True):
        # an empty frame is trivially clean
        return (True, df)
    flag = True
    # 1) dtype check: repair coerces dt_col to a timestamp type in place
    if (_timestamp_type_check(df[dt_col]) is False):
        if (repair is True):
            flag = (flag and _timestamp_type_repair(df, dt_col))
        else:
            flag = False
    # 2) interval regularity — only meaningful once the dtype is valid
    if (flag is True):
        (interval_flag, intervals) = _time_interval_check(df, dt_col, id_col)
        if (interval_flag is False):
            if (repair is True):
                # repair may resample, so it returns a new dataframe
                (df, repair_flag) = _time_interval_repair(df, dt_col, intervals, id_col)
                flag = (flag and repair_flag)
            else:
                flag = False
    # 3) missing values in the non-datetime columns
    if (_missing_value_check(df, dt_col) is False):
        if (repair is True):
            flag = (flag and _missing_value_repair(df, dt_col))
        else:
            flag = False
    # abnormal values are reported only; they never affect the flag
    _abnormal_value_check(df, dt_col)
    return (flag, df)
def _update_config(base_cfg, exp_cfg):
    """Recursively merge *exp_cfg* (an edict) into *base_cfg* in place.

    Scalar values overwrite the matching entry; dict values recurse one
    level deeper.  A key present in exp_cfg but missing from base_cfg is an
    error.  If either argument has the wrong type, the call is a no-op.
    """
    if not (isinstance(base_cfg, dict) and isinstance(exp_cfg, edict)):
        return
    for key, value in exp_cfg.items():
        if key not in base_cfg:
            raise ValueError('{} not exist in config.py'.format(key))
        if isinstance(value, dict):
            _update_config(base_cfg[key], value)
        else:
            base_cfg[key] = value
def draw_grasp_prediction_matplotlib(axs, prediction, image, grasp_success, z, showTextBox, title=None):
    """Draw *image* on *axs* and overlay the decoded grasp; return the updated z-order."""
    center, theta, y_current, x_current = decode_prediction_for_matplotlib(prediction, image)
    axs.imshow(image, alpha=1, zorder=z)
    return draw_grasp(
        axs=axs,
        grasp_success=grasp_success,
        center=center,
        theta=theta,
        z=z,
        y_current=y_current,
        x_current=x_current,
        showTextBox=showTextBox,
        title=title,
    )
def vgg_conv_layer(x, kernel_size, out_channels, stride, var_list, pad='SAME', name='conv'):
    """VGG-style conv2d + bias + ReLU (TF1).

    Creates weight/bias variables under ``name`` with uniform init in
    [-stdv, stdv] (stdv = 1/sqrt(kernel_size * in_channels), matching the
    original code), appends them to *var_list*, and returns the activation.
    """
    in_channels = x.get_shape().as_list()[-1]
    with tf.variable_scope(name):
        fan = kernel_size * in_channels
        stdv = 1.0 / math.sqrt(fan)
        uniform_init = tf.random_uniform_initializer(-stdv, stdv)
        w = tf.get_variable(
            'kernel_weights',
            [kernel_size, kernel_size, in_channels, out_channels],
            tf.float32,
            initializer=uniform_init,
        )
        b = tf.get_variable('kernel_biases', [out_channels], tf.float32, initializer=uniform_init)
        var_list.extend([w, b])
        conv = tf.nn.conv2d(x, w, [1, stride, stride, 1], padding=pad)
        return tf.nn.relu(tf.nn.bias_add(conv, b))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.