def test_first_non_silent_sample_returns_correct_sample():
waveform = torch.zeros(1000)
waveform[500:] = 1.0
first_non_silent_sample = audio_utils.first_non_silent_sample(waveform, frame_size=100, hop_size=100)
assert (first_non_silent_sample == 500)
class FiniteBernoulliBanditEpsilonGreedy(Agent):
def __init__(self, n_arm, a0=1, b0=1, epsilon=0.0):
self.n_arm = n_arm
self.epsilon = epsilon
self.prior_success = np.array([a0 for arm in range(n_arm)])
self.prior_failure = np.array([b0 for arm in range(n_arm)])
def set_prior(self, prior_success, prior_failure):
self.prior_success = np.array(prior_success)
self.prior_failure = np.array(prior_failure)
def get_posterior_mean(self):
return (self.prior_success / (self.prior_success + self.prior_failure))
def get_posterior_sample(self):
return np.random.beta(self.prior_success, self.prior_failure)
def update_observation(self, observation, action, reward):
assert (observation == self.n_arm)
if np.isclose(reward, 1):
self.prior_success[action] += 1
elif np.isclose(reward, 0):
self.prior_failure[action] += 1
else:
raise ValueError('Rewards should be 0 or 1 in Bernoulli Bandit')
def pick_action(self, observation):
if (np.random.rand() < self.epsilon):
action = np.random.randint(self.n_arm)
else:
posterior_means = self.get_posterior_mean()
action = random_argmax(posterior_means)
return action
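# A minimal usage sketch (hypothetical arm means; assumes numpy as np and the
# `Agent` base class from this codebase; `random_argmax` is defined elsewhere,
# so a tie-breaking stand-in is sketched here). Note that update_observation
# asserts observation == n_arm.
import numpy as np

def random_argmax(values):
    # argmax that breaks ties uniformly at random
    return np.random.choice(np.flatnonzero(values == values.max()))

agent = FiniteBernoulliBanditEpsilonGreedy(n_arm=3, epsilon=0.1)
true_means = np.array([0.2, 0.5, 0.8])
for _ in range(2000):
    action = agent.pick_action(observation=3)
    reward = float(np.random.rand() < true_means[action])
    agent.update_observation(observation=3, action=action, reward=reward)
print(agent.get_posterior_mean())  # should concentrate near true_means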
def main(dataset_name, pca, cluster_method, lm_type, document_repr_type, random_state):
save_dict_data = {}
do_pca = (pca != 0)
save_dict_data['dataset_name'] = dataset_name
save_dict_data['pca'] = pca
save_dict_data['cluster_method'] = cluster_method
save_dict_data['lm_type'] = lm_type
save_dict_data['document_repr_type'] = document_repr_type
save_dict_data['random_state'] = random_state
naming_suffix = f'pca{pca}.clus{cluster_method}.{lm_type}.{document_repr_type}.{random_state}'
print(naming_suffix)
data_dir = os.path.join(INTERMEDIATE_DATA_FOLDER_PATH, dataset_name)
print(data_dir)
with open(os.path.join(data_dir, 'dataset.pk'), 'rb') as f:
dictionary = pk.load(f)
class_names = dictionary['class_names']
num_classes = len(class_names)
print(class_names)
with open(os.path.join(data_dir, f'document_repr_lm-{lm_type}-{document_repr_type}.pk'), 'rb') as f:
dictionary = pk.load(f)
document_representations = dictionary['document_representations']
class_representations = dictionary['class_representations']
repr_prediction = np.argmax(cosine_similarity_embeddings(document_representations, class_representations), axis=1)
save_dict_data['repr_prediction'] = repr_prediction
if do_pca:
_pca = PCA(n_components=pca, random_state=random_state)
document_representations = _pca.fit_transform(document_representations)
class_representations = _pca.transform(class_representations)
print(f'Explained variance: {sum(_pca.explained_variance_ratio_)}')
if (cluster_method == 'gmm'):
cosine_similarities = cosine_similarity_embeddings(document_representations, class_representations)
document_class_assignment = np.argmax(cosine_similarities, axis=1)
document_class_assignment_matrix = np.zeros((document_representations.shape[0], num_classes))
for i in range(document_representations.shape[0]):
document_class_assignment_matrix[i][document_class_assignment[i]] = 1.0
gmm = GaussianMixture(n_components=num_classes, covariance_type='tied', random_state=random_state, n_init=999, warm_start=True)
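# Warm-start hack: seed the mixture from the cosine-similarity class
# assignment and mark it converged so that fit() refines this seeding
# instead of running its own initialization.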
gmm.converged_ = 'HACK'
gmm._initialize(document_representations, document_class_assignment_matrix)
gmm.lower_bound_ = (- np.inf)
gmm.fit(document_representations)
documents_to_class = gmm.predict(document_representations)
centers = gmm.means_
save_dict_data['centers'] = centers
distance = ((- gmm.predict_proba(document_representations)) + 1)
elif (cluster_method == 'kmeans'):
kmeans = KMeans(n_clusters=num_classes, init=class_representations, random_state=random_state)
kmeans.fit(document_representations)
documents_to_class = kmeans.predict(document_representations)
centers = kmeans.cluster_centers_
save_dict_data['centers'] = centers
distance = np.zeros((document_representations.shape[0], centers.shape[0]), dtype=float)
for (i, _emb_a) in enumerate(document_representations):
for (j, _emb_b) in enumerate(centers):
distance[i][j] = np.linalg.norm((_emb_a - _emb_b))
save_dict_data['documents_to_class'] = documents_to_class
save_dict_data['distance'] = distance
with open(os.path.join(data_dir, f'data.{naming_suffix}.pk'), 'wb') as f:
pk.dump(save_dict_data, f)
class ViTMAEPreTrainedModel(metaclass=DummyObject):
_backends = ['torch']
def __init__(self, *args, **kwargs):
requires_backends(self, ['torch'])
class AutoTokenizer():
def __init__(self):
raise EnvironmentError('AutoTokenizer is designed to be instantiated using the `AutoTokenizer.from_pretrained(pretrained_model_name_or_path)` method.')
@classmethod
@_list_option_in_docstrings(SLOW_TOKENIZER_MAPPING)
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
config = kwargs.pop('config', None)
if (not isinstance(config, PretrainedConfig)):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
use_fast = kwargs.pop('use_fast', True)
if (config.tokenizer_class is not None):
tokenizer_class = None
if (use_fast and (not config.tokenizer_class.endswith('Fast'))):
tokenizer_class_candidate = f'{config.tokenizer_class}Fast'
tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate)
if (tokenizer_class is None):
tokenizer_class_candidate = config.tokenizer_class
tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate)
if (tokenizer_class is None):
raise ValueError('Tokenizer class {} does not exist or is not currently imported.'.format(tokenizer_class_candidate))
return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
if isinstance(config, EncoderDecoderConfig):
if (type(config.decoder) is not type(config.encoder)):
logger.warning(f'The encoder model config class: {config.encoder.__class__} is different from the decoder model config class: {config.decoder.__class__}. It is not recommended to use the `AutoTokenizer.from_pretrained()` method in this case. Please use the encoder and decoder specific tokenizer classes.')
config = config.encoder
if (type(config) in TOKENIZER_MAPPING.keys()):
(tokenizer_class_py, tokenizer_class_fast) = TOKENIZER_MAPPING[type(config)]
if (tokenizer_class_fast and (use_fast or (tokenizer_class_py is None))):
return tokenizer_class_fast.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
elif (tokenizer_class_py is not None):
return tokenizer_class_py.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
else:
raise ValueError('This tokenizer cannot be instantiated. Please make sure you have `sentencepiece` installed in order to use this tokenizer.')
raise ValueError('Unrecognized configuration class {} to build an AutoTokenizer.\nModel type should be one of {}.'.format(config.__class__, ', '.join((c.__name__ for c in TOKENIZER_MAPPING.keys()))))
def convert_data_to_yaml(data, split, yaml, is_train=True, label=None, feature=None, qd_format=False, label_version=None, feature_version=None):
if qd_format:
info = {'feature': (feature if (feature is not None) else {'data': data, 'split': split, 't': 'feature', 'version': feature_version}), 'hw': {'data': data, 'split': split, 't': 'hw'}, 'img': {'data': data, 'split': split}, 'label': (label if (label is not None) else {'data': data, 'split': split, 't': 'label', 'version': label_version}), 'caption': {'data': data, 'split': split, 't': 'hw'}, 'composite': False, 'qd_format': True}
else:
assert ((label is None) and (feature is None))
from src.qd.tsv_io import TSVDataset
yaml_folder = op.dirname(yaml)
dataset = TSVDataset(data)
if (not op.isfile(dataset.get_data((split + 'X')))):
info = {'feature': op.relpath(dataset.get_data('train', 'feature', version=feature_version), yaml_folder), 'label': op.relpath(dataset.get_data(split, 'label', version=label_version), yaml_folder), 'hw': op.relpath(dataset.get_data(split, 'hw'), yaml_folder), 'img': op.relpath(dataset.get_data(split), yaml_folder), 'caption': op.relpath(dataset.get_data(split, 'caption'), yaml_folder), 'composite': False}
else:
def get_rel_path(p):
return op.relpath(op.realpath(p), op.realpath(yaml_folder))
splitX = (split + 'X')
from src.tools.common import load_list_file
info = {'feature': list(map(get_rel_path, load_list_file(dataset.get_data(splitX, 'feature', version=feature_version)))), 'label': list(map(get_rel_path, load_list_file(dataset.get_data(splitX, 'label', version=label_version)))), 'hw': list(map(get_rel_path, load_list_file(dataset.get_data(splitX, 'hw')))), 'img': list(map(get_rel_path, load_list_file(dataset.get_data(splitX)))), 'caption': list(map(get_rel_path, load_list_file(dataset.get_data(splitX, 'caption')))), 'composite': True}
if is_train:
caption_linelist = dataset.get_data(split, 'caption_linelist')
assert op.isfile(caption_linelist)
info['caption_linelist'] = caption_linelist
else:
caption_linelist = dataset.get_data(split, 'caption_linelist_test')
if (not op.isfile(caption_linelist)):
from src.qd.tsv_io import tsv_reader, tsv_writer
tsv_writer(((a, b, 0) for (a, b) in tsv_reader(dataset.get_shuffle_file(split))), caption_linelist)
info['caption_linelist'] = caption_linelist
from src.tools.common import write_to_yaml_file
write_to_yaml_file(info, yaml)
class ImportanceWeightedRiskEstimator(RiskEstimator):
def __init__(self, loss, dataset, *args):
super().__init__(loss)
self.N = len(dataset.test_idxs)
def estimate(self, predictions, observed, acq_weights):
l_i = self.loss(predictions, observed)
M = len(predictions)
R = ((1 / M) * ((1 / acq_weights) * l_i).sum())
return self.return_and_save(R)
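# Worked sketch of what `estimate` computes: the importance-weighted risk
# R = (1/M) * sum_i l_i / w_i, with w_i the acquisition probability of point
# i. Tensors and the squared-error loss below are purely illustrative.
import torch

l_i = (torch.tensor([0.9, 0.2]) - torch.tensor([1.0, 0.0])) ** 2  # [0.01, 0.04]
acq_weights = torch.tensor([0.5, 0.25])  # sampling probabilities
R = (1 / len(l_i)) * ((1 / acq_weights) * l_i).sum()  # 0.5 * (0.02 + 0.16) = 0.09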
class TrajInfo(AttrDict):
_discount = 1
def __init__(self, include_observations=False, **kwargs):
super().__init__(**kwargs)
self._include_observations = include_observations
if self._include_observations:
self.Observations = []
self.Length = 0
self.Return = 0
self.NonzeroRewards = 0
self.DiscountedReturn = 0
self._cur_discount = 1
self.env_infos = []
def step(self, observation, action, reward, done, agent_info, env_info):
if self._include_observations:
self.Observations.append(np.copy(observation))
self.Length += 1
self.Return += reward
self.NonzeroRewards += (reward != 0)
self.DiscountedReturn += (self._cur_discount * reward)
self._cur_discount *= self._discount
self.env_infos.append(env_info)
def terminate(self, observation):
return self
class LZ09_F7(LZ09):
def __init__(self, number_of_variables=10):
super(LZ09_F7, self).__init__(number_of_variables, dtype=3, ltype=21, ptype=21)
self.obj_directions = [self.MINIMIZE, self.MINIMIZE]
self.obj_labels = ['f(x)', 'f(y)']
def number_of_objectives(self) -> int:
return len(self.obj_directions)
def name(self):
return 'LZ09_F7'
def time_to_string(time, frame_length):
n = round((time / frame_length))
assert (n >= 0)
return float_to_string((n * frame_length))
def test_constantbeta_dehnencore_in_nfw_Qoutofbounds():
if WIN32:
return None
pot = potential.NFWPotential(amp=2.3, a=1.3)
denspot = potential.DehnenCoreSphericalPotential(amp=2.5, a=1.15)
betas = [0.25]
for (beta, dfh) in zip(betas, constantbeta_dfs_dehnencore_in_nfw):
assert numpy.all((numpy.fabs(dfh((numpy.arange(0.1, 10.0, 0.1), 1.1))) < 1e-08)), 'Evaluating the constantbeta DF at E > 0 does not give zero'
assert numpy.all((numpy.fabs(dfh(((pot(0, 0) - 0.1), 0.1))) < 1e-08)), 'Evaluating the constantbeta DF at E < -GM/a does not give zero'
assert numpy.all((numpy.fabs(dfh(((- 0.0001), 1.1))) < 1e-08)), 'Evaluating the constantbeta DF at Q < 0 does not give zero'
return None
class SearchAlgo(ABC):
def __init__(self, task, world_model, action_agent, logger=None, seed=0, print_log=True, test_every_step=True, depth_limit=None) -> None:
self.task = task
self.world_model = world_model
self.action_agent = action_agent
self.states = []
self.logger = logger
self.print_log = (print_log if (logger is not None) else False)
self.seed = seed
self.test_every_step = test_every_step
self.depth_limit = depth_limit
def search(self):
pass
def get_states(self):
return self.states
def process_all_correct_batch(self):
self.logger.info('\n')
self.logger.info('all correct: skip updating cur_prompt')
self.logger.info('\n')
def main(opt):
loader = BatchLoaderUnk(opt.tokens, opt.data_dir, opt.batch_size, opt.seq_length, opt.max_word_l, opt.n_words, opt.n_chars)
opt.word_vocab_size = min(opt.n_words, len(loader.idx2word))
opt.char_vocab_size = min(opt.n_chars, len(loader.idx2char))
opt.max_word_l = loader.max_word_l
print('Word vocab size:', opt.word_vocab_size, ', Char vocab size:', opt.char_vocab_size, ', Max word length (incl. padding):', opt.max_word_l)
if (not opt.skip_train):
print('creating an LSTM-CNN with', opt.num_layers, 'layers')
model = LSTMCNN(opt)
if (not os.path.exists(opt.checkpoint_dir)):
os.makedirs(opt.checkpoint_dir)
pickle.dump(opt, open('{}/{}.pkl'.format(opt.checkpoint_dir, opt.savefile), 'wb'))
model.save('{}/{}.json'.format(opt.checkpoint_dir, opt.savefile))
model.fit_generator(loader.next_batch(Train), loader.split_sizes[Train], opt.max_epochs, loader.next_batch(Validation), loader.split_sizes[Validation], opt)
model.save_weights('{}/{}.h5'.format(opt.checkpoint_dir, opt.savefile), overwrite=True)
else:
model = load_model('{}/{}.json'.format(opt.checkpoint_dir, opt.savefile))
model.load_weights('{}/{}.h5'.format(opt.checkpoint_dir, opt.savefile))
print(model.summary())
test_perp = model.evaluate_generator(loader.next_batch(Test), loader.split_sizes[Test])
print('Perplexity on test set:', exp(test_perp))
import base64
import zlib

def main():
with open('find_rocm_config.py', 'rb') as f:
data = f.read()
compressed = zlib.compress(data)
b64encoded = base64.b64encode(compressed)
with open('find_rocm_config.py.gz.base64', 'wb') as f:
f.write(b64encoded)
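# Inverse sketch: recover the original file from the base64-encoded,
# zlib-compressed artifact written above (output filename is illustrative).
def restore():
    with open('find_rocm_config.py.gz.base64', 'rb') as f:
        original = zlib.decompress(base64.b64decode(f.read()))
    with open('find_rocm_config.py.recovered', 'wb') as f:
        f.write(original)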
class PathwiseGPR(GPR, PathwiseGPModel):
def __init__(self, *args, paths: AbstractSampler=None, **kwargs):
GPR.__init__(self, *args, **kwargs)
self._paths = paths
def generate_paths(self, num_samples: int, num_bases: int=None, prior: AbstractSampler=None, sample_axis: int=None, **kwargs) -> CompositeSampler:
if (prior is None):
prior = sampling.priors.random_fourier(self.kernel, num_bases=num_bases, sample_shape=[num_samples], sample_axis=sample_axis)
elif (num_bases is not None):
assert (prior.sample_shape == [num_samples])
diag = tf.convert_to_tensor(self.likelihood.variance)
return sampling.decoupled(self.kernel, prior, *self.data, mean_function=self.mean_function, diag=diag, sample_axis=sample_axis, **kwargs)
def count_word_freq():
d = {}
os.chdir('../../data/yelp')
(d, _) = count(d, 'valid.txt')
(d, filtered_sents_test) = count(d, 'test.txt')
sorted_d = sorted(d, key=d.get, reverse=True)
print('Len of trimmed vocab {}'.format(len(sorted_d)))
print('Num of Test samples after trimming {}'.format(len(filtered_sents_test)))
uncommon = sorted_d[(- 10000):]
print(uncommon)
divide = 5
every = int((len(filtered_sents_test) / divide))
sent_dictionary = {}
for sent in filtered_sents_test:
total = len(sent)
cnt = 0.0
for w in sent:
if (w in uncommon):
cnt += 1
sent_dictionary[' '.join(sent)] = (cnt / total)
sorted_sents = sorted(sent_dictionary, key=sent_dictionary.get, reverse=True)
for piece in range(divide):
start = int((piece * every))
end = int(((piece + 1) * every))
tmp_sents = sorted_sents[start:end]
with open((('test-rare-' + str(piece)) + '.txt'), 'w') as fd:
fd.write('\n'.join(tmp_sents))
def test_game_2048__get_action_mask(game_2048: Game2048, board: Board) -> None:
action_mask_fn = jax.jit(game_2048._get_action_mask)
action_mask = action_mask_fn(board)
expected_action_mask = jnp.array([False, True, True, True])
assert jnp.array_equal(action_mask, expected_action_mask)
class TestBoxMode(unittest.TestCase):
def _convert_xy_to_wh(self, x):
return BoxMode.convert(x, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
def _convert_xywha_to_xyxy(self, x):
return BoxMode.convert(x, BoxMode.XYWHA_ABS, BoxMode.XYXY_ABS)
def _convert_xywh_to_xywha(self, x):
return BoxMode.convert(x, BoxMode.XYWH_ABS, BoxMode.XYWHA_ABS)
def test_box_convert_list(self):
for tp in [list, tuple]:
box = tp([5.0, 5.0, 10.0, 10.0])
output = self._convert_xy_to_wh(box)
self.assertIsInstance(output, tp)
self.assertIsInstance(output[0], float)
self.assertEqual(output, tp([5.0, 5.0, 5.0, 5.0]))
with self.assertRaises(Exception):
self._convert_xy_to_wh([box])
def test_box_convert_array(self):
box = np.asarray([[5, 5, 10, 10], [1, 1, 2, 3]])
output = self._convert_xy_to_wh(box)
self.assertEqual(output.dtype, box.dtype)
self.assertEqual(output.shape, box.shape)
self.assertTrue((output[0] == [5, 5, 5, 5]).all())
self.assertTrue((output[1] == [1, 1, 1, 2]).all())
def test_box_convert_cpu_tensor(self):
box = torch.tensor([[5, 5, 10, 10], [1, 1, 2, 3]])
output = self._convert_xy_to_wh(box)
self.assertEqual(output.dtype, box.dtype)
self.assertEqual(output.shape, box.shape)
output = output.numpy()
self.assertTrue((output[0] == [5, 5, 5, 5]).all())
self.assertTrue((output[1] == [1, 1, 1, 2]).all())
@unittest.skipIf(not torch.cuda.is_available(), 'CUDA not available')
def test_box_convert_cuda_tensor(self):
box = torch.tensor([[5, 5, 10, 10], [1, 1, 2, 3]]).cuda()
output = self._convert_xy_to_wh(box)
self.assertEqual(output.dtype, box.dtype)
self.assertEqual(output.shape, box.shape)
self.assertEqual(output.device, box.device)
output = output.cpu().numpy()
self.assertTrue((output[0] == [5, 5, 5, 5]).all())
self.assertTrue((output[1] == [1, 1, 1, 2]).all())
def test_box_convert_xywha_to_xyxy_list(self):
for tp in [list, tuple]:
box = tp([50, 50, 30, 20, 0])
output = self._convert_xywha_to_xyxy(box)
self.assertIsInstance(output, tp)
self.assertEqual(output, tp([35, 40, 65, 60]))
with self.assertRaises(Exception):
self._convert_xywha_to_xyxy([box])
def test_box_convert_xywha_to_xyxy_array(self):
for dtype in [np.float64, np.float32]:
box = np.asarray([[50, 50, 30, 20, 0], [50, 50, 30, 20, 90], [1, 1, math.sqrt(2), math.sqrt(2), (- 45)]], dtype=dtype)
output = self._convert_xywha_to_xyxy(box)
self.assertEqual(output.dtype, box.dtype)
expected = np.asarray([[35, 40, 65, 60], [40, 35, 60, 65], [0, 0, 2, 2]], dtype=dtype)
self.assertTrue(np.allclose(output, expected, atol=1e-06), 'output={}'.format(output))
def test_box_convert_xywha_to_xyxy_tensor(self):
for dtype in [torch.float32, torch.float64]:
box = torch.tensor([[50, 50, 30, 20, 0], [50, 50, 30, 20, 90], [1, 1, math.sqrt(2), math.sqrt(2), (- 45)]], dtype=dtype)
output = self._convert_xywha_to_xyxy(box)
self.assertEqual(output.dtype, box.dtype)
expected = torch.tensor([[35, 40, 65, 60], [40, 35, 60, 65], [0, 0, 2, 2]], dtype=dtype)
self.assertTrue(torch.allclose(output, expected, atol=1e-06), 'output={}'.format(output))
def test_box_convert_xywh_to_xywha_list(self):
for tp in [list, tuple]:
box = tp([50, 50, 30, 20])
output = self._convert_xywh_to_xywha(box)
self.assertIsInstance(output, tp)
self.assertEqual(output, tp([65, 60, 30, 20, 0]))
with self.assertRaises(Exception):
self._convert_xywh_to_xywha([box])
def test_box_convert_xywh_to_xywha_array(self):
for dtype in [np.float64, np.float32]:
box = np.asarray([[30, 40, 70, 60], [30, 40, 60, 70], [(- 1), (- 1), 2, 2]], dtype=dtype)
output = self._convert_xywh_to_xywha(box)
self.assertEqual(output.dtype, box.dtype)
expected = np.asarray([[65, 70, 70, 60, 0], [60, 75, 60, 70, 0], [0, 0, 2, 2, 0]], dtype=dtype)
self.assertTrue(np.allclose(output, expected, atol=1e-06), 'output={}'.format(output))
def test_box_convert_xywh_to_xywha_tensor(self):
for dtype in [torch.float32, torch.float64]:
box = torch.tensor([[30, 40, 70, 60], [30, 40, 60, 70], [(- 1), (- 1), 2, 2]], dtype=dtype)
output = self._convert_xywh_to_xywha(box)
self.assertEqual(output.dtype, box.dtype)
expected = torch.tensor([[65, 70, 70, 60, 0], [60, 75, 60, 70, 0], [0, 0, 2, 2, 0]], dtype=dtype)
self.assertTrue(torch.allclose(output, expected, atol=1e-06), 'output={}'.format(output))
def test_json_serializable(self):
payload = {'box_mode': BoxMode.XYWH_REL}
try:
json.dumps(payload)
except Exception:
self.fail('JSON serialization failed')
def test_json_deserializable(self):
payload = '{"box_mode": 2}'
obj = json.loads(payload)
try:
obj['box_mode'] = BoxMode(obj['box_mode'])
except Exception:
self.fail('JSON deserialization failed')
class LPIPSWithDiscriminator(nn.Module):
def __init__(self, disc_start, logvar_init=0.0, kl_weight=1.0, pixelloss_weight=1.0, disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0, perceptual_weight=1.0, use_actnorm=False, disc_conditional=False, disc_loss='hinge'):
super().__init__()
assert (disc_loss in ['hinge', 'vanilla'])
self.kl_weight = kl_weight
self.pixel_weight = pixelloss_weight
self.perceptual_loss = LPIPS().eval()
self.perceptual_weight = perceptual_weight
self.logvar = nn.Parameter((torch.ones(size=()) * logvar_init))
self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels, n_layers=disc_num_layers, use_actnorm=use_actnorm).apply(weights_init)
self.discriminator_iter_start = disc_start
self.disc_loss = (hinge_d_loss if (disc_loss == 'hinge') else vanilla_d_loss)
self.disc_factor = disc_factor
self.discriminator_weight = disc_weight
self.disc_conditional = disc_conditional
def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
if (last_layer is not None):
nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
else:
nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0]
g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0]
d_weight = (torch.norm(nll_grads) / (torch.norm(g_grads) + 0.0001))
d_weight = torch.clamp(d_weight, 0.0, 10000.0).detach()
d_weight = (d_weight * self.discriminator_weight)
return d_weight
def forward(self, inputs, reconstructions, posteriors, optimizer_idx, global_step, last_layer=None, cond=None, split='train', weights=None):
rec_loss = torch.abs((inputs.contiguous() - reconstructions.contiguous()))
if (self.perceptual_weight > 0):
p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous())
rec_loss = (rec_loss + (self.perceptual_weight * p_loss))
nll_loss = ((rec_loss / torch.exp(self.logvar)) + self.logvar)
weighted_nll_loss = nll_loss
if (weights is not None):
weighted_nll_loss = (weights * nll_loss)
weighted_nll_loss = (torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0])
nll_loss = (torch.sum(nll_loss) / nll_loss.shape[0])
kl_loss = posteriors.kl()
kl_loss = (torch.sum(kl_loss) / kl_loss.shape[0])
if (optimizer_idx == 0):
if (cond is None):
assert (not self.disc_conditional)
logits_fake = self.discriminator(reconstructions.contiguous())
else:
assert self.disc_conditional
logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1))
g_loss = (- torch.mean(logits_fake))
if (self.disc_factor > 0.0):
try:
d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer)
except RuntimeError:
assert (not self.training)
d_weight = torch.tensor(0.0)
else:
d_weight = torch.tensor(0.0)
disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
loss = ((weighted_nll_loss + (self.kl_weight * kl_loss)) + ((d_weight * disc_factor) * g_loss))
log = {'{}/total_loss'.format(split): loss.clone().detach().mean(), '{}/logvar'.format(split): self.logvar.detach(), '{}/kl_loss'.format(split): kl_loss.detach().mean(), '{}/nll_loss'.format(split): nll_loss.detach().mean(), '{}/rec_loss'.format(split): rec_loss.detach().mean(), '{}/d_weight'.format(split): d_weight.detach(), '{}/disc_factor'.format(split): torch.tensor(disc_factor), '{}/g_loss'.format(split): g_loss.detach().mean()}
return (loss, log)
if (optimizer_idx == 1):
if (cond is None):
logits_real = self.discriminator(inputs.contiguous().detach())
logits_fake = self.discriminator(reconstructions.contiguous().detach())
else:
logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1))
logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1))
disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
d_loss = (disc_factor * self.disc_loss(logits_real, logits_fake))
log = {'{}/disc_loss'.format(split): d_loss.clone().detach().mean(), '{}/logits_real'.format(split): logits_real.detach().mean(), '{}/logits_fake'.format(split): logits_fake.detach().mean()}
return (d_loss, log)
def get_transform(point_cloud, is_training, bn_decay=None, K=3):
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
input_image = tf.expand_dims(point_cloud, (- 1))
net = tf_util.conv2d(input_image, 64, [1, 3], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='tconv1', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='tconv3', bn_decay=bn_decay)
net = tf_util.conv2d(net, 1024, [1, 1], padding='VALID', stride=[1, 1], bn=True, is_training=is_training, scope='tconv4', bn_decay=bn_decay)
net = tf_util.max_pool2d(net, [num_point, 1], padding='VALID', scope='tmaxpool')
net = tf.reshape(net, [batch_size, (- 1)])
net = tf_util.fully_connected(net, 128, bn=True, is_training=is_training, scope='tfc1', bn_decay=bn_decay)
net = tf_util.fully_connected(net, 128, bn=True, is_training=is_training, scope='tfc2', bn_decay=bn_decay)
with tf.variable_scope('transform_XYZ') as sc:
assert (K == 3)
weights = tf.get_variable('weights', [128, (3 * K)], initializer=tf.constant_initializer(0.0), dtype=tf.float32)
biases = (tf.get_variable('biases', [(3 * K)], initializer=tf.constant_initializer(0.0), dtype=tf.float32) + tf.constant([1, 0, 0, 0, 1, 0, 0, 0, 1], dtype=tf.float32))
transform = tf.matmul(net, weights)
transform = tf.nn.bias_add(transform, biases)
transform = tf.reshape(transform, [batch_size, 3, K])
return transform
class AttentionNeuralCDE(nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim, static_dim=None, adjoint=True, run_backwards=True, sparsemax=False):
super(AttentionNeuralCDE, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.output_dim = output_dim
self.static_dim = static_dim
self.adjoint = adjoint
self.run_backwards = run_backwards
self.encoder = self._create_ncde(input_dim, hidden_dim, hidden_dim, static_dim)
activation = (Sparsemax(dim=1) if sparsemax else nn.Softmax(dim=1))
self.attention = nn.Sequential(self._create_flipper(), self._create_ncde(hidden_dim, hidden_dim, 1, static_dim), self._create_flipper(), activation)
self.final = nn.Sequential(self._create_ncde(hidden_dim, hidden_dim, hidden_dim, static_dim, return_sequences=False))
self.fc_output = nn.Linear(hidden_dim, output_dim)
def _create_flipper(self):
if self.run_backwards:
item_index = None
if self.static_dim:
item_index = 1
return FlipTensor(dim=(- 2), item_index=item_index)
else:
return FlipTensor(dim=None)
def _create_ncde(self, input_dim, hidden_dim, output_dim, static_dim, return_sequences=True):
return NeuralCDE(input_dim, hidden_dim, output_dim, static_dim, use_initial=True, interpolation='linear', adjoint=self.adjoint, num_layers=3, apply_final_linear=True, return_sequences=return_sequences, return_filtered_rectilinear=False)
def _handle_hidden_static_features(self, x, hidden_state):
if (self.static_dim is None):
return hidden_state
else:
return [x[0], hidden_state]
def reduce_hidden_state(self, x, hidden_state, attention_weights):
keep_bools = (attention_weights > (1 / hidden_state.size(1)))
hold_states = PadRaggedTensors().transform([h[k.view((- 1))] for (h, k) in zip(hidden_state, keep_bools)])
pipeline = SimplePipeline([PadRaggedTensors(), ForwardFill()])
reduced_hidden_state = pipeline.transform(hold_states)
output = torchcde.linear_interpolation_coeffs(reduced_hidden_state)
output = self._handle_hidden_static_features(x, output)
return output
def forward(self, x):
hidden_state = self.encoder(x)
attention_inputs = self._handle_hidden_static_features(x, hidden_state)
attention_weights = self.attention(attention_inputs)
reduced_hidden_state = self.reduce_hidden_state(x, hidden_state, attention_weights)
final_ncde_out = self.final(reduced_hidden_state)
output = self.fc_output(final_ncde_out)
return output
class VIDLoss(nn.Module):
'''Variational Information Distillation for Knowledge Transfer (CVPR 2019); code from the paper authors.'''
def __init__(self, num_input_channels, num_mid_channel, num_target_channels, init_pred_var=5.0, eps=1e-05):
super(VIDLoss, self).__init__()
def conv1x1(in_channels, out_channels, stride=1):
return nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0, bias=False, stride=stride)
self.regressor = nn.Sequential(conv1x1(num_input_channels, num_mid_channel), nn.ReLU(), conv1x1(num_mid_channel, num_mid_channel), nn.ReLU(), conv1x1(num_mid_channel, num_target_channels))
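# Inverse-softplus initialization: pred_var = softplus(log_scale) + eps,
# so this choice makes the initial predictive variance equal init_pred_var.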
self.log_scale = torch.nn.Parameter((np.log((np.exp((init_pred_var - eps)) - 1.0)) * torch.ones(num_target_channels)))
self.eps = eps
def forward(self, input, target):
(s_H, t_H) = (input.shape[2], target.shape[2])
if (s_H > t_H):
input = F.adaptive_avg_pool2d(input, (t_H, t_H))
elif (s_H < t_H):
target = F.adaptive_avg_pool2d(target, (s_H, s_H))
else:
pass
pred_mean = self.regressor(input)
pred_var = (torch.log((1.0 + torch.exp(self.log_scale))) + self.eps)
pred_var = pred_var.view(1, (- 1), 1, 1)
neg_log_prob = (0.5 * ((((pred_mean - target) ** 2) / pred_var) + torch.log(pred_var)))
loss = torch.mean(neg_log_prob)
return loss
class TrajectoryGraph(DiGraph):
def add_node(self, timed_state: TimedVehicleState, **attr):
super(TrajectoryGraph, self).add_node(node_for_adding=timed_state, **attr)
def check_node(self, node: TimedVehicleState):
if (node not in self.nodes):
raise ValueError(f'{node} not in graph!')
def add_edge(self, states: Trajectory, transition: Trajectory, **attr):
(source, target) = (states.at(states.get_start()), states.at(states.get_end()))
(start_time, end_time) = (states.get_start(), states.get_end())
self.check_node(node=(start_time, source))
attr['transition'] = transition
if ((end_time, target) not in self.nodes):
self.add_node(timed_state=(end_time, target), gen=(self.nodes.get((start_time, source))['gen'] + 1))
super(TrajectoryGraph, self).add_edge(u_of_edge=(start_time, source), v_of_edge=(end_time, target), **attr)
return
def get_all_transitions(self) -> Set[Trajectory]:
assert is_directed_acyclic_graph(self)
trajectories = set()
roots = [node for (node, degree) in self.in_degree() if (degree == 0)]
assert (len(roots) == 1)
source = roots[0]
leaves = [node for (node, degree) in self.out_degree() if (degree == 0)]
for target in leaves:
trajectories.add(self.get_transition(source=source, target=target))
return trajectories
def get_all_trajectories(self) -> Set[Trajectory]:
assert is_directed_acyclic_graph(self)
trajectories = set()
roots = [node for (node, degree) in self.in_degree() if (degree == 0)]
assert (len(roots) == 1)
source = roots[0]
leaves = [node for (node, degree) in self.out_degree() if (degree == 0)]
for target in leaves:
trajectories.add(self.get_trajectory(source=source, target=target))
return trajectories
def commands_on_trajectory(self, trajectory: Trajectory) -> DgSampledSequence:
source = (trajectory.timestamps[0], trajectory.values[0])
target = (trajectory.timestamps[(- 1)], trajectory.values[(- 1)])
self.check_node(source)
self.check_node(target)
if (not has_path(G=self, source=source, target=target)):
raise ValueError(f'No path exists between {(source, target)}!')
nodes = shortest_path(G=self, source=source, target=target)
commands = []
timestamps = []
for (node1, node2) in zip(nodes[:(- 1)], nodes[1:]):
commands.append(self.get_edge_data(u=node1, v=node2)['commands'])
timestamps.append(node1[0])
return DgSampledSequence[VehicleCommands](values=commands, timestamps=timestamps)
def get_transition(self, source: TimedVehicleState, target: TimedVehicleState) -> Trajectory:
self.check_node(source)
self.check_node(target)
if (not has_path(G=self, source=source, target=target)):
raise ValueError(f'No path exists between {(source, target)}!')
nodes = shortest_path(G=self, source=source, target=target)
traj: Trajectory = Trajectory(values=[], timestamps=[])
for (node1, node2) in zip(nodes[:(- 1)], nodes[1:]):
traj = traj.merge_unsafe(self.get_edge_data(u=node1, v=node2)['transition'])
return traj
def get_trajectory(self, source: TimedVehicleState, target: TimedVehicleState):
self.check_node(source)
self.check_node(target)
if (not has_path(G=self, source=source, target=target)):
raise ValueError(f'No path exists between {(source, target)}!')
nodes = shortest_path(G=self, source=source, target=target)
states = []
timestamps = []
for node in nodes:
timestamps.append(node[0])
states.append(node[1])
return Trajectory(values=states, timestamps=timestamps)
def iterate_all_trajectories(self) -> Iterator[Trajectory]:
pass
def train_alpha(model):
num_epochs = 100
batch_size = 4
nframes = 8
nframes_val = 32
size = (240, 432)
def image_read(path):
pic = Image.open(path)
transform = tv.transforms.Compose([tv.transforms.Resize(size, interpolation=Image.BILINEAR), tv.transforms.ToTensor(), tv.transforms.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD)])
return transform(pic)
def label_read(path):
if os.path.exists(path):
pic = Image.open(path)
transform = tv.transforms.Compose([tv.transforms.Resize(size, interpolation=Image.NEAREST), LabelToLongTensor()])
label = transform(pic)
else:
label = torch.LongTensor(1, *size).fill_(255)
return label
def random_object_sampler(lst):
return [random.choice(lst)]
def deterministic_object_sampler(lst):
return [lst[0]]
train_transform = dataset_loaders.JointCompose([dataset_loaders.JointRandomHorizontalFlip()])
train_set = torch.utils.data.ConcatDataset([SynthesisVD(config['SynthesisVD_path'], '2017', 'train', image_read, label_read, train_transform, nframes, random_object_sampler, start_frame='random'), DAVIS17V2(config['davis17_path'], '2017', 'train', image_read, label_read, train_transform, nframes, random_object_sampler, start_frame='random'), YTVOSV2(config['ytvos_path'], 'train', 'train_joakim', 'JPEGImages', image_read, label_read, train_transform, nframes, random_object_sampler, start_frame='random')])
val_set = YTVOSV2(config['ytvos_path'], 'train', 'val_joakim', 'JPEGImages', image_read, label_read, None, nframes_val, deterministic_object_sampler, start_frame='first')
train_loader = DataLoader(train_set, shuffle=True, batch_size=batch_size, num_workers=8)
val_loader = DataLoader(val_set, shuffle=False, batch_size=batch_size, num_workers=8)
print('Sets initiated with {} (train), {} (val) samples.'.format(len(train_set), len(val_set)))
objective = nn.NLLLoss(ignore_index=255).cuda()
optimizer = torch.optim.Adam([param for param in model.parameters() if param.requires_grad], lr=0.0001, weight_decay=1e-05)
lr_sched = torch.optim.lr_scheduler.ExponentialLR(optimizer, 0.975)
trainer = trainers.VOSTrainer(model, optimizer, objective, lr_sched, train_loader, val_loader, use_gpu=True, workspace_dir=config['workspace_path'], save_name=(os.path.splitext(os.path.basename(__file__))[0] + '_alpha'), checkpoint_interval=2, print_interval=25, debug=False)
trainer.load_checkpoint()
trainer.train(num_epochs)
class SegmentableProperties(bpy.types.PropertyGroup):
category_name: bpy.props.StringProperty(name='Category Name', description='String name of the category.', default='')
category_color: bpy.props.FloatVectorProperty(name='Category Color', subtype='COLOR', description='Category color for segmentation.')
instance_name: bpy.props.StringProperty(name='Instance Name', description='String name of the instance.', default='')
instance_color: bpy.props.FloatVectorProperty(name='Instance Color', subtype='COLOR', description='Instance color for segmentation.')
def create_textset(tokenizer, train_split, dev_split, name, path, bucketing, batch_size):
msg_list = []
if (name.lower() == 'librispeech'):
from dataset.librispeech import LibriTextDataset as Dataset
print('import LibriTextDataset as Dataset')
elif (name.lower() == 'aishell'):
from dataset.aishell import AishellTextDataset as Dataset
print('import AishellTextDataset as Dataset')
else:
raise NotImplementedError
bucket_size = (batch_size if bucketing else 1)
tr_loader_bs = (1 if bucketing else batch_size)
dv_set = Dataset(path, dev_split, tokenizer, 1)
tr_set = Dataset(path, train_split, tokenizer, bucket_size)
msg_list = _data_msg(name, path, train_split.__str__(), len(tr_set), dev_split.__str__(), len(dv_set), batch_size, bucketing)
return (tr_set, dv_set, tr_loader_bs, batch_size, msg_list)
def get_lvis_22k_meta():
from .lvis_22k_categories import CATEGORIES
cat_ids = [k['id'] for k in CATEGORIES]
assert ((min(cat_ids) == 1) and (max(cat_ids) == len(cat_ids))), 'Category ids are not in [1, #categories], as expected'
lvis_categories = sorted(CATEGORIES, key=(lambda x: x['id']))
thing_classes = [k['name'] for k in lvis_categories]
meta = {'thing_classes': thing_classes}
return meta
def test_save_load_and_predict():
fpath = 'tests/test_model_functioning/test_wd_model'
if (not os.path.exists(fpath)):
os.makedirs(fpath)
model = WideDeep(deeptabular=tabmlp)
trainer = Trainer(model, objective='binary', verbose=0)
trainer.fit(X_tab=X_tab, target=target, batch_size=16)
trainer.save(path=fpath, save_state_dict=True)
model_new = WideDeep(deeptabular=tabmlp)
model_new.load_state_dict(torch.load('/'.join([fpath, 'wd_model.pt'])))
trainer_new = Trainer(model_new, objective='binary', verbose=0)
preds = trainer_new.predict(X_tab=X_tab, batch_size=16)
shutil.rmtree(fpath)
assert (preds.shape[0] == X_tab.shape[0])
class NpInfoDict(object):
def __init__(self, info_dict, key_type=None, value_type=None):
keys = sorted(list(info_dict.keys()))
self.key_arr = np.array(keys, dtype=key_type)
self.val_arr = np.array([info_dict[k] for k in keys], dtype=value_type)
self._key_idx_map = {k: i for (i, k) in enumerate(keys)}
def __getitem__(self, idx):
return (self.key_arr[idx], self.val_arr[idx])
def __len__(self):
return len(self.key_arr)
def convert_key(self, org_key):
return self._key_idx_map[org_key]
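# Usage sketch (toy values): NpInfoDict packs a dict into parallel numpy
# arrays, sorted by key, for compact integer indexing.
import numpy as np

info = NpInfoDict({'b': 2, 'a': 1, 'c': 3}, value_type=np.int64)
idx = info.convert_key('b')  # position of 'b' among the sorted keys -> 1
key, val = info[idx]         # ('b', 2)
print(len(info), key, val)   # 3 b 2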
def load_pytorch_state_dict_in_tf2_model(tf_model, pt_state_dict, tf_inputs=None, allow_missing_keys=False, output_loading_info=False, _prefix=None, tf_to_pt_weight_rename=None):
import tensorflow as tf
from packaging.version import parse
if (parse(tf.__version__) >= parse('2.11.0')):
from keras import backend as K
else:
from tensorflow.python.keras import backend as K
if (tf_inputs is None):
tf_inputs = tf_model.dummy_inputs
if (_prefix is None):
_prefix = ''
if (tf_inputs is not None):
with tf.name_scope(_prefix):
tf_model(tf_inputs, training=False)
old_keys = []
new_keys = []
for key in pt_state_dict.keys():
new_key = None
if ('gamma' in key):
new_key = key.replace('gamma', 'weight')
if ('beta' in key):
new_key = key.replace('beta', 'bias')
if ('running_var' in key):
new_key = key.replace('running_var', 'moving_variance')
if ('running_mean' in key):
new_key = key.replace('running_mean', 'moving_mean')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for (old_key, new_key) in zip(old_keys, new_keys):
pt_state_dict[new_key] = pt_state_dict.pop(old_key)
start_prefix_to_remove = ''
if (not any((s.startswith(tf_model.base_model_prefix) for s in pt_state_dict.keys()))):
start_prefix_to_remove = (tf_model.base_model_prefix + '.')
symbolic_weights = (tf_model.trainable_weights + tf_model.non_trainable_weights)
tf_loaded_numel = 0
weight_value_tuples = []
all_pytorch_weights = set(pt_state_dict.keys())
missing_keys = []
for symbolic_weight in symbolic_weights:
sw_name = symbolic_weight.name
(name, transpose) = convert_tf_weight_name_to_pt_weight_name(sw_name, start_prefix_to_remove=start_prefix_to_remove, tf_weight_shape=symbolic_weight.shape, name_scope=_prefix)
if (tf_to_pt_weight_rename is not None):
name = tf_to_pt_weight_rename(name)
if (name not in pt_state_dict):
if allow_missing_keys:
missing_keys.append(name)
continue
elif (tf_model._keys_to_ignore_on_load_missing is not None):
if any(((re.search(pat, name) is not None) for pat in tf_model._keys_to_ignore_on_load_missing)):
continue
raise AttributeError(f'{name} not found in PyTorch model')
array = apply_transpose(transpose, pt_state_dict[name], symbolic_weight.shape)
tf_loaded_numel += tensor_size(array)
weight_value_tuples.append((symbolic_weight, array))
all_pytorch_weights.discard(name)
K.batch_set_value(weight_value_tuples)
if (tf_inputs is not None):
tf_model(tf_inputs, training=False)
logger.info(f'Loaded {tf_loaded_numel:,} parameters in the TF 2.0 model.')
unexpected_keys = list(all_pytorch_weights)
if (tf_model._keys_to_ignore_on_load_missing is not None):
for pat in tf_model._keys_to_ignore_on_load_missing:
missing_keys = [k for k in missing_keys if (re.search(pat, k) is None)]
if (tf_model._keys_to_ignore_on_load_unexpected is not None):
for pat in tf_model._keys_to_ignore_on_load_unexpected:
unexpected_keys = [k for k in unexpected_keys if (re.search(pat, k) is None)]
if (len(unexpected_keys) > 0):
logger.warning(f'''Some weights of the PyTorch model were not used when initializing the TF 2.0 model {tf_model.__class__.__name__}: {unexpected_keys}
- This IS expected if you are initializing {tf_model.__class__.__name__} from a PyTorch model trained on another task or with another architecture (e.g. initializing a TFBertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing {tf_model.__class__.__name__} from a PyTorch model that you expect to be exactly identical (e.g. initializing a TFBertForSequenceClassification model from a BertForSequenceClassification model).''')
else:
logger.warning(f'''All PyTorch model weights were used when initializing {tf_model.__class__.__name__}.
''')
if (len(missing_keys) > 0):
logger.warning(f'''Some weights or buffers of the TF 2.0 model {tf_model.__class__.__name__} were not initialized from the PyTorch model and are newly initialized: {missing_keys}
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.''')
else:
logger.warning(f'''All the weights of {tf_model.__class__.__name__} were initialized from the PyTorch model.
If your task is similar to the task the model of the checkpoint was trained on, you can already use {tf_model.__class__.__name__} for predictions without further training.''')
if output_loading_info:
loading_info = {'missing_keys': missing_keys, 'unexpected_keys': unexpected_keys}
return (tf_model, loading_info)
return tf_model
def minify(src_dir: str, dest_dir: str, n: int):
src_dir = Path(src_dir)
dest_dir = Path(dest_dir)
dest_dir.mkdir(exist_ok=True)
for path in src_dir.iterdir():
new = [x.rstrip() for x in list(path.open().readlines())][:n]
dest_path = dest_dir.joinpath(path.name)
print(dest_path)
dest_path.write_text('\n'.join(new))
def running_mean(x, n):
cumsum = np.cumsum(np.insert(x, 0, 0))
return ((cumsum[n:] - cumsum[:(- n)]) / float(n))
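# Usage sketch: a length-n simple moving average via cumulative sums; the
# output has len(x) - n + 1 entries.
import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
print(running_mean(x, 3))  # [2. 3. 4.]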
def getPathGS(algo, inputEvents, tthread, NUM_ITEMS, NUM_ACCESS, key_skewness, window_ratio, window_size, transaction_length, isCyclic, complexity):
return (FILE_FOLER + '/WindowedGrepSum/{}/threads = {}/totalEvents = {}/{}_{}_{}_{}_{}_{}_{}_{}'.format(algo, tthread, inputEvents, NUM_ITEMS, 100, key_skewness, window_ratio, window_size, transaction_length, isCyclic, complexity))
class Stem(nn.Sequential):
def __init__(self, in_chs, out_chs, kernel_size=3, stride=4, pool='maxpool', num_rep=3, num_act=None, chs_decay=0.5, layers: LayerFn=None):
super().__init__()
assert (stride in (2, 4))
layers = (layers or LayerFn())
if isinstance(out_chs, (list, tuple)):
num_rep = len(out_chs)
stem_chs = out_chs
else:
stem_chs = [round((out_chs * (chs_decay ** i))) for i in range(num_rep)][::(- 1)]
self.stride = stride
self.feature_info = []
prev_feat = ''
stem_strides = ([2] + ([1] * (num_rep - 1)))
if ((stride == 4) and (not pool)):
stem_strides[(- 1)] = 2
num_act = (num_rep if (num_act is None) else num_act)
stem_norm_acts = (([False] * (num_rep - num_act)) + ([True] * num_act))
prev_chs = in_chs
curr_stride = 1
for (i, (ch, s, na)) in enumerate(zip(stem_chs, stem_strides, stem_norm_acts)):
layer_fn = (layers.conv_norm_act if na else create_conv2d)
conv_name = f'conv{(i + 1)}'
if ((i > 0) and (s > 1)):
self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat))
self.add_module(conv_name, layer_fn(prev_chs, ch, kernel_size=kernel_size, stride=s))
prev_chs = ch
curr_stride *= s
prev_feat = conv_name
if (pool and ('max' in pool.lower())):
self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat))
self.add_module('pool', nn.MaxPool2d(3, 2, 1))
curr_stride *= 2
prev_feat = 'pool'
self.feature_info.append(dict(num_chs=prev_chs, reduction=curr_stride, module=prev_feat))
assert (curr_stride == stride)
def draw_level_failed():
global game_state
failed = bold_font3.render('Level Failed', 1, WHITE)
if ((level.number_of_birds <= 0) and ((time.time() - t2) > 5) and (len(pigs) > 0)):
game_state = 3
rect = pygame.Rect(300, 0, 600, 800)
pygame.draw.rect(screen, BLACK, rect)
screen.blit(failed, (450, 90))
screen.blit(pig_happy, (380, 120))
screen.blit(replay_button, (520, 460))
class Additive(Transform):
@staticmethod
def fwd(z: torch.Tensor, mask: torch.Tensor, params) -> Tuple[(torch.Tensor, torch.Tensor)]:
mu = params
z = (z + mu).mul(mask.unsqueeze(2))
logdet = z.new_zeros(z.size(0))
return (z, logdet)
@staticmethod
def bwd(z: torch.Tensor, mask: torch.Tensor, params) -> Tuple[(torch.Tensor, torch.Tensor)]:
mu = params
z = (z - mu).mul(mask.unsqueeze(2))
logdet = z.new_zeros(z.size(0))
return (z, logdet)
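# Round-trip sketch: with a mask of ones, `bwd` inverts `fwd` exactly, and
# the log-determinant is zero, as expected for a shift-only coupling.
import torch

z = torch.randn(2, 5, 4)
mask = torch.ones(2, 5)
mu = torch.randn(2, 5, 4)
y, logdet = Additive.fwd(z, mask, mu)
z_rec, _ = Additive.bwd(y, mask, mu)
assert torch.allclose(z, z_rec) and bool((logdet == 0).all())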
@_materialize('core')
class LeakyReLU(ElementWiseUnaryOp):
in_dtypes = [(i,) for i in DTYPE_GEN_FLOATS]
out_dtypes = [(i,) for i in DTYPE_GEN_FLOATS]
def __init__(self):
'''See torch.nn.LeakyReLU.'''
super().__init__()
self.negative_slope = 0.01
def file_lines_to_list(path):
with open(path) as f:
content = f.readlines()
content = [x.strip() for x in content]
return content
class NormFreeBlock(nn.Module):
def __init__(self, in_chs, out_chs=None, stride=1, dilation=1, first_dilation=None, alpha=1.0, beta=1.0, bottle_ratio=0.25, group_size=None, ch_div=1, reg=True, extra_conv=False, skipinit=False, attn_layer=None, attn_gain=2.0, act_layer=None, conv_layer=None, drop_path_rate=0.0):
super().__init__()
first_dilation = (first_dilation or dilation)
out_chs = (out_chs or in_chs)
mid_chs = make_divisible(((in_chs * bottle_ratio) if reg else (out_chs * bottle_ratio)), ch_div)
groups = (1 if (not group_size) else (mid_chs // group_size))
if (group_size and ((group_size % ch_div) == 0)):
mid_chs = (group_size * groups)
self.alpha = alpha
self.beta = beta
self.attn_gain = attn_gain
if ((in_chs != out_chs) or (stride != 1) or (dilation != first_dilation)):
self.downsample = DownsampleAvg(in_chs, out_chs, stride=stride, dilation=dilation, first_dilation=first_dilation, conv_layer=conv_layer)
else:
self.downsample = None
self.act1 = act_layer()
self.conv1 = conv_layer(in_chs, mid_chs, 1)
self.act2 = act_layer(inplace=True)
self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups)
if extra_conv:
self.act2b = act_layer(inplace=True)
self.conv2b = conv_layer(mid_chs, mid_chs, 3, stride=1, dilation=dilation, groups=groups)
else:
self.act2b = None
self.conv2b = None
if (reg and (attn_layer is not None)):
self.attn = attn_layer(mid_chs)
else:
self.attn = None
self.act3 = act_layer()
self.conv3 = conv_layer(mid_chs, out_chs, 1, gain_init=(1.0 if skipinit else 0.0))
if ((not reg) and (attn_layer is not None)):
self.attn_last = attn_layer(out_chs)
else:
self.attn_last = None
self.drop_path = (DropPath(drop_path_rate) if (drop_path_rate > 0) else nn.Identity())
self.skipinit_gain = (nn.Parameter(torch.tensor(0.0)) if skipinit else None)
def forward(self, x):
out = (self.act1(x) * self.beta)
shortcut = x
if (self.downsample is not None):
shortcut = self.downsample(out)
out = self.conv1(out)
out = self.conv2(self.act2(out))
if (self.conv2b is not None):
out = self.conv2b(self.act2b(out))
if (self.attn is not None):
out = (self.attn_gain * self.attn(out))
out = self.conv3(self.act3(out))
if (self.attn_last is not None):
out = (self.attn_gain * self.attn_last(out))
out = self.drop_path(out)
if (self.skipinit_gain is not None):
out.mul_(self.skipinit_gain)
out = ((out * self.alpha) + shortcut)
return out
class AssignResult(util_mixins.NiceRepr):
def __init__(self, num_gts, gt_inds, max_overlaps, labels=None):
self.num_gts = num_gts
self.gt_inds = gt_inds
self.max_overlaps = max_overlaps
self.labels = labels
@property
def num_preds(self):
return len(self.gt_inds)
@property
def info(self):
return {'num_gts': self.num_gts, 'num_preds': self.num_preds, 'gt_inds': self.gt_inds, 'max_overlaps': self.max_overlaps, 'labels': self.labels}
def __nice__(self):
parts = []
parts.append('num_gts={!r}'.format(self.num_gts))
if (self.gt_inds is None):
parts.append('gt_inds={!r}'.format(self.gt_inds))
else:
parts.append('gt_inds.shape={!r}'.format(tuple(self.gt_inds.shape)))
if (self.max_overlaps is None):
parts.append('max_overlaps={!r}'.format(self.max_overlaps))
else:
parts.append('max_overlaps.shape={!r}'.format(tuple(self.max_overlaps.shape)))
if (self.labels is None):
parts.append('labels={!r}'.format(self.labels))
else:
parts.append('labels.shape={!r}'.format(tuple(self.labels.shape)))
return ', '.join(parts)
@classmethod
def random(cls, **kwargs):
from mmdet.core.bbox import demodata
rng = demodata.ensure_rng(kwargs.get('rng', None))
num_gts = kwargs.get('num_gts', None)
num_preds = kwargs.get('num_preds', None)
p_ignore = kwargs.get('p_ignore', 0.3)
p_assigned = kwargs.get('p_assigned', 0.7)
p_use_label = kwargs.get('p_use_label', 0.5)
num_classes = kwargs.get('num_classes', 3)
if (num_gts is None):
num_gts = rng.randint(0, 8)
if (num_preds is None):
num_preds = rng.randint(0, 16)
if (num_gts == 0):
max_overlaps = torch.zeros(num_preds, dtype=torch.float32)
gt_inds = torch.zeros(num_preds, dtype=torch.int64)
if ((p_use_label is True) or (rng.rand() < p_use_label)):
labels = torch.zeros(num_preds, dtype=torch.int64)
else:
labels = None
else:
import numpy as np
max_overlaps = torch.from_numpy(rng.rand(num_preds))
is_assigned = torch.from_numpy((rng.rand(num_preds) < p_assigned))
n_assigned = min(num_preds, min(num_gts, is_assigned.sum()))
assigned_idxs = np.where(is_assigned)[0]
rng.shuffle(assigned_idxs)
assigned_idxs = assigned_idxs[0:n_assigned]
assigned_idxs.sort()
is_assigned[:] = 0
is_assigned[assigned_idxs] = True
is_ignore = (torch.from_numpy((rng.rand(num_preds) < p_ignore)) & is_assigned)
gt_inds = torch.zeros(num_preds, dtype=torch.int64)
true_idxs = np.arange(num_gts)
rng.shuffle(true_idxs)
true_idxs = torch.from_numpy(true_idxs)
gt_inds[is_assigned] = true_idxs[:n_assigned]
gt_inds = torch.from_numpy(rng.randint(1, (num_gts + 1), size=num_preds))
gt_inds[is_ignore] = (- 1)
gt_inds[(~ is_assigned)] = 0
max_overlaps[(~ is_assigned)] = 0
if ((p_use_label is True) or (rng.rand() < p_use_label)):
if (num_classes == 0):
labels = torch.zeros(num_preds, dtype=torch.int64)
else:
labels = torch.from_numpy(rng.randint(1, (num_classes + 1), size=num_preds))
labels[(~ is_assigned)] = 0
else:
labels = None
self = cls(num_gts, gt_inds, max_overlaps, labels)
return self
def add_gt_(self, gt_labels):
self_inds = torch.arange(1, (len(gt_labels) + 1), dtype=torch.long, device=gt_labels.device)
self.gt_inds = torch.cat([self_inds, self.gt_inds])
self.max_overlaps = torch.cat([self.max_overlaps.new_ones(len(gt_labels)), self.max_overlaps])
if (self.labels is not None):
self.labels = torch.cat([gt_labels, self.labels])
def get_nvidia_driver_version(run_lambda):
if (get_platform() == 'darwin'):
cmd = 'kextstat | grep -i cuda'
return run_and_parse_first_match(run_lambda, cmd, 'com[.]nvidia[.]CUDA [(](.*?)[)]')
smi = get_nvidia_smi()
return run_and_parse_first_match(run_lambda, smi, 'Driver Version: (.*?) ')
def findFileOrThrow(strBasename):
if os.path.isfile(strBasename):
return strBasename
LOCAL_FILE_DIR = ('data' + os.sep)
GLOBAL_FILE_DIR = (((os.path.dirname(os.path.abspath(__file__)) + os.sep) + 'data') + os.sep)
strFilename = (LOCAL_FILE_DIR + strBasename)
if os.path.isfile(strFilename):
return strFilename
strFilename = (GLOBAL_FILE_DIR + strBasename)
if os.path.isfile(strFilename):
return strFilename
raise IOError(('Could not find target file ' + strBasename))
def saveAsMat(img, filename, matlab_id, mat_dict=None):
assert (img.ndim in [2, 3, 4])
img_normalized = img.copy()
if (img.ndim == 3):
img_normalized = np.transpose(img_normalized, (1, 2, 0))
elif (img.ndim == 4):
img_normalized = np.transpose(img_normalized, (2, 3, 0, 1))
if (mat_dict is None):
mat_dict = {matlab_id: img_normalized}
else:
mat_dict[matlab_id] = img_normalized
dirname = (os.path.dirname(filename) or '.')
if (not os.path.exists(dirname)):
os.makedirs(dirname)
savemat(filename, mat_dict)
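# Usage sketch: save a CHW image under MATLAB variable name 'im'; saveAsMat
# transposes it to HWC before writing (path and variable name illustrative).
import numpy as np

img = np.random.rand(3, 32, 32).astype(np.float32)  # C x H x W
saveAsMat(img, 'out/img.mat', 'im')                  # creates out/img.mat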
class cityscapesDataSetStrongWeakAug(data.Dataset):
def __init__(self, data_root, data_list, max_iters=None, num_classes=19, split='train', ignore_label=255, debug=False, cfg=None, logger=None):
self.split = split
self.NUM_CLASS = num_classes
self.data_root = data_root
self.data_list = []
with open(data_list, 'r') as handle:
content = handle.readlines()
for fname in content:
name = fname.strip()
self.data_list.append({'img': os.path.join(self.data_root, ('leftImg8bit/%s/%s' % (self.split, name))), 'label': os.path.join(self.data_root, ('gtFine/%s/%s' % (self.split, (name.split('_leftImg8bit')[0] + '_gtFine_labelIds.png')))), 'name': name})
if (max_iters is not None):
self.data_list = (self.data_list * int(np.ceil((float(max_iters) / len(self.data_list)))))
self.id_to_trainid = {7: 0, 8: 1, 11: 2, 12: 3, 13: 4, 17: 5, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14, 28: 15, 31: 16, 32: 17, 33: 18}
self.trainid2name = {0: 'road', 1: 'sidewalk', 2: 'building', 3: 'wall', 4: 'fence', 5: 'pole', 6: 'light', 7: 'sign', 8: 'vegetation', 9: 'terrain', 10: 'sky', 11: 'person', 12: 'rider', 13: 'car', 14: 'truck', 15: 'bus', 16: 'train', 17: 'motorcycle', 18: 'bicycle'}
if (self.NUM_CLASS == 16):
self.id_to_trainid = {7: 0, 8: 1, 11: 2, 12: 3, 13: 4, 17: 5, 19: 6, 20: 7, 21: 8, 23: 9, 24: 10, 25: 11, 26: 12, 28: 13, 32: 14, 33: 15}
self.trainid2name = {0: 'road', 1: 'sidewalk', 2: 'building', 3: 'wall', 4: 'fence', 5: 'pole', 6: 'light', 7: 'sign', 8: 'vegetation', 9: 'sky', 10: 'person', 11: 'rider', 12: 'car', 13: 'bus', 14: 'motorcycle', 15: 'bicycle'}
if (split == 'train'):
(w, h) = cfg.INPUT.TARGET_INPUT_SIZE_TRAIN
weak_trans_list = []
if (cfg.INPUT.HORIZONTAL_FLIP_PROB_TRAIN > 0):
weak_trans_list = [transform.RandomHorizontalFlip(p=cfg.INPUT.HORIZONTAL_FLIP_PROB_TRAIN)]
if ((cfg.INPUT.INPUT_SCALES_TRAIN[0] == cfg.INPUT.INPUT_SCALES_TRAIN[1]) and (cfg.INPUT.INPUT_SCALES_TRAIN[0] == 1)):
weak_trans_list = ([transform.Resize((h, w))] + weak_trans_list)
else:
weak_trans_list = ([transform.RandomScale(scale=cfg.INPUT.INPUT_SCALES_TRAIN), transform.RandomCrop(size=(h, w), pad_if_needed=True)] + weak_trans_list)
self.weak_trans = transform.Compose(weak_trans_list)
logger.info(f'Target: weak aug {self.weak_trans}')
strong_trans_list = []
if cfg.INPUT.GAUSSIANBLUR:
strong_trans_list = [transform.GaussianBlur(kernel_size=[3, 3])]
if (cfg.INPUT.GRAYSCALE > 0):
strong_trans_list = ([transform.RandomGrayscale(p=cfg.INPUT.GRAYSCALE)] + strong_trans_list)
if (cfg.INPUT.BRIGHTNESS > 0):
strong_trans_list = ([transform.ColorJitter(brightness=cfg.INPUT.BRIGHTNESS, contrast=cfg.INPUT.CONTRAST, saturation=cfg.INPUT.SATURATION, hue=cfg.INPUT.HUE)] + strong_trans_list)
self.strong_trans = transform.Compose(strong_trans_list)
logger.info(f'Target: strong aug {self.strong_trans}')
normalize_trans_list = [transform.ToTensor(), transform.Normalize(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD, to_bgr255=cfg.INPUT.TO_BGR255)]
self.normalize_trans = transform.Compose(normalize_trans_list)
logger.info(f'Target: normalize aug {self.normalize_trans}')
else:
(w, h) = cfg.INPUT.INPUT_SIZE_TEST
self.val_trans = transform.Compose([transform.Resize((h, w), resize_label=False), transform.ToTensor(), transform.Normalize(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD, to_bgr255=cfg.INPUT.TO_BGR255)])
self.ignore_label = ignore_label
self.debug = debug
def __len__(self):
return len(self.data_list)
def __getitem__(self, index):
if self.debug:
index = 0
datafiles = self.data_list[index]
image = Image.open(datafiles['img']).convert('RGB')
label = np.array(Image.open(datafiles['label']), dtype=np.float32)
name = datafiles['name']
label_copy = (self.ignore_label * np.ones(label.shape, dtype=np.float32))
for (k, v) in self.id_to_trainid.items():
label_copy[(label == k)] = v
label = Image.fromarray(label_copy)
if (self.split == 'train'):
(weak_image, weak_label) = self.weak_trans(image, label)
(strong_image, strong_label) = self.strong_trans(weak_image, weak_label)
(weak_image, weak_label) = self.normalize_trans(weak_image, weak_label)
(strong_image, strong_label) = self.normalize_trans(strong_image, strong_label)
else:
(image, label) = self.val_trans(image, label)
if (self.split == 'train'):
return (weak_image, weak_label, strong_image, strong_label, name)
else:
return (image, label, name) |
def write_to_gsheets_contact(df, ks_output, sheet_name='Contact Info', service_file='creds.json'):
d = df[ks_output].fillna('')
print('writing to gsheets...')
gc = pygsheets.authorize(service_file=service_file)
sh = gc.open(sheet_name)
wks = sh[0]
wks.update_value('A1', datetime.now().strftime('Last updated %b %d'))  # requires `from datetime import datetime`; avoids a stale hardcoded date
wks.set_dataframe(d, (3, 1)) |
class FNetForSequenceClassification(metaclass=DummyObject):
_backends = ['torch']
def __init__(self, *args, **kwargs):
requires_backends(self, ['torch']) |
class AssignResult(util_mixins.NiceRepr):
def __init__(self, num_gts, gt_inds, max_overlaps, labels=None):
self.num_gts = num_gts
self.gt_inds = gt_inds
self.max_overlaps = max_overlaps
self.labels = labels
self._extra_properties = {}
@property
def num_preds(self):
return len(self.gt_inds)
def set_extra_property(self, key, value):
assert (key not in self.info)
self._extra_properties[key] = value
def get_extra_property(self, key):
return self._extra_properties.get(key, None)
@property
def info(self):
basic_info = {'num_gts': self.num_gts, 'num_preds': self.num_preds, 'gt_inds': self.gt_inds, 'max_overlaps': self.max_overlaps, 'labels': self.labels}
basic_info.update(self._extra_properties)
return basic_info
def __nice__(self):
parts = []
parts.append(f'num_gts={self.num_gts!r}')
if (self.gt_inds is None):
parts.append(f'gt_inds={self.gt_inds!r}')
else:
parts.append(f'gt_inds.shape={tuple(self.gt_inds.shape)!r}')
if (self.max_overlaps is None):
parts.append(f'max_overlaps={self.max_overlaps!r}')
else:
parts.append(f'max_overlaps.shape={tuple(self.max_overlaps.shape)!r}')
if (self.labels is None):
parts.append(f'labels={self.labels!r}')
else:
parts.append(f'labels.shape={tuple(self.labels.shape)!r}')
return ', '.join(parts)
@classmethod
def random(cls, **kwargs):
from radet.core.bbox import demodata
rng = demodata.ensure_rng(kwargs.get('rng', None))
num_gts = kwargs.get('num_gts', None)
num_preds = kwargs.get('num_preds', None)
p_ignore = kwargs.get('p_ignore', 0.3)
p_assigned = kwargs.get('p_assigned', 0.7)
p_use_label = kwargs.get('p_use_label', 0.5)
num_classes = kwargs.get('num_classes', 3)
if (num_gts is None):
num_gts = rng.randint(0, 8)
if (num_preds is None):
num_preds = rng.randint(0, 16)
if (num_gts == 0):
max_overlaps = torch.zeros(num_preds, dtype=torch.float32)
gt_inds = torch.zeros(num_preds, dtype=torch.int64)
if ((p_use_label is True) or (p_use_label < rng.rand())):
labels = torch.zeros(num_preds, dtype=torch.int64)
else:
labels = None
else:
import numpy as np
max_overlaps = torch.from_numpy(rng.rand(num_preds))
is_assigned = torch.from_numpy((rng.rand(num_preds) < p_assigned))
n_assigned = min(num_preds, min(num_gts, is_assigned.sum()))
assigned_idxs = np.where(is_assigned)[0]
rng.shuffle(assigned_idxs)
assigned_idxs = assigned_idxs[0:n_assigned]
assigned_idxs.sort()
is_assigned[:] = 0
is_assigned[assigned_idxs] = True
is_ignore = (torch.from_numpy((rng.rand(num_preds) < p_ignore)) & is_assigned)
gt_inds = torch.zeros(num_preds, dtype=torch.int64)
true_idxs = np.arange(num_gts)
rng.shuffle(true_idxs)
true_idxs = torch.from_numpy(true_idxs)
gt_inds[is_assigned] = true_idxs[:n_assigned]
gt_inds = torch.from_numpy(rng.randint(1, (num_gts + 1), size=num_preds))
gt_inds[is_ignore] = (- 1)
gt_inds[(~ is_assigned)] = 0
max_overlaps[(~ is_assigned)] = 0
if ((p_use_label is True) or (p_use_label < rng.rand())):
if (num_classes == 0):
labels = torch.zeros(num_preds, dtype=torch.int64)
else:
labels = torch.from_numpy(rng.randint(0, num_classes, size=num_preds))
labels[(~ is_assigned)] = 0
else:
labels = None
self = cls(num_gts, gt_inds, max_overlaps, labels)
return self
def add_gt_(self, gt_labels):
self_inds = torch.arange(1, (len(gt_labels) + 1), dtype=torch.long, device=gt_labels.device)
self.gt_inds = torch.cat([self_inds, self.gt_inds])
self.max_overlaps = torch.cat([self.max_overlaps.new_ones(len(gt_labels)), self.max_overlaps])
if (self.labels is not None):
self.labels = torch.cat([gt_labels, self.labels]) |
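# Illustrative usage sketch (added, not from the original source; assumes torch and the
# radet package used inside AssignResult.random are importable):
import torch

rand_result = AssignResult.random(num_gts=4, num_preds=10, rng=0)  # synthetic assignment for tests
rand_result.add_gt_(gt_labels=torch.zeros(4, dtype=torch.long))    # prepend ground-truth self-assignments
print(rand_result) |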
class ContextPath(nn.Module):
def __init__(self, norm_layer=nn.BatchNorm2d):
super(ContextPath, self).__init__()
inter_channels = 128
self.global_context = _GlobalAvgPooling(512, inter_channels, norm_layer)
self.arms = nn.ModuleList([AttentionRefinmentModule(512, inter_channels, norm_layer), AttentionRefinmentModule(256, inter_channels, norm_layer)])
self.refines = nn.ModuleList([_ConvBNReLU(inter_channels, inter_channels, 3, 1, 1, norm_layer=norm_layer), _ConvBNReLU(inter_channels, inter_channels, 3, 1, 1, norm_layer=norm_layer)])
def forward(self, c1, c2, c3, c4):
context_blocks = [c4, c3, c2, c1]
global_context = self.global_context(c4)
last_feature = global_context
context_outputs = []
for (i, (feature, arm, refine)) in enumerate(zip(context_blocks[:2], self.arms, self.refines)):
feature = arm(feature)
feature += last_feature
last_feature = F.interpolate(feature, size=context_blocks[(i + 1)].size()[2:], mode='bilinear', align_corners=True)
last_feature = refine(last_feature)
context_outputs.append(last_feature)
return context_outputs |
def load_conversations(fileName, lines, fields=['character1ID', 'character2ID', 'movieID', 'utteranceIDs'], delimiter=' +++$+++ '):
conversations = []
with open(fileName, 'r', encoding='iso-8859-1') as f:
for line in f:
values = line.split(delimiter)
convObj = {}
for (i, field) in enumerate(fields):
convObj[field] = values[i]
lineIds = ast.literal_eval(convObj['utteranceIDs'])  # requires `import ast`; safer than eval() for parsing the list literal
convObj['lines'] = []
for lineId in lineIds:
convObj['lines'].append(lines[lineId])
conversations.append(convObj)
return conversations |
def flatten(unflattened, parent_key='', separator='.'):
items = []
for (k, v) in unflattened.items():
if (separator in k):
raise ValueError('Found separator ({}) from key ({})'.format(separator, k))
new_key = (((parent_key + separator) + k) if parent_key else k)
if (isinstance(v, collections.abc.MutableMapping) and v):
items.extend(flatten(v, new_key, separator=separator).items())
else:
items.append((new_key, v))
return dict(items) |
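# Illustrative usage sketch (added; exercises the flatten() helper above):
nested = {'model': {'lr': 0.01, 'optim': {'name': 'adam'}}, 'seed': 42}
assert flatten(nested) == {'model.lr': 0.01, 'model.optim.name': 'adam', 'seed': 42} |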
class IterTimerHook(Hook):
def before_epoch(self, runner):
self.t = time.time()
def before_iter(self, runner):
runner.log_buffer.update({'data_time': (time.time() - self.t)})
def after_iter(self, runner):
runner.log_buffer.update({'time': (time.time() - self.t)})
self.t = time.time()
def after_data_to_device(self, runner):
runner.log_buffer.update({'transfer_time': (time.time() - self.t)})
def after_forward(self, runner):
runner.log_buffer.update({'forward_time': (time.time() - self.t)})
def after_parse_loss(self, runner):
runner.log_buffer.update({'loss_parse_time': (time.time() - self.t)})
self.t1 = time.time()
def after_grad_bp(self, runner):
runner.log_buffer.update({'backward_time': (time.time() - self.t1)}) |
def feature_cols(column_info):
return (column_info['wide_base_cols'] + column_info['wide_cross_cols'] + column_info['indicator_cols'] + column_info['embed_cols'] + column_info['continuous_cols'])
def load_ade20k(path, max_classes=None, random_state=None):
return {'train': ADE20K(path, 'training', max_classes=max_classes), 'val': ADE20K(path, 'validation', max_classes=max_classes)} |
def seq_linear(linear, x):
(batch, hidden_size, length, _) = x.size()
h = linear(torch.transpose(x, 1, 2).contiguous().view((batch * length), hidden_size))
return torch.transpose(h.view(batch, length, hidden_size, 1), 1, 2) |
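# Shape-check sketch (added; the Linear layer's output width must equal hidden_size
# for seq_linear's reshape to hold):
import torch
import torch.nn as nn

linear = nn.Linear(64, 64)
x = torch.randn(8, 64, 20, 1)  # (batch, hidden_size, length, 1)
assert seq_linear(linear, x).shape == (8, 64, 20, 1) |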
def paths(graph, id1, id2, length=4, path=[], _root=True):
if (len(path) >= length):
return []
if (id1 not in graph):
return []
if (id1 == id2):
return [(path + [id1])]
path = (path + [id1])
p = []
s = set(path)
for node in graph[id1].links:
if (node.id not in s):
p.extend(paths(graph, node.id, id2, length, path, False))
return ((_root and sorted(p, key=len)) or p) |
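# Illustrative usage sketch (added; a minimal node/graph mock matching the .links/.id
# interface that paths() expects):
class _Node:
    def __init__(self, node_id):
        self.id = node_id
        self.links = []

graph = {i: _Node(i) for i in 'abc'}
graph['a'].links = [graph['b']]
graph['b'].links = [graph['c']]
assert paths(graph, 'a', 'c') == [['a', 'b', 'c']] |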
def start_recording(recording_path, env_name):
unique_id = str(int(time.time()))
screens_dir = os.path.join(recording_path, 'screens', '{}'.format(env_name), unique_id)
trajectories_dir = os.path.join(recording_path, 'trajectories_pressed_buttons', '{}'.format(env_name))
os.makedirs(screens_dir)
os.makedirs(trajectories_dir, exist_ok=True)
return (unique_id, screens_dir) |
class TestBoxInstDataPreprocessor(TestCase):
def test_forward(self):
processor = BoxInstDataPreprocessor(mean=[0, 0, 0], std=[1, 1, 1])
data = {'inputs': [torch.randint(0, 256, (3, 256, 256))], 'data_samples': [DetDataSample()]}
out_data = processor(data)
(batch_inputs, batch_data_samples) = (out_data['inputs'], out_data['data_samples'])
self.assertEqual(batch_inputs.shape, (1, 3, 256, 256))
self.assertEqual(len(batch_data_samples), 1)
packed_inputs = demo_mm_inputs(2, [[3, 256, 256], [3, 128, 128]], num_items=[0, 0])
out_data = processor(packed_inputs, training=True)
(batch_inputs, batch_data_samples) = (out_data['inputs'], out_data['data_samples'])
self.assertEqual(batch_inputs.shape, (2, 3, 256, 256))
self.assertEqual(len(batch_data_samples), 2)
self.assertEqual(len(batch_data_samples[0].gt_instances.masks), 0)
self.assertEqual(len(batch_data_samples[0].gt_instances.pairwise_masks), 0)
self.assertEqual(len(batch_data_samples[1].gt_instances.masks), 0)
self.assertEqual(len(batch_data_samples[1].gt_instances.pairwise_masks), 0)
packed_inputs = demo_mm_inputs(2, [[3, 256, 256], [3, 128, 128]], num_items=[2, 1])
out_data = processor(packed_inputs, training=True)
(batch_inputs, batch_data_samples) = (out_data['inputs'], out_data['data_samples'])
self.assertEqual(batch_inputs.shape, (2, 3, 256, 256))
self.assertEqual(len(batch_data_samples), 2)
self.assertEqual(len(batch_data_samples[0].gt_instances.masks), 2)
self.assertEqual(len(batch_data_samples[0].gt_instances.pairwise_masks), 2)
self.assertEqual(len(batch_data_samples[1].gt_instances.masks), 1)
self.assertEqual(len(batch_data_samples[1].gt_instances.pairwise_masks), 1) |
class ResNet(nn.Module):
def __init__(self, depth, num_classes=1000):
super(ResNet, self).__init__()
assert ((((depth - 2) % 6) == 0) or (((depth - 2) % 9) == 0)), 'depth should be 6n+2 (BasicBlock) or 9n+2 (Bottleneck)'
block = (Bottleneck if (depth >= 44) else BasicBlock)
n = (((depth - 2) // 9) if (depth >= 44) else ((depth - 2) // 6))
self.inplanes = 16
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, 16, n)
self.layer2 = self._make_layer(block, 32, n, stride=2)
self.layer3 = self._make_layer(block, 64, n, stride=2)
self.avgpool = nn.AvgPool2d(8)
self.fc = nn.Linear((64 * block.expansion), num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
m.weight.data.normal_(0, math.sqrt((2.0 / n)))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion)))
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = (planes * block.expansion)
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.avgpool(x)
x = x.view(x.size(0), (- 1))
x = self.fc(x)
return x |
class LowerBoundedExponentialLR(_LRScheduler):
def __init__(self, optimizer, gamma, lower_bound, last_epoch=(- 1)):
self.gamma = gamma
self.lower_bound = lower_bound
super(LowerBoundedExponentialLR, self).__init__(optimizer, last_epoch)
def _get_lr(self, base_lr):
lr = (base_lr * (self.gamma ** self.last_epoch))
if (lr < self.lower_bound):
lr = self.lower_bound
return lr
def get_lr(self):
return [self._get_lr(base_lr) for base_lr in self.base_lrs] |
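# Illustrative usage sketch (added): the learning rate decays by gamma each epoch but
# is clipped at lower_bound (0.1 * 0.5**5 would be 0.003125, so it stops at 0.02).
import torch

opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1)
sched = LowerBoundedExponentialLR(opt, gamma=0.5, lower_bound=0.02)
for _ in range(5):
    opt.step()
    sched.step()
assert opt.param_groups[0]['lr'] == 0.02 |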
class FocalLoss(tf.Module):
def __init__(self, gamma=2.0, alpha=0.25, loss_weight=1.0):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.alpha = alpha
self.loss_weight = loss_weight
def __call__(self, pred, target, weight=None, avg_factor=None):
pred_sigmoid = tf.math.sigmoid(pred)
if (len(pred.shape) > 1):
target = tf.one_hot(target, int(pred.shape[(- 1)]))
target = tf.cast(target, pred.dtype)
pt = (((1 - pred_sigmoid) * target) + (pred_sigmoid * (1 - target)))
focal_weight = (((self.alpha * target) + ((1 - self.alpha) * (1 - target))) * tf.pow(pt, self.gamma))
loss = (tf.nn.sigmoid_cross_entropy_with_logits(target, pred) * focal_weight)
if (weight is not None):
loss = (loss * weight)
loss = (loss * self.loss_weight)
if avg_factor:
return (tf.reduce_sum(loss) / avg_factor)
else:
return tf.reduce_mean(loss) |
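# Illustrative usage sketch (added; assumes TF2): integer labels are one-hot encoded
# internally because the logits are 2-D; with no avg_factor the mean is returned.
import tensorflow as tf

loss_fn = FocalLoss(gamma=2.0, alpha=0.25)
logits = tf.random.normal((4, 3))
labels = tf.constant([0, 2, 1, 2])
print(float(loss_fn(logits, labels))) |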
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, is_last=False):
super(Bottleneck, self).__init__()
self.is_last = is_last
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, (planes * 4), kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d((planes * 4))
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if (self.downsample is not None):
residual = self.downsample(x)
out += residual
preact = out
out = F.relu(out)
if self.is_last:
return (out, preact)
else:
return out |
def main(_):
if (not FLAGS.dataset_dir):
raise ValueError('You must supply the dataset directory with --dataset_dir')
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Graph().as_default():
deploy_config = model_deploy.DeploymentConfig(num_clones=FLAGS.num_clones, clone_on_cpu=FLAGS.clone_on_cpu, replica_id=FLAGS.task, num_replicas=FLAGS.worker_replicas, num_ps_tasks=FLAGS.num_ps_tasks)
with tf.device(deploy_config.variables_device()):
global_step = slim.create_global_step()
dataset = dataset_factory.get_dataset(FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
network_fn = nets_factory.get_network_fn(FLAGS.model_name, num_classes=(dataset.num_classes - FLAGS.labels_offset), weight_decay=FLAGS.weight_decay, is_training=True)
preprocessing_name = (FLAGS.preprocessing_name or FLAGS.model_name)
image_preprocessing_fn = preprocessing_factory.get_preprocessing(preprocessing_name, is_training=True)
with tf.device(deploy_config.inputs_device()):
provider = slim.dataset_data_provider.DatasetDataProvider(dataset, num_readers=FLAGS.num_readers, common_queue_capacity=(20 * FLAGS.batch_size), common_queue_min=(10 * FLAGS.batch_size))
[image, label] = provider.get(['image', 'label'])
label -= FLAGS.labels_offset
train_image_size = (FLAGS.train_image_size or network_fn.default_image_size)
image = image_preprocessing_fn(image, train_image_size, train_image_size)
(images, labels) = tf.train.batch([image, label], batch_size=FLAGS.batch_size, num_threads=FLAGS.num_preprocessing_threads, capacity=(5 * FLAGS.batch_size))
labels = slim.one_hot_encoding(labels, (dataset.num_classes - FLAGS.labels_offset))
batch_queue = slim.prefetch_queue.prefetch_queue([images, labels], capacity=(2 * deploy_config.num_clones))
def clone_fn(batch_queue):
(images, labels) = batch_queue.dequeue()
(logits, end_points) = network_fn(images)
if ('AuxLogits' in end_points):
slim.losses.softmax_cross_entropy(end_points['AuxLogits'], labels, label_smoothing=FLAGS.label_smoothing, weights=0.4, scope='aux_loss')
slim.losses.softmax_cross_entropy(logits, labels, label_smoothing=FLAGS.label_smoothing, weights=1.0)
return end_points
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
clones = model_deploy.create_clones(deploy_config, clone_fn, [batch_queue])
first_clone_scope = deploy_config.clone_scope(0)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope)
end_points = clones[0].outputs
for end_point in end_points:
x = end_points[end_point]
summaries.add(tf.summary.histogram(('activations/' + end_point), x))
summaries.add(tf.summary.scalar(('sparsity/' + end_point), tf.nn.zero_fraction(x)))
for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):
summaries.add(tf.summary.scalar(('losses/%s' % loss.op.name), loss))
for variable in slim.get_model_variables():
summaries.add(tf.summary.histogram(variable.op.name, variable))
if FLAGS.moving_average_decay:
moving_average_variables = slim.get_model_variables()
variable_averages = tf.train.ExponentialMovingAverage(FLAGS.moving_average_decay, global_step)
else:
(moving_average_variables, variable_averages) = (None, None)
with tf.device(deploy_config.optimizer_device()):
learning_rate = _configure_learning_rate(dataset.num_samples, global_step)
optimizer = _configure_optimizer(learning_rate)
summaries.add(tf.summary.scalar('learning_rate', learning_rate))
if FLAGS.sync_replicas:
optimizer = tf.train.SyncReplicasOptimizer(opt=optimizer, replicas_to_aggregate=FLAGS.replicas_to_aggregate, total_num_replicas=FLAGS.worker_replicas, variable_averages=variable_averages, variables_to_average=moving_average_variables)
elif FLAGS.moving_average_decay:
update_ops.append(variable_averages.apply(moving_average_variables))
variables_to_train = _get_variables_to_train()
(total_loss, clones_gradients) = model_deploy.optimize_clones(clones, optimizer, var_list=variables_to_train)
summaries.add(tf.summary.scalar('total_loss', total_loss))
grad_updates = optimizer.apply_gradients(clones_gradients, global_step=global_step)
update_ops.append(grad_updates)
update_op = tf.group(*update_ops)
with tf.control_dependencies([update_op]):
train_tensor = tf.identity(total_loss, name='train_op')
summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES, first_clone_scope))
summary_op = tf.summary.merge(list(summaries), name='summary_op')
slim.learning.train(train_tensor, logdir=FLAGS.train_dir, master=FLAGS.master, is_chief=(FLAGS.task == 0), init_fn=_get_init_fn(), summary_op=summary_op, number_of_steps=FLAGS.max_number_of_steps, log_every_n_steps=FLAGS.log_every_n_steps, save_summaries_secs=FLAGS.save_summaries_secs, save_interval_secs=FLAGS.save_interval_secs, sync_optimizer=(optimizer if FLAGS.sync_replicas else None)) |
class T5Config(PretrainedConfig):
model_type = 't5'
keys_to_ignore_at_inference = ['past_key_values']
def __init__(self, vocab_size=32128, d_model=512, d_kv=64, d_ff=2048, num_layers=6, num_decoder_layers=None, num_heads=8, relative_attention_num_buckets=32, dropout_rate=0.1, layer_norm_epsilon=1e-06, initializer_factor=1.0, feed_forward_proj='relu', is_encoder_decoder=True, use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs):
super().__init__(pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs)
self.vocab_size = vocab_size
self.d_model = d_model
self.d_kv = d_kv
self.d_ff = d_ff
self.num_layers = num_layers
self.num_decoder_layers = (num_decoder_layers if (num_decoder_layers is not None) else self.num_layers)
self.num_heads = num_heads
self.relative_attention_num_buckets = relative_attention_num_buckets
self.dropout_rate = dropout_rate
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_factor = initializer_factor
self.feed_forward_proj = feed_forward_proj
self.use_cache = use_cache
@property
def hidden_size(self):
return self.d_model
@property
def num_attention_heads(self):
return self.num_heads
@property
def num_hidden_layers(self):
return self.num_layers |
def _chunked_iterator(i: Iterable, chunk_size: int, drop_last: bool):
chunks = more_itertools.chunked(i, chunk_size)
if drop_last:
return (chunk for chunk in chunks if (len(chunk) == chunk_size))
else:
return chunks |
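# Illustrative usage sketch (added; assumes more_itertools is importable, as the
# helper above requires):
assert list(_chunked_iterator(range(7), chunk_size=3, drop_last=False)) == [[0, 1, 2], [3, 4, 5], [6]]
assert list(_chunked_iterator(range(7), chunk_size=3, drop_last=True)) == [[0, 1, 2], [3, 4, 5]] |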
def train_one_epoch(train_loader, model, criterion, optimizer, epoch, opt, num_train_samples, no_acc_eval=False):
info = {}
losses = AverageMeter('Loss ', ':6.4g')
top1 = AverageMeter(' ', ':6.2f')
top5 = AverageMeter(' ', ':6.2f')
model.train()
lr_scheduler = global_utils.LearningRateScheduler(mode=opt.lr_mode, lr=opt.lr, num_training_instances=num_train_samples, target_lr=opt.target_lr, stop_epoch=opt.epochs, warmup_epoch=opt.warmup, stage_list=opt.lr_stage_list, stage_decay=opt.lr_stage_decay)
lr_scheduler.update_lr(batch_size=(epoch * num_train_samples))
optimizer.zero_grad()
batches_per_allreduce_count = 0
for (i, (input, target)) in enumerate(train_loader):
if (not opt.independent_training):
lr_scheduler.update_lr(batch_size=(input.shape[0] * opt.world_size))
else:
lr_scheduler.update_lr(batch_size=input.shape[0])
current_lr = lr_scheduler.get_lr()
for param_group in optimizer.param_groups:
param_group['lr'] = (current_lr * opt.batches_per_allreduce)
bool_label_smoothing = False
bool_mixup = False
if (not (opt.dist_mode == 'cpu')):
input = input.cuda(opt.gpu, non_blocking=True)
target = target.cuda(opt.gpu, non_blocking=True)
transformed_target = target
with torch.no_grad():
if (hasattr(opt, 'label_smoothing') and opt.label_smoothing):
bool_label_smoothing = True
if (hasattr(opt, 'mixup') and opt.mixup):
bool_mixup = True
if (bool_label_smoothing and (not bool_mixup)):
transformed_target = one_hot(target, num_classes=opt.num_classes, smoothing_eps=0.1)
if ((not bool_label_smoothing) and bool_mixup):
transformed_target = one_hot(target, num_classes=opt.num_classes)
(input, transformed_target) = mixup(input, transformed_target)
if (bool_label_smoothing and bool_mixup):
transformed_target = one_hot(target, num_classes=opt.num_classes, smoothing_eps=0.1)
(input, transformed_target) = mixup(input, transformed_target)
output = model(input)
logit_loss = criterion(output, transformed_target)
(ts_feature_loss, ts_logit_loss) = model.compute_ts_distill_loss()
loss = ((logit_loss + (opt.teacher_feature_weight * ts_feature_loss)) + (opt.teacher_logit_weight * ts_logit_loss))
input_size = int(input.size(0))
if (not no_acc_eval):
(acc1, acc5) = accuracy(output, target, topk=(1, 5))
top1.update(float(acc1[0]), input_size)
top5.update(float(acc5[0]), input_size)
else:
acc1 = [0]
acc5 = [0]
losses.update(float(loss), input_size)
if opt.apex:
if (opt.dist_mode == 'horovod'):
if (batches_per_allreduce_count >= opt.batches_per_allreduce):
optimizer.zero_grad()
batches_per_allreduce_count = 0
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
batches_per_allreduce_count += 1
if (opt.grad_clip is not None):
torch.nn.utils.clip_grad_value_(model.parameters(), opt.grad_clip)
if (batches_per_allreduce_count >= opt.batches_per_allreduce):
optimizer.synchronize()
with optimizer.skip_synchronize():
optimizer.step()
else:
if (batches_per_allreduce_count >= opt.batches_per_allreduce):
optimizer.zero_grad()
batches_per_allreduce_count = 0
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
if (opt.grad_clip is not None):
torch.nn.utils.clip_grad_value_(model.parameters(), opt.grad_clip)
if (batches_per_allreduce_count >= opt.batches_per_allreduce):
optimizer.step()
else:
if (batches_per_allreduce_count >= opt.batches_per_allreduce):
optimizer.zero_grad()
batches_per_allreduce_count = 0
loss.backward()
batches_per_allreduce_count += 1
if (opt.grad_clip is not None):
torch.nn.utils.clip_grad_value_(model.parameters(), opt.grad_clip)
if (batches_per_allreduce_count >= opt.batches_per_allreduce):
optimizer.step()
if (((i % opt.print_freq) == 0) and (opt.rank == 0)):
logging.info('Train epoch={}, i={}, loss={:4g},logit_loss={:4g}, ts_feature_loss={:4g}, ts_logit_loss={:4g},acc1={:4g}%, acc5={:4g}%, lr={:4g}'.format(epoch, i, float(loss), float(logit_loss), float(ts_feature_loss), float(ts_logit_loss), float(acc1[0]), float(acc5[0]), current_lr))
if ((opt.dist_mode == 'horovod') and (not opt.independent_training)):
sync_tensor = torch.tensor([losses.sum, losses.count], dtype=torch.float32)
hvd.allreduce(sync_tensor, name='sync_tensor_topk_acc')
losses_avg = (sync_tensor[0] / sync_tensor[1]).item()
else:
losses_avg = losses.avg
info['losses'] = losses_avg
return info |
class Rasterizer(Combinator):
def combine(self, data: List[np.ndarray]) -> np.ndarray:
image_shape = data[0].shape
base_image = np.zeros(image_shape).astype('uint8')
return reduce(add_foreground_to_image, ([base_image] + data)) |
def convHuge(c, **kargs):
return n.LeNet([(128, 3, 3, 1), (128, 4, 4, 1), (256, 3, 3, 1), (256, 4, 4, 1)], [512, 512, c], padding=1, normal=True, bias=False, last_lin=True, **kargs) |
def save_training_logs(results_paths, mlog):
step_num_set = set()
for results_path in results_paths:
print(f'logging {results_path}')
try:
results = read_logs(results_path)
except Exception as e:
print(f'Could not read {results_path}; the file may be empty.', e)
continue
for result in results:
i = result['step_num']
if (i in step_num_set):
continue
else:
step_num_set.add(i)
del result['step_num']
for (k, v) in result.items():
log(mlog, k, v, phase='train')
reset_log(mlog, None, i, phase='train') |
class ACFPN(object):
__shared__ = ['norm_type', 'freeze_norm']
def __init__(self, num_chan=256, min_level=2, max_level=6, spatial_scale=[(1.0 / 32.0), (1.0 / 16.0), (1.0 / 8.0), (1.0 / 4.0)], has_extra_convs=False, norm_type=None, freeze_norm=False, use_c5=True, norm_groups=32):
self.freeze_norm = freeze_norm
self.num_chan = num_chan
self.min_level = min_level
self.max_level = max_level
self.spatial_scale = spatial_scale
self.has_extra_convs = has_extra_convs
self.norm_type = norm_type
self.use_c5 = use_c5
self.norm_groups = norm_groups
def _add_topdown_lateral(self, body_name, body_input, upper_output):
lateral_name = (('fpn_inner_' + body_name) + '_lateral')
topdown_name = ('fpn_topdown_' + body_name)
fan = body_input.shape[1]
if self.norm_type:
initializer = Xavier(fan_out=fan)
lateral = ConvNorm(body_input, self.num_chan, 1, initializer=initializer, norm_type=self.norm_type, freeze_norm=self.freeze_norm, name=lateral_name, norm_name=lateral_name)
else:
lateral = fluid.layers.conv2d(body_input, self.num_chan, 1, param_attr=ParamAttr(name=(lateral_name + '_w'), initializer=Xavier(fan_out=fan)), bias_attr=ParamAttr(name=(lateral_name + '_b'), learning_rate=2.0, regularizer=L2Decay(0.0)), name=lateral_name)
topdown = fluid.layers.resize_nearest(upper_output, scale=2.0, name=topdown_name)
return (lateral + topdown)
def dense_aspp_block(self, input, num_filters1, num_filters2, dilation_rate, dropout_prob, name):
conv = ConvNorm(input, num_filters=num_filters1, filter_size=1, stride=1, groups=1, norm_decay=0.0, norm_type='gn', norm_groups=self.norm_groups, dilation=dilation_rate, lr_scale=1, freeze_norm=False, act='relu', norm_name=(name + '_gn'), initializer=None, bias_attr=False, name=(name + '_gn'))
conv = fluid.layers.conv2d(conv, num_filters2, filter_size=3, padding=dilation_rate, dilation=dilation_rate, act='relu', param_attr=ParamAttr(name=(name + '_conv_w')), bias_attr=ParamAttr(name=(name + '_conv_b')))
if (dropout_prob > 0):
conv = fluid.layers.dropout(conv, dropout_prob=dropout_prob)
return conv
def dense_aspp(self, input, name=None):
dropout0 = 0.1
d_feature0 = 512
d_feature1 = 256
aspp3 = self.dense_aspp_block(input, num_filters1=d_feature0, num_filters2=d_feature1, dropout_prob=dropout0, name=(name + '_aspp3'), dilation_rate=3)
conv = fluid.layers.concat([aspp3, input], axis=1)
aspp6 = self.dense_aspp_block(conv, num_filters1=d_feature0, num_filters2=d_feature1, dropout_prob=dropout0, name=(name + '_aspp6'), dilation_rate=6)
conv = fluid.layers.concat([aspp6, conv], axis=1)
aspp12 = self.dense_aspp_block(conv, num_filters1=d_feature0, num_filters2=d_feature1, dropout_prob=dropout0, name=(name + '_aspp12'), dilation_rate=12)
conv = fluid.layers.concat([aspp12, conv], axis=1)
aspp18 = self.dense_aspp_block(conv, num_filters1=d_feature0, num_filters2=d_feature1, dropout_prob=dropout0, name=(name + '_aspp18'), dilation_rate=18)
conv = fluid.layers.concat([aspp18, conv], axis=1)
aspp24 = self.dense_aspp_block(conv, num_filters1=d_feature0, num_filters2=d_feature1, dropout_prob=dropout0, name=(name + '_aspp24'), dilation_rate=24)
conv = fluid.layers.concat([aspp3, aspp6, aspp12, aspp18, aspp24], axis=1)
conv = ConvNorm(conv, num_filters=self.num_chan, filter_size=1, stride=1, groups=1, norm_decay=0.0, norm_type='gn', norm_groups=self.norm_groups, dilation=1, lr_scale=1, freeze_norm=False, act='relu', norm_name=(name + '_dense_aspp_reduce_gn'), initializer=None, bias_attr=False, name=(name + '_dense_aspp_reduce_gn'))
return conv
def get_output(self, body_dict):
spatial_scale = copy.deepcopy(self.spatial_scale)
body_name_list = list(body_dict.keys())[::(- 1)]
num_backbone_stages = len(body_name_list)
self.fpn_inner_output = [[] for _ in range(num_backbone_stages)]
fpn_inner_name = ('fpn_inner_' + body_name_list[0])
body_input = body_dict[body_name_list[0]]
fan = body_input.shape[1]
if self.norm_type:
initializer = Xavier(fan_out=fan)
self.fpn_inner_output[0] = ConvNorm(body_input, self.num_chan, 1, initializer=initializer, norm_type=self.norm_type, freeze_norm=self.freeze_norm, name=fpn_inner_name, norm_name=fpn_inner_name)
else:
self.fpn_inner_output[0] = fluid.layers.conv2d(body_input, self.num_chan, 1, param_attr=ParamAttr(name=(fpn_inner_name + '_w'), initializer=Xavier(fan_out=fan)), bias_attr=ParamAttr(name=(fpn_inner_name + '_b'), learning_rate=2.0, regularizer=L2Decay(0.0)), name=fpn_inner_name)
self.fpn_inner_output[0] += self.dense_aspp(self.fpn_inner_output[0], name='acfpn')
for i in range(1, num_backbone_stages):
body_name = body_name_list[i]
body_input = body_dict[body_name]
top_output = self.fpn_inner_output[(i - 1)]
fpn_inner_single = self._add_topdown_lateral(body_name, body_input, top_output)
self.fpn_inner_output[i] = fpn_inner_single
fpn_dict = {}
fpn_name_list = []
for i in range(num_backbone_stages):
fpn_name = ('fpn_' + body_name_list[i])
fan = ((self.fpn_inner_output[i].shape[1] * 3) * 3)
if self.norm_type:
initializer = Xavier(fan_out=fan)
fpn_output = ConvNorm(self.fpn_inner_output[i], self.num_chan, 3, initializer=initializer, norm_type=self.norm_type, freeze_norm=self.freeze_norm, name=fpn_name, norm_name=fpn_name)
else:
fpn_output = fluid.layers.conv2d(self.fpn_inner_output[i], self.num_chan, filter_size=3, padding=1, param_attr=ParamAttr(name=(fpn_name + '_w'), initializer=Xavier(fan_out=fan)), bias_attr=ParamAttr(name=(fpn_name + '_b'), learning_rate=2.0, regularizer=L2Decay(0.0)), name=fpn_name)
fpn_dict[fpn_name] = fpn_output
fpn_name_list.append(fpn_name)
if ((not self.has_extra_convs) and ((self.max_level - self.min_level) == len(spatial_scale))):
body_top_name = fpn_name_list[0]
body_top_extension = fluid.layers.pool2d(fpn_dict[body_top_name], 1, 'max', pool_stride=2, name=(body_top_name + '_subsampled_2x'))
fpn_dict[(body_top_name + '_subsampled_2x')] = body_top_extension
fpn_name_list.insert(0, (body_top_name + '_subsampled_2x'))
spatial_scale.insert(0, (spatial_scale[0] * 0.5))
highest_backbone_level = ((self.min_level + len(spatial_scale)) - 1)
if (self.has_extra_convs and (self.max_level > highest_backbone_level)):
if self.use_c5:
fpn_blob = body_dict[body_name_list[0]]
else:
fpn_blob = fpn_dict[fpn_name_list[0]]
for i in range((highest_backbone_level + 1), (self.max_level + 1)):
fpn_blob_in = fpn_blob
fpn_name = ('fpn_' + str(i))
if (i > (highest_backbone_level + 1)):
fpn_blob_in = fluid.layers.relu(fpn_blob)
fan = ((fpn_blob_in.shape[1] * 3) * 3)
fpn_blob = fluid.layers.conv2d(input=fpn_blob_in, num_filters=self.num_chan, filter_size=3, stride=2, padding=1, param_attr=ParamAttr(name=(fpn_name + '_w'), initializer=Xavier(fan_out=fan)), bias_attr=ParamAttr(name=(fpn_name + '_b'), learning_rate=2.0, regularizer=L2Decay(0.0)), name=fpn_name)
fpn_dict[fpn_name] = fpn_blob
fpn_name_list.insert(0, fpn_name)
spatial_scale.insert(0, (spatial_scale[0] * 0.5))
res_dict = OrderedDict([(k, fpn_dict[k]) for k in fpn_name_list])
return (res_dict, spatial_scale) |
def pattern_registry(pattern_type):
def decorator_pattern(cls):
if (pattern_type in PATTERNS):
raise ValueError('Cannot have two patterns with the same name')
PATTERNS[pattern_type] = cls
return cls
return decorator_pattern |
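# Illustrative usage sketch (added; assumes the module-level PATTERNS dict that the
# decorator above writes into):
@pattern_registry('glob')
class GlobPattern:
    pass

assert PATTERNS['glob'] is GlobPattern  # a second 'glob' registration would raise ValueError |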
@register_canonicalize
@register_specialize
@local_optimizer([BernoulliOp])
def replace_bernoulli_op(node):
if (not isinstance(node.op, BernoulliOp)):
return False
prob = node.inputs[0]
noise = node.inputs[1]
samples = (noise < prob).astype(floatX)
return [samples] |
def main():
env = RacecarGymEnv(renders=True, isDiscrete=True)
act = deepq.load('racecar_model.pkl')
print(act)
while True:
(obs, done) = (env.reset(), False)
print('')
print('obs')
print(obs)
episode_rew = 0
while (not done):
env.render()
(obs, rew, done, _) = env.step(act(obs[None])[0])
episode_rew += rew
print('Episode reward', episode_rew) |
class TestQubit(QiskitTestCase):
def test_default(self):
qubit = Qubit(1, DriveChannel(2), MeasureChannel(4), AcquireChannel(5), control_channels=[ControlChannel(3)])
self.assertEqual(qubit.drive, DriveChannel(2))
self.assertEqual(qubit.controls[0], ControlChannel(3))
self.assertEqual(qubit.measure, MeasureChannel(4))
self.assertEqual(qubit.acquire, AcquireChannel(5)) |
class CifarResNeXt(nn.Module):
def __init__(self, cardinality, depth, num_classes, widen_factor=4, dropRate=0):
super(CifarResNeXt, self).__init__()
self.cardinality = cardinality
self.depth = depth
self.block_depth = ((self.depth - 2) // 9)
self.widen_factor = widen_factor
self.num_classes = num_classes
self.output_size = 64
self.stages = [64, (64 * self.widen_factor), (128 * self.widen_factor), (256 * self.widen_factor)]
self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)
self.bn_1 = nn.BatchNorm2d(64)
self.stage_1 = self.block('stage_1', self.stages[0], self.stages[1], 1)
self.stage_2 = self.block('stage_2', self.stages[1], self.stages[2], 2)
self.stage_3 = self.block('stage_3', self.stages[2], self.stages[3], 2)
self.classifier = nn.Linear(1024, num_classes)
init.kaiming_normal_(self.classifier.weight)
for key in self.state_dict():
if (key.split('.')[(- 1)] == 'weight'):
if ('conv' in key):
init.kaiming_normal_(self.state_dict()[key], mode='fan_out')
if ('bn' in key):
self.state_dict()[key][...] = 1
elif (key.split('.')[(- 1)] == 'bias'):
self.state_dict()[key][...] = 0
def block(self, name, in_channels, out_channels, pool_stride=2):
block = nn.Sequential()
for bottleneck in range(self.block_depth):
name_ = ('%s_bottleneck_%d' % (name, bottleneck))
if (bottleneck == 0):
block.add_module(name_, ResNeXtBottleneck(in_channels, out_channels, pool_stride, self.cardinality, self.widen_factor))
else:
block.add_module(name_, ResNeXtBottleneck(out_channels, out_channels, 1, self.cardinality, self.widen_factor))
return block
def forward(self, x):
x = self.conv_1_3x3.forward(x)
x = F.relu(self.bn_1.forward(x), inplace=True)
x = self.stage_1.forward(x)
x = self.stage_2.forward(x)
x = self.stage_3.forward(x)
x = F.avg_pool2d(x, 8, 1)
features = x.view((- 1), 1024)
return [self.classifier(features), features] |
def make_json(max_block=1):
with open('conf/rigidcloth/absparse/multi.json', 'r') as f:
config = json.load(f)
cube = config['obstacles'][0]
ground = config['obstacles'][1]
single_length = 0.1
one_dif = (single_length + 0.001)
ini_size = single_length
ini_x = (- 4)
ini_y = (- 4)
def save_config(config, file):
with open(file, 'w') as f:
json.dump(config, f)
ground['transform']['scale'] = max_block
cube['transform']['translate'][0] = ini_x
cube['transform']['translate'][2] = 0.01
for i in range(1, max_block):
new_prim = copy.deepcopy(cube)
new_prim['transform']['translate'][2] = ((0.001 + single_length) + 0.001)
new_prim['transform']['translate'][0] = (ini_x + (one_dif * (i - 0.5)))
new_prim['transform']['translate'][1] = (new_prim['transform']['translate'][1] + 0.01)
config['obstacles'].append(new_prim)
new_prim = copy.deepcopy(cube)
new_prim['transform']['translate'][0] = (ini_x + (one_dif * i))
new_prim['transform']['translate'][2] = 0.001
config['obstacles'].append(new_prim)
save_config(config, 'conf/rigidcloth/absparse/abqr_make.json') |
def calc_metrics_for_dataset(ctx, metrics, real_data_path, fake_data_path, mirror, resolution, gpus, verbose, use_cache: bool, num_runs: int, seed: int):
dnnlib.util.Logger(should_flush=True)
args = dnnlib.EasyDict(metrics=metrics, num_gpus=gpus, seed=seed, verbose=verbose)
if (not all((metric_main.is_valid_metric(metric) for metric in args.metrics))):
ctx.fail('\n'.join((['--metrics can only contain the following values:'] + metric_main.list_valid_metrics())))
if (not (args.num_gpus >= 1)):
ctx.fail('--gpus must be at least 1')
dummy_dataset_cfg = OmegaConf.create({'max_num_frames': 10000})
args.dataset_kwargs = dnnlib.EasyDict(class_name='training.dataset_styleinv.StyleInVDataset', path=real_data_path, xflip=False, resolution=resolution, return_vid=True, sampling_cfg=dnnlib.EasyDict(type='N/A'))
args.gen_dataset_kwargs = dnnlib.EasyDict(class_name='training.dataset_styleinv.StyleInVDataset', path=fake_data_path, xflip=False, resolution=resolution, return_vid=True, sampling_cfg=dnnlib.EasyDict(type='N/A'))
args.generator_as_dataset = True
if args.verbose:
print('Real data options:')
print(args.dataset_kwargs)
print('Fake data options:')
print(args.gen_dataset_kwargs)
args.run_dir = None
args.use_cache = use_cache
args.num_runs = num_runs
if args.verbose:
print('Launching processes...')
torch.multiprocessing.set_start_method('spawn')
with tempfile.TemporaryDirectory() as temp_dir:
if (args.num_gpus == 1):
subprocess_fn(rank=0, args=args, temp_dir=temp_dir)
else:
torch.multiprocessing.spawn(fn=subprocess_fn, args=(args, temp_dir), nprocs=args.num_gpus) |
def synthesize_audio(model, waveglow, denoiser, inp, lab=None, strength=0.0):
assert (inp.size(0) == 1)
inp = inp.cuda()
if (lab is not None):
lab = torch.LongTensor(1).cuda().fill_(lab)
with torch.no_grad():
(_, mel, _, ali, has_eos) = model.inference(inp, lab, ret_has_eos=True)
aud = waveglow.infer(mel, sigma=0.666)
aud_dn = denoiser(aud, strength=strength).squeeze(1)
return (mel, aud, aud_dn, has_eos) |
class MineNetwork(nn.Module):
def __init__(self, x_dim, z_dim, width, loss='mine', alpha=0.01, method=None):
super().__init__()
self.running_mean = 0
self.loss = loss
self.alpha = alpha
self.method = method
T = Seq(x_dim, z_dim, width)
if (method == 'concat'):
if isinstance(T, nn.Sequential):
self.T = CustomSequential(ConcatLayer(), *T)
else:
self.T = CustomSequential(ConcatLayer(), T)
else:
self.T = T
def forward(self, x, z, z_marg=None):
if (z_marg is None):
z_marg = z[torch.randperm(x.shape[0])]
t = self.T(x, z).mean()
t_marg = self.T(x, z_marg)
if (self.loss in ['mine']):
(second_term, self.running_mean) = ema_loss(t_marg, self.running_mean, self.alpha)
elif (self.loss in ['fdiv']):
second_term = torch.exp((t_marg - 1)).mean()
elif (self.loss in ['mine_biased']):
second_term = (torch.logsumexp(t_marg, 0) - math.log(t_marg.shape[0]))
else:
raise ValueError(f'Unknown loss variant: {self.loss}')
return ((- t) + second_term)
def mi(self, x, z, z_marg=None):
if isinstance(x, np.ndarray):
x = torch.from_numpy(x).float()
if isinstance(z, np.ndarray):
z = torch.from_numpy(z).float()
with torch.no_grad():
mi = (- self.forward(x, z, z_marg))
return mi |
def get_tagged_data_for_query(data):
dataset = data['query-split']
if (args.split is not None):
if (str(args.split) == str(dataset)):
dataset = 'test'
else:
dataset = 'train'
for sent_info in data['sentences']:
if (not args.query_split):
dataset = sent_info['question-split']
if (args.split is not None):
if (str(args.split) == str(dataset)):
dataset = 'test'
else:
dataset = 'train'
for sql in data['sql']:
sql_vars = {}
for sql_var in data['variables']:
sql_vars[sql_var['name']] = sql_var['example']
text = sent_info['text']
text_vars = sent_info['variables']
(yield (dataset, insert_variables(sql, sql_vars, text, text_vars)))
if (not args.use_all_sql):
break |
@add_start_docstrings('\n CamemBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a\n softmax) e.g. for RocStories/SWAG tasks.\n ', CAMEMBERT_START_DOCSTRING)
class TFCamembertForMultipleChoice(TFRobertaForMultipleChoice):
config_class = CamembertConfig |
def BN(x, phase_BN, scope):
return tf.layers.batch_normalization(x, momentum=0.9, training=phase_BN) |
class COLA(AbstractTask):
name = 'cola'
labels_list = ['0', '1']
metric = [metrics.matthews_corrcoef]
metric_names = ['matthews_correlation']
split_to_data_split = {'train': 'train', 'validation': 'validation', 'test': 'validation'}
def load_dataset(self, split):
return datasets.load_dataset('glue', 'cola', split=split, script_version='master')
def preprocessor(self, example, add_prefix=True):
src_texts = ['sentence:', example['sentence']]
tgt_texts = [str(example['label'])]
return self.seq2seq_format(src_texts, tgt_texts, add_prefix) |
def feature_descriptions(max_num_entities):
return {'image': tf.FixedLenFeature((IMAGE_SIZE + [3]), tf.string), 'mask': tf.FixedLenFeature((([max_num_entities] + IMAGE_SIZE) + [1]), tf.string)} |
class MultiHeadAttention(nn.Module):
def __init__(self, hidden_size, num_attention_heads, attention_probs_dropout_prob):
super().__init__()
if ((hidden_size % num_attention_heads) != 0):
raise ValueError(f'The hidden size {hidden_size} is not a multiple of the number of attention heads {num_attention_heads}.')
self.num_attention_heads = num_attention_heads
self.attention_head_size = int((hidden_size / num_attention_heads))
self.all_head_size = (self.num_attention_heads * self.attention_head_size)
self.query = nn.Linear(hidden_size, self.all_head_size)
self.key = nn.Linear(hidden_size, self.all_head_size)
self.value = nn.Linear(hidden_size, self.all_head_size)
layer_init(self.query, xavier=True)
layer_init(self.key, xavier=True)
layer_init(self.value, xavier=True)
self.dropout = nn.Dropout(attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = (x.size()[:(- 1)] + (self.num_attention_heads, self.attention_head_size))
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, head_mask=None, output_attentions=False):
mixed_query_layer = self.query(hidden_states)
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose((- 1), (- 2)))
attention_scores = (attention_scores / math.sqrt(self.attention_head_size))
attention_probs = nn.functional.softmax(attention_scores, dim=(- 1))
attention_probs = self.dropout(attention_probs)
if (head_mask is not None):
attention_probs = (attention_probs * head_mask)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = (context_layer.size()[:(- 2)] + (self.all_head_size,))
context_layer = context_layer.view(*new_context_layer_shape)
outputs = ((context_layer, attention_probs) if output_attentions else (context_layer,))
return outputs |
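# Shape-check sketch (added; assumes the layer_init helper used in __init__ is importable):
import torch

mha = MultiHeadAttention(hidden_size=64, num_attention_heads=8, attention_probs_dropout_prob=0.1)
hidden = torch.randn(2, 10, 64)                        # (batch, seq_len, hidden)
(context, probs) = mha(hidden, output_attentions=True)
assert context.shape == (2, 10, 64)
assert probs.shape == (2, 8, 10, 10) |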
def parse_model_dir(model_dir):
if (model_dir and model_dir.startswith('dbfs:/')):
model_dir = ('/dbfs/' + model_dir[len('dbfs:/'):])
return model_dir |
class AllInOneEnv(gym.Env):
def __init__(self, ns: str, robot_yaml_path: str, settings_yaml_path: str, reward_fnc: str, safe_dist: float=None, goal_radius: float=0.1, max_steps_per_episode=1000, train_mode: bool=True, debug: bool=False, paths: dict=None, drl_server: str=None, evaluation: bool=False, evaluation_episodes: int=40, seed: int=1, extended_eval: bool=False, run_scenario: bool=False):
super(AllInOneEnv, self).__init__()
self.ns = ns
if ((ns is not None) and (ns != '')):
self.ns_prefix = (('/' + ns) + '/')
else:
self.ns_prefix = '/'
if train_mode:
rospy.init_node(f'train_env_{self.ns}', disable_signals=False)
else:
rospy.init_node(f'eval_env_{self.ns}', disable_signals=False)
self._debug = debug
self._evaluation = evaluation
self._extended_eval = extended_eval
self._is_train_mode = rospy.get_param('/train_mode')
self._setup_robot_configuration(robot_yaml_path, settings_yaml_path)
self._action_bounds = [self._linear_low, self._linear_high, self._angular_low, self._angular_high]
if (safe_dist is None):
safe_dist = (1.6 * self._robot_radius)
self.reward_calculator = RewardCalculator(robot_radius=self._robot_radius, safe_dist=safe_dist, goal_radius=goal_radius, rule=reward_fnc, extended_eval=self._extended_eval)
if self._is_train_mode:
self._service_name_step = f'{self.ns_prefix}step_world'
self._sim_step_client = rospy.ServiceProxy(self._service_name_step, StepWorld)
if self._evaluation:
map_update_freq = 1
else:
map_update_freq = 20
self.task_manager = TaskManager(self.ns, map_update_freq, paths, run_scenario)
self._seed = seed
if self._evaluation:
self._current_eval_iteration = 0
self._evaluation_episodes = evaluation_episodes
self._steps_curr_episode = 0
self._max_steps_per_episode = max_steps_per_episode
self._local_planner_manager = LocalPlannerManager(drl_server, paths, self.ns, self._is_train_mode)
self.action_space = spaces.Discrete(self._local_planner_manager.get_numb_models())
self._visualizer = AllInOneVisualizer(self._evaluation, self._local_planner_manager.get_model_names(), self.ns_prefix)
self._step_processor = StepProcessor(self._is_train_mode, self.ns_prefix, paths['all_in_one_parameters'], self._max_steps_per_episode, self._local_planner_manager, self.reward_calculator, self._action_bounds)
(self._run_all_agents_each_iteration, self._all_in_one_planner_frequency, self._global_planner_frequency) = self._step_processor.get_step_parameters()
self.reward_calculator.set_global_planner_frequency(self._global_planner_frequency)
self._logger = Logger(extended_eval, evaluation, max_steps_per_episode, self._local_planner_manager.get_numb_models(), self._all_in_one_planner_frequency)
self.observation_collector = ObservationCollectorAllInOne(self.ns, self._laser_num_beams, self._laser_max_range, self._local_planner_manager.get_numb_models(), self._local_planner_manager.get_required_observations(), include_model_actions=self._run_all_agents_each_iteration)
self._step_processor.set_observation_collector(self.observation_collector)
self.observation_space = self.observation_collector.get_observation_space()
self._last_obs_dict = dict()
self._last_merged_obs = np.zeros(shape=self.observation_space.shape)
if self._is_train_mode:
self._local_planner_manager.wait_for_agents(self._sim_step_client)
self.reset()
rospy.loginfo((('Environment ' + self.ns) + ': All agents are loaded - Gym environment is ready!'))
def step(self, action: int):
(action_model, self._last_obs_dict, self._last_merged_obs, reward, reward_info) = self._step_processor.process_step(action)
done = reward_info['is_done']
(info, in_crash) = self._logger.get_step_info(done, reward_info, self._last_obs_dict, reward, action, self._steps_curr_episode)
self._visualizer.visualize_step(action, in_crash, self._last_obs_dict['robot_pose'])
self._steps_curr_episode += 1
return (np.float32(self._last_merged_obs), reward, done, info)
def reset(self):
self._local_planner_manager.reset_planners()
self.observation_collector.reset()
if self._is_train_mode:
self._sim_step_client()
if self._evaluation:
seed = ((self._current_eval_iteration % self._evaluation_episodes) * self._seed)
else:
seed = random.randint(0, 1000000)
self.task_manager.reset(seed)
self.reward_calculator.reset()
self._logger.reset()
self._step_processor.reset()
self._visualizer.reset_visualizer()
if self._evaluation:
self._current_eval_iteration += 1
self._steps_curr_episode = 0
return self._last_merged_obs
def close(self):
self._local_planner_manager.close_planners()
self.observation_collector.close()
def render(self, mode='human'):
pass
def get_number_models(self) -> int:
return self._local_planner_manager.get_numb_models()
def get_model_names(self) -> [str]:
return self._local_planner_manager.get_model_names()
def get_all_in_one_planner_frequency(self):
return self._all_in_one_planner_frequency
def _setup_robot_configuration(self, robot_yaml_path: str, settings_yaml_path: str):
with open(robot_yaml_path, 'r') as fd:
robot_data = yaml.safe_load(fd)
for body in robot_data['bodies']:
if (body['name'] == 'base_footprint'):
for footprint in body['footprints']:
if (footprint['type'] == 'circle'):
self._robot_radius = (footprint.setdefault('radius', 0.3) * 1.05)
for plugin in robot_data['plugins']:
if (plugin['type'] == 'Laser'):
laser_angle_min = plugin['angle']['min']
laser_angle_max = plugin['angle']['max']
laser_angle_increment = plugin['angle']['increment']
self._laser_num_beams = int((round(((laser_angle_max - laser_angle_min) / laser_angle_increment)) + 1))
rospy.set_param('/laser_num_beams', self._laser_num_beams)
self._laser_max_range = plugin['range']
with open(settings_yaml_path, 'r') as fd:
setting_data = yaml.safe_load(fd)
linear_range = setting_data['robot']['continuous_actions']['linear_range']
angular_range = setting_data['robot']['continuous_actions']['angular_range']
self._angular_low = angular_range[0]
self._angular_high = angular_range[1]
self._linear_low = linear_range[0]
self._linear_high = linear_range[1] |
def outer(vector1, vector2=None):
if (vector2 is None):
vector2 = np.array(vector1).conj()
else:
vector2 = np.array(vector2).conj()
return np.outer(vector1, vector2) |
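# Illustrative usage sketch (added): with one argument, outer() conjugates the vector
# against itself, giving the projector |v><v|.
import numpy as np

v = np.array([1.0, 1j]) / np.sqrt(2)
proj = outer(v)            # 2x2 Hermitian projector onto v
cross = outer(v, [1, 0])   # against an explicit second vector (conjugated internally)
print(proj, cross) |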
def apply_lights_manager(args, light_manager):
if (args.lights is None):
return
light_group = 'None'
if (args.lightgroup is not None):
light_group = args.lightgroup
lights = light_manager.get_all_lights(LIGHT_GROUP[light_group][0])
i = 0
while (i < len(args.lights)):
option = args.lights[i]
if (option == 'on'):
light_manager.turn_on(lights)
elif (option == 'off'):
light_manager.turn_off(lights)
elif (option == 'intensity'):
light_manager.set_intensity(lights, int(args.lights[(i + 1)]))
i += 1
elif (option == 'color'):
r = int(args.lights[(i + 1)])
g = int(args.lights[(i + 2)])
b = int(args.lights[(i + 3)])
light_manager.set_color(lights, carla.Color(r, g, b))
i += 3
i += 1 |
class GetLayerName(object):
_name_count = {}
@classmethod
def get(cls, name_prefix):
cnt = cls._name_count.get(name_prefix, 0)
cls._name_count[name_prefix] = (cnt + 1)
return (name_prefix + str(cnt)) |
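# Illustrative usage sketch (added): each prefix keeps its own monotonically increasing counter.
assert GetLayerName.get('conv') == 'conv0'
assert GetLayerName.get('conv') == 'conv1'
assert GetLayerName.get('bn') == 'bn0' |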
class Embeeding_Attn(nn.Module):
def __init__(self):
super(Embeeding_Attn, self).__init__()
self.max_len = 3
self.input_dim = 1824
self.hidden_dim = 150
self.bidirectional = True
self.drop_out_rate = 0.5
self.context_vector_size = [parameters['embedding_context_vecotr_size'], 1]
self.drop = nn.Dropout(p=self.drop_out_rate)
self.word_GRU = nn.GRU(input_size=self.input_dim, hidden_size=self.hidden_dim, bidirectional=self.bidirectional, batch_first=True)
self.w_proj = nn.Linear(in_features=(2 * self.hidden_dim), out_features=(2 * self.hidden_dim))
self.w_context_vector = nn.Parameter(torch.randn(self.context_vector_size).float())
self.softmax = nn.Softmax(dim=1)
init_gru(self.word_GRU)
def forward(self, x):
(x, _) = self.word_GRU(x)
Hw = torch.tanh(self.w_proj(x))
w_score = self.softmax(Hw.matmul(self.w_context_vector))
x = x.mul(w_score)
x = torch.sum(x, dim=1)
return x |
class FeatureAdaption(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, deformable_groups=4):
super(FeatureAdaption, self).__init__()
offset_channels = ((kernel_size * kernel_size) * 2)
self.conv_offset = nn.Conv2d(in_channels, (deformable_groups * offset_channels), 1, bias=True)
self.conv_adaption = DeformConv(in_channels, out_channels, stride=1, kernel_size=kernel_size, padding=((kernel_size - 1) // 2), deformable_groups=deformable_groups, bias=False)
self.relu = nn.ReLU(inplace=True)
self.init_offset()
def init_offset(self):
self.conv_offset.weight.data.zero_()
self.conv_offset.bias.data.zero_()
def init_weights(self):
pass
def forward(self, x):
offset = self.conv_offset(x)
x = self.relu(self.conv_adaption(x, offset))
return x |
def _make_optimizer(args, model):
logger.info(f'Using {args.optim} Optimizer ......')
if (args.optim == 'adam'):
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
elif (args.optim == 'adamw'):
optimizer = optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.weight_decay, eps=args.epsilon)
elif (args.optim == 'sgd'):
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.99, nesterov=True, weight_decay=args.weight_decay)
else:
raise ValueError(f'Unsupported optimizer: {args.optim}')
return optimizer |
class PegasusXForConditionalGeneration(metaclass=DummyObject):
_backends = ['torch']
def __init__(self, *args, **kwargs):
requires_backends(self, ['torch']) |
def quaddobl_ismember_filter(wsys, gpts, dim, points, rcotol=1e-06, evatol=1e-06, memtol=1e-06, verbose=True, tasks=0):
from phcpy.solutions import diagnostics
result = []
for point in points:
rco = diagnostics(point)[1]
if (rco > rcotol):
(isgood, ismember) = (True, False)
else:
tst = quaddobl_ismember(wsys, gpts, dim, point, evatol, memtol, verbose, tasks)
(isgood, ismember) = tst
if (isgood and (not ismember)):
result.append(point)
return result |
def load_model(model, path):
if isinstance(model, DataParallel):
model.module.load_state_dict(torch.load(path))
else:
model.load_state_dict(torch.load(path)) |
@weighted_loss
def quality_focal_loss_tensor_target(pred, target, beta=2.0, activated=False):
assert (pred.size() == target.size())
if activated:
pred_sigmoid = pred
loss_function = F.binary_cross_entropy
else:
pred_sigmoid = pred.sigmoid()
loss_function = F.binary_cross_entropy_with_logits
scale_factor = pred_sigmoid
target = target.type_as(pred)
zerolabel = scale_factor.new_zeros(pred.shape)
loss = (loss_function(pred, zerolabel, reduction='none') * scale_factor.pow(beta))
pos = (target != 0)
scale_factor = (target[pos] - pred_sigmoid[pos])
loss[pos] = (loss_function(pred[pos], target[pos], reduction='none') * scale_factor.abs().pow(beta))
loss = loss.sum(dim=1, keepdim=False)
return loss |
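# Illustrative usage sketch (added): soft quality labels share the logits' shape. Note that
# the reconstructed @weighted_loss wrapper typically adds weight/reduction handling, so the
# decorated call may return a reduced scalar rather than the per-sample sum.
import torch

pred = torch.randn(4, 3)   # raw logits for 3 classes
target = torch.zeros(4, 3)
target[0, 1] = 0.8         # IoU-style soft label for a single positive
print(quality_focal_loss_tensor_target(pred, target)) |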
class KittiDataCountLeftTest(data_testing_lib.BaseVTABDataTest):
def setUp(self):
super(KittiDataCountLeftTest, self).setUp(data_wrapper=kitti.KittiData(task='count_left'), num_classes=16, expected_num_samples=dict(train=6347, val=423, trainval=6770, test=711, train800val200=1000, train800=800, val200=200), required_tensors_shapes={'image': (None, None, 3), 'label': ()}, tfds_label_key_map={}) |
@pytest.fixture(name='load_mock')
def _load_mock(monkeypatch: MonkeyPatch, mock_data_1: pd.DataFrame) -> MagicMock:
load_mock = MagicMock(return_value=mock_data_1)
monkeypatch.setattr(cache.dataframe_utils, 'load_df', load_mock)
return load_mock |
def phc_email(addrs, subject, msg_cont):
import smtplib
from email.mime.text import MIMEText
from phc_config import phcstmp, phcmail, phcmailps
msg = MIMEText(msg_cont)
msg['Subject'] = subject
msg['To'] = addrs
server = smtplib.SMTP(phcstmp)
server.starttls()
server.login(phcmail, phcmailps)
try:
server.sendmail(phcmail, [addrs, phcmail], msg.as_string())
except smtplib.SMTPRecipientsRefused:
return 4
finally:
server.quit()
return 0 |
def train_unsupervised(args):
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
if (args.dataset == 'Kitti'):
train_dataset = KittiOdometrySceneflow(root=args.root, npoints=args.npoints, max_bias=args.max_bias)
elif (args.dataset == 'nuscenes'):
scenes_list = './data/nuscenes_trainlist.txt'
train_dataset = NuScenesFlow(root=args.root, npoints=args.npoints, scenes_list=scenes_list, max_bias=args.max_bias)
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=4, shuffle=True, pin_memory=True, drop_last=True)
net = FlowNet3D().cuda()
if args.use_wandb:
wandb.watch(net)
net.load_state_dict(torch.load(args.pretrain_model))
optimizer = optim.Adam(net.parameters(), lr=args.init_lr)
lr_scheduler = ClippedStepLR(optimizer, args.step_size_lr, args.min_lr, args.gamma_lr)
def update_bn_momentum(epoch):
for m in net.modules():
if (isinstance(m, nn.BatchNorm1d) or isinstance(m, nn.BatchNorm2d)):
m.momentum = max((args.init_bn_momentum * (args.gamma_bn_momentum ** (epoch // args.step_size_bn_momentum))), args.min_bn_momentum)
best_train_loss = float('inf')
for epoch in range(args.epochs):
net.train()
count = 0
total_loss = 0
pbar = tqdm(enumerate(train_loader))
for (i, data) in pbar:
(points1, points2, features1, features2) = data
points1 = points1.cuda(non_blocking=True)
points2 = points2.cuda(non_blocking=True)
features1 = features1.cuda(non_blocking=True)
features2 = features2.cuda(non_blocking=True)
optimizer.zero_grad()
pred_flow = net(points1, points2, features1, features2)
trans_points1 = (points1 + pred_flow)
loss = chamfer_loss(trans_points1, points2)
loss.backward()
optimizer.step()
count += 1
total_loss += loss.item()
if ((i % 10) == 0):
pbar.set_description('Train Epoch:{}[{}/{}({:.0f}%)]\tLoss: {:.6f}'.format((epoch + 1), i, len(train_loader), ((100.0 * i) / len(train_loader)), loss.item()))
lr_scheduler.step()
total_loss = (total_loss / count)
if (args.use_wandb == 1):
wandb.log({'loss': total_loss})
print('Epoch ', (epoch + 1), 'finished ', 'loss = ', total_loss)
if (total_loss < best_train_loss):
torch.save(net.state_dict(), (args.save_dir + 'best_train.pth'))
best_train_loss = total_loss
print('Best train loss: {:.4f}'.format(best_train_loss)) |