code stringlengths 101 5.91M |
|---|
class ReformerConfig(PretrainedConfig):
    """Configuration for a Reformer model.

    Holds the hyper-parameters (attention layout, axial position embeddings,
    LSH/local attention chunking, dropout rates, ...) consumed by the model.
    Special-token ids and decoder/tying flags are forwarded to the
    ``PretrainedConfig`` base class.
    """
    model_type = 'reformer'
    keys_to_ignore_at_inference = ['past_buckets_states']
    attribute_map = {}

    def __init__(self, attention_head_size=64, attn_layers=None, axial_norm_std=1.0,
                 axial_pos_embds=True, axial_pos_shape=None, axial_pos_embds_dim=None,
                 chunk_size_lm_head=0, eos_token_id=2, feed_forward_size=512,
                 hash_seed=None, hidden_act='relu', hidden_dropout_prob=0.05,
                 hidden_size=256, initializer_range=0.02, is_decoder=False,
                 layer_norm_eps=1e-12, local_num_chunks_before=1,
                 local_num_chunks_after=0, local_attention_probs_dropout_prob=0.05,
                 local_attn_chunk_length=64, lsh_attn_chunk_length=64,
                 lsh_attention_probs_dropout_prob=0.0, lsh_num_chunks_before=1,
                 lsh_num_chunks_after=0, max_position_embeddings=4096,
                 num_attention_heads=12, num_buckets=None, num_hashes=1,
                 pad_token_id=0, vocab_size=320, tie_word_embeddings=False,
                 use_cache=True, classifier_dropout=None, **kwargs):
        # Bug fix: the original used mutable list literals as default arguments,
        # which are shared across all calls; use None sentinels that expand to
        # the same effective defaults.
        if attn_layers is None:
            attn_layers = ['local', 'lsh', 'local', 'lsh', 'local', 'lsh']
        if axial_pos_shape is None:
            axial_pos_shape = [64, 64]
        if axial_pos_embds_dim is None:
            axial_pos_embds_dim = [64, 192]
        self.hash_seed = hash_seed
        self.vocab_size = vocab_size
        self.attention_head_size = attention_head_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.num_hashes = num_hashes
        # One hidden layer per entry in attn_layers ('local' or 'lsh').
        self.num_hidden_layers = len(attn_layers)
        # Normalize list-valued bucket counts to an immutable tuple.
        self.num_buckets = (tuple(num_buckets) if isinstance(num_buckets, list) else num_buckets)
        self.lsh_attn_chunk_length = lsh_attn_chunk_length
        self.local_attn_chunk_length = local_attn_chunk_length
        self.lsh_num_chunks_after = lsh_num_chunks_after
        self.lsh_num_chunks_before = lsh_num_chunks_before
        self.local_num_chunks_after = local_num_chunks_after
        self.local_num_chunks_before = local_num_chunks_before
        self.hidden_act = hidden_act
        self.feed_forward_size = feed_forward_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.lsh_attention_probs_dropout_prob = lsh_attention_probs_dropout_prob
        self.local_attention_probs_dropout_prob = local_attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.axial_pos_embds = axial_pos_embds
        self.axial_pos_shape = tuple(axial_pos_shape)
        self.axial_pos_embds_dim = tuple(axial_pos_embds_dim)
        self.axial_norm_std = axial_norm_std
        self.chunk_size_lm_head = chunk_size_lm_head
        self.attn_layers = attn_layers
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_decoder=is_decoder, tie_word_embeddings=tie_word_embeddings, **kwargs)
def main(not_parsed_args):
    """Write augmented copies (flips/rotations) of every training image.

    The number of variants produced is controlled by FLAGS.augment_level;
    outputs go to '<data_dir>/<dataset>_<level>/' with suffixed filenames.
    """
    if len(not_parsed_args) > 1:
        print('Unknown args:%s' % not_parsed_args)
        exit()
    print('Building x%d augmented data.' % FLAGS.augment_level)
    training_filenames = util.get_files_in_directory(FLAGS.data_dir + '/' + FLAGS.dataset + '/')
    target_dir = FLAGS.data_dir + '/' + FLAGS.dataset + ('_%d/' % FLAGS.augment_level)
    util.make_dir(target_dir)
    # (minimum augment level, filename suffix, transform) for each variant.
    variants = [
        (2, '_v', lambda img: np.flipud(img)),
        (3, '_h', lambda img: np.fliplr(img)),
        (4, '_hv', lambda img: np.flipud(np.fliplr(img))),
        (5, '_r1', lambda img: np.rot90(img)),
        (6, '_r2', lambda img: np.rot90(img, -1)),
        (7, '_r1_v', lambda img: np.flipud(np.rot90(img))),
        (8, '_r2_v', lambda img: np.flipud(np.rot90(img, -1))),
    ]
    for file_path in training_filenames:
        org_image = util.load_image(file_path)
        stem, extension = os.path.splitext(os.path.basename(file_path))
        new_filename = target_dir + stem
        # Always keep an unmodified copy.
        util.save_image(new_filename + extension, org_image)
        for min_level, suffix, transform in variants:
            if FLAGS.augment_level >= min_level:
                util.save_image(new_filename + suffix + extension, transform(org_image))
def load_components_from_file(fname):
    """Parse line components from a whitespace-delimited parameter file.

    Blank lines, '#' comments, and rows without exactly 8 fields are skipped.
    Each valid row is (num, ion, z, z_err, b, b_err, logN, logN_err); the
    values are loaded into a Parameters object with stderr set on each entry.
    """
    components = []
    with open(fname) as parameters:
        for raw_line in parameters:
            line = raw_line.strip()
            if not line or line.startswith('#'):
                continue
            fields = line.split()
            if len(fields) != 8:
                continue
            num = int(fields[0])
            ion = fields[1]
            z, z_err, b, b_err, logN, logN_err = (float(v) for v in fields[2:8])
            components.append([num, ion, z, b, logN, z_err, b_err, logN_err])
    pars = Parameters()
    for num, ion, z, b, logN, z_err, b_err, logN_err in components:
        # '*' is not a valid parameter-name character; map it to 'x'.
        ion = ion.replace('*', 'x')
        z_name = 'z%i_%s' % (num, ion)
        b_name = 'b%i_%s' % (num, ion)
        N_name = 'logN%i_%s' % (num, ion)
        pars.add(z_name, value=z)
        pars.add(b_name, value=b)
        pars.add(N_name, value=logN)
        pars[z_name].stderr = z_err
        pars[b_name].stderr = b_err
        pars[N_name].stderr = logN_err
    return pars
def get_test_options(option_var='QISKIT_TESTS'):
    """Read test options from the *option_var* environment variable.

    The variable is a comma-separated list of option names. Known options:
    skip_online, mock_online, run_slow, rec. Some options force others off
    (e.g. enabling 'rec' disables 'skip_online' and 'run_slow'). On CI fork
    pull requests 'skip_online' is forced on.

    Returns:
        dict mapping option name -> bool.
    """
    tests_options = {'skip_online': False, 'mock_online': False, 'run_slow': False, 'rec': False}

    def turn_false(option):
        # Disable an option; returns True so calls can be chained with `and`.
        tests_options[option] = False
        return True

    # Enabling the key option forces the listed options off.
    dependency_solvers = {
        'skip_online': (lambda: turn_false('rec')),
        'mock_online': (lambda: turn_false('skip_online')),
        'rec': (lambda: (turn_false('skip_online') and turn_false('run_slow'))),
    }

    def set_flag(flag_):
        tests_options[flag_] = True
        if flag_ in dependency_solvers:
            dependency_solvers[flag_]()

    flag_string = os.getenv(option_var, None)
    flags = flag_string.split(',') if flag_string else []
    for flag in flags:
        if flag not in tests_options:
            logger.error('Testing option "%s" unknown.', flag)
            # Bug fix: the original still called set_flag() for unknown
            # options, silently injecting unsupported keys into the dict.
            continue
        set_flag(flag)
    if _is_ci_fork_pull_request():
        set_flag('skip_online')
    logger.debug(tests_options)
    return tests_options
def add_flops_counter_variable_or_reset(module):
    """Attach (or reset) the __flops__ / __params__ counters on *module*.

    Only acts on supported module types; warns if counters already exist,
    since ptflops will overwrite them.
    """
    if is_supported_instance(module):
        if hasattr(module, '__flops__') or hasattr(module, '__params__'):
            # Bug fix: the original concatenation lacked the space and
            # punctuation around the module name, producing garbled output.
            print('Warning: variables __flops__ or __params__ are already defined for the module '
                  + type(module).__name__ + '; ptflops can affect your code!')
        module.__flops__ = 0
        module.__params__ = get_model_parameters_number(module)
def test_is_3dlist():
    """is_3dlist accepts (possibly empty) triple-nested plain lists only."""
    accepted = [[], [[]], [[[]]], [[[1]]]]
    for case in accepted:
        assert utils.is_3dlist(case)
    rejected = [[[1, 2]], [[np.array([1, 2])]]]
    for case in rejected:
        assert not utils.is_3dlist(case)
def ReadFileSL(x_axis, tthread, batchInterval, NUM_ITEMS, deposit_ratio, key_skewness, overlap_ratio, abort_ratio, txn_length, isCyclic, complexity):
    """Read throughput results for the OP and OG schemes across x_axis values.

    Each value in x_axis is used as the deposit ratio (shadowing the
    `deposit_ratio` parameter, as in the original code). Returns a 2-row
    list: y[0] holds OP_NS_A throughputs, y[1] holds OG_NS_A throughputs.
    """
    y = [[], []]
    # Loop-invariant: hoisted out of the per-point loops.
    inputEvents = tthread * batchInterval
    for row, scheme in enumerate(('OP_NS_A', 'OG_NS_A')):
        for deposit_ratio in x_axis:
            op_gs_path = getPathSL(scheme, inputEvents, tthread, NUM_ITEMS, deposit_ratio, key_skewness, overlap_ratio, abort_ratio, txn_length, isCyclic, complexity)
            # Bug fix: the original left the file handle open; only the first
            # line is needed, so read it inside a context manager.
            with open(op_gs_path) as result_file:
                first_line = result_file.readline()
            y[row].append(float(first_line.split(': ')[1]))
    print(y)
    return y
class EfficientInteractionDownProjection(torch.nn.Module):
    """Down-projection of the (spherical, radial) basis via a learned weight.

    The weight has shape (num_spherical, num_radial, emb_size_interm) and is
    (re)initialized with he_orthogonal_init.
    """

    def __init__(self, num_spherical: int, num_radial: int, emb_size_interm: int, name='EfficientDownProj'):
        super().__init__()
        self.num_spherical = num_spherical
        self.num_radial = num_radial
        self.emb_size_interm = emb_size_interm
        self.reset_parameters()

    def reset_parameters(self):
        """(Re)create the projection weight and apply He-orthogonal init."""
        weight_shape = (self.num_spherical, self.num_radial, self.emb_size_interm)
        self.weight = torch.nn.Parameter(torch.empty(weight_shape), requires_grad=True)
        he_orthogonal_init(self.weight)

    def forward(self, tbf):
        """Project the radial part and transpose the spherical part.

        tbf is a (rbf_env, sph) pair; returns (projected rbf, transposed sph).
        """
        rbf_env, sph = tbf
        projected = torch.matmul(rbf_env, self.weight)
        projected = projected.permute(1, 2, 0)
        transposed_sph = torch.transpose(sph, 1, 2)
        return (projected, transposed_sph)
def rouge(hypotheses, references):
    """Average ROUGE-1 / ROUGE-2 / ROUGE-L (f, p, r) over hypothesis/reference pairs."""

    def _averaged(score_pair):
        # Score every pair, then average f/p/r column-wise.
        per_pair = [score_pair(hyp, ref) for hyp, ref in zip(hypotheses, references)]
        return tuple(map(np.mean, zip(*per_pair)))

    rouge_1_f, rouge_1_p, rouge_1_r = _averaged(lambda h, r: rouge_n([h], [r], 1))
    rouge_2_f, rouge_2_p, rouge_2_r = _averaged(lambda h, r: rouge_n([h], [r], 2))
    rouge_l_f, rouge_l_p, rouge_l_r = _averaged(lambda h, r: rouge_l_sentence_level([h], [r]))
    return {
        'rouge_1/f_score': rouge_1_f, 'rouge_1/r_score': rouge_1_r, 'rouge_1/p_score': rouge_1_p,
        'rouge_2/f_score': rouge_2_f, 'rouge_2/r_score': rouge_2_r, 'rouge_2/p_score': rouge_2_p,
        'rouge_l/f_score': rouge_l_f, 'rouge_l/r_score': rouge_l_r, 'rouge_l/p_score': rouge_l_p,
    }
class Data(object):
    """Shard a dataset into equal contiguous splits and build data loaders.

    Creates one training DataLoader per node plus a single shared test
    DataLoader (the first args.node_num shards are used for training).
    """

    def __init__(self, args):
        self.args = args
        self.trainset, self.testset = None, None
        trainset, testset = Dataset(args)

        def _contiguous_shards(dataset):
            # Equal-sized contiguous shards; any remainder of
            # len(dataset) % args.split is silently dropped.
            sizes = [int(len(dataset) / args.split) for _ in range(args.split)]
            offsets = torch.tensor(sizes).cumsum(dim=0).tolist()
            indices = range(len(dataset.targets))
            return [Subset(dataset, indices[off - size:off]) for off, size in zip(offsets, sizes)]

        splited_trainset = _contiguous_shards(trainset)
        self.test_all = DataLoader(testset, batch_size=args.batchsize, shuffle=False, num_workers=4)
        self.train_loader = [DataLoader(splited_trainset[i], batch_size=args.batchsize, shuffle=True, num_workers=4) for i in range(args.node_num)]
        # Bug fix: the original built a list of per-node test loaders from
        # sharded test data and then immediately overwrote it with the single
        # shared loader below; the dead shard/loader construction is removed.
        # The effective behaviour (one shared test loader) is unchanged.
        self.test_loader = DataLoader(testset, batch_size=args.batchsize, shuffle=False, num_workers=4)
def test_digits_cosine_greedi_nn_object():
    """Mixture selection with a GreeDi optimizer reproduces the reference results."""
    submodels = [FacilityLocationSelection(100), GraphCutSelection(100)]
    greedi = GreeDi(optimizer1='naive', optimizer2='naive', random_state=0)
    model = MixtureSelection(100, submodels, [1.0, 0.3], metric='cosine', optimizer=greedi)
    model.fit(X_digits)
    assert_array_equal(model.ranking, digits_cosine_greedi_ranking)
    assert_array_almost_equal(model.gains, digits_cosine_greedi_gains, 4)
    assert_array_almost_equal(model.subset, X_digits[model.ranking])
def main(args):
    """Plot perplexity-vs-context-window curves from a CSV, saving PNG and PDF.

    The CSV's first column holds model names; remaining column headers are
    context sizes. Output files are written next to the input CSV.
    """
    data = pd.read_csv(args.csv)
    fig, ax = plt.subplots(figsize=(10, 5))
    context_sizes = [float(col) for col in data.columns[1:]]
    for row in data.values:
        series_label = row[0].replace('NousResearch/', '')
        series_values = [float(v) for v in row[1:]]
        ax.plot(context_sizes, series_values, label=series_label)
    ax.set_xlabel('Context Window')
    ax.set_ylabel('Perplexity (lower is better)')
    ax.set_xlim(args.xmin, args.xmax)
    ax.set_ylim(args.ymin, args.ymax)
    ax.legend(loc='upper right')
    fig.savefig(args.csv + '.png')
    fig.savefig(args.csv + '.pdf', transparent=True)
def gen_bar_updater(pbar):
    """Return a urlretrieve-style reporthook that drives the given progress bar.

    The hook receives (block count, block size, total size) and advances
    *pbar* by the number of newly downloaded bytes.
    """
    def bar_update(count, block_size, total_size):
        # Lazily set the total once the server reports a size.
        if pbar.total is None and total_size:
            pbar.total = total_size
        downloaded = count * block_size
        pbar.update(downloaded - pbar.n)
    return bar_update
def initialize_ct_counts_detector(phantom, detector_material, output_file):
    """Create, configure and save a GGEMS flat-panel CT detector system.

    Geometry (pixel counts/sizes, source-detector and source-isocenter
    distances) is taken from ``phantom.geomet``; results are saved to
    *output_file* with scatter storage enabled.
    """
    # NOTE(review): the setter calls below are stateful GGEMS API calls and
    # are kept in their original order.
    cbct_detector = gg.GGEMSCTSystem('custom')
    cbct_detector.set_ct_type('flat')
    cbct_detector.set_number_of_modules(1, 1)
    # nDetector = (pixels_x, pixels_y), dDetector = pixel sizes in mm
    # (assumes TIGRE-style geometry on phantom.geomet -- TODO confirm).
    (a, b) = phantom.geomet.nDetector
    (d, e) = phantom.geomet.dDetector
    cbct_detector.set_number_of_detection_elements(a, b, 1)
    cbct_detector.set_size_of_detection_elements(d, e, 1, 'mm')
    cbct_detector.set_material(detector_material)
    # DSD: source-to-detector distance; DSO: source-to-isocenter distance.
    cbct_detector.set_source_detector_distance(phantom.geomet.DSD, 'mm')
    cbct_detector.set_source_isocenter_distance(phantom.geomet.DSO, 'mm')
    cbct_detector.set_rotation(0.0, 0.0, 0.0, 'deg')
    cbct_detector.set_global_system_position(0.0, 0.0, 0.0, 'mm')
    # Discard photons depositing less than 10 keV.
    cbct_detector.set_threshold(10, 'keV')
    cbct_detector.save(output_file)
    cbct_detector.store_scatter(True)
    cbct_detector.set_visible(True)
def reverse_history(history: History) -> History:
    """Return a new mapping in which every token's annotation sequence is reversed."""
    flipped = {}
    for token, annotations in history.items():
        flipped[token] = annotations[::-1]
    return flipped
_module(force=True)
class DummyDataset(Dataset):
    """Tiny in-memory dataset: 12 random 2-feature inputs, all labelled 1."""
    METAINFO = dict()
    # Fixed pool of samples shared by all instances (class attributes).
    data = torch.randn(12, 2)
    label = torch.ones(12)

    def metainfo(self):
        """Return the dataset meta information dict."""
        return self.METAINFO

    def __len__(self):
        """Number of samples in the dataset."""
        return len(self.data)

    def __getitem__(self, index):
        """Return one sample as a dict with 'inputs' and 'data_samples'."""
        return {'inputs': self.data[index], 'data_samples': self.label[index]}
class TensorboardLoggerHook(LoggerHook):
    """Logger hook that writes runner log-buffer records to TensorBoard
    via tensorboardX."""

    def __init__(self, log_dir=None, interval=10, ignore_last=True, reset_flag=True, register_logWithIter_keyword=None):
        # log_dir: target directory for event files; defaults to
        # <runner.work_dir>/tf_logs (set lazily in before_run).
        # register_logWithIter_keyword: substrings of variable names that
        # should be logged against runner.iter instead of runner.epoch.
        super(TensorboardLoggerHook, self).__init__(interval, ignore_last, reset_flag)
        self.log_dir = log_dir
        self.register_logWithIter_keyword = register_logWithIter_keyword
    # NOTE(review): the bare `_only` below (and before each method that
    # follows) looks like a truncated decorator, likely `@master_only` --
    # confirm against the original source before relying on this file.
    _only
    def before_run(self, runner):
        """Create the SummaryWriter (default log dir: <work_dir>/tf_logs)."""
        try:
            from tensorboardX import SummaryWriter
        except ImportError:
            raise ImportError('Please install tensorflow and tensorboardX to use TensorboardLoggerHook.')
        else:
            if (self.log_dir is None):
                self.log_dir = osp.join(runner.work_dir, 'tf_logs')
            self.writer = SummaryWriter(self.log_dir)
    _only  # NOTE(review): truncated decorator, see note above
    def single_log(self, tag, record, global_step):
        """Write one record, dispatching on the tag's '<type>/' prefix
        ('image', 'figure', 'histogram', 'scalar'), then falling back to the
        record's own type."""
        prefix = tag.split('/')[0]
        suffix_tag = '/'.join(tag.split('/')[1:])
        if (prefix == 'image'):
            self.writer.add_image(suffix_tag, record, global_step)
            return
        if (prefix == 'figure'):
            self.writer.add_figure(suffix_tag, record, global_step)
            return
        if (prefix == 'histogram'):
            self.writer.add_histogram(suffix_tag, record, global_step)
            return
        if (prefix == 'scalar'):
            self.writer.add_scalar(suffix_tag, record, global_step)
            return
        # No recognized prefix: infer the writer call from the record type.
        if isinstance(record, str):
            self.writer.add_text(tag, record, global_step)
            return
        if torch.is_tensor(record):
            self.writer.add_scalar(tag, record, global_step)
            return
        # Assumes `record` is array-like with a `.size` attribute here
        # (e.g. a numpy array) -- TODO confirm.
        if (record.size > 1):
            self.writer.add_image(tag, record, global_step)
        else:
            self.writer.add_scalar(tag, record, global_step)
    _only  # NOTE(review): truncated decorator, see note above
    def log(self, runner):
        """Log every entry of runner.log_buffer.output except timing stats."""
        for var in runner.log_buffer.output:
            if (var in ['time', 'data_time']):
                continue
            tag = var
            record = runner.log_buffer.output[var]
            global_step = runner.epoch
            # Variables matching a registered keyword are logged per-iteration.
            if isinstance(self.register_logWithIter_keyword, (tuple, list)):
                for keyword in self.register_logWithIter_keyword:
                    if (var.find(keyword) > (- 1)):
                        global_step = runner.iter
            # 1-based step for display.
            global_step = (global_step + 1)
            # List/tuple records are logged element-wise under '<var>/<idx>'.
            if isinstance(record, (list, tuple)):
                for (idx, rec) in enumerate(record):
                    tag = ((var + '/') + '{}'.format(idx))
                    self.single_log(tag, rec, global_step)
            else:
                self.single_log(tag, record, global_step)
    _only  # NOTE(review): truncated decorator, see note above
    def after_run(self, runner):
        """Flush and close the TensorBoard writer."""
        self.writer.close()
class TripletLoss(nn.Module):
    """Triplet margin loss with cosine ('C') or squared-Euclidean ('E') distance.

    forward() returns (loss, percent_activated) where percent_activated is
    the fraction of triplets that violate the margin (loss > 0).
    """

    def __init__(self, margin=0.4, distance_type='C', account_for_nonzeros=False):
        super(TripletLoss, self).__init__()
        self.margin = margin
        self.distance_type = distance_type.lower().strip()
        self.cos = nn.CosineSimilarity(dim=1, eps=1e-06)
        # When True, average the loss over the violating triplets only
        # instead of over the whole batch.
        self.account_for_nonzeros = account_for_nonzeros

    def forward(self, anchor, positive, negative):
        """Compute the triplet loss for a batch of (anchor, positive, negative)."""
        if self.distance_type == 'c':
            # Negated cosine similarity: higher similarity -> smaller distance.
            distance_positive = -self.cos(anchor, positive)
            distance_negative = -self.cos(anchor, negative)
        elif self.distance_type == 'e':
            distance_positive = (anchor - positive).pow(2).sum(1)
            distance_negative = (anchor - negative).pow(2).sum(1)
        else:
            raise Exception('please specify distance_type as C or E')
        losses = F.relu((distance_positive - distance_negative) + self.margin)
        semi_hard_indexes = [i for i in range(len(losses)) if losses[i] > 0]
        # Bug fix: guard against ZeroDivisionError on an empty batch.
        percent_activated = (len(semi_hard_indexes) / len(losses)) if len(losses) else 0.0
        if self.account_for_nonzeros:
            # Bug fix: the original divided by len(semi_hard_indexes), which
            # raised ZeroDivisionError when no triplet violated the margin;
            # in that case the sum is 0, so dividing by 1 yields 0.
            loss = losses.sum() / max(len(semi_hard_indexes), 1)
        else:
            loss = losses.mean()
        return (loss, percent_activated)
def setup_yaml():
    """Register PyYAML representers: ordered dict dumping, plus py2 unicode."""
    def represent_dict_order(dumper, data):
        # Emit OrderedDict entries as a plain mapping, preserving order.
        return dumper.represent_mapping('tag:yaml.org,2002:map', data.items())

    yaml.add_representer(OrderedDict, represent_dict_order)
    try:
        # `unicode` only exists on Python 2; on Python 3 this raises NameError.
        yaml.add_representer(unicode, unicode_representer)
    except NameError:
        logging.info('python 3 env')
def train_evidence_identifier(evidence_identifier: nn.Module, save_dir: str, train: List[Annotation], val: List[Annotation], documents: Dict[(str, List[List[int]])], model_pars: dict, optimizer=None, scheduler=None, tensorize_model_inputs: bool=True) -> Tuple[(nn.Module, dict)]:
    """Train the sentence-level evidence identifier with checkpointing and early stopping.

    Args:
        evidence_identifier: model mapping (query, sentence) inputs to class logits.
        save_dir: root output directory; checkpoints go under
            <save_dir>/evidence_identifier/.
        train: annotations used for training.
        val: annotations used for validation.
        documents: docid -> tokenized sentences (lists of token ids).
        model_pars: hyper-parameter dict; lr, batch_size, epochs, patience and
            the sampling method are read from model_pars['evidence_identifier'].
        optimizer: optional optimizer; Adam is created when None.
        scheduler: optional LR scheduler, stepped once per batch.
        tensorize_model_inputs: when True, wrap queries/sentences in LongTensors.

    Returns:
        (model restored to the best-epoch weights, results dict of per-epoch
        losses/accuracies/rationale scores).
    """
    def _prep_data_for_epoch(evidence_data: Dict[(str, Dict[(str, List[SentenceEvidence])])], sampler: Callable[([List[SentenceEvidence], Dict[(str, List[SentenceEvidence])]], List[SentenceEvidence])]) -> List[SentenceEvidence]:
        """Build one epoch's example list: shuffle annotation ids, then sample
        sentences per document with the configured sampler."""
        output_sentences = []
        ann_ids = sorted(evidence_data.keys())
        random.shuffle(ann_ids)
        for ann_id in ann_ids:
            for (docid, sentences) in evidence_data[ann_id].items():
                data = sampler(sentences, None)
                output_sentences.extend(data)
        return output_sentences
    logging.info(f'Beginning training with {len(train)} annotations, {len(val)} for validation')
    evidence_identifier_output_dir = os.path.join(save_dir, 'evidence_identifier')
    os.makedirs(save_dir, exist_ok=True)
    os.makedirs(evidence_identifier_output_dir, exist_ok=True)
    model_save_file = os.path.join(evidence_identifier_output_dir, 'evidence_identifier.pt')
    epoch_save_file = os.path.join(evidence_identifier_output_dir, 'evidence_identifier_epoch_data.pt')
    if (optimizer is None):
        optimizer = torch.optim.Adam(evidence_identifier.parameters(), lr=model_pars['evidence_identifier']['lr'])
    # reduction='none': keep per-example losses so they can be summed for
    # logging and normalized by batch size before backprop.
    criterion = nn.CrossEntropyLoss(reduction='none')
    sampling_method = _get_sampling_method(model_pars['evidence_identifier'])
    batch_size = model_pars['evidence_identifier']['batch_size']
    epochs = model_pars['evidence_identifier']['epochs']
    patience = model_pars['evidence_identifier']['patience']
    # NOTE(review): max_grad_norm is read from the 'evidence_classifier'
    # section, unlike every other hyper-parameter above -- confirm intentional.
    max_grad_norm = model_pars['evidence_classifier'].get('max_grad_norm', None)
    evidence_train_data = annotations_to_evidence_identification(train, documents)
    evidence_val_data = annotations_to_evidence_identification(val, documents)
    device = next(evidence_identifier.parameters()).device
    results = {'sampled_epoch_train_losses': [], 'sampled_epoch_val_losses': [], 'full_epoch_val_losses': [], 'full_epoch_val_acc': [], 'full_epoch_val_rationale_scores': []}
    start_epoch = 0
    best_epoch = (- 1)
    best_val_loss = float('inf')
    best_model_state_dict = None
    epoch_data = {}
    # Resume from an existing checkpoint if a previous run left one behind.
    if os.path.exists(epoch_save_file):
        evidence_identifier.load_state_dict(torch.load(model_save_file))
        epoch_data = torch.load(epoch_save_file)
        start_epoch = (epoch_data['epoch'] + 1)
        if bool(epoch_data.get('done', 0)):
            # Previous run already finished: skip the training loop entirely.
            start_epoch = epochs
        results = epoch_data['results']
        best_epoch = start_epoch
        best_model_state_dict = OrderedDict({k: v.cpu() for (k, v) in evidence_identifier.state_dict().items()})
    logging.info(f'Training evidence identifier from epoch {start_epoch} until epoch {epochs}')
    optimizer.zero_grad()
    for epoch in range(start_epoch, epochs):
        # Resample train/val sentence sets each epoch.
        epoch_train_data = _prep_data_for_epoch(evidence_train_data, sampling_method)
        epoch_val_data = _prep_data_for_epoch(evidence_val_data, sampling_method)
        sampled_epoch_train_loss = 0
        evidence_identifier.train()
        logging.info(f'Training with {(len(epoch_train_data) // batch_size)} batches with {len(epoch_train_data)} examples')
        for batch_start in range(0, len(epoch_train_data), batch_size):
            batch_elements = epoch_train_data[batch_start:min((batch_start + batch_size), len(epoch_train_data))]
            (targets, queries, sentences) = zip(*[(s.kls, s.query, s.sentence) for s in batch_elements])
            ids = [(s.ann_id, s.docid, s.index) for s in batch_elements]
            targets = torch.tensor(targets, dtype=torch.long, device=device)
            if tensorize_model_inputs:
                queries = [torch.tensor(q, dtype=torch.long) for q in queries]
                sentences = [torch.tensor(s, dtype=torch.long) for s in sentences]
            preds = evidence_identifier(queries, ids, sentences)
            # Sum for logging; divide by batch size before backprop below.
            loss = criterion(preds, targets.to(device=preds.device)).sum()
            sampled_epoch_train_loss += loss.item()
            loss = (loss / len(preds))
            loss.backward()
            if max_grad_norm:
                torch.nn.utils.clip_grad_norm_(evidence_identifier.parameters(), max_grad_norm)
            optimizer.step()
            if scheduler:
                scheduler.step()
            optimizer.zero_grad()
        sampled_epoch_train_loss /= len(epoch_train_data)
        results['sampled_epoch_train_losses'].append(sampled_epoch_train_loss)
        logging.info(f'Epoch {epoch} sampled training loss {sampled_epoch_train_loss}')
        with torch.no_grad():
            evidence_identifier.eval()
            # Validate on the sampled subset (this loss drives early stopping).
            (sampled_epoch_val_loss, _, sampled_epoch_val_hard_pred, sampled_epoch_val_truth) = make_preds_epoch(evidence_identifier, epoch_val_data, batch_size, device, criterion, tensorize_model_inputs)
            results['sampled_epoch_val_losses'].append(sampled_epoch_val_loss)
            sampled_epoch_val_acc = accuracy_score(sampled_epoch_val_truth, sampled_epoch_val_hard_pred)
            logging.info(f'Epoch {epoch} sampled val loss {sampled_epoch_val_loss}, acc {sampled_epoch_val_acc}')
            # Also validate on the full validation set (non-empty sentences only).
            all_val_data = list(filter((lambda se: (len(se.sentence) > 0)), chain.from_iterable(chain.from_iterable((x.values() for x in evidence_val_data.values())))))
            (epoch_val_loss, epoch_val_soft_pred, epoch_val_hard_pred, epoch_val_truth) = make_preds_epoch(evidence_identifier, all_val_data, batch_size, device, criterion, tensorize_model_inputs)
            results['full_epoch_val_losses'].append(epoch_val_loss)
            results['full_epoch_val_acc'].append(accuracy_score(epoch_val_truth, epoch_val_hard_pred))
            results['full_epoch_val_rationale_scores'].append(score_rationales(val, documents, epoch_val_data, epoch_val_soft_pred))
            logging.info(f"Epoch {epoch} full val loss {epoch_val_loss}, accuracy: {results['full_epoch_val_acc'][(- 1)]}, rationale scores: {results['full_epoch_val_rationale_scores'][(- 1)]}")
            # Checkpoint whenever the sampled validation loss improves.
            if (sampled_epoch_val_loss < best_val_loss):
                logging.debug(f'Epoch {epoch} new best model with sampled val loss {sampled_epoch_val_loss}')
                best_model_state_dict = OrderedDict({k: v.cpu() for (k, v) in evidence_identifier.state_dict().items()})
                best_epoch = epoch
                best_val_loss = sampled_epoch_val_loss
                torch.save(evidence_identifier.state_dict(), model_save_file)
                epoch_data = {'epoch': epoch, 'results': results, 'best_val_loss': best_val_loss, 'done': 0}
                torch.save(epoch_data, epoch_save_file)
            # Early stopping: no improvement for more than `patience` epochs.
            if ((epoch - best_epoch) > patience):
                epoch_data['done'] = 1
                torch.save(epoch_data, epoch_save_file)
                break
    epoch_data['done'] = 1
    epoch_data['results'] = results
    torch.save(epoch_data, epoch_save_file)
    # Restore the best checkpoint before returning.
    evidence_identifier.load_state_dict(best_model_state_dict)
    evidence_identifier = evidence_identifier.to(device=device)
    evidence_identifier.eval()
    return (evidence_identifier, results)
def plot_acc_loss(history):
    """Show the accuracy curve, then the loss curve, for the given history."""
    for plot_fn, panel_tag in ((plot_acc, '(a) '), (plot_loss, '(b) ')):
        plot_fn(history, panel_tag)
        plt.show()
def create_parser(name, root, split='train', **kwargs):
    """Create a dataset parser for *name* rooted at *root*.

    A 'tfds/...' name selects the TFDS parser; otherwise *root* must exist
    and is parsed either as a .tar archive or as an image folder.
    """
    name = name.lower()
    parts = name.split('/', 2)
    prefix = parts[0] if len(parts) > 1 else ''
    name = parts[-1]
    if prefix == 'tfds':
        from .parser_tfds import ParserTfds
        return ParserTfds(root, name, split=split, **kwargs)
    assert os.path.exists(root)
    # A single .tar file is read in place; anything else is an image folder.
    if os.path.isfile(root) and os.path.splitext(root)[1] == '.tar':
        return ParserImageInTar(root, **kwargs)
    return ParserImageFolder(root, **kwargs)
(derivate=True, coderize=True)
_loss
def distribution_focal_loss(pred, label):
    """Distribution Focal Loss: cross-entropy against the two integer bins
    bracketing each continuous label, weighted by linear interpolation.

    pred holds per-bin logits; label holds continuous targets in bin units.
    Returns the per-element (unreduced) loss.
    """
    left_idx = label.long()
    right_idx = left_idx + 1
    # Interpolation weights: closer bin gets the larger weight.
    weight_left = right_idx.float() - label
    weight_right = label - left_idx.float()
    ce_left = F.cross_entropy(pred, left_idx, reduction='none')
    ce_right = F.cross_entropy(pred, right_idx, reduction='none')
    return ce_left * weight_left + ce_right * weight_right
def main():
args = parse_args()
logging_dir = Path(args.output_dir, args.logging_dir)
accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir)
accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with='tensorboard', project_config=accelerator_project_config)
if (args.train_text_encoder and (args.gradient_accumulation_steps > 1) and (accelerator.num_processes > 1)):
raise ValueError('Gradient accumulation is not supported when training the text encoder in distributed training. Please set gradient_accumulation_steps to 1. This feature will be supported in the future.')
if (args.seed is not None):
set_seed(args.seed)
if args.with_prior_preservation:
class_images_dir = Path(args.class_data_dir)
if (not class_images_dir.exists()):
class_images_dir.mkdir(parents=True)
cur_class_images = len(list(class_images_dir.iterdir()))
if (cur_class_images < args.num_class_images):
torch_dtype = (torch.float16 if (accelerator.device.type == 'cuda') else torch.float32)
pipeline = StableDiffusionInpaintPipeline.from_pretrained(args.pretrained_model_name_or_path, torch_dtype=torch_dtype, safety_checker=None)
pipeline.set_progress_bar_config(disable=True)
num_new_images = (args.num_class_images - cur_class_images)
logger.info(f'Number of class images to sample: {num_new_images}.')
sample_dataset = PromptDataset(args.class_prompt, num_new_images)
sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size, num_workers=1)
sample_dataloader = accelerator.prepare(sample_dataloader)
pipeline.to(accelerator.device)
transform_to_pil = transforms.ToPILImage()
for example in tqdm(sample_dataloader, desc='Generating class images', disable=(not accelerator.is_local_main_process)):
bsz = len(example['prompt'])
fake_images = torch.rand((3, args.resolution, args.resolution))
transform_to_pil = transforms.ToPILImage()
fake_pil_images = transform_to_pil(fake_images)
fake_mask = random_mask((args.resolution, args.resolution), ratio=1, mask_full_image=True)
images = pipeline(prompt=example['prompt'], mask_image=fake_mask, image=fake_pil_images).images
for (i, image) in enumerate(images):
hash_image = insecure_hashlib.sha1(image.tobytes()).hexdigest()
image_filename = (class_images_dir / f"{(example['index'][i] + cur_class_images)}-{hash_image}.jpg")
image.save(image_filename)
del pipeline
if torch.cuda.is_available():
torch.cuda.empty_cache()
if accelerator.is_main_process:
if (args.output_dir is not None):
os.makedirs(args.output_dir, exist_ok=True)
if args.push_to_hub:
repo_id = create_repo(repo_id=(args.hub_model_id or Path(args.output_dir).name), exist_ok=True, token=args.hub_token).repo_id
if args.tokenizer_name:
tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name)
elif args.pretrained_model_name_or_path:
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
vae.requires_grad_(False)
text_encoder.requires_grad_(False)
unet.requires_grad_(False)
weight_dtype = torch.float32
if (args.mixed_precision == 'fp16'):
weight_dtype = torch.float16
elif (args.mixed_precision == 'bf16'):
weight_dtype = torch.bfloat16
unet.to(accelerator.device, dtype=weight_dtype)
vae.to(accelerator.device, dtype=weight_dtype)
text_encoder.to(accelerator.device, dtype=weight_dtype)
if args.enable_xformers_memory_efficient_attention:
if is_xformers_available():
unet.enable_xformers_memory_efficient_attention()
else:
raise ValueError('xformers is not available. Make sure it is installed correctly')
lora_attn_procs = {}
for name in unet.attn_processors.keys():
cross_attention_dim = (None if name.endswith('attn1.processor') else unet.config.cross_attention_dim)
if name.startswith('mid_block'):
hidden_size = unet.config.block_out_channels[(- 1)]
elif name.startswith('up_blocks'):
block_id = int(name[len('up_blocks.')])
hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
elif name.startswith('down_blocks'):
block_id = int(name[len('down_blocks.')])
hidden_size = unet.config.block_out_channels[block_id]
lora_attn_procs[name] = LoRAAttnProcessor(hidden_size=hidden_size, cross_attention_dim=cross_attention_dim)
unet.set_attn_processor(lora_attn_procs)
lora_layers = AttnProcsLayers(unet.attn_processors)
accelerator.register_for_checkpointing(lora_layers)
if args.scale_lr:
args.learning_rate = (((args.learning_rate * args.gradient_accumulation_steps) * args.train_batch_size) * accelerator.num_processes)
if args.use_8bit_adam:
try:
import bitsandbytes as bnb
except ImportError:
raise ImportError('To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`.')
optimizer_class = bnb.optim.AdamW8bit
else:
optimizer_class = torch.optim.AdamW
optimizer = optimizer_class(lora_layers.parameters(), lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon)
noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder='scheduler')
train_dataset = DreamBoothDataset(instance_data_root=args.instance_data_dir, instance_prompt=args.instance_prompt, class_data_root=(args.class_data_dir if args.with_prior_preservation else None), class_prompt=args.class_prompt, tokenizer=tokenizer, size=args.resolution, center_crop=args.center_crop)
def collate_fn(examples):
    """Collate DreamBooth inpainting examples into a training batch.

    Pads the prompt token ids, stacks the images, and synthesizes one random
    inpainting mask (plus the corresponding masked image) per sample.  With
    prior preservation enabled, class examples are appended after the
    instance examples, in the same order.
    """
    prompt_ids = [ex['instance_prompt_ids'] for ex in examples]
    images = [ex['instance_images'] for ex in examples]
    prior_pil = []
    if args.with_prior_preservation:
        prompt_ids += [ex['class_prompt_ids'] for ex in examples]
        images += [ex['class_images'] for ex in examples]
        prior_pil = [ex['class_PIL_images'] for ex in examples]
    masks = []
    masked_images = []
    for ex in examples:
        pil = ex['PIL_images']
        rnd = random_mask(pil.size, 1, False)
        mask, masked = prepare_mask_and_masked_image(pil, rnd)
        masks.append(mask)
        masked_images.append(masked)
    # class (prior) images get their own random masks, appended after the instance ones
    for pil in prior_pil:
        rnd = random_mask(pil.size, 1, False)
        mask, masked = prepare_mask_and_masked_image(pil, rnd)
        masks.append(mask)
        masked_images.append(masked)
    pixel_values = torch.stack(images)
    pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
    input_ids = tokenizer.pad({'input_ids': prompt_ids}, padding=True, return_tensors='pt').input_ids
    return {'input_ids': input_ids, 'pixel_values': pixel_values, 'masks': torch.stack(masks), 'masked_images': torch.stack(masked_images)}
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn)
overrode_max_train_steps = False
num_update_steps_per_epoch = math.ceil((len(train_dataloader) / args.gradient_accumulation_steps))
if (args.max_train_steps is None):
    # derive the step budget from the epoch count when not given explicitly
    args.max_train_steps = (args.num_train_epochs * num_update_steps_per_epoch)
    overrode_max_train_steps = True
lr_scheduler = get_scheduler(args.lr_scheduler, optimizer=optimizer, num_warmup_steps=(args.lr_warmup_steps * accelerator.num_processes), num_training_steps=(args.max_train_steps * accelerator.num_processes))
(lora_layers, optimizer, train_dataloader, lr_scheduler) = accelerator.prepare(lora_layers, optimizer, train_dataloader, lr_scheduler)
# dataloader length can change after accelerator sharding; recompute the schedule
num_update_steps_per_epoch = math.ceil((len(train_dataloader) / args.gradient_accumulation_steps))
if overrode_max_train_steps:
    args.max_train_steps = (args.num_train_epochs * num_update_steps_per_epoch)
args.num_train_epochs = math.ceil((args.max_train_steps / num_update_steps_per_epoch))
if accelerator.is_main_process:
    accelerator.init_trackers('dreambooth-inpaint-lora', config=vars(args))
total_batch_size = ((args.train_batch_size * accelerator.num_processes) * args.gradient_accumulation_steps)
logger.info('***** Running training *****')
logger.info(f' Num examples = {len(train_dataset)}')
logger.info(f' Num batches each epoch = {len(train_dataloader)}')
logger.info(f' Num Epochs = {args.num_train_epochs}')
logger.info(f' Instantaneous batch size per device = {args.train_batch_size}')
logger.info(f' Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}')
logger.info(f' Gradient Accumulation steps = {args.gradient_accumulation_steps}')
logger.info(f' Total optimization steps = {args.max_train_steps}')
global_step = 0
first_epoch = 0
# --- optional resume from a saved accelerator checkpoint ------------------
if args.resume_from_checkpoint:
    if (args.resume_from_checkpoint != 'latest'):
        path = os.path.basename(args.resume_from_checkpoint)
    else:
        # pick the checkpoint-N directory with the highest step number
        dirs = os.listdir(args.output_dir)
        dirs = [d for d in dirs if d.startswith('checkpoint')]
        dirs = sorted(dirs, key=(lambda x: int(x.split('-')[1])))
        path = (dirs[(- 1)] if (len(dirs) > 0) else None)
    if (path is None):
        accelerator.print(f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run.")
        args.resume_from_checkpoint = None
    else:
        accelerator.print(f'Resuming from checkpoint {path}')
        accelerator.load_state(os.path.join(args.output_dir, path))
        global_step = int(path.split('-')[1])
        resume_global_step = (global_step * args.gradient_accumulation_steps)
        first_epoch = (global_step // num_update_steps_per_epoch)
        # micro-steps to skip within the first resumed epoch
        resume_step = (resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps))
progress_bar = tqdm(range(global_step, args.max_train_steps), disable=(not accelerator.is_local_main_process))
progress_bar.set_description('Steps')
# --- training loop --------------------------------------------------------
for epoch in range(first_epoch, args.num_train_epochs):
    unet.train()
    for (step, batch) in enumerate(train_dataloader):
        # fast-forward through already-trained steps when resuming
        if (args.resume_from_checkpoint and (epoch == first_epoch) and (step < resume_step)):
            if ((step % args.gradient_accumulation_steps) == 0):
                progress_bar.update(1)
            continue
        with accelerator.accumulate(unet):
            # encode images and masked images into the VAE latent space
            latents = vae.encode(batch['pixel_values'].to(dtype=weight_dtype)).latent_dist.sample()
            latents = (latents * vae.config.scaling_factor)
            masked_latents = vae.encode(batch['masked_images'].reshape(batch['pixel_values'].shape).to(dtype=weight_dtype)).latent_dist.sample()
            masked_latents = (masked_latents * vae.config.scaling_factor)
            masks = batch['masks']
            # downsample masks to latent resolution (VAE spatial factor 8)
            mask = torch.stack([torch.nn.functional.interpolate(mask, size=((args.resolution // 8), (args.resolution // 8))) for mask in masks]).to(dtype=weight_dtype)
            mask = mask.reshape((- 1), 1, (args.resolution // 8), (args.resolution // 8))
            # sample per-example noise and a random diffusion timestep
            noise = torch.randn_like(latents)
            bsz = latents.shape[0]
            timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
            timesteps = timesteps.long()
            noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
            # inpainting UNet input: noisy latents + mask + masked-image latents, channel-concatenated
            latent_model_input = torch.cat([noisy_latents, mask, masked_latents], dim=1)
            encoder_hidden_states = text_encoder(batch['input_ids'])[0]
            noise_pred = unet(latent_model_input, timesteps, encoder_hidden_states).sample
            # regression target depends on the scheduler's parameterization
            if (noise_scheduler.config.prediction_type == 'epsilon'):
                target = noise
            elif (noise_scheduler.config.prediction_type == 'v_prediction'):
                target = noise_scheduler.get_velocity(latents, noise, timesteps)
            else:
                raise ValueError(f'Unknown prediction type {noise_scheduler.config.prediction_type}')
            if args.with_prior_preservation:
                # first half of the batch is instance data, second half class (prior) data
                (noise_pred, noise_pred_prior) = torch.chunk(noise_pred, 2, dim=0)
                (target, target_prior) = torch.chunk(target, 2, dim=0)
                loss = F.mse_loss(noise_pred.float(), target.float(), reduction='none').mean([1, 2, 3]).mean()
                prior_loss = F.mse_loss(noise_pred_prior.float(), target_prior.float(), reduction='mean')
                loss = (loss + (args.prior_loss_weight * prior_loss))
            else:
                loss = F.mse_loss(noise_pred.float(), target.float(), reduction='mean')
            accelerator.backward(loss)
            if accelerator.sync_gradients:
                # clip only the trainable LoRA parameters
                params_to_clip = lora_layers.parameters()
                accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
        if accelerator.sync_gradients:
            # one full optimizer update completed
            progress_bar.update(1)
            global_step += 1
            if ((global_step % args.checkpointing_steps) == 0):
                if accelerator.is_main_process:
                    save_path = os.path.join(args.output_dir, f'checkpoint-{global_step}')
                    accelerator.save_state(save_path)
                    logger.info(f'Saved state to {save_path}')
        logs = {'loss': loss.detach().item(), 'lr': lr_scheduler.get_last_lr()[0]}
        progress_bar.set_postfix(**logs)
        accelerator.log(logs, step=global_step)
        if (global_step >= args.max_train_steps):
            break
accelerator.wait_for_everyone()
if accelerator.is_main_process:
    # save only the LoRA attention processors, not the full UNet
    unet = unet.to(torch.float32)
    unet.save_attn_procs(args.output_dir)
    if args.push_to_hub:
        upload_folder(repo_id=repo_id, folder_path=args.output_dir, commit_message='End of training', ignore_patterns=['step_*', 'epoch_*'])
accelerator.end_training()
def gaussian_focal_loss_with_pos_inds(pred: Tensor, gaussian_target: Tensor, pos_inds: Tensor, pos_labels: Tensor, alpha: float=2.0, gamma: float=4.0, pos_weight: float=1.0, neg_weight: float=1.0, reduction: str='mean', avg_factor: Optional[Union[(int, float)]]=None) -> Tensor:
    """Gaussian focal loss (CornerNet-style) with positives given explicitly.

    `pos_inds` selects the positive pixels and `pos_labels` the class channel
    at each of them; every pixel also contributes a negative term weighted by
    how far the gaussian target is from 1.  Each term is reduced separately
    via `weight_reduce_loss` and the weighted sum is returned.
    """
    eps = 1e-12
    # negatives near a gaussian peak (target close to 1) are down-weighted
    neg_weights = (1 - gaussian_target).pow(gamma)
    positive_scores = pred[pos_inds].gather(1, pos_labels.unsqueeze(1))
    positive_term = -(positive_scores + eps).log() * (1 - positive_scores).pow(alpha)
    positive_term = weight_reduce_loss(positive_term, None, reduction, avg_factor)
    negative_term = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights
    negative_term = weight_reduce_loss(negative_term, None, reduction, avg_factor)
    return pos_weight * positive_term + neg_weight * negative_term
class UDATrainer(Trainer):
    """Unsupervised domain-adaptation trainer for semantic segmentation.

    Trains supervised on a source domain (SYNTHIA or GTA5) while adding a
    self-supervised loss on the unlabeled target domain (Cityscapes-style),
    selected by ``args.target_mode`` (hard pseudo-labels, entropy, or
    max-square variants).  Training proceeds in rounds of several epochs.
    """

    def __init__(self, args, cuda=None, train_id='None', logger=None):
        super().__init__(args, cuda, train_id, logger)
        # source-domain training loader (SYNTHIA or GTA5)
        if (self.args.source_dataset == 'synthia'):
            source_data_set = SYNTHIA_Dataset(args, data_root_path=args.source_data_path, list_path=args.source_list_path, split=args.source_split, base_size=args.base_size, crop_size=args.crop_size, class_16=args.class_16)
        else:
            source_data_set = GTA5_Dataset(args, data_root_path=args.source_data_path, list_path=args.source_list_path, split=args.source_split, base_size=args.base_size, crop_size=args.crop_size)
        self.source_dataloader = data.DataLoader(source_data_set, batch_size=self.args.batch_size, shuffle=True, num_workers=self.args.data_loader_workers, pin_memory=self.args.pin_memory, drop_last=True)
        # source-domain validation loader
        if (self.args.source_dataset == 'synthia'):
            source_data_set = SYNTHIA_Dataset(args, data_root_path=args.source_data_path, list_path=args.source_list_path, split='val', base_size=args.base_size, crop_size=args.crop_size, class_16=args.class_16)
        else:
            source_data_set = GTA5_Dataset(args, data_root_path=args.source_data_path, list_path=args.source_list_path, split='val', base_size=args.base_size, crop_size=args.crop_size)
        self.source_val_dataloader = data.DataLoader(source_data_set, batch_size=self.args.batch_size, shuffle=False, num_workers=self.args.data_loader_workers, pin_memory=self.args.pin_memory, drop_last=True)
        print(self.args.source_dataset, self.args.target_dataset)
        # target-domain loaders (unlabeled for training, labeled split for validation)
        target_data_set = City_Dataset(args, data_root_path=args.data_root_path, list_path=args.list_path, split=args.split, base_size=args.target_base_size, crop_size=args.target_crop_size, class_16=args.class_16)
        self.target_dataloader = data.DataLoader(target_data_set, batch_size=self.args.batch_size, shuffle=True, num_workers=self.args.data_loader_workers, pin_memory=self.args.pin_memory, drop_last=True)
        target_data_set = City_Dataset(args, data_root_path=args.data_root_path, list_path=args.list_path, split='val', base_size=args.target_base_size, crop_size=args.target_crop_size, class_16=args.class_16)
        self.target_val_dataloader = data.DataLoader(target_data_set, batch_size=self.args.batch_size, shuffle=False, num_workers=self.args.data_loader_workers, pin_memory=self.args.pin_memory, drop_last=True)
        # validation in the base Trainer runs on the target domain
        self.dataloader.val_loader = self.target_val_dataloader
        self.dataloader.valid_iterations = ((len(target_data_set) + self.args.batch_size) // self.args.batch_size)
        self.ignore_index = (- 1)
        # pick the target-domain self-supervision loss
        if (self.args.target_mode == 'hard'):
            self.target_loss = nn.CrossEntropyLoss(ignore_index=(- 1))
        elif (self.args.target_mode == 'entropy'):
            self.target_loss = softCrossEntropy(ignore_index=(- 1))
        elif (self.args.target_mode == 'IW_entropy'):
            self.target_loss = IWsoftCrossEntropy(ignore_index=(- 1), num_class=self.args.num_classes, ratio=self.args.IW_ratio)
        elif (self.args.target_mode == 'maxsquare'):
            self.target_loss = MaxSquareloss(ignore_index=(- 1), num_class=self.args.num_classes)
        elif (self.args.target_mode == 'IW_maxsquare'):
            self.target_loss = IW_MaxSquareloss(ignore_index=(- 1), num_class=self.args.num_classes, ratio=self.args.IW_ratio)
        self.current_round = self.args.init_round
        self.round_num = self.args.round_num
        self.target_loss.to(self.device)
        # hard pseudo-label loss used for the auxiliary (multi) head
        self.target_hard_loss = nn.CrossEntropyLoss(ignore_index=(- 1))

    def main(self):
        """Entry point: log the config, optionally restore a checkpoint, and train."""
        self.logger.info('Global configuration as follows:')
        for (key, val) in vars(self.args).items():
            self.logger.info('{:16} {}'.format(key, val))
        current_device = torch.cuda.current_device()
        self.logger.info('This model will run on {}'.format(torch.cuda.get_device_name(current_device)))
        if (self.args.pretrained_ckpt_file is not None):
            # a directory means "use the final checkpoint of this train_id"
            if os.path.isdir(self.args.pretrained_ckpt_file):
                self.args.pretrained_ckpt_file = os.path.join(self.args.checkpoint_dir, (self.train_id + 'final.pth'))
            self.load_checkpoint(self.args.pretrained_ckpt_file)
        if (not self.args.continue_training):
            self.best_MIou = 0
            self.best_iter = 0
            self.current_iter = 0
            self.current_epoch = 0
        if self.args.continue_training:
            self.load_checkpoint(os.path.join(self.args.checkpoint_dir, (self.train_id + 'final.pth')))
        # total iteration budget across all rounds (drives the poly LR schedule)
        self.args.iter_max = ((self.dataloader.num_iterations * self.args.epoch_each_round) * self.round_num)
        print(self.args.iter_max, self.dataloader.num_iterations)
        self.train_round()
        self.writer.close()

    def train_round(self):
        """Run the remaining self-training rounds, each of `epoch_each_round` epochs."""
        for r in range(self.current_round, self.round_num):
            print('\n Begin {}/{} Round! \n'.format((self.current_round + 1), self.round_num))
            print('epoch_each_round:', self.args.epoch_each_round)
            # epoch_num is cumulative: train() runs until this epoch count
            self.epoch_num = ((self.current_round + 1) * self.args.epoch_each_round)
            self.threshold = self.args.threshold
            self.train()
            self.current_round += 1

    def train_one_epoch(self):
        """One epoch of joint source-supervised + target self-supervised training."""
        tqdm_epoch = tqdm(zip(self.source_dataloader, self.target_dataloader), total=self.dataloader.num_iterations, desc='Train Round-{}-Epoch-{}-total-{}'.format(self.current_round, (self.current_epoch + 1), self.epoch_num))
        self.logger.info('Training one epoch...')
        self.Eval.reset()
        loss_seg_value = 0
        loss_target_value = 0
        loss_seg_value_2 = 0
        loss_target_value_2 = 0
        iter_num = self.dataloader.num_iterations
        if self.args.freeze_bn:
            # eval() freezes batch-norm running statistics
            self.model.eval()
            self.logger.info('freeze bacth normalization successfully!')
        else:
            self.model.train()
        batch_idx = 0
        for (batch_s, batch_t) in tqdm_epoch:
            self.poly_lr_scheduler(optimizer=self.optimizer, init_lr=self.args.lr)
            self.writer.add_scalar('learning_rate', self.optimizer.param_groups[0]['lr'], self.current_iter)
            # ---- source domain: supervised segmentation loss ----
            (x, y, _) = batch_s
            if self.cuda:
                (x, y) = (Variable(x).to(self.device), Variable(y).to(device=self.device, dtype=torch.long))
            pred = self.model(x)
            # model may return (main head, auxiliary head)
            if isinstance(pred, tuple):
                pred_2 = pred[1]
                pred = pred[0]
            y = torch.squeeze(y, 1)
            loss = self.loss(pred, y)
            loss_ = loss
            if self.args.multi:
                loss_2 = (self.args.lambda_seg * self.loss(pred_2, y))
                loss_ += loss_2
                loss_seg_value_2 += (loss_2.cpu().item() / iter_num)
            loss_.backward()
            loss_seg_value += (loss.cpu().item() / iter_num)
            # ---- target domain: self-supervised loss ----
            (x, _, _) = batch_t
            if self.cuda:
                x = Variable(x).to(self.device)
            pred = self.model(x)
            if isinstance(pred, tuple):
                pred_2 = pred[1]
                pred = pred[0]
                pred_P_2 = F.softmax(pred_2, dim=1)
            pred_P = F.softmax(pred, dim=1)
            if (self.args.target_mode == 'hard'):
                # hard pseudo-labels from the model's own predictions
                label = torch.argmax(pred_P.detach(), dim=1)
                if self.args.multi:
                    label_2 = torch.argmax(pred_P_2.detach(), dim=1)
            else:
                # soft "labels" (probability maps) for entropy/max-square losses
                label = pred_P
                if self.args.multi:
                    label_2 = pred_P_2
            (maxpred, argpred) = torch.max(pred_P.detach(), dim=1)
            if self.args.multi:
                (maxpred_2, argpred_2) = torch.max(pred_P_2.detach(), dim=1)
            if (self.args.target_mode == 'hard'):
                # keep only confident pixels; the rest become ignore_index
                mask = (maxpred > self.threshold)
                label = torch.where(mask, label, (torch.ones(1).to(self.device, dtype=torch.long) * self.ignore_index))
            loss_target = (self.args.lambda_target * self.target_loss(pred, label))
            loss_target_ = loss_target
            if self.args.multi:
                # auxiliary head: hard pseudo-labels from the averaged prediction
                pred_c = ((pred_P + pred_P_2) / 2)
                (maxpred_c, argpred_c) = torch.max(pred_c, dim=1)
                mask = ((maxpred > self.threshold) | (maxpred_2 > self.threshold))
                label_2 = torch.where(mask, argpred_c, (torch.ones(1).to(self.device, dtype=torch.long) * self.ignore_index))
                loss_target_2 = ((self.args.lambda_seg * self.args.lambda_target) * self.target_hard_loss(pred_2, label_2))
                loss_target_ += loss_target_2
                # NOTE(review): accumulated as a tensor (no .cpu().item() as for
                # loss_seg_value above), which keeps autograd history referenced
                # across iterations — confirm whether this is intended.
                loss_target_value_2 += (loss_target_2 / iter_num)
            loss_target_.backward()
            # NOTE(review): same tensor-vs-scalar accumulation concern as above.
            loss_target_value += (loss_target / iter_num)
            # gradients from both domains were accumulated; single update here
            self.optimizer.step()
            self.optimizer.zero_grad()
            if ((batch_idx % 400) == 0):
                if self.args.multi:
                    self.logger.info('epoch{}-batch-{}:loss_seg={:.3f}-loss_target={:.3f}; loss_seg_2={:.3f}-loss_target_2={:.3f}; mask={:.3f}'.format(self.current_epoch, batch_idx, loss.item(), loss_target.item(), loss_2.item(), loss_target_2.item(), mask.float().mean().item()))
                else:
                    self.logger.info('epoch{}-batch-{}:loss_seg={:.3f}-loss_target={:.3f}'.format(self.current_epoch, batch_idx, loss.item(), loss_target.item()))
            batch_idx += 1
            self.current_iter += 1
        self.writer.add_scalar('train_loss', loss_seg_value, self.current_epoch)
        tqdm.write('The average loss of train epoch-{}-:{}'.format(self.current_epoch, loss_seg_value))
        self.writer.add_scalar('target_loss', loss_target_value, self.current_epoch)
        tqdm.write('The average target_loss of train epoch-{}-:{:.3f}'.format(self.current_epoch, loss_target_value))
        if self.args.multi:
            self.writer.add_scalar('train_loss_2', loss_seg_value_2, self.current_epoch)
            tqdm.write('The average loss_2 of train epoch-{}-:{}'.format(self.current_epoch, loss_seg_value_2))
            self.writer.add_scalar('target_loss_2', loss_target_value_2, self.current_epoch)
            tqdm.write('The average target_loss_2 of train epoch-{}-:{:.3f}'.format(self.current_epoch, loss_target_value_2))
        tqdm_epoch.close()
        self.validate_source()
class GroupViTConfig(PretrainedConfig):
    """Composite configuration for GroupViT: a text sub-config, a vision
    sub-config, and the shared projection / logit-scale hyper-parameters."""

    model_type = 'groupvit'
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=256, projection_intermediate_dim=4096, logit_scale_init_value=2.6592, **kwargs):
        # `text_config_dict`/`vision_config_dict` are the legacy way of passing
        # the sub-configs; when present they take precedence over
        # `text_config`/`vision_config` (with a warning on conflicting keys).
        text_config_dict = kwargs.pop('text_config_dict', None)
        vision_config_dict = kwargs.pop('vision_config_dict', None)
        super().__init__(**kwargs)
        if (text_config_dict is not None):
            if (text_config is None):
                text_config = {}
            _text_config_dict = GroupViTTextConfig(**text_config_dict).to_dict()
            for (key, value) in _text_config_dict.items():
                if ((key in text_config) and (value != text_config[key]) and (key not in ['transformers_version'])):
                    # warn whenever the legacy dict overrides an explicit value
                    if (key in text_config_dict):
                        message = f'`{key}` is found in both `text_config_dict` and `text_config` but with different values. The value `text_config_dict["{key}"]` will be used instead.'
                    else:
                        message = f'`text_config_dict` is provided which will be used to initialize `GroupViTTextConfig`. The value `text_config["{key}"]` will be overriden.'
                    logger.warning(message)
            text_config.update(_text_config_dict)
        if (vision_config_dict is not None):
            if (vision_config is None):
                vision_config = {}
            _vision_config_dict = GroupViTVisionConfig(**vision_config_dict).to_dict()
            # id2label keys must be strings for a stable dict comparison below
            if ('id2label' in _vision_config_dict):
                _vision_config_dict['id2label'] = {str(key): value for (key, value) in _vision_config_dict['id2label'].items()}
            for (key, value) in _vision_config_dict.items():
                if ((key in vision_config) and (value != vision_config[key]) and (key not in ['transformers_version'])):
                    if (key in vision_config_dict):
                        message = f'`{key}` is found in both `vision_config_dict` and `vision_config` but with different values. The value `vision_config_dict["{key}"]` will be used instead.'
                    else:
                        message = f'`vision_config_dict` is provided which will be used to initialize `GroupViTVisionConfig`. The value `vision_config["{key}"]` will be overriden.'
                    logger.warning(message)
            vision_config.update(_vision_config_dict)
        if (text_config is None):
            text_config = {}
            logger.info('`text_config` is `None`. Initializing the `GroupViTTextConfig` with default values.')
        if (vision_config is None):
            vision_config = {}
            logger.info('`vision_config` is `None`. initializing the `GroupViTVisionConfig` with default values.')
        self.text_config = GroupViTTextConfig(**text_config)
        self.vision_config = GroupViTVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.projection_intermediate_dim = projection_intermediate_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_range = 0.02
        self.initializer_factor = 1.0
        self.output_segmentation = False

    @classmethod
    def from_text_vision_configs(cls, text_config: GroupViTTextConfig, vision_config: GroupViTVisionConfig, **kwargs):
        """Instantiate a GroupViTConfig from the two sub-configs.

        BUG FIX: restored the missing @classmethod decorator; without it,
        calling this on the class would bind the first sub-config to `cls`.
        """
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, recursing into both sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
def dct_2D(x):
    """Apply a 2-D orthonormal DCT (type II) over two axes of a rank-6 tensor.

    tf.signal.dct only transforms the LAST axis, so each target axis is moved
    into last position, transformed, and then the original layout is restored.
    NOTE(review): assumes x is rank 6 and the two transform axes are axis 5 and
    axis 4 of the input — confirm against callers.
    """
    x = tf.transpose(x, [0, 5, 1, 2, 3, 4])  # bring axis 5 to the end... after this, axis 5 sits last
    x = tf.signal.dct(x, norm='ortho')       # DCT along the current last axis
    x = tf.transpose(x, [0, 1, 2, 3, 5, 4])  # swap the last two axes
    x = tf.signal.dct(x, norm='ortho')       # DCT along the other spatial axis
    x = tf.transpose(x, [0, 1, 2, 3, 5, 4])  # undo the swap
    x = tf.transpose(x, [0, 2, 3, 4, 5, 1])  # restore the input's axis ordering
    return x
_config
def model_fcn3():
    """Config preset selecting the FCN3 learner (2 groups, unnormalized output).

    NOTE(review): `cfg` is assigned but never returned; together with the
    mangled decorator residue on the preceding line this looks like a
    sacred/gin-style config function whose locals are captured by the
    framework — confirm before treating the missing return as a bug.
    """
    cfg = {'learner': {'model': 'FCN3', 'model_kwargs': {'num_groups': 2, 'normalize_output': False}}}
class Task(enum.Enum):
    """Supported prediction tasks, identified by their string value."""

    NEXT_ACTIVITY = 'next_activity'    # predict the next activity
    NEXT_TIME = 'next_time'            # predict the time of the next event
    REMAINING_TIME = 'remaining_time'  # predict the remaining time
def _assert_scenes_collated(split):
    """Assert every scene id occurs in one contiguous run of episodes."""
    seen = set()
    for ii, ep in enumerate(split.episodes):
        if ep.scene_id not in seen:
            seen.add(ep.scene_id)
        else:
            # a repeated scene id must continue the current run
            assert split.episodes[ii - 1].scene_id == ep.scene_id


def _has_uncollated_scene(split):
    """Return True if some scene id reappears non-contiguously."""
    seen = set()
    for ii, ep in enumerate(split.episodes):
        if ep.scene_id not in seen:
            seen.add(ep.scene_id)
        elif split.episodes[ii - 1].scene_id != ep.scene_id:
            return True
    return False


def test_get_splits_collate_scenes():
    """get_splits must group episodes by scene iff collate_scene_ids is set.

    Covers both explicit episode counts and the default (even) division,
    for collated and non-collated splitting.  (Refactored: the four copies
    of the collation check now share two helpers.)
    """
    # explicit episode count, collated
    dataset = _construct_dataset(10000)
    splits = dataset.get_splits(10, 23, collate_scene_ids=True)
    assert len(splits) == 10
    for split in splits:
        assert len(split.episodes) == 23
        _assert_scenes_collated(split)
    # explicit episode count, not collated — expect at least one break
    dataset = _construct_dataset(10000)
    splits = dataset.get_splits(10, 200, collate_scene_ids=False)
    assert len(splits) == 10
    for split in splits:
        assert _has_uncollated_scene(split)
    # default episode count (10000 / 10), collated
    dataset = _construct_dataset(10000)
    splits = dataset.get_splits(10, collate_scene_ids=True)
    assert len(splits) == 10
    for split in splits:
        assert len(split.episodes) == 1000
        _assert_scenes_collated(split)
    # default episode count, not collated
    dataset = _construct_dataset(10000)
    splits = dataset.get_splits(10, collate_scene_ids=False)
    assert len(splits) == 10
    for split in splits:
        assert _has_uncollated_scene(split)
def get_word_idx(context, wordss, idx):
    """Return the character offset in `context` of the word addressed by
    `idx = (sentence_index, word_index)` within the 2-D word list `wordss`."""
    sent_i, word_i = idx
    spans = get_2d_spans(context, wordss)
    # each span is a (start, stop) character pair; report the start
    return spans[sent_i][word_i][0]
class IPyflowShell(SingletonConfigurable):
    """Singleton shell base whose subclasses register as IPython shells."""

    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        # make every concrete subclass pass isinstance checks against
        # IPython's InteractiveShellABC
        InteractiveShellABC.register(cls)
class FlaxRobertaForSequenceClassification(metaclass=DummyObject):
    """Placeholder emitted when Flax is not installed."""
    # backend(s) whose absence this dummy stands in for
    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        # fails fast with an instructive ImportError mentioning the flax backend
        requires_backends(self, ['flax'])
class ResnetBlock(nn.Module):
def __init__(self, dim, use_bias):
super(ResnetBlock, self).__init__()
conv_block = []
conv_block += [nn.ReflectionPad2d(1), nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=0, bias=use_bias), nn.InstanceNorm2d(dim), nn.ReLU(True)]
conv_block += [nn.ReflectionPad2d(1), nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=0, bias=use_bias), nn.InstanceNorm2d(dim)]
self.conv_block = nn.Sequential(*conv_block)
def forward(self, x):
out = (x + self.conv_block(x))
return out |
def explicit_line_join(logical_line, tokens):
    r"""Yield E502 for a backslash line-continuation that is redundant
    because the line break already occurs inside open brackets.

    Walks the token stream tracking bracket depth, the position of the last
    trailing backslash, and whether a comment has been seen (a comment makes
    the backslash part of the comment text, so no warning is issued).
    """
    prev_start = prev_end = bracket_depth = 0
    saw_comment = False
    backslash_pos = None
    for token_type, text, start, end, line in tokens:
        if token_type == tokenize.COMMENT:
            saw_comment = True
        moved_to_new_row = start[0] != prev_start
        if moved_to_new_row and bracket_depth and backslash_pos and not saw_comment:
            yield backslash_pos, 'E502 the backslash is redundant between brackets'
        if end[0] == prev_end:
            # token stays on the same physical line
            prev_start = start[0]
        else:
            # remember where a trailing backslash sits, if any
            if line.rstrip('\r\n').endswith('\\'):
                backslash_pos = (end[0], len(line.splitlines()[-1]) - 1)
            else:
                backslash_pos = None
            prev_start = prev_end = end[0]
        if token_type == tokenize.OP:
            if text in '([{':
                bracket_depth += 1
            elif text in ')]}':
                bracket_depth -= 1
.register
def semseg_test_xyz(model, model_fn):
    """Smoke-test a semantic-segmentation model on random 6-feature point
    clouds with random 3-class labels (requires CUDA)."""
    batch, num_points = 4, 2048
    inputs = torch.randn(batch, num_points, 6).cuda()
    flat = np.random.randint(0, 3, size=(batch * num_points))
    labels = torch.from_numpy(flat).view(batch, num_points).cuda()
    model.cuda()
    _test_loop(model, model_fn, inputs, labels)
def ResNet_152(input_size, **kwargs):
    """Build a ResNet-152: Bottleneck blocks with the canonical
    [3, 8, 36, 3] stage depths."""
    return ResNet(input_size, Bottleneck, [3, 8, 36, 3], **kwargs)
class Dataset():
    """Filesystem layout helper: resolves per-file paths (source image,
    keypoints, depth, affine) under a dataset root directory."""

    def __init__(self, dataset_dir, affine_identity_path, use_dir=None):
        self._dataset_dir = dataset_dir
        self._affine_identity_path = affine_identity_path
        # 'source' is the default folder to enumerate identities from
        self.use_folder = 'source' if use_dir is None else use_dir

    def identity_iterator(self):
        """Yield the identity extracted from each filename in the active folder."""
        folder = os.path.join(self._dataset_dir, self.use_folder)
        for filename in os.listdir(folder):
            yield extract_identity(filename)

    def _path(self, subdir, fileID, ext):
        """Join root/subdir/fileID+ext."""
        return os.path.join(self._dataset_dir, subdir, fileID + ext)

    def source_filepath(self, fileID):
        return self._path('source', fileID, '.png')

    def keypoints_filepath(self, fileID):
        return self._path('keypoints', fileID, '.txt')

    def depth_filepath(self, fileID):
        return self._path('depth', fileID, '.txt')

    def affine_filepath(self, fileID):
        return self._path('affine', fileID, '.txt')

    def affine_identity_filepath(self):
        """Path of the shared identity affine, configured at construction."""
        return self._affine_identity_path
_module()
class ATSS(SingleStageDetector):
    """Implementation of `ATSS <https://arxiv.org/abs/1912.02424>`_.

    NOTE(review): the original docstring was truncated by extraction; the
    link target above is the ATSS paper — confirm against upstream.
    """

    def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None):
        # pure pass-through: ATSS-specific behavior lives in the configured bbox_head
        super(ATSS, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained, init_cfg)
def remove_rule(data, name):
    """Remove rule `name`'s votes from every instance in `data`.

    Each instance is expected to carry 'WISER_LABELS' and 'WISER_LINKS'
    dicts keyed by rule name; entries for `name` are deleted in place.
    Instances that never saw the rule are left untouched.
    """
    for instance in data:
        # pop() with a default replaces the check-then-delete pattern:
        # one lookup instead of two, and a no-op when the key is absent
        instance['WISER_LABELS'].pop(name, None)
        instance['WISER_LINKS'].pop(name, None)
def my_sum(a, axis, count):
    """Sum `a` over `axis`, treating a size-1 axis as `count` broadcast copies.

    Raises IndexError when the axis length is neither `count` nor 1, i.e.
    the array cannot be broadcast to the requested length.
    """
    axis_len = a.shape[axis]
    if axis_len == count:
        return a.sum(axis)
    if axis_len == 1:
        # a size-1 axis broadcasts: summing count identical slices == count * slice
        return count * a.sum(axis)
    raise IndexError('Cannot be broadcast: a.shape=%s, axis=%d, count=%d'
                     % (a.shape, axis, count))
def test_set_batch_clashing_values(state: sum_tree.SumTreeState) -> None:
    """When the same index is written twice in one batch, the LAST write must
    win — identically for the scan and bincount implementations."""
    num_indices = 10
    base = jnp.arange(num_indices)
    repeated_indices = jnp.concatenate([base, base])
    values = jnp.arange(num_indices * 2) * 100
    state_scan = sum_tree.set_batch_scan(state, repeated_indices, values)
    state_bincount = sum_tree.set_batch_bincount(state, repeated_indices, values)
    vals_scan = sum_tree.get_batch(state_scan, repeated_indices[:num_indices])
    vals_bincount = sum_tree.get_batch(state_bincount, repeated_indices[:num_indices])
    # the second half of `values` (the later writes) must have won
    assert jnp.all(vals_scan == values[num_indices:]) and jnp.all(vals_bincount == values[num_indices:])
    # the root node caches the grand total
    assert state_scan.nodes[0] == jnp.sum(vals_scan)
    assert state_bincount.nodes[0] == jnp.sum(vals_bincount)
    assert state_scan.max_recorded_priority == jnp.max(vals_scan)
    assert state_bincount.max_recorded_priority == jnp.max(vals_bincount)
    # both implementations must agree on the entire tree state
    chex.assert_trees_all_close(state_scan, state_bincount)
def convert_param_to_nevergrad(param):
    """Translate one hyper-parameter spec dict into a nevergrad parameter.

    Supported `param['type']` values: BOOL, INT, STRING, FLOAT, FLOAT_EXP
    (log-scale float).  Raises ValueError for anything else.
    """
    import nevergrad as ng
    kind = param['type']
    if kind == 'BOOL':
        return ng.p.Choice([False, True])
    elif kind == 'INT':
        # inclusive integer range as an ordered choice
        return ng.p.TransitionChoice(range(param['min'], param['max'] + 1))
    elif kind == 'STRING':
        return ng.p.Choice(param['options'])
    elif kind == 'FLOAT':
        return ng.p.Scalar(lower=param['min'], upper=param['max'])
    elif kind == 'FLOAT_EXP':
        # sampled on a logarithmic scale
        return ng.p.Log(lower=param['min'], upper=param['max'])
    raise ValueError("Didn't understand space {}.".format(param))
('(float32[:], float32[:], int32)', device=True, inline=True)
def devRotateIoUEval(rbox1, rbox2, criterion=(- 1)):
    """Rotated-box overlap metric (CUDA device function; decorator stripped
    in this extraction).

    criterion -1: IoU; 0: intersection over area of rbox1; 1: intersection
    over area of rbox2; anything else: the raw intersection area.
    Boxes store width at index 2 and height at index 3.
    """
    overlap = inter(rbox1, rbox2)
    first_area = rbox1[2] * rbox1[3]
    second_area = rbox2[2] * rbox2[3]
    if criterion == -1:
        return overlap / (first_area + second_area - overlap)
    if criterion == 0:
        return overlap / first_area
    if criterion == 1:
        return overlap / second_area
    return overlap
def IVAE_wrapper(X, U, batch_size=256, max_iter=70000.0, seed=0, n_layers=3, hidden_dim=20, lr=0.001, cuda=True, ckpt_file='ivae.pt', test=False):
    """Train (or, with test=True, load) an identifiable VAE on data X with
    auxiliary variables U.

    Returns (z, model, params) where z are latents for the full dataset and
    params holds the decoder/encoder/prior parameter tensors from a final
    forward pass over all data.
    """
    torch.manual_seed(seed)
    np.random.seed(seed)
    device = torch.device('cuda:0' if cuda else 'cpu')
    dset = ConditionalDataset(X.astype(np.float32), U.astype(np.float32), device)
    loader_params = {'num_workers': 1, 'pin_memory': True} if cuda else {}
    train_loader = DataLoader(dset, shuffle=True, batch_size=batch_size, **loader_params)
    (data_dim, latent_dim, aux_dim) = dset.get_dims()
    model = iVAE(latent_dim, data_dim, aux_dim, activation='lrelu', device=device, n_layers=n_layers, hidden_dim=hidden_dim)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.1, patience=20, verbose=True)
    if not test:
        print('Training..')
        it = 0
        model.train()
        # iteration-bounded training (epochs are implicit)
        while it < max_iter:
            elbo_train = 0
            for _, (x, u) in enumerate(train_loader):
                it += 1
                optimizer.zero_grad()
                (x, u) = (x.to(device), u.to(device))
                (elbo, z_est) = model.elbo(x, u)
                # maximize the ELBO == minimize its negation
                elbo.mul(-1).backward()
                optimizer.step()
                elbo_train += (- elbo.item())
            elbo_train /= len(train_loader)
            # plateau scheduler keyed on the (negated) epoch ELBO
            scheduler.step(elbo_train)
        torch.save(model.state_dict(), ckpt_file)
    else:
        # BUG FIX: the checkpoint stores a state_dict (see torch.save above),
        # so it must be loaded INTO the model; the original assigned the raw
        # state_dict to `model`, which then crashed on the forward pass below.
        model.load_state_dict(torch.load(ckpt_file, map_location=device))
    # final pass over the whole dataset to extract latents and parameters
    (Xt, Ut) = (dset.x, dset.y)
    (decoder_params, encoder_params, z, prior_params) = model(Xt, Ut)
    params = {'decoder': decoder_params, 'encoder': encoder_params, 'prior': prior_params}
    return (z, model, params)
def make_env(env_id, env_type, mpi_rank=0, subrank=0, seed=None, reward_scale=1.0, gamestate=None, flatten_dict_observations=True, wrapper_kwargs=None, env_kwargs=None, logger_dir=None, initializer=None):
    """Create and wrap a single gym environment for goal-conditioned RL.

    Applies a per-family goal wrapper (Fetch/Hand/Sawyer/Point/Reacher), a
    time limit, optional dict-observation flattening, per-subrank seeding,
    monitoring, and action clipping for Box action spaces.
    NOTE(review): `env_type`, `reward_scale` and `gamestate` are accepted but
    unused in this body — presumably kept for signature compatibility with
    other make_env variants; confirm against callers.
    """
    if (initializer is not None):
        initializer(mpi_rank=mpi_rank, subrank=subrank)
    wrapper_kwargs = (wrapper_kwargs or {})
    env_kwargs = (env_kwargs or {})
    if (':' in env_id):
        # "module:EnvId" form — import the module so its envs get registered
        import re
        import importlib
        module_name = re.sub(':.*', '', env_id)
        env_id = re.sub('.*:', '', env_id)
        importlib.import_module(module_name)
    env = gym.make(env_id, **env_kwargs)
    # per-family goal wrappers and episode-length limits
    if env_id.startswith('Fetch'):
        from wgcsl.envs.multi_world_wrapper import FetchGoalWrapper
        env._max_episode_steps = 50
        env = FetchGoalWrapper(env)
    elif env_id.startswith('Hand'):
        env._max_episode_steps = 100
    elif env_id.startswith('Sawyer'):
        from wgcsl.envs.multi_world_wrapper import SawyerGoalWrapper
        env = SawyerGoalWrapper(env)
        if (not hasattr(env, '_max_episode_steps')):
            env = gym.wrappers.TimeLimit(env, max_episode_steps=50)
    elif env_id.startswith('Point'):
        from wgcsl.envs.multi_world_wrapper import PointGoalWrapper
        env = gym.wrappers.TimeLimit(env, max_episode_steps=50)
        env = PointGoalWrapper(env)
    elif env_id.startswith('Reacher'):
        from wgcsl.envs.multi_world_wrapper import ReacherGoalWrapper
        env._max_episode_steps = 50
        env = ReacherGoalWrapper(env)
    else:
        env = gym.wrappers.TimeLimit(env, max_episode_steps=50)
    if (flatten_dict_observations and isinstance(env.observation_space, gym.spaces.Dict)):
        env = FlattenObservation(env)
    # distinct seed per MPI subrank so parallel workers decorrelate
    env.seed(((seed + subrank) if (seed is not None) else None))
    # monitor file name encodes rank.subrank; None disables file logging
    env = Monitor(env, (logger_dir and os.path.join(logger_dir, ((str(mpi_rank) + '.') + str(subrank)))), allow_early_resets=True)
    if isinstance(env.action_space, gym.spaces.Box):
        env = ClipActionsWrapper(env)
    return env
def _selfies_to_smiles(selfies, N_restrict=True):
    """Decode a (possibly multi-fragment) SELFIES string to SMILES.

    Fragments are '.'-separated; each is decoded independently starting
    from ring/branch state 'X0', then rejoined with '.'.
    """
    decoded_fragments = [
        __selfies_to_smiles_derive(fragment, 'X0', N_restrict)
        for fragment in selfies.split('.')
    ]
    return '.'.join(decoded_fragments)
def make_grid(arr, ncols=2):
    """Tile a batch of images (n, H, W, C) into one (H*nrows, W*ncols, C)
    mosaic, row-major; n must be divisible by ncols."""
    n, height, width, nchan = arr.shape
    nrows, remainder = divmod(n, ncols)
    assert remainder == 0
    # (nrows, ncols, H, W, C) -> (nrows, H, ncols, W, C) -> flat mosaic
    tiled = arr.reshape(nrows, ncols, height, width, nchan).swapaxes(1, 2)
    return tiled.reshape(height * nrows, width * ncols, nchan)
def clear_double_double_solutions(vrblvl=0):
    """Deallocate PHCpack's double-double solutions container (job code 347).

    vrblvl > 0 enables progress printing; the phc return value is returned.
    """
    verbose = vrblvl > 0
    if verbose:
        print('in clear_double_double_solutions ...')
    phc = get_phcfun()
    # dummy in/out parameters required by the generic phc call signature
    int_par_a = pointer(c_int32(0))
    int_par_b = pointer(c_int32(0))
    dbl_par = pointer(c_double(0.0))
    if verbose:
        print('-> clear_double_double_solutions calls phc', end='')
    retval = phc(347, int_par_a, int_par_b, dbl_par, c_int32(vrblvl))
    if verbose:
        print(', return value :', retval)
    return retval
class DictToListFromKeys():
    """Callable that selects values from a dict of tensors, keeping only
    whitelisted keys, in the dict's own iteration order."""

    def __init__(self, keys: List[str]):
        self.keys = keys

    def __call__(self, x: Dict[(str, Tensor)]) -> List[Tensor]:
        wanted = self.keys
        return [value for name, value in x.items() if name in wanted]

    def __repr__(self):
        return f'{self.__class__.__name__}(keys={self.keys})'
class DeprecateAction(configargparse.Action):
    """Argparse action that rejects a deprecated flag with an explanatory error."""

    def __init__(self, option_strings, dest, help=None, **kwargs):
        # nargs=0: the flag consumes no value; merely using it is an error.
        super(DeprecateAction, self).__init__(
            option_strings, dest, nargs=0, help=help, **kwargs)

    def __call__(self, parser, namespace, values, flag_name):
        extra = '' if self.help is None else self.help
        raise configargparse.ArgumentTypeError(
            "Flag '%s' is deprecated. %s" % (flag_name, extra))
class TFCTRLPreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder used when TensorFlow is not installed.

    Any attempt to use this class goes through ``requires_backends``, which
    presumably raises an informative error pointing at the missing 'tf'
    backend — TODO confirm against the project's ``requires_backends``.
    """

    # Backends required by the real implementation.
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        # Fails fast if the 'tf' backend is unavailable.
        requires_backends(self, ['tf'])
def plot_speed_results():
    """Plot naive-vs-Horner benchmark results loaded from SPEED_RUN_PICKLE.

    Produces one figure per metric (setup time increase, eval time decrease,
    operation-count reduction), each showing, per dimension, the difference
    ``naive - horner`` over the tested polynomial degrees.

    Note: the original also defined ``extract_data`` and
    ``extract_data_abs_horner`` helpers and an unused legend Rectangle; they
    were never used, so they have been removed.
    """

    def extract_data_diff(results, data_idx):
        # For every dimension run, collect naive[data_idx] - horner[data_idx]
        # for each degree that was benchmarked.
        data = []
        for dim_run_res in results:
            y_diff = []
            for degree_run_res in dim_run_res:
                y1 = degree_run_res[0][data_idx]
                y2 = degree_run_res[1][data_idx]
                y_diff.append(y1 - y2)
            data.append(y_diff)
        return data

    print('importing polynomial from file "{}"'.format(SPEED_RUN_PICKLE))
    with open(SPEED_RUN_PICKLE, 'rb') as f:
        all_results = pickle.load(f)
    print('plotting now...')
    labels = ['time [s]', 'time [s]', 'avg. #operations reduction']
    file_names = ['setup_time_increase', 'eval_time_decrease', 'num_ops_decrease']
    for run_idx in range(3):
        print(run_idx, file_names[run_idx])
        label = labels[run_idx]
        (fig, ax) = plt.subplots()
        data = extract_data_diff(all_results, run_idx)
        # Empty plot with a blank marker serves as a legend title entry.
        plt.plot([], [], ' ', label='dimension')
        # Reverse so the highest dimension appears first in the legend.
        for (dim, dim_run_data) in zip(reversed(DIM_RANGE), reversed(data)):
            plt.plot(DEGREE_RANGE, dim_run_data, 'x:', label=str(dim),
                     linewidth=3, markersize=15, markeredgewidth=2.5)
        plt.xticks(DEGREE_RANGE)
        plt.xlabel('polynomial maximal_degree')
        plt.ylabel(label)
        plt.legend()
        plt.grid(True)
        export_plot(fig, file_names[run_idx])
        if SHOW_PLOTS:
            plt.show()
    print('...done.')
class SetDataManager(DataManager):
    """Builds episodic few-shot data loaders: each episode samples ``n_way``
    classes, each contributing ``n_support + n_query`` images."""

    def __init__(self, image_size, n_way=5, n_support=5, n_query=16, n_eposide=100):
        super(SetDataManager, self).__init__()
        self.image_size = image_size
        self.n_way = n_way
        # Support and query images for one class are drawn together.
        self.batch_size = n_support + n_query
        self.n_eposide = n_eposide
        self.trans_loader = TransformLoader(image_size)

    def get_data_loader(self, aug):
        """Return a DataLoader over episodes; ``aug`` toggles augmentation."""
        transform = self.trans_loader.get_composed_transform(aug)
        dataset = SetDataset(self.batch_size, transform)
        sampler = EpisodicBatchSampler(len(dataset), self.n_way, self.n_eposide)
        loader_kwargs = dict(batch_sampler=sampler, num_workers=12, pin_memory=True)
        return torch.utils.data.DataLoader(dataset, **loader_kwargs)
def print_params_min_max_norm(optimizer, iteration):
    """Debug helper: print min, max and L2 norm of every optimizer parameter,
    one line per parameter, tagged with iteration/rank/index."""
    rank = torch.distributed.get_rank()
    # Unwrap FP16_Optimizer to reach the real parameter groups.
    inner = optimizer.optimizer if isinstance(optimizer, FP16_Optimizer) else optimizer
    string = 'iteration, rank, index, model-parallel,min, max, norm\n'
    index = 0
    for group in inner.param_groups:
        for param in group['params']:
            index += 1
            string += '{:7d}, {:4d}, {:4d}, {:2d}, '.format(
                iteration, rank, index, int(param.model_parallel))
            string += '{:.6E}, {:.6E}, {:.6E}\n'.format(
                param.data.min(), param.data.max(), param.data.norm())
    print(string, flush=True)
def praat_featurize(voiceID):
    """Extract acoustic features from an audio file using praat/parselmouth.

    Measures duration, pitch statistics, HNR, jitter/shimmer variants and the
    mean of the first four formants at glottal pulses.

    Fixes over the original:
    - the last loop iteration's formant values were appended a second time
      after the loop (copy-paste bug), biasing the means, and raised a
      NameError when there were zero pulses;
    - ``max_formant`` was left unbound when meanF0 is NaN (unvoiced audio);
      it now falls back to 5500 Hz.

    :param voiceID: path (or Sound-compatible id) of the audio to analyze.
    :return: ``(measurements, labels)`` — parallel lists of values and names.
    """
    sound = parselmouth.Sound(voiceID)
    # Broad first pitch pass, then refine floor/ceiling around the observed range.
    broad_pitch = call(sound, 'To Pitch', 0.0, 50, 600)
    minF0 = call(broad_pitch, 'Get minimum', 0, 0, 'hertz', 'Parabolic')
    maxF0 = call(broad_pitch, 'Get maximum', 0, 0, 'hertz', 'Parabolic')
    floor = minF0 * 0.9
    ceiling = maxF0 * 1.1
    pitch = call(sound, 'To Pitch', 0.0, floor, ceiling)
    duration = call(sound, 'Get total duration')
    meanF0 = call(pitch, 'Get mean', 0, 0, 'hertz')
    stdevF0 = call(pitch, 'Get standard deviation', 0, 0, 'hertz')
    harmonicity = call(sound, 'To Harmonicity (cc)', 0.01, minF0, 0.1, 1.0)
    hnr = call(harmonicity, 'Get mean', 0, 0)
    pointProcess = call(sound, 'To PointProcess (periodic, cc)', minF0, maxF0)
    localJitter = call(pointProcess, 'Get jitter (local)', 0, 0, 0.0001, 0.02, 1.3)
    localabsoluteJitter = call(pointProcess, 'Get jitter (local, absolute)', 0, 0, 0.0001, 0.02, 1.3)
    rapJitter = call(pointProcess, 'Get jitter (rap)', 0, 0, 0.0001, 0.02, 1.3)
    ppq5Jitter = call(pointProcess, 'Get jitter (ppq5)', 0, 0, 0.0001, 0.02, 1.3)
    ddpJitter = call(pointProcess, 'Get jitter (ddp)', 0, 0, 0.0001, 0.02, 1.3)
    localShimmer = call([sound, pointProcess], 'Get shimmer (local)', 0, 0, 0.0001, 0.02, 1.3, 1.6)
    localdbShimmer = call([sound, pointProcess], 'Get shimmer (local_dB)', 0, 0, 0.0001, 0.02, 1.3, 1.6)
    apq3Shimmer = call([sound, pointProcess], 'Get shimmer (apq3)', 0, 0, 0.0001, 0.02, 1.3, 1.6)
    aqpq5Shimmer = call([sound, pointProcess], 'Get shimmer (apq5)', 0, 0, 0.0001, 0.02, 1.3, 1.6)
    apq11Shimmer = call([sound, pointProcess], 'Get shimmer (apq11)', 0, 0, 0.0001, 0.02, 1.3, 1.6)
    ddaShimmer = call([sound, pointProcess], 'Get shimmer (dda)', 0, 0, 0.0001, 0.02, 1.3, 1.6)
    # Formant ceiling depends on the speaker's pitch range; NaN meanF0 fails
    # every comparison, so fall back to the mid-range 5500 Hz default.
    if 170 < meanF0 < 300:
        max_formant = 5500
    elif meanF0 <= 170:
        max_formant = 5000
    elif meanF0 >= 300:
        max_formant = 8000
    else:
        max_formant = 5500
    formants = call(sound, 'To Formant (burg)', 0.0025, 5, max_formant, 0.025, 50)
    numPoints = call(pointProcess, 'Get number of points')
    f1_list = []
    f2_list = []
    f3_list = []
    f4_list = []
    # Praat point indices are 1-based.
    for point in range(1, numPoints + 1):
        t = call(pointProcess, 'Get time from index', point)
        f1_list.append(call(formants, 'Get value at time', 1, t, 'Hertz', 'Linear'))
        f2_list.append(call(formants, 'Get value at time', 2, t, 'Hertz', 'Linear'))
        f3_list.append(call(formants, 'Get value at time', 3, t, 'Hertz', 'Linear'))
        f4_list.append(call(formants, 'Get value at time', 4, t, 'Hertz', 'Linear'))
    # Drop undefined (NaN) formant readings before averaging.
    f1_list = [f1 for f1 in f1_list if (str(f1) != 'nan')]
    f2_list = [f2 for f2 in f2_list if (str(f2) != 'nan')]
    f3_list = [f3 for f3 in f3_list if (str(f3) != 'nan')]
    f4_list = [f4 for f4 in f4_list if (str(f4) != 'nan')]

    def _mean(values):
        # Mean of the list; 0 when no valid readings exist.
        return (sum(values) / len(values)) if values else 0

    f1_mean = _mean(f1_list)
    f2_mean = _mean(f2_list)
    f3_mean = _mean(f3_list)
    f4_mean = _mean(f4_list)
    measurements = [duration, meanF0, stdevF0, hnr, localJitter, localabsoluteJitter, rapJitter, ppq5Jitter, ddpJitter, localShimmer, localdbShimmer, apq3Shimmer, aqpq5Shimmer, apq11Shimmer, ddaShimmer, f1_mean, f2_mean, f3_mean, f4_mean]
    labels = ['duration', 'meanF0', 'stdevF0', 'hnr', 'localJitter', 'localabsoluteJitter', 'rapJitter', 'ppq5Jitter', 'ddpJitter', 'localShimmer', 'localdbShimmer', 'apq3Shimmer', 'aqpq5Shimmer', 'apq11Shimmer', 'ddaShimmer', 'f1_mean', 'f2_mean', 'f3_mean', 'f4_mean']
    return (measurements, labels)
def reduce_tensor(tensor, args):
    """All-reduce ``tensor`` over all workers and return the mean.

    Operates on a clone, so the caller's tensor is left untouched.
    """
    reduced = tensor.clone()
    torch.distributed.all_reduce(reduced, op=torch.distributed.ReduceOp.SUM)
    reduced /= args.world_size
    return reduced
class Reverse(Layer):
    """BigDL layer wrapper that reverses its input along ``dimension``
    (presumably 1-based, per BigDL convention — TODO confirm)."""

    def __init__(self, dimension=1, is_inplace=False, bigdl_type='float'):
        # Construction is fully delegated to the JVM-side layer; the leading
        # None is the (absent) JVM object handle expected by Layer.__init__.
        super(Reverse, self).__init__(None, bigdl_type, dimension, is_inplace)
class AvgrageMeter(object):
    """Running (weighted) average of a scalar metric.

    ``update(val, n)`` treats ``val`` as the mean over ``n`` samples, so the
    total contribution is ``val * n``. (Class name typo kept: public API.)
    """

    def __init__(self):
        self.reset()

    def reset(self):
        # Zero all accumulators so the meter can be reused between epochs.
        self.avg = 0
        self.sum = 0
        self.cnt = 0

    def update(self, val, n=1):
        self.sum += val * n
        self.cnt += n
        self.avg = self.sum / self.cnt
class RobertaPreLayerNormForMultipleChoice(metaclass=DummyObject):
    """Import-time placeholder used when PyTorch is not installed.

    Any use goes through ``requires_backends``, which presumably raises an
    informative error about the missing 'torch' backend — TODO confirm.
    """

    # Backends required by the real implementation.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Fails fast if the 'torch' backend is unavailable.
        requires_backends(self, ['torch'])
class ReaderManager(object):
    """Drives a corpus reader: reads text, builds the vocabulary, resolves
    embeddings and converts sentences to index sequences."""

    def __init__(self, reader):
        super(ReaderManager, self).__init__()
        self.reader = reader
        self.logger = get_logger()

    def run(self, options, text_path, embeddings_path):
        """Read ``text_path``, attach embeddings, and return the indexed corpus.

        Returns a dict with 'sentences' (index sequences), 'embeddings',
        'word2idx', 'extra' and 'metadata'.
        """
        log = self.logger
        log.info('Reading text: {}'.format(text_path))
        result = self.reader.read(text_path)
        sentences = result['sentences']
        extra = result['extra']
        metadata = result.get('metadata', {})
        log.info('len(sentences)={}'.format(len(sentences)))
        word2idx = build_text_vocab(sentences)
        log.info('len(vocab)={}'.format(len(word2idx)))
        if 'embeddings' in metadata:
            log.info('Using embeddings from metadata.')
            embeddings = metadata['embeddings']
            # Remove the (potentially large) matrix from the returned metadata.
            del metadata['embeddings']
        else:
            log.info('Reading embeddings.')
            (embeddings, word2idx) = EmbeddingsReader().get_embeddings(
                options, embeddings_path, word2idx)
        unk_index = word2idx.get(UNK_TOKEN, None)
        log.info('Converting tokens to indexes (unk_index={}).'.format(unk_index))
        sentences = indexify(sentences, word2idx, unk_index)
        return {'sentences': sentences, 'embeddings': embeddings,
                'word2idx': word2idx, 'extra': extra, 'metadata': metadata}
@runtime_checkable
class EstimatorLikeFity(Protocol):
    """Structural (duck) type for sklearn-like estimators exposing ``fit``,
    ``get_params`` and ``set_params``.

    The stray ``_checkable`` token above this class was the remnant of a
    mangled ``@runtime_checkable`` decorator; it is restored here so
    ``isinstance`` checks against this Protocol work.
    """

    def fit(self, y: str) -> 'EstimatorLikeFity':
        return self

    def get_params(self, deep: bool = True) -> Dict:
        return {}

    def set_params(self, **params: Any) -> 'EstimatorLikeFity':
        return self
class TFConvNextEmbeddings(tf.keras.layers.Layer):
    """ConvNeXt patch embedding: non-overlapping Conv2D followed by LayerNorm."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        # kernel_size == strides == patch_size gives non-overlapping patches.
        self.patch_embeddings = tf.keras.layers.Conv2D(
            filters=config.hidden_sizes[0],
            kernel_size=config.patch_size,
            strides=config.patch_size,
            name='patch_embeddings',
            kernel_initializer=get_initializer(config.initializer_range),
            bias_initializer='zeros')
        self.layernorm = tf.keras.layers.LayerNormalization(epsilon=1e-06, name='layernorm')
        self.num_channels = config.num_channels

    def call(self, pixel_values):
        if isinstance(pixel_values, dict):
            pixel_values = pixel_values['pixel_values']
        channels = shape_list(pixel_values)[1]
        # Shape check is only possible eagerly; graph mode skips it.
        if tf.executing_eagerly() and (channels != self.num_channels):
            raise ValueError('Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
        # NCHW -> NHWC, the layout Keras conv layers expect by default.
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        embeddings = self.patch_embeddings(pixel_values)
        return self.layernorm(embeddings)
def inquire_select(choices, prefix_msg='', name='unamed', default_index=(- 1)):
    """Interactively ask the user to pick one or more of ``choices``
    (checkbox prompt); the entry at ``default_index`` starts checked.

    :param choices: iterable of options (stringified for display).
    :param prefix_msg: text prepended to the prompt message.
    :param name: answer key / display name of the choice set.
    :param default_index: index of the pre-checked option (default: last).
    :return: list of selected choice strings.
    """
    # Single comprehension replaces the original pair of in-place loops
    # (stringify, then wrap in the {'name': ...} shape the prompt expects).
    choices = [{'name': str(choice)} for choice in choices]
    choices[default_index]['checked'] = True
    questions = [{'type': 'checkbox', 'message': '{}. There are multiple {} as follows:'.format(prefix_msg, name), 'name': name, 'choices': choices, 'validate': (lambda answer: ('You must choose at least one of them.' if (len(answer) == 0) else True))}]
    answers = prompt(questions, style=custom_style_2)
    return answers[name]
def main():
    """Collect the pipeline test modules to run, print them as JSON and
    persist them to reports/test-pipelines.json."""
    modules = fetch_pipeline_modules_to_test()
    modules.extend(ALWAYS_TEST_PIPELINE_MODULES)
    # Deduplicate while keeping the list shape downstream tooling expects.
    test_modules = list(set(modules))
    print(json.dumps(test_modules))
    report_dir = f'{PATH_TO_REPO}/reports'
    os.makedirs(report_dir, exist_ok=True)
    with open(f'{report_dir}/test-pipelines.json', 'w') as f:
        json.dump({'pipeline_test_modules': test_modules}, f)
class TestJitLSTMModel(unittest.TestCase):
    """Checks that the LSTM model can be TorchScript-ed, saved/loaded, and
    that scripted and eager execution agree.

    The bare tuples that preceded the two test methods were mangled
    ``@unittest.skipIf`` decorators (the ``@unittest.skipIf`` head was
    stripped); they are restored here so the skip condition applies again.
    """

    def _test_save_and_load(self, scripted_module):
        # Round-trip through a temp file to ensure the script serializes.
        with tempfile.NamedTemporaryFile() as f:
            scripted_module.save(f.name)
            torch.jit.load(f.name)

    def assertTensorEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), 'size mismatch')
        self.assertEqual(t1.ne(t2).long().sum(), 0)

    @unittest.skipIf(torch.__version__ < '1.6.0', 'Targeting OSS scriptability for the 1.6 release')
    def test_jit_and_export_lstm(self):
        (task, parser) = get_dummy_task_and_parser()
        LSTMModel.add_args(parser)
        args = parser.parse_args([])
        args.criterion = ''
        model = LSTMModel.build_model(args, task)
        scripted_model = torch.jit.script(model)
        self._test_save_and_load(scripted_model)

    @unittest.skipIf(torch.__version__ < '1.6.0', 'Targeting OSS scriptability for the 1.6 release')
    def test_assert_jit_vs_nonjit_(self):
        (task, parser) = get_dummy_task_and_parser()
        LSTMModel.add_args(parser)
        args = parser.parse_args([])
        args.criterion = ''
        model = LSTMModel.build_model(args, task)
        model.eval()
        scripted_model = torch.jit.script(model)
        scripted_model.eval()
        idx = len(task.source_dictionary)
        iter = 100
        # Random batch shapes per iteration exercise varied sequence lengths.
        seq_len_tensor = torch.randint(1, 10, (iter,))
        num_samples_tensor = torch.randint(1, 10, (iter,))
        for i in range(iter):
            seq_len = seq_len_tensor[i]
            num_samples = num_samples_tensor[i]
            src_token = (torch.randint(0, idx, (num_samples, seq_len)),)
            src_lengths = torch.randint(1, (seq_len + 1), (num_samples,))
            (src_lengths, _) = torch.sort(src_lengths, descending=True)
            # The longest sample must span the full sequence length.
            src_lengths[0] = seq_len
            prev_output_token = (torch.randint(0, idx, (num_samples, 1)),)
            result = model(src_token[0], src_lengths, prev_output_token[0], None)
            scripted_result = scripted_model(src_token[0], src_lengths, prev_output_token[0], None)
            self.assertTensorEqual(result[0], scripted_result[0])
            self.assertTensorEqual(result[1], scripted_result[1])
def file_uri_writer_processor(data, path: str, **kwargs):
    """Persist ``data`` to ``path``, creating parent directories as needed.

    ``*.pkl``/``*.pickle`` paths are pickled; anything else is assumed to be
    a pandas-like object and written via ``to_csv``.

    :param data: object to persist.
    :param path: destination file path.
    :return: ``path`` (so the call can be chained).
    """
    dirname = os.path.dirname(path)
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    # endswith accepts a tuple of suffixes — one call instead of an `or` chain.
    if path.endswith(('pkl', 'pickle')):
        # `with` closes the handle even if pickling raises
        # (the original `pickle.dump(data, open(...))` leaked it).
        with open(path, 'wb') as f:
            pickle.dump(data, f)
    else:
        data.to_csv(path, index=False)
    return path
class NewPaviaDataset(FullImageDataset):
    """Pavia University hyperspectral dataset: loads the image/ground-truth
    .mat files, channel-standardizes the image, and defers the train/test
    split to FullImageDataset."""

    def __init__(self, image_mat_path, gt_mat_path, training=True,
                 num_train_samples_per_class=200, sub_minibatch=10):
        self.im_mat_path = image_mat_path
        self.gt_mat_path = gt_mat_path
        image = loadmat(image_mat_path)['paviaU']
        mask = loadmat(gt_mat_path)['paviaU_gt']
        # Per-channel mean/std over all pixels (flatten spatial dims).
        flat = image.reshape((-1, image.shape[-1]))
        channel_mean = flat.mean(axis=0)
        channel_std = flat.std(axis=0)
        self.vanilla_image = image
        image = preprocess.mean_std_normalize(image, channel_mean, channel_std)
        self.training = training
        self.num_train_samples_per_class = num_train_samples_per_class
        self.sub_minibatch = sub_minibatch
        super(NewPaviaDataset, self).__init__(
            image, mask, training, np_seed=SEED,
            num_train_samples_per_class=num_train_samples_per_class,
            sub_minibatch=sub_minibatch)
def check_args(args):
    """Validate CLI arguments and ensure the model output directory exists.

    Invalid values are reported on stdout (original best-effort behavior:
    the args are still returned unchanged).
    """
    check_folder(os.path.join(args.result_dir, args.dataset, 'model'))
    # Narrowed from bare `except:` — only the failed check should be
    # reported; unrelated exceptions (e.g. missing attribute) must propagate.
    try:
        assert args.epoch >= 1
    except AssertionError:
        print('number of epochs must be larger than or equal to one')
    try:
        assert args.batch_size >= 1
    except AssertionError:
        print('batch size must be larger than or equal to one')
    return args
class DecisionMaker(object):
    """Solves a regularized decision problem over the probability simplex
    with Mosek Fusion: minimize w·(Φp) + β·r subject to p ≥ 0, Σp = 1 and
    r ≥ ‖p‖₂ (quadratic cone)."""

    def __init__(self, beta=0):
        self.beta = beta

    def solve(self, w: np.ndarray, phi: np.ndarray):
        """Build and solve the conic program.

        :return: (optimal p, dual of p's bounds, dual of the simplex constraint)
        """
        num_vars = phi.shape[1]
        self.M = mf.Model()
        self.p = self.M.variable('p', num_vars, mf.Domain.greaterThan(0.0))
        self.r = self.M.variable('r', mf.Domain.greaterThan(0.0))
        # (r, p) in the quadratic cone encodes r >= ||p||_2.
        self.M.constraint(mf.Var.vstack(self.r, self.p), mf.Domain.inQCone())
        simplex = self.M.constraint(mf.Expr.sum(self.p), mf.Domain.equalsTo(1.0))
        objective = mf.Expr.add(
            mf.Expr.dot(w, mf.Expr.mul(phi, self.p)),
            mf.Expr.mul(self.beta, self.r))
        self.M.objective(mf.ObjectiveSense.Minimize, objective)
        self.M.solve()
        return (self.p.level(), self.p.dual(), simplex.dual())
class MultiThreadAsyncPredictor(AsyncPredictorBase):
    """Fans prediction requests out to several PredictorWorkerThreads that
    share a single bounded input queue; results come back as Futures."""

    def __init__(self, predictors, batch_size=5):
        assert len(predictors)
        for pred in predictors:
            # Workers must return only outputs, not echo their inputs.
            assert (pred.return_input == False)
        # Bounded queue applies back-pressure: 100 pending items per worker.
        self.input_queue = queue.Queue(maxsize=len(predictors) * 100)
        self.threads = [
            PredictorWorkerThread(self.input_queue, func, worker_id, batch_size=batch_size)
            for worker_id, func in enumerate(predictors)
        ]
        if six.PY2:
            # On Python 2, tornado futures need logging configured up front.
            import tornado.options as options
            options.parse_command_line(['--logging=debug'])

    def start(self):
        for thread in self.threads:
            thread.start()

    def run(self):
        self.start()

    def put_task(self, dp, callback=None):
        """Queue datapoint ``dp``; return a Future resolved with the result.
        ``callback`` (if given) fires when the Future completes."""
        fut = Future()
        if callback is not None:
            fut.add_done_callback(callback)
        self.input_queue.put((dp, fut))
        return fut
def _segm_pvtv2(num_classes, im_num, ex_num, xbound, trainsize):
    """Build a segmentation model on a PVTv2-B2 backbone initialized from
    local pretrained weights.

    ``xbound`` is unused here but retained for interface compatibility.
    """
    backbone = pvt_v2_b2(img_size=trainsize)
    # Load pretrained weights, keeping only keys the current architecture
    # knows about. (The original wrapped this in a dead `if 1:` block.)
    path = 'pvt_v2_b2.pth'
    pretrained = torch.load(path)
    model_dict = backbone.state_dict()
    filtered = {k: v for (k, v) in pretrained.items() if k in model_dict}
    model_dict.update(filtered)
    backbone.load_state_dict(model_dict)
    classifier = _simple_classifier(num_classes)
    return _SimpleSegmentationModel(backbone, classifier, im_num, ex_num)
def test_orbit_setup_lb_basic():
    """Initialize an Orbit from Galactic (l, b, d, pm_l, pm_b, vlos)
    Quantities and check each coordinate round-trips."""
    from galpy.orbit import Orbit
    tol = 10.0 ** (- 8.0)
    o = Orbit([(10.0 * units.deg), ((- 20.0) * units.deg), (3.0 * units.kpc),
               (((- 3.0) * units.mas) / units.yr), ((2.0 * units.mas) / units.yr),
               ((130.0 * units.km) / units.s)], lb=True)
    assert numpy.fabs(o.ll(quantity=False) - 10.0) < tol, 'Orbit initialization with ll as Quantity does not work as expected'
    assert numpy.fabs(o.bb(quantity=False) + 20.0) < tol, 'Orbit initialization with bb as Quantity does not work as expected'
    assert numpy.fabs(o.dist(quantity=False) - 3.0) < tol, 'Orbit initialization with distance as Quantity does not work as expected'
    assert numpy.fabs(o.pmll(quantity=False) + 3.0) < tol, 'Orbit initialization with pmra as Quantity does not work as expected'
    assert numpy.fabs(o.pmbb(quantity=False) - 2.0) < tol, 'Orbit initialization with pmdec as Quantity does not work as expected'
    assert numpy.fabs(o.vlos(quantity=False) - 130.0) < tol, 'Orbit initialization with vlos as Quantity does not work as expected'
    return None
class AutoModelWithLMHead(object):
    """Factory that instantiates the LM-head model matching a config.

    Not instantiable directly — use ``from_pretrained`` or ``from_config``.
    Both factories take ``cls`` as their first parameter but were missing
    their ``@classmethod`` decorators (stripped during extraction); as plain
    methods, calling them on the class shifted every argument by one.
    """

    def __init__(self):
        raise EnvironmentError('AutoModelWithLMHead is designed to be instantiated using the `AutoModelWithLMHead.from_pretrained(pretrained_model_name_or_path)` or `AutoModelWithLMHead.from_config(config)` methods.')

    @classmethod
    def from_config(cls, config):
        """Instantiate (with random weights) the model class mapped to ``config``."""
        for (config_class, model_class) in MODEL_WITH_LM_HEAD_MAPPING.items():
            if isinstance(config, config_class):
                return model_class(config)
        raise ValueError('Unrecognized configuration class {} for this kind of AutoModel: {}.\nModel type should be one of {}.'.format(config.__class__, cls.__name__, ', '.join((c.__name__ for c in MODEL_WITH_LM_HEAD_MAPPING.keys()))))

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        """Instantiate and load the pretrained model class mapped to the config."""
        config = kwargs.pop('config', None)
        if (not isinstance(config, PretrainedConfig)):
            # No explicit config: resolve one from the checkpoint name/path.
            config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
        for (config_class, model_class) in MODEL_WITH_LM_HEAD_MAPPING.items():
            if isinstance(config, config_class):
                return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
        raise ValueError('Unrecognized configuration class {} for this kind of AutoModel: {}.\nModel type should be one of {}.'.format(config.__class__, cls.__name__, ', '.join((c.__name__ for c in MODEL_WITH_LM_HEAD_MAPPING.keys()))))
def draw_images(model_name):
    """Load a saved conditional generator and render one 28x28 sample for
    each digit label 0-9 into ``<model_name>_syn.pdf``."""
    generator = torch.load(model_name)
    with torch.no_grad():
        noise = torch.randn(10, 100).to(device)
        labels = torch.IntTensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).to(device)
        samples = generator(noise, labels).cpu().view(10, 28, 28)
        fig, axes = plt.subplots(1, 10, figsize=(20, 2))
        for idx in range(10):
            axes[idx].imshow(samples[idx].detach().numpy(),
                             interpolation='nearest', cmap='gray')
        fig.savefig(f'{model_name}_syn.pdf')
def test_task_actions():
    """Enable the TELEPORT action and verify the agent lands at the requested
    pose after stepping it."""
    config = habitat.get_config(config_paths=CFG_TEST)
    # Config is frozen by default; unfreeze to register the extra action.
    config.defrost()
    config.TASK.POSSIBLE_ACTIONS = config.TASK.POSSIBLE_ACTIONS + ['TELEPORT']
    config.freeze()
    with habitat.Env(config=config) as env:
        env.reset()
        teleport = {'action': 'TELEPORT',
                    'action_args': {'position': TELEPORT_POSITION,
                                    'rotation': TELEPORT_ROTATION}}
        assert env.action_space.contains(teleport)
        env.step(teleport)
        agent_state = env.sim.get_agent_state()
        assert np.allclose(np.array(TELEPORT_POSITION, dtype=np.float32), agent_state.position), 'mismatch in position after teleport'
        # Quaternion is compared as [x, y, z, w] (imag parts, then real).
        assert np.allclose(np.array(TELEPORT_ROTATION, dtype=np.float32), np.array([*agent_state.rotation.imag, agent_state.rotation.real])), 'mismatch in rotation after teleport'
        env.step('TURN_RIGHT')
def run_episode(p, graphs, net):
    """Run one actor-critic episode on the island-navigation task.

    The agent moves between `n_islands` islands trying to reach a hidden
    reward island; each of `p['periods']` periods runs `p['n_steps']` steps
    over a per-batch transition graph. Returns `(loss, total_reward)` where
    loss combines policy gradient, value (squared advantage) and entropy
    terms, averaged per step.

    NOTE(review): indentation of this function was reconstructed; the
    placement of the return-computation block (R / gammaR loop) relative to
    the period loop is an assumption — TODO confirm against the original.
    """
    batch_size = p['batch_size']
    n_actions = p['n_actions']
    n_islands = p['n_islands']
    loss = 0
    lossv = 0
    # n_actions acts as a "no previous action" sentinel index.
    last_action = np.zeros(batch_size, dtype='int32')
    rewards = []
    total_reward = np.zeros(batch_size, dtype='float32')
    values = []
    logprobs = []
    reward_reset_timer = np.zeros(batch_size, dtype='int32')
    state = []
    # state[j] = [reward island index, agent island index]; the agent never
    # starts on the reward island.
    for batch_j in range(batch_size):
        reward_island = np.random.randint(0, n_islands)
        agent_island = np.random.choice([i for i in range(n_islands) if (i != reward_island)])
        state.append([reward_island, agent_island])
    for period_l in range(1, (p['periods'] + 1)):
        # Reset per-period markers: no previous action, fresh stall timer.
        for batch_j in range(batch_size):
            last_action[batch_j] = n_actions
            reward_reset_timer[batch_j] = 0
        for step_k in range(p['n_steps']):
            inputs = np.zeros((batch_size, p['in_size']), dtype='float32')
            curr_reward = np.zeros(batch_size, dtype='float32')
            # One-hot encode: reward island, agent island, last action,
            # plus a bias slot, step progress and last reward.
            for batch_j in range(batch_size):
                reward_idx = state[batch_j][0]
                agent_idx = state[batch_j][1]
                inputs[(batch_j, reward_idx)] = 1
                inputs[(batch_j, (n_islands + agent_idx))] = 1
                inputs[(batch_j, (((2 * n_islands) + last_action[batch_j]) + 1))] = 1
                inputs[(batch_j, (((2 * n_islands) + n_actions) + 1))] = 1.0
                inputs[(batch_j, (((2 * n_islands) + n_actions) + 2))] = (step_k / p['n_steps'])
                # NOTE(review): curr_reward is all zeros at this point (it is
                # filled only after the net call below), so this slot is
                # always 0 — possibly meant to carry the previous step's
                # reward. TODO confirm.
                inputs[(batch_j, (((2 * n_islands) + n_actions) + 3))] = (1.0 * curr_reward[batch_j])
            inputs = torch.from_numpy(inputs).cuda()
            (action_logits, value) = net(inputs)
            action_probs = F.softmax(action_logits, dim=1)
            # Small epsilon keeps the categorical well-defined when a
            # probability underflows to zero.
            action_dist = torch.distributions.Categorical((action_probs + 1e-08))
            action_samples = action_dist.sample()
            logprobs.append(action_dist.log_prob(action_samples))
            action_samples = action_samples.data.cpu().numpy()
            for batch_j in range(batch_size):
                action_j = action_samples[batch_j]
                reward_idx = state[batch_j][0]
                agent_idx = state[batch_j][1]
                # NOTE(review): flat index batch_j * period_l into graphs —
                # possibly intended as graphs[batch_j][period_l]; verify.
                graph_j = graphs[(batch_j * period_l)]
                # Islands reachable from agent_idx via transport `action_j`.
                neighbours = np.where((graph_j[(action_j, agent_idx)] == 1))[0]
                last_action[batch_j] = int(action_j)
                if (len(neighbours) == 0):
                    # Invalid move: penalize and count toward the stall timer.
                    curr_reward[batch_j] -= p['penalty']
                    reward_reset_timer[batch_j] += 1
                elif (len(neighbours) == 1):
                    new_agent_idx = neighbours[0]
                    if (reward_idx == new_agent_idx):
                        # Reward found: pay out and re-randomize both islands.
                        curr_reward[batch_j] += p['reward']
                        state[batch_j][0] = np.random.choice([i for i in range(n_islands) if (i != reward_idx)])
                        state[batch_j][1] = np.random.choice([i for i in range(n_islands) if (i != state[batch_j][0])])
                    else:
                        reward_reset_timer[batch_j] += 1
                        state[batch_j][1] = new_agent_idx
                else:
                    raise Exception('Graph is faulty. Should not have multiple neighbours with the same transportation')
                if (reward_reset_timer[batch_j] > n_islands):
                    # Stalled too long: re-randomize the task for this sample.
                    state[batch_j][0] = np.random.choice([i for i in range(n_islands) if (i != reward_idx)])
                    state[batch_j][1] = np.random.choice([i for i in range(n_islands) if (i != state[batch_j][0])])
                    last_action[batch_j] = n_actions
                    reward_reset_timer[batch_j] = 0
            rewards.append(curr_reward)
            values.append(value)
            total_reward += curr_reward
            # Entropy bonus (negative sign: maximizing entropy lowers loss).
            action_entropy = (- action_dist.entropy().mean())
            loss += (p['entropy_coef'] * action_entropy)
    # Discounted-return pass (advantage actor-critic).
    # NOTE(review): rewards/values/logprobs accumulate across all periods but
    # only indices [0, n_steps) are used here — correct when periods == 1;
    # TODO confirm intended behavior for periods > 1.
    R = Variable(torch.zeros(batch_size).cuda(), requires_grad=False)
    gammaR = p['gamma']
    for numstepb in reversed(range(p['n_steps'])):
        R = ((gammaR * R) + Variable(torch.from_numpy(rewards[numstepb]).cuda(), requires_grad=False))
        ctrR = (R - values[numstepb])
        lossv += ctrR.pow(2).mean()
        loss -= (logprobs[numstepb] * ctrR.detach()).mean()
    loss += (p['value_coef'] * lossv)
    loss /= (p['n_steps'] * p['periods'])
    return (loss, total_reward)
class JobMaster(metaclass=ABCMeta):
    """Abstract lifecycle interface for a job master.

    All hooks are no-ops by default; subclasses override the ones they need.
    (Note: no method is marked @abstractmethod, so the class is technically
    instantiable despite the ABCMeta metaclass.)
    """

    def prepare(self):
        # Optional setup hook, called before run().
        pass

    def run(self):
        # Main work loop; default implementation does nothing.
        pass

    def stop(self):
        # Immediate shutdown hook.
        pass

    def request_stop(self):
        # Cooperative shutdown request (as opposed to stop()).
        pass
class AttentivePoolingModule(nn.Module):
def __init__(self, input_dim, activation='ReLU', **kwargs):
super(AttentivePoolingModule, self).__init__()
self.W_a = nn.Linear(input_dim, input_dim)
self.W = nn.Linear(input_dim, 1)
self.act_fn = getattr(nn, activation)()
self.softmax = nn.functional.softmax
def forward(self, batch_rep, att_mask):
att_logits = self.W(self.act_fn(self.W_a(batch_rep))).squeeze((- 1))
att_logits = (att_mask + att_logits)
att_w = self.softmax(att_logits, dim=(- 1)).unsqueeze((- 1))
utter_rep = torch.sum((batch_rep * att_w), dim=1)
return (utter_rep, att_w) |
class TFResNetEmbeddings(tf.keras.layers.Layer):
    """ResNet stem: 7x7 stride-2 convolution followed by a 3x3 stride-2 max
    pool (with explicit symmetric padding)."""

    def __init__(self, config: ResNetConfig, **kwargs) -> None:
        super().__init__(**kwargs)
        self.embedder = TFResNetConvLayer(
            config.embedding_size, kernel_size=7, stride=2,
            activation=config.hidden_act, name='embedder')
        self.pooler = tf.keras.layers.MaxPool2D(
            pool_size=3, strides=2, padding='valid', name='pooler')
        self.num_channels = config.num_channels

    def call(self, pixel_values: tf.Tensor, training: bool = False) -> tf.Tensor:
        _, _, _, num_channels = shape_list(pixel_values)
        # Channel count can only be validated eagerly; graph mode skips it.
        if tf.executing_eagerly() and (num_channels != self.num_channels):
            raise ValueError('Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
        hidden_state = self.embedder(pixel_values)
        # Pad height/width by 1 on each side before the valid-padded pool.
        hidden_state = tf.pad(hidden_state, [[0, 0], [1, 1], [1, 1], [0, 0]])
        return self.pooler(hidden_state)
def vgg19_bn(pretrained=False, dataset_history=None, dataset2num_classes=None, **kwargs):
    """Construct a VGG-19 model with batch normalization.

    :param pretrained: if True, skip weight initialization (weights are
        expected to be loaded afterwards).
    :param dataset_history: list of datasets seen so far (fresh list if None).
    :param dataset2num_classes: dataset-name -> class-count map (fresh dict
        if None).
    """
    # The original used mutable defaults ([] / {}), which are shared across
    # calls and silently accumulate state; use None sentinels instead.
    if dataset_history is None:
        dataset_history = []
    if dataset2num_classes is None:
        dataset2num_classes = {}
    if pretrained:
        kwargs['init_weights'] = False
    return VGG(make_layers(cfg['E'], batch_norm=True), dataset_history, dataset2num_classes, **kwargs)
def extract_features(cfg, model, loader):
    """Run ``model`` over every batch in ``loader`` and return the stacked,
    L2-normalized feature matrix (one row per sample).

    :param cfg: config exposing ``cfg.MODEL.DEVICE``.
    :param model: feature extractor (torch.nn.Module).
    :param loader: yields (data, pid, camid, img_path) tuples.
    """
    device = cfg.MODEL.DEVICE
    model.to(device)
    model.eval()
    feats = []
    with torch.no_grad():
        for batch in loader:
            (data, pid, camid, img_path) = batch
            # Honor the configured device — the original hard-coded
            # data.cuda(), which ignored cfg.MODEL.DEVICE and crashed on
            # CPU-only machines.
            data = data.to(device)
            feats.append(model(data))
    feats = torch.cat(feats, dim=0)
    # Row-wise L2 normalization so cosine similarity == dot product.
    feats = torch.nn.functional.normalize(feats, dim=1, p=2)
    return feats
def show_ae(autoencoder):
    """Visualize an autoencoder on the global X_test set: for the first 10
    samples show the original (row 1), the latent code as a stem plot
    (row 2) and the reconstruction (row 3)."""
    encoder = autoencoder.Encoder()
    decoder = autoencoder.Decoder()
    codes = encoder.predict(X_test)
    recons = decoder.predict(codes)
    n = 10
    plt.figure(figsize=(20, 6))
    for col in range(n):
        # Row 1: original 28x28 digit.
        ax = plt.subplot(3, n, col + 1)
        plt.imshow(X_test[col].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        # Row 2: flattened latent code.
        ax = plt.subplot(3, n, col + 1 + n)
        plt.stem(codes[col].reshape(-1))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        # Row 3: reconstruction.
        ax = plt.subplot(3, n, col + 1 + 2 * n)
        plt.imshow(recons[col].reshape(28, 28))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    plt.show()
class Metric():
    """Accumulates face-verification embeddings (original + flipped passes)
    and computes the mean k-fold ROC accuracy."""

    def __init__(self, nfolds):
        self.embeddings = []
        self.actual_issame = None
        self.nfolds = nfolds

    def update(self, preds, labels):
        # The same-pair labels arrive once; keep the first batch's flags.
        if self.actual_issame is None:
            self.actual_issame = np.asarray(labels).squeeze()
        self.embeddings.append(preds[0])

    def reset(self):
        self.embeddings = []
        self.actual_issame = None

    def result(self):
        """Fuse the two passes, normalize, and return mean ROC accuracy."""
        stacked = np.array(self.embeddings)
        half = stacked.shape[0] // 2
        # First half: original images; second half: flipped copies — fuse by sum.
        fused = (stacked[:half] + stacked[half:]).squeeze()
        fused = sklearn.preprocessing.normalize(fused)
        thresholds = np.arange(0, 4, 0.02)
        # Even rows pair with the following odd rows.
        accuracy = calculate_roc(thresholds, fused[0::2], fused[1::2],
                                 np.asarray(self.actual_issame),
                                 nrof_folds=self.nfolds)
        return np.mean(accuracy)
def _megatron_glm_block_mem_requirement(ranks, tensor_shape, orig_module):
    """Total (forward, grad, param) memory for one GLM transformer block:
    element-wise sum of the MLP and attention requirements."""
    mlp = _megatron_glm_mlp_mem_requirement(ranks, tensor_shape, orig_module.mlp)
    attn = _megatron_glm_attn_mem_requirement(ranks, tensor_shape, orig_module.attention)
    return tuple(m + a for m, a in zip(mlp, attn))
def writeTestDescriptions(output, suite):
    """Emit a description for every test case in ``suite`` to ``output``."""
    for case in suite['tests']:
        writeTestDescription(output, suite, case)
def main():
    """Parse CLI options and build the requested dataset plus a JSON config.

    Dispatch order: multi-target list > zundamon (target 100) > any other
    explicit target > default dataset.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--filename', type=str, required=True, help='filelist for configuration')
    parser.add_argument('-s', '--sr', type=int, default=24000, help='sampling rate (default = 24000)')
    parser.add_argument('-t', '--target', type=int, default=9999, help='pre_traind targetid (zundamon = 100, sora = 101, methane = 102, tsumugi = 103)')
    parser.add_argument('-m', '--multi_target', type=str, default=None, help='pre_traind targetid (zundamon = 100, sora = 101, methane = 102, tsumugi = 103)')
    parser.add_argument('-c', '--config', type=str, default='./configs/baseconfig.json', help='JSON file for configuration')
    args = parser.parse_args()
    filename = args.filename
    print(filename)
    # `is not None` instead of `!= None` (identity check for the sentinel).
    if args.multi_target is not None:
        n_spk = create_dataset_multi_character(filename, args.multi_target)
    elif args.target == 100:
        # 100 == zundamon; the original's extra `target != 9999` check was
        # redundant (== 100 already implies it).
        n_spk = create_dataset_zundamon(filename)
    elif args.target != 9999:
        n_spk = create_dataset_character(filename, args.target)
    else:
        n_spk = create_dataset(filename)
    create_json(filename, n_spk, args.sr, args.config)
def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    """Convert a fairseq M2M-100 checkpoint into a HuggingFace
    M2M100ForConditionalGeneration model and return it."""
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    fairseq_args = checkpoint['args']
    state_dict = checkpoint['model']
    remove_ignore_keys_(state_dict)
    # Vocab size is taken from the embedding matrix, not the args.
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]
    config = M2M100Config(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=fairseq_args.encoder_layers,
        decoder_layers=fairseq_args.decoder_layers,
        encoder_attention_heads=fairseq_args.encoder_attention_heads,
        decoder_attention_heads=fairseq_args.decoder_attention_heads,
        encoder_ffn_dim=fairseq_args.encoder_ffn_embed_dim,
        decoder_ffn_dim=fairseq_args.decoder_ffn_embed_dim,
        d_model=fairseq_args.encoder_embed_dim,
        encoder_layerdrop=fairseq_args.encoder_layerdrop,
        decoder_layerdrop=fairseq_args.decoder_layerdrop,
        dropout=fairseq_args.dropout,
        attention_dropout=fairseq_args.attention_dropout,
        activation_dropout=fairseq_args.activation_dropout,
        activation_function='relu')
    # Tie the shared embedding to the decoder's token embedding.
    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
class DanaCli():
def __init__(self, domain, serie_cfg_path):
self._domain = domain
self._serie_cfg_path = serie_cfg_path
self._exist_series = self.load_series_from_cfg()
def service_available(self):
index_url = (' % self._domain)
request = SixUrlRequest(index_url)
response = None
try:
response = SixUrlOpen(request).read()
except Exception:
print('Dana service is not available.')
return (response is not None)
def serie_available(self, serie_id):
return (serie_id in self._exist_series)
def add_build(self, project_id, build_id, build_hash, abbrev_hash, author_name, author_mail, subject, url=None, override=False):
build_data = {'projectId': project_id, 'build': {'buildId': build_id, 'infos': {'hash': build_hash, 'abbrevHash': abbrev_hash, 'authorName': author_name, 'authorEmail': author_mail, 'subject': subject, 'url': url}}, 'override': override}
request_url = (' % self._domain)
succ = self.post_data(request_url, build_data)
if succ:
print(('Add a build id, build_data: %s' % build_data))
else:
print(('Add build id failed. build_data: %s' % build_data))
return succ
def add_benchmark_serie(self, project_id, serie_id, range, required, trend, base_id=None, description=None, infos=None, override=True):
serie_data = {'projectId': project_id, 'serieId': serie_id, 'analyse': {'benchmark': {'range': range, 'required': required, 'trend': trend}}, 'override': override}
succ = self.add_serie(serie_data, base_id, description, infos)
if succ:
print(('Add a benchmark serie, serie_data: %s' % serie_data))
else:
print(('Add benchmark serie failed. serie_data: %s' % serie_data))
return succ
def add_test_serie(self, project_id, serie_id, base_id=None, propagate=True, description=None, infos=None, override=True):
serie_data = {'projectId': project_id, 'serieId': serie_id, 'analyse': {'test': {'propagate': propagate}}, 'override': override}
succ = self.add_serie(serie_data, base_id, description, infos)
if succ:
print(('Add a test serie, serie_data: %s' % serie_data))
else:
print(('Add test serie failed. serie_data: %s' % serie_data))
return succ
def add_sample(self, project_id, serie_id, build_id, value, override=False, skip_analysis=False):
    """Convenience wrapper: submit a single (build_id, value) sample."""
    single = [{'buildId': build_id, 'value': value}]
    return self.add_samples(project_id, serie_id, single, override, skip_analysis)
def add_samples(self, project_id, serie_id, samples, override=False, skip_analysis=False):
    """POST a batch of samples ([{'buildId': .., 'value': ..}, ...]) to Dana.

    Returns True on success.
    """
    samples_data = {'projectId': project_id, 'serieId': serie_id, 'samples': samples, 'override': override, 'skipAnalysis': skip_analysis}
    # NOTE(review): the URL literal was truncated in this copy; reconstructed
    # as Dana's standard addSample REST endpoint -- TODO confirm the path.
    request_url = '%s/apis/addSample' % self._domain
    succ = self.post_data(request_url, samples_data)
    if succ:
        print('Add samples, samples_data: %s' % samples_data)
    else:
        print('Add samples failed. samples_data: %s' % samples_data)
    return succ
def add_serie(self, serie_data, base_id=None, description=None, infos=None):
    """POST a serie definition to Dana, then cache its id locally.

    Optional base_id/description/infos are merged into *serie_data*
    before posting (base_id goes under serie_data['analyse']['base']).
    Returns True only when both the POST and the local cache write succeed.
    """
    if base_id is not None:
        serie_data['analyse']['base'] = base_id
    if description is not None:
        serie_data['description'] = description
    if infos is not None:
        serie_data['infos'] = infos
    # NOTE(review): the URL literal was truncated in this copy; reconstructed
    # as Dana's standard addSerie REST endpoint -- TODO confirm the path.
    request_url = '%s/apis/addSerie' % self._domain
    succ = self.post_data(request_url, serie_data)
    if succ:
        # Remember the id so subsequent runs skip re-registering it.
        succ = self.write_serie_to_cfg(serie_data['serieId'])
    return succ
def post_data(self, request_url, json_data):
    """POST *json_data* (a JSON-serializable object) to *request_url*.

    Returns True when the server replies with a short acknowledgement
    containing 'successfull' (sic -- that is the server-side spelling),
    False on any HTTP error or unexpected response body.
    """
    payload = json.dumps(json_data).replace("'", '"')
    request = SixUrlRequest(request_url, headers=DANA_HEADERS, data=payload.encode())
    try:
        response = SixUrlOpen(request).read()
    except Exception as e:
        print('Http error, url=%s\npost_data=%s\n%s' % (request_url, payload, e))
        return False
    result = response.decode()
    # Success acks are tiny; anything long is an error page.
    return len(result) < 30 and 'successfull' in result
def load_series_from_cfg(self):
    """Read the cached serie ids (one per line) from the cfg file.

    Returns an empty list when the file does not exist yet.  The file is
    locked exclusively for the duration of the read so a concurrent
    writer cannot interleave with us.
    """
    if not os.path.exists(self._serie_cfg_path):
        return []
    with open(self._serie_cfg_path, 'r') as cfg:
        fcntl.flock(cfg.fileno(), fcntl.LOCK_EX)
        return [line.strip() for line in cfg.readlines()]
def write_serie_to_cfg(self, serie_id):
    """Append *serie_id* to the cfg file and the in-memory cache.

    Returns False for an empty id; True otherwise, including the
    already-cached case (which is a no-op).  The file is locked
    exclusively while appending.
    """
    if len(serie_id) == 0:
        print('serie_id is empty.')
        return False
    if serie_id in self._exist_series:
        return True
    with open(self._serie_cfg_path, 'a') as cfg:
        fcntl.flock(cfg.fileno(), fcntl.LOCK_EX)
        cfg.write('%s\n' % serie_id)
        self._exist_series.append(serie_id)
    return True
class TopDownLocalChaFuse(HybridBlock):
    """Top-down channel fusion block (MXNet gluon HybridBlock).

    A 1x1 conv + BN on the high-level feature map produces a sigmoid
    gate that modulates the low-level features channel-wise; the gated
    low-level map is added to the high-level one and refined by a
    3x3 conv + BN + ReLU.
    """

    def __init__(self, channels=64):
        super(TopDownLocalChaFuse, self).__init__()
        self.channels = channels
        with self.name_scope():
            # Gate branch: 1x1 conv + BN over the high-level features.
            self.global_att = nn.HybridSequential(prefix='global_att')
            self.global_att.add(nn.Conv2D(self.channels, kernel_size=1, strides=1, padding=0))
            self.global_att.add(nn.BatchNorm())
            self.sigmoid = nn.Activation('sigmoid')
            # Post-fusion refinement: 3x3 conv + BN + ReLU.
            self.post = nn.HybridSequential(prefix='post')
            self.post.add(nn.Conv2D(channels, kernel_size=3, strides=1, padding=1, dilation=1))
            self.post.add(nn.BatchNorm())
            self.post.add(nn.Activation('relu'))

    def hybrid_forward(self, F, xh, xl):
        # Gate the low-level features (xl) with attention computed from
        # the high-level features (xh), then fuse additively.
        gate = self.sigmoid(self.global_att(xh))
        fused = xh + F.broadcast_mul(xl, gate)
        return self.post(fused)
def main(argv):
    """Entry point: train GeneratorModel on the (optionally augmented) dataset.

    All configuration comes from FLAGS.  After each epoch the callback
    evaluates on a sampled training subset and the validation set (and on
    test when FLAGS.TEST is set), and checkpoints the model periodically.
    """
    torch.manual_seed(FLAGS.seed)
    np.random.seed(FLAGS.seed)
    hlog.flags()
    # Optional augmentation examples produced by a previous generation run.
    if FLAGS.augment is not None:
        with open(FLAGS.augment) as fh:
            aug_data = json.load(fh)
    else:
        aug_data = []
    dataset = get_dataset(aug_data=aug_data, invert=FLAGS.invert)
    model = GeneratorModel(dataset.vocab, copy=True, self_attention=False).to(_flags.device())
    # One-element list so the nested closures can rebind the flag.
    fine_tune = [True]

    def sample():
        # NOTE(review): the original if/else returned the identical
        # expression on both branches, so fine_tune never affected
        # sampling; the redundant branch is collapsed here.
        return dataset.sample_train(aug_ratio=FLAGS.aug_ratio)

    def callback(i_epoch):
        """Per-epoch hook: evaluate, optionally checkpoint; returns val accuracy."""
        # Switch to fine-tuning after 20 epochs.  Dead while fine_tune
        # starts True -- presumably a leftover of staged training.
        if (not fine_tune[0]) and i_epoch >= 20:
            hlog.log('FINE_TUNE')
            fine_tune[0] = True
        model.eval()
        final = i_epoch == FLAGS.n_epochs - 1
        with hlog.task('eval_train', timer=False):
            train_data = [dataset.sample_train() for _ in range(1000)]
            evaluate(model, train_data, dataset)
        with hlog.task('eval_val', timer=False):
            val_data = dataset.get_val()
            val_acc = evaluate(model, val_data, dataset, vis=final, beam=final)
        if FLAGS.TEST and (final or FLAGS.test_curve):
            with hlog.task('eval_test', timer=False):
                test_data = dataset.get_test()
                evaluate(model, test_data, dataset, beam=final)
        if (i_epoch + 1) % FLAGS.n_checkpoint == 0:
            torch.save(model.state_dict(), os.path.join(FLAGS.model_dir, 'model.%05d.chk' % i_epoch))
        return val_acc

    train(dataset, model, sample, callback, staged=False)
def compute_repair_accuracy(pred, gold):
    """Fraction of predictions that exactly match their gold target.

    pred: indexable sequence of predicted outputs (at least len(gold) long).
    gold: sequence of gold outputs.

    Returns a (accuracy, "matched/total") pair, where accuracy is a float
    in [0, 1].  An empty *gold* yields (0.0, '0/0'); the original raised
    ZeroDivisionError in that case.
    """
    total = len(gold)
    if total == 0:
        # Guard against division by zero on empty input.
        return 0.0, '0/0'
    matched = sum(1 for i in range(total) if pred[i] == gold[i])
    repair_count = '%i/%i' % (matched, total)
    return matched / float(total), repair_count
# NOTE(review): the decorator below was garbled to a bare `_model` in this
# copy; `@register_model` is the standard timm registry decorator for these
# builders -- TODO confirm against the original file.
@register_model
def ssl_resnext101_32x4d(pretrained=True, **kwargs):
    """Construct a semi-supervised-pretrained ResNeXt101-32x4d.

    pretrained: when True, load the weights referenced by the model's
        default_cfg (num_classes/in_chans taken from kwargs).
    """
    model = ResNet(Bottleneck, [3, 4, 23, 3], cardinality=32, base_width=4, **kwargs)
    model.default_cfg = default_cfgs['ssl_resnext101_32x4d']
    if pretrained:
        # NOTE(review): num_classes default of 0 looks unusual (timm
        # typically defaults to 1000) -- verify against the checkpoint.
        load_pretrained(model, num_classes=kwargs.get('num_classes', 0), in_chans=kwargs.get('in_chans', 3))
    return model
def save_view_point(pcd, filename):
    """Interactively choose a viewpoint for *pcd* and persist it.

    Opens an Open3D window showing the geometry; once the user closes the
    window, the current pinhole camera parameters are written to *filename*.
    """
    visualizer = o3d.visualization.Visualizer()
    visualizer.create_window()
    visualizer.add_geometry(pcd)
    visualizer.run()  # blocks until the user closes the window
    camera = visualizer.get_view_control().convert_to_pinhole_camera_parameters()
    o3d.io.write_pinhole_camera_parameters(filename, camera)
    visualizer.destroy_window()
# NOTE(review): this decorator was garbled to `_task('dummy_lm')` in this
# copy; `@register_task` is the fairseq registry decorator -- TODO confirm.
@register_task('dummy_lm')
class DummyLMTask(FairseqTask):
    """Synthetic LM task that serves one fixed dummy batch, for benchmarking.

    NOTE(review): the staticmethod/classmethod/property decorators below
    were stripped in this copy and are restored to match the FairseqTask
    contract (setup_task is invoked on the class; the dictionaries are
    accessed as attributes) -- confirm against the original file.
    """

    @staticmethod
    def add_args(parser):
        """Add task-specific command-line flags."""
        parser.add_argument('--dict-size', default=49996, type=int)
        parser.add_argument('--dataset-size', default=100000, type=int)
        parser.add_argument('--tokens-per-sample', default=512, type=int, help='max number of total tokens over all segments per sample for BERT dataset')

    def __init__(self, args, dictionary):
        super().__init__(args)
        self.dictionary = dictionary
        self.seed = args.seed
        # Pad vocab size to a multiple of 8 (friendlier embedding shapes).
        dictionary.pad_to_multiple_(8)
        # One fixed token sequence: src is seq[:-1], tgt the shifted seq[1:].
        seq = torch.arange(args.tokens_per_sample + 1) + dictionary.pad() + 1
        self.dummy_src = seq[:-1]
        self.dummy_tgt = seq[1:]

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Build the synthetic dictionary and construct the task."""
        dictionary = Dictionary()
        for i in range(args.dict_size):
            dictionary.add_symbol('word{}'.format(i))
        logger.info('dictionary: {} types'.format(len(dictionary)))
        return cls(args, dictionary)

    def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Register a DummyDataset for *split* holding one repeated batch."""
        if self.args.max_sentences is not None:
            bsz = self.args.max_sentences
        else:
            bsz = max(1, self.args.max_tokens // self.args.tokens_per_sample)
        self.datasets[split] = DummyDataset({'id': 1, 'net_input': {'src_tokens': torch.stack([self.dummy_src for _ in range(bsz)]), 'src_lengths': torch.full((bsz,), self.args.tokens_per_sample, dtype=torch.long)}, 'target': torch.stack([self.dummy_tgt for _ in range(bsz)]), 'nsentences': bsz, 'ntokens': (bsz * self.args.tokens_per_sample)}, num_items=self.args.dataset_size, item_size=self.args.tokens_per_sample)

    @property
    def source_dictionary(self):
        return self.dictionary

    @property
    def target_dictionary(self):
        return self.dictionary
class AttEncoder(nn.Module):
    """Stack of AttLayer blocks with optional hidden-state/attention output."""

    def __init__(self, config):
        super(AttEncoder, self).__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([AttLayer(config) for _ in range(config.num_hidden_layers)])

    def forward(self, hidden_states, attention_mask, head_mask=None, encoder_history_states=None):
        """Run the layer stack.

        Returns a tuple (last_hidden_state, [all_hidden_states],
        [all_attentions]); the optional entries appear only when the
        corresponding config flag is set.
        NOTE(review): head_mask is indexed per layer below, so despite
        the None default a per-layer sequence is expected -- confirm
        with callers.
        """
        collected_states = ()
        collected_attentions = ()
        for idx, block in enumerate(self.layer):
            if self.output_hidden_states:
                collected_states += (hidden_states,)
            history = None if encoder_history_states is None else encoder_history_states[idx]
            block_out = block(hidden_states, attention_mask, head_mask[idx], history)
            hidden_states = block_out[0]
            if self.output_attentions:
                collected_attentions += (block_out[1],)
        # Include the final hidden state in the collected sequence.
        if self.output_hidden_states:
            collected_states += (hidden_states,)
        result = (hidden_states,)
        if self.output_hidden_states:
            result += (collected_states,)
        if self.output_attentions:
            result += (collected_attentions,)
        return result
class Critic(nn.Module):
    """Twin-Q critic over encoded observations.

    NOTE(review): the encoder comes from a module-level `timm_encoder`
    and `encoder_cfg` selects which of its forward_{0,1,2} heads to use
    -- confirm those names against the surrounding file.
    """

    def __init__(self, encoder_cfg, action_shape, hidden_dim, hidden_depth):
        super().__init__()
        self.encoder = timm_encoder
        self.encoder_env = encoder_cfg
        q_in_dim = self.encoder.feature_dim + action_shape[0]
        self.Q1 = utils.mlp(q_in_dim, hidden_dim, 1, hidden_depth)
        self.Q2 = utils.mlp(q_in_dim, hidden_dim, 1, hidden_depth)
        self.outputs = dict()
        self.apply(utils.weight_init)

    def forward(self, obs, action, detach_encoder=False):
        assert obs.size(0) == action.size(0)
        # Route through the per-environment encoder head (env >= 2 falls
        # through to forward_2).
        if self.encoder_env == 0:
            obs = self.encoder.forward_0(obs, detach=detach_encoder)
        elif self.encoder_env == 1:
            obs = self.encoder.forward_1(obs, detach=detach_encoder)
        else:
            obs = self.encoder.forward_2(obs, detach=detach_encoder)
        obs_action = torch.cat([obs, action], dim=-1)
        q1 = self.Q1(obs_action)
        q2 = self.Q2(obs_action)
        # Keep the latest Q values around for logging.
        self.outputs['q1'] = q1
        self.outputs['q2'] = q2
        return q1, q2

    def log(self, logger, step):
        """Emit histograms of the latest Q values and the linear-layer params."""
        for name, value in self.outputs.items():
            logger.log_histogram(f'train_critic/{name}_hist', value, step)
        assert len(self.Q1) == len(self.Q2)
        for i, (m1, m2) in enumerate(zip(self.Q1, self.Q2)):
            assert type(m1) == type(m2)
            if type(m1) is nn.Linear:
                logger.log_param(f'train_critic/q1_fc{i}', m1, step)
                logger.log_param(f'train_critic/q2_fc{i}', m2, step)
class TimeFeatureWrapper(gym.Wrapper):
    """Append a remaining-time scalar to a Box observation.

    The extra feature decreases linearly from 1 to 0 over the episode;
    in test mode it is pinned at 1.0 so a policy cannot exploit the clock.
    """

    def __init__(self, env, max_steps=1000, test_mode=False):
        assert isinstance(env.observation_space, gym.spaces.Box)
        low, high = env.observation_space.low, env.observation_space.high
        # Extend the bounds with the time feature's [0, 1] range.
        low, high = np.concatenate((low, [0])), np.concatenate((high, [1.0]))
        env.observation_space = gym.spaces.Box(low=low, high=high, dtype=np.float32)
        super(TimeFeatureWrapper, self).__init__(env)
        # Prefer the wrapped TimeLimit's horizon when one is available.
        if isinstance(env, TimeLimit):
            self._max_steps = env._max_episode_steps
        else:
            self._max_steps = max_steps
        self._current_step = 0
        self._test_mode = test_mode

    def reset(self):
        self._current_step = 0
        return self._get_obs(self.env.reset())

    def step(self, action):
        self._current_step += 1
        obs, reward, done, info = self.env.step(action)
        return self._get_obs(obs), reward, done, info

    def _get_obs(self, obs):
        """Concatenate the remaining-time feature onto *obs*."""
        remaining = 1 - self._current_step / self._max_steps
        if self._test_mode:
            remaining = 1.0
        return np.concatenate((obs, [remaining]))
def register_all_coco(root):
    """Register all predefined COCO instance and panoptic splits under *root*.

    Instance splits come from _PREDEFINED_SPLITS_COCO; panoptic splits from
    _PREDEFINED_SPLITS_COCO_PANOPTIC, reusing the image root and instance
    json of the matching (already registered) instances split.
    """
    for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_COCO.items():
        for key, (image_root, json_file) in splits_per_dataset.items():
            # URI-style annotation paths (e.g. s3://) are used verbatim;
            # plain paths are resolved under *root*.
            ann_file = json_file if '://' in json_file else os.path.join(root, json_file)
            register_coco_instances(key, _get_builtin_metadata(dataset_name), ann_file, os.path.join(root, image_root))
    for prefix, (panoptic_root, panoptic_json, semantic_root) in _PREDEFINED_SPLITS_COCO_PANOPTIC.items():
        # "<name>_panoptic" reuses metadata from the "<name>" instances split.
        prefix_instances = prefix[:-len('_panoptic')]
        instances_meta = MetadataCatalog.get(prefix_instances)
        image_root, instances_json = instances_meta.image_root, instances_meta.json_file
        register_coco_panoptic_separated(prefix, _get_builtin_metadata('coco_panoptic_separated'), image_root, os.path.join(root, panoptic_root), os.path.join(root, panoptic_json), os.path.join(root, semantic_root), instances_json)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.