code stringlengths 101 5.91M |
|---|
def read_vfiles(vfiles):
    """Parse validation-log files into ``{model_name: {step: {metric: value}}}``.

    The model name is the second-to-last path component normally, or the
    third-to-last when the path contains ``'//'``.  Each data line looks like
    ``<tag> <step> <name1>: <v1> <name2>: <v2> ...``.
    """
    models = {}
    for path in vfiles:
        parts = path.split('/')
        model_name = parts[-3] if '//' in path else parts[-2]
        steps = {}
        with open(path, 'r') as fh:
            for raw in fh:
                fields = raw.strip().split()
                step = int(fields[1])
                metrics = {}
                # metric tokens come as alternating "name:" / value pairs
                for name_tok, val_tok in zip(fields[2::2], fields[3::2]):
                    metrics[name_tok.strip(':')] = float(val_tok)
                steps[step] = metrics
        models[model_name] = steps
    return models
class RODEncode(nn.Module):
    """3D-convolutional encoder: 2 -> 64 -> 128 -> 256 channels.

    Each stage is conv -> BatchNorm3d -> ReLU; the 'b' convolutions
    downsample (stride 2 on all dims for stages 1-2, spatial-only for
    stage 3).  Kernel 9x5x5 with matching padding keeps sizes aligned.
    """

    def __init__(self):
        super(RODEncode, self).__init__()
        kernel = (9, 5, 5)
        pad = (4, 2, 2)
        # NOTE: modules are created in the same order as the original
        # implementation so parameter initialization is reproducible.
        self.conv1a = nn.Conv3d(in_channels=2, out_channels=64, kernel_size=kernel, stride=(1, 1, 1), padding=pad)
        self.conv1b = nn.Conv3d(in_channels=64, out_channels=64, kernel_size=kernel, stride=(2, 2, 2), padding=pad)
        self.conv2a = nn.Conv3d(in_channels=64, out_channels=128, kernel_size=kernel, stride=(1, 1, 1), padding=pad)
        self.conv2b = nn.Conv3d(in_channels=128, out_channels=128, kernel_size=kernel, stride=(2, 2, 2), padding=pad)
        self.conv3a = nn.Conv3d(in_channels=128, out_channels=256, kernel_size=kernel, stride=(1, 1, 1), padding=pad)
        self.conv3b = nn.Conv3d(in_channels=256, out_channels=256, kernel_size=kernel, stride=(1, 2, 2), padding=pad)
        self.bn1a = nn.BatchNorm3d(num_features=64)
        self.bn1b = nn.BatchNorm3d(num_features=64)
        self.bn2a = nn.BatchNorm3d(num_features=128)
        self.bn2b = nn.BatchNorm3d(num_features=128)
        self.bn3a = nn.BatchNorm3d(num_features=256)
        self.bn3b = nn.BatchNorm3d(num_features=256)
        self.relu = nn.ReLU()

    def forward(self, x):
        stages = ((self.conv1a, self.bn1a), (self.conv1b, self.bn1b),
                  (self.conv2a, self.bn2a), (self.conv2b, self.bn2b),
                  (self.conv3a, self.bn3a), (self.conv3b, self.bn3b))
        for conv, bn in stages:
            x = self.relu(bn(conv(x)))
        return x
def find_best_match(path, prefixes):
    """Return ``(matched_prefix, remainder)`` for the first prefix whose
    components lead the dotted *path*; ``('', path)`` when none matches.

    Each prefix is a sequence of path components (already split on '.').
    """
    parts = path.split('.')
    for prefix in prefixes:
        n = len(prefix)
        if n <= len(parts) and parts[:n] == prefix:
            return '.'.join(prefix), '.'.join(parts[n:])
    return '', path
class RTE(AbstractTask):
    """GLUE RTE (Recognizing Textual Entailment) task wrapper.

    Binary classification ('0'/'1') evaluated with accuracy.  GLUE's test
    labels are hidden, so the 'test' split maps onto validation.
    """
    name = 'rte'
    labels_list = ['0', '1']
    metric = [metrics.accuracy]
    metric_names = ['accuracy']
    # logical split -> actual HF dataset split
    split_to_data_split = {'train': 'train', 'validation': 'validation', 'test': 'validation'}

    def load_dataset(self, split):
        # pinned to the 'master' version of the GLUE loading script
        return datasets.load_dataset('glue', 'rte', split=split, script_version='master')

    def preprocessor(self, example, add_prefix=True):
        # seq2seq framing: "sentence1: ... sentence2: ..." -> label string
        src_texts = ['sentence1:', example['sentence1'], 'sentence2:', example['sentence2']]
        tgt_texts = [str(example['label'])]
        return self.seq2seq_format(src_texts, tgt_texts, add_prefix)
def test_double_double_polynomial(vrblvl=0):
    """Round-trip one polynomial through the double-double container.

    Returns 0 when the symbol table ends up with exactly two symbols,
    1 otherwise, so the caller can treat the result as a failure count.
    """
    set_double_double_dimension(2, vrblvl)
    dim = get_double_double_dimension(vrblvl)
    print('the dimension :', dim)
    source = 'x*y - 1;'
    index = 1
    set_double_double_polynomial(index, dim, source, vrblvl)
    retrieved = get_double_double_polynomial(index, vrblvl)
    print('the retrieved polynomial :', retrieved)
    symbols = string_of_symbols(100, vrblvl)
    print('the list of symbols :', symbols)
    return int(len(symbols) != 2)
def Process(args):
    """Convert each line of ``args.file_path`` into k-mer sentences.

    With ``sampling_rate != 1.0`` the line is sampled into (start, end)
    windows via ``sampling_fix``; otherwise it is cut into non-overlapping
    chunks via ``cut_no_overlap``.  Output goes to
    ``<output_path>_sam<kmer>`` or ``<output_path>_cut<kmer>``.

    Fix vs. original: both file handles are now closed deterministically
    via context managers (the original leaked them), and ``is None`` is
    used instead of ``== None``.
    """
    if args.output_path is None:
        args.output_path = args.file_path
    if args.sampling_rate != 1.0:
        new_file_path = args.output_path + '_sam' + str(args.kmer)
    else:
        new_file_path = args.output_path + '_cut' + str(args.kmer)
    with open(args.file_path, 'r') as old_file, open(new_file_path, 'w') as new_file:
        for line in old_file:
            line_length = len(line)
            if args.sampling_rate != 1.0:
                starts, ends = sampling_fix(length=line_length, kmer=args.kmer,
                                            sampling_rate=args.sampling_rate,
                                            fix_length=args.length)
                for start, end in zip(starts, ends):
                    sentence = get_kmer_sentence(line[start:end], kmer=args.kmer)
                    new_file.write(sentence + '\n')
            else:
                start = 0
                for cut in cut_no_overlap(length=line_length, kmer=args.kmer):
                    sentence = get_kmer_sentence(line[start:start + cut], kmer=args.kmer)
                    start += cut
                    new_file.write(sentence + '\n')
def clip_norms(gs, c):
    """Clip every gradient in *gs* by the global L2 norm of the whole list."""
    global_norm = T.sqrt(sum(T.sum(g ** 2) for g in gs))
    return [clip_norm(g, c, global_norm) for g in gs]
def test_inheritance(msg):
    """Exercise pybind11 inheritance bindings: virtual dispatch through the
    base, passing derived objects to base-typed functions, strict typing of
    derived-only functions, and the no-constructor error path."""
    # Rabbit overrides species() to return 'parrot' on the C++ side
    roger = m.Rabbit('Rabbit')
    assert (((roger.name() + ' is a ') + roger.species()) == 'Rabbit is a parrot')
    # derived instance is accepted where the base type is expected
    assert (m.pet_name_species(roger) == 'Rabbit is a parrot')
    polly = m.Pet('Polly', 'parrot')
    assert (((polly.name() + ' is a ') + polly.species()) == 'Polly is a parrot')
    assert (m.pet_name_species(polly) == 'Polly is a parrot')
    molly = m.Dog('Molly')
    assert (((molly.name() + ' is a ') + molly.species()) == 'Molly is a dog')
    assert (m.pet_name_species(molly) == 'Molly is a dog')
    fred = m.Hamster('Fred')
    assert (((fred.name() + ' is a ') + fred.species()) == 'Fred is a rodent')
    assert (m.dog_bark(molly) == 'Woof!')
    # passing a plain Pet to a Dog-only function must raise with the full
    # pybind11 overload-resolution message (checked verbatim via msg())
    with pytest.raises(TypeError) as excinfo:
        m.dog_bark(polly)
    assert (msg(excinfo.value) == '\n    dog_bark(): incompatible function arguments. The following argument types are supported:\n        1. (arg0: m.class_.Dog) -> str\n\n    Invoked with: <m.class_.Pet object at 0>\n')
    # Chimera exposes no constructor at all
    with pytest.raises(TypeError) as excinfo:
        m.Chimera('lion', 'goat')
    assert ('No constructor defined!' in str(excinfo.value))
def main():
    """Train/evaluate a masked language model with whole-word masking.

    Driven by HfArgumentParser over (ModelArguments, DataTrainingArguments,
    TrainingArguments); returns the eval metrics dict (perplexity) when
    evaluation runs, otherwise an empty dict.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    # a single .json argv is treated as a full config file; otherwise parse CLI flags
    if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
        (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    # checkpoint resume detection: only when training into an existing,
    # non-overwritable output directory
    last_checkpoint = None
    if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
            raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
        elif (last_checkpoint is not None):
            logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
    # NOTE(review): logging is configured only here, after logger.info above
    # already ran — that message may use default handlers
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
    # only the main process logs at INFO; workers stay quiet at WARN
    logger.setLevel((logging.INFO if is_main_process(training_args.local_rank) else logging.WARN))
    # NOTE(review): the two f-strings concatenate with no separator between
    # 'n_gpu: ...' and 'distributed training:' — likely a missing ', '
    logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}'))
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args)
    set_seed(training_args.seed)
    # ---- dataset loading: hub dataset (carving a validation split out of
    # train when none exists) or local text/csv/json files ----
    if (data_args.dataset_name is not None):
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if ('validation' not in datasets.keys()):
            datasets['validation'] = load_dataset(data_args.dataset_name, data_args.dataset_config_name, split=f'train[:{data_args.validation_split_percentage}%]')
            datasets['train'] = load_dataset(data_args.dataset_name, data_args.dataset_config_name, split=f'train[{data_args.validation_split_percentage}%:]')
    else:
        data_files = {}
        if (data_args.train_file is not None):
            data_files['train'] = data_args.train_file
        if (data_args.validation_file is not None):
            data_files['validation'] = data_args.validation_file
        # the file extension selects the HF loader; plain .txt uses 'text'
        extension = data_args.train_file.split('.')[(- 1)]
        if (extension == 'txt'):
            extension = 'text'
        datasets = load_dataset(extension, data_files=data_files)
    # ---- config ----
    config_kwargs = {'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': (True if model_args.use_auth_token else None)}
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.')
    # ---- tokenizer (training one from scratch is unsupported here) ----
    tokenizer_kwargs = {'cache_dir': model_args.cache_dir, 'use_fast': model_args.use_fast_tokenizer, 'revision': model_args.model_revision, 'use_auth_token': (True if model_args.use_auth_token else None)}
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError('You are instantiating a new tokenizer from scratch. This is not supported by this script.You can do it from another script, save it, and load it from here, using --tokenizer_name.')
    # ---- model: pretrained weights or fresh from config ----
    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    else:
        logger.info('Training new model from scratch')
        model = AutoModelForMaskedLM.from_config(config)
    # keep the embedding matrix in sync with the tokenizer's vocab size
    model.resize_token_embeddings(len(tokenizer))
    if training_args.do_train:
        column_names = datasets['train'].column_names
    else:
        column_names = datasets['validation'].column_names
    text_column_name = ('text' if ('text' in column_names) else column_names[0])
    padding = ('max_length' if data_args.pad_to_max_length else False)

    def tokenize_function(examples):
        # drop empty and whitespace-only lines before tokenizing
        examples['text'] = [line for line in examples['text'] if ((len(line) > 0) and (not line.isspace()))]
        return tokenizer(examples['text'], padding=padding, truncation=True, max_length=data_args.max_seq_length)
    tokenized_datasets = datasets.map(tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=[text_column_name], load_from_cache_file=(not data_args.overwrite_cache))
    # optional word-segmentation reference files (needed for Chinese WWM)
    if (data_args.train_ref_file is not None):
        tokenized_datasets['train'] = add_chinese_references(tokenized_datasets['train'], data_args.train_ref_file)
    if (data_args.validation_ref_file is not None):
        tokenized_datasets['validation'] = add_chinese_references(tokenized_datasets['validation'], data_args.validation_ref_file)
    has_ref = (data_args.train_ref_file or data_args.validation_ref_file)
    if has_ref:
        # keep the ref column alive; Trainer would otherwise strip it
        training_args.remove_unused_columns = False
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)
    trainer = Trainer(model=model, args=training_args, train_dataset=(tokenized_datasets['train'] if training_args.do_train else None), eval_dataset=(tokenized_datasets['validation'] if training_args.do_eval else None), tokenizer=tokenizer, data_collator=data_collator)
    # ---- training ----
    if training_args.do_train:
        if (last_checkpoint is not None):
            checkpoint = last_checkpoint
        elif ((model_args.model_name_or_path is not None) and os.path.isdir(model_args.model_name_or_path)):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        output_train_file = os.path.join(training_args.output_dir, 'train_results.txt')
        if trainer.is_world_process_zero():
            with open(output_train_file, 'w') as writer:
                logger.info('***** Train results *****')
                for (key, value) in sorted(train_result.metrics.items()):
                    logger.info(f'  {key} = {value}')
                    writer.write(f'''{key} = {value}
''')
            trainer.state.save_to_json(os.path.join(training_args.output_dir, 'trainer_state.json'))
    # ---- evaluation: report perplexity from eval loss ----
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output['eval_loss'])
        results['perplexity'] = perplexity
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results_mlm_wwm.txt')
        if trainer.is_world_process_zero():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for (key, value) in sorted(results.items()):
                    logger.info(f'  {key} = {value}')
                    writer.write(f'''{key} = {value}
''')
    return results
def test_actionAngleTorus_hessian_linear():
    """The torus Hessian should linearly predict the frequency change dO
    for a small action offset dJ to within 0.1% relative error."""
    from galpy.actionAngle import actionAngleTorus
    from galpy.potential import MWPotential2014
    torus = actionAngleTorus(pot=MWPotential2014)
    jr, jphi, jz = 0.075, 1.1, 0.05
    hess = torus.hessianFreqs(jr, jphi, jz, tol=0.0001, nosym=True)[0]
    delta_j = numpy.array([0.02, 0.005, -0.01])
    # linear prediction: dO = H . dJ
    predicted = numpy.dot(hess, delta_j)
    freqs0 = numpy.array(torus.Freqs(jr, jphi, jz)[:3])
    actual = numpy.array(torus.Freqs(jr + delta_j[0], jphi + delta_j[1], jz + delta_j[2])[:3]) - freqs0
    assert numpy.all(numpy.fabs((predicted - actual) / freqs0) < 0.001), 'actionAngleTorus Hessian does not return good approximation to dO/dJ'
    return None
def makeInternalLink(title, label):
    """Render a wiki-internal link as HTML (or plain label text), dropping
    links whose namespace is not in options.acceptedNamespaces."""
    colon = title.find(':')
    # "Namespace:Page" form — reject unknown namespaces
    if colon > 0 and title[:colon] not in options.acceptedNamespaces:
        return ''
    if colon == 0:
        # leading-colon form, e.g. ":Category:Foo" — check the next segment
        colon2 = title.find(':', colon + 1)
        if colon2 > 1 and title[colon + 1:colon2] not in options.acceptedNamespaces:
            return ''
    if not options.keepLinks:
        return label
    return '<a href="%s">%s</a>' % (quote(title.encode('utf-8')), label)
def json_pack(snippets_dir, video_name, frame_width, frame_height, label='unknown', label_index=(- 1)):
    """Bundle per-frame OpenPose snippet JSONs into one video-level dict.

    Globs ``<video_name>*.json`` under *snippets_dir*; for each frame,
    normalizes keypoint x/y by the frame size and collects per-joint
    confidence scores.  Returns
    ``{'data': [...], 'label': label, 'label_index': label_index}``.

    Fix vs. original: the snippet file is now closed deterministically
    (``json.load(open(...))`` leaked the handle).
    """
    sequence_info = []
    p = Path(snippets_dir)
    for path in p.glob(video_name + '*.json'):
        json_path = str(path)
        print(path)
        # frame index is the second-to-last '_'-separated token of the stem
        frame_id = int(path.stem.split('_')[-2])
        frame_data = {'frame_index': frame_id}
        with open(json_path) as f:
            data = json.load(f)
        skeletons = []
        for person in data['people']:
            score, coordinates = [], []
            keypoints = person['pose_keypoints_2d']
            # keypoints arrive as flat (x, y, confidence) triples
            for i in range(0, len(keypoints), 3):
                coordinates += [keypoints[i] / frame_width, keypoints[i + 1] / frame_height]
                score += [keypoints[i + 2]]
            skeletons += [{'pose': coordinates, 'score': score}]
        frame_data['skeleton'] = skeletons
        sequence_info += [frame_data]
    video_info = dict()
    video_info['data'] = sequence_info
    video_info['label'] = label
    video_info['label_index'] = label_index
    return video_info
class BaseModel():
    """Skeleton for paired (A, B) stream models with a generic train loop.

    Subclasses implement load/save/train_on_instance/eval_on_instance and
    are expected to provide ``use_cuda``, ``handlers``, ``optim`` and
    ``scheduler`` attributes before calling :meth:`train`.

    Fixes vs. original: progress-bar calls are guarded by ``verbose``
    (previously ``pbar`` raised NameError when ``verbose=False``), and the
    second list-length error message now names B_real correctly.
    """

    def load(self, args, **kwargs):
        raise NotImplementedError()

    def save(self, args, **kwargs):
        raise NotImplementedError()

    def train_on_instance(self, x, y):
        raise NotImplementedError()

    def eval_on_instance(self, x, y):
        raise NotImplementedError()

    def _get_stats(self, dict_, mode):
        # Hook: stats to show on the progress bar; default shows nothing.
        return {}

    def _zip(self, A, B):
        # Lazy pairwise iteration, Python 2 compatible (izip there).
        if sys.version[0] == '2':
            from itertools import izip
            return izip(A, B)
        else:
            return zip(A, B)

    def prepare_batch(self, A_real, B_real):
        """Unwrap one-element list batches, cast to float, move to GPU if enabled."""
        if isinstance(A_real, list):
            if len(A_real) > 1:
                raise Exception('A_real should be either a tensor or a list of one element')
            if len(B_real) > 1:
                # original message wrongly said "A_real" here
                raise Exception('B_real should be either a tensor or a list of one element')
            A_real = A_real[0]
            B_real = B_real[0]
        A_real, B_real = A_real.float(), B_real.float()
        if self.use_cuda:
            A_real, B_real = A_real.cuda(), B_real.cuda()
        return A_real, B_real

    def train(self, itr_a_train, itr_b_train, itr_a_valid, itr_b_valid, epochs, model_dir, result_dir, save_every=1, scheduler_fn=None, scheduler_args={}, verbose=True):
        """Run the train/valid loop, append CSV rows to results.txt, and
        checkpoint every ``save_every`` epochs.

        ``scheduler_fn``/``scheduler_args`` are kept for interface
        compatibility; this implementation steps ``self.scheduler`` directly.
        """
        for folder_name in [model_dir, result_dir]:
            if folder_name is not None and not os.path.exists(folder_name):
                os.makedirs(folder_name)
        # append to an existing results file, otherwise start fresh with a header
        f_mode = 'a' if os.path.exists('%s/results.txt' % result_dir) else 'w'
        f = None
        if result_dir is not None:
            f = open('%s/results.txt' % result_dir, f_mode)
        for epoch in range(epochs):
            epoch_start_time = time.time()
            if verbose:
                n_iters = min(len(itr_a_train), len(itr_b_train))
                pbar = tqdm(total=n_iters)
            train_dict = OrderedDict({'epoch': (epoch + 1)})
            for b, (A_real, B_real) in enumerate(self._zip(itr_a_train, itr_b_train)):
                A_real, B_real = self.prepare_batch(A_real, B_real)
                losses, outputs = self.train_on_instance(A_real, B_real)
                for key in losses:
                    this_key = 'train_%s' % key
                    if this_key not in train_dict:
                        train_dict[this_key] = []
                    train_dict[this_key].append(losses[key])
                if verbose:  # bug fix: pbar exists only when verbose
                    pbar.update(1)
                    pbar.set_postfix(self._get_stats(train_dict, 'train'))
                for handler_fn in self.handlers:
                    handler_fn(losses, (A_real, B_real), outputs, {'epoch': (epoch + 1), 'iter': (b + 1), 'mode': 'train'})
            if verbose:
                pbar.close()
            valid_dict = {}
            if itr_a_valid is not None and itr_b_valid is not None:
                if verbose:
                    n_iters = min(len(itr_a_valid), len(itr_b_valid))
                    pbar = tqdm(total=n_iters)
                for b, (A_real, B_real) in enumerate(self._zip(itr_a_valid, itr_b_valid)):
                    A_real, B_real = self.prepare_batch(A_real, B_real)
                    losses, outputs = self.eval_on_instance(A_real, B_real)
                    for key in losses:
                        this_key = 'valid_%s' % key
                        if this_key not in valid_dict:
                            valid_dict[this_key] = []
                        valid_dict[this_key].append(losses[key])
                    if verbose:  # bug fix, as above
                        pbar.update(1)
                        pbar.set_postfix(self._get_stats(valid_dict, 'valid'))
                    for handler_fn in self.handlers:
                        handler_fn(losses, (A_real, B_real), outputs, {'epoch': (epoch + 1), 'iter': (b + 1), 'mode': 'valid'})
                if verbose:
                    pbar.close()
            for key in self.scheduler:
                self.scheduler[key].step()
            # one CSV row per epoch: mean losses, current LRs, wall time
            all_dict = train_dict
            all_dict.update(valid_dict)
            for key in all_dict:
                all_dict[key] = np.mean(all_dict[key])
            for key in self.optim:
                all_dict['lr_%s' % key] = self.optim[key].state_dict()['param_groups'][0]['lr']
            all_dict['time'] = time.time() - epoch_start_time
            str_ = ','.join([str(all_dict[key]) for key in all_dict])
            print(str_)
            if f is not None:
                # NOTE: as in the original, the header line is re-emitted every
                # epoch while the file is in 'w' mode
                if f_mode == 'w':
                    f.write(','.join(all_dict.keys()) + '\n')
                f.write(str_ + '\n')
                f.flush()
            if (epoch + 1) % save_every == 0 and model_dir is not None:
                self.save(filename='%s/%i.pkl' % (model_dir, (epoch + 1)))
        if f is not None:
            f.close()
def mk_vqa_dataloader(anno_path, img_lmdb_dir, cfg, tokenizer, is_train=True):
    """Build a Horovod-sharded DataLoader for VQA.

    anno_path: a single jsonl annotation path or a list of such paths.
    Questions are grouped by image id so one example can carry several
    questions (capped by cfg.max_n_example_per_group for training, 1 for
    eval/inference).
    """
    # accept a single annotation file or a list of them
    if isinstance(anno_path, str):
        raw_datalist = load_jsonl(anno_path)
    else:
        raw_datalist = flat_list_of_lists([load_jsonl(p) for p in anno_path])
    # optional random sub-sampling for data-ratio ablations
    if (cfg.data_ratio != 1.0):
        random.shuffle(raw_datalist)
        raw_datalist = raw_datalist[:int((len(raw_datalist) * cfg.data_ratio))]
    datalist = []
    for raw_d in raw_datalist:
        d = dict(txt=raw_d['question'], img_id=raw_d['image_id'], question_id=raw_d['question_id'])
        # labels / answer_type are optional (absent at inference time)
        if ('labels' in raw_d):
            d['labels'] = raw_d['labels']
        if ('answer_type' in raw_d):
            d['answer_type'] = raw_d['answer_type']
        datalist.append(d)
    # group all questions that share an image
    grouped = defaultdict(list)
    for d in datalist:
        grouped[d['img_id']].append(d)
    group_datalist = mk_input_group(grouped, max_n_example_per_group=(cfg.max_n_example_per_group if is_train else 1), is_train=is_train, example_unique_key='question_id')
    ans2label = load_json(cfg.ans2label_path)
    dataset = ClipBertVQADataset(datalist=group_datalist, tokenizer=tokenizer, img_lmdb_dir=img_lmdb_dir, ans2label=ans2label, max_img_size=cfg.max_img_size, max_txt_len=cfg.max_txt_len)
    LOGGER.info(f'is_train {is_train}, dataset size {len(dataset)} groups, each group {(cfg.max_n_example_per_group if is_train else 1)}')
    if cfg.do_inference:
        batch_size = cfg.inference_batch_size
    else:
        batch_size = (cfg.train_batch_size if is_train else cfg.val_batch_size)
    # shard across Horovod workers; the sampler shuffles, so DataLoader must not
    sampler = DistributedSampler(dataset, num_replicas=hvd.size(), rank=hvd.rank(), shuffle=is_train)
    vqa_collator = VQACollator(tokenizer=tokenizer, max_length=cfg.max_txt_len)
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False, sampler=sampler, num_workers=cfg.n_workers, pin_memory=cfg.pin_mem, collate_fn=vqa_collator.collate_batch)
    return dataloader
def run_eval(args, logger, model, eval_dataloader, all_guids, task_name, return_preds=False):
    """Evaluate *model* over *eval_dataloader* and compute task metrics.

    Returns raw argmax predictions when ``return_preds`` is True, otherwise
    the metrics dict from ``compute_metrics``.

    Refactor vs. original: the two model-type branches duplicated the whole
    loss/accumulation tail verbatim; only the forward pass differs, so the
    tail is now shared.
    """
    model.eval()
    eval_loss = 0
    nb_eval_steps = 0
    preds = []
    pred_guids = []
    out_label_ids = None
    is_melbert = args.model_type in ['MELBERT_MIP', 'MELBERT']
    for eval_batch in tqdm(eval_dataloader, desc='Evaluating'):
        eval_batch = tuple(t.to(args.device) for t in eval_batch)
        # MELBERT variants carry a second (input_ids_2, mask_2, segment_2) view
        if is_melbert:
            (input_ids, input_mask, segment_ids, label_ids, idx,
             input_ids_2, input_mask_2, segment_ids_2) = eval_batch
        else:
            input_ids, input_mask, segment_ids, label_ids, idx = eval_batch
        with torch.no_grad():
            if args.model_type in ['BERT_BASE', 'BERT_SEQ', 'MELBERT_SPV']:
                logits = model(input_ids, target_mask=(segment_ids == 1),
                               token_type_ids=segment_ids, attention_mask=input_mask)
            elif is_melbert:
                logits = model(input_ids, input_ids_2, target_mask=(segment_ids == 1),
                               target_mask_2=segment_ids_2, attention_mask_2=input_mask_2,
                               token_type_ids=segment_ids, attention_mask=input_mask)
            else:
                # unknown model type: the original silently skipped the batch
                continue
            # shared tail (was duplicated per branch in the original)
            loss_fct = nn.NLLLoss()
            tmp_eval_loss = loss_fct(logits.view(-1, args.num_labels), label_ids.view(-1))
            eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            if len(preds) == 0:
                preds.append(logits.detach().cpu().numpy())
                pred_guids.append([all_guids[i] for i in idx])
                out_label_ids = label_ids.detach().cpu().numpy()
            else:
                preds[0] = np.append(preds[0], logits.detach().cpu().numpy(), axis=0)
                pred_guids[0].extend([all_guids[i] for i in idx])
                out_label_ids = np.append(out_label_ids, label_ids.detach().cpu().numpy(), axis=0)
    eval_loss = eval_loss / nb_eval_steps
    preds = preds[0]
    preds = np.argmax(preds, axis=1)
    result = compute_metrics(preds, out_label_ids)
    for key in sorted(result.keys()):
        logger.info(f'  {key} = {str(result[key])}')
    if return_preds:
        return preds
    return result
def _echo_run_names(header, d):
    """Echo a dashed banner for *header*, then one run name per line,
    followed by a blank separator line."""
    click.echo('-----' + header + '-----')
    for run_name in d:
        click.echo(run_name)
    click.echo()
def run_baseline(args, model, inp, dec_prefix, adjust=True):
    """Decode *inp* with the baseline strategy selected by ``args.model``
    (greedy / beam search / diverse beam search / nucleus / temperature)."""
    # summarization has no forced BOS; translation forces the language token
    forced_bos_token_id = None if args.task == 'sum' else dec_prefix[-1]
    if args.max_len == -1:
        # derive a budget from the input length: twice the token count
        input_ids = tokenizer(inp, return_tensors='pt').input_ids
        cur_max_len = input_ids.squeeze().size()[0] * 2
    else:
        cur_max_len = args.max_len
    if adjust:
        adj_batch_size = max(adjust_batch_size(cur_max_len, args.task, args.dataset), 1)
    else:
        adj_batch_size = args.beam_size
    shared = dict(device=args.device, min_len=args.min_len, max_len=cur_max_len)
    if args.model == 'greedy':
        gs = GenericSearch(model, tokenizer, beam_size=1, do_sample=False,
                           num_beam_hyps_to_keep=1, **shared)
    elif args.model == 'bs':
        gs = GenericSearch(model, tokenizer, beam_size=adj_batch_size, do_sample=False,
                           num_beam_hyps_to_keep=adj_batch_size, **shared)
    elif args.model == 'dbs':
        gs = GenericSearch(model, tokenizer, beam_size=adj_batch_size, do_sample=False,
                           num_beam_groups=4, diversity_penalty=args.hamming_penalty,
                           num_beam_hyps_to_keep=adj_batch_size, **shared)
    elif args.model == 'topp':
        gs = GenericSearch(model, tokenizer, beam_size=1, do_sample=True,
                           num_beam_hyps_to_keep=adj_batch_size, top_p=args.top_p, **shared)
    elif args.model == 'temp':
        gs = GenericSearch(model, tokenizer, beam_size=1, do_sample=True,
                           num_beam_hyps_to_keep=adj_batch_size, temperature=args.temp, **shared)
    else:
        raise NotImplementedError
    return gs.run(inp, forced_bos_token_id)
def _selective_search_IJCV_top_k(split, year, top_k):
    """Load a PASCAL VOC imdb wired to the IJCV selective-search proposals,
    keeping only the top_k boxes per image."""
    dataset = datasets.pascal_voc(split, year)
    dataset.roidb_handler = dataset.selective_search_IJCV_roidb
    dataset.config['top_k'] = top_k
    return dataset
def isalpha_num(token):
    """Return True iff *token* contains at least one alphabetic character
    AND at least one numeric character.

    Rewritten with short-circuiting ``any()`` instead of scanning the full
    character set with manual flags and comparing them ``== True``.
    """
    has_alpha = any(ch.isalpha() for ch in token)
    has_num = any(ch.isnumeric() for ch in token)
    return has_alpha and has_num
class XCLIPTextModel(metaclass=DummyObject):
    # Placeholder emitted for environments without PyTorch installed:
    # instantiation (and, via DummyObject, attribute access) raises an
    # informative error through requires_backends.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class ASPP(nn.Module):
    """Atrous Spatial Pyramid Pooling head.

    Four parallel atrous branches plus a global-average-pooling branch are
    concatenated (5 x 256 = 1280 channels) and fused back to 256 channels.
    Dilation rates follow the backbone's output stride (16 or 8).
    """

    def __init__(self, backbone, output_stride, BatchNorm, dropout):
        super(ASPP, self).__init__()
        # input channel count depends on the backbone's final stage
        if 'drn' in backbone:
            inplanes = 512
        elif backbone == 'mobilenet':
            inplanes = 320
        else:
            inplanes = 2048
        if output_stride == 16:
            dilations = [1, 6, 12, 18]
        elif output_stride == 8:
            dilations = [1, 12, 24, 36]
        else:
            raise NotImplementedError
        self.aspp1 = _ASPPModule(inplanes, 256, 1, padding=0, dilation=dilations[0], BatchNorm=BatchNorm)
        self.aspp2 = _ASPPModule(inplanes, 256, 3, padding=dilations[1], dilation=dilations[1], BatchNorm=BatchNorm)
        self.aspp3 = _ASPPModule(inplanes, 256, 3, padding=dilations[2], dilation=dilations[2], BatchNorm=BatchNorm)
        self.aspp4 = _ASPPModule(inplanes, 256, 3, padding=dilations[3], dilation=dilations[3], BatchNorm=BatchNorm)
        self.global_avg_pool = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, 1)),
            nn.Conv2d(inplanes, 256, 1, stride=1, bias=False),
            BatchNorm(256),
            nn.ReLU())
        self.conv1 = nn.Conv2d(1280, 256, 1, bias=False)
        self.bn1 = BatchNorm(256)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)
        self._init_weight()

    def forward(self, x):
        branches = [self.aspp1(x), self.aspp2(x), self.aspp3(x), self.aspp4(x)]
        pooled = self.global_avg_pool(x)
        # upsample the 1x1 pooled features back to the branch resolution
        pooled = F.interpolate(pooled, size=branches[-1].size()[2:], mode='bilinear', align_corners=True)
        branches.append(pooled)
        fused = self.relu(self.bn1(self.conv1(torch.cat(branches, dim=1))))
        return self.dropout(fused)

    def _init_weight(self):
        # Kaiming init for convs; unit-scale/zero-shift for both BN flavors.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                torch.nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, SynchronizedBatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
class BasicBlock(nn.Module):
    """Two-conv residual block.  As in the original, the skip connection is
    added to the second BN's output and only then passed through ReLU."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        norm_layer = BatchNorm2d if norm_layer is None else norm_layer
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError('Dilation > 1 not supported in BasicBlock')
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=relu_inplace)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes, momentum=BN_MOMENTUM)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y)) + residual
        return self.relu(y)
# NOTE(review): bare `_module()` call immediately before a class definition —
# presumably a registry decorator (e.g. `@SOMETHING.register_module()`) that
# lost its `@` and prefix during extraction; confirm against the original file.
_module()
class NerTransform():
    """Convert a raw NER sample into model inputs: token ids, label ids,
    and fixed-length attention/token-type masks of size ``max_len``.
    """

    def __init__(self, label_convertor, max_len):
        self.label_convertor = build_convertor(label_convertor)
        self.max_len = max_len

    def __call__(self, results):
        texts = results['text']
        input_ids = self.label_convertor.convert_text2id(texts)
        labels = self.label_convertor.convert_entity2label(results['label'], len(texts))
        attention_mask = [0] * self.max_len
        token_type_ids = [0] * self.max_len
        # +2 presumably accounts for [CLS]/[SEP] special tokens; clamped to
        # max_len so over-long texts no longer raise IndexError (bug in the
        # original, which iterated range(len(texts) + 2) unconditionally).
        for i in range(min(len(texts) + 2, self.max_len)):
            attention_mask[i] = 1
        return dict(labels=labels, texts=texts, input_ids=input_ids,
                    attention_mask=attention_mask, token_type_ids=token_type_ids)
class AutoModelForMaskedLM():
    # Dummy stand-in used when PyTorch is not installed: both construction
    # and from_pretrained immediately raise via requires_pytorch.
    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    # NOTE(review): defined as an instance method although the real API's
    # from_pretrained is a classmethod — confirm this matches the generator.
    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
class OneHot():
    """Callable transform mapping an integer label tensor to its one-hot
    encoding over ``n_classes``; optionally cast to float."""

    def __init__(self, n_classes, to_float: bool=False):
        self.n_classes = n_classes
        self.to_float = to_float

    def __call__(self, label: torch.Tensor):
        encoded = one_hot(label, self.n_classes)
        if self.to_float:
            return encoded.float()
        return encoded
def get_script(args, BASH_COMMAND_LIST):
    """Write a SLURM batch script that fans the commands out via `srun ... &`.

    Fixes vs. original: drops the redundant truncate-then-reopen dance
    (opening with mode 'w' already truncates) and uses
    ``os.makedirs(..., exist_ok=True)`` instead of a pre-check.
    """
    print('Start writing the command list!')
    job_script = '\n'
    for command in BASH_COMMAND_LIST:
        # each command runs on its own node, backgrounded
        job_script += f'srun -N 1 -n 1 {command} &\n'
    script = get_slurm_script(args, job_script)
    file_path = './bash_files/'
    os.makedirs(file_path, exist_ok=True)
    save_file = file_path + args.file_name
    with open(save_file, 'w') as rsh:
        rsh.write(script)
    print(f'The SLURM .sh File Have Been Saved at {file_path}.')
def parse_config():
    """Parse training/architecture/distributed CLI flags for the NMT trainer.

    Fix vs. original: ``--update_retriever_after`` now declares ``type=int``;
    previously a value given on the command line arrived as a string while
    the default was the int 5000.
    """
    parser = argparse.ArgumentParser()
    # data / vocab
    parser.add_argument('--src_vocab', type=str, default='es.vocab')
    parser.add_argument('--tgt_vocab', type=str, default='en.vocab')
    # architecture
    parser.add_argument('--arch', type=str, choices=['vanilla', 'mem', 'rg'], default='vanilla')
    parser.add_argument('--use_mem_score', action='store_true')
    parser.add_argument('--embed_dim', type=int, default=512)
    parser.add_argument('--ff_embed_dim', type=int, default=2048)
    parser.add_argument('--num_heads', type=int, default=8)
    parser.add_argument('--enc_layers', type=int, default=6)
    parser.add_argument('--dec_layers', type=int, default=6)
    parser.add_argument('--mem_enc_layers', type=int, default=4)
    parser.add_argument('--share_encoder', action='store_true')
    # retrieval
    parser.add_argument('--retriever', type=str, default=None)
    parser.add_argument('--nprobe', type=int, default=64)
    parser.add_argument('--num_retriever_heads', type=int, default=1)
    parser.add_argument('--topk', type=int, default=5)
    # regularization / optimization
    parser.add_argument('--dropout', type=float, default=0.1)
    parser.add_argument('--mem_dropout', type=float, default=0.1)
    parser.add_argument('--label_smoothing', type=float, default=0.1)
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1)
    parser.add_argument('--total_train_steps', type=int, default=100000)
    parser.add_argument('--warmup_steps', type=int, default=4000)
    parser.add_argument('--per_gpu_train_batch_size', type=int, default=4096)
    parser.add_argument('--dev_batch_size', type=int, default=4096)
    parser.add_argument('--rebuild_every', type=int, default=(- 1))
    parser.add_argument('--update_retriever_after', type=int, default=5000)
    # IO
    parser.add_argument('--resume_ckpt', type=str, default=None)
    parser.add_argument('--train_data', type=str, default='dev.txt')
    parser.add_argument('--dev_data', type=str, default='dev.txt')
    parser.add_argument('--test_data', type=str, default='dev.txt')
    parser.add_argument('--ckpt', type=str, default='ckpt')
    parser.add_argument('--print_every', type=int, default=100)
    parser.add_argument('--eval_every', type=int, default=1000)
    parser.add_argument('--only_save_best', action='store_true')
    # distributed setup
    parser.add_argument('--world_size', type=int, default=1)
    parser.add_argument('--gpus', type=int, default=1)
    parser.add_argument('--MASTER_ADDR', type=str, default='localhost')
    parser.add_argument('--MASTER_PORT', type=str, default='55555')
    parser.add_argument('--start_rank', type=int, default=0)
    return parser.parse_args()
def get_marker_parameters():
    """Constants for the ArUco markers used by the tracker (4x4_50 dictionary,
    physical sizes, and per-object sticker edge lengths in mm)."""
    return {
        'dict_id': cv2.aruco.DICT_4X4_50,
        'marker_length': 0.018,
        'marker_length_pixels': 6,
        'pixels_per_mm': 2,
        'sticker_length_mm': {'robots': 25, 'cubes': 28, 'corners': 24},
    }
def placeholder_inputs(batch_size, num_point):
    """Create TF1 placeholders: xyz point clouds, per-cloud labels, and
    per-point integer masks."""
    points = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))
    labels = tf.placeholder(tf.int32, shape=batch_size)
    masks = tf.placeholder(tf.int32, shape=(batch_size, num_point))
    return points, labels, masks
def _process(func, path, repeat):
data = []
try:
for i in range(repeat):
data.append(func(np.loadtxt(path.format(i))))
data = np.array(data)[(~ np.isnan(data))]
return (np.mean(data), np.std(data))
except ValueError as e:
if (len(data) != 0):
print(e)
return (np.nan, np.nan)
try:
for i in range(repeat):
data.append(func(open(path.format(i)).readlines()))
data = np.array(data)[(~ np.isnan(data))]
return (np.mean(data), np.std(data))
except Exception as e:
print(e)
return (np.nan, np.nan) |
class svm_parameter(Structure):
    """ctypes mirror of libsvm's ``svm_parameter`` C struct.

    ``_names`` and ``_types`` are zipped into ``_fields_`` by ``genFields``,
    so their order must match the C struct declaration exactly.  Instances
    are configured from a libsvm command-line option string (e.g.
    ``'-c 1 -t 2'``) or an equivalent list of tokens.
    """
    _names = ['svm_type', 'kernel_type', 'degree', 'gamma', 'coef0', 'cache_size', 'eps', 'C', 'nr_weight', 'weight_label', 'weight', 'nu', 'p', 'shrinking', 'probability']
    _types = [c_int, c_int, c_int, c_double, c_double, c_double, c_double, c_double, c_int, POINTER(c_int), POINTER(c_double), c_double, c_double, c_int, c_int]
    _fields_ = genFields(_names, _types)
    def __init__(self, options=None):
        # None means "defaults only": parse an empty option string.
        if (options == None):
            options = ''
        self.parse_options(options)
    def __str__(self):
        # Dump both the C-struct fields and any extra Python-side attributes.
        s = ''
        attrs = (svm_parameter._names + list(self.__dict__.keys()))
        values = list(map((lambda attr: getattr(self, attr)), attrs))
        for (attr, val) in zip(attrs, values):
            s += (' %s: %s\n' % (attr, val))
        s = s.strip()
        return s
    def set_to_default_values(self):
        """Reset every field to libsvm's svm-train command-line defaults."""
        self.svm_type = C_SVC
        self.kernel_type = RBF
        self.degree = 3
        self.gamma = 0  # 0 is interpreted downstream as 1/num_features
        self.coef0 = 0
        self.nu = 0.5
        self.cache_size = 100
        self.C = 1
        self.eps = 0.001
        self.p = 0.1
        self.shrinking = 1
        self.probability = 0
        self.nr_weight = 0
        self.weight_label = None
        self.weight = None
        self.cross_validation = False
        self.nr_fold = 0
        self.print_func = cast(None, PRINT_STRING_FUN)
    def parse_options(self, options):
        """Populate the struct from svm-train style options.

        Accepts either a string (split on whitespace) or a pre-tokenised
        list.  Raises ValueError on unknown flags or invalid -v folds.
        """
        if isinstance(options, list):
            argv = options
        elif isinstance(options, str):
            argv = options.split()
        else:
            raise TypeError('arg 1 should be a list or a str.')
        self.set_to_default_values()
        self.print_func = cast(None, PRINT_STRING_FUN)
        weight_label = []
        weight = []
        # Walk the tokens as flag/value pairs; i is advanced manually so that
        # value-less flags (-q) and flag-embedded values (-wN) work.
        i = 0
        while (i < len(argv)):
            if (argv[i] == '-s'):
                i = (i + 1)
                self.svm_type = int(argv[i])
            elif (argv[i] == '-t'):
                i = (i + 1)
                self.kernel_type = int(argv[i])
            elif (argv[i] == '-d'):
                i = (i + 1)
                self.degree = int(argv[i])
            elif (argv[i] == '-g'):
                i = (i + 1)
                self.gamma = float(argv[i])
            elif (argv[i] == '-r'):
                i = (i + 1)
                self.coef0 = float(argv[i])
            elif (argv[i] == '-n'):
                i = (i + 1)
                self.nu = float(argv[i])
            elif (argv[i] == '-m'):
                i = (i + 1)
                self.cache_size = float(argv[i])
            elif (argv[i] == '-c'):
                i = (i + 1)
                self.C = float(argv[i])
            elif (argv[i] == '-e'):
                i = (i + 1)
                self.eps = float(argv[i])
            elif (argv[i] == '-p'):
                i = (i + 1)
                self.p = float(argv[i])
            elif (argv[i] == '-h'):
                i = (i + 1)
                self.shrinking = int(argv[i])
            elif (argv[i] == '-b'):
                i = (i + 1)
                self.probability = int(argv[i])
            elif (argv[i] == '-q'):
                # Quiet mode: route libsvm's messages to the null printer.
                self.print_func = PRINT_STRING_FUN(print_null)
            elif (argv[i] == '-v'):
                i = (i + 1)
                self.cross_validation = 1
                self.nr_fold = int(argv[i])
                if (self.nr_fold < 2):
                    raise ValueError('n-fold cross validation: n must >= 2')
            elif argv[i].startswith('-w'):
                # Per-class weight: the class label is embedded in the flag
                # itself (-wLABEL VALUE).
                i = (i + 1)
                self.nr_weight += 1
                weight_label += [int(argv[(i - 1)][2:])]
                weight += [float(argv[i])]
            else:
                raise ValueError('Wrong options')
            i += 1
        libsvm.svm_set_print_string_function(self.print_func)
        # Copy the collected per-class weights into freshly sized C arrays.
        self.weight_label = (c_int * self.nr_weight)()
        self.weight = (c_double * self.nr_weight)()
        for i in range(self.nr_weight):
            self.weight[i] = weight[i]
            self.weight_label[i] = weight_label[i]
class SDFA_Decoder(nn.Module):
    """Multi-scale decoder that can fuse encoder skip features through
    SDFA/OA alignment modules at selected levels.

    Args:
        num_ch_enc: channel counts of the encoder feature maps.
        num_ch_dec: channel counts of the decoder stages.
        output_ch: channels of the final prediction head.
        insert_sdfa: 1-based decoder-stage indices at which an SDFA/OA
            fusion module replaces the plain skip concatenation.
        sdfa_mode: 'OA' or 'SDFA' — which fusion module to build.
        out_mode: '' for one output head, 'two' for an extra fine head
            selected by ``switch`` at forward time.
    """
    def __init__(self, num_ch_enc, num_ch_dec=[64, 64, 64, 128, 256], output_ch=49, insert_sdfa=[], sdfa_mode='OA', out_mode=''):
        super().__init__()
        self.insert_sdfa = insert_sdfa
        self.sdfa_mode = sdfa_mode
        self.out_mode = out_mode
        self.num_layers = (len(num_ch_dec) - 1)
        self.num_enc_feats = (len(num_ch_enc) - 1)
        self.convblocks = {}
        # Deep-copy so the bookkeeping below cannot mutate the caller's list.
        num_ch_enc = copy.deepcopy(num_ch_enc)
        if self.insert_sdfa:
            for idx_insert in self.insert_sdfa:
                # High-res (encoder) / low-res (decoder) channel counts that
                # feed the alignment module at this stage.
                in_ch_num_h = num_ch_enc[(self.num_enc_feats - idx_insert)]
                in_ch_num_l = num_ch_dec[((self.num_layers - idx_insert) + 1)]
                self._build_alingfa(idx_insert, in_ch_num_h, in_ch_num_l)
        idx_feats = self.num_enc_feats
        forward_layer_idx = 1
        # Build decoder stages from the deepest level down to level 0.
        for i in range(self.num_layers, (- 1), (- 1)):
            if (i == self.num_layers):
                num_ch_in = num_ch_enc[idx_feats]
                idx_feats -= 1
            else:
                num_ch_in = num_ch_dec[(i + 1)]
            num_ch_out = num_ch_dec[i]
            self.convblocks[('dec', i, 0)] = ConvBlock(num_ch_in, num_ch_out)
            num_ch_in = num_ch_dec[i]
            if (idx_feats >= 0):
                if (forward_layer_idx not in self.insert_sdfa):
                    # Plain skip: the encoder feature will be concatenated,
                    # widening the second conv's input.
                    num_ch_in += num_ch_enc[idx_feats]
                idx_feats -= 1
            num_ch_out = num_ch_dec[i]
            self.convblocks[('dec', i, 1)] = ConvBlock(num_ch_in, num_ch_out)
            forward_layer_idx += 1
        if (self.out_mode == ''):
            self.convblocks[('out', 0)] = Conv3x3(num_ch_dec[0], output_ch)
        elif (self.out_mode == 'two'):
            self.convblocks[('out', 0)] = Conv3x3(num_ch_dec[0], output_ch)
            self.convblocks[('out_fine', 0)] = Conv3x3(num_ch_dec[0], output_ch)
        else:
            raise NotImplementedError
        # Register every block so nn.Module tracks their parameters.
        self._convs = nn.ModuleList(list(self.convblocks.values()))
    def forward(self, features, img_shape, switch=False):
        """Decode the encoder feature pyramid into the final prediction.

        Args:
            features: list of encoder feature maps (index -1 is the deepest).
            img_shape: target shape whose [2:] dims drive the last upsample.
            switch: with ``out_mode == 'two'`` selects the fine head (also
                forwarded into the SDFA fusion modules).

        Returns:
            (final_out, all_delta_dict, multi_scale_out); multi_scale_out is
            currently always empty.
        """
        all_delta_dict = {}
        multi_scale_out = {}
        x = features[(- 1)]
        idx_feats = (self.num_enc_feats - 1)
        forward_layer_idx = 1
        for i in range(self.num_layers, (- 1), (- 1)):
            x = self.convblocks[('dec', i, 0)](x)
            if (forward_layer_idx in self.insert_sdfa):
                # Fuse the encoder skip feature through the SDFA/OA module and
                # collect its offset/delta diagnostics.
                (x, half_x, delta_dict) = self._forward_sdfa(forward_layer_idx, x, features[idx_feats], switch)
                for (k, v) in delta_dict.items():
                    all_delta_dict[(forward_layer_idx, k)] = v
                idx_feats -= 1
            else:
                # Upsample to the skip feature's size (or the image size at
                # the last stage), then concatenate the skip if one remains.
                if (idx_feats >= 0):
                    tar_shape = features[idx_feats].shape
                elif (i == 0):
                    tar_shape = img_shape
                else:
                    tar_shape = [(s * 2) for s in x.shape]
                x = [self._upsample(x, tar_shape)]
                if (idx_feats >= 0):
                    x += [features[idx_feats]]
                    idx_feats -= 1
                x = torch.cat(x, 1)
            x = self.convblocks[('dec', i, 1)](x)
            forward_layer_idx += 1
        final_out = self.convblocks[('out', 0)](x)
        if ((self.out_mode == 'two') and switch):
            final_out = self.convblocks[('out_fine', 0)](x)
        return (final_out, all_delta_dict, multi_scale_out)
    def _build_alingfa(self, idx_sdfa, ch_h, ch_l):
        # Build the projection conv for the skip feature plus the fusion module.
        self.convblocks[('SDFA-conv', idx_sdfa)] = ConvBlock(ch_h, ch_l, True, True)
        if (self.sdfa_mode == 'OA'):
            self.convblocks[('SDFA-fuse', idx_sdfa)] = OA(ch_l)
        elif (self.sdfa_mode == 'SDFA'):
            self.convblocks[('SDFA-fuse', idx_sdfa)] = SDFA(ch_l)
        else:
            raise NotImplementedError
    def _forward_sdfa(self, idx_sdfa, h_x, l_x, switch=False):
        # Project the skip feature to decoder width, then fuse.
        l_x = self.convblocks[('SDFA-conv', idx_sdfa)](l_x)
        tmp_out = self.convblocks[('SDFA-fuse', idx_sdfa)](h_x, l_x, switch)
        (out_x, sampled_high_stage, delta_dict) = tmp_out
        return (out_x, sampled_high_stage, delta_dict)
    def _upsample(self, x, shape, mode='nearest'):
        # Interpolate x to the spatial (H, W) dims of `shape`.
        return F.interpolate(x, size=shape[2:], mode=mode)
class DynamicLossScaler():
    """Dynamic loss scaler for mixed-precision training.

    The scale is halved on overflow and doubled after ``scale_window``
    consecutive overflow-free iterations.

    Args:
        init_scale: starting loss scale.
        scale_factor: multiplicative factor applied on grow/shrink.
        scale_window: number of clean iterations between scale increases.
    """

    def __init__(self, init_scale=(2.0 ** 15), scale_factor=2.0, scale_window=2000):
        self.loss_scale = init_scale
        self.scale_factor = scale_factor
        self.scale_window = scale_window
        self._iter = 0
        self._last_overflow_iter = (- 1)

    def update_scale(self, overflow):
        """Adjust the loss scale after one iteration.

        Args:
            overflow: whether this iteration's gradients overflowed.
        """
        if overflow:
            self.loss_scale /= self.scale_factor
            self._last_overflow_iter = self._iter
        elif (((self._iter - self._last_overflow_iter) % self.scale_window) == 0):
            self.loss_scale *= self.scale_factor
        self._iter += 1

    @staticmethod
    def has_overflow(grad_norm):
        """Return True if ``grad_norm`` is +inf or NaN.

        BUG FIX: this method takes ``grad_norm`` as its first argument but
        had no ``@staticmethod`` decorator, so instance calls like
        ``scaler.has_overflow(norm)`` would silently bind ``self`` to
        ``grad_norm``.  The decorator makes both instance and class calls
        behave correctly.  (NaN is detected via ``x != x``.)
        """
        if ((grad_norm == float('inf')) or (grad_norm != grad_norm)):
            return True
        return False
from dataclasses import dataclass, field
from typing import List


@dataclass
class DataArguments:
    """Arguments selecting the dataset, its splits, and the prompt dictionary.

    BUG FIX: the class used ``field(...)`` defaults without the
    ``@dataclass`` decorator (likely lost during extraction).  Without it
    the attributes are plain class-level ``Field`` objects —
    ``DataArguments().dataset_path`` would return a ``Field`` instead of
    the default string, and ``default_factory`` would never run.
    """
    # HuggingFace dataset path/name to load.
    dataset_path: str = field(default='tatsu-lab/alpaca_farm')
    dataset_name: str = field(default='alpaca_instructions')
    # Split names used for training and evaluation.
    train_splits: List[str] = field(default_factory=(lambda : ['unlabeled']))
    eval_splits: List[str] = field(default_factory=(lambda : ['val']))
    prompt_dict_path: str = field(default=None, metadata={'help': 'Path to the dictionary for the prompt to format examples.'})
def main():
    """Run the three Apollonius-circle demo instances in sequence."""
    demos = (
        ('solving a general instance of the Apollonius circle problem', solve_general_problem),
        ('solving a special instance of the Apollonius circle problem', solve_special_problem),
        ('solving a perturbed instance of the Apollonius circle problem', solve_perturbed_problem),
    )
    for message, solver in demos:
        print(message)
        solver()
def parse_args():
    """Parse the command-line options of the dataset-browsing tool."""
    arg_parser = argparse.ArgumentParser(description='Browse a dataset')
    arg_parser.add_argument('config', help='train config file path')
    arg_parser.add_argument('--skip-type', type=str, nargs='+', default=['DefaultFormatBundle', 'Normalize', 'Collect'], help='skip some useless pipeline')
    arg_parser.add_argument('--output-dir', default=None, type=str, help='If there is no display interface, you can save it')
    arg_parser.add_argument('--not-show', default=False, action='store_true')
    arg_parser.add_argument('--show-interval', type=int, default=999, help='the interval of show (ms)')
    return arg_parser.parse_args()
class PGFloor(torch.nn.Module):
    """Module wrapper around the custom PGFloorFunc autograd function."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Delegate to the Function so gradients follow its custom backward.
        return PGFloorFunc.apply(x)
class AttnGraphConvolution(nn.Module):
    """Graph convolution with GAT-style single-head attention.

    Computes h = X @ W (+ bias), scores every node pair with a shared
    attention vector ``a`` through a LeakyReLU, masks non-edges, softmaxes
    the scores row-wise, and aggregates neighbour features.

    Args:
        in_features: input feature dimension.
        out_features: output feature dimension.
        bias: whether to add a learnable bias after the linear map.
        dropout: dropout probability applied to the attention weights.
        alpha: negative slope of the LeakyReLU used on attention logits.
        act: output activation (default F.elu).
    """
    def __init__(self, in_features: int, out_features: int, bias: bool=False, dropout: float=0.3, alpha: float=0.2, act=F.elu):
        super(AttnGraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.dropout = dropout
        self.alpha = alpha
        self.act = act
        self.W = nn.Parameter(torch.zeros(in_features, out_features))
        if bias:
            self.bias = nn.Parameter(torch.zeros(out_features))
        else:
            self.register_parameter('bias', None)
        # Attention vector scoring concatenated (h_i, h_j) pairs.
        self.a = nn.Parameter(torch.zeros((2 * out_features), 1))
        self.leakyrelu = nn.LeakyReLU(self.alpha)
        self.reset_parameters()
    def reset_parameters(self):
        """Re-initialise W and a with Xavier-uniform, bias with zeros."""
        nn.init.xavier_uniform_(self.W.data)
        nn.init.xavier_uniform_(self.a.data)
        if (self.bias is not None):
            nn.init.constant_(self.bias.data, 0)
    def forward(self, input: torch.Tensor, adj: torch.Tensor) -> torch.Tensor:
        h = torch.mm(input, self.W)
        if (self.bias is not None):
            h = (h + self.bias)
        N = h.size()[0]
        # All-pairs concatenation: a_input[i, j] = [h_i ; h_j].
        a_input = torch.cat([h.repeat(1, N).view((N * N), (- 1)), h.repeat(N, 1)], dim=1).view(N, (- 1), (2 * self.out_features))
        e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(2))
        # BUG FIX: the mask fill was `-0.0 * ones`, i.e. all zeros, so the
        # row-wise softmax still gave non-neighbours weight exp(0) and
        # attention leaked across non-edges.  Use the canonical GAT large
        # negative fill so masked entries vanish after softmax.
        zero_vec = ((- 9e15) * torch.ones_like(e))
        adj_at = adj.to_dense()
        attention = torch.where((adj_at > 0), e, zero_vec)
        attention = F.softmax(attention, dim=1)
        attention = F.dropout(attention, self.dropout, training=self.training)
        h_prime = torch.matmul(attention, h)
        return self.act(h_prime)
    def __repr__(self):
        return (((((self.__class__.__name__ + ' (') + str(self.in_features)) + ' -> ') + str(self.out_features)) + ')')
def _check_length_and_finiteness_of_metrics(nepochs, inner_logdir, metric_files):
for metric_file in metric_files:
assert (inner_logdir / metric_file).exists()
with (inner_logdir / metric_file).open() as f:
metric = np.loadtxt(f)
assert (len(metric) == nepochs)
assert np.all(np.isfinite(metric)) |
def parse_run_results(run_dict: dict):
    """Parse newline-delimited JSON result logs into exploitability curves.

    Args:
        run_dict: mapping of run name -> path to a results file where each
            line is one JSON-encoded training report.

    Returns:
        dict mapping run name -> {'timesteps': [...], 'episodes': [...],
        'exploitability': [...]} with aligned entries.
    """
    runs_to_parsed_results = {}
    for (name, json_path) in run_dict.items():
        runs_to_parsed_results[name] = {}
        timesteps = []
        episodes = []
        exploitability = []
        print(f'parsing {json_path}')
        with open(json_path, 'r') as json_file:
            for line in json_file:
                try:
                    json_result = json.loads(s=line)
                except json.JSONDecodeError:
                    # Malformed line (e.g. truncated final write): stop
                    # parsing this run entirely.
                    break
                timesteps_entry = json_result['timesteps_total']
                episodes_entry = json_result['episodes_total']
                try:
                    # Exploitability is logged under different keys depending
                    # on the algorithm/version that produced the file.
                    # NOTE(review): a value of exactly 0.0 is falsy and would
                    # be skipped by this or-chain — confirm that is intended.
                    exploitability_entry = (json_result.get('avg_policy_exploitability') or json_result.get('z_avg_policy_exploitability') or json_result.get('exploitability') or json_result.get('approx_exploitability'))
                    if (exploitability_entry is None):
                        raise KeyError
                    if (not any(((tag in json_path) for tag in ['openspiel', 'sparse', 'xfdo', 'nxdo', 'no_limit']))):
                        # Subsample dense logs: consume up to 99 following
                        # lines from the shared file iterator so only every
                        # 100th entry is kept.
                        for i in range(99):
                            try:
                                next(json_file)
                            except StopIteration:
                                break
                except UnicodeDecodeError:
                    continue
                except KeyError:
                    # No exploitability recorded for this entry; skip it.
                    continue
                timesteps.append(timesteps_entry)
                episodes.append(episodes_entry)
                exploitability.append(exploitability_entry)
        runs_to_parsed_results[name]['timesteps'] = timesteps
        runs_to_parsed_results[name]['episodes'] = episodes
        runs_to_parsed_results[name]['exploitability'] = exploitability
    return runs_to_parsed_results
class BezierRNN(nn.Module, metaclass=Named):
    """CNN classifier whose middle stage is an RNN over a Bezier residual block."""

    def __init__(self, num_classes=10, k=64, gn=False, block_size=12):
        super().__init__()
        self.num_classes = num_classes
        stages = [
            conv2d(3, k),
            ResBlock(k, 2 * k, gn=gn, stride=2),
            ResBlock(2 * k, 4 * k, gn=gn, stride=2),
            RNNBlock(BezierResBlock(4 * k, gn=gn, add=True, bends=block_size // 2), L=block_size),
            BNrelu(4 * k, gn=gn),
            # Global average pool over both spatial dims.
            Expression(lambda u: u.mean(-1).mean(-1)),
            nn.Linear(4 * k, num_classes),
        ]
        self.net = nn.Sequential(*stages)

    def forward(self, x):
        return self.net(x)
_module  # NOTE(review): stray extraction artifact — presumably a mangled registry decorator (e.g. `@..._module`) for the class below; confirm against the original source
class BalancedL1Loss(nn.Module):
    """Balanced L1 loss (Libra R-CNN) module wrapping ``balanced_l1_loss``."""

    def __init__(self, alpha=0.5, gamma=1.5, beta=1.0, reduction='mean', loss_weight=1.0):
        super(BalancedL1Loss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs):
        """Compute the weighted balanced-L1 bbox loss.

        ``reduction_override``, when given, replaces the configured reduction
        for this call only.
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = reduction_override if reduction_override else self.reduction
        raw_loss = balanced_l1_loss(pred, target, weight, alpha=self.alpha, gamma=self.gamma, beta=self.beta, reduction=reduction, avg_factor=avg_factor, **kwargs)
        return self.loss_weight * raw_loss
_group.command('list')  # NOTE(review): these three lines appear to be click decorators mangled by extraction (the leading `@` was lost)
('filters', nargs=(- 1))  # NOTE(review): presumably `@click.argument('filters', nargs=-1)` originally — confirm against the original source
_project()  # NOTE(review): presumably a project-option decorator applied to list_jobs below
def list_jobs(filters, project=None):
    """CLI command: fetch jobs matching *filters* and print them as a table."""
    from cli.jobs import fetch_jobs
    try:
        filters = parse_args(filters)
        if project:
            filters['project'] = project
    except Exception:
        click.secho(f'Failed to parse filters: {filters}', fg='yellow', err=True)
        return
    try:
        with Loader('Fetching jobs...'):
            jobs = fetch_jobs(filters)
        click.echo('Fetched jobs successfully.')
    except requests.exceptions.HTTPError as e:
        click.secho(f'Failed to fetch jobs {e}.', fg='red', err=True)
        # Surface the server's validation message on bad requests.
        if e.response.status_code == 400:
            click.secho(str(e.response.json()), fg='red', err=True)
        return
    table = TableLogger(columns='state,name,operation,created_at', colwidth={'state': MEDIUM_WIDTH, 'name': LARGE_WIDTH, 'operation': SMALL_WIDTH, 'created_at': DATETIME_WIDTH})
    for job in jobs:
        table(job['state'], job['name'], job['operation'], job['created_at'])
class InvertibleCheckpointFunction(torch.autograd.Function):
    """Activation checkpointing for invertible functions.

    The forward pass may free the input activation storage
    (``keep_input=False``); the backward pass then reconstructs the inputs
    from the outputs with ``fn_inverse`` (replaying the saved RNG state)
    before recomputing ``fn`` under grad to obtain gradients.

    NOTE(review): ``forward``/``backward`` are conventionally declared as
    ``@staticmethod`` on autograd.Function subclasses; the decorators appear
    to have been lost during extraction.  ``Function.apply`` still resolves
    them as plain functions in Python 3 — confirm against the original.
    """
    def forward(ctx, fn, fn_inverse, keep_input, num_bwd_passes, preserve_rng_state, num_inputs, *inputs_and_weights):
        # Stash everything backward() will need on the autograd context.
        ctx.fn = fn
        ctx.fn_inverse = fn_inverse
        ctx.keep_input = keep_input
        ctx.weights = inputs_and_weights[num_inputs:]
        ctx.num_bwd_passes = num_bwd_passes
        ctx.preserve_rng_state = preserve_rng_state
        ctx.num_inputs = num_inputs
        inputs = inputs_and_weights[:num_inputs]
        if preserve_rng_state:
            # Capture CPU (and, if initialised, per-device CUDA) RNG state so
            # recomputation in backward replays the same randomness.
            ctx.fwd_cpu_state = torch.get_rng_state()
            ctx.had_cuda_in_fwd = False
            if torch.cuda._initialized:
                ctx.had_cuda_in_fwd = True
                (ctx.fwd_gpu_devices, ctx.fwd_gpu_states) = get_device_states(*inputs)
        ctx.input_requires_grad = [element.requires_grad for element in inputs]
        with torch.no_grad():
            # Run fn on detached tensors — the graph is rebuilt in backward.
            x = []
            for element in inputs:
                if isinstance(element, torch.Tensor):
                    x.append(element.detach())
                else:
                    x.append(element)
            outputs = ctx.fn(*x)
        if (not isinstance(outputs, tuple)):
            outputs = (outputs,)
        detached_outputs = tuple([element.detach_() for element in outputs])
        if (not ctx.keep_input):
            # Free the input storage; it is reconstructed via fn_inverse
            # during backward.
            if (not pytorch_version_one_and_above):
                inputs[0].data.set_()
            else:
                inputs[0].storage().resize_(0)
        # One saved (inputs, outputs) snapshot per allowed backward pass.
        ctx.inputs = ([inputs] * num_bwd_passes)
        ctx.outputs = ([detached_outputs] * num_bwd_passes)
        return detached_outputs
    def backward(ctx, *grad_outputs):
        if (not torch.autograd._is_checkpoint_valid()):
            raise RuntimeError('InvertibleCheckpointFunction is not compatible with .grad(), please use .backward() if possible')
        if (len(ctx.outputs) == 0):
            raise RuntimeError('Trying to perform backward on the InvertibleCheckpointFunction for more than {} times! Try raising `num_bwd_passes` by one.'.format(ctx.num_bwd_passes))
        inputs = ctx.inputs.pop()
        outputs = ctx.outputs.pop()
        if (not ctx.keep_input):
            # Inputs were freed in forward: reconstruct them from the outputs
            # under the recorded RNG state, then free the output storage.
            rng_devices = []
            if (ctx.preserve_rng_state and ctx.had_cuda_in_fwd):
                rng_devices = ctx.fwd_gpu_devices
            with torch.random.fork_rng(devices=rng_devices, enabled=ctx.preserve_rng_state):
                if ctx.preserve_rng_state:
                    torch.set_rng_state(ctx.fwd_cpu_state)
                    if ctx.had_cuda_in_fwd:
                        set_device_states(ctx.fwd_gpu_devices, ctx.fwd_gpu_states)
                with torch.no_grad():
                    inputs_inverted = ctx.fn_inverse(*(outputs + inputs[1:]))
                    # Release the output storages now that inputs exist again.
                    if (not pytorch_version_one_and_above):
                        for element in outputs:
                            element.data.set_()
                    else:
                        for element in outputs:
                            element.storage().resize_(0)
                    if (not isinstance(inputs_inverted, tuple)):
                        inputs_inverted = (inputs_inverted,)
                    # Restore the original input tensors in place so any
                    # aliases observe the reconstructed values.
                    if pytorch_version_one_and_above:
                        for (element_original, element_inverted) in zip(inputs, inputs_inverted):
                            element_original.storage().resize_(int(np.prod(element_original.size())))
                            element_original.set_(element_inverted)
                    else:
                        for (element_original, element_inverted) in zip(inputs, inputs_inverted):
                            element_original.set_(element_inverted)
        # Recompute the forward pass with grad enabled to build a local graph.
        with torch.set_grad_enabled(True):
            detached_inputs = []
            for element in inputs:
                if isinstance(element, torch.Tensor):
                    detached_inputs.append(element.detach())
                else:
                    detached_inputs.append(element)
            detached_inputs = tuple(detached_inputs)
            for (det_input, requires_grad) in zip(detached_inputs, ctx.input_requires_grad):
                det_input.requires_grad = requires_grad
            temp_output = ctx.fn(*detached_inputs)
        if (not isinstance(temp_output, tuple)):
            temp_output = (temp_output,)
        filtered_detached_inputs = tuple(filter((lambda x: x.requires_grad), detached_inputs))
        gradients = torch.autograd.grad(outputs=temp_output, inputs=(filtered_detached_inputs + ctx.weights), grad_outputs=grad_outputs)
        # NOTE(review): filtered_inputs is computed but never used — dead code
        # retained from the original.
        filtered_inputs = list(filter((lambda x: x.requires_grad), inputs))
        # Re-expand gradients to align with original input positions,
        # inserting None for inputs that did not require grad.
        input_gradients = []
        i = 0
        for rg in ctx.input_requires_grad:
            if rg:
                input_gradients.append(gradients[i])
                i += 1
            else:
                input_gradients.append(None)
        gradients = (tuple(input_gradients) + gradients[(- len(ctx.weights)):])
        # Leading Nones match forward()'s six non-tensor arguments.
        return ((None, None, None, None, None, None) + gradients)
def main(args):
    """Train a fairness-constrained Reddit user/subreddit embedding model.

    Loads a pickled k-core Reddit graph, selects sensitive subreddit nodes,
    builds train/test edge and user splits, then adversarially trains the
    encoder against per-attribute discriminators (optionally with sampled
    compositional attribute masks), logging everything to Comet.
    """
    # --- Load the k-core graph and pick the sensitive subreddit nodes ---
    save_path_base = './reddit_data/Reddit_split_2017-11/split_csv/'
    save_path_k_core = (((save_path_base + str(args.k_core)) + '_') + args.save_master_k_core)
    G = nx.read_gpickle(save_path_k_core)
    # Top-degree nodes (after skip_n), excluding user nodes ('U_...' prefix).
    top_nodes_G = sorted(G.degree, key=(lambda x: x[1]), reverse=True)[args.skip_n:(101 + args.skip_n)]
    top_nodes_G = [n for n in top_nodes_G if (n[0].split('_')[0] != 'U')]
    sensitive_nodes = random.sample(top_nodes_G, args.num_sensitive)
    (u_to_idx, sr_to_idx) = reddit_mappings(list(G.nodes()))
    args.num_users = len(u_to_idx)
    args.num_sr = len(sr_to_idx)
    # --- 90/10 train/test splits over edges and users ---
    cutoff_constant = 0.9
    reddit_check_edges(list(G.edges()))
    train_cutoff_row = int(np.round((len(G.edges()) * cutoff_constant)))
    users_cutoff_row = int(np.round((len(u_to_idx) * cutoff_constant)))
    args.cutoff_row = train_cutoff_row
    args.users_cutoff_row = users_cutoff_row
    all_users = list(u_to_idx)
    random.shuffle(all_users)
    args.users_train = [u_to_idx[user] for user in all_users[:args.users_cutoff_row]]
    args.users_test = [u_to_idx[user] for user in all_users[args.users_cutoff_row:]]
    train_set = RedditDataset(list(G.edges())[:args.cutoff_row], u_to_idx, sr_to_idx)
    test_set = RedditDataset(list(G.edges())[args.cutoff_row:], u_to_idx, sr_to_idx)
    train_fairness_set = NodeClassification(args.users_train, args.prefetch_to_gpu)
    test_fairness_set = NodeClassification(args.users_test, args.prefetch_to_gpu)
    if args.filter_false_negs:
        # Hash known edges so negative sampling can reject false negatives.
        train_hash = set([train_set.get_mapping(r).numpy().tobytes() for r in train_set.dataset])
        all_hash = train_hash.copy()
        all_hash.update(set([test_set.get_mapping(r).numpy().tobytes() for r in test_set.dataset]))
    else:
        train_hash = None
        all_hash = None
    # --- All 0/1 attribute-mask combinations for compositional fairness ---
    all_masks = list(map(list, itertools.product([0, 1], repeat=args.num_sensitive)))
    if args.held_out_comp:
        # Hold out a slice of mask combinations for compositional testing.
        args.mask_cutoff_row = int(np.round((len(all_masks) * cutoff_constant)))
        train_masks = all_masks[:args.mask_cutoff_row]
        test_masks = all_masks[args.mask_cutoff_row:]
    else:
        train_masks = all_masks
    print(('Training Set size %d' % len(train_set)))
    print(('Test Set size %d' % len(test_set)))
    # --- Encoder model (optionally data-parallel) ---
    if args.use_multi:
        modelD = to_multi_gpu(RedditEncoder(args.num_users, args.num_sr, args.embed_dim, args.p))
    else:
        modelD = RedditEncoder(args.num_users, args.num_sr, args.embed_dim, args.p).to(args.device)
    ' Define Discriminators '
    if args.use_attr:
        # One adversarial discriminator (and optimizer) per sensitive node.
        (fairD_set, optimizer_fairD_set, filter_set) = ([], [], [])
        for sens_node in sensitive_nodes:
            D = RedditDiscriminator(G, args.embed_dim, sens_node[0], u_to_idx).to(args.device)
            optimizer_fairD = optimizer(D.parameters(), 'adam', args.lr)
            fairD_set.append(D)
            optimizer_fairD_set.append(optimizer_fairD)
        if (not args.sample_mask):
            filter_set = None
        else:
            # One attribute filter per sensitive node when masks are sampled.
            sr_params = []
            for sens_node in sensitive_nodes:
                sr_filter = AttributeFilter(args.embed_dim, attribute=sens_node[0]).to(args.device)
                sr_params.append(sr_filter)
                filter_set.append(sr_filter)
    else:
        (fairD_set, optimizer_fairD_set, filter_set) = ([None], None, None)
    if args.debug:
        ipdb.set_trace()
    if (args.sample_mask and (not args.use_trained_filters)):
        # Jointly optimise the encoder and the attribute filters.
        models = ([modelD] + sr_params)
        optimizerD = optimizer(itertools.chain.from_iterable((m.parameters() for m in models)), 'adam', args.lr)
    else:
        optimizerD = optimizer(modelD.parameters(), 'adam', args.lr)
    ' Comet Logging '
    experiment = Experiment(api_key=args.api_key, disabled=(not args.do_log), project_name=args.project_name, workspace=args.workspace)
    experiment.set_name(args.namestr)
    train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=8, pin_memory=True, collate_fn=collate_fn)
    # --- Adversarial training loop with periodic evaluation ---
    with experiment.train():
        for epoch in tqdm(range(1, (args.num_epochs + 1))):
            train_fair_reddit(train_loader, all_hash, epoch, args, modelD, optimizerD, fairD_set, optimizer_fairD_set, filter_set, train_masks, experiment)
            if ((epoch % args.valid_freq) == 0):
                test_reddit_nce(test_set, epoch, all_hash, args, modelD, experiment, filter_set, subsample=1)
                if args.use_attr:
                    for (i, fairD) in enumerate(fairD_set):
                        if (filter_set is not None):
                            test_sensitive_sr(args, test_fairness_set, modelD, fairD, experiment, epoch, [filter_set[i]])
                        else:
                            test_sensitive_sr(args, test_fairness_set, modelD, fairD, experiment, epoch, filter_set)
    # --- Post-hoc evaluation: train fresh discriminators on the frozen encoder ---
    constant = (len(fairD_set) - fairD_set.count(None))
    if ((constant != 0) or args.test_new_disc):
        if args.test_new_disc:
            args.use_attr = True
        ' Training Fresh Discriminators'
        args.freeze_encoder = True
        freeze_model(modelD)
        with experiment.test():
            if args.use_attr:
                if (args.sample_mask and args.held_out_comp):
                    train_compositional_reddit_classifier(args, modelD, G, sensitive_nodes, u_to_idx, train_fairness_set, test_fairness_set, experiment, test_masks, filter_set=filter_set)
                elif (args.sample_mask and (not args.held_out_comp)):
                    train_compositional_reddit_classifier(args, modelD, G, sensitive_nodes, u_to_idx, train_fairness_set, test_fairness_set, experiment, all_masks, filter_set=filter_set)
                else:
                    for sens_node in sensitive_nodes:
                        train_reddit_classifier(args, modelD, G, sens_node[0], u_to_idx, train_fairness_set, test_fairness_set, experiment=experiment, filter_set=filter_set)
    experiment.end()
    torch.cuda.empty_cache()
def _chunk_minibatch(batch, num_batches):
(X, y) = batch
batch_size = (len(X) // num_batches)
for i in range(num_batches):
(yield (X[(i * batch_size):((i + 1) * batch_size)], y[(i * batch_size):((i + 1) * batch_size)])) |
class SGDW(Optimizer):
    """SGD with decoupled weight decay (SGDW).

    Unlike coupled L2 regularisation, the weight-decay term is applied
    directly to the pre-step parameter values after the gradient update,
    keeping it out of the momentum buffer.

    Args:
        params: iterable of parameters or parameter groups.
        lr: learning rate (required).
        momentum: momentum factor (default 0).
        dampening: dampening for momentum (default 0).
        weight_decay: decoupled weight-decay factor (default 0).
        nesterov: enables Nesterov momentum (default False).
    """

    def __init__(self, params, lr=required, momentum=0, dampening=0, weight_decay=0, nesterov=False):
        if lr is not required and lr < 0.0:
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if momentum < 0.0:
            raise ValueError('Invalid momentum value: {}'.format(momentum))
        if weight_decay < 0.0:
            raise ValueError('Invalid weight_decay value: {}'.format(weight_decay))
        defaults = dict(lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, nesterov=nesterov)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError('Nesterov momentum requires a momentum and zero dampening')
        super(SGDW, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(SGDW, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)

    def step(self, closure=None):
        """Perform a single optimisation step.

        Args:
            closure: optional callable that re-evaluates the model and
                returns the loss.

        Returns:
            The closure's loss, or None when no closure was given.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']
            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad.data
                # Snapshot the pre-update weights: decoupled weight decay
                # below is applied to these, not to the post-step values.
                old = torch.clone(p.data).detach()
                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        buf = param_state['momentum_buffer'] = torch.zeros_like(p.data)
                        buf.mul_(momentum).add_(d_p)
                    else:
                        buf = param_state['momentum_buffer']
                        # BUG FIX: the deprecated `add_(scalar, tensor)`
                        # overload was removed in modern PyTorch; use the
                        # `alpha=` keyword form (numerically identical).
                        buf.mul_(momentum).add_(d_p, alpha=(1 - dampening))
                    if nesterov:
                        d_p = d_p.add(buf, alpha=momentum)
                    else:
                        d_p = buf
                p.data.add_(d_p, alpha=-group['lr'])
                if weight_decay != 0:
                    # Decoupled weight decay applied to the pre-step weights.
                    p.data.add_(old, alpha=(-weight_decay * group['lr']))
        return loss
def prettyprint(o):
    """Render *o* as a string; generators are materialised and labelled."""
    if not isinstance(o, types.GeneratorType):
        return str(o)
    # Exhausts the generator — callers should not reuse it afterwards.
    return ('(generator) ' + str(list(o)))
def resolution_to_number(string):
    """Return the total pixel count of a 'WIDTHxHEIGHT' resolution string.

    Raises P1203StandaloneError if the string cannot be parsed.
    """
    try:
        parts = string.split('x')
        return int(parts[0]) * int(parts[1])
    except Exception as e:
        # `string` and `e` are referenced by name inside format(**locals()).
        raise P1203StandaloneError('Wrong specification of resolution {string}: {e}'.format(**locals()))
_ASSIGNERS.register_module()  # NOTE(review): likely a truncated decorator (`@..._ASSIGNERS.register_module()`) for HungarianAssigner below — confirm against the original source
class HungarianAssigner(BaseAssigner):
    """One-to-one assignment of predicted boxes to ground truth via the
    Hungarian algorithm over a classification + L1 + IoU cost matrix."""

    def __init__(self, cls_cost=dict(type='ClassificationCost', weight=1.0), reg_cost=dict(type='BBoxL1Cost', weight=1.0), iou_cost=dict(type='IoUCost', iou_mode='giou', weight=1.0)):
        self.cls_cost = build_match_cost(cls_cost)
        self.reg_cost = build_match_cost(reg_cost)
        self.iou_cost = build_match_cost(iou_cost)

    def assign(self, bbox_pred, cls_pred, gt_bboxes, gt_labels, img_meta, gt_bboxes_ignore=None, eps=1e-07):
        """Match predictions to ground-truth boxes.

        assigned_gt_inds semantics: -1 unassigned, 0 background/negative,
        k > 0 assigned to ground-truth index k - 1.
        """
        assert gt_bboxes_ignore is None, 'Only case when gt_bboxes_ignore is None is supported.'
        num_gts = gt_bboxes.size(0)
        num_bboxes = bbox_pred.size(0)
        assigned_gt_inds = bbox_pred.new_full((num_bboxes,), -1, dtype=torch.long)
        assigned_labels = bbox_pred.new_full((num_bboxes,), -1, dtype=torch.long)
        # Degenerate cases: nothing to match.
        if num_gts == 0 or num_bboxes == 0:
            if num_gts == 0:
                assigned_gt_inds[:] = 0
            return AssignResult(num_gts, assigned_gt_inds, None, labels=assigned_labels)
        img_h, img_w, _ = img_meta['img_shape']
        factor = gt_bboxes.new_tensor([img_w, img_h, img_w, img_h]).unsqueeze(0)
        # Total cost = classification + normalised-L1 regression + IoU terms.
        cost = (
            self.cls_cost(cls_pred, gt_labels)
            + self.reg_cost(bbox_pred, gt_bboxes / factor)
            + self.iou_cost(bbox_cxcywh_to_xyxy(bbox_pred) * factor, gt_bboxes)
        )
        if linear_sum_assignment is None:
            raise ImportError('Please run "pip install scipy" to install scipy first.')
        matched_row_inds, matched_col_inds = linear_sum_assignment(cost.detach().cpu())
        matched_row_inds = torch.from_numpy(matched_row_inds).to(bbox_pred.device)
        matched_col_inds = torch.from_numpy(matched_col_inds).to(bbox_pred.device)
        # Default everything to background, then record the 1-based matches.
        assigned_gt_inds[:] = 0
        assigned_gt_inds[matched_row_inds] = matched_col_inds + 1
        assigned_labels[matched_row_inds] = gt_labels[matched_col_inds]
        return AssignResult(num_gts, assigned_gt_inds, None, labels=assigned_labels)
class HolonomicEncoder(Encoder):
    """Identity action encoder for a holonomic base: 3-DOF actions pass through."""

    def get_action(self, action):
        # A holonomic platform consumes the raw 3-vector action unchanged.
        assert len(action) == 3, f'Expected an action of size 3 but received: {action}'
        return action
class ViTConfig(PretrainedConfig):
    """Configuration holding the ViT architecture hyper-parameters."""
    model_type = 'vit'

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, encoder_stride=16, **kwargs):
        super().__init__(**kwargs)
        # Store every architecture hyper-parameter verbatim on the config.
        hyperparams = dict(
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            image_size=image_size,
            patch_size=patch_size,
            num_channels=num_channels,
            qkv_bias=qkv_bias,
            encoder_stride=encoder_stride,
        )
        for attr_name, attr_value in hyperparams.items():
            setattr(self, attr_name, attr_value)
class Shape(Layer):
    """Layer that exposes the shape of its input (first Layer arg is None)."""

    def __init__(self, bigdl_type='float'):
        super().__init__(None, bigdl_type)
def drop_data(df):
    """Drop every row whose 'Id' equals 0 or 1 and return the filtered frame.

    Uses index-based drop (like the original) so rows sharing an index label
    with a matching row are removed too.
    """
    for excluded_id in (0, 1):
        df = df.drop(df[df['Id'] == excluded_id].index)
    return df
def new_softmax(labels, logits):
    """TF1 graph op computing, per sample, ``-logit_true + sum(exp(logits)) - 1``.

    NOTE: this is a softmax-like surrogate, not the standard log-sum-exp
    cross-entropy — the row sum is not passed through log.
    """
    flat_labels = tf.reshape(labels, [-1])
    n_samples = tf.shape(flat_labels)[0]
    flat_logits = tf.reshape(logits, shape=[n_samples, -1])
    exp_sums = tf.reduce_sum(tf.exp(flat_logits), -1)
    # Gather each sample's logit at its label position.
    sample_idx = tf.expand_dims(tf.range(n_samples), 1)
    gather_idx = tf.concat([sample_idx, tf.expand_dims(flat_labels, 1)], 1)
    true_logits = tf.gather_nd(flat_logits, gather_idx)
    return ((-true_logits) + exp_sums) - 1
_model  # NOTE(review): stray extraction artifact — probably a mangled `@register_model` decorator for dla34 below; confirm against the original source
def dla34(pretrained=False, **kwargs):
    """Build DLA-34 via _create_dla with its canonical level/channel layout."""
    return _create_dla('dla34', pretrained, levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 128, 256, 512], block=DlaBasic, **kwargs)
def _get_dataloader_by_mode(mode, subset, config):
    """Build the dataloader for *mode* ('detector_translator' | 'motion_generator').

    Shuffling/randomness are enabled only for the 'train' subset.
    """
    is_train = subset == 'train'
    data_dir = config['paths']['data_dir']
    if mode == 'detector_translator':
        return ImagePairDataLoader(data_dir, subset, random_order=is_train, randomness=is_train)
    if mode == 'motion_generator':
        model_cfg = config['model']
        return SequenceDataLoader(data_dir, subset, n_points=model_cfg['n_pts'], n_action=model_cfg['n_action'], random_order=is_train, randomness=is_train)
    raise Exception('unknown dataloader %s' % mode)
class MLP_model(nn.Module):
    """Plain multi-layer perceptron with configurable normalization.

    Reads hyper-parameters off ``args``: num_features, MLP_hidden,
    num_classes, All_num_layers, dropout, and normalization
    ('bn' | 'ln' | 'None').  ``InputNorm`` additionally normalizes the raw
    input features.

    Layout per hidden layer: Linear -> ReLU -> Norm -> Dropout, with a final
    un-normalized Linear head; ``normalizations[0]`` is applied to the input.
    """

    def __init__(self, args, InputNorm=False):
        super(MLP_model, self).__init__()
        in_channels = args.num_features
        hidden_channels = args.MLP_hidden
        out_channels = args.num_classes
        num_layers = args.All_num_layers
        Normalization = args.normalization
        assert (Normalization in ['bn', 'ln', 'None'])
        self.InputNorm = InputNorm
        self.dropout = args.dropout
        self.lins = nn.ModuleList()
        self.normalizations = nn.ModuleList()

        def _make_norm(channels):
            # Factory for the configured normalization; 'None' -> Identity.
            # (Factors out the near-identical bn/ln/None branches the
            # original triplicated.)
            if Normalization == 'bn':
                return nn.BatchNorm1d(channels)
            if Normalization == 'ln':
                return nn.LayerNorm(channels)
            return nn.Identity()

        # Input normalization (Identity when disabled or mode is 'None').
        self.normalizations.append(_make_norm(in_channels) if InputNorm else nn.Identity())
        if num_layers == 1:
            self.lins.append(nn.Linear(in_channels, out_channels))
        else:
            self.lins.append(nn.Linear(in_channels, hidden_channels))
            self.normalizations.append(_make_norm(hidden_channels))
            for _ in range(num_layers - 2):
                self.lins.append(nn.Linear(hidden_channels, hidden_channels))
                self.normalizations.append(_make_norm(hidden_channels))
            self.lins.append(nn.Linear(hidden_channels, out_channels))

    def reset_parameters(self):
        """Re-initialise every linear and (non-Identity) normalization layer."""
        for lin in self.lins:
            lin.reset_parameters()
        for normalization in self.normalizations:
            # BUG FIX: the original compared class names with `is`
            # (`name is 'Identity'`), which relies on CPython string
            # interning and raises SyntaxWarning; use a real equality test.
            if normalization.__class__.__name__ != 'Identity':
                normalization.reset_parameters()

    def forward(self, data):
        """Run the MLP on ``data.x`` and return the raw class logits."""
        x = data.x
        x = self.normalizations[0](x)
        for (i, lin) in enumerate(self.lins[:(- 1)]):
            x = lin(x)
            x = F.relu(x, inplace=True)
            x = self.normalizations[(i + 1)](x)
            x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.lins[(- 1)](x)
        return x
class TFCLIPVisionModel(metaclass=DummyObject):
    """Import-time placeholder: instantiating it raises unless the
    TensorFlow backend is installed (standard transformers dummy-object
    pattern)."""
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def build_fake_yaml():
    """Write a self-distillation test configuration to ./fake.yaml.

    The config maps three groups of resblock layer pairs to L2/KL/CE
    distillation losses over a dummy 128x3x224x224 dataset.
    """
    fake_yaml = "\n model:\n name: self_distillation\n framework: pytorch\n\n distillation:\n train:\n start_epoch: 0\n end_epoch: 3\n iteration: 10\n frequency: 1\n optimizer:\n SGD:\n learning_rate: 0.001\n momentum: 0.1\n nesterov: True\n weight_decay: 0.001\n criterion:\n SelfKnowledgeDistillationLoss:\n layer_mappings: [\n [['resblock.1.feature.output', 'resblock.deepst.feature.output'],\n ['resblock.2.feature.output','resblock.deepst.feature.output']],\n [['resblock.2.fc','resblock.deepst.fc'],\n ['resblock.3.fc','resblock.deepst.fc']],\n [['resblock.1.fc','resblock.deepst.fc'],\n ['resblock.2.fc','resblock.deepst.fc'],\n ['resblock.3.fc','resblock.deepst.fc']]\n ]\n temperature: 3.0\n loss_types: ['L2', 'KL', 'CE']\n loss_weights: [0.5, 0.05, 0.02]\n add_origin_loss: True\n dataloader:\n batch_size: 30\n dataset:\n dummy:\n shape: [128, 3, 224, 224]\n label: True\n evaluation:\n accuracy:\n metric:\n topk: 1\n dataloader:\n batch_size: 30\n dataset:\n dummy:\n shape: [128, 3, 224, 224]\n label: True\n "
    with open('fake.yaml', 'w', encoding='utf-8') as f:
        f.write(fake_yaml)
def quad_double_pole_step(vrblvl=0):
    """Run one pole-corrected step in quad double precision via PHCpack.

    Dispatches job 886 through the shared-library entry point returned by
    ``get_phcfun`` and returns the step size it reports.  A positive
    *vrblvl* turns on progress messages.
    """
    verbose = vrblvl > 0
    if verbose:
        print('in quad_double_pole_step ...')
    phc = get_phcfun()
    a_parameter = pointer(c_int32(2))
    b_verbose = pointer(c_int32(0))
    c_step = pointer(c_double(0.0))
    level = c_int32(vrblvl)
    if verbose:
        print('-> quad_double_pole_step calls phc', end='')
    retval = phc(886, a_parameter, b_verbose, c_step, level)
    if verbose:
        print(', return value :', retval)
        print('the step size :', c_step[0])
    return c_step[0]
# NOTE(review): '_arg_scope' looks like a mangled '@add_arg_scope' decorator
# (the '@' and prefix appear lost in extraction) — confirm against upstream.
_arg_scope
def bias_add(inputs, activation_fn=None, initializer=init_ops.zeros_initializer(), regularizer=None, reuse=None, variables_collections=None, outputs_collections=None, trainable=True, data_format=DATA_FORMAT_NHWC, scope=None):
    """Add a learned bias to `inputs`, optionally followed by an activation.

    Creates (or reuses, via `reuse`) a 'biases' model variable with one entry
    per channel of `inputs`, applies `nn.bias_add`, then `activation_fn` if
    given, and records the result in `outputs_collections`.

    Raises:
        ValueError: for unknown `data_format`, unknown input rank, non-4D
            NCHW input, or an unknown channel dimension.

    NOTE(review): the default `initializer=init_ops.zeros_initializer()` is
    evaluated once at definition time; fine for a stateless initializer, but
    worth confirming it is intended to be shared across calls.
    """
    if (data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC)):
        raise ValueError('data_format has to be either NCHW or NHWC.')
    with variable_scope.variable_scope(scope, 'BiasAdd', [inputs], reuse=reuse) as sc:
        inputs = ops.convert_to_tensor(inputs)
        dtype = inputs.dtype.base_dtype
        inputs_shape = inputs.get_shape()
        inputs_rank = inputs_shape.ndims
        if (inputs_rank is None):
            raise ValueError('Dims of shape must be known but is None')
        elif ((inputs_rank != 4) and (data_format == DATA_FORMAT_NCHW)):
            raise ValueError('Data format NCHW only supports 4D Tensor')
        # Channel axis: 1 for NCHW, last for NHWC.
        axis = (1 if (data_format == DATA_FORMAT_NCHW) else (- 1))
        num_features = inputs_shape[axis].value
        if (num_features is None):
            raise ValueError('`C` dimension must be known but is None')
        biases_collections = utils.get_variable_collections(variables_collections, 'biases')
        biases = variables.model_variable('biases', shape=[num_features], dtype=dtype, initializer=initializer, regularizer=regularizer, collections=biases_collections, trainable=trainable)
        outputs = nn.bias_add(inputs, biases, data_format=data_format)
        if (activation_fn is not None):
            outputs = activation_fn(outputs)
        return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
def checkin_newton_power_series(nbsym, lser, idx):
    """Check that the list of leading terms *lser* has the expected length.

    When *idx* is zero the series involves all *nbsym* symbols; otherwise one
    symbol is the series parameter, so one fewer leading term is expected.
    Prints a diagnostic and returns False on a mismatch, True otherwise.
    """
    expected = len(lser) if idx == 0 else len(lser) + 1
    okay = nbsym == expected
    if not okay:
        dim = nbsym if idx == 0 else nbsym - 1
        print('Wrong length of list of leading terms, should be', str(dim) + '.')
    return okay
class Normalize(BaseWaveformTransform):
    """Peak-normalize audio so its maximum absolute amplitude becomes 1.0.

    With ``apply_to='only_too_loud_sounds'`` only signals whose peak exceeds
    1.0 are rescaled; in-range signals pass through unchanged.
    """

    supports_multichannel = True

    def __init__(self, apply_to: str='all', p: float=0.5):
        super().__init__(p)
        # BUG FIX: validate with a real exception instead of 'assert' — assert
        # statements are stripped under 'python -O', silently accepting bad
        # arguments.
        if apply_to not in ('all', 'only_too_loud_sounds'):
            raise ValueError(
                "apply_to must be 'all' or 'only_too_loud_sounds', got "
                + repr(apply_to)
            )
        self.apply_to = apply_to

    def randomize_parameters(self, samples: NDArray[np.float32], sample_rate: int):
        """Record the peak amplitude of *samples* for use in apply()."""
        super().randomize_parameters(samples, sample_rate)
        if self.parameters['should_apply']:
            self.parameters['max_amplitude'] = get_max_abs_amplitude(samples)

    def apply(self, samples: NDArray[np.float32], sample_rate: int):
        """Return *samples* divided by the recorded peak amplitude."""
        # In 'only_too_loud_sounds' mode, leave already-in-range audio alone.
        if (self.apply_to == 'only_too_loud_sounds') and (self.parameters['max_amplitude'] < 1.0):
            return samples
        if self.parameters['max_amplitude'] > 0:
            return samples / self.parameters['max_amplitude']
        # All-zero (silent) input: dividing would produce NaNs; pass through.
        return samples
def adjust_range(in_min, in_max, device, non_zero):
    """Widen [in_min, in_max] into a zero-containing uint8 quantization range.

    Returns ``(scale, zero_point, adjusted_min, adjusted_max)``.  Hexagon-family
    devices are delegated to their dedicated adjustment routine.
    """
    if device in [DeviceType.HEXAGON.value, DeviceType.HTA.value]:
        return adjust_range_for_hexagon(in_min, in_max)
    # The range must always include zero.
    out_max = max(in_max, 0.0)
    out_min = min(in_min, 0.0)
    if non_zero:
        # Nudge the lower bound so in_min itself never maps onto the zero code.
        out_min = min(out_min, in_min - (out_max - in_min) / 254.0)
    scale = (out_max - out_min) / 255.0
    eps = 1e-06
    if out_min < -eps and out_max > eps:
        zero = -out_min / scale
        zero_int = int(round(zero))
        if abs(zero - zero_int) > eps and non_zero:
            zero_int = int(math.ceil(zero))
    elif out_min > -eps:
        # Non-negative range: zero sits at code 0.
        zero_int = 0
    else:
        # Non-positive range: zero sits at code 255.
        zero_int = 255
    return (scale, zero_int, -zero_int * scale, (255 - zero_int) * scale)
# NOTE(review): the leading '.parametrize(...)' / '.skip(...)' lines look like
# mangled '@pytest.mark.parametrize' / '@pytest.mark.skip' decorators (the
# '@pytest.mark' prefix appears lost in extraction); as written they are not
# valid Python — restore the decorators before use.
.parametrize('ds_split', [0.2, 0.3, [train_test_split(np.arange(20), test_size=0.4, shuffle=True)], ShuffleSplit(n_splits=1)])
.skip('Deslib is not compatible with new python. Waiting for PR.')
def test_ds_split_parameter(ds_split: Any, df_iris: pd.DataFrame) -> None:
    """Fit DynamicSelection on a binary iris subset for each ds_split form."""
    # Restrict to two classes; deslib dynamic selection here is binary.
    df_iris = df_iris[df_iris['species'].isin(['versicolor', 'virginica'])]
    # Shuffle rows so class order does not leak into the DS split.
    df_iris = df_iris.sample(n=len(df_iris))
    X = ['sepal_length', 'sepal_width', 'petal_length']
    y = 'species'
    ensemble_model = RandomForestClassifier()
    dynamic_model = DynamicSelection(ensemble=ensemble_model, algorithm='METADES', ds_split=ds_split)
    dynamic_model.fit(df_iris[X], df_iris[y])
class DecoderBlock(nn.Module):
    """LinkNet-style decoder block.

    1x1 conv channel squeeze (to in_channels // 4), 3x3 transposed conv for
    2x spatial upsampling, then 1x1 conv expansion to ``n_filters``; each
    stage is followed by batch norm and the module-level ``nonlinearity``.
    """

    def __init__(self, in_channels, n_filters):
        super(DecoderBlock, self).__init__()
        squeezed = in_channels // 4
        # Stage 1: channel reduction.
        self.conv1 = nn.Conv2d(in_channels, squeezed, 1)
        self.norm1 = nn.BatchNorm2d(squeezed)
        self.relu1 = nonlinearity
        # Stage 2: 2x spatial upsampling.
        self.deconv2 = nn.ConvTranspose2d(squeezed, squeezed, 3, stride=2, padding=1, output_padding=1)
        self.norm2 = nn.BatchNorm2d(squeezed)
        self.relu2 = nonlinearity
        # Stage 3: channel expansion to the requested width.
        self.conv3 = nn.Conv2d(squeezed, n_filters, 1)
        self.norm3 = nn.BatchNorm2d(n_filters)
        self.relu3 = nonlinearity

    def forward(self, x):
        out = x
        for op, norm, act in (
            (self.conv1, self.norm1, self.relu1),
            (self.deconv2, self.norm2, self.relu2),
            (self.conv3, self.norm3, self.relu3),
        ):
            out = act(norm(op(out)))
        return out
def load_tf_weights_in_tapas(*args, **kwargs):
    """Stub kept for API compatibility; raises via requires_backends unless the torch backend is available."""
    requires_backends(load_tf_weights_in_tapas, ['torch'])
def num_frames(length, fsize, fshift):
    """Return the number of analysis frames for a signal of *length* samples.

    Assumes the signal is padded by (fsize - fshift) samples on each side;
    one extra frame is counted when *length* is not a multiple of *fshift*.
    """
    pad = fsize - fshift
    padded = length + 2 * pad
    extra = 1 if length % fshift == 0 else 2
    return (padded - fsize) // fshift + extra
# NOTE(review): '_SEG_HEADS_REGISTRY.register()' reads like a mangled
# '@_SEG_HEADS_REGISTRY.register()' class decorator (the '@' appears lost in
# extraction) — confirm against the upstream source.
_SEG_HEADS_REGISTRY.register()
class MaskFormerHead(nn.Module):
    """MaskFormer segmentation head: a pixel decoder feeding a transformer predictor."""

    # Checkpoint-format version; bumped when pixel-decoder keys were renamed.
    _version = 2

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        """Upgrade pre-v2 checkpoints in place before normal loading.

        Non-predictor 'sem_seg_head' weights moved under 'pixel_decoder.'.
        """
        version = local_metadata.get('version', None)
        if ((version is None) or (version < 2)):
            # 'scratch' stays True when nothing was renamed, i.e. this looks
            # like a freshly initialized model rather than an old checkpoint.
            scratch = True
            logger = logging.getLogger(__name__)
            for k in list(state_dict.keys()):
                newk = k
                if (('sem_seg_head' in k) and (not k.startswith((prefix + 'predictor')))):
                    newk = k.replace(prefix, (prefix + 'pixel_decoder.'))
                if (newk != k):
                    state_dict[newk] = state_dict[k]
                    del state_dict[k]
                    scratch = False
            if (not scratch):
                logger.warning(f'Weight format of {self.__class__.__name__} have changed! Please upgrade your models. Applying automatic conversion now ...')

    def __init__(self, input_shape: Dict[(str, ShapeSpec)], *, num_classes: int, pixel_decoder: nn.Module, loss_weight: float=1.0, ignore_value: int=(- 1), transformer_predictor: nn.Module, transformer_in_feature: str):
        """Store the sub-modules and bookkeeping derived from *input_shape*."""
        super().__init__()
        # Process backbone features in increasing stride order.
        input_shape = sorted(input_shape.items(), key=(lambda x: x[1].stride))
        self.in_features = [k for (k, v) in input_shape]
        feature_strides = [v.stride for (k, v) in input_shape]
        feature_channels = [v.channels for (k, v) in input_shape]
        self.ignore_value = ignore_value
        self.common_stride = 4
        self.loss_weight = loss_weight
        self.pixel_decoder = pixel_decoder
        self.predictor = transformer_predictor
        self.transformer_in_feature = transformer_in_feature
        self.num_classes = num_classes

    # NOTE(review): takes 'cls' but has no @classmethod decorator — presumably
    # a '@classmethod' (or '@configurable') decorator was stripped, like the
    # other decorators in this file; confirm against upstream.
    def from_config(cls, cfg, input_shape: Dict[(str, ShapeSpec)]):
        """Build constructor kwargs for this head from a detectron2-style cfg."""
        return {'input_shape': {k: v for (k, v) in input_shape.items() if (k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES)}, 'ignore_value': cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE, 'num_classes': cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES, 'pixel_decoder': build_pixel_decoder(cfg, input_shape), 'loss_weight': cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT, 'transformer_in_feature': cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE, 'transformer_predictor': TransformerPredictor(cfg, (cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM if (cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == 'transformer_encoder') else input_shape[cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE].channels), mask_classification=True)}

    def forward(self, features):
        return self.layers(features)

    def layers(self, features):
        """Run pixel decoder, then the predictor on the configured feature."""
        (mask_features, transformer_encoder_features) = self.pixel_decoder.forward_features(features)
        if (self.transformer_in_feature == 'transformer_encoder'):
            assert (transformer_encoder_features is not None), 'Please use the TransformerEncoderPixelDecoder.'
            predictions = self.predictor(transformer_encoder_features, mask_features)
        else:
            predictions = self.predictor(features[self.transformer_in_feature], mask_features)
        return predictions
def construct_function_from_graph_def(func, graph_def, frozen_func=None):
    """Rebuild a concrete function from *graph_def*.

    The rebuilt function keeps *frozen_func*'s captures and input/output
    tensors while restoring *func*'s structured input/output signature.
    """
    if frozen_func is None:
        frozen_func = func
    # Drop any stale registrations so the library functions can re-register.
    for fdef in graph_def.library.function:
        fname = fdef.signature.name
        while context.context().has_function(fname):
            context.context().remove_function(fname)
    captures = {}
    for tensor, placeholder in frozen_func.graph.captures:
        captures[placeholder.name.split(':')[0]] = tensor
    input_names = [t.name for t in frozen_func.inputs]
    output_names = [t.name for t in frozen_func.outputs]
    new_func = wrap_function.function_from_graph_def(graph_def, input_names, output_names, captures)
    new_func.graph.structured_outputs = nest.pack_sequence_as(func.graph.structured_outputs, new_func.graph.structured_outputs)
    new_func.graph.structured_input_signature = func.structured_input_signature
    return new_func
def generate_forecaster(args):
    """Instantiate the Chronos forecaster selected by args.model / args.framework.

    The Tsinghua electricity dataset is 321-channel with hourly frequency;
    every other dataset is treated as univariate with minute frequency.
    Imports are kept local so only the requested backend is loaded.
    """
    on_electricity = args.dataset == 'tsinghua_electricity'
    input_feature_num = 321 if on_electricity else 1
    output_feature_num = 321 if on_electricity else 1
    metrics = args.metrics
    freq = 'h' if on_electricity else 't'
    # Keyword arguments shared by (almost) all forecasters.
    base_kwargs = dict(past_seq_len=args.lookback, input_feature_num=input_feature_num, output_feature_num=output_feature_num, metrics=metrics)
    seq_kwargs = dict(base_kwargs, future_seq_len=args.horizon)
    if args.model == 'lstm':
        if args.framework == 'torch':
            from bigdl.chronos.forecaster import LSTMForecaster as LSTMForecaster_torch
            return LSTMForecaster_torch(**base_kwargs)
        elif args.framework == 'tensorflow':
            from bigdl.chronos.forecaster.tf import LSTMForecaster as LSTMForecaster_tf
            return LSTMForecaster_tf(**base_kwargs)
    elif args.model == 'tcn':
        if args.framework == 'torch':
            from bigdl.chronos.forecaster import TCNForecaster as TCNForecaster_torch
            return TCNForecaster_torch(normalization=args.normalization, decomposition_kernel_size=0, **seq_kwargs)
        elif args.framework == 'tensorflow':
            from bigdl.chronos.forecaster.tf import TCNForecaster as TCNForecaster_tf
            return TCNForecaster_tf(**seq_kwargs)
    elif args.model == 'seq2seq':
        if args.framework == 'torch':
            from bigdl.chronos.forecaster import Seq2SeqForecaster as Seq2SeqForecaster_torch
            return Seq2SeqForecaster_torch(**seq_kwargs)
        elif args.framework == 'tensorflow':
            from bigdl.chronos.forecaster.tf import Seq2SeqForecaster as Seq2SeqForecaster_tf
            return Seq2SeqForecaster_tf(**seq_kwargs)
    elif args.model == 'autoformer':
        if args.framework == 'torch':
            from bigdl.chronos.forecaster import AutoformerForecaster as AutoformerForecaster_torch
            return AutoformerForecaster_torch(label_len=int(args.lookback / 2), freq=freq, **seq_kwargs)
        else:
            invalidInputError(args.framework == 'torch', f'Autoformer does not support tensorflow backend now.')
    elif args.model == 'nbeats':
        if args.framework == 'torch':
            from bigdl.chronos.forecaster import NBeatsForecaster as NBeatsForecaster_torch
            # NBeats is univariate; it takes no feature-count arguments.
            return NBeatsForecaster_torch(past_seq_len=args.lookback, future_seq_len=args.horizon, metrics=metrics)
        else:
            invalidInputError(args.framework == 'torch', f'NBeats does not support tensorflow backend now.')
def get_elem_value(elem, name):
    """Return the typed value of the first child of *elem* named *name*.

    A child matches when its 'name' attribute equals *name*.  Conversion by
    tag: 'string' -> str, 'boolean' -> bool ('true' only), 'list' -> list of
    the grandchildren's 'value' attributes.

    Returns:
        The converted value, or None when no child matches.

    Raises:
        ValueError: if the matching child has an unrecognized tag.
    """
    for child in elem:
        if child.attrib.get('name') != name:
            continue
        if child.tag == 'string':
            return child.attrib.get('value')
        if child.tag == 'boolean':
            return child.attrib.get('value') == 'true'
        if child.tag == 'list':
            return [nested_child.attrib.get('value') for nested_child in child]
        # BUG FIX: the original raised a plain string, which is itself a
        # TypeError in Python 3 ("exceptions must derive from BaseException").
        raise ValueError('Cannot recognize tag: ' + child.tag)
    return None
def get_num_layer_stage_wise(var_name, num_max_layer):
    """Map a parameter name to its stage-wise layer-decay group id.

    Backbone tokens/embeddings and downsample layers share group 0, stage i
    maps to group i + 1, and everything else (e.g. the head) goes into the
    last group, ``num_max_layer - 1``.
    """
    if var_name in ('backbone.cls_token', 'backbone.mask_token', 'backbone.pos_embed'):
        return 0
    if var_name.startswith('backbone.downsample_layers'):
        return 0
    if var_name.startswith('backbone.stages'):
        stage_id = int(var_name.split('.')[2])
        return stage_id + 1
    return num_max_layer - 1
def resnet50_fc512_efdmix123_a0d1(num_classes, loss='softmax', pretrained=True, **kwargs):
    """ResNet-50 with a 512-d FC embedding and EFDMix (alpha=0.1) on layers 1-3."""
    model = ResNet(
        num_classes=num_classes,
        loss=loss,
        block=Bottleneck,
        layers=[3, 4, 6, 3],
        last_stride=1,
        fc_dims=[512],
        dropout_p=None,
        efdmix_layers=['layer1', 'layer2', 'layer3'],
        efdmix_alpha=0.1,
        **kwargs,
    )
    if pretrained:
        # Load ImageNet weights for the shared backbone.
        init_pretrained_weights(model, model_urls['resnet50'])
    return model
class AgentNetworkException(AgentClientException):
    """Agent client error carrying the 'agent_network' error code; *detail* is an optional human-readable message."""

    def __init__(self, detail: Union[(str, None)]=None) -> None:
        # 'agent_network' is the machine-readable code passed to the base class.
        super().__init__('agent_network', detail)
def test_write_mnist(orca_context_fixture, use_api=False):
    """Round-trip test: write a tiny MNIST-format dataset to parquet, read it back.

    Exercises either the generic ``write_parquet('mnist', ...)`` API or the
    direct ``write_mnist(...)`` path, then verifies the reloaded images and
    labels match what was written.
    """
    sc = orca_context_fixture
    temp_dir = tempfile.mkdtemp()
    try:
        train_image_file = os.path.join(temp_dir, 'train-images')
        train_label_file = os.path.join(temp_dir, 'train-labels')
        output_path = os.path.join(temp_dir, 'output_dataset')
        # 20 distinct 4x4 images: image i is filled with the value i.
        images = np.array([([i] * 16) for i in range(20)]).reshape((20, 4, 4)).astype(np.uint8)
        labels = np.array(list(range(20))).reshape((20,)).astype(np.uint8)
        _images_to_mnist_file(images, train_image_file)
        _labels_to_mnist_file(labels, train_label_file)
        if use_api:
            write_parquet('mnist', ('file://' + output_path), image_file=train_image_file, label_file=train_label_file)
        else:
            write_mnist(image_file=train_image_file, label_file=train_label_file, output_path=('file://' + output_path))
        (data, schema) = ParquetDataset._read_as_dict_rdd(('file://' + output_path))
        # Sort by label to restore the original write order before comparing.
        data = data.sortBy((lambda x: x['label'])).collect()
        images_load = np.reshape(np.stack([d['image'] for d in data]), ((- 1), 4, 4))
        labels_load = np.stack([d['label'] for d in data])
        assert np.all((images_load == images))
        # BUG FIX: the original asserted labels_load == labels_load (comparing
        # the array with itself, trivially true); compare against the labels
        # that were actually written.
        assert np.all((labels_load == labels))
    finally:
        shutil.rmtree(temp_dir)
def proc_time_emb(hist_t, cur_t):
    """Bucketize elapsed times against the module-level ``gap`` boundaries.

    Each history timestamp t becomes (cur_t - t + 1), which is then replaced
    by the number of ``gap`` thresholds it meets or exceeds.

    NOTE: 'gap' is a module-level array of bucket boundaries defined elsewhere
    in this file.
    """
    elapsed = [cur_t - t + 1 for t in hist_t]
    return [np.sum(e >= gap) for e in elapsed]
def insert_topics(conn, topics):
    """Bulk-insert *topics* into the 'topics' table and commit.

    Each row gets an auto-generated id (null) and an initial count of 0;
    *topics* is a sequence of parameter tuples matching the single %s
    placeholder (DB-API 'format' paramstyle, e.g. MySQL).
    """
    sql = 'insert into topics values(null,%s,0)'
    cur = conn.cursor()
    try:
        cur.executemany(sql, topics)
    finally:
        # Close the cursor even if executemany raises, avoiding a leak.
        cur.close()
    conn.commit()
def add_chain_recipe_opts(args):
    """Register the default Kaldi chain-recipe options on *args*.

    Each entry is (name, default) or (name, default, type); registration
    order matches the original one-call-per-option listing.
    """
    options = (
        ('stage', 0, int),
        ('train-stage', 0, int),
        ('decode_nj', 30, int),
        ('train-set', 'train_clean_5', str),
        ('test-sets', 'dev_clean_2', str),
        ('graph-dir', 'data/lang/graph', str),
        ('gmm', 'tri3b', str),
        ('srand', 0, int),
        ('nnet3-affix', '', str),
        ('affix', '', str),
        ('tree-affix', '', str),
        ('get-egs-stage', -10, int),
        ('num-epochs', 4),
        ('frames-per-iter', 1200000),
        ('chunk-width', '140'),
        ('xent-regularize', 0.01, float),
        ('frame-subsampling-factor', 3, int),
        ('egs_extra_left_context', 5),
        ('egs_extra_right_context', 5),
        ('exp', 'exp'),
        ('data', 'data'),
        ('lr_initial', 0.001),
        ('lr_final', 0.0001),
        ('num_jobs_initial', 2),
        ('num_jobs_final', 8),
        ('l2_regularize', 0.0001),
        ('leaky_hmm_coefficient', 0.1),
    )
    for opt in options:
        _add_simple_arg(args, *opt)
def create_voxel_off(path):
    """Convert the saved voxelization in *path* into an OFF mesh next to it.

    NOTE(review): relies on module-level globals 'res', 'unpackbits', 'min'
    and 'max'; 'min'/'max' shadow the builtins here and are presumably the
    numeric bounds of the voxel grid — confirm where they are defined.
    """
    voxel_path = (path + '/voxelization_{}.npy'.format(res))
    off_path = (path + '/voxelization_{}.off'.format(res))
    if unpackbits:
        # Bit-packed occupancy grid: expand back to one value per voxel.
        occ = np.unpackbits(np.load(voxel_path))
        voxels = np.reshape(occ, ((res,) * 3))
    else:
        voxels = np.reshape(np.load(voxel_path)['occupancies'], ((res,) * 3))
    # Center and extent of the grid in world coordinates.
    loc = ((((min + max) / 2),) * 3)
    scale = (max - min)
    VoxelGrid(voxels, loc, scale).to_mesh().export(off_path)
    print('Finished: {}'.format(path))
def associated_legendre_polynomials(k, zero_m_only=True):
    """Symbolically build associated Legendre polynomials P_l^m(z) for l < k.

    Returns a ragged list ``P_l_m`` with ``P_l_m[l][m]`` a sympy expression
    in z.  With ``zero_m_only=True`` only the m = 0 column (ordinary Legendre
    polynomials) is computed; other entries remain 0.
    """
    z = sym.symbols('z')
    # Row l holds entries for m = 0 .. l.
    P_l_m = [([0] * (j + 1)) for j in range(k)]
    P_l_m[0][0] = 1
    if (k > 0):
        P_l_m[1][0] = z
        for j in range(2, k):
            # Bonnet recurrence: j*P_j = (2j-1)*z*P_{j-1} - (j-1)*P_{j-2}.
            P_l_m[j][0] = sym.simplify(((((((2 * j) - 1) * z) * P_l_m[(j - 1)][0]) - ((j - 1) * P_l_m[(j - 2)][0])) / j))
        if (not zero_m_only):
            for i in range(1, k):
                # Diagonal recurrence for P_i^i.  NOTE(review): the usual
                # (1 - z^2)^(1/2) factor is absent — presumably folded in
                # by the caller; confirm.
                P_l_m[i][i] = sym.simplify(((1 - (2 * i)) * P_l_m[(i - 1)][(i - 1)]))
                if ((i + 1) < k):
                    # First off-diagonal term: P_{i+1}^i = (2i+1) z P_i^i.
                    P_l_m[(i + 1)][i] = sym.simplify(((((2 * i) + 1) * z) * P_l_m[i][i]))
                for j in range((i + 2), k):
                    # General recurrence for fixed order i, increasing degree j.
                    P_l_m[j][i] = sym.simplify(((((((2 * j) - 1) * z) * P_l_m[(j - 1)][i]) - (((i + j) - 1) * P_l_m[(j - 2)][i])) / (j - i)))
    return P_l_m
def train_AdaRNN(args, model, optimizer, train_loader_list, epoch, dist_old=None, weight_mat=None):
    """Train AdaRNN for one epoch over a list of per-domain loaders.

    Before ``args.pre_epoch`` the model runs its pre-training forward; after
    that it runs the boosting forward, accumulating a per-layer/per-step
    distance matrix and updating the transfer-loss weights.

    Returns (mean losses, mean L1 loss, weight matrix, dist matrix or None).
    NOTE: requires CUDA — all tensors are moved with .cuda().
    """
    model.train()
    criterion = nn.MSELoss()
    criterion_1 = nn.L1Loss()
    loss_all = []
    loss_1_all = []
    dist_mat = torch.zeros(args.num_layers, args.len_seq).cuda()
    # Iterate only as many batches as the shortest loader provides.
    len_loader = np.inf
    for loader in train_loader_list:
        if (len(loader) < len_loader):
            len_loader = len(loader)
    for data_all in tqdm(zip(*train_loader_list), total=len_loader):
        optimizer.zero_grad()
        list_feat = []
        list_label = []
        for data in data_all:
            (feature, label, label_reg) = (data[0].cuda().float(), data[1].cuda().long(), data[2].cuda().float())
            list_feat.append(feature)
            list_label.append(label_reg)
        # Skip the batch when any source/target pair has mismatched batch
        # sizes (typically the last, partial batch).
        flag = False
        index = get_index((len(data_all) - 1))
        for temp_index in index:
            s1 = temp_index[0]
            s2 = temp_index[1]
            if (list_feat[s1].shape[0] != list_feat[s2].shape[0]):
                flag = True
                break
        if flag:
            continue
        total_loss = torch.zeros(1).cuda()
        for i in range(len(index)):
            feature_s = list_feat[index[i][0]]
            feature_t = list_feat[index[i][1]]
            label_reg_s = list_label[index[i][0]]
            label_reg_t = list_label[index[i][1]]
            # Source and target are concatenated so one forward serves both.
            feature_all = torch.cat((feature_s, feature_t), 0)
            if (epoch < args.pre_epoch):
                (pred_all, loss_transfer, out_weight_list) = model.forward_pre_train(feature_all, len_win=args.len_win)
            else:
                (pred_all, loss_transfer, dist, weight_mat) = model.forward_Boosting(feature_all, weight_mat)
                dist_mat = (dist_mat + dist)
            pred_s = pred_all[0:feature_s.size(0)]
            pred_t = pred_all[feature_s.size(0):]
            loss_s = criterion(pred_s, label_reg_s)
            loss_t = criterion(pred_t, label_reg_t)
            loss_l1 = criterion_1(pred_s, label_reg_s)
            # args.dw weights the domain-transfer loss against the MSE terms.
            total_loss = (((total_loss + loss_s) + loss_t) + (args.dw * loss_transfer))
        loss_all.append([total_loss.item(), (loss_s + loss_t).item(), loss_transfer.item()])
        loss_1_all.append(loss_l1.item())
        optimizer.zero_grad()
        total_loss.backward()
        torch.nn.utils.clip_grad_value_(model.parameters(), 3.0)
        optimizer.step()
    loss = np.array(loss_all).mean(axis=0)
    loss_l1 = np.array(loss_1_all).mean()
    if (epoch >= args.pre_epoch):
        if (epoch > args.pre_epoch):
            # From the second boosting epoch on, reweight using the distance
            # matrices of this epoch vs. the previous one.
            weight_mat = model.update_weight_Boosting(weight_mat, dist_old, dist_mat)
        return (loss, loss_l1, weight_mat, dist_mat)
    else:
        # NOTE(review): 'out_weight_list' is only bound inside the pre-train
        # branch of the batch loop; if every batch were skipped this would
        # raise NameError — confirm that is acceptable upstream.
        weight_mat = transform_type(out_weight_list)
        return (loss, loss_l1, weight_mat, None)
# NOTE(review): the following '.parametrize(...)' lines look like mangled
# '@pytest.mark.parametrize(...)' decorators (the '@pytest.mark' prefix
# appears lost in extraction); as written they are not valid Python —
# restore the decorators before use.
.parametrize('klass', (DummyVecEnv, ShmemVecEnv, SubprocVecEnv))
.parametrize('num_envs', (1, 4))
.parametrize('video_length', (10, 100))
.parametrize('video_interval', (1, 50))
def test_video_recorder(klass, num_envs, video_length, video_interval):
    """Step a wrapped vec env long enough to trigger two recordings and check the mp4 files exist and are non-empty."""
    def make_fn():
        env = gym.make('PongNoFrameskip-v4')
        return env
    fns = [make_fn for _ in range(num_envs)]
    env = klass(fns)
    with tempfile.TemporaryDirectory() as video_path:
        env = VecVideoRecorder(env, video_path, record_video_trigger=(lambda x: ((x % video_interval) == 0)), video_length=video_length)
        env.reset()
        # Enough steps to cover one full interval plus one recording length,
        # which yields exactly two recorded clips.
        for _ in range(((video_interval + video_length) + 1)):
            env.step(([0] * num_envs))
        env.close()
        recorded_video = glob.glob(os.path.join(video_path, '*.mp4'))
        assert (len(recorded_video) == 2)
        assert all(((os.stat(p).st_size != 0) for p in recorded_video))
class DataManager(object):
    """Serves class-incremental tasks from a named dataset.

    Splits the (shuffled or preset) class order into increments of
    ``init_cls`` then ``increment`` classes, and builds transformed datasets
    per task for the 'train', 'test', 'flip' and 'attack' modes.
    """

    def __init__(self, dataset_name, shuffle, seed, init_cls, increment, args=None):
        self.args = args
        self.dataset_name = dataset_name
        self._setup_data(dataset_name, shuffle, seed)
        assert (init_cls <= len(self._class_order)), 'No enough classes.'
        # Task sizes: init_cls first, then fixed increments, with any
        # remaining classes appended as a final (smaller) task.
        self._increments = [init_cls]
        while ((sum(self._increments) + increment) < len(self._class_order)):
            self._increments.append(increment)
        offset = (len(self._class_order) - sum(self._increments))
        if (offset > 0):
            self._increments.append(offset)
        # Transform list used by the 'attack' mode (tensor conversion only).
        self.attack = [transforms.ToTensor()]

    # NOTE(review): reads like a property ('manager.nb_tasks'); a '@property'
    # decorator may have been stripped, as with other decorators in this
    # file — confirm against upstream.
    def nb_tasks(self):
        """Number of incremental tasks."""
        return len(self._increments)

    def get_task_size(self, task):
        """Number of classes introduced by task index *task*."""
        return self._increments[task]

    def get_dataset(self, indices, source, mode, appendent=None, ret_data=False):
        """Build a dataset for the class *indices* from 'train' or 'test' data.

        *mode* selects the transform pipeline; *appendent* (data, targets)
        is concatenated in (e.g. exemplar memory).  With *ret_data* the raw
        arrays are returned alongside the dataset.
        """
        if (source == 'train'):
            (x, y) = (self._train_data, self._train_targets)
        elif (source == 'test'):
            (x, y) = (self._test_data, self._test_targets)
        else:
            raise ValueError('Unknown data source {}.'.format(source))
        if (mode == 'train'):
            trsf = transforms.Compose([*self._train_trsf, *self._common_trsf])
        elif (mode == 'flip'):
            trsf = transforms.Compose([*self._test_trsf, transforms.RandomHorizontalFlip(p=1.0), *self._common_trsf])
        elif (mode == 'test'):
            trsf = transforms.Compose([*self._test_trsf, *self._common_trsf])
        elif (mode == 'attack'):
            trsf = transforms.Compose([*self._test_trsf, *self.attack])
        else:
            raise ValueError('Unknown mode {}.'.format(mode))
        (data, targets) = ([], [])
        for idx in indices:
            # One contiguous label range [idx, idx + 1) per requested class.
            (class_data, class_targets) = self._select(x, y, low_range=idx, high_range=(idx + 1))
            data.append(class_data)
            targets.append(class_targets)
        if ((appendent is not None) and (len(appendent) != 0)):
            (appendent_data, appendent_targets) = appendent
            data.append(appendent_data)
            targets.append(appendent_targets)
        (data, targets) = (np.concatenate(data), np.concatenate(targets))
        if ret_data:
            return (data, targets, DummyDataset(data, targets, trsf, self.use_path))
        else:
            return DummyDataset(data, targets, trsf, self.use_path)

    def get_anchor_dataset(self, mode, appendent=None, ret_data=False):
        """Build a dataset purely from *appendent* (anchor/exemplar) data."""
        if (mode == 'train'):
            trsf = transforms.Compose([*self._train_trsf, *self._common_trsf])
        elif (mode == 'flip'):
            trsf = transforms.Compose([*self._test_trsf, transforms.RandomHorizontalFlip(p=1.0), *self._common_trsf])
        elif (mode == 'test'):
            trsf = transforms.Compose([*self._test_trsf, *self._common_trsf])
        else:
            raise ValueError('Unknown mode {}.'.format(mode))
        (data, targets) = ([], [])
        if ((appendent is not None) and (len(appendent) != 0)):
            (appendent_data, appendent_targets) = appendent
            data.append(appendent_data)
            targets.append(appendent_targets)
        (data, targets) = (np.concatenate(data), np.concatenate(targets))
        if ret_data:
            return (data, targets, DummyDataset(data, targets, trsf, self.use_path))
        else:
            return DummyDataset(data, targets, trsf, self.use_path)

    def get_dataset_with_split(self, indices, source, mode, appendent=None, val_samples_per_class=0):
        """Like get_dataset, but holds out *val_samples_per_class* per class.

        Returns (train_dataset, val_dataset); the appendent memory is split
        per class the same way.
        """
        if (source == 'train'):
            (x, y) = (self._train_data, self._train_targets)
        elif (source == 'test'):
            (x, y) = (self._test_data, self._test_targets)
        else:
            raise ValueError('Unknown data source {}.'.format(source))
        if (mode == 'train'):
            trsf = transforms.Compose([*self._train_trsf, *self._common_trsf])
        elif (mode == 'test'):
            trsf = transforms.Compose([*self._test_trsf, *self._common_trsf])
        else:
            raise ValueError('Unknown mode {}.'.format(mode))
        (train_data, train_targets) = ([], [])
        (val_data, val_targets) = ([], [])
        for idx in indices:
            (class_data, class_targets) = self._select(x, y, low_range=idx, high_range=(idx + 1))
            # Random per-class holdout; the rest goes to training.
            val_indx = np.random.choice(len(class_data), val_samples_per_class, replace=False)
            train_indx = list((set(np.arange(len(class_data))) - set(val_indx)))
            val_data.append(class_data[val_indx])
            val_targets.append(class_targets[val_indx])
            train_data.append(class_data[train_indx])
            train_targets.append(class_targets[train_indx])
        if (appendent is not None):
            (appendent_data, appendent_targets) = appendent
            for idx in range(0, (int(np.max(appendent_targets)) + 1)):
                (append_data, append_targets) = self._select(appendent_data, appendent_targets, low_range=idx, high_range=(idx + 1))
                val_indx = np.random.choice(len(append_data), val_samples_per_class, replace=False)
                train_indx = list((set(np.arange(len(append_data))) - set(val_indx)))
                val_data.append(append_data[val_indx])
                val_targets.append(append_targets[val_indx])
                train_data.append(append_data[train_indx])
                train_targets.append(append_targets[train_indx])
        (train_data, train_targets) = (np.concatenate(train_data), np.concatenate(train_targets))
        (val_data, val_targets) = (np.concatenate(val_data), np.concatenate(val_targets))
        return (DummyDataset(train_data, train_targets, trsf, self.use_path), DummyDataset(val_data, val_targets, trsf, self.use_path))

    def _setup_data(self, dataset_name, shuffle, seed):
        """Download the dataset and remap labels to the chosen class order."""
        idata = _get_idata(dataset_name, self.args)
        idata.download_data()
        (self._train_data, self._train_targets) = (idata.train_data, idata.train_targets)
        (self._test_data, self._test_targets) = (idata.test_data, idata.test_targets)
        self.use_path = idata.use_path
        self._train_trsf = idata.train_trsf
        self._test_trsf = idata.test_trsf
        self._common_trsf = idata.common_trsf
        order = [i for i in range(len(np.unique(self._train_targets)))]
        if shuffle:
            # Seeded shuffle so the class order is reproducible per run.
            np.random.seed(seed)
            order = np.random.permutation(len(order)).tolist()
        else:
            order = idata.class_order
        self._class_order = order
        logging.info(self._class_order)
        # Relabel targets so class i is the i-th class of the chosen order.
        self._train_targets = _map_new_class_index(self._train_targets, self._class_order)
        self._test_targets = _map_new_class_index(self._test_targets, self._class_order)

    def _select(self, x, y, low_range, high_range):
        """Return the samples whose (remapped) label lies in [low_range, high_range)."""
        idxes = np.where(np.logical_and((y >= low_range), (y < high_range)))[0]
        return (x[idxes], y[idxes])
def build_vis_if_needed():
    """Build the interpret-inline JS bundle via npm and copy it into the package.

    No-op when the bundle already exists at the destination.
    """
    script_path = os.path.dirname(os.path.abspath(__file__))
    js_bundle_dest = os.path.join(script_path, 'interpret', 'root', 'bld', 'lib', 'interpret-inline.js')
    if os.path.exists(js_bundle_dest):
        return
    js_path = os.path.join(script_path, '..', '..', 'shared', 'vis')
    # shell=True is required for the '&&' chaining; the command is a fixed
    # developer-controlled literal, not untrusted input.
    subprocess.run('npm install && npm run build-prod', cwd=js_path, shell=True)
    js_bundle_src = os.path.join(js_path, 'dist', 'interpret-inline.js')
    os.makedirs(os.path.dirname(js_bundle_dest), exist_ok=True)
    shutil.copyfile(js_bundle_src, js_bundle_dest)
    # Ship the bundle's license file alongside it.
    js_bundle_src_lic = os.path.join(js_path, 'dist', 'interpret-inline.js.LICENSE.txt')
    js_bundle_dest_lic = os.path.join(script_path, 'interpret', 'root', 'bld', 'lib', 'interpret-inline.js.LICENSE.txt')
    shutil.copyfile(js_bundle_src_lic, js_bundle_dest_lic)
def predFlowCoarse(corrKernel21, NetFlowCoarse, grid, up8X=True):
    """Predict a coarse flow field plus its spatial-gradient magnitude.

    Returns ``(flowGrad, flowCoarse)``: *flowGrad* is the channel-wise L2
    norm of the diagonal finite difference of the raw flow; *flowCoarse* is
    the flow added to *grid* in channels-last layout and clamped to the
    valid sampling range [-1, 1].
    """
    raw_flow = NetFlowCoarse(corrKernel21, up8X)
    _, _, width, height = raw_flow.size()
    # Diagonal finite difference: f[i+1, j+1] - f[i, j].
    shifted = raw_flow.narrow(2, 1, width - 1).narrow(3, 1, height - 1)
    anchored = raw_flow.narrow(2, 0, width - 1).narrow(3, 0, height - 1)
    flowGrad = torch.norm(shifted - anchored, dim=1, keepdim=True)
    # NCHW -> NHWC, offset by the sampling grid, clamp for grid_sample.
    flowCoarse = torch.clamp(raw_flow.permute(0, 2, 3, 1) + grid, min=-1, max=1)
    return (flowGrad, flowCoarse)
def load_model_for_inference(weights_path: str, quantization: Optional[int]=None, lora_weights_name_or_path: Optional[str]=None, torch_dtype: Optional[str]=None, force_auto_device_map: bool=False, trust_remote_code: bool=False) -> Tuple[(PreTrainedModel, PreTrainedTokenizerBase)]:
    """Load a HuggingFace model and tokenizer for inference.

    Args:
        weights_path: model name or path for config/weights/tokenizer.
        quantization: None for FP32/FP16, or 4/8 for bitsandbytes quantization.
        lora_weights_name_or_path: optional LoRA adapter; merged into the base
            model when not quantized.
        torch_dtype: 'auto', None, or a torch dtype name (e.g. 'bfloat16').
        force_auto_device_map: pass device_map='auto' to shard across devices.
        trust_remote_code: forwarded to the HF loaders.

    Returns:
        (model, tokenizer) ready for generation.

    Raises:
        ValueError: for unsupported quantization values, unsupported model
            types, or small100 with an incompatible transformers version.
    """
    # BUG FIX: use isinstance instead of 'type(x) == str' (handles str
    # subclasses and is the idiomatic type check).
    if isinstance(quantization, str):
        quantization = int(quantization)
    # BUG FIX: validate with a real exception rather than 'assert', which is
    # stripped under 'python -O'.
    if not ((quantization is None) or (quantization in [4, 8])):
        raise ValueError(f'Quantization must be 4 or 8, or None for FP32/FP16 training. You passed: {quantization}')
    print(f'Loading model from {weights_path}')
    config = AutoConfig.from_pretrained(weights_path, trust_remote_code=trust_remote_code)
    torch_dtype = (torch_dtype if (torch_dtype in ['auto', None]) else getattr(torch, torch_dtype))
    if ('small100' in weights_path):
        import transformers
        if (transformers.__version__ > '4.34.0'):
            raise ValueError('Small100 tokenizer is not supported in transformers > 4.34.0. Please use transformers <= 4.34.0 if you want to use small100')
        print(f'Loading custom small100 tokenizer for utils.tokenization_small100')
        from utils.tokenization_small100 import SMALL100Tokenizer as AutoTokenizer
    else:
        from transformers import AutoTokenizer
    tokenizer: PreTrainedTokenizerBase = AutoTokenizer.from_pretrained(weights_path, add_eos_token=True, trust_remote_code=trust_remote_code)
    # Ensure a pad token exists: prefer a dedicated padding token, then the
    # unk token, finally the eos token.
    if (tokenizer.pad_token_id is None):
        if ('<|padding|>' in tokenizer.get_vocab()):
            tokenizer.add_special_tokens({'pad_token': '<|padding|>'})
        elif (tokenizer.unk_token is not None):
            print('Tokenizer does not have a pad token, we will use the unk token as pad token.')
            tokenizer.pad_token_id = tokenizer.unk_token_id
        else:
            print('Tokenizer does not have a pad token. We will use the eos token as pad token.')
            tokenizer.pad_token_id = tokenizer.eos_token_id
    quant_args = {}
    if (quantization is not None):
        quant_args = ({'load_in_4bit': True} if (quantization == 4) else {'load_in_8bit': True})
        if (quantization == 4):
            bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type='nf4', bnb_4bit_compute_dtype=(torch.bfloat16 if (torch_dtype in ['auto', None]) else torch_dtype))
        else:
            bnb_config = BitsAndBytesConfig(load_in_8bit=True)
        print(f'Bits and Bytes config: {json.dumps(bnb_config.to_dict(), indent=4, ensure_ascii=False)}')
    else:
        print(f'Loading model with dtype: {torch_dtype}')
        bnb_config = None
    if (config.model_type in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES):
        print(f'Model {weights_path} is a encoder-decoder model. We will load it as a Seq2SeqLM model.')
        model: PreTrainedModel = AutoModelForSeq2SeqLM.from_pretrained(pretrained_model_name_or_path=weights_path, device_map=('auto' if force_auto_device_map else None), torch_dtype=torch_dtype, quantization_config=bnb_config, trust_remote_code=trust_remote_code, **quant_args)
    elif (config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES):
        # BUG FIX: causal LMs are decoder-only; the message previously said
        # "encoder-only".
        print(f'Model {weights_path} is a decoder-only model. We will load it as a CausalLM model.')
        model: PreTrainedModel = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path=weights_path, device_map=('auto' if force_auto_device_map else None), torch_dtype=torch_dtype, trust_remote_code=trust_remote_code, quantization_config=bnb_config, **quant_args)
        # Decoder-only generation requires left padding.
        tokenizer.padding_side = 'left'
    else:
        raise ValueError(f'''Model {weights_path} of type {config.model_type} is not supported by EasyTranslate.Supported models are:
Seq2SeqLM: {MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES}
CausalLM: {MODEL_FOR_CAUSAL_LM_MAPPING_NAMES}
''')
    if lora_weights_name_or_path:
        from peft import PeftModel
        print(f'Loading pretrained LORA weights from {lora_weights_name_or_path}')
        model = PeftModel.from_pretrained(model, lora_weights_name_or_path)
        if (quantization is None):
            # Merging LoRA into the base weights is only valid unquantized.
            model = model.merge_and_unload()
    return (model, tokenizer)
class Visualizer():
    def __init__(self, opt):
        """Configure visdom display (when display_id > 0) and HTML output dirs."""
        self.display_id = opt.display_id
        # HTML snapshots only during training and when not disabled.
        self.use_html = (opt.is_train and (not opt.no_html))
        self.win_size = opt.display_winsize
        self.name = opt.exp_name
        self.log_path = os.path.join(opt.expr_dir, 'train_log.txt')
        if (self.display_id > 0):
            # Lazy import: visdom is only required when displaying is enabled.
            import visdom
            self.vis = visdom.Visdom(port=opt.display_port, env=opt.exp_name)
        if self.use_html:
            self.web_dir = os.path.join(opt.expr_dir, 'web')
            self.img_dir = os.path.join(self.web_dir, 'images')
            print(('create web directory %s...' % self.web_dir))
            util.mkdirs([self.web_dir, self.img_dir])
def display_current_results(self, visuals, epoch, ncols=2, save_result=False, image_format='jpg'):
if (self.display_id > 0):
title = self.name
nrows = int(math.ceil((len(visuals.items()) / float(ncols))))
images = []
idx = 0
for (label, image_numpy) in visuals.items():
title += (' | ' if ((idx % nrows) == 0) else ', ')
title += label
images.append(image_numpy.transpose([2, 0, 1]))
idx += 1
if ((len(visuals.items()) % ncols) != 0):
white_image = (np.ones_like(image_numpy.transpose([2, 0, 1])) * 255)
images.append(white_image)
self.vis.images(images, nrow=nrows, win=(self.display_id + 1), opts=dict(title=title))
if (self.use_html and save_result):
for (label, image_numpy) in visuals.items():
img_path = os.path.join(self.img_dir, ('epoch%.3d_%s.%s' % (epoch, label, image_format)))
util.save_image(image_numpy, img_path)
webpage = html.HTML(self.web_dir, ('Experiment name = %s' % self.name), reflesh=1)
for n in range(epoch, 0, (- 1)):
webpage.add_header(('epoch [%d]' % n))
ims = []
txts = []
links = []
for (label, image_numpy) in visuals.items():
img_path = ('epoch%.3d_%s.%s' % (n, label, image_format))
ims.append(img_path)
txts.append(label)
links.append(img_path)
webpage.add_images(ims, txts, links, width=self.win_size)
webpage.save()
def plot_current_errors(self, epoch, counter_ratio, opt, errors):
if (not hasattr(self, 'plot_data')):
self.plot_data = {'X': [], 'Y': [], 'legend': list(errors.keys())}
self.plot_data['X'].append((epoch + counter_ratio))
self.plot_data['Y'].append([errors[k] for k in self.plot_data['legend']])
self.vis.line(X=np.stack(([np.array(self.plot_data['X'])] * len(self.plot_data['legend'])), 1), Y=np.array(self.plot_data['Y']), opts={'title': (self.name + ' loss over time'), 'legend': self.plot_data['legend'], 'xlabel': 'epoch', 'ylabel': 'loss'}, win=self.display_id)
def print_current_errors(self, epoch, i, errors, t):
message = ('(epoch: %d, iters: %d, time: %.3f) ' % (epoch, i, t))
for (k, v) in errors.items():
message += (', %s: %.3f' % (k, v))
print(message)
with open(self.log_path, 'a') as log_file:
log_file.write((message + '\n'))
def save_images_old(self, webpage, visuals, image_path, short=False):
image_dir = webpage.get_image_dir()
if short:
short_path = ntpath.basename(image_path)
name = os.path.splitext(short_path)[0]
else:
name = image_path
webpage.add_header(name)
ims = []
txts = []
links = []
for (label, image_numpy) in visuals.items():
image_name = ('%s_%s.jpg' % (name, label))
save_path = os.path.join(image_dir, image_name)
util.save_image(image_numpy, save_path)
ims.append(image_name)
txts.append(label)
links.append(image_name)
webpage.add_images(ims, txts, links, width=self.win_size) |
def forward_model(s, parallelization, ncores=None):
    """Run the dd.Model forward simulation for the input vector *s*.

    *ncores* is only forwarded when *parallelization* is truthy; the serial
    call path passes just (s, parallelization), matching dd.Model.run's
    two-argument form.  Returns the simulated observations.
    """
    model = dd.Model({})
    if parallelization:
        return model.run(s, parallelization, ncores)
    return model.run(s, parallelization)
class GANImageBuffer():
    """History buffer of previously generated images for GAN training.

    Feeding the discriminator a mix of fresh and historical fakes
    reduces oscillation during adversarial training.  A buffer size of
    0 disables the mechanism entirely.
    """

    def __init__(self, buffer_size, buffer_ratio=0.5):
        # Maximum number of images kept; 0 means pass-through behaviour.
        self.buffer_size = buffer_size
        if self.buffer_size > 0:
            self.img_num = 0
            self.image_buffer = []
        # Probability of swapping an incoming image with a buffered one.
        self.buffer_ratio = buffer_ratio

    def query(self, images):
        """Return a batch tensor where, once the buffer is full, each image is
        swapped with a random historical one with probability buffer_ratio."""
        if self.buffer_size == 0:
            # Buffering disabled: hand the input back untouched.
            return images
        out = []
        for img in images:
            img = torch.unsqueeze(img.data, 0)
            if self.img_num < self.buffer_size:
                # Buffer still filling: store the image and pass it through.
                self.img_num = self.img_num + 1
                self.image_buffer.append(img)
                out.append(img)
            else:
                swap = np.random.random() < self.buffer_ratio
                if swap:
                    # Return an older image and keep the new one instead.
                    pick = np.random.randint(0, self.buffer_size)
                    stored = self.image_buffer[pick].clone()
                    self.image_buffer[pick] = img
                    out.append(stored)
                else:
                    out.append(img)
        return torch.cat(out, 0)
def connect_addon(name: str='zpy_addon', addon_dir: Union[(Path, str)]='$BLENDERADDONS') -> None:
    """Install and enable a Blender addon by name.

    Args:
        name: Addon module name; ``<addon_dir>/<name>/__init__.py`` must exist.
        addon_dir: Directory containing the addon folder.  May contain
            environment variables (e.g. ``$BLENDERADDONS``), which are
            resolved by ``zpy.files.verify_path``.

    Raises:
        Whatever ``zpy.files.verify_path`` raises when the path is invalid.
    """
    log.debug(f'Connecting Addon {name}.')
    # BUGFIX: addon_dir was previously ignored and the path hard-coded to
    # '$BLENDERADDONS'; honour the parameter (its default keeps old behaviour).
    path = f'{addon_dir}/{name}/__init__.py'
    path = zpy.files.verify_path(path, make=False)
    bpy.ops.preferences.addon_install(filepath=str(path))
    bpy.ops.preferences.addon_enable(module=name)
def is_pt_flax_cross_test(test_case):
    """Decorator marking a test as a PyTorch+Flax cross-framework test.

    The test is skipped unless cross tests are enabled
    (``_run_pt_flax_cross_tests``) and both PyTorch and Flax are installed.
    When pytest is importable, the test is additionally tagged with the
    ``is_pt_flax_cross_test`` mark so it can be selected via ``-m``.
    """
    enabled = (_run_pt_flax_cross_tests
               and is_torch_available()
               and is_flax_available())
    if not enabled:
        return unittest.skip('test is PT+FLAX test')(test_case)
    try:
        import pytest
    except ImportError:
        # pytest not installed (plain unittest run): no mark to attach.
        return test_case
    return pytest.mark.is_pt_flax_cross_test()(test_case)
def register_algo(name):
    """Decorator factory: register the decorated function under *name*.

    The function is stored in the module-level ``algos_mapping`` registry
    and returned unchanged, so the decorator is transparent to callers.
    """
    def _register(fn):
        algos_mapping[name] = fn
        return fn
    return _register
class TestDARN(RWSLayerTest, unittest.TestCase):
    """Run the shared RWSLayer test-suite against a DARN layer."""

    def setUp(self):
        # A small layer (16 observed, 8 latent units) keeps the suite fast.
        self.layer = DARN(n_X=16, n_Y=8)
        self.layer.setup()
        self.n_samples = 10
def filter_backends(backends, filters=None, **kwargs):
    """Filter *backends* by attribute criteria and an optional predicate.

    Each keyword argument is matched against the backend configuration when
    the key is present in every backend's configuration, otherwise against
    the backend status.  *filters*, when given, is applied last as a
    per-backend predicate.  Returns the filtered list.
    """
    def _matches(obj, criteria):
        # True when every requested attribute equals the expected value.
        for attr, expected in criteria.items():
            if getattr(obj, attr, None) != expected:
                return False
        return True

    config_criteria = {}
    status_criteria = {}
    for key, value in kwargs.items():
        goes_to_config = all((key in backend.configuration()) for backend in backends)
        (config_criteria if goes_to_config else status_criteria)[key] = value
    if config_criteria:
        backends = [b for b in backends if _matches(b.configuration(), config_criteria)]
    if status_criteria:
        backends = [b for b in backends if _matches(b.status(), status_criteria)]
    return list(filter(filters, backends))
class TargetPassThroughTransformer(PassThroughTransformer):
    """Pass-through transformer operating on the *target* instead of the data.

    Unlike the parent class, ``transform`` returns ``y`` unchanged (ignoring
    ``X``), which lets it sit in target-transformation pipelines.
    """

    def __init__(self):
        super().__init__()

    def transform(self, X: Optional[DataLike]=None, y: Optional[DataLike]=None) -> Optional[DataLike]:
        """Ignore *X* and return *y* as-is."""
        return y

    def fit_transform(self, X: Optional[DataLike]=None, y: Optional[DataLike]=None) -> Optional[DataLike]:
        """Fit on (*X*, *y*), then return the (unchanged) target."""
        self.fit(X, y)
        return self.transform(X, y)
class ConfigFromDict(object):
    """Lightweight config object exposing the entries of a dict as attributes."""

    def __init__(self, attr_dict):
        # Promote every key/value pair to an instance attribute so
        # callers can write cfg.key instead of cfg['key'].
        for key in attr_dict:
            setattr(self, key, attr_dict[key])
class TrainEvaluator(AbstractEvaluator):
    """Evaluator that fits a pipeline under a train/validation resampling
    strategy (holdout or k-fold CV) and reports losses via ``finish_up``.

    Supports three fitting modes: plain per-fold fitting, budget-constrained
    fitting (``_partial_fit_and_predict_budget``), and iterative fitting that
    grows ``n_iter`` until the model converges or the iteration budget is
    exhausted.  Per-fold predictions are concatenated/averaged into the
    optimization, validation and test predictions handed to ``finish_up``.
    """
    def __init__(self, backend: Backend, queue: multiprocessing.Queue, metric: Scorer, port: Optional[int], configuration: Optional[Union[(int, Configuration)]]=None, scoring_functions: Optional[List[Scorer]]=None, seed: int=1, output_y_hat_optimization: bool=True, resampling_strategy: Optional[Union[(str, BaseCrossValidator, _RepeatedSplits, BaseShuffleSplit)]]=None, resampling_strategy_args: Optional[Dict[(str, Optional[Union[(float, int, str)]])]]=None, num_run: Optional[int]=None, budget: Optional[float]=None, budget_type: Optional[str]=None, keep_models: bool=False, include: Optional[List[str]]=None, exclude: Optional[List[str]]=None, disable_file_output: bool=False, init_params: Optional[Dict[(str, Any)]]=None):
        """Set up the splitter and per-fold bookkeeping arrays."""
        super().__init__(backend=backend, queue=queue, port=port, configuration=configuration, metric=metric, scoring_functions=scoring_functions, seed=seed, output_y_hat_optimization=output_y_hat_optimization, num_run=num_run, include=include, exclude=exclude, disable_file_output=disable_file_output, init_params=init_params, budget=budget, budget_type=budget_type)
        self.resampling_strategy = resampling_strategy
        if (resampling_strategy_args is None):
            self.resampling_strategy_args = {}
        else:
            self.resampling_strategy_args = resampling_strategy_args
        self.splitter = self.get_splitter(self.datamanager)
        self.num_cv_folds = self.splitter.get_n_splits(groups=self.resampling_strategy_args.get('groups'))
        self.X_train = self.datamanager.data['X_train']
        self.Y_train = self.datamanager.data['Y_train']
        # Ground-truth targets of the optimization (validation) split(s),
        # filled in by fit_predict_and_loss.
        self.Y_optimization: Optional[SUPPORTED_TARGET_TYPES] = None
        # Per-fold validation targets; train targets start as NaN and are
        # overwritten per fold as splits are fitted.
        self.Y_targets = ([None] * self.num_cv_folds)
        self.Y_train_targets = (np.ones(self.Y_train.shape) * np.NaN)
        self.models = ([None] * self.num_cv_folds)
        self.indices: List[Optional[Tuple[(List[int], List[int])]]] = ([None] * self.num_cv_folds)
        # 'partial' is flipped to False once a full (all-folds) fit runs.
        self.partial = True
        self.keep_models = keep_models
    def fit_predict_and_loss(self, iterative: bool=False) -> None:
        """Fit over all folds, predict, and report losses via finish_up.

        When *iterative* is True and the estimator supports iterative
        fitting, training proceeds in growing n_iter chunks, reporting
        intermediate results after each chunk.
        """
        additional_run_info: Optional[TYPE_ADDITIONAL_INFO] = None
        if iterative:
            if (self.num_cv_folds == 1):
                # Single holdout split: delegate to the per-fold iterative
                # routine, which does its own reporting.
                for (train_split, test_split) in self.splitter.split(self.X_train, self.Y_train, groups=self.resampling_strategy_args.get('groups')):
                    self.Y_optimization = self.Y_train[test_split]
                    self.Y_actual_train = self.Y_train[train_split]
                    self._partial_fit_and_predict_iterative(0, train_indices=train_split, test_indices=test_split, add_model_to_self=True)
            else:
                # Multi-fold iterative CV: interleave iterative_fit chunks
                # across folds until every fold converges.
                model = self._get_model()
                if (not model.estimator_supports_iterative_fit()):
                    # Fall back to one-shot fitting for non-iterative models.
                    self.fit_predict_and_loss(iterative=False)
                    return
                self.partial = False
                converged = ([False] * self.num_cv_folds)
                Y_train_pred = ([None] * self.num_cv_folds)
                Y_optimization_pred = ([None] * self.num_cv_folds)
                Y_valid_pred = ([None] * self.num_cv_folds)
                Y_test_pred = ([None] * self.num_cv_folds)
                train_splits = ([None] * self.num_cv_folds)
                self.models = [self._get_model() for i in range(self.num_cv_folds)]
                iterations = ([1] * self.num_cv_folds)
                total_n_iterations = ([0] * self.num_cv_folds)
                model_max_iter = [cast(IterativeComponent, model).get_max_iter() for model in self.models]
                if ((self.budget_type in ['iterations', 'mixed']) and (self.budget is None)):
                    raise ValueError(f'When budget type is {self.budget_type} the budget can not be None')
                if ((self.budget_type in ['iterations', 'mixed']) and (cast(float, self.budget) > 0)):
                    # Budget is a percentage of the model's maximum iterations.
                    max_n_iter_budget = int(np.ceil(((cast(float, self.budget) / 100) * model_max_iter[0])))
                    max_iter = min(model_max_iter[0], max_n_iter_budget)
                else:
                    max_iter = model_max_iter[0]
                models_current_iters = ([0] * self.num_cv_folds)
                Xt_array = ([None] * self.num_cv_folds)
                fit_params_array = ([{}] * self.num_cv_folds)
                y = _get_y_array(self.Y_train, self.task_type)
                train_losses = ([np.NaN] * self.num_cv_folds)
                train_fold_weights = ([np.NaN] * self.num_cv_folds)
                opt_losses = ([np.NaN] * self.num_cv_folds)
                opt_fold_weights = ([np.NaN] * self.num_cv_folds)
                while (not all(converged)):
                    splitter = self.get_splitter(self.datamanager)
                    for (i, (train_indices, test_indices)) in enumerate(splitter.split(self.X_train, y, groups=self.resampling_strategy_args.get('groups'))):
                        if converged[i]:
                            continue
                        model = self.models[i]
                        if (iterations[i] == 1):
                            # First pass on this fold: record targets and run
                            # the (cached) data-preprocessing transformer.
                            self.Y_train_targets[train_indices] = (self.Y_train.iloc[train_indices] if hasattr(self.Y_train, 'iloc') else self.Y_train[train_indices])
                            # NOTE(review): no .iloc guard here, unlike the
                            # line above — presumably Y_train is always an
                            # ndarray on this path; confirm.
                            self.Y_targets[i] = self.Y_train[test_indices]
                            (Xt, fit_params) = model.fit_transformer((self.X_train.iloc[train_indices] if hasattr(self.X_train, 'iloc') else self.X_train[train_indices]), (self.Y_train.iloc[train_indices] if hasattr(self.Y_train, 'iloc') else self.Y_train[train_indices]))
                            Xt_array[i] = Xt
                            fit_params_array[i] = fit_params
                        # Double the chunk size each round: 2, 2, 4, 8, ...
                        n_iter = (int(((2 ** iterations[i]) / 2)) if (iterations[i] > 1) else 2)
                        total_n_iterations[i] = (total_n_iterations[i] + n_iter)
                        model.iterative_fit(Xt_array[i], (self.Y_train.iloc[train_indices] if hasattr(self.Y_train, 'iloc') else self.Y_train[train_indices]), n_iter=n_iter, **fit_params_array[i])
                        (train_pred, opt_pred, valid_pred, test_pred) = self._predict(model, train_indices=train_indices, test_indices=test_indices)
                        Y_train_pred[i] = train_pred
                        Y_optimization_pred[i] = opt_pred
                        Y_valid_pred[i] = valid_pred
                        Y_test_pred[i] = test_pred
                        train_splits[i] = train_indices
                        train_loss = self._loss((self.Y_train.iloc[train_indices] if hasattr(self.Y_train, 'iloc') else self.Y_train[train_indices]), train_pred)
                        train_losses[i] = train_loss
                        # Fold losses are later weighted by fold size.
                        train_fold_weights[i] = len(train_indices)
                        optimization_loss = self._loss(self.Y_targets[i], opt_pred)
                        opt_losses[i] = optimization_loss
                        opt_fold_weights[i] = len(test_indices)
                        models_current_iters[i] = model.get_current_iter()
                        if (model.configuration_fully_fitted() or (models_current_iters[i] >= max_iter)):
                            converged[i] = True
                        iterations[i] = (iterations[i] + 1)
                # All folds converged: aggregate fold losses (size-weighted).
                train_fold_weights_percentage = [(w / sum(train_fold_weights)) for w in train_fold_weights]
                opt_fold_weights_percentage = [(w / sum(opt_fold_weights)) for w in opt_fold_weights]
                if all((isinstance(elem, dict) for elem in train_losses)):
                    # Multi-metric losses: average only the primary metric.
                    train_loss = np.average([train_losses[i][str(self.metric)] for i in range(self.num_cv_folds)], weights=train_fold_weights_percentage)
                else:
                    train_loss = np.average(train_losses, weights=train_fold_weights_percentage)
                if self.scoring_functions:
                    opt_loss = {}
                    for metric in opt_losses[0].keys():
                        opt_loss[metric] = np.average([opt_losses[i][metric] for i in range(self.num_cv_folds)], weights=opt_fold_weights_percentage)
                else:
                    opt_loss = np.average(opt_losses, weights=opt_fold_weights_percentage)
                Y_targets = self.Y_targets
                Y_train_targets = self.Y_train_targets
                # Stack per-fold out-of-fold predictions into one array.
                Y_optimization_preds = np.concatenate([Y_optimization_pred[i] for i in range(self.num_cv_folds) if (Y_optimization_pred[i] is not None)])
                Y_targets = np.concatenate([Y_targets[i] for i in range(self.num_cv_folds) if (Y_targets[i] is not None)])
                if (self.X_valid is not None):
                    # Validation/test predictions are averaged across folds.
                    Y_valid_preds = np.array([Y_valid_pred[i] for i in range(self.num_cv_folds) if (Y_valid_pred[i] is not None)])
                    if (len(Y_valid_preds.shape) == 3):
                        Y_valid_preds = np.nanmean(Y_valid_preds, axis=0)
                else:
                    Y_valid_preds = None
                if (self.X_test is not None):
                    Y_test_preds = np.array([Y_test_pred[i] for i in range(self.num_cv_folds) if (Y_test_pred[i] is not None)])
                    if (len(Y_test_preds.shape) == 3):
                        Y_test_preds = np.nanmean(Y_test_preds, axis=0)
                else:
                    Y_test_preds = None
                self.Y_optimization = Y_targets
                self.Y_actual_train = Y_train_targets
                self.model = self._get_model()
                # DONOTADVANCE unless at least one fold used its full budget.
                status = StatusType.DONOTADVANCE
                if any([(model_current_iter == max_iter) for model_current_iter in models_current_iters]):
                    status = StatusType.SUCCESS
                self.finish_up(loss=opt_loss, train_loss=train_loss, opt_pred=Y_optimization_preds, valid_pred=Y_valid_preds, test_pred=Y_test_preds, additional_run_info=additional_run_info, file_output=True, final_call=all(converged), status=status)
        else:
            # Non-iterative path: fit each fold once (optionally with budget).
            self.partial = False
            Y_train_pred = ([None] * self.num_cv_folds)
            Y_optimization_pred = ([None] * self.num_cv_folds)
            Y_valid_pred = ([None] * self.num_cv_folds)
            Y_test_pred = ([None] * self.num_cv_folds)
            train_splits = ([None] * self.num_cv_folds)
            y = _get_y_array(self.Y_train, self.task_type)
            train_losses = []
            train_fold_weights = []
            opt_losses = []
            opt_fold_weights = []
            for (i, (train_split, test_split)) in enumerate(self.splitter.split(self.X_train, y, groups=self.resampling_strategy_args.get('groups'))):
                if (self.budget_type is None):
                    (train_pred, opt_pred, valid_pred, test_pred, additional_run_info) = self._partial_fit_and_predict_standard(i, train_indices=train_split, test_indices=test_split, add_model_to_self=(self.num_cv_folds == 1))
                else:
                    (train_pred, opt_pred, valid_pred, test_pred, additional_run_info) = self._partial_fit_and_predict_budget(i, train_indices=train_split, test_indices=test_split, add_model_to_self=(self.num_cv_folds == 1))
                # additional_run_info can only be propagated for fold 0.
                if ((additional_run_info is not None) and (len(additional_run_info) > 0) and (i > 0)):
                    raise TAEAbortException(('Found additional run info "%s" in fold %d, but cannot handle additional run info if fold >= 1.' % (additional_run_info, i)))
                Y_train_pred[i] = train_pred
                Y_optimization_pred[i] = opt_pred
                Y_valid_pred[i] = valid_pred
                Y_test_pred[i] = test_pred
                train_splits[i] = train_split
                train_loss = self._loss(self.Y_train_targets[train_split], train_pred)
                train_losses.append(train_loss)
                train_fold_weights.append(len(train_split))
                optimization_loss = self._loss(self.Y_targets[i], opt_pred)
                opt_losses.append(optimization_loss)
                opt_fold_weights.append(len(test_split))
            # Aggregate per-fold losses weighted by fold size.
            train_fold_weights = [(w / sum(train_fold_weights)) for w in train_fold_weights]
            opt_fold_weights = [(w / sum(opt_fold_weights)) for w in opt_fold_weights]
            if all((isinstance(elem, dict) for elem in train_losses)):
                train_loss = np.average([train_losses[i][str(self.metric)] for i in range(self.num_cv_folds)], weights=train_fold_weights)
            else:
                train_loss = np.average(train_losses, weights=train_fold_weights)
            if self.scoring_functions:
                opt_loss = {}
                for metric in opt_losses[0].keys():
                    opt_loss[metric] = np.average([opt_losses[i][metric] for i in range(self.num_cv_folds)], weights=opt_fold_weights)
            else:
                opt_loss = np.average(opt_losses, weights=opt_fold_weights)
            Y_targets = self.Y_targets
            Y_train_targets = self.Y_train_targets
            Y_optimization_pred = np.concatenate([Y_optimization_pred[i] for i in range(self.num_cv_folds) if (Y_optimization_pred[i] is not None)])
            Y_targets = np.concatenate([Y_targets[i] for i in range(self.num_cv_folds) if (Y_targets[i] is not None)])
            if (self.X_valid is not None):
                Y_valid_pred = np.array([Y_valid_pred[i] for i in range(self.num_cv_folds) if (Y_valid_pred[i] is not None)])
                if (len(np.shape(Y_valid_pred)) == 3):
                    Y_valid_pred = np.nanmean(Y_valid_pred, axis=0)
            if (self.X_test is not None):
                Y_test_pred = np.array([Y_test_pred[i] for i in range(self.num_cv_folds) if (Y_test_pred[i] is not None)])
                if (len(np.shape(Y_test_pred)) == 3):
                    Y_test_pred = np.nanmean(Y_test_pred, axis=0)
            self.Y_optimization = Y_targets
            self.Y_actual_train = Y_train_targets
            if (self.num_cv_folds > 1):
                # With CV there is no single fitted model: store an empty one.
                self.model = self._get_model()
                self._added_empty_model = True
                status = StatusType.SUCCESS
            elif ((self.budget_type == 'iterations') or ((self.budget_type == 'mixed') and self.model.estimator_supports_iterative_fit())):
                budget_factor = self.model.get_max_iter()
                n_iter = int(np.ceil(((cast(float, self.budget) / 100) * budget_factor)))
                model_current_iter = self.model.get_current_iter()
                if (model_current_iter < n_iter):
                    status = StatusType.DONOTADVANCE
                else:
                    status = StatusType.SUCCESS
            elif self.model.estimator_supports_iterative_fit():
                model_max_iter = self.model.get_max_iter()
                model_current_iter = self.model.get_current_iter()
                if (model_current_iter < model_max_iter):
                    status = StatusType.DONOTADVANCE
                else:
                    status = StatusType.SUCCESS
            else:
                status = StatusType.SUCCESS
            self.finish_up(loss=opt_loss, train_loss=train_loss, opt_pred=Y_optimization_pred, valid_pred=(Y_valid_pred if (self.X_valid is not None) else None), test_pred=(Y_test_pred if (self.X_test is not None) else None), additional_run_info=additional_run_info, file_output=True, final_call=True, status=status)
    def partial_fit_predict_and_loss(self, fold: int, iterative: bool=False) -> None:
        """Fit and evaluate a single CV fold (partial-cv strategies)."""
        if (fold > self.num_cv_folds):
            raise ValueError(('Cannot evaluate a fold %d which is higher than the number of folds %d.' % (fold, self.num_cv_folds)))
        if (self.budget_type is not None):
            raise NotImplementedError()
        y = _get_y_array(self.Y_train, self.task_type)
        # Advance the splitter until the requested fold's split is reached.
        for (i, (train_split, test_split)) in enumerate(self.splitter.split(self.X_train, y, groups=self.resampling_strategy_args.get('groups'))):
            if (i != fold):
                continue
            else:
                break
        if (self.num_cv_folds > 1):
            self.Y_optimization = self.Y_train[test_split]
            self.Y_actual_train = self.Y_train[train_split]
        if iterative:
            self._partial_fit_and_predict_iterative(fold, train_indices=train_split, test_indices=test_split, add_model_to_self=True)
        elif (self.budget_type is not None):
            raise NotImplementedError()
        else:
            (train_pred, opt_pred, valid_pred, test_pred, additional_run_info) = self._partial_fit_and_predict_standard(fold, train_indices=train_split, test_indices=test_split, add_model_to_self=True)
            train_loss = self._loss(self.Y_actual_train, train_pred)
            loss = self._loss(self.Y_targets[fold], opt_pred)
            if self.model.estimator_supports_iterative_fit():
                model_max_iter = self.model.get_max_iter()
                model_current_iter = self.model.get_current_iter()
                if (model_current_iter < model_max_iter):
                    status = StatusType.DONOTADVANCE
                else:
                    status = StatusType.SUCCESS
            else:
                status = StatusType.SUCCESS
            self.finish_up(loss=loss, train_loss=train_loss, opt_pred=opt_pred, valid_pred=valid_pred, test_pred=test_pred, file_output=False, final_call=True, additional_run_info=None, status=status)
    def _partial_fit_and_predict_iterative(self, fold: int, train_indices: List[int], test_indices: List[int], add_model_to_self: bool) -> None:
        """Iteratively fit one fold, reporting after each n_iter chunk."""
        model = self._get_model()
        self.indices[fold] = (train_indices, test_indices)
        # Only write prediction files for pure holdout (a single fold).
        file_output = (True if (self.num_cv_folds == 1) else False)
        if model.estimator_supports_iterative_fit():
            # NOTE(review): the condition checks hasattr(self.Y_train, 'iloc')
            # but indexes self.X_train — looks like it should test X_train
            # (cf. _partial_fit_and_predict_standard); confirm before relying
            # on mixed DataFrame/ndarray inputs here.
            (Xt, fit_params) = model.fit_transformer((self.X_train.iloc[train_indices] if hasattr(self.Y_train, 'iloc') else self.X_train[train_indices]), (self.Y_train.iloc[train_indices] if hasattr(self.Y_train, 'iloc') else self.Y_train[train_indices]))
            self.Y_train_targets[train_indices] = (self.Y_train.iloc[train_indices] if hasattr(self.Y_train, 'iloc') else self.Y_train[train_indices])
            iteration = 1
            total_n_iteration = 0
            model_max_iter = model.get_max_iter()
            if ((self.budget is not None) and (self.budget > 0)):
                # Budget is a percentage of the model's maximum iterations.
                max_n_iter_budget = int(np.ceil(((self.budget / 100) * model_max_iter)))
                max_iter = min(model_max_iter, max_n_iter_budget)
            else:
                max_iter = model_max_iter
            model_current_iter = 0
            while ((not model.configuration_fully_fitted()) and (model_current_iter < max_iter)):
                # Doubling schedule for chunk sizes: 2, 2, 4, 8, ...
                n_iter = (int(((2 ** iteration) / 2)) if (iteration > 1) else 2)
                total_n_iteration += n_iter
                model.iterative_fit(Xt, (self.Y_train.iloc[train_indices] if hasattr(self.Y_train, 'iloc') else self.Y_train[train_indices]), n_iter=n_iter, **fit_params)
                (Y_train_pred, Y_optimization_pred, Y_valid_pred, Y_test_pred) = self._predict(model, train_indices=train_indices, test_indices=test_indices)
                if add_model_to_self:
                    self.model = model
                train_loss = self._loss((self.Y_train.iloc[train_indices] if hasattr(self.Y_train, 'iloc') else self.Y_train[train_indices]), Y_train_pred)
                loss = self._loss(self.Y_train[test_indices], Y_optimization_pred)
                additional_run_info = model.get_additional_run_info()
                model_current_iter = model.get_current_iter()
                if (model_current_iter < max_iter):
                    status = StatusType.DONOTADVANCE
                else:
                    status = StatusType.SUCCESS
                if (model.configuration_fully_fitted() or (model_current_iter >= max_iter)):
                    final_call = True
                else:
                    final_call = False
                # Report an intermediate (or final) result for this chunk.
                self.finish_up(loss=loss, train_loss=train_loss, opt_pred=Y_optimization_pred, valid_pred=Y_valid_pred, test_pred=Y_test_pred, additional_run_info=additional_run_info, file_output=file_output, final_call=final_call, status=status)
                iteration += 1
            return
        else:
            # Model cannot be fitted iteratively: fall back to one-shot fit.
            (Y_train_pred, Y_optimization_pred, Y_valid_pred, Y_test_pred, additional_run_info) = self._partial_fit_and_predict_standard(fold, train_indices, test_indices, add_model_to_self)
            train_loss = self._loss((self.Y_train.iloc[train_indices] if hasattr(self.Y_train, 'iloc') else self.Y_train[train_indices]), Y_train_pred)
            loss = self._loss(self.Y_train[test_indices], Y_optimization_pred)
            if self.model.estimator_supports_iterative_fit():
                model_max_iter = self.model.get_max_iter()
                model_current_iter = self.model.get_current_iter()
                if (model_current_iter < model_max_iter):
                    status = StatusType.DONOTADVANCE
                else:
                    status = StatusType.SUCCESS
            else:
                status = StatusType.SUCCESS
            self.finish_up(loss=loss, train_loss=train_loss, opt_pred=Y_optimization_pred, valid_pred=Y_valid_pred, test_pred=Y_test_pred, additional_run_info=additional_run_info, file_output=file_output, final_call=True, status=status)
            return
    def _partial_fit_and_predict_standard(self, fold: int, train_indices: List[int], test_indices: List[int], add_model_to_self: bool=False) -> Tuple[(PIPELINE_DATA_DTYPE, PIPELINE_DATA_DTYPE, PIPELINE_DATA_DTYPE, PIPELINE_DATA_DTYPE, TYPE_ADDITIONAL_INFO)]:
        """Fit one fold with a plain (non-iterative, non-budget) fit and
        return (train, opt, valid, test) predictions plus run info."""
        model = self._get_model()
        self.indices[fold] = (train_indices, test_indices)
        _fit_and_suppress_warnings(self.logger, model, (self.X_train.iloc[train_indices] if hasattr(self.X_train, 'iloc') else self.X_train[train_indices]), (self.Y_train.iloc[train_indices] if hasattr(self.Y_train, 'iloc') else self.Y_train[train_indices]))
        if add_model_to_self:
            self.model = model
        else:
            self.models[fold] = model
        self.Y_targets[fold] = (self.Y_train.iloc[test_indices] if hasattr(self.Y_train, 'iloc') else self.Y_train[test_indices])
        self.Y_train_targets[train_indices] = (self.Y_train.iloc[train_indices] if hasattr(self.Y_train, 'iloc') else self.Y_train[train_indices])
        (train_pred, opt_pred, valid_pred, test_pred) = self._predict(model=model, train_indices=train_indices, test_indices=test_indices)
        additional_run_info = model.get_additional_run_info()
        return (train_pred, opt_pred, valid_pred, test_pred, additional_run_info)
    def _partial_fit_and_predict_budget(self, fold: int, train_indices: List[int], test_indices: List[int], add_model_to_self: bool=False) -> Tuple[(PIPELINE_DATA_DTYPE, PIPELINE_DATA_DTYPE, PIPELINE_DATA_DTYPE, PIPELINE_DATA_DTYPE, TYPE_ADDITIONAL_INFO)]:
        """Fit one fold under a runtime/iteration budget via _fit_with_budget."""
        assert (self.budget is not None)
        model = self._get_model()
        self.indices[fold] = (train_indices, test_indices)
        self.Y_targets[fold] = self.Y_train[test_indices]
        # NOTE(review): the trailing comma wraps the targets in a 1-tuple
        # before assignment, unlike _partial_fit_and_predict_standard — this
        # relies on numpy broadcasting (1, n) -> (n,); confirm intentional.
        self.Y_train_targets[train_indices] = ((self.Y_train.iloc[train_indices] if hasattr(self.Y_train, 'iloc') else self.Y_train[train_indices]),)
        _fit_with_budget(X_train=self.X_train, Y_train=self.Y_train, budget=self.budget, budget_type=self.budget_type, logger=self.logger, model=model, train_indices=train_indices, task_type=self.task_type)
        (train_pred, opt_pred, valid_pred, test_pred) = self._predict(model, train_indices=train_indices, test_indices=test_indices)
        if add_model_to_self:
            self.model = model
        else:
            self.models[fold] = model
        additional_run_info = model.get_additional_run_info()
        return (train_pred, opt_pred, valid_pred, test_pred, additional_run_info)
    def _predict(self, model: BaseEstimator, test_indices: List[int], train_indices: List[int]) -> Tuple[(PIPELINE_DATA_DTYPE, PIPELINE_DATA_DTYPE, PIPELINE_DATA_DTYPE, PIPELINE_DATA_DTYPE)]:
        """Predict on the train split, the held-out split, and (when present)
        the external validation and test sets.

        The train targets passed as the last predict_function argument are
        used consistently for all four calls (including opt/valid/test).
        """
        train_pred = self.predict_function((self.X_train.iloc[train_indices] if hasattr(self.X_train, 'iloc') else self.X_train[train_indices]), model, self.task_type, (self.Y_train.iloc[train_indices] if hasattr(self.Y_train, 'iloc') else self.Y_train[train_indices]))
        opt_pred = self.predict_function((self.X_train.iloc[test_indices] if hasattr(self.X_train, 'iloc') else self.X_train[test_indices]), model, self.task_type, (self.Y_train.iloc[train_indices] if hasattr(self.Y_train, 'iloc') else self.Y_train[train_indices]))
        if (self.X_valid is not None):
            X_valid = self.X_valid.copy()
            valid_pred = self.predict_function(X_valid, model, self.task_type, self.Y_train[train_indices])
        else:
            valid_pred = None
        if (self.X_test is not None):
            X_test = self.X_test.copy()
            test_pred = self.predict_function(X_test, model, self.task_type, (self.Y_train.iloc[train_indices] if hasattr(self.Y_train, 'iloc') else self.Y_train[train_indices]))
        else:
            test_pred = None
        return (train_pred, opt_pred, valid_pred, test_pred)
    def get_splitter(self, D: AbstractDataManager) -> Union[(BaseCrossValidator, _RepeatedSplits, BaseShuffleSplit)]:
        """Build the sklearn splitter matching the configured resampling
        strategy (custom splitter object, holdout, or cv variants)."""
        if (self.resampling_strategy_args is None):
            self.resampling_strategy_args = {}
        if ((self.resampling_strategy is not None) and (not isinstance(self.resampling_strategy, str))):
            # A splitter object was passed directly: validate and return it.
            if ('groups' not in self.resampling_strategy_args):
                self.resampling_strategy_args['groups'] = None
            if isinstance(self.resampling_strategy, (BaseCrossValidator, _RepeatedSplits, BaseShuffleSplit)):
                self.check_splitter_resampling_strategy(X=D.data['X_train'], y=D.data['Y_train'], groups=self.resampling_strategy_args.get('groups'), task=D.info['task'], resampling_strategy=self.resampling_strategy)
                return self.resampling_strategy
            raise ValueError('Unsupported resampling strategy {}/{} provided'.format(self.resampling_strategy, type(self.resampling_strategy)))
        y = D.data['Y_train']
        shuffle = self.resampling_strategy_args.get('shuffle', True)
        # Default 67/33 holdout split unless the user overrides train_size.
        train_size = 0.67
        if self.resampling_strategy_args:
            train_size_from_user = self.resampling_strategy_args.get('train_size')
            if (train_size_from_user is not None):
                train_size = float(train_size_from_user)
        test_size = float(('%.4f' % (1 - train_size)))
        if ((D.info['task'] in CLASSIFICATION_TASKS) and (D.info['task'] != MULTILABEL_CLASSIFICATION)):
            # Single-label classification: prefer stratified splitters.
            y = y.ravel()
            if (self.resampling_strategy in ['holdout', 'holdout-iterative-fit']):
                if shuffle:
                    try:
                        cv = StratifiedShuffleSplit(n_splits=1, test_size=test_size, random_state=1)
                        test_cv = copy.deepcopy(cv)
                        next(test_cv.split(y, y))
                    except ValueError as e:
                        # Stratification impossible (class with < 2 members):
                        # fall back to an unstratified shuffle split.
                        if ('The least populated class in y has only' in e.args[0]):
                            cv = ShuffleSplit(n_splits=1, test_size=test_size, random_state=1)
                        else:
                            raise e
                else:
                    # No shuffling: deterministic leading-prefix train split.
                    tmp_train_size = int(np.floor((train_size * y.shape[0])))
                    test_fold = np.zeros(y.shape[0])
                    test_fold[:tmp_train_size] = (- 1)
                    cv = PredefinedSplit(test_fold=test_fold)
                    cv.n_splits = 1
            elif (self.resampling_strategy in ['cv', 'cv-iterative-fit', 'partial-cv', 'partial-cv-iterative-fit']):
                if shuffle:
                    cv = StratifiedKFold(n_splits=self.resampling_strategy_args['folds'], shuffle=shuffle, random_state=1)
                else:
                    cv = KFold(n_splits=self.resampling_strategy_args['folds'], shuffle=shuffle)
            else:
                raise ValueError(self.resampling_strategy)
        elif (self.resampling_strategy in ['holdout', 'holdout-iterative-fit']):
            # Regression / multilabel: unstratified holdout.
            if shuffle:
                cv = ShuffleSplit(n_splits=1, test_size=test_size, random_state=1)
            else:
                tmp_train_size = int(np.floor((train_size * y.shape[0])))
                test_fold = np.zeros(y.shape[0])
                test_fold[:tmp_train_size] = (- 1)
                cv = PredefinedSplit(test_fold=test_fold)
                cv.n_splits = 1
        elif (self.resampling_strategy in ['cv', 'partial-cv', 'partial-cv-iterative-fit']):
            random_state = (1 if shuffle else None)
            cv = KFold(n_splits=self.resampling_strategy_args['folds'], shuffle=shuffle, random_state=random_state)
        else:
            raise ValueError(self.resampling_strategy)
        return cv
    def check_splitter_resampling_strategy(cls, X: PIPELINE_DATA_DTYPE, y: np.ndarray, task: int, groups: Any, resampling_strategy: Union[(BaseCrossValidator, _RepeatedSplits, BaseShuffleSplit)]) -> None:
        """Smoke-test a user-provided splitter against the training data.

        NOTE(review): the first parameter is named ``cls`` but there is no
        @classmethod decorator — it is invoked as an instance method
        (see get_splitter), so ``cls`` actually binds the instance; confirm.
        """
        if (((task in CLASSIFICATION_TASKS) and (task != MULTILABEL_CLASSIFICATION)) or ((task in REGRESSION_TASKS) and (task != MULTIOUTPUT_REGRESSION))):
            y = y.ravel()
        try:
            resampling_strategy.get_n_splits(X=X, y=y, groups=groups)
            next(resampling_strategy.split(X=X, y=y, groups=groups))
        except Exception as e:
            raise ValueError('Unsupported resampling strategy {}/{} cause exception: {}'.format(resampling_strategy, groups, str(e)))
def PreResNetWrapper(num_blocks, num_class=10, block=None, attention_module=None):
    """Build a PreResNet whose residual blocks carry the given attention module.

    Partially applies *attention_module* onto *block* so that PreResNet can
    keep its plain ``(in_planes, planes, stride)`` block-constructor contract.
    """
    def make_block(in_planes, planes, stride):
        # Inject the attention module into every residual block.
        return block(in_planes, planes, stride, attention_module=attention_module)
    return PreResNet(make_block, num_blocks, num_class=num_class)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.