code stringlengths 101 5.91M |
|---|
def lines(f, delim):
    """Yield each line of file-like *f* as a lazy iterator of floats.

    Reads until ``readline`` returns '' (EOF). Each yielded value is a map
    object over the delimiter-split, stripped line — consumed lazily, exactly
    like the original implementation.
    """
    for raw in iter(f.readline, ''):
        yield map(float, raw.strip().split(delim))
class DirectoryCLI(CLIMixin, metaclass=abc.ABCMeta):
    """Abstract base for CLIs that normalize every image in a directory.

    NOTE(review): ``get_parent_parser`` takes ``cls`` but shows no
    ``@classmethod`` decorator here — presumably stripped in transit;
    confirm against the original source.
    """

    def get_parent_parser(cls, desc: str, valid_modalities: frozenset[str]=intnorm.VALID_MODALITIES, **kwargs: typing.Any) -> argparse.ArgumentParser:
        """Build the argparse parser shared by directory-based normalizer CLIs.

        NOTE(review): ``valid_modalities`` and ``**kwargs`` are not used in
        this body — the ``--modality`` choices come straight from
        ``intnorm.VALID_MODALITIES``; confirm whether the parameter should
        feed ``choices=``.
        """
        parser = argparse.ArgumentParser(description=desc, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        # Positional: directory of input images; dir_path() validates existence.
        parser.add_argument('image_dir', type=intnormt.dir_path(), help='Path of directory containing images to normalize.')
        parser.add_argument('-m', '--mask-dir', type=intnormt.dir_path(), default=None, help='Path of directory of foreground masks corresponding to images.')
        parser.add_argument('-o', '--output-dir', type=intnormt.dir_path(), default=None, help='Path of directory in which to save normalized images.')
        parser.add_argument('-mo', '--modality', type=str, default='t1', choices=intnorm.VALID_MODALITIES, help='Modality of the images.')
        parser.add_argument('-e', '--extension', type=str, default='nii*', help='Extension of images.')
        # Repeatable -v flag; count gives the verbosity level as an int.
        parser.add_argument('-v', '--verbosity', action='count', default=0, help='Increase output verbosity (e.g., -vv is more than -v).')
        parser.add_argument('--version', action='store_true', help='Print the version of intensity-normalization.')
        return parser

    def call_from_argparse_args(self, args: argparse.Namespace, /, **kwargs: typing.Any) -> None:
        """Execute the CLI from parsed arguments; subclasses must override."""
        raise NotImplementedError
def main():
    """Pre-train a Wav2Vec2 model with HF Accelerate (no Trainer).

    End-to-end script body: parses args, configures distributed state and
    logging, loads/splits/pre-processes the audio dataset, builds the model,
    optimizer and scheduler, then runs contrastive pre-training with periodic
    logging, checkpointing and per-epoch validation.
    """
    args = parse_args()
    # Distributed / mixed-precision context. Only the local main process logs
    # at INFO; all other ranks are quieted to ERROR.
    accelerator = Accelerator()
    logger.info(accelerator.state)
    logger.setLevel((logging.INFO if accelerator.is_local_main_process else logging.ERROR))
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
        # W&B run is named after the last component of the output directory.
        if is_wandb_available():
            import wandb
            wandb.init(project=args.output_dir.split('/')[(- 1)])
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    if (args.seed is not None):
        set_seed(args.seed)
    # Only the main process touches the filesystem / clones the hub repo.
    if accelerator.is_main_process:
        if (args.push_to_hub and (not args.preprocessing_only)):
            if (args.hub_model_id is None):
                repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
            else:
                repo_name = args.hub_model_id
            repo = Repository(args.output_dir, clone_from=repo_name)
        elif (args.output_dir is not None):
            os.makedirs(args.output_dir, exist_ok=True)
    accelerator.wait_for_everyone()
    # Load every requested (config, split) pair and merge them into 'train'.
    datasets_splits = []
    for (dataset_config_name, train_split_name) in zip(args.dataset_config_names, args.dataset_split_names):
        dataset_split = load_dataset(args.dataset_name, dataset_config_name, split=train_split_name, cache_dir=args.cache_dir)
        datasets_splits.append(dataset_split)
    raw_datasets = DatasetDict()
    if (len(datasets_splits) > 1):
        raw_datasets['train'] = concatenate_datasets(datasets_splits).shuffle(seed=args.seed)
    else:
        raw_datasets['train'] = datasets_splits[0]
    # Carve a validation set off the front of the training data.
    num_validation_samples = ((raw_datasets['train'].num_rows * args.validation_split_percentage) // 100)
    if (num_validation_samples == 0):
        raise ValueError(f"`args.validation_split_percentage` is less than a single sample for {len(raw_datasets['train'])} training samples. Increase `args.num_validation_split_percentage`. ")
    raw_datasets['validation'] = raw_datasets['train'].select(range(num_validation_samples))
    raw_datasets['train'] = raw_datasets['train'].select(range(num_validation_samples, raw_datasets['train'].num_rows))
    # Audio is resampled on the fly to the feature extractor's sampling rate.
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(args.model_name_or_path)
    raw_datasets = raw_datasets.cast_column(args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate))
    if (not feature_extractor.do_normalize):
        raise ValueError('Training is only supported for normalized inputs. Make sure ``feature_extractor.do_normalize == True``')
    # Length limits in raw samples, derived from the duration limits.
    max_length = int((args.max_duration_in_seconds * feature_extractor.sampling_rate))
    min_length = int((args.min_duration_in_seconds * feature_extractor.sampling_rate))

    def prepare_dataset(batch):
        # Extract (and truncate to max_length) input values for one example.
        sample = batch[args.audio_column_name]
        inputs = feature_extractor(sample['array'], sampling_rate=sample['sampling_rate'], max_length=max_length, truncation=True)
        batch['input_values'] = inputs.input_values[0]
        batch['input_length'] = len(inputs.input_values[0])
        return batch

    cache_file_names = None
    if (args.train_cache_file_name is not None):
        cache_file_names = {'train': args.train_cache_file_name, 'validation': args.validation_cache_file_name}
    # Main process maps first so other ranks reuse the preprocessing cache.
    with accelerator.main_process_first():
        vectorized_datasets = raw_datasets.map(prepare_dataset, num_proc=args.preprocessing_num_workers, remove_columns=raw_datasets['train'].column_names, cache_file_names=cache_file_names)
        if (min_length > 0.0):
            # Drop clips shorter than the minimum duration.
            vectorized_datasets = vectorized_datasets.filter((lambda x: (x > min_length)), num_proc=args.preprocessing_num_workers, input_columns=['input_length'])
        vectorized_datasets = vectorized_datasets.remove_columns('input_length')
    if args.preprocessing_only:
        return
    config = Wav2Vec2Config.from_pretrained(args.model_name_or_path)
    if ((not config.do_stable_layer_norm) or (config.feat_extract_norm != 'layer')):
        raise ValueError("PreTraining is only supported for ``config.do_stable_layer_norm=True`` and ``config.feat_extract_norm='layer'")
    # Model is initialized from config only (trained from scratch).
    model = Wav2Vec2ForPreTraining(config)
    if args.gradient_checkpointing:
        model.gradient_checkpointing_enable()
    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor, pad_to_multiple_of=args.pad_to_multiple_of)
    train_dataloader = DataLoader(vectorized_datasets['train'], shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size)
    eval_dataloader = DataLoader(vectorized_datasets['validation'], collate_fn=data_collator, batch_size=args.per_device_eval_batch_size)
    optimizer = AdamW(list(model.parameters()), lr=args.learning_rate, betas=[args.adam_beta1, args.adam_beta2], eps=args.adam_epsilon)
    (model, optimizer, train_dataloader, eval_dataloader) = accelerator.prepare(model, optimizer, train_dataloader, eval_dataloader)
    # Derive max_train_steps <-> num_train_epochs, whichever was left unset.
    num_update_steps_per_epoch = math.ceil((len(train_dataloader) / args.gradient_accumulation_steps))
    if (args.max_train_steps is None):
        args.max_train_steps = (args.num_train_epochs * num_update_steps_per_epoch)
    else:
        args.num_train_epochs = math.ceil((args.max_train_steps / num_update_steps_per_epoch))
    lr_scheduler = get_scheduler(name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps)
    total_batch_size = ((args.per_device_train_batch_size * accelerator.num_processes) * args.gradient_accumulation_steps)
    logger.info('***** Running training *****')
    logger.info(f" Num examples = {len(vectorized_datasets['train'])}")
    logger.info(f' Num Epochs = {args.num_train_epochs}')
    logger.info(f' Instantaneous batch size per device = {args.per_device_train_batch_size}')
    logger.info(f' Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}')
    logger.info(f' Gradient Accumulation steps = {args.gradient_accumulation_steps}')
    logger.info(f' Total optimization steps = {args.max_train_steps}')
    completed_steps = 0
    progress_bar = tqdm(range(args.max_train_steps), disable=(not accelerator.is_local_main_process))
    # NOTE(review): duplicate re-initialization of completed_steps, kept as-is.
    completed_steps = 0
    for epoch in range(args.num_train_epochs):
        model.train()
        for (step, batch) in enumerate(train_dataloader):
            # Number of masked time-steps == number of contrastive losses.
            num_losses = batch['mask_time_indices'].sum()
            sub_attention_mask = batch.pop('sub_attention_mask', None)
            sub_attention_mask = (sub_attention_mask if (sub_attention_mask is not None) else torch.ones_like(batch['mask_time_indices']))
            percent_masked = (num_losses / sub_attention_mask.sum())
            outputs = model(**batch)
            # Scale the loss for gradient accumulation before backward.
            loss = (outputs.loss / args.gradient_accumulation_steps)
            accelerator.backward(loss)
            # Average gradients over the total number of losses on all devices.
            if (accelerator.state.num_processes > 1):
                num_losses = accelerator.gather(num_losses).sum()
                gradient_multiplier = (accelerator.state.num_processes / num_losses)
                multiply_grads(model.module.parameters(), gradient_multiplier)
            else:
                multiply_grads(model.parameters(), (1 / num_losses))
            # Optimizer step at accumulation boundaries (and at epoch end).
            if ((((step + 1) % args.gradient_accumulation_steps) == 0) or (step == (len(train_dataloader) - 1))):
                # Undo the AMP loss scale when reporting the gradient norm.
                scale = (accelerator.scaler._scale.item() if (hasattr(accelerator, 'scaler') and (accelerator.scaler is not None)) else 1)
                if (accelerator.state.num_processes > 1):
                    grad_norm = get_grad_norm(model.module.parameters(), scale)
                else:
                    grad_norm = get_grad_norm(model.parameters(), scale)
                optimizer.step()
                optimizer.zero_grad()
                if (not accelerator.optimizer_step_was_skipped):
                    lr_scheduler.step()
                elif accelerator.is_local_main_process:
                    progress_bar.write(f'Gradients have overflown - skipping update step... Updating gradient scale to {scale}...')
                # Anneal the Gumbel-softmax temperature used for quantization.
                gumbel_temperature = max((args.max_gumbel_temperature * (args.gumbel_temperature_decay ** completed_steps)), args.min_gumbel_temperature)
                if hasattr(model, 'module'):
                    model.module.set_gumbel_temperature(gumbel_temperature)
                else:
                    model.set_gumbel_temperature(gumbel_temperature)
                progress_bar.update(1)
                completed_steps += 1
            # Periodic logging (metrics gathered across ranks when distributed).
            if (((step + 1) % (args.gradient_accumulation_steps * args.logging_steps)) == 0):
                loss.detach()
                outputs.contrastive_loss.detach()
                outputs.diversity_loss.detach()
                if (accelerator.state.num_processes > 1):
                    loss = accelerator.gather(loss).sum()
                    outputs.contrastive_loss = accelerator.gather(outputs.contrastive_loss).sum()
                    outputs.diversity_loss = accelerator.gather(outputs.diversity_loss).sum()
                    percent_masked = accelerator.gather(percent_masked).sum()
                # NOTE(review): 'constrast_loss' key spelling kept — it is a
                # runtime log key and downstream dashboards may depend on it.
                train_logs = {'loss': ((loss * args.gradient_accumulation_steps) / num_losses), 'constrast_loss': (outputs.contrastive_loss / num_losses), 'div_loss': (outputs.diversity_loss / num_losses), '%_mask_idx': (percent_masked / accelerator.num_processes), 'ppl': outputs.codevector_perplexity, 'lr': torch.tensor(optimizer.param_groups[0]['lr']), 'temp': torch.tensor(gumbel_temperature), 'grad_norm': torch.tensor(grad_norm)}
                log_str = ''
                for (k, v) in train_logs.items():
                    log_str += '| {}: {:.3e}'.format(k, v.item())
                if accelerator.is_local_main_process:
                    progress_bar.write(log_str)
                    if is_wandb_available():
                        wandb.log(train_logs)
            # Periodic checkpointing (and optional in-progress hub push).
            if (((step + 1) % (args.gradient_accumulation_steps * args.saving_steps)) == 0):
                if ((args.push_to_hub and (epoch < (args.num_train_epochs - 1))) or (args.output_dir is not None)):
                    accelerator.wait_for_everyone()
                    unwrapped_model = accelerator.unwrap_model(model)
                    unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
                if ((args.push_to_hub and (epoch < (args.num_train_epochs - 1))) and accelerator.is_main_process):
                    repo.push_to_hub(commit_message=f'Training in progress step {completed_steps}', blocking=False, auto_lfs_prune=True)
            if (completed_steps >= args.max_train_steps):
                break
        # Per-epoch validation pass.
        model.eval()
        val_logs = {'val_loss': 0, 'val_contrastive_loss': 0, 'val_diversity_loss': 0, 'val_num_losses': 0}
        for (step, batch) in enumerate(eval_dataloader):
            with torch.no_grad():
                batch.pop('sub_attention_mask', None)
                outputs = model(**batch)
            val_logs['val_loss'] += outputs.loss
            val_logs['val_contrastive_loss'] += outputs.contrastive_loss
            val_logs['val_diversity_loss'] += outputs.diversity_loss
            val_logs['val_num_losses'] += batch['mask_time_indices'].sum()
        if (accelerator.num_processes > 1):
            val_logs = {k: accelerator.gather(v).sum() for (k, v) in val_logs.items()}
        # Normalize every accumulated metric by the total number of losses.
        val_logs = {k: (v / val_logs['val_num_losses']) for (k, v) in val_logs.items()}
        log_str = ''
        for (k, v) in val_logs.items():
            log_str += '| {}: {:.3e}'.format(k, v.item())
        if accelerator.is_local_main_process:
            progress_bar.write(log_str)
            if is_wandb_available():
                wandb.log(val_logs)
    # Final save (and optional hub push) after all epochs complete.
    if (args.output_dir is not None):
        accelerator.wait_for_everyone()
        unwrapped_model = accelerator.unwrap_model(model)
        unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
        if accelerator.is_main_process:
            if args.push_to_hub:
                repo.push_to_hub(commit_message='End of training', auto_lfs_prune=True)
(version='2.0')
class Criterions(object):
    """Registry mapping criterion names to classes for one backend framework."""

    def __init__(self, framework):
        """Load the criterion registry belonging to *framework*."""
        assert framework in ('tensorflow', 'pytorch', 'pytorch_fx'), 'framework support tensorflow pytorch'
        self.criterions = framework_criterions[framework]().criterions

    def __getitem__(self, criterion_type):
        """Return the criterion class registered under *criterion_type*."""
        assert criterion_type in self.criterions.keys(), 'only support criterions in {}'.format(self.criterions.keys())
        return self.criterions[criterion_type]

    def register(self, name, criterion_cls):
        """Register *criterion_cls* under *name*; the name must be unused."""
        assert name not in self.criterions.keys(), 'registered criterion name already exists.'
        self.criterions[name] = criterion_cls
_model
def regnety_002(pretrained=False, **kwargs):
    """Build a RegNetY-200MF model, optionally loading pretrained weights."""
    model = _create_regnet('regnety_002', pretrained, **kwargs)
    return model
def fetch_data(dataset: Callable[([str], Dataset)], transform: Optional[Callable]=None, target_transform: Optional[Callable]=None, num_workers: int=0, pin_memory: bool=True, drop_last: bool=False, train_splits: Optional[List[str]]=None, test_splits: Optional[List[str]]=None, train_shuffle: bool=True, test_shuffle: bool=False, test_image_size: int=600, train_augmentation: Optional[dict]=None, test_augmentation: Optional[dict]=None, batch_size: int=1, test_batch_size: Optional[int]=None) -> Tuple[(List[Tuple[(str, DataLoader)]], List[Tuple[(str, DataLoader)]])]:
    """Build DataLoaders for the requested train and test splits.

    Args:
        dataset: factory called as ``dataset(split=..., transform=...,
            target_transform=...)`` returning a Dataset for one split.
        transform: optional transform factory; called with
            ``augmentation=...`` for training and additionally
            ``image_size=[test_image_size, test_image_size]`` for testing.
        train_splits / test_splits: split names to load (default: none).
        test_batch_size: overrides ``batch_size`` for test loaders when set.

    Returns:
        ``(train_loader_list, test_loader_list)``, each a list of
        ``(split_name, DataLoader)`` pairs in the order the splits were given.
    """
    # Bug fix: the defaults were mutable ([] / {}), which Python evaluates
    # once and shares across calls; use None sentinels and fresh containers.
    train_splits = [] if train_splits is None else train_splits
    test_splits = [] if test_splits is None else test_splits
    train_augmentation = {} if train_augmentation is None else train_augmentation
    test_augmentation = {} if test_augmentation is None else test_augmentation
    train_transform = transform(augmentation=train_augmentation) if transform else None
    train_loader_list = []
    for split in train_splits:
        train_loader_list.append((split, DataLoader(dataset=dataset(split=split, transform=train_transform, target_transform=target_transform), batch_size=batch_size, num_workers=num_workers, pin_memory=pin_memory, drop_last=drop_last, shuffle=train_shuffle)))
    # Test transforms additionally pin the evaluation image size.
    test_transform = transform(image_size=[test_image_size, test_image_size], augmentation=test_augmentation) if transform else None
    test_loader_list = []
    for split in test_splits:
        test_loader_list.append((split, DataLoader(dataset=dataset(split=split, transform=test_transform, target_transform=target_transform), batch_size=(batch_size if test_batch_size is None else test_batch_size), num_workers=num_workers, pin_memory=pin_memory, drop_last=drop_last, shuffle=test_shuffle)))
    return (train_loader_list, test_loader_list)
def main():
    """Encode parallel text files with a sentencepiece model.

    Reads line-aligned input files, encodes each line as pieces or ids,
    optionally filters pairs by encoded token count, and writes the results
    to the corresponding outputs ('-' means stdin/stdout).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', required=True, help='sentencepiece model to use for encoding')
    parser.add_argument('--inputs', nargs='+', default=['-'], help='input files to filter/encode')
    parser.add_argument('--outputs', nargs='+', default=['-'], help='path to save encoded outputs')
    parser.add_argument('--output_format', choices=['piece', 'id'], default='piece')
    parser.add_argument('--min-len', type=int, metavar='N', help='filter sentence pairs with fewer than N tokens')
    parser.add_argument('--max-len', type=int, metavar='N', help='filter sentence pairs with more than N tokens')
    args = parser.parse_args()
    assert (len(args.inputs) == len(args.outputs)), 'number of input and output paths should match'
    sp = spm.SentencePieceProcessor()
    sp.Load(args.model)
    # Bind the encoding function once, based on the requested output format.
    if (args.output_format == 'piece'):

        def encode(l):
            return sp.EncodeAsPieces(l)
    elif (args.output_format == 'id'):

        def encode(l):
            return list(map(str, sp.EncodeAsIds(l)))
    else:
        raise NotImplementedError
    # Length filter operates on the ENCODED line (token count, not chars).
    if ((args.min_len is not None) or (args.max_len is not None)):

        def valid(line):
            return (((args.min_len is None) or (len(line) >= args.min_len)) and ((args.max_len is None) or (len(line) <= args.max_len)))
    else:

        def valid(lines):
            # No limits configured: accept everything.
            return True
    with contextlib.ExitStack() as stack:
        # '-' selects stdin/stdout; real paths are opened on the exit stack.
        inputs = [(stack.enter_context(open(input, 'r', encoding='utf-8')) if (input != '-') else sys.stdin) for input in args.inputs]
        outputs = [(stack.enter_context(open(output, 'w', encoding='utf-8')) if (output != '-') else sys.stdout) for output in args.outputs]
        stats = {'num_empty': 0, 'num_filtered': 0}

        def encode_line(line):
            # Returns the encoded tokens, or None when the line is dropped
            # (empty or filtered). Updates `stats` as a side effect.
            line = line.strip()
            if (len(line) > 0):
                line = encode(line)
                if valid(line):
                    return line
                else:
                    stats['num_filtered'] += 1
            else:
                stats['num_empty'] += 1
            return None

        # A parallel line-tuple is written only if EVERY side survived.
        for (i, lines) in enumerate(zip(*inputs), start=1):
            enc_lines = list(map(encode_line, lines))
            if (not any(((enc_line is None) for enc_line in enc_lines))):
                for (enc_line, output_h) in zip(enc_lines, outputs):
                    print(' '.join(enc_line), file=output_h)
            if ((i % 10000) == 0):
                print('processed {} lines'.format(i), file=sys.stderr)
        print('skipped {} empty lines'.format(stats['num_empty']), file=sys.stderr)
        print('filtered {} lines'.format(stats['num_filtered']), file=sys.stderr)
def restore_model(pkl_file, checkpoint=None, train=False, fp16=None):
    """Recreate an nnU-Net trainer from a pickled model-info file.

    Args:
        pkl_file: path to the ``.pkl`` saved next to a checkpoint; must
            contain the trainer class name, its init args and the plans.
        checkpoint: optional checkpoint file to restore weights from.
        train: forwarded to ``load_checkpoint`` (whether to also restore
            state needed to continue training).
        fp16: when not None, overrides the trainer's fp16 setting.

    Returns:
        The instantiated (and optionally checkpoint-loaded) trainer.

    Raises:
        RuntimeError: if the trainer class cannot be located.
    """
    info = load_pickle(pkl_file)
    init = info['init']
    name = info['name']
    # Search for the trainer class inside nnunet first...
    search_in = join(nnunet.__path__[0], 'training', 'network_training')
    tr = recursive_find_python_class([search_in], name, current_module='nnunet.training.network_training')
    if tr is None:
        # ...then fall back to the optional meddec package, if installed.
        try:
            import meddec
            search_in = join(meddec.__path__[0], 'model_training')
            tr = recursive_find_python_class([search_in], name, current_module='meddec.model_training')
        except ImportError:
            pass
    if tr is None:
        # Fixed typo in the error message: 'trainig' -> 'training'.
        raise RuntimeError(('Could not find the model trainer specified in checkpoint in nnunet.training.network_training. If it is not located there, please move it or change the code of restore_model. Your model trainer can be located in any directory within nnunet.training.network_training (search is recursive).\nDebug info: \ncheckpoint file: %s\nName of trainer: %s ' % (checkpoint, name)))
    assert issubclass(tr, nnUNetTrainer), 'The network trainer was found but is not a subclass of nnUNetTrainer. Please make it so!'
    trainer = tr(*init)
    if fp16 is not None:
        # Allow the caller to force mixed precision on or off.
        trainer.fp16 = fp16
    trainer.process_plans(info['plans'])
    if checkpoint is not None:
        trainer.load_checkpoint(checkpoint, train)
    return trainer
def test_properties_are_correct(archive_fixture):
    """A GaussianEmitter must expose the x0/sigma/batch_size it was built with."""
    archive, x0 = archive_fixture
    sigma = 1
    batch_size = 2
    emitter = GaussianEmitter(archive, sigma=sigma, x0=x0, batch_size=batch_size)
    # Each constructor argument should round-trip through its property.
    assert np.all(emitter.x0 == x0)
    assert emitter.sigma == sigma
    assert emitter.batch_size == batch_size
def _get_pre_context_function(pre_context_process, kws=None):
pre_context_process = pre_context_process.lower()
kws = (kws or {})
if (pre_context_process in 'summarization'):
return SummarizationContextProcess(**kws)
if (pre_context_process in 'selective'):
return SelectiveContextProcess(**kws)
if (pre_context_process in 'concat'):
return ConcatContextProcess() |
def emb_summarize(f, namer, search_n=25):
    """Summarize *f* with the nearest vocabulary neighbor of its mean embedding.

    Searches the ``search_n`` words closest to the mean document vector and
    returns the first one not already present in the documents, as a
    ``(word, similarity)`` tuple. If every candidate already occurs, the best
    candidate is returned with a ``-same`` suffix.

    NOTE(review): relies on module-level spaCy state (``nlp``, ``load_spacy``,
    ``get_nlps``, ``filter_oov``) defined elsewhere in this file.
    """
    load_spacy()
    nlps = get_nlps(f, namer)
    nlps = filter_oov(nlps)
    vecs = [n.vector for n in nlps]
    # Collect every token occurring in the documents for the overlap check.
    toks_flat = set()
    for n in nlps:
        toks_flat.update(list(n))
    # NOTE(review): token texts are NOT lowercased here, but candidate words
    # below are — mixed-case occurrences may slip past the membership check;
    # confirm this is intended.
    toks_flat = [t.text for t in toks_flat]
    # Mean document vector, shaped (1, dim) for the vector-table query.
    vec = np.array(vecs).mean(0)[np.newaxis]
    (keys, _, sims) = nlp.vocab.vectors.most_similar(vec, n=search_n, batch_size=10000)
    keys = keys[0]
    sims = sims[0]
    # First candidate not already present in the documents wins.
    for (k, s) in zip(keys, sims):
        w = nlp.vocab.strings[k].lower()
        if (w not in toks_flat):
            return (w, s)
    # Every candidate occurred in the docs: return the best one, marked.
    w = nlp.vocab.strings[keys[0]].lower()
    return (f'{w}-same', sims[0])
class Constraint(Data):
    """Dataset wrapper that turns a user constraint into a zero-target loss."""

    def __init__(self, constraint, train_x, test_x):
        """Store the constraint callable and its train/test point sets."""
        self.constraint = constraint
        self.train_x = train_x
        self.test_x = test_x

    def losses(self, targets, outputs, loss_fn, inputs, model, aux=None):
        """Penalize the constraint residual against an all-zeros target."""
        on_train = lambda: self.constraint(inputs, outputs, self.train_x)
        on_test = lambda: self.constraint(inputs, outputs, self.test_x)
        # Evaluate on the point set matching the network's training flag.
        residual = tf.cond(model.net.training, on_train, on_test)
        zeros = tf.zeros(tf.shape(residual), dtype=config.real(tf))
        return loss_fn(zeros, residual)

    def train_next_batch(self, batch_size=None):
        """Return every training point (batch_size is accepted but ignored)."""
        return self.train_x, None

    def test(self):
        """Return every test point."""
        return self.test_x, None
def evaluate_3rd_user_task_fbne(fbne_data, valid_batch_index, model, sess, valid_data, is_training):
    """Evaluate the third-order user task over the given validation batches.

    Returns a ``(mean_loss, mean_pearson)`` tuple averaged over
    ``valid_batch_index``, where the Pearson correlation is computed between
    the predicted and oracle user embeddings of each batch.
    """
    (evaluate_loss, evaluate_pearson) = (0.0, 0.0)
    for index in tqdm.tqdm(valid_batch_index):
        # Unpack one batch: k-shot items plus 2nd/3rd-order graph context.
        (b_target_user, b_k_shot_item, b_second_order_users, b_third_order_items, b_oracle_user_ebd, b_mask_num_second_order_user, b_mask_num_third_order_item, b_intra_2nd_user, b_intra_3rd_user) = fbne_data.batch_gen_3rd_user_task(valid_data, index)
        # NOTE(review): the oracle embedding is fed as model.target_user, and
        # several unpacked values (b_target_user, the mask counts,
        # b_intra_2nd_user) are unused here — confirm against the model's
        # expected feeds.
        feed_dict = {model.target_user: b_oracle_user_ebd, model.support_item_1st_: b_k_shot_item, model.training_phrase_user_task: is_training, model.support_user_2nd_: b_second_order_users, model.training_phrase_item_task: is_training, model.inter_support_3rd_user: b_intra_3rd_user, model.support_item_3rd: b_third_order_items}
        (batch_evaluate_loss, batch_predict_ebd, batch_target_ebd) = sess.run([model.loss_3rd_user, model.predict_u_3rd, model.target_user], feed_dict)
        evaluate_loss += batch_evaluate_loss
        batch_pearson = Pearson_correlation(batch_predict_ebd, batch_target_ebd)
        evaluate_pearson += batch_pearson
    # Average both metrics over the number of evaluated batches.
    return ((evaluate_loss / len(valid_batch_index)), (evaluate_pearson / len(valid_batch_index)))
class GraphSAGE(nn.Module):
    """Three-layer GraphSAGE network: in -> hidden -> hidden -> classes.

    Note: ``n_layers`` is accepted for interface compatibility but the
    architecture is fixed at three layers, matching the original.
    """

    def __init__(self, in_feats, n_hidden, n_classes, n_layers):
        super(GraphSAGE, self).__init__()
        # Fixed stack of (in_dim, out_dim) pairs for the three layers.
        dims = [(in_feats, n_hidden), (n_hidden, n_hidden), (n_hidden, n_classes)]
        self.layers = nn.ModuleList()
        for d_in, d_out in dims:
            self.layers.append(GraphSAGELayer(d_in, d_out))

    def forward(self, g):
        """Propagate the graph's 'feat' node features through every layer."""
        h = g.ndata['feat']
        for sage_layer in self.layers:
            h = sage_layer(g, h)
        return h
def main():
    """Aggregate fairseq GLUE fine-tuning results and print summaries.

    NOTE(review): ``parser``, ``args.base``, ``TASKS`` and ``TASK_TO_METRIC``
    are not defined in this function — presumably the parser is created at
    module level with a ``--base`` argument; confirm against the full file.
    """
    parser.add_argument('--show_glue', action='store_true', help='show glue metric for each task instead of accuracy')
    parser.add_argument('--print_mode', default='best', help='best|all|tabular')
    parser.add_argument('--show_subdir', action='store_true', help='print the subdir that has the best results for each run')
    parser.add_argument('--override_target', default='valid_accuracy', help='override target')
    args = parser.parse_args()
    # Fixed settings expected by valids_main.
    args.target = args.override_target
    args.best_biggest = True
    args.best = True
    args.last = 0
    args.path_contains = None
    res = valids_main(args, print_output=False)
    # Group results as grouped_*[run][task][subdir] -> metric value.
    grouped_acc = {}
    grouped_met = {}
    for (path, v) in res.items():
        path = '/'.join([args.base, path])
        # Collapse any duplicate slashes produced by the join.
        path = re.sub('//*', '/', path)
        # Expect paths shaped like <run>finetune*/<task>/<subdir>.
        match = re.match('(.*)finetune[^/]*/([^/]*)/(.*)', path)
        if (not match):
            continue
        (run, task, subdir) = match.groups()
        if (run not in grouped_acc):
            grouped_acc[run] = {}
            grouped_met[run] = {}
        if (task not in grouped_acc[run]):
            grouped_acc[run][task] = {}
            grouped_met[run][task] = {}
        if (v is not None):
            # -100 marks a missing metric in the result dict.
            grouped_acc[run][task][subdir] = float(v.get('valid_accuracy', (- 100)))
            grouped_met[run][task][subdir] = float(v.get(f'valid_{TASK_TO_METRIC[task]}', (- 100)))
        else:
            print(f'{path} has None return')
    header = '\t'.join(TASKS)
    for run in sorted(grouped_acc):
        print(run)
        if (args.print_mode == 'all'):
            # Print every subdir's result for each task.
            if args.show_glue:
                print('===== GLUE =====')
                print(get_all_stat_str(grouped_met[run]))
            else:
                print('===== ACC =====')
                print(get_all_stat_str(grouped_acc[run]))
        elif (args.print_mode == 'best'):
            # Single best result per task, one row per run.
            print(f' {header}')
            if args.show_glue:
                # NOTE(review): 'GLEU' looks like a typo for 'GLUE' in this
                # output label; left unchanged here since it is runtime output.
                print(f'GLEU: {get_best_stat_str(grouped_met[run], args.show_subdir)}')
            else:
                print(f'ACC: {get_best_stat_str(grouped_acc[run], args.show_subdir)}')
        elif (args.print_mode == 'tabular'):
            if args.show_glue:
                print('===== GLUE =====')
                print(get_tabular_stat_str(grouped_met[run]))
            else:
                print('===== ACC =====')
                print(get_tabular_stat_str(grouped_acc[run]))
        else:
            raise ValueError(args.print_mode)
        print()
class VarDict(object):
    """Attribute-style view over an internal dict (``my_dict``).

    A trailing ``'__'`` on an attribute name is stripped before lookup or
    storage, which lets callers address keys that would otherwise collide
    with real attributes. Re-assigning an existing key is logged.
    """

    def _setattr_(obj, key, val):
        # Strip the escape suffix; otherwise log when overwriting a key.
        if key.endswith('__'):
            key = key[:-2]
        elif key in obj.my_dict:
            logger.info('re-assign glb.%s' % key)
        obj.my_dict[key] = val

    def _getattr_(obj, key):
        # Mirror the suffix handling used on assignment.
        if key.endswith('__'):
            key = key[:-2]
        return obj.my_dict[key]

    def __init__(self, dict=None):
        # Write through __dict__ directly so __setattr__ is not triggered.
        self.__dict__['my_dict'] = {}
        if dict:
            for key, val in dict.items():
                setattr(self, key, val)

    def __setattr__(self, key, value):
        VarDict._setattr_(self, key, value)

    def __getattr__(self, key):
        return VarDict._getattr_(self, key)

    def __str__(self):
        pairs = sorted(self.my_dict.items())
        return '\n'.join('{0}:{1}'.format(key, val) for key, val in pairs)

    def to_dict(self):
        """Expose the backing dict."""
        return self.my_dict

    def add(self, dict):
        """Merge every (key, value) pair of *dict* via attribute assignment."""
        for key, val in dict.items():
            setattr(self, key, val)
class GeneratorHubInterface(nn.Module):
    """Hub interface wrapping fairseq models for string-in/string-out use.

    Bundles one or more models with their task, dictionaries, tokenizer and
    BPE so that plain sentences can be translated, sampled or scored.
    """

    def __init__(self, cfg, task, models):
        super().__init__()
        self.cfg = cfg
        self.task = task
        self.models = nn.ModuleList(models)
        self.src_dict = task.source_dictionary
        self.tgt_dict = task.target_dictionary
        # Put every wrapped model into inference mode per the config.
        for model in self.models:
            model.prepare_for_inference_(cfg)
        self.align_dict = utils.load_align_dict(cfg.generation.replace_unk)
        self.tokenizer = encoders.build_tokenizer(cfg.tokenizer)
        self.bpe = encoders.build_bpe(cfg.bpe)
        self.max_positions = utils.resolve_max_positions(self.task.max_positions(), *[model.max_positions() for model in models])
        # This buffer tracks the module's device/dtype across .to()/.cuda().
        self.register_buffer('_float_tensor', torch.tensor([0], dtype=torch.float))

    @property
    def device(self):
        """torch.device the module currently lives on.

        Fixed: restored the ``@property`` decorator — ``generate`` uses
        ``self.device`` as a value (``t.to(self.device)``), which would have
        received a bound method without it.
        """
        return self._float_tensor.device

    def translate(self, sentences: List[str], beam: int=5, verbose: bool=False, **kwargs) -> List[str]:
        """Translate sentences; thin alias for :meth:`sample`."""
        return self.sample(sentences, beam, verbose, **kwargs)

    def sample(self, sentences: List[str], beam: int=1, verbose: bool=False, **kwargs) -> List[str]:
        """Generate one output string per input sentence (best hypothesis)."""
        # Single-string convenience: wrap, recurse, unwrap.
        if isinstance(sentences, str):
            return self.sample([sentences], beam=beam, verbose=verbose, **kwargs)[0]
        tokenized_sentences = [self.encode(sentence) for sentence in sentences]
        batched_hypos = self.generate(tokenized_sentences, beam, verbose, **kwargs)
        return [self.decode(hypos[0]['tokens']) for hypos in batched_hypos]

    def score(self, sentences: List[str], **kwargs):
        """Score reference sentences; returns the top hypothesis dict each."""
        if isinstance(sentences, str):
            return self.score([sentences], **kwargs)[0]
        tokenized_sentences = [self.encode(sentence) for sentence in sentences]
        return [hypos[0] for hypos in self.generate(tokenized_sentences, score_reference=True, **kwargs)]

    def generate(self, tokenized_sentences: List[torch.LongTensor], beam: int=5, verbose: bool=False, skip_invalid_size_inputs=False, inference_step_args=None, prefix_allowed_tokens_fn=None, **kwargs) -> List[List[Dict[str, torch.Tensor]]]:
        """Generate hypotheses for pre-tokenized sentences.

        Returns one list of hypothesis dicts per input, in input order.
        Extra ``kwargs`` are overlaid onto a copy of the generation config.
        """
        # Single-tensor convenience: wrap, recurse, unwrap.
        if torch.is_tensor(tokenized_sentences) and (tokenized_sentences.dim() == 1):
            return self.generate(tokenized_sentences.unsqueeze(0), beam=beam, verbose=verbose, **kwargs)[0]
        # Overlay the caller's generation options on a config copy.
        gen_args = copy.deepcopy(self.cfg.generation)
        with open_dict(gen_args):
            gen_args.beam = beam
            for (k, v) in kwargs.items():
                setattr(gen_args, k, v)
        generator = self.task.build_generator(self.models, gen_args, prefix_allowed_tokens_fn=prefix_allowed_tokens_fn)
        inference_step_args = inference_step_args or {}
        results = []
        for batch in self._build_batches(tokenized_sentences, skip_invalid_size_inputs):
            batch = utils.apply_to_sample((lambda t: t.to(self.device)), batch)
            translations = self.task.inference_step(generator, self.models, batch, **inference_step_args)
            for (id, hypos) in zip(batch['id'].tolist(), translations):
                results.append((id, hypos))
        # Batching may reorder inputs; sort back to the original order.
        outputs = [hypos for (_, hypos) in sorted(results, key=(lambda x: x[0]))]
        if verbose:

            def getarg(name, default):
                # Generation config wins over the top-level config.
                return getattr(gen_args, name, getattr(self.cfg, name, default))

            # Emit fairseq-style S/H/P/A trace lines for each input.
            for (source_tokens, target_hypotheses) in zip(tokenized_sentences, outputs):
                src_str_with_unk = self.string(source_tokens)
                logger.info('S\t{}'.format(src_str_with_unk))
                for hypo in target_hypotheses:
                    hypo_str = self.decode(hypo['tokens'])
                    logger.info('H\t{}\t{}'.format(hypo['score'], hypo_str))
                    logger.info('P\t{}'.format(' '.join(map((lambda x: '{:.4f}'.format(x)), hypo['positional_scores'].tolist()))))
                    if ((hypo['alignment'] is not None) and getarg('print_alignment', False)):
                        logger.info('A\t{}'.format(' '.join(['{}-{}'.format(src_idx, tgt_idx) for (src_idx, tgt_idx) in hypo['alignment']])))
        return outputs

    def encode(self, sentence: str) -> torch.LongTensor:
        """String -> tokenize -> BPE -> tensor of source-dict indices."""
        sentence = self.tokenize(sentence)
        sentence = self.apply_bpe(sentence)
        return self.binarize(sentence)

    def decode(self, tokens: torch.LongTensor) -> str:
        """Tensor of target-dict indices -> de-BPE -> detokenized string."""
        sentence = self.string(tokens)
        sentence = self.remove_bpe(sentence)
        return self.detokenize(sentence)

    def tokenize(self, sentence: str) -> str:
        """Apply the configured tokenizer (identity when none is set)."""
        if self.tokenizer is not None:
            sentence = self.tokenizer.encode(sentence)
        return sentence

    def detokenize(self, sentence: str) -> str:
        """Undo tokenization (identity when no tokenizer is set)."""
        if self.tokenizer is not None:
            sentence = self.tokenizer.decode(sentence)
        return sentence

    def apply_bpe(self, sentence: str) -> str:
        """Apply BPE segmentation (identity when no BPE is set)."""
        if self.bpe is not None:
            sentence = self.bpe.encode(sentence)
        return sentence

    def remove_bpe(self, sentence: str) -> str:
        """Undo BPE segmentation (identity when no BPE is set)."""
        if self.bpe is not None:
            sentence = self.bpe.decode(sentence)
        return sentence

    def binarize(self, sentence: str) -> torch.LongTensor:
        """Map a space-separated token string to source-dict indices."""
        return self.src_dict.encode_line(sentence, add_if_not_exist=False).long()

    def string(self, tokens: torch.LongTensor) -> str:
        """Render target-dict indices back into a token string."""
        return self.tgt_dict.string(tokens)

    def _build_batches(self, tokens: List[List[int]], skip_invalid_size_inputs: bool) -> Iterator[Dict[str, Any]]:
        """Yield inference mini-batches honoring size/position limits."""
        lengths = torch.LongTensor([t.numel() for t in tokens])
        batch_iterator = self.task.get_batch_iterator(dataset=self.task.build_dataset_for_inference(tokens, lengths), max_tokens=self.cfg.dataset.max_tokens, max_sentences=self.cfg.dataset.batch_size, max_positions=self.max_positions, ignore_invalid_inputs=skip_invalid_size_inputs, disable_iterator_cache=True).next_epoch_itr(shuffle=False)
        return batch_iterator
class TFTransfoXLPreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder following the transformers dummy-object pattern.

    Stands in for the real TensorFlow Transfo-XL model so imports succeed
    even when the 'tf' backend is unavailable.
    """
    # Backends required for the real class to be usable.
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        # Presumably raises with installation guidance when TensorFlow is
        # missing — confirm against requires_backends' definition.
        requires_backends(self, ['tf'])
def get_frame_index(name, frame):
    """Return the positional row index of *name* in *frame*'s first column.

    Args:
        name: value to look for (typically an image filename).
        frame: pandas DataFrame whose first column holds the names.

    Returns:
        The 0-based row index of the first match.

    Raises:
        ValueError: if *name* is not found. (ValueError subclasses the
        generic Exception raised previously, so existing handlers still
        catch it.)
    """
    # Iterate the first column directly instead of scalar-indexing each row.
    for idx, value in enumerate(frame.iloc[:, 0]):
        if value == name:
            return idx
    raise ValueError('Could not find image {} in data frame, unsuccessful in finding frame index'.format(name))
def data_loader(X, Y, batch_size, shuffle=True, drop_last=True):
    """Wrap (X, Y) in a TensorDataset and return a DataLoader over it.

    Inputs are converted to float tensors, placed on GPU when CUDA is
    available and on CPU otherwise.
    """
    use_cuda = torch.cuda.is_available()
    tensor_type = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
    dataset = torch.utils.data.TensorDataset(tensor_type(X), tensor_type(Y))
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, drop_last=drop_last)
class ACE2005NerLoader(Loader):
    """Loader for ACE2005 NER json splits; collects BIO labels while reading."""

    def __init__(self):
        super().__init__()
        # The outside-any-entity tag is always part of the label set.
        self.label_set.add('O')

    def _load(self, path):
        """Read one split file and register every B-/I- entity label seen."""
        data = load_json(path)
        for item in data:
            for mention in item['golden-entity-mentions']:
                start, end = mention['start'], mention['end']
                for i in range(start, end):
                    entity_type = mention['entity-type']
                    # First token of a mention gets B-, the rest get I-.
                    if i == start:
                        self.label_set.add('B-{}'.format(entity_type))
                    else:
                        self.label_set.add('I-{}'.format(entity_type))
        return data

    def load_all(self, path):
        """Load the train/dev/test splits from *path*, in that order."""
        filenames = ('train.json', 'dev.json', 'test.json')
        return tuple(self._load(os.path.join(path, name)) for name in filenames)
class SGDFactory(OptimizerFactoryInterface):
    """Factory producing chainer SGD optimizers from argparse arguments.

    NOTE(review): both methods take no ``self``/``cls`` yet carry no visible
    ``@staticmethod`` decorator here — presumably stripped in transit;
    confirm against the original source.
    """

    def add_arguments(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        # Delegate registration of SGD-specific CLI options to the shared
        # `sgd` helper; returns the same parser for chaining.
        return sgd(parser)

    def from_args(target, args: argparse.Namespace):
        # Build an SGD optimizer with weight decay and attach it to *target*.
        opt = chainer.optimizers.SGD(lr=args.lr)
        opt.setup(target)
        opt.add_hook(WeightDecay(args.weight_decay))
        return opt
_module()
class LAD(KnowledgeDistillationSingleStageDetector):
    """Implementation of `LAD <https://arxiv.org/abs/2108.10520>`_.

    Label Assignment Distillation: a frozen teacher network produces the
    label-assignment results that supervise the student's bbox head.
    (Docstring repaired — the original line was truncated mid-URL.)
    """

    def __init__(self, backbone, neck, bbox_head, teacher_backbone, teacher_neck, teacher_bbox_head, teacher_ckpt, eval_teacher=True, train_cfg=None, test_cfg=None, pretrained=None):
        # Deliberately skip KnowledgeDistillationSingleStageDetector.__init__
        # and call its parent instead: the teacher is assembled manually here.
        super(KnowledgeDistillationSingleStageDetector, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained)
        self.eval_teacher = eval_teacher
        # The teacher lives in a bare nn.Module container.
        self.teacher_model = nn.Module()
        self.teacher_model.backbone = build_backbone(teacher_backbone)
        if teacher_neck is not None:
            self.teacher_model.neck = build_neck(teacher_neck)
        teacher_bbox_head.update(train_cfg=train_cfg)
        teacher_bbox_head.update(test_cfg=test_cfg)
        self.teacher_model.bbox_head = build_head(teacher_bbox_head)
        if teacher_ckpt is not None:
            load_checkpoint(self.teacher_model, teacher_ckpt, map_location='cpu')

    @property
    def with_teacher_neck(self):
        """bool: whether the teacher model has a neck.

        Fixed: restored the ``@property`` decorator — ``extract_teacher_feat``
        tests ``if self.with_teacher_neck:``, and a bound method would always
        be truthy, applying a (possibly missing) neck unconditionally.
        """
        return hasattr(self.teacher_model, 'neck') and (self.teacher_model.neck is not None)

    def extract_teacher_feat(self, img):
        """Run the teacher backbone (and neck, when present) on *img*."""
        x = self.teacher_model.backbone(img)
        if self.with_teacher_neck:
            x = self.teacher_model.neck(x)
        return x

    def forward_train(self, img, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore=None):
        """Train the student using label assignments computed by the teacher."""
        # The teacher is inference-only: no gradients are needed.
        with torch.no_grad():
            x_teacher = self.extract_teacher_feat(img)
            outs_teacher = self.teacher_model.bbox_head(x_teacher)
            label_assignment_results = self.teacher_model.bbox_head.get_label_assignment(*outs_teacher, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore)
        x = self.extract_feat(img)
        losses = self.bbox_head.forward_train(x, label_assignment_results, img_metas, gt_bboxes, gt_labels, gt_bboxes_ignore)
        return losses
def random_subsample(x: List, num_samples: int=8, time_difference: bool=False) -> Tuple[NDArray, NDArray]:
    """Uniformly subsample ``num_samples`` elements from the sequence ``x``.

    Candidate positions are evenly spaced over ``[0, len(x) - 1]``; the chosen
    positions are returned in sorted (temporal) order.

    Args:
        x: sequence to subsample.
        num_samples: number of elements to keep; requires
            ``0 < num_samples <= len(x)``.
        time_difference: if True, post-process the sampled values with
            ``get_time_difference_indices`` (project helper) and return its
            keep-mask; otherwise every sample is kept.

    Returns:
        Tuple of (sampled values, boolean keep mask) as numpy arrays.
    """
    t = len(x)
    assert ((num_samples > 0) and (t > 0) and (t >= num_samples))
    # Evenly spaced candidate indices across the whole sequence.
    candidates = np.linspace(0, (t - 1), num_samples)
    candidates = np.clip(candidates, 0, (t - 1)).astype(int)
    # BUG FIX: np.random.choice was called without `size`, which returns a
    # single scalar — only ONE element was ever sampled. Draw all
    # `num_samples` candidates (without replacement) and restore order.
    chosen = np.sort(np.random.choice(candidates, size=num_samples, replace=False))
    samples = np.array(x)[chosen]
    if time_difference:
        (samples, keep_indices) = get_time_difference_indices(samples)
    else:
        keep_indices = np.ones(len(samples), dtype=bool)
    return (samples, keep_indices)
def _eager_safe_variable_handle(shape, key_dtype, value_dtype, shared_name, name, graph_mode, enter_threshold=0, kv_options=variable_scope.default_kv_option()):
    """Create a resource handle for a KV (embedding) variable.

    In graph mode the handle is returned immediately with its metadata
    attached; in eager mode the function first refuses to re-create an
    already-initialized shared variable, then derives the handle metadata from
    a temporary graph.

    NOTE(review): ``kv_options=variable_scope.default_kv_option()`` is
    evaluated once at function-definition time and shared across calls —
    confirm that is intentional.
    """
    # Inherit the container of the current default graph ('' when unset).
    container = (ops.get_default_graph()._container or '')
    # Only the per-key value shape matters; drop the leading (key) dimension.
    shape = tensor_shape.as_shape(shape.as_list()[1])
    handle = gen_kv_variable_ops.kv_variable(value_shape=shape, key_dtype=key_dtype, value_dtype=value_dtype, shared_name=shared_name, name=name, container=container, enter_threshold=enter_threshold)
    if graph_mode:
        # Graph mode: attach handle metadata and return immediately.
        handle._handle_data = resource_variable_ops.get_resource_handle_data(handle)
        return handle
    # Eager mode from here on: reject re-creation of an existing shared variable.
    exists = gen_kv_variable_ops.kv_variable_is_initialized_v2(handle)
    if exists:
        raise ValueError(("variable object with name '%s' already created. Use get_kv_variable() if reuse is desired." % shared_name))
    # Build a throwaway graph solely to derive the handle's shape/dtype
    # metadata, then dismantle it so it does not leak.
    with context.graph_mode(), ops.Graph().as_default() as graph:
        if kv_options.has_path():
            h = gen_kv_variable_ops.kv_variable_v4(value_shape=shape, key_dtype=key_dtype, value_dtype=value_dtype, shared_name=shared_name, name=name, container=container, storage_option=kv_options.serialize_string())
        else:
            h = gen_kv_variable_ops.kv_variable(value_shape=shape, key_dtype=key_dtype, value_dtype=value_dtype, shared_name=shared_name, name=name, container=container)
        handle._handle_data = resource_variable_ops.get_resource_handle_data(h)
    ops.dismantle_graph(graph)
    return handle
def retrieve_boxes(scene, objs, all_bboxes, cat2obj):
    """Match each scene object to its closest bounding box of the same category.

    Args:
        scene: scene dict providing 'image_filename' and 'objects'.
        objs: iterable of (object, tokens) pairs; objects expose .pos and .get_cat().
        all_bboxes: mapping image_filename -> list of {'object': {'bbox', 'category'}}.
        cat2obj: mapping from box category to object category.

    Returns:
        Result of convert_bounding_boxes over the matched (bbox, tokens) pairs.
    """
    # Deduplicate (bbox, category) pairs via a set, then back to lists.
    all_bbox = {(tuple(c['object']['bbox']), c['object']['category']) for c in all_bboxes[scene['image_filename']]}
    all_bbox = [(list(b), c) for (b, c) in all_bbox]
    assert (len(all_bbox) == len(scene['objects'])), "Error, number of boxes doesn't match number of objects"
    def cost(bbox, obj):
        # L1 distance between the box center and the object's position.
        (coord, cat) = bbox
        (ymin, ymax, xmin, xmax) = coord
        xc = ((xmin + xmax) / 2)
        yc = ((ymin + ymax) / 2)
        return (abs((obj.pos[0] - xc)) + abs((obj.pos[1] - yc)))
    final_bbox = []
    for (o, tokens) in objs:
        best_idx = 0
        # BUG FIX: best_val was initialized to 0.0, so `cur_cost < best_val`
        # could never hold (cost is non-negative) and box 0 always won,
        # potentially tripping the category assert below. Start at +inf.
        best_val = float('inf')
        for (i, b) in enumerate(all_bbox):
            if (cat2obj[b[1]] != o.get_cat()):
                continue
            cur_cost = cost(b, o)
            if (cur_cost < best_val):
                best_val = cur_cost
                best_idx = i
        assert (cat2obj[all_bbox[best_idx][1]] == o.get_cat()), 'Wrong category'
        final_bbox.append((all_bbox[best_idx][0], tokens))
    return convert_bounding_boxes(final_bbox)
class ReversibleBlock(nn.Module):
    """Reversible residual block: y1 = x1 + f(x2), y2 = x2 + g(y1).

    Activations are not stored during forward; `backward_pass` reconstructs
    the inputs from the outputs, trading compute for memory.
    """
    def __init__(self, f, g):
        super().__init__()
        # Deterministic wrappers record/replay RNG state so stochastic layers
        # (e.g. dropout) behave identically when recomputed in backward_pass.
        self.f = Deterministic(f)
        self.g = Deterministic(g)
    def forward(self, x, f_args={}, g_args={}):
        # NOTE(review): mutable default dict args; they are never mutated here.
        # Split the feature dimension (dim=2) into the two reversible streams.
        (x1, x2) = torch.chunk(x, 2, dim=2)
        (y1, y2) = (None, None)
        # No autograd graph is kept: gradients are computed manually in
        # backward_pass by re-running f and g.
        with torch.no_grad():
            y1 = (x1 + self.f(x2, record_rng=self.training, **f_args))
            y2 = (x2 + self.g(y1, record_rng=self.training, **g_args))
        return torch.cat([y1, y2], dim=2)
    def backward_pass(self, y, dy, f_args={}, g_args={}):
        # Given outputs y and output grads dy, reconstruct the inputs x and
        # return (x, dx). Intermediates are deleted eagerly to cap memory.
        (y1, y2) = torch.chunk(y, 2, dim=2)
        del y
        (dy1, dy2) = torch.chunk(dy, 2, dim=2)
        del dy
        with torch.enable_grad():
            y1.requires_grad = True
            # Replay g with the recorded RNG state (set_rng=True).
            gy1 = self.g(y1, set_rng=True, **g_args)
            torch.autograd.backward(gy1, dy2)
        with torch.no_grad():
            # Invert the second coupling: x2 = y2 - g(y1).
            x2 = (y2 - gy1)
            del y2, gy1
            dx1 = (dy1 + y1.grad)
            del dy1
            y1.grad = None
        with torch.enable_grad():
            x2.requires_grad = True
            fx2 = self.f(x2, set_rng=True, **f_args)
            # retain_graph=True kept from the original; presumably required
            # because fx2 is reused below — confirm before changing.
            torch.autograd.backward(fx2, dx1, retain_graph=True)
        with torch.no_grad():
            # Invert the first coupling: x1 = y1 - f(x2).
            x1 = (y1 - fx2)
            del y1, fx2
            dx2 = (dy2 + x2.grad)
            del dy2
            x2.grad = None
            # Detach x2 so the reconstructed input carries no graph.
            x = torch.cat([x1, x2.detach()], dim=2)
            dx = torch.cat([dx1, dx2], dim=2)
        return (x, dx)
class TestConfig(unittest.TestCase):
    """Sanity checks for the default PostTrainingQuantConfig recipes."""
    def test_config(self):
        """Every boolean recipe switch defaults to off; the graph
        optimization level is unset by default."""
        recipes = PostTrainingQuantConfig().recipes
        for switch in ('smooth_quant', 'fast_bias_correction', 'weight_correction', 'dedicated_qdq_pair', 'add_qdq_pair_to_weight'):
            self.assertEqual(recipes[switch], False)
        self.assertEqual(recipes['graph_optimization_level'], None)
def parse_resume_step_from_filename(filename):
    """Extract the training step encoded in a checkpoint filename.

    For names like ``model123456.pt`` this returns ``123456``; anything that
    does not contain ``model`` followed by an integer yields ``0``.
    """
    # Everything after the LAST occurrence of 'model' (mirrors split()[-1]).
    _head, sep, tail = filename.rpartition('model')
    if not sep:
        # 'model' not present in the filename at all.
        return 0
    digits = tail.split('.')[0]
    try:
        return int(digits)
    except ValueError:
        # No parsable integer between 'model' and the extension.
        return 0
class InverseFlow(Flow):
    """A flow whose forward and inverse directions are those of a wrapped
    flow, swapped."""
    def __init__(self, flow: Flow) -> None:
        super().__init__()
        # The flow being inverted.
        self.flow = flow
    def forward(self, f: torch.Tensor) -> torch.Tensor:
        """Apply the wrapped flow's inverse transform."""
        return self.flow.inverse(f)
    def inverse(self, f: torch.Tensor) -> torch.Tensor:
        """Apply the wrapped flow's forward transform."""
        return self.flow.forward(f)
class StableDiffusionGLIGENPipeline(metaclass=DummyObject):
    """Placeholder pipeline that raises a helpful error when its optional
    backends are missing: every entry point funnels into `requires_backends`,
    which fails unless both `torch` and `transformers` are installed.
    """
    # Backends required by the real pipeline implementation.
    _backends = ['torch', 'transformers']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers'])
    def from_config(cls, *args, **kwargs):
        # NOTE(review): written with `cls` but without an @classmethod
        # decorator — likely stripped in this dump; confirm upstream.
        requires_backends(cls, ['torch', 'transformers'])
    def from_pretrained(cls, *args, **kwargs):
        # Same note as from_config regarding the missing @classmethod.
        requires_backends(cls, ['torch', 'transformers'])
def build_model():
    """Build a small ONNX test model: Add -> (Conv1 | Conv2) -> Concat ->
    AveragePool -> Reshape.

    Returns:
        An ONNX ModelProto (opset 13) with one float input of shape
        (1, 3, 15, 15) and a reshaped (88, 11) output.
    """
    input = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 15, 15])
    output = helper.make_tensor_value_info('reshape_output', TensorProto.FLOAT, [88, 11])
    # Identity-like Add against a zero initializer (gives the graph an extra node).
    add_node = onnx.helper.make_node('Add', ['input', 'add_init'], ['add_out'], name='add')
    # Two parallel convolutions over the Add output with random {-1, 0, 1} weights.
    conv1_weight_initializer = numpy_helper.from_array(np.random.randint((- 1), 2, [3, 3, 3, 3]).astype(np.float32), name='conv1_weight')
    conv1_node = helper.make_node('Conv', ['add_out', 'conv1_weight'], ['conv1_output'], name='conv1')
    conv2_weight_initializer = numpy_helper.from_array(np.random.randint((- 1), 2, [5, 3, 3, 3]).astype(np.float32), name='conv2_weight')
    conv2_node = helper.make_node('Conv', ['add_out', 'conv2_weight'], ['conv2_output'], name='conv2')
    concat_node = helper.make_node('Concat', ['conv1_output', 'conv2_output'], ['concat_output'], name='Concat', axis=1)
    avg_args = {'kernel_shape': [3, 3]}
    avgpool_node = helper.make_node('AveragePool', ['concat_output'], ['avg_output'], name='AveragePool', **avg_args)
    reshape_node = onnx.helper.make_node('Reshape', ['avg_output', 'shape'], ['reshape_output'], name='Reshape')
    # FIX: removed a dead `initializers = []` that was immediately overwritten.
    initializers = [conv1_weight_initializer, conv2_weight_initializer]
    initializers.append(onnx.numpy_helper.from_array(np.array([88, 11], dtype=np.int64), name='shape'))
    initializers.append(onnx.numpy_helper.from_array(np.zeros((1, 3, 15, 15), dtype=np.float32), name='add_init'))
    # NOTE(review): nodes are not in topological order (`add_node` is listed
    # last). make_graph accepts this, but some runtimes require sorted nodes —
    # confirm downstream consumers tolerate it.
    graph = helper.make_graph([conv1_node, conv2_node, concat_node, avgpool_node, reshape_node, add_node], 'test', [input], [output], initializer=initializers)
    model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', 13)])
    return model
def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Convert a fairseq Speech2Text checkpoint into a Hugging Face
    Speech2TextForConditionalGeneration model saved at `pytorch_dump_folder_path`.

    NOTE(review): `remove_ignore_keys_`, `rename_keys` and
    `make_linear_from_emb` are project helpers defined elsewhere in this file.
    """
    m2m_100 = torch.load(checkpoint_path, map_location='cpu')
    args = m2m_100['args']
    state_dict = m2m_100['model']
    # Grab the output-projection weights before key renaming; used below when
    # embeddings are not tied.
    lm_head_weights = state_dict['decoder.output_projection.weight']
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    vocab_size = state_dict['decoder.embed_tokens.weight'].shape[0]
    # Whether decoder input embeddings and the output projection share weights.
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(',')]
    # Mirror the fairseq training args into the HF config, plus fixed
    # generation defaults (beams, max length, BOS id, early stopping).
    config = Speech2TextConfig(vocab_size=vocab_size, max_source_positions=args.max_source_positions, max_target_positions=args.max_target_positions, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='relu', num_conv_layers=len(conv_kernel_sizes), conv_channels=args.conv_channels, conv_kernel_sizes=conv_kernel_sizes, input_feat_per_channel=args.input_feat_per_channel, input_channels=args.input_channels, tie_word_embeddings=tie_embeds, num_beams=5, max_length=200, use_cache=True, decoder_start_token_id=2, early_stopping=True)
    model = Speech2TextForConditionalGeneration(config)
    # Positional embeddings are generated on the fly, so only those keys may
    # legitimately be missing from the converted state dict.
    (missing, unexpected) = model.model.load_state_dict(state_dict, strict=False)
    if ((len(missing) > 0) and (not (set(missing) <= {'encoder.embed_positions.weights', 'decoder.embed_positions.weights'}))):
        raise ValueError(f'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing, but all the following weights are missing {missing}')
    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights
    model.save_pretrained(pytorch_dump_folder_path)
def record(msg=''):
    """Debug timer: print elapsed wall-clock seconds since the last reset.

    Calling with an empty message (or before any reset) restarts the
    module-level timer and prints a start marker; a non-empty message prints
    the seconds elapsed since the last reset. No-op unless DEBUG_TIME is set.
    """
    if not DEBUG_TIME:
        return
    global start_time
    if (start_time is None) or (msg == ''):
        # (Re)start the timer and emit the start marker.
        start_time = time.time()
        print(('%.2f seconds: ' % 0) + 'start')
    else:
        elapsed = time.time() - start_time
        print(('%.2f seconds: ' % elapsed) + msg)
def Load_model_weight_checkpoint(experiment_folder='.', experiment_name=None, rank=0, epoch=10):
    """Load model weights saved by a given rank at a given epoch.

    The file is expected at
    ``{experiment_folder}/checkpoint/{experiment_name}/Rank{rank}_Epoch_{epoch}_weights.pth``.

    Returns:
        The checkpoint's ``'state_dict'`` entry when the payload is a mapping
        that contains one; otherwise the loaded payload unchanged.
    """
    path_checkpoint = ('%s/checkpoint/%s/' % (experiment_folder, experiment_name))
    pthfile = (path_checkpoint + ('Rank%s_Epoch_%s_weights.pth' % (rank, epoch)))
    # map_location keeps tensors where they are deserialized instead of the
    # device they were saved from.
    checkpoint_weights = torch.load(pthfile, map_location=(lambda storage, loc: storage))
    try:
        # Full training checkpoints nest the weights under 'state_dict';
        # plain weight files are returned as-is.
        checkpoint_weights = checkpoint_weights['state_dict']
    except (KeyError, TypeError):
        # FIX: narrowed from a bare `except:` — only "no such key" (KeyError)
        # or "payload not subscriptable by string" (TypeError) mean the file
        # holds raw weights; anything else should propagate.
        pass
    return checkpoint_weights
class Server():
    """Federated-learning server simulator.

    Owns the global model, selects clients each round (uniformly at random or
    via Oort/FedBalancer when configured), simulates client training under a
    round deadline, aggregates successful updates, and keeps per-client
    statistics in `clients_info`.
    """
    def __init__(self, model, clients=[], cfg=None, deadline=0):
        """Set up bookkeeping and the optional Oort / FedBalancer controllers.

        NOTE(review): `clients=[]` is a mutable default argument; it is only
        read here, but callers should pass an explicit list.
        """
        self._cur_time = 0
        self.model = model
        self.all_clients = clients
        self.cfg = cfg
        self.deadline = deadline
        self.selected_clients = []
        self.updates = []
        self.clients_info = defaultdict(dict)
        self.test_clients_info = defaultdict(dict)
        self.failed_clients = []
        self.current_round = 0
        self.oort = None
        self.fedbalancer = None
        # Oort drives utility-based client selection; FedBalancer drives
        # sample-loss thresholds and deadline control. Either may be active.
        if (self.cfg.oort or self.cfg.fb_client_selection or self.cfg.oortbalancer):
            self.oort = Oort(self.all_clients, deadline, oort_pacer=self.cfg.oort_pacer, pacer_delta=self.cfg.oort_pacer_delta, oort_blacklist=self.cfg.oort_blacklist)
        if (self.cfg.fedbalancer or self.cfg.oortbalancer or self.cfg.ss_baseline):
            self.fedbalancer = FedBalancer(self.cfg.fb_inference_pipelining, self.cfg.fb_p, self.cfg.fb_w, self.cfg.fb_simple_control_lt_stepsize, self.cfg.fb_simple_control_ddl_stepsize)
        # Per-client bookkeeping; -1 marks "not yet measured".
        for c in self.all_clients:
            self.clients_info[str(c.id)]['acc'] = 0.0
            self.clients_info[str(c.id)]['device'] = c.device.device_model
            self.clients_info[str(c.id)]['sample_num'] = len(c.train_data['y'])
            self.clients_info[str(c.id)]['download_times'] = []
            self.clients_info[str(c.id)]['upload_times'] = []
            self.clients_info[str(c.id)]['one_epoch_train_time'] = (- 1)
            self.clients_info[str(c.id)]['overthreshold_loss_count'] = (- 1)
            self.clients_info[str(c.id)]['overthreshold_loss_sum'] = (- 1)
            self.clients_info[str(c.id)]['utility'] = 0.0
            self.clients_info[str(c.id)]['last_selected_round'] = (- 1)
            if self.cfg.oort_pacer:
                self.clients_info[str(c.id)]['last_selected_round_duration'] = (- 1)
            if self.cfg.oort_blacklist:
                self.clients_info[str(c.id)]['selected_count'] = (- 1)
            # Give each client back-references to the shared controllers.
            c.fedbalancer = self.fedbalancer
            c.oort = self.oort
    def select_clients(self, possible_clients, num_clients=20, batch_size=10):
        """Pick up to `num_clients` online clients for the coming round.

        Returns False when fewer than cfg.min_selected clients are online;
        otherwise stores the selection in self.selected_clients and returns
        [(num_train_samples, num_test_samples), ...] for it. Uses Oort-based
        selection when configured, else uniform random without replacement.
        """
        num_clients = min(num_clients, len(possible_clients))
        if (num_clients < self.cfg.min_selected):
            logger.info('insufficient clients: need {} while get {} online'.format(self.cfg.min_selected, num_clients))
            return False
        if (self.cfg.fb_client_selection or self.cfg.oort or self.cfg.oortbalancer):
            if self.cfg.oort_pacer:
                # The pacer may relax/tighten the round deadline each round.
                self.deadline = self.oort.pacer_deadline_update(self.deadline, self.current_round)
            (self.selected_clients, self.clients_info) = self.oort.select_clients(self.all_clients, possible_clients, num_clients, self.clients_info, self.current_round, self.deadline, self.cfg.batch_size, self.cfg.num_epochs, self.cfg.behav_hete, self.cfg.fb_client_selection, self.cfg.fb_inference_pipelining, self.cfg.oortbalancer)
        else:
            self.selected_clients = np.random.choice(possible_clients, num_clients, replace=False)
        return [(c.num_train_samples, c.num_test_samples) for c in self.selected_clients]
    def train_model(self, num_epochs=1, batch_size=10):
        """Simulate one round of client training and collect updates.

        Returns the simulated wall-clock duration of the round. Side effects:
        fills self.updates, refreshes clients_info / Oort / FedBalancer state,
        and advances self.current_round on success.
        """
        simulate_time = 0
        accs = []
        losses = []
        self.updates = []
        sorted_loss_sum = 0
        num_of_samples = 0
        max_loss = 0
        min_loss = sys.maxsize
        # FedBalancer adjusts its loss threshold (and optionally the deadline)
        # once at least one client has reported back in a previous round.
        if ((self.cfg.fedbalancer or self.cfg.oortbalancer) and self.fedbalancer.if_any_client_sent_response_for_current_round):
            self.fedbalancer.loss_threshold_selection()
            if (self.cfg.fb_simple_control_ddl_stepsize != 0.0):
                self.deadline = self.fedbalancer.deadline_selection(self.selected_clients, self.clients_info, self.cfg.num_epochs, self.deadline, self.cfg.batch_size, self.current_round)
            logger.info('this round deadline {}, loss_threshold {}'.format(self.deadline, self.fedbalancer.loss_threshold))
            logger.info('this round deadline ratio {}, loss_threshold ratio {}'.format(self.fedbalancer.deadline_ratio, self.fedbalancer.loss_threshold_ratio))
        else:
            logger.info('this round deadline {}'.format(self.deadline))
        # Under oort_pacer / smartpc, per-client results are buffered here and
        # only the fastest subset is accepted after the loop below.
        if (self.cfg.oort_pacer or self.cfg.ddl_baseline_smartpc):
            client_tmp_info = {}
            client_tmp_info = {c.id: {'simulate_time_c': 0, 'num_samples': 0, 'update': 0, 'acc': 0, 'loss': 0, 'update_size': 0, 'seed': 0, 'sorted_loss': 0, 'download_time': 0, 'upload_time': 0, 'train_time': 0, 'inference_time': 0, 'completed_epochs': 0, 'c_model_size': 0, 'c_before_comp_upload_time': 0, 'c_ori_download_time': 0, 'c_ori_train_time': 0, 'c_ori_upload_time': 0, 'c_act_download_time': 0, 'c_act_train_time': 0, 'c_act_upload_time': 0, 'client_simulate_time': 0} for c in self.selected_clients}
            client_simulate_times = []
        if (self.oort != None):
            self.oort.curr_round_exploited_utility = 0.0
        # Snapshot the global model once; every client trains on its own copy.
        server_current_model = copy.deepcopy(self.model)
        round_failed_clients = []
        for c in self.selected_clients:
            c.model = None
            c._model = None
            c.model = copy.deepcopy(server_current_model)
            try:
                c.set_deadline(self.deadline)
                if (self.cfg.fedbalancer or self.cfg.oortbalancer):
                    c.set_loss_threshold(self.fedbalancer.loss_threshold)
                logger.debug('client {} starts training...'.format(c.id))
                start_t = self.get_cur_time()
                (simulate_time_c, num_samples, update, acc, loss, update_size, sorted_loss, download_time, upload_time, train_time, inference_time, completed_epochs) = c.train(start_t, num_epochs, batch_size)
                if (self.cfg.oort_pacer or self.cfg.ddl_baseline_smartpc):
                    # Buffer everything; acceptance is decided after the loop.
                    client_tmp_info[c.id]['simulate_time_c'] = simulate_time_c
                    client_tmp_info[c.id]['num_samples'] = num_samples
                    client_tmp_info[c.id]['update'] = update
                    client_tmp_info[c.id]['acc'] = acc
                    client_tmp_info[c.id]['loss'] = loss
                    client_tmp_info[c.id]['update_size'] = update_size
                    client_tmp_info[c.id]['sorted_loss'] = sorted_loss
                    client_tmp_info[c.id]['download_time'] = download_time
                    client_tmp_info[c.id]['upload_time'] = upload_time
                    client_tmp_info[c.id]['train_time'] = train_time
                    client_tmp_info[c.id]['inference_time'] = inference_time
                    client_tmp_info[c.id]['completed_epochs'] = completed_epochs
                    client_tmp_info[c.id]['c_model_size'] = c.model.size
                    client_tmp_info[c.id]['c_before_comp_upload_time'] = c.before_comp_upload_time
                    client_tmp_info[c.id]['c_ori_download_time'] = c.ori_download_time
                    client_tmp_info[c.id]['c_ori_inference_time'] = c.ori_inference_time
                    client_tmp_info[c.id]['c_ori_train_time'] = c.ori_train_time
                    client_tmp_info[c.id]['c_ori_upload_time'] = c.ori_upload_time
                    client_tmp_info[c.id]['c_act_download_time'] = c.act_download_time
                    client_tmp_info[c.id]['c_act_inference_time'] = c.act_inference_time
                    client_tmp_info[c.id]['c_act_train_time'] = c.act_train_time
                    client_tmp_info[c.id]['c_act_upload_time'] = c.act_upload_time
                    client_tmp_info[c.id]['client_simulate_time'] = max(simulate_time_c, (((download_time + upload_time) + train_time) + inference_time))
                    client_simulate_times.append((c.id, max(simulate_time_c, (((download_time + upload_time) + train_time) + inference_time))))
                else:
                    # Immediate-acceptance path: record stats and the update now.
                    self.clients_info[str(c.id)]['download_times'].append(download_time)
                    self.clients_info[str(c.id)]['upload_times'].append(upload_time)
                    self.clients_info[str(c.id)]['one_epoch_train_time'] = (train_time / completed_epochs)
                    if ((self.cfg.fb_client_selection or self.cfg.oort or self.cfg.oortbalancer) and (not self.cfg.oort_pacer)):
                        self.oort.curr_round_exploited_utility += self.clients_info[str(c.id)]['utility']
                    if (self.cfg.oort_pacer and (self.cfg.fb_client_selection or self.cfg.oort or self.cfg.oortbalancer)):
                        self.clients_info[str(c.id)]['last_selected_round_duration'] = (((download_time + train_time) + upload_time) + inference_time)
                    if (len(sorted_loss) > 0):
                        # Track global min/max/mean of per-sample losses.
                        sorted_loss_sum += sum(sorted_loss)
                        num_of_samples += len(sorted_loss)
                        if (sorted_loss[0] < min_loss):
                            min_loss = sorted_loss[0]
                        if (sorted_loss[(- 1)] > max_loss):
                            max_loss = sorted_loss[(- 1)]
                        if (self.cfg.fb_client_selection or self.cfg.oort or self.cfg.oortbalancer):
                            # Sum of squared losses above the client's threshold
                            # (walked from the tail of the ascending list).
                            summ = 0
                            overthreshold_loss_count = 0
                            for loss_idx in range(len(sorted_loss)):
                                if (sorted_loss[((len(sorted_loss) - 1) - loss_idx)] > c.loss_threshold):
                                    summ += (sorted_loss[((len(sorted_loss) - 1) - loss_idx)] * sorted_loss[((len(sorted_loss) - 1) - loss_idx)])
                                    overthreshold_loss_count += 1
                            self.clients_info[str(c.id)]['overthreshold_loss_sum'] = summ
                            self.clients_info[str(c.id)]['overthreshold_loss_count'] = overthreshold_loss_count
                        if (self.cfg.fedbalancer or self.cfg.oortbalancer):
                            self.fedbalancer.if_any_client_sent_response_for_current_round = True
                            # Noised loss min / 80th-percentile reported to
                            # FedBalancer's threshold controller.
                            noise1 = np.random.normal(0, self.cfg.noise_factor, 1)[0]
                            noise2 = np.random.normal(0, self.cfg.noise_factor, 1)[0]
                            self.fedbalancer.current_round_loss_min.append((np.min(sorted_loss) + noise1))
                            self.fedbalancer.current_round_loss_max.append((np.percentile(sorted_loss, 80) + noise2))
                    logger.debug('client {} simulate_time: {}'.format(c.id, simulate_time_c))
                    logger.debug('client {} num_samples: {}'.format(c.id, num_samples))
                    logger.debug('client {} acc: {}, loss: {}'.format(c.id, acc, loss))
                    accs.append(acc)
                    losses.append(loss)
                    # Round time is the slowest client, capped at the deadline.
                    simulate_time = min(self.deadline, max(simulate_time, simulate_time_c))
                    self.updates.append((c.id, num_samples, update))
                    logger.debug('client {} upload successfully with acc {}, loss {}'.format(c.id, acc, loss))
            except timeout_decorator.timeout_decorator.TimeoutError as e:
                # Client missed the deadline; the round still costs the full deadline.
                logger.debug('client {} failed: {}'.format(c.id, e))
                round_failed_clients.append(c.id)
                simulate_time = self.deadline
                # NOTE(review): pacer/smartpc paths are expected never to hit a
                # timeout here — this assert enforces that expectation.
                if (self.cfg.oort_pacer or self.cfg.ddl_baseline_smartpc):
                    assert False
            except Exception as e:
                logger.error('client {} failed: {}'.format(c.id, e))
                traceback.print_exc()
            # Drop the client's model copy regardless of outcome.
            c.model = None
            c._model = None
        if (self.cfg.oort_pacer or self.cfg.ddl_baseline_smartpc):
            # Accept only the fastest clients from the buffered results.
            client_simulate_times = sorted(client_simulate_times, key=(lambda tup: tup[1]))
            for_loop_until = 0
            if self.cfg.oort_pacer:
                for_loop_until = min(self.cfg.clients_per_round, len(client_simulate_times))
            elif self.cfg.ddl_baseline_smartpc:
                for_loop_until = int((min(self.cfg.clients_per_round, len(client_simulate_times)) * self.cfg.ddl_baseline_smartpc_percentage))
            for c_idx in range(for_loop_until):
                c_id = client_simulate_times[c_idx][0]
                # NOTE(review): the next three updates key on `str(c.id)` where
                # `c` is the stale loop variable from the per-client loop above
                # — they almost certainly should use `c_id`. Likewise the
                # download_time/train_time/upload_time/inference_time used a
                # few lines below are stale locals from the last iterated
                # client, not client_tmp_info[c_id] values. Confirm upstream.
                self.clients_info[str(c.id)]['download_times'].append(client_tmp_info[c_id]['download_time'])
                self.clients_info[str(c.id)]['upload_times'].append(client_tmp_info[c_id]['upload_time'])
                self.clients_info[str(c.id)]['one_epoch_train_time'] = (client_tmp_info[c_id]['train_time'] / client_tmp_info[c_id]['completed_epochs'])
                if (self.cfg.oort_pacer and (self.cfg.fb_client_selection or self.cfg.oort or self.cfg.oortbalancer)):
                    self.clients_info[str(c.id)]['last_selected_round_duration'] = (((download_time + train_time) + upload_time) + inference_time)
                if (len(client_tmp_info[c_id]['sorted_loss']) > 0):
                    sorted_loss_sum += sum(client_tmp_info[c_id]['sorted_loss'])
                    num_of_samples += len(client_tmp_info[c_id]['sorted_loss'])
                    if (client_tmp_info[c_id]['sorted_loss'][0] < min_loss):
                        min_loss = client_tmp_info[c_id]['sorted_loss'][0]
                    if (client_tmp_info[c_id]['sorted_loss'][(- 1)] > max_loss):
                        max_loss = client_tmp_info[c_id]['sorted_loss'][(- 1)]
                    if (self.cfg.fb_client_selection or self.cfg.oort or self.cfg.oortbalancer):
                        # Same over-threshold accounting as the immediate path,
                        # but against the buffered per-client loss list.
                        summ = 0
                        overthreshold_loss_count = 0
                        for loss_idx in range(len(client_tmp_info[c_id]['sorted_loss'])):
                            loss_threshold = 0
                            if (self.cfg.fb_client_selection or self.cfg.oortbalancer):
                                loss_threshold = self.fedbalancer.loss_threshold
                            else:
                                loss_threshold = 0
                            if (client_tmp_info[c_id]['sorted_loss'][((len(client_tmp_info[c_id]['sorted_loss']) - 1) - loss_idx)] > loss_threshold):
                                summ += (client_tmp_info[c_id]['sorted_loss'][((len(client_tmp_info[c_id]['sorted_loss']) - 1) - loss_idx)] * client_tmp_info[c_id]['sorted_loss'][((len(client_tmp_info[c_id]['sorted_loss']) - 1) - loss_idx)])
                                overthreshold_loss_count += 1
                        self.clients_info[c_id]['overthreshold_loss_sum'] = summ
                        self.clients_info[c_id]['overthreshold_loss_count'] = overthreshold_loss_count
                    if (self.cfg.fedbalancer or self.cfg.oortbalancer):
                        self.fedbalancer.if_any_client_sent_response_for_current_round = True
                        noise1 = np.random.normal(0, self.cfg.noise_factor, 1)[0]
                        noise2 = np.random.normal(0, self.cfg.noise_factor, 1)[0]
                        self.fedbalancer.current_round_loss_min.append((np.min(client_tmp_info[c_id]['sorted_loss']) + noise1))
                        self.fedbalancer.current_round_loss_max.append((np.percentile(client_tmp_info[c_id]['sorted_loss'], 80) + noise2))
                logger.debug('client {} simulate_time: {}'.format(c_id, client_tmp_info[c_id]['simulate_time_c']))
                logger.debug('client {} num_samples: {}'.format(c_id, client_tmp_info[c_id]['num_samples']))
                logger.debug('client {} acc: {}, loss: {}'.format(c_id, client_tmp_info[c_id]['acc'], client_tmp_info[c_id]['loss']))
                accs.append(client_tmp_info[c_id]['acc'])
                losses.append(client_tmp_info[c_id]['loss'])
                if (self.cfg.fedbalancer or self.cfg.oortbalancer):
                    simulate_time = min(self.deadline, max(simulate_time, client_tmp_info[c_id]['simulate_time_c']))
                elif (self.cfg.oort_pacer or self.cfg.ddl_baseline_smartpc):
                    # Pacer/smartpc rounds are not capped by the deadline.
                    simulate_time = max(simulate_time, client_tmp_info[c_id]['client_simulate_time'])
                else:
                    simulate_time = min(self.deadline, max(simulate_time, client_tmp_info[c_id]['simulate_time_c']))
                self.updates.append((c_id, client_tmp_info[c_id]['num_samples'], client_tmp_info[c_id]['update']))
                logger.debug('client {} upload successfully with acc {}, loss {}'.format(c_id, client_tmp_info[c_id]['acc'], client_tmp_info[c_id]['loss']))
        try:
            # Round summary; ZeroDivisionError here means no client succeeded.
            avg_acc = (sum(accs) / len(accs))
            avg_loss = (sum(losses) / len(losses))
            logger.info('average acc: {}, average loss: {}'.format(avg_acc, avg_loss))
            logger.info('configuration and update stage simulation time: {}'.format(simulate_time))
            if (not self.cfg.oort_pacer):
                if (self.cfg.fb_client_selection or self.cfg.oort or self.cfg.oortbalancer):
                    self.oort.round_exploited_utility.append(self.oort.curr_round_exploited_utility)
            if (self.cfg.fedbalancer or self.cfg.oortbalancer):
                # Deadline-normalized mean sample loss drives ratio control.
                current_round_loss = ((sorted_loss_sum / num_of_samples) / self.deadline)
                self.fedbalancer.prev_train_losses.append(current_round_loss)
                logger.info('current_round_loss: {}'.format(current_round_loss))
                self.fedbalancer.ratio_control(self.fedbalancer.prev_train_losses, self.current_round)
            logger.info('min sample loss: {}, max sample loss: {}'.format(min_loss, max_loss))
            self.current_round += 1
        except ZeroDivisionError as e:
            logger.error('training time window is too short to train!')
        except Exception as e:
            logger.error('failed reason: {}'.format(e))
            traceback.print_exc()
            assert False
        if (self.cfg.fb_client_selection or self.cfg.oort or self.cfg.oortbalancer):
            # Grow the guard time when many of last round's failures failed
            # again this round; otherwise reset it.
            if (len(self.failed_clients) != 0):
                again_failed_clients = 0
                for fc_id in self.failed_clients[(- 1)]:
                    for r_fc_id in round_failed_clients:
                        if (fc_id == r_fc_id):
                            again_failed_clients += 1
                logger.info(('AGAIN FAILED CLIENTS:' + str(again_failed_clients)))
                if (again_failed_clients > (self.cfg.clients_per_round * 0.1)):
                    self.fedbalancer.guard_time += 10
                else:
                    self.fedbalancer.guard_time = 0
            self.failed_clients.append(round_failed_clients)
            if (self.oort.epsilon != 0):
                self.oort.zero_epsilon(self.all_clients, self.clients_info)
        return simulate_time
    def update_model(self, update_frac):
        """Aggregate self.updates into the global model when at least
        `update_frac` of the selected clients uploaded successfully."""
        logger.info('{} of {} clients upload successfully'.format(len(self.updates), len(self.selected_clients)))
        if ((len(self.updates) / len(self.selected_clients)) >= update_frac):
            logger.info('round succeed, updating global model...')
            if self.cfg.no_training:
                logger.info('pseduo-update because of no_training setting.')
                self.updates = []
                return
            if (self.cfg.aggregate_algorithm == 'SucFedAvg'):
                logger.info('Aggragate with SucFedAvg')
                total_weight = 0.0
                # Sample-count-weighted average of the uploaded state dicts.
                total_data_size = sum([client_num_samples for (cid, client_num_samples, client_model_state) in self.updates])
                aggregation_weights = [(client_num_samples / total_data_size) for (cid, client_num_samples, client_model_state) in self.updates]
                update_state = OrderedDict()
                for (k, (cid, client_samples, client_model)) in enumerate(self.updates):
                    for key in self.model.net.state_dict().keys():
                        if (k == 0):
                            update_state[key] = (client_model[key] * aggregation_weights[k])
                        else:
                            update_state[key] += (client_model[key] * aggregation_weights[k])
                self.model.net.load_state_dict(update_state)
            else:
                logger.error('not supported aggregating algorithm: {}'.format(self.cfg.aggregate_algorithm))
                assert False
        else:
            logger.info('round failed, global model maintained.')
            # Penalize a failed round with extra guard time (FedBalancer only).
            if (self.fedbalancer != None):
                self.fedbalancer.guard_time += 10
        self.updates = []
    def test_model(self, clients_to_test, set_to_use='test'):
        """Evaluate the current global model on the given clients.

        Returns a dict mapping client id -> that client's metrics dict.
        NOTE(review): the `clients_to_test is None` branch assigns a fallback
        and then unconditionally `assert False` — passing None always aborts.
        """
        metrics = {}
        if (clients_to_test is None):
            clients_to_test = self.selected_clients
            assert False
        for client in clients_to_test:
            # Each client evaluates its own copy of the global model.
            client.model = copy.deepcopy(self.model)
            c_metrics = client.test(set_to_use)
            metrics[client.id] = c_metrics
            if isinstance(c_metrics['accuracy'], np.ndarray):
                self.test_clients_info[client.id]['acc'] = c_metrics['accuracy'].tolist()
            else:
                self.test_clients_info[client.id]['acc'] = c_metrics['accuracy']
            client.model = None
            client._model = None
        return metrics
    def get_clients_info(self, clients):
        """Return (ids, {id: group}, {id: num_samples}) for `clients`
        (defaults to all known clients when None)."""
        if (clients is None):
            clients = self.all_clients
        ids = [c.id for c in clients]
        groups = {c.id: c.group for c in clients}
        num_samples = {c.id: c.num_samples for c in clients}
        return (ids, groups, num_samples)
    def get_cur_time(self):
        """Current simulated wall-clock time."""
        return self._cur_time
    def pass_time(self, sec):
        """Advance simulated time by `sec` seconds."""
        self._cur_time += sec
    def get_time_window(self):
        """Draw a non-negative round time window ~ N(mean, std) from cfg."""
        tw = np.random.normal(self.cfg.time_window[0], self.cfg.time_window[1])
        while (tw < 0):
            tw = np.random.normal(self.cfg.time_window[0], self.cfg.time_window[1])
        return tw
# NOTE(review): bare `_config` expression — almost certainly a stripped
# decorator line (a config-registration decorator applied to the function
# below); as written it only evaluates the name. Confirm upstream.
_config
def model_lifelong_sidetune_double_fcn5s_taskonomy():
    # Experiment configuration for a lifelong side-tuning FCN5 setup. The
    # registering decorator presumably captures this local; the function
    # itself returns None as written — confirm against the original source.
    cfg = {'learner': {'model': 'LifelongSidetuneNetwork', 'model_kwargs': {'base_class': 'FCN5', 'base_weights_path': '/mnt/models/curvature_encoder_student.dat', 'base_kwargs': {'eval_only': True, 'train': False, 'normalize_outputs': False}, 'use_baked_encoding': False, 'side_class': 'FCN5', 'side_weights_path': '/mnt/models/curvature_encoder_student.dat', 'side_kwargs': {'eval_only': False, 'train': True, 'normalize_outputs': False}, 'normalize_pre_transfer': True}}}
def main(iterations, use_test=False):
    """Train a model, evaluate it, and compare against a majority-class baseline.

    Returns:
        (acc, f1, prec, rec, guess_acc, guess_f1, guess_prec, guess_rec) —
        metrics for the trained model followed by the majority-guess baseline.
    """
    (x_train, y_train, x_val, y_val, x_test, y_test) = load_data(use_test)
    # Shift test labels by one — presumably aligns load_data's label indexing
    # with the model's output classes; confirm against load_data/train_model.
    y_test += 1
    print('Loaded {} training examples, {} validation examples, {} testing examples'.format(len(x_train), len(x_val), len(x_test)))
    model = train_model(x_train, y_train, x_val, y_val, iterations, learning_rate=0.001)
    preds = test_model(model, x_test)
    # Metrics are computed on CPU tensors/arrays.
    y_test = y_test.cpu()
    acc = accuracy_score(y_test, preds)
    f1 = f1_score(y_test, preds, average='macro')
    prec = precision_score(y_test, preds, average='macro')
    rec = recall_score(y_test, preds, average='macro')
    # Majority-class baseline: always predict the most frequent training label.
    # NOTE(review): the [0][0][0] indexing assumes an older scipy stats.mode
    # return layout on this input shape — verify with the pinned scipy version.
    mode = stats.mode(y_train.cpu())[0][0][0]
    majority_guess = [mode for _ in preds]
    guess_acc = accuracy_score(y_test, majority_guess)
    guess_f1 = f1_score(y_test, majority_guess, average='macro')
    guess_prec = precision_score(y_test, majority_guess, average='macro')
    guess_rec = recall_score(y_test, majority_guess, average='macro')
    return (acc, f1, prec, rec, guess_acc, guess_f1, guess_prec, guess_rec)
class ConfigurationVersioningTest(unittest.TestCase):
    """Tests for version-specific configuration-file resolution.

    NOTE(review): both tests fetch configurations from the Hugging Face Hub,
    so they require network access.
    """
    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained('bert-base-cased')
        # Advertise a versioned config file that matches the running version:
        # the modified copy (hidden_size=2) must be picked up.
        configuration.configuration_files = ['config.4.0.0.json']
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, 'config.4.0.0.json'), 'w'))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)
            # Advertise only a future-version file: loading must fall back to
            # the plain config.json (hidden_size=768).
            configuration.configuration_files = ['config.42.0.0.json']
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, 'config.4.0.0.json'), os.path.join(tmp_dir, 'config.42.0.0.json'))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
    def test_repo_versioning_before(self):
        # Hub repo that ships two config versions side by side.
        repo = 'hf-internal-testing/test-two-configs'
        import transformers as new_transformers
        # Monkey-patch the library version so resolution picks the v4 config.
        new_transformers.configuration_utils.__version__ = 'v4.0.0'
        (new_configuration, kwargs) = new_transformers.models.auto.AutoConfig.from_pretrained(repo, return_unused_kwargs=True)
        self.assertEqual(new_configuration.hidden_size, 2)
        # Extra kwargs in the versioned file must not leak through.
        self.assertDictEqual(kwargs, {})
        import transformers as old_transformers
        # Pretend to be an older library: the base config must be used instead.
        old_transformers.configuration_utils.__version__ = 'v3.0.0'
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
def shufflenet_g1_w1(**kwargs):
    """ShuffleNet with 1 group and width scale 1.0; kwargs are forwarded to
    get_shufflenet (see it for the accepted options)."""
    return get_shufflenet(groups=1, width_scale=1.0, model_name='shufflenet_g1_w1', **kwargs)
def get_down_seq(ni, nf, no):
    """Build the layer list for a strided-conv downsampling tower.

    Five stride-2 Conv2d stages take `ni` channels through nf, 2nf, 4nf, 8nf,
    16nf (InstanceNorm + LeakyReLU after each except the first, which has no
    norm), followed by a final stride-1 Conv2d projecting to `no` channels.

    Returns:
        A plain list of torch.nn modules, in order.
    """
    def _stage(cin, cout, norm=True):
        # One downsampling step: 4x4 conv, stride 2, padding 1.
        layers = [nn.Conv2d(cin, cout, 4, 2, 1)]
        if norm:
            layers.append(nn.InstanceNorm2d(cout))
        layers.append(nn.LeakyReLU(0.2, inplace=True))
        return layers
    sequence = _stage(ni, nf, norm=False)
    sequence += _stage(nf, nf * 2)
    sequence += _stage(nf * 2, nf * 4)
    sequence += _stage(nf * 4, nf * 8)
    sequence += _stage(nf * 8, nf * 16)
    # Final projection: 4x4 conv, stride 1, no padding.
    sequence.append(nn.Conv2d(nf * 16, no, 4, 1, 0))
    return sequence
class FirstOrderDifferenceLoss(torch.nn.Module):
    """L1 loss between the first-order differences of prediction and target.

    Penalizes mismatched local slopes rather than absolute values.
    """
    def __init__(self, reduction: str='mean'):
        super().__init__()
        # Underlying L1 criterion applied to the differenced signals.
        self.loss = torch.nn.L1Loss(reduction=reduction)
    def forward(self, pred, target):
        """Return L1(diff(pred), diff(target)) with the configured reduction."""
        return self.loss(torch.diff(pred), torch.diff(target))
class ConfigTester(object):
    """Reusable harness for exercising a transformers-style config class.

    Driven by a parent ``unittest.TestCase`` (``parent``) whose assertion
    methods perform all checks; ``kwargs`` are the constructor arguments
    used to build ``config_class`` instances in each test.
    """
    def __init__(self, parent, config_class=None, has_text_modality=True, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
    def create_and_test_config_common_properties(self):
        """Check the standard properties exist and (where settable) round-trip."""
        config = self.config_class(**self.inputs_dict)
        common_properties = ['hidden_size', 'num_attention_heads', 'num_hidden_layers']
        if self.has_text_modality:
            common_properties.extend(['vocab_size'])
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f'`{prop}` does not exist')
        # Set each property via attribute assignment; NotImplementedError
        # signals an intentionally read-only property and is tolerated.
        for (idx, name) in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(getattr(config, name), idx, msg=f'`{name} value {idx} expected, but was {getattr(config, name)}')
            except NotImplementedError:
                pass
        # Same round-trip, but through the constructor kwargs.
        for (idx, name) in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(getattr(config, name), idx, msg=f'`{name} value {idx} expected, but was {getattr(config, name)}')
            except NotImplementedError:
                pass
    def create_and_test_config_to_json_string(self):
        """Serialized JSON string must reflect every constructor kwarg."""
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for (key, value) in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)
    def create_and_test_config_to_json_file(self):
        """Round-trip the config through ``to_json_file``/``from_json_file``."""
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, 'config.json')
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_from_and_save_pretrained(self):
        """Round-trip the config through ``save_pretrained``/``from_pretrained``."""
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        """Same round-trip, but loading via the ``subfolder=`` argument."""
        config_first = self.config_class(**self.inputs_dict)
        subfolder = 'test'
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_with_num_labels(self):
        """id2label/label2id must track ``num_labels`` at init and on reassignment."""
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)
        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)
    def check_config_can_be_init_without_params(self):
        """A non-composite config must be constructible with no arguments."""
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)
    def check_config_arguments_init(self):
        """Every key of ``config_common_kwargs`` must round-trip through the
        constructor; ``torch_dtype`` is compared specially against the torch
        dtype object (skipped when torch is unavailable)."""
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for (key, value) in config_common_kwargs.items():
            if (key == 'torch_dtype'):
                if (not is_torch_available()):
                    continue
                else:
                    import torch
                    if (config.torch_dtype != torch.float16):
                        wrong_values.append(('torch_dtype', config.torch_dtype, torch.float16))
            elif (getattr(config, key) != value):
                wrong_values.append((key, getattr(config, key), value))
        if (len(wrong_values) > 0):
            errors = '\n'.join([f'- {v[0]}: got {v[1]} instead of {v[2]}' for v in wrong_values])
            raise ValueError(f'''The following keys were not properly set in the config:
{errors}''')
    def run_common_tests(self):
        """Run the full battery of common config checks in order."""
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
def parse_arguments():
    """Parse command-line options for the ONNX model conversion.

    Returns an ``argparse.Namespace`` with ``input_model`` (defaults to the
    Inception v1 ONNX file) and the required ``output_model`` path.
    """
    cli = argparse.ArgumentParser()
    cli.add_argument('--input_model', type=str, required=False, default='inception-v1-12.onnx')
    cli.add_argument('--output_model', type=str, required=True)
    return cli.parse_args()
def load_dataset(config: CfgNode, return_class=True, test=False):
    """Load train/validation/test splits via the configured processor.

    Each missing split is tolerated (a warning is logged and the split is
    None); if *all* splits are missing, an error is logged and the process
    exits. When ``test`` is true only the test split is loaded. Returns
    ``(train, valid, test)``, with the processor instance appended when
    ``return_class`` is true.
    """
    ds_cfg = config.dataset
    processor = PROCESSORS[ds_cfg.name.lower()]()

    def _fetch(getter, split_name):
        # Missing files are expected for some datasets; warn and move on.
        try:
            return getter(ds_cfg.path)
        except FileNotFoundError:
            logger.warning(f'Has no {split_name} dataset in {ds_cfg.path}.')
            return None

    train_dataset = None
    valid_dataset = None
    if not test:
        train_dataset = _fetch(processor.get_train_examples, 'training')
        valid_dataset = _fetch(processor.get_dev_examples, 'validation')
    test_dataset = _fetch(processor.get_test_examples, 'test')
    if train_dataset is None and valid_dataset is None and test_dataset is None:
        logger.error(('Dataset is empty. Either there is no download or the path is wrong. ' + 'If not downloaded, please `cd datasets/` and `bash download_xxx.sh`'))
        exit()
    if return_class:
        return (train_dataset, valid_dataset, test_dataset, processor)
    return (train_dataset, valid_dataset, test_dataset)
def get_root_logger(log_file=None, log_level=logging.INFO):
    """Return the package root logger, configuring it on first use.

    Subsequent calls return the already-configured logger untouched.
    Non-zero distributed ranks are silenced to ERROR; rank 0 may
    additionally log to ``log_file`` when one is given.
    """
    root = logging.getLogger(__name__.split('.')[0])
    # Existing handlers mean a previous call already set everything up.
    if root.hasHandlers():
        return root
    fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(format=fmt, level=log_level)
    rank, _ = get_dist_info()
    if rank != 0:
        # Only the main process should produce regular output.
        root.setLevel('ERROR')
    elif log_file is not None:
        handler = logging.FileHandler(log_file, 'w')
        handler.setFormatter(logging.Formatter(fmt))
        handler.setLevel(log_level)
        root.addHandler(handler)
    return root
class OwlViTFeatureExtractor(metaclass=DummyObject):
    # Placeholder class emitted when the real implementation cannot be
    # imported; it exists only to raise a clear missing-backend error.
    _backends = ['vision']
    def __init__(self, *args, **kwargs):
        # Raises a helpful error if the 'vision' backend is unavailable --
        # NOTE(review): exact behavior comes from requires_backends,
        # defined elsewhere in the project.
        requires_backends(self, ['vision'])
class Dataset(object):
    """MNIST wrapper exposing the *test* split through the sequence protocol.

    Loads MNIST via Keras once, scales pixel values into [0, 1], and keeps
    both splits as attributes. ``len()`` and indexing operate on the test
    split only.
    """

    def __init__(self, batch_size=100):
        # batch_size is accepted for interface compatibility but unused here.
        mnist = keras.datasets.mnist
        (train_images, train_labels), (test_images, test_labels) = mnist.load_data()
        # Scale raw uint8 pixels (0-255) to floats in [0, 1].
        self.train_images = train_images / 255.0
        self.test_images = test_images / 255.0
        self.train_labels = train_labels
        self.test_labels = test_labels

    def __len__(self):
        """Number of test samples."""
        return len(self.test_images)

    def __getitem__(self, idx):
        """Return the ``(image, label)`` pair at ``idx`` from the test split."""
        return (self.test_images[idx], self.test_labels[idx])
class COCOPanopticEvaluator(DatasetEvaluator):
    """Evaluate panoptic segmentation predictions with the PQ metric.

    Collects per-image predictions as PNG-encoded panoptic id maps, gathers
    them across workers, writes a COCO-format predictions JSON, and runs
    panopticapi's ``pq_compute`` against the ground truth.
    """

    def __init__(self, dataset_name, output_dir):
        """
        Args:
            dataset_name: registered dataset whose metadata carries the
                thing/stuff contiguous-id mappings and ground-truth paths.
            output_dir: directory in which ``predictions.json`` is written.
        """
        self._metadata = MetadataCatalog.get(dataset_name)
        # Invert the dataset-id -> contiguous-id maps so predicted category
        # ids can be converted back to original dataset ids.
        self._thing_contiguous_id_to_dataset_id = {v: k for (k, v) in self._metadata.thing_dataset_id_to_contiguous_id.items()}
        self._stuff_contiguous_id_to_dataset_id = {v: k for (k, v) in self._metadata.stuff_dataset_id_to_contiguous_id.items()}
        self._predictions_json = os.path.join(output_dir, 'predictions.json')

    def reset(self):
        """Clear accumulated predictions before a new evaluation run."""
        self._predictions = []

    def _convert_category_id(self, segment_info):
        """Map a segment's contiguous category id back to the dataset id.

        Segments without an ``isthing`` flag are returned unchanged.
        """
        isthing = segment_info.pop('isthing', None)
        if isthing is None:
            return segment_info
        if isthing is True:
            segment_info['category_id'] = self._thing_contiguous_id_to_dataset_id[segment_info['category_id']]
        else:
            segment_info['category_id'] = self._stuff_contiguous_id_to_dataset_id[segment_info['category_id']]
        return segment_info

    def process(self, inputs, outputs):
        """Record one batch of predictions as PNG-encoded panoptic maps."""
        from panopticapi.utils import id2rgb
        for (input, output) in zip(inputs, outputs):
            (panoptic_img, segments_info) = output['panoptic_seg']
            panoptic_img = panoptic_img.cpu().numpy()
            file_name = os.path.basename(input['file_name'])
            file_name_png = (os.path.splitext(file_name)[0] + '.png')
            with io.BytesIO() as out:
                Image.fromarray(id2rgb(panoptic_img)).save(out, format='PNG')
                segments_info = [self._convert_category_id(x) for x in segments_info]
                # BUGFIX: out.getvalue() must run while the BytesIO buffer is
                # still open; previously these two statements sat *after* the
                # `with` block, so getvalue() raised ValueError on a closed
                # buffer.
                self._predictions.append({'image_id': input['image_id'], 'file_name': file_name_png, 'png_string': out.getvalue(), 'segments_info': segments_info})

    def evaluate(self):
        """Gather predictions across workers and compute PQ/SQ/RQ.

        Returns an OrderedDict of results on the main process, None on
        all other workers.
        """
        comm.synchronize()
        self._predictions = comm.gather(self._predictions)
        self._predictions = list(itertools.chain(*self._predictions))
        if (not comm.is_main_process()):
            return
        gt_json = PathManager.get_local_path(self._metadata.panoptic_json)
        gt_folder = PathManager.get_local_path(self._metadata.panoptic_root)
        with tempfile.TemporaryDirectory(prefix='panoptic_eval') as pred_dir:
            logger.info('Writing all panoptic predictions to {} ...'.format(pred_dir))
            # Materialize each prediction's PNG into the temp dir; pop the
            # raw bytes so they do not end up in the JSON below.
            for p in self._predictions:
                with open(os.path.join(pred_dir, p['file_name']), 'wb') as f:
                    f.write(p.pop('png_string'))
            with open(gt_json, 'r') as f:
                json_data = json.load(f)
            json_data['annotations'] = self._predictions
            with PathManager.open(self._predictions_json, 'w') as f:
                f.write(json.dumps(json_data))
            from panopticapi.evaluation import pq_compute
            # pq_compute prints its own tables; swallow them and use our
            # formatted summary below instead.
            with contextlib.redirect_stdout(io.StringIO()):
                pq_res = pq_compute(gt_json, PathManager.get_local_path(self._predictions_json), gt_folder=gt_folder, pred_folder=pred_dir)
        res = {}
        res['PQ'] = (100 * pq_res['All']['pq'])
        res['SQ'] = (100 * pq_res['All']['sq'])
        res['RQ'] = (100 * pq_res['All']['rq'])
        res['PQ_th'] = (100 * pq_res['Things']['pq'])
        res['SQ_th'] = (100 * pq_res['Things']['sq'])
        res['RQ_th'] = (100 * pq_res['Things']['rq'])
        res['PQ_st'] = (100 * pq_res['Stuff']['pq'])
        res['SQ_st'] = (100 * pq_res['Stuff']['sq'])
        res['RQ_st'] = (100 * pq_res['Stuff']['rq'])
        results = OrderedDict({'panoptic_seg': res})
        _print_panoptic_results(pq_res)
        return results
def accuracy(test=None, reference=None, confusion_matrix=None, **kwargs):
    """Classification accuracy: (TP + TN) / (TP + FP + TN + FN).

    A ConfusionMatrix is built from ``test``/``reference`` unless one is
    supplied directly. Extra keyword arguments are accepted (and ignored)
    for interface uniformity with the other metric functions.
    """
    cm = ConfusionMatrix(test, reference) if confusion_matrix is None else confusion_matrix
    tp, fp, tn, fn = cm.get_matrix()
    correct = tp + tn
    total = tp + fp + tn + fn
    return float(correct / total)
class DownBlock3D(nn.Module):
    """3D UNet down block: per layer a ResnetBlock2D followed by a
    TemporalConvLayer, optionally ending in a spatial downsampler.

    ``forward`` returns the final hidden states plus a tuple of
    intermediate states (one per layer, plus one after downsampling)
    used as skip connections by the corresponding up block.
    """
    def __init__(self, in_channels: int, out_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, output_scale_factor=1.0, add_downsample=True, downsample_padding=1):
        super().__init__()
        resnets = []
        temp_convs = []
        for i in range(num_layers):
            # Only the first layer maps in_channels -> out_channels; the
            # rest operate at out_channels.
            in_channels = (in_channels if (i == 0) else out_channels)
            resnets.append(ResnetBlock2D(in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm))
            temp_convs.append(TemporalConvLayer(out_channels, out_channels, dropout=0.1))
        self.resnets = nn.ModuleList(resnets)
        self.temp_convs = nn.ModuleList(temp_convs)
        if add_downsample:
            self.downsamplers = nn.ModuleList([Downsample2D(out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name='op')])
        else:
            self.downsamplers = None
        self.gradient_checkpointing = False
    def forward(self, hidden_states, temb=None, num_frames=1):
        """Run the resnet/temporal-conv stack, collecting skip states.

        Args:
            hidden_states: input feature tensor.
            temb: optional time embedding passed to each resnet.
            num_frames: number of video frames for the temporal convolutions.
        """
        output_states = ()
        for (resnet, temp_conv) in zip(self.resnets, self.temp_convs):
            hidden_states = resnet(hidden_states, temb)
            hidden_states = temp_conv(hidden_states, num_frames=num_frames)
            output_states += (hidden_states,)
        if (self.downsamplers is not None):
            for downsampler in self.downsamplers:
                hidden_states = downsampler(hidden_states)
            # Also expose the post-downsample state as a skip connection.
            output_states += (hidden_states,)
        return (hidden_states, output_states)
def register_datasets(datasets_data: Iterable[CocoDatasetInfo], datasets_root: Optional[os.PathLike]=None):
    """Register every dataset description in ``datasets_data``.

    Thin convenience wrapper that forwards each entry, together with the
    optional shared root directory, to ``register_dataset``.
    """
    for info in datasets_data:
        register_dataset(info, datasets_root)
def add_bel_output(bel, wire, port):
    """Record that output ``port`` of ``bel`` is attached to ``wire``.

    Updates the module-level indexes: ``wire_belports`` maps each wire to
    the set of (bel, port) pairs touching it, and ``bel_wires[bel]`` gains
    a ``(port_constid, 1, wire)`` entry. NOTE(review): the constant 1
    presumably marks the entry as an output -- confirm against the
    consumer of ``bel_wires``.
    """
    # setdefault replaces the explicit membership check + empty-set insert.
    wire_belports.setdefault(wire, set()).add((bel, port))
    bel_wires[bel].append((constids[port], 1, wire))
_bs4
_tokenizers
class MarkupLMProcessorTest(unittest.TestCase):
    """Tests for MarkupLMProcessor save/load round-trips and input names.

    ``setUp`` writes a tiny BPE vocab/merges pair, a tokenizer config with
    a tags dictionary, and a feature-extractor config into a temp dir so
    both slow and fast tokenizers can be loaded from disk.
    """
    tokenizer_class = MarkupLMTokenizer
    rust_tokenizer_class = MarkupLMTokenizerFast
    def setUp(self):
        """Materialize minimal tokenizer/feature-extractor files on disk."""
        # Minimal BPE vocabulary; 'G' plays the role of the word-boundary
        # marker in the merge rules below.
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'G', 'Gl', 'Gn', 'Glo', 'Glow', 'er', 'Glowest', 'Gnewer', 'Gwider', 'Ghello', 'Gworld', '<unk>']
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'G l', 'Gl o', 'Glo w', 'e r', '']
        # HTML tag -> id mapping required by the MarkupLM tokenizer config.
        self.tags_dict = {'a': 0, 'abbr': 1, 'acronym': 2, 'address': 3}
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        self.tokenizer_config_file = os.path.join(self.tmpdirname, 'tokenizer_config.json')
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write((json.dumps(vocab_tokens) + '\n'))
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
        with open(self.tokenizer_config_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps({'tags_dict': self.tags_dict}))
        feature_extractor_map = {'feature_extractor_type': 'MarkupLMFeatureExtractor'}
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.feature_extraction_file, 'w', encoding='utf-8') as fp:
            fp.write((json.dumps(feature_extractor_map) + '\n'))
    def get_tokenizer(self, **kwargs) -> PreTrainedTokenizer:
        """Load the slow (Python) tokenizer from the temp dir."""
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs) -> PreTrainedTokenizerFast:
        """Load the fast (Rust-backed) tokenizer from the temp dir."""
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_tokenizers(self, **kwargs) -> List[PreTrainedTokenizerBase]:
        """Both tokenizer variants, for tests that must cover each."""
        return [self.get_tokenizer(**kwargs), self.get_rust_tokenizer(**kwargs)]
    def get_feature_extractor(self, **kwargs):
        """Load the feature extractor from the temp dir."""
        return MarkupLMFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        """save_pretrained/from_pretrained must preserve both components."""
        feature_extractor = self.get_feature_extractor()
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            processor = MarkupLMProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(self.tmpdirname)
            processor = MarkupLMProcessor.from_pretrained(self.tmpdirname)
            self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
            self.assertIsInstance(processor.tokenizer, (MarkupLMTokenizer, MarkupLMTokenizerFast))
            self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
            self.assertIsInstance(processor.feature_extractor, MarkupLMFeatureExtractor)
    def test_save_load_pretrained_additional_features(self):
        """Extra kwargs passed to from_pretrained must override saved state,
        for both the slow (use_fast=False) and fast tokenizer paths."""
        processor = MarkupLMProcessor(feature_extractor=self.get_feature_extractor(), tokenizer=self.get_tokenizer())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        feature_extractor_add_kwargs = self.get_feature_extractor(do_resize=False, size=30)
        processor = MarkupLMProcessor.from_pretrained(self.tmpdirname, use_fast=False, bos_token='(BOS)', eos_token='(EOS)', do_resize=False, size=30)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, MarkupLMTokenizer)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, MarkupLMFeatureExtractor)
        tokenizer_add_kwargs = self.get_rust_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        feature_extractor_add_kwargs = self.get_feature_extractor(do_resize=False, size=30)
        processor = MarkupLMProcessor.from_pretrained(self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_resize=False, size=30)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, MarkupLMTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, MarkupLMFeatureExtractor)
    def test_model_input_names(self):
        """Processor must expose the tokenizer's model input names."""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = MarkupLMProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        self.assertListEqual(processor.model_input_names, tokenizer.model_input_names, msg='`processor` and `tokenizer` model input names do not match')
class TestNet(unittest.TestCase):
    """Exercise pycaffe's Net wrapper: forward/backward, partial runs,
    parameter access, and serialization round-trips on a tiny net."""
    def setUp(self):
        self.num_output = 13
        net_file = simple_net_file(self.num_output)
        self.net = caffe.Net(net_file, caffe.TRAIN)
        # Fill the label blob with valid class indices so a loss can be
        # computed during forward/backward tests.
        self.net.blobs['label'].data[...] = np.random.randint(self.num_output, size=self.net.blobs['label'].data.shape)
        os.remove(net_file)
    def test_memory(self):
        """Touch all param/blob memory after deleting the net; held Python
        references must keep the underlying buffers alive."""
        params = sum(map(list, six.itervalues(self.net.params)), [])
        blobs = self.net.blobs.values()
        del self.net
        total = 0
        for p in params:
            total += (p.data.sum() + p.diff.sum())
        for bl in blobs:
            total += (bl.data.sum() + bl.diff.sum())
    def test_layer_dict(self):
        """layer_dict must mirror the ordered layer-name list."""
        layer_dict = self.net.layer_dict
        self.assertEqual(list(layer_dict.keys()), list(self.net._layer_names))
        for (i, name) in enumerate(self.net._layer_names):
            self.assertEqual(layer_dict[name].type, self.net.layers[i].type)
    def test_forward_backward(self):
        self.net.forward()
        self.net.backward()
    def test_forward_start_end(self):
        """forward(start=..., end=...) over just the 'ip' layer must match a
        manual dot-product + bias computation."""
        conv_blob = self.net.blobs['conv']
        ip_blob = self.net.blobs['ip_blob']
        sample_data = np.random.uniform(size=conv_blob.data.shape)
        sample_data = sample_data.astype(np.float32)
        conv_blob.data[:] = sample_data
        forward_blob = self.net.forward(start='ip', end='ip')
        self.assertIn('ip_blob', forward_blob)
        manual_forward = []
        for i in range(0, conv_blob.data.shape[0]):
            dot = np.dot(self.net.params['ip'][0].data, conv_blob.data[i].reshape((- 1)))
            manual_forward.append((dot + self.net.params['ip'][1].data))
        manual_forward = np.array(manual_forward)
        np.testing.assert_allclose(ip_blob.data, manual_forward, rtol=0.001, atol=1e-05)
    def test_backward_start_end(self):
        """backward(start=..., end=...) over 'ip' must match the manual
        transposed-weight gradient."""
        conv_blob = self.net.blobs['conv']
        ip_blob = self.net.blobs['ip_blob']
        sample_data = np.random.uniform(size=ip_blob.data.shape)
        sample_data = sample_data.astype(np.float32)
        ip_blob.diff[:] = sample_data
        backward_blob = self.net.backward(start='ip', end='ip')
        self.assertIn('conv', backward_blob)
        manual_backward = []
        for i in range(0, conv_blob.data.shape[0]):
            dot = np.dot(self.net.params['ip'][0].data.transpose(), sample_data[i].reshape((- 1)))
            manual_backward.append(dot)
        manual_backward = np.array(manual_backward)
        manual_backward = manual_backward.reshape(conv_blob.data.shape)
        np.testing.assert_allclose(conv_blob.diff, manual_backward, rtol=0.001, atol=1e-05)
    def test_clear_param_diffs(self):
        """clear_param_diffs must zero gradients accumulated by backward."""
        self.net.forward()
        self.net.backward()
        diff = self.net.params['conv'][0].diff
        self.assertTrue((diff.max() > 0))
        self.net.clear_param_diffs()
        self.assertTrue((diff == 0).all())
    def test_inputs_outputs(self):
        self.assertEqual(self.net.inputs, [])
        self.assertEqual(self.net.outputs, ['loss'])
    def test_top_bottom_names(self):
        """Per-layer top/bottom blob names must match the net definition."""
        self.assertEqual(self.net.top_names, OrderedDict([('data', ['data', 'label']), ('conv', ['conv']), ('ip', ['ip_blob']), ('loss', ['loss'])]))
        self.assertEqual(self.net.bottom_names, OrderedDict([('data', []), ('conv', ['data']), ('ip', ['conv']), ('loss', ['ip_blob', 'label'])]))
    def test_save_and_read(self):
        """Saved binary weights must load identically via both the
        positional-weights and the weights= keyword constructors."""
        f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
        f.close()
        self.net.save(f.name)
        net_file = simple_net_file(self.num_output)
        caffe.Net(net_file, f.name, caffe.TRAIN)
        net2 = caffe.Net(net_file, caffe.TRAIN, weights=f.name)
        os.remove(net_file)
        os.remove(f.name)
        for name in self.net.params:
            for i in range(len(self.net.params[name])):
                self.assertEqual(abs((self.net.params[name][i].data - net2.params[name][i].data)).sum(), 0)
    def test_save_hdf5(self):
        """Weights round-tripped through HDF5 must be bit-identical."""
        f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
        f.close()
        self.net.save_hdf5(f.name)
        net_file = simple_net_file(self.num_output)
        net2 = caffe.Net(net_file, caffe.TRAIN)
        net2.load_hdf5(f.name)
        os.remove(net_file)
        os.remove(f.name)
        for name in self.net.params:
            for i in range(len(self.net.params[name])):
                self.assertEqual(abs((self.net.params[name][i].data - net2.params[name][i].data)).sum(), 0)
def predictor(mocker):
    """Build a mock predictor whose posterior methods delegate to the
    module-level stub implementations (get_post_fmean/fcov/samples)."""
    stub = mocker.MagicMock()
    stub.get_post_fmean = mocker.MagicMock(side_effect=get_post_fmean)
    stub.get_post_fcov = mocker.MagicMock(side_effect=get_post_fcov)
    stub.get_post_samples = mocker.MagicMock(side_effect=get_post_samples)
    return stub
class RedisClient():
    """Thin wrapper around the robot's Redis-backed vehicle interface.

    Per-vehicle state lives under ``mmp::bot<N>::veh::...`` keys, where N
    is the trailing digit of this machine's hostname; a few fleet-wide
    flags (cstop, emergency_shutdown) live under bare ``mmp::`` keys.
    """

    def __init__(self):
        hostname = socket.gethostname()
        assert hostname.startswith(ROBOT_HOSTNAME_PREFIX)
        # Bot number is the last character of the hostname.
        self.bot_num = int(hostname[-1])
        self.client = Redis(f'{ROBOT_HOSTNAME_PREFIX}{self.bot_num}', password=REDIS_PASSWORD, decode_responses=True)

    def _veh_key(self, suffix):
        """Namespaced key for this vehicle's Redis entries."""
        return f'mmp::bot{self.bot_num}::veh::{suffix}'

    def get_driver_version(self):
        """Return the freshly republished driver version string (or None)."""
        key = self._veh_key('driver_version')
        # Delete then briefly wait so the value read below comes from the
        # driver republishing it, not from a stale entry.
        self.client.delete(key)
        time.sleep(3 * 0.008)
        return self.client.get(key)

    def get_pose(self):
        """Current pose as a tuple of floats parsed from 'x y theta'."""
        raw = self.client.get(self._veh_key('sensor::x'))
        return tuple(map(float, raw.split(' ')))

    def set_target_pose(self, pose):
        """Command a target pose given as a 3-sequence (x, y, theta)."""
        x, y, theta = pose[0], pose[1], pose[2]
        self.client.set(self._veh_key('control::x'), f'{x} {y} {theta}')

    def get_goal_reached(self):
        """True once the controller reports the goal pose was reached."""
        return bool(int(self.client.get(self._veh_key('sensor::goal_reached'))))

    def set_stop(self, value):
        """Set or clear the vehicle's stop flag (stored as 0/1)."""
        self.client.set(self._veh_key('stop'), int(value))

    def set_max_velocity(self, max_vel_x, max_vel_y, max_vel_theta):
        """Set the velocity limits as a space-separated triple."""
        self.client.set(self._veh_key('control::max_vel'), f'{max_vel_x} {max_vel_y} {max_vel_theta}')

    def set_max_acceleration(self, max_accel_x, max_accel_y, max_accel_theta):
        """Set the acceleration limits as a space-separated triple."""
        self.client.set(self._veh_key('control::max_accel'), f'{max_accel_x} {max_accel_y} {max_accel_theta}')

    def get_velocity(self):
        """Current velocity as a tuple of floats parsed from 'dx dy dtheta'."""
        raw = self.client.get(self._veh_key('sensor::dx'))
        return tuple(map(float, raw.split(' ')))

    def get_cstop(self):
        # Fleet-wide flag: not bot-scoped.
        return bool(int(self.client.get('mmp::cstop')))

    def get_emergency_shutdown(self):
        # Fleet-wide flag: not bot-scoped.
        return bool(int(self.client.get('mmp::emergency_shutdown')))
class FixedWindowScheduler():
    """FCFS scheduler batching sequence groups under fixed token/seq caps.

    Maintains waiting/running queues, schedules prompt runs until either
    the batched-token or the max-seqs budget is exhausted, and drops
    per-sequence KV-cache entries when sequences finish.
    NOTE(review): ``kv_cache`` appears to be indexed as
    ``kv_cache[layer][k_or_v][seq_id]`` based on the deletion loops below --
    confirm against the cache producer.
    """
    def __init__(self, scheduler_config: SchedulerConfig, kv_cache: Optional) -> None:
        self.scheduler_config = scheduler_config
        # A prompt may not exceed the model context length or the
        # per-iteration token budget, whichever is smaller.
        self.prompt_limit = min(self.scheduler_config.max_model_len, self.scheduler_config.max_num_batched_tokens)
        self.policy = PolicyFactory.get_policy(policy_name='fcfs')
        self.waiting: List[SequenceGroup] = []
        self.running: List[SequenceGroup] = []
        # Sequence ids freed since the last schedule() call; reported once
        # via SchedulerOutputs.finished_seqs and then discarded.
        self.cleaned: List[int] = []
        self.kv_cache = kv_cache
        self.swapped: List[SequenceGroup] = []
    def add_seq_group(self, seq_group: SequenceGroup) -> None:
        """Enqueue a new request for scheduling."""
        self.waiting.append(seq_group)
    def abort_seq_group(self, request_id: Union[(str, Iterable[str])]) -> None:
        """Abort the given request id(s), freeing their unfinished sequences."""
        if isinstance(request_id, str):
            request_id = (request_id,)
        request_ids = set(request_id)
        for state_queue in [self.waiting, self.running]:
            # Reverse iteration: recently added requests are the likeliest
            # abort targets, and removal during iteration is safe this way.
            for seq_group in reversed(state_queue):
                if (seq_group.request_id in request_ids):
                    state_queue.remove(seq_group)
                    for seq in seq_group.get_seqs():
                        if seq.is_finished():
                            continue
                        seq.status = SequenceStatus.FINISHED_ABORTED
                        self.free_seq(seq)
                    request_ids.remove(seq_group.request_id)
                    if (not request_ids):
                        return
    def has_unfinished_seqs(self) -> bool:
        return (self.waiting or self.running)
    def get_num_unfinished_seq_groups(self) -> int:
        return (len(self.waiting) + len(self.running))
    def _schedule(self) -> SchedulerOutputs:
        """Pick the next batch: prompt run if possible, else decode run."""
        now = time.monotonic()
        ignored_seq_groups: List[SequenceGroup] = []
        scheduled: List[SequenceGroup] = []
        # Hand the accumulated finished ids to this batch's outputs.
        finished_seqs: List[int] = self.cleaned.copy()
        self.cleaned = []
        num_curr_seqs = sum((seq_group.get_max_num_running_seqs() for seq_group in self.running))
        num_batched_tokens = 0
        if (not self.swapped):
            seq_lens = []
            # Admit waiting prompts FCFS until a budget would be exceeded.
            while self.waiting:
                seq_group = self.waiting[0]
                invalidInputError((seq_group.num_seqs() == 1), 'Waiting sequence group should have only one prompt sequence.')
                num_prompt_tokens = seq_group.get_seqs()[0].get_len()
                if (num_prompt_tokens > self.prompt_limit):
                    # Over-long prompts can never be scheduled: mark ignored.
                    logger.warning(f'Input prompt ({num_prompt_tokens} tokens) is too long and exceeds limit of {self.prompt_limit}')
                    for seq in seq_group.get_seqs():
                        seq.status = SequenceStatus.FINISHED_IGNORED
                    ignored_seq_groups.append(seq_group)
                    self.waiting.pop(0)
                    continue
                # Token cost is padded to the longest prompt in the batch.
                new_seq_lens = (seq_lens + [num_prompt_tokens])
                num_batched_tokens = (len(new_seq_lens) * max(new_seq_lens))
                if (num_batched_tokens > self.scheduler_config.max_num_batched_tokens):
                    break
                num_new_seqs = seq_group.get_max_num_running_seqs()
                if ((num_curr_seqs + num_new_seqs) > self.scheduler_config.max_num_seqs):
                    break
                seq_group = self.waiting.pop(0)
                for seq in seq_group.get_seqs():
                    seq.status = SequenceStatus.RUNNING
                seq_lens = new_seq_lens
                self.running.append(seq_group)
                num_batched_tokens += num_prompt_tokens
                num_curr_seqs += num_new_seqs
                scheduled.append(seq_group)
            if (scheduled or ignored_seq_groups):
                scheduler_outputs = SchedulerOutputs(scheduled_seq_groups=scheduled, prompt_run=True, num_batched_tokens=((len(seq_lens) * max(seq_lens)) if seq_lens else 0), ignored_seq_groups=ignored_seq_groups, finished_seqs=finished_seqs)
                return scheduler_outputs
        # No prompts admitted: run a decode step over everything running.
        self.running = self.policy.sort_by_priority(now, self.running)
        running: List[SequenceGroup] = []
        preempted: List[SequenceGroup] = []
        while self.running:
            seq_group = self.running.pop(0)
            running.append(seq_group)
        self.running = running
        # One token per running sequence in a decode step.
        num_batched_tokens = sum((seq_group.num_seqs(status=SequenceStatus.RUNNING) for seq_group in self.running))
        scheduler_outputs = SchedulerOutputs(scheduled_seq_groups=self.running, prompt_run=False, num_batched_tokens=num_batched_tokens, ignored_seq_groups=[], finished_seqs=finished_seqs)
        return scheduler_outputs
    def schedule(self) -> Tuple[(List[SequenceGroupMetadata], SchedulerOutputs)]:
        """Run _schedule() and package per-group metadata for the workers."""
        scheduler_outputs = self._schedule()
        seq_group_metadata_list: List[SequenceGroupMetadata] = []
        for seq_group in scheduler_outputs.scheduled_seq_groups:
            seq_data: Dict[(int, List[SequenceData])] = {}
            for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING):
                seq_id = seq.seq_id
                seq_data[seq_id] = seq.data
            seq_group_metadata = SequenceGroupMetadata(request_id=seq_group.request_id, is_prompt=scheduler_outputs.prompt_run, seq_data=seq_data, sampling_params=seq_group.sampling_params)
            seq_group_metadata_list.append(seq_group_metadata)
        return (seq_group_metadata_list, scheduler_outputs)
    def free_seq(self, seq: Sequence) -> None:
        """Record a finished sequence and purge its KV-cache entries."""
        self.cleaned.append(seq.seq_id)
        for i in range(len(self.kv_cache)):
            for j in range(2):
                if (not (self.kv_cache[i][j].get(seq.seq_id) is None)):
                    del self.kv_cache[i][j][seq.seq_id]
    def free_finished_seq_groups(self) -> None:
        """Drop finished groups from the running queue."""
        self.running = [seq_group for seq_group in self.running if (not seq_group.is_finished())]
    def _preempt(self, seq_group: SequenceGroup, blocks_to_swap_out: Optional[Dict[(int, int)]]=None, preemption_mode: Optional[PreemptionMode]=None) -> None:
        """Preempt a running group, choosing recompute for single-sequence
        groups (cheaper to redo the prompt) and swap otherwise."""
        if (preemption_mode is None):
            if (seq_group.get_max_num_running_seqs() == 1):
                preemption_mode = PreemptionMode.RECOMPUTE
            else:
                preemption_mode = PreemptionMode.SWAP
        if (preemption_mode == PreemptionMode.RECOMPUTE):
            self._preempt_by_recompute(seq_group)
        elif (preemption_mode == PreemptionMode.SWAP):
            self._preempt_by_swap(seq_group, blocks_to_swap_out)
        else:
            raise AssertionError('Invalid preemption mode.')
    def _preempt_by_recompute(self, seq_group: SequenceGroup) -> None:
        """Move the group back to the head of the waiting queue and drop
        its KV-cache entries so the prompt is recomputed from scratch."""
        seqs = seq_group.get_seqs(status=SequenceStatus.RUNNING)
        for seq in seqs:
            seq.status = SequenceStatus.WAITING
            if (not (self.kv_cache[0][0].get(seq.seq_id) is None)):
                for i in range(len(self.kv_cache)):
                    for j in range(2):
                        del self.kv_cache[i][j][seq.seq_id]
        # Head insertion preserves the group's FCFS priority.
        self.waiting.insert(0, seq_group)
    def _preempt_by_swap(self, seq_group: SequenceGroup, blocks_to_swap_out: Dict[(int, int)]) -> None:
        """Swap the group's blocks out to CPU and park it in ``swapped``."""
        self._swap_out(seq_group, blocks_to_swap_out)
        self.swapped.append(seq_group)
    def _swap_in(self, seq_group: SequenceGroup, blocks_to_swap_in: Dict[(int, int)]) -> None:
        """Swap a group's blocks back in and mark its sequences RUNNING."""
        mapping = self.block_manager.swap_in(seq_group)
        blocks_to_swap_in.update(mapping)
        for seq in seq_group.get_seqs(status=SequenceStatus.SWAPPED):
            seq.status = SequenceStatus.RUNNING
    def _swap_out(self, seq_group: SequenceGroup, blocks_to_swap_out: Dict[(int, int)]) -> None:
        """Swap a group's blocks out to CPU; raises when no swap space."""
        if (not self.block_manager.can_swap_out(seq_group)):
            raise RuntimeError('Aborted due to the lack of CPU swap space. Please increase the swap space to avoid this error.')
        mapping = self.block_manager.swap_out(seq_group)
        blocks_to_swap_out.update(mapping)
        for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING):
            seq.status = SequenceStatus.SWAPPED
def load_data(file, col):
    """Read column ``col`` from CSV ``file``, print summary stats, return it.

    Prints pandas' describe() output and the (truncated) median, then
    returns the column exactly as loaded.
    """
    print(f".. loading data from '{file}'")
    frame = pd.read_csv(file)
    column = frame[col]
    print('')
    print(pd.Series(column).describe())
    print(f'med {int(np.median(column))}')
    print('')
    return column
def insert_new(article_list, sent):
    """Append ``sent`` to ``article_list`` in chunks of at most
    ``sent_limit`` tokens, recursing on the remainder until it fits."""
    tokens = word_tokenize(sent)
    head, tail = tokens[:sent_limit], tokens[sent_limit:]
    article_list.append(' '.join(head))
    if tail:
        insert_new(article_list, ' '.join(tail))
def main(args):
    """End-to-end ASR-based error-rate evaluation for a TSV manifest.

    Loads the manifest samples, prepares wav2vec data, runs ASR, computes
    per-sample error rates from the hypothesis/reference transcripts, and
    writes a tab-separated ``uer_<unit>.<split>.tsv`` report.
    """
    samples = load_tsv_to_dicts(args.raw_manifest)
    # id column is optional; fall back to empty ids when no header is given.
    ids = [(sample[args.id_header] if args.id_header else '') for sample in samples]
    audio_paths = [sample[args.audio_header] for sample in samples]
    texts = [sample[args.text_header] for sample in samples]
    prepare_w2v_data(args.w2v_dict_dir, args.w2v_sample_rate, args.w2v_label, audio_paths, texts, args.split, args.asr_dir)
    run_asr(args.asr_dir, args.split, args.w2v_ckpt, args.w2v_label, args.asr_dir)
    ind_to_err_rates = compute_error_rate((args.asr_dir / f'hypo.word-{args.w2v_ckpt.name}-{args.split}.txt'), (args.asr_dir / f'ref.word-{args.w2v_ckpt.name}-{args.split}.txt'), args.err_unit)
    uer_path = (args.asr_dir / f'uer_{args.err_unit}.{args.split}.tsv')
    with open(uer_path, 'w') as f:
        f.write('id\taudio\tuer\n')
        for (ind, (id_, audio_path)) in enumerate(zip(ids, audio_paths)):
            # BUGFIX: rows are tab-separated to match the 'id\taudio\tuer'
            # header and the .tsv extension (previously space-separated,
            # which broke downstream TSV parsing).
            f.write(f'{id_}\t{audio_path}\t{ind_to_err_rates[ind]:.4f}\n')
class AutoModelForSeq2SeqLM():
    """Factory dispatching to the concrete seq2seq-LM model class that
    matches a given config.

    NOTE(review): this block looks decompiled/mangled -- ``from_config``
    and ``from_pretrained`` take ``cls`` but carry no ``@classmethod``
    decorator, and the bare ``_list_option_in_docstrings(...)`` /
    ``_start_docstrings(...)`` statements in the class body read like
    decorators that lost their ``@``. Confirm against the original
    transformers source before relying on this class.
    """
    def __init__(self):
        # Direct instantiation is forbidden; use the factory methods.
        raise EnvironmentError('AutoModelForSeq2SeqLM is designed to be instantiated using the `AutoModelForSeq2SeqLM.from_pretrained(pretrained_model_name_or_path)` or `AutoModelForSeq2SeqLM.from_config(config)` methods.')
    _list_option_in_docstrings(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, use_model_types=False)
    def from_config(cls, config):
        """Build an (uninitialized-weights) model from a config object."""
        if (type(config) in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.keys()):
            return MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING[type(config)](config)
        raise ValueError('Unrecognized configuration class {} for this kind of AutoModel: {}.\nModel type should be one of {}.'.format(config.__class__, cls.__name__, ', '.join((c.__name__ for c in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.keys()))))
    _list_option_in_docstrings(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)
    _start_docstrings('Instantiate one of the model classes of the library---with a sequence-to-sequence language modeling head---from a pretrained model.', AUTO_MODEL_PRETRAINED_DOCSTRING)
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        """Load weights for the config-matched model class from a checkpoint."""
        config = kwargs.pop('config', None)
        # Resolve the config first when callers did not supply one.
        if (not isinstance(config, PretrainedConfig)):
            (config, kwargs) = AutoConfig.from_pretrained(pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs)
        if (type(config) in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.keys()):
            return MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING[type(config)].from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
        raise ValueError('Unrecognized configuration class {} for this kind of AutoModel: {}.\nModel type should be one of {}.'.format(config.__class__, cls.__name__, ', '.join((c.__name__ for c in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.keys()))))
def get_launcher(distributed=False):
    """Build the deepspeed launcher argv as a list of tokens.

    Uses up to two GPUs when ``distributed`` is requested (capped by the
    machine's actual GPU count), otherwise a single GPU.
    """
    if distributed:
        num_gpus = min(2, get_gpu_count())
    else:
        num_gpus = 1
    master_port = get_master_port(real_launcher=True)
    cmd = f'deepspeed --num_nodes 1 --num_gpus {num_gpus} --master_port {master_port}'
    return cmd.split()
class Module(BaseModule):
def __init__(self, symbol, data_names=('data',), label_names=('softmax_label',), logger=logging, context=ctx.cpu(), work_load_list=None, fixed_param_names=None, state_names=None):
super(Module, self).__init__(logger=logger)
if isinstance(context, ctx.Context):
context = [context]
self._context = context
if (work_load_list is None):
work_load_list = ([1] * len(self._context))
assert (len(work_load_list) == len(self._context))
self._work_load_list = work_load_list
self._symbol = symbol
data_names = (list(data_names) if (data_names is not None) else [])
label_names = (list(label_names) if (label_names is not None) else [])
state_names = (list(state_names) if (state_names is not None) else [])
fixed_param_names = (list(fixed_param_names) if (fixed_param_names is not None) else [])
_check_input_names(symbol, data_names, 'data', True)
_check_input_names(symbol, label_names, 'label', False)
_check_input_names(symbol, state_names, 'state', True)
_check_input_names(symbol, fixed_param_names, 'fixed_param', True)
arg_names = symbol.list_arguments()
input_names = ((data_names + label_names) + state_names)
self._param_names = [x for x in arg_names if (x not in input_names)]
self._fixed_param_names = fixed_param_names
self._aux_names = symbol.list_auxiliary_states()
self._data_names = data_names
self._label_names = label_names
self._state_names = state_names
self._output_names = symbol.list_outputs()
self._arg_params = None
self._aux_params = None
self._params_dirty = False
self._optimizer = None
self._kvstore = None
self._update_on_kvstore = None
self._updater = None
self._preload_opt_states = None
self._grad_req = None
self._exec_group = None
self._data_shapes = None
self._label_shapes = None
def load(prefix, epoch, load_optimizer_states=False, **kwargs):
(sym, args, auxs) = load_checkpoint(prefix, epoch)
mod = Module(symbol=sym, **kwargs)
mod._arg_params = args
mod._aux_params = auxs
mod.params_initialized = True
if load_optimizer_states:
mod._preload_opt_states = ('%s-%04d.states' % (prefix, epoch))
return mod
def save_checkpoint(self, prefix, epoch, save_optimizer_states=False):
    """Save symbol, parameters and (optionally) optimizer states to disk.

    Writes ``<prefix>-symbol.json``, ``<prefix>-NNNN.params`` and, when
    requested, ``<prefix>-NNNN.states``.

    CONSISTENCY FIX: log through ``self.logger`` (as bind()/init_optimizer()
    do) instead of the module-level ``logging`` root.
    """
    self._symbol.save('%s-symbol.json' % prefix)
    param_name = '%s-%04d.params' % (prefix, epoch)
    self.save_params(param_name)
    self.logger.info('Saved checkpoint to "%s"', param_name)
    if save_optimizer_states:
        state_name = '%s-%04d.states' % (prefix, epoch)
        self.save_optimizer_states(state_name)
        self.logger.info('Saved optimizer state to "%s"', state_name)
def _reset_bind(self):
self.binded = False
self._exec_group = None
self._data_shapes = None
self._label_shapes = None
def data_names(self):
    """Names of the data inputs expected by this module."""
    return self._data_names
def label_names(self):
    """Names of the label inputs expected by this module."""
    return self._label_names
def output_names(self):
    """Names of the symbol's outputs."""
    return self._output_names
def data_shapes(self):
    """Data shape descriptors of the bound executors; requires bind() first."""
    assert self.binded
    return self._data_shapes
def label_shapes(self):
    """Label shape descriptors (None when bound without labels); requires bind()."""
    assert self.binded
    return self._label_shapes
def output_shapes(self):
    """Output shapes as reported by the executor group; requires bind()."""
    assert self.binded
    return self._exec_group.get_output_shapes()
def get_params(self):
    """Return ``(arg_params, aux_params)``, syncing from devices when stale."""
    assert (self.binded and self.params_initialized)
    if self._params_dirty:
        # The device copies are newer than the CPU copies; pull them back.
        self._sync_params_from_devices()
    return (self._arg_params, self._aux_params)
def init_params(self, initializer=Uniform(0.01), arg_params=None, aux_params=None, allow_missing=False, force_init=False, allow_extra=False):
    """Initialize parameters and auxiliary states.

    Values present in ``arg_params``/``aux_params`` take precedence;
    anything missing is either filled by ``initializer`` (when
    ``allow_missing`` is True) or raises. After filling the CPU-side
    dicts, the values are pushed out to the device executors.

    Raises
    ------
    RuntimeError
        When a parameter is missing from the cache and ``allow_missing``
        is False.
    """
    if self.params_initialized and not force_init:
        warnings.warn('Parameters already initialized and force_init=False. init_params call ignored.', stacklevel=2)
        return
    assert self.binded, 'call bind before initializing the parameters'

    def _impl(name, arr, cache):
        # Copy ``name`` from ``cache`` into ``arr``; fall back to the initializer.
        if cache is not None:
            if name in cache:
                cache_arr = cache[name]
                if cache_arr is not arr:
                    cache_arr.copyto(arr)
            else:
                if not allow_missing:
                    raise RuntimeError('%s is not presented' % name)
                # BUGFIX: identity comparison (`is not None`) instead of
                # `!= None`, which could trigger a custom __ne__.
                if initializer is not None:
                    initializer(name, arr)
        else:
            # No cache supplied at all: always run the initializer.
            initializer(name, arr)

    attrs = self._symbol.attr_dict()
    for name, arr in self._arg_params.items():
        desc = InitDesc(name, attrs.get(name, None))
        _impl(desc, arr, arg_params)
    for name, arr in self._aux_params.items():
        desc = InitDesc(name, attrs.get(name, None))
        _impl(desc, arr, aux_params)
    self.params_initialized = True
    self._params_dirty = False
    # Push the freshly initialized values out to the device executors.
    self._exec_group.set_params(self._arg_params, self._aux_params, allow_extra=allow_extra)
def set_params(self, arg_params, aux_params, allow_missing=False, force_init=True, allow_extra=False):
    """Assign parameter and aux-state values directly.

    When ``allow_missing`` is False this delegates to init_params (with no
    initializer) so the CPU-side dicts are updated too; otherwise values
    are written straight to the device executors.
    """
    if not allow_missing:
        # Reuse init_params purely as a copy mechanism.
        self.init_params(initializer=None, arg_params=arg_params, aux_params=aux_params, allow_missing=allow_missing, force_init=force_init, allow_extra=allow_extra)
        return
    if self.params_initialized and not force_init:
        warnings.warn('Parameters already initialized and force_init=False. set_params call ignored.', stacklevel=2)
        return
    self._exec_group.set_params(arg_params, aux_params, allow_extra=allow_extra)
    # CPU-side copies are now stale relative to the devices.
    self._params_dirty = True
    self.params_initialized = True
def bind(self, data_shapes, label_shapes=None, for_training=True, inputs_need_grad=False, force_rebind=False, shared_module=None, grad_req='write'):
    """Bind executors to devices, allocating memory for the given shapes.

    Parameters
    ----------
    data_shapes : list
        Per-device data shape descriptors.
    label_shapes : list, optional
        Per-device label shape descriptors.
    for_training : bool
        Whether gradient buffers are needed.
    inputs_need_grad : bool
        Whether gradients w.r.t. the inputs are required (module stacking).
    force_rebind : bool
        Discard any existing binding first.
    shared_module : Module, optional
        Module whose parameters and executor memory should be shared.
    grad_req : str
        Gradient accumulation mode ('write', 'add', 'null').
    """
    if force_rebind:
        self._reset_bind()
    if self.binded:
        self.logger.warning('Already binded, ignoring bind()')
        return
    self.for_training = for_training
    self.inputs_need_grad = inputs_need_grad
    self.binded = True
    self._grad_req = grad_req
    if (not for_training):
        # Input gradients only make sense when training.
        assert (not inputs_need_grad)
    else:
        pass
    # NOTE(review): label_shapes=None makes the zip() below raise a
    # TypeError; callers appear to always pass label shapes -- confirm.
    (self._data_shapes, self._label_shapes) = zip(*[_parse_data_desc(self.data_names, self.label_names, data_shape, label_shape) for (data_shape, label_shape) in zip(data_shapes, label_shapes)])
    if (self._label_shapes.count(None) == len(self._label_shapes)):
        # No labels anywhere: normalize to a single None.
        self._label_shapes = None
    if (shared_module is not None):
        assert (isinstance(shared_module, Module) and shared_module.binded and shared_module.params_initialized)
        shared_group = shared_module._exec_group
    else:
        shared_group = None
    self._exec_group = DataParallelExecutorGroup(self._symbol, self._context, self._work_load_list, self._data_shapes, self._label_shapes, self._param_names, for_training, inputs_need_grad, shared_group, logger=self.logger, fixed_param_names=self._fixed_param_names, grad_req=grad_req, state_names=self._state_names)
    if (shared_module is not None):
        # Alias the parameter dicts of the shared module (no copies).
        self.params_initialized = True
        self._arg_params = shared_module._arg_params
        self._aux_params = shared_module._aux_params
    elif self.params_initialized:
        # Re-binding after init: push existing parameters to the new executors.
        self._exec_group.set_params(self._arg_params, self._aux_params)
    else:
        # Fresh bind: allocate zeroed CPU-side parameter buffers.
        assert ((self._arg_params is None) and (self._aux_params is None))
        param_arrays = [nd.zeros(x[0].shape, dtype=x[0].dtype) for x in self._exec_group.param_arrays]
        self._arg_params = {name: arr for (name, arr) in zip(self._param_names, param_arrays)}
        aux_arrays = [nd.zeros(x[0].shape, dtype=x[0].dtype) for x in self._exec_group.aux_arrays]
        self._aux_params = {name: arr for (name, arr) in zip(self._aux_names, aux_arrays)}
    if ((shared_module is not None) and shared_module.optimizer_initialized):
        self.borrow_optimizer(shared_module)
def reshape(self, data_shapes, label_shapes=None):
    """Re-bind executors for new input shapes without reallocating parameters."""
    assert self.binded
    parsed = [_parse_data_desc(self.data_names, self.label_names, d_shape, l_shape) for d_shape, l_shape in zip(data_shapes, label_shapes)]
    self._data_shapes, self._label_shapes = zip(*parsed)
    self._exec_group.reshape(self._data_shapes, self._label_shapes)
def init_optimizer(self, kvstore='local', optimizer='sgd', optimizer_params=(('learning_rate', 0.01),), force_init=False):
    """Create (or reuse) the optimizer and key-value store for training.

    Parameters
    ----------
    kvstore : str or KVStore
        Where parameter updates happen.
    optimizer : str or Optimizer
        A string is resolved via ``opt.create``; an instance is used as-is.
    optimizer_params : tuple of (name, value)
        Only consulted when ``optimizer`` is a string.
    force_init : bool
        Re-initialize even if an optimizer already exists.
    """
    assert (self.binded and self.params_initialized)
    if (self.optimizer_initialized and (not force_init)):
        self.logger.warning('optimizer already initialized, ignoring...')
        return
    (kvstore, update_on_kvstore) = _create_kvstore(kvstore, len(self._context), self._arg_params)
    batch_size = self._exec_group.batch_size
    if (kvstore and ('dist' in kvstore.type) and ('_sync' in kvstore.type)):
        # Synchronous distributed training: gradients are summed across workers.
        batch_size *= kvstore.num_workers
    rescale_grad = (1.0 / batch_size)
    if isinstance(optimizer, str):
        idx2name = {}
        if update_on_kvstore:
            idx2name.update(enumerate(self._exec_group.param_names))
        else:
            # Local updates: one (index -> name) entry per parameter per device.
            for k in range(len(self._context)):
                idx2name.update({((i * len(self._context)) + k): n for (i, n) in enumerate(self._exec_group.param_names)})
        optimizer_params = dict(optimizer_params)
        if ('rescale_grad' not in optimizer_params):
            # Normalize gradients by the (global) batch size unless overridden.
            optimizer_params['rescale_grad'] = rescale_grad
        optimizer = opt.create(optimizer, sym=self.symbol, param_idx2name=idx2name, **optimizer_params)
    else:
        assert isinstance(optimizer, opt.Optimizer)
        if (optimizer.rescale_grad != rescale_grad):
            warnings.warn((('Optimizer created manually outside Module but rescale_grad ' + ('is not normalized to 1.0/batch_size/num_workers (%s vs. %s). ' % (optimizer.rescale_grad, rescale_grad))) + 'Is this intended?'), stacklevel=2)
    self._optimizer = optimizer
    self._kvstore = kvstore
    self._update_on_kvstore = update_on_kvstore
    self._updater = None
    if kvstore:
        _initialize_kvstore(kvstore=kvstore, param_arrays=self._exec_group.param_arrays, arg_params=self._arg_params, param_names=self._param_names, update_on_kvstore=update_on_kvstore)
    if update_on_kvstore:
        kvstore.set_optimizer(self._optimizer)
    else:
        self._updater = opt.get_updater(optimizer)
    self.optimizer_initialized = True
    if (self._preload_opt_states is not None):
        # Apply optimizer states that were recorded by load().
        self.load_optimizer_states(self._preload_opt_states)
        self._preload_opt_states = None
def borrow_optimizer(self, shared_module):
    """Share optimizer state with another module (used for shared parameters)."""
    assert shared_module.optimizer_initialized
    # Alias, not copy: both modules drive the very same optimizer objects.
    for attr in ('_optimizer', '_kvstore', '_update_on_kvstore', '_updater'):
        setattr(self, attr, getattr(shared_module, attr))
    self.optimizer_initialized = True
def forward(self, data_batch, is_train=None):
    """Run a forward pass on ``data_batch``; ``is_train`` defaults to the bind mode."""
    assert self.binded and self.params_initialized
    self._exec_group.forward(data_batch, is_train)
def backward(self, out_grads=None):
    """Run a backward pass; ``out_grads`` supplies head gradients if needed."""
    assert self.binded and self.params_initialized
    self._exec_group.backward(out_grads=out_grads)
def update(self):
    """Apply one optimizer step using the gradients from the last backward()."""
    assert self.binded and self.params_initialized and self.optimizer_initialized
    # Device-side parameters change below, so the CPU copies become stale.
    self._params_dirty = True
    group = self._exec_group
    if self._update_on_kvstore:
        _update_params_on_kvstore(group.param_arrays, group.grad_arrays, self._kvstore, group.param_names)
    else:
        _update_params(group.param_arrays, group.grad_arrays, updater=self._updater, num_device=len(self._context), kvstore=self._kvstore)
def get_outputs(self, merge_multi_context=True):
    """Outputs of the previous forward pass (merged across devices by default)."""
    assert self.binded and self.params_initialized
    return self._exec_group.get_outputs(merge_multi_context=merge_multi_context)
def get_input_grads(self, merge_multi_context=True):
    """Gradients w.r.t. the inputs; requires binding with inputs_need_grad=True."""
    assert self.binded and self.params_initialized and self.inputs_need_grad
    return self._exec_group.get_input_grads(merge_multi_context=merge_multi_context)
def get_states(self, merge_multi_context=True):
    """Current values of the module's input states."""
    assert self.binded and self.params_initialized
    return self._exec_group.get_states(merge_multi_context=merge_multi_context)
def set_states(self, states=None, value=None):
    """Set state values either from ``states`` arrays or a scalar ``value``."""
    assert self.binded and self.params_initialized
    self._exec_group.set_states(states, value)
def update_metric(self, eval_metric, labels):
    """Accumulate ``eval_metric`` with the outputs of the last forward pass."""
    self._exec_group.update_metric(eval_metric, labels)
def _sync_params_from_devices(self):
self._exec_group.get_params(self._arg_params, self._aux_params)
self._params_dirty = False
def save_optimizer_states(self, fname):
    """Write optimizer state to ``fname`` (via the kvstore when it owns updates)."""
    assert self.optimizer_initialized
    if self._update_on_kvstore:
        self._kvstore.save_optimizer_states(fname)
        return
    with open(fname, 'wb') as handle:
        handle.write(self._updater.get_states())
def load_optimizer_states(self, fname):
    """Load optimizer state from ``fname``.

    Mirrors ``save_optimizer_states``: delegates to the kvstore when updates
    happen there, otherwise feeds the raw bytes to the local updater.
    """
    assert self.optimizer_initialized
    if self._update_on_kvstore:
        self._kvstore.load_optimizer_states(fname)
    else:
        # BUGFIX: the file handle was previously leaked (bare open().read()).
        with open(fname, 'rb') as fin:
            self._updater.set_states(fin.read())
def install_monitor(self, mon):
    """Attach ``mon`` to every executor for activation/weight inspection."""
    assert self.binded
    self._exec_group.install_monitor(mon)
class SwitchableDropoutWrapper(DropoutWrapper):
    """DropoutWrapper whose dropout is toggled by an ``is_train`` tensor.

    Runs the wrapped cell twice -- once with dropout, once without -- and
    selects between the two results with ``tf.cond`` so the same graph
    serves both training and inference.
    """
    def __init__(self, cell, is_train, input_keep_prob=1.0, output_keep_prob=1.0, seed=None):
        # ``is_train`` is a scalar boolean tensor evaluated at run time.
        super(SwitchableDropoutWrapper, self).__init__(cell, input_keep_prob=input_keep_prob, output_keep_prob=output_keep_prob, seed=seed)
        self.is_train = is_train
    def __call__(self, inputs, state, scope=None):
        # Dropout path first, then reuse the variables for the plain path.
        (outputs_do, new_state_do) = super(SwitchableDropoutWrapper, self).__call__(inputs, state, scope=scope)
        tf.get_variable_scope().reuse_variables()
        (outputs, new_state) = self._cell(inputs, state, scope)
        outputs = tf.cond(self.is_train, (lambda : outputs_do), (lambda : outputs))
        if isinstance(state, tuple):
            # Structured state (e.g. LSTMStateTuple): select element-wise and
            # rebuild the same state class.
            new_state = state.__class__(*[tf.cond(self.is_train, (lambda : new_state_do_i), (lambda : new_state_i)) for (new_state_do_i, new_state_i) in zip(new_state_do, new_state)])
        else:
            new_state = tf.cond(self.is_train, (lambda : new_state_do), (lambda : new_state))
        return (outputs, new_state)
class SAGPool(torch.nn.Module):
    """Self-attention graph pooling (SAGPool).

    Scores each node with a GNN layer, keeps the top ``ratio`` fraction of
    nodes per graph, and gates the surviving features with the
    non-linearized scores.
    """

    def __init__(self, in_channels, ratio=0.8, Conv=GCNConv, non_linearity=torch.tanh):
        super(SAGPool, self).__init__()
        self.in_channels = in_channels
        self.ratio = ratio
        # One scalar attention score per node.
        self.score_layer = Conv(in_channels, 1)
        self.non_linearity = non_linearity

    def forward(self, x, edge_index, edge_attr=None, batch=None):
        if batch is None:
            # Single graph: every node belongs to batch 0.
            batch = edge_index.new_zeros(x.size(0))
        score = self.score_layer(x, edge_index).squeeze()
        perm = topk(score, self.ratio, batch)
        gate = self.non_linearity(score[perm]).view(-1, 1)
        x = x[perm] * gate
        batch = batch[perm]
        edge_index, edge_attr = filter_adj(edge_index, edge_attr, perm, num_nodes=score.size(0))
        return x, edge_index, edge_attr, batch, perm
class NormalizeActions(EnvWrapper):
    """Rescale finite-bounded action dimensions to [-1, 1].

    Dimensions with infinite bounds are passed through unchanged.
    """

    def __init__(self, env):
        super().__init__(env)
        low, high = env.action_space.low, env.action_space.high
        # Only dimensions whose bounds are both finite get normalized.
        self._mask = np.logical_and(np.isfinite(low), np.isfinite(high))
        self._low = np.where(self._mask, low, -1)
        self._high = np.where(self._mask, high, 1)

    def action_space(self):
        ones = np.ones_like(self._low)
        low = np.where(self._mask, -ones, self._low)
        high = np.where(self._mask, ones, self._high)
        return FloatBox(low, high, dtype=np.float32)

    def step(self, action):
        # Map [-1, 1] back to the environment's native [low, high] range.
        rescaled = (action + 1) / 2 * (self._high - self._low) + self._low
        rescaled = np.where(self._mask, rescaled, action)
        return self.env.step(rescaled)
class Net(torch.nn.Module):
    """Three-conv-block CNN with one task-specific linear head per task."""

    def __init__(self, inputsize, taskcla):
        super(Net, self).__init__()
        ncha, size, _ = inputsize
        self.taskcla = taskcla
        # Track the spatial size after each conv + 2x2 max-pool stage.
        self.conv1 = torch.nn.Conv2d(ncha, 64, kernel_size=size // 8)
        s = utils.compute_conv_output_size(size, size // 8) // 2
        self.conv2 = torch.nn.Conv2d(64, 128, kernel_size=size // 10)
        s = utils.compute_conv_output_size(s, size // 10) // 2
        self.conv3 = torch.nn.Conv2d(128, 256, kernel_size=2)
        s = utils.compute_conv_output_size(s, 2) // 2
        self.maxpool = torch.nn.MaxPool2d(2)
        self.relu = torch.nn.ReLU()
        self.drop1 = torch.nn.Dropout(0.2)
        self.drop2 = torch.nn.Dropout(0.5)
        self.fc1 = torch.nn.Linear(256 * s * s, 2048)
        self.fc2 = torch.nn.Linear(2048, 2048)
        # One classification head per task.
        self.last = torch.nn.ModuleList()
        for _, n_classes in self.taskcla:
            self.last.append(torch.nn.Linear(2048, n_classes))

    def forward(self, x):
        h = self.maxpool(self.drop1(self.relu(self.conv1(x))))
        h = self.maxpool(self.drop1(self.relu(self.conv2(h))))
        h = self.maxpool(self.drop2(self.relu(self.conv3(h))))
        h = h.view(x.size(0), -1)
        h = self.drop2(self.relu(self.fc1(h)))
        h = self.relu(self.fc2(h))
        # Per-task logits plus the shared feature vector.
        outputs = [self.last[t](h) for t, _ in self.taskcla]
        return outputs, h
def update_linker(linker):
    """Strip exit (attachment-point) atoms and their bonds from a linker mol."""
    # Delete highest-index atoms first so earlier indices stay valid.
    exits = sorted(get_exits(linker), key=lambda atom: atom.GetIdx(), reverse=True)
    editable = Chem.EditableMol(linker)
    for exit_atom in exits:
        bonds = exit_atom.GetBonds()
        if len(bonds) > 1:
            raise Exception('Exit atom has more than 1 bond')
        bond = bonds[0]
        editable.RemoveBond(bond.GetBeginAtomIdx(), bond.GetEndAtomIdx())
    for exit_atom in exits:
        editable.RemoveAtom(exit_atom.GetIdx())
    return editable.GetMol()
class DirectoryIterator(Iterator):
    """Iterator over per-class subdirectories of ``.npz`` archives.

    Unlike Keras' stock DirectoryIterator this loads dense arrays stored
    under the key ``'y'`` of npz files instead of decoding image files.
    """
    def __init__(self, directory, image_data_generator, target_size=(256, 256), color_mode='rgb', classes=None, class_mode='categorical', batch_size=32, shuffle=True, seed=None, save_to_dir=None, save_prefix='', save_format='png', follow_links=False, subset=None, interpolation='nearest', dtype='float32'):
        self.image_data_generator = image_data_generator
        self.target_size = tuple(target_size)
        if (color_mode not in {'rgb', 'rgba', 'grayscale'}):
            raise ValueError('Invalid color mode:', color_mode, '; expected "rgb", "rgba", or "grayscale".')
        # Infer the channel count from the first npz file found below the root.
        c = np.load(glob.glob('{}/**/*.npz'.format(directory))[0])['y'].shape[(- 1)]
        self.image_shape = (self.target_size + (c,))
        self.save_to_dir = save_to_dir
        self.save_prefix = save_prefix
        self.save_format = save_format
        self.interpolation = interpolation
        if (subset is not None):
            # Split the file list into validation [0, v) / training [v, 1).
            validation_split = self.image_data_generator._validation_split
            if (subset == 'validation'):
                split = (0, validation_split)
            elif (subset == 'training'):
                split = (validation_split, 1)
            else:
                raise ValueError(('Invalid subset name: %s;expected "training" or "validation"' % (subset,)))
        else:
            split = None
        self.split = split
        self.subset = subset
        self.directory = directory
        self.classes = classes
        if (class_mode not in {'categorical', 'binary', 'sparse', 'input', None}):
            raise ValueError('Invalid class_mode:', class_mode, '; expected one of "categorical", "binary", "sparse", "input" or None.')
        self.class_mode = class_mode
        self.dtype = dtype
        white_list_formats = {'npz'}
        self.samples = 0
        if (not classes):
            # Each immediate subdirectory is one class.
            classes = []
            for subdir in sorted(os.listdir(directory)):
                if os.path.isdir(os.path.join(directory, subdir)):
                    classes.append(subdir)
        self.num_classes = len(classes)
        self.class_indices = dict(zip(classes, range(len(classes))))
        # Scan class directories in parallel to count and list valid files.
        pool = multiprocessing.pool.ThreadPool()
        function_partial = partial(_count_valid_files_in_directory, white_list_formats=white_list_formats, follow_links=follow_links, split=self.split)
        self.samples = sum(pool.map(function_partial, (os.path.join(directory, subdir) for subdir in classes)))
        print(('Found %d images belonging to %d classes.' % (self.samples, self.num_classes)))
        results = []
        self.filenames = []
        self.classes = np.zeros((self.samples,), dtype='int32')
        i = 0
        for dirpath in (os.path.join(directory, subdir) for subdir in classes):
            results.append(pool.apply_async(_list_valid_filenames_in_directory, (dirpath, white_list_formats, self.split, self.class_indices, follow_links)))
        for res in results:
            # Collect (class indices, filenames) per directory, in order.
            (classes, filenames) = res.get()
            self.classes[i:(i + len(classes))] = classes
            self.filenames += filenames
            i += len(classes)
        pool.close()
        pool.join()
        super(DirectoryIterator, self).__init__(self.samples, batch_size, shuffle, seed)
    def _get_batches_of_transformed_samples(self, index_array):
        """Load, resize, augment and standardize the samples at ``index_array``."""
        batch_x = ([None] * len(index_array))
        for (i, j) in enumerate(index_array):
            fname = self.filenames[j]
            x = np.load(os.path.join(self.directory, fname))['y']
            x = resize(x, self.target_size, anti_aliasing=False, mode='symmetric', clip=False, preserve_range=True)
            # Random augmentation followed by generator-level standardization.
            params = self.image_data_generator.get_random_transform(x.shape)
            x = self.image_data_generator.apply_transform(x, params)
            x = self.image_data_generator.standardize(x)
            batch_x[i] = x
        batch_x = np.stack(batch_x)
        if self.save_to_dir:
            raise NotImplementedError
        # Build the label batch according to class_mode.
        if (self.class_mode == 'input'):
            batch_y = batch_x.copy()
        elif (self.class_mode == 'sparse'):
            batch_y = self.classes[index_array]
        elif (self.class_mode == 'binary'):
            batch_y = self.classes[index_array].astype(self.dtype)
        elif (self.class_mode == 'categorical'):
            batch_y = np.zeros((len(batch_x), self.num_classes), dtype=self.dtype)
            for (i, label) in enumerate(self.classes[index_array]):
                batch_y[(i, label)] = 1.0
        else:
            return batch_x
        return (batch_x, batch_y)
    def next(self):
        """Thread-safe fetch of the next batch."""
        with self.lock:
            index_array = next(self.index_generator)
        return self._get_batches_of_transformed_samples(index_array)
def remove_newlines(s):
    r"""Replace newline characters in ``s`` with spaces and collapse whitespace.

    BUGFIX: the previous pattern ``[\n|\r\n|\n\r]`` was a character class,
    so it also replaced literal ``|`` characters. ``[\r\n]`` matches exactly
    CR and LF (and therefore each half of a CRLF pair); the follow-up
    whitespace cleanup collapses the resulting double spaces.
    """
    s = re.sub(r'[\r\n]', ' ', s)
    s = remove_extraneous_whitespace(s)
    return s
def torch_nn_functional_one_hot(tensor, num_classes=-1):
    """Shape-only stand-in for ``F.one_hot`` that returns a meta tensor.

    Raises ValueError for ``num_classes < 0`` because inferring the class
    count needs real values, which meta tensors do not carry.
    """
    if num_classes < 0:
        raise ValueError("Don't support automatic num_classes inference for MetaTensor analysis")
    out_shape = list(tensor.shape)
    out_shape.append(num_classes)
    return torch.empty(out_shape, device='meta')
class AtariNet(nn.Module):
    """IMPALA-style Atari policy/value net with a transformer-memory core.

    A small ResNet encodes frames; the encoding is concatenated with the
    clipped reward and the one-hot last action, fed through a
    MemTransformerLM core, and decoded into policy logits and a baseline.
    """
    def __init__(self, observation_shape, num_actions):
        super(AtariNet, self).__init__()
        self.observation_shape = observation_shape
        self.num_actions = num_actions
        self.feat_convs = []
        self.resnet1 = []
        self.resnet2 = []
        self.convs = []
        input_channels = self.observation_shape[0]
        # Three stages: conv + maxpool, each followed by two residual blocks.
        for num_ch in [16, 32, 32]:
            feats_convs = []
            feats_convs.append(nn.Conv2d(in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1))
            feats_convs.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
            self.feat_convs.append(nn.Sequential(*feats_convs))
            input_channels = num_ch
            for i in range(2):
                resnet_block = []
                resnet_block.append(nn.ReLU())
                resnet_block.append(nn.Conv2d(in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1))
                resnet_block.append(nn.ReLU())
                resnet_block.append(nn.Conv2d(in_channels=input_channels, out_channels=num_ch, kernel_size=3, stride=1, padding=1))
                if (i == 0):
                    self.resnet1.append(nn.Sequential(*resnet_block))
                else:
                    self.resnet2.append(nn.Sequential(*resnet_block))
        self.feat_convs = nn.ModuleList(self.feat_convs)
        self.resnet1 = nn.ModuleList(self.resnet1)
        self.resnet2 = nn.ModuleList(self.resnet2)
        # Sized so that fc output + one-hot action + reward = 256 core dims.
        self.fc = nn.Linear(3872, ((256 - num_actions) - 1))
        core_output_size = ((self.fc.out_features + num_actions) + 1)
        self.core = MemTransformerLM(n_token=None, n_layer=1, n_head=8, d_head=(core_output_size // 8), d_model=core_output_size, d_inner=2048, dropout=0.1, dropatt=0.0, tgt_len=512, mem_len=1, ext_len=0, use_stable_version=True, use_gate=False)
        self.core.apply(weights_init)
        self.policy = nn.Linear(core_output_size, self.num_actions)
        self.baseline = nn.Linear(core_output_size, 1)
    def initial_state(self, batch_size):
        """Zeroed (h, c)-style core state placeholders for a new episode batch."""
        return tuple((torch.zeros(self.core.n_layer, batch_size, self.core.d_model) for _ in range(2)))
    def forward(self, inputs, core_state=(), mems=None, mem_padding=None):
        # Frames arrive as (time, batch, ...); fold time into the batch dim.
        x = inputs['frame']
        (T, B, *_) = x.shape
        x = torch.flatten(x, 0, 1)
        x = (x.float() / 255.0)
        for (i, fconv) in enumerate(self.feat_convs):
            x = fconv(x)
            res_input = x
            x = self.resnet1[i](x)
            x += res_input
            res_input = x
            x = self.resnet2[i](x)
            x += res_input
        x = F.relu(x)
        x = x.view((T * B), (- 1))
        x = F.relu(self.fc(x))
        # Append the one-hot last action and clipped reward to the features.
        one_hot_last_action = F.one_hot(inputs['last_action'].view((T * B)), self.num_actions).float()
        clipped_reward = torch.clamp(inputs['reward'], (- 1), 1).view((T * B), 1)
        core_input = torch.cat([x, clipped_reward, one_hot_last_action], dim=(- 1))
        core_input = core_input.view(T, B, (- 1))
        # Build a padding mask from 'done' flags for the transformer core.
        padding_mask = inputs['done']
        ind_first_done = None
        if (padding_mask.dim() > 1):
            # NOTE(review): keeps the first post-done step unmasked per
            # sequence (argmin + 1), clamping overflows to -1 -- confirm
            # against the training loop's episode-boundary convention.
            ind_first_done = (padding_mask.long().argmin(0) + 1)
            ind_first_done[(ind_first_done >= padding_mask.shape[0])] = (- 1)
            padding_mask[(ind_first_done, range(B))] = False
        padding_mask = padding_mask.unsqueeze(0)
        if (not padding_mask.any().item()):
            # All-False mask: pass None so the core skips masking entirely.
            padding_mask = None
        (core_output, mems) = self.core(core_input, mems, padding_mask=padding_mask, mem_padding=mem_padding)
        policy_logits = self.policy(core_output)
        baseline = self.baseline(core_output)
        policy_logits = policy_logits.reshape((T * B), self.num_actions)
        if self.training:
            # Sample during training; act greedily during evaluation.
            action = torch.multinomial(F.softmax(policy_logits, dim=1), num_samples=1)
        else:
            action = torch.argmax(policy_logits, dim=1)
        policy_logits = policy_logits.view(T, B, self.num_actions)
        baseline = baseline.view(T, B)
        action = action.view(T, B)
        return (dict(policy_logits=policy_logits, baseline=baseline, action=action), core_state, mems, padding_mask, ind_first_done)
class DCN(DCNv2):
    """Deformable convolution v2 that predicts its own offsets and mask."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding=0, dilation=1, deformable_groups=2, groups=None, bias=True):
        super(DCN, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, deformable_groups)
        # 3 channels per sampling location: x-offset, y-offset, mask logit.
        offset_mask_channels = self.deformable_groups * 3 * self.kernel_size[0] * self.kernel_size[1]
        self.conv_offset_mask = nn.Conv2d(self.in_channels, offset_mask_channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, bias=True)
        self.init_offset()

    def init_offset(self):
        # Start from zero offsets and zero mask logits (sigmoid -> 0.5).
        self.conv_offset_mask.weight.data.zero_()
        self.conv_offset_mask.bias.data.zero_()

    def forward(self, input):
        predicted = self.conv_offset_mask(input)
        o1, o2, mask = torch.chunk(predicted, 3, dim=1)
        offset = torch.cat((o1, o2), dim=1)
        mask = torch.sigmoid(mask)
        return dcn_v2_conv(input, offset, mask, self.weight, self.bias, self.stride, self.padding, self.dilation, self.deformable_groups)
def get_sparse_feature(feature_file, label_file):
    """Load L2-normalized sparse features plus (optionally) dense labels."""
    sparse_x, _ = load_svmlight_file(feature_file, multilabel=True)
    labels = np.load(label_file) if label_file is not None else None
    return normalize(sparse_x), labels
class FCResNet(nn.Module):
    """Fully-convolutional residual net over spectrogram-like input.

    Five residual stages (conv-BN-ReLU x2 + conv with skip connection), each
    followed by a pooling + dilated-conv transition, ending in average
    pooling and a sigmoid over the flattened map.
    """

    def __init__(self, input_size=(1, 40, 1091)):
        super(FCResNet, self).__init__()
        # Stage 1 (16 channels).
        self.cnn1 = nn.Conv2d(1, 16, kernel_size=(3, 3), padding=(1, 1))
        self.bn1 = nn.BatchNorm2d(16)
        self.re1 = nn.ReLU(inplace=True)
        self.cnn2 = nn.Conv2d(16, 16, kernel_size=(3, 3), padding=(1, 1))
        self.bn2 = nn.BatchNorm2d(16)
        self.re2 = nn.ReLU(inplace=True)
        self.cnn3 = nn.Conv2d(16, 16, kernel_size=(3, 3), padding=(1, 1))
        self.mp1 = nn.MaxPool2d(kernel_size=(1, 2))
        # Stage 2 (32 channels, dilated transition).
        self.cnn4 = nn.Conv2d(16, 32, kernel_size=(3, 3), dilation=(2, 2))
        self.bn3 = nn.BatchNorm2d(32)
        self.re3 = nn.ReLU(inplace=True)
        self.cnn5 = nn.Conv2d(32, 32, kernel_size=(3, 3), padding=(1, 1))
        self.bn4 = nn.BatchNorm2d(32)
        self.re4 = nn.ReLU(inplace=True)
        self.cnn6 = nn.Conv2d(32, 32, kernel_size=(3, 3), padding=(1, 1))
        self.mp2 = nn.MaxPool2d(kernel_size=(1, 2))
        # Stage 3.
        self.cnn7 = nn.Conv2d(32, 32, kernel_size=(3, 3), dilation=(4, 4))
        self.bn5 = nn.BatchNorm2d(32)
        self.re5 = nn.ReLU(inplace=True)
        self.cnn8 = nn.Conv2d(32, 32, kernel_size=(3, 3), padding=(1, 1))
        self.bn6 = nn.BatchNorm2d(32)
        self.re6 = nn.ReLU(inplace=True)
        self.cnn9 = nn.Conv2d(32, 32, kernel_size=(3, 3), padding=(1, 1))
        self.mp3 = nn.MaxPool2d(kernel_size=(2, 2))
        # Stage 4.
        self.cnn10 = nn.Conv2d(32, 32, kernel_size=(3, 3), dilation=(4, 4))
        self.bn12 = nn.BatchNorm2d(32)
        self.re12 = nn.ReLU(inplace=True)
        self.cnn11 = nn.Conv2d(32, 32, kernel_size=(3, 3), padding=(1, 1))
        self.bn13 = nn.BatchNorm2d(32)
        self.re13 = nn.ReLU(inplace=True)
        self.cnn12 = nn.Conv2d(32, 32, kernel_size=(3, 3), padding=(1, 1))
        self.mp4 = nn.MaxPool2d(kernel_size=(2, 2))
        # Stage 5 (back down to 16 channels).
        self.cnn13 = nn.Conv2d(32, 16, kernel_size=(3, 3), dilation=(8, 8))
        self.bn14 = nn.BatchNorm2d(16)
        self.re14 = nn.ReLU(inplace=True)
        self.cnn14 = nn.Conv2d(16, 16, kernel_size=(3, 3), padding=(1, 1))
        self.bn15 = nn.BatchNorm2d(16)
        self.re15 = nn.ReLU(inplace=True)
        self.cnn15 = nn.Conv2d(16, 16, kernel_size=(3, 3), padding=(1, 1))
        self.mp5 = nn.MaxPool2d(kernel_size=(2, 2))
        self.cnn16 = nn.Conv2d(16, 1, kernel_size=(3, 3), dilation=(8, 8))
        self.avgpool = nn.AvgPool2d(kernel_size=(4, 6))
        self.sigmoid = nn.Sigmoid()

        def _weights_init(m):
            # BUGFIX: `(nn.Conv2d or nn.Linear)` evaluates to just nn.Conv2d
            # (likewise for the BatchNorm pair); isinstance needs a tuple of
            # types for the second branch ever to match.
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                nn.init.xavier_normal_(m.weight)
                m.bias.data.zero_()
            elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        self.apply(_weights_init)

    def forward(self, x):
        x = self.cnn1(x)
        residual = x
        x = self.cnn3(self.re2(self.bn2(self.cnn2(self.re1(self.bn1(x))))))
        x += residual
        x = self.cnn4(self.mp1(x))
        residual = x
        x = self.cnn6(self.re4(self.bn4(self.cnn5(self.re3(self.bn3(x))))))
        x += residual
        x = self.cnn7(self.mp2(x))
        residual = x
        x = self.cnn9(self.re6(self.bn6(self.cnn8(self.re5(self.bn5(x))))))
        x += residual
        x = self.cnn10(self.mp3(x))
        residual = x
        x = self.cnn12(self.re13(self.bn13(self.cnn11(self.re12(self.bn12(x))))))
        x += residual
        x = self.cnn13(self.mp4(x))
        residual = x
        x = self.cnn15(self.re15(self.bn15(self.cnn14(self.re14(self.bn14(x))))))
        x += residual
        x = self.cnn16(self.mp5(x))
        x = self.avgpool(x)
        x = x.view(x.size()[0], -1)
        out = self.sigmoid(x)
        return out
class MIDI(Dataset):
    """Dataset over a piano-roll sequence with on-the-fly MIDI conversion."""

    def __init__(self, piano_roll, max_min_notes, transform=None):
        self.piano_roll = piano_roll
        self.max_min_notes = max_min_notes
        self.transform = transform

    def __getitem__(self, ind):
        sample = convert_midi(self.piano_roll[ind], self.max_min_notes)
        if self.transform is not None:
            sample = self.transform(sample)
        return sample

    def __len__(self):
        return len(self.piano_roll)
def kitti_2015_train(img_height, img_width, batch_size, num_workers):
    """Build a DataLoader over the kitti_2015 train split for depth evaluation."""
    transforms = [
        tf.CreateScaledImage(True),
        tf.Resize((img_height, img_width), image_types=('color',)),
        tf.ConvertDepth(),
        tf.CreateColoraug(),
        tf.ToTensor(),
        tf.NormalizeZeroMean(),
        # Keys consumed downstream to select validation masks/clamps.
        tf.AddKeyValue('domain', 'kitti_2015_train_depth'),
        tf.AddKeyValue('validation_mask', 'validation_mask_kitti_kitti'),
        tf.AddKeyValue('validation_clamp', 'validation_clamp_kitti'),
        tf.AddKeyValue('purposes', ('depth',)),
    ]
    dataset = StandardDataset(dataset='kitti_2015', trainvaltest_split='train', video_mode='mono', stereo_mode='mono', keys_to_load=('color', 'depth'), data_transforms=transforms, video_frames=(0,), disable_const_items=True)
    loader = DataLoader(dataset, batch_size, False, num_workers=num_workers, pin_memory=True, drop_last=False)
    print(f' - Can use {len(dataset)} images from the kitti_2015 test set for depth evaluation', flush=True)
    return loader
def replace_unk_full(beam_lst, lst_src, int_order):
    """Replace '<unk>' tokens in the best beam hypotheses via copy pointers.

    Each ``beam_lst`` entry is a '|||'-separated string:
    ``text ||| score ||| state ||| rank ||| copy-indices``. For each source
    (reordered by ``int_order``) the best-scoring buffered hypothesis is kept
    and its '<unk>' tokens are substituted with the pointed-to field words.
    """
    result = []
    for (idx, num) in enumerate(int_order):
        # Flatten the wikibio field table of this source into a word list.
        fields = get_wikibio_poswrds(lst_src[num])
        fields = [wrd for ((k, idx), wrd) in fields.items()]
        result.append(fields)
    result_2 = []
    x_idx = 0
    temp_store = []
    for ii in range(len(beam_lst)):
        # NOTE(review): bare except -- on overflow x/y silently keep their
        # previous values and processing continues; confirm this is intended.
        try:
            x = result[x_idx]
            y = beam_lst[ii]
        except:
            print('x_idx is out of range for x:', x_idx, ii)
        try:
            (y1, score_1, state_1, rank1, copy1) = y.split('|||')
        except:
            continue
        if (int(rank1) == 0):
            # rank 0 marks the start of a new source's beam: flush the
            # buffered hypotheses of the previous source first.
            if (len(temp_store) > 0):
                # Keep only the single best (lowest key) stored hypothesis.
                for (score_, elem) in sorted(temp_store, key=(lambda a: a[0]))[:1]:
                    (y, score_, state_, rank, copy) = elem
                    copy = ast.literal_eval(copy)
                    y = y.split()
                    for (idx, elem) in enumerate(y):
                        if (elem == '<unk>'):
                            if ((copy[idx] >= 0) and (copy[idx] < len(x))):
                                # In-bounds pointer: copy the source word.
                                y[idx] = x[copy[idx]]
                    result_2.append('{}|||{}|||{}|||{}|||{}'.format(' '.join(y), score_, state_, rank, copy))
                x_idx += 1
                temp_store = []
            rescore = 1
            score_ = float(score_1)
            temp_store.append(((score_ / rescore), (y1, score_1, state_1, rank1, copy1)))
        else:
            rescore = 1
            score_ = float(score_1)
            temp_store.append(((score_ / rescore), (y1, score_1, state_1, rank1, copy1)))
    # NOTE(review): hypotheses buffered for the final source are never
    # flushed after the loop -- confirm whether dropping the last beam
    # group is intentional.
    return result_2
def get_normalizer():
    """Pick the batch-norm class for the configured backbone (ghost-BN aware)."""
    if FLAGS.backbone.startswith('efficientnetv2'):
        bn = effnetv2_utils.BatchNormalization
    else:
        bn = keras.layers.BatchNormalization
    if FLAGS.ghost_bn:
        splits = [int(part) for part in FLAGS.ghost_bn.split(',')]
        # EfficientNetV2 checkpoints name their BN layers with a 'tpu_' prefix.
        prefix = 'tpu_' if FLAGS.backbone.startswith('efficientnetv2') else ''
        bn = functools.partial(GhostBatchNormalization, split=splits, name=f'{prefix}batch_normalization')
    return bn
def main(args):
    """Entry point: load the experiment config and build/convert the models."""
    config = load_config(args)
    # NOTE(review): global_train_config and the model lists are unused in
    # this visible chunk -- presumably consumed by code further below.
    global_train_config = config['training_params']
    (models, model_names) = config_modelloader_and_convert2mlp(config)
def _find_tied_weights_for_meta(model):
_name_dict = dict()
_tied_parameters = dict()
for (name, param) in model.named_parameters():
if hasattr(param, 'checkpoint_name'):
if (param.checkpoint_name in _name_dict):
_tied_parameters[name] = _name_dict[param.checkpoint_name]
else:
_name_dict[param.checkpoint_name] = name
else:
logger.warning('params have no checkpoint_name:%s', name)
return _tied_parameters |
class transfer_conv(nn.Module):
    """1x1 conv + BN + ReLU adapter mapping student features to teacher width."""

    def __init__(self, in_feature, out_feature):
        super().__init__()
        self.in_feature = in_feature
        self.out_feature = out_feature
        self.Connectors = nn.Sequential(
            nn.Conv2d(in_feature, out_feature, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(out_feature),
            nn.ReLU(),
        )
        # He-style init for convs; identity-ish affine for batch norms.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
                module.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()

    def forward(self, student):
        return self.Connectors(student)
# NOTE(review): this bare `_model` expression looks like the remnant of a
# mangled decorator (likely `@register_model` from timm) -- confirm against
# the upstream source; as written it merely evaluates (or NameErrors on)
# the name `_model` at import time.
_model
def seresnet33ts(pretrained=False, **kwargs):
    """SE-ResNet-33-TS: build the 'seresnet33ts' byobnet variant."""
    return _create_byobnet('seresnet33ts', pretrained=pretrained, **kwargs)
def get_data():
    """Build a random daily time series plus a random forecast horizon.

    Returns
    -------
    (pandas.DataFrame, int)
        A 480-row frame with columns 'ds' (dates) and 'y' (uniform random
        values), and a horizon drawn uniformly from [40, 50).
    """
    seq_len = 480
    # BUGFIX: pd.date_range('') raises ValueError; anchor at a concrete date.
    data = pd.DataFrame(pd.date_range('2021-01-01', periods=seq_len), columns=['ds'])
    data.insert(1, 'y', np.random.rand(seq_len))
    expect_horizon = np.random.randint(40, 50)
    return (data, expect_horizon)
def kEfficientNetBN(N=0, include_top=True, input_tensor=None, input_shape=None, pooling='avg', classes=1000, kType=2, dropout_rate=None, drop_connect_rate=0.2, skip_stride_cnt=(- 1), dropout_all_blocks=False, **kwargs):
    """Build the kEffNet variant selected by ``N``.

    ``N = -1`` builds the shallow 's' variant; ``N = 0..7`` build b0..b7.
    Returns None for any other ``N`` (matching the original behavior).

    REFACTOR: the eight near-identical if/elif branches are replaced by a
    single (width, depth, default-dropout, name) lookup table.
    """
    variants = {
        -1: (1.0, 1.0, 0.2, 'kEffNet-s'),
        0: (1.0, 1.0, 0.2, 'kEffNet-b0'),
        1: (1.0, 1.1, 0.2, 'kEffNet-b1'),
        2: (1.1, 1.2, 0.3, 'kEffNet-b2'),
        3: (1.2, 1.4, 0.3, 'kEffNet-b3'),
        4: (1.4, 1.8, 0.4, 'kEffNet-b4'),
        5: (1.6, 2.2, 0.4, 'kEffNet-b5'),
        6: (1.8, 2.6, 0.5, 'kEffNet-b6'),
        7: (2.0, 3.1, 0.5, 'kEffNet-b7'),
    }
    if N not in variants:
        return None
    width, depth, default_dropout, model_name = variants[N]
    if dropout_rate is None:
        dropout_rate = default_dropout
    extra = {}
    if N == -1:
        # The shallow variant uses a reduced block configuration.
        extra['blocks_args'] = SHALLOW_BLOCKS_ARGS
    return kEfficientNet(width, depth, skip_stride_cnt=skip_stride_cnt, model_name=model_name, include_top=include_top, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, classes=classes, kType=kType, dropout_rate=dropout_rate, drop_connect_rate=drop_connect_rate, dropout_all_blocks=dropout_all_blocks, **extra, **kwargs)
def print_model_settings_dict(settings):
    """Pretty-print a settings dict, one tab-indented "name: value" line per
    entry, sorted alphabetically by setting name."""
    print('Settings dict:')
    for name in sorted(settings):
        print(f'\t{name}: {settings[name]}')
class MSRANerLoader(Loader):
    """Loader for the MSRA NER dataset (CoNLL-style: one 'token ... tag' pair
    per space-separated line, blank lines separating sentences)."""

    def __init__(self):
        super().__init__()

    def _load(self, path):
        """Parse one MSRA file into a list of {'words': [...], 'labels': [...]}
        dicts, registering every tag in self.label_set (a set-like attribute
        presumably created by Loader.__init__ -- confirm upstream).

        Raises RuntimeError if the file yields no sentences.
        """
        dataset = []
        sentence = []
        label = []
        with open(path) as f:
            for line in f:
                # Strip the newline up front. The original code sliced the last
                # character off the tag (words[-1][:-1]) to drop '\n', which
                # silently truncated the tag of a final line that has no
                # trailing newline.
                line = line.rstrip('\n')
                if not line:
                    # Blank line: sentence boundary.
                    if sentence:
                        dataset.append({'words': sentence, 'labels': label})
                        sentence = []
                        label = []
                    continue
                words = line.split(' ')
                sentence.append(words[0])
                tag = words[-1]
                label.append(tag)
                self.label_set.add(tag)
        # Flush a final sentence that is not followed by a blank line.
        if sentence:
            dataset.append({'words': sentence, 'labels': label})
        if not dataset:
            raise RuntimeError('No data found {}.'.format(path))
        return dataset

    def load_all(self, path):
        """Return (train, dev, test) datasets. MSRA ships no dev split, so the
        test file intentionally doubles as dev."""
        train_path = os.path.join(path, 'MSRA.train')
        dev_path = os.path.join(path, 'MSRA.test')
        test_path = os.path.join(path, 'MSRA.test')
        return (self._load(train_path), self._load(dev_path), self._load(test_path))
def xavier_init(module, gain=1, bias=0, distribution='normal'):
    """Initialize ``module.weight`` with Xavier (Glorot) init and, when the
    module has a bias tensor, fill it with the constant ``bias``.

    ``distribution`` selects uniform vs. normal Xavier; anything else fails
    the assertion below.
    """
    assert distribution in ['uniform', 'normal']
    # Pick the torch initializer matching the requested distribution.
    init_fn = nn.init.xavier_uniform_ if distribution == 'uniform' else nn.init.xavier_normal_
    init_fn(module.weight, gain=gain)
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)
class BasicConv2d(nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU building block.

    The convolution has no bias because the following BatchNorm supplies its
    own learned shift.
    """

    def __init__(self, in_channels, out_channels, **kwargs):
        super(BasicConv2d, self).__init__()
        # Extra Conv2d options (kernel_size, stride, padding, ...) pass through.
        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        self.bn = nn.BatchNorm2d(out_channels, eps=1e-05)

    def forward(self, x):
        return F.relu(self.bn(self.conv(x)), inplace=True)
def show_waiting(line_: str) -> Optional[str]:
    """Report symbols that are currently waiting on dependencies.

    With no argument or 'global', only the global scope is scanned; with
    'all', every aliased symbol set is scanned. Any other argument prints a
    usage warning and returns None.
    """
    usage = 'Usage: %flow show_waiting [global|all]'
    tokens = line_.split()
    if not tokens or tokens[0] == 'global':
        sym_sets: Iterable[Iterable[Symbol]] = [flow().global_scope.all_symbols_this_indentation()]
    elif tokens[0] == 'all':
        sym_sets = flow().aliases.values()
    else:
        warn(usage)
        return None
    # Collect every non-anonymous symbol that is still waiting.
    waiter_set = {
        sym
        for group in sym_sets
        for sym in group
        if sym.is_waiting and not sym.is_anonymous
    }
    if not waiter_set:
        return 'No symbol waiting on dependencies for now!'
    return 'Symbol(s) waiting on dependencies: %s' % waiter_set
# NOTE(review): the bare string expression below is a no-op as written; it looks
# like the argument of a stripped decorator (e.g. @Predictor.register('pseudolabeling')).
# Confirm against the original source and restore the decorator if so.
('pseudolabeling')
class PseudoLabelingPredictor(SuperGluePredictor):
    """Predictor that serializes pseudo-label outputs as JSON lines."""

    def dump_line(self, outputs: JsonDict) -> str:
        """Serialize one model output dict to a single JSON line.

        The emitted record holds the instance index, the raw logits under
        'pseudolabel', and any raw-input fields flattened in.
        """
        # self.numeric is presumably configured on the base predictor -- verify.
        if (not self.numeric):
            prediction = outputs['label']
        else:
            prediction = outputs['prediction']
        if isinstance(prediction, float):
            # Clamp regression outputs to the [0, 5] score range, then format.
            prediction = min(max(prediction, 0), 5)
            prediction = f'{prediction:.3f}'
        # NOTE(review): `prediction` is computed above but never used below --
        # the dumped pseudolabel is the raw logits. Confirm this is intentional.
        output = {'idx': int(outputs['index']), 'pseudolabel': outputs['logits'], **outputs.get('raw_input', {})}
        return (json.dumps(output, ensure_ascii=False) + '\n')
class MatterportObjectsSplit():
    """One split ('train'/'val'/...) of a Matterport objects dataset,
    exposing indexed access to point clouds and their box labels."""

    def __init__(self, dataset, split='train'):
        self.cfg = dataset.cfg
        paths = dataset.get_split_list(split)
        log.info('Found {} pointclouds for {}'.format(len(paths), split))
        self.path_list = paths
        self.split = split
        self.dataset = dataset

    def __len__(self):
        return len(self.path_list)

    def get_data(self, idx):
        """Load one sample: the point cloud plus its bounding-box labels."""
        pc_path = self.path_list[idx]
        # Label path mirrors the cloud path: last 'pc' component becomes
        # 'boxes' and the '.bin' extension becomes '.txt'.
        label_path = 'boxes'.join(pc_path.rsplit('pc', 1)).replace('.bin', '.txt')
        points = self.dataset.read_lidar(pc_path)
        boxes = self.dataset.read_label(label_path)
        return {'point': points, 'calib': {}, 'bounding_boxes': boxes}

    def get_attr(self, idx):
        """Return metadata (name, path, split) for sample ``idx``."""
        pc_path = self.path_list[idx]
        stem = Path(pc_path).name.split('.')[0]
        return {'name': stem, 'path': pc_path, 'split': self.split}
def modify_model_after_init(model, training_args, adapter_args, adapter_config):
    """Freeze/reparameterize the model after initialization and log parameter
    statistics.

    Applies `freeze_model_params`, optionally rewraps the model in a low
    intrinsic dimension (SAID or plain), then logs trainable-parameter counts
    and percentages relative to a T5-base reference size.

    Returns the (possibly wrapped) model.
    """
    freeze_model_params(model, adapter_args, adapter_config)
    if adapter_args.intrinsic_model:
        if adapter_args.intrinsic_said:
            model = intrinsic_dimension_said(model, adapter_args.intrinsic_dim, training_args.output_dir, set(), adapter_args.intrinsic_projection, 'cpu')
        else:
            model = intrinsic_dimension(model, adapter_args.intrinsic_dim, training_args.output_dir, set(), adapter_args.intrinsic_projection, 'cpu')
    trainable_params = sum((p.numel() for p in model.parameters() if p.requires_grad))
    logger.info('***** Model Trainable Parameters {} *****'.format(trainable_params))
    for (n, p) in model.named_parameters():
        if p.requires_grad:
            # NOTE(review): debug leftover; consider demoting to logger.debug.
            print('inside n ', n)
    if training_args.print_num_parameters:
        for (name, param) in model.named_parameters():
            if param.requires_grad:
                logger.info('##### Parameter name %s', name)
        total_lm_head_params = sum((p.numel() for p in model.lm_head.parameters()))
        total_trainable_params = sum((p.numel() for p in model.parameters() if p.requires_grad))
        # Bias-like parameters are identified by the '.b' name suffix.
        total_trainable_bias_params = sum((p.numel() for (n, p) in model.named_parameters() if (p.requires_grad and n.endswith('.b'))))
        total_trainable_layernorm_params = sum((p.numel() for (n, p) in model.named_parameters() if (p.requires_grad and ('.layer_norm.weight' in n))))
        total_params = sum((p.numel() for p in model.parameters()))
        logger.info('Total trainable parameters %s', total_trainable_params)
        logger.info('Total traianable bias parameters %s', total_trainable_bias_params)
        logger.info('Total trainable layernorm parameters %s', total_trainable_layernorm_params)
        logger.info('Total parameters %s', total_params)
        # Reference parameter count used as the percentage denominator.
        # NOTE(review): the original literal was lost during extraction;
        # 222,903,552 is the published t5-base parameter count -- confirm
        # against the checkpoint this project actually uses.
        t5_base_params = 222903552
        # NOTE(review): added parameters are weighted by 8 here; the intent
        # (e.g. bytes-per-param accounting) is not visible in this chunk.
        total_params_ratio = ((((total_params - t5_base_params) * 8) + t5_base_params) / t5_base_params)
        total_trainable_params_percent = ((total_trainable_params / t5_base_params) * 100)
        total_trainable_bias_params_percent = ((total_trainable_bias_params / total_trainable_params) * 100)
        total_trainable_layernorm_params_percent = ((total_trainable_layernorm_params / total_trainable_params) * 100)
        total_trainable_lm_head_params_percent = ((total_lm_head_params / t5_base_params) * 100)
        logger.info('For adapters/prompt-tuning, total params %s', total_params_ratio)
        logger.info('For intrinsic, total params %s', (total_params / t5_base_params))
        logger.info('Total trainable params %s', total_trainable_params_percent)
        logger.info('Total trainable bias params %s', total_trainable_bias_params_percent)
        logger.info('Total trainable layernorm params %s', total_trainable_layernorm_params_percent)
        logger.info('Total lm_head params %s', total_trainable_lm_head_params_percent)
    return model
def nin_cifar100(num_classes=100, **kwargs):
    """Build a Network-in-Network model configured for CIFAR-100.

    Extra keyword arguments are forwarded to ``get_nin_cifar``.
    """
    return get_nin_cifar(model_name='nin_cifar100', num_classes=num_classes, **kwargs)
# NOTE(review): this class uses dataclasses.field(...) but no @dataclass
# decorator is visible in this chunk -- it was likely stripped during
# extraction. Without the decorator the fields would be plain Field objects;
# confirm the decorator exists upstream (HfArgumentParser requires it).
class DataTrainingArguments():
    # Path to a text file with one unlabeled instance per line.
    data_file: str = field(metadata={'help': 'Text file with one unlabeled instance per line.'})
    # Path to a text file with one class name per line.
    class_names_file: str = field(metadata={'help': 'Text file with one class name per line.'})
    # Whether to use the fast (Rust-backed) tokenizer implementation.
    use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the Rust tokenizers library) or not.'})
def conse(path, margin=0):
    """Compute the kernel-activity time window shared by all processes.

    Reads CUPTI kernel records from the Nsight Systems sqlite export at
    ``path``, builds a per-process [first-start, last-end] span, intersects
    the spans across processes, and shrinks the window by ``margin`` on each
    side.

    NOTE(review): the original hard-coded margin literal was lost during
    extraction; it is now a parameter defaulting to 0 -- restore the intended
    value at call sites if one existed.

    Returns (intersection_start + margin, intersection_end - margin) and
    prints a warning when the shrunk window is empty.
    """
    con = sqlite3.connect(path)
    cur = con.cursor()
    sql = 'SELECT start,end,globalPid FROM CUPTI_ACTIVITY_KIND_KERNEL'
    cur.execute(sql)
    data = cur.fetchall()
    con.close()
    # Per-pid span; renamed from `conse`, which shadowed the function name.
    spans = {}
    for start, end, pid in data:
        if pid not in spans:
            # Matches the original behavior: the first row seen fixes the span
            # start (rows are assumed ordered by start); only the end extends.
            spans[pid] = {'start': start, 'end': end}
        elif spans[pid]['end'] < end:
            spans[pid]['end'] = end
    # Intersect: latest start and earliest end across processes.
    ansstart = 0
    ansend = sys.maxsize
    for span in spans.values():
        if span['start'] > ansstart:
            ansstart = span['start']
        if span['end'] < ansend:
            ansend = span['end']
    if (ansstart + margin) > ansend:
        print('duration time is too short!')
    return ((ansstart + margin), (ansend - margin))
class ConvMlp(nn.Module):
    """MLP implemented with 1x1 convolutions: fc1 -> norm -> act -> drop -> fc2.

    ``hidden_features`` and ``out_features`` default to ``in_features``;
    ``norm_args=None`` yields an identity norm layer.
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_args=None, norm_args=None, drop=0.0):
        super().__init__()
        # Fix: the original used a mutable default argument
        # (act_args={'act': 'gelu'}); build the default per call instead.
        if act_args is None:
            act_args = {'act': 'gelu'}
        out_features = (out_features or in_features)
        hidden_features = (hidden_features or in_features)
        self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=True)
        self.norm = (create_norm(norm_args, hidden_features) or nn.Identity())
        self.act = create_act(act_args)
        self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=True)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.norm(x)
        x = self.act(x)
        # Dropout sits between activation and projection, as in the original.
        x = self.drop(x)
        x = self.fc2(x)
        return x
def sp_noise(image, prob):
    """Return a copy of ``image`` with salt-and-pepper noise applied.

    Each pixel independently becomes 0 (pepper) with probability ``prob``,
    255 (salt) with probability ``prob``, or keeps its original value.
    """
    noisy = np.zeros(image.shape, np.uint8)
    salt_cutoff = 1 - prob
    rows, cols = image.shape[0], image.shape[1]
    for r in range(rows):
        for c in range(cols):
            # One uniform draw per pixel decides pepper / salt / passthrough.
            draw = random.random()
            if draw < prob:
                noisy[r][c] = 0
            elif draw > salt_cutoff:
                noisy[r][c] = 255
            else:
                noisy[r][c] = image[r][c]
    return noisy
def load_model_for_evaluate(pre_model_path, model):
    """Restore matching weights from a checkpoint into ``model._networks``.

    Loads the checkpoint on CPU, keeps only the keys that exist in the current
    network's state_dict, and loads them non-strictly. Missing/unexpected keys
    are printed (best effort) rather than raising.

    Returns the same ``model`` object.
    """
    map_location = torch.device('cpu')
    load_dict = torch.load(pre_model_path, map_location)
    pretrained_dict = load_dict['model_params']
    model_dict = model._networks.state_dict()
    # Drop checkpoint entries absent from the current architecture.
    pretrained_dict = {k: v for (k, v) in pretrained_dict.items() if (k in model_dict)}
    (miss, unexpected) = model._networks.load_state_dict(pretrained_dict, False)
    # Fix: load_state_dict returns lists (never None), so the original
    # `is not None` checks always printed, including empty lists. Test
    # truthiness so only real mismatches are reported.
    if miss:
        print(miss)
    if unexpected:
        print(unexpected)
    return model
# NOTE(review): the lines below are web-page residue (dataset-viewer chrome),
# not source code; commented out so the file parses. Safe to delete upstream.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.