code stringlengths 101 5.91M |
|---|
def parse_command_args(training_or_testing):
    """Build and parse the command-line arguments for training or testing.

    Parameters
    ----------
    training_or_testing : str
        Either 'training' or 'testing'; selects which argument parser is built.

    Returns
    -------
    argparse.Namespace
        Parsed arguments augmented with derived fields: ``height``/``width``
        (from --imgsize), ``cuda``, and (training) ``eval_batch_size`` or
        (testing) ``paint``, normalized ``taus``/``radii`` lists.

    Raises
    ------
    ValueError
        If ``training_or_testing`` is neither 'training' nor 'testing'.
    """
    if (training_or_testing == 'training'):
        parser = argparse.ArgumentParser(description='BoundingBox-less Location with PyTorch.', formatter_class=CustomFormatter)
        # Pop the default optional group so the MANDATORY group is listed
        # first in --help, then re-append it after all options are declared.
        optional_args = parser._action_groups.pop()
        required_args = parser.add_argument_group('MANDATORY arguments')
        required_args.add_argument('--train-dir', required=True, help='Directory with training images. Must contain image files (any format), and a CSV or XML file containing groundtruth, as described in the README.')
        optional_args.add_argument('--val-dir', help="Directory with validation images and GT. If 'auto', 20%% of the training samples will be removed from training and used for validation. If left blank, no validation will be done.")
        optional_args.add_argument('--imgsize', type=str, default='256x256', metavar='HxW', help='Size of the input images (height x width).')
        optional_args.add_argument('--batch-size', type=strictly_positive_int, default=1, metavar='N', help='Input batch size for training.')
        optional_args.add_argument('--epochs', type=strictly_positive_int, default=np.inf, metavar='N', help='Number of epochs to train.')
        optional_args.add_argument('--nThreads', '-j', default=4, type=strictly_positive_int, metavar='N', help='Number of threads to create for data loading. Must be a striclty positive int')
        optional_args.add_argument('--lr', type=strictly_positive, default=4e-05, metavar='LR', help='Learning rate (step size).')
        optional_args.add_argument('-p', type=float, default=(- 1), metavar='P', help='alpha in the generalized mean (-inf => minimum)')
        optional_args.add_argument('--no-cuda', action='store_true', default=False, help='Disables CUDA training')
        optional_args.add_argument('--no-data-augm', action='store_true', default=False, help='Disables data augmentation (random vert+horiz flip)')
        optional_args.add_argument('--drop-last-batch', action='store_true', default=False, help='Drop the last batch during training, which may be incomplete. If the dataset size is not divisible by the batch size, then the last batch will be smaller.')
        optional_args.add_argument('--seed', type=int, default=1, metavar='S', help='Random seed.')
        optional_args.add_argument('--resume', default='', type=str, metavar='PATH', help='Path to latest checkpoint.')
        optional_args.add_argument('--save', default='', type=str, metavar='PATH', help='Where to save the model after each epoch.')
        optional_args.add_argument('--log-interval', type=strictly_positive, default=3, metavar='N', help='Time to wait between every time the losses are printed (in seconds).')
        optional_args.add_argument('--max-trainset-size', type=strictly_positive_int, default=np.inf, metavar='N', help='Only use the first N images of the training dataset.')
        optional_args.add_argument('--max-valset-size', type=strictly_positive_int, default=np.inf, metavar='N', help='Only use the first N images of the validation dataset.')
        optional_args.add_argument('--val-freq', default=1, type=int, metavar='F', help='Run validation every F epochs. If 0, no validation will be done. If no validation is done, a checkpoint will be saved every F epochs.')
        optional_args.add_argument('--visdom-env', default='default_environment', type=str, metavar='NAME', help='Name of the environment in Visdom.')
        optional_args.add_argument('--visdom-server', default=None, metavar='SRV', help='Hostname of the Visdom server. If not provided, nothing will be sent to Visdom.')
        optional_args.add_argument('--visdom-port', default=8989, metavar='PRT', help='Port of the Visdom server.')
        optional_args.add_argument('--optimizer', '--optim', default='sgd', type=str.lower, metavar='OPTIM', choices=['sgd', 'adam'], help='SGD or Adam.')
        optional_args.add_argument('--replace-optimizer', action='store_true', default=False, help='Replace optimizer state when resuming from checkpoint. If True, the optimizer will be replaced using the arguments of this scripts. If not resuming, it has no effect.')
        optional_args.add_argument('--max-mask-pts', type=strictly_positive_int, default=np.infty, metavar='M', help='Subsample this number of points from the mask, so that GMM fitting runs faster.')
        optional_args.add_argument('--paint', default=False, action='store_true', help='Paint red circles at the estimated locations in validation. This maskes it run much slower!')
        optional_args.add_argument('--radius', type=strictly_positive, default=5, metavar='R', help='Detections at dist <= R to a GT pointare considered True Positives.')
        optional_args.add_argument('--n-points', type=strictly_positive_int, default=None, metavar='N', help='If you know the number of points (e.g, just one pupil), then set it. Otherwise it will be estimated.')
        optional_args.add_argument('--ultrasmallnet', default=False, action='store_true', help='If True, the 5 central layers are removed,resulting in a much smaller UNet. This is used for example for the pupil dataset.Make sure to enable this if your are restoring a checkpoint that was trained using this option enabled.')
        optional_args.add_argument('--lambdaa', type=strictly_positive, default=1, metavar='L', help='Weight that will increase the importance of estimating the right number of points.')
        parser._action_groups.append(optional_args)
        args = parser.parse_args()
        # Only a batch size of 1 is implemented for evaluation.
        # (The original code also checked `args.eval_batch_size != 1` right
        # after this assignment; that branch was unreachable and was removed.)
        args.eval_batch_size = 1
        # Normalize checkpoint paths so equality comparison below is reliable.
        if (args.save != ''):
            args.save = os.path.abspath(args.save)
        if (args.resume != ''):
            args.resume = os.path.abspath(args.resume)
        # Refuse to clobber an existing checkpoint unless we are resuming from it.
        if ((args.save != '') and os.path.isfile(args.save) and (not (args.resume and (args.resume == args.save)))):
            print("E: Don't overwrite a checkpoint without resuming from it. Are you sure you want to do that? (if you do, remove it manually).")
            exit(1)
        args.cuda = ((not args.no_cuda) and torch.cuda.is_available())
    elif (training_or_testing == 'testing'):
        parser = argparse.ArgumentParser(description='BoundingBox-less Location with PyTorch (inference/test only)', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        optional_args = parser._action_groups.pop()
        required_args = parser.add_argument_group('MANDATORY arguments')
        required_args.add_argument('--dataset', required=True, help='Directory with test images. Must contain image files (any format), and (optionally) a CSV or XML file containing groundtruth, as described in the README.')
        required_args.add_argument('--out', type=str, required=True, help='Directory where results will be stored (images+CSV).')
        optional_args.add_argument('--model', type=str, metavar='PATH', help='Checkpoint with the CNN model.\n')
        optional_args.add_argument('--evaluate', action='store_true', default=False, help='Evaluate metrics (Precision/Recall, RMSE, MAPE, etc.)')
        optional_args.add_argument('--no-cuda', '--no-gpu', action='store_true', default=False, help='Use CPU only, no GPU.')
        optional_args.add_argument('--imgsize', type=str, default='256x256', metavar='HxW', help='Size of the input images (heightxwidth).')
        optional_args.add_argument('--radii', type=str, default=range(0, (15 + 1)), metavar='Rs', help='Detections at dist <= R to a GT pt are True Positives.If not selected, R=0, ..., 15 will be tested.')
        optional_args.add_argument('--taus', type=str, default=(- 2), metavar='Ts', help='Detection threshold between 0 and 1. tau=-1 means dynamic Otsu thresholding. tau=-2 means Beta Mixture Model-based thresholding.')
        optional_args.add_argument('--n-points', type=int, default=None, metavar='N', help='If you know the exact number of points in the image, then set it. Otherwise it will be estimated by adding a L1 cost term.')
        optional_args.add_argument('--max-mask-pts', type=int, default=np.infty, metavar='M', help='Subsample this number of points from the mask, so GMM fitting runs faster.')
        optional_args.add_argument('--no-paint', default=False, action='store_true', help="Don't paint a red circle at each estimated location.")
        optional_args.add_argument('--force', '-f', default=False, action='store_true', help='Overwrite output files if they exist. In fact, it removes the output directory first')
        optional_args.add_argument('--seed', type=int, default=0, metavar='S', help='Random seed.')
        optional_args.add_argument('--max-testset-size', type=int, default=np.inf, metavar='N', help='Only use the first N images of the testing dataset.')
        optional_args.add_argument('--nThreads', '-j', default=4, type=int, metavar='N', help='Number of data loading threads.')
        optional_args.add_argument('--ultrasmallnet', default=False, action='store_true', help='If True, the 5 central layers are removed,resulting in a much smaller UNet. This is used for example for the pupil dataset.Make sure to enable this if your are restoring a checkpoint that was trained using this option enabled.')
        parser._action_groups.append(optional_args)
        args = parser.parse_args()
        # Warn when CUDA was requested implicitly but is unavailable; args.cuda
        # below ends up False in that case anyway.
        if ((not args.no_cuda) and (not torch.cuda.is_available())):
            print('W: No GPU (CUDA) devices detected in your system, running with --no-gpu option...')
        args.cuda = ((not args.no_cuda) and torch.cuda.is_available())
        args.paint = (not args.no_paint)
        # Normalize --taus and --radii into lists (they may arrive as the
        # default list/range, a comma-separated string, or a single scalar).
        if isinstance(args.taus, (list, range)):
            pass
        elif (isinstance(args.taus, str) and (',' in args.taus)):
            args.taus = [float(tau) for tau in args.taus.replace('[', '').replace(']', '').split(',')]
        else:
            args.taus = [float(args.taus)]
        if isinstance(args.radii, (list, range)):
            pass
        elif (isinstance(args.radii, str) and (',' in args.radii)):
            args.radii = [int(r) for r in args.radii.replace('[', '').replace(']', '').split(',')]
        else:
            args.radii = [int(args.radii)]
    else:
        raise ValueError(("Only 'training' or 'testing' allowed, got %s" % training_or_testing))
    try:
        # parse() returns None when the pattern does not match; unpacking None
        # raises TypeError, which we turn into a friendly message below.
        (args.height, args.width) = parse('{}x{}', args.imgsize)
        (args.height, args.width) = (int(args.height), int(args.width))
    except TypeError as e:
        # Fixed: the accepted format is HxW (height x width), not WxH
        # (see the --imgsize metavar/help above).
        print("\\__ E: The input --imgsize must be in format HxW, got '{}'".format(args.imgsize))
        exit((- 1))
    return args
def insert_tagged_tokens(tokens, tags, template):
    """Substitute runs of identically-tagged tokens into a template.

    Consecutive tokens sharing the same non-'O' tag are joined with spaces
    and stored under that tag (a later run with the same tag overwrites an
    earlier one). Each whitespace-delimited word of ``template`` that
    matches a tag is replaced by its joined run; all other words are kept.
    """
    replacements = {}
    run_tag = None
    run_tokens = []

    def flush():
        # Store the finished run, if any.
        if run_tag is not None:
            replacements[run_tag] = ' '.join(run_tokens)

    for token, tag in zip(tokens, tags):
        if tag == run_tag:
            # Same tag as the current run: extend it.
            run_tokens.append(token)
            continue
        flush()
        if tag == 'O':
            # 'O' (outside) tokens are never collected.
            run_tag, run_tokens = None, []
        else:
            run_tag, run_tokens = tag, [token]
    flush()
    return ' '.join(replacements.get(word, word) for word in template.split())
class Optimizable_optimizer(Optimizer):
    """Adapter that runs an inner torch optimizer for a fixed number of
    epochs on the temperature-scaling NLL, with simple early stopping."""

    def __init__(self, optimizer: Optimizer, num_epochs: int):
        # Bug fix: the original called super(optimizer, self).__init__(...),
        # passing the *instance argument* as the class, which raises
        # TypeError at construction time. Use the zero-argument form.
        super().__init__(num_epochs)
        self.optimizer = optimizer

    def optimize(self, temp_model: ModelWithTemperature, lr: float, nll_criterion, logits: torch.FloatTensor, labels: torch.FloatTensor, before_temperature_nll: float):
        """Minimize the NLL of the temperature-scaled logits.

        Stops early when no improvement has been seen for more than 50
        epochs since the best one.

        NOTE(review): `lr` is accepted for interface compatibility but is
        unused here — the inner optimizer's own learning rate applies.
        """
        best_loss = before_temperature_nll
        best_epoch = 0
        for i in range(self._num_epochs):
            temp_model.zero_grad()
            loss = nll_criterion(temp_model.temperature_scale(logits), labels)
            loss.backward()
            self.optimizer.step()
            if (loss.item() < best_loss):
                best_loss = loss.item()
                # Bug fix: record the epoch at which the best loss occurred.
                # The original stored the constant 1, which broke the
                # (i - best_epoch) > 50 early-stopping condition below.
                best_epoch = i
            elif ((i - best_epoch) > 50):
                print('Stopped at {} with value {}'.format(best_epoch, best_loss))
                break
# NOTE(review): this bare call looks like a mangled `@register_model('lstm_lm')`
# decorator from fairseq — confirm against the original source; as written it
# discards its result and the class is never registered.
_model('lstm_lm')
class LSTMLanguageModel(FairseqLanguageModel):
    """LSTM-based language model: a decoder-only fairseq model."""

    def __init__(self, decoder):
        super().__init__(decoder)

    # NOTE(review): presumably decorated with @staticmethod upstream (no
    # `self`/`cls` parameter) — TODO confirm.
    def add_args(parser):
        """Register model-specific command-line arguments on `parser`."""
        parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability')
        parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension')
        parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding')
        parser.add_argument('--decoder-hidden-size', type=int, metavar='N', help='decoder hidden size')
        parser.add_argument('--decoder-layers', type=int, metavar='N', help='number of decoder layers')
        parser.add_argument('--decoder-out-embed-dim', type=int, metavar='N', help='decoder output embedding dimension')
        parser.add_argument('--decoder-attention', type=str, metavar='BOOL', help='decoder attention')
        parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. Must be used with adaptive_loss criterion')
        parser.add_argument('--decoder-dropout-in', type=float, metavar='D', help='dropout probability for decoder input embedding')
        parser.add_argument('--decoder-dropout-out', type=float, metavar='D', help='dropout probability for decoder output')
        parser.add_argument('--share-decoder-input-output-embed', default=False, action='store_true', help='share decoder input and output embeddings')

    # NOTE(review): presumably decorated with @classmethod upstream (takes
    # `cls` and ends with `return cls(decoder)`) — TODO confirm.
    def build_model(cls, args, task):
        """Build a new LSTMLanguageModel instance from parsed args and task."""
        # Fill in architecture defaults for any options left unset.
        base_architecture(args)
        if (getattr(args, 'max_target_positions', None) is not None):
            max_target_positions = args.max_target_positions
        else:
            max_target_positions = getattr(args, 'tokens_per_sample', DEFAULT_MAX_TARGET_POSITIONS)

        def load_pretrained_embedding_from_file(embed_path, dictionary, embed_dim):
            # Load pre-trained embeddings from disk and copy the overlapping
            # entries into a freshly initialized embedding table.
            num_embeddings = len(dictionary)
            padding_idx = dictionary.pad()
            embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
            embed_dict = utils.parse_embedding(embed_path)
            utils.print_embed_overlap(embed_dict, dictionary)
            return utils.load_embedding(embed_dict, dictionary, embed_tokens)
        pretrained_decoder_embed = None
        if args.decoder_embed_path:
            pretrained_decoder_embed = load_pretrained_embedding_from_file(args.decoder_embed_path, task.target_dictionary, args.decoder_embed_dim)
        if args.share_decoder_input_output_embed:
            # Tying input and output embeddings requires a shared vocabulary
            # and matching embedding dimensions.
            if (task.source_dictionary != task.target_dictionary):
                raise ValueError('--share-decoder-input-output-embeddings requires a joint dictionary')
            if (args.decoder_embed_dim != args.decoder_out_embed_dim):
                raise ValueError('--share-decoder-input-output-embeddings requires --decoder-embed-dim to match --decoder-out-embed-dim')
        # encoder_output_units=0: language modeling has no encoder to attend to.
        decoder = LSTMDecoder(dictionary=task.dictionary, embed_dim=args.decoder_embed_dim, hidden_size=args.decoder_hidden_size, out_embed_dim=args.decoder_out_embed_dim, num_layers=args.decoder_layers, dropout_in=args.decoder_dropout_in, dropout_out=args.decoder_dropout_out, attention=options.eval_bool(args.decoder_attention), encoder_output_units=0, pretrained_embed=pretrained_decoder_embed, share_input_output_embed=args.share_decoder_input_output_embed, adaptive_softmax_cutoff=(options.eval_str_list(args.adaptive_softmax_cutoff, type=int) if (args.criterion == 'adaptive_loss') else None), max_target_positions=max_target_positions)
        return cls(decoder)
def _open_stream(stream, read_or_write):
    """Return ``(we_opened_it, file_object)`` for a stream or a filename.

    If ``stream`` already exposes the requested method (e.g. 'read' or
    'write') it is used as-is and the caller must not close it. Otherwise
    it is treated as a filename and opened in binary mode ('rb'/'wb').

    Raises RuntimeError when ``stream`` is neither file-like nor a valid
    ``open()`` argument.
    """
    if hasattr(stream, read_or_write):
        # Already file-like: hand it back unopened.
        return (False, stream)
    mode = read_or_write[0] + 'b'
    try:
        return (True, open(stream, mode))
    except TypeError:
        raise RuntimeError('expected open file or filename')
def parse_args():
    """Parse command-line options for the tensor code generator.

    Returns:
        argparse.Namespace with fields ``style`` (None, 'numpy' or
        'mptensor'), ``verbose`` (bool), ``outfile`` (writable file,
        defaults to stdout) and ``infile`` (opened definition file).
    """
    # Typo fixed in the user-facing description: 'contruction' -> 'construction'.
    parser = argparse.ArgumentParser(description='Code generator for tensor construction')
    parser.add_argument('-s', metavar='style', dest='style', type=str, default=None, choices=['numpy', 'mptensor'], help='set output style ("numpy" or "mptensor")')
    parser.add_argument('-v', '--verbose', action='store_true', dest='verbose', help='verbose mode')
    parser.add_argument('-o', metavar='outfile', dest='outfile', type=argparse.FileType('w'), default=sys.stdout, help='write the result to outfile')
    parser.add_argument('infile', type=argparse.FileType('r'), help='tensor-network definition file')
    return parser.parse_args()
def CheckArgs(args):
    """Validate and post-process parsed command-line arguments.

    Derives feat_dim / num_targets / ivector_dim from the corresponding
    directories when those are given, validates numeric ranges, and
    normalizes --lstm-delay into one delay list per LSTM layer.

    Returns the (mutated) args namespace. Raises Exception or calls
    sys.exit() on invalid values.
    """
    if (not os.path.exists(args.config_dir)):
        os.makedirs(args.config_dir)
    # Dimensions derived from data directories override any explicit values.
    if (args.feat_dir is not None):
        args.feat_dim = common_lib.get_feat_dim(args.feat_dir)
    if (args.ali_dir is not None):
        args.num_targets = common_lib.get_number_of_leaves_from_tree(args.ali_dir)
    elif (args.tree_dir is not None):
        args.num_targets = common_lib.get_number_of_leaves_from_tree(args.tree_dir)
    if (args.ivector_dir is not None):
        args.ivector_dim = common_lib.get_ivector_dim(args.ivector_dir)
    if (not (args.feat_dim > 0)):
        # Typo fixed in the error message: 'postive' -> 'positive'.
        raise Exception('feat-dim has to be positive')
    if (not (args.num_targets > 0)):
        print(args.num_targets)
        raise Exception('num_targets has to be positive')
    if (not (args.ivector_dim >= 0)):
        raise Exception('ivector-dim has to be non-negative')
    if ((not (args.max_change_per_component >= 0)) or (not (args.max_change_per_component_final >= 0))):
        # Message normalized: consistent dashed option names
        # (was 'max_change-per-component-final').
        raise Exception('max-change-per-component and max-change-per-component-final should be non-negative')
    if (args.num_lstm_layers < 1):
        sys.exit('--num-lstm-layers has to be a positive integer')
    if ((args.clipping_threshold < 0) or (args.zeroing_threshold < 0)):
        sys.exit('--clipping-threshold and --zeroing-threshold have to be non-negative')
    if (not (args.zeroing_interval > 0)):
        raise Exception('--zeroing-interval has to be positive')
    if (args.lstm_delay is None):
        # Default: a delay of -1 for every LSTM layer.
        args.lstm_delay = ([[(- 1)]] * args.num_lstm_layers)
    else:
        try:
            args.lstm_delay = ParseLstmDelayString(args.lstm_delay.strip())
        except ValueError:
            sys.exit("--lstm-delay has incorrect format value. Provided value is '{0}'".format(args.lstm_delay))
    if (len(args.lstm_delay) != args.num_lstm_layers):
        sys.exit('--lstm-delay: Number of delays provided has to match --num-lstm-layers')
    return args
def preresnet101(**kwargs):
    """Build the 101-layer pre-activation ResNet.

    Any keyword arguments are forwarded to ``get_preresnet``.
    """
    model = get_preresnet(blocks=101, model_name='preresnet101', **kwargs)
    return model
class T5Adapter(BaseAdapter):
    """Adapter for T5-style sequence-to-sequence checkpoints."""

    def match(self, model_path: str):
        # Handle any checkpoint whose path mentions 't5'.
        return 't5' in model_path

    def load_model(self, model_path: str, from_pretrained_kwargs: dict):
        """Load the seq2seq model and its (slow, sentencepiece) tokenizer."""
        tok = T5Tokenizer.from_pretrained(model_path, use_fast=False)
        seq2seq = AutoModelForSeq2SeqLM.from_pretrained(model_path, low_cpu_mem_usage=True, **from_pretrained_kwargs)
        return (seq2seq, tok)
class Cell(object):
    """A single table cell: a payload plus formatting metadata."""

    def __init__(self, data=None, fmt=None, span=1, align=None):
        self.data = data
        # Normalize fmt: None -> default CellFormat, plain string -> a
        # CellFormat wrapping that format string, otherwise use as given.
        if fmt is None:
            self.fmt = CellFormat()
        elif isinstance(fmt, str):
            self.fmt = CellFormat(fmt=fmt)
        else:
            self.fmt = fmt
        self.span = span
        self.align = align

    def __str__(self):
        # Render the payload through the cell's %-style format string.
        return self.fmt.fmt % self.data
class MolGraph():
    """Molecular graph built from a SMILES string.

    Each undirected RDKit bond is stored as two directed bonds, so
    ``n_bonds`` grows by 2 per chemical bond.

    Attributes built here:
      f_atoms  -- per-atom feature vectors.
      f_bonds  -- per-directed-bond feature vectors.
      a2b      -- for each atom, indices of directed bonds pointing *to* it.
      b2a      -- for each directed bond, its source atom index.
      b2revb   -- for each directed bond, the index of its reverse bond.
    """

    def __init__(self, smiles: str, args: Namespace):
        self.smiles = smiles
        self.n_atoms = 0
        self.n_bonds = 0
        self.f_atoms = []
        self.f_bonds = []
        self.a2b = []
        self.b2a = []
        self.b2revb = []
        # NOTE(review): MolFromSmiles returns None for invalid SMILES, which
        # would crash at GetNumAtoms below — presumably callers pre-validate.
        mol = Chem.MolFromSmiles(smiles)
        self.mol = mol
        self.n_atoms = mol.GetNumAtoms()
        for (i, atom) in enumerate(mol.GetAtoms()):
            self.f_atoms.append(atom_features(atom))
        # NOTE(review): this rebuild is effectively a no-op copy; it appears
        # to be kept for parity with upstream code — confirm before removing.
        self.f_atoms = [self.f_atoms[i] for i in range(self.n_atoms)]
        for _ in range(self.n_atoms):
            self.a2b.append([])
        # Visit each atom pair once (a1 < a2); create both bond directions.
        for a1 in range(self.n_atoms):
            for a2 in range((a1 + 1), self.n_atoms):
                bond = mol.GetBondBetweenAtoms(a1, a2)
                if (bond is None):
                    continue
                f_bond = bond_features(bond)
                if args.atom_messages:
                    # Atom-message mode: bond features only, once per direction.
                    self.f_bonds.append(f_bond)
                    self.f_bonds.append(f_bond)
                else:
                    # Otherwise each directed bond carries its source atom's
                    # features concatenated with the bond features.
                    self.f_bonds.append((self.f_atoms[a1] + f_bond))
                    self.f_bonds.append((self.f_atoms[a2] + f_bond))
                # b1 is the directed bond a1->a2, b2 is a2->a1. Record the
                # incoming-bond lists, source atoms, and reverse mapping.
                b1 = self.n_bonds
                b2 = (b1 + 1)
                self.a2b[a2].append(b1)
                self.b2a.append(a1)
                self.a2b[a1].append(b2)
                self.b2a.append(a2)
                self.b2revb.append(b2)
                self.b2revb.append(b1)
                self.n_bonds += 2

    def ggnn_features(self, max_atoms=(- 1), out_size=(- 1)):
        """Return (atomic-number array, discrete edge matrix) for GGNN-style
        models.

        NOTE(review): -1 presumably means "no limit / natural size" for the
        helper functions used here — confirm against their documentation.
        """
        mol = self.mol
        type_check_num_atoms(mol, max_atoms)
        atom_array = construct_atomic_number_array(mol, out_size=out_size)
        adj_array = construct_discrete_edge_matrix(mol, out_size=out_size)
        return (atom_array, adj_array)
class OptimizationMethod(object):
    """Descriptor for a single optimization technique.

    Records the technique's name/group, device and SM-version requirements,
    what it optimizes (computation / memory / communication), and how it is
    applied (distributed-only, process mode, tunable). ``disabled`` starts
    False and can be toggled later.
    """

    def __init__(self, name, group, supported_devices=None, min_sm_version=None, opt_computation=None, opt_memory=None, opt_communication=None, distributed_only=False, process_mode='ONE_PROCESS', is_tunable=True):
        # Bug fix: the original used a mutable default argument
        # (supported_devices=['cpu', 'cuda']), so every instance created with
        # the default shared one list. Use None as the sentinel and build a
        # fresh list per instance instead.
        if supported_devices is None:
            supported_devices = ['cpu', 'cuda']
        self.name = name
        self.group = group
        self.supported_devices = supported_devices
        self.min_sm_version = min_sm_version
        self.opt_computation = opt_computation
        self.opt_memory = opt_memory
        self.opt_communication = opt_communication
        self.distributed_only = distributed_only
        self.process_mode = process_mode
        self.is_tunable = is_tunable
        self.disabled = False
def write_predictions(all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, verbose_logging, version_2_with_negative, null_score_diff_threshold):
    """Write final SQuAD-style predictions to disk.

    Collates per-feature model outputs into per-example n-best answer
    candidates, then writes up to three JSON files: the best prediction per
    question, the full n-best lists, and (when version_2_with_negative) the
    null-vs-best score differences.

    NOTE(review): assumes `all_results` has one entry per feature unique_id
    and that helpers (_get_best_indexes, get_final_text, _compute_softmax)
    and `logger` are defined elsewhere in this module.
    """
    logger.info(('Writing predictions to: %s' % output_prediction_file))
    logger.info(('Writing nbest to: %s' % output_nbest_file))
    # Group features by the example they were derived from.
    example_index_to_features = collections.defaultdict(list)
    for feature in all_features:
        example_index_to_features[feature.example_index].append(feature)
    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result
    _PrelimPrediction = collections.namedtuple('PrelimPrediction', ['feature_index', 'start_index', 'end_index', 'start_logit', 'end_logit'])
    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    scores_diff_json = collections.OrderedDict()
    for (example_index, example) in enumerate(all_examples):
        features = example_index_to_features[example_index]
        prelim_predictions = []
        # Track the minimum "no answer" score over this example's features
        # (only meaningful when version_2_with_negative).
        score_null = 1000000
        min_null_feature_index = 0
        null_start_logit = 0
        null_end_logit = 0
        for (feature_index, feature) in enumerate(features):
            result = unique_id_to_result[feature.unique_id]
            start_indexes = _get_best_indexes(result.start_logits, n_best_size)
            end_indexes = _get_best_indexes(result.end_logits, n_best_size)
            if version_2_with_negative:
                # Position 0 ([CLS]) scores the "no answer" hypothesis.
                feature_null_score = (result.start_logits[0] + result.end_logits[0])
                if (feature_null_score < score_null):
                    score_null = feature_null_score
                    min_null_feature_index = feature_index
                    null_start_logit = result.start_logits[0]
                    null_end_logit = result.end_logits[0]
            for start_index in start_indexes:
                for end_index in end_indexes:
                    # Discard invalid spans: out of range, not mapped back to
                    # the original text, not max-context, reversed, or too long.
                    if (start_index >= len(feature.tokens)):
                        continue
                    if (end_index >= len(feature.tokens)):
                        continue
                    if (start_index not in feature.token_to_orig_map):
                        continue
                    if (end_index not in feature.token_to_orig_map):
                        continue
                    if (not feature.token_is_max_context.get(start_index, False)):
                        continue
                    if (end_index < start_index):
                        continue
                    length = ((end_index - start_index) + 1)
                    if (length > max_answer_length):
                        continue
                    prelim_predictions.append(_PrelimPrediction(feature_index=feature_index, start_index=start_index, end_index=end_index, start_logit=result.start_logits[start_index], end_logit=result.end_logits[end_index]))
        if version_2_with_negative:
            prelim_predictions.append(_PrelimPrediction(feature_index=min_null_feature_index, start_index=0, end_index=0, start_logit=null_start_logit, end_logit=null_end_logit))
        # Best candidates first (highest combined start+end logit).
        prelim_predictions = sorted(prelim_predictions, key=(lambda x: (x.start_logit + x.end_logit)), reverse=True)
        _NbestPrediction = collections.namedtuple('NbestPrediction', ['text', 'start_logit', 'end_logit'])
        seen_predictions = {}
        nbest = []
        for pred in prelim_predictions:
            if (len(nbest) >= n_best_size):
                break
            feature = features[pred.feature_index]
            if (pred.start_index > 0):
                # Map the wordpiece span back onto the original document text.
                tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
                orig_doc_start = feature.token_to_orig_map[pred.start_index]
                orig_doc_end = feature.token_to_orig_map[pred.end_index]
                orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
                tok_text = ' '.join(tok_tokens)
                # De-tokenize wordpieces ('##' continuation markers) and
                # normalize whitespace before aligning with the original text.
                tok_text = tok_text.replace(' ##', '')
                tok_text = tok_text.replace('##', '')
                tok_text = tok_text.strip()
                tok_text = ' '.join(tok_text.split())
                orig_text = ' '.join(orig_tokens)
                final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
                if (final_text in seen_predictions):
                    continue
                seen_predictions[final_text] = True
            else:
                # start_index == 0 encodes the null ("no answer") prediction.
                final_text = ''
                seen_predictions[final_text] = True
            nbest.append(_NbestPrediction(text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit))
        if version_2_with_negative:
            if ('' not in seen_predictions):
                nbest.append(_NbestPrediction(text='', start_logit=null_start_logit, end_logit=null_end_logit))
            # Avoid a degenerate n-best list containing only the null entry.
            if (len(nbest) == 1):
                nbest.insert(0, _NbestPrediction(text='empty', start_logit=0.0, end_logit=0.0))
        if (not nbest):
            nbest.append(_NbestPrediction(text='empty', start_logit=0.0, end_logit=0.0))
        assert (len(nbest) >= 1)
        total_scores = []
        best_non_null_entry = None
        for entry in nbest:
            total_scores.append((entry.start_logit + entry.end_logit))
            if (not best_non_null_entry):
                if entry.text:
                    best_non_null_entry = entry
        probs = _compute_softmax(total_scores)
        nbest_json = []
        for (i, entry) in enumerate(nbest):
            output = collections.OrderedDict()
            output['text'] = entry.text
            output['probability'] = probs[i]
            output['start_logit'] = entry.start_logit
            output['end_logit'] = entry.end_logit
            nbest_json.append(output)
        assert (len(nbest_json) >= 1)
        if (not version_2_with_negative):
            all_predictions[example.qas_id] = nbest_json[0]['text']
        else:
            # Predict "" iff the null score beats the best non-null answer by
            # more than the tuned threshold.
            score_diff = ((score_null - best_non_null_entry.start_logit) - best_non_null_entry.end_logit)
            scores_diff_json[example.qas_id] = score_diff
            if (score_diff > null_score_diff_threshold):
                all_predictions[example.qas_id] = ''
            else:
                all_predictions[example.qas_id] = best_non_null_entry.text
        all_nbest_json[example.qas_id] = nbest_json
    with open(output_prediction_file, 'w') as writer:
        writer.write((json.dumps(all_predictions, indent=4) + '\n'))
    with open(output_nbest_file, 'w') as writer:
        writer.write((json.dumps(all_nbest_json, indent=4) + '\n'))
    if version_2_with_negative:
        with open(output_null_log_odds_file, 'w') as writer:
            writer.write((json.dumps(scores_diff_json, indent=4) + '\n'))
def save_pickle(filepath, x):
    """Serialize ``x`` to ``filepath`` using the highest pickle protocol."""
    with open(filepath, 'wb') as f:
        pickle.dump(x, f, protocol=pickle.HIGHEST_PROTOCOL)
class SigmoidNode(Node):
    """Code-generation node that applies an element-wise sigmoid to its
    predecessor's output: out = 1 / (1 + exp(-in))."""

    def __init__(self, prev_node):
        super().__init__(prev_node)
        # Sigmoid is element-wise, so output shape equals input shape.
        self.in_var = prev_node.out_var
        self.in_dim = prev_node.out_dim
        self.out_dim = self.in_dim
        self.out_var = Allocation.allocate_var('float', 'x', self.out_dim)

    def lowering(self):
        """Lower this node into nested loops assigning the sigmoid expression
        element by element."""
        # One loop (and index) per dimension of the input variable.
        (loops, idxs) = LoopNode.create_loops(self.in_var.dim)
        in_var_idx = IndexedVariable(self.in_var)
        out_var_idx = IndexedVariable(self.out_var)
        in_var_idx.set_indices(idxs)
        out_var_idx.set_indices(idxs)
        # C expression for the sigmoid; expf means the emitted code needs libm,
        # hence math_required below.
        expression = Expression('1.f / (1.f + expf(-{t_var_idx}))', t_var_idx=in_var_idx)
        node = AssignmentNode(out_var_idx, expression)
        # Attach the assignment to the innermost loop body.
        loops[(- 1)].add_edge('content', node)
        self.var_decls.append(self.out_var)
        self.math_required = True
        # The outermost loop becomes this node's generated content.
        self.add_edge('content', loops[0])
def fuse_bn(conv, bn):
    """Fold a BatchNorm layer into the preceding convolution's parameters.

    Returns ``(fused_kernel, fused_bias)`` such that a convolution with
    these parameters is equivalent (at inference) to ``conv`` followed by
    ``bn``, using the batch norm's running statistics.
    """
    # Per-channel scale applied by the batch norm: gamma / sqrt(var + eps).
    sigma = (bn.running_var + bn.eps).sqrt()
    scale = bn.weight / sigma
    # Scale each output-channel slice of the kernel, and shift the bias.
    fused_kernel = conv.weight * scale.reshape((- 1), 1, 1, 1)
    fused_bias = bn.bias - (bn.running_mean * scale)
    return (fused_kernel, fused_bias)
def main():
    """Filter a CSV from stdin to stdout, repairing the span column.

    The first row is treated as a header and copied through unchanged;
    for every other row, column 0 is parsed as a Python literal and passed
    through ``_fix_spans`` together with the text in column 1.
    """
    reader = csv.reader(sys.stdin)
    writer = csv.writer(sys.stdout)
    for idx, row in enumerate(reader):
        spans = row[0]
        if idx > 0:
            # Non-header rows: re-derive the spans from the row's text.
            spans = _fix_spans(ast.literal_eval(row[0]), row[1])
        writer.writerow([spans, row[1]])
class DistilBertModel(metaclass=DummyObject):
    """Placeholder class emitted when torch is not installed.

    Any attempt to use it raises an informative error through
    ``requires_backends`` instead of an opaque ImportError.
    """
    # Backends that must be installed for the real class to be usable.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class EndToEndModel(nn.Module):
    """Segmentation followed by per-object pose estimation.

    Runs the segmentation model, converts its per-pixel class scores into
    a label map, then estimates a pose for every known object with enough
    support in the segmentation.
    """

    def __init__(self, segm_model, pose_model, object_names, object_ids):
        super(EndToEndModel, self).__init__()
        self.segm_model = segm_model
        # Object masks are pooled to a fixed 240x320 resolution before pose
        # estimation.
        self.resize = nn.AdaptiveMaxPool2d((240, 320))
        self.pose_model = pose_model
        self.object_names = object_names
        self.object_ids = object_ids

    def forward(self, x):
        # Only batch size 1 is supported.
        assert x.size(0) == 1
        scores = self.segm_model(x)
        # Argmax over the class dimension gives the per-pixel label map.
        _, label_map = scores.max(1, keepdim=True)
        names, positions, orientations = [], [], []
        for idx, name in enumerate(self.object_names):
            mask = self.resize(label_map.eq(self.object_ids[idx]).float())
            # Skip objects with (almost) no support in the segmentation.
            if mask.sum().item() < 20:
                continue
            obj_index = torch.LongTensor([idx])
            position, orientation = self.pose_model(mask, obj_index)
            names.append(name)
            positions.append(position[0].cpu().numpy())
            orientations.append(orientation[0].cpu().numpy())
        return (label_map[0].cpu().numpy().squeeze(0), names, positions, orientations)
class myMerlinFlow(FlowSpec):
MODEL_FOLDER = Parameter(name='model_folder', help='Folder to store the model from Merlin, between steps', default='merlin_model')
ROW_SAMPLING = Parameter(name='row_sampling', help='Snowflake row sampling: if 0, NO sampling is applied', default='1')
TRAINING_END_DATE = Parameter(name='training_end_date', help='Data up until this date is used for training, format yyyy-mm-dd', default='2020-09-08')
VALIDATION_END_DATE = Parameter(name='validation_end_date', help='Data up after training end and until this date is used for validation, format yyyy-mm-dd', default='2020-09-15')
COMET_PROJECT_NAME = Parameter(name='comet_project_name', help='Name of the project in our Comet dashboard', default='two_tower_h_and_m_merlin')
VALIDATION_METRIC = Parameter(name='validation_metric', help='Merlin metric to use for picking the best set of hyperparameter', default='recall_at_10')
N_EPOCHS = Parameter(name='n_epoch', help='Number of epochs to train the Merlin model', default='1')
DYNAMO_TABLE = Parameter(name='dynamo_table', help='Name of dynamo db table to store the pre-computed recs. Default is same as in the serverless application', default='userItemTable')
TOP_K = Parameter(name='top_k', help='Number of products to recommend for a giver shopper', default='10')
def start(self):
print(('flow name: %s' % current.flow_name))
print(('run id: %s' % current.run_id))
print(('username: %s' % current.username))
if (os.environ.get('EN_BATCH', '0') == '1'):
print('ATTENTION: AWS BATCH ENABLED!')
from metaflow.metaflow_config import DATASTORE_SYSROOT_S3
print(('DATASTORE_SYSROOT_S3: %s' % DATASTORE_SYSROOT_S3))
if (DATASTORE_SYSROOT_S3 is None):
print('ATTENTION: LOCAL DATASTORE ENABLED')
assert (os.environ['COMET_API_KEY'] and self.COMET_PROJECT_NAME)
assert int(self.ROW_SAMPLING)
from snowflake_client import SnowflakeClient
sf_client = SnowflakeClient(os.environ['SF_USER'], os.environ['SF_PWD'], os.environ['SF_ACCOUNT'], os.environ['SF_ROLE'], os.environ['SF_WAREHOUSE'])
snowflake_version = sf_client.get_version()
print(snowflake_version)
assert (snowflake_version is not None)
self.training_end_date = datetime.strptime(self.TRAINING_END_DATE, '%Y-%m-%d')
self.validation_end_date = datetime.strptime(self.VALIDATION_END_DATE, '%Y-%m-%d')
assert (self.validation_end_date > self.training_end_date)
self.next(self.get_dataset)
def get_dataset(self):
from snowflake_client import SnowflakeClient
from pyarrow import Table as pt
sf_client = SnowflakeClient(os.environ['SF_USER'], os.environ['SF_PWD'], os.environ['SF_ACCOUNT'], os.environ['SF_ROLE'], os.environ['SF_WAREHOUSE'])
snowflake_sampling = int(self.ROW_SAMPLING)
sampling_expression = ('' if (snowflake_sampling == 0) else 'sample({})'.format(snowflake_sampling))
query = '\n SELECT \n ARTICLE_ID,\n PRODUCT_CODE, \n PRODUCT_TYPE_NO,\n PRODUCT_GROUP_NAME,\n GRAPHICAL_APPEARANCE_NO,\n COLOUR_GROUP_CODE,\n PERCEIVED_COLOUR_VALUE_ID,\n PERCEIVED_COLOUR_MASTER_ID,\n DEPARTMENT_NO,\n INDEX_CODE,\n INDEX_GROUP_NO,\n SECTION_NO,\n GARMENT_GROUP_NO,\n ACTIVE,\n FN,\n AGE,\n CLUB_MEMBER_STATUS,\n CUSTOMER_ID,\n FASHION_NEWS_FREQUENCY,\n POSTAL_CODE,\n PRICE,\n SALES_CHANNEL_ID,\n T_DAT,\n S3_URL\n FROM\n "EXPLORATION_DB"."HM_POST"."FILTERED_DATAFRAME"\n {}\n ORDER BY\n T_DAT ASC\n '.format(sampling_expression)
print('Fetching rows with query: \n {} \n\nIt may take a while...\n'.format(query))
dataset = sf_client.fetch_all(query, debug=True)
assert dataset
dataset = [{k.lower(): v for (k, v) in row.items()} for row in dataset]
self.item_id_2_meta = {str(r['article_id']): r for r in dataset}
print('Example articles: {}'.format(list(self.item_id_2_meta.keys())[:3]))
train_dataset = pt.from_pylist([row for row in dataset if (row['t_dat'] < self.training_end_date)])
validation_dataset = pt.from_pylist([row for row in dataset if ((row['t_dat'] >= self.training_end_date) and (row['t_dat'] < self.validation_end_date))])
test_dataset = pt.from_pylist([row for row in dataset if (row['t_dat'] >= self.validation_end_date)])
print('# {:,} events in the training set, {:,} for validation, {:,} for test'.format(len(train_dataset), len(validation_dataset), len(test_dataset)))
self.label_to_dataset = {'train': train_dataset, 'valid': validation_dataset, 'test': test_dataset}
self.next(self.build_workflow)
def build_workflow(self):
    """Fit an NVTabular preprocessing workflow and fan out hyper-parameter training.

    Fits the workflow on the concatenation of train/valid/test so categorical
    encodings are shared across splits, writes each transformed split to
    merlin/<label>/ as parquet, builds id -> raw-entity lookup tables, and
    fans out one `train_model` branch per hyper-parameter set.
    """
    from workflow_builder import get_nvt_workflow, read_to_dataframe
    import pandas as pd
    import itertools
    import nvtabular as nvt
    label_to_df = {}
    for (label, dataset) in self.label_to_dataset.items():
        label_to_df[label] = read_to_dataframe(dataset, label)
    # Fit on all splits combined so category mappings cover every value seen anywhere.
    full_dataset = nvt.Dataset(pd.concat(list(label_to_df.values())))
    workflow = get_nvt_workflow()
    workflow.fit(full_dataset)
    self.label_to_melin_dataset = {}
    for (label, _df) in label_to_df.items():
        cnt_dataset = nvt.Dataset(_df)
        self.label_to_melin_dataset[label] = cnt_dataset
        workflow.transform(cnt_dataset).to_parquet(output_path='merlin/{}/'.format(label))
    # NOTE(review): these parquet files are presumably the category mappings the
    # fitted workflow wrote under categories/ — confirm against the workflow config.
    user_unique_ids = list(pd.read_parquet('categories/unique.customer_id.parquet')['customer_id'])
    items_unique_ids = list(pd.read_parquet('categories/unique.article_id.parquet')['article_id'])
    # Encoded integer id -> raw customer/article id.
    self.id_2_user_id = {idx: _ for (idx, _) in enumerate(user_unique_ids)}
    self.id_2_item_id = {idx: _ for (idx, _) in enumerate(items_unique_ids)}
    batch_sizes = [16384, 4096]
    learning_rates = [0.04, 0.02]
    grid_search = []
    for params in itertools.product(batch_sizes, learning_rates):
        grid_search.append({'BATCH_SIZE': params[0], 'LEARNING_RATE': params[1]})
    # [:1] keeps only the first grid combination — presumably to bound cost
    # during development; remove the slice to run the full grid.
    self.hypers_sets = [json.dumps(_) for _ in grid_search][:1]
    print(self.hypers_sets)
    self.next(self.train_model, foreach='hypers_sets')
# NOTE(review): the bare expressions below look like Metaflow step decorators
# whose leading names (e.g. `@environment`, `@pip`) were lost during extraction
# — as written, `(vars=...)`/`(libraries=...)` are not valid standalone Python.
# Confirm against the original source before running.
(vars={'EN_BATCH': os.getenv('EN_BATCH'), 'COMET_API_KEY': os.getenv('COMET_API_KEY')})
_decorator(batch(memory=24000, image='public.ecr.aws/outerbounds/merlin-reasonable-scale:22.11-latest'), flag=os.getenv('EN_BATCH'))
(libraries={'requests': '2.28.1', 'comet-ml': '3.26.0'})
def train_model(self):
    """Train one Two-Tower retrieval model for the current hyper-parameter set.

    Runs as a Metaflow foreach branch: `self.input` is a JSON-encoded dict with
    BATCH_SIZE and LEARNING_RATE. Logs parameters and tags to Comet, trains on
    the parquet splits written by `build_workflow`, evaluates on the validation
    split, and saves the model under a path derived from the hyper string.
    """
    import hashlib
    from comet_ml import Experiment
    import merlin.models.tf as mm
    from merlin.io.dataset import Dataset
    from merlin.schema.tags import Tags
    import tensorflow as tf
    self.hyper_string = self.input
    self.hypers = json.loads(self.hyper_string)
    train = Dataset('merlin/train/*.parquet')
    valid = Dataset('merlin/valid/*.parquet')
    print('Train dataset shape: {}, Validation: {}'.format(train.to_ddf().compute().shape, valid.to_ddf().compute().shape))
    experiment = Experiment(api_key=os.getenv('COMET_API_KEY'), project_name=self.COMET_PROJECT_NAME)
    # Keep the experiment key so the join step can attach assets to this run later.
    self.comet_experiment_key = experiment.get_key()
    experiment.add_tag(current.pathspec)
    experiment.log_parameters(self.hypers)
    # Two-tower architecture: a user (query) tower and an item (candidate)
    # tower, each an input block followed by a 128 -> 64 MLP.
    user_schema = train.schema.select_by_tag(Tags.USER)
    user_inputs = mm.InputBlockV2(user_schema)
    query = mm.Encoder(user_inputs, mm.MLPBlock([128, 64]))
    item_schema = train.schema.select_by_tag(Tags.ITEM)
    item_inputs = mm.InputBlockV2(item_schema)
    candidate = mm.Encoder(item_inputs, mm.MLPBlock([128, 64]))
    model = mm.TwoTowerModelV2(query, candidate)
    opt = tf.keras.optimizers.Adagrad(learning_rate=self.hypers['LEARNING_RATE'])
    model.compile(optimizer=opt, run_eagerly=False, metrics=[mm.RecallAt(int(self.TOP_K)), mm.NDCGAt(int(self.TOP_K))])
    model.fit(train, validation_data=valid, batch_size=self.hypers['BATCH_SIZE'], epochs=int(self.N_EPOCHS))
    self.metrics = model.evaluate(valid, batch_size=1024, return_dict=True)
    print('\n\n====> Eval results: {}\n\n'.format(self.metrics))
    # Hash the hyper string so concurrent foreach branches save to distinct paths.
    model_hash = str(hashlib.md5(self.hyper_string.encode('utf-8')).hexdigest())
    self.model_path = 'merlin/model{}/'.format(model_hash)
    model.save(self.model_path)
    print('Model saved!')
    self.next(self.join_runs)
def get_items_topk_recommender_model(self, train_dataset, model, k: int):
    """Wrap a trained retrieval model as a top-k recommender over the item catalog.

    Builds the candidate set as one row per unique item in `train_dataset`,
    converts `model` into a top-k encoder over those candidates, and compiles
    the resulting recommender.
    """
    from merlin.models.utils.dataset import unique_rows_by_features
    from merlin.schema.tags import Tags
    # One candidate row per unique item id.
    catalog = unique_rows_by_features(train_dataset, Tags.ITEM, Tags.ITEM_ID)
    recommender = model.to_top_k_encoder(catalog, k=k, batch_size=128)
    recommender.compile(run_eagerly=False)
    return recommender
def join_runs(self, inputs):
    """Join step: pick the best hyper-parameter run by validation metric.

    Collects per-run artifacts from the fan-out, selects the run with the
    highest `self.VALIDATION_METRIC`, and carries its model path and Comet
    experiment key forward to the testing steps.
    """
    self.model_paths = {inp.hyper_string: inp.model_path for inp in inputs}
    self.experiment_keys = {inp.hyper_string: inp.comet_experiment_key for inp in inputs}
    self.results_from_runs = {inp.hyper_string: inp.metrics[self.VALIDATION_METRIC] for inp in inputs}
    print('Current results: {}'.format(self.results_from_runs))
    # FIX: the original assigned the winning score to a local named
    # `self_best_result` (a typo for an attribute), silently discarding it;
    # store it on the flow state. max() replaces sorted(...)[0] — identical
    # winner, including tie-breaking on first occurrence.
    (self.best_model, self.best_result) = max(self.results_from_runs.items(), key=(lambda x: x[1]))
    print('Best model is: {}, best path is {}'.format(self.best_model, self.model_paths[self.best_model]))
    self.final_model_path = self.model_paths[self.best_model]
    # These artifacts are identical across branches; take them from the first input.
    self.item_id_2_meta = inputs[0].item_id_2_meta
    self.id_2_item_id = inputs[0].id_2_item_id
    self.id_2_user_id = inputs[0].id_2_user_id
    self.magicdir = inputs[0].magicdir
    self.experiment_key = self.experiment_keys[self.best_model]
    self.next(self.model_testing)
def prepare_predictions_for_comet_panel(self, h_m_shoppers, best_predictions, item_id_2_meta, api_key, experiment_key):
    """Log the first few shoppers' recommendations to Comet as a JSON asset.

    Takes up to 10 shoppers, flattens their predicted items into one row per
    (shopper, item) with the item's product group, attaches the rows to the
    existing Comet experiment as 'predictions.json', and returns them.
    """
    from comet_ml import ExistingExperiment
    n_shoppers = 10
    predictions_to_log = []
    for shopper in h_m_shoppers[:n_shoppers]:
        shopper_preds = best_predictions.get(shopper, None)
        if not shopper_preds:
            continue
        for item_id in shopper_preds['items']:
            if item_id in item_id_2_meta:
                product_type = item_id_2_meta[item_id]['product_group_name']
            else:
                product_type = 'NO_GROUP'
            predictions_to_log.append({'user_id': shopper, 'product_id': item_id, 'product_type': product_type, 'score': 1.0})
    experiment = ExistingExperiment(api_key=api_key, experiment_key=experiment_key)
    experiment.log_asset_data(predictions_to_log, name='predictions.json')
    return predictions_to_log
def load_merlin_model(self, dataset, path):
    """Reload a saved Merlin/Keras model from `path` and warm it up.

    Runs one sample batch from `dataset` through the model (no targets) so
    the restored graph is built before callers use it.
    """
    import tensorflow as tf
    import merlin.models.tf as mm
    loaded_model = tf.keras.models.load_model(path)
    # Single forward pass to materialize the model; the output is discarded.
    _ = loaded_model(mm.sample_batch(dataset, batch_size=128, include_targets=False))
    print('Model re-loaded!')
    return loaded_model
# NOTE(review): stripped decorator residue (cf. the note at the other
# `_decorator(...)` sites) — `(vars=...)` is not valid standalone Python;
# confirm the original decorators before running.
(vars={'EN_BATCH': os.getenv('EN_BATCH')})
_decorator(batch(memory=24000, image='public.ecr.aws/outerbounds/merlin-reasonable-scale:22.11-latest'), flag=os.getenv('EN_BATCH'))
def model_testing(self):
    """Evaluate the winning model's top-k retrieval metrics on the test split."""
    from merlin.io.dataset import Dataset
    import merlin.models.tf as mm
    from merlin.schema import Tags
    test = Dataset('merlin/test/*.parquet')
    loaded_model = self.load_merlin_model(test, self.final_model_path)
    topk_rec_model = self.get_items_topk_recommender_model(test, loaded_model, k=int(self.TOP_K))
    # ToTarget turns the item-id column into the label for evaluation.
    test_loader = mm.Loader(test, batch_size=1024, transform=mm.ToTarget(test.schema, Tags.ITEM_ID))
    self.test_metrics = topk_rec_model.evaluate(test_loader, batch_size=1024, return_dict=True)
    print('\n\n====> Test results: {}\n\n'.format(self.test_metrics))
    self.next(self.saving_predictions)
# NOTE(review): stripped decorator residue — see the matching note on the
# other `_decorator(...)` sites; confirm the original decorators.
(vars={'EN_BATCH': os.getenv('EN_BATCH'), 'COMET_API_KEY': os.getenv('COMET_API_KEY')})
_decorator(batch(image='public.ecr.aws/outerbounds/merlin-reasonable-scale:22.11-latest'), flag=os.getenv('EN_BATCH'))
(libraries={'requests': '2.28.1', 'comet-ml': '3.26.0'})
def saving_predictions(self):
    """Generate top-k predictions for every test shopper and log a sample to Comet.

    Candidates come from the training set; queries from the test set. Raw
    integer predictions are mapped back to SKU strings via the lookup tables
    built in `build_workflow`.
    """
    from merlin.io.dataset import Dataset
    import merlin.models.tf as mm
    train = Dataset('merlin/train/*.parquet')
    test = Dataset('merlin/test/*.parquet')
    loaded_model = self.load_merlin_model(test, self.final_model_path)
    topk_rec_model = self.get_items_topk_recommender_model(train, loaded_model, k=int(self.TOP_K))
    # shuffle=False keeps prediction rows aligned with the loader's row order,
    # which the assert below relies on.
    test_dataset = mm.Loader(test, batch_size=1024, shuffle=False)
    # predict() returns a pair; [1] is kept — presumably the predicted item
    # ids (with [0] the scores); confirm against the Merlin models API.
    self.raw_predictions = topk_rec_model.predict(test_dataset)[1]
    n_rows = self.raw_predictions.shape[0]
    self.target_shoppers = test_dataset.data.to_ddf().compute()['customer_id']
    print('Inspect the shopper object for debugging...{}'.format(type(self.target_shoppers)))
    assert (n_rows == len(self.target_shoppers))
    # Map encoded customer ids back to raw H&M shopper ids.
    self.h_m_shoppers = [str(self.id_2_user_id[_]) for _ in self.target_shoppers.to_numpy().tolist()]
    print('Example target shoppers: ', self.h_m_shoppers[:3])
    self.target_items = test_dataset.data.to_ddf().compute()['article_id']
    print('Example target items: ', self.target_items[:3])
    self.best_predictions = self.serialize_predictions(self.h_m_shoppers, self.id_2_item_id, self.raw_predictions, self.target_items, n_rows)
    print('Example target predictions', self.best_predictions[self.h_m_shoppers[0]])
    self.prepare_predictions_for_comet_panel(self.h_m_shoppers, self.best_predictions, self.item_id_2_meta, os.getenv('COMET_API_KEY'), self.experiment_key)
    print(n_rows, len(self.best_predictions))
    self.next(self.export_to_app)
def serialize_predictions(self, h_m_shoppers, id_2_item_id, raw_predictions, target_items, n_rows):
    """Map raw integer predictions back to item-SKU strings, one entry per shopper.

    Only the first row seen for each shopper is kept. Each entry holds the
    predicted SKU list under 'items' and the ground-truth SKU under 'target'.
    """
    predictions = {}
    for row_idx in range(n_rows):
        shopper = h_m_shoppers[row_idx]
        if shopper in predictions:
            # Keep only the first row per shopper.
            continue
        predicted_skus = [str(id_2_item_id[i]) for i in raw_predictions[row_idx].tolist()]
        target_sku = str(id_2_item_id[target_items[row_idx]])
        predictions[shopper] = {'items': predicted_skus, 'target': target_sku}
    return predictions
def export_to_app(self):
    """Optionally build a DataFrame of predictions plus CLIP image vectors for the demo app.

    Gated by EXPORT_TO_APP=1. Keeps at most 100 (target, top prediction)
    pairs where both items have an image URL, then embeds the target images
    with CLIP and stores the result on `self.prediction_df`.
    """
    if (not (os.environ.get('EXPORT_TO_APP', None) == '1')):
        print('Skipping exporting data to the CLIP-based Streamlit app.')
    else:
        import pandas as pd
        from app_utils import encode_image
        import torch
        from transformers import CLIPProcessor, CLIPModel
        rows = []
        max_preds = 100
        for (shopper, preds) in self.best_predictions.items():
            target_item = preds['target']
            target_img_url = self.item_id_2_meta[target_item]['s3_url']
            top_pred = preds['items'][0]
            predicted_img_url = self.item_id_2_meta[top_pred]['s3_url']
            # The app needs both images; skip pairs with either missing.
            if ((not target_img_url) or (not predicted_img_url)):
                continue
            new_row = {'user_id': shopper, 'target_item': target_item, 'predicted_item': top_pred, 'target_image_url': target_img_url, 'predicted_image_url': predicted_img_url, 'product_type': self.item_id_2_meta[target_item]['product_group_name']}
            rows.append(new_row)
            if (len(rows) >= max_preds):
                break
        df = pd.DataFrame(rows)
        # NOTE(review): fails if fewer than 100 usable pairs exist — confirm
        # this hard requirement is intended.
        assert (len(df) == max_preds)
        device = ('cuda' if torch.cuda.is_available() else 'cpu')
        model = CLIPModel.from_pretrained('openai/clip-vit-base-patch32').to(device)
        processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32')
        img_vectors = []
        for img in list(df['target_image_url']):
            cnt_vector = encode_image(model, processor, img, device)
            img_vectors.append(cnt_vector[0])
        df['image_vectors'] = img_vectors
        self.prediction_df = df
    self.next(self.cache_predictions)
def cache_predictions(self):
    """Optionally cache per-user recommendations in DynamoDB.

    Controlled by the SAVE_TO_CACHE env var ('1' to enable). Writes one row
    per shopper plus a 'no_user' sentinel row used to smoke-test the API.
    """
    # FIX: the original called int(os.getenv('SAVE_TO_CACHE')), which raises
    # TypeError when the variable is unset; default to '0' (skip caching).
    if not bool(int(os.getenv('SAVE_TO_CACHE', '0'))):
        print('Skipping deployment')
    else:
        print('Caching predictions in DynamoDB')
        import boto3
        dynamodb = boto3.resource('dynamodb', region_name='us-west-2')
        table = dynamodb.Table(self.DYNAMO_TABLE)
        data = [{'userId': user, 'recs': json.dumps(recs)} for (user, recs) in self.best_predictions.items()]
        # Sentinel row so the serving API can be tested without a real user.
        data.append({'userId': 'no_user', 'recs': json.dumps(['test_rec_{}'.format(_) for _ in range(int(self.TOP_K))])})
        with table.batch_writer() as writer:
            for item in data:
                writer.put_item(Item=item)
        print('Predictions are all cached in DynamoDB')
    self.next(self.end)
def end(self):
    """Final step of the flow: print a goodbye and finish."""
    print('All done\n\nSee you, recSys cowboy\n')
    return
def split_data_slice(data, output_file, slice_id, days_offset, days_train, days_test):
    """Split session data into a train/test slice by time and write both to TSV.

    The slice starts `days_offset` days after the earliest event; sessions
    whose last event falls in the first `days_train` days go to training, the
    following `days_test` days to testing. Test events are restricted to items
    seen in training, and test sessions shorter than 2 events are dropped.

    Args:
        data: DataFrame with SessionId, ItemId and Time (unix seconds) columns.
        output_file: path prefix; writes `<prefix>_train_full.<slice>.txt`
            and `<prefix>_test.<slice>.txt`.
        slice_id: integer identifier used in filenames and log lines.
        days_offset, days_train, days_test: slice geometry in days.
    """
    # FIX: the original called datetime.timedelta(...), but `datetime` here is
    # the class (it is used as datetime.fromtimestamp above), which has no
    # `timedelta` attribute — import timedelta explicitly.
    from datetime import timedelta
    data_start = datetime.fromtimestamp(data.Time.min(), timezone.utc)
    data_end = datetime.fromtimestamp(data.Time.max(), timezone.utc)
    print('Full data set {}\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}'.format(slice_id, len(data), data.SessionId.nunique(), data.ItemId.nunique(), data_start.isoformat(), data_end.isoformat()))
    start = (datetime.fromtimestamp(data.Time.min(), timezone.utc) + timedelta(days_offset))
    middle = (start + timedelta(days_train))
    end = (middle + timedelta(days_test))
    # Keep only sessions whose last event falls inside [start, end].
    session_max_times = data.groupby('SessionId').Time.max()
    greater_start = session_max_times[(session_max_times >= start.timestamp())].index
    lower_end = session_max_times[(session_max_times <= end.timestamp())].index
    data_filtered = data[np.in1d(data.SessionId, greater_start.intersection(lower_end))]
    print('Slice data set {}\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {} / {}'.format(slice_id, len(data_filtered), data_filtered.SessionId.nunique(), data_filtered.ItemId.nunique(), start.date().isoformat(), middle.date().isoformat(), end.date().isoformat()))
    # Sessions ending before `middle` train; the rest test.
    session_max_times = data_filtered.groupby('SessionId').Time.max()
    sessions_train = session_max_times[(session_max_times < middle.timestamp())].index
    sessions_test = session_max_times[(session_max_times >= middle.timestamp())].index
    train = data[np.in1d(data.SessionId, sessions_train)]
    print('Train set {}\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}'.format(slice_id, len(train), train.SessionId.nunique(), train.ItemId.nunique(), start.date().isoformat(), middle.date().isoformat()))
    train.to_csv((((output_file + '_train_full.') + str(slice_id)) + '.txt'), sep='\t', index=False)
    test = data[np.in1d(data.SessionId, sessions_test)]
    # Drop test items never seen in training, then sessions left with < 2 events.
    test = test[np.in1d(test.ItemId, train.ItemId)]
    tslength = test.groupby('SessionId').size()
    test = test[np.in1d(test.SessionId, tslength[(tslength >= 2)].index)]
    print('Test set {}\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {} \n\n'.format(slice_id, len(test), test.SessionId.nunique(), test.ItemId.nunique(), middle.date().isoformat(), end.date().isoformat()))
    test.to_csv((((output_file + '_test.') + str(slice_id)) + '.txt'), sep='\t', index=False)
class MetricList():
    """Accumulate a dictionary of metric callables over successive batches.

    Each call adds every metric's value for the given batch to a running
    total; `get_results` optionally divides the totals by a normalizer.
    """

    def __init__(self, metrics):
        assert isinstance(metrics, dict), "'metrics' must be a dictionary of callables"
        self.metrics = metrics
        self.reset()

    def __call__(self, y_out, y_batch):
        # Evaluate every metric on this batch and add it to the running total.
        for name in self.metrics:
            self.results[name] = self.results[name] + self.metrics[name](y_out, y_batch)

    def reset(self):
        """Zero every accumulator."""
        self.results = dict.fromkeys(self.metrics, 0.0)

    def get_results(self, normalize=False):
        """Return totals, divided by `normalize` when it is a non-zero number."""
        assert (isinstance(normalize, bool) or isinstance(normalize, Number)), "'normalize' must be boolean or a number"
        if not normalize:
            return self.results
        return {name: total / normalize for (name, total) in self.results.items()}
def TorchComplexMul(v1_complex, v2_complex):
    """Multiply two complex tensors stored as [real | imag] halves on the last dim.

    Each input's last dimension is split in two: the first half holds the
    real parts, the second half the imaginary parts. The product is returned
    in the same layout.
    """
    a, b = v1_complex.chunk(2, dim=-1)
    c, d = v2_complex.chunk(2, dim=-1)
    # (a + ib)(c + id) = (ac - bd) + i(ad + bc)
    real_part = a * c - b * d
    imag_part = a * d + b * c
    return torch.cat((real_part, imag_part), dim=-1)
def mock_k8s_client():
    """Patch the singleton k8s client with test doubles for all API calls.

    The get_* lookups for custom resources and pods are routed to the
    module-level fake implementations, while create/delete calls become
    MagicMocks reporting success. get_service returns False so services
    appear absent (forcing creation paths in code under test).
    """
    k8s_client = k8sClient.singleton_instance('default')
    k8s_client.get_custom_resource = _get_training_job
    k8s_client.get_pod = _get_pod
    k8s_client.list_namespaced_pod = mock_list_namespaced_pod
    k8s_client.create_custom_resource = mock.MagicMock(return_value=True)
    k8s_client.delete_custom_resource = mock.MagicMock(return_value=True)
    k8s_client.create_pod = mock.MagicMock(return_value=True)
    k8s_client.delete_pod = mock.MagicMock(return_value=True)
    k8s_client.create_service = mock.MagicMock(return_value=True)
    k8s_client.get_service = mock.MagicMock(return_value=False)
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, output_mode, cls_token_at_end=False, pad_on_left=False, cls_token='[CLS]', sep_token='[SEP]', pad_token=0, sequence_a_segment_id=0, sequence_b_segment_id=1, cls_token_segment_id=1, pad_token_segment_id=0, mask_padding_with_zero=True, do_lower_case=False, is_multi_choice=True):
    """Tokenize examples into fixed-length BERT-style InputFeatures.

    Builds `[CLS] a [SEP] (b [SEP] (c [SEP]))` sequences (CLS position and
    padding side are configurable), truncates to `max_seq_length`, and pads
    ids/mask/segments to exactly that length.

    When `is_multi_choice` is True, features are grouped into sub-lists of
    `len(label_list)` consecutive examples (one group per question); otherwise
    a flat list of InputFeatures is returned.
    """
    label_map = {label: i for (i, label) in enumerate(label_list)}
    num_labels = len(label_list)
    if is_multi_choice:
        # Grouped output: the last (possibly open) group is features[-1].
        features = [[]]
    else:
        features = []
    for (ex_index, example) in enumerate(examples):
        if do_lower_case:
            # NOTE(review): lowers text_b/text_c unconditionally — assumes they
            # are never None when do_lower_case is set; confirm with callers.
            example.text_a = example.text_a.lower()
            example.text_b = example.text_b.lower()
            example.text_c = example.text_c.lower()
        if ((ex_index % 10000) == 0):
            logger.info(('Writing example %d of %d' % (ex_index, len(examples))))
        tokens_a = tokenizer.tokenize(example.text_a)
        tokens_b = None
        tokens_c = None
        # Reserve room for the special tokens: 4 for a/b/c ([CLS] + 3x[SEP]),
        # 3 for a/b, 2 for a alone.
        if (example.text_b and example.text_c):
            tokens_b = tokenizer.tokenize(example.text_b)
            tokens_c = tokenizer.tokenize(example.text_c)
            _truncate_seq_tuple(tokens_a, tokens_b, tokens_c, (max_seq_length - 4))
        elif (example.text_b and (not example.text_c)):
            tokens_b = tokenizer.tokenize(example.text_b)
            _truncate_seq_pair(tokens_a, tokens_b, (max_seq_length - 3))
        elif (len(tokens_a) > (max_seq_length - 2)):
            tokens_a = tokens_a[:(max_seq_length - 2)]
        tokens = (tokens_a + [sep_token])
        segment_ids = ([sequence_a_segment_id] * len(tokens))
        if tokens_c:
            # Fold segment c into b so both share the B segment id.
            tokens_b += ([sep_token] + tokens_c)
        if tokens_b:
            tokens += (tokens_b + [sep_token])
            segment_ids += ([sequence_b_segment_id] * (len(tokens_b) + 1))
        if cls_token_at_end:
            # XLNet-style: CLS appended at the end.
            tokens = (tokens + [cls_token])
            segment_ids = (segment_ids + [cls_token_segment_id])
        else:
            # BERT-style: CLS prepended.
            tokens = ([cls_token] + tokens)
            segment_ids = ([cls_token_segment_id] + segment_ids)
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # Mask is 1 for real tokens when mask_padding_with_zero (the usual case).
        input_mask = ([(1 if mask_padding_with_zero else 0)] * len(input_ids))
        padding_length = (max_seq_length - len(input_ids))
        if pad_on_left:
            input_ids = (([pad_token] * padding_length) + input_ids)
            input_mask = (([(0 if mask_padding_with_zero else 1)] * padding_length) + input_mask)
            segment_ids = (([pad_token_segment_id] * padding_length) + segment_ids)
        else:
            input_ids = (input_ids + ([pad_token] * padding_length))
            input_mask = (input_mask + ([(0 if mask_padding_with_zero else 1)] * padding_length))
            segment_ids = (segment_ids + ([pad_token_segment_id] * padding_length))
        assert (len(input_ids) == max_seq_length)
        assert (len(input_mask) == max_seq_length)
        assert (len(segment_ids) == max_seq_length)
        if (output_mode in ['classification', 'multi-choice']):
            label_id = label_map[example.label]
        elif (output_mode == 'regression'):
            label_id = float(example.label)
        else:
            raise KeyError(output_mode)
        if is_multi_choice:
            features[(- 1)].append(InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id))
            # Start a new group once the current one holds one entry per choice.
            if (len(features[(- 1)]) == num_labels):
                features.append([])
        else:
            features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id))
    if is_multi_choice:
        # Drop the trailing empty group opened after the last full one.
        if (len(features[(- 1)]) == 0):
            features = features[:(- 1)]
    return features
class ErnieForMaskedLM(metaclass=DummyObject):
    """Import-time placeholder used when the torch backend is unavailable.

    Instantiating it raises via `requires_backends` with an installation hint
    instead of failing at import time.
    """
    # Backends that must be installed for the real implementation.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def get_bernoulli_vae_schema(config):
    """Build the two-stage schema (flatten -> Bernoulli likelihood head) for the VAE.

    The likelihood stage reads `num_z_channels` and the logit-net hidden sizes
    from `config`, and attaches the q-coupler configuration for flattened input.
    """
    likelihood_stage = {
        'type': 'bernoulli-likelihood',
        'num_z_channels': config['num_z_channels'],
        'logit_net': {'type': 'mlp', 'activation': 'tanh', 'hidden_channels': config['logit_net']},
        'q_coupler': get_q_coupler_config(config, flattened=True),
    }
    return [{'type': 'flatten'}, likelihood_stage]
def build_fake_yaml():
    """Write a canned quantization/tuning config to ``fake_yaml.yaml`` for tests.

    The config is round-tripped through the YAML parser so the file on disk
    is normalized YAML rather than the raw literal.
    """
    fake_yaml = '\n model:\n name: fake_yaml\n framework: tensorflow\n inputs: input\n device: cpu\n quantization:\n model_wise:\n weight:\n granularity: per_tensor\n scheme: sym\n dtype: int8\n algorithm: minmax\n evaluation:\n accuracy:\n metric:\n topk: 1\n tuning:\n strategy:\n name: basic\n accuracy_criterion:\n relative: 0.1\n exit_policy:\n performance_only: True\n workspace:\n path: saved\n '
    y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)
    # FIX: the original called f.close() after the `with` block, which had
    # already closed the file — the context manager handles cleanup.
    with open('fake_yaml.yaml', 'w', encoding='utf-8') as f:
        yaml.dump(y, f)
def test_set_reactivity():
    """The `%flow reactivity` magic should toggle between BATCH and INCREMENTAL modes."""
    # Default mode is BATCH.
    assert (flow().mut_settings.reactivity_mode == ReactivityMode.BATCH)
    run_cell(f'%flow reactivity {ReactivityMode.INCREMENTAL.value}')
    assert (flow().mut_settings.reactivity_mode == ReactivityMode.INCREMENTAL)
    # Toggling back restores BATCH.
    run_cell(f'%flow reactivity {ReactivityMode.BATCH.value}')
    assert (flow().mut_settings.reactivity_mode == ReactivityMode.BATCH)
class Residual(nn.Module):
    """Two stacked ConvBN blocks with an identity skip connection.

    Output is `x + relu(conv2(conv1(x)))`; both convolutions keep the channel
    count `c` unchanged so the shapes match for the addition.
    """

    def __init__(self, do_batchnorm, c, **kw):
        super().__init__()
        self.res1 = ConvBN(do_batchnorm, c, c, **kw)
        self.res2 = ConvBN(do_batchnorm, c, c, **kw)

    def forward(self, x):
        branch = self.res2(self.res1(x))
        return x + F.relu(branch)

    def prep_finetune(self, iid, c, **kw):
        """Chain the finetune-prep results of both conv blocks into one iterable."""
        prepared = [block.prep_finetune(iid, c, c, **kw) for block in (self.res1, self.res2)]
        return itertools.chain.from_iterable(prepared)
# NOTE(review): the bare `_module()` call below looks like a registry decorator
# stripped of its attribute access during extraction (e.g.
# `@HOOKS.register_module()`); confirm against the upstream source.
_module()
class DistSamplerSeedHook(Hook):
    """Hook that seeds distributed samplers with the epoch number before each epoch.

    Calling `set_epoch` gives a different but reproducible shuffle per epoch
    under distributed training.
    """

    def before_epoch(self, runner):
        # Plain samplers expose set_epoch directly...
        if hasattr(runner.data_loader.sampler, 'set_epoch'):
            runner.data_loader.sampler.set_epoch(runner.epoch)
        # ...while batch samplers wrap the underlying sampler.
        elif hasattr(runner.data_loader.batch_sampler.sampler, 'set_epoch'):
            runner.data_loader.batch_sampler.sampler.set_epoch(runner.epoch)
def init_yolov3(args, device):
    """Build a YOLOv3 model and load the partially matching pretrained weights.

    Downloads yolov3.pt if needed (rank 0 only under DDP), optionally
    overrides the anchor count from the hyper-parameter dict, and loads only
    the checkpoint entries whose shapes match the freshly built network.
    """
    import torch
    from models.yolo import Model
    from utils.google_utils import attempt_download
    from utils.torch_utils import intersect_dicts, torch_distributed_zero_first
    log.info('Loading yolov3.pt weights.')
    hyp = args.yolo_hyp()
    # Only the first rank downloads; the others wait at the barrier.
    with torch_distributed_zero_first(args.global_rank):
        attempt_download('yolov3.pt')
    ckpt = torch.load('yolov3.pt', map_location=device)
    if hyp.get('anchors'):
        ckpt['model'].yaml['anchors'] = round(hyp['anchors'])
    net = Model((args.cfg or ckpt['model'].yaml), ch=3, nc=args.nc).to(device)
    # Exclude anchors from loading when a custom cfg or anchor override is used.
    exclude = (['anchor'] if (args.cfg or hyp.get('anchors')) else [])
    state_dict = ckpt['model'].float().state_dict()
    # Keep only entries that exist in the new model with matching shapes.
    state_dict = intersect_dicts(state_dict, net.state_dict(), exclude=exclude)
    net.load_state_dict(state_dict, strict=False)
    net.to(device)
    return net
class ConditionalBatchNorm2d(nn.Module):
    """BatchNorm2d whose affine scale/shift are predicted from a conditioning vector.

    The BN itself is non-affine; gamma and beta come from spectrally
    normalized linear projections of the condition `y` (e.g. a class
    embedding), with gamma offset by 1 so a zero projection is the identity.
    """

    def __init__(self, num_features, num_classes, eps=0.0001, momentum=0.1):
        super().__init__()
        self.num_features = num_features
        # Non-affine BN; the conditional projections below supply scale/shift.
        self.bn = nn.BatchNorm2d(num_features, affine=False, eps=eps, momentum=momentum)
        self.gamma_embed = SpectralNorm(nn.Linear(num_classes, num_features, bias=False))
        self.beta_embed = SpectralNorm(nn.Linear(num_classes, num_features, bias=False))

    def forward(self, x, y):
        normalized = self.bn(x)
        gamma = self.gamma_embed(y) + 1
        beta = self.beta_embed(y)
        broadcast = (-1, self.num_features, 1, 1)
        return gamma.view(broadcast) * normalized + beta.view(broadcast)
def deeplabv3plus_pvtv2(num_classes=1, output_stride=8, pretrained_backbone=True):
    """Build a DeepLabV3+ segmentation model with a PVTv2 backbone.

    Thin factory wrapper around `_segm_pvtv2`; see that function for the
    actual construction details.
    """
    return _segm_pvtv2('deeplabv3plus', 'pvtv2', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
def test_eddington_differentpotentials_dMdE_integral():
    """Check the Eddington-inversion dM/dE integral for several spherical potentials."""
    pots = [potential.PlummerPotential(amp=2.3, b=1.3), potential.PowerSphericalPotentialwCutoff(amp=1.3, alpha=1.9, rc=1.2)]
    # Same tolerance for every potential.
    tols = [1e-06 for pot in pots]
    for (pot, tol) in zip(pots, tols):
        dfh = eddingtondf(pot=pot)
        check_dMdE_integral(dfh, tol)
    return None
def set_visible_area(point_dict, axes):
    """Fit the axes around all points with a 10-unit margin and equal aspect.

    NOTE(review): the bounds start at +/-0.0, so the visible area always
    contains the origin even when every point lies on one side — presumably
    intentional; confirm before changing the initializers to +/-inf.
    """
    min_x = min_y = 0.0
    max_x = max_y = -0.0
    for _, point in dict_utils.get_item_iterator(point_dict):
        if point.x < min_x:
            min_x = point.x
        if point.y < min_y:
            min_y = point.y
        if point.x > max_x:
            max_x = point.x
        if point.y > max_y:
            max_y = point.y
    axes.set_aspect('equal', adjustable='box')
    axes.set_xlim([min_x - 10, max_x + 10])
    axes.set_ylim([min_y - 10, max_y + 10])
def prototype_test():
    """Return a small HRED-style configuration for running the unit-test pipeline.

    Starts from the prototype defaults and overrides paths, model sizes and
    training knobs with values small enough for the test fixtures.
    """
    state = prototype_state()
    state.update({
        # Test fixture locations.
        'train_dialogues': './tests/data/ttrain.dialogues.pkl',
        'test_dialogues': './tests/data/ttest.dialogues.pkl',
        'valid_dialogues': './tests/data/tvalid.dialogues.pkl',
        'dictionary': './tests/data/ttrain.dict.pkl',
        'save_dir': './tests/models/',
        'max_grad_steps': 20,
        # Word-embedding handling.
        'initialize_from_pretrained_word_embeddings': False,
        'pretrained_word_embeddings_file': './tests/data/MT_WordEmb.pkl',
        'fix_pretrained_word_embeddings': False,
        'valid_freq': 50,
        'prefix': 'testmodel_',
        'updater': 'adam',
        # Architecture switches.
        'maxout_out': False,
        'deep_out': True,
        'deep_dialogue_input': True,
        'utterance_encoder_gating': 'GRU',
        'dialogue_encoder_gating': 'GRU',
        'utterance_decoder_gating': 'GRU',
        'bidirectional_utterance_encoder': True,
        'direct_connection_between_encoders_and_decoder': True,
        # Training / model dimensions (kept tiny for tests).
        'bs': 5,
        'sort_k_batches': 1,
        'use_nce': False,
        'decoder_bias_type': 'all',
        'qdim_encoder': 15,
        'qdim_decoder': 5,
        'sdim': 10,
        'rankdim': 10,
    })
    return state
def get_processor_name():
    """Return a human-readable CPU model name for the current platform.

    Returns a str on Windows/Darwin/Linux and None on anything else.
    """
    system = platform.system()
    if (system == 'Windows'):
        return platform.processor()
    elif (system == 'Darwin'):
        # sysctl lives in /usr/sbin, which may not be on PATH.
        os.environ['PATH'] = ((os.environ['PATH'] + os.pathsep) + '/usr/sbin')
        # FIX: the original passed the whole command as one string without
        # shell=True, which makes check_output try to exec a file literally
        # named "sysctl -n ..." and raise FileNotFoundError. Pass an argv
        # list, and decode so this branch returns str like the others.
        command = ['sysctl', '-n', 'machdep.cpu.brand_string']
        return subprocess.check_output(command).decode('utf-8').strip()
    elif (system == 'Linux'):
        command = "cat /proc/cpuinfo | grep 'model name' -m 1"
        name = subprocess.check_output(command, shell=True).strip()
        return str(name, 'utf-8')
    return None
def main():
    """End-to-end export: (optionally) quantize a classifier, convert to ONNX, then to ncnn.

    Pipeline: build ResNet-18 or MobileNetV2 -> optional post-training
    quantization (layer swap, BN merge, equalization, bias correction, range
    calibration) -> ONNX export + onnxsim -> ncnn param/bin -> optional int8
    table generation and conversion.

    NOTE(review): indentation reconstructed from a flattened dump — the
    nesting of the export section relative to `if args.quantize` should be
    confirmed against the original source. Shells out via os.system/os.chdir
    throughout, so it must run from the project root with ncnn tools built.
    """
    args = get_argument()
    # Pick the backbone: torchvision ResNet-18 or a local MobileNetV2 checkpoint.
    if args.resnet:
        import torchvision.models as models
        model = models.resnet18(pretrained=True)
        model = ProbModel(model)
    else:
        model = mobilenet_v2('modeling/classification/mobilenetv2_1.0-f2a8633.pth.tar')
        model = ProbModel(model)
    model.eval()
    if args.quantize:
        data = torch.ones((4, 3, 224, 224))
        if args.distill_range:
            # Keep an untouched copy of the model to synthesize calibration data from.
            import copy
            model_original = copy.deepcopy(model)
            model_original.eval()
            transformer = TorchTransformer()
            transformer._build_graph(model_original, data, [QuantMeasure])
            graph = transformer.log.getGraph()
            bottoms = transformer.log.getBottoms()
            data_distill = getDistilData(model_original, 'imagenet', args.dis_batch_size, bn_merged=False, num_batch=args.dis_num_batch, gpu=True, value_range=[(- 2.), 2.64], size=[224, 224], early_break_factor=(1.2 if args.resnet else 0.5))
        transformer = TorchTransformer()
        # Swap float layers for their quantized counterparts (and ReLU6 -> ReLU
        # when equalization needs unbounded activations).
        module_dict = {}
        if args.distill_range:
            module_dict[1] = [(torch.nn.Conv2d, QConv2d), (torch.nn.Linear, QLinear)]
        else:
            module_dict[1] = [(torch.nn.Conv2d, QuantNConv2d), (torch.nn.Linear, QuantNLinear)]
        if (args.relu or args.equalize):
            module_dict[0] = [(torch.nn.ReLU6, torch.nn.ReLU)]
        (model, transformer) = switch_layers(model, transformer, data, module_dict, ignore_layer=[QuantMeasure], quant_op=True)
        graph = transformer.log.getGraph()
        bottoms = transformer.log.getBottoms()
        if args.distill_range:
            targ_layer = [QConv2d, QLinear]
        else:
            targ_layer = [QuantNConv2d, QuantNLinear]
        set_layer_bits(graph, args.bits_weight, args.bits_activation, args.bits_bias, targ_layer)
        model = merge_batchnorm(model, graph, bottoms, targ_layer)
        # Optional accuracy-recovery passes.
        if (args.equalize or args.distill_range):
            res = create_relation(graph, bottoms, targ_layer, delete_single=False)
            if args.equalize:
                cross_layer_equalization(graph, res, targ_layer, visualize_state=False, converge_thres=2e-07, signed=True)
        if args.clip_weight:
            clip_weight(graph, range_clip=[(- 15), 15], targ_type=targ_layer)
        if args.correction:
            bias_correction(graph, bottoms, targ_layer, bits_weight=args.bits_weight, signed=True)
        if args.distill_range:
            # Calibrate activation ranges on the distilled data.
            set_update_stat(model, [QuantMeasure], True)
            model = update_quant_range(model.cuda(), data_distill, graph, bottoms)
            set_update_stat(model, [QuantMeasure], False)
        else:
            set_quant_minmax(graph, bottoms)
        torch.cuda.empty_cache()
        # Swap the quantized layers back to plain ones before ONNX export;
        # `graph` keeps the calibrated modules for the table pass below.
        module_dict = {}
        if args.distill_range:
            module_dict[1] = [(QConv2d, torch.nn.Conv2d), (QLinear, torch.nn.Linear)]
        else:
            module_dict[1] = [(QuantNConv2d, torch.nn.Conv2d), (QuantNLinear, torch.nn.Linear)]
        (model, transformer) = switch_layers(model, transformer, data, module_dict, ignore_layer=[QuantMeasure], quant_op=False)
        graph = transformer.log.getGraph()
        bottoms = transformer.log.getBottoms()
    # Export to ONNX, simplify with onnxsim, then convert to ncnn param/bin.
    x = torch.rand(1, 3, 224, 224)
    torch_out = torch.onnx._export(model, x, 'model.onnx', export_params=True)
    os.system('python3 -m onnxsim model.onnx model-sim.onnx')
    os.system('rm model.onnx')
    cur_path = os.path.abspath(os.getcwd())
    os.system('mv model-sim.onnx {}'.format(os.path.join(args.ncnn_build, 'tools/onnx', 'model-sim.onnx')))
    os.chdir(os.path.join(args.ncnn_build, 'tools/onnx'))
    os.system('./onnx2ncnn model-sim.onnx model.param model.bin')
    # Patch the input layer line of the param file with the fixed input shape.
    lines = [line.strip() for line in open('model.param', 'r')]
    with open('model.param', 'w') as ww:
        for (idx, line) in enumerate(lines):
            if ((idx == 2) and ('input' in line.lower())):
                line += ' 0=224 1=224 2=3'
            ww.write((line + '\n'))
    if (not os.path.exists(os.path.join(cur_path, 'modeling/ncnn'))):
        os.makedirs(os.path.join(cur_path, 'modeling/ncnn'))
    os.system('rm model-sim.onnx')
    if args.quantize:
        os.system('mv model.param {}'.format(os.path.join(args.ncnn_build, 'tools/quantize', 'model.param')))
        os.system('mv model.bin {}'.format(os.path.join(args.ncnn_build, 'tools/quantize', 'model.bin')))
        os.chdir(os.path.join(args.ncnn_build, 'tools/quantize'))
        # Generate a per-channel int8 table (ImageNet mean/std scaled to [0,255])...
        os.system('./ncnn2table --param=model.param --bin=model.bin --images={} --output=model_int8_channel.table --mean={},{},{} --norm={},{},{} --size=224,224 --thread=2'.format(args.image_path, (0.485 * 255), (0.456 * 255), (0.406 * 255), (1 / (0.229 * 255)), (1 / (0.224 * 255)), (1 / (0.225 * 255))))
        table_old = [line.strip() for line in open('model_int8_channel.table', 'r')]
        table_new = []
        count = 0
        # ...then overwrite its scales with per-tensor scales derived from the
        # calibrated graph: pass ii=0 rewrites weight scales, ii=1 activation scales.
        for ii in range(2):
            for idx in graph:
                if (type(graph[idx]) in [torch.nn.Conv2d, torch.nn.Linear]):
                    if (ii == 0):
                        mi = float(torch.min(graph[idx].weight))
                        ma = float(torch.max(graph[idx].weight))
                    else:
                        mi = float(torch.min(graph[idx].quant.running_min))
                        ma = float(torch.max(graph[idx].quant.running_max))
                    # Symmetric int8 scale from the larger magnitude bound.
                    scale = (128.0 / max(abs(ma), abs(mi)))
                    if (ii == 0):
                        # Repeat the single per-tensor scale once per output channel.
                        table_new.append(' '.join((table_old[count].split(' ')[0:1] + ([str(scale)] * graph[idx].weight.shape[0]))))
                    else:
                        table_new.append(' '.join((table_old[count].split(' ')[0:1] + [str(scale)])))
                    count += 1
        with open('model_int8_tensor.table', 'w') as ww:
            for line in table_new:
                ww.write((line + '\n'))
        os.system('./ncnn2int8 model.param model.bin model_int8.param model_int8.bin model_int8_tensor.table')
        lines = [line.strip() for line in open('model_int8.param', 'r')]
        os.system('cp model_int8.param {}'.format(os.path.join(cur_path, args.param)))
        os.system('cp model_int8.bin {}'.format(os.path.join(cur_path, args.bin)))
        os.system('cp model_int8_tensor.table {}'.format(os.path.join(cur_path, args.table)))
    else:
        os.system('mv model.param {}'.format(os.path.join(cur_path, args.param)))
        os.system('mv model.bin {}'.format(os.path.join(cur_path, args.bin)))
    os.chdir(cur_path)
    # Report the name of the last layer from the final param file.
    line = ' '.join([l.strip() for l in open(args.param, 'r')][(- 1)].split()).split(' ')[1]
    print(('=' * 100))
    print("Target layer name '{}'".format(line))
    print(('=' * 100))
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    """Fast (HF tokenizers-backed) ALBERT tokenizer.

    Wraps a tokenizer.json; the slow (SentencePiece) vocabulary can only be
    re-exported when the original vocab file was provided at construction.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token='[CLS]', eos_token='[SEP]', unk_token='<unk>', sep_token='[SEP]', pad_token='<pad>', cls_token='[CLS]', mask_token='[MASK]', **kwargs):
        # Mask token eats the preceding space (lstrip) so '[MASK]' matches
        # mid-sentence occurrences; left untouched if already an AddedToken.
        mask_token = (AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False) if isinstance(mask_token, str) else mask_token)
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # Slow-vocab export is only possible when the original file is known.
        self.can_save_slow_tokenizer = (False if (not self.vocab_file) else True)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Add ALBERT special tokens: [CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return ((cls + token_ids_0) + sep)
        return ((((cls + token_ids_0) + sep) + token_ids_1) + sep)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Return segment ids: 0 for [CLS] A [SEP], 1 for B [SEP] when present."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return ((len(((cls + token_ids_0) + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Copy the original SentencePiece vocab file into `save_directory`.

        Raises ValueError when the tokenizer was built without a vocab file;
        returns early (None) when `save_directory` is not a directory.
        """
        if (not self.can_save_slow_tokenizer):
            raise ValueError('Your fast tokenizer does not have the necessary information to save the vocabulary for a slow tokenizer.')
        if (not os.path.isdir(save_directory)):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        # Skip the copy when source and destination are the same file.
        if (os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
class HRModule(nn.Module):
    """One HRNet stage: parallel branches at different resolutions plus a
    fuse step that exchanges features between every pair of branches.
    """
    def __init__(self, num_branches, blocks, num_blocks, in_channels, num_channels, multiscale_output=True, with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN')):
        super(HRModule, self).__init__()
        # Fail fast if the per-branch configuration lists disagree in length.
        self._check_branches(num_branches, num_blocks, in_channels, num_channels)
        self.in_channels = in_channels
        self.num_branches = num_branches
        self.multiscale_output = multiscale_output
        self.norm_cfg = norm_cfg
        self.conv_cfg = conv_cfg
        self.with_cp = with_cp
        self.branches = self._make_branches(num_branches, blocks, num_blocks, num_channels)
        self.fuse_layers = self._make_fuse_layers()
        self.relu = nn.ReLU(inplace=False)
    def _check_branches(self, num_branches, num_blocks, in_channels, num_channels):
        """Raise ValueError if any per-branch list length != num_branches."""
        if (num_branches != len(num_blocks)):
            error_msg = f'NUM_BRANCHES({num_branches}) != NUM_BLOCKS({len(num_blocks)})'
            raise ValueError(error_msg)
        if (num_branches != len(num_channels)):
            error_msg = f'NUM_BRANCHES({num_branches}) != NUM_CHANNELS({len(num_channels)})'
            raise ValueError(error_msg)
        if (num_branches != len(in_channels)):
            error_msg = f'NUM_BRANCHES({num_branches}) != NUM_INCHANNELS({len(in_channels)})'
            raise ValueError(error_msg)
    def _make_one_branch(self, branch_index, block, num_blocks, num_channels, stride=1):
        """Stack num_blocks[branch_index] residual blocks for one branch."""
        downsample = None
        # A 1x1 projection is needed when the shortcut changes stride or width.
        if ((stride != 1) or (self.in_channels[branch_index] != (num_channels[branch_index] * block.expansion))):
            downsample = nn.Sequential(build_conv_layer(self.conv_cfg, self.in_channels[branch_index], (num_channels[branch_index] * block.expansion), kernel_size=1, stride=stride, bias=False), build_norm_layer(self.norm_cfg, (num_channels[branch_index] * block.expansion))[1])
        layers = []
        layers.append(block(self.in_channels[branch_index], num_channels[branch_index], stride, downsample=downsample, with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg))
        # NOTE: mutates self.in_channels so the remaining blocks and the fuse
        # layers built afterwards see this branch's expanded output width.
        self.in_channels[branch_index] = (num_channels[branch_index] * block.expansion)
        for i in range(1, num_blocks[branch_index]):
            layers.append(block(self.in_channels[branch_index], num_channels[branch_index], with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg))
        return nn.Sequential(*layers)
    def _make_branches(self, num_branches, block, num_blocks, num_channels):
        """Build every parallel branch as a ModuleList."""
        branches = []
        for i in range(num_branches):
            branches.append(self._make_one_branch(i, block, num_blocks, num_channels))
        return nn.ModuleList(branches)
    def _make_fuse_layers(self):
        """Build fuse_layers[i][j]: the transform that maps branch j's output
        to branch i's resolution/width (None on the diagonal)."""
        if (self.num_branches == 1):
            return None
        num_branches = self.num_branches
        in_channels = self.in_channels
        fuse_layers = []
        # When multiscale_output is off, only the highest-resolution output is fused.
        num_out_branches = (num_branches if self.multiscale_output else 1)
        for i in range(num_out_branches):
            fuse_layer = []
            for j in range(num_branches):
                if (j > i):
                    # Lower-resolution source: 1x1 conv to match width, then
                    # nearest-neighbor upsample by 2^(j-i).
                    fuse_layer.append(nn.Sequential(build_conv_layer(self.conv_cfg, in_channels[j], in_channels[i], kernel_size=1, stride=1, padding=0, bias=False), build_norm_layer(self.norm_cfg, in_channels[i])[1], nn.Upsample(scale_factor=(2 ** (j - i)), mode='nearest')))
                elif (j == i):
                    # Same branch: identity, handled explicitly in forward().
                    fuse_layer.append(None)
                else:
                    # Higher-resolution source: chain of stride-2 3x3 convs;
                    # only the last one switches to the target width and omits ReLU.
                    conv_downsamples = []
                    for k in range((i - j)):
                        if (k == ((i - j) - 1)):
                            conv_downsamples.append(nn.Sequential(build_conv_layer(self.conv_cfg, in_channels[j], in_channels[i], kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, in_channels[i])[1]))
                        else:
                            conv_downsamples.append(nn.Sequential(build_conv_layer(self.conv_cfg, in_channels[j], in_channels[j], kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, in_channels[j])[1], nn.ReLU(inplace=False)))
                    fuse_layer.append(nn.Sequential(*conv_downsamples))
            fuse_layers.append(nn.ModuleList(fuse_layer))
        return nn.ModuleList(fuse_layers)
    def forward(self, x):
        """x: list of per-branch tensors; returns the list of fused outputs
        (length 1 when multiscale_output is False)."""
        if (self.num_branches == 1):
            return [self.branches[0](x[0])]
        for i in range(self.num_branches):
            x[i] = self.branches[i](x[i])
        x_fuse = []
        for i in range(len(self.fuse_layers)):
            y = 0
            for j in range(self.num_branches):
                if (i == j):
                    # Diagonal fuse entry is None; add the branch output directly.
                    y += x[j]
                else:
                    y += self.fuse_layers[i][j](x[j])
            x_fuse.append(self.relu(y))
        return x_fuse
def create_model(bert_config, is_training, input_ids, input_mask, P_mask, A_mask, B_mask, segment_ids, labels, num_labels, use_one_hot_embeddings):
    """Build a BERT-based 3-way classifier from three token masks.

    P_mask/A_mask/B_mask select token positions in the input sequence
    (presumably a pronoun and two candidate mentions, GAP-style -- confirm
    against the data pipeline).  Each selection is sum-pooled from BERT's
    token outputs, combined into interaction features, and scored into
    num_labels logits.

    Returns (mean loss, per-example loss, logits, softmax probabilities).
    """
    model = modeling.BertModel(config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings)
    # Token-level output, shape (batch, seq_len, hidden).
    all_out = model.get_sequence_output()
    hidden_size = all_out.shape[(- 1)].value
    _P_mask = tf.cast(P_mask, tf.float32)
    _A_mask = tf.cast(A_mask, tf.float32)
    _B_mask = tf.cast(B_mask, tf.float32)
    # Broadcast each (batch, seq) mask to (hidden, batch, seq), then transpose
    # to (batch, seq, hidden) so it gates every hidden unit of all_out.
    _P_mask_ = tf.broadcast_to(_P_mask, shape=(tf.shape(all_out)[2], tf.shape(all_out)[0], tf.shape(all_out)[1]))
    P_mask_ = tf.transpose(_P_mask_, perm=[1, 2, 0])
    _A_mask_ = tf.broadcast_to(_A_mask, shape=(tf.shape(all_out)[2], tf.shape(all_out)[0], tf.shape(all_out)[1]))
    A_mask_ = tf.transpose(_A_mask_, perm=[1, 2, 0])
    _B_mask_ = tf.broadcast_to(_B_mask, shape=(tf.shape(all_out)[2], tf.shape(all_out)[0], tf.shape(all_out)[1]))
    B_mask_ = tf.transpose(_B_mask_, perm=[1, 2, 0])
    # Masked sum-pooling over the sequence axis -> one vector per group.
    P_ = tf.multiply(all_out, P_mask_)
    P = tf.reduce_sum(P_, axis=1)
    A_ = tf.multiply(all_out, A_mask_)
    A = tf.reduce_sum(A_, axis=1)
    B_ = tf.multiply(all_out, B_mask_)
    B = tf.reduce_sum(B_, axis=1)
    # Element-wise interaction features between the pooled vectors.
    PA = tf.multiply(P, A)
    PB = tf.multiply(P, B)
    PP = tf.multiply(P, P)
    AB = tf.multiply(A, B)
    # "Neither" feature: P's self-interaction minus the A-B interaction.
    N = tf.subtract(PP, AB)
    AB_weights = tf.get_variable('AB_weights', [1, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02))
    N_weights = tf.get_variable('N_weights', [1, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02))
    if is_training:
        # NOTE(review): dropout is applied to the weight vectors themselves,
        # not to activations -- unusual, but kept as written.
        AB_weights = tf.nn.dropout(AB_weights, keep_prob=0.9)
        N_weights = tf.nn.dropout(N_weights, keep_prob=0.9)
    # One scalar score per class: candidate A, candidate B, neither.
    A_out = tf.matmul(PA, AB_weights, transpose_b=True)
    B_out = tf.matmul(PB, AB_weights, transpose_b=True)
    N_out = tf.matmul(N, N_weights, transpose_b=True)
    output_bias = tf.get_variable('output_bias', [num_labels], initializer=tf.zeros_initializer())
    with tf.variable_scope('loss'):
        logits = tf.concat([A_out, B_out, N_out], axis=1)
        logits = tf.nn.bias_add(logits, output_bias)
        probabilities = tf.nn.softmax(logits, axis=(- 1))
        log_probs = tf.nn.log_softmax(logits, axis=(- 1))
        # Standard softmax cross-entropy against one-hot labels.
        one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
        per_example_loss = (- tf.reduce_sum((one_hot_labels * log_probs), axis=(- 1)))
        loss = tf.reduce_mean(per_example_loss)
    return (loss, per_example_loss, logits, probabilities)
def checkpoint(step, epoch):
    """Serialize the module-level `model` to models/<step>/GFN_epoch_<epoch>.pkl.

    Relies on a global `model` being in scope.  The target directory is
    created on demand so torch.save does not fail on a fresh run (fix).
    """
    model_out_path = 'models/{}/GFN_epoch_{}.pkl'.format(step, epoch)
    # torch.save raises FileNotFoundError if the directory is missing.
    os.makedirs(os.path.dirname(model_out_path), exist_ok=True)
    # NOTE(review): this pickles the whole module object; model.state_dict()
    # would be more portable, but callers may rely on torch.load returning
    # the full model, so the format is kept.
    torch.save(model, model_out_path)
    print('===>Checkpoint saved to {}'.format(model_out_path))
class ounoise():
    """Ornstein-Uhlenbeck exploration noise for continuous control.

    Discretized process:
        x_{t+1} = x_t + theta * (mean - x_t) * dt + std * sqrt(dt) * N(0, I)
    """

    def __init__(self, std, action_dim, mean=0, theta=0.15, dt=0.01, x0=None):
        self.std = std
        self.mean = mean
        self.action_dim = action_dim
        self.theta = theta
        self.dt = dt
        self.x0 = x0
        # Fix: initialize the internal state immediately so noise() works
        # even when the caller forgets to call reset() first.
        self.reset()

    def reset(self):
        """Reset the process state to x0 (or zeros when x0 is None)."""
        self.x_prev = (self.x0 if (self.x0 is not None) else np.zeros(self.action_dim))

    def noise(self):
        """Advance the process one dt step and return the new sample."""
        x = (self.x_prev
             + self.theta * (self.mean - self.x_prev) * self.dt
             + self.std * np.sqrt(self.dt) * np.random.normal(size=self.action_dim))
        self.x_prev = x
        return x
def get_games_from_file(filename):
    """Read every game in a PGN file and return them as a list.

    First pass indexes the byte offset of each game's headers, then a second
    pass seeks back and fully parses each game.
    """
    games = []
    # Fix: use a context manager so the file handle is always closed
    # (the original left it open for the process lifetime).
    with open(filename, errors='ignore') as pgn:
        offsets = []
        while True:
            offset = pgn.tell()
            headers = chess.pgn.read_headers(pgn)
            if headers is None:
                break
            offsets.append(offset)
        print(f'found {len(offsets)} games')
        for offset in offsets:
            pgn.seek(offset)
            games.append(chess.pgn.read_game(pgn))
    return games
class ThreeCarsHighSpeedCollision(Scenario):
    """Scenario: three vehicles converge on a single point at 30 m/s and collide."""

    def init_scene(self, prefix, settings=None, spectator_tr=None):
        """Spawn the three cars, let them settle, then launch them at each other."""
        super().init_scene(prefix, settings, spectator_tr)
        library = self.world.get_blueprint_library()
        # (blueprint filter, spawn transform, target velocity)
        setups = [
            ('prius', carla.Transform(carla.Location(110, -255, 0.05), carla.Rotation(yaw=180)), carla.Vector3D(-30, 0, 0)),
            ('a2', carla.Transform(carla.Location(53, -257, 0.0), carla.Rotation(yaw=0)), carla.Vector3D(30, 0, 0)),
            ('lincoln', carla.Transform(carla.Location(85, -230, 0.04), carla.Rotation(yaw=-90)), carla.Vector3D(0, -30, 0)),
        ]
        vehicles = [self.world.spawn_actor(library.filter(name)[0], transform) for (name, transform, _) in setups]
        # Let physics settle before applying the target velocities.
        self.wait(1)
        for (vehicle, (_, _, velocity)) in zip(vehicles, setups):
            vehicle.set_target_velocity(velocity)
        for vehicle in vehicles:
            self.add_actor(vehicle, 'Car')
        self.wait(1)
def main():
    """CLI entry point: run a 3D detector on a point cloud and visualize results."""
    parser = ArgumentParser()
    parser.add_argument('pcd', help='Point cloud file')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument('--device', default='cuda:0', help='Device used for inference')
    parser.add_argument('--score-thr', type=float, default=0.0, help='bbox score threshold')
    parser.add_argument('--out-dir', type=str, default='demo', help='dir to save results')
    parser.add_argument('--show', action='store_true', help='show online visuliaztion results')
    parser.add_argument('--snapshot', action='store_true', help='whether to save online visuliaztion results')
    args = parser.parse_args()
    # Build the detector from config + checkpoint, run it once, then
    # dump (and optionally display) the result.
    detector = init_model(args.config, args.checkpoint, device=args.device)
    result, data = inference_detector(detector, args.pcd)
    show_result_meshlab(data, result, args.out_dir, args.score_thr, show=args.show, snapshot=args.snapshot, task='det')
class RNNLearnerState(NamedTuple):
    """Carried state of a recurrent-policy learner across update steps."""
    params: Params  # current network parameters
    opt_states: OptStates  # optimizer state(s) matching `params`
    key: chex.PRNGKey  # PRNG key for the next stochastic operations
    env_state: LogEnvState  # (logged) environment state
    timestep: TimeStep  # last observed environment timestep
    dones: Done  # done flags from the last step
    hstates: HiddenStates  # RNN hidden states carried between steps
def _ent_in_context_at_k(guess_item, gold_item, k):
    """Return 1 if any gold title appears (after normalization) inside the
    text of one of the top-k provenance entries of the first guess output,
    else 0."""
    titles = eval_downstream.get_gold_titles(gold_item)
    top_output = guess_item['output'][0]
    if 'provenance' not in top_output:
        return 0
    # max(0, k) keeps the slice equivalent to range(min(k, len)) for any k.
    for entry in top_output['provenance'][:max(0, k)]:
        if 'text' not in entry:
            continue
        passage = eval_downstream.normalize_answer(entry['text'])
        if any((eval_downstream.normalize_answer(title) in passage) for title in titles):
            return 1
    return 0
class AVATAR_PT_MotionPanel(bpy.types.Panel):
    """UI panel ("Motion" tab of the Avatar category) in the 3D View sidebar
    for resetting the pose, picking a rig, and loading BVH motion."""
    bl_idname = 'AVATAR_PT_MotionPanel'
    bl_label = 'Motion'
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = 'Avatar'
    # NOTE(review): registering Object-level properties in a class body runs
    # at import time as a side effect; Blender convention is to do this in
    # register().  Left unchanged because other modules may rely on it.
    bpy.types.Object.bvh_offset = IntProperty(name='Offset', description='Start motion offset', default=0, min=0, max=250)
    bpy.types.Object.bvh_start_origin = BoolProperty(name='Origin', description='Start at origin', default=False)
    def draw(self, context):
        """Draw the panel widgets: reset-pose button, rig selector, BVH loader."""
        layout = self.layout
        obj = context.object  # unused here; kept as-is
        wm = context.window_manager  # unused here; kept as-is
        layout.operator('avt.set_rest_pose', text='Reset pose')
        layout.prop(context.scene, 'skel_rig', text='')
        layout.operator('avt.load_bvh', text='Load BVH')
class StatefulContainer(object):
    """Lazily-populated attribute store.

    Attribute values are materialized on first access from registered
    factory callables and cached in an internal state dict, which can be
    exported via state_dict() and re-imported via merge_state_dict().
    """

    def __init__(self):
        self._state = dict()
        self._factories = dict()

    def add_factory(self, name, factory: Callable[[], Any]):
        """Register `factory` to lazily build the value of attribute `name`."""
        self._factories[name] = factory

    def merge_state_dict(self, state_dict: Dict[str, Any]):
        """Merge entries into the current state (existing keys are overwritten)."""
        self._state.update(state_dict)

    def state_dict(self) -> Dict[str, Any]:
        """Return the live state mapping (not a copy)."""
        return self._state

    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails.  Fix: guard the
        # two internal slots explicitly -- if the instance was created
        # without __init__ (e.g. during unpickling/copy), reading
        # self._state below would otherwise recurse into __getattr__
        # forever (RecursionError instead of AttributeError).
        if name in ('_state', '_factories'):
            raise AttributeError(name)
        if ((name not in self._state) and (name in self._factories)):
            # First access: build and cache the value.
            self._state[name] = self._factories[name]()
        if (name in self._state):
            return self._state[name]
        raise AttributeError(f'Task state has no factory for attribute {name}')
class MultiLoop(object):
    """Mixin that expands iterable arguments: multi_loop(method, ...) calls
    `method` once per combination of elements drawn from any iterable
    positional/keyword argument and returns the flat list of results.
    """
    # Iterable types that must be passed through unexpanded by default.
    no_resolve_ = (str, set)
    class _multi_loop_container(object):
        """Wrapper marking a value produced by an outer expansion, together
        with the depth (`level`) at which it was produced, so nested calls do
        not re-expand it past the requested descend depth."""
        def __init__(self, item, level=0):
            self.item = item
            self.level = level
    def multi_loop(self, method, *args, **kwargs):
        """Recursively expand one iterable argument at a time and call `method`.

        Control keywords (popped here; note that for descend > 0 they are
        re-inserted into kwargs_new for the recursive calls and therefore
        also reach `method` at the leaves -- implementations strip them
        again with clean()):
          loop_descend   -- maximum expansion depth; <= 0 calls method once
          no_resolve     -- replaces the default pass-through type tuple
          no_resolve_add -- extra pass-through types appended to no_resolve
        """
        kwargs = kwargs.copy()
        descend = kwargs.pop('loop_descend', 1)
        no_resolve = kwargs.pop('no_resolve', self.no_resolve_)
        no_resolve += kwargs.pop('no_resolve_add', tuple())
        kwargs_new = kwargs.copy()
        args_new = list(args)
        if (descend <= 0):
            # Looping disabled: single direct call without the control keys.
            return [method(*args_new, **kwargs_new)]
        kwargs_new['loop_descend'] = descend
        kwargs_new['no_resolve'] = no_resolve
        result = []
        # Expand the first eligible positional argument; recursion handles the rest.
        for (iv, v) in enumerate(args):
            if isinstance(v, no_resolve):
                continue
            level = 1
            if isinstance(v, self._multi_loop_container):
                # Already expanded `level` times; stop at the descend limit.
                if (v.level == descend):
                    continue
                level = (v.level + 1)
                v = v.item
            if isinstance(v, dict):
                # Multi-entry dicts are split into single-entry dicts.
                if (len(v) <= 1):
                    continue
                for (k, i) in v.items():
                    args_new[iv] = {k: i}
                    result += self.multi_loop(method, *args_new, **kwargs_new)
                return result
            if isinstance(v, Iterable):
                for i in v:
                    # Wrap each element so deeper calls know its expansion level.
                    args_new[iv] = self._multi_loop_container(i, level)
                    result += self.multi_loop(method, *args_new, **kwargs_new)
                return result
        # Same expansion logic for keyword arguments.
        for (kw, v) in kwargs.items():
            if isinstance(v, no_resolve):
                continue
            level = 1
            if isinstance(v, self._multi_loop_container):
                if (v.level == descend):
                    continue
                level = (v.level + 1)
                v = v.item
            if isinstance(v, dict):
                if (len(v) <= 1):
                    continue
                for (k, i) in v.items():
                    kwargs_new[kw] = {k: i}
                    result += self.multi_loop(method, *args_new, **kwargs_new)
                return result
            if isinstance(v, Iterable):
                for i in v:
                    kwargs_new[kw] = self._multi_loop_container(i, level)
                    result += self.multi_loop(method, *args_new, **kwargs_new)
                return result
        # Nothing left to expand: unwrap containers and call the method once.
        for (iv, v) in enumerate(args_new):
            if isinstance(v, self._multi_loop_container):
                args_new[iv] = v.item
        for (kw, v) in kwargs_new.items():
            if isinstance(v, self._multi_loop_container):
                kwargs_new[kw] = v.item
        return [method(*args_new, **kwargs_new)]
    def clean(kwargs, extra=None):
        """Return a copy of `kwargs` without the loop-control keys (plus any
        extra key names given as a str or tuple).

        NOTE(review): defined without self/@staticmethod -- apparently meant
        to be called as MultiLoop.clean(kwargs); calling it on an instance
        would bind the instance to `kwargs`.
        """
        kw = kwargs.copy()
        if (extra is not None):
            if isinstance(extra, str):
                extra = (extra,)
        else:
            extra = tuple()
        extra += ('loop_descend', 'no_resolve', 'no_resolve_add')
        for x in extra:
            kw.pop(x, None)
        return kw
class LSTMState(object):
    """Per-layer (h, c) state container for a multi-layer LSTM.

    `states` is a list with one (hidden, cell) tensor pair per layer.  Most
    tensor operations (view, cat, indexing, arithmetic, ...) are mirrored
    here and applied pairwise to every layer's h and c.
    """

    def __init__(self, states):
        self.states = states

    # Fix: from_pytorch / stack / zero_state take `cls` but were missing the
    # @classmethod decorator, so LSTMState.from_pytorch(states) bound the
    # argument to `cls` and failed.
    @classmethod
    def from_pytorch(cls, states):
        """Build from PyTorch's stacked (h, c) tensors of a bidirectional LSTM.

        h and c have shape (num_layers * 2, batch, dim); the two directions
        of each layer are summed into a single (batch, dim) tensor.
        """
        (hs, cs) = states
        (_, bs, d) = hs.shape
        hs = hs.view(-1, 2, bs, d)
        cs = cs.view(-1, 2, bs, d)
        states = [(h.sum(dim=0), c.sum(dim=0)) for (h, c) in zip(hs.unbind(dim=0), cs.unbind(dim=0))]
        return cls(states)

    @classmethod
    def stack(cls, iterator_states, dim):
        """Stack several LSTMStates along `dim`, layer by layer."""
        num_layers = len(iterator_states[0])
        hs = [list() for _ in range(num_layers)]
        cs = [list() for _ in range(num_layers)]
        for state_obj in iterator_states:
            for (i, (h, c)) in enumerate(state_obj.states):
                hs[i].append(h)
                cs[i].append(c)
        states = [(torch.stack(hs[i], dim), torch.stack(cs[i], dim)) for i in range(num_layers)]
        return cls(states)

    @classmethod
    def zero_state(cls, num_layers, shape):
        """All-zero state: num_layers layers of tensors shaped `shape`."""
        states = [(get_zeros(*shape), get_zeros(*shape)) for _ in range(num_layers)]
        return cls(states)

    def shape(self):
        """Shape of one layer's hidden tensor."""
        return self.states[0][0].shape

    def clone(self):
        return LSTMState([(s[0].clone(), s[1].clone()) for s in self.states])

    def dim(self):
        return self.states[0][0].dim()

    def unsqueeze(self, dim):
        return LSTMState([(s[0].unsqueeze(dim), s[1].unsqueeze(dim)) for s in self.states])

    def view(self, *sizes):
        return LSTMState([(s[0].view(*sizes), s[1].view(*sizes)) for s in self.states])

    def unbind(self, dim):
        """Split along `dim` into a tuple of LSTMStates, one per slice."""
        n = self.states[0][0].shape[dim]
        ret = [list() for _ in range(n)]
        for (h, c) in self.states:
            hs = h.unbind(dim)
            cs = c.unbind(dim)
            for (i, (hi, ci)) in enumerate(zip(hs, cs)):
                ret[i].append((hi, ci))
        return tuple(LSTMState(s) for s in ret)

    def expand(self, *sizes):
        return LSTMState([(s[0].expand(*sizes), s[1].expand(*sizes)) for s in self.states])

    def contiguous(self):
        return LSTMState([(s[0].contiguous(), s[1].contiguous()) for s in self.states])

    def __len__(self):
        """Number of layers."""
        return len(self.states)

    def detach_(self):
        """In-place detach of every tensor from the autograd graph."""
        for s in self.states:
            s[0].detach_()
            s[1].detach_()

    def size(self):
        return self.states[0][0].size()

    def cat(self, other, dim):
        """Concatenate with another LSTMState along `dim`, layer by layer."""
        states = [(torch.cat([s1[0], s2[0]], dim), torch.cat([s1[1], s2[1]], dim)) for (s1, s2) in zip(self.states, other.states)]
        return LSTMState(states)

    def __mul__(self, other):
        return LSTMState([(s[0] * other, s[1] * other) for s in self.states])

    def __add__(self, other):
        return LSTMState([(s1[0] + s2[0], s1[1] + s2[1]) for (s1, s2) in zip(self.states, other.states)])

    def get_output(self):
        """Hidden state of the last layer (the LSTM output)."""
        return self.states[-1][0]

    def get(self, ind):
        """The (h, c) pair of layer `ind`."""
        return self.states[ind]

    def __getitem__(self, key):
        """Index every layer's h and c with `key` (e.g. a batch selection)."""
        return LSTMState([(s[0][key], s[1][key]) for s in self.states])

    def __setitem__(self, key, item):
        """Write `item`'s tensors into this state at `key`, layer by layer."""
        for (s1, s2) in zip(self.states, item.states):
            s1[0][key] = s2[0]
            s1[1][key] = s2[1]
class GatherOperation(Function):
    """Autograd wrapper around the CUDA gather_points kernel.

    Gathers feature columns by index: output[b, :, i] = features[b, :, idx[b, i]].
    """

    # Fix: torch.autograd.Function's ctx-based API requires forward/backward
    # to be static methods (the signatures already take `ctx`, not `self`);
    # without @staticmethod, GatherOperation.apply misbinds the arguments.
    @staticmethod
    def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
        """features: (B, C, N) contiguous CUDA tensor; idx: (B, npoint) index
        tensor.  Returns the gathered (B, C, npoint) tensor."""
        assert features.is_contiguous()
        assert idx.is_contiguous()
        (B, npoint) = idx.size()
        (_, C, N) = features.size()
        output = torch.cuda.FloatTensor(B, C, npoint)
        pointnet2.gather_points_wrapper(B, C, N, npoint, features, idx, output)
        # Save what backward needs; idx itself receives no gradient.
        ctx.for_backwards = (idx, C, N)
        return output

    @staticmethod
    def backward(ctx, grad_out):
        """Scatter-add grad_out back to the gathered source positions."""
        (idx, C, N) = ctx.for_backwards
        (B, npoint) = idx.size()
        grad_features = Variable(torch.cuda.FloatTensor(B, C, N).zero_())
        grad_out_data = grad_out.data.contiguous()
        pointnet2.gather_points_grad_wrapper(B, C, N, npoint, grad_out_data, idx, grad_features.data)
        # No gradient w.r.t. idx.
        return (grad_features, None)
class SegformerDropPath(nn.Module):
    """Stochastic-depth module: randomly drops the residual path per sample
    with probability `drop_prob` (delegates to the module-level drop_path)."""

    def __init__(self, drop_prob: Optional[float]=None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Apply drop-path; only active while the module is in training mode."""
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        """Expose the drop probability in the module repr."""
        return 'p={}'.format(self.drop_prob)
def _do_apply_self_mask(m):
    """Suppress each position's self-pairing in `m` (when enabled by the
    global g_options.use_self_mask), pushing masked entries to -10."""
    if not g_options.use_self_mask:
        return m
    self_mask = _get_self_mask(m)
    # Keep non-self entries; replace self entries with a large negative value.
    kept = m * (1 - self_mask)
    penalty = (-10) * self_mask
    return kept + penalty
def test_summarize_weighted(model, X, w):
    """After a weighted summarize(), the model and both distributions must
    hold the expected sufficient statistics (checked to 4 decimals)."""
    d1 = model.distributions[0]
    d2 = model.distributions[1]
    model.summarize(X, sample_weight=w)
    expectations = [
        (model._xw_sum, [0., 4.173105, 4.912965, 4.113657]),
        (model._xw_starts_sum, [0.136405, 3.163595]),
        (model._xw_ends_sum, [0.876271, 2.423729]),
        (d1._w_sum, [5.049643, 5.049643, 5.049643]),
        (d1._xw_sum, [8.834015, 5.17916, 0.]),
        (d2._w_sum, [11.450351, 11.450351, 11.450351]),
        (d2._xw_sum, [18.86598, 12.320832, 21.093086]),
    ]
    for (actual, target) in expectations:
        assert_array_almost_equal(actual, target, 4)
def data_preparation(args):
    """Assemble every DataLoader used for 2D->3D human-pose training/eval.

    Loads the Human3.6M 3D dataset and 2D keypoint detections, splits
    train/test subjects, and returns a dict of loaders plus bookkeeping
    (action filter, test subjects, keypoints).

    Raises KeyError for any dataset other than 'h36m'.
    """
    dataset_path = path.join('data', (('data_3d_' + args.dataset) + '.npz'))
    if (args.dataset == 'h36m'):
        from common.h36m_dataset import Human36mDataset, TEST_SUBJECTS
        dataset = Human36mDataset(dataset_path)
        # Optionally restrict training to subject S1 (low-data regime).
        if args.s1only:
            subjects_train = ['S1']
        else:
            subjects_train = ['S1', 'S5', 'S6', 'S7', 'S8']
        subjects_test = TEST_SUBJECTS
    else:
        raise KeyError('Invalid dataset')
    print('==> Loading 3D data...')
    dataset = read_3d_data(dataset)
    print('==> Loading 2D detections...')
    keypoints = create_2d_data(path.join('data', (((('data_2d_' + args.dataset) + '_') + args.keypoints) + '.npz')), dataset)
    action_filter = (None if (args.actions == '*') else args.actions.split(','))
    if (action_filter is not None):
        # Fix: materialize into a list.  The previous `map(...)` returned a
        # one-shot iterator that the first fetch() below exhausted, leaving
        # the second fetch() with no actions (and the print showed
        # "<map object ...>" instead of the action names).
        action_filter = [dataset.define_actions(x)[0] for x in action_filter]
        print('==> Selected actions: {}'.format(action_filter))
    stride = args.downsample
    (poses_train, poses_train_2d, actions_train, cams_train) = fetch(subjects_train, dataset, keypoints, action_filter, stride)
    (poses_valid, poses_valid_2d, actions_valid, cams_valid) = fetch(subjects_test, dataset, keypoints, action_filter, stride)
    # Two independent loaders over the same training split (detected-2D and GT-2D streams).
    train_det2d3d_loader = DataLoader(PoseDataSet(poses_train, poses_train_2d, actions_train, cams_train), batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True)
    train_gt2d3d_loader = DataLoader(PoseDataSet(poses_train, poses_train_2d, actions_train, cams_train), batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True)
    valid_loader = DataLoader(PoseDataSet(poses_valid, poses_valid_2d, actions_valid, cams_valid), batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True)
    # Unpaired target streams used for adaptation/adversarial objectives.
    target_2d_loader = DataLoader(PoseTarget(poses_train_2d), batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True)
    target_3d_loader = DataLoader(PoseTarget(poses_train), batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True)
    # Cross-dataset (MPI-INF-3DHP) test loader.
    mpi3d_npz = np.load('data_extra/test_set/test_3dhp.npz')
    mpi3d_loader = DataLoader(PoseBuffer([mpi3d_npz['pose3d']], [mpi3d_npz['pose2d']]), batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers, pin_memory=True)
    return {'dataset': dataset, 'train_det2d3d_loader': train_det2d3d_loader, 'train_gt2d3d_loader': train_gt2d3d_loader, 'target_2d_loader': target_2d_loader, 'target_3d_loader': target_3d_loader, 'H36M_test': valid_loader, 'mpi3d_loader': mpi3d_loader, 'action_filter': action_filter, 'subjects_test': subjects_test, 'keypoints': keypoints}
class PointNet2FPModule(nn.Module):
    """Feature-propagation layer: interpolates features from the `known`
    point set onto the `unknown` point set, optionally concatenates the
    unknown points' own features, and runs a shared MLP (or PAConv)."""

    def __init__(self, *, mlp: List[int], bn: bool=True, use_paconv=False, args=None):
        super().__init__()
        self.use_paconv = use_paconv
        if self.use_paconv:
            self.mlp = paconv.SharedPAConv(mlp, bn=bn, config=args)
        else:
            self.mlp = block.SharedMLP(mlp, bn=bn)

    def forward(self, unknown: torch.Tensor, known: torch.Tensor, unknow_feats: torch.Tensor, known_feats: torch.Tensor) -> torch.Tensor:
        """Propagate known_feats onto `unknown` positions and apply the MLP.

        When `known` is None the single known feature column is broadcast
        over every unknown point instead of being interpolated.
        """
        if known is None:
            interpolated = known_feats.expand(*known_feats.size()[0:2], unknown.size(1))
        else:
            # Inverse-distance-weighted interpolation from nearest neighbors.
            dist, idx = pointops.nearestneighbor(unknown, known)
            inv_dist = 1.0 / (dist + 1e-08)
            weight = inv_dist / torch.sum(inv_dist, dim=2, keepdim=True)
            interpolated = pointops.interpolation(known_feats, idx, weight)
        if unknow_feats is None:
            fused = interpolated
        else:
            # Concatenate the skip features along the channel axis.
            fused = torch.cat([interpolated, unknow_feats], dim=1)
        return self.mlp(fused.unsqueeze(-1)).squeeze(-1)
class Exp(MyExp):
    """YOLOX experiment config: single-class detection trained on the
    mix_det set and evaluated on MOT."""
    def __init__(self):
        super(Exp, self).__init__()
        self.num_classes = 1
        # Depth/width multipliers select a YOLOX-s sized model.
        self.depth = 0.33
        self.width = 0.5
        # Experiment name derived from this file's own name.
        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split('.')[0]
        self.train_ann = 'train.json'
        self.val_ann = 'train.json'
        self.input_size = (608, 1088)
        self.test_size = (608, 1088)
        # Multi-scale training range (presumably in units of 32 px -- see the
        # YOLOX trainer for how random_size is applied).
        self.random_size = (12, 26)
        self.max_epoch = 80
        self.print_interval = 20
        self.eval_interval = 5
        self.test_conf = 0.001
        self.nmsthre = 0.7
        # Mosaic/mixup augmentation is switched off for the last epochs.
        self.no_aug_epochs = 10
        self.basic_lr_per_img = (0.001 / 64.0)
        self.warmup_epochs = 1
    def get_data_loader(self, batch_size, is_distributed, no_aug=False):
        """Build the (optionally distributed) mosaic-augmented training loader."""
        from yolox.data import MOTDataset, TrainTransform, YoloBatchSampler, DataLoader, InfiniteSampler, MosaicDetection
        dataset = MOTDataset(data_dir=os.path.join(get_yolox_datadir(), 'mix_det'), json_file=self.train_ann, name='', img_size=self.input_size, preproc=TrainTransform(rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=500))
        # Mosaic wrapper; degrees/translate/scale/shear/perspective/enable_mixup
        # come from the MyExp base defaults (assumed -- confirm in MyExp).
        dataset = MosaicDetection(dataset, mosaic=(not no_aug), img_size=self.input_size, preproc=TrainTransform(rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=1000), degrees=self.degrees, translate=self.translate, scale=self.scale, shear=self.shear, perspective=self.perspective, enable_mixup=self.enable_mixup)
        self.dataset = dataset
        if is_distributed:
            # Split the global batch across workers.
            batch_size = (batch_size // dist.get_world_size())
        sampler = InfiniteSampler(len(self.dataset), seed=(self.seed if self.seed else 0))
        batch_sampler = YoloBatchSampler(sampler=sampler, batch_size=batch_size, drop_last=False, input_dimension=self.input_size, mosaic=(not no_aug))
        dataloader_kwargs = {'num_workers': self.data_num_workers, 'pin_memory': True}
        dataloader_kwargs['batch_sampler'] = batch_sampler
        train_loader = DataLoader(self.dataset, **dataloader_kwargs)
        return train_loader
    def get_eval_loader(self, batch_size, is_distributed, testdev=False):
        """Build the sequential (or distributed) evaluation loader over MOT."""
        from yolox.data import MOTDataset, ValTransform
        valdataset = MOTDataset(data_dir=os.path.join(get_yolox_datadir(), 'mot'), json_file=self.val_ann, img_size=self.test_size, name='train', preproc=ValTransform(rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)))
        if is_distributed:
            batch_size = (batch_size // dist.get_world_size())
            sampler = torch.utils.data.distributed.DistributedSampler(valdataset, shuffle=False)
        else:
            sampler = torch.utils.data.SequentialSampler(valdataset)
        dataloader_kwargs = {'num_workers': self.data_num_workers, 'pin_memory': True, 'sampler': sampler}
        dataloader_kwargs['batch_size'] = batch_size
        val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)
        return val_loader
    def get_evaluator(self, batch_size, is_distributed, testdev=False):
        """Wrap the eval loader in a COCO-style evaluator."""
        from yolox.evaluators import COCOEvaluator
        val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev)
        evaluator = COCOEvaluator(dataloader=val_loader, img_size=self.test_size, confthre=self.test_conf, nmsthre=self.nmsthre, num_classes=self.num_classes, testdev=testdev)
        return evaluator
def load_file_list(path=None, regx='\\.npz', printable=True):
    """List file names under `path` whose names match the regex `regx`.

    Parameters
    ----------
    path : str or None
        Directory to scan; any falsy value (None, '', False) means the
        current working directory.
    regx : str
        Regular expression tested with re.search against each file name.
    printable : bool
        If True, print the matched list and its size.

    Returns
    -------
    list of str
        Matching file names (names only, not full paths).
    """
    # Fix: the old guard was `path == False`, which missed the documented
    # None default; treat any falsy path as "use the cwd".
    if not path:
        path = os.getcwd()
    return_list = [f for f in os.listdir(path) if re.search(regx, f)]
    if printable:
        print(('Match file list = %s' % return_list))
        print(('Number of files = %d' % len(return_list)))
    return return_list
class ElementWiseUnaryOp(UnaryOpBase):
    """Base class for element-wise unary operators: the output tensor keeps
    the single input's rank and dtype unchanged."""

    def __init__(self):
        super().__init__()
        # Element-wise ops place no constraint on rank.
        self.inp_ranks = [rank_all()]
        self.out_ranks = [rank_all()]

    def type_transfer(self, input_shapes: List[AbsTensor]) -> List[AbsTensor]:
        """The single output has exactly the single input's abstract type."""
        SanityCheck.eq(len(input_shapes), 1)
        (only_input,) = input_shapes
        return [only_input]

    def deduct_inp_ranks_and_dtype(self, out_abs_tensor: List[AbsTensor]) -> List[Tuple[(int, DType)]]:
        """Invert type_transfer: the input must match the output's rank and dtype."""
        out = out_abs_tensor[0]
        return [(out.ndims, out.dtype)]
class CoarseAlign():
    """Coarse image alignment: mutual nearest-neighbor matching of
    multi-scale ResNet-50 conv features between a source and a target image,
    followed by RANSAC estimation of an affine or homography transform."""
    def __init__(self, nbScale, nbIter, tolerance, transform, minSize, segId, segFg, scaleR=2, imageNet=True, segNet=True):
        self.nbIter = nbIter
        self.tolerance = tolerance
        # Truncate ResNet-50 after layer3 to use it as a feature extractor.
        resnet_feature_layers = ['conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2', 'layer3']
        if imageNet:
            resNetfeat = models.resnet50(pretrained=True)
        else:
            # Self-supervised (MoCo) ResNet-50 weights loaded from disk.
            resNetfeat = resnet50()
            featPth = '../../model/pretrained/resnet50_moco.pth'
            param = torch.load(featPth)
            state_dict = {k.replace('module.', ''): v for (k, v) in param['model'].items()}
            msg = 'Loading pretrained model from {}'.format(featPth)
            print(msg)
            resNetfeat.load_state_dict(state_dict)
        resnet_module_list = [getattr(resNetfeat, l) for l in resnet_feature_layers]
        last_layer_idx = resnet_feature_layers.index('layer3')
        self.net = torch.nn.Sequential(*resnet_module_list[:(last_layer_idx + 1)])
        self.net.cuda()
        self.net.eval()
        if segNet:
            # Segmentation network used to derive masks (e.g. sky, see skyFromSeg).
            self.segNet = segEval.SegNet('../../model/pretrained/ade20k_resnet50dilated_encoder.pth', '../../model/pretrained/ade20k_resnet50dilated_decoder.pth', segId, segFg)
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        self.toTensor = transforms.ToTensor()
        self.preproc = transforms.Compose([transforms.ToTensor(), normalize])
        # Affine needs 3 point correspondences; homography needs 4.
        if (transform == 'Affine'):
            self.Transform = outil.Affine
            self.nbPoint = 3
        else:
            self.Transform = outil.Homography
            self.nbPoint = 4
        # Feature-map stride of the truncated net; image sides are snapped to it.
        self.strideNet = 16
        self.minSize = minSize
        if (nbScale == 1):
            self.scaleList = [1]
        else:
            # Symmetric geometric range of scales around 1: scaleR ... 1/scaleR.
            self.scaleList = (np.linspace(scaleR, 1, ((nbScale // 2) + 1)).tolist() + np.linspace(1, (1 / scaleR), ((nbScale // 2) + 1)).tolist()[1:])
        print(self.scaleList)
        torch.cuda.empty_cache()
    def ResizeMinSize(self, I, minSize):
        """Resize PIL image I so its short side is ~minSize, with both sides
        snapped down to multiples of strideNet."""
        (w, h) = I.size
        ratio = min((w / float(minSize)), (h / float(minSize)))
        (new_w, new_h) = (int(round((w / ratio))), int(round((h / ratio))))
        (new_w, new_h) = (((new_w // self.strideNet) * self.strideNet), ((new_h // self.strideNet) * self.strideNet))
        (ratioW, ratioH) = ((new_w / float(w)), (new_h / float(h)))
        Iresize = I.resize((new_w, new_h), resample=Image.LANCZOS)
        return Iresize
    def setPair(self, Is_org, It_org):
        """Precompute mutual nearest-neighbor feature matches between the
        multi-scale source image Is_org and the single-scale target It_org,
        storing the matched grid coordinates on self."""
        with torch.no_grad():
            IsList = []
            for i in range(len(self.scaleList)):
                IsList.append(self.ResizeMinSize(Is_org, int((self.minSize * self.scaleList[i]))))
            # Keep the middle (scale ~1) version as the reference source image.
            self.Is = IsList[(len(self.scaleList) // 2)]
            self.IsTensor = self.toTensor(self.Is).unsqueeze(0).cuda()
            featsMultiScale = []
            WMultiScale = []
            HMultiScale = []
            for i in range(len(self.scaleList)):
                # L2-normalized features; flattened to one (1024, W*H) matrix per scale.
                feat = F.normalize(self.net(self.preproc(IsList[i]).unsqueeze(0).cuda()))
                (Ws, Hs) = outil.getWHTensor(feat)
                featsMultiScale.append(feat.contiguous().view(1024, (- 1)))
                WMultiScale.append(Ws)
                HMultiScale.append(Hs)
                torch.cuda.empty_cache()
            featsMultiScale = torch.cat(featsMultiScale, dim=1)
            WMultiScale = torch.cat(WMultiScale)
            HMultiScale = torch.cat(HMultiScale)
            torch.cuda.empty_cache()
            self.It = self.ResizeMinSize(It_org, self.minSize)
            self.ItTensor = self.toTensor(self.It).unsqueeze(0).cuda()
            featt = F.normalize(self.net(self.preproc(self.It).unsqueeze(0).cuda()))
            (Wt, Ht) = outil.getWHTensor(featt)
            (WtInt, HtInt) = outil.getWHTensor_Int(featt)
            (self.W2, self.H2) = (featt.size()[2], featt.size()[3])
            featt = featt.contiguous().view(1024, (- 1))
            # Mutual nearest neighbors between all source scales and the target.
            (index1, index2) = outil.mutualMatching(featsMultiScale, featt)
            self.W1MutualMatch = WMultiScale[index1]
            self.H1MutualMatch = HMultiScale[index1]
            self.W2MutualMatch = Wt[index2]
            self.H2MutualMatch = Ht[index2]
            self.W2MutualMatchInt = WtInt[index2]
            self.H2MutualMatchInt = HtInt[index2]
    def skyFromSeg(self, path):
        """Sky mask of the image at `path` from the segmentation network."""
        return self.segNet.getSky(path)
    def getCoarse(self, Mt):
        """RANSAC-fit the transform from matches outside mask Mt.

        Returns the parameters as float32, or None when there are fewer than
        nbPoint valid matches or RANSAC finds no consensus."""
        with torch.no_grad():
            # Invert the mask and resample it onto the feature grid.
            MtExtend = (1 - Mt).astype(np.float32)
            MtExtend = torch.from_numpy(MtExtend).cuda().unsqueeze(0).unsqueeze(0)
            MtTensor = F.interpolate(input=MtExtend, size=(self.W2, self.H2), mode='bilinear')
            MtTensor = (MtTensor > 0.5).squeeze()
            # Keep only matches whose target grid cell is unmasked.
            validMutualMatch = MtTensor[(self.W2MutualMatchInt, self.H2MutualMatchInt)]
            ones = torch.cuda.FloatTensor(self.W1MutualMatch[validMutualMatch].size(0)).fill_(1)
            # Homogeneous (x, y, 1) coordinates for both images.
            match1 = torch.cat((self.H1MutualMatch[validMutualMatch].unsqueeze(1), self.W1MutualMatch[validMutualMatch].unsqueeze(1), ones.unsqueeze(1)), dim=1)
            match2 = torch.cat((self.H2MutualMatch[validMutualMatch].unsqueeze(1), self.W2MutualMatch[validMutualMatch].unsqueeze(1), ones.unsqueeze(1)), dim=1)
            if (len(match1) < self.nbPoint):
                return None
            (bestParam, _, indexInlier, _) = outil.RANSAC(self.nbIter, match1, match2, self.tolerance, self.nbPoint, self.Transform)
            if (bestParam is None):
                return None
            else:
                return bestParam.astype(np.float32)
def visual_image_MT(vis, mask_train, pred_train1, mask_val, pred_val1, pred_val2):
    """Push train/val masks and predictions to visdom as Viridis heatmaps,
    each into its own fixed window."""
    panels = [
        (mask_train, 'train_mask', 'Train Mask'),
        (pred_train1, 'train_pred1', 'Train Pred'),
        (mask_val, 'val_mask', 'Val Mask'),
        (pred_val1, 'val_pred1', 'Val Pred1'),
        (pred_val2, 'val_pred2', 'Val Pred2'),
    ]
    for (data, win, title) in panels:
        vis.heatmap(data, win=win, opts=dict(title=title, colormap='Viridis'))
def evaluate(y_true, y_pred_proba, debug=False):
    """Sweep classification thresholds over the predicted probabilities and
    report the metrics at the threshold maximizing macro-F1.

    NOTE: the sweep range currently covers the single threshold 0.50.

    Returns (max_f1, recall, precision, accuracy) at the best threshold.
    """
    best = {'threshold': -1, 'f1': 0, 'recall': 0, 'precision': 0, 'acc': 0}
    for raw_thr in range(50, 51):
        thr = raw_thr / 100
        y_pred_thr = [(1 if (p >= thr) else 0) for p in y_pred_proba]
        f1 = f1_score(y_true, y_pred_thr, average='macro')
        recall = recall_score(y_true, y_pred_thr)
        precision = precision_score(y_true, y_pred_thr)
        acc = accuracy_score(y_true, y_pred_thr)
        if debug:
            print('THRESHOLD: {:.3f} \tF1: {:.8f} \tRecall: {:.8f} \tPrecision: {:.8f}'.format(thr, f1, recall, precision))
        if f1 > best['f1']:
            best = {'threshold': thr, 'f1': f1, 'recall': recall, 'precision': precision, 'acc': acc}
    print('##MAX## \nTHRESHOLD: {:.3f} \tF1: {:.8f} \tRecall: {:.8f} \tPrec: {:.8f} \tAcc: {:.8f}'.format(best['threshold'], best['f1'], best['recall'], best['precision'], best['acc']))
    return (best['f1'], best['recall'], best['precision'], best['acc'])
class ResUnetSkipConnectionBlock(nn.Module):
    """One recursive U-Net level: strided-conv down, residual stacks, nearest-neighbour up.

    Inner levels concatenate their input with their output (the skip
    connection); the outermost level returns its output unchanged.
    """

    def __init__(self, outer_nc, inner_nc, input_nc=None, submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
        super(ResUnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        # InstanceNorm carries no affine bias by default, so convs keep their own.
        needs_bias = (norm_layer == nn.InstanceNorm2d)
        if input_nc is None:
            input_nc = outer_nc
        down_conv = nn.Conv2d(input_nc, inner_nc, kernel_size=3, stride=2, padding=1, bias=needs_bias)
        down_res = [ResidualBlock(inner_nc, norm_layer), ResidualBlock(inner_nc, norm_layer)]
        up_res = [ResidualBlock(outer_nc, norm_layer), ResidualBlock(outer_nc, norm_layer)]
        relu_down = nn.ReLU(True)
        relu_up = nn.ReLU(True)
        if norm_layer is not None:
            norm_down = norm_layer(inner_nc)
            norm_up = norm_layer(outer_nc)
        up_sample = nn.Upsample(scale_factor=2, mode='nearest')
        if outermost:
            # Output head: no normalization/activation after the final conv.
            up_conv = nn.Conv2d(inner_nc * 2, outer_nc, kernel_size=3, stride=1, padding=1, bias=needs_bias)
            layers = [down_conv, relu_down] + down_res + [submodule] + [up_sample, up_conv]
        elif innermost:
            # Bottleneck: no submodule, and upconv input is not concatenated.
            up_conv = nn.Conv2d(inner_nc, outer_nc, kernel_size=3, stride=1, padding=1, bias=needs_bias)
            if norm_layer is None:
                up_part = [up_sample, up_conv, relu_up] + up_res
            else:
                up_part = [up_sample, up_conv, norm_up, relu_up] + up_res
            layers = [down_conv, relu_down] + down_res + up_part
        else:
            up_conv = nn.Conv2d(inner_nc * 2, outer_nc, kernel_size=3, stride=1, padding=1, bias=needs_bias)
            if norm_layer is None:
                down_part = [down_conv, relu_down] + down_res
                up_part = [up_sample, up_conv, relu_up] + up_res
            else:
                down_part = [down_conv, norm_down, relu_down] + down_res
                up_part = [up_sample, up_conv, norm_up, relu_up] + up_res
            layers = down_part + [submodule] + up_part
            if use_dropout:
                layers = layers + [nn.Dropout(0.5)]
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        """Run the level; inner levels append the skip connection on the channel dim."""
        if self.outermost:
            return self.model(x)
        return torch.cat([x, self.model(x)], 1)
def match_api(A: str, B: str, num=50, equal_type=1, fuzz=True, neq_dir='output/neq', fail_dir='output/fail', success_dir='output/success', err_dir='output/err', bug_dir='output/bug'):
    """Match the arguments of API ``A`` against API ``B`` and verify equivalence.

    Returns:
        ``(results, indices)`` where ``indices`` are the matched argument
        index pairs, or ``(None, [])`` when the APIs cannot be matched.
    """

    def verify_wrapper(api_A, api_B, indices, neq_dir, fail_dir, success_dir, err_dir, bug_dir, index=None, value=None):
        # Retry up to 10 times, bailing out on the first decisive outcome.
        for _ in range(10):
            res = verify(api_A, api_B, num, equal_type, fuzz, indices, neq_dir, fail_dir, success_dir, err_dir, bug_dir, strict_mode=True)
            if ResultType.NOT_EQUIVALENT in res:
                return False
            if ResultType.SUCCESS in res:
                return True
        return False

    for out_dir in (neq_dir, fail_dir, success_dir, err_dir, bug_dir):
        os.makedirs(out_dir, exist_ok=True)
    api_A = TorchAPI(A)
    api_B = TorchAPI(B)
    # Both sides need arguments and must agree on class-vs-function kind.
    if len(api_A.arg_defs) == 0 or len(api_B.arg_defs) == 0 or api_A.is_class != api_B.is_class:
        return (None, [])
    indices = match_argument(api_A.arg_defs, api_B.arg_defs)
    matched_src = [pair[0] for pair in indices]
    if len(matched_src) < len(api_A.arg_defs):
        # Unmatched source args are tolerable only when optional.
        for i in range(len(api_A.arg_defs)):
            if i not in matched_src:
                if not api_A.arg_defs[i].is_optional:
                    return (None, [])
                api_A.arg_defs[i].ignore = True
    matched_tgt = [pair[1] for pair in indices]
    if len(matched_tgt) < len(api_B.arg_defs):
        # Every mandatory target arg must be matched.
        for i in range(len(api_B.arg_defs)):
            if i not in matched_tgt and not api_B.arg_defs[i].is_optional:
                return (None, [])
    if fuzz:
        results = verify(api_A, api_B, num, equal_type, fuzz, indices, neq_dir, fail_dir, success_dir, err_dir, bug_dir)
    else:
        results = verify_wrapper(api_A, api_B, indices, neq_dir, fail_dir, success_dir, err_dir, bug_dir)
    return (results, indices)
def _extract_archive(file_path, path='.', archive_format='auto'):
if (archive_format is None):
return False
if (archive_format == 'auto'):
archive_format = ['tar', 'zip']
if isinstance(archive_format, six.string_types):
archive_format = [archive_format]
for archive_type in archive_format:
if (archive_type == 'tar'):
open_fn = tarfile.open
is_match_fn = tarfile.is_tarfile
elif (archive_type == 'zip'):
open_fn = zipfile.ZipFile
is_match_fn = zipfile.is_zipfile
else:
raise Exception('Invalid archive type.')
if is_match_fn(file_path):
with open_fn(file_path) as archive:
try:
archive.extractall(path)
except (tarfile.TarError, RuntimeError, KeyboardInterrupt):
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
raise
return True
return False |
class Blip2Model(nn.Module):
    """Thin wrapper around the BLIP-2 FlanT5-XXL model for batched VQA.

    Loads the pretrained model and its image preprocessors once, then
    answers a single question over a batch of images.
    """

    def __init__(self, args: Namespace):
        super(Blip2Model, self).__init__()
        self.args = args
        self.model, self.vis_processors, _ = load_model_and_preprocess(name='blip2_t5', model_type='pretrain_flant5xxl', is_eval=True, device=args.device)

    def forward(self, q: str, image_input) -> list:
        """Answer question ``q`` for each image; returns lower-cased answers."""
        prompt = f'Question: {q} Answer:'
        preprocess = self.vis_processors['eval']
        batch = torch.stack([preprocess(im) for im in image_input]).to(self.args.device)
        raw_answers = self.model.generate({'image': batch, 'prompt': ([prompt] * len(batch))})
        return [ans.lower() for ans in raw_answers]
class ContinuousMLPPolicy(Policy):
    """Deterministic MLP policy mapping flat observations to continuous actions (TF1 graph mode).

    NOTE(review): decorators appear stripped in this view; ``input_dim`` and
    ``vectorized`` read like ``@property`` accessors upstream — confirm
    against the original source before relying on call syntax.
    """
    def __init__(self, env_spec, name='ContinuousMLPPolicy', hidden_sizes=(64, 64), hidden_nonlinearity=tf.nn.relu, hidden_w_init=tf.initializers.glorot_uniform(seed=deterministic.get_tf_seed_stream()), hidden_b_init=tf.zeros_initializer(), output_nonlinearity=tf.nn.tanh, output_w_init=tf.initializers.glorot_uniform(seed=deterministic.get_tf_seed_stream()), output_b_init=tf.zeros_initializer(), layer_normalization=False):
        """Store hyperparameters, build the MLP model, and wire the TF graph."""
        super().__init__(name, env_spec)
        action_dim = env_spec.action_space.flat_dim
        # Hyperparameters are kept so clone() can rebuild an identical policy.
        self._hidden_sizes = hidden_sizes
        self._hidden_nonlinearity = hidden_nonlinearity
        self._hidden_w_init = hidden_w_init
        self._hidden_b_init = hidden_b_init
        self._output_nonlinearity = output_nonlinearity
        self._output_w_init = output_w_init
        self._output_b_init = output_b_init
        self._layer_normalization = layer_normalization
        self._obs_dim = env_spec.observation_space.flat_dim
        self.model = MLPModel(output_dim=action_dim, name='MLPModel', hidden_sizes=hidden_sizes, hidden_nonlinearity=hidden_nonlinearity, hidden_w_init=hidden_w_init, hidden_b_init=hidden_b_init, output_nonlinearity=output_nonlinearity, output_w_init=output_w_init, output_b_init=output_b_init, layer_normalization=layer_normalization)
        self._initialize()
    def _initialize(self):
        """Build the graph under this policy's variable scope and compile a fast feed callable."""
        state_input = tf.compat.v1.placeholder(tf.float32, shape=(None, self._obs_dim))
        with tf.compat.v1.variable_scope(self.name) as vs:
            self._variable_scope = vs
            outputs = self.model.build(state_input).outputs
        # make_callable avoids per-call session.run() feed-dict overhead.
        self._f_prob = tf.compat.v1.get_default_session().make_callable(outputs, feed_list=[state_input])
    def input_dim(self):
        """Flattened observation dimension."""
        return self._obs_dim
    def get_action_sym(self, obs_var, name=None):
        """Return the symbolic action output for ``obs_var`` (shares weights via the scope)."""
        with tf.compat.v1.variable_scope(self._variable_scope):
            return self.model.build(obs_var, name=name).outputs
    def get_action(self, observation):
        """Return (action, agent_info) for a single observation."""
        (actions, agent_infos) = self.get_actions([observation])
        action = actions[0]
        return (action, {k: v[0] for (k, v) in agent_infos.items()})
    def get_actions(self, observations):
        """Return (actions, agent_infos) for a batch of observations."""
        observations = self.observation_space.flatten_n(observations)
        actions = self._f_prob(observations)
        actions = self.action_space.unflatten_n(actions)
        return (actions, dict())
    def get_regularizable_vars(self):
        """Return trainable hidden-layer kernel variables (weight-decay targets)."""
        trainable = self.get_trainable_vars()
        return [var for var in trainable if (('hidden' in var.name) and ('kernel' in var.name))]
    def vectorized(self):
        """This policy supports batched action computation."""
        return True
    def clone(self, name):
        """Return a new policy with the same architecture and shared parameter values."""
        new_policy = self.__class__(name=name, env_spec=self._env_spec, hidden_sizes=self._hidden_sizes, hidden_nonlinearity=self._hidden_nonlinearity, hidden_w_init=self._hidden_w_init, hidden_b_init=self._hidden_b_init, output_nonlinearity=self._output_nonlinearity, output_w_init=self._output_w_init, output_b_init=self._output_b_init, layer_normalization=self._layer_normalization)
        new_policy.model.parameters = self.model.parameters
        return new_policy
    def __getstate__(self):
        """Drop the unpicklable compiled TF callable before pickling."""
        new_dict = super().__getstate__()
        del new_dict['_f_prob']
        return new_dict
    def __setstate__(self, state):
        """Restore state and rebuild the TF graph/callable."""
        super().__setstate__(state)
        self._initialize()
class DatasetLossGame(StochasticCooperativeGame):
    """Cooperative game whose value is the negated dataset loss of a model extension.

    Players are individual features, or feature groups when ``groups`` is
    given; groups must exactly partition the feature indices.
    """

    def __init__(self, extension, data, labels, loss, groups=None):
        self.extension = extension
        self.loss = loss
        self.N = len(data)
        assert len(labels) == self.N
        self.exogenous = (data, labels)
        num_features = data.shape[1]
        if groups is None:
            # One player per feature; no group aggregation needed.
            self.players = num_features
            self.groups_matrix = None
            return
        flat_inds = []
        for grp in groups:
            flat_inds.extend(list(grp))
        # The groups must cover every feature index exactly once.
        assert np.all(np.sort(flat_inds) == np.arange(num_features))
        self.players = len(groups)
        self.groups_matrix = np.zeros((len(groups), num_features), dtype=bool)
        for row, grp in enumerate(groups):
            self.groups_matrix[row, grp] = True

    def __call__(self, S, U):
        """Evaluate coalitions ``S`` on exogenous samples ``U`` (sampled when None)."""
        if U is None:
            U = self.sample(len(S))
        x, y = U
        if self.loss is utils.crossentropyloss:
            # Cross-entropy expects integer class labels.
            if (y.ndim == 1 or y.shape[1] == 1) and np.issubdtype(y.dtype, np.floating):
                y = y.astype(int)
        if self.groups_matrix is not None:
            # Expand group-level coalitions to feature-level masks.
            S = np.matmul(S, self.groups_matrix)
        return -self.loss(self.extension(x, S), y)
def mixres50_w234a234(**kwargs):
    """ResNet-50 variant with mixed-precision search over {2,3,4}-bit weights and activations."""
    stage_blocks = [3, 4, 6, 3]
    return ResNet(Bottleneck, qm.MixActivConv2d, stage_blocks, wbits=[2, 3, 4], abits=[2, 3, 4], share_weight=True, **kwargs)
def _get_local_ip():
    """Return the local IP address used to reach the public internet.

    connect() on a UDP socket sends no packets; it only selects the
    outgoing interface, whose address is then read back.

    Returns:
        str: the local interface's IP address.
    """
    # `with` both guarantees the socket is closed and fixes the original
    # bug where a failed socket() call left `s` unbound in `finally`,
    # producing a NameError that masked the real exception.
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
        s.connect(('8.8.8.8', 80))
        return s.getsockname()[0]
class GradientReceiver(Receiver):
    """Logs the global gradient L2 norm to the summary writer at the end of a training pipe."""
    def receive_notify(self, solver: 'Solver', message):
        """React to TRAIN_PIPE_END by computing per-network gradient norms and logging one scalar."""
        # Ignore every notification except end-of-training-pipe.
        if (not (Signal.TRAIN_PIPE_END in message)):
            return
        for netnode in solver.netnodes:
            if (not netnode.require_no_grad):
                model = netnode.net
                # Global L2 norm: sqrt of the sum of squared per-parameter grad norms.
                total_norm = 0
                for p in model.parameters():
                    param_norm = p.grad.data.norm(2)
                    total_norm += (param_norm.item() ** 2)
                total_norm = (total_norm ** (1.0 / 2))
        # NOTE(review): logging sits AFTER the loop, so only the LAST
        # grad-bearing netnode's norm is recorded; also the assert checks
        # receivers[0] while the write goes to summary_receiver — confirm
        # both are intended.
        assert isinstance(solver.receivers[0], SummaryWriter)
        solver.summary_receiver.add_scalar('gradient/total_norm', total_norm, solver.global_step)
def _test_update(inertia):
    """Check that _update_parameter blends the tensors in place: x1*inertia + x2*(1-inertia)."""
    current = torch.tensor([1.0, 1.4, 1.8, (- 1.1), 0.0])
    incoming = torch.tensor([2.2, 8.2, 0.1, 105.2, 0.0])
    expected = current * inertia + incoming * (1 - inertia)
    _update_parameter(current, incoming, inertia=inertia)
    assert_array_almost_equal(current, expected)
def train(args, train_dataset, model, tokenizer, train_highway=False):
    """Train ``model`` on ``train_dataset`` (HuggingFace-style loop with optional highway-only phase).

    Args:
        args: namespace of training hyperparameters/flags (lr, fp16, steps, ...).
        train_dataset: dataset of pre-tokenized tensors.
        model: the transformer model to optimize.
        tokenizer: unused here directly; forwarded context for evaluate().
        train_highway: if True, optimize only parameters whose name contains
            'highway'; otherwise optimize all non-highway parameters.

    Returns:
        Tuple ``(global_step, average_training_loss)``.
    """
    # TensorBoard writer only on the main process.
    if (args.local_rank in [(- 1), 0]):
        tb_writer = SummaryWriter()
    args.train_batch_size = (args.per_gpu_train_batch_size * max(1, args.n_gpu))
    train_sampler = (RandomSampler(train_dataset) if (args.local_rank == (- 1)) else DistributedSampler(train_dataset))
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    # Derive total optimization steps either from max_steps or from epochs.
    if (args.max_steps > 0):
        t_total = args.max_steps
        args.num_train_epochs = ((args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps)) + 1)
    else:
        t_total = ((len(train_dataloader) // args.gradient_accumulation_steps) * args.num_train_epochs)
    # Bias and LayerNorm weights are exempt from weight decay.
    no_decay = ['bias', 'LayerNorm.weight']
    if train_highway:
        optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (('highway' in n) and (not any(((nd in n) for nd in no_decay))))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if (('highway' in n) and any(((nd in n) for nd in no_decay)))], 'weight_decay': 0.0}]
    else:
        optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (('highway' not in n) and (not any(((nd in n) for nd in no_decay))))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if (('highway' not in n) and any(((nd in n) for nd in no_decay)))], 'weight_decay': 0.0}]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
    # Optional mixed-precision setup via NVIDIA apex.
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError('Please install apex from to use fp16 training.')
        (model, optimizer) = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # Multi-GPU (DataParallel) and distributed (DDP) wrapping.
    if (args.n_gpu > 1):
        model = torch.nn.DataParallel(model)
    if (args.local_rank != (- 1)):
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    logger.info('***** Running training *****')
    logger.info('  Num examples = %d', len(train_dataset))
    logger.info('  Num Epochs = %d', args.num_train_epochs)
    logger.info('  Instantaneous batch size per GPU = %d', args.per_gpu_train_batch_size)
    logger.info('  Total train batch size (w. parallel, distributed & accumulation) = %d', ((args.train_batch_size * args.gradient_accumulation_steps) * (torch.distributed.get_world_size() if (args.local_rank != (- 1)) else 1)))
    logger.info('  Gradient Accumulation steps = %d', args.gradient_accumulation_steps)
    logger.info('  Total optimization steps = %d', t_total)
    global_step = 0
    (tr_loss, logging_loss) = (0.0, 0.0)
    model.zero_grad()
    train_iterator = trange(int(args.num_train_epochs), desc='Epoch', disable=(args.local_rank not in [(- 1), 0]))
    set_seed(args)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc='Iteration', disable=(args.local_rank not in [(- 1), 0]))
        for (step, batch) in enumerate(epoch_iterator):
            model.train()
            batch = tuple((t.to(args.device) for t in batch))
            inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
            if (args.model_type != 'distilbert'):
                # Only BERT/XLNet consume token_type_ids; others get None.
                inputs['token_type_ids'] = (batch[2] if (args.model_type in ['bert', 'xlnet']) else None)
            inputs['train_highway'] = train_highway
            outputs = model(**inputs)
            loss = outputs[0]
            if (args.n_gpu > 1):
                # DataParallel returns one loss per GPU; average them.
                loss = loss.mean()
            if (args.gradient_accumulation_steps > 1):
                loss = (loss / args.gradient_accumulation_steps)
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            # Step the optimizer only every gradient_accumulation_steps mini-batches.
            if (((step + 1) % args.gradient_accumulation_steps) == 0):
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()
                model.zero_grad()
                global_step += 1
                # Periodic evaluation / scalar logging (main process only).
                if ((args.local_rank in [(- 1), 0]) and (args.logging_steps > 0) and ((global_step % args.logging_steps) == 0)):
                    if ((args.local_rank == (- 1)) and args.evaluate_during_training):
                        results = evaluate(args, model, tokenizer)
                        for (key, value) in results.items():
                            tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
                    tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar('loss', ((tr_loss - logging_loss) / args.logging_steps), global_step)
                    logging_loss = tr_loss
                # Periodic checkpointing (main process only).
                if ((args.local_rank in [(- 1), 0]) and (args.save_steps > 0) and ((global_step % args.save_steps) == 0)):
                    output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
                    if (not os.path.exists(output_dir)):
                        os.makedirs(output_dir)
                    # Unwrap DataParallel/DDP before saving.
                    model_to_save = (model.module if hasattr(model, 'module') else model)
                    model_to_save.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, 'training_args.bin'))
                    logger.info('Saving model checkpoint to %s', output_dir)
            if ((args.max_steps > 0) and (global_step > args.max_steps)):
                epoch_iterator.close()
                break
        if ((args.max_steps > 0) and (global_step > args.max_steps)):
            train_iterator.close()
            break
    if (args.local_rank in [(- 1), 0]):
        tb_writer.close()
    return (global_step, (tr_loss / global_step))
def custom_loss(y_pred, y_true, model_name):
    """Sum-of-squares loss, or KL loss on the energy/latency mix for 'stochastic' models."""
    if 'stochastic' in model_name:
        target = Coeff_Energy * y_true[0] + Coeff_Latency * y_true[1]
        return KL_loss(y_pred, target)
    residual = y_pred - y_true
    return (residual ** 2).sum()
class Statement(SliceableMixin):
    """A single executed statement, tracked for dynamic/static dataflow slicing.

    Instances are registered in two class-level registries: by execution
    timestamp and by the id of their AST node. Parent/child edge dicts
    record which symbols flow between statements.

    NOTE(review): several methods here take ``cls`` (``current``, ``clear``,
    ``at_timestamp``, ``from_id``, ...) or read like properties (``text``,
    ``timestamp``, ``finished``, ``stmt_id``, ...) — decorators appear to
    have been stripped in this view; confirm against the original source.
    """
    # Truncation length for the text shown in __repr__.
    _TEXT_REPR_MAX_LENGTH: int = 70
    # Registry: execution timestamp -> statements executed at that time.
    _stmts_by_ts: Dict[(Timestamp, List['Statement'])] = {}
    # Registry: AST-node id -> statements sharing that node.
    _stmts_by_id: Dict[(IdType, List['Statement'])] = {}
    def __init__(self, stmt_node: ast.stmt, frame: Optional[FrameType]=None, timestamp: Optional[Timestamp]=None, prev_stmt: Optional['Statement']=None, override: bool=False) -> None:
        """Wrap ``stmt_node`` with its execution frame, timestamp, and slicing-edge state."""
        self.stmt_node: ast.stmt = stmt_node
        self.frame: Optional[FrameType] = frame
        self._timestamp = (timestamp or Timestamp.current())
        self._finished: bool = False
        self.override: bool = override
        self.prev_stmt = prev_stmt
        self.class_scope: Optional[Namespace] = None
        self.lambda_call_point_deps_done_once = False
        self.node_id_for_last_call: Optional[int] = None
        # Cached lazily by stmt_contains_cascading_reactive_rval().
        self._stmt_contains_cascading_reactive_rval: Optional[bool] = None
        # Slicing edges: statement id -> symbols carried along that edge.
        self.dynamic_parents: Dict[(IdType, Set[Symbol])] = {}
        self.dynamic_children: Dict[(IdType, Set[Symbol])] = {}
        self.static_parents: Dict[(IdType, Set[Symbol])] = {}
        self.static_children: Dict[(IdType, Set[Symbol])] = {}
    def current(cls) -> 'Statement':
        """Return the statement registered at the current timestamp."""
        return cls.at_timestamp(Timestamp.current())
    def id(self) -> IdType:
        """Alias for stmt_id (the AST node's id)."""
        return self.stmt_id
    def timestamp(self) -> Timestamp:
        """Execution timestamp of this statement."""
        return self._timestamp
    def prev(self) -> Optional['Statement']:
        """Previous statement created for the same AST node, if any."""
        return self.prev_stmt
    def text(self) -> str:
        """Source text of the statement, unparsed from its AST node.

        Assignments that fit on one physical line are re-joined manually so
        that tuple parentheses added by the unparser can be stripped.
        """
        if (isinstance(self.stmt_node, ast.Assign) and (self.stmt_node.lineno == max((getattr(nd, 'lineno', self.stmt_node.lineno) for nd in ast.walk(self.stmt_node))))):
            components = []
            for node in (self.stmt_node.targets + [self.stmt_node.value]):
                components.append(astunparse.unparse(node).strip())
                components[(- 1)] = self._strip_tuple_parens(node, components[(- 1)])
            return ' = '.join(components).strip()
        else:
            return astunparse.unparse(self.stmt_node).strip()
    def _strip_tuple_parens(node: ast.AST, text: str) -> str:
        """Drop the outer parentheses the unparser adds around tuples/binops."""
        if (isinstance(node, (ast.BinOp, ast.Tuple)) and (len(text) >= 2) and (text[0] == '(') and (text[(- 1)] == ')')):
            return text[1:(- 1)]
        else:
            return text
    def create_and_track(cls, stmt_node: ast.stmt, frame: Optional[FrameType]=None, timestamp: Optional[Timestamp]=None, override: bool=False) -> 'Statement':
        """Create a Statement, register it, and wire deferred static parent edges.

        With ``override`` and an existing statement at ``timestamp``, the new
        statement replaces the old one in both registries and inherits its
        slicing edges.
        """
        stmt_id = id(stmt_node)
        prev_stmt = (cls.from_id(stmt_id) if cls.has_id(stmt_id) else None)
        stmt = cls(stmt_node, frame=frame, timestamp=timestamp, prev_stmt=prev_stmt, override=override)
        if (override and cls._stmts_by_ts.get(timestamp)):
            prev = cls.at_timestamp(timestamp)
            all_with_prev_id = cls._stmts_by_id.pop(prev.id)
            assert (len(all_with_prev_id) == 1)
            assert (not cls.has_id(stmt.stmt_id))
            cls._stmts_by_ts[stmt.timestamp] = [stmt]
            cls._stmts_by_id[stmt.stmt_id] = [stmt]
            # Repoint every edge that referenced the replaced statement.
            for _ in SlicingContext.iter_slicing_contexts():
                for cid in list(prev.children.keys()):
                    cls.from_id(cid).replace_parent_edges(prev, stmt)
                for pid in list(prev.parents.keys()):
                    cls.from_id(pid).replace_child_edges(prev, stmt)
        else:
            cls._stmts_by_ts.setdefault(stmt.timestamp, []).append(stmt)
            cls._stmts_by_id.setdefault(stmt.stmt_id, []).append(stmt)
        # Flush static parent edges that were recorded before this statement existed.
        with static_slicing_context():
            for (parent, syms) in flow().stmt_deferred_static_parents.get(stmt.timestamp, {}).items():
                stmt.add_parent_edges(parent, syms)
        flow().stmt_deferred_static_parents.pop(stmt.timestamp, None)
        return stmt
    def is_module_stmt(self) -> bool:
        """True when the statement has no parent statement (top of a module/cell)."""
        return (tracer().parent_stmt_by_id.get(self.stmt_id) is None)
    def is_outer_stmt(self) -> bool:
        """True when the tracer considers this an outer (non-nested) statement."""
        return pyc.is_outer_stmt(self.stmt_id)
    def is_initial_frame_stmt(self) -> bool:
        """True when the statement belongs to the initial execution frame."""
        return tracer().is_initial_frame_stmt(self.stmt_id)
    def clear(cls):
        """Reset the timestamp registry.

        NOTE(review): _stmts_by_id is NOT cleared here — confirm whether
        that is intentional.
        """
        cls._stmts_by_ts = {}
    def at_timestamp(cls, ts: TimestampOrCounter, stmt_num: Optional[int]=None) -> 'Statement':
        """Look up the first statement registered at ``ts`` (or ``Timestamp(ts, stmt_num)``)."""
        assert (isinstance(ts, Timestamp) or (stmt_num is not None))
        if isinstance(ts, Timestamp):
            ts_to_use = ts
        else:
            ts_to_use = Timestamp(ts, stmt_num)
        return cls._stmts_by_ts[ts_to_use][0]
    def from_id(cls, stmt_id: IdType) -> 'Statement':
        """Look up the first statement registered under ``stmt_id`` (KeyError if absent)."""
        return cls._stmts_by_id[stmt_id][0]
    def from_id_nullable(cls, stmt_id: IdType) -> Optional['Statement']:
        """Like from_id but returns None when ``stmt_id`` is unknown."""
        return cls._stmts_by_id.get(stmt_id, [None])[0]
    def has_id(cls, stmt_id: IdType) -> bool:
        """True when at least one statement is registered under ``stmt_id``."""
        return (len(cls._stmts_by_id.get(stmt_id, [])) > 0)
    def all_at_timestamp(cls, ts: Timestamp) -> List['Statement']:
        """All statements registered at ``ts`` (empty list if none)."""
        return cls._stmts_by_ts.get(ts, [])
    def containing_cell(self) -> 'Cell':
        """The notebook cell this statement executed in."""
        return cells().at_timestamp(self.timestamp)
    def lineno(self) -> int:
        """First line number of the underlying AST node."""
        return self.stmt_node.lineno
    def finished(self) -> bool:
        """Whether finished_execution_hook has completed for this statement."""
        return self._finished
    def stmt_id(self) -> int:
        """Identity of the underlying AST node."""
        return id(self.stmt_node)
    def __str__(self):
        return self.text
    def __repr__(self):
        return f'<{self.__class__.__name__}[ts={self.timestamp},text={repr(self.text[:self._TEXT_REPR_MAX_LENGTH])}]>'
    def slice(self, blacken: bool=True, seed_only: bool=False, format_type: Optional[Type[FormatType]]=None) -> Slice:
        """Compute the program slice seeded at this statement."""
        return self.format_slice(blacken=blacken, seed_only=seed_only, format_type=format_type)
    def code(self, blacken: bool=True, format_type: Optional[Type[FormatType]]=None) -> Slice:
        """Just this statement's code (a seed-only slice)."""
        return self.format_slice(blacken=blacken, seed_only=True, format_type=format_type)
    def to_function(self, *args, **kwargs):
        """Convert this statement's code into a callable function."""
        return self.code().to_function(*args, **kwargs)
    def stmt_contains_cascading_reactive_rval(self) -> bool:
        """Whether the rval contains a cascading-reactive atom (computed once, then cached)."""
        if (self._stmt_contains_cascading_reactive_rval is None):
            self._stmt_contains_cascading_reactive_rval = stmt_contains_cascading_reactive_rval(self.stmt_node)
        return self._stmt_contains_cascading_reactive_rval
    def _contains_lval(self) -> bool:
        """Whether the statement assigns to anything."""
        return stmt_contains_lval(self.stmt_node)
    def get_post_call_scope(self, call_frame: FrameType) -> Scope:
        """Return the scope that code executing after this call should run in.

        Class definitions get a fresh pending namespace; function calls get a
        fresh call scope derived from the called function's symbol.
        """
        old_scope = tracer().cur_frame_original_scope
        if isinstance(self.stmt_node, ast.ClassDef):
            pending_ns = Namespace.make_child_namespace(old_scope, self.stmt_node.name)
            tracer().pending_class_namespaces.append(pending_ns)
            return pending_ns
        if isinstance(self.stmt_node, (ast.FunctionDef, ast.AsyncFunctionDef)):
            func_name = self.stmt_node.name
        else:
            func_name = None
        func_sym = tracer().calling_symbol
        if ((func_sym is None) or (func_sym.call_scope is None)):
            # Fall back to the symbol recorded at definition time.
            func_sym = flow().statement_to_func_sym.get(id(self.stmt_node), None)
        if (func_sym is None):
            return old_scope
        if (func_sym.call_scope is None):
            msg = ('got non-function symbol %s for name %s' % (func_sym.full_path, func_name))
            if flow().is_dev_mode:
                raise TypeError(msg)
            else:
                logger.warning(msg)
            return old_scope
        if (not self.finished):
            # First execution: give the function a fresh call scope.
            prev_call_scope = func_sym.call_scope
            new_call_scope = prev_call_scope.parent_scope.make_child_scope(func_sym.name)
            if (prev_call_scope.symtab is not None):
                new_call_scope.symtab = prev_call_scope.symtab
            func_sym.call_scope = new_call_scope
        func_sym.create_symbols_for_call_args(call_frame)
        return func_sym.call_scope
    def _handle_reactive_store(target: ast.AST) -> None:
        """Propagate reactive/blocking markers through the symbols resolved from ``target``."""
        try:
            symbol_ref = SymbolRef(target)
            reactive_seen = False
            blocking_seen = False
            for resolved in symbol_ref.gen_resolved_symbols(tracer().cur_frame_original_scope, only_yield_final_symbol=False, yield_all_intermediate_symbols=True, inherit_reactivity=False, yield_in_reverse=True):
                if resolved.is_blocking:
                    blocking_seen = True
                if (resolved.is_reactive and (not blocking_seen)):
                    if resolved.is_cascading_reactive:
                        flow().updated_deep_reactive_symbols.add(resolved.sym)
                    else:
                        flow().updated_deep_reactive_symbols_last_cell.add(resolved.sym)
                    reactive_seen = True
                if ((not resolved.is_live) and resolved.atom.is_cascading_reactive):
                    resolved.sym.bump_cascading_reactive_cell_num()
                if resolved.is_last:
                    resolved.sym.refresh()
                    if (reactive_seen and (not blocking_seen)):
                        if resolved.is_cascading_reactive:
                            flow().updated_reactive_symbols.add(resolved.sym)
                        else:
                            flow().updated_reactive_symbols_last_cell.add(resolved.sym)
                    if (blocking_seen and (resolved.sym not in flow().updated_symbols)):
                        flow().blocked_reactive_timestamps_by_symbol[resolved.sym] = flow().cell_counter()
        except TypeError:
            return
    def _handle_assign_target_for_deps(self, target: ast.AST, deps: Set[Symbol], maybe_fixup_literal_namespace: bool=False) -> None:
        """Upsert the symbol for one assignment target with dependency set ``deps``."""
        try:
            (scope, name, obj, is_subscript, excluded_deps) = tracer().resolve_store_data_for_target(target, self.frame)
        except KeyError:
            # e.g. slices aren't supported for symbol resolution.
            if flow().is_dev_mode:
                logger.warning('keyerror for %s', (astunparse.unparse(target) if isinstance(target, ast.AST) else target))
            return
        if (isinstance(target, ast.Name) and getattr(obj, '__name__', '').startswith(Slice.FUNC_PREFIX)):
            # Rename sliced functions to the user-visible assignment name.
            obj.__name__ = target.id
        subscript_vals_to_use = [is_subscript]
        if scope.is_namespace_scope:
            namespace = cast(Namespace, scope)
            # Some classes (e.g. dataframes) expose the same name as both
            # attribute and subscript; upsert under both forms.
            for (modname, classname) in DUPED_ATTRSUB_CLASSES:
                module = sys.modules.get(modname)
                if (module is None):
                    continue
                clazz = getattr(module, classname, None)
                if (clazz is None):
                    continue
                if (isinstance(namespace.obj, clazz) and (name in namespace.obj.columns)):
                    subscript_vals_to_use.append((not is_subscript))
                    break
        for subscript_val in subscript_vals_to_use:
            upserted = scope.upsert_symbol_for_name(name, obj, (deps - excluded_deps), self.stmt_node, is_subscript=subscript_val, symbol_node=target, is_cascading_reactive=self.stmt_contains_cascading_reactive_rval)
            logger.info('sym %s upserted to scope %s has parents %s', upserted, scope, upserted.parents)
        self._handle_reactive_store(target)
        if maybe_fixup_literal_namespace:
            # Anonymous literal namespaces adopt the assigned name and scope.
            namespace_for_upsert = flow().namespaces.get(id(obj), None)
            if ((namespace_for_upsert is not None) and namespace_for_upsert.is_anonymous):
                namespace_for_upsert.scope_name = str(name)
                namespace_for_upsert.parent_scope = scope
    def _handle_store_target_tuple_unpack_from_deps(self, target: Union[(ast.List, ast.Tuple)], deps: Set[Symbol]) -> None:
        """Recursively assign the same dep set to every element of a tuple/list target."""
        for inner_target in target.elts:
            if isinstance(inner_target, (ast.List, ast.Tuple)):
                self._handle_store_target_tuple_unpack_from_deps(inner_target, deps)
            else:
                self._handle_assign_target_for_deps(inner_target, deps)
    def _handle_starred_store_target(self, target: ast.Starred, inner_deps: List[Optional[Symbol]]) -> None:
        """Handle ``*rest``-style targets: build a namespace entry per captured element."""
        try:
            (scope, name, obj, is_subscript, _) = tracer().resolve_store_data_for_target(target, self.frame)
        except KeyError as e:
            logger.info('Exception: %s', e)
            return
        ns = flow().namespaces.get(id(obj), None)
        if (ns is None):
            ns = Namespace(obj, str(name), parent_scope=scope)
        for (i, inner_dep) in enumerate(inner_deps):
            deps = (set() if (inner_dep is None) else {inner_dep})
            ns.upsert_symbol_for_name(i, inner_dep.obj, deps, self.stmt_node, is_subscript=True, is_cascading_reactive=self.stmt_contains_cascading_reactive_rval)
        scope.upsert_symbol_for_name(name, obj, set(), self.stmt_node, is_subscript=is_subscript, symbol_node=target, is_cascading_reactive=self.stmt_contains_cascading_reactive_rval)
        self._handle_reactive_store(target.value)
    def _handle_store_target_tuple_unpack_from_namespace(self, target: Union[(ast.List, ast.Tuple)], rhs_namespace: Namespace, extra_deps: Set[Symbol]) -> None:
        """Unpack a tuple/list target element-wise against the RHS namespace's symbols."""
        saved_starred_node: Optional[ast.Starred] = None
        saved_starred_deps = []
        for ((i, inner_dep), (_, inner_target)) in match_container_obj_or_namespace_with_literal_nodes(rhs_namespace, target):
            if isinstance(inner_target, ast.Starred):
                # Starred target is handled last, once all its deps are collected.
                saved_starred_node = inner_target
                saved_starred_deps.append(inner_dep)
                continue
            if (inner_dep is None):
                inner_deps = set()
            else:
                inner_deps = {inner_dep}
            inner_deps |= extra_deps
            if isinstance(inner_target, (ast.List, ast.Tuple)):
                inner_namespace = flow().namespaces.get(inner_dep.obj_id, None)
                if ((inner_namespace is None) or (inner_namespace.obj is None)):
                    self._handle_store_target_tuple_unpack_from_deps(inner_target, inner_deps)
                else:
                    self._handle_store_target_tuple_unpack_from_namespace(inner_target, inner_namespace, extra_deps)
            else:
                self._handle_assign_target_for_deps(inner_target, inner_deps, maybe_fixup_literal_namespace=True)
        if (saved_starred_node is not None):
            self._handle_starred_store_target(saved_starred_node, saved_starred_deps)
    def _handle_store_target(self, target: ast.AST, value: ast.AST, skip_namespace_check: bool=False) -> None:
        """Dispatch one store target: tuple unpack (namespace-aware) or plain assignment."""
        if isinstance(target, (ast.List, ast.Tuple)):
            rhs_namespace = (None if skip_namespace_check else flow().namespaces.get(id(tracer().saved_assign_rhs_obj), None))
            if ((rhs_namespace is None) or (rhs_namespace.obj is None)):
                self._handle_store_target_tuple_unpack_from_deps(target, resolve_rval_symbols(value))
            else:
                extra_deps: Set[Symbol] = set()
                if isinstance(value, ast.Call):
                    extra_deps |= resolve_rval_symbols(value)
                self._handle_store_target_tuple_unpack_from_namespace(target, rhs_namespace, extra_deps)
        else:
            self._handle_assign_target_for_deps(target, resolve_rval_symbols(value), maybe_fixup_literal_namespace=True)
    def _handle_store(self, node: Union[(ast.Assign, ast.For)]) -> None:
        """Handle every store target of an Assign, or the loop target of a For."""
        if isinstance(node, ast.Assign):
            for target in node.targets:
                self._handle_store_target(target, node.value)
        elif isinstance(node, ast.For):
            self._handle_store_target(node.target, node.iter, skip_namespace_check=True)
        else:
            raise TypeError(('node type not supported for node: %s' % ast.dump(node)))
    def _handle_delete(self) -> None:
        """Delete the data symbol for every target of a ``del`` statement."""
        assert isinstance(self.stmt_node, ast.Delete)
        for target in self.stmt_node.targets:
            try:
                (scope, obj, name, is_subscript) = tracer().resolve_del_data_for_target(target)
                scope.delete_data_symbol_for_name(name, is_subscript=is_subscript)
            except KeyError as e:
                # e.g. slices aren't supported for del resolution.
                logger.info('got key error: %s', e)
    def _make_lval_data_symbols(self) -> None:
        """Create/update symbols for all assignment targets of this statement."""
        if isinstance(self.stmt_node, (ast.Assign, ast.For)):
            self._handle_store(self.stmt_node)
        else:
            self._make_lval_data_symbols_old()
    def _make_lval_data_symbols_old(self) -> None:
        """Legacy lval handling for defs, classes, imports, and aug-assigns."""
        symbol_edges = get_symbol_edges(self.stmt_node)
        should_overwrite = (not isinstance(self.stmt_node, ast.AugAssign))
        is_function_def = isinstance(self.stmt_node, (ast.FunctionDef, ast.AsyncFunctionDef))
        is_class_def = isinstance(self.stmt_node, ast.ClassDef)
        is_import = isinstance(self.stmt_node, (ast.Import, ast.ImportFrom))
        if (is_function_def or is_class_def):
            assert (len(symbol_edges) == 1)
        for (target, dep_node) in symbol_edges:
            rval_deps = resolve_rval_symbols(dep_node)
            if is_import:
                # Track the imported module itself as a dependency.
                dep_node_as_alias = cast(ast.alias, dep_node)
                if isinstance(self.stmt_node, ast.ImportFrom):
                    module = (sys.modules.get(f'{self.stmt_node.module}.{dep_node_as_alias.name}') or sys.modules.get(self.stmt_node.module))
                    if (self.frame.f_locals is shell().user_ns):
                        for alias in self.stmt_node.names:
                            if (alias.name == '*'):
                                flow().starred_import_modules.add(module.__name__)
                                break
                else:
                    module = sys.modules.get(dep_node_as_alias.name)
                if (module not in (None, builtins)):
                    module_sym = tracer().create_if_not_exists_module_symbol(module, self.stmt_node, is_load=False, is_named=(dep_node_as_alias.asname is None))
                    if (module_sym is not None):
                        rval_deps.update(flow().aliases.get(module_sym.obj_id, set()))
                target_as_str = cast(str, target)
                if ((target_as_str == '*') or ('.' in target_as_str)):
                    continue
            logger.info('create edges from %s to %s', rval_deps, target)
            if is_class_def:
                # Bind the freshly created class object to its pending namespace.
                assert (self.class_scope is not None)
                class_ref = self.frame.f_locals[cast(ast.ClassDef, self.stmt_node).name]
                self.class_scope.obj = class_ref
                flow().namespaces[id(class_ref)] = self.class_scope
            try:
                (scope, name, obj, is_subscript, excluded_deps) = tracer().resolve_store_data_for_target(target, self.frame)
                scope.upsert_symbol_for_name(name, obj, (rval_deps - excluded_deps), self.stmt_node, overwrite=should_overwrite, is_subscript=is_subscript, is_function_def=is_function_def, is_import=is_import, class_scope=self.class_scope, propagate=(not isinstance(self.stmt_node, ast.For)), symbol_node=(target if isinstance(target, ast.AST) else None), is_cascading_reactive=self.stmt_contains_cascading_reactive_rval)
                if isinstance(self.stmt_node, (ast.FunctionDef, ast.ClassDef, ast.AsyncFunctionDef, ast.Import, ast.ImportFrom)):
                    self._handle_reactive_store(self.stmt_node)
                elif isinstance(target, ast.AST):
                    self._handle_reactive_store(target)
            except KeyError:
                # e.g. slices aren't supported for symbol resolution.
                if flow().is_dev_mode:
                    logger.warning('keyerror for %s', (astunparse.unparse(target) if isinstance(target, ast.AST) else target))
            except ImportError:
                raise
            except Exception as e:
                logger.warning('exception while handling store: %s', e)
                if flow().is_test:
                    raise e
    def handle_dependencies(self) -> None:
        """Process external calls, then lvals / deletes / plain rvals for this statement."""
        for external_call in tracer().external_calls:
            logger.info('external call: %s', external_call)
            external_call._handle_impl()
        if self._contains_lval():
            self._make_lval_data_symbols()
        elif isinstance(self.stmt_node, ast.Delete):
            self._handle_delete()
        else:
            resolve_rval_symbols(self.stmt_node)
    def mark_finished(self) -> None:
        """Mark execution complete and release the frame reference."""
        self._finished = True
        self.frame = None
    def finished_execution_hook(self) -> None:
        """Run once after execution: record dependencies and evaluate watchpoints."""
        if self._finished:
            return
        self.handle_dependencies()
        with tracer().dataflow_tracing_disabled():
            for sym in list(tracer().this_stmt_updated_symbols):
                passing_watchpoints = sym.watchpoints(sym.obj, position=(flow().get_position(self.frame)[0], self.lineno), symbol_name=sym.readable_name)
                if passing_watchpoints:
                    flow().active_watchpoints.append((passing_watchpoints, sym))
        self.mark_finished()
def test_subscript_dependency_fp():
    """No false positive: y reads lst[0], so mutating lst[1] must not stale y."""
    cells = [
        'lst = [0, 1, 2]',
        'x = 5',
        'y = x + lst[0]',
        'lst[1] = 10',
        'logging.info(y)',
    ]
    for cell in cells:
        run_cell(cell)
    assert_not_detected('y depends only on unchanged lst[0] and not on changed lst[1]')
class ImageSize(PyClassnameExceptionRaiser):
    """Image dimensions along the x, y, z, c (channel) and t (time) axes.

    With no arguments every dimension is zero; otherwise every required
    dimension key must be supplied as a keyword argument, each coercible
    to int.
    """

    def __init__(self, *args, **kwargs):
        if ((len(args) == 0) and (len(kwargs) == 0)):
            # default construction: zero-sized image
            self.x = 0
            self.y = 0
            self.z = 0
            self.c = 0
            self.t = 0
            return
        missing_keys = get_missing_keys(kwargs.keys(), get_required_dimension_keys())
        if missing_keys:
            self.raise_creating_clex('Missing arguments: {}'.format(','.join(missing_keys)))
        self.x = self._parse_int(kwargs, 'x')
        self.y = self._parse_int(kwargs, 'y')
        self.z = self._parse_int(kwargs, 'z')
        self.c = self._parse_int(kwargs, 'c')
        self.t = self._parse_int(kwargs, 't')

    def _parse_int(self, params, key):
        """Return params[key] coerced to int.

        Raises PyImarisWriterException if the value cannot be parsed.
        (The parameter was previously named ``dict``, shadowing the builtin.)
        """
        try:
            return int(params[key])
        except ValueError:
            raise PyImarisWriterException('Error parsing int from parameter {} : "{}"'.format(key, params[key]))

    def __str__(self):
        return '{} (x: {} y: {} z: {} c: {} t: {})'.format(self.__class__.__name__, self.x, self.y, self.z, self.c, self.t)

    def __truediv__(self, other):
        # element-wise floor division of each dimension
        return ImageSize(x=(self.x // other.x), y=(self.y // other.y), z=(self.z // other.z), c=(self.c // other.c), t=(self.t // other.t))
def clip_random(image, min_shape):
    """Randomly crop `image` to a size drawn between `min_shape` and its full size.

    If the image is not larger than the minimum along a dimension, that
    dimension is kept at its current size. The channel count is preserved.
    """
    shape = tf.shape(image)
    img_height, img_width = shape[0], shape[1]
    min_height, min_width = min_shape[0], min_shape[1]

    def _random_height():
        return tf.random.uniform([], min_height, img_height, dtype=tf.int32)

    def _random_width():
        return tf.random.uniform([], min_width, img_width, dtype=tf.int32)

    crop_height = tf.cond(tf.math.greater(img_height, min_height), _random_height, lambda: img_height)
    crop_width = tf.cond(tf.math.greater(img_width, min_width), _random_width, lambda: img_width)
    return tf.image.random_crop(image, size=(crop_height, crop_width, shape[-1]))
def main():
    """Parse CLI arguments into a config dict, echo it, and launch the run."""
    parsed = prepare_parser().parse_args()
    config = vars(parsed)
    print(config)
    run(config)
def metrics_mean(l):
    """Average each metric across a list of per-step metric dicts.

    Parameters:
        l: iterable of dicts mapping metric name -> numeric value. The dicts
           need not all contain the same keys; each metric is averaged over
           only the entries that report it.

    Returns:
        dict mapping each metric name seen in ``l`` to the mean (a numpy
        float) of all values recorded for it.
    """
    collected = {}
    for entry in l:
        for metric_name, value in entry.items():
            # group every observation of the same metric together
            collected.setdefault(metric_name, []).append(value)
    # np.mean accepts a plain list directly; the intermediate np.array the
    # original built was redundant.
    return {name: np.mean(values) for name, values in collected.items()}
def get_frame(discr, gen, dc_vars, device=None, discr_src=None):
    """Build the inference 'frame' (SemVar or SupVAE) for the configured mode.

    Shapes and stds are taken from the discriminator when it exposes them,
    falling back to entries in ``dc_vars``. Returns None for unrecognized
    modes.

    NOTE(review): the positional layout of ``q_args`` mirrors the SemVar /
    SupVAE constructor signatures defined elsewhere — do not reorder.
    """
    if (type(dc_vars) is not edic):
        dc_vars = edic(dc_vars)
    # prefer discriminator-provided shapes/stds; fall back to config values
    shape_x = (dc_vars['shape_x'] if ('shape_x' in dc_vars) else (dc_vars['dim_x'],))
    shape_s = (discr.shape_s if hasattr(discr, 'shape_s') else (dc_vars['dim_s'],))
    shape_v = (discr.shape_v if hasattr(discr, 'shape_v') else (dc_vars['dim_v'],))
    std_v1x = (discr.std_v1x if hasattr(discr, 'std_v1x') else dc_vars['qstd_v'])
    std_s1vx = (discr.std_s1vx if hasattr(discr, 'std_s1vx') else dc_vars['qstd_s'])
    std_s1x = (discr.std_s1x if hasattr(discr, 'std_s1x') else dc_vars['qstd_s'])
    mode = dc_vars['mode']
    # base posterior args differ by model family: svgm uses (v|x, s|v,x), svae uses (s|x)
    if mode.startswith('svgm'):
        q_args_stem = (discr.v1x, std_v1x, discr.s1vx, std_s1vx)
    elif mode.startswith('svae'):
        q_args_stem = (discr.s1x, std_s1x)
    else:
        return None
    # '-da2' modes prepend source-domain posteriors; twist modes pad in front,
    # all other modes pad behind, keeping a fixed-arity argument tuple
    if ((mode == 'svgm-da2') and (discr_src is not None)):
        q_args = ((discr_src.v1x, (discr_src.std_v1x if hasattr(discr_src, 'std_v1x') else dc_vars['qstd_v']), discr_src.s1vx, (discr_src.std_s1vx if hasattr(discr_src, 'std_s1vx') else dc_vars['qstd_s'])) + q_args_stem)
    elif ((mode == 'svae-da2') and (discr_src is not None)):
        q_args = ((discr_src.s1x, (discr_src.std_s1x if hasattr(discr_src, 'std_s1x') else dc_vars['qstd_s'])) + q_args_stem)
    elif (mode in MODES_TWIST):
        q_args = (((None,) * len(q_args_stem)) + q_args_stem)
    else:
        q_args = (q_args_stem + ((None,) * len(q_args_stem)))
    if mode.startswith('svgm'):
        frame = SemVar(shape_s, shape_v, shape_x, dc_vars['dim_y'], gen.x1sv, dc_vars['pstd_x'], discr.y1s, *q_args, *dc_vars.sublist(['mu_s', 'sig_s', 'mu_v', 'sig_v', 'corr_sv']), (mode in MODES_DA), *dc_vars.sublist(['src_mvn_prior', 'tgt_mvn_prior']), device)
    elif mode.startswith('svae'):
        frame = SupVAE(shape_s, shape_x, dc_vars['dim_y'], gen.x1s, dc_vars['pstd_x'], discr.y1s, *q_args, *dc_vars.sublist(['mu_s', 'sig_s']), (mode in MODES_DA), *dc_vars.sublist(['src_mvn_prior', 'tgt_mvn_prior']), device)
    return frame
class ChatBotBasedSudokuSolver(object):
    """Solve Sudoku puzzles by prompting an LLM agent and validating its reply."""

    def __init__(self, llm_agent) -> None:
        self.llm_agent = llm_agent
        self.parser = LLMReplyParserForSudoku()

    def generate_prompt(self, init_board):
        """Build the chat messages asking the LLM to solve *init_board*."""
        (role, msg_content) = ('user', self._generate_message_content(init_board))
        msgs = self.llm_agent.compose_messages([role], [msg_content])
        return msgs

    def run(self, sudoku_puzzle_instance_str):
        """Solve one puzzle string.

        Returns (is_valid, solution_rows); (False, None) when the LLM reply
        or puzzle JSON cannot be parsed. Raises ValueError when the initial
        board itself is invalid.
        """
        sudoku_puzzle_instance_str = self._rectify_sudoku_puzzle_instance_str(sudoku_puzzle_instance_str)
        prompt = self.generate_prompt(sudoku_puzzle_instance_str)
        llm_reply = self.llm_agent.get_reply(prompt)
        (success, solution) = self.parser.parse_llm_reply(llm_reply)
        if (not success):
            print('Failed to extract solution from the reply')
            return (False, None)
        (success, json_obj) = utils.extract_json_from_text_string('{{"rows": {} }}'.format(sudoku_puzzle_instance_str))
        if (not success):
            return (False, None)
        (success, init_board) = self.parser.extract_sudoku_board(json_obj)
        if (not success):
            # BUG FIX: the original did `raise '...'` — raising a plain str is
            # itself a TypeError in Python 3; raise a proper exception instead.
            raise ValueError('Invalid initial board: {}'.format(sudoku_puzzle_instance_str))
        result = RuleBasedSudokuStateChecker.check_sudoku_board(init_board, solution)
        return (result.is_valid, result.rows)

    def _rectify_sudoku_puzzle_instance_str(self, sudoku_puzzle_instance_str):
        """Normalize a board string: strip existing quotes, then wrap every
        allowed cell token (digits and '*') in double quotes so the string
        can be embedded in JSON."""
        sudoku_puzzle_instance_str = sudoku_puzzle_instance_str.replace("'", '')
        sudoku_puzzle_instance_str = sudoku_puzzle_instance_str.replace('"', '')
        cell_allowed = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '*']
        for cell in cell_allowed:
            sudoku_puzzle_instance_str = sudoku_puzzle_instance_str.replace(cell, (('"' + cell) + '"'))
        return sudoku_puzzle_instance_str
def main(argv):
    """Load a YAML config, build the humanoid-mimic train/eval envs, and train.

    The log directory name encodes every config value; the 'ref' entry
    contributes only its file basename (without extension).
    """
    with open(FLAGS.config, 'r') as f:
        args = AttrDict(yaml.safe_load(f))
    logdir = 'logs/exp'
    for (k, v) in args.items():
        if (k == 'ref'):
            logdir += f"_{v.split('/')[(- 1)].split('.')[0]}"
        else:
            logdir += f'_{v}'
    # reference motion trajectory; first axis is time
    demo_traj = jnp.array(np.load(args.ref))
    demo_len = demo_traj.shape[0]
    # clamp all episode/cycle lengths to the demo length
    args.ep_len = min(args.ep_len, demo_len)
    args.cycle_len = min(args.get('cycle_len', demo_len), demo_len)
    args.ep_len_eval = min(args.get('ep_len_eval', demo_len), demo_len)
    train_env = envs.get_environment(env_name='humanoid_mimic_train', system_config=args.system_config, reference_traj=demo_traj, obs_type=args.get('obs_type', 'timestamp'), cyc_len=args.cycle_len, total_length=args.ep_len_eval, rollout_length=args.ep_len, early_termination=args.get('early_termination', False), demo_replay_mode=args.demo_replay_mode, err_threshold=args.threshold, replay_rate=args.get('replay_rate', 0.05), reward_scaling=args.get('reward_scaling', 1.0), rot_weight=args.rot_weight, vel_weight=args.vel_weight, ang_weight=args.ang_weight)
    eval_env = envs.get_environment(env_name='humanoid_mimic', system_config=args.system_config, reference_traj=demo_traj, obs_type=args.get('obs_type', 'timestamp'), cyc_len=args.cycle_len, rot_weight=args.rot_weight, vel_weight=args.vel_weight, ang_weight=args.ang_weight)
    with metrics.Writer(logdir) as writer:
        (make_inference_fn, params, _) = dmm.train(seed=args.seed, environment=train_env, eval_environment=eval_env, episode_length=(args.ep_len - 1), eval_episode_length=(args.ep_len_eval - 1), num_envs=args.num_envs, num_eval_envs=args.num_eval_envs, learning_rate=args.lr, num_evals=(args.max_it + 1), max_gradient_norm=args.max_grad_norm, network_factory=functools.partial(apg_networks.make_apg_networks, hidden_layer_sizes=(512, 256)), normalize_observations=args.normalize_observations, save_dir=logdir, progress_fn=writer.write_scalars, use_linear_scheduler=args.use_lr_scheduler, truncation_length=args.get('truncation_length', None))
def test_getitem():
    """CBGSDataset/NuScenesDataset indexing smoke test.

    Seeds numpy/torch so the random pipeline stages (rot/scale/flip/shuffle)
    are deterministic, then checks dataset length, flip metadata, and point
    tensor shapes for two samples.
    """
    np.random.seed(1)
    torch.manual_seed(1)
    point_cloud_range = [(- 50), (- 50), (- 5), 50, 50, 3]
    file_client_args = dict(backend='disk')
    class_names = ['car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle', 'motorcycle', 'pedestrian', 'traffic_cone', 'barrier']
    pipeline = [dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=5, use_dim=5, file_client_args=file_client_args), dict(type='LoadPointsFromMultiSweeps', sweeps_num=9, use_dim=[0, 1, 2, 3, 4], file_client_args=file_client_args, pad_empty_sweeps=True, remove_close=True, test_mode=True), dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True), dict(type='GlobalRotScaleTrans', rot_range=[(- 0.3925), 0.3925], scale_ratio_range=[0.95, 1.05], translation_std=[0, 0, 0]), dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5), dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range), dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range), dict(type='ObjectNameFilter', classes=class_names), dict(type='PointShuffle'), dict(type='DefaultFormatBundle3D', class_names=class_names), dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])]
    input_modality = dict(use_lidar=True, use_camera=False, use_radar=False, use_map=False, use_external=False)
    dataset_cfg = dict(type='CBGSDataset', dataset=dict(type='NuScenesDataset', data_root='tests/data/nuscenes', ann_file='tests/data/nuscenes/nus_info.pkl', pipeline=pipeline, classes=class_names, modality=input_modality, test_mode=False, use_valid_flag=True, box_type_3d='LiDAR'))
    nus_dataset = build_dataset(dataset_cfg)
    assert (len(nus_dataset) == 20)
    # expected flip flags / point counts depend on the seeds set above
    data = nus_dataset[0]
    assert (data['img_metas'].data['flip'] is True)
    assert (data['img_metas'].data['pcd_horizontal_flip'] is True)
    assert (data['points']._data.shape == (537, 5))
    data = nus_dataset[2]
    assert (data['img_metas'].data['flip'] is False)
    assert (data['img_metas'].data['pcd_horizontal_flip'] is False)
    assert (data['points']._data.shape == (901, 5))
def numseconds_to_numsamples(numseconds, sample_rate):
    """Convert a duration to a sample count, rounded to the nearest power of two.

    Asserts the result is non-zero (a zero result means the duration/rate
    combination was too small).
    """
    raw_count = int(numseconds * sample_rate)
    nearest_exponent = np.round(np.log2(raw_count))
    out_value = int(2 ** nearest_exponent)
    assert (out_value != 0), 'The inputs given gave an output value of 0. This is not acceptable.'
    return out_value
def add_interactive_args(parser):
    """Register interactive-mode options on *parser* under an 'Interactive' group."""
    gen_parser_from_dataclass(parser.add_argument_group('Interactive'), InteractiveConfig())
def PrintCategories():
    """Write every known error category to stderr (one per line), then exit 0."""
    lines = (' %s\n' % category for category in _ERROR_CATEGORIES)
    sys.stderr.write(''.join(lines))
    sys.exit(0)
class SparseBottleneck(nn.Module):
    """DenseNet-style bottleneck (1x1 then 3x3 conv) with sparsifying
    activations in place of ReLU; concatenates its output with the input."""

    def __init__(self, in_planes, growth_rate, sparsity=0.5, sparse_func='reg'):
        super(SparseBottleneck, self).__init__()
        assert sparse_func in models.sparse_func_dict
        inner_planes = 4 * growth_rate
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, inner_planes, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(inner_planes)
        self.conv2 = nn.Conv2d(inner_planes, growth_rate, kernel_size=3, padding=1, bias=False)
        sparse_cls = models.sparse_func_dict[sparse_func]
        self.sparse1 = sparse_cls(sparsity)
        self.sparse2 = sparse_cls(sparsity)

    def forward(self, x):
        # BN -> sparse activation -> conv, twice; then dense concatenation
        hidden = self.conv1(self.sparse1(self.bn1(x)))
        new_features = self.conv2(self.sparse2(self.bn2(hidden)))
        return torch.cat([new_features, x], 1)
class _FC_Layers(nn.Module):
    """VGG-style fully-connected classifier head: 4096 -> 4096 -> num_classes."""

    def __init__(self, opt):
        super(_FC_Layers, self).__init__()
        self.opt = opt
        self.classifier = nn.Sequential(nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, self.opt.num_classes))
        self._initialize_weights()

    def _initialize_weights(self):
        """Initialize weights by layer type.

        The Conv/ConvTranspose/BatchNorm branches are kept for parity with
        sibling modules even though this head only contains Linear layers.
        """
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # He-style init scaled by fan-out
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
                if (m.bias is not None):
                    m.bias.data.zero_()
            elif isinstance(m, nn.ConvTranspose2d):
                m.weight.data.normal_(0.0, 0.02)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                # FIX: removed an unused local (`n = m.weight.size(1)`) that
                # had no effect on the fixed N(0, 0.01) initialization below.
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()

    def forward(self, input):
        return self.classifier(input)
class PUSlice(object):
    """Dataset wrapper producing train/val (or test) datasets from a data
    directory or a pre-built pickle file.

    NOTE(review): presumably the Paderborn University bearing dataset —
    confirm against the surrounding project.
    """

    num_classes = 32      # number of fault classes
    inputchannel = 1      # single-channel signal input

    def __init__(self, data_dir, normlizetype):
        self.data_dir = data_dir
        self.normlizetype = normlizetype

    def data_preprare(self, test=False):
        # A path whose basename contains an extension is treated as a
        # pre-built pickle; otherwise raw files are read and cached.
        if len(os.path.basename(self.data_dir).split('.')) == 2:
            with open(self.data_dir, 'rb') as fo:
                list_data = pickle.load(fo, encoding='bytes')
        else:
            list_data = get_files(self.data_dir, test)
            with open(os.path.join(self.data_dir, 'PUSlice.pkl'), 'wb') as fo:
                pickle.dump(list_data, fo)
        if test:
            return dataset(list_data=list_data, test=True, transform=None)
        data_pd = pd.DataFrame({'data': list_data[0], 'label': list_data[1]})
        # stratified 80/20 split with a fixed seed for reproducibility
        train_pd, val_pd = train_test_split(data_pd, test_size=0.2, random_state=40, stratify=data_pd['label'])
        train_dataset = dataset(list_data=train_pd, transform=data_transforms('train', self.normlizetype))
        val_dataset = dataset(list_data=val_pd, transform=data_transforms('val', self.normlizetype))
        return (train_dataset, val_dataset)
def glue_eval_data_collator(dataset: Dataset, batch_size: int):
    """Yield evaluation batches covering the whole dataset in order.

    The last batch may be smaller than ``batch_size``; every value in a
    batch is converted to a numpy array.
    """
    total = len(dataset)
    num_steps = math.ceil(total / batch_size)
    for indices in np.array_split(np.arange(total), num_steps):
        raw = dataset[indices]
        yield {key: np.array(val) for key, val in raw.items()}
class TestQuantization(unittest.TestCase):
def tearDownClass(self):
shutil.rmtree('./saved_results', ignore_errors=True)
def test_int8_huggingface_model(self):
from neural_compressor.utils.load_huggingface import OptimizedModel
model_name_or_path = 'Intel/distilbert-base-uncased-finetuned-sst-2-english-int8-static'
tokenizer = transformers.AutoTokenizer.from_pretrained(model_name_or_path)
model = OptimizedModel.from_pretrained(model_name_or_path, from_tf=bool(('.ckpt' in model_name_or_path)), config=None, cache_dir=None, revision=None, use_auth_token=None)
stat = model.state_dict()
self.assertTrue((stat['classifier.module._packed_params.dtype'] == torch.qint8))
from huggingface_hub import hf_hub_download
resolved_weights_file = hf_hub_download(repo_id=model_name_or_path, filename='pytorch_model.bin')
q_config = torch.load(resolved_weights_file)['best_configure']
inc_model = Model(model)
inc_model.q_config = q_config
save_for_huggingface_upstream(inc_model, tokenizer, 'saved_results')
load_model = OptimizedModel.from_pretrained('saved_results') |
def run_clpr_train(input_doc):
    """Select features, add embeddings, and fit the CLPR model on the doc.

    Only features whose label equals 1 are kept as CLPR features.
    """
    input_doc = filter_feats(input_doc, load=True)
    print('Finished Feature selection')
    input_doc = add_embeddings(input_doc)
    input_doc._.CLPR_Features = [
        input_doc._.Features[idx]
        for idx, label in enumerate(input_doc._.Labels)
        if label == 1
    ]
    print('Added Embeddings')
    adu_model = fit_clpr_model(input_doc)
    print('Fit CLPR Model')
    return adu_model
_flax
class FlaxDDIMSchedulerTest(FlaxSchedulerCommonTest):
    """Tests for FlaxDDIMScheduler: config round-trips, step shapes,
    variance values, and full denoising-loop regression sums."""

    scheduler_classes = (FlaxDDIMScheduler,)
    forward_default_kwargs = (('num_inference_steps', 50),)

    def get_scheduler_config(self, **kwargs):
        """Return a default DDIM config, overridable via kwargs."""
        config = {'num_train_timesteps': 1000, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear'}
        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        """Run a complete 10-step denoising loop and return the final sample."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        state = scheduler.create_state()
        (key1, key2) = random.split(random.PRNGKey(0))
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        state = scheduler.set_timesteps(state, num_inference_steps)
        for t in state.timesteps:
            residual = model(sample, t)
            output = scheduler.step(state, residual, t, sample)
            sample = output.prev_sample
            state = output.state
            (key1, key2) = random.split(key2)
        return sample

    def check_over_configs(self, time_step=0, **config):
        """Save/load the scheduler config and check step outputs match."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        for scheduler_class in self.scheduler_classes:
            (sample, _) = self.dummy_sample
            residual = (0.1 * sample)
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            state = scheduler.create_state()
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                (new_scheduler, new_state) = scheduler_class.from_pretrained(tmpdirname)
            if ((num_inference_steps is not None) and hasattr(scheduler, 'set_timesteps')):
                state = scheduler.set_timesteps(state, num_inference_steps)
                new_state = new_scheduler.set_timesteps(new_state, num_inference_steps)
            elif ((num_inference_steps is not None) and (not hasattr(scheduler, 'set_timesteps'))):
                kwargs['num_inference_steps'] = num_inference_steps
            output = scheduler.step(state, residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(new_state, residual, time_step, sample, **kwargs).prev_sample
            assert (jnp.sum(jnp.abs((output - new_output))) < 1e-05), 'Scheduler outputs are not identical'

    def test_from_save_pretrained(self):
        """Round-trip through save_config/from_pretrained preserves outputs."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        for scheduler_class in self.scheduler_classes:
            (sample, _) = self.dummy_sample
            residual = (0.1 * sample)
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            state = scheduler.create_state()
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                (new_scheduler, new_state) = scheduler_class.from_pretrained(tmpdirname)
            if ((num_inference_steps is not None) and hasattr(scheduler, 'set_timesteps')):
                state = scheduler.set_timesteps(state, num_inference_steps)
                new_state = new_scheduler.set_timesteps(new_state, num_inference_steps)
            elif ((num_inference_steps is not None) and (not hasattr(scheduler, 'set_timesteps'))):
                kwargs['num_inference_steps'] = num_inference_steps
            output = scheduler.step(state, residual, 1, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(new_state, residual, 1, sample, **kwargs).prev_sample
            assert (jnp.sum(jnp.abs((output - new_output))) < 1e-05), 'Scheduler outputs are not identical'

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Like check_over_configs, but varies forward-call kwargs instead."""
        kwargs = dict(self.forward_default_kwargs)
        kwargs.update(forward_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        for scheduler_class in self.scheduler_classes:
            (sample, _) = self.dummy_sample
            residual = (0.1 * sample)
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            state = scheduler.create_state()
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                (new_scheduler, new_state) = scheduler_class.from_pretrained(tmpdirname)
            if ((num_inference_steps is not None) and hasattr(scheduler, 'set_timesteps')):
                state = scheduler.set_timesteps(state, num_inference_steps)
                new_state = new_scheduler.set_timesteps(new_state, num_inference_steps)
            elif ((num_inference_steps is not None) and (not hasattr(scheduler, 'set_timesteps'))):
                kwargs['num_inference_steps'] = num_inference_steps
            output = scheduler.step(state, residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(new_state, residual, time_step, sample, **kwargs).prev_sample
            assert (jnp.sum(jnp.abs((output - new_output))) < 1e-05), 'Scheduler outputs are not identical'

    def test_scheduler_outputs_equivalence(self):
        """Tuple output (return_dict=False) must equal the dict output."""
        def set_nan_tensor_to_zero(t):
            # zero NaNs so allclose below compares only finite entries
            return t.at[(t != t)].set(0)

        def recursive_check(tuple_object, dict_object):
            if isinstance(tuple_object, (List, Tuple)):
                for (tuple_iterable_value, dict_iterable_value) in zip(tuple_object, dict_object.values()):
                    recursive_check(tuple_iterable_value, dict_iterable_value)
            elif isinstance(tuple_object, Dict):
                for (tuple_iterable_value, dict_iterable_value) in zip(tuple_object.values(), dict_object.values()):
                    recursive_check(tuple_iterable_value, dict_iterable_value)
            elif (tuple_object is None):
                return
            else:
                self.assertTrue(jnp.allclose(set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-05), msg=f'Tuple and dict output are not equal. Difference: {jnp.max(jnp.abs((tuple_object - dict_object)))}. Tuple has `nan`: {jnp.isnan(tuple_object).any()} and `inf`: {jnp.isinf(tuple_object)}. Dict has `nan`: {jnp.isnan(dict_object).any()} and `inf`: {jnp.isinf(dict_object)}.')
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            state = scheduler.create_state()
            (sample, _) = self.dummy_sample
            residual = (0.1 * sample)
            if ((num_inference_steps is not None) and hasattr(scheduler, 'set_timesteps')):
                state = scheduler.set_timesteps(state, num_inference_steps)
            elif ((num_inference_steps is not None) and (not hasattr(scheduler, 'set_timesteps'))):
                kwargs['num_inference_steps'] = num_inference_steps
            outputs_dict = scheduler.step(state, residual, 0, sample, **kwargs)
            if ((num_inference_steps is not None) and hasattr(scheduler, 'set_timesteps')):
                state = scheduler.set_timesteps(state, num_inference_steps)
            elif ((num_inference_steps is not None) and (not hasattr(scheduler, 'set_timesteps'))):
                kwargs['num_inference_steps'] = num_inference_steps
            outputs_tuple = scheduler.step(state, residual, 0, sample, return_dict=False, **kwargs)
            recursive_check(outputs_tuple[0], outputs_dict.prev_sample)

    def test_step_shape(self):
        """step() must preserve the sample shape at different timesteps."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            state = scheduler.create_state()
            (sample, _) = self.dummy_sample
            residual = (0.1 * sample)
            if ((num_inference_steps is not None) and hasattr(scheduler, 'set_timesteps')):
                state = scheduler.set_timesteps(state, num_inference_steps)
            elif ((num_inference_steps is not None) and (not hasattr(scheduler, 'set_timesteps'))):
                kwargs['num_inference_steps'] = num_inference_steps
            output_0 = scheduler.step(state, residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(state, residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)
        # with steps_offset=1, timesteps are shifted by one
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        state = scheduler.create_state()
        state = scheduler.set_timesteps(state, 5)
        assert jnp.equal(state.timesteps, jnp.array([801, 601, 401, 201, 1])).all()

    def test_betas(self):
        for (beta_start, beta_end) in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ['linear', 'squaredcos_cap_v2']:
            self.check_over_configs(beta_schedule=schedule)

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for (t, num_inference_steps) in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_variance(self):
        """Spot-check _get_variance against precomputed reference values."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        state = scheduler.create_state()
        assert (jnp.sum(jnp.abs((scheduler._get_variance(state, 0, 0) - 0.0))) < 1e-05)
        assert (jnp.sum(jnp.abs((scheduler._get_variance(state, 420, 400) - 0.14771))) < 1e-05)
        assert (jnp.sum(jnp.abs((scheduler._get_variance(state, 980, 960) - 0.3246))) < 1e-05)
        assert (jnp.sum(jnp.abs((scheduler._get_variance(state, 0, 0) - 0.0))) < 1e-05)
        assert (jnp.sum(jnp.abs((scheduler._get_variance(state, 487, 486) - 0.00979))) < 1e-05)
        assert (jnp.sum(jnp.abs((scheduler._get_variance(state, 999, 998) - 0.02))) < 1e-05)

    def test_full_loop_no_noise(self):
        """Regression: full loop with defaults reproduces the reference sums."""
        sample = self.full_loop()
        result_sum = jnp.sum(jnp.abs(sample))
        result_mean = jnp.mean(jnp.abs(sample))
        assert (abs((result_sum - 172.0067)) < 0.01)
        assert (abs((result_mean - 0.223967)) < 0.001)

    def test_full_loop_with_set_alpha_to_one(self):
        # TPU and non-TPU backends produce slightly different reference sums
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = jnp.sum(jnp.abs(sample))
        result_mean = jnp.mean(jnp.abs(sample))
        if (jax_device == 'tpu'):
            assert (abs((result_sum - 149.8409)) < 0.01)
            assert (abs((result_mean - 0.1951)) < 0.001)
        else:
            assert (abs((result_sum - 149.8295)) < 0.01)
            assert (abs((result_mean - 0.1951)) < 0.001)

    def test_full_loop_with_no_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = jnp.sum(jnp.abs(sample))
        result_mean = jnp.mean(jnp.abs(sample))
        if (jax_device == 'tpu'):
            # no TPU reference values recorded for this configuration
            pass
        else:
            assert (abs((result_sum - 149.0784)) < 0.01)
            assert (abs((result_mean - 0.1941)) < 0.001)

    def test_prediction_type(self):
        for prediction_type in ['epsilon', 'sample', 'v_prediction']:
            self.check_over_configs(prediction_type=prediction_type)
def combine_columns(df: pd.DataFrame, columns: List[str], separator: str=' / ') -> pd.DataFrame:
    """Merge several last-level columns of a MultiIndex DataFrame into one
    string column per group, LaTeX-padded and joined by *separator*."""
    def _pad_percentage(f: float) -> str:
        # fixed width 5 with one decimal; leading spaces become LaTeX
        # phantom zeros so numbers align in the rendered table
        return '{:5.1f}'.format(f).replace(' ', '\\phantom{0}')
    pieces = [df.xs(name, axis=1, level=(- 1)).applymap(_pad_percentage) for name in columns]
    combined = pieces[0]
    for piece in pieces[1:]:
        combined = combined + (separator + piece)
    return combined
class Parallelize():
    """Run a Benchmark over its audio files in parallel with a process pool.

    Each worker process builds its own pipeline instance and streams one
    file, showing progress on its own tqdm bar slot.
    """

    def __init__(self, benchmark: Benchmark, num_workers: int=4):
        self.benchmark = benchmark
        self.num_workers = num_workers

    def run_single_job(self, pipeline_class: type, config: blocks.PipelineConfig, filepath: Path, description: Text) -> Annotation:
        """Stream a single file in a worker process and return its annotation."""
        # worker processes are named like 'SpawnPoolWorker-N'; N-1 picks the
        # tqdm bar position so each worker's bar gets its own line
        idx_process = (int(current_process().name.split('-')[1]) - 1)
        pipeline = pipeline_class(config)
        progress = TQDMProgressBar(description, leave=False, position=idx_process, do_close=True)
        return self.benchmark.run_single(pipeline, filepath, progress)

    def __call__(self, pipeline_class: type, config: blocks.PipelineConfig, metric: Optional[BaseMetric]=None) -> Union[(pd.DataFrame, List[Annotation])]:
        """Run the whole benchmark in parallel, then evaluate with *metric*
        (falling back to the pipeline's suggested metric)."""
        audio_file_paths = self.benchmark.get_file_paths()
        num_audio_files = len(audio_file_paths)
        try:
            # 'spawn' start method; presumably required for torch/CUDA safety
            # in workers — RuntimeError means it was already set, which is fine
            torch.multiprocessing.set_start_method('spawn')
        except RuntimeError:
            pass
        freeze_support()
        # share a lock with tqdm so concurrent per-worker bars don't interleave
        pool = Pool(processes=self.num_workers, initargs=(RLock(),), initializer=tqdm.set_lock)
        arg_list = [(pipeline_class, config, filepath, f'Streaming {filepath.stem} ({(i + 1)}/{num_audio_files})') for (i, filepath) in enumerate(audio_file_paths)]
        jobs = [pool.apply_async(self.run_single_job, args=args) for args in arg_list]
        pool.close()
        # collect results in submission order
        predictions = [job.get() for job in jobs]
        metric = (pipeline_class.suggest_metric() if (metric is None) else metric)
        return self.benchmark.evaluate(predictions, metric)
def get_images(directory, img_ext):
    """Yield image paths under *directory* (recursively) with extension
    *img_ext*, skipping files in 'damaged_jpeg' and 'jpeg_write' folders."""
    assert os.path.isdir(directory)
    excluded_parents = ['damaged_jpeg', 'jpeg_write']
    pattern = directory + f'/**/*{img_ext}'
    for candidate in glob.glob(pattern, recursive=True):
        parent_dir = candidate.split(os.sep)[(- 2)]
        if parent_dir not in excluded_parents:
            yield candidate
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.