code stringlengths 17 6.64M |
|---|
@Registry.register('data.dsprites', 'class')
class DSpritesData(base.ImageTfdsData):
    """Provides the DSprites data set.

    DSprites only comes with a training set. Therefore, the training, validation,
    and test set are split out of the original training set.

    For additional details and usage, see the base class.

    The data set page is https://github.com/deepmind/dsprites-dataset/.
    """

    def __init__(self, predicted_attribute, num_classes=None, data_dir=None):
        """Initializes the DSprites data source.

        Args:
            predicted_attribute: name of the latent factor used as the label.
            num_classes: optional coarser number of classes; the original
                classes are grouped evenly into this many bins. Defaults to the
                attribute's original number of classes.
            data_dir: optional tfds data directory.

        Raises:
            ValueError: if `predicted_attribute` is unknown, or `num_classes`
                is not an int in [2, num_original_classes].
        """
        dataset_builder = tfds.builder('dsprites:2.*.*', data_dir=data_dir)
        dataset_builder.download_and_prepare()
        info = dataset_builder.info
        if predicted_attribute not in info.features:
            raise ValueError('{} is not a valid attribute to predict.'.format(predicted_attribute))
        num_original_classes = info.features[predicted_attribute].num_classes
        if num_classes is None:
            num_classes = num_original_classes
        if (not isinstance(num_classes, int)) or num_classes <= 1 or num_classes > num_original_classes:
            # FIX: the message previously said "[2, ..., num_classes]", naming
            # the value under validation instead of the actual upper bound.
            raise ValueError('The number of classes should be None or in [2, ..., {}].'.format(num_original_classes))
        # Original class ids are integer-divided by this factor to coarsen labels.
        class_division_factor = float(num_original_classes) / num_classes
        num_total = info.splits['train'].num_examples
        num_samples_train = (TRAIN_SPLIT_PERCENT * num_total) // 100
        num_samples_val = (VAL_SPLIT_PERCENT * num_total) // 100
        num_samples_splits = {
            'train': num_samples_train,
            'val': num_samples_val,
            'trainval': num_samples_val + num_samples_train,
            'test': num_total - num_samples_val - num_samples_train,
            'train800': 800,
            'val200': 200,
            'train800val200': 1000,
        }
        # All custom splits are carved out of the single official 'train' split.
        tfds_splits = {
            'train': 'train[:{}]'.format(num_samples_splits['train']),
            'val': 'train[{}:{}]'.format(num_samples_splits['train'], num_samples_splits['trainval']),
            'trainval': 'train[:{}]'.format(num_samples_splits['trainval']),
            'test': 'train[{}:]'.format(num_samples_splits['trainval']),
            'train800': 'train[:800]',
            'val200': 'train[{}:{}]'.format(num_samples_splits['train'], num_samples_splits['train'] + 200),
            'train800val200': 'train[:800]+train[{}:{}]'.format(num_samples_splits['train'], num_samples_splits['train'] + 200),
        }

        def preprocess_fn(tensors):
            # Replicate the single grayscale channel to 3 channels, scale to [0, 255].
            images = tf.tile(tensors['image'], [1, 1, 3]) * 255
            # Coarsen the label by floor-dividing by the class grouping factor,
            # then cast back to the attribute's original dtype.
            label = tf.cast(
                tf.math.floordiv(tf.cast(tensors[predicted_attribute], tf.float32), class_division_factor),
                info.features[predicted_attribute].dtype)
            return dict(image=images, label=label)

        super(DSpritesData, self).__init__(
            dataset_builder=dataset_builder,
            tfds_splits=tfds_splits,
            num_samples_splits=num_samples_splits,
            num_preprocessing_threads=400,
            shuffle_buffer_size=10000,
            base_preprocess_fn=preprocess_fn,
            num_classes=num_classes)
|
@Registry.register('data.dtd', 'class')
class DTDData(base.ImageTfdsData):
    """Provides Describable Textures Dataset (DTD) data.

    As of version 1.0.0, the train/val/test splits correspond to those of the
    1st fold of the official cross-validation partition.

    For additional details and usage, see the base class.
    """

    def __init__(self, data_dir=None):
        builder = tfds.builder('dtd:3.*.*', data_dir=data_dir)
        builder.download_and_prepare()
        # Official splits are used directly; the fixed-size 800/200 splits
        # support the low-data regime.
        tfds_splits = {
            'train': 'train',
            'val': 'validation',
            'trainval': 'train+validation',
            'test': 'test',
            'train800': 'train[:800]',
            'val200': 'validation[:200]',
            'train800val200': 'train[:800]+validation[:200]',
        }
        splits_info = builder.info.splits
        n_train = splits_info['train'].num_examples
        n_val = splits_info['validation'].num_examples
        n_test = splits_info['test'].num_examples
        num_samples_splits = {
            'train': n_train,
            'val': n_val,
            'trainval': n_train + n_val,
            'test': n_test,
            'train800': 800,
            'val200': 200,
            'train800val200': 1000,
        }
        super(DTDData, self).__init__(
            dataset_builder=builder,
            tfds_splits=tfds_splits,
            num_samples_splits=num_samples_splits,
            num_preprocessing_threads=400,
            shuffle_buffer_size=10000,
            base_preprocess_fn=base.make_get_tensors_fn(['image', 'label']),
            num_classes=builder.info.features['label'].num_classes)
|
@Registry.register('data.eurosat', 'class')
class EurosatData(base.ImageTfdsData):
    """Provides EuroSat dataset.

    EuroSAT dataset is based on Sentinel-2 satellite images covering 13 spectral
    bands and consisting of 10 classes with 27000 labeled and
    geo-referenced samples.

    URL: https://github.com/phelber/eurosat
    """

    def __init__(self, subset='rgb', data_key='image', data_dir=None):
        builder = tfds.builder('eurosat/{}:2.*.*'.format(subset), data_dir=data_dir)
        builder.download_and_prepare()
        # Carve train/val/test out of the single official 'train' split.
        total = builder.info.splits[tfds.Split.TRAIN].num_examples
        train_count = (total * TRAIN_SPLIT_PERCENT) // 100
        val_count = (total * VALIDATION_SPLIT_PERCENT) // 100
        test_count = (total * TEST_SPLIT_PERCENT) // 100
        trainval_count = train_count + val_count
        tfds_splits = {
            'train': 'train[:{}]'.format(train_count),
            'val': 'train[{}:{}]'.format(train_count, trainval_count),
            'trainval': 'train[:{}]'.format(trainval_count),
            'test': 'train[{}:]'.format(trainval_count),
            'train800': 'train[:800]',
            'val200': 'train[{}:{}]'.format(train_count, train_count + 200),
            'train800val200': 'train[:800]+train[{}:{}]'.format(train_count, train_count + 200),
        }
        num_samples_splits = {
            'train': train_count,
            'val': val_count,
            'trainval': trainval_count,
            'test': test_count,
            'train800': 800,
            'val200': 200,
            'train800val200': 1000,
        }
        # The 'sentinel2' feature key carries 13 spectral bands; otherwise RGB.
        num_channels = 13 if data_key == 'sentinel2' else 3
        super(EurosatData, self).__init__(
            dataset_builder=builder,
            tfds_splits=tfds_splits,
            num_samples_splits=num_samples_splits,
            num_preprocessing_threads=100,
            shuffle_buffer_size=10000,
            base_preprocess_fn=base.make_get_and_cast_tensors_fn({
                data_key: ('image', None),
                'label': ('label', None),
            }),
            image_key=data_key,
            num_channels=num_channels,
            num_classes=builder.info.features['label'].num_classes)
|
@Registry.register('data.oxford_flowers102', 'class')
class OxfordFlowers102Data(base.ImageTfdsData):
    """Provides Oxford 102 categories flowers dataset.

    See corresponding tfds dataset for details.

    URL: https://www.robots.ox.ac.uk/~vgg/data/flowers/102/
    """

    def __init__(self, data_dir=None, train_split_percent=None):
        """Initializes the flowers data source.

        Args:
            data_dir: optional tfds data directory.
            train_split_percent: when given, re-partitions train+validation so
                that the first `train_split_percent`% of each goes to 'train'
                and the remaining (100 - train_split_percent)% to 'val'.
        """
        dataset_builder = tfds.builder('oxford_flowers102:2.*.*', data_dir=data_dir)
        dataset_builder.download_and_prepare()
        train_count = dataset_builder.info.splits[tfds.Split.TRAIN].num_examples
        val_count = dataset_builder.info.splits[tfds.Split.VALIDATION].num_examples
        test_count = dataset_builder.info.splits[tfds.Split.TEST].num_examples
        if train_split_percent:
            # FIX: 'val' previously sliced the LAST `train_split_percent`% of
            # each split, which disagreed with num_samples_splits below (which
            # counts the remaining (100 - train_split_percent)%) and overlapped
            # 'train' whenever train_split_percent > 50.
            val_split_percent = 100 - train_split_percent
            tfds_splits = {
                'train': 'train[:{s}%]+validation[:{s}%]'.format(s=train_split_percent),
                'val': 'train[-{s}%:]+validation[-{s}%:]'.format(s=val_split_percent),
                'trainval': 'train+validation',
                'test': 'test',
                'train800': 'train[:800]',
                'val200': 'validation[:200]',
                'train800val200': 'train[:800]+validation[:200]',
            }
            num_samples_splits = {
                'train': ((train_count + val_count) // 100) * train_split_percent,
                'val': ((train_count + val_count) // 100) * val_split_percent,
                'trainval': train_count + val_count,
                'test': test_count,
                'train800': 800,
                'val200': 200,
                'train800val200': 1000,
            }
        else:
            # Use the official train/validation/test partition.
            tfds_splits = {
                'train': 'train',
                'val': 'validation',
                'trainval': 'train+validation',
                'test': 'test',
                'train800': 'train[:800]',
                'val200': 'validation[:200]',
                'train800val200': 'train[:800]+validation[:200]',
            }
            num_samples_splits = {
                'train': train_count,
                'val': val_count,
                'trainval': train_count + val_count,
                'test': test_count,
                'train800': 800,
                'val200': 200,
                'train800val200': 1000,
            }
        super(OxfordFlowers102Data, self).__init__(
            dataset_builder=dataset_builder,
            tfds_splits=tfds_splits,
            num_samples_splits=num_samples_splits,
            num_preprocessing_threads=400,
            shuffle_buffer_size=10000,
            base_preprocess_fn=base.make_get_and_cast_tensors_fn({'image': ('image', None), 'label': ('label', None)}),
            num_classes=dataset_builder.info.features['label'].num_classes)
|
@Registry.register('data.oxford_iiit_pet', 'class')
class OxfordIIITPetData(base.ImageTfdsData):
    """Provides OxfordIIITPet data.

    The OxfordIIITPet dataset comes only with a training and test set.
    Therefore, the validation set is split out of the original training set, and
    the remaining examples are used as the "train" split. The "trainval" split
    corresponds to the original training set.

    For additional details and usage, see the base class.
    """

    def __init__(self, data_dir=None, train_split_percent=None):
        builder = tfds.builder('oxford_iiit_pet:3.*.*', data_dir=data_dir)
        builder.download_and_prepare()
        percent = train_split_percent or TRAIN_SPLIT_PERCENT
        trainval_count = builder.info.splits[tfds.Split.TRAIN].num_examples
        test_count = builder.info.splits[tfds.Split.TEST].num_examples
        # Head of the official training set becomes 'train'; the tail is 'val'.
        train_count = (percent * trainval_count) // 100
        num_samples_splits = {
            'train': train_count,
            'val': trainval_count - train_count,
            'trainval': trainval_count,
            'test': test_count,
            'train800': 800,
            'val200': 200,
            'train800val200': 1000,
        }
        tfds_splits = {
            'train': 'train[:{}]'.format(train_count),
            'val': 'train[{}:]'.format(train_count),
            'trainval': tfds.Split.TRAIN,
            'test': tfds.Split.TEST,
            'train800': 'train[:800]',
            'val200': 'train[{}:{}]'.format(train_count, train_count + 200),
            'train800val200': 'train[:800]+train[{}:{}]'.format(train_count, train_count + 200),
        }
        super(OxfordIIITPetData, self).__init__(
            dataset_builder=builder,
            tfds_splits=tfds_splits,
            num_samples_splits=num_samples_splits,
            num_preprocessing_threads=400,
            shuffle_buffer_size=10000,
            base_preprocess_fn=base.make_get_tensors_fn(['image', 'label']),
            num_classes=builder.info.features['label'].num_classes)
|
@Registry.register('data.patch_camelyon', 'class')
class PatchCamelyonData(base.ImageTfdsData):
    """Provides PatchCamelyon data."""

    def __init__(self, data_dir=None):
        builder = tfds.builder('patch_camelyon:2.*.*', data_dir=data_dir)
        builder.download_and_prepare()
        # Official splits are used directly.
        tfds_splits = {
            'test': 'test',
            'train': 'train',
            'val': 'validation',
            'trainval': 'train+validation',
            'train800': 'train[:800]',
            'val200': 'validation[:200]',
            'train800val200': 'train[:800]+validation[:200]',
        }
        splits_info = builder.info.splits
        num_samples_splits = {
            'test': splits_info['test'].num_examples,
            'train': splits_info['train'].num_examples,
            'val': splits_info['validation'].num_examples,
            'train800': 800,
            'val200': 200,
            'train800val200': 1000,
        }
        num_samples_splits['trainval'] = num_samples_splits['train'] + num_samples_splits['val']
        super(PatchCamelyonData, self).__init__(
            dataset_builder=builder,
            tfds_splits=tfds_splits,
            num_samples_splits=num_samples_splits,
            num_preprocessing_threads=400,
            shuffle_buffer_size=10000,
            base_preprocess_fn=base.make_get_tensors_fn(['image', 'label']),
            num_classes=builder.info.features['label'].num_classes)
|
def partialclass(cls, *base_args, **base_kwargs):
    """Builds a subclass with partial application of the given args and keywords.

    Analogous to functools.partial: base_args are prepended to the positional
    arguments given during object initialization, and base_kwargs are updated
    with (i.e. overridden by) the kwargs given later.

    Args:
        cls: The base class.
        *base_args: Positional arguments to be applied to the subclass.
        **base_kwargs: Keyword arguments to be applied to the subclass.

    Returns:
        A subclass of the input class.
    """

    class _NewClass(cls):

        def __init__(self, *args, **kwargs):
            # Bound positionals come first; later keyword arguments win over
            # the bound ones.
            bound_args = base_args + args
            bound_kwargs = base_kwargs.copy()
            bound_kwargs.update(kwargs)
            super(_NewClass, self).__init__(*bound_args, **bound_kwargs)

    return _NewClass
|
def parse_name(string_to_parse):
    """Parses input to the registry's lookup function.

    Args:
        string_to_parse: can be either an arbitrary name or function call
            (optionally with keyword arguments), e.g. "multiclass",
            "resnet50_v2(filters_factor=8)". Positional arguments in a call
            are rejected.

    Returns:
        A tuple of input name and a dictionary with arguments. Examples:
            "multiclass" -> ("multiclass", {})
            "resnet50_v2(filters_factor=4)" ->
                ("resnet50_v2", {"filters_factor": 4})

    Raises:
        ValueError: if the string is not a name/attribute/call, or a call
            carries positional arguments.
    """
    expr = ast.parse(string_to_parse, mode='eval').body
    if not isinstance(expr, (ast.Attribute, ast.Call, ast.Name)):
        raise ValueError('The given string should be a name or a call, but a {} was parsed from the string {!r}'.format(type(expr), string_to_parse))
    # Plain (possibly dotted) names carry no arguments.
    if isinstance(expr, ast.Name):
        return (string_to_parse, {})
    elif isinstance(expr, ast.Attribute):
        return (string_to_parse, {})

    def _get_func_name(expr):
        # Rebuild a dotted name from nested Attribute/Name nodes.
        if isinstance(expr, ast.Attribute):
            return (_get_func_name(expr.value) + '.') + expr.attr
        elif isinstance(expr, ast.Name):
            return expr.id
        else:
            raise ValueError('Type {!r} is not supported in a function name, the string to parse was {!r}'.format(type(expr), string_to_parse))

    def _get_func_args_and_kwargs(call):
        # literal_eval keeps this safe: only Python literals are accepted.
        args = tuple([ast.literal_eval(arg) for arg in call.args])
        kwargs = {kwarg.arg: ast.literal_eval(kwarg.value) for kwarg in call.keywords}
        return (args, kwargs)

    func_name = _get_func_name(expr.func)
    (func_args, func_kwargs) = _get_func_args_and_kwargs(expr)
    if func_args:
        raise ValueError('Positional arguments are not supported here, but these were found: {!r}'.format(func_args))
    return (func_name, func_kwargs)
|
class Registry(object):
    """Implements global Registry.

    Items are stored in a single process-wide dict mapping registered name to
    a (item, item_type) tuple, where item_type is 'function' or 'class'.
    """

    _GLOBAL_REGISTRY = {}

    @staticmethod
    def global_registry():
        """Returns the shared name -> (item, item_type) mapping."""
        return Registry._GLOBAL_REGISTRY

    @staticmethod
    def register(name, item_type):
        """Creates a function that registers its input.

        Args:
            name: key under which the item is registered.
            item_type: 'function' or 'class'.

        Returns:
            A decorator that registers its argument and returns it unchanged.

        Raises:
            ValueError: for an unknown item_type.
            KeyError: (from the decorator) when `name` is already registered.
        """
        if item_type not in ['function', 'class']:
            raise ValueError('Unknown item type: %s' % item_type)

        def _register(item):
            if name in Registry.global_registry():
                # FIX: message previously read "registered in with type".
                raise KeyError('The name {!r} was already registered with type {!r}'.format(name, item_type))
            Registry.global_registry()[name] = (item, item_type)
            return item
        return _register

    @staticmethod
    def lookup(lookup_string, kwargs_extra=None):
        """Lookup a name in the registry.

        `lookup_string` may carry keyword arguments (see parse_name); these,
        updated with `kwargs_extra`, are partially applied to the found item.
        """
        (name, kwargs) = parse_name(lookup_string)
        if kwargs_extra:
            kwargs.update(kwargs_extra)
        (item, item_type) = Registry.global_registry()[name]
        if item_type == 'function':
            return functools.partial(item, **kwargs)
        elif item_type == 'class':
            return partialclass(item, **kwargs)
|
@Registry.register('data.resisc45', 'class')
class Resisc45Data(base.ImageTfdsData):
    """Provides RESISC-45 dataset.

    RESISC45 dataset is a publicly available benchmark for Remote Sensing Image
    Scene Classification (RESISC), created by Northwestern Polytechnical
    University (NWPU). This dataset contains 31,500 images, covering 45 scene
    classes with 700 images in each class.

    URL: http://www.escience.cn/people/JunweiHan/NWPU-RESISC45.html
    """

    def __init__(self, data_dir=None):
        builder = tfds.builder('resisc45:3.*.*', data_dir=data_dir)
        builder.download_and_prepare()
        # Carve train/val/test out of the single official 'train' split.
        total = builder.info.splits['train'].num_examples
        train_count = (total * TRAIN_SPLIT_PERCENT) // 100
        val_count = (total * VALIDATION_SPLIT_PERCENT) // 100
        test_count = (total * TEST_SPLIT_PERCENT) // 100
        trainval_count = train_count + val_count
        tfds_splits = {
            'train': 'train[:{}]'.format(train_count),
            'val': 'train[{}:{}]'.format(train_count, trainval_count),
            'trainval': 'train[:{}]'.format(trainval_count),
            'test': 'train[{}:]'.format(trainval_count),
            'train800': 'train[:800]',
            'val200': 'train[{}:{}]'.format(train_count, train_count + 200),
            'train800val200': 'train[:800]+train[{}:{}]'.format(train_count, train_count + 200),
        }
        num_samples_splits = {
            'train': train_count,
            'val': val_count,
            'trainval': trainval_count,
            'test': test_count,
            'train800': 800,
            'val200': 200,
            'train800val200': 1000,
        }
        super(Resisc45Data, self).__init__(
            dataset_builder=builder,
            tfds_splits=tfds_splits,
            num_samples_splits=num_samples_splits,
            num_preprocessing_threads=400,
            shuffle_buffer_size=10000,
            base_preprocess_fn=base.make_get_and_cast_tensors_fn({'image': ('image', None), 'label': ('label', None)}),
            num_classes=builder.info.features['label'].num_classes)
|
@Registry.register('data.smallnorb', 'class')
class SmallNORBData(base.ImageTfdsData):
    """Provides the SmallNORB data set.

    SmallNORB comes only with a training and test set. Therefore, the validation
    set is split out of the original training set, and the remaining examples are
    used as the "train" split. The "trainval" split corresponds to the original
    training set.

    For additional details and usage, see the base class.

    The data set page is https://cs.nyu.edu/~ylclab/data/norb-v1.0-small/.
    """

    def __init__(self, predicted_attribute, data_dir=None):
        builder = tfds.builder('smallnorb:2.*.*', data_dir=data_dir)
        builder.download_and_prepare()
        info = builder.info
        if predicted_attribute not in info.features:
            raise ValueError('{} is not a valid attribute to predict.'.format(predicted_attribute))
        # Validation examples are carved from the head of the official test split.
        tfds_splits = {
            'train': 'train',
            'val': 'test[:{}%]'.format(VAL_SPLIT_PERCENT),
            'trainval': 'train+test[:{}%]'.format(VAL_SPLIT_PERCENT),
            'test': 'test[{}%:]'.format(VAL_SPLIT_PERCENT),
            'train800': 'train[:800]',
            'val200': 'test[:200]',
            'train800val200': 'train[:800]+test[:200]',
        }
        n_train = info.splits['train'].num_examples
        n_test = info.splits['test'].num_examples
        n_val = (VAL_SPLIT_PERCENT * n_test) // 100
        num_samples_splits = {
            'train': n_train,
            'val': n_val,
            'trainval': n_train + n_val,
            'test': n_test - n_val,
            'train800': 800,
            'val200': 200,
            'train800val200': 1000,
        }

        def preprocess_fn(tensors):
            # Replicate the single grayscale channel to three channels.
            image = tf.tile(tensors['image'], [1, 1, 3])
            return dict(image=image, label=tensors[predicted_attribute])

        super(SmallNORBData, self).__init__(
            dataset_builder=builder,
            tfds_splits=tfds_splits,
            num_samples_splits=num_samples_splits,
            num_preprocessing_threads=400,
            shuffle_buffer_size=10000,
            base_preprocess_fn=preprocess_fn,
            num_classes=info.features[predicted_attribute].num_classes)
|
@Registry.register('data.sun397', 'class')
class Sun397Data(base.ImageTfdsData):
    """Provides Sun397Data data."""

    def __init__(self, config='tfds', data_dir=None):
        # Only the 'tfds' partition scheme is supported.
        if config != 'tfds':
            raise ValueError('No supported config %r for Sun397Data.' % config)
        builder = tfds.builder('sun397/tfds:4.*.*', data_dir=data_dir)
        builder.download_and_prepare()
        tfds_splits = {
            'train': 'train',
            'val': 'validation',
            'test': 'test',
            'trainval': 'train+validation',
            'train800': 'train[:800]',
            'val200': 'validation[:200]',
            'train800val200': 'train[:800]+validation[:200]',
        }
        splits_info = builder.info.splits
        num_samples_splits = {
            'test': splits_info['test'].num_examples,
            'train': splits_info['train'].num_examples,
            'val': splits_info['validation'].num_examples,
            'train800': 800,
            'val200': 200,
            'train800val200': 1000,
        }
        num_samples_splits['trainval'] = num_samples_splits['train'] + num_samples_splits['val']
        super(Sun397Data, self).__init__(
            dataset_builder=builder,
            tfds_splits=tfds_splits,
            num_samples_splits=num_samples_splits,
            num_preprocessing_threads=400,
            shuffle_buffer_size=10000,
            base_preprocess_fn=base.make_get_tensors_fn(['image', 'label']),
            num_classes=builder.info.features['label'].num_classes)
|
@Registry.register('data.svhn', 'class')
class SvhnData(base.ImageTfdsData):
    """Provides SVHN data.

    The Street View House Numbers (SVHN) Dataset is an image digit recognition
    dataset of over 600,000 color digit images coming from real world data.
    Split size:
    - Training set: 73,257 images
    - Testing set: 26,032 images
    - Extra training set: 531,131 images
    Following the common setup on SVHN, we only use the official training and
    testing data. Images are cropped to 32x32.

    URL: http://ufldl.stanford.edu/housenumbers/
    """

    def __init__(self, data_dir=None):
        builder = tfds.builder('svhn_cropped:3.*.*', data_dir=data_dir)
        builder.download_and_prepare()
        trainval_count = builder.info.splits[tfds.Split.TRAIN].num_examples
        test_count = builder.info.splits[tfds.Split.TEST].num_examples
        # Head of the official training set becomes 'train'; the tail is 'val'.
        train_count = (TRAIN_SPLIT_PERCENT * trainval_count) // 100
        num_samples_splits = {
            'train': train_count,
            'val': trainval_count - train_count,
            'trainval': trainval_count,
            'test': test_count,
            'train800': 800,
            'val200': 200,
            'train800val200': 1000,
        }
        tfds_splits = {
            'train': 'train[:{}]'.format(train_count),
            'val': 'train[{}:]'.format(train_count),
            'trainval': 'train',
            'test': 'test',
            'train800': 'train[:800]',
            'val200': 'train[{}:{}]'.format(train_count, train_count + 200),
            'train800val200': 'train[:800]+train[{}:{}]'.format(train_count, train_count + 200),
        }
        super(SvhnData, self).__init__(
            dataset_builder=builder,
            tfds_splits=tfds_splits,
            num_samples_splits=num_samples_splits,
            num_preprocessing_threads=400,
            shuffle_buffer_size=10000,
            base_preprocess_fn=base.make_get_and_cast_tensors_fn({'image': ('image', None), 'label': ('label', None)}),
            num_classes=builder.info.features['label'].num_classes)
|
class Evaluator():
    """
    An evaluator with below logics:

    1. find which eval module to use.
    2. store the eval results, pretty print it in log file as well.
    """

    def __init__(self) -> None:
        # results[key_name][metric] -> value, where key_name is
        # "epoch_<iteration>" or "final" (when iteration < 0).
        self.results = defaultdict(dict)
        self.iteration = -1
        # Upper bound of the threshold sweep for multilabel best-F1 search.
        self.threshold_end = 0.5

    def update_iteration(self, iteration: int) -> None:
        """update iteration info"""
        self.iteration = iteration

    def update_result(self, metric: str, value: Union[float, dict]) -> None:
        """Stores a scalar result, or merges a dict result, under the current
        iteration key."""
        if self.iteration > -1:
            key_name = 'epoch_' + str(self.iteration)
        else:
            key_name = 'final'
        if isinstance(value, float):
            self.results[key_name].update({metric: value})
        elif metric in self.results[key_name]:
            # Merge into an existing dict-valued metric instead of replacing it.
            self.results[key_name][metric].update(value)
        else:
            self.results[key_name].update({metric: value})

    def classify(self, probs, targets, test_data, multilabel=False):
        """
        Evaluate classification result.
        Args:
            probs: np.ndarray for num_data x num_class, predicted probabilities
            targets: np.ndarray for multilabel, list of integers for single label
            test_data: identifier of the evaluated split, forwarded to the
                eval helpers and used in logging/result keys
            multilabel: whether to run the multilabel evaluation path
        """
        # FIX: `not targets` raises "truth value of an array is ambiguous" for
        # the documented multilabel np.ndarray targets; test emptiness explicitly.
        if targets is None or len(targets) == 0:
            raise ValueError('When evaluating classification, need at least give targets')
        if multilabel:
            self._eval_multilabel(probs, targets, test_data)
        else:
            self._eval_singlelabel(probs, targets, test_data)

    def _eval_singlelabel(self, scores: np.ndarray, targets: List[int], eval_type: str) -> None:
        """
        if number of labels > 2:
            top1 and topk (5 by default) accuracy
        if number of labels == 2:
            top1 and rocauc
        """
        acc_dict = singlelabel.compute_acc_auc(scores, targets)
        # Log percentages rounded to 2 decimals; save the raw fractions.
        log_results = {k: np.around(v * 100, decimals=2) for (k, v) in acc_dict.items()}
        save_results = acc_dict
        self.log_and_update(log_results, save_results, eval_type)

    def _eval_multilabel(self, scores: np.ndarray, targets: np.ndarray, eval_type: str) -> None:
        """Computes mAP/mAR and best-F1 metrics for multilabel predictions."""
        num_labels = scores.shape[-1]
        targets = multilabel.multihot(targets, num_labels)
        log_results = {}
        (ap, ar, mAP, mAR) = multilabel.compute_map(scores, targets)
        f1_dict = multilabel.get_best_f1_scores(targets, scores, self.threshold_end)
        log_results['mAP'] = np.around(mAP * 100, decimals=2)
        log_results['mAR'] = np.around(mAR * 100, decimals=2)
        log_results.update({k: np.around(v * 100, decimals=2) for (k, v) in f1_dict.items()})
        save_results = {'ap': ap, 'ar': ar, 'mAP': mAP, 'mAR': mAR, 'f1': f1_dict}
        self.log_and_update(log_results, save_results, eval_type)

    def log_and_update(self, log_results, save_results, eval_type):
        """Pretty-prints `log_results` and stores `save_results` under the
        'classification' metric for the current iteration."""
        log_str = ''
        for (k, result) in log_results.items():
            if not isinstance(result, np.ndarray):
                log_str += f'{k}: {result:.2f} '
            else:
                # Per-class arrays are logged as plain lists.
                log_str += f'{k}: {list(result)} '
        logger.info(f'Classification results with {eval_type}: {log_str}')
        self.update_result('classification', {eval_type: save_results})
|
class Trainer():
    """
    a trainer with below logics:

    1. Build optimizer, scheduler
    2. Load checkpoints if provided
    3. Train and eval at each epoch
    """

    def __init__(self, cfg: CfgNode, model: nn.Module, evaluator: Evaluator, device: torch.device) -> None:
        self.cfg = cfg
        self.model = model
        self.device = device
        # Optimizer / LR schedule / loss are all built from cfg.SOLVER.
        logger.info('\tSetting up the optimizer...')
        self.optimizer = make_optimizer([self.model], cfg.SOLVER)
        self.scheduler = make_scheduler(self.optimizer, cfg.SOLVER)
        self.cls_criterion = build_loss(self.cfg)
        self.checkpointer = Checkpointer(self.model, save_dir=cfg.OUTPUT_DIR, save_to_disk=True)
        if (len(cfg.MODEL.WEIGHT_PATH) > 0):
            # Load a pretrained checkpoint but exclude the classification head
            # weights, so the head can be (re-)trained for the current task.
            checkpointables = [key for key in self.checkpointer.checkpointables if (key not in ['head.last_layer.bias', 'head.last_layer.weight'])]
            self.checkpointer.load(cfg.MODEL.WEIGHT_PATH, checkpointables)
            logger.info(f'Model weight loaded from {cfg.MODEL.WEIGHT_PATH}')
        self.evaluator = evaluator
        self.cpu_device = torch.device('cpu')

    def forward_one_batch(self, inputs, targets, is_train):
        """Runs one forward pass, plus a backward/optimizer step when training.

        Args:
            inputs: batch of input tensors (moved to self.device here).
            targets: batch of targets (moved to self.device here).
            is_train: bool, enables gradients and the optimizer step.

        Returns:
            (loss, outputs): loss tensor and output logits; (-1, -1) when an
            infinite/NaN loss was encountered and the batch should be skipped.
        """
        inputs = inputs.to(self.device, non_blocking=True)
        targets = targets.to(self.device, non_blocking=True)
        if self.cfg.DBG:
            logger.info(f'shape of inputs: {inputs.shape}')
            logger.info(f'shape of targets: {targets.shape}')
        with torch.set_grad_enabled(is_train):
            outputs = self.model(inputs)
            if self.cfg.DBG:
                logger.info('shape of model output: {}, targets: {}'.format(outputs.shape, targets.shape))
            if (self.cls_criterion.is_local() and is_train):
                # NOTE(review): the model is switched to eval mode before a
                # "local" criterion is applied during training — presumably
                # the criterion itself re-runs the model; confirm against the
                # criterion implementation.
                self.model.eval()
                loss = self.cls_criterion(outputs, targets, self.cls_weights, self.model, inputs)
            elif self.cls_criterion.is_local():
                # Local criterion at eval time: return a constant dummy loss.
                return (torch.tensor(1), outputs)
            else:
                # NOTE(review): self.cls_weights is only assigned in
                # train_classifier; calling this before training would raise
                # AttributeError.
                loss = self.cls_criterion(outputs, targets, self.cls_weights)
            # Sentinel protocol: (-1, -1) tells the caller to skip this batch.
            if (loss == float('inf')):
                logger.info('encountered infinite loss, skip gradient updating for this batch!')
                return ((- 1), (- 1))
            elif torch.isnan(loss).any():
                logger.info('encountered nan loss, skip gradient updating for this batch!')
                return ((- 1), (- 1))
        if is_train:
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
        return (loss, outputs)

    def get_input(self, data):
        """Extracts (float image tensor, label tensor) from a batch dict,
        converting numpy arrays to torch tensors when needed."""
        if (not isinstance(data['image'], torch.Tensor)):
            for (k, v) in data.items():
                data[k] = torch.from_numpy(v)
        inputs = data['image'].float()
        labels = data['label']
        return (inputs, labels)

    def train_classifier(self, train_loader, val_loader, test_loader):
        """
        Train a classifier using epoch
        """
        self.model.eval()
        total_epoch = self.cfg.SOLVER.TOTAL_EPOCH
        total_data = len(train_loader)
        best_epoch = (- 1)
        best_metric = 0
        log_interval = self.cfg.SOLVER.LOG_EVERY_N
        losses = AverageMeter('Loss', ':.4e')
        batch_time = AverageMeter('Time', ':6.3f')
        data_time = AverageMeter('Data', ':6.3f')
        # Class weights are needed by forward_one_batch's criterion calls.
        self.cls_weights = train_loader.dataset.get_class_weights(self.cfg.DATA.CLASS_WEIGHTS_TYPE)
        patience = 0  # epochs since the last val-metric improvement
        for epoch in range(total_epoch):
            losses.reset()
            batch_time.reset()
            data_time.reset()
            lr = self.scheduler.get_lr()[0]
            logger.info('Training {} / {} epoch, with learning rate {}'.format((epoch + 1), total_epoch, lr))
            self.model.train()
            end = time.time()
            for (idx, input_data) in enumerate(train_loader):
                if (self.cfg.DBG and (idx == 20)):
                    # Debug mode: only a few batches per epoch.
                    break
                (X, targets) = self.get_input(input_data)
                data_time.update((time.time() - end))
                (train_loss, _) = self.forward_one_batch(X, targets, True)
                if (train_loss == (- 1)):
                    # Inf/NaN loss: abort training entirely.
                    return None
                losses.update(train_loss.item(), X.shape[0])
                batch_time.update((time.time() - end))
                end = time.time()
                if (((idx + 1) % log_interval) == 0):
                    seconds_per_batch = batch_time.val
                    # ETA = remaining batches this epoch + full remaining epochs.
                    eta = datetime.timedelta(seconds=int(((seconds_per_batch * ((total_data - idx) - 1)) + ((seconds_per_batch * total_data) * ((total_epoch - epoch) - 1)))))
                    logger.info((('\tTraining {}/{}. train loss: {:.4f},'.format((idx + 1), total_data, train_loss) + '\t{:.4f} s / batch. (data: {:.2e}). ETA={}, '.format(seconds_per_batch, data_time.val, str(eta))) + 'max mem: {:.1f} GB '.format(gpu_mem_usage())))
            logger.info((('Epoch {} / {}: '.format((epoch + 1), total_epoch) + 'avg data time: {:.2e}, avg batch time: {:.4f}, '.format(data_time.avg, batch_time.avg)) + 'average train loss: {:.4f}'.format(losses.avg)))
            self.scheduler.step()
            self.model.eval()
            self.save_prompt((epoch + 1))
            # Evaluate; `save` logits only on the final epoch.
            self.evaluator.update_iteration(epoch)
            self.eval_classifier(val_loader, 'val', (epoch == (total_epoch - 1)))
            if (test_loader is not None):
                self.eval_classifier(test_loader, 'test', (epoch == (total_epoch - 1)))
            t_name = ('val_' + val_loader.dataset.name)
            try:
                curr_acc = self.evaluator.results[f'epoch_{epoch}']['classification'][t_name]['top1']
            except KeyError:
                # No val metric recorded (e.g. eval aborted): stop training.
                return
            # Early stopping on val top-1 accuracy.
            if (curr_acc > best_metric):
                best_metric = curr_acc
                best_epoch = (epoch + 1)
                logger.info(f'Best epoch {best_epoch}: best metric: {best_metric:.3f}')
                patience = 0
            else:
                patience += 1
            if (patience >= self.cfg.SOLVER.PATIENCE):
                logger.info('No improvement. Breaking out of loop.')
                break
        if self.cfg.MODEL.SAVE_CKPT:
            Checkpointer(self.model, save_dir=self.cfg.OUTPUT_DIR, save_to_disk=True).save('last_model')

    @torch.no_grad()
    def save_prompt(self, epoch):
        # Persist prompt embeddings (shallow, and deep when enabled) for this
        # epoch; only applies to prompt-tuned ViT variants.
        if self.cfg.MODEL.PROMPT.SAVE_FOR_EACH_EPOCH:
            if ((self.cfg.MODEL.TYPE in ['vit', 'ssl-vit']) and ('prompt' in self.cfg.MODEL.TRANSFER_TYPE)):
                prompt_embds = self.model.prompt_embeddings.cpu().numpy()
                out = {'shallow_prompt': prompt_embds}
                if self.cfg.MODEL.PROMPT.DEEP:
                    deep_embds = self.model.enc.transformer.deep_prompt_embeddings.cpu().numpy()
                    out['deep_prompt'] = deep_embds
                torch.save(out, os.path.join(self.cfg.OUTPUT_DIR, f'prompt_ep{epoch}.pth'))

    @torch.no_grad()
    def eval_classifier(self, data_loader, prefix, save=False):
        """evaluate classifier"""
        batch_time = AverageMeter('Time', ':6.3f')
        data_time = AverageMeter('Data', ':6.3f')
        losses = AverageMeter('Loss', ':.4e')
        log_interval = self.cfg.SOLVER.LOG_EVERY_N
        test_name = ((prefix + '_') + data_loader.dataset.name)
        total = len(data_loader)
        # Collect logits/targets over the whole loader, then evaluate at once.
        total_logits = []
        total_targets = []
        for (idx, input_data) in enumerate(data_loader):
            end = time.time()
            (X, targets) = self.get_input(input_data)
            data_time.update((time.time() - end))
            if self.cfg.DBG:
                logger.info('during eval: {}'.format(X.shape))
            (loss, outputs) = self.forward_one_batch(X, targets, False)
            if (loss == (- 1)):
                # Inf/NaN loss: abort this evaluation.
                return
            losses.update(loss, X.shape[0])
            batch_time.update((time.time() - end))
            if (((idx + 1) % log_interval) == 0):
                logger.info(('\tTest {}/{}. loss: {:.3f}, {:.4f} s / batch. (data: {:.2e})'.format((idx + 1), total, losses.val, batch_time.val, data_time.val) + 'max mem: {:.5f} GB '.format(gpu_mem_usage())))
            total_targets.extend(list(targets.numpy()))
            total_logits.append(outputs)
        logger.info(((f'Inference ({prefix}):' + 'avg data time: {:.2e}, avg batch time: {:.4f}, '.format(data_time.avg, batch_time.avg)) + 'average loss: {:.4f}'.format(losses.avg)))
        if (self.model.side is not None):
            logger.info('--> side tuning alpha = {:.4f}'.format(self.model.side_alpha))
        joint_logits = torch.cat(total_logits, dim=0).cpu().numpy()
        self.evaluator.classify(joint_logits, total_targets, test_name, self.cfg.DATA.MULTILABEL)
        if (save and self.cfg.MODEL.SAVE_CKPT):
            # Optionally dump raw logits/targets for offline analysis.
            out = {'targets': total_targets, 'joint_logits': joint_logits}
            out_path = os.path.join(self.cfg.OUTPUT_DIR, f'{test_name}_logits.pth')
            torch.save(out, out_path)
            logger.info(f'Saved logits and targets for {test_name} at {out_path}')
|
def build_model(cfg):
    """Builds the classification model named by cfg.MODEL.TYPE and places it
    on the target device.

    Returns:
        (model, device) tuple.
    """
    assert (cfg.MODEL.TYPE in _MODEL_TYPES.keys()), "Model type '{}' not supported".format(cfg.MODEL.TYPE)
    assert (cfg.NUM_GPUS <= torch.cuda.device_count()), 'Cannot use more GPU devices than available'
    # Dispatch on the configured model type.
    model = _MODEL_TYPES[cfg.MODEL.TYPE](cfg)
    log_model_info(model, verbose=cfg.DBG)
    model, device = load_model_to_device(model, cfg)
    logger.info(f'Device used for model: {device}')
    return model, device
|
def log_model_info(model, verbose=False):
    """Logs total and trainable parameter counts of `model` (and the full
    module structure when `verbose`)."""
    if verbose:
        logger.info(f'''Classification Model:
{model}''')
    # Count all parameters and, separately, the trainable ones.
    total_params = 0
    grad_params = 0
    for p in model.parameters():
        n = p.numel()
        total_params += n
        if p.requires_grad:
            grad_params += n
    logger.info('Total Parameters: {0}\t Gradient Parameters: {1}'.format(total_params, grad_params))
    logger.info('tuned percent:%.3f' % ((grad_params / total_params) * 100))
|
def get_current_device():
    """Return the active CUDA device index, or a CPU torch.device when CUDA is
    unavailable. (Note the return type differs between the two cases.)"""
    if not torch.cuda.is_available():
        return torch.device('cpu')
    return torch.cuda.current_device()
|
def load_model_to_device(model, cfg):
    """Move *model* to the current device; wrap it in DistributedDataParallel
    when more than one GPU is configured.

    Returns:
        (model, device) tuple.
    """
    device = get_current_device()
    if not torch.cuda.is_available():
        model = model.to(device)
    else:
        model = model.cuda(device=device)
        if cfg.NUM_GPUS > 1:
            model = torch.nn.parallel.DistributedDataParallel(module=model, device_ids=[device], output_device=device, find_unused_parameters=True)
    return (model, device)
|
def build_mae_model(model_type, crop_size, prompt_cfg, model_root, adapter_cfg=None):
    """Build an MAE ViT backbone (optionally prompt-augmented), load its
    checkpoint from `model_root`, and replace the head with Identity.

    Returns:
        (model, out_dim) where out_dim is the encoder embedding dimension.
    """
    if model_type not in ('mae_vitb16', 'mae_vitl16'):
        raise ValueError('Does not support other arch')
    model = mae_vit_model(model_type) if prompt_cfg is None else prompt_mae_vit(model_type, prompt_cfg)
    out_dim = model.embed_dim
    ckpt_path = os.path.join(model_root, MODEL_ZOO[model_type])
    state_dict = torch.load(ckpt_path, map_location='cpu')['model']
    # strict=False: prompt/adapter variants add parameters absent from the checkpoint.
    msg = model.load_state_dict(state_dict, strict=False)
    print(msg)
    # Downstream code attaches its own classification head.
    model.head = torch.nn.Identity()
    return (model, out_dim)
|
def build_mocov3_model(model_type, crop_size, prompt_cfg, model_root, adapter_cfg=None):
    """Build a MoCo-v3 ViT backbone, remap its checkpoint keys, and strip the head.

    Returns:
        (model, out_dim) — 384 for the small variant, 768 otherwise.
    """
    if model_type not in ('mocov3_vitb16', 'mocov3_vits16'):
        raise ValueError('Does not support other arch')
    if prompt_cfg is None:
        model = moco_vit_model()
    else:
        model = prompt_moco_vit(model_type, prompt_cfg)
    out_dim = 384 if model_type.endswith('s16') else 768
    checkpoint = torch.load(os.path.join(model_root, MODEL_ZOO[model_type]), map_location='cpu')
    state_dict = checkpoint['state_dict']
    # Strip DDP 'module.' and MoCo 'base_encoder.' prefixes; drop the momentum
    # encoder entirely. Iterate over a snapshot of the keys since we mutate.
    for old_key in list(state_dict.keys()):
        if not old_key.startswith('module.'):
            continue
        new_key = old_key.replace('module.', '')
        if new_key.startswith('momentum'):
            del state_dict[old_key]
            continue
        if new_key.startswith('base_encoder.'):
            new_key = new_key.replace('base_encoder.', '')
        state_dict[new_key] = state_dict[old_key]
        del state_dict[old_key]
    msg = model.load_state_dict(state_dict, strict=False)
    print(msg)
    model.head = torch.nn.Identity()
    return (model, out_dim)
|
class MLP(nn.Module):
    """Multi-layer perceptron: optional leading norm/dropout, hidden
    Linear + activation (+ norm/dropout) stages, then a final Linear.

    Args:
        input_dim: size of the input features.
        mlp_dims: hidden dims followed by the output dim (last entry).
        dropout: dropout probability after each normalization (0 disables).
        nonlinearity: activation class inserted after every hidden Linear.
        normalization: normalization class (None disables).
        special_bias: initialize the final bias to the focal-loss prior logit.
        add_bn_first: prepend normalization/dropout before the first Linear.
    """

    def __init__(self, input_dim: int, mlp_dims: List[int], dropout: float=0.1, nonlinearity: Type[nn.Module]=nn.ReLU, normalization: Type[nn.Module]=nn.BatchNorm1d, special_bias: bool=False, add_bn_first: bool=False):
        super(MLP, self).__init__()
        hidden_dims, out_dim = mlp_dims[:-1], mlp_dims[-1]
        layers = []
        prev_dim = input_dim
        if add_bn_first:
            if normalization is not None:
                layers.append(normalization(prev_dim))
            if dropout != 0:
                layers.append(nn.Dropout(dropout))
        for hidden_dim in hidden_dims:
            linear = nn.Linear(prev_dim, hidden_dim)
            nn.init.kaiming_normal_(linear.weight, a=0, mode='fan_out')
            layers.append(linear)
            layers.append(nonlinearity())
            if normalization is not None:
                layers.append(normalization(hidden_dim))
            if dropout != 0:
                layers.append(nn.Dropout(dropout))
            prev_dim = hidden_dim
        self.projection = nn.Sequential(*layers)
        self.last_layer = nn.Linear(prev_dim, out_dim)
        nn.init.kaiming_normal_(self.last_layer.weight, a=0, mode='fan_out')
        if special_bias:
            # Focal-loss style prior: sigmoid(bias) == 0.01 at initialization.
            prior_prob = 0.01
            torch.nn.init.constant_(self.last_layer.bias, -math.log((1 - prior_prob) / prior_prob))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Run x through the hidden stack and the final linear layer."""
        return self.last_layer(self.projection(x))
|
class SSLViT(nn.Module):
    """Classifier wrapping a self-supervised ViT backbone (MoCo-v3 or MAE).

    Supports multiple transfer-learning strategies selected by
    cfg.MODEL.TRANSFER_TYPE: full fine-tuning ('end2end'), linear probing,
    partial unfreezing of the last N blocks, bias-only tuning, prompts,
    adapters, and AlexNet side-tuning.
    """
    def __init__(self, cfg):
        super(SSLViT, self).__init__()
        # Prompt config only applies to prompt-based transfer types.
        if ('prompt' in cfg.MODEL.TRANSFER_TYPE):
            prompt_cfg = cfg.MODEL.PROMPT
        else:
            prompt_cfg = None
        # Encoder is kept in eval mode during training for every transfer
        # type except full fine-tuning and prompting (see forward()).
        if ((cfg.MODEL.TRANSFER_TYPE != 'end2end') and ('prompt' not in cfg.MODEL.TRANSFER_TYPE)):
            self.froze_enc = True
        else:
            self.froze_enc = False
        if (cfg.MODEL.TRANSFER_TYPE == 'adapter'):
            adapter_cfg = cfg.MODEL.ADAPTER
        else:
            adapter_cfg = None
        # Order matters: build_backbone sets self.feat_dim, which
        # setup_side and setup_head both rely on.
        self.build_backbone(prompt_cfg, cfg, adapter_cfg)
        self.cfg = cfg
        self.setup_side()
        self.setup_head(cfg)
    def setup_side(self):
        """Build the AlexNet side network for side-tuning, else set self.side = None."""
        if (self.cfg.MODEL.TRANSFER_TYPE != 'side'):
            self.side = None
        else:
            # Learnable blend weight between backbone and side features.
            self.side_alpha = nn.Parameter(torch.tensor(0.0))
            m = models.alexnet(pretrained=True)
            self.side = nn.Sequential(OrderedDict([('features', m.features), ('avgpool', m.avgpool)]))
            # 9216 = flattened AlexNet avgpool output (256 * 6 * 6).
            self.side_projection = nn.Linear(9216, self.feat_dim, bias=False)
    def setup_head(self, cfg):
        """Attach an MLP classification head: MLP_NUM hidden layers of feat_dim, then the class logits."""
        self.head = MLP(input_dim=self.feat_dim, mlp_dims=(([self.feat_dim] * self.cfg.MODEL.MLP_NUM) + [cfg.DATA.NUMBER_CLASSES]), special_bias=True)
    def build_backbone(self, prompt_cfg, cfg, adapter_cfg):
        """Instantiate the encoder and freeze parameters per the transfer type."""
        # Select the builder from the feature name; NOTE: anything that is
        # neither 'moco' nor 'mae' leaves build_fn unbound (NameError).
        if ('moco' in cfg.DATA.FEATURE):
            build_fn = build_mocov3_model
        elif ('mae' in cfg.DATA.FEATURE):
            build_fn = build_mae_model
        (self.enc, self.feat_dim) = build_fn(cfg.DATA.FEATURE, cfg.DATA.CROPSIZE, prompt_cfg, cfg.MODEL.MODEL_ROOT, adapter_cfg=adapter_cfg)
        transfer_type = cfg.MODEL.TRANSFER_TYPE
        # partial-N: train only the last N transformer blocks plus the final norm.
        if (transfer_type == 'partial-1'):
            total_layer = len(self.enc.blocks)
            for (k, p) in self.enc.named_parameters():
                if (('blocks.{}'.format((total_layer - 1)) not in k) and ('fc_norm' not in k) and (k != 'norm')):
                    p.requires_grad = False
        elif (transfer_type == 'partial-2'):
            total_layer = len(self.enc.blocks)
            for (k, p) in self.enc.named_parameters():
                if (('blocks.{}'.format((total_layer - 1)) not in k) and ('blocks.{}'.format((total_layer - 2)) not in k) and ('fc_norm' not in k) and (k != 'norm')):
                    p.requires_grad = False
        elif (transfer_type == 'partial-4'):
            total_layer = len(self.enc.blocks)
            for (k, p) in self.enc.named_parameters():
                if (('blocks.{}'.format((total_layer - 1)) not in k) and ('blocks.{}'.format((total_layer - 2)) not in k) and ('blocks.{}'.format((total_layer - 3)) not in k) and ('blocks.{}'.format((total_layer - 4)) not in k) and ('fc_norm' not in k) and (k != 'norm')):
                    p.requires_grad = False
        # linear probe / side-tuning: freeze the entire encoder.
        elif ((transfer_type == 'linear') or (transfer_type == 'sidetune')):
            for (k, p) in self.enc.named_parameters():
                p.requires_grad = False
        # tinytl-bias: train biases only.
        elif (transfer_type == 'tinytl-bias'):
            for (k, p) in self.enc.named_parameters():
                if ('bias' not in k):
                    p.requires_grad = False
        elif (transfer_type == 'prompt+bias'):
            for (k, p) in self.enc.named_parameters():
                if (('prompt' not in k) and ('bias' not in k)):
                    p.requires_grad = False
        # prompt below patch embedding: the patch projection stays trainable too.
        elif ((transfer_type == 'prompt') and (prompt_cfg.LOCATION == 'below')):
            for (k, p) in self.enc.named_parameters():
                if (('prompt' not in k) and ('patch_embed.proj.weight' not in k) and ('patch_embed.proj.bias' not in k)):
                    p.requires_grad = False
        elif (transfer_type == 'prompt'):
            for (k, p) in self.enc.named_parameters():
                if ('prompt' not in k):
                    p.requires_grad = False
        elif (transfer_type == 'end2end'):
            logger.info('Enable all parameters update during training')
        elif (transfer_type == 'adapter'):
            for (k, p) in self.enc.named_parameters():
                if ('adapter' not in k):
                    p.requires_grad = False
        else:
            raise ValueError('transfer type {} is not supported'.format(transfer_type))
        # Regardless of transfer type, always keep gate/temp parameters
        # trainable (re-enabled after the freezing above).
        for (k, p) in self.enc.named_parameters():
            if ('gate' in k):
                p.requires_grad = True
            if ('temp' in k):
                p.requires_grad = True
    def forward(self, x, return_feature=False):
        """Encode x and classify; with side-tuning, blend backbone and side features.

        Returns:
            logits, or (features, features) when return_feature is True.
        """
        if (self.side is not None):
            side_output = self.side(x)
            side_output = side_output.view(side_output.size(0), (- 1))
            side_output = self.side_projection(side_output)
        # Frozen encoder: force eval mode so norm/dropout layers stay fixed
        # even while the wrapper module is in training mode.
        if (self.froze_enc and self.enc.training):
            self.enc.eval()
        x = self.enc(x)
        if (self.side is not None):
            # sigmoid(alpha)-weighted convex combination of the two streams.
            alpha_squashed = torch.sigmoid(self.side_alpha)
            x = ((alpha_squashed * x) + ((1 - alpha_squashed) * side_output))
        if return_feature:
            return (x, x)
        x = self.head(x)
        return x
    def forward_cls_layerwise(self, x):
        """Return the encoder's per-layer CLS embeddings for x."""
        cls_embeds = self.enc.forward_cls_layerwise(x)
        return cls_embeds
    def get_features(self, x):
        'get a (batch_size, self.feat_dim) feature'
        x = self.enc(x)
        return x
|
class SigmoidLoss(nn.Module):
    """Per-class weighted binary cross-entropy (with logits) over one-hot targets."""

    def __init__(self, cfg=None):
        super(SigmoidLoss, self).__init__()

    def is_single(self):
        """Marker: this is a single-label loss."""
        return True

    def is_local(self):
        """Marker: not a locally-computed loss."""
        return False

    def multi_hot(self, labels: torch.Tensor, nb_classes: int) -> torch.Tensor:
        """Convert a (batch,) index tensor into a (batch, nb_classes) one-hot tensor."""
        one_hot = torch.zeros(labels.size(0), nb_classes, device=labels.device)
        return one_hot.scatter_(1, labels.unsqueeze(1), 1.0)

    def loss(self, logits, targets, per_cls_weights, multihot_targets: Optional[bool]=False):
        """Weighted BCE-with-logits, summed and normalized by batch size."""
        targets = self.multi_hot(targets, logits.shape[1])
        raw = F.binary_cross_entropy_with_logits(logits, targets, reduction='none')
        weight = torch.tensor(per_cls_weights, device=logits.device).unsqueeze(0)
        weighted = raw.to(torch.float32) * weight.to(torch.float32)
        return weighted.sum() / targets.shape[0]

    def forward(self, pred_logits, targets, per_cls_weights, multihot_targets=False):
        return self.loss(pred_logits, targets, per_cls_weights, multihot_targets)
|
class SoftmaxLoss(SigmoidLoss):
    """Per-class weighted softmax cross-entropy; same interface as SigmoidLoss."""

    def __init__(self, cfg=None):
        super(SoftmaxLoss, self).__init__()

    def loss(self, logits, targets, per_cls_weights, multihot_targets: Optional[bool]=False):
        """Weighted cross-entropy, summed and normalized by batch size.

        Fix: the fourth parameter was previously a *required positional*
        misleadingly named `kwargs`, mismatching SigmoidLoss.loss. It is now
        named and defaulted to match the base class, so keyword calls work;
        positional callers (e.g. the inherited forward) are unaffected.
        The flag itself is unused here, as in the base implementation.
        """
        weight = torch.tensor(per_cls_weights, device=logits.device)
        loss = F.cross_entropy(logits, targets, weight, reduction='none')
        return torch.sum(loss) / targets.shape[0]
|
def build_loss(cfg):
    """Instantiate the loss registered under cfg.SOLVER.LOSS (None for falsy entries)."""
    loss_name = cfg.SOLVER.LOSS
    assert loss_name in LOSS, f'loss name {loss_name} is not supported'
    loss_fn = LOSS[loss_name]
    return loss_fn(cfg) if loss_fn else None
|
def make_scheduler(optimizer: optim.Optimizer, train_params: CfgNode) -> LambdaLR:
    """Create the LR scheduler named by train_params.SCHEDULER.

    Returns None for unrecognized scheduler names.
    """
    warmup = train_params.WARMUP_EPOCH
    total_iters = train_params.TOTAL_EPOCH
    name = train_params.SCHEDULER
    if name == 'cosine':
        return WarmupCosineSchedule(optimizer, warmup_steps=warmup, t_total=total_iters)
    if name == 'cosine_hardrestart':
        return WarmupCosineWithHardRestartsSchedule(optimizer, warmup_steps=warmup, t_total=total_iters)
    if name == 'plateau':
        return optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'max', patience=5, verbose=True, factor=train_params.LR_DECAY_FACTOR)
    return None
|
class WarmupCosineSchedule(LambdaLR):
    """Linear warmup from 0 to 1 over `warmup_steps`, then cosine decay from
    1 to 0 over the remaining `t_total - warmup_steps` steps. With the default
    `cycles=0.5` the multiplier traces half a cosine period after warmup.
    """

    def __init__(self, optimizer, warmup_steps, t_total, cycles=0.5, last_epoch=(- 1)):
        self.warmup_steps = warmup_steps
        self.t_total = t_total
        self.cycles = cycles
        super(WarmupCosineSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)

    def lr_lambda(self, step):
        """LR multiplier at `step` (clamped to be non-negative)."""
        if step < self.warmup_steps:
            return step / max(1.0, self.warmup_steps)
        progress = (step - self.warmup_steps) / max(1, self.t_total - self.warmup_steps)
        cosine = math.cos(math.pi * float(self.cycles) * 2.0 * progress)
        return max(0.0, 0.5 * (1.0 + cosine))
|
class WarmupCosineWithHardRestartsSchedule(LambdaLR):
    """Linear warmup from 0 to 1 over `warmup_steps`, then `cycles` cosine
    decays with hard restarts over the remaining steps; 0 once past t_total.
    """

    def __init__(self, optimizer, warmup_steps, t_total, cycles=1.0, last_epoch=(- 1)):
        self.warmup_steps = warmup_steps
        self.t_total = t_total
        self.cycles = cycles
        super(WarmupCosineWithHardRestartsSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)

    def lr_lambda(self, step):
        """LR multiplier at `step`."""
        if step < self.warmup_steps:
            return step / max(1, self.warmup_steps)
        progress = (step - self.warmup_steps) / max(1, self.t_total - self.warmup_steps)
        if progress >= 1.0:
            return 0.0
        # The modulo produces the hard restart at each cycle boundary.
        cosine = math.cos(math.pi * ((float(self.cycles) * progress) % 1.0))
        return max(0.0, 0.5 * (1.0 + cosine))
|
def make_optimizer(models: List[Any], train_params: 'CfgNode') -> Optimizer:
    """Build an optimizer over the trainable parameters of every model in `models`.

    Only parameters with requires_grad=True are collected. Behavior by config:
      * WEIGHT_DECAY > 0 and OPTIMIZER == 'adamw': AdamW with a no-decay
        group for biases and LayerNorm parameters.
      * WEIGHT_DECAY > 0 otherwise: per-parameter groups (bias LR scaled by
        BIAS_MULTIPLIER when != 1; `last_layer.bias` gets no decay), fed to
        Adam or SGD.
      * WEIGHT_DECAY == 0: Adam (no decay) or SGD over per-parameter groups.

    Fix: the no-decay Adam branch previously optimized `model.parameters()`
    of only the *last* model in the list (including frozen parameters); it
    now uses the trainable parameters gathered from every model.
    """
    params = []
    for model in models:
        if train_params.DBG_TRAINABLE:
            logger.info('Trainable params:')
        for key, value in model.named_parameters():
            if value.requires_grad:
                if train_params.DBG_TRAINABLE:
                    logger.info('\t{}, {}, {}'.format(key, value.numel(), value.shape))
                params.append((key, value))
    if train_params.WEIGHT_DECAY > 0:
        if train_params.OPTIMIZER == 'adamw':
            no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
            # NOTE: the decay value here is hard-coded to 0.01 (not
            # train_params.WEIGHT_DECAY); preserved from the original code.
            optimizer_grouped_parameters = [
                {'params': [p for n, p in params if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
                {'params': [p for n, p in params if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
            ]
            return AdamW(optimizer_grouped_parameters, lr=train_params.BASE_LR)
        _params = []
        for key, value in params:
            lr = train_params.BASE_LR
            weight_decay = train_params.WEIGHT_DECAY
            if 'last_layer.bias' in key:
                # No regularization on the classifier's final bias.
                weight_decay = 0.0
            if train_params.BIAS_MULTIPLIER == 1.0:
                lr_value = lr
            else:
                if ('bias' in key) and ('last_layer.bias' not in key):
                    lr_value = lr * train_params.BIAS_MULTIPLIER
                else:
                    lr_value = lr
                if train_params.DBG_TRAINABLE:
                    logger.info('\t{}, {:.4f}'.format(key, lr_value))
            _params.append({'params': [value], 'lr': lr_value, 'weight_decay': weight_decay})
        if train_params.OPTIMIZER == 'adam':
            return optim.Adam(_params, lr=train_params.BASE_LR, weight_decay=train_params.WEIGHT_DECAY)
        return optim.SGD(_params, train_params.BASE_LR, momentum=train_params.MOMENTUM, weight_decay=train_params.WEIGHT_DECAY)
    if train_params.OPTIMIZER == 'adam':
        # Fixed: previously used model.parameters() (last model only).
        return optim.Adam([value for _, value in params], lr=train_params.BASE_LR)
    _params = []
    for key, value in params:
        lr = train_params.BASE_LR
        if train_params.BIAS_MULTIPLIER == 1.0:
            lr_value = lr
        else:
            if ('bias' in key) and ('last_layer.bias' not in key):
                lr_value = lr * train_params.BIAS_MULTIPLIER
            else:
                lr_value = lr
            if train_params.DBG_TRAINABLE:
                logger.info('\t{}, {:.4f}'.format(key, lr_value))
        _params.append({'params': [value], 'lr': lr_value})
    return optim.SGD(_params, train_params.BASE_LR, momentum=train_params.MOMENTUM)
|
class AdamW(Optimizer):
    """Adam with decoupled weight decay (BERT-style "weight decay fix").

    Parameters:
        lr (float): learning rate. Default 1e-3.
        betas (tuple of 2 floats): Adam (b1, b2) coefficients. Default (0.9, 0.999).
        eps (float): term added to the denominator for stability. Default 1e-6.
        weight_decay (float): decoupled weight decay factor. Default 0.0.
        correct_bias (bool): set False to skip Adam bias correction
            (as in the BERT TF repository). Default True.

    Fix: the step() update previously used the positional
    `Tensor.add_(Number, Tensor)` / `addcmul_(Number, t1, t2)` /
    `addcdiv_(Number, t1, t2)` overloads, which were deprecated and removed
    in modern PyTorch; they are replaced with the keyword `alpha=`/`value=`
    forms. Numerics are unchanged.
    """

    def __init__(self, params: Iterable, lr: float=0.001, betas: Tuple[(float, float)]=(0.9, 0.999), eps: float=1e-06, weight_decay: float=0.0, correct_bias: bool=True) -> None:
        if lr < 0.0:
            raise ValueError('Invalid learning rate: {} - should be >= 0.0'.format(lr))
        if not (0.0 <= betas[0] < 1.0):
            raise ValueError('Invalid beta parameter: {} - should be in [0.0, 1.0['.format(betas[0]))
        if not (0.0 <= betas[1] < 1.0):
            raise ValueError('Invalid beta parameter: {} - should be in [0.0, 1.0['.format(betas[1]))
        if not (0.0 <= eps):
            raise ValueError('Invalid epsilon value: {} - should be >= 0.0'.format(eps))
        defaults = {'lr': lr, 'betas': betas, 'eps': eps, 'weight_decay': weight_decay, 'correct_bias': correct_bias}
        super(AdamW, self).__init__(params, defaults)

    def step(self, closure: Optional[Callable]=None) -> Optional[Callable]:
        """Perform a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                state = self.state[p]
                # Lazy state initialization on the first step for this param.
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data)
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                # First and second moment running averages.
                exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
                denom = exp_avg_sq.sqrt().add_(group['eps'])
                step_size = group['lr']
                if group['correct_bias']:
                    bias_correction1 = 1.0 - beta1 ** state['step']
                    bias_correction2 = 1.0 - beta2 ** state['step']
                    step_size = step_size * math.sqrt(bias_correction2) / bias_correction1
                p.data.addcdiv_(exp_avg, denom, value=-step_size)
                # Decoupled weight decay, applied after the Adam update and
                # scaled by the raw lr (not the bias-corrected step size).
                if group['weight_decay'] > 0.0:
                    p.data.add_(p.data, alpha=-group['lr'] * group['weight_decay'])
        return loss
|
def get_world_size() -> int:
    """Number of processes in the default group; 1 when not running distributed."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1
|
def get_rank() -> int:
    """Rank of this process in the default group; 0 when not running distributed."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
|
def is_master_process(num_gpus=8):
    """Return True when this process has local rank 0 on its machine
    (always True outside distributed runs)."""
    if not torch.distributed.is_initialized():
        return True
    return dist.get_rank() % num_gpus == 0
|
def run(local_rank, num_proc, func, init_method, shard_id, num_shards, backend, cfg, args):
    """Entry point executed in each spawned child process.

    Args:
        local_rank (int): rank of this process on the current machine.
        num_proc (int): number of processes per machine.
        func (callable): function to execute in each process, as func(cfg, args).
        init_method (str): distributed init method — a TCP address reachable
            from all processes, or a shared-filesystem file:// URL.
        shard_id (int): rank of the current machine.
        num_shards (int): total number of machines in the job.
        backend (str): distributed backend ('nccl', 'gloo', or 'mpi').
        cfg (CfgNode): configs.
        args: extra arguments forwarded to func.
    """
    world_size = num_proc * num_shards
    rank = (shard_id * num_proc) + local_rank
    # init_process_group raises on failure; the previous try/except merely
    # re-raised the exception, so it was removed.
    torch.distributed.init_process_group(backend=backend, init_method=init_method, world_size=world_size, rank=rank)
    torch.cuda.set_device(local_rank)
    func(cfg, args)
|
def destroy_process_group():
    """Tear down the default torch.distributed process group."""
    torch.distributed.destroy_process_group()
|
def scaled_all_reduce(cfg, tensors):
    """Sum-all-reduce each tensor in-place, then scale by the inverse total
    process count (cfg.NUM_GPUS * cfg.NUM_SHARDS).

    Returns the (modified) input list.
    """
    # Kick off all reductions asynchronously, then wait for completion.
    handles = [torch.distributed.all_reduce(t, async_op=True) for t in tensors]
    for handle in handles:
        handle.wait()
    for t in tensors:
        t.mul_((1.0 / cfg.NUM_GPUS) / cfg.NUM_SHARDS)
    return tensors
|
def cat_all_gather(tensors):
    """Gather `tensors` from every rank and concatenate along dim 0."""
    world_size = torch.distributed.get_world_size()
    gathered = [torch.ones_like(tensors) for _ in range(world_size)]
    torch.distributed.all_gather(gathered, tensors, async_op=False)
    return torch.cat(gathered, dim=0)
|
def local_cat_all_gather(tensors):
    """Gather `tensors` within the per-machine process group and concatenate
    along dim 0."""
    gathered = [torch.ones_like(tensors) for _ in range(get_local_size())]
    torch.distributed.all_gather(gathered, tensors, async_op=False, group=_LOCAL_PROCESS_GROUP)
    return torch.cat(gathered, dim=0)
|
def get_local_size():
    """Size of the per-machine process group, i.e. processes per machine
    (1 when not running distributed)."""
    if not (dist.is_available() and dist.is_initialized()):
        return 1
    return dist.get_world_size(group=_LOCAL_PROCESS_GROUP)
|
def get_local_rank():
    """Rank of this process within the local (per-machine) process group
    (0 when not running distributed)."""
    if not (dist.is_available() and dist.is_initialized()):
        return 0
    assert _LOCAL_PROCESS_GROUP is not None
    return dist.get_rank(group=_LOCAL_PROCESS_GROUP)
|
def get_world_size() -> int:
    """World size of the default process group, defaulting to 1 outside
    distributed runs.

    NOTE: duplicate of an identical definition earlier in this module.
    """
    usable = dist.is_available() and dist.is_initialized()
    return dist.get_world_size() if usable else 1
|
def get_rank() -> int:
    """Rank in the default process group, defaulting to 0 outside
    distributed runs.

    NOTE: duplicate of an identical definition earlier in this module.
    """
    usable = dist.is_available() and dist.is_initialized()
    return dist.get_rank() if usable else 0
|
def is_master_process(num_gpus=8):
    """Whether this process has local rank 0 (always True when not distributed).

    NOTE: duplicate of an identical definition earlier in this module.
    """
    if not torch.distributed.is_initialized():
        return True
    return (dist.get_rank() % num_gpus) == 0
|
def run(local_rank, num_proc, func, init_method, shard_id, num_shards, backend, cfg, args):
    """Child-process entry point for distributed training.

    NOTE: duplicate of an identical definition earlier in this module.

    Args:
        local_rank (int): rank of this process on the current machine.
        num_proc (int): number of processes per machine.
        func (callable): function to execute in each process, as func(cfg, args).
        init_method (str): distributed init method (TCP address or file:// URL
            on a shared filesystem).
        shard_id (int): rank of the current machine.
        num_shards (int): total number of machines in the job.
        backend (str): distributed backend ('nccl', 'gloo', or 'mpi').
        cfg (CfgNode): configs.
        args: extra arguments forwarded to func.
    """
    world_size = num_proc * num_shards
    rank = (shard_id * num_proc) + local_rank
    # init_process_group raises on failure; the previous try/except only
    # re-raised the exception, so it was removed.
    torch.distributed.init_process_group(backend=backend, init_method=init_method, world_size=world_size, rank=rank)
    torch.cuda.set_device(local_rank)
    func(cfg, args)
|
def destroy_process_group():
    """Destroy the default process group.

    NOTE: duplicate of an identical definition earlier in this module.
    """
    torch.distributed.destroy_process_group()
|
def scaled_all_reduce(cfg, tensors):
    """Sum-all-reduce every tensor in-place and rescale by the inverse world
    size; returns the input list.

    NOTE: duplicate of an identical definition earlier in this module.
    """
    pending = []
    for t in tensors:
        pending.append(torch.distributed.all_reduce(t, async_op=True))
    while pending:
        pending.pop().wait()
    for t in tensors:
        t.mul_((1.0 / cfg.NUM_GPUS) / cfg.NUM_SHARDS)
    return tensors
|
def cat_all_gather(tensors):
    """All-gather `tensors` across ranks and concatenate along dim 0.

    NOTE: duplicate of an identical definition earlier in this module.
    """
    buffers = [torch.ones_like(tensors) for _ in range(torch.distributed.get_world_size())]
    torch.distributed.all_gather(buffers, tensors, async_op=False)
    return torch.cat(buffers, dim=0)
|
def local_cat_all_gather(tensors):
    """All-gather `tensors` within the per-machine group and concatenate
    along dim 0.

    NOTE: duplicate of an identical definition earlier in this module.
    """
    buffers = [torch.ones_like(tensors) for _ in range(get_local_size())]
    torch.distributed.all_gather(buffers, tensors, async_op=False, group=_LOCAL_PROCESS_GROUP)
    return torch.cat(buffers, dim=0)
|
def get_local_size():
    """Number of processes on this machine (1 when not distributed).

    NOTE: duplicate of an identical definition earlier in this module.
    """
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size(group=_LOCAL_PROCESS_GROUP)
    return 1
|
def get_local_rank():
    """Rank of this process within its per-machine group (0 when not distributed).

    NOTE: duplicate of an identical definition earlier in this module.
    """
    if dist.is_available() and dist.is_initialized():
        assert _LOCAL_PROCESS_GROUP is not None
        return dist.get_rank(group=_LOCAL_PROCESS_GROUP)
    return 0
|
def save_or_append_df(out_path, df):
    """Pickle `df` to out_path, appending to any previously saved frame there."""
    if os.path.exists(out_path):
        df = pd.concat([pd.read_pickle(out_path), df], ignore_index=True)
    df.to_pickle(out_path)
    print(f'Saved output at {out_path}')
|
class JSONEncoder(json.JSONEncoder):
    """JSON encoder that additionally handles numpy arrays/scalars and UTF-8 bytes."""

    def default(self, obj):
        """Convert numpy and bytes objects to JSON-serializable equivalents.

        Raises:
            TypeError: for any other unsupported type.
        """
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, bytes):
            return str(obj, encoding='utf-8')
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        # A second, unreachable np.ndarray branch was removed here.
        raise TypeError('Unserializable object {} of type {}'.format(obj, type(obj)))
|
def write_json(data: Union[(list, dict)], outfile: str) -> None:
    """Write `data` as pretty-printed JSON, creating the parent directory if needed."""
    json_dir = os.path.split(outfile)[0]
    if json_dir and not os.path.exists(json_dir):
        os.makedirs(json_dir)
    with open(outfile, 'w') as f:
        json.dump(data, f, cls=JSONEncoder, ensure_ascii=False, indent=2)
|
def read_json(filename: str) -> Union[(list, dict)]:
    """Read and parse a JSON file as UTF-8.

    Fix: json.load no longer accepts an `encoding` argument (removed in
    Python 3.9, deprecated since 3.1), so the old
    `json.load(fin, encoding='utf-8')` on a binary handle raised TypeError.
    The file is now opened in text mode with an explicit UTF-8 encoding.
    """
    with open(filename, 'r', encoding='utf-8') as fin:
        return json.load(fin)
|
def pil_loader(path: str) -> Image.Image:
    """Open `path` as an RGB PIL image, tolerating truncated files."""
    # Allow PIL to load images whose data stream is incomplete.
    ImageFile.LOAD_TRUNCATED_IMAGES = True
    with open(path, 'rb') as fp:
        image = Image.open(fp)
        return image.convert('RGB')
|
def _suppress_print():
    """Replace builtins.print with a no-op (used to silence non-master ranks)."""
    def _noop_print(*objects, sep=' ', end='\n', file=sys.stdout, flush=False):
        # Intentionally discards all output.
        pass
    builtins.print = _noop_print
|
@functools.lru_cache(maxsize=None)
def _cached_log_stream(filename):
    """Return an append-mode stream for `filename`, cached so every handler
    logging to the same file shares one handle instead of reopening it."""
    return PathManager.open(filename, 'a')
|
@functools.lru_cache()
def setup_logging(num_gpu, num_shards, output='', name='visual_prompt', color=True):
    """Set up logging: stdout logging (optionally colored) on the per-machine
    master process, a shared log file on the global master, and suppressed
    printing everywhere else. Cached so repeated calls with the same
    arguments reuse the configured logger.
    """
    # Only the per-machine master logs to stdout; other ranks stop printing.
    if is_master_process(num_gpu):
        logging.root.handlers = []
        logging.basicConfig(level=logging.INFO, format=_FORMAT, stream=sys.stdout)
    else:
        _suppress_print()
    if (name is None):
        name = __name__
    logger = logging.getLogger(name)
    # Reset this logger so repeated setup does not stack duplicate handlers.
    logger.handlers.clear()
    logger.setLevel(logging.INFO)
    logger.propagate = False
    plain_formatter = logging.Formatter('[%(asctime)s][%(levelname)s] %(name)s: %(lineno)4d: %(message)s', datefmt='%m/%d %H:%M:%S')
    if color:
        formatter = _ColorfulFormatter((colored('[%(asctime)s %(name)s]: ', 'green') + '%(message)s'), datefmt='%m/%d %H:%M:%S', root_name=name, abbrev_name=str(name))
    else:
        formatter = plain_formatter
    # Console handler: per-machine master only.
    if is_master_process(num_gpu):
        ch = logging.StreamHandler(stream=sys.stdout)
        ch.setLevel(logging.DEBUG)
        ch.setFormatter(formatter)
        logger.addHandler(ch)
    # File handler: only the global master (rank 0 across all shards) writes.
    if is_master_process((num_gpu * num_shards)):
        if (len(output) > 0):
            if (output.endswith('.txt') or output.endswith('.log')):
                filename = output
            else:
                filename = os.path.join(output, 'logs.txt')
            PathManager.mkdirs(os.path.dirname(filename))
            # _cached_log_stream shares one append-mode handle per filename.
            fh = logging.StreamHandler(_cached_log_stream(filename))
            fh.setLevel(logging.DEBUG)
            fh.setFormatter(plain_formatter)
            logger.addHandler(fh)
    return logger
|
def setup_single_logging(name, output=''):
    """Set up logging for a single (non-distributed) process: colored stdout
    output plus an optional plain-text log file under `output`."""
    # Reset root logging so repeated setup does not stack handlers.
    logging.root.handlers = []
    logging.basicConfig(level=logging.INFO, format=_FORMAT, stream=sys.stdout)
    if (len(name) == 0):
        name = __name__
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    logger.propagate = False
    plain_formatter = logging.Formatter('[%(asctime)s][%(levelname)s] %(name)s: %(lineno)4d: %(message)s', datefmt='%m/%d %H:%M:%S')
    formatter = _ColorfulFormatter((colored('[%(asctime)s %(name)s]: ', 'green') + '%(message)s'), datefmt='%m/%d %H:%M:%S', root_name=name, abbrev_name=str(name))
    # Console handler (colored).
    ch = logging.StreamHandler(stream=sys.stdout)
    ch.setLevel(logging.DEBUG)
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    # Optional file handler (plain format, shared stream per filename).
    if (len(output) > 0):
        if (output.endswith('.txt') or output.endswith('.log')):
            filename = output
        else:
            filename = os.path.join(output, 'logs.txt')
        PathManager.mkdirs(os.path.dirname(filename))
        fh = logging.StreamHandler(_cached_log_stream(filename))
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(plain_formatter)
        logger.addHandler(fh)
    return logger
|
def get_logger(name):
    """Return the logger registered under `name`."""
    return logging.getLogger(name)
|
def log_json_stats(stats, sort_keys=True):
    """Log `stats` as a JSON string, rendering floats with 6 decimal places.

    Fix: the `sort_keys` argument was previously ignored (dumps hardcoded
    sort_keys=True); it is now honored, and the default preserves the old
    behavior. Note: `stats` must contain a '_type' key, as before.
    """
    logger = get_logger(__name__)
    # Render floats as fixed-precision Decimals so dumps emits 6 places.
    stats = {k: (decimal.Decimal('{:.6f}'.format(v)) if isinstance(v, float) else v) for (k, v) in stats.items()}
    json_stats = simplejson.dumps(stats, sort_keys=sort_keys, use_decimal=True)
    if stats['_type'] in ('test_epoch', 'train_epoch'):
        logger.info('json_stats: {:s}'.format(json_stats))
    else:
        logger.info('{:s}'.format(json_stats))
|
class _ColorfulFormatter(logging.Formatter):
    """Formatter that abbreviates the logger name and prepends a colored
    WARNING/ERROR tag to warning-and-above records."""

    def __init__(self, *args, **kwargs):
        # root_name is required; abbrev_name defaults to the empty string.
        self._root_name = kwargs.pop('root_name') + '.'
        self._abbrev_name = kwargs.pop('abbrev_name', '')
        if self._abbrev_name:
            self._abbrev_name = self._abbrev_name + '.'
        super(_ColorfulFormatter, self).__init__(*args, **kwargs)

    def formatMessage(self, record: logging.LogRecord) -> str:
        record.name = record.name.replace(self._root_name, self._abbrev_name)
        message = super(_ColorfulFormatter, self).formatMessage(record)
        if record.levelno == logging.WARNING:
            prefix = colored('WARNING', 'red', attrs=['blink'])
        elif record.levelno in (logging.ERROR, logging.CRITICAL):
            prefix = colored('ERROR', 'red', attrs=['blink', 'underline'])
        else:
            # INFO and below are passed through untouched.
            return message
        return prefix + ' ' + message
|
def gpu_mem_usage():
    """Peak GPU memory allocated on the current device, in GB (0 without CUDA)."""
    if not torch.cuda.is_available():
        return 0
    bytes_in_gb = 1024 ** 3
    return torch.cuda.max_memory_allocated() / bytes_in_gb
|
class AverageMeter(object):
    """Tracks the most recent value and the running average of a metric."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Clear all statistics."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        template = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return template.format(**self.__dict__)
|
def remove_trailing(eval_dict):
    """Truncate every non-top5 metric series to the length of the shortest
    one; top5 entries are dropped from the result."""
    min_num = min(len(v) for k, v in eval_dict.items() if 'top5' not in k)
    return {k: v[:min_num] for k, v in eval_dict.items() if 'top5' not in k}
|
def get_meta(job_root, job_path, model_type):
    """Parse (data_name, feature_type, lr, wd) from a run path shaped like
    `{job_root}/{model_type}/{data}/{feature}/lr{LR}_wd{WD}/run...`."""
    relative = job_path.split('/run')[0].split(job_root + '/' + model_type)[-1]
    parts = relative.split('/')
    data_name, feat_type, opt_params = parts[1], parts[2], parts[3]
    lr = float(opt_params.split('_')[0].split('lr')[-1])
    wd = float(opt_params.split('_')[1].split('wd')[-1])
    return (data_name, feat_type, lr, wd)
|
def update_eval(line, eval_dict, data_name):
    """Parse a 'Classification results' log line and append its top1 value
    to eval_dict under '{eval_type}_top1' (eval_dict must default to lists)."""
    tail = line.split(': top1:')[-1]
    # top1 is followed by either 'top...' (top-5 etc.) or 'rocauc'.
    metric = 'top' if ('top1' in line and 'top' in tail) else 'rocauc'
    top1 = float(tail.split(metric)[0])
    eval_type = line.split(' Classification results with ')[-1].split(': top1')[0]
    # Strip the '_{data_name}' suffix from the eval split name.
    eval_type = ''.join(eval_type.split('_' + data_name))
    eval_dict[eval_type + '_top1'].append(top1)
|
def get_nmi(job_path):
    """Collect clustering NMI metrics from the first job's section of a log file.

    Returns:
        defaultdict with 'nmi', 'a_nmi', and 'v_nmi' lists of floats.
    """
    nmi_dict = defaultdict(list)
    jobs_seen = 0
    with open(job_path) as f:
        for line in f:
            if 'Rank of current process:' in line:
                jobs_seen += 1
                if jobs_seen == 2:
                    # Stop at the second job header: only the first run counts.
                    break
            if 'Clutering nmi' in line:  # (sic) typo is present in the logs
                nmi_dict['nmi'].append(float(line.split('Clutering nmi: ')[-1].split(',')[0]))
                nmi_dict['a_nmi'].append(float(line.split('adjusted nmi: ')[-1].split(',')[0]))
                nmi_dict['v_nmi'].append(float(line.split('v: ')[-1].split(',')[0]))
    return nmi_dict
|
def get_mean_accuracy(job_path, data_name):
    """Mean per-class accuracy (%) on the val and test splits of a run's
    saved logits dumps.

    Fix: the test confusion matrix was previously computed from the *val*
    logits file (copy-paste bug — both loads used `val_{data_name}`); it
    now loads `test_{data_name}_logits.pth`.

    Returns:
        (val_mean_acc, test_mean_acc) as percentages.
    """
    val_data = torch.load(job_path.replace('logs.txt', f'val_{data_name}_logits.pth'))
    test_data = torch.load(job_path.replace('logs.txt', f'test_{data_name}_logits.pth'))
    v_matrix = confusion_matrix(val_data['targets'], np.argmax(val_data['joint_logits'], 1))
    t_matrix = confusion_matrix(test_data['targets'], np.argmax(test_data['joint_logits'], 1))
    v_acc = np.mean(v_matrix.diagonal() / v_matrix.sum(axis=1)) * 100
    t_acc = np.mean(t_matrix.diagonal() / t_matrix.sum(axis=1)) * 100
    return (v_acc, t_acc)
|
def get_training_data(job_path, model_type, job_root):
    """Parse one run's log file into its loss curve, eval metrics and metadata.

    Returns:
        (train_loss, eval_dict, meta_dict, (v_top1, t_top1)) where the last
        pair is always (None, None) and eval_dict maps metric name -> list.
    """
    data_name, feat_type, lr, wd = get_meta(job_root, job_path, model_type)
    with open(job_path) as f:
        log_lines = f.readlines()
    train_loss = []
    eval_dict = defaultdict(list)
    seen_ranks = 0
    total_params = -1
    gradiented_params = -1
    batch_size = None
    for line in log_lines:
        if "{'BATCH_SIZE'" in line and batch_size is None:
            batch_size = int(line.split("'BATCH_SIZE': ")[-1].split(',')[0])
        if 'Total Parameters: ' in line:
            total_params = int(line.split('Total Parameters: ')[-1].split('\t')[0])
            gradiented_params = int(line.split('Gradient Parameters: ')[-1].split('\n')[0])
        if 'Rank of current process:' in line:
            # Stop at the second process banner: only the first job's log.
            seen_ranks += 1
            if seen_ranks == 2:
                break
        if 'average train loss:' in line:
            train_loss.append(float(line.split('average train loss: ')[-1]))
        if ' Classification results with ' in line:
            update_eval(line, eval_dict, data_name)
    meta_dict = {
        'data': data_name,
        'feature': feat_type,
        # Normalize lr to the 256 reference batch size.
        'lr': float(lr) * 256 / int(batch_size),
        'wd': wd,
        'total_params': total_params,
        'tuned_params': gradiented_params,
        'tuned / total (%)': round(gradiented_params / total_params * 100, 4),
        'batch_size': batch_size,
    }
    v_top1, t_top1 = None, None
    return (train_loss, eval_dict, meta_dict, (v_top1, t_top1))
|
def get_time(file):
    """Extract wall-clock duration and mean per-batch timings from a log file.

    The first and last log lines are expected to start with a
    '[MM/DD HH:MM:SS]' timestamp.

    Args:
        file: path to the log file.

    Returns:
        (total_time, mean_test_s_per_batch, mean_train_s_per_batch).
        The per-batch means are NaN when no matching line exists.
    """
    # FIX: the file was previously opened and read twice; read it once.
    with open(file) as f:
        lines = f.readlines()
    start_time = datetime.datetime.strptime(lines[0].split('[')[1].split(']')[0], '%m/%d %H:%M:%S')
    end_time = datetime.datetime.strptime(lines[-1].split('[')[1].split(']')[0], '%m/%d %H:%M:%S')
    per_batch = []
    per_batch_train = []
    for line in lines[::-1]:
        # e.g. 'Test 12. loss: 0.5, 0.25 s / batch'
        if ('. loss:' in line) and ('Test' in line):
            per_batch.append(float(line.split(' s / batch')[0].split(',')[-1]))
        # e.g. '12. train loss: 0.4, 0.50 s / batch'
        if '. train loss:' in line:
            per_batch_train.append(float(line.split(' s / batch')[0].split(',')[-1]))
    return (datetime.timedelta(seconds=(end_time - start_time).total_seconds()),
            np.mean(per_batch), np.mean(per_batch_train))
|
def get_df(files, model_type, root, is_best=True, is_last=True, max_epoch=300):
    """Aggregate per-run log files into one summary DataFrame row per run.

    Args:
        files: iterable of paths to per-run logs.txt files.
        model_type: model/job-type name (used to locate hyper-params in paths).
        root: root directory of all jobs (forwarded to get_training_data).
        is_best: record metrics at the best-val epoch ('b-' columns).
        is_last: record metrics at the final epoch ('l-' columns).
        max_epoch: unused; kept for interface compatibility.

    Returns:
        pandas.DataFrame sorted by (data, feature, lr, wd), or None when no
        run was ready.
    """
    pd_dict = defaultdict(list)
    for job_path in tqdm(files, desc=model_type):
        (train_loss, eval_results, meta_dict, (v_top1, t_top1)) = get_training_data(job_path, model_type, root)
        if (len(eval_results) == 0) or (len(eval_results['val_top1']) == 0):
            print(f'job {job_path} not ready')
            continue
        if ('val_top1' not in eval_results) or ('test_top1' not in eval_results):
            print(f'inbalanced: {job_path}')
            continue
        for (k, v) in meta_dict.items():
            pd_dict[k].append(v)
        # Epoch with the best validation top-1; used for all 'b-' columns.
        best_epoch = np.argmax(eval_results['val_top1'])
        if is_best:
            for (name, val) in eval_results.items():
                if ('top5' in name) or (len(val) == 0):
                    continue
                if not isinstance(val[0], list):
                    try:
                        pd_dict['b-' + name].append(val[best_epoch])
                    # FIX: narrowed from a bare `except:`; only a metric that
                    # was logged fewer times than val_top1 can fail here.
                    except IndexError:
                        pd_dict['b-' + name].append(-1)
                        print(name, best_epoch, val)
        if is_last:
            if v_top1 is not None:
                pd_dict['l-val_top1'].append(v_top1)
                pd_dict['l-test_top1'].append(t_top1)
                val = eval_results['val_top1']
            else:
                for (name, val) in eval_results.items():
                    if ('top5' in name) or (len(val) == 0):
                        continue
                    pd_dict['l-' + name].append(val[-1])
        pd_dict['best_epoch'].append(f'{(best_epoch + 1)} | {len(val)}')
        pd_dict['file'].append(job_path)
        (total_time, _, _) = get_time(job_path)
        pd_dict['total_time'].append(total_time)
    result_df = None
    if len(pd_dict) > 0:
        result_df = pd.DataFrame(pd_dict)
        result_df = result_df.sort_values(['data', 'feature', 'lr', 'wd'])
    return result_df
|
def delete_ckpts(f):
    """Delete every .pth checkpoint that lives in the same directory as f."""
    directory = os.path.split(f)[0]
    for ckpt in glob.glob(directory + '/*.pth'):
        os.remove(ckpt)
        print(f'removed {ckpt}')
|
def average_df(df, metric_names=None, take_average=True):
    """Aggregate repeated runs into one row per (data, feature, type) group.

    Args:
        df: DataFrame with 'data', 'feature', 'type' columns plus metrics.
        metric_names: metric columns to aggregate; defaults to
            ['l-val_top1', 'l-val_base_top1']. (FIX: the default used to be a
            shared mutable list argument.)
        take_average: if True report mean and std per metric, else the median.

    Returns:
        New DataFrame sorted by (data, feature, type). Metrics are formatted
        as '{:.2f}' strings; every other hyper-parameter column keeps the
        group's first value.
    """
    if metric_names is None:
        metric_names = ['l-val_top1', 'l-val_base_top1']
    data_names = set(df['data'])
    f_names = set(df['feature'])
    t_names = set(df['type'])
    skip_cols = ['data', 'feature', 'type', 'file', 'best_epoch'] + metric_names
    hp_names = [c for c in df.columns if c not in skip_cols]
    data_dict = defaultdict(list)
    for d_name in data_names:
        for f_name in f_names:
            for t_name in t_names:
                result = df[(df.data == d_name) & (df.feature == f_name) & (df.type == t_name)]
                if len(result) == 0:
                    continue
                data_dict['data'].append(d_name)
                data_dict['feature'].append(f_name)
                data_dict['type'].append(t_name)
                data_dict['total_runs'].append(len(result))
                for m in metric_names:
                    # FIX: was a pointless `enumerate` comprehension.
                    values = list(result[m])
                    if take_average:
                        data_dict[m].append('{:.2f}'.format(np.mean(values)))
                        data_dict[f'{m}-std'].append('{:.2f}'.format(np.std(values)))
                    else:
                        data_dict[m].append('{:.2f}'.format(np.median(values)))
                for h_name in hp_names:
                    data_dict[h_name].append(result[h_name].iloc[0])
    out = pd.DataFrame(data_dict)
    return out.sort_values(['data', 'feature', 'type'])
|
def filter_df(df, sorted_cols, max_num):
    """Keep only the top max_num rows (by sorted_cols, ascending; last rows
    are kept) within each (data, feature, type) group."""
    groups = []
    for d_name in set(df['data']):
        for f_name in set(df['feature']):
            for t_name in set(df['type']):
                subset = df[(df.data == d_name)]
                subset = subset[(subset.feature == f_name)]
                subset = subset[(subset.type == t_name)]
                if len(subset) == 0:
                    continue
                # Only sort by columns that actually exist in this frame.
                usable = [c for c in sorted_cols if c in subset.columns]
                subset = subset.sort_values(usable, ignore_index=True)
                keep = min(max_num, len(subset))
                groups.append(subset.iloc[-keep:])
    return pd.concat(groups)
|
def display_results(df, sorted_cols=['data', 'feature', 'type', 'l-val_top1'], max_num=1):
    """Sort (and optionally per-group filter) a results DataFrame for display.

    When max_num is not None, only the best max_num rows per group are kept,
    ranked by the metric columns sorted_cols[3:].
    """
    df = df[[c for c in df.columns]]
    if max_num is not None:
        df = filter_df(df, sorted_cols[3:], max_num)
    return df.sort_values(sorted_cols).reset_index(drop=True)
|
def setup(args):
    """Build the experiment config and claim a fresh runN output directory.

    Tries run1..runRUN_N_TIMES in order; raises once every slot is taken.
    """
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    output_dir = cfg.OUTPUT_DIR
    lr = cfg.SOLVER.BASE_LR
    wd = cfg.SOLVER.WEIGHT_DECAY
    output_folder = os.path.join(cfg.DATA.NAME, cfg.DATA.FEATURE, f'{args.id}_lr{lr}_wd{wd}')
    count = 1
    while count <= cfg.RUN_N_TIMES:
        output_path = os.path.join(output_dir, output_folder, f'run{count}')
        # Random pause so concurrent jobs with identical settings don't race
        # each other for the same runN slot.
        sleep(randint(3, 30))
        if PathManager.exists(output_path):
            count += 1
        else:
            PathManager.mkdirs(output_path)
            cfg.OUTPUT_DIR = output_path
            break
    if count > cfg.RUN_N_TIMES:
        raise ValueError(f'Already run {cfg.RUN_N_TIMES} times for {output_folder}, no need to run more')
    cfg.freeze()
    return cfg
|
def get_loaders(cfg, logger):
    """Build the train / val / test data loaders for this experiment.

    vtab-* datasets train on the combined train+val split; test loading can
    be disabled via cfg.DATA.NO_TEST.
    """
    logger.info('Loading training data (final training data for vtab)...')
    build_train = (data_loader.construct_trainval_loader
                   if cfg.DATA.NAME.startswith('vtab-')
                   else data_loader.construct_train_loader)
    train_loader = build_train(cfg)
    logger.info('Loading validation data...')
    val_loader = data_loader.construct_val_loader(cfg)
    logger.info('Loading test data...')
    test_loader = None
    if cfg.DATA.NO_TEST:
        logger.info('...no test data is constructed')
    else:
        test_loader = data_loader.construct_test_loader(cfg)
    return (train_loader, val_loader, test_loader)
|
def train(cfg, args):
    """Seed RNGs, build loaders and model, then train (or evaluate only).

    Args:
        cfg: frozen experiment config.
        args: command-line namespace (used for logging setup).
    """
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    if cfg.SEED is not None:
        torch.manual_seed(cfg.SEED)
        np.random.seed(cfg.SEED)
        # FIX: python's RNG was previously seeded with a hard-coded 0,
        # inconsistent with the torch/numpy seeding above.
        random.seed(cfg.SEED)
    logging_train_setup(args, cfg)
    logger = logging.get_logger('visual_prompt')
    (train_loader, val_loader, test_loader) = get_loaders(cfg, logger)
    logger.info('Constructing models...')
    (model, cur_device) = build_model(cfg)
    trainable_params = [name for (name, p) in model.named_parameters() if p.requires_grad]
    print(trainable_params)
    logger.info('Setting up Evalutator...')
    evaluator = Evaluator()
    logger.info('Setting up Trainer...')
    trainer = Trainer(cfg, model, evaluator, cur_device)
    if train_loader:
        trainer.train_classifier(train_loader, val_loader, test_loader)
    else:
        print('No train loader presented. Exit')
    # With zero training epochs this run is evaluation-only.
    if cfg.SOLVER.TOTAL_EPOCH == 0:
        trainer.eval_classifier(test_loader, 'test', 0)
|
def main(args):
    """Entry point: build the config, snapshot it to disk, then train."""
    cfg = setup(args)
    # Persist the resolved config alongside the run for reproducibility.
    config_path = os.path.join(cfg.OUTPUT_DIR, 'configs.yaml')
    with open(config_path, 'w') as f:
        f.write(cfg.dump())
    train(cfg, args)
|
def create_gold(json_file, gold_file, db_id_file):
    """Write gold 'query<TAB>db_id' lines grouped per interaction.

    Interactions are emitted in db_id_file order, separated by blank lines;
    no blank line follows the very last interaction.
    """
    with open(db_id_file) as f:
        database_id_list = [line.strip() for line in f]
    with open(json_file) as f:
        data = json.load(f)
    gold_query = {}
    for interaction in data:
        db_id = interaction['database_id']
        # Queries are flattened to a single line each.
        turns = [turn['query'].replace('\n', ' ') for turn in interaction['interaction']]
        gold_query.setdefault(db_id, []).append(turns)
    with open(gold_file, 'w') as f:
        for (i, db_id) in enumerate(database_id_list):
            for (j, interaction) in enumerate(gold_query[db_id]):
                for query in interaction:
                    f.write('{}\t{}\n'.format(query, db_id))
                is_last = (i == len(database_id_list) - 1) and (j == len(gold_query[db_id]) - 1)
                if not is_last:
                    f.write('\n')
|
def timeval(string):
    """Return the numeric (hhmm, 24-hour) form of a clock-time token.

    Examples: '8am' -> '800', '830am' -> '830', '8pm' -> '2000'.

    Inputs:
        string (str): token that may represent a time.

    Returns:
        String representing the absolute time, or '' for non-time tokens.
    """
    # FIX: the 'am' branch previously skipped the isdigit() check, so a token
    # like 'xam' crashed int(); both suffixes are now validated, matching
    # is_time().
    if (string.endswith('am') or string.endswith('pm')) and string[:-2].isdigit():
        numval = int(string[:-2])
        # Bare hours ('8am', '12pm') are scaled to hhmm.
        if (len(string) == 3) or (len(string) == 4):
            numval *= 100
        if string.endswith('pm'):
            numval += 1200
        return str(numval)
    return ''
|
def is_time(string):
    """Return True iff the token is digits followed by 'am' or 'pm'.

    Inputs:
        string (str): String to check.

    Returns:
        Whether the string represents a time.
    """
    return string.endswith(('am', 'pm')) and string[:-2].isdigit()
|
def deanonymize(sequence, ent_dict, key):
    """Replace anonymization tokens with their original entity text.

    Inputs:
        sequence (list of str): Tokens to deanonymize.
        ent_dict (dict str->(dict str->str)): Maps tokens to entity dicts.
        key (str): Which surface form to restore (natural language or SQL).

    Returns:
        Deanonymized sequence of tokens.
    """
    restored = []
    for tok in sequence:
        restored.extend(ent_dict[tok][key] if tok in ent_dict else [tok])
    return restored
|
class Anonymizer():
    """Anonymization class for keeping track of entities in this domain and
    scripts for anonymizing/deanonymizing.

    Members:
        anonymization_map (list of dict (str->str)): Containing entities from
            the anonymization file.
        entity_types (list of str): All entities in the anonymization file.
        keys (set of str): Possible keys (types of text handled); in this case
            it should be one for natural language and another for SQL.
        entity_set (set of str): entity_types as a set.
    """

    def __init__(self, filename):
        self.anonymization_map = []
        self.entity_types = []
        self.keys = set()
        # One JSON object per line; each maps text kinds (the self.keys) to
        # surface forms and carries a 'type' field.
        pairs = [json.loads(line) for line in open(filename).readlines()]
        for pair in pairs:
            for key in pair:
                if (key != 'type'):
                    self.keys.add(key)
            self.anonymization_map.append(pair)
            if (pair['type'] not in self.entity_types):
                self.entity_types.append(pair['type'])
        # Built-in types always available in addition to those from the file.
        self.entity_types.append(ENTITY_NAME)
        self.entity_types.append(CONSTANT_NAME)
        self.entity_types.append(TIME_NAME)
        self.entity_set = set(self.entity_types)

    def get_entity_type_from_token(self, token):
        """Gets the type of an entity given an anonymized token.

        Inputs:
            token (str): The entity token.

        Returns:
            str, representing the type of the entity.
        """
        # Tokens have the form <type><SEPARATOR><id>.
        colon_loc = token.index(SEPARATOR)
        entity_type = token[:colon_loc]
        assert (entity_type in self.entity_set)
        return entity_type

    def is_anon_tok(self, token):
        """Returns whether a token is an anonymized token or not.

        Input:
            token (str): The token to check.

        Returns:
            bool, whether the token is an anonymized token.
        """
        return (token.split(SEPARATOR)[0] in self.entity_set)

    def get_anon_id(self, token):
        """Gets the entity index (unique ID) for a token.

        Input:
            token (str): The token to get the index from.

        Returns:
            int, the token ID if it is an anonymized token; otherwise -1.
        """
        if self.is_anon_tok(token):
            return self.entity_types.index(token.split(SEPARATOR)[0])
        else:
            return (-1)

    def anonymize(self, sequence, tok_to_entity_dict, key, add_new_anon_toks=False):
        """Anonymizes a sequence.

        Inputs:
            sequence (list of str): Sequence to anonymize.
            tok_to_entity_dict (dict): Existing dictionary mapping from
                anonymized tokens to entities; mutated in place when
                add_new_anon_toks is set.
            key (str): Which kind of text this is (natural language or SQL).
            add_new_anon_toks (bool): Whether to add new entities to
                tok_to_entity_dict.

        Returns:
            list of str, the anonymized sequence.
        """
        # Replace longest entities first so shorter ones cannot shadow them.
        sorted_dict = sorted(tok_to_entity_dict.items(), key=(lambda k: len(k[1][key])))[::(-1)]
        anonymized_sequence = copy.deepcopy(sequence)
        if add_new_anon_toks:
            # Count existing tokens per type so fresh ids continue the
            # numbering instead of colliding.
            type_counts = {}
            for entity_type in self.entity_types:
                type_counts[entity_type] = 0
            for token in tok_to_entity_dict:
                entity_type = self.get_entity_type_from_token(token)
                type_counts[entity_type] += 1
        # Pass 1: re-apply every already-known anonymization token.
        for (token, modalities) in sorted_dict:
            our_modality = modalities[key]
            # Substitute repeatedly until no occurrence of the entity remains.
            while util.subsequence(our_modality, anonymized_sequence):
                found = False
                for startidx in range(((len(anonymized_sequence) - len(our_modality)) + 1)):
                    if (anonymized_sequence[startidx:(startidx + len(our_modality))] == our_modality):
                        anonymized_sequence = ((anonymized_sequence[:startidx] + [token]) + anonymized_sequence[(startidx + len(our_modality)):])
                        found = True
                        break
                assert found, (((('Thought ' + str(our_modality)) + ' was in [') + str(anonymized_sequence)) + '] but could not find it')
        if add_new_anon_toks:
            # Pass 2: introduce fresh tokens for entities from the
            # anonymization file that are not yet in tok_to_entity_dict.
            sorted_anon_map = sorted(self.anonymization_map, key=(lambda k: len(k[key])))[::(-1)]
            for pair in sorted_anon_map:
                our_modality = pair[key]
                token_type = pair['type']
                new_token = ((token_type + SEPARATOR) + str(type_counts[token_type]))
                while util.subsequence(our_modality, anonymized_sequence):
                    found = False
                    for startidx in range(((len(anonymized_sequence) - len(our_modality)) + 1)):
                        if (anonymized_sequence[startidx:(startidx + len(our_modality))] == our_modality):
                            if (new_token not in tok_to_entity_dict):
                                type_counts[token_type] += 1
                                tok_to_entity_dict[new_token] = pair
                            anonymized_sequence = ((anonymized_sequence[:startidx] + [new_token]) + anonymized_sequence[(startidx + len(our_modality)):])
                            found = True
                            break
                    assert found, (((('Thought ' + str(our_modality)) + ' was in [') + str(anonymized_sequence)) + '] but could not find it')
            # Pass 3: anonymize bare numbers and clock times.
            for (index, token) in enumerate(anonymized_sequence):
                if (token.isdigit() or is_time(token)):
                    if token.isdigit():
                        entity_type = CONSTANT_NAME
                        # NOTE(review): new_token here is left over from the
                        # loop above (and undefined if sorted_anon_map was
                        # empty); the assignment is overwritten before use.
                        value = new_token
                    if is_time(token):
                        entity_type = TIME_NAME
                        value = timeval(token)
                    # Reuse an existing token for this literal if one maps to
                    # the same surface form; note `value` is re-bound by the
                    # loop below.
                    new_token = ''
                    new_dict = {}
                    found = False
                    for (entity, value) in tok_to_entity_dict.items():
                        if (value[key][0] == token):
                            new_token = entity
                            new_dict = value
                            found = True
                            break
                    if (not found):
                        new_token = ((entity_type + SEPARATOR) + str(type_counts[entity_type]))
                        new_dict = {}
                        for tempkey in self.keys:
                            new_dict[tempkey] = [token]
                        tok_to_entity_dict[new_token] = new_dict
                        type_counts[entity_type] += 1
                    anonymized_sequence[index] = new_token
        return anonymized_sequence
|
class UtteranceItem():
    """Read-only view of one utterance inside a gold interaction.

    Every accessor forwards to the utterance stored at self.utterance_index
    of the wrapped interaction.
    """

    def __init__(self, interaction, index):
        self.interaction = interaction
        self.utterance_index = index

    def __str__(self):
        return str(self.interaction.utterances[self.utterance_index])

    def _utterance(self):
        # Internal shorthand for the wrapped utterance.
        return self.interaction.utterances[self.utterance_index]

    def histories(self, maximum):
        """Input sequences of up to `maximum` preceding utterances."""
        if maximum <= 0:
            return []
        history_seqs = [u.input_seq_to_use for u in self.interaction.utterances[:self.utterance_index]]
        return history_seqs[-maximum:] if len(history_seqs) > maximum else history_seqs

    def input_sequence(self):
        return self._utterance().input_seq_to_use

    def previous_query(self):
        """Anonymized gold query of the previous turn ([] on the first)."""
        if self.utterance_index == 0:
            return []
        return self.interaction.utterances[self.utterance_index - 1].anonymized_gold_query

    def anonymized_gold_query(self):
        return self._utterance().anonymized_gold_query

    def snippets(self):
        return self._utterance().available_snippets

    def original_gold_query(self):
        return self._utterance().original_gold_query

    def contained_entities(self):
        return self._utterance().contained_entities

    def original_gold_queries(self):
        return [q[0] for q in self._utterance().all_gold_queries]

    def gold_tables(self):
        return [q[1] for q in self._utterance().all_gold_queries]

    def gold_query(self):
        return self._utterance().gold_query_to_use + [vocab.EOS_TOK]

    def gold_edit_sequence(self):
        return self._utterance().gold_edit_sequence

    def gold_table(self):
        return self._utterance().gold_sql_results

    def all_snippets(self):
        return self.interaction.snippets

    def within_limits(self, max_input_length=float('inf'), max_output_length=float('inf')):
        return self._utterance().length_valid(max_input_length, max_output_length)

    def expand_snippets(self, sequence):
        # Strip the EOS marker before expanding snippet tokens.
        if sequence[-1] == vocab.EOS_TOK:
            sequence = sequence[:-1]
        return sql_util.fix_parentheses(self.interaction.expand_snippets(sequence))

    def flatten_sequence(self, sequence):
        # Expand snippets, then restore the original entity surface forms.
        if sequence[-1] == vocab.EOS_TOK:
            sequence = sequence[:-1]
        return self.interaction.deanonymize(self.interaction.expand_snippets(sequence), 'sql')
|
class UtteranceBatch():
    """A fixed list of utterance items consumed one at a time via a cursor."""

    def __init__(self, items):
        self.items = items

    def __len__(self):
        return len(self.items)

    def start(self):
        """Reset the cursor to the first item."""
        self.index = 0

    def next(self):
        """Return the current item and advance the cursor."""
        current = self.items[self.index]
        self.index += 1
        return current

    def done(self):
        """True once every item has been consumed."""
        return self.index >= len(self.items)
|
class PredUtteranceItem():
    """One utterance during prediction, carrying the model's own history.

    Unlike UtteranceItem (the gold view), previous_query() here returns the
    *predicted* query from the preceding turn.
    """

    def __init__(self, input_sequence, interaction_item, previous_query, index, available_snippets):
        self.input_seq_to_use = input_sequence
        self.interaction_item = interaction_item
        self.index = index
        self.available_snippets = available_snippets
        self.prev_pred_query = previous_query

    def input_sequence(self):
        return self.input_seq_to_use

    def histories(self, maximum):
        """Input sequences of up to `maximum` previously-processed turns."""
        # FIX: the maximum == 0 short-circuit previously returned the
        # not-yet-defined local `histories` (NameError); it now returns [].
        if maximum == 0:
            return []
        histories = []
        for utterance in self.interaction_item.processed_utterances[:self.index]:
            histories.append(utterance.input_sequence())
        if len(histories) > maximum:
            histories = histories[-maximum:]
        return histories

    def snippets(self):
        return self.available_snippets

    def previous_query(self):
        return self.prev_pred_query

    def flatten_sequence(self, sequence):
        return self.interaction_item.flatten_sequence(sequence)

    def remove_snippets(self, sequence):
        return sql_util.fix_parentheses(self.interaction_item.expand_snippets(sequence))

    def set_predicted_query(self, query):
        self.anonymized_pred_query = query
|
class InteractionItem():
    """Stateful wrapper around one interaction during training or prediction.

    Tracks the turns processed so far, a bank of SQL snippets harvested from
    earlier queries, and a cursor over the interaction's utterances.
    """

    def __init__(self, interaction, max_input_length=float('inf'), max_output_length=float('inf'), nl_to_sql_dict={}, maximum_length=float('inf')):
        # NOTE(review): nl_to_sql_dict={} is a shared mutable default; it is
        # only stored here, so this is benign as long as callers don't
        # mutate it.
        if (maximum_length != float('inf')):
            # Deep-copy before truncating so the caller's interaction object
            # is left untouched.
            self.interaction = copy.deepcopy(interaction)
            self.interaction.utterances = self.interaction.utterances[:maximum_length]
        else:
            self.interaction = interaction
        self.processed_utterances = []
        self.snippet_bank = []
        self.identifier = self.interaction.identifier
        self.max_input_length = max_input_length
        self.max_output_length = max_output_length
        self.nl_to_sql_dict = nl_to_sql_dict
        self.index = 0

    def __len__(self):
        return len(self.interaction)

    def __str__(self):
        # NOTE(review): relies on processed utterances exposing gold_query()
        # and anonymized_query(); PredUtteranceItem does not define these --
        # confirm before calling __str__ mid-prediction.
        s = 'Utterances, gold queries, and predictions:\n'
        for (i, utterance) in enumerate(self.interaction.utterances):
            s += (' '.join(utterance.input_seq_to_use) + '\n')
            pred_utterance = self.processed_utterances[i]
            s += (' '.join(pred_utterance.gold_query()) + '\n')
            s += (' '.join(pred_utterance.anonymized_query()) + '\n')
            s += '\n'
        s += 'Snippets:\n'
        for snippet in self.snippet_bank:
            s += (str(snippet) + '\n')
        return s

    def start_interaction(self):
        # Sanity-check that per-interaction state was reset (see finish()).
        assert (len(self.snippet_bank) == 0)
        assert (len(self.processed_utterances) == 0)
        assert (self.index == 0)

    def next_utterance(self):
        # Advance the cursor and wrap the next turn, threading through the
        # previous turn's *predicted* query (if any).
        utterance = self.interaction.utterances[self.index]
        self.index += 1
        available_snippets = self.available_snippets(snippet_keep_age=1)
        return PredUtteranceItem(utterance.input_seq_to_use, self, (self.processed_utterances[(-1)].anonymized_pred_query if (len(self.processed_utterances) > 0) else []), (self.index - 1), available_snippets)

    def done(self):
        # Finished once every utterance has been processed.
        return (len(self.processed_utterances) == len(self.interaction))

    def finish(self):
        # Reset all per-interaction state so the item can be replayed.
        self.snippet_bank = []
        self.processed_utterances = []
        self.index = 0

    def utterance_within_limits(self, utterance_item):
        return utterance_item.within_limits(self.max_input_length, self.max_output_length)

    def available_snippets(self, snippet_keep_age):
        # Only snippets recent enough (age <= snippet_keep_age) are usable.
        return [snippet for snippet in self.snippet_bank if (snippet.index <= snippet_keep_age)]

    def gold_utterances(self):
        # Gold (non-predicted) views over every turn.
        utterances = []
        for (i, utterance) in enumerate(self.interaction.utterances):
            utterances.append(UtteranceItem(self.interaction, i))
        return utterances

    def get_schema(self):
        return self.interaction.schema

    def add_utterance(self, utterance, predicted_sequence, snippets=None, previous_snippets=[], simple=False):
        # Record a processed utterance and refresh the snippet bank, either
        # from precomputed snippets or by harvesting the predicted SQL.
        # NOTE(review): previous_snippets=[] is a shared mutable default; it
        # is only read here.
        if (not snippets):
            self.add_snippets(predicted_sequence, previous_snippets=previous_snippets, simple=simple)
        else:
            for snippet in snippets:
                snippet.assign_id(len(self.snippet_bank))
                self.snippet_bank.append(snippet)
            for snippet in self.snippet_bank:
                snippet.increase_age()
        self.processed_utterances.append(utterance)

    def add_snippets(self, sequence, previous_snippets=[], simple=False):
        # Extract subtree snippets from the SQL sequence, id them, then age
        # the whole bank by one step.
        if sequence:
            if simple:
                snippets = sql_util.get_subtrees_simple(sequence, oldsnippets=previous_snippets)
            else:
                snippets = sql_util.get_subtrees(sequence, oldsnippets=previous_snippets)
            for snippet in snippets:
                snippet.assign_id(len(self.snippet_bank))
                self.snippet_bank.append(snippet)
        for snippet in self.snippet_bank:
            snippet.increase_age()

    def expand_snippets(self, sequence):
        # Replace snippet tokens by their expansions from the current bank.
        return sql_util.fix_parentheses(snip.expand_snippets(sequence, self.snippet_bank))

    def remove_snippets(self, sequence):
        # Drop EOS, then expand snippet tokens.
        if (sequence[(-1)] == vocab.EOS_TOK):
            sequence = sequence[:(-1)]
        no_snippets_sequence = self.expand_snippets(sequence)
        no_snippets_sequence = sql_util.fix_parentheses(no_snippets_sequence)
        return no_snippets_sequence

    def flatten_sequence(self, sequence, gold_snippets=False):
        # Expand snippets (from the gold interaction or the predicted bank)
        # and deanonymize back to executable SQL.
        if (sequence[(-1)] == vocab.EOS_TOK):
            sequence = sequence[:(-1)]
        if gold_snippets:
            no_snippets_sequence = self.interaction.expand_snippets(sequence)
        else:
            no_snippets_sequence = self.expand_snippets(sequence)
        no_snippets_sequence = sql_util.fix_parentheses(no_snippets_sequence)
        deanon_sequence = self.interaction.deanonymize(no_snippets_sequence, 'sql')
        return deanon_sequence

    def gold_query(self, index):
        return (self.interaction.utterances[index].gold_query_to_use + [vocab.EOS_TOK])

    def original_gold_query(self, index):
        return self.interaction.utterances[index].original_gold_query

    def gold_table(self, index):
        return self.interaction.utterances[index].gold_sql_results
|
class InteractionBatch():
    """A batch of interactions stepped through turn-by-turn in lockstep."""

    def __init__(self, items):
        self.items = items

    def __len__(self):
        return len(self.items)

    def start(self):
        # Reset to the first turn of every interaction.
        self.timestep = 0
        self.current_interactions = []

    def get_next_utterance_batch(self, snippet_keep_age, use_gold=False):
        """Collect the current turn from every interaction that still has one.

        Interactions shorter than the current timestep drop out of
        current_interactions. NOTE(review): this calls
        interaction.original_utterances(...), which InteractionItem (above)
        does not define -- confirm which interaction type is batched here.
        """
        items = []
        self.current_interactions = []
        for interaction in self.items:
            if (self.timestep < len(interaction)):
                utterance_item = interaction.original_utterances(snippet_keep_age, use_gold)[self.timestep]
                self.current_interactions.append(interaction)
                items.append(utterance_item)
        self.timestep += 1
        return UtteranceBatch(items)

    def done(self):
        # True when no interaction has an utterance left at this timestep.
        finished = True
        for interaction in self.items:
            if (self.timestep < len(interaction)):
                finished = False
                # Early exit: one unfinished interaction is enough.
                return finished
        return finished
|
class ATISDataset():
' Contains the ATIS data. '
    def __init__(self, params):
        """Load train/valid(/dev/test) splits, vocabularies and schemas.

        Two dataset layouts are handled: sparc/cosql-style directories
        (path without 'atis'), which build joint train+valid vocabularies and
        a schema vocabulary, and the original ATIS layout, which builds
        vocabularies from the training split only.
        """
        self.anonymizer = None
        if params.anonymize:
            self.anonymizer = anon.Anonymizer(ANONYMIZATION_FILENAME)
        if (not os.path.exists(params.data_directory)):
            os.mkdir(params.data_directory)
        self.entities_dictionary = NLtoSQLDict(ENTITIES_FILENAME)
        database_schema = None
        if params.database_schema_filename:
            # 'removefrom' data uses table-qualified column names; plain data
            # uses bare column names.
            if ('removefrom' not in params.data_directory):
                (database_schema, column_names_surface_form, column_names_embedder_input) = self.read_database_schema_simple(params.database_schema_filename)
            else:
                (database_schema, column_names_surface_form, column_names_embedder_input) = self.read_database_schema(params.database_schema_filename)
        int_load_function = load_function(params, self.entities_dictionary, self.anonymizer, database_schema=database_schema)

        def collapse_list(the_list):
            """ Collapses a list of list into a single list."""
            return [s for i in the_list for s in i]
        if ('atis' not in params.data_directory):
            # sparc / cosql style: vocabularies are built over train + valid.
            self.train_data = ds.DatasetSplit(os.path.join(params.data_directory, params.processed_train_filename), params.raw_train_filename, int_load_function)
            self.valid_data = ds.DatasetSplit(os.path.join(params.data_directory, params.processed_validation_filename), params.raw_validation_filename, int_load_function)
            train_input_seqs = collapse_list(self.train_data.get_ex_properties((lambda i: i.input_seqs())))
            valid_input_seqs = collapse_list(self.valid_data.get_ex_properties((lambda i: i.input_seqs())))
            all_input_seqs = (train_input_seqs + valid_input_seqs)
            self.input_vocabulary = ATISVocabulary(all_input_seqs, os.path.join(params.data_directory, params.input_vocabulary_filename), params, is_input='input', anonymizer=(self.anonymizer if params.anonymization_scoring else None))
            # Separate vocabulary over schema (column/table) word sequences.
            self.output_vocabulary_schema = ATISVocabulary(column_names_embedder_input, os.path.join(params.data_directory, ('schema_' + params.output_vocabulary_filename)), params, is_input='schema', anonymizer=(self.anonymizer if params.anonymization_scoring else None))
            train_output_seqs = collapse_list(self.train_data.get_ex_properties((lambda i: i.output_seqs())))
            valid_output_seqs = collapse_list(self.valid_data.get_ex_properties((lambda i: i.output_seqs())))
            all_output_seqs = (train_output_seqs + valid_output_seqs)
            # SQL keywords stay in the output vocabulary; schema surface forms
            # are skipped (the schema encoder produces those tokens).
            sql_keywords = ['.', 't1', 't2', '=', 'select', 'as', 'join', 'on', ')', '(', 'where', 't3', 'by', ',', 'group', 'distinct', 't4', 'and', 'limit', 'desc', '>', 'avg', 'having', 'max', 'in', '<', 'sum', 't5', 'intersect', 'not', 'min', 'except', 'or', 'asc', 'like', '!', 'union', 'between', 't6', '-', 't7', '+', '/']
            sql_keywords += ['count', 'from', 'value', 'order']
            sql_keywords += ['group_by', 'order_by', 'limit_value', '!=']
            skip_tokens = list((set(column_names_surface_form) - set(sql_keywords)))
            if (params.data_directory == 'processed_data_sparc_removefrom_test'):
                # Test-only mode: use a fixed, ordered output vocabulary
                # instead of one induced from the data.
                all_output_seqs = []
                out_vocab_ordered = ['select', 'value', ')', '(', 'where', '=', ',', 'count', 'group_by', 'order_by', 'limit_value', 'desc', '>', 'distinct', 'avg', 'and', 'having', '<', 'in', 'max', 'sum', 'asc', 'like', 'not', 'or', 'min', 'intersect', 'except', '!=', 'union', 'between', '-', '+']
                for i in range(len(out_vocab_ordered)):
                    all_output_seqs.append(out_vocab_ordered[:(i + 1)])
            self.output_vocabulary = ATISVocabulary(all_output_seqs, os.path.join(params.data_directory, params.output_vocabulary_filename), params, is_input='output', anonymizer=(self.anonymizer if params.anonymization_scoring else None), skip=skip_tokens)
        else:
            # Original ATIS layout: vocabularies from train only; the other
            # splits are loaded only when the relevant flags are set.
            self.train_data = ds.DatasetSplit(os.path.join(params.data_directory, params.processed_train_filename), params.raw_train_filename, int_load_function)
            if params.train:
                self.valid_data = ds.DatasetSplit(os.path.join(params.data_directory, params.processed_validation_filename), params.raw_validation_filename, int_load_function)
            if (params.evaluate or params.attention):
                self.dev_data = ds.DatasetSplit(os.path.join(params.data_directory, params.processed_dev_filename), params.raw_dev_filename, int_load_function)
            if params.enable_testing:
                self.test_data = ds.DatasetSplit(os.path.join(params.data_directory, params.processed_test_filename), params.raw_test_filename, int_load_function)
            train_input_seqs = []
            train_input_seqs = collapse_list(self.train_data.get_ex_properties((lambda i: i.input_seqs())))
            self.input_vocabulary = ATISVocabulary(train_input_seqs, os.path.join(params.data_directory, params.input_vocabulary_filename), params, is_input='input', min_occur=2, anonymizer=(self.anonymizer if params.anonymization_scoring else None))
            train_output_seqs = collapse_list(self.train_data.get_ex_properties((lambda i: i.output_seqs())))
            self.output_vocabulary = ATISVocabulary(train_output_seqs, os.path.join(params.data_directory, params.output_vocabulary_filename), params, is_input='output', anonymizer=(self.anonymizer if params.anonymization_scoring else None))
            self.output_vocabulary_schema = None
def read_database_schema_simple(self, database_schema_filename):
with open(database_schema_filename, 'r') as f:
database_schema = json.load(f)
database_schema_dict = {}
column_names_surface_form = []
column_names_embedder_input = []
for table_schema in database_schema:
db_id = table_schema['db_id']
database_schema_dict[db_id] = table_schema
column_names = table_schema['column_names']
column_names_original = table_schema['column_names_original']
table_names = table_schema['table_names']
table_names_original = table_schema['table_names_original']
for (i, (table_id, column_name)) in enumerate(column_names_original):
column_name_surface_form = column_name
column_names_surface_form.append(column_name_surface_form.lower())
for table_name in table_names_original:
column_names_surface_form.append(table_name.lower())
for (i, (table_id, column_name)) in enumerate(column_names):
column_name_embedder_input = column_name
column_names_embedder_input.append(column_name_embedder_input.split())
for table_name in table_names:
column_names_embedder_input.append(table_name.split())
database_schema = database_schema_dict
return (database_schema, column_names_surface_form, column_names_embedder_input)
def read_database_schema(self, database_schema_filename):
with open(database_schema_filename, 'r') as f:
database_schema = json.load(f)
database_schema_dict = {}
column_names_surface_form = []
column_names_embedder_input = []
for table_schema in database_schema:
db_id = table_schema['db_id']
database_schema_dict[db_id] = table_schema
column_names = table_schema['column_names']
column_names_original = table_schema['column_names_original']
table_names = table_schema['table_names']
table_names_original = table_schema['table_names_original']
for (i, (table_id, column_name)) in enumerate(column_names_original):
if (table_id >= 0):
table_name = table_names_original[table_id]
column_name_surface_form = '{}.{}'.format(table_name, column_name)
else:
column_name_surface_form = column_name
column_names_surface_form.append(column_name_surface_form.lower())
for table_name in table_names_original:
column_names_surface_form.append('{}.*'.format(table_name.lower()))
for (i, (table_id, column_name)) in enumerate(column_names):
if (table_id >= 0):
table_name = table_names[table_id]
column_name_embedder_input = ((table_name + ' . ') + column_name)
else:
column_name_embedder_input = column_name
column_names_embedder_input.append(column_name_embedder_input.split())
for table_name in table_names:
column_name_embedder_input = (table_name + ' . *')
column_names_embedder_input.append(column_name_embedder_input.split())
database_schema = database_schema_dict
return (database_schema, column_names_surface_form, column_names_embedder_input)
def get_all_utterances(self, dataset, max_input_length=float('inf'), max_output_length=float('inf')):
' Returns all utterances in a dataset.'
items = []
for interaction in dataset.examples:
for (i, utterance) in enumerate(interaction.utterances):
if utterance.length_valid(max_input_length, max_output_length):
items.append(atis_batch.UtteranceItem(interaction, i))
return items
def get_all_interactions(self, dataset, max_interaction_length=float('inf'), max_input_length=float('inf'), max_output_length=float('inf'), sorted_by_length=False):
    """Get all interactions in a dataset that fit the length criteria.

    Inputs:
        dataset (ATISDatasetSplit): The dataset to use.
        max_interaction_length (int): Maximum interaction length to keep.
        max_input_length (int): Maximum input sequence length to keep.
        max_output_length (int): Maximum output sequence length to keep.
        sorted_by_length (bool): Whether to order the result longest-first.
    """
    interactions = [
        atis_batch.InteractionItem(interaction, max_input_length, max_output_length, self.entities_dictionary, max_interaction_length)
        for interaction in dataset.examples]
    if not sorted_by_length:
        return interactions
    # Ascending sort then reverse (keeps the original tie ordering exactly).
    return sorted(interactions, key=len)[::-1]
def get_utterance_batches(self, batch_size, max_input_length=float('inf'), max_output_length=float('inf'), randomize=True):
    """Partition the training utterances into batches.

    Inputs:
        batch_size (int): Number of utterance items per batch.
        max_input_length (int): Maximum input length to keep.
        max_output_length (int): Maximum output length to keep.
        randomize (bool): Whether to shuffle the utterances before batching.

    Returns:
        list of atis_batch.UtteranceBatch.
    """
    items = self.get_all_utterances(self.train_data, max_input_length, max_output_length)
    if randomize:
        random.shuffle(items)
    batches = []
    pending = []
    for item in items:
        # Flush a full group before starting the next one.
        if len(pending) >= batch_size:
            batches.append(atis_batch.UtteranceBatch(pending))
            pending = []
        pending.append(item)
    # The trailing (possibly smaller) group always becomes a batch.
    batches.append(atis_batch.UtteranceBatch(pending))
    assert sum(len(batch) for batch in batches) == len(items)
    return batches
def get_interaction_batches(self, batch_size, max_interaction_length=float('inf'), max_input_length=float('inf'), max_output_length=float('inf'), randomize=True):
    """Partition the training interactions into batches.

    Inputs:
        batch_size (int): Number of interaction items per batch.
        max_interaction_length (int): Maximum interaction length to keep.
        max_input_length (int): Maximum input length to keep.
        max_output_length (int): Maximum output length to keep.
        randomize (bool): Whether to shuffle the interactions before batching.

    Returns:
        list of atis_batch.InteractionBatch.
    """
    # When not randomizing, fetch the interactions sorted longest-first.
    items = self.get_all_interactions(self.train_data, max_interaction_length, max_input_length, max_output_length, sorted_by_length=(not randomize))
    if randomize:
        random.shuffle(items)
    batches = []
    pending = []
    for item in items:
        if len(pending) >= batch_size:
            batches.append(atis_batch.InteractionBatch(pending))
            pending = []
        pending.append(item)
    # The trailing (possibly smaller) group always becomes a batch.
    batches.append(atis_batch.InteractionBatch(pending))
    assert sum(len(batch) for batch in batches) == len(items)
    return batches
def get_random_utterances(self, num_samples, max_input_length=float('inf'), max_output_length=float('inf')):
    """Sample up to ``num_samples`` random utterances from the training data.

    Inputs:
        num_samples (int): Number of random utterances to return.
        max_input_length (int): Limit on input length.
        max_output_length (int): Limit on output length.
    """
    pool = self.get_all_utterances(self.train_data, max_input_length, max_output_length)
    random.shuffle(pool)
    return pool[:num_samples]
def get_random_interactions(self, num_samples, max_interaction_length=float('inf'), max_input_length=float('inf'), max_output_length=float('inf')):
    """Sample up to ``num_samples`` random interactions from the training data.

    Inputs:
        num_samples (int): Number of random interactions to return.
        max_interaction_length (int): Limit on interaction length.
        max_input_length (int): Limit on input length.
        max_output_length (int): Limit on output length.
    """
    pool = self.get_all_interactions(self.train_data, max_interaction_length, max_input_length, max_output_length)
    random.shuffle(pool)
    return pool[:num_samples]
|
def num_utterances(dataset):
    """Return the total number of utterances across every interaction in the dataset."""
    total = 0
    for interaction in dataset.examples:
        total += len(interaction)
    return total
|
class ATISVocabulary():
    """Stores the vocabulary for the ATIS data.

    Attributes:
        raw_vocab (Vocabulary): Underlying Vocabulary object.
        tokens (set of str): Set of all of the strings in the vocabulary.
        inorder_tokens (list of str): All tokens, in a strict and unchanging
            order (index == token ID).
    """

    def __init__(self, token_sequences, filename, params, is_input='input', min_occur=1, anonymizer=None, skip=None):
        # Choose the functional token types by vocabulary role; any other
        # role gets no functional types.
        role_to_functional_types = {
            'input': INPUT_FN_TYPES,
            'output': OUTPUT_FN_TYPES,
            'schema': [UNK_TOK],
        }
        functional_types = role_to_functional_types.get(is_input, [])

        def ignore_token(token):
            # Snippet tokens, anonymization tokens, and explicitly skipped
            # tokens never enter the vocabulary.
            return (snippets.is_snippet(token)
                    or (anonymizer and anonymizer.is_anon_tok(token))
                    or (skip and (token in skip)))

        self.raw_vocab = Vocabulary(token_sequences, filename, functional_types=functional_types, min_occur=min_occur, ignore_fn=ignore_token)
        self.tokens = set(self.raw_vocab.token_to_id.keys())
        self.inorder_tokens = self.raw_vocab.id_to_token
        assert len(self.inorder_tokens) == len(self.raw_vocab)

    def __len__(self):
        return len(self.raw_vocab)

    def token_to_id(self, token):
        """Map a token string to its unique integer ID.

        Inputs:
            token (str): The token to look up.

        Returns:
            int, uniquely identifying the token.
        """
        return self.raw_vocab.token_to_id[token]

    def id_to_token(self, identifier):
        """Map a unique integer ID back to its token string.

        Inputs:
            identifier (int): The unique ID.

        Returns:
            string, representing the token.
        """
        return self.raw_vocab.id_to_token[identifier]
|
class DatasetSplit():
    """Stores a split of the ATIS dataset.

    On construction, loads a cached preprocessed pickle if one exists;
    otherwise reads the raw pickle, filters/transforms each example through
    ``load_function``, and writes the preprocessed cache.

    Attributes:
        examples (list of Interaction): The examples in the split.
    """

    def __init__(self, processed_filename, raw_filename, load_function):
        """
        Inputs:
            processed_filename (str): Path of the preprocessed cache pickle.
            raw_filename (str): Path of the raw examples pickle (a list).
            load_function: Callable mapping a raw example to a tuple
                (processed_example, keep); examples with keep=False are dropped.
        """
        if os.path.exists(processed_filename):
            print('Loading preprocessed data from ' + processed_filename)
            with open(processed_filename, 'rb') as infile:
                self.examples = pickle.load(infile)
        else:
            print('Loading raw data from ' + raw_filename + ' and writing to ' + processed_filename)
            # Context managers guarantee the handles are closed even if
            # pickle raises (the original open/close pairs leaked on error).
            with open(raw_filename, 'rb') as infile:
                examples_from_file = pickle.load(infile)
            assert isinstance(examples_from_file, list), (raw_filename + ' does not contain a list of examples')
            self.examples = []
            for example in examples_from_file:
                (obj, keep) = load_function(example)
                if keep:
                    self.examples.append(obj)
            print('Loaded ' + str(len(self.examples)) + ' examples')
            with open(processed_filename, 'wb') as outfile:
                pickle.dump(self.examples, outfile)

    def get_ex_properties(self, function):
        """Apply ``function`` to each example in the split.

        Inputs:
            function: (lambda Interaction -> T): Function to apply to all
                examples.

        Returns:
            list of the return values of the function, in example order.
        """
        return [function(example) for example in self.examples]
|
class NLtoSQLDict():
    """Maps natural-language entity strings to candidate SQL output strings.

    The entity dict file contains, on each line, a JSON dictionary with
    "input" and "output" keys specifying the string for the input and output
    pairs. The idea is that the existence of the key in an input sequence
    likely corresponds to the existence of the value in the output sequence.

    The entity_dict maps keys (input strings) to a list of values (output
    strings) where this property holds. This allows keys to map to multiple
    output strings (e.g. for times).
    """

    def __init__(self, entity_dict_filename):
        """
        Inputs:
            entity_dict_filename (str): Path to a JSON-lines file of
                {"input": ..., "output": ...} records.
        """
        self.entity_dict = {}
        # Context manager closes the file (the original open(...) leaked it).
        with open(entity_dict_filename) as dict_file:
            pairs = [json.loads(line) for line in dict_file]
        for pair in pairs:
            self.entity_dict.setdefault(pair['input'], []).append(pair['output'])

    def get_sql_entities(self, tokenized_nl_string):
        """Get the output-side entities which correspond to the input entities
        in the input sequence.

        Inputs:
            tokenized_nl_string: list of tokens in the input string.

        Returns:
            list of output strings (dictionary matches first, then any
            numeric tokens copied through verbatim).
        """
        assert len(tokenized_nl_string) > 0
        flat_input_string = ' '.join(tokenized_nl_string)
        entities = []
        # A dictionary key matches when it appears on token boundaries:
        # surrounded by spaces, at the end, or at the beginning of the string.
        for (entry, values) in self.entity_dict.items():
            in_middle = ((' ' + entry) + ' ') in flat_input_string
            at_end = flat_input_string.endswith(' ' + entry)
            at_beginning = flat_input_string.startswith(entry + ' ')
            if in_middle or at_end or at_beginning:
                entities.extend(values)
        # Numeric tokens (times, fares, flight numbers) pass through as-is.
        for token in tokenized_nl_string:
            if token.isnumeric():
                entities.append(token)
        return entities
|
class Schema():
    """Represents one database schema (a Spider-style table_schema dict).

    Builds two parallel, index-aligned column vocabularies:
      - column_names_surface_form / _to_id: lowercased SQL-facing names
        ("table.column" and "table.*" in the full variant, bare column
        names in the simple variant).
      - column_names_embedder_input / _to_id: natural-language-facing
        names ("table . column") fed to the column-name embedder.
    """

    def __init__(self, table_schema, simple=False):
        # simple=True builds unqualified column names (used by callers with
        # 'removefrom' data — see load_function); simple=False builds
        # table-qualified names.
        if simple:
            self.helper1(table_schema)
        else:
            self.helper2(table_schema)

    def helper1(self, table_schema):
        """Build the vocabularies from UNQUALIFIED column names, de-duplicating.

        Because bare column names collide across tables, duplicates are
        dropped; column_keep_index / column_keep_index_2 record which
        original indices survived so the embedder-input lists stay aligned
        with the surface-form lists.
        """
        self.table_schema = table_schema
        column_names = table_schema['column_names']
        column_names_original = table_schema['column_names_original']
        table_names = table_schema['table_names']
        table_names_original = table_schema['table_names_original']
        assert ((len(column_names) == len(column_names_original)) and (len(table_names) == len(table_names_original)))
        column_keep_index = []
        self.column_names_surface_form = []
        self.column_names_surface_form_to_id = {}
        # Surface forms from columns: first occurrence of each lowercased
        # name wins; remember its original index in column_keep_index.
        for (i, (table_id, column_name)) in enumerate(column_names_original):
            column_name_surface_form = column_name
            column_name_surface_form = column_name_surface_form.lower()
            if (column_name_surface_form not in self.column_names_surface_form_to_id):
                self.column_names_surface_form.append(column_name_surface_form)
                self.column_names_surface_form_to_id[column_name_surface_form] = (len(self.column_names_surface_form) - 1)
                column_keep_index.append(i)
        column_keep_index_2 = []
        # Surface forms from table names, also de-duplicated against columns.
        for (i, table_name) in enumerate(table_names_original):
            column_name_surface_form = table_name.lower()
            if (column_name_surface_form not in self.column_names_surface_form_to_id):
                self.column_names_surface_form.append(column_name_surface_form)
                self.column_names_surface_form_to_id[column_name_surface_form] = (len(self.column_names_surface_form) - 1)
                column_keep_index_2.append(i)
        self.column_names_embedder_input = []
        self.column_names_embedder_input_to_id = {}
        # Embedder inputs mirror the surviving surface-form entries, in the
        # same order, so index i in both vocabularies names the same column.
        for (i, (table_id, column_name)) in enumerate(column_names):
            column_name_embedder_input = column_name
            if (i in column_keep_index):
                self.column_names_embedder_input.append(column_name_embedder_input)
                self.column_names_embedder_input_to_id[column_name_embedder_input] = (len(self.column_names_embedder_input) - 1)
        for (i, table_name) in enumerate(table_names):
            column_name_embedder_input = table_name
            if (i in column_keep_index_2):
                self.column_names_embedder_input.append(column_name_embedder_input)
                self.column_names_embedder_input_to_id[column_name_embedder_input] = (len(self.column_names_embedder_input) - 1)
        # Both id maps must cover the same contiguous range 0..num_col-1.
        max_id_1 = max((v for (k, v) in self.column_names_surface_form_to_id.items()))
        max_id_2 = max((v for (k, v) in self.column_names_embedder_input_to_id.items()))
        assert ((len(self.column_names_surface_form) - 1) == max_id_2 == max_id_1)
        self.num_col = len(self.column_names_surface_form)

    def helper2(self, table_schema):
        """Build the vocabularies from TABLE-QUALIFIED column names.

        Surface forms are "table.column" (lowercased) plus one "table.*"
        per table; embedder inputs are "table . column" plus "table . *".
        Qualified names are expected to be unique, so after the table
        entries are appended the four containers must all be the same size.
        """
        self.table_schema = table_schema
        column_names = table_schema['column_names']
        column_names_original = table_schema['column_names_original']
        table_names = table_schema['table_names']
        table_names_original = table_schema['table_names_original']
        assert ((len(column_names) == len(column_names_original)) and (len(table_names) == len(table_names_original)))
        column_keep_index = []
        self.column_names_surface_form = []
        self.column_names_surface_form_to_id = {}
        for (i, (table_id, column_name)) in enumerate(column_names_original):
            # table_id == -1 marks the table-less '*' column.
            if (table_id >= 0):
                table_name = table_names_original[table_id]
                column_name_surface_form = '{}.{}'.format(table_name, column_name)
            else:
                column_name_surface_form = column_name
            column_name_surface_form = column_name_surface_form.lower()
            if (column_name_surface_form not in self.column_names_surface_form_to_id):
                self.column_names_surface_form.append(column_name_surface_form)
                self.column_names_surface_form_to_id[column_name_surface_form] = (len(self.column_names_surface_form) - 1)
                column_keep_index.append(i)
        # "table.*" entries get ids directly after the column entries.
        start_i = len(self.column_names_surface_form_to_id)
        for (i, table_name) in enumerate(table_names_original):
            column_name_surface_form = '{}.*'.format(table_name.lower())
            self.column_names_surface_form.append(column_name_surface_form)
            self.column_names_surface_form_to_id[column_name_surface_form] = (i + start_i)
        self.column_names_embedder_input = []
        self.column_names_embedder_input_to_id = {}
        for (i, (table_id, column_name)) in enumerate(column_names):
            if (table_id >= 0):
                table_name = table_names[table_id]
                column_name_embedder_input = ((table_name + ' . ') + column_name)
            else:
                column_name_embedder_input = column_name
            # Only indices that survived the surface-form pass are kept, so
            # the two vocabularies stay index-aligned.
            if (i in column_keep_index):
                self.column_names_embedder_input.append(column_name_embedder_input)
                self.column_names_embedder_input_to_id[column_name_embedder_input] = (len(self.column_names_embedder_input) - 1)
        start_i = len(self.column_names_embedder_input_to_id)
        for (i, table_name) in enumerate(table_names):
            column_name_embedder_input = (table_name + ' . *')
            self.column_names_embedder_input.append(column_name_embedder_input)
            self.column_names_embedder_input_to_id[column_name_embedder_input] = (i + start_i)
        assert (len(self.column_names_surface_form) == len(self.column_names_surface_form_to_id) == len(self.column_names_embedder_input) == len(self.column_names_embedder_input_to_id))
        max_id_1 = max((v for (k, v) in self.column_names_surface_form_to_id.items()))
        max_id_2 = max((v for (k, v) in self.column_names_embedder_input_to_id.items()))
        assert ((len(self.column_names_surface_form) - 1) == max_id_2 == max_id_1)
        self.num_col = len(self.column_names_surface_form)

    def __len__(self):
        # Number of distinct columns (including the per-table '*' entries).
        return self.num_col

    def in_vocabulary(self, column_name, surface_form=False):
        """Return True if ``column_name`` is a known column, looked up in the
        surface-form vocabulary when surface_form=True, else in the
        embedder-input vocabulary."""
        if surface_form:
            return (column_name in self.column_names_surface_form_to_id)
        else:
            return (column_name in self.column_names_embedder_input_to_id)

    def column_name_embedder_bow(self, column_name, surface_form=False, column_name_token_embedder=None):
        """Embed a column name as the mean (bag-of-words) of its token
        embeddings produced by ``column_name_token_embedder``.

        A surface-form name is first translated to its aligned
        embedder-input string via the shared id.
        """
        assert self.in_vocabulary(column_name, surface_form)
        if surface_form:
            column_name_id = self.column_names_surface_form_to_id[column_name]
            column_name_embedder_input = self.column_names_embedder_input[column_name_id]
        else:
            column_name_embedder_input = column_name
        column_name_embeddings = [column_name_token_embedder(token) for token in column_name_embedder_input.split()]
        column_name_embeddings = torch.stack(column_name_embeddings, dim=0)
        return torch.mean(column_name_embeddings, dim=0)

    def set_column_name_embeddings(self, column_name_embeddings):
        """Cache precomputed column embeddings; must supply one per column."""
        self.column_name_embeddings = column_name_embeddings
        assert (len(self.column_name_embeddings) == self.num_col)

    def column_name_embedder(self, column_name, surface_form=False):
        """Look up the cached embedding for ``column_name``.

        Requires a prior call to set_column_name_embeddings.
        """
        assert self.in_vocabulary(column_name, surface_form)
        if surface_form:
            column_name_id = self.column_names_surface_form_to_id[column_name]
        else:
            column_name_id = self.column_names_embedder_input_to_id[column_name]
        return self.column_name_embeddings[column_name_id]
|
class Interaction():
    """ATIS interaction class.

    Attributes:
        utterances (list of Utterance): The utterances in the interaction.
        schema (Schema): Schema for the interaction's database (may be None).
        snippets (list of Snippet): Snippets that appear through the interaction.
        anon_tok_to_ent (dict): Maps anonymization tokens to entities.
        identifier (str): Unique identifier for the interaction in the dataset.
    """

    def __init__(self, utterances, schema, snippets, anon_tok_to_ent, identifier, params):
        self.utterances = utterances
        self.schema = schema
        self.snippets = snippets
        self.anon_tok_to_ent = anon_tok_to_ent
        self.identifier = identifier
        # Sanity check: every utterance must round-trip through
        # deanonymization (and snippet expansion for the gold query).
        for utterance in self.utterances:
            deanon_input = self.deanonymize(utterance.input_seq_to_use, ANON_INPUT_KEY)
            assert deanon_input == utterance.original_input_seq, (
                f"Anonymized sequence [{' '.join(utterance.input_seq_to_use)}]"
                f" is not the same as [{' '.join(utterance.original_input_seq)}]"
                f" when deanonymized (is [{' '.join(deanon_input)}] instead)")
            desnippet_gold = self.expand_snippets(utterance.gold_query_to_use)
            deanon_gold = self.deanonymize(desnippet_gold, OUTPUT_KEY)
            assert deanon_gold == utterance.original_gold_query, (
                f"Anonymized and/or snippet'd query {' '.join(utterance.gold_query_to_use)}"
                f" is not the same as {' '.join(utterance.original_gold_query)}")

    def __str__(self):
        lines = ['Utterances:']
        lines.extend(str(utterance) for utterance in self.utterances)
        lines.append('Anonymization dictionary:')
        lines.extend(ent_tok + '\t' + str(deanon)
                     for ent_tok, deanon in self.anon_tok_to_ent.items())
        return '\n'.join(lines) + '\n'

    def __len__(self):
        return len(self.utterances)

    def deanonymize(self, sequence, key):
        """Deanonymize a predicted query or an input utterance.

        Inputs:
            sequence (list of str): The sequence to deanonymize.
            key (str): The key in the anonymization table, e.g. NL or SQL.
        """
        return anon.deanonymize(sequence, self.anon_tok_to_ent, key)

    def expand_snippets(self, sequence):
        """Expand snippet tokens in a SQL query sequence.

        Inputs:
            sequence (list of str): A SQL query.
        """
        return expand_snippets(sequence, self.snippets)

    def input_seqs(self):
        """Return the in-use input sequence of every utterance, in order."""
        return [utterance.input_seq_to_use for utterance in self.utterances]

    def output_seqs(self):
        """Return the in-use gold query of every utterance, in order."""
        return [utterance.gold_query_to_use for utterance in self.utterances]
|
def load_function(parameters, nl_to_sql_dict, anonymizer, database_schema=None):
    """Build the per-example preprocessing function passed to DatasetSplit.

    Inputs:
        parameters: Run configuration; data_directory and use_snippets are read.
        nl_to_sql_dict (NLtoSQLDict): NL-to-SQL entity dictionary.
        anonymizer: Anonymizer applied to each utterance.
        database_schema (dict, optional): Maps db_id -> raw table_schema.

    Returns:
        Function mapping a raw interaction example to (Interaction, keep),
        where keep is True if at least one utterance was retained.
    """
    def fn(interaction_example):
        keep = False
        raw_utterances = interaction_example['interaction']
        if ('database_id' in interaction_example):
            database_id = interaction_example['database_id']
            interaction_id = interaction_example['interaction_id']
            identifier = ((database_id + '/') + str(interaction_id))
        else:
            identifier = interaction_example['id']
        schema = None
        if database_schema:
            # NOTE(review): if database_schema is given but the example has no
            # 'database_id' key, database_id is unbound here and this raises
            # NameError — presumably schema-bearing datasets always carry
            # database_id; confirm against the callers.
            if ('removefrom' not in parameters.data_directory):
                # Simple schemas use bare column names (queries keep FROM).
                schema = Schema(database_schema[database_id], simple=True)
            else:
                schema = Schema(database_schema[database_id])
        snippet_bank = []
        utterance_examples = []
        anon_tok_to_ent = {}
        for utterance in raw_utterances:
            # Only recently-created snippets are offered to the next
            # utterance (snippet.index appears to track age; confirm
            # against the Snippet class).
            available_snippets = [snippet for snippet in snippet_bank if (snippet.index <= 1)]
            proc_utterance = Utterance(utterance, available_snippets, nl_to_sql_dict, parameters, anon_tok_to_ent, anonymizer)
            keep_utterance = proc_utterance.keep
            if schema:
                # Schema-bearing (Spider-style) data is expected to keep
                # every utterance.
                assert keep_utterance
            if keep_utterance:
                keep = True
                utterance_examples.append(proc_utterance)
                if parameters.use_snippets:
                    # ATIS uses full subtree extraction; other datasets use
                    # the simplified variant.
                    if ('atis' in parameters.data_directory):
                        snippets = sql_util.get_subtrees(proc_utterance.anonymized_gold_query, proc_utterance.available_snippets)
                    else:
                        snippets = sql_util.get_subtrees_simple(proc_utterance.anonymized_gold_query, proc_utterance.available_snippets)
                    for snippet in snippets:
                        snippet.assign_id(len(snippet_bank))
                        snippet_bank.append(snippet)
            # Age every snippet after each utterance, new ones included.
            for snippet in snippet_bank:
                snippet.increase_age()
        interaction = Interaction(utterance_examples, schema, snippet_bank, anon_tok_to_ent, identifier, parameters)
        return (interaction, keep)
    return fn
|
def is_snippet(token):
    """Determine whether a token is a snippet token.

    Inputs:
        token (str): The token to check.

    Returns:
        bool, True when the token carries the snippet prefix.
    """
    prefix = SNIPPET_PREFIX
    return token.startswith(prefix)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.