# File: KnowledgeFactor-main/cls/tools/deployment/pytorch2onnx.py
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from functools import partial
import mmcv
import numpy as np
import onnxruntime as rt
import torch
from mmcv.onnx import register_extra_symbolics
from mmcv.runner import load_checkpoint
from mmcls.models import build_classifier
torch.manual_seed(3)
def _demo_mm_inputs(input_shape, num_classes):
"""Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple):
input batch dimensions
num_classes (int):
number of semantic classes
"""
(N, C, H, W) = input_shape
rng = np.random.RandomState(0)
imgs = rng.rand(*input_shape)
gt_labels = rng.randint(
low=0, high=num_classes, size=(N, 1)).astype(np.uint8)
mm_inputs = {
'imgs': torch.FloatTensor(imgs).requires_grad_(True),
'gt_labels': torch.LongTensor(gt_labels),
}
return mm_inputs
def pytorch2onnx(model,
input_shape,
opset_version=11,
dynamic_export=False,
show=False,
output_file='tmp.onnx',
do_simplify=False,
verify=False):
"""Export Pytorch model to ONNX model and verify the outputs are same
between Pytorch and ONNX.
Args:
model (nn.Module): Pytorch model we want to export.
input_shape (tuple): Use this input shape to construct
the corresponding dummy input and execute the model.
opset_version (int): The onnx op version. Default: 11.
show (bool): Whether print the computation graph. Default: False.
output_file (string): The path to where we store the output ONNX model.
Default: `tmp.onnx`.
verify (bool): Whether compare the outputs between Pytorch and ONNX.
Default: False.
"""
model.cpu().eval()
num_classes = model.head.num_classes
mm_inputs = _demo_mm_inputs(input_shape, num_classes)
imgs = mm_inputs.pop('imgs')
img_list = [img[None, :] for img in imgs]
# replace original forward function
origin_forward = model.forward
model.forward = partial(model.forward, img_metas={}, return_loss=False)
register_extra_symbolics(opset_version)
# support dynamic shape export
if dynamic_export:
dynamic_axes = {
'input': {
0: 'batch',
                2: 'height',
                3: 'width'
},
'probs': {
0: 'batch'
}
}
else:
dynamic_axes = {}
with torch.no_grad():
torch.onnx.export(
model, (img_list, ),
output_file,
input_names=['input'],
output_names=['probs'],
export_params=True,
keep_initializers_as_inputs=True,
dynamic_axes=dynamic_axes,
verbose=show,
opset_version=opset_version)
print(f'Successfully exported ONNX model: {output_file}')
model.forward = origin_forward
if do_simplify:
from mmcv import digit_version
import onnxsim
import onnx
min_required_version = '0.3.0'
        assert digit_version(onnxsim.__version__) >= digit_version(
            min_required_version
        ), f'Requires to install onnx-simplifier>={min_required_version}'
if dynamic_axes:
input_shape = (input_shape[0], input_shape[1], input_shape[2] * 2,
input_shape[3] * 2)
else:
input_shape = (input_shape[0], input_shape[1], input_shape[2],
input_shape[3])
imgs = _demo_mm_inputs(input_shape, model.head.num_classes).pop('imgs')
input_dic = {'input': imgs.detach().cpu().numpy()}
input_shape_dic = {'input': list(input_shape)}
model_opt, check_ok = onnxsim.simplify(
output_file,
input_shapes=input_shape_dic,
input_data=input_dic,
dynamic_input_shape=dynamic_export)
if check_ok:
onnx.save(model_opt, output_file)
print(f'Successfully simplified ONNX model: {output_file}')
else:
print('Failed to simplify ONNX model.')
if verify:
# check by onnx
import onnx
onnx_model = onnx.load(output_file)
onnx.checker.check_model(onnx_model)
# test the dynamic model
if dynamic_export:
dynamic_test_inputs = _demo_mm_inputs(
(input_shape[0], input_shape[1], input_shape[2] * 2,
input_shape[3] * 2), model.head.num_classes)
imgs = dynamic_test_inputs.pop('imgs')
img_list = [img[None, :] for img in imgs]
# check the numerical value
# get pytorch output
pytorch_result = model(img_list, img_metas={}, return_loss=False)[0]
# get onnx output
input_all = [node.name for node in onnx_model.graph.input]
input_initializer = [
node.name for node in onnx_model.graph.initializer
]
net_feed_input = list(set(input_all) - set(input_initializer))
assert (len(net_feed_input) == 1)
sess = rt.InferenceSession(output_file)
onnx_result = sess.run(
None, {net_feed_input[0]: img_list[0].detach().numpy()})[0]
        if not np.allclose(pytorch_result, onnx_result):
            raise ValueError(
                'The outputs are different between PyTorch and ONNX')
        print('The outputs are the same between PyTorch and ONNX')
def parse_args():
parser = argparse.ArgumentParser(description='Convert MMCls to ONNX')
parser.add_argument('config', help='test config file path')
parser.add_argument('--checkpoint', help='checkpoint file', default=None)
parser.add_argument('--show', action='store_true', help='show onnx graph')
parser.add_argument(
'--verify', action='store_true', help='verify the onnx model')
parser.add_argument('--output-file', type=str, default='tmp.onnx')
parser.add_argument('--opset-version', type=int, default=11)
parser.add_argument(
'--simplify',
action='store_true',
help='Whether to simplify onnx model.')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[224, 224],
help='input image size')
parser.add_argument(
'--dynamic-export',
action='store_true',
help='Whether to export ONNX with dynamic input shape. \
Defaults to False.')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
if len(args.shape) == 1:
input_shape = (1, 3, args.shape[0], args.shape[0])
elif len(args.shape) == 2:
input_shape = (
1,
3,
) + tuple(args.shape)
else:
raise ValueError('invalid input shape')
cfg = mmcv.Config.fromfile(args.config)
cfg.model.pretrained = None
# build the model and load checkpoint
classifier = build_classifier(cfg.model)
if args.checkpoint:
load_checkpoint(classifier, args.checkpoint, map_location='cpu')
    # convert the model to an ONNX file
pytorch2onnx(
classifier,
input_shape,
opset_version=args.opset_version,
show=args.show,
dynamic_export=args.dynamic_export,
output_file=args.output_file,
do_simplify=args.simplify,
verify=args.verify)
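# Example invocation (a sketch; the config and checkpoint paths below are
# hypothetical and should be replaced with real files):
#   python tools/deployment/pytorch2onnx.py \
#       configs/resnet/resnet18_b32x8_imagenet.py \
#       --checkpoint work_dirs/resnet18/latest.pth \
#       --output-file resnet18.onnx --shape 224 224 --verify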
# File: KnowledgeFactor-main/cls/tools/deployment/mmcls_handler.py
# Copyright (c) OpenMMLab. All rights reserved.
import base64
import os
import mmcv
import torch
from ts.torch_handler.base_handler import BaseHandler
from mmcls.apis import inference_model, init_model
class MMclsHandler(BaseHandler):
def initialize(self, context):
properties = context.system_properties
self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
        if torch.cuda.is_available():
            self.device = torch.device(
                f'{self.map_location}:{properties.get("gpu_id")}')
        else:
            self.device = torch.device(self.map_location)
self.manifest = context.manifest
model_dir = properties.get('model_dir')
serialized_file = self.manifest['model']['serializedFile']
checkpoint = os.path.join(model_dir, serialized_file)
self.config_file = os.path.join(model_dir, 'config.py')
self.model = init_model(self.config_file, checkpoint, self.device)
self.initialized = True
def preprocess(self, data):
images = []
for row in data:
image = row.get('data') or row.get('body')
if isinstance(image, str):
image = base64.b64decode(image)
image = mmcv.imfrombytes(image)
images.append(image)
return images
def inference(self, data, *args, **kwargs):
results = []
for image in data:
results.append(inference_model(self.model, image))
return results
def postprocess(self, data):
for result in data:
result['pred_label'] = int(result['pred_label'])
return data
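# Packaging sketch for TorchServe (hedged; the model name and file names are
# illustrative, not part of this repo). The handler above is passed via
# --handler:
#   torch-model-archiver --model-name mmcls_model --version 1.0 \
#       --serialized-file checkpoint.pth --extra-files config.py \
#       --handler mmcls_handler.py --export-path model_store
#   torchserve --start --model-store model_store --models mmcls_model.mar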
# File: KnowledgeFactor-main/cls/tools/deployment/pytorch2torchscript.py
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from functools import partial
import mmcv
import numpy as np
import torch
from mmcv.runner import load_checkpoint
from torch import nn
from mmcls.models import build_classifier
torch.manual_seed(3)
def _demo_mm_inputs(input_shape: tuple, num_classes: int):
"""Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple):
input batch dimensions
num_classes (int):
number of semantic classes
"""
(N, C, H, W) = input_shape
rng = np.random.RandomState(0)
imgs = rng.rand(*input_shape)
gt_labels = rng.randint(
low=0, high=num_classes, size=(N, 1)).astype(np.uint8)
mm_inputs = {
'imgs': torch.FloatTensor(imgs).requires_grad_(False),
'gt_labels': torch.LongTensor(gt_labels),
}
return mm_inputs
def pytorch2torchscript(model: nn.Module, input_shape: tuple, output_file: str,
verify: bool):
"""Export Pytorch model to TorchScript model through torch.jit.trace and
verify the outputs are same between Pytorch and TorchScript.
Args:
model (nn.Module): Pytorch model we want to export.
input_shape (tuple): Use this input shape to construct
the corresponding dummy input and execute the model.
show (bool): Whether print the computation graph. Default: False.
output_file (string): The path to where we store the output
TorchScript model.
verify (bool): Whether compare the outputs between Pytorch
and TorchScript through loading generated output_file.
"""
model.cpu().eval()
num_classes = model.head.num_classes
mm_inputs = _demo_mm_inputs(input_shape, num_classes)
imgs = mm_inputs.pop('imgs')
img_list = [img[None, :] for img in imgs]
# replace original forward function
origin_forward = model.forward
model.forward = partial(model.forward, img_metas={}, return_loss=False)
with torch.no_grad():
trace_model = torch.jit.trace(model, img_list[0])
save_dir, _ = osp.split(output_file)
if save_dir:
os.makedirs(save_dir, exist_ok=True)
trace_model.save(output_file)
print(f'Successfully exported TorchScript model: {output_file}')
model.forward = origin_forward
if verify:
# load by torch.jit
jit_model = torch.jit.load(output_file)
# check the numerical value
# get pytorch output
pytorch_result = model(img_list, img_metas={}, return_loss=False)[0]
# get jit output
jit_result = jit_model(img_list[0])[0].detach().numpy()
        if not np.allclose(pytorch_result, jit_result):
            raise ValueError(
                'The outputs are different between PyTorch and TorchScript')
        print('The outputs are the same between PyTorch and TorchScript')
def parse_args():
parser = argparse.ArgumentParser(
description='Convert MMCls to TorchScript')
parser.add_argument('config', help='test config file path')
parser.add_argument('--checkpoint', help='checkpoint file', type=str)
parser.add_argument(
'--verify',
action='store_true',
help='verify the TorchScript model',
default=False)
parser.add_argument('--output-file', type=str, default='tmp.pt')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[224, 224],
help='input image size')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
if len(args.shape) == 1:
input_shape = (1, 3, args.shape[0], args.shape[0])
elif len(args.shape) == 2:
input_shape = (
1,
3,
) + tuple(args.shape)
else:
raise ValueError('invalid input shape')
cfg = mmcv.Config.fromfile(args.config)
cfg.model.pretrained = None
# build the model and load checkpoint
classifier = build_classifier(cfg.model)
if args.checkpoint:
load_checkpoint(classifier, args.checkpoint, map_location='cpu')
    # convert the model to a TorchScript file
pytorch2torchscript(
classifier,
input_shape,
output_file=args.output_file,
verify=args.verify)
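# Example invocation (a sketch; paths are hypothetical):
#   python tools/deployment/pytorch2torchscript.py \
#       configs/resnet/resnet18_b32x8_imagenet.py \
#       --checkpoint work_dirs/resnet18/latest.pth \
#       --output-file resnet18.pt --verify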
# File: KnowledgeFactor-main/cls/tools/convert_models/mobilenetv2_to_mmcls.py
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import torch
def convert_conv1(model_key, model_weight, state_dict, converted_names):
if model_key.find('features.0.0') >= 0:
new_key = model_key.replace('features.0.0', 'backbone.conv1.conv')
else:
new_key = model_key.replace('features.0.1', 'backbone.conv1.bn')
state_dict[new_key] = model_weight
converted_names.add(model_key)
print(f'Convert {model_key} to {new_key}')
def convert_conv5(model_key, model_weight, state_dict, converted_names):
if model_key.find('features.18.0') >= 0:
new_key = model_key.replace('features.18.0', 'backbone.conv2.conv')
else:
new_key = model_key.replace('features.18.1', 'backbone.conv2.bn')
state_dict[new_key] = model_weight
converted_names.add(model_key)
print(f'Convert {model_key} to {new_key}')
def convert_head(model_key, model_weight, state_dict, converted_names):
new_key = model_key.replace('classifier.1', 'head.fc')
state_dict[new_key] = model_weight
converted_names.add(model_key)
print(f'Convert {model_key} to {new_key}')
def convert_block(model_key, model_weight, state_dict, converted_names):
split_keys = model_key.split('.')
layer_id = int(split_keys[1])
new_layer_id = 0
sub_id = 0
if layer_id == 1:
new_layer_id = 1
sub_id = 0
elif layer_id in range(2, 4):
new_layer_id = 2
sub_id = layer_id - 2
elif layer_id in range(4, 7):
new_layer_id = 3
sub_id = layer_id - 4
elif layer_id in range(7, 11):
new_layer_id = 4
sub_id = layer_id - 7
elif layer_id in range(11, 14):
new_layer_id = 5
sub_id = layer_id - 11
elif layer_id in range(14, 17):
new_layer_id = 6
sub_id = layer_id - 14
elif layer_id == 17:
new_layer_id = 7
sub_id = 0
new_key = model_key.replace(f'features.{layer_id}',
f'backbone.layer{new_layer_id}.{sub_id}')
if new_layer_id == 1:
if new_key.find('conv.0.0') >= 0:
new_key = new_key.replace('conv.0.0', 'conv.0.conv')
elif new_key.find('conv.0.1') >= 0:
new_key = new_key.replace('conv.0.1', 'conv.0.bn')
elif new_key.find('conv.1') >= 0:
new_key = new_key.replace('conv.1', 'conv.1.conv')
elif new_key.find('conv.2') >= 0:
new_key = new_key.replace('conv.2', 'conv.1.bn')
else:
raise ValueError(f'Unsupported conversion of key {model_key}')
else:
if new_key.find('conv.0.0') >= 0:
new_key = new_key.replace('conv.0.0', 'conv.0.conv')
elif new_key.find('conv.0.1') >= 0:
new_key = new_key.replace('conv.0.1', 'conv.0.bn')
elif new_key.find('conv.1.0') >= 0:
new_key = new_key.replace('conv.1.0', 'conv.1.conv')
elif new_key.find('conv.1.1') >= 0:
new_key = new_key.replace('conv.1.1', 'conv.1.bn')
elif new_key.find('conv.2') >= 0:
new_key = new_key.replace('conv.2', 'conv.2.conv')
elif new_key.find('conv.3') >= 0:
new_key = new_key.replace('conv.3', 'conv.2.bn')
else:
raise ValueError(f'Unsupported conversion of key {model_key}')
print(f'Convert {model_key} to {new_key}')
state_dict[new_key] = model_weight
converted_names.add(model_key)
def convert(src, dst):
"""Convert keys in torchvision pretrained MobileNetV2 models to mmcls
style."""
# load pytorch model
blobs = torch.load(src, map_location='cpu')
# convert to pytorch style
state_dict = OrderedDict()
converted_names = set()
for key, weight in blobs.items():
if 'features.0' in key:
convert_conv1(key, weight, state_dict, converted_names)
elif 'classifier' in key:
convert_head(key, weight, state_dict, converted_names)
elif 'features.18' in key:
convert_conv5(key, weight, state_dict, converted_names)
else:
convert_block(key, weight, state_dict, converted_names)
# check if all layers are converted
for key in blobs:
if key not in converted_names:
print(f'not converted: {key}')
# save checkpoint
checkpoint = dict()
checkpoint['state_dict'] = state_dict
torch.save(checkpoint, dst)
def main():
parser = argparse.ArgumentParser(description='Convert model keys')
    parser.add_argument('src', help='src torchvision model path')
parser.add_argument('dst', help='save path')
args = parser.parse_args()
convert(args.src, args.dst)
if __name__ == '__main__':
main()
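# Example invocation (a sketch; the source checkpoint name follows the
# published torchvision mobilenet_v2 weights and may differ per release):
#   python tools/convert_models/mobilenetv2_to_mmcls.py \
#       mobilenet_v2-b0353104.pth mobilenet_v2_mmcls.pth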
# File: KnowledgeFactor-main/cls/tools/convert_models/vgg_to_mmcls.py
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
from collections import OrderedDict
import torch
def get_layer_maps(layer_num, with_bn):
layer_maps = {'conv': {}, 'bn': {}}
if with_bn:
if layer_num == 11:
layer_idxs = [0, 4, 8, 11, 15, 18, 22, 25]
elif layer_num == 13:
layer_idxs = [0, 3, 7, 10, 14, 17, 21, 24, 28, 31]
elif layer_num == 16:
layer_idxs = [0, 3, 7, 10, 14, 17, 20, 24, 27, 30, 34, 37, 40]
elif layer_num == 19:
layer_idxs = [
0, 3, 7, 10, 14, 17, 20, 23, 27, 30, 33, 36, 40, 43, 46, 49
]
else:
raise ValueError(f'Invalid number of layers: {layer_num}')
for i, layer_idx in enumerate(layer_idxs):
if i == 0:
new_layer_idx = layer_idx
else:
new_layer_idx += int((layer_idx - layer_idxs[i - 1]) / 2)
layer_maps['conv'][layer_idx] = new_layer_idx
layer_maps['bn'][layer_idx + 1] = new_layer_idx
else:
if layer_num == 11:
layer_idxs = [0, 3, 6, 8, 11, 13, 16, 18]
new_layer_idxs = [0, 2, 4, 5, 7, 8, 10, 11]
elif layer_num == 13:
layer_idxs = [0, 2, 5, 7, 10, 12, 15, 17, 20, 22]
new_layer_idxs = [0, 1, 3, 4, 6, 7, 9, 10, 12, 13]
elif layer_num == 16:
layer_idxs = [0, 2, 5, 7, 10, 12, 14, 17, 19, 21, 24, 26, 28]
new_layer_idxs = [0, 1, 3, 4, 6, 7, 8, 10, 11, 12, 14, 15, 16]
elif layer_num == 19:
layer_idxs = [
0, 2, 5, 7, 10, 12, 14, 16, 19, 21, 23, 25, 28, 30, 32, 34
]
new_layer_idxs = [
0, 1, 3, 4, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17, 18, 19
]
else:
raise ValueError(f'Invalid number of layers: {layer_num}')
layer_maps['conv'] = {
layer_idx: new_layer_idx
for layer_idx, new_layer_idx in zip(layer_idxs, new_layer_idxs)
}
return layer_maps
def convert(src, dst, layer_num, with_bn=False):
"""Convert keys in torchvision pretrained VGG models to mmcls style."""
# load pytorch model
assert os.path.isfile(src), f'no checkpoint found at {src}'
blobs = torch.load(src, map_location='cpu')
# convert to pytorch style
state_dict = OrderedDict()
layer_maps = get_layer_maps(layer_num, with_bn)
prefix = 'backbone'
delimiter = '.'
for key, weight in blobs.items():
if 'features' in key:
module, layer_idx, weight_type = key.split(delimiter)
new_key = delimiter.join([prefix, key])
layer_idx = int(layer_idx)
for layer_key, maps in layer_maps.items():
if layer_idx in maps:
new_layer_idx = maps[layer_idx]
new_key = delimiter.join([
prefix, 'features',
str(new_layer_idx), layer_key, weight_type
])
state_dict[new_key] = weight
print(f'Convert {key} to {new_key}')
elif 'classifier' in key:
new_key = delimiter.join([prefix, key])
state_dict[new_key] = weight
print(f'Convert {key} to {new_key}')
else:
state_dict[key] = weight
# save checkpoint
checkpoint = dict()
checkpoint['state_dict'] = state_dict
torch.save(checkpoint, dst)
def main():
parser = argparse.ArgumentParser(description='Convert model keys')
parser.add_argument('src', help='src torchvision model path')
parser.add_argument('dst', help='save path')
parser.add_argument(
'--bn', action='store_true', help='whether original vgg has BN')
parser.add_argument(
'--layer_num',
type=int,
choices=[11, 13, 16, 19],
default=11,
help='number of VGG layers')
args = parser.parse_args()
convert(args.src, args.dst, layer_num=args.layer_num, with_bn=args.bn)
if __name__ == '__main__':
main()
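# Example invocation (a sketch; paths are hypothetical). --layer_num selects
# the VGG depth and --bn must match whether the source model uses BN:
#   python tools/convert_models/vgg_to_mmcls.py \
#       vgg16_bn.pth vgg16_bn_mmcls.pth --layer_num 16 --bn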
# File: KnowledgeFactor-main/cls/tools/convert_models/publish_model.py
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import datetime
import os
import subprocess
import torch
from mmcv import digit_version
def parse_args():
parser = argparse.ArgumentParser(
description='Process a checkpoint to be published')
parser.add_argument('in_file', help='input checkpoint filename')
parser.add_argument('out_file', help='output checkpoint filename')
args = parser.parse_args()
return args
def process_checkpoint(in_file, out_file):
checkpoint = torch.load(in_file, map_location='cpu')
# remove optimizer for smaller file size
if 'optimizer' in checkpoint:
del checkpoint['optimizer']
# if it is necessary to remove some sensitive data in checkpoint['meta'],
# add the code here.
if digit_version(torch.__version__) >= digit_version('1.6'):
torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
else:
torch.save(checkpoint, out_file)
sha = subprocess.check_output(['sha256sum', out_file]).decode()
if out_file.endswith('.pth'):
out_file_name = out_file[:-4]
else:
out_file_name = out_file
current_date = datetime.datetime.now().strftime('%Y%m%d')
final_file = out_file_name + f'_{current_date}-{sha[:8]}.pth'
subprocess.Popen(['mv', out_file, final_file])
print(f'Successfully generated the publish-ckpt as {final_file}.')
def main():
args = parse_args()
out_dir = os.path.dirname(args.out_file)
if not os.path.exists(out_dir):
raise ValueError(f'Directory {out_dir} does not exist, '
'please generate it manually.')
process_checkpoint(args.in_file, args.out_file)
if __name__ == '__main__':
main()
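# Example invocation (a sketch; filenames are hypothetical). The output name
# is suffixed with the date and the first 8 hex digits of its sha256:
#   python tools/convert_models/publish_model.py \
#       work_dirs/resnet18/latest.pth resnet18_imagenet.pth
#   # -> resnet18_imagenet_20220101-abcd1234.pth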
# File: KnowledgeFactor-main/cls/tools/convert_models/shufflenetv2_to_mmcls.py
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import torch
def convert_conv1(model_key, model_weight, state_dict, converted_names):
if model_key.find('conv1.0') >= 0:
new_key = model_key.replace('conv1.0', 'backbone.conv1.conv')
else:
new_key = model_key.replace('conv1.1', 'backbone.conv1.bn')
state_dict[new_key] = model_weight
converted_names.add(model_key)
print(f'Convert {model_key} to {new_key}')
def convert_conv5(model_key, model_weight, state_dict, converted_names):
if model_key.find('conv5.0') >= 0:
new_key = model_key.replace('conv5.0', 'backbone.layers.3.conv')
else:
new_key = model_key.replace('conv5.1', 'backbone.layers.3.bn')
state_dict[new_key] = model_weight
converted_names.add(model_key)
print(f'Convert {model_key} to {new_key}')
def convert_head(model_key, model_weight, state_dict, converted_names):
new_key = model_key.replace('fc', 'head.fc')
state_dict[new_key] = model_weight
converted_names.add(model_key)
print(f'Convert {model_key} to {new_key}')
def convert_block(model_key, model_weight, state_dict, converted_names):
split_keys = model_key.split('.')
layer, block, branch = split_keys[:3]
layer_id = int(layer[-1]) - 2
new_key = model_key.replace(layer, f'backbone.layers.{layer_id}')
if branch == 'branch1':
if new_key.find('branch1.0') >= 0:
new_key = new_key.replace('branch1.0', 'branch1.0.conv')
elif new_key.find('branch1.1') >= 0:
new_key = new_key.replace('branch1.1', 'branch1.0.bn')
elif new_key.find('branch1.2') >= 0:
new_key = new_key.replace('branch1.2', 'branch1.1.conv')
elif new_key.find('branch1.3') >= 0:
new_key = new_key.replace('branch1.3', 'branch1.1.bn')
elif branch == 'branch2':
if new_key.find('branch2.0') >= 0:
new_key = new_key.replace('branch2.0', 'branch2.0.conv')
elif new_key.find('branch2.1') >= 0:
new_key = new_key.replace('branch2.1', 'branch2.0.bn')
elif new_key.find('branch2.3') >= 0:
new_key = new_key.replace('branch2.3', 'branch2.1.conv')
elif new_key.find('branch2.4') >= 0:
new_key = new_key.replace('branch2.4', 'branch2.1.bn')
elif new_key.find('branch2.5') >= 0:
new_key = new_key.replace('branch2.5', 'branch2.2.conv')
elif new_key.find('branch2.6') >= 0:
new_key = new_key.replace('branch2.6', 'branch2.2.bn')
else:
raise ValueError(f'Unsupported conversion of key {model_key}')
else:
raise ValueError(f'Unsupported conversion of key {model_key}')
print(f'Convert {model_key} to {new_key}')
state_dict[new_key] = model_weight
converted_names.add(model_key)
def convert(src, dst):
"""Convert keys in torchvision pretrained ShuffleNetV2 models to mmcls
style."""
# load pytorch model
blobs = torch.load(src, map_location='cpu')
# convert to pytorch style
state_dict = OrderedDict()
converted_names = set()
for key, weight in blobs.items():
if 'conv1' in key:
convert_conv1(key, weight, state_dict, converted_names)
elif 'fc' in key:
convert_head(key, weight, state_dict, converted_names)
elif key.startswith('s'):
convert_block(key, weight, state_dict, converted_names)
elif 'conv5' in key:
convert_conv5(key, weight, state_dict, converted_names)
# check if all layers are converted
for key in blobs:
if key not in converted_names:
print(f'not converted: {key}')
# save checkpoint
checkpoint = dict()
checkpoint['state_dict'] = state_dict
torch.save(checkpoint, dst)
def main():
parser = argparse.ArgumentParser(description='Convert model keys')
    parser.add_argument('src', help='src torchvision model path')
parser.add_argument('dst', help='save path')
args = parser.parse_args()
convert(args.src, args.dst)
if __name__ == '__main__':
main()
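# Example invocation (a sketch; the source checkpoint path is hypothetical):
#   python tools/convert_models/shufflenetv2_to_mmcls.py \
#       shufflenetv2_x1.pth shufflenetv2_mmcls.pth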
# File: KnowledgeFactor-main/cls/tools/analysis_tools/analysis_para.py
import argparse
import torch
from mmcv import Config
from prettytable import PrettyTable
from mmcls.models.builder import build_classifier
def count_parameters(model):
table = PrettyTable(["Modules", "Parameters"])
total_params = 0
for name, parameter in model.named_parameters():
if not parameter.requires_grad:
continue
param = parameter.numel()
table.add_row([name, param])
total_params += param
print(table)
print(f"Total Trainable Params: {total_params}")
return total_params
def parse_args():
    parser = argparse.ArgumentParser(
        description='Count the trainable parameters of a model')
parser.add_argument('config', help='train config file path')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
print(cfg)
model = build_classifier(cfg.model)
count_parameters(model)
if __name__ == '__main__':
main()
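# Example invocation (a sketch; the config path is hypothetical):
#   python tools/analysis_tools/analysis_para.py \
#       configs/resnet/resnet18_b32x8_imagenet.py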
# File: KnowledgeFactor-main/cls/mmcls/apis/inference.py
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import mmcv
import numpy as np
import torch
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmcls.datasets.pipelines import Compose
from mmcls.models import build_classifier
def init_model(config, checkpoint=None, device='cuda:0', options=None):
"""Initialize a classifier from config file.
Args:
config (str or :obj:`mmcv.Config`): Config file path or the config
object.
        checkpoint (str, optional): Checkpoint path. If left as None, the
            model will not load any weights.
        device (str): The device where the model runs. Default: 'cuda:0'.
        options (dict): Options to override some settings in the used config.
Returns:
nn.Module: The constructed classifier.
"""
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif not isinstance(config, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
f'but got {type(config)}')
if options is not None:
config.merge_from_dict(options)
config.model.pretrained = None
model = build_classifier(config.model)
if checkpoint is not None:
map_loc = 'cpu' if device == 'cpu' else None
checkpoint = load_checkpoint(model, checkpoint, map_location=map_loc)
if 'CLASSES' in checkpoint.get('meta', {}):
model.CLASSES = checkpoint['meta']['CLASSES']
else:
from mmcls.datasets import ImageNet
warnings.simplefilter('once')
warnings.warn('Class names are not saved in the checkpoint\'s '
'meta data, use imagenet by default.')
model.CLASSES = ImageNet.CLASSES
model.cfg = config # save the config in the model for convenience
model.to(device)
model.eval()
return model
def inference_model(model, img):
"""Inference image(s) with the classifier.
Args:
model (nn.Module): The loaded classifier.
img (str/ndarray): The image filename or loaded image.
Returns:
        result (dict): The classification result, which contains
            `pred_class`, `pred_label` and `pred_score`.
"""
cfg = model.cfg
device = next(model.parameters()).device # model device
# build the data pipeline
if isinstance(img, str):
if cfg.data.test.pipeline[0]['type'] != 'LoadImageFromFile':
cfg.data.test.pipeline.insert(0, dict(type='LoadImageFromFile'))
data = dict(img_info=dict(filename=img), img_prefix=None)
else:
if cfg.data.test.pipeline[0]['type'] == 'LoadImageFromFile':
cfg.data.test.pipeline.pop(0)
data = dict(img=img)
test_pipeline = Compose(cfg.data.test.pipeline)
data = test_pipeline(data)
data = collate([data], samples_per_gpu=1)
if next(model.parameters()).is_cuda:
# scatter to specified GPU
data = scatter(data, [device])[0]
# forward the model
with torch.no_grad():
scores = model(return_loss=False, **data)
pred_score = np.max(scores, axis=1)[0]
pred_label = np.argmax(scores, axis=1)[0]
result = {'pred_label': pred_label, 'pred_score': float(pred_score)}
result['pred_class'] = model.CLASSES[result['pred_label']]
return result
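# Minimal usage sketch for the two APIs above (the config, checkpoint and
# image paths are hypothetical):
#   from mmcls.apis import init_model, inference_model
#   model = init_model('configs/resnet/resnet18_b32x8_imagenet.py',
#                      'checkpoints/resnet18.pth', device='cpu')
#   result = inference_model(model, 'demo/demo.JPEG')
#   print(result['pred_class'], result['pred_score'])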
def show_result_pyplot(model, img, result, fig_size=(15, 10), wait_time=0):
"""Visualize the classification results on the image.
Args:
model (nn.Module): The loaded classifier.
img (str or np.ndarray): Image filename or loaded image.
result (list): The classification result.
fig_size (tuple): Figure size of the pyplot figure.
Defaults to (15, 10).
wait_time (int): How many seconds to display the image.
Defaults to 0.
"""
if hasattr(model, 'module'):
model = model.module
model.show_result(
img, result, show=True, fig_size=fig_size, wait_time=wait_time)
# File: KnowledgeFactor-main/cls/mmcls/apis/multitask_test.py
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import pickle
import shutil
import tempfile
import time
import mmcv
import numpy as np
import torch
import torch.distributed as dist
from mmcv.image import tensor2imgs
from mmcv.runner import get_dist_info
def multitask_single_gpu_test(model,
data_loader,
show=False,
out_dir=None,
**show_kwargs):
model.eval()
results = dict()
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
batch_results = model(return_loss=False, **data)
        for task_id, result in enumerate(batch_results):
            key = f'task_{task_id}'
if key not in results.keys():
results[key] = []
results[key].extend(result)
if show or out_dir:
scores = np.vstack(result)
pred_score = np.max(scores, axis=1)
pred_label = np.argmax(scores, axis=1)
pred_class = [model.CLASSES[lb] for lb in pred_label]
img_metas = data['img_metas'].data[0]
imgs = tensor2imgs(data['img'], **img_metas[0]['img_norm_cfg'])
assert len(imgs) == len(img_metas)
for i, (img, img_meta) in enumerate(zip(imgs, img_metas)):
h, w, _ = img_meta['img_shape']
img_show = img[:h, :w, :]
ori_h, ori_w = img_meta['ori_shape'][:-1]
img_show = mmcv.imresize(img_show, (ori_w, ori_h))
if out_dir:
out_file = osp.join(out_dir, img_meta['ori_filename'])
else:
out_file = None
result_show = {
'pred_score': pred_score[i],
'pred_label': pred_label[i],
'pred_class': pred_class[i]
}
model.module.show_result(
img_show,
result_show,
show=show,
out_file=out_file,
**show_kwargs)
batch_size = data['img'].size(0)
for _ in range(batch_size):
prog_bar.update()
return results
def multitask_multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
"""Test model with multiple gpus.
This method tests model with multiple gpus and collects the results
    under two different modes: gpu and cpu modes. By setting
    'gpu_collect=True', it encodes results to gpu tensors and uses gpu
    communication for results collection. In cpu mode, it saves the results
    on different gpus to 'tmpdir' and collects them by the rank 0 worker.
Args:
model (nn.Module): Model to be tested.
        data_loader (DataLoader): PyTorch data loader.
tmpdir (str): Path of directory to save the temporary results from
different gpus under cpu mode.
gpu_collect (bool): Option to use either gpu or cpu to collect results.
Returns:
list: The prediction results.
"""
model.eval()
results = dict()
dataset = data_loader.dataset
rank, world_size = get_dist_info()
if rank == 0:
# Check if tmpdir is valid for cpu_collect
if (not gpu_collect) and (tmpdir is not None and osp.exists(tmpdir)):
raise OSError((f'The tmpdir {tmpdir} already exists.',
' Since tmpdir will be deleted after testing,',
' please make sure you specify an empty one.'))
prog_bar = mmcv.ProgressBar(len(dataset))
time.sleep(2) # This line can prevent deadlock problem in some cases.
for i, data in enumerate(data_loader):
with torch.no_grad():
batch_results = model(return_loss=False, **data)
        for task_id, result in enumerate(batch_results):
            key = f'task_{task_id}'
if key not in results.keys():
results[key] = []
if isinstance(result, list):
results[key].extend(result)
else:
results[key].append(result)
if rank == 0:
batch_size = data['img'].size(0)
for _ in range(batch_size * world_size):
prog_bar.update()
# collect results from all ranks
for task_name in results.keys():
if gpu_collect:
results[task_name] = collect_results_gpu(results[task_name], len(dataset))
else:
results[task_name] = collect_results_cpu(results[task_name], len(dataset), tmpdir)
return results
def collect_results_cpu(result_part, size, tmpdir=None):
rank, world_size = get_dist_info()
# create a tmp dir if it is not specified
if tmpdir is None:
MAX_LEN = 512
# 32 is whitespace
dir_tensor = torch.full((MAX_LEN, ),
32,
dtype=torch.uint8,
device='cuda')
if rank == 0:
mmcv.mkdir_or_exist('.dist_test')
tmpdir = tempfile.mkdtemp(dir='.dist_test')
tmpdir = torch.tensor(
bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
dir_tensor[:len(tmpdir)] = tmpdir
dist.broadcast(dir_tensor, 0)
tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
else:
mmcv.mkdir_or_exist(tmpdir)
# dump the part result to the dir
mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
dist.barrier()
# collect all parts
if rank != 0:
return None
else:
# load results of all parts from tmp dir
part_list = []
for i in range(world_size):
part_file = osp.join(tmpdir, f'part_{i}.pkl')
part_result = mmcv.load(part_file)
part_list.append(part_result)
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
# remove tmp dir
shutil.rmtree(tmpdir)
return ordered_results
def collect_results_gpu(result_part, size):
rank, world_size = get_dist_info()
# dump result part to tensor with pickle
part_tensor = torch.tensor(
bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
# gather all result part tensor shape
shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
shape_list = [shape_tensor.clone() for _ in range(world_size)]
dist.all_gather(shape_list, shape_tensor)
# padding result part tensor to max length
shape_max = torch.tensor(shape_list).max()
part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
part_send[:shape_tensor[0]] = part_tensor
part_recv_list = [
part_tensor.new_zeros(shape_max) for _ in range(world_size)
]
# gather all result part
dist.all_gather(part_recv_list, part_send)
if rank == 0:
part_list = []
for recv, shape in zip(part_recv_list, shape_list):
part_result = pickle.loads(recv[:shape[0]].cpu().numpy().tobytes())
part_list.append(part_result)
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
return ordered_results
# File: KnowledgeFactor-main/cls/mmcls/apis/test.py
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import pickle
import shutil
import tempfile
import time
import mmcv
import numpy as np
import torch
import torch.distributed as dist
from mmcv.image import tensor2imgs
from mmcv.runner import get_dist_info
def single_gpu_test(model,
data_loader,
show=False,
out_dir=None,
**show_kwargs):
model.eval()
results = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, **data)
        results.extend(result)
if show or out_dir:
scores = np.vstack(result)
pred_score = np.max(scores, axis=1)
pred_label = np.argmax(scores, axis=1)
pred_class = [model.CLASSES[lb] for lb in pred_label]
img_metas = data['img_metas'].data[0]
imgs = tensor2imgs(data['img'], **img_metas[0]['img_norm_cfg'])
assert len(imgs) == len(img_metas)
for i, (img, img_meta) in enumerate(zip(imgs, img_metas)):
h, w, _ = img_meta['img_shape']
img_show = img[:h, :w, :]
ori_h, ori_w = img_meta['ori_shape'][:-1]
img_show = mmcv.imresize(img_show, (ori_w, ori_h))
if out_dir:
out_file = osp.join(out_dir, img_meta['ori_filename'])
else:
out_file = None
result_show = {
'pred_score': pred_score[i],
'pred_label': pred_label[i],
'pred_class': pred_class[i]
}
model.module.show_result(
img_show,
result_show,
show=show,
out_file=out_file,
**show_kwargs)
batch_size = data['img'].size(0)
for _ in range(batch_size):
prog_bar.update()
return results
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
"""Test model with multiple gpus.
This method tests model with multiple gpus and collects the results
    under two different modes: gpu and cpu modes. By setting
    'gpu_collect=True', it encodes results to gpu tensors and uses gpu
    communication for results collection. In cpu mode, it saves the results
    on different gpus to 'tmpdir' and collects them by the rank 0 worker.
Args:
model (nn.Module): Model to be tested.
        data_loader (DataLoader): PyTorch data loader.
tmpdir (str): Path of directory to save the temporary results from
different gpus under cpu mode.
gpu_collect (bool): Option to use either gpu or cpu to collect results.
Returns:
list: The prediction results.
"""
model.eval()
results = []
dataset = data_loader.dataset
rank, world_size = get_dist_info()
if rank == 0:
# Check if tmpdir is valid for cpu_collect
if (not gpu_collect) and (tmpdir is not None and osp.exists(tmpdir)):
raise OSError((f'The tmpdir {tmpdir} already exists.',
' Since tmpdir will be deleted after testing,',
' please make sure you specify an empty one.'))
prog_bar = mmcv.ProgressBar(len(dataset))
time.sleep(2) # This line can prevent deadlock problem in some cases.
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, **data)
if isinstance(result, list):
results.extend(result)
else:
results.append(result)
if rank == 0:
batch_size = data['img'].size(0)
for _ in range(batch_size * world_size):
prog_bar.update()
# collect results from all ranks
if gpu_collect:
results = collect_results_gpu(results, len(dataset))
else:
results = collect_results_cpu(results, len(dataset), tmpdir)
return results
def collect_results_cpu(result_part, size, tmpdir=None):
rank, world_size = get_dist_info()
# create a tmp dir if it is not specified
if tmpdir is None:
MAX_LEN = 512
# 32 is whitespace
dir_tensor = torch.full((MAX_LEN, ),
32,
dtype=torch.uint8,
device='cuda')
if rank == 0:
mmcv.mkdir_or_exist('.dist_test')
tmpdir = tempfile.mkdtemp(dir='.dist_test')
tmpdir = torch.tensor(
bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
dir_tensor[:len(tmpdir)] = tmpdir
dist.broadcast(dir_tensor, 0)
tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
else:
mmcv.mkdir_or_exist(tmpdir)
# dump the part result to the dir
mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
dist.barrier()
# collect all parts
if rank != 0:
return None
else:
# load results of all parts from tmp dir
part_list = []
for i in range(world_size):
part_file = osp.join(tmpdir, f'part_{i}.pkl')
part_result = mmcv.load(part_file)
part_list.append(part_result)
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
# remove tmp dir
shutil.rmtree(tmpdir)
return ordered_results
def collect_results_gpu(result_part, size):
rank, world_size = get_dist_info()
# dump result part to tensor with pickle
part_tensor = torch.tensor(
bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
# gather all result part tensor shape
shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
shape_list = [shape_tensor.clone() for _ in range(world_size)]
dist.all_gather(shape_list, shape_tensor)
# padding result part tensor to max length
shape_max = torch.tensor(shape_list).max()
part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
part_send[:shape_tensor[0]] = part_tensor
part_recv_list = [
part_tensor.new_zeros(shape_max) for _ in range(world_size)
]
# gather all result part
dist.all_gather(part_recv_list, part_send)
if rank == 0:
part_list = []
for recv, shape in zip(part_recv_list, shape_list):
part_result = pickle.loads(recv[:shape[0]].cpu().numpy().tobytes())
part_list.append(part_result)
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
return ordered_results
# File: KnowledgeFactor-main/cls/mmcls/apis/train.py
# Copyright (c) OpenMMLab. All rights reserved.
import random
import warnings
import numpy as np
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import DistSamplerSeedHook, build_optimizer, build_runner
from mmcls.core import DistOptimizerHook
from mmcls.datasets import build_dataloader, build_dataset
from mmcls.utils import get_root_logger
# TODO import eval hooks from mmcv and delete them from mmcls
try:
from mmcv.runner.hooks import EvalHook, DistEvalHook
except ImportError:
warnings.warn('DeprecationWarning: EvalHook and DistEvalHook from mmcls '
'will be deprecated.'
'Please install mmcv through master branch.')
from mmcls.core import EvalHook, DistEvalHook
from ..core.evaluation import MultiTaskEvalHook, DistMultiTaskEvalHook
# TODO import optimizer hook from mmcv and delete them from mmcls
try:
from mmcv.runner import Fp16OptimizerHook
except ImportError:
warnings.warn('DeprecationWarning: FP16OptimizerHook from mmcls will be '
'deprecated. Please install mmcv>=1.1.4.')
from mmcls.core import Fp16OptimizerHook
from ..core.utils.visualize import LogBuffer_ignore
def set_random_seed(seed, deterministic=False):
"""Set random seed.
Args:
seed (int): Seed to be used.
deterministic (bool): Whether to set the deterministic option for
CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
to True and `torch.backends.cudnn.benchmark` to False.
Default: False.
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if deterministic:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def train_model(model,
dataset,
cfg,
distributed=False,
validate=False,
timestamp=None,
device='cuda',
meta=None):
logger = get_root_logger(cfg.log_level)
# prepare data loaders
dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
data_loaders = [
build_dataloader(
ds,
cfg.data.samples_per_gpu,
cfg.data.workers_per_gpu,
# cfg.gpus will be ignored if distributed
num_gpus=len(cfg.gpu_ids),
dist=distributed,
round_up=True,
seed=cfg.seed) for ds in dataset
]
# put model on gpus
if distributed:
find_unused_parameters = cfg.get('find_unused_parameters', False)
# Sets the `find_unused_parameters` parameter in
# torch.nn.parallel.DistributedDataParallel
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False,
find_unused_parameters=find_unused_parameters)
else:
if device == 'cuda':
model = MMDataParallel(
model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
elif device == 'cpu':
model = model.cpu()
else:
            raise ValueError(f'unsupported device name {device}.')
# build runner
optimizer = build_optimizer(model, cfg.optimizer)
if cfg.get('runner') is None:
cfg.runner = {
'type': 'EpochBasedRunner',
'max_epochs': cfg.total_epochs
}
warnings.warn(
'config is now expected to have a `runner` section, '
'please set `runner` in your config.', UserWarning)
runner = build_runner(
cfg.runner,
default_args=dict(
model=model,
batch_processor=None,
optimizer=optimizer,
work_dir=cfg.work_dir,
logger=logger,
meta=meta))
runner.log_buffer = LogBuffer_ignore()
    # an ugly workaround to make the .log and .log.json filenames the same
runner.timestamp = timestamp
# fp16 setting
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
optimizer_config = Fp16OptimizerHook(
**cfg.optimizer_config, **fp16_cfg, distributed=distributed)
elif distributed and 'type' not in cfg.optimizer_config:
optimizer_config = DistOptimizerHook(**cfg.optimizer_config)
else:
optimizer_config = cfg.optimizer_config
# register hooks
runner.register_training_hooks(
cfg.lr_config,
optimizer_config,
cfg.checkpoint_config,
cfg.log_config,
cfg.get('momentum_config', None),
custom_hooks_config=cfg.get('custom_hooks', None))
if distributed:
runner.register_hook(DistSamplerSeedHook())
# register eval hooks
if validate:
val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
val_dataloader = build_dataloader(
val_dataset,
samples_per_gpu=cfg.data.samples_per_gpu,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False,
round_up=True)
eval_cfg = cfg.get('evaluation', {})
eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'
if cfg.get('multi_task', False):
eval_hook = DistMultiTaskEvalHook if distributed else MultiTaskEvalHook
else:
eval_hook = DistEvalHook if distributed else EvalHook
runner.register_hook(eval_hook(val_dataloader, **eval_cfg))
if cfg.resume_from:
runner.resume(cfg.resume_from)
elif cfg.load_from:
runner.load_checkpoint(cfg.load_from)
runner.run(data_loaders, cfg.workflow)
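# Typical wiring (a sketch mirroring a tools/train.py entry point; the config
# path and work_dir are illustrative, and `build_classifier` comes from
# mmcls.models):
#   cfg = mmcv.Config.fromfile('configs/resnet/resnet18_b32x8_imagenet.py')
#   cfg.work_dir = 'work_dirs/resnet18'
#   cfg.gpu_ids = range(1)
#   cfg.seed = None
#   model = build_classifier(cfg.model)
#   datasets = [build_dataset(cfg.data.train)]
#   train_model(model, datasets, cfg, distributed=False, validate=True)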
# File: KnowledgeFactor-main/cls/mmcls/core/evaluation/multilabel_eval_metrics.py
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import numpy as np
import torch
def average_performance(pred, target, thr=None, k=None):
"""Calculate CP, CR, CF1, OP, OR, OF1, where C stands for per-class
average, O stands for overall average, P stands for precision, R stands for
recall and F1 stands for F1-score.
Args:
pred (torch.Tensor | np.ndarray): The model prediction with shape
(N, C), where C is the number of classes.
target (torch.Tensor | np.ndarray): The target of each prediction with
shape (N, C), where C is the number of classes. 1 stands for
positive examples, 0 stands for negative examples and -1 stands for
difficult examples.
thr (float): The confidence threshold. Defaults to None.
k (int): Top-k performance. Note that if thr and k are both given, k
will be ignored. Defaults to None.
Returns:
tuple: (CP, CR, CF1, OP, OR, OF1)
"""
if isinstance(pred, torch.Tensor) and isinstance(target, torch.Tensor):
pred = pred.detach().cpu().numpy()
target = target.detach().cpu().numpy()
elif not (isinstance(pred, np.ndarray) and isinstance(target, np.ndarray)):
raise TypeError('pred and target should both be torch.Tensor or'
'np.ndarray')
if thr is None and k is None:
thr = 0.5
warnings.warn('Neither thr nor k is given, set thr as 0.5 by '
'default.')
elif thr is not None and k is not None:
warnings.warn('Both thr and k are given, use threshold in favor of '
'top-k.')
assert pred.shape == \
target.shape, 'pred and target should be in the same shape.'
eps = np.finfo(np.float32).eps
target[target == -1] = 0
if thr is not None:
# a label is predicted positive if the confidence is no lower than thr
pos_inds = pred >= thr
else:
# top-k labels will be predicted positive for any example
sort_inds = np.argsort(-pred, axis=1)
sort_inds_ = sort_inds[:, :k]
inds = np.indices(sort_inds_.shape)
pos_inds = np.zeros_like(pred)
pos_inds[inds[0], sort_inds_] = 1
tp = (pos_inds * target) == 1
fp = (pos_inds * (1 - target)) == 1
fn = ((1 - pos_inds) * target) == 1
precision_class = tp.sum(axis=0) / np.maximum(
tp.sum(axis=0) + fp.sum(axis=0), eps)
recall_class = tp.sum(axis=0) / np.maximum(
tp.sum(axis=0) + fn.sum(axis=0), eps)
CP = precision_class.mean() * 100.0
CR = recall_class.mean() * 100.0
CF1 = 2 * CP * CR / np.maximum(CP + CR, eps)
OP = tp.sum() / np.maximum(tp.sum() + fp.sum(), eps) * 100.0
OR = tp.sum() / np.maximum(tp.sum() + fn.sum(), eps) * 100.0
OF1 = 2 * OP * OR / np.maximum(OP + OR, eps)
return CP, CR, CF1, OP, OR, OF1
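# Toy worked example (numbers are illustrative, not from the original file):
# with thr=0.5, pred = [[0.9, 0.2], [0.4, 0.8]] and target = [[1, 0], [1, 1]],
# pos_inds = [[1, 0], [0, 1]], so per-class tp = [1, 1], fp = [0, 0] and
# fn = [1, 0], giving CP = 100.0 and CR = mean([50, 100]) = 75.0.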
# File: KnowledgeFactor-main/cls/mmcls/core/evaluation/eval_metrics.py
# Copyright (c) OpenMMLab. All rights reserved.
from numbers import Number
import numpy as np
import torch
def calculate_confusion_matrix(pred, target):
"""Calculate confusion matrix according to the prediction and target.
Args:
pred (torch.Tensor | np.array): The model prediction with shape (N, C).
target (torch.Tensor | np.array): The target of each prediction with
shape (N, 1) or (N,).
Returns:
torch.Tensor: Confusion matrix
The shape is (C, C), where C is the number of classes.
"""
if isinstance(pred, np.ndarray):
pred = torch.from_numpy(pred)
if isinstance(target, np.ndarray):
target = torch.from_numpy(target)
assert (
isinstance(pred, torch.Tensor) and isinstance(target, torch.Tensor)), \
(f'pred and target should be torch.Tensor or np.ndarray, '
f'but got {type(pred)} and {type(target)}.')
num_classes = pred.size(1)
_, pred_label = pred.topk(1, dim=1)
pred_label = pred_label.view(-1)
target_label = target.view(-1)
assert len(pred_label) == len(target_label)
confusion_matrix = torch.zeros(num_classes, num_classes)
with torch.no_grad():
for t, p in zip(target_label, pred_label):
confusion_matrix[t.long(), p.long()] += 1
return confusion_matrix
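# Toy example (illustrative): with
#   pred = torch.tensor([[0.9, 0.1], [0.3, 0.7], [0.8, 0.2]])
#   target = torch.tensor([0, 1, 1])
# the predicted labels are [0, 1, 0], so the matrix is [[1., 0.], [1., 1.]]
# (rows index the true class, columns the predicted class).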
def precision_recall_f1(pred, target, average_mode='macro', thrs=0.):
"""Calculate precision, recall and f1 score according to the prediction and
target.
Args:
pred (torch.Tensor | np.array): The model prediction with shape (N, C).
target (torch.Tensor | np.array): The target of each prediction with
shape (N, 1) or (N,).
average_mode (str): The type of averaging performed on the result.
Options are 'macro' and 'none'. If 'none', the scores for each
class are returned. If 'macro', calculate metrics for each class,
and find their unweighted mean.
Defaults to 'macro'.
thrs (Number | tuple[Number], optional): Predictions with scores under
the thresholds are considered negative. Default to 0.
Returns:
tuple: tuple containing precision, recall, f1 score.
The type of precision, recall, f1 score is one of the following:
+----------------------------+--------------------+-------------------+
| Args | ``thrs`` is number | ``thrs`` is tuple |
+============================+====================+===================+
| ``average_mode`` = "macro" | float | list[float] |
+----------------------------+--------------------+-------------------+
| ``average_mode`` = "none" | np.array | list[np.array] |
+----------------------------+--------------------+-------------------+
"""
allowed_average_mode = ['macro', 'none']
if average_mode not in allowed_average_mode:
        raise ValueError(f'Unsupported type of averaging {average_mode}.')
if isinstance(pred, torch.Tensor):
pred = pred.numpy()
if isinstance(target, torch.Tensor):
target = target.numpy()
assert (isinstance(pred, np.ndarray) and isinstance(target, np.ndarray)),\
(f'pred and target should be torch.Tensor or np.ndarray, '
f'but got {type(pred)} and {type(target)}.')
if isinstance(thrs, Number):
thrs = (thrs, )
return_single = True
elif isinstance(thrs, tuple):
return_single = False
else:
raise TypeError(
f'thrs should be a number or tuple, but got {type(thrs)}.')
label = np.indices(pred.shape)[1]
pred_label = np.argsort(pred, axis=1)[:, -1]
pred_score = np.sort(pred, axis=1)[:, -1]
precisions = []
recalls = []
f1_scores = []
for thr in thrs:
# Only prediction values larger than thr are counted as positive
_pred_label = pred_label.copy()
if thr is not None:
_pred_label[pred_score <= thr] = -1
pred_positive = label == _pred_label.reshape(-1, 1)
gt_positive = label == target.reshape(-1, 1)
precision = (pred_positive & gt_positive).sum(0) / np.maximum(
pred_positive.sum(0), 1) * 100
recall = (pred_positive & gt_positive).sum(0) / np.maximum(
gt_positive.sum(0), 1) * 100
f1_score = 2 * precision * recall / np.maximum(precision + recall,
1e-20)
if average_mode == 'macro':
precision = float(precision.mean())
recall = float(recall.mean())
f1_score = float(f1_score.mean())
precisions.append(precision)
recalls.append(recall)
f1_scores.append(f1_score)
if return_single:
return precisions[0], recalls[0], f1_scores[0]
else:
return precisions, recalls, f1_scores
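# Toy worked example (illustrative) with average_mode='macro' and thrs=0.:
#   pred = np.array([[0.9, 0.1], [0.4, 0.6]]), target = np.array([0, 0])
# predicted labels are [0, 1]; class 0 has precision 100 and recall 50,
# class 1 has precision 0 and recall 0, so the macro precision is 50.0 and
# the macro recall is 25.0.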
def precision(pred, target, average_mode='macro', thrs=0.):
"""Calculate precision according to the prediction and target.
Args:
pred (torch.Tensor | np.array): The model prediction with shape (N, C).
target (torch.Tensor | np.array): The target of each prediction with
shape (N, 1) or (N,).
average_mode (str): The type of averaging performed on the result.
Options are 'macro' and 'none'. If 'none', the scores for each
class are returned. If 'macro', calculate metrics for each class,
and find their unweighted mean.
Defaults to 'macro'.
thrs (Number | tuple[Number], optional): Predictions with scores under
the thresholds are considered negative. Default to 0.
Returns:
float | np.array | list[float | np.array]: Precision.
+----------------------------+--------------------+-------------------+
| Args | ``thrs`` is number | ``thrs`` is tuple |
+============================+====================+===================+
| ``average_mode`` = "macro" | float | list[float] |
+----------------------------+--------------------+-------------------+
| ``average_mode`` = "none" | np.array | list[np.array] |
+----------------------------+--------------------+-------------------+
"""
precisions, _, _ = precision_recall_f1(pred, target, average_mode, thrs)
return precisions
def recall(pred, target, average_mode='macro', thrs=0.):
"""Calculate recall according to the prediction and target.
Args:
pred (torch.Tensor | np.array): The model prediction with shape (N, C).
target (torch.Tensor | np.array): The target of each prediction with
shape (N, 1) or (N,).
average_mode (str): The type of averaging performed on the result.
Options are 'macro' and 'none'. If 'none', the scores for each
class are returned. If 'macro', calculate metrics for each class,
and find their unweighted mean.
Defaults to 'macro'.
thrs (Number | tuple[Number], optional): Predictions with scores under
the thresholds are considered negative. Default to 0.
Returns:
float | np.array | list[float | np.array]: Recall.
+----------------------------+--------------------+-------------------+
| Args | ``thrs`` is number | ``thrs`` is tuple |
+============================+====================+===================+
| ``average_mode`` = "macro" | float | list[float] |
+----------------------------+--------------------+-------------------+
| ``average_mode`` = "none" | np.array | list[np.array] |
+----------------------------+--------------------+-------------------+
"""
_, recalls, _ = precision_recall_f1(pred, target, average_mode, thrs)
return recalls
def f1_score(pred, target, average_mode='macro', thrs=0.):
"""Calculate F1 score according to the prediction and target.
Args:
pred (torch.Tensor | np.array): The model prediction with shape (N, C).
target (torch.Tensor | np.array): The target of each prediction with
shape (N, 1) or (N,).
average_mode (str): The type of averaging performed on the result.
Options are 'macro' and 'none'. If 'none', the scores for each
class are returned. If 'macro', calculate metrics for each class,
and find their unweighted mean.
Defaults to 'macro'.
thrs (Number | tuple[Number], optional): Predictions with scores under
the thresholds are considered negative. Default to 0.
Returns:
float | np.array | list[float | np.array]: F1 score.
+----------------------------+--------------------+-------------------+
| Args | ``thrs`` is number | ``thrs`` is tuple |
+============================+====================+===================+
| ``average_mode`` = "macro" | float | list[float] |
+----------------------------+--------------------+-------------------+
| ``average_mode`` = "none" | np.array | list[np.array] |
+----------------------------+--------------------+-------------------+
"""
_, _, f1_scores = precision_recall_f1(pred, target, average_mode, thrs)
return f1_scores
def support(pred, target, average_mode='macro'):
"""Calculate the total number of occurrences of each label according to the
prediction and target.
Args:
pred (torch.Tensor | np.array): The model prediction with shape (N, C).
target (torch.Tensor | np.array): The target of each prediction with
shape (N, 1) or (N,).
average_mode (str): The type of averaging performed on the result.
Options are 'macro' and 'none'. If 'none', the scores for each
class are returned. If 'macro', calculate metrics for each class,
and find their unweighted sum.
Defaults to 'macro'.
Returns:
float | np.array: Support.
- If the ``average_mode`` is set to macro, the function returns
a single float.
- If the ``average_mode`` is set to none, the function returns
a np.array with shape C.
"""
confusion_matrix = calculate_confusion_matrix(pred, target)
with torch.no_grad():
res = confusion_matrix.sum(1)
if average_mode == 'macro':
res = float(res.sum().numpy())
elif average_mode == 'none':
res = res.numpy()
else:
            raise ValueError(f'Unsupported type of averaging {average_mode}.')
return res
# File: KnowledgeFactor-main/cls/mmcls/core/evaluation/eval_hooks.py
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import warnings
from mmcv.runner import Hook
from torch.utils.data import DataLoader
class EvalHook(Hook):
"""Evaluation hook.
Args:
dataloader (DataLoader): A PyTorch dataloader.
interval (int): Evaluation interval (by epochs). Default: 1.
"""
def __init__(self, dataloader, interval=1, by_epoch=True, **eval_kwargs):
warnings.warn(
'DeprecationWarning: EvalHook and DistEvalHook in mmcls will be '
'deprecated, please install mmcv through master branch.')
if not isinstance(dataloader, DataLoader):
raise TypeError('dataloader must be a pytorch DataLoader, but got'
f' {type(dataloader)}')
self.dataloader = dataloader
self.interval = interval
self.eval_kwargs = eval_kwargs
self.by_epoch = by_epoch
def after_train_epoch(self, runner):
if not self.by_epoch or not self.every_n_epochs(runner, self.interval):
return
from mmcls.apis import single_gpu_test
results = single_gpu_test(runner.model, self.dataloader, show=False)
self.evaluate(runner, results)
def after_train_iter(self, runner):
if self.by_epoch or not self.every_n_iters(runner, self.interval):
return
from mmcls.apis import single_gpu_test
runner.log_buffer.clear()
results = single_gpu_test(runner.model, self.dataloader, show=False)
self.evaluate(runner, results)
def evaluate(self, runner, results):
eval_res = self.dataloader.dataset.evaluate(
results, logger=runner.logger, **self.eval_kwargs)
for name, val in eval_res.items():
runner.log_buffer.output[name] = val
runner.log_buffer.ready = True
class DistEvalHook(EvalHook):
"""Distributed evaluation hook.
Args:
dataloader (DataLoader): A PyTorch dataloader.
interval (int): Evaluation interval (by epochs). Default: 1.
tmpdir (str, optional): Temporary directory to save the results of all
processes. Default: None.
gpu_collect (bool): Whether to use gpu or cpu to collect results.
Default: False.
"""
def __init__(self,
dataloader,
interval=1,
gpu_collect=False,
by_epoch=True,
**eval_kwargs):
warnings.warn(
'DeprecationWarning: EvalHook and DistEvalHook in mmcls will be '
'deprecated, please install mmcv through master branch.')
if not isinstance(dataloader, DataLoader):
raise TypeError('dataloader must be a pytorch DataLoader, but got '
f'{type(dataloader)}')
self.dataloader = dataloader
self.interval = interval
self.gpu_collect = gpu_collect
self.by_epoch = by_epoch
self.eval_kwargs = eval_kwargs
def after_train_epoch(self, runner):
if not self.by_epoch or not self.every_n_epochs(runner, self.interval):
return
from mmcls.apis import multi_gpu_test
results = multi_gpu_test(
runner.model,
self.dataloader,
tmpdir=osp.join(runner.work_dir, '.eval_hook'),
gpu_collect=self.gpu_collect)
if runner.rank == 0:
print('\n')
self.evaluate(runner, results)
def after_train_iter(self, runner):
if self.by_epoch or not self.every_n_iters(runner, self.interval):
return
from mmcls.apis import multi_gpu_test
runner.log_buffer.clear()
results = multi_gpu_test(
runner.model,
self.dataloader,
tmpdir=osp.join(runner.work_dir, '.eval_hook'),
gpu_collect=self.gpu_collect)
if runner.rank == 0:
print('\n')
self.evaluate(runner, results)
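# Hedged usage sketch (added; the keys follow the usual mmcls conventions
# and are shown only for illustration): these hooks are typically driven by
# an `evaluation` field in the config, e.g.
# evaluation = dict(interval=1, metric='accuracy',
#                   metric_options=dict(topk=(1, 5)))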
| 3,967 | 36.433962 | 79 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/core/evaluation/multitask_eval_hooks.py | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import warnings
from mmcv.runner import Hook
from torch.utils.data import DataLoader
class MultiTaskEvalHook(Hook):
"""Evaluation hook.
Args:
dataloader (DataLoader): A PyTorch dataloader.
interval (int): Evaluation interval (by epochs). Default: 1.
"""
def __init__(self, dataloader, interval=1, by_epoch=True, **eval_kwargs):
warnings.warn(
'DeprecationWarning: EvalHook and DistEvalHook in mmcls will be '
'deprecated, please install mmcv through master branch.')
if not isinstance(dataloader, DataLoader):
raise TypeError('dataloader must be a pytorch DataLoader, but got'
f' {type(dataloader)}')
self.dataloader = dataloader
self.interval = interval
self.eval_kwargs = eval_kwargs
self.by_epoch = by_epoch
def after_train_epoch(self, runner):
if not self.by_epoch or not self.every_n_epochs(runner, self.interval):
return
from mmcls.apis import multitask_single_gpu_test
results = multitask_single_gpu_test(runner.model, self.dataloader, show=False)
self.evaluate(runner, results)
def after_train_iter(self, runner):
if self.by_epoch or not self.every_n_iters(runner, self.interval):
return
from mmcls.apis import multitask_single_gpu_test
runner.log_buffer.clear()
results = multitask_single_gpu_test(runner.model, self.dataloader, show=False)
self.evaluate(runner, results)
def evaluate(self, runner, results):
eval_res = self.dataloader.dataset.evaluate(
results, logger=runner.logger, **self.eval_kwargs)
for name, val in eval_res.items():
runner.log_buffer.output[name] = val
runner.log_buffer.ready = True
class DistMultiTaskEvalHook(MultiTaskEvalHook):
"""Distributed evaluation hook.
Args:
dataloader (DataLoader): A PyTorch dataloader.
interval (int): Evaluation interval (by epochs). Default: 1.
tmpdir (str, optional): Temporary directory to save the results of all
processes. Default: None.
gpu_collect (bool): Whether to use gpu or cpu to collect results.
Default: False.
"""
def __init__(self,
dataloader,
interval=1,
gpu_collect=False,
by_epoch=True,
**eval_kwargs):
warnings.warn(
'DeprecationWarning: EvalHook and DistEvalHook in mmcls will be '
'deprecated, please install mmcv through master branch.')
if not isinstance(dataloader, DataLoader):
raise TypeError('dataloader must be a pytorch DataLoader, but got '
f'{type(dataloader)}')
self.dataloader = dataloader
self.interval = interval
self.gpu_collect = gpu_collect
self.by_epoch = by_epoch
self.eval_kwargs = eval_kwargs
def after_train_epoch(self, runner):
if not self.by_epoch or not self.every_n_epochs(runner, self.interval):
return
from mmcls.apis import multitask_multi_gpu_test
results = multitask_multi_gpu_test(
runner.model,
self.dataloader,
tmpdir=osp.join(runner.work_dir, '.eval_hook'),
gpu_collect=self.gpu_collect)
if runner.rank == 0:
print('\n')
self.evaluate(runner, results)
def after_train_iter(self, runner):
if self.by_epoch or not self.every_n_iters(runner, self.interval):
return
from mmcls.apis import multitask_multi_gpu_test
runner.log_buffer.clear()
results = multitask_multi_gpu_test(
runner.model,
self.dataloader,
tmpdir=osp.join(runner.work_dir, '.eval_hook'),
gpu_collect=self.gpu_collect)
if runner.rank == 0:
print('\n')
self.evaluate(runner, results)
| 4,076 | 36.75 | 86 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/core/evaluation/mean_ap.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
def average_precision(pred, target):
r"""Calculate the average precision for a single class.
AP summarizes a precision-recall curve as the weighted mean of maximum
precisions obtained for any r'>r, where r is the recall:
.. math::
\text{AP} = \sum_n (R_n - R_{n-1}) P_n
Note that no approximation is involved since the curve is piecewise
constant.
Args:
pred (np.ndarray): The model prediction with shape (N, ).
target (np.ndarray): The target of each prediction with shape (N, ).
Returns:
float: a single float as average precision value.
"""
eps = np.finfo(np.float32).eps
# sort examples
sort_inds = np.argsort(-pred)
sort_target = target[sort_inds]
# count true positive examples
pos_inds = sort_target == 1
tp = np.cumsum(pos_inds)
total_pos = tp[-1]
# count not difficult examples
pn_inds = sort_target != -1
pn = np.cumsum(pn_inds)
tp[np.logical_not(pos_inds)] = 0
precision = tp / np.maximum(pn, eps)
ap = np.sum(precision) / np.maximum(total_pos, eps)
return ap
def mAP(pred, target):
"""Calculate the mean average precision with respect of classes.
Args:
pred (torch.Tensor | np.ndarray): The model prediction with shape
(N, C), where C is the number of classes.
target (torch.Tensor | np.ndarray): The target of each prediction with
shape (N, C), where C is the number of classes. 1 stands for
positive examples, 0 stands for negative examples and -1 stands for
difficult examples.
Returns:
float: A single float as mAP value.
"""
if isinstance(pred, torch.Tensor) and isinstance(target, torch.Tensor):
pred = pred.detach().cpu().numpy()
target = target.detach().cpu().numpy()
elif not (isinstance(pred, np.ndarray) and isinstance(target, np.ndarray)):
        raise TypeError('pred and target should both be torch.Tensor or '
                        'np.ndarray')
assert pred.shape == \
target.shape, 'pred and target should be in the same shape.'
num_classes = pred.shape[1]
ap = np.zeros(num_classes)
for k in range(num_classes):
ap[k] = average_precision(pred[:, k], target[:, k])
mean_ap = ap.mean() * 100.0
return mean_ap
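# Hedged sketch (added for illustration; not part of the original file):
# mAP averages per-class AP over the columns of (N, C) multi-label targets;
# -1 entries mark "difficult" samples that are ignored.
def _example_map():
    pred = np.array([[0.9, 0.2], [0.1, 0.8]])
    target = np.array([[1, 0], [0, 1]])  # both classes ranked perfectly
    print(mAP(pred, target))  # -> 100.0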
| 2,414 | 31.2 | 79 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/core/fp16/hooks.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import torch
import torch.nn as nn
from mmcv.runner import OptimizerHook
from mmcv.utils.parrots_wrapper import _BatchNorm
from ..utils import allreduce_grads
from .utils import cast_tensor_type
class Fp16OptimizerHook(OptimizerHook):
"""FP16 optimizer hook.
    The steps of the fp16 optimizer are as follows.
    1. Scale the loss value.
    2. BP in the fp16 model.
    3. Copy gradients from the fp16 model to the fp32 weights.
    4. Update the fp32 weights.
    5. Copy the updated parameters from the fp32 weights to the fp16 model.
Refer to https://arxiv.org/abs/1710.03740 for more details.
Args:
loss_scale (float): Scale factor multiplied with loss.
"""
def __init__(self,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
loss_scale=512.,
distributed=True):
self.grad_clip = grad_clip
self.coalesce = coalesce
self.bucket_size_mb = bucket_size_mb
self.loss_scale = loss_scale
self.distributed = distributed
def before_run(self, runner):
# keep a copy of fp32 weights
runner.optimizer.param_groups = copy.deepcopy(
runner.optimizer.param_groups)
# convert model to fp16
wrap_fp16_model(runner.model)
def copy_grads_to_fp32(self, fp16_net, fp32_weights):
"""Copy gradients from fp16 model to fp32 weight copy."""
for fp32_param, fp16_param in zip(fp32_weights, fp16_net.parameters()):
if fp16_param.grad is not None:
if fp32_param.grad is None:
fp32_param.grad = fp32_param.data.new(fp32_param.size())
fp32_param.grad.copy_(fp16_param.grad)
def copy_params_to_fp16(self, fp16_net, fp32_weights):
"""Copy updated params from fp32 weight copy to fp16 model."""
for fp16_param, fp32_param in zip(fp16_net.parameters(), fp32_weights):
fp16_param.data.copy_(fp32_param.data)
def after_train_iter(self, runner):
# clear grads of last iteration
runner.model.zero_grad()
runner.optimizer.zero_grad()
# scale the loss value
scaled_loss = runner.outputs['loss'] * self.loss_scale
scaled_loss.backward()
# copy fp16 grads in the model to fp32 params in the optimizer
fp32_weights = []
for param_group in runner.optimizer.param_groups:
fp32_weights += param_group['params']
self.copy_grads_to_fp32(runner.model, fp32_weights)
# allreduce grads
if self.distributed:
allreduce_grads(fp32_weights, self.coalesce, self.bucket_size_mb)
# scale the gradients back
for param in fp32_weights:
if param.grad is not None:
param.grad.div_(self.loss_scale)
if self.grad_clip is not None:
self.clip_grads(fp32_weights)
# update fp32 params
runner.optimizer.step()
# copy fp32 params to the fp16 model
self.copy_params_to_fp16(runner.model, fp32_weights)
def wrap_fp16_model(model):
# convert model to fp16
model.half()
# patch the normalization layers to make it work in fp32 mode
patch_norm_fp32(model)
# set `fp16_enabled` flag
for m in model.modules():
if hasattr(m, 'fp16_enabled'):
m.fp16_enabled = True
def patch_norm_fp32(module):
if isinstance(module, (_BatchNorm, nn.GroupNorm)):
module.float()
module.forward = patch_forward_method(module.forward, torch.half,
torch.float)
for child in module.children():
patch_norm_fp32(child)
return module
def patch_forward_method(func, src_type, dst_type, convert_output=True):
"""Patch the forward method of a module.
Args:
func (callable): The original forward method.
src_type (torch.dtype): Type of input arguments to be converted from.
dst_type (torch.dtype): Type of input arguments to be converted to.
convert_output (bool): Whether to convert the output back to src_type.
Returns:
callable: The patched forward method.
"""
def new_forward(*args, **kwargs):
output = func(*cast_tensor_type(args, src_type, dst_type),
**cast_tensor_type(kwargs, src_type, dst_type))
if convert_output:
output = cast_tensor_type(output, dst_type, src_type)
return output
return new_forward
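# Hedged sketch (added; not in the original file): after wrap_fp16_model,
# conv/linear weights are fp16 while normalization layers stay fp32.
def _example_wrap_fp16():
    model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    wrap_fp16_model(model)
    assert next(model[0].parameters()).dtype == torch.half
    assert next(model[1].parameters()).dtype == torch.float  # patched BN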
| 4,548 | 33.992308 | 79 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/core/fp16/utils.py | # Copyright (c) OpenMMLab. All rights reserved.
from collections import abc
import numpy as np
import torch
def cast_tensor_type(inputs, src_type, dst_type):
if isinstance(inputs, torch.Tensor):
return inputs.to(dst_type)
elif isinstance(inputs, str):
return inputs
elif isinstance(inputs, np.ndarray):
return inputs
elif isinstance(inputs, abc.Mapping):
return type(inputs)({
k: cast_tensor_type(v, src_type, dst_type)
for k, v in inputs.items()
})
elif isinstance(inputs, abc.Iterable):
return type(inputs)(
cast_tensor_type(item, src_type, dst_type) for item in inputs)
else:
return inputs
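# Hedged sketch (added for illustration): cast_tensor_type walks nested
# containers and only converts tensors, leaving strings and ndarrays as-is.
def _example_cast():
    data = {'img': torch.zeros(2, 3), 'meta': ['name', np.ones(2)]}
    out = cast_tensor_type(data, torch.float, torch.half)
    assert out['img'].dtype == torch.half
    assert out['meta'][1].dtype == np.float64  # ndarray passes through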
| 712 | 27.52 | 74 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/core/fp16/decorators.py | # Copyright (c) OpenMMLab. All rights reserved.
import functools
from inspect import getfullargspec
import torch
from .utils import cast_tensor_type
def auto_fp16(apply_to=None, out_fp32=False):
"""Decorator to enable fp16 training automatically.
This decorator is useful when you write custom modules and want to support
mixed precision training. If inputs arguments are fp32 tensors, they will
be converted to fp16 automatically. Arguments other than fp32 tensors are
ignored.
Args:
apply_to (Iterable, optional): The argument names to be converted.
`None` indicates all arguments.
out_fp32 (bool): Whether to convert the output back to fp32.
:Example:
class MyModule1(nn.Module)
# Convert x and y to fp16
@auto_fp16()
def forward(self, x, y):
pass
class MyModule2(nn.Module):
# convert pred to fp16
@auto_fp16(apply_to=('pred', ))
def do_something(self, pred, others):
pass
"""
def auto_fp16_wrapper(old_func):
@functools.wraps(old_func)
def new_func(*args, **kwargs):
# check if the module has set the attribute `fp16_enabled`, if not,
# just fallback to the original method.
if not isinstance(args[0], torch.nn.Module):
raise TypeError('@auto_fp16 can only be used to decorate the '
'method of nn.Module')
if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled):
return old_func(*args, **kwargs)
# get the arg spec of the decorated method
args_info = getfullargspec(old_func)
# get the argument names to be casted
args_to_cast = args_info.args if apply_to is None else apply_to
# convert the args that need to be processed
new_args = []
# NOTE: default args are not taken into consideration
if args:
arg_names = args_info.args[:len(args)]
for i, arg_name in enumerate(arg_names):
if arg_name in args_to_cast:
new_args.append(
cast_tensor_type(args[i], torch.float, torch.half))
else:
new_args.append(args[i])
# convert the kwargs that need to be processed
new_kwargs = {}
if kwargs:
for arg_name, arg_value in kwargs.items():
if arg_name in args_to_cast:
new_kwargs[arg_name] = cast_tensor_type(
arg_value, torch.float, torch.half)
else:
new_kwargs[arg_name] = arg_value
# apply converted arguments to the decorated method
output = old_func(*new_args, **new_kwargs)
# cast the results back to fp32 if necessary
if out_fp32:
output = cast_tensor_type(output, torch.half, torch.float)
return output
return new_func
return auto_fp16_wrapper
def force_fp32(apply_to=None, out_fp16=False):
"""Decorator to convert input arguments to fp32 in force.
This decorator is useful when you write custom modules and want to support
mixed precision training. If there are some inputs that must be processed
in fp32 mode, then this decorator can handle it. If inputs arguments are
fp16 tensors, they will be converted to fp32 automatically. Arguments other
than fp16 tensors are ignored.
Args:
apply_to (Iterable, optional): The argument names to be converted.
`None` indicates all arguments.
out_fp16 (bool): Whether to convert the output back to fp16.
:Example:
class MyModule1(nn.Module)
# Convert x and y to fp32
@force_fp32()
def loss(self, x, y):
pass
class MyModule2(nn.Module):
# convert pred to fp32
@force_fp32(apply_to=('pred', ))
def post_process(self, pred, others):
pass
"""
def force_fp32_wrapper(old_func):
@functools.wraps(old_func)
def new_func(*args, **kwargs):
# check if the module has set the attribute `fp16_enabled`, if not,
# just fallback to the original method.
if not isinstance(args[0], torch.nn.Module):
raise TypeError('@force_fp32 can only be used to decorate the '
'method of nn.Module')
if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled):
return old_func(*args, **kwargs)
# get the arg spec of the decorated method
args_info = getfullargspec(old_func)
# get the argument names to be casted
args_to_cast = args_info.args if apply_to is None else apply_to
# convert the args that need to be processed
new_args = []
if args:
arg_names = args_info.args[:len(args)]
for i, arg_name in enumerate(arg_names):
if arg_name in args_to_cast:
new_args.append(
cast_tensor_type(args[i], torch.half, torch.float))
else:
new_args.append(args[i])
# convert the kwargs that need to be processed
new_kwargs = dict()
if kwargs:
for arg_name, arg_value in kwargs.items():
if arg_name in args_to_cast:
new_kwargs[arg_name] = cast_tensor_type(
arg_value, torch.half, torch.float)
else:
new_kwargs[arg_name] = arg_value
# apply converted arguments to the decorated method
output = old_func(*new_args, **new_kwargs)
# cast the results back to fp32 if necessary
if out_fp16:
output = cast_tensor_type(output, torch.float, torch.half)
return output
return new_func
return force_fp32_wrapper
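# Hedged sketch (added; not in the original file): the decorators above are
# no-ops until `fp16_enabled` is set on the module (e.g. by wrap_fp16_model).
def _example_fp16_gate():
    class Toy(torch.nn.Module):

        @auto_fp16(apply_to=('x', ))
        def forward(self, x):
            return x.dtype

    toy = Toy()
    assert toy(torch.zeros(1)) == torch.float  # flag unset: no cast
    toy.fp16_enabled = True
    assert toy(torch.zeros(1)) == torch.half  # fp32 args are now cast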
| 6,259 | 37.641975 | 79 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/core/export/test.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import numpy as np
import onnxruntime as ort
import torch
from mmcls.models.classifiers import BaseClassifier
class ONNXRuntimeClassifier(BaseClassifier):
"""Wrapper for classifier's inference with ONNXRuntime."""
def __init__(self, onnx_file, class_names, device_id):
super(ONNXRuntimeClassifier, self).__init__()
sess = ort.InferenceSession(onnx_file)
providers = ['CPUExecutionProvider']
options = [{}]
is_cuda_available = ort.get_device() == 'GPU'
if is_cuda_available:
providers.insert(0, 'CUDAExecutionProvider')
options.insert(0, {'device_id': device_id})
sess.set_providers(providers, options)
self.sess = sess
self.CLASSES = class_names
self.device_id = device_id
self.io_binding = sess.io_binding()
self.output_names = [_.name for _ in sess.get_outputs()]
self.is_cuda_available = is_cuda_available
def simple_test(self, img, img_metas, **kwargs):
raise NotImplementedError('This method is not implemented.')
def extract_feat(self, imgs):
raise NotImplementedError('This method is not implemented.')
def forward_train(self, imgs, **kwargs):
raise NotImplementedError('This method is not implemented.')
def forward_test(self, imgs, img_metas, **kwargs):
input_data = imgs
# set io binding for inputs/outputs
device_type = 'cuda' if self.is_cuda_available else 'cpu'
if not self.is_cuda_available:
input_data = input_data.cpu()
self.io_binding.bind_input(
name='input',
device_type=device_type,
device_id=self.device_id,
element_type=np.float32,
shape=input_data.shape,
buffer_ptr=input_data.data_ptr())
for name in self.output_names:
self.io_binding.bind_output(name)
# run session to get outputs
self.sess.run_with_iobinding(self.io_binding)
results = self.io_binding.copy_outputs_to_cpu()[0]
return list(results)
class TensorRTClassifier(BaseClassifier):
def __init__(self, trt_file, class_names, device_id):
super(TensorRTClassifier, self).__init__()
from mmcv.tensorrt import TRTWraper, load_tensorrt_plugin
try:
load_tensorrt_plugin()
except (ImportError, ModuleNotFoundError):
warnings.warn('If input model has custom op from mmcv, \
you may have to build mmcv with TensorRT from source.')
model = TRTWraper(
trt_file, input_names=['input'], output_names=['probs'])
self.model = model
self.device_id = device_id
self.CLASSES = class_names
def simple_test(self, img, img_metas, **kwargs):
raise NotImplementedError('This method is not implemented.')
def extract_feat(self, imgs):
raise NotImplementedError('This method is not implemented.')
def forward_train(self, imgs, **kwargs):
raise NotImplementedError('This method is not implemented.')
def forward_test(self, imgs, img_metas, **kwargs):
input_data = imgs
with torch.cuda.device(self.device_id), torch.no_grad():
results = self.model({'input': input_data})['probs']
results = results.detach().cpu().numpy()
return list(results)
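# Hedged usage sketch (added; the path and class names are placeholders):
# classifier = ONNXRuntimeClassifier('model.onnx',
#                                    class_names=['cat', 'dog'], device_id=0)
# probs = classifier.forward_test(torch.randn(1, 3, 224, 224), img_metas={})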
| 3,439 | 34.463918 | 71 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/core/utils/kd_hook.py | import torch
from mmcv.parallel import is_module_wrapper
from mmcv.runner import (HOOKS, OPTIMIZER_BUILDERS, OPTIMIZERS,
DefaultOptimizerConstructor, Hook, OptimizerHook)
from mmcv.utils import build_from_cfg
@OPTIMIZER_BUILDERS.register_module()
class KDOptimizerBuilder(DefaultOptimizerConstructor):
def __init__(self, optimizer_cfg, paramwise_cfg=None):
super(KDOptimizerBuilder, self).__init__(optimizer_cfg,
paramwise_cfg)
def __call__(self, model):
if hasattr(model, 'module'):
model = model.module
optimizer_cfg = self.optimizer_cfg.copy()
# if no paramwise option is specified, just use the global setting
if not self.paramwise_cfg:
optimizer_cfg['params'] = model.student.parameters()
student_optimizer = build_from_cfg(optimizer_cfg,
OPTIMIZERS)
return student_optimizer
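# Hedged usage sketch (added; the keys below are illustrative): pointing
# `constructor` at KDOptimizerBuilder makes mmcv's build_optimizer update
# only the student branch of a distillation model.
def _example_kd_optimizer_cfg():
    return dict(constructor='KDOptimizerBuilder', type='SGD', lr=0.1,
                momentum=0.9, weight_decay=1e-4)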
| 1,003 | 37.615385 | 74 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/core/utils/dist_utils.py | # Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict
import torch.distributed as dist
from mmcv.runner import OptimizerHook
from torch._utils import (_flatten_dense_tensors, _take_tensors,
_unflatten_dense_tensors)
def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
if bucket_size_mb > 0:
bucket_size_bytes = bucket_size_mb * 1024 * 1024
buckets = _take_tensors(tensors, bucket_size_bytes)
else:
buckets = OrderedDict()
for tensor in tensors:
tp = tensor.type()
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(tensor)
buckets = buckets.values()
for bucket in buckets:
flat_tensors = _flatten_dense_tensors(bucket)
dist.all_reduce(flat_tensors)
flat_tensors.div_(world_size)
for tensor, synced in zip(
bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
tensor.copy_(synced)
def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
grads = [
param.grad.data for param in params
if param.requires_grad and param.grad is not None
]
world_size = dist.get_world_size()
if coalesce:
_allreduce_coalesced(grads, world_size, bucket_size_mb)
else:
for tensor in grads:
dist.all_reduce(tensor.div_(world_size))
class DistOptimizerHook(OptimizerHook):
def __init__(self, grad_clip=None, coalesce=True, bucket_size_mb=-1):
self.grad_clip = grad_clip
self.coalesce = coalesce
self.bucket_size_mb = bucket_size_mb
def after_train_iter(self, runner):
runner.optimizer.zero_grad()
runner.outputs['loss'].backward()
if self.grad_clip is not None:
self.clip_grads(runner.model.parameters())
runner.optimizer.step()
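# Hedged sketch (added; assumes torch.distributed has been initialized,
# e.g. via init_process_group): gradients are averaged across ranks after
# backward and before optimizer.step(), optionally in coalesced buckets.
def _example_allreduce(model):
    allreduce_grads(model.parameters(), coalesce=True, bucket_size_mb=16)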
| 1,904 | 31.844828 | 73 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/core/utils/visualize.py | import os.path as osp
from mmcv.utils import TORCH_VERSION, digit_version
from mmcv.runner.dist_utils import master_only
from mmcv.runner.hooks import HOOKS
from mmcv.runner.hooks.logger.base import LoggerHook
from collections import OrderedDict
import numpy as np
@HOOKS.register_module()
class TensorboardVisLoggerHook(LoggerHook):
def __init__(self,
log_dir=None,
interval=10,
vis_tags=None,
ignore_last=True,
reset_flag=False,
by_epoch=True):
super(TensorboardVisLoggerHook, self).__init__(interval, ignore_last,
reset_flag, by_epoch)
self.log_dir = log_dir
self.vis_tags = vis_tags
@master_only
def before_run(self, runner):
super(TensorboardVisLoggerHook, self).before_run(runner)
if (TORCH_VERSION == 'parrots'
or digit_version(TORCH_VERSION) < digit_version('1.1')):
try:
from tensorboardX import SummaryWriter
except ImportError:
raise ImportError('Please install tensorboardX to use '
'TensorboardLoggerHook.')
else:
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
raise ImportError(
'Please run "pip install future tensorboard" to install '
'the dependencies to use torch.utils.tensorboard '
'(applicable to PyTorch 1.1 or higher)')
if self.log_dir is None:
self.log_dir = osp.join(runner.work_dir, 'tf_logs')
self.writer = SummaryWriter(self.log_dir)
@master_only
def log(self, runner):
tags = self.get_loggable_tags(runner, allow_text=True, tags_to_skip=('time', 'data_time', 'relation'))
for tag, val in tags.items():
if isinstance(val, str):
self.writer.add_text(tag, val, self.get_iter(runner))
else:
self.writer.add_scalar(tag, val, self.get_iter(runner))
if self.vis_tags is not None:
for tag in self.vis_tags:
if tag in runner.log_buffer.output.keys():
val = runner.log_buffer.output[tag]
self.writer.add_image(tag, val, self.get_iter(runner))
@master_only
def after_run(self, runner):
self.writer.close()
class LogBuffer_ignore:
    def __init__(self, ignore_key=('relation', )):
self.val_history = OrderedDict()
self.n_history = OrderedDict()
self.output = OrderedDict()
        self.ignore_keys = ignore_key
self.ready = False
def clear(self):
self.val_history.clear()
self.n_history.clear()
self.clear_output()
def clear_output(self):
self.output.clear()
self.ready = False
def update(self, vars, count=1):
assert isinstance(vars, dict)
for key, var in vars.items():
if key not in self.val_history:
self.val_history[key] = []
self.n_history[key] = []
self.val_history[key].append(var)
self.n_history[key].append(count)
def average(self, n=0):
"""Average latest n values or all values."""
assert n >= 0
for key in self.val_history:
if key in self.ignore_keys:
self.output[key] = self.val_history[key][-1]
else:
values = np.array(self.val_history[key][-n:])
nums = np.array(self.n_history[key][-n:])
avg = np.sum(values * nums) / np.sum(nums)
self.output[key] = avg
self.ready = True | 3,792 | 34.12037 | 110 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/models/necks/gap.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from ..builder import NECKS
@NECKS.register_module()
class GlobalAveragePooling(nn.Module):
"""Global Average Pooling neck.
Note that we use `view` to remove extra channel after pooling. We do not
use `squeeze` as it will also remove the batch dimension when the tensor
has a batch dimension of size 1, which can lead to unexpected errors.
Args:
dim (int): Dimensions of each sample channel, can be one of {1, 2, 3}.
Default: 2
"""
def __init__(self, dim=2):
super(GlobalAveragePooling, self).__init__()
assert dim in [1, 2, 3], 'GlobalAveragePooling dim only support ' \
f'{1, 2, 3}, get {dim} instead.'
if dim == 1:
self.gap = nn.AdaptiveAvgPool1d(1)
elif dim == 2:
self.gap = nn.AdaptiveAvgPool2d((1, 1))
else:
self.gap = nn.AdaptiveAvgPool3d((1, 1, 1))
def init_weights(self):
pass
def forward(self, inputs):
if isinstance(inputs, tuple):
outs = tuple([self.gap(x) for x in inputs])
outs = tuple(
[out.view(x.size(0), -1) for out, x in zip(outs, inputs)])
elif isinstance(inputs, torch.Tensor):
outs = self.gap(inputs)
outs = outs.view(inputs.size(0), -1)
else:
raise TypeError('neck inputs should be tuple or torch.tensor')
return outs
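# Hedged sketch (added for illustration): the neck flattens pooled features
# to (N, C) and transparently handles tuple inputs from multi-stage
# backbones.
def _example_gap():
    neck = GlobalAveragePooling(dim=2)
    feats = (torch.randn(4, 64, 8, 8), torch.randn(4, 128, 4, 4))
    outs = neck(feats)
    assert outs[0].shape == (4, 64) and outs[1].shape == (4, 128)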
| 1,492 | 31.456522 | 78 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/models/classifiers/base.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
import mmcv
import torch
import torch.distributed as dist
from mmcv.runner import BaseModule
from mmcls.core.visualization import imshow_infos
# TODO import `auto_fp16` from mmcv and delete them from mmcls
try:
from mmcv.runner import auto_fp16
except ImportError:
warnings.warn('auto_fp16 from mmcls will be deprecated.'
'Please install mmcv>=1.1.4.')
from mmcls.core import auto_fp16
class BaseClassifier(BaseModule, metaclass=ABCMeta):
"""Base class for classifiers."""
def __init__(self, init_cfg=None):
super(BaseClassifier, self).__init__(init_cfg)
self.fp16_enabled = False
@property
def with_neck(self):
return hasattr(self, 'neck') and self.neck is not None
@property
def with_head(self):
return hasattr(self, 'head') and self.head is not None
@abstractmethod
def extract_feat(self, imgs):
pass
def extract_feats(self, imgs):
assert isinstance(imgs, list)
for img in imgs:
yield self.extract_feat(img)
@abstractmethod
def forward_train(self, imgs, **kwargs):
"""
Args:
img (list[Tensor]): List of tensors of shape (1, C, H, W).
Typically these should be mean centered and std scaled.
kwargs (keyword arguments): Specific to concrete implementation.
"""
pass
@abstractmethod
def simple_test(self, img, **kwargs):
pass
def forward_test(self, imgs, **kwargs):
"""
Args:
imgs (List[Tensor]): the outer list indicates test-time
augmentations and inner Tensor should have a shape NxCxHxW,
which contains all images in the batch.
"""
if isinstance(imgs, torch.Tensor):
imgs = [imgs]
for var, name in [(imgs, 'imgs')]:
if not isinstance(var, list):
raise TypeError(f'{name} must be a list, but got {type(var)}')
if len(imgs) == 1:
return self.simple_test(imgs[0], **kwargs)
else:
raise NotImplementedError('aug_test has not been implemented')
@auto_fp16(apply_to=('img', ))
def forward(self, img, return_loss=True, **kwargs):
"""Calls either forward_train or forward_test depending on whether
return_loss=True.
Note this setting will change the expected inputs. When
`return_loss=True`, img and img_meta are single-nested (i.e. Tensor and
        List[dict]), and when `return_loss=False`, img and img_meta should be
double nested (i.e. List[Tensor], List[List[dict]]), with the outer
list indicating test time augmentations.
"""
if return_loss:
return self.forward_train(img, **kwargs)
else:
return self.forward_test(img, **kwargs)
def _parse_losses(self, losses):
log_vars = OrderedDict()
for loss_name, loss_value in losses.items():
if isinstance(loss_value, torch.Tensor):
log_vars[loss_name] = loss_value.mean()
elif isinstance(loss_value, list):
log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
elif isinstance(loss_value, dict):
for name, value in loss_value.items():
log_vars[name] = value
else:
raise TypeError(
f'{loss_name} is not a tensor or list of tensors')
loss = sum(_value for _key, _value in log_vars.items()
if 'loss' in _key)
log_vars['loss'] = loss
for loss_name, loss_value in log_vars.items():
# reduce loss when distributed training
if dist.is_available() and dist.is_initialized():
loss_value = loss_value.data.clone()
dist.all_reduce(loss_value.div_(dist.get_world_size()))
log_vars[loss_name] = loss_value.item()
return loss, log_vars
def train_step(self, data, optimizer):
"""The iteration step during training.
This method defines an iteration step during training, except for the
back propagation and optimizer updating, which are done in an optimizer
hook. Note that in some complicated cases or models, the whole process
including back propagation and optimizer updating are also defined in
this method, such as GAN.
Args:
data (dict): The output of dataloader.
optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
runner is passed to ``train_step()``. This argument is unused
and reserved.
Returns:
dict: Dict of outputs. The following fields are contained.
- loss (torch.Tensor): A tensor for back propagation, which \
can be a weighted sum of multiple losses.
- log_vars (dict): Dict contains all the variables to be sent \
to the logger.
- num_samples (int): Indicates the batch size (when the model \
is DDP, it means the batch size on each GPU), which is \
used for averaging the logs.
"""
losses = self(**data)
loss, log_vars = self._parse_losses(losses)
outputs = dict(
loss=loss, log_vars=log_vars, num_samples=len(data['img'].data))
return outputs
def val_step(self, data, optimizer):
"""The iteration step during validation.
This method shares the same signature as :func:`train_step`, but used
during val epochs. Note that the evaluation after training epochs is
not implemented with this method, but an evaluation hook.
"""
losses = self(**data)
loss, log_vars = self._parse_losses(losses)
outputs = dict(
loss=loss, log_vars=log_vars, num_samples=len(data['img'].data))
return outputs
def show_result(self,
img,
result,
text_color='white',
font_scale=0.5,
row_width=20,
show=False,
fig_size=(15, 10),
win_name='',
wait_time=0,
out_file=None):
"""Draw `result` over `img`.
Args:
img (str or ndarray): The image to be displayed.
result (dict): The classification results to draw over `img`.
text_color (str or tuple or :obj:`Color`): Color of texts.
font_scale (float): Font scales of texts.
row_width (int): width between each row of results on the image.
show (bool): Whether to show the image.
Default: False.
fig_size (tuple): Image show figure size. Defaults to (15, 10).
win_name (str): The window name.
wait_time (int): How many seconds to display the image.
Defaults to 0.
out_file (str or None): The filename to write the image.
Default: None.
Returns:
            img (ndarray): Image with overlaid results.
"""
img = mmcv.imread(img)
img = img.copy()
img = imshow_infos(
img,
result,
text_color=text_color,
font_size=int(font_scale * 50),
row_width=row_width,
win_name=win_name,
show=show,
fig_size=fig_size,
wait_time=wait_time,
out_file=out_file)
return img
| 7,775 | 35 | 79 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/models/classifiers/image.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import numpy as np
import warnings
import torch.nn as nn
import torch.nn.functional as F
from ..builder import CLASSIFIERS, build_backbone, build_head, build_neck
from ..utils.augment import Augments
from .base import BaseClassifier
@CLASSIFIERS.register_module()
class ImageClassifier(BaseClassifier):
def __init__(self,
backbone,
neck=None,
head=None,
pretrained=None,
train_cfg=None,
init_cfg=None):
super(ImageClassifier, self).__init__(init_cfg)
if pretrained is not None:
warnings.warn('DeprecationWarning: pretrained is a deprecated \
key, please consider using init_cfg')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
return_tuple = backbone.pop('return_tuple', True)
self.backbone = build_backbone(backbone)
if return_tuple is False:
warnings.warn(
'The `return_tuple` is a temporary arg, we will force to '
'return tuple in the future. Please handle tuple in your '
'custom neck or head.', DeprecationWarning)
self.return_tuple = return_tuple
if neck is not None:
self.neck = build_neck(neck)
if head is not None:
self.head = build_head(head)
self.augments = None
if train_cfg is not None:
augments_cfg = train_cfg.get('augments', None)
if augments_cfg is not None:
self.augments = Augments(augments_cfg)
else:
# Considering BC-breaking
mixup_cfg = train_cfg.get('mixup', None)
cutmix_cfg = train_cfg.get('cutmix', None)
assert mixup_cfg is None or cutmix_cfg is None, \
'If mixup and cutmix are set simultaneously,' \
'use augments instead.'
if mixup_cfg is not None:
warnings.warn('The mixup attribute will be deprecated. '
'Please use augments instead.')
cfg = copy.deepcopy(mixup_cfg)
cfg['type'] = 'BatchMixup'
# In the previous version, mixup_prob is always 1.0.
cfg['prob'] = 1.0
self.augments = Augments(cfg)
if cutmix_cfg is not None:
warnings.warn('The cutmix attribute will be deprecated. '
'Please use augments instead.')
cfg = copy.deepcopy(cutmix_cfg)
cutmix_prob = cfg.pop('cutmix_prob')
cfg['type'] = 'BatchCutMix'
cfg['prob'] = cutmix_prob
self.augments = Augments(cfg)
def get_logit(self, img):
x = self.extract_feat(img)
if isinstance(x, tuple):
x = x[-1]
return self.head.fc(x)
def extract_feat(self, img):
"""Directly extract features from the backbone + neck."""
x = self.backbone(img)
if self.return_tuple:
if not isinstance(x, tuple):
x = (x, )
warnings.simplefilter('once')
warnings.warn(
'We will force all backbones to return a tuple in the '
'future. Please check your backbone and wrap the output '
'as a tuple.', DeprecationWarning)
else:
if isinstance(x, tuple):
x = x[-1]
if self.with_neck:
x = self.neck(x)
return x
def forward_train(self, img, gt_label, **kwargs):
"""Forward computation during training.
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
gt_label (Tensor): It should be of shape (N, 1) encoding the
ground-truth label of input images for single label task. It
                should be of shape (N, C) encoding the ground-truth label
of input images for multi-labels task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
if self.augments is not None:
img, gt_label = self.augments(img, gt_label)
x = self.extract_feat(img)
losses = dict()
try:
loss = self.head.forward_train(x, gt_label)
except TypeError as e:
if 'not tuple' in str(e) and self.return_tuple:
                raise TypeError(
'Seems the head cannot handle tuple input. We have '
'changed all backbones\' output to a tuple. Please '
'update your custom head\'s forward function. '
'Temporarily, you can set "return_tuple=False" in '
'your backbone config to disable this feature.')
raise e
losses.update(loss)
return losses
def simple_test(self, img, img_metas):
"""Test without augmentation."""
x = self.extract_feat(img)
try:
res = self.head.simple_test(x)
except TypeError as e:
if 'not tuple' in str(e) and self.return_tuple:
                raise TypeError(
'Seems the head cannot handle tuple input. We have '
'changed all backbones\' output to a tuple. Please '
'update your custom head\'s forward function. '
'Temporarily, you can set "return_tuple=False" in '
'your backbone config to disable this feature.')
raise e
return res
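# Hedged config sketch (added; the exact backbone/head settings below are
# placeholders, not this repo's recommended values):
def _example_image_classifier_cfg():
    return dict(
        type='ImageClassifier',
        backbone=dict(type='ResNet', depth=18, out_indices=(3, )),
        neck=dict(type='GlobalAveragePooling'),
        head=dict(type='LinearClsHead', num_classes=1000, in_channels=512,
                  loss=dict(type='CrossEntropyLoss')))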
| 5,829 | 37.355263 | 77 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/models/classifiers/kf.py | import copy
import numpy as np
import torch
import torch.nn.functional as F
import warnings
from torch import nn
from mmcls.models.losses.kd_loss import (InfoMax_loss, InfoMin_loss)
from ..builder import (CLASSIFIERS, build_backbone, build_head, build_loss,
build_neck)
from ..utils.augment import Augments
from .base import BaseClassifier
@CLASSIFIERS.register_module()
class KFImageClassifier(BaseClassifier):
def __init__(self,
backbone,
kd_loss,
neck=None,
head=None,
pretrained=None,
train_cfg=None,
init_cfg=None):
super(KFImageClassifier, self).__init__(init_cfg)
if pretrained is not None:
warnings.warn('DeprecationWarning: pretrained is a deprecated \
key, please consider using init_cfg')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
assert 'student' in backbone.keys(), 'student network should be specified'
assert 'teacher' in backbone.keys(), 'teacher network should be specified'
return_tuple = backbone.pop('return_tuple', True)
self.num_task = backbone['num_task']
self.student = nn.ModuleDict(
{
'CKN': build_backbone(backbone['student']['CKN']),
'TSN': nn.ModuleList([build_backbone(backbone['student']['TSN']) for i in range(self.num_task)]),
'neck': build_neck(neck['student']),
'head_task': build_head(head['task']),
'head': build_head(head['student'])
}
)
self.teacher = nn.ModuleDict(
{
'backbone': build_backbone(backbone['teacher']),
'neck': build_neck(neck['teacher']),
'head': build_head(head['teacher'])
}
)
self.feat_channels_student = train_cfg['feat_channels']['student']
self.feat_channels_teacher = train_cfg['feat_channels']['teacher']
feat_fcs = []
for i in range(len(self.feat_channels_student)):
feat_fcs.append(nn.Sequential(
nn.Linear(
self.feat_channels_teacher[i], self.feat_channels_student[i]),
nn.BatchNorm1d(self.feat_channels_student[i]),
nn.ReLU(True),
nn.Linear(
self.feat_channels_student[i], self.feat_channels_student[i])
)
)
self.feat_fcs = nn.ModuleList(feat_fcs)
self.criterionCls = F.cross_entropy
self.criterionTask = F.binary_cross_entropy_with_logits
self.criterionKD = build_loss(kd_loss)
self.lambda_kd = train_cfg['lambda_kd']
self.alpha = train_cfg['alpha']
self.beta = train_cfg['beta']
self.lambda_feat = train_cfg['lambda_feat']
self.teacher_ckpt = train_cfg['teacher_checkpoint']
self.task_weight = train_cfg['task_weight']
if return_tuple is False:
warnings.warn(
'The `return_tuple` is a temporary arg, we will force to '
'return tuple in the future. Please handle tuple in your '
'custom neck or head.', DeprecationWarning)
self.return_tuple = return_tuple
self.load_teacher()
self.augments = None
if train_cfg is not None:
augments_cfg = train_cfg.get('augments', None)
if augments_cfg is not None:
self.augments = Augments(augments_cfg)
else:
# Considering BC-breaking
mixup_cfg = train_cfg.get('mixup', None)
cutmix_cfg = train_cfg.get('cutmix', None)
assert mixup_cfg is None or cutmix_cfg is None, \
'If mixup and cutmix are set simultaneously,' \
'use augments instead.'
if mixup_cfg is not None:
warnings.warn('The mixup attribute will be deprecated. '
'Please use augments instead.')
cfg = copy.deepcopy(mixup_cfg)
cfg['type'] = 'BatchMixup'
# In the previous version, mixup_prob is always 1.0.
cfg['prob'] = 1.0
self.augments = Augments(cfg)
if cutmix_cfg is not None:
warnings.warn('The cutmix attribute will be deprecated. '
'Please use augments instead.')
cfg = copy.deepcopy(cutmix_cfg)
cutmix_prob = cfg.pop('cutmix_prob')
cfg['type'] = 'BatchCutMix'
cfg['prob'] = cutmix_prob
self.augments = Augments(cfg)
def extract_feat(self, imgs):
pass
    def load_teacher(self):
        split_lines = '*' * 20
        state_dict = torch.load(self.teacher_ckpt)
        if 'state_dict' in state_dict.keys():
            state_dict = state_dict['state_dict']
        try:
            self.teacher.load_state_dict(state_dict)
            print(split_lines)
            print(
                f'Teacher pretrained model has been loaded {self.teacher_ckpt}')
            print(split_lines)
        except Exception:
            print('Teacher model not loaded')
            print(state_dict.keys())
            print(self.teacher.state_dict().keys())
            raise AssertionError('Teacher model not loaded')
for param in self.teacher.parameters():
param.requires_grad = False
#####################################################
# Functions for teacher network
def extract_teacher_feat(self, img):
"""Directly extract features from the backbone + neck."""
x = self.teacher['backbone'](img)
if self.return_tuple:
if not isinstance(x, tuple):
x = (x, )
warnings.simplefilter('once')
warnings.warn(
'We will force all backbones to return a tuple in the '
'future. Please check your backbone and wrap the output '
'as a tuple.', DeprecationWarning)
else:
if isinstance(x, tuple):
x = x[-1]
# if self.with_neck:
x = self.teacher['neck'](x)
return x
def get_teacher_logit(self, img):
"""Directly extract features from the backbone + neck."""
x = self.extract_teacher_feat(img)
if isinstance(x, tuple):
last_x = x[-1]
logit = self.teacher['head'].fc(last_x) # head
return logit, x
#####################################################
# Functions for student network
def extract_common_feat(self, img):
"""Directly extract features from the backbone + neck."""
x = self.student['CKN'](img)
if self.return_tuple:
if not isinstance(x, tuple):
x = (x, )
warnings.simplefilter('once')
warnings.warn(
'We will force all backbones to return a tuple in the '
'future. Please check your backbone and wrap the output '
'as a tuple.', DeprecationWarning)
else:
if isinstance(x, tuple):
x = x[-1]
x = self.student['neck'](x)
return x
def extract_task_feat(self, img):
"""Directly extract features from the backbone + neck."""
result = dict(feats=[],
mu_vars=[])
for i in range(self.num_task):
(mu, var), x = self.student['TSN'][i](img)
if self.return_tuple:
if not isinstance(x, tuple):
x = (x, )
else:
if isinstance(x, tuple):
x = x[-1]
result['feats'].append(x)
result['mu_vars'].append((mu, var))
return result
def extract_student_feat(self, img):
common_xs = self.extract_common_feat(img)
task_result = self.extract_task_feat(img)
if self.num_task == 1:
return common_xs, task_result['feats'][0], task_result
else:
return common_xs, task_result['feats'], task_result
def get_student_logit(self, img):
"""Directly extract features from the backbone + neck."""
common_xs, task_feat, task_result = self.extract_student_feat(
img)
if isinstance(common_xs, tuple):
common_x = common_xs[-1]
if isinstance(task_feat, tuple):
task_feat = task_feat[-1]
if isinstance(task_feat, list):
feat = [common_x + task_f[-1] for task_f in task_feat]
else:
feat = common_x + task_feat
logit = self.student['head'].get_logits(feat) # head
task_logit = self.student['head_task'].get_logits(task_feat)
return logit, task_logit, common_xs, task_result
def get_logit(self, img):
logit, _, _, _ = self.get_student_logit(img)
return logit
def get_adv_logit(self, img):
_, task_logit, _, _ = self.get_student_logit(
img)
return task_logit
def forward_train(self, img, gt_label, **kwargs):
"""Forward computation during training.
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
gt_label (Tensor): It should be of shape (N, 1) encoding the
ground-truth label of input images for single label task. It
                should be of shape (N, C) encoding the ground-truth label
of input images for multi-labels task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
if self.augments is not None:
img, gt_label = self.augments(img, gt_label)
with torch.no_grad():
teacher_logit, teacher_x = self.get_teacher_logit(img)
student_logit, task_logit, student_common_x, task_result = self.get_student_logit(
img)
loss_infomax = 0.0
# Deep feature simulation for KD
assert len(teacher_x) == len(student_common_x)
for layer_id, (teacher_x_layer, student_x_layer) in enumerate(zip(teacher_x, student_common_x)):
loss_infomax += InfoMax_loss(self.feat_fcs[layer_id](teacher_x_layer),
student_x_layer) * self.lambda_feat
loss_infomax = loss_infomax/len(student_common_x)
# Output simulation for KD
loss_kd = self.criterionKD(
student_logit, teacher_logit.detach()) * self.lambda_kd
# Cls loss and infor loss
loss_cls = self.student['head'].loss(student_logit, gt_label)['loss']
# onehot_gt_label = F.one_hot(gt_label,
# num_classes=student_logit.shape[1]).float()
loss_task = self.student['head_task'].loss(task_logit, gt_label)['loss'] * self.task_weight
# InfoMin Loss for task feature
loss_infomin = 0.0
for mu, log_var in task_result['mu_vars']:
loss_infomin += InfoMin_loss(mu, log_var) * self.beta
losses = dict(loss_infomax=loss_infomax,
loss_kd=loss_kd,
loss_cls=loss_cls,
loss_task=loss_task,
loss_infomin=loss_infomin)
# print(losses)
return losses
def simple_test(self, img, img_metas):
"""Test without augmentation."""
cls_score = self.get_logit(img)
try:
if isinstance(cls_score, list):
cls_score = sum(cls_score) / float(len(cls_score))
pred = F.softmax(
cls_score, dim=1) if cls_score is not None else None
res = self.student['head'].post_process(pred)
except TypeError as e:
if 'not tuple' in str(e) and self.return_tuple:
                raise TypeError(
'Seems the head cannot handle tuple input. We have '
'changed all backbones\' output to a tuple. Please '
'update your custom head\'s forward function. '
'Temporarily, you can set "return_tuple=False" in '
'your backbone config to disable this feature.')
raise e
return res
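# Hedged note (added; the formula is an assumption, not this repo's
# implementation): InfoMin_loss(mu, log_var) is used like a VAE-style KL
# term that pushes each task-specific code toward a standard normal prior,
# e.g. 0.5 * mean(mu ** 2 + log_var.exp() - 1 - log_var), while
# InfoMax_loss pulls the teacher and student common features together.
# Only the call sites above come from this file.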
| 12,546 | 38.332288 | 113 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/models/classifiers/kd.py | import copy
import warnings
from shutil import ExecError
import torch
import torch.nn.functional as F
from torch import nn
from ..builder import (CLASSIFIERS, build_backbone, build_head, build_loss,
build_neck)
from ..utils.augment import Augments
from .base import BaseClassifier
@CLASSIFIERS.register_module()
class KDImageClassifier(BaseClassifier):
def __init__(self,
backbone,
kd_loss,
neck=None,
head=None,
pretrained=None,
train_cfg=None,
init_cfg=None):
super(KDImageClassifier, self).__init__(init_cfg)
if pretrained is not None:
warnings.warn('DeprecationWarning: pretrained is a deprecated \
key, please consider using init_cfg')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
assert 'student' in backbone.keys(), 'student network should be specified'
assert 'teacher' in backbone.keys(), 'teacher network should be specified'
return_tuple = backbone.pop('return_tuple', True)
self.student = nn.ModuleDict(
{
'backbone': build_backbone(backbone['student']),
'neck': build_neck(neck['student']),
'head': build_head(head['student'])
}
)
self.teacher = nn.ModuleDict(
{
'backbone': build_backbone(backbone['teacher']),
'neck': build_neck(neck['teacher']),
'head': build_head(head['teacher'])
}
)
self.criterionCls = F.cross_entropy
self.criterionKD = build_loss(kd_loss)
self.lambda_kd = train_cfg['lambda_kd']
self.teacher_ckpt = train_cfg['teacher_checkpoint']
if return_tuple is False:
warnings.warn(
'The `return_tuple` is a temporary arg, we will force to '
'return tuple in the future. Please handle tuple in your '
'custom neck or head.', DeprecationWarning)
self.return_tuple = return_tuple
self.load_teacher()
self.augments = None
if train_cfg is not None:
augments_cfg = train_cfg.get('augments', None)
if augments_cfg is not None:
self.augments = Augments(augments_cfg)
else:
# Considering BC-breaking
mixup_cfg = train_cfg.get('mixup', None)
cutmix_cfg = train_cfg.get('cutmix', None)
assert mixup_cfg is None or cutmix_cfg is None, \
'If mixup and cutmix are set simultaneously,' \
'use augments instead.'
if mixup_cfg is not None:
warnings.warn('The mixup attribute will be deprecated. '
'Please use augments instead.')
cfg = copy.deepcopy(mixup_cfg)
cfg['type'] = 'BatchMixup'
# In the previous version, mixup_prob is always 1.0.
cfg['prob'] = 1.0
self.augments = Augments(cfg)
if cutmix_cfg is not None:
warnings.warn('The cutmix attribute will be deprecated. '
'Please use augments instead.')
cfg = copy.deepcopy(cutmix_cfg)
cutmix_prob = cfg.pop('cutmix_prob')
cfg['type'] = 'BatchCutMix'
cfg['prob'] = cutmix_prob
self.augments = Augments(cfg)
def load_teacher(self):
try:
self.teacher.load_state_dict(
torch.load(self.teacher_ckpt)['state_dict'])
print(
f'Teacher pretrained model has been loaded {self.teacher_ckpt}')
        except Exception:
            raise ExecError('Teacher model not loaded')
for param in self.teacher.parameters():
param.requires_grad = False
###########################
def get_logit(self, img):
"""Directly extract features from the backbone + neck."""
x = self.extract_feat(self.student, img)
if isinstance(x, tuple):
x = x[-1]
logit = self.student['head'].fc(x) # head
return logit
def get_logits(self, model, img):
"""Directly extract features from the backbone + neck."""
x = self.extract_feat(model, img)
if isinstance(x, tuple):
x = x[-1]
logit = model['head'].fc(x) # head
return logit
def extract_feat(self, model, img):
"""Directly extract features from the backbone + neck."""
x = model['backbone'](img)
if self.return_tuple:
if not isinstance(x, tuple):
x = (x, )
warnings.simplefilter('once')
warnings.warn(
'We will force all backbones to return a tuple in the '
'future. Please check your backbone and wrap the output '
'as a tuple.', DeprecationWarning)
else:
if isinstance(x, tuple):
x = x[-1]
# if self.with_neck:
x = model['neck'](x)
return x
def forward_train(self, img, gt_label, **kwargs):
"""Forward computation during training.
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
gt_label (Tensor): It should be of shape (N, 1) encoding the
ground-truth label of input images for single label task. It
                should be of shape (N, C) encoding the ground-truth label
of input images for multi-labels task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
if self.augments is not None:
img, gt_label = self.augments(img, gt_label)
with torch.no_grad():
teacher_logit = self.get_logits(self.teacher, img)
student_logit = self.get_logits(self.student, img)
loss_cls = self.criterionCls(student_logit, gt_label)
loss_kd = self.criterionKD(
student_logit, teacher_logit.detach()) * self.lambda_kd
losses = dict(loss_cls=loss_cls,
loss_kd=loss_kd)
return losses
def simple_test(self, img, img_metas):
"""Test without augmentation."""
x = self.extract_feat(self.student, img)
try:
res = self.student['head'].simple_test(x)
except TypeError as e:
if 'not tuple' in str(e) and self.return_tuple:
                raise TypeError(
'Seems the head cannot handle tuple input. We have '
'changed all backbones\' output to a tuple. Please '
'update your custom head\'s forward function. '
'Temporarily, you can set "return_tuple=False" in '
'your backbone config to disable this feature.')
raise e
return res
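# Hedged sketch (added; an assumption about what build_loss(kd_loss) may
# produce, not this repo's registered loss): a classic soft-target KD
# criterion with temperature T.
def _example_soft_kd_loss(student_logit, teacher_logit, T=4.0):
    p_s = F.log_softmax(student_logit / T, dim=1)
    p_t = F.softmax(teacher_logit / T, dim=1)
    return F.kl_div(p_s, p_t, reduction='batchmean') * (T * T)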
| 7,148 | 37.643243 | 82 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/models/utils/embed.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmcv.runner.base_module import BaseModule
from .helpers import to_2tuple
class PatchEmbed(BaseModule):
"""Image to Patch Embedding.
We use a conv layer to implement PatchEmbed.
Args:
img_size (int | tuple): The size of input image. Default: 224
in_channels (int): The num of input channels. Default: 3
embed_dims (int): The dimensions of embedding. Default: 768
norm_cfg (dict, optional): Config dict for normalization layer.
Default: None
conv_cfg (dict, optional): The config dict for conv layers.
Default: None
init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization.
Default: None
"""
def __init__(self,
img_size=224,
in_channels=3,
embed_dims=768,
norm_cfg=None,
conv_cfg=None,
init_cfg=None):
super(PatchEmbed, self).__init__(init_cfg)
if isinstance(img_size, int):
img_size = to_2tuple(img_size)
elif isinstance(img_size, tuple):
if len(img_size) == 1:
img_size = to_2tuple(img_size[0])
assert len(img_size) == 2, \
f'The size of image should have length 1 or 2, ' \
f'but got {len(img_size)}'
self.img_size = img_size
self.embed_dims = embed_dims
# Use conv layer to embed
conv_cfg = conv_cfg or dict()
_conv_cfg = dict(
type='Conv2d', kernel_size=16, stride=16, padding=0, dilation=1)
_conv_cfg.update(conv_cfg)
self.projection = build_conv_layer(_conv_cfg, in_channels, embed_dims)
        # Calculate how many patches an input image is split into.
h_out, w_out = [(self.img_size[i] + 2 * self.projection.padding[i] -
self.projection.dilation[i] *
(self.projection.kernel_size[i] - 1) - 1) //
self.projection.stride[i] + 1 for i in range(2)]
self.patches_resolution = (h_out, w_out)
self.num_patches = h_out * w_out
if norm_cfg is not None:
self.norm = build_norm_layer(norm_cfg, embed_dims)[1]
else:
self.norm = None
def forward(self, x):
B, C, H, W = x.shape
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't " \
f'match model ({self.img_size[0]}*{self.img_size[1]}).'
        # The output size is (B, N, D), where N = H*W/P/P and D is embed_dims
x = self.projection(x).flatten(2).transpose(1, 2)
if self.norm is not None:
x = self.norm(x)
return x
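# Hedged sketch (added for illustration): with the default 16x16 conv, a
# 224x224 image yields 14 * 14 = 196 patch tokens of dimension 768.
def _example_patch_embed():
    embed = PatchEmbed(img_size=224, in_channels=3, embed_dims=768)
    tokens = embed(torch.randn(1, 3, 224, 224))
    assert tokens.shape == (1, 196, 768)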
# Modified from pytorch-image-models
class HybridEmbed(BaseModule):
"""CNN Feature Map Embedding.
Extract feature map from CNN, flatten,
project to embedding dim.
Args:
backbone (nn.Module): CNN backbone
img_size (int | tuple): The size of input image. Default: 224
feature_size (int | tuple, optional): Size of feature map extracted by
CNN backbone. Default: None
in_channels (int): The num of input channels. Default: 3
embed_dims (int): The dimensions of embedding. Default: 768
conv_cfg (dict, optional): The config dict for conv layers.
Default: None.
init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization.
Default: None.
"""
def __init__(self,
backbone,
img_size=224,
feature_size=None,
in_channels=3,
embed_dims=768,
conv_cfg=None,
init_cfg=None):
super(HybridEmbed, self).__init__(init_cfg)
assert isinstance(backbone, nn.Module)
if isinstance(img_size, int):
img_size = to_2tuple(img_size)
elif isinstance(img_size, tuple):
if len(img_size) == 1:
img_size = to_2tuple(img_size[0])
assert len(img_size) == 2, \
f'The size of image should have length 1 or 2, ' \
f'but got {len(img_size)}'
self.img_size = img_size
self.backbone = backbone
if feature_size is None:
with torch.no_grad():
# FIXME this is hacky, but most reliable way of
# determining the exact dim of the output feature
# map for all networks, the feature metadata has
# reliable channel and stride info, but using
# stride to calc feature dim requires info about padding of
# each stage that isn't captured.
training = backbone.training
if training:
backbone.eval()
o = self.backbone(
torch.zeros(1, in_channels, img_size[0], img_size[1]))
if isinstance(o, (list, tuple)):
# last feature if backbone outputs list/tuple of features
o = o[-1]
feature_size = o.shape[-2:]
feature_dim = o.shape[1]
backbone.train(training)
else:
feature_size = to_2tuple(feature_size)
if hasattr(self.backbone, 'feature_info'):
feature_dim = self.backbone.feature_info.channels()[-1]
else:
feature_dim = self.backbone.num_features
self.num_patches = feature_size[0] * feature_size[1]
# Use conv layer to embed
conv_cfg = conv_cfg or dict()
_conv_cfg = dict(
type='Conv2d', kernel_size=1, stride=1, padding=0, dilation=1)
_conv_cfg.update(conv_cfg)
self.projection = build_conv_layer(_conv_cfg, feature_dim, embed_dims)
def forward(self, x):
x = self.backbone(x)
if isinstance(x, (list, tuple)):
# last feature if backbone outputs list/tuple of features
x = x[-1]
x = self.projection(x).flatten(2).transpose(1, 2)
return x
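# Illustrative usage sketch (assumed example, not from the upstream file):
# `_ToyBackbone` is a hypothetical stand-in CNN with overall stride 32, so
# HybridEmbed infers a 7x7 feature map from a 224x224 input and emits
# 7 * 7 = 49 tokens after the 1x1 projection.
if __name__ == '__main__':
    import torch
    class _ToyBackbone(nn.Module):
        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(3, 64, kernel_size=32, stride=32)
        def forward(self, x):
            return self.conv(x)
    hybrid = HybridEmbed(_ToyBackbone(), img_size=224, embed_dims=768)
    assert hybrid(torch.randn(1, 3, 224, 224)).shape == (1, 49, 768)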
class PatchMerging(BaseModule):
"""Merge patch feature map.
    This layer uses nn.Unfold to group the feature map by kernel_size, then
    uses norm and linear layers to embed the grouped feature map.
Args:
input_resolution (tuple): The size of input patch resolution.
in_channels (int): The num of input channels.
expansion_ratio (Number): Expansion ratio of output channels. The num
of output channels is equal to int(expansion_ratio * in_channels).
kernel_size (int | tuple, optional): the kernel size in the unfold
layer. Defaults to 2.
stride (int | tuple, optional): the stride of the sliding blocks in the
            unfold layer. Defaults to be equal to kernel_size.
padding (int | tuple, optional): zero padding width in the unfold
layer. Defaults to 0.
dilation (int | tuple, optional): dilation parameter in the unfold
layer. Defaults to 1.
bias (bool, optional): Whether to add bias in linear layer or not.
Defaults to False.
norm_cfg (dict, optional): Config dict for normalization layer.
Defaults to dict(type='LN').
init_cfg (dict, optional): The extra config for initialization.
Defaults to None.
"""
def __init__(self,
input_resolution,
in_channels,
expansion_ratio,
kernel_size=2,
stride=None,
padding=0,
dilation=1,
bias=False,
norm_cfg=dict(type='LN'),
init_cfg=None):
super().__init__(init_cfg)
H, W = input_resolution
self.input_resolution = input_resolution
self.in_channels = in_channels
self.out_channels = int(expansion_ratio * in_channels)
if stride is None:
stride = kernel_size
kernel_size = to_2tuple(kernel_size)
stride = to_2tuple(stride)
padding = to_2tuple(padding)
dilation = to_2tuple(dilation)
self.sampler = nn.Unfold(kernel_size, dilation, padding, stride)
sample_dim = kernel_size[0] * kernel_size[1] * in_channels
if norm_cfg is not None:
self.norm = build_norm_layer(norm_cfg, sample_dim)[1]
else:
self.norm = None
self.reduction = nn.Linear(sample_dim, self.out_channels, bias=bias)
# See https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html
H_out = (H + 2 * padding[0] - dilation[0] *
(kernel_size[0] - 1) - 1) // stride[0] + 1
W_out = (W + 2 * padding[1] - dilation[1] *
(kernel_size[1] - 1) - 1) // stride[1] + 1
self.output_resolution = (H_out, W_out)
def forward(self, x):
"""
x: B, H*W, C
"""
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, 'input feature has wrong size'
x = x.view(B, H, W, C).permute([0, 3, 1, 2]) # B, C, H, W
        # Use nn.Unfold to merge patches. This is about 25% faster than the
        # original method, but requires modifying pretrained models for
        # compatibility.
x = self.sampler(x) # B, 4*C, H/2*W/2
x = x.transpose(1, 2) # B, H/2*W/2, 4*C
x = self.norm(x) if self.norm else x
x = self.reduction(x)
return x
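# Illustrative usage sketch (assumed example, not from the upstream file):
# with kernel_size=2 (stride defaults to the kernel size), a 56x56 token
# map with 96 channels is merged to 28x28 tokens with 2 * 96 = 192 channels.
if __name__ == '__main__':
    import torch
    merging = PatchMerging(input_resolution=(56, 56), in_channels=96,
                           expansion_ratio=2)
    tokens = torch.randn(1, 56 * 56, 96)
    assert merging(tokens).shape == (1, 28 * 28, 192)
    assert merging.output_resolution == (28, 28)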
| 9,624 | 36.893701 | 79 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/models/utils/se_layer.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from .make_divisible import make_divisible
class SELayer(BaseModule):
"""Squeeze-and-Excitation Module.
Args:
channels (int): The input (and output) channels of the SE layer.
squeeze_channels (None or int): The intermediate channel number of
SElayer. Default: None, means the value of ``squeeze_channels``
is ``make_divisible(channels // ratio, divisor)``.
ratio (int): Squeeze ratio in SELayer, the intermediate channel will
be ``make_divisible(channels // ratio, divisor)``. Only used when
``squeeze_channels`` is None. Default: 16.
        divisor (int): The divisor to true divide the channel number. Only
used when ``squeeze_channels`` is None. Default: 8.
conv_cfg (None or dict): Config dict for convolution layer. Default:
None, which means using conv2d.
act_cfg (dict or Sequence[dict]): Config dict for activation layer.
            If act_cfg is a dict, two activation layers will be configured
            by this dict. If act_cfg is a sequence of dicts, the first
            activation layer will be configured by the first dict and the
            second activation layer will be configured by the second dict.
Default: (dict(type='ReLU'), dict(type='Sigmoid'))
"""
def __init__(self,
channels,
squeeze_channels=None,
ratio=16,
divisor=8,
bias='auto',
conv_cfg=None,
act_cfg=(dict(type='ReLU'), dict(type='Sigmoid')),
init_cfg=None):
super(SELayer, self).__init__(init_cfg)
if isinstance(act_cfg, dict):
act_cfg = (act_cfg, act_cfg)
assert len(act_cfg) == 2
assert mmcv.is_tuple_of(act_cfg, dict)
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
if squeeze_channels is None:
squeeze_channels = make_divisible(channels // ratio, divisor)
assert isinstance(squeeze_channels, int) and squeeze_channels > 0, \
'"squeeze_channels" should be a positive integer, but get ' + \
f'{squeeze_channels} instead.'
self.conv1 = ConvModule(
in_channels=channels,
out_channels=squeeze_channels,
kernel_size=1,
stride=1,
bias=bias,
conv_cfg=conv_cfg,
act_cfg=act_cfg[0])
self.conv2 = ConvModule(
in_channels=squeeze_channels,
out_channels=channels,
kernel_size=1,
stride=1,
bias=bias,
conv_cfg=conv_cfg,
act_cfg=act_cfg[1])
def forward(self, x):
out = self.global_avgpool(x)
out = self.conv1(out)
out = self.conv2(out)
return x * out
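# Illustrative usage sketch (assumed example, not from the upstream file):
# the SE layer squeezes 256 channels to make_divisible(256 // 16, 8) = 16,
# then re-expands and rescales the input channel-wise, preserving its shape.
if __name__ == '__main__':
    import torch
    se = SELayer(channels=256, ratio=16)
    x = torch.randn(2, 256, 32, 32)
    assert se(x).shape == x.shape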
| 2,989 | 38.866667 | 77 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/models/utils/inverted_residual.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.utils.checkpoint as cp
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from .se_layer import SELayer
class InvertedResidual(BaseModule):
"""Inverted Residual Block.
Args:
in_channels (int): The input channels of this Module.
out_channels (int): The output channels of this Module.
mid_channels (int): The input channels of the depthwise convolution.
        kernel_size (int): The kernel size of the depthwise convolution.
Default: 3.
stride (int): The stride of the depthwise convolution. Default: 1.
        se_cfg (dict): Config dict for se layer. Default: None, which means no
se layer.
conv_cfg (dict): Config dict for convolution layer. Default: None,
which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU').
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
Returns:
Tensor: The output tensor.
"""
def __init__(self,
in_channels,
out_channels,
mid_channels,
kernel_size=3,
stride=1,
se_cfg=None,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
with_cp=False,
init_cfg=None):
super(InvertedResidual, self).__init__(init_cfg)
self.with_res_shortcut = (stride == 1 and in_channels == out_channels)
assert stride in [1, 2]
self.with_cp = with_cp
self.with_se = se_cfg is not None
self.with_expand_conv = (mid_channels != in_channels)
if self.with_se:
assert isinstance(se_cfg, dict)
if self.with_expand_conv:
self.expand_conv = ConvModule(
in_channels=in_channels,
out_channels=mid_channels,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.depthwise_conv = ConvModule(
in_channels=mid_channels,
out_channels=mid_channels,
kernel_size=kernel_size,
stride=stride,
padding=kernel_size // 2,
groups=mid_channels,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
if self.with_se:
self.se = SELayer(**se_cfg)
self.linear_conv = ConvModule(
in_channels=mid_channels,
out_channels=out_channels,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
def forward(self, x):
def _inner_forward(x):
out = x
if self.with_expand_conv:
out = self.expand_conv(out)
out = self.depthwise_conv(out)
if self.with_se:
out = self.se(out)
out = self.linear_conv(out)
if self.with_res_shortcut:
return x + out
else:
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
return out
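# Illustrative usage sketch (assumed example, not from the upstream file):
# a stride-1 block with equal in/out channels takes the residual shortcut;
# mid_channels > in_channels enables the 1x1 expansion convolution.
if __name__ == '__main__':
    import torch
    block = InvertedResidual(in_channels=16, out_channels=16,
                             mid_channels=64, kernel_size=3, stride=1)
    x = torch.randn(1, 16, 32, 32)
    assert block(x).shape == x.shape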
| 3,688 | 31.078261 | 78 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/models/utils/attention.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn.bricks.transformer import build_dropout
from mmcv.cnn.utils.weight_init import trunc_normal_
from mmcv.runner.base_module import BaseModule
from ..builder import ATTENTION
from .helpers import to_2tuple
class WindowMSA(BaseModule):
"""Window based multi-head self-attention (W-MSA) module with relative
position bias.
Args:
embed_dims (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to q, k, v.
Defaults to True.
qk_scale (float | None, optional): Override default qk scale of
head_dim ** -0.5 if set. Defaults to None.
attn_drop (float, optional): Dropout ratio of attention weight.
Defaults to 0.
proj_drop (float, optional): Dropout ratio of output. Defaults to 0.
init_cfg (dict, optional): The extra config for initialization.
Defaults to None.
"""
def __init__(self,
embed_dims,
window_size,
num_heads,
qkv_bias=True,
qk_scale=None,
attn_drop=0.,
proj_drop=0.,
init_cfg=None):
super().__init__(init_cfg)
self.embed_dims = embed_dims
self.window_size = window_size # Wh, Ww
self.num_heads = num_heads
head_embed_dims = embed_dims // num_heads
self.scale = qk_scale or head_embed_dims**-0.5
# define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1),
num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# About 2x faster than original impl
Wh, Ww = self.window_size
rel_index_coords = self.double_step_seq(2 * Ww - 1, Wh, 1, Ww)
rel_position_index = rel_index_coords + rel_index_coords.T
rel_position_index = rel_position_index.flip(1).contiguous()
self.register_buffer('relative_position_index', rel_position_index)
self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(embed_dims, embed_dims)
self.proj_drop = nn.Dropout(proj_drop)
self.softmax = nn.Softmax(dim=-1)
def init_weights(self):
super(WindowMSA, self).init_weights()
trunc_normal_(self.relative_position_bias_table, std=0.02)
def forward(self, x, mask=None):
"""
Args:
x (tensor): input features with shape of (num_windows*B, N, C)
mask (tensor, Optional): mask with shape of (num_windows, Wh*Ww,
Wh*Ww), value should be between (-inf, 0].
"""
B_, N, C = x.shape
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads,
C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[
2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
relative_position_bias = self.relative_position_bias_table[
self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1],
self.window_size[0] * self.window_size[1],
-1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(
2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N,
N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
@staticmethod
def double_step_seq(step1, len1, step2, len2):
seq1 = torch.arange(0, step1 * len1, step1)
seq2 = torch.arange(0, step2 * len2, step2)
return (seq1[:, None] + seq2[None, :]).reshape(1, -1)
@ATTENTION.register_module()
class ShiftWindowMSA(BaseModule):
"""Shift Window Multihead Self-Attention Module.
Args:
embed_dims (int): Number of input channels.
input_resolution (Tuple[int, int]): The resolution of the input feature
map.
num_heads (int): Number of attention heads.
window_size (int): The height and width of the window.
shift_size (int, optional): The shift step of each window towards
right-bottom. If zero, act as regular window-msa. Defaults to 0.
qkv_bias (bool, optional): If True, add a learnable bias to q, k, v.
Default: True
qk_scale (float | None, optional): Override default qk scale of
head_dim ** -0.5 if set. Defaults to None.
attn_drop (float, optional): Dropout ratio of attention weight.
Defaults to 0.0.
proj_drop (float, optional): Dropout ratio of output. Defaults to 0.
dropout_layer (dict, optional): The dropout_layer used before output.
Defaults to dict(type='DropPath', drop_prob=0.).
        auto_pad (bool, optional): Auto pad the feature map to be divisible by
            window_size. Defaults to False.
init_cfg (dict, optional): The extra config for initialization.
Default: None.
"""
def __init__(self,
embed_dims,
input_resolution,
num_heads,
window_size,
shift_size=0,
qkv_bias=True,
qk_scale=None,
attn_drop=0,
proj_drop=0,
dropout_layer=dict(type='DropPath', drop_prob=0.),
auto_pad=False,
init_cfg=None):
super().__init__(init_cfg)
self.embed_dims = embed_dims
self.input_resolution = input_resolution
self.shift_size = shift_size
self.window_size = window_size
if min(self.input_resolution) <= self.window_size:
# if window size is larger than input resolution, don't partition
self.shift_size = 0
self.window_size = min(self.input_resolution)
self.w_msa = WindowMSA(embed_dims, to_2tuple(self.window_size),
num_heads, qkv_bias, qk_scale, attn_drop,
proj_drop)
self.drop = build_dropout(dropout_layer)
H, W = self.input_resolution
# Handle auto padding
self.auto_pad = auto_pad
if self.auto_pad:
self.pad_r = (self.window_size -
W % self.window_size) % self.window_size
self.pad_b = (self.window_size -
H % self.window_size) % self.window_size
self.H_pad = H + self.pad_b
self.W_pad = W + self.pad_r
else:
H_pad, W_pad = self.input_resolution
assert H_pad % self.window_size + W_pad % self.window_size == 0,\
f'input_resolution({self.input_resolution}) is not divisible '\
f'by window_size({self.window_size}). Please check feature '\
f'map shape or set `auto_pad=True`.'
self.H_pad, self.W_pad = H_pad, W_pad
self.pad_r, self.pad_b = 0, 0
if self.shift_size > 0:
# calculate attention mask for SW-MSA
img_mask = torch.zeros((1, self.H_pad, self.W_pad, 1)) # 1 H W 1
h_slices = (slice(0, -self.window_size),
slice(-self.window_size,
-self.shift_size), slice(-self.shift_size, None))
w_slices = (slice(0, -self.window_size),
slice(-self.window_size,
-self.shift_size), slice(-self.shift_size, None))
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
# nW, window_size, window_size, 1
mask_windows = self.window_partition(img_mask)
mask_windows = mask_windows.view(
-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0,
float(-100.0)).masked_fill(
attn_mask == 0, float(0.0))
else:
attn_mask = None
self.register_buffer('attn_mask', attn_mask)
def forward(self, query):
H, W = self.input_resolution
B, L, C = query.shape
assert L == H * W, 'input feature has wrong size'
query = query.view(B, H, W, C)
if self.pad_r or self.pad_b:
query = F.pad(query, (0, 0, 0, self.pad_r, 0, self.pad_b))
# cyclic shift
if self.shift_size > 0:
shifted_query = torch.roll(
query,
shifts=(-self.shift_size, -self.shift_size),
dims=(1, 2))
else:
shifted_query = query
# nW*B, window_size, window_size, C
query_windows = self.window_partition(shifted_query)
# nW*B, window_size*window_size, C
query_windows = query_windows.view(-1, self.window_size**2, C)
# W-MSA/SW-MSA (nW*B, window_size*window_size, C)
attn_windows = self.w_msa(query_windows, mask=self.attn_mask)
# merge windows
attn_windows = attn_windows.view(-1, self.window_size,
self.window_size, C)
# B H' W' C
shifted_x = self.window_reverse(attn_windows, self.H_pad, self.W_pad)
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(
shifted_x,
shifts=(self.shift_size, self.shift_size),
dims=(1, 2))
else:
x = shifted_x
if self.pad_r or self.pad_b:
x = x[:, :H, :W, :].contiguous()
x = x.view(B, H * W, C)
x = self.drop(x)
return x
def window_reverse(self, windows, H, W):
window_size = self.window_size
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(B, H // window_size, W // window_size, window_size,
window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
def window_partition(self, x):
B, H, W, C = x.shape
window_size = self.window_size
x = x.view(B, H // window_size, window_size, W // window_size,
window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous()
windows = windows.view(-1, window_size, window_size, C)
return windows
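# Illustrative usage sketch (assumed example, not from the upstream file):
# shifted-window attention over a 14x14 token map with window 7 and shift 3;
# the module consumes and returns (B, H*W, C) sequences.
if __name__ == '__main__':
    attn = ShiftWindowMSA(embed_dims=96, input_resolution=(14, 14),
                          num_heads=3, window_size=7, shift_size=3)
    x = torch.randn(2, 14 * 14, 96)
    assert attn(x).shape == (2, 14 * 14, 96)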
| 11,410 | 38.213058 | 79 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/models/utils/helpers.py | # Copyright (c) OpenMMLab. All rights reserved.
import collections.abc
import warnings
from distutils.version import LooseVersion
from itertools import repeat
import torch
def is_tracing() -> bool:
if LooseVersion(torch.__version__) >= LooseVersion('1.6.0'):
on_trace = torch.jit.is_tracing()
# In PyTorch 1.6, torch.jit.is_tracing has a bug.
# Refers to https://github.com/pytorch/pytorch/issues/42448
if isinstance(on_trace, bool):
return on_trace
else:
return torch._C._is_tracing()
else:
warnings.warn(
'torch.jit.is_tracing is only supported after v1.6.0. '
'Therefore is_tracing returns False automatically. Please '
'set on_trace manually if you are using trace.', UserWarning)
return False
# From PyTorch internals
def _ntuple(n):
def parse(x):
if isinstance(x, collections.abc.Iterable):
return x
return tuple(repeat(x, n))
return parse
to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
to_ntuple = _ntuple
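# Illustrative usage sketch (assumed example, not from the upstream file):
# `to_2tuple` repeats scalars and passes iterables through unchanged.
if __name__ == '__main__':
    assert to_2tuple(7) == (7, 7)
    assert to_2tuple((3, 5)) == (3, 5)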
| 1,127 | 25.232558 | 73 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/models/utils/channel_shuffle.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
def channel_shuffle(x, groups):
"""Channel Shuffle operation.
This function enables cross-group information flow for multiple groups
convolution layers.
Args:
x (Tensor): The input tensor.
groups (int): The number of groups to divide the input tensor
in the channel dimension.
Returns:
Tensor: The output tensor after channel shuffle operation.
"""
batch_size, num_channels, height, width = x.size()
assert (num_channels % groups == 0), ('num_channels should be '
'divisible by groups')
channels_per_group = num_channels // groups
x = x.view(batch_size, groups, channels_per_group, height, width)
x = torch.transpose(x, 1, 2).contiguous()
x = x.view(batch_size, -1, height, width)
return x
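# Illustrative usage sketch (assumed example, not from the upstream file):
# with 4 channels in 2 groups, [0, 1 | 2, 3] interleaves to [0, 2, 1, 3],
# which is exactly the cross-group information flow described above.
if __name__ == '__main__':
    x = torch.arange(4).float().view(1, 4, 1, 1)
    out = channel_shuffle(x, groups=2)
    assert out.view(-1).tolist() == [0., 2., 1., 3.]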
| 889 | 28.666667 | 74 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/models/utils/augment/identity.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn.functional as F
from .builder import AUGMENT
@AUGMENT.register_module(name='Identity')
class Identity(object):
"""Change gt_label to one_hot encoding and keep img as the same.
Args:
num_classes (int): The number of classes.
        prob (float): Identity probability. It should be in range [0, 1].
Default to 1.0
"""
def __init__(self, num_classes, prob=1.0):
super(Identity, self).__init__()
assert isinstance(num_classes, int)
assert isinstance(prob, float) and 0.0 <= prob <= 1.0
self.num_classes = num_classes
self.prob = prob
def one_hot(self, gt_label):
return F.one_hot(gt_label, num_classes=self.num_classes)
def __call__(self, img, gt_label):
return img, self.one_hot(gt_label)
| 857 | 26.677419 | 70 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/models/utils/augment/cutmix.py | # Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
import numpy as np
import torch
import torch.nn.functional as F
from .builder import AUGMENT
class BaseCutMixLayer(object, metaclass=ABCMeta):
"""Base class for CutMixLayer.
Args:
alpha (float): Parameters for Beta distribution. Positive(>0)
num_classes (int): The number of classes
        prob (float): CutMix probability. It should be in range [0, 1].
Default to 1.0
cutmix_minmax (List[float], optional): cutmix min/max image ratio.
(as percent of image size). When cutmix_minmax is not None, we
generate cutmix bounding-box using cutmix_minmax instead of alpha
correct_lam (bool): Whether to apply lambda correction when cutmix bbox
clipped by image borders. Default to True
"""
def __init__(self,
alpha,
num_classes,
prob=1.0,
cutmix_minmax=None,
correct_lam=True):
super(BaseCutMixLayer, self).__init__()
assert isinstance(alpha, float) and alpha > 0
assert isinstance(num_classes, int)
assert isinstance(prob, float) and 0.0 <= prob <= 1.0
self.alpha = alpha
self.num_classes = num_classes
self.prob = prob
self.cutmix_minmax = cutmix_minmax
self.correct_lam = correct_lam
def rand_bbox_minmax(self, img_shape, count=None):
"""Min-Max CutMix bounding-box Inspired by Darknet cutmix
implementation. It generates a random rectangular bbox based on min/max
percent values applied to each dimension of the input image.
Typical defaults for minmax are usually in the .2-.3 for min and
.8-.9 range for max.
Args:
img_shape (tuple): Image shape as tuple
count (int, optional): Number of bbox to generate. Default to None
"""
assert len(self.cutmix_minmax) == 2
img_h, img_w = img_shape[-2:]
cut_h = np.random.randint(
int(img_h * self.cutmix_minmax[0]),
int(img_h * self.cutmix_minmax[1]),
size=count)
cut_w = np.random.randint(
int(img_w * self.cutmix_minmax[0]),
int(img_w * self.cutmix_minmax[1]),
size=count)
yl = np.random.randint(0, img_h - cut_h, size=count)
xl = np.random.randint(0, img_w - cut_w, size=count)
yu = yl + cut_h
xu = xl + cut_w
return yl, yu, xl, xu
def rand_bbox(self, img_shape, lam, margin=0., count=None):
"""Standard CutMix bounding-box that generates a random square bbox
based on lambda value. This implementation includes support for
enforcing a border margin as percent of bbox dimensions.
Args:
img_shape (tuple): Image shape as tuple
lam (float): Cutmix lambda value
margin (float): Percentage of bbox dimension to enforce as margin
(reduce amount of box outside image). Default to 0.
count (int, optional): Number of bbox to generate. Default to None
"""
ratio = np.sqrt(1 - lam)
img_h, img_w = img_shape[-2:]
cut_h, cut_w = int(img_h * ratio), int(img_w * ratio)
margin_y, margin_x = int(margin * cut_h), int(margin * cut_w)
cy = np.random.randint(0 + margin_y, img_h - margin_y, size=count)
cx = np.random.randint(0 + margin_x, img_w - margin_x, size=count)
yl = np.clip(cy - cut_h // 2, 0, img_h)
yh = np.clip(cy + cut_h // 2, 0, img_h)
xl = np.clip(cx - cut_w // 2, 0, img_w)
xh = np.clip(cx + cut_w // 2, 0, img_w)
return yl, yh, xl, xh
def cutmix_bbox_and_lam(self, img_shape, lam, count=None):
"""Generate bbox and apply lambda correction.
Args:
img_shape (tuple): Image shape as tuple
lam (float): Cutmix lambda value
count (int, optional): Number of bbox to generate. Default to None
"""
if self.cutmix_minmax is not None:
yl, yu, xl, xu = self.rand_bbox_minmax(img_shape, count=count)
else:
yl, yu, xl, xu = self.rand_bbox(img_shape, lam, count=count)
if self.correct_lam or self.cutmix_minmax is not None:
bbox_area = (yu - yl) * (xu - xl)
lam = 1. - bbox_area / float(img_shape[-2] * img_shape[-1])
return (yl, yu, xl, xu), lam
@abstractmethod
def cutmix(self, imgs, gt_label):
pass
@AUGMENT.register_module(name='BatchCutMix')
class BatchCutMixLayer(BaseCutMixLayer):
"""CutMix layer for batch CutMix."""
def __init__(self, *args, **kwargs):
super(BatchCutMixLayer, self).__init__(*args, **kwargs)
def cutmix(self, img, gt_label):
one_hot_gt_label = F.one_hot(gt_label, num_classes=self.num_classes)
lam = np.random.beta(self.alpha, self.alpha)
batch_size = img.size(0)
index = torch.randperm(batch_size)
(bby1, bby2, bbx1,
bbx2), lam = self.cutmix_bbox_and_lam(img.shape, lam)
img[:, :, bby1:bby2, bbx1:bbx2] = \
img[index, :, bby1:bby2, bbx1:bbx2]
mixed_gt_label = lam * one_hot_gt_label + (
1 - lam) * one_hot_gt_label[index, :]
return img, mixed_gt_label
def __call__(self, img, gt_label):
return self.cutmix(img, gt_label)
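# Illustrative usage sketch (assumed example, not from the upstream file):
# each image receives a random box pasted from a shuffled partner and the
# one-hot labels are blended with the area-corrected lambda.
if __name__ == '__main__':
    cutmix = BatchCutMixLayer(alpha=1.0, num_classes=10, prob=1.0)
    imgs = torch.randn(8, 3, 32, 32)
    labels = torch.randint(0, 10, (8, ))
    mixed_imgs, mixed_labels = cutmix(imgs, labels)
    assert mixed_imgs.shape == (8, 3, 32, 32)
    assert mixed_labels.shape == (8, 10)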
| 5,453 | 37.680851 | 79 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/models/utils/augment/mixup.py | # Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
import numpy as np
import torch
import torch.nn.functional as F
from .builder import AUGMENT
class BaseMixupLayer(object, metaclass=ABCMeta):
"""Base class for MixupLayer.
Args:
alpha (float): Parameters for Beta distribution.
num_classes (int): The number of classes.
prob (float): MixUp probability. It should be in range [0, 1].
Default to 1.0
"""
def __init__(self, alpha, num_classes, prob=1.0):
super(BaseMixupLayer, self).__init__()
assert isinstance(alpha, float) and alpha > 0
assert isinstance(num_classes, int)
assert isinstance(prob, float) and 0.0 <= prob <= 1.0
self.alpha = alpha
self.num_classes = num_classes
self.prob = prob
@abstractmethod
def mixup(self, imgs, gt_label):
pass
@AUGMENT.register_module(name='BatchMixup')
class BatchMixupLayer(BaseMixupLayer):
"""Mixup layer for batch mixup."""
def __init__(self, *args, **kwargs):
super(BatchMixupLayer, self).__init__(*args, **kwargs)
def mixup(self, img, gt_label):
one_hot_gt_label = F.one_hot(gt_label, num_classes=self.num_classes)
lam = np.random.beta(self.alpha, self.alpha)
batch_size = img.size(0)
index = torch.randperm(batch_size)
mixed_img = lam * img + (1 - lam) * img[index, :]
mixed_gt_label = lam * one_hot_gt_label + (
1 - lam) * one_hot_gt_label[index, :]
return mixed_img, mixed_gt_label
def __call__(self, img, gt_label):
return self.mixup(img, gt_label)
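# Illustrative usage sketch (assumed example, not from the upstream file):
# images and one-hot labels are convexly blended with lam ~ Beta(alpha, alpha).
if __name__ == '__main__':
    mixup = BatchMixupLayer(alpha=0.2, num_classes=10, prob=1.0)
    imgs = torch.randn(4, 3, 32, 32)
    labels = torch.randint(0, 10, (4, ))
    mixed_imgs, mixed_labels = mixup(imgs, labels)
    assert mixed_imgs.shape == (4, 3, 32, 32)
    assert mixed_labels.shape == (4, 10)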
| 1,674 | 27.87931 | 76 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/models/utils/augment/augments.py | # Copyright (c) OpenMMLab. All rights reserved.
import random
import numpy as np
from .builder import build_augment
class Augments(object):
"""Data augments.
We implement some data augmentation methods, such as mixup, cutmix.
Args:
augments_cfg (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict`):
Config dict of augments
Example:
>>> augments_cfg = [
dict(type='BatchCutMix', alpha=1., num_classes=10, prob=0.5),
dict(type='BatchMixup', alpha=1., num_classes=10, prob=0.3)
]
>>> augments = Augments(augments_cfg)
>>> imgs = torch.randn(16, 3, 32, 32)
>>> label = torch.randint(0, 10, (16, ))
>>> imgs, label = augments(imgs, label)
    To decide which augmentation within the Augments block is used, the
    following rule is applied: we pick an augmentation based on the
    probabilities. In the example above, BatchCutMix is applied with
    probability 0.5 and BatchMixup with probability 0.3. As Identity is not
    in augments_cfg, Identity is applied with the remaining probability
    1 - 0.5 - 0.3 = 0.2.
"""
def __init__(self, augments_cfg):
super(Augments, self).__init__()
if isinstance(augments_cfg, dict):
augments_cfg = [augments_cfg]
assert len(augments_cfg) > 0, \
'The length of augments_cfg should be positive.'
self.augments = [build_augment(cfg) for cfg in augments_cfg]
self.augment_probs = [aug.prob for aug in self.augments]
has_identity = any([cfg['type'] == 'Identity' for cfg in augments_cfg])
if has_identity:
assert sum(self.augment_probs) == 1.0,\
'The sum of augmentation probabilities should equal to 1,' \
' but got {:.2f}'.format(sum(self.augment_probs))
else:
assert sum(self.augment_probs) <= 1.0,\
'The sum of augmentation probabilities should less than or ' \
'equal to 1, but got {:.2f}'.format(sum(self.augment_probs))
identity_prob = 1 - sum(self.augment_probs)
if identity_prob > 0:
num_classes = self.augments[0].num_classes
self.augments += [
build_augment(
dict(
type='Identity',
num_classes=num_classes,
prob=identity_prob))
]
self.augment_probs += [identity_prob]
def __call__(self, img, gt_label):
if self.augments:
random_state = np.random.RandomState(random.randint(0, 2**32 - 1))
aug = random_state.choice(self.augments, p=self.augment_probs)
return aug(img, gt_label)
return img, gt_label
| 2,799 | 36.837838 | 79 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/models/losses/label_smooth_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch
import torch.nn as nn
from ..builder import LOSSES
from .cross_entropy_loss import CrossEntropyLoss
from .utils import convert_to_one_hot
@LOSSES.register_module()
class LabelSmoothLoss(nn.Module):
r"""Intializer for the label smoothed cross entropy loss.
Refers to `Rethinking the Inception Architecture for Computer Vision
<https://arxiv.org/abs/1512.00567>`_
    This decreases the gap between output scores and encourages generalization.
Labels provided to forward can be one-hot like vectors (NxC) or class
indices (Nx1).
And this accepts linear combination of one-hot like labels from mixup or
cutmix except multi-label task.
Args:
label_smooth_val (float): The degree of label smoothing.
num_classes (int, optional): Number of classes. Defaults to None.
mode (str): Refers to notes, Options are 'original', 'classy_vision',
'multi_label'. Defaults to 'classy_vision'
reduction (str): The method used to reduce the loss.
Options are "none", "mean" and "sum". Defaults to 'mean'.
loss_weight (float): Weight of the loss. Defaults to 1.0.
Notes:
if the mode is "original", this will use the same label smooth method
as the original paper as:
.. math::
(1-\epsilon)\delta_{k, y} + \frac{\epsilon}{K}
where epsilon is the `label_smooth_val`, K is the num_classes and
delta(k,y) is Dirac delta, which equals 1 for k=y and 0 otherwise.
if the mode is "classy_vision", this will use the same label smooth
method as the facebookresearch/ClassyVision repo as:
.. math::
\frac{\delta_{k, y} + \epsilon/K}{1+\epsilon}
if the mode is "multi_label", this will accept labels from multi-label
task and smoothing them as:
.. math::
(1-2\epsilon)\delta_{k, y} + \epsilon
"""
def __init__(self,
label_smooth_val,
num_classes=None,
mode=None,
reduction='mean',
loss_weight=1.0):
super().__init__()
self.num_classes = num_classes
self.loss_weight = loss_weight
assert (isinstance(label_smooth_val, float)
and 0 <= label_smooth_val < 1), \
            f'LabelSmoothLoss accepts a float label_smooth_val ' \
            f'in [0, 1), but got {label_smooth_val}'
self.label_smooth_val = label_smooth_val
accept_reduction = {'none', 'mean', 'sum'}
assert reduction in accept_reduction, \
f'LabelSmoothLoss supports reduction {accept_reduction}, ' \
            f'but got {reduction}.'
self.reduction = reduction
if mode is None:
warnings.warn(
'LabelSmoothLoss mode is not set, use "classy_vision" '
'by default. The default value will be changed to '
'"original" recently. Please set mode manually if want '
'to keep "classy_vision".', UserWarning)
mode = 'classy_vision'
accept_mode = {'original', 'classy_vision', 'multi_label'}
assert mode in accept_mode, \
            f'LabelSmoothLoss supports mode {accept_mode}, but got {mode}.'
self.mode = mode
self._eps = label_smooth_val
if mode == 'classy_vision':
self._eps = label_smooth_val / (1 + label_smooth_val)
if mode == 'multi_label':
self.ce = CrossEntropyLoss(use_sigmoid=True)
self.smooth_label = self.multilabel_smooth_label
else:
self.ce = CrossEntropyLoss(use_soft=True)
self.smooth_label = self.original_smooth_label
def generate_one_hot_like_label(self, label):
"""This function takes one-hot or index label vectors and computes one-
hot like label vectors (float)"""
# check if targets are inputted as class integers
if label.dim() == 1 or (label.dim() == 2 and label.shape[1] == 1):
label = convert_to_one_hot(label.view(-1, 1), self.num_classes)
return label.float()
def original_smooth_label(self, one_hot_like_label):
assert self.num_classes > 0
smooth_label = one_hot_like_label * (1 - self._eps)
smooth_label += self._eps / self.num_classes
return smooth_label
def multilabel_smooth_label(self, one_hot_like_label):
assert self.num_classes > 0
smooth_label = torch.full_like(one_hot_like_label, self._eps)
smooth_label.masked_fill_(one_hot_like_label > 0, 1 - self._eps)
return smooth_label
def forward(self,
cls_score,
label,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
r"""Label smooth loss.
Args:
pred (torch.Tensor): The prediction with shape (N, \*).
label (torch.Tensor): The ground truth label of the prediction
with shape (N, \*).
weight (torch.Tensor, optional): Sample-wise loss weight with shape
                (N, \*). Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The method used to reduce the
loss into a scalar. Options are "none", "mean" and "sum".
Defaults to None.
Returns:
torch.Tensor: Loss.
"""
if self.num_classes is not None:
assert self.num_classes == cls_score.shape[1], \
f'num_classes should equal to cls_score.shape[1], ' \
f'but got num_classes: {self.num_classes} and ' \
f'cls_score.shape[1]: {cls_score.shape[1]}'
else:
self.num_classes = cls_score.shape[1]
one_hot_like_label = self.generate_one_hot_like_label(label=label)
assert one_hot_like_label.shape == cls_score.shape, \
f'LabelSmoothLoss requires output and target ' \
f'to be same shape, but got output.shape: {cls_score.shape} ' \
f'and target.shape: {one_hot_like_label.shape}'
smoothed_label = self.smooth_label(one_hot_like_label)
return self.ce.forward(
cls_score,
smoothed_label,
weight=weight,
avg_factor=avg_factor,
reduction_override=reduction_override,
**kwargs)
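# Illustrative usage sketch (assumed example, not from the upstream file):
# in 'original' mode with eps=0.1 and K=5, a class-2 target row becomes
# (1 - eps) * one_hot + eps / K = [0.02, 0.02, 0.92, 0.02, 0.02].
if __name__ == '__main__':
    criterion = LabelSmoothLoss(label_smooth_val=0.1, num_classes=5,
                                mode='original')
    logits = torch.randn(4, 5)
    labels = torch.randint(0, 5, (4, ))
    assert criterion(logits, labels).dim() == 0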
| 6,591 | 38.238095 | 79 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/models/losses/asymmetric_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from ..builder import LOSSES
from .utils import weight_reduce_loss
def asymmetric_loss(pred,
target,
weight=None,
gamma_pos=1.0,
gamma_neg=4.0,
clip=0.05,
reduction='mean',
avg_factor=None):
r"""asymmetric loss.
Please refer to the `paper <https://arxiv.org/abs/2009.14119>`__ for
details.
Args:
pred (torch.Tensor): The prediction with shape (N, \*).
target (torch.Tensor): The ground truth label of the prediction with
shape (N, \*).
weight (torch.Tensor, optional): Sample-wise loss weight with shape
            (N, ). Defaults to None.
        gamma_pos (float): Positive focusing parameter. Defaults to 1.0.
gamma_neg (float): Negative focusing parameter. We usually set
gamma_neg > gamma_pos. Defaults to 4.0.
clip (float, optional): Probability margin. Defaults to 0.05.
reduction (str): The method used to reduce the loss.
Options are "none", "mean" and "sum". If reduction is 'none' , loss
is same shape as pred and label. Defaults to 'mean'.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
Returns:
torch.Tensor: Loss.
"""
assert pred.shape == \
target.shape, 'pred and target should be in the same shape.'
eps = 1e-8
pred_sigmoid = pred.sigmoid()
target = target.type_as(pred)
if clip and clip > 0:
pt = (1 - pred_sigmoid +
clip).clamp(max=1) * (1 - target) + pred_sigmoid * target
else:
pt = (1 - pred_sigmoid) * (1 - target) + pred_sigmoid * target
asymmetric_weight = (1 - pt).pow(gamma_pos * target + gamma_neg *
(1 - target))
loss = -torch.log(pt.clamp(min=eps)) * asymmetric_weight
if weight is not None:
assert weight.dim() == 1
weight = weight.float()
if pred.dim() > 1:
weight = weight.reshape(-1, 1)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
@LOSSES.register_module()
class AsymmetricLoss(nn.Module):
"""asymmetric loss.
Args:
        gamma_pos (float): Positive focusing parameter.
            Defaults to 0.0.
gamma_neg (float): Negative focusing parameter. We
usually set gamma_neg > gamma_pos. Defaults to 4.0.
clip (float, optional): Probability margin. Defaults to 0.05.
reduction (str): The method used to reduce the loss into
a scalar.
loss_weight (float): Weight of loss. Defaults to 1.0.
"""
def __init__(self,
gamma_pos=0.0,
gamma_neg=4.0,
clip=0.05,
reduction='mean',
loss_weight=1.0):
super(AsymmetricLoss, self).__init__()
self.gamma_pos = gamma_pos
self.gamma_neg = gamma_neg
self.clip = clip
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""asymmetric loss."""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_cls = self.loss_weight * asymmetric_loss(
pred,
target,
weight,
gamma_pos=self.gamma_pos,
gamma_neg=self.gamma_neg,
clip=self.clip,
reduction=reduction,
avg_factor=avg_factor)
return loss_cls
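# Illustrative usage sketch (assumed example, not from the upstream file):
# multi-label {0, 1} targets; gamma_neg > gamma_pos down-weights easy
# negatives and `clip` adds a probability margin to the negative term.
if __name__ == '__main__':
    criterion = AsymmetricLoss(gamma_pos=0.0, gamma_neg=4.0, clip=0.05)
    pred = torch.randn(4, 10)
    target = torch.randint(0, 2, (4, 10))
    assert criterion(pred, target).dim() == 0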
| 3,887 | 33.40708 | 79 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/models/losses/utils.py | # Copyright (c) OpenMMLab. All rights reserved.
import functools
import torch
import torch.nn.functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
# none: 0, elementwise_mean:1, sum: 2
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
# if weight is specified, apply element-wise weight
if weight is not None:
loss = loss * weight
# if avg_factor is not specified, just reduce the loss
if avg_factor is None:
loss = reduce_loss(loss, reduction)
else:
# if reduction is mean, then average the loss by avg_factor
if reduction == 'mean':
loss = loss.sum() / avg_factor
# if reduction is 'none', then do nothing, otherwise raise an error
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
``loss_func(pred, target, **kwargs)``. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like ``loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)``.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred,
target,
weight=None,
reduction='mean',
avg_factor=None,
**kwargs):
# get element-wise loss
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
def convert_to_one_hot(targets: torch.Tensor, classes) -> torch.Tensor:
"""This function converts target class indices to one-hot vectors, given
the number of classes.
Args:
targets (Tensor): The ground truth label of the prediction
with shape (N, 1)
classes (int): the number of classes.
Returns:
        Tensor: The one-hot encoded targets with shape (N, classes).
"""
assert (torch.max(targets).item() <
classes), 'Class Index must be less than number of classes'
one_hot_targets = torch.zeros((targets.shape[0], classes),
dtype=torch.long,
device=targets.device)
one_hot_targets.scatter_(1, targets.long(), 1)
return one_hot_targets
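# Illustrative usage sketch (assumed example, not from the upstream file):
# index targets of shape (N, 1) become one-hot rows of shape (N, classes).
if __name__ == '__main__':
    targets = torch.tensor([[1], [0], [2]])
    assert convert_to_one_hot(targets, 3).tolist() == [[0, 1, 0],
                                                       [1, 0, 0],
                                                       [0, 0, 1]]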
| 3,827 | 30.377049 | 79 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/models/losses/accuracy.py | # Copyright (c) OpenMMLab. All rights reserved.
from numbers import Number
import numpy as np
import torch
import torch.nn as nn
def accuracy_numpy(pred, target, topk=1, thrs=0.):
if isinstance(thrs, Number):
thrs = (thrs, )
res_single = True
elif isinstance(thrs, tuple):
res_single = False
else:
raise TypeError(
f'thrs should be a number or tuple, but got {type(thrs)}.')
res = []
maxk = max(topk)
num = pred.shape[0]
pred_label = pred.argsort(axis=1)[:, -maxk:][:, ::-1]
pred_score = np.sort(pred, axis=1)[:, -maxk:][:, ::-1]
for k in topk:
correct_k = pred_label[:, :k] == target.reshape(-1, 1)
res_thr = []
for thr in thrs:
# Only prediction values larger than thr are counted as correct
_correct_k = correct_k & (pred_score[:, :k] > thr)
_correct_k = np.logical_or.reduce(_correct_k, axis=1)
res_thr.append(_correct_k.sum() * 100. / num)
if res_single:
res.append(res_thr[0])
else:
res.append(res_thr)
return res
def accuracy_torch(pred, target, topk=1, thrs=0.):
if isinstance(thrs, Number):
thrs = (thrs, )
res_single = True
elif isinstance(thrs, tuple):
res_single = False
else:
raise TypeError(
f'thrs should be a number or tuple, but got {type(thrs)}.')
res = []
maxk = max(topk)
num = pred.size(0)
pred_score, pred_label = pred.topk(maxk, dim=1)
pred_label = pred_label.t()
correct = pred_label.eq(target.view(1, -1).expand_as(pred_label))
for k in topk:
res_thr = []
for thr in thrs:
# Only prediction values larger than thr are counted as correct
_correct = correct & (pred_score.t() > thr)
correct_k = _correct[:k].reshape(-1).float().sum(0, keepdim=True)
res_thr.append(correct_k.mul_(100. / num))
if res_single:
res.append(res_thr[0])
else:
res.append(res_thr)
return res
def accuracy(pred, target, topk=1, thrs=0.):
"""Calculate accuracy according to the prediction and target.
Args:
pred (torch.Tensor | np.array): The model prediction.
target (torch.Tensor | np.array): The target of each prediction
topk (int | tuple[int]): If the predictions in ``topk``
matches the target, the predictions will be regarded as
correct ones. Defaults to 1.
thrs (Number | tuple[Number], optional): Predictions with scores under
the thresholds are considered negative. Default to 0.
Returns:
float | list[float] | list[list[float]]: Accuracy
- float: If both ``topk`` and ``thrs`` is a single value.
- list[float]: If one of ``topk`` or ``thrs`` is a tuple.
- list[list[float]]: If both ``topk`` and ``thrs`` is a tuple. \
And the first dim is ``topk``, the second dim is ``thrs``.
"""
assert isinstance(topk, (int, tuple))
if isinstance(topk, int):
topk = (topk, )
return_single = True
else:
return_single = False
if isinstance(pred, torch.Tensor) and isinstance(target, torch.Tensor):
res = accuracy_torch(pred, target, topk, thrs)
elif isinstance(pred, np.ndarray) and isinstance(target, np.ndarray):
res = accuracy_numpy(pred, target, topk, thrs)
else:
raise TypeError(
f'pred and target should both be torch.Tensor or np.ndarray, '
f'but got {type(pred)} and {type(target)}.')
return res[0] if return_single else res
class Accuracy(nn.Module):
def __init__(self, topk=(1, )):
"""Module to calculate the accuracy.
Args:
topk (tuple): The criterion used to calculate the
accuracy. Defaults to (1,).
"""
super().__init__()
self.topk = topk
def forward(self, pred, target):
"""Forward function to calculate accuracy.
Args:
pred (torch.Tensor): Prediction of models.
target (torch.Tensor): Target for each prediction.
Returns:
list[float]: The accuracies under different topk criterions.
"""
return accuracy(pred, target, self.topk)
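# Illustrative usage sketch (assumed example, not from the upstream file):
# top-1 credits only the argmax, while top-2 credits the two highest scores.
if __name__ == '__main__':
    pred = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
    target = torch.tensor([1, 1])
    top1, top2 = accuracy(pred, target, topk=(1, 2))
    assert top1.item() == 50.0 and top2.item() == 100.0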
| 4,342 | 32.152672 | 78 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/models/losses/focal_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weight_reduce_loss
def sigmoid_focal_loss(pred,
target,
weight=None,
gamma=2.0,
alpha=0.25,
reduction='mean',
avg_factor=None):
r"""Sigmoid focal loss.
Args:
pred (torch.Tensor): The prediction with shape (N, \*).
target (torch.Tensor): The ground truth label of the prediction with
shape (N, \*).
weight (torch.Tensor, optional): Sample-wise loss weight with shape
            (N, ). Defaults to None.
gamma (float): The gamma for calculating the modulating factor.
Defaults to 2.0.
alpha (float): A balanced form for Focal Loss. Defaults to 0.25.
reduction (str): The method used to reduce the loss.
Options are "none", "mean" and "sum". If reduction is 'none' ,
loss is same shape as pred and label. Defaults to 'mean'.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
Returns:
torch.Tensor: Loss.
"""
assert pred.shape == \
target.shape, 'pred and target should be in the same shape.'
pred_sigmoid = pred.sigmoid()
target = target.type_as(pred)
pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
focal_weight = (alpha * target + (1 - alpha) *
(1 - target)) * pt.pow(gamma)
loss = F.binary_cross_entropy_with_logits(
pred, target, reduction='none') * focal_weight
if weight is not None:
assert weight.dim() == 1
weight = weight.float()
if pred.dim() > 1:
weight = weight.reshape(-1, 1)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
@LOSSES.register_module()
class FocalLoss(nn.Module):
"""Focal loss.
Args:
gamma (float): Focusing parameter in focal loss.
Defaults to 2.0.
alpha (float): The parameter in balanced form of focal
loss. Defaults to 0.25.
reduction (str): The method used to reduce the loss into
a scalar. Options are "none" and "mean". Defaults to 'mean'.
loss_weight (float): Weight of loss. Defaults to 1.0.
"""
def __init__(self,
gamma=2.0,
alpha=0.25,
reduction='mean',
loss_weight=1.0):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.alpha = alpha
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
r"""Sigmoid focal loss.
Args:
pred (torch.Tensor): The prediction with shape (N, \*).
target (torch.Tensor): The ground truth label of the prediction
with shape (N, \*).
weight (torch.Tensor, optional): Sample-wise loss weight with shape
                (N, \*). Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The method used to reduce the
loss into a scalar. Options are "none", "mean" and "sum".
Defaults to None.
Returns:
torch.Tensor: Loss.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_cls = self.loss_weight * sigmoid_focal_loss(
pred,
target,
weight,
gamma=self.gamma,
alpha=self.alpha,
reduction=reduction,
avg_factor=avg_factor)
return loss_cls
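# Illustrative usage sketch (assumed example, not from the upstream file):
# sigmoid focal loss expects targets with the same shape as the logits.
if __name__ == '__main__':
    import torch
    criterion = FocalLoss(gamma=2.0, alpha=0.25)
    pred = torch.randn(4, 3)
    target = torch.randint(0, 2, (4, 3)).float()
    assert criterion(pred, target).dim() == 0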
| 4,089 | 34.565217 | 79 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/models/losses/cross_entropy_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weight_reduce_loss
def cross_entropy(pred,
label,
weight=None,
reduction='mean',
avg_factor=None,
class_weight=None):
"""Calculate the CrossEntropy loss.
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the number
of classes.
label (torch.Tensor): The gt label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
reduction (str): The method used to reduce the loss.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (torch.Tensor, optional): The weight for each class with
shape (C), C is the number of classes. Default None.
Returns:
torch.Tensor: The calculated loss
"""
# element-wise losses
loss = F.cross_entropy(pred, label, weight=class_weight, reduction='none')
# apply weights and do the reduction
if weight is not None:
weight = weight.float()
loss = weight_reduce_loss(
loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
return loss
def soft_cross_entropy(pred,
label,
weight=None,
reduction='mean',
class_weight=None,
avg_factor=None):
"""Calculate the Soft CrossEntropy loss. The label can be float.
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the number
of classes.
label (torch.Tensor): The gt label of the prediction with shape (N, C).
When using "mixup", the label can be float.
weight (torch.Tensor, optional): Sample-wise loss weight.
reduction (str): The method used to reduce the loss.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (torch.Tensor, optional): The weight for each class with
shape (C), C is the number of classes. Default None.
Returns:
torch.Tensor: The calculated loss
"""
# element-wise losses
loss = -label * F.log_softmax(pred, dim=-1)
if class_weight is not None:
loss *= class_weight
loss = loss.sum(dim=-1)
# apply weights and do the reduction
if weight is not None:
weight = weight.float()
loss = weight_reduce_loss(
loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
return loss
def binary_cross_entropy(pred,
label,
weight=None,
reduction='mean',
avg_factor=None,
class_weight=None):
r"""Calculate the binary CrossEntropy loss with logits.
Args:
pred (torch.Tensor): The prediction with shape (N, \*).
label (torch.Tensor): The gt label with shape (N, \*).
weight (torch.Tensor, optional): Element-wise weight of loss with shape
(N, ). Defaults to None.
reduction (str): The method used to reduce the loss.
Options are "none", "mean" and "sum". If reduction is 'none' , loss
is same shape as pred and label. Defaults to 'mean'.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (torch.Tensor, optional): The weight for each class with
shape (C), C is the number of classes. Default None.
Returns:
torch.Tensor: The calculated loss
"""
assert pred.dim() == label.dim()
# Ensure that the size of class_weight is consistent with pred and label to
    # avoid automatic broadcasting.
if class_weight is not None:
N = pred.size()[0]
class_weight = class_weight.repeat(N, 1)
loss = F.binary_cross_entropy_with_logits(
pred, label, weight=class_weight, reduction='none')
# apply weights and do the reduction
if weight is not None:
assert weight.dim() == 1
weight = weight.float()
if pred.dim() > 1:
weight = weight.reshape(-1, 1)
loss = weight_reduce_loss(
loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
return loss
@LOSSES.register_module()
class CrossEntropyLoss(nn.Module):
"""Cross entropy loss.
Args:
        use_sigmoid (bool): Whether the prediction uses sigmoid instead
            of softmax. Defaults to False.
use_soft (bool): Whether to use the soft version of CrossEntropyLoss.
Defaults to False.
reduction (str): The method used to reduce the loss.
Options are "none", "mean" and "sum". Defaults to 'mean'.
loss_weight (float): Weight of the loss. Defaults to 1.0.
class_weight (List[float], optional): The weight for each class with
shape (C), C is the number of classes. Default None.
"""
def __init__(self,
use_sigmoid=False,
use_soft=False,
reduction='mean',
loss_weight=1.0,
class_weight=None):
super(CrossEntropyLoss, self).__init__()
self.use_sigmoid = use_sigmoid
self.use_soft = use_soft
assert not (
self.use_soft and self.use_sigmoid
), 'use_sigmoid and use_soft could not be set simultaneously'
self.reduction = reduction
self.loss_weight = loss_weight
self.class_weight = class_weight
if self.use_sigmoid:
self.cls_criterion = binary_cross_entropy
elif self.use_soft:
self.cls_criterion = soft_cross_entropy
else:
self.cls_criterion = cross_entropy
def forward(self,
cls_score,
label,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if self.class_weight is not None:
class_weight = cls_score.new_tensor(self.class_weight)
else:
class_weight = None
loss_cls = self.loss_weight * self.cls_criterion(
cls_score,
label,
weight,
class_weight=class_weight,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_cls
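# Illustrative usage sketch (assumed example, not from the upstream file):
# hard integer labels use the default softmax path, while soft labels
# (e.g. from mixup) require use_soft=True.
if __name__ == '__main__':
    import torch
    logits = torch.randn(4, 10)
    hard = torch.randint(0, 10, (4, ))
    assert CrossEntropyLoss()(logits, hard).dim() == 0
    soft = torch.softmax(torch.randn(4, 10), dim=1)
    assert CrossEntropyLoss(use_soft=True)(logits, soft).dim() == 0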
| 6,753 | 34.547368 | 79 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/models/losses/kd_loss.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
@LOSSES.register_module()
class Logits(nn.Module):
'''
Do Deep Nets Really Need to be Deep?
http://papers.nips.cc/paper/5484-do-deep-nets-really-need-to-be-deep.pdf
'''
def __init__(self):
super(Logits, self).__init__()
def forward(self, out_s, out_t):
loss = F.mse_loss(out_s, out_t)
return loss
@LOSSES.register_module()
class SoftTarget(nn.Module):
'''
Distilling the Knowledge in a Neural Network
https://arxiv.org/pdf/1503.02531.pdf
'''
def __init__(self, temperature):
super(SoftTarget, self).__init__()
self.T = temperature
def forward(self, out_s, out_t):
loss = F.kl_div(F.log_softmax(out_s/self.T, dim=1),
F.softmax(out_t/self.T, dim=1),
reduction='batchmean') * self.T * self.T
return loss
def InfoMin_loss(mu, log_var):
    """KL divergence between N(mu, exp(log_var)) and the standard normal."""
shape = mu.shape
if len(shape) == 2:
return torch.mean(-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim=1), dim=0)
elif len(shape) == 1:
return -0.5 * torch.mean(1 + log_var - mu ** 2 - log_var.exp())
def InfoMax_loss(x1, x2):
    """Contrastive InfoMax loss between two L2-normalized feature views."""
x1 = x1 / (torch.norm(x1, p=2, dim=1, keepdim=True) + 1e-10)
x2 = x2 / (torch.norm(x2, p=2, dim=1, keepdim=True) + 1e-10)
bs = x1.size(0)
s = torch.matmul(x1, x2.permute(1, 0))
    mask_joint = torch.eye(bs, device=x1.device)
mask_marginal = 1 - mask_joint
Ej = (s * mask_joint).mean()
Em = torch.exp(s * mask_marginal).mean()
    # InfoMax lower bound: maximize the joint-pair similarity Ej against
    # the log of the mean exponentiated marginal similarity Em.
    infomax_loss = -(Ej - torch.log(Em))
return infomax_loss
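# Illustrative usage sketch (assumed example, not from the upstream file):
# the soft-target KD loss vanishes when student and teacher logits agree,
# and InfoMin is the KL to a standard normal, zero at mu = log_var = 0.
if __name__ == '__main__':
    kd = SoftTarget(temperature=4.0)
    logits = torch.randn(8, 100)
    assert kd(logits, logits).abs().item() < 1e-3
    zero = torch.zeros(8, 16)
    assert InfoMin_loss(zero, zero).item() == 0.0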
| 1,876 | 26.602941 | 96 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/models/backbones/mobilenet_v2.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from torch.nn.modules.batchnorm import _BatchNorm
from mmcls.models.utils import make_divisible
from ..builder import BACKBONES
from .base_backbone import BaseBackbone
class InvertedResidual(BaseModule):
"""InvertedResidual block for MobileNetV2.
Args:
in_channels (int): The input channels of the InvertedResidual block.
out_channels (int): The output channels of the InvertedResidual block.
stride (int): Stride of the middle (first) 3x3 convolution.
        expand_ratio (int): Adjusts the number of channels of the hidden
            layer in InvertedResidual by this amount.
conv_cfg (dict, optional): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU6').
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
Returns:
Tensor: The output tensor
"""
def __init__(self,
in_channels,
out_channels,
stride,
expand_ratio,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU6'),
with_cp=False,
init_cfg=None):
super(InvertedResidual, self).__init__(init_cfg)
self.stride = stride
        assert stride in [1, 2], f'stride must be in [1, 2]. ' \
f'But received {stride}.'
self.with_cp = with_cp
self.use_res_connect = self.stride == 1 and in_channels == out_channels
hidden_dim = int(round(in_channels * expand_ratio))
layers = []
if expand_ratio != 1:
layers.append(
ConvModule(
in_channels=in_channels,
out_channels=hidden_dim,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
layers.extend([
ConvModule(
in_channels=hidden_dim,
out_channels=hidden_dim,
kernel_size=3,
stride=stride,
padding=1,
groups=hidden_dim,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg),
ConvModule(
in_channels=hidden_dim,
out_channels=out_channels,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
])
self.conv = nn.Sequential(*layers)
def forward(self, x):
def _inner_forward(x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
return out
@BACKBONES.register_module()
class MobileNetV2(BaseBackbone):
"""MobileNetV2 backbone.
Args:
widen_factor (float): Width multiplier, multiply number of
channels in each layer by this amount. Default: 1.0.
out_indices (None or Sequence[int]): Output from which stages.
Default: (7, ).
frozen_stages (int): Stages to be frozen (all param fixed).
Default: -1, which means not freezing any parameters.
conv_cfg (dict, optional): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU6').
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
"""
# Parameters to build layers. 4 parameters are needed to construct a
# layer, from left to right: expand_ratio, channel, num_blocks, stride.
arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2],
[6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2],
[6, 320, 1, 1]]
def __init__(self,
widen_factor=1.,
out_indices=(7, ),
frozen_stages=-1,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU6'),
norm_eval=False,
with_cp=False,
init_cfg=[
dict(type='Kaiming', layer=['Conv2d']),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]):
super(MobileNetV2, self).__init__(init_cfg)
self.widen_factor = widen_factor
self.out_indices = out_indices
for index in out_indices:
if index not in range(0, 8):
                raise ValueError('the item in out_indices must be in '
                                 f'range(0, 8). But received {index}')
if frozen_stages not in range(-1, 8):
raise ValueError('frozen_stages must be in range(-1, 8). '
f'But received {frozen_stages}')
self.out_indices = out_indices
self.frozen_stages = frozen_stages
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.norm_eval = norm_eval
self.with_cp = with_cp
self.in_channels = make_divisible(32 * widen_factor, 8)
self.conv1 = ConvModule(
in_channels=3,
out_channels=self.in_channels,
kernel_size=3,
stride=2,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.layers = []
for i, layer_cfg in enumerate(self.arch_settings):
expand_ratio, channel, num_blocks, stride = layer_cfg
out_channels = make_divisible(channel * widen_factor, 8)
inverted_res_layer = self.make_layer(
out_channels=out_channels,
num_blocks=num_blocks,
stride=stride,
expand_ratio=expand_ratio)
layer_name = f'layer{i + 1}'
self.add_module(layer_name, inverted_res_layer)
self.layers.append(layer_name)
if widen_factor > 1.0:
self.out_channel = int(1280 * widen_factor)
else:
self.out_channel = 1280
layer = ConvModule(
in_channels=self.in_channels,
out_channels=self.out_channel,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.add_module('conv2', layer)
self.layers.append('conv2')
def make_layer(self, out_channels, num_blocks, stride, expand_ratio):
"""Stack InvertedResidual blocks to build a layer for MobileNetV2.
Args:
out_channels (int): out_channels of block.
num_blocks (int): number of blocks.
stride (int): stride of the first block. Default: 1
expand_ratio (int): Expand the number of channels of the
hidden layer in InvertedResidual by this ratio. Default: 6.
"""
layers = []
for i in range(num_blocks):
if i >= 1:
stride = 1
layers.append(
InvertedResidual(
self.in_channels,
out_channels,
stride,
expand_ratio=expand_ratio,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
with_cp=self.with_cp))
self.in_channels = out_channels
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
outs = []
for i, layer_name in enumerate(self.layers):
layer = getattr(self, layer_name)
x = layer(x)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
def _freeze_stages(self):
if self.frozen_stages >= 0:
for param in self.conv1.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
layer = getattr(self, f'layer{i}')
layer.eval()
for param in layer.parameters():
param.requires_grad = False
def train(self, mode=True):
super(MobileNetV2, self).train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
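# --- Hedged shape-check sketch (added for illustration, not part of the
# original file). Runnable as `python -m mmcls.models.backbones.mobilenet_v2`
# when mmcls is importable; the relative imports prevent running it as a
# plain script.
if __name__ == '__main__':
    import torch
    model = MobileNetV2(widen_factor=1.0, out_indices=(7, ))
    model.eval()
    with torch.no_grad():
        feats = model(torch.rand(1, 3, 224, 224))
    # conv2 output: 1280 channels at 1/32 resolution.
    print(tuple(feats[-1].shape))  # (1, 1280, 7, 7)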
| 9,588 | 35.184906 | 79 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/models/backbones/resnet.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import (ConvModule, build_conv_layer, build_norm_layer,
constant_init)
from mmcv.utils.parrots_wrapper import _BatchNorm
from ..builder import BACKBONES
from .base_backbone import BaseBackbone
class WideBasicBlock(nn.Module):
"""BasicBlock for ResNet.
Args:
in_channels (int): Input channels of this block.
out_channels (int): Output channels of this block.
expansion (int): The ratio of ``out_channels/mid_channels`` where
``mid_channels`` is the output channels of conv1. This is a
reserved argument in BasicBlock and should always be 1. Default: 1.
stride (int): stride of the block. Default: 1
dilation (int): dilation of convolution. Default: 1
downsample (nn.Module, optional): downsample operation on identity
branch. Default: None.
style (str): `pytorch` or `caffe`. It is unused and reserved for
unified API with Bottleneck.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
conv_cfg (dict, optional): dictionary to construct and config conv
layer. Default: None
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
"""
def __init__(self,
in_channels,
out_channels,
expansion=1,
stride=1,
dilation=1,
dropout=0,
downsample=None,
style='pytorch',
with_cp=False,
conv_cfg=None,
norm_cfg=dict(type='SyncBN',
momentum=0.001,
requires_grad=True)):
super(WideBasicBlock, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.expansion = expansion
assert self.expansion == 1
assert out_channels % expansion == 0
self.mid_channels = out_channels // expansion
self.stride = stride
self.dropout = dropout
self.dilation = dilation
self.style = style
self.with_cp = with_cp
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
if self.dropout > 0:
self.drop = nn.Dropout2d(p=self.dropout)
self.norm1_name, norm1 = build_norm_layer(
norm_cfg, self.in_channels, postfix=1)
self.norm2_name, norm2 = build_norm_layer(
norm_cfg, self.mid_channels, postfix=2)
self.conv1 = build_conv_layer(
conv_cfg,
in_channels,
self.mid_channels,
3,
stride=stride,
padding=dilation,
dilation=dilation,
bias=False)
self.add_module(self.norm1_name, norm1)
self.conv2 = build_conv_layer(
conv_cfg,
self.mid_channels,
out_channels,
3,
padding=1,
bias=False)
self.add_module(self.norm2_name, norm2)
self.relu = nn.LeakyReLU(0.1, inplace=True)
self.downsample = downsample
@property
def norm1(self):
return getattr(self, self.norm1_name)
@property
def norm2(self):
return getattr(self, self.norm2_name)
def forward(self, x):
def _inner_forward(x):
identity = x
out = self.norm1(x)
out = self.relu(out)
out = self.conv1(out)
if self.dropout > 0:
out = self.drop(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
# out = self.relu(out)
return out
class BasicBlock(nn.Module):
"""BasicBlock for ResNet.
Args:
in_channels (int): Input channels of this block.
out_channels (int): Output channels of this block.
expansion (int): The ratio of ``out_channels/mid_channels`` where
``mid_channels`` is the output channels of conv1. This is a
reserved argument in BasicBlock and should always be 1. Default: 1.
stride (int): stride of the block. Default: 1
dilation (int): dilation of convolution. Default: 1
downsample (nn.Module, optional): downsample operation on identity
branch. Default: None.
style (str): `pytorch` or `caffe`. It is unused and reserved for
unified API with Bottleneck.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
conv_cfg (dict, optional): dictionary to construct and config conv
layer. Default: None
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
"""
def __init__(self,
in_channels,
out_channels,
expansion=1,
stride=1,
dilation=1,
downsample=None,
style='pytorch',
with_cp=False,
conv_cfg=None,
norm_cfg=dict(type='BN')):
super(BasicBlock, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.expansion = expansion
assert self.expansion == 1
assert out_channels % expansion == 0
self.mid_channels = out_channels // expansion
self.stride = stride
self.dilation = dilation
self.style = style
self.with_cp = with_cp
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.norm1_name, norm1 = build_norm_layer(
norm_cfg, self.mid_channels, postfix=1)
self.norm2_name, norm2 = build_norm_layer(
norm_cfg, out_channels, postfix=2)
self.conv1 = build_conv_layer(
conv_cfg,
in_channels,
self.mid_channels,
3,
stride=stride,
padding=dilation,
dilation=dilation,
bias=False)
self.add_module(self.norm1_name, norm1)
self.conv2 = build_conv_layer(
conv_cfg,
self.mid_channels,
out_channels,
3,
padding=1,
bias=False)
self.add_module(self.norm2_name, norm2)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
@property
def norm1(self):
return getattr(self, self.norm1_name)
@property
def norm2(self):
return getattr(self, self.norm2_name)
def forward(self, x):
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
class Bottleneck(nn.Module):
"""Bottleneck block for ResNet.
Args:
in_channels (int): Input channels of this block.
out_channels (int): Output channels of this block.
expansion (int): The ratio of ``out_channels/mid_channels`` where
``mid_channels`` is the input/output channels of conv2. Default: 4.
stride (int): stride of the block. Default: 1
dilation (int): dilation of convolution. Default: 1
downsample (nn.Module, optional): downsample operation on identity
branch. Default: None.
style (str): ``"pytorch"`` or ``"caffe"``. If set to "pytorch", the
stride-two layer is the 3x3 conv layer, otherwise the stride-two
layer is the first 1x1 conv layer. Default: "pytorch".
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
conv_cfg (dict, optional): dictionary to construct and config conv
layer. Default: None
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
"""
def __init__(self,
in_channels,
out_channels,
expansion=4,
stride=1,
dilation=1,
downsample=None,
style='pytorch',
with_cp=False,
conv_cfg=None,
norm_cfg=dict(type='BN')):
super(Bottleneck, self).__init__()
assert style in ['pytorch', 'caffe']
self.in_channels = in_channels
self.out_channels = out_channels
self.expansion = expansion
assert out_channels % expansion == 0
self.mid_channels = out_channels // expansion
self.stride = stride
self.dilation = dilation
self.style = style
self.with_cp = with_cp
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
if self.style == 'pytorch':
self.conv1_stride = 1
self.conv2_stride = stride
else:
self.conv1_stride = stride
self.conv2_stride = 1
self.norm1_name, norm1 = build_norm_layer(
norm_cfg, self.mid_channels, postfix=1)
self.norm2_name, norm2 = build_norm_layer(
norm_cfg, self.mid_channels, postfix=2)
self.norm3_name, norm3 = build_norm_layer(
norm_cfg, out_channels, postfix=3)
self.conv1 = build_conv_layer(
conv_cfg,
in_channels,
self.mid_channels,
kernel_size=1,
stride=self.conv1_stride,
bias=False)
self.add_module(self.norm1_name, norm1)
self.conv2 = build_conv_layer(
conv_cfg,
self.mid_channels,
self.mid_channels,
kernel_size=3,
stride=self.conv2_stride,
padding=dilation,
dilation=dilation,
bias=False)
self.add_module(self.norm2_name, norm2)
self.conv3 = build_conv_layer(
conv_cfg,
self.mid_channels,
out_channels,
kernel_size=1,
bias=False)
self.add_module(self.norm3_name, norm3)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
@property
def norm1(self):
return getattr(self, self.norm1_name)
@property
def norm2(self):
return getattr(self, self.norm2_name)
@property
def norm3(self):
return getattr(self, self.norm3_name)
def forward(self, x):
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.norm3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
def get_expansion(block, expansion=None):
"""Get the expansion of a residual block.
The block expansion will be obtained by the following order:
1. If ``expansion`` is given, just return it.
2. If ``block`` has the attribute ``expansion``, then return
``block.expansion``.
    3. Return the default value according to the block type:
1 for ``BasicBlock`` and 4 for ``Bottleneck``.
Args:
block (class): The block class.
expansion (int | None): The given expansion ratio.
Returns:
int: The expansion of the block.
"""
if isinstance(expansion, int):
assert expansion > 0
elif expansion is None:
if hasattr(block, 'expansion'):
expansion = block.expansion
elif issubclass(block, WideBasicBlock):
expansion = 1
elif issubclass(block, BasicBlock):
expansion = 1
elif issubclass(block, Bottleneck):
expansion = 4
else:
raise TypeError(f'expansion is not specified for {block.__name__}')
else:
raise TypeError('expansion must be an integer or None')
return expansion
class ResLayer(nn.Sequential):
"""ResLayer to build ResNet style backbone.
Args:
block (nn.Module): Residual block used to build ResLayer.
num_blocks (int): Number of blocks.
in_channels (int): Input channels of this block.
out_channels (int): Output channels of this block.
expansion (int, optional): The expansion for BasicBlock/Bottleneck.
If not specified, it will firstly be obtained via
``block.expansion``. If the block has no attribute "expansion",
the following default values will be used: 1 for BasicBlock and
4 for Bottleneck. Default: None.
stride (int): stride of the first block. Default: 1.
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottleneck. Default: False
conv_cfg (dict, optional): dictionary to construct and config conv
layer. Default: None
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
"""
def __init__(self,
block,
num_blocks,
in_channels,
out_channels,
expansion=None,
stride=1,
avg_down=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
**kwargs):
self.block = block
self.expansion = get_expansion(block, expansion)
downsample = None
if stride != 1 or in_channels != out_channels:
downsample = []
conv_stride = stride
if avg_down and stride != 1:
conv_stride = 1
downsample.append(
nn.AvgPool2d(
kernel_size=stride,
stride=stride,
ceil_mode=True,
count_include_pad=False))
downsample.extend([
build_conv_layer(
conv_cfg,
in_channels,
out_channels,
kernel_size=1,
stride=conv_stride,
bias=False),
build_norm_layer(norm_cfg, out_channels)[1]
])
downsample = nn.Sequential(*downsample)
layers = []
layers.append(
block(
in_channels=in_channels,
out_channels=out_channels,
expansion=self.expansion,
stride=stride,
downsample=downsample,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
**kwargs))
in_channels = out_channels
for i in range(1, num_blocks):
layers.append(
block(
in_channels=in_channels,
out_channels=out_channels,
expansion=self.expansion,
stride=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
**kwargs))
super(ResLayer, self).__init__(*layers)
@BACKBONES.register_module()
class ResNet(BaseBackbone):
"""ResNet backbone.
Please refer to the `paper <https://arxiv.org/abs/1512.03385>`__ for
details.
Args:
depth (int): Network depth, from {18, 34, 50, 101, 152}.
in_channels (int): Number of input image channels. Default: 3.
stem_channels (int): Output channels of the stem layer. Default: 64.
base_channels (int): Middle channels of the first stage. Default: 64.
num_stages (int): Stages of the network. Default: 4.
strides (Sequence[int]): Strides of the first block of each stage.
Default: ``(1, 2, 2, 2)``.
dilations (Sequence[int]): Dilation of each stage.
Default: ``(1, 1, 1, 1)``.
out_indices (Sequence[int]): Output from which stages. If only one
stage is specified, a single tensor (feature map) is returned,
otherwise multiple stages are specified, a tuple of tensors will
be returned. Default: ``(3, )``.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer.
deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv.
Default: False.
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottleneck. Default: False.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters. Default: -1.
conv_cfg (dict | None): The config dict for conv layers. Default: None.
norm_cfg (dict): The config dict for norm layers.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
zero_init_residual (bool): Whether to use zero init for last norm layer
in resblocks to let them behave as identity. Default: True.
Example:
>>> from mmcls.models import ResNet
>>> import torch
>>> self = ResNet(depth=18)
>>> self.eval()
>>> inputs = torch.rand(1, 3, 32, 32)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 64, 8, 8)
(1, 128, 4, 4)
(1, 256, 2, 2)
(1, 512, 1, 1)
"""
arch_settings = {
18: (BasicBlock, (2, 2, 2, 2)),
34: (BasicBlock, (3, 4, 6, 3)),
50: (Bottleneck, (3, 4, 6, 3)),
101: (Bottleneck, (3, 4, 23, 3)),
152: (Bottleneck, (3, 8, 36, 3))
}
def __init__(self,
depth,
in_channels=3,
stem_channels=64,
base_channels=64,
expansion=None,
num_stages=4,
strides=(1, 2, 2, 2),
dilations=(1, 1, 1, 1),
out_indices=(3, ),
style='pytorch',
deep_stem=False,
avg_down=False,
frozen_stages=-1,
conv_cfg=None,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=False,
with_cp=False,
zero_init_residual=True,
init_cfg=[
dict(type='Kaiming', layer=['Conv2d']),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]):
super(ResNet, self).__init__(init_cfg)
if depth not in self.arch_settings:
raise KeyError(f'invalid depth {depth} for resnet')
self.depth = depth
self.stem_channels = stem_channels
self.base_channels = base_channels
self.num_stages = num_stages
assert num_stages >= 1 and num_stages <= 4
self.strides = strides
self.dilations = dilations
assert len(strides) == len(dilations) == num_stages
self.out_indices = out_indices
assert max(out_indices) < num_stages
self.style = style
self.deep_stem = deep_stem
self.avg_down = avg_down
self.frozen_stages = frozen_stages
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.with_cp = with_cp
self.norm_eval = norm_eval
self.zero_init_residual = zero_init_residual
self.block, stage_blocks = self.arch_settings[depth]
self.stage_blocks = stage_blocks[:num_stages]
self.expansion = get_expansion(self.block, expansion)
self._make_stem_layer(in_channels, stem_channels)
self.res_layers = []
_in_channels = stem_channels
_out_channels = base_channels * self.expansion
for i, num_blocks in enumerate(self.stage_blocks):
stride = strides[i]
dilation = dilations[i]
res_layer = self.make_res_layer(
block=self.block,
num_blocks=num_blocks,
in_channels=_in_channels,
out_channels=_out_channels,
expansion=self.expansion,
stride=stride,
dilation=dilation,
style=self.style,
avg_down=self.avg_down,
with_cp=with_cp,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg)
_in_channels = _out_channels
_out_channels *= 2
layer_name = f'layer{i + 1}'
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self._freeze_stages()
self.feat_dim = res_layer[-1].out_channels
def make_res_layer(self, **kwargs):
return ResLayer(**kwargs)
@property
def norm1(self):
return getattr(self, self.norm1_name)
def _make_stem_layer(self, in_channels, stem_channels):
if self.deep_stem:
self.stem = nn.Sequential(
ConvModule(
in_channels,
stem_channels // 2,
kernel_size=3,
stride=2,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
inplace=True),
ConvModule(
stem_channels // 2,
stem_channels // 2,
kernel_size=3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
inplace=True),
ConvModule(
stem_channels // 2,
stem_channels,
kernel_size=3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
inplace=True))
else:
self.conv1 = build_conv_layer(
self.conv_cfg,
in_channels,
stem_channels,
kernel_size=7,
stride=2,
padding=3,
bias=False)
self.norm1_name, norm1 = build_norm_layer(
self.norm_cfg, stem_channels, postfix=1)
self.add_module(self.norm1_name, norm1)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def _freeze_stages(self):
if self.frozen_stages >= 0:
if self.deep_stem:
self.stem.eval()
for param in self.stem.parameters():
param.requires_grad = False
else:
self.norm1.eval()
for m in [self.conv1, self.norm1]:
for param in m.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False
def init_weights(self):
super(ResNet, self).init_weights()
if (isinstance(self.init_cfg, dict)
and self.init_cfg['type'] == 'Pretrained'):
# Suppress zero_init_residual if use pretrained model.
return
if self.zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
constant_init(m.norm3, 0)
elif isinstance(m, BasicBlock):
constant_init(m.norm2, 0)
def forward(self, x):
if self.deep_stem:
x = self.stem(x)
else:
x = self.conv1(x)
x = self.norm1(x)
x = self.relu(x)
x = self.maxpool(x)
outs = []
for i, layer_name in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
x = res_layer(x)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
def train(self, mode=True):
super(ResNet, self).train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
# trick: eval have effect on BatchNorm only
if isinstance(m, _BatchNorm):
m.eval()
@BACKBONES.register_module()
class ResNetV1d(ResNet):
"""ResNetV1d backbone.
This variant is described in `Bag of Tricks.
<https://arxiv.org/pdf/1812.01187.pdf>`_.
    Compared with the default ResNet (ResNetV1b), ResNetV1d replaces the 7x7
    conv in
the input stem with three 3x3 convs. And in the downsampling block, a 2x2
avg_pool with stride 2 is added before conv, whose stride is changed to 1.
"""
def __init__(self, **kwargs):
super(ResNetV1d, self).__init__(
deep_stem=True, avg_down=True, **kwargs)
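# --- Hedged demo (added for illustration, not part of the original file):
# the resolution order of `get_expansion` documented above, checked directly.
if __name__ == '__main__':
    assert get_expansion(BasicBlock) == 1               # default for BasicBlock
    assert get_expansion(Bottleneck) == 4               # default for Bottleneck
    assert get_expansion(Bottleneck, expansion=2) == 2  # explicit value wins
    print('get_expansion resolves as documented')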
| 26,579 | 33.474708 | 79 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/models/backbones/tsn.py | import torch.nn as nn
import torch
from ..builder import BACKBONES, build_backbone
from .base_backbone import BaseBackbone
import torch.nn.functional as F
@BACKBONES.register_module()
class TSN_backbone(BaseBackbone):
def __init__(self, backbone, in_channels, out_channels):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.encoder = build_backbone(backbone)
self.fc = nn.Linear(self.in_channels, self.out_channels, bias=False)
def forward(self, x):
x = self.encoder(x)
if isinstance(x, tuple):
x = x[-1]
x = F.adaptive_avg_pool2d(x, (1,1))
x = x.view(x.size(0), -1)
x = self.fc(x)
mu = torch.mean(x, 0)
log_var = torch.log(torch.var(x, 0))
return (mu, log_var), x
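# --- Hedged usage sketch (added for illustration, not part of the original
# file). The backbone config below is an assumption chosen so the encoder's
# output width matches `in_channels`.
if __name__ == '__main__':
    model = TSN_backbone(
        backbone=dict(type='ResNet', depth=18, out_indices=(3, )),
        in_channels=512,   # ResNet-18 stage-4 width
        out_channels=128)
    (mu, log_var), feats = model(torch.rand(2, 3, 64, 64))
    print(feats.shape)               # torch.Size([2, 128])
    print(mu.shape, log_var.shape)   # per-dimension batch statistics: (128,)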
| 860 | 25.90625 | 76 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/models/backbones/disentangle.py | import torch
import torch.nn as nn
from ..builder import BACKBONES
class Flatten3D(nn.Module):
def forward(self, x):
x = x.view(x.size()[0], -1)
return x
@BACKBONES.register_module()
class SimpleConv64(nn.Module):
def __init__(self,
latent_dim=10,
num_channels=1,
image_size=64
):
super().__init__()
assert image_size == 64, 'This model only works with image size 64x64.'
self.main = nn.Sequential(
nn.Conv2d(num_channels, 32, 4, 2, 1),
nn.ReLU(True),
nn.Conv2d(32, 32, 4, 2, 1),
nn.ReLU(True),
nn.Conv2d(32, 64, 4, 2, 1),
nn.ReLU(True),
nn.Conv2d(64, 128, 4, 2, 1),
nn.ReLU(True),
nn.Conv2d(128, 256, 4, 2, 1),
nn.ReLU(True),
nn.Conv2d(256, 256, 4, 2, 1),
nn.ReLU(True),
Flatten3D(),
nn.Linear(256, latent_dim, bias=True)
)
def forward(self, x):
output = self.main(x)
return (output, )
@BACKBONES.register_module()
class SimpleGaussianConv64(SimpleConv64):
def __init__(self, latent_dim, num_channels, image_size):
super().__init__(latent_dim * 2, num_channels, image_size)
# override value of _latent_dim
self._latent_dim = latent_dim
def forward(self, x):
mu_logvar = self.main(x)
mu = mu_logvar[:, :self._latent_dim]
logvar = mu_logvar[:, self._latent_dim:]
output = self.reparameterize(mu, logvar)
return (mu, logvar), output
def reparameterize(self, mu, logvar):
"""
        Reparameterization trick: sample z = mu + std * eps with
        eps ~ N(0, I); a single z per input is used to estimate the
        expectation in the loss.
        :param mu: (Tensor) Mean of the latent Gaussian
        :param logvar: (Tensor) Log-variance of the latent Gaussian
:return:
"""
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps * std + mu
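# --- Hedged usage sketch (added for illustration, not part of the original
# file): the Gaussian encoder returns (mu, logvar) plus one reparameterized
# sample per input.
if __name__ == '__main__':
    enc = SimpleGaussianConv64(latent_dim=10, num_channels=3, image_size=64)
    (mu, logvar), z = enc(torch.rand(4, 3, 64, 64))
    print(mu.shape, logvar.shape, z.shape)  # each torch.Size([4, 10])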
| 2,028 | 27.577465 | 79 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/models/backbones/resnet_cifar.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import build_conv_layer, build_norm_layer
from ..builder import BACKBONES
from .resnet import ResNet
@BACKBONES.register_module()
class ResNet_CIFAR(ResNet):
"""ResNet backbone for CIFAR.
Compared to standard ResNet, it uses `kernel_size=3` and `stride=1` in
    conv1, and does not apply MaxPooling after the stem. It has been proven to
    be more efficient than standard ResNet in other public codebases, e.g.,
`https://github.com/kuangliu/pytorch-cifar/blob/master/models/resnet.py`.
Args:
depth (int): Network depth, from {18, 34, 50, 101, 152}.
in_channels (int): Number of input image channels. Default: 3.
stem_channels (int): Output channels of the stem layer. Default: 64.
base_channels (int): Middle channels of the first stage. Default: 64.
num_stages (int): Stages of the network. Default: 4.
strides (Sequence[int]): Strides of the first block of each stage.
Default: ``(1, 2, 2, 2)``.
dilations (Sequence[int]): Dilation of each stage.
Default: ``(1, 1, 1, 1)``.
out_indices (Sequence[int]): Output from which stages. If only one
stage is specified, a single tensor (feature map) is returned,
otherwise multiple stages are specified, a tuple of tensors will
be returned. Default: ``(3, )``.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer.
deep_stem (bool): This network has specific designed stem, thus it is
asserted to be False.
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottleneck. Default: False.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters. Default: -1.
conv_cfg (dict | None): The config dict for conv layers. Default: None.
norm_cfg (dict): The config dict for norm layers.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
zero_init_residual (bool): Whether to use zero init for last norm layer
in resblocks to let them behave as identity. Default: True.
"""
def __init__(self, depth, deep_stem=False, **kwargs):
super(ResNet_CIFAR, self).__init__(
depth, deep_stem=deep_stem, **kwargs)
        assert not self.deep_stem, 'ResNet_CIFAR does not support deep_stem'
def _make_stem_layer(self, in_channels, base_channels):
self.conv1 = build_conv_layer(
self.conv_cfg,
in_channels,
base_channels,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.norm1_name, norm1 = build_norm_layer(
self.norm_cfg, base_channels, postfix=1)
self.add_module(self.norm1_name, norm1)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv1(x)
x = self.norm1(x)
x = self.relu(x)
outs = []
for i, layer_name in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
x = res_layer(x)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
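# --- Hedged shape-check sketch (added for illustration, not part of the
# original file): with the 3x3/stride-1 stem and no MaxPooling, a 32x32
# CIFAR image keeps 4x more spatial resolution than in the standard ResNet.
if __name__ == '__main__':
    import torch
    model = ResNet_CIFAR(depth=18, out_indices=(3, ))
    model.eval()
    with torch.no_grad():
        feats = model(torch.rand(1, 3, 32, 32))
    print(tuple(feats[-1].shape))  # (1, 512, 4, 4)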
| 3,707 | 44.219512 | 79 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/models/backbones/shufflenet_v2.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import ConvModule, constant_init, normal_init
from mmcv.runner import BaseModule
from torch.nn.modules.batchnorm import _BatchNorm
from mmcls.models.utils import channel_shuffle
from ..builder import BACKBONES
from .base_backbone import BaseBackbone
class InvertedResidual(BaseModule):
"""InvertedResidual block for ShuffleNetV2 backbone.
Args:
in_channels (int): The input channels of the block.
out_channels (int): The output channels of the block.
stride (int): Stride of the 3x3 convolution layer. Default: 1
conv_cfg (dict, optional): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU').
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
Returns:
Tensor: The output tensor.
"""
def __init__(self,
in_channels,
out_channels,
stride=1,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
with_cp=False,
init_cfg=None):
super(InvertedResidual, self).__init__(init_cfg)
self.stride = stride
self.with_cp = with_cp
branch_features = out_channels // 2
if self.stride == 1:
assert in_channels == branch_features * 2, (
f'in_channels ({in_channels}) should equal to '
f'branch_features * 2 ({branch_features * 2}) '
'when stride is 1')
if in_channels != branch_features * 2:
assert self.stride != 1, (
f'stride ({self.stride}) should not equal 1 when '
f'in_channels != branch_features * 2')
if self.stride > 1:
self.branch1 = nn.Sequential(
ConvModule(
in_channels,
in_channels,
kernel_size=3,
stride=self.stride,
padding=1,
groups=in_channels,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None),
ConvModule(
in_channels,
branch_features,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg),
)
self.branch2 = nn.Sequential(
ConvModule(
in_channels if (self.stride > 1) else branch_features,
branch_features,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg),
ConvModule(
branch_features,
branch_features,
kernel_size=3,
stride=self.stride,
padding=1,
groups=branch_features,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None),
ConvModule(
branch_features,
branch_features,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
def forward(self, x):
def _inner_forward(x):
if self.stride > 1:
out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)
else:
x1, x2 = x.chunk(2, dim=1)
out = torch.cat((x1, self.branch2(x2)), dim=1)
out = channel_shuffle(out, 2)
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
return out
@BACKBONES.register_module()
class ShuffleNetV2(BaseBackbone):
"""ShuffleNetV2 backbone.
Args:
widen_factor (float): Width multiplier - adjusts the number of
channels in each layer by this amount. Default: 1.0.
out_indices (Sequence[int]): Output from which stages.
Default: (0, 1, 2, 3).
frozen_stages (int): Stages to be frozen (all param fixed).
Default: -1, which means not freezing any parameters.
conv_cfg (dict, optional): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU').
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
"""
def __init__(self,
widen_factor=1.0,
out_indices=(3, ),
frozen_stages=-1,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
norm_eval=False,
with_cp=False,
init_cfg=None):
super(ShuffleNetV2, self).__init__(init_cfg)
self.stage_blocks = [4, 8, 4]
for index in out_indices:
if index not in range(0, 4):
                raise ValueError('the item in out_indices must be in '
                                 f'range(0, 4). But received {index}')
if frozen_stages not in range(-1, 4):
raise ValueError('frozen_stages must be in range(-1, 4). '
f'But received {frozen_stages}')
self.out_indices = out_indices
self.frozen_stages = frozen_stages
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.norm_eval = norm_eval
self.with_cp = with_cp
if widen_factor == 0.5:
channels = [48, 96, 192, 1024]
elif widen_factor == 1.0:
channels = [116, 232, 464, 1024]
elif widen_factor == 1.5:
channels = [176, 352, 704, 1024]
elif widen_factor == 2.0:
channels = [244, 488, 976, 2048]
else:
raise ValueError('widen_factor must be in [0.5, 1.0, 1.5, 2.0]. '
f'But received {widen_factor}')
self.in_channels = 24
self.conv1 = ConvModule(
in_channels=3,
out_channels=self.in_channels,
kernel_size=3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layers = nn.ModuleList()
for i, num_blocks in enumerate(self.stage_blocks):
layer = self._make_layer(channels[i], num_blocks)
self.layers.append(layer)
output_channels = channels[-1]
self.layers.append(
ConvModule(
in_channels=self.in_channels,
out_channels=output_channels,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
def _make_layer(self, out_channels, num_blocks):
"""Stack blocks to make a layer.
Args:
out_channels (int): out_channels of the block.
num_blocks (int): number of blocks.
"""
layers = []
for i in range(num_blocks):
stride = 2 if i == 0 else 1
layers.append(
InvertedResidual(
in_channels=self.in_channels,
out_channels=out_channels,
stride=stride,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
with_cp=self.with_cp))
self.in_channels = out_channels
return nn.Sequential(*layers)
def _freeze_stages(self):
if self.frozen_stages >= 0:
for param in self.conv1.parameters():
param.requires_grad = False
for i in range(self.frozen_stages):
m = self.layers[i]
m.eval()
for param in m.parameters():
param.requires_grad = False
def init_weights(self):
super(ShuffleNetV2, self).init_weights()
if (isinstance(self.init_cfg, dict)
and self.init_cfg['type'] == 'Pretrained'):
# Suppress default init if use pretrained model.
return
for name, m in self.named_modules():
if isinstance(m, nn.Conv2d):
if 'conv1' in name:
normal_init(m, mean=0, std=0.01)
else:
normal_init(m, mean=0, std=1.0 / m.weight.shape[1])
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m.weight, val=1, bias=0.0001)
if isinstance(m, _BatchNorm):
if m.running_mean is not None:
nn.init.constant_(m.running_mean, 0)
def forward(self, x):
x = self.conv1(x)
x = self.maxpool(x)
outs = []
for i, layer in enumerate(self.layers):
x = layer(x)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
def train(self, mode=True):
super(ShuffleNetV2, self).train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
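# --- Hedged shape-check sketch (added for illustration, not part of the
# original file). out_indices=(3, ) selects the final 1x1 conv layer.
if __name__ == '__main__':
    model = ShuffleNetV2(widen_factor=1.0, out_indices=(3, ))
    model.eval()
    with torch.no_grad():
        feats = model(torch.rand(1, 3, 224, 224))
    print(tuple(feats[-1].shape))  # (1, 1024, 7, 7)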
| 10,408 | 33.92953 | 78 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/models/backbones/wideresnet.py | import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import (build_conv_layer, build_norm_layer)
from .resnet import ResNet, WideBasicBlock
from ..builder import BACKBONES
@BACKBONES.register_module()
class WideResNet_CIFAR(ResNet):
"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.
The model is the same as ResNet except for the bottleneck number of
channels which is twice larger in every block. The number of channels
in outer 1x1 convolutions is the same, e.g. last block in ResNet-50
has 2048-512-2048 channels, and in Wide ResNet-50-2 has 2048-1024-2048.
"""
arch_settings = {
28: (WideBasicBlock, (4, 4, 4)),
}
def __init__(self, depth, out_channel, deep_stem=False,
norm_cfg=dict(type='BN',
momentum=0.1,
requires_grad=True),
**kwargs):
super(WideResNet_CIFAR, self).__init__(
depth,
deep_stem=deep_stem,
norm_cfg=norm_cfg, **kwargs)
        assert not self.deep_stem, 'WideResNet_CIFAR does not support deep_stem'
self.norm_cfg = norm_cfg
self.norm1_name, norm1 = build_norm_layer(
self.norm_cfg, out_channel, postfix=1)
self.add_module(self.norm1_name, norm1)
def _make_stem_layer(self, in_channels, base_channels):
self.conv1 = build_conv_layer(
self.conv_cfg,
in_channels,
base_channels,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.relu = nn.LeakyReLU(0.1, inplace=True)
def forward(self, x):
x = self.conv1(x)
outs = []
for i, layer_name in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
x = res_layer(x)
if i in self.out_indices:
if i == self.out_indices[-1]:
x = self.relu(self.norm1(x))
else:
x = self.relu(x)
outs.append(x)
        # Return after the loop so every requested stage is collected; the
        # original early `return` inside the loop could exit before reaching
        # the requested indices.
        return tuple(outs)
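# --- Hedged usage sketch (added for illustration, not part of the original
# file). The stage arguments below are assumptions: depth 28 has three
# stages, so num_stages/strides/dilations/out_indices must be set
# accordingly (the 4-stage ResNet defaults do not apply).
if __name__ == '__main__':
    import torch
    model = WideResNet_CIFAR(
        depth=28,
        out_channel=256,          # width of the last stage (base_channels * 4)
        num_stages=3,
        strides=(1, 2, 2),
        dilations=(1, 1, 1),
        out_indices=(2, ))
    model.eval()
    with torch.no_grad():
        feats = model(torch.rand(1, 3, 32, 32))
    print(tuple(feats[-1].shape))  # (1, 256, 8, 8)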
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/models/heads/cls_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn.functional as F
from mmcls.models.losses import Accuracy
from ..builder import HEADS, build_loss
from ..utils import is_tracing
from .base_head import BaseHead
@HEADS.register_module()
class ClsHead(BaseHead):
"""classification head.
Args:
loss (dict): Config of classification loss.
topk (int | tuple): Top-k accuracy.
cal_acc (bool): Whether to calculate accuracy during training.
If you use Mixup/CutMix or something like that during training,
it is not reasonable to calculate accuracy. Defaults to False.
"""
def __init__(self,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1, ),
cal_acc=False,
init_cfg=None):
super(ClsHead, self).__init__(init_cfg=init_cfg)
assert isinstance(loss, dict)
assert isinstance(topk, (int, tuple))
if isinstance(topk, int):
topk = (topk, )
for _topk in topk:
assert _topk > 0, 'Top-k should be larger than 0'
self.topk = topk
self.compute_loss = build_loss(loss)
self.compute_accuracy = Accuracy(topk=self.topk)
self.cal_acc = cal_acc
def loss(self, cls_score, gt_label):
num_samples = len(cls_score)
losses = dict()
# compute loss
loss = self.compute_loss(cls_score, gt_label, avg_factor=num_samples)
if self.cal_acc:
# compute accuracy
acc = self.compute_accuracy(cls_score, gt_label)
assert len(acc) == len(self.topk)
losses['accuracy'] = {
f'top-{k}': a
for k, a in zip(self.topk, acc)
}
losses['loss'] = loss
return losses
def forward_train(self, cls_score, gt_label):
if isinstance(cls_score, tuple):
cls_score = cls_score[-1]
losses = self.loss(cls_score, gt_label)
return losses
def simple_test(self, cls_score):
"""Test without augmentation."""
if isinstance(cls_score, tuple):
cls_score = cls_score[-1]
if isinstance(cls_score, list):
cls_score = sum(cls_score) / float(len(cls_score))
pred = F.softmax(cls_score, dim=1) if cls_score is not None else None
return self.post_process(pred)
def post_process(self, pred):
on_trace = is_tracing()
if torch.onnx.is_in_onnx_export() or on_trace:
return pred
pred = list(pred.detach().cpu().numpy())
return pred
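# --- Hedged usage sketch (added for illustration, not part of the original
# file): feeding raw logits and labels through the head's training path.
if __name__ == '__main__':
    head = ClsHead(topk=(1, ), cal_acc=True)
    cls_score = torch.rand(4, 10)
    gt_label = torch.randint(0, 10, (4, ))
    losses = head.forward_train(cls_score, gt_label)
    print(losses['loss'], losses['accuracy'])  # scalar loss + top-1 accuracy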
| 2,636 | 31.9625 | 77 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/models/heads/multi_label_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn.functional as F
from ..builder import HEADS, build_loss
from ..utils import is_tracing
from .base_head import BaseHead
@HEADS.register_module()
class MultiLabelClsHead(BaseHead):
"""Classification head for multilabel task.
Args:
loss (dict): Config of classification loss.
"""
def __init__(self,
loss=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
reduction='mean',
loss_weight=1.0),
init_cfg=None):
super(MultiLabelClsHead, self).__init__(init_cfg=init_cfg)
assert isinstance(loss, dict)
self.compute_loss = build_loss(loss)
def loss(self, cls_score, gt_label):
gt_label = gt_label.type_as(cls_score)
num_samples = len(cls_score)
losses = dict()
# map difficult examples to positive ones
_gt_label = torch.abs(gt_label)
# compute loss
loss = self.compute_loss(cls_score, _gt_label, avg_factor=num_samples)
losses['loss'] = loss
return losses
def forward_train(self, cls_score, gt_label):
if isinstance(cls_score, tuple):
cls_score = cls_score[-1]
gt_label = gt_label.type_as(cls_score)
losses = self.loss(cls_score, gt_label)
return losses
def simple_test(self, x):
if isinstance(x, tuple):
x = x[-1]
if isinstance(x, list):
x = sum(x) / float(len(x))
        pred = torch.sigmoid(x) if x is not None else None
return self.post_process(pred)
def post_process(self, pred):
on_trace = is_tracing()
if torch.onnx.is_in_onnx_export() or on_trace:
return pred
pred = list(pred.detach().cpu().numpy())
return pred
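# --- Hedged usage sketch (added for illustration, not part of the original
# file): multi-label targets are (N, C) 0/1 matrices rather than class
# indices, matching the sigmoid-based loss configured above.
if __name__ == '__main__':
    head = MultiLabelClsHead()
    cls_score = torch.randn(4, 5)
    gt_label = torch.randint(0, 2, (4, 5))
    print(head.forward_train(cls_score, gt_label)['loss'])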
| 1,887 | 28.046154 | 78 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/models/heads/multitask_linear_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from ..builder import HEADS
from .cls_head import ClsHead
@HEADS.register_module()
class MultiTaskLinearClsHead(ClsHead):
"""Linear classifier head.
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
init_cfg (dict | optional): The extra init config of layers.
Defaults to use dict(type='Normal', layer='Linear', std=0.01).
"""
def __init__(self,
num_classes,
in_channels,
init_cfg=dict(type='Normal', layer='Linear', std=0.01),
*args,
**kwargs):
super(MultiTaskLinearClsHead, self).__init__(
init_cfg=init_cfg, *args, **kwargs)
self.in_channels = in_channels
self.num_classes = num_classes
self.num_task = len(self.num_classes)
self.fcs = nn.ModuleList(
[nn.Linear(self.in_channels, self.num_classes[i])
for i in range(self.num_task)]
)
def simple_test(self, x):
"""Test without augmentation."""
if isinstance(x, tuple):
x = x[-1]
preds = []
for i in range(self.num_task):
cls_score = self.fcs[i](x)
if isinstance(cls_score, list):
cls_score = sum(cls_score) / float(len(cls_score))
pred = F.softmax(
cls_score, dim=1) if cls_score is not None else None
preds.append(self.post_process(pred))
return preds
def get_logits(self, x):
if isinstance(x, tuple):
x = x[-1]
logits = []
for i in range(self.num_task):
logit = self.fcs[i](x)
logits.append(logit)
return logits
def forward_train(self, x, gt_label):
if isinstance(x, tuple):
x = x[-1]
losses = dict()
for i in range(self.num_task):
cls_score = self.fcs[i](x)
loss_task = self.loss(cls_score, gt_label[:, i])['loss']
losses[f'task{i}_loss'] = loss_task
return losses
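# --- Hedged usage sketch (added for illustration, not part of the original
# file): gt_label stacks one label column per task, indexed as
# gt_label[:, i] in forward_train above.
if __name__ == '__main__':
    import torch
    head = MultiTaskLinearClsHead(num_classes=[10, 5], in_channels=64)
    feats = torch.rand(4, 64)
    gt_label = torch.stack(
        [torch.randint(0, 10, (4, )), torch.randint(0, 5, (4, ))], dim=1)
    print(sorted(head.forward_train(feats, gt_label)))  # task0/task1 losses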
| 2,253 | 30.746479 | 74 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/models/heads/linear_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from ..builder import HEADS
from .cls_head import ClsHead
@HEADS.register_module()
class LinearClsHead(ClsHead):
"""Linear classifier head.
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
init_cfg (dict | optional): The extra init config of layers.
Defaults to use dict(type='Normal', layer='Linear', std=0.01).
"""
def __init__(self,
num_classes,
in_channels,
init_cfg=dict(type='Normal', layer='Linear', std=0.01),
*args,
**kwargs):
super(LinearClsHead, self).__init__(init_cfg=init_cfg, *args, **kwargs)
self.in_channels = in_channels
self.num_classes = num_classes
if self.num_classes <= 0:
raise ValueError(
f'num_classes={num_classes} must be a positive integer')
self.fc = nn.Linear(self.in_channels, self.num_classes)
def simple_test(self, x):
"""Test without augmentation."""
if isinstance(x, tuple):
x = x[-1]
cls_score = self.fc(x)
if isinstance(cls_score, list):
cls_score = sum(cls_score) / float(len(cls_score))
pred = F.softmax(cls_score, dim=1) if cls_score is not None else None
return self.post_process(pred)
def forward_train(self, x, gt_label):
if isinstance(x, tuple):
x = x[-1]
cls_score = self.fc(x)
losses = self.loss(cls_score, gt_label)
return losses
def get_logits(self, x):
if isinstance(x, tuple):
x = x[-1]
cls_score = self.fc(x)
return cls_score
@HEADS.register_module()
class LinearBCEClsHead(ClsHead):
"""Linear classifier head.
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
init_cfg (dict | optional): The extra init config of layers.
Defaults to use dict(type='Normal', layer='Linear', std=0.01).
"""
def __init__(self,
num_classes,
in_channels,
init_cfg=dict(type='Normal', layer='Linear', std=0.01),
*args,
**kwargs):
super(LinearBCEClsHead, self).__init__(
init_cfg=init_cfg, *args, **kwargs)
self.in_channels = in_channels
self.num_classes = num_classes
if self.num_classes <= 0:
raise ValueError(
f'num_classes={num_classes} must be a positive integer')
self.fc = nn.Linear(self.in_channels, self.num_classes)
def simple_test(self, x):
"""Test without augmentation."""
if isinstance(x, tuple):
x = x[-1]
cls_score = self.fc(x)
if isinstance(cls_score, list):
cls_score = sum(cls_score) / float(len(cls_score))
pred = F.softmax(cls_score, dim=1) if cls_score is not None else None
return self.post_process(pred)
def forward_train(self, x, gt_label):
if isinstance(x, tuple):
x = x[-1]
cls_score = self.fc(x)
onehot_gt_label = F.one_hot(gt_label,
num_classes=self.num_classes).float()
losses = self.loss(cls_score, onehot_gt_label)
return losses
def get_logits(self, x):
if isinstance(x, tuple):
x = x[-1]
cls_score = self.fc(x)
return cls_score
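# --- Hedged usage sketch (added for illustration, not part of the original
# file). Note that LinearBCEClsHead one-hot-encodes its labels, so it should
# be paired with a sigmoid-based loss (made explicit below as an assumption).
if __name__ == '__main__':
    import torch
    feats = torch.rand(4, 64)
    head = LinearClsHead(num_classes=10, in_channels=64)
    print(head.get_logits(feats).shape)  # torch.Size([4, 10])
    bce_head = LinearBCEClsHead(
        num_classes=10,
        in_channels=64,
        loss=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0))
    print(bce_head.forward_train(feats, torch.randint(0, 10, (4, )))['loss'])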
| 3,723 | 30.559322 | 79 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/datasets/base_dataset.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
from abc import ABCMeta, abstractmethod
import mmcv
import numpy as np
from torch.utils.data import Dataset
from mmcls.core.evaluation import precision_recall_f1, support
from mmcls.models.losses import accuracy
from .pipelines import Compose
class BaseDataset(Dataset, metaclass=ABCMeta):
"""Base dataset.
Args:
data_prefix (str): the prefix of data path
pipeline (list): a list of dict, where each element represents
            an operation defined in `mmcls.datasets.pipelines`
ann_file (str | None): the annotation file. When ann_file is str,
the subclass is expected to read from the ann_file. When ann_file
is None, the subclass is expected to read according to data_prefix
test_mode (bool): in train mode or test mode
"""
CLASSES = None
def __init__(self,
data_prefix,
pipeline,
classes=None,
ann_file=None,
test_mode=False):
super(BaseDataset, self).__init__()
self.ann_file = ann_file
self.data_prefix = data_prefix
self.test_mode = test_mode
self.pipeline = Compose(pipeline)
self.CLASSES = self.get_classes(classes)
self.data_infos = self.load_annotations()
@abstractmethod
def load_annotations(self):
pass
@property
def class_to_idx(self):
"""Map mapping class name to class index.
Returns:
dict: mapping from class name to class index.
"""
return {_class: i for i, _class in enumerate(self.CLASSES)}
def get_gt_labels(self):
"""Get all ground-truth labels (categories).
Returns:
list[int]: categories for all images.
"""
gt_labels = np.array([data['gt_label'] for data in self.data_infos])
return gt_labels
def get_cat_ids(self, idx):
"""Get category id by index.
Args:
idx (int): Index of data.
Returns:
int: Image category of specified index.
"""
        return self.data_infos[idx]['gt_label'].astype(np.int64)
def prepare_data(self, idx):
results = copy.deepcopy(self.data_infos[idx])
return self.pipeline(results)
def __len__(self):
return len(self.data_infos)
def __getitem__(self, idx):
return self.prepare_data(idx)
@classmethod
def get_classes(cls, classes=None):
"""Get class names of current dataset.
Args:
classes (Sequence[str] | str | None): If classes is None, use
default CLASSES defined by builtin dataset. If classes is a
string, take it as a file name. The file contains the name of
classes where each line contains one class name. If classes is
a tuple or list, override the CLASSES defined by the dataset.
Returns:
tuple[str] or list[str]: Names of categories of the dataset.
"""
if classes is None:
return cls.CLASSES
if isinstance(classes, str):
# take it as a file path
class_names = mmcv.list_from_file(classes)
elif isinstance(classes, (tuple, list)):
class_names = classes
else:
raise ValueError(f'Unsupported type {type(classes)} of classes.')
return class_names
def evaluate(self,
results,
metric='accuracy',
metric_options=None,
logger=None):
"""Evaluate the dataset.
Args:
results (list): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated.
Default value is `accuracy`.
metric_options (dict, optional): Options for calculating metrics.
Allowed keys are 'topk', 'thrs' and 'average_mode'.
Defaults to None.
logger (logging.Logger | str, optional): Logger used for printing
related information during evaluation. Defaults to None.
Returns:
dict: evaluation results
"""
if metric_options is None:
metric_options = {'topk': (1, 5)}
if isinstance(metric, str):
metrics = [metric]
else:
metrics = metric
allowed_metrics = [
'accuracy', 'precision', 'recall', 'f1_score', 'support'
]
eval_results = {}
results = np.vstack(results)
gt_labels = self.get_gt_labels()
num_imgs = len(results)
assert len(gt_labels) == num_imgs, 'dataset testing results should '\
'be of the same length as gt_labels.'
invalid_metrics = set(metrics) - set(allowed_metrics)
if len(invalid_metrics) != 0:
raise ValueError(f'metric {invalid_metrics} is not supported.')
topk = metric_options.get('topk', (1, 5))
thrs = metric_options.get('thrs')
average_mode = metric_options.get('average_mode', 'macro')
if 'accuracy' in metrics:
if thrs is not None:
acc = accuracy(results, gt_labels, topk=topk, thrs=thrs)
else:
acc = accuracy(results, gt_labels, topk=topk)
if isinstance(topk, tuple):
eval_results_ = {
f'accuracy_top-{k}': a
for k, a in zip(topk, acc)
}
else:
eval_results_ = {'accuracy': acc}
if isinstance(thrs, tuple):
for key, values in eval_results_.items():
eval_results.update({
f'{key}_thr_{thr:.2f}': value.item()
for thr, value in zip(thrs, values)
})
else:
eval_results.update(
{k: v.item()
for k, v in eval_results_.items()})
if 'support' in metrics:
support_value = support(
results, gt_labels, average_mode=average_mode)
eval_results['support'] = support_value
precision_recall_f1_keys = ['precision', 'recall', 'f1_score']
if len(set(metrics) & set(precision_recall_f1_keys)) != 0:
if thrs is not None:
precision_recall_f1_values = precision_recall_f1(
results, gt_labels, average_mode=average_mode, thrs=thrs)
else:
precision_recall_f1_values = precision_recall_f1(
results, gt_labels, average_mode=average_mode)
for key, values in zip(precision_recall_f1_keys,
precision_recall_f1_values):
if key in metrics:
if isinstance(thrs, tuple):
eval_results.update({
f'{key}_thr_{thr:.2f}': value
for thr, value in zip(thrs, values)
})
else:
eval_results[key] = values
return eval_results
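# --- Hedged usage sketch (added for illustration, not part of the original
# file): a minimal concrete subclass, just to exercise the base-class
# helpers. The toy annotations below are invented for the demo.
if __name__ == '__main__':
    class _ToyDataset(BaseDataset):
        def load_annotations(self):
            return [dict(gt_label=np.array(i % 2)) for i in range(4)]
    ds = _ToyDataset(data_prefix='', pipeline=[], classes=['a', 'b'])
    print(ds.class_to_idx)     # {'a': 0, 'b': 1}
    print(ds.get_gt_labels())  # [0 1 0 1]
    print(len(ds))             # 4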
| 7,191 | 33.576923 | 78 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/datasets/dataset_wrappers.py | # Copyright (c) OpenMMLab. All rights reserved.
import bisect
import math
from collections import defaultdict
import numpy as np
from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
from .builder import DATASETS
@DATASETS.register_module()
class ConcatDataset(_ConcatDataset):
"""A wrapper of concatenated dataset.
Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but
add `get_cat_ids` function.
Args:
datasets (list[:obj:`Dataset`]): A list of datasets.
"""
def __init__(self, datasets):
super(ConcatDataset, self).__init__(datasets)
self.CLASSES = datasets[0].CLASSES
def get_cat_ids(self, idx):
if idx < 0:
if -idx > len(self):
raise ValueError(
'absolute value of index should not exceed dataset length')
idx = len(self) + idx
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
return self.datasets[dataset_idx].get_cat_ids(sample_idx)
@DATASETS.register_module()
class RepeatDataset(object):
"""A wrapper of repeated dataset.
The length of repeated dataset will be `times` larger than the original
dataset. This is useful when the data loading time is long but the dataset
is small. Using RepeatDataset can reduce the data loading time between
epochs.
Args:
dataset (:obj:`Dataset`): The dataset to be repeated.
times (int): Repeat times.
"""
def __init__(self, dataset, times):
self.dataset = dataset
self.times = times
self.CLASSES = dataset.CLASSES
self._ori_len = len(self.dataset)
def __getitem__(self, idx):
return self.dataset[idx % self._ori_len]
def get_cat_ids(self, idx):
return self.dataset.get_cat_ids(idx % self._ori_len)
def __len__(self):
return self.times * self._ori_len
# Modified from https://github.com/facebookresearch/detectron2/blob/41d475b75a230221e21d9cac5d69655e3415e3a4/detectron2/data/samplers/distributed_sampler.py#L57 # noqa
@DATASETS.register_module()
class ClassBalancedDataset(object):
r"""A wrapper of repeated dataset with repeat factor.
Suitable for training on class imbalanced datasets like LVIS. Following
the sampling strategy in [#1]_, in each epoch, an image may appear multiple
times based on its "repeat factor".
The repeat factor for an image is a function of the frequency the rarest
category labeled in that image. The "frequency of category c" in [0, 1]
is defined by the fraction of images in the training set (without repeats)
in which category c appears.
The dataset needs to implement :func:`self.get_cat_ids` to support
ClassBalancedDataset.
The repeat factor is computed as followed.
1. For each category c, compute the fraction :math:`f(c)` of images that
contain it.
2. For each category c, compute the category-level repeat factor
.. math::
r(c) = \max(1, \sqrt{\frac{t}{f(c)}})
3. For each image I and its labels :math:`L(I)`, compute the image-level
repeat factor
.. math::
r(I) = \max_{c \in L(I)} r(c)
References:
.. [#1] https://arxiv.org/pdf/1908.03195.pdf
Args:
dataset (:obj:`CustomDataset`): The dataset to be repeated.
oversample_thr (float): frequency threshold below which data is
repeated. For categories with `f_c` >= `oversample_thr`, there is
no oversampling. For categories with `f_c` < `oversample_thr`, the
            degree of oversampling follows the square-root inverse frequency
            heuristic above.
"""
def __init__(self, dataset, oversample_thr):
self.dataset = dataset
self.oversample_thr = oversample_thr
self.CLASSES = dataset.CLASSES
repeat_factors = self._get_repeat_factors(dataset, oversample_thr)
repeat_indices = []
for dataset_index, repeat_factor in enumerate(repeat_factors):
repeat_indices.extend([dataset_index] * math.ceil(repeat_factor))
self.repeat_indices = repeat_indices
flags = []
if hasattr(self.dataset, 'flag'):
for flag, repeat_factor in zip(self.dataset.flag, repeat_factors):
flags.extend([flag] * int(math.ceil(repeat_factor)))
assert len(flags) == len(repeat_indices)
self.flag = np.asarray(flags, dtype=np.uint8)
def _get_repeat_factors(self, dataset, repeat_thr):
# 1. For each category c, compute the fraction # of images
# that contain it: f(c)
category_freq = defaultdict(int)
num_images = len(dataset)
for idx in range(num_images):
cat_ids = set(self.dataset.get_cat_ids(idx))
for cat_id in cat_ids:
category_freq[cat_id] += 1
for k, v in category_freq.items():
            assert v > 0, f'category {k} does not contain any images'
category_freq[k] = v / num_images
# 2. For each category c, compute the category-level repeat factor:
# r(c) = max(1, sqrt(t/f(c)))
category_repeat = {
cat_id: max(1.0, math.sqrt(repeat_thr / cat_freq))
for cat_id, cat_freq in category_freq.items()
}
# 3. For each image I and its labels L(I), compute the image-level
# repeat factor:
# r(I) = max_{c in L(I)} r(c)
repeat_factors = []
for idx in range(num_images):
cat_ids = set(self.dataset.get_cat_ids(idx))
repeat_factor = max(
{category_repeat[cat_id]
for cat_id in cat_ids})
repeat_factors.append(repeat_factor)
return repeat_factors
def __getitem__(self, idx):
ori_index = self.repeat_indices[idx]
return self.dataset[ori_index]
def __len__(self):
return len(self.repeat_indices)
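# A worked example of steps 2-3 above under assumed toy frequencies: with
# oversample_thr t=0.01, a category appearing in 1% of images keeps
# r(c)=1.0, while one appearing in 0.01% of images gets r(c)=10.0, and an
# image labeled with both takes the maximum.
def _repeat_factor_example(repeat_thr=0.01):
    category_freq = {'common': 0.01, 'rare': 0.0001}
    category_repeat = {
        cat_id: max(1.0, math.sqrt(repeat_thr / cat_freq))
        for cat_id, cat_freq in category_freq.items()
    }
    # r(common) = max(1, sqrt(0.01 / 0.01)) = 1.0
    # r(rare) = max(1, sqrt(0.01 / 0.0001)) = 10.0
    image_repeat = max(category_repeat.values())
    assert image_repeat == 10.0
    return category_repeat, image_repeat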
| 6,092 | 34.219653 | 167 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/datasets/builder.py | # Copyright (c) OpenMMLab. All rights reserved.
import platform
import random
from distutils.version import LooseVersion
from functools import partial
import numpy as np
import torch
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from mmcv.utils import Registry, build_from_cfg
from torch.utils.data import DataLoader
from .samplers import DistributedSampler
if platform.system() != 'Windows':
# https://github.com/pytorch/pytorch/issues/973
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
hard_limit = rlimit[1]
soft_limit = min(4096, hard_limit)
resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
DATASETS = Registry('dataset')
PIPELINES = Registry('pipeline')
def build_dataset(cfg, default_args=None):
from .dataset_wrappers import (ConcatDataset, RepeatDataset,
ClassBalancedDataset)
if isinstance(cfg, (list, tuple)):
dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
elif cfg['type'] == 'RepeatDataset':
dataset = RepeatDataset(
build_dataset(cfg['dataset'], default_args), cfg['times'])
elif cfg['type'] == 'ClassBalancedDataset':
dataset = ClassBalancedDataset(
build_dataset(cfg['dataset'], default_args), cfg['oversample_thr'])
else:
dataset = build_from_cfg(cfg, DATASETS, default_args)
return dataset
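# A minimal config sketch for build_dataset; the CIFAR10 type is registered
# elsewhere in this package and the data_prefix path is an assumption used
# only for illustration.
_EXAMPLE_REPEAT_CFG = dict(
    type='RepeatDataset',
    times=5,
    dataset=dict(type='CIFAR10', data_prefix='data/cifar10', pipeline=[]))
# build_dataset(_EXAMPLE_REPEAT_CFG) builds the inner CIFAR10 config first,
# then wraps it so that one epoch iterates the data five times.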
def build_dataloader(dataset,
samples_per_gpu,
workers_per_gpu,
num_gpus=1,
dist=True,
shuffle=True,
round_up=True,
seed=None,
pin_memory=True,
persistent_workers=True,
**kwargs):
"""Build PyTorch DataLoader.
In distributed training, each GPU/process has a dataloader.
In non-distributed training, there is only one dataloader for all GPUs.
Args:
dataset (Dataset): A PyTorch dataset.
samples_per_gpu (int): Number of training samples on each GPU, i.e.,
batch size of each GPU.
workers_per_gpu (int): How many subprocesses to use for data loading
for each GPU.
num_gpus (int): Number of GPUs. Only used in non-distributed training.
dist (bool): Distributed training/test or not. Default: True.
shuffle (bool): Whether to shuffle the data at every epoch.
Default: True.
round_up (bool): Whether to round up the length of dataset by adding
extra samples to make it evenly divisible. Default: True.
pin_memory (bool): Whether to use pin_memory in DataLoader.
Default: True
        persistent_workers (bool): If True, the data loader will not shut
            down the worker processes after a dataset has been consumed
            once. This keeps the worker Dataset instances alive. The
            argument only takes effect in PyTorch>=1.7.0.
Default: True
kwargs: any keyword argument to be used to initialize DataLoader
Returns:
DataLoader: A PyTorch dataloader.
"""
rank, world_size = get_dist_info()
if dist:
sampler = DistributedSampler(
dataset, world_size, rank, shuffle=shuffle, round_up=round_up)
shuffle = False
batch_size = samples_per_gpu
num_workers = workers_per_gpu
else:
sampler = None
batch_size = num_gpus * samples_per_gpu
num_workers = num_gpus * workers_per_gpu
init_fn = partial(
worker_init_fn, num_workers=num_workers, rank=rank,
seed=seed) if seed is not None else None
if LooseVersion(torch.__version__) >= LooseVersion('1.7.0'):
kwargs['persistent_workers'] = persistent_workers
data_loader = DataLoader(
dataset,
batch_size=batch_size,
sampler=sampler,
num_workers=num_workers,
collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
pin_memory=pin_memory,
shuffle=shuffle,
worker_init_fn=init_fn,
**kwargs)
return data_loader
def worker_init_fn(worker_id, num_workers, rank, seed):
    # The seed of each worker equals
    # num_workers * rank + worker_id + user_seed
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
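# A worked example of the seed formula above; the counts are assumptions
# chosen only for illustration. Every (rank, worker_id) pair maps to a
# distinct seed, so augmentation RNG streams never collide across workers.
def _worker_seed_example(num_workers=4, user_seed=42):
    seeds = {(rank, wid): num_workers * rank + wid + user_seed
             for rank in range(2) for wid in range(num_workers)}
    # 2 ranks x 4 workers -> 8 distinct seeds (42..49).
    assert len(set(seeds.values())) == 8
    return seeds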
| 4,471 | 34.776 | 79 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/datasets/cifar.py | # Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path
import pickle
import numpy as np
import torch.distributed as dist
from mmcv.runner import get_dist_info
from mmcls.datasets.disentangle_data.multi_task import MultiTask
from mmcls.datasets.pipelines.compose import Compose
from .base_dataset import BaseDataset
from .builder import DATASETS
from .utils import check_integrity, download_and_extract_archive
@DATASETS.register_module()
class CIFAR10(BaseDataset):
"""`CIFAR10 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.
This implementation is modified from
https://github.com/pytorch/vision/blob/master/torchvision/datasets/cifar.py
""" # noqa: E501
base_folder = 'cifar-10-batches-py'
url = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
filename = 'cifar-10-python.tar.gz'
tgz_md5 = 'c58f30108f718f92721af3b95e74349a'
train_list = [
['data_batch_1', 'c99cafc152244af753f735de768cd75f'],
['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'],
['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'],
['data_batch_4', '634d18415352ddfa80567beed471001a'],
['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'],
]
test_list = [
['test_batch', '40351d587109b95175f43aff81a1287e'],
]
meta = {
'filename': 'batches.meta',
'key': 'label_names',
'md5': '5ff9c542aee3614f3951f8cda6e48888',
}
def load_annotations(self):
rank, world_size = get_dist_info()
if rank == 0 and not self._check_integrity():
download_and_extract_archive(
self.url,
self.data_prefix,
filename=self.filename,
md5=self.tgz_md5)
if world_size > 1:
dist.barrier()
assert self._check_integrity(), \
'Shared storage seems unavailable. ' \
f'Please download the dataset manually through {self.url}.'
if not self.test_mode:
downloaded_list = self.train_list
else:
downloaded_list = self.test_list
self.imgs = []
self.gt_labels = []
# load the picked numpy arrays
for file_name, checksum in downloaded_list:
file_path = os.path.join(self.data_prefix, self.base_folder,
file_name)
with open(file_path, 'rb') as f:
entry = pickle.load(f, encoding='latin1')
self.imgs.append(entry['data'])
if 'labels' in entry:
self.gt_labels.extend(entry['labels'])
else:
self.gt_labels.extend(entry['fine_labels'])
self.imgs = np.vstack(self.imgs).reshape(-1, 3, 32, 32)
self.imgs = self.imgs.transpose((0, 2, 3, 1)) # convert to HWC
self._load_meta()
data_infos = []
for img, gt_label in zip(self.imgs, self.gt_labels):
gt_label = np.array(gt_label, dtype=np.int64)
info = {'img': img, 'gt_label': gt_label}
data_infos.append(info)
return data_infos
def _load_meta(self):
path = os.path.join(self.data_prefix, self.base_folder,
self.meta['filename'])
if not check_integrity(path, self.meta['md5']):
raise RuntimeError(
'Dataset metadata file not found or corrupted.' +
' You can use download=True to download it')
with open(path, 'rb') as infile:
data = pickle.load(infile, encoding='latin1')
self.CLASSES = data[self.meta['key']]
def _check_integrity(self):
root = self.data_prefix
for fentry in (self.train_list + self.test_list):
filename, md5 = fentry[0], fentry[1]
fpath = os.path.join(root, self.base_folder, filename)
if not check_integrity(fpath, md5):
return False
return True
@DATASETS.register_module()
class CIFAR100(CIFAR10):
"""`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset."""
base_folder = 'cifar-100-python'
url = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'
filename = 'cifar-100-python.tar.gz'
tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
train_list = [
['train', '16019d7e3df5f24257cddd939b257f8d'],
]
test_list = [
['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
]
meta = {
'filename': 'meta',
'key': 'fine_label_names',
'md5': '7973b15100ade9c7d40fb424638fde48',
}
@DATASETS.register_module()
class CIFAR10_2Task(CIFAR10):
gt2newgt = {0: 0, 1: 1, 2: 4, 3: 5, 4: 6, 5: 7, 6: 8, 7: 9, 8: 2, 9: 3}
def load_annotations(self):
rank, world_size = get_dist_info()
if rank == 0 and not self._check_integrity():
download_and_extract_archive(
self.url,
self.data_prefix,
filename=self.filename,
md5=self.tgz_md5)
if world_size > 1:
dist.barrier()
assert self._check_integrity(), \
'Shared storage seems unavailable. ' \
f'Please download the dataset manually through {self.url}.'
if not self.test_mode:
downloaded_list = self.train_list
else:
downloaded_list = self.test_list
self.imgs = []
self.gt_labels = []
# load the picked numpy arrays
for file_name, checksum in downloaded_list:
file_path = os.path.join(self.data_prefix, self.base_folder,
file_name)
with open(file_path, 'rb') as f:
entry = pickle.load(f, encoding='latin1')
self.imgs.append(entry['data'])
if 'labels' in entry:
self.gt_labels.extend(entry['labels'])
else:
self.gt_labels.extend(entry['fine_labels'])
self.imgs = np.vstack(self.imgs).reshape(-1, 3, 32, 32)
self.imgs = self.imgs.transpose((0, 2, 3, 1)) # convert to HWC
self._load_meta()
data_infos = []
for img, gt_label in zip(self.imgs, self.gt_labels):
gt_label = np.array(self.gt2newgt[gt_label], dtype=np.int64)
info = {'img': img, 'gt_label': gt_label}
data_infos.append(info)
return data_infos
@DATASETS.register_module()
class CIFAR10_Select(CIFAR10):
def __init__(self,
data_prefix,
pipeline,
select_class=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9),
classes=None,
ann_file=None,
test_mode=False):
        assert isinstance(select_class, (list, tuple))
self.select_class = select_class
self.select2gt = {
self.select_class[i]: i for i in range(len(self.select_class))}
super(CIFAR10_Select, self).__init__(data_prefix,
pipeline,
classes=classes,
ann_file=ann_file,
test_mode=test_mode)
def load_annotations(self):
rank, world_size = get_dist_info()
if rank == 0 and not self._check_integrity():
download_and_extract_archive(
self.url,
self.data_prefix,
filename=self.filename,
md5=self.tgz_md5)
if world_size > 1:
dist.barrier()
assert self._check_integrity(), \
'Shared storage seems unavailable. ' \
f'Please download the dataset manually through {self.url}.'
if not self.test_mode:
downloaded_list = self.train_list
else:
downloaded_list = self.test_list
self.imgs = []
self.gt_labels = []
# load the picked numpy arrays
for file_name, checksum in downloaded_list:
file_path = os.path.join(self.data_prefix, self.base_folder,
file_name)
with open(file_path, 'rb') as f:
entry = pickle.load(f, encoding='latin1')
self.imgs.append(entry['data'])
if 'labels' in entry:
self.gt_labels.extend(entry['labels'])
else:
self.gt_labels.extend(entry['fine_labels'])
self.imgs = np.vstack(self.imgs).reshape(-1, 3, 32, 32)
self.imgs = self.imgs.transpose((0, 2, 3, 1)) # convert to HWC
self._load_meta()
data_infos = []
for img, gt_label in zip(self.imgs, self.gt_labels):
if gt_label in self.select_class:
gt_label = np.array(self.select2gt[gt_label], dtype=np.int64)
info = {'img': img, 'gt_label': gt_label}
data_infos.append(info)
return data_infos
@DATASETS.register_module()
class CIFAR10_MultiTask(CIFAR10, MultiTask):
num_tasks = 10
def load_annotations(self):
rank, world_size = get_dist_info()
if rank == 0 and not self._check_integrity():
download_and_extract_archive(
self.url,
self.data_prefix,
filename=self.filename,
md5=self.tgz_md5)
if world_size > 1:
dist.barrier()
assert self._check_integrity(), \
'Shared storage seems unavailable. ' \
f'Please download the dataset manually through {self.url}.'
if not self.test_mode:
downloaded_list = self.train_list
else:
downloaded_list = self.test_list
self.imgs = []
self.gt_labels = []
# load the picked numpy arrays
for file_name, checksum in downloaded_list:
file_path = os.path.join(self.data_prefix, self.base_folder,
file_name)
with open(file_path, 'rb') as f:
entry = pickle.load(f, encoding='latin1')
self.imgs.append(entry['data'])
if 'labels' in entry:
self.gt_labels.extend(entry['labels'])
else:
self.gt_labels.extend(entry['fine_labels'])
self.gt_labels = np.eye(10)[self.gt_labels]
self.imgs = np.vstack(self.imgs).reshape(-1, 3, 32, 32)
self.imgs = self.imgs.transpose((0, 2, 3, 1)) # convert to HWC
self._load_meta()
data_infos = []
for img, gt_label in zip(self.imgs, self.gt_labels):
gt_label = np.array(gt_label, dtype=np.int64)
info = {'img': img, 'gt_label': gt_label}
data_infos.append(info)
return data_infos
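# A sketch of the multi-task label layout produced above: each scalar CIFAR
# label becomes a 10-dim one-hot row, i.e. one binary task per class.
def _one_hot_label_example():
    gt_labels = [3, 0]
    one_hot = np.eye(10)[gt_labels]
    # one_hot[0] == [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]
    assert one_hot.shape == (2, 10) and one_hot[0, 3] == 1
    return one_hot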
| 10,938 | 33.507886 | 79 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/datasets/imagenet.py | # Copyright (c) OpenMMLab. All rights reserved.
import os
import numpy as np
from .base_dataset import BaseDataset
from mmcls.datasets.disentangle_data.multi_task import MultiTask
from .builder import DATASETS
def has_file_allowed_extension(filename, extensions):
"""Checks if a file is an allowed extension.
Args:
        filename (string): path to a file
        extensions (tuple): allowed file extensions
    Returns:
        bool: True if the filename ends with one of the allowed extensions
"""
filename_lower = filename.lower()
return any(filename_lower.endswith(ext) for ext in extensions)
def find_folders(root):
"""Find classes by folders under a root.
Args:
root (string): root directory of folders
Returns:
folder_to_idx (dict): the map from folder name to class idx
"""
folders = [
d for d in os.listdir(root) if os.path.isdir(os.path.join(root, d))
]
folders.sort()
folder_to_idx = {folders[i]: i for i in range(len(folders))}
return folder_to_idx
def get_samples(root, folder_to_idx, extensions):
"""Make dataset by walking all images under a root.
Args:
root (string): root directory of folders
folder_to_idx (dict): the map from class name to class idx
extensions (tuple): allowed extensions
Returns:
samples (list): a list of tuple where each element is (image, label)
"""
samples = []
root = os.path.expanduser(root)
for folder_name in sorted(list(folder_to_idx.keys())):
_dir = os.path.join(root, folder_name)
for _, _, fns in sorted(os.walk(_dir)):
for fn in sorted(fns):
if has_file_allowed_extension(fn, extensions):
path = os.path.join(folder_name, fn)
item = (path, folder_to_idx[folder_name])
samples.append(item)
return samples
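# An illustrative sketch of the two helpers above; the directory layout
# (root/cat/1.jpg, root/dog/2.png) is an assumption for illustration.
def _folder_scan_example(root):
    folder_to_idx = find_folders(root)  # e.g. {'cat': 0, 'dog': 1}
    samples = get_samples(root, folder_to_idx, extensions=('.jpg', '.png'))
    # e.g. [('cat/1.jpg', 0), ('dog/2.png', 1)]
    return samples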
@DATASETS.register_module()
class ImageNet(BaseDataset):
"""`ImageNet <http://www.image-net.org>`_ Dataset.
This implementation is modified from
https://github.com/pytorch/vision/blob/master/torchvision/datasets/imagenet.py
""" # noqa: E501
IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif')
CLASSES = [
'tench, Tinca tinca',
'goldfish, Carassius auratus',
'great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias', # noqa: E501
'tiger shark, Galeocerdo cuvieri',
'hammerhead, hammerhead shark',
'electric ray, crampfish, numbfish, torpedo',
'stingray',
'cock',
'hen',
'ostrich, Struthio camelus',
'brambling, Fringilla montifringilla',
'goldfinch, Carduelis carduelis',
'house finch, linnet, Carpodacus mexicanus',
'junco, snowbird',
'indigo bunting, indigo finch, indigo bird, Passerina cyanea',
'robin, American robin, Turdus migratorius',
'bulbul',
'jay',
'magpie',
'chickadee',
'water ouzel, dipper',
'kite',
'bald eagle, American eagle, Haliaeetus leucocephalus',
'vulture',
'great grey owl, great gray owl, Strix nebulosa',
'European fire salamander, Salamandra salamandra',
'common newt, Triturus vulgaris',
'eft',
'spotted salamander, Ambystoma maculatum',
'axolotl, mud puppy, Ambystoma mexicanum',
'bullfrog, Rana catesbeiana',
'tree frog, tree-frog',
'tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui',
'loggerhead, loggerhead turtle, Caretta caretta',
'leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea', # noqa: E501
'mud turtle',
'terrapin',
'box turtle, box tortoise',
'banded gecko',
'common iguana, iguana, Iguana iguana',
'American chameleon, anole, Anolis carolinensis',
'whiptail, whiptail lizard',
'agama',
'frilled lizard, Chlamydosaurus kingi',
'alligator lizard',
'Gila monster, Heloderma suspectum',
'green lizard, Lacerta viridis',
'African chameleon, Chamaeleo chamaeleon',
'Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis', # noqa: E501
'African crocodile, Nile crocodile, Crocodylus niloticus',
'American alligator, Alligator mississipiensis',
'triceratops',
'thunder snake, worm snake, Carphophis amoenus',
'ringneck snake, ring-necked snake, ring snake',
'hognose snake, puff adder, sand viper',
'green snake, grass snake',
'king snake, kingsnake',
'garter snake, grass snake',
'water snake',
'vine snake',
'night snake, Hypsiglena torquata',
'boa constrictor, Constrictor constrictor',
'rock python, rock snake, Python sebae',
'Indian cobra, Naja naja',
'green mamba',
'sea snake',
'horned viper, cerastes, sand viper, horned asp, Cerastes cornutus',
'diamondback, diamondback rattlesnake, Crotalus adamanteus',
'sidewinder, horned rattlesnake, Crotalus cerastes',
'trilobite',
'harvestman, daddy longlegs, Phalangium opilio',
'scorpion',
'black and gold garden spider, Argiope aurantia',
'barn spider, Araneus cavaticus',
'garden spider, Aranea diademata',
'black widow, Latrodectus mactans',
'tarantula',
'wolf spider, hunting spider',
'tick',
'centipede',
'black grouse',
'ptarmigan',
'ruffed grouse, partridge, Bonasa umbellus',
'prairie chicken, prairie grouse, prairie fowl',
'peacock',
'quail',
'partridge',
'African grey, African gray, Psittacus erithacus',
'macaw',
'sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita',
'lorikeet',
'coucal',
'bee eater',
'hornbill',
'hummingbird',
'jacamar',
'toucan',
'drake',
'red-breasted merganser, Mergus serrator',
'goose',
'black swan, Cygnus atratus',
'tusker',
'echidna, spiny anteater, anteater',
'platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus', # noqa: E501
'wallaby, brush kangaroo',
'koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus', # noqa: E501
'wombat',
'jellyfish',
'sea anemone, anemone',
'brain coral',
'flatworm, platyhelminth',
'nematode, nematode worm, roundworm',
'conch',
'snail',
'slug',
'sea slug, nudibranch',
'chiton, coat-of-mail shell, sea cradle, polyplacophore',
'chambered nautilus, pearly nautilus, nautilus',
'Dungeness crab, Cancer magister',
'rock crab, Cancer irroratus',
'fiddler crab',
'king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica', # noqa: E501
'American lobster, Northern lobster, Maine lobster, Homarus americanus', # noqa: E501
'spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish', # noqa: E501
'crayfish, crawfish, crawdad, crawdaddy',
'hermit crab',
'isopod',
'white stork, Ciconia ciconia',
'black stork, Ciconia nigra',
'spoonbill',
'flamingo',
'little blue heron, Egretta caerulea',
'American egret, great white heron, Egretta albus',
'bittern',
'crane',
'limpkin, Aramus pictus',
'European gallinule, Porphyrio porphyrio',
'American coot, marsh hen, mud hen, water hen, Fulica americana',
'bustard',
'ruddy turnstone, Arenaria interpres',
'red-backed sandpiper, dunlin, Erolia alpina',
'redshank, Tringa totanus',
'dowitcher',
'oystercatcher, oyster catcher',
'pelican',
'king penguin, Aptenodytes patagonica',
'albatross, mollymawk',
'grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus', # noqa: E501
'killer whale, killer, orca, grampus, sea wolf, Orcinus orca',
'dugong, Dugong dugon',
'sea lion',
'Chihuahua',
'Japanese spaniel',
'Maltese dog, Maltese terrier, Maltese',
'Pekinese, Pekingese, Peke',
'Shih-Tzu',
'Blenheim spaniel',
'papillon',
'toy terrier',
'Rhodesian ridgeback',
'Afghan hound, Afghan',
'basset, basset hound',
'beagle',
'bloodhound, sleuthhound',
'bluetick',
'black-and-tan coonhound',
'Walker hound, Walker foxhound',
'English foxhound',
'redbone',
'borzoi, Russian wolfhound',
'Irish wolfhound',
'Italian greyhound',
'whippet',
'Ibizan hound, Ibizan Podenco',
'Norwegian elkhound, elkhound',
'otterhound, otter hound',
'Saluki, gazelle hound',
'Scottish deerhound, deerhound',
'Weimaraner',
'Staffordshire bullterrier, Staffordshire bull terrier',
'American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier', # noqa: E501
'Bedlington terrier',
'Border terrier',
'Kerry blue terrier',
'Irish terrier',
'Norfolk terrier',
'Norwich terrier',
'Yorkshire terrier',
'wire-haired fox terrier',
'Lakeland terrier',
'Sealyham terrier, Sealyham',
'Airedale, Airedale terrier',
'cairn, cairn terrier',
'Australian terrier',
'Dandie Dinmont, Dandie Dinmont terrier',
'Boston bull, Boston terrier',
'miniature schnauzer',
'giant schnauzer',
'standard schnauzer',
'Scotch terrier, Scottish terrier, Scottie',
'Tibetan terrier, chrysanthemum dog',
'silky terrier, Sydney silky',
'soft-coated wheaten terrier',
'West Highland white terrier',
'Lhasa, Lhasa apso',
'flat-coated retriever',
'curly-coated retriever',
'golden retriever',
'Labrador retriever',
'Chesapeake Bay retriever',
'German short-haired pointer',
'vizsla, Hungarian pointer',
'English setter',
'Irish setter, red setter',
'Gordon setter',
'Brittany spaniel',
'clumber, clumber spaniel',
'English springer, English springer spaniel',
'Welsh springer spaniel',
'cocker spaniel, English cocker spaniel, cocker',
'Sussex spaniel',
'Irish water spaniel',
'kuvasz',
'schipperke',
'groenendael',
'malinois',
'briard',
'kelpie',
'komondor',
'Old English sheepdog, bobtail',
'Shetland sheepdog, Shetland sheep dog, Shetland',
'collie',
'Border collie',
'Bouvier des Flandres, Bouviers des Flandres',
'Rottweiler',
'German shepherd, German shepherd dog, German police dog, alsatian',
'Doberman, Doberman pinscher',
'miniature pinscher',
'Greater Swiss Mountain dog',
'Bernese mountain dog',
'Appenzeller',
'EntleBucher',
'boxer',
'bull mastiff',
'Tibetan mastiff',
'French bulldog',
'Great Dane',
'Saint Bernard, St Bernard',
'Eskimo dog, husky',
'malamute, malemute, Alaskan malamute',
'Siberian husky',
'dalmatian, coach dog, carriage dog',
'affenpinscher, monkey pinscher, monkey dog',
'basenji',
'pug, pug-dog',
'Leonberg',
'Newfoundland, Newfoundland dog',
'Great Pyrenees',
'Samoyed, Samoyede',
'Pomeranian',
'chow, chow chow',
'keeshond',
'Brabancon griffon',
'Pembroke, Pembroke Welsh corgi',
'Cardigan, Cardigan Welsh corgi',
'toy poodle',
'miniature poodle',
'standard poodle',
'Mexican hairless',
'timber wolf, grey wolf, gray wolf, Canis lupus',
'white wolf, Arctic wolf, Canis lupus tundrarum',
'red wolf, maned wolf, Canis rufus, Canis niger',
'coyote, prairie wolf, brush wolf, Canis latrans',
'dingo, warrigal, warragal, Canis dingo',
'dhole, Cuon alpinus',
'African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus',
'hyena, hyaena',
'red fox, Vulpes vulpes',
'kit fox, Vulpes macrotis',
'Arctic fox, white fox, Alopex lagopus',
'grey fox, gray fox, Urocyon cinereoargenteus',
'tabby, tabby cat',
'tiger cat',
'Persian cat',
'Siamese cat, Siamese',
'Egyptian cat',
'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor', # noqa: E501
'lynx, catamount',
'leopard, Panthera pardus',
'snow leopard, ounce, Panthera uncia',
'jaguar, panther, Panthera onca, Felis onca',
'lion, king of beasts, Panthera leo',
'tiger, Panthera tigris',
'cheetah, chetah, Acinonyx jubatus',
'brown bear, bruin, Ursus arctos',
'American black bear, black bear, Ursus americanus, Euarctos americanus', # noqa: E501
'ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus',
'sloth bear, Melursus ursinus, Ursus ursinus',
'mongoose',
'meerkat, mierkat',
'tiger beetle',
'ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle',
'ground beetle, carabid beetle',
'long-horned beetle, longicorn, longicorn beetle',
'leaf beetle, chrysomelid',
'dung beetle',
'rhinoceros beetle',
'weevil',
'fly',
'bee',
'ant, emmet, pismire',
'grasshopper, hopper',
'cricket',
'walking stick, walkingstick, stick insect',
'cockroach, roach',
'mantis, mantid',
'cicada, cicala',
'leafhopper',
'lacewing, lacewing fly',
"dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk", # noqa: E501
'damselfly',
'admiral',
'ringlet, ringlet butterfly',
'monarch, monarch butterfly, milkweed butterfly, Danaus plexippus',
'cabbage butterfly',
'sulphur butterfly, sulfur butterfly',
'lycaenid, lycaenid butterfly',
'starfish, sea star',
'sea urchin',
'sea cucumber, holothurian',
'wood rabbit, cottontail, cottontail rabbit',
'hare',
'Angora, Angora rabbit',
'hamster',
'porcupine, hedgehog',
'fox squirrel, eastern fox squirrel, Sciurus niger',
'marmot',
'beaver',
'guinea pig, Cavia cobaya',
'sorrel',
'zebra',
'hog, pig, grunter, squealer, Sus scrofa',
'wild boar, boar, Sus scrofa',
'warthog',
'hippopotamus, hippo, river horse, Hippopotamus amphibius',
'ox',
'water buffalo, water ox, Asiatic buffalo, Bubalus bubalis',
'bison',
'ram, tup',
'bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis', # noqa: E501
'ibex, Capra ibex',
'hartebeest',
'impala, Aepyceros melampus',
'gazelle',
'Arabian camel, dromedary, Camelus dromedarius',
'llama',
'weasel',
'mink',
'polecat, fitch, foulmart, foumart, Mustela putorius',
'black-footed ferret, ferret, Mustela nigripes',
'otter',
'skunk, polecat, wood pussy',
'badger',
'armadillo',
'three-toed sloth, ai, Bradypus tridactylus',
'orangutan, orang, orangutang, Pongo pygmaeus',
'gorilla, Gorilla gorilla',
'chimpanzee, chimp, Pan troglodytes',
'gibbon, Hylobates lar',
'siamang, Hylobates syndactylus, Symphalangus syndactylus',
'guenon, guenon monkey',
'patas, hussar monkey, Erythrocebus patas',
'baboon',
'macaque',
'langur',
'colobus, colobus monkey',
'proboscis monkey, Nasalis larvatus',
'marmoset',
'capuchin, ringtail, Cebus capucinus',
'howler monkey, howler',
'titi, titi monkey',
'spider monkey, Ateles geoffroyi',
'squirrel monkey, Saimiri sciureus',
'Madagascar cat, ring-tailed lemur, Lemur catta',
'indri, indris, Indri indri, Indri brevicaudatus',
'Indian elephant, Elephas maximus',
'African elephant, Loxodonta africana',
'lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens',
'giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca',
'barracouta, snoek',
'eel',
'coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch', # noqa: E501
'rock beauty, Holocanthus tricolor',
'anemone fish',
'sturgeon',
'gar, garfish, garpike, billfish, Lepisosteus osseus',
'lionfish',
'puffer, pufferfish, blowfish, globefish',
'abacus',
'abaya',
"academic gown, academic robe, judge's robe",
'accordion, piano accordion, squeeze box',
'acoustic guitar',
'aircraft carrier, carrier, flattop, attack aircraft carrier',
'airliner',
'airship, dirigible',
'altar',
'ambulance',
'amphibian, amphibious vehicle',
'analog clock',
'apiary, bee house',
'apron',
'ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin', # noqa: E501
'assault rifle, assault gun',
'backpack, back pack, knapsack, packsack, rucksack, haversack',
'bakery, bakeshop, bakehouse',
'balance beam, beam',
'balloon',
'ballpoint, ballpoint pen, ballpen, Biro',
'Band Aid',
'banjo',
'bannister, banister, balustrade, balusters, handrail',
'barbell',
'barber chair',
'barbershop',
'barn',
'barometer',
'barrel, cask',
'barrow, garden cart, lawn cart, wheelbarrow',
'baseball',
'basketball',
'bassinet',
'bassoon',
'bathing cap, swimming cap',
'bath towel',
'bathtub, bathing tub, bath, tub',
'beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon', # noqa: E501
'beacon, lighthouse, beacon light, pharos',
'beaker',
'bearskin, busby, shako',
'beer bottle',
'beer glass',
'bell cote, bell cot',
'bib',
'bicycle-built-for-two, tandem bicycle, tandem',
'bikini, two-piece',
'binder, ring-binder',
'binoculars, field glasses, opera glasses',
'birdhouse',
'boathouse',
'bobsled, bobsleigh, bob',
'bolo tie, bolo, bola tie, bola',
'bonnet, poke bonnet',
'bookcase',
'bookshop, bookstore, bookstall',
'bottlecap',
'bow',
'bow tie, bow-tie, bowtie',
'brass, memorial tablet, plaque',
'brassiere, bra, bandeau',
'breakwater, groin, groyne, mole, bulwark, seawall, jetty',
'breastplate, aegis, egis',
'broom',
'bucket, pail',
'buckle',
'bulletproof vest',
'bullet train, bullet',
'butcher shop, meat market',
'cab, hack, taxi, taxicab',
'caldron, cauldron',
'candle, taper, wax light',
'cannon',
'canoe',
'can opener, tin opener',
'cardigan',
'car mirror',
'carousel, carrousel, merry-go-round, roundabout, whirligig',
"carpenter's kit, tool kit",
'carton',
'car wheel',
'cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM', # noqa: E501
'cassette',
'cassette player',
'castle',
'catamaran',
'CD player',
'cello, violoncello',
'cellular telephone, cellular phone, cellphone, cell, mobile phone',
'chain',
'chainlink fence',
'chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour', # noqa: E501
'chain saw, chainsaw',
'chest',
'chiffonier, commode',
'chime, bell, gong',
'china cabinet, china closet',
'Christmas stocking',
'church, church building',
'cinema, movie theater, movie theatre, movie house, picture palace',
'cleaver, meat cleaver, chopper',
'cliff dwelling',
'cloak',
'clog, geta, patten, sabot',
'cocktail shaker',
'coffee mug',
'coffeepot',
'coil, spiral, volute, whorl, helix',
'combination lock',
'computer keyboard, keypad',
'confectionery, confectionary, candy store',
'container ship, containership, container vessel',
'convertible',
'corkscrew, bottle screw',
'cornet, horn, trumpet, trump',
'cowboy boot',
'cowboy hat, ten-gallon hat',
'cradle',
'crane',
'crash helmet',
'crate',
'crib, cot',
'Crock Pot',
'croquet ball',
'crutch',
'cuirass',
'dam, dike, dyke',
'desk',
'desktop computer',
'dial telephone, dial phone',
'diaper, nappy, napkin',
'digital clock',
'digital watch',
'dining table, board',
'dishrag, dishcloth',
'dishwasher, dish washer, dishwashing machine',
'disk brake, disc brake',
'dock, dockage, docking facility',
'dogsled, dog sled, dog sleigh',
'dome',
'doormat, welcome mat',
'drilling platform, offshore rig',
'drum, membranophone, tympan',
'drumstick',
'dumbbell',
'Dutch oven',
'electric fan, blower',
'electric guitar',
'electric locomotive',
'entertainment center',
'envelope',
'espresso maker',
'face powder',
'feather boa, boa',
'file, file cabinet, filing cabinet',
'fireboat',
'fire engine, fire truck',
'fire screen, fireguard',
'flagpole, flagstaff',
'flute, transverse flute',
'folding chair',
'football helmet',
'forklift',
'fountain',
'fountain pen',
'four-poster',
'freight car',
'French horn, horn',
'frying pan, frypan, skillet',
'fur coat',
'garbage truck, dustcart',
'gasmask, respirator, gas helmet',
'gas pump, gasoline pump, petrol pump, island dispenser',
'goblet',
'go-kart',
'golf ball',
'golfcart, golf cart',
'gondola',
'gong, tam-tam',
'gown',
'grand piano, grand',
'greenhouse, nursery, glasshouse',
'grille, radiator grille',
'grocery store, grocery, food market, market',
'guillotine',
'hair slide',
'hair spray',
'half track',
'hammer',
'hamper',
'hand blower, blow dryer, blow drier, hair dryer, hair drier',
'hand-held computer, hand-held microcomputer',
'handkerchief, hankie, hanky, hankey',
'hard disc, hard disk, fixed disk',
'harmonica, mouth organ, harp, mouth harp',
'harp',
'harvester, reaper',
'hatchet',
'holster',
'home theater, home theatre',
'honeycomb',
'hook, claw',
'hoopskirt, crinoline',
'horizontal bar, high bar',
'horse cart, horse-cart',
'hourglass',
'iPod',
'iron, smoothing iron',
"jack-o'-lantern",
'jean, blue jean, denim',
'jeep, landrover',
'jersey, T-shirt, tee shirt',
'jigsaw puzzle',
'jinrikisha, ricksha, rickshaw',
'joystick',
'kimono',
'knee pad',
'knot',
'lab coat, laboratory coat',
'ladle',
'lampshade, lamp shade',
'laptop, laptop computer',
'lawn mower, mower',
'lens cap, lens cover',
'letter opener, paper knife, paperknife',
'library',
'lifeboat',
'lighter, light, igniter, ignitor',
'limousine, limo',
'liner, ocean liner',
'lipstick, lip rouge',
'Loafer',
'lotion',
'loudspeaker, speaker, speaker unit, loudspeaker system, speaker system', # noqa: E501
"loupe, jeweler's loupe",
'lumbermill, sawmill',
'magnetic compass',
'mailbag, postbag',
'mailbox, letter box',
'maillot',
'maillot, tank suit',
'manhole cover',
'maraca',
'marimba, xylophone',
'mask',
'matchstick',
'maypole',
'maze, labyrinth',
'measuring cup',
'medicine chest, medicine cabinet',
'megalith, megalithic structure',
'microphone, mike',
'microwave, microwave oven',
'military uniform',
'milk can',
'minibus',
'miniskirt, mini',
'minivan',
'missile',
'mitten',
'mixing bowl',
'mobile home, manufactured home',
'Model T',
'modem',
'monastery',
'monitor',
'moped',
'mortar',
'mortarboard',
'mosque',
'mosquito net',
'motor scooter, scooter',
'mountain bike, all-terrain bike, off-roader',
'mountain tent',
'mouse, computer mouse',
'mousetrap',
'moving van',
'muzzle',
'nail',
'neck brace',
'necklace',
'nipple',
'notebook, notebook computer',
'obelisk',
'oboe, hautboy, hautbois',
'ocarina, sweet potato',
'odometer, hodometer, mileometer, milometer',
'oil filter',
'organ, pipe organ',
'oscilloscope, scope, cathode-ray oscilloscope, CRO',
'overskirt',
'oxcart',
'oxygen mask',
'packet',
'paddle, boat paddle',
'paddlewheel, paddle wheel',
'padlock',
'paintbrush',
"pajama, pyjama, pj's, jammies",
'palace',
'panpipe, pandean pipe, syrinx',
'paper towel',
'parachute, chute',
'parallel bars, bars',
'park bench',
'parking meter',
'passenger car, coach, carriage',
'patio, terrace',
'pay-phone, pay-station',
'pedestal, plinth, footstall',
'pencil box, pencil case',
'pencil sharpener',
'perfume, essence',
'Petri dish',
'photocopier',
'pick, plectrum, plectron',
'pickelhaube',
'picket fence, paling',
'pickup, pickup truck',
'pier',
'piggy bank, penny bank',
'pill bottle',
'pillow',
'ping-pong ball',
'pinwheel',
'pirate, pirate ship',
'pitcher, ewer',
"plane, carpenter's plane, woodworking plane",
'planetarium',
'plastic bag',
'plate rack',
'plow, plough',
"plunger, plumber's helper",
'Polaroid camera, Polaroid Land camera',
'pole',
'police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria', # noqa: E501
'poncho',
'pool table, billiard table, snooker table',
'pop bottle, soda bottle',
'pot, flowerpot',
"potter's wheel",
'power drill',
'prayer rug, prayer mat',
'printer',
'prison, prison house',
'projectile, missile',
'projector',
'puck, hockey puck',
'punching bag, punch bag, punching ball, punchball',
'purse',
'quill, quill pen',
'quilt, comforter, comfort, puff',
'racer, race car, racing car',
'racket, racquet',
'radiator',
'radio, wireless',
'radio telescope, radio reflector',
'rain barrel',
'recreational vehicle, RV, R.V.',
'reel',
'reflex camera',
'refrigerator, icebox',
'remote control, remote',
'restaurant, eating house, eating place, eatery',
'revolver, six-gun, six-shooter',
'rifle',
'rocking chair, rocker',
'rotisserie',
'rubber eraser, rubber, pencil eraser',
'rugby ball',
'rule, ruler',
'running shoe',
'safe',
'safety pin',
'saltshaker, salt shaker',
'sandal',
'sarong',
'sax, saxophone',
'scabbard',
'scale, weighing machine',
'school bus',
'schooner',
'scoreboard',
'screen, CRT screen',
'screw',
'screwdriver',
'seat belt, seatbelt',
'sewing machine',
'shield, buckler',
'shoe shop, shoe-shop, shoe store',
'shoji',
'shopping basket',
'shopping cart',
'shovel',
'shower cap',
'shower curtain',
'ski',
'ski mask',
'sleeping bag',
'slide rule, slipstick',
'sliding door',
'slot, one-armed bandit',
'snorkel',
'snowmobile',
'snowplow, snowplough',
'soap dispenser',
'soccer ball',
'sock',
'solar dish, solar collector, solar furnace',
'sombrero',
'soup bowl',
'space bar',
'space heater',
'space shuttle',
'spatula',
'speedboat',
"spider web, spider's web",
'spindle',
'sports car, sport car',
'spotlight, spot',
'stage',
'steam locomotive',
'steel arch bridge',
'steel drum',
'stethoscope',
'stole',
'stone wall',
'stopwatch, stop watch',
'stove',
'strainer',
'streetcar, tram, tramcar, trolley, trolley car',
'stretcher',
'studio couch, day bed',
'stupa, tope',
'submarine, pigboat, sub, U-boat',
'suit, suit of clothes',
'sundial',
'sunglass',
'sunglasses, dark glasses, shades',
'sunscreen, sunblock, sun blocker',
'suspension bridge',
'swab, swob, mop',
'sweatshirt',
'swimming trunks, bathing trunks',
'swing',
'switch, electric switch, electrical switch',
'syringe',
'table lamp',
'tank, army tank, armored combat vehicle, armoured combat vehicle',
'tape player',
'teapot',
'teddy, teddy bear',
'television, television system',
'tennis ball',
'thatch, thatched roof',
'theater curtain, theatre curtain',
'thimble',
'thresher, thrasher, threshing machine',
'throne',
'tile roof',
'toaster',
'tobacco shop, tobacconist shop, tobacconist',
'toilet seat',
'torch',
'totem pole',
'tow truck, tow car, wrecker',
'toyshop',
'tractor',
'trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi', # noqa: E501
'tray',
'trench coat',
'tricycle, trike, velocipede',
'trimaran',
'tripod',
'triumphal arch',
'trolleybus, trolley coach, trackless trolley',
'trombone',
'tub, vat',
'turnstile',
'typewriter keyboard',
'umbrella',
'unicycle, monocycle',
'upright, upright piano',
'vacuum, vacuum cleaner',
'vase',
'vault',
'velvet',
'vending machine',
'vestment',
'viaduct',
'violin, fiddle',
'volleyball',
'waffle iron',
'wall clock',
'wallet, billfold, notecase, pocketbook',
'wardrobe, closet, press',
'warplane, military plane',
'washbasin, handbasin, washbowl, lavabo, wash-hand basin',
'washer, automatic washer, washing machine',
'water bottle',
'water jug',
'water tower',
'whiskey jug',
'whistle',
'wig',
'window screen',
'window shade',
'Windsor tie',
'wine bottle',
'wing',
'wok',
'wooden spoon',
'wool, woolen, woollen',
'worm fence, snake fence, snake-rail fence, Virginia fence',
'wreck',
'yawl',
'yurt',
'web site, website, internet site, site',
'comic book',
'crossword puzzle, crossword',
'street sign',
'traffic light, traffic signal, stoplight',
'book jacket, dust cover, dust jacket, dust wrapper',
'menu',
'plate',
'guacamole',
'consomme',
'hot pot, hotpot',
'trifle',
'ice cream, icecream',
'ice lolly, lolly, lollipop, popsicle',
'French loaf',
'bagel, beigel',
'pretzel',
'cheeseburger',
'hotdog, hot dog, red hot',
'mashed potato',
'head cabbage',
'broccoli',
'cauliflower',
'zucchini, courgette',
'spaghetti squash',
'acorn squash',
'butternut squash',
'cucumber, cuke',
'artichoke, globe artichoke',
'bell pepper',
'cardoon',
'mushroom',
'Granny Smith',
'strawberry',
'orange',
'lemon',
'fig',
'pineapple, ananas',
'banana',
'jackfruit, jak, jack',
'custard apple',
'pomegranate',
'hay',
'carbonara',
'chocolate sauce, chocolate syrup',
'dough',
'meat loaf, meatloaf',
'pizza, pizza pie',
'potpie',
'burrito',
'red wine',
'espresso',
'cup',
'eggnog',
'alp',
'bubble',
'cliff, drop, drop-off',
'coral reef',
'geyser',
'lakeside, lakeshore',
'promontory, headland, head, foreland',
'sandbar, sand bar',
'seashore, coast, seacoast, sea-coast',
'valley, vale',
'volcano',
'ballplayer, baseball player',
'groom, bridegroom',
'scuba diver',
'rapeseed',
'daisy',
"yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum", # noqa: E501
'corn',
'acorn',
'hip, rose hip, rosehip',
'buckeye, horse chestnut, conker',
'coral fungus',
'agaric',
'gyromitra',
'stinkhorn, carrion fungus',
'earthstar',
'hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa', # noqa: E501
'bolete',
'ear, spike, capitulum',
'toilet tissue, toilet paper, bathroom tissue'
]
def load_annotations(self):
if self.ann_file is None:
folder_to_idx = find_folders(self.data_prefix)
samples = get_samples(
self.data_prefix,
folder_to_idx,
extensions=self.IMG_EXTENSIONS)
if len(samples) == 0:
            raise RuntimeError('Found 0 files in subfolders of: '
                               f'{self.data_prefix}. '
                               'Supported extensions are: '
                               f'{",".join(self.IMG_EXTENSIONS)}')
self.folder_to_idx = folder_to_idx
elif isinstance(self.ann_file, str):
with open(self.ann_file) as f:
samples = [x.strip().rsplit(' ', 1) for x in f.readlines()]
else:
raise TypeError('ann_file must be a str or None')
self.samples = samples
data_infos = []
for filename, gt_label in self.samples:
info = {'img_prefix': self.data_prefix}
info['img_info'] = {'filename': filename}
info['gt_label'] = np.array(gt_label, dtype=np.int64)
data_infos.append(info)
return data_infos
@DATASETS.register_module()
class ImageNet_MultiTask(ImageNet, MultiTask):
num_tasks = 1000
def load_annotations(self):
if self.ann_file is None:
folder_to_idx = find_folders(self.data_prefix)
samples = get_samples(
self.data_prefix,
folder_to_idx,
extensions=self.IMG_EXTENSIONS)
if len(samples) == 0:
            raise RuntimeError('Found 0 files in subfolders of: '
                               f'{self.data_prefix}. '
                               'Supported extensions are: '
                               f'{",".join(self.IMG_EXTENSIONS)}')
self.folder_to_idx = folder_to_idx
elif isinstance(self.ann_file, str):
with open(self.ann_file) as f:
samples = [x.strip().rsplit(' ', 1) for x in f.readlines()]
else:
raise TypeError('ann_file must be a str or None')
self.samples = samples
data_infos = []
for filename, gt_label in self.samples:
info = {'img_prefix': self.data_prefix}
info['img_info'] = {'filename': filename}
gt_label = np.eye(1000)[int(gt_label)]
info['gt_label'] = np.array(gt_label, dtype=np.int64)
data_infos.append(info)
return data_infos
| 37,785 | 32.203866 | 146 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/datasets/disentangle_data/dsprites.py | # Copyright (c) OpenMMLab. All rights reserved.
import codecs
import numpy as np
import os
import os.path as osp
import torch
import torch.distributed as dist
from mmcv.runner import get_dist_info, master_only
from numpy import random
from mmcls.datasets.builder import DATASETS
from mmcls.datasets.utils import (download_and_extract_archive, download_url,
rm_suffix)
from .multi_task import MultiTask
@DATASETS.register_module()
class dSprites(MultiTask):
"""Latent factor values
Color: white
Shape: square, ellipse, heart
Scale: 6 values linearly spaced in [0.5, 1]
Orientation: 40 values in [0, 2 pi]
Position X: 32 values in [0, 1]
Position Y: 32 values in [0, 1]
""" # noqa: E501
resources = 'https://github.com/deepmind/dsprites-dataset/blob/master/dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz'
split_ratio = 0.7
num_tasks = 5
CLASSES = [
]
def load_annotations(self):
filename = self.resources.rpartition('/')[2]
data_file = osp.join(
self.data_prefix, filename)
if not osp.exists(data_file):
self.download()
_, world_size = get_dist_info()
if world_size > 1:
dist.barrier()
assert osp.exists(data_file), \
'Shared storage seems unavailable. Please download dataset ' \
            f'manually through {self.resources}.'
data = np.load(data_file)
num_data = len(data['imgs'])
data_index = np.arange(num_data)
np.random.seed(42)
np.random.shuffle(data_index)
train_index = data_index[:int(num_data*self.split_ratio)]
test_index = data_index[int(num_data*self.split_ratio):]
train_set = (data['imgs'][train_index],
data['latents_classes'][train_index][:, 1:])
test_set = (data['imgs'][test_index],
data['latents_classes'][test_index][:, 1:])
if not self.test_mode:
imgs, gt_labels = train_set
else:
imgs, gt_labels = test_set
data_infos = []
for img, gt_label in zip(imgs, gt_labels):
gt_label = np.array(gt_label, dtype=np.int64)
info = {'img': img, 'gt_label': gt_label}
data_infos.append(info)
return data_infos
@master_only
def download(self):
os.makedirs(self.data_prefix, exist_ok=True)
# download files
url = self.resources
filename = url.rpartition('/')[2]
download_url(url,
root=self.data_prefix,
filename=filename)
| 2,668 | 31.156627 | 121 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/datasets/disentangle_data/shape3d.py | # Copyright (c) OpenMMLab. All rights reserved.
import codecs
import os
import os.path as osp
import numpy as np
import torch
import torch.distributed as dist
from mmcv.runner import get_dist_info, master_only
from .multi_task import MultiTask
from mmcls.datasets.builder import DATASETS
from mmcls.datasets.utils import download_and_extract_archive, download_url, rm_suffix
import h5py
@DATASETS.register_module()
class Shape3D(MultiTask):
"""Latent factor values
Color: white
Shape: square, ellipse, heart
Scale: 6 values linearly spaced in [0.5, 1]
Orientation: 40 values in [0, 2 pi]
Position X: 32 values in [0, 1]
Position Y: 32 values in [0, 1]
""" # noqa: E501
resources = '3dshapes.h5'
split_ratio = 0.7
num_tasks = 6
CLASSES = ['floor_hue', 'wall_hue', 'object_hue', 'scale', 'shape',
'orientation']
def load_annotations(self):
filename = self.resources.rpartition('/')[2]
data_file = osp.join(
self.data_prefix, filename)
if not osp.exists(data_file):
self.download()
_, world_size = get_dist_info()
if world_size > 1:
dist.barrier()
assert osp.exists(data_file), \
'Shared storage seems unavailable. Please download dataset ' \
            f'manually through {self.resources}.'
data = h5py.File(data_file, 'r')
num_data = len(data['images'])
labels = self.convert_value_to_label(data)
train_set = (data['images'][:int(num_data*self.split_ratio)],
labels[:int(num_data*self.split_ratio)])
test_set = (data['images'][int(num_data*self.split_ratio):],
labels[int(num_data*self.split_ratio):])
if not self.test_mode:
imgs, gt_labels = train_set
else:
imgs, gt_labels = test_set
data_infos = []
for img, gt_label in zip(imgs, gt_labels):
gt_label = np.array(gt_label, dtype=np.int64)
info = {'img': img, 'gt_label': gt_label}
data_infos.append(info)
return data_infos
def convert_value_to_label(self, data):
labels = []
for i in range(self.num_tasks):
values = np.unique(data['labels'][:, i])
values = np.sort(values)
num_class = len(values)
print(f'Task {i}, with {num_class} classes')
value2cls = {values[c]:c for c in range(num_class)}
label_converted = np.vectorize(value2cls.get)(data['labels'][:, i])
labels.append(label_converted)
labels = np.stack(labels, axis=-1)
return labels
@master_only
def download(self):
        raise RuntimeError('Shape3D dataset must be downloaded manually.')
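# A sketch of what convert_value_to_label does, on assumed toy values: each
# continuous factor value is mapped to its rank among the sorted unique
# values of that factor, turning continuous targets into class indices.
def _value_to_label_example():
    values = np.array([0.0, 0.5, 0.0, 1.0])
    uniq = np.sort(np.unique(values))  # [0.0, 0.5, 1.0]
    value2cls = {v: c for c, v in enumerate(uniq)}
    labels = np.vectorize(value2cls.get)(values)
    assert labels.tolist() == [0, 1, 0, 2]
    return labels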
| 2,826 | 32.258824 | 86 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/datasets/disentangle_data/mpi3d.py | # Copyright (c) OpenMMLab. All rights reserved.
import codecs
import numpy as np
import os
import os.path as osp
import torch
import torch.distributed as dist
from mmcv.runner import get_dist_info, master_only
from numpy import random
from mmcls.datasets.builder import DATASETS
from mmcls.datasets.utils import (download_and_extract_archive, download_url,
rm_suffix)
from .multi_task import MultiTask
@DATASETS.register_module()
class MPI3d(MultiTask):
"""Factors Possible Values
object_color white=0, green=1, red=2, blue=3, brown=4, olive=5
object_shape cone=0, cube=1, cylinder=2, hexagonal=3, pyramid=4, sphere=5
object_size small=0, large=1
camera_height top=0, center=1, bottom=2
background_color purple=0, sea green=1, salmon=2
horizontal_axis 0,...,39
vertical_axis 0,...,39
""" # noqa: E501
resources = 'https://github.com/deepmind/dsprites-dataset/blob/master/dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz'
split_ratio = 0.7
num_tasks = 7
num_classes = [6,6,2,3,3,40,40]
CLASSES = [
]
def load_annotations(self):
filename = self.resources.rpartition('/')[2]
data_file = osp.join(
self.data_prefix, filename)
if not osp.exists(data_file):
self.download()
_, world_size = get_dist_info()
if world_size > 1:
dist.barrier()
assert osp.exists(data_file), \
'Shared storage seems unavailable. Please download dataset ' \
            f'manually through {self.resources}.'
data = np.load(data_file)
num_data = len(data['images'])
label = self.create_label(num_data)
data_index = np.arange(num_data)
np.random.seed(42)
np.random.shuffle(data_index)
train_index = data_index[:int(num_data*self.split_ratio)]
test_index = data_index[int(num_data*self.split_ratio):]
train_set = (data['images'][train_index],
label[train_index])
test_set = (data['images'][test_index],
label[test_index])
if not self.test_mode:
imgs, gt_labels = train_set
else:
imgs, gt_labels = test_set
data_infos = []
for img, gt_label in zip(imgs, gt_labels):
gt_label = np.array(gt_label, dtype=np.int64)
info = {'img': img, 'gt_label': gt_label}
data_infos.append(info)
return data_infos
    def create_label(self, num_data):
        # Assumes the row-major factor ordering of the archive: the value
        # of factor i cycles every prod(num_classes[i + 1:]) samples.
        label = np.zeros((num_data, self.num_tasks), dtype=np.int64)
        for i in range(self.num_tasks):
            block = int(np.prod(self.num_classes[i + 1:]))
            label[:, i] = (np.arange(num_data) // block) % self.num_classes[i]
        return label
@master_only
def download(self):
os.makedirs(self.data_prefix, exist_ok=True)
# download files
url = self.resources
filename = url.rpartition('/')[2]
download_url(url,
root=self.data_prefix,
filename=filename)
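# A worked example of the row-major factor ordering assumed by create_label,
# shrunk to two toy factors with 2 and 3 values: the last factor cycles
# fastest, so sample k gets labels (k // 3 % 2, k % 3).
def _factor_order_example():
    num_classes = [2, 3]
    num_data = 6
    label = np.zeros((num_data, 2), dtype=np.int64)
    for i in range(2):
        block = int(np.prod(num_classes[i + 1:]))
        label[:, i] = (np.arange(num_data) // block) % num_classes[i]
    assert label.tolist() == [[0, 0], [0, 1], [0, 2],
                              [1, 0], [1, 1], [1, 2]]
    return label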
| 3,167 | 32 | 121 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/datasets/samplers/distributed_sampler.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
class DistributedSampler(_DistributedSampler):
def __init__(self,
dataset,
num_replicas=None,
rank=None,
shuffle=True,
round_up=True):
super().__init__(dataset, num_replicas=num_replicas, rank=rank)
self.shuffle = shuffle
self.round_up = round_up
if self.round_up:
self.total_size = self.num_samples * self.num_replicas
else:
self.total_size = len(self.dataset)
def __iter__(self):
# deterministically shuffle based on epoch
if self.shuffle:
g = torch.Generator()
g.manual_seed(self.epoch)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
if self.round_up:
indices = (
indices *
int(self.total_size / len(indices) + 1))[:self.total_size]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
if self.round_up:
assert len(indices) == self.num_samples
return iter(indices)
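# A worked example of the round_up padding above with assumed toy sizes:
# 10 samples over 4 replicas are padded to 12 by repeating the index list,
# then each rank takes a strided slice of exactly num_samples indices.
def _round_up_example():
    indices = list(range(10))
    num_replicas, num_samples = 4, 3  # num_samples = ceil(10 / 4)
    total_size = num_samples * num_replicas  # 12
    indices = (indices * int(total_size / len(indices) + 1))[:total_size]
    per_rank = [indices[rank:total_size:num_replicas]
                for rank in range(num_replicas)]
    # e.g. rank 0 sees [0, 4, 8]; rank 2 sees [2, 6, 0] (a padded repeat).
    assert all(len(chunk) == num_samples for chunk in per_rank)
    return per_rank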
| 1,433 | 31.590909 | 77 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/datasets/pipelines/auto_augment.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import inspect
import random
from numbers import Number
from typing import Sequence
import mmcv
import numpy as np
from ..builder import PIPELINES
from .compose import Compose
# Default hyperparameters for all Ops
_HPARAMS_DEFAULT = dict(pad_val=128)
def random_negative(value, random_negative_prob):
"""Randomly negate value based on random_negative_prob."""
return -value if np.random.rand() < random_negative_prob else value
def merge_hparams(policy: dict, hparams: dict):
"""Merge hyperparameters into policy config.
Only merge partial hyperparameters required of the policy.
Args:
policy (dict): Original policy config dict.
hparams (dict): Hyperparameters need to be merged.
Returns:
dict: Policy config dict after adding ``hparams``.
"""
op = PIPELINES.get(policy['type'])
assert op is not None, f'Invalid policy type "{policy["type"]}".'
for key, value in hparams.items():
if policy.get(key, None) is not None:
continue
if key in inspect.getfullargspec(op.__init__).args:
policy[key] = value
return policy
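# An illustrative sketch for merge_hparams using the Shear policy registered
# later in this file: pad_val from the shared hparams is filled in because
# Shear.__init__ accepts it and the policy dict does not set it itself.
_EXAMPLE_SHEAR_POLICY = dict(type='Shear', magnitude=0.3)
# merge_hparams(_EXAMPLE_SHEAR_POLICY, dict(pad_val=128))
# -> {'type': 'Shear', 'magnitude': 0.3, 'pad_val': 128}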
@PIPELINES.register_module()
class AutoAugment(object):
"""Auto augmentation.
This data augmentation is proposed in `AutoAugment: Learning Augmentation
Policies from Data <https://arxiv.org/abs/1805.09501>`_.
Args:
policies (list[list[dict]]): The policies of auto augmentation. Each
policy in ``policies`` is a specific augmentation policy, and is
composed by several augmentations (dict). When AutoAugment is
called, a random policy in ``policies`` will be selected to
augment images.
hparams (dict): Configs of hyperparameters. Hyperparameters will be
used in policies that require these arguments if these arguments
are not set in policy dicts. Defaults to use _HPARAMS_DEFAULT.
"""
def __init__(self, policies, hparams=_HPARAMS_DEFAULT):
assert isinstance(policies, list) and len(policies) > 0, \
'Policies must be a non-empty list.'
for policy in policies:
assert isinstance(policy, list) and len(policy) > 0, \
'Each policy in policies must be a non-empty list.'
for augment in policy:
assert isinstance(augment, dict) and 'type' in augment, \
'Each specific augmentation must be a dict with key' \
' "type".'
self.hparams = hparams
policies = copy.deepcopy(policies)
self.policies = []
for sub in policies:
merged_sub = [merge_hparams(policy, hparams) for policy in sub]
self.policies.append(merged_sub)
self.sub_policy = [Compose(policy) for policy in self.policies]
def __call__(self, results):
sub_policy = random.choice(self.sub_policy)
return sub_policy(results)
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(policies={self.policies})'
return repr_str
@PIPELINES.register_module()
class RandAugment(object):
r"""Random augmentation.
This data augmentation is proposed in `RandAugment: Practical automated
data augmentation with a reduced search space
<https://arxiv.org/abs/1909.13719>`_.
Args:
policies (list[dict]): The policies of random augmentation. Each
policy in ``policies`` is one specific augmentation policy (dict).
The policy shall at least have key `type`, indicating the type of
            augmentation. For policies that have a magnitude (whose argument
            is named differently across augmentations), `magnitude_key` and
            `magnitude_range` shall be the name of the magnitude argument
            (str) and the range of magnitude (a tuple (val1, val2)),
            respectively. Note that val1 is not necessarily less than val2.
num_policies (int): Number of policies to select from policies each
time.
magnitude_level (int | float): Magnitude level for all the augmentation
selected.
total_level (int | float): Total level for the magnitude. Defaults to
30.
magnitude_std (Number | str): Deviation of magnitude noise applied.
- If positive number, magnitude is sampled from normal distribution
(mean=magnitude, std=magnitude_std).
- If 0 or negative number, magnitude remains unchanged.
- If str "inf", magnitude is sampled from uniform distribution
(range=[min, magnitude]).
hparams (dict): Configs of hyperparameters. Hyperparameters will be
used in policies that require these arguments if these arguments
are not set in policy dicts. Defaults to use _HPARAMS_DEFAULT.
Note:
`magnitude_std` will introduce some randomness to policy, modified by
https://github.com/rwightman/pytorch-image-models.
When magnitude_std=0, we calculate the magnitude as follows:
.. math::
\text{magnitude} = \frac{\text{magnitude\_level}}
{\text{total\_level}} \times (\text{val2} - \text{val1})
+ \text{val1}
"""
def __init__(self,
policies,
num_policies,
magnitude_level,
magnitude_std=0.,
total_level=30,
hparams=_HPARAMS_DEFAULT):
assert isinstance(num_policies, int), 'Number of policies must be ' \
f'of int type, got {type(num_policies)} instead.'
assert isinstance(magnitude_level, (int, float)), \
'Magnitude level must be of int or float type, ' \
f'got {type(magnitude_level)} instead.'
assert isinstance(total_level, (int, float)), 'Total level must be ' \
f'of int or float type, got {type(total_level)} instead.'
assert isinstance(policies, list) and len(policies) > 0, \
'Policies must be a non-empty list.'
assert isinstance(magnitude_std, (Number, str)), \
'Magnitude std must be of number or str type, ' \
f'got {type(magnitude_std)} instead.'
if isinstance(magnitude_std, str):
assert magnitude_std == 'inf', \
'Magnitude std must be of number or "inf", ' \
f'got "{magnitude_std}" instead.'
assert num_policies > 0, 'num_policies must be greater than 0.'
assert magnitude_level >= 0, 'magnitude_level must be no less than 0.'
assert total_level > 0, 'total_level must be greater than 0.'
self.num_policies = num_policies
self.magnitude_level = magnitude_level
self.magnitude_std = magnitude_std
self.total_level = total_level
self.hparams = hparams
policies = copy.deepcopy(policies)
self._check_policies(policies)
self.policies = [merge_hparams(policy, hparams) for policy in policies]
def _check_policies(self, policies):
for policy in policies:
assert isinstance(policy, dict) and 'type' in policy, \
'Each policy must be a dict with key "type".'
type_name = policy['type']
magnitude_key = policy.get('magnitude_key', None)
if magnitude_key is not None:
assert 'magnitude_range' in policy, \
f'RandAugment policy {type_name} needs `magnitude_range`.'
magnitude_range = policy['magnitude_range']
assert (isinstance(magnitude_range, Sequence)
and len(magnitude_range) == 2), \
f'`magnitude_range` of RandAugment policy {type_name} ' \
f'should be a Sequence with two numbers.'
def _process_policies(self, policies):
processed_policies = []
for policy in policies:
processed_policy = copy.deepcopy(policy)
magnitude_key = processed_policy.pop('magnitude_key', None)
if magnitude_key is not None:
magnitude = self.magnitude_level
                # If magnitude_std is a positive number or 'inf', perturb
                # the magnitude value randomly.
if self.magnitude_std == 'inf':
magnitude = random.uniform(0, magnitude)
elif self.magnitude_std > 0:
magnitude = random.gauss(magnitude, self.magnitude_std)
magnitude = min(self.total_level, max(0, magnitude))
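                # Linearly map the (possibly jittered) level in
                # [0, total_level] onto this policy's magnitude_range.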
val1, val2 = processed_policy.pop('magnitude_range')
magnitude = (magnitude / self.total_level) * (val2 -
val1) + val1
processed_policy.update({magnitude_key: magnitude})
processed_policies.append(processed_policy)
return processed_policies
def __call__(self, results):
if self.num_policies == 0:
return results
sub_policy = random.choices(self.policies, k=self.num_policies)
sub_policy = self._process_policies(sub_policy)
sub_policy = Compose(sub_policy)
return sub_policy(results)
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(policies={self.policies}, '
repr_str += f'num_policies={self.num_policies}, '
repr_str += f'magnitude_level={self.magnitude_level}, '
        repr_str += f'magnitude_std={self.magnitude_std}, '
        repr_str += f'total_level={self.total_level})'
return repr_str
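# Illustrative usage sketch (hypothetical policy values, not from this repo):
#   pipeline_step = dict(
#       type='RandAugment',
#       policies=[
#           dict(type='Rotate', magnitude_key='angle',
#                magnitude_range=(0, 30)),
#           dict(type='Invert'),
#       ],
#       num_policies=2,
#       total_level=10,
#       magnitude_level=6)
# Each call randomly picks 2 policies (with replacement); Rotate then uses
# angle = 6 / 10 * (30 - 0) + 0 = 18 degrees (before optional jitter).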
@PIPELINES.register_module()
class Shear(object):
"""Shear images.
Args:
magnitude (int | float): The magnitude used for shear.
pad_val (int, Sequence[int]): Pixel pad_val value for constant fill.
If a sequence of length 3, it is used to pad_val R, G, B channels
respectively. Defaults to 128.
        prob (float): The probability of performing Shear, which should be
            in range [0, 1]. Defaults to 0.5.
direction (str): The shearing direction. Options are 'horizontal' and
'vertical'. Defaults to 'horizontal'.
random_negative_prob (float): The probability that turns the magnitude
negative, which should be in range [0,1]. Defaults to 0.5.
interpolation (str): Interpolation method. Options are 'nearest',
'bilinear', 'bicubic', 'area', 'lanczos'. Defaults to 'bicubic'.
"""
def __init__(self,
magnitude,
pad_val=128,
prob=0.5,
direction='horizontal',
random_negative_prob=0.5,
interpolation='bicubic'):
assert isinstance(magnitude, (int, float)), 'The magnitude type must '\
f'be int or float, but got {type(magnitude)} instead.'
if isinstance(pad_val, int):
pad_val = tuple([pad_val] * 3)
elif isinstance(pad_val, Sequence):
assert len(pad_val) == 3, 'pad_val as a tuple must have 3 ' \
f'elements, got {len(pad_val)} instead.'
            assert all(isinstance(i, int) for i in pad_val), 'pad_val as a '\
                'tuple must have elements of int type.'
else:
raise TypeError('pad_val must be int or tuple with 3 elements.')
assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \
f'got {prob} instead.'
assert direction in ('horizontal', 'vertical'), 'direction must be ' \
f'either "horizontal" or "vertical", got {direction} instead.'
assert 0 <= random_negative_prob <= 1.0, 'The random_negative_prob ' \
f'should be in range [0,1], got {random_negative_prob} instead.'
self.magnitude = magnitude
self.pad_val = tuple(pad_val)
self.prob = prob
self.direction = direction
self.random_negative_prob = random_negative_prob
self.interpolation = interpolation
def __call__(self, results):
if np.random.rand() > self.prob:
return results
magnitude = random_negative(self.magnitude, self.random_negative_prob)
for key in results.get('img_fields', ['img']):
img = results[key]
img_sheared = mmcv.imshear(
img,
magnitude,
direction=self.direction,
border_value=self.pad_val,
interpolation=self.interpolation)
results[key] = img_sheared.astype(img.dtype)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(magnitude={self.magnitude}, '
repr_str += f'pad_val={self.pad_val}, '
repr_str += f'prob={self.prob}, '
repr_str += f'direction={self.direction}, '
repr_str += f'random_negative_prob={self.random_negative_prob}, '
repr_str += f'interpolation={self.interpolation})'
return repr_str
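# Example (illustrative): Shear(magnitude=0.3) shears images horizontally
# by a factor of +/-0.3 (the sign is drawn with random_negative_prob) on
# roughly half of the calls, since prob defaults to 0.5.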
@PIPELINES.register_module()
class Translate(object):
"""Translate images.
Args:
        magnitude (int | float): The magnitude used for translate. Note that
            the offset is calculated as magnitude * image size along the
            corresponding direction. With a magnitude of 1, the whole image
            is moved out of view.
pad_val (int, Sequence[int]): Pixel pad_val value for constant fill.
If a sequence of length 3, it is used to pad_val R, G, B channels
respectively. Defaults to 128.
        prob (float): The probability of performing translate, which should
            be in range [0, 1]. Defaults to 0.5.
direction (str): The translating direction. Options are 'horizontal'
and 'vertical'. Defaults to 'horizontal'.
random_negative_prob (float): The probability that turns the magnitude
negative, which should be in range [0,1]. Defaults to 0.5.
interpolation (str): Interpolation method. Options are 'nearest',
'bilinear', 'bicubic', 'area', 'lanczos'. Defaults to 'nearest'.
"""
def __init__(self,
magnitude,
pad_val=128,
prob=0.5,
direction='horizontal',
random_negative_prob=0.5,
interpolation='nearest'):
assert isinstance(magnitude, (int, float)), 'The magnitude type must '\
f'be int or float, but got {type(magnitude)} instead.'
if isinstance(pad_val, int):
pad_val = tuple([pad_val] * 3)
elif isinstance(pad_val, Sequence):
assert len(pad_val) == 3, 'pad_val as a tuple must have 3 ' \
f'elements, got {len(pad_val)} instead.'
            assert all(isinstance(i, int) for i in pad_val), 'pad_val as a '\
                'tuple must have elements of int type.'
else:
raise TypeError('pad_val must be int or tuple with 3 elements.')
assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \
f'got {prob} instead.'
assert direction in ('horizontal', 'vertical'), 'direction must be ' \
f'either "horizontal" or "vertical", got {direction} instead.'
assert 0 <= random_negative_prob <= 1.0, 'The random_negative_prob ' \
f'should be in range [0,1], got {random_negative_prob} instead.'
self.magnitude = magnitude
self.pad_val = tuple(pad_val)
self.prob = prob
self.direction = direction
self.random_negative_prob = random_negative_prob
self.interpolation = interpolation
def __call__(self, results):
if np.random.rand() > self.prob:
return results
magnitude = random_negative(self.magnitude, self.random_negative_prob)
for key in results.get('img_fields', ['img']):
img = results[key]
height, width = img.shape[:2]
if self.direction == 'horizontal':
offset = magnitude * width
else:
offset = magnitude * height
img_translated = mmcv.imtranslate(
img,
offset,
direction=self.direction,
border_value=self.pad_val,
interpolation=self.interpolation)
results[key] = img_translated.astype(img.dtype)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(magnitude={self.magnitude}, '
repr_str += f'pad_val={self.pad_val}, '
repr_str += f'prob={self.prob}, '
repr_str += f'direction={self.direction}, '
repr_str += f'random_negative_prob={self.random_negative_prob}, '
repr_str += f'interpolation={self.interpolation})'
return repr_str
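# Example (illustrative): Translate(magnitude=0.2) shifts a 224-pixel-wide
# image horizontally by +/-44.8 pixels (0.2 * 224).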
@PIPELINES.register_module()
class Rotate(object):
"""Rotate images.
Args:
angle (float): The angle used for rotate. Positive values stand for
clockwise rotation.
center (tuple[float], optional): Center point (w, h) of the rotation in
the source image. If None, the center of the image will be used.
Defaults to None.
scale (float): Isotropic scale factor. Defaults to 1.0.
pad_val (int, Sequence[int]): Pixel pad_val value for constant fill.
If a sequence of length 3, it is used to pad_val R, G, B channels
respectively. Defaults to 128.
        prob (float): The probability of performing Rotate, which should be
            in range [0, 1]. Defaults to 0.5.
random_negative_prob (float): The probability that turns the angle
negative, which should be in range [0,1]. Defaults to 0.5.
interpolation (str): Interpolation method. Options are 'nearest',
'bilinear', 'bicubic', 'area', 'lanczos'. Defaults to 'nearest'.
"""
def __init__(self,
angle,
center=None,
scale=1.0,
pad_val=128,
prob=0.5,
random_negative_prob=0.5,
interpolation='nearest'):
assert isinstance(angle, float), 'The angle type must be float, but ' \
f'got {type(angle)} instead.'
if isinstance(center, tuple):
assert len(center) == 2, 'center as a tuple must have 2 ' \
f'elements, got {len(center)} elements instead.'
else:
            assert center is None, 'The center type ' \
                f'must be tuple or None, got {type(center)} instead.'
        assert isinstance(scale, float), 'The scale type must be float, but ' \
            f'got {type(scale)} instead.'
if isinstance(pad_val, int):
pad_val = tuple([pad_val] * 3)
elif isinstance(pad_val, Sequence):
assert len(pad_val) == 3, 'pad_val as a tuple must have 3 ' \
f'elements, got {len(pad_val)} instead.'
            assert all(isinstance(i, int) for i in pad_val), 'pad_val as a '\
                'tuple must have elements of int type.'
else:
raise TypeError('pad_val must be int or tuple with 3 elements.')
assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \
f'got {prob} instead.'
assert 0 <= random_negative_prob <= 1.0, 'The random_negative_prob ' \
f'should be in range [0,1], got {random_negative_prob} instead.'
self.angle = angle
self.center = center
self.scale = scale
self.pad_val = tuple(pad_val)
self.prob = prob
self.random_negative_prob = random_negative_prob
self.interpolation = interpolation
def __call__(self, results):
if np.random.rand() > self.prob:
return results
angle = random_negative(self.angle, self.random_negative_prob)
for key in results.get('img_fields', ['img']):
img = results[key]
img_rotated = mmcv.imrotate(
img,
angle,
center=self.center,
scale=self.scale,
border_value=self.pad_val,
interpolation=self.interpolation)
results[key] = img_rotated.astype(img.dtype)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(angle={self.angle}, '
repr_str += f'center={self.center}, '
repr_str += f'scale={self.scale}, '
repr_str += f'pad_val={self.pad_val}, '
repr_str += f'prob={self.prob}, '
repr_str += f'random_negative_prob={self.random_negative_prob}, '
repr_str += f'interpolation={self.interpolation})'
return repr_str
@PIPELINES.register_module()
class AutoContrast(object):
"""Auto adjust image contrast.
Args:
        prob (float): The probability of performing auto contrast, which
            should be in range [0, 1]. Defaults to 0.5.
"""
def __init__(self, prob=0.5):
assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \
f'got {prob} instead.'
self.prob = prob
def __call__(self, results):
if np.random.rand() > self.prob:
return results
for key in results.get('img_fields', ['img']):
img = results[key]
img_contrasted = mmcv.auto_contrast(img)
results[key] = img_contrasted.astype(img.dtype)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(prob={self.prob})'
return repr_str
@PIPELINES.register_module()
class Invert(object):
"""Invert images.
Args:
        prob (float): The probability of performing invert, which should
            be in range [0, 1]. Defaults to 0.5.
"""
def __init__(self, prob=0.5):
assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \
f'got {prob} instead.'
self.prob = prob
def __call__(self, results):
if np.random.rand() > self.prob:
return results
for key in results.get('img_fields', ['img']):
img = results[key]
img_inverted = mmcv.iminvert(img)
results[key] = img_inverted.astype(img.dtype)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(prob={self.prob})'
return repr_str
@PIPELINES.register_module()
class Equalize(object):
"""Equalize the image histogram.
Args:
        prob (float): The probability of performing equalize, which should
            be in range [0, 1]. Defaults to 0.5.
"""
def __init__(self, prob=0.5):
assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \
f'got {prob} instead.'
self.prob = prob
def __call__(self, results):
if np.random.rand() > self.prob:
return results
for key in results.get('img_fields', ['img']):
img = results[key]
img_equalized = mmcv.imequalize(img)
results[key] = img_equalized.astype(img.dtype)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(prob={self.prob})'
return repr_str
@PIPELINES.register_module()
class Solarize(object):
"""Solarize images (invert all pixel values above a threshold).
Args:
thr (int | float): The threshold above which the pixels value will be
inverted.
        prob (float): The probability of performing solarize, which should
            be in range [0, 1]. Defaults to 0.5.
"""
def __init__(self, thr, prob=0.5):
assert isinstance(thr, (int, float)), 'The thr type must '\
f'be int or float, but got {type(thr)} instead.'
assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \
f'got {prob} instead.'
self.thr = thr
self.prob = prob
def __call__(self, results):
if np.random.rand() > self.prob:
return results
for key in results.get('img_fields', ['img']):
img = results[key]
img_solarized = mmcv.solarize(img, thr=self.thr)
results[key] = img_solarized.astype(img.dtype)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(thr={self.thr}, '
repr_str += f'prob={self.prob})'
return repr_str
@PIPELINES.register_module()
class SolarizeAdd(object):
"""SolarizeAdd images (add a certain value to pixels below a threshold).
Args:
magnitude (int | float): The value to be added to pixels below the thr.
thr (int | float): The threshold below which the pixels value will be
adjusted.
        prob (float): The probability of performing solarize, which should
            be in range [0, 1]. Defaults to 0.5.
"""
def __init__(self, magnitude, thr=128, prob=0.5):
        assert isinstance(magnitude, (int, float)), 'The magnitude type must '\
            f'be int or float, but got {type(magnitude)} instead.'
assert isinstance(thr, (int, float)), 'The thr type must '\
f'be int or float, but got {type(thr)} instead.'
assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \
f'got {prob} instead.'
self.magnitude = magnitude
self.thr = thr
self.prob = prob
def __call__(self, results):
if np.random.rand() > self.prob:
return results
for key in results.get('img_fields', ['img']):
img = results[key]
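            # Add `magnitude` to every pixel strictly below `thr`
            # (clipped at 255); pixels at or above `thr` are unchanged.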
img_solarized = np.where(img < self.thr,
np.minimum(img + self.magnitude, 255),
img)
results[key] = img_solarized.astype(img.dtype)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(magnitude={self.magnitude}, '
repr_str += f'thr={self.thr}, '
repr_str += f'prob={self.prob})'
return repr_str
@PIPELINES.register_module()
class Posterize(object):
"""Posterize images (reduce the number of bits for each color channel).
Args:
        bits (int | float): Number of bits for each pixel in the output img,
            which should be less than or equal to 8.
        prob (float): The probability of performing posterize, which should
            be in range [0, 1]. Defaults to 0.5.
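
    For example, ``bits=4`` keeps only the 4 most significant bits of each
    channel, i.e. 16 intensity levels per channel.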
"""
def __init__(self, bits, prob=0.5):
        assert bits <= 8, \
            f'The bits must be no more than 8, got {bits} instead.'
assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \
f'got {prob} instead.'
self.bits = int(bits)
self.prob = prob
def __call__(self, results):
if np.random.rand() > self.prob:
return results
for key in results.get('img_fields', ['img']):
img = results[key]
img_posterized = mmcv.posterize(img, bits=self.bits)
results[key] = img_posterized.astype(img.dtype)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(bits={self.bits}, '
repr_str += f'prob={self.prob})'
return repr_str
@PIPELINES.register_module()
class Contrast(object):
"""Adjust images contrast.
Args:
        magnitude (int | float): The magnitude used for adjusting contrast. A
            positive magnitude enhances the contrast and a negative
            magnitude makes the image grayer. A magnitude of 0 gives the
            original image.
        prob (float): The probability of performing contrast adjusting,
            which should be in range [0, 1]. Defaults to 0.5.
random_negative_prob (float): The probability that turns the magnitude
negative, which should be in range [0,1]. Defaults to 0.5.
"""
def __init__(self, magnitude, prob=0.5, random_negative_prob=0.5):
assert isinstance(magnitude, (int, float)), 'The magnitude type must '\
f'be int or float, but got {type(magnitude)} instead.'
assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \
f'got {prob} instead.'
assert 0 <= random_negative_prob <= 1.0, 'The random_negative_prob ' \
f'should be in range [0,1], got {random_negative_prob} instead.'
self.magnitude = magnitude
self.prob = prob
self.random_negative_prob = random_negative_prob
def __call__(self, results):
if np.random.rand() > self.prob:
return results
magnitude = random_negative(self.magnitude, self.random_negative_prob)
for key in results.get('img_fields', ['img']):
img = results[key]
img_contrasted = mmcv.adjust_contrast(img, factor=1 + magnitude)
results[key] = img_contrasted.astype(img.dtype)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(magnitude={self.magnitude}, '
repr_str += f'prob={self.prob}, '
repr_str += f'random_negative_prob={self.random_negative_prob})'
return repr_str
@PIPELINES.register_module()
class ColorTransform(object):
"""Adjust images color balance.
Args:
        magnitude (int | float): The magnitude used for color transform. A
            positive magnitude enhances the color and a negative magnitude
            makes the image grayer. A magnitude of 0 gives the original
            image.
        prob (float): The probability of performing ColorTransform, which
            should be in range [0, 1]. Defaults to 0.5.
random_negative_prob (float): The probability that turns the magnitude
negative, which should be in range [0,1]. Defaults to 0.5.
"""
def __init__(self, magnitude, prob=0.5, random_negative_prob=0.5):
assert isinstance(magnitude, (int, float)), 'The magnitude type must '\
f'be int or float, but got {type(magnitude)} instead.'
assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \
f'got {prob} instead.'
assert 0 <= random_negative_prob <= 1.0, 'The random_negative_prob ' \
f'should be in range [0,1], got {random_negative_prob} instead.'
self.magnitude = magnitude
self.prob = prob
self.random_negative_prob = random_negative_prob
def __call__(self, results):
if np.random.rand() > self.prob:
return results
magnitude = random_negative(self.magnitude, self.random_negative_prob)
for key in results.get('img_fields', ['img']):
img = results[key]
img_color_adjusted = mmcv.adjust_color(img, alpha=1 + magnitude)
results[key] = img_color_adjusted.astype(img.dtype)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(magnitude={self.magnitude}, '
repr_str += f'prob={self.prob}, '
repr_str += f'random_negative_prob={self.random_negative_prob})'
return repr_str
@PIPELINES.register_module()
class Brightness(object):
"""Adjust images brightness.
Args:
        magnitude (int | float): The magnitude used for adjusting brightness.
            A positive magnitude enhances the brightness and a negative
            magnitude makes the image darker. A magnitude of 0 gives the
            original image.
        prob (float): The probability of performing brightness adjusting,
            which should be in range [0, 1]. Defaults to 0.5.
random_negative_prob (float): The probability that turns the magnitude
negative, which should be in range [0,1]. Defaults to 0.5.
"""
def __init__(self, magnitude, prob=0.5, random_negative_prob=0.5):
assert isinstance(magnitude, (int, float)), 'The magnitude type must '\
f'be int or float, but got {type(magnitude)} instead.'
assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \
f'got {prob} instead.'
assert 0 <= random_negative_prob <= 1.0, 'The random_negative_prob ' \
f'should be in range [0,1], got {random_negative_prob} instead.'
self.magnitude = magnitude
self.prob = prob
self.random_negative_prob = random_negative_prob
def __call__(self, results):
if np.random.rand() > self.prob:
return results
magnitude = random_negative(self.magnitude, self.random_negative_prob)
for key in results.get('img_fields', ['img']):
img = results[key]
img_brightened = mmcv.adjust_brightness(img, factor=1 + magnitude)
results[key] = img_brightened.astype(img.dtype)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(magnitude={self.magnitude}, '
repr_str += f'prob={self.prob}, '
repr_str += f'random_negative_prob={self.random_negative_prob})'
return repr_str
@PIPELINES.register_module()
class Sharpness(object):
"""Adjust images sharpness.
Args:
        magnitude (int | float): The magnitude used for adjusting sharpness.
            A positive magnitude enhances the sharpness and a negative
            magnitude blurs the image. A magnitude of 0 gives the
            original image.
        prob (float): The probability of performing sharpness adjusting,
            which should be in range [0, 1]. Defaults to 0.5.
random_negative_prob (float): The probability that turns the magnitude
negative, which should be in range [0,1]. Defaults to 0.5.
"""
def __init__(self, magnitude, prob=0.5, random_negative_prob=0.5):
assert isinstance(magnitude, (int, float)), 'The magnitude type must '\
f'be int or float, but got {type(magnitude)} instead.'
assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \
f'got {prob} instead.'
assert 0 <= random_negative_prob <= 1.0, 'The random_negative_prob ' \
f'should be in range [0,1], got {random_negative_prob} instead.'
self.magnitude = magnitude
self.prob = prob
self.random_negative_prob = random_negative_prob
def __call__(self, results):
if np.random.rand() > self.prob:
return results
magnitude = random_negative(self.magnitude, self.random_negative_prob)
for key in results.get('img_fields', ['img']):
img = results[key]
img_sharpened = mmcv.adjust_sharpness(img, factor=1 + magnitude)
results[key] = img_sharpened.astype(img.dtype)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(magnitude={self.magnitude}, '
repr_str += f'prob={self.prob}, '
repr_str += f'random_negative_prob={self.random_negative_prob})'
return repr_str
@PIPELINES.register_module()
class Cutout(object):
"""Cutout images.
Args:
shape (int | float | tuple(int | float)): Expected cutout shape (h, w).
If given as a single value, the value will be used for
both h and w.
        pad_val (int, Sequence[int]): Pixel pad_val value for constant fill.
            If it is a sequence, it must have length 3 (one value per
            R, G, B channel). Defaults to 128.
        prob (float): The probability of performing cutout, which should
            be in range [0, 1]. Defaults to 0.5.
"""
def __init__(self, shape, pad_val=128, prob=0.5):
if isinstance(shape, float):
shape = int(shape)
elif isinstance(shape, tuple):
shape = tuple(int(i) for i in shape)
elif not isinstance(shape, int):
raise TypeError(
'shape must be of '
f'type int, float or tuple, got {type(shape)} instead')
if isinstance(pad_val, int):
pad_val = tuple([pad_val] * 3)
elif isinstance(pad_val, Sequence):
assert len(pad_val) == 3, 'pad_val as a tuple must have 3 ' \
f'elements, got {len(pad_val)} instead.'
assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \
f'got {prob} instead.'
self.shape = shape
self.pad_val = tuple(pad_val)
self.prob = prob
def __call__(self, results):
if np.random.rand() > self.prob:
return results
for key in results.get('img_fields', ['img']):
img = results[key]
img_cutout = mmcv.cutout(img, self.shape, pad_val=self.pad_val)
results[key] = img_cutout.astype(img.dtype)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(shape={self.shape}, '
repr_str += f'pad_val={self.pad_val}, '
repr_str += f'prob={self.prob})'
return repr_str
| 37,110 | 39.338043 | 79 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/mmcls/datasets/pipelines/formating.py | # Copyright (c) OpenMMLab. All rights reserved.
from collections.abc import Sequence
import mmcv
import numpy as np
import torch
from mmcv.parallel import DataContainer as DC
from PIL import Image
from ..builder import PIPELINES
def to_tensor(data):
"""Convert objects of various python types to :obj:`torch.Tensor`.
Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
:class:`Sequence`, :class:`int` and :class:`float`.
"""
if isinstance(data, torch.Tensor):
return data
elif isinstance(data, np.ndarray):
return torch.from_numpy(data)
elif isinstance(data, Sequence) and not mmcv.is_str(data):
return torch.tensor(data)
elif isinstance(data, int):
return torch.LongTensor([data])
elif isinstance(data, float):
return torch.FloatTensor([data])
else:
raise TypeError(
f'Type {type(data)} cannot be converted to tensor.'
'Supported types are: `numpy.ndarray`, `torch.Tensor`, '
'`Sequence`, `int` and `float`')
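# Illustrative conversions performed by `to_tensor`:
#   to_tensor(1)              -> tensor([1])      (LongTensor)
#   to_tensor(1.5)            -> tensor([1.5000]) (FloatTensor)
#   to_tensor(np.zeros((2,))) -> tensor([0., 0.]) (float64, via from_numpy)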
@PIPELINES.register_module()
class ToTensor(object):
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
for key in self.keys:
results[key] = to_tensor(results[key])
return results
def __repr__(self):
return self.__class__.__name__ + f'(keys={self.keys})'
@PIPELINES.register_module()
class ImageToTensor(object):
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
for key in self.keys:
img = results[key]
if len(img.shape) < 3:
img = np.expand_dims(img, -1)
results[key] = to_tensor(img.transpose(2, 0, 1))
return results
def __repr__(self):
return self.__class__.__name__ + f'(keys={self.keys})'
@PIPELINES.register_module()
class Transpose(object):
def __init__(self, keys, order):
self.keys = keys
self.order = order
def __call__(self, results):
for key in self.keys:
results[key] = results[key].transpose(self.order)
return results
def __repr__(self):
return self.__class__.__name__ + \
f'(keys={self.keys}, order={self.order})'
@PIPELINES.register_module()
class ToPIL(object):
def __init__(self):
pass
def __call__(self, results):
results['img'] = Image.fromarray(results['img'])
return results
@PIPELINES.register_module()
class ToNumpy(object):
def __init__(self):
pass
def __call__(self, results):
results['img'] = np.array(results['img'], dtype=np.float32)
return results
@PIPELINES.register_module()
class Collect(object):
"""Collect data from the loader relevant to the specific task.
    This is usually the last stage of the data loader pipeline. Typically,
    ``keys`` is set to some subset of "img" and "gt_label".
Args:
keys (Sequence[str]): Keys of results to be collected in ``data``.
meta_keys (Sequence[str], optional): Meta keys to be converted to
``mmcv.DataContainer`` and collected in ``data[img_metas]``.
Default: ('filename', 'ori_shape', 'img_shape', 'flip',
'flip_direction', 'img_norm_cfg')
Returns:
dict: The result dict contains the following keys
- keys in ``self.keys``
            - ``img_metas`` if available
"""
def __init__(self,
keys,
meta_keys=('filename', 'ori_filename', 'ori_shape',
'img_shape', 'flip', 'flip_direction',
'img_norm_cfg')):
self.keys = keys
self.meta_keys = meta_keys
def __call__(self, results):
data = {}
img_meta = {}
for key in self.meta_keys:
if key in results:
img_meta[key] = results[key]
data['img_metas'] = DC(img_meta, cpu_only=True)
for key in self.keys:
data[key] = results[key]
return data
def __repr__(self):
return self.__class__.__name__ + \
f'(keys={self.keys}, meta_keys={self.meta_keys})'
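# Typical final pipeline step (illustrative config):
#   dict(type='Collect', keys=['img', 'gt_label'])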
@PIPELINES.register_module()
class WrapFieldsToLists(object):
"""Wrap fields of the data dictionary into lists for evaluation.
This class can be used as a last step of a test or validation
pipeline for single image evaluation or inference.
    Example:
        >>> test_pipeline = [
        >>>     dict(type='LoadImageFromFile'),
        >>>     dict(type='Normalize',
        >>>          mean=[123.675, 116.28, 103.53],
        >>>          std=[58.395, 57.12, 57.375],
        >>>          to_rgb=True),
        >>>     dict(type='ImageToTensor', keys=['img']),
        >>>     dict(type='Collect', keys=['img']),
        >>>     dict(type='WrapFieldsToLists')
        >>> ]
"""
def __call__(self, results):
# Wrap dict fields into lists
for key, val in results.items():
results[key] = [val]
return results
def __repr__(self):
return f'{self.__class__.__name__}()'
| 5,129 | 27.342541 | 78 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/configs/_base_/models/resnet18_shape3d.py | # model settings
model = dict(
type='ImageClassifier',
backbone=dict(
type='ResNet_CIFAR',
depth=18,
num_stages=4,
out_indices=(3, ),
style='pytorch'),
neck=dict(type='GlobalAveragePooling'),
head=dict(
type='MultiTaskLinearClsHead',
num_classes=[10,10,10,8,4,15],
in_channels=512,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
))
| 430 | 24.352941 | 60 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/configs/_base_/models/wide-resnet28-10.py | # model settings
model = dict(
type='ImageClassifier',
backbone=dict(
type='WideResNet_CIFAR',
depth=28,
stem_channels=16,
base_channels=16 * 10,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
out_channel=640,
style='pytorch'),
neck=dict(type='GlobalAveragePooling'),
head=dict(
type='LinearClsHead',
num_classes=10,
in_channels=640,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1, 5),
)) | 568 | 24.863636 | 60 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/configs/_base_/models/resnet18_cifar.py | # model settings
model = dict(
type='ImageClassifier',
backbone=dict(
type='ResNet_CIFAR',
depth=18,
num_stages=4,
out_indices=(3, ),
style='pytorch'),
neck=dict(type='GlobalAveragePooling'),
head=dict(
type='LinearClsHead',
num_classes=10,
in_channels=512,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
))
| 406 | 22.941176 | 60 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/configs/_base_/models/resnet18.py | # model settings
model = dict(
type='ImageClassifier',
backbone=dict(
type='ResNet',
depth=18,
num_stages=4,
out_indices=(3, ),
style='pytorch'),
neck=dict(type='GlobalAveragePooling'),
head=dict(
type='LinearClsHead',
num_classes=1000,
in_channels=512,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1, 5),
))
| 423 | 22.555556 | 60 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/configs/_base_/models/wide-resnet28-2.py | # model settings
model = dict(
type='ImageClassifier',
backbone=dict(
type='WideResNet_CIFAR',
depth=28,
stem_channels=16,
base_channels=16 * 2,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
out_channel=128,
style='pytorch'),
neck=dict(type='GlobalAveragePooling'),
head=dict(
type='LinearClsHead',
num_classes=10,
in_channels=128,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1, 5),
)) | 567 | 24.818182 | 60 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/configs/_base_/models/resnet50.py | # model settings
model = dict(
type='ImageClassifier',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(3, ),
style='pytorch'),
neck=dict(type='GlobalAveragePooling'),
head=dict(
type='LinearClsHead',
num_classes=1000,
in_channels=2048,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1, 5),
))
| 424 | 22.611111 | 60 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/configs/_base_/models/resnet18_dsprite.py | # model settings
model = dict(
type='ImageClassifier',
backbone=dict(
type='ResNet_CIFAR',
in_channels=1,
depth=18,
num_stages=4,
out_indices=(3, ),
style='pytorch'),
neck=dict(type='GlobalAveragePooling'),
head=dict(
type='MultiTaskLinearClsHead',
num_classes=[3,6,40,32,32],
in_channels=512,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
))
| 450 | 24.055556 | 60 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/configs/_base_/datasets/pipelines/rand_aug.py | # Refers to `_RAND_INCREASING_TRANSFORMS` in pytorch-image-models
rand_increasing_policies = [
dict(type='AutoContrast'),
dict(type='Equalize'),
dict(type='Invert'),
dict(type='Rotate', magnitude_key='angle', magnitude_range=(0, 30)),
dict(type='Posterize', magnitude_key='bits', magnitude_range=(4, 0)),
dict(type='Solarize', magnitude_key='thr', magnitude_range=(256, 0)),
dict(
type='SolarizeAdd',
magnitude_key='magnitude',
magnitude_range=(0, 110)),
dict(
type='ColorTransform',
magnitude_key='magnitude',
magnitude_range=(0, 0.9)),
dict(type='Contrast', magnitude_key='magnitude', magnitude_range=(0, 0.9)),
dict(
type='Brightness', magnitude_key='magnitude',
magnitude_range=(0, 0.9)),
dict(
type='Sharpness', magnitude_key='magnitude', magnitude_range=(0, 0.9)),
dict(
type='Shear',
magnitude_key='magnitude',
magnitude_range=(0, 0.3),
direction='horizontal'),
dict(
type='Shear',
magnitude_key='magnitude',
magnitude_range=(0, 0.3),
direction='vertical'),
dict(
type='Translate',
magnitude_key='magnitude',
magnitude_range=(0, 0.45),
direction='horizontal'),
dict(
type='Translate',
magnitude_key='magnitude',
magnitude_range=(0, 0.45),
direction='vertical')
] | 1,429 | 32.255814 | 79 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/configs/cifar10-kf/wideresnet28-2_mobilenetv2_b128x1_cifar10_softtar_kf.py | _base_ = [
'../_base_/datasets/cifar10_bs128.py'
]
# 93.61
# checkpoint saving
checkpoint_config = dict(interval=10)
# yapf:disable
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
# optimizer
optimizer = dict(type='SGD',
lr=0.1, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[100, 150])
runner = dict(type='EpochBasedRunner', max_epochs=200)
# model settings
model = dict(
type='KFImageClassifier',
kd_loss=dict(type='SoftTarget',
temperature=10.0),
train_cfg=dict(
lambda_kd=0.1,
lambda_feat=1.0,
alpha=1.0,
beta=1e-3,
task_weight=1.0,
teacher_checkpoint=None, # Input your teacher checkpoint
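        # Per-stage channel dims of the selected backbone outputs
        # (student MobileNetV2 stages 5-7 vs. teacher WRN-28-2 stages);
        # presumably consumed to build the feature-alignment layers.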
feat_channels=dict(student=[160, 320, 1280],
teacher=[32, 64, 128]),
),
backbone=dict(
num_task=1,
student=dict(
CKN=dict(type='MobileNetV2_CIFAR',
out_indices=(5, 6, 7),
widen_factor=1.0),
TSN=dict(type='TSN_backbone',
backbone=dict(type='MobileNetV2_CIFAR',
out_indices=(7, ),
widen_factor=0.5),
in_channels=1280,
out_channels=1280)
),
teacher=dict(
type='WideResNet_CIFAR',
depth=28,
stem_channels=16,
base_channels=16 * 2,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(0, 1, 2),
out_channel=128,
style='pytorch')
),
neck=dict(
student=dict(type='GlobalAveragePooling'),
teacher=dict(type='GlobalAveragePooling')
),
head=dict(
student=dict(
type='LinearClsHead',
num_classes=10,
in_channels=1280,
loss=dict(
type='LabelSmoothLoss',
label_smooth_val=0.1,
num_classes=10,
reduction='mean',
loss_weight=1.0),
),
task=dict(
type='LinearClsHead',
num_classes=10,
in_channels=1280,
loss=dict(
type='LabelSmoothLoss',
label_smooth_val=0.1,
num_classes=10,
reduction='mean',
loss_weight=1.0),
),
teacher=dict(
type='LinearClsHead',
num_classes=10,
in_channels=128,
loss=dict(type='CrossEntropyLoss',
loss_weight=1.0),
)
)
)
evaluation = dict(interval=5)
checkpoint_config = dict(interval=10, max_keep_ckpts=1)
| 3,043 | 26.423423 | 65 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/configs/cifar10-kf/wideresnet28-2_wideresnet28-2_b128x1_cifar10_softtar_kf.py | _base_ = [
'../_base_/datasets/cifar10_bs128.py'
]
# 93.58
# checkpoint saving
checkpoint_config = dict(interval=10)
# yapf:disable
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
# optimizer
optimizer = dict(type='SGD',
lr=0.1, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[100, 150])
runner = dict(type='EpochBasedRunner', max_epochs=200)
# model settings
model = dict(
type='KFImageClassifier',
kd_loss=dict(type='SoftTarget',
temperature=10.0),
train_cfg=dict(
lambda_kd=0.1,
lambda_feat=1.0,
alpha=1.0,
beta=1e-3,
task_weight=1.0,
teacher_checkpoint=None, # Input your teacher checkpoint
feat_channels=dict(student=[32, 64, 128],
teacher=[32, 64, 128]),
),
backbone=dict(
num_task=1,
student=dict(
CKN=dict(
type='WideResNet_CIFAR',
depth=28,
stem_channels=16,
base_channels=16 * 2,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(0, 1, 2),
out_channel=128,
style='pytorch'),
TSN=dict(type='TSN_backbone',
backbone=dict(type='MobileNetV2_CIFAR',
out_indices=(7, ),
widen_factor=0.5),
in_channels=1280,
out_channels=128)
),
teacher=dict(
type='WideResNet_CIFAR',
depth=28,
stem_channels=16,
base_channels=16 * 2,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(0, 1, 2),
out_channel=128,
style='pytorch')
),
neck=dict(
student=dict(type='GlobalAveragePooling'),
teacher=dict(type='GlobalAveragePooling')
),
head=dict(
student=dict(
type='LinearClsHead',
num_classes=10,
in_channels=128,
loss=dict(
type='LabelSmoothLoss',
label_smooth_val=0.1,
num_classes=10,
reduction='mean',
loss_weight=1.0),
),
task=dict(
type='LinearClsHead',
num_classes=10,
in_channels=128,
loss=dict(
type='LabelSmoothLoss',
label_smooth_val=0.1,
num_classes=10,
reduction='mean',
loss_weight=1.0),
),
teacher=dict(
type='LinearClsHead',
num_classes=10,
in_channels=128,
loss=dict(type='CrossEntropyLoss',
loss_weight=1.0),
)
)
)
evaluation = dict(interval=5)
checkpoint_config = dict(interval=10, max_keep_ckpts=1)
| 3,275 | 26.529412 | 65 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/configs/cifar10-kf/wideresnet28-2_resnet18_b128x1_cifar10_softtar_kf.py | _base_ = [
'../_base_/datasets/cifar10_bs128.py'
]
# checkpoint saving
checkpoint_config = dict(interval=10)
# yapf:disable
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
# optimizer
optimizer = dict(type='SGD',
lr=0.1, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[100, 150])
runner = dict(type='EpochBasedRunner', max_epochs=200)
# model settings
model = dict(
type='KFImageClassifier',
kd_loss=dict(type='SoftTarget',
temperature=10.0),
train_cfg=dict(
lambda_kd=0.1,
lambda_feat=1.0,
alpha=1.0,
beta=1e-3,
task_weight=1.0,
teacher_checkpoint=None, # Input your teacher checkpoint
feat_channels=dict(student=[128, 256, 512],
teacher=[32, 64, 128]),
),
backbone=dict(
num_task=1,
student=dict(
CKN=dict(type='ResNet_CIFAR',
depth=18,
num_stages=4,
out_indices=(1, 2, 3),
style='pytorch'),
TSN=dict(type='TSN_backbone',
backbone=dict(type='MobileNetV2_CIFAR',
out_indices=(7, ),
widen_factor=0.5),
in_channels=1280,
out_channels=512)
),
teacher=dict(
type='WideResNet_CIFAR',
depth=28,
stem_channels=16,
base_channels=16 * 2,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(0, 1, 2),
out_channel=128,
style='pytorch')
),
neck=dict(
student=dict(type='GlobalAveragePooling'),
teacher=dict(type='GlobalAveragePooling')
),
head=dict(
student=dict(
type='LinearClsHead',
num_classes=10,
in_channels=512,
loss=dict(
type='LabelSmoothLoss',
label_smooth_val=0.1,
num_classes=10,
reduction='mean',
loss_weight=1.0),
),
task=dict(
type='LinearClsHead',
num_classes=10,
in_channels=512,
loss=dict(
type='LabelSmoothLoss',
label_smooth_val=0.1,
num_classes=10,
reduction='mean',
loss_weight=1.0),
),
teacher=dict(
type='LinearClsHead',
num_classes=10,
in_channels=128,
loss=dict(type='CrossEntropyLoss',
loss_weight=1.0),
)
)
)
evaluation = dict(interval=5)
checkpoint_config = dict(interval=10, max_keep_ckpts=1)
| 3,091 | 26.607143 | 65 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/configs/imagenet-kd/resnet50_resnet18_b32x8_imagenet_softtar_kd.py | _base_ = [
'../_base_/datasets/imagenet_bs32_randaug.py',
'../_base_/schedules/imagenet_bs256_coslr.py'
]
# checkpoint saving
checkpoint_config = dict(interval=10)
# yapf:disable
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
fp16 = dict(loss_scale=512.)
# model settings
model = dict(
type='KDImageClassifier',
kd_loss=dict(type='SoftTarget',
temperature=10.0),
train_cfg=dict(
augments=[
dict(type='BatchMixup', alpha=0.1,
num_classes=1000, prob=0.5)
],
lambda_kd=0.1,
teacher_checkpoint=None, # Input your teacher checkpoint
),
backbone=dict(
student=dict(type='ResNet',
depth=18,
num_stages=4,
out_indices=(1, 2, 3),
style='pytorch'),
teacher=dict(type='ResNet',
depth=50,
num_stages=4,
out_indices=(1, 2, 3),
style='pytorch'),
),
neck=dict(
student=dict(type='GlobalAveragePooling'),
teacher=dict(type='GlobalAveragePooling')
),
head=dict(
student=dict(
type='LinearClsHead',
num_classes=1000,
            in_channels=512,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1, 5),
),
teacher=dict(
type='LinearClsHead',
num_classes=1000,
in_channels=2048,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1, 5),
)
))
checkpoint_config = dict(interval=10, max_keep_ckpts=1)
| 1,872 | 25.380282 | 64 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/configs/imagenet-kd/resnet18_resnet18_b32x8_imagenet_softtar_kd.py | _base_ = [
'../_base_/datasets/imagenet_bs32_randaug.py',
'../_base_/schedules/imagenet_bs256_coslr.py'
]
# checkpoint saving
checkpoint_config = dict(interval=10)
# yapf:disable
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
fp16 = dict(loss_scale=512.)
# model settings
model = dict(
type='KDImageClassifier',
kd_loss=dict(type='SoftTarget',
temperature=10.0),
train_cfg=dict(
augments=[
dict(type='BatchMixup', alpha=0.1,
num_classes=1000, prob=0.5)
],
lambda_kd=0.1,
teacher_checkpoint=None, # Input your teacher checkpoint
),
backbone=dict(
student=dict(type='ResNet',
depth=18,
num_stages=4,
out_indices=(1, 2, 3),
style='pytorch'),
teacher=dict(type='ResNet',
depth=18,
num_stages=4,
out_indices=(1, 2, 3),
style='pytorch'),
),
neck=dict(
student=dict(type='GlobalAveragePooling'),
teacher=dict(type='GlobalAveragePooling')
),
head=dict(
student=dict(
type='LinearClsHead',
num_classes=1000,
            in_channels=512,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1, 5),
),
teacher=dict(
type='LinearClsHead',
num_classes=1000,
            in_channels=512,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1, 5),
)
))
checkpoint_config = dict(max_keep_ckpts=1)
| 1,871 | 25.366197 | 64 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/configs/cifar10-kd/wideresnet28-2_resnet18_b128x1_cifar10.py | _base_ = [
'../_base_/datasets/cifar10_bs128.py'
]
# checkpoint saving
checkpoint_config = dict(interval=10)
# yapf:disable
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
# optimizer
optimizer = dict(type='SGD',
lr=0.1, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='step', step=[100, 150])
runner = dict(type='EpochBasedRunner', max_epochs=200)
# model settings
model = dict(
type='KDImageClassifier',
kd_loss=dict(type='SoftTarget',
temperature=10.0),
train_cfg=dict(lambda_kd=0.1,
teacher_checkpoint=None), # Input your teacher checkpoint
backbone=dict(
student=dict(
type='ResNet_CIFAR',
depth=18,
num_stages=4,
out_indices=(3, ),
style='pytorch'),
teacher=dict(
type='WideResNet_CIFAR',
depth=28,
stem_channels=16,
base_channels=16 * 2,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
out_channel=128,
style='pytorch')
),
neck=dict(
student=dict(type='GlobalAveragePooling'),
teacher=dict(type='GlobalAveragePooling')
),
head=dict(
student=dict(
type='LinearClsHead',
num_classes=10,
in_channels=512,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1, 5),
),
teacher=dict(
type='LinearClsHead',
num_classes=10,
in_channels=128,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1, 5),
)
))
| 1,992 | 24.883117 | 76 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/configs/cifar10-kd/wideresnet28-2_wideresnet28-2_b128x1_cifar10.py | _base_ = [
'../_base_/datasets/cifar10_bs128.py'
]
# checkpoint saving
checkpoint_config = dict(interval=10)
# yapf:disable
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
# optimizer
optimizer = dict(type='SGD',
lr=0.1, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='step', step=[100, 150])
runner = dict(type='EpochBasedRunner', max_epochs=200)
# model settings
model = dict(
type='KDImageClassifier',
kd_loss=dict(type='SoftTarget',
temperature=10.0),
train_cfg=dict(lambda_kd=0.1,
teacher_checkpoint=None), # Input your teacher checkpoint
backbone=dict(
student=dict(
type='WideResNet_CIFAR',
depth=28,
stem_channels=16,
base_channels=16 * 2,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
out_channel=128,
style='pytorch'),
teacher=dict(
type='WideResNet_CIFAR',
depth=28,
stem_channels=16,
base_channels=16 * 2,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
out_channel=128,
style='pytorch')
),
neck=dict(
student=dict(type='GlobalAveragePooling'),
teacher=dict(type='GlobalAveragePooling')
),
head=dict(
student=dict(
type='LinearClsHead',
num_classes=10,
in_channels=128,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1, 5),
),
teacher=dict(
type='LinearClsHead',
num_classes=10,
in_channels=128,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1, 5),
)
))
| 2,153 | 25.268293 | 76 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/configs/cifar10-kd/wideresnet28-10_wideresnet28-2_b128x1_cifar10.py | _base_ = [
'../_base_/datasets/cifar10_bs128.py'
]
# checkpoint saving
checkpoint_config = dict(interval=10)
# yapf:disable
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
# optimizer
optimizer = dict(type='SGD',
lr=0.1, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='step', step=[100, 150])
runner = dict(type='EpochBasedRunner', max_epochs=200)
# model settings
model = dict(
type='KDImageClassifier',
kd_loss=dict(type='SoftTarget',
temperature=10.0),
train_cfg=dict(lambda_kd=0.1,
teacher_checkpoint=None), # Input your teacher checkpoint
backbone=dict(
# return_tuple=False,
student=dict(
type='WideResNet_CIFAR',
depth=28,
stem_channels=16,
base_channels=16 * 2,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
out_channel=128,
style='pytorch'),
teacher=dict(
type='WideResNet_CIFAR',
depth=28,
stem_channels=16,
base_channels=16 * 10,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
out_channel=640,
style='pytorch')
),
neck=dict(
student=dict(type='GlobalAveragePooling'),
teacher=dict(type='GlobalAveragePooling')
),
head=dict(
student=dict(
type='LinearClsHead',
num_classes=10,
in_channels=128,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1, 5),
),
teacher=dict(
type='LinearClsHead',
num_classes=10,
in_channels=640,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1, 5),
)
))
| 2,184 | 25.325301 | 76 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/configs/cifar10-kd/wideresnet28-10_mobilenetv2_b128x1_cifar10.py | _base_ = [
'../_base_/datasets/cifar10_bs128.py'
]
# checkpoint saving
checkpoint_config = dict(interval=10)
# yapf:disable
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
# optimizer
optimizer = dict(type='SGD',
lr=0.1, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='step', step=[100, 150])
runner = dict(type='EpochBasedRunner', max_epochs=200)
# model settings
model = dict(
type='KDImageClassifier',
kd_loss=dict(type='SoftTarget',
temperature=10.0),
train_cfg=dict(lambda_kd=0.1,
teacher_checkpoint=None), # Input your teacher checkpoint
backbone=dict(
# return_tuple=False,
student=dict(type='MobileNetV2_CIFAR',
out_indices=(7, ),
widen_factor=1.0),
teacher=dict(
type='WideResNet_CIFAR',
depth=28,
stem_channels=16,
base_channels=16 * 10,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
out_channel=640,
style='pytorch')
),
neck=dict(
student=dict(type='GlobalAveragePooling'),
teacher=dict(type='GlobalAveragePooling')
),
head=dict(
student=dict(
type='LinearClsHead',
num_classes=10,
in_channels=1280,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1, 5),
),
teacher=dict(
type='LinearClsHead',
num_classes=10,
in_channels=640,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1, 5),
)
))
| 1,987 | 25.506667 | 76 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/configs/cifar10-kd/wideresnet28-2_mobilenetv2_b128x1_cifar10.py | _base_ = [
'../_base_/datasets/cifar10_bs128.py'
]
# checkpoint saving
checkpoint_config = dict(interval=10)
# yapf:disable
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
# optimizer
optimizer = dict(type='SGD',
lr=0.1, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='step', step=[100, 150])
runner = dict(type='EpochBasedRunner', max_epochs=200)
# model settings
model = dict(
type='KDImageClassifier',
kd_loss=dict(type='SoftTarget',
temperature=10.0),
train_cfg=dict(lambda_kd=0.1,
teacher_checkpoint=None), # Input your teacher checkpoint
backbone=dict(
# return_tuple=False,
student=dict(type='MobileNetV2_CIFAR',
out_indices=(7, ),
widen_factor=1.0),
teacher=dict(
type='WideResNet_CIFAR',
depth=28,
stem_channels=16,
base_channels=16 * 2,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
out_channel=128,
style='pytorch')
),
neck=dict(
student=dict(type='GlobalAveragePooling'),
teacher=dict(type='GlobalAveragePooling')
),
head=dict(
student=dict(
type='LinearClsHead',
num_classes=10,
in_channels=1280,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1, 5),
),
teacher=dict(
type='LinearClsHead',
num_classes=10,
in_channels=128,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1, 5),
)
))
| 1,986 | 25.493333 | 76 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/configs/cifar10-kd/wideresnet28-10_resnet18_b128x1_cifar10.py | _base_ = [
'../_base_/datasets/cifar10_bs128.py'
]
# checkpoint saving
checkpoint_config = dict(interval=10)
# yapf:disable
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
# optimizer
optimizer = dict(type='SGD',
lr=0.1, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='step', step=[100, 150])
runner = dict(type='EpochBasedRunner', max_epochs=200)
# model settings
model = dict(
type='KDImageClassifier',
kd_loss=dict(type='SoftTarget',
temperature=10.0),
train_cfg=dict(lambda_kd=0.1,
teacher_checkpoint=None), # Input your teacher checkpoint
backbone=dict(
# return_tuple=False,
student=dict(
type='ResNet_CIFAR',
depth=18,
num_stages=4,
out_indices=(3, ),
style='pytorch'),
teacher=dict(
type='WideResNet_CIFAR',
depth=28,
stem_channels=16,
base_channels=16 * 10,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
out_channel=640,
style='pytorch')
),
neck=dict(
student=dict(type='GlobalAveragePooling'),
teacher=dict(type='GlobalAveragePooling')
),
head=dict(
student=dict(
type='LinearClsHead',
num_classes=10,
in_channels=512,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1, 5),
),
teacher=dict(
type='LinearClsHead',
num_classes=10,
in_channels=640,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1, 5),
)
))
| 2,023 | 24.948718 | 76 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/configs/imagenet-kf/resnet18_resnet18_b32x8_imagenet_softtar_kf.py | _base_ = [
'../_base_/datasets/imagenet_bs32_randaug.py',
'../_base_/schedules/imagenet_bs256_coslr.py'
]
# checkpoint saving
checkpoint_config = dict(interval=10)
# yapf:disable
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
fp16 = dict(loss_scale=512.)
# model settings
model = dict(
type='KFImageClassifier',
kd_loss=dict(type='SoftTarget',
temperature=10.0),
train_cfg=dict(
augments=[
dict(type='BatchMixup', alpha=0.1,
num_classes=1000, prob=0.5)
],
lambda_kd=0.1,
lambda_feat=1.0,
alpha=1.0,
beta=1e-3,
task_weight=0.1,
teacher_checkpoint='/home/yangxingyi/.cache/torch/checkpoints/resnet18-5c106cde_converted.pth',
feat_channels=dict(student=[128, 256, 512],
teacher=[128, 256, 512]),
),
backbone=dict(
num_task=1,
student=dict(
CKN=dict(type='ResNet',
depth=18,
num_stages=4,
out_indices=(1, 2, 3),
style='pytorch'),
TSN=dict(type='TSN_backbone',
backbone=dict(type='MobileNetV2',
out_indices=(7, ),
widen_factor=0.5),
in_channels=1280,
out_channels=512)
),
teacher=dict(type='ResNet',
depth=18,
num_stages=4,
out_indices=(1, 2, 3),
style='pytorch'),
),
neck=dict(
student=dict(type='GlobalAveragePooling'),
teacher=dict(type='GlobalAveragePooling')
),
head=dict(
student=dict(
type='LinearClsHead',
num_classes=1000,
in_channels=512,
loss=dict(
type='LabelSmoothLoss',
label_smooth_val=0.1,
num_classes=1000,
reduction='mean',
loss_weight=1.0),
),
task=dict(
type='LinearClsHead',
num_classes=1000,
in_channels=512,
loss=dict(
type='LabelSmoothLoss',
label_smooth_val=0.1,
num_classes=1000,
reduction='mean',
loss_weight=1.0),
),
teacher=dict(
type='LinearClsHead',
num_classes=1000,
in_channels=512,
loss=dict(type='CrossEntropyLoss',
loss_weight=1.0),
)
)
)
checkpoint_config = dict(interval=10, max_keep_ckpts=1)
| 2,876 | 27.205882 | 103 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/configs/imagenet-kf/resnet18_mbnv2_b32x8_imagenet_softtar_kf.py | _base_ = [
'../_base_/datasets/imagenet_bs32_randaug.py',
'../_base_/schedules/imagenet_bs256_coslr_mobilenetv2.py'
]
# yapf:disable
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
fp16 = dict(loss_scale=512.)
# model settings
model = dict(
type='KFImageClassifier',
kd_loss=dict(type='SoftTarget',
temperature=2.0),
train_cfg=dict(
augments=[
dict(type='BatchMixup', alpha=0.1, num_classes=1000, prob=0.5)
],
lambda_kd=0.1,
lambda_feat=1.0,
alpha=1.0,
beta=1e-3,
task_weight=0.1,
teacher_checkpoint= '/home/yangxingyi/.cache/torch/checkpoints/resnet18-5c106cde_converted.pth', # Input your teacher checkpoint
feat_channels=dict(student=[160, 320, 1280],
teacher=[128, 256, 512]),
),
backbone=dict(
num_task=1,
student=dict(
CKN=dict(type='MobileNetV2',
out_indices=(5, 6, 7),
widen_factor=1.0),
TSN=dict(type='TSN_backbone',
backbone=dict(type='MobileNetV2',
out_indices=(7, ),
widen_factor=0.5),
in_channels=1280,
out_channels=1280)
),
teacher=dict(type='ResNet',
depth=18,
num_stages=4,
out_indices=(1, 2, 3),
style='pytorch'),
),
neck=dict(
student=dict(type='GlobalAveragePooling'),
teacher=dict(type='GlobalAveragePooling')
),
head=dict(
student=dict(
type='LinearClsHead',
num_classes=1000,
in_channels=1280,
loss=dict(
type='LabelSmoothLoss',
label_smooth_val=0.1,
num_classes=1000,
reduction='mean',
loss_weight=1.0),
),
task=dict(
type='LinearClsHead',
num_classes=1000,
in_channels=1280,
loss=dict(
type='LabelSmoothLoss',
label_smooth_val=0.1,
num_classes=1000,
reduction='mean',
loss_weight=1.0),
),
teacher=dict(
type='LinearClsHead',
num_classes=1000,
in_channels=512,
loss=dict(type='CrossEntropyLoss',
loss_weight=1.0),
)
)
)
checkpoint_config = dict(interval=10, max_keep_ckpts=1)
| 2,802 | 27.896907 | 136 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/configs/imagenet-kf/resnet18_mbnv2_b32x8_imagenet_softtar_kf_tmp.py | _base_ = [
'../_base_/datasets/imagenet_bs32_randaug.py',
'../_base_/schedules/imagenet_bs256_coslr_mobilenetv2.py'
]
# yapf:disable
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
fp16 = dict(loss_scale=512.)
# model settings
model = dict(
type='KFImageClassifier',
kd_loss=dict(type='SoftTarget',
temperature=2.0),
train_cfg=dict(
augments=[
dict(type='BatchMixup', alpha=0.1, num_classes=1000, prob=0.5)
],
lambda_kd=0.1,
lambda_feat=1.0,
alpha=1.0,
beta=1e-3,
task_weight=0.1,
teacher_checkpoint= '/home/yangxingyi/.cache/torch/checkpoints/resnet18-5c106cde_converted.pth', # Input your teacher checkpoint
feat_channels=dict(student=[160, 320, 1280],
teacher=[128, 256, 512]),
),
backbone=dict(
num_task=1,
student=dict(
CKN=dict(type='MobileNetV2',
out_indices=(5, 6, 7),
widen_factor=1.0),
TSN=dict(type='TSN_backbone',
backbone=dict(type='MobileNetV2',
out_indices=(7, ),
widen_factor=0.5),
in_channels=1280,
out_channels=1280)
),
teacher=dict(type='ResNet',
depth=18,
num_stages=4,
out_indices=(1, 2, 3),
style='pytorch'),
),
neck=dict(
student=dict(type='GlobalAveragePooling'),
teacher=dict(type='GlobalAveragePooling')
),
head=dict(
student=dict(
type='LinearClsHead',
num_classes=1000,
in_channels=1280,
loss=dict(
type='LabelSmoothLoss',
label_smooth_val=0.1,
num_classes=1000,
reduction='mean',
loss_weight=1.0),
),
task=dict(
type='LinearClsHead',
num_classes=1000,
in_channels=1280,
loss=dict(
type='LabelSmoothLoss',
label_smooth_val=0.1,
num_classes=1000,
reduction='mean',
loss_weight=1.0),
),
teacher=dict(
type='LinearClsHead',
num_classes=1000,
in_channels=512,
loss=dict(type='CrossEntropyLoss',
loss_weight=1.0),
)
)
)
checkpoint_config = dict(interval=10, max_keep_ckpts=1)
| 2,802 | 27.896907 | 136 | py |
KnowledgeFactor | KnowledgeFactor-main/cls/configs/imagenet-kf/resnet50_resnet18_b32x8_imagenet_softtar_kf.py | _base_ = [
'../_base_/datasets/imagenet_bs32_randaug.py',
'../_base_/schedules/imagenet_bs256_coslr.py'
]
# yapf:disable
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
fp16 = dict(loss_scale=512.)
# model settings
model = dict(
type='KFImageClassifier',
kd_loss=dict(type='SoftTarget',
temperature=10.0),
train_cfg=dict(
augments=[
dict(type='BatchMixup', alpha=0.1,
num_classes=1000, prob=0.5)
],
lambda_kd=0.1,
lambda_feat=1.0,
alpha=1.0,
beta=1e-3,
task_weight=1.0,
teacher_checkpoint=None, # Input your teacher checkpoint
feat_channels=dict(student=[128, 256, 512],
teacher=[512, 1024, 2048]),
),
backbone=dict(
num_task=1,
student=dict(
CKN=dict(type='ResNet',
depth=18,
num_stages=4,
out_indices=(1, 2, 3),
style='pytorch'),
TSN=dict(type='TSN_backbone',
backbone=dict(type='MobileNetV2',
out_indices=(7, ),
widen_factor=0.5),
in_channels=1280,
out_channels=512)
),
teacher=dict(type='ResNet',
depth=50,
num_stages=4,
out_indices=(1, 2, 3),
style='pytorch'),
),
neck=dict(
student=dict(type='GlobalAveragePooling'),
teacher=dict(type='GlobalAveragePooling')
),
head=dict(
student=dict(
type='LinearClsHead',
num_classes=1000,
in_channels=512,
loss=dict(
type='LabelSmoothLoss',
label_smooth_val=0.1,
num_classes=1000,
reduction='mean',
loss_weight=1.0),
),
task=dict(
type='LinearClsHead',
num_classes=1000,
in_channels=512,
loss=dict(
type='LabelSmoothLoss',
label_smooth_val=0.1,
num_classes=1000,
reduction='mean',
loss_weight=1.0),
),
teacher=dict(
type='LinearClsHead',
num_classes=1000,
in_channels=2048,
loss=dict(type='CrossEntropyLoss',
loss_weight=1.0),
)
)
)
checkpoint_config = dict(interval=10, max_keep_ckpts=2)
| 2,796 | 26.693069 | 64 | py |
Detecting-Cyberbullying-Across-SMPs | Detecting-Cyberbullying-Across-SMPs-master/models.py | import tflearn
import numpy as np
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_1d, global_max_pool
from tflearn.layers.merge_ops import merge
from tflearn.layers.estimator import regression
import tensorflow as tf
import os
os.environ['KERAS_BACKEND'] = 'theano'
from keras.layers import Embedding
from keras.layers import Dense, Input, Flatten
from keras.layers import Conv1D, MaxPooling1D, Embedding, Merge, Dropout, LSTM, GRU, Bidirectional
from keras.models import Model,Sequential
from keras import backend as K
from keras.engine.topology import Layer, InputSpec
from keras import initializers, optimizers
def lstm_keras(inp_dim, vocab_size, embed_size, num_classes, learn_rate):
# K.clear_session()
model = Sequential()
model.add(Embedding(vocab_size, embed_size, input_length=inp_dim, trainable=True))
model.add(Dropout(0.25))
model.add(LSTM(embed_size))
model.add(Dropout(0.50))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
    print(model.summary())
return model
def cnn(inp_dim, vocab_size, embed_size, num_classes, learn_rate):
tf.reset_default_graph()
network = input_data(shape=[None, inp_dim], name='input')
network = tflearn.embedding(network, input_dim=vocab_size, output_dim=embed_size, name="EmbeddingLayer")
network = dropout(network, 0.25)
branch1 = conv_1d(network, embed_size, 3, padding='valid', activation='relu', regularizer="L2", name="layer_1")
branch2 = conv_1d(network, embed_size, 4, padding='valid', activation='relu', regularizer="L2", name="layer_2")
branch3 = conv_1d(network, embed_size, 5, padding='valid', activation='relu', regularizer="L2", name="layer_3")
network = merge([branch1, branch2, branch3], mode='concat', axis=1)
network = tf.expand_dims(network, 2)
network = global_max_pool(network)
network = dropout(network, 0.50)
network = fully_connected(network, num_classes, activation='softmax', name="fc")
network = regression(network, optimizer='adam', learning_rate=learn_rate,
loss='categorical_crossentropy', name='target')
model = tflearn.DNN(network, tensorboard_verbose=0)
return model
def blstm(inp_dim,vocab_size, embed_size, num_classes, learn_rate):
# K.clear_session()
model = Sequential()
model.add(Embedding(vocab_size, embed_size, input_length=inp_dim, trainable=True))
model.add(Dropout(0.25))
model.add(Bidirectional(LSTM(embed_size)))
model.add(Dropout(0.50))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return model
class AttLayer(Layer):
def __init__(self, **kwargs):
super(AttLayer, self).__init__(**kwargs)
def build(self, input_shape):
# Create a trainable weight variable for this layer.
self.W = self.add_weight(name='kernel',
shape=(input_shape[-1],),
initializer='random_normal',
trainable=True)
super(AttLayer, self).build(input_shape) # Be sure to call this somewhere!
def call(self, x, mask=None):
eij = K.tanh(K.dot(x, self.W))
ai = K.exp(eij)
weights = ai/K.sum(ai, axis=1).dimshuffle(0,'x')
weighted_input = x*weights.dimshuffle(0,1,'x')
return weighted_input.sum(axis=1)
def compute_output_shape(self, input_shape):
return (input_shape[0], input_shape[-1])
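# Shape walk-through of AttLayer (editorial note, added for clarity): for input
# x of shape (batch, time, features), eij = tanh(x . W) has shape (batch, time);
# the attention weights ai / sum_t(ai) are broadcast back over the feature axis
# via dimshuffle, and the weighted sum over time returns (batch, features).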
def blstm_atten(inp_dim, vocab_size, embed_size, num_classes, learn_rate):
# K.clear_session()
model = Sequential()
model.add(Embedding(vocab_size, embed_size, input_length=inp_dim))
model.add(Dropout(0.25))
model.add(Bidirectional(LSTM(embed_size, return_sequences=True)))
model.add(AttLayer())
model.add(Dropout(0.50))
model.add(Dense(num_classes, activation='softmax'))
adam = optimizers.Adam(lr=learn_rate, beta_1=0.9, beta_2=0.999)
model.compile(loss='categorical_crossentropy',
                  optimizer=adam,  # use the Adam instance so learn_rate takes effect
metrics=['accuracy'])
model.summary()
return model
def get_model(m_type,inp_dim, vocab_size, embed_size, num_classes, learn_rate):
if m_type == 'cnn':
model = cnn(inp_dim, vocab_size, embed_size, num_classes, learn_rate)
elif m_type == 'lstm':
model = lstm_keras(inp_dim, vocab_size, embed_size, num_classes, learn_rate)
elif m_type == "blstm":
        model = blstm(inp_dim, vocab_size, embed_size, num_classes, learn_rate)
elif m_type == "blstm_attention":
model = blstm_atten(inp_dim, vocab_size, embed_size, num_classes, learn_rate)
else:
print "ERROR: Please specify a correst model"
return None
return model | 5,025 | 39.208 | 115 | py |
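# A minimal usage sketch (the hyper-parameter values below are illustrative,
# not taken from this repo):
#   model = get_model('blstm_attention', inp_dim=100, vocab_size=20000,
#                     embed_size=50, num_classes=2, learn_rate=0.001)
#   model.fit(x_train, y_train)  # expects one-hot labels of width num_classes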
mmda | mmda-main/src/mmda/predictors/__init__.py | # flake8: noqa
from necessary import necessary
with necessary(["tokenizers"], soft=True) as TOKENIZERS_AVAILABLE:
if TOKENIZERS_AVAILABLE:
from mmda.predictors.heuristic_predictors.whitespace_predictor import WhitespacePredictor
from mmda.predictors.heuristic_predictors.dictionary_word_predictor import DictionaryWordPredictor
__all__ = ['DictionaryWordPredictor', 'WhitespacePredictor']
with necessary('pysbd', soft=True) as PYSBD_AVAILABLE:
if PYSBD_AVAILABLE:
from mmda.predictors.heuristic_predictors.sentence_boundary_predictor \
import PysbdSentenceBoundaryPredictor
__all__.append('PysbdSentenceBoundaryPredictor')
with necessary(["layoutparser", "torch", "torchvision", "effdet"], soft=True) as PYTORCH_AVAILABLE:
if PYTORCH_AVAILABLE:
from mmda.predictors.lp_predictors import LayoutParserPredictor
__all__.append('LayoutParserPredictor')
| 934 | 41.5 | 106 | py |
mmda | mmda-main/src/mmda/predictors/xgb_predictors/citation_link_predictor.py | from scipy.stats import rankdata
import numpy as np
import os
import pandas as pd
from typing import List, Dict, Tuple
import xgboost as xgb
from mmda.types.document import Document
from mmda.featurizers.citation_link_featurizers import CitationLink, featurize
class CitationLinkPredictor:
    def __init__(self, artifacts_dir: str, threshold: float):
full_model_path = os.path.join(artifacts_dir, "links_v1.json")
model = xgb.XGBClassifier()
model.load_model(full_model_path)
self.model = model
self.threshold = threshold
# returns a paired mention id and bib id to represent a link
def predict(self, doc: Document) -> List[Tuple[str, str]]:
if len(doc.bibs) == 0:
return []
predicted_links = []
# iterate over mentions
for mention in doc.mentions:
# create all possible links for this mention
possible_links = []
for bib in doc.bibs:
link = CitationLink(mention = mention, bib = bib)
possible_links.append(link)
# featurize and find link with highest score
X_instances = featurize(possible_links)
y_pred = self.model.predict_proba(X_instances)
match_scores = [pred[1] for pred in y_pred] # probability that label is 1
match_index = np.argmax(match_scores)
selected_link = possible_links[match_index]
            if match_scores[match_index] < self.threshold:
                continue
predicted_links.append((selected_link.mention.id, selected_link.bib.id))
return predicted_links
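
# A minimal usage sketch (hedged; the artifacts path and threshold value are
# illustrative, not defaults shipped with the model):
#   predictor = CitationLinkPredictor(artifacts_dir="/path/to/artifacts", threshold=0.5)
#   links = predictor.predict(doc)  # [(mention_id, bib_id), ...]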
| 1,620 | 34.23913 | 85 | py |
mmda | mmda-main/src/mmda/predictors/xgb_predictors/section_nesting_predictor.py | """
SectionNestingPredictor -- Use token-level predictions for "Section" to predict the
parent-child relationships between sections.
Adapted from https://github.com/rauthur/section-annotations-gold
@rauthur
"""
import json
import logging
import re
from collections import OrderedDict
from copy import deepcopy
from dataclasses import dataclass
from functools import cached_property
from typing import Dict, List, Optional, Sequence, Tuple
import numpy as np
import xgboost as xgb
from mmda.predictors.base_predictors.base_predictor import BasePredictor
from mmda.types.annotation import SpanGroup
from mmda.types.box import Box
from mmda.types.document import Document
from mmda.types.names import PagesField, SectionsField
@dataclass
class Example:
parent_id: int
parent_text: str
parent_is_root: bool
child_id: int
child_text: str
parent_no_font_size: bool
child_no_font_size: bool
is_one_size_larger_font: bool
same_font: bool
parent_bold_font: bool
child_bold_font: bool
normalized_page_distance: float
on_same_page: bool
relative_y_pos: int
relative_x_pos: int
abs_x_diff_pos: float
abs_y_diff_pos: float
parent_has_num_prefix: bool
child_has_num_prefix: bool
child_num_prefix_is_top_level: bool
parent_prefix_is_implied_parent_of_child_prefix: bool
child_text_starts_with_something_ending_with_a_period: bool
child_is_top_level_keyword: bool
child_is_all_caps: bool
child_starts_with_upper_letter_prefix: bool
parent_text_starts_with_something_ending_with_a_period: bool
parent_is_top_level_keyword: bool
parent_is_all_caps: bool
parent_starts_with_upper_letter_prefix: bool
@dataclass
class _FontInfo:
size: float
name: str
class PdfStats:
sections: Sequence[SpanGroup]
section_index: Dict[int, SpanGroup]
fontinfo_index: Dict[int, _FontInfo]
def __init__(self, sections) -> None:
self.sections = sections
self._build_section_index()
self._build_fontinfo_index()
def section(self, id_: int) -> SpanGroup:
return self.section_index[id_]
def section_fontsize(self, id_: int) -> float:
return self.fontinfo_index[id_].size
def section_fontname(self, id_: int) -> str:
return self.fontinfo_index[id_].name
def _build_section_index(self):
self.section_index = {s.id: s for s in self.sections}
def _build_fontinfo_index(self):
self.fontinfo_index = {}
for section in self.sections:
assert section.id is not None, "Sections must have an ID"
self.fontinfo_index[section.id] = _FontInfo(
size=self._round_size_with_default(section),
name=self._fontname_with_default(section),
)
@cached_property
def unique_fontsizes(self) -> List[float]:
sizes = {self._round_size_with_default(s) for s in self.sections}
return sorted([s for s in sizes if s > 0])
def _round_size_with_default(self, section: SpanGroup, default=-1) -> float:
return round(section.metadata.get("size", default), 4)
def _fontname_with_default(self, section: SpanGroup, default="[NONE]") -> str:
return section.metadata.get("fontname", default)
NUM_PREFIX_REGEX = "^([0-9\.]+)"
def num_prefix(s: str) -> Tuple[Optional[str], Optional[str]]:
m = re.search(NUM_PREFIX_REGEX, s)
if m is None:
return None, None
s = m.group(0)
if s.endswith("."):
s = s[:-1]
if "." in s:
p = ".".join(s.split(".")[:-1])
else:
p = None
return s, p
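# Illustrative input/output pairs for num_prefix (editorial examples):
#   num_prefix("3.1 Methods")    -> ("3.1", "3")   # prefix plus its implied parent
#   num_prefix("2 Related Work") -> ("2", None)    # top-level numeric prefix
#   num_prefix("Introduction")   -> (None, None)   # no numeric prefix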
def child_text_starts_with_something_ending_with_a_period(
s: SpanGroup,
) -> bool:
if s.text is None:
return False
text = s.text.strip()
if "." not in text:
return False
parts = text.split(" ")
# There must be at least 2 words
if len(parts) <= 1:
return False
    # Ensure there is exactly 1 occurrence of '.', at the end of the word
    if not parts[0].endswith("."):
        return False
    if parts[0].count(".") != 1:
return False
# Numbering should not be too long
if len(parts[0]) > 3:
return False
return True
_TOP_LEVEL_KEYWORDS = [
"abstract",
"introduction",
"conclusions",
"references",
"acknowledgements",
"methods",
"discussion",
"keywords",
"appendix",
]
def child_is_top_level_keyword(s: SpanGroup) -> bool:
if s.text is None:
return False
text = s.text.strip().lower()
# Trim any trailing punctuation like "Acknowledgements."
if text.endswith(".") or text.endswith(":"):
text = text[:-1]
if len(text) == 0:
return False
    # Single-word, but may have some prefix like "I. Introduction"
if len(text.split(" ")) > 2:
return False
for kw in _TOP_LEVEL_KEYWORDS:
if text.endswith(kw):
return True
return False
def child_is_all_caps(s: SpanGroup) -> bool:
if s.text is None:
return False
text = s.text.strip()
if len(text) == 0:
return False
return text.upper() == text
def child_starts_with_upper_letter_prefix(s: SpanGroup) -> bool:
if s.text is None:
return False
text = s.text.strip()
if len(text) == 0:
return False
parts = text.split(" ")
if len(parts) <= 1:
return False
if len(parts[0]) != 1:
return False
return parts[0] >= "A" and parts[0] <= "Z"
def span_group_page(span_group: SpanGroup) -> int:
if len(span_group.spans) == 0:
return -1
return span_group.spans[0].box.page
SPAN_GROUP_ROOT = SpanGroup(spans=[], id=-1)
def make_example(
pdf_stats: PdfStats,
a: SpanGroup,
b: SpanGroup,
num_pages: int,
) -> Example:
parent_is_root = a.id == -1
a_font_size = -1 if parent_is_root else pdf_stats.section_fontsize(a.id)
    parent_no_font_size = a_font_size == -1
    b_font_size = pdf_stats.section_fontsize(b.id)
    child_no_font_size = b_font_size == -1
if (not parent_no_font_size) and (not child_no_font_size):
a_font_index = pdf_stats.unique_fontsizes.index(a_font_size)
b_font_index = pdf_stats.unique_fontsizes.index(b_font_size)
assert a_font_index >= 0
assert b_font_index >= 0
        is_one_size_larger_font = a_font_index - b_font_index == 1
else:
is_one_size_larger_font = False
parent_prefix, _ = num_prefix(a.text)
child_prefix, implied_parent = num_prefix(b.text)
b_box = Box.small_boxes_to_big_box(boxes=[s.box for s in b.spans])
if parent_is_root:
a_box = Box(l=0, t=0, w=0, h=0, page=0)
relative_x_pos = -1
relative_y_pos = -1
a_font_name = "[ROOT]"
b_font_name = pdf_stats.section_fontname(b.id)
else:
a_box = Box.small_boxes_to_big_box(boxes=[s.box for s in a.spans])
relative_x_pos = 0
if a_box.l < b_box.l:
relative_x_pos = -1
elif b_box.l < a_box.l:
relative_x_pos = 1
relative_y_pos = 0
if a_box.t < b_box.t:
relative_y_pos = -1
elif b_box.t < a_box.t:
relative_y_pos = 1
a_font_name = pdf_stats.section_fontname(a.id)
b_font_name = pdf_stats.section_fontname(b.id)
a_page = span_group_page(a)
b_page = span_group_page(b)
return Example(
parent_id=a.id,
parent_text=a.text,
parent_is_root=parent_is_root,
child_id=b.id,
child_text=b.text,
parent_no_font_size=parent_no_font_size,
child_no_font_size=child_no_font_size,
same_font=a_font_name == b_font_name,
parent_bold_font="bold" in a_font_name.lower(),
child_bold_font="bold" in b_font_name.lower(),
is_one_size_larger_font=is_one_size_larger_font,
normalized_page_distance=(b_page - a_page) / num_pages,
on_same_page=a_page == b_page,
relative_x_pos=relative_x_pos,
relative_y_pos=relative_y_pos,
abs_x_diff_pos=abs(a_box.l - b_box.l),
abs_y_diff_pos=abs(a_box.t - b_box.t),
        parent_has_num_prefix=parent_prefix is not None,
        child_has_num_prefix=child_prefix is not None,
child_num_prefix_is_top_level=(
child_prefix is not None and implied_parent is None
),
parent_prefix_is_implied_parent_of_child_prefix=(
parent_prefix is not None and parent_prefix == implied_parent
),
child_text_starts_with_something_ending_with_a_period=child_text_starts_with_something_ending_with_a_period(
b
),
child_is_top_level_keyword=child_is_top_level_keyword(b),
child_is_all_caps=child_is_all_caps(b),
child_starts_with_upper_letter_prefix=child_starts_with_upper_letter_prefix(b),
parent_text_starts_with_something_ending_with_a_period=child_text_starts_with_something_ending_with_a_period(
a
),
parent_is_top_level_keyword=child_is_top_level_keyword(a),
parent_is_all_caps=child_is_all_caps(a),
parent_starts_with_upper_letter_prefix=child_starts_with_upper_letter_prefix(a),
)
@dataclass
class SectionNode:
prev: Optional["SectionNode"]
next: Optional["SectionNode"]
section: SpanGroup
class SectionIndex:
index: Dict[int, SectionNode]
def __init__(self) -> None:
self.index = OrderedDict()
self.index[-1] = SectionNode(
prev=None,
next=None,
section=SPAN_GROUP_ROOT,
)
def add(self, section: SpanGroup, parent_id: int):
if parent_id not in self.index:
raise ValueError("Cannot find parent!")
parent = self.index[parent_id]
curr = parent.next
while curr is not None:
currnext = curr.next
del self.index[curr.section.id]
del curr
curr = currnext
node = SectionNode(prev=parent, next=None, section=section)
parent.next = node
self.index[section.id] = node
def __str__(self) -> str:
curr = self.index[-1]
nodes = []
while curr is not None:
nodes.append(f"[{curr.section.id}] {curr.section.text}")
curr = curr.next
return " -> ".join(nodes)
def bf(b: bool):
return 1.0 if b else 0.0
def convert_example(x: Example):
return [
bf(x.is_one_size_larger_font),
bf(x.same_font),
bf(x.parent_no_font_size),
bf(x.child_no_font_size),
bf(x.parent_bold_font),
bf(x.child_bold_font),
x.normalized_page_distance,
bf(x.on_same_page),
x.relative_y_pos,
x.relative_x_pos,
x.abs_x_diff_pos,
x.abs_y_diff_pos,
bf(x.parent_has_num_prefix),
bf(x.child_has_num_prefix),
bf(x.child_num_prefix_is_top_level),
bf(x.parent_prefix_is_implied_parent_of_child_prefix),
bf(x.parent_is_root),
bf(x.child_text_starts_with_something_ending_with_a_period),
bf(x.child_is_top_level_keyword),
bf(x.child_is_all_caps),
bf(x.child_starts_with_upper_letter_prefix),
bf(x.parent_text_starts_with_something_ending_with_a_period),
bf(x.parent_is_top_level_keyword),
bf(x.parent_is_all_caps),
bf(x.parent_starts_with_upper_letter_prefix),
]
class SectionNestingPredictor(BasePredictor):
REQUIRED_BACKENDS = None
REQUIRED_DOCUMENT_FIELDS = [SectionsField, PagesField]
def __init__(self, model_file: str) -> None:
super().__init__()
self.model = xgb.XGBClassifier()
self.model.load_model(model_file)
def predict(self, document: Document) -> List[SpanGroup]:
sections = document.sections
if len(sections) == 0:
return []
index = SectionIndex()
pdf_stats = PdfStats(sections)
results = []
for section in sections:
xs = []
# FIXME: Debugging only?
parent_texts = []
parent_ids = []
for node in index.index.values():
x = make_example(pdf_stats, node.section, section, len(document.pages))
xs.append(convert_example(x))
parent_texts.append(node.section.text)
parent_ids.append(node.section.id)
logging.debug("SECTION: %s [%i]", section.text, section.id)
logging.debug("CANDIDATES: %s", json.dumps(parent_texts))
pos_probs = self.model.predict_proba(xs)[:, 1]
pos_probs = pos_probs / sum(pos_probs)
pos_index = np.argmax(pos_probs)
logging.debug(json.dumps([float(round(p, 4)) for p in pos_probs]))
logging.debug(f"Picked {parent_texts[pos_index]}!")
parent_id = parent_ids[pos_index]
# Maintain the text from VILA for each span group
metadata = deepcopy(section.metadata)
metadata.parent_id = parent_id
results.append(
SpanGroup(
spans=deepcopy(section.spans),
box_group=deepcopy(section.box_group),
id=deepcopy(section.id), # Ensure some ID is created
doc=None, # Allows calling doc.annotate(...)
metadata=metadata,
)
)
index.add(section, parent_ids[pos_index])
return results
| 13,683 | 26.813008 | 117 | py |
mmda | mmda-main/src/mmda/predictors/hf_predictors/vila_predictor.py | # This file rewrites the PDFPredictor classes in
# https://github.com/allenai/VILA/blob/dd242d2fcbc5fdcf05013174acadb2dc896a28c3/src/vila/predictors.py#L1
# to reduce the dependency on the VILA package.
from typing import List, Union, Dict, Any, Tuple
from abc import abstractmethod
from dataclasses import dataclass
import inspect
import itertools
from tqdm import tqdm
import torch
from transformers import AutoTokenizer, AutoConfig, AutoModelForTokenClassification
from vila.models.hierarchical_model import (
HierarchicalModelForTokenClassification,
HierarchicalModelConfig,
)
from vila.dataset.preprocessors import instantiate_dataset_preprocessor
from mmda.types.metadata import Metadata
from mmda.types.names import PagesField, RowsField, TokensField
from mmda.types.annotation import Annotation, Span, SpanGroup
from mmda.types.document import Document
from mmda.predictors.hf_predictors.utils import (
convert_document_page_to_pdf_dict,
convert_sequence_tagging_to_spans,
normalize_bbox,
)
from mmda.predictors.hf_predictors.base_hf_predictor import BaseHFPredictor
# Two constants for the constraining the size of the page for
# inputs to the model.
# TODO: Move this to somewhere else.
MAX_PAGE_WIDTH = 1000
MAX_PAGE_HEIGHT = 1000
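# For intuition (editorial example; assumes normalize_bbox scales coordinates
# proportionally into the target grid):
#   normalize_bbox((50, 100, 150, 200), 500, 800,
#                  target_width=1000, target_height=1000) -> (100, 125, 300, 250)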
def columns_used_in_model_inputs(model):
signature = inspect.signature(model.forward)
signature_columns = list(signature.parameters.keys())
return signature_columns
@dataclass
class VILAPreprocessorConfig:
agg_level: str = "row"
label_all_tokens: bool = False
group_bbox_agg: str = "first"
added_special_sepration_token: str = "[SEP]"
# This is introduced to support the updates in the
# vila 0.4.0 which fixes the typo.
@property
def added_special_separation_token(self):
return self.added_special_sepration_token
class BaseVILAPredictor(BaseHFPredictor):
REQUIRED_BACKENDS = ["transformers", "torch", "vila"]
REQUIRED_DOCUMENT_FIELDS = [PagesField, TokensField]
def __init__(
self, model: Any, config: Any, tokenizer: Any, preprocessor, device=None
):
self.model = model
self.tokenizer = tokenizer
self.config = config
self.preprocessor = preprocessor
if device is None:
self.device = model.device
else:
self.device = device
model.to(self.device)
self.model.eval()
self._used_cols = columns_used_in_model_inputs(self.model)
# Sometimes the input data might contain certain columns that are
# not used in the model inputs. For example, for a BERT model,
# it won't use the `bbox` column.
@classmethod
def from_pretrained(
cls,
model_name_or_path: str,
preprocessor=None,
device: str = None,
**preprocessor_config
):
config = AutoConfig.from_pretrained(model_name_or_path)
model = AutoModelForTokenClassification.from_pretrained(
model_name_or_path, config=config
)
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
if preprocessor is None:
preprocessor = cls.initialize_preprocessor(
tokenizer, VILAPreprocessorConfig(**preprocessor_config)
)
return cls(model, config, tokenizer, preprocessor, device)
@staticmethod
@abstractmethod
def initialize_preprocessor(tokenizer, config):
# preprocessors defines how to create the actual model inputs
# based on the raw pdf data (characterized by pdf_dict).
# For example, in i-vila models, we can inject a special token
# in the model inputs. This requires additional preprocessing
# of the pdf_dicts, and it is handled by preprocessors in the
# vila module.
pass
def preprocess(
self, pdf_dict: Dict[str, List[Any]], page_width: int, page_height: int
) -> Dict[str, List[Any]]:
_labels = pdf_dict.get("labels")
pdf_dict["labels"] = [0] * len(pdf_dict["words"])
        # preprocess_sample requires numeric labels, so we temporarily set them
        # all to 0 (it will internally set some labels to -100) and restore the
        # original labels afterwards.
model_inputs = self.preprocessor.preprocess_sample(pdf_dict)
model_inputs["bbox"] = [
[
normalize_bbox(
bbox,
page_width,
page_height,
target_width=MAX_PAGE_WIDTH,
target_height=MAX_PAGE_HEIGHT,
)
for bbox in batch
]
for batch in model_inputs["bbox"]
]
pdf_dict["labels"] = _labels
return model_inputs
@abstractmethod
def get_true_token_level_category_prediction(
self, pdf_dict, model_inputs, model_predictions
) -> List[Union[str, int]]:
# Typically BERT-based models will generate categories for each
# word-piece encoded tokens (and also for included special tokens
# like [SEP] and [CLS]). Therefore, we need to clean the predictions
# to get the category predictions for the tokens that are actually
# appeared inside the document.
# The implementation of this method is specific to each model.
pass
def postprocess(
self, document, pdf_dict, model_inputs, model_predictions
) -> List[SpanGroup]:
true_token_prediction = self.get_true_token_level_category_prediction(
pdf_dict, model_inputs, model_predictions
)
token_prediction_spans = convert_sequence_tagging_to_spans(
true_token_prediction
)
prediction_spans = []
for (token_start, token_end, label) in token_prediction_spans:
cur_spans = document.tokens[token_start:token_end]
start = min([ele.start for ele in cur_spans])
end = max([ele.end for ele in cur_spans])
sg = SpanGroup(spans=[Span(start, end)], metadata=Metadata(type=label))
prediction_spans.append(sg)
return prediction_spans
def predict(self, document: Document) -> List[Annotation]:
page_prediction_results = []
for page_id, page in enumerate(tqdm(document.pages)):
if page.tokens:
page_width, page_height = document.images[page_id].size
pdf_dict = convert_document_page_to_pdf_dict(
page, page_width=page_width, page_height=page_height
)
                # VILA models are trained on absolute page dimensions rather than
                # the fixed (1000, 1000) size used in vanilla LayoutLM models
model_inputs = self.preprocess(pdf_dict, page_width, page_height)
model_outputs = self.model(**self.model_input_collator(model_inputs))
model_predictions = self.get_category_prediction(model_outputs)
page_prediction_results.extend(
self.postprocess(page, pdf_dict, model_inputs, model_predictions)
)
return page_prediction_results
############################################
###### Some other auxiliary functions ######
############################################
def get_category_prediction(self, model_outputs):
predictions = model_outputs.logits.argmax(dim=-1).cpu().numpy()
return predictions
def model_input_collator(self, sample):
return {
key: torch.tensor(val, dtype=torch.int64, device=self.device)
for key, val in sample.items()
if key in self._used_cols
}
class SimpleVILAPredictor(BaseVILAPredictor):
REQUIRED_DOCUMENT_FIELDS = [PagesField, TokensField]
@staticmethod
def initialize_preprocessor(tokenizer, config: VILAPreprocessorConfig):
return instantiate_dataset_preprocessor("base", tokenizer, config)
def get_true_token_level_category_prediction(
self, pdf_dict, model_inputs, model_predictions
):
encoded_labels = model_inputs["labels"]
true_predictions = [
[(p, l) for (p, l) in zip(prediction, label) if l != -100]
for prediction, label in zip(model_predictions, encoded_labels)
]
true_predictions = list(itertools.chain.from_iterable(true_predictions))
preds = [ele[0] for ele in true_predictions]
# right here, the true_prediction has one-to-one correspondence with
# the words in the input document.
return preds
class IVILAPredictor(SimpleVILAPredictor):
REQUIRED_DOCUMENT_FIELDS = [PagesField, TokensField, RowsField] # , Blocks]
# TODO: Right now we only use the rows, but we should also use the blocks
# in the future.
@staticmethod
def initialize_preprocessor(tokenizer, config):
return instantiate_dataset_preprocessor("layout_indicator", tokenizer, config)
class HVILAPredictor(BaseVILAPredictor):
REQUIRED_DOCUMENT_FIELDS = [PagesField, TokensField, RowsField] # , Blocks]
# TODO: Right now we only use the rows, but we should also use the blocks
# in the future.
@classmethod
def from_pretrained(
cls,
model_name_or_path: str,
preprocessor=None,
device: str = None,
**preprocessor_config
):
config = HierarchicalModelConfig.from_pretrained(model_name_or_path)
model = HierarchicalModelForTokenClassification.from_pretrained(
model_name_or_path, config=config
)
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
if preprocessor is None:
preprocessor = cls.initialize_preprocessor(
tokenizer, VILAPreprocessorConfig(**preprocessor_config)
)
return cls(model, config, tokenizer, preprocessor, device)
@staticmethod
def initialize_preprocessor(tokenizer, config):
return instantiate_dataset_preprocessor(
"hierarchical_modeling", tokenizer, config
)
@staticmethod
def flatten_line_level_prediction(batched_line_pred, batched_line_word_count):
final_flattend_pred = []
for line_pred, line_word_count in zip(
batched_line_pred, batched_line_word_count
):
assert len(line_pred) == len(line_word_count)
for (pred, label), (line_id, count) in zip(line_pred, line_word_count):
final_flattend_pred.append([[pred, label, line_id]] * count)
return list(itertools.chain.from_iterable(final_flattend_pred))
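
    # Illustrative expansion (editorial example): a line-level prediction
    # (pred, label) = (1, 0) paired with (line_id, count) = (7, 3) expands to
    # three per-word entries [1, 0, 7], restoring token-level alignment.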
def get_true_token_level_category_prediction(
self, pdf_dict, model_inputs, model_predictions
):
encoded_labels = model_inputs["labels"]
true_predictions = [
[(p, l) for (p, l) in zip(prediction, label) if l != -100]
for prediction, label in zip(model_predictions, encoded_labels)
]
flatten_predictions = self.flatten_line_level_prediction(
true_predictions, model_inputs["group_word_count"]
)
preds = [ele[0] for ele in flatten_predictions]
return preds
| 11,250 | 35.060897 | 105 | py |
mmda | mmda-main/src/mmda/predictors/hf_predictors/mention_predictor.py | import itertools
import os.path
import string
from typing import Dict, Iterator, List, Optional
from optimum.onnxruntime import ORTModelForTokenClassification
import torch
from transformers import AutoModelForTokenClassification, AutoTokenizer, BatchEncoding
from mmda.types.annotation import Annotation, SpanGroup
from mmda.types.document import Document
from mmda.types.span import Span
from mmda.parsers.pdfplumber_parser import PDFPlumberParser
class Labels:
# BILOU https://stackoverflow.com/q/17116446
MENTION_OUTSIDE_ID = 0
MENTION_BEGIN_ID = 1
MENTION_INSIDE_ID = 2
MENTION_LAST_ID = 3
MENTION_UNIT_ID = 4
MENTION_OUTSIDE = "O"
MENTION_BEGIN = "B-MENTION"
MENTION_INSIDE = "I-MENTION"
MENTION_LAST = "E-MENTION" # "end"
MENTION_UNIT = "S-MENTION" # "single"
ID_TO_LABEL: Dict[int, str] = {
MENTION_OUTSIDE_ID: MENTION_OUTSIDE,
MENTION_BEGIN_ID: MENTION_BEGIN,
MENTION_INSIDE_ID: MENTION_INSIDE,
MENTION_LAST_ID: MENTION_LAST,
MENTION_UNIT_ID: MENTION_UNIT
}
LABEL_TO_ID: Dict[str, int] = {
MENTION_OUTSIDE: MENTION_OUTSIDE_ID,
MENTION_BEGIN: MENTION_BEGIN_ID,
MENTION_INSIDE: MENTION_INSIDE_ID,
MENTION_LAST: MENTION_LAST_ID,
MENTION_UNIT: MENTION_UNIT_ID
}
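
    # Worked BILOU example (editorial; tokens and tags are illustrative):
    #   tokens: ["see", "Smith", "et", "al.", "2020", "for", "details"]
    #   tags:   [ "O",  "B",     "I",  "I",   "E",    "O",   "O"      ]
    # i.e. "Smith et al. 2020" is one mention; a one-token mention is tagged "S".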
class MentionPredictor:
# This predictor works best on documents generated by a pdfplumber instance
# that is configured to use the following value for `split_at_punctuation`.
PUNCTUATION_TO_SPLIT_AT = PDFPlumberParser.DEFAULT_PUNCTUATION_CHARS
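    # A minimal parsing sketch under that assumption (hedged; "paper.pdf" is a
    # hypothetical path, and the kwarg name is assumed from the parser's API):
    #   parser = PDFPlumberParser(split_at_punctuation=PUNCTUATION_TO_SPLIT_AT)
    #   doc = parser.parse("paper.pdf")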
def __init__(self, artifacts_dir: str):
self.tokenizer = AutoTokenizer.from_pretrained(artifacts_dir)
onnx = os.path.exists(os.path.join(artifacts_dir, "model.onnx"))
if onnx:
self.model = ORTModelForTokenClassification.from_pretrained(artifacts_dir, file_name="model.onnx")
else:
self.model = AutoModelForTokenClassification.from_pretrained(artifacts_dir)
# this is a side-effect(y) function
self.model.to(torch.device("cuda" if torch.cuda.is_available() else "cpu"))
if not onnx:
# https://stackoverflow.com/a/60018731
            self.model.eval()  # for some reason the onnx version doesn't have an eval()
def predict(self, doc: Document, print_warnings: bool = False) -> List[SpanGroup]:
if not hasattr(doc, 'pages'):
return []
spangroups = []
counter = itertools.count()
for page in doc.pages:
spangroups.extend(self.predict_page(page, counter=counter, print_warnings=print_warnings))
return spangroups
def predict_page(self, page: Annotation, counter: Iterator[int], print_warnings: bool = False) -> List[SpanGroup]:
if not hasattr(page, 'tokens'):
return []
ret = []
words: List[str] = ["".join(token.symbols) for token in page.tokens]
word_spans: List[List[Span]] = [[Span.from_json(span_dict=span.to_json()) for span in token.spans] for token in page.tokens]
inputs = self.tokenizer(
[words],
is_split_into_words=True,
max_length=512,
truncation=True,
padding='max_length',
return_overflowing_tokens=True,
return_tensors="pt"
)
with torch.no_grad():
# Control device memory use to predictable levels
# by limiting size of batches sent to it.
prediction_label_ids = []
for index, sequence in enumerate(inputs["input_ids"]):
batch = BatchEncoding(
data=dict(
input_ids=inputs["input_ids"][index:index+1],
token_type_ids=inputs["token_type_ids"][index:index+1],
attention_mask=inputs["attention_mask"][index:index+1],
)
)
batch.to(self.model.device)
batch_outputs = self.model(**batch)
batch_prediction_label_ids = torch.argmax(batch_outputs.logits, dim=-1)[0]
prediction_label_ids.append(batch_prediction_label_ids)
def has_label_id(lbls: List[int], want_label_id: int) -> bool:
return any(lbl == want_label_id for lbl in lbls)
# make list of word ids and list of label ids for each word
word_ids: List[Optional[int]] = []
word_label_ids: List[Optional[List[int]]] = []
for idx1 in range(len(inputs['input_ids'])):
batch_label_ids = prediction_label_ids[idx1]
input_ = inputs[idx1]
# keep track of this loop's word_ids
these_word_ids = [input_.word_ids[0]]
# append to the outer word_ids and word_label_ids lists
if input_.word_ids[0] is not None:
word_ids.append(input_.word_ids[0])
word_label_ids.append(batch_label_ids[0])
else:
# preserve the Nones
word_ids.append(None)
word_label_ids.append(None)
for idx2 in range(1, len(input_.word_ids)):
word_id: int = input_.word_ids[idx2]
# get previous_word_id from this current list of word_ids
previous_word_id: int = input_.word_ids[idx2 - 1]
# if all of these_word_ids are None...
                if all(word_id is None for word_id in these_word_ids):  # word_id 0 is a real word, so test for None explicitly
# ... then try to get previous_word_id from looping through larger word_ids list til not None
for idx3 in range(len(word_ids) - 1, -1, -1):
if word_ids[idx3] is not None:
previous_word_id = word_ids[idx3]
break
if word_id is not None:
label_id: int = batch_label_ids[idx2]
if word_id == previous_word_id:
# add to previous_word_id's word_label_ids by finding its corresponding index in word_ids
for idx3 in range(len(word_ids) - 1, -1, -1):
if word_ids[idx3] == previous_word_id:
word_label_ids[idx3].append(label_id)
break
else:
word_label_ids.append([label_id])
word_ids.append(word_id)
else:
# again, preserve the Nones
word_ids.append(None)
word_label_ids.append(None)
# always
these_word_ids.append(word_id)
acc: List[Span] = []
outside_mention = True
def append_acc():
nonlocal acc
if acc:
ret.append(SpanGroup(spans=acc, id=next(counter)))
acc = []
# now we can zip our lists of word_ids (which correspond to spans) and label_ids (for which there can be
# multiple because of batching), we can decide how to label each span how to accumulate them into SpanGroups
for word_id, label_ids in zip(word_ids, word_label_ids):
            if word_id is None:  # truthiness (`if not word_id`) would wrongly skip word 0
continue
spans = word_spans[word_id]
has_begin = has_label_id(label_ids, Labels.MENTION_BEGIN_ID)
has_last = has_label_id(label_ids, Labels.MENTION_LAST_ID)
has_unit = has_label_id(label_ids, Labels.MENTION_UNIT_ID)
warnings = []
label_id: Optional[int] = None
if sum(1 for cond in [has_begin, has_last, has_unit] if cond) > 1:
warnings.append(
"found multiple labels for the same word: "
f"has_begin={has_begin} has_last={has_last} has_unit={has_unit}, spans = {spans}"
)
for cur_label_id in label_ids:
# prioritize begin, last, unit over the rest
if cur_label_id not in (Labels.MENTION_INSIDE_ID, Labels.MENTION_OUTSIDE_ID):
label_id = cur_label_id
break
if label_id is None:
# prioritize inside over outside
label_id = Labels.MENTION_INSIDE_ID \
if any(lbl == Labels.MENTION_INSIDE_ID for lbl in label_ids) else label_ids[0]
if outside_mention and has_last:
warnings.append(f"found an 'L' while outside mention, spans = {spans}")
if not outside_mention and (has_begin or has_unit):
warnings.append(f"found a 'B' or 'U' while inside mention, spans = {spans}")
if warnings and print_warnings:
print("warnings:")
for warning in warnings:
print(f" - {warning}")
if label_id == Labels.MENTION_UNIT_ID:
append_acc()
acc = spans
append_acc()
outside_mention = True
if label_id == Labels.MENTION_BEGIN_ID:
append_acc()
acc = spans
outside_mention = False
elif label_id == Labels.MENTION_LAST_ID:
acc.extend(spans)
append_acc()
outside_mention = True
elif label_id == Labels.MENTION_INSIDE_ID:
acc.extend(spans)
append_acc()
return ret
| 9,459 | 40.130435 | 132 | py |
mmda | mmda-main/src/mmda/predictors/hf_predictors/token_classification_predictor.py | from typing import List, Union, Dict, Any, Tuple, Optional, Sequence
from abc import abstractmethod
from tqdm import tqdm
from vila.predictors import (
SimplePDFPredictor,
LayoutIndicatorPDFPredictor,
HierarchicalPDFPredictor,
)
from mmda.types.metadata import Metadata
from mmda.types.names import BlocksField, PagesField, RowsField, TokensField
from mmda.types.annotation import Annotation, Span, SpanGroup
from mmda.types.document import Document
from mmda.predictors.hf_predictors.utils import (
convert_document_page_to_pdf_dict,
convert_sequence_tagging_to_spans,
)
from mmda.predictors.hf_predictors.base_hf_predictor import BaseHFPredictor
class BaseSinglePageTokenClassificationPredictor(BaseHFPredictor):
REQUIRED_BACKENDS = ["transformers", "torch", "vila"]
REQUIRED_DOCUMENT_FIELDS = [PagesField, TokensField]
DEFAULT_SUBPAGE_PER_RUN = 2 # TODO: Might remove this in the future for longformer-like models
@property
@abstractmethod
def VILA_MODEL_CLASS(self):
pass
def __init__(self, predictor, subpage_per_run: Optional[int] = None):
self.predictor = predictor
# TODO: Make this more robust
self.id2label = self.predictor.model.config.id2label
self.label2id = self.predictor.model.config.label2id
self.subpage_per_run = subpage_per_run or self.DEFAULT_SUBPAGE_PER_RUN
@classmethod
def from_pretrained(
cls,
model_name_or_path: str,
preprocessor=None,
device: Optional[str] = None,
subpage_per_run: Optional[int] = None,
**preprocessor_config
):
predictor = cls.VILA_MODEL_CLASS.from_pretrained(
model_path=model_name_or_path,
preprocessor=preprocessor,
device=device,
**preprocessor_config
)
return cls(predictor, subpage_per_run)
def predict(
self, document: Document, subpage_per_run: Optional[int] = None
) -> List[Annotation]:
page_prediction_results = []
for page_id, page in enumerate(tqdm(document.pages)):
if page.tokens:
page_width, page_height = document.images[page_id].size
pdf_dict = self.preprocess(
page, page_width=page_width, page_height=page_height
)
model_predictions = self.predictor.predict(
page_data=pdf_dict,
page_size=(page_width, page_height),
batch_size=subpage_per_run or self.subpage_per_run,
return_type="list",
)
assert len(model_predictions) == len(
page.tokens), f"Model predictions and tokens are not the same length ({len(model_predictions)} != {len(page.tokens)}) for page {page_id}"
page_prediction_results.extend(
self.postprocess(page, model_predictions)
)
return page_prediction_results
def preprocess(self, page: Document, page_width: float, page_height: float) -> Dict:
# In the latest vila implementations (after 0.4.0), the predictor will
# handle all other preprocessing steps given the pdf_dict input format.
return convert_document_page_to_pdf_dict(
page, page_width=page_width, page_height=page_height
)
def postprocess(self, document: Document, model_predictions) -> List[SpanGroup]:
token_prediction_spans = convert_sequence_tagging_to_spans(model_predictions)
prediction_spans = []
for (token_start, token_end, label) in token_prediction_spans:
cur_spans = document.tokens[token_start:token_end]
start = min([ele.start for ele in cur_spans])
end = max([ele.end for ele in cur_spans])
sg = SpanGroup(spans=[Span(start, end)], metadata=Metadata(type=label))
prediction_spans.append(sg)
return prediction_spans
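
# Illustrative postprocess behavior (labels are made up): per-token predictions
# ["title", "title", "author"] collapse to spans [(0, 2, "title"), (2, 3, "author")],
# and each span becomes one SpanGroup over the covered tokens' character range.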
class SinglePageTokenClassificationPredictor(
BaseSinglePageTokenClassificationPredictor
):
VILA_MODEL_CLASS = SimplePDFPredictor
class IVILATokenClassificationPredictor(BaseSinglePageTokenClassificationPredictor):
VILA_MODEL_CLASS = LayoutIndicatorPDFPredictor
@property
def REQUIRED_DOCUMENT_FIELDS(self) -> List:
base_reqs = [PagesField, TokensField]
if self.predictor.preprocessor.config.agg_level == "row":
base_reqs.append(RowsField)
elif self.predictor.preprocessor.config.agg_level == "block":
base_reqs.append(BlocksField)
return base_reqs
class HVILATokenClassificationPredictor(BaseSinglePageTokenClassificationPredictor):
VILA_MODEL_CLASS = HierarchicalPDFPredictor
@property
def REQUIRED_DOCUMENT_FIELDS(self) -> List:
base_reqs = [PagesField, TokensField]
if self.predictor.preprocessor.config.agg_level == "row":
base_reqs.append(RowsField)
elif self.predictor.preprocessor.config.agg_level == "block":
base_reqs.append(BlocksField)
return base_reqs
| 5,137 | 34.191781 | 157 | py |
mmda | mmda-main/src/mmda/predictors/hf_predictors/span_group_classification_predictor.py | """
@kylel
"""
from typing import List, Any, Tuple, Optional, Sequence
from collections import defaultdict
import numpy as np
import torch
import transformers
from smashed.interfaces.simple import (
TokenizerMapper,
UnpackingMapper,
FixedBatchSizeMapper,
FromTokenizerListCollatorMapper,
Python2TorchMapper,
)
from mmda.types.metadata import Metadata
from mmda.types.annotation import Annotation, Span, SpanGroup
from mmda.types.document import Document
from mmda.predictors.hf_predictors.base_hf_predictor import BaseHFPredictor
class SpanGroupClassificationBatch:
def __init__(
self,
input_ids: List[List[int]],
attention_mask: List[List[int]],
span_group_ids: List[List[Optional[int]]],
context_id: List[int]
):
assert len(input_ids) == len(attention_mask) == len(span_group_ids) == len(context_id), \
f"Inputs to batch arent same length"
self.batch_size = len(input_ids)
assert [len(example) for example in input_ids] == \
[len(example) for example in attention_mask] == \
[len(example) for example in span_group_ids], f"Examples in batch arent same length"
self.input_ids = input_ids
self.attention_mask = attention_mask
self.span_group_ids = span_group_ids
self.context_id = context_id
class SpanGroupClassificationPrediction:
def __init__(self, context_id: int, span_group_id: int, label: str, score: float):
self.context_id = context_id
self.span_group_id = span_group_id
self.label = label
self.score = score
class SpanGroupClassificationPredictor(BaseHFPredictor):
"""
This is a generic wrapper around Huggingface Token Classification models.
First, we need a `span_group_name` which defines the Document field that we will treat as the
target unit of prediction. For example, if `span_group_name` is 'tokens', then we expect
to classify every Document.token. But technically, `span_group_name` could be anything,
such as `words` or `rows` or any SpanGroup.
Second, we need a `context_name` which defines the Document field that we will treat as the
intuitive notion of an "example" that we want to run our model over. For example, if
`context_name` is 'pages', then we'll loop over each page, running our classifier
over all the 'tokens' in each page. If the `context_name` is `bib_entries`, then we'll
loop over each bib entry, running our classifier over the 'tokens' in each page.
    The key consequence of defining a `context_name` is that, when the model constructs
    batches of sequences that fit within the Huggingface transformer's window, it will
    *not* mix sequences from different contexts into the same batch.
@kylel
"""
REQUIRED_BACKENDS = ["transformers", "torch", "smashed"]
REQUIRED_DOCUMENT_FIELDS = []
_SPAN_GROUP = 'inputs'
_CONTEXT_ID = 'context_id'
_HF_RESERVED_INPUT_IDS = 'input_ids'
_HF_RESERVED_ATTN_MASK = 'attention_mask'
_HF_RESERVED_WORD_IDS = 'word_ids'
_HF_RESERVED_WORD_PAD_VALUE = -1
def __init__(
self,
model: Any,
config: Any,
tokenizer: Any,
span_group_name: str,
context_name: str,
batch_size: Optional[int] = 2,
device: Optional[str] = 'cpu'
):
super().__init__(model=model, config=config, tokenizer=tokenizer)
self.span_group_name = span_group_name
self.context_name = context_name
self.batch_size = batch_size
self.device = device
# handles tokenization, sliding window, truncation, subword to input word mapping, etc.
self.tokenizer_mapper = TokenizerMapper(
input_field=self._SPAN_GROUP,
tokenizer=tokenizer,
is_split_into_words=True,
add_special_tokens=True,
truncation=True,
max_length=model.config.max_position_embeddings,
return_overflowing_tokens=True,
return_word_ids=True
)
# since input data is automatically chunked into segments (e.g. 512 length),
# each example <dict> actually becomes many input sequences.
# this mapper unpacks all of this into one <dict> per input sequence.
        # we set `repeat` because we want other fields (`context_id`) to repeat across sequences
self.unpacking_mapper = UnpackingMapper(
fields_to_unpack=[
self._HF_RESERVED_INPUT_IDS,
self._HF_RESERVED_ATTN_MASK,
self._HF_RESERVED_WORD_IDS
],
ignored_behavior='repeat'
)
# at the end of this, each <dict> contains <lists> of length `batch_size`
# where each element is variable length within the `max_length` limit.
# `keep_last` controls whether we want partial batches, which we always do
        # for token classification (i.e. we don't want to miss anything!)
self.batch_size_mapper = FixedBatchSizeMapper(
batch_size=batch_size,
keep_last=True
)
# this performs padding so all sequences in a batch are of same length
self.list_collator_mapper = FromTokenizerListCollatorMapper(
tokenizer=tokenizer,
            pad_to_length=None,  # keep this `None` so padding is dynamic per batch
fields_pad_ids={
self._HF_RESERVED_WORD_IDS: self._HF_RESERVED_WORD_PAD_VALUE
}
)
# this casts python Dict[List] into tensors. if using GPU, would do `device='gpu'`
self.python_to_torch_mapper = Python2TorchMapper(
device=device
)
# combining everything
self.preprocess_mapper = self.tokenizer_mapper >> \
self.unpacking_mapper >> \
self.batch_size_mapper
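
        # End to end: each context becomes one {'inputs': [...], 'context_id': i}
        # dict, is tokenized (with sliding-window overflow), unpacked into one
        # dict per sequence, and grouped into fixed-size batches; tokens from two
        # different contexts never share a single packed sequence.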
@classmethod
def from_pretrained(
cls,
model_name_or_path: str,
span_group_name: str,
context_name: str,
batch_size: Optional[int] = 2,
device: Optional[str] = 'cpu',
*args,
**kwargs
):
"""If `model_name_or_path` is a path, should be a directory
containing `vocab.txt`, `config.json`, and `pytorch_model.bin`
NOTE: slightly annoying, but if loading in this way, the `_name_or_path`
in `model.config` != `config`.
"""
tokenizer = transformers.AutoTokenizer.from_pretrained(
pretrained_model_name_or_path=model_name_or_path, *args, **kwargs
)
config = transformers.AutoConfig.from_pretrained(
pretrained_model_name_or_path=model_name_or_path, *args, **kwargs
)
model = transformers.AutoModelForTokenClassification.from_pretrained(
pretrained_model_name_or_path=model_name_or_path, *args, **kwargs
)
predictor = cls(model=model, config=config, tokenizer=tokenizer,
span_group_name=span_group_name, context_name=context_name,
batch_size=batch_size, device=device)
return predictor
def preprocess(
self,
document: Document,
context_name: str
) -> List[SpanGroupClassificationBatch]:
"""Processes document into whatever makes sense for the Huggingface model"""
# (1) get it into a dictionary format that Smashed expects
dataset = [
{
self._SPAN_GROUP: [sg.text for sg in getattr(context, self.span_group_name)],
self._CONTEXT_ID: i
}
for i, context in enumerate(getattr(document, context_name))
]
# (2) apply Smashed
batch_dicts = self.preprocess_mapper.map(dataset=dataset)
# (3) convert dicts to objects
return [
# slightly annoying, but the names `input_ids`, `attention_mask` and `word_ids` are
# reserved and produced after tokenization, which is why hard-coded here.
SpanGroupClassificationBatch(
input_ids=batch_dict[self._HF_RESERVED_INPUT_IDS],
attention_mask=batch_dict[self._HF_RESERVED_ATTN_MASK],
span_group_ids=batch_dict[self._HF_RESERVED_WORD_IDS],
context_id=batch_dict[self._CONTEXT_ID]
) for batch_dict in batch_dicts
]
def postprocess(
self,
doc: Document,
context_name: str,
preds: List[SpanGroupClassificationPrediction]
) -> List[Annotation]:
"""This function handles a bunch of nonsense that happens with Huggingface models &
how we processed the data. Namely:
Because Huggingface might drop tokens during the course of tokenization
we need to organize our predictions into a Lookup <dict> and cross-reference
with the original input SpanGroups to make sure they all got classified.
"""
# (1) organize predictions into a Lookup at the (Context, SpanGroup) level.
context_id_to_span_group_id_to_pred = defaultdict(dict)
for pred in preds:
context_id_to_span_group_id_to_pred[pred.context_id][pred.span_group_id] = pred
# (2) iterate through original data to check against that Lookup
annotations: List[Annotation] = []
for i, context in enumerate(getattr(doc, context_name)):
for j, span_group in enumerate(getattr(context, self.span_group_name)):
pred = context_id_to_span_group_id_to_pred[i].get(j, None)
# TODO: double-check whether this deepcopy is needed...
new_metadata = Metadata.from_json(span_group.metadata.to_json())
if pred is not None:
new_metadata.label = pred.label
new_metadata.score = pred.score
else:
new_metadata.label = None
new_metadata.score = None
new_span_group = SpanGroup(
spans=span_group.spans,
box_group=span_group.box_group,
metadata=new_metadata
)
annotations.append(new_span_group)
return annotations
def predict(self, document: Document) -> List[Annotation]:
# (0) Check fields
assert self.span_group_name in document.fields, f"Input doc missing {self.span_group_name}"
assert self.context_name in document.fields, f"Input doc missing {self.context_name}"
# (1) Make batches
batches: List[SpanGroupClassificationBatch] = self.preprocess(
document=document, context_name=self.context_name
)
# (2) Predict each batch.
preds: List[SpanGroupClassificationPrediction] = []
for batch in batches:
for pred in self._predict_batch(batch=batch):
preds.append(pred)
# (3) Postprocess into proper Annotations
annotations = self.postprocess(doc=document, context_name=self.context_name, preds=preds)
return annotations
def _predict_batch(
self,
batch: SpanGroupClassificationBatch
) -> List[SpanGroupClassificationPrediction]:
#
# preprocessing!! (padding & tensorification)
#
pytorch_batch = self.python_to_torch_mapper.transform(
data=self.list_collator_mapper.transform(
data={
self._HF_RESERVED_INPUT_IDS: batch.input_ids,
self._HF_RESERVED_ATTN_MASK: batch.attention_mask
}
)
)
#
# inference!! (preferably on gpu)
#
# TODO: add something here for gpu migration
pytorch_output = self.model(**pytorch_batch)
scores_tensor = torch.softmax(pytorch_output.logits, dim=2)
token_scoresss = [
[
token_scores for token_scores, yn in zip(token_scoress, yns)
if yn == 1
]
for token_scoress, yns in zip(scores_tensor.tolist(), batch.attention_mask)
]
#
# postprocessing (map back to original inputs)!!
#
preds = []
for j, (context_id, word_ids, token_scoress) in enumerate(zip(
batch.context_id,
batch.span_group_ids,
token_scoresss)
):
for word_id, token_scores, is_valid_pred in zip(
word_ids,
token_scoress,
self._token_pooling_strategy_mask(word_ids=word_ids)
):
if word_id is None or is_valid_pred is False:
continue
else:
label_id = np.argmax(token_scores)
pred = SpanGroupClassificationPrediction(
context_id=context_id,
span_group_id=word_id,
label=self.config.id2label[label_id],
score=token_scores[label_id]
)
preds.append(pred)
return preds
def _token_pooling_strategy_mask(
self,
token_ids: Optional[List[int]] = None,
word_ids: Optional[List[int]] = None,
token_scores: Optional[List[Tuple[float, float]]] = None,
strategy: str = 'first'
) -> List[bool]:
"""
words are split into multiple tokens, each of which has a prediction.
there are multiple strategies to decide the model prediction at a word-level:
1) 'first': take only the first token prediction for whole word
2) 'max': take the highest scoring token prediction for whole word
3) ...
"""
if strategy == 'first':
mask = [True]
prev_word_id = word_ids[0]
for current_word_id in word_ids[1:]:
if current_word_id == prev_word_id:
mask.append(False)
else:
mask.append(True)
prev_word_id = current_word_id
else:
raise NotImplementedError(f"mode {strategy} not implemented yet")
# if no word ID (e.g. [cls], [sep]), always mask
mask = [
is_word if word_id is not None else False
for is_word, word_id in zip(mask, word_ids)
]
return mask
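
    # Worked example of the 'first' strategy (editorial illustration):
    #   word_ids = [None, 0, 0, 1, None]   # [CLS], two sub-tokens of word 0, word 1, [SEP]
    #   mask     = [False, True, False, True, False]
    # i.e. only the first sub-token of each word contributes a prediction.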
| 14,554 | 39.543175 | 99 | py |
mmda | mmda-main/src/mmda/predictors/hf_predictors/base_hf_predictor.py | from abc import abstractmethod
from typing import Union, List, Dict, Any
from transformers import AutoTokenizer, AutoConfig, AutoModel
from mmda.types.document import Document
from mmda.predictors.base_predictors.base_predictor import BasePredictor
class BaseHFPredictor(BasePredictor):
REQUIRED_BACKENDS = ["transformers", "torch"]
def __init__(self, model: Any, config: Any, tokenizer: Any):
self.model = model
self.config = config
self.tokenizer = tokenizer
@classmethod
def from_pretrained(cls, model_name_or_path: str, *args, **kwargs):
config = AutoConfig.from_pretrained(model_name_or_path)
model = AutoModel.from_pretrained(
model_name_or_path, config=config, *args, **kwargs
)
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
return cls(model, config, tokenizer)
@abstractmethod
def preprocess(self, document: Document) -> Dict:
"""Convert the input document into the format that is required
by the model.
"""
@abstractmethod
def postprocess(self, model_outputs: Any) -> Dict:
"""Convert the model outputs into the Annotation format""" | 1,206 | 32.527778 | 72 | py |