code stringlengths 17 6.64M |
|---|
def ResNet50():
    """ResNet-50: Bottleneck blocks with per-stage counts [3, 4, 6, 3]."""
    stage_sizes = [3, 4, 6, 3]
    return ResNet(Bottleneck, stage_sizes)
|
def ResNet101():
    """ResNet-101: Bottleneck blocks with per-stage counts [3, 4, 23, 3]."""
    stage_sizes = [3, 4, 23, 3]
    return ResNet(Bottleneck, stage_sizes)
|
def ResNet152():
    """ResNet-152: Bottleneck blocks with per-stage counts [3, 8, 36, 3]."""
    stage_sizes = [3, 8, 36, 3]
    return ResNet(Bottleneck, stage_sizes)
|
def test():
    """Smoke test: run a ResNet-18 on one CIFAR-sized input and print the shape.

    NOTE(review): several chunks in this file define `test()`; later
    definitions shadow earlier ones at import time.
    """
    model = ResNet18()
    out = model(torch.randn(1, 3, 32, 32))
    print(out.size())
|
class Block(nn.Module):
    """Grouped-convolution (ResNeXt-style) bottleneck block."""
    expansion = 2

    def __init__(self, in_planes, cardinality=32, bottleneck_width=4, stride=1):
        super(Block, self).__init__()
        width = cardinality * bottleneck_width
        out_width = self.expansion * width
        # 1x1 reduce -> 3x3 grouped conv -> 1x1 expand.
        self.conv1 = nn.Conv2d(in_planes, width, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(width)
        self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride,
                               padding=1, groups=cardinality, bias=False)
        self.bn2 = nn.BatchNorm2d(width)
        self.conv3 = nn.Conv2d(width, out_width, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_width)
        # Projection shortcut whenever the shape changes, identity otherwise.
        if stride != 1 or in_planes != out_width:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, out_width, kernel_size=1,
                          stride=stride, bias=False),
                nn.BatchNorm2d(out_width),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        y = F.relu(self.bn1(self.conv1(x)))
        y = F.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y = y + self.shortcut(x)
        return F.relu(y)
|
class ResNeXt(nn.Module):
    """ResNeXt for 32x32 (CIFAR-style) inputs, built from grouped Blocks."""

    def __init__(self, num_blocks, cardinality, bottleneck_width, num_classes=10):
        super(ResNeXt, self).__init__()
        self.cardinality = cardinality
        self.bottleneck_width = bottleneck_width
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(num_blocks[0], 1)
        self.layer2 = self._make_layer(num_blocks[1], 2)
        self.layer3 = self._make_layer(num_blocks[2], 2)
        # Width doubles per stage, so the final feature width is
        # cardinality * bottleneck_width * 8.
        self.linear = nn.Linear(cardinality * bottleneck_width * 8, num_classes)

    def _make_layer(self, num_blocks, stride):
        stage = []
        for s in [stride] + [1] * (num_blocks - 1):
            stage.append(Block(self.in_planes, self.cardinality,
                               self.bottleneck_width, s))
            self.in_planes = Block.expansion * self.cardinality * self.bottleneck_width
        # Double the bottleneck width for the next stage.
        self.bottleneck_width *= 2
        return nn.Sequential(*stage)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        h = self.layer3(self.layer2(self.layer1(h)))
        h = F.avg_pool2d(h, 8)
        h = h.view(h.size(0), -1)
        return self.linear(h)
|
def ResNeXt29_2x64d():
    """ResNeXt-29 with cardinality 2 and bottleneck width 64."""
    return ResNeXt(num_blocks=[3, 3, 3], cardinality=2, bottleneck_width=64)
|
def ResNeXt29_4x64d():
    """ResNeXt-29 with cardinality 4 and bottleneck width 64."""
    return ResNeXt(num_blocks=[3, 3, 3], cardinality=4, bottleneck_width=64)
|
def ResNeXt29_8x64d():
    """ResNeXt-29 with cardinality 8 and bottleneck width 64."""
    return ResNeXt(num_blocks=[3, 3, 3], cardinality=8, bottleneck_width=64)
|
def ResNeXt29_32x4d():
    """ResNeXt-29 with cardinality 32 and bottleneck width 4."""
    return ResNeXt(num_blocks=[3, 3, 3], cardinality=32, bottleneck_width=4)
|
def test_resnext():
    """Smoke test: run ResNeXt29_2x64d on one CIFAR-sized input and print the shape."""
    model = ResNeXt29_2x64d()
    sample = torch.randn(1, 3, 32, 32)
    print(model(sample).size())
|
class BasicBlock(nn.Module):
    """SE-ResNet basic block: two 3x3 convs gated by a squeeze-and-excitation path.

    The SE path global-average-pools the residual output, squeezes the
    channels by 16x through fc1, re-expands through fc2, and uses a sigmoid
    gate as a per-channel attention weight.
    """

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Projection shortcut when the spatial size or channel count changes.
        self.shortcut = nn.Sequential()
        if (stride != 1) or (in_planes != planes):
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes))
        # SE gate implemented with 1x1 convs (planes -> planes/16 -> planes).
        self.fc1 = nn.Conv2d(planes, (planes // 16), kernel_size=1)
        self.fc2 = nn.Conv2d((planes // 16), planes, kernel_size=1)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Squeeze: global average pool to 1x1 per channel.
        w = F.avg_pool2d(out, out.size(2))
        w = F.relu(self.fc1(w))
        # FIX: F.sigmoid is deprecated (and removed in recent PyTorch
        # releases); torch.sigmoid is the supported equivalent.
        w = torch.sigmoid(self.fc2(w))
        # Excite: per-channel gating of the residual branch.
        out = (out * w)
        out += self.shortcut(x)
        out = F.relu(out)
        return out
|
class PreActBlock(nn.Module):
    """Pre-activation SE block: BN-ReLU precede each conv; SE gate on the output.

    Unlike BasicBlock, the shortcut is taken from the pre-activated input
    and no final ReLU is applied after the residual addition.
    """

    def __init__(self, in_planes, planes, stride=1):
        super(PreActBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        # Shortcut only exists when the shape changes; forward() checks hasattr.
        if (stride != 1) or (in_planes != planes):
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False))
        # SE gate implemented with 1x1 convs (planes -> planes/16 -> planes).
        self.fc1 = nn.Conv2d(planes, (planes // 16), kernel_size=1)
        self.fc2 = nn.Conv2d((planes // 16), planes, kernel_size=1)

    def forward(self, x):
        out = F.relu(self.bn1(x))
        shortcut = (self.shortcut(out) if hasattr(self, 'shortcut') else x)
        out = self.conv1(out)
        out = self.conv2(F.relu(self.bn2(out)))
        # Squeeze-and-excitation gating.
        w = F.avg_pool2d(out, out.size(2))
        w = F.relu(self.fc1(w))
        # FIX: F.sigmoid is deprecated (and removed in recent PyTorch
        # releases); torch.sigmoid is the supported equivalent.
        w = torch.sigmoid(self.fc2(w))
        out = (out * w)
        out += shortcut
        return out
|
class SENet(nn.Module):
    """Squeeze-and-Excitation network for 32x32 inputs.

    `block` is a callable (in_planes, planes, stride) -> nn.Module.
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super(SENet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage downsamples.
        stage = []
        for s in [stride] + [1] * (num_blocks - 1):
            stage.append(block(self.in_planes, planes, s))
            self.in_planes = planes
        return nn.Sequential(*stage)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            h = stage(h)
        h = F.avg_pool2d(h, 4)
        h = h.view(h.size(0), -1)
        return self.linear(h)
|
def SENet18():
    """SE-ResNet-18: pre-activation SE blocks, two per stage."""
    return SENet(PreActBlock, [2, 2, 2, 2])
|
def test():
    """Smoke test: run SENet18 on one CIFAR-sized input and print the shape.

    NOTE(review): several chunks in this file define `test()`; later
    definitions shadow earlier ones at import time.
    """
    model = SENet18()
    out = model(torch.randn(1, 3, 32, 32))
    print(out.size())
|
class ShuffleBlock(nn.Module):
    """Channel shuffle used between grouped convolutions (ShuffleNet)."""

    def __init__(self, groups):
        super(ShuffleBlock, self).__init__()
        self.groups = groups

    def forward(self, x):
        'Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,w] -> [N,C,H,W]'
        (N, C, H, W) = x.size()
        g = self.groups
        # BUG FIX: C / g is a float under Python 3, which makes
        # Tensor.view raise; integer division is required here.
        return x.view(N, g, (C // g), H, W).permute(0, 2, 1, 3, 4).contiguous().view(N, C, H, W)
|
class Bottleneck(nn.Module):
    """ShuffleNet unit: 1x1 group conv -> channel shuffle -> 3x3 depthwise -> 1x1 group conv.

    With stride 2 the unit downsamples and concatenates an average-pooled
    shortcut onto the conv path (so the path only needs to produce
    out_planes minus the shortcut's channels); with stride 1 it adds the
    identity shortcut instead.
    """

    def __init__(self, in_planes, out_planes, stride, groups):
        super(Bottleneck, self).__init__()
        self.stride = stride
        # BUG FIX: out_planes / 4 is a float under Python 3 and is rejected
        # by nn.Conv2d / nn.BatchNorm2d as a channel count; use //.
        mid_planes = (out_planes // 4)
        # The stem output (24 channels) is too thin for grouped 1x1 convs,
        # so the first unit uses a single group.
        g = (1 if (in_planes == 24) else groups)
        self.conv1 = nn.Conv2d(in_planes, mid_planes, kernel_size=1, groups=g, bias=False)
        self.bn1 = nn.BatchNorm2d(mid_planes)
        self.shuffle1 = ShuffleBlock(groups=g)
        # Depthwise 3x3 (groups == channels).
        self.conv2 = nn.Conv2d(mid_planes, mid_planes, kernel_size=3, stride=stride, padding=1, groups=mid_planes, bias=False)
        self.bn2 = nn.BatchNorm2d(mid_planes)
        self.conv3 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, groups=groups, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)
        self.shortcut = nn.Sequential()
        if (stride == 2):
            self.shortcut = nn.Sequential(nn.AvgPool2d(3, stride=2, padding=1))

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.shuffle1(out)
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        res = self.shortcut(x)
        # stride 2: concatenate shortcut channels; stride 1: residual add.
        out = (F.relu(torch.cat([out, res], 1)) if (self.stride == 2) else F.relu((out + res)))
        return out
|
class ShuffleNet(nn.Module):
    """ShuffleNet for 32x32 inputs; cfg supplies out_planes, num_blocks, groups."""

    def __init__(self, cfg):
        super(ShuffleNet, self).__init__()
        out_planes = cfg['out_planes']
        num_blocks = cfg['num_blocks']
        groups = cfg['groups']
        self.conv1 = nn.Conv2d(3, 24, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(24)
        self.in_planes = 24
        self.layer1 = self._make_layer(out_planes[0], num_blocks[0], groups)
        self.layer2 = self._make_layer(out_planes[1], num_blocks[1], groups)
        self.layer3 = self._make_layer(out_planes[2], num_blocks[2], groups)
        self.linear = nn.Linear(out_planes[2], 10)

    def _make_layer(self, out_planes, num_blocks, groups):
        stage = []
        for i in range(num_blocks):
            first = (i == 0)
            # The first unit downsamples and concatenates its shortcut, so
            # its conv path only produces out_planes - in_planes channels.
            produced = out_planes - self.in_planes if first else out_planes
            stage.append(Bottleneck(self.in_planes, produced,
                                    stride=2 if first else 1, groups=groups))
            self.in_planes = out_planes
        return nn.Sequential(*stage)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3):
            h = stage(h)
        h = F.avg_pool2d(h, 4)
        h = h.view(h.size(0), -1)
        return self.linear(h)
|
def ShuffleNetG2():
    """ShuffleNet with 2 groups."""
    return ShuffleNet({'out_planes': [200, 400, 800],
                       'num_blocks': [4, 8, 4],
                       'groups': 2})
|
def ShuffleNetG3():
    """ShuffleNet with 3 groups."""
    return ShuffleNet({'out_planes': [240, 480, 960],
                       'num_blocks': [4, 8, 4],
                       'groups': 3})
|
def test():
    """Smoke test: run ShuffleNetG2 on one CIFAR-sized input and print the output.

    NOTE(review): several chunks in this file define `test()`; later
    definitions shadow earlier ones at import time.
    """
    model = ShuffleNetG2()
    sample = torch.randn(1, 3, 32, 32)
    print(model(sample))
|
def VGG19():
    """VGG-19 built from the module-level `cfg` table."""
    return VGG('VGG19')
|
class VGG(nn.Module):
    """VGG for 32x32 inputs; the layer spec is looked up in the module-level
    `cfg` dict by name (e.g. 'VGG11', 'VGG19')."""

    def __init__(self, vgg_name):
        super(VGG, self).__init__()
        self.features = self._make_layers(cfg[vgg_name])
        self.classifier = nn.Linear(512, 10)

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)

    def _make_layers(self, cfg):
        # Spec entries are either 'M' (2x2 max-pool) or an output channel count.
        layers = []
        in_channels = 3
        for spec in cfg:
            if spec == 'M':
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                layers.extend([
                    nn.Conv2d(in_channels, spec, kernel_size=3, padding=1),
                    nn.BatchNorm2d(spec),
                    nn.ReLU(inplace=True),
                ])
                in_channels = spec
        # Trailing 1x1 average pool is a no-op kept for checkpoint parity.
        layers.append(nn.AvgPool2d(kernel_size=1, stride=1))
        return nn.Sequential(*layers)
|
def test():
    """Smoke test: run VGG11 on a batch of two CIFAR-sized inputs.

    NOTE(review): several chunks in this file define `test()`; later
    definitions shadow earlier ones at import time.
    """
    model = VGG('VGG11')
    batch = torch.randn(2, 3, 32, 32)
    print(model(batch).size())
|
def fmad(ys, xs, dxs):
    """Forward-mode AD (Jacobian-vector product) via the double-backward trick.

    Without native forward-mode support, a JVP can be obtained by taking a
    VJP of ys w.r.t. xs against dummy variables v (with create_graph=True),
    then differentiating that result w.r.t. v with grad_outputs=dxs.

    NOTE(review): `t.zeros_like` presumes the file imports torch under the
    alias `t` (other chunks here use `torch.` directly) — confirm the alias
    exists at module level. `grad` is presumably torch.autograd.grad.
    """
    v = [t.zeros_like(y, requires_grad=True) for y in ys]
    g = grad(ys, xs, grad_outputs=v, create_graph=True)
    return grad(g, v, grad_outputs=dxs)
|
def chunks(lst, n):
    'Yield successive n-sized chunks from lst.'
    # The final chunk may be shorter than n.
    for start in range(0, len(lst), n):
        yield lst[start:start + n]
|
class LogicDataset(Dataset):
    """Synthetic logic-reasoning dataset: (rules, facts, query) -> binary label.

    Raw examples are rewritten into flat pseudo-natural-language strings by
    convert_raw_example, shuffled, optionally filtered/truncated, and paired
    with a HuggingFace tokenizer; tokenization happens in collate_fn.
    """

    def __init__(self, examples, args=None, simple_tokenizer_vocab=None):
        self.simple_tokenizer_vocab = simple_tokenizer_vocab
        # BUG FIX: the keep_only_negative filter used to be clobbered by an
        # unconditional `self.examples = examples` immediately after it.
        if args.keep_only_negative:
            self.examples = [i for i in examples if (i['label'] == 0)]
        else:
            self.examples = examples
        for (index, example) in enumerate(self.examples):
            self.examples[index] = self.convert_raw_example(example)
        random.shuffle(self.examples)
        # limit_example_num == -1 means "keep everything".
        if (args.limit_example_num != (- 1)):
            self.examples = self.examples[:args.limit_example_num]
        self.tokenizer = AutoTokenizer.from_pretrained(
            (args.tokenizer_name if args.tokenizer_name else args.model_name_or_path),
            do_lower_case=args.do_lower_case,
            cache_dir=(args.cache_dir if args.cache_dir else None))
        self.max_length = args.max_length
        self.args = args
        if args.skip_long_examples:
            # The flag args.skip_long_examples gates the method of the same name.
            self.skip_long_examples()

    def __len__(self):
        return len(self.examples)

    def report_length(self):
        """Print the average and max tokenized length over the first 200 examples."""
        all_leng = []
        print('\n\n')
        total = 200
        for example in self.examples:
            leng = ((' '.join((example['rules'] + example['facts'])).lower() + ' ') + example['query'].lower())
            leng = len(self.tokenizer.tokenize(leng))
            all_leng.append(leng)
            if (len(all_leng) == total):
                break
        print('Average_length', (sum(all_leng) / total))
        print('Max', max(all_leng))
        print('\n\n')

    def report_allkinds_of_stats(self):
        """Print facts/preds and rules/preds count ratios averaged over the dataset."""
        print('\n\n')
        ratios = []  # renamed from `all` to avoid shadowing the builtin
        for example in self.examples:
            ratios.append((len(example['facts']) / len(example['preds'])))
        print('Number of fact percentage', (sum(ratios) / len(ratios)))
        ratios = []
        for example in self.examples:
            ratios.append((len(example['rules']) / len(example['preds'])))
        print('Number of rules percentage', (sum(ratios) / len(ratios)))

    def convert_raw_example(self, example):
        """Flatten a raw example's rules/facts/query into plain strings."""
        new_example = {}
        new_example['rules'] = []
        for rule in example['rules']:
            # rule is (list-of-antecedents ..., consequent): join antecedents
            # with 'and', then append the consequent.
            one_rule = ''
            one_rule += ' and '.join(rule[0])
            one_rule += ', '
            one_rule += rule[(- 1)]
            one_rule += ' .'
            new_example['rules'].append(one_rule)
        new_example['facts'] = []
        for fact in example['facts']:
            one_fact = 'Alice '
            one_fact += fact
            one_fact += ''
            new_example['facts'].append(one_fact)
        new_example['query'] = (('Query: Alice is ' + example['query']) + ' ?')
        new_example['label'] = example['label']
        new_example['depth'] = example['depth']
        new_example['preds'] = example['preds']
        return new_example

    def __getitem__(self, index):
        """Return (text_a, text_b, label, raw example) for one item."""
        example = self.examples[index]
        '\n "rules": [\n "If Person X is serious, Person Y drop Person X and Person X help Person Y, then Person X get Person Y.",\n "If Person X open Person Y and Person Y help Person X, then Person Y ride Person X."\n ],\n "facts": [\n "Alice is serious.",\n "Alice help Bob.",\n "Bob open Alice."\n ],\n "query": "Alice ride Bob",\n "label": 1\n '
        if self.args.ignore_fact:
            text_a = ' '.join(example['rules']).lower()
        elif self.args.ignore_both:
            text_a = ' '
        else:
            text_a = ' '.join((example['rules'] + example['facts'])).lower()
        if self.args.ignore_query:
            text_b = ' '
        else:
            text_b = example['query'].lower()
        if self.args.shorten_input:
            # BUG FIX: str.strip returns a new string; the results used to be
            # discarded, making shorten_input a no-op. NOTE(review):
            # strip('If') removes the characters 'I'/'f' from both ends, not
            # the word -- confirm the intended behavior.
            text_a = text_a.strip('If')
            text_a = text_a.strip('then')
            text_b = text_b.strip('If')
            text_b = text_b.strip('then')
        return (text_a, text_b, example['label'], example)

    def collate_fn(self, examples):
        """Tokenize a batch; T5 labels become text ('true'/'false') with pads masked to -100."""
        batch_encoding = self.tokenizer([(example[0], example[1]) for example in examples], max_length=self.max_length, padding='longest', truncation=True)
        if ('t5' in self.args.model_name_or_path):
            labels_as_text = [('true' if (example[2] == 1) else 'false') for example in examples]
            target_encoding = self.tokenizer(labels_as_text, padding='longest', max_length=self.max_length, truncation=True)
            label_ids = torch.tensor(target_encoding.input_ids)
            # -100 is ignored by the seq2seq loss.
            label_ids[(label_ids == self.tokenizer.pad_token_id)] = (- 100)
        else:
            label_ids = torch.LongTensor([example[2] for example in examples])
        return (torch.LongTensor(batch_encoding['input_ids']),
                torch.LongTensor(batch_encoding['attention_mask']),
                (torch.LongTensor(batch_encoding['token_type_ids']) if ('token_type_ids' in batch_encoding) else torch.LongTensor([1])),
                label_ids,
                [example[(- 1)] for example in examples])

    def skip_long_examples(self):
        """Drop examples whose untruncated encoding exceeds 650 tokens."""
        keep = []
        counter = 0
        for i in tqdm(range(len(self))):
            example = self[i]
            batch_encoding = self.tokenizer([(example[0], example[1])], max_length=self.max_length, padding='longest', truncation=False)
            if (len(batch_encoding['input_ids'][0]) > 650):
                print('Over limit')
                counter += 1
            else:
                keep.append(i)
        print('Skipped ', counter, 'out of', len(self))
        self.examples = [self.examples[i] for i in keep]

    def limit_length(self, new_length):
        """Truncate the dataset to at most new_length examples."""
        print('Limiting {} to {}'.format(len(self), new_length))
        self.examples = self.examples[:new_length]

    @staticmethod
    def split_dataset(file_name):
        """Shuffle (seed 0) and split 80/10/10 into _train/_val/_test JSON files."""
        # FIX: close the input file (was json.load(open(...))).
        with open(file_name) as f:
            all_examples = json.load(f)
        random.seed(0)
        random.shuffle(all_examples)
        tenth = (len(all_examples) // 10)
        train_examples = all_examples[:(tenth * 8)]
        dev_examples = all_examples[(tenth * 8):(tenth * 9)]
        test_examples = all_examples[(tenth * 9):]
        with open((file_name + '_train'), 'w') as f:
            json.dump(train_examples, f)
        with open((file_name + '_val'), 'w') as f:
            json.dump(dev_examples, f)
        with open((file_name + '_test'), 'w') as f:
            json.dump(test_examples, f)
        return

    @classmethod
    def initialze_from_file(cls, file, args):
        """Build a dataset from one file or a comma-separated list of files.

        (Name kept as-is -- including the historical typo -- for callers.)
        """
        if (',' in file):
            files = file.split(',')
        else:
            files = [file]
        all_examples = []
        for file in files:
            with open(file) as f:
                examples = json.load(f)
            all_examples.extend(examples)
        return cls(all_examples, args)

    @classmethod
    def initialize_from_file_by_depth(cls, file, args):
        """Build one dataset per reasoning depth; returns {depth: dataset}."""
        examples_by_depth = cls.load_examples_by_depth(file, depth=args.group_by_which_depth)
        datasets_by_depth = {}
        for (depth, _data) in examples_by_depth.items():
            datasets_by_depth[depth] = cls(_data, args)
        return datasets_by_depth

    @staticmethod
    def load_examples_by_depth(file, depth='depth'):
        """Load a JSON example file grouped by the given depth key."""
        with open(file) as f:
            examples = json.load(f)
        examples_by_depth = defaultdict(list)
        for example in examples:
            examples_by_depth[example[depth]].append(example)
        return examples_by_depth
|
def limit_examples(examples_by_depth, max_depth_during_train, control_num=2000):
    """Balance a {depth: [examples]} mapping for training.

    Drops depths beyond max_depth_during_train, then caps every remaining
    depth at control_num examples (after shuffling). Mutates
    examples_by_depth in place and returns the flattened example list.

    Raises AssertionError if the max-depth bucket has fewer than
    control_num examples.
    """
    for key in list(examples_by_depth.keys()):
        if (key > max_depth_during_train):
            del examples_by_depth[key]
    limit_length = len(examples_by_depth[max_depth_during_train])
    # FIX: corrected log-message typo ('lenght' -> 'length').
    print('Original length', limit_length)
    assert (limit_length >= control_num)
    limit_length = control_num
    print('Limiting to {}'.format(limit_length))
    all_examples = []
    for key in examples_by_depth:
        print('Length ', key)
        random.shuffle(examples_by_depth[key])
        examples_by_depth[key] = examples_by_depth[key][:limit_length]
        all_examples.extend(examples_by_depth[key])
    return all_examples
|
def merge_and_balance_dataset(file_name, file_range, max_depth_during_train, final_file_name, control_num, depth='depth'):
    """Merge INDEX-numbered JSON shards, balance each per depth, and write the result.

    file_name must contain the literal substring 'INDEX', which is replaced
    by the shard number; each shard is balanced via limit_examples before
    being appended to the merged output.
    """
    merged = []
    for shard_idx in range(file_range):
        print(shard_idx)
        with open(file_name.replace('INDEX', str(shard_idx))) as f:
            shard = json.load(f)
        by_depth = defaultdict(list)
        for ex in shard:
            by_depth[ex[depth]].append(ex)
        merged.extend(limit_examples(by_depth, max_depth_during_train, control_num=control_num))
    with open(final_file_name, 'w') as f:
        json.dump(merged, f)
|
@functools.lru_cache()
def _get_global_gloo_group():
    '\n    Return a process group based on gloo backend, containing all the ranks\n    The result is cached.\n    '
    # NCCL cannot gather CPU tensors, so create a parallel gloo group;
    # otherwise the default (already-gloo) world group works as-is.
    backend = dist.get_backend()
    if (backend == 'nccl'):
        return dist.new_group(backend='gloo')
    return dist.group.WORLD
|
def all_gather(data):
    '\n    Run all_gather on arbitrary picklable data (not necessarily tensors)\n    Args:\n        data: any picklable object\n    Returns:\n        list[data]: list of data gathered from each rank\n    '
    world_size = get_world_size()
    if (world_size == 1):
        # Single process: nothing to gather.
        return [data]
    # MDETR_CPU_REDUCE=1 routes the gather through a gloo (CPU) group
    # instead of the default (CUDA) backend.
    cpu_group = None
    if (os.getenv('MDETR_CPU_REDUCE') == '1'):
        cpu_group = _get_global_gloo_group()
    # Serialize the object into a byte tensor.
    buffer = io.BytesIO()
    torch.save(data, buffer)
    data_view = buffer.getbuffer()
    device = ('cuda' if (cpu_group is None) else 'cpu')
    tensor = torch.ByteTensor(data_view).to(device)
    # First gather every rank's payload size so buffers can be padded to a
    # common length (all_gather requires equal-sized tensors).
    local_size = torch.tensor([tensor.numel()], device=device, dtype=torch.long)
    size_list = [torch.tensor([0], device=device, dtype=torch.long) for _ in range(world_size)]
    if (cpu_group is None):
        dist.all_gather(size_list, local_size)
    else:
        print('gathering on cpu')
        dist.all_gather(size_list, local_size, group=cpu_group)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)
    assert isinstance(local_size.item(), int)
    local_size = int(local_size.item())
    # Pad the local payload to max_size, then gather all padded payloads.
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device=device))
    if (local_size != max_size):
        padding = torch.empty(size=((max_size - local_size),), dtype=torch.uint8, device=device)
        tensor = torch.cat((tensor, padding), dim=0)
    if (cpu_group is None):
        dist.all_gather(tensor_list, tensor)
    else:
        dist.all_gather(tensor_list, tensor, group=cpu_group)
    # Trim each gathered payload back to its true size and deserialize.
    data_list = []
    for (size, tensor) in zip(size_list, tensor_list):
        tensor = torch.split(tensor, [size, (max_size - size)], dim=0)[0]
        buffer = io.BytesIO(tensor.cpu().numpy())
        obj = torch.load(buffer)
        data_list.append(obj)
    return data_list
|
def reduce_dict(input_dict, average=True):
    '\n    Args:\n        input_dict (dict): all the values will be reduced\n        average (bool): whether to do average or sum\n    Reduce the values in the dictionary from all processes so that all processes\n    have the averaged results. Returns a dict with the same fields as\n    input_dict, after reduction.\n    '
    world_size = get_world_size()
    if (world_size < 2):
        return input_dict
    with torch.no_grad():
        # Sort keys so every rank stacks values in the same order.
        keys = sorted(input_dict.keys())
        stacked = torch.stack([input_dict[k] for k in keys], dim=0)
        dist.all_reduce(stacked)
        if average:
            stacked /= world_size
        return {k: v for (k, v) in zip(keys, stacked)}
|
def setup_for_distributed(is_master):
    '\n    This function disables printing when not in master process\n    '
    import builtins as __builtin__
    original_print = __builtin__.print

    def print(*args, **kwargs):
        # Non-master ranks may still force output with print(..., force=True).
        force = kwargs.pop('force', False)
        if (is_master or force):
            original_print(*args, **kwargs)

    __builtin__.print = print
|
def is_dist_avail_and_initialized():
    '\n    Returns:\n        True if distributed training is enabled\n    '
    return dist.is_available() and dist.is_initialized()
|
def get_world_size():
    '\n    Returns:\n        The number of processes in the process group\n    '
    # Falls back to 1 when not running under torch.distributed.
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1
|
def get_rank():
    '\n    Returns:\n        The rank of the current process within the global process group.\n    '
    # Falls back to 0 when not running under torch.distributed.
    return dist.get_rank() if is_dist_avail_and_initialized() else 0
|
def get_local_rank() -> int:
    '\n    Returns:\n        The rank of the current process within the local (per-machine) process group.\n    '
    if not (dist.is_available() and dist.is_initialized()):
        return 0
    assert (_LOCAL_PROCESS_GROUP is not None)
    return dist.get_rank(group=_LOCAL_PROCESS_GROUP)
|
def get_local_size() -> int:
    '\n    Returns:\n        The size of the per-machine process group,\n        i.e. the number of processes per machine.\n    '
    if not (dist.is_available() and dist.is_initialized()):
        return 1
    return dist.get_world_size(group=_LOCAL_PROCESS_GROUP)
|
def is_main_process():
    'Return true if the current process is the main one'
    # The main process is rank 0 of the global group.
    return get_rank() == 0
|
def save_on_master(*args, **kwargs):
    'Utility function to save only from the main process'
    if not is_main_process():
        return
    torch.save(*args, **kwargs)
|
def init_distributed_mode(args):
    """Initialize torch.distributed from the launcher environment, if any.

    Mutates args in place: sets args.rank, args.world_size, args.gpu,
    args.distributed and args.dist_backend. When no launcher environment is
    detected, marks args.distributed = False and returns without
    initializing a process group.
    """
    if (('RANK' in os.environ) and ('WORLD_SIZE' in os.environ)):
        # torchrun / torch.distributed.launch environment variables.
        args.rank = int(os.environ['RANK'])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    elif ('SLURM_PROCID' in os.environ):
        # SLURM launcher. NOTE(review): this branch never sets
        # args.world_size — presumably it is already populated on args
        # (e.g. from the CLI); confirm before relying on it.
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = (args.rank % torch.cuda.device_count())
    else:
        print('Not using distributed mode')
        args.distributed = False
        return
    args.distributed = True
    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}'.format(args.rank, args.dist_url), flush=True)
    # Generous 2-hour timeout (seconds) for slow rendezvous.
    dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank, timeout=datetime.timedelta(0, 7200))
    dist.barrier()
    # Silence print() on non-master ranks unless args.debug is set.
    setup_for_distributed((args.debug or (args.rank == 0)))
|
def set_seed(args):
    """Seed python, numpy and torch RNGs from args.seed (CUDA too when args.n_gpu > 0)."""
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
|
def train(args, train_dataset, model, tokenizer, eval_dataset=None):
    """Train `model` on `train_dataset`; returns (global_step, mean training loss).

    Supports gradient accumulation, optional apex fp16 / torch autocast,
    resuming from a checkpoint directory, DataParallel and
    DistributedDataParallel, periodic logging/saving, and a per-epoch
    checkpoint + evaluation on the main process.
    """
    args.train_batch_size = (args.per_gpu_train_batch_size * max(1, args.n_gpu))
    train_sampler = (RandomSampler(train_dataset) if (args.local_rank == (- 1)) else DistributedSampler(train_dataset))
    train_dataloader = DataLoader(train_dataset, collate_fn=train_dataset.collate_fn, sampler=train_sampler, batch_size=args.train_batch_size, num_workers=args.num_workers)
    # Either max_steps caps training (and epochs are derived), or the step
    # budget is derived from the epoch count.
    if (args.max_steps > 0):
        t_total = args.max_steps
        args.num_train_epochs = ((args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps)) + 1)
    else:
        t_total = ((len(train_dataloader) // args.gradient_accumulation_steps) * args.num_train_epochs)
    # Standard HF practice: no weight decay on biases and LayerNorm weights.
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
    if (args.resume_dir is not None):
        print('Resume training from: ', args.resume_dir)
        # A resume_dir ending in '--1' is treated as "no real checkpoint".
        if (not args.resume_dir.endswith('--1')):
            args.model_name_or_path = args.resume_dir
            print('Load Model Weight')
            model.load_state_dict(torch.load((args.resume_dir + '/pytorch_model.bin'), map_location='cpu'))
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    # args.warmup_steps is interpreted as a fraction of total steps here.
    num_warmup_steps = int((t_total * args.warmup_steps))
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=t_total)
    # Restore optimizer/scheduler state when resuming from a checkpoint dir.
    if (os.path.isfile(os.path.join(args.model_name_or_path, 'optimizer.pt')) and os.path.isfile(os.path.join(args.model_name_or_path, 'scheduler.pt'))):
        print('Loading optimizer and scheduler from checkpoints', args.model_name_or_path)
        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, 'optimizer.pt'), map_location='cpu'))
        scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, 'scheduler.pt'), map_location='cpu'))
    if (args.n_gpu > 1):
        model = torch.nn.DataParallel(model)
    if (args.local_rank != (- 1)):
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    print('***** Running training *****')
    print('  Num examples = %d', len(train_dataset))
    print('  Num Epochs = %d', args.num_train_epochs)
    print('  Instantaneous batch size per GPU = %d', args.per_gpu_train_batch_size)
    print('  Total train batch size (w. parallel, distributed & accumulation) = %d', ((args.train_batch_size * args.gradient_accumulation_steps) * (torch.distributed.get_world_size() if (args.local_rank != (- 1)) else 1)))
    print('  Gradient Accumulation steps = %d', args.gradient_accumulation_steps)
    print('  Total optimization steps = %d', t_total)
    global_step = 0
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    # Resuming: recover the epoch count from a 'checkpoint-N' path suffix.
    if os.path.exists(args.model_name_or_path):
        epochs_trained = int(args.model_name_or_path.split('-')[(- 1)].split('/')[0])
        epochs_trained += 1
        global_step = ((epochs_trained * len(train_dataloader)) * args.gradient_accumulation_steps)
        print('  Continuing training from checkpoint, will skip to saved global_step')
        print('  Continuing training starting from epoch %d', epochs_trained)
        print('  Continuing training from global step %d', global_step)
    (tr_loss, logging_loss) = (0.0, 0.0)
    model.zero_grad()
    train_iterator = trange(epochs_trained, int(args.num_train_epochs), desc='Epoch', disable=(args.local_rank not in [(- 1), 0]))
    set_seed(args)
    train_meter = TrainingMeter()
    epoch_num = epochs_trained
    for _ in train_iterator:
        epoch_iterator = train_dataloader
        for (step, batch) in enumerate(tqdm(epoch_iterator)):
            # collate_fn appends the raw examples as the last element.
            (batch, examples) = (batch[:(- 1)], batch[(- 1)])
            if args.skip_training:
                break
            if (steps_trained_in_current_epoch > 0):
                steps_trained_in_current_epoch -= 1
                continue
            model.train()
            batch = tuple((t.to(args.device) for t in batch))
            inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
            if ((args.model_type != 'distilbert') and (args.model_type != 't5')):
                # Only BERT-family models consume token_type_ids.
                inputs['token_type_ids'] = (batch[2] if (args.model_type in ['bert', 'xlnet', 'albert']) else None)
            with torch.cuda.amp.autocast(enabled=args.use_autocast):
                outputs = model(**inputs)
                loss = outputs[0]
            if (args.n_gpu > 1):
                # DataParallel returns one loss per GPU; average them.
                loss = loss.mean()
            if (args.gradient_accumulation_steps > 1):
                loss = (loss / args.gradient_accumulation_steps)
            if args.fp16:
                # apex fp16 path; `amp` presumably imported at module level.
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            logits = outputs[1]
            labels = batch[3]
            acc = ((logits.argmax((- 1)) == labels).sum().float() / labels.view((- 1)).size(0))
            train_meter.update({'loss': loss.item(), 'acc': acc.item()})
            tr_loss += loss.item()
            # Step the optimizer every gradient_accumulation_steps batches
            # (or on the final batch of a short epoch).
            if ((((step + 1) % args.gradient_accumulation_steps) == 0) or ((len(epoch_iterator) <= args.gradient_accumulation_steps) and ((step + 1) == len(epoch_iterator)))):
                if ((not args.fp16) and (not args.use_autocast)):
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()
                model.zero_grad()
                global_step += 1
                # Periodic logging on the main process.
                if ((args.local_rank in [(- 1), 0]) and (args.logging_steps > 0) and ((global_step % args.logging_steps) == 0)):
                    logs = {}
                    if ((args.local_rank == (- 1)) and args.evaluate_during_training):
                        results = evaluate(args, model, tokenizer)
                        for (key, value) in results.items():
                            eval_key = 'eval_{}'.format(key)
                            logs[eval_key] = value
                    loss_scalar = ((tr_loss - logging_loss) / args.logging_steps)
                    learning_rate_scalar = scheduler.get_lr()[0]
                    logs['learning_rate'] = learning_rate_scalar
                    logs['loss'] = loss_scalar
                    logging_loss = tr_loss
                    print(json.dumps({**logs, **{'step': global_step}, **{'step_per_epoch': (len(train_dataloader) // args.gradient_accumulation_steps)}}))
                    train_meter.report()
                    train_meter.clean()
                # Periodic checkpointing on the main process.
                if ((args.local_rank in [(- 1), 0]) and (args.save_steps > 0) and ((global_step % args.save_steps) == 0)):
                    output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
                    if (not os.path.exists(output_dir)):
                        os.makedirs(output_dir)
                    model_to_save = (model.module if hasattr(model, 'module') else model)
                    model_to_save.save_pretrained(output_dir)
                    tokenizer.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, 'training_args.bin'))
                    print('Saving model checkpoint to %s', output_dir)
            if ((args.max_steps > 0) and (global_step > args.max_steps)):
                epoch_iterator.close()
                break
        if ((args.max_steps > 0) and (global_step > args.max_steps)):
            train_iterator.close()
            break
        # End-of-epoch checkpoint + evaluation, main process only.
        if (args.local_rank <= 0):
            output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(epoch_num))
            print('Saving model checkpoint to ', output_dir)
            if (not os.path.exists(output_dir)):
                os.makedirs(output_dir)
            model_to_save = (model.module if hasattr(model, 'module') else model)
            model_to_save.save_pretrained(output_dir)
            torch.save(args, os.path.join(output_dir, 'training_args.bin'))
            torch.save(optimizer.state_dict(), os.path.join(output_dir, 'optimizer.pt'))
            torch.save(scheduler.state_dict(), os.path.join(output_dir, 'scheduler.pt'))
            evaluate(args, model, tokenizer, eval_dataset=eval_dataset)
        epoch_num += 1
    return (global_step, (tr_loss / global_step))
|
def evaluate(args, model, tokenizer, prefix='', eval_dataset=None):
    """Evaluate `model` on `eval_dataset` and return {'acc': ...}.

    T5 models are scored by exact-match of generated text against the
    decoded label text; classifier models by argmax over logits.
    Optionally (args.report_example_length) prints per-reasoning-depth
    accuracy and label distributions.
    """
    results = {}
    args.eval_batch_size = (args.per_gpu_eval_batch_size * max(1, args.n_gpu))
    eval_sampler = SequentialSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, collate_fn=eval_dataset.collate_fn, sampler=eval_sampler, batch_size=args.eval_batch_size)
    if ((args.n_gpu > 1) and (not isinstance(model, torch.nn.DataParallel))):
        model = torch.nn.DataParallel(model)
    print('***** Running evaluation {} *****'.format(prefix))
    print('  Num examples = %d', len(eval_dataset))
    print('  Batch size = %d', args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    preds = None           # accumulated classifier logits (numpy)
    out_label_ids = None   # accumulated gold labels (numpy)
    output_strings = []    # T5 generations
    label_strings = []     # T5 decoded gold labels
    results_by_reasoning_depth = defaultdict(int)
    counter_by_reasoning_depth = defaultdict(int)
    # Named (not lambda) so the defaultdict stays picklable.
    def nested_defaultdict():
        return defaultdict(int)
    label_distribution_by_reasoning_depth = defaultdict(nested_defaultdict)
    correct_or_not_all = defaultdict(list)
    correct_counter = 0
    total_counter = 0
    for (_, batch) in enumerate(tqdm(eval_dataloader, desc='Evaluating')):
        model.eval()
        # collate_fn appends the raw examples as the last element.
        (batch, examples) = (batch[:(- 1)], batch[(- 1)])
        batch = tuple((t.to(args.device) for t in batch))
        with torch.no_grad():
            inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
            if ((args.model_type != 'distilbert') and (args.model_type != 't5')):
                # Only BERT-family models consume token_type_ids.
                inputs['token_type_ids'] = (batch[2] if (args.model_type in ['bert', 'xlnet', 'albert']) else None)
            if (args.model_type == 't5'):
                # Generate text and compare against decoded label text.
                if hasattr(model, 'module'):
                    _module = model.module
                else:
                    _module = model
                output_sequences = _module.generate(input_ids=inputs['input_ids'], attention_mask=inputs['attention_mask'], do_sample=False)
                outputs = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)
                output_strings.extend(outputs)
                label_strings.extend(tokenizer.batch_decode(inputs['labels'], skip_special_tokens=True))
                nb_eval_steps += 1
            else:
                # Classifier path: accumulate loss and logits.
                outputs = model(**inputs)
                (tmp_eval_loss, logits) = outputs[:2]
                eval_loss += tmp_eval_loss.mean().item()
                nb_eval_steps += 1
                if (preds is None):
                    preds = logits.detach().cpu().numpy()
                    out_label_ids = inputs['labels'].detach().cpu().numpy()
                else:
                    preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                    out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
        if (args.model_type == 't5'):
            # NOTE(review): recomputed over ALL strings so far each batch;
            # totals therefore over-count earlier batches — confirm intent.
            correct_or_not = [(output_strings[i] == label_strings[i]) for i in range(len(output_strings))]
            correct_counter += sum(correct_or_not)
            total_counter += len(correct_or_not)
        if args.report_example_length:
            # Per-depth bookkeeping (classifier logits from this batch).
            correct_or_not = (logits.argmax((- 1)) == inputs['labels'].detach()).cpu().tolist()
            for index in range(len(examples)):
                results_by_reasoning_depth[examples[index]['depth']] += correct_or_not[index]
                counter_by_reasoning_depth[examples[index]['depth']] += 1
                label_distribution_by_reasoning_depth[examples[index]['depth']][examples[index]['label']] += 1
            for index in range(len(examples)):
                correct_or_not_all[examples[index]['example_index']].append(correct_or_not[index])
    if args.report_example_length:
        print()
        keys = list(results_by_reasoning_depth.keys())
        keys.sort()
        for key in keys:
            if (args.local_rank <= 0):
                print('  Depth {}: {}'.format(key, (results_by_reasoning_depth[key] / counter_by_reasoning_depth[key])))
                print('  Label_distribution {} : {}'.format(key, label_distribution_by_reasoning_depth[key]))
    if ('t5' in args.model_name_or_path):
        result = {'acc': (correct_counter / total_counter)}
        results.update(result)
    else:
        eval_loss = (eval_loss / nb_eval_steps)
        preds = np.argmax(preds, axis=1)
        result = {'acc': (out_label_ids == preds).mean()}
        results.update(result)
    return results
|
def main():
    """Entry point for fine-tuning / evaluating a sequence-classification model
    on logic-reasoning datasets.

    Parses CLI flags, sets up (optionally distributed) devices, loads a
    BERT/T5/Auto model with several optional weight-surgery steps, then runs
    training and/or per-depth evaluation, appending results to eval_result.txt.
    """
    # --- CLI flags -------------------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_type', default=None, type=str, required=True)
    parser.add_argument('--model_name_or_path', default=None, type=str, required=True)
    parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.')
    parser.add_argument('--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name')
    parser.add_argument('--tokenizer_name', default='', type=str, help='Pretrained tokenizer name or path if not the same as model_name')
    parser.add_argument('--cache_dir', default='', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
    parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
    parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
    parser.add_argument('--do_visualization', action='store_true')
    parser.add_argument('--evaluate_during_training', action='store_true', help='Run evaluation during training at each logging step.')
    parser.add_argument('--do_lower_case', action='store_true', help='Set this flag if you are using an uncased model.')
    parser.add_argument('--per_gpu_train_batch_size', default=8, type=int, help='Batch size per GPU/CPU for training.')
    parser.add_argument('--per_gpu_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
    parser.add_argument('--learning_rate', default=5e-05, type=float, help='The initial learning rate for Adam.')
    parser.add_argument('--weight_decay', default=0.0, type=float, help='Weight decay if we apply some.')
    parser.add_argument('--adam_epsilon', default=1e-06, type=float, help='Epsilon for Adam optimizer.')
    parser.add_argument('--max_grad_norm', default=1.0, type=float, help='Max gradient norm.')
    parser.add_argument('--num_train_epochs', default=3.0, type=float, help='Total number of training epochs to perform.')
    parser.add_argument('--max_steps', default=(- 1), type=int, help='If > 0: set total number of training steps to perform. Override num_train_epochs.')
    parser.add_argument('--warmup_steps', default=0, type=float, help='Linear warmup over warmup_steps.')
    parser.add_argument('--logging_steps', type=int, default=500, help='Log every X updates steps.')
    parser.add_argument('--save_steps', type=int, default=500, help='Save checkpoint every X updates steps.')
    parser.add_argument('--eval_all_checkpoints', action='store_true', help='Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number')
    parser.add_argument('--no_cuda', action='store_true', help='Avoid using CUDA when available')
    # NOTE(review): help text below is copy-pasted from --no_cuda; the flag
    # actually reinitializes the model (see args.from_scratch handling further down).
    parser.add_argument('--from_scratch', action='store_true', help='Avoid using CUDA when available')
    parser.add_argument('--overwrite_output_dir', action='store_true', help='Overwrite the content of the output directory')
    parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
    parser.add_argument('--nopooler', action='store_true', help='Do not load the pooler')
    parser.add_argument('--seed', type=int, default=9595, help='random seed for initialization')
    parser.add_argument('--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit')
    parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument('--custom_weight', type=str, default=None)
    parser.add_argument('--custom_config', type=str, default=None)
    parser.add_argument('--local_rank', type=int, default=(- 1), help='For distributed training: local_rank')
    parser.add_argument('--server_ip', type=str, default='', help='For distant debugging.')
    parser.add_argument('--server_port', type=str, default='', help='For distant debugging.')
    parser.add_argument('--max_length', type=int, default=128)
    parser.add_argument('--file_root', type=str, default=None)
    parser.add_argument('--file_path', type=str)
    parser.add_argument('--num_workers', type=int, default=2)
    parser.add_argument('--start_gradual_index', type=int, default=1)
    parser.add_argument('--load_bert_weight', type=str, default=None)
    parser.add_argument('--use_gradual_sampler', action='store_true')
    parser.add_argument('--limit_to_negative_examples', action='store_true')
    parser.add_argument('--limit_to_positive_examples', action='store_true')
    parser.add_argument('--skip_training', action='store_true')
    parser.add_argument('--further_split', action='store_true')
    parser.add_argument('--further_further_split', action='store_true')
    parser.add_argument('--report_example_length', action='store_true')
    parser.add_argument('--ignore_fact', action='store_true')
    parser.add_argument('--ignore_both', action='store_true')
    parser.add_argument('--ignore_query', action='store_true')
    parser.add_argument('--change_positional_embedding_after_loading', action='store_true')
    parser.add_argument('--change_positional_embedding_before_loading', action='store_true')
    parser.add_argument('--shorten_input', action='store_true')
    parser.add_argument('--shrink_ratio', default=1, type=int)
    parser.add_argument('--use_autocast', action='store_true')
    parser.add_argument('--max_depth_during_train', default=1000, type=int)
    parser.add_argument('--train_file_path', default=None)
    parser.add_argument('--val_file_path', default=None)
    parser.add_argument('--group_by_which_depth', default='depth')
    parser.add_argument('--keep_only_negative', action='store_true')
    parser.add_argument('--limit_report_depth', default=(- 1), type=int)
    parser.add_argument('--limit_report_max_depth', default=100, type=int)
    parser.add_argument('--skip_long_examples', action='store_true')
    parser.add_argument('--limit_example_num', default=(- 1), type=int)
    parser.add_argument('--resume_dir', default=None)
    args = parser.parse_args()
    # Refuse to clobber an existing non-empty output dir unless explicitly allowed.
    if (os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and (not args.overwrite_output_dir)):
        raise ValueError('Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.'.format(args.output_dir))
    # --- Device / distributed setup --------------------------------------
    if ((args.local_rank == (- 1)) or args.no_cuda):
        # Single-process mode: use all visible GPUs (or CPU).
        device = torch.device(('cuda' if (torch.cuda.is_available() and (not args.no_cuda)) else 'cpu'))
        args.n_gpu = (0 if args.no_cuda else torch.cuda.device_count())
    else:
        # Distributed mode: one GPU per process, NCCL backend.
        torch.cuda.set_device(args.local_rank)
        device = torch.device('cuda', args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    args.device = device
    # Silence print() on non-master ranks.
    setup_for_distributed((args.local_rank <= 0))
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
    logger.warning('Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s', args.local_rank, device, args.n_gpu, bool((args.local_rank != (- 1))), args.fp16)
    set_seed(args)
    num_labels = 2
    # Make non-master ranks wait so only rank 0 downloads pretrained files.
    if (args.local_rank not in [(- 1), 0]):
        torch.distributed.barrier()
    args.model_type = args.model_type.lower()
    config = AutoConfig.from_pretrained((args.config_name if args.config_name else args.model_name_or_path), num_labels=num_labels, cache_dir=(args.cache_dir if args.cache_dir else None))
    tokenizer = AutoTokenizer.from_pretrained((args.tokenizer_name if args.tokenizer_name else args.model_name_or_path), do_lower_case=args.do_lower_case, cache_dir=(args.cache_dir if args.cache_dir else None))
    # NOTE(review): when the model name contains 'bert' but not 't5', the model
    # loaded on the first branch is immediately overwritten by the `else`
    # branch below (Auto* should resolve to the same class, but confirm this
    # double-load is intentional).
    if ('bert' in args.model_name_or_path):
        model = BertForSequenceClassification.from_pretrained(args.model_name_or_path, config=config, cache_dir=(args.cache_dir if args.cache_dir else None))
    if ('t5' in args.model_name_or_path):
        from transformers import T5Tokenizer, T5ForConditionalGeneration
        model = T5ForConditionalGeneration.from_pretrained(args.model_name_or_path)
    else:
        model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path, config=config, cache_dir=(args.cache_dir if args.cache_dir else None))
    # --- Optional weight surgery ------------------------------------------
    if args.change_positional_embedding_before_loading:
        expand_position_embeddings(model, args.max_length, args.model_name_or_path)
    if (args.custom_weight is not None):
        # Reinit everything, then overwrite with a custom checkpoint
        # (strip the DataParallel 'module.' prefix from keys first).
        model.apply(model._init_weights)
        custom_state_dict = torch.load(args.custom_weight, map_location='cpu')
        for key in list(custom_state_dict.keys()):
            custom_state_dict[key.replace('module.', '')] = custom_state_dict[key]
        load_state_dict_flexible(model, custom_state_dict)
        print('\n\nLoaded {}'.format(args.custom_weight))
    if (args.load_bert_weight is not None):
        # Old TF-era checkpoints use gamma/beta for LayerNorm weight/bias.
        original_bert_weight = torch.load(args.load_bert_weight, map_location='cpu')
        old_keys = []
        new_keys = []
        for key in original_bert_weight.keys():
            new_key = None
            if ('gamma' in key):
                new_key = key.replace('gamma', 'weight')
            if ('beta' in key):
                new_key = key.replace('beta', 'bias')
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for (old_key, new_key) in zip(old_keys, new_keys):
            original_bert_weight[new_key] = original_bert_weight.pop(old_key)
        load_state_dict_flexible(model, original_bert_weight)
    if args.change_positional_embedding_after_loading:
        expand_position_embeddings(model, args.max_length, args.model_name_or_path)
    if args.nopooler:
        # Reinitialize just the pooler head.
        model.bert.pooler.apply(model._init_weights)
    if args.from_scratch:
        print('\n\nReinitializing parameters\n\n')
        model.bert.apply(model._init_weights)
    # Release the non-master ranks waiting at the barrier above.
    if (args.local_rank == 0):
        torch.distributed.barrier()
    model.to(args.device)
    # NOTE(review): print() does not do %-interpolation; args is printed as a
    # second positional argument here (logger-style call on plain print).
    print('Training/evaluation parameters %s', args)
    # --- Training ----------------------------------------------------------
    if args.do_train:
        train_dataset = LogicDataset.initialze_from_file(args.train_file_path, args)
        train_dataset.report_length()
        val_dataset = LogicDataset.initialze_from_file(args.val_file_path, args)
        (global_step, tr_loss) = train(args, train_dataset, model, tokenizer, val_dataset)
        print(' global_step = %s, average loss = %s', global_step, tr_loss)
    # Save final model/tokenizer from the master process only.
    if (args.do_train and ((args.local_rank == (- 1)) or (torch.distributed.get_rank() == 0))):
        if ((not os.path.exists(args.output_dir)) and (args.local_rank in [(- 1), 0])):
            os.makedirs(args.output_dir)
        print('Saving model checkpoint to %s', args.output_dir)
        model_to_save = (model.module if hasattr(model, 'module') else model)
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)
        torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
        tokenizer = AutoTokenizer.from_pretrained(args.output_dir)
    print('Enterring evaluation')
    # --- Evaluation: one pass per validation file, grouped by proof depth ---
    if (args.do_eval and (args.local_rank in [(- 1), 0])):
        model.eval()
        if (',' in args.val_file_path):
            val_files = args.val_file_path.split(',')
        else:
            val_files = [args.val_file_path]
        all_results = {}
        all_kinds_of_results = []
        results_string_final = ''
        for val_file in val_files:
            results_string = {}
            results = []
            print('\n\n', val_file)
            val_dataset = LogicDataset.initialze_from_file(val_file, args)
            val_dataset.report_allkinds_of_stats()
            datasets = LogicDataset.initialize_from_file_by_depth(val_file, args)
            depths = list(datasets.keys())
            depths.sort()
            total_example = sum([len(datasets[i]) for i in datasets])
            for depth in depths:
                print('\n\n')
                print('Evaluating examples of depth ', depth)
                result = evaluate(args, model, tokenizer, eval_dataset=datasets[depth])
                results_string[depth] = 'Acc: {} ; Percentage {}'.format(result['acc'], (len(datasets[depth]) / total_example))
                all_kinds_of_results.append(result['acc'])
                # Only depths inside the configured reporting window count
                # toward the headline average.
                if ((depth >= args.limit_report_depth) and (depth <= args.limit_report_max_depth)):
                    results.append(result['acc'])
            pprint.pprint(results_string)
            results_string_final += (val_file + '\n\n')
            results_string_final += pprint.pformat(results_string)
            results_string_final += '\n\n\n'
            # Prepend overall mean and windowed mean to the per-depth list.
            all_kinds_of_results.insert(0, (sum(all_kinds_of_results) / len(all_kinds_of_results)))
            all_results[val_file] = '{:.3f}'.format(((sum(results) / len(results)) * 100))
            all_kinds_of_results.insert(1, (sum(results) / len(results)))
        print('Final Reporting')
        for key in sorted(list(all_results.keys())):
            print(key)
        print()
        for key in sorted(list(all_results.keys())):
            print(all_results[key])
        pprint.pprint(all_results)
        # NOTE(review): raises TypeError if --custom_weight was not given
        # (f.write(None)); confirm eval is always run with a custom weight.
        with open('eval_result.txt', 'a+') as f:
            f.write(args.custom_weight)
            f.write('\n\n')
            f.write(results_string_final)
            f.write('\n\n\n\n\n')
|
def setup_for_distributed(is_master):
    """Suppress `print` output on non-master processes.

    Replaces the builtin `print` with a wrapper that only writes when this
    process is the master, or when the caller passes `force=True`.
    """
    import builtins as _builtins
    original_print = _builtins.print

    def print(*args, **kwargs):
        # Pop our private flag so the real print never sees it.
        must_print = kwargs.pop('force', False)
        if is_master or must_print:
            original_print(*args, **kwargs)
    _builtins.print = print
|
class TrainingMeter():
    """Running per-key averages for training losses/metrics."""

    def __init__(self):
        self.counter_dict = defaultdict(float)
        self.true_dict = defaultdict(float)

    def update(self, loss_dict):
        """Accumulate one batch worth of named values."""
        for key, value in loss_dict.items():
            self.true_dict[key] += value
            self.counter_dict[key] += 1

    def report(self):
        """Print the running mean of every tracked key, sorted by name."""
        for key in sorted(self.counter_dict):
            print(' {} : {:.7}'.format(key, (self.true_dict[key] / self.counter_dict[key])))

    def clean(self):
        """Reset all accumulators."""
        self.counter_dict = defaultdict(float)
        self.true_dict = defaultdict(float)
|
def load_state_dict_flexible(model, state_dict):
    """Load `state_dict` into `model`, falling back to best-effort partial loading.

    First tries a strict `load_state_dict`. If that raises (missing/unexpected
    keys, shape mismatches), each tensor whose name exists in the model is
    copied individually; skips and per-tensor failures are printed rather than
    raised.

    Fix: the original used bare `except:`, which also swallowed
    KeyboardInterrupt/SystemExit; both handlers are narrowed to `Exception`.
    """
    try:
        model.load_state_dict(state_dict)
    except Exception:
        print('Full loading failed!! Try partial loading!!')
        own_state = model.state_dict()
        for (name, param) in state_dict.items():
            if (name not in own_state):
                print(('Skipped: ' + name))
                continue
            if isinstance(param, torch.nn.Parameter):
                # Unwrap serialized Parameters to raw tensors before copying.
                param = param.data
            try:
                own_state[name].copy_(param)
                print(('Successfully loaded: ' + name))
            except Exception:
                # Shape/dtype mismatch for this tensor: report and keep going.
                print(('Part load failed: ' + name))
    print('\n\n')
|
def expand_position_embeddings(model, length=None, model_type='bert'):
    """Grow a BERT model's position-embedding table so it accepts longer inputs.

    Appends freshly initialized rows to the existing position embeddings and
    re-registers the `position_ids` buffer. Only acts when `model_type`
    contains 'bert'.
    """
    if ('bert' in model_type):
        embedding_model = model.bert.embeddings
        original_embedding = embedding_model.position_embeddings.weight.data
        # NOTE(review): allocates (length - 500) new rows, which assumes the
        # original table holds about 500 positions (BERT default is 512) —
        # confirm against the loaded config, otherwise the final table size
        # is not exactly `length`.
        new_embedding = nn.Embedding((length - 500), (1024 if ('large' in model_type) else 768))
        _init_weights(new_embedding, model.config)
        # Concatenate old rows with the new randomly initialized rows.
        new_embedding = torch.cat((original_embedding, new_embedding.weight.data), dim=0)
        embedding_model.position_embeddings.weight = torch.nn.Parameter(new_embedding)
        # NOTE(review): hard-coded 3000 position ids — presumably an upper
        # bound on max_length; verify it matches the expanded table size.
        embedding_model.register_buffer('position_ids', torch.arange(3000).expand((1, (- 1))))
|
def _init_weights(module, config):
    """Initialize `module` parameters the BERT way.

    Linear/Embedding weights get a normal init with the configured std;
    LayerNorm is reset to identity (weight=1, bias=0); Linear biases are zeroed.
    """
    linear = isinstance(module, nn.Linear)
    if linear or isinstance(module, nn.Embedding):
        module.weight.data.normal_(mean=0.0, std=config.initializer_range)
    elif isinstance(module, nn.LayerNorm):
        module.weight.data.fill_(1.0)
        module.bias.data.zero_()
    if linear and module.bias is not None:
        module.bias.data.zero_()
|
def init():
    """Parse command-line options for the example-balancing script."""
    cli = argparse.ArgumentParser()
    cli.add_argument('--input_file', default='input.json', type=str)
    cli.add_argument('--output_file', default='output.json', type=str)
    cli.add_argument('--min_rule_num', default=0, type=int)
    cli.add_argument('--max_rule_num', default=80, type=int)
    return cli.parse_args()
|
def stats(examples):
    """Print aggregate statistics for a list of logic examples.

    Reports the example count, the fraction of positive labels, and the mean
    of each depth field. Prints nothing for an empty list.
    """
    example_num = len(examples)
    if example_num == 0:
        return
    totals = {'label': 0.0, 'depth': 0.0, 'backward_depth': 0.0,
              'max_tree_depth': 0.0, 'tree_depth': 0.0}
    for example in examples:
        for field in totals:
            totals[field] += example[field]
    print('# of examples:', example_num)
    print('percentage of positive example:', (totals['label'] / example_num))
    print('avg depth:', (totals['depth'] / example_num))
    print('avg backward_depth:', (totals['backward_depth'] / example_num))
    print('avg max_tree_depth:', (totals['max_tree_depth'] / example_num))
    print('avg tree_depth:', (totals['tree_depth'] / example_num))
|
def main():
    """Rebalance a JSON example file so every rule-count bucket is label-balanced.

    Buckets examples by number of rules (0..120), finds the largest fraction
    `threshold` that every bucket in [min_rule_num, max_rule_num] can supply
    with a 50/50 label split, then keeps roughly `threshold` of each bucket,
    topping up from the majority label when one side runs short.
    """
    args = init()
    with open(args.input_file, 'r') as fin:
        examples = json.load(fin)
    random.shuffle(examples)
    print('loaded')
    # balanced_examples[rule_count] = [negative_examples, positive_examples]
    balanced_examples = {}
    for key in range(0, 121):
        balanced_examples[key] = [[], []]
    threshold = 1.0
    for example in examples:
        # NOTE(review): KeyError if any example has more than 120 rules.
        rule_num = len(example['rules'])
        balanced_examples[rule_num][example['label']].append(example)
    # threshold = min over buckets of (2 * minority_count / bucket_size).
    for key in balanced_examples:
        if ((args.min_rule_num <= key) and (key <= args.max_rule_num)):
            l0 = len(balanced_examples[key][0])
            l1 = len(balanced_examples[key][1])
            # NOTE(review): ZeroDivisionError if a bucket in the configured
            # range is completely empty — confirm inputs always cover it.
            threshold = min(threshold, ((min(l0, l1) * 2.0) / (l0 + l1)))
    balanced_examples_ = []
    for key in balanced_examples:
        l0 = len(balanced_examples[key][0])
        l1 = len(balanced_examples[key][1])
        # Target count per label for this bucket.
        l = math.ceil((((l0 + l1) * threshold) / 2.0))
        balanced_examples_.extend(balanced_examples[key][0][:l])
        balanced_examples_.extend(balanced_examples[key][1][:l])
        # If one label ran short, backfill from the other label's surplus.
        if (l0 < l):
            balanced_examples_.extend(balanced_examples[key][1][l:((l + l) - l0)])
        if (l1 < l):
            balanced_examples_.extend(balanced_examples[key][0][l:((l + l) - l1)])
    balanced_examples = balanced_examples_
    print(f'threshold: {threshold}')
    print(f'# examples after balance: {len(balanced_examples)}')
    with open(args.output_file, 'w') as fout:
        json.dump(balanced_examples, fout, indent=2)
|
def init():
    """Parse command-line options for the propositional-example generator."""
    cli = argparse.ArgumentParser()
    cli.add_argument('--vocab_file', default='vocab.txt', type=str)
    cli.add_argument('--output_file', default='prop_examples.txt', type=str)
    cli.add_argument('--example_num', default=1000, type=int)
    cli.add_argument('--min_pred_num', default=5, type=int)
    cli.add_argument('--max_pred_num', default=30, type=int)
    cli.add_argument('--balance_by_depth', action='store_true')
    cli.add_argument('--max_depth', default=6, type=int)
    cli.add_argument('--algo', default='RP', type=str)
    return cli.parse_args()
|
def read_vocab(vocab_file):
    """Read one predicate name per line from `vocab_file` and return the list."""
    with open(vocab_file, 'r') as fin:
        vocab = [line.strip() for line in fin]
    print('vocabulary size: ', len(vocab))
    return vocab
|
def sample_one_rule(preds):
    """Sample one random rule over `preds`.

    Returns (body, head): 1-3 distinct body literals plus a head literal,
    all drawn without replacement (fewer if `preds` is small).
    """
    body_size = random.randint(1, 3)
    literals = random.sample(preds, min(body_size + 1, len(preds)))
    random.shuffle(literals)
    body, head = literals[:-1], literals[-1]
    return (body, head)
|
def sample_rule_priority(preds):
    """Generate one random (rules, facts, query) instance, rules-first ("RP").

    Draws a random number of distinct rules (deduplicated via a canonical
    "sorted-body + head" signature), a random subset of facts, and a single
    query predicate.
    """
    pred_num = len(preds)
    rule_num = random.randint(0, 4 * pred_num)
    fact_num = random.randint(0, pred_num)
    seen = set()
    rules = []
    while len(rules) < rule_num:
        # Re-sample until we hit a rule we have not generated before.
        while True:
            rule = sample_one_rule(preds)
            signature = ' '.join(sorted(rule[0])) + ' ' + rule[1]
            if signature not in seen:
                seen.add(signature)
                break
        rules.append(rule)
    facts = random.sample(preds, fact_num)
    query = random.sample(preds, 1)[0]
    return (rules, facts, query)
|
def sample_label_priority(preds):
    """Generate a random (rules, facts, query) instance, labels-first ("LP").

    Assigns every predicate to a level of a layered graph and gives it a 0/1
    truth label. Each node's rule draws its body only from same-label nodes in
    the previous level, so forward chaining from the level-0 positives
    reproduces the labels. Distractor rules are then added, paired so that
    all-positive-body rules with positive tails are balanced by a rule over
    negative nodes.

    Returns:
        (rules, facts, query): rules as (body_list, head) pairs, facts the
        level-0 positive predicates, query one random predicate.
    """
    preds_ = preds[:]
    random.shuffle(preds_)
    pred_num = len(preds)
    graph_depth = random.randint(1, (pred_num // 2))
    width = (pred_num // graph_depth)
    # Remainder predicates are sprinkled one-per-level further down.
    preds_0 = preds_[:(pred_num % graph_depth)]
    preds_ = preds_[(pred_num % graph_depth):]
    rules = []
    levels = []
    # Node = [predicate, label]; labels random, then forced below.
    prev_level = [[x, random.randint(0, 1)] for x in preds_[:width]]
    if (graph_depth > 1):
        # Guarantee at least one negative and one positive node per level.
        (prev_level[0][1], prev_level[1][1]) = (0, 1)
    else:
        # Single-level graph: force two of each label.
        # NOTE(review): IndexError unless width >= 4 here, i.e. pred_num >= 4
        # when graph_depth == 1 — confirm callers guarantee that.
        (prev_level[0][1], prev_level[1][1], prev_level[2][1], prev_level[3][1]) = (0, 1, 0, 1)
    preds_ = preds_[width:]
    levels.append(prev_level)
    for d in range(0, (graph_depth - 1)):
        level = [[x, random.randint(0, 1)] for x in preds_[:width]]
        preds_ = preds_[width:]
        if (len(preds_0) != 0):
            # NOTE(review): appended as a tuple while regular nodes are lists;
            # it is only read afterwards, so no mutation occurs on it.
            level.append((preds_0[0], random.randint(0, 1)))
            preds_0 = preds_0[1:]
        (level[0][1], level[1][1]) = (0, 1)
        for node in level:
            (lit, label) = (node[0], node[1])
            # Body candidates: previous-level nodes with the SAME label, so the
            # rule fires iff the node should be true.
            head_cand = [x[0] for x in prev_level if (x[1] == label)]
            head_num = random.randint(1, min(3, len(head_cand)))
            head = random.sample(head_cand, head_num)
            rules.append((head, lit))
        levels.append(level)
        prev_level = level
    # Distractor rules over arbitrary nodes.
    rule_num = random.randint((0 * pred_num), (3 * pred_num))
    nodes = [x for y in levels for x in y]
    neg_nodes = [x for x in nodes if (x[1] == 0)]
    rule_cnt = 0
    while (rule_cnt < rule_num):
        tail_node = random.sample(nodes, 1)[0]
        tail = tail_node[0]
        head_cand = [x for x in nodes if (x[0] != tail)]
        while True:
            head_num = random.randint(1, min(3, len(head_cand)))
            head_nodes = None
            head_nodes = random.sample(head_cand, head_num)
            # Reject rules that would derive a negative tail from
            # all-positive bodies (they would flip the intended label).
            if (not (all([(x[1] == 1) for x in head_nodes]) and (tail_node[1] == 0))):
                break
        head = [x[0] for x in head_nodes]
        rules.append((head, tail))
        rule_cnt += 1
        if all(((x[1] == 1) for x in head_nodes)):
            # Balance each all-positive-body rule with one negative-only rule
            # so body-label statistics carry no signal.
            neg_tail = random.sample(neg_nodes, 1)[0][0]
            neg_head_cand = [x for x in neg_nodes if (x[0] != neg_tail)]
            neg_head_num = random.randint(1, min(3, len(neg_head_cand)))
            neg_head_nodes = random.sample(neg_head_cand, neg_head_num)
            neg_head = [x[0] for x in neg_head_nodes]
            rules.append((neg_head, neg_tail))
            rule_cnt += 1
    facts = [x[0] for x in levels[0] if (x[1] == 1)]
    query = random.sample([x[0] for x in nodes], 1)[0]
    return (rules, facts, query)
|
def sample_lp_star(preds):
    """Generate a random (rules, facts, query) instance ("LP*" algorithm).

    Builds a layered DAG: every predicate gets a level and a 0/1 truth label
    (each level forces one negative and one positive node). A positive node's
    rule body only uses positive previous-level heads; a negative node's rule
    body keeps at least one negative head — so forward chaining from the
    level-0 positives reproduces the labels. Distractor rules with positive
    tails are then added, each validated to include at least one negative body
    literal so they never fire.

    Fix: the original re-sampled `head_nodes` once more AFTER the validation
    loop, discarding the sample just verified to contain a label-0 head and
    possibly reintroducing an all-positive body; that dead re-sample is removed
    (cf. the analogous loop in sample_label_priority, which has no re-sample).

    Returns:
        (rules, facts, query): rules as (body_list, tail) pairs, facts the
        level-0 positive predicates, query one random predicate.
    """
    preds_ = preds[:]
    pred_num = len(preds)
    graph_depth = random.randint(2, (pred_num // 2))
    width = (pred_num // graph_depth)
    # Remainder predicates are sprinkled one-per-level further down.
    preds_0 = preds_[:(pred_num % graph_depth)]
    preds_ = preds_[(pred_num % graph_depth):]
    rules = []
    levels = []
    # Node = [predicate, label]; force one negative and one positive node.
    prev_level = [[x, random.randint(0, 1)] for x in preds_[:width]]
    (prev_level[0][1], prev_level[1][1]) = (0, 1)
    preds_ = preds_[width:]
    levels.append(prev_level)
    for d in range(0, (graph_depth - 1)):
        level = [[x, random.randint(0, 1)] for x in preds_[:width]]
        if (preds_0 != []):
            level.append((preds_0[0], random.randint(0, 1)))
            preds_0 = preds_0[1:]
        (level[0][1], level[1][1]) = (0, 1)
        preds_ = preds_[width:]
        for node in level:
            (lit, label) = (node[0], node[1])
            head_nodes_cand = prev_level
            if (label == 1):
                # A positive node may only depend on positive heads.
                head_nodes_cand = [x for x in prev_level if (x[1] == 1)]
            head_num = random.randint(1, min(3, len(head_nodes_cand)))
            while True:
                head_nodes = random.sample(head_nodes_cand, head_num)
                # A negative node must keep at least one negative head,
                # otherwise forward chaining would derive it.
                if (not (all([x[1] for x in head_nodes]) and (label == 0))):
                    break
            head = [x[0] for x in head_nodes]
            rules.append((head, lit))
        levels.append(level)
        prev_level = level
    # Distractor rules: positive tails, bodies guaranteed to contain a
    # negative literal so the rule can never fire.
    rule_num = random.randint((0 * pred_num), (3 * pred_num))
    nodes = [x for y in levels for x in y]
    for _ in range(0, rule_num):
        tail_d = random.randint(0, (len(levels) - 2))
        tail_level = levels[tail_d]
        tail_node = random.sample([x for x in tail_level if (x[1] == 1)], 1)[0]
        tail = tail_node[0]
        head_cand = [x for y in levels[tail_d:] for x in y if (x[0] != tail)]
        head_num = random.randint(1, min(3, len(head_cand)))
        while True:
            head_nodes = random.sample(head_cand, head_num)
            if (not all([x[1] for x in head_nodes])):
                break
        head = [x[0] for x in head_nodes]
        rules.append((head, tail))
    facts = [x[0] for x in levels[0] if (x[1] == 1)]
    query = random.sample([x[0] for x in nodes], 1)[0]
    return (rules, facts, query)
|
def forward_chain(rules, facts):
    """Forward-chain from `facts` over `rules`.

    Returns a dict mapping every derivable literal to the round (depth) at
    which it was first derived; the given facts have depth 0.
    """
    derived = {fact: 0 for fact in facts}
    depth = 1
    previous_size = 0
    # Keep firing rules until a full round derives nothing new.
    while len(derived) > previous_size:
        previous_size = len(derived)
        fired = [tail for (head, tail) in rules
                 if all((lit in derived) for lit in head)]
        for lit in fired:
            if lit not in derived:
                derived[lit] = depth
        depth += 1
    return derived
|
def backward_chain_(u, depth, rules, facts, max_depth, ances):
    """Recursive worker estimating the backward-search depth for literal `u`.

    Known facts return a large constant (INF). Cycles (tracked in `ances`)
    and the `max_depth` cap stop the recursion at the current depth. For each
    rule deriving `u`, the cheapest body literal bounds that rule (min), and
    the hardest rule bounds `u` overall (max).
    """
    INF = 100000000
    if u in facts:
        return INF
    if (u in ances) or (depth == max_depth):
        return depth
    result = depth
    for (head, tail) in rules:
        if tail != u:
            continue
        best_of_body = INF
        for lit in head:
            # Mark `u` as an ancestor only around the recursive call.
            ances.add(u)
            sub = backward_chain_(lit, depth + 1, rules, facts, max_depth, ances)
            ances.remove(u)
            if sub < best_of_body:
                best_of_body = sub
        if best_of_body > result:
            result = best_of_body
    return result
|
def backward_chain(query, rules, facts, max_depth):
    """Estimate the backward-chaining depth for `query`, starting with an
    empty ancestor set at depth 0."""
    visited = set()
    return backward_chain_(query, 0, rules, facts, max_depth, visited)
|
def process_example(example, max_depth):
    """Label `example` in place and record its reasoning depth.

    Shuffles rule bodies, the rule list, and the fact list (so serialization
    order carries no signal), then forward-chains to decide whether the query
    holds. Positive examples record the forward-chaining depth; negative ones
    record the depth at which backward chaining gives up (cap max_depth + 1).

    Fix: the original used a list comprehension purely for its shuffle side
    effects; replaced with a plain loop.
    """
    for rule in example['rules']:
        random.shuffle(rule[0])
    random.shuffle(example['rules'])
    random.shuffle(example['facts'])
    res = forward_chain(example['rules'], example['facts'])
    example['label'] = (1 if (example['query'] in res) else 0)
    if (example['label'] == 0):
        depth = backward_chain(example['query'], example['rules'], example['facts'], (max_depth + 1))
    else:
        depth = res[example['query']]
    example['depth'] = depth
|
def sample_one_example(vocab, min_pred_num, max_pred_num, max_depth, algo):
    """Draw a random predicate subset and build one labeled example via `algo`.

    `algo` is one of 'RP', 'LP', or 'LP_STAR'. Returns None when no example
    could be produced (callers retry).
    """
    pred_num = random.randint(min_pred_num, max_pred_num)
    preds = random.sample(vocab, pred_num)
    if algo == 'RP':
        (rules, facts, query) = sample_rule_priority(preds)
    elif algo == 'LP':
        (rules, facts, query) = sample_label_priority(preds)
    elif algo == 'LP_STAR':
        (rules, facts, query) = sample_lp_star(preds)
    if query is None:
        return None
    example = {'preds': preds, 'rules': rules, 'facts': facts, 'query': query}
    process_example(example, max_depth)
    return example
|
def sample_examples(example_num, vocab, min_pred_num, max_pred_num, max_depth, algo):
    """Sample exactly `example_num` valid examples, retrying failed draws."""
    examples = []
    for _ in tqdm(range(0, example_num)):
        candidate = None
        # Keep sampling until the generator yields a usable example.
        while candidate is None:
            candidate = sample_one_example(vocab, min_pred_num, max_pred_num, max_depth, algo)
        examples.append(candidate)
    return examples
|
def stats(examples):
    """Print count, positive-label rate and mean depth for `examples`.

    Prints nothing for an empty list.
    """
    example_num = len(examples)
    if example_num == 0:
        return
    label_total = sum(float(ex['label']) for ex in examples)
    depth_total = sum(float(ex['depth']) for ex in examples)
    print('# of examples:', example_num)
    print('percentage of positive example:', (label_total / example_num))
    print('avg depth:', (depth_total / example_num))
|
def write_examples(examples, output_file):
    """Shuffle `examples` in place, then serialize them to `output_file` as JSON."""
    random.shuffle(examples)
    with open(output_file, 'w') as handle:
        json.dump(examples, handle)
|
def main():
    """Entry point: sample propositional-logic examples and write them to disk.

    With --balance_by_depth, rejection-samples in batches of 1000 until every
    depth bucket in [0, max_depth] holds exactly --example_num examples;
    otherwise samples --example_num examples directly.

    Fix: removed the unused local `example_num = args.example_num` (the loop
    below reads args.example_num directly).
    """
    args = init()
    vocab = read_vocab(args.vocab_file)
    if args.balance_by_depth:
        examples = {}
        keys = [x for x in range(0, (args.max_depth + 1))]
        for k in keys:
            examples[k] = []
        # NOTE(review): this loop only terminates if the chosen algorithm can
        # actually produce examples at every depth up to max_depth.
        while True:
            examples_ = sample_examples(1000, vocab, args.min_pred_num, args.max_pred_num, args.max_depth, args.algo)
            for example in examples_:
                if (example['depth'] > args.max_depth):
                    continue
                key = example['depth']
                # Keep filling each depth bucket until it reaches the quota.
                if (len(examples[key]) < args.example_num):
                    examples[key].append(example)
            if all([(len(examples[k]) == args.example_num) for k in keys]):
                break
        # Flatten buckets in depth order.
        examples = [x for k in keys for x in examples[k]]
    else:
        examples = sample_examples(args.example_num, vocab, args.min_pred_num, args.max_pred_num, args.max_depth, args.algo)
    stats(examples)
    write_examples(examples, args.output_file)
|
def main():
    """MoCo pretraining entry point: seed, resolve world size, launch workers.

    Spawns one `main_worker` process per GPU when multiprocessing-distributed
    training is requested, otherwise runs a single worker in-process.
    """
    # NOTE(review): relies on a module-level `parser` defined elsewhere in
    # this file (not visible in this chunk).
    args = parser.parse_args()
    if (args.seed is not None):
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        # Deterministic cuDNN trades speed for reproducibility.
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. This will turn on the CUDNN deterministic setting, which can slow down your training considerably! You may see unexpected behavior when restarting from checkpoints.')
    if (args.gpu is not None):
        warnings.warn('You have chosen a specific GPU. This will completely disable data parallelism.')
    if ((args.dist_url == 'env://') and (args.world_size == (- 1))):
        # torchrun-style launch: world size comes from the environment.
        args.world_size = int(os.environ['WORLD_SIZE'])
    args.distributed = ((args.world_size > 1) or args.multiprocessing_distributed)
    ngpus_per_node = torch.cuda.device_count()
    if args.multiprocessing_distributed:
        # Total world size = processes per node * number of nodes.
        args.world_size = (ngpus_per_node * args.world_size)
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
    else:
        main_worker(args.gpu, ngpus_per_node, args)
|
def main_worker(gpu, ngpus_per_node, args):
    """Per-process MoCo training worker.

    Sets up the process group, builds the (optionally adversarial) MoCo model
    wrapped in DistributedDataParallel, optionally resumes from a checkpoint,
    then runs the epoch loop of unsupervised training and checkpointing.
    """
    args.gpu = gpu
    # Silence print() on every process except the one driving GPU 0.
    if (args.multiprocessing_distributed and (args.gpu != 0)):
        def print_pass(*args):
            pass
        builtins.print = print_pass
    if (args.gpu is not None):
        print('Use GPU: {} for training'.format(args.gpu))
    if args.distributed:
        if ((args.dist_url == 'env://') and (args.rank == (- 1))):
            args.rank = int(os.environ['RANK'])
        if args.multiprocessing_distributed:
            # Global rank = node rank * gpus-per-node + local gpu index.
            args.rank = ((args.rank * ngpus_per_node) + gpu)
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
    print("=> creating model '{}'".format(args.arch))
    # NOTE(review): `model` stays unbound if adversarial_method is set to a
    # value outside {None, 'ifm', 'ifm_only'} — confirm the CLI restricts it.
    if (args.adversarial_method is None):
        model = moco.builder.MoCo(models.__dict__[args.arch], args.moco_dim, args.moco_k, args.moco_m, args.moco_t, args.mlp)
    if (args.adversarial_method in ['ifm', 'ifm_only']):
        model = moco.adv_builder.MoCo(models.__dict__[args.arch], args.moco_dim, args.moco_k, args.moco_m, args.moco_t, args.mlp, args.epsilon)
    if args.distributed:
        if (args.gpu is not None):
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # Split batch and workers across the processes on this node.
            args.batch_size = int((args.batch_size / ngpus_per_node))
            args.workers = int((((args.workers + ngpus_per_node) - 1) / ngpus_per_node))
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif (args.gpu is not None):
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
        # Single-GPU (non-DDP) mode is deliberately rejected: MoCo's queue
        # update assumes DistributedDataParallel.
        raise NotImplementedError('Only DistributedDataParallel is supported.')
    else:
        raise NotImplementedError('Only DistributedDataParallel is supported.')
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    # Optionally resume model/optimizer state from a checkpoint file.
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if (args.gpu is None):
                checkpoint = torch.load(args.resume)
            else:
                # Map tensors saved on another device onto this worker's GPU.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    # --- Data pipeline (two augmented crops per image, MoCo v1/v2 recipes) ---
    traindir = os.path.join(args.dataset_root, args.dataset, 'train')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    if args.aug_plus:
        # MoCo v2-style augmentation (jitter + grayscale + blur).
        augmentation = [transforms.RandomResizedCrop(224, scale=(0.2, 1.0)), transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8), transforms.RandomGrayscale(p=0.2), transforms.RandomApply([moco.loader.GaussianBlur([0.1, 2.0])], p=0.5), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]
    else:
        # MoCo v1-style augmentation.
        augmentation = [transforms.RandomResizedCrop(224, scale=(0.2, 1.0)), transforms.RandomGrayscale(p=0.2), transforms.ColorJitter(0.4, 0.4, 0.4, 0.4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]
    train_dataset = datasets.ImageFolder(traindir, moco.loader.TwoCropsTransform(transforms.Compose(augmentation)))
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler, drop_last=True)
    # --- Epoch loop ---
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Re-seed the sampler so each epoch gets a different shard order.
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)
        train(train_loader, model, criterion, optimizer, epoch, args)
        # NOTE(review): hard-coded scratch path — checkpoint is rewritten
        # (same filename) every epoch.
        filename = f'/data/scratch/joshrob/opt/moco/{args.dataset}_{args.lr}_{args.batch_size}_{args.method}_{args.epsilon}_{args.epochs}.log'
        if ((not args.multiprocessing_distributed) or (args.multiprocessing_distributed and ((args.rank % ngpus_per_node) == 0))):
            save_checkpoint({'epoch': (epoch + 1), 'arch': args.arch, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()}, is_best=False, filename=filename)
|
def train(train_loader, model, criterion, optimizer, epoch, args):
    """Run one epoch of MoCo contrastive training.

    Each batch supplies two augmented crops per image; the model returns
    contrastive logits and targets, with extra adversarial logits when
    args.method is 'ifm' or 'ifm_only'.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(train_loader), [batch_time, data_time, losses, top1, top5], prefix='Epoch: [{}]'.format(epoch))
    model.train()
    end = time.time()
    for (i, (images, _)) in enumerate(train_loader):
        # Time spent waiting on the data loader.
        data_time.update((time.time() - end))
        if (args.gpu is not None):
            images[0] = images[0].cuda(args.gpu, non_blocking=True)
            images[1] = images[1].cuda(args.gpu, non_blocking=True)
        # NOTE(review): `loss` stays unbound if args.method is anything other
        # than None / 'ifm' / 'ifm_only' — confirm the CLI restricts it.
        if (args.method is None):
            # Plain MoCo: InfoNCE loss on (query, key) logits.
            (output, target) = model(im_q=images[0], im_k=images[1])
            loss = criterion(output, target)
        if (args.method == 'ifm'):
            # Implicit feature modification: mix clean and adversarial losses,
            # normalized so the total weight stays 1.
            (output, output_adv, target) = model(im_q=images[0], im_k=images[1])
            loss_orig = criterion(output, target)
            loss_adv = criterion(output_adv, target)
            loss = (loss_orig + (args.alpha * loss_adv))
            loss /= (1 + args.alpha)
        if (args.method == 'ifm_only'):
            # Train on the adversarial logits alone.
            (output, output_adv, target) = model(im_q=images[0], im_k=images[1])
            loss_adv = criterion(output_adv, target)
            loss = loss_adv
        # Accuracy here is over the contrastive (instance-discrimination) task.
        (acc1, acc5) = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), images[0].size(0))
        top1.update(acc1[0], images[0].size(0))
        top5.update(acc5[0], images[0].size(0))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_time.update((time.time() - end))
        end = time.time()
        if ((i % args.print_freq) == 0):
            progress.display(i)
|
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Serialize `state` to `filename`; mirror it to 'model_best.pth.tar' when it is the best so far."""
    torch.save(state, filename)
    if not is_best:
        return
    shutil.copyfile(filename, 'model_best.pth.tar')
|
class AverageMeter(object):
    """Tracks the latest value and the running average of a metric."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Forget all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        # Render as "<name> <val> (<avg>)" using the configured format spec.
        template = '{name} {val%s} ({avg%s})' % (self.fmt, self.fmt)
        return template.format(name=self.name, val=self.val, avg=self.avg)
|
class ProgressMeter(object):
    """Pretty-prints a batch counter plus a list of AverageMeter readouts."""

    def __init__(self, num_batches, meters, prefix=''):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        """Print one tab-separated status line for batch index `batch`."""
        fields = [self.prefix + self.batch_fmtstr.format(batch)]
        fields.extend(str(meter) for meter in self.meters)
        print('\t'.join(fields))

    def _get_batch_fmtstr(self, num_batches):
        # Counter field width = number of digits in the total batch count, so
        # e.g. 100 batches yields '[{:3d}/100]'.
        width = len(str(num_batches))
        return '[{{:{w}d}}/{total:{w}d}]'.format(w=width, total=num_batches)
|
def adjust_learning_rate(optimizer, epoch, args):
    """Set the learning rate for `epoch`: cosine decay if args.cos, else step decay at args.schedule milestones."""
    if args.cos:
        # Half-cosine ramp from args.lr down to 0 over args.epochs.
        lr = args.lr * 0.5 * (1.0 + math.cos(math.pi * epoch / args.epochs))
    else:
        # Multiply by 0.1 once for every milestone already reached.
        reached = sum(1 for milestone in args.schedule if epoch >= milestone)
        lr = args.lr * (0.1 ** reached)
    for group in optimizer.param_groups:
        group['lr'] = lr
|
def accuracy(output, target, topk=(1,)):
    """Return top-k accuracies (as percentages) for each k in `topk`."""
    with torch.no_grad():
        k_max = max(topk)
        batch = target.size(0)
        # Indices of the k_max highest-scoring classes per sample, one column
        # per sample after the transpose.
        _, pred = output.topk(k_max, 1, True, True)
        pred = pred.t()
        hits = pred.eq(target.view(1, -1).expand_as(pred))
        scale = 100.0 / batch
        return [
            hits[:k].contiguous().view(-1).float().sum(0, keepdim=True).mul_(scale)
            for k in topk
        ]
|
class TwoCropsTransform():
    """Applies the same base transform twice to one image, yielding a [query, key] pair."""

    def __init__(self, base_transform):
        self.base_transform = base_transform

    def __call__(self, x):
        # Two sequential applications; for a random transform these are two
        # independent draws, exactly as in the original q-then-k order.
        return [self.base_transform(x) for _ in range(2)]
|
class GaussianBlur(object):
    """Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709"""

    def __init__(self, sigma=[0.1, 2.0]):
        # sigma is a [min, max] range; the list default is never mutated.
        self.sigma = sigma

    def __call__(self, x):
        # Draw a blur radius uniformly from the configured range and apply it.
        low, high = self.sigma[0], self.sigma[1]
        radius = random.uniform(low, high)
        return x.filter(ImageFilter.GaussianBlur(radius=radius))
|
def reformat_incre_equations(x):
    """Join '[[...]]'-wrapped equations into one comma-separated string, stripping the double brackets."""
    # eq[2:-2] drops the leading '[[' and trailing ']]'; an empty input list
    # yields '' just like the original accumulator loop.
    return ', '.join(eq[2:-2] for eq in x)
|
def reformat_equations_from_peano(eq_list):
    """Convert a comma-separated peano clause string into plain equations.

    'eq <expr>' clauses keep everything after the 'eq' tag; 'answer <var>'
    clauses become '<var> = ?'. Unrecognized clauses are dropped.
    """
    parts = []
    for clause in eq_list.split(','):
        if 'eq' in clause:
            parts.append(clause[clause.index('eq') + 2:])
        elif 'answer' in clause:
            parts.append(clause[clause.index('answer') + 6:].strip() + ' = ?')
    return ', '.join(parts)
|
def get_declarative_equations(model, question, prompt, max_tokens, stop_token, temperature):
    """Query an OpenAI completion model for declarative equations.

    Fills `question` into `prompt`, extracts any '[[...]]'-wrapped equations
    from the completion and normalizes them; when none are found, prints and
    returns the raw completion text instead.
    """
    filled_prompt = prompt.format(question=question)
    response = openai.Completion.create(model=model, prompt=filled_prompt, max_tokens=max_tokens, stop=stop_token, temperature=temperature, top_p=1)
    text = response['choices'][0]['text']
    matches = re.findall('\\[\\[.*?\\]\\]', text)
    if matches:
        return reformat_equations_from_peano(reformat_incre_equations(matches))
    print()
    print(response['choices'][0]['text'])
    return response['choices'][0]['text']
|
def get_final_using_sympy(equations):
    """Solve a comma-separated system of 'lhs = rhs' equations with sympy.

    The last equation defines the goal: its left-hand side is either the goal
    variable itself or an expression bound to a fresh unused letter. Returns a
    float solution, np.nan for NaN input, or one of the sentinel strings
    'invalid equations', 'no goal found', 'no solution', 'bug'.
    """
    try:
        transformations = ((standard_transformations + (implicit_multiplication_application,)) + (convert_xor,))
        if (str(equations) == 'nan'):
            return np.nan
        equation_list = equations.split(',')
        # Reject inputs containing 3+ consecutive letters: the parser would
        # treat them as symbols/products, which is almost certainly stray text.
        for eq in equation_list:
            for c in range(len(eq)):
                if (c < (len(eq) - 2)):
                    if (eq[c].isalpha() and eq[(c + 1)].isalpha() and eq[(c + 2)].isalpha()):
                        return 'invalid equations'
        goal_var = None
        goal_expression_list = []
        # Case 1: the last equation's LHS is itself a variable name.
        # NOTE(review): the `len == 2` test accepts any 2-char token, not only
        # alphabetic ones — confirm that is intended.
        if (equation_list[(- 1)].split('=')[0].strip().isalpha() or (len(equation_list[(- 1)].split('=')[0].strip()) == 2)):
            goal_var = equation_list[(- 1)].split('=')[0].strip()
        elif ('=' in equation_list[(- 1)]):
            # Case 2: LHS is an expression; bind it to the first letter that
            # does not already appear in the goal equation.
            for l in (list(string.ascii_lowercase) + list(string.ascii_uppercase)):
                if (l not in equation_list[(- 1)]):
                    goal_var = l
                    break
            if (goal_var is not None):
                # Encode 'goal_var = LHS' as the expression 'goal_var - (LHS)'.
                goal_expression = (((goal_var + ' - (') + equation_list[(- 1)].split('=')[0].strip()) + ')')
                goal_expression = parse_expr(goal_expression, transformations=transformations)
                goal_expression = sympify(goal_expression)
                try:
                    # If the goal expression is already constant, we are done.
                    return float(solve(goal_expression)[0])
                except Exception as e:
                    pass
                goal_expression_list.append(goal_expression)
            else:
                return 'invalid equations'
        if (len(equation_list) == 1):
            # Single equation: try to evaluate its LHS directly as a constant.
            try:
                goal_expression = parse_expr(equation_list[0].split('=')[0], transformations=transformations)
                return float(sympify(goal_expression))
            except Exception as e:
                return 'invalid equations'
        if (goal_var == None):
            return 'no goal found'
        # Convert every remaining 'lhs = rhs' (skipping unknowns marked '?')
        # into 'lhs - (rhs)' and collect the system.
        for i in range((len(equation_list) - 1)):
            sub_eqs = equation_list[i]
            if ('?' not in sub_eqs):
                try:
                    sub_eqs_split = sub_eqs.split('=')
                    sub_eqs = (((sub_eqs_split[0].strip() + ' - (') + sub_eqs_split[1].strip()) + ')')
                    sub_eqs = parse_expr(sub_eqs, transformations=transformations)
                    sub_eqs = sympify(sub_eqs)
                except Exception as e:
                    return 'invalid equations'
                goal_expression_list.append(sub_eqs)
        try:
            try:
                # solve() yields a {symbol: value} dict for determined systems...
                return float(solve(goal_expression_list)[Symbol(goal_var)])
            except Exception as e:
                # ...or a list of such dicts for parameterized solutions.
                return float(solve(goal_expression_list)[0][Symbol(goal_var)])
        except Exception as e:
            pass
        return 'no solution'
    except Exception as e:
        # Catch-all: any unexpected failure is reported as 'bug'.
        print(e)
        return 'bug'
|
def get_detector(opt=None):
    """Instantiate the detection backend named by opt.detector.

    Supports 'yolo' and 'tracker'; any other value raises NotImplementedError.
    Backend modules are imported lazily so only the selected one is loaded.
    """
    name = opt.detector
    if name == 'yolo':
        from detector.yolo_api import YOLODetector
        from detector.yolo_cfg import cfg
        return YOLODetector(cfg, opt)
    if name == 'tracker':
        from detector.tracker_api import Tracker
        from detector.tracker_cfg import cfg
        return Tracker(cfg, opt)
    raise NotImplementedError
|
class BaseDetector(ABC):
    """Abstract base class declaring the detector-backend interface (see get_detector)."""

    def __init__(self):
        pass

    @abstractmethod
    def image_preprocess(self, img_name):
        """Prepare one image for detection (interface only; see concrete backends)."""
        pass

    @abstractmethod
    def images_detection(self, imgs, orig_dim_list):
        """Run detection on a batch of preprocessed images (interface only)."""
        pass

    @abstractmethod
    def detect_one_img(self, img_name):
        """Run the full detection pipeline on a single image file (interface only)."""
        pass
|
class TrackState(object):
    """Integer constants enumerating a track's life-cycle state (see BaseTrack)."""
    New = 0
    Tracked = 1
    Lost = 2
    Removed = 3
|
class BaseTrack(object):
    """Base single-object track: id allocation, life-cycle state, common fields.

    NOTE(review): `history`, `features` and `location` are *class-level*
    mutable attributes — they are shared across all instances until a subclass
    rebinds them per instance; confirm subclasses do so.
    """
    _count = 0  # global id counter shared by every track (see next_id)
    track_id = 0
    is_activated = False
    state = TrackState.New
    history = OrderedDict()
    features = []
    curr_feature = None
    score = 0
    start_frame = 0
    frame_id = 0
    time_since_update = 0
    # (np.inf, np.inf) acts as a "no location yet" placeholder
    location = (np.inf, np.inf)

    @property
    def end_frame(self):
        """Last frame id recorded for this track."""
        return self.frame_id

    @staticmethod
    def next_id():
        """Allocate and return the next globally unique track id."""
        BaseTrack._count += 1
        return BaseTrack._count

    def activate(self, *args):
        """Start the track. Must be implemented by subclasses."""
        raise NotImplementedError

    def predict(self):
        """Advance the track's state one step. Must be implemented by subclasses."""
        raise NotImplementedError

    def update(self, *args, **kwargs):
        """Incorporate a new observation. Must be implemented by subclasses."""
        raise NotImplementedError

    def mark_lost(self):
        """Flag the track as lost."""
        self.state = TrackState.Lost

    def mark_removed(self):
        """Flag the track as removed."""
        self.state = TrackState.Removed
|
class Evaluator(object):
    """Accumulates CLEAR-MOT metrics for one sequence via motmetrics (mm).

    Compares tracker output against the sequence's MOT-format ground truth
    read from '<data_root>/<seq_name>/gt/gt.txt'.
    """

    def __init__(self, data_root, seq_name, data_type):
        # data_type must be 'mot' (asserted in load_annotations)
        self.data_root = data_root
        self.seq_name = seq_name
        self.data_type = data_type
        self.load_annotations()
        self.reset_accumulator()

    def load_annotations(self):
        """Load per-frame ground-truth and ignore-region annotations."""
        assert (self.data_type == 'mot')
        gt_filename = os.path.join(self.data_root, self.seq_name, 'gt', 'gt.txt')
        self.gt_frame_dict = read_results(gt_filename, self.data_type, is_gt=True)
        self.gt_ignore_frame_dict = read_results(gt_filename, self.data_type, is_ignore=True)

    def reset_accumulator(self):
        """Start a fresh motmetrics accumulator (frame ids auto-generated)."""
        self.acc = mm.MOTAccumulator(auto_id=True)

    def eval_frame(self, frame_id, trk_tlwhs, trk_ids, rtn_events=False):
        """Feed one frame of tracker boxes (tlwh) and ids into the accumulator.

        Tracker boxes assigned to an ignore region are dropped before scoring.
        Returns the frame's MOT events when rtn_events is True and available,
        else None.
        """
        trk_tlwhs = np.copy(trk_tlwhs)
        trk_ids = np.copy(trk_ids)
        gt_objs = self.gt_frame_dict.get(frame_id, [])
        (gt_tlwhs, gt_ids) = unzip_objs(gt_objs)[:2]
        ignore_objs = self.gt_ignore_frame_dict.get(frame_id, [])
        ignore_tlwhs = unzip_objs(ignore_objs)[0]
        # Match tracker boxes to ignore regions by IoU; assignments whose cost
        # is NaN (filtered below) are treated as non-matches and kept.
        keep = np.ones(len(trk_tlwhs), dtype=bool)
        iou_distance = mm.distances.iou_matrix(ignore_tlwhs, trk_tlwhs, max_iou=0.5)
        (match_is, match_js) = mm.lap.linear_sum_assignment(iou_distance)
        (match_is, match_js) = map((lambda a: np.asarray(a, dtype=int)), [match_is, match_js])
        match_ious = iou_distance[(match_is, match_js)]
        match_js = np.asarray(match_js, dtype=int)
        match_js = match_js[np.logical_not(np.isnan(match_ious))]
        keep[match_js] = False
        trk_tlwhs = trk_tlwhs[keep]
        trk_ids = trk_ids[keep]
        # Score the surviving tracker boxes against the ground truth.
        iou_distance = mm.distances.iou_matrix(gt_tlwhs, trk_tlwhs, max_iou=0.5)
        self.acc.update(gt_ids, trk_ids, iou_distance)
        if (rtn_events and (iou_distance.size > 0) and hasattr(self.acc, 'last_mot_events')):
            events = self.acc.last_mot_events
        else:
            events = None
        return events

    def eval_file(self, filename):
        """Evaluate a whole result file; returns the filled accumulator."""
        self.reset_accumulator()
        result_frame_dict = read_results(filename, self.data_type, is_gt=False)
        # Iterate the union of frames seen in ground truth or results.
        frames = sorted(list((set(self.gt_frame_dict.keys()) | set(result_frame_dict.keys()))))
        for frame_id in frames:
            trk_objs = result_frame_dict.get(frame_id, [])
            (trk_tlwhs, trk_ids) = unzip_objs(trk_objs)[:2]
            self.eval_frame(frame_id, trk_tlwhs, trk_ids, rtn_events=False)
        return self.acc

    @staticmethod
    def get_summary(accs, names, metrics=('mota', 'num_switches', 'idp', 'idr', 'idf1', 'precision', 'recall')):
        """Compute a summary over several accumulators, including an OVERALL row."""
        names = copy.deepcopy(names)
        if (metrics is None):
            metrics = mm.metrics.motchallenge_metrics
        metrics = copy.deepcopy(metrics)
        mh = mm.metrics.create()
        summary = mh.compute_many(accs, metrics=metrics, names=names, generate_overall=True)
        return summary

    @staticmethod
    def save_summary(summary, filename):
        """Write the summary to an Excel file."""
        import pandas as pd
        writer = pd.ExcelWriter(filename)
        summary.to_excel(writer)
        # NOTE(review): ExcelWriter.save() was removed in pandas 2.0; newer
        # pandas requires writer.close() — confirm the pinned pandas version.
        writer.save()
|
def write_results(filename, results_dict: Dict, data_type: str):
    """Dump per-frame tracking results to `filename` in MOT or KITTI text format.

    results_dict maps frame_id -> list of (tlwh, track_id); entries with a
    negative track id are skipped. Raises ValueError for unknown data_type.
    A falsy filename is a no-op.
    """
    if not filename:
        return
    out_dir = os.path.dirname(filename)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    if data_type in ('mot', 'mcmot', 'lab'):
        save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'
    elif data_type == 'kitti':
        save_format = '{frame} {id} pedestrian -1 -1 -10 {x1} {y1} {x2} {y2} -1 -1 -1 -1000 -1000 -1000 -10 {score}\n'
    else:
        raise ValueError(data_type)
    with open(filename, 'w') as f:
        for frame_id, frame_data in results_dict.items():
            if data_type == 'kitti':
                # KITTI frame indices are 0-based; the dict keys are 1-based.
                frame_id -= 1
            for tlwh, track_id in frame_data:
                if track_id < 0:
                    continue
                x1, y1, w, h = tlwh
                f.write(save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x1 + w, y2=y1 + h, w=w, h=h, score=1.0))
    logger.info('Save results to {}'.format(filename))
|
def read_results(filename, data_type: str, is_gt=False, is_ignore=False):
    """Load per-frame results from `filename`; only 'mot'/'lab' text formats are supported."""
    if data_type not in ('mot', 'lab'):
        raise ValueError('Unknown data type: {}'.format(data_type))
    return read_mot_results(filename, is_gt, is_ignore)
|
def read_mot_results(filename, is_gt, is_ignore):
    """Parse a MOT-challenge CSV file into {frame_id: [(tlwh, target_id, score), ...]}.

    For MOT16/MOT17 files, ground-truth mode keeps only marked pedestrian
    (label 1) entries; ignore mode keeps only the ignorable label set with
    negative visibility. A missing file yields an empty dict.
    """
    valid_labels = {1}
    ignore_labels = {2, 7, 8, 12}
    results_dict = dict()
    if not os.path.isfile(filename):
        return results_dict
    is_mot16_17 = ('MOT16-' in filename) or ('MOT17-' in filename)
    with open(filename, 'r') as f:
        for line in f:
            fields = line.split(',')
            if len(fields) < 7:
                continue
            fid = int(fields[0])
            if fid < 1:
                continue
            results_dict.setdefault(fid, list())
            if is_gt:
                if is_mot16_17:
                    label = int(float(fields[7]))
                    mark = int(float(fields[6]))
                    # Drop unmarked entries and non-pedestrian labels.
                    if mark == 0 or label not in valid_labels:
                        continue
                score = 1
            elif is_ignore:
                if is_mot16_17:
                    label = int(float(fields[7]))
                    vis_ratio = float(fields[8])
                    # Keep only ignorable labels or negative-visibility rows.
                    if label not in ignore_labels and vis_ratio >= 0:
                        continue
                else:
                    continue
                score = 1
            else:
                score = float(fields[6])
            tlwh = tuple(map(float, fields[2:6]))
            target_id = int(fields[1])
            results_dict[fid].append((tlwh, target_id, score))
    return results_dict
|
def unzip_objs(objs):
    """Split a list of (tlwh, id, score) tuples into (tlwh ndarray of shape (N, 4), ids, scores)."""
    if objs:
        tlwhs, ids, scores = zip(*objs)
    else:
        tlwhs, ids, scores = [], [], []
    boxes = np.asarray(tlwhs, dtype=float).reshape(-1, 4)
    return boxes, ids, scores
|
def get_logger(name='root'):
    """Return a DEBUG-level logger that prints '%(asctime)s [%(levelname)s]: %(message)s'.

    BUG FIX: the original attached a new StreamHandler on every call, so
    calling it twice with the same name duplicated every log line; a handler
    is now added only when the logger does not already have one.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    if not logger.handlers:
        formatter = logging.Formatter(fmt='%(asctime)s [%(levelname)s]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
        handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    return logger
|
def parse_model_cfg(path):
    """Parses the yolo-v3 layer configuration file and returns module definitions.

    Returns a list of dicts, one per '[section]' header; each dict carries a
    'type' key plus the section's key=value options as strings. Convolutional
    sections get a 'batch_normalize' default of 0.
    """
    # BUG FIX: the original opened the file without ever closing it; the
    # context manager guarantees the handle is released.
    with open(path, 'r') as file:
        lines = file.read().split('\n')
    lines = [x for x in lines if (x and (not x.startswith('#')))]
    lines = [x.rstrip().lstrip() for x in lines]
    module_defs = []
    for line in lines:
        if line.startswith('['):
            # Start of a new '[type]' section.
            module_defs.append({})
            module_defs[-1]['type'] = line[1:-1].rstrip()
            if module_defs[-1]['type'] == 'convolutional':
                module_defs[-1]['batch_normalize'] = 0
        else:
            key, value = line.split('=')
            value = value.strip()
            module_defs[-1][key.rstrip()] = value.strip()
    return module_defs
|
def parse_data_cfg(path):
    """Parse a key=value data configuration file into a dict.

    Blank lines and '#' comments are skipped; 'gpus' and 'num_workers'
    have built-in defaults that the file may override.
    """
    options = {'gpus': '0', 'num_workers': '10'}
    with open(path, 'r') as fp:
        for raw in fp:
            entry = raw.strip()
            if not entry or entry.startswith('#'):
                continue
            key, value = entry.split('=')
            options[key.strip()] = value.strip()
    return options
|
class Timer(object):
    """A simple tic/toc wall-clock timer that also tracks a running average."""

    def __init__(self):
        # All statistics start zeroed; clear() holds the canonical reset.
        self.clear()

    def tic(self):
        """Start (or restart) timing."""
        self.start_time = time.time()

    def toc(self, average=True):
        """Stop timing and return the elapsed duration.

        Returns the running average when `average` is True, otherwise the
        interval measured since the last tic().
        """
        self.diff = time.time() - self.start_time
        self.total_time += self.diff
        self.calls += 1
        self.average_time = self.total_time / self.calls
        self.duration = self.average_time if average else self.diff
        return self.duration

    def clear(self):
        """Reset all accumulated timing statistics to zero."""
        self.total_time = 0.0
        self.calls = 0
        self.start_time = 0.0
        self.diff = 0.0
        self.average_time = 0.0
        self.duration = 0.0
|
def add_path(path):
    """Prepend `path` to sys.path unless it is already present."""
    if path in sys.path:
        return
    sys.path.insert(0, path)
|
class BoundingBox():
    """A single 2D bounding box tied to an image, a class id and optionally a confidence.

    Coordinates are stored internally in absolute form: upper-left corner
    (_x, _y), size (_w, _h) and lower-right corner (_x2, _y2).

    Args:
        imageName: name of the image this box belongs to.
        classId: class identifier (string).
        x, y: upper-left coordinate (absolute or relative, per typeCoordinates).
        w, h: width/height (BBFormat.XYWH) or right/bottom (BBFormat.XYX2Y2).
        typeCoordinates: CoordinatesType.Absolute or .Relative; relative
            coordinates require imgSize and BBFormat.XYWH.
        imgSize: (width, height) of the image; mandatory for relative coords.
        bbType: BBType.GroundTruth or BBType.Detected; detections require
            classConfidence.
        classConfidence: confidence of a detected box (detections only).
        format: BBFormat.XYWH or BBFormat.XYX2Y2 describing (x, y, w, h).

    Raises:
        IOError: on missing imgSize for relative coords, missing confidence
            for detections, or a non-XYWH format with relative coords.
    """

    def __init__(self, imageName, classId, x, y, w, h, typeCoordinates=CoordinatesType.Absolute, imgSize=None, bbType=BBType.GroundTruth, classConfidence=None, format=BBFormat.XYWH):
        self._imageName = imageName
        self._typeCoordinates = typeCoordinates
        if typeCoordinates == CoordinatesType.Relative and imgSize is None:
            raise IOError("Parameter 'imgSize' is required. It is necessary to inform the image size.")
        if bbType == BBType.Detected and classConfidence is None:
            raise IOError("For bbType='Detection', it is necessary to inform the classConfidence value.")
        self._classConfidence = classConfidence
        self._bbType = bbType
        self._classId = classId
        self._format = format
        if typeCoordinates == CoordinatesType.Relative:
            # convertToAbsoluteValues returns (x1, y1, x2, y2): the w/h slots
            # temporarily hold the lower-right corner and are fixed up below.
            (self._x, self._y, self._w, self._h) = convertToAbsoluteValues(imgSize, (x, y, w, h))
            self._width_img = imgSize[0]
            self._height_img = imgSize[1]
            if format == BBFormat.XYWH:
                self._x2 = self._w
                self._y2 = self._h
                self._w = self._x2 - self._x
                self._h = self._y2 - self._y
            else:
                raise IOError('For relative coordinates, the format must be XYWH (x,y,width,height)')
        else:
            self._x = x
            self._y = y
            if format == BBFormat.XYWH:
                self._w = w
                self._h = h
                self._x2 = self._x + self._w
                self._y2 = self._y + self._h
            else:
                # XYX2Y2: (w, h) actually carry the lower-right corner.
                self._x2 = w
                self._y2 = h
                self._w = self._x2 - self._x
                self._h = self._y2 - self._y
            if imgSize is None:
                self._width_img = None
                self._height_img = None
            else:
                self._width_img = imgSize[0]
                self._height_img = imgSize[1]

    def getAbsoluteBoundingBox(self, format=BBFormat.XYWH):
        """Return the box in absolute coordinates, as (x, y, w, h) or (x1, y1, x2, y2)."""
        if format == BBFormat.XYWH:
            return (self._x, self._y, self._w, self._h)
        elif format == BBFormat.XYX2Y2:
            return (self._x, self._y, self._x2, self._y2)

    def getRelativeBoundingBox(self, imgSize=None):
        """Return the box in relative coordinates, using imgSize or the stored image size.

        Raises:
            IOError: when no image size is available from either source.
        """
        if imgSize is None and self._width_img is None and self._height_img is None:
            raise IOError("Parameter 'imgSize' is required. It is necessary to inform the image size.")
        # BUG FIX: the original's branches were swapped, so it indexed imgSize
        # precisely when it was None (TypeError). Use the stored size when no
        # explicit size is given, and the explicit size otherwise.
        if imgSize is None:
            return convertToRelativeValues((self._width_img, self._height_img), (self._x, self._y, self._w, self._h))
        return convertToRelativeValues((imgSize[0], imgSize[1]), (self._x, self._y, self._w, self._h))

    def getImageName(self):
        """Return the name of the image this box belongs to."""
        return self._imageName

    def getConfidence(self):
        """Return the detection confidence (None for ground-truth boxes)."""
        return self._classConfidence

    def getFormat(self):
        """Return the BBFormat the box was created with."""
        return self._format

    def getClassId(self):
        """Return the class id."""
        return self._classId

    def getImageSize(self):
        """Return (width, height) of the image, or (None, None) if unknown."""
        return (self._width_img, self._height_img)

    def getCoordinatesType(self):
        """Return the CoordinatesType the box was created with."""
        return self._typeCoordinates

    def getBBType(self):
        """Return BBType.GroundTruth or BBType.Detected."""
        return self._bbType

    @staticmethod
    def compare(det1, det2):
        """Return True when two boxes agree on class, confidence, coordinates and image size.

        BUG FIX: the original referenced the non-existent attributes
        det1.classConfidence / det2.classConfidenc() (AttributeError at
        runtime) and compared each image size against itself; use the
        accessors and compare the two sizes to each other.
        """
        return (det1.getClassId() == det2.getClassId()
                and det1.getConfidence() == det2.getConfidence()
                and det1.getAbsoluteBoundingBox() == det2.getAbsoluteBoundingBox()
                and det1.getImageSize() == det2.getImageSize())

    @staticmethod
    def clone(boundingBox):
        """Return an independent copy of `boundingBox`."""
        absBB = boundingBox.getAbsoluteBoundingBox(format=BBFormat.XYWH)
        return BoundingBox(boundingBox.getImageName(), boundingBox.getClassId(), absBB[0], absBB[1], absBB[2], absBB[3], typeCoordinates=boundingBox.getCoordinatesType(), imgSize=boundingBox.getImageSize(), bbType=boundingBox.getBBType(), classConfidence=boundingBox.getConfidence(), format=BBFormat.XYWH)
|
class BoundingBoxes():
    """A mutable collection of BoundingBox objects with simple query helpers."""

    def __init__(self):
        self._boundingBoxes = []

    def addBoundingBox(self, bb):
        """Append a bounding box to the collection."""
        self._boundingBoxes.append(bb)

    def removeBoundingBox(self, _boundingBox):
        """Remove the first stored box that compares equal to `_boundingBox`.

        BUG FIX: the original executed `del self._boundingBoxes[d]` with `d`
        being the box object itself, which raises TypeError (list indices
        must be integers); remove the matching element by value instead.
        """
        for d in self._boundingBoxes:
            if BoundingBox.compare(d, _boundingBox):
                self._boundingBoxes.remove(d)
                return

    def removeAllBoundingBoxes(self):
        """Empty the collection."""
        self._boundingBoxes = []

    def getBoundingBoxes(self):
        """Return the internal list of all boxes."""
        return self._boundingBoxes

    def getBoundingBoxByClass(self, classId):
        """Return all boxes whose class id equals `classId`."""
        return [d for d in self._boundingBoxes if d.getClassId() == classId]

    def getClasses(self):
        """Return the distinct class ids, in first-seen order."""
        classes = []
        for d in self._boundingBoxes:
            c = d.getClassId()
            if c not in classes:
                classes.append(c)
        return classes

    def getBoundingBoxesByType(self, bbType):
        """Return all boxes of the given BBType."""
        return [d for d in self._boundingBoxes if d.getBBType() == bbType]

    def getBoundingBoxesByImageName(self, imageName):
        """Return all boxes belonging to the named image."""
        return [d for d in self._boundingBoxes if d.getImageName() == imageName]

    def count(self, bbType=None):
        """Return the number of boxes, optionally restricted to one BBType."""
        if bbType is None:
            return len(self._boundingBoxes)
        return sum(1 for d in self._boundingBoxes if d.getBBType() == bbType)

    def clone(self):
        """Return a new collection holding independent copies of every box."""
        newBoundingBoxes = BoundingBoxes()
        for d in self._boundingBoxes:
            newBoundingBoxes.addBoundingBox(BoundingBox.clone(d))
        return newBoundingBoxes

    def drawAllBoundingBoxes(self, image, imageName):
        """Draw the named image's boxes onto `image`: green for ground truth, red for detections."""
        for bb in self.getBoundingBoxesByImageName(imageName):
            if bb.getBBType() == BBType.GroundTruth:
                image = add_bb_into_image(image, bb, color=(0, 255, 0))
            else:
                image = add_bb_into_image(image, bb, color=(255, 0, 0))
        return image
|
class Evaluator():
    """Computes PASCAL-VOC-style detection metrics: per-class precision/recall, AP and IoU."""

    def GetPascalVOCMetrics(self, boundingboxes, IOUThreshold=0.5, method=MethodAveragePrecision.EveryPointInterpolation):
        """Compute per-class precision/recall curves and average precision.

        Args:
            boundingboxes: BoundingBoxes holding ground truth and detections.
            IOUThreshold: IoU at or above which a detection counts as a TP.
            method: every-point or 11-point interpolated AP.

        Returns:
            A list with one dict per class, keyed by 'class', 'precision',
            'recall', 'AP', 'interpolated precision', 'interpolated recall',
            'total positives', 'total TP', 'total FP'.
        """
        ret = []
        groundTruths = []
        detections = []
        classes = []
        # Flatten into rows of [imageName, classId, confidence, (x1,y1,x2,y2)].
        for bb in boundingboxes.getBoundingBoxes():
            if (bb.getBBType() == BBType.GroundTruth):
                groundTruths.append([bb.getImageName(), bb.getClassId(), 1, bb.getAbsoluteBoundingBox(BBFormat.XYX2Y2)])
            else:
                detections.append([bb.getImageName(), bb.getClassId(), bb.getConfidence(), bb.getAbsoluteBoundingBox(BBFormat.XYX2Y2)])
            if (bb.getClassId() not in classes):
                classes.append(bb.getClassId())
        classes = sorted(classes)
        for c in classes:
            dects = []
            [dects.append(d) for d in detections if (d[1] == c)]
            gts = []
            [gts.append(g) for g in groundTruths if (g[1] == c)]
            npos = len(gts)
            # Greedy matching in descending confidence order (VOC protocol).
            dects = sorted(dects, key=(lambda conf: conf[2]), reverse=True)
            TP = np.zeros(len(dects))
            FP = np.zeros(len(dects))
            # Per-image flag arrays marking which ground truths are already claimed.
            det = Counter([cc[0] for cc in gts])
            for (key, val) in det.items():
                det[key] = np.zeros(val)
            for d in range(len(dects)):
                # Ground truths in the same image as this detection.
                gt = [gt for gt in gts if (gt[0] == dects[d][0])]
                iouMax = sys.float_info.min
                # NOTE(review): when gt is empty, jmax stays unbound; it is
                # only read if iouMax >= IOUThreshold, which cannot hold with
                # iouMax at float_info.min and a positive threshold — but a
                # threshold of 0 would crash. Confirm.
                for j in range(len(gt)):
                    iou = Evaluator.iou(dects[d][3], gt[j][3])
                    if (iou > iouMax):
                        iouMax = iou
                        jmax = j
                if (iouMax >= IOUThreshold):
                    if (det[dects[d][0]][jmax] == 0):
                        TP[d] = 1
                        det[dects[d][0]][jmax] = 1
                    else:
                        # Ground truth already claimed: duplicate detection.
                        FP[d] = 1
                else:
                    FP[d] = 1
            acc_FP = np.cumsum(FP)
            acc_TP = np.cumsum(TP)
            rec = (acc_TP / npos)
            prec = np.divide(acc_TP, (acc_FP + acc_TP))
            if (method == MethodAveragePrecision.EveryPointInterpolation):
                [ap, mpre, mrec, ii] = Evaluator.CalculateAveragePrecision(rec, prec)
            else:
                [ap, mpre, mrec, _] = Evaluator.ElevenPointInterpolatedAP(rec, prec)
            r = {'class': c, 'precision': prec, 'recall': rec, 'AP': ap, 'interpolated precision': mpre, 'interpolated recall': mrec, 'total positives': npos, 'total TP': np.sum(TP), 'total FP': np.sum(FP)}
            ret.append(r)
        return ret

    def PlotPrecisionRecallCurve(self, boundingBoxes, IOUThreshold=0.5, method=MethodAveragePrecision.EveryPointInterpolation, showAP=False, showInterpolatedPrecision=False, savePath=None, showGraphic=True):
        """Plot one precision x recall curve per class and return the metrics.

        Args mirror GetPascalVOCMetrics, plus display options: showAP puts the
        AP value in the plot title, showInterpolatedPrecision overlays the
        interpolated curve, savePath writes '<class>.png' per class, and
        showGraphic shows each plot interactively.

        Returns:
            The same list of per-class dicts as GetPascalVOCMetrics.
        """
        results = self.GetPascalVOCMetrics(boundingBoxes, IOUThreshold, method)
        result = None
        for result in results:
            # NOTE(review): this guard cannot trigger usefully — `result` is
            # the loop variable, and `classId` is unassigned on the first
            # iteration, so the raise itself would fail. Confirm intent.
            if (result is None):
                raise IOError(('Error: Class %d could not be found.' % classId))
            classId = result['class']
            precision = result['precision']
            recall = result['recall']
            average_precision = result['AP']
            mpre = result['interpolated precision']
            mrec = result['interpolated recall']
            npos = result['total positives']
            total_tp = result['total TP']
            total_fp = result['total FP']
            plt.close()
            if showInterpolatedPrecision:
                if (method == MethodAveragePrecision.EveryPointInterpolation):
                    plt.plot(mrec, mpre, '--r', label='Interpolated precision (every point)')
                elif (method == MethodAveragePrecision.ElevenPointInterpolation):
                    # Deduplicate recall values, keeping the max precision at each.
                    nrec = []
                    nprec = []
                    for idx in range(len(mrec)):
                        r = mrec[idx]
                        if (r not in nrec):
                            idxEq = np.argwhere((mrec == r))
                            nrec.append(r)
                            nprec.append(max([mpre[int(id)] for id in idxEq]))
                    plt.plot(nrec, nprec, 'or', label='11-point interpolated precision')
            plt.plot(recall, precision, label='Precision')
            plt.xlabel('recall')
            plt.ylabel('precision')
            if showAP:
                ap_str = '{0:.2f}%'.format((average_precision * 100))
                plt.title(('Precision x Recall curve \nClass: %s, AP: %s' % (str(classId), ap_str)))
            else:
                plt.title(('Precision x Recall curve \nClass: %s' % str(classId)))
            plt.legend(shadow=True)
            plt.grid()
            if (savePath is not None):
                plt.savefig(os.path.join(savePath, (classId + '.png')))
            if (showGraphic is True):
                plt.show()
                plt.pause(0.05)
        return results

    @staticmethod
    def CalculateAveragePrecision(rec, prec):
        """Every-point interpolated AP: area under the precision envelope."""
        mrec = []
        mrec.append(0)
        [mrec.append(e) for e in rec]
        mrec.append(1)
        mpre = []
        mpre.append(0)
        [mpre.append(e) for e in prec]
        mpre.append(0)
        # Make precision monotonically non-increasing (right-to-left running max).
        for i in range((len(mpre) - 1), 0, (- 1)):
            mpre[(i - 1)] = max(mpre[(i - 1)], mpre[i])
        ii = []
        # Indices where recall changes; each contributes one rectangle to the area.
        for i in range((len(mrec) - 1)):
            if (mrec[1:][i] != mrec[0:(- 1)][i]):
                ii.append((i + 1))
        ap = 0
        for i in ii:
            ap = (ap + np.sum(((mrec[i] - mrec[(i - 1)]) * mpre[i])))
        return [ap, mpre[0:(len(mpre) - 1)], mrec[0:(len(mpre) - 1)], ii]

    @staticmethod
    def ElevenPointInterpolatedAP(rec, prec):
        """11-point interpolated AP: mean of max precision at recalls 0, 0.1, ..., 1."""
        mrec = []
        [mrec.append(e) for e in rec]
        mpre = []
        [mpre.append(e) for e in prec]
        recallValues = np.linspace(0, 1, 11)
        recallValues = list(recallValues[::(- 1)])
        rhoInterp = []
        recallValid = []
        for r in recallValues:
            # Max precision over all points with recall >= r (0 when none).
            argGreaterRecalls = np.argwhere((mrec[:] >= r))
            pmax = 0
            if (argGreaterRecalls.size != 0):
                pmax = max(mpre[argGreaterRecalls.min():])
            recallValid.append(r)
            rhoInterp.append(pmax)
        ap = (sum(rhoInterp) / 11)
        # Build the stepped (recall, precision) pairs used for plotting.
        rvals = []
        rvals.append(recallValid[0])
        [rvals.append(e) for e in recallValid]
        rvals.append(0)
        pvals = []
        pvals.append(0)
        [pvals.append(e) for e in rhoInterp]
        pvals.append(0)
        cc = []
        for i in range(len(rvals)):
            p = (rvals[i], pvals[(i - 1)])
            if (p not in cc):
                cc.append(p)
            p = (rvals[i], pvals[i])
            if (p not in cc):
                cc.append(p)
        recallValues = [i[0] for i in cc]
        rhoInterp = [i[1] for i in cc]
        return [ap, rhoInterp, recallValues, None]

    @staticmethod
    def _getAllIOUs(reference, detections):
        """Return (iou, reference, detection) triples sorted by descending IoU."""
        ret = []
        bbReference = reference.getAbsoluteBoundingBox(BBFormat.XYX2Y2)
        for d in detections:
            bb = d.getAbsoluteBoundingBox(BBFormat.XYX2Y2)
            iou = Evaluator.iou(bbReference, bb)
            ret.append((iou, reference, d))
        return sorted(ret, key=(lambda i: i[0]), reverse=True)

    @staticmethod
    def iou(boxA, boxB):
        """Intersection-over-union of two (x1, y1, x2, y2) boxes; 0 when disjoint."""
        if (Evaluator._boxesIntersect(boxA, boxB) is False):
            return 0
        interArea = Evaluator._getIntersectionArea(boxA, boxB)
        union = Evaluator._getUnionAreas(boxA, boxB, interArea=interArea)
        iou = (interArea / union)
        assert (iou >= 0)
        return iou

    @staticmethod
    def _boxesIntersect(boxA, boxB):
        """True when the two (x1, y1, x2, y2) boxes overlap."""
        if (boxA[0] > boxB[2]):
            return False
        if (boxB[0] > boxA[2]):
            return False
        if (boxA[3] < boxB[1]):
            return False
        if (boxA[1] > boxB[3]):
            return False
        return True

    @staticmethod
    def _getIntersectionArea(boxA, boxB):
        """Area of the overlap region; the +1 terms count edges inclusively.

        Only valid for intersecting boxes (callers guard with _boxesIntersect).
        """
        xA = max(boxA[0], boxB[0])
        yA = max(boxA[1], boxB[1])
        xB = min(boxA[2], boxB[2])
        yB = min(boxA[3], boxB[3])
        return (((xB - xA) + 1) * ((yB - yA) + 1))

    @staticmethod
    def _getUnionAreas(boxA, boxB, interArea=None):
        """Area of the union of the two boxes, reusing interArea when given."""
        area_A = Evaluator._getArea(boxA)
        area_B = Evaluator._getArea(boxB)
        if (interArea is None):
            interArea = Evaluator._getIntersectionArea(boxA, boxB)
        return float(((area_A + area_B) - interArea))

    @staticmethod
    def _getArea(box):
        """Area of one (x1, y1, x2, y2) box; the +1 terms count edges inclusively."""
        return (((box[2] - box[0]) + 1) * ((box[3] - box[1]) + 1))
|
# NOTE(review): removed stray web-page scraping residue ("Subsets and Splits /
# No community queries yet / The top public SQL queries ...") that was not
# valid Python and would break importing this module.