id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
10,053 | import torch
from torch import nn
import torch.nn.functional as F
from maskrcnn_benchmark.structures.image_list import to_image_list
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist
from ..backbone import build_backbone
from ..rpn import build_rpn
from ..roi_heads import build_roi_heads
from ..language_backbone import build_language_backbone
from transformers import AutoTokenizer
import random
import timeit
import pdb
from copy import deepcopy
The provided code snippet includes necessary dependencies for implementing the `random_word` function. Write a Python function `def random_word(input_ids, mask_token_id, vocabs, padding_token_id, greenlight_map)` to solve the following problem:
greenlight_map, batch_size x 256 (seq_len): 0 means this location cannot be calculated in the MLM loss -1 means this location cannot be masked!! 1 means this location can be masked and can be calculated in the MLM loss
Here is the function:
def random_word(input_ids, mask_token_id, vocabs, padding_token_id, greenlight_map):
    """
    Randomly corrupt tokens for masked-language-model (MLM) training, BERT-style.

    greenlight_map, batch_size x 256 (seq_len):
    0 means this location cannot be calculated in the MLM loss
    -1 means this location cannot be masked!!
    1 means this location can be masked and can be calculated in the MLM loss

    Returns (input_ids, output_label). NOTE: input_ids is modified IN PLACE;
    output_label carries the original token ids with -100 at every position
    the loss should ignore.
    """
    # output_label starts as a copy of the ORIGINAL ids; positions excluded
    # from the loss are overwritten with -100 below.
    output_label = deepcopy(input_ids)
    for j in range(input_ids.size(0)):          # batch dimension
        for i in range(input_ids.size(1)):      # sequence dimension
            prob = random.random()
            # mask token with probability
            ratio = 0.15
            # -1 in the greenlight map: never mask here, never score here.
            if greenlight_map is not None and greenlight_map[j,i] == -1:
                output_label[j,i] = -100
                continue
            # Only non-padding tokens are eligible; ~15% get corrupted.
            if (not input_ids[j,i] == padding_token_id) and prob < ratio:
                # Rescale prob into [0, 1) to choose the corruption mode.
                prob /= ratio
                # 80% randomly change token to mask token
                if prob < 0.8:
                    input_ids[j,i] = mask_token_id
                # 10% randomly change token to random token
                elif prob < 0.9:
                    input_ids[j,i] = random.choice(vocabs)
                else:
                    # no masking token (will be ignored by loss function later)
                    output_label[j,i] = -100
            # Positions not explicitly greenlit (value != 1) never contribute
            # to the loss, masked or not.
            if greenlight_map is not None and greenlight_map[j,i] != 1:
                output_label[j,i] = -100 # If this location should not be masked
    return input_ids, output_label | greenlight_map, batch_size x 256 (seq_len): 0 means this location cannot be calculated in the MLM loss -1 means this location cannot be masked!! 1 means this location can be masked and can be calculated in the MLM loss |
10,054 | from collections import OrderedDict
import torch
from torch import nn
from maskrcnn_benchmark.modeling import registry
from . import bert_model
from . import rnn_model
from . import clip_model
from . import word_utils
def build_rnn_backbone(cfg):
    """Build the RNN language backbone, wrapped in an nn.Sequential under key "body"."""
    # NOTE(review): "RNNEnoder" looks like a typo for "RNNEncoder", but it must
    # match the class name actually exported by rnn_model — do not rename here.
    body = rnn_model.RNNEnoder(cfg)
    model = nn.Sequential(OrderedDict([("body", body)]))
    return model | null |
10,055 | from collections import OrderedDict
import torch
from torch import nn
from maskrcnn_benchmark.modeling import registry
from . import bert_model
from . import rnn_model
from . import clip_model
from . import word_utils
def build_clip_backbone(cfg):
    """Build the CLIP transformer language backbone, wrapped in an nn.Sequential under key "body"."""
    body = clip_model.CLIPTransformer(cfg)
    model = nn.Sequential(OrderedDict([("body", body)]))
    return model | null |
10,056 | from collections import OrderedDict
import torch
from torch import nn
from maskrcnn_benchmark.modeling import registry
from . import bert_model
from . import rnn_model
from . import clip_model
from . import word_utils
def build_backbone(cfg):
    """Construct the language backbone named by cfg.MODEL.LANGUAGE_BACKBONE.MODEL_TYPE.

    Raises AssertionError when the requested type is not registered in
    registry.LANGUAGE_BACKBONES.
    """
    assert cfg.MODEL.LANGUAGE_BACKBONE.MODEL_TYPE in registry.LANGUAGE_BACKBONES, \
        "cfg.MODEL.LANGUAGE_BACKBONE.TYPE: {} is not registered in registry".format(
            cfg.MODEL.LANGUAGE_BACKBONE.MODEL_TYPE
        )
    # Registry maps type name -> factory callable taking the config.
    return registry.LANGUAGE_BACKBONES[cfg.MODEL.LANGUAGE_BACKBONE.MODEL_TYPE](cfg) | null |
10,057 | import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
from typing import Union, List
import torch
def default_bpe():
    """Return the path of the BPE merges file shipped next to this module."""
    return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz") | null |
10,058 | import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
from typing import Union, List
import torch
The provided code snippet includes necessary dependencies for implementing the `bytes_to_unicode` function. Write a Python function `def bytes_to_unicode()` to solve the following problem:
Returns list of utf-8 byte and a corresponding list of unicode strings. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. And avoids mapping to whitespace/control characters the bpe code barfs on.
Here is the function:
def bytes_to_unicode():
    """
    Returns list of utf-8 byte and a corresponding list of unicode strings.
    The reversible bpe codes work on unicode strings.
    This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
    When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
    To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
    And avoids mapping to whitespace/control characters the bpe code barfs on.
    """
    # Printable byte ranges that map to themselves.
    bs = list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    cs = bs[:]
    n = 0
    # Remaining bytes (whitespace/control) are shifted above U+0100 so every
    # byte gets a distinct printable codepoint.
    for b in range(2 ** 8):
        if b not in bs:
            bs.append(b)
            cs.append(2 ** 8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs)) | Returns list of utf-8 byte and a corresponding list of unicode strings. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. And avoids mapping to whitespace/control characters the bpe code barfs on. |
10,059 | import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
from typing import Union, List
import torch
The provided code snippet includes necessary dependencies for implementing the `get_pairs` function. Write a Python function `def get_pairs(word)` to solve the following problem:
Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length strings).
Here is the function:
def get_pairs(word):
    """Return set of symbol pairs in a word.
    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    # Slide over adjacent symbols: (w0,w1), (w1,w2), ...
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs | Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length strings). |
10,060 | import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
from typing import Union, List
import torch
def basic_clean(text):
    """Fix mojibake with ftfy and unescape HTML entities, then strip whitespace."""
    text = ftfy.fix_text(text)
    # Unescape twice: some corpora are HTML-escaped two levels deep.
    text = html.unescape(html.unescape(text))
    return text.strip() | null |
10,061 | import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
from typing import Union, List
import torch
def whitespace_clean(text):
    """Collapse all whitespace runs to single spaces and strip the ends."""
    text = re.sub(r'\s+', ' ', text)
    text = text.strip()
    return text | null |
10,062 | from .simple_tokenizer import SimpleTokenizer
class SimpleTokenizer(object):
    """Byte-pair-encoding (BPE) tokenizer in the style used by CLIP.

    Text is cleaned and lower-cased, split with a regex, byte-mapped to a
    reversible unicode alphabet, then greedily merged using ranked BPE merges
    loaded from ``bpe_path``.
    """

    def __init__(self, bpe_path: str = default_bpe()):
        # NOTE(review): the default bpe_path is evaluated once, at class
        # definition time — kept for backward compatibility.
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
        # Drop the header line and the tail beyond the fixed merge count.
        merges = merges[1:49152 - 256 - 2 + 1]
        merges = [tuple(merge.split()) for merge in merges]
        vocab = list(bytes_to_unicode().values())
        vocab = vocab + [v + '</w>' for v in vocab]  # word-final variants
        for merge in merges:
            vocab.append(''.join(merge))
        vocab.extend(['<|startoftext|>', '<|endoftext|>'])
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for k, v in self.encoder.items()}
        # Lower rank == higher merge priority.
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # Special tokens bypass BPE entirely via the cache.
        self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
        self.pat = re.compile(
            r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
            re.IGNORECASE)

    def bpe(self, token):
        """Return the BPE segmentation of ``token`` as a space-joined string."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token[:-1]) + (token[-1] + '</w>',)
        pairs = get_pairs(word)
        if not pairs:
            return token + '</w>'
        while True:
            # Merge the highest-priority (lowest-rank) adjacent pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except ValueError:
                    # was a bare `except:`; tuple.index only raises ValueError
                    new_word.extend(word[i:])
                    break
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def encode(self, text):
        """Encode ``text`` into a list of BPE token ids."""
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            # Map raw bytes into the reversible unicode alphabet before BPE.
            token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
            bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def decode(self, tokens):
        """Decode a list of token ids back into text ('</w>' becomes a space)."""
        text = ''.join([self.decoder[token] for token in tokens])
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
        return text

    def get_vocab_size(self):
        """Fixed CLIP vocabulary size."""
        return 49408

    def get_eot_token(self):
        """Id of the end-of-text special token."""
        return self.encoder["<|endoftext|>"]

    def get_sot_token(self):
        """Id of the start-of-text special token."""
        return self.encoder["<|startoftext|>"]

    def check_added_tokens(self):
        # This tokenizer never extends its vocabulary.
        return 0

    def get_tokenizer_obj(self):
        # No underlying third-party tokenizer object to expose.
        return None

    def tokenize(self, texts: Union[str, List[str]], context_length: int = 77):
        """Tokenize one string or a list of strings into a (N, context_length) LongTensor.

        Sequences longer than ``context_length`` are truncated (not an error);
        shorter ones are zero-padded on the right.
        """
        if isinstance(texts, str):
            texts = [texts]
        sot_token = self.encoder["<|startoftext|>"]
        eot_token = self.encoder["<|endoftext|>"]
        all_tokens = [[sot_token] + self.encode(text) + [eot_token] for text in texts]
        result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
        for i, tokens in enumerate(all_tokens):
            if len(tokens) > context_length:
                tokens = tokens[:context_length]
                # raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
            result[i, :len(tokens)] = torch.tensor(tokens)
        return result

    def __call__(self, texts: Union[str, List[str]], context_length: int = 77):
        return self.tokenize(texts, context_length)
class HFPTTokenizer(object):
    """Wrapper around a HuggingFace pretrained tokenizer with optional CLS/SEP handling.

    Falls back to 'bert-base-cased' when no pretrained name is given. For GPT
    models (which have no pad/sep token) the EOS token is reused for both.
    """

    def __init__(self, pt_name=None):
        self.pt_name = pt_name
        self.added_sep_token = 0
        self.added_cls_token = 0
        self.enable_add_tokens = False
        # BUGFIX: guard against pt_name being None before the substring test.
        # The original `'gpt' in self.pt_name` raised TypeError for the
        # documented default pt_name=None.
        self.gpt_special_case = ((not self.enable_add_tokens)
                                 and (self.pt_name is not None)
                                 and ('gpt' in self.pt_name))
        if (pt_name is None):
            self.tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
        else:
            self.tokenizer = AutoTokenizer.from_pretrained(pt_name)
        # Adding tokens to GPT causing NaN training loss.
        # Disable for now until further investigation.
        if (self.enable_add_tokens):
            if (self.tokenizer.sep_token is None):
                self.tokenizer.add_special_tokens({'sep_token': '<SEP>'})
                self.added_sep_token = 1
            if (self.tokenizer.cls_token is None):
                self.tokenizer.add_special_tokens({'cls_token': '<CLS>'})
                self.added_cls_token = 1
        if (self.gpt_special_case):
            # GPT has no pad/sep token; reuse EOS for both.
            self.tokenizer.pad_token = self.tokenizer.eos_token
            self.tokenizer.sep_token = self.tokenizer.eos_token

    def get_eot_token(self):
        """Id of the (first) separator/end token."""
        return self.tokenizer.encode(self.tokenizer.sep_token, add_special_tokens=False)[0]

    def get_sot_token(self):
        """Id of the (first) CLS/start token."""
        return self.tokenizer.encode(self.tokenizer.cls_token, add_special_tokens=False)[0]

    def get_eot_token_list(self):
        """Separator/end token as a list of ids (may be multi-token)."""
        return self.tokenizer.encode(self.tokenizer.sep_token, add_special_tokens=False)

    def get_sot_token_list(self):
        """CLS/start token as a list of ids (may be multi-token)."""
        return self.tokenizer.encode(self.tokenizer.cls_token, add_special_tokens=False)

    def get_tokenizer_obj(self):
        """Expose the underlying HuggingFace tokenizer."""
        return self.tokenizer

    # Language model needs to know if new tokens
    # were added to the dictionary.
    def check_added_tokens(self):
        return self.added_sep_token + self.added_cls_token

    def tokenize(self, texts: Union[str, List[str]], context_length: int = 77):
        """Tokenize one string or a list of strings into a (N, context_length) LongTensor.

        Reserves room for manually-added CLS/SEP tokens, pads to max_length and
        truncates overlong inputs.
        """
        if isinstance(texts, str):
            texts = [texts]
        padding = 'max_length'
        seqstart = []
        seqtok = []
        seqend = []
        max_length = context_length
        # Reserve slots for tokens we prepend/append manually below.
        if (self.added_cls_token > 0):
            seqstart = self.get_sot_token_list()
            max_length = max_length - 1
        if (self.added_sep_token > 0):
            seqend = self.get_eot_token_list()
            max_length = max_length - 1
        tokens = self.tokenizer(
            texts, padding=padding,
            truncation=True,
            max_length=max_length
        )['input_ids']
        for i in range(len(tokens)):
            tokens[i] = seqstart + tokens[i] + seqend
        if (self.gpt_special_case):
            # Force the last position to be the EOS/sep token for GPT.
            for i in range(len(tokens)):
                tokens[i][-1] = self.get_eot_token()
        # print(str(tokens))
        result = torch.Tensor(tokens).type(torch.LongTensor)
        return result

    def get_vocab_size(self):
        """Base vocabulary size of the wrapped tokenizer."""
        return self.tokenizer.vocab_size

    def __call__(self, texts: Union[str, List[str]], context_length: int = 77):
        return self.tokenize(texts, context_length)
def build_tokenizer(tokenizer_name):
    """Build a tokenizer by name: 'clip', 'hf_<name>' or 'hfc_<name>'.

    Raises ValueError for unknown names.
    """
    tokenizer = None
    if tokenizer_name == 'clip':
        tokenizer = SimpleTokenizer()
    # NOTE(review): substring tests rather than startswith — 'hf_' never
    # matches 'hfc_...' since the underscore differs, but prefixes elsewhere
    # in a name would match; confirm names are always plain prefixes.
    elif 'hf_' in tokenizer_name:
        from .hfpt_tokenizer import HFPTTokenizer
        # Strip the 'hf_' prefix to get the HuggingFace model name.
        tokenizer = HFPTTokenizer(pt_name=tokenizer_name[3:])
    elif 'hfc_' in tokenizer_name:
        from .hfpt_tokenizer import HFPTTokenizer
        # Strip the 'hfc_' prefix.
        tokenizer = HFPTTokenizer(pt_name=tokenizer_name[4:])
    else:
        raise ValueError('Unknown tokenizer')
    return tokenizer | null |
10,063 | import torch
import itertools
from .lr_scheduler import WarmupMultiStepLR, WarmupCosineAnnealingLR, WarmupReduceLROnPlateau
def make_optimizer(cfg, model):
    """Build an SGD or AdamW optimizer with per-parameter lr / weight-decay groups.

    Learning-rate rules: language backbone gets SOLVER.LANG_LR; the visual
    backbone body is scaled by BACKBONE_BODY_LR_FACTOR; biases get
    BIAS_LR_FACTOR and WEIGHT_DECAY_BIAS; norm layers get a scaled decay.
    Optionally wraps the optimizer class with full-model gradient clipping.
    """
    def maybe_add_full_model_gradient_clipping(optim):  # optim: the optimizer class
        # detectron2 doesn't have full model gradient clipping now
        clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE
        enable = (
            cfg.SOLVER.CLIP_GRADIENTS.ENABLED
            and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model"
            and clip_norm_val > 0.0
        )
        class FullModelGradientClippingOptimizer(optim):
            def step(self, closure=None):
                # Clip the norm over ALL parameter groups before stepping.
                all_params = itertools.chain(*[x["params"] for x in self.param_groups])
                torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
                super().step(closure=closure)
        return FullModelGradientClippingOptimizer if enable else optim
    params = []
    for key, value in model.named_parameters():
        if not value.requires_grad:
            continue
        lr = cfg.SOLVER.BASE_LR
        weight_decay = cfg.SOLVER.WEIGHT_DECAY
        # different lr schedule
        if "language_backbone" in key:
            lr = cfg.SOLVER.LANG_LR
        if "backbone.body" in key and "language_backbone.body" not in key:
            lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BACKBONE_BODY_LR_FACTOR
        if "bias" in key:
            lr *= cfg.SOLVER.BIAS_LR_FACTOR
            weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS
        if 'norm' in key or 'Norm' in key:
            weight_decay *= cfg.SOLVER.WEIGHT_DECAY_NORM_FACTOR
            print("Setting weight decay of {} to {}".format(key, weight_decay))
        params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]
    # NOTE(review): `lr` here is whatever the LAST parameter's lr was; it only
    # serves as the optimizer's default since every group sets its own lr.
    if cfg.SOLVER.OPTIMIZER == "SGD":
        optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)(params, lr, momentum=cfg.SOLVER.MOMENTUM)
    elif cfg.SOLVER.OPTIMIZER == "ADAMW":
        optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)(params, lr)
    return optimizer | null |
10,064 | import torch
import itertools
from .lr_scheduler import WarmupMultiStepLR, WarmupCosineAnnealingLR, WarmupReduceLROnPlateau
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
    """Multi-step LR decay (gamma at each milestone) with an initial warmup phase.

    During the first `warmup_iters` iterations the base lr is scaled by a
    constant or linearly-increasing warmup factor; afterwards the lr is
    base_lr * gamma ** (number of milestones passed).
    """
    def __init__(
        self,
        optimizer,
        milestones,      # increasing list of iteration indices at which lr decays
        gamma=0.1,
        warmup_factor=1.0 / 3,
        warmup_iters=500,
        warmup_method="linear",
        last_epoch=-1,
    ):
        if not list(milestones) == sorted(milestones):
            raise ValueError(
                "Milestones should be a list of" " increasing integers. Got {}",
                milestones,
            )
        if warmup_method not in ("constant", "linear"):
            raise ValueError(
                "Only 'constant' or 'linear' warmup_method accepted"
                "got {}".format(warmup_method)
            )
        self.milestones = milestones
        self.gamma = gamma
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        self.warmup_method = warmup_method
        super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)
    def get_lr(self):
        # last_epoch is the current iteration index (stepped per iteration).
        warmup_factor = 1
        if self.last_epoch < self.warmup_iters:
            if self.warmup_method == "constant":
                warmup_factor = self.warmup_factor
            elif self.warmup_method == "linear":
                # Linearly ramp from warmup_factor up to 1 over warmup_iters.
                alpha = float(self.last_epoch) / self.warmup_iters
                warmup_factor = self.warmup_factor * (1 - alpha) + alpha
        # NOTE(review): requires `from bisect import bisect_right` at module
        # level — not visible in this snippet; confirm the import exists.
        return [
            base_lr
            * warmup_factor
            * self.gamma ** bisect_right(self.milestones, self.last_epoch)
            for base_lr in self.base_lrs
        ]
class WarmupCosineAnnealingLR(torch.optim.lr_scheduler._LRScheduler):
    """Cosine-annealing LR schedule with an initial warmup phase.

    Warmup scales base lr by a constant or linearly-increasing factor for the
    first `warmup_iters` iterations, then anneals from base_lr down toward
    `eta_min` following a cosine curve over `max_iters`.
    """
    def __init__(
        self,
        optimizer,
        max_iters,        # total iterations used as the cosine period
        gamma=0.1,        # NOTE(review): stored but unused by this scheduler
        warmup_factor=1.0 / 3,
        warmup_iters=500,
        warmup_method="linear",
        eta_min = 0,
        last_epoch=-1,
    ):
        if warmup_method not in ("constant", "linear"):
            raise ValueError(
                "Only 'constant' or 'linear' warmup_method accepted"
                "got {}".format(warmup_method)
            )
        self.max_iters = max_iters
        self.gamma = gamma
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        self.warmup_method = warmup_method
        self.eta_min = eta_min
        super(WarmupCosineAnnealingLR, self).__init__(optimizer, last_epoch)
    def get_lr(self):
        warmup_factor = 1
        if self.last_epoch < self.warmup_iters:
            if self.warmup_method == "constant":
                warmup_factor = self.warmup_factor
            elif self.warmup_method == "linear":
                alpha = float(self.last_epoch) / self.warmup_iters
                warmup_factor = self.warmup_factor * (1 - alpha) + alpha
            return [
                base_lr
                * warmup_factor
                for base_lr in self.base_lrs
            ]
        else:
            # NOTE(review): needs `import math` at module level (not visible in
            # this snippet). The cosine argument divides by max_iters, not
            # (max_iters - warmup_iters), so annealing does not quite reach
            # eta_min at max_iters — presumably intentional upstream behavior.
            return [
                self.eta_min
                + (base_lr - self.eta_min)
                * (1 + math.cos(math.pi * (self.last_epoch - self.warmup_iters) / self.max_iters)) / 2
                for base_lr in self.base_lrs
            ]
class WarmupReduceLROnPlateau(torch.optim.lr_scheduler.ReduceLROnPlateau):
    """ReduceLROnPlateau (mode='max') preceded by a manual warmup phase.

    During the first `warmup_iters` calls to step(), lrs are set directly from
    base_lrs times the warmup factor; afterwards, step(metrics) delegates to
    ReduceLROnPlateau which decays by `gamma` after `patience` epochs without
    improvement, down to `eta_min`.
    """
    def __init__(
        self,
        optimizer,
        max_iters,        # NOTE(review): accepted but unused by this scheduler
        gamma=0.1,        # passed to ReduceLROnPlateau as `factor`
        warmup_factor=1.0 / 3,
        warmup_iters=500,
        warmup_method="linear",
        eta_min = 0,
        last_epoch=-1,
        patience = 5,
        verbose = False,
    ):
        if warmup_method not in ("constant", "linear"):
            raise ValueError(
                "Only 'constant' or 'linear' warmup_method accepted"
                "got {}".format(warmup_method)
            )
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        self.warmup_method = warmup_method
        self.eta_min = eta_min
        # Record initial lrs ourselves since ReduceLROnPlateau does not keep
        # base_lrs the way _LRScheduler subclasses do.
        if last_epoch == -1:
            for group in optimizer.param_groups:
                group.setdefault('initial_lr', group['lr'])
        else:
            for i, group in enumerate(optimizer.param_groups):
                if 'initial_lr' not in group:
                    raise KeyError("param 'initial_lr' is not specified "
                                   "in param_groups[{}] when resuming an optimizer".format(i))
        self.base_lrs = list(map(lambda group: group['initial_lr'], optimizer.param_groups))
        super(WarmupReduceLROnPlateau, self).__init__(optimizer, factor=gamma, patience=patience, mode='max', min_lr=eta_min, verbose = verbose)
    def step(self, metrics=None):
        # Warmup phase: set lrs manually and advance our own epoch counter.
        warmup_factor = 1
        if self.last_epoch < self.warmup_iters:
            if self.warmup_method == "constant":
                warmup_factor = self.warmup_factor
            elif self.warmup_method == "linear":
                alpha = float(self.last_epoch) / self.warmup_iters
                warmup_factor = self.warmup_factor * (1 - alpha) + alpha
            # Snap to exactly 1.0 on the final warmup step.
            if self.last_epoch >= self.warmup_iters-1:
                warmup_factor = 1.0
            warmup_lrs = [
                base_lr
                * warmup_factor
                for base_lr in self.base_lrs
            ]
            for param_group, lr in zip(self.optimizer.param_groups, warmup_lrs):
                param_group['lr'] = lr
            self.last_epoch += 1
        elif metrics:
            # Plateau phase: only steps when a truthy metric is provided.
            super().step(metrics)
def make_lr_scheduler(cfg, optimizer):
    """Build the LR scheduler(s) selected by the SOLVER config.

    Returns a list of WarmupMultiStepLR (multi-stage training), a single
    WarmupCosineAnnealingLR, a WarmupReduceLROnPlateau, or a single
    WarmupMultiStepLR, depending on cfg.SOLVER flags.
    """
    if cfg.SOLVER.MULTI_MAX_EPOCH:
        # NOTE(review): the assert checks MULTI_MAX_EPOCH against STEPS but the
        # loop zips STEPS with MULTI_MAX_ITER — confirm the two lists are kept
        # in sync by the config.
        assert len(cfg.SOLVER.MULTI_MAX_EPOCH) == len(cfg.SOLVER.STEPS)
        lr_scheduler = []
        for stage_step, stage_max_epoch in zip(cfg.SOLVER.STEPS, cfg.SOLVER.MULTI_MAX_ITER):
            milestones = []
            # Steps are fractions of the stage length.
            for step in stage_step:
                milestones.append(round(step * stage_max_epoch))
            lr_scheduler.append(WarmupMultiStepLR(optimizer,
                                                  milestones,
                                                  cfg.SOLVER.GAMMA,
                                                  warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
                                                  warmup_iters=cfg.SOLVER.WARMUP_ITERS,
                                                  warmup_method=cfg.SOLVER.WARMUP_METHOD, )
                                )
        return lr_scheduler
    elif cfg.SOLVER.USE_COSINE:
        max_iters = cfg.SOLVER.MAX_ITER
        return WarmupCosineAnnealingLR(
            optimizer,
            max_iters,
            cfg.SOLVER.GAMMA,
            warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
            warmup_iters=cfg.SOLVER.WARMUP_ITERS,
            warmup_method=cfg.SOLVER.WARMUP_METHOD,
            eta_min=cfg.SOLVER.MIN_LR
        )
    elif cfg.SOLVER.USE_AUTOSTEP:
        max_iters = cfg.SOLVER.MAX_ITER
        return WarmupReduceLROnPlateau(
            optimizer,
            max_iters,
            cfg.SOLVER.GAMMA,
            warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
            warmup_iters=cfg.SOLVER.WARMUP_ITERS,
            warmup_method=cfg.SOLVER.WARMUP_METHOD,
            eta_min=cfg.SOLVER.MIN_LR,
            patience=cfg.SOLVER.STEP_PATIENCE,
            verbose=True
        )
    else:
        milestones = []
        # Steps below 1 are interpreted as fractions of MAX_ITER; otherwise
        # they are absolute iteration indices.
        for step in cfg.SOLVER.STEPS:
            if step < 1:
                milestones.append(round(step * cfg.SOLVER.MAX_ITER))
            else:
                milestones.append(step)
        return WarmupMultiStepLR(
            optimizer,
            milestones,
            cfg.SOLVER.GAMMA,
            warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
            warmup_iters=cfg.SOLVER.WARMUP_ITERS,
            warmup_method=cfg.SOLVER.WARMUP_METHOD,
        ) | null |
10,065 | import os
def try_to_find(file, return_dir=False, search_path=['./DATASET', './OUTPUT', './data', './MODEL']):
    """Locate `file` under './', $DATASET and the given search paths.

    Returns the joined path (or just the containing directory when
    return_dir=True). Falsy or 'catalog://' inputs pass through unchanged.
    NOTE: terminates the whole process with exit(1) when the file is missing.
    """
    # NOTE(review): mutable default for search_path — it is only read here, so
    # the usual shared-default pitfall does not bite, but callers must not
    # mutate it.
    if not file:
        return file
    # Catalog URIs are resolved elsewhere; pass them through.
    if file.startswith('catalog://'):
        return file
    DATASET_PATH = ['./']
    if 'DATASET' in os.environ:
        DATASET_PATH.append(os.environ['DATASET'])
    DATASET_PATH += search_path
    # First hit in search order wins.
    for path in DATASET_PATH:
        if os.path.exists(os.path.join(path, file)):
            if return_dir:
                return path
            else:
                return os.path.join(path, file)
    print('Cannot find {} in {}'.format(file, DATASET_PATH))
    exit(1) | null |
10,066 | import datetime
import logging
import time
import os
import re
import torch
from tqdm import tqdm
from collections import defaultdict
from maskrcnn_benchmark.data.datasets.evaluation import evaluate, im_detect_bbox_aug
from ..utils.comm import is_main_process
from ..utils.comm import all_gather
from ..utils.comm import synchronize
import pdb
from maskrcnn_benchmark.data.datasets.evaluation.flickr.flickr_eval import FlickrEvaluator
from maskrcnn_benchmark.structures.bounding_box import BoxList
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
from maskrcnn_benchmark.data.datasets.tsv import load_from_yaml_file
def create_one_hot_dict(labels, no_minus_one_for_one_hot = False):
    """Build token<->label index maps for one-hot style label encoding.

    Returns (positive_map_token_to_label, positive_map_label_to_token) as
    defaultdicts. With no_minus_one_for_one_hot=True, token indices start at 1
    instead of 0 (index 0 reserved).
    """
    positive_map_token_to_label = defaultdict(int)
    positive_map_label_to_token = defaultdict(int)
    for i in range(len(labels)):
        positive_map_token_to_label[i] = labels[i]
        positive_map_label_to_token[labels[i]] = i
    # NOTE(review): when the flag is set, the maps above are discarded and
    # rebuilt with a +1 offset — redundant first pass kept as-is.
    if no_minus_one_for_one_hot:
        positive_map_token_to_label = defaultdict(int)
        positive_map_label_to_token = defaultdict(int)
        for i in range(len(labels)):
            positive_map_token_to_label[i+1] = labels[i]
            positive_map_label_to_token[labels[i]] = i + 1
    return positive_map_token_to_label, positive_map_label_to_token | null |
10,067 | import time
import pickle
import logging
import os
import numpy as np
import torch
import torch.nn as nn
from collections import OrderedDict
from yaml import safe_dump
from yacs.config import load_cfg, CfgNod
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.engine.inference import _accumulate_predictions_from_multiple_gpus
from maskrcnn_benchmark.modeling.backbone.nas import get_layer_name
from maskrcnn_benchmark.utils.comm import synchronize, get_rank, is_main_process, get_world_size, all_gather
from maskrcnn_benchmark.data.datasets.evaluation import evaluate
from maskrcnn_benchmark.utils.flops import profile
def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    # NOTE(review): relies on `dist` (presumably torch.distributed) being
    # imported at module level — not visible in this snippet; confirm.
    world_size = get_world_size()
    if world_size == 1:
        return [data]
    # serialized to a Tensor
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to("cuda")
    # obtain Tensor size of each rank
    local_size = torch.LongTensor([tensor.numel()]).to("cuda")
    size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)
    # receiving Tensor from all ranks
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
    if local_size != max_size:
        padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)
    data_list = []
    # Trim each rank's payload back to its true size before unpickling.
    for size, tensor in zip(size_list, tensor_list):
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))
    return data_list
def gather_candidates(all_candidates):
    """Gather candidate lists from every rank and return the deduplicated union."""
    all_candidates = all_gather(all_candidates)
    # Flatten the per-rank lists into one list.
    all_candidates = [cand for candidates in all_candidates for cand in candidates]
    return list(set(all_candidates)) | null |
10,068 | import time
import pickle
import logging
import os
import numpy as np
import torch
import torch.nn as nn
from collections import OrderedDict
from yaml import safe_dump
from yacs.config import load_cfg, CfgNod
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.engine.inference import _accumulate_predictions_from_multiple_gpus
from maskrcnn_benchmark.modeling.backbone.nas import get_layer_name
from maskrcnn_benchmark.utils.comm import synchronize, get_rank, is_main_process, get_world_size, all_gather
from maskrcnn_benchmark.data.datasets.evaluation import evaluate
from maskrcnn_benchmark.utils.flops import profile
def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    # NOTE(review): relies on `dist` (presumably torch.distributed) being
    # imported at module level — not visible in this snippet; confirm.
    world_size = get_world_size()
    if world_size == 1:
        return [data]
    # serialized to a Tensor
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to("cuda")
    # obtain Tensor size of each rank
    local_size = torch.LongTensor([tensor.numel()]).to("cuda")
    size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)
    # receiving Tensor from all ranks
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
    if local_size != max_size:
        padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)
    data_list = []
    # Trim each rank's payload back to its true size before unpickling.
    for size, tensor in zip(size_list, tensor_list):
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))
    return data_list
def gather_stats(all_candidates):
    """Gather per-rank stat dicts and merge them into one dict."""
    all_candidates = all_gather(all_candidates)
    reduced_statcs = {}
    for candidates in all_candidates:
        reduced_statcs.update(candidates)  # will replace the existing key with last value if more than one exists
    return reduced_statcs | null |
10,069 | import time
import pickle
import logging
import os
import numpy as np
import torch
import torch.nn as nn
from collections import OrderedDict
from yaml import safe_dump
from yacs.config import load_cfg, CfgNod
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.engine.inference import _accumulate_predictions_from_multiple_gpus
from maskrcnn_benchmark.modeling.backbone.nas import get_layer_name
from maskrcnn_benchmark.utils.comm import synchronize, get_rank, is_main_process, get_world_size, all_gather
from maskrcnn_benchmark.data.datasets.evaluation import evaluate
from maskrcnn_benchmark.utils.flops import profile
def bn_statistic(model, rngs, data_loader, device=cfg.MODEL.DEVICE, max_iter=500):
    """Re-estimate BatchNorm running statistics by forwarding up to max_iter batches.

    Resets every running_mean to 0 and running_var to 1, then runs the model in
    train mode (no gradients) so the BN buffers accumulate fresh statistics.
    Returns the same model.
    """
    # NOTE(review): the default `device=cfg.MODEL.DEVICE` is evaluated once at
    # import time — confirm cfg is fully loaded before this module is imported.
    for name, param in model.named_buffers():
        if 'running_mean' in name:
            nn.init.constant_(param, 0)
        if 'running_var' in name:
            nn.init.constant_(param, 1)
    model.train()
    for iteration, (images, targets, _) in enumerate(data_loader, 1):
        images = images.to(device)
        targets = [target.to(device) for target in targets]
        # Forward only — BN buffers update as a side effect of train mode.
        with torch.no_grad():
            loss_dict = model(images, targets, rngs)
        if iteration >= max_iter:
            break
    return model | null |
10,070 | import time
import pickle
import logging
import os
import numpy as np
import torch
import torch.nn as nn
from collections import OrderedDict
from yaml import safe_dump
from yacs.config import load_cfg, CfgNod
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.engine.inference import _accumulate_predictions_from_multiple_gpus
from maskrcnn_benchmark.modeling.backbone.nas import get_layer_name
from maskrcnn_benchmark.utils.comm import synchronize, get_rank, is_main_process, get_world_size, all_gather
from maskrcnn_benchmark.data.datasets.evaluation import evaluate
from maskrcnn_benchmark.utils.flops import profile
def inference(
        model,
        rngs,
        data_loader,
        iou_types=("bbox",),
        box_only=False,
        device="cuda",
        expected_results=(),
        expected_results_sigma_tol=4,
        output_folder=None,
):
    """Run distributed inference over `data_loader` and evaluate the results.

    Returns the evaluation results on the main process; other ranks return None.
    """
    # Normalize the device argument to a torch.device once, up front.
    device = torch.device(device)
    predictions = compute_on_dataset(model, rngs, data_loader, device)
    # Barrier: every rank must finish producing predictions first.
    synchronize()
    predictions = _accumulate_predictions_from_multiple_gpus(predictions)
    # Only the main process runs evaluation; the rest are done.
    if not is_main_process():
        return
    return evaluate(
        dataset=data_loader.dataset,
        predictions=predictions,
        output_folder=output_folder,
        box_only=box_only,
        iou_types=iou_types,
        expected_results=expected_results,
        expected_results_sigma_tol=expected_results_sigma_tol,
    )
def synchronize():
    """
    Helper function to synchronize (barrier) among all processes when
    using distributed training
    """
    # No-op unless distributed is both available and initialized.
    if not (dist.is_available() and dist.is_initialized()):
        return
    # A single-process "group" needs no barrier.
    if dist.get_world_size() > 1:
        dist.barrier()
def fitness(cfg, model, rngs, val_loaders):
    """Evaluate `model` (with architecture sample `rngs`) on validation loaders.

    NOTE(review): when several loaders are given, only the results of the LAST
    one are returned — confirm that is intended.
    """
    iou_types = ("bbox",)
    if cfg.MODEL.MASK_ON:
        iou_types = iou_types + ("segm",)
    for data_loader_val in val_loaders:
        results = inference(
            model,
            rngs,
            data_loader_val,
            iou_types=iou_types,
            box_only=False,
            device=cfg.MODEL.DEVICE,
            expected_results=cfg.TEST.EXPECTED_RESULTS,
            expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,
        )
        synchronize()
    return results | null |
10,071 | import cv2
import torch
import re
import numpy as np
from typing import List, Union
import nltk
import inflect
from transformers import AutoTokenizer
from torchvision import transforms as T
import pdb
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.structures.image_list import to_image_list
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark import layers as L
from maskrcnn_benchmark.modeling.roi_heads.mask_head.inference import Masker
from maskrcnn_benchmark.utils import cv2_util
import timeit
def create_positive_map_label_to_token_from_positive_map(positive_map, plus=0):
    """Convert a (num_labels, seq_len) positive map into {label: [token indices]}.

    `plus` offsets the label keys (e.g. plus=1 when label 0 is background).
    """
    positive_map_label_to_token = {}
    for i in range(len(positive_map)):
        # Indices of the tokens associated with label i.
        positive_map_label_to_token[i + plus] = torch.nonzero(positive_map[i], as_tuple=True)[0].tolist()
    return positive_map_label_to_token | null |
10,072 | import cv2
import torch
import re
import numpy as np
from typing import List, Union
import nltk
import inflect
from transformers import AutoTokenizer
from torchvision import transforms as T
import pdb
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.structures.image_list import to_image_list
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark import layers as L
from maskrcnn_benchmark.modeling.roi_heads.mask_head.inference import Masker
from maskrcnn_benchmark.utils import cv2_util
import timeit
The provided code snippet includes necessary dependencies for implementing the `create_positive_map` function. Write a Python function `def create_positive_map(tokenized, tokens_positive)` to solve the following problem:
construct a map such that positive_map[i,j] = True iff box i is associated to token j
Here is the function:
def create_positive_map(tokenized, tokens_positive):
    """construct a map such that positive_map[i,j] = True iff box i is associated to token j"""
    positive_map = torch.zeros((len(tokens_positive), 256), dtype=torch.float)

    def _char_to_token_or_none(char_idx):
        # char_to_token can either raise or return None (whitespace, truncation).
        try:
            return tokenized.char_to_token(char_idx)
        except Exception:
            return None

    for box_idx, char_spans in enumerate(tokens_positive):
        for (beg, end) in char_spans:
            try:
                beg_pos = tokenized.char_to_token(beg)
                end_pos = tokenized.char_to_token(end - 1)
            except Exception as e:
                print("beg:", beg, "end:", end)
                print("token_positive:", tokens_positive)
                # print("beg_pos:", beg_pos, "end_pos:", end_pos)
                raise e
            # Nudge the span boundaries inward when they land between tokens.
            if beg_pos is None:
                beg_pos = _char_to_token_or_none(beg + 1)
                if beg_pos is None:
                    beg_pos = _char_to_token_or_none(beg + 2)
            if end_pos is None:
                end_pos = _char_to_token_or_none(end - 2)
                if end_pos is None:
                    end_pos = _char_to_token_or_none(end - 3)
            if beg_pos is None or end_pos is None:
                continue
            positive_map[box_idx, beg_pos: end_pos + 1].fill_(1)
    # Normalize each row so the weights over associated tokens sum to ~1.
    return positive_map / (positive_map.sum(-1)[:, None] + 1e-6)
10,073 | import cv2
import torch
import re
import numpy as np
from typing import List, Union
import nltk
import inflect
from transformers import AutoTokenizer
from torchvision import transforms as T
import pdb
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.structures.image_list import to_image_list
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark import layers as L
from maskrcnn_benchmark.modeling.roi_heads.mask_head.inference import Masker
from maskrcnn_benchmark.utils import cv2_util
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
import timeit
def find_noun_phrases(caption: str) -> List[str]:
    """Extract noun phrases from a caption using the NP chunk grammar
    "NP: {<DT>?<JJ.*>*<NN.*>+}" (optional determiner, adjectives, nouns)."""
    lowered = caption.lower()
    tagged = nltk.pos_tag(nltk.word_tokenize(lowered))
    chunker = nltk.RegexpParser("NP: {<DT>?<JJ.*>*<NN.*>+}")
    parse_tree = chunker.parse(tagged)
    return [
        ' '.join(leaf[0] for leaf in subtree.leaves())
        for subtree in parse_tree.subtrees()
        if subtree.label() == 'NP'
    ]
10,074 | import cv2
import torch
import re
import numpy as np
from typing import List, Union
import nltk
import inflect
from transformers import AutoTokenizer
from torchvision import transforms as T
import pdb
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.structures.image_list import to_image_list
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark import layers as L
from maskrcnn_benchmark.modeling.roi_heads.mask_head.inference import Masker
from maskrcnn_benchmark.utils import cv2_util
import timeit
def remove_punctuation(text: str) -> str:
    """Remove a fixed set of punctuation characters from *text*, then strip
    surrounding whitespace.

    Improvement: a single ``str.translate`` pass replaces the original chain
    of ``str.replace`` calls (one full scan per punctuation character).
    Deleting every listed character in one pass is equivalent because each
    replacement was with the empty string.
    """
    punct = ['|', ':', ';', '@', '(', ')', '[', ']', '{', '}', '^',
             '\'', '\"', '’', '`', '?', '$', '%', '#', '!', '&', '*', '+', ',', '.'
             ]
    return text.translate(str.maketrans('', '', ''.join(punct))).strip()
10,075 | import cv2
import torch
import numpy as np
from torchvision import transforms as T
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.structures.image_list import to_image_list
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.modeling.roi_heads.mask_head.inference import Masker
from maskrcnn_benchmark import layers as L
from maskrcnn_benchmark.utils import cv2_util
import timeit
The provided code snippet includes necessary dependencies for implementing the `vis_keypoints` function. Write a Python function `def vis_keypoints(img, kps, kp_thresh=0, alpha=0.7, names=None, connections=None)` to solve the following problem:
Visualizes keypoints (adapted from vis_one_image). kps has shape (4, #keypoints) where 4 rows are (x, y, logit, prob).
Here is the function:
def vis_keypoints(img, kps, kp_thresh=0, alpha=0.7, names=None, connections=None):
    """Visualizes keypoints (adapted from vis_one_image).
    kps has shape (4, #keypoints) where 4 rows are (x, y, logit, prob).

    names: ordered keypoint names; assumed to contain 'nose',
    'right_shoulder' and 'left_shoulder' — TODO confirm with callers.
    connections: list of (i, j) keypoint-index pairs to draw as limbs.
    Returns the input image alpha-blended with the drawn overlay.
    """
    dataset_keypoints = names
    kp_lines = connections
    # simple rainbow color map implementation
    blue_red_ratio = 0.8
    gx = lambda x: (6-2*blue_red_ratio)*x + blue_red_ratio
    colors = [[256*max(0, (3-abs(gx(i)-4)-abs(gx(i)-5))/2),
               256*max(0, (3-abs(gx(i)-2)-abs(gx(i)-4))/2),
               256*max(0, (3-abs(gx(i)-1)-abs(gx(i)-2))/2),] for i in np.linspace(0, 1, len(kp_lines) + 2)]
    # Perform the drawing on a copy of the image, to allow for blending.
    kp_mask = np.copy(img)
    # Draw mid shoulder / mid hip first for better visualization.
    mid_shoulder = (
        kps[:2, dataset_keypoints.index('right_shoulder')] +
        kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
    sc_mid_shoulder = np.minimum(
        kps[2, dataset_keypoints.index('right_shoulder')],
        kps[2, dataset_keypoints.index('left_shoulder')])
    nose_idx = dataset_keypoints.index('nose')
    # NOTE(review): cv2.line expects integer pixel coords; these tuples may be
    # float — confirm kps are integer-valued at this point.
    if sc_mid_shoulder > kp_thresh and kps[2, nose_idx] > kp_thresh:
        cv2.line(
            kp_mask, tuple(mid_shoulder), tuple(kps[:2, nose_idx]),
            color=colors[len(kp_lines)], thickness=2, lineType=cv2.LINE_AA)
    # Hip midpoint is optional (e.g. face-only keypoint sets have no hips).
    if 'right_hip' in names and 'left_hip' in names:
        mid_hip = (
            kps[:2, dataset_keypoints.index('right_hip')] +
            kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
        sc_mid_hip = np.minimum(
            kps[2, dataset_keypoints.index('right_hip')],
            kps[2, dataset_keypoints.index('left_hip')])
        if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
            cv2.line(
                kp_mask, tuple(mid_shoulder), tuple(mid_hip),
                color=colors[len(kp_lines) + 1], thickness=2, lineType=cv2.LINE_AA)
    # Draw the keypoints.
    for l in range(len(kp_lines)):
        i1 = kp_lines[l][0]
        i2 = kp_lines[l][1]
        p1 = kps[0, i1], kps[1, i1]
        p2 = kps[0, i2], kps[1, i2]
        # limb segment: only when both endpoints clear the score threshold
        if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
            cv2.line(
                kp_mask, p1, p2,
                color=colors[l], thickness=2, lineType=cv2.LINE_AA)
        if kps[2, i1] > kp_thresh:
            cv2.circle(
                kp_mask, p1,
                radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)
        if kps[2, i2] > kp_thresh:
            cv2.circle(
                kp_mask, p2,
                radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)
    # Blend the keypoints.
    return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0)
10,076 | import datetime
import logging
import time
import random
import torch
import torch.distributed as dist
from maskrcnn_benchmark.utils.comm import get_world_size, synchronize, broadcast_data
from maskrcnn_benchmark.utils.metric_logger import MetricLogger
from maskrcnn_benchmark.utils.ema import ModelEma
def reduce_loss_dict(loss_dict):
def broadcast_data(data):
class MetricLogger(object):
def __init__(self, delimiter="\t"):
def update(self, **kwargs):
def __getattr__(self, attr):
def __str__(self):
class ModelEma:
def __init__(self, model, decay=0.9999, device=''):
def load_checkpoint(self, checkpoint):
def state_dict(self):
def update(self, model):
def do_train(
    cfg,
    model,
    data_loader,
    optimizer,
    scheduler,
    checkpointer,
    device,
    checkpoint_period,
    arguments,
    rngs=None
):
    """Run the main training loop.

    Fix: the bad-batch error message was a plain string containing ``{...}``
    placeholders (missing the ``f`` prefix), so the literal template text was
    logged instead of the iteration/ids/lengths.

    Args:
        cfg: experiment config (reads SOLVER.MODEL_EMA).
        rngs: per-mixture path choices; sampled and broadcast across workers
            on the first iteration when None.
    """
    logger = logging.getLogger("maskrcnn_benchmark.trainer")
    logger.info("Start training")
    meters = MetricLogger(delimiter=" ")
    max_iter = len(data_loader)
    start_iter = arguments["iteration"]
    model.train()
    model_ema = None
    if cfg.SOLVER.MODEL_EMA > 0:
        model_ema = ModelEma(model, decay=cfg.SOLVER.MODEL_EMA)
    start_training_time = time.time()
    end = time.time()
    for iteration, (images, targets, _) in enumerate(data_loader, start_iter):
        if any(len(target) < 1 for target in targets):
            # f-prefix added: the original logged the raw placeholder text.
            logger.error(
                f"Iteration={iteration + 1} || Image Ids used for training {_} || targets Length={[len(target) for target in targets]}"
            )
            continue
        data_time = time.time() - end
        iteration = iteration + 1
        arguments["iteration"] = iteration
        images = images.to(device)
        targets = [target.to(device) for target in targets]
        # synchronize rngs: sample once, then broadcast so all workers use
        # the same architecture path.
        if rngs is None:
            if isinstance(model, torch.nn.parallel.DistributedDataParallel):
                mix_nums = model.module.mix_nums
            else:
                mix_nums = model.mix_nums
            rngs = [random.randint(0, mix - 1) for mix in mix_nums]
            rngs = broadcast_data(rngs)
        # NOTE(review): this freezes *every* parameter each iteration before
        # the forward/backward pass — confirm intended (otherwise backward
        # has nothing trainable to update).
        for param in model.parameters():
            param.requires_grad = False
        loss_dict = model(images, targets, rngs)
        losses = sum(loss for loss in loss_dict.values())
        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = reduce_loss_dict(loss_dict)
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())
        meters.update(loss=losses_reduced, **loss_dict_reduced)
        optimizer.zero_grad()
        losses.backward()
        optimizer.step()
        scheduler.step()
        if model_ema is not None:
            model_ema.update(model)
            arguments["model_ema"] = model_ema.state_dict()
        batch_time = time.time() - end
        end = time.time()
        meters.update(time=batch_time, data=data_time)
        eta_seconds = meters.time.global_avg * (max_iter - iteration)
        eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
        if iteration % 20 == 0 or iteration == max_iter:
            logger.info(
                meters.delimiter.join(
                    [
                        "eta: {eta}",
                        "iter: {iter}",
                        "{meters}",
                        "lr: {lr:.6f}",
                        "max mem: {memory:.0f}",
                    ]
                ).format(
                    eta=eta_string,
                    iter=iteration,
                    meters=str(meters),
                    lr=optimizer.param_groups[0]["lr"],
                    memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0,
                )
            )
        if iteration % checkpoint_period == 0:
            checkpointer.save("model_{:07d}".format(iteration), **arguments)
        if iteration == max_iter:
            # fold the EMA weights back into the model before the final save
            if model_ema is not None:
                model.load_state_dict(model_ema.state_dict())
            checkpointer.save("model_final", **arguments)
    total_training_time = time.time() - start_training_time
    total_time_str = str(datetime.timedelta(seconds=total_training_time))
    logger.info(
        "Total training time: {} ({:.4f} s / it)".format(
            total_time_str, total_training_time / (max_iter)
        )
    )
10,077 | import torch
import torch.nn as nn
import torch.nn.functional as F
import pdb
import math
from maskrcnn_benchmark.modeling.utils import cat, concat_box_prediction_layers, permute_and_flatten
from timm.models.layers import DropPath
from transformers.activations import ACT2FN
def _make_conv(input_dim, output_dim, k, stride=1):
pad = (k - 1) // 2
return nn.Sequential(
nn.Conv2d(input_dim, output_dim, (k, k), padding=(pad, pad), stride=(stride, stride)),
nn.BatchNorm2d(output_dim),
nn.ReLU(inplace=True)
) | null |
10,078 | import torch
import torch.nn as nn
import torch.nn.functional as F
import pdb
import math
from maskrcnn_benchmark.modeling.utils import cat, concat_box_prediction_layers, permute_and_flatten
from timm.models.layers import DropPath
from transformers.activations import ACT2FN
def _make_mlp(input_dim, output_dim, drop):
return nn.Sequential(nn.Linear(input_dim, output_dim),
nn.BatchNorm1d(output_dim),
nn.ReLU(inplace=True),
nn.Dropout(drop),
nn.Linear(output_dim, output_dim),
nn.BatchNorm1d(output_dim),
nn.ReLU(inplace=True)) | null |
10,079 | import torch
import torch.nn as nn
import torch.nn.functional as F
import pdb
import math
from maskrcnn_benchmark.modeling.utils import cat, concat_box_prediction_layers, permute_and_flatten
from timm.models.layers import DropPath
from transformers.activations import ACT2FN
def cat(tensors, dim=0):
    """
    Efficient version of torch.cat that avoids a copy if there is only a single element in a list
    """
    assert isinstance(tensors, (list, tuple))
    return tensors[0] if len(tensors) == 1 else torch.cat(tensors, dim)
def _make_coord(batch, height, width):
# relative position encoding
xv, yv = torch.meshgrid([torch.arange(0, height), torch.arange(0, width)])
xv_min = (xv.float() * 2 - width) / width
yv_min = (yv.float() * 2 - height) / height
xv_max = ((xv + 1).float() * 2 - width) / width
yv_max = ((yv + 1).float() * 2 - height) / height
xv_ctr = (xv_min + xv_max) / 2
yv_ctr = (yv_min + yv_max) / 2
hmap = torch.ones(height, width) * (1. / height)
wmap = torch.ones(height, width) * (1. / width)
coord = torch.autograd.Variable(torch.cat([xv_min.unsqueeze(0), yv_min.unsqueeze(0), \
xv_max.unsqueeze(0), yv_max.unsqueeze(0), \
xv_ctr.unsqueeze(0), yv_ctr.unsqueeze(0), \
hmap.unsqueeze(0), wmap.unsqueeze(0)], dim=0))
coord = coord.unsqueeze(0).repeat(batch, 1, 1, 1)
return coord | null |
10,080 | import torch
import torch.nn as nn
import torch.nn.functional as F
import pdb
import math
from maskrcnn_benchmark.modeling.utils import cat, concat_box_prediction_layers, permute_and_flatten
from timm.models.layers import DropPath
from transformers.activations import ACT2FN
The provided code snippet includes necessary dependencies for implementing the `l1norm` function. Write a Python function `def l1norm(X, dim, eps=1e-8)` to solve the following problem:
L1-normalize columns of X
Here is the function:
def l1norm(X, dim, eps=1e-8):
    """L1-normalize columns of X
    """
    denom = X.abs().sum(dim=dim, keepdim=True) + eps
    return X / denom
10,081 | import torch
import torch.nn as nn
import torch.nn.functional as F
import pdb
import math
from maskrcnn_benchmark.modeling.utils import cat, concat_box_prediction_layers, permute_and_flatten
from timm.models.layers import DropPath
from transformers.activations import ACT2FN
def l2norm(X, dim, eps=1e-8):
    """L2-normalize columns of X
    """
    denom = X.pow(2).sum(dim=dim, keepdim=True).sqrt() + eps
    return X / denom
The provided code snippet includes necessary dependencies for implementing the `func_attention` function. Write a Python function `def func_attention(query, context, smooth=1, raw_feature_norm="softmax", eps=1e-8)` to solve the following problem:
query: (n_context, queryL, d) context: (n_context, sourceL, d)
Here is the function:
def func_attention(query, context, smooth=1, raw_feature_norm="softmax", eps=1e-8):
    """
    query: (n_context, queryL, d)
    context: (n_context, sourceL, d)

    Returns:
        weightedContext: (batch, queryL, d) — context aggregated per query token.
        attnT: (batch, sourceL, queryL) — attention weights.

    Raises:
        ValueError: for an unknown ``raw_feature_norm``.

    Fix: ``nn.Softmax()`` was constructed without ``dim`` and relied on the
    deprecated implicit-dim inference (which resolves to 1 for 2-D input);
    the dimension is now explicit, preserving behavior.
    """
    queryL = query.size(1)
    batch_size, sourceL = context.size(0), context.size(1)

    # Raw scores: (batch, sourceL, d) @ (batch, d, queryL) -> (batch, sourceL, queryL)
    queryT = torch.transpose(query, 1, 2)
    attn = torch.bmm(context, queryT)

    if raw_feature_norm == "softmax":
        # normalize over the query axis
        attn = attn.view(batch_size * sourceL, queryL)
        attn = nn.Softmax(dim=1)(attn)
        attn = attn.view(batch_size, sourceL, queryL)
    elif raw_feature_norm == "l2norm":
        attn = l2norm(attn, 2)
    elif raw_feature_norm == "clipped_l2norm":
        attn = nn.LeakyReLU(0.1)(attn)
        attn = l2norm(attn, 2)
    else:
        raise ValueError("unknown first norm type:", raw_feature_norm)

    # Re-normalize over the source axis with temperature `smooth`.
    attn = torch.transpose(attn, 1, 2).contiguous()  # (batch, queryL, sourceL)
    attn = attn.view(batch_size * queryL, sourceL)
    attn = nn.Softmax(dim=1)(attn * smooth)
    attn = attn.view(batch_size, queryL, sourceL)
    attnT = torch.transpose(attn, 1, 2).contiguous()  # (batch, sourceL, queryL)

    # Weighted sum of context vectors per query token.
    contextT = torch.transpose(context, 1, 2)          # (batch, d, sourceL)
    weightedContext = torch.bmm(contextT, attnT)       # (batch, d, queryL)
    weightedContext = torch.transpose(weightedContext, 1, 2)
    return weightedContext, attnT
10,082 | import PIL
from torch.utils.collect_env import get_pretty_env_info
def get_pil_version():
    """Return a formatted line reporting the installed Pillow version."""
    return "\n Pillow ({})".format(PIL.__version__)
def collect_env_info():
    """Torch environment summary with the Pillow version line appended."""
    return get_pretty_env_info() + get_pil_version()
10,083 | import sys
from functools import partial
import numpy as np
import torch
import torch.nn as nn
from maskrcnn_benchmark.layers import *
def flops_to_string(flops, units='GMac', precision=2):
    """Format a MAC count as a human-readable string.

    With ``units=None`` the largest fitting scale is chosen automatically;
    otherwise 'GMac'/'MMac'/'KMac' are honored and any other unit string
    falls back to the raw count suffixed with ' Mac'.
    """
    scales = (('GMac', 10. ** 9), ('MMac', 10. ** 6), ('KMac', 10. ** 3))
    if units is None:
        for unit_name, scale in scales:
            if flops // int(scale) > 0:
                return str(round(flops / scale, precision)) + ' ' + unit_name
        return str(flops) + ' Mac'
    for unit_name, scale in scales:
        if units == unit_name:
            return str(round(flops / scale, precision)) + ' ' + units
    return str(flops) + ' Mac'
def params_to_string(params_num, units=None, precision=2):
    """Format a parameter count as a human-readable string.

    With ``units=None`` the scale ('M'/'k') is chosen automatically;
    otherwise 'M'/'K' are honored and any other unit falls back to the raw
    count.

    Fix: the auto-scaled branch previously hard-coded a precision of 2,
    silently ignoring the ``precision`` argument; it is honored now
    (default behavior unchanged since the default is 2).
    """
    if units is None:
        if params_num // 10 ** 6 > 0:
            return str(round(params_num / 10 ** 6, precision)) + ' M'
        elif params_num // 10 ** 3:
            return str(round(params_num / 10 ** 3, precision)) + ' k'
        else:
            return str(params_num)
    else:
        if units == 'M':
            return str(round(params_num / 10. ** 6, precision)) + ' ' + units
        elif units == 'K':
            return str(round(params_num / 10. ** 3, precision)) + ' ' + units
        else:
            return str(params_num)
def print_model_with_flops(model, total_flops, total_params, units='GMac',
                           precision=3, ost=sys.stdout):
    """Print repr(model) with per-module parameter/MAC counts and shares.

    Temporarily monkey-patches each submodule's extra_repr to append the
    accumulated stats, prints the model, then restores the originals.
    Assumes the counting hooks (__params__, __batch_counter__) have already
    been installed by the flop-counting machinery.
    """
    def accumulate_params(self):
        # Leaf modules recognized by the framework carry their own __params__;
        # container modules sum over their children recursively.
        if is_supported_instance(self):
            return self.__params__
        else:
            sum = 0
            for m in self.children():
                sum += m.accumulate_params()
            return sum
    def flops_repr(self):
        # One stats line per module: params, % of total params, MACs,
        # % of total MACs, then the module's original extra_repr text.
        accumulated_params_num = self.accumulate_params()
        accumulated_flops_cost = self.accumulate_flops() / model.__batch_counter__
        return ', '.join([params_to_string(accumulated_params_num,
                                           units='M', precision=precision),
                          '{:.3%} Params'.format(accumulated_params_num / total_params),
                          flops_to_string(accumulated_flops_cost,
                                          units=units, precision=precision),
                          '{:.3%} MACs'.format(accumulated_flops_cost / total_flops),
                          self.original_extra_repr()])
    def add_extra_repr(m):
        # Bind the helpers as bound methods (module-level accumulate_flops)
        # and swap in the stats-aware extra_repr, keeping the original.
        m.accumulate_flops = accumulate_flops.__get__(m)
        m.accumulate_params = accumulate_params.__get__(m)
        flops_extra_repr = flops_repr.__get__(m)
        if m.extra_repr != flops_extra_repr:
            m.original_extra_repr = m.extra_repr
            m.extra_repr = flops_extra_repr
            assert m.extra_repr != m.original_extra_repr
    def del_extra_repr(m):
        # Undo add_extra_repr so the model is left unmodified.
        if hasattr(m, 'original_extra_repr'):
            m.extra_repr = m.original_extra_repr
            del m.original_extra_repr
        if hasattr(m, 'accumulate_flops'):
            del m.accumulate_flops
    model.apply(add_extra_repr)
    print(repr(model), file=ost)
    model.apply(del_extra_repr)
def add_flops_counting_methods(net_main_module):
    """Attach the flop-counting API (start/stop/reset/compute) to the given
    network module, reset its counters, and return the same module."""
    # adding additional methods to the existing module object,
    # this is done this way so that each function has access to self object
    net_main_module.start_flops_count = start_flops_count.__get__(net_main_module)
    net_main_module.stop_flops_count = stop_flops_count.__get__(net_main_module)
    net_main_module.reset_flops_count = reset_flops_count.__get__(net_main_module)
    net_main_module.compute_average_flops_cost = compute_average_flops_cost.__get__(
        net_main_module)
    # start from a clean state so stale counts from a previous run don't leak
    net_main_module.reset_flops_count()
    return net_main_module
def compute_average_flops_cost(self):
    """
    A method that will be available after add_flops_counting_methods() is called
    on a desired net object.
    Returns current mean flops consumption per image.
    """
    # Temporarily bind the module-level accumulate_flops to every submodule
    # so the recursive sum works, then remove the bindings again.
    for m in self.modules():
        m.accumulate_flops = accumulate_flops.__get__(m)
    flops_sum = self.accumulate_flops()
    for m in self.modules():
        if hasattr(m, 'accumulate_flops'):
            del m.accumulate_flops
    params_sum = get_model_parameters_number(self)
    # __batch_counter__ tracks how many images went through the model, so
    # this is mean MACs per image, alongside the total parameter count.
    return flops_sum / self.__batch_counter__, params_sum
def start_flops_count(self, **kwargs):
    """
    A method that will be available after add_flops_counting_methods() is called
    on a desired net object.
    Activates the computation of mean flops consumption per image.
    Call it before you run the network.

    kwargs are forwarded to the per-module hook installer and must provide
    ost, verbose and ignore_list.
    """
    add_batch_counter_hook_function(self)
    # Remember module types already reported so the zero-op warning is
    # printed at most once per type.
    seen_types = set()
    def add_flops_counter_hook_function(module, ost, verbose, ignore_list):
        if type(module) in ignore_list:
            seen_types.add(type(module))
            # Ignored-but-supported modules are zeroed out rather than hooked.
            if is_supported_instance(module):
                module.__params__ = 0
        elif is_supported_instance(module):
            if hasattr(module, '__flops_handle__'):
                return
            # Hook priority: user-supplied custom hook, then the module's own
            # compute_macs method, then the built-in mapping.
            if type(module) in CUSTOM_MODULES_MAPPING:
                handle = module.register_forward_hook(
                    CUSTOM_MODULES_MAPPING[type(module)])
            elif getattr(module, 'compute_macs', False):
                handle = module.register_forward_hook(
                    module.compute_macs
                )
            else:
                handle = module.register_forward_hook(MODULES_MAPPING[type(module)])
            module.__flops_handle__ = handle
            seen_types.add(type(module))
        else:
            # Unknown leaf types contribute zero MACs; optionally warn once.
            if verbose and not type(module) in (nn.Sequential, nn.ModuleList) and \
                    not type(module) in seen_types:
                print('Warning: module ' + type(module).__name__ +
                      ' is treated as a zero-op.', file=ost)
            seen_types.add(type(module))
    self.apply(partial(add_flops_counter_hook_function, **kwargs))
def stop_flops_count(self):
    """
    A method that will be available after add_flops_counting_methods() is called
    on a desired net object.
    Stops computing the mean flops consumption per image.
    Call whenever you want to pause the computation.
    """
    # Remove the batch counter and every per-module counting hook.
    remove_batch_counter_hook_function(self)
    self.apply(remove_flops_counter_hook_function)
CUSTOM_MODULES_MAPPING = {}
def get_model_complexity_info(model, input_res,
                              print_per_layer_stat=True,
                              as_strings=True,
                              input_constructor=None, ost=sys.stdout,
                              verbose=False, ignore_modules=[],
                              custom_modules_hooks={}):
    # Entry point: wraps `model` with counting hooks, runs one forward pass on
    # a dummy (1, *input_res) batch (or on input_constructor(input_res)), and
    # returns (macs, params) — formatted strings when as_strings is True.
    # NOTE(review): mutable defaults (ignore_modules=[], custom_modules_hooks={})
    # are shared across calls; they are not mutated here, but confirm.
    assert type(input_res) is tuple
    assert len(input_res) >= 1
    assert isinstance(model, nn.Module)
    global CUSTOM_MODULES_MAPPING
    CUSTOM_MODULES_MAPPING = custom_modules_hooks
    flops_model = add_flops_counting_methods(model)
    flops_model.eval()
    flops_model.start_flops_count(ost=ost, verbose=verbose,
                                  ignore_list=ignore_modules)
    if input_constructor:
        input = input_constructor(input_res)
        _ = flops_model(**input)
    else:
        try:
            # build the dummy batch with the model's own dtype/device
            batch = torch.ones(()).new_empty((1, *input_res),
                                             dtype=next(flops_model.parameters()).dtype,
                                             device=next(flops_model.parameters()).device)
        except StopIteration:
            # parameterless model: fall back to a default dtype/device batch
            batch = torch.ones(()).new_empty((1, *input_res))
        _ = flops_model(batch)
    flops_count, params_count = flops_model.compute_average_flops_cost()
    if print_per_layer_stat:
        print_model_with_flops(flops_model, flops_count, params_count, ost=ost)
    flops_model.stop_flops_count()
    CUSTOM_MODULES_MAPPING = {}
    if as_strings:
        return flops_to_string(flops_count), params_to_string(params_count)
    return flops_count, params_count
10,084 | import sys
from functools import partial
import numpy as np
import torch
import torch.nn as nn
from maskrcnn_benchmark.layers import *
def empty_flops_counter_hook(module, input, output):
    """Forward hook for modules modeled as zero-cost: contributes no MACs."""
    module.__flops__ = module.__flops__ + 0
10,085 | import sys
from functools import partial
import numpy as np
import torch
import torch.nn as nn
from maskrcnn_benchmark.layers import *
def upsample_flops_counter_hook(module, input, output):
    """Forward hook: counts one op per element of the first output."""
    first_out = output[0]
    count = first_out.shape[0]
    for extent in first_out.shape[1:]:
        count *= extent
    module.__flops__ += int(count)
10,086 | import sys
from functools import partial
import numpy as np
import torch
import torch.nn as nn
from maskrcnn_benchmark.layers import *
def relu_flops_counter_hook(module, input, output):
    """Forward hook: one op per activated output element."""
    module.__flops__ += int(output.numel())
10,087 | import sys
from functools import partial
import numpy as np
import torch
import torch.nn as nn
from maskrcnn_benchmark.layers import *
def linear_flops_counter_hook(module, input, output):
    """Forward hook: MACs = numel(input) * out_features, plus bias adds."""
    inp = input[0]
    out_features = output.shape[-1]
    bias_ops = out_features if module.bias is not None else 0
    module.__flops__ += int(np.prod(inp.shape) * out_features + bias_ops)
10,088 | import sys
from functools import partial
import numpy as np
import torch
import torch.nn as nn
from maskrcnn_benchmark.layers import *
def pool_flops_counter_hook(module, input, output):
    """Forward hook: one op per input element."""
    module.__flops__ += int(np.prod(input[0].shape))
10,089 | import sys
from functools import partial
import numpy as np
import torch
import torch.nn as nn
from maskrcnn_benchmark.layers import *
def bn_flops_counter_hook(module, input, output):
    """Forward hook: normalization is one op per element; an affine layer
    (scale + shift) doubles the count."""
    elems = np.prod(input[0].shape)
    if module.affine:
        elems *= 2
    module.__flops__ += int(elems)
10,090 | import sys
from functools import partial
import numpy as np
import torch
import torch.nn as nn
from maskrcnn_benchmark.layers import *
def conv_flops_counter_hook(conv_module, input, output):
    """Forward hook estimating convolution MACs:
    kernel_volume * in_channels * (out_channels / groups) per output position,
    over batch * spatial positions, plus bias adds when present."""
    # Can have multiple inputs, getting the first one
    inp = input[0]
    batch = inp.shape[0]
    out_spatial = int(np.prod(list(output.shape[2:])))
    kernel_volume = int(np.prod(list(conv_module.kernel_size)))
    filters_per_group = conv_module.out_channels // conv_module.groups
    per_position = kernel_volume * conv_module.in_channels * filters_per_group
    positions = batch * out_spatial
    total = per_position * positions
    if conv_module.bias is not None:
        total += conv_module.out_channels * positions
    conv_module.__flops__ += int(total)
10,091 | import sys
from functools import partial
import numpy as np
import torch
import torch.nn as nn
from maskrcnn_benchmark.layers import *
def rnn_flops(flops, rnn_module, w_ih, w_hh, input_size):
    """Add one timestep's flops for a single recurrent layer to `flops`.

    Counts the input->hidden and hidden->hidden matrix products plus the
    cell-type-specific elementwise gate work. `input_size` is unused but
    kept for call-site compatibility.
    """
    hidden = rnn_module.hidden_size
    # ih and hh matrix-matrix products
    flops += w_ih.shape[0] * w_ih.shape[1] + w_hh.shape[0] * w_hh.shape[1]
    if isinstance(rnn_module, (nn.RNN, nn.RNNCell)):
        # combine both projections
        flops += hidden
    elif isinstance(rnn_module, (nn.GRU, nn.GRUCell)):
        # r hadamard (1h) + state adds (3h) + final blend (3h)
        flops += hidden * 7
    elif isinstance(rnn_module, (nn.LSTM, nn.LSTMCell)):
        # gate adds (4h) + C-state update (3h) + final hadamard (3h)
        flops += hidden * 10
    return flops
The provided code snippet includes necessary dependencies for implementing the `rnn_flops_counter_hook` function. Write a Python function `def rnn_flops_counter_hook(rnn_module, input, output)` to solve the following problem:
Takes into account batch goes at first position, contrary to pytorch common rule (but actually it doesn't matter). IF sigmoid and tanh are made hard, only a comparison FLOPS should be accurate
Here is the function:
def rnn_flops_counter_hook(rnn_module, input, output):
    """
    Takes into account batch goes at first position, contrary
    to pytorch common rule (but actually it doesn't matter).
    IF sigmoid and tanh are made hard, only a comparison FLOPS should be accurate
    """
    # input is a tuple containing a sequence to process and (optionally) hidden state
    inp = input[0]
    batch_size, seq_length = inp.shape[0], inp.shape[1]
    total = 0
    for layer in range(rnn_module.num_layers):
        w_ih = getattr(rnn_module, 'weight_ih_l' + str(layer))
        w_hh = getattr(rnn_module, 'weight_hh_l' + str(layer))
        layer_input_size = (rnn_module.input_size if layer == 0
                            else rnn_module.hidden_size)
        total = rnn_flops(total, rnn_module, w_ih, w_hh, layer_input_size)
        if rnn_module.bias:
            b_ih = getattr(rnn_module, 'bias_ih_l' + str(layer))
            b_hh = getattr(rnn_module, 'bias_hh_l' + str(layer))
            total += b_ih.shape[0] + b_hh.shape[0]
    # per-timestep cost scaled over batch and sequence length
    total *= batch_size * seq_length
    if rnn_module.bidirectional:
        total *= 2
    rnn_module.__flops__ += int(total)
10,092 | import sys
from functools import partial
import numpy as np
import torch
import torch.nn as nn
from maskrcnn_benchmark.layers import *
def rnn_flops(flops, rnn_module, w_ih, w_hh, input_size):
    """Add one timestep's flops for a single recurrent layer to `flops`.

    Counts the ih/hh matrix products plus cell-type-specific elementwise
    gate work; `input_size` is unused (kept for call-site compatibility).
    """
    # matrix matrix mult ih state and internal state
    flops += w_ih.shape[0]*w_ih.shape[1]
    # matrix matrix mult hh state and internal state
    flops += w_hh.shape[0]*w_hh.shape[1]
    if isinstance(rnn_module, (nn.RNN, nn.RNNCell)):
        # add both operations
        flops += rnn_module.hidden_size
    elif isinstance(rnn_module, (nn.GRU, nn.GRUCell)):
        # hadamard of r
        flops += rnn_module.hidden_size
        # adding operations from both states
        flops += rnn_module.hidden_size*3
        # last two hadamard product and add
        flops += rnn_module.hidden_size*3
    elif isinstance(rnn_module, (nn.LSTM, nn.LSTMCell)):
        # adding operations from both states
        flops += rnn_module.hidden_size*4
        # two hadamard product and add for C state
        flops += rnn_module.hidden_size + rnn_module.hidden_size + rnn_module.hidden_size
        # final hadamard
        flops += rnn_module.hidden_size + rnn_module.hidden_size + rnn_module.hidden_size
    return flops
def rnn_cell_flops_counter_hook(rnn_cell_module, input, output):
    """Forward hook for single-step RNN/GRU/LSTM cells: one timestep's flops
    (plus bias adds) scaled by the batch size."""
    inp = input[0]
    total = rnn_flops(0, rnn_cell_module,
                      rnn_cell_module.weight_ih,
                      rnn_cell_module.weight_hh,
                      inp.shape[1])
    if rnn_cell_module.bias:
        total += rnn_cell_module.bias_ih.shape[0] + rnn_cell_module.bias_hh.shape[0]
    total *= inp.shape[0]
    rnn_cell_module.__flops__ += int(total)
10,093 | import numpy as np
import torch
import torch.nn as nn
from collections import OrderedDict
def tf2th(conv_weights):
    """Convert a NumPy weight array to a torch tensor, permuting 4-D conv
    kernels from TF's HWIO layout to PyTorch's OIHW."""
    permuted = (conv_weights.transpose([3, 2, 0, 1])
                if conv_weights.ndim == 4 else conv_weights)
    return torch.from_numpy(permuted)
def _rename_conv_weights_for_deformable_conv_layers(state_dict, cfg):
import re
layer_keys = sorted(state_dict.keys())
for ix, stage_with_dcn in enumerate(cfg.MODEL.RESNETS.STAGE_WITH_DCN, 1):
if not stage_with_dcn:
continue
for old_key in layer_keys:
pattern = ".*block{}.*conv2.*".format(ix)
r = re.match(pattern, old_key)
if r is None:
continue
for param in ["weight", "bias"]:
if old_key.find(param) is -1:
continue
if 'unit01' in old_key:
continue
new_key = old_key.replace(
"conv2.{}".format(param), "conv2.conv.{}".format(param)
)
print("pattern: {}, old_key: {}, new_key: {}".format(
pattern, old_key, new_key
))
# Calculate SD conv weight
w = state_dict[old_key]
v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
w = (w - m) / torch.sqrt(v + 1e-10)
state_dict[new_key] = w
del state_dict[old_key]
return state_dict
def load_big_format(cfg, f):
    """Convert a BiT ("Big Transfer") .npz checkpoint into a maskrcnn-style
    checkpoint dict ``{'model': OrderedDict}``, renaming TF variable paths to
    torch module names and transposing conv kernels HWIO->OIHW via tf2th."""
    model = OrderedDict()
    weights = np.load(f)
    cmap = {'a':1, 'b':2, 'c':3}  # TF conv letter -> conv index within a unit
    for key, val in weights.items():
        old_key = key.replace('resnet/', '')
        if 'root_block' in old_key:
            # the stem conv
            new_key = 'root.conv.weight'
        elif '/proj/standardized_conv2d/kernel' in old_key:
            # projection (downsample) conv of a residual unit
            key_pattern = old_key.replace('/proj/standardized_conv2d/kernel', '').replace('resnet/', '')
            bname, uname, cidx = key_pattern.split('/')
            new_key = '{}.downsample.{}.conv{}.weight'.format(bname,uname,cmap[cidx])
        elif '/standardized_conv2d/kernel' in old_key:
            # regular conv of a residual unit
            key_pattern = old_key.replace('/standardized_conv2d/kernel', '').replace('resnet/', '')
            bname, uname, cidx = key_pattern.split('/')
            new_key = '{}.{}.conv{}.weight'.format(bname,uname,cmap[cidx])
        elif '/group_norm/gamma' in old_key:
            key_pattern = old_key.replace('/group_norm/gamma', '').replace('resnet/', '')
            bname, uname, cidx = key_pattern.split('/')
            new_key = '{}.{}.gn{}.weight'.format(bname,uname,cmap[cidx])
        elif '/group_norm/beta' in old_key:
            key_pattern = old_key.replace('/group_norm/beta', '').replace('resnet/', '')
            bname, uname, cidx = key_pattern.split('/')
            new_key = '{}.{}.gn{}.bias'.format(bname,uname,cmap[cidx])
        else:
            # unrecognized variables are reported and skipped
            print('Unknown key {}'.format(old_key))
            continue
        print('Map {} -> {}'.format(key, new_key))
        model[new_key] = tf2th(val)
    # rename + re-standardize conv2 weights for DCN-enabled stages
    model = _rename_conv_weights_for_deformable_conv_layers(model, cfg)
    return dict(model=model)
10,094 | import argparse
import logging
import torch
import torch.nn as nn
import timeit
from maskrcnn_benchmark.layers import *
from maskrcnn_benchmark.modeling.backbone.resnet_big import StdConv2d
from maskrcnn_benchmark.modeling.backbone.fpn import *
from maskrcnn_benchmark.modeling.rpn.inference import *
from maskrcnn_benchmark.modeling.roi_heads.box_head.inference import PostProcessor
from maskrcnn_benchmark.modeling.rpn.anchor_generator import BufferList
# Maps each layer type to the forward hook used to estimate its FLOP count.
# A value of None means the layer is recognized but deliberately contributes
# no FLOPs to the profile (it is skipped without a warning).
register_hooks = {
    Scale: None,
    Conv2d: count_conv2d,
    nn.Conv2d: count_conv2d,
    ModulatedDeformConv: count_conv2d,
    StdConv2d: count_conv2d,
    nn.BatchNorm1d: count_bn,
    nn.BatchNorm2d: count_bn,
    nn.BatchNorm3d: count_bn,
    FrozenBatchNorm2d: count_bn,
    nn.GroupNorm: count_bn,
    NaiveSyncBatchNorm2d: count_bn,
    nn.ReLU: count_relu,
    nn.ReLU6: count_relu,
    swish: None,
    nn.ConstantPad2d: None,
    SPPLayer: count_LastLevelMaxPool,
    LastLevelMaxPool: count_LastLevelMaxPool,
    nn.MaxPool1d: count_maxpool,
    nn.MaxPool2d: count_maxpool,
    nn.MaxPool3d: count_maxpool,
    nn.AdaptiveMaxPool1d: count_adap_maxpool,
    nn.AdaptiveMaxPool2d: count_adap_maxpool,
    nn.AdaptiveMaxPool3d: count_adap_maxpool,
    nn.AvgPool1d: count_avgpool,
    nn.AvgPool2d: count_avgpool,
    nn.AvgPool3d: count_avgpool,
    nn.AdaptiveAvgPool1d: count_adap_avgpool,
    nn.AdaptiveAvgPool2d: count_adap_avgpool,
    nn.AdaptiveAvgPool3d: count_adap_avgpool,
    nn.Linear: count_linear,
    nn.Upsample: None,
    nn.Dropout: None,
    nn.Sigmoid: None,
    DropBlock2D: None,
    ROIAlign: count_ROIAlign,
    RPNPostProcessor: None,
    PostProcessor: None,
    BufferList: None,
    RetinaPostProcessor: None,
    FCOSPostProcessor: None,
    ATSSPostProcessor: None,
}
def profile(model, input_size, custom_ops={}, device="cpu", verbose=False, extra_args={}, return_time=False):
    """Estimate total FLOPs and parameter count of ``model`` for one forward pass.

    Registers a forward hook on every leaf module (hook chosen from
    ``custom_ops`` first, then the module-level ``register_hooks`` table),
    runs a single zero-filled input of shape ``input_size`` through the
    model, and sums the per-module ``total_ops`` / ``total_params`` buffers
    that the hooks fill in.

    Args:
        model: nn.Module to profile; restored to its original device and
            train/eval state before returning.
        input_size: shape of the dummy input tensor, e.g. (1, 3, 224, 224).
        custom_ops: optional {module_type: hook} overrides; take precedence
            over the built-in ``register_hooks`` table.
        device: device on which to run the dummy forward pass.
        verbose: if True, print each module a FLOP counter is attached to.
        extra_args: extra keyword arguments forwarded to ``model(x, ...)``.
        return_time: if True, also return the wall-clock forward time.

    Returns:
        (total_ops, total_params) or, when ``return_time`` is True,
        (total_ops, total_params, total_time).

    NOTE(review): mutable default arguments (``custom_ops={}``,
    ``extra_args={}``) are shared across calls; they are only read here,
    but callers should not mutate them.
    """
    handler_collection = []
    def add_hooks(m):
        # Only leaf modules get counters, so container modules are never
        # double counted.
        if len(list(m.children())) > 0:
            return
        m.register_buffer('total_ops', torch.zeros(1))
        m.register_buffer('total_params', torch.zeros(1))
        for p in m.parameters():
            m.total_params += torch.Tensor([p.numel()])
        m_type = type(m)
        fn = None
        if m_type in custom_ops:
            fn = custom_ops[m_type]
        elif m_type in register_hooks:
            fn = register_hooks[m_type]
        else:
            # Unknown layer type: its FLOPs are simply not counted.
            print("Not implemented for ", m)
        if fn is not None:
            if verbose:
                print("Register FLOP counter for module %s" % str(m))
            handler = m.register_forward_hook(fn)
            handler_collection.append(handler)
    # Remember original device/mode so the model can be restored afterwards.
    original_device = model.parameters().__next__().device
    training = model.training
    model.eval().to(device)
    model.apply(add_hooks)
    x = torch.zeros(input_size).to(device)
    with torch.no_grad():
        tic = timeit.time.perf_counter()
        model(x, **extra_args)
        toc = timeit.time.perf_counter()
        total_time = toc-tic
    # Accumulate the per-leaf counters the hooks filled in.
    total_ops = 0
    total_params = 0
    for m in model.modules():
        if len(list(m.children())) > 0:  # skip for non-leaf module
            continue
        total_ops += m.total_ops
        total_params += m.total_params
    total_ops = total_ops.item()
    total_params = total_params.item()
    # Restore original training mode and device, then detach all hooks.
    model.train(training).to(original_device)
    for handler in handler_collection:
        handler.remove()
    if return_time:
        return total_ops, total_params, total_time
    else:
        return total_ops, total_params
10,095 | import argparse
import logging
import torch
import torch.nn as nn
import timeit
from maskrcnn_benchmark.layers import *
from maskrcnn_benchmark.modeling.backbone.resnet_big import StdConv2d
from maskrcnn_benchmark.modeling.backbone.fpn import *
from maskrcnn_benchmark.modeling.rpn.inference import *
from maskrcnn_benchmark.modeling.roi_heads.box_head.inference import PostProcessor
from maskrcnn_benchmark.modeling.rpn.anchor_generator import BufferList
# 1 => count a fused multiply-add as a single operation.
multiply_adds = 1
def count_conv2d(m, x, y):
    """Forward hook: store the FLOP estimate of a 2d convolution in m.total_ops."""
    inp = x[0]
    # Multiply-adds needed for one output element, plus one add if biased.
    per_element = multiply_adds * m.kernel_size[0] * m.kernel_size[1] * m.in_channels // m.groups
    if m.bias is not None:
        per_element = per_element + 1
    # Output elements: N * H_out * W_out * C_out.
    n_out = inp.size()[0] * y.size(2) * y.size(3) * m.out_channels
    m.total_ops = torch.Tensor([int(n_out * per_element)])
10,096 | import argparse
import logging
import torch
import torch.nn as nn
import timeit
from maskrcnn_benchmark.layers import *
from maskrcnn_benchmark.modeling.backbone.resnet_big import StdConv2d
from maskrcnn_benchmark.modeling.backbone.fpn import *
from maskrcnn_benchmark.modeling.rpn.inference import *
from maskrcnn_benchmark.modeling.roi_heads.box_head.inference import PostProcessor
from maskrcnn_benchmark.modeling.rpn.anchor_generator import BufferList
multiply_adds = 1
def count_convtranspose2d(m, x, y):
    """Forward hook: store the FLOP estimate of a transposed 2d convolution
    in ``m.total_ops``.

    Uses the weight-element approximation: every output element costs one
    multiply-add per weight element, i.e.
    ``total_ops = y.nelement() * m.weight.nelement()``.
    Bias additions are not counted.

    Args:
        m: the ConvTranspose2d-like module being profiled.
        x: tuple of hook inputs (unused).
        y: output tensor of the module.

    Note: the original implementation first computed a per-element kernel
    cost from in_channels/kernel_size/groups and then immediately discarded
    it in favor of this weight-element estimate; the dead computation has
    been removed.
    """
    # Ops per output element = number of weight elements (multiply-adds).
    ops_per_element = m.weight.nelement()
    output_elements = y.nelement()
    total_ops = output_elements * ops_per_element
    m.total_ops = torch.Tensor([int(total_ops)])
10,097 | import argparse
import logging
import torch
import torch.nn as nn
import timeit
from maskrcnn_benchmark.layers import *
from maskrcnn_benchmark.modeling.backbone.resnet_big import StdConv2d
from maskrcnn_benchmark.modeling.backbone.fpn import *
from maskrcnn_benchmark.modeling.rpn.inference import *
from maskrcnn_benchmark.modeling.roi_heads.box_head.inference import PostProcessor
from maskrcnn_benchmark.modeling.rpn.anchor_generator import BufferList
def count_bn(m, x, y):
    """Forward hook: FLOPs of a normalization layer -> m.total_ops."""
    # Four elementwise passes: subtract mean, divide by std, scale, shift.
    n = x[0].numel()
    m.total_ops = torch.Tensor([int(4 * n)])
10,098 | import argparse
import logging
import torch
import torch.nn as nn
import timeit
from maskrcnn_benchmark.layers import *
from maskrcnn_benchmark.modeling.backbone.resnet_big import StdConv2d
from maskrcnn_benchmark.modeling.backbone.fpn import *
from maskrcnn_benchmark.modeling.rpn.inference import *
from maskrcnn_benchmark.modeling.roi_heads.box_head.inference import PostProcessor
from maskrcnn_benchmark.modeling.rpn.anchor_generator import BufferList
def count_relu(m, x, y):
    """Forward hook: one op per input element -> m.total_ops."""
    m.total_ops = torch.Tensor([int(x[0].numel())])
10,099 | import argparse
import logging
import torch
import torch.nn as nn
import timeit
from maskrcnn_benchmark.layers import *
from maskrcnn_benchmark.modeling.backbone.resnet_big import StdConv2d
from maskrcnn_benchmark.modeling.backbone.fpn import *
from maskrcnn_benchmark.modeling.rpn.inference import *
from maskrcnn_benchmark.modeling.roi_heads.box_head.inference import PostProcessor
from maskrcnn_benchmark.modeling.rpn.anchor_generator import BufferList
def count_softmax(m, x, y):
    """Forward hook: FLOPs of a softmax over the last dim of a 2d input."""
    batch_size, nfeatures = x[0].size()
    # One exp per feature, (n-1) adds for the sum, n divisions.
    per_row = nfeatures + (nfeatures - 1) + nfeatures
    m.total_ops = torch.Tensor([int(batch_size * per_row)])
10,100 | import argparse
import logging
import torch
import torch.nn as nn
import timeit
from maskrcnn_benchmark.layers import *
from maskrcnn_benchmark.modeling.backbone.resnet_big import StdConv2d
from maskrcnn_benchmark.modeling.backbone.fpn import *
from maskrcnn_benchmark.modeling.rpn.inference import *
from maskrcnn_benchmark.modeling.roi_heads.box_head.inference import PostProcessor
from maskrcnn_benchmark.modeling.rpn.anchor_generator import BufferList
def count_maxpool(m, x, y):
    """Forward hook: one comparison per kernel element per output element."""
    kernel_ops = torch.prod(torch.Tensor([m.kernel_size]))
    m.total_ops = torch.Tensor([int(kernel_ops * y.numel())])
10,101 | import argparse
import logging
import torch
import torch.nn as nn
import timeit
from maskrcnn_benchmark.layers import *
from maskrcnn_benchmark.modeling.backbone.resnet_big import StdConv2d
from maskrcnn_benchmark.modeling.backbone.fpn import *
from maskrcnn_benchmark.modeling.rpn.inference import *
from maskrcnn_benchmark.modeling.roi_heads.box_head.inference import PostProcessor
from maskrcnn_benchmark.modeling.rpn.anchor_generator import BufferList
def count_adap_maxpool(m, x, y):
    """Forward hook: adaptive max-pool FLOPs, with the effective kernel
    approximated as input_spatial // output_size."""
    spatial = torch.Tensor([*(x[0].shape[2:])])
    out_size = torch.Tensor(list((m.output_size,))).squeeze()
    kernel = spatial // out_size
    m.total_ops = torch.Tensor([int(torch.prod(kernel) * y.numel())])
10,102 | import argparse
import logging
import torch
import torch.nn as nn
import timeit
from maskrcnn_benchmark.layers import *
from maskrcnn_benchmark.modeling.backbone.resnet_big import StdConv2d
from maskrcnn_benchmark.modeling.backbone.fpn import *
from maskrcnn_benchmark.modeling.rpn.inference import *
from maskrcnn_benchmark.modeling.roi_heads.box_head.inference import PostProcessor
from maskrcnn_benchmark.modeling.rpn.anchor_generator import BufferList
def count_avgpool(m, x, y):
    """Forward hook: one add per kernel element plus one divide, per output."""
    adds = torch.prod(torch.Tensor([m.kernel_size]))
    m.total_ops = torch.Tensor([int((adds + 1) * y.numel())])
10,103 | import argparse
import logging
import torch
import torch.nn as nn
import timeit
from maskrcnn_benchmark.layers import *
from maskrcnn_benchmark.modeling.backbone.resnet_big import StdConv2d
from maskrcnn_benchmark.modeling.backbone.fpn import *
from maskrcnn_benchmark.modeling.rpn.inference import *
from maskrcnn_benchmark.modeling.roi_heads.box_head.inference import PostProcessor
from maskrcnn_benchmark.modeling.rpn.anchor_generator import BufferList
def count_adap_avgpool(m, x, y):
    """Forward hook: adaptive avg-pool FLOPs, with the effective kernel
    approximated as input_spatial // output_size (plus one divide)."""
    spatial = torch.Tensor([*(x[0].shape[2:])])
    out_size = torch.Tensor(list((m.output_size,))).squeeze()
    kernel = spatial // out_size
    per_out = torch.prod(kernel) + 1
    m.total_ops = torch.Tensor([int(per_out * y.numel())])
10,104 | import argparse
import logging
import torch
import torch.nn as nn
import timeit
from maskrcnn_benchmark.layers import *
from maskrcnn_benchmark.modeling.backbone.resnet_big import StdConv2d
from maskrcnn_benchmark.modeling.backbone.fpn import *
from maskrcnn_benchmark.modeling.rpn.inference import *
from maskrcnn_benchmark.modeling.roi_heads.box_head.inference import PostProcessor
from maskrcnn_benchmark.modeling.rpn.anchor_generator import BufferList
def count_linear(m, x, y):
    """Forward hook: FLOPs of a fully connected layer -> m.total_ops."""
    # in_features multiplies and in_features - 1 adds per output element.
    ops_per_out = m.in_features + (m.in_features - 1)
    m.total_ops = torch.Tensor([int(ops_per_out * y.numel())])
10,105 | import argparse
import logging
import torch
import torch.nn as nn
import timeit
from maskrcnn_benchmark.layers import *
from maskrcnn_benchmark.modeling.backbone.resnet_big import StdConv2d
from maskrcnn_benchmark.modeling.backbone.fpn import *
from maskrcnn_benchmark.modeling.rpn.inference import *
from maskrcnn_benchmark.modeling.roi_heads.box_head.inference import PostProcessor
from maskrcnn_benchmark.modeling.rpn.anchor_generator import BufferList
def count_LastLevelMaxPool(m, x, y):
    """Forward hook: approximate cost as one op per element of the last output."""
    m.total_ops = torch.Tensor([int(y[-1].numel())])
10,106 | import argparse
import logging
import torch
import torch.nn as nn
import timeit
from maskrcnn_benchmark.layers import *
from maskrcnn_benchmark.modeling.backbone.resnet_big import StdConv2d
from maskrcnn_benchmark.modeling.backbone.fpn import *
from maskrcnn_benchmark.modeling.rpn.inference import *
from maskrcnn_benchmark.modeling.roi_heads.box_head.inference import PostProcessor
from maskrcnn_benchmark.modeling.rpn.anchor_generator import BufferList
def count_ROIAlign(m, x, y):
    """Forward hook: four ops (bilinear interpolation) per output element."""
    m.total_ops = torch.Tensor([int(4 * y.numel())])
10,107 | import pickle
import time
import functools
import logging
import torch
import torch.distributed as dist
import numpy as np
def get_world_size():
    """Number of distributed processes; 1 when torch.distributed is
    unavailable or not initialized."""
    if not (dist.is_available() and dist.is_initialized()):
        return 1
    return dist.get_world_size()
def get_rank():
    """Rank of this process; 0 when torch.distributed is unavailable or
    not initialized."""
    if not (dist.is_available() and dist.is_initialized()):
        return 0
    return dist.get_rank()
The provided code snippet includes necessary dependencies for implementing the `reduce_dict` function. Write a Python function `def reduce_dict(input_dict, average=True)` to solve the following problem:
Args: input_dict (dict): all the values will be reduced average (bool): whether to do average or sum Reduce the values in the dictionary from all processes so that process with rank 0 has the averaged results. Returns a dict with the same fields as input_dict, after reduction.
Here is the function:
def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that process
    with rank 0 has the averaged results. Returns a dict with the same
    fields as input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        # Sort keys so every process stacks values in the same order.
        names = sorted(input_dict.keys())
        values = torch.stack([input_dict[k] for k in names], dim=0)
        dist.reduce(values, dst=0)
        if dist.get_rank() == 0 and average:
            # Only rank 0 holds the accumulated sum, so only it divides.
            values /= world_size
        reduced_dict = dict(zip(names, values))
    return reduced_dict
10,108 | import pickle
import time
import functools
import logging
import torch
import torch.distributed as dist
import numpy as np
def get_world_size():
    """Number of distributed processes (1 when not running distributed)."""
    usable = dist.is_available() and dist.is_initialized()
    return dist.get_world_size() if usable else 1
def reduce_sum(tensor):
    """All-reduce (sum) a tensor across processes; returns the input
    unchanged in the single-process case."""
    if get_world_size() <= 1:
        return tensor
    summed = tensor.clone()  # do not mutate the caller's tensor
    dist.all_reduce(summed, op=dist.ReduceOp.SUM)
    return summed
10,109 | import pickle
import time
import functools
import logging
import torch
import torch.distributed as dist
import numpy as np
def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    world_size = get_world_size()
    if world_size == 1:
        return [data]
    # serialized to a Tensor
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to("cuda")
    # obtain Tensor size of each rank
    local_size = torch.LongTensor([tensor.numel()]).to("cuda")
    size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)
    # receiving Tensor from all ranks
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
    if local_size != max_size:
        # NOTE(review): the pad bytes are uninitialized, which is harmless
        # because each buffer is truncated to its true size before unpickling.
        padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)
    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        # Truncate each buffer back to its true length before unpickling.
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))
    return data_list
The provided code snippet includes necessary dependencies for implementing the `shared_random_seed` function. Write a Python function `def shared_random_seed()` to solve the following problem:
Returns: int: a random number that is the same across all workers. If workers need a shared RNG, they can use this shared seed to create one. All workers must call this function, otherwise it will deadlock.
Here is the function:
def shared_random_seed():
    """
    Returns:
        int: a random number that is the same across all workers.
    If workers need a shared RNG, they can use this shared seed to
    create one.
    All workers must call this function, otherwise it will deadlock.
    """
    local_seed = np.random.randint(2 ** 31)
    # Every worker adopts the draw made by rank 0.
    return all_gather(local_seed)[0]
10,111 | import torch
import maskrcnn_benchmark.utils.dist as dist
def normalized_positive_map(positive_map):
    """Row-normalize a (batch, boxes, tokens) positive map so that each
    box's token weights sum to 1; all-zero rows stay (numerically) zero."""
    pm = positive_map.float()
    denom = pm.sum(2)
    # Guard empty rows against division by zero.
    denom[denom == 0] = 1e-6
    return pm / denom.unsqueeze(-1)
10,112 | import torch
import maskrcnn_benchmark.utils.dist as dist
def pad_tensor_given_dim_length(tensor, dim, length, padding_value=0, batch_first=True):
    """Return a copy of ``tensor`` whose ``dim``-th size is grown to
    ``length``, filled with ``padding_value`` past the original extent.

    With batch_first=True the original data occupies [:, :size(1)] (i.e.
    the pad is appended along dim 1); otherwise it occupies [:size(0)].
    """
    shape = list(tensor.size())
    shape[dim] = length
    padded = tensor.data.new(*shape).fill_(padding_value)
    if batch_first:
        padded[:, :tensor.size(1), ...] = tensor
    else:
        padded[:tensor.size(0), ...] = tensor
    return padded
10,113 | import torch
import maskrcnn_benchmark.utils.dist as dist
def pad_random_negative_tensor_given_length(positive_tensor, negative_padding_tensor, length=None):
    """Concatenate positive rows with negative padding rows; the combined
    row count must equal ``length``."""
    total_rows = positive_tensor.shape[0] + negative_padding_tensor.shape[0]
    assert total_rows == length
    return torch.cat((positive_tensor, negative_padding_tensor), dim=0)
10,114 | import torch
import maskrcnn_benchmark.utils.dist as dist
The provided code snippet includes necessary dependencies for implementing the `gather_tensors` function. Write a Python function `def gather_tensors(tensor)` to solve the following problem:
Performs all_gather operation on the provided tensors. *** Warning ***: torch.distributed.all_gather has no gradient.
Here is the function:
def gather_tensors(tensor):
    """
    Performs all_gather operation on the provided tensors.
    *** Warning ***: torch.distributed.all_gather has no gradient.

    Returns a tensor of shape (world_size, *tensor.shape); in the
    non-distributed case, the input with a leading singleton dimension.
    """
    # ``dist`` here is maskrcnn_benchmark.utils.dist, not torch.distributed.
    if not dist.is_dist_avail_and_initialized():
        return torch.stack([tensor], dim=0)
    total = dist.get_world_size()
    rank = torch.distributed.get_rank()
    # gathered_normalized_img_emb = [torch.zeros_like(normalized_img_emb) for _ in range(total)]
    # torch.distributed.all_gather(gathered_normalized_img_emb, normalized_img_emb)
    tensors_gather = [
        torch.zeros_like(tensor)
        for _ in range(total)
    ]
    torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
    # need to do this to restore propagation of the gradients:
    # all_gather returns copies without autograd history, so this rank's
    # slot is replaced with the original (differentiable) tensor.
    tensors_gather[rank] = tensor
    output = torch.stack(tensors_gather, dim=0)
    return output
10,115 | import torch
import maskrcnn_benchmark.utils.dist as dist
def convert_to_roi_format(boxes):
    """Turn a BoxList into the (N, 5) ROI layout [batch_idx, x1, y1, x2, y2];
    the batch index column is always 0 (single-image input)."""
    box_tensor = boxes.bbox
    batch_col = torch.full(
        (len(boxes), 1), 0, dtype=box_tensor.dtype, device=box_tensor.device
    )
    return torch.cat([batch_col, box_tensor], dim=1)
10,116 | import os
import sys
from maskrcnn_benchmark.utils.comm import is_main_process
from maskrcnn_benchmark.utils.comm import synchronize
def synchronize():
    """
    Helper function to synchronize (barrier) among all processes when
    using distributed training
    """
    # No-op when torch.distributed is unusable or only one process runs.
    if not dist.is_available():
        return
    if not dist.is_initialized():
        return
    world_size = dist.get_world_size()
    if world_size == 1:
        return
    dist.barrier()
The provided code snippet includes necessary dependencies for implementing the `cache_url` function. Write a Python function `def cache_url(url, model_dir='model', progress=True)` to solve the following problem:
r"""Loads the Torch serialized object at the given URL. If the object is already present in `model_dir`, it's deserialized and returned. The filename part of the URL should follow the naming convention ``filename-<sha256>.ext`` where ``<sha256>`` is the first eight or more digits of the SHA256 hash of the contents of the file. The hash is used to ensure unique names and to verify the contents of the file. The default value of `model_dir` is ``$TORCH_HOME/models`` where ``$TORCH_HOME`` defaults to ``~/.torch``. The default directory can be overridden with the ``$TORCH_MODEL_ZOO`` environment variable. Args: url (string): URL of the object to download model_dir (string, optional): directory in which to save the object progress (bool, optional): whether or not to display a progress bar to stderr Example: >>> cached_file = maskrcnn_benchmark.utils.model_zoo.cache_url('https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth')
Here is the function:
def cache_url(url, model_dir='model', progress=True):
    r"""Loads the Torch serialized object at the given URL.
    If the object is already present in `model_dir`, it's deserialized and
    returned. The filename part of the URL should follow the naming convention
    ``filename-<sha256>.ext`` where ``<sha256>`` is the first eight or more
    digits of the SHA256 hash of the contents of the file. The hash is used to
    ensure unique names and to verify the contents of the file.
    The default value of `model_dir` is ``$TORCH_HOME/models`` where
    ``$TORCH_HOME`` defaults to ``~/.torch``. The default directory can be
    overridden with the ``$TORCH_MODEL_ZOO`` environment variable.
    Args:
        url (string): URL of the object to download
        model_dir (string, optional): directory in which to save the object
        progress (bool, optional): whether or not to display a progress bar to stderr
    Returns:
        str: local filesystem path of the cached file.
    Example:
        >>> cached_file = maskrcnn_benchmark.utils.model_zoo.cache_url('https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth')
    """
    if model_dir is None:
        torch_home = os.path.expanduser(os.getenv("TORCH_HOME", "~/.torch"))
        model_dir = os.getenv("TORCH_MODEL_ZOO", os.path.join(torch_home, "models"))
    if not os.path.exists(model_dir):
        os.makedirs(model_dir, exist_ok=True)
    parts = urlparse(url)
    filename = os.path.basename(parts.path)
    if filename == "model_final.pkl":
        # workaround as pre-trained Caffe2 models from Detectron have all the same filename
        # so make the full path the filename by replacing / with _
        filename = parts.path.replace("/", "_")
    cached_file = os.path.join(model_dir, filename)
    if not os.path.exists(cached_file):
        # Download only on cache miss; HASH_REGEX / _download_url_to_file
        # are defined elsewhere in this module.
        sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
        hash_prefix = HASH_REGEX.search(filename)
        if hash_prefix is not None:
            hash_prefix = hash_prefix.group(1)
            # workaround: Caffe2 models don't have a hash, but follow the R-50 convention,
            # which matches the hash PyTorch uses. So we skip the hash matching
            # if the hash_prefix is less than 6 characters
            if len(hash_prefix) < 6:
                hash_prefix = None
        _download_url_to_file(url, cached_file, hash_prefix, progress=progress)
    # Barrier so other ranks only proceed once the file exists on disk.
    synchronize()
    return cached_file
10,117 | import functools
import io
import os
import torch
import torch.distributed as dist
def get_world_size():
    """
    Returns:
        The number of processes in the process group
    """
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1
The provided code snippet includes necessary dependencies for implementing the `reduce_dict` function. Write a Python function `def reduce_dict(input_dict, average=True)` to solve the following problem:
Args: input_dict (dict): all the values will be reduced average (bool): whether to do average or sum Reduce the values in the dictionary from all processes so that all processes have the averaged results. Returns a dict with the same fields as input_dict, after reduction.
Here is the function:
def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that all
    processes have the averaged results. Returns a dict with the same
    fields as input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        # Sort keys so every process stacks values in the same order.
        names = sorted(input_dict.keys())
        values = torch.stack([input_dict[k] for k in names], dim=0)
        dist.all_reduce(values)
        if average:
            values /= world_size
        reduced = dict(zip(names, values))
    return reduced
10,118 | import functools
import io
import os
import torch
import torch.distributed as dist
_LOCAL_PROCESS_GROUP = None
def get_rank():
    """
    Returns:
        The rank of the current process within the global process group.
    """
    return dist.get_rank() if is_dist_avail_and_initialized() else 0
The provided code snippet includes necessary dependencies for implementing the `get_local_rank` function. Write a Python function `def get_local_rank() -> int` to solve the following problem:
Returns: The rank of the current process within the local (per-machine) process group.
Here is the function:
def get_local_rank() -> int:
    """
    Returns:
        The rank of the current process within the local (per-machine)
        process group; 0 when torch.distributed is not in use.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return 0
    assert _LOCAL_PROCESS_GROUP is not None
    return dist.get_rank(group=_LOCAL_PROCESS_GROUP)
10,119 | import functools
import io
import os
import torch
import torch.distributed as dist
_LOCAL_PROCESS_GROUP = None
def get_world_size():
    """
    Returns:
        The number of processes in the process group
    """
    if is_dist_avail_and_initialized():
        return dist.get_world_size()
    return 1
The provided code snippet includes necessary dependencies for implementing the `get_local_size` function. Write a Python function `def get_local_size() -> int` to solve the following problem:
Returns: The size of the per-machine process group, i.e. the number of processes per machine.
Here is the function:
def get_local_size() -> int:
    """
    Returns:
        The size of the per-machine process group, i.e. the number of
        processes per machine; 1 when torch.distributed is not in use.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return 1
    return dist.get_world_size(group=_LOCAL_PROCESS_GROUP)
10,120 | import functools
import io
import os
import torch
import torch.distributed as dist
def is_main_process():
    """Whether this process is rank 0 (the main process)."""
    rank = get_rank()
    return rank == 0
The provided code snippet includes necessary dependencies for implementing the `save_on_master` function. Write a Python function `def save_on_master(*args, **kwargs)` to solve the following problem:
Utility function to save only from the main process
Here is the function:
def save_on_master(*args, **kwargs):
    """Utility function to save only from the main process.

    Thin wrapper over ``torch.save``: forwards all arguments unchanged,
    but is a no-op on non-main ranks so files are written exactly once.
    """
    if is_main_process():
        torch.save(*args, **kwargs)
10,121 | import functools
import io
import os
import torch
import torch.distributed as dist
def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process.

    Globally replaces ``builtins.print``: on non-master ranks output is
    suppressed unless the caller passes ``force=True``.
    """
    import builtins as __builtin__
    builtin_print = __builtin__.print
    def print(*args, **kwargs):
        # ``force=True`` lets any rank print regardless of master status.
        force = kwargs.pop("force", False)
        if is_master or force:
            builtin_print(*args, **kwargs)
    __builtin__.print = print
The provided code snippet includes necessary dependencies for implementing the `init_distributed_mode` function. Write a Python function `def init_distributed_mode(args)` to solve the following problem:
Initialize distributed training, if appropriate
Here is the function:
def init_distributed_mode(args):
    """Initialize distributed training, if appropriate.

    Populates ``args.rank`` / ``args.world_size`` / ``args.gpu`` from
    torchrun-style env vars (RANK/WORLD_SIZE/LOCAL_RANK) or from SLURM
    (SLURM_PROCID), sets ``args.distributed``, selects the CUDA device,
    initializes the NCCL process group, and silences print() on non-zero
    ranks. Falls back to non-distributed mode when no launcher is detected.

    NOTE(review): assumes ``args.dist_url`` is already set when a launcher
    is detected — confirm at the call site.
    NOTE(review): the SLURM branch does not set ``args.world_size``;
    ``init_process_group`` will fail unless it is provided elsewhere.
    """
    if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ["WORLD_SIZE"])
        args.gpu = int(os.environ["LOCAL_RANK"])
    elif "SLURM_PROCID" in os.environ:
        # SLURM: derive the local GPU index from the global proc id.
        args.rank = int(os.environ["SLURM_PROCID"])
        args.gpu = args.rank % torch.cuda.device_count()
    else:
        print("Not using distributed mode")
        args.distributed = False
        return
    args.distributed = True
    torch.cuda.set_device(args.gpu)
    args.dist_backend = "nccl"
    print("| distributed init (rank {}): {}".format(args.rank, args.dist_url), flush=True)
    dist.init_process_group(
        backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank
    )
    dist.barrier()
    # Suppress print() on all ranks except rank 0.
    setup_for_distributed(args.rank == 0)
10,122 | import functools
import io
import os
import datetime
import torch
import torch.distributed as dist
def get_world_size():
    """
    Returns:
        The number of processes in the process group
    """
    if not is_dist_avail_and_initialized():
        # Single-process fallback.
        return 1
    return dist.get_world_size()
The provided code snippet includes necessary dependencies for implementing the `reduce_dict` function. Write a Python function `def reduce_dict(input_dict, average=True)` to solve the following problem:
Args: input_dict (dict): all the values will be reduced average (bool): whether to do average or sum Reduce the values in the dictionary from all processes so that all processes have the averaged results. Returns a dict with the same fields as input_dict, after reduction.
Here is the function:
def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that all
    processes have the averaged results. Returns a dict with the same
    fields as input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        # Stable key order guarantees identical stacking on every process.
        names = sorted(input_dict.keys())
        values = torch.stack([input_dict[key] for key in names], dim=0)
        dist.all_reduce(values)
        if average:
            values /= world_size
        result = dict(zip(names, values))
    return result
10,123 | import functools
import io
import os
import datetime
import torch
import torch.distributed as dist
_LOCAL_PROCESS_GROUP = None
def get_rank():
"""
Returns:
The rank of the current process within the global process group.
"""
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
The provided code snippet includes necessary dependencies for implementing the `get_local_rank` function. Write a Python function `def get_local_rank() -> int` to solve the following problem:
Returns: The rank of the current process within the local (per-machine) process group.
Here is the function:
def get_local_rank() -> int:
"""
Returns:
The rank of the current process within the local (per-machine) process group.
"""
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
assert _LOCAL_PROCESS_GROUP is not None
return dist.get_rank(group=_LOCAL_PROCESS_GROUP) | Returns: The rank of the current process within the local (per-machine) process group. |
10,124 | import functools
import io
import os
import datetime
import torch
import torch.distributed as dist
_LOCAL_PROCESS_GROUP = None
def get_world_size():
"""
Returns:
The number of processes in the process group
"""
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
The provided code snippet includes necessary dependencies for implementing the `get_local_size` function. Write a Python function `def get_local_size() -> int` to solve the following problem:
Returns: The size of the per-machine process group, i.e. the number of processes per machine.
Here is the function:
def get_local_size() -> int:
"""
Returns:
The size of the per-machine process group,
i.e. the number of processes per machine.
"""
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size(group=_LOCAL_PROCESS_GROUP) | Returns: The size of the per-machine process group, i.e. the number of processes per machine. |
10,125 | import functools
import io
import os
import datetime
import torch
import torch.distributed as dist
def is_main_process():
"""Return true if the current process is the main one"""
return get_rank() == 0
The provided code snippet includes necessary dependencies for implementing the `save_on_master` function. Write a Python function `def save_on_master(*args, **kwargs)` to solve the following problem:
Utility function to save only from the main process
Here is the function:
def save_on_master(*args, **kwargs):
"""Utility function to save only from the main process"""
if is_main_process():
torch.save(*args, **kwargs) | Utility function to save only from the main process |
10,126 | import functools
import io
import os
import datetime
import torch
import torch.distributed as dist
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop("force", False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
The provided code snippet includes necessary dependencies for implementing the `init_distributed_mode` function. Write a Python function `def init_distributed_mode(args)` to solve the following problem:
Initialize distributed training, if appropriate
Here is the function:
def init_distributed_mode(args):
"""Initialize distributed training, if appropriate"""
if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ["WORLD_SIZE"])
args.gpu = int(os.environ["LOCAL_RANK"])
elif "SLURM_PROCID" in os.environ:
args.rank = int(os.environ["SLURM_PROCID"])
args.gpu = args.rank % torch.cuda.device_count()
else:
print("Not using distributed mode")
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = "nccl"
print("| distributed init (rank {}): {}".format(args.rank, args.dist_url), flush=True)
dist.init_process_group(
backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank,
timeout=datetime.timedelta(0, 7200)
)
dist.barrier()
setup_for_distributed(args.debug or args.rank == 0) | Initialize distributed training, if appropriate |
10,127 | import errno
import os
from .comm import is_main_process
def mkdir(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise | null |
10,128 | import errno
import os
from .comm import is_main_process
def is_main_process():
return get_rank() == 0
def save_config(cfg, path):
if is_main_process():
with open(path, 'w') as f:
f.write(cfg.dump()) | null |
10,129 | from contextlib import contextmanager
def nullcontext(enter_result=None, **kwargs):
yield enter_result | null |
10,130 | from collections import OrderedDict, defaultdict
import logging
import math
import torch
from maskrcnn_benchmark.utils.imports import import_file
def align_and_update_state_dicts(model_state_dict, loaded_state_dict, reshape_keys=['pos_bias_table'], use_weightmap=False):
def strip_prefix_if_present(state_dict, prefix):
def load_state_dict(model, loaded_state_dict):
model_state_dict = model.state_dict()
# if the state_dict comes from a model that was wrapped in a
# DataParallel or DistributedDataParallel during serialization,
# remove the "module" prefix before performing the matching
loaded_state_dict = strip_prefix_if_present(loaded_state_dict, prefix="module.")
align_and_update_state_dicts(model_state_dict, loaded_state_dict)
# use strict loading
model.load_state_dict(model_state_dict) | null |
10,131 | import numpy as np
import torch
import torch.nn as nn
from collections import OrderedDict
def _remove_bn_statics(state_dict):
layer_keys = sorted(state_dict.keys())
remove_list = []
for key in layer_keys:
if 'running_mean' in key or 'running_var' in key or 'num_batches_tracked' in key:
remove_list.append(key)
for key in remove_list:
del state_dict[key]
return state_dict
def _rename_conv_weights_for_deformable_conv_layers(state_dict, cfg):
import re
layer_keys = sorted(state_dict.keys())
for ix, stage_with_dcn in enumerate(cfg.MODEL.RESNETS.STAGE_WITH_DCN, 1):
if not stage_with_dcn:
continue
for old_key in layer_keys:
pattern = ".*layer{}.*conv2.*".format(ix)
r = re.match(pattern, old_key)
if r is None:
continue
for param in ["weight", "bias"]:
if old_key.find(param) is -1:
continue
if 'unit01' in old_key:
continue
new_key = old_key.replace(
"conv2.{}".format(param), "conv2.conv.{}".format(param)
)
print("pattern: {}, old_key: {}, new_key: {}".format(
pattern, old_key, new_key
))
state_dict[new_key] = state_dict[old_key]
del state_dict[old_key]
return state_dict
def load_pretrain_format(cfg, f):
model = torch.load(f)
model = _remove_bn_statics(model)
model = _rename_conv_weights_for_deformable_conv_layers(model, cfg)
return dict(model=model) | null |
10,132 | import torch
def import_file(module_name, file_path, make_importable=False):
spec = importlib.util.spec_from_file_location(module_name, file_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
if make_importable:
sys.modules[module_name] = module
return module | null |
10,133 | import torch
def import_file(module_name, file_path, make_importable=None):
module = imp.load_source(module_name, file_path)
return module | null |
10,134 | import logging
import os
import sys
def setup_logger(name, save_dir, distributed_rank):
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
# don't log results for the non-master process
if distributed_rank > 0:
return logger
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
if save_dir:
fh = logging.FileHandler(os.path.join(save_dir, "log.txt"))
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger | null |
10,135 | import logging
import pickle
from collections import OrderedDict
import torch
from maskrcnn_benchmark.utils.model_serialization import load_state_dict
from maskrcnn_benchmark.utils.registry import Registry
def _rename_weights_for_resnet(weights, stage_names):
original_keys = sorted(weights.keys())
layer_keys = sorted(weights.keys())
# for X-101, rename output to fc1000 to avoid conflicts afterwards
layer_keys = [k if k != "pred_b" else "fc1000_b" for k in layer_keys]
layer_keys = [k if k != "pred_w" else "fc1000_w" for k in layer_keys]
# performs basic renaming: _ -> . , etc
layer_keys = _rename_basic_resnet_weights(layer_keys)
# FPN
layer_keys = _rename_fpn_weights(layer_keys, stage_names)
# Mask R-CNN
layer_keys = [k.replace("mask.fcn.logits", "mask_fcn_logits") for k in layer_keys]
layer_keys = [k.replace(".[mask].fcn", "mask_fcn") for k in layer_keys]
layer_keys = [k.replace("conv5.mask", "conv5_mask") for k in layer_keys]
# Keypoint R-CNN
layer_keys = [k.replace("kps.score.lowres", "kps_score_lowres") for k in layer_keys]
layer_keys = [k.replace("kps.score", "kps_score") for k in layer_keys]
layer_keys = [k.replace("conv.fcn", "conv_fcn") for k in layer_keys]
# Rename for our RPN structure
layer_keys = [k.replace("rpn.", "rpn.head.") for k in layer_keys]
key_map = {k: v for k, v in zip(original_keys, layer_keys)}
logger = logging.getLogger(__name__)
logger.info("Remapping C2 weights")
max_c2_key_size = max([len(k) for k in original_keys if "_momentum" not in k])
new_weights = OrderedDict()
for k in original_keys:
v = weights[k]
if "_momentum" in k:
continue
if 'weight_order' in k:
continue
# if 'fc1000' in k:
# continue
w = torch.from_numpy(v)
# if "bn" in k:
# w = w.view(1, -1, 1, 1)
logger.info("C2 name: {: <{}} mapped name: {}".format(k, max_c2_key_size, key_map[k]))
new_weights[key_map[k]] = w
return new_weights
def _load_c2_pickled_weights(file_path):
with open(file_path, "rb") as f:
if torch._six.PY3:
data = pickle.load(f, encoding="latin1")
else:
data = pickle.load(f)
if "blobs" in data:
weights = data["blobs"]
else:
weights = data
return weights
def _rename_conv_weights_for_deformable_conv_layers(state_dict, cfg):
import re
logger = logging.getLogger(__name__)
logger.info("Remapping conv weights for deformable conv weights")
layer_keys = sorted(state_dict.keys())
for ix, stage_with_dcn in enumerate(cfg.MODEL.RESNETS.STAGE_WITH_DCN, 1):
if not stage_with_dcn:
continue
for old_key in layer_keys:
pattern = ".*layer{}.*conv2.*".format(ix)
r = re.match(pattern, old_key)
if r is None:
continue
for param in ["weight", "bias"]:
if old_key.find(param) is -1:
continue
new_key = old_key.replace(
"conv2.{}".format(param), "conv2.conv.{}".format(param)
)
logger.info("pattern: {}, old_key: {}, new_key: {}".format(
pattern, old_key, new_key
))
state_dict[new_key] = state_dict[old_key]
del state_dict[old_key]
return state_dict
_C2_STAGE_NAMES = {
"R-50": ["1.2", "2.3", "3.5", "4.2"],
"R-101": ["1.2", "2.3", "3.22", "4.2"],
}
def load_resnet_c2_format(cfg, f):
state_dict = _load_c2_pickled_weights(f)
conv_body = cfg.MODEL.BACKBONE.CONV_BODY
arch = conv_body.replace("-C4", "").replace("-C5", "").replace("-FPN", "").replace("-RETINANET", "").replace("-FCOS", "")
stages = _C2_STAGE_NAMES[arch]
state_dict = _rename_weights_for_resnet(state_dict, stages)
# ***********************************
# for deformable convolutional layer
state_dict = _rename_conv_weights_for_deformable_conv_layers(state_dict, cfg)
# ***********************************
return dict(model=state_dict) | null |
10,136 | import logging
import pickle
from collections import OrderedDict
import torch
from maskrcnn_benchmark.utils.model_serialization import load_state_dict
from maskrcnn_benchmark.utils.registry import Registry
C2_FORMAT_LOADER = Registry()
def load_c2_format(cfg, f):
return C2_FORMAT_LOADER[cfg.MODEL.BACKBONE.CONV_BODY](cfg, f) | null |
10,137 | from maskrcnn_benchmark.utils.env import setup_environment
import argparse
import os
import glob
import pdb
import torch
from maskrcnn_benchmark.config import cfg, try_to_find
from maskrcnn_benchmark.data import make_data_loader
from maskrcnn_benchmark.solver import make_lr_scheduler
from maskrcnn_benchmark.solver import make_optimizer
from maskrcnn_benchmark.engine.inference import inference
from maskrcnn_benchmark.engine.trainer import do_train
from maskrcnn_benchmark.engine.alter_trainer import do_train as alternative_train
from maskrcnn_benchmark.engine.stage_trainer import do_train as multi_stage_train
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.utils.collect_env import collect_env_info
from maskrcnn_benchmark.utils.comm import synchronize, get_rank, is_main_process
from maskrcnn_benchmark.utils.imports import import_file
from maskrcnn_benchmark.utils.logger import setup_logger
from maskrcnn_benchmark.utils.miscellaneous import mkdir, save_config
from maskrcnn_benchmark.utils.metric_logger import (MetricLogger, TensorboardLogger)
import shutil
def removekey(d, prefix):
r = dict(d)
listofkeys = []
for key in r.keys():
if key.startswith(prefix):
listofkeys.append(key)
for key in listofkeys:
print('key: {} is removed'.format(key))
r.pop(key)
return r | null |
10,138 | from maskrcnn_benchmark.utils.env import setup_environment
import argparse
import os
import glob
import pdb
import torch
from maskrcnn_benchmark.config import cfg, try_to_find
from maskrcnn_benchmark.data import make_data_loader
from maskrcnn_benchmark.solver import make_lr_scheduler
from maskrcnn_benchmark.solver import make_optimizer
from maskrcnn_benchmark.engine.inference import inference
from maskrcnn_benchmark.engine.trainer import do_train
from maskrcnn_benchmark.engine.alter_trainer import do_train as alternative_train
from maskrcnn_benchmark.engine.stage_trainer import do_train as multi_stage_train
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.utils.collect_env import collect_env_info
from maskrcnn_benchmark.utils.comm import synchronize, get_rank, is_main_process
from maskrcnn_benchmark.utils.imports import import_file
from maskrcnn_benchmark.utils.logger import setup_logger
from maskrcnn_benchmark.utils.miscellaneous import mkdir, save_config
from maskrcnn_benchmark.utils.metric_logger import (MetricLogger, TensorboardLogger)
import shutil
def report_freeze_options(cfg):
print("Backbone Freeze:", cfg.MODEL.BACKBONE.FREEZE)
print("FPN Freeze:", cfg.MODEL.FPN.FREEZE)
print("RPN Freeze:", cfg.MODEL.RPN.FREEZE)
print("Linear Probe:", cfg.MODEL.LINEAR_PROB)
print("Language Freeze:", cfg.MODEL.LANGUAGE_BACKBONE.FREEZE)
print("Linear Layer (True Prmopt Tuning):", cfg.MODEL.DYHEAD.FUSE_CONFIG.ADD_LINEAR_LAYER)
print("High Level Override:", cfg.SOLVER.TUNING_HIGHLEVEL_OVERRIDE)
def do_train(
cfg,
model,
data_loader,
optimizer,
scheduler,
checkpointer,
device,
checkpoint_period,
arguments,
val_data_loader=None,
meters=None,
zero_shot=False
):
logger = logging.getLogger("maskrcnn_benchmark.trainer")
logger.info("Start training")
# meters = MetricLogger(delimiter=" ")
max_iter = len(data_loader)
start_iter = arguments["iteration"]
model.train()
model_ema = None
if cfg.SOLVER.MODEL_EMA > 0:
model_ema = ModelEma(model, decay=cfg.SOLVER.MODEL_EMA)
start_training_time = time.time()
end = time.time()
if cfg.SOLVER.USE_AMP:
scaler = GradScaler()
global_rank = get_rank()
if cfg.SOLVER.CHECKPOINT_PER_EPOCH != -1 and cfg.SOLVER.MAX_EPOCH >= 1:
checkpoint_period = len(data_loader) * cfg.SOLVER.CHECKPOINT_PER_EPOCH // cfg.SOLVER.MAX_EPOCH
if global_rank <= 0 and cfg.SOLVER.MAX_EPOCH >= 1:
print("Iter per epoch ", len(data_loader) // cfg.SOLVER.MAX_EPOCH )
if cfg.SOLVER.AUTO_TERMINATE_PATIENCE != -1:
patience_counter = 0
previous_best = 0.0
# Adapt the weight decay
if cfg.SOLVER.WEIGHT_DECAY_SCHEDULE and hasattr(scheduler, 'milestones'):
milestone_target = 0
for i, milstone in enumerate(list(scheduler.milestones)):
if scheduler.last_epoch >= milstone * cfg.SOLVER.WEIGHT_DECAY_SCHEDULE_RATIO:
milestone_target = i+1
for iteration, (images, targets, idxs, positive_map, positive_map_eval, greenlight_map) in enumerate(data_loader, start_iter):
nnegative = sum(len(target) < 1 for target in targets)
nsample = len(targets)
if nsample == nnegative or nnegative > nsample * cfg.SOLVER.MAX_NEG_PER_BATCH:
logger.info('[WARNING] Sampled {} negative in {} in a batch, greater the allowed ratio {}, skip'.
format(nnegative, nsample, cfg.SOLVER.MAX_NEG_PER_BATCH))
continue
data_time = time.time() - end
iteration = iteration + 1
arguments["iteration"] = iteration
images = images.to(device)
captions = None
try:
targets = [target.to(device) for target in targets]
captions = [t.get_field("caption") for t in targets if "caption" in t.fields()]
except:
pass
# Freeze language backbone
if cfg.MODEL.LANGUAGE_BACKBONE.FREEZE:
if hasattr(model, "module"):
model.module.language_backbone.eval()
else:
model.language_backbone.eval()
if cfg.SOLVER.USE_AMP:
with autocast():
if len(captions) > 0:
loss_dict = model(images, targets, captions, positive_map, greenlight_map = greenlight_map)
else:
loss_dict = model(images, targets)
losses = sum(loss for loss in loss_dict.values())
# save checkpoints for further debug if nan happens
# loss_value = losses.item()
# if not math.isfinite(loss_value):
# logging.error(f'=> loss is {loss_value}, stopping training')
# logging.error("Losses are : {}".format(loss_dict))
# time_str = time.strftime('%Y-%m-%d-%H-%M')
# fname = os.path.join(checkpointer.save_dir, f'{time_str}_states.pth')
# logging.info(f'=> save error state to {fname}')
# dict_to_save = {
# 'x': images,
# 'y': targets,
# 'loss': losses,
# 'states': model.module.state_dict() if hasattr(model, 'module') else model.state_dict()
# }
# if len(captions) > 0:
# dict_to_save['captions'] = captions
# dict_to_save['positive_map'] = positive_map
# torch.save(
# dict_to_save,
# fname
# )
if torch.isnan(losses) or torch.isinf(losses):
logging.error("NaN encountered, ignoring")
losses[losses != losses] = 0
optimizer.zero_grad()
scaler.scale(losses).backward()
scaler.step(optimizer)
scaler.update()
scheduler.step()
else:
if len(captions) > 0:
loss_dict = model(images, targets, captions, positive_map)
else:
loss_dict = model(images, targets)
losses = sum(loss for loss in loss_dict.values())
# loss_value = losses.item()
# if not math.isfinite(loss_value):
# logging.error(f'=> loss is {loss_value}, stopping training')
# time_str = time.strftime('%Y-%m-%d-%H-%M')
# fname = os.path.join(checkpointer.save_dir, f'{time_str}_states.pth')
# logging.info(f'=> save error state to {fname}')
# dict_to_save = {
# 'x': images,
# 'y': targets,
# 'loss': losses,
# 'states': model.module.state_dict() if hasattr(model, 'module') else model.state_dict()
# }
# if len(captions) > 0:
# dict_to_save['captions'] = captions
# dict_to_save['positive_map'] = positive_map
# torch.save(
# dict_to_save,
# fname
# )
if torch.isnan(losses) or torch.isinf(losses):
losses[losses != losses] = 0
optimizer.zero_grad()
losses.backward()
optimizer.step()
scheduler.step()
# Adapt the weight decay: only support multiStepLR
if cfg.SOLVER.WEIGHT_DECAY_SCHEDULE and hasattr(scheduler, 'milestones'):
if milestone_target < len(scheduler.milestones):
next_milestone = list(scheduler.milestones)[milestone_target]
else:
next_milestone = float('inf')
if scheduler.last_epoch >= next_milestone * cfg.SOLVER.WEIGHT_DECAY_SCHEDULE_RATIO:
gamma = scheduler.gamma
logger.info("Drop the weight decay by {}!".format(gamma))
for param in optimizer.param_groups:
if 'weight_decay' in param:
param['weight_decay'] *= gamma
# move the target forward
milestone_target += 1
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = reduce_loss_dict(loss_dict)
losses_reduced = sum(loss for loss in loss_dict_reduced.values())
meters.update(loss=losses_reduced, **loss_dict_reduced)
if model_ema is not None:
model_ema.update(model)
arguments["model_ema"] = model_ema.state_dict()
batch_time = time.time() - end
end = time.time()
meters.update(time=batch_time, data=data_time)
eta_seconds = meters.time.global_avg * (max_iter - iteration)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if iteration % 20 == 0 or iteration == max_iter:
# if iteration % 1 == 0 or iteration == max_iter:
#logger.info(
if global_rank <= 0:
print(
meters.delimiter.join(
[
"eta: {eta}",
"iter: {iter}",
"{meters}",
"lr: {lr:.6f}",
"wd: {wd:.6f}",
"max mem: {memory:.0f}",
]
).format(
eta=eta_string,
iter=iteration,
meters=str(meters),
lr=optimizer.param_groups[0]["lr"],
wd=optimizer.param_groups[0]["weight_decay"],
memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0,
)
)
if val_data_loader and (iteration % checkpoint_period == 0 or iteration == max_iter):
if is_main_process():
print("Evaluating")
eval_result = 0.0
model.eval()
if cfg.SOLVER.TEST_WITH_INFERENCE:
with torch.no_grad():
try:
_model = model.module
except:
_model = model
_result = inference(
model = _model,
data_loader = val_data_loader,
dataset_name="val",
device=device,
expected_results=cfg.TEST.EXPECTED_RESULTS,
expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,
output_folder=None,
cfg=cfg,
verbose=False
)
if is_main_process():
eval_result = _result[0].results['bbox']['AP']
else:
results_dict = {}
cpu_device = torch.device("cpu")
for i, batch in enumerate(val_data_loader):
images, targets, image_ids, positive_map, *_ = batch
with torch.no_grad():
images = images.to(device)
if positive_map is None:
output = model(images)
else:
captions = [t.get_field("caption") for t in targets if "caption" in t.fields()]
output = model(images, captions, positive_map)
output = [o.to(cpu_device) for o in output]
results_dict.update(
{img_id: result for img_id, result in zip(image_ids, output)}
)
all_predictions = all_gather(results_dict)
if is_main_process():
predictions = {}
for p in all_predictions:
predictions.update(p)
predictions = [predictions[i] for i in list(sorted(predictions.keys()))]
eval_result, _ = evaluate(val_data_loader.dataset, predictions, output_folder=None,
box_only=cfg.DATASETS.CLASS_AGNOSTIC)
if cfg.DATASETS.CLASS_AGNOSTIC:
eval_result = eval_result.results['box_proposal']['AR@100']
else:
eval_result = eval_result.results['bbox']['AP']
model.train()
if model_ema is not None and cfg.SOLVER.USE_EMA_FOR_MONITOR:
model_ema.ema.eval()
results_dict = {}
cpu_device = torch.device("cpu")
for i, batch in enumerate(val_data_loader):
images, targets, image_ids, positive_map, positive_map_eval = batch
with torch.no_grad():
images = images.to(device)
if positive_map is None:
output = model_ema.ema(images)
else:
captions = [t.get_field("caption") for t in targets if "caption" in t.fields()]
output = model_ema.ema(images, captions, positive_map)
output = [o.to(cpu_device) for o in output]
results_dict.update(
{img_id: result for img_id, result in zip(image_ids, output)}
)
all_predictions = all_gather(results_dict)
if is_main_process():
predictions = {}
for p in all_predictions:
predictions.update(p)
predictions = [predictions[i] for i in list(sorted(predictions.keys()))]
eval_result, _ = evaluate(val_data_loader.dataset, predictions, output_folder=None,
box_only=cfg.DATASETS.CLASS_AGNOSTIC)
if cfg.DATASETS.CLASS_AGNOSTIC:
eval_result = eval_result.results['box_proposal']['AR@100']
else:
eval_result = eval_result.results['bbox']['AP']
arguments.update(eval_result=eval_result)
if cfg.SOLVER.USE_AUTOSTEP:
eval_result = all_gather(eval_result)[0] #broadcast_data([eval_result])[0]
# print("Rank {} eval result gathered".format(cfg.local_rank), eval_result)
scheduler.step(eval_result)
if cfg.SOLVER.AUTO_TERMINATE_PATIENCE != -1:
if eval_result < previous_best:
patience_counter += 1
else:
patience_counter = 0
previous_best = eval_result
checkpointer.save("model_best", **arguments)
print("Previous Best", previous_best, "Patience Counter", patience_counter, "Eval Result", eval_result)
if patience_counter >= cfg.SOLVER.AUTO_TERMINATE_PATIENCE:
if is_main_process():
print("\n\n\n\nAuto Termination at {}, current best {}\n\n\n".format(iteration, previous_best))
break
if iteration % checkpoint_period == 0:
checkpointer.save("model_{:07d}".format(iteration), **arguments)
if iteration == max_iter:
checkpointer.save("model_final", **arguments)
break
total_training_time = time.time() - start_training_time
total_time_str = str(datetime.timedelta(seconds=total_training_time))
logger.info(
"Total training time: {} ({:.4f} s / it)".format(
total_time_str, total_training_time / (max_iter)
)
)
def do_train(
model,
data_loader,
optimizer,
scheduler,
checkpointer,
device,
checkpoint_period,
arguments,
):
logger = logging.getLogger("maskrcnn_benchmark.trainer")
logger.info("Start training")
meters = MetricLogger(delimiter=" ")
max_iter = min(len(task_loader) for task_loader in data_loader)
start_iter = arguments["iteration"]
model.train()
start_training_time = time.time()
end = time.time()
for iteration, task_loader in enumerate(zip(*data_loader), start_iter):
data_time = time.time() - end
iteration = iteration + 1
arguments["iteration"] = iteration
all_task_loss_dict = []
for task, (images, targets, _) in enumerate(task_loader, 1):
if all(len(target) < 1 for target in targets):
logger.warning('Sampled all negative batches, skip')
continue
images = images.to(device)
targets = [target.to(device) for target in targets]
loss_dict = model(images, targets, task)
all_task_loss_dict.append(loss_dict)
losses = sum(loss for loss_dict in all_task_loss_dict for loss in loss_dict.values())
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = reduce_loss_dict(all_task_loss_dict)
losses_reduced = sum(loss for loss in loss_dict_reduced.values())
meters.update(loss=losses_reduced, **loss_dict_reduced)
optimizer.zero_grad()
losses.backward()
optimizer.step()
scheduler.step()
batch_time = time.time() - end
end = time.time()
meters.update(time=batch_time, data=data_time)
eta_seconds = meters.time.global_avg * (max_iter - iteration)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if iteration % 20 == 0 or iteration == max_iter:
logger.info(
meters.delimiter.join(
[
"eta: {eta}",
"iter: {iter}",
"{meters}",
"lr: {lr:.6f}",
"max mem: {memory:.0f}",
]
).format(
eta=eta_string,
iter=iteration,
meters=str(meters),
lr=optimizer.param_groups[0]["lr"],
memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0,
)
)
if iteration % checkpoint_period == 0:
checkpointer.save("model_{:07d}".format(iteration), **arguments)
if iteration == max_iter:
checkpointer.save("model_final", **arguments)
total_training_time = time.time() - start_training_time
total_time_str = str(datetime.timedelta(seconds=total_training_time))
logger.info(
"Total training time: {} ({:.4f} s / it)".format(
total_time_str, total_training_time / (max_iter)
)
)
def do_train(
model,
data_loader,
optimizer,
scheduler,
checkpointer,
device,
checkpoint_period,
arguments,
):
logger = logging.getLogger("maskrcnn_benchmark.trainer")
logger.info("Start training")
meters = MetricLogger(delimiter=" ")
epoch_per_stage = arguments['epoch_per_stage']
max_iter = sum(len(stage_loader) * epoch_per_stage[si] for si, stage_loader in enumerate(data_loader))
max_iter += epoch_per_stage[-1] * min(len(stage_loader) for stage_loader in data_loader)
model.train()
start_training_time = time.time()
end = time.time()
for stage_i, stage_loader in enumerate(data_loader):
for ep in range(epoch_per_stage[stage_i]):
start_iter = arguments["iteration"]
for iteration, (images, targets, _) in enumerate(stage_loader, start_iter):
data_time = time.time() - end
iteration = iteration + 1
arguments["iteration"] = iteration
scheduler[stage_i].step()
all_stage_loss_dict = []
images = images.to(device)
targets = [target.to(device) for target in targets]
loss_dict = model(images, targets, stage_i)
all_stage_loss_dict.append(loss_dict)
losses = sum(loss for loss_dict in all_stage_loss_dict for loss in loss_dict.values())
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = reduce_loss_dict(all_stage_loss_dict)
losses_reduced = sum(loss for loss in loss_dict_reduced.values())
meters.update(loss=losses_reduced, **loss_dict_reduced)
optimizer.zero_grad()
losses.backward()
optimizer.step()
batch_time = time.time() - end
end = time.time()
meters.update(time=batch_time, data=data_time)
eta_seconds = meters.time.global_avg * (max_iter - iteration)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if iteration % 20 == 0 or iteration == max_iter:
logger.info(
meters.delimiter.join(
[
"eta: {eta}",
"iter: {iter}",
"{meters}",
"lr: {lr:.6f}",
"max mem: {memory:.0f}",
]
).format(
eta=eta_string,
iter=iteration,
meters=str(meters),
lr=optimizer.param_groups[0]["lr"],
memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0,
)
)
if iteration % checkpoint_period == 0:
checkpointer.save("model_{:07d}".format(iteration), **arguments)
if iteration == max_iter:
checkpointer.save("model_final", **arguments)
for ep in range(epoch_per_stage[-1]):
start_iter = arguments["iteration"]
for iteration, stage_loader in enumerate(zip(*data_loader), start_iter):
data_time = time.time() - end
iteration = iteration + 1
arguments["iteration"] = iteration
scheduler[-1].step()
all_task_loss_dict = []
for stage_i, (images, targets, _) in enumerate(stage_loader):
images = images.to(device)
targets = [target.to(device) for target in targets]
loss_dict = model(images, targets, stage_i)
all_task_loss_dict.append(loss_dict)
losses = sum(loss for loss_dict in all_task_loss_dict for loss in loss_dict.values())
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = reduce_loss_dict(all_task_loss_dict)
losses_reduced = sum(loss for loss in loss_dict_reduced.values())
meters.update(loss=losses_reduced, **loss_dict_reduced)
optimizer.zero_grad()
losses.backward()
optimizer.step()
batch_time = time.time() - end
end = time.time()
meters.update(time=batch_time, data=data_time)
eta_seconds = meters.time.global_avg * (max_iter - iteration)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if iteration % 20 == 0 or iteration == max_iter:
logger.info(
meters.delimiter.join(
[
"eta: {eta}",
"iter: {iter}",
"{meters}",
"lr: {lr:.6f}",
"max mem: {memory:.0f}",
]
).format(
eta=eta_string,
iter=iteration,
meters=str(meters),
lr=optimizer.param_groups[0]["lr"],
memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0,
)
)
if iteration % checkpoint_period == 0:
checkpointer.save("model_{:07d}".format(iteration), **arguments)
if iteration == max_iter:
checkpointer.save("model_final", **arguments)
total_training_time = time.time() - start_training_time
total_time_str = str(datetime.timedelta(seconds=total_training_time))
logger.info(
"Total training time: {} ({:.4f} s / it)".format(
total_time_str, total_training_time / (max_iter)
)
)
def build_detection_model(cfg):
meta_arch = _DETECTION_META_ARCHITECTURES[cfg.MODEL.META_ARCHITECTURE]
return meta_arch(cfg)
class DetectronCheckpointer(Checkpointer):
def __init__(
self,
cfg,
model,
optimizer=None,
scheduler=None,
save_dir="",
save_to_disk=None,
logger=None,
):
super(DetectronCheckpointer, self).__init__(
model, optimizer, scheduler, save_dir, save_to_disk, logger
)
self.cfg = cfg.clone()
def _load_file(self, f):
# catalog lookup
if f.startswith("catalog://"):
paths_catalog = import_file(
"maskrcnn_benchmark.config.paths_catalog", self.cfg.PATHS_CATALOG, True
)
catalog_f = paths_catalog.ModelCatalog.get(f[len("catalog://") :])
self.logger.info("{} points to {}".format(f, catalog_f))
f = catalog_f
# download url files
if f.startswith("http"):
# if the file is a url path, download it and cache it
cached_f = cache_url(f)
self.logger.info("url {} cached in {}".format(f, cached_f))
f = cached_f
# convert Caffe2 checkpoint from pkl
if f.endswith(".pkl"):
return load_c2_format(self.cfg, f)
if f.endswith(".big"):
return load_big_format(self.cfg, f)
if f.endswith(".pretrain"):
return load_pretrain_format(self.cfg, f)
# load native detectron.pytorch checkpoint
loaded = super(DetectronCheckpointer, self)._load_file(f)
if "model" not in loaded:
loaded = dict(model=loaded)
return loaded
def get_rank():
    """Return this process's rank in the default process group, or 0 when
    torch.distributed is unavailable or not yet initialized."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
def is_main_process():
    """True only on the rank-0 (master) process."""
    rank = get_rank()
    return rank == 0
class MetricLogger(object):
    """Accumulate named scalar metrics in SmoothedValue windows and render
    them as a single delimited status string."""

    def __init__(self, delimiter="\t"):
        # One SmoothedValue per metric name, created lazily on first update.
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        """Record one scalar sample per keyword argument; tensors are
        converted with .item() first."""
        for name, value in kwargs.items():
            if isinstance(value, torch.Tensor):
                value = value.item()
            assert isinstance(value, (float, int))
            self.meters[name].update(value)

    def __getattr__(self, attr):
        # Only called when normal attribute lookup fails; exposes meters
        # as attributes (e.g. logger.loss).
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))

    def __str__(self):
        rendered = [
            "{}: {:.4f} ({:.4f})".format(name, meter.median, meter.global_avg)
            for name, meter in self.meters.items()
        ]
        return self.delimiter.join(rendered)
def train(cfg, local_rank, distributed, zero_shot, skip_optimizer_resume=False, save_config_path = None):
    """Build the model, data loaders, optimizer and checkpointer, apply the
    configured parameter-freezing policy, then run the selected training loop.

    Args:
        cfg: yacs config node driving every build step.
        local_rank: GPU index of this process (used for DDP placement).
        distributed: wrap the model in DistributedDataParallel when True.
        zero_shot: when True, return the loaded model without training.
        skip_optimizer_resume: do not restore optimizer state when resuming.
        save_config_path: unused in this function; kept for caller compatibility.

    Returns:
        The (possibly DDP-wrapped) model.
    """
    data_loader = make_data_loader(
        cfg,
        is_train=True,
        is_distributed=distributed,
        start_iter=0 #<TODO> Sample data from resume is disabled, due to the conflict with max_epoch
    )
    # Optional validation loader for in-training evaluation.
    if cfg.TEST.DURING_TRAINING:
        data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)
        data_loaders_val = data_loaders_val[0]
    else:
        data_loaders_val = None
    model = build_detection_model(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)
    # Linear probing requires the feature extractor to be frozen.
    if cfg.MODEL.LINEAR_PROB:
        assert cfg.MODEL.BACKBONE.FREEZE, "For linear probing, backbone should be frozen!"
        if hasattr(model.backbone, 'fpn'):
            assert cfg.MODEL.FPN.FREEZE, "For linear probing, FPN should be frozen!"
    # Apply the freezing policy by clearing requires_grad on frozen modules.
    if cfg.MODEL.BACKBONE.FREEZE:
        for p in model.backbone.body.parameters():
            p.requires_grad = False
    if cfg.MODEL.FPN.FREEZE:
        for p in model.backbone.fpn.parameters():
            p.requires_grad = False
    if cfg.MODEL.RPN.FREEZE:
        for p in model.rpn.parameters():
            p.requires_grad = False
    # Linear probing: freeze everything in rpn/roi_heads except the listed
    # prediction-head parameters, which stay trainable.
    if cfg.MODEL.LINEAR_PROB:
        if model.rpn is not None:
            for key, p in model.rpn.named_parameters():
                if not ('bbox_pred' in key or 'cls_logits' in key or 'centerness' in key or 'cosine_scale' in key or 'dot_product_projection_text' in key or 'head.log_scale' in key or 'head.bias_lang' in key or 'head.bias0' in key):
                    p.requires_grad = False
        if model.roi_heads is not None:
            for key, p in model.roi_heads.named_parameters():
                if not ('bbox_pred' in key or 'cls_logits' in key or 'centerness' in key or 'cosine_scale' in key or 'dot_product_projection_text' in key or 'head.log_scale' in key or 'head.bias_lang' in key or 'head.bias0' in key):
                    p.requires_grad = False
    # Re-enable the tunable linear layers even when the RPN is frozen.
    if cfg.MODEL.DYHEAD.FUSE_CONFIG.ADD_LINEAR_LAYER:
        if model.rpn is not None:
            for key, p in model.rpn.named_parameters():
                if 'tunable_linear' in key:
                    p.requires_grad = True
    optimizer = make_optimizer(cfg, model)
    scheduler = make_lr_scheduler(cfg, optimizer)
    if distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[local_rank], output_device=local_rank,
            # this should be removed if we update BatchNorm stats
            broadcast_buffers=False,
            find_unused_parameters=cfg.SOLVER.FIND_UNUSED_PARAMETERS
        )
    arguments = {}
    arguments["iteration"] = 0
    output_dir = cfg.OUTPUT_DIR
    save_to_disk = get_rank() == 0
    checkpointer = DetectronCheckpointer(
        cfg, model, optimizer, scheduler, output_dir, save_to_disk
    )
    # Resume from the newest checkpoint if one exists; otherwise start from
    # the pretrained weights named in cfg.MODEL.WEIGHT.
    if checkpointer.has_checkpoint():
        extra_checkpoint_data = checkpointer.load(skip_optimizer=skip_optimizer_resume)
        arguments.update(extra_checkpoint_data)
    else:
        state_dict = checkpointer._load_file(try_to_find(cfg.MODEL.WEIGHT))
        checkpointer._load_model(state_dict)
    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
    meters = MetricLogger(delimiter="  ")
    # Zero-shot: the caller only needs the loaded model, no training.
    if zero_shot:
        return model
    # Report the frozen/trainable split once, on the main process only.
    if is_main_process():
        for name, p in model.named_parameters():
            if p.requires_grad:
                print(name, " : Not Frozen")
            else:
                print(name, " : Frozen")
        report_freeze_options(cfg)
    # Dispatch to the training-loop variant requested by the dataset config.
    if cfg.DATASETS.ALTERNATIVE_TRAINING:
        alternative_train(
            model,
            data_loader,
            optimizer,
            scheduler,
            checkpointer,
            device,
            checkpoint_period,
            arguments,
        )
    elif cfg.DATASETS.MULTISTAGE_TRAINING:
        arguments['epoch_per_stage'] = cfg.SOLVER.MULTI_MAX_EPOCH
        multi_stage_train(
            model,
            data_loader,
            optimizer,
            scheduler,
            checkpointer,
            device,
            checkpoint_period,
            arguments,
        )
    else:
        meters = MetricLogger(delimiter="  ")
        do_train(
            cfg,
            model,
            data_loader,
            optimizer,
            scheduler,
            checkpointer,
            device,
            checkpoint_period,
            arguments,
            data_loaders_val,
            meters=meters
        )
    return model
10,139 | from maskrcnn_benchmark.utils.env import setup_environment
import argparse
import os
import glob
import pdb
import torch
from maskrcnn_benchmark.config import cfg, try_to_find
from maskrcnn_benchmark.data import make_data_loader
from maskrcnn_benchmark.solver import make_lr_scheduler
from maskrcnn_benchmark.solver import make_optimizer
from maskrcnn_benchmark.engine.inference import inference
from maskrcnn_benchmark.engine.trainer import do_train
from maskrcnn_benchmark.engine.alter_trainer import do_train as alternative_train
from maskrcnn_benchmark.engine.stage_trainer import do_train as multi_stage_train
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.utils.collect_env import collect_env_info
from maskrcnn_benchmark.utils.comm import synchronize, get_rank, is_main_process
from maskrcnn_benchmark.utils.imports import import_file
from maskrcnn_benchmark.utils.logger import setup_logger
from maskrcnn_benchmark.utils.miscellaneous import mkdir, save_config
from maskrcnn_benchmark.utils.metric_logger import (MetricLogger, TensorboardLogger)
import shutil
def tuning_highlevel_override(cfg,):
    """Apply a high-level fine-tuning preset to the freeze/probe flags.

    The preset name comes from ``cfg.SOLVER.TUNING_HIGHLEVEL_OVERRIDE``;
    unknown names (and the empty default) leave ``cfg`` untouched.  The
    original if/elif chain duplicated the ``LANGUAGE_BACKBONE.FREEZE``
    assignment in the ``linear_prob`` branch (copy-paste slip) and repeated
    the same six assignments per branch; a mode table keeps the presets
    consistent and easy to diff.

    Args:
        cfg: mutable yacs config node; modified in place.

    Returns:
        The same ``cfg`` object, for call-chaining.
    """
    # Per-mode values for:
    # (BACKBONE.FREEZE, FPN.FREEZE, RPN.FREEZE,
    #  LINEAR_PROB, DYHEAD.FUSE_CONFIG.ADD_LINEAR_LAYER, LANGUAGE_BACKBONE.FREEZE)
    presets = {
        "full":               (False, False, False, False, False, False),
        "linear_prob":        (True,  True,  False, True,  False, True),
        "language_prompt_v1": (True,  True,  True,  False, False, False),
        "language_prompt_v2": (True,  True,  True,  False, True,  True),
        "language_prompt_v3": (True,  True,  True,  True,  False, False),  # linear probe + tuned language backbone
        "language_prompt_v4": (True,  True,  True,  True,  True,  True),
    }
    mode = cfg.SOLVER.TUNING_HIGHLEVEL_OVERRIDE
    if mode in presets:
        backbone, fpn, rpn, linear_prob, add_linear_layer, lang_freeze = presets[mode]
        cfg.MODEL.BACKBONE.FREEZE = backbone
        cfg.MODEL.FPN.FREEZE = fpn
        cfg.MODEL.RPN.FREEZE = rpn
        cfg.MODEL.LINEAR_PROB = linear_prob
        cfg.MODEL.DYHEAD.FUSE_CONFIG.ADD_LINEAR_LAYER = add_linear_layer
        cfg.MODEL.LANGUAGE_BACKBONE.FREEZE = lang_freeze
        if mode == "linear_prob":
            # Gradient checkpointing buys nothing when the head is frozen.
            cfg.MODEL.DYHEAD.USE_CHECKPOINT = False  # Disable checkpoint
    return cfg
10,140 | from __future__ import print_function, absolute_import, division
import os, sys
sys.path.append( os.path.normpath( os.path.join( os.path.dirname( __file__ ) , '..' , 'helpers' ) ) )
from csHelpers import *
from cityscapesscripts.evaluation.instance import *
from cityscapesscripts.helpers.csHelpers import *
import cv2
from maskrcnn_benchmark.utils import cv2_util
def instances2dict_with_polygons(imageFileList, verbose=False):
    """Collect per-image instance annotations from Cityscapes instance-id
    images, attaching polygon contours to each instance.

    Returns {absolute image path: {label name: [instance dict, ...]}}.
    """
    if not isinstance(imageFileList, list):
        imageFileList = [imageFileList]
    if verbose:
        print("Processing {} images...".format(len(imageFileList)))
    imgCount = 0
    instanceDict = {}
    for imageFileName in imageFileList:
        # Instance-id image as a numpy array.
        idMap = np.array(Image.open(imageFileName))
        # One bucket per known label name, even if it stays empty.
        instances = {label.name: [] for label in labels}
        for instanceId in np.unique(idMap):
            # Ids below 1000 encode pure semantic classes, not instances.
            if instanceId < 1000:
                continue
            instanceObj = Instance(idMap, instanceId)
            instanceObj_dict = instanceObj.toDict()
            if id2label[instanceObj.labelID].hasInstances:
                mask = (idMap == instanceId).astype(np.uint8)
                contour, hier = cv2_util.findContours(
                    mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
                instanceObj_dict['contours'] = [c.reshape(-1).tolist() for c in contour]
                instances[id2label[instanceObj.labelID].name].append(instanceObj_dict)
        instanceDict[os.path.abspath(imageFileName)] = instances
        imgCount += 1
        if verbose:
            print("\rImages Processed: {}".format(imgCount), end=' ')
            sys.stdout.flush()
    if verbose:
        print("")
    return instanceDict
10,141 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import h5py
import json
import os
import scipy.misc
import sys
import cityscapesscripts.evaluation.instances2dict_with_polygons as cs
import detectron.utils.segms as segms_util
import detectron.utils.boxes as bboxs_util
def parse_args():
    """Parse command-line options for the dataset-conversion script.

    Prints help and exits with status 1 when no arguments are given.
    """
    parser = argparse.ArgumentParser(description='Convert dataset')
    option_specs = [
        ('--dataset', "cocostuff, cityscapes"),
        ('--outdir', "output dir for json files"),
        ('--datadir', "data dir for annotations to be converted"),
    ]
    for flag, help_text in option_specs:
        parser.add_argument(flag, help=help_text, default=None, type=str)
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
10,142 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import h5py
import json
import os
import scipy.misc
import sys
import cityscapesscripts.evaluation.instances2dict_with_polygons as cs
import detectron.utils.segms as segms_util
import detectron.utils.boxes as bboxs_util
The provided code snippet includes necessary dependencies for implementing the `convert_coco_stuff_mat` function. Write a Python function `def convert_coco_stuff_mat(data_dir, out_dir)` to solve the following problem:
Convert the .mat annotations to .png label maps and save a JSON index with their paths. This currently contains only the segmentation labels for objects+stuff in COCO-Stuff; combining them with other labels from the original COCO dataset remains a TODO.
Here is the function:
def convert_coco_stuff_mat(data_dir, out_dir):
    """Convert to png and save json with path. This currently only contains
    the segmentation labels for objects+stuff in cocostuff - if we need to
    combine with other labels from original COCO that will be a TODO.

    Args:
        data_dir: directory holding '<set>.txt' image lists and an
            'annotations/' subdirectory of .mat files.
        out_dir: directory receiving 'coco_stuff_<set>.json' per split.
    """
    sets = ['train', 'val']
    categories = []
    json_name = 'coco_stuff_%s.json'
    # Shared across splits on purpose: 'categories' is filled once (from the
    # first .mat file) and reused in every split's JSON.
    ann_dict = {}
    for data_set in sets:
        file_list = os.path.join(data_dir, '%s.txt')
        images = []
        with open(file_list % data_set) as f:
            for img_id, img_name in enumerate(f):
                img_name = img_name.replace('coco', 'COCO').strip('\n')
                image = {}
                mat_file = os.path.join(
                    data_dir, 'annotations/%s.mat' % img_name)
                # Close each .mat file promptly instead of leaking handles.
                with h5py.File(mat_file, 'r') as data:
                    labelMap = data.get('S')
                    # Category names are decoded once, from the first file.
                    if len(categories) == 0:
                        labelNames = data.get('names')
                        for idx, n in enumerate(labelNames):
                            categories.append(
                                {"id": idx, "name": ''.join(chr(i) for i in data[
                                    n[0]])})
                        ann_dict['categories'] = categories
                    scipy.misc.imsave(
                        os.path.join(data_dir, img_name + '.png'), labelMap)
                    # NOTE(review): width/height look swapped — shape[0] is
                    # normally the row (height) axis; confirm the .mat layout.
                    image['width'] = labelMap.shape[0]
                    image['height'] = labelMap.shape[1]
                image['file_name'] = img_name
                image['seg_file_name'] = img_name
                image['id'] = img_id
                images.append(image)
        ann_dict['images'] = images
        print("Num images: %s" % len(images))
        # Text mode: json.dump emits str — the original 'wb' + write(str)
        # raises TypeError on Python 3.
        with open(os.path.join(out_dir, json_name % data_set), 'w') as outfile:
            json.dump(ann_dict, outfile)
10,143 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import h5py
import json
import os
import scipy.misc
import sys
import cityscapesscripts.evaluation.instances2dict_with_polygons as cs
import detectron.utils.segms as segms_util
import detectron.utils.boxes as bboxs_util
def getLabelID(self, instID):
    """Map a Cityscapes instance id to its semantic label id.

    Ids >= 1000 encode label*1000 + instance index, so the label is the
    truncated quotient; smaller ids are already plain label ids.
    """
    return instID if instID < 1000 else int(instID / 1000)
10,144 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import h5py
import json
import os
import scipy.misc
import sys
import cityscapesscripts.evaluation.instances2dict_with_polygons as cs
import detectron.utils.segms as segms_util
import detectron.utils.boxes as bboxs_util
The provided code snippet includes necessary dependencies for implementing the `convert_cityscapes_instance_only` function. Write a Python function `def convert_cityscapes_instance_only( data_dir, out_dir)` to solve the following problem:
Convert from cityscapes format to COCO instance seg format - polygons
Here is the function:
def convert_cityscapes_instance_only(
    data_dir, out_dir):
    """Convert from cityscapes format to COCO instance seg format - polygons

    Walks each gtFine split under *data_dir*, converts every
    '*_polygons.json' annotation (via the matching '*_instanceIds.png'
    image) into COCO-style image/annotation/category records, and writes
    one 'instancesonly_filtered_<split>.json' per split into *out_dir*.
    Image, annotation and category ids are numbered continuously across
    splits.
    """
    sets = [
        'gtFine_val',
        'gtFine_train',
        'gtFine_test',
        # 'gtCoarse_train',
        # 'gtCoarse_val',
        # 'gtCoarse_train_extra'
    ]
    ann_dirs = [
        'gtFine_trainvaltest/gtFine/val',
        'gtFine_trainvaltest/gtFine/train',
        'gtFine_trainvaltest/gtFine/test',
        # 'gtCoarse/train',
        # 'gtCoarse/train_extra',
        # 'gtCoarse/val'
    ]
    json_name = 'instancesonly_filtered_%s.json'
    ends_in = '%s_polygons.json'
    # Ids are global across splits, so they continue counting between sets.
    img_id = 0
    ann_id = 0
    cat_id = 1
    category_dict = {}
    # Only these Cityscapes categories have countable instances.
    category_instancesonly = [
        'person',
        'rider',
        'car',
        'truck',
        'bus',
        'train',
        'motorcycle',
        'bicycle',
    ]
    for data_set, ann_dir in zip(sets, ann_dirs):
        print('Starting %s' % data_set)
        ann_dict = {}
        images = []
        annotations = []
        ann_dir = os.path.join(data_dir, ann_dir)
        for root, _, files in os.walk(ann_dir):
            for filename in files:
                if filename.endswith(ends_in % data_set.split('_')[0]):
                    if len(images) % 50 == 0:
                        print("Processed %s images, %s annotations" % (
                            len(images), len(annotations)))
                    json_ann = json.load(open(os.path.join(root, filename)))
                    image = {}
                    image['id'] = img_id
                    img_id += 1
                    image['width'] = json_ann['imgWidth']
                    image['height'] = json_ann['imgHeight']
                    # Derive the RGB image and instance-id image names from
                    # the annotation file name.
                    image['file_name'] = filename[:-len(
                        ends_in % data_set.split('_')[0])] + 'leftImg8bit.png'
                    image['seg_file_name'] = filename[:-len(
                        ends_in % data_set.split('_')[0])] + \
                        '%s_instanceIds.png' % data_set.split('_')[0]
                    images.append(image)
                    fullname = os.path.join(root, image['seg_file_name'])
                    # Extract per-instance polygon contours from the
                    # instance-id image.
                    objects = cs.instances2dict_with_polygons(
                        [fullname], verbose=False)[fullname]
                    for object_cls in objects:
                        if object_cls not in category_instancesonly:
                            continue  # skip non-instance categories
                        for obj in objects[object_cls]:
                            if obj['contours'] == []:
                                print('Warning: empty contours.')
                                continue  # skip instances with no contour at all
                            len_p = [len(p) for p in obj['contours']]
                            if min(len_p) <= 4:
                                print('Warning: invalid contours.')
                                continue  # skip degenerate (too-short) polygons
                            ann = {}
                            ann['id'] = ann_id
                            ann_id += 1
                            ann['image_id'] = image['id']
                            ann['segmentation'] = obj['contours']
                            # Assign category ids on first sight, in encounter order.
                            if object_cls not in category_dict:
                                category_dict[object_cls] = cat_id
                                cat_id += 1
                            ann['category_id'] = category_dict[object_cls]
                            ann['iscrowd'] = 0
                            ann['area'] = obj['pixelCount']
                            ann['bbox'] = bboxs_util.xyxy_to_xywh(
                                segms_util.polys_to_boxes(
                                    [ann['segmentation']])).tolist()[0]
                            annotations.append(ann)
        ann_dict['images'] = images
        categories = [{"id": category_dict[name], "name": name} for name in
                      category_dict]
        ann_dict['categories'] = categories
        ann_dict['annotations'] = annotations
        print("Num categories: %s" % len(categories))
        print("Num images: %s" % len(images))
        print("Num annotations: %s" % len(annotations))
        with open(os.path.join(out_dir, json_name % data_set), 'w') as outfile:
            outfile.write(json.dumps(ann_dict))
10,145 | from maskrcnn_benchmark.utils.env import setup_environment
import argparse
import os
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import requests
from io import BytesIO
from PIL import Image
import numpy as np
from maskrcnn_benchmark.engine.predictor_glip import GLIPDemo
import yaml
import json
import pdb
import os
import random
import torch
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.data import make_data_loader
from maskrcnn_benchmark.engine.inference import inference
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.utils.collect_env import collect_env_info
from maskrcnn_benchmark.utils.comm import synchronize, get_rank
from maskrcnn_benchmark.utils.logger import setup_logger
from maskrcnn_benchmark.utils.miscellaneous import mkdir
from maskrcnn_benchmark.utils.stats import get_model_complexity_info
from maskrcnn_benchmark.engine.predictor_glip import GLIPDemo
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import os
import functools
import io
import os
import datetime
import torch
import torch.distributed as dist
def load(url_or_file_name):
    """Load an image from a URL or a local path as a BGR numpy array.

    Tries an HTTP fetch first; if the request raises (e.g. the argument is
    a local path, not a URL), falls back to opening it as a file.

    Args:
        url_or_file_name: http(s) URL or filesystem path of the image.

    Returns:
        HxWx3 uint8 numpy array in BGR channel order.
    """
    try:
        response = requests.get(url_or_file_name)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; any request failure means "treat it as a path".
        response = None
    if response is None:
        pil_image = Image.open(url_or_file_name).convert("RGB")
    else:
        pil_image = Image.open(BytesIO(response.content)).convert("RGB")
    # convert to BGR format (channel order expected by the detector)
    image = np.array(pil_image)[:, :, [2, 1, 0]]
    return image
10,146 | from maskrcnn_benchmark.utils.env import setup_environment
import argparse
import os
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import requests
from io import BytesIO
from PIL import Image
import numpy as np
from maskrcnn_benchmark.engine.predictor_glip import GLIPDemo
import yaml
import json
import pdb
import os
import random
import torch
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.data import make_data_loader
from maskrcnn_benchmark.engine.inference import inference
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.utils.collect_env import collect_env_info
from maskrcnn_benchmark.utils.comm import synchronize, get_rank
from maskrcnn_benchmark.utils.logger import setup_logger
from maskrcnn_benchmark.utils.miscellaneous import mkdir
from maskrcnn_benchmark.utils.stats import get_model_complexity_info
from maskrcnn_benchmark.engine.predictor_glip import GLIPDemo
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import os
import functools
import io
import os
import datetime
import torch
import torch.distributed as dist
def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process
    """
    import builtins

    original_print = builtins.print

    def filtered_print(*args, **kwargs):
        # `force=True` lets any rank print (useful for error reports).
        force = kwargs.pop("force", False)
        if is_master or force:
            original_print(*args, **kwargs)

    builtins.print = filtered_print
The provided code snippet includes necessary dependencies for implementing the `init_distributed_mode` function. Write a Python function `def init_distributed_mode(args)` to solve the following problem:
Initialize distributed training, if appropriate
Here is the function:
def init_distributed_mode(args):
    """Initialize distributed training, if appropriate.

    Discovers rank/world-size from torchrun-style env vars (RANK,
    WORLD_SIZE, LOCAL_RANK) or from SLURM_PROCID, then initializes the
    NCCL process group and silences print() on non-master ranks.
    Mutates *args* in place; returns None.
    """
    if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ["WORLD_SIZE"])
        args.gpu = int(os.environ["LOCAL_RANK"])
    elif "SLURM_PROCID" in os.environ:
        # NOTE(review): this branch leaves args.world_size unset unless the
        # caller provided it — verify before relying on it.
        args.rank = int(os.environ["SLURM_PROCID"])
        args.gpu = args.rank % torch.cuda.device_count()
    else:
        # Neither launcher detected: run single-process.
        print("Not using distributed mode")
        args.distributed = False
        return
    #args.distributed = True
    # NOTE(review): args.distributed is never set True here — it appears to
    # be decided elsewhere; confirm with the callers.
    torch.cuda.set_device(args.gpu)
    args.dist_backend = "nccl"
    print("| distributed init (rank {}): {}".format(args.rank, args.dist_url), flush=True)
    # 2-hour timeout accommodates slow rendezvous / first NCCL setup.
    dist.init_process_group(
        backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank,
        timeout=datetime.timedelta(0, 7200)
    )
    dist.barrier()
    # Silence print() on all non-master ranks (force=True overrides).
    setup_for_distributed(args.rank == 0)
10,147 | from maskrcnn_benchmark.utils.env import setup_environment
import argparse
import os
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import requests
from io import BytesIO
from PIL import Image
import numpy as np
from maskrcnn_benchmark.engine.predictor_glip import GLIPDemo
import yaml
import json
import pdb
import os
import random
import torch
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.data import make_data_loader
from maskrcnn_benchmark.engine.inference import inference
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.utils.collect_env import collect_env_info
from maskrcnn_benchmark.utils.comm import synchronize, get_rank
from maskrcnn_benchmark.utils.logger import setup_logger
from maskrcnn_benchmark.utils.miscellaneous import mkdir
from maskrcnn_benchmark.utils.stats import get_model_complexity_info
from maskrcnn_benchmark.engine.predictor_glip import GLIPDemo
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import os
import functools
import io
import os
import datetime
import torch
import torch.distributed as dist
def imshow(img, file_name="tmp.jpg", caption="test"):
    """Save a BGR image to *file_name* with a centered caption underneath.

    Args:
        img: HxWx3 array in BGR channel order (reordered to RGB for display).
        file_name: output path handed to plt.savefig.
        caption: text drawn under the image.  New parameter generalizing the
            previously hard-coded "test"; the default preserves the old
            behavior for existing callers.
    """
    plt.imshow(img[:, :, [2, 1, 0]])  # BGR -> RGB for matplotlib
    plt.axis("off")
    plt.figtext(0.5, 0.09, caption, wrap=True, horizontalalignment='center', fontsize=20)
    plt.savefig(file_name)
10,148 | from maskrcnn_benchmark.utils.env import setup_environment
import argparse
import os
import torch
from maskrcnn_benchmark.config import cfg, try_to_find
from maskrcnn_benchmark.data import make_data_loader
from maskrcnn_benchmark.solver import make_lr_scheduler
from maskrcnn_benchmark.solver import make_optimizer
from maskrcnn_benchmark.engine.inference import inference
from maskrcnn_benchmark.engine.trainer import do_train
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.utils.collect_env import collect_env_info
from maskrcnn_benchmark.utils.comm import synchronize, get_rank
from maskrcnn_benchmark.utils.imports import import_file
from maskrcnn_benchmark.utils.logger import setup_logger
from maskrcnn_benchmark.utils.metric_logger import (MetricLogger, TensorboardLogger)
from maskrcnn_benchmark.utils.miscellaneous import mkdir, save_config
import numpy as np
import random
from maskrcnn_benchmark.utils.amp import autocast, GradScaler
def do_train(
    cfg,
    model,
    data_loader,
    optimizer,
    scheduler,
    checkpointer,
    device,
    checkpoint_period,
    arguments,
    val_data_loader=None,
    meters=None,
    zero_shot=False
):
    """Main training loop with optional AMP, model EMA, periodic in-training
    evaluation, checkpointing and patience-based auto-termination.

    Args:
        cfg: yacs config node controlling AMP, EMA, scheduling and eval.
        model: detector (possibly DDP-wrapped); trained in place.
        data_loader: training loader; its length defines max_iter.
        optimizer / scheduler: built by make_optimizer / make_lr_scheduler.
        checkpointer: saves "model_{iter}" / "model_best" / "model_final".
        device: device training tensors are moved to.
        checkpoint_period: iterations between checkpoints (may be
            recomputed below from CHECKPOINT_PER_EPOCH).
        arguments: mutable resume state; "iteration" is read and updated.
        val_data_loader: optional loader for periodic evaluation.
        meters: MetricLogger collecting loss/time statistics.
        zero_shot: unused here; kept for caller compatibility.
    """
    logger = logging.getLogger("maskrcnn_benchmark.trainer")
    logger.info("Start training")
    # meters = MetricLogger(delimiter="  ")
    max_iter = len(data_loader)
    start_iter = arguments["iteration"]
    model.train()
    model_ema = None
    if cfg.SOLVER.MODEL_EMA > 0:
        model_ema = ModelEma(model, decay=cfg.SOLVER.MODEL_EMA)
    start_training_time = time.time()
    end = time.time()
    if cfg.SOLVER.USE_AMP:
        scaler = GradScaler()
    global_rank = get_rank()
    # Derive the checkpoint period from epochs when configured that way.
    if cfg.SOLVER.CHECKPOINT_PER_EPOCH != -1 and cfg.SOLVER.MAX_EPOCH >= 1:
        checkpoint_period = len(data_loader) * cfg.SOLVER.CHECKPOINT_PER_EPOCH // cfg.SOLVER.MAX_EPOCH
    if global_rank <= 0 and cfg.SOLVER.MAX_EPOCH >= 1:
        print("Iter per epoch ", len(data_loader) // cfg.SOLVER.MAX_EPOCH )
    if cfg.SOLVER.AUTO_TERMINATE_PATIENCE != -1:
        patience_counter = 0
        previous_best = 0.0
    # Adapt the weight decay: find which milestone we resume inside of.
    if cfg.SOLVER.WEIGHT_DECAY_SCHEDULE and hasattr(scheduler, 'milestones'):
        milestone_target = 0
        for i, milstone in enumerate(list(scheduler.milestones)):
            if scheduler.last_epoch >= milstone * cfg.SOLVER.WEIGHT_DECAY_SCHEDULE_RATIO:
                milestone_target = i+1
    for iteration, (images, targets, idxs, positive_map, positive_map_eval, greenlight_map) in enumerate(data_loader, start_iter):
        # Skip batches dominated by negative (empty-target) samples.
        nnegative = sum(len(target) < 1 for target in targets)
        nsample = len(targets)
        if nsample == nnegative or nnegative > nsample * cfg.SOLVER.MAX_NEG_PER_BATCH:
            logger.info('[WARNING] Sampled {} negative in {} in a batch, greater the allowed ratio {}, skip'.
                        format(nnegative, nsample, cfg.SOLVER.MAX_NEG_PER_BATCH))
            continue
        data_time = time.time() - end
        iteration = iteration + 1
        arguments["iteration"] = iteration
        images = images.to(device)
        captions = None
        try:
            targets = [target.to(device) for target in targets]
            captions = [t.get_field("caption") for t in targets if "caption" in t.fields()]
        except:
            pass
        # Freeze language backbone: keep it in eval mode every iteration
        # (model.train() above flips it back to train mode).
        if cfg.MODEL.LANGUAGE_BACKBONE.FREEZE:
            if hasattr(model, "module"):
                model.module.language_backbone.eval()
            else:
                model.language_backbone.eval()
        if cfg.SOLVER.USE_AMP:
            # Mixed-precision path: autocast forward + GradScaler backward.
            with autocast():
                if len(captions) > 0:
                    loss_dict = model(images, targets, captions, positive_map, greenlight_map = greenlight_map)
                else:
                    loss_dict = model(images, targets)
            losses = sum(loss for loss in loss_dict.values())
            # save checkpoints for further debug if nan happens
            # loss_value = losses.item()
            # if not math.isfinite(loss_value):
            #     logging.error(f'=> loss is {loss_value}, stopping training')
            #     logging.error("Losses are : {}".format(loss_dict))
            #     time_str = time.strftime('%Y-%m-%d-%H-%M')
            #     fname = os.path.join(checkpointer.save_dir, f'{time_str}_states.pth')
            #     logging.info(f'=> save error state to {fname}')
            #     dict_to_save = {
            #         'x': images,
            #         'y': targets,
            #         'loss': losses,
            #         'states': model.module.state_dict() if hasattr(model, 'module') else model.state_dict()
            #     }
            #     if len(captions) > 0:
            #         dict_to_save['captions'] = captions
            #         dict_to_save['positive_map'] = positive_map
            #     torch.save(
            #         dict_to_save,
            #         fname
            #     )
            # Zero out non-finite losses instead of aborting the run.
            if torch.isnan(losses) or torch.isinf(losses):
                logging.error("NaN encountered, ignoring")
                losses[losses != losses] = 0
            optimizer.zero_grad()
            scaler.scale(losses).backward()
            scaler.step(optimizer)
            scaler.update()
            scheduler.step()
        else:
            # Full-precision path.
            # NOTE(review): unlike the AMP branch, greenlight_map is not
            # forwarded here — confirm whether that is intentional.
            if len(captions) > 0:
                loss_dict = model(images, targets, captions, positive_map)
            else:
                loss_dict = model(images, targets)
            losses = sum(loss for loss in loss_dict.values())
            # loss_value = losses.item()
            # if not math.isfinite(loss_value):
            #     logging.error(f'=> loss is {loss_value}, stopping training')
            #     time_str = time.strftime('%Y-%m-%d-%H-%M')
            #     fname = os.path.join(checkpointer.save_dir, f'{time_str}_states.pth')
            #     logging.info(f'=> save error state to {fname}')
            #     dict_to_save = {
            #         'x': images,
            #         'y': targets,
            #         'loss': losses,
            #         'states': model.module.state_dict() if hasattr(model, 'module') else model.state_dict()
            #     }
            #     if len(captions) > 0:
            #         dict_to_save['captions'] = captions
            #         dict_to_save['positive_map'] = positive_map
            #     torch.save(
            #         dict_to_save,
            #         fname
            #     )
            if torch.isnan(losses) or torch.isinf(losses):
                losses[losses != losses] = 0
            optimizer.zero_grad()
            losses.backward()
            optimizer.step()
            scheduler.step()
        # Adapt the weight decay: only support multiStepLR
        if cfg.SOLVER.WEIGHT_DECAY_SCHEDULE and hasattr(scheduler, 'milestones'):
            if milestone_target < len(scheduler.milestones):
                next_milestone = list(scheduler.milestones)[milestone_target]
            else:
                next_milestone = float('inf')
            if scheduler.last_epoch >= next_milestone * cfg.SOLVER.WEIGHT_DECAY_SCHEDULE_RATIO:
                gamma = scheduler.gamma
                logger.info("Drop the weight decay by {}!".format(gamma))
                for param in optimizer.param_groups:
                    if 'weight_decay' in param:
                        param['weight_decay'] *= gamma
                # move the target forward
                milestone_target += 1
        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = reduce_loss_dict(loss_dict)
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())
        meters.update(loss=losses_reduced, **loss_dict_reduced)
        if model_ema is not None:
            model_ema.update(model)
            arguments["model_ema"] = model_ema.state_dict()
        batch_time = time.time() - end
        end = time.time()
        meters.update(time=batch_time, data=data_time)
        eta_seconds = meters.time.global_avg * (max_iter - iteration)
        eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
        # Progress line every 20 iterations, rank 0 only.
        if iteration % 20 == 0 or iteration == max_iter:
            # if iteration % 1 == 0 or iteration == max_iter:
            #logger.info(
            if global_rank <= 0:
                print(
                    meters.delimiter.join(
                        [
                            "eta: {eta}",
                            "iter: {iter}",
                            "{meters}",
                            "lr: {lr:.6f}",
                            "wd: {wd:.6f}",
                            "max mem: {memory:.0f}",
                        ]
                    ).format(
                        eta=eta_string,
                        iter=iteration,
                        meters=str(meters),
                        lr=optimizer.param_groups[0]["lr"],
                        wd=optimizer.param_groups[0]["weight_decay"],
                        memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0,
                    )
                )
        # Periodic evaluation (same cadence as checkpointing).
        if val_data_loader and (iteration % checkpoint_period == 0 or iteration == max_iter):
            if is_main_process():
                print("Evaluating")
            eval_result = 0.0
            model.eval()
            if cfg.SOLVER.TEST_WITH_INFERENCE:
                # Full inference pipeline (handles distributed gathering).
                with torch.no_grad():
                    try:
                        _model = model.module
                    except:
                        _model = model
                    _result = inference(
                        model = _model,
                        data_loader = val_data_loader,
                        dataset_name="val",
                        device=device,
                        expected_results=cfg.TEST.EXPECTED_RESULTS,
                        expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,
                        output_folder=None,
                        cfg=cfg,
                        verbose=False
                    )
                    if is_main_process():
                        eval_result = _result[0].results['bbox']['AP']
            else:
                # Manual forward over the val loader, gathered to rank 0.
                results_dict = {}
                cpu_device = torch.device("cpu")
                for i, batch in enumerate(val_data_loader):
                    images, targets, image_ids, positive_map, *_ = batch
                    with torch.no_grad():
                        images = images.to(device)
                        if positive_map is None:
                            output = model(images)
                        else:
                            captions = [t.get_field("caption") for t in targets if "caption" in t.fields()]
                            output = model(images, captions, positive_map)
                        output = [o.to(cpu_device) for o in output]
                    results_dict.update(
                        {img_id: result for img_id, result in zip(image_ids, output)}
                    )
                all_predictions = all_gather(results_dict)
                if is_main_process():
                    predictions = {}
                    for p in all_predictions:
                        predictions.update(p)
                    predictions = [predictions[i] for i in list(sorted(predictions.keys()))]
                    eval_result, _ = evaluate(val_data_loader.dataset, predictions, output_folder=None,
                                              box_only=cfg.DATASETS.CLASS_AGNOSTIC)
                    if cfg.DATASETS.CLASS_AGNOSTIC:
                        eval_result = eval_result.results['box_proposal']['AR@100']
                    else:
                        eval_result = eval_result.results['bbox']['AP']
            model.train()
            # Optionally evaluate the EMA weights instead, overwriting
            # eval_result with the EMA score.
            if model_ema is not None and cfg.SOLVER.USE_EMA_FOR_MONITOR:
                model_ema.ema.eval()
                results_dict = {}
                cpu_device = torch.device("cpu")
                for i, batch in enumerate(val_data_loader):
                    images, targets, image_ids, positive_map, positive_map_eval = batch
                    with torch.no_grad():
                        images = images.to(device)
                        if positive_map is None:
                            output = model_ema.ema(images)
                        else:
                            captions = [t.get_field("caption") for t in targets if "caption" in t.fields()]
                            output = model_ema.ema(images, captions, positive_map)
                        output = [o.to(cpu_device) for o in output]
                    results_dict.update(
                        {img_id: result for img_id, result in zip(image_ids, output)}
                    )
                all_predictions = all_gather(results_dict)
                if is_main_process():
                    predictions = {}
                    for p in all_predictions:
                        predictions.update(p)
                    predictions = [predictions[i] for i in list(sorted(predictions.keys()))]
                    eval_result, _ = evaluate(val_data_loader.dataset, predictions, output_folder=None,
                                              box_only=cfg.DATASETS.CLASS_AGNOSTIC)
                    if cfg.DATASETS.CLASS_AGNOSTIC:
                        eval_result = eval_result.results['box_proposal']['AR@100']
                    else:
                        eval_result = eval_result.results['bbox']['AP']
            arguments.update(eval_result=eval_result)
            if cfg.SOLVER.USE_AUTOSTEP:
                eval_result = all_gather(eval_result)[0] #broadcast_data([eval_result])[0]
                # print("Rank {} eval result gathered".format(cfg.local_rank), eval_result)
                scheduler.step(eval_result)
            # Early stopping: save "model_best" on improvement, stop after
            # AUTO_TERMINATE_PATIENCE evaluations without improvement.
            if cfg.SOLVER.AUTO_TERMINATE_PATIENCE != -1:
                if eval_result < previous_best:
                    patience_counter += 1
                else:
                    patience_counter = 0
                    previous_best = eval_result
                    checkpointer.save("model_best", **arguments)
                print("Previous Best", previous_best, "Patience Counter", patience_counter, "Eval Result", eval_result)
                if patience_counter >= cfg.SOLVER.AUTO_TERMINATE_PATIENCE:
                    if is_main_process():
                        print("\n\n\n\nAuto Termination at {}, current best {}\n\n\n".format(iteration, previous_best))
                    break
        if iteration % checkpoint_period == 0:
            checkpointer.save("model_{:07d}".format(iteration), **arguments)
        if iteration == max_iter:
            checkpointer.save("model_final", **arguments)
            break
    total_training_time = time.time() - start_training_time
    total_time_str = str(datetime.timedelta(seconds=total_training_time))
    logger.info(
        "Total training time: {} ({:.4f} s / it)".format(
            total_time_str, total_training_time / (max_iter)
        )
    )
def build_detection_model(cfg):
    """Instantiate the detection meta-architecture named in the config.

    Looks up the class registered under cfg.MODEL.META_ARCHITECTURE and
    constructs it with the full config node.
    """
    arch_cls = _DETECTION_META_ARCHITECTURES[cfg.MODEL.META_ARCHITECTURE]
    return arch_cls(cfg)
class DetectronCheckpointer(Checkpointer):
    """Checkpointer that understands Detectron-style weight sources.

    Extends the base Checkpointer with support for ``catalog://`` paths,
    remote URLs, and legacy Caffe2 / big / pretrain checkpoint formats.
    """

    def __init__(
        self,
        cfg,
        model,
        optimizer=None,
        scheduler=None,
        save_dir="",
        save_to_disk=None,
        logger=None,
    ):
        super(DetectronCheckpointer, self).__init__(
            model, optimizer, scheduler, save_dir, save_to_disk, logger
        )
        # Keep a private copy so later mutations of the shared cfg cannot
        # affect checkpoint loading.
        self.cfg = cfg.clone()

    def _load_file(self, f):
        # Resolve "catalog://" shorthands through the paths catalog module.
        if f.startswith("catalog://"):
            paths_catalog = import_file(
                "maskrcnn_benchmark.config.paths_catalog", self.cfg.PATHS_CATALOG, True
            )
            resolved = paths_catalog.ModelCatalog.get(f[len("catalog://") :])
            self.logger.info("{} points to {}".format(f, resolved))
            f = resolved
        # Remote checkpoints are downloaded once and cached locally.
        if f.startswith("http"):
            cached = cache_url(f)
            self.logger.info("url {} cached in {}".format(f, cached))
            f = cached
        # Legacy / special formats go through dedicated loaders.
        if f.endswith(".pkl"):
            return load_c2_format(self.cfg, f)
        if f.endswith(".big"):
            return load_big_format(self.cfg, f)
        if f.endswith(".pretrain"):
            return load_pretrain_format(self.cfg, f)
        # Fall back to the native detectron.pytorch checkpoint loader and
        # normalize bare state dicts to the {"model": ...} convention.
        loaded = super(DetectronCheckpointer, self)._load_file(f)
        return loaded if "model" in loaded else dict(model=loaded)
def get_rank():
    """Return this process's rank in the default process group.

    Falls back to 0 when torch.distributed is unavailable or has not been
    initialized (i.e. plain single-process runs).
    """
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
class MetricLogger(object):
    """Track smoothed training metrics and render them as one log string."""

    def __init__(self, delimiter="\t"):
        # One SmoothedValue accumulator per metric name, created on demand.
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        # Accept scalar tensors as well as plain numbers.
        for name, value in kwargs.items():
            if isinstance(value, torch.Tensor):
                value = value.item()
            assert isinstance(value, (float, int))
            self.meters[name].update(value)

    def __getattr__(self, attr):
        # Expose meters as attributes; only reached when normal attribute
        # lookup has already failed.
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))

    def __str__(self):
        # "name: median (global_avg)" per meter, joined by the delimiter.
        return self.delimiter.join(
            "{}: {:.4f} ({:.4f})".format(name, meter.median, meter.global_avg)
            for name, meter in self.meters.items()
        )
class TensorboardLogger(MetricLogger):
    """MetricLogger that mirrors every update to a TensorBoard summary.

    Only the main process owns a SummaryWriter; other ranks keep
    ``self.writer`` as None and skip the tensorboard calls.
    """

    def __init__(self,
                 log_dir,
                 start_iter=0,
                 delimiter='\t'
                 ):
        """
        Args:
            log_dir: directory for the tensorboard event files.
            start_iter: initial global step (e.g. when resuming).
            delimiter: separator used by the textual MetricLogger output.
        """
        super(TensorboardLogger, self).__init__(delimiter)
        self.iteration = start_iter
        self.writer = self._get_tensorboard_writer(log_dir)

    @staticmethod
    def _get_tensorboard_writer(log_dir):
        # BUG FIX: this helper takes no ``self``; without @staticmethod the
        # bound-method call in __init__ passed the instance as an extra
        # positional argument and raised a TypeError.
        try:
            from tensorboardX import SummaryWriter
        except ImportError:
            raise ImportError(
                'To use tensorboard please install tensorboardX '
                '[ pip install tensorflow tensorboardX ].'
            )

        # Only the main process writes events; other ranks get no writer.
        if is_main_process():
            tb_logger = SummaryWriter('{}'.format(log_dir))
            return tb_logger
        else:
            return None

    def update(self, **kwargs):
        """Update the smoothed meters and, on the main process, log each
        scalar to tensorboard at the current iteration."""
        super(TensorboardLogger, self).update(**kwargs)
        if self.writer:
            for k, v in kwargs.items():
                if isinstance(v, torch.Tensor):
                    v = v.item()
                assert isinstance(v, (float, int))
                self.writer.add_scalar(k, v, self.iteration)
            self.iteration += 1
def train(cfg, local_rank, distributed, use_tensorboard=False,):
    """Assemble and run one full training job from the config.

    Builds the detection model, optionally resets BN statistics and freezes
    sub-networks, creates data loaders / optimizer / scheduler, wraps the
    model for distributed training, restores any checkpoint, and hands
    everything to ``do_train``.

    Args:
        cfg: experiment config node.
        local_rank: local GPU index used for DistributedDataParallel.
        distributed: whether to wrap the model in DDP.
        use_tensorboard: log metrics via TensorboardLogger when True.

    Returns:
        The trained model (DDP-wrapped when ``distributed`` is True).
    """
    model = build_detection_model(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)
    # Optionally reset BatchNorm running statistics to their defaults.
    if cfg.MODEL.BACKBONE.RESET_BN:
        for name, param in model.named_buffers():
            if 'running_mean' in name:
                torch.nn.init.constant_(param, 0)
            if 'running_var' in name:
                torch.nn.init.constant_(param, 1)
    if cfg.SOLVER.GRAD_CLIP > 0:
        clip_value = cfg.SOLVER.GRAD_CLIP
        # NOTE(review): no backward pass has run yet, so p.grad is None for
        # every parameter here and no clamp hook gets registered — confirm
        # whether gradient clipping is actually in effect.
        for p in filter(lambda p: p.grad is not None, model.parameters()):
            p.register_hook(lambda grad: torch.clamp(grad, -clip_value, clip_value))
    data_loader = make_data_loader(
        cfg,
        is_train=True,
        is_distributed=distributed,
        start_iter=0  # <TODO> Sample data from resume is disabled, due to the conflict with max_epoch
    )
    # A validation loader is only needed for in-training eval or AUTOSTEP LR.
    if cfg.TEST.DURING_TRAINING or cfg.SOLVER.USE_AUTOSTEP:
        data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)
        data_loaders_val = data_loaders_val[0]
    else:
        data_loaders_val = None
    # Freeze requested sub-networks by disabling their gradients.
    if cfg.MODEL.BACKBONE.FREEZE:
        for p in model.backbone.body.parameters():
            p.requires_grad = False
    if cfg.MODEL.LANGUAGE_BACKBONE.FREEZE:
        print("LANGUAGE_BACKBONE FROZEN.")
        for p in model.language_backbone.body.parameters():
            p.requires_grad = False
    if cfg.MODEL.FPN.FREEZE:
        for p in model.backbone.fpn.parameters():
            p.requires_grad = False
    if cfg.MODEL.RPN.FREEZE:
        for p in model.rpn.parameters():
            p.requires_grad = False
    # if cfg.SOLVER.PROMPT_PROBING_LEVEL != -1:
    #     if cfg.SOLVER.PROMPT_PROBING_LEVEL == 1:
    #         for p in model.parameters():
    #             p.requires_grad = False
    #         for p in model.language_backbone.body.parameters():
    #             p.requires_grad = True
    #         for name, p in model.named_parameters():
    #             if p.requires_grad:
    #                 print(name, " : Not Frozen")
    #             else:
    #                 print(name, " : Frozen")
    #     else:
    #         assert(0)
    optimizer = make_optimizer(cfg, model)
    scheduler = make_lr_scheduler(cfg, optimizer)
    if distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[local_rank], output_device=local_rank,
            broadcast_buffers=cfg.MODEL.BACKBONE.USE_BN,
            find_unused_parameters=cfg.SOLVER.FIND_UNUSED_PARAMETERS
        )
    arguments = {}
    arguments["iteration"] = 0
    output_dir = cfg.OUTPUT_DIR
    # Only rank 0 writes checkpoints to disk.
    save_to_disk = get_rank() == 0
    checkpointer = DetectronCheckpointer(
        cfg, model, optimizer, scheduler, output_dir, save_to_disk
    )
    # Restore weights (and the iteration counter) from cfg.MODEL.WEIGHT if found.
    extra_checkpoint_data = checkpointer.load(try_to_find(cfg.MODEL.WEIGHT))
    arguments.update(extra_checkpoint_data)
    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
    if use_tensorboard:
        meters = TensorboardLogger(
            log_dir=cfg.OUTPUT_DIR,
            start_iter=arguments["iteration"],
            delimiter=" "
        )
    else:
        meters = MetricLogger(delimiter=" ")
    do_train(
        cfg,
        model,
        data_loader,
        optimizer,
        scheduler,
        checkpointer,
        device,
        checkpoint_period,
        arguments,
        data_loaders_val,
        meters
    )
    return model
10,149 | from maskrcnn_benchmark.utils.env import setup_environment
import argparse
import os
import torch
from maskrcnn_benchmark.config import cfg, try_to_find
from maskrcnn_benchmark.data import make_data_loader
from maskrcnn_benchmark.solver import make_lr_scheduler
from maskrcnn_benchmark.solver import make_optimizer
from maskrcnn_benchmark.engine.inference import inference
from maskrcnn_benchmark.engine.trainer import do_train
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.utils.collect_env import collect_env_info
from maskrcnn_benchmark.utils.comm import synchronize, get_rank
from maskrcnn_benchmark.utils.imports import import_file
from maskrcnn_benchmark.utils.logger import setup_logger
from maskrcnn_benchmark.utils.metric_logger import (MetricLogger, TensorboardLogger)
from maskrcnn_benchmark.utils.miscellaneous import mkdir, save_config
import numpy as np
import random
from maskrcnn_benchmark.utils.amp import autocast, GradScaler
The provided code snippet includes necessary dependencies for implementing the `setup_for_distributed` function. Write a Python function `def setup_for_distributed(is_master)` to solve the following problem:
This function disables printing when not in master process
Here is the function:
def setup_for_distributed(is_master):
    """Replace the builtin ``print`` so only the master process emits output.

    Non-master ranks stay silent unless the caller passes ``force=True``.
    """
    import builtins as __builtin__
    original_print = __builtin__.print

    def print(*args, **kwargs):
        # A truthy ``force`` kwarg lets any rank print regardless of role.
        force = kwargs.pop("force", False)
        if is_master or force:
            original_print(*args, **kwargs)

    __builtin__.print = print
10,150 | import argparse
import dataclasses
import glob
import os
import shutil
import numpy as np
from tqdm import tqdm
def disable_torch_init():
    """
    Disable the redundant torch default initialization to accelerate model creation.
    """
    import torch
    global torch_linear_init_backup
    global torch_layer_norm_init_backup

    # Remember the real initializers so restore_torch_init() can undo this.
    torch_linear_init_backup = torch.nn.Linear.reset_parameters
    torch_layer_norm_init_backup = torch.nn.LayerNorm.reset_parameters

    # Replace them with no-ops; weights are overwritten by the checkpoint anyway.
    torch.nn.Linear.reset_parameters = lambda self: None
    torch.nn.LayerNorm.reset_parameters = lambda self: None
def restore_torch_init():
    """Rollback the change made by disable_torch_init."""
    import torch
    # Reinstall the initializers saved by disable_torch_init().
    torch.nn.Linear.reset_parameters = torch_linear_init_backup
    torch.nn.LayerNorm.reset_parameters = torch_layer_norm_init_backup
The provided code snippet includes necessary dependencies for implementing the `download_opt_weights_old` function. Write a Python function `def download_opt_weights_old(model_name, path)` to solve the following problem:
Download weights from huggingface.
Here is the function:
def download_opt_weights_old(model_name, path):
    """Download weights from huggingface and convert them to .npy files.

    Args:
        model_name: e.g. "opt-1.3b", "bloom-560m", or "galactica-1.3b"
            (an optional "org/" prefix is stripped).
        path: directory under which a "<model_name>-np" folder is created.

    Raises:
        ValueError: if the model family is not opt/bloom/galactica.
    """
    import torch
    from transformers import OPTForCausalLM, BloomForCausalLM

    if "/" in model_name:
        model_name = model_name.split("/")[1].lower()
    path = os.path.join(path, f"{model_name}-np")
    path = os.path.abspath(os.path.expanduser(path))

    if "opt" in model_name:
        hf_model_name = "facebook/" + model_name
        model_class = OPTForCausalLM
    elif "bloom" in model_name:
        hf_model_name = "bigscience/" + model_name
        model_class = BloomForCausalLM
    elif "galactica" in model_name:
        hf_model_name = "facebook/" + model_name
        # BUG FIX: model_class was never assigned for galactica, which made
        # from_pretrained below fail with a NameError. Galactica checkpoints
        # use the OPT architecture.
        model_class = OPTForCausalLM
    else:
        # BUG FIX: the message was missing the f-prefix and printed the
        # literal "{model_name}".
        raise ValueError(f"Invalid model name: {model_name}")

    print(f"Load the pre-trained pytorch weights of {model_name} from huggingface. "
          f"The downloading and cpu loading can take dozens of minutes. "
          f"If it seems to get stuck, you can monitor the progress by "
          f"checking the memory usage of this process.")

    # Skip the default weight initialization: everything is overwritten below.
    disable_torch_init()
    model = model_class.from_pretrained(hf_model_name, torch_dtype=torch.float16,
                                        _fast_init=True)
    restore_torch_init()

    os.makedirs(path, exist_ok=True)

    print(f"Convert the weights to numpy format under {path} ...")
    if "opt" in model_name or "galactica" in model_name:
        # Both families use the OPT decoder layout.
        for name, param in tqdm(list(model.model.named_parameters())):
            name = name.replace("decoder.final_layer_norm", "decoder.layer_norm")
            param_path = os.path.join(path, name)
            with open(param_path, "wb") as f:
                np.save(f, param.cpu().detach().numpy())
    elif "bloom" in model_name:
        for name, param in tqdm(list(model.transformer.named_parameters())):
            param_path = os.path.join(path, name)
            with open(param_path, "wb") as f:
                np.save(f, param.cpu().detach().numpy())
    else:
        raise ValueError(f"Invalid model name: {model_name}")
10,151 | import argparse
import dataclasses
import glob
import os
import shutil
import numpy as np
from tqdm import tqdm
The provided code snippet includes necessary dependencies for implementing the `disable_hf_opt_init` function. Write a Python function `def disable_hf_opt_init()` to solve the following problem:
Disable the redundant default initialization to accelerate model creation.
Here is the function:
def disable_hf_opt_init():
    """
    Disable the redundant default initialization to accelerate model creation.
    """
    import transformers

    # Turn the OPT weight initializer into a no-op; real weights are loaded
    # from the checkpoint afterwards.
    transformers.models.opt.modeling_opt.OPTPreTrainedModel._init_weights = (
        lambda *args, **kwargs: None
    )
10,152 | import argparse
import dataclasses
import glob
import os
import shutil
import numpy as np
from tqdm import tqdm
def download_opt_weights(model_name, path):
    """Download OPT/Galactica weights from huggingface and convert to .npy.

    Fetches only the *.bin shards via snapshot_download and writes each
    tensor to its own numpy file under "<path>/<model_name>-np".

    Args:
        model_name: e.g. "opt-1.3b" or "galactica-1.3b" (an optional
            "org/" prefix is stripped for the output folder name).
        path: parent directory for the converted weights.

    Raises:
        ValueError: if the model family is not opt/galactica.
    """
    from huggingface_hub import snapshot_download
    import torch

    print(f"Load the pre-trained pytorch weights of {model_name} from huggingface. "
          f"The downloading and cpu loading can take dozens of minutes. "
          f"If it seems to get stuck, you can monitor the progress by "
          f"checking the memory usage of this process.")

    if "opt" in model_name:
        hf_model_name = "facebook/" + model_name
    elif "galactica" in model_name:
        hf_model_name = "facebook/" + model_name
    else:
        # BUG FIX: an unsupported family previously fell through and raised a
        # NameError on hf_model_name below; fail with a clear error instead
        # (consistent with download_opt_weights_old).
        raise ValueError(f"Invalid model name: {model_name}")

    folder = snapshot_download(hf_model_name, allow_patterns="*.bin")
    bin_files = glob.glob(os.path.join(folder, "*.bin"))

    if "/" in model_name:
        model_name = model_name.split("/")[1].lower()
    path = os.path.join(path, f"{model_name}-np")
    path = os.path.abspath(os.path.expanduser(path))
    os.makedirs(path, exist_ok=True)

    for bin_file in tqdm(bin_files, desc="Convert format"):
        state = torch.load(bin_file)
        for name, param in tqdm(state.items(), leave=False):
            name = name.replace("model.", "")
            name = name.replace("decoder.final_layer_norm", "decoder.layer_norm")
            param_path = os.path.join(path, name)
            with open(param_path, "wb") as f:
                np.save(f, param.cpu().detach().numpy())

            # The LM head shares the token embedding; copy the file so both
            # parameter names resolve.
            if "decoder.embed_tokens.weight" in name:
                shutil.copy(param_path, param_path.replace(
                    "decoder.embed_tokens.weight", "lm_head.weight"))
10,153 | import argparse
import numpy as np
import os
import time
import torch
from flexgen.utils import GB, MB, KB
def benchmark_func(func, number, repeat, warmup=3):
    """Time ``func`` and return the list of average per-call costs in seconds.

    Runs ``warmup`` untimed calls, then ``repeat`` timed rounds of ``number``
    calls each, synchronizing CUDA around every round when a GPU is present.

    Args:
        func: zero-argument callable to benchmark.
        number: calls per timed round (the cost is averaged over these).
        repeat: number of timed rounds.
        warmup: untimed calls executed before measuring.

    Returns:
        A list of ``repeat`` average per-call times (seconds).
    """
    for _ in range(warmup):
        func()

    # BUG FIX: the list was previously seeded with [0], which dragged every
    # downstream np.mean toward zero and inflated the reported bandwidth.
    costs = []
    # Guard the synchronize calls so the benchmark also runs on CPU-only hosts.
    use_cuda = torch.cuda.is_available()
    for _ in range(repeat):
        if use_cuda:
            torch.cuda.synchronize()
        tic = time.time()
        for _ in range(number):
            func()
        if use_cuda:
            torch.cuda.synchronize()
        costs.append((time.time() - tic) / number)

    return costs
# NOTE(review): these redefine the GB/MB just imported from flexgen.utils —
# presumably the same binary values (1 MiB / 1 GiB); the local definitions
# take precedence below. Confirm before removing either definition.
MB = 1 << 20
GB = 1 << 30
def profile_bandwidth(path):
    """Measure copy bandwidth between cpu/gpu/disk pairs and print results.

    For each (dst, src) device pair and batch sizes b in {1, 128, 512},
    times copying a (b, 512, 512) int8 tensor via benchmark_func and
    reports GB/s. Requires a CUDA device for the gpu entries.

    Args:
        path: scratch .npy file used as the "disk" endpoint (its parent
            directory is created if missing).
    """
    s, h = 512, 512
    path_dir = os.path.dirname(path)
    os.makedirs(path_dir, exist_ok=True)
    links = [("cpu", "gpu"), ("gpu", "cpu"), ("gpu", "gpu"), ("cpu", "cpu"),
             ("cpu", "disk"), ("disk", "cpu")]
    for (dst, src) in links:
        for b in [1, 128, 512]:
            # Disk endpoints are represented by the path string; "w+" creates
            # a fresh zero-filled memmap of the right shape each time.
            # NOTE(review): both disk src and disk dst reuse the same file, so
            # the disk-read case measures reading freshly zeroed pages —
            # confirm this matches the intended workload.
            if dst == "cpu":
                dst_tensor = torch.ones((b, s, h), dtype=torch.int8, pin_memory=True)
            elif dst == "gpu":
                dst_tensor = torch.ones((b, s, h), dtype=torch.int8, device="cuda:0")
            elif dst == "disk":
                np.lib.format.open_memmap(path, mode="w+", shape=((b,s,h)), dtype=np.int8)
                dst_tensor = path
            if src == "cpu":
                src_tensor = torch.ones((b, s, h), dtype=torch.int8, pin_memory=True)
            elif src == "gpu":
                src_tensor = torch.ones((b, s, h), dtype=torch.int8, device="cuda:0")
            elif src == "disk":
                np.lib.format.open_memmap(path, mode="w+", shape=((b,s,h)), dtype=np.int8)
                src_tensor = path
            dst_indices = (slice(0, b), slice(0, s), slice(0, h))
            src_indices = (slice(0, b), slice(0, s), slice(0, h))
            def func():
                # Disk-backed operands are re-opened as memmaps on every call
                # so file I/O is included in the measured cost.
                if isinstance(src_tensor, str):
                    src_tensor_ = torch.from_numpy(np.lib.format.open_memmap(src_tensor))
                else:
                    src_tensor_ = src_tensor
                if isinstance(dst_tensor, str):
                    dst_tensor_ = torch.from_numpy(np.lib.format.open_memmap(dst_tensor))
                else:
                    dst_tensor_ = dst_tensor
                dst_tensor_[dst_indices].copy_(src_tensor_[src_indices])
            # int8 elements, so the element count equals the byte count.
            size = np.prod([(x.stop - x.start) / (x.step or 1) for x in dst_indices])
            cost = np.mean(benchmark_func(func, number=5, repeat=3))
            bandwidth = size / cost / GB
            print(f"size: {size / MB:6.2f} MB, {src}-to-{dst} bandwidth: {bandwidth:.3f} GB/s")
        print()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.