code stringlengths 17 6.64M |
|---|
def _normalize_for_match(x):
    """Lowercase ``x`` and split it into tokens with the module-level ``_TOKENIZER`` regex.

    Returns the list of matched tokens.
    """
    # re.findall already returns a list; wrapping it in an identity list
    # comprehension only added an extra copy.
    return _TOKENIZER.findall(x.lower())
|
def _compare(operator, src, tgt):
    """Evaluate the binary comparison named by ``operator`` on (src, tgt).

    Raises:
        ValueError: if ``operator`` is not one of the known ``_Operator`` members.
    """
    if operator == _Operator.EQUALS:
        return src == tgt
    if operator == _Operator.GREATER:
        return src > tgt
    if operator == _Operator.LESSER:
        return src < tgt
    raise ValueError(f'Unknown operator: {operator}')
|
def _parse_value(table, column, cell_value):
    """Convert numeric values to floats and keep everything else as string."""
    # The table carries a per-column type tag; look up the matching converter.
    column_type = table['types'][column]
    converter = _TYPE_CONVERTER[column_type]
    return converter(cell_value)
|
def _is_string(x):
return isinstance(x, str)
|
def _respect_conditions(table, row, conditions):
"True if 'row' satisfies all 'conditions'."
for cond in conditions:
table_value = row[cond.column]
cmp_value = _parse_value(table, cond.column, cond.cmp_value)
if (_is_string(table_value) and _is_string(cmp_value)):
table_val... |
def _get_float_answer(table, answer_coordinates, aggregation_op):
'Applies operation to produce reference float answer.'
if (not answer_coordinates):
if (aggregation_op == _Aggregation.COUNT):
return 0.0
else:
return EMPTY_ANSWER_AGG
if (aggregation_op == _Aggregati... |
def _get_answer_coordinates(table, sql_query):
'Retrieves references coordinates by executing SQL.'
aggregation_op_index = sql_query['agg']
if (aggregation_op_index >= 3):
aggregation_op = _Aggregation(aggregation_op_index)
else:
aggregation_op = _Aggregation.NONE
target_column = s... |
def _get_answer_text(table, answer_coordinates, float_answer):
if (float_answer is not None):
return [str(float_answer)]
return [str(table['real_rows'][r][c]) for (r, c) in answer_coordinates]
|
def retrieve_wikisql_query_answer_tapas(table, example) -> List:
(answer_coordinates, aggregation_op) = _get_answer_coordinates(table, example)
float_answer = _get_float_answer(table, answer_coordinates, aggregation_op)
answer_text = _get_answer_text(table, answer_coordinates, float_answer)
if (len(an... |
def preprocess_function_with_template(examples, tokenizer, template, lowercase, **kwargs):
'\n The is_training FLAG is used to identify if we could use the supervision\n to truncate the table content if it is required.\n '
assert ('input_fields' in examples)
input_fields = examples['input_fields'... |
def preprocess_function(examples, tokenizer, lowercase, **kwargs):
'\n The is_training FLAG is used to identify if we could use the supervision\n to truncate the table content if it is required.\n '
if lowercase:
examples['input'] = [example.lower() for example in examples['input']]
model... |
class TableLinearize(abc.ABC):
PROMPT_MESSAGE = '\n Please check that your table must follow the following format:\n {"header": ["col1", "col2", "col3"], "rows": [["row11", "row12", "row13"], ["row21", "row22", "row23"]]}\n '
def process_table(self, table_content: Dict) -> str:
'\n ... |
class IndexedRowTableLinearize(TableLinearize):
'\n FORMAT: col: col1 | col2 | col3 row 1 : val1 | val2 | val3 row 2 : ...\n '
def process_table(self, table_content: Dict):
'\n Given a table, TableLinearize aims at converting it into a flatten sequence with special symbols.\n '
... |
class MarkdownTableLinearize(TableLinearize):
'\n FORMAT: col: col1 | col2 | col3 row 1 : val1 | val2 | val3 row 2 : ...\n '
def process_table(self, table_content: Dict):
'\n Given a table, TableLinearize aims at converting it into a flatten sequence with special symbols.\n '
... |
class NaturalTableLinearize(TableLinearize):
'\n FORMAT: col: col1 | col2 | col3 row 1 : val1 | val2 | val3 row 2 : ...\n '
def process_table(self, table_content: Dict):
'\n Given a table, TableLinearize aims at converting it into a flatten sequence with special symbols.\n '
... |
class CodexTableLinearize(TableLinearize):
'\n FORMAT: col: col1 | col2 | col3 row 1 : val1 | val2 | val3 row 2 : ...\n '
def process_table(self, table_content: Dict):
'\n Given a table, TableLinearize aims at converting it into a flatten sequence with special symbols.\n '
... |
class TableProcessor(object):
def __init__(self, table_linearize_func: TableLinearize, table_truncate_funcs: List[TableTruncate], target_delimiter: str=DEL):
self.table_linearize_func = table_linearize_func
self.table_truncate_funcs = table_truncate_funcs
self.target_delimiter = target_de... |
def get_default_processor(max_cell_length, max_input_length, model_name):
table_linearize_func = IndexedRowTableLinearize()
table_truncate_funcs = [CellLimitTruncate(max_cell_length=max_cell_length, tokenizer=AutoTokenizer.from_pretrained(pretrained_model_name_or_path=model_name), max_input_length=max_input_l... |
def get_natural_processor(max_cell_length, max_input_length, model_name):
table_linearize_func = NaturalTableLinearize()
table_truncate_funcs = [CellLimitTruncate(max_cell_length=max_cell_length, tokenizer=AutoTokenizer.from_pretrained(pretrained_model_name_or_path=model_name), max_input_length=max_input_leng... |
def get_codex_processor(max_cell_length, max_input_length, model_name):
table_linearize_func = CodexTableLinearize()
table_truncate_funcs = [CellLimitTruncate(max_cell_length=max_cell_length, tokenizer=AutoTokenizer.from_pretrained(pretrained_model_name_or_path=model_name), max_input_length=max_input_length),... |
class TableTruncate(ABC):
def __init__(self, tokenizer: BasicTokenizer=None, max_input_length: int=1024):
"\n The class `TableTruncate` is used to compress a table to fit in memory.\n :param tokenizer: a huggingface transformer's tokenizer, to be used on BPE encoding to estimate expected to... |
class CellLimitTruncate(TableTruncate):
'\n Limit the maximum length of cell values in a table to truncate the overall length\n '
def __init__(self, max_cell_length: int=15, **kwargs):
super().__init__(**kwargs)
self.max_cell_length = max_cell_length
def truncate_table(self, table_... |
class RowDeleteTruncate(TableTruncate):
'\n The row deleting principle is straightforward: randomly deleting rows to fit the table into memory,\n but do not make it too small (e.g., just lower than the limitation is ok).\n '
def __init__(self, table_linearize: TableLinearize, **kwargs):
supe... |
@dataclass
class ModelArguments():
'\n Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.\n '
model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
config_name: Optional[str] = field(default=Non... |
@dataclass
class DataTrainingArguments():
'\n Arguments pertaining to what data we are going to input our model for training and eval.\n '
dataset_name: Optional[str] = field(default='sail/symbolic-instruction-tuning', metadata={'help': 'The name of the dataset to use (via the datasets library).'})
... |
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
(model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
(model_args, d... |
def _mp_fn(index):
    # Per-process entry point: accepts the worker `index` required by
    # spawn-style multiprocess launchers but ignores it — presumably each
    # process just runs the full `main()` (TODO confirm against the launcher).
    main()
|
def load_state_dict(module, state_dict, strict=False, logger=None):
"Load state_dict to a module.\n\n This method is modified from :meth:`torch.nn.Module.load_state_dict`.\n Default value for ``strict`` is set to ``False`` and the message for\n param mismatch will be shown even if strict is False.\n\n ... |
class CheckpointLoader():
'A general checkpoint loader to manage all schemes.'
_schemes = {}
@classmethod
def _register_scheme(cls, prefixes, loader, force=False):
if isinstance(prefixes, str):
prefixes = [prefixes]
else:
assert isinstance(prefixes, (list, tupl... |
def _load_checkpoint(filename, map_location=None, logger=None):
'Load checkpoint from somewhere (modelzoo, file, url).\n\n Args:\n filename (str): Accept local filepath, URL, ``torchvision://xxx``,\n ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for\n details.\n ... |
def load_checkpoint(model, filename, map_location=None, strict=False, logger=None, revise_keys=[('^module\\.', '')]):
"Load checkpoint from a file or URI.\n\n Args:\n model (Module): Module to load checkpoint.\n filename (str): Accept local filepath, URL, ``torchvision://xxx``,\n ``ope... |
def save_checkpoint(model, filename, optimizer=None, meta=None):
'Save checkpoint to file.\n\n The checkpoint will have 4 fields: ``meta``, ``state_dict`` and\n ``optimizer``, ``amp``. By default ``meta`` will contain version\n and time info.\n\n Args:\n model (Module): Module whose params are ... |
@RUNNERS.register_module()
class EpochBasedRunnerAmp(EpochBasedRunner):
'Epoch-based Runner with AMP support.\n\n This runner train models epoch by epoch.\n '
def save_checkpoint(self, out_dir, filename_tmpl='epoch_{}.pth', save_optimizer=True, meta=None, create_symlink=True):
'Save the checkpo... |
@HOOKS.register_module()
class DistOptimizerHook(OptimizerHook):
'Optimizer hook for distributed training.'
def __init__(self, update_interval=1, grad_clip=None, coalesce=True, bucket_size_mb=(- 1), use_fp16=False):
self.grad_clip = grad_clip
self.coalesce = coalesce
self.bucket_size_... |
def set_random_seed(seed, deterministic=False):
'Set random seed.\n\n Args:\n seed (int): Seed to be used.\n deterministic (bool): Whether to set the deterministic option for\n CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`\n to True and `torch.backends.cudnn.... |
def train_detector(model, dataset, cfg, distributed=False, validate=False, timestamp=None, meta=None):
logger = get_root_logger(cfg.log_level)
dataset = (dataset if isinstance(dataset, (list, tuple)) else [dataset])
if ('imgs_per_gpu' in cfg.data):
logger.warning('"imgs_per_gpu" is deprecated in M... |
def parse_args():
parser = argparse.ArgumentParser(description='MMDet test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--work-dir', help='the directory to save the file containing evalua... |
def main():
args = parse_args()
assert (args.out or args.eval or args.format_only or args.show or args.show_dir), 'Please specify at least one operation (save/eval/format/show the results / save the results) with the argument "--out", "--eval", "--format-only", "--show" or "--show-dir"'
if (args.eval and ... |
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument('--resume-from', help='the checkpoint file to resume from')
... |
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if (args.cfg_options is not None):
cfg.merge_from_dict(args.cfg_options)
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports']... |
def _cfg(url='', **kwargs):
    """Build a timm-style default model config; any keyword argument overrides its default."""
    cfg = {
        'url': url,
        'num_classes': 1000,
        'input_size': (3, 224, 224),
        'pool_size': None,
        'crop_pct': 0.95,
        'interpolation': 'bicubic',
        'mean': IMAGENET_DEFAULT_MEAN,
        'std': IMAGENET_DEFAULT_STD,
        'classifier': 'head',
    }
    cfg.update(kwargs)
    return cfg
|
class AddPositionEmb(nn.Module):
'Module to add position embedding to input features\n '
def __init__(self, dim=384, spatial_shape=[14, 14]):
super().__init__()
if isinstance(spatial_shape, int):
spatial_shape = [spatial_shape]
assert isinstance(spatial_shape, Sequence)... |
class Pooling(nn.Module):
'\n Implementation of pooling for PoolFormer\n --pool_size: pooling size\n '
def __init__(self, pool_size=3, **kwargs):
super().__init__()
self.pool = nn.AvgPool2d(pool_size, stride=1, padding=(pool_size // 2), count_include_pad=False)
def forward(self,... |
class Attention(nn.Module):
'Attention module that can take tensor with [B, N, C] or [B, C, H, W] as input.\n Modified from: \n https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py\n '
def __init__(self, dim, head_dim=32, qkv_bias=False, attn_drop=0.0, proj... |
class SpatialFc(nn.Module):
'SpatialFc module that take features with shape of (B,C,*) as input.\n '
def __init__(self, spatial_shape=[14, 14], **kwargs):
super().__init__()
if isinstance(spatial_shape, int):
spatial_shape = [spatial_shape]
assert isinstance(spatial_sha... |
class MetaFormerBlock(nn.Module):
'\n Implementation of one MetaFormer block.\n --dim: embedding dim\n --token_mixer: token mixer module\n --mlp_ratio: mlp expansion ratio\n --act_layer: activation\n --norm_layer: normalization\n --drop: dropout rate\n --drop path: Stochastic Depth, \n ... |
def basic_blocks(dim, index, layers, token_mixer=nn.Identity, mlp_ratio=4.0, act_layer=nn.GELU, norm_layer=LayerNormChannel, drop_rate=0.0, drop_path_rate=0.0, use_layer_scale=True, layer_scale_init_value=1e-05):
'\n generate PoolFormer blocks for a stage\n return: PoolFormer blocks \n '
blocks = []
... |
class MetaFormer(nn.Module):
'\n MetaFormer, the main class of our model\n --layers: [x,x,x,x], number of blocks for the 4 stages\n --embed_dims, --mlp_ratios: the embedding dims and mlp ratios for the 4 stages\n --token_mixers: token mixers of different stages\n --norm_layer, --act_layer: define t... |
@register_model
def metaformer_id_s12(pretrained=False, **kwargs):
layers = [2, 2, 6, 2]
embed_dims = [64, 128, 320, 512]
token_mixers = ([nn.Identity] * len(layers))
mlp_ratios = [4, 4, 4, 4]
downsamples = [True, True, True, True]
model = MetaFormer(layers, embed_dims=embed_dims, token_mixers... |
@register_model
def metaformer_pppa_s12_224(pretrained=False, **kwargs):
layers = [2, 2, 6, 2]
embed_dims = [64, 128, 320, 512]
add_pos_embs = [None, None, None, partial(AddPositionEmb, spatial_shape=[7, 7])]
token_mixers = [Pooling, Pooling, Pooling, Attention]
mlp_ratios = [4, 4, 4, 4]
downs... |
@register_model
def metaformer_ppaa_s12_224(pretrained=False, **kwargs):
layers = [2, 2, 6, 2]
embed_dims = [64, 128, 320, 512]
add_pos_embs = [None, None, partial(AddPositionEmb, spatial_shape=[14, 14]), None]
token_mixers = [Pooling, Pooling, Attention, Attention]
mlp_ratios = [4, 4, 4, 4]
d... |
@register_model
def metaformer_pppf_s12_224(pretrained=False, **kwargs):
layers = [2, 2, 6, 2]
embed_dims = [64, 128, 320, 512]
token_mixers = [Pooling, Pooling, Pooling, partial(SpatialFc, spatial_shape=[7, 7])]
mlp_ratios = [4, 4, 4, 4]
downsamples = [True, True, True, True]
model = MetaForm... |
@register_model
def metaformer_ppff_s12_224(pretrained=False, **kwargs):
layers = [2, 2, 6, 2]
embed_dims = [64, 128, 320, 512]
token_mixers = [Pooling, Pooling, partial(SpatialFc, spatial_shape=[14, 14]), partial(SpatialFc, spatial_shape=[7, 7])]
mlp_ratios = [4, 4, 4, 4]
downsamples = [True, Tru... |
def _cfg(url='', **kwargs):
    """Return the timm-style default config dict, with ``kwargs`` taking precedence."""
    defaults = dict(
        url=url,
        num_classes=1000,
        input_size=(3, 224, 224),
        pool_size=None,
        crop_pct=0.95,
        interpolation='bicubic',
        mean=IMAGENET_DEFAULT_MEAN,
        std=IMAGENET_DEFAULT_STD,
        classifier='head',
    )
    return {**defaults, **kwargs}
|
class PatchEmbed(nn.Module):
'\n Patch Embedding that is implemented by a layer of conv. \n Input: tensor in shape [B, C, H, W]\n Output: tensor in shape [B, C, H/stride, W/stride]\n '
def __init__(self, patch_size=16, stride=16, padding=0, in_chans=3, embed_dim=768, norm_layer=None):
sup... |
class LayerNormChannel(nn.Module):
'\n LayerNorm only for Channel Dimension.\n Input: tensor in shape [B, C, H, W]\n '
def __init__(self, num_channels, eps=1e-05):
super().__init__()
self.weight = nn.Parameter(torch.ones(num_channels))
self.bias = nn.Parameter(torch.zeros(num... |
class GroupNorm(nn.GroupNorm):
    """Group Normalization restricted to a single group.

    Input: tensor in shape [B, C, H, W]
    """

    def __init__(self, num_channels, **kwargs):
        # Fixing num_groups=1 normalizes across all channels jointly.
        super().__init__(num_groups=1, num_channels=num_channels, **kwargs)
|
class Pooling(nn.Module):
'\n Implementation of pooling for PoolFormer\n --pool_size: pooling size\n '
def __init__(self, pool_size=3):
super().__init__()
self.pool = nn.AvgPool2d(pool_size, stride=1, padding=(pool_size // 2), count_include_pad=False)
def forward(self, x):
... |
class Mlp(nn.Module):
'\n Implementation of MLP with 1*1 convolutions.\n Input: tensor with shape [B, C, H, W]\n '
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = (out_features or in_features)
... |
class PoolFormerBlock(nn.Module):
'\n Implementation of one PoolFormer block.\n --dim: embedding dim\n --pool_size: pooling size\n --mlp_ratio: mlp expansion ratio\n --act_layer: activation\n --norm_layer: normalization\n --drop: dropout rate\n --drop path: Stochastic Depth, \n refe... |
def basic_blocks(dim, index, layers, pool_size=3, mlp_ratio=4.0, act_layer=nn.GELU, norm_layer=GroupNorm, drop_rate=0.0, drop_path_rate=0.0, use_layer_scale=True, layer_scale_init_value=1e-05):
'\n generate PoolFormer blocks for a stage\n return: PoolFormer blocks \n '
blocks = []
for block_idx i... |
class PoolFormer(nn.Module):
'\n PoolFormer, the main class of our model\n --layers: [x,x,x,x], number of blocks for the 4 stages\n --embed_dims, --mlp_ratios, --pool_size: the embedding dims, mlp ratios and \n pooling size for the 4 stages\n --downsamples: flags to apply downsampling or not\n ... |
@register_model
def poolformer_s12(pretrained=False, **kwargs):
'\n PoolFormer-S12 model, Params: 12M\n --layers: [x,x,x,x], numbers of layers for the four stages\n --embed_dims, --mlp_ratios: \n embedding dims and mlp ratios for the four stages\n --downsamples: flags to apply downsampling or n... |
@register_model
def poolformer_s24(pretrained=False, **kwargs):
'\n PoolFormer-S24 model, Params: 21M\n '
layers = [4, 4, 12, 4]
embed_dims = [64, 128, 320, 512]
mlp_ratios = [4, 4, 4, 4]
downsamples = [True, True, True, True]
model = PoolFormer(layers, embed_dims=embed_dims, mlp_ratios=... |
@register_model
def poolformer_s36(pretrained=False, **kwargs):
'\n PoolFormer-S36 model, Params: 31M\n '
layers = [6, 6, 18, 6]
embed_dims = [64, 128, 320, 512]
mlp_ratios = [4, 4, 4, 4]
downsamples = [True, True, True, True]
model = PoolFormer(layers, embed_dims=embed_dims, mlp_ratios=... |
@register_model
def poolformer_m36(pretrained=False, **kwargs):
'\n PoolFormer-M36 model, Params: 56M\n '
layers = [6, 6, 18, 6]
embed_dims = [96, 192, 384, 768]
mlp_ratios = [4, 4, 4, 4]
downsamples = [True, True, True, True]
model = PoolFormer(layers, embed_dims=embed_dims, mlp_ratios=... |
@register_model
def poolformer_m48(pretrained=False, **kwargs):
'\n PoolFormer-M48 model, Params: 73M\n '
layers = [8, 8, 24, 8]
embed_dims = [96, 192, 384, 768]
mlp_ratios = [4, 4, 4, 4]
downsamples = [True, True, True, True]
model = PoolFormer(layers, embed_dims=embed_dims, mlp_ratios=... |
@PIPELINES.register_module()
class AlignResize(object):
'Resize images & seg. Align\n '
def __init__(self, img_scale=None, multiscale_mode='range', ratio_range=None, keep_ratio=True, size_divisor=32):
if (img_scale is None):
self.img_scale = None
else:
if isinstance... |
def parse_args():
parser = argparse.ArgumentParser(description='mmseg test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--aug-test', action='store_true', help='Use Flip and Multi scale au... |
def main():
args = parse_args()
assert (args.out or args.eval or args.format_only or args.show or args.show_dir), 'Please specify at least one operation (save/eval/format/show the results / save the results) with the argument "--out", "--eval", "--format-only", "--show" or "--show-dir"'
if (args.eval and ... |
def plot_curve(log_dicts, args):
if (args.backend is not None):
plt.switch_backend(args.backend)
sns.set_style(args.style)
legend = args.legend
if (legend is None):
legend = []
for json_log in args.json_logs:
for metric in args.keys:
legend.append(f'... |
def parse_args():
parser = argparse.ArgumentParser(description='Analyze Json Log')
parser.add_argument('json_logs', type=str, nargs='+', help='path of train log in json format')
parser.add_argument('--keys', type=str, nargs='+', default=['mIoU'], help='the metric that you want to plot')
parser.add_arg... |
def load_json_logs(json_logs):
log_dicts = [dict() for _ in json_logs]
for (json_log, log_dict) in zip(json_logs, log_dicts):
with open(json_log, 'r') as log_file:
for line in log_file:
log = json.loads(line.strip())
if ('epoch' not in log):
... |
def main():
    """Entry point: validate the JSON log paths, parse them, and plot the curves."""
    args = parse_args()
    json_logs = args.json_logs
    for json_log in json_logs:
        # `assert` is stripped when Python runs with -O, so validate
        # user input with an explicit exception instead.
        if not json_log.endswith('.json'):
            raise ValueError(f'log file must be a .json file, got {json_log}')
    log_dicts = load_json_logs(json_logs)
    plot_curve(log_dicts, args)
|
def parse_args():
parser = argparse.ArgumentParser(description='MMSeg benchmark a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--log-interval', type=int, default=50, help='interval of logging')
ar... |
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
torch.backends.cudnn.benchmark = False
cfg.model.pretrained = None
cfg.data.test.test_mode = True
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(dataset, samples_per_gpu=1, workers_per_gpu=cfg.data.wo... |
def convert_json_to_label(json_file):
    """Render one Cityscapes '*_polygons.json' annotation as a '*_labelTrainIds.png' label image."""
    out_png = json_file.replace('_polygons.json', '_labelTrainIds.png')
    # 'trainIds' selects the train-id label encoding in the conversion helper.
    json2labelImg(json_file, out_png, 'trainIds')
|
def parse_args():
parser = argparse.ArgumentParser(description='Convert Cityscapes annotations to TrainIds')
parser.add_argument('cityscapes_path', help='cityscapes data path')
parser.add_argument('--gt-dir', default='gtFine', type=str)
parser.add_argument('-o', '--out-dir', help='output path')
pa... |
def main():
args = parse_args()
cityscapes_path = args.cityscapes_path
out_dir = (args.out_dir if args.out_dir else cityscapes_path)
mmcv.mkdir_or_exist(out_dir)
gt_dir = osp.join(cityscapes_path, args.gt_dir)
poly_files = []
for poly in mmcv.scandir(gt_dir, '_polygons.json', recursive=Tru... |
def convert_to_trainID(tuple_path, in_img_dir, in_ann_dir, out_img_dir, out_mask_dir, is_train):
(imgpath, maskpath) = tuple_path
shutil.copyfile(osp.join(in_img_dir, imgpath), (osp.join(out_img_dir, 'train2014', imgpath) if is_train else osp.join(out_img_dir, 'test2014', imgpath)))
annotate = loadmat(osp... |
def generate_coco_list(folder):
train_list = osp.join(folder, 'imageLists', 'train.txt')
test_list = osp.join(folder, 'imageLists', 'test.txt')
train_paths = []
test_paths = []
with open(train_list) as f:
for filename in f:
basename = filename.strip()
imgpath = (bas... |
def parse_args():
parser = argparse.ArgumentParser(description='Convert COCO Stuff 10k annotations to mmsegmentation format')
parser.add_argument('coco_path', help='coco stuff path')
parser.add_argument('-o', '--out_dir', help='output path')
parser.add_argument('--nproc', default=16, type=int, help='n... |
def main():
args = parse_args()
coco_path = args.coco_path
nproc = args.nproc
out_dir = (args.out_dir or coco_path)
out_img_dir = osp.join(out_dir, 'images')
out_mask_dir = osp.join(out_dir, 'annotations')
mmcv.mkdir_or_exist(osp.join(out_img_dir, 'train2014'))
mmcv.mkdir_or_exist(osp.... |
def convert_to_trainID(maskpath, out_mask_dir, is_train):
mask = np.array(Image.open(maskpath))
mask_copy = mask.copy()
for (clsID, trID) in clsID_to_trID.items():
mask_copy[(mask == clsID)] = trID
seg_filename = (osp.join(out_mask_dir, 'train2017', (osp.basename(maskpath).split('.')[0] + '_la... |
def parse_args():
parser = argparse.ArgumentParser(description='Convert COCO Stuff 164k annotations to mmsegmentation format')
parser.add_argument('coco_path', help='coco stuff path')
parser.add_argument('-o', '--out_dir', help='output path')
parser.add_argument('--nproc', default=16, type=int, help='... |
def main():
args = parse_args()
coco_path = args.coco_path
nproc = args.nproc
out_dir = (args.out_dir or coco_path)
out_img_dir = osp.join(out_dir, 'images')
out_mask_dir = osp.join(out_dir, 'annotations')
mmcv.mkdir_or_exist(osp.join(out_mask_dir, 'train2017'))
mmcv.mkdir_or_exist(osp... |
def generate_labels(img_id, detail, out_dir):
def _class_to_index(mask, _mapping, _key):
values = np.unique(mask)
for i in range(len(values)):
assert (values[i] in _mapping)
index = np.digitize(mask.ravel(), _mapping, right=True)
return _key[index].reshape(mask.shape)
... |
def parse_args():
parser = argparse.ArgumentParser(description='Convert PASCAL VOC annotations to mmsegmentation format')
parser.add_argument('devkit_path', help='pascal voc devkit path')
parser.add_argument('json_path', help='annoation json filepath')
parser.add_argument('-o', '--out_dir', help='outp... |
def main():
args = parse_args()
devkit_path = args.devkit_path
if (args.out_dir is None):
out_dir = osp.join(devkit_path, 'VOC2010', 'SegmentationClassContext')
else:
out_dir = args.out_dir
json_path = args.json_path
mmcv.mkdir_or_exist(out_dir)
img_dir = osp.join(devkit_pa... |
def convert_mat(mat_file, in_dir, out_dir):
    """Convert one .mat ground-truth file into a PNG segmentation mask.

    The output keeps the input basename, with '.mat' swapped for '.png'.
    """
    contents = loadmat(osp.join(in_dir, mat_file))
    # 'GTcls' holds the class-level annotation; the segmentation map sits
    # inside the nested MATLAB struct — presumably SBD-format data (verify).
    mask = contents['GTcls'][0]['Segmentation'][0].astype(np.uint8)
    out_path = osp.join(out_dir, mat_file.replace('.mat', '.png'))
    Image.fromarray(mask).save(out_path, 'PNG')
|
def generate_aug_list(merged_list, excluded_list):
    """Return the items of ``merged_list`` not present in ``excluded_list``.

    Duplicates and ordering are not preserved (set semantics).
    """
    return list(set(merged_list).difference(excluded_list))
|
def parse_args():
parser = argparse.ArgumentParser(description='Convert PASCAL VOC annotations to mmsegmentation format')
parser.add_argument('devkit_path', help='pascal voc devkit path')
parser.add_argument('aug_path', help='pascal voc aug path')
parser.add_argument('-o', '--out_dir', help='output pa... |
def main():
args = parse_args()
devkit_path = args.devkit_path
aug_path = args.aug_path
nproc = args.nproc
if (args.out_dir is None):
out_dir = osp.join(devkit_path, 'VOC2012', 'SegmentationClassAug')
else:
out_dir = args.out_dir
mmcv.mkdir_or_exist(out_dir)
in_dir = os... |
class ONNXRuntimeSegmentor(BaseSegmentor):
def __init__(self, onnx_file: str, cfg: Any, device_id: int):
super(ONNXRuntimeSegmentor, self).__init__()
import onnxruntime as ort
ort_custom_op_path = ''
try:
from mmcv.ops import get_onnxruntime_op_path
ort_cus... |
class TensorRTSegmentor(BaseSegmentor):
def __init__(self, trt_file: str, cfg: Any, device_id: int):
super(TensorRTSegmentor, self).__init__()
from mmcv.tensorrt import TRTWraper, load_tensorrt_plugin
try:
load_tensorrt_plugin()
except (ImportError, ModuleNotFoundError... |
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description='mmseg backend test (and eval)')
parser.add_argument('config', help='test config file path')
parser.add_argument('model', help='Input model file')
parser.add_argument('--backend', help='Backend of the model.', choices... |
def main():
args = parse_args()
assert (args.out or args.eval or args.format_only or args.show or args.show_dir), 'Please specify at least one operation (save/eval/format/show the results / save the results) with the argument "--out", "--eval", "--format-only", "--show" or "--show-dir"'
if (args.eval and ... |
def parse_args():
    """Parse the command-line arguments for the segmentor training script."""
    parser = argparse.ArgumentParser(description='Train a segmentor')
    parser.add_argument('config', help='train config file path')
    parser.add_argument(
        '--shape',
        type=int,
        nargs='+',
        default=[2048, 1024],
        help='input image size')
    return parser.parse_args()
|
def main():
args = parse_args()
if (len(args.shape) == 1):
input_shape = (3, args.shape[0], args.shape[0])
elif (len(args.shape) == 2):
input_shape = ((3,) + tuple(args.shape))
else:
raise ValueError('invalid input shape')
cfg = Config.fromfile(args.config)
cfg.model.pr... |
def convert_mit(ckpt):
new_ckpt = OrderedDict()
for (k, v) in ckpt.items():
if k.startswith('head'):
continue
elif k.startswith('patch_embed'):
stage_i = int(k.split('.')[0].replace('patch_embed', ''))
new_k = k.replace(f'patch_embed{stage_i}', f'layers.{(st... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.