Create build.py
3rdparty/densepose/data/build.py  ADDED  (+736, -0)
@@ -0,0 +1,736 @@
# Copyright (c) Facebook, Inc. and its affiliates.

import itertools
import logging
import numpy as np
from collections import UserDict, defaultdict
from dataclasses import dataclass
from typing import Any, Callable, Collection, Dict, Iterable, List, Optional, Sequence, Tuple
import torch
from torch.utils.data.dataset import Dataset

from detectron2.config import CfgNode
from detectron2.data.build import build_detection_test_loader as d2_build_detection_test_loader
from detectron2.data.build import build_detection_train_loader as d2_build_detection_train_loader
from detectron2.data.build import (
    load_proposals_into_dataset,
    print_instances_class_histogram,
    trivial_batch_collator,
    worker_init_reset_seed,
)
from detectron2.data.catalog import DatasetCatalog, Metadata, MetadataCatalog
from detectron2.data.samplers import TrainingSampler
from detectron2.utils.comm import get_world_size

from densepose.config import get_bootstrap_dataset_config
from densepose.modeling import build_densepose_embedder

from .combined_loader import CombinedDataLoader, Loader
from .dataset_mapper import DatasetMapper
from .datasets.coco import DENSEPOSE_CSE_KEYS_WITHOUT_MASK, DENSEPOSE_IUV_KEYS_WITHOUT_MASK
from .datasets.dataset_type import DatasetType
from .inference_based_loader import InferenceBasedLoader, ScoreBasedFilter
from .samplers import (
    DensePoseConfidenceBasedSampler,
    DensePoseCSEConfidenceBasedSampler,
    DensePoseCSEUniformSampler,
    DensePoseUniformSampler,
    MaskFromDensePoseSampler,
    PredictionToGroundTruthSampler,
)
from .transform import ImageResizeTransform
from .utils import get_category_to_class_mapping, get_class_to_mesh_name_mapping
from .video import (
    FirstKFramesSelector,
    FrameSelectionStrategy,
    LastKFramesSelector,
    RandomKFramesSelector,
    VideoKeyframeDataset,
    video_list_from_file,
)

__all__ = ["build_detection_train_loader", "build_detection_test_loader"]


Instance = Dict[str, Any]
InstancePredicate = Callable[[Instance], bool]


def _compute_num_images_per_worker(cfg: CfgNode) -> int:
    num_workers = get_world_size()
    images_per_batch = cfg.SOLVER.IMS_PER_BATCH
    assert (
        images_per_batch % num_workers == 0
    ), "SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of workers ({}).".format(
        images_per_batch, num_workers
    )
    assert (
        images_per_batch >= num_workers
    ), "SOLVER.IMS_PER_BATCH ({}) must be larger than the number of workers ({}).".format(
        images_per_batch, num_workers
    )
    images_per_worker = images_per_batch // num_workers
    return images_per_worker

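
# Worked example (illustrative, not part of the original file): with
# cfg.SOLVER.IMS_PER_BATCH = 16 and a world size of 4 processes,
# _compute_num_images_per_worker returns 16 // 4 = 4 images per process;
# IMS_PER_BATCH = 14 on 4 processes would trip the divisibility assertion above.
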
def _map_category_id_to_contiguous_id(dataset_name: str, dataset_dicts: Iterable[Instance]) -> None:
    meta = MetadataCatalog.get(dataset_name)
    for dataset_dict in dataset_dicts:
        for ann in dataset_dict["annotations"]:
            ann["category_id"] = meta.thing_dataset_id_to_contiguous_id[ann["category_id"]]


@dataclass
class _DatasetCategory:
    """
    Class representing category data in a dataset:
    - id: category ID, as specified in the dataset annotations file
    - name: category name, as specified in the dataset annotations file
    - mapped_id: category ID after applying category maps (DATASETS.CATEGORY_MAPS config option)
    - mapped_name: category name after applying category maps
    - dataset_name: dataset in which the category is defined

    For example, when training models in a class-agnostic manner, one could take the LVIS 1.0
    dataset and map its animal categories to the same category as human data from COCO:
    id = 225
    name = "cat"
    mapped_id = 1
    mapped_name = "person"
    dataset_name = "lvis_v1_animals_dp_train"
    """

    id: int
    name: str
    mapped_id: int
    mapped_name: str
    dataset_name: str


_MergedCategoriesT = Dict[int, List[_DatasetCategory]]

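
# Sketch (illustrative, not part of the original file): the class-agnostic
# LVIS -> COCO mapping from the docstring above, expressed as a value:
#
#   cat_as_person = _DatasetCategory(
#       id=225,
#       name="cat",
#       mapped_id=1,
#       mapped_name="person",
#       dataset_name="lvis_v1_animals_dp_train",
#   )
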
def _add_category_id_to_contiguous_id_maps_to_metadata(
    merged_categories: _MergedCategoriesT,
) -> None:
    merged_categories_per_dataset = {}
    for contiguous_cat_id, cat_id in enumerate(sorted(merged_categories.keys())):
        for cat in merged_categories[cat_id]:
            if cat.dataset_name not in merged_categories_per_dataset:
                merged_categories_per_dataset[cat.dataset_name] = defaultdict(list)
            merged_categories_per_dataset[cat.dataset_name][cat_id].append(
                (
                    contiguous_cat_id,
                    cat,
                )
            )

    logger = logging.getLogger(__name__)
    for dataset_name, merged_categories in merged_categories_per_dataset.items():
        meta = MetadataCatalog.get(dataset_name)
        if not hasattr(meta, "thing_classes"):
            meta.thing_classes = []
            meta.thing_dataset_id_to_contiguous_id = {}
            meta.thing_dataset_id_to_merged_id = {}
        else:
            meta.thing_classes.clear()
            meta.thing_dataset_id_to_contiguous_id.clear()
            meta.thing_dataset_id_to_merged_id.clear()
        logger.info(f"Dataset {dataset_name}: category ID to contiguous ID mapping:")
        for _cat_id, categories in sorted(merged_categories.items()):
            added_to_thing_classes = False
            for contiguous_cat_id, cat in categories:
                if not added_to_thing_classes:
                    meta.thing_classes.append(cat.mapped_name)
                    added_to_thing_classes = True
                meta.thing_dataset_id_to_contiguous_id[cat.id] = contiguous_cat_id
                meta.thing_dataset_id_to_merged_id[cat.id] = cat.mapped_id
                logger.info(f"{cat.id} ({cat.name}) -> {contiguous_cat_id}")


def _maybe_create_general_keep_instance_predicate(cfg: CfgNode) -> Optional[InstancePredicate]:
    def has_annotations(instance: Instance) -> bool:
        return "annotations" in instance

    def has_only_crowd_annotations(instance: Instance) -> bool:
        for ann in instance["annotations"]:
            if ann.get("is_crowd", 0) == 0:
                return False
        return True

    def general_keep_instance_predicate(instance: Instance) -> bool:
        return has_annotations(instance) and not has_only_crowd_annotations(instance)

    if not cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS:
        return None
    return general_keep_instance_predicate


def _maybe_create_keypoints_keep_instance_predicate(cfg: CfgNode) -> Optional[InstancePredicate]:

    min_num_keypoints = cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE

    def has_sufficient_num_keypoints(instance: Instance) -> bool:
        num_kpts = sum(
            (np.array(ann["keypoints"][2::3]) > 0).sum()
            for ann in instance["annotations"]
            if "keypoints" in ann
        )
        return num_kpts >= min_num_keypoints

    if cfg.MODEL.KEYPOINT_ON and (min_num_keypoints > 0):
        return has_sufficient_num_keypoints
    return None


def _maybe_create_mask_keep_instance_predicate(cfg: CfgNode) -> Optional[InstancePredicate]:
    if not cfg.MODEL.MASK_ON:
        return None

    def has_mask_annotations(instance: Instance) -> bool:
        return any("segmentation" in ann for ann in instance["annotations"])

    return has_mask_annotations


def _maybe_create_densepose_keep_instance_predicate(cfg: CfgNode) -> Optional[InstancePredicate]:
    if not cfg.MODEL.DENSEPOSE_ON:
        return None

    use_masks = cfg.MODEL.ROI_DENSEPOSE_HEAD.COARSE_SEGM_TRAINED_BY_MASKS

    def has_densepose_annotations(instance: Instance) -> bool:
        for ann in instance["annotations"]:
            if all(key in ann for key in DENSEPOSE_IUV_KEYS_WITHOUT_MASK) or all(
                key in ann for key in DENSEPOSE_CSE_KEYS_WITHOUT_MASK
            ):
                return True
            if use_masks and "segmentation" in ann:
                return True
        return False

    return has_densepose_annotations


def _maybe_create_specific_keep_instance_predicate(cfg: CfgNode) -> Optional[InstancePredicate]:
    specific_predicate_creators = [
        _maybe_create_keypoints_keep_instance_predicate,
        _maybe_create_mask_keep_instance_predicate,
        _maybe_create_densepose_keep_instance_predicate,
    ]
    predicates = [creator(cfg) for creator in specific_predicate_creators]
    predicates = [p for p in predicates if p is not None]
    if not predicates:
        return None

    def combined_predicate(instance: Instance) -> bool:
        return any(p(instance) for p in predicates)

    return combined_predicate


def _get_train_keep_instance_predicate(cfg: CfgNode):
    general_keep_predicate = _maybe_create_general_keep_instance_predicate(cfg)
    combined_specific_keep_predicate = _maybe_create_specific_keep_instance_predicate(cfg)

    def combined_general_specific_keep_predicate(instance: Instance) -> bool:
        return general_keep_predicate(instance) and combined_specific_keep_predicate(instance)

    if (general_keep_predicate is None) and (combined_specific_keep_predicate is None):
        return None
    if general_keep_predicate is None:
        return combined_specific_keep_predicate
    if combined_specific_keep_predicate is None:
        return general_keep_predicate
    return combined_general_specific_keep_predicate


def _get_test_keep_instance_predicate(cfg: CfgNode):
    general_keep_predicate = _maybe_create_general_keep_instance_predicate(cfg)
    return general_keep_predicate

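
# Sketch (illustrative, not part of the original file): how the train
# keep-predicate filters instance dicts, assuming a config with
# DATALOADER.FILTER_EMPTY_ANNOTATIONS = True:
#
#   keep = _get_train_keep_instance_predicate(cfg)
#   # an image whose only annotation is a crowd region is dropped,
#   # because the general predicate rejects crowd-only instances
#   keep({"annotations": [{"is_crowd": 1, "category_id": 1}]})  # -> False
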
def _maybe_filter_and_map_categories(
    dataset_name: str, dataset_dicts: List[Instance]
) -> List[Instance]:
    meta = MetadataCatalog.get(dataset_name)
    category_id_map = meta.thing_dataset_id_to_contiguous_id
    filtered_dataset_dicts = []
    for dataset_dict in dataset_dicts:
        anns = []
        for ann in dataset_dict["annotations"]:
            cat_id = ann["category_id"]
            if cat_id not in category_id_map:
                continue
            ann["category_id"] = category_id_map[cat_id]
            anns.append(ann)
        dataset_dict["annotations"] = anns
        filtered_dataset_dicts.append(dataset_dict)
    return filtered_dataset_dicts


def _add_category_whitelists_to_metadata(cfg: CfgNode) -> None:
    for dataset_name, whitelisted_cat_ids in cfg.DATASETS.WHITELISTED_CATEGORIES.items():
        meta = MetadataCatalog.get(dataset_name)
        meta.whitelisted_categories = whitelisted_cat_ids
        logger = logging.getLogger(__name__)
        logger.info(
            "Whitelisted categories for dataset {}: {}".format(
                dataset_name, meta.whitelisted_categories
            )
        )


def _add_category_maps_to_metadata(cfg: CfgNode) -> None:
    for dataset_name, category_map in cfg.DATASETS.CATEGORY_MAPS.items():
        category_map = {
            int(cat_id_src): int(cat_id_dst) for cat_id_src, cat_id_dst in category_map.items()
        }
        meta = MetadataCatalog.get(dataset_name)
        meta.category_map = category_map
        logger = logging.getLogger(__name__)
        logger.info("Category maps for dataset {}: {}".format(dataset_name, meta.category_map))


def _add_category_info_to_bootstrapping_metadata(dataset_name: str, dataset_cfg: CfgNode) -> None:
    meta = MetadataCatalog.get(dataset_name)
    meta.category_to_class_mapping = get_category_to_class_mapping(dataset_cfg)
    meta.categories = dataset_cfg.CATEGORIES
    meta.max_count_per_category = dataset_cfg.MAX_COUNT_PER_CATEGORY
    logger = logging.getLogger(__name__)
    logger.info(
        "Category to class mapping for dataset {}: {}".format(
            dataset_name, meta.category_to_class_mapping
        )
    )


def _maybe_add_class_to_mesh_name_map_to_metadata(dataset_names: List[str], cfg: CfgNode) -> None:
    for dataset_name in dataset_names:
        meta = MetadataCatalog.get(dataset_name)
        if not hasattr(meta, "class_to_mesh_name"):
            meta.class_to_mesh_name = get_class_to_mesh_name_mapping(cfg)


def _merge_categories(dataset_names: Collection[str]) -> _MergedCategoriesT:
    merged_categories = defaultdict(list)
    category_names = {}
    for dataset_name in dataset_names:
        meta = MetadataCatalog.get(dataset_name)
        whitelisted_categories = meta.get("whitelisted_categories")
        category_map = meta.get("category_map", {})
        cat_ids = (
            whitelisted_categories if whitelisted_categories is not None else meta.categories.keys()
        )
        for cat_id in cat_ids:
            cat_name = meta.categories[cat_id]
            cat_id_mapped = category_map.get(cat_id, cat_id)
            if cat_id_mapped == cat_id or cat_id_mapped in cat_ids:
                category_names[cat_id] = cat_name
            else:
                category_names[cat_id] = str(cat_id_mapped)
            # assign a temporary mapped category name; it can be changed during
            # the second pass, since the mapped ID can correspond to a category
            # from a different dataset
            cat_name_mapped = meta.categories[cat_id_mapped]
            merged_categories[cat_id_mapped].append(
                _DatasetCategory(
                    id=cat_id,
                    name=cat_name,
                    mapped_id=cat_id_mapped,
                    mapped_name=cat_name_mapped,
                    dataset_name=dataset_name,
                )
            )
    # second pass to assign proper mapped category names
    for cat_id, categories in merged_categories.items():
        for cat in categories:
            if cat_id in category_names and cat.mapped_name != category_names[cat_id]:
                cat.mapped_name = category_names[cat_id]

    return merged_categories


def _warn_if_merged_different_categories(merged_categories: _MergedCategoriesT) -> None:
    logger = logging.getLogger(__name__)
    for cat_id in merged_categories:
        merged_categories_i = merged_categories[cat_id]
        first_cat_name = merged_categories_i[0].name
        if len(merged_categories_i) > 1 and not all(
            cat.name == first_cat_name for cat in merged_categories_i[1:]
        ):
            cat_summary_str = ", ".join(
                [f"{cat.id} ({cat.name}) from {cat.dataset_name}" for cat in merged_categories_i]
            )
            logger.warning(
                f"Merged category {cat_id} corresponds to the following categories: "
                f"{cat_summary_str}"
            )

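
# Sketch (illustrative, hypothetical values): the config options consumed
# above. Whitelists restrict a dataset to the listed category IDs; category
# maps merge source IDs into target IDs before contiguous remapping:
#
#   cfg.DATASETS.WHITELISTED_CATEGORIES = {"lvis_v1_animals_dp_train": [225, 378]}
#   cfg.DATASETS.CATEGORY_MAPS = {"lvis_v1_animals_dp_train": {"225": "1"}}
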
def combine_detection_dataset_dicts(
    dataset_names: Collection[str],
    keep_instance_predicate: Optional[InstancePredicate] = None,
    proposal_files: Optional[Collection[str]] = None,
) -> List[Instance]:
    """
    Load and prepare dataset dicts for training / testing.

    Args:
        dataset_names (Collection[str]): a list of dataset names
        keep_instance_predicate (Callable: Dict[str, Any] -> bool): predicate
            applied to instance dicts which defines whether to keep the instance
        proposal_files (Collection[str]): if given, a list of object proposal files
            that match each dataset in `dataset_names`.
    """
    assert len(dataset_names)
    if proposal_files is None:
        proposal_files = [None] * len(dataset_names)
    assert len(dataset_names) == len(proposal_files)
    # load datasets and metadata
    dataset_name_to_dicts = {}
    for dataset_name in dataset_names:
        dataset_name_to_dicts[dataset_name] = DatasetCatalog.get(dataset_name)
        assert len(dataset_name_to_dicts[dataset_name]), f"Dataset '{dataset_name}' is empty!"
    # merge categories, requires category metadata to be loaded
    # cat_id -> [(orig_cat_id, cat_name, dataset_name)]
    merged_categories = _merge_categories(dataset_names)
    _warn_if_merged_different_categories(merged_categories)
    merged_category_names = [
        merged_categories[cat_id][0].mapped_name for cat_id in sorted(merged_categories)
    ]
    # map to contiguous category IDs
    _add_category_id_to_contiguous_id_maps_to_metadata(merged_categories)
    # load annotations and dataset metadata
    for dataset_name, proposal_file in zip(dataset_names, proposal_files):
        dataset_dicts = dataset_name_to_dicts[dataset_name]
        assert len(dataset_dicts), f"Dataset '{dataset_name}' is empty!"
        if proposal_file is not None:
            dataset_dicts = load_proposals_into_dataset(dataset_dicts, proposal_file)
        dataset_dicts = _maybe_filter_and_map_categories(dataset_name, dataset_dicts)
        print_instances_class_histogram(dataset_dicts, merged_category_names)
        dataset_name_to_dicts[dataset_name] = dataset_dicts

    if keep_instance_predicate is not None:
        all_datasets_dicts_plain = [
            d
            for d in itertools.chain.from_iterable(dataset_name_to_dicts.values())
            if keep_instance_predicate(d)
        ]
    else:
        all_datasets_dicts_plain = list(
            itertools.chain.from_iterable(dataset_name_to_dicts.values())
        )
    return all_datasets_dicts_plain

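
# Sketch (illustrative, hypothetical dataset names): combining two compatible
# datasets into a single flat list of instance dicts with merged, contiguous
# category IDs:
#
#   dicts = combine_detection_dataset_dicts(
#       ["densepose_coco_2014_train", "lvis_v1_animals_dp_train"],
#       keep_instance_predicate=_get_train_keep_instance_predicate(cfg),
#   )
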
def build_detection_train_loader(cfg: CfgNode, mapper=None):
    """
    A data loader is created in a way similar to that of Detectron2.
    The main differences are:
    - it allows combining datasets with different but compatible object category sets

    The data loader is created by the following steps:
    1. Use the dataset names in config to query :class:`DatasetCatalog`, and obtain a list of dicts.
    2. Start workers to work on the dicts. Each worker will:
       * Map each metadata dict into another format to be consumed by the model.
       * Batch them by simply putting dicts into a list.
    The batched ``list[mapped_dict]`` is what this dataloader will return.

    Args:
        cfg (CfgNode): the config
        mapper (callable): a callable which takes a sample (dict) from dataset and
            returns the format to be consumed by the model.
            By default it will be `DatasetMapper(cfg, True)`.

    Returns:
        an infinite iterator of training data
    """

    _add_category_whitelists_to_metadata(cfg)
    _add_category_maps_to_metadata(cfg)
    _maybe_add_class_to_mesh_name_map_to_metadata(cfg.DATASETS.TRAIN, cfg)
    dataset_dicts = combine_detection_dataset_dicts(
        cfg.DATASETS.TRAIN,
        keep_instance_predicate=_get_train_keep_instance_predicate(cfg),
        proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
    )
    if mapper is None:
        mapper = DatasetMapper(cfg, True)
    return d2_build_detection_train_loader(cfg, dataset=dataset_dicts, mapper=mapper)

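
# Sketch (illustrative, not part of the original file): typical training use,
# assuming `cfg` has been set up with cfg.DATASETS.TRAIN and solver options:
#
#   train_loader = build_detection_train_loader(cfg)
#   for batched_inputs in train_loader:  # infinite iterator
#       loss_dict = model(batched_inputs)
#       ...
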
def build_detection_test_loader(cfg, dataset_name, mapper=None):
    """
    Similar to `build_detection_train_loader`, but this function uses the given
    `dataset_name` argument (instead of the names in cfg) and uses batch size 1.

    Args:
        cfg: a detectron2 CfgNode
        dataset_name (str): the name of a dataset that is available in the DatasetCatalog
        mapper (callable): a callable which takes a sample (dict) from dataset
            and returns the format to be consumed by the model.
            By default it will be `DatasetMapper(cfg, False)`.

    Returns:
        DataLoader: a torch DataLoader that loads the given detection
        dataset, with test-time transformation and batching.
    """
    _add_category_whitelists_to_metadata(cfg)
    _add_category_maps_to_metadata(cfg)
    _maybe_add_class_to_mesh_name_map_to_metadata([dataset_name], cfg)
    dataset_dicts = combine_detection_dataset_dicts(
        [dataset_name],
        keep_instance_predicate=_get_test_keep_instance_predicate(cfg),
        proposal_files=[
            cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(dataset_name)]
        ]
        if cfg.MODEL.LOAD_PROPOSALS
        else None,
    )
    sampler = None
    if not cfg.DENSEPOSE_EVALUATION.DISTRIBUTED_INFERENCE:
        sampler = torch.utils.data.SequentialSampler(dataset_dicts)
    if mapper is None:
        mapper = DatasetMapper(cfg, False)
    return d2_build_detection_test_loader(
        dataset_dicts, mapper=mapper, num_workers=cfg.DATALOADER.NUM_WORKERS, sampler=sampler
    )

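
# Sketch (illustrative): evaluation over each test dataset named in the config:
#
#   for dataset_name in cfg.DATASETS.TEST:
#       test_loader = build_detection_test_loader(cfg, dataset_name)
#       for inputs in test_loader:  # batch size 1
#           outputs = model(inputs)
#           ...
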
def build_frame_selector(cfg: CfgNode):
    strategy = FrameSelectionStrategy(cfg.STRATEGY)
    if strategy == FrameSelectionStrategy.RANDOM_K:
        frame_selector = RandomKFramesSelector(cfg.NUM_IMAGES)
    elif strategy == FrameSelectionStrategy.FIRST_K:
        frame_selector = FirstKFramesSelector(cfg.NUM_IMAGES)
    elif strategy == FrameSelectionStrategy.LAST_K:
        frame_selector = LastKFramesSelector(cfg.NUM_IMAGES)
    elif strategy == FrameSelectionStrategy.ALL:
        frame_selector = None
    # pyre-fixme[61]: `frame_selector` may not be initialized here.
    return frame_selector


def build_transform(cfg: CfgNode, data_type: str):
    if cfg.TYPE == "resize":
        if data_type == "image":
            return ImageResizeTransform(cfg.MIN_SIZE, cfg.MAX_SIZE)
    raise ValueError(f"Unknown transform {cfg.TYPE} for data type {data_type}")


def build_combined_loader(cfg: CfgNode, loaders: Collection[Loader], ratios: Sequence[float]):
    images_per_worker = _compute_num_images_per_worker(cfg)
    return CombinedDataLoader(loaders, images_per_worker, ratios)

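
# Sketch (illustrative, hypothetical loader variables): combining a regular
# training loader with an inference-based bootstrap loader, sampling from them
# at a 2:1 ratio:
#
#   combined = build_combined_loader(cfg, [train_loader, bootstrap_loader], [2.0, 1.0])
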
def build_bootstrap_dataset(dataset_name: str, cfg: CfgNode) -> Sequence[torch.Tensor]:
    """
    Build a dataset that provides data to bootstrap on.

    Args:
        dataset_name (str): name of the dataset; needs to have associated metadata
            to load the data
        cfg (CfgNode): bootstrapping config
    Returns:
        Sequence[Tensor] - dataset that provides image batches, Tensors of size
        [N, C, H, W] of type float32
    """
    logger = logging.getLogger(__name__)
    _add_category_info_to_bootstrapping_metadata(dataset_name, cfg)
    meta = MetadataCatalog.get(dataset_name)
    factory = BootstrapDatasetFactoryCatalog.get(meta.dataset_type)
    dataset = None
    if factory is not None:
        dataset = factory(meta, cfg)
    if dataset is None:
        logger.warning(f"Failed to create dataset {dataset_name} of type {meta.dataset_type}")
    return dataset

def build_data_sampler(cfg: CfgNode, sampler_cfg: CfgNode, embedder: Optional[torch.nn.Module]):
    if sampler_cfg.TYPE == "densepose_uniform":
        data_sampler = PredictionToGroundTruthSampler()
        # transform densepose pred -> gt
        data_sampler.register_sampler(
            "pred_densepose",
            "gt_densepose",
            DensePoseUniformSampler(count_per_class=sampler_cfg.COUNT_PER_CLASS),
        )
        data_sampler.register_sampler("pred_densepose", "gt_masks", MaskFromDensePoseSampler())
        return data_sampler
    elif sampler_cfg.TYPE == "densepose_UV_confidence":
        data_sampler = PredictionToGroundTruthSampler()
        # transform densepose pred -> gt
        data_sampler.register_sampler(
            "pred_densepose",
            "gt_densepose",
            DensePoseConfidenceBasedSampler(
                confidence_channel="sigma_2",
                count_per_class=sampler_cfg.COUNT_PER_CLASS,
                search_proportion=0.5,
            ),
        )
        data_sampler.register_sampler("pred_densepose", "gt_masks", MaskFromDensePoseSampler())
        return data_sampler
    elif sampler_cfg.TYPE == "densepose_fine_segm_confidence":
        data_sampler = PredictionToGroundTruthSampler()
        # transform densepose pred -> gt
        data_sampler.register_sampler(
            "pred_densepose",
            "gt_densepose",
            DensePoseConfidenceBasedSampler(
                confidence_channel="fine_segm_confidence",
                count_per_class=sampler_cfg.COUNT_PER_CLASS,
                search_proportion=0.5,
            ),
        )
        data_sampler.register_sampler("pred_densepose", "gt_masks", MaskFromDensePoseSampler())
        return data_sampler
    elif sampler_cfg.TYPE == "densepose_coarse_segm_confidence":
        data_sampler = PredictionToGroundTruthSampler()
        # transform densepose pred -> gt
        data_sampler.register_sampler(
            "pred_densepose",
            "gt_densepose",
            DensePoseConfidenceBasedSampler(
                confidence_channel="coarse_segm_confidence",
                count_per_class=sampler_cfg.COUNT_PER_CLASS,
                search_proportion=0.5,
            ),
        )
        data_sampler.register_sampler("pred_densepose", "gt_masks", MaskFromDensePoseSampler())
        return data_sampler
    elif sampler_cfg.TYPE == "densepose_cse_uniform":
        assert embedder is not None
        data_sampler = PredictionToGroundTruthSampler()
        # transform densepose pred -> gt
        data_sampler.register_sampler(
            "pred_densepose",
            "gt_densepose",
            DensePoseCSEUniformSampler(
                cfg=cfg,
                use_gt_categories=sampler_cfg.USE_GROUND_TRUTH_CATEGORIES,
                embedder=embedder,
                count_per_class=sampler_cfg.COUNT_PER_CLASS,
            ),
        )
        data_sampler.register_sampler("pred_densepose", "gt_masks", MaskFromDensePoseSampler())
        return data_sampler
    elif sampler_cfg.TYPE == "densepose_cse_coarse_segm_confidence":
        assert embedder is not None
        data_sampler = PredictionToGroundTruthSampler()
        # transform densepose pred -> gt
        data_sampler.register_sampler(
            "pred_densepose",
            "gt_densepose",
            DensePoseCSEConfidenceBasedSampler(
                cfg=cfg,
                use_gt_categories=sampler_cfg.USE_GROUND_TRUTH_CATEGORIES,
                embedder=embedder,
                confidence_channel="coarse_segm_confidence",
                count_per_class=sampler_cfg.COUNT_PER_CLASS,
                search_proportion=0.5,
            ),
        )
        data_sampler.register_sampler("pred_densepose", "gt_masks", MaskFromDensePoseSampler())
        return data_sampler

    raise ValueError(f"Unknown data sampler type {sampler_cfg.TYPE}")

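
# Note (illustrative): every branch above follows the same pattern, so a
# sampler config only needs a TYPE plus that sampler's parameters; e.g. a
# hypothetical sampler_cfg with TYPE = "densepose_uniform" and
# COUNT_PER_CLASS = 8 yields a PredictionToGroundTruthSampler that converts
# "pred_densepose" model outputs into "gt_densepose" and "gt_masks" fields.
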
def build_data_filter(cfg: CfgNode):
    if cfg.TYPE == "detection_score":
        min_score = cfg.MIN_VALUE
        return ScoreBasedFilter(min_score=min_score)
    raise ValueError(f"Unknown data filter type {cfg.TYPE}")

def build_inference_based_loader(
    cfg: CfgNode,
    dataset_cfg: CfgNode,
    model: torch.nn.Module,
    embedder: Optional[torch.nn.Module] = None,
) -> InferenceBasedLoader:
    """
    Constructs a data loader based on inference results of a model.
    """
    dataset = build_bootstrap_dataset(dataset_cfg.DATASET, dataset_cfg.IMAGE_LOADER)
    meta = MetadataCatalog.get(dataset_cfg.DATASET)
    training_sampler = TrainingSampler(len(dataset))
    data_loader = torch.utils.data.DataLoader(
        dataset,  # pyre-ignore[6]
        batch_size=dataset_cfg.IMAGE_LOADER.BATCH_SIZE,
        sampler=training_sampler,
        num_workers=dataset_cfg.IMAGE_LOADER.NUM_WORKERS,
        collate_fn=trivial_batch_collator,
        worker_init_fn=worker_init_reset_seed,
    )
    return InferenceBasedLoader(
        model,
        data_loader=data_loader,
        data_sampler=build_data_sampler(cfg, dataset_cfg.DATA_SAMPLER, embedder),
        data_filter=build_data_filter(dataset_cfg.FILTER),
        shuffle=True,
        batch_size=dataset_cfg.INFERENCE.OUTPUT_BATCH_SIZE,
        inference_batch_size=dataset_cfg.INFERENCE.INPUT_BATCH_SIZE,
        category_to_class_mapping=meta.category_to_class_mapping,
    )


def has_inference_based_loaders(cfg: CfgNode) -> bool:
    """
    Returns True if at least one inference-based loader must
    be instantiated for training.
    """
    return len(cfg.BOOTSTRAP_DATASETS) > 0

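
# Sketch (illustrative, hypothetical values): one entry of
# cfg.BOOTSTRAP_DATASETS as consumed by build_inference_based_loaders below;
# each entry is merged onto the defaults from get_bootstrap_dataset_config():
#
#   cfg.BOOTSTRAP_DATASETS = [
#       {"DATASET": "chimps_video_list", "RATIO": 1.0},
#   ]
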
def build_inference_based_loaders(
    cfg: CfgNode, model: torch.nn.Module
) -> Tuple[List[InferenceBasedLoader], List[float]]:
    loaders = []
    ratios = []
    embedder = build_densepose_embedder(cfg).to(device=model.device)  # pyre-ignore[16]
    for dataset_spec in cfg.BOOTSTRAP_DATASETS:
        dataset_cfg = get_bootstrap_dataset_config().clone()
        dataset_cfg.merge_from_other_cfg(CfgNode(dataset_spec))
        loader = build_inference_based_loader(cfg, dataset_cfg, model, embedder)
        loaders.append(loader)
        ratios.append(dataset_cfg.RATIO)
    return loaders, ratios


def build_video_list_dataset(meta: Metadata, cfg: CfgNode):
    video_list_fpath = meta.video_list_fpath
    video_base_path = meta.video_base_path
    category = meta.category
    if cfg.TYPE == "video_keyframe":
        frame_selector = build_frame_selector(cfg.SELECT)
        transform = build_transform(cfg.TRANSFORM, data_type="image")
        video_list = video_list_from_file(video_list_fpath, video_base_path)
        keyframe_helper_fpath = getattr(cfg, "KEYFRAME_HELPER", None)
        return VideoKeyframeDataset(
            video_list, category, frame_selector, transform, keyframe_helper_fpath
        )

class _BootstrapDatasetFactoryCatalog(UserDict):
    """
    A global dictionary that stores functions which create bootstrap datasets
    from metadata and config, one for each supported DatasetType.
    """

    def register(self, dataset_type: DatasetType, factory: Callable[[Metadata, CfgNode], Dataset]):
        """
        Args:
            dataset_type (DatasetType): a DatasetType, e.g. DatasetType.VIDEO_LIST
            factory (Callable[[Metadata, CfgNode], Dataset]): a callable which takes Metadata
                and cfg arguments and returns a dataset object.
        """
        assert dataset_type not in self, "Dataset '{}' is already registered!".format(dataset_type)
        self[dataset_type] = factory


BootstrapDatasetFactoryCatalog = _BootstrapDatasetFactoryCatalog()
BootstrapDatasetFactoryCatalog.register(DatasetType.VIDEO_LIST, build_video_list_dataset)
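
# Sketch (illustrative): registering a factory for an additional dataset type;
# the names are hypothetical and assume a DatasetType.IMAGE_LIST member exists:
#
#   def build_image_list_dataset(meta: Metadata, cfg: CfgNode):
#       ...
#
#   BootstrapDatasetFactoryCatalog.register(DatasetType.IMAGE_LIST, build_image_list_dataset)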