code stringlengths 17 6.64M |
|---|
def cityscapes_classes():
    """Return the class names of the Cityscapes detection dataset.

    The order defines the label indices and must stay fixed.
    """
    return [
        'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
        'bicycle'
    ]
|
def oid_challenge_classes():
    """Return the class names of the Open Images Challenge dataset.

    The position of each name defines its label index, so the ordering
    below must not be changed.
    """
    # NOTE: kept as one literal; do not sort or reflow the entries.
    return ['Footwear', 'Jeans', 'House', 'Tree', 'Woman', 'Man', 'Land vehicle', 'Person', 'Wheel', 'Bus', 'Human face', 'Bird', 'Dress', 'Girl', 'Vehicle', 'Building', 'Cat', 'Car', 'Belt', 'Elephant', 'Dessert', 'Butterfly', 'Train', 'Guitar', 'Poster', 'Book', 'Boy', 'Bee', 'Flower', 'Window', 'Hat', 'Human head', 'Dog', 'Human arm', 'Drink', 'Human mouth', 'Human hair', 'Human nose', 'Human hand', 'Table', 'Marine invertebrates', 'Fish', 'Sculpture', 'Rose', 'Street light', 'Glasses', 'Fountain', 'Skyscraper', 'Swimwear', 'Brassiere', 'Drum', 'Duck', 'Countertop', 'Furniture', 'Ball', 'Human leg', 'Boat', 'Balloon', 'Bicycle helmet', 'Goggles', 'Door', 'Human eye', 'Shirt', 'Toy', 'Teddy bear', 'Pasta', 'Tomato', 'Human ear', 'Vehicle registration plate', 'Microphone', 'Musical keyboard', 'Tower', 'Houseplant', 'Flowerpot', 'Fruit', 'Vegetable', 'Musical instrument', 'Suit', 'Motorcycle', 'Bagel', 'French fries', 'Hamburger', 'Chair', 'Salt and pepper shakers', 'Snail', 'Airplane', 'Horse', 'Laptop', 'Computer keyboard', 'Football helmet', 'Cocktail', 'Juice', 'Tie', 'Computer monitor', 'Human beard', 'Bottle', 'Saxophone', 'Lemon', 'Mouse', 'Sock', 'Cowboy hat', 'Sun hat', 'Football', 'Porch', 'Sunglasses', 'Lobster', 'Crab', 'Picture frame', 'Van', 'Crocodile', 'Surfboard', 'Shorts', 'Helicopter', 'Helmet', 'Sports uniform', 'Taxi', 'Swan', 'Goose', 'Coat', 'Jacket', 'Handbag', 'Flag', 'Skateboard', 'Television', 'Tire', 'Spoon', 'Palm tree', 'Stairs', 'Salad', 'Castle', 'Oven', 'Microwave oven', 'Wine', 'Ceiling fan', 'Mechanical fan', 'Cattle', 'Truck', 'Box', 'Ambulance', 'Desk', 'Wine glass', 'Reptile', 'Tank', 'Traffic light', 'Billboard', 'Tent', 'Insect', 'Spider', 'Treadmill', 'Cupboard', 'Shelf', 'Seat belt', 'Human foot', 'Bicycle', 'Bicycle wheel', 'Couch', 'Bookcase', 'Fedora', 'Backpack', 'Bench', 'Oyster', 'Moths and butterflies', 'Lavender', 'Waffle', 'Fork', 'Animal', 'Accordion', 'Mobile phone', 'Plate', 'Coffee cup', 'Saucer', 'Platter',
            'Dagger', 'Knife', 'Bull', 'Tortoise', 'Sea turtle', 'Deer', 'Weapon', 'Apple', 'Ski', 'Taco', 'Traffic sign', 'Beer', 'Necklace', 'Sunflower', 'Piano', 'Organ', 'Harpsichord', 'Bed', 'Cabinetry', 'Nightstand', 'Curtain', 'Chest of drawers', 'Drawer', 'Parrot', 'Sandal', 'High heels', 'Tableware', 'Cart', 'Mushroom', 'Kite', 'Missile', 'Seafood', 'Camera', 'Paper towel', 'Toilet paper', 'Sombrero', 'Radish', 'Lighthouse', 'Segway', 'Pig', 'Watercraft', 'Golf cart', 'studio couch', 'Dolphin', 'Whale', 'Earrings', 'Otter', 'Sea lion', 'Whiteboard', 'Monkey', 'Gondola', 'Zebra', 'Baseball glove', 'Scarf', 'Adhesive tape', 'Trousers', 'Scoreboard', 'Lily', 'Carnivore', 'Power plugs and sockets', 'Office building', 'Sandwich', 'Swimming pool', 'Headphones', 'Tin can', 'Crown', 'Doll', 'Cake', 'Frog', 'Beetle', 'Ant', 'Gas stove', 'Canoe', 'Falcon', 'Blue jay', 'Egg', 'Fire hydrant', 'Raccoon', 'Muffin', 'Wall clock', 'Coffee', 'Mug', 'Tea', 'Bear', 'Waste container', 'Home appliance', 'Candle', 'Lion', 'Mirror', 'Starfish', 'Marine mammal', 'Wheelchair', 'Umbrella', 'Alpaca', 'Violin', 'Cello', 'Brown bear', 'Canary', 'Bat', 'Ruler', 'Plastic bag', 'Penguin', 'Watermelon', 'Harbor seal', 'Pen', 'Pumpkin', 'Harp', 'Kitchen appliance', 'Roller skates', 'Bust', 'Coffee table', 'Tennis ball', 'Tennis racket', 'Ladder', 'Boot', 'Bowl', 'Stop sign', 'Volleyball', 'Eagle', 'Paddle', 'Chicken', 'Skull', 'Lamp', 'Beehive', 'Maple', 'Sink', 'Goldfish', 'Tripod', 'Coconut', 'Bidet', 'Tap', 'Bathroom cabinet', 'Toilet', 'Filing cabinet', 'Pretzel', 'Table tennis racket', 'Bronze sculpture', 'Rocket', 'Mouse', 'Hamster', 'Lizard', 'Lifejacket', 'Goat', 'Washing machine', 'Trumpet', 'Horn', 'Trombone', 'Sheep', 'Tablet computer', 'Pillow', 'Kitchen & dining room table', 'Parachute', 'Raven', 'Glove', 'Loveseat', 'Christmas tree', 'Shellfish', 'Rifle', 'Shotgun', 'Sushi', 'Sparrow', 'Bread', 'Toaster', 'Watch', 'Asparagus', 'Artichoke', 'Suitcase', 'Antelope', 'Broccoli', 'Ice cream',
            'Racket', 'Banana', 'Cookie', 'Cucumber', 'Dragonfly', 'Lynx', 'Caterpillar', 'Light bulb', 'Office supplies', 'Miniskirt', 'Skirt', 'Fireplace', 'Potato', 'Light switch', 'Croissant', 'Cabbage', 'Ladybug', 'Handgun', 'Luggage and bags', 'Window blind', 'Snowboard', 'Baseball bat', 'Digital clock', 'Serving tray', 'Infant bed', 'Sofa bed', 'Guacamole', 'Fox', 'Pizza', 'Snowplow', 'Jet ski', 'Refrigerator', 'Lantern', 'Convenience store', 'Sword', 'Rugby ball', 'Owl', 'Ostrich', 'Pancake', 'Strawberry', 'Carrot', 'Tart', 'Dice', 'Turkey', 'Rabbit', 'Invertebrate', 'Vase', 'Stool', 'Swim cap', 'Shower', 'Clock', 'Jellyfish', 'Aircraft', 'Chopsticks', 'Orange', 'Snake', 'Sewing machine', 'Kangaroo', 'Mixer', 'Food processor', 'Shrimp', 'Towel', 'Porcupine', 'Jaguar', 'Cannon', 'Limousine', 'Mule', 'Squirrel', 'Kitchen knife', 'Tiara', 'Tiger', 'Bow and arrow', 'Candy', 'Rhinoceros', 'Shark', 'Cricket ball', 'Doughnut', 'Plumbing fixture', 'Camel', 'Polar bear', 'Coin', 'Printer', 'Blender', 'Giraffe', 'Billiard table', 'Kettle', 'Dinosaur', 'Pineapple', 'Zucchini', 'Jug', 'Barge', 'Teapot', 'Golf ball', 'Binoculars', 'Scissors', 'Hot dog', 'Door handle', 'Seahorse', 'Bathtub', 'Leopard', 'Centipede', 'Grapefruit', 'Snowman', 'Cheetah', 'Alarm clock', 'Grape', 'Wrench', 'Wok', 'Bell pepper', 'Cake stand', 'Barrel', 'Woodpecker', 'Flute', 'Corded phone', 'Willow', 'Punching bag', 'Pomegranate', 'Telephone', 'Pear', 'Common fig', 'Bench', 'Wood-burning stove', 'Burrito', 'Nail', 'Turtle', 'Submarine sandwich', 'Drinking straw', 'Peach', 'Popcorn', 'Frying pan', 'Picnic basket', 'Honeycomb', 'Envelope', 'Mango', 'Cutting board', 'Pitcher', 'Stationary bicycle', 'Dumbbell', 'Personal care', 'Dog bed', 'Snowmobile', 'Oboe', 'Briefcase', 'Squash', 'Tick', 'Slow cooker', 'Coffeemaker', 'Measuring cup', 'Crutch', 'Stretcher', 'Screwdriver', 'Flashlight', 'Spatula', 'Pressure cooker', 'Ring binder', 'Beaker', 'Torch', 'Winter melon']
|
def oid_v6_classes():
    """Return the class names of the Open Images Dataset v6.

    The position of each name defines its label index, so the ordering
    below must not be changed.
    """
    # NOTE: kept as one literal; do not sort or reflow the entries.
    return ['Tortoise', 'Container', 'Magpie', 'Sea turtle', 'Football', 'Ambulance', 'Ladder', 'Toothbrush', 'Syringe', 'Sink', 'Toy', 'Organ (Musical Instrument)', 'Cassette deck', 'Apple', 'Human eye', 'Cosmetics', 'Paddle', 'Snowman', 'Beer', 'Chopsticks', 'Human beard', 'Bird', 'Parking meter', 'Traffic light', 'Croissant', 'Cucumber', 'Radish', 'Towel', 'Doll', 'Skull', 'Washing machine', 'Glove', 'Tick', 'Belt', 'Sunglasses', 'Banjo', 'Cart', 'Ball', 'Backpack', 'Bicycle', 'Home appliance', 'Centipede', 'Boat', 'Surfboard', 'Boot', 'Headphones', 'Hot dog', 'Shorts', 'Fast food', 'Bus', 'Boy', 'Screwdriver', 'Bicycle wheel', 'Barge', 'Laptop', 'Miniskirt', 'Drill (Tool)', 'Dress', 'Bear', 'Waffle', 'Pancake', 'Brown bear', 'Woodpecker', 'Blue jay', 'Pretzel', 'Bagel', 'Tower', 'Teapot', 'Person', 'Bow and arrow', 'Swimwear', 'Beehive', 'Brassiere', 'Bee', 'Bat (Animal)', 'Starfish', 'Popcorn', 'Burrito', 'Chainsaw', 'Balloon', 'Wrench', 'Tent', 'Vehicle registration plate', 'Lantern', 'Toaster', 'Flashlight', 'Billboard', 'Tiara', 'Limousine', 'Necklace', 'Carnivore', 'Scissors', 'Stairs', 'Computer keyboard', 'Printer', 'Traffic sign', 'Chair', 'Shirt', 'Poster', 'Cheese', 'Sock', 'Fire hydrant', 'Land vehicle', 'Earrings', 'Tie', 'Watercraft', 'Cabinetry', 'Suitcase', 'Muffin', 'Bidet', 'Snack', 'Snowmobile', 'Clock', 'Medical equipment', 'Cattle', 'Cello', 'Jet ski', 'Camel', 'Coat', 'Suit', 'Desk', 'Cat', 'Bronze sculpture', 'Juice', 'Gondola', 'Beetle', 'Cannon', 'Computer mouse', 'Cookie', 'Office building', 'Fountain', 'Coin', 'Calculator', 'Cocktail', 'Computer monitor', 'Box', 'Stapler', 'Christmas tree', 'Cowboy hat', 'Hiking equipment', 'Studio couch', 'Drum', 'Dessert', 'Wine rack', 'Drink', 'Zucchini', 'Ladle', 'Human mouth', 'Dairy Product', 'Dice', 'Oven', 'Dinosaur', 'Ratchet (Device)', 'Couch', 'Cricket ball', 'Winter melon', 'Spatula', 'Whiteboard', 'Pencil sharpener', 'Door', 'Hat', 'Shower', 'Eraser', 'Fedora', 'Guacamole', 'Dagger', 'Scarf',
            'Dolphin', 'Sombrero', 'Tin can', 'Mug', 'Tap', 'Harbor seal', 'Stretcher', 'Can opener', 'Goggles', 'Human body', 'Roller skates', 'Coffee cup', 'Cutting board', 'Blender', 'Plumbing fixture', 'Stop sign', 'Office supplies', 'Volleyball (Ball)', 'Vase', 'Slow cooker', 'Wardrobe', 'Coffee', 'Whisk', 'Paper towel', 'Personal care', 'Food', 'Sun hat', 'Tree house', 'Flying disc', 'Skirt', 'Gas stove', 'Salt and pepper shakers', 'Mechanical fan', 'Face powder', 'Fax', 'Fruit', 'French fries', 'Nightstand', 'Barrel', 'Kite', 'Tart', 'Treadmill', 'Fox', 'Flag', 'French horn', 'Window blind', 'Human foot', 'Golf cart', 'Jacket', 'Egg (Food)', 'Street light', 'Guitar', 'Pillow', 'Human leg', 'Isopod', 'Grape', 'Human ear', 'Power plugs and sockets', 'Panda', 'Giraffe', 'Woman', 'Door handle', 'Rhinoceros', 'Bathtub', 'Goldfish', 'Houseplant', 'Goat', 'Baseball bat', 'Baseball glove', 'Mixing bowl', 'Marine invertebrates', 'Kitchen utensil', 'Light switch', 'House', 'Horse', 'Stationary bicycle', 'Hammer', 'Ceiling fan', 'Sofa bed', 'Adhesive tape', 'Harp', 'Sandal', 'Bicycle helmet', 'Saucer', 'Harpsichord', 'Human hair', 'Heater', 'Harmonica', 'Hamster', 'Curtain', 'Bed', 'Kettle', 'Fireplace', 'Scale', 'Drinking straw', 'Insect', 'Hair dryer', 'Kitchenware', 'Indoor rower', 'Invertebrate', 'Food processor', 'Bookcase', 'Refrigerator', 'Wood-burning stove', 'Punching bag', 'Common fig', 'Cocktail shaker', 'Jaguar (Animal)', 'Golf ball', 'Fashion accessory', 'Alarm clock', 'Filing cabinet', 'Artichoke', 'Table', 'Tableware', 'Kangaroo', 'Koala', 'Knife', 'Bottle', 'Bottle opener', 'Lynx', 'Lavender (Plant)', 'Lighthouse', 'Dumbbell', 'Human head', 'Bowl', 'Humidifier', 'Porch', 'Lizard', 'Billiard table', 'Mammal', 'Mouse', 'Motorcycle', 'Musical instrument', 'Swim cap', 'Frying pan', 'Snowplow', 'Bathroom cabinet', 'Missile', 'Bust', 'Man', 'Waffle iron', 'Milk', 'Ring binder', 'Plate', 'Mobile phone', 'Baked goods', 'Mushroom', 'Crutch', 'Pitcher (Container)', 'Mirror',
            'Personal flotation device', 'Table tennis racket', 'Pencil case', 'Musical keyboard', 'Scoreboard', 'Briefcase', 'Kitchen knife', 'Nail (Construction)', 'Tennis ball', 'Plastic bag', 'Oboe', 'Chest of drawers', 'Ostrich', 'Piano', 'Girl', 'Plant', 'Potato', 'Hair spray', 'Sports equipment', 'Pasta', 'Penguin', 'Pumpkin', 'Pear', 'Infant bed', 'Polar bear', 'Mixer', 'Cupboard', 'Jacuzzi', 'Pizza', 'Digital clock', 'Pig', 'Reptile', 'Rifle', 'Lipstick', 'Skateboard', 'Raven', 'High heels', 'Red panda', 'Rose', 'Rabbit', 'Sculpture', 'Saxophone', 'Shotgun', 'Seafood', 'Submarine sandwich', 'Snowboard', 'Sword', 'Picture frame', 'Sushi', 'Loveseat', 'Ski', 'Squirrel', 'Tripod', 'Stethoscope', 'Submarine', 'Scorpion', 'Segway', 'Training bench', 'Snake', 'Coffee table', 'Skyscraper', 'Sheep', 'Television', 'Trombone', 'Tea', 'Tank', 'Taco', 'Telephone', 'Torch', 'Tiger', 'Strawberry', 'Trumpet', 'Tree', 'Tomato', 'Train', 'Tool', 'Picnic basket', 'Cooking spray', 'Trousers', 'Bowling equipment', 'Football helmet', 'Truck', 'Measuring cup', 'Coffeemaker', 'Violin', 'Vehicle', 'Handbag', 'Paper cutter', 'Wine', 'Weapon', 'Wheel', 'Worm', 'Wok', 'Whale', 'Zebra', 'Auto part', 'Jug', 'Pizza cutter', 'Cream', 'Monkey', 'Lion', 'Bread', 'Platter', 'Chicken', 'Eagle', 'Helicopter', 'Owl', 'Duck', 'Turtle', 'Hippopotamus', 'Crocodile', 'Toilet', 'Toilet paper', 'Squid', 'Clothing', 'Footwear', 'Lemon', 'Spider', 'Deer', 'Frog', 'Banana', 'Rocket', 'Wine glass', 'Countertop', 'Tablet computer', 'Waste container', 'Swimming pool', 'Dog', 'Book', 'Elephant', 'Shark', 'Candle', 'Leopard', 'Axe', 'Hand dryer', 'Soap dispenser', 'Porcupine', 'Flower', 'Canary', 'Cheetah', 'Palm tree', 'Hamburger', 'Maple', 'Building', 'Fish', 'Lobster', 'Garden Asparagus', 'Furniture', 'Hedgehog', 'Airplane', 'Spoon', 'Otter', 'Bull', 'Oyster', 'Horizontal bar', 'Convenience store', 'Bomb', 'Bench', 'Ice cream', 'Caterpillar', 'Butterfly', 'Parachute', 'Orange', 'Antelope', 'Beaker', 'Moths and butterflies', 'Window', 'Closet', 'Castle', 'Jellyfish', 'Goose', 'Mule', 'Swan', 'Peach', 'Coconut', 'Seat belt', 'Raccoon', 'Chisel', 'Fork', 'Lamp', 'Camera', 'Squash (Plant)', 'Racket', 'Human face', 'Human arm', 'Vegetable', 'Diaper', 'Unicycle', 'Falcon', 'Chime', 'Snail', 'Shellfish', 'Cabbage', 'Carrot', 'Mango', 'Jeans', 'Flowerpot', 'Pineapple', 'Drawer', 'Stool', 'Envelope', 'Cake', 'Dragonfly', 'Common sunflower', 'Microwave oven', 'Honeycomb', 'Marine mammal', 'Sea lion', 'Ladybug', 'Shelf', 'Watch', 'Candy', 'Salad', 'Parrot', 'Handgun', 'Sparrow', 'Van', 'Grinder', 'Spice rack', 'Light bulb', 'Corded phone', 'Sports uniform', 'Tennis racket', 'Wall clock', 'Serving tray', 'Kitchen & dining room table', 'Dog bed', 'Cake stand', 'Cat furniture', 'Bathroom accessory', 'Facial tissue holder', 'Pressure cooker', 'Kitchen appliance', 'Tire', 'Ruler', 'Luggage and bags', 'Microphone', 'Broccoli', 'Umbrella', 'Pastry', 'Grapefruit', 'Band-aid', 'Animal', 'Bell pepper', 'Turkey', 'Lily', 'Pomegranate', 'Doughnut', 'Glasses', 'Human nose', 'Pen', 'Ant', 'Car', 'Aircraft', 'Human hand', 'Skunk', 'Teddy bear', 'Watermelon', 'Cantaloupe', 'Dishwasher', 'Flute', 'Balance beam', 'Sandwich', 'Shrimp', 'Sewing machine', 'Binoculars', 'Rays and skates', 'Ipod', 'Accordion', 'Willow', 'Crab', 'Crown', 'Seahorse', 'Perfume', 'Alpaca', 'Taxi', 'Canoe', 'Remote control', 'Wheelchair', 'Rugby ball', 'Armadillo', 'Maracas', 'Helmet']
|
def get_classes(dataset):
    """Get class names of a dataset.

    Args:
        dataset (str): Dataset name or any alias registered in
            ``dataset_aliases``.

    Returns:
        list[str]: Class names of the dataset.

    Raises:
        TypeError: If ``dataset`` is not a string.
        ValueError: If ``dataset`` matches no known dataset or alias.
    """
    # Flatten the alias table into a reverse mapping: alias -> canonical name.
    alias2name = {}
    for name, aliases in dataset_aliases.items():
        for alias in aliases:
            alias2name[alias] = name
    if not isinstance(dataset, str):
        raise TypeError(f'dataset must a str, but got {type(dataset)}')
    if dataset not in alias2name:
        raise ValueError(f'Unrecognized dataset: {dataset}')
    # Dispatch to the module-level ``<name>_classes()`` helper via an
    # explicit lookup instead of the original string-building ``eval()``.
    return globals()[alias2name[dataset] + '_classes']()
|
def _calc_dynamic_intervals(start_interval, dynamic_interval_list):
assert mmcv.is_list_of(dynamic_interval_list, tuple)
dynamic_milestones = [0]
dynamic_milestones.extend([dynamic_interval[0] for dynamic_interval in dynamic_interval_list])
dynamic_intervals = [start_interval]
dynamic_intervals.extend([dynamic_interval[1] for dynamic_interval in dynamic_interval_list])
return (dynamic_milestones, dynamic_intervals)
|
class EvalHook(BaseEvalHook):
    """Single-GPU evaluation hook with optional dynamic intervals.

    ``dynamic_intervals`` is a list of ``(milestone, interval)`` tuples:
    once training progress passes a milestone, evaluation switches to the
    paired interval.
    """

    def __init__(self, *args, dynamic_intervals=None, **kwargs):
        super(EvalHook, self).__init__(*args, **kwargs)
        self.use_dynamic_intervals = dynamic_intervals is not None
        if self.use_dynamic_intervals:
            self.dynamic_milestones, self.dynamic_intervals = \
                _calc_dynamic_intervals(self.interval, dynamic_intervals)

    def _decide_interval(self, runner):
        # Select the interval configured for the current training progress.
        if not self.use_dynamic_intervals:
            return
        progress = runner.epoch if self.by_epoch else runner.iter
        step = bisect.bisect(self.dynamic_milestones, progress + 1)
        self.interval = self.dynamic_intervals[step - 1]

    def before_train_epoch(self, runner):
        """Evaluate the model only at the start of training by epoch."""
        self._decide_interval(runner)
        super().before_train_epoch(runner)

    def before_train_iter(self, runner):
        self._decide_interval(runner)
        super().before_train_iter(runner)

    def _do_evaluate(self, runner):
        """Perform evaluation and save ckpt."""
        if not self._should_evaluate(runner):
            return
        from mmdet.apis import single_gpu_test
        results = single_gpu_test(runner.model, self.dataloader, show=False)
        runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
        key_score = self.evaluate(runner, results)
        if self.save_best and key_score:
            self._save_ckpt(runner, key_score)
|
class DistEvalHook(BaseDistEvalHook):
    """Multi-GPU evaluation hook with optional dynamic intervals.

    Mirrors :class:`EvalHook` but gathers results across ranks and
    optionally synchronizes BN buffers from rank 0 before evaluating.
    """

    def __init__(self, *args, dynamic_intervals=None, **kwargs):
        super(DistEvalHook, self).__init__(*args, **kwargs)
        self.use_dynamic_intervals = dynamic_intervals is not None
        if self.use_dynamic_intervals:
            self.dynamic_milestones, self.dynamic_intervals = \
                _calc_dynamic_intervals(self.interval, dynamic_intervals)

    def _decide_interval(self, runner):
        # Select the interval configured for the current training progress.
        if not self.use_dynamic_intervals:
            return
        progress = runner.epoch if self.by_epoch else runner.iter
        step = bisect.bisect(self.dynamic_milestones, progress + 1)
        self.interval = self.dynamic_intervals[step - 1]

    def before_train_epoch(self, runner):
        """Evaluate the model only at the start of training by epoch."""
        self._decide_interval(runner)
        super().before_train_epoch(runner)

    def before_train_iter(self, runner):
        self._decide_interval(runner)
        super().before_train_iter(runner)

    def _do_evaluate(self, runner):
        """Perform evaluation and save ckpt."""
        # Broadcast rank-0 BN statistics so all workers evaluate with
        # identical running stats.
        if self.broadcast_bn_buffer:
            for _, module in runner.model.named_modules():
                if isinstance(module,
                              _BatchNorm) and module.track_running_stats:
                    dist.broadcast(module.running_var, 0)
                    dist.broadcast(module.running_mean, 0)
        if not self._should_evaluate(runner):
            return
        tmpdir = self.tmpdir
        if tmpdir is None:
            tmpdir = osp.join(runner.work_dir, '.eval_hook')
        from mmdet.apis import multi_gpu_test
        results = multi_gpu_test(
            runner.model,
            self.dataloader,
            tmpdir=tmpdir,
            gpu_collect=self.gpu_collect)
        # Only rank 0 logs, evaluates, and saves checkpoints.
        if runner.rank == 0:
            print('\n')
            runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
            key_score = self.evaluate(runner, results)
            if self.save_best and key_score:
                self._save_ckpt(runner, key_score)
|
def generate_inputs_and_wrap_model(config_path, checkpoint_path, input_config, cfg_options=None):
    """Prepare sample input and wrap model for ONNX export.

    The ONNX export API only accepts positional tensor args, so this
    function (1) generates the dummy inputs used to execute the model and
    (2) freezes the kwargs of the model's forward (``return_loss=False``,
    ``img_metas``) via ``functools.partial``.

    Args:
        config_path (str): The OpenMMLab config for the model to export.
        checkpoint_path (str): Path to the corresponding checkpoint.
        input_config (dict): Describes the example input; for MMDet a real
            image path should be given, or NMS may return no legal bbox.
        cfg_options (dict, optional): Overrides merged into the config.

    Returns:
        tuple: ``(model, tensor_data)`` — the wrapped model, callable as
        ``model(*tensor_data)``, and the inputs used while exporting.
    """
    model = build_model_from_cfg(
        config_path, checkpoint_path, cfg_options=cfg_options)
    one_img, one_meta = preprocess_example_input(input_config)
    tensor_data = [one_img]
    # Export cannot pass bools/kwargs, so bind them here once.
    model.forward = partial(
        model.forward, img_metas=[[one_meta]], return_loss=False)
    opset_version = 11
    try:
        from mmcv.onnx.symbolic import register_extra_symbolics
    except ModuleNotFoundError:
        raise NotImplementedError('please update mmcv to version>=v1.0.4')
    register_extra_symbolics(opset_version)
    return model, tensor_data
|
def build_model_from_cfg(config_path, checkpoint_path, cfg_options=None):
    """Build a model from config and load the given checkpoint.

    Args:
        config_path (str): The OpenMMLab config for the model to export.
        checkpoint_path (str): Path to the corresponding checkpoint.
        cfg_options (dict, optional): Overrides merged into the config.

    Returns:
        torch.nn.Module: The built model, on CPU and in eval mode.
    """
    from mmdet.models import build_detector

    cfg = mmcv.Config.fromfile(config_path)
    if cfg_options is not None:
        cfg.merge_from_dict(cfg_options)
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # Export never needs pretrained weights or a training config.
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    cfg.model.train_cfg = None
    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
    checkpoint = load_checkpoint(model, checkpoint_path, map_location='cpu')
    meta = checkpoint.get('meta', {})
    if 'CLASSES' in meta:
        model.CLASSES = meta['CLASSES']
    else:
        # Fall back to the class names declared by the test dataset type.
        from mmdet.datasets import DATASETS
        dataset_cls = DATASETS.get(cfg.data.test['type'])
        assert dataset_cls is not None
        model.CLASSES = dataset_cls.CLASSES
    model.cpu().eval()
    return model
|
def preprocess_example_input(input_config):
    """Prepare an example input image for ``generate_inputs_and_wrap_model``.

    Args:
        input_config (dict): Customized config describing the example
            input. Must contain ``input_path`` and ``input_shape``
            (``(N, C, H, W)``); may contain ``normalize_cfg`` with
            ``mean``/``std`` and optional ``to_rgb``.

    Returns:
        tuple: ``(one_img, one_meta)`` — a ``(1, C, H, W)`` float tensor
        with ``requires_grad`` set, and the meta dict describing it
        (shapes, dummy filename, scale factor, flip flags, and the raw
        ``show_img`` copy).
    """
    img_path = input_config['input_path']
    input_shape = input_config['input_shape']
    one_img = mmcv.imread(img_path)
    # ``imresize`` expects (w, h); ``input_shape`` is (N, C, H, W).
    one_img = mmcv.imresize(one_img, input_shape[2:][::-1])
    show_img = one_img.copy()
    if 'normalize_cfg' in input_config:
        normalize_cfg = input_config['normalize_cfg']
        mean = np.array(normalize_cfg['mean'], dtype=np.float32)
        std = np.array(normalize_cfg['std'], dtype=np.float32)
        to_rgb = normalize_cfg.get('to_rgb', True)
        one_img = mmcv.imnormalize(one_img, mean, std, to_rgb=to_rgb)
    # HWC -> CHW, add batch dim, enable grad for tracing.
    one_img = torch.from_numpy(one_img.transpose(2, 0, 1)).unsqueeze(0)
    one_img = one_img.float().requires_grad_(True)
    _, C, H, W = input_shape
    one_meta = {
        'img_shape': (H, W, C),
        'ori_shape': (H, W, C),
        'pad_shape': (H, W, C),
        'filename': '<demo>.png',
        'scale_factor': np.ones(4, dtype=np.float32),
        'flip': False,
        'show_img': show_img,
        'flip_direction': None
    }
    return one_img, one_meta
|
@HOOKS.register_module()
class CheckInvalidLossHook(Hook):
    """Check invalid loss hook.

    This hook will regularly check whether the loss is valid
    during training.

    Args:
        interval (int): Checking interval (every k iterations).
            Default: 50.
    """

    def __init__(self, interval=50):
        self.interval = interval

    def after_train_iter(self, runner):
        # Only check every ``interval`` iterations to keep overhead low.
        if not self.every_n_iters(runner, self.interval):
            return
        # The previous implementation used
        # ``assert cond, runner.logger.info(...)``: the raised
        # AssertionError carried ``None`` as its message (the log call's
        # return value) and the whole check silently vanished under
        # ``python -O``. Check and raise explicitly instead; the
        # exception type stays AssertionError for backward compatibility.
        if not torch.isfinite(runner.outputs['loss']):
            runner.logger.info('loss become infinite or NaN!')
            raise AssertionError('loss become infinite or NaN!')
|
class BaseEMAHook(Hook):
    """Exponential Moving Average Hook.

    Use Exponential Moving Average on all parameters of model in training
    process. All parameters have a ema backup, which update by the formula
    as below. EMAHook takes priority over EvalHook and CheckpointHook. Note,
    the original model parameters are actually saved in ema field after train.

    Args:
        momentum (float): The momentum used for updating ema parameter.
            Ema's parameter are updated with the formula:
            `ema_param = (1-momentum) * ema_param + momentum * cur_param`.
            Defaults to 0.0002.
        skip_buffers (bool): Whether to skip the model buffers, such as
            batchnorm running stats (running_mean, running_var), it does not
            perform the ema operation. Default to False.
        interval (int): Update ema parameter every interval iteration.
            Defaults to 1.
        resume_from (str, optional): The checkpoint path. Defaults to None.
        momentum_fun (func, optional): The function to change momentum
            during early iteration (also warmup) to help early training.
            It uses `momentum` as a constant. Defaults to None.
    """

    def __init__(self, momentum=0.0002, interval=1, skip_buffers=False, resume_from=None, momentum_fun=None):
        # Momentum must lie strictly in (0, 1) for the EMA update to be stable.
        assert (0 < momentum < 1)
        self.momentum = momentum
        self.skip_buffers = skip_buffers
        self.interval = interval
        self.checkpoint = resume_from
        self.momentum_fun = momentum_fun

    def before_run(self, runner):
        """To resume model with it's ema parameters more friendly.

        Register ema parameter as ``named_buffer`` to model.
        """
        model = runner.model
        if is_module_wrapper(model):
            model = model.module
        # Maps each tracked parameter name to its EMA buffer name.
        self.param_ema_buffer = {}
        if self.skip_buffers:
            # Track only learnable parameters; buffers keep their live values.
            self.model_parameters = dict(model.named_parameters())
        else:
            # Track the full state dict, including buffers such as BN stats.
            self.model_parameters = model.state_dict()
        for (name, value) in self.model_parameters.items():
            # Dots are not allowed in buffer names, so flatten them.
            buffer_name = f"ema_{name.replace('.', '_')}"
            self.param_ema_buffer[name] = buffer_name
            model.register_buffer(buffer_name, value.data.clone())
        self.model_buffers = dict(model.named_buffers())
        if (self.checkpoint is not None):
            runner.resume(self.checkpoint)

    def get_momentum(self, runner):
        # Allow a momentum schedule (e.g. warmup) via momentum_fun;
        # fall back to the constant momentum otherwise.
        return (self.momentum_fun(runner.iter) if self.momentum_fun else self.momentum)

    def after_train_iter(self, runner):
        """Update ema parameter every self.interval iterations."""
        if (((runner.iter + 1) % self.interval) != 0):
            return
        momentum = self.get_momentum(runner)
        for (name, parameter) in self.model_parameters.items():
            # Non-float tensors (e.g. int ``num_batches_tracked``) are skipped.
            if parameter.dtype.is_floating_point:
                buffer_name = self.param_ema_buffer[name]
                buffer_parameter = self.model_buffers[buffer_name]
                # ema = (1 - momentum) * ema + momentum * param (in place)
                buffer_parameter.mul_((1 - momentum)).add_(parameter.data, alpha=momentum)

    def after_train_epoch(self, runner):
        """We load parameter values from ema backup to model before the
        EvalHook."""
        self._swap_ema_parameters()

    def before_train_epoch(self, runner):
        """We recover model's parameter from ema backup after last epoch's
        EvalHook."""
        self._swap_ema_parameters()

    def _swap_ema_parameters(self):
        """Swap the parameter of model with parameter in ema_buffer."""
        for (name, value) in self.model_parameters.items():
            temp = value.data.clone()
            ema_buffer = self.model_buffers[self.param_ema_buffer[name]]
            value.data.copy_(ema_buffer.data)
            ema_buffer.data.copy_(temp)
|
@HOOKS.register_module()
class ExpMomentumEMAHook(BaseEMAHook):
    """EMAHook using exponential momentum strategy.

    Args:
        total_iter (int): The total number of iterations of EMA momentum.
            Defaults to 2000.
    """

    def __init__(self, total_iter=2000, **kwargs):
        super(ExpMomentumEMAHook, self).__init__(**kwargs)

        def exp_momentum(x):
            # Decays from ~1 toward ``self.momentum`` as iterations grow.
            decay = math.exp(-(1 + x) / total_iter)
            return (1 - self.momentum) * decay + self.momentum

        self.momentum_fun = exp_momentum
|
@HOOKS.register_module()
class LinearMomentumEMAHook(BaseEMAHook):
    """EMAHook using linear momentum strategy.

    Args:
        warm_up (int): During first warm_up steps, we may use smaller decay
            to update ema parameters more slowly. Defaults to 100.
    """

    def __init__(self, warm_up=100, **kwargs):
        super(LinearMomentumEMAHook, self).__init__(**kwargs)

        def linear_momentum(x):
            # Ramp up during warmup, then cap at momentum ** interval.
            warm = (1 + x) / (warm_up + x)
            return min(self.momentum ** self.interval, warm)

        self.momentum_fun = linear_momentum
|
@HOOKS.register_module()
class SetEpochInfoHook(Hook):
    """Set runner's epoch information to the model."""

    def before_train_epoch(self, runner):
        target = runner.model
        # Unwrap (Distributed)DataParallel-style wrappers first.
        if is_module_wrapper(target):
            target = target.module
        target.set_epoch(runner.epoch)
|
def get_norm_states(module):
    """Collect the state tensors of every norm layer inside ``module``.

    Returns an ``OrderedDict`` mapping dotted ``<module>.<state>`` names
    (e.g. ``'0.running_mean'``) to the corresponding tensors.
    """
    norm_states = OrderedDict()
    for name, child in module.named_modules():
        if not isinstance(child, nn.modules.batchnorm._NormBase):
            continue
        for key, tensor in child.state_dict().items():
            norm_states[f'{name}.{key}'] = tensor
    return norm_states
|
@HOOKS.register_module()
class SyncNormHook(Hook):
    """Synchronize Norm states after training epoch, currently used in YOLOX.

    Args:
        num_last_epochs (int): The number of latter epochs in the end of the
            training to switch to synchronizing norm interval. Default: 15.
        interval (int): Synchronizing norm interval. Default: 1.
    """

    def __init__(self, num_last_epochs=15, interval=1):
        self.interval = interval
        self.num_last_epochs = num_last_epochs

    def before_train_epoch(self, runner):
        # Once training enters the final stage, sync after every epoch.
        if runner.epoch + 1 == runner.max_epochs - self.num_last_epochs:
            self.interval = 1

    def after_train_epoch(self, runner):
        """Synchronizing norm."""
        if (runner.epoch + 1) % self.interval != 0:
            return
        _, world_size = get_dist_info()
        if world_size == 1:
            return
        norm_states = get_norm_states(runner.model)
        if len(norm_states) == 0:
            return
        # Average the norm statistics across all ranks before reloading.
        synced_states = all_reduce_dict(norm_states, op='mean')
        runner.model.load_state_dict(synced_states, strict=False)
|
@HOOKS.register_module()
class SyncRandomSizeHook(Hook):
    """Change and synchronize the random image size across ranks.
    SyncRandomSizeHook is deprecated, please use Resize pipeline to achieve
    similar functions. Such as `dict(type='Resize', img_scale=[(448, 448),
    (832, 832)], multiscale_mode='range', keep_ratio=True)`.

    Note: Due to the multi-process dataloader, its behavior is different
    from YOLOX's official implementation, the official is to change the
    size every fixed iteration interval and what we achieved is a fixed
    epoch interval.

    Args:
        ratio_range (tuple[int]): Random ratio range. It will be multiplied
            by 32, and then change the dataset output image size.
            Default: (14, 26).
        img_scale (tuple[int]): Size of input image. Default: (640, 640).
        interval (int): The epoch interval of change image size. Default: 1.
        device (torch.device | str): device for returned tensors.
            Default: 'cuda'.
    """

    def __init__(self, ratio_range=(14, 26), img_scale=(640, 640), interval=1, device='cuda'):
        warnings.warn("DeprecationWarning: SyncRandomSizeHook is deprecated. Please use Resize pipeline to achieve similar functions. Due to the multi-process dataloader, its behavior is different from YOLOX's official implementation, the official is to change the size every fixed iteration interval and what we achieved is a fixed epoch interval.")
        self.rank, world_size = get_dist_info()
        self.is_distributed = world_size > 1
        self.ratio_range = ratio_range
        self.img_scale = img_scale
        self.interval = interval
        self.device = device

    def after_train_epoch(self, runner):
        """Change the dataset output image size."""
        if self.ratio_range is None or (runner.epoch + 1) % self.interval != 0:
            return
        tensor = torch.LongTensor(2).to(self.device)
        if self.rank == 0:
            # Draw a random multiple-of-32 size, keeping the aspect ratio
            # implied by img_scale.
            size_factor = self.img_scale[1] * 1.0 / self.img_scale[0]
            base = random.randint(*self.ratio_range)
            tensor[0] = int(32 * base)
            tensor[1] = 32 * int(base * size_factor)
        if self.is_distributed:
            dist.barrier()
            dist.broadcast(tensor, 0)
        runner.data_loader.dataset.update_dynamic_scale(
            (tensor[0].item(), tensor[1].item()))
|
@HOOKS.register_module()
class YOLOXLrUpdaterHook(CosineAnnealingLrUpdaterHook):
    """YOLOX learning rate scheme.

    There are two main differences between YOLOXLrUpdaterHook
    and CosineAnnealingLrUpdaterHook.

    1. When the current running epoch is greater than
       `max_epoch-last_epoch`, a fixed learning rate will be used
    2. The exp warmup scheme is different with LrUpdaterHook in MMCV

    Args:
        num_last_epochs (int): The number of epochs with a fixed learning rate
            before the end of the training.
    """

    def __init__(self, num_last_epochs, **kwargs):
        self.num_last_epochs = num_last_epochs
        super(YOLOXLrUpdaterHook, self).__init__(**kwargs)

    def get_warmup_lr(self, cur_iters):

        def _get_warmup_lr(cur_iters, regular_lr):
            # Quadratic warmup: lr scales with (iter / warmup_iters) ** 2.
            k = self.warmup_ratio * pow(
                (cur_iters + 1) / float(self.warmup_iters), 2)
            return [_lr * k for _lr in regular_lr]

        if not isinstance(self.base_lr, dict):
            return _get_warmup_lr(cur_iters, self.base_lr)
        return {
            key: _get_warmup_lr(cur_iters, base_lr)
            for key, base_lr in self.base_lr.items()
        }

    def get_lr(self, runner, base_lr):
        last_iter = len(runner.data_loader) * self.num_last_epochs
        if self.by_epoch:
            progress, max_progress = runner.epoch, runner.max_epochs
        else:
            progress, max_progress = runner.iter, runner.max_iters
        progress += 1
        # The lr annealed toward; held fixed for the last epochs.
        if self.min_lr_ratio is not None:
            target_lr = base_lr * self.min_lr_ratio
        else:
            target_lr = self.min_lr
        if progress >= max_progress - last_iter:
            return target_lr
        return annealing_cos(
            base_lr, target_lr, (progress - self.warmup_iters) /
            (max_progress - self.warmup_iters - last_iter))
|
@HOOKS.register_module()
class YOLOXModeSwitchHook(Hook):
    """Switch the mode of YOLOX during training.

    This hook turns off the mosaic and mixup data augmentation and switches
    to use L1 loss in bbox_head.

    Args:
        num_last_epochs (int): The number of latter epochs in the end of the
            training to close the data augmentation and switch to L1 loss.
            Default: 15.
        skip_type_keys (list[str], optional): Sequence of type string to be
            skip pipeline. Default: ('Mosaic', 'RandomAffine', 'MixUp')
    """

    def __init__(self, num_last_epochs=15, skip_type_keys=('Mosaic', 'RandomAffine', 'MixUp')):
        self.num_last_epochs = num_last_epochs
        self.skip_type_keys = skip_type_keys
        self._restart_dataloader = False

    def before_train_epoch(self, runner):
        """Close mosaic and mixup augmentation and switches to use L1 loss."""
        train_loader = runner.data_loader
        model = runner.model
        if is_module_wrapper(model):
            model = model.module
        if runner.epoch + 1 == runner.max_epochs - self.num_last_epochs:
            runner.logger.info('No mosaic and mixup aug now!')
            train_loader.dataset.update_skip_type_keys(self.skip_type_keys)
            # Persistent workers keep the old pipeline alive; force the
            # loader to rebuild its iterator so the change takes effect.
            if getattr(train_loader, 'persistent_workers', False) is True:
                train_loader._DataLoader__initialized = False
                train_loader._iterator = None
                self._restart_dataloader = True
            runner.logger.info('Add additional L1 loss now!')
            model.bbox_head.use_l1 = True
        elif self._restart_dataloader:
            # Re-mark the loader initialized once the switch epoch passed.
            train_loader._DataLoader__initialized = True
|
def mask_matrix_nms(masks, labels, scores, filter_thr=-1, nms_pre=-1, max_num=-1, kernel='gaussian', sigma=2.0, mask_area=None):
    """Matrix NMS for multi-class masks.

    Args:
        masks (Tensor): Has shape (num_instances, h, w).
        labels (Tensor): Labels of corresponding masks,
            has shape (num_instances,).
        scores (Tensor): Mask scores of corresponding masks,
            has shape (num_instances,).
        filter_thr (float): Score threshold to filter the masks
            after matrix nms. Default: -1, meaning no filtering.
        nms_pre (int): The max number of instances to run matrix nms on.
            Default: -1, meaning no pre-limit.
        max_num (int): If there are more than max_num masks after matrix
            nms, only the top max_num are kept. Default: -1, meaning no cap.
        kernel (str): 'linear' or 'gaussian'.
        sigma (float): std in the gaussian method.
        mask_area (Tensor, optional): Precomputed per-mask areas.

    Returns:
        tuple(Tensor): Processed mask results.

        - scores (Tensor): Updated scores, has shape (n,).
        - labels (Tensor): Remained labels, has shape (n,).
        - masks (Tensor): Remained masks, has shape (n, w, h).
        - keep_inds (Tensor): Indices of the remaining masks in the
          input masks, has shape (n,).
    """
    assert len(labels) == len(masks) == len(scores)

    def _empty():
        # Empty result tuple with the correct dtypes and trailing shape.
        return (scores.new_zeros(0), labels.new_zeros(0),
                masks.new_zeros(0, *masks.shape[-2:]), labels.new_zeros(0))

    if len(labels) == 0:
        return _empty()

    if mask_area is None:
        mask_area = masks.sum((1, 2)).float()
    else:
        assert len(masks) == len(mask_area)

    # Sort candidates by score and optionally keep only the top `nms_pre`.
    scores, order = torch.sort(scores, descending=True)
    keep_inds = order
    if nms_pre > 0 and len(order) > nms_pre:
        order = order[:nms_pre]
        keep_inds = keep_inds[:nms_pre]
        scores = scores[:nms_pre]
    masks = masks[order]
    mask_area = mask_area[order]
    labels = labels[order]

    num_masks = len(labels)
    flat_masks = masks.reshape(num_masks, -1).float()
    # Pairwise intersection and IoU between all masks (upper triangle only).
    inter_matrix = torch.mm(flat_masks, flat_masks.transpose(1, 0))
    expanded_area = mask_area.expand(num_masks, num_masks)
    union_matrix = expanded_area + expanded_area.transpose(1, 0) - inter_matrix
    iou_matrix = (inter_matrix / union_matrix).triu(diagonal=1)
    # Only masks of the same class suppress each other.
    expanded_labels = labels.expand(num_masks, num_masks)
    label_matrix = (expanded_labels == expanded_labels.transpose(1, 0)).triu(diagonal=1)

    # IoU compensation: highest same-class IoU seen by each candidate.
    compensate_iou, _ = (iou_matrix * label_matrix).max(0)
    compensate_iou = compensate_iou.expand(num_masks, num_masks).transpose(1, 0)
    decay_iou = iou_matrix * label_matrix

    if kernel == 'gaussian':
        decay_matrix = torch.exp(-1 * sigma * decay_iou ** 2)
        compensate_matrix = torch.exp(-1 * sigma * compensate_iou ** 2)
        decay_coefficient, _ = (decay_matrix / compensate_matrix).min(0)
    elif kernel == 'linear':
        decay_coefficient, _ = ((1 - decay_iou) / (1 - compensate_iou)).min(0)
    else:
        raise NotImplementedError(f'{kernel} kernel is not supported in matrix nms!')

    # Decay the scores and optionally drop low-scoring masks.
    scores = scores * decay_coefficient
    if filter_thr > 0:
        keep = scores >= filter_thr
        keep_inds = keep_inds[keep]
        if not keep.any():
            return _empty()
        masks = masks[keep]
        scores = scores[keep]
        labels = labels[keep]

    # Re-sort by the decayed score and optionally cap at `max_num`.
    scores, order = torch.sort(scores, descending=True)
    keep_inds = keep_inds[order]
    if max_num > 0 and len(order) > max_num:
        order = order[:max_num]
        keep_inds = keep_inds[:max_num]
        scores = scores[:max_num]
    masks = masks[order]
    labels = labels[order]

    return scores, labels, masks, keep_inds
|
def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
    """All-reduce ``tensors`` in flattened buckets and average by ``world_size``."""
    if bucket_size_mb > 0:
        bucket_size_bytes = bucket_size_mb * 1024 * 1024
        buckets = _take_tensors(tensors, bucket_size_bytes)
    else:
        # No size limit: group tensors by dtype so each group can be
        # flattened into one contiguous buffer.
        grouped = OrderedDict()
        for tensor in tensors:
            grouped.setdefault(tensor.type(), []).append(tensor)
        buckets = grouped.values()
    for bucket in buckets:
        flat = _flatten_dense_tensors(bucket)
        dist.all_reduce(flat)
        flat.div_(world_size)
        # Scatter the reduced values back into the original tensors.
        for tensor, synced in zip(bucket, _unflatten_dense_tensors(flat, bucket)):
            tensor.copy_(synced)
|
def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
    """Allreduce gradients.

    Args:
        params (list[torch.Parameters]): List of parameters of a model.
        coalesce (bool, optional): Whether to allreduce parameters as a
            whole. Defaults to True.
        bucket_size_mb (int, optional): Size of bucket, the unit is MB.
            Defaults to -1.
    """
    grads = [p.grad.data for p in params
             if p.requires_grad and p.grad is not None]
    world_size = dist.get_world_size()
    if not coalesce:
        # Reduce each gradient individually.
        for grad in grads:
            dist.all_reduce(grad.div_(world_size))
        return
    _allreduce_coalesced(grads, world_size, bucket_size_mb)
|
class DistOptimizerHook(OptimizerHook):
    """Deprecated optimizer hook for distributed training.

    Kept only for backward compatibility; use ``mmcv.runner.OptimizerHook``
    directly instead.
    """

    def __init__(self, *args, **kwargs):
        # The original message was missing the space before the quoted name
        # ('...switch to"mmcv...').
        warnings.warn('"DistOptimizerHook" is deprecated, please switch to "mmcv.runner.OptimizerHook".')
        super().__init__(*args, **kwargs)
|
def reduce_mean(tensor):
    """Obtain the mean of a tensor across all GPUs.

    Returns the input unchanged when distributed training is unavailable or
    not initialized.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return tensor
    # Clone so the caller's tensor is not modified in place.
    averaged = tensor.clone()
    dist.all_reduce(averaged.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)
    return averaged
|
def obj2tensor(pyobj, device='cuda'):
    """Serialize a picklable Python object into a byte tensor."""
    payload = pickle.dumps(pyobj)
    byte_storage = torch.ByteStorage.from_buffer(payload)
    return torch.ByteTensor(byte_storage).to(device=device)
|
def tensor2obj(tensor):
    """Deserialize a byte tensor back into the original Python object."""
    payload = tensor.cpu().numpy().tobytes()
    return pickle.loads(payload)
|
@functools.lru_cache()
def _get_global_gloo_group():
    """Return a process group based on the gloo backend, containing all ranks.

    The result is cached.
    """
    backend = dist.get_backend()
    # NCCL groups cannot run CPU collectives, so build a gloo group instead.
    return dist.new_group(backend='gloo') if backend == 'nccl' else dist.group.WORLD
|
def all_reduce_dict(py_dict, op='sum', group=None, to_float=True):
    """Apply an all-reduce over every tensor value of a Python dict.

    Modified from https://github.com/Megvii-BaseDetection/YOLOX/blob/main/
    yolox/utils/allreduce_norm.py.

    NOTE: ``py_dict`` must have the same keys on every rank and the values
    must have matching shapes. Currently only the NCCL backend is supported.

    Args:
        py_dict (dict): Dict to be all-reduced.
        op (str): Operator, either 'sum' or 'mean'. Default: 'sum'.
        group (:obj:`torch.distributed.group`, optional): Deprecated,
            ignored. Default: None.
        to_float (bool): Whether to convert all values to float before
            reducing. Default: True.

    Returns:
        OrderedDict: Reduced python dict object.
    """
    warnings.warn('group` is deprecated. Currently only supports NCCL backend.')
    _, world_size = get_dist_info()
    if world_size == 1:
        return py_dict

    # For plain dicts the key order may differ per rank; broadcast the key
    # list from rank 0 so every rank reduces in the same order.
    py_key = list(py_dict.keys())
    if not isinstance(py_dict, OrderedDict):
        py_key_tensor = obj2tensor(py_key)
        dist.broadcast(py_key_tensor, src=0)
        py_key = tensor2obj(py_key_tensor)

    shapes = [py_dict[k].shape for k in py_key]
    numels = [py_dict[k].numel() for k in py_key]

    if to_float:
        warnings.warn('Note: the "to_float" is True, you need to ensure that the behavior is reasonable.')
        flatten_tensor = torch.cat([py_dict[k].flatten().float() for k in py_key])
    else:
        flatten_tensor = torch.cat([py_dict[k].flatten() for k in py_key])

    dist.all_reduce(flatten_tensor, op=dist.ReduceOp.SUM)
    if op == 'mean':
        flatten_tensor /= world_size

    # Split the reduced buffer back into per-key tensors.
    chunks = torch.split(flatten_tensor, numels)
    out_dict = {k: chunk.reshape(shape)
                for k, chunk, shape in zip(py_key, chunks, shapes)}
    if isinstance(py_dict, OrderedDict):
        out_dict = OrderedDict(out_dict)
    return out_dict
|
def palette_val(palette):
    """Convert palette to matplotlib palette.

    Args:
        palette (list[tuple]): A list of 0-255 integer color tuples.

    Returns:
        list[tuple[float]]: A list of RGB matplotlib color tuples in [0, 1].
    """
    return [tuple(channel / 255 for channel in color) for color in palette]
|
def get_palette(palette, num_classes):
    """Get palette from various inputs.

    Args:
        palette (list[tuple] | str | tuple | :obj:`Color`): palette inputs.
        num_classes (int): the number of classes.

    Returns:
        list[tuple[int]]: A list of color tuples.
    """
    assert isinstance(num_classes, int)

    def _validated(colors):
        # Every branch must yield at least one color per class.
        assert len(colors) >= num_classes, 'The length of palette should not be less than `num_classes`.'
        return colors

    if isinstance(palette, list):
        return _validated(palette)
    if isinstance(palette, tuple):
        return _validated([palette] * num_classes)
    if palette is None or palette == 'random':
        # Deterministic "random" colors: seed, draw, then restore RNG state.
        state = np.random.get_state()
        np.random.seed(42)
        drawn = np.random.randint(0, 256, size=(num_classes, 3))
        np.random.set_state(state)
        return _validated([tuple(c) for c in drawn])
    if palette == 'coco':
        from mmdet.datasets import CocoDataset, CocoPanopticDataset
        colors = CocoDataset.PALETTE
        if len(colors) < num_classes:
            colors = CocoPanopticDataset.PALETTE
        return _validated(colors)
    if palette == 'citys':
        from mmdet.datasets import CityscapesDataset
        return _validated(CityscapesDataset.PALETTE)
    if palette == 'voc':
        from mmdet.datasets import VOCDataset
        return _validated(VOCDataset.PALETTE)
    if mmcv.is_str(palette):
        # A single named color, repeated; color_val returns BGR, so reverse.
        return _validated([mmcv.color_val(palette)[::-1]] * num_classes)
    raise TypeError(f'Invalid type for palette: {type(palette)}')
|
class COCO(_COCO):
    """This class is almost the same as official pycocotools package.

    It implements some snake case function aliases. So that the COCO class
    has the same interface as LVIS class.
    """

    def __init__(self, annotation_file=None):
        # ``mmpycocotools`` reports versions like '12.0.x' while the official
        # ``pycocotools`` uses '2.0.x'.  The original lexicographic string
        # comparison treated '2.0.2' >= '12.0.2' as True, so the deprecation
        # warning also fired for the official package; compare numerically.
        version = getattr(pycocotools, '__version__', '0')
        try:
            parsed = tuple(int(part) for part in version.split('.')[:3])
        except ValueError:
            # Non-numeric version component; assume the official package.
            parsed = (0,)
        if parsed >= (12, 0, 2):
            warnings.warn('mmpycocotools is deprecated. Please install official pycocotools by "pip install pycocotools"', UserWarning)
        super().__init__(annotation_file=annotation_file)
        self.img_ann_map = self.imgToAnns
        self.cat_img_map = self.catToImgs

    def get_ann_ids(self, img_ids=[], cat_ids=[], area_rng=[], iscrowd=None):
        """Snake-case alias of ``getAnnIds``."""
        return self.getAnnIds(img_ids, cat_ids, area_rng, iscrowd)

    def get_cat_ids(self, cat_names=[], sup_names=[], cat_ids=[]):
        """Snake-case alias of ``getCatIds``."""
        return self.getCatIds(cat_names, sup_names, cat_ids)

    def get_img_ids(self, img_ids=[], cat_ids=[]):
        """Snake-case alias of ``getImgIds``."""
        return self.getImgIds(img_ids, cat_ids)

    def load_anns(self, ids):
        """Snake-case alias of ``loadAnns``."""
        return self.loadAnns(ids)

    def load_cats(self, ids):
        """Snake-case alias of ``loadCats``."""
        return self.loadCats(ids)

    def load_imgs(self, ids):
        """Snake-case alias of ``loadImgs``."""
        return self.loadImgs(ids)
|
def pq_compute_single_core(proc_id, annotation_set, gt_folder, pred_folder, categories, file_client=None):
    """The single core function to evaluate the metric of Panoptic
    Segmentation.

    Same as the function with the same name in `panopticapi`. Only the
    function to load the images is changed to use the file client.

    Args:
        proc_id (int): The id of the mini process.
        annotation_set (list[tuple]): (gt_ann, pred_ann) pairs to evaluate.
        gt_folder (str): The path of the ground truth images.
        pred_folder (str): The path of the prediction images.
        categories (str): The categories of the dataset.
        file_client (object): The file client of the dataset. If None,
            the backend will be set to `disk`.

    Returns:
        PQStat: Accumulated per-category TP/FP/FN/IoU statistics.
    """
    if (PQStat is None):
        raise RuntimeError('panopticapi is not installed, please install it by: pip install git+https://github.com/cocodataset/panopticapi.git.')
    if (file_client is None):
        file_client_args = dict(backend='disk')
        file_client = mmcv.FileClient(**file_client_args)
    pq_stat = PQStat()
    idx = 0
    for (gt_ann, pred_ann) in annotation_set:
        if ((idx % 100) == 0):
            print('Core: {}, {} from {} images processed'.format(proc_id, idx, len(annotation_set)))
        idx += 1
        # Load GT via the file client; predictions are read from local disk.
        img_bytes = file_client.get(os.path.join(gt_folder, gt_ann['file_name']))
        pan_gt = mmcv.imfrombytes(img_bytes, flag='color', channel_order='rgb')
        # rgb2id packs the RGB channels into a single segment-id map.
        pan_gt = rgb2id(pan_gt)
        pan_pred = mmcv.imread(os.path.join(pred_folder, pred_ann['file_name']), flag='color', channel_order='rgb')
        pan_pred = rgb2id(pan_pred)
        gt_segms = {el['id']: el for el in gt_ann['segments_info']}
        pred_segms = {el['id']: el for el in pred_ann['segments_info']}
        # Track predicted ids from JSON to verify PNG/JSON consistency.
        pred_labels_set = set((el['id'] for el in pred_ann['segments_info']))
        (labels, labels_cnt) = np.unique(pan_pred, return_counts=True)
        for (label, label_cnt) in zip(labels, labels_cnt):
            if (label not in pred_segms):
                if (label == VOID):
                    continue
                raise KeyError('In the image with ID {} segment with ID {} is presented in PNG and not presented in JSON.'.format(gt_ann['image_id'], label))
            # Record the actual pixel area of each predicted segment.
            pred_segms[label]['area'] = label_cnt
            pred_labels_set.remove(label)
            if (pred_segms[label]['category_id'] not in categories):
                raise KeyError('In the image with ID {} segment with ID {} has unknown category_id {}.'.format(gt_ann['image_id'], label, pred_segms[label]['category_id']))
        if (len(pred_labels_set) != 0):
            raise KeyError('In the image with ID {} the following segment IDs {} are presented in JSON and not presented in PNG.'.format(gt_ann['image_id'], list(pred_labels_set)))
        # Encode (gt_id, pred_id) pairs into a single integer per pixel so
        # pairwise intersections can be counted with one np.unique pass.
        pan_gt_pred = ((pan_gt.astype(np.uint64) * OFFSET) + pan_pred.astype(np.uint64))
        gt_pred_map = {}
        (labels, labels_cnt) = np.unique(pan_gt_pred, return_counts=True)
        for (label, intersection) in zip(labels, labels_cnt):
            gt_id = (label // OFFSET)
            pred_id = (label % OFFSET)
            gt_pred_map[(gt_id, pred_id)] = intersection
        # Match GT and predicted segments: same category, IoU > 0.5.
        gt_matched = set()
        pred_matched = set()
        for (label_tuple, intersection) in gt_pred_map.items():
            (gt_label, pred_label) = label_tuple
            if (gt_label not in gt_segms):
                continue
            if (pred_label not in pred_segms):
                continue
            if (gt_segms[gt_label]['iscrowd'] == 1):
                continue
            if (gt_segms[gt_label]['category_id'] != pred_segms[pred_label]['category_id']):
                continue
            # VOID pixels overlapping the prediction are excluded from union.
            union = (((pred_segms[pred_label]['area'] + gt_segms[gt_label]['area']) - intersection) - gt_pred_map.get((VOID, pred_label), 0))
            iou = (intersection / union)
            if (iou > 0.5):
                pq_stat[gt_segms[gt_label]['category_id']].tp += 1
                pq_stat[gt_segms[gt_label]['category_id']].iou += iou
                gt_matched.add(gt_label)
                pred_matched.add(pred_label)
        # Unmatched, non-crowd GT segments count as false negatives.
        crowd_labels_dict = {}
        for (gt_label, gt_info) in gt_segms.items():
            if (gt_label in gt_matched):
                continue
            if (gt_info['iscrowd'] == 1):
                # Remember crowd regions per category for the FP check below.
                crowd_labels_dict[gt_info['category_id']] = gt_label
                continue
            pq_stat[gt_info['category_id']].fn += 1
        # Unmatched predictions count as false positives, unless mostly
        # covered by VOID or a same-category crowd region.
        for (pred_label, pred_info) in pred_segms.items():
            if (pred_label in pred_matched):
                continue
            intersection = gt_pred_map.get((VOID, pred_label), 0)
            if (pred_info['category_id'] in crowd_labels_dict):
                intersection += gt_pred_map.get((crowd_labels_dict[pred_info['category_id']], pred_label), 0)
            if ((intersection / pred_info['area']) > 0.5):
                continue
            pq_stat[pred_info['category_id']].fp += 1
    print('Core: {}, all {} images processed'.format(proc_id, len(annotation_set)))
    return pq_stat
|
def pq_compute_multi_core(matched_annotations_list, gt_folder, pred_folder, categories, file_client=None):
    """Evaluate the metrics of Panoptic Segmentation with multithreading.

    Same as the function with the same name in `panopticapi`.

    Args:
        matched_annotations_list (list): The matched annotation list. Each
            element is a tuple of annotations of the same image with the
            format (gt_anns, pred_anns).
        gt_folder (str): The path of the ground truth images.
        pred_folder (str): The path of the prediction images.
        categories (str): The categories of the dataset.
        file_client (object): The file client of the dataset. If None,
            the backend will be set to `disk`.

    Returns:
        PQStat: Accumulated statistics over all worker processes.
    """
    if PQStat is None:
        raise RuntimeError('panopticapi is not installed, please install it by: pip install git+https://github.com/cocodataset/panopticapi.git.')
    if file_client is None:
        file_client_args = dict(backend='disk')
        file_client = mmcv.FileClient(**file_client_args)
    cpu_num = multiprocessing.cpu_count()
    annotations_split = np.array_split(matched_annotations_list, cpu_num)
    print('Number of cores: {}, images per core: {}'.format(cpu_num, len(annotations_split[0])))
    workers = multiprocessing.Pool(processes=cpu_num)
    processes = []
    for proc_id, annotation_set in enumerate(annotations_split):
        p = workers.apply_async(pq_compute_single_core, (proc_id, annotation_set, gt_folder, pred_folder, categories, file_client))
        processes.append(p)
    # Close the pool so its worker processes are reclaimed after the pending
    # tasks finish; the original version never closed it, leaking one pool of
    # processes per call.
    workers.close()
    pq_stat = PQStat()
    for p in processes:
        pq_stat += p.get()
    workers.join()
    return pq_stat
|
def _concat_dataset(cfg, default_args=None):
    """Build a ``ConcatDataset`` from a config whose ``ann_file`` is a list."""
    from .dataset_wrappers import ConcatDataset
    ann_files = cfg['ann_file']
    img_prefixes = cfg.get('img_prefix', None)
    seg_prefixes = cfg.get('seg_prefix', None)
    proposal_files = cfg.get('proposal_file', None)
    separate_eval = cfg.get('separate_eval', True)

    datasets = []
    for i in range(len(ann_files)):
        data_cfg = copy.deepcopy(cfg)
        # `separate_eval` belongs to the wrapper, not the child dataset.
        data_cfg.pop('separate_eval', None)
        data_cfg['ann_file'] = ann_files[i]
        # Per-dataset values are only honoured when given as sequences.
        for key, values in (('img_prefix', img_prefixes),
                            ('seg_prefix', seg_prefixes),
                            ('proposal_file', proposal_files)):
            if isinstance(values, (list, tuple)):
                data_cfg[key] = values[i]
        datasets.append(build_dataset(data_cfg, default_args))

    return ConcatDataset(datasets, separate_eval)
|
def build_dataset(cfg, default_args=None):
    """Recursively build a dataset (and any wrapper datasets) from config."""
    from .dataset_wrappers import ClassBalancedDataset, ConcatDataset, MultiImageMixDataset, RepeatDataset
    # A list/tuple of configs is an implicit ConcatDataset.
    if isinstance(cfg, (list, tuple)):
        return ConcatDataset([build_dataset(c, default_args) for c in cfg])
    dataset_type = cfg['type']
    if dataset_type == 'ConcatDataset':
        return ConcatDataset([build_dataset(c, default_args) for c in cfg['datasets']], cfg.get('separate_eval', True))
    if dataset_type == 'RepeatDataset':
        return RepeatDataset(build_dataset(cfg['dataset'], default_args), cfg['times'])
    if dataset_type == 'ClassBalancedDataset':
        return ClassBalancedDataset(build_dataset(cfg['dataset'], default_args), cfg['oversample_thr'])
    if dataset_type == 'MultiImageMixDataset':
        cp_cfg = copy.deepcopy(cfg)
        cp_cfg['dataset'] = build_dataset(cp_cfg['dataset'])
        cp_cfg.pop('type')
        return MultiImageMixDataset(**cp_cfg)
    # Several annotation files for one dataset type -> concatenate them.
    if isinstance(cfg.get('ann_file'), (list, tuple)):
        return _concat_dataset(cfg, default_args)
    return build_from_cfg(cfg, DATASETS, default_args)
|
def build_dataloader(dataset, samples_per_gpu, workers_per_gpu, num_gpus=1, dist=True, shuffle=True, seed=None, runner_type='EpochBasedRunner', persistent_workers=False, **kwargs):
    """Build PyTorch DataLoader.

    In distributed training, each GPU/process has a dataloader.
    In non-distributed training, there is only one dataloader for all GPUs.

    Args:
        dataset (Dataset): A PyTorch dataset.
        samples_per_gpu (int): Number of training samples on each GPU, i.e.,
            batch size of each GPU.
        workers_per_gpu (int): How many subprocesses to use for data loading
            for each GPU.
        num_gpus (int): Number of GPUs. Only used in non-distributed training.
        dist (bool): Distributed training/test or not. Default: True.
        shuffle (bool): Whether to shuffle the data at every epoch.
            Default: True.
        seed (int, Optional): Seed to be used. Default: None.
        runner_type (str): Type of runner. Default: `EpochBasedRunner`.
        persistent_workers (bool): If True, the data loader will not shutdown
            the worker processes after a dataset has been consumed once.
            This allows to maintain the workers `Dataset` instances alive.
            This argument is only valid when PyTorch>=1.7.0. Default: False.
        kwargs: any keyword argument to be used to initialize DataLoader.

    Returns:
        DataLoader: A PyTorch dataloader.
    """
    (rank, world_size) = get_dist_info()
    if dist:
        # Distributed: each process loads its own shard at per-GPU size.
        batch_size = samples_per_gpu
        num_workers = workers_per_gpu
    else:
        # Non-distributed: one loader feeds all GPUs.
        batch_size = (num_gpus * samples_per_gpu)
        num_workers = (num_gpus * workers_per_gpu)
    if (runner_type == 'IterBasedRunner'):
        # Iter-based runners use infinite samplers that yield whole batches.
        if shuffle:
            batch_sampler = InfiniteGroupBatchSampler(dataset, batch_size, world_size, rank, seed=seed)
        else:
            batch_sampler = InfiniteBatchSampler(dataset, batch_size, world_size, rank, seed=seed, shuffle=False)
        # The batch sampler already batches, so neutralize DataLoader's own
        # batch_size/sampler arguments.
        batch_size = 1
        sampler = None
    else:
        if dist:
            if shuffle:
                sampler = DistributedGroupSampler(dataset, samples_per_gpu, world_size, rank, seed=seed)
            else:
                sampler = DistributedSampler(dataset, world_size, rank, shuffle=False, seed=seed)
        else:
            sampler = (GroupSampler(dataset, samples_per_gpu) if shuffle else None)
        batch_sampler = None
    # Seed each worker deterministically when a global seed is given.
    init_fn = (partial(worker_init_fn, num_workers=num_workers, rank=rank, seed=seed) if (seed is not None) else None)
    if ((TORCH_VERSION != 'parrots') and (digit_version(TORCH_VERSION) >= digit_version('1.7.0'))):
        # `persistent_workers` is only supported by PyTorch >= 1.7.0.
        kwargs['persistent_workers'] = persistent_workers
    elif (persistent_workers is True):
        warnings.warn('persistent_workers is invalid because your pytorch version is lower than 1.7.0')
    data_loader = DataLoader(dataset, batch_size=batch_size, sampler=sampler, num_workers=num_workers, batch_sampler=batch_sampler, collate_fn=partial(collate, samples_per_gpu=samples_per_gpu), pin_memory=False, worker_init_fn=init_fn, **kwargs)
    return data_loader
|
def worker_init_fn(worker_id, num_workers, rank, seed):
    """Seed numpy and random inside each dataloader worker.

    Each worker receives a distinct, deterministic seed derived from the
    global seed, the process rank and the worker id.
    """
    worker_seed = num_workers * rank + worker_id + seed
    np.random.seed(worker_seed)
    random.seed(worker_seed)
|
@DATASETS.register_module()
class CityscapesDataset(CocoDataset):
    """Cityscapes dataset for instance segmentation (COCO-style annotations).

    Supports the standard COCO evaluation protocol as well as the official
    Cityscapes instance-level evaluation (``metric='cityscapes'``).
    """

    CLASSES = ('person', 'rider', 'car', 'truck', 'bus', 'train',
               'motorcycle', 'bicycle')

    PALETTE = [(220, 20, 60), (255, 0, 0), (0, 0, 142), (0, 0, 70),
               (0, 60, 100), (0, 80, 100), (0, 0, 230), (119, 11, 32)]

    def _filter_imgs(self, min_size=32):
        """Filter images too small or without ground truths.

        Args:
            min_size (int): Minimum image width/height to keep. Default: 32.

        Returns:
            list[int]: Indices of the valid images in ``self.data_infos``.
        """
        valid_inds = []
        # Image ids that have at least one annotation.
        ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
        # Image ids containing at least one wanted category ...
        ids_in_cat = set()
        for class_id in self.cat_ids:
            ids_in_cat |= set(self.coco.cat_img_map[class_id])
        # ... and at least one annotation.
        ids_in_cat &= ids_with_ann

        valid_img_ids = []
        for i, img_info in enumerate(self.data_infos):
            img_id = img_info['id']
            ann_ids = self.coco.getAnnIds(imgIds=[img_id])
            ann_info = self.coco.loadAnns(ann_ids)
            # `all` is True for empty ann_info, so annotation-less images are
            # also filtered below when filter_empty_gt is set.
            all_iscrowd = all([_['iscrowd'] for _ in ann_info])
            if self.filter_empty_gt and (self.img_ids[i] not in ids_in_cat
                                         or all_iscrowd):
                continue
            if min(img_info['width'], img_info['height']) >= min_size:
                valid_inds.append(i)
                valid_img_ids.append(img_id)
        self.img_ids = valid_img_ids
        return valid_inds

    def _parse_ann_info(self, img_info, ann_info):
        """Parse bbox and mask annotation.

        Args:
            img_info (dict): Image info of an image.
            ann_info (list[dict]): Annotation info of an image.

        Returns:
            dict: A dict containing the following keys: bboxes,
                bboxes_ignore, labels, masks, seg_map.
                "masks" are already decoded into binary masks.
        """
        gt_bboxes = []
        gt_labels = []
        gt_bboxes_ignore = []
        gt_masks_ann = []

        for ann in ann_info:
            if ann.get('ignore', False):
                continue
            x1, y1, w, h = ann['bbox']
            # Skip degenerate or empty boxes.
            if ann['area'] <= 0 or w < 1 or h < 1:
                continue
            if ann['category_id'] not in self.cat_ids:
                continue
            bbox = [x1, y1, x1 + w, y1 + h]
            if ann.get('iscrowd', False):
                gt_bboxes_ignore.append(bbox)
            else:
                gt_bboxes.append(bbox)
                gt_labels.append(self.cat2label[ann['category_id']])
                gt_masks_ann.append(ann['segmentation'])

        if gt_bboxes:
            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
            gt_labels = np.array(gt_labels, dtype=np.int64)
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
            gt_labels = np.array([], dtype=np.int64)

        if gt_bboxes_ignore:
            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
        else:
            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)

        return dict(bboxes=gt_bboxes, labels=gt_labels,
                    bboxes_ignore=gt_bboxes_ignore, masks=gt_masks_ann,
                    seg_map=img_info['segm_file'])

    def results2txt(self, results, outfile_prefix):
        """Dump the detection results to a txt file.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            outfile_prefix (str): The filename prefix of the output files.
                If the prefix is "somepath/xxx",
                the txt files will be named "somepath/xxx.txt".

        Returns:
            list[str]: Result txt files which contain corresponding
                instance segmentation images.
        """
        try:
            import cityscapesscripts.helpers.labels as CSLabels
        except ImportError:
            raise ImportError('Please run "pip install cityscapesscripts" to install cityscapesscripts first.')
        result_files = []
        os.makedirs(outfile_prefix, exist_ok=True)
        prog_bar = mmcv.ProgressBar(len(self))
        for idx in range(len(self)):
            result = results[idx]
            filename = self.data_infos[idx]['filename']
            basename = osp.splitext(osp.basename(filename))[0]
            pred_txt = osp.join(outfile_prefix, basename + '_pred.txt')

            bbox_result, segm_result = result
            bboxes = np.vstack(bbox_result)
            # segm_result may carry per-mask scores as a second element.
            if isinstance(segm_result, tuple):
                segms = mmcv.concat_list(segm_result[0])
                mask_score = segm_result[1]
            else:
                segms = mmcv.concat_list(segm_result)
                # Fall back to the bbox confidence as the mask score.
                mask_score = [bbox[-1] for bbox in bboxes]
            labels = [np.full(bbox.shape[0], i, dtype=np.int32)
                      for i, bbox in enumerate(bbox_result)]
            labels = np.concatenate(labels)

            assert len(bboxes) == len(segms) == len(labels)
            num_instances = len(bboxes)
            prog_bar.update()
            with open(pred_txt, 'w') as fout:
                for i in range(num_instances):
                    pred_class = labels[i]
                    classes = self.CLASSES[pred_class]
                    class_id = CSLabels.name2label[classes].id
                    score = mask_score[i]
                    mask = maskUtils.decode(segms[i]).astype(np.uint8)
                    png_filename = osp.join(outfile_prefix, basename + f'_{i}_{classes}.png')
                    mmcv.imwrite(mask, png_filename)
                    fout.write(f'{osp.basename(png_filename)} {class_id} {score}\n')
            result_files.append(pred_txt)
        return result_files

    def format_results(self, results, txtfile_prefix=None):
        """Format the results to txt (standard format for Cityscapes
        evaluation).

        Args:
            results (list): Testing results of the dataset.
            txtfile_prefix (str | None): The prefix of txt files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.

        Returns:
            tuple: (result_files, tmp_dir), result_files is a dict containing
                the json filepaths, tmp_dir is the temporal directory created
                for saving txt/png files when txtfile_prefix is not specified.
        """
        assert isinstance(results, list), 'results must be a list'
        assert len(results) == len(self), (
            'The length of results is not equal to the dataset len: {} != {}'.format(len(results), len(self)))
        if txtfile_prefix is None:
            # No persistent location requested; the caller owns the temp dir.
            tmp_dir = tempfile.TemporaryDirectory()
            txtfile_prefix = osp.join(tmp_dir.name, 'results')
        else:
            tmp_dir = None
        result_files = self.results2txt(results, txtfile_prefix)
        return result_files, tmp_dir

    def evaluate(self, results, metric='bbox', logger=None, outfile_prefix=None, classwise=False, proposal_nums=(100, 300, 1000), iou_thrs=np.arange(0.5, 0.96, 0.05)):
        """Evaluation in Cityscapes/COCO protocol.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'bbox', 'segm', 'proposal', 'proposal_fast' and 'cityscapes'.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            outfile_prefix (str | None): The prefix of output file. It
                includes the file path and the prefix of filename, e.g.,
                "a/b/prefix". For COCO metrics it is the prefix of the
                output json files; for the cityscapes metric it is the
                prefix of output txt/png files. If not specified, a temp
                file will be created. Default: None.
            classwise (bool): Whether to evaluate the AP for each class.
            proposal_nums (Sequence[int]): Proposal numbers used for
                evaluating recalls. Default: (100, 300, 1000).
            iou_thrs (Sequence[float]): IoU thresholds used for evaluating
                recalls.

        Returns:
            dict[str, float]: COCO style evaluation metric or cityscapes mAP
                and AP@50.
        """
        eval_results = dict()
        # Copy so removing 'cityscapes' does not mutate the caller's list.
        metrics = metric.copy() if isinstance(metric, list) else [metric]
        if 'cityscapes' in metrics:
            eval_results.update(self._evaluate_cityscapes(results, outfile_prefix, logger))
            metrics.remove('cityscapes')
        if len(metrics) > 0:
            # Remaining metrics use the COCO protocol on a plain CocoDataset
            # built from the same annotation file.
            self_coco = CocoDataset(self.ann_file, self.pipeline.transforms, None, self.data_root, self.img_prefix, self.seg_prefix, self.proposal_file, self.test_mode, self.filter_empty_gt)
            self_coco.CLASSES = self.CLASSES
            self_coco.data_infos = self_coco.load_annotations(self.ann_file)
            eval_results.update(self_coco.evaluate(results, metrics, logger, outfile_prefix, classwise, proposal_nums, iou_thrs))
        return eval_results

    def _evaluate_cityscapes(self, results, txtfile_prefix, logger):
        """Evaluation in Cityscapes protocol.

        Args:
            results (list): Testing results of the dataset.
            txtfile_prefix (str | None): The prefix of the output txt files.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.

        Returns:
            dict[str: float]: Cityscapes evaluation results, contains 'mAP'
                and 'AP@50'.
        """
        try:
            import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as CSEval
        except ImportError:
            raise ImportError('Please run "pip install cityscapesscripts" to install cityscapesscripts first.')
        msg = 'Evaluating in Cityscapes style'
        if logger is None:
            msg = '\n' + msg
        print_log(msg, logger=logger)

        result_files, tmp_dir = self.format_results(results, txtfile_prefix)
        if tmp_dir is None:
            result_dir = osp.join(txtfile_prefix, 'results')
        else:
            result_dir = osp.join(tmp_dir.name, 'results')

        eval_results = OrderedDict()
        print_log(f'Evaluating results under {result_dir} ...', logger=logger)
        # Configure the official Cityscapes evaluator in-place.
        CSEval.args.cityscapesPath = os.path.join(self.img_prefix, '../..')
        CSEval.args.predictionPath = os.path.abspath(result_dir)
        CSEval.args.predictionWalk = None
        CSEval.args.JSONOutput = False
        CSEval.args.colorized = False
        CSEval.args.gtInstancesFile = os.path.join(result_dir, 'gtInstances.json')
        CSEval.args.groundTruthSearch = os.path.join(self.img_prefix.replace('leftImg8bit', 'gtFine'), '*/*_gtFine_instanceIds.png')

        groundTruthImgList = glob.glob(CSEval.args.groundTruthSearch)
        assert len(groundTruthImgList), f'Cannot find ground truth images in {CSEval.args.groundTruthSearch}.'
        predictionImgList = []
        for gt in groundTruthImgList:
            predictionImgList.append(CSEval.getPrediction(gt, CSEval.args))
        CSEval_results = CSEval.evaluateImgLists(predictionImgList, groundTruthImgList, CSEval.args)['averages']

        eval_results['mAP'] = CSEval_results['allAp']
        eval_results['AP@50'] = CSEval_results['allAp50%']
        if tmp_dir is not None:
            tmp_dir.cleanup()
        return eval_results
|
@DATASETS.register_module()
class CustomDataset(Dataset):
    """Custom dataset for detection.

    The annotation format is shown as follows. The ``ann`` field is optional
    for testing.

    .. code-block:: none

        [
            {
                'filename': 'a.jpg',
                'width': 1280,
                'height': 720,
                'ann': {
                    'bboxes': <np.ndarray> (n, 4) in (x1, y1, x2, y2) order.
                    'labels': <np.ndarray> (n, ),
                    'bboxes_ignore': <np.ndarray> (k, 4), (optional field)
                    'labels_ignore': <np.ndarray> (k, 4) (optional field)
                }
            },
            ...
        ]

    Args:
        ann_file (str): Annotation file path.
        pipeline (list[dict]): Processing pipeline.
        classes (str | Sequence[str], optional): Specify classes to load.
            If is None, ``cls.CLASSES`` will be used. Default: None.
        data_root (str, optional): Data root for ``ann_file``,
            ``img_prefix``, ``seg_prefix``, ``proposal_file`` if specified.
        img_prefix (str, optional): Prefix prepended to image paths.
        seg_prefix (str, optional): Prefix prepended to segmentation maps.
        proposal_file (str, optional): File of precomputed proposals.
        test_mode (bool, optional): If set True, annotation will not be loaded.
        filter_empty_gt (bool, optional): If set true, images without bounding
            boxes of the dataset's classes will be filtered out. This option
            only works when `test_mode=False`, i.e., we never filter images
            during tests.
        file_client_args (dict): Arguments to instantiate the file client.
            Default: ``dict(backend='disk')``.
    """
    CLASSES = None
    PALETTE = None

    def __init__(self, ann_file, pipeline, classes=None, data_root=None, img_prefix='', seg_prefix=None, proposal_file=None, test_mode=False, filter_empty_gt=True, file_client_args=dict(backend='disk')):
        self.ann_file = ann_file
        self.data_root = data_root
        self.img_prefix = img_prefix
        self.seg_prefix = seg_prefix
        self.proposal_file = proposal_file
        self.test_mode = test_mode
        self.filter_empty_gt = filter_empty_gt
        self.CLASSES = self.get_classes(classes)
        self.file_client = mmcv.FileClient(**file_client_args)
        # Join data_root onto every relative path so callers may pass
        # either absolute or root-relative paths.
        if self.data_root is not None:
            if not osp.isabs(self.ann_file):
                self.ann_file = osp.join(self.data_root, self.ann_file)
            if not (self.img_prefix is None or osp.isabs(self.img_prefix)):
                self.img_prefix = osp.join(self.data_root, self.img_prefix)
            if not (self.seg_prefix is None or osp.isabs(self.seg_prefix)):
                self.seg_prefix = osp.join(self.data_root, self.seg_prefix)
            if not (self.proposal_file is None or osp.isabs(self.proposal_file)):
                self.proposal_file = osp.join(self.data_root, self.proposal_file)
        # get_local_path (MMCV >= 1.3.16) downloads remote annotation files
        # to a local temp path; older MMCV can only handle local paths.
        if hasattr(self.file_client, 'get_local_path'):
            with self.file_client.get_local_path(self.ann_file) as local_path:
                self.data_infos = self.load_annotations(local_path)
        else:
            warnings.warn(f'The used MMCV version does not have get_local_path. We treat the {self.ann_file} as local paths and it might cause errors if the path is not a local path. Please use MMCV>= 1.3.16 if you meet errors.')
            self.data_infos = self.load_annotations(self.ann_file)
        if self.proposal_file is not None:
            if hasattr(self.file_client, 'get_local_path'):
                with self.file_client.get_local_path(self.proposal_file) as local_path:
                    self.proposals = self.load_proposals(local_path)
            else:
                warnings.warn(f'The used MMCV version does not have get_local_path. We treat the {self.ann_file} as local paths and it might cause errors if the path is not a local path. Please use MMCV>= 1.3.16 if you meet errors.')
                self.proposals = self.load_proposals(self.proposal_file)
        else:
            self.proposals = None
        # Filtering and aspect-ratio grouping are training-only concerns.
        if not test_mode:
            valid_inds = self._filter_imgs()
            self.data_infos = [self.data_infos[i] for i in valid_inds]
            if self.proposals is not None:
                self.proposals = [self.proposals[i] for i in valid_inds]
            self._set_group_flag()
        self.pipeline = Compose(pipeline)

    def __len__(self):
        """Total number of samples of data."""
        return len(self.data_infos)

    def load_annotations(self, ann_file):
        """Load annotation from annotation file."""
        return mmcv.load(ann_file)

    def load_proposals(self, proposal_file):
        """Load proposal from proposal file."""
        return mmcv.load(proposal_file)

    def get_ann_info(self, idx):
        """Get annotation by index.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Annotation info of specified index.
        """
        return self.data_infos[idx]['ann']

    def get_cat_ids(self, idx):
        """Get category ids by index.

        Args:
            idx (int): Index of data.

        Returns:
            list[int]: All categories in the image of specified index.
        """
        # BUGFIX: ``np.int`` was deprecated in NumPy 1.20 and removed in
        # 1.24; use the explicit ``np.int64`` dtype instead.
        return self.data_infos[idx]['ann']['labels'].astype(np.int64).tolist()

    def pre_pipeline(self, results):
        """Prepare results dict for pipeline."""
        results['img_prefix'] = self.img_prefix
        results['seg_prefix'] = self.seg_prefix
        results['proposal_file'] = self.proposal_file
        results['bbox_fields'] = []
        results['mask_fields'] = []
        results['seg_fields'] = []

    def _filter_imgs(self, min_size=32):
        """Filter images too small."""
        if self.filter_empty_gt:
            # The base class has no annotation format guarantee, so it can
            # only filter by image size, not by emptiness of gt.
            warnings.warn('CustomDataset does not support filtering empty gt images.')
        valid_inds = []
        for i, img_info in enumerate(self.data_infos):
            if min(img_info['width'], img_info['height']) >= min_size:
                valid_inds.append(i)
        return valid_inds

    def _set_group_flag(self):
        """Set flag according to image aspect ratio.

        Images with aspect ratio greater than 1 will be set as group 1,
        otherwise group 0.
        """
        self.flag = np.zeros(len(self), dtype=np.uint8)
        for i in range(len(self)):
            img_info = self.data_infos[i]
            if img_info['width'] / img_info['height'] > 1:
                self.flag[i] = 1

    def _rand_another(self, idx):
        """Get another random index from the same group as the given index."""
        pool = np.where(self.flag == self.flag[idx])[0]
        return np.random.choice(pool)

    def __getitem__(self, idx):
        """Get training/test data after pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Training/test data (with annotation if `test_mode` is set
                True).
        """
        if self.test_mode:
            return self.prepare_test_img(idx)
        # A pipeline may return None (e.g. all gt filtered out); retry with
        # a random sample from the same aspect-ratio group.
        while True:
            data = self.prepare_train_img(idx)
            if data is None:
                idx = self._rand_another(idx)
                continue
            return data

    def prepare_train_img(self, idx):
        """Get training data and annotations after pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Training data and annotation after pipeline with new keys
                introduced by pipeline.
        """
        img_info = self.data_infos[idx]
        ann_info = self.get_ann_info(idx)
        results = dict(img_info=img_info, ann_info=ann_info)
        if self.proposals is not None:
            results['proposals'] = self.proposals[idx]
        self.pre_pipeline(results)
        return self.pipeline(results)

    def prepare_test_img(self, idx):
        """Get testing data after pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Testing data after pipeline with new keys introduced by
                pipeline.
        """
        img_info = self.data_infos[idx]
        results = dict(img_info=img_info)
        if self.proposals is not None:
            results['proposals'] = self.proposals[idx]
        self.pre_pipeline(results)
        return self.pipeline(results)

    @classmethod
    def get_classes(cls, classes=None):
        """Get class names of current dataset.

        Args:
            classes (Sequence[str] | str | None): If classes is None, use
                default CLASSES defined by builtin dataset. If classes is a
                string, take it as a file name. The file contains the name of
                classes where each line contains one class name. If classes is
                a tuple or list, override the CLASSES defined by the dataset.

        Returns:
            tuple[str] or list[str]: Names of categories of the dataset.
        """
        if classes is None:
            return cls.CLASSES
        if isinstance(classes, str):
            # Take it as a file path: one class name per line.
            class_names = mmcv.list_from_file(classes)
        elif isinstance(classes, (tuple, list)):
            class_names = classes
        else:
            raise ValueError(f'Unsupported type {type(classes)} of classes.')
        return class_names

    def format_results(self, results, **kwargs):
        """Place holder to format result to dataset specific output."""

    def evaluate(self, results, metric='mAP', logger=None, proposal_nums=(100, 300, 1000), iou_thr=0.5, scale_ranges=None):
        """Evaluate the dataset.

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | None | str): Logger used for printing
                related information during evaluation. Default: None.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thr (float | list[float]): IoU threshold. Default: 0.5.
            scale_ranges (list[tuple] | None): Scale ranges for evaluating
                mAP. Default: None.
        """
        if not isinstance(metric, str):
            assert len(metric) == 1
            metric = metric[0]
        allowed_metrics = ['mAP', 'recall']
        if metric not in allowed_metrics:
            raise KeyError(f'metric {metric} is not supported')
        annotations = [self.get_ann_info(i) for i in range(len(self))]
        eval_results = OrderedDict()
        iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr
        if metric == 'mAP':
            assert isinstance(iou_thrs, list)
            mean_aps = []
            for iou_thr in iou_thrs:
                print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}')
                mean_ap, _ = eval_map(results, annotations, scale_ranges=scale_ranges, iou_thr=iou_thr, dataset=self.CLASSES, logger=logger)
                mean_aps.append(mean_ap)
                eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)
            eval_results['mAP'] = sum(mean_aps) / len(mean_aps)
        elif metric == 'recall':
            gt_bboxes = [ann['bboxes'] for ann in annotations]
            recalls = eval_recalls(gt_bboxes, results, proposal_nums, iou_thr, logger=logger)
            for i, num in enumerate(proposal_nums):
                for j, iou in enumerate(iou_thrs):
                    eval_results[f'recall@{num}@{iou}'] = recalls[i, j]
            if recalls.shape[1] > 1:
                # Average recall over IoU thresholds.
                ar = recalls.mean(axis=1)
                for i, num in enumerate(proposal_nums):
                    eval_results[f'AR@{num}'] = ar[i]
        return eval_results

    def __repr__(self):
        """Print the number of instance number."""
        dataset_type = 'Test' if self.test_mode else 'Train'
        result = f'\n{self.__class__.__name__} {dataset_type} dataset with number of images {len(self)}, and instance counts:\n'
        if self.CLASSES is None:
            result += 'Category names are not provided. \n'
            return result
        # Last slot counts images with no labeled instances ("background").
        instance_count = np.zeros(len(self.CLASSES) + 1).astype(int)
        for idx in range(len(self)):
            label = self.get_ann_info(idx)['labels']
            unique, counts = np.unique(label, return_counts=True)
            if len(unique) > 0:
                instance_count[unique] += counts
            else:
                instance_count[-1] += 1
        # Lay out the counts five (category, count) pairs per table row.
        table_data = [['category', 'count'] * 5]
        row_data = []
        for cls, count in enumerate(instance_count):
            if cls < len(self.CLASSES):
                row_data += [f'{cls} [{self.CLASSES[cls]}]', f'{count}']
            else:
                row_data += ['-1 background', f'{count}']
            if len(row_data) == 10:
                table_data.append(row_data)
                row_data = []
        if len(row_data) >= 2:
            if row_data[-1] == '0':
                # Drop a trailing zero background count.
                row_data = row_data[:-2]
            if len(row_data) >= 2:
                table_data.append([])
                table_data.append(row_data)
        table = AsciiTable(table_data)
        result += table.table
        return result
|
@DATASETS.register_module()
class ConcatDataset(_ConcatDataset):
    """A wrapper of concatenated dataset.

    Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but
    concat the group flag for image aspect ratio.

    Args:
        datasets (list[:obj:`Dataset`]): A list of datasets.
        separate_eval (bool): Whether to evaluate the results
            separately if it is used as validation dataset.
            Defaults to True.
    """

    def __init__(self, datasets, separate_eval=True):
        super(ConcatDataset, self).__init__(datasets)
        self.CLASSES = datasets[0].CLASSES
        self.PALETTE = getattr(datasets[0], 'PALETTE', None)
        self.separate_eval = separate_eval
        if not separate_eval:
            # Whole-concat evaluation only works for homogeneous
            # non-COCO datasets; fail fast otherwise.
            if any([isinstance(ds, CocoDataset) for ds in datasets]):
                raise NotImplementedError('Evaluating concatenated CocoDataset as a whole is not supported! Please set "separate_eval=True"')
            elif len(set([type(ds) for ds in datasets])) != 1:
                raise NotImplementedError('All the datasets should have same types')
        if hasattr(datasets[0], 'flag'):
            # Concatenate the per-dataset aspect-ratio group flags.
            flags = []
            for i in range(0, len(datasets)):
                flags.append(datasets[i].flag)
            self.flag = np.concatenate(flags)

    def get_cat_ids(self, idx):
        """Get category ids of concatenated dataset by index.

        Args:
            idx (int): Index of data.

        Returns:
            list[int]: All categories in the image of specified index.
        """
        if idx < 0:
            if -idx > len(self):
                raise ValueError('absolute value of index should not exceed dataset length')
            idx = len(self) + idx
        # Locate the sub-dataset that owns the global index.
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        if dataset_idx == 0:
            sample_idx = idx
        else:
            sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
        return self.datasets[dataset_idx].get_cat_ids(sample_idx)

    def get_ann_info(self, idx):
        """Get annotation of concatenated dataset by index.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Annotation info of specified index.
        """
        if idx < 0:
            if -idx > len(self):
                raise ValueError('absolute value of index should not exceed dataset length')
            idx = len(self) + idx
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        if dataset_idx == 0:
            sample_idx = idx
        else:
            sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
        return self.datasets[dataset_idx].get_ann_info(sample_idx)

    def evaluate(self, results, logger=None, **kwargs):
        """Evaluate the results.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.

        Returns:
            dict[str: float]: AP results of the total dataset or each separate
                dataset if `self.separate_eval=True`.
        """
        assert len(results) == self.cumulative_sizes[-1], f'Dataset and results have different sizes: {self.cumulative_sizes[-1]} v.s. {len(results)}'
        for dataset in self.datasets:
            assert hasattr(dataset, 'evaluate'), f'{type(dataset)} does not implement evaluate function'
        if self.separate_eval:
            dataset_idx = -1
            total_eval_results = dict()
            for size, dataset in zip(self.cumulative_sizes, self.datasets):
                # Slice out this sub-dataset's results from the flat list.
                start_idx = 0 if dataset_idx == -1 else self.cumulative_sizes[dataset_idx]
                end_idx = self.cumulative_sizes[dataset_idx + 1]
                results_per_dataset = results[start_idx:end_idx]
                # BUGFIX: corrected log-message typo "Evaluateing".
                print_log(f'\nEvaluating {dataset.ann_file} with {len(results_per_dataset)} images now', logger=logger)
                eval_results_per_dataset = dataset.evaluate(results_per_dataset, logger=logger, **kwargs)
                dataset_idx += 1
                # Prefix metric names with the sub-dataset index to keep
                # them distinct in the merged dict.
                for k, v in eval_results_per_dataset.items():
                    total_eval_results.update({f'{dataset_idx}_{k}': v})
            return total_eval_results
        elif any([isinstance(ds, CocoDataset) for ds in self.datasets]):
            raise NotImplementedError('Evaluating concatenated CocoDataset as a whole is not supported! Please set "separate_eval=True"')
        elif len(set([type(ds) for ds in self.datasets])) != 1:
            raise NotImplementedError('All the datasets should have same types')
        else:
            # Temporarily splice all data_infos into the first dataset so
            # its evaluate() sees the whole concatenation, then restore.
            original_data_infos = self.datasets[0].data_infos
            self.datasets[0].data_infos = sum([dataset.data_infos for dataset in self.datasets], [])
            eval_results = self.datasets[0].evaluate(results, logger=logger, **kwargs)
            self.datasets[0].data_infos = original_data_infos
            return eval_results
|
@DATASETS.register_module()
class RepeatDataset:
    """A wrapper of repeated dataset.

    The length of repeated dataset will be `times` larger than the original
    dataset. This is useful when the data loading time is long but the
    dataset is small. Using RepeatDataset can reduce the data loading time
    between epochs.

    Args:
        dataset (:obj:`Dataset`): The dataset to be repeated.
        times (int): Repeat times.
    """

    def __init__(self, dataset, times):
        self.dataset = dataset
        self.times = times
        self.CLASSES = dataset.CLASSES
        self.PALETTE = getattr(dataset, 'PALETTE', None)
        # Replicate the aspect-ratio group flags to match the repeats.
        if hasattr(self.dataset, 'flag'):
            self.flag = np.tile(self.dataset.flag, times)
        self._ori_len = len(self.dataset)

    def __getitem__(self, idx):
        # Indices wrap around the underlying dataset.
        return self.dataset[idx % self._ori_len]

    def get_cat_ids(self, idx):
        """Get category ids of repeat dataset by index.

        Args:
            idx (int): Index of data.

        Returns:
            list[int]: All categories in the image of specified index.
        """
        ori_idx = idx % self._ori_len
        return self.dataset.get_cat_ids(ori_idx)

    def get_ann_info(self, idx):
        """Get annotation of repeat dataset by index.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Annotation info of specified index.
        """
        ori_idx = idx % self._ori_len
        return self.dataset.get_ann_info(ori_idx)

    def __len__(self):
        """Length after repetition."""
        return self.times * self._ori_len
|
@DATASETS.register_module()
class ClassBalancedDataset:
    """A wrapper of repeated dataset with repeat factor.

    Suitable for training on class imbalanced datasets like LVIS. Following
    the sampling strategy in the `paper <https://arxiv.org/abs/1908.03195>`_,
    in each epoch, an image may appear multiple times based on its
    "repeat factor". The repeat factor for an image is a function of the
    frequency of the rarest category labeled in that image. The
    "frequency of category c" in [0, 1] is the fraction of images in the
    training set (without repeats) in which category c appears.
    The wrapped dataset must implement :func:`self.get_cat_ids`.

    The repeat factor is computed as follows.

    1. For each category c, compute the fraction of images
       that contain it: :math:`f(c)`
    2. For each category c, compute the category-level repeat factor:
       :math:`r(c) = max(1, sqrt(t/f(c)))`
    3. For each image I, compute the image-level repeat factor:
       :math:`r(I) = max_{c in I} r(c)`

    Args:
        dataset (:obj:`CustomDataset`): The dataset to be repeated.
        oversample_thr (float): frequency threshold below which data is
            repeated. Categories with ``f_c >= oversample_thr`` are not
            oversampled; categories below it are oversampled following the
            square-root inverse frequency heuristic above.
        filter_empty_gt (bool, optional): If set true, images without
            bounding boxes will not be oversampled. Otherwise, they are
            treated as a pure-background pseudo class and take part in the
            oversampling. Default: True.
    """

    def __init__(self, dataset, oversample_thr, filter_empty_gt=True):
        self.dataset = dataset
        self.oversample_thr = oversample_thr
        self.filter_empty_gt = filter_empty_gt
        self.CLASSES = dataset.CLASSES
        self.PALETTE = getattr(dataset, 'PALETTE', None)

        factors = self._get_repeat_factors(dataset, oversample_thr)
        # Each image index appears ceil(r(I)) times in the virtual index.
        indices = []
        for src_idx, factor in enumerate(factors):
            indices.extend([src_idx] * math.ceil(factor))
        self.repeat_indices = indices

        # Replicate aspect-ratio group flags to match the repeats.
        flags = []
        if hasattr(self.dataset, 'flag'):
            for flag, factor in zip(self.dataset.flag, factors):
                flags.extend([flag] * int(math.ceil(factor)))
            assert len(flags) == len(indices)
        self.flag = np.asarray(flags, dtype=np.uint8)

    def _get_repeat_factors(self, dataset, repeat_thr):
        """Get repeat factor for each image in the dataset.

        Args:
            dataset (:obj:`CustomDataset`): The dataset.
            repeat_thr (float): The frequency threshold. An image containing
                any category whose frequency is below it gets repeated.

        Returns:
            list[float]: The repeat factors for each image in the dataset.
        """
        # Pass 1: frequency f(c) of each category over the whole dataset.
        category_freq = defaultdict(int)
        num_images = len(dataset)
        for img_idx in range(num_images):
            cats = set(self.dataset.get_cat_ids(img_idx))
            if not cats and not self.filter_empty_gt:
                # Empty images count toward a pseudo "background" category.
                cats = {len(self.CLASSES)}
            for cat in cats:
                category_freq[cat] += 1
        for cat, count in category_freq.items():
            category_freq[cat] = count / num_images

        # Category-level repeat factor r(c) = max(1, sqrt(t / f(c))).
        category_repeat = {cat: max(1.0, math.sqrt(repeat_thr / freq)) for cat, freq in category_freq.items()}

        # Pass 2: image-level repeat factor r(I) = max_{c in I} r(c).
        repeat_factors = []
        for img_idx in range(num_images):
            cats = set(self.dataset.get_cat_ids(img_idx))
            if not cats and not self.filter_empty_gt:
                cats = {len(self.CLASSES)}
            repeat_factors.append(max({category_repeat[cat] for cat in cats}, default=1))
        return repeat_factors

    def __getitem__(self, idx):
        return self.dataset[self.repeat_indices[idx]]

    def get_ann_info(self, idx):
        """Get annotation of dataset by index.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Annotation info of specified index.
        """
        return self.dataset.get_ann_info(self.repeat_indices[idx])

    def __len__(self):
        """Length after repetition."""
        return len(self.repeat_indices)
|
@DATASETS.register_module()
class MultiImageMixDataset:
    """A wrapper of multiple images mixed dataset.

    Suitable for training on multiple-image mixed data augmentation such as
    mosaic and mixup. Transforms in the augmentation pipeline may provide a
    `get_indexes` method to obtain extra image indexes, and `skip_type_keys`
    can be set to skip transforms by type at runtime.

    Args:
        dataset (:obj:`CustomDataset`): The dataset to be mixed.
        pipeline (Sequence[dict]): Sequence of transform config dicts to be
            composed.
        dynamic_scale (tuple[int], optional): Deprecated; must be None.
        skip_type_keys (list[str], optional): Sequence of transform type
            strings to be skipped. Default to None.
    """

    def __init__(self, dataset, pipeline, dynamic_scale=None, skip_type_keys=None):
        if dynamic_scale is not None:
            raise RuntimeError('dynamic_scale is deprecated. Please use Resize pipeline to achieve similar functions')
        assert isinstance(pipeline, collections.abc.Sequence)
        if skip_type_keys is not None:
            assert all([isinstance(key, str) for key in skip_type_keys])
        self._skip_type_keys = skip_type_keys

        # Build transforms, remembering each one's type string so it can be
        # matched against skip_type_keys later.
        self.pipeline = []
        self.pipeline_types = []
        for cfg in pipeline:
            if not isinstance(cfg, dict):
                raise TypeError('pipeline must be a dict')
            self.pipeline_types.append(cfg['type'])
            self.pipeline.append(build_from_cfg(cfg, PIPELINES))

        self.dataset = dataset
        self.CLASSES = dataset.CLASSES
        self.PALETTE = getattr(dataset, 'PALETTE', None)
        if hasattr(self.dataset, 'flag'):
            self.flag = dataset.flag
        self.num_samples = len(dataset)

    def __len__(self):
        return self.num_samples

    def __getitem__(self, idx):
        results = copy.deepcopy(self.dataset[idx])
        for transform, transform_type in zip(self.pipeline, self.pipeline_types):
            if self._skip_type_keys is not None and transform_type in self._skip_type_keys:
                continue
            if hasattr(transform, 'get_indexes'):
                # Ask the transform which extra samples it wants to mix in.
                indexes = transform.get_indexes(self.dataset)
                if not isinstance(indexes, collections.abc.Sequence):
                    indexes = [indexes]
                results['mix_results'] = [copy.deepcopy(self.dataset[index]) for index in indexes]
            results = transform(results)
            # Drop the mixing buffer so it never leaks to later transforms.
            results.pop('mix_results', None)
        return results

    def update_skip_type_keys(self, skip_type_keys):
        """Update skip_type_keys. It is called by an external hook.

        Args:
            skip_type_keys (list[str], optional): Sequence of transform type
                strings to be skipped.
        """
        assert all([isinstance(key, str) for key in skip_type_keys])
        self._skip_type_keys = skip_type_keys
|
@DATASETS.register_module()
class DeepFashionDataset(CocoDataset):
    """COCO-style dataset for the DeepFashion clothing benchmark.

    Only redefines the category names and the visualization palette; all
    loading/evaluation behavior is inherited from :class:`CocoDataset`.
    """
    # 15 DeepFashion category names, in annotation label order.
    CLASSES = ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants', 'bag', 'neckwear', 'headwear', 'eyeglass', 'belt', 'footwear', 'hair', 'skin', 'face')
    # One RGB color per entry of ``CLASSES`` (used when drawing results).
    PALETTE = [(0, 192, 64), (0, 64, 96), (128, 192, 192), (0, 64, 64), (0, 192, 224), (0, 192, 192), (128, 192, 64), (0, 192, 96), (128, 32, 192), (0, 0, 224), (0, 0, 64), (0, 160, 192), (128, 0, 96), (128, 0, 192), (0, 32, 192)]
|
@PIPELINES.register_module()
class Compose:
    """Compose multiple transforms sequentially.

    Args:
        transforms (Sequence[dict | callable]): Sequence of transform
            objects or config dicts to be composed.
    """

    def __init__(self, transforms):
        assert isinstance(transforms, collections.abc.Sequence)
        self.transforms = []
        for item in transforms:
            # Config dicts are built via the registry; callables are kept.
            if isinstance(item, dict):
                self.transforms.append(build_from_cfg(item, PIPELINES))
            elif callable(item):
                self.transforms.append(item)
            else:
                raise TypeError('transform must be callable or a dict')

    def __call__(self, data):
        """Apply the transforms to ``data`` in order.

        Args:
            data (dict): A result dict containing the data to transform.

        Returns:
            dict | None: Transformed data, or None if a transform drops it.
        """
        for transform in self.transforms:
            data = transform(data)
            # A transform may veto the sample by returning None.
            if data is None:
                return None
        return data

    def __repr__(self):
        format_string = self.__class__.__name__ + '('
        for transform in self.transforms:
            str_ = transform.__repr__()
            # Indent nested Compose reprs one extra level.
            if 'Compose(' in str_:
                str_ = str_.replace('\n', '\n ')
            format_string += '\n'
            format_string += f' {str_}'
        format_string += '\n)'
        return format_string
|
def to_tensor(data):
    """Convert objects of various python types to :obj:`torch.Tensor`.

    Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
    :class:`Sequence`, :class:`int` and :class:`float`.

    Args:
        data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data
            to be converted.
    """
    # Guard-clause dispatch; order matters (ndarray before generic Sequence).
    if isinstance(data, torch.Tensor):
        return data
    if isinstance(data, np.ndarray):
        return torch.from_numpy(data)
    if isinstance(data, Sequence) and not mmcv.is_str(data):
        return torch.tensor(data)
    if isinstance(data, int):
        return torch.LongTensor([data])
    if isinstance(data, float):
        return torch.FloatTensor([data])
    raise TypeError(f'type {type(data)} cannot be converted to tensor.')
|
@PIPELINES.register_module()
class ToTensor:
    """Convert some results to :obj:`torch.Tensor` by given keys.

    Args:
        keys (Sequence[str]): Keys that need to be converted to Tensor.
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        """Convert the data under ``self.keys`` to :obj:`torch.Tensor`.

        Args:
            results (dict): Result dict containing the data to convert.

        Returns:
            dict: The result dict with the selected entries converted to
                :obj:`torch.Tensor`.
        """
        for k in self.keys:
            results[k] = to_tensor(results[k])
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(keys={self.keys})'
|
@PIPELINES.register_module()
class ImageToTensor:
    """Convert image to :obj:`torch.Tensor` by given keys.

    The dimension order of input image is (H, W, C). The pipeline converts
    it to (C, H, W). If only 2 dimensions (H, W) are given, the output is
    (1, H, W).

    Args:
        keys (Sequence[str]): Key of images to be converted to Tensor.
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        """Convert images under ``self.keys`` to channel-first tensors.

        Args:
            results (dict): Result dict containing the image data to convert.

        Returns:
            dict: The result dict with the images converted to
                :obj:`torch.Tensor` in (C, H, W) order.
        """
        for k in self.keys:
            img = results[k]
            # Promote grayscale (H, W) to (H, W, 1) before transposing.
            if len(img.shape) < 3:
                img = np.expand_dims(img, -1)
            results[k] = to_tensor(img.transpose(2, 0, 1)).contiguous()
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(keys={self.keys})'
|
@PIPELINES.register_module()
class Transpose:
    """Transpose some results by given keys.

    Args:
        keys (Sequence[str]): Keys of results to be transposed.
        order (Sequence[int]): Order of transpose.
    """

    def __init__(self, keys, order):
        self.keys = keys
        self.order = order

    def __call__(self, results):
        """Transpose the data under ``self.keys`` to ``self.order``.

        Args:
            results (dict): Result dict containing the data to transpose.

        Returns:
            dict: The result dict with the data transposed to
                ``self.order``.
        """
        for k in self.keys:
            results[k] = results[k].transpose(self.order)
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(keys={self.keys}, order={self.order})'
|
@PIPELINES.register_module()
class ToDataContainer:
    """Convert results to :obj:`mmcv.DataContainer` by given fields.

    Args:
        fields (Sequence[dict]): Each field is a dict like
            ``dict(key='xxx', **kwargs)``. The ``key`` in result will
            be converted to :obj:`mmcv.DataContainer` with ``**kwargs``.
            Default: ``(dict(key='img', stack=True), dict(key='gt_bboxes'),
            dict(key='gt_labels'))``.
    """

    def __init__(self, fields=(dict(key='img', stack=True), dict(key='gt_bboxes'), dict(key='gt_labels'))):
        self.fields = fields

    def __call__(self, results):
        """Wrap the configured entries of ``results`` in DataContainers.

        Args:
            results (dict): Result dict containing the data to convert.

        Returns:
            dict: The result dict with the data converted to
                :obj:`mmcv.DataContainer`.
        """
        for field in self.fields:
            # Copy so popping 'key' never mutates the configured fields.
            cfg = dict(field)
            key = cfg.pop('key')
            results[key] = DC(results[key], **cfg)
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(fields={self.fields})'
|
@PIPELINES.register_module()
class DefaultFormatBundle:
    """Default formatting bundle.

    It simplifies the pipeline of formatting common fields, including "img",
    "proposals", "gt_bboxes", "gt_labels", "gt_masks" and "gt_semantic_seg".
    These fields are formatted as follows.

    - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)
    - proposals: (1)to tensor, (2)to DataContainer
    - gt_bboxes: (1)to tensor, (2)to DataContainer
    - gt_bboxes_ignore: (1)to tensor, (2)to DataContainer
    - gt_labels: (1)to tensor, (2)to DataContainer
    - gt_masks: (1)to tensor, (2)to DataContainer (cpu_only=True)
    - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor,
      (3)to DataContainer (stack=True)

    Args:
        img_to_float (bool): Whether to force the image to be converted to
            float type. Default: True.
        pad_val (dict): A dict for padding value in batch collating,
            the default value is `dict(img=0, masks=0, seg=255)`.
            Without this argument, the padding value of "gt_semantic_seg"
            would default to 0, while it should be 255.
    """

    def __init__(self, img_to_float=True, pad_val=dict(img=0, masks=0, seg=255)):
        self.img_to_float = img_to_float
        self.pad_val = pad_val

    def __call__(self, results):
        """Transform and format common fields in ``results``.

        Args:
            results (dict): Result dict containing the data to convert.

        Returns:
            dict: The result dict with the data formatted with the default
                bundle.
        """
        if 'img' in results:
            img = results['img']
            if self.img_to_float is True and img.dtype == np.uint8:
                # Normally done by Normalize; forced here so later float
                # conversion on GPU is avoided.
                img = img.astype(np.float32)
            results = self._add_default_meta_keys(results)
            if len(img.shape) < 3:
                img = np.expand_dims(img, -1)
            # (H, W, C) -> contiguous (C, H, W).
            img = np.ascontiguousarray(img.transpose(2, 0, 1))
            results['img'] = DC(to_tensor(img), padding_value=self.pad_val['img'], stack=True)
        for key in ('proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels'):
            if key in results:
                results[key] = DC(to_tensor(results[key]))
        if 'gt_masks' in results:
            # Mask structures stay on CPU inside the DataContainer.
            results['gt_masks'] = DC(results['gt_masks'], padding_value=self.pad_val['masks'], cpu_only=True)
        if 'gt_semantic_seg' in results:
            # Add a leading channel dim before tensor conversion.
            results['gt_semantic_seg'] = DC(to_tensor(results['gt_semantic_seg'][None, ...]), padding_value=self.pad_val['seg'], stack=True)
        return results

    def _add_default_meta_keys(self, results):
        """Add default meta keys.

        We set default meta keys including `pad_shape`, `scale_factor` and
        `img_norm_cfg` to avoid the case where no `Resize`, `Normalize` and
        `Pad` are implemented during the whole pipeline.

        Args:
            results (dict): Result dict containing the data to convert.

        Returns:
            results (dict): Updated result dict containing the data to
                convert.
        """
        img = results['img']
        results.setdefault('pad_shape', img.shape)
        results.setdefault('scale_factor', 1.0)
        num_channels = 1 if len(img.shape) < 3 else img.shape[2]
        # Identity normalization config as a safe default.
        results.setdefault('img_norm_cfg', dict(mean=np.zeros(num_channels, dtype=np.float32), std=np.ones(num_channels, dtype=np.float32), to_rgb=False))
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(img_to_float={self.img_to_float})'
|
@PIPELINES.register_module()
class Collect:
    """Collect data from the loader relevant to the specific task.

    This is usually the last stage of the data loader pipeline. Typically
    ``keys`` is a subset of "img", "proposals", "gt_bboxes",
    "gt_bboxes_ignore", "gt_labels", and/or "gt_masks".

    The "img_meta" item is always populated. The contents of the "img_meta"
    dictionary depend on ``meta_keys``. By default this includes:

    - "img_shape": shape of the image input to the network as a tuple
      (h, w, c). Images may be zero-padded on the bottom/right if the
      batch tensor is larger than this shape.
    - "scale_factor": a float indicating the preprocessing scale
    - "flip": a boolean indicating if image flip transform was used
    - "filename": path to the image file
    - "ori_shape": original shape of the image as a tuple (h, w, c)
    - "pad_shape": image shape after padding
    - "img_norm_cfg": a dict of normalization information:

        - mean - per channel mean subtraction
        - std - per channel std divisor
        - to_rgb - bool indicating if bgr was converted to rgb

    Args:
        keys (Sequence[str]): Keys of results to be collected in ``data``.
        meta_keys (Sequence[str], optional): Meta keys to be converted to
            ``mmcv.DataContainer`` and collected in ``data[img_metas]``.
            Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape',
            'pad_shape', 'scale_factor', 'flip', 'flip_direction',
            'img_norm_cfg')``
    """

    def __init__(self, keys, meta_keys=('filename', 'ori_filename', 'ori_shape', 'img_shape', 'pad_shape', 'scale_factor', 'flip', 'flip_direction', 'img_norm_cfg')):
        self.keys = keys
        self.meta_keys = meta_keys

    def __call__(self, results):
        """Collect ``self.keys`` plus metadata from ``results``.

        The entries named in ``meta_keys`` are gathered into a cpu-only
        :obj:mmcv.DataContainer under ``img_metas``.

        Args:
            results (dict): Result dict containing the data to collect.

        Returns:
            dict: The result dict containing

                - keys in ``self.keys``
                - ``img_metas``
        """
        img_meta = {k: results[k] for k in self.meta_keys}
        data = {'img_metas': DC(img_meta, cpu_only=True)}
        for k in self.keys:
            data[k] = results[k]
        return data

    def __repr__(self):
        return self.__class__.__name__ + f'(keys={self.keys}, meta_keys={self.meta_keys})'
|
@PIPELINES.register_module()
class WrapFieldsToLists():
"Wrap fields of the data dictionary into lists for evaluation.\n\n This class can be used as a last step of a test or validation\n pipeline for single image evaluation or inference.\n\n Example:\n >>> test_pipeline = [\n >>> dict(type='LoadImageFromFile'),\n >>> dict(type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n >>> dict(type='Pad', size_divisor=32),\n >>> dict(type='ImageToTensor', keys=['img']),\n >>> dict(type='Collect', keys=['img']),\n >>> dict(type='WrapFieldsToLists')\n >>> ]\n "
def __call__(self, results):
'Call function to wrap fields into lists.\n\n Args:\n results (dict): Result dict contains the data to wrap.\n\n Returns:\n dict: The result dict where value of ``self.keys`` are wrapped into list.\n '
for (key, val) in results.items():
results[key] = [val]
return results
def __repr__(self):
return f'{self.__class__.__name__}()'
|
@PIPELINES.register_module()
class InstaBoost():
    'Data augmentation method in `InstaBoost: Boosting Instance\n Segmentation Via Probability Map Guided Copy-Pasting\n <https://arxiv.org/abs/1908.07801>`_.\n\n Refer to https://github.com/GothicAi/Instaboost for implementation details.\n\n Args:\n action_candidate (tuple): Action candidates. "normal", "horizontal", \\\n "vertical", "skip" are supported. Default: (\'normal\', \\\n \'horizontal\', \'skip\').\n action_prob (tuple): Corresponding action probabilities. Should be \\\n the same length as action_candidate. Default: (1, 0, 0).\n scale (tuple): (min scale, max scale). Default: (0.8, 1.2).\n dx (int): The maximum x-axis shift will be (instance width) / dx.\n Default 15.\n dy (int): The maximum y-axis shift will be (instance height) / dy.\n Default 15.\n theta (tuple): (min rotation degree, max rotation degree). \\\n Default: (-1, 1).\n color_prob (float): Probability of images for color augmentation.\n Default 0.5.\n heatmap_flag (bool): Whether to use heatmap guided. Default False.\n aug_ratio (float): Probability of applying this transformation. \\\n Default 0.5.\n '
    def __init__(self, action_candidate=('normal', 'horizontal', 'skip'), action_prob=(1, 0, 0), scale=(0.8, 1.2), dx=15, dy=15, theta=((- 1), 1), color_prob=0.5, hflag=False, aug_ratio=0.5):
        # Import lazily so mmdet does not hard-depend on instaboostfast.
        try:
            import instaboostfast as instaboost
        except ImportError:
            raise ImportError('Please run "pip install instaboostfast" to install instaboostfast first for instaboost augmentation.')
        self.cfg = instaboost.InstaBoostConfig(action_candidate, action_prob, scale, dx, dy, theta, color_prob, hflag)
        self.aug_ratio = aug_ratio
    def _load_anns(self, results):
        """Convert ``results['ann_info']`` into COCO-style annotation dicts.

        Boxes are converted from (x1, y1, x2, y2) to the (x, y, w, h) layout
        expected by instaboostfast.
        """
        labels = results['ann_info']['labels']
        masks = results['ann_info']['masks']
        bboxes = results['ann_info']['bboxes']
        n = len(labels)
        anns = []
        for i in range(n):
            label = labels[i]
            bbox = bboxes[i]
            mask = masks[i]
            # xyxy -> xywh for the instaboost API.
            (x1, y1, x2, y2) = bbox
            bbox = [x1, y1, (x2 - x1), (y2 - y1)]
            anns.append({'category_id': label, 'segmentation': mask, 'bbox': bbox})
        return anns
    def _parse_anns(self, results, anns, img):
        """Write augmented annotations and image back into ``results``.

        Boxes are converted back from (x, y, w, h) to (x1, y1, x2, y2);
        degenerate boxes (non-positive width or height) are dropped.
        """
        gt_bboxes = []
        gt_labels = []
        gt_masks_ann = []
        for ann in anns:
            (x1, y1, w, h) = ann['bbox']
            # Skip boxes that collapsed to zero/negative size after pasting.
            if ((w <= 0) or (h <= 0)):
                continue
            bbox = [x1, y1, (x1 + w), (y1 + h)]
            gt_bboxes.append(bbox)
            gt_labels.append(ann['category_id'])
            gt_masks_ann.append(ann['segmentation'])
        gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
        gt_labels = np.array(gt_labels, dtype=np.int64)
        results['ann_info']['labels'] = gt_labels
        results['ann_info']['bboxes'] = gt_bboxes
        results['ann_info']['masks'] = gt_masks_ann
        results['img'] = img
        return results
    def __call__(self, results):
        """Apply InstaBoost copy-paste augmentation with prob ``aug_ratio``."""
        img = results['img']
        # Remember the incoming dtype: instaboost works on uint8 images, so
        # the result is cast back afterwards.
        orig_type = img.dtype
        anns = self._load_anns(results)
        # Bernoulli draw deciding whether to augment this sample at all.
        if np.random.choice([0, 1], p=[(1 - self.aug_ratio), self.aug_ratio]):
            try:
                import instaboostfast as instaboost
            except ImportError:
                raise ImportError('Please run "pip install instaboostfast" to install instaboostfast first.')
            (anns, img) = instaboost.get_new_data(anns, img.astype(np.uint8), self.cfg, background=None)
        results = self._parse_anns(results, anns, img.astype(orig_type))
        return results
    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(cfg={self.cfg}, aug_ratio={self.aug_ratio})'
        return repr_str
|
@PIPELINES.register_module()
class MultiScaleFlipAug:
    """Test-time augmentation with multiple scales and flipping.

    The given ``transforms`` are applied once per (scale, flip) combination,
    and the per-key outputs are gathered into lists. For example with two
    scales and ``flip=True`` every key of the returned dict maps to a list
    of four entries, ordered scale-major then flip-minor.

    Args:
        transforms (list[dict]): Transforms to apply in each augmentation.
        img_scale (tuple | list[tuple] | None): Image scales for resizing.
        scale_factor (float | list[float] | None): Scale factors for
            resizing. Exactly one of ``img_scale`` / ``scale_factor`` must
            be given.
        flip (bool): Whether to apply flip augmentation. Default: False.
        flip_direction (str | list[str]): Flip augmentation directions,
            options are "horizontal", "vertical" and "diagonal". If a list,
            multiple flip augmentations are applied. Has no effect when
            ``flip`` is False. Default: "horizontal".
    """

    def __init__(self,
                 transforms,
                 img_scale=None,
                 scale_factor=None,
                 flip=False,
                 flip_direction='horizontal'):
        self.transforms = Compose(transforms)
        # Exactly one of img_scale / scale_factor must be provided.
        assert (img_scale is None) ^ (scale_factor is None), \
            'Must have but only one variable can be set'
        if img_scale is not None:
            self.img_scale = img_scale if isinstance(img_scale, list) \
                else [img_scale]
            self.scale_key = 'scale'
            assert mmcv.is_list_of(self.img_scale, tuple)
        else:
            # Scale factors are stored in the same attribute but written to
            # results under a different key.
            self.img_scale = scale_factor if isinstance(scale_factor, list) \
                else [scale_factor]
            self.scale_key = 'scale_factor'
        self.flip = flip
        if isinstance(flip_direction, list):
            self.flip_direction = flip_direction
        else:
            self.flip_direction = [flip_direction]
        assert mmcv.is_list_of(self.flip_direction, str)
        if not self.flip and self.flip_direction != ['horizontal']:
            warnings.warn(
                'flip_direction has no effect when flip is set to False')
        if self.flip:
            has_flip_aug = any(t['type'] == 'RandomFlip' for t in transforms)
            if not has_flip_aug:
                warnings.warn(
                    'flip has no effect when RandomFlip is not in transforms')

    def __call__(self, results):
        """Apply the transform pipeline once per (scale, flip) combination.

        Args:
            results (dict): Result dict containing the data to transform.

        Returns:
            dict[str, list]: The augmented data, where each value is wrapped
            into a list over all augmentations.
        """
        # (flip, direction) pairs; the un-flipped pass always comes first.
        flip_args = [(False, None)]
        if self.flip:
            flip_args.extend((True, d) for d in self.flip_direction)
        aug_data = []
        for scale in self.img_scale:
            for flip, direction in flip_args:
                _results = results.copy()
                _results[self.scale_key] = scale
                _results['flip'] = flip
                _results['flip_direction'] = direction
                aug_data.append(self.transforms(_results))
        # Transpose list-of-dicts into dict-of-lists.
        collated = {key: [] for key in aug_data[0]}
        for data in aug_data:
            for key, val in data.items():
                collated[key].append(val)
        return collated

    def __repr__(self):
        return (f'{self.__class__.__name__}(transforms={self.transforms}, '
                f'img_scale={self.img_scale}, flip={self.flip}, '
                f'flip_direction={self.flip_direction})')
|
class DistributedSampler(_DistributedSampler):
    """Distributed sampler with an explicit ``seed`` for reproducible
    shuffling.

    The shuffle order is derived from ``epoch + seed`` so all processes
    agree on the permutation and results are reproducible across runs.
    """

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True,
                 seed=0):
        super().__init__(
            dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
        # Normalize None to 0 so the generator seed below is always an int.
        self.seed = 0 if seed is None else seed

    def __iter__(self):
        if self.shuffle:
            gen = torch.Generator()
            gen.manual_seed(self.epoch + self.seed)
            indices = torch.randperm(len(self.dataset), generator=gen).tolist()
        else:
            indices = list(range(len(self.dataset)))
        # Repeat the index list until it covers ``total_size`` entries, then
        # truncate; this pads the epoch so every replica gets the same count.
        repeats = math.ceil(self.total_size / len(indices))
        indices = (indices * repeats)[:self.total_size]
        assert len(indices) == self.total_size
        # Strided subsampling: this rank takes every num_replicas-th index.
        indices = indices[self.rank:self.total_size:self.num_replicas]
        assert len(indices) == self.num_samples
        return iter(indices)
|
class GroupSampler(Sampler):
    """Sampler that emits indices batch-wise within aspect-ratio groups.

    The dataset must expose a ``flag`` array assigning each sample to a
    group. Each group is shuffled and padded (by re-sampling its own
    members) to a multiple of ``samples_per_gpu``, then the resulting
    batches are shuffled so every batch contains samples of one group only.
    """

    def __init__(self, dataset, samples_per_gpu=1):
        assert hasattr(dataset, 'flag')
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.flag = dataset.flag.astype(np.int64)
        self.group_sizes = np.bincount(self.flag)
        # Total sample count after rounding each group up to a full batch.
        self.num_samples = sum(
            int(np.ceil(size / samples_per_gpu)) * samples_per_gpu
            for size in self.group_sizes)

    def __iter__(self):
        chunks = []
        for group, size in enumerate(self.group_sizes):
            if size == 0:
                continue
            members = np.where(self.flag == group)[0]
            assert len(members) == size
            np.random.shuffle(members)
            # Pad with randomly re-drawn members so the group fills whole
            # batches.
            padded_len = int(
                np.ceil(size / self.samples_per_gpu)) * self.samples_per_gpu
            pad = padded_len - len(members)
            members = np.concatenate(
                [members, np.random.choice(members, pad)])
            chunks.append(members)
        flat = np.concatenate(chunks)
        # Shuffle at batch granularity: batches never mix groups.
        batch_order = np.random.permutation(
            range(len(flat) // self.samples_per_gpu))
        flat = np.concatenate([
            flat[b * self.samples_per_gpu:(b + 1) * self.samples_per_gpu]
            for b in batch_order
        ])
        flat = flat.astype(np.int64).tolist()
        assert len(flat) == self.num_samples
        return iter(flat)

    def __len__(self):
        return self.num_samples
|
class DistributedGroupSampler(Sampler):
    """Sampler that restricts data loading to a subset of the dataset,
    keeping each batch within one aspect-ratio group.

    It is especially useful in conjunction with
    :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
    process can pass a DistributedGroupSampler instance as a DataLoader
    sampler, and load a subset of the original dataset that is exclusive
    to it.

    .. note::
        Dataset is assumed to be of constant size.

    Arguments:
        dataset: Dataset used for sampling. Must expose a ``flag`` array
            assigning every sample to a group.
        samples_per_gpu (int): Number of samples per batch on each replica.
        num_replicas (optional): Number of processes participating in
            distributed training.
        rank (optional): Rank of the current process within num_replicas.
        seed (int, optional): random seed used to shuffle the sampler if
            ``shuffle=True``. This number should be identical across all
            processes in the distributed group. Default: 0.
    """

    def __init__(self,
                 dataset,
                 samples_per_gpu=1,
                 num_replicas=None,
                 rank=None,
                 seed=0):
        _rank, _num_replicas = get_dist_info()
        if num_replicas is None:
            num_replicas = _num_replicas
        if rank is None:
            rank = _rank
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        self.seed = seed if seed is not None else 0
        assert hasattr(self.dataset, 'flag')
        self.flag = self.dataset.flag
        self.group_sizes = np.bincount(self.flag)
        # Round every group up so each replica receives whole batches of
        # ``samples_per_gpu`` drawn from a single group.
        self.num_samples = 0
        for size in self.group_sizes:
            self.num_samples += int(
                math.ceil(size * 1.0 / self.samples_per_gpu /
                          self.num_replicas)) * self.samples_per_gpu
        self.total_size = self.num_samples * self.num_replicas

    def __iter__(self):
        # Deterministic shuffling keyed on (epoch, seed) keeps all replicas
        # in agreement about the global order.
        g = torch.Generator()
        g.manual_seed(self.epoch + self.seed)
        indices = []
        for i, size in enumerate(self.group_sizes):
            if size > 0:
                indice = np.where(self.flag == i)[0]
                assert len(indice) == size
                # Shuffle within the group.
                indice = indice[list(
                    torch.randperm(int(size), generator=g).numpy())].tolist()
                # Pad the group by cycling its shuffled indices until the
                # length divides samples_per_gpu * num_replicas.
                extra = int(
                    math.ceil(size * 1.0 / self.samples_per_gpu /
                              self.num_replicas)
                ) * self.samples_per_gpu * self.num_replicas - len(indice)
                tmp = indice.copy()
                for _ in range(extra // size):
                    indice.extend(tmp)
                indice.extend(tmp[:extra % size])
                indices.extend(indice)
        assert len(indices) == self.total_size
        # Shuffle at batch granularity so each batch stays within one group.
        indices = [
            indices[j] for i in list(
                torch.randperm(
                    len(indices) // self.samples_per_gpu, generator=g))
            for j in range(i * self.samples_per_gpu, (i + 1) *
                           self.samples_per_gpu)
        ]
        # Each rank takes its own contiguous chunk of the shuffled batches.
        offset = self.num_samples * self.rank
        indices = indices[offset:offset + self.num_samples]
        assert len(indices) == self.num_samples
        return iter(indices)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        self.epoch = epoch
|
class InfiniteGroupBatchSampler(Sampler):
    """Similar to `BatchSampler` warping a `GroupSampler`. It is designed
    for iteration-based runners like `IterBasedRunner` and yields
    mini-batch indices each time; all indices of a batch belong to the
    same group.

    The implementation logic is referred to
    https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/samplers/grouped_batch_sampler.py

    Args:
        dataset (object): The dataset; must expose a ``flag`` array.
        batch_size (int): When model is :obj:`DistributedDataParallel`,
            it is the number of training samples on each GPU.
            When model is :obj:`DataParallel`, it is
            `num_gpus * samples_per_gpu`. Default: 1.
        world_size (int, optional): Number of processes participating in
            distributed training. Default: None.
        rank (int, optional): Rank of current process. Default: None.
        seed (int): Random seed. Default: 0.
        shuffle (bool): Whether to shuffle the indices of a dummy `epoch`;
            note that `shuffle` cannot guarantee sequential indices since
            all indices of a batch must share a group. Default: True.
    """

    def __init__(self,
                 dataset,
                 batch_size=1,
                 world_size=None,
                 rank=None,
                 seed=0,
                 shuffle=True):
        _rank, _world_size = get_dist_info()
        self.rank = _rank if rank is None else rank
        self.world_size = _world_size if world_size is None else world_size
        self.dataset = dataset
        self.batch_size = batch_size
        self.seed = 0 if seed is None else seed
        self.shuffle = shuffle
        assert hasattr(self.dataset, 'flag')
        self.flag = self.dataset.flag
        self.group_sizes = np.bincount(self.flag)
        # One pending-index buffer per group; a batch is emitted only once
        # a buffer holds ``batch_size`` indices of the same group.
        self.buffer_per_group = {g: [] for g in range(len(self.group_sizes))}
        self.size = len(dataset)
        self.indices = self._indices_of_rank()

    def _infinite_indices(self):
        """Infinitely yield a sequence of indices."""
        g = torch.Generator()
        g.manual_seed(self.seed)
        while True:
            if self.shuffle:
                yield from torch.randperm(self.size, generator=g).tolist()
            else:
                yield from torch.arange(self.size).tolist()

    def _indices_of_rank(self):
        """Slice the infinite indices by rank."""
        yield from itertools.islice(self._infinite_indices(), self.rank,
                                    None, self.world_size)

    def __iter__(self):
        for idx in self.indices:
            buf = self.buffer_per_group[self.flag[idx]]
            buf.append(idx)
            if len(buf) == self.batch_size:
                # Yield a copy, then clear the shared buffer in place.
                yield buf[:]
                del buf[:]

    def __len__(self):
        """Length of base dataset."""
        return self.size

    def set_epoch(self, epoch):
        """Not supported in `IterationBased` runner."""
        raise NotImplementedError
|
class InfiniteBatchSampler(Sampler):
    """Similar to `BatchSampler` warping a `DistributedSampler`. It is
    designed for iteration-based runners like `IterBasedRunner` and yields
    mini-batch indices each time.

    The implementation logic is referred to
    https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/samplers/grouped_batch_sampler.py

    Args:
        dataset (object): The dataset.
        batch_size (int): When model is :obj:`DistributedDataParallel`,
            it is the number of training samples on each GPU.
            When model is :obj:`DataParallel`, it is
            `num_gpus * samples_per_gpu`. Default: 1.
        world_size (int, optional): Number of processes participating in
            distributed training. Default: None.
        rank (int, optional): Rank of current process. Default: None.
        seed (int): Random seed. Default: 0.
        shuffle (bool): Whether shuffle the dataset or not. Default: True.
    """

    def __init__(self,
                 dataset,
                 batch_size=1,
                 world_size=None,
                 rank=None,
                 seed=0,
                 shuffle=True):
        _rank, _world_size = get_dist_info()
        self.rank = _rank if rank is None else rank
        self.world_size = _world_size if world_size is None else world_size
        self.dataset = dataset
        self.batch_size = batch_size
        self.seed = 0 if seed is None else seed
        self.shuffle = shuffle
        self.size = len(dataset)
        self.indices = self._indices_of_rank()

    def _infinite_indices(self):
        """Infinitely yield a sequence of indices."""
        g = torch.Generator()
        g.manual_seed(self.seed)
        while True:
            if self.shuffle:
                yield from torch.randperm(self.size, generator=g).tolist()
            else:
                yield from torch.arange(self.size).tolist()

    def _indices_of_rank(self):
        """Slice the infinite indices by rank."""
        yield from itertools.islice(self._infinite_indices(), self.rank,
                                    None, self.world_size)

    def __iter__(self):
        batch = []
        for idx in self.indices:
            batch.append(idx)
            if len(batch) == self.batch_size:
                yield batch
                batch = []

    def __len__(self):
        """Length of base dataset."""
        return self.size

    def set_epoch(self, epoch):
        """Not supported in `IterationBased` runner."""
        raise NotImplementedError
|
def replace_ImageToTensor(pipelines):
    """Replace every ImageToTensor transform in a data pipeline with
    DefaultFormatBundle, which is normally useful in batch inference.

    The input config list is deep-copied; the originals are not modified.
    ``MultiScaleFlipAug`` entries are handled recursively.

    Args:
        pipelines (list[dict]): Data pipeline configs.

    Returns:
        list: The new pipeline list with all ImageToTensor replaced by
        DefaultFormatBundle.

    Examples:
        >>> pipelines = [
        ...     dict(type='LoadImageFromFile'),
        ...     dict(
        ...         type='MultiScaleFlipAug',
        ...         img_scale=(1333, 800),
        ...         flip=False,
        ...         transforms=[
        ...             dict(type='Resize', keep_ratio=True),
        ...             dict(type='RandomFlip'),
        ...             dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]),
        ...             dict(type='Pad', size_divisor=32),
        ...             dict(type='ImageToTensor', keys=['img']),
        ...             dict(type='Collect', keys=['img']),
        ...         ])
        ...     ]
        >>> expected_pipelines = [
        ...     dict(type='LoadImageFromFile'),
        ...     dict(
        ...         type='MultiScaleFlipAug',
        ...         img_scale=(1333, 800),
        ...         flip=False,
        ...         transforms=[
        ...             dict(type='Resize', keep_ratio=True),
        ...             dict(type='RandomFlip'),
        ...             dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]),
        ...             dict(type='Pad', size_divisor=32),
        ...             dict(type='DefaultFormatBundle'),
        ...             dict(type='Collect', keys=['img']),
        ...         ])
        ...     ]
        >>> assert expected_pipelines == replace_ImageToTensor(pipelines)
    """
    # Work on a deep copy so caller configs stay untouched.
    pipelines = copy.deepcopy(pipelines)
    for idx, transform in enumerate(pipelines):
        ttype = transform['type']
        if ttype == 'MultiScaleFlipAug':
            # Recurse into the nested test-time-augmentation pipeline.
            assert 'transforms' in transform
            transform['transforms'] = replace_ImageToTensor(
                transform['transforms'])
        elif ttype == 'ImageToTensor':
            warnings.warn('"ImageToTensor" pipeline is replaced by "DefaultFormatBundle" for batch inference. It is recommended to manually replace it in the test data pipeline in your config file.', UserWarning)
            pipelines[idx] = {'type': 'DefaultFormatBundle'}
    return pipelines
|
def get_loading_pipeline(pipeline):
    """Only keep loading image and annotations related configuration.

    Args:
        pipeline (list[dict]): Data pipeline configs.

    Returns:
        list[dict]: The new pipeline list containing only the image- and
        annotation-loading configs.

    Examples:
        >>> pipelines = [
        ...    dict(type='LoadImageFromFile'),
        ...    dict(type='LoadAnnotations', with_bbox=True),
        ...    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
        ...    dict(type='RandomFlip', flip_ratio=0.5),
        ...    dict(type='Normalize', **img_norm_cfg),
        ...    dict(type='Pad', size_divisor=32),
        ...    dict(type='DefaultFormatBundle'),
        ...    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
        ...    ]
        >>> expected_pipelines = [
        ...    dict(type='LoadImageFromFile'),
        ...    dict(type='LoadAnnotations', with_bbox=True)
        ...    ]
        >>> assert expected_pipelines == get_loading_pipeline(pipelines)
    """
    # Transform classes that count as "loading" stages.
    loading_types = (LoadImageFromFile, LoadAnnotations,
                     LoadPanopticAnnotations)
    loading_pipeline_cfg = []
    for cfg in pipeline:
        # Resolve the config's type string through the registry; unknown
        # types resolve to None and are skipped.
        obj_cls = PIPELINES.get(cfg['type'])
        if obj_cls is not None and obj_cls in loading_types:
            loading_pipeline_cfg.append(cfg)
    assert len(loading_pipeline_cfg) == 2, 'The data pipeline in your config file must include loading image and annotations related pipeline.'
    return loading_pipeline_cfg
|
@HOOKS.register_module()
class NumClassCheckHook(Hook):
    """Hook that verifies the ``num_classes`` of every head matches the
    number of entries in the dataset's ``CLASSES`` before each epoch."""

    def _check_head(self, runner):
        """Check whether the `num_classes` in head matches the length of
        `CLASSES` in `dataset`.

        Args:
            runner (obj:`EpochBasedRunner`): Epoch based Runner.
        """
        model = runner.model
        dataset = runner.data_loader.dataset
        if dataset.CLASSES is None:
            # Fixed message spacing: previously read "... andcheck ...".
            runner.logger.warning(
                f'Please set `CLASSES` in the {dataset.__class__.__name__} '
                'and check if it is consistent with the `num_classes` of head')
        else:
            # A bare string would silently iterate per-character; require a
            # tuple/list. Fixed message spacing and grammar.
            assert type(dataset.CLASSES) is not str, (
                f'`CLASSES` in {dataset.__class__.__name__} should be a '
                f'tuple of str. Add comma if number of classes is 1 as '
                f'CLASSES = ({dataset.CLASSES},)')
            for name, module in model.named_modules():
                # Heads such as RPN predict objectness, not dataset classes,
                # so their `num_classes` is exempt from this check.
                if hasattr(module, 'num_classes') and not isinstance(
                        module, (RPNHead, VGG, FusedSemanticHead, GARPNHead)):
                    assert module.num_classes == len(dataset.CLASSES), (
                        f'The `num_classes` ({module.num_classes}) in '
                        f'{module.__class__.__name__} of '
                        f'{model.__class__.__name__} does not match the '
                        f'length of `CLASSES` ({len(dataset.CLASSES)}) in '
                        f'{dataset.__class__.__name__}')

    def before_train_epoch(self, runner):
        """Check whether the training dataset is compatible with head.

        Args:
            runner (obj:`EpochBasedRunner`): Epoch based Runner.
        """
        self._check_head(runner)

    def before_val_epoch(self, runner):
        """Check whether the dataset in val epoch is compatible with head.

        Args:
            runner (obj:`EpochBasedRunner`): Epoch based Runner.
        """
        self._check_head(runner)
|
@DATASETS.register_module()
class VOCDataset(XMLDataset):
    # The 20 Pascal VOC object categories, in the canonical challenge order.
    CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')
    # One RGB color per class, used for visualization.
    PALETTE = [(106, 0, 228), (119, 11, 32), (165, 42, 42), (0, 0, 192), (197, 226, 255), (0, 60, 100), (0, 0, 142), (255, 77, 255), (153, 69, 1), (120, 166, 157), (0, 182, 199), (0, 226, 252), (182, 182, 255), (0, 0, 230), (220, 20, 60), (163, 255, 0), (0, 82, 0), (3, 95, 161), (0, 80, 100), (183, 130, 88)]
    def __init__(self, **kwargs):
        super(VOCDataset, self).__init__(**kwargs)
        # Infer the challenge year from the image prefix; the year selects
        # the AP protocol in ``evaluate`` (VOC2007 uses 11-point
        # interpolated AP).
        if ('VOC2007' in self.img_prefix):
            self.year = 2007
        elif ('VOC2012' in self.img_prefix):
            self.year = 2012
        else:
            raise ValueError('Cannot infer dataset year from img_prefix')
    def evaluate(self, results, metric='mAP', logger=None, proposal_nums=(100, 300, 1000), iou_thr=0.5, scale_ranges=None):
        "Evaluate in VOC protocol.\n\n Args:\n results (list[list | tuple]): Testing results of the dataset.\n metric (str | list[str]): Metrics to be evaluated. Options are\n 'mAP', 'recall'.\n logger (logging.Logger | str, optional): Logger used for printing\n related information during evaluation. Default: None.\n proposal_nums (Sequence[int]): Proposal number used for evaluating\n recalls, such as recall@100, recall@1000.\n Default: (100, 300, 1000).\n iou_thr (float | list[float]): IoU threshold. Default: 0.5.\n scale_ranges (list[tuple], optional): Scale ranges for evaluating\n mAP. If not specified, all bounding boxes would be included in\n evaluation. Default: None.\n\n Returns:\n dict[str, float]: AP/recall metrics.\n "
        # Only a single metric per call is supported.
        if (not isinstance(metric, str)):
            assert (len(metric) == 1)
            metric = metric[0]
        allowed_metrics = ['mAP', 'recall']
        if (metric not in allowed_metrics):
            raise KeyError(f'metric {metric} is not supported')
        annotations = [self.get_ann_info(i) for i in range(len(self))]
        eval_results = OrderedDict()
        # Normalize a single float threshold into a list.
        iou_thrs = ([iou_thr] if isinstance(iou_thr, float) else iou_thr)
        if (metric == 'mAP'):
            assert isinstance(iou_thrs, list)
            # 'voc07' triggers the 11-point interpolated AP inside eval_map;
            # otherwise the class names are passed through for reporting.
            if (self.year == 2007):
                ds_name = 'voc07'
            else:
                ds_name = self.CLASSES
            mean_aps = []
            for iou_thr in iou_thrs:
                print_log(f'''
{('-' * 15)}iou_thr: {iou_thr}{('-' * 15)}''')
                # use_legacy_coordinate=True keeps the historical VOC
                # "w = x2 - x1 + 1" box convention.
                (mean_ap, _) = eval_map(results, annotations, scale_ranges=None, iou_thr=iou_thr, dataset=ds_name, logger=logger, use_legacy_coordinate=True)
                mean_aps.append(mean_ap)
                eval_results[f'AP{int((iou_thr * 100)):02d}'] = round(mean_ap, 3)
            eval_results['mAP'] = (sum(mean_aps) / len(mean_aps))
            # Report mAP first in the ordered result dict.
            eval_results.move_to_end('mAP', last=False)
        elif (metric == 'recall'):
            gt_bboxes = [ann['bboxes'] for ann in annotations]
            recalls = eval_recalls(gt_bboxes, results, proposal_nums, iou_thrs, logger=logger, use_legacy_coordinate=True)
            for (i, num) in enumerate(proposal_nums):
                for (j, iou_thr) in enumerate(iou_thrs):
                    eval_results[f'recall@{num}@{iou_thr}'] = recalls[(i, j)]
            # Average recall over IoU thresholds when more than one is given.
            if (recalls.shape[1] > 1):
                ar = recalls.mean(axis=1)
                for (i, num) in enumerate(proposal_nums):
                    eval_results[f'AR@{num}'] = ar[i]
        return eval_results
|
@DATASETS.register_module()
class WIDERFaceDataset(XMLDataset):
    """Reader for the WIDER Face dataset in PASCAL VOC format.

    Conversion scripts can be found in
    https://github.com/sovrasov/wider-face-pascal-voc-annotations
    """

    CLASSES = ('face',)
    PALETTE = [(0, 255, 0)]

    def __init__(self, **kwargs):
        super(WIDERFaceDataset, self).__init__(**kwargs)

    def load_annotations(self, ann_file):
        """Load annotation from WIDERFace XML style annotation file.

        Args:
            ann_file (str): Path of XML file listing the image ids.

        Returns:
            list[dict]: Annotation info (id, filename, width, height) per
            image.
        """
        data_infos = []
        for img_id in mmcv.list_from_file(ann_file):
            filename = f'{img_id}.jpg'
            xml_path = osp.join(self.img_prefix, 'Annotations',
                                f'{img_id}.xml')
            root = ET.parse(xml_path).getroot()
            # Image dimensions come from the XML, not the image file.
            size = root.find('size')
            width = int(size.find('width').text)
            height = int(size.find('height').text)
            # WIDER Face stores images under per-event folders.
            folder = root.find('folder').text
            data_infos.append(
                dict(
                    id=img_id,
                    filename=osp.join(folder, filename),
                    width=width,
                    height=height))
        return data_infos
|
@DATASETS.register_module()
class XMLDataset(CustomDataset):
    'XML dataset for detection.\n\n Args:\n min_size (int | float, optional): The minimum size of bounding\n boxes in the images. If the size of a bounding box is less than\n ``min_size``, it would be add to ignored field.\n img_subdir (str): Subdir where images are stored. Default: JPEGImages.\n ann_subdir (str): Subdir where annotations are. Default: Annotations.\n '
    def __init__(self, min_size=None, img_subdir='JPEGImages', ann_subdir='Annotations', **kwargs):
        # ``CLASSES`` must be available (as class attr or via the
        # ``classes`` kwarg) so ``cat2label`` can be built below.
        assert (self.CLASSES or kwargs.get('classes', None)), 'CLASSES in `XMLDataset` can not be None.'
        # Set the subdirs before super().__init__, which triggers
        # ``load_annotations`` and needs them.
        self.img_subdir = img_subdir
        self.ann_subdir = ann_subdir
        super(XMLDataset, self).__init__(**kwargs)
        # Map class name -> contiguous label index.
        self.cat2label = {cat: i for (i, cat) in enumerate(self.CLASSES)}
        self.min_size = min_size
    def load_annotations(self, ann_file):
        'Load annotation from XML style ann_file.\n\n Args:\n ann_file (str): Path of XML file.\n\n Returns:\n list[dict]: Annotation info from XML file.\n '
        data_infos = []
        img_ids = mmcv.list_from_file(ann_file)
        for img_id in img_ids:
            filename = osp.join(self.img_subdir, f'{img_id}.jpg')
            xml_path = osp.join(self.img_prefix, self.ann_subdir, f'{img_id}.xml')
            tree = ET.parse(xml_path)
            root = tree.getroot()
            size = root.find('size')
            if (size is not None):
                width = int(size.find('width').text)
                height = int(size.find('height').text)
            else:
                # Fall back to reading the image header when the XML has no
                # <size> element.
                img_path = osp.join(self.img_prefix, filename)
                img = Image.open(img_path)
                (width, height) = img.size
            data_infos.append(dict(id=img_id, filename=filename, width=width, height=height))
        return data_infos
    def _filter_imgs(self, min_size=32):
        'Filter images too small or without annotation.'
        valid_inds = []
        for (i, img_info) in enumerate(self.data_infos):
            if (min(img_info['width'], img_info['height']) < min_size):
                continue
            if self.filter_empty_gt:
                # Keep the image only if at least one annotated object
                # belongs to a known class.
                img_id = img_info['id']
                xml_path = osp.join(self.img_prefix, self.ann_subdir, f'{img_id}.xml')
                tree = ET.parse(xml_path)
                root = tree.getroot()
                # for/break: the first matching object accepts the image.
                for obj in root.findall('object'):
                    name = obj.find('name').text
                    if (name in self.CLASSES):
                        valid_inds.append(i)
                        break
            else:
                valid_inds.append(i)
        return valid_inds
    def get_ann_info(self, idx):
        'Get annotation from XML file by index.\n\n Args:\n idx (int): Index of data.\n\n Returns:\n dict: Annotation info of specified index.\n '
        img_id = self.data_infos[idx]['id']
        xml_path = osp.join(self.img_prefix, self.ann_subdir, f'{img_id}.xml')
        tree = ET.parse(xml_path)
        root = tree.getroot()
        bboxes = []
        labels = []
        bboxes_ignore = []
        labels_ignore = []
        for obj in root.findall('object'):
            name = obj.find('name').text
            # Objects of unknown classes are dropped entirely.
            if (name not in self.CLASSES):
                continue
            label = self.cat2label[name]
            # Missing <difficult> tags are treated as not difficult.
            difficult = obj.find('difficult')
            difficult = (0 if (difficult is None) else int(difficult.text))
            bnd_box = obj.find('bndbox')
            bbox = [int(float(bnd_box.find('xmin').text)), int(float(bnd_box.find('ymin').text)), int(float(bnd_box.find('xmax').text)), int(float(bnd_box.find('ymax').text))]
            ignore = False
            if self.min_size:
                # min_size filtering is a training-time option only.
                assert (not self.test_mode)
                w = (bbox[2] - bbox[0])
                h = (bbox[3] - bbox[1])
                if ((w < self.min_size) or (h < self.min_size)):
                    ignore = True
            # Difficult or undersized boxes go to the *_ignore fields.
            if (difficult or ignore):
                bboxes_ignore.append(bbox)
                labels_ignore.append(label)
            else:
                bboxes.append(bbox)
                labels.append(label)
        if (not bboxes):
            bboxes = np.zeros((0, 4))
            labels = np.zeros((0,))
        else:
            # VOC-style XML coordinates are 1-based; shift to 0-based.
            bboxes = (np.array(bboxes, ndmin=2) - 1)
            labels = np.array(labels)
        if (not bboxes_ignore):
            bboxes_ignore = np.zeros((0, 4))
            labels_ignore = np.zeros((0,))
        else:
            bboxes_ignore = (np.array(bboxes_ignore, ndmin=2) - 1)
            labels_ignore = np.array(labels_ignore)
        ann = dict(bboxes=bboxes.astype(np.float32), labels=labels.astype(np.int64), bboxes_ignore=bboxes_ignore.astype(np.float32), labels_ignore=labels_ignore.astype(np.int64))
        return ann
    def get_cat_ids(self, idx):
        'Get category ids in XML file by index.\n\n Args:\n idx (int): Index of data.\n\n Returns:\n list[int]: All categories in the image of specified index.\n '
        cat_ids = []
        img_id = self.data_infos[idx]['id']
        xml_path = osp.join(self.img_prefix, self.ann_subdir, f'{img_id}.xml')
        tree = ET.parse(xml_path)
        root = tree.getroot()
        for obj in root.findall('object'):
            name = obj.find('name').text
            # Unknown class names are skipped, mirroring get_ann_info.
            if (name not in self.CLASSES):
                continue
            label = self.cat2label[name]
            cat_ids.append(label)
        return cat_ids
|
class ResBlock(BaseModule):
    """The basic residual block used in Darknet.

    Each ResBlock consists of two ConvModules (Conv + BN + LeakyReLU) and
    adds the input to the final output. Following the YOLOv3 paper, the
    first ConvModule is a 1x1 conv with half the channels; the second is a
    3x3 conv restoring the original channel count.

    Args:
        in_channels (int): The input channels. Must be even.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Default: dict(type='BN', requires_grad=True)
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='LeakyReLU', negative_slope=0.1).
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    def __init__(self,
                 in_channels,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
                 init_cfg=None):
        super(ResBlock, self).__init__(init_cfg)
        assert in_channels % 2 == 0
        half_in_channels = in_channels // 2
        common = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
        # 1x1 squeeze to half the channels, then 3x3 expand back.
        self.conv1 = ConvModule(in_channels, half_in_channels, 1, **common)
        self.conv2 = ConvModule(
            half_in_channels, in_channels, 3, padding=1, **common)

    def forward(self, x):
        """Add the block's conv output to the identity shortcut."""
        return self.conv2(self.conv1(x)) + x
|
@BACKBONES.register_module()
class Darknet(BaseModule):
    """Darknet backbone.

    Args:
        depth (int): Depth of Darknet. Currently only support 53.
        out_indices (Sequence[int]): Output from which stages.
        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
            -1 means not freezing any parameters. Default: -1.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Default: dict(type='BN', requires_grad=True)
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='LeakyReLU', negative_slope=0.1).
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        pretrained (str, optional): model pretrained path. Default: None
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None

    Example:
        >>> from mmdet.models import Darknet
        >>> import torch
        >>> self = Darknet(depth=53)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 416, 416)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        ...
        (1, 256, 52, 52)
        (1, 512, 26, 26)
        (1, 1024, 13, 13)
    """

    # depth -> (ResBlocks per stage, (in, out) channels per stage).
    arch_settings = {53: ((1, 2, 8, 8, 4), ((32, 64), (64, 128), (128, 256), (256, 512), (512, 1024)))}

    def __init__(self, depth=53, out_indices=(3, 4, 5), frozen_stages=(- 1), conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), act_cfg=dict(type='LeakyReLU', negative_slope=0.1), norm_eval=True, pretrained=None, init_cfg=None):
        super(Darknet, self).__init__(init_cfg)
        if (depth not in self.arch_settings):
            raise KeyError(f'invalid depth {depth} for darknet')
        self.depth = depth
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        (self.layers, self.channels) = self.arch_settings[depth]
        cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
        # Stem conv is entry 0 of ``cr_blocks``; the conv-res stages follow,
        # so out_indices count from the stem.
        self.conv1 = ConvModule(3, 32, 3, padding=1, **cfg)
        self.cr_blocks = ['conv1']
        for (i, n_layers) in enumerate(self.layers):
            layer_name = f'conv_res_block{(i + 1)}'
            (in_c, out_c) = self.channels[i]
            self.add_module(layer_name, self.make_conv_res_block(in_c, out_c, n_layers, **cfg))
            self.cr_blocks.append(layer_name)
        self.norm_eval = norm_eval
        assert (not (init_cfg and pretrained)), 'init_cfg and pretrained cannot be specified at the same time'
        if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif (pretrained is None):
            if (init_cfg is None):
                # Default init: Kaiming for convs, constant 1 for norm layers.
                self.init_cfg = [dict(type='Kaiming', layer='Conv2d'), dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm'])]
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        """Forward function.

        Returns:
            tuple: Feature maps of the stages listed in ``out_indices``.
        """
        outs = []
        for (i, layer_name) in enumerate(self.cr_blocks):
            cr_block = getattr(self, layer_name)
            x = cr_block(x)
            if (i in self.out_indices):
                outs.append(x)
        return tuple(outs)

    def _freeze_stages(self):
        # Put the first ``frozen_stages`` entries of ``cr_blocks`` in eval
        # mode and stop their gradients.
        if (self.frozen_stages >= 0):
            for i in range(self.frozen_stages):
                m = getattr(self, self.cr_blocks[i])
                m.eval()
                for param in m.parameters():
                    param.requires_grad = False

    def train(self, mode=True):
        super(Darknet, self).train(mode)
        self._freeze_stages()
        if (mode and self.norm_eval):
            # Keep BatchNorm running stats frozen during training.
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()

    @staticmethod
    def make_conv_res_block(in_channels, out_channels, res_repeat, conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), act_cfg=dict(type='LeakyReLU', negative_slope=0.1)):
        """In Darknet backbone, ConvLayer is usually followed by ResBlock.
        This function will make that. The Conv layers always have 3x3 filters
        with stride=2. The number of the filters in Conv layer is the same as
        the out channels of the ResBlock.

        Args:
            in_channels (int): The number of input channels.
            out_channels (int): The number of output channels.
            res_repeat (int): The number of ResBlocks.
            conv_cfg (dict): Config dict for convolution layer. Default: None.
            norm_cfg (dict): Dictionary to construct and config norm layer.
                Default: dict(type='BN', requires_grad=True)
            act_cfg (dict): Config dict for activation layer.
                Default: dict(type='LeakyReLU', negative_slope=0.1).
        """
        cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
        model = nn.Sequential()
        # One stride-2 conv to downsample, then ``res_repeat`` residual blocks.
        model.add_module('conv', ConvModule(in_channels, out_channels, 3, stride=2, padding=1, **cfg))
        for idx in range(res_repeat):
            model.add_module('res{}'.format(idx), ResBlock(out_channels, **cfg))
        return model
|
class Bottleneck(_Bottleneck):
    """Bottleneck for the ResNet backbone in `DetectoRS
    <https://arxiv.org/pdf/2006.02334.pdf>`_.

    This bottleneck allows the users to specify whether to use
    SAC (Switchable Atrous Convolution) and RFP (Recursive Feature Pyramid).

    Args:
        inplanes (int): The number of input channels.
        planes (int): The number of output channels before expansion.
        rfp_inplanes (int, optional): The number of channels from RFP.
            Default: None. If specified, an additional conv layer will be
            added for ``rfp_feat``. Otherwise, the structure is the same as
            base class.
        sac (dict, optional): Dictionary to construct SAC. Default: None.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """

    expansion = 4

    def __init__(self, inplanes, planes, rfp_inplanes=None, sac=None, init_cfg=None, **kwargs):
        super(Bottleneck, self).__init__(inplanes, planes, init_cfg=init_cfg, **kwargs)
        assert ((sac is None) or isinstance(sac, dict))
        self.sac = sac
        self.with_sac = (sac is not None)
        if self.with_sac:
            # Replace the conv2 built by the base class with an SAC conv.
            self.conv2 = build_conv_layer(self.sac, planes, planes, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, bias=False)
        self.rfp_inplanes = rfp_inplanes
        if self.rfp_inplanes:
            # 1x1 projection of the RFP feature to the block's output width.
            self.rfp_conv = build_conv_layer(None, self.rfp_inplanes, (planes * self.expansion), 1, stride=1, bias=True)
            if (init_cfg is None):
                # Zero-init rfp_conv so the RFP branch starts as a no-op.
                self.init_cfg = dict(type='Constant', val=0, override=dict(name='rfp_conv'))

    def rfp_forward(self, x, rfp_feat):
        """The forward function that also takes the RFP features as input."""

        def _inner_forward(x):
            # Standard bottleneck path: three conv/norm stages with optional
            # plugins after each, plus the identity shortcut.
            identity = x
            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names)
            out = self.conv2(out)
            out = self.norm2(out)
            out = self.relu(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names)
            out = self.conv3(out)
            out = self.norm3(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names)
            if (self.downsample is not None):
                identity = self.downsample(x)
            out += identity
            return out

        if (self.with_cp and x.requires_grad):
            # Gradient checkpointing: trade compute for activation memory.
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        if self.rfp_inplanes:
            # Add the projected RFP feature before the final activation.
            rfp_feat = self.rfp_conv(rfp_feat)
            out = (out + rfp_feat)
        out = self.relu(out)
        return out
|
class ResLayer(Sequential):
    """ResLayer to build ResNet style backbone for RPF in detectoRS.

    The difference between this module and base class is that we pass
    ``rfp_inplanes`` to the first block.

    Args:
        block (nn.Module): block used to build ResLayer.
        inplanes (int): inplanes of block.
        planes (int): planes of block.
        num_blocks (int): number of blocks.
        stride (int): stride of the first block. Default: 1
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottleneck. Default: False
        conv_cfg (dict): dictionary to construct and config conv layer.
            Default: None
        norm_cfg (dict): dictionary to construct and config norm layer.
            Default: dict(type='BN')
        downsample_first (bool): Downsample at the first block or last block.
            False for Hourglass, True for ResNet. Default: True
        rfp_inplanes (int, optional): The number of channels from RFP.
            Default: None. If specified, an additional conv layer will be
            added for ``rfp_feat``. Otherwise, the structure is the same as
            base class.
    """

    def __init__(self, block, inplanes, planes, num_blocks, stride=1, avg_down=False, conv_cfg=None, norm_cfg=dict(type='BN'), downsample_first=True, rfp_inplanes=None, **kwargs):
        self.block = block
        assert downsample_first, f'downsample_first={downsample_first} is not supported in DetectoRS'

        out_channels = planes * block.expansion
        downsample = None
        if stride != 1 or inplanes != out_channels:
            # Shortcut projection; with avg_down the stride moves from the
            # 1x1 conv onto an AvgPool in front of it.
            modules = []
            conv_stride = stride
            if avg_down and stride != 1:
                conv_stride = 1
                modules.append(nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True, count_include_pad=False))
            modules.append(build_conv_layer(conv_cfg, inplanes, out_channels, kernel_size=1, stride=conv_stride, bias=False))
            modules.append(build_norm_layer(norm_cfg, out_channels)[1])
            downsample = nn.Sequential(*modules)

        # Only the first block receives the stride, the downsample shortcut
        # and the RFP channels; the rest run at stride 1.
        blocks = [block(inplanes=inplanes, planes=planes, stride=stride, downsample=downsample, conv_cfg=conv_cfg, norm_cfg=norm_cfg, rfp_inplanes=rfp_inplanes, **kwargs)]
        inplanes = out_channels
        for _ in range(1, num_blocks):
            blocks.append(block(inplanes=inplanes, planes=planes, stride=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, **kwargs))
        super(ResLayer, self).__init__(*blocks)
|
@BACKBONES.register_module()
class DetectoRS_ResNet(ResNet):
    """ResNet backbone for DetectoRS.

    Args:
        sac (dict, optional): Dictionary to construct SAC (Switchable Atrous
            Convolution). Default: None.
        stage_with_sac (list): Which stage to use sac. Default: (False, False,
            False, False).
        rfp_inplanes (int, optional): The number of channels from RFP.
            Default: None. If specified, an additional conv layer will be
            added for ``rfp_feat``. Otherwise, the structure is the same as
            base class.
        output_img (bool): If ``True``, the input image will be inserted into
            the starting position of output. Default: False.
    """

    arch_settings = {50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3))}

    def __init__(self, sac=None, stage_with_sac=(False, False, False, False), rfp_inplanes=None, output_img=False, pretrained=None, init_cfg=None, **kwargs):
        assert (not (init_cfg and pretrained)), 'init_cfg and pretrained cannot be specified at the same time'
        self.pretrained = pretrained
        if (init_cfg is not None):
            # Only 'Pretrained'-type init_cfg is accepted; its checkpoint
            # path is funneled into self.pretrained for init_weights().
            assert isinstance(init_cfg, dict), f'init_cfg must be a dict, but got {type(init_cfg)}'
            if ('type' in init_cfg):
                assert (init_cfg.get('type') == 'Pretrained'), 'Only can initialize module by loading a pretrained model'
            else:
                raise KeyError('`init_cfg` must contain the key "type"')
            self.pretrained = init_cfg.get('checkpoint')
        self.sac = sac
        self.stage_with_sac = stage_with_sac
        self.rfp_inplanes = rfp_inplanes
        self.output_img = output_img
        super(DetectoRS_ResNet, self).__init__(**kwargs)
        # Rebuild the residual stages created by the base class so that the
        # SAC / RFP options take effect.
        self.inplanes = self.stem_channels
        self.res_layers = []
        for (i, num_blocks) in enumerate(self.stage_blocks):
            stride = self.strides[i]
            dilation = self.dilations[i]
            dcn = (self.dcn if self.stage_with_dcn[i] else None)
            sac = (self.sac if self.stage_with_sac[i] else None)
            if (self.plugins is not None):
                stage_plugins = self.make_stage_plugins(self.plugins, i)
            else:
                stage_plugins = None
            planes = (self.base_channels * (2 ** i))
            # RFP features are fed only to stages after the first one.
            res_layer = self.make_res_layer(block=self.block, inplanes=self.inplanes, planes=planes, num_blocks=num_blocks, stride=stride, dilation=dilation, style=self.style, avg_down=self.avg_down, with_cp=self.with_cp, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, dcn=dcn, sac=sac, rfp_inplanes=(rfp_inplanes if (i > 0) else None), plugins=stage_plugins)
            self.inplanes = (planes * self.block.expansion)
            layer_name = f'layer{(i + 1)}'
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)
        self._freeze_stages()

    def init_weights(self):
        # Load a checkpoint when a path is given; otherwise apply Kaiming /
        # constant init, zero DCN offsets, and optionally zero the last norm
        # of each residual branch (identity-at-init).
        if isinstance(self.pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, self.pretrained, strict=False, logger=logger)
        elif (self.pretrained is None):
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)
            if (self.dcn is not None):
                for m in self.modules():
                    if (isinstance(m, Bottleneck) and hasattr(m.conv2, 'conv_offset')):
                        constant_init(m.conv2.conv_offset, 0)
            if self.zero_init_residual:
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None')

    def make_res_layer(self, **kwargs):
        """Pack all blocks in a stage into a ``ResLayer`` for DetectoRS."""
        return ResLayer(**kwargs)

    def forward(self, x):
        """Forward function."""
        outs = list(super(DetectoRS_ResNet, self).forward(x))
        if self.output_img:
            # Prepend the raw input image to the outputs.
            outs.insert(0, x)
        return tuple(outs)

    def rfp_forward(self, x, rfp_feats):
        """Forward function for RFP."""
        if self.deep_stem:
            x = self.stem(x)
        else:
            x = self.conv1(x)
            x = self.norm1(x)
            x = self.relu(x)
        x = self.maxpool(x)
        outs = []
        for (i, layer_name) in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            # No RFP feature for the first stage (mirrors __init__).
            rfp_feat = (rfp_feats[i] if (i > 0) else None)
            for layer in res_layer:
                x = layer.rfp_forward(x, rfp_feat)
            if (i in self.out_indices):
                outs.append(x)
        return tuple(outs)
|
class Bottleneck(_Bottleneck):
    expansion = 4

    def __init__(self, inplanes, planes, groups=1, base_width=4, base_channels=64, **kwargs):
        """Bottleneck block for ResNeXt.

        If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
        it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
        # Width of the grouped 3x3 conv; scaled by base_width/base_channels
        # when grouped convolution is used.
        if (groups == 1):
            width = self.planes
        else:
            width = (math.floor((self.planes * (base_width / base_channels))) * groups)
        # Rebuild the convs/norms created by the base class so they use the
        # ResNeXt ``width`` and grouped convolution.
        (self.norm1_name, norm1) = build_norm_layer(self.norm_cfg, width, postfix=1)
        (self.norm2_name, norm2) = build_norm_layer(self.norm_cfg, width, postfix=2)
        (self.norm3_name, norm3) = build_norm_layer(self.norm_cfg, (self.planes * self.expansion), postfix=3)
        self.conv1 = build_conv_layer(self.conv_cfg, self.inplanes, width, kernel_size=1, stride=self.conv1_stride, bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        self.with_modulated_dcn = False
        if self.with_dcn:
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
        # conv2 is built from SAC, plain conv, or DCN — in that priority.
        if self.with_sac:
            self.conv2 = build_conv_layer(self.sac, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, groups=groups, bias=False)
        elif ((not self.with_dcn) or fallback_on_stride):
            self.conv2 = build_conv_layer(self.conv_cfg, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, groups=groups, bias=False)
        else:
            assert (self.conv_cfg is None), 'conv_cfg must be None for DCN'
            self.conv2 = build_conv_layer(self.dcn, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, groups=groups, bias=False)
        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(self.conv_cfg, width, (self.planes * self.expansion), kernel_size=1, bias=False)
        self.add_module(self.norm3_name, norm3)
|
@BACKBONES.register_module()
class DetectoRS_ResNeXt(DetectoRS_ResNet):
    """ResNeXt backbone for DetectoRS.

    Args:
        groups (int): The number of groups in ResNeXt.
        base_width (int): The base width of ResNeXt.
    """

    arch_settings = {
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3)),
    }

    def __init__(self, groups=1, base_width=4, **kwargs):
        self.groups = groups
        self.base_width = base_width
        super(DetectoRS_ResNeXt, self).__init__(**kwargs)

    def make_res_layer(self, **kwargs):
        """Build a stage, forwarding the ResNeXt group settings to the blocks."""
        return super().make_res_layer(
            groups=self.groups,
            base_width=self.base_width,
            base_channels=self.base_channels,
            **kwargs)
|
class HourglassModule(BaseModule):
    """Hourglass Module for HourglassNet backbone.

    Generate module recursively and use BasicBlock as the base unit.

    Args:
        depth (int): Depth of current HourglassModule.
        stage_channels (list[int]): Feature channels of sub-modules in current
            and follow-up HourglassModule.
        stage_blocks (list[int]): Number of sub-modules stacked in current and
            follow-up HourglassModule.
        norm_cfg (dict): Dictionary to construct and config norm layer.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
        upsample_cfg (dict, optional): Config dict for interpolate layer.
            Default: `dict(mode='nearest')`
    """

    def __init__(self, depth, stage_channels, stage_blocks, norm_cfg=dict(type='BN', requires_grad=True), init_cfg=None, upsample_cfg=dict(mode='nearest')):
        super(HourglassModule, self).__init__(init_cfg)
        self.depth = depth
        cur_block = stage_blocks[0]
        next_block = stage_blocks[1]
        cur_channel = stage_channels[0]
        next_channel = stage_channels[1]
        # Skip branch kept at the current resolution.
        self.up1 = ResLayer(BasicBlock, cur_channel, cur_channel, cur_block, norm_cfg=norm_cfg)
        # Downsampling entry of the lower-resolution branch.
        self.low1 = ResLayer(BasicBlock, cur_channel, next_channel, cur_block, stride=2, norm_cfg=norm_cfg)
        if (self.depth > 1):
            # BUGFIX: propagate norm_cfg and upsample_cfg to the nested
            # module; previously they were dropped, so inner levels silently
            # fell back to the defaults even when the caller configured
            # a different norm or upsample mode. Defaults are unchanged.
            self.low2 = HourglassModule((depth - 1), stage_channels[1:], stage_blocks[1:], norm_cfg=norm_cfg, upsample_cfg=upsample_cfg)
        else:
            self.low2 = ResLayer(BasicBlock, next_channel, next_channel, next_block, norm_cfg=norm_cfg)
        # Channel-restoring exit of the lower branch (downsample at the end).
        self.low3 = ResLayer(BasicBlock, next_channel, cur_channel, cur_block, norm_cfg=norm_cfg, downsample_first=False)
        self.up2 = F.interpolate
        self.upsample_cfg = upsample_cfg

    def forward(self, x):
        """Forward function."""
        up1 = self.up1(x)
        low1 = self.low1(x)
        low2 = self.low2(low1)
        low3 = self.low3(low2)
        # Upsample the low branch back; when no explicit scale_factor is
        # configured, match the skip branch's spatial size exactly.
        if ('scale_factor' in self.upsample_cfg):
            up2 = self.up2(low3, **self.upsample_cfg)
        else:
            shape = up1.shape[2:]
            up2 = self.up2(low3, size=shape, **self.upsample_cfg)
        return (up1 + up2)
|
@BACKBONES.register_module()
class HourglassNet(BaseModule):
    """HourglassNet backbone.

    Stacked Hourglass Networks for Human Pose Estimation.
    More details can be found in the `paper
    <https://arxiv.org/abs/1603.06937>`_ .

    Args:
        downsample_times (int): Downsample times in a HourglassModule.
        num_stacks (int): Number of HourglassModule modules stacked,
            1 for Hourglass-52, 2 for Hourglass-104.
        stage_channels (list[int]): Feature channel of each sub-module in a
            HourglassModule.
        stage_blocks (list[int]): Number of sub-modules stacked in a
            HourglassModule.
        feat_channel (int): Feature channel of conv after a HourglassModule.
        norm_cfg (dict): Dictionary to construct and config norm layer.
        pretrained (str, optional): model pretrained path. Default: None
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None

    Example:
        >>> from mmdet.models import HourglassNet
        >>> import torch
        >>> self = HourglassNet()
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 511, 511)
        >>> level_outputs = self.forward(inputs)
        >>> for level_output in level_outputs:
        ...     print(tuple(level_output.shape))
        (1, 256, 128, 128)
        (1, 256, 128, 128)
    """

    def __init__(self, downsample_times=5, num_stacks=2, stage_channels=(256, 256, 384, 384, 384, 512), stage_blocks=(2, 2, 2, 2, 2, 4), feat_channel=256, norm_cfg=dict(type='BN', requires_grad=True), pretrained=None, init_cfg=None):
        assert (init_cfg is None), 'To prevent abnormal initialization behavior, init_cfg is not allowed to be set'
        super(HourglassNet, self).__init__(init_cfg)
        self.num_stacks = num_stacks
        assert (self.num_stacks >= 1)
        assert (len(stage_channels) == len(stage_blocks))
        # Each downsample level needs its own channel entry.
        assert (len(stage_channels) > downsample_times)
        cur_channel = stage_channels[0]
        # Stem: stride-2 7x7 conv followed by a stride-2 ResLayer.
        self.stem = nn.Sequential(ConvModule(3, (cur_channel // 2), 7, padding=3, stride=2, norm_cfg=norm_cfg), ResLayer(BasicBlock, (cur_channel // 2), cur_channel, 1, stride=2, norm_cfg=norm_cfg))
        self.hourglass_modules = nn.ModuleList([HourglassModule(downsample_times, stage_channels, stage_blocks) for _ in range(num_stacks)])
        # Refinement layers between consecutive stacks (one fewer than stacks).
        self.inters = ResLayer(BasicBlock, cur_channel, cur_channel, (num_stacks - 1), norm_cfg=norm_cfg)
        self.conv1x1s = nn.ModuleList([ConvModule(cur_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None) for _ in range((num_stacks - 1))])
        self.out_convs = nn.ModuleList([ConvModule(cur_channel, feat_channel, 3, padding=1, norm_cfg=norm_cfg) for _ in range(num_stacks)])
        # Map each stack's output back to cur_channel for the next stack.
        self.remap_convs = nn.ModuleList([ConvModule(feat_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None) for _ in range((num_stacks - 1))])
        self.relu = nn.ReLU(inplace=True)

    def init_weights(self):
        """Init module weights."""
        super(HourglassNet, self).init_weights()
        # Reset every conv to PyTorch's default initialization.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                m.reset_parameters()

    def forward(self, x):
        """Forward function.

        Returns:
            list: One feature map per stack.
        """
        inter_feat = self.stem(x)
        out_feats = []
        for ind in range(self.num_stacks):
            single_hourglass = self.hourglass_modules[ind]
            out_conv = self.out_convs[ind]
            hourglass_feat = single_hourglass(inter_feat)
            out_feat = out_conv(hourglass_feat)
            out_feats.append(out_feat)
            if (ind < (self.num_stacks - 1)):
                # Fuse the stack's input with its remapped output and refine
                # it to feed the next stack.
                inter_feat = (self.conv1x1s[ind](inter_feat) + self.remap_convs[ind](out_feat))
                inter_feat = self.inters[ind](self.relu(inter_feat))
        return out_feats
|
class HRModule(BaseModule):
    """High-Resolution Module for HRNet.

    In this module, every branch has 4 BasicBlocks/Bottlenecks. Fusion/Exchange
    is in this module.
    """

    def __init__(self, num_branches, blocks, num_blocks, in_channels, num_channels, multiscale_output=True, with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), block_init_cfg=None, init_cfg=None):
        super(HRModule, self).__init__(init_cfg)
        self.block_init_cfg = block_init_cfg
        self._check_branches(num_branches, num_blocks, in_channels, num_channels)
        self.in_channels = in_channels
        self.num_branches = num_branches
        self.multiscale_output = multiscale_output
        self.norm_cfg = norm_cfg
        self.conv_cfg = conv_cfg
        self.with_cp = with_cp
        self.branches = self._make_branches(num_branches, blocks, num_blocks, num_channels)
        self.fuse_layers = self._make_fuse_layers()
        self.relu = nn.ReLU(inplace=False)

    def _check_branches(self, num_branches, num_blocks, in_channels, num_channels):
        # All per-branch config lists must have exactly num_branches entries.
        if (num_branches != len(num_blocks)):
            error_msg = f'NUM_BRANCHES({num_branches}) != NUM_BLOCKS({len(num_blocks)})'
            raise ValueError(error_msg)
        if (num_branches != len(num_channels)):
            error_msg = f'NUM_BRANCHES({num_branches}) != NUM_CHANNELS({len(num_channels)})'
            raise ValueError(error_msg)
        if (num_branches != len(in_channels)):
            error_msg = f'NUM_BRANCHES({num_branches}) != NUM_INCHANNELS({len(in_channels)})'
            raise ValueError(error_msg)

    def _make_one_branch(self, branch_index, block, num_blocks, num_channels, stride=1):
        # Build one branch: a stack of ``block`` at a single resolution.
        downsample = None
        if ((stride != 1) or (self.in_channels[branch_index] != (num_channels[branch_index] * block.expansion))):
            # Project the shortcut when spatial size or channels change.
            downsample = nn.Sequential(build_conv_layer(self.conv_cfg, self.in_channels[branch_index], (num_channels[branch_index] * block.expansion), kernel_size=1, stride=stride, bias=False), build_norm_layer(self.norm_cfg, (num_channels[branch_index] * block.expansion))[1])
        layers = []
        layers.append(block(self.in_channels[branch_index], num_channels[branch_index], stride, downsample=downsample, with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg, init_cfg=self.block_init_cfg))
        # Record the branch's expanded channel count; the fuse layers built
        # later read it from self.in_channels.
        self.in_channels[branch_index] = (num_channels[branch_index] * block.expansion)
        for i in range(1, num_blocks[branch_index]):
            layers.append(block(self.in_channels[branch_index], num_channels[branch_index], with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg, init_cfg=self.block_init_cfg))
        return Sequential(*layers)

    def _make_branches(self, num_branches, block, num_blocks, num_channels):
        branches = []
        for i in range(num_branches):
            branches.append(self._make_one_branch(i, block, num_blocks, num_channels))
        return ModuleList(branches)

    def _make_fuse_layers(self):
        # fuse_layers[i][j] converts branch j's output to branch i's
        # resolution and channel count; None marks the identity (i == j).
        if (self.num_branches == 1):
            return None
        num_branches = self.num_branches
        in_channels = self.in_channels
        fuse_layers = []
        num_out_branches = (num_branches if self.multiscale_output else 1)
        for i in range(num_out_branches):
            fuse_layer = []
            for j in range(num_branches):
                if (j > i):
                    # Lower-resolution source: 1x1 conv to adapt channels,
                    # then upsample by 2**(j - i).
                    fuse_layer.append(nn.Sequential(build_conv_layer(self.conv_cfg, in_channels[j], in_channels[i], kernel_size=1, stride=1, padding=0, bias=False), build_norm_layer(self.norm_cfg, in_channels[i])[1], nn.Upsample(scale_factor=(2 ** (j - i)), mode='nearest')))
                elif (j == i):
                    fuse_layer.append(None)
                else:
                    # Higher-resolution source: chain of stride-2 3x3 convs;
                    # only the last one changes channels (and has no ReLU).
                    conv_downsamples = []
                    for k in range((i - j)):
                        if (k == ((i - j) - 1)):
                            conv_downsamples.append(nn.Sequential(build_conv_layer(self.conv_cfg, in_channels[j], in_channels[i], kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, in_channels[i])[1]))
                        else:
                            conv_downsamples.append(nn.Sequential(build_conv_layer(self.conv_cfg, in_channels[j], in_channels[j], kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, in_channels[j])[1], nn.ReLU(inplace=False)))
                    fuse_layer.append(nn.Sequential(*conv_downsamples))
            fuse_layers.append(nn.ModuleList(fuse_layer))
        return nn.ModuleList(fuse_layers)

    def forward(self, x):
        """Forward function."""
        if (self.num_branches == 1):
            return [self.branches[0](x[0])]
        # Run each branch, then sum every branch's (converted) output into
        # each output branch.
        for i in range(self.num_branches):
            x[i] = self.branches[i](x[i])
        x_fuse = []
        for i in range(len(self.fuse_layers)):
            y = 0
            for j in range(self.num_branches):
                if (i == j):
                    y += x[j]
                else:
                    y += self.fuse_layers[i][j](x[j])
            x_fuse.append(self.relu(y))
        return x_fuse
|
@BACKBONES.register_module()
class HRNet(BaseModule):
"HRNet backbone.\n\n `High-Resolution Representations for Labeling Pixels and Regions\n arXiv: <https://arxiv.org/abs/1904.04514>`_.\n\n Args:\n extra (dict): Detailed configuration for each stage of HRNet.\n There must be 4 stages, the configuration for each stage must have\n 5 keys:\n\n - num_modules(int): The number of HRModule in this stage.\n - num_branches(int): The number of branches in the HRModule.\n - block(str): The type of convolution block.\n - num_blocks(tuple): The number of blocks in each branch.\n The length must be equal to num_branches.\n - num_channels(tuple): The number of channels in each branch.\n The length must be equal to num_branches.\n in_channels (int): Number of input image channels. Default: 3.\n conv_cfg (dict): Dictionary to construct and config conv layer.\n norm_cfg (dict): Dictionary to construct and config norm layer.\n norm_eval (bool): Whether to set norm layers to eval mode, namely,\n freeze running stats (mean and var). Note: Effect on Batch Norm\n and its variants only. Default: True.\n with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n memory while slowing down the training speed. Default: False.\n zero_init_residual (bool): Whether to use zero init for last norm layer\n in resblocks to let them behave as identity. Default: False.\n multiscale_output (bool): Whether to output multi-level features\n produced by multiple branches. If False, only the first level\n feature will be output. Default: True.\n pretrained (str, optional): Model pretrained path. 
Default: None.\n init_cfg (dict or list[dict], optional): Initialization config dict.\n Default: None.\n\n Example:\n >>> from mmdet.models import HRNet\n >>> import torch\n >>> extra = dict(\n >>> stage1=dict(\n >>> num_modules=1,\n >>> num_branches=1,\n >>> block='BOTTLENECK',\n >>> num_blocks=(4, ),\n >>> num_channels=(64, )),\n >>> stage2=dict(\n >>> num_modules=1,\n >>> num_branches=2,\n >>> block='BASIC',\n >>> num_blocks=(4, 4),\n >>> num_channels=(32, 64)),\n >>> stage3=dict(\n >>> num_modules=4,\n >>> num_branches=3,\n >>> block='BASIC',\n >>> num_blocks=(4, 4, 4),\n >>> num_channels=(32, 64, 128)),\n >>> stage4=dict(\n >>> num_modules=3,\n >>> num_branches=4,\n >>> block='BASIC',\n >>> num_blocks=(4, 4, 4, 4),\n >>> num_channels=(32, 64, 128, 256)))\n >>> self = HRNet(extra, in_channels=1)\n >>> self.eval()\n >>> inputs = torch.rand(1, 1, 32, 32)\n >>> level_outputs = self.forward(inputs)\n >>> for level_out in level_outputs:\n ... print(tuple(level_out.shape))\n (1, 32, 8, 8)\n (1, 64, 4, 4)\n (1, 128, 2, 2)\n (1, 256, 1, 1)\n "
blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck}
def __init__(self, extra, in_channels=3, conv_cfg=None, norm_cfg=dict(type='BN'), norm_eval=True, with_cp=False, zero_init_residual=False, multiscale_output=True, pretrained=None, init_cfg=None):
    """Build the HRNet backbone from the four stage configs in ``extra``."""
    super(HRNet, self).__init__(init_cfg)
    self.pretrained = pretrained
    assert (not (init_cfg and pretrained)), 'init_cfg and pretrained cannot be specified at the same time'
    if isinstance(pretrained, str):
        warnings.warn('DeprecationWarning: pretrained is deprecated, please use "init_cfg" instead')
        self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
    elif (pretrained is None):
        if (init_cfg is None):
            # Default init: Kaiming for convs, constant 1 for norm layers.
            self.init_cfg = [dict(type='Kaiming', layer='Conv2d'), dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm'])]
    else:
        raise TypeError('pretrained must be a str or None')
    # All four stage configs are required and must be self-consistent.
    assert (('stage1' in extra) and ('stage2' in extra) and ('stage3' in extra) and ('stage4' in extra))
    for i in range(4):
        cfg = extra[f'stage{(i + 1)}']
        assert ((len(cfg['num_blocks']) == cfg['num_branches']) and (len(cfg['num_channels']) == cfg['num_branches']))
    self.extra = extra
    self.conv_cfg = conv_cfg
    self.norm_cfg = norm_cfg
    self.norm_eval = norm_eval
    self.with_cp = with_cp
    self.zero_init_residual = zero_init_residual
    # Stem: two stride-2 3x3 convs before stage 1.
    (self.norm1_name, norm1) = build_norm_layer(self.norm_cfg, 64, postfix=1)
    (self.norm2_name, norm2) = build_norm_layer(self.norm_cfg, 64, postfix=2)
    self.conv1 = build_conv_layer(self.conv_cfg, in_channels, 64, kernel_size=3, stride=2, padding=1, bias=False)
    self.add_module(self.norm1_name, norm1)
    self.conv2 = build_conv_layer(self.conv_cfg, 64, 64, kernel_size=3, stride=2, padding=1, bias=False)
    self.add_module(self.norm2_name, norm2)
    self.relu = nn.ReLU(inplace=True)
    # Stage 1: single-branch residual layer.
    self.stage1_cfg = self.extra['stage1']
    num_channels = self.stage1_cfg['num_channels'][0]
    block_type = self.stage1_cfg['block']
    num_blocks = self.stage1_cfg['num_blocks'][0]
    block = self.blocks_dict[block_type]
    stage1_out_channels = (num_channels * block.expansion)
    self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)
    # Stages 2-4: each is a transition layer plus a multi-branch HR stage;
    # channel lists are scaled by the block's expansion first.
    self.stage2_cfg = self.extra['stage2']
    num_channels = self.stage2_cfg['num_channels']
    block_type = self.stage2_cfg['block']
    block = self.blocks_dict[block_type]
    num_channels = [(channel * block.expansion) for channel in num_channels]
    self.transition1 = self._make_transition_layer([stage1_out_channels], num_channels)
    (self.stage2, pre_stage_channels) = self._make_stage(self.stage2_cfg, num_channels)
    self.stage3_cfg = self.extra['stage3']
    num_channels = self.stage3_cfg['num_channels']
    block_type = self.stage3_cfg['block']
    block = self.blocks_dict[block_type]
    num_channels = [(channel * block.expansion) for channel in num_channels]
    self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels)
    (self.stage3, pre_stage_channels) = self._make_stage(self.stage3_cfg, num_channels)
    self.stage4_cfg = self.extra['stage4']
    num_channels = self.stage4_cfg['num_channels']
    block_type = self.stage4_cfg['block']
    block = self.blocks_dict[block_type]
    num_channels = [(channel * block.expansion) for channel in num_channels]
    self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels)
    # Only the final stage honours ``multiscale_output``.
    (self.stage4, pre_stage_channels) = self._make_stage(self.stage4_cfg, num_channels, multiscale_output=multiscale_output)
@property
def norm1(self):
    """nn.Module: the normalization layer named "norm1" """
    layer_name = self.norm1_name
    return getattr(self, layer_name)
@property
def norm2(self):
    """nn.Module: the normalization layer named "norm2" """
    layer_name = self.norm2_name
    return getattr(self, layer_name)
def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer):
    # Build the per-branch transitions between two stages: a channel-adapting
    # 3x3 conv for existing branches whose widths changed (None = identity),
    # and a chain of stride-2 convs to spawn each new lower-resolution branch.
    num_branches_cur = len(num_channels_cur_layer)
    num_branches_pre = len(num_channels_pre_layer)
    transition_layers = []
    for i in range(num_branches_cur):
        if (i < num_branches_pre):
            if (num_channels_cur_layer[i] != num_channels_pre_layer[i]):
                transition_layers.append(nn.Sequential(build_conv_layer(self.conv_cfg, num_channels_pre_layer[i], num_channels_cur_layer[i], kernel_size=3, stride=1, padding=1, bias=False), build_norm_layer(self.norm_cfg, num_channels_cur_layer[i])[1], nn.ReLU(inplace=True)))
            else:
                # Channels already match; caller treats None as identity.
                transition_layers.append(None)
        else:
            # New branch: downsample from the last pre-stage branch;
            # only the final conv in the chain changes the channel count.
            conv_downsamples = []
            for j in range(((i + 1) - num_branches_pre)):
                in_channels = num_channels_pre_layer[(- 1)]
                out_channels = (num_channels_cur_layer[i] if (j == (i - num_branches_pre)) else in_channels)
                conv_downsamples.append(nn.Sequential(build_conv_layer(self.conv_cfg, in_channels, out_channels, kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, out_channels)[1], nn.ReLU(inplace=True)))
            transition_layers.append(nn.Sequential(*conv_downsamples))
    return nn.ModuleList(transition_layers)
def _make_layer(self, block, inplanes, planes, blocks, stride=1):
    """Stack ``blocks`` residual units of type ``block`` into one layer."""
    out_channels = planes * block.expansion
    downsample = None
    if stride != 1 or inplanes != out_channels:
        # Projection shortcut so the residual add has matching shapes.
        downsample = nn.Sequential(
            build_conv_layer(
                self.conv_cfg,
                inplanes,
                out_channels,
                kernel_size=1,
                stride=stride,
                bias=False),
            build_norm_layer(self.norm_cfg, out_channels)[1])
    block_init_cfg = None
    if self.pretrained is None and not hasattr(self, 'init_cfg') and self.zero_init_residual:
        # Zero-init the last norm of each unit so it starts as identity.
        if block is BasicBlock:
            block_init_cfg = dict(type='Constant', val=0, override=dict(name='norm2'))
        elif block is Bottleneck:
            block_init_cfg = dict(type='Constant', val=0, override=dict(name='norm3'))
    units = [
        block(
            inplanes,
            planes,
            stride,
            downsample=downsample,
            with_cp=self.with_cp,
            norm_cfg=self.norm_cfg,
            conv_cfg=self.conv_cfg,
            init_cfg=block_init_cfg)
    ]
    inplanes = out_channels
    for _ in range(1, blocks):
        units.append(
            block(
                inplanes,
                planes,
                with_cp=self.with_cp,
                norm_cfg=self.norm_cfg,
                conv_cfg=self.conv_cfg,
                init_cfg=block_init_cfg))
    return Sequential(*units)
def _make_stage(self, layer_config, in_channels, multiscale_output=True):
    """Build one HRNet stage as a sequence of ``HRModule`` instances.

    Returns:
        tuple: (stage as a ``Sequential``, the ``in_channels`` list).
    """
    num_modules = layer_config['num_modules']
    block = self.blocks_dict[layer_config['block']]
    block_init_cfg = None
    if self.pretrained is None and not hasattr(self, 'init_cfg') and self.zero_init_residual:
        # Zero-init the last norm of each residual unit (identity start).
        if block is BasicBlock:
            block_init_cfg = dict(type='Constant', val=0, override=dict(name='norm2'))
        elif block is Bottleneck:
            block_init_cfg = dict(type='Constant', val=0, override=dict(name='norm3'))
    hr_modules = []
    for module_idx in range(num_modules):
        # Only the very last module may drop the multi-scale outputs.
        is_last = module_idx == num_modules - 1
        reset_multiscale_output = multiscale_output or not is_last
        hr_modules.append(
            HRModule(
                layer_config['num_branches'],
                block,
                layer_config['num_blocks'],
                in_channels,
                layer_config['num_channels'],
                reset_multiscale_output,
                with_cp=self.with_cp,
                norm_cfg=self.norm_cfg,
                conv_cfg=self.conv_cfg,
                block_init_cfg=block_init_cfg))
    return Sequential(*hr_modules), in_channels
def forward(self, x):
    """Forward function: stem, stage 1-4, returning stage-4 branch outputs."""
    # Stem: two stride-2 conv/norm/relu units, then the first res layer.
    x = self.relu(self.norm1(self.conv1(x)))
    x = self.relu(self.norm2(self.conv2(x)))
    x = self.layer1(x)
    # Stage 2: transition1 adapts the single stem output per branch.
    branch_inputs = []
    for i in range(self.stage2_cfg['num_branches']):
        transition = self.transition1[i]
        branch_inputs.append(transition(x) if transition is not None else x)
    branch_outputs = self.stage2(branch_inputs)
    # Stage 3: new branches are derived from the last previous branch.
    branch_inputs = []
    for i in range(self.stage3_cfg['num_branches']):
        transition = self.transition2[i]
        branch_inputs.append(
            transition(branch_outputs[-1]) if transition is not None else branch_outputs[i])
    branch_outputs = self.stage3(branch_inputs)
    # Stage 4: same pattern as stage 3.
    branch_inputs = []
    for i in range(self.stage4_cfg['num_branches']):
        transition = self.transition3[i]
        branch_inputs.append(
            transition(branch_outputs[-1]) if transition is not None else branch_outputs[i])
    return self.stage4(branch_inputs)
def train(self, mode=True):
    """Switch train/eval mode, keeping normalization layers in eval mode
    (frozen running stats) when ``self.norm_eval`` is set."""
    super(HRNet, self).train(mode)
    if not (mode and self.norm_eval):
        return
    for module in self.modules():
        if isinstance(module, _BatchNorm):
            module.eval()
|
@BACKBONES.register_module()
class MobileNetV2(BaseModule):
    """MobileNetV2 backbone.

    Args:
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Default: 1.0.
        out_indices (Sequence[int], optional): Output from which stages.
            Default: (1, 2, 4, 7).
        frozen_stages (int): Stages to be frozen (all param fixed).
            Default: -1, which means not freezing any parameters.
        conv_cfg (dict, optional): Config dict for convolution layer.
            Default: None, which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='ReLU6').
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only. Default: False.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save
            some memory while slowing down the training speed.
            Default: False.
        pretrained (str, optional): Model pretrained path. Default: None.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None.
    """

    # Each row: [expand_ratio, channel, num_blocks, stride of first block].
    arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2],
                     [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2],
                     [6, 320, 1, 1]]

    def __init__(self,
                 widen_factor=1.0,
                 out_indices=(1, 2, 4, 7),
                 frozen_stages=-1,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 act_cfg=dict(type='ReLU6'),
                 norm_eval=False,
                 with_cp=False,
                 pretrained=None,
                 init_cfg=None):
        super(MobileNetV2, self).__init__(init_cfg)
        self.pretrained = pretrained
        assert not (init_cfg and pretrained), \
            'init_cfg and pretrained cannot be specified at the same time'
        if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif pretrained is None:
            if init_cfg is None:
                self.init_cfg = [
                    dict(type='Kaiming', layer='Conv2d'),
                    dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm'])
                ]
        else:
            raise TypeError('pretrained must be a str or None')
        self.widen_factor = widen_factor
        # Validate before storing; there are 8 candidate outputs
        # (7 inverted-residual layers + the final conv2).
        if not set(out_indices).issubset(set(range(0, 8))):
            raise ValueError(f'out_indices must be a subset of range(0, 8). But received {out_indices}')
        if frozen_stages not in range(-1, 8):
            raise ValueError(f'frozen_stages must be in range(-1, 8). But received {frozen_stages}')
        # BUGFIX: out_indices was previously assigned twice (before and after
        # validation); a single assignment after validation is sufficient.
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.norm_eval = norm_eval
        self.with_cp = with_cp
        # Stem: 3x3 stride-2 conv; channels rounded to a multiple of 8.
        self.in_channels = make_divisible(32 * widen_factor, 8)
        self.conv1 = ConvModule(
            in_channels=3,
            out_channels=self.in_channels,
            kernel_size=3,
            stride=2,
            padding=1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        self.layers = []
        for i, layer_cfg in enumerate(self.arch_settings):
            expand_ratio, channel, num_blocks, stride = layer_cfg
            out_channels = make_divisible(channel * widen_factor, 8)
            inverted_res_layer = self.make_layer(
                out_channels=out_channels,
                num_blocks=num_blocks,
                stride=stride,
                expand_ratio=expand_ratio)
            layer_name = f'layer{i + 1}'
            self.add_module(layer_name, inverted_res_layer)
            self.layers.append(layer_name)
        # Final 1x1 conv; only scaled up when widening (never below 1280).
        if widen_factor > 1.0:
            self.out_channel = int(1280 * widen_factor)
        else:
            self.out_channel = 1280
        layer = ConvModule(
            in_channels=self.in_channels,
            out_channels=self.out_channel,
            kernel_size=1,
            stride=1,
            padding=0,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        self.add_module('conv2', layer)
        self.layers.append('conv2')

    def make_layer(self, out_channels, num_blocks, stride, expand_ratio):
        """Stack InvertedResidual blocks to build a layer for MobileNetV2.

        Args:
            out_channels (int): out_channels of each block.
            num_blocks (int): Number of blocks.
            stride (int): Stride of the first block.
            expand_ratio (int): Expand the number of channels of the
                hidden layer in InvertedResidual by this ratio.

        Returns:
            nn.Sequential: The stacked blocks.
        """
        layers = []
        for i in range(num_blocks):
            if i >= 1:
                # Only the first block in a layer may downsample.
                stride = 1
            layers.append(
                InvertedResidual(
                    self.in_channels,
                    out_channels,
                    mid_channels=int(round(self.in_channels * expand_ratio)),
                    stride=stride,
                    with_expand_conv=expand_ratio != 1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg,
                    with_cp=self.with_cp))
            self.in_channels = out_channels
        return nn.Sequential(*layers)

    def _freeze_stages(self):
        """Freeze the stem conv and the first ``frozen_stages`` layers."""
        if self.frozen_stages >= 0:
            for param in self.conv1.parameters():
                param.requires_grad = False
            for i in range(1, self.frozen_stages + 1):
                layer = getattr(self, f'layer{i}')
                layer.eval()
                for param in layer.parameters():
                    param.requires_grad = False

    def forward(self, x):
        """Forward function.

        Returns:
            tuple[Tensor]: Feature maps of the layers listed in
            ``self.out_indices``.
        """
        x = self.conv1(x)
        outs = []
        for i, layer_name in enumerate(self.layers):
            layer = getattr(self, layer_name)
            x = layer(x)
            if i in self.out_indices:
                outs.append(x)
        return tuple(outs)

    def train(self, mode=True):
        """Convert the model into training mode while keeping the
        normalization layers frozen when ``norm_eval`` is set."""
        super(MobileNetV2, self).train(mode)
        self._freeze_stages()
        if mode and self.norm_eval:
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()
|
@BACKBONES.register_module()
class RegNet(ResNet):
    """RegNet backbone (`Designing Network Design Spaces
    <https://arxiv.org/abs/2003.13678>`_).

    Args:
        arch (dict | str): Either a key of ``arch_settings`` (e.g.
            ``'regnetx_3.2gf'``) or a dict with the RegNet parameters:

            - w0 (int): initial width
            - wa (float): slope of width
            - wm (float): quantization parameter to quantize the width
            - depth (int): depth of the backbone
            - group_w (int): width of group
            - bot_mul (float): bottleneck ratio, i.e. expansion of bottleneck.
        in_channels (int): Number of input image channels. Default: 3.
        stem_channels (int): Channels of the stem layer. Default: 32.
        base_channels (int): Base channels after stem layer. Default: 32.
        strides (Sequence[int]): Strides of the first block of each stage.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): ``pytorch`` or ``caffe``. If set to "pytorch", the
            stride-two layer is the 3x3 conv layer, otherwise the stride-two
            layer is the first 1x1 conv layer.
        frozen_stages (int): Stages to be frozen (all param fixed). -1 means
            not freezing any parameters.
        norm_cfg (dict): Dictionary to construct and config norm layer.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save
            some memory while slowing down the training speed.
        zero_init_residual (bool): Whether to use zero init for last norm
            layer in resblocks to let them behave as identity.
        pretrained (str, optional): Model pretrained path. Default: None.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None.
    """
    # Predefined RegNetX variants from the paper, keyed by FLOP budget.
    arch_settings = {'regnetx_400mf': dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0), 'regnetx_800mf': dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16, bot_mul=1.0), 'regnetx_1.6gf': dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18, bot_mul=1.0), 'regnetx_3.2gf': dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25, bot_mul=1.0), 'regnetx_4.0gf': dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23, bot_mul=1.0), 'regnetx_6.4gf': dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17, bot_mul=1.0), 'regnetx_8.0gf': dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23, bot_mul=1.0), 'regnetx_12gf': dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, bot_mul=1.0)}

    def __init__(self, arch, in_channels=3, stem_channels=32, base_channels=32, strides=(2, 2, 2, 2), dilations=(1, 1, 1, 1), out_indices=(0, 1, 2, 3), style='pytorch', deep_stem=False, avg_down=False, frozen_stages=(- 1), conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, dcn=None, stage_with_dcn=(False, False, False, False), plugins=None, with_cp=False, zero_init_residual=True, pretrained=None, init_cfg=None):
        # Intentionally skips ResNet.__init__ (which would build standard
        # ResNet stages) and runs the next initializer in the MRO instead,
        # so RegNet can build its own stages below.
        super(ResNet, self).__init__(init_cfg)
        if isinstance(arch, str):
            assert (arch in self.arch_settings), f'"arch": "{arch}" is not one of the arch_settings'
            arch = self.arch_settings[arch]
        elif (not isinstance(arch, dict)):
            raise ValueError(f'Expect "arch" to be either a string or a dict, got {type(arch)}')
        # Derive the per-block widths, then collapse equal-width runs into
        # stages and align widths with the group widths.
        (widths, num_stages) = self.generate_regnet(arch['w0'], arch['wa'], arch['wm'], arch['depth'])
        (stage_widths, stage_blocks) = self.get_stages_from_blocks(widths)
        group_widths = [arch['group_w'] for _ in range(num_stages)]
        self.bottleneck_ratio = [arch['bot_mul'] for _ in range(num_stages)]
        (stage_widths, group_widths) = self.adjust_width_group(stage_widths, self.bottleneck_ratio, group_widths)
        self.stage_widths = stage_widths
        self.group_widths = group_widths
        self.depth = sum(stage_blocks)
        self.stem_channels = stem_channels
        self.base_channels = base_channels
        self.num_stages = num_stages
        assert ((num_stages >= 1) and (num_stages <= 4))
        self.strides = strides
        self.dilations = dilations
        assert (len(strides) == len(dilations) == num_stages)
        self.out_indices = out_indices
        assert (max(out_indices) < num_stages)
        self.style = style
        self.deep_stem = deep_stem
        self.avg_down = avg_down
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.with_cp = with_cp
        self.norm_eval = norm_eval
        self.dcn = dcn
        self.stage_with_dcn = stage_with_dcn
        if (dcn is not None):
            assert (len(stage_with_dcn) == num_stages)
        self.plugins = plugins
        self.zero_init_residual = zero_init_residual
        self.block = Bottleneck
        # RegNet widths are final output widths, not pre-expansion widths,
        # so temporarily patch the class-level expansion to 1 while building
        # the layers; it is restored at the end of __init__.
        expansion_bak = self.block.expansion
        self.block.expansion = 1
        self.stage_blocks = stage_blocks[:num_stages]
        self._make_stem_layer(in_channels, stem_channels)
        block_init_cfg = None
        assert (not (init_cfg and pretrained)), 'init_cfg and pretrained cannot be specified at the same time'
        if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif (pretrained is None):
            if (init_cfg is None):
                self.init_cfg = [dict(type='Kaiming', layer='Conv2d'), dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm'])]
                # Zero-init the last norm of each residual unit so the
                # block starts as an identity mapping.
                if self.zero_init_residual:
                    block_init_cfg = dict(type='Constant', val=0, override=dict(name='norm3'))
        else:
            raise TypeError('pretrained must be a str or None')
        self.inplanes = stem_channels
        self.res_layers = []
        for (i, num_blocks) in enumerate(self.stage_blocks):
            stride = self.strides[i]
            dilation = self.dilations[i]
            group_width = self.group_widths[i]
            width = int(round((self.stage_widths[i] * self.bottleneck_ratio[i])))
            stage_groups = (width // group_width)
            dcn = (self.dcn if self.stage_with_dcn[i] else None)
            if (self.plugins is not None):
                stage_plugins = self.make_stage_plugins(self.plugins, i)
            else:
                stage_plugins = None
            res_layer = self.make_res_layer(block=self.block, inplanes=self.inplanes, planes=self.stage_widths[i], num_blocks=num_blocks, stride=stride, dilation=dilation, style=self.style, avg_down=self.avg_down, with_cp=self.with_cp, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, dcn=dcn, plugins=stage_plugins, groups=stage_groups, base_width=group_width, base_channels=self.stage_widths[i], init_cfg=block_init_cfg)
            self.inplanes = self.stage_widths[i]
            layer_name = f'layer{(i + 1)}'
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)
        self._freeze_stages()
        self.feat_dim = stage_widths[(- 1)]
        # Restore the class attribute patched above.
        self.block.expansion = expansion_bak

    def _make_stem_layer(self, in_channels, base_channels):
        """Build the stem: a single 3x3 stride-2 conv + norm + ReLU."""
        self.conv1 = build_conv_layer(self.conv_cfg, in_channels, base_channels, kernel_size=3, stride=2, padding=1, bias=False)
        (self.norm1_name, norm1) = build_norm_layer(self.norm_cfg, base_channels, postfix=1)
        self.add_module(self.norm1_name, norm1)
        self.relu = nn.ReLU(inplace=True)

    def generate_regnet(self, initial_width, width_slope, width_parameter, depth, divisor=8):
        """Generate per-block widths from RegNet parameters.

        Args:
            initial_width (int): Initial width of the backbone.
            width_slope (float): Slope of the quantized linear function.
            width_parameter (float): Parameter used to quantize the width.
            depth (int): Depth of the backbone.
            divisor (int, optional): The divisor of channels. Defaults to 8.

        Returns:
            tuple(list[int], int): The width of each block and the number
            of distinct widths, i.e. the number of stages.
        """
        assert (width_slope >= 0)
        assert (initial_width > 0)
        assert (width_parameter > 1)
        assert ((initial_width % divisor) == 0)
        # Continuous linear widths, quantized onto a geometric grid, then
        # rounded to multiples of `divisor`.
        widths_cont = ((np.arange(depth) * width_slope) + initial_width)
        ks = np.round((np.log((widths_cont / initial_width)) / np.log(width_parameter)))
        widths = (initial_width * np.power(width_parameter, ks))
        widths = (np.round(np.divide(widths, divisor)) * divisor)
        num_stages = len(np.unique(widths))
        (widths, widths_cont) = (widths.astype(int).tolist(), widths_cont.tolist())
        return (widths, num_stages)

    @staticmethod
    def quantize_float(number, divisor):
        """Convert a float to the closest non-zero int divisible by divisor.

        Args:
            number (float): Original number to be quantized.
            divisor (int): Divisor used to quantize the number.

        Returns:
            int: Quantized number that is divisible by divisor.
        """
        return int((round((number / divisor)) * divisor))

    def adjust_width_group(self, widths, bottleneck_ratio, groups):
        """Adjust the compatibility of widths and groups.

        Args:
            widths (list[int]): Width of each stage.
            bottleneck_ratio (list[float]): Bottleneck ratio of each stage.
            groups (list[int]): Number of groups in each stage.

        Returns:
            tuple(list): The adjusted widths and groups of each stage.
        """
        bottleneck_width = [int((w * b)) for (w, b) in zip(widths, bottleneck_ratio)]
        # Groups cannot exceed the bottleneck width; then round the widths
        # so each group has an integral width.
        groups = [min(g, w_bot) for (g, w_bot) in zip(groups, bottleneck_width)]
        bottleneck_width = [self.quantize_float(w_bot, g) for (w_bot, g) in zip(bottleneck_width, groups)]
        widths = [int((w_bot / b)) for (w_bot, b) in zip(bottleneck_width, bottleneck_ratio)]
        return (widths, groups)

    def get_stages_from_blocks(self, widths):
        """Get widths/stage_blocks of the network at each stage.

        Args:
            widths (list[int]): Width of each block.

        Returns:
            tuple(list): Width and depth of each stage.
        """
        # A stage boundary is wherever the width changes between blocks.
        width_diff = [(width != width_prev) for (width, width_prev) in zip((widths + [0]), ([0] + widths))]
        stage_widths = [width for (width, diff) in zip(widths, width_diff[:(- 1)]) if diff]
        stage_blocks = np.diff([depth for (depth, diff) in zip(range(len(width_diff)), width_diff) if diff]).tolist()
        return (stage_widths, stage_blocks)

    def forward(self, x):
        """Forward function.

        Returns:
            tuple[Tensor]: Feature maps of the stages in ``out_indices``.
        """
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu(x)
        outs = []
        for (i, layer_name) in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            x = res_layer(x)
            if (i in self.out_indices):
                outs.append(x)
        return tuple(outs)
|
class Bottle2neck(_Bottleneck):
    """Bottle2neck block for Res2Net.

    Replaces the bottleneck's single 3x3 conv with ``scales - 1`` smaller
    convs applied to channel splits connected hierarchically.

    If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
    it is "caffe", the stride-two layer is the first 1x1 conv layer.

    Args:
        inplanes (int): Input channels of this block.
        planes (int): Base channels of this block.
        scales (int): Number of scale splits; must be > 1. Default: 4.
        base_width (int): Basic width of each scale. Default: 26.
        base_channels (int): Reference channel count used to derive the
            per-scale width. Default: 64.
        stage_type (str): ``'normal'`` or ``'stage'`` (first block of a
            stage, where splits are not summed and the last split is pooled
            when striding). Default: ``'normal'``.
        kwargs (dict): Keyword arguments for the parent ``_Bottleneck``.
    """
    expansion = 4

    def __init__(self, inplanes, planes, scales=4, base_width=26, base_channels=64, stage_type='normal', **kwargs):
        super(Bottle2neck, self).__init__(inplanes, planes, **kwargs)
        assert scales > 1, 'Res2Net degenerates to ResNet when scales = 1.'
        # Per-split width; conv1 expands to width * scales channels.
        width = int(math.floor(self.planes * (base_width / base_channels)))
        self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, width * scales, postfix=1)
        self.norm3_name, norm3 = build_norm_layer(self.norm_cfg, self.planes * self.expansion, postfix=3)
        self.conv1 = build_conv_layer(
            self.conv_cfg,
            self.inplanes,
            width * scales,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        if stage_type == 'stage' and self.conv2_stride != 1:
            # Stride-2 'stage' blocks downsample the untouched last split
            # with average pooling instead of a conv.
            self.pool = nn.AvgPool2d(kernel_size=3, stride=self.conv2_stride, padding=1)
        fallback_on_stride = False
        if self.with_dcn:
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
        # REFACTOR: the original duplicated the convs/bns construction loop
        # in two branches that differed only in the conv cfg (DCN vs the
        # regular conv cfg); select the cfg once and build in a single loop.
        if self.with_dcn and not fallback_on_stride:
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            conv2_cfg = self.dcn
        else:
            conv2_cfg = self.conv_cfg
        convs = []
        bns = []
        for i in range(scales - 1):
            convs.append(
                build_conv_layer(
                    conv2_cfg,
                    width,
                    width,
                    kernel_size=3,
                    stride=self.conv2_stride,
                    padding=self.dilation,
                    dilation=self.dilation,
                    bias=False))
            bns.append(build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1])
        self.convs = nn.ModuleList(convs)
        self.bns = nn.ModuleList(bns)
        self.conv3 = build_conv_layer(
            self.conv_cfg,
            width * scales,
            self.planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)
        self.stage_type = stage_type
        self.scales = scales
        self.width = width
        # The parent's single conv2/norm2 are superseded by the multi-scale
        # convs/bns above, so remove them.
        delattr(self, 'conv2')
        delattr(self, self.norm2_name)

    def forward(self, x):
        """Forward function."""

        def _inner_forward(x):
            identity = x
            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names)
            # Split channels into `scales` groups, processed hierarchically.
            spx = torch.split(out, self.width, 1)
            sp = self.convs[0](spx[0].contiguous())
            sp = self.relu(self.bns[0](sp))
            out = sp
            for i in range(1, self.scales - 1):
                if self.stage_type == 'stage':
                    sp = spx[i]
                else:
                    # Hierarchical residual connection between splits.
                    sp = sp + spx[i]
                sp = self.convs[i](sp.contiguous())
                sp = self.relu(self.bns[i](sp))
                out = torch.cat((out, sp), 1)
            if self.stage_type == 'normal' or self.conv2_stride == 1:
                out = torch.cat((out, spx[self.scales - 1]), 1)
            elif self.stage_type == 'stage':
                # Pool the last split to match the strided spatial size.
                out = torch.cat((out, self.pool(spx[self.scales - 1])), 1)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names)
            out = self.conv3(out)
            out = self.norm3(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names)
            if self.downsample is not None:
                identity = self.downsample(x)
            out += identity
            return out

        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        out = self.relu(out)
        return out
|
class Res2Layer(Sequential):
    """Res2Layer to build a Res2Net style backbone stage.

    Args:
        block (nn.Module): Block class used to build the layer.
        inplanes (int): Input channels of the first block.
        planes (int): Base channels of each block.
        num_blocks (int): Number of blocks to stack.
        stride (int): Stride of the first block. Default: 1.
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottle2neck. Default: True.
        conv_cfg (dict): Dictionary to construct and config conv layer.
            Default: None.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Default: dict(type='BN').
        scales (int): Scales used in Res2Net. Default: 4.
        base_width (int): Basic width of each scale. Default: 26.
    """

    def __init__(self, block, inplanes, planes, num_blocks, stride=1, avg_down=True, conv_cfg=None, norm_cfg=dict(type='BN'), scales=4, base_width=26, **kwargs):
        self.block = block
        out_channels = planes * block.expansion
        downsample = None
        if stride != 1 or inplanes != out_channels:
            # Identity path: avg-pool for the spatial reduction, then a
            # 1x1 conv + norm to match the channel count.
            downsample = nn.Sequential(
                nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True, count_include_pad=False),
                build_conv_layer(conv_cfg, inplanes, out_channels, kernel_size=1, stride=1, bias=False),
                build_norm_layer(norm_cfg, out_channels)[1])
        # The first block carries the stride/downsample and uses the
        # 'stage' variant; the remaining blocks are plain stride-1 blocks.
        first_block = block(
            inplanes=inplanes,
            planes=planes,
            stride=stride,
            downsample=downsample,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            scales=scales,
            base_width=base_width,
            stage_type='stage',
            **kwargs)
        rest_blocks = [
            block(
                inplanes=out_channels,
                planes=planes,
                stride=1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                scales=scales,
                base_width=base_width,
                **kwargs) for _ in range(1, num_blocks)
        ]
        super(Res2Layer, self).__init__(first_block, *rest_blocks)
|
@BACKBONES.register_module()
class Res2Net(ResNet):
    """Res2Net backbone.

    Args:
        scales (int): Scales used in Res2Net. Default: 4.
        base_width (int): Basic width of each scale. Default: 26.
        style (str): ``pytorch`` or ``caffe``. If set to "pytorch", the
            stride-two layer is the 3x3 conv layer, otherwise the stride-two
            layer is the first 1x1 conv layer. Default: ``'pytorch'``.
        deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 convs.
            Default: True.
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottle2neck. Default: True.
        pretrained (str, optional): Model pretrained path. Default: None.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None.
        kwargs (dict): Remaining ResNet keyword arguments (depth,
            out_indices, norm_cfg, ...).
    """
    arch_settings = {50: (Bottle2neck, (3, 4, 6, 3)), 101: (Bottle2neck, (3, 4, 23, 3)), 152: (Bottle2neck, (3, 8, 36, 3))}

    def __init__(self, scales=4, base_width=26, style='pytorch', deep_stem=True, avg_down=True, pretrained=None, init_cfg=None, **kwargs):
        self.scales = scales
        self.base_width = base_width
        # BUGFIX: style/deep_stem/avg_down were accepted as parameters but
        # previously discarded (hard-coded 'pytorch'/True/True were passed
        # to the parent). Forward the user-supplied values; the defaults
        # keep the original behavior.
        super(Res2Net, self).__init__(
            style=style,
            deep_stem=deep_stem,
            avg_down=avg_down,
            pretrained=pretrained,
            init_cfg=init_cfg,
            **kwargs)

    def make_res_layer(self, **kwargs):
        """Pack all blocks in a stage into a ``Res2Layer``."""
        return Res2Layer(
            scales=self.scales,
            base_width=self.base_width,
            base_channels=self.base_channels,
            **kwargs)
|
class RSoftmax(nn.Module):
    """Radix Softmax module in ``SplitAttentionConv2d``.

    Normalizes attention logits across the radix dimension with softmax
    when ``radix > 1``; falls back to an element-wise sigmoid otherwise.

    Args:
        radix (int): Radix of input.
        groups (int): Groups of input.
    """

    def __init__(self, radix, groups):
        super().__init__()
        self.radix = radix
        self.groups = groups

    def forward(self, x):
        if self.radix <= 1:
            # Degenerate case: independent sigmoid gating.
            return torch.sigmoid(x)
        batch = x.size(0)
        # Group the flat channels as (batch, radix, groups, rest) so the
        # softmax runs over the radix dimension only.
        grouped = x.view(batch, self.groups, self.radix, -1).transpose(1, 2)
        normalized = F.softmax(grouped, dim=1)
        return normalized.reshape(batch, -1)
|
class SplitAttentionConv2d(BaseModule):
    """Split-Attention Conv2d in ResNeSt.

    Args:
        in_channels (int): Number of channels in the input feature map.
        channels (int): Number of intermediate channels.
        kernel_size (int | tuple[int]): Size of the convolution kernel.
        stride (int | tuple[int]): Stride of the convolution.
        padding (int | tuple[int]): Zero-padding added to both sides.
        dilation (int | tuple[int]): Spacing between kernel elements.
        groups (int): Number of blocked connections from input channels to
            output channels. Same as nn.Conv2d.
        radix (int): Radix of SplitAttentionConv2d. Default: 2.
        reduction_factor (int): Reduction factor of inter_channels.
            Default: 4.
        conv_cfg (dict): Config dict for convolution layer. Default: None,
            which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
        dcn (dict): Config dict for DCN. Default: None.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None.
    """

    def __init__(self, in_channels, channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, radix=2, reduction_factor=4, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, init_cfg=None):
        super(SplitAttentionConv2d, self).__init__(init_cfg)
        # Attention bottleneck width, floored at 32 channels.
        inter_channels = max(in_channels * radix // reduction_factor, 32)
        self.radix = radix
        self.groups = groups
        self.channels = channels
        self.with_dcn = dcn is not None
        self.dcn = dcn
        fallback_on_stride = False
        if self.with_dcn:
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
        if self.with_dcn and not fallback_on_stride:
            assert conv_cfg is None, 'conv_cfg must be None for DCN'
            conv_cfg = dcn
        # Main conv produces `radix` feature groups in one pass.
        self.conv = build_conv_layer(
            conv_cfg,
            in_channels,
            channels * radix,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups * radix,
            bias=False)
        self.norm0_name, norm0 = build_norm_layer(norm_cfg, channels * radix, postfix=0)
        self.add_module(self.norm0_name, norm0)
        self.relu = nn.ReLU(inplace=True)
        # Two 1x1 convs implement the squeeze-excite style attention MLP.
        self.fc1 = build_conv_layer(None, channels, inter_channels, 1, groups=self.groups)
        self.norm1_name, norm1 = build_norm_layer(norm_cfg, inter_channels, postfix=1)
        self.add_module(self.norm1_name, norm1)
        self.fc2 = build_conv_layer(None, inter_channels, channels * radix, 1, groups=self.groups)
        self.rsoftmax = RSoftmax(radix, groups)

    @property
    def norm0(self):
        """nn.Module: the normalization layer named "norm0" """
        return getattr(self, self.norm0_name)

    @property
    def norm1(self):
        """nn.Module: the normalization layer named "norm1" """
        return getattr(self, self.norm1_name)

    def forward(self, x):
        """Forward function."""
        x = self.conv(x)
        x = self.norm0(x)
        x = self.relu(x)
        # BUGFIX: removed a dead `(batch, rchannel) = x.shape[:2]` unpacking
        # whose `rchannel` was never used and whose `batch` was immediately
        # overwritten by the line below.
        batch = x.size(0)
        if self.radix > 1:
            # Split into radix groups and sum them for the gap features.
            splits = x.view(batch, self.radix, -1, *x.shape[2:])
            gap = splits.sum(dim=1)
        else:
            gap = x
        gap = F.adaptive_avg_pool2d(gap, 1)
        gap = self.fc1(gap)
        gap = self.norm1(gap)
        gap = self.relu(gap)
        atten = self.fc2(gap)
        atten = self.rsoftmax(atten).view(batch, -1, 1, 1)
        if self.radix > 1:
            # Weight each radix split by its attention and sum them.
            attens = atten.view(batch, self.radix, -1, *atten.shape[2:])
            out = torch.sum(attens * splits, dim=1)
        else:
            out = atten * x
        return out.contiguous()
|
class Bottleneck(_Bottleneck):
    """Bottleneck block for ResNeSt.

    Args:
        inplanes (int): Input planes of this block.
        planes (int): Middle planes of this block.
        groups (int): Groups of conv2. Default: 1.
        base_width (int): Base of width in terms of base channels.
            Default: 4.
        base_channels (int): Base of channels for calculating width.
            Default: 64.
        radix (int): Radix of SplitAttentionConv2d. Default: 2.
        reduction_factor (int): Reduction factor of inter_channels in
            SplitAttentionConv2d. Default: 4.
        avg_down_stride (bool): Whether to use average pool for stride in
            Bottleneck. Default: True.
        kwargs (dict): Keyword arguments for the base class.
    """
    expansion = 4

    def __init__(self, inplanes, planes, groups=1, base_width=4, base_channels=64, radix=2, reduction_factor=4, avg_down_stride=True, **kwargs):
        """Bottleneck block for ResNeSt."""
        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
        # conv2 width scales with groups/base_width in the grouped case.
        if groups == 1:
            width = self.planes
        else:
            width = math.floor(self.planes * (base_width / base_channels)) * groups
        # Only meaningful when this block actually strides.
        self.avg_down_stride = avg_down_stride and self.conv2_stride > 1
        self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, width, postfix=1)
        self.norm3_name, norm3 = build_norm_layer(self.norm_cfg, self.planes * self.expansion, postfix=3)
        self.conv1 = build_conv_layer(
            self.conv_cfg,
            self.inplanes,
            width,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        self.with_modulated_dcn = False
        # Split-attention conv replaces the plain 3x3 conv2; when striding
        # via avg-pool, the conv itself stays stride-1.
        self.conv2 = SplitAttentionConv2d(
            width,
            width,
            kernel_size=3,
            stride=1 if self.avg_down_stride else self.conv2_stride,
            padding=self.dilation,
            dilation=self.dilation,
            groups=groups,
            radix=radix,
            reduction_factor=reduction_factor,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            dcn=self.dcn)
        # conv2's internal norms supersede the parent's norm2.
        delattr(self, self.norm2_name)
        if self.avg_down_stride:
            self.avd_layer = nn.AvgPool2d(3, self.conv2_stride, padding=1)
        self.conv3 = build_conv_layer(
            self.conv_cfg,
            width,
            self.planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)

    def forward(self, x):

        def _residual_branch(inp):
            out = self.relu(self.norm1(self.conv1(inp)))
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names)
            out = self.conv2(out)
            if self.avg_down_stride:
                out = self.avd_layer(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names)
            out = self.norm3(self.conv3(out))
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names)
            shortcut = inp if self.downsample is None else self.downsample(inp)
            return out + shortcut

        if self.with_cp and x.requires_grad:
            branch_out = cp.checkpoint(_residual_branch, x)
        else:
            branch_out = _residual_branch(x)
        return self.relu(branch_out)
|
@BACKBONES.register_module()
class ResNeSt(ResNetV1d):
    """ResNeSt backbone: ResNet-v1d built from split-attention bottlenecks.

    Args:
        groups (int): Number of groups of Bottleneck. Default: 1.
        base_width (int): Base width of Bottleneck. Default: 4.
        radix (int): Radix of SplitAttentionConv2d. Default: 2.
        reduction_factor (int): Reduction factor of inter_channels in
            SplitAttentionConv2d. Default: 4.
        avg_down_stride (bool): Whether to use average pool for stride in
            Bottleneck. Default: True.
        kwargs (dict): Keyword arguments for ResNet.
    """

    # depth -> (block class, number of blocks per stage)
    arch_settings = {
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3)),
        200: (Bottleneck, (3, 24, 36, 3)),
    }

    def __init__(self, groups=1, base_width=4, radix=2, reduction_factor=4, avg_down_stride=True, **kwargs):
        # Record split-attention hyper-parameters before the parent
        # constructor builds the residual stages via make_res_layer().
        self.groups = groups
        self.base_width = base_width
        self.radix = radix
        self.reduction_factor = reduction_factor
        self.avg_down_stride = avg_down_stride
        super(ResNeSt, self).__init__(**kwargs)

    def make_res_layer(self, **kwargs):
        """Pack all blocks in a stage into a ``ResLayer``."""
        return ResLayer(
            groups=self.groups,
            base_width=self.base_width,
            base_channels=self.base_channels,
            radix=self.radix,
            reduction_factor=self.reduction_factor,
            avg_down_stride=self.avg_down_stride,
            **kwargs)
|
class BasicBlock(BaseModule):
    """Two-conv residual block used by the shallow ResNets (18/34)."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, dilation=1,
                 downsample=None, style='pytorch', with_cp=False,
                 conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None,
                 plugins=None, init_cfg=None):
        super(BasicBlock, self).__init__(init_cfg)
        assert dcn is None, 'Not implemented yet.'
        assert plugins is None, 'Not implemented yet.'
        # Build norms first; register each right after its conv so the
        # parameter order in state_dict matches checkpoints.
        (self.norm1_name, bn1) = build_norm_layer(norm_cfg, planes, postfix=1)
        (self.norm2_name, bn2) = build_norm_layer(norm_cfg, planes, postfix=2)
        self.conv1 = build_conv_layer(conv_cfg, inplanes, planes, 3,
                                      stride=stride, padding=dilation,
                                      dilation=dilation, bias=False)
        self.add_module(self.norm1_name, bn1)
        self.conv2 = build_conv_layer(conv_cfg, planes, planes, 3, padding=1,
                                      bias=False)
        self.add_module(self.norm2_name, bn2)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        self.with_cp = with_cp

    @property
    def norm1(self):
        """nn.Module: normalization layer after the first convolution layer"""
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """nn.Module: normalization layer after the second convolution layer"""
        return getattr(self, self.norm2_name)

    def forward(self, x):
        """Forward pass with optional gradient checkpointing."""

        def _residual_branch(inp):
            feat = self.relu(self.norm1(self.conv1(inp)))
            feat = self.norm2(self.conv2(feat))
            shortcut = inp if self.downsample is None else self.downsample(inp)
            feat += shortcut
            return feat

        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_residual_branch, x)
        else:
            out = _residual_branch(x)
        return self.relu(out)
|
class Bottleneck(BaseModule):
    """Bottleneck residual block (1x1 -> 3x3 -> 1x1 convs) for deep ResNets.

    If style is "pytorch", the stride-two layer is the 3x3 conv layer; if
    it is "caffe", the stride-two layer is the first 1x1 conv layer.

    Args:
        inplanes (int): Input channels of the block.
        planes (int): Middle channels; the block outputs
            ``planes * expansion`` channels.
        stride (int): Stride of the stride-two conv. Default: 1.
        dilation (int): Dilation of the 3x3 conv. Default: 1.
        downsample (nn.Module, optional): Projection applied to the
            identity branch when shapes differ. Default: None.
        style (str): 'pytorch' or 'caffe'. Default: 'pytorch'.
        with_cp (bool): Use gradient checkpointing in ``forward`` to save
            memory at the cost of extra compute. Default: False.
        conv_cfg (dict, optional): Config dict for conv layers.
            Default: None.
        norm_cfg (dict): Config dict for norm layers.
            Default: dict(type='BN').
        dcn (dict, optional): Config dict for deformable conv used as
            conv2. Default: None.
        plugins (list[dict], optional): Plugin cfgs inserted after
            conv1/conv2/conv3. Default: None.
        init_cfg (dict or list[dict], optional): Initialization config.
            Default: None.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, plugins=None, init_cfg=None):
        super(Bottleneck, self).__init__(init_cfg)
        assert style in ['pytorch', 'caffe']
        assert dcn is None or isinstance(dcn, dict)
        assert plugins is None or isinstance(plugins, list)
        if plugins is not None:
            allowed_position = ['after_conv1', 'after_conv2', 'after_conv3']
            assert all(p['position'] in allowed_position for p in plugins)
        self.inplanes = inplanes
        self.planes = planes
        self.stride = stride
        self.dilation = dilation
        self.style = style
        self.with_cp = with_cp
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.dcn = dcn
        self.with_dcn = dcn is not None
        self.plugins = plugins
        self.with_plugins = plugins is not None
        if self.with_plugins:
            # Collect plugin cfgs per insertion point.
            self.after_conv1_plugins = [
                plugin['cfg'] for plugin in plugins
                if plugin['position'] == 'after_conv1'
            ]
            self.after_conv2_plugins = [
                plugin['cfg'] for plugin in plugins
                if plugin['position'] == 'after_conv2'
            ]
            self.after_conv3_plugins = [
                plugin['cfg'] for plugin in plugins
                if plugin['position'] == 'after_conv3'
            ]
        if self.style == 'pytorch':
            self.conv1_stride = 1
            self.conv2_stride = stride
        else:
            self.conv1_stride = stride
            self.conv2_stride = 1
        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(
            norm_cfg, planes * self.expansion, postfix=3)
        self.conv1 = build_conv_layer(
            conv_cfg, inplanes, planes, kernel_size=1,
            stride=self.conv1_stride, bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        if self.with_dcn:
            # NOTE: pop() mutates the caller-supplied dcn dict; kept for
            # compatibility with configs shared across stages.
            fallback_on_stride = dcn.pop('fallback_on_stride', False)
        if not self.with_dcn or fallback_on_stride:
            self.conv2 = build_conv_layer(
                conv_cfg, planes, planes, kernel_size=3,
                stride=self.conv2_stride, padding=dilation,
                dilation=dilation, bias=False)
        else:
            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
            self.conv2 = build_conv_layer(
                dcn, planes, planes, kernel_size=3,
                stride=self.conv2_stride, padding=dilation,
                dilation=dilation, bias=False)
        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(
            conv_cfg, planes, planes * self.expansion, kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        if self.with_plugins:
            self.after_conv1_plugin_names = self.make_block_plugins(
                planes, self.after_conv1_plugins)
            self.after_conv2_plugin_names = self.make_block_plugins(
                planes, self.after_conv2_plugins)
            self.after_conv3_plugin_names = self.make_block_plugins(
                planes * self.expansion, self.after_conv3_plugins)

    def make_block_plugins(self, in_channels, plugins):
        """Make plugins for the block.

        Args:
            in_channels (int): Input channels of plugin.
            plugins (list[dict]): List of plugin cfgs to build.

        Returns:
            list[str]: Names of the registered plugin modules.
        """
        assert isinstance(plugins, list)
        plugin_names = []
        for plugin in plugins:
            plugin = plugin.copy()
            name, layer = build_plugin_layer(
                plugin, in_channels=in_channels,
                postfix=plugin.pop('postfix', ''))
            assert not hasattr(self, name), f'duplicate plugin {name}'
            self.add_module(name, layer)
            plugin_names.append(name)
        return plugin_names

    def forward_plugin(self, x, plugin_names):
        """Apply the named plugins to ``x`` sequentially."""
        out = x
        for name in plugin_names:
            # Bug fix: chain plugins on the running output instead of
            # always feeding the original input ``x`` — the old code
            # silently discarded every plugin's effect except the last.
            out = getattr(self, name)(out)
        return out

    @property
    def norm1(self):
        """nn.Module: normalization layer after the first convolution layer"""
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """nn.Module: normalization layer after the second convolution layer"""
        return getattr(self, self.norm2_name)

    @property
    def norm3(self):
        """nn.Module: normalization layer after the third convolution layer"""
        return getattr(self, self.norm3_name)

    def forward(self, x):
        """Forward function."""

        def _inner_forward(x):
            identity = x
            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv1_plugin_names)
            out = self.conv2(out)
            out = self.norm2(out)
            out = self.relu(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv2_plugin_names)
            out = self.conv3(out)
            out = self.norm3(out)
            if self.with_plugins:
                out = self.forward_plugin(out, self.after_conv3_plugin_names)
            if self.downsample is not None:
                identity = self.downsample(x)
            out += identity
            return out

        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        out = self.relu(out)
        return out
|
@BACKBONES.register_module()
class ResNet(BaseModule):
    """ResNet backbone.

    Args:
        depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
        in_channels (int): Number of input image channels. Default: 3.
        stem_channels (int | None): Number of stem channels. If not
            specified, it will be the same as ``base_channels``.
            Default: None.
        base_channels (int): Number of base channels of res layer.
            Default: 64.
        num_stages (int): Resnet stages. Default: 4.
        strides (Sequence[int]): Strides of the first block of each stage.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): ``pytorch`` or ``caffe``. If set to "pytorch", the
            stride-two layer is the 3x3 conv layer, otherwise the
            stride-two layer is the first 1x1 conv layer.
        deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 convs.
        avg_down (bool): Use AvgPool instead of stride conv when
            downsampling in the bottleneck.
        frozen_stages (int): Stages to be frozen (stop grad and set eval
            mode). -1 means not freezing any parameters.
        conv_cfg (dict | None): Config dict for conv layers.
        norm_cfg (dict): Dictionary to construct and config norm layer.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch
            Norm and its variants only.
        dcn (dict | None): Config dict for deformable convolution.
        stage_with_dcn (Sequence[bool]): Which stages use DCN when ``dcn``
            is given.
        plugins (list[dict]): List of plugins for stages, each dict
            contains:

            - cfg (dict, required): Cfg dict to build plugin.
            - position (str, required): Position inside block to insert
              plugin, options are 'after_conv1', 'after_conv2',
              'after_conv3'.
            - stages (tuple[bool], optional): Stages to apply plugin,
              length should be same as 'num_stages'.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save
            some memory while slowing down the training speed.
        zero_init_residual (bool): Whether to use zero init for last norm
            layer in resblocks to let them behave as identity.
        pretrained (str, optional): model pretrained path. Default: None.
        init_cfg (dict or list[dict], optional): Initialization config
            dict. Default: None.

    Example:
        >>> from mmdet.models import ResNet
        >>> import torch
        >>> self = ResNet(depth=18)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 32, 32)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 64, 8, 8)
        (1, 128, 4, 4)
        (1, 256, 2, 2)
        (1, 512, 1, 1)
    """

    # depth -> (block class, number of blocks per stage)
    arch_settings = {18: (BasicBlock, (2, 2, 2, 2)), 34: (BasicBlock, (3, 4, 6, 3)), 50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3))}

    def __init__(self, depth, in_channels=3, stem_channels=None, base_channels=64, num_stages=4, strides=(1, 2, 2, 2), dilations=(1, 1, 1, 1), out_indices=(0, 1, 2, 3), style='pytorch', deep_stem=False, avg_down=False, frozen_stages=(- 1), conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, dcn=None, stage_with_dcn=(False, False, False, False), plugins=None, with_cp=False, zero_init_residual=True, pretrained=None, init_cfg=None):
        super(ResNet, self).__init__(init_cfg)
        self.zero_init_residual = zero_init_residual
        if (depth not in self.arch_settings):
            raise KeyError(f'invalid depth {depth} for resnet')
        block_init_cfg = None
        # `pretrained` is deprecated; at most one of the two may be given.
        assert (not (init_cfg and pretrained)), 'init_cfg and pretrained cannot be specified at the same time'
        if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif (pretrained is None):
            if (init_cfg is None):
                # Default init: Kaiming for convs, constant 1 for norms.
                self.init_cfg = [dict(type='Kaiming', layer='Conv2d'), dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm'])]
                block = self.arch_settings[depth][0]
                if self.zero_init_residual:
                    # Zero-init the last norm of each block so residual
                    # branches start as identity mappings.
                    if (block is BasicBlock):
                        block_init_cfg = dict(type='Constant', val=0, override=dict(name='norm2'))
                    elif (block is Bottleneck):
                        block_init_cfg = dict(type='Constant', val=0, override=dict(name='norm3'))
        else:
            raise TypeError('pretrained must be a str or None')
        self.depth = depth
        if (stem_channels is None):
            stem_channels = base_channels
        self.stem_channels = stem_channels
        self.base_channels = base_channels
        self.num_stages = num_stages
        assert ((num_stages >= 1) and (num_stages <= 4))
        self.strides = strides
        self.dilations = dilations
        assert (len(strides) == len(dilations) == num_stages)
        self.out_indices = out_indices
        assert (max(out_indices) < num_stages)
        self.style = style
        self.deep_stem = deep_stem
        self.avg_down = avg_down
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.with_cp = with_cp
        self.norm_eval = norm_eval
        self.dcn = dcn
        self.stage_with_dcn = stage_with_dcn
        if (dcn is not None):
            assert (len(stage_with_dcn) == num_stages)
        self.plugins = plugins
        (self.block, stage_blocks) = self.arch_settings[depth]
        self.stage_blocks = stage_blocks[:num_stages]
        self.inplanes = stem_channels
        self._make_stem_layer(in_channels, stem_channels)
        self.res_layers = []
        for (i, num_blocks) in enumerate(self.stage_blocks):
            stride = strides[i]
            dilation = dilations[i]
            # Per-stage DCN / plugin selection.
            dcn = (self.dcn if self.stage_with_dcn[i] else None)
            if (plugins is not None):
                stage_plugins = self.make_stage_plugins(plugins, i)
            else:
                stage_plugins = None
            # Channel width doubles each stage (64, 128, 256, 512 by default).
            planes = (base_channels * (2 ** i))
            res_layer = self.make_res_layer(block=self.block, inplanes=self.inplanes, planes=planes, num_blocks=num_blocks, stride=stride, dilation=dilation, style=self.style, avg_down=self.avg_down, with_cp=with_cp, conv_cfg=conv_cfg, norm_cfg=norm_cfg, dcn=dcn, plugins=stage_plugins, init_cfg=block_init_cfg)
            self.inplanes = (planes * self.block.expansion)
            layer_name = f'layer{(i + 1)}'
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)
        self._freeze_stages()
        # Channel dim of the last stage's output feature map.
        self.feat_dim = ((self.block.expansion * base_channels) * (2 ** (len(self.stage_blocks) - 1)))

    def make_stage_plugins(self, plugins, stage_idx):
        """Make plugins for ResNet ``stage_idx`` th stage.

        Currently we support inserting ``context_block``,
        ``empirical_attention_block``, ``nonlocal_block`` into the
        backbone like ResNet/ResNeXt. They could be inserted after
        conv1/conv2/conv3 of Bottleneck.

        Each plugin dict may carry an optional ``stages`` tuple of bools
        (length ``num_stages``) selecting the stages it applies to; if
        ``stages`` is missing, the plugin is applied to all stages. The
        ``postfix`` in the cfg is required if multiple plugins of the
        same type are inserted.

        Args:
            plugins (list[dict]): List of plugin cfgs to build.
            stage_idx (int): Index of stage to build.

        Returns:
            list[dict]: Plugins for current stage.
        """
        stage_plugins = []
        for plugin in plugins:
            plugin = plugin.copy()
            stages = plugin.pop('stages', None)
            assert ((stages is None) or (len(stages) == self.num_stages))
            if ((stages is None) or stages[stage_idx]):
                stage_plugins.append(plugin)
        return stage_plugins

    def make_res_layer(self, **kwargs):
        """Pack all blocks in a stage into a ``ResLayer``."""
        return ResLayer(**kwargs)

    @property
    def norm1(self):
        """nn.Module: the normalization layer named "norm1" """
        return getattr(self, self.norm1_name)

    def _make_stem_layer(self, in_channels, stem_channels):
        # Either a deep stem (three 3x3 convs, ResNet-v1d style) or the
        # classic 7x7 stride-2 conv; both end with a 3x3/2 max-pool.
        if self.deep_stem:
            self.stem = nn.Sequential(
                build_conv_layer(self.conv_cfg, in_channels, (stem_channels // 2), kernel_size=3, stride=2, padding=1, bias=False),
                build_norm_layer(self.norm_cfg, (stem_channels // 2))[1],
                nn.ReLU(inplace=True),
                build_conv_layer(self.conv_cfg, (stem_channels // 2), (stem_channels // 2), kernel_size=3, stride=1, padding=1, bias=False),
                build_norm_layer(self.norm_cfg, (stem_channels // 2))[1],
                nn.ReLU(inplace=True),
                build_conv_layer(self.conv_cfg, (stem_channels // 2), stem_channels, kernel_size=3, stride=1, padding=1, bias=False),
                build_norm_layer(self.norm_cfg, stem_channels)[1],
                nn.ReLU(inplace=True))
        else:
            self.conv1 = build_conv_layer(self.conv_cfg, in_channels, stem_channels, kernel_size=7, stride=2, padding=3, bias=False)
            (self.norm1_name, norm1) = build_norm_layer(self.norm_cfg, stem_channels, postfix=1)
            self.add_module(self.norm1_name, norm1)
            self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

    def _freeze_stages(self):
        # Freeze the stem and stages [1, frozen_stages]: eval mode plus
        # requires_grad=False on all their parameters.
        if (self.frozen_stages >= 0):
            if self.deep_stem:
                self.stem.eval()
                for param in self.stem.parameters():
                    param.requires_grad = False
            else:
                self.norm1.eval()
                for m in [self.conv1, self.norm1]:
                    for param in m.parameters():
                        param.requires_grad = False
        for i in range(1, (self.frozen_stages + 1)):
            m = getattr(self, f'layer{i}')
            m.eval()
            for param in m.parameters():
                param.requires_grad = False

    def forward(self, x):
        """Forward function."""
        if self.deep_stem:
            x = self.stem(x)
        else:
            x = self.conv1(x)
            x = self.norm1(x)
            x = self.relu(x)
        x = self.maxpool(x)
        outs = []
        for (i, layer_name) in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            x = res_layer(x)
            if (i in self.out_indices):
                outs.append(x)
        return tuple(outs)

    def train(self, mode=True):
        """Convert the model into training mode while keeping the
        normalization layers frozen."""
        super(ResNet, self).train(mode)
        self._freeze_stages()
        if (mode and self.norm_eval):
            # Keep BN running stats frozen during training.
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()
|
@BACKBONES.register_module()
class ResNetV1d(ResNet):
    """ResNetV1d variant described in `Bag of Tricks
    <https://arxiv.org/pdf/1812.01187.pdf>`_.

    Compared with default ResNet (ResNetV1b), ResNetV1d replaces the 7x7
    conv in the input stem with three 3x3 convs, and in the downsampling
    block a 2x2 avg_pool with stride 2 is added before the conv, whose
    stride is changed to 1.
    """

    def __init__(self, **kwargs):
        # Force the v1d tweaks; everything else is plain ResNet.
        super(ResNetV1d, self).__init__(deep_stem=True, avg_down=True, **kwargs)
|
class Bottleneck(_Bottleneck):
    """Bottleneck block for ResNeXt.

    If style is "pytorch", the stride-two layer is the 3x3 conv layer; if
    it is "caffe", the stride-two layer is the first 1x1 conv layer.

    Args:
        inplanes (int): Input channels of this block.
        planes (int): Middle channels (before group-width scaling).
        groups (int): Groups of the 3x3 conv. Default: 1.
        base_width (int): Base width per group. Default: 4.
        base_channels (int): Base channels used for width scaling.
            Default: 64.
        kwargs (dict): Keyword arguments for the base Bottleneck class.
    """

    expansion = 4

    def __init__(self, inplanes, planes, groups=1, base_width=4, base_channels=64, **kwargs):
        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
        # ResNeXt widens the grouped middle stage; plain ResNet width is
        # kept when groups == 1.
        if (groups == 1):
            width = self.planes
        else:
            width = (math.floor((self.planes * (base_width / base_channels))) * groups)
        # Rebuild the norms/convs created by the parent constructor so
        # they match the new (grouped) width.
        (self.norm1_name, norm1) = build_norm_layer(self.norm_cfg, width, postfix=1)
        (self.norm2_name, norm2) = build_norm_layer(self.norm_cfg, width, postfix=2)
        (self.norm3_name, norm3) = build_norm_layer(self.norm_cfg, (self.planes * self.expansion), postfix=3)
        self.conv1 = build_conv_layer(self.conv_cfg, self.inplanes, width, kernel_size=1, stride=self.conv1_stride, bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        self.with_modulated_dcn = False
        if self.with_dcn:
            # NOTE: pop() mutates the shared dcn cfg dict.
            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
        if ((not self.with_dcn) or fallback_on_stride):
            self.conv2 = build_conv_layer(self.conv_cfg, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, groups=groups, bias=False)
        else:
            assert (self.conv_cfg is None), 'conv_cfg must be None for DCN'
            self.conv2 = build_conv_layer(self.dcn, width, width, kernel_size=3, stride=self.conv2_stride, padding=self.dilation, dilation=self.dilation, groups=groups, bias=False)
        self.add_module(self.norm2_name, norm2)
        self.conv3 = build_conv_layer(self.conv_cfg, width, (self.planes * self.expansion), kernel_size=1, bias=False)
        self.add_module(self.norm3_name, norm3)
        if self.with_plugins:
            # The parent built plugins for the un-widened channels;
            # rebuild them for the grouped width.
            self._del_block_plugins(((self.after_conv1_plugin_names + self.after_conv2_plugin_names) + self.after_conv3_plugin_names))
            self.after_conv1_plugin_names = self.make_block_plugins(width, self.after_conv1_plugins)
            self.after_conv2_plugin_names = self.make_block_plugins(width, self.after_conv2_plugins)
            self.after_conv3_plugin_names = self.make_block_plugins((self.planes * self.expansion), self.after_conv3_plugins)

    def _del_block_plugins(self, plugin_names):
        """Delete plugins of the block if they exist.

        Args:
            plugin_names (list[str]): List of plugin names to delete.
        """
        assert isinstance(plugin_names, list)
        for plugin_name in plugin_names:
            del self._modules[plugin_name]
|
@BACKBONES.register_module()
class ResNeXt(ResNet):
    """ResNeXt backbone.

    A ResNet whose bottlenecks use grouped 3x3 convolutions; ``groups``
    and ``base_width`` control the cardinality and per-group width.

    Args:
        groups (int): Group of resnext. Default: 1.
        base_width (int): Base width of resnext. Default: 4.
        kwargs (dict): Keyword arguments for ResNet.
    """

    # depth -> (block class, number of blocks per stage)
    arch_settings = {
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3)),
    }

    def __init__(self, groups=1, base_width=4, **kwargs):
        # Stash cardinality settings before the parent builds the stages.
        self.groups = groups
        self.base_width = base_width
        super(ResNeXt, self).__init__(**kwargs)

    def make_res_layer(self, **kwargs):
        """Pack all blocks in a stage into a ``ResLayer``."""
        return ResLayer(
            groups=self.groups,
            base_width=self.base_width,
            base_channels=self.base_channels,
            **kwargs)
|
@BACKBONES.register_module()
class SSDVGG(VGG, BaseModule):
    """VGG Backbone network for single-shot-detection.

    Args:
        depth (int): Depth of vgg, from {11, 13, 16, 19}.
        with_last_pool (bool): Whether to add a pooling layer at the last
            of the model.
        ceil_mode (bool): When True, will use `ceil` instead of `floor`
            to compute the output shape.
        out_indices (Sequence[int]): Output from which stages.
        out_feature_indices (Sequence[int]): Output from which feature map.
        pretrained (str, optional): model pretrained path. Default: None.
        init_cfg (dict or list[dict], optional): Initialization config
            dict. Default: None.
        input_size (int, optional): Deprecated argument.
            Width and height of input, from {300, 512}.
        l2_norm_scale (float, optional): Deprecated argument.
            L2 normalization layer init scale.

    Example:
        >>> self = SSDVGG(input_size=300, depth=11)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 300, 300)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 1024, 19, 19)
        (1, 512, 10, 10)
        (1, 256, 5, 5)
        (1, 256, 3, 3)
        (1, 256, 1, 1)
    """

    # Extra-layer channel settings per SSD input size ('S' marks a
    # stride-2 layer in the original SSD design).
    extra_setting = {300: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256), 512: (256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128)}

    def __init__(self, depth, with_last_pool=False, ceil_mode=True, out_indices=(3, 4), out_feature_indices=(22, 34), pretrained=None, init_cfg=None, input_size=None, l2_norm_scale=None):
        super(SSDVGG, self).__init__(depth, with_last_pool=with_last_pool, ceil_mode=ceil_mode, out_indices=out_indices)
        # SSD-style head: a stride-1 pool plus dilated fc6/fc7 convs
        # appended to the plain VGG feature extractor.
        self.features.add_module(str(len(self.features)), nn.MaxPool2d(kernel_size=3, stride=1, padding=1))
        self.features.add_module(str(len(self.features)), nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6))
        self.features.add_module(str(len(self.features)), nn.ReLU(inplace=True))
        self.features.add_module(str(len(self.features)), nn.Conv2d(1024, 1024, kernel_size=1))
        self.features.add_module(str(len(self.features)), nn.ReLU(inplace=True))
        self.out_feature_indices = out_feature_indices
        # `pretrained` is deprecated; at most one of the two may be given.
        assert (not (init_cfg and pretrained)), 'init_cfg and pretrained cannot be specified at the same time'
        if (init_cfg is not None):
            self.init_cfg = init_cfg
        elif isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        elif (pretrained is None):
            self.init_cfg = [dict(type='Kaiming', layer='Conv2d'), dict(type='Constant', val=1, layer='BatchNorm2d'), dict(type='Normal', std=0.01, layer='Linear')]
        else:
            raise TypeError('pretrained must be a str or None')
        if (input_size is not None):
            warnings.warn('DeprecationWarning: input_size is deprecated')
        if (l2_norm_scale is not None):
            warnings.warn('DeprecationWarning: l2_norm_scale in VGG is deprecated, it has been moved to SSDNeck.')

    def init_weights(self, pretrained=None):
        # `pretrained` is unused; the signature is kept for backward
        # compatibility. Skips VGG.init_weights and uses BaseModule's
        # init_cfg machinery instead.
        super(VGG, self).init_weights()

    def forward(self, x):
        """Forward function."""
        outs = []
        for (i, layer) in enumerate(self.features):
            x = layer(x)
            if (i in self.out_feature_indices):
                outs.append(x)
        # Return a bare tensor when a single feature map is requested.
        if (len(outs) == 1):
            return outs[0]
        else:
            return tuple(outs)
|
class L2Norm(ssd_neck.L2Norm):
    """Deprecated alias of ``ssd_neck.L2Norm`` kept for backward compatibility."""

    def __init__(self, **kwargs):
        super(L2Norm, self).__init__(**kwargs)
        # Behavior is unchanged; only the import location moved.
        warnings.warn('DeprecationWarning: L2Norm in ssd_vgg.py is deprecated, please use L2Norm in mmdet/models/necks/ssd_neck.py instead')
|
class TridentConv(BaseModule):
    """Trident Convolution Module.

    Applies one shared convolution weight to several branch inputs, each
    branch using a different dilation (with matching padding).

    Args:
        in_channels (int): Number of channels in input.
        out_channels (int): Number of channels in output.
        kernel_size (int): Size of convolution kernel.
        stride (int, optional): Convolution stride. Default: 1.
        trident_dilations (tuple[int, int, int], optional): Dilations of
            different trident branch. Default: (1, 2, 3).
        test_branch_idx (int, optional): In inference, all 3 branches will
            be used if `test_branch_idx==-1`, otherwise only branch with
            index `test_branch_idx` will be used. Default: 1.
        bias (bool, optional): Whether to use bias in convolution or not.
            Default: False.
        init_cfg (dict or list[dict], optional): Initialization config
            dict. Default: None.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, trident_dilations=(1, 2, 3), test_branch_idx=1, bias=False, init_cfg=None):
        super(TridentConv, self).__init__(init_cfg)
        self.num_branch = len(trident_dilations)
        self.with_bias = bias
        self.test_branch_idx = test_branch_idx
        self.stride = _pair(stride)
        self.kernel_size = _pair(kernel_size)
        # Per-branch paddings mirror the dilations so spatial size is kept
        # for the 3x3 kernels used by TridentNet.
        self.paddings = _pair(trident_dilations)
        self.dilations = trident_dilations
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.bias = bias
        # Single weight tensor shared across all branches.
        self.weight = nn.Parameter(torch.Tensor(out_channels, in_channels, *self.kernel_size))
        # NOTE: self.bias is rebound just below; the bool assigned above
        # is transient (self.with_bias keeps the flag). Weight/bias values
        # are expected to come from init_cfg-driven initialization.
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_channels))
        else:
            self.bias = None

    def extra_repr(self):
        # Shown by nn.Module.__repr__.
        tmpstr = f'in_channels={self.in_channels}'
        tmpstr += f', out_channels={self.out_channels}'
        tmpstr += f', kernel_size={self.kernel_size}'
        tmpstr += f', num_branch={self.num_branch}'
        tmpstr += f', test_branch_idx={self.test_branch_idx}'
        tmpstr += f', stride={self.stride}'
        tmpstr += f', paddings={self.paddings}'
        tmpstr += f', dilations={self.dilations}'
        tmpstr += f', bias={self.bias}'
        return tmpstr

    def forward(self, inputs):
        """Convolve each branch input with the shared weight.

        ``inputs`` is a list of Tensors, one per active branch; a list of
        the same length is returned.
        """
        if (self.training or (self.test_branch_idx == (- 1))):
            # All branches: same weight, branch-specific dilation/padding.
            outputs = [F.conv2d(input, self.weight, self.bias, self.stride, padding, dilation) for (input, dilation, padding) in zip(inputs, self.dilations, self.paddings)]
        else:
            # Inference with a single selected branch.
            assert (len(inputs) == 1)
            outputs = [F.conv2d(inputs[0], self.weight, self.bias, self.stride, self.paddings[self.test_branch_idx], self.dilations[self.test_branch_idx])]
        return outputs
|
class TridentBottleneck(Bottleneck):
    """BottleBlock for TridentResNet.

    Runs the parent Bottleneck computation on a list of branch inputs;
    conv2 is replaced by a weight-sharing TridentConv.

    Args:
        trident_dilations (tuple[int, int, int]): Dilations of different
            trident branch.
        test_branch_idx (int): In inference, all 3 branches will be used
            if `test_branch_idx==-1`, otherwise only branch with index
            `test_branch_idx` will be used.
        concat_output (bool): Whether to concat the output list to a
            Tensor. `True` only in the last Block.
    """

    def __init__(self, trident_dilations, test_branch_idx, concat_output, **kwargs):
        super(TridentBottleneck, self).__init__(**kwargs)
        self.trident_dilations = trident_dilations
        self.num_branch = len(trident_dilations)
        self.concat_output = concat_output
        self.test_branch_idx = test_branch_idx
        # Replace the parent's conv2 with the multi-branch, weight-sharing
        # trident convolution.
        self.conv2 = TridentConv(self.planes, self.planes, kernel_size=3, stride=self.conv2_stride, bias=False, trident_dilations=self.trident_dilations, test_branch_idx=test_branch_idx, init_cfg=dict(type='Kaiming', distribution='uniform', mode='fan_in', override=dict(name='conv2')))

    def forward(self, x):
        # ``x`` is a Tensor for the first trident block, afterwards a
        # list of per-branch Tensors.

        def _inner_forward(x):
            # Single branch at test time unless test_branch_idx == -1.
            num_branch = (self.num_branch if (self.training or (self.test_branch_idx == (- 1))) else 1)
            identity = x
            if (not isinstance(x, list)):
                # First trident block: replicate the input per branch.
                x = ((x,) * num_branch)
                identity = x
                if (self.downsample is not None):
                    identity = [self.downsample(b) for b in x]
            out = [self.conv1(b) for b in x]
            out = [self.norm1(b) for b in out]
            out = [self.relu(b) for b in out]
            if self.with_plugins:
                for k in range(len(out)):
                    out[k] = self.forward_plugin(out[k], self.after_conv1_plugin_names)
            # TridentConv consumes/produces the whole branch list at once.
            out = self.conv2(out)
            out = [self.norm2(b) for b in out]
            out = [self.relu(b) for b in out]
            if self.with_plugins:
                for k in range(len(out)):
                    out[k] = self.forward_plugin(out[k], self.after_conv2_plugin_names)
            out = [self.conv3(b) for b in out]
            out = [self.norm3(b) for b in out]
            if self.with_plugins:
                for k in range(len(out)):
                    out[k] = self.forward_plugin(out[k], self.after_conv3_plugin_names)
            # Per-branch residual addition.
            out = [(out_b + identity_b) for (out_b, identity_b) in zip(out, identity)]
            return out

        if (self.with_cp and x.requires_grad):
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        out = [self.relu(b) for b in out]
        if self.concat_output:
            # Last block of the stage: stack branches along the batch dim.
            out = torch.cat(out, dim=0)
        return out
|
def make_trident_res_layer(block, inplanes, planes, num_blocks, stride=1, trident_dilations=(1, 2, 3), style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, plugins=None, test_branch_idx=(- 1)):
    """Build a stage of Trident residual blocks as an ``nn.Sequential``.

    Only the first block strides and gets the downsample projection; only
    the last block concatenates its branch outputs into one tensor.
    """
    downsample = None
    if stride != 1 or inplanes != planes * block.expansion:
        # 1x1 projection + norm for the identity branch.
        downsample = nn.Sequential(
            build_conv_layer(conv_cfg, inplanes, planes * block.expansion,
                             kernel_size=1, stride=stride, bias=False),
            build_norm_layer(norm_cfg, planes * block.expansion)[1])
    stage_blocks = []
    for idx in range(num_blocks):
        is_first = (idx == 0)
        stage_blocks.append(
            block(
                inplanes=inplanes,
                planes=planes,
                stride=stride if is_first else 1,
                trident_dilations=trident_dilations,
                downsample=downsample if is_first else None,
                style=style,
                with_cp=with_cp,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                dcn=dcn,
                plugins=plugins,
                test_branch_idx=test_branch_idx,
                concat_output=(idx == num_blocks - 1)))
        inplanes = planes * block.expansion
    return nn.Sequential(*stage_blocks)
|
@BACKBONES.register_module()
class TridentResNet(ResNet):
    """TridentResNet backbone.

    The stem layer, stage 1 and stage 2 in Trident ResNet are identical
    to ResNet, while in stage 3 Trident BottleBlock is utilized to
    replace the normal BottleBlock to yield trident output. Different
    branches share the convolution weight but use different dilations to
    achieve multi-scale output.

    Args:
        depth (int): Depth of resnet, from {50, 101, 152}.
        num_branch (int): Number of branches in TridentNet.
        test_branch_idx (int): In inference, all 3 branches will be used
            if `test_branch_idx==-1`, otherwise only branch with index
            `test_branch_idx` will be used.
        trident_dilations (tuple[int]): Dilations of different trident
            branch. len(trident_dilations) should be equal to num_branch.
    """

    def __init__(self, depth, num_branch, test_branch_idx, trident_dilations, **kwargs):
        assert (num_branch == len(trident_dilations))
        assert (depth in (50, 101, 152))
        super(TridentResNet, self).__init__(depth, **kwargs)
        # TridentNet uses only 3 stages; the last one is rebuilt below.
        assert (self.num_stages == 3)
        self.test_branch_idx = test_branch_idx
        self.num_branch = num_branch
        last_stage_idx = (self.num_stages - 1)
        stride = self.strides[last_stage_idx]
        dilation = trident_dilations
        dcn = (self.dcn if self.stage_with_dcn[last_stage_idx] else None)
        if (self.plugins is not None):
            stage_plugins = self.make_stage_plugins(self.plugins, last_stage_idx)
        else:
            stage_plugins = None
        planes = (self.base_channels * (2 ** last_stage_idx))
        # Rebuild the final stage with trident blocks, replacing the layer
        # the parent constructor created under the same attribute name.
        res_layer = make_trident_res_layer(TridentBottleneck, inplanes=((self.block.expansion * self.base_channels) * (2 ** (last_stage_idx - 1))), planes=planes, num_blocks=self.stage_blocks[last_stage_idx], stride=stride, trident_dilations=dilation, style=self.style, with_cp=self.with_cp, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, dcn=dcn, plugins=stage_plugins, test_branch_idx=self.test_branch_idx)
        layer_name = f'layer{(last_stage_idx + 1)}'
        self.__setattr__(layer_name, res_layer)
        self.res_layers.pop(last_stage_idx)
        self.res_layers.insert(last_stage_idx, layer_name)
        # Re-freeze in case the rebuilt stage falls within frozen_stages.
        self._freeze_stages()
|
def build_backbone(cfg):
    """Build a backbone module from its registry config dict."""
    return BACKBONES.build(cfg)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.