Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- CCEdit-main/src/taming-transformers/data/coco_annotations_100/val2017/000000013291.jpg +3 -0
- CCEdit-main/src/taming-transformers/data/coco_annotations_100/val2017/000000013923.jpg +3 -0
- FRESCO/src/ControlNet/annotator/uniformer/configs/_base_/datasets/chase_db1.py +59 -0
- FRESCO/src/ControlNet/annotator/uniformer/configs/_base_/datasets/hrf.py +59 -0
- FRESCO/src/ControlNet/annotator/uniformer/configs/_base_/datasets/pascal_context_59.py +60 -0
- FRESCO/src/ControlNet/annotator/uniformer/configs/_base_/models/cgnet.py +35 -0
- FRESCO/src/ControlNet/annotator/uniformer/configs/_base_/models/deeplabv3plus_r50-d8.py +46 -0
- FRESCO/src/ControlNet/annotator/uniformer/configs/_base_/models/encnet_r50-d8.py +48 -0
- FRESCO/src/ControlNet/annotator/uniformer/configs/_base_/models/fcn_r50-d8.py +45 -0
- FRESCO/src/ControlNet/annotator/uniformer/configs/_base_/models/ocrnet_hr18.py +68 -0
- FRESCO/src/ControlNet/annotator/uniformer/configs/_base_/models/pointrend_r50.py +56 -0
- FRESCO/src/ControlNet/annotator/uniformer/configs/_base_/models/psanet_r50-d8.py +49 -0
- FRESCO/src/ControlNet/annotator/uniformer/configs/_base_/schedules/schedule_160k.py +9 -0
- FRESCO/src/ControlNet/annotator/uniformer/configs/_base_/schedules/schedule_20k.py +9 -0
- FRESCO/src/ControlNet/annotator/uniformer/configs/_base_/schedules/schedule_40k.py +9 -0
- FRESCO/src/ControlNet/annotator/uniformer/configs/_base_/schedules/schedule_80k.py +9 -0
- FRESCO/src/ControlNet/annotator/uniformer/exp/upernet_global_small/config.py +38 -0
- FRESCO/src/ControlNet/annotator/uniformer/exp/upernet_global_small/run.sh +10 -0
- FRESCO/src/ControlNet/annotator/uniformer/exp/upernet_global_small/test.sh +10 -0
- FRESCO/src/ControlNet/annotator/uniformer/exp/upernet_global_small/test_config_g.py +38 -0
- FRESCO/src/ControlNet/annotator/uniformer/exp/upernet_global_small/test_config_h32.py +39 -0
- FRESCO/src/ControlNet/annotator/uniformer/exp/upernet_global_small/test_config_w32.py +39 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/__init__.py +15 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/arraymisc/__init__.py +4 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/arraymisc/quantization.py +55 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/engine/__init__.py +8 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/engine/test.py +202 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/fileio/file_client.py +1148 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/fileio/io.py +151 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/fileio/parse.py +97 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/image/__init__.py +28 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/image/colorspace.py +306 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/image/geometric.py +728 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/image/io.py +258 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/image/misc.py +44 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/image/photometric.py +428 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/model_zoo/deprecated.json +6 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/model_zoo/mmcls.json +31 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/model_zoo/open_mmlab.json +50 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/ops/deform_roi_pool.py +204 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/ops/deprecated_wrappers.py +43 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/ops/furthest_point_sample.py +83 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/ops/fused_bias_leakyrelu.py +268 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/ops/nms.py +417 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/ops/points_in_boxes.py +133 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/ops/psa_mask.py +92 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/ops/roi_align.py +223 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/ops/roipoint_pool3d.py +77 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/ops/tin_shift.py +68 -0
- FRESCO/src/ControlNet/annotator/uniformer/mmcv/parallel/__init__.py +13 -0
CCEdit-main/src/taming-transformers/data/coco_annotations_100/val2017/000000013291.jpg
ADDED
|
Git LFS Details
|
CCEdit-main/src/taming-transformers/data/coco_annotations_100/val2017/000000013923.jpg
ADDED
|
Git LFS Details
|
FRESCO/src/ControlNet/annotator/uniformer/configs/_base_/datasets/chase_db1.py
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# dataset settings
|
| 2 |
+
dataset_type = 'ChaseDB1Dataset'
|
| 3 |
+
data_root = 'data/CHASE_DB1'
|
| 4 |
+
img_norm_cfg = dict(
|
| 5 |
+
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
|
| 6 |
+
img_scale = (960, 999)
|
| 7 |
+
crop_size = (128, 128)
|
| 8 |
+
train_pipeline = [
|
| 9 |
+
dict(type='LoadImageFromFile'),
|
| 10 |
+
dict(type='LoadAnnotations'),
|
| 11 |
+
dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
|
| 12 |
+
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
|
| 13 |
+
dict(type='RandomFlip', prob=0.5),
|
| 14 |
+
dict(type='PhotoMetricDistortion'),
|
| 15 |
+
dict(type='Normalize', **img_norm_cfg),
|
| 16 |
+
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
|
| 17 |
+
dict(type='DefaultFormatBundle'),
|
| 18 |
+
dict(type='Collect', keys=['img', 'gt_semantic_seg'])
|
| 19 |
+
]
|
| 20 |
+
test_pipeline = [
|
| 21 |
+
dict(type='LoadImageFromFile'),
|
| 22 |
+
dict(
|
| 23 |
+
type='MultiScaleFlipAug',
|
| 24 |
+
img_scale=img_scale,
|
| 25 |
+
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
|
| 26 |
+
flip=False,
|
| 27 |
+
transforms=[
|
| 28 |
+
dict(type='Resize', keep_ratio=True),
|
| 29 |
+
dict(type='RandomFlip'),
|
| 30 |
+
dict(type='Normalize', **img_norm_cfg),
|
| 31 |
+
dict(type='ImageToTensor', keys=['img']),
|
| 32 |
+
dict(type='Collect', keys=['img'])
|
| 33 |
+
])
|
| 34 |
+
]
|
| 35 |
+
|
| 36 |
+
data = dict(
|
| 37 |
+
samples_per_gpu=4,
|
| 38 |
+
workers_per_gpu=4,
|
| 39 |
+
train=dict(
|
| 40 |
+
type='RepeatDataset',
|
| 41 |
+
times=40000,
|
| 42 |
+
dataset=dict(
|
| 43 |
+
type=dataset_type,
|
| 44 |
+
data_root=data_root,
|
| 45 |
+
img_dir='images/training',
|
| 46 |
+
ann_dir='annotations/training',
|
| 47 |
+
pipeline=train_pipeline)),
|
| 48 |
+
val=dict(
|
| 49 |
+
type=dataset_type,
|
| 50 |
+
data_root=data_root,
|
| 51 |
+
img_dir='images/validation',
|
| 52 |
+
ann_dir='annotations/validation',
|
| 53 |
+
pipeline=test_pipeline),
|
| 54 |
+
test=dict(
|
| 55 |
+
type=dataset_type,
|
| 56 |
+
data_root=data_root,
|
| 57 |
+
img_dir='images/validation',
|
| 58 |
+
ann_dir='annotations/validation',
|
| 59 |
+
pipeline=test_pipeline))
|
FRESCO/src/ControlNet/annotator/uniformer/configs/_base_/datasets/hrf.py
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# dataset settings
|
| 2 |
+
dataset_type = 'HRFDataset'
|
| 3 |
+
data_root = 'data/HRF'
|
| 4 |
+
img_norm_cfg = dict(
|
| 5 |
+
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
|
| 6 |
+
img_scale = (2336, 3504)
|
| 7 |
+
crop_size = (256, 256)
|
| 8 |
+
train_pipeline = [
|
| 9 |
+
dict(type='LoadImageFromFile'),
|
| 10 |
+
dict(type='LoadAnnotations'),
|
| 11 |
+
dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
|
| 12 |
+
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
|
| 13 |
+
dict(type='RandomFlip', prob=0.5),
|
| 14 |
+
dict(type='PhotoMetricDistortion'),
|
| 15 |
+
dict(type='Normalize', **img_norm_cfg),
|
| 16 |
+
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
|
| 17 |
+
dict(type='DefaultFormatBundle'),
|
| 18 |
+
dict(type='Collect', keys=['img', 'gt_semantic_seg'])
|
| 19 |
+
]
|
| 20 |
+
test_pipeline = [
|
| 21 |
+
dict(type='LoadImageFromFile'),
|
| 22 |
+
dict(
|
| 23 |
+
type='MultiScaleFlipAug',
|
| 24 |
+
img_scale=img_scale,
|
| 25 |
+
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
|
| 26 |
+
flip=False,
|
| 27 |
+
transforms=[
|
| 28 |
+
dict(type='Resize', keep_ratio=True),
|
| 29 |
+
dict(type='RandomFlip'),
|
| 30 |
+
dict(type='Normalize', **img_norm_cfg),
|
| 31 |
+
dict(type='ImageToTensor', keys=['img']),
|
| 32 |
+
dict(type='Collect', keys=['img'])
|
| 33 |
+
])
|
| 34 |
+
]
|
| 35 |
+
|
| 36 |
+
data = dict(
|
| 37 |
+
samples_per_gpu=4,
|
| 38 |
+
workers_per_gpu=4,
|
| 39 |
+
train=dict(
|
| 40 |
+
type='RepeatDataset',
|
| 41 |
+
times=40000,
|
| 42 |
+
dataset=dict(
|
| 43 |
+
type=dataset_type,
|
| 44 |
+
data_root=data_root,
|
| 45 |
+
img_dir='images/training',
|
| 46 |
+
ann_dir='annotations/training',
|
| 47 |
+
pipeline=train_pipeline)),
|
| 48 |
+
val=dict(
|
| 49 |
+
type=dataset_type,
|
| 50 |
+
data_root=data_root,
|
| 51 |
+
img_dir='images/validation',
|
| 52 |
+
ann_dir='annotations/validation',
|
| 53 |
+
pipeline=test_pipeline),
|
| 54 |
+
test=dict(
|
| 55 |
+
type=dataset_type,
|
| 56 |
+
data_root=data_root,
|
| 57 |
+
img_dir='images/validation',
|
| 58 |
+
ann_dir='annotations/validation',
|
| 59 |
+
pipeline=test_pipeline))
|
FRESCO/src/ControlNet/annotator/uniformer/configs/_base_/datasets/pascal_context_59.py
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# dataset settings
|
| 2 |
+
dataset_type = 'PascalContextDataset59'
|
| 3 |
+
data_root = 'data/VOCdevkit/VOC2010/'
|
| 4 |
+
img_norm_cfg = dict(
|
| 5 |
+
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
|
| 6 |
+
|
| 7 |
+
img_scale = (520, 520)
|
| 8 |
+
crop_size = (480, 480)
|
| 9 |
+
|
| 10 |
+
train_pipeline = [
|
| 11 |
+
dict(type='LoadImageFromFile'),
|
| 12 |
+
dict(type='LoadAnnotations', reduce_zero_label=True),
|
| 13 |
+
dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
|
| 14 |
+
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
|
| 15 |
+
dict(type='RandomFlip', prob=0.5),
|
| 16 |
+
dict(type='PhotoMetricDistortion'),
|
| 17 |
+
dict(type='Normalize', **img_norm_cfg),
|
| 18 |
+
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
|
| 19 |
+
dict(type='DefaultFormatBundle'),
|
| 20 |
+
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
|
| 21 |
+
]
|
| 22 |
+
test_pipeline = [
|
| 23 |
+
dict(type='LoadImageFromFile'),
|
| 24 |
+
dict(
|
| 25 |
+
type='MultiScaleFlipAug',
|
| 26 |
+
img_scale=img_scale,
|
| 27 |
+
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
|
| 28 |
+
flip=False,
|
| 29 |
+
transforms=[
|
| 30 |
+
dict(type='Resize', keep_ratio=True),
|
| 31 |
+
dict(type='RandomFlip'),
|
| 32 |
+
dict(type='Normalize', **img_norm_cfg),
|
| 33 |
+
dict(type='ImageToTensor', keys=['img']),
|
| 34 |
+
dict(type='Collect', keys=['img']),
|
| 35 |
+
])
|
| 36 |
+
]
|
| 37 |
+
data = dict(
|
| 38 |
+
samples_per_gpu=4,
|
| 39 |
+
workers_per_gpu=4,
|
| 40 |
+
train=dict(
|
| 41 |
+
type=dataset_type,
|
| 42 |
+
data_root=data_root,
|
| 43 |
+
img_dir='JPEGImages',
|
| 44 |
+
ann_dir='SegmentationClassContext',
|
| 45 |
+
split='ImageSets/SegmentationContext/train.txt',
|
| 46 |
+
pipeline=train_pipeline),
|
| 47 |
+
val=dict(
|
| 48 |
+
type=dataset_type,
|
| 49 |
+
data_root=data_root,
|
| 50 |
+
img_dir='JPEGImages',
|
| 51 |
+
ann_dir='SegmentationClassContext',
|
| 52 |
+
split='ImageSets/SegmentationContext/val.txt',
|
| 53 |
+
pipeline=test_pipeline),
|
| 54 |
+
test=dict(
|
| 55 |
+
type=dataset_type,
|
| 56 |
+
data_root=data_root,
|
| 57 |
+
img_dir='JPEGImages',
|
| 58 |
+
ann_dir='SegmentationClassContext',
|
| 59 |
+
split='ImageSets/SegmentationContext/val.txt',
|
| 60 |
+
pipeline=test_pipeline))
|
FRESCO/src/ControlNet/annotator/uniformer/configs/_base_/models/cgnet.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# model settings
|
| 2 |
+
norm_cfg = dict(type='SyncBN', eps=1e-03, requires_grad=True)
|
| 3 |
+
model = dict(
|
| 4 |
+
type='EncoderDecoder',
|
| 5 |
+
backbone=dict(
|
| 6 |
+
type='CGNet',
|
| 7 |
+
norm_cfg=norm_cfg,
|
| 8 |
+
in_channels=3,
|
| 9 |
+
num_channels=(32, 64, 128),
|
| 10 |
+
num_blocks=(3, 21),
|
| 11 |
+
dilations=(2, 4),
|
| 12 |
+
reductions=(8, 16)),
|
| 13 |
+
decode_head=dict(
|
| 14 |
+
type='FCNHead',
|
| 15 |
+
in_channels=256,
|
| 16 |
+
in_index=2,
|
| 17 |
+
channels=256,
|
| 18 |
+
num_convs=0,
|
| 19 |
+
concat_input=False,
|
| 20 |
+
dropout_ratio=0,
|
| 21 |
+
num_classes=19,
|
| 22 |
+
norm_cfg=norm_cfg,
|
| 23 |
+
loss_decode=dict(
|
| 24 |
+
type='CrossEntropyLoss',
|
| 25 |
+
use_sigmoid=False,
|
| 26 |
+
loss_weight=1.0,
|
| 27 |
+
class_weight=[
|
| 28 |
+
2.5959933, 6.7415504, 3.5354059, 9.8663225, 9.690899, 9.369352,
|
| 29 |
+
10.289121, 9.953208, 4.3097677, 9.490387, 7.674431, 9.396905,
|
| 30 |
+
10.347791, 6.3927646, 10.226669, 10.241062, 10.280587,
|
| 31 |
+
10.396974, 10.055647
|
| 32 |
+
])),
|
| 33 |
+
# model training and testing settings
|
| 34 |
+
train_cfg=dict(sampler=None),
|
| 35 |
+
test_cfg=dict(mode='whole'))
|
FRESCO/src/ControlNet/annotator/uniformer/configs/_base_/models/deeplabv3plus_r50-d8.py
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# model settings
|
| 2 |
+
norm_cfg = dict(type='SyncBN', requires_grad=True)
|
| 3 |
+
model = dict(
|
| 4 |
+
type='EncoderDecoder',
|
| 5 |
+
pretrained='open-mmlab://resnet50_v1c',
|
| 6 |
+
backbone=dict(
|
| 7 |
+
type='ResNetV1c',
|
| 8 |
+
depth=50,
|
| 9 |
+
num_stages=4,
|
| 10 |
+
out_indices=(0, 1, 2, 3),
|
| 11 |
+
dilations=(1, 1, 2, 4),
|
| 12 |
+
strides=(1, 2, 1, 1),
|
| 13 |
+
norm_cfg=norm_cfg,
|
| 14 |
+
norm_eval=False,
|
| 15 |
+
style='pytorch',
|
| 16 |
+
contract_dilation=True),
|
| 17 |
+
decode_head=dict(
|
| 18 |
+
type='DepthwiseSeparableASPPHead',
|
| 19 |
+
in_channels=2048,
|
| 20 |
+
in_index=3,
|
| 21 |
+
channels=512,
|
| 22 |
+
dilations=(1, 12, 24, 36),
|
| 23 |
+
c1_in_channels=256,
|
| 24 |
+
c1_channels=48,
|
| 25 |
+
dropout_ratio=0.1,
|
| 26 |
+
num_classes=19,
|
| 27 |
+
norm_cfg=norm_cfg,
|
| 28 |
+
align_corners=False,
|
| 29 |
+
loss_decode=dict(
|
| 30 |
+
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
|
| 31 |
+
auxiliary_head=dict(
|
| 32 |
+
type='FCNHead',
|
| 33 |
+
in_channels=1024,
|
| 34 |
+
in_index=2,
|
| 35 |
+
channels=256,
|
| 36 |
+
num_convs=1,
|
| 37 |
+
concat_input=False,
|
| 38 |
+
dropout_ratio=0.1,
|
| 39 |
+
num_classes=19,
|
| 40 |
+
norm_cfg=norm_cfg,
|
| 41 |
+
align_corners=False,
|
| 42 |
+
loss_decode=dict(
|
| 43 |
+
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
|
| 44 |
+
# model training and testing settings
|
| 45 |
+
train_cfg=dict(),
|
| 46 |
+
test_cfg=dict(mode='whole'))
|
FRESCO/src/ControlNet/annotator/uniformer/configs/_base_/models/encnet_r50-d8.py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# model settings
|
| 2 |
+
norm_cfg = dict(type='SyncBN', requires_grad=True)
|
| 3 |
+
model = dict(
|
| 4 |
+
type='EncoderDecoder',
|
| 5 |
+
pretrained='open-mmlab://resnet50_v1c',
|
| 6 |
+
backbone=dict(
|
| 7 |
+
type='ResNetV1c',
|
| 8 |
+
depth=50,
|
| 9 |
+
num_stages=4,
|
| 10 |
+
out_indices=(0, 1, 2, 3),
|
| 11 |
+
dilations=(1, 1, 2, 4),
|
| 12 |
+
strides=(1, 2, 1, 1),
|
| 13 |
+
norm_cfg=norm_cfg,
|
| 14 |
+
norm_eval=False,
|
| 15 |
+
style='pytorch',
|
| 16 |
+
contract_dilation=True),
|
| 17 |
+
decode_head=dict(
|
| 18 |
+
type='EncHead',
|
| 19 |
+
in_channels=[512, 1024, 2048],
|
| 20 |
+
in_index=(1, 2, 3),
|
| 21 |
+
channels=512,
|
| 22 |
+
num_codes=32,
|
| 23 |
+
use_se_loss=True,
|
| 24 |
+
add_lateral=False,
|
| 25 |
+
dropout_ratio=0.1,
|
| 26 |
+
num_classes=19,
|
| 27 |
+
norm_cfg=norm_cfg,
|
| 28 |
+
align_corners=False,
|
| 29 |
+
loss_decode=dict(
|
| 30 |
+
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
|
| 31 |
+
loss_se_decode=dict(
|
| 32 |
+
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.2)),
|
| 33 |
+
auxiliary_head=dict(
|
| 34 |
+
type='FCNHead',
|
| 35 |
+
in_channels=1024,
|
| 36 |
+
in_index=2,
|
| 37 |
+
channels=256,
|
| 38 |
+
num_convs=1,
|
| 39 |
+
concat_input=False,
|
| 40 |
+
dropout_ratio=0.1,
|
| 41 |
+
num_classes=19,
|
| 42 |
+
norm_cfg=norm_cfg,
|
| 43 |
+
align_corners=False,
|
| 44 |
+
loss_decode=dict(
|
| 45 |
+
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
|
| 46 |
+
# model training and testing settings
|
| 47 |
+
train_cfg=dict(),
|
| 48 |
+
test_cfg=dict(mode='whole'))
|
FRESCO/src/ControlNet/annotator/uniformer/configs/_base_/models/fcn_r50-d8.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# model settings
|
| 2 |
+
norm_cfg = dict(type='SyncBN', requires_grad=True)
|
| 3 |
+
model = dict(
|
| 4 |
+
type='EncoderDecoder',
|
| 5 |
+
pretrained='open-mmlab://resnet50_v1c',
|
| 6 |
+
backbone=dict(
|
| 7 |
+
type='ResNetV1c',
|
| 8 |
+
depth=50,
|
| 9 |
+
num_stages=4,
|
| 10 |
+
out_indices=(0, 1, 2, 3),
|
| 11 |
+
dilations=(1, 1, 2, 4),
|
| 12 |
+
strides=(1, 2, 1, 1),
|
| 13 |
+
norm_cfg=norm_cfg,
|
| 14 |
+
norm_eval=False,
|
| 15 |
+
style='pytorch',
|
| 16 |
+
contract_dilation=True),
|
| 17 |
+
decode_head=dict(
|
| 18 |
+
type='FCNHead',
|
| 19 |
+
in_channels=2048,
|
| 20 |
+
in_index=3,
|
| 21 |
+
channels=512,
|
| 22 |
+
num_convs=2,
|
| 23 |
+
concat_input=True,
|
| 24 |
+
dropout_ratio=0.1,
|
| 25 |
+
num_classes=19,
|
| 26 |
+
norm_cfg=norm_cfg,
|
| 27 |
+
align_corners=False,
|
| 28 |
+
loss_decode=dict(
|
| 29 |
+
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
|
| 30 |
+
auxiliary_head=dict(
|
| 31 |
+
type='FCNHead',
|
| 32 |
+
in_channels=1024,
|
| 33 |
+
in_index=2,
|
| 34 |
+
channels=256,
|
| 35 |
+
num_convs=1,
|
| 36 |
+
concat_input=False,
|
| 37 |
+
dropout_ratio=0.1,
|
| 38 |
+
num_classes=19,
|
| 39 |
+
norm_cfg=norm_cfg,
|
| 40 |
+
align_corners=False,
|
| 41 |
+
loss_decode=dict(
|
| 42 |
+
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
|
| 43 |
+
# model training and testing settings
|
| 44 |
+
train_cfg=dict(),
|
| 45 |
+
test_cfg=dict(mode='whole'))
|
FRESCO/src/ControlNet/annotator/uniformer/configs/_base_/models/ocrnet_hr18.py
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# model settings
|
| 2 |
+
norm_cfg = dict(type='SyncBN', requires_grad=True)
|
| 3 |
+
model = dict(
|
| 4 |
+
type='CascadeEncoderDecoder',
|
| 5 |
+
num_stages=2,
|
| 6 |
+
pretrained='open-mmlab://msra/hrnetv2_w18',
|
| 7 |
+
backbone=dict(
|
| 8 |
+
type='HRNet',
|
| 9 |
+
norm_cfg=norm_cfg,
|
| 10 |
+
norm_eval=False,
|
| 11 |
+
extra=dict(
|
| 12 |
+
stage1=dict(
|
| 13 |
+
num_modules=1,
|
| 14 |
+
num_branches=1,
|
| 15 |
+
block='BOTTLENECK',
|
| 16 |
+
num_blocks=(4, ),
|
| 17 |
+
num_channels=(64, )),
|
| 18 |
+
stage2=dict(
|
| 19 |
+
num_modules=1,
|
| 20 |
+
num_branches=2,
|
| 21 |
+
block='BASIC',
|
| 22 |
+
num_blocks=(4, 4),
|
| 23 |
+
num_channels=(18, 36)),
|
| 24 |
+
stage3=dict(
|
| 25 |
+
num_modules=4,
|
| 26 |
+
num_branches=3,
|
| 27 |
+
block='BASIC',
|
| 28 |
+
num_blocks=(4, 4, 4),
|
| 29 |
+
num_channels=(18, 36, 72)),
|
| 30 |
+
stage4=dict(
|
| 31 |
+
num_modules=3,
|
| 32 |
+
num_branches=4,
|
| 33 |
+
block='BASIC',
|
| 34 |
+
num_blocks=(4, 4, 4, 4),
|
| 35 |
+
num_channels=(18, 36, 72, 144)))),
|
| 36 |
+
decode_head=[
|
| 37 |
+
dict(
|
| 38 |
+
type='FCNHead',
|
| 39 |
+
in_channels=[18, 36, 72, 144],
|
| 40 |
+
channels=sum([18, 36, 72, 144]),
|
| 41 |
+
in_index=(0, 1, 2, 3),
|
| 42 |
+
input_transform='resize_concat',
|
| 43 |
+
kernel_size=1,
|
| 44 |
+
num_convs=1,
|
| 45 |
+
concat_input=False,
|
| 46 |
+
dropout_ratio=-1,
|
| 47 |
+
num_classes=19,
|
| 48 |
+
norm_cfg=norm_cfg,
|
| 49 |
+
align_corners=False,
|
| 50 |
+
loss_decode=dict(
|
| 51 |
+
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
|
| 52 |
+
dict(
|
| 53 |
+
type='OCRHead',
|
| 54 |
+
in_channels=[18, 36, 72, 144],
|
| 55 |
+
in_index=(0, 1, 2, 3),
|
| 56 |
+
input_transform='resize_concat',
|
| 57 |
+
channels=512,
|
| 58 |
+
ocr_channels=256,
|
| 59 |
+
dropout_ratio=-1,
|
| 60 |
+
num_classes=19,
|
| 61 |
+
norm_cfg=norm_cfg,
|
| 62 |
+
align_corners=False,
|
| 63 |
+
loss_decode=dict(
|
| 64 |
+
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
|
| 65 |
+
],
|
| 66 |
+
# model training and testing settings
|
| 67 |
+
train_cfg=dict(),
|
| 68 |
+
test_cfg=dict(mode='whole'))
|
FRESCO/src/ControlNet/annotator/uniformer/configs/_base_/models/pointrend_r50.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# model settings
|
| 2 |
+
norm_cfg = dict(type='SyncBN', requires_grad=True)
|
| 3 |
+
model = dict(
|
| 4 |
+
type='CascadeEncoderDecoder',
|
| 5 |
+
num_stages=2,
|
| 6 |
+
pretrained='open-mmlab://resnet50_v1c',
|
| 7 |
+
backbone=dict(
|
| 8 |
+
type='ResNetV1c',
|
| 9 |
+
depth=50,
|
| 10 |
+
num_stages=4,
|
| 11 |
+
out_indices=(0, 1, 2, 3),
|
| 12 |
+
dilations=(1, 1, 1, 1),
|
| 13 |
+
strides=(1, 2, 2, 2),
|
| 14 |
+
norm_cfg=norm_cfg,
|
| 15 |
+
norm_eval=False,
|
| 16 |
+
style='pytorch',
|
| 17 |
+
contract_dilation=True),
|
| 18 |
+
neck=dict(
|
| 19 |
+
type='FPN',
|
| 20 |
+
in_channels=[256, 512, 1024, 2048],
|
| 21 |
+
out_channels=256,
|
| 22 |
+
num_outs=4),
|
| 23 |
+
decode_head=[
|
| 24 |
+
dict(
|
| 25 |
+
type='FPNHead',
|
| 26 |
+
in_channels=[256, 256, 256, 256],
|
| 27 |
+
in_index=[0, 1, 2, 3],
|
| 28 |
+
feature_strides=[4, 8, 16, 32],
|
| 29 |
+
channels=128,
|
| 30 |
+
dropout_ratio=-1,
|
| 31 |
+
num_classes=19,
|
| 32 |
+
norm_cfg=norm_cfg,
|
| 33 |
+
align_corners=False,
|
| 34 |
+
loss_decode=dict(
|
| 35 |
+
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
|
| 36 |
+
dict(
|
| 37 |
+
type='PointHead',
|
| 38 |
+
in_channels=[256],
|
| 39 |
+
in_index=[0],
|
| 40 |
+
channels=256,
|
| 41 |
+
num_fcs=3,
|
| 42 |
+
coarse_pred_each_layer=True,
|
| 43 |
+
dropout_ratio=-1,
|
| 44 |
+
num_classes=19,
|
| 45 |
+
align_corners=False,
|
| 46 |
+
loss_decode=dict(
|
| 47 |
+
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))
|
| 48 |
+
],
|
| 49 |
+
# model training and testing settings
|
| 50 |
+
train_cfg=dict(
|
| 51 |
+
num_points=2048, oversample_ratio=3, importance_sample_ratio=0.75),
|
| 52 |
+
test_cfg=dict(
|
| 53 |
+
mode='whole',
|
| 54 |
+
subdivision_steps=2,
|
| 55 |
+
subdivision_num_points=8196,
|
| 56 |
+
scale_factor=2))
|
FRESCO/src/ControlNet/annotator/uniformer/configs/_base_/models/psanet_r50-d8.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# model settings
|
| 2 |
+
norm_cfg = dict(type='SyncBN', requires_grad=True)
|
| 3 |
+
model = dict(
|
| 4 |
+
type='EncoderDecoder',
|
| 5 |
+
pretrained='open-mmlab://resnet50_v1c',
|
| 6 |
+
backbone=dict(
|
| 7 |
+
type='ResNetV1c',
|
| 8 |
+
depth=50,
|
| 9 |
+
num_stages=4,
|
| 10 |
+
out_indices=(0, 1, 2, 3),
|
| 11 |
+
dilations=(1, 1, 2, 4),
|
| 12 |
+
strides=(1, 2, 1, 1),
|
| 13 |
+
norm_cfg=norm_cfg,
|
| 14 |
+
norm_eval=False,
|
| 15 |
+
style='pytorch',
|
| 16 |
+
contract_dilation=True),
|
| 17 |
+
decode_head=dict(
|
| 18 |
+
type='PSAHead',
|
| 19 |
+
in_channels=2048,
|
| 20 |
+
in_index=3,
|
| 21 |
+
channels=512,
|
| 22 |
+
mask_size=(97, 97),
|
| 23 |
+
psa_type='bi-direction',
|
| 24 |
+
compact=False,
|
| 25 |
+
shrink_factor=2,
|
| 26 |
+
normalization_factor=1.0,
|
| 27 |
+
psa_softmax=True,
|
| 28 |
+
dropout_ratio=0.1,
|
| 29 |
+
num_classes=19,
|
| 30 |
+
norm_cfg=norm_cfg,
|
| 31 |
+
align_corners=False,
|
| 32 |
+
loss_decode=dict(
|
| 33 |
+
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
|
| 34 |
+
auxiliary_head=dict(
|
| 35 |
+
type='FCNHead',
|
| 36 |
+
in_channels=1024,
|
| 37 |
+
in_index=2,
|
| 38 |
+
channels=256,
|
| 39 |
+
num_convs=1,
|
| 40 |
+
concat_input=False,
|
| 41 |
+
dropout_ratio=0.1,
|
| 42 |
+
num_classes=19,
|
| 43 |
+
norm_cfg=norm_cfg,
|
| 44 |
+
align_corners=False,
|
| 45 |
+
loss_decode=dict(
|
| 46 |
+
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
|
| 47 |
+
# model training and testing settings
|
| 48 |
+
train_cfg=dict(),
|
| 49 |
+
test_cfg=dict(mode='whole'))
|
FRESCO/src/ControlNet/annotator/uniformer/configs/_base_/schedules/schedule_160k.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# optimizer
|
| 2 |
+
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
|
| 3 |
+
optimizer_config = dict()
|
| 4 |
+
# learning policy
|
| 5 |
+
lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
|
| 6 |
+
# runtime settings
|
| 7 |
+
runner = dict(type='IterBasedRunner', max_iters=160000)
|
| 8 |
+
checkpoint_config = dict(by_epoch=False, interval=16000)
|
| 9 |
+
evaluation = dict(interval=16000, metric='mIoU')
|
FRESCO/src/ControlNet/annotator/uniformer/configs/_base_/schedules/schedule_20k.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# optimizer
|
| 2 |
+
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
|
| 3 |
+
optimizer_config = dict()
|
| 4 |
+
# learning policy
|
| 5 |
+
lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
|
| 6 |
+
# runtime settings
|
| 7 |
+
runner = dict(type='IterBasedRunner', max_iters=20000)
|
| 8 |
+
checkpoint_config = dict(by_epoch=False, interval=2000)
|
| 9 |
+
evaluation = dict(interval=2000, metric='mIoU')
|
FRESCO/src/ControlNet/annotator/uniformer/configs/_base_/schedules/schedule_40k.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# optimizer
|
| 2 |
+
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
|
| 3 |
+
optimizer_config = dict()
|
| 4 |
+
# learning policy
|
| 5 |
+
lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
|
| 6 |
+
# runtime settings
|
| 7 |
+
runner = dict(type='IterBasedRunner', max_iters=40000)
|
| 8 |
+
checkpoint_config = dict(by_epoch=False, interval=4000)
|
| 9 |
+
evaluation = dict(interval=4000, metric='mIoU')
|
FRESCO/src/ControlNet/annotator/uniformer/configs/_base_/schedules/schedule_80k.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# optimizer
|
| 2 |
+
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
|
| 3 |
+
optimizer_config = dict()
|
| 4 |
+
# learning policy
|
| 5 |
+
lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
|
| 6 |
+
# runtime settings
|
| 7 |
+
runner = dict(type='IterBasedRunner', max_iters=80000)
|
| 8 |
+
checkpoint_config = dict(by_epoch=False, interval=8000)
|
| 9 |
+
evaluation = dict(interval=8000, metric='mIoU')
|
FRESCO/src/ControlNet/annotator/uniformer/exp/upernet_global_small/config.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
_base_ = [
|
| 2 |
+
'../../configs/_base_/models/upernet_uniformer.py',
|
| 3 |
+
'../../configs/_base_/datasets/ade20k.py',
|
| 4 |
+
'../../configs/_base_/default_runtime.py',
|
| 5 |
+
'../../configs/_base_/schedules/schedule_160k.py'
|
| 6 |
+
]
|
| 7 |
+
model = dict(
|
| 8 |
+
backbone=dict(
|
| 9 |
+
type='UniFormer',
|
| 10 |
+
embed_dim=[64, 128, 320, 512],
|
| 11 |
+
layers=[3, 4, 8, 3],
|
| 12 |
+
head_dim=64,
|
| 13 |
+
drop_path_rate=0.25,
|
| 14 |
+
windows=False,
|
| 15 |
+
hybrid=False
|
| 16 |
+
),
|
| 17 |
+
decode_head=dict(
|
| 18 |
+
in_channels=[64, 128, 320, 512],
|
| 19 |
+
num_classes=150
|
| 20 |
+
),
|
| 21 |
+
auxiliary_head=dict(
|
| 22 |
+
in_channels=320,
|
| 23 |
+
num_classes=150
|
| 24 |
+
))
|
| 25 |
+
|
| 26 |
+
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
|
| 27 |
+
optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
|
| 28 |
+
paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
|
| 29 |
+
'relative_position_bias_table': dict(decay_mult=0.),
|
| 30 |
+
'norm': dict(decay_mult=0.)}))
|
| 31 |
+
|
| 32 |
+
lr_config = dict(_delete_=True, policy='poly',
|
| 33 |
+
warmup='linear',
|
| 34 |
+
warmup_iters=1500,
|
| 35 |
+
warmup_ratio=1e-6,
|
| 36 |
+
power=1.0, min_lr=0.0, by_epoch=False)
|
| 37 |
+
|
| 38 |
+
data=dict(samples_per_gpu=2)
|
FRESCO/src/ControlNet/annotator/uniformer/exp/upernet_global_small/run.sh
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env bash
|
| 2 |
+
|
| 3 |
+
work_path=$(dirname $0)
|
| 4 |
+
PYTHONPATH="$(dirname $0)/../../":$PYTHONPATH \
|
| 5 |
+
python -m torch.distributed.launch --nproc_per_node=8 \
|
| 6 |
+
tools/train.py ${work_path}/config.py \
|
| 7 |
+
--launcher pytorch \
|
| 8 |
+
--options model.backbone.pretrained_path='your_model_path/uniformer_small_in1k.pth' \
|
| 9 |
+
--work-dir ${work_path}/ckpt \
|
| 10 |
+
2>&1 | tee -a ${work_path}/log.txt
|
FRESCO/src/ControlNet/annotator/uniformer/exp/upernet_global_small/test.sh
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env bash
|
| 2 |
+
|
| 3 |
+
work_path=$(dirname $0)
|
| 4 |
+
PYTHONPATH="$(dirname $0)/../../":$PYTHONPATH \
|
| 5 |
+
python -m torch.distributed.launch --nproc_per_node=8 \
|
| 6 |
+
tools/test.py ${work_path}/test_config_h32.py \
|
| 7 |
+
${work_path}/ckpt/latest.pth \
|
| 8 |
+
--launcher pytorch \
|
| 9 |
+
--eval mIoU \
|
| 10 |
+
2>&1 | tee -a ${work_path}/log.txt
|
FRESCO/src/ControlNet/annotator/uniformer/exp/upernet_global_small/test_config_g.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
_base_ = [
|
| 2 |
+
'../../configs/_base_/models/upernet_uniformer.py',
|
| 3 |
+
'../../configs/_base_/datasets/ade20k.py',
|
| 4 |
+
'../../configs/_base_/default_runtime.py',
|
| 5 |
+
'../../configs/_base_/schedules/schedule_160k.py'
|
| 6 |
+
]
|
| 7 |
+
model = dict(
|
| 8 |
+
backbone=dict(
|
| 9 |
+
type='UniFormer',
|
| 10 |
+
embed_dim=[64, 128, 320, 512],
|
| 11 |
+
layers=[3, 4, 8, 3],
|
| 12 |
+
head_dim=64,
|
| 13 |
+
drop_path_rate=0.25,
|
| 14 |
+
windows=False,
|
| 15 |
+
hybrid=False,
|
| 16 |
+
),
|
| 17 |
+
decode_head=dict(
|
| 18 |
+
in_channels=[64, 128, 320, 512],
|
| 19 |
+
num_classes=150
|
| 20 |
+
),
|
| 21 |
+
auxiliary_head=dict(
|
| 22 |
+
in_channels=320,
|
| 23 |
+
num_classes=150
|
| 24 |
+
))
|
| 25 |
+
|
| 26 |
+
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
|
| 27 |
+
optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
|
| 28 |
+
paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
|
| 29 |
+
'relative_position_bias_table': dict(decay_mult=0.),
|
| 30 |
+
'norm': dict(decay_mult=0.)}))
|
| 31 |
+
|
| 32 |
+
lr_config = dict(_delete_=True, policy='poly',
|
| 33 |
+
warmup='linear',
|
| 34 |
+
warmup_iters=1500,
|
| 35 |
+
warmup_ratio=1e-6,
|
| 36 |
+
power=1.0, min_lr=0.0, by_epoch=False)
|
| 37 |
+
|
| 38 |
+
data=dict(samples_per_gpu=2)
|
FRESCO/src/ControlNet/annotator/uniformer/exp/upernet_global_small/test_config_h32.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
_base_ = [
|
| 2 |
+
'../../configs/_base_/models/upernet_uniformer.py',
|
| 3 |
+
'../../configs/_base_/datasets/ade20k.py',
|
| 4 |
+
'../../configs/_base_/default_runtime.py',
|
| 5 |
+
'../../configs/_base_/schedules/schedule_160k.py'
|
| 6 |
+
]
|
| 7 |
+
model = dict(
|
| 8 |
+
backbone=dict(
|
| 9 |
+
type='UniFormer',
|
| 10 |
+
embed_dim=[64, 128, 320, 512],
|
| 11 |
+
layers=[3, 4, 8, 3],
|
| 12 |
+
head_dim=64,
|
| 13 |
+
drop_path_rate=0.25,
|
| 14 |
+
windows=False,
|
| 15 |
+
hybrid=True,
|
| 16 |
+
window_size=32
|
| 17 |
+
),
|
| 18 |
+
decode_head=dict(
|
| 19 |
+
in_channels=[64, 128, 320, 512],
|
| 20 |
+
num_classes=150
|
| 21 |
+
),
|
| 22 |
+
auxiliary_head=dict(
|
| 23 |
+
in_channels=320,
|
| 24 |
+
num_classes=150
|
| 25 |
+
))
|
| 26 |
+
|
| 27 |
+
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
|
| 28 |
+
optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
|
| 29 |
+
paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
|
| 30 |
+
'relative_position_bias_table': dict(decay_mult=0.),
|
| 31 |
+
'norm': dict(decay_mult=0.)}))
|
| 32 |
+
|
| 33 |
+
lr_config = dict(_delete_=True, policy='poly',
|
| 34 |
+
warmup='linear',
|
| 35 |
+
warmup_iters=1500,
|
| 36 |
+
warmup_ratio=1e-6,
|
| 37 |
+
power=1.0, min_lr=0.0, by_epoch=False)
|
| 38 |
+
|
| 39 |
+
data=dict(samples_per_gpu=2)
|
FRESCO/src/ControlNet/annotator/uniformer/exp/upernet_global_small/test_config_w32.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
_base_ = [
|
| 2 |
+
'../../configs/_base_/models/upernet_uniformer.py',
|
| 3 |
+
'../../configs/_base_/datasets/ade20k.py',
|
| 4 |
+
'../../configs/_base_/default_runtime.py',
|
| 5 |
+
'../../configs/_base_/schedules/schedule_160k.py'
|
| 6 |
+
]
|
| 7 |
+
model = dict(
|
| 8 |
+
backbone=dict(
|
| 9 |
+
type='UniFormer',
|
| 10 |
+
embed_dim=[64, 128, 320, 512],
|
| 11 |
+
layers=[3, 4, 8, 3],
|
| 12 |
+
head_dim=64,
|
| 13 |
+
drop_path_rate=0.25,
|
| 14 |
+
windows=True,
|
| 15 |
+
hybrid=False,
|
| 16 |
+
window_size=32
|
| 17 |
+
),
|
| 18 |
+
decode_head=dict(
|
| 19 |
+
in_channels=[64, 128, 320, 512],
|
| 20 |
+
num_classes=150
|
| 21 |
+
),
|
| 22 |
+
auxiliary_head=dict(
|
| 23 |
+
in_channels=320,
|
| 24 |
+
num_classes=150
|
| 25 |
+
))
|
| 26 |
+
|
| 27 |
+
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
|
| 28 |
+
optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
|
| 29 |
+
paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
|
| 30 |
+
'relative_position_bias_table': dict(decay_mult=0.),
|
| 31 |
+
'norm': dict(decay_mult=0.)}))
|
| 32 |
+
|
| 33 |
+
lr_config = dict(_delete_=True, policy='poly',
|
| 34 |
+
warmup='linear',
|
| 35 |
+
warmup_iters=1500,
|
| 36 |
+
warmup_ratio=1e-6,
|
| 37 |
+
power=1.0, min_lr=0.0, by_epoch=False)
|
| 38 |
+
|
| 39 |
+
data=dict(samples_per_gpu=2)
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/__init__.py
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
# flake8: noqa
|
| 3 |
+
from .arraymisc import *
|
| 4 |
+
from .fileio import *
|
| 5 |
+
from .image import *
|
| 6 |
+
from .utils import *
|
| 7 |
+
from .version import *
|
| 8 |
+
from .video import *
|
| 9 |
+
from .visualization import *
|
| 10 |
+
|
| 11 |
+
# The following modules are not imported to this level, so mmcv may be used
|
| 12 |
+
# without PyTorch.
|
| 13 |
+
# - runner
|
| 14 |
+
# - parallel
|
| 15 |
+
# - op
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/arraymisc/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from .quantization import dequantize, quantize
|
| 3 |
+
|
| 4 |
+
__all__ = ['quantize', 'dequantize']
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/arraymisc/quantization.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import numpy as np
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def quantize(arr, min_val, max_val, levels, dtype=np.int64):
|
| 6 |
+
"""Quantize an array of (-inf, inf) to [0, levels-1].
|
| 7 |
+
|
| 8 |
+
Args:
|
| 9 |
+
arr (ndarray): Input array.
|
| 10 |
+
min_val (scalar): Minimum value to be clipped.
|
| 11 |
+
max_val (scalar): Maximum value to be clipped.
|
| 12 |
+
levels (int): Quantization levels.
|
| 13 |
+
dtype (np.type): The type of the quantized array.
|
| 14 |
+
|
| 15 |
+
Returns:
|
| 16 |
+
tuple: Quantized array.
|
| 17 |
+
"""
|
| 18 |
+
if not (isinstance(levels, int) and levels > 1):
|
| 19 |
+
raise ValueError(
|
| 20 |
+
f'levels must be a positive integer, but got {levels}')
|
| 21 |
+
if min_val >= max_val:
|
| 22 |
+
raise ValueError(
|
| 23 |
+
f'min_val ({min_val}) must be smaller than max_val ({max_val})')
|
| 24 |
+
|
| 25 |
+
arr = np.clip(arr, min_val, max_val) - min_val
|
| 26 |
+
quantized_arr = np.minimum(
|
| 27 |
+
np.floor(levels * arr / (max_val - min_val)).astype(dtype), levels - 1)
|
| 28 |
+
|
| 29 |
+
return quantized_arr
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def dequantize(arr, min_val, max_val, levels, dtype=np.float64):
|
| 33 |
+
"""Dequantize an array.
|
| 34 |
+
|
| 35 |
+
Args:
|
| 36 |
+
arr (ndarray): Input array.
|
| 37 |
+
min_val (scalar): Minimum value to be clipped.
|
| 38 |
+
max_val (scalar): Maximum value to be clipped.
|
| 39 |
+
levels (int): Quantization levels.
|
| 40 |
+
dtype (np.type): The type of the dequantized array.
|
| 41 |
+
|
| 42 |
+
Returns:
|
| 43 |
+
tuple: Dequantized array.
|
| 44 |
+
"""
|
| 45 |
+
if not (isinstance(levels, int) and levels > 1):
|
| 46 |
+
raise ValueError(
|
| 47 |
+
f'levels must be a positive integer, but got {levels}')
|
| 48 |
+
if min_val >= max_val:
|
| 49 |
+
raise ValueError(
|
| 50 |
+
f'min_val ({min_val}) must be smaller than max_val ({max_val})')
|
| 51 |
+
|
| 52 |
+
dequantized_arr = (arr + 0.5).astype(dtype) * (max_val -
|
| 53 |
+
min_val) / levels + min_val
|
| 54 |
+
|
| 55 |
+
return dequantized_arr
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/engine/__init__.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from .test import (collect_results_cpu, collect_results_gpu, multi_gpu_test,
|
| 3 |
+
single_gpu_test)
|
| 4 |
+
|
| 5 |
+
__all__ = [
|
| 6 |
+
'collect_results_cpu', 'collect_results_gpu', 'multi_gpu_test',
|
| 7 |
+
'single_gpu_test'
|
| 8 |
+
]
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/engine/test.py
ADDED
|
@@ -0,0 +1,202 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import os.path as osp
|
| 3 |
+
import pickle
|
| 4 |
+
import shutil
|
| 5 |
+
import tempfile
|
| 6 |
+
import time
|
| 7 |
+
|
| 8 |
+
import torch
|
| 9 |
+
import torch.distributed as dist
|
| 10 |
+
|
| 11 |
+
import annotator.uniformer.mmcv as mmcv
|
| 12 |
+
from annotator.uniformer.mmcv.runner import get_dist_info
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def single_gpu_test(model, data_loader):
|
| 16 |
+
"""Test model with a single gpu.
|
| 17 |
+
|
| 18 |
+
This method tests model with a single gpu and displays test progress bar.
|
| 19 |
+
|
| 20 |
+
Args:
|
| 21 |
+
model (nn.Module): Model to be tested.
|
| 22 |
+
data_loader (nn.Dataloader): Pytorch data loader.
|
| 23 |
+
|
| 24 |
+
Returns:
|
| 25 |
+
list: The prediction results.
|
| 26 |
+
"""
|
| 27 |
+
model.eval()
|
| 28 |
+
results = []
|
| 29 |
+
dataset = data_loader.dataset
|
| 30 |
+
prog_bar = mmcv.ProgressBar(len(dataset))
|
| 31 |
+
for data in data_loader:
|
| 32 |
+
with torch.no_grad():
|
| 33 |
+
result = model(return_loss=False, **data)
|
| 34 |
+
results.extend(result)
|
| 35 |
+
|
| 36 |
+
# Assume result has the same length of batch_size
|
| 37 |
+
# refer to https://github.com/open-mmlab/mmcv/issues/985
|
| 38 |
+
batch_size = len(result)
|
| 39 |
+
for _ in range(batch_size):
|
| 40 |
+
prog_bar.update()
|
| 41 |
+
return results
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
|
| 45 |
+
"""Test model with multiple gpus.
|
| 46 |
+
|
| 47 |
+
This method tests model with multiple gpus and collects the results
|
| 48 |
+
under two different modes: gpu and cpu modes. By setting
|
| 49 |
+
``gpu_collect=True``, it encodes results to gpu tensors and use gpu
|
| 50 |
+
communication for results collection. On cpu mode it saves the results on
|
| 51 |
+
different gpus to ``tmpdir`` and collects them by the rank 0 worker.
|
| 52 |
+
|
| 53 |
+
Args:
|
| 54 |
+
model (nn.Module): Model to be tested.
|
| 55 |
+
data_loader (nn.Dataloader): Pytorch data loader.
|
| 56 |
+
tmpdir (str): Path of directory to save the temporary results from
|
| 57 |
+
different gpus under cpu mode.
|
| 58 |
+
gpu_collect (bool): Option to use either gpu or cpu to collect results.
|
| 59 |
+
|
| 60 |
+
Returns:
|
| 61 |
+
list: The prediction results.
|
| 62 |
+
"""
|
| 63 |
+
model.eval()
|
| 64 |
+
results = []
|
| 65 |
+
dataset = data_loader.dataset
|
| 66 |
+
rank, world_size = get_dist_info()
|
| 67 |
+
if rank == 0:
|
| 68 |
+
prog_bar = mmcv.ProgressBar(len(dataset))
|
| 69 |
+
time.sleep(2) # This line can prevent deadlock problem in some cases.
|
| 70 |
+
for i, data in enumerate(data_loader):
|
| 71 |
+
with torch.no_grad():
|
| 72 |
+
result = model(return_loss=False, **data)
|
| 73 |
+
results.extend(result)
|
| 74 |
+
|
| 75 |
+
if rank == 0:
|
| 76 |
+
batch_size = len(result)
|
| 77 |
+
batch_size_all = batch_size * world_size
|
| 78 |
+
if batch_size_all + prog_bar.completed > len(dataset):
|
| 79 |
+
batch_size_all = len(dataset) - prog_bar.completed
|
| 80 |
+
for _ in range(batch_size_all):
|
| 81 |
+
prog_bar.update()
|
| 82 |
+
|
| 83 |
+
# collect results from all ranks
|
| 84 |
+
if gpu_collect:
|
| 85 |
+
results = collect_results_gpu(results, len(dataset))
|
| 86 |
+
else:
|
| 87 |
+
results = collect_results_cpu(results, len(dataset), tmpdir)
|
| 88 |
+
return results
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def collect_results_cpu(result_part, size, tmpdir=None):
|
| 92 |
+
"""Collect results under cpu mode.
|
| 93 |
+
|
| 94 |
+
On cpu mode, this function will save the results on different gpus to
|
| 95 |
+
``tmpdir`` and collect them by the rank 0 worker.
|
| 96 |
+
|
| 97 |
+
Args:
|
| 98 |
+
result_part (list): Result list containing result parts
|
| 99 |
+
to be collected.
|
| 100 |
+
size (int): Size of the results, commonly equal to length of
|
| 101 |
+
the results.
|
| 102 |
+
tmpdir (str | None): temporal directory for collected results to
|
| 103 |
+
store. If set to None, it will create a random temporal directory
|
| 104 |
+
for it.
|
| 105 |
+
|
| 106 |
+
Returns:
|
| 107 |
+
list: The collected results.
|
| 108 |
+
"""
|
| 109 |
+
rank, world_size = get_dist_info()
|
| 110 |
+
# create a tmp dir if it is not specified
|
| 111 |
+
if tmpdir is None:
|
| 112 |
+
MAX_LEN = 512
|
| 113 |
+
# 32 is whitespace
|
| 114 |
+
dir_tensor = torch.full((MAX_LEN, ),
|
| 115 |
+
32,
|
| 116 |
+
dtype=torch.uint8,
|
| 117 |
+
device='cuda')
|
| 118 |
+
if rank == 0:
|
| 119 |
+
mmcv.mkdir_or_exist('.dist_test')
|
| 120 |
+
tmpdir = tempfile.mkdtemp(dir='.dist_test')
|
| 121 |
+
tmpdir = torch.tensor(
|
| 122 |
+
bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
|
| 123 |
+
dir_tensor[:len(tmpdir)] = tmpdir
|
| 124 |
+
dist.broadcast(dir_tensor, 0)
|
| 125 |
+
tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
|
| 126 |
+
else:
|
| 127 |
+
mmcv.mkdir_or_exist(tmpdir)
|
| 128 |
+
# dump the part result to the dir
|
| 129 |
+
mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
|
| 130 |
+
dist.barrier()
|
| 131 |
+
# collect all parts
|
| 132 |
+
if rank != 0:
|
| 133 |
+
return None
|
| 134 |
+
else:
|
| 135 |
+
# load results of all parts from tmp dir
|
| 136 |
+
part_list = []
|
| 137 |
+
for i in range(world_size):
|
| 138 |
+
part_file = osp.join(tmpdir, f'part_{i}.pkl')
|
| 139 |
+
part_result = mmcv.load(part_file)
|
| 140 |
+
# When data is severely insufficient, an empty part_result
|
| 141 |
+
# on a certain gpu could makes the overall outputs empty.
|
| 142 |
+
if part_result:
|
| 143 |
+
part_list.append(part_result)
|
| 144 |
+
# sort the results
|
| 145 |
+
ordered_results = []
|
| 146 |
+
for res in zip(*part_list):
|
| 147 |
+
ordered_results.extend(list(res))
|
| 148 |
+
# the dataloader may pad some samples
|
| 149 |
+
ordered_results = ordered_results[:size]
|
| 150 |
+
# remove tmp dir
|
| 151 |
+
shutil.rmtree(tmpdir)
|
| 152 |
+
return ordered_results
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
def collect_results_gpu(result_part, size):
|
| 156 |
+
"""Collect results under gpu mode.
|
| 157 |
+
|
| 158 |
+
On gpu mode, this function will encode results to gpu tensors and use gpu
|
| 159 |
+
communication for results collection.
|
| 160 |
+
|
| 161 |
+
Args:
|
| 162 |
+
result_part (list): Result list containing result parts
|
| 163 |
+
to be collected.
|
| 164 |
+
size (int): Size of the results, commonly equal to length of
|
| 165 |
+
the results.
|
| 166 |
+
|
| 167 |
+
Returns:
|
| 168 |
+
list: The collected results.
|
| 169 |
+
"""
|
| 170 |
+
rank, world_size = get_dist_info()
|
| 171 |
+
# dump result part to tensor with pickle
|
| 172 |
+
part_tensor = torch.tensor(
|
| 173 |
+
bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
|
| 174 |
+
# gather all result part tensor shape
|
| 175 |
+
shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
|
| 176 |
+
shape_list = [shape_tensor.clone() for _ in range(world_size)]
|
| 177 |
+
dist.all_gather(shape_list, shape_tensor)
|
| 178 |
+
# padding result part tensor to max length
|
| 179 |
+
shape_max = torch.tensor(shape_list).max()
|
| 180 |
+
part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
|
| 181 |
+
part_send[:shape_tensor[0]] = part_tensor
|
| 182 |
+
part_recv_list = [
|
| 183 |
+
part_tensor.new_zeros(shape_max) for _ in range(world_size)
|
| 184 |
+
]
|
| 185 |
+
# gather all result part
|
| 186 |
+
dist.all_gather(part_recv_list, part_send)
|
| 187 |
+
|
| 188 |
+
if rank == 0:
|
| 189 |
+
part_list = []
|
| 190 |
+
for recv, shape in zip(part_recv_list, shape_list):
|
| 191 |
+
part_result = pickle.loads(recv[:shape[0]].cpu().numpy().tobytes())
|
| 192 |
+
# When data is severely insufficient, an empty part_result
|
| 193 |
+
# on a certain gpu could makes the overall outputs empty.
|
| 194 |
+
if part_result:
|
| 195 |
+
part_list.append(part_result)
|
| 196 |
+
# sort the results
|
| 197 |
+
ordered_results = []
|
| 198 |
+
for res in zip(*part_list):
|
| 199 |
+
ordered_results.extend(list(res))
|
| 200 |
+
# the dataloader may pad some samples
|
| 201 |
+
ordered_results = ordered_results[:size]
|
| 202 |
+
return ordered_results
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/fileio/file_client.py
ADDED
|
@@ -0,0 +1,1148 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import inspect
|
| 3 |
+
import os
|
| 4 |
+
import os.path as osp
|
| 5 |
+
import re
|
| 6 |
+
import tempfile
|
| 7 |
+
import warnings
|
| 8 |
+
from abc import ABCMeta, abstractmethod
|
| 9 |
+
from contextlib import contextmanager
|
| 10 |
+
from pathlib import Path
|
| 11 |
+
from typing import Iterable, Iterator, Optional, Tuple, Union
|
| 12 |
+
from urllib.request import urlopen
|
| 13 |
+
|
| 14 |
+
import annotator.uniformer.mmcv as mmcv
|
| 15 |
+
from annotator.uniformer.mmcv.utils.misc import has_method
|
| 16 |
+
from annotator.uniformer.mmcv.utils.path import is_filepath
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class BaseStorageBackend(metaclass=ABCMeta):
|
| 20 |
+
"""Abstract class of storage backends.
|
| 21 |
+
|
| 22 |
+
All backends need to implement two apis: ``get()`` and ``get_text()``.
|
| 23 |
+
``get()`` reads the file as a byte stream and ``get_text()`` reads the file
|
| 24 |
+
as texts.
|
| 25 |
+
"""
|
| 26 |
+
|
| 27 |
+
# a flag to indicate whether the backend can create a symlink for a file
|
| 28 |
+
_allow_symlink = False
|
| 29 |
+
|
| 30 |
+
@property
|
| 31 |
+
def name(self):
|
| 32 |
+
return self.__class__.__name__
|
| 33 |
+
|
| 34 |
+
@property
|
| 35 |
+
def allow_symlink(self):
|
| 36 |
+
return self._allow_symlink
|
| 37 |
+
|
| 38 |
+
@abstractmethod
|
| 39 |
+
def get(self, filepath):
|
| 40 |
+
pass
|
| 41 |
+
|
| 42 |
+
@abstractmethod
|
| 43 |
+
def get_text(self, filepath):
|
| 44 |
+
pass
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
class CephBackend(BaseStorageBackend):
|
| 48 |
+
"""Ceph storage backend (for internal use).
|
| 49 |
+
|
| 50 |
+
Args:
|
| 51 |
+
path_mapping (dict|None): path mapping dict from local path to Petrel
|
| 52 |
+
path. When ``path_mapping={'src': 'dst'}``, ``src`` in ``filepath``
|
| 53 |
+
will be replaced by ``dst``. Default: None.
|
| 54 |
+
|
| 55 |
+
.. warning::
|
| 56 |
+
:class:`mmcv.fileio.file_client.CephBackend` will be deprecated,
|
| 57 |
+
please use :class:`mmcv.fileio.file_client.PetrelBackend` instead.
|
| 58 |
+
"""
|
| 59 |
+
|
| 60 |
+
def __init__(self, path_mapping=None):
|
| 61 |
+
try:
|
| 62 |
+
import ceph
|
| 63 |
+
except ImportError:
|
| 64 |
+
raise ImportError('Please install ceph to enable CephBackend.')
|
| 65 |
+
|
| 66 |
+
warnings.warn(
|
| 67 |
+
'CephBackend will be deprecated, please use PetrelBackend instead')
|
| 68 |
+
self._client = ceph.S3Client()
|
| 69 |
+
assert isinstance(path_mapping, dict) or path_mapping is None
|
| 70 |
+
self.path_mapping = path_mapping
|
| 71 |
+
|
| 72 |
+
def get(self, filepath):
|
| 73 |
+
filepath = str(filepath)
|
| 74 |
+
if self.path_mapping is not None:
|
| 75 |
+
for k, v in self.path_mapping.items():
|
| 76 |
+
filepath = filepath.replace(k, v)
|
| 77 |
+
value = self._client.Get(filepath)
|
| 78 |
+
value_buf = memoryview(value)
|
| 79 |
+
return value_buf
|
| 80 |
+
|
| 81 |
+
def get_text(self, filepath, encoding=None):
|
| 82 |
+
raise NotImplementedError
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
class PetrelBackend(BaseStorageBackend):
|
| 86 |
+
"""Petrel storage backend (for internal use).
|
| 87 |
+
|
| 88 |
+
PetrelBackend supports reading and writing data to multiple clusters.
|
| 89 |
+
If the file path contains the cluster name, PetrelBackend will read data
|
| 90 |
+
from specified cluster or write data to it. Otherwise, PetrelBackend will
|
| 91 |
+
access the default cluster.
|
| 92 |
+
|
| 93 |
+
Args:
|
| 94 |
+
path_mapping (dict, optional): Path mapping dict from local path to
|
| 95 |
+
Petrel path. When ``path_mapping={'src': 'dst'}``, ``src`` in
|
| 96 |
+
``filepath`` will be replaced by ``dst``. Default: None.
|
| 97 |
+
enable_mc (bool, optional): Whether to enable memcached support.
|
| 98 |
+
Default: True.
|
| 99 |
+
|
| 100 |
+
Examples:
|
| 101 |
+
>>> filepath1 = 's3://path/of/file'
|
| 102 |
+
>>> filepath2 = 'cluster-name:s3://path/of/file'
|
| 103 |
+
>>> client = PetrelBackend()
|
| 104 |
+
>>> client.get(filepath1) # get data from default cluster
|
| 105 |
+
>>> client.get(filepath2) # get data from 'cluster-name' cluster
|
| 106 |
+
"""
|
| 107 |
+
|
| 108 |
+
def __init__(self,
|
| 109 |
+
path_mapping: Optional[dict] = None,
|
| 110 |
+
enable_mc: bool = True):
|
| 111 |
+
try:
|
| 112 |
+
from petrel_client import client
|
| 113 |
+
except ImportError:
|
| 114 |
+
raise ImportError('Please install petrel_client to enable '
|
| 115 |
+
'PetrelBackend.')
|
| 116 |
+
|
| 117 |
+
self._client = client.Client(enable_mc=enable_mc)
|
| 118 |
+
assert isinstance(path_mapping, dict) or path_mapping is None
|
| 119 |
+
self.path_mapping = path_mapping
|
| 120 |
+
|
| 121 |
+
def _map_path(self, filepath: Union[str, Path]) -> str:
|
| 122 |
+
"""Map ``filepath`` to a string path whose prefix will be replaced by
|
| 123 |
+
:attr:`self.path_mapping`.
|
| 124 |
+
|
| 125 |
+
Args:
|
| 126 |
+
filepath (str): Path to be mapped.
|
| 127 |
+
"""
|
| 128 |
+
filepath = str(filepath)
|
| 129 |
+
if self.path_mapping is not None:
|
| 130 |
+
for k, v in self.path_mapping.items():
|
| 131 |
+
filepath = filepath.replace(k, v)
|
| 132 |
+
return filepath
|
| 133 |
+
|
| 134 |
+
def _format_path(self, filepath: str) -> str:
|
| 135 |
+
"""Convert a ``filepath`` to standard format of petrel oss.
|
| 136 |
+
|
| 137 |
+
If the ``filepath`` is concatenated by ``os.path.join``, in a Windows
|
| 138 |
+
environment, the ``filepath`` will be the format of
|
| 139 |
+
's3://bucket_name\\image.jpg'. By invoking :meth:`_format_path`, the
|
| 140 |
+
above ``filepath`` will be converted to 's3://bucket_name/image.jpg'.
|
| 141 |
+
|
| 142 |
+
Args:
|
| 143 |
+
filepath (str): Path to be formatted.
|
| 144 |
+
"""
|
| 145 |
+
return re.sub(r'\\+', '/', filepath)
|
| 146 |
+
|
| 147 |
+
def get(self, filepath: Union[str, Path]) -> memoryview:
|
| 148 |
+
"""Read data from a given ``filepath`` with 'rb' mode.
|
| 149 |
+
|
| 150 |
+
Args:
|
| 151 |
+
filepath (str or Path): Path to read data.
|
| 152 |
+
|
| 153 |
+
Returns:
|
| 154 |
+
memoryview: A memory view of expected bytes object to avoid
|
| 155 |
+
copying. The memoryview object can be converted to bytes by
|
| 156 |
+
``value_buf.tobytes()``.
|
| 157 |
+
"""
|
| 158 |
+
filepath = self._map_path(filepath)
|
| 159 |
+
filepath = self._format_path(filepath)
|
| 160 |
+
value = self._client.Get(filepath)
|
| 161 |
+
value_buf = memoryview(value)
|
| 162 |
+
return value_buf
|
| 163 |
+
|
| 164 |
+
def get_text(self,
|
| 165 |
+
filepath: Union[str, Path],
|
| 166 |
+
encoding: str = 'utf-8') -> str:
|
| 167 |
+
"""Read data from a given ``filepath`` with 'r' mode.
|
| 168 |
+
|
| 169 |
+
Args:
|
| 170 |
+
filepath (str or Path): Path to read data.
|
| 171 |
+
encoding (str): The encoding format used to open the ``filepath``.
|
| 172 |
+
Default: 'utf-8'.
|
| 173 |
+
|
| 174 |
+
Returns:
|
| 175 |
+
str: Expected text reading from ``filepath``.
|
| 176 |
+
"""
|
| 177 |
+
return str(self.get(filepath), encoding=encoding)
|
| 178 |
+
|
| 179 |
+
def put(self, obj: bytes, filepath: Union[str, Path]) -> None:
|
| 180 |
+
"""Save data to a given ``filepath``.
|
| 181 |
+
|
| 182 |
+
Args:
|
| 183 |
+
obj (bytes): Data to be saved.
|
| 184 |
+
filepath (str or Path): Path to write data.
|
| 185 |
+
"""
|
| 186 |
+
filepath = self._map_path(filepath)
|
| 187 |
+
filepath = self._format_path(filepath)
|
| 188 |
+
self._client.put(filepath, obj)
|
| 189 |
+
|
| 190 |
+
def put_text(self,
|
| 191 |
+
obj: str,
|
| 192 |
+
filepath: Union[str, Path],
|
| 193 |
+
encoding: str = 'utf-8') -> None:
|
| 194 |
+
"""Save data to a given ``filepath``.
|
| 195 |
+
|
| 196 |
+
Args:
|
| 197 |
+
obj (str): Data to be written.
|
| 198 |
+
filepath (str or Path): Path to write data.
|
| 199 |
+
encoding (str): The encoding format used to encode the ``obj``.
|
| 200 |
+
Default: 'utf-8'.
|
| 201 |
+
"""
|
| 202 |
+
self.put(bytes(obj, encoding=encoding), filepath)
|
| 203 |
+
|
| 204 |
+
def remove(self, filepath: Union[str, Path]) -> None:
|
| 205 |
+
"""Remove a file.
|
| 206 |
+
|
| 207 |
+
Args:
|
| 208 |
+
filepath (str or Path): Path to be removed.
|
| 209 |
+
"""
|
| 210 |
+
if not has_method(self._client, 'delete'):
|
| 211 |
+
raise NotImplementedError(
|
| 212 |
+
('Current version of Petrel Python SDK has not supported '
|
| 213 |
+
'the `delete` method, please use a higher version or dev'
|
| 214 |
+
' branch instead.'))
|
| 215 |
+
|
| 216 |
+
filepath = self._map_path(filepath)
|
| 217 |
+
filepath = self._format_path(filepath)
|
| 218 |
+
self._client.delete(filepath)
|
| 219 |
+
|
| 220 |
+
def exists(self, filepath: Union[str, Path]) -> bool:
|
| 221 |
+
"""Check whether a file path exists.
|
| 222 |
+
|
| 223 |
+
Args:
|
| 224 |
+
filepath (str or Path): Path to be checked whether exists.
|
| 225 |
+
|
| 226 |
+
Returns:
|
| 227 |
+
bool: Return ``True`` if ``filepath`` exists, ``False`` otherwise.
|
| 228 |
+
"""
|
| 229 |
+
if not (has_method(self._client, 'contains')
|
| 230 |
+
and has_method(self._client, 'isdir')):
|
| 231 |
+
raise NotImplementedError(
|
| 232 |
+
('Current version of Petrel Python SDK has not supported '
|
| 233 |
+
'the `contains` and `isdir` methods, please use a higher'
|
| 234 |
+
'version or dev branch instead.'))
|
| 235 |
+
|
| 236 |
+
filepath = self._map_path(filepath)
|
| 237 |
+
filepath = self._format_path(filepath)
|
| 238 |
+
return self._client.contains(filepath) or self._client.isdir(filepath)
|
| 239 |
+
|
| 240 |
+
def isdir(self, filepath: Union[str, Path]) -> bool:
|
| 241 |
+
"""Check whether a file path is a directory.
|
| 242 |
+
|
| 243 |
+
Args:
|
| 244 |
+
filepath (str or Path): Path to be checked whether it is a
|
| 245 |
+
directory.
|
| 246 |
+
|
| 247 |
+
Returns:
|
| 248 |
+
bool: Return ``True`` if ``filepath`` points to a directory,
|
| 249 |
+
``False`` otherwise.
|
| 250 |
+
"""
|
| 251 |
+
if not has_method(self._client, 'isdir'):
|
| 252 |
+
raise NotImplementedError(
|
| 253 |
+
('Current version of Petrel Python SDK has not supported '
|
| 254 |
+
'the `isdir` method, please use a higher version or dev'
|
| 255 |
+
' branch instead.'))
|
| 256 |
+
|
| 257 |
+
filepath = self._map_path(filepath)
|
| 258 |
+
filepath = self._format_path(filepath)
|
| 259 |
+
return self._client.isdir(filepath)
|
| 260 |
+
|
| 261 |
+
def isfile(self, filepath: Union[str, Path]) -> bool:
|
| 262 |
+
"""Check whether a file path is a file.
|
| 263 |
+
|
| 264 |
+
Args:
|
| 265 |
+
filepath (str or Path): Path to be checked whether it is a file.
|
| 266 |
+
|
| 267 |
+
Returns:
|
| 268 |
+
bool: Return ``True`` if ``filepath`` points to a file, ``False``
|
| 269 |
+
otherwise.
|
| 270 |
+
"""
|
| 271 |
+
if not has_method(self._client, 'contains'):
|
| 272 |
+
raise NotImplementedError(
|
| 273 |
+
('Current version of Petrel Python SDK has not supported '
|
| 274 |
+
'the `contains` method, please use a higher version or '
|
| 275 |
+
'dev branch instead.'))
|
| 276 |
+
|
| 277 |
+
filepath = self._map_path(filepath)
|
| 278 |
+
filepath = self._format_path(filepath)
|
| 279 |
+
return self._client.contains(filepath)
|
| 280 |
+
|
| 281 |
+
def join_path(self, filepath: Union[str, Path],
|
| 282 |
+
*filepaths: Union[str, Path]) -> str:
|
| 283 |
+
"""Concatenate all file paths.
|
| 284 |
+
|
| 285 |
+
Args:
|
| 286 |
+
filepath (str or Path): Path to be concatenated.
|
| 287 |
+
|
| 288 |
+
Returns:
|
| 289 |
+
str: The result after concatenation.
|
| 290 |
+
"""
|
| 291 |
+
filepath = self._format_path(self._map_path(filepath))
|
| 292 |
+
if filepath.endswith('/'):
|
| 293 |
+
filepath = filepath[:-1]
|
| 294 |
+
formatted_paths = [filepath]
|
| 295 |
+
for path in filepaths:
|
| 296 |
+
formatted_paths.append(self._format_path(self._map_path(path)))
|
| 297 |
+
return '/'.join(formatted_paths)
|
| 298 |
+
|
| 299 |
+
@contextmanager
|
| 300 |
+
def get_local_path(self, filepath: Union[str, Path]) -> Iterable[str]:
|
| 301 |
+
"""Download a file from ``filepath`` and return a temporary path.
|
| 302 |
+
|
| 303 |
+
``get_local_path`` is decorated by :meth:`contxtlib.contextmanager`. It
|
| 304 |
+
can be called with ``with`` statement, and when exists from the
|
| 305 |
+
``with`` statement, the temporary path will be released.
|
| 306 |
+
|
| 307 |
+
Args:
|
| 308 |
+
filepath (str | Path): Download a file from ``filepath``.
|
| 309 |
+
|
| 310 |
+
Examples:
|
| 311 |
+
>>> client = PetrelBackend()
|
| 312 |
+
>>> # After existing from the ``with`` clause,
|
| 313 |
+
>>> # the path will be removed
|
| 314 |
+
>>> with client.get_local_path('s3://path/of/your/file') as path:
|
| 315 |
+
... # do something here
|
| 316 |
+
|
| 317 |
+
Yields:
|
| 318 |
+
Iterable[str]: Only yield one temporary path.
|
| 319 |
+
"""
|
| 320 |
+
filepath = self._map_path(filepath)
|
| 321 |
+
filepath = self._format_path(filepath)
|
| 322 |
+
assert self.isfile(filepath)
|
| 323 |
+
try:
|
| 324 |
+
f = tempfile.NamedTemporaryFile(delete=False)
|
| 325 |
+
f.write(self.get(filepath))
|
| 326 |
+
f.close()
|
| 327 |
+
yield f.name
|
| 328 |
+
finally:
|
| 329 |
+
os.remove(f.name)
|
| 330 |
+
|
| 331 |
+
    def list_dir_or_file(self,
                         dir_path: Union[str, Path],
                         list_dir: bool = True,
                         list_file: bool = True,
                         suffix: Optional[Union[str, Tuple[str]]] = None,
                         recursive: bool = False) -> Iterator[str]:
        """Scan a directory to find the interested directories or files in
        arbitrary order.

        Note:
            Petrel has no concept of directories but it simulates the directory
            hierarchy in the filesystem through public prefixes. In addition,
            if the returned path ends with '/', it means the path is a public
            prefix which is a logical directory.

        Note:
            :meth:`list_dir_or_file` returns the path relative to ``dir_path``.
            In addition, the returned path of directory will not contains the
            suffix '/' which is consistent with other backends.

        Args:
            dir_path (str | Path): Path of the directory.
            list_dir (bool): List the directories. Default: True.
            list_file (bool): List the path of files. Default: True.
            suffix (str or tuple[str], optional): File suffix
                that we are interested in. Default: None.
            recursive (bool): If set to True, recursively scan the
                directory. Default: False.

        Raises:
            NotImplementedError: If the installed Petrel SDK has no ``list``
                method.
            TypeError: If ``suffix`` is given together with ``list_dir``, or
                is neither a string nor a tuple of strings.

        Yields:
            Iterable[str]: A relative path to ``dir_path``.
        """
        if not has_method(self._client, 'list'):
            raise NotImplementedError(
                ('Current version of Petrel Python SDK has not supported '
                 'the `list` method, please use a higher version or dev'
                 ' branch instead.'))

        dir_path = self._map_path(dir_path)
        dir_path = self._format_path(dir_path)
        # Directories yielded below never carry a suffix, so filtering them
        # by suffix would be meaningless; reject the combination up front.
        if list_dir and suffix is not None:
            raise TypeError(
                '`list_dir` should be False when `suffix` is not None')

        if (suffix is not None) and not isinstance(suffix, (str, tuple)):
            raise TypeError('`suffix` must be a string or tuple of strings')

        # Petrel's simulated directory hierarchy assumes that directory paths
        # should end with `/`
        if not dir_path.endswith('/'):
            dir_path += '/'

        # ``root`` is fixed once so that every yielded path stays relative to
        # the directory the caller asked about, even during recursion.
        root = dir_path

        def _list_dir_or_file(dir_path, list_dir, list_file, suffix,
                              recursive):
            # Recursive generator: walks one level of the simulated hierarchy
            # and recurses into sub-prefixes when ``recursive`` is set.
            for path in self._client.list(dir_path):
                # the `self.isdir` is not used here to determine whether path
                # is a directory, because `self.isdir` relies on
                # `self._client.list`
                if path.endswith('/'):  # a directory path
                    next_dir_path = self.join_path(dir_path, path)
                    if list_dir:
                        # get the relative path and exclude the last
                        # character '/'
                        rel_dir = next_dir_path[len(root):-1]
                        yield rel_dir
                    if recursive:
                        yield from _list_dir_or_file(next_dir_path, list_dir,
                                                     list_file, suffix,
                                                     recursive)
                else:  # a file path
                    absolute_path = self.join_path(dir_path, path)
                    rel_path = absolute_path[len(root):]
                    if (suffix is None
                            or rel_path.endswith(suffix)) and list_file:
                        yield rel_path

        return _list_dir_or_file(dir_path, list_dir, list_file, suffix,
                                 recursive)
class MemcachedBackend(BaseStorageBackend):
    """Memcached storage backend.

    Attributes:
        server_list_cfg (str): Config file for memcached server list.
        client_cfg (str): Config file for memcached client.
        sys_path (str | None): Additional path to be appended to `sys.path`.
            Default: None.
    """

    def __init__(self, server_list_cfg, client_cfg, sys_path=None):
        # Optionally extend the import search path so that the ``mc``
        # package can be located.
        if sys_path is not None:
            import sys
            sys.path.append(sys_path)
        try:
            import mc
        except ImportError:
            raise ImportError(
                'Please install memcached to enable MemcachedBackend.')

        self.server_list_cfg = server_list_cfg
        self.client_cfg = client_cfg
        self._client = mc.MemcachedClient.GetInstance(self.server_list_cfg,
                                                      self.client_cfg)
        # mc.pyvector servers as a point which points to a memory cache
        self._mc_buffer = mc.pyvector()

    def get(self, filepath):
        """Fetch the bytes stored under ``filepath`` from memcached."""
        import mc
        key = str(filepath)
        # The SDK fills the preallocated buffer in place; convert it
        # afterwards to obtain the value.
        self._client.Get(key, self._mc_buffer)
        return mc.ConvertBuffer(self._mc_buffer)

    def get_text(self, filepath, encoding=None):
        """Reading text is not supported by the memcached backend."""
        raise NotImplementedError
class LmdbBackend(BaseStorageBackend):
    """Lmdb storage backend.

    Args:
        db_path (str): Lmdb database path.
        readonly (bool, optional): Lmdb environment parameter. If True,
            disallow any write operations. Default: True.
        lock (bool, optional): Lmdb environment parameter. If False, when
            concurrent access occurs, do not lock the database. Default: False.
        readahead (bool, optional): Lmdb environment parameter. If False,
            disable the OS filesystem readahead mechanism, which may improve
            random read performance when a database is larger than RAM.
            Default: False.

    Attributes:
        db_path (str): Lmdb database path.
    """

    def __init__(self,
                 db_path,
                 readonly=True,
                 lock=False,
                 readahead=False,
                 **kwargs):
        try:
            import lmdb
        except ImportError:
            raise ImportError('Please install lmdb to enable LmdbBackend.')

        self.db_path = str(db_path)
        # The environment handle is opened once here and reused by every
        # subsequent read transaction.
        self._client = lmdb.open(
            self.db_path,
            readonly=readonly,
            lock=lock,
            readahead=readahead,
            **kwargs)

    def get(self, filepath):
        """Get values according to the filepath.

        Args:
            filepath (str | obj:`Path`): Here, filepath is the lmdb key.
        """
        key = str(filepath)
        # Keys are stored ASCII-encoded; a read-only transaction suffices.
        with self._client.begin(write=False) as txn:
            return txn.get(key.encode('ascii'))

    def get_text(self, filepath, encoding=None):
        """Reading text is not supported by the lmdb backend."""
        raise NotImplementedError
class HardDiskBackend(BaseStorageBackend):
    """Raw hard disks storage backend."""

    _allow_symlink = True

    def get(self, filepath: Union[str, Path]) -> bytes:
        """Read data from a given ``filepath`` with 'rb' mode.

        Args:
            filepath (str or Path): Path to read data.

        Returns:
            bytes: Expected bytes object.
        """
        with open(filepath, 'rb') as fh:
            return fh.read()

    def get_text(self,
                 filepath: Union[str, Path],
                 encoding: str = 'utf-8') -> str:
        """Read data from a given ``filepath`` with 'r' mode.

        Args:
            filepath (str or Path): Path to read data.
            encoding (str): The encoding format used to open the ``filepath``.
                Default: 'utf-8'.

        Returns:
            str: Expected text reading from ``filepath``.
        """
        with open(filepath, 'r', encoding=encoding) as fh:
            return fh.read()

    def put(self, obj: bytes, filepath: Union[str, Path]) -> None:
        """Write data to a given ``filepath`` with 'wb' mode.

        Note:
            ``put`` will create a directory if the directory of ``filepath``
            does not exist.

        Args:
            obj (bytes): Data to be written.
            filepath (str or Path): Path to write data.
        """
        mmcv.mkdir_or_exist(osp.dirname(filepath))
        with open(filepath, 'wb') as fh:
            fh.write(obj)

    def put_text(self,
                 obj: str,
                 filepath: Union[str, Path],
                 encoding: str = 'utf-8') -> None:
        """Write text to a given ``filepath`` with 'w' mode.

        Note:
            ``put_text`` will create a directory if the directory of
            ``filepath`` does not exist.

        Args:
            obj (str): Data to be written.
            filepath (str or Path): Path to write data.
            encoding (str): The encoding format used to open the ``filepath``.
                Default: 'utf-8'.
        """
        mmcv.mkdir_or_exist(osp.dirname(filepath))
        with open(filepath, 'w', encoding=encoding) as fh:
            fh.write(obj)

    def remove(self, filepath: Union[str, Path]) -> None:
        """Remove a file.

        Args:
            filepath (str or Path): Path to be removed.
        """
        os.remove(filepath)

    def exists(self, filepath: Union[str, Path]) -> bool:
        """Check whether a file path exists.

        Args:
            filepath (str or Path): Path to be checked whether exists.

        Returns:
            bool: Return ``True`` if ``filepath`` exists, ``False`` otherwise.
        """
        return osp.exists(filepath)

    def isdir(self, filepath: Union[str, Path]) -> bool:
        """Check whether a file path is a directory.

        Args:
            filepath (str or Path): Path to be checked whether it is a
                directory.

        Returns:
            bool: Return ``True`` if ``filepath`` points to a directory,
                ``False`` otherwise.
        """
        return osp.isdir(filepath)

    def isfile(self, filepath: Union[str, Path]) -> bool:
        """Check whether a file path is a file.

        Args:
            filepath (str or Path): Path to be checked whether it is a file.

        Returns:
            bool: Return ``True`` if ``filepath`` points to a file, ``False``
                otherwise.
        """
        return osp.isfile(filepath)

    def join_path(self, filepath: Union[str, Path],
                  *filepaths: Union[str, Path]) -> str:
        """Concatenate all file paths.

        Join one or more filepath components intelligently. The return value
        is the concatenation of filepath and any members of *filepaths.

        Args:
            filepath (str or Path): Path to be concatenated.

        Returns:
            str: The result of concatenation.
        """
        return osp.join(filepath, *filepaths)

    @contextmanager
    def get_local_path(
            self, filepath: Union[str, Path]) -> Iterable[Union[str, Path]]:
        """Only for unified API and do nothing."""
        yield filepath

    def list_dir_or_file(self,
                         dir_path: Union[str, Path],
                         list_dir: bool = True,
                         list_file: bool = True,
                         suffix: Optional[Union[str, Tuple[str]]] = None,
                         recursive: bool = False) -> Iterator[str]:
        """Scan a directory to find the interested directories or files in
        arbitrary order.

        Note:
            :meth:`list_dir_or_file` returns the path relative to ``dir_path``.

        Args:
            dir_path (str | Path): Path of the directory.
            list_dir (bool): List the directories. Default: True.
            list_file (bool): List the path of files. Default: True.
            suffix (str or tuple[str], optional): File suffix
                that we are interested in. Default: None.
            recursive (bool): If set to True, recursively scan the
                directory. Default: False.

        Yields:
            Iterable[str]: A relative path to ``dir_path``.
        """
        if list_dir and suffix is not None:
            raise TypeError('`suffix` should be None when `list_dir` is True')

        if (suffix is not None) and not isinstance(suffix, (str, tuple)):
            raise TypeError('`suffix` must be a string or tuple of strings')

        # Anchor for computing relative paths during recursion.
        root = dir_path

        def _scan(current_dir, list_dir, list_file, suffix, recursive):
            for entry in os.scandir(current_dir):
                if not entry.name.startswith('.') and entry.is_file():
                    rel_path = osp.relpath(entry.path, root)
                    if (suffix is None
                            or rel_path.endswith(suffix)) and list_file:
                        yield rel_path
                elif osp.isdir(entry.path):
                    if list_dir:
                        yield osp.relpath(entry.path, root)
                    if recursive:
                        yield from _scan(entry.path, list_dir, list_file,
                                         suffix, recursive)

        return _scan(dir_path, list_dir, list_file, suffix, recursive)
class HTTPBackend(BaseStorageBackend):
    """HTTP and HTTPS storage backend."""

    def get(self, filepath):
        """Read raw bytes from a remote ``filepath`` via an HTTP(S) GET."""
        value_buf = urlopen(filepath).read()
        return value_buf

    def get_text(self, filepath, encoding='utf-8'):
        """Read text from a remote ``filepath``, decoded with ``encoding``."""
        value_buf = urlopen(filepath).read()
        return value_buf.decode(encoding)

    @contextmanager
    def get_local_path(self, filepath: str) -> Iterable[str]:
        """Download a file from ``filepath``.

        ``get_local_path`` is decorated by :meth:`contextlib.contextmanager`.
        It can be called with a ``with`` statement, and when exiting the
        ``with`` statement, the temporary path will be released.

        Args:
            filepath (str): Download a file from ``filepath``.

        Examples:
            >>> client = HTTPBackend()
            >>> # After exiting from the ``with`` clause,
            >>> # the path will be removed
            >>> with client.get_local_path('http://path/of/your/file') as path:
            ...     # do something here
        """
        # Create the temporary file before entering ``try``: if
        # ``NamedTemporaryFile`` raised inside the ``try`` block, ``f`` was
        # unbound and the ``finally`` clause raised ``NameError``, masking
        # the original error.
        f = tempfile.NamedTemporaryFile(delete=False)
        try:
            f.write(self.get(filepath))
            f.close()
            yield f.name
        finally:
            os.remove(f.name)
class FileClient:
|
| 730 |
+
"""A general file client to access files in different backends.
|
| 731 |
+
|
| 732 |
+
The client loads a file or text in a specified backend from its path
|
| 733 |
+
and returns it as a binary or text file. There are two ways to choose a
|
| 734 |
+
backend, the name of backend and the prefix of path. Although both of them
|
| 735 |
+
can be used to choose a storage backend, ``backend`` has a higher priority
|
| 736 |
+
that is if they are all set, the storage backend will be chosen by the
|
| 737 |
+
backend argument. If they are all `None`, the disk backend will be chosen.
|
| 738 |
+
Note that It can also register other backend accessor with a given name,
|
| 739 |
+
prefixes, and backend class. In addition, We use the singleton pattern to
|
| 740 |
+
avoid repeated object creation. If the arguments are the same, the same
|
| 741 |
+
object will be returned.
|
| 742 |
+
|
| 743 |
+
Args:
|
| 744 |
+
backend (str, optional): The storage backend type. Options are "disk",
|
| 745 |
+
"ceph", "memcached", "lmdb", "http" and "petrel". Default: None.
|
| 746 |
+
prefix (str, optional): The prefix of the registered storage backend.
|
| 747 |
+
Options are "s3", "http", "https". Default: None.
|
| 748 |
+
|
| 749 |
+
Examples:
|
| 750 |
+
>>> # only set backend
|
| 751 |
+
>>> file_client = FileClient(backend='petrel')
|
| 752 |
+
>>> # only set prefix
|
| 753 |
+
>>> file_client = FileClient(prefix='s3')
|
| 754 |
+
>>> # set both backend and prefix but use backend to choose client
|
| 755 |
+
>>> file_client = FileClient(backend='petrel', prefix='s3')
|
| 756 |
+
>>> # if the arguments are the same, the same object is returned
|
| 757 |
+
>>> file_client1 = FileClient(backend='petrel')
|
| 758 |
+
>>> file_client1 is file_client
|
| 759 |
+
True
|
| 760 |
+
|
| 761 |
+
Attributes:
|
| 762 |
+
client (:obj:`BaseStorageBackend`): The backend object.
|
| 763 |
+
"""
|
| 764 |
+
|
| 765 |
+
    # Mapping from backend name to backend class; consulted by ``__new__``
    # when a client is requested by name.
    _backends = {
        'disk': HardDiskBackend,
        'ceph': CephBackend,
        'memcached': MemcachedBackend,
        'lmdb': LmdbBackend,
        'petrel': PetrelBackend,
        'http': HTTPBackend,
    }
    # This collection is used to record the overridden backends, and when a
    # backend appears in the collection, the singleton pattern is disabled for
    # that backend, because if the singleton pattern is used, then the object
    # returned will be the backend before overwriting
    _overridden_backends = set()
    # Mapping from path prefix (e.g. 's3') to backend class; consulted by
    # ``__new__`` when a client is chosen via the prefix of a path.
    _prefix_to_backends = {
        's3': PetrelBackend,
        'http': HTTPBackend,
        'https': HTTPBackend,
    }
    # Prefixes whose backend has been overridden; serves the same purpose as
    # ``_overridden_backends`` but keyed by prefix.
    _overridden_prefixes = set()

    # Cache of previously created FileClient objects keyed by the argument
    # signature built in ``__new__``; implements the singleton behaviour.
    _instances = {}
    def __new__(cls, backend=None, prefix=None, **kwargs):
        # Fall back to the local disk backend when neither a backend name nor
        # a path prefix is given.
        if backend is None and prefix is None:
            backend = 'disk'
        if backend is not None and backend not in cls._backends:
            raise ValueError(
                f'Backend {backend} is not supported. Currently supported ones'
                f' are {list(cls._backends.keys())}')
        if prefix is not None and prefix not in cls._prefix_to_backends:
            raise ValueError(
                f'prefix {prefix} is not supported. Currently supported ones '
                f'are {list(cls._prefix_to_backends.keys())}')

        # concatenate the arguments to a unique key for determining whether
        # objects with the same arguments were created
        arg_key = f'{backend}:{prefix}'
        for key, value in kwargs.items():
            arg_key += f':{key}:{value}'

        # if a backend was overridden, it will create a new object
        if (arg_key in cls._instances
                and backend not in cls._overridden_backends
                and prefix not in cls._overridden_prefixes):
            _instance = cls._instances[arg_key]
        else:
            # create a new object and put it to _instance
            _instance = super().__new__(cls)
            # ``backend`` takes priority over ``prefix`` when both are set.
            if backend is not None:
                _instance.client = cls._backends[backend](**kwargs)
            else:
                _instance.client = cls._prefix_to_backends[prefix](**kwargs)

            cls._instances[arg_key] = _instance

        return _instance
    @property
    def name(self):
        """The ``name`` attribute reported by the wrapped backend object."""
        return self.client.name
    @property
    def allow_symlink(self):
        """The ``allow_symlink`` flag reported by the wrapped backend
        object."""
        return self.client.allow_symlink
@staticmethod
|
| 831 |
+
def parse_uri_prefix(uri: Union[str, Path]) -> Optional[str]:
|
| 832 |
+
"""Parse the prefix of a uri.
|
| 833 |
+
|
| 834 |
+
Args:
|
| 835 |
+
uri (str | Path): Uri to be parsed that contains the file prefix.
|
| 836 |
+
|
| 837 |
+
Examples:
|
| 838 |
+
>>> FileClient.parse_uri_prefix('s3://path/of/your/file')
|
| 839 |
+
's3'
|
| 840 |
+
|
| 841 |
+
Returns:
|
| 842 |
+
str | None: Return the prefix of uri if the uri contains '://'
|
| 843 |
+
else ``None``.
|
| 844 |
+
"""
|
| 845 |
+
assert is_filepath(uri)
|
| 846 |
+
uri = str(uri)
|
| 847 |
+
if '://' not in uri:
|
| 848 |
+
return None
|
| 849 |
+
else:
|
| 850 |
+
prefix, _ = uri.split('://')
|
| 851 |
+
# In the case of PetrelBackend, the prefix may contains the cluster
|
| 852 |
+
# name like clusterName:s3
|
| 853 |
+
if ':' in prefix:
|
| 854 |
+
_, prefix = prefix.split(':')
|
| 855 |
+
return prefix
|
| 856 |
+
|
| 857 |
+
@classmethod
|
| 858 |
+
def infer_client(cls,
|
| 859 |
+
file_client_args: Optional[dict] = None,
|
| 860 |
+
uri: Optional[Union[str, Path]] = None) -> 'FileClient':
|
| 861 |
+
"""Infer a suitable file client based on the URI and arguments.
|
| 862 |
+
|
| 863 |
+
Args:
|
| 864 |
+
file_client_args (dict, optional): Arguments to instantiate a
|
| 865 |
+
FileClient. Default: None.
|
| 866 |
+
uri (str | Path, optional): Uri to be parsed that contains the file
|
| 867 |
+
prefix. Default: None.
|
| 868 |
+
|
| 869 |
+
Examples:
|
| 870 |
+
>>> uri = 's3://path/of/your/file'
|
| 871 |
+
>>> file_client = FileClient.infer_client(uri=uri)
|
| 872 |
+
>>> file_client_args = {'backend': 'petrel'}
|
| 873 |
+
>>> file_client = FileClient.infer_client(file_client_args)
|
| 874 |
+
|
| 875 |
+
Returns:
|
| 876 |
+
FileClient: Instantiated FileClient object.
|
| 877 |
+
"""
|
| 878 |
+
assert file_client_args is not None or uri is not None
|
| 879 |
+
if file_client_args is None:
|
| 880 |
+
file_prefix = cls.parse_uri_prefix(uri) # type: ignore
|
| 881 |
+
return cls(prefix=file_prefix)
|
| 882 |
+
else:
|
| 883 |
+
return cls(**file_client_args)
|
| 884 |
+
|
| 885 |
+
    @classmethod
    def _register_backend(cls, name, backend, force=False, prefixes=None):
        # Validate all registration arguments before mutating any class
        # state, so a failed registration leaves the registries untouched.
        if not isinstance(name, str):
            raise TypeError('the backend name should be a string, '
                            f'but got {type(name)}')
        if not inspect.isclass(backend):
            raise TypeError(
                f'backend should be a class but got {type(backend)}')
        if not issubclass(backend, BaseStorageBackend):
            raise TypeError(
                f'backend {backend} is not a subclass of BaseStorageBackend')
        if not force and name in cls._backends:
            raise KeyError(
                f'{name} is already registered as a storage backend, '
                'add "force=True" if you want to override it')

        if name in cls._backends and force:
            # Record the override so that ``__new__`` stops serving cached
            # instances of the backend being replaced.
            cls._overridden_backends.add(name)
        cls._backends[name] = backend

        if prefixes is not None:
            # Accept a single prefix string or a list/tuple of them.
            if isinstance(prefixes, str):
                prefixes = [prefixes]
            else:
                assert isinstance(prefixes, (list, tuple))
            for prefix in prefixes:
                if prefix not in cls._prefix_to_backends:
                    cls._prefix_to_backends[prefix] = backend
                elif (prefix in cls._prefix_to_backends) and force:
                    # Same override bookkeeping as above, keyed by prefix.
                    cls._overridden_prefixes.add(prefix)
                    cls._prefix_to_backends[prefix] = backend
                else:
                    raise KeyError(
                        f'{prefix} is already registered as a storage backend,'
                        ' add "force=True" if you want to override it')
@classmethod
|
| 922 |
+
def register_backend(cls, name, backend=None, force=False, prefixes=None):
|
| 923 |
+
"""Register a backend to FileClient.
|
| 924 |
+
|
| 925 |
+
This method can be used as a normal class method or a decorator.
|
| 926 |
+
|
| 927 |
+
.. code-block:: python
|
| 928 |
+
|
| 929 |
+
class NewBackend(BaseStorageBackend):
|
| 930 |
+
|
| 931 |
+
def get(self, filepath):
|
| 932 |
+
return filepath
|
| 933 |
+
|
| 934 |
+
def get_text(self, filepath):
|
| 935 |
+
return filepath
|
| 936 |
+
|
| 937 |
+
FileClient.register_backend('new', NewBackend)
|
| 938 |
+
|
| 939 |
+
or
|
| 940 |
+
|
| 941 |
+
.. code-block:: python
|
| 942 |
+
|
| 943 |
+
@FileClient.register_backend('new')
|
| 944 |
+
class NewBackend(BaseStorageBackend):
|
| 945 |
+
|
| 946 |
+
def get(self, filepath):
|
| 947 |
+
return filepath
|
| 948 |
+
|
| 949 |
+
def get_text(self, filepath):
|
| 950 |
+
return filepath
|
| 951 |
+
|
| 952 |
+
Args:
|
| 953 |
+
name (str): The name of the registered backend.
|
| 954 |
+
backend (class, optional): The backend class to be registered,
|
| 955 |
+
which must be a subclass of :class:`BaseStorageBackend`.
|
| 956 |
+
When this method is used as a decorator, backend is None.
|
| 957 |
+
Defaults to None.
|
| 958 |
+
force (bool, optional): Whether to override the backend if the name
|
| 959 |
+
has already been registered. Defaults to False.
|
| 960 |
+
prefixes (str or list[str] or tuple[str], optional): The prefixes
|
| 961 |
+
of the registered storage backend. Default: None.
|
| 962 |
+
`New in version 1.3.15.`
|
| 963 |
+
"""
|
| 964 |
+
if backend is not None:
|
| 965 |
+
cls._register_backend(
|
| 966 |
+
name, backend, force=force, prefixes=prefixes)
|
| 967 |
+
return
|
| 968 |
+
|
| 969 |
+
def _register(backend_cls):
|
| 970 |
+
cls._register_backend(
|
| 971 |
+
name, backend_cls, force=force, prefixes=prefixes)
|
| 972 |
+
return backend_cls
|
| 973 |
+
|
| 974 |
+
return _register
|
| 975 |
+
|
| 976 |
+
def get(self, filepath: Union[str, Path]) -> Union[bytes, memoryview]:
|
| 977 |
+
"""Read data from a given ``filepath`` with 'rb' mode.
|
| 978 |
+
|
| 979 |
+
Note:
|
| 980 |
+
There are two types of return values for ``get``, one is ``bytes``
|
| 981 |
+
and the other is ``memoryview``. The advantage of using memoryview
|
| 982 |
+
is that you can avoid copying, and if you want to convert it to
|
| 983 |
+
``bytes``, you can use ``.tobytes()``.
|
| 984 |
+
|
| 985 |
+
Args:
|
| 986 |
+
filepath (str or Path): Path to read data.
|
| 987 |
+
|
| 988 |
+
Returns:
|
| 989 |
+
bytes | memoryview: Expected bytes object or a memory view of the
|
| 990 |
+
bytes object.
|
| 991 |
+
"""
|
| 992 |
+
return self.client.get(filepath)
|
| 993 |
+
|
| 994 |
+
def get_text(self, filepath: Union[str, Path], encoding='utf-8') -> str:
|
| 995 |
+
"""Read data from a given ``filepath`` with 'r' mode.
|
| 996 |
+
|
| 997 |
+
Args:
|
| 998 |
+
filepath (str or Path): Path to read data.
|
| 999 |
+
encoding (str): The encoding format used to open the ``filepath``.
|
| 1000 |
+
Default: 'utf-8'.
|
| 1001 |
+
|
| 1002 |
+
Returns:
|
| 1003 |
+
str: Expected text reading from ``filepath``.
|
| 1004 |
+
"""
|
| 1005 |
+
return self.client.get_text(filepath, encoding)
|
| 1006 |
+
|
| 1007 |
+
def put(self, obj: bytes, filepath: Union[str, Path]) -> None:
|
| 1008 |
+
"""Write data to a given ``filepath`` with 'wb' mode.
|
| 1009 |
+
|
| 1010 |
+
Note:
|
| 1011 |
+
``put`` should create a directory if the directory of ``filepath``
|
| 1012 |
+
does not exist.
|
| 1013 |
+
|
| 1014 |
+
Args:
|
| 1015 |
+
obj (bytes): Data to be written.
|
| 1016 |
+
filepath (str or Path): Path to write data.
|
| 1017 |
+
"""
|
| 1018 |
+
self.client.put(obj, filepath)
|
| 1019 |
+
|
| 1020 |
+
def put_text(self, obj: str, filepath: Union[str, Path]) -> None:
|
| 1021 |
+
"""Write data to a given ``filepath`` with 'w' mode.
|
| 1022 |
+
|
| 1023 |
+
Note:
|
| 1024 |
+
``put_text`` should create a directory if the directory of
|
| 1025 |
+
``filepath`` does not exist.
|
| 1026 |
+
|
| 1027 |
+
Args:
|
| 1028 |
+
obj (str): Data to be written.
|
| 1029 |
+
filepath (str or Path): Path to write data.
|
| 1030 |
+
encoding (str, optional): The encoding format used to open the
|
| 1031 |
+
`filepath`. Default: 'utf-8'.
|
| 1032 |
+
"""
|
| 1033 |
+
self.client.put_text(obj, filepath)
|
| 1034 |
+
|
| 1035 |
+
def remove(self, filepath: Union[str, Path]) -> None:
|
| 1036 |
+
"""Remove a file.
|
| 1037 |
+
|
| 1038 |
+
Args:
|
| 1039 |
+
filepath (str, Path): Path to be removed.
|
| 1040 |
+
"""
|
| 1041 |
+
self.client.remove(filepath)
|
| 1042 |
+
|
| 1043 |
+
def exists(self, filepath: Union[str, Path]) -> bool:
|
| 1044 |
+
"""Check whether a file path exists.
|
| 1045 |
+
|
| 1046 |
+
Args:
|
| 1047 |
+
filepath (str or Path): Path to be checked whether exists.
|
| 1048 |
+
|
| 1049 |
+
Returns:
|
| 1050 |
+
bool: Return ``True`` if ``filepath`` exists, ``False`` otherwise.
|
| 1051 |
+
"""
|
| 1052 |
+
return self.client.exists(filepath)
|
| 1053 |
+
|
| 1054 |
+
def isdir(self, filepath: Union[str, Path]) -> bool:
|
| 1055 |
+
"""Check whether a file path is a directory.
|
| 1056 |
+
|
| 1057 |
+
Args:
|
| 1058 |
+
filepath (str or Path): Path to be checked whether it is a
|
| 1059 |
+
directory.
|
| 1060 |
+
|
| 1061 |
+
Returns:
|
| 1062 |
+
bool: Return ``True`` if ``filepath`` points to a directory,
|
| 1063 |
+
``False`` otherwise.
|
| 1064 |
+
"""
|
| 1065 |
+
return self.client.isdir(filepath)
|
| 1066 |
+
|
| 1067 |
+
def isfile(self, filepath: Union[str, Path]) -> bool:
|
| 1068 |
+
"""Check whether a file path is a file.
|
| 1069 |
+
|
| 1070 |
+
Args:
|
| 1071 |
+
filepath (str or Path): Path to be checked whether it is a file.
|
| 1072 |
+
|
| 1073 |
+
Returns:
|
| 1074 |
+
bool: Return ``True`` if ``filepath`` points to a file, ``False``
|
| 1075 |
+
otherwise.
|
| 1076 |
+
"""
|
| 1077 |
+
return self.client.isfile(filepath)
|
| 1078 |
+
|
| 1079 |
+
def join_path(self, filepath: Union[str, Path],
|
| 1080 |
+
*filepaths: Union[str, Path]) -> str:
|
| 1081 |
+
"""Concatenate all file paths.
|
| 1082 |
+
|
| 1083 |
+
Join one or more filepath components intelligently. The return value
|
| 1084 |
+
is the concatenation of filepath and any members of *filepaths.
|
| 1085 |
+
|
| 1086 |
+
Args:
|
| 1087 |
+
filepath (str or Path): Path to be concatenated.
|
| 1088 |
+
|
| 1089 |
+
Returns:
|
| 1090 |
+
str: The result of concatenation.
|
| 1091 |
+
"""
|
| 1092 |
+
return self.client.join_path(filepath, *filepaths)
|
| 1093 |
+
|
| 1094 |
+
@contextmanager
|
| 1095 |
+
def get_local_path(self, filepath: Union[str, Path]) -> Iterable[str]:
|
| 1096 |
+
"""Download data from ``filepath`` and write the data to local path.
|
| 1097 |
+
|
| 1098 |
+
``get_local_path`` is decorated by :meth:`contxtlib.contextmanager`. It
|
| 1099 |
+
can be called with ``with`` statement, and when exists from the
|
| 1100 |
+
``with`` statement, the temporary path will be released.
|
| 1101 |
+
|
| 1102 |
+
Note:
|
| 1103 |
+
If the ``filepath`` is a local path, just return itself.
|
| 1104 |
+
|
| 1105 |
+
.. warning::
|
| 1106 |
+
``get_local_path`` is an experimental interface that may change in
|
| 1107 |
+
the future.
|
| 1108 |
+
|
| 1109 |
+
Args:
|
| 1110 |
+
filepath (str or Path): Path to be read data.
|
| 1111 |
+
|
| 1112 |
+
Examples:
|
| 1113 |
+
>>> file_client = FileClient(prefix='s3')
|
| 1114 |
+
>>> with file_client.get_local_path('s3://bucket/abc.jpg') as path:
|
| 1115 |
+
... # do something here
|
| 1116 |
+
|
| 1117 |
+
Yields:
|
| 1118 |
+
Iterable[str]: Only yield one path.
|
| 1119 |
+
"""
|
| 1120 |
+
with self.client.get_local_path(str(filepath)) as local_path:
|
| 1121 |
+
yield local_path
|
| 1122 |
+
|
| 1123 |
+
def list_dir_or_file(self,
|
| 1124 |
+
dir_path: Union[str, Path],
|
| 1125 |
+
list_dir: bool = True,
|
| 1126 |
+
list_file: bool = True,
|
| 1127 |
+
suffix: Optional[Union[str, Tuple[str]]] = None,
|
| 1128 |
+
recursive: bool = False) -> Iterator[str]:
|
| 1129 |
+
"""Scan a directory to find the interested directories or files in
|
| 1130 |
+
arbitrary order.
|
| 1131 |
+
|
| 1132 |
+
Note:
|
| 1133 |
+
:meth:`list_dir_or_file` returns the path relative to ``dir_path``.
|
| 1134 |
+
|
| 1135 |
+
Args:
|
| 1136 |
+
dir_path (str | Path): Path of the directory.
|
| 1137 |
+
list_dir (bool): List the directories. Default: True.
|
| 1138 |
+
list_file (bool): List the path of files. Default: True.
|
| 1139 |
+
suffix (str or tuple[str], optional): File suffix
|
| 1140 |
+
that we are interested in. Default: None.
|
| 1141 |
+
recursive (bool): If set to True, recursively scan the
|
| 1142 |
+
directory. Default: False.
|
| 1143 |
+
|
| 1144 |
+
Yields:
|
| 1145 |
+
Iterable[str]: A relative path to ``dir_path``.
|
| 1146 |
+
"""
|
| 1147 |
+
yield from self.client.list_dir_or_file(dir_path, list_dir, list_file,
|
| 1148 |
+
suffix, recursive)
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/fileio/io.py
ADDED
|
@@ -0,0 +1,151 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from io import BytesIO, StringIO
|
| 3 |
+
from pathlib import Path
|
| 4 |
+
|
| 5 |
+
from ..utils import is_list_of, is_str
|
| 6 |
+
from .file_client import FileClient
|
| 7 |
+
from .handlers import BaseFileHandler, JsonHandler, PickleHandler, YamlHandler
|
| 8 |
+
|
| 9 |
+
file_handlers = {
|
| 10 |
+
'json': JsonHandler(),
|
| 11 |
+
'yaml': YamlHandler(),
|
| 12 |
+
'yml': YamlHandler(),
|
| 13 |
+
'pickle': PickleHandler(),
|
| 14 |
+
'pkl': PickleHandler()
|
| 15 |
+
}
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def load(file, file_format=None, file_client_args=None, **kwargs):
    """Load data from json/yaml/pickle files.

    This method provides a unified api for loading data from serialized
    files.

    Note:
        In v1.3.16 and later, ``load`` supports loading data from
        serialized files stored in different backends.

    Args:
        file (str or :obj:`Path` or file-like object): Filename or a
            file-like object.
        file_format (str, optional): If not specified, the file format is
            inferred from the file extension, otherwise the given format is
            used. Currently supported formats include "json", "yaml/yml"
            and "pickle/pkl".
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient` for details.
            Default: None.

    Examples:
        >>> load('/path/of/your/file')  # file is storaged in disk
        >>> load('https://path/of/your/file')  # file is storaged in Internet
        >>> load('s3://path/of/your/file')  # file is storaged in petrel

    Returns:
        The content from the file.
    """
    if isinstance(file, Path):
        file = str(file)
    if file_format is None and is_str(file):
        # Infer the format from the extension, e.g. 'a/b.json' -> 'json'.
        file_format = file.split('.')[-1]
    if file_format not in file_handlers:
        raise TypeError(f'Unsupported format: {file_format}')

    handler = file_handlers[file_format]
    if is_str(file):
        file_client = FileClient.infer_client(file_client_args, file)
        # Text-oriented handlers (json/yaml) read from a str buffer,
        # binary handlers (pickle) from a bytes buffer.
        if handler.str_like:
            buffer = StringIO(file_client.get_text(file))
        else:
            buffer = BytesIO(file_client.get(file))
        with buffer as f:
            return handler.load_from_fileobj(f, **kwargs)
    if hasattr(file, 'read'):
        return handler.load_from_fileobj(file, **kwargs)
    raise TypeError('"file" must be a filepath str or a file-object')
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def dump(obj, file=None, file_format=None, file_client_args=None, **kwargs):
    """Dump data to json/yaml/pickle strings or files.

    This method provides a unified api for dumping data as strings or to
    files, and also supports custom arguments for each file format.

    Note:
        In v1.3.16 and later, ``dump`` supports dumping data as strings or
        to files which are saved to different backends.

    Args:
        obj (any): The python object to be dumped.
        file (str or :obj:`Path` or file-like object, optional): If not
            specified, the object is dumped to a str, otherwise to a file
            specified by the filename or file-like object.
        file_format (str, optional): Same as :func:`load`.
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient` for details.
            Default: None.

    Examples:
        >>> dump('hello world', '/path/of/your/file')  # disk
        >>> dump('hello world', 's3://path/of/your/file')  # ceph or petrel

    Returns:
        str or bytes or None: The serialized content when ``file`` is None,
        otherwise ``None`` after writing to ``file``.
    """
    if isinstance(file, Path):
        file = str(file)
    if file_format is None:
        if is_str(file):
            file_format = file.split('.')[-1]
        elif file is None:
            raise ValueError(
                'file_format must be specified since file is None')
    if file_format not in file_handlers:
        raise TypeError(f'Unsupported format: {file_format}')

    handler = file_handlers[file_format]
    if file is None:
        # No destination: return the serialized representation directly.
        return handler.dump_to_str(obj, **kwargs)
    if is_str(file):
        file_client = FileClient.infer_client(file_client_args, file)
        # Serialize into an in-memory buffer, then hand the payload to the
        # storage backend in a single put.
        if handler.str_like:
            with StringIO() as f:
                handler.dump_to_fileobj(obj, f, **kwargs)
                file_client.put_text(f.getvalue(), file)
        else:
            with BytesIO() as f:
                handler.dump_to_fileobj(obj, f, **kwargs)
                file_client.put(f.getvalue(), file)
    elif hasattr(file, 'write'):
        handler.dump_to_fileobj(obj, file, **kwargs)
    else:
        raise TypeError('"file" must be a filename str or a file-object')
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
def _register_handler(handler, file_formats):
    """Register a handler for one or more file extensions.

    Args:
        handler (:obj:`BaseFileHandler`): Handler instance to register.
        file_formats (str or list[str]): File formats to be handled by
            this handler.
    """
    if not isinstance(handler, BaseFileHandler):
        raise TypeError(
            f'handler must be a child of BaseFileHandler, not {type(handler)}')
    if isinstance(file_formats, str):
        file_formats = [file_formats]
    if not is_list_of(file_formats, str):
        raise TypeError('file_formats must be a str or a list of str')
    # Map every requested extension onto the same handler instance.
    file_handlers.update({ext: handler for ext in file_formats})
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def register_handler(file_formats, **kwargs):
    """Class decorator that registers a file handler.

    The decorated class is instantiated with ``**kwargs`` and registered
    for every extension in ``file_formats``; the class itself is returned
    unchanged.
    """

    def wrap(cls):
        _register_handler(cls(**kwargs), file_formats)
        return cls

    return wrap
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/fileio/parse.py
ADDED
|
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
|
| 3 |
+
from io import StringIO
|
| 4 |
+
|
| 5 |
+
from .file_client import FileClient
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def list_from_file(filename,
                   prefix='',
                   offset=0,
                   max_num=0,
                   encoding='utf-8',
                   file_client_args=None):
    """Load a text file and parse the content as a list of strings.

    Note:
        In v1.3.16 and later, ``list_from_file`` supports loading a text
        file which can be stored in different backends and parsing the
        content as a list of strings.

    Args:
        filename (str): Filename.
        prefix (str): The prefix inserted at the beginning of each item.
        offset (int): Number of leading lines to skip.
        max_num (int): The maximum number of lines to read; zero or a
            negative value means no limit.
        encoding (str): Encoding used to open the file. Default utf-8.
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient` for details.
            Default: None.

    Examples:
        >>> list_from_file('/path/of/your/file')  # disk
        ['hello', 'world']
        >>> list_from_file('s3://path/of/your/file')  # ceph or petrel
        ['hello', 'world']

    Returns:
        list[str]: A list of strings.
    """
    file_client = FileClient.infer_client(file_client_args, filename)
    items = []
    with StringIO(file_client.get_text(filename, encoding)) as f:
        # Discard the first `offset` lines.
        for _ in range(offset):
            f.readline()
        for line in f:
            if 0 < max_num <= len(items):
                break
            items.append(prefix + line.rstrip('\n\r'))
    return items
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def dict_from_file(filename,
                   key_type=str,
                   encoding='utf-8',
                   file_client_args=None):
    """Load a text file and parse the content as a dict.

    Each line of the text file is split on whitespace/tabs into two or
    more columns. The first column becomes the dict key; a single
    remaining column becomes a scalar value, several remaining columns
    become a list value.

    Note:
        In v1.3.16 and later, ``dict_from_file`` supports loading a text
        file which can be stored in different backends and parsing the
        content as a dict.

    Args:
        filename (str): Filename.
        key_type (type): Type of the dict keys. str is used by default
            and type conversion is performed if another type is given.
        encoding (str): Encoding used to open the file. Default utf-8.
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient` for details.
            Default: None.

    Examples:
        >>> dict_from_file('/path/of/your/file')  # disk
        {'key1': 'value1', 'key2': 'value2'}
        >>> dict_from_file('s3://path/of/your/file')  # ceph or petrel
        {'key1': 'value1', 'key2': 'value2'}

    Returns:
        dict: The parsed contents.
    """
    file_client = FileClient.infer_client(file_client_args, filename)
    mapping = {}
    with StringIO(file_client.get_text(filename, encoding)) as f:
        for line in f:
            columns = line.rstrip('\n').split()
            assert len(columns) >= 2
            key = key_type(columns[0])
            # One value column -> scalar; several -> list of columns.
            mapping[key] = columns[1] if len(columns) == 2 else columns[1:]
    return mapping
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/image/__init__.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from .colorspace import (bgr2gray, bgr2hls, bgr2hsv, bgr2rgb, bgr2ycbcr,
|
| 3 |
+
gray2bgr, gray2rgb, hls2bgr, hsv2bgr, imconvert,
|
| 4 |
+
rgb2bgr, rgb2gray, rgb2ycbcr, ycbcr2bgr, ycbcr2rgb)
|
| 5 |
+
from .geometric import (cutout, imcrop, imflip, imflip_, impad,
|
| 6 |
+
impad_to_multiple, imrescale, imresize, imresize_like,
|
| 7 |
+
imresize_to_multiple, imrotate, imshear, imtranslate,
|
| 8 |
+
rescale_size)
|
| 9 |
+
from .io import imfrombytes, imread, imwrite, supported_backends, use_backend
|
| 10 |
+
from .misc import tensor2imgs
|
| 11 |
+
from .photometric import (adjust_brightness, adjust_color, adjust_contrast,
|
| 12 |
+
adjust_lighting, adjust_sharpness, auto_contrast,
|
| 13 |
+
clahe, imdenormalize, imequalize, iminvert,
|
| 14 |
+
imnormalize, imnormalize_, lut_transform, posterize,
|
| 15 |
+
solarize)
|
| 16 |
+
|
| 17 |
+
__all__ = [
|
| 18 |
+
'bgr2gray', 'bgr2hls', 'bgr2hsv', 'bgr2rgb', 'gray2bgr', 'gray2rgb',
|
| 19 |
+
'hls2bgr', 'hsv2bgr', 'imconvert', 'rgb2bgr', 'rgb2gray', 'imrescale',
|
| 20 |
+
'imresize', 'imresize_like', 'imresize_to_multiple', 'rescale_size',
|
| 21 |
+
'imcrop', 'imflip', 'imflip_', 'impad', 'impad_to_multiple', 'imrotate',
|
| 22 |
+
'imfrombytes', 'imread', 'imwrite', 'supported_backends', 'use_backend',
|
| 23 |
+
'imdenormalize', 'imnormalize', 'imnormalize_', 'iminvert', 'posterize',
|
| 24 |
+
'solarize', 'rgb2ycbcr', 'bgr2ycbcr', 'ycbcr2rgb', 'ycbcr2bgr',
|
| 25 |
+
'tensor2imgs', 'imshear', 'imtranslate', 'adjust_color', 'imequalize',
|
| 26 |
+
'adjust_brightness', 'adjust_contrast', 'lut_transform', 'clahe',
|
| 27 |
+
'adjust_sharpness', 'auto_contrast', 'cutout', 'adjust_lighting'
|
| 28 |
+
]
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/image/colorspace.py
ADDED
|
@@ -0,0 +1,306 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import cv2
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def imconvert(img, src, dst):
    """Convert an image from the src colorspace to dst colorspace.

    Args:
        img (ndarray): The input image.
        src (str): The source colorspace, e.g., 'rgb', 'hsv'.
        dst (str): The destination colorspace, e.g., 'rgb', 'hsv'.

    Returns:
        ndarray: The converted image.
    """
    # Resolve e.g. ('rgb', 'hsv') -> cv2.COLOR_RGB2HSV dynamically.
    code_name = f'COLOR_{src.upper()}2{dst.upper()}'
    return cv2.cvtColor(img, getattr(cv2, code_name))
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def bgr2gray(img, keepdim=False):
    """Convert a BGR image to grayscale image.

    Args:
        img (ndarray): The input image.
        keepdim (bool): If False (by default), return the grayscale image
            with 2 dims, otherwise 3 dims (trailing channel of size 1).

    Returns:
        ndarray: The converted grayscale image.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return gray[..., None] if keepdim else gray
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def rgb2gray(img, keepdim=False):
    """Convert a RGB image to grayscale image.

    Args:
        img (ndarray): The input image.
        keepdim (bool): If False (by default), return the grayscale image
            with 2 dims, otherwise 3 dims (trailing channel of size 1).

    Returns:
        ndarray: The converted grayscale image.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    return gray[..., None] if keepdim else gray
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def gray2bgr(img):
    """Convert a grayscale image to BGR image.

    Args:
        img (ndarray): The input image.

    Returns:
        ndarray: The converted BGR image.
    """
    # cv2 expects a channel axis; add one for 2-D inputs.
    if img.ndim == 2:
        img = img[..., None]
    return cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def gray2rgb(img):
    """Convert a grayscale image to RGB image.

    Args:
        img (ndarray): The input image.

    Returns:
        ndarray: The converted RGB image.
    """
    # cv2 expects a channel axis; add one for 2-D inputs.
    if img.ndim == 2:
        img = img[..., None]
    return cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def _convert_input_type_range(img):
|
| 85 |
+
"""Convert the type and range of the input image.
|
| 86 |
+
|
| 87 |
+
It converts the input image to np.float32 type and range of [0, 1].
|
| 88 |
+
It is mainly used for pre-processing the input image in colorspace
|
| 89 |
+
conversion functions such as rgb2ycbcr and ycbcr2rgb.
|
| 90 |
+
|
| 91 |
+
Args:
|
| 92 |
+
img (ndarray): The input image. It accepts:
|
| 93 |
+
1. np.uint8 type with range [0, 255];
|
| 94 |
+
2. np.float32 type with range [0, 1].
|
| 95 |
+
|
| 96 |
+
Returns:
|
| 97 |
+
(ndarray): The converted image with type of np.float32 and range of
|
| 98 |
+
[0, 1].
|
| 99 |
+
"""
|
| 100 |
+
img_type = img.dtype
|
| 101 |
+
img = img.astype(np.float32)
|
| 102 |
+
if img_type == np.float32:
|
| 103 |
+
pass
|
| 104 |
+
elif img_type == np.uint8:
|
| 105 |
+
img /= 255.
|
| 106 |
+
else:
|
| 107 |
+
raise TypeError('The img type should be np.float32 or np.uint8, '
|
| 108 |
+
f'but got {img_type}')
|
| 109 |
+
return img
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
def _convert_output_type_range(img, dst_type):
|
| 113 |
+
"""Convert the type and range of the image according to dst_type.
|
| 114 |
+
|
| 115 |
+
It converts the image to desired type and range. If `dst_type` is np.uint8,
|
| 116 |
+
images will be converted to np.uint8 type with range [0, 255]. If
|
| 117 |
+
`dst_type` is np.float32, it converts the image to np.float32 type with
|
| 118 |
+
range [0, 1].
|
| 119 |
+
It is mainly used for post-processing images in colorspace conversion
|
| 120 |
+
functions such as rgb2ycbcr and ycbcr2rgb.
|
| 121 |
+
|
| 122 |
+
Args:
|
| 123 |
+
img (ndarray): The image to be converted with np.float32 type and
|
| 124 |
+
range [0, 255].
|
| 125 |
+
dst_type (np.uint8 | np.float32): If dst_type is np.uint8, it
|
| 126 |
+
converts the image to np.uint8 type with range [0, 255]. If
|
| 127 |
+
dst_type is np.float32, it converts the image to np.float32 type
|
| 128 |
+
with range [0, 1].
|
| 129 |
+
|
| 130 |
+
Returns:
|
| 131 |
+
(ndarray): The converted image with desired type and range.
|
| 132 |
+
"""
|
| 133 |
+
if dst_type not in (np.uint8, np.float32):
|
| 134 |
+
raise TypeError('The dst_type should be np.float32 or np.uint8, '
|
| 135 |
+
f'but got {dst_type}')
|
| 136 |
+
if dst_type == np.uint8:
|
| 137 |
+
img = img.round()
|
| 138 |
+
else:
|
| 139 |
+
img /= 255.
|
| 140 |
+
return img.astype(dst_type)
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
def rgb2ycbcr(img, y_only=False):
    """Convert a RGB image to YCbCr image.

    This function produces the same results as Matlab's `rgb2ycbcr`
    function. It implements the ITU-R BT.601 conversion for
    standard-definition television. See more details in
    https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.

    It differs from a similar function in cv2.cvtColor: `RGB <-> YCrCb`.
    In OpenCV, it implements a JPEG conversion. See more details in
    https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.

    Args:
        img (ndarray): The input image. It accepts:
            1. np.uint8 type with range [0, 255];
            2. np.float32 type with range [0, 1].
        y_only (bool): Whether to only return Y channel. Default: False.

    Returns:
        ndarray: The converted YCbCr image. The output image has the same
        type and range as input image.
    """
    img_type = img.dtype
    img = _convert_input_type_range(img)
    if y_only:
        out_img = np.dot(img, [65.481, 128.553, 24.966]) + 16.0
    else:
        # BT.601 weights (columns: Y, Cb, Cr) for RGB channel order.
        weights = [[65.481, -37.797, 112.0],
                   [128.553, -74.203, -93.786],
                   [24.966, 112.0, -18.214]]
        out_img = np.matmul(img, weights) + [16, 128, 128]
    return _convert_output_type_range(out_img, img_type)
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
def bgr2ycbcr(img, y_only=False):
    """Convert a BGR image to YCbCr image.

    The bgr version of rgb2ycbcr. It implements the ITU-R BT.601
    conversion for standard-definition television. See more details in
    https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.

    It differs from a similar function in cv2.cvtColor: `BGR <-> YCrCb`.
    In OpenCV, it implements a JPEG conversion. See more details in
    https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.

    Args:
        img (ndarray): The input image. It accepts:
            1. np.uint8 type with range [0, 255];
            2. np.float32 type with range [0, 1].
        y_only (bool): Whether to only return Y channel. Default: False.

    Returns:
        ndarray: The converted YCbCr image. The output image has the same
        type and range as input image.
    """
    img_type = img.dtype
    img = _convert_input_type_range(img)
    if y_only:
        out_img = np.dot(img, [24.966, 128.553, 65.481]) + 16.0
    else:
        # BT.601 weights (columns: Y, Cb, Cr) for BGR channel order.
        weights = [[24.966, 112.0, -18.214],
                   [128.553, -74.203, -93.786],
                   [65.481, -37.797, 112.0]]
        out_img = np.matmul(img, weights) + [16, 128, 128]
    return _convert_output_type_range(out_img, img_type)
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
def ycbcr2rgb(img):
    """Convert a YCbCr image to RGB image.

    This function produces the same results as Matlab's ycbcr2rgb function.
    It implements the ITU-R BT.601 conversion for standard-definition
    television. See more details in
    https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.

    It differs from a similar function in cv2.cvtColor: `YCrCb <-> RGB`,
    which implements the JPEG conversion. See more details in
    https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.

    Args:
        img (ndarray): The input image. It accepts:
            1. np.uint8 type with range [0, 255];
            2. np.float32 type with range [0, 1].

    Returns:
        ndarray: The converted RGB image. The output image has the same
            type and range as the input image.
    """
    src_dtype = img.dtype
    # Work in the [0, 255] range expected by the inverse-conversion matrix.
    img = _convert_input_type_range(img) * 255
    inverse_matrix = [[0.00456621, 0.00456621, 0.00456621],
                      [0, -0.00153632, 0.00791071],
                      [0.00625893, -0.00318811, 0]]
    offsets = [-222.921, 135.576, -276.836]
    converted = np.matmul(img, inverse_matrix) * 255.0 + offsets
    return _convert_output_type_range(converted, src_dtype)
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
def ycbcr2bgr(img):
    """Convert a YCbCr image to BGR image.

    The bgr version of ycbcr2rgb. It implements the ITU-R BT.601
    conversion for standard-definition television. See more details in
    https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.

    It differs from a similar function in cv2.cvtColor: `YCrCb <-> BGR`,
    which implements the JPEG conversion. See more details in
    https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.

    Args:
        img (ndarray): The input image. It accepts:
            1. np.uint8 type with range [0, 255];
            2. np.float32 type with range [0, 1].

    Returns:
        ndarray: The converted BGR image. The output image has the same
            type and range as the input image.
    """
    src_dtype = img.dtype
    # Work in the [0, 255] range expected by the inverse-conversion matrix.
    img = _convert_input_type_range(img) * 255
    inverse_matrix = [[0.00456621, 0.00456621, 0.00456621],
                      [0.00791071, -0.00153632, 0],
                      [0, -0.00318811, 0.00625893]]
    offsets = [-276.836, 135.576, -222.921]
    converted = np.matmul(img, inverse_matrix) * 255.0 + offsets
    return _convert_output_type_range(converted, src_dtype)
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
def convert_color_factory(src, dst):
    """Build a ``src -> dst`` color-conversion function backed by cv2.

    The returned function wraps :func:`cv2.cvtColor` with the conversion
    code ``COLOR_{SRC}2{DST}`` looked up once at factory time.
    """
    code = getattr(cv2, f'COLOR_{src.upper()}2{dst.upper()}')

    def convert_color(img):
        return cv2.cvtColor(img, code)

    convert_color.__doc__ = f"""Convert a {src.upper()} image to {dst.upper()}
    image.

    Args:
        img (ndarray or str): The input image.

    Returns:
        ndarray: The converted {dst.upper()} image.
    """

    return convert_color
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
# Commonly used color-conversion shortcuts, each built once from the
# factory above (cv2-backed cvtColor wrappers).
bgr2rgb = convert_color_factory('bgr', 'rgb')

rgb2bgr = convert_color_factory('rgb', 'bgr')

bgr2hsv = convert_color_factory('bgr', 'hsv')

hsv2bgr = convert_color_factory('hsv', 'bgr')

bgr2hls = convert_color_factory('bgr', 'hls')

hls2bgr = convert_color_factory('hls', 'bgr')
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/image/geometric.py
ADDED
|
@@ -0,0 +1,728 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import numbers
|
| 3 |
+
|
| 4 |
+
import cv2
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
from ..utils import to_2tuple
|
| 8 |
+
from .io import imread_backend
|
| 9 |
+
|
| 10 |
+
try:
|
| 11 |
+
from PIL import Image
|
| 12 |
+
except ImportError:
|
| 13 |
+
Image = None
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def _scale_size(size, scale):
|
| 17 |
+
"""Rescale a size by a ratio.
|
| 18 |
+
|
| 19 |
+
Args:
|
| 20 |
+
size (tuple[int]): (w, h).
|
| 21 |
+
scale (float | tuple(float)): Scaling factor.
|
| 22 |
+
|
| 23 |
+
Returns:
|
| 24 |
+
tuple[int]: scaled size.
|
| 25 |
+
"""
|
| 26 |
+
if isinstance(scale, (float, int)):
|
| 27 |
+
scale = (scale, scale)
|
| 28 |
+
w, h = size
|
| 29 |
+
return int(w * float(scale[0]) + 0.5), int(h * float(scale[1]) + 0.5)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
# Mapping from interpolation-method names to OpenCV interpolation flags.
cv2_interp_codes = {
    'nearest': cv2.INTER_NEAREST,
    'bilinear': cv2.INTER_LINEAR,
    'bicubic': cv2.INTER_CUBIC,
    'area': cv2.INTER_AREA,
    'lanczos': cv2.INTER_LANCZOS4
}

# Mapping from interpolation-method names to Pillow resampling filters.
# Only defined when Pillow imported successfully (see the guard above),
# so cv2 remains usable without Pillow installed.
if Image is not None:
    pillow_interp_codes = {
        'nearest': Image.NEAREST,
        'bilinear': Image.BILINEAR,
        'bicubic': Image.BICUBIC,
        'box': Image.BOX,
        'lanczos': Image.LANCZOS,
        'hamming': Image.HAMMING
    }
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def imresize(img,
             size,
             return_scale=False,
             interpolation='bilinear',
             out=None,
             backend=None):
    """Resize image to a given size.

    Args:
        img (ndarray): The input image.
        size (tuple[int]): Target size (w, h).
        return_scale (bool): Whether to return `w_scale` and `h_scale`.
        interpolation (str): Interpolation method, accepted values are
            "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2'
            backend, "nearest", "bilinear" for 'pillow' backend.
        out (ndarray): The output destination.
        backend (str | None): The image resize backend type. Options are
            `cv2`, `pillow`, `None`. If backend is None, the global
            imread_backend specified by ``mmcv.use_backend()`` will be
            used. Default: None.

    Returns:
        tuple | ndarray: (`resized_img`, `w_scale`, `h_scale`) or
            `resized_img`.
    """
    src_h, src_w = img.shape[:2]
    backend = imread_backend if backend is None else backend
    if backend not in ['cv2', 'pillow']:
        raise ValueError(f'backend: {backend} is not supported for resize.'
                         f"Supported backends are 'cv2', 'pillow'")

    if backend == 'pillow':
        assert img.dtype == np.uint8, 'Pillow backend only support uint8 type'
        pil_image = Image.fromarray(img)
        pil_image = pil_image.resize(size, pillow_interp_codes[interpolation])
        resized_img = np.array(pil_image)
    else:
        resized_img = cv2.resize(
            img, size, dst=out, interpolation=cv2_interp_codes[interpolation])
    if return_scale:
        # Scales are computed against the source size, not re-measured.
        return resized_img, size[0] / src_w, size[1] / src_h
    return resized_img
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def imresize_to_multiple(img,
                         divisor,
                         size=None,
                         scale_factor=None,
                         keep_ratio=False,
                         return_scale=False,
                         interpolation='bilinear',
                         out=None,
                         backend=None):
    """Resize image according to a given size or scale factor and then round
    up the resized or rescaled image size to the nearest value that can be
    divided by the divisor.

    Args:
        img (ndarray): The input image.
        divisor (int | tuple): Resized image size will be a multiple of
            divisor. If divisor is a tuple, divisor should be
            (w_divisor, h_divisor).
        size (None | int | tuple[int]): Target size (w, h). Default: None.
        scale_factor (None | float | tuple[float]): Multiplier for spatial
            size. Should match input size if it is a tuple and the 2D style
            is (w_scale_factor, h_scale_factor). Default: None.
        keep_ratio (bool): Whether to keep the aspect ratio when resizing
            the image. Default: False.
        return_scale (bool): Whether to return `w_scale` and `h_scale`.
        interpolation (str): Interpolation method, accepted values are
            "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2'
            backend, "nearest", "bilinear" for 'pillow' backend.
        out (ndarray): The output destination.
        backend (str | None): The image resize backend type. Options are
            `cv2`, `pillow`, `None`. If backend is None, the global
            imread_backend specified by ``mmcv.use_backend()`` will be
            used. Default: None.

    Returns:
        tuple | ndarray: (`resized_img`, `w_scale`, `h_scale`) or
            `resized_img`.
    """
    h, w = img.shape[:2]
    # Exactly one of `size` / `scale_factor` must be supplied.
    if size is not None and scale_factor is not None:
        raise ValueError('only one of size or scale_factor should be defined')
    if size is None and scale_factor is None:
        raise ValueError('one of size or scale_factor should be defined')
    if size is not None:
        size = to_2tuple(size)
        if keep_ratio:
            size = rescale_size((w, h), size, return_scale=False)
    else:
        size = _scale_size((w, h), scale_factor)

    # Round each edge up to the nearest multiple of its divisor.
    divisor = to_2tuple(divisor)
    size = tuple(int(np.ceil(edge / d)) * d for d, edge in zip(divisor, size))
    resized_img, w_scale, h_scale = imresize(
        img,
        size,
        return_scale=True,
        interpolation=interpolation,
        out=out,
        backend=backend)
    if return_scale:
        return resized_img, w_scale, h_scale
    return resized_img
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
def imresize_like(img,
                  dst_img,
                  return_scale=False,
                  interpolation='bilinear',
                  backend=None):
    """Resize image to the same size of a given image.

    Args:
        img (ndarray): The input image.
        dst_img (ndarray): The target image.
        return_scale (bool): Whether to return `w_scale` and `h_scale`.
        interpolation (str): Same as :func:`resize`.
        backend (str | None): Same as :func:`resize`.

    Returns:
        tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or
            `resized_img`.
    """
    # Only the target's spatial dims matter; channels are untouched.
    target_h, target_w = dst_img.shape[:2]
    return imresize(img, (target_w, target_h), return_scale, interpolation,
                    backend=backend)
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
def rescale_size(old_size, scale, return_scale=False):
    """Calculate the new size to be rescaled to.

    Args:
        old_size (tuple[int]): The old size (w, h) of image.
        scale (float | tuple[int]): The scaling factor or maximum size.
            If it is a float number, then the image will be rescaled by
            this factor, else if it is a tuple of 2 integers, then the
            image will be rescaled as large as possible within the scale.
        return_scale (bool): Whether to return the scaling factor besides
            the rescaled image size.

    Returns:
        tuple[int]: The new rescaled image size.
    """
    w, h = old_size
    if isinstance(scale, (float, int)):
        if scale <= 0:
            raise ValueError(f'Invalid scale {scale}, must be positive.')
        scale_factor = scale
    elif isinstance(scale, tuple):
        # Fit the image inside the (long, short) constraint box.
        long_edge, short_edge = max(scale), min(scale)
        scale_factor = min(long_edge / max(h, w), short_edge / min(h, w))
    else:
        raise TypeError(
            f'Scale must be a number or tuple of int, but got {type(scale)}')

    new_size = _scale_size((w, h), scale_factor)

    return (new_size, scale_factor) if return_scale else new_size
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
def imrescale(img,
              scale,
              return_scale=False,
              interpolation='bilinear',
              backend=None):
    """Resize image while keeping the aspect ratio.

    Args:
        img (ndarray): The input image.
        scale (float | tuple[int]): The scaling factor or maximum size.
            If it is a float number, then the image will be rescaled by
            this factor, else if it is a tuple of 2 integers, then the
            image will be rescaled as large as possible within the scale.
        return_scale (bool): Whether to return the scaling factor besides
            the rescaled image.
        interpolation (str): Same as :func:`resize`.
        backend (str | None): Same as :func:`resize`.

    Returns:
        ndarray: The rescaled image.
    """
    h, w = img.shape[:2]
    # Delegate the aspect-ratio-preserving size computation.
    new_size, scale_factor = rescale_size((w, h), scale, return_scale=True)
    rescaled_img = imresize(
        img, new_size, interpolation=interpolation, backend=backend)
    return (rescaled_img, scale_factor) if return_scale else rescaled_img
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
def imflip(img, direction='horizontal'):
    """Flip an image horizontally or vertically.

    Args:
        img (ndarray): Image to be flipped.
        direction (str): The flip direction, either "horizontal" or
            "vertical" or "diagonal".

    Returns:
        ndarray: The flipped image (a view/new array; input unchanged).
    """
    assert direction in ['horizontal', 'vertical', 'diagonal']
    # horizontal -> flip columns, vertical -> flip rows,
    # diagonal -> flip both axes.
    axis_by_direction = {'horizontal': 1, 'vertical': 0, 'diagonal': (0, 1)}
    return np.flip(img, axis=axis_by_direction[direction])
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
def imflip_(img, direction='horizontal'):
    """Inplace flip an image horizontally or vertically.

    Args:
        img (ndarray): Image to be flipped (modified in place).
        direction (str): The flip direction, either "horizontal" or
            "vertical" or "diagonal".

    Returns:
        ndarray: The flipped image (inplace).
    """
    assert direction in ['horizontal', 'vertical', 'diagonal']
    # cv2 flip codes: 1 = around y-axis, 0 = around x-axis, -1 = both.
    flip_code = {'horizontal': 1, 'vertical': 0, 'diagonal': -1}[direction]
    # Passing `img` as dst makes the flip happen in place.
    return cv2.flip(img, flip_code, img)
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
def imrotate(img,
             angle,
             center=None,
             scale=1.0,
             border_value=0,
             interpolation='bilinear',
             auto_bound=False):
    """Rotate an image.

    Args:
        img (ndarray): Image to be rotated.
        angle (float): Rotation angle in degrees, positive values mean
            clockwise rotation.
        center (tuple[float], optional): Center point (w, h) of the rotation in
            the source image. If not specified, the center of the image will be
            used.
        scale (float): Isotropic scale factor.
        border_value (int): Border value.
        interpolation (str): Same as :func:`resize`.
        auto_bound (bool): Whether to adjust the image size to cover the whole
            rotated image.
    Returns:
        ndarray: The rotated image.
    """
    if center is not None and auto_bound:
        raise ValueError('`auto_bound` conflicts with `center`')
    h, w = img.shape[:2]
    if center is None:
        # Rotate about the geometric center of the pixel grid.
        center = ((w - 1) * 0.5, (h - 1) * 0.5)
    assert isinstance(center, tuple)

    # cv2 treats positive angles as counter-clockwise; negate so this
    # function's documented clockwise convention holds.
    matrix = cv2.getRotationMatrix2D(center, -angle, scale)
    if auto_bound:
        cos = np.abs(matrix[0, 0])
        sin = np.abs(matrix[0, 1])
        # Bounding-box size of the fully rotated image.
        new_w = h * sin + w * cos
        new_h = h * cos + w * sin
        # Translate so the enlarged canvas stays centered on the result.
        matrix[0, 2] += (new_w - w) * 0.5
        matrix[1, 2] += (new_h - h) * 0.5
        w = int(np.round(new_w))
        h = int(np.round(new_h))
    rotated = cv2.warpAffine(
        img,
        matrix, (w, h),
        flags=cv2_interp_codes[interpolation],
        borderValue=border_value)
    return rotated
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
def bbox_clip(bboxes, img_shape):
    """Clip bboxes to fit the image shape.

    Args:
        bboxes (ndarray): Shape (..., 4*k)
        img_shape (tuple[int]): (height, width) of the image.

    Returns:
        ndarray: Clipped bboxes.
    """
    assert bboxes.shape[-1] % 4 == 0
    # Per-coordinate upper bound: even indices are x (capped at w - 1),
    # odd indices are y (capped at h - 1).
    upper = np.empty(bboxes.shape[-1], dtype=bboxes.dtype)
    upper[0::2] = img_shape[1] - 1
    upper[1::2] = img_shape[0] - 1
    return np.clip(bboxes, 0, upper)
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
def bbox_scaling(bboxes, scale, clip_shape=None):
    """Scaling bboxes w.r.t the box center.

    Args:
        bboxes (ndarray): Shape(..., 4).
        scale (float): Scaling factor.
        clip_shape (tuple[int], optional): If specified, bboxes that exceed
            the boundary will be clipped according to the given shape (h, w).

    Returns:
        ndarray: Scaled bboxes.
    """
    if float(scale) == 1.0:
        scaled_bboxes = bboxes.copy()
    else:
        # Box extents in inclusive pixel coordinates, hence the +1.
        widths = bboxes[..., 2] - bboxes[..., 0] + 1
        heights = bboxes[..., 3] - bboxes[..., 1] + 1
        dw = (widths * (scale - 1)) * 0.5
        dh = (heights * (scale - 1)) * 0.5
        # Grow (or shrink) symmetrically about each box center.
        scaled_bboxes = bboxes + np.stack((-dw, -dh, dw, dh), axis=-1)
    if clip_shape is None:
        return scaled_bboxes
    return bbox_clip(scaled_bboxes, clip_shape)
|
| 384 |
+
|
| 385 |
+
|
| 386 |
+
def imcrop(img, bboxes, scale=1.0, pad_fill=None):
    """Crop image patches.

    3 steps: scale the bboxes -> clip bboxes -> crop and pad.

    Args:
        img (ndarray): Image to be cropped.
        bboxes (ndarray): Shape (k, 4) or (4, ), location of cropped bboxes.
        scale (float, optional): Scale ratio of bboxes, the default value
            1.0 means no padding.
        pad_fill (Number | list[Number]): Value to be filled for padding.
            Default: None, which means no padding.

    Returns:
        list[ndarray] | ndarray: The cropped image patches.
    """
    chn = 1 if img.ndim == 2 else img.shape[2]
    if pad_fill is not None:
        # Broadcast a scalar fill value to one value per channel.
        if isinstance(pad_fill, (int, float)):
            pad_fill = [pad_fill for _ in range(chn)]
        assert len(pad_fill) == chn

    # Promote a single box (4,) to (1, 4) so the loop below is uniform.
    _bboxes = bboxes[None, ...] if bboxes.ndim == 1 else bboxes
    scaled_bboxes = bbox_scaling(_bboxes, scale).astype(np.int32)
    clipped_bbox = bbox_clip(scaled_bboxes, img.shape)

    patches = []
    for i in range(clipped_bbox.shape[0]):
        x1, y1, x2, y2 = tuple(clipped_bbox[i, :])
        if pad_fill is None:
            # Coordinates are inclusive, hence the +1 in the slice end.
            patch = img[y1:y2 + 1, x1:x2 + 1, ...]
        else:
            # Allocate a patch the size of the unclipped (scaled) box,
            # filled with pad_fill, then paste the visible image region
            # at the right offset.
            _x1, _y1, _x2, _y2 = tuple(scaled_bboxes[i, :])
            if chn == 1:
                patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1)
            else:
                patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1, chn)
            patch = np.array(
                pad_fill, dtype=img.dtype) * np.ones(
                    patch_shape, dtype=img.dtype)
            # Offsets are nonzero only when the scaled box starts outside
            # the image on the left/top.
            x_start = 0 if _x1 >= 0 else -_x1
            y_start = 0 if _y1 >= 0 else -_y1
            w = x2 - x1 + 1
            h = y2 - y1 + 1
            patch[y_start:y_start + h, x_start:x_start + w,
                  ...] = img[y1:y1 + h, x1:x1 + w, ...]
        patches.append(patch)

    # Mirror the input arity: single box in -> single patch out.
    if bboxes.ndim == 1:
        return patches[0]
    else:
        return patches
|
| 438 |
+
|
| 439 |
+
|
| 440 |
+
def impad(img,
          *,
          shape=None,
          padding=None,
          pad_val=0,
          padding_mode='constant'):
    """Pad the given image to a certain shape or pad on all sides with
    specified padding mode and padding value.

    Args:
        img (ndarray): Image to be padded.
        shape (tuple[int]): Expected padding shape (h, w). Default: None.
        padding (int or tuple[int]): Padding on each border. If a single int is
            provided this is used to pad all borders. If tuple of length 2 is
            provided this is the padding on left/right and top/bottom
            respectively. If a tuple of length 4 is provided this is the
            padding for the left, top, right and bottom borders respectively.
            Default: None. Note that `shape` and `padding` can not be both
            set.
        pad_val (Number | Sequence[Number]): Values to be filled in padding
            areas when padding_mode is 'constant'. Default: 0.
        padding_mode (str): Type of padding. Should be: constant, edge,
            reflect or symmetric. Default: constant.

            - constant: pads with a constant value, this value is specified
              with pad_val.
            - edge: pads with the last value at the edge of the image.
            - reflect: pads with reflection of image without repeating the
              last value on the edge. For example, padding [1, 2, 3, 4]
              with 2 elements on both sides in reflect mode will result
              in [3, 2, 1, 2, 3, 4, 3, 2].
            - symmetric: pads with reflection of image repeating the last
              value on the edge. For example, padding [1, 2, 3, 4] with
              2 elements on both sides in symmetric mode will result in
              [2, 1, 1, 2, 3, 4, 4, 3]

    Returns:
        ndarray: The padded image.
    """

    # Exactly one of `shape` / `padding` may be given.
    assert (shape is not None) ^ (padding is not None)
    if shape is not None:
        # A target shape pads only on the right and bottom.
        padding = (0, 0, shape[1] - img.shape[1], shape[0] - img.shape[0])

    # check pad_val
    if isinstance(pad_val, tuple):
        assert len(pad_val) == img.shape[-1]
    elif not isinstance(pad_val, numbers.Number):
        raise TypeError('pad_val must be a int or a tuple. '
                        f'But received {type(pad_val)}')

    # check padding; normalize to (left, top, right, bottom)
    if isinstance(padding, tuple) and len(padding) in [2, 4]:
        if len(padding) == 2:
            padding = (padding[0], padding[1], padding[0], padding[1])
    elif isinstance(padding, numbers.Number):
        padding = (padding, padding, padding, padding)
    else:
        raise ValueError('Padding must be a int or a 2, or 4 element tuple.'
                         f'But received {padding}')

    # check padding mode
    assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric']

    border_type = {
        'constant': cv2.BORDER_CONSTANT,
        'edge': cv2.BORDER_REPLICATE,
        'reflect': cv2.BORDER_REFLECT_101,
        'symmetric': cv2.BORDER_REFLECT
    }
    # cv2.copyMakeBorder takes (top, bottom, left, right), hence the
    # index reordering from (left, top, right, bottom).
    img = cv2.copyMakeBorder(
        img,
        padding[1],
        padding[3],
        padding[0],
        padding[2],
        border_type[padding_mode],
        value=pad_val)

    return img
|
| 520 |
+
|
| 521 |
+
|
| 522 |
+
def impad_to_multiple(img, divisor, pad_val=0):
    """Pad an image to ensure each edge to be multiple to some number.

    Args:
        img (ndarray): Image to be padded.
        divisor (int): Padded image edges will be multiple to divisor.
        pad_val (Number | Sequence[Number]): Same as :func:`impad`.

    Returns:
        ndarray: The padded image.
    """
    # Round each spatial edge up to the nearest multiple of `divisor`.
    target_h = int(np.ceil(img.shape[0] / divisor)) * divisor
    target_w = int(np.ceil(img.shape[1] / divisor)) * divisor
    return impad(img, shape=(target_h, target_w), pad_val=pad_val)
|
| 536 |
+
|
| 537 |
+
|
| 538 |
+
def cutout(img, shape, pad_val=0):
    """Randomly cut out a rectangle from the original img.

    Args:
        img (ndarray): Image to be cutout.
        shape (int | tuple[int]): Expected cutout shape (h, w). If given as a
            int, the value will be used for both h and w.
        pad_val (int | float | tuple[int | float]): Values to be filled in the
            cut area. Defaults to 0.

    Returns:
        ndarray: The cutout image (a copy; input is not modified).
    """

    channels = 1 if img.ndim == 2 else img.shape[2]
    if isinstance(shape, int):
        cut_h = cut_w = shape
    else:
        assert isinstance(shape, tuple) and len(shape) == 2, \
            f'shape must be a int or a tuple with length 2, but got type ' \
            f'{type(shape)} instead.'
        cut_h, cut_w = shape
    # Normalize pad_val to one value per channel.
    if isinstance(pad_val, (int, float)):
        pad_val = tuple([pad_val] * channels)
    elif isinstance(pad_val, tuple):
        assert len(pad_val) == channels, \
            'Expected the num of elements in tuple equals the channels' \
            'of input image. Found {} vs {}'.format(
                len(pad_val), channels)
    else:
        raise TypeError(f'Invalid type {type(pad_val)} for `pad_val`')

    img_h, img_w = img.shape[:2]
    # Draw the cut center uniformly over the image (y first, then x —
    # the RNG call order is part of the observable behavior).
    y0 = np.random.uniform(img_h)
    x0 = np.random.uniform(img_w)

    # Clamp the rectangle to the image bounds.
    y1 = int(max(0, y0 - cut_h / 2.))
    x1 = int(max(0, x0 - cut_w / 2.))
    y2 = min(img_h, y1 + cut_h)
    x2 = min(img_w, x1 + cut_w)

    if img.ndim == 2:
        patch_shape = (y2 - y1, x2 - x1)
    else:
        patch_shape = (y2 - y1, x2 - x1, channels)

    img_cutout = img.copy()
    fill_patch = np.array(
        pad_val, dtype=img.dtype) * np.ones(
            patch_shape, dtype=img.dtype)
    img_cutout[y1:y2, x1:x2, ...] = fill_patch

    return img_cutout
|
| 591 |
+
|
| 592 |
+
|
| 593 |
+
def _get_shear_matrix(magnitude, direction='horizontal'):
|
| 594 |
+
"""Generate the shear matrix for transformation.
|
| 595 |
+
|
| 596 |
+
Args:
|
| 597 |
+
magnitude (int | float): The magnitude used for shear.
|
| 598 |
+
direction (str): The flip direction, either "horizontal"
|
| 599 |
+
or "vertical".
|
| 600 |
+
|
| 601 |
+
Returns:
|
| 602 |
+
ndarray: The shear matrix with dtype float32.
|
| 603 |
+
"""
|
| 604 |
+
if direction == 'horizontal':
|
| 605 |
+
shear_matrix = np.float32([[1, magnitude, 0], [0, 1, 0]])
|
| 606 |
+
elif direction == 'vertical':
|
| 607 |
+
shear_matrix = np.float32([[1, 0, 0], [magnitude, 1, 0]])
|
| 608 |
+
return shear_matrix
|
| 609 |
+
|
| 610 |
+
|
| 611 |
+
def imshear(img,
            magnitude,
            direction='horizontal',
            border_value=0,
            interpolation='bilinear'):
    """Shear an image.

    Args:
        img (ndarray): Image to be sheared with format (h, w)
            or (h, w, c).
        magnitude (int | float): The magnitude used for shear.
        direction (str): The flip direction, either "horizontal"
            or "vertical".
        border_value (int | tuple[int]): Value used in case of a
            constant border.
        interpolation (str): Same as :func:`resize`.

    Returns:
        ndarray: The sheared image.
    """
    assert direction in ['horizontal',
                         'vertical'], f'Invalid direction: {direction}'
    height, width = img.shape[:2]
    if img.ndim == 2:
        channels = 1
    elif img.ndim == 3:
        channels = img.shape[-1]
    else:
        # Bug fix: the original left `channels` unbound here, raising a
        # confusing UnboundLocalError below; fail fast with a clear message.
        raise ValueError(f'Invalid image dimensions: {img.ndim}')
    if isinstance(border_value, int):
        border_value = tuple([border_value] * channels)
    elif isinstance(border_value, tuple):
        # Bug fix: the original implicitly-concatenated message was missing
        # a space between "channels" and "of".
        assert len(border_value) == channels, \
            'Expected the num of elements in tuple equals the channels ' \
            'of input image. Found {} vs {}'.format(
                len(border_value), channels)
    else:
        raise ValueError(
            f'Invalid type {type(border_value)} for `border_value`')
    shear_matrix = _get_shear_matrix(magnitude, direction)
    sheared = cv2.warpAffine(
        img,
        shear_matrix,
        (width, height),
        # Note case when the number elements in `border_value`
        # greater than 3 (e.g. shearing masks whose channels large
        # than 3) will raise TypeError in `cv2.warpAffine`.
        # Here simply slice the first 3 values in `border_value`.
        borderValue=border_value[:3],
        flags=cv2_interp_codes[interpolation])
    return sheared
|
| 660 |
+
|
| 661 |
+
|
| 662 |
+
def _get_translate_matrix(offset, direction='horizontal'):
|
| 663 |
+
"""Generate the translate matrix.
|
| 664 |
+
|
| 665 |
+
Args:
|
| 666 |
+
offset (int | float): The offset used for translate.
|
| 667 |
+
direction (str): The translate direction, either
|
| 668 |
+
"horizontal" or "vertical".
|
| 669 |
+
|
| 670 |
+
Returns:
|
| 671 |
+
ndarray: The translate matrix with dtype float32.
|
| 672 |
+
"""
|
| 673 |
+
if direction == 'horizontal':
|
| 674 |
+
translate_matrix = np.float32([[1, 0, offset], [0, 1, 0]])
|
| 675 |
+
elif direction == 'vertical':
|
| 676 |
+
translate_matrix = np.float32([[1, 0, 0], [0, 1, offset]])
|
| 677 |
+
return translate_matrix
|
| 678 |
+
|
| 679 |
+
|
| 680 |
+
def imtranslate(img,
                offset,
                direction='horizontal',
                border_value=0,
                interpolation='bilinear'):
    """Translate an image.

    Args:
        img (ndarray): Image to be translated with format
            (h, w) or (h, w, c).
        offset (int | float): The offset used for translate.
        direction (str): The translate direction, either "horizontal"
            or "vertical".
        border_value (int | tuple[int]): Value used in case of a
            constant border.
        interpolation (str): Same as :func:`resize`.

    Returns:
        ndarray: The translated image.
    """
    assert direction in ['horizontal',
                         'vertical'], f'Invalid direction: {direction}'
    height, width = img.shape[:2]
    if img.ndim == 2:
        channels = 1
    elif img.ndim == 3:
        channels = img.shape[-1]
    else:
        # Bug fix: fail fast instead of hitting an UnboundLocalError on
        # `channels` below for images that are neither (h, w) nor (h, w, c).
        raise ValueError(f'Invalid image dimensions: {img.ndim}')
    if isinstance(border_value, int):
        border_value = tuple([border_value] * channels)
    elif isinstance(border_value, tuple):
        # Bug fix: the original implicitly-concatenated message was missing
        # a space between "channels" and "of".
        assert len(border_value) == channels, \
            'Expected the num of elements in tuple equals the channels ' \
            'of input image. Found {} vs {}'.format(
                len(border_value), channels)
    else:
        raise ValueError(
            f'Invalid type {type(border_value)} for `border_value`.')
    translate_matrix = _get_translate_matrix(offset, direction)
    translated = cv2.warpAffine(
        img,
        translate_matrix,
        (width, height),
        # Note case when the number elements in `border_value`
        # greater than 3 (e.g. translating masks whose channels
        # large than 3) will raise TypeError in `cv2.warpAffine`.
        # Here simply slice the first 3 values in `border_value`.
        borderValue=border_value[:3],
        flags=cv2_interp_codes[interpolation])
    return translated
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/image/io.py
ADDED
|
@@ -0,0 +1,258 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import io
|
| 3 |
+
import os.path as osp
|
| 4 |
+
from pathlib import Path
|
| 5 |
+
|
| 6 |
+
import cv2
|
| 7 |
+
import numpy as np
|
| 8 |
+
from cv2 import (IMREAD_COLOR, IMREAD_GRAYSCALE, IMREAD_IGNORE_ORIENTATION,
|
| 9 |
+
IMREAD_UNCHANGED)
|
| 10 |
+
|
| 11 |
+
from annotator.uniformer.mmcv.utils import check_file_exist, is_str, mkdir_or_exist
|
| 12 |
+
|
| 13 |
+
try:
|
| 14 |
+
from turbojpeg import TJCS_RGB, TJPF_BGR, TJPF_GRAY, TurboJPEG
|
| 15 |
+
except ImportError:
|
| 16 |
+
TJCS_RGB = TJPF_GRAY = TJPF_BGR = TurboJPEG = None
|
| 17 |
+
|
| 18 |
+
try:
|
| 19 |
+
from PIL import Image, ImageOps
|
| 20 |
+
except ImportError:
|
| 21 |
+
Image = None
|
| 22 |
+
|
| 23 |
+
try:
|
| 24 |
+
import tifffile
|
| 25 |
+
except ImportError:
|
| 26 |
+
tifffile = None
|
| 27 |
+
|
| 28 |
+
jpeg = None
|
| 29 |
+
supported_backends = ['cv2', 'turbojpeg', 'pillow', 'tifffile']
|
| 30 |
+
|
| 31 |
+
imread_flags = {
|
| 32 |
+
'color': IMREAD_COLOR,
|
| 33 |
+
'grayscale': IMREAD_GRAYSCALE,
|
| 34 |
+
'unchanged': IMREAD_UNCHANGED,
|
| 35 |
+
'color_ignore_orientation': IMREAD_IGNORE_ORIENTATION | IMREAD_COLOR,
|
| 36 |
+
'grayscale_ignore_orientation':
|
| 37 |
+
IMREAD_IGNORE_ORIENTATION | IMREAD_GRAYSCALE
|
| 38 |
+
}
|
| 39 |
+
|
| 40 |
+
imread_backend = 'cv2'
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def use_backend(backend):
    """Select a backend for image decoding.

    Args:
        backend (str): The image decoding backend type. Options are `cv2`,
            `pillow`, `turbojpeg` (see https://github.com/lilohuang/PyTurboJPEG)
            and `tifffile`. `turbojpeg` is faster but it only supports `.jpeg`
            file format.
    """
    assert backend in supported_backends
    # Switch the module-level default first, then verify the optional
    # dependency for the chosen backend is actually importable.
    global imread_backend
    imread_backend = backend
    if backend == 'turbojpeg':
        if TurboJPEG is None:
            raise ImportError('`PyTurboJPEG` is not installed')
        # Lazily build a single shared decoder instance.
        global jpeg
        if jpeg is None:
            jpeg = TurboJPEG()
    elif backend == 'pillow' and Image is None:
        raise ImportError('`Pillow` is not installed')
    elif backend == 'tifffile' and tifffile is None:
        raise ImportError('`tifffile` is not installed')
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def _jpegflag(flag='color', channel_order='bgr'):
    """Map an mmcv flag / channel order to the matching TurboJPEG constant."""
    channel_order = channel_order.lower()
    if channel_order not in ('rgb', 'bgr'):
        raise ValueError('channel order must be either "rgb" or "bgr"')

    # Grayscale ignores the channel order entirely.
    if flag == 'grayscale':
        return TJPF_GRAY
    if flag == 'color':
        return TJPF_BGR if channel_order == 'bgr' else TJCS_RGB
    raise ValueError('flag must be "color" or "grayscale"')
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def _pillow2array(img, flag='color', channel_order='bgr'):
    """Convert a pillow image to numpy array.

    Args:
        img (:obj:`PIL.Image.Image`): The image loaded using PIL
        flag (str): Flags specifying the color type of a loaded image,
            candidates are 'color', 'grayscale' and 'unchanged'.
            Default to 'color'.
        channel_order (str): The channel order of the output image array,
            candidates are 'bgr' and 'rgb'. Default to 'bgr'.

    Returns:
        np.ndarray: The converted numpy array
    """
    channel_order = channel_order.lower()
    if channel_order not in ['rgb', 'bgr']:
        raise ValueError('channel order must be either "rgb" or "bgr"')

    if flag == 'unchanged':
        # 'unchanged' keeps whatever mode PIL loaded (may be >3 channels);
        # only the first three channels are swapped RGB -> BGR in place.
        array = np.array(img)
        if array.ndim >= 3 and array.shape[2] >= 3:  # color image
            array[:, :, :3] = array[:, :, (2, 1, 0)]  # RGB to BGR
    else:
        # Handle exif orientation tag
        if flag in ['color', 'grayscale']:
            img = ImageOps.exif_transpose(img)
        # If the image mode is not 'RGB', convert it to 'RGB' first.
        if img.mode != 'RGB':
            if img.mode != 'LA':
                # Most formats except 'LA' can be directly converted to RGB
                img = img.convert('RGB')
            else:
                # When the mode is 'LA', the default conversion will fill in
                # the canvas with black, which sometimes shadows black objects
                # in the foreground.
                #
                # Therefore, a random color (124, 117, 104) is used for canvas
                img_rgba = img.convert('RGBA')
                img = Image.new('RGB', img_rgba.size, (124, 117, 104))
                img.paste(img_rgba, mask=img_rgba.split()[3])  # 3 is alpha
        if flag in ['color', 'color_ignore_orientation']:
            array = np.array(img)
            if channel_order != 'rgb':
                array = array[:, :, ::-1]  # RGB to BGR
        elif flag in ['grayscale', 'grayscale_ignore_orientation']:
            # Collapse to a single-channel luminance image.
            img = img.convert('L')
            array = np.array(img)
        else:
            raise ValueError(
                'flag must be "color", "grayscale", "unchanged", '
                f'"color_ignore_orientation" or "grayscale_ignore_orientation"'
                f' but got {flag}')
    return array
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
def imread(img_or_path, flag='color', channel_order='bgr', backend=None):
    """Read an image.

    Args:
        img_or_path (ndarray or str or Path): Either a numpy array or str or
            pathlib.Path. If it is a numpy array (loaded image), then
            it will be returned as is.
        flag (str): Flags specifying the color type of a loaded image,
            candidates are `color`, `grayscale`, `unchanged`,
            `color_ignore_orientation` and `grayscale_ignore_orientation`.
            By default, `cv2` and `pillow` backend would rotate the image
            according to its EXIF info unless called with `unchanged` or
            `*_ignore_orientation` flags. `turbojpeg` and `tifffile` backend
            always ignore image's EXIF info regardless of the flag.
            The `turbojpeg` backend only supports `color` and `grayscale`.
        channel_order (str): Order of channel, candidates are `bgr` and `rgb`.
        backend (str | None): The image decoding backend type. Options are
            `cv2`, `pillow`, `turbojpeg`, `tifffile`, `None`.
            If backend is None, the global imread_backend specified by
            ``mmcv.use_backend()`` will be used. Default: None.

    Returns:
        ndarray: Loaded image array.
    """

    if backend is None:
        backend = imread_backend
    if backend not in supported_backends:
        # Bug fix: the old message hard-coded three backends and omitted
        # 'tifffile'; report the actual supported set instead.
        raise ValueError(f'backend: {backend} is not supported. Supported '
                         f'backends are {supported_backends}')
    if isinstance(img_or_path, Path):
        img_or_path = str(img_or_path)

    if isinstance(img_or_path, np.ndarray):
        # Already-decoded arrays are passed through untouched.
        return img_or_path
    elif is_str(img_or_path):
        check_file_exist(img_or_path,
                         f'img file does not exist: {img_or_path}')
        if backend == 'turbojpeg':
            with open(img_or_path, 'rb') as in_file:
                img = jpeg.decode(in_file.read(),
                                  _jpegflag(flag, channel_order))
                if img.shape[-1] == 1:
                    # Squeeze the trailing channel axis for grayscale.
                    img = img[:, :, 0]
            return img
        elif backend == 'pillow':
            img = Image.open(img_or_path)
            img = _pillow2array(img, flag, channel_order)
            return img
        elif backend == 'tifffile':
            img = tifffile.imread(img_or_path)
            return img
        else:
            flag = imread_flags[flag] if is_str(flag) else flag
            img = cv2.imread(img_or_path, flag)
            if flag == IMREAD_COLOR and channel_order == 'rgb':
                # In-place BGR -> RGB conversion (dst == src).
                cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)
            return img
    else:
        raise TypeError('"img" must be a numpy array or a str or '
                        'a pathlib.Path object')
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
def imfrombytes(content, flag='color', channel_order='bgr', backend=None):
    """Read an image from bytes.

    Args:
        content (bytes): Image bytes got from files or other streams.
        flag (str): Same as :func:`imread`.
        channel_order (str): Order of channel, candidates are `bgr` and `rgb`.
        backend (str | None): The image decoding backend type. Options are
            `cv2`, `pillow`, `turbojpeg`, `None`. If backend is None, the
            global imread_backend specified by ``mmcv.use_backend()`` will be
            used. Default: None.

    Returns:
        ndarray: Loaded image array.
    """

    if backend is None:
        backend = imread_backend
    if backend not in supported_backends:
        # Bug fix: the old message hard-coded three backends and omitted
        # 'tifffile'; report the actual supported set instead.
        raise ValueError(f'backend: {backend} is not supported. Supported '
                         f'backends are {supported_backends}')
    if backend == 'turbojpeg':
        img = jpeg.decode(content, _jpegflag(flag, channel_order))
        if img.shape[-1] == 1:
            # Squeeze the trailing channel axis for grayscale.
            img = img[:, :, 0]
        return img
    elif backend == 'pillow':
        buff = io.BytesIO(content)
        img = Image.open(buff)
        img = _pillow2array(img, flag, channel_order)
        return img
    else:
        # NOTE(review): unlike imread, the 'tifffile' backend falls through
        # to this cv2 decode path — confirm whether a dedicated tifffile
        # branch is intended here.
        img_np = np.frombuffer(content, np.uint8)
        flag = imread_flags[flag] if is_str(flag) else flag
        img = cv2.imdecode(img_np, flag)
        if flag == IMREAD_COLOR and channel_order == 'rgb':
            cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)
        return img
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
def imwrite(img, file_path, params=None, auto_mkdir=True):
    """Write image to file.

    Args:
        img (ndarray): Image array to be written.
        file_path (str): Image file path.
        params (None or list): Same as opencv :func:`imwrite` interface.
        auto_mkdir (bool): If the parent folder of `file_path` does not exist,
            whether to create it automatically.

    Returns:
        bool: Successful or not.
    """
    if auto_mkdir:
        # Ensure the destination directory exists before handing off to cv2,
        # which does not create directories itself.
        parent_dir = osp.abspath(osp.dirname(file_path))
        mkdir_or_exist(parent_dir)
    return cv2.imwrite(file_path, img, params)
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/image/misc.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import numpy as np
|
| 3 |
+
|
| 4 |
+
import annotator.uniformer.mmcv as mmcv
|
| 5 |
+
|
| 6 |
+
try:
|
| 7 |
+
import torch
|
| 8 |
+
except ImportError:
|
| 9 |
+
torch = None
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True):
    """Convert tensor to 3-channel images.

    Args:
        tensor (torch.Tensor): Tensor that contains multiple images, shape (
            N, C, H, W).
        mean (tuple[float], optional): Mean of images. Defaults to (0, 0, 0).
        std (tuple[float], optional): Standard deviation of images.
            Defaults to (1, 1, 1).
        to_rgb (bool, optional): Whether the tensor was converted to RGB
            format in the first place. If so, convert it back to BGR.
            Defaults to True.

    Returns:
        list[np.ndarray]: A list that contains multiple images.
    """

    if torch is None:
        raise RuntimeError('pytorch is not installed')
    assert torch.is_tensor(tensor) and tensor.ndim == 4
    assert len(mean) == 3
    assert len(std) == 3

    mean_arr = np.array(mean, dtype=np.float32)
    std_arr = np.array(std, dtype=np.float32)
    images = []
    for idx in range(tensor.size(0)):
        # (C, H, W) -> (H, W, C) on the CPU before denormalizing.
        hwc = tensor[idx, ...].cpu().numpy().transpose(1, 2, 0)
        restored = mmcv.imdenormalize(
            hwc, mean_arr, std_arr, to_bgr=to_rgb).astype(np.uint8)
        images.append(np.ascontiguousarray(restored))
    return images
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/image/photometric.py
ADDED
|
@@ -0,0 +1,428 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import cv2
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
from ..utils import is_tuple_of
|
| 6 |
+
from .colorspace import bgr2gray, gray2bgr
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def imnormalize(img, mean, std, to_rgb=True):
    """Normalize an image with mean and std.

    Args:
        img (ndarray): Image to be normalized.
        mean (ndarray): The mean to be used for normalize.
        std (ndarray): The std to be used for normalize.
        to_rgb (bool): Whether to convert to rgb.

    Returns:
        ndarray: The normalized image.
    """
    # Work on a float32 copy so the in-place helper never mutates `img`.
    working = img.copy().astype(np.float32)
    return imnormalize_(working, mean, std, to_rgb)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def imnormalize_(img, mean, std, to_rgb=True):
    """Inplace normalize an image with mean and std.

    Args:
        img (ndarray): Image to be normalized. Must be a float array; it is
            modified in place and also returned.
        mean (ndarray): The mean to be used for normalize.
        std (ndarray): The std to be used for normalize.
        to_rgb (bool): Whether to convert to rgb.

    Returns:
        ndarray: The normalized image (same object as ``img``).
    """
    # cv2 inplace normalization does not accept uint8
    assert img.dtype != np.uint8
    # Reshape to (1, C) so cv2 broadcasts per-channel; multiply by the
    # reciprocal of std rather than dividing.
    mean = np.float64(mean.reshape(1, -1))
    stdinv = 1 / np.float64(std.reshape(1, -1))
    if to_rgb:
        cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)  # inplace
    cv2.subtract(img, mean, img)  # inplace
    cv2.multiply(img, stdinv, img)  # inplace
    return img
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def imdenormalize(img, mean, std, to_bgr=True):
    """Reverse :func:`imnormalize`: multiply by std, add mean, and
    optionally convert RGB back to BGR.

    Args:
        img (ndarray): Normalized (non-uint8) image; not modified.
        mean (ndarray): Per-channel mean that was subtracted.
        std (ndarray): Per-channel std that was divided by.
        to_bgr (bool): Whether to convert the result back to BGR.

    Returns:
        ndarray: The denormalized image.
    """
    # cv2 in-place arithmetic does not accept uint8.
    assert img.dtype != np.uint8
    mean = mean.reshape(1, -1).astype(np.float64)
    std = std.reshape(1, -1).astype(np.float64)
    img = cv2.multiply(img, std)  # make a copy
    cv2.add(img, mean, img)  # inplace
    if to_bgr:
        cv2.cvtColor(img, cv2.COLOR_RGB2BGR, img)  # inplace
    return img
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def iminvert(img):
    """Invert (negate) an image.

    Args:
        img (ndarray): Image to be inverted.

    Returns:
        ndarray: The inverted image.
    """
    # Subtract from a constant-255 array of the same dtype; for uint8 input
    # this maps v -> 255 - v exactly like the original formulation.
    max_img = np.full_like(img, 255)
    return max_img - img
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def solarize(img, thr=128):
    """Solarize an image (invert all pixel values above a threshold)

    Args:
        img (ndarray): Image to be solarized.
        thr (int): Threshold for solarizing (0 - 255).

    Returns:
        ndarray: The solarized image.
    """
    # Pixels below the threshold are kept; the rest are inverted.
    inverted = 255 - img
    return np.where(img < thr, img, inverted)
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def posterize(img, bits):
    """Posterize an image (reduce the number of bits for each color channel)

    Args:
        img (ndarray): Image to be posterized.
        bits (int): Number of bits (1 to 8) to use for posterizing.

    Returns:
        ndarray: The posterized image.
    """
    # Shifting right then left zeroes the low (8 - bits) bits of each value.
    shift = 8 - bits
    posterized = (img >> shift) << shift
    return posterized
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def adjust_color(img, alpha=1, beta=None, gamma=0):
    r"""It blends the source image and its gray image:

    .. math::
        output = img * alpha + gray\_img * beta + gamma

    Args:
        img (ndarray): The input source image.
        alpha (int | float): Weight for the source image. Default 1.
        beta (int | float): Weight for the converted gray image.
            If None, it's assigned the value (1 - `alpha`).
        gamma (int | float): Scalar added to each sum.
            Same as :func:`cv2.addWeighted`. Default 0.

    Returns:
        ndarray: Colored image which has the same size and dtype as input.
    """
    gray_img = bgr2gray(img)
    # Broadcast the single gray channel to 3 channels so it can be blended
    # with the 3-channel input by cv2.addWeighted.
    gray_img = np.tile(gray_img[..., None], [1, 1, 3])
    if beta is None:
        beta = 1 - alpha
    colored_img = cv2.addWeighted(img, alpha, gray_img, beta, gamma)
    if not colored_img.dtype == np.uint8:
        # Note when the dtype of `img` is not the default `np.uint8`
        # (e.g. np.float32), the value in `colored_img` got from cv2
        # is not guaranteed to be in range [0, 255], so here clip
        # is needed.
        colored_img = np.clip(colored_img, 0, 255)
    return colored_img
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
def imequalize(img):
    """Equalize the image histogram.

    This function applies a non-linear mapping to the input image,
    in order to create a uniform distribution of grayscale values
    in the output image.

    Args:
        img (ndarray): Image to be equalized. Assumed to have 3 channels
            indexed as the last axis (each is equalized independently).

    Returns:
        ndarray: The equalized image.
    """

    def _scale_channel(im, c):
        """Scale the data in the corresponding channel."""
        im = im[:, :, c]
        # Compute the histogram of the image channel.
        histo = np.histogram(im, 256, (0, 255))[0]
        # For computing the step, filter out the nonzeros.
        nonzero_histo = histo[histo > 0]
        step = (np.sum(nonzero_histo) - nonzero_histo[-1]) // 255
        if not step:
            # Degenerate histogram (e.g. near-constant channel):
            # fall back to an identity LUT.
            lut = np.array(range(256))
        else:
            # Compute the cumulative sum, shifted by step // 2
            # and then normalized by step.
            lut = (np.cumsum(histo) + (step // 2)) // step
            # Shift lut, prepending with 0.
            lut = np.concatenate([[0], lut[:-1]], 0)
            # handle potential integer overflow
            lut[lut > 255] = 255
        # If step is zero, return the original image.
        # Otherwise, index from lut.
        return np.where(np.equal(step, 0), im, lut[im])

    # Scales each channel independently and then stacks
    # the result.
    s1 = _scale_channel(img, 0)
    s2 = _scale_channel(img, 1)
    s3 = _scale_channel(img, 2)
    equalized_img = np.stack([s1, s2, s3], axis=-1)
    return equalized_img.astype(img.dtype)
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
def adjust_brightness(img, factor=1.):
    """Adjust image brightness.

    This function controls the brightness of an image. An
    enhancement factor of 0.0 gives a black image.
    A factor of 1.0 gives the original image. This function
    blends the source image and the degenerated black image:

    .. math::
        output = img * factor + degenerated * (1 - factor)

    Args:
        img (ndarray): Image to be brightened.
        factor (float): A value controls the enhancement.
            Factor 1.0 returns the original image, lower
            factors mean less color (brightness, contrast,
            etc), and higher values more. Default 1.

    Returns:
        ndarray: The brightened image.
    """
    black = np.zeros_like(img)
    # Blend in float32 to match PIL.ImageEnhance.Brightness as closely
    # as possible (beta = 1 - factor, gamma = 0).
    blended = cv2.addWeighted(
        img.astype(np.float32), factor, black.astype(np.float32),
        1 - factor, 0)
    blended = np.clip(blended, 0, 255)
    return blended.astype(img.dtype)
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
def adjust_contrast(img, factor=1.):
    """Adjust image contrast.

    This function controls the contrast of an image. An
    enhancement factor of 0.0 gives a solid grey
    image. A factor of 1.0 gives the original image. It
    blends the source image and the degenerated mean image:

    .. math::
        output = img * factor + degenerated * (1 - factor)

    Args:
        img (ndarray): Image to be contrasted. BGR order.
        factor (float): Same as :func:`mmcv.adjust_brightness`.

    Returns:
        ndarray: The contrasted image.
    """
    gray_img = bgr2gray(img)
    hist = np.histogram(gray_img, 256, (0, 255))[0]
    # Mean gray level over all pixels (np.sum(hist) == pixel count).
    mean = round(np.sum(gray_img) / np.sum(hist))
    degenerated = (np.ones_like(img[..., 0]) * mean).astype(img.dtype)
    degenerated = gray2bgr(degenerated)
    contrasted_img = cv2.addWeighted(
        img.astype(np.float32), factor, degenerated.astype(np.float32),
        1 - factor, 0)
    # Non-uint8 inputs may blend outside [0, 255]; clip before casting
    # back to the input dtype.
    contrasted_img = np.clip(contrasted_img, 0, 255)
    return contrasted_img.astype(img.dtype)
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
def auto_contrast(img, cutoff=0):
    """Auto adjust image contrast.

    Maximizes (normalizes) image contrast: first removes ``cutoff`` percent
    of the lightest and darkest pixels from the histogram, then remaps the
    remaining range so the darkest pixel becomes black (0) and the lightest
    becomes white (255).

    Args:
        img (ndarray): Image to be contrasted. BGR order.
        cutoff (int | float | tuple): The cutoff percent of the lightest and
            darkest pixels to be removed. If given as tuple, it shall be
            (low, high). Otherwise, the single value will be used for both.
            Defaults to 0.

    Returns:
        ndarray: The contrasted image.
    """

    def _stretch_channel(channel, bounds):
        # Histogram of this channel over the full 8-bit range.
        counts = np.histogram(channel, 256, (0, 255))[0]
        running = np.cumsum(counts)
        # Drop the requested percentage from each end of the histogram.
        lo_cut = running[-1] * bounds[0] // 100
        hi_cut = running[-1] - running[-1] * bounds[1] // 100
        running = np.clip(running, lo_cut, hi_cut) - lo_cut
        counts = np.concatenate([[running[0]], np.diff(running)], 0)

        # Darkest/lightest surviving levels define the stretch interval.
        surviving = np.nonzero(counts)[0]
        low, high = surviving[0], surviving[-1]
        # Everything was cut off: nothing left to stretch, keep the channel.
        if low >= high:
            return channel
        scale = 255.0 / (high - low)
        lut = np.clip(np.array(range(256)) * scale - low * scale, 0, 255)
        return lut[channel]

    if isinstance(cutoff, (int, float)):
        cutoff = (cutoff, cutoff)
    else:
        assert isinstance(cutoff, tuple), 'cutoff must be of type int, ' \
            f'float or tuple, but got {type(cutoff)} instead.'
    # Stretch each BGR channel independently, then restack.
    channels = [_stretch_channel(img[:, :, c], cutoff) for c in range(3)]
    return np.stack(channels, axis=-1).astype(img.dtype)
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
def adjust_sharpness(img, factor=1., kernel=None):
    """Adjust image sharpness.

    Blends ``img`` with a smoothed (degenerated) version of itself:

    .. math::
        output = img * factor + degenerated * (1 - factor)

    A factor of 0.0 gives the blurred image, 1.0 the original image, and
    2.0 a sharpened image.

    Args:
        img (ndarray): Image to be sharpened. BGR order.
        factor (float): Same as :func:`mmcv.adjust_brightness`.
        kernel (np.ndarray, optional): Filter kernel to be applied on the img
            to obtain the degenerated img. Defaults to None.

    Note:
        No value sanity check is enforced on the kernel set by users. So with
        an inappropriate kernel, the ``adjust_sharpness`` may fail to perform
        the function its name indicates but end up performing whatever
        transform determined by the kernel.

    Returns:
        ndarray: The sharpened image.
    """
    if kernel is None:
        # adopted from PIL.ImageFilter.SMOOTH
        kernel = np.array([[1., 1., 1.], [1., 5., 1.], [1., 1., 1.]]) / 13
    assert isinstance(kernel, np.ndarray), \
        f'kernel must be of type np.ndarray, but got {type(kernel)} instead.'
    assert kernel.ndim == 2, \
        f'kernel must have a dimension of 2, but got {kernel.ndim} instead.'

    smoothed = cv2.filter2D(img, -1, kernel)
    blended = cv2.addWeighted(img.astype(np.float32), factor,
                              smoothed.astype(np.float32), 1 - factor, 0)
    return np.clip(blended, 0, 255).astype(img.dtype)
|
| 336 |
+
|
| 337 |
+
|
| 338 |
+
def adjust_lighting(img, eigval, eigvec, alphastd=0.1, to_rgb=True):
    """AlexNet-style PCA jitter.

    This data augmentation is proposed in `ImageNet Classification with Deep
    Convolutional Neural Networks
    <https://dl.acm.org/doi/pdf/10.1145/3065386>`_.

    Args:
        img (ndarray): Image to be adjusted lighting. BGR order.
        eigval (ndarray): the eigenvalue of the convariance matrix of pixel
            values, respectively.
        eigvec (ndarray): the eigenvector of the convariance matrix of pixel
            values, respectively.
        alphastd (float): The standard deviation for distribution of alpha.
            Defaults to 0.1
        to_rgb (bool): Whether to convert img to rgb.

    Returns:
        ndarray: The adjusted image (float32).
    """
    assert isinstance(eigval, np.ndarray) and isinstance(eigvec, np.ndarray), \
        f'eigval and eigvec should both be of type np.ndarray, got ' \
        f'{type(eigval)} and {type(eigvec)} instead.'

    # eigvec columns must line up with the eigenvalues, one per RGB channel.
    assert eigval.ndim == 1 and eigvec.ndim == 2
    assert eigvec.shape == (3, eigval.shape[0])
    n_eigval = eigval.shape[0]
    assert isinstance(alphastd, float), 'alphastd should be of type float, ' \
        f'got {type(alphastd)} instead.'

    buf = img.copy().astype(np.float32)
    if to_rgb:
        cv2.cvtColor(buf, cv2.COLOR_BGR2RGB, buf)  # inplace

    # One random coefficient per eigenvector.
    alpha = np.random.normal(0, alphastd, n_eigval)
    shift = eigvec \
        * np.broadcast_to(alpha.reshape(1, n_eigval), (3, n_eigval)) \
        * np.broadcast_to(eigval.reshape(1, n_eigval), (3, n_eigval))
    # Collapse to a single per-channel shift applied to every pixel.
    shift = np.broadcast_to(shift.sum(axis=1).reshape(1, 1, 3), buf.shape)
    return buf + shift
|
| 379 |
+
|
| 380 |
+
|
| 381 |
+
def lut_transform(img, lut_table):
    """Transform array by look-up table.

    The function lut_transform fills the output array with values from the
    look-up table. Indices of the entries are taken from the input array.

    Note:
        Only a single 256-element table is accepted (enforced by the shape
        assert below); the same table is applied to every channel of a
        multi-channel input.

    Args:
        img (ndarray): Image to be transformed; all values must lie in
            [0, 255]. It is cast to uint8 before the lookup.
        lut_table (ndarray): look-up table of 256 elements applied to all
            channels of the input array.

    Returns:
        ndarray: The transformed image.
    """
    assert isinstance(img, np.ndarray)
    # Values outside [0, 255] would be corrupted by the uint8 cast below.
    assert 0 <= np.min(img) and np.max(img) <= 255
    assert isinstance(lut_table, np.ndarray)
    assert lut_table.shape == (256, )

    return cv2.LUT(np.array(img, dtype=np.uint8), lut_table)
|
| 403 |
+
|
| 404 |
+
|
| 405 |
+
def clahe(img, clip_limit=40.0, tile_grid_size=(8, 8)):
    """Use CLAHE method to process the image.

    See `ZUIDERVELD,K. Contrast Limited Adaptive Histogram Equalization[J].
    Graphics Gems, 1994:474-485.` for more information.

    Args:
        img (ndarray): Image to be processed. Must be a single-channel
            (2-D) array; it is cast to uint8 before equalization.
        clip_limit (float): Threshold for contrast limiting. Default: 40.0.
        tile_grid_size (tuple[int]): Size of grid for histogram equalization.
            Input image will be divided into equally sized rectangular tiles.
            It defines the number of tiles in row and column. Default: (8, 8).

    Returns:
        ndarray: The processed image.
    """
    assert isinstance(img, np.ndarray)
    # Only single-channel images are supported by this wrapper.
    assert img.ndim == 2
    assert isinstance(clip_limit, (float, int))
    assert is_tuple_of(tile_grid_size, int)
    assert len(tile_grid_size) == 2

    clahe = cv2.createCLAHE(clip_limit, tile_grid_size)
    return clahe.apply(np.array(img, dtype=np.uint8))
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/model_zoo/deprecated.json
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"resnet50_caffe": "detectron/resnet50_caffe",
|
| 3 |
+
"resnet50_caffe_bgr": "detectron2/resnet50_caffe_bgr",
|
| 4 |
+
"resnet101_caffe": "detectron/resnet101_caffe",
|
| 5 |
+
"resnet101_caffe_bgr": "detectron2/resnet101_caffe_bgr"
|
| 6 |
+
}
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/model_zoo/mmcls.json
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"vgg11": "https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_batch256_imagenet_20210208-4271cd6c.pth",
|
| 3 |
+
"vgg13": "https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_batch256_imagenet_20210208-4d1d6080.pth",
|
| 4 |
+
"vgg16": "https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_batch256_imagenet_20210208-db26f1a5.pth",
|
| 5 |
+
"vgg19": "https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_batch256_imagenet_20210208-e6920e4a.pth",
|
| 6 |
+
"vgg11_bn": "https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_bn_batch256_imagenet_20210207-f244902c.pth",
|
| 7 |
+
"vgg13_bn": "https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_bn_batch256_imagenet_20210207-1a8b7864.pth",
|
| 8 |
+
"vgg16_bn": "https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_bn_batch256_imagenet_20210208-7e55cd29.pth",
|
| 9 |
+
"vgg19_bn": "https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_bn_batch256_imagenet_20210208-da620c4f.pth",
|
| 10 |
+
"resnet18": "https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_batch256_imagenet_20200708-34ab8f90.pth",
|
| 11 |
+
"resnet34": "https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_batch256_imagenet_20200708-32ffb4f7.pth",
|
| 12 |
+
"resnet50": "https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_batch256_imagenet_20200708-cfb998bf.pth",
|
| 13 |
+
"resnet101": "https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_batch256_imagenet_20200708-753f3608.pth",
|
| 14 |
+
"resnet152": "https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_batch256_imagenet_20200708-ec25b1f9.pth",
|
| 15 |
+
"resnet50_v1d": "https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d50_batch256_imagenet_20200708-1ad0ce94.pth",
|
| 16 |
+
"resnet101_v1d": "https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d101_batch256_imagenet_20200708-9cb302ef.pth",
|
| 17 |
+
"resnet152_v1d": "https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d152_batch256_imagenet_20200708-e79cb6a2.pth",
|
| 18 |
+
"resnext50_32x4d": "https://download.openmmlab.com/mmclassification/v0/resnext/resnext50_32x4d_b32x8_imagenet_20210429-56066e27.pth",
|
| 19 |
+
"resnext101_32x4d": "https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x4d_b32x8_imagenet_20210506-e0fa3dd5.pth",
|
| 20 |
+
"resnext101_32x8d": "https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x8d_b32x8_imagenet_20210506-23a247d5.pth",
|
| 21 |
+
"resnext152_32x4d": "https://download.openmmlab.com/mmclassification/v0/resnext/resnext152_32x4d_b32x8_imagenet_20210524-927787be.pth",
|
| 22 |
+
"se-resnet50": "https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet50_batch256_imagenet_20200804-ae206104.pth",
|
| 23 |
+
"se-resnet101": "https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet101_batch256_imagenet_20200804-ba5b51d4.pth",
|
| 24 |
+
"resnest50": "https://download.openmmlab.com/mmclassification/v0/resnest/resnest50_imagenet_converted-1ebf0afe.pth",
|
| 25 |
+
"resnest101": "https://download.openmmlab.com/mmclassification/v0/resnest/resnest101_imagenet_converted-032caa52.pth",
|
| 26 |
+
"resnest200": "https://download.openmmlab.com/mmclassification/v0/resnest/resnest200_imagenet_converted-581a60f2.pth",
|
| 27 |
+
"resnest269": "https://download.openmmlab.com/mmclassification/v0/resnest/resnest269_imagenet_converted-59930960.pth",
|
| 28 |
+
"shufflenet_v1": "https://download.openmmlab.com/mmclassification/v0/shufflenet_v1/shufflenet_v1_batch1024_imagenet_20200804-5d6cec73.pth",
|
| 29 |
+
"shufflenet_v2": "https://download.openmmlab.com/mmclassification/v0/shufflenet_v2/shufflenet_v2_batch1024_imagenet_20200812-5bf4721e.pth",
|
| 30 |
+
"mobilenet_v2": "https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth"
|
| 31 |
+
}
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/model_zoo/open_mmlab.json
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"vgg16_caffe": "https://download.openmmlab.com/pretrain/third_party/vgg16_caffe-292e1171.pth",
|
| 3 |
+
"detectron/resnet50_caffe": "https://download.openmmlab.com/pretrain/third_party/resnet50_caffe-788b5fa3.pth",
|
| 4 |
+
"detectron2/resnet50_caffe": "https://download.openmmlab.com/pretrain/third_party/resnet50_msra-5891d200.pth",
|
| 5 |
+
"detectron/resnet101_caffe": "https://download.openmmlab.com/pretrain/third_party/resnet101_caffe-3ad79236.pth",
|
| 6 |
+
"detectron2/resnet101_caffe": "https://download.openmmlab.com/pretrain/third_party/resnet101_msra-6cc46731.pth",
|
| 7 |
+
"detectron2/resnext101_32x8d": "https://download.openmmlab.com/pretrain/third_party/resnext101_32x8d-1516f1aa.pth",
|
| 8 |
+
"resnext50_32x4d": "https://download.openmmlab.com/pretrain/third_party/resnext50-32x4d-0ab1a123.pth",
|
| 9 |
+
"resnext101_32x4d": "https://download.openmmlab.com/pretrain/third_party/resnext101_32x4d-a5af3160.pth",
|
| 10 |
+
"resnext101_64x4d": "https://download.openmmlab.com/pretrain/third_party/resnext101_64x4d-ee2c6f71.pth",
|
| 11 |
+
"contrib/resnet50_gn": "https://download.openmmlab.com/pretrain/third_party/resnet50_gn_thangvubk-ad1730dd.pth",
|
| 12 |
+
"detectron/resnet50_gn": "https://download.openmmlab.com/pretrain/third_party/resnet50_gn-9186a21c.pth",
|
| 13 |
+
"detectron/resnet101_gn": "https://download.openmmlab.com/pretrain/third_party/resnet101_gn-cac0ab98.pth",
|
| 14 |
+
"jhu/resnet50_gn_ws": "https://download.openmmlab.com/pretrain/third_party/resnet50_gn_ws-15beedd8.pth",
|
| 15 |
+
"jhu/resnet101_gn_ws": "https://download.openmmlab.com/pretrain/third_party/resnet101_gn_ws-3e3c308c.pth",
|
| 16 |
+
"jhu/resnext50_32x4d_gn_ws": "https://download.openmmlab.com/pretrain/third_party/resnext50_32x4d_gn_ws-0d87ac85.pth",
|
| 17 |
+
"jhu/resnext101_32x4d_gn_ws": "https://download.openmmlab.com/pretrain/third_party/resnext101_32x4d_gn_ws-34ac1a9e.pth",
|
| 18 |
+
"jhu/resnext50_32x4d_gn": "https://download.openmmlab.com/pretrain/third_party/resnext50_32x4d_gn-c7e8b754.pth",
|
| 19 |
+
"jhu/resnext101_32x4d_gn": "https://download.openmmlab.com/pretrain/third_party/resnext101_32x4d_gn-ac3bb84e.pth",
|
| 20 |
+
"msra/hrnetv2_w18_small": "https://download.openmmlab.com/pretrain/third_party/hrnetv2_w18_small-b5a04e21.pth",
|
| 21 |
+
"msra/hrnetv2_w18": "https://download.openmmlab.com/pretrain/third_party/hrnetv2_w18-00eb2006.pth",
|
| 22 |
+
"msra/hrnetv2_w32": "https://download.openmmlab.com/pretrain/third_party/hrnetv2_w32-dc9eeb4f.pth",
|
| 23 |
+
"msra/hrnetv2_w40": "https://download.openmmlab.com/pretrain/third_party/hrnetv2_w40-ed0b031c.pth",
|
| 24 |
+
"msra/hrnetv2_w48": "https://download.openmmlab.com/pretrain/third_party/hrnetv2_w48-d2186c55.pth",
|
| 25 |
+
"bninception_caffe": "https://download.openmmlab.com/pretrain/third_party/bn_inception_caffe-ed2e8665.pth",
|
| 26 |
+
"kin400/i3d_r50_f32s2_k400": "https://download.openmmlab.com/pretrain/third_party/i3d_r50_f32s2_k400-2c57e077.pth",
|
| 27 |
+
"kin400/nl3d_r50_f32s2_k400": "https://download.openmmlab.com/pretrain/third_party/nl3d_r50_f32s2_k400-fa7e7caa.pth",
|
| 28 |
+
"res2net101_v1d_26w_4s": "https://download.openmmlab.com/pretrain/third_party/res2net101_v1d_26w_4s_mmdetv2-f0a600f9.pth",
|
| 29 |
+
"regnetx_400mf": "https://download.openmmlab.com/pretrain/third_party/regnetx_400mf-a5b10d96.pth",
|
| 30 |
+
"regnetx_800mf": "https://download.openmmlab.com/pretrain/third_party/regnetx_800mf-1f4be4c7.pth",
|
| 31 |
+
"regnetx_1.6gf": "https://download.openmmlab.com/pretrain/third_party/regnetx_1.6gf-5791c176.pth",
|
| 32 |
+
"regnetx_3.2gf": "https://download.openmmlab.com/pretrain/third_party/regnetx_3.2gf-c2599b0f.pth",
|
| 33 |
+
"regnetx_4.0gf": "https://download.openmmlab.com/pretrain/third_party/regnetx_4.0gf-a88f671e.pth",
|
| 34 |
+
"regnetx_6.4gf": "https://download.openmmlab.com/pretrain/third_party/regnetx_6.4gf-006af45d.pth",
|
| 35 |
+
"regnetx_8.0gf": "https://download.openmmlab.com/pretrain/third_party/regnetx_8.0gf-3c68abe7.pth",
|
| 36 |
+
"regnetx_12gf": "https://download.openmmlab.com/pretrain/third_party/regnetx_12gf-4c2a3350.pth",
|
| 37 |
+
"resnet18_v1c": "https://download.openmmlab.com/pretrain/third_party/resnet18_v1c-b5776b93.pth",
|
| 38 |
+
"resnet50_v1c": "https://download.openmmlab.com/pretrain/third_party/resnet50_v1c-2cccc1ad.pth",
|
| 39 |
+
"resnet101_v1c": "https://download.openmmlab.com/pretrain/third_party/resnet101_v1c-e67eebb6.pth",
|
| 40 |
+
"mmedit/vgg16": "https://download.openmmlab.com/mmediting/third_party/vgg_state_dict.pth",
|
| 41 |
+
"mmedit/res34_en_nomixup": "https://download.openmmlab.com/mmediting/third_party/model_best_resnet34_En_nomixup.pth",
|
| 42 |
+
"mmedit/mobilenet_v2": "https://download.openmmlab.com/mmediting/third_party/mobilenet_v2.pth",
|
| 43 |
+
"contrib/mobilenet_v3_large": "https://download.openmmlab.com/pretrain/third_party/mobilenet_v3_large-bc2c3fd3.pth",
|
| 44 |
+
"contrib/mobilenet_v3_small": "https://download.openmmlab.com/pretrain/third_party/mobilenet_v3_small-47085aa1.pth",
|
| 45 |
+
"resnest50": "https://download.openmmlab.com/pretrain/third_party/resnest50_d2-7497a55b.pth",
|
| 46 |
+
"resnest101": "https://download.openmmlab.com/pretrain/third_party/resnest101_d2-f3b931b2.pth",
|
| 47 |
+
"resnest200": "https://download.openmmlab.com/pretrain/third_party/resnest200_d2-ca88e41f.pth",
|
| 48 |
+
"darknet53": "https://download.openmmlab.com/pretrain/third_party/darknet53-a628ea1b.pth",
|
| 49 |
+
"mmdet/mobilenet_v2": "https://download.openmmlab.com/mmdetection/v2.0/third_party/mobilenet_v2_batch256_imagenet-ff34753d.pth"
|
| 50 |
+
}
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/ops/deform_roi_pool.py
ADDED
|
@@ -0,0 +1,204 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from torch import nn
|
| 3 |
+
from torch.autograd import Function
|
| 4 |
+
from torch.autograd.function import once_differentiable
|
| 5 |
+
from torch.nn.modules.utils import _pair
|
| 6 |
+
|
| 7 |
+
from ..utils import ext_loader
|
| 8 |
+
|
| 9 |
+
ext_module = ext_loader.load_ext(
|
| 10 |
+
'_ext', ['deform_roi_pool_forward', 'deform_roi_pool_backward'])
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class DeformRoIPoolFunction(Function):
    """Autograd ``Function`` for deformable RoI pooling.

    Both passes are delegated to the compiled extension
    (``deform_roi_pool_forward`` / ``deform_roi_pool_backward``).
    """

    @staticmethod
    def symbolic(g, input, rois, offset, output_size, spatial_scale,
                 sampling_ratio, gamma):
        # ONNX export: emit the custom MMCV operator node.
        return g.op(
            'mmcv::MMCVDeformRoIPool',
            input,
            rois,
            offset,
            pooled_height_i=output_size[0],
            pooled_width_i=output_size[1],
            spatial_scale_f=spatial_scale,
            sampling_ratio_f=sampling_ratio,
            gamma_f=gamma)

    @staticmethod
    def forward(ctx,
                input,
                rois,
                offset,
                output_size,
                spatial_scale=1.0,
                sampling_ratio=0,
                gamma=0.1):
        # A missing offset is encoded as an empty tensor; presumably the
        # extension treats that as "no deformation" — TODO confirm against
        # the kernel implementation.
        if offset is None:
            offset = input.new_zeros(0)
        ctx.output_size = _pair(output_size)
        ctx.spatial_scale = float(spatial_scale)
        ctx.sampling_ratio = int(sampling_ratio)
        ctx.gamma = float(gamma)

        assert rois.size(1) == 5, 'RoI must be (idx, x1, y1, x2, y2)!'

        # One pooled (C, pooled_h, pooled_w) feature map per RoI.
        output_shape = (rois.size(0), input.size(1), ctx.output_size[0],
                        ctx.output_size[1])
        output = input.new_zeros(output_shape)

        ext_module.deform_roi_pool_forward(
            input,
            rois,
            offset,
            output,
            pooled_height=ctx.output_size[0],
            pooled_width=ctx.output_size[1],
            spatial_scale=ctx.spatial_scale,
            sampling_ratio=ctx.sampling_ratio,
            gamma=ctx.gamma)

        ctx.save_for_backward(input, rois, offset)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        input, rois, offset = ctx.saved_tensors
        grad_input = grad_output.new_zeros(input.shape)
        grad_offset = grad_output.new_zeros(offset.shape)

        ext_module.deform_roi_pool_backward(
            grad_output,
            input,
            rois,
            offset,
            grad_input,
            grad_offset,
            pooled_height=ctx.output_size[0],
            pooled_width=ctx.output_size[1],
            spatial_scale=ctx.spatial_scale,
            sampling_ratio=ctx.sampling_ratio,
            gamma=ctx.gamma)
        # Empty grad_offset means offset was the empty placeholder from
        # forward(); report "no gradient" for it in that case.
        if grad_offset.numel() == 0:
            grad_offset = None
        return grad_input, None, grad_offset, None, None, None, None


# Functional alias for the autograd Function.
deform_roi_pool = DeformRoIPoolFunction.apply
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
class DeformRoIPool(nn.Module):
    """Deformable RoI pooling layer.

    Thin ``nn.Module`` wrapper around the ``deform_roi_pool`` autograd
    function; it only stores the pooling hyper-parameters.
    """

    def __init__(self,
                 output_size,
                 spatial_scale=1.0,
                 sampling_ratio=0,
                 gamma=0.1):
        super(DeformRoIPool, self).__init__()
        # Normalize hyper-parameters once so forward() can pass them through.
        self.output_size = _pair(output_size)
        self.spatial_scale = float(spatial_scale)
        self.sampling_ratio = int(sampling_ratio)
        self.gamma = float(gamma)

    def forward(self, input, rois, offset=None):
        pool_args = (input, rois, offset, self.output_size,
                     self.spatial_scale, self.sampling_ratio, self.gamma)
        return deform_roi_pool(*pool_args)
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
class DeformRoIPoolPack(DeformRoIPool):
    """Deformable RoI pooling with a learned offset branch.

    A first, offset-free pooling pass produces features from which a small
    fully-connected network predicts per-bin (x, y) offsets; a second
    pooling pass then uses those offsets.
    """

    def __init__(self,
                 output_size,
                 output_channels,
                 deform_fc_channels=1024,
                 spatial_scale=1.0,
                 sampling_ratio=0,
                 gamma=0.1):
        super(DeformRoIPoolPack, self).__init__(output_size, spatial_scale,
                                                sampling_ratio, gamma)

        # Expected channel count of the input feature map.
        self.output_channels = output_channels
        # Hidden width of the offset-prediction MLP.
        self.deform_fc_channels = deform_fc_channels

        # Maps flattened pooled features (C * H * W) to 2 offsets per bin.
        self.offset_fc = nn.Sequential(
            nn.Linear(
                self.output_size[0] * self.output_size[1] *
                self.output_channels, self.deform_fc_channels),
            nn.ReLU(inplace=True),
            nn.Linear(self.deform_fc_channels, self.deform_fc_channels),
            nn.ReLU(inplace=True),
            nn.Linear(self.deform_fc_channels,
                      self.output_size[0] * self.output_size[1] * 2))
        # Zero-init the last layer so training starts from zero offsets,
        # i.e. plain (non-deformable) RoI pooling.
        self.offset_fc[-1].weight.data.zero_()
        self.offset_fc[-1].bias.data.zero_()

    def forward(self, input, rois):
        assert input.size(1) == self.output_channels
        # First pass without offsets to obtain features for prediction.
        x = deform_roi_pool(input, rois, None, self.output_size,
                            self.spatial_scale, self.sampling_ratio,
                            self.gamma)
        rois_num = rois.size(0)
        offset = self.offset_fc(x.view(rois_num, -1))
        offset = offset.view(rois_num, 2, self.output_size[0],
                             self.output_size[1])
        # Second pass with the predicted offsets.
        return deform_roi_pool(input, rois, offset, self.output_size,
                               self.spatial_scale, self.sampling_ratio,
                               self.gamma)
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
class ModulatedDeformRoIPoolPack(DeformRoIPool):
    """Deformable RoI pooling with learned offsets and a modulation mask.

    Like :class:`DeformRoIPoolPack`, but additionally predicts a sigmoid
    mask in [0, 1] per output bin and multiplies the pooled features by it.
    """

    def __init__(self,
                 output_size,
                 output_channels,
                 deform_fc_channels=1024,
                 spatial_scale=1.0,
                 sampling_ratio=0,
                 gamma=0.1):
        super(ModulatedDeformRoIPoolPack,
              self).__init__(output_size, spatial_scale, sampling_ratio, gamma)

        # Expected channel count of the input feature map.
        self.output_channels = output_channels
        # Hidden width of the offset/mask prediction MLPs.
        self.deform_fc_channels = deform_fc_channels

        # Maps flattened pooled features (C * H * W) to 2 offsets per bin.
        self.offset_fc = nn.Sequential(
            nn.Linear(
                self.output_size[0] * self.output_size[1] *
                self.output_channels, self.deform_fc_channels),
            nn.ReLU(inplace=True),
            nn.Linear(self.deform_fc_channels, self.deform_fc_channels),
            nn.ReLU(inplace=True),
            nn.Linear(self.deform_fc_channels,
                      self.output_size[0] * self.output_size[1] * 2))
        # Zero-init the last layer so training starts from zero offsets.
        self.offset_fc[-1].weight.data.zero_()
        self.offset_fc[-1].bias.data.zero_()

        # Predicts one mask value per output bin, squashed to (0, 1).
        self.mask_fc = nn.Sequential(
            nn.Linear(
                self.output_size[0] * self.output_size[1] *
                self.output_channels, self.deform_fc_channels),
            nn.ReLU(inplace=True),
            nn.Linear(self.deform_fc_channels,
                      self.output_size[0] * self.output_size[1] * 1),
            nn.Sigmoid())
        # Zero-init the final Linear (index 2, before the Sigmoid) so the
        # initial mask is uniformly sigmoid(0) = 0.5.
        self.mask_fc[2].weight.data.zero_()
        self.mask_fc[2].bias.data.zero_()

    def forward(self, input, rois):
        assert input.size(1) == self.output_channels
        # First pass without offsets to obtain features for prediction.
        x = deform_roi_pool(input, rois, None, self.output_size,
                            self.spatial_scale, self.sampling_ratio,
                            self.gamma)
        rois_num = rois.size(0)
        offset = self.offset_fc(x.view(rois_num, -1))
        offset = offset.view(rois_num, 2, self.output_size[0],
                             self.output_size[1])
        mask = self.mask_fc(x.view(rois_num, -1))
        mask = mask.view(rois_num, 1, self.output_size[0], self.output_size[1])
        # Second pass with predicted offsets, then modulate by the mask.
        d = deform_roi_pool(input, rois, offset, self.output_size,
                            self.spatial_scale, self.sampling_ratio,
                            self.gamma)
        return d * mask
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/ops/deprecated_wrappers.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
# This file is for backward compatibility.
|
| 3 |
+
# Module wrappers for empty tensor have been moved to mmcv.cnn.bricks.
|
| 4 |
+
import warnings
|
| 5 |
+
|
| 6 |
+
from ..cnn.bricks.wrappers import Conv2d, ConvTranspose2d, Linear, MaxPool2d
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class Conv2d_deprecated(Conv2d):
    """Deprecated re-export of the ``mmcv.cnn`` ``Conv2d`` wrapper.

    Behaves identically to the parent class but emits a deprecation
    warning on construction.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(
            'Importing Conv2d wrapper from "mmcv.ops" will be deprecated in'
            ' the future. Please import them from "mmcv.cnn" instead')
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class ConvTranspose2d_deprecated(ConvTranspose2d):
    """Deprecated re-export of the ``mmcv.cnn`` ``ConvTranspose2d`` wrapper.

    Behaves identically to the parent class but emits a deprecation
    warning on construction.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(
            'Importing ConvTranspose2d wrapper from "mmcv.ops" will be '
            'deprecated in the future. Please import them from "mmcv.cnn" '
            'instead')
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class MaxPool2d_deprecated(MaxPool2d):
    """Deprecated re-export of the ``mmcv.cnn`` ``MaxPool2d`` wrapper.

    Behaves identically to the parent class but emits a deprecation
    warning on construction.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(
            'Importing MaxPool2d wrapper from "mmcv.ops" will be deprecated in'
            ' the future. Please import them from "mmcv.cnn" instead')
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class Linear_deprecated(Linear):
    """Deprecated re-export of the ``mmcv.cnn`` ``Linear`` wrapper.

    Behaves identically to the parent class but emits a deprecation
    warning on construction.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(
            'Importing Linear wrapper from "mmcv.ops" will be deprecated in'
            ' the future. Please import them from "mmcv.cnn" instead')
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/ops/furthest_point_sample.py
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
from torch.autograd import Function
|
| 3 |
+
|
| 4 |
+
from ..utils import ext_loader
|
| 5 |
+
|
| 6 |
+
ext_module = ext_loader.load_ext('_ext', [
|
| 7 |
+
'furthest_point_sampling_forward',
|
| 8 |
+
'furthest_point_sampling_with_dist_forward'
|
| 9 |
+
])
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class FurthestPointSampling(Function):
    """Uses iterative furthest point sampling to select a set of features whose
    corresponding points have the furthest distance."""

    @staticmethod
    def forward(ctx, points_xyz: torch.Tensor,
                num_points: int) -> torch.Tensor:
        """Run furthest point sampling on a point cloud.

        Args:
            points_xyz (Tensor): (B, N, 3) where N > num_points.
            num_points (int): Number of points in the sampled set.

        Returns:
            Tensor: (B, num_points) indices of the sampled points.
        """
        assert points_xyz.is_contiguous()

        B, N = points_xyz.size()[:2]
        # CUDA-only: output and scratch buffers are allocated on the GPU.
        output = torch.cuda.IntTensor(B, num_points)
        # Per-point scratch buffer initialized to a huge value; presumably
        # holds running minimum distances inside the kernel — TODO confirm.
        temp = torch.cuda.FloatTensor(B, N).fill_(1e10)

        ext_module.furthest_point_sampling_forward(
            points_xyz,
            temp,
            output,
            b=B,
            n=N,
            m=num_points,
        )
        # parrots builds do not support mark_non_differentiable.
        if torch.__version__ != 'parrots':
            ctx.mark_non_differentiable(output)
        return output

    @staticmethod
    def backward(xyz, a=None):
        # Sampled indices are not differentiable w.r.t. the inputs.
        return None, None
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
class FurthestPointSamplingWithDist(Function):
    """Uses iterative furthest point sampling to select a set of features whose
    corresponding points have the furthest distance."""

    @staticmethod
    def forward(ctx, points_dist: torch.Tensor,
                num_points: int) -> torch.Tensor:
        """
        Args:
            points_dist (Tensor): (B, N, N) Distance between each point pair.
            num_points (int): Number of points in the sampled set.

        Returns:
            Tensor: (B, num_points) indices of the sampled points.
        """
        # The CUDA kernel requires contiguous memory.
        assert points_dist.is_contiguous()

        B, N, _ = points_dist.size()
        # Buffers allocated on the same device/dtype family as the input:
        # sampled indices plus a per-point running minimum-distance buffer,
        # initialised to a large value (1e10) so the first update wins.
        output = points_dist.new_zeros([B, num_points], dtype=torch.int32)
        temp = points_dist.new_zeros([B, N]).fill_(1e10)

        ext_module.furthest_point_sampling_with_dist_forward(
            points_dist, temp, output, b=B, n=N, m=num_points)
        # parrots does not implement mark_non_differentiable.
        if torch.__version__ != 'parrots':
            ctx.mark_non_differentiable(output)
        return output

    @staticmethod
    def backward(xyz, a=None):
        # Index selection is not differentiable; no gradients flow back.
        return None, None
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
# Functional aliases: ``Function.apply`` is the supported entry point for
# invoking a ``torch.autograd.Function``.
furthest_point_sample = FurthestPointSampling.apply
furthest_point_sample_with_dist = FurthestPointSamplingWithDist.apply
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/ops/fused_bias_leakyrelu.py
ADDED
|
@@ -0,0 +1,268 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# modified from https://github.com/rosinality/stylegan2-pytorch/blob/master/op/fused_act.py # noqa:E501
|
| 2 |
+
|
| 3 |
+
# Copyright (c) 2021, NVIDIA Corporation. All rights reserved.
|
| 4 |
+
# NVIDIA Source Code License for StyleGAN2 with Adaptive Discriminator
|
| 5 |
+
# Augmentation (ADA)
|
| 6 |
+
# =======================================================================
|
| 7 |
+
|
| 8 |
+
# 1. Definitions
|
| 9 |
+
|
| 10 |
+
# "Licensor" means any person or entity that distributes its Work.
|
| 11 |
+
|
| 12 |
+
# "Software" means the original work of authorship made available under
|
| 13 |
+
# this License.
|
| 14 |
+
|
| 15 |
+
# "Work" means the Software and any additions to or derivative works of
|
| 16 |
+
# the Software that are made available under this License.
|
| 17 |
+
|
| 18 |
+
# The terms "reproduce," "reproduction," "derivative works," and
|
| 19 |
+
# "distribution" have the meaning as provided under U.S. copyright law;
|
| 20 |
+
# provided, however, that for the purposes of this License, derivative
|
| 21 |
+
# works shall not include works that remain separable from, or merely
|
| 22 |
+
# link (or bind by name) to the interfaces of, the Work.
|
| 23 |
+
|
| 24 |
+
# Works, including the Software, are "made available" under this License
|
| 25 |
+
# by including in or with the Work either (a) a copyright notice
|
| 26 |
+
# referencing the applicability of this License to the Work, or (b) a
|
| 27 |
+
# copy of this License.
|
| 28 |
+
|
| 29 |
+
# 2. License Grants
|
| 30 |
+
|
| 31 |
+
# 2.1 Copyright Grant. Subject to the terms and conditions of this
|
| 32 |
+
# License, each Licensor grants to you a perpetual, worldwide,
|
| 33 |
+
# non-exclusive, royalty-free, copyright license to reproduce,
|
| 34 |
+
# prepare derivative works of, publicly display, publicly perform,
|
| 35 |
+
# sublicense and distribute its Work and any resulting derivative
|
| 36 |
+
# works in any form.
|
| 37 |
+
|
| 38 |
+
# 3. Limitations
|
| 39 |
+
|
| 40 |
+
# 3.1 Redistribution. You may reproduce or distribute the Work only
|
| 41 |
+
# if (a) you do so under this License, (b) you include a complete
|
| 42 |
+
# copy of this License with your distribution, and (c) you retain
|
| 43 |
+
# without modification any copyright, patent, trademark, or
|
| 44 |
+
# attribution notices that are present in the Work.
|
| 45 |
+
|
| 46 |
+
# 3.2 Derivative Works. You may specify that additional or different
|
| 47 |
+
# terms apply to the use, reproduction, and distribution of your
|
| 48 |
+
# derivative works of the Work ("Your Terms") only if (a) Your Terms
|
| 49 |
+
# provide that the use limitation in Section 3.3 applies to your
|
| 50 |
+
# derivative works, and (b) you identify the specific derivative
|
| 51 |
+
# works that are subject to Your Terms. Notwithstanding Your Terms,
|
| 52 |
+
# this License (including the redistribution requirements in Section
|
| 53 |
+
# 3.1) will continue to apply to the Work itself.
|
| 54 |
+
|
| 55 |
+
# 3.3 Use Limitation. The Work and any derivative works thereof only
|
| 56 |
+
# may be used or intended for use non-commercially. Notwithstanding
|
| 57 |
+
# the foregoing, NVIDIA and its affiliates may use the Work and any
|
| 58 |
+
# derivative works commercially. As used herein, "non-commercially"
|
| 59 |
+
# means for research or evaluation purposes only.
|
| 60 |
+
|
| 61 |
+
# 3.4 Patent Claims. If you bring or threaten to bring a patent claim
|
| 62 |
+
# against any Licensor (including any claim, cross-claim or
|
| 63 |
+
# counterclaim in a lawsuit) to enforce any patents that you allege
|
| 64 |
+
# are infringed by any Work, then your rights under this License from
|
| 65 |
+
# such Licensor (including the grant in Section 2.1) will terminate
|
| 66 |
+
# immediately.
|
| 67 |
+
|
| 68 |
+
# 3.5 Trademarks. This License does not grant any rights to use any
|
| 69 |
+
# Licensor’s or its affiliates’ names, logos, or trademarks, except
|
| 70 |
+
# as necessary to reproduce the notices described in this License.
|
| 71 |
+
|
| 72 |
+
# 3.6 Termination. If you violate any term of this License, then your
|
| 73 |
+
# rights under this License (including the grant in Section 2.1) will
|
| 74 |
+
# terminate immediately.
|
| 75 |
+
|
| 76 |
+
# 4. Disclaimer of Warranty.
|
| 77 |
+
|
| 78 |
+
# THE WORK IS PROVIDED "AS IS" WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
| 79 |
+
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WARRANTIES OR CONDITIONS OF
|
| 80 |
+
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE OR
|
| 81 |
+
# NON-INFRINGEMENT. YOU BEAR THE RISK OF UNDERTAKING ANY ACTIVITIES UNDER
|
| 82 |
+
# THIS LICENSE.
|
| 83 |
+
|
| 84 |
+
# 5. Limitation of Liability.
|
| 85 |
+
|
| 86 |
+
# EXCEPT AS PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL
|
| 87 |
+
# THEORY, WHETHER IN TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE
|
| 88 |
+
# SHALL ANY LICENSOR BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY DIRECT,
|
| 89 |
+
# INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF
|
| 90 |
+
# OR RELATED TO THIS LICENSE, THE USE OR INABILITY TO USE THE WORK
|
| 91 |
+
# (INCLUDING BUT NOT LIMITED TO LOSS OF GOODWILL, BUSINESS INTERRUPTION,
|
| 92 |
+
# LOST PROFITS OR DATA, COMPUTER FAILURE OR MALFUNCTION, OR ANY OTHER
|
| 93 |
+
# COMMERCIAL DAMAGES OR LOSSES), EVEN IF THE LICENSOR HAS BEEN ADVISED OF
|
| 94 |
+
# THE POSSIBILITY OF SUCH DAMAGES.
|
| 95 |
+
|
| 96 |
+
# =======================================================================
|
| 97 |
+
|
| 98 |
+
import torch
|
| 99 |
+
import torch.nn.functional as F
|
| 100 |
+
from torch import nn
|
| 101 |
+
from torch.autograd import Function
|
| 102 |
+
|
| 103 |
+
from ..utils import ext_loader
|
| 104 |
+
|
| 105 |
+
ext_module = ext_loader.load_ext('_ext', ['fused_bias_leakyrelu'])
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
class FusedBiasLeakyReLUFunctionBackward(Function):
    """Calculate second order deviation.

    This function is to compute the second order deviation for the fused leaky
    relu operation.
    """

    @staticmethod
    def forward(ctx, grad_output, out, negative_slope, scale):
        # Save the forward output so backward can recompute the mask.
        ctx.save_for_backward(out)
        ctx.negative_slope = negative_slope
        ctx.scale = scale

        # Placeholder bias tensor; the kernel ignores it in grad mode.
        empty = grad_output.new_empty(0)

        # act=3 selects leaky ReLU in the fused kernel; grad=1 requests the
        # gradient computation path.
        grad_input = ext_module.fused_bias_leakyrelu(
            grad_output,
            empty,
            out,
            act=3,
            grad=1,
            alpha=negative_slope,
            scale=scale)

        # Reduce over batch and all spatial dims (keep only the channel
        # dim, axis 1) to obtain the bias gradient.
        dim = [0]

        if grad_input.ndim > 2:
            dim += list(range(2, grad_input.ndim))

        grad_bias = grad_input.sum(dim).detach()

        return grad_input, grad_bias

    @staticmethod
    def backward(ctx, gradgrad_input, gradgrad_bias):
        out, = ctx.saved_tensors

        # The second order deviation, in fact, contains two parts, while the
        # the first part is zero. Thus, we direct consider the second part
        # which is similar with the first order deviation in implementation.
        gradgrad_out = ext_module.fused_bias_leakyrelu(
            gradgrad_input,
            gradgrad_bias.to(out.dtype),
            out,
            act=3,
            grad=1,
            alpha=ctx.negative_slope,
            scale=ctx.scale)

        # Only the first forward input (grad_output) receives a gradient.
        return gradgrad_out, None, None, None
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
class FusedBiasLeakyReLUFunction(Function):
    """Autograd Function wrapping the fused bias + leaky-ReLU CUDA kernel."""

    @staticmethod
    def forward(ctx, input, bias, negative_slope, scale):
        # Placeholder "previous output" tensor; unused in forward mode.
        empty = input.new_empty(0)

        # act=3 selects leaky ReLU; grad=0 requests the forward pass.
        out = ext_module.fused_bias_leakyrelu(
            input,
            bias,
            empty,
            act=3,
            grad=0,
            alpha=negative_slope,
            scale=scale)
        # Save the activation output: the backward kernel derives the
        # leaky-ReLU mask from it instead of from the raw input.
        ctx.save_for_backward(out)
        ctx.negative_slope = negative_slope
        ctx.scale = scale

        return out

    @staticmethod
    def backward(ctx, grad_output):
        out, = ctx.saved_tensors

        # Delegate to a dedicated Function so double backward works.
        grad_input, grad_bias = FusedBiasLeakyReLUFunctionBackward.apply(
            grad_output, out, ctx.negative_slope, ctx.scale)

        # negative_slope and scale are non-tensor args -> no gradient.
        return grad_input, grad_bias, None, None
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
class FusedBiasLeakyReLU(nn.Module):
    r"""Fused bias leaky ReLU.

    This function is introduced in the StyleGAN2:
    http://arxiv.org/abs/1912.04958

    The bias term comes from the convolution operation. In addition, to keep
    the variance of the feature map or gradients unchanged, they also adopt a
    scale similarly with Kaiming initialization. However, since the
    :math:`1+{alpha}^2` is too small, we can just ignore it. Therefore, the
    final scale is just :math:`\sqrt{2}`. Of course, you may change it with
    your own scale.

    TODO: Implement the CPU version.

    Args:
        num_channels (int): The channel number of the feature map.
        negative_slope (float, optional): Same as nn.LeakyRelu.
            Defaults to 0.2.
        scale (float, optional): A scalar to adjust the variance of the feature
            map. Defaults to 2**0.5.
    """

    def __init__(self, num_channels, negative_slope=0.2, scale=2**0.5):
        super().__init__()

        # The learnable bias is broadcast over all non-channel dimensions
        # by the functional form below.
        self.bias = nn.Parameter(torch.zeros(num_channels))
        self.negative_slope = negative_slope
        self.scale = scale

    def forward(self, input):
        return fused_bias_leakyrelu(input, self.bias, self.negative_slope,
                                    self.scale)
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
def fused_bias_leakyrelu(input, bias, negative_slope=0.2, scale=2**0.5):
    r"""Fused bias leaky ReLU function.

    This function is introduced in the StyleGAN2:
    http://arxiv.org/abs/1912.04958

    The bias term comes from the convolution operation. In addition, to keep
    the variance of the feature map or gradients unchanged, they also adopt a
    scale similarly with Kaiming initialization. However, since the
    :math:`1+{alpha}^2` is too small, we can just ignore it. Therefore, the
    final scale is just :math:`\sqrt{2}`. Of course, you may change it with
    your own scale.

    Args:
        input (torch.Tensor): Input feature map.
        bias (nn.Parameter): The bias from convolution operation.
        negative_slope (float, optional): Same as nn.LeakyRelu.
            Defaults to 0.2.
        scale (float, optional): A scalar to adjust the variance of the feature
            map. Defaults to 2**0.5.

    Returns:
        torch.Tensor: Feature map after non-linear activation.
    """

    # The fused kernel is CUDA-only; fall back to the pure-PyTorch
    # reference implementation on CPU tensors.
    if not input.is_cuda:
        return bias_leakyrelu_ref(input, bias, negative_slope, scale)

    return FusedBiasLeakyReLUFunction.apply(input, bias.to(input.dtype),
                                            negative_slope, scale)
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
def bias_leakyrelu_ref(x, bias, negative_slope=0.2, scale=2**0.5):
    """Pure-PyTorch reference for the fused bias + leaky-ReLU op.

    Adds ``bias`` along the channel dimension (dim 1) when given, applies
    a leaky ReLU, and rescales the result by ``scale``.
    """
    if bias is not None:
        assert bias.ndim == 1
        assert bias.shape[0] == x.shape[1]
        # Broadcast the 1-D bias along the channel axis of x.
        broadcast_shape = [1] * x.ndim
        broadcast_shape[1] = -1
        x = x + bias.reshape(broadcast_shape)

    activated = F.leaky_relu(x, negative_slope)
    if scale != 1:
        activated = activated * scale

    return activated
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/ops/nms.py
ADDED
|
@@ -0,0 +1,417 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import torch
|
| 5 |
+
|
| 6 |
+
from annotator.uniformer.mmcv.utils import deprecated_api_warning
|
| 7 |
+
from ..utils import ext_loader
|
| 8 |
+
|
| 9 |
+
ext_module = ext_loader.load_ext(
|
| 10 |
+
'_ext', ['nms', 'softnms', 'nms_match', 'nms_rotated'])
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# This function is modified from: https://github.com/pytorch/vision/
|
| 14 |
+
# This function is modified from: https://github.com/pytorch/vision/
class NMSop(torch.autograd.Function):
    """Autograd-wrapped NMS with an ONNX-exportable ``symbolic``."""

    @staticmethod
    def forward(ctx, bboxes, scores, iou_threshold, offset, score_threshold,
                max_num):
        # Optionally drop low-score boxes before running NMS, remembering
        # the original indices so the returned indices stay valid.
        is_filtering_by_score = score_threshold > 0
        if is_filtering_by_score:
            valid_mask = scores > score_threshold
            bboxes, scores = bboxes[valid_mask], scores[valid_mask]
            valid_inds = torch.nonzero(
                valid_mask, as_tuple=False).squeeze(dim=1)

        inds = ext_module.nms(
            bboxes, scores, iou_threshold=float(iou_threshold), offset=offset)

        if max_num > 0:
            inds = inds[:max_num]
        if is_filtering_by_score:
            # Map back to indices in the unfiltered input.
            inds = valid_inds[inds]
        return inds

    @staticmethod
    def symbolic(g, bboxes, scores, iou_threshold, offset, score_threshold,
                 max_num):
        # Prefer the custom mmcv ONNX op when its runtime is available.
        from ..onnx import is_custom_op_loaded
        has_custom_op = is_custom_op_loaded()
        # TensorRT nms plugin is aligned with original nms in ONNXRuntime
        is_trt_backend = os.environ.get('ONNX_BACKEND') == 'MMCVTensorRT'
        if has_custom_op and (not is_trt_backend):
            return g.op(
                'mmcv::NonMaxSuppression',
                bboxes,
                scores,
                iou_threshold_f=float(iou_threshold),
                offset_i=int(offset))
        else:
            # Fall back to the standard ONNX NonMaxSuppression op, which
            # expects batched inputs: boxes (1, N, 4), scores (1, 1, N).
            from torch.onnx.symbolic_opset9 import select, squeeze, unsqueeze
            from ..onnx.onnx_utils.symbolic_helper import _size_helper

            boxes = unsqueeze(g, bboxes, 0)
            scores = unsqueeze(g, unsqueeze(g, scores, 0), 0)

            if max_num > 0:
                max_num = g.op(
                    'Constant',
                    value_t=torch.tensor(max_num, dtype=torch.long))
            else:
                # No cap requested: allow up to the number of input boxes.
                dim = g.op('Constant', value_t=torch.tensor(0))
                max_num = _size_helper(g, bboxes, dim)
            max_output_per_class = max_num
            iou_threshold = g.op(
                'Constant',
                value_t=torch.tensor([iou_threshold], dtype=torch.float))
            score_threshold = g.op(
                'Constant',
                value_t=torch.tensor([score_threshold], dtype=torch.float))
            nms_out = g.op('NonMaxSuppression', boxes, scores,
                           max_output_per_class, iou_threshold,
                           score_threshold)
            # NonMaxSuppression returns (num_kept, 3) triplets of
            # (batch, class, box); keep only the box-index column (2).
            return squeeze(
                g,
                select(
                    g, nms_out, 1,
                    g.op(
                        'Constant',
                        value_t=torch.tensor([2], dtype=torch.long))), 1)
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
class SoftNMSop(torch.autograd.Function):
    """Autograd-wrapped Soft-NMS (CPU-only kernel) with an ONNX symbolic."""

    @staticmethod
    def forward(ctx, boxes, scores, iou_threshold, sigma, min_score, method,
                offset):
        # The softnms extension only runs on CPU; dets receives the kept
        # boxes with their (possibly decayed) scores, shape (N, 5).
        dets = boxes.new_empty((boxes.size(0), 5), device='cpu')
        inds = ext_module.softnms(
            boxes.cpu(),
            scores.cpu(),
            dets.cpu(),
            iou_threshold=float(iou_threshold),
            sigma=float(sigma),
            min_score=float(min_score),
            method=int(method),
            offset=int(offset))
        return dets, inds

    @staticmethod
    def symbolic(g, boxes, scores, iou_threshold, sigma, min_score, method,
                 offset):
        from packaging import version
        # The custom op export relies on PyTorch ONNX features from 1.7+.
        assert version.parse(torch.__version__) >= version.parse('1.7.0')
        nms_out = g.op(
            'mmcv::SoftNonMaxSuppression',
            boxes,
            scores,
            iou_threshold_f=float(iou_threshold),
            sigma_f=float(sigma),
            min_score_f=float(min_score),
            method_i=int(method),
            offset_i=int(offset),
            outputs=2)
        return nms_out
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
@deprecated_api_warning({'iou_thr': 'iou_threshold'})
def nms(boxes, scores, iou_threshold, offset=0, score_threshold=0, max_num=-1):
    """Dispatch to either CPU or GPU NMS implementations.

    The input can be either torch tensor or numpy array. GPU NMS will be used
    if the input is gpu tensor, otherwise CPU NMS
    will be used. The returned type will always be the same as inputs.

    Arguments:
        boxes (torch.Tensor or np.ndarray): boxes in shape (N, 4).
        scores (torch.Tensor or np.ndarray): scores in shape (N, ).
        iou_threshold (float): IoU threshold for NMS.
        offset (int, 0 or 1): boxes' width or height is (x2 - x1 + offset).
        score_threshold (float): score threshold for NMS.
        max_num (int): maximum number of boxes after NMS.

    Returns:
        tuple: kept dets(boxes and scores) and indice, which is always the \
            same data type as the input.

    Example:
        >>> boxes = np.array([[49.1, 32.4, 51.0, 35.9],
        >>>                   [49.3, 32.9, 51.0, 35.3],
        >>>                   [49.2, 31.8, 51.0, 35.4],
        >>>                   [35.1, 11.5, 39.1, 15.7],
        >>>                   [35.6, 11.8, 39.3, 14.2],
        >>>                   [35.3, 11.5, 39.9, 14.5],
        >>>                   [35.2, 11.7, 39.7, 15.7]], dtype=np.float32)
        >>> scores = np.array([0.9, 0.9, 0.5, 0.5, 0.5, 0.4, 0.3],\
               dtype=np.float32)
        >>> iou_threshold = 0.6
        >>> dets, inds = nms(boxes, scores, iou_threshold)
        >>> assert len(inds) == len(dets) == 3
    """
    assert isinstance(boxes, (torch.Tensor, np.ndarray))
    assert isinstance(scores, (torch.Tensor, np.ndarray))
    # Remember the input type so the result can be converted back.
    is_numpy = False
    if isinstance(boxes, np.ndarray):
        is_numpy = True
        boxes = torch.from_numpy(boxes)
    if isinstance(scores, np.ndarray):
        scores = torch.from_numpy(scores)
    assert boxes.size(1) == 4
    assert boxes.size(0) == scores.size(0)
    assert offset in (0, 1)

    if torch.__version__ == 'parrots':
        # parrots cannot go through the autograd Function wrapper; call
        # the extension directly.
        indata_list = [boxes, scores]
        indata_dict = {
            'iou_threshold': float(iou_threshold),
            'offset': int(offset)
        }
        inds = ext_module.nms(*indata_list, **indata_dict)
    else:
        inds = NMSop.apply(boxes, scores, iou_threshold, offset,
                           score_threshold, max_num)
    # Assemble (N, 5) dets = kept boxes with their scores appended.
    dets = torch.cat((boxes[inds], scores[inds].reshape(-1, 1)), dim=1)
    if is_numpy:
        dets = dets.cpu().numpy()
        inds = inds.cpu().numpy()
    return dets, inds
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
@deprecated_api_warning({'iou_thr': 'iou_threshold'})
def soft_nms(boxes,
             scores,
             iou_threshold=0.3,
             sigma=0.5,
             min_score=1e-3,
             method='linear',
             offset=0):
    """Dispatch to only CPU Soft NMS implementations.

    The input can be either a torch tensor or numpy array.
    The returned type will always be the same as inputs.

    Arguments:
        boxes (torch.Tensor or np.ndarray): boxes in shape (N, 4).
        scores (torch.Tensor or np.ndarray): scores in shape (N, ).
        iou_threshold (float): IoU threshold for NMS.
        sigma (float): hyperparameter for gaussian method
        min_score (float): score filter threshold
        method (str): either 'linear' or 'gaussian'
        offset (int, 0 or 1): boxes' width or height is (x2 - x1 + offset).

    Returns:
        tuple: kept dets(boxes and scores) and indice, which is always the \
            same data type as the input.

    Example:
        >>> boxes = np.array([[4., 3., 5., 3.],
        >>>                   [4., 3., 5., 4.],
        >>>                   [3., 1., 3., 1.],
        >>>                   [3., 1., 3., 1.],
        >>>                   [3., 1., 3., 1.],
        >>>                   [3., 1., 3., 1.]], dtype=np.float32)
        >>> scores = np.array([0.9, 0.9, 0.5, 0.5, 0.4, 0.0], dtype=np.float32)
        >>> iou_threshold = 0.6
        >>> dets, inds = soft_nms(boxes, scores, iou_threshold, sigma=0.5)
        >>> assert len(inds) == len(dets) == 5
    """

    assert isinstance(boxes, (torch.Tensor, np.ndarray))
    assert isinstance(scores, (torch.Tensor, np.ndarray))
    # Remember the input type so the result can be converted back.
    is_numpy = False
    if isinstance(boxes, np.ndarray):
        is_numpy = True
        boxes = torch.from_numpy(boxes)
    if isinstance(scores, np.ndarray):
        scores = torch.from_numpy(scores)
    assert boxes.size(1) == 4
    assert boxes.size(0) == scores.size(0)
    assert offset in (0, 1)
    # Map method names onto the integer codes the C extension expects.
    method_dict = {'naive': 0, 'linear': 1, 'gaussian': 2}
    assert method in method_dict.keys()

    if torch.__version__ == 'parrots':
        # parrots cannot go through the autograd Function wrapper; call
        # the extension directly (CPU-only kernel).
        dets = boxes.new_empty((boxes.size(0), 5), device='cpu')
        indata_list = [boxes.cpu(), scores.cpu(), dets.cpu()]
        indata_dict = {
            'iou_threshold': float(iou_threshold),
            'sigma': float(sigma),
            'min_score': min_score,
            'method': method_dict[method],
            'offset': int(offset)
        }
        inds = ext_module.softnms(*indata_list, **indata_dict)
    else:
        dets, inds = SoftNMSop.apply(boxes.cpu(), scores.cpu(),
                                     float(iou_threshold), float(sigma),
                                     float(min_score), method_dict[method],
                                     int(offset))

    # The kernel fills dets up front; trim to the number of kept boxes.
    dets = dets[:inds.size(0)]

    if is_numpy:
        dets = dets.cpu().numpy()
        inds = inds.cpu().numpy()
        return dets, inds
    else:
        # Soft-NMS ran on CPU; move results back to the input device.
        return dets.to(device=boxes.device), inds.to(device=boxes.device)
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
def batched_nms(boxes, scores, idxs, nms_cfg, class_agnostic=False):
    """Performs non-maximum suppression in a batched fashion.

    Modified from https://github.com/pytorch/vision/blob
    /505cd6957711af790211896d32b40291bea1bc21/torchvision/ops/boxes.py#L39.
    In order to perform NMS independently per class, we add an offset to all
    the boxes. The offset is dependent only on the class idx, and is large
    enough so that boxes from different classes do not overlap.

    Arguments:
        boxes (torch.Tensor): boxes in shape (N, 4).
        scores (torch.Tensor): scores in shape (N, ).
        idxs (torch.Tensor): each index value correspond to a bbox cluster,
            and NMS will not be applied between elements of different idxs,
            shape (N, ).
        nms_cfg (dict): specify nms type and other parameters like iou_thr.
            Possible keys includes the following.

            - iou_thr (float): IoU threshold used for NMS.
            - split_thr (float): threshold number of boxes. In some cases the
                number of boxes is large (e.g., 200k). To avoid OOM during
                training, the users could set `split_thr` to a small value.
                If the number of boxes is greater than the threshold, it will
                perform NMS on each group of boxes separately and sequentially.
                Defaults to 10000.
        class_agnostic (bool): if true, nms is class agnostic,
            i.e. IoU thresholding happens over all boxes,
            regardless of the predicted class.

    Returns:
        tuple: kept dets and indice.
    """
    nms_cfg_ = nms_cfg.copy()
    class_agnostic = nms_cfg_.pop('class_agnostic', class_agnostic)
    if class_agnostic:
        boxes_for_nms = boxes
    else:
        # Shift each class's boxes by a class-dependent offset larger than
        # any coordinate, so boxes of different classes can never overlap
        # and one NMS call handles all classes at once.
        max_coordinate = boxes.max()
        offsets = idxs.to(boxes) * (max_coordinate + torch.tensor(1).to(boxes))
        boxes_for_nms = boxes + offsets[:, None]

    nms_type = nms_cfg_.pop('type', 'nms')
    # SECURITY NOTE: eval() resolves the configured op name against module
    # globals; nms_cfg must come from trusted configuration only. A safer
    # alternative would be an explicit name -> function mapping.
    nms_op = eval(nms_type)

    split_thr = nms_cfg_.pop('split_thr', 10000)
    # Won't split to multiple nms nodes when exporting to onnx
    if boxes_for_nms.shape[0] < split_thr or torch.onnx.is_in_onnx_export():
        dets, keep = nms_op(boxes_for_nms, scores, **nms_cfg_)
        boxes = boxes[keep]
        # -1 indexing works abnormal in TensorRT
        # This assumes `dets` has 5 dimensions where
        # the last dimension is score.
        # TODO: more elegant way to handle the dimension issue.
        # Some type of nms would reweight the score, such as SoftNMS
        scores = dets[:, 4]
    else:
        # Too many boxes for one NMS call: process each class separately
        # and merge the kept indices afterwards.
        max_num = nms_cfg_.pop('max_num', -1)
        total_mask = scores.new_zeros(scores.size(), dtype=torch.bool)
        # Some type of nms would reweight the score, such as SoftNMS
        scores_after_nms = scores.new_zeros(scores.size())
        for class_id in torch.unique(idxs):
            mask = (idxs == class_id).nonzero(as_tuple=False).view(-1)
            dets, keep = nms_op(boxes_for_nms[mask], scores[mask], **nms_cfg_)
            total_mask[mask[keep]] = True
            scores_after_nms[mask[keep]] = dets[:, -1]
        keep = total_mask.nonzero(as_tuple=False).view(-1)

        scores, inds = scores_after_nms[keep].sort(descending=True)
        keep = keep[inds]
        boxes = boxes[keep]

        if max_num > 0:
            keep = keep[:max_num]
            boxes = boxes[:max_num]
            scores = scores[:max_num]

    return torch.cat([boxes, scores[:, None]], -1), keep
|
| 337 |
+
|
| 338 |
+
|
| 339 |
+
def nms_match(dets, iou_threshold):
    """Matched dets into different groups by NMS.

    NMS match is similar to NMS but when a bbox is suppressed, nms match will
    record the indice of suppressed bbox and form a group with the indice of
    kept bbox. In each group, indice is sorted as score order.

    Arguments:
        dets (torch.Tensor | np.ndarray): Det boxes with scores, shape (N, 5).
        iou_threshold (float): IoU thresh for NMS.

    Returns:
        List[torch.Tensor | np.ndarray]: The outer list corresponds different
            matched group, the inner Tensor corresponds the indices for a group
            in score order. The element type mirrors the input type.
    """
    if dets.shape[0] == 0:
        # Nothing to match; fall through so the return type still mirrors
        # the input type.
        matched = []
    else:
        assert dets.shape[-1] == 5, 'inputs dets.shape should be (N, 5), ' \
                                    f'but get {dets.shape}'
        # The extension op works on CPU tensors; detach to avoid tracking
        # gradients through the (non-differentiable) matching.
        if isinstance(dets, torch.Tensor):
            dets_t = dets.detach().cpu()
        else:
            dets_t = torch.from_numpy(dets)
        indata_list = [dets_t]
        indata_dict = {'iou_threshold': float(iou_threshold)}
        matched = ext_module.nms_match(*indata_list, **indata_dict)
        if torch.__version__ == 'parrots':
            matched = matched.tolist()

    if isinstance(dets, torch.Tensor):
        return [dets.new_tensor(m, dtype=torch.long) for m in matched]
    else:
        # Fix: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `int` is the exact dtype `np.int` aliased.
        return [np.array(m, dtype=int) for m in matched]
|
| 374 |
+
|
| 375 |
+
|
| 376 |
+
def nms_rotated(dets, scores, iou_threshold, labels=None):
    """Performs non-maximum suppression (NMS) on the rotated boxes according to
    their intersection-over-union (IoU).

    Rotated NMS iteratively removes lower scoring rotated boxes which have an
    IoU greater than iou_threshold with another (higher scoring) rotated box.

    Args:
        dets (Tensor): Rotated boxes in shape (N, 5). They are expected to
            be in (x_ctr, y_ctr, width, height, angle_radian) format.
        scores (Tensor): scores in shape (N, ).
        iou_threshold (float): IoU thresh for NMS.
        labels (Tensor): boxes' label in shape (N,). When given, NMS is
            applied per label (multi-label mode).

    Returns:
        tuple: kept dets (boxes concatenated with scores) and kept indices,
            which is always the same data type as the input.
    """
    # Guard: nothing to suppress.
    if dets.shape[0] == 0:
        return dets, None

    is_multi_label = labels is not None
    if is_multi_label:
        # Append the label as a sixth column so boxes with different labels
        # never suppress each other inside the extension op.
        boxes_with_labels = torch.cat((dets, labels.unsqueeze(1)), 1)
    else:
        boxes_with_labels = dets

    _, sort_order = scores.sort(0, descending=True)
    boxes_sorted = boxes_with_labels.index_select(0, sort_order)

    # The parrots build of the extension takes keyword arguments.
    if torch.__version__ == 'parrots':
        keep_inds = ext_module.nms_rotated(
            boxes_with_labels,
            scores,
            sort_order,
            boxes_sorted,
            iou_threshold=iou_threshold,
            multi_label=is_multi_label)
    else:
        keep_inds = ext_module.nms_rotated(boxes_with_labels, scores,
                                           sort_order, boxes_sorted,
                                           iou_threshold, is_multi_label)
    kept = torch.cat((dets[keep_inds], scores[keep_inds].reshape(-1, 1)),
                     dim=1)
    return kept, keep_inds
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/ops/points_in_boxes.py
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
|
| 3 |
+
from ..utils import ext_loader
|
| 4 |
+
|
| 5 |
+
ext_module = ext_loader.load_ext('_ext', [
|
| 6 |
+
'points_in_boxes_part_forward', 'points_in_boxes_cpu_forward',
|
| 7 |
+
'points_in_boxes_all_forward'
|
| 8 |
+
])
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def points_in_boxes_part(points, boxes):
    """Find the box in which each point is (CUDA).

    Args:
        points (torch.Tensor): [B, M, 3], [x, y, z] in LiDAR/DEPTH coordinate
        boxes (torch.Tensor): [B, T, 7],
            num_valid_boxes <= T, [x, y, z, x_size, y_size, z_size, rz] in
            LiDAR/DEPTH coordinate, (x, y, z) is the bottom center

    Returns:
        box_idxs_of_pts (torch.Tensor): (B, M), default background = -1
    """
    assert points.shape[0] == boxes.shape[0], \
        'Points and boxes should have the same batch size, ' \
        f'but got {points.shape[0]} and {boxes.shape[0]}'
    assert boxes.shape[2] == 7, \
        'boxes dimension should be 7, ' \
        f'but got unexpected shape {boxes.shape[2]}'
    assert points.shape[2] == 3, \
        'points dimension should be 3, ' \
        f'but got unexpected shape {points.shape[2]}'

    num_batches, num_points = points.shape[0], points.shape[1]
    # -1 marks points that fall in no box (background).
    box_idxs_of_pts = points.new_full((num_batches, num_points),
                                      -1,
                                      dtype=torch.int)

    # The CUDA op allocates temporaries on the *current* device; if the
    # inputs live on another device the output would be wrong, so bind the
    # current device to the inputs' device first. See
    # https://github.com/open-mmlab/mmdetection3d/issues/305 for the
    # incorrect output before this workaround.
    device_id = points.get_device()
    assert device_id == boxes.get_device(), \
        'Points and boxes should be put on the same device'
    if torch.cuda.current_device() != device_id:
        torch.cuda.set_device(device_id)

    ext_module.points_in_boxes_part_forward(boxes.contiguous(),
                                            points.contiguous(),
                                            box_idxs_of_pts)
    return box_idxs_of_pts
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def points_in_boxes_cpu(points, boxes):
    """Find all boxes in which each point is (CPU). The CPU version of
    :meth:`points_in_boxes_all`.

    Args:
        points (torch.Tensor): [B, M, 3], [x, y, z] in
            LiDAR/DEPTH coordinate
        boxes (torch.Tensor): [B, T, 7],
            num_valid_boxes <= T, [x, y, z, x_size, y_size, z_size, rz],
            (x, y, z) is the bottom center.

    Returns:
        box_idxs_of_pts (torch.Tensor): (B, M, T), default background = 0.
    """
    assert points.shape[0] == boxes.shape[0], \
        'Points and boxes should have the same batch size, ' \
        f'but got {points.shape[0]} and {boxes.shape[0]}'
    assert boxes.shape[2] == 7, \
        'boxes dimension should be 7, ' \
        f'but got unexpected shape {boxes.shape[2]}'
    assert points.shape[2] == 3, \
        'points dimension should be 3, ' \
        f'but got unexpected shape {points.shape[2]}'

    num_batches, num_points = points.shape[0], points.shape[1]
    num_boxes = boxes.shape[1]

    # The extension fills a (T, M) slab per sample; transposed to (M, T)
    # before returning.
    point_indices = points.new_zeros((num_batches, num_boxes, num_points),
                                     dtype=torch.int)
    for batch_id in range(num_batches):
        ext_module.points_in_boxes_cpu_forward(
            boxes[batch_id].float().contiguous(),
            points[batch_id].float().contiguous(), point_indices[batch_id])

    return point_indices.transpose(1, 2)
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def points_in_boxes_all(points, boxes):
    """Find all boxes in which each point is (CUDA).

    Args:
        points (torch.Tensor): [B, M, 3], [x, y, z] in LiDAR/DEPTH coordinate
        boxes (torch.Tensor): [B, T, 7],
            num_valid_boxes <= T, [x, y, z, x_size, y_size, z_size, rz],
            (x, y, z) is the bottom center.

    Returns:
        box_idxs_of_pts (torch.Tensor): (B, M, T), default background = 0.
    """
    # Fix: the original message interpolated boxes.shape[0] twice, so a
    # mismatch reported identical sizes ("but got 2 and 2") and hid the bug.
    assert boxes.shape[0] == points.shape[0], \
        'Points and boxes should have the same batch size, ' \
        f'but got {boxes.shape[0]} and {points.shape[0]}'
    assert boxes.shape[2] == 7, \
        'boxes dimension should be 7, ' \
        f'but got unexpected shape {boxes.shape[2]}'
    assert points.shape[2] == 3, \
        'points dimension should be 3, ' \
        f'but got unexpected shape {points.shape[2]}'
    batch_size, num_points, _ = points.shape
    num_boxes = boxes.shape[1]

    # new_zeros already yields a zero-filled tensor; the original's extra
    # `.fill_(0)` was redundant and has been dropped.
    box_idxs_of_pts = points.new_zeros((batch_size, num_points, num_boxes),
                                       dtype=torch.int)

    # The CUDA op allocates temporaries on the *current* device; bind it to
    # the inputs' device or the output would be incorrect (see
    # https://github.com/open-mmlab/mmdetection3d/issues/305).
    points_device = points.get_device()
    assert points_device == boxes.get_device(), \
        'Points and boxes should be put on the same device'
    if torch.cuda.current_device() != points_device:
        torch.cuda.set_device(points_device)

    ext_module.points_in_boxes_all_forward(boxes.contiguous(),
                                           points.contiguous(),
                                           box_idxs_of_pts)

    return box_idxs_of_pts
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/ops/psa_mask.py
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Modified from https://github.com/hszhao/semseg/blob/master/lib/psa
|
| 2 |
+
from torch import nn
|
| 3 |
+
from torch.autograd import Function
|
| 4 |
+
from torch.nn.modules.utils import _pair
|
| 5 |
+
|
| 6 |
+
from ..utils import ext_loader
|
| 7 |
+
|
| 8 |
+
ext_module = ext_loader.load_ext('_ext',
|
| 9 |
+
['psamask_forward', 'psamask_backward'])
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class PSAMaskFunction(Function):
    """Autograd function dispatching the PSA mask op to the `_ext` module."""

    @staticmethod
    def symbolic(g, input, psa_type, mask_size):
        # Export as the custom MMCV ONNX op.
        return g.op(
            'mmcv::MMCVPSAMask',
            input,
            psa_type_i=psa_type,
            mask_size_i=mask_size)

    @staticmethod
    def forward(ctx, input, psa_type, mask_size):
        ctx.psa_type = psa_type
        ctx.mask_size = _pair(mask_size)
        ctx.save_for_backward(input)

        h_mask, w_mask = ctx.mask_size
        num, channels, h_feat, w_feat = input.size()
        # Each input channel corresponds to one mask position.
        assert channels == h_mask * w_mask
        out = input.new_zeros((num, h_feat * w_feat, h_feat, w_feat))

        ext_module.psamask_forward(
            input,
            out,
            psa_type=psa_type,
            num_=num,
            h_feature=h_feat,
            w_feature=w_feat,
            h_mask=h_mask,
            w_mask=w_mask,
            half_h_mask=(h_mask - 1) // 2,
            half_w_mask=(w_mask - 1) // 2)
        return out

    @staticmethod
    def backward(ctx, grad_output):
        input = ctx.saved_tensors[0]
        psa_type = ctx.psa_type
        h_mask, w_mask = ctx.mask_size
        num, channels, h_feat, w_feat = input.size()
        grad_input = grad_output.new_zeros(
            (num, channels, h_feat, w_feat))
        ext_module.psamask_backward(
            grad_output,
            grad_input,
            psa_type=psa_type,
            num_=num,
            h_feature=h_feat,
            w_feature=w_feat,
            h_mask=h_mask,
            w_mask=w_mask,
            half_h_mask=(h_mask - 1) // 2,
            half_w_mask=(w_mask - 1) // 2)
        # NOTE(review): forward takes three non-ctx inputs but four gradient
        # slots are returned here (one extra trailing None), preserved from
        # the original — verify against autograd's gradient-count contract.
        return grad_input, None, None, None


psa_mask = PSAMaskFunction.apply
| 71 |
+
|
| 72 |
+
class PSAMask(nn.Module):
    """Point-wise spatial attention mask layer.

    Wraps :func:`psa_mask`, translating the string ``psa_type`` into the
    integer code the extension op expects.

    Args:
        psa_type (str): Either ``'collect'`` or ``'distribute'``.
        mask_size (int | tuple | None): Size of the attention mask.
    """

    def __init__(self, psa_type, mask_size=None):
        super(PSAMask, self).__init__()
        assert psa_type in ['collect', 'distribute']
        # Integer code understood by the extension: 0=collect, 1=distribute.
        self.psa_type_enum = 0 if psa_type == 'collect' else 1
        self.mask_size = mask_size
        self.psa_type = psa_type

    def forward(self, input):
        return psa_mask(input, self.psa_type_enum, self.mask_size)

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(psa_type={self.psa_type}, '
                f'mask_size={self.mask_size})')
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/ops/roi_align.py
ADDED
|
@@ -0,0 +1,223 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn as nn
|
| 4 |
+
from torch.autograd import Function
|
| 5 |
+
from torch.autograd.function import once_differentiable
|
| 6 |
+
from torch.nn.modules.utils import _pair
|
| 7 |
+
|
| 8 |
+
from ..utils import deprecated_api_warning, ext_loader
|
| 9 |
+
|
| 10 |
+
ext_module = ext_loader.load_ext('_ext',
|
| 11 |
+
['roi_align_forward', 'roi_align_backward'])
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class RoIAlignFunction(Function):
    """Autograd function implementing RoI Align through the mmcv `_ext`
    extension, with a custom ONNX export path in :meth:`symbolic`."""

    @staticmethod
    def symbolic(g, input, rois, output_size, spatial_scale, sampling_ratio,
                 pool_mode, aligned):
        # ONNX export: prefer the custom MMCV op when its runtime plugin is
        # loaded; otherwise emulate with standard ONNX ops.
        from ..onnx import is_custom_op_loaded
        has_custom_op = is_custom_op_loaded()
        if has_custom_op:
            return g.op(
                'mmcv::MMCVRoiAlign',
                input,
                rois,
                output_height_i=output_size[0],
                output_width_i=output_size[1],
                spatial_scale_f=spatial_scale,
                sampling_ratio_i=sampling_ratio,
                mode_s=pool_mode,
                aligned_i=aligned)
        else:
            # NOTE(review): these private symbolic helpers moved/changed in
            # newer torch releases — confirm against the torch version pinned
            # by this project.
            from torch.onnx.symbolic_opset9 import sub, squeeze
            from torch.onnx.symbolic_helper import _slice_helper
            from torch.onnx import TensorProtoDataType
            # batch_indices = rois[:, 0].long()
            batch_indices = _slice_helper(
                g, rois, axes=[1], starts=[0], ends=[1])
            batch_indices = squeeze(g, batch_indices, 1)
            batch_indices = g.op(
                'Cast', batch_indices, to_i=TensorProtoDataType.INT64)
            # rois = rois[:, 1:]
            rois = _slice_helper(g, rois, axes=[1], starts=[1], ends=[5])
            if aligned:
                # rois -= 0.5/spatial_scale
                aligned_offset = g.op(
                    'Constant',
                    value_t=torch.tensor([0.5 / spatial_scale],
                                         dtype=torch.float32))
                rois = sub(g, rois, aligned_offset)
            # roi align
            return g.op(
                'RoiAlign',
                input,
                rois,
                batch_indices,
                output_height_i=output_size[0],
                output_width_i=output_size[1],
                spatial_scale_f=spatial_scale,
                # the standard ONNX RoiAlign op rejects negative ratios
                sampling_ratio_i=max(0, sampling_ratio),
                mode_s=pool_mode)

    @staticmethod
    def forward(ctx,
                input,
                rois,
                output_size,
                spatial_scale=1.0,
                sampling_ratio=0,
                pool_mode='avg',
                aligned=True):
        # Stash the pooling configuration on ctx for the backward pass.
        ctx.output_size = _pair(output_size)
        ctx.spatial_scale = spatial_scale
        ctx.sampling_ratio = sampling_ratio
        assert pool_mode in ('max', 'avg')
        # Encoded for the C extension: 0 = max pooling, 1 = average pooling.
        ctx.pool_mode = 0 if pool_mode == 'max' else 1
        ctx.aligned = aligned
        ctx.input_shape = input.size()

        assert rois.size(1) == 5, 'RoI must be (idx, x1, y1, x2, y2)!'

        output_shape = (rois.size(0), input.size(1), ctx.output_size[0],
                        ctx.output_size[1])
        output = input.new_zeros(output_shape)
        if ctx.pool_mode == 0:
            # Max pooling needs the argmax sample coordinates for backprop.
            argmax_y = input.new_zeros(output_shape)
            argmax_x = input.new_zeros(output_shape)
        else:
            argmax_y = input.new_zeros(0)
            argmax_x = input.new_zeros(0)

        ext_module.roi_align_forward(
            input,
            rois,
            output,
            argmax_y,
            argmax_x,
            aligned_height=ctx.output_size[0],
            aligned_width=ctx.output_size[1],
            spatial_scale=ctx.spatial_scale,
            sampling_ratio=ctx.sampling_ratio,
            pool_mode=ctx.pool_mode,
            aligned=ctx.aligned)

        ctx.save_for_backward(rois, argmax_y, argmax_x)
        return output

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        rois, argmax_y, argmax_x = ctx.saved_tensors
        grad_input = grad_output.new_zeros(ctx.input_shape)
        # complex head architecture may cause grad_output uncontiguous.
        grad_output = grad_output.contiguous()
        ext_module.roi_align_backward(
            grad_output,
            rois,
            argmax_y,
            argmax_x,
            grad_input,
            aligned_height=ctx.output_size[0],
            aligned_width=ctx.output_size[1],
            spatial_scale=ctx.spatial_scale,
            sampling_ratio=ctx.sampling_ratio,
            pool_mode=ctx.pool_mode,
            aligned=ctx.aligned)
        # Gradient flows only to `input`; the remaining forward arguments are
        # non-differentiable configuration.
        return grad_input, None, None, None, None, None, None


# Functional alias for the autograd op.
roi_align = RoIAlignFunction.apply
|
| 132 |
+
|
| 133 |
+
class RoIAlign(nn.Module):
    """RoI align pooling layer.

    Args:
        output_size (tuple): h, w
        spatial_scale (float): scale the input boxes by this number
        sampling_ratio (int): number of inputs samples to take for each
            output sample. 0 to take samples densely for current models.
        pool_mode (str, 'avg' or 'max'): pooling mode in each bin.
        aligned (bool): if False, use the legacy implementation in
            MMDetection. If True, align the results more perfectly.
        use_torchvision (bool): whether to use roi_align from torchvision.

    Note:
        The implementation of RoIAlign when aligned=True is modified from
        https://github.com/facebookresearch/detectron2/

        The meaning of aligned=True:

        Given a continuous coordinate c, its two neighboring pixel
        indices (in our pixel model) are computed by floor(c - 0.5) and
        ceil(c - 0.5). For example, c=1.3 has pixel neighbors with discrete
        indices [0] and [1] (which are sampled from the underlying signal
        at continuous coordinates 0.5 and 1.5). But the original roi_align
        (aligned=False) does not subtract the 0.5 when computing
        neighboring pixel indices and therefore it uses pixels with a
        slightly incorrect alignment (relative to our pixel model) when
        performing bilinear interpolation.

        With `aligned=True`,
        we first appropriately scale the ROI and then shift it by -0.5
        prior to calling roi_align. This produces the correct neighbors;

        The difference does not make a difference to the model's
        performance if ROIAlign is used together with conv layers.
    """

    @deprecated_api_warning(
        {
            'out_size': 'output_size',
            'sample_num': 'sampling_ratio'
        },
        cls_name='RoIAlign')
    def __init__(self,
                 output_size,
                 spatial_scale=1.0,
                 sampling_ratio=0,
                 pool_mode='avg',
                 aligned=True,
                 use_torchvision=False):
        super(RoIAlign, self).__init__()

        self.output_size = _pair(output_size)
        self.spatial_scale = float(spatial_scale)
        self.sampling_ratio = int(sampling_ratio)
        self.pool_mode = pool_mode
        self.aligned = aligned
        self.use_torchvision = use_torchvision

    def forward(self, input, rois):
        """
        Args:
            input: NCHW images
            rois: Bx5 boxes. First column is the index into N.\
                The other 4 columns are xyxy.
        """
        if self.use_torchvision:
            from torchvision.ops import roi_align as tv_roi_align
            if 'aligned' in tv_roi_align.__code__.co_varnames:
                # torchvision >= 0.7 supports `aligned` natively.
                return tv_roi_align(input, rois, self.output_size,
                                    self.spatial_scale, self.sampling_ratio,
                                    self.aligned)
            else:
                if self.aligned:
                    # Bugfix: the original used in-place `rois -= ...`,
                    # which silently shifted the caller's rois tensor as a
                    # side effect (no other path mutates its arguments).
                    # Build a new tensor instead.
                    rois = rois - rois.new_tensor(
                        [0.] + [0.5 / self.spatial_scale] * 4)
                return tv_roi_align(input, rois, self.output_size,
                                    self.spatial_scale, self.sampling_ratio)
        else:
            return roi_align(input, rois, self.output_size, self.spatial_scale,
                             self.sampling_ratio, self.pool_mode, self.aligned)

    def __repr__(self):
        s = self.__class__.__name__
        s += f'(output_size={self.output_size}, '
        s += f'spatial_scale={self.spatial_scale}, '
        s += f'sampling_ratio={self.sampling_ratio}, '
        s += f'pool_mode={self.pool_mode}, '
        s += f'aligned={self.aligned}, '
        s += f'use_torchvision={self.use_torchvision})'
        return s
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/ops/roipoint_pool3d.py
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from torch import nn as nn
|
| 2 |
+
from torch.autograd import Function
|
| 3 |
+
|
| 4 |
+
from ..utils import ext_loader
|
| 5 |
+
|
| 6 |
+
ext_module = ext_loader.load_ext('_ext', ['roipoint_pool3d_forward'])
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class RoIPointPool3d(nn.Module):
    """Encode the geometry-specific features of each 3D proposal.

    Please refer to `Paper of PartA2 <https://arxiv.org/pdf/1907.03670.pdf>`_
    for more details.

    Args:
        num_sampled_points (int, optional): Number of samples in each roi.
            Default: 512.
    """

    def __init__(self, num_sampled_points=512):
        super().__init__()
        self.num_sampled_points = num_sampled_points

    def forward(self, points, point_features, boxes3d):
        """Pool point features inside each 3D box.

        Args:
            points (torch.Tensor): Input points whose shape is (B, N, C).
            point_features (torch.Tensor): Features of input points whose
                shape is (B, N, C).
            boxes3d (B, M, 7), Input bounding boxes whose shape is (B, M, 7).

        Returns:
            pooled_features (torch.Tensor): The output pooled features whose
                shape is (B, M, 512, 3 + C).
            pooled_empty_flag (torch.Tensor): Empty flag whose shape is (B, M).
        """
        # Thin wrapper: all the work happens in the autograd Function.
        return RoIPointPool3dFunction.apply(points, point_features, boxes3d,
                                            self.num_sampled_points)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class RoIPointPool3dFunction(Function):
    """Autograd function dispatching RoI point pooling to the `_ext` op."""

    @staticmethod
    def forward(ctx, points, point_features, boxes3d, num_sampled_points=512):
        """Pool point features inside each 3D box.

        Args:
            points (torch.Tensor): Input points whose shape is (B, N, C).
            point_features (torch.Tensor): Features of input points whose
                shape is (B, N, C).
            boxes3d (B, M, 7), Input bounding boxes whose shape is (B, M, 7).
            num_sampled_points (int, optional): The num of sampled points.
                Default: 512.

        Returns:
            pooled_features (torch.Tensor): The output pooled features whose
                shape is (B, M, 512, 3 + C).
            pooled_empty_flag (torch.Tensor): Empty flag whose shape is (B, M).
        """
        assert len(points.shape) == 3 and points.shape[2] == 3
        batch_size = points.shape[0]
        boxes_num = boxes3d.shape[1]
        feature_len = point_features.shape[2]

        pooled_boxes3d = boxes3d.view(batch_size, -1, 7)
        # Output buffers filled in-place by the extension op.
        pooled_features = point_features.new_zeros(
            (batch_size, boxes_num, num_sampled_points, 3 + feature_len))
        pooled_empty_flag = point_features.new_zeros(
            (batch_size, boxes_num)).int()

        ext_module.roipoint_pool3d_forward(points.contiguous(),
                                           pooled_boxes3d.contiguous(),
                                           point_features.contiguous(),
                                           pooled_features, pooled_empty_flag)

        return pooled_features, pooled_empty_flag

    @staticmethod
    def backward(ctx, grad_out):
        # The pooling op is not differentiable.
        raise NotImplementedError
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/ops/tin_shift.py
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
# Code reference from "Temporal Interlacing Network"
|
| 3 |
+
# https://github.com/deepcs233/TIN/blob/master/cuda_shift/rtc_wrap.py
|
| 4 |
+
# Hao Shao, Shengju Qian, Yu Liu
|
| 5 |
+
# shaoh19@mails.tsinghua.edu.cn, sjqian@cse.cuhk.edu.hk, yuliu@ee.cuhk.edu.hk
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
import torch.nn as nn
|
| 9 |
+
from torch.autograd import Function
|
| 10 |
+
|
| 11 |
+
from ..utils import ext_loader
|
| 12 |
+
|
| 13 |
+
ext_module = ext_loader.load_ext('_ext',
|
| 14 |
+
['tin_shift_forward', 'tin_shift_backward'])
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class TINShiftFunction(Function):
    """Autograd function wrapping the temporal-interlace-shift `_ext` ops."""

    @staticmethod
    def forward(ctx, input, shift):
        channels = input.size(2)
        segments = shift.size(1)
        # Channels must split evenly across the temporal segments.
        if channels // segments <= 0 or channels % segments != 0:
            raise ValueError(
                'C should be a multiple of num_segments, '
                f'but got C={channels} and num_segments={segments}.')

        ctx.save_for_backward(shift)

        output = torch.zeros_like(input)
        ext_module.tin_shift_forward(input, shift, output)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        shift = ctx.saved_tensors[0]
        grad_input = grad_output.new(*grad_output.size()).zero_()
        # The shift tensor receives no gradient from the extension op; its
        # gradient buffer stays zero.
        grad_shift = shift.new(*shift.size()).zero_()
        ext_module.tin_shift_backward(grad_output, shift, grad_input)

        return grad_input, grad_shift


tin_shift = TINShiftFunction.apply
| 47 |
+
|
| 48 |
+
class TINShift(nn.Module):
    """Temporal Interlace Shift.

    Temporal Interlace shift is a differentiable temporal-wise frame shifting
    which is proposed in "Temporal Interlacing Network"

    Please refer to https://arxiv.org/abs/2001.06499 for more details.
    Code is modified from https://github.com/mit-han-lab/temporal-shift-module
    """

    def forward(self, input, shift):
        """Perform temporal interlace shift.

        Args:
            input (Tensor): Feature map with shape [N, num_segments, C, H * W].
            shift (Tensor): Shift tensor with shape [N, num_segments].

        Returns:
            Feature map after temporal interlace shift.
        """
        # Stateless wrapper around the functional form.
        return tin_shift(input, shift)
|
FRESCO/src/ControlNet/annotator/uniformer/mmcv/parallel/__init__.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from .collate import collate
|
| 3 |
+
from .data_container import DataContainer
|
| 4 |
+
from .data_parallel import MMDataParallel
|
| 5 |
+
from .distributed import MMDistributedDataParallel
|
| 6 |
+
from .registry import MODULE_WRAPPERS
|
| 7 |
+
from .scatter_gather import scatter, scatter_kwargs
|
| 8 |
+
from .utils import is_module_wrapper
|
| 9 |
+
|
| 10 |
+
__all__ = [
|
| 11 |
+
'collate', 'DataContainer', 'MMDataParallel', 'MMDistributedDataParallel',
|
| 12 |
+
'scatter', 'scatter_kwargs', 'is_module_wrapper', 'MODULE_WRAPPERS'
|
| 13 |
+
]
|