fasdfsa commited on
Commit
52a9452
·
1 Parent(s): 9a0bb76

add DB code

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitignore +1 -0
  2. DB/.gitignore +121 -0
  3. DB/README.md +195 -0
  4. DB/assets/ic15_eval/rrc_evaluation_funcs.py +367 -0
  5. DB/assets/ic15_eval/script.py +316 -0
  6. DB/assets/ops/dcn/__init__.py +13 -0
  7. DB/assets/ops/dcn/functions/__init__.py +0 -0
  8. DB/assets/ops/dcn/functions/deform_conv.py +181 -0
  9. DB/assets/ops/dcn/functions/deform_pool.py +69 -0
  10. DB/assets/ops/dcn/modules/__init__.py +0 -0
  11. DB/assets/ops/dcn/modules/deform_conv.py +157 -0
  12. DB/assets/ops/dcn/modules/deform_pool.py +172 -0
  13. DB/assets/ops/dcn/setup.py +15 -0
  14. DB/assets/ops/dcn/src/deform_conv_cuda.cpp +695 -0
  15. DB/assets/ops/dcn/src/deform_conv_cuda_kernel.cu +866 -0
  16. DB/assets/ops/dcn/src/deform_pool_cuda.cpp +87 -0
  17. DB/assets/ops/dcn/src/deform_pool_cuda_kernel.cu +364 -0
  18. DB/backbones/__init__.py +2 -0
  19. DB/backbones/mobilenetv3.py +252 -0
  20. DB/backbones/resnet.py +336 -0
  21. DB/concern/__init__.py +13 -0
  22. DB/concern/average_meter.py +17 -0
  23. DB/concern/box2seg.py +69 -0
  24. DB/concern/config.py +191 -0
  25. DB/concern/convert.py +31 -0
  26. DB/concern/icdar2015_eval/__init__.py +0 -0
  27. DB/concern/icdar2015_eval/detection/__init__.py +0 -0
  28. DB/concern/icdar2015_eval/detection/deteval.py +323 -0
  29. DB/concern/icdar2015_eval/detection/icdar2013.py +290 -0
  30. DB/concern/icdar2015_eval/detection/iou.py +222 -0
  31. DB/concern/icdar2015_eval/detection/mtwi2018.py +285 -0
  32. DB/concern/log.py +196 -0
  33. DB/concern/signal_monitor.py +17 -0
  34. DB/concern/visualizer.py +98 -0
  35. DB/concern/webcv2/__init__.py +19 -0
  36. DB/concern/webcv2/manager.py +57 -0
  37. DB/concern/webcv2/server.py +104 -0
  38. DB/concern/webcv2/templates/index.html +145 -0
  39. DB/convert_to_onnx.py +78 -0
  40. DB/data/__init__.py +5 -0
  41. DB/data/augmenter.py +35 -0
  42. DB/data/data_loader.py +250 -0
  43. DB/data/dataset.py +23 -0
  44. DB/data/image_dataset.py +105 -0
  45. DB/data/make_border_map.py +121 -0
  46. DB/data/make_seg_detector_data.py +96 -0
  47. DB/data/meta_loader.py +92 -0
  48. DB/data/processes/__init__.py +10 -0
  49. DB/data/processes/augment_data.py +82 -0
  50. DB/data/processes/data_process.py +25 -0
.gitignore CHANGED
@@ -1,3 +1,4 @@
 
1
  icdar2015_aliocr/
2
  icdar2015_aliocr_char/
3
  poly.jpg
 
1
+ __pycache__/
2
  icdar2015_aliocr/
3
  icdar2015_aliocr_char/
4
  poly.jpg
DB/.gitignore ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
+ */__pycache__/*
3
+ workspace
4
+ *.py[cod]
5
+ *$py.class
6
+ *.swp
7
+ *.swo
8
+ *.lock
9
+
10
+ # C extensions
11
+ *.so
12
+ *.nfs*
13
+
14
+ # Distribution / packaging
15
+ .Python
16
+ build/
17
+ develop-eggs/
18
+ dist/
19
+ downloads/
20
+ eggs/
21
+ .eggs/
22
+ lib/
23
+ lib64/
24
+ parts/
25
+ sdist/
26
+ var/
27
+ wheels/
28
+ *.egg-info/
29
+ .installed.cfg
30
+ *.egg
31
+ MANIFEST
32
+
33
+ # PyInstaller
34
+ # Usually these files are written by a python script from a template
35
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
36
+ *.manifest
37
+ *.spec
38
+
39
+ # Installer logs
40
+ pip-log.txt
41
+ pip-delete-this-directory.txt
42
+
43
+ # Unit test / coverage reports
44
+ htmlcov/
45
+ .tox/
46
+ .coverage
47
+ .coverage.*
48
+ .cache
49
+ nosetests.xml
50
+ coverage.xml
51
+ *.cover
52
+ .hypothesis/
53
+ .pytest_cache/
54
+
55
+ # Translations
56
+ *.mo
57
+ *.pot
58
+
59
+ # Django stuff:
60
+ *.log
61
+ local_settings.py
62
+ db.sqlite3
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ target/
76
+
77
+ # Jupyter Notebook
78
+ .ipynb_checkpoints
79
+
80
+ # pyenv
81
+ .python-version
82
+
83
+ # celery beat schedule file
84
+ celerybeat-schedule
85
+
86
+ # SageMath parsed files
87
+ *.sage.py
88
+
89
+ # Environments
90
+ .env
91
+ .venv
92
+ env/
93
+ venv/
94
+ ENV/
95
+ env.bak/
96
+ venv.bak/
97
+
98
+ # Spyder project settings
99
+ .spyderproject
100
+ .spyproject
101
+
102
+ # Rope project settings
103
+ .ropeproject
104
+
105
+ # mkdocs documentation
106
+ /site
107
+
108
+ .idea
109
+ log.txt # From the naive evaluating of ICDAR15
110
+
111
+ # specific directory
112
+ # datasets
113
+ evaluation
114
+ experiments/backup
115
+ lib
116
+ outputs
117
+ results
118
+ *.zip
119
+ *.pyx
120
+ struture/representers/setup.py
121
+ demo_results
DB/README.md ADDED
@@ -0,0 +1,195 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## News
2
+ * The ASF module in DBNet++([TPAMI](https://ieeexplore.ieee.org/abstract/document/9726868/), [arxiv](https://arxiv.org/abs/2202.10304)) is released.
3
+ * DB is included in [WeChat OCR engine](https://mp.weixin.qq.com/s/6IGXof3KWVnN8z1i2YOqJA)
4
+ * DB is included in [OpenCV](https://github.com/opencv/opencv/blob/master/doc/tutorials/dnn/dnn_text_spotting/dnn_text_spotting.markdown)
5
+ * DB is included in [PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR)
6
+
7
+ # Introduction
8
+ This is a PyTorch implementation of DBNet([arxiv](https://arxiv.org/abs/1911.08947)) and DBNet++([TPAMI](https://ieeexplore.ieee.org/abstract/document/9726868/), [arxiv](https://arxiv.org/abs/2202.10304)). It presents a real-time arbitrary-shape scene text detector, achieving the state-of-the-art performance on standard benchmarks.
9
+
10
+ Part of the code is inherited from [MegReader](https://github.com/Megvii-CSG/MegReader).
11
+
12
+ ## ToDo List
13
+
14
+ - [x] Release code
15
+ - [x] Document for Installation
16
+ - [x] Trained models
17
+ - [x] Document for testing and training
18
+ - [x] Evaluation
19
+ - [x] Demo script
20
+ - [x] Release DBNet++ code
21
+ - [x] Release DBNet++ models
22
+
23
+
24
+
25
+ ## Installation
26
+
27
+ ### Requirements:
28
+ - Python3
29
+ - PyTorch == 1.2
30
+ - GCC >= 4.9 (This is important for PyTorch)
31
+ - CUDA >= 9.0 (10.1 is recommended)
32
+
33
+
34
+ ```bash
35
+ # first, make sure that your conda is setup properly with the right environment
36
+ # for that, check that `which conda`, `which pip` and `which python` points to the
37
+ # right path. From a clean conda env, this is what you need to do
38
+
39
+ conda create --name DB -y
40
+ conda activate DB
41
+
42
+ # this installs the right pip and dependencies for the fresh python
43
+ conda install ipython pip
44
+
45
+ # python dependencies
46
+ pip install -r requirement.txt
47
+
48
+ # install PyTorch with cuda-10.1
49
+ conda install pytorch torchvision cudatoolkit=10.1 -c pytorch
50
+
51
+ # clone repo
52
+ git clone https://github.com/MhLiao/DB.git
53
+ cd DB/
54
+
55
+ # build deformable convolution operator
56
+ # make sure your cuda path of $CUDA_HOME is the same version as your cuda in PyTorch
57
+ # make sure GCC >= 4.9
58
+ # you need to delete the build directory before you re-build it.
59
+ echo $CUDA_HOME
60
+ cd assets/ops/dcn/
61
+ python setup.py build_ext --inplace
62
+
63
+ ```
64
+
65
+ ## Models
66
+ New: DBNet++ trained models [Google Drive](https://drive.google.com/drive/folders/1buwe_b6ysoZFCJgHMHIr-yHd-hEivQRK?usp=sharing).
67
+
68
+ Download Trained models [Baidu Drive](https://pan.baidu.com/s/1vxcdpOswTK6MxJyPIJlBkA) (download code: p6u3), [Google Drive](https://drive.google.com/open?id=1T9n0HTP3X3Y_nJ0D1ekMhCQRHntORLJG).
69
+ ```
70
+ pre-trained-model-synthtext -- used to finetune models, not for evaluation
71
+ td500_resnet18
72
+ td500_resnet50
73
+ totaltext_resnet18
74
+ totaltext_resnet50
75
+ ```
76
+
77
+ ## Datasets
78
+ The root of the dataset directory can be ```DB/datasets/```.
79
+
80
+ Download the converted ground-truth and data list [Baidu Drive](https://pan.baidu.com/s/1BPYxcZnLXN87rQKmz9PFYA) (download code: mz0a), [Google Drive](https://drive.google.com/open?id=12ozVTiBIqK8rUFWLUrlquNfoQxL2kAl7). The images of each dataset can be obtained from their official website.
81
+
82
+ ## Testing
83
+ ### Prepare dataset
84
+ An example of the path of test images:
85
+ ```
86
+ datasets/total_text/train_images
87
+ datasets/total_text/train_gts
88
+ datasets/total_text/train_list.txt
89
+ datasets/total_text/test_images
90
+ datasets/total_text/test_gts
91
+ datasets/total_text/test_list.txt
92
+ ```
93
+ The data root directory and the data list file can be defined in ```base_totaltext.yaml```
94
+
95
+ ### Config file
96
+ **The YAML files with the name of ```base*.yaml``` should not be used as the training or testing config file directly.**
97
+
98
+ ### Demo
99
+ Run the model inference with a single image. Here is an example:
100
+
101
+ ```CUDA_VISIBLE_DEVICES=0 python demo.py experiments/seg_detector/totaltext_resnet18_deform_thre.yaml --image_path datasets/total_text/test_images/img10.jpg --resume path-to-model-directory/totaltext_resnet18 --polygon --box_thresh 0.7 --visualize```
102
+
103
+ The results can be found in `demo_results`.
104
+
105
+ ### Evaluate the performance
106
+ Note that we do not provide all the protocols for all benchmarks for simplification. The embedded evaluation protocol in the code is modified from the protocol of the ICDAR 2015 dataset while supporting arbitrary-shape polygons. It almost produces the same results as the pascal evaluation protocol in the Total-Text dataset.
107
+
108
+ The `img651.jpg` in the test set of Total-Text contains exif info for a 90° rotation thus the gt does not match the image. You should read and re-write this image to get normal results. The converted image is also provided in the dataset links.
109
+
110
+ The following command can re-implement the results in the paper:
111
+
112
+ ```
113
+ CUDA_VISIBLE_DEVICES=0 python eval.py experiments/seg_detector/totaltext_resnet18_deform_thre.yaml --resume path-to-model-directory/totaltext_resnet18 --polygon --box_thresh 0.7
114
+
115
+ CUDA_VISIBLE_DEVICES=0 python eval.py experiments/seg_detector/totaltext_resnet50_deform_thre.yaml --resume path-to-model-directory/totaltext_resnet50 --polygon --box_thresh 0.6
116
+
117
+ CUDA_VISIBLE_DEVICES=0 python eval.py experiments/seg_detector/td500_resnet18_deform_thre.yaml --resume path-to-model-directory/td500_resnet18 --box_thresh 0.5
118
+
119
+ CUDA_VISIBLE_DEVICES=0 python eval.py experiments/seg_detector/td500_resnet50_deform_thre.yaml --resume path-to-model-directory/td500_resnet50 --box_thresh 0.5
120
+
121
+ # short side 736, which can be changed in base_ic15.yaml
122
+ CUDA_VISIBLE_DEVICES=0 python eval.py experiments/seg_detector/ic15_resnet18_deform_thre.yaml --resume path-to-model-directory/ic15_resnet18 --box_thresh 0.55
123
+
124
+ # short side 736, which can be changed in base_ic15.yaml
125
+ CUDA_VISIBLE_DEVICES=0 python eval.py experiments/seg_detector/ic15_resnet50_deform_thre.yaml --resume path-to-model-directory/ic15_resnet50 --box_thresh 0.6
126
+
127
+ # short side 1152, which can be changed in base_ic15.yaml
128
+ CUDA_VISIBLE_DEVICES=0 python eval.py experiments/seg_detector/ic15_resnet50_deform_thre.yaml --resume path-to-model-directory/ic15_resnet50 --box_thresh 0.6
129
+ ```
130
+
131
+ The results should be as follows:
132
+
133
+ | Model | precision | recall | F-measure | precision (paper) | recall (paper) | F-measure (paper) |
134
+ |:------------------: |:---------: |:------: |:---------: |:-----------------: |:--------------: |:-----------------: |
135
+ | totaltext-resnet18 | 88.9 | 77.6 | 82.9 | 88.3 | 77.9 | 82.8 |
136
+ | totaltext-resnet50 | 88.0 | 81.5 | 84.6 | 87.1 | 82.5 | 84.7 |
137
+ | td500-resnet18 | 86.5 | 79.4 | 82.8 | 90.4 | 76.3 | 82.8 |
138
+ | td500-resnet50 | 91.1 | 80.8 | 85.6 | 91.5 | 79.2 | 84.9 |
139
+ | ic15-resnet18 (736) | 87.7 | 77.5 | 82.3 | 86.8 | 78.4 | 82.3 |
140
+ | ic15-resnet50 (736) | 91.3 | 80.3 | 85.4 | 88.2 | 82.7 | 85.4 |
141
+ | ic15-resnet50 (1152)| 90.7 | 84.0 | 87.2 | 91.8 | 83.2 | 87.3 |
142
+
143
+
144
+ ```box_thresh``` can be used to balance the precision and recall, which may be different for different datasets to get a good F-measure. ```polygon``` is only used for arbitrary-shape text dataset. The size of the input images are defined in ```validate_data->processes->AugmentDetectionData``` in ```base_*.yaml```.
145
+
146
+ ### Evaluate the speed
147
+ Set ```adaptive``` to ```False``` in the yaml file to speedup the inference without decreasing the performance. The speed is evaluated by performing a testing image for 50 times to exclude extra IO time.
148
+
149
+ ```CUDA_VISIBLE_DEVICES=0 python eval.py experiments/seg_detector/totaltext_resnet18_deform_thre.yaml --resume path-to-model-directory/totaltext_resnet18 --polygon --box_thresh 0.7 --speed```
150
+
151
+ Note that the speed is related to both to the GPU and the CPU since the model runs with the GPU and the post-processing algorithm runs with the CPU.
152
+
153
+ ## Training
154
+ Check the paths of data_dir and data_list in the base_*.yaml file. For better performance, you can first pre-train the model with SynthText and then fine-tune it with the specific real-world dataset.
155
+
156
+ ```CUDA_VISIBLE_DEVICES=0,1,2,3 python train.py path-to-yaml-file --num_gpus 4```
157
+
158
+ You can also try distributed training (**Note that the distributed mode is not fully tested. I am not sure whether it can achieve the same performance as non-distributed training.**)
159
+
160
+ ```CUDA_VISIBLE_DEVICES=0,1,2,3 python -m torch.distributed.launch --nproc_per_node=4 train.py path-to-yaml-file --num_gpus 4```
161
+
162
+ ## Improvements
163
+ Note that the current implementation is written by pure Python code except for the deformable convolution operator. Thus, the code can be further optimized by some optimization skills, such as [TensorRT](https://github.com/NVIDIA/TensorRT) for the model forward and efficient C++ code for the [post-processing function](https://github.com/MhLiao/DB/blob/d0d855df1c66b002297885a089a18d50a265fa30/structure/representers/seg_detector_representer.py#L26).
164
+
165
+ Another option to increase speed is to run the model forward and the post-processing algorithm in parallel through a producer-consumer strategy.
166
+
167
+ Contributions or pull requests are welcome.
168
+
169
+ ## Third-party implementations
170
+ * Keras implementation: [xuannianz/DifferentiableBinarization](https://github.com/xuannianz/DifferentiableBinarization)
171
+ * DB is included in [OpenCV](https://github.com/opencv/opencv/blob/master/doc/tutorials/dnn/dnn_text_spotting/dnn_text_spotting.markdown)
172
+ * DB is included in [PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR)
173
+
174
+ ## Citing the related works
175
+
176
+ Please cite the related works in your publications if it helps your research:
177
+
178
+ @inproceedings{liao2020real,
179
+ author={Liao, Minghui and Wan, Zhaoyi and Yao, Cong and Chen, Kai and Bai, Xiang},
180
+ title={Real-time Scene Text Detection with Differentiable Binarization},
181
+ booktitle={Proc. AAAI},
182
+ year={2020}
183
+ }
184
+
185
+ @article{liao2022real,
186
+ title={Real-Time Scene Text Detection with Differentiable Binarization and Adaptive Scale Fusion},
187
+ author={Liao, Minghui and Zou, Zhisheng and Wan, Zhaoyi and Yao, Cong and Bai, Xiang},
188
+ journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
189
+ year={2022},
190
+ publisher={IEEE}
191
+ }
192
+
193
+
194
+
195
+
DB/assets/ic15_eval/rrc_evaluation_funcs.py ADDED
@@ -0,0 +1,367 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python2
2
+ #encoding: UTF-8
3
+ import json
4
+ import sys;sys.path.append('./')
5
+ import zipfile
6
+ import re
7
+ import sys
8
+ import os
9
+ import codecs
10
+ import importlib
11
+ from StringIO import StringIO
12
+
13
def print_help():
    """Write the expected command-line usage to stdout and abort with status 2."""
    usage = 'Usage: python %s.py -g=<gtFile> -s=<submFile> [-o=<outputFolder> -p=<jsonParams>]' % sys.argv[0]
    sys.stdout.write(usage)
    sys.exit(2)
16
+
17
+
18
def load_zip_file_keys(file, fileNameRegExp=''):
    """
    Return the entry keys of a ZIP archive.

    Each key is either the entry name itself or, when fileNameRegExp is given
    and defines a capturing group, the first captured group of the match.
    Entries that do not match the expression are silently skipped.
    """
    try:
        archive = zipfile.ZipFile(file, mode='r', allowZip64=True)
    except:
        raise Exception('Error loading the ZIP archive.')

    keys = []
    for entry in archive.namelist():
        if fileNameRegExp == "":
            keys.append(entry)
            continue
        match = re.match(fileNameRegExp, entry)
        if match is None:
            # non-matching entries are ignored rather than rejected here
            continue
        keys.append(match.group(1) if match.groups() else entry)

    return keys
45
+
46
+
47
def load_zip_file(file, fileNameRegExp='', allEntries=False):
    """
    Return a dict mapping entry keys to the raw contents of a ZIP archive.

    Keys are the entry names or, when fileNameRegExp defines a capturing
    group, the first captured group. With allEntries=True, any entry that
    fails to match fileNameRegExp raises instead of being skipped.
    """
    try:
        archive = zipfile.ZipFile(file, mode='r', allowZip64=True)
    except:
        raise Exception('Error loading the ZIP archive')

    contents = {}
    for entry in archive.namelist():
        key = entry
        matched = True
        if fileNameRegExp != "":
            match = re.match(fileNameRegExp, entry)
            if match is None:
                matched = False
            elif match.groups():
                key = match.group(1)

        if matched:
            contents[key] = archive.read(entry)
        elif allEntries:
            raise Exception('ZIP entry not valid: %s' % entry)

    return contents
77
+
78
def decode_utf8(raw):
    """
    Return the decoded Unicode text of *raw*, or None on failure.

    Undecodable byte sequences are replaced (errors='replace'); a leading
    UTF-8 BOM is stripped if present.
    """
    try:
        text = codecs.decode(raw, 'utf-8', 'replace')
        # extracts BOM if exists
        data = text.encode('utf8')
        if data.startswith(codecs.BOM_UTF8):
            # The replacement must be bytes (b''): the original passed a str
            # (''), which raises TypeError on Python 3 and silently turned
            # every BOM-prefixed input into None.
            data = data.replace(codecs.BOM_UTF8, b'', 1)
        return data.decode('utf-8')
    except Exception:
        return None
91
+
92
def validate_lines_in_file(fileName, file_contents, CRLF=True, LTRB=True, withTranscription=False, withConfidence=False, imWidth=0, imHeight=0):
    """
    Validate every non-empty line of a submission/GT file.

    Decodes file_contents as UTF-8, splits it on the configured line
    delimiter, and runs the single-line validator on each line, wrapping any
    failure in an exception that names the offending sample and line.
    """
    decoded = decode_utf8(file_contents)
    if decoded is None:
        raise Exception("The file %s is not UTF-8" % fileName)

    delimiter = "\r\n" if CRLF else "\n"
    for raw_line in decoded.split(delimiter):
        cleaned = raw_line.replace("\r", "").replace("\n", "")
        if cleaned == "":
            continue
        try:
            validate_tl_line(cleaned, LTRB, withTranscription, withConfidence, imWidth, imHeight)
        except Exception as e:
            raise Exception(("Line in sample not valid. Sample: %s Line: %s Error: %s" % (fileName, cleaned, str(e))).encode('utf-8', 'replace'))
108
+
109
+
110
+
111
def validate_tl_line(line, LTRB=True, withTranscription=True, withConfidence=True, imWidth=0, imHeight=0):
    """
    Raise an exception if *line* is not a valid text-localization line.

    Accepted formats (with imWidth/imHeight > 0 the points must also lie
    inside the image bounds):
      LTRB=True:  xmin,ymin,xmax,ymax[,confidence][,transcription]
      LTRB=False: x1,y1,x2,y2,x3,y3,x4,y4[,confidence][,transcription]
    """
    # Parsing performs all the validation; the parsed values are discarded.
    get_tl_line_values(line, LTRB, withTranscription, withConfidence, imWidth, imHeight)
120
+
121
+
122
def get_tl_line_values(line,LTRB=True,withTranscription=False,withConfidence=False,imWidth=0,imHeight=0):
    """
    Validate the format of the line. If the line is not valid an exception will be raised.
    If maxWidth and maxHeight are specified, all points must be inside the imgage bounds.
    Posible values are:
    LTRB=True: xmin,ymin,xmax,ymax[,confidence][,transcription]
    LTRB=False: x1,y1,x2,y2,x3,y3,x4,y4[,confidence][,transcription]
    Returns values from a textline. Points , [Confidences], [Transcriptions]
    """
    confidence = 0.0
    transcription = "";
    points = []

    numPoints = 4;

    if LTRB:
        # Axis-aligned rectangle: the line carries 4 coordinates (xmin,ymin,xmax,ymax).
        numPoints = 4;

        if withTranscription and withConfidence:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-1].?[0-9]*)\s*,(.*)$',line)
            if m == None :
                # NOTE(review): this second, identical re-match is dead — the
                # unconditional raise below discards its result. Looks like a
                # leftover from the upstream ICDAR script; confirm before changing.
                m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-1].?[0-9]*)\s*,(.*)$',line)
                raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax,confidence,transcription")
        elif withConfidence:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-1].?[0-9]*)\s*$',line)
            if m == None :
                raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax,confidence")
        elif withTranscription:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,(.*)$',line)
            if m == None :
                raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax,transcription")
        else:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,?\s*$',line)
            if m == None :
                raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax")

        xmin = int(m.group(1))
        ymin = int(m.group(2))
        xmax = int(m.group(3))
        ymax = int(m.group(4))
        # Reject degenerate / inverted rectangles before accepting the points.
        if(xmax<xmin):
            raise Exception("Xmax value (%s) not valid (Xmax < Xmin)." %(xmax))
        if(ymax<ymin):
            raise Exception("Ymax value (%s) not valid (Ymax < Ymin)." %(ymax))

        points = [ float(m.group(i)) for i in range(1, (numPoints+1) ) ]

        if (imWidth>0 and imHeight>0):
            validate_point_inside_bounds(xmin,ymin,imWidth,imHeight);
            validate_point_inside_bounds(xmax,ymax,imWidth,imHeight);

    else:
        # Quadrilateral: the line carries 8 coordinates (x1,y1 ... x4,y4).
        numPoints = 8;

        if withTranscription and withConfidence:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-1].?[0-9]*)\s*,(.*)$',line)
            if m == None :
                raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,confidence,transcription")
        elif withConfidence:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-1].?[0-9]*)\s*$',line)
            if m == None :
                raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,confidence")
        elif withTranscription:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,(.*)$',line)
            if m == None :
                raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,transcription")
        else:
            m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*$',line)
            if m == None :
                raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4")

        points = [ float(m.group(i)) for i in range(1, (numPoints+1) ) ]

        # Quadrilaterals must be given in clockwise order (image coordinates).
        validate_clockwise_points(points)

        if (imWidth>0 and imHeight>0):
            validate_point_inside_bounds(points[0],points[1],imWidth,imHeight);
            validate_point_inside_bounds(points[2],points[3],imWidth,imHeight);
            validate_point_inside_bounds(points[4],points[5],imWidth,imHeight);
            validate_point_inside_bounds(points[6],points[7],imWidth,imHeight);


    if withConfidence:
        # The confidence column directly follows the coordinate columns.
        try:
            confidence = float(m.group(numPoints+1))
        except ValueError:
            raise Exception("Confidence value must be a float")

    if withTranscription:
        posTranscription = numPoints + (2 if withConfidence else 1)
        transcription = m.group(posTranscription)
        m2 = re.match(r'^\s*\"(.*)\"\s*$',transcription)
        if m2 != None : #Transcription with double quotes, we extract the value and replace escaped characters
            transcription = m2.group(1).replace("\\\\", "\\").replace("\\\"", "\"")

    return points,confidence,transcription
220
+
221
+
222
def validate_point_inside_bounds(x, y, imWidth, imHeight):
    """
    Raise an Exception if the point (x, y) lies outside the image bounds.

    The original messages referenced undefined names (xmin/ymin), raising
    NameError instead of the intended error, and the Y message had more
    format placeholders than arguments; both are fixed here.
    """
    if(x<0 or x>imWidth):
        raise Exception("X value (%s) not valid. Image dimensions: (%s,%s)" % (x, imWidth, imHeight))
    if(y<0 or y>imHeight):
        raise Exception("Y value (%s) not valid. Image dimensions: (%s,%s)" % (y, imWidth, imHeight))
227
+
228
def validate_clockwise_points(points):
    """
    Raise an Exception unless the 4 points delimiting a quadrilateral are
    given in clockwise order (standard image coordinates: origin top-left,
    Y axis pointing down).
    """
    if len(points) != 8:
        raise Exception("Points list not valid." + str(len(points)))

    corners = [(int(points[i]), int(points[i + 1])) for i in range(0, 8, 2)]

    # Shoelace-style signed sum over the 4 edges; > 0 means counter-clockwise
    # in image coordinates.
    total = 0
    for idx in range(4):
        x0, y0 = corners[idx]
        x1, y1 = corners[(idx + 1) % 4]
        total += (x1 - x0) * (y1 + y0)

    if total > 0:
        raise Exception("Points are not clockwise. The coordinates of bounding quadrilaterals have to be given in clockwise order. Regarding the correct interpretation of 'clockwise' remember that the image coordinate system used is the standard one, with the image origin at the upper left, the X axis extending to the right and Y axis extending downwards.")
252
+
253
def get_tl_line_values_from_file_contents(content, CRLF=True, LTRB=True, withTranscription=False, withConfidence=False, imWidth=0, imHeight=0, sort_by_confidences=True):
    """
    Parse every non-empty line of *content* and return parallel lists of
    points, confidences and transcriptions. Valid line formats:
      xmin,ymin,xmax,ymax,[confidence],[transcription]
      x1,y1,x2,y2,x3,y3,x4,y4,[confidence],[transcription]
    When confidences are present and sort_by_confidences is set, all three
    lists are reordered by descending confidence.
    """
    pointsList = []
    transcriptionsList = []
    confidencesList = []

    delimiter = "\r\n" if CRLF else "\n"
    for raw_line in content.split(delimiter):
        cleaned = raw_line.replace("\r", "").replace("\n", "")
        if cleaned == "":
            continue
        pts, conf, transcription = get_tl_line_values(cleaned, LTRB, withTranscription, withConfidence, imWidth, imHeight)
        pointsList.append(pts)
        transcriptionsList.append(transcription)
        confidencesList.append(conf)

    if withConfidence and len(confidencesList) > 0 and sort_by_confidences:
        import numpy as np
        order = np.argsort(-np.array(confidencesList))
        confidencesList = [confidencesList[i] for i in order]
        pointsList = [pointsList[i] for i in order]
        transcriptionsList = [transcriptionsList[i] for i in order]

    return pointsList, confidencesList, transcriptionsList
280
+
281
def main_evaluation(p, default_evaluation_params_fn, validate_data_fn, evaluate_method_fn, show_result=True, per_sample=True):
    """
    This process validates a method, evaluates it and if it succeeds generates a
    ZIP file with a JSON entry for each sample.
    Params:
    p: Dictionary of parameters with the GT/submission locations. If None is passed, the parameters sent by the system (sys.argv) are used.
    default_evaluation_params_fn: points to a function that returns a dictionary with the default parameters used for the evaluation
    validate_data_fn: points to a method that validates the correct format of the submission
    evaluate_method_fn: points to a function that evaluates the submission and returns a Dictionary with the results
    Returns:
    (resDict, model_name) on success; resDict alone when evaluation failed.

    Fixes over the original: Python 2 `except Exception, e` / `.iteritems()`
    replaced with Python 3 compatible forms, and model_name is initialized so
    passing an explicit `p` no longer raises NameError at the final return.
    """
    # model_name is only recoverable from argv; default it when p is given.
    model_name = None
    if (p == None):
        p = dict([s[1:].split('=') for s in sys.argv[2:]])
        model_name = sys.argv[1].split('=')[-1]
        if(len(sys.argv) < 4):
            print_help()

    evalParams = default_evaluation_params_fn()
    if 'p' in p.keys():
        # 'p' may be a dict already or a JSON string wrapped in one extra char
        # on each side (the [1:-1] slice) — preserved from the original.
        evalParams.update(p['p'] if isinstance(p['p'], dict) else json.loads(p['p'][1:-1]))

    resDict = {'calculated': True, 'Message': '', 'method': '{}', 'per_sample': '{}'}
    try:
        validate_data_fn(p['g'], p['s'], evalParams)
        evalData = evaluate_method_fn(p['g'], p['s'], evalParams)
        resDict.update(evalData)
    except Exception as e:
        resDict['Message'] = str(e)
        resDict['calculated'] = False

    if 'o' in p:
        if not os.path.exists(p['o']):
            os.makedirs(p['o'])

        resultsOutputname = p['o'] + '/results.zip'
        outZip = zipfile.ZipFile(resultsOutputname, mode='w', allowZip64=True)

        # per_sample / output_items are written as individual entries below,
        # not inside method.json.
        del resDict['per_sample']
        if 'output_items' in resDict.keys():
            del resDict['output_items']

        outZip.writestr('method.json', json.dumps(resDict))

    if not resDict['calculated']:
        if show_result:
            sys.stderr.write('Error!\n' + resDict['Message'] + '\n\n')
        if 'o' in p:
            outZip.close()
        return resDict

    if 'o' in p:
        if per_sample == True:
            for k, v in evalData['per_sample'].items():
                outZip.writestr(k + '.json', json.dumps(v))

            if 'output_items' in evalData.keys():
                for k, v in evalData['output_items'].items():
                    outZip.writestr(k, v)

        outZip.close()

    if show_result:
        sys.stdout.write("Calculated!")
        sys.stdout.write(json.dumps(resDict['method']))

    return resDict, model_name
347
+
348
+
349
def main_validation(default_evaluation_params_fn, validate_data_fn):
    """
    This process validates a method, reading its parameters from sys.argv
    (-g=<gtFile> -s=<submFile> [-p=<jsonParams>]).
    Params:
    default_evaluation_params_fn: points to a function that returns a dictionary with the default parameters used for the evaluation
    validate_data_fn: points to a method that validates the correct format of the submission

    Exits with status 0 on success, 101 on validation failure. The original
    used Python 2 `print` statements, a syntax error on Python 3.
    """
    try:
        p = dict([s[1:].split('=') for s in sys.argv[1:]])
        evalParams = default_evaluation_params_fn()
        if 'p' in p.keys():
            evalParams.update(p['p'] if isinstance(p['p'], dict) else json.loads(p['p'][1:-1]))

        validate_data_fn(p['g'], p['s'], evalParams)
        print('SUCCESS')
        # SystemExit is not an Exception subclass, so the handler below does
        # not swallow this exit.
        sys.exit(0)
    except Exception as e:
        print(str(e))
        sys.exit(101)
DB/assets/ic15_eval/script.py ADDED
@@ -0,0 +1,316 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+ # -*- coding: utf-8 -*-
3
+ from collections import namedtuple
4
+ import rrc_evaluation_funcs
5
+ import importlib
6
+ import json
7
def evaluation_imports():
    """
    evaluation_imports: Dictionary ( key = module name , value = alias ) with python modules used in the evaluation.
    """
    modules = {}
    modules['Polygon'] = 'plg'  # polygon clipping library, used for IoU
    modules['numpy'] = 'np'
    return modules
15
+
16
def default_evaluation_params():
    """
    default_evaluation_params: Default parameters to use for the validation and evaluation.
    """
    params = dict(
        IOU_CONSTRAINT=0.5,
        AREA_PRECISION_CONSTRAINT=0.5,
        GT_SAMPLE_NAME_2_ID='gt_img_([0-9]+).txt',
        DET_SAMPLE_NAME_2_ID='res_img_([0-9]+).txt',
        # LTRB: 2 points (left,top,right,bottom) or 4 points (x1,y1,...,x4,y4)
        LTRB=False,
        # Lines are delimited by Windows CRLF format
        CRLF=False,
        # Detections must include confidence value. AP will be calculated
        CONFIDENCES=False,
        # Generate per sample results and produce data for visualization
        PER_SAMPLE_RESULTS=True,
    )
    return params
30
+
31
def validate_data(gtFilePath, submFilePath, evaluationParams):
    """
    Method validate_data: validates that all files in the results folder are correct (have the correct name contents).
    Validates also that there are no missing files in the folder.
    If some error detected, the method raises the error
    """
    gt = rrc_evaluation_funcs.load_zip_file(gtFilePath, evaluationParams['GT_SAMPLE_NAME_2_ID'])
    subm = rrc_evaluation_funcs.load_zip_file(submFilePath, evaluationParams['DET_SAMPLE_NAME_2_ID'], True)

    # Ground truth files must parse (with transcriptions).
    for sample in gt:
        rrc_evaluation_funcs.validate_lines_in_file(
            sample, gt[sample], evaluationParams['CRLF'], evaluationParams['LTRB'], True)

    # Every submitted sample must exist in the GT and parse correctly.
    for sample in subm:
        if sample not in gt:
            raise Exception("The sample %s not present in GT" % sample)

        rrc_evaluation_funcs.validate_lines_in_file(
            sample, subm[sample], evaluationParams['CRLF'], evaluationParams['LTRB'],
            False, evaluationParams['CONFIDENCES'])
51
+
52
+
53
def evaluate_method(gtFilePath, submFilePath, evaluationParams):
    """
    Method evaluate_method: evaluate method and returns the results
    Results. Dictionary with the following values:
    - method (required)  Global method metrics. Ex: { 'Precision':0.8,'Recall':0.9 }
    - samples (optional) Per sample metrics. Ex: {'sample1' : { 'Precision':0.8,'Recall':0.9 } , 'sample2' : { 'Precision':0.8,'Recall':0.9 }
    """

    # Bind the evaluation dependencies as module globals (Polygon -> plg,
    # numpy -> np).  FIX: .items() replaces the Python-2-only .iteritems(),
    # which raises AttributeError under Python 3.
    for module, alias in evaluation_imports().items():
        globals()[alias] = importlib.import_module(module)

    def polygon_from_points(points):
        """
        Returns a Polygon object to use with the Polygon2 class from a list of 8 points: x1,y1,x2,y2,x3,y3,x4,y4
        """
        # Column-major layout: row 0 holds the xs, row 1 the ys.
        resBoxes = np.empty([1, 8], dtype='int32')
        resBoxes[0, 0] = int(points[0])
        resBoxes[0, 4] = int(points[1])
        resBoxes[0, 1] = int(points[2])
        resBoxes[0, 5] = int(points[3])
        resBoxes[0, 2] = int(points[4])
        resBoxes[0, 6] = int(points[5])
        resBoxes[0, 3] = int(points[6])
        resBoxes[0, 7] = int(points[7])
        pointMat = resBoxes[0].reshape([2, 4]).T
        return plg.Polygon(pointMat)

    def rectangle_to_polygon(rect):
        # Axis-aligned Rectangle namedtuple -> 4-corner polygon.
        resBoxes = np.empty([1, 8], dtype='int32')
        resBoxes[0, 0] = int(rect.xmin)
        resBoxes[0, 4] = int(rect.ymax)
        resBoxes[0, 1] = int(rect.xmin)
        resBoxes[0, 5] = int(rect.ymin)
        resBoxes[0, 2] = int(rect.xmax)
        resBoxes[0, 6] = int(rect.ymin)
        resBoxes[0, 3] = int(rect.xmax)
        resBoxes[0, 7] = int(rect.ymax)

        pointMat = resBoxes[0].reshape([2, 4]).T

        return plg.Polygon(pointMat)

    def rectangle_to_points(rect):
        points = [int(rect.xmin), int(rect.ymax), int(rect.xmax), int(rect.ymax),
                  int(rect.xmax), int(rect.ymin), int(rect.xmin), int(rect.ymin)]
        return points

    def get_union(pD, pG):
        areaA = pD.area()
        areaB = pG.area()
        return areaA + areaB - get_intersection(pD, pG)

    def get_intersection_over_union(pD, pG):
        try:
            return get_intersection(pD, pG) / get_union(pD, pG)
        except Exception:
            # Degenerate polygons (zero union) count as no overlap.
            return 0

    def get_intersection(pD, pG):
        pInt = pD & pG
        if len(pInt) == 0:
            return 0
        return pInt.area()

    def compute_ap(confList, matchList, numGtCare):
        # Average precision over detections sorted by descending confidence.
        correct = 0
        AP = 0
        if len(confList) > 0:
            confList = np.array(confList)
            matchList = np.array(matchList)
            sorted_ind = np.argsort(-confList)
            confList = confList[sorted_ind]
            matchList = matchList[sorted_ind]
            for n in range(len(confList)):
                match = matchList[n]
                if match:
                    correct += 1
                    AP += float(correct) / (n + 1)

            if numGtCare > 0:
                AP /= numGtCare

        return AP

    perSampleMetrics = {}

    matchedSum = 0

    Rectangle = namedtuple('Rectangle', 'xmin ymin xmax ymax')

    gt = rrc_evaluation_funcs.load_zip_file(gtFilePath, evaluationParams['GT_SAMPLE_NAME_2_ID'])
    subm = rrc_evaluation_funcs.load_zip_file(submFilePath, evaluationParams['DET_SAMPLE_NAME_2_ID'], True)

    numGlobalCareGt = 0
    numGlobalCareDet = 0

    arrGlobalConfidences = []
    arrGlobalMatches = []

    for resFile in gt:

        gtFile = rrc_evaluation_funcs.decode_utf8(gt[resFile])
        recall = 0
        precision = 0
        hmean = 0

        detMatched = 0

        iouMat = np.empty([1, 1])

        gtPols = []
        detPols = []

        gtPolPoints = []
        detPolPoints = []

        # Array of Ground Truth Polygons' keys marked as don't Care
        gtDontCarePolsNum = []
        # Array of Detected Polygons' matched with a don't Care GT
        detDontCarePolsNum = []

        pairs = []
        detMatchedNums = []

        arrSampleConfidences = []
        arrSampleMatch = []
        sampleAP = 0

        evaluationLog = ""

        pointsList, _, transcriptionsList = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(
            gtFile, evaluationParams['CRLF'], evaluationParams['LTRB'], True, False)
        for n in range(len(pointsList)):
            points = pointsList[n]
            transcription = transcriptionsList[n]
            dontCare = transcription == "###"
            if evaluationParams['LTRB']:
                gtRect = Rectangle(*points)
                gtPol = rectangle_to_polygon(gtRect)
            else:
                gtPol = polygon_from_points(points)
            gtPols.append(gtPol)
            gtPolPoints.append(points)
            if dontCare:
                gtDontCarePolsNum.append(len(gtPols) - 1)

        evaluationLog += "GT polygons: " + str(len(gtPols)) + (" (" + str(len(gtDontCarePolsNum)) + " don't care)\n" if len(gtDontCarePolsNum) > 0 else "\n")

        if resFile in subm:

            detFile = rrc_evaluation_funcs.decode_utf8(subm[resFile])

            pointsList, confidencesList, _ = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(
                detFile, evaluationParams['CRLF'], evaluationParams['LTRB'], False, evaluationParams['CONFIDENCES'])
            for n in range(len(pointsList)):
                points = pointsList[n]

                if evaluationParams['LTRB']:
                    detRect = Rectangle(*points)
                    detPol = rectangle_to_polygon(detRect)
                else:
                    detPol = polygon_from_points(points)
                detPols.append(detPol)
                detPolPoints.append(points)
                # A detection mostly covered by a don't-care GT region is
                # itself flagged don't-care and excluded from the metrics.
                if len(gtDontCarePolsNum) > 0:
                    for dontCarePol in gtDontCarePolsNum:
                        dontCarePol = gtPols[dontCarePol]
                        intersected_area = get_intersection(dontCarePol, detPol)
                        pdDimensions = detPol.area()
                        precision = 0 if pdDimensions == 0 else intersected_area / pdDimensions
                        if (precision > evaluationParams['AREA_PRECISION_CONSTRAINT']):
                            detDontCarePolsNum.append(len(detPols) - 1)
                            break

            evaluationLog += "DET polygons: " + str(len(detPols)) + (" (" + str(len(detDontCarePolsNum)) + " don't care)\n" if len(detDontCarePolsNum) > 0 else "\n")

            if len(gtPols) > 0 and len(detPols) > 0:
                # Calculate IoU and precision matrixs
                outputShape = [len(gtPols), len(detPols)]
                iouMat = np.empty(outputShape)
                gtRectMat = np.zeros(len(gtPols), np.int8)
                detRectMat = np.zeros(len(detPols), np.int8)
                for gtNum in range(len(gtPols)):
                    for detNum in range(len(detPols)):
                        pG = gtPols[gtNum]
                        pD = detPols[detNum]
                        iouMat[gtNum, detNum] = get_intersection_over_union(pD, pG)

                # Greedy one-to-one matching above the IoU threshold.
                for gtNum in range(len(gtPols)):
                    for detNum in range(len(detPols)):
                        if gtRectMat[gtNum] == 0 and detRectMat[detNum] == 0 and gtNum not in gtDontCarePolsNum and detNum not in detDontCarePolsNum:
                            if iouMat[gtNum, detNum] > evaluationParams['IOU_CONSTRAINT']:
                                gtRectMat[gtNum] = 1
                                detRectMat[detNum] = 1
                                detMatched += 1
                                pairs.append({'gt': gtNum, 'det': detNum})
                                detMatchedNums.append(detNum)
                                evaluationLog += "Match GT #" + str(gtNum) + " with Det #" + str(detNum) + "\n"

            if evaluationParams['CONFIDENCES']:
                for detNum in range(len(detPols)):
                    if detNum not in detDontCarePolsNum:
                        # we exclude the don't care detections
                        match = detNum in detMatchedNums

                        arrSampleConfidences.append(confidencesList[detNum])
                        arrSampleMatch.append(match)

                        arrGlobalConfidences.append(confidencesList[detNum])
                        arrGlobalMatches.append(match)

        numGtCare = (len(gtPols) - len(gtDontCarePolsNum))
        numDetCare = (len(detPols) - len(detDontCarePolsNum))
        if numGtCare == 0:
            recall = float(1)
            precision = float(0) if numDetCare > 0 else float(1)
            sampleAP = precision
        else:
            recall = float(detMatched) / numGtCare
            precision = 0 if numDetCare == 0 else float(detMatched) / numDetCare
            if evaluationParams['CONFIDENCES'] and evaluationParams['PER_SAMPLE_RESULTS']:
                sampleAP = compute_ap(arrSampleConfidences, arrSampleMatch, numGtCare)

        hmean = 0 if (precision + recall) == 0 else 2.0 * precision * recall / (precision + recall)

        matchedSum += detMatched
        numGlobalCareGt += numGtCare
        numGlobalCareDet += numDetCare

        if evaluationParams['PER_SAMPLE_RESULTS']:
            perSampleMetrics[resFile] = {
                'precision': precision,
                'recall': recall,
                'hmean': hmean,
                'pairs': pairs,
                'AP': sampleAP,
                'iouMat': [] if len(detPols) > 100 else iouMat.tolist(),
                'gtPolPoints': gtPolPoints,
                'detPolPoints': detPolPoints,
                'gtDontCare': gtDontCarePolsNum,
                'detDontCare': detDontCarePolsNum,
                'evaluationParams': evaluationParams,
                'evaluationLog': evaluationLog
            }

    # Compute MAP and MAR
    AP = 0
    if evaluationParams['CONFIDENCES']:
        AP = compute_ap(arrGlobalConfidences, arrGlobalMatches, numGlobalCareGt)

    methodRecall = 0 if numGlobalCareGt == 0 else float(matchedSum) / numGlobalCareGt
    methodPrecision = 0 if numGlobalCareDet == 0 else float(matchedSum) / numGlobalCareDet
    methodHmean = 0 if methodRecall + methodPrecision == 0 else 2 * methodRecall * methodPrecision / (methodRecall + methodPrecision)

    methodMetrics = {'precision': methodPrecision, 'recall': methodRecall, 'hmean': methodHmean, 'AP': AP}

    resDict = {'calculated': True, 'Message': '', 'method': methodMetrics, 'per_sample': perSampleMetrics}

    return resDict
310
+
311
+
312
+
313
if __name__=='__main__':
    # Run the full IC15 evaluation from the command line (arguments parsed by
    # rrc_evaluation_funcs.main_evaluation) and append the headline metrics
    # for this model to a local log file.
    res_dict,model_name=rrc_evaluation_funcs.main_evaluation(None,default_evaluation_params,validate_data,evaluate_method)
    with open('./log.txt','a') as f:
        f.write(model_name+':'+json.dumps(res_dict['method'])+'\n')
DB/assets/ops/dcn/__init__.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from .functions.deform_conv import deform_conv, modulated_deform_conv
from .functions.deform_pool import deform_roi_pooling
from .modules.deform_conv import (DeformConv, ModulatedDeformConv,
                                  DeformConvPack, ModulatedDeformConvPack)
from .modules.deform_pool import (DeformRoIPooling, DeformRoIPoolingPack,
                                  ModulatedDeformRoIPoolingPack)

# Public API of the deformable-convolution ops package: the nn.Module
# wrappers plus the raw functional entry points.
__all__ = [
    'DeformConv', 'DeformConvPack', 'ModulatedDeformConv',
    'ModulatedDeformConvPack', 'DeformRoIPooling', 'DeformRoIPoolingPack',
    'ModulatedDeformRoIPoolingPack', 'deform_conv', 'modulated_deform_conv',
    'deform_roi_pooling'
]
DB/assets/ops/dcn/functions/__init__.py ADDED
File without changes
DB/assets/ops/dcn/functions/deform_conv.py ADDED
@@ -0,0 +1,181 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch.autograd import Function
3
+ from torch.nn.modules.utils import _pair
4
+
5
+ from .. import deform_conv_cuda
6
+
7
+
8
class DeformConvFunction(Function):
    """Autograd Function wrapping the compiled ``deform_conv_cuda`` extension.

    CUDA only: both forward and backward raise NotImplementedError on CPU
    tensors.
    """

    @staticmethod
    def forward(ctx,
                input,
                offset,
                weight,
                stride=1,
                padding=0,
                dilation=1,
                groups=1,
                deformable_groups=1,
                im2col_step=64):
        """Deformable convolution of a 4D ``input`` sampled at ``offset``."""
        if input is not None and input.dim() != 4:
            raise ValueError(
                "Expected 4D tensor as input, got {}D tensor instead.".format(
                    input.dim()))
        # Normalize hyper-parameters to (h, w) pairs and stash them on ctx.
        ctx.stride = _pair(stride)
        ctx.padding = _pair(padding)
        ctx.dilation = _pair(dilation)
        ctx.groups = groups
        ctx.deformable_groups = deformable_groups
        ctx.im2col_step = im2col_step

        ctx.save_for_backward(input, offset, weight)

        output = input.new_empty(
            DeformConvFunction._output_size(input, weight, ctx.padding,
                                            ctx.dilation, ctx.stride))

        ctx.bufs_ = [input.new_empty(0), input.new_empty(0)]  # columns, ones

        if not input.is_cuda:
            raise NotImplementedError
        else:
            cur_im2col_step = min(ctx.im2col_step, input.shape[0])
            assert (input.shape[0] %
                    cur_im2col_step) == 0, 'im2col step must divide batchsize'
            deform_conv_cuda.deform_conv_forward_cuda(
                input, weight, offset, output, ctx.bufs_[0], ctx.bufs_[1],
                weight.size(3), weight.size(2), ctx.stride[1], ctx.stride[0],
                ctx.padding[1], ctx.padding[0], ctx.dilation[1],
                ctx.dilation[0], ctx.groups, ctx.deformable_groups,
                cur_im2col_step)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        input, offset, weight = ctx.saved_tensors

        grad_input = grad_offset = grad_weight = None

        if not grad_output.is_cuda:
            raise NotImplementedError
        else:
            cur_im2col_step = min(ctx.im2col_step, input.shape[0])
            assert (input.shape[0] %
                    cur_im2col_step) == 0, 'im2col step must divide batchsize'

            if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
                grad_input = torch.zeros_like(input)
                grad_offset = torch.zeros_like(offset)
                deform_conv_cuda.deform_conv_backward_input_cuda(
                    input, offset, grad_output, grad_input,
                    grad_offset, weight, ctx.bufs_[0], weight.size(3),
                    weight.size(2), ctx.stride[1], ctx.stride[0],
                    ctx.padding[1], ctx.padding[0], ctx.dilation[1],
                    ctx.dilation[0], ctx.groups, ctx.deformable_groups,
                    cur_im2col_step)

            if ctx.needs_input_grad[2]:
                grad_weight = torch.zeros_like(weight)
                deform_conv_cuda.deform_conv_backward_parameters_cuda(
                    input, offset, grad_output,
                    grad_weight, ctx.bufs_[0], ctx.bufs_[1], weight.size(3),
                    weight.size(2), ctx.stride[1], ctx.stride[0],
                    ctx.padding[1], ctx.padding[0], ctx.dilation[1],
                    ctx.dilation[0], ctx.groups, ctx.deformable_groups, 1,
                    cur_im2col_step)

        # BUGFIX: forward takes nine inputs after ctx, so backward must return
        # nine gradients.  The original returned only eight, which makes
        # autograd raise "returned an incorrect number of gradients
        # (expected 9, got 8)" on the backward pass.
        return (grad_input, grad_offset, grad_weight, None, None, None, None,
                None, None)

    @staticmethod
    def _output_size(input, weight, padding, dilation, stride):
        """Output shape (n, c_out, h_out, w_out) via the standard conv formula."""
        channels = weight.size(0)
        output_size = (input.size(0), channels)
        for d in range(input.dim() - 2):
            in_size = input.size(d + 2)
            pad = padding[d]
            kernel = dilation[d] * (weight.size(d + 2) - 1) + 1
            stride_ = stride[d]
            output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1, )
        if not all(map(lambda s: s > 0, output_size)):
            raise ValueError(
                "convolution input is too small (output would be {})".format(
                    'x'.join(map(str, output_size))))
        return output_size
106
+
107
+
108
class ModulatedDeformConvFunction(Function):
    """Autograd Function for modulated deformable convolution (DCNv2-style).

    Thin wrapper over the compiled ``deform_conv_cuda`` extension; CUDA only.
    ``stride``/``padding``/``dilation`` are scalars here — the same value is
    passed to the kernel for both the height and width dimension.
    """

    @staticmethod
    def forward(ctx,
                input,
                offset,
                mask,
                weight,
                bias=None,
                stride=1,
                padding=0,
                dilation=1,
                groups=1,
                deformable_groups=1):
        # Stash hyper-parameters for backward.
        ctx.stride = stride
        ctx.padding = padding
        ctx.dilation = dilation
        ctx.groups = groups
        ctx.deformable_groups = deformable_groups
        ctx.with_bias = bias is not None
        if not ctx.with_bias:
            bias = input.new_empty(1)  # fake tensor
        if not input.is_cuda:
            raise NotImplementedError
        if weight.requires_grad or mask.requires_grad or offset.requires_grad \
                or input.requires_grad:
            ctx.save_for_backward(input, offset, mask, weight, bias)
        output = input.new_empty(
            ModulatedDeformConvFunction._infer_shape(ctx, input, weight))
        # Scratch buffers reused by the CUDA kernels.
        ctx._bufs = [input.new_empty(0), input.new_empty(0)]
        deform_conv_cuda.modulated_deform_conv_cuda_forward(
            input, weight, bias, ctx._bufs[0], offset, mask, output,
            ctx._bufs[1], weight.shape[2], weight.shape[3], ctx.stride,
            ctx.stride, ctx.padding, ctx.padding, ctx.dilation, ctx.dilation,
            ctx.groups, ctx.deformable_groups, ctx.with_bias)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        if not grad_output.is_cuda:
            raise NotImplementedError
        input, offset, mask, weight, bias = ctx.saved_tensors
        grad_input = torch.zeros_like(input)
        grad_offset = torch.zeros_like(offset)
        grad_mask = torch.zeros_like(mask)
        grad_weight = torch.zeros_like(weight)
        grad_bias = torch.zeros_like(bias)
        # Single fused kernel fills all gradient tensors in place.
        deform_conv_cuda.modulated_deform_conv_cuda_backward(
            input, weight, bias, ctx._bufs[0], offset, mask, ctx._bufs[1],
            grad_input, grad_weight, grad_bias, grad_offset, grad_mask,
            grad_output, weight.shape[2], weight.shape[3], ctx.stride,
            ctx.stride, ctx.padding, ctx.padding, ctx.dilation, ctx.dilation,
            ctx.groups, ctx.deformable_groups, ctx.with_bias)
        if not ctx.with_bias:
            grad_bias = None

        # One gradient per forward input (ten in total).
        return (grad_input, grad_offset, grad_mask, grad_weight, grad_bias,
                None, None, None, None, None)

    @staticmethod
    def _infer_shape(ctx, input, weight):
        # Output (n, c_out, h_out, w_out) via the standard conv formula,
        # using the scalar stride/padding/dilation stored on ctx.
        n = input.size(0)
        channels_out = weight.size(0)
        height, width = input.shape[2:4]
        kernel_h, kernel_w = weight.shape[2:4]
        height_out = (height + 2 * ctx.padding -
                      (ctx.dilation * (kernel_h - 1) + 1)) // ctx.stride + 1
        width_out = (width + 2 * ctx.padding -
                     (ctx.dilation * (kernel_w - 1) + 1)) // ctx.stride + 1
        return n, channels_out, height_out, width_out
178
+
179
+
180
# Functional aliases: call the autograd Functions directly.
deform_conv = DeformConvFunction.apply
modulated_deform_conv = ModulatedDeformConvFunction.apply
DB/assets/ops/dcn/functions/deform_pool.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch.autograd import Function
3
+
4
+ from .. import deform_pool_cuda
5
+
6
+
7
class DeformRoIPoolingFunction(Function):
    """Autograd Function for (modulated) deformable position-sensitive RoI pooling.

    Thin wrapper over the compiled ``deform_pool_cuda`` extension; CUDA only.
    """

    @staticmethod
    def forward(ctx,
                data,
                rois,
                offset,
                spatial_scale,
                out_size,
                out_channels,
                no_trans,
                group_size=1,
                part_size=None,
                sample_per_part=4,
                trans_std=.0):
        # Pooling hyper-parameters are kept on ctx for the backward pass.
        ctx.spatial_scale = spatial_scale
        ctx.out_size = out_size
        ctx.out_channels = out_channels
        ctx.no_trans = no_trans
        ctx.group_size = group_size
        ctx.part_size = out_size if part_size is None else part_size
        ctx.sample_per_part = sample_per_part
        ctx.trans_std = trans_std

        assert 0.0 <= ctx.trans_std <= 1.0
        if not data.is_cuda:
            raise NotImplementedError

        n = rois.shape[0]
        output = data.new_empty(n, out_channels, out_size, out_size)
        # Per-bin sample counts, filled by the kernel and reused in backward.
        output_count = data.new_empty(n, out_channels, out_size, out_size)
        deform_pool_cuda.deform_psroi_pooling_cuda_forward(
            data, rois, offset, output, output_count, ctx.no_trans,
            ctx.spatial_scale, ctx.out_channels, ctx.group_size, ctx.out_size,
            ctx.part_size, ctx.sample_per_part, ctx.trans_std)

        if data.requires_grad or rois.requires_grad or offset.requires_grad:
            ctx.save_for_backward(data, rois, offset)
        # output_count is an intermediate, not a graph input/output, so it is
        # stored directly on ctx rather than via save_for_backward.
        ctx.output_count = output_count

        return output

    @staticmethod
    def backward(ctx, grad_output):
        if not grad_output.is_cuda:
            raise NotImplementedError

        data, rois, offset = ctx.saved_tensors
        output_count = ctx.output_count
        grad_input = torch.zeros_like(data)
        grad_rois = None  # RoI coordinates receive no gradient
        grad_offset = torch.zeros_like(offset)

        deform_pool_cuda.deform_psroi_pooling_cuda_backward(
            grad_output, data, rois, offset, output_count, grad_input,
            grad_offset, ctx.no_trans, ctx.spatial_scale, ctx.out_channels,
            ctx.group_size, ctx.out_size, ctx.part_size, ctx.sample_per_part,
            ctx.trans_std)
        # One gradient per forward input (eleven in total).
        return (grad_input, grad_rois, grad_offset, None, None, None, None,
                None, None, None, None)
67
+
68
+
69
# Functional alias: call the autograd Function directly.
deform_roi_pooling = DeformRoIPoolingFunction.apply
DB/assets/ops/dcn/modules/__init__.py ADDED
File without changes
DB/assets/ops/dcn/modules/deform_conv.py ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+ from torch.nn.modules.utils import _pair
6
+
7
+ from ..functions.deform_conv import deform_conv, modulated_deform_conv
8
+
9
+
10
class DeformConv(nn.Module):
    """Deformable convolution layer.

    The sampling offsets are supplied by the caller in ``forward``; see
    ``DeformConvPack`` for a variant that predicts them itself.  A bias term
    is not supported.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 deformable_groups=1,
                 bias=False):
        super(DeformConv, self).__init__()

        assert not bias, 'DeformConv does not support a bias term'
        # FIX: the messages previously read "cannot be divisible by groups",
        # the opposite of the condition actually being checked.
        assert in_channels % groups == 0, \
            'in_channels {} is not divisible by groups {}'.format(
                in_channels, groups)
        assert out_channels % groups == 0, \
            'out_channels {} is not divisible by groups {}'.format(
                out_channels, groups)

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _pair(kernel_size)
        self.stride = _pair(stride)
        self.padding = _pair(padding)
        self.dilation = _pair(dilation)
        self.groups = groups
        self.deformable_groups = deformable_groups

        self.weight = nn.Parameter(
            torch.Tensor(out_channels, in_channels // self.groups,
                         *self.kernel_size))

        self.reset_parameters()

    def reset_parameters(self):
        # Uniform init in [-1/sqrt(fan_in), 1/sqrt(fan_in)].
        n = self.in_channels
        for k in self.kernel_size:
            n *= k
        stdv = 1. / math.sqrt(n)
        self.weight.data.uniform_(-stdv, stdv)

    def forward(self, x, offset):
        return deform_conv(x, offset, self.weight, self.stride, self.padding,
                           self.dilation, self.groups, self.deformable_groups)
57
+
58
+
59
class DeformConvPack(DeformConv):
    """DeformConv that predicts its own offsets from the input.

    A plain Conv2d produces ``2 * kh * kw`` offset channels per deformable
    group; it is zero-initialised so the layer initially behaves like a
    regular convolution.
    """

    def __init__(self, *args, **kwargs):
        super(DeformConvPack, self).__init__(*args, **kwargs)

        offset_channels = (self.deformable_groups * 2 *
                           self.kernel_size[0] * self.kernel_size[1])
        self.conv_offset = nn.Conv2d(
            self.in_channels,
            offset_channels,
            kernel_size=self.kernel_size,
            stride=_pair(self.stride),
            padding=_pair(self.padding),
            bias=True)
        self.init_offset()

    def init_offset(self):
        # Zero offsets at start.
        self.conv_offset.weight.data.zero_()
        self.conv_offset.bias.data.zero_()

    def forward(self, x):
        predicted_offset = self.conv_offset(x)
        return deform_conv(x, predicted_offset, self.weight, self.stride,
                           self.padding, self.dilation, self.groups,
                           self.deformable_groups)
82
+
83
+
84
class ModulatedDeformConv(nn.Module):
    """Modulated deformable convolution layer.

    The caller supplies sampling offsets and a modulation mask in ``forward``;
    see ``ModulatedDeformConvPack`` for a self-contained variant.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 deformable_groups=1,
                 bias=True):
        super(ModulatedDeformConv, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _pair(kernel_size)
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.deformable_groups = deformable_groups
        self.with_bias = bias

        weight_shape = (out_channels, in_channels // groups) + self.kernel_size
        self.weight = nn.Parameter(torch.Tensor(*weight_shape))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        # Uniform init in [-1/sqrt(fan_in), 1/sqrt(fan_in)]; bias starts at 0.
        fan_in = self.in_channels
        for dim in self.kernel_size:
            fan_in *= dim
        bound = 1. / math.sqrt(fan_in)
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.zero_()

    def forward(self, x, offset, mask):
        return modulated_deform_conv(x, offset, mask, self.weight, self.bias,
                                     self.stride, self.padding, self.dilation,
                                     self.groups, self.deformable_groups)
129
+
130
+
131
class ModulatedDeformConvPack(ModulatedDeformConv):
    """ModulatedDeformConv that predicts offsets and modulation masks itself."""

    def __init__(self, *args, **kwargs):
        super(ModulatedDeformConvPack, self).__init__(*args, **kwargs)

        # 3 channels per kernel location: two offset components plus one
        # modulation scalar, for each deformable group.
        self.conv_offset_mask = nn.Conv2d(
            self.in_channels,
            self.deformable_groups * 3 * self.kernel_size[0] *
            self.kernel_size[1],
            kernel_size=self.kernel_size,
            stride=_pair(self.stride),
            padding=_pair(self.padding),
            bias=True)
        self.init_offset()

    def init_offset(self):
        # Zero init: zero offsets and (after sigmoid) uniform 0.5 masks.
        self.conv_offset_mask.weight.data.zero_()
        self.conv_offset_mask.bias.data.zero_()

    def forward(self, x):
        pred = self.conv_offset_mask(x)
        o1, o2, mask = torch.chunk(pred, 3, dim=1)
        offset = torch.cat((o1, o2), dim=1)
        mask = torch.sigmoid(mask)
        return modulated_deform_conv(x, offset, mask, self.weight, self.bias,
                                     self.stride, self.padding, self.dilation,
                                     self.groups, self.deformable_groups)
DB/assets/ops/dcn/modules/deform_pool.py ADDED
@@ -0,0 +1,172 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from torch import nn
2
+
3
+ from ..functions.deform_pool import deform_roi_pooling
4
+
5
+
6
class DeformRoIPooling(nn.Module):
    """Base module for deformable RoI pooling.

    Holds the pooling hyper-parameters and forwards them, together with the
    caller-supplied offsets, to the ``deform_roi_pooling`` function.
    """

    def __init__(self,
                 spatial_scale,
                 out_size,
                 out_channels,
                 no_trans,
                 group_size=1,
                 part_size=None,
                 sample_per_part=4,
                 trans_std=.0):
        super(DeformRoIPooling, self).__init__()
        self.spatial_scale = spatial_scale
        self.out_size = out_size
        self.out_channels = out_channels
        self.no_trans = no_trans
        self.group_size = group_size
        # Default the part grid to the output grid when not given explicitly.
        self.part_size = part_size if part_size is not None else out_size
        self.sample_per_part = sample_per_part
        self.trans_std = trans_std

    def forward(self, data, rois, offset):
        if self.no_trans:
            # Without learned transformations the offset tensor is unused.
            offset = data.new_empty(0)
        return deform_roi_pooling(
            data, rois, offset, self.spatial_scale, self.out_size,
            self.out_channels, self.no_trans, self.group_size, self.part_size,
            self.sample_per_part, self.trans_std)
34
+
35
+
36
class DeformRoIPoolingPack(DeformRoIPooling):
    """DeformRoIPooling that learns its own per-RoI offsets.

    A small fully-connected head, fed with a plain (non-deformable) pooled
    feature, predicts one 2-D offset per output bin.
    """

    def __init__(self,
                 spatial_scale,
                 out_size,
                 out_channels,
                 no_trans,
                 group_size=1,
                 part_size=None,
                 sample_per_part=4,
                 trans_std=.0,
                 num_offset_fcs=3,
                 deform_fc_channels=1024):
        super(DeformRoIPoolingPack,
              self).__init__(spatial_scale, out_size, out_channels, no_trans,
                             group_size, part_size, sample_per_part, trans_std)

        self.num_offset_fcs = num_offset_fcs
        self.deform_fc_channels = deform_fc_channels

        if not no_trans:
            # Offset head: Linear(+ReLU) stack ending with
            # out_size * out_size * 2 values (a 2-D offset per bin).
            seq = []
            ic = self.out_size * self.out_size * self.out_channels
            for i in range(self.num_offset_fcs):
                if i < self.num_offset_fcs - 1:
                    oc = self.deform_fc_channels
                else:
                    oc = self.out_size * self.out_size * 2
                seq.append(nn.Linear(ic, oc))
                ic = oc
                if i < self.num_offset_fcs - 1:
                    seq.append(nn.ReLU(inplace=True))
            self.offset_fc = nn.Sequential(*seq)
            # Zero-init the last layer so training starts with no deformation.
            self.offset_fc[-1].weight.data.zero_()
            self.offset_fc[-1].bias.data.zero_()

    def forward(self, data, rois):
        assert data.size(1) == self.out_channels
        if self.no_trans:
            # No learned transformation: single plain pooling pass.
            offset = data.new_empty(0)
            return deform_roi_pooling(
                data, rois, offset, self.spatial_scale, self.out_size,
                self.out_channels, self.no_trans, self.group_size,
                self.part_size, self.sample_per_part, self.trans_std)
        else:
            # Two-pass scheme: pool without offsets, predict offsets from the
            # pooled feature, then pool again using them.
            n = rois.shape[0]
            offset = data.new_empty(0)
            x = deform_roi_pooling(data, rois, offset, self.spatial_scale,
                                   self.out_size, self.out_channels, True,
                                   self.group_size, self.part_size,
                                   self.sample_per_part, self.trans_std)
            offset = self.offset_fc(x.view(n, -1))
            offset = offset.view(n, 2, self.out_size, self.out_size)
            return deform_roi_pooling(
                data, rois, offset, self.spatial_scale, self.out_size,
                self.out_channels, self.no_trans, self.group_size,
                self.part_size, self.sample_per_part, self.trans_std)
93
+
94
+
95
class ModulatedDeformRoIPoolingPack(DeformRoIPooling):
    """DeformRoIPooling that learns per-RoI offsets and a modulation mask.

    Like ``DeformRoIPoolingPack`` but with an additional fully-connected head
    that predicts one sigmoid-activated scalar per output bin; the final
    pooled feature is multiplied by this mask.
    """

    def __init__(self,
                 spatial_scale,
                 out_size,
                 out_channels,
                 no_trans,
                 group_size=1,
                 part_size=None,
                 sample_per_part=4,
                 trans_std=.0,
                 num_offset_fcs=3,
                 num_mask_fcs=2,
                 deform_fc_channels=1024):
        super(ModulatedDeformRoIPoolingPack, self).__init__(
            spatial_scale, out_size, out_channels, no_trans, group_size,
            part_size, sample_per_part, trans_std)

        self.num_offset_fcs = num_offset_fcs
        self.num_mask_fcs = num_mask_fcs
        self.deform_fc_channels = deform_fc_channels

        if not no_trans:
            # Offset head: Linear(+ReLU) stack ending with
            # out_size * out_size * 2 values (a 2-D offset per bin).
            offset_fc_seq = []
            ic = self.out_size * self.out_size * self.out_channels
            for i in range(self.num_offset_fcs):
                if i < self.num_offset_fcs - 1:
                    oc = self.deform_fc_channels
                else:
                    oc = self.out_size * self.out_size * 2
                offset_fc_seq.append(nn.Linear(ic, oc))
                ic = oc
                if i < self.num_offset_fcs - 1:
                    offset_fc_seq.append(nn.ReLU(inplace=True))
            self.offset_fc = nn.Sequential(*offset_fc_seq)
            # Zero-init the last layer so training starts with no deformation.
            self.offset_fc[-1].weight.data.zero_()
            self.offset_fc[-1].bias.data.zero_()

            # Mask head: ends in a Sigmoid, one scalar per output bin.
            mask_fc_seq = []
            ic = self.out_size * self.out_size * self.out_channels
            for i in range(self.num_mask_fcs):
                if i < self.num_mask_fcs - 1:
                    oc = self.deform_fc_channels
                else:
                    oc = self.out_size * self.out_size
                mask_fc_seq.append(nn.Linear(ic, oc))
                ic = oc
                if i < self.num_mask_fcs - 1:
                    mask_fc_seq.append(nn.ReLU(inplace=True))
                else:
                    mask_fc_seq.append(nn.Sigmoid())
            self.mask_fc = nn.Sequential(*mask_fc_seq)
            # [-2] is the last Linear (the Sigmoid sits at [-1]); zero init
            # makes the initial mask uniformly sigmoid(0) = 0.5.
            self.mask_fc[-2].weight.data.zero_()
            self.mask_fc[-2].bias.data.zero_()

    def forward(self, data, rois):
        assert data.size(1) == self.out_channels
        if self.no_trans:
            # No learned transformation: single plain pooling pass.
            offset = data.new_empty(0)
            return deform_roi_pooling(
                data, rois, offset, self.spatial_scale, self.out_size,
                self.out_channels, self.no_trans, self.group_size,
                self.part_size, self.sample_per_part, self.trans_std)
        else:
            # Two-pass scheme: pool without offsets, predict offsets and mask
            # from the pooled feature, pool again, then apply the mask.
            n = rois.shape[0]
            offset = data.new_empty(0)
            x = deform_roi_pooling(data, rois, offset, self.spatial_scale,
                                   self.out_size, self.out_channels, True,
                                   self.group_size, self.part_size,
                                   self.sample_per_part, self.trans_std)
            offset = self.offset_fc(x.view(n, -1))
            offset = offset.view(n, 2, self.out_size, self.out_size)
            mask = self.mask_fc(x.view(n, -1))
            mask = mask.view(n, 1, self.out_size, self.out_size)
            return deform_roi_pooling(
                data, rois, offset, self.spatial_scale, self.out_size,
                self.out_channels, self.no_trans, self.group_size,
                self.part_size, self.sample_per_part, self.trans_std) * mask
DB/assets/ops/dcn/setup.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Build script for the DCN (deformable convolution / pooling) CUDA extensions.
# Usage: python setup.py build_ext --inplace
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension

# One extension module per op family; each pairs the C++ binding file with
# the CUDA kernel implementation it dispatches to.
extensions = [
    CUDAExtension('deform_conv_cuda', [
        'src/deform_conv_cuda.cpp',
        'src/deform_conv_cuda_kernel.cu',
    ]),
    CUDAExtension('deform_pool_cuda', [
        'src/deform_pool_cuda.cpp', 'src/deform_pool_cuda_kernel.cu'
    ]),
]

setup(
    name='deform_conv',
    ext_modules=extensions,
    # BuildExtension supplies the nvcc + C++ build flow for CUDAExtension.
    cmdclass={'build_ext': BuildExtension})
DB/assets/ops/dcn/src/deform_conv_cuda.cpp ADDED
@@ -0,0 +1,695 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // modify from
2
+ // https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda.c
3
+
4
+ #include <torch/extension.h>
5
+
6
+ #include <cmath>
7
+ #include <vector>
8
+
9
+ void deformable_im2col(const at::Tensor data_im, const at::Tensor data_offset,
10
+ const int channels, const int height, const int width,
11
+ const int ksize_h, const int ksize_w, const int pad_h,
12
+ const int pad_w, const int stride_h, const int stride_w,
13
+ const int dilation_h, const int dilation_w,
14
+ const int parallel_imgs, const int deformable_group,
15
+ at::Tensor data_col);
16
+
17
+ void deformable_col2im(const at::Tensor data_col, const at::Tensor data_offset,
18
+ const int channels, const int height, const int width,
19
+ const int ksize_h, const int ksize_w, const int pad_h,
20
+ const int pad_w, const int stride_h, const int stride_w,
21
+ const int dilation_h, const int dilation_w,
22
+ const int parallel_imgs, const int deformable_group,
23
+ at::Tensor grad_im);
24
+
25
+ void deformable_col2im_coord(
26
+ const at::Tensor data_col, const at::Tensor data_im,
27
+ const at::Tensor data_offset, const int channels, const int height,
28
+ const int width, const int ksize_h, const int ksize_w, const int pad_h,
29
+ const int pad_w, const int stride_h, const int stride_w,
30
+ const int dilation_h, const int dilation_w, const int parallel_imgs,
31
+ const int deformable_group, at::Tensor grad_offset);
32
+
33
+ void modulated_deformable_im2col_cuda(
34
+ const at::Tensor data_im, const at::Tensor data_offset,
35
+ const at::Tensor data_mask, const int batch_size, const int channels,
36
+ const int height_im, const int width_im, const int height_col,
37
+ const int width_col, const int kernel_h, const int kenerl_w,
38
+ const int pad_h, const int pad_w, const int stride_h, const int stride_w,
39
+ const int dilation_h, const int dilation_w, const int deformable_group,
40
+ at::Tensor data_col);
41
+
42
+ void modulated_deformable_col2im_cuda(
43
+ const at::Tensor data_col, const at::Tensor data_offset,
44
+ const at::Tensor data_mask, const int batch_size, const int channels,
45
+ const int height_im, const int width_im, const int height_col,
46
+ const int width_col, const int kernel_h, const int kenerl_w,
47
+ const int pad_h, const int pad_w, const int stride_h, const int stride_w,
48
+ const int dilation_h, const int dilation_w, const int deformable_group,
49
+ at::Tensor grad_im);
50
+
51
+ void modulated_deformable_col2im_coord_cuda(
52
+ const at::Tensor data_col, const at::Tensor data_im,
53
+ const at::Tensor data_offset, const at::Tensor data_mask,
54
+ const int batch_size, const int channels, const int height_im,
55
+ const int width_im, const int height_col, const int width_col,
56
+ const int kernel_h, const int kenerl_w, const int pad_h, const int pad_w,
57
+ const int stride_h, const int stride_w, const int dilation_h,
58
+ const int dilation_w, const int deformable_group, at::Tensor grad_offset,
59
+ at::Tensor grad_mask);
60
+
61
// Validate the shapes/strides of all tensors involved in a (non-modulated)
// deformable convolution before launching any CUDA work.
//
// input:  3D (C,H,W) or 4D (N,C,H,W) feature map.
// offset: per-position sampling offsets; must have
//         deformable_group * 2 * kH * kW channels and the output spatial size.
// gradOutput: optional (NULL in the forward pass); when present its plane
//         count and spatial size are checked against the computed output.
// weight: 4D (nOutputPlane, nInputPlane/group, kH, kW), must be contiguous.
// kH/kW, dH/dW, padH/padW, dilationH/dilationW: kernel, stride, padding and
//         dilation; all must be positive.
//
// Aborts via TORCH_CHECK/AT_ERROR on the first violated constraint.
// NOTE(review): the "%d"/"%s" placeholders in these messages are not
// printf-expanded — TORCH_CHECK streams its extra arguments after the
// message — so they appear literally in the error text; cosmetic only.
void shape_check(at::Tensor input, at::Tensor offset, at::Tensor *gradOutput,
                 at::Tensor weight, int kH, int kW, int dH, int dW, int padH,
                 int padW, int dilationH, int dilationW, int group,
                 int deformable_group) {
  TORCH_CHECK(weight.ndimension() == 4,
           "4D weight tensor (nOutputPlane,nInputPlane,kH,kW) expected, "
           "but got: %s",
           weight.ndimension());

  TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous");

  TORCH_CHECK(kW > 0 && kH > 0,
           "kernel size should be greater than zero, but got kH: %d kW: %d", kH,
           kW);

  TORCH_CHECK((weight.size(2) == kH && weight.size(3) == kW),
           "kernel size should be consistent with weight, ",
           "but got kH: %d kW: %d weight.size(2): %d, weight.size(3): %d", kH,
           kW, weight.size(2), weight.size(3));

  TORCH_CHECK(dW > 0 && dH > 0,
           "stride should be greater than zero, but got dH: %d dW: %d", dH, dW);

  TORCH_CHECK(
      dilationW > 0 && dilationH > 0,
      "dilation should be greater than 0, but got dilationH: %d dilationW: %d",
      dilationH, dilationW);

  // Dimension indices for (C,H,W); shifted by one when a batch dim exists.
  int ndim = input.ndimension();
  int dimf = 0;
  int dimh = 1;
  int dimw = 2;

  if (ndim == 4) {
    dimf++;
    dimh++;
    dimw++;
  }

  TORCH_CHECK(ndim == 3 || ndim == 4, "3D or 4D input tensor expected but got: %s",
           ndim);

  long nInputPlane = weight.size(1) * group;
  long inputHeight = input.size(dimh);
  long inputWidth = input.size(dimw);
  long nOutputPlane = weight.size(0);
  // Standard convolution output-size formula with padding and dilation.
  long outputHeight =
      (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1;
  long outputWidth =
      (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1;

  TORCH_CHECK(nInputPlane % deformable_group == 0,
           "input channels must divide deformable group size");

  if (outputWidth < 1 || outputHeight < 1)
    AT_ERROR(
        "Given input size: (%ld x %ld x %ld). "
        "Calculated output size: (%ld x %ld x %ld). Output size is too small",
        nInputPlane, inputHeight, inputWidth, nOutputPlane, outputHeight,
        outputWidth);

  TORCH_CHECK(input.size(1) == nInputPlane,
           "invalid number of input planes, expected: %d, but got: %d",
           nInputPlane, input.size(1));

  TORCH_CHECK((inputHeight >= kH && inputWidth >= kW),
           "input image is smaller than kernel");

  // Offset must cover every output location...
  TORCH_CHECK((offset.size(2) == outputHeight && offset.size(3) == outputWidth),
           "invalid spatial size of offset, expected height: %d width: %d, but "
           "got height: %d width: %d",
           outputHeight, outputWidth, offset.size(2), offset.size(3));

  // ...with an (x, y) pair per kernel tap per deformable group.
  TORCH_CHECK((offset.size(1) == deformable_group * 2 * kH * kW),
           "invalid number of channels of offset");

  // Backward-only checks: gradOutput must match the computed output shape.
  if (gradOutput != NULL) {
    TORCH_CHECK(gradOutput->size(dimf) == nOutputPlane,
             "invalid number of gradOutput planes, expected: %d, but got: %d",
             nOutputPlane, gradOutput->size(dimf));

    TORCH_CHECK((gradOutput->size(dimh) == outputHeight &&
              gradOutput->size(dimw) == outputWidth),
             "invalid size of gradOutput, expected height: %d width: %d , but "
             "got height: %d width: %d",
             outputHeight, outputWidth, gradOutput->size(dimh),
             gradOutput->size(dimw));
  }
}
150
+
151
// Forward pass of (non-modulated) deformable convolution.
//
// Processes the batch in chunks of `im2col_step` images: each chunk is
// expanded to a column matrix by deformable_im2col, then multiplied against
// the (group-partitioned) weight with addmm_ into a staging buffer, which is
// finally transposed back into `output`'s (N, C_out, H_out, W_out) layout.
// `columns` and `ones` are caller-provided scratch buffers. Returns 1.
int deform_conv_forward_cuda(at::Tensor input, at::Tensor weight,
                             at::Tensor offset, at::Tensor output,
                             at::Tensor columns, at::Tensor ones, int kW,
                             int kH, int dW, int dH, int padW, int padH,
                             int dilationW, int dilationH, int group,
                             int deformable_group, int im2col_step) {
  // todo: resize columns to include im2col: done
  // todo: add im2col_step as input
  // todo: add new output buffer and transpose it to output (or directly
  // transpose output) todo: possibly change data indexing because of
  // parallel_imgs

  shape_check(input, offset, NULL, weight, kH, kW, dH, dW, padH, padW,
              dilationH, dilationW, group, deformable_group);

  input = input.contiguous();
  offset = offset.contiguous();
  weight = weight.contiguous();

  // Promote an unbatched (C,H,W) input to a batch of one; `batch == 0`
  // records that the result must be squeezed back at the end.
  int batch = 1;
  if (input.ndimension() == 3) {
    // Force batch
    batch = 0;
    input.unsqueeze_(0);
    offset.unsqueeze_(0);
  }

  // todo: assert batchsize dividable by im2col_step

  long batchSize = input.size(0);
  long nInputPlane = input.size(1);
  long inputHeight = input.size(2);
  long inputWidth = input.size(3);

  long nOutputPlane = weight.size(0);

  long outputWidth =
      (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1;
  long outputHeight =
      (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1;

  TORCH_CHECK((offset.size(0) == batchSize), "invalid batch size of offset");

  // Split the batch dimension into (num_chunks, im2col_step).
  output = output.view({batchSize / im2col_step, im2col_step, nOutputPlane,
                        outputHeight, outputWidth});
  columns = at::zeros(
      {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth},
      input.options());

  // `ones` is only used by the bias path elsewhere; (re)allocate lazily.
  if (ones.ndimension() != 2 ||
      ones.size(0) * ones.size(1) < outputHeight * outputWidth) {
    ones = at::ones({outputHeight, outputWidth}, input.options());
  }

  input = input.view({batchSize / im2col_step, im2col_step, nInputPlane,
                      inputHeight, inputWidth});
  offset =
      offset.view({batchSize / im2col_step, im2col_step,
                   deformable_group * 2 * kH * kW, outputHeight, outputWidth});

  // Staging buffer holding each chunk's result in im2col-major layout.
  at::Tensor output_buffer =
      at::zeros({batchSize / im2col_step, nOutputPlane,
                 im2col_step * outputHeight, outputWidth},
                output.options());

  output_buffer = output_buffer.view(
      {output_buffer.size(0), group, output_buffer.size(1) / group,
       output_buffer.size(2), output_buffer.size(3)});

  for (int elt = 0; elt < batchSize / im2col_step; elt++) {
    deformable_im2col(input[elt], offset[elt], nInputPlane, inputHeight,
                      inputWidth, kH, kW, padH, padW, dH, dW, dilationH,
                      dilationW, im2col_step, deformable_group, columns);

    // NOTE(review): these group views are not undone inside the loop, so a
    // second iteration re-views already-grouped tensors; appears to rely on
    // im2col_step covering the whole batch (single iteration) — verify.
    columns = columns.view({group, columns.size(0) / group, columns.size(1)});
    weight = weight.view({group, weight.size(0) / group, weight.size(1),
                          weight.size(2), weight.size(3)});

    // Per-group GEMM: output += weight @ columns.
    for (int g = 0; g < group; g++) {
      output_buffer[elt][g] = output_buffer[elt][g]
                                  .flatten(1)
                                  .addmm_(weight[g].flatten(1), columns[g])
                                  .view_as(output_buffer[elt][g]);
    }
  }

  output_buffer = output_buffer.view(
      {output_buffer.size(0), output_buffer.size(1) * output_buffer.size(2),
       output_buffer.size(3), output_buffer.size(4)});

  // Swap the (channel, step) axes back so `output` is batch-major again.
  output_buffer = output_buffer.view({batchSize / im2col_step, nOutputPlane,
                                      im2col_step, outputHeight, outputWidth});
  output_buffer.transpose_(1, 2);
  output.copy_(output_buffer);
  output = output.view({batchSize, nOutputPlane, outputHeight, outputWidth});

  // Restore caller-visible shapes.
  input = input.view({batchSize, nInputPlane, inputHeight, inputWidth});
  offset = offset.view(
      {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth});

  if (batch == 0) {
    output = output.view({nOutputPlane, outputHeight, outputWidth});
    input = input.view({nInputPlane, inputHeight, inputWidth});
    offset = offset.view({offset.size(1), offset.size(2), offset.size(3)});
  }

  return 1;
}
259
+
260
+ int deform_conv_backward_input_cuda(at::Tensor input, at::Tensor offset,
261
+ at::Tensor gradOutput, at::Tensor gradInput,
262
+ at::Tensor gradOffset, at::Tensor weight,
263
+ at::Tensor columns, int kW, int kH, int dW,
264
+ int dH, int padW, int padH, int dilationW,
265
+ int dilationH, int group,
266
+ int deformable_group, int im2col_step) {
267
+ shape_check(input, offset, &gradOutput, weight, kH, kW, dH, dW, padH, padW,
268
+ dilationH, dilationW, group, deformable_group);
269
+
270
+ input = input.contiguous();
271
+ offset = offset.contiguous();
272
+ gradOutput = gradOutput.contiguous();
273
+ weight = weight.contiguous();
274
+
275
+ int batch = 1;
276
+
277
+ if (input.ndimension() == 3) {
278
+ // Force batch
279
+ batch = 0;
280
+ input = input.view({1, input.size(0), input.size(1), input.size(2)});
281
+ offset = offset.view({1, offset.size(0), offset.size(1), offset.size(2)});
282
+ gradOutput = gradOutput.view(
283
+ {1, gradOutput.size(0), gradOutput.size(1), gradOutput.size(2)});
284
+ }
285
+
286
+ long batchSize = input.size(0);
287
+ long nInputPlane = input.size(1);
288
+ long inputHeight = input.size(2);
289
+ long inputWidth = input.size(3);
290
+
291
+ long nOutputPlane = weight.size(0);
292
+
293
+ long outputWidth =
294
+ (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1;
295
+ long outputHeight =
296
+ (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1;
297
+
298
+ TORCH_CHECK((offset.size(0) == batchSize), 3, "invalid batch size of offset");
299
+ gradInput = gradInput.view({batchSize, nInputPlane, inputHeight, inputWidth});
300
+ columns = at::zeros(
301
+ {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth},
302
+ input.options());
303
+
304
+ // change order of grad output
305
+ gradOutput = gradOutput.view({batchSize / im2col_step, im2col_step,
306
+ nOutputPlane, outputHeight, outputWidth});
307
+ gradOutput.transpose_(1, 2);
308
+
309
+ gradInput = gradInput.view({batchSize / im2col_step, im2col_step, nInputPlane,
310
+ inputHeight, inputWidth});
311
+ input = input.view({batchSize / im2col_step, im2col_step, nInputPlane,
312
+ inputHeight, inputWidth});
313
+ gradOffset = gradOffset.view({batchSize / im2col_step, im2col_step,
314
+ deformable_group * 2 * kH * kW, outputHeight,
315
+ outputWidth});
316
+ offset =
317
+ offset.view({batchSize / im2col_step, im2col_step,
318
+ deformable_group * 2 * kH * kW, outputHeight, outputWidth});
319
+
320
+ for (int elt = 0; elt < batchSize / im2col_step; elt++) {
321
+ // divide into groups
322
+ columns = columns.view({group, columns.size(0) / group, columns.size(1)});
323
+ weight = weight.view({group, weight.size(0) / group, weight.size(1),
324
+ weight.size(2), weight.size(3)});
325
+ gradOutput = gradOutput.view(
326
+ {gradOutput.size(0), group, gradOutput.size(1) / group,
327
+ gradOutput.size(2), gradOutput.size(3), gradOutput.size(4)});
328
+
329
+ for (int g = 0; g < group; g++) {
330
+ columns[g] = columns[g].addmm_(weight[g].flatten(1).transpose(0, 1),
331
+ gradOutput[elt][g].flatten(1), 0.0f, 1.0f);
332
+ }
333
+
334
+ columns =
335
+ columns.view({columns.size(0) * columns.size(1), columns.size(2)});
336
+ gradOutput = gradOutput.view(
337
+ {gradOutput.size(0), gradOutput.size(1) * gradOutput.size(2),
338
+ gradOutput.size(3), gradOutput.size(4), gradOutput.size(5)});
339
+
340
+ deformable_col2im_coord(columns, input[elt], offset[elt], nInputPlane,
341
+ inputHeight, inputWidth, kH, kW, padH, padW, dH, dW,
342
+ dilationH, dilationW, im2col_step, deformable_group,
343
+ gradOffset[elt]);
344
+
345
+ deformable_col2im(columns, offset[elt], nInputPlane, inputHeight,
346
+ inputWidth, kH, kW, padH, padW, dH, dW, dilationH,
347
+ dilationW, im2col_step, deformable_group, gradInput[elt]);
348
+ }
349
+
350
+ gradOutput.transpose_(1, 2);
351
+ gradOutput =
352
+ gradOutput.view({batchSize, nOutputPlane, outputHeight, outputWidth});
353
+
354
+ gradInput = gradInput.view({batchSize, nInputPlane, inputHeight, inputWidth});
355
+ input = input.view({batchSize, nInputPlane, inputHeight, inputWidth});
356
+ gradOffset = gradOffset.view(
357
+ {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth});
358
+ offset = offset.view(
359
+ {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth});
360
+
361
+ if (batch == 0) {
362
+ gradOutput = gradOutput.view({nOutputPlane, outputHeight, outputWidth});
363
+ input = input.view({nInputPlane, inputHeight, inputWidth});
364
+ gradInput = gradInput.view({nInputPlane, inputHeight, inputWidth});
365
+ offset = offset.view({offset.size(1), offset.size(2), offset.size(3)});
366
+ gradOffset =
367
+ gradOffset.view({offset.size(1), offset.size(2), offset.size(3)});
368
+ }
369
+
370
+ return 1;
371
+ }
372
+
373
// Backward pass of deformable convolution w.r.t. the weight.
//
// Re-runs deformable_im2col per im2col_step chunk and accumulates
// gradWeight += scale * (gradOutput @ columns^T) per group via addmm_.
// `gradOutputBuffer` is a transposed staging copy so each chunk's gradient
// is laid out im2col-major for the GEMM. Bias gradient is handled elsewhere
// (see the commented-out gradBias parameter). Returns 1.
int deform_conv_backward_parameters_cuda(
    at::Tensor input, at::Tensor offset, at::Tensor gradOutput,
    at::Tensor gradWeight,  // at::Tensor gradBias,
    at::Tensor columns, at::Tensor ones, int kW, int kH, int dW, int dH,
    int padW, int padH, int dilationW, int dilationH, int group,
    int deformable_group, float scale, int im2col_step) {
  // todo: transpose and reshape outGrad
  // todo: reshape columns
  // todo: add im2col_step as input

  shape_check(input, offset, &gradOutput, gradWeight, kH, kW, dH, dW, padH,
              padW, dilationH, dilationW, group, deformable_group);

  input = input.contiguous();
  offset = offset.contiguous();
  gradOutput = gradOutput.contiguous();

  // Promote unbatched (C,H,W) tensors to a batch of one; squeezed back below.
  int batch = 1;

  if (input.ndimension() == 3) {
    // Force batch
    batch = 0;
    input = input.view(
        at::IntList({1, input.size(0), input.size(1), input.size(2)}));
    gradOutput = gradOutput.view(
        {1, gradOutput.size(0), gradOutput.size(1), gradOutput.size(2)});
  }

  long batchSize = input.size(0);
  long nInputPlane = input.size(1);
  long inputHeight = input.size(2);
  long inputWidth = input.size(3);

  long nOutputPlane = gradWeight.size(0);

  long outputWidth =
      (inputWidth + 2 * padW - (dilationW * (kW - 1) + 1)) / dW + 1;
  long outputHeight =
      (inputHeight + 2 * padH - (dilationH * (kH - 1) + 1)) / dH + 1;

  TORCH_CHECK((offset.size(0) == batchSize), "invalid batch size of offset");

  columns = at::zeros(
      {nInputPlane * kW * kH, im2col_step * outputHeight * outputWidth},
      input.options());

  gradOutput = gradOutput.view({batchSize / im2col_step, im2col_step,
                                nOutputPlane, outputHeight, outputWidth});
  gradOutput.transpose_(1, 2);

  // Contiguous transposed copy of gradOutput, one row block per chunk.
  at::Tensor gradOutputBuffer = at::zeros_like(gradOutput);
  gradOutputBuffer =
      gradOutputBuffer.view({batchSize / im2col_step, nOutputPlane, im2col_step,
                             outputHeight, outputWidth});
  gradOutputBuffer.copy_(gradOutput);
  gradOutputBuffer =
      gradOutputBuffer.view({batchSize / im2col_step, nOutputPlane,
                             im2col_step * outputHeight, outputWidth});

  // Undo the transpose on the original gradOutput view.
  gradOutput.transpose_(1, 2);
  gradOutput =
      gradOutput.view({batchSize, nOutputPlane, outputHeight, outputWidth});

  input = input.view({batchSize / im2col_step, im2col_step, nInputPlane,
                      inputHeight, inputWidth});
  offset =
      offset.view({batchSize / im2col_step, im2col_step,
                   deformable_group * 2 * kH * kW, outputHeight, outputWidth});

  for (int elt = 0; elt < batchSize / im2col_step; elt++) {
    deformable_im2col(input[elt], offset[elt], nInputPlane, inputHeight,
                      inputWidth, kH, kW, padH, padW, dH, dW, dilationH,
                      dilationW, im2col_step, deformable_group, columns);

    // divide into group
    gradOutputBuffer = gradOutputBuffer.view(
        {gradOutputBuffer.size(0), group, gradOutputBuffer.size(1) / group,
         gradOutputBuffer.size(2), gradOutputBuffer.size(3)});
    columns = columns.view({group, columns.size(0) / group, columns.size(1)});
    gradWeight =
        gradWeight.view({group, gradWeight.size(0) / group, gradWeight.size(1),
                         gradWeight.size(2), gradWeight.size(3)});

    // gradWeight[g] += scale * (gradOutput[g] @ columns[g]^T), accumulated
    // across chunks (beta=1.0 keeps prior contributions).
    for (int g = 0; g < group; g++) {
      gradWeight[g] = gradWeight[g]
                          .flatten(1)
                          .addmm_(gradOutputBuffer[elt][g].flatten(1),
                                  columns[g].transpose(1, 0), 1.0, scale)
                          .view_as(gradWeight[g]);
    }
    // Undo the group views so the next iteration re-partitions cleanly.
    gradOutputBuffer = gradOutputBuffer.view(
        {gradOutputBuffer.size(0),
         gradOutputBuffer.size(1) * gradOutputBuffer.size(2),
         gradOutputBuffer.size(3), gradOutputBuffer.size(4)});
    columns =
        columns.view({columns.size(0) * columns.size(1), columns.size(2)});
    gradWeight = gradWeight.view({gradWeight.size(0) * gradWeight.size(1),
                                  gradWeight.size(2), gradWeight.size(3),
                                  gradWeight.size(4)});
  }

  // Restore caller-visible shapes.
  input = input.view({batchSize, nInputPlane, inputHeight, inputWidth});
  offset = offset.view(
      {batchSize, deformable_group * 2 * kH * kW, outputHeight, outputWidth});

  if (batch == 0) {
    gradOutput = gradOutput.view({nOutputPlane, outputHeight, outputWidth});
    input = input.view({nInputPlane, inputHeight, inputWidth});
  }

  return 1;
}
485
+
486
+ void modulated_deform_conv_cuda_forward(
487
+ at::Tensor input, at::Tensor weight, at::Tensor bias, at::Tensor ones,
488
+ at::Tensor offset, at::Tensor mask, at::Tensor output, at::Tensor columns,
489
+ int kernel_h, int kernel_w, const int stride_h, const int stride_w,
490
+ const int pad_h, const int pad_w, const int dilation_h,
491
+ const int dilation_w, const int group, const int deformable_group,
492
+ const bool with_bias) {
493
+ TORCH_CHECK(input.is_contiguous(), "input tensor has to be contiguous");
494
+ TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous");
495
+
496
+ const int batch = input.size(0);
497
+ const int channels = input.size(1);
498
+ const int height = input.size(2);
499
+ const int width = input.size(3);
500
+
501
+ const int channels_out = weight.size(0);
502
+ const int channels_kernel = weight.size(1);
503
+ const int kernel_h_ = weight.size(2);
504
+ const int kernel_w_ = weight.size(3);
505
+
506
+ if (kernel_h_ != kernel_h || kernel_w_ != kernel_w)
507
+ AT_ERROR("Input shape and kernel shape wont match: (%d x %d vs %d x %d).",
508
+ kernel_h_, kernel_w, kernel_h_, kernel_w_);
509
+ if (channels != channels_kernel * group)
510
+ AT_ERROR("Input shape and kernel channels wont match: (%d vs %d).",
511
+ channels, channels_kernel * group);
512
+
513
+ const int height_out =
514
+ (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
515
+ const int width_out =
516
+ (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
517
+
518
+ if (ones.ndimension() != 2 ||
519
+ ones.size(0) * ones.size(1) < height_out * width_out) {
520
+ // Resize plane and fill with ones...
521
+ ones = at::ones({height_out, width_out}, input.options());
522
+ }
523
+
524
+ // resize output
525
+ output = output.view({batch, channels_out, height_out, width_out}).zero_();
526
+ // resize temporary columns
527
+ columns =
528
+ at::zeros({channels * kernel_h * kernel_w, 1 * height_out * width_out},
529
+ input.options());
530
+
531
+ output = output.view({output.size(0), group, output.size(1) / group,
532
+ output.size(2), output.size(3)});
533
+
534
+ for (int b = 0; b < batch; b++) {
535
+ modulated_deformable_im2col_cuda(
536
+ input[b], offset[b], mask[b], 1, channels, height, width, height_out,
537
+ width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
538
+ dilation_h, dilation_w, deformable_group, columns);
539
+
540
+ // divide into group
541
+ weight = weight.view({group, weight.size(0) / group, weight.size(1),
542
+ weight.size(2), weight.size(3)});
543
+ columns = columns.view({group, columns.size(0) / group, columns.size(1)});
544
+
545
+ for (int g = 0; g < group; g++) {
546
+ output[b][g] = output[b][g]
547
+ .flatten(1)
548
+ .addmm_(weight[g].flatten(1), columns[g])
549
+ .view_as(output[b][g]);
550
+ }
551
+
552
+ weight = weight.view({weight.size(0) * weight.size(1), weight.size(2),
553
+ weight.size(3), weight.size(4)});
554
+ columns =
555
+ columns.view({columns.size(0) * columns.size(1), columns.size(2)});
556
+ }
557
+
558
+ output = output.view({output.size(0), output.size(1) * output.size(2),
559
+ output.size(3), output.size(4)});
560
+
561
+ if (with_bias) {
562
+ output += bias.view({1, bias.size(0), 1, 1});
563
+ }
564
+ }
565
+
566
+ void modulated_deform_conv_cuda_backward(
567
+ at::Tensor input, at::Tensor weight, at::Tensor bias, at::Tensor ones,
568
+ at::Tensor offset, at::Tensor mask, at::Tensor columns,
569
+ at::Tensor grad_input, at::Tensor grad_weight, at::Tensor grad_bias,
570
+ at::Tensor grad_offset, at::Tensor grad_mask, at::Tensor grad_output,
571
+ int kernel_h, int kernel_w, int stride_h, int stride_w, int pad_h,
572
+ int pad_w, int dilation_h, int dilation_w, int group, int deformable_group,
573
+ const bool with_bias) {
574
+ TORCH_CHECK(input.is_contiguous(), "input tensor has to be contiguous");
575
+ TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous");
576
+
577
+ const int batch = input.size(0);
578
+ const int channels = input.size(1);
579
+ const int height = input.size(2);
580
+ const int width = input.size(3);
581
+
582
+ const int channels_kernel = weight.size(1);
583
+ const int kernel_h_ = weight.size(2);
584
+ const int kernel_w_ = weight.size(3);
585
+ if (kernel_h_ != kernel_h || kernel_w_ != kernel_w)
586
+ AT_ERROR("Input shape and kernel shape wont match: (%d x %d vs %d x %d).",
587
+ kernel_h_, kernel_w, kernel_h_, kernel_w_);
588
+ if (channels != channels_kernel * group)
589
+ AT_ERROR("Input shape and kernel channels wont match: (%d vs %d).",
590
+ channels, channels_kernel * group);
591
+
592
+ const int height_out =
593
+ (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
594
+ const int width_out =
595
+ (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
596
+
597
+ if (ones.ndimension() != 2 ||
598
+ ones.size(0) * ones.size(1) < height_out * width_out) {
599
+ // Resize plane and fill with ones...
600
+ ones = at::ones({height_out, width_out}, input.options());
601
+ }
602
+
603
+ grad_input = grad_input.view({batch, channels, height, width});
604
+ columns = at::zeros({channels * kernel_h * kernel_w, height_out * width_out},
605
+ input.options());
606
+
607
+ grad_output =
608
+ grad_output.view({grad_output.size(0), group, grad_output.size(1) / group,
609
+ grad_output.size(2), grad_output.size(3)});
610
+
611
+ for (int b = 0; b < batch; b++) {
612
+ // divide int group
613
+ columns = columns.view({group, columns.size(0) / group, columns.size(1)});
614
+ weight = weight.view({group, weight.size(0) / group, weight.size(1),
615
+ weight.size(2), weight.size(3)});
616
+
617
+ for (int g = 0; g < group; g++) {
618
+ columns[g].addmm_(weight[g].flatten(1).transpose(0, 1),
619
+ grad_output[b][g].flatten(1), 0.0f, 1.0f);
620
+ }
621
+
622
+ columns =
623
+ columns.view({columns.size(0) * columns.size(1), columns.size(2)});
624
+ weight = weight.view({weight.size(0) * weight.size(1), weight.size(2),
625
+ weight.size(3), weight.size(4)});
626
+
627
+ // gradient w.r.t. input coordinate data
628
+ modulated_deformable_col2im_coord_cuda(
629
+ columns, input[b], offset[b], mask[b], 1, channels, height, width,
630
+ height_out, width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h,
631
+ stride_w, dilation_h, dilation_w, deformable_group, grad_offset[b],
632
+ grad_mask[b]);
633
+ // gradient w.r.t. input data
634
+ modulated_deformable_col2im_cuda(
635
+ columns, offset[b], mask[b], 1, channels, height, width, height_out,
636
+ width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
637
+ dilation_h, dilation_w, deformable_group, grad_input[b]);
638
+
639
+ // gradient w.r.t. weight, dWeight should accumulate across the batch and
640
+ // group
641
+ modulated_deformable_im2col_cuda(
642
+ input[b], offset[b], mask[b], 1, channels, height, width, height_out,
643
+ width_out, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
644
+ dilation_h, dilation_w, deformable_group, columns);
645
+
646
+ columns = columns.view({group, columns.size(0) / group, columns.size(1)});
647
+ grad_weight = grad_weight.view({group, grad_weight.size(0) / group,
648
+ grad_weight.size(1), grad_weight.size(2),
649
+ grad_weight.size(3)});
650
+ if (with_bias)
651
+ grad_bias = grad_bias.view({group, grad_bias.size(0) / group});
652
+
653
+ for (int g = 0; g < group; g++) {
654
+ grad_weight[g] =
655
+ grad_weight[g]
656
+ .flatten(1)
657
+ .addmm_(grad_output[b][g].flatten(1), columns[g].transpose(0, 1))
658
+ .view_as(grad_weight[g]);
659
+ if (with_bias) {
660
+ grad_bias[g] =
661
+ grad_bias[g]
662
+ .view({-1, 1})
663
+ .addmm_(grad_output[b][g].flatten(1), ones.view({-1, 1}))
664
+ .view(-1);
665
+ }
666
+ }
667
+
668
+ columns =
669
+ columns.view({columns.size(0) * columns.size(1), columns.size(2)});
670
+ grad_weight = grad_weight.view({grad_weight.size(0) * grad_weight.size(1),
671
+ grad_weight.size(2), grad_weight.size(3),
672
+ grad_weight.size(4)});
673
+ if (with_bias)
674
+ grad_bias = grad_bias.view({grad_bias.size(0) * grad_bias.size(1)});
675
+ }
676
+ grad_output = grad_output.view({grad_output.size(0) * grad_output.size(1),
677
+ grad_output.size(2), grad_output.size(3),
678
+ grad_output.size(4)});
679
+ }
680
+
681
// Python bindings: expose the five deformable-conv entry points above to
// Python under the extension module name configured by the build
// (TORCH_EXTENSION_NAME, i.e. "deform_conv_cuda" per setup.py).
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("deform_conv_forward_cuda", &deform_conv_forward_cuda,
        "deform forward (CUDA)");
  m.def("deform_conv_backward_input_cuda", &deform_conv_backward_input_cuda,
        "deform_conv_backward_input (CUDA)");
  m.def("deform_conv_backward_parameters_cuda",
        &deform_conv_backward_parameters_cuda,
        "deform_conv_backward_parameters (CUDA)");
  m.def("modulated_deform_conv_cuda_forward",
        &modulated_deform_conv_cuda_forward,
        "modulated deform conv forward (CUDA)");
  m.def("modulated_deform_conv_cuda_backward",
        &modulated_deform_conv_cuda_backward,
        "modulated deform conv backward (CUDA)");
}
DB/assets/ops/dcn/src/deform_conv_cuda_kernel.cu ADDED
@@ -0,0 +1,866 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*!
2
+ ******************* BEGIN Caffe Copyright Notice and Disclaimer ****************
3
+ *
4
+ * COPYRIGHT
5
+ *
6
+ * All contributions by the University of California:
7
+ * Copyright (c) 2014-2017 The Regents of the University of California (Regents)
8
+ * All rights reserved.
9
+ *
10
+ * All other contributions:
11
+ * Copyright (c) 2014-2017, the respective contributors
12
+ * All rights reserved.
13
+ *
14
+ * Caffe uses a shared copyright model: each contributor holds copyright over
15
+ * their contributions to Caffe. The project versioning records all such
16
+ * contribution and copyright details. If a contributor wants to further mark
17
+ * their specific copyright on a particular contribution, they should indicate
18
+ * their copyright solely in the commit message of the change when it is
19
+ * committed.
20
+ *
21
+ * LICENSE
22
+ *
23
+ * Redistribution and use in source and binary forms, with or without
24
+ * modification, are permitted provided that the following conditions are met:
25
+ *
26
+ * 1. Redistributions of source code must retain the above copyright notice, this
27
+ * list of conditions and the following disclaimer.
28
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
29
+ * this list of conditions and the following disclaimer in the documentation
30
+ * and/or other materials provided with the distribution.
31
+ *
32
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
33
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
34
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
35
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
36
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
37
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
38
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
39
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
40
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
41
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42
+ *
43
+ * CONTRIBUTION AGREEMENT
44
+ *
45
+ * By contributing to the BVLC/caffe repository through pull-request, comment,
46
+ * or otherwise, the contributor releases their content to the
47
+ * license and copyright terms herein.
48
+ *
49
+ ***************** END Caffe Copyright Notice and Disclaimer ********************
50
+ *
51
+ * Copyright (c) 2018 Microsoft
52
+ * Licensed under The MIT License [see LICENSE for details]
53
+ * \file modulated_deformable_im2col.cuh
54
+ * \brief Function definitions of converting an image to
55
+ * column matrix based on kernel, padding, dilation, and offset.
56
+ * These functions are mainly used in deformable convolution operators.
57
+ * \ref: https://arxiv.org/abs/1703.06211
58
+ * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng
59
+ */
60
+
61
+ // modify from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu
62
+
63
+ #include <ATen/ATen.h>
64
+ #include <THC/THCAtomics.cuh>
65
+ #include <stdio.h>
66
+ #include <math.h>
67
+ #include <float.h>
68
+
69
+ using namespace at;
70
+
71
// Grid-stride loop: each thread starts at its global index and strides by
// the total number of launched threads, so any grid size covers all n items.
#define CUDA_KERNEL_LOOP(i, n)                                 \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
       i += blockDim.x * gridDim.x)

const int CUDA_NUM_THREADS = 1024; // threads per block for every launch below
const int kMaxGridNum = 65535;     // cap on blocks per grid

// Number of blocks needed to cover N work items, capped at kMaxGridNum.
// When the cap is hit, the grid-stride loop above picks up the remainder.
inline int GET_BLOCKS(const int N)
{
  return std::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS);
}
82
+
83
// Bilinearly interpolates bottom_data (a single H x W plane with row stride
// data_width) at the fractional position (h, w).  Corners that fall outside
// [0, height) x [0, width) contribute zero (implicit zero padding).
template <typename scalar_t>
__device__ scalar_t deformable_im2col_bilinear(const scalar_t *bottom_data, const int data_width,
                                               const int height, const int width, scalar_t h, scalar_t w)
{
  // Integer corners surrounding the sample point.
  int h_low = floor(h);
  int w_low = floor(w);
  int h_high = h_low + 1;
  int w_high = w_low + 1;

  // Fractional parts and their complements.
  scalar_t lh = h - h_low;
  scalar_t lw = w - w_low;
  scalar_t hh = 1 - lh, hw = 1 - lw;

  // Fetch the four corners, substituting zero for out-of-bounds ones.
  scalar_t v1 = 0;
  if (h_low >= 0 && w_low >= 0)
    v1 = bottom_data[h_low * data_width + w_low];
  scalar_t v2 = 0;
  if (h_low >= 0 && w_high <= width - 1)
    v2 = bottom_data[h_low * data_width + w_high];
  scalar_t v3 = 0;
  if (h_high <= height - 1 && w_low >= 0)
    v3 = bottom_data[h_high * data_width + w_low];
  scalar_t v4 = 0;
  if (h_high <= height - 1 && w_high <= width - 1)
    v4 = bottom_data[h_high * data_width + w_high];

  // Standard bilinear weights.
  scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;

  scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
  return val;
}
115
+
116
// Weight with which the input pixel (h, w) contributed to the bilinear
// sample taken at (argmax_h, argmax_w); used to scatter the top gradient
// back to the input image.  Nonzero only when (h, w) is one of the four
// corners of the sample point's bilinear footprint.
template <typename scalar_t>
__device__ scalar_t get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w,
                                        const int h, const int w, const int height, const int width)
{
  // Sample point entirely outside the image: no contribution.
  if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
  {
    return 0;
  }

  int argmax_h_low = floor(argmax_h);
  int argmax_w_low = floor(argmax_w);
  int argmax_h_high = argmax_h_low + 1;
  int argmax_w_high = argmax_w_low + 1;

  // Each branch is the bilinear weight of the matching corner.
  scalar_t weight = 0;
  if (h == argmax_h_low && w == argmax_w_low)
    weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
  if (h == argmax_h_low && w == argmax_w_high)
    weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
  if (h == argmax_h_high && w == argmax_w_low)
    weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
  if (h == argmax_h_high && w == argmax_w_high)
    weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
  return weight;
}
143
+
144
// Derivative of the bilinear sample at (argmax_h, argmax_w) with respect to
// the sampling coordinate itself: bp_dir == 0 differentiates along h,
// bp_dir == 1 along w.  Used to backpropagate into the offset field.
template <typename scalar_t>
__device__ scalar_t get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w,
                                          const int height, const int width, const scalar_t *im_data,
                                          const int data_width, const int bp_dir)
{
  // Sample point entirely outside the image: no gradient.
  if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
  {
    return 0;
  }

  int argmax_h_low = floor(argmax_h);
  int argmax_w_low = floor(argmax_w);
  int argmax_h_high = argmax_h_low + 1;
  int argmax_w_high = argmax_w_low + 1;

  scalar_t weight = 0;

  if (bp_dir == 0)
  {
    // d/dh: the w-direction weights stay fixed, the h-direction linear
    // weights differentiate to -1 (low row) / +1 (high row).
    if (argmax_h_low >= 0 && argmax_w_low >= 0)
      weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
    if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
      weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
    if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
      weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
    if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
      weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
  }
  else if (bp_dir == 1)
  {
    // d/dw: symmetric to the bp_dir == 0 case with h and w swapped.
    if (argmax_h_low >= 0 && argmax_w_low >= 0)
      weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
    if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
      weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
    if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
      weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
    if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
      weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
  }

  return weight;
}
188
+
189
// im2col for deformable convolution: one thread per (input channel, image,
// output y, output x).  Each thread writes the kernel_h * kernel_w column
// entries for its position, sampling the input bilinearly at the
// offset-shifted tap locations.
template <typename scalar_t>
__global__ void deformable_im2col_gpu_kernel(const int n, const scalar_t *data_im, const scalar_t *data_offset,
                                             const int height, const int width, const int kernel_h, const int kernel_w,
                                             const int pad_h, const int pad_w, const int stride_h, const int stride_w,
                                             const int dilation_h, const int dilation_w, const int channel_per_deformable_group,
                                             const int batch_size, const int num_channels, const int deformable_group,
                                             const int height_col, const int width_col,
                                             scalar_t *data_col)
{
  CUDA_KERNEL_LOOP(index, n)
  {
    // Decompose the flat index: fastest axis is output x, then output y,
    // then image within the mini-batch, then input channel.
    const int w_col = index % width_col;
    const int h_col = (index / width_col) % height_col;
    const int b_col = (index / width_col / height_col) % batch_size;
    const int c_im = (index / width_col / height_col) / batch_size;
    const int c_col = c_im * kernel_h * kernel_w;

    // Deformable group this input channel belongs to.
    const int deformable_group_index = c_im / channel_per_deformable_group;

    // Top-left corner of the (unshifted) receptive field in input coords.
    const int h_in = h_col * stride_h - pad_h;
    const int w_in = w_col * stride_w - pad_w;
    scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
    const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
    // Offsets: 2 * kernel_h * kernel_w planes (h/w interleaved) per group.
    const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;

    for (int i = 0; i < kernel_h; ++i)
    {
      for (int j = 0; j < kernel_w; ++j)
      {
        // (h, w) offset pair for kernel tap (i, j) at this output location.
        const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
        const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
        const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
        const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
        scalar_t val = static_cast<scalar_t>(0);
        const scalar_t h_im = h_in + i * dilation_h + offset_h;
        const scalar_t w_im = w_in + j * dilation_w + offset_w;
        // Sample bilinearly; positions fully outside the image stay zero.
        if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
        {
          val = deformable_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
        }
        *data_col_ptr = val;
        // Advance to the next kernel-tap row of the column buffer.
        data_col_ptr += batch_size * height_col * width_col;
      }
    }
  }
}
243
+
244
// Host wrapper: expands parallel_imgs input images into the deformable
// im2col column buffer data_col, dispatching over floating dtypes and
// launching deformable_im2col_gpu_kernel with one thread per
// (channel, image, output location).
// NOTE(review): .type() and .data<scalar_t>() are legacy ATen accessors
// (deprecated in newer torch) -- consider .scalar_type() /
// .data_ptr<scalar_t>() when upgrading; verify against the torch version
// this extension is built for.
void deformable_im2col(
    const at::Tensor data_im, const at::Tensor data_offset, const int channels,
    const int height, const int width, const int ksize_h, const int ksize_w,
    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w, const int parallel_imgs,
    const int deformable_group, at::Tensor data_col)
{
  // Output spatial size from the standard convolution formula.
  // todo: check parallel_imgs is correctly passed in
  int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
  int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
  int num_kernels = channels * height_col * width_col * parallel_imgs;
  int channel_per_deformable_group = channels / deformable_group;

  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      data_im.type(), "deformable_im2col_gpu", ([&] {
        const scalar_t *data_im_ = data_im.data<scalar_t>();
        const scalar_t *data_offset_ = data_offset.data<scalar_t>();
        scalar_t *data_col_ = data_col.data<scalar_t>();

        deformable_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
            num_kernels, data_im_, data_offset_, height, width, ksize_h, ksize_w,
            pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
            channel_per_deformable_group, parallel_imgs, channels, deformable_group,
            height_col, width_col, data_col_);
      }));

  // Surface asynchronous kernel-launch errors.
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess)
  {
    printf("error in deformable_im2col: %s\n", cudaGetErrorString(err));
  }
}
277
+
278
// col2im for deformable convolution: one thread per column-buffer element,
// i.e. per (channel, kernel tap i, kernel tap j, image, output location).
// Each thread scatters its top gradient to the input pixels lying in the
// bilinear footprint of its sample point, via atomicAdd into grad_im.
template <typename scalar_t>
__global__ void deformable_col2im_gpu_kernel(
    const int n, const scalar_t *data_col, const scalar_t *data_offset,
    const int channels, const int height, const int width,
    const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    const int channel_per_deformable_group,
    const int batch_size, const int deformable_group,
    const int height_col, const int width_col,
    scalar_t *grad_im)
{
  CUDA_KERNEL_LOOP(index, n)
  {
    // Kernel-tap coordinates and input channel encoded in the slow axes.
    const int j = (index / width_col / height_col / batch_size) % kernel_w;
    const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
    const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;

    const int deformable_group_index = c / channel_per_deformable_group;

    // Output location and image encoded in the fast axes.
    int w_out = index % width_col;
    int h_out = (index / width_col) % height_col;
    int b = (index / width_col / height_col) % batch_size;
    int w_in = w_out * stride_w - pad_w;
    int h_in = h_out * stride_h - pad_h;

    // Recover the exact (fractional) position this column entry sampled.
    const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) *
                                                        2 * kernel_h * kernel_w * height_col * width_col;
    const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
    const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
    const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
    const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
    const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h;
    const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w;

    const scalar_t cur_top_grad = data_col[index];
    const int cur_h = (int)cur_inv_h_data;
    const int cur_w = (int)cur_inv_w_data;
    // Scan a 5x5 neighborhood; the |distance| < 1 guards restrict the
    // scatter to pixels with nonzero bilinear weight.
    for (int dy = -2; dy <= 2; dy++)
    {
      for (int dx = -2; dx <= 2; dx++)
      {
        if (cur_h + dy >= 0 && cur_h + dy < height &&
            cur_w + dx >= 0 && cur_w + dx < width &&
            abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
            abs(cur_inv_w_data - (cur_w + dx)) < 1)
        {
          int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
          scalar_t weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
          // Atomic: multiple column entries may map to the same input pixel.
          atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
        }
      }
    }
  }
}
335
+
336
// Host wrapper: accumulates gradients from the column buffer data_col back
// into the input-image gradient grad_im (deformable col2im).  Launches one
// thread per column-buffer element.
// NOTE(review): .type() / .data<scalar_t>() are legacy ATen accessors --
// see the note on deformable_im2col.
void deformable_col2im(
    const at::Tensor data_col, const at::Tensor data_offset, const int channels,
    const int height, const int width, const int ksize_h,
    const int ksize_w, const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    const int parallel_imgs, const int deformable_group,
    at::Tensor grad_im)
{

  // todo: make sure parallel_imgs is passed in correctly
  int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
  int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
  // One thread per column-buffer element.
  int num_kernels = channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs;
  int channel_per_deformable_group = channels / deformable_group;

  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      data_col.type(), "deformable_col2im_gpu", ([&] {
        const scalar_t *data_col_ = data_col.data<scalar_t>();
        const scalar_t *data_offset_ = data_offset.data<scalar_t>();
        scalar_t *grad_im_ = grad_im.data<scalar_t>();

        deformable_col2im_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
            num_kernels, data_col_, data_offset_, channels, height, width, ksize_h,
            ksize_w, pad_h, pad_w, stride_h, stride_w,
            dilation_h, dilation_w, channel_per_deformable_group,
            parallel_imgs, deformable_group, height_col, width_col, grad_im_);
      }));

  // Surface asynchronous kernel-launch errors.
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess)
  {
    printf("error in deformable_col2im: %s\n", cudaGetErrorString(err));
  }
}
371
+
372
// Gradient w.r.t. the sampling offsets: one thread per offset element, i.e.
// per (image, offset channel, output location), where an offset channel is
// one h- or w-coordinate of one kernel tap in one deformable group.  Each
// thread sums, over the input channels of its group, the coordinate
// derivative of the bilinear sample times the incoming column gradient.
template <typename scalar_t>
__global__ void deformable_col2im_coord_gpu_kernel(const int n, const scalar_t *data_col,
                                                   const scalar_t *data_im, const scalar_t *data_offset,
                                                   const int channels, const int height, const int width,
                                                   const int kernel_h, const int kernel_w,
                                                   const int pad_h, const int pad_w,
                                                   const int stride_h, const int stride_w,
                                                   const int dilation_h, const int dilation_w,
                                                   const int channel_per_deformable_group,
                                                   const int batch_size, const int offset_channels, const int deformable_group,
                                                   const int height_col, const int width_col, scalar_t *grad_offset)
{
  CUDA_KERNEL_LOOP(index, n)
  {
    scalar_t val = 0;
    int w = index % width_col;
    int h = (index / width_col) % height_col;
    int c = (index / width_col / height_col) % offset_channels;
    int b = (index / width_col / height_col) / offset_channels;

    // Each deformable group owns 2 * kernel_h * kernel_w offset channels.
    const int deformable_group_index = c / (2 * kernel_h * kernel_w);
    const int col_step = kernel_h * kernel_w;
    int cnt = 0;
    const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group *
                                                  batch_size * width_col * height_col;
    const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) *
                                                channel_per_deformable_group / kernel_h / kernel_w * height * width;
    const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 *
                                                        kernel_h * kernel_w * height_col * width_col;

    // Offset channel within this group; even = h-coordinate, odd = w.
    const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;

    // Walk the column channels of this group that share this kernel tap.
    for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step)
    {
      const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
      const int bp_dir = offset_c % 2;

      // Re-derive the kernel-tap / output-location coords from col_pos.
      int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
      int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
      int w_out = col_pos % width_col;
      int h_out = (col_pos / width_col) % height_col;
      int w_in = w_out * stride_w - pad_w;
      int h_in = h_out * stride_h - pad_h;
      const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
      const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
      const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
      const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
      scalar_t inv_h = h_in + i * dilation_h + offset_h;
      scalar_t inv_w = w_in + j * dilation_w + offset_w;
      // Out-of-image sample points get a sentinel that yields zero weight.
      if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width)
      {
        inv_h = inv_w = -2;
      }
      const scalar_t weight = get_coordinate_weight(
          inv_h, inv_w,
          height, width, data_im_ptr + cnt * height * width, width, bp_dir);
      val += weight * data_col_ptr[col_pos];
      cnt += 1;
    }

    grad_offset[index] = val;
  }
}
436
+
437
// Host wrapper: backpropagates gradients from the column buffer into the
// sampling offsets (grad_offset) of the (non-modulated) deformable
// convolution.  Launches deformable_col2im_coord_gpu_kernel with one thread
// per offset element: 2 coordinates per kernel tap per deformable group per
// output location, over parallel_imgs images.
// NOTE(review): .type() / .data<scalar_t>() are legacy ATen accessors --
// see the note on deformable_im2col.
void deformable_col2im_coord(
    const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset,
    const int channels, const int height, const int width, const int ksize_h,
    const int ksize_w, const int pad_h, const int pad_w, const int stride_h,
    const int stride_w, const int dilation_h, const int dilation_w,
    const int parallel_imgs, const int deformable_group, at::Tensor grad_offset)
{

  int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
  int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
  // One thread per offset element.
  int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * deformable_group * parallel_imgs;
  // Here "channel" means a column-buffer channel (input channel x tap).
  int channel_per_deformable_group = channels * ksize_h * ksize_w / deformable_group;

  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      data_col.type(), "deformable_col2im_coord_gpu", ([&] {
        const scalar_t *data_col_ = data_col.data<scalar_t>();
        const scalar_t *data_im_ = data_im.data<scalar_t>();
        const scalar_t *data_offset_ = data_offset.data<scalar_t>();
        scalar_t *grad_offset_ = grad_offset.data<scalar_t>();

        deformable_col2im_coord_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
            num_kernels, data_col_, data_im_, data_offset_, channels, height, width,
            ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w,
            dilation_h, dilation_w, channel_per_deformable_group,
            parallel_imgs, 2 * ksize_h * ksize_w * deformable_group, deformable_group,
            height_col, width_col, grad_offset_);
      }));

  // Fix: unlike deformable_im2col / deformable_col2im, this wrapper dropped
  // asynchronous launch errors on the floor.  Check and report them the
  // same way the sibling wrappers do.
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess)
  {
    printf("error in deformable_col2im_coord: %s\n", cudaGetErrorString(err));
  }
}
465
+
466
// Bilinear interpolation helper for the modulated (DCNv2) path; identical
// in behavior to deformable_im2col_bilinear above.  Samples bottom_data at
// fractional (h, w); out-of-bounds corners contribute zero.
template <typename scalar_t>
__device__ scalar_t dmcn_im2col_bilinear(const scalar_t *bottom_data, const int data_width,
                                         const int height, const int width, scalar_t h, scalar_t w)
{
  // Integer corners surrounding the sample point.
  int h_low = floor(h);
  int w_low = floor(w);
  int h_high = h_low + 1;
  int w_high = w_low + 1;

  // Fractional parts and their complements.
  scalar_t lh = h - h_low;
  scalar_t lw = w - w_low;
  scalar_t hh = 1 - lh, hw = 1 - lw;

  // Fetch the four corners, substituting zero for out-of-bounds ones.
  scalar_t v1 = 0;
  if (h_low >= 0 && w_low >= 0)
    v1 = bottom_data[h_low * data_width + w_low];
  scalar_t v2 = 0;
  if (h_low >= 0 && w_high <= width - 1)
    v2 = bottom_data[h_low * data_width + w_high];
  scalar_t v3 = 0;
  if (h_high <= height - 1 && w_low >= 0)
    v3 = bottom_data[h_high * data_width + w_low];
  scalar_t v4 = 0;
  if (h_high <= height - 1 && w_high <= width - 1)
    v4 = bottom_data[h_high * data_width + w_high];

  // Standard bilinear weights.
  scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;

  scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
  return val;
}
497
+
498
// DCNv2 twin of get_gradient_weight: bilinear weight of input pixel (h, w)
// in the sample taken at (argmax_h, argmax_w); zero unless (h, w) is one of
// the sample's four footprint corners.
template <typename scalar_t>
__device__ scalar_t dmcn_get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w,
                                             const int h, const int w, const int height, const int width)
{
  // Sample point entirely outside the image: no contribution.
  if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
  {
    return 0;
  }

  int argmax_h_low = floor(argmax_h);
  int argmax_w_low = floor(argmax_w);
  int argmax_h_high = argmax_h_low + 1;
  int argmax_w_high = argmax_w_low + 1;

  // Each branch is the bilinear weight of the matching corner.
  scalar_t weight = 0;
  if (h == argmax_h_low && w == argmax_w_low)
    weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
  if (h == argmax_h_low && w == argmax_w_high)
    weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
  if (h == argmax_h_high && w == argmax_w_low)
    weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
  if (h == argmax_h_high && w == argmax_w_high)
    weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
  return weight;
}
524
+
525
// DCNv2 twin of get_coordinate_weight: derivative of the bilinear sample at
// (argmax_h, argmax_w) w.r.t. the sampling coordinate (bp_dir 0 = h,
// bp_dir 1 = w).
template <typename scalar_t>
__device__ scalar_t dmcn_get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w,
                                               const int height, const int width, const scalar_t *im_data,
                                               const int data_width, const int bp_dir)
{
  // Sample point entirely outside the image: no gradient.
  if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
  {
    return 0;
  }

  int argmax_h_low = floor(argmax_h);
  int argmax_w_low = floor(argmax_w);
  int argmax_h_high = argmax_h_low + 1;
  int argmax_w_high = argmax_w_low + 1;

  scalar_t weight = 0;

  if (bp_dir == 0)
  {
    // d/dh: w-direction weights fixed, h-direction weights become -1 / +1.
    if (argmax_h_low >= 0 && argmax_w_low >= 0)
      weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
    if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
      weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
    if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
      weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
    if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
      weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
  }
  else if (bp_dir == 1)
  {
    // d/dw: symmetric case with h and w roles swapped.
    if (argmax_h_low >= 0 && argmax_w_low >= 0)
      weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
    if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
      weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
    if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
      weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
    if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
      weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
  }

  return weight;
}
568
+
569
// im2col for modulated deformable convolution (DCNv2).  Same layout as
// deformable_im2col_gpu_kernel, but each sampled value is additionally
// scaled by a learned per-tap mask before being written to the column
// buffer.
template <typename scalar_t>
__global__ void modulated_deformable_im2col_gpu_kernel(const int n,
                                                       const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask,
                                                       const int height, const int width, const int kernel_h, const int kernel_w,
                                                       const int pad_h, const int pad_w,
                                                       const int stride_h, const int stride_w,
                                                       const int dilation_h, const int dilation_w,
                                                       const int channel_per_deformable_group,
                                                       const int batch_size, const int num_channels, const int deformable_group,
                                                       const int height_col, const int width_col,
                                                       scalar_t *data_col)
{
  CUDA_KERNEL_LOOP(index, n)
  {
    // Decompose flat index into (input channel, image, output y, output x).
    const int w_col = index % width_col;
    const int h_col = (index / width_col) % height_col;
    const int b_col = (index / width_col / height_col) % batch_size;
    const int c_im = (index / width_col / height_col) / batch_size;
    const int c_col = c_im * kernel_h * kernel_w;

    // Deformable group this input channel belongs to.
    const int deformable_group_index = c_im / channel_per_deformable_group;

    // Top-left corner of the (unshifted) receptive field in input coords.
    const int h_in = h_col * stride_h - pad_h;
    const int w_in = w_col * stride_w - pad_w;

    scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
    const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
    // Offsets: 2 planes (h, w) per kernel tap per group.
    const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
    // Masks: 1 plane per kernel tap per group.
    const scalar_t *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;

    for (int i = 0; i < kernel_h; ++i)
    {
      for (int j = 0; j < kernel_w; ++j)
      {
        const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
        const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
        const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
        const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
        const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
        const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
        scalar_t val = static_cast<scalar_t>(0);
        const scalar_t h_im = h_in + i * dilation_h + offset_h;
        const scalar_t w_im = w_in + j * dilation_w + offset_w;
        // Sample bilinearly; positions fully outside the image stay zero.
        if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
        {
          val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
        }
        // DCNv2: modulate the sampled value by the learned mask.
        *data_col_ptr = val * mask;
        data_col_ptr += batch_size * height_col * width_col;
      }
    }
  }
}
633
+
634
// col2im for modulated deformable convolution (DCNv2): one thread per
// column-buffer element.  Identical scatter scheme to
// deformable_col2im_gpu_kernel, except the incoming gradient is first
// scaled by the tap's mask value.
template <typename scalar_t>
__global__ void modulated_deformable_col2im_gpu_kernel(const int n,
                                                       const scalar_t *data_col, const scalar_t *data_offset, const scalar_t *data_mask,
                                                       const int channels, const int height, const int width,
                                                       const int kernel_h, const int kernel_w,
                                                       const int pad_h, const int pad_w,
                                                       const int stride_h, const int stride_w,
                                                       const int dilation_h, const int dilation_w,
                                                       const int channel_per_deformable_group,
                                                       const int batch_size, const int deformable_group,
                                                       const int height_col, const int width_col,
                                                       scalar_t *grad_im)
{
  CUDA_KERNEL_LOOP(index, n)
  {
    // Kernel-tap coordinates and input channel from the slow axes.
    const int j = (index / width_col / height_col / batch_size) % kernel_w;
    const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
    const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;

    const int deformable_group_index = c / channel_per_deformable_group;

    // Output location and image from the fast axes.
    int w_out = index % width_col;
    int h_out = (index / width_col) % height_col;
    int b = (index / width_col / height_col) % batch_size;
    int w_in = w_out * stride_w - pad_w;
    int h_in = h_out * stride_h - pad_h;

    // Recover the fractional position this column entry sampled, plus mask.
    const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
    const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
    const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
    const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
    const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out;
    const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
    const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
    const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
    const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h;
    const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w;

    // DCNv2: the mask scales the gradient flowing back through this tap.
    const scalar_t cur_top_grad = data_col[index] * mask;
    const int cur_h = (int)cur_inv_h_data;
    const int cur_w = (int)cur_inv_w_data;
    // Scan a 5x5 neighborhood; the |distance| < 1 guards restrict the
    // scatter to pixels with nonzero bilinear weight.
    for (int dy = -2; dy <= 2; dy++)
    {
      for (int dx = -2; dx <= 2; dx++)
      {
        if (cur_h + dy >= 0 && cur_h + dy < height &&
            cur_w + dx >= 0 && cur_w + dx < width &&
            abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
            abs(cur_inv_w_data - (cur_w + dx)) < 1)
        {
          int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
          scalar_t weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
          // Atomic: multiple column entries may map to the same input pixel.
          atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
        }
      }
    }
  }
}
693
+
694
// Backward kernel w.r.t. the sampling coordinates of modulated (v2)
// deformable convolution. One thread per offset element
// (b, offset-channel, h_col, w_col); each thread accumulates the gradient
// for one (y or x) offset component into grad_offset, and the thread
// handling the even (y) component of each pair also writes the gradient
// of the corresponding modulation-mask value into grad_mask.
template <typename scalar_t>
__global__ void modulated_deformable_col2im_coord_gpu_kernel(const int n,
    const scalar_t *data_col, const scalar_t *data_im,
    const scalar_t *data_offset, const scalar_t *data_mask,
    const int channels, const int height, const int width,
    const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    const int channel_per_deformable_group,
    const int batch_size, const int offset_channels, const int deformable_group,
    const int height_col, const int width_col,
    scalar_t *grad_offset, scalar_t *grad_mask)
{
  CUDA_KERNEL_LOOP(index, n)
  {
    scalar_t val = 0, mval = 0;
    // Decompose the linear index into (b, c, h, w) of the offset tensor,
    // where c runs over 2 * kernel_h * kernel_w * deformable_group channels.
    int w = index % width_col;
    int h = (index / width_col) % height_col;
    int c = (index / width_col / height_col) % offset_channels;
    int b = (index / width_col / height_col) / offset_channels;
    // compute the start and end of the output

    const int deformable_group_index = c / (2 * kernel_h * kernel_w);
    const int col_step = kernel_h * kernel_w;
    int cnt = 0;
    // Base pointers for this (batch, deformable group) slice.
    const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col;
    const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width;
    const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
    const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;

    // Offset-channel index within this group; even -> y-offset, odd -> x-offset.
    const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;

    // Sum contributions over every input channel that shares this offset
    // (one column channel per kernel tap, stepping by kernel_h * kernel_w).
    for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step)
    {
      const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
      const int bp_dir = offset_c % 2;  // 0: d/dy, 1: d/dx

      // Recover the kernel tap (i, j) and output location for this column entry.
      int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
      int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
      int w_out = col_pos % width_col;
      int h_out = (col_pos / width_col) % height_col;
      int w_in = w_out * stride_w - pad_w;
      int h_in = h_out * stride_h - pad_h;
      const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
      const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
      const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out);
      const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
      const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
      const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
      // Fractional sampling location for this tap.
      scalar_t inv_h = h_in + i * dilation_h + offset_h;
      scalar_t inv_w = w_in + j * dilation_w + offset_w;
      if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width)
      {
        // Out of bounds: sentinel coordinate makes the coordinate weight zero.
        inv_h = inv_w = -2;
      }
      else
      {
        // Mask gradient: upstream grad times the (unmasked) sampled value.
        // NOTE(review): argument order (…, width, height, width, …) presumably
        // matches dmcn_im2col_bilinear(data, data_width, height, width, h, w)
        // — confirm against the helper's declaration earlier in this file.
        mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w);
      }
      // d(sampled value)/d(offset component bp_dir), then chain rule with mask.
      const scalar_t weight = dmcn_get_coordinate_weight(
          inv_h, inv_w,
          height, width, data_im_ptr + cnt * height * width, width, bp_dir);
      val += weight * data_col_ptr[col_pos] * mask;
      cnt += 1;
    }
    // KERNEL_ASSIGN(grad_offset[index], offset_req, val);
    grad_offset[index] = val;
    // Only the y-offset thread of each (y, x) pair stores the mask gradient.
    if (offset_c % 2 == 0)
      // KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval);
      grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval;
  }
}
767
+
768
// Launches the im2col kernel for modulated (v2) deformable convolution:
// samples the input feature map at the learned offset locations, scales
// each sample by its modulation mask, and writes the result into the
// column buffer `data_col` consumed by the subsequent GEMM.
void modulated_deformable_im2col_cuda(
    const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask,
    const int batch_size, const int channels, const int height_im, const int width_im,
    const int height_col, const int width_col, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    const int deformable_group, at::Tensor data_col)
{
  // num_axes should be smaller than block size
  const int channel_per_deformable_group = channels / deformable_group;
  // One thread per (channel, batch, out_y, out_x) element.
  const int num_kernels = channels * batch_size * height_col * width_col;

  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      data_im.type(), "modulated_deformable_im2col_gpu", ([&] {
        const scalar_t *data_im_ = data_im.data<scalar_t>();
        const scalar_t *data_offset_ = data_offset.data<scalar_t>();
        const scalar_t *data_mask_ = data_mask.data<scalar_t>();
        scalar_t *data_col_ = data_col.data<scalar_t>();

        modulated_deformable_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
            num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kernel_w,
            pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group,
            batch_size, channels, deformable_group, height_col, width_col, data_col_);
      }));

  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess)
  {
    // Fix: report launch failures instead of silently swallowing them
    // (the printf had been commented out; the sibling col2im / col2im_coord
    // wrappers in this file do report their errors).
    printf("error in modulated_deformable_im2col_cuda: %s\n", cudaGetErrorString(err));
  }
}
799
+
800
// Launches the col2im kernel for the backward pass of modulated
// deformable convolution: scatters gradients from the column buffer
// back into `grad_im` (gradient w.r.t. the input feature map).
void modulated_deformable_col2im_cuda(
    const at::Tensor data_col, const at::Tensor data_offset, const at::Tensor data_mask,
    const int batch_size, const int channels, const int height_im, const int width_im,
    const int height_col, const int width_col, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    const int deformable_group, at::Tensor grad_im)
{

  const int channel_per_deformable_group = channels / deformable_group;
  // One thread per column-buffer element (channel * tap * batch * out_y * out_x).
  const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col;

  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      data_col.type(), "modulated_deformable_col2im_gpu", ([&] {
        const scalar_t *data_col_ = data_col.data<scalar_t>();
        const scalar_t *data_offset_ = data_offset.data<scalar_t>();
        const scalar_t *data_mask_ = data_mask.data<scalar_t>();
        scalar_t *grad_im_ = grad_im.data<scalar_t>();

        modulated_deformable_col2im_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
            num_kernels, data_col_, data_offset_, data_mask_, channels, height_im, width_im,
            // Fix: was `pad_h, pad_h`, which fed the vertical padding in for
            // both axes and produced wrong gradients whenever pad_h != pad_w.
            kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
            dilation_h, dilation_w, channel_per_deformable_group,
            batch_size, deformable_group, height_col, width_col, grad_im_);
      }));

  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess)
  {
    printf("error in modulated_deformable_col2im_cuda: %s\n", cudaGetErrorString(err));
  }
}
832
+
833
// Launches the kernel that back-propagates through the sampling
// coordinates of modulated deformable convolution, producing gradients
// for both the offset field (grad_offset) and the modulation mask
// (grad_mask).
void modulated_deformable_col2im_coord_cuda(
    const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask,
    const int batch_size, const int channels, const int height_im, const int width_im,
    const int height_col, const int width_col, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w, const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    const int deformable_group,
    at::Tensor grad_offset, at::Tensor grad_mask)
{
  // One thread per offset element: two components (y, x) per kernel tap
  // per deformable group per spatial output location.
  const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group;
  // Column channels handled per deformable group (input channels * taps).
  const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group;

  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      data_col.type(), "modulated_deformable_col2im_coord_gpu", ([&] {
        const scalar_t *data_col_ = data_col.data<scalar_t>();
        const scalar_t *data_im_ = data_im.data<scalar_t>();
        const scalar_t *data_offset_ = data_offset.data<scalar_t>();
        const scalar_t *data_mask_ = data_mask.data<scalar_t>();
        scalar_t *grad_offset_ = grad_offset.data<scalar_t>();
        scalar_t *grad_mask_ = grad_mask.data<scalar_t>();

        modulated_deformable_col2im_coord_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
            num_kernels, data_col_, data_im_, data_offset_, data_mask_, channels, height_im, width_im,
            kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
            dilation_h, dilation_w, channel_per_deformable_group,
            // offset_channels = 2 components per tap per group.
            batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col,
            grad_offset_, grad_mask_);
      }));
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess)
  {
    printf("error in modulated_deformable_col2im_coord_cuda: %s\n", cudaGetErrorString(err));
  }
}
DB/assets/ops/dcn/src/deform_pool_cuda.cpp ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // modify from
2
+ // https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/modulated_dcn_cuda.c
3
+
4
+ // based on
5
+ // author: Charles Shang
6
+ // https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu
7
+
8
+ #include <torch/extension.h>
9
+
10
+ #include <cmath>
11
+ #include <vector>
12
+
13
+ void DeformablePSROIPoolForward(
14
+ const at::Tensor data, const at::Tensor bbox, const at::Tensor trans,
15
+ at::Tensor out, at::Tensor top_count, const int batch, const int channels,
16
+ const int height, const int width, const int num_bbox,
17
+ const int channels_trans, const int no_trans, const float spatial_scale,
18
+ const int output_dim, const int group_size, const int pooled_size,
19
+ const int part_size, const int sample_per_part, const float trans_std);
20
+
21
+ void DeformablePSROIPoolBackwardAcc(
22
+ const at::Tensor out_grad, const at::Tensor data, const at::Tensor bbox,
23
+ const at::Tensor trans, const at::Tensor top_count, at::Tensor in_grad,
24
+ at::Tensor trans_grad, const int batch, const int channels,
25
+ const int height, const int width, const int num_bbox,
26
+ const int channels_trans, const int no_trans, const float spatial_scale,
27
+ const int output_dim, const int group_size, const int pooled_size,
28
+ const int part_size, const int sample_per_part, const float trans_std);
29
+
30
+ void deform_psroi_pooling_cuda_forward(
31
+ at::Tensor input, at::Tensor bbox, at::Tensor trans, at::Tensor out,
32
+ at::Tensor top_count, const int no_trans, const float spatial_scale,
33
+ const int output_dim, const int group_size, const int pooled_size,
34
+ const int part_size, const int sample_per_part, const float trans_std) {
35
+ TORCH_CHECK(input.is_contiguous(), "input tensor has to be contiguous");
36
+
37
+ const int batch = input.size(0);
38
+ const int channels = input.size(1);
39
+ const int height = input.size(2);
40
+ const int width = input.size(3);
41
+ const int channels_trans = no_trans ? 2 : trans.size(1);
42
+
43
+ const int num_bbox = bbox.size(0);
44
+ if (num_bbox != out.size(0))
45
+ AT_ERROR("Output shape and bbox number wont match: (%d vs %d).",
46
+ out.size(0), num_bbox);
47
+
48
+ DeformablePSROIPoolForward(
49
+ input, bbox, trans, out, top_count, batch, channels, height, width,
50
+ num_bbox, channels_trans, no_trans, spatial_scale, output_dim, group_size,
51
+ pooled_size, part_size, sample_per_part, trans_std);
52
+ }
53
+
54
+ void deform_psroi_pooling_cuda_backward(
55
+ at::Tensor out_grad, at::Tensor input, at::Tensor bbox, at::Tensor trans,
56
+ at::Tensor top_count, at::Tensor input_grad, at::Tensor trans_grad,
57
+ const int no_trans, const float spatial_scale, const int output_dim,
58
+ const int group_size, const int pooled_size, const int part_size,
59
+ const int sample_per_part, const float trans_std) {
60
+ TORCH_CHECK(out_grad.is_contiguous(), "out_grad tensor has to be contiguous");
61
+ TORCH_CHECK(input.is_contiguous(), "input tensor has to be contiguous");
62
+
63
+ const int batch = input.size(0);
64
+ const int channels = input.size(1);
65
+ const int height = input.size(2);
66
+ const int width = input.size(3);
67
+ const int channels_trans = no_trans ? 2 : trans.size(1);
68
+
69
+ const int num_bbox = bbox.size(0);
70
+ if (num_bbox != out_grad.size(0))
71
+ AT_ERROR("Output shape and bbox number wont match: (%d vs %d).",
72
+ out_grad.size(0), num_bbox);
73
+
74
+ DeformablePSROIPoolBackwardAcc(
75
+ out_grad, input, bbox, trans, top_count, input_grad, trans_grad, batch,
76
+ channels, height, width, num_bbox, channels_trans, no_trans,
77
+ spatial_scale, output_dim, group_size, pooled_size, part_size,
78
+ sample_per_part, trans_std);
79
+ }
80
+
81
// Python bindings: expose the deformable PS-ROI pooling forward/backward
// entry points to Python under the extension's module name.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("deform_psroi_pooling_cuda_forward", &deform_psroi_pooling_cuda_forward,
        "deform psroi pooling forward(CUDA)");
  m.def("deform_psroi_pooling_cuda_backward",
        &deform_psroi_pooling_cuda_backward,
        "deform psroi pooling backward(CUDA)");
}
DB/assets/ops/dcn/src/deform_pool_cuda_kernel.cu ADDED
@@ -0,0 +1,364 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*!
2
+ * Copyright (c) 2017 Microsoft
3
+ * Licensed under The MIT License [see LICENSE for details]
4
+ * \file deformable_psroi_pooling.cu
5
+ * \brief
6
+ * \author Yi Li, Guodong Zhang, Jifeng Dai
7
+ */
8
+ /***************** Adapted by Charles Shang *********************/
9
+ // modify from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/cuda/deform_psroi_pooling_cuda.cu
10
+
11
+ #include <ATen/ATen.h>
12
+ #include <THC/THCAtomics.cuh>
13
+ #include <stdio.h>
14
+ #include <math.h>
15
+ #include <algorithm>
16
+
17
+ using namespace at;
18
+
19
+ #define CUDA_KERNEL_LOOP(i, n) \
20
+ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
21
+ i < (n); \
22
+ i += blockDim.x * gridDim.x)
23
+
24
// Number of CUDA threads launched per block.
const int CUDA_NUM_THREADS = 1024;

// Ceiling division: how many blocks of CUDA_NUM_THREADS threads are
// needed to cover N work items.
inline int GET_BLOCKS(const int N)
{
  const int per_block = CUDA_NUM_THREADS;
  return (N + per_block - 1) / per_block;
}
29
+
30
// Bilinear interpolation of a single height x width plane (`data`,
// row-major) at fractional coordinates (x, y). Callers clamp (x, y) into
// [0, width-1] x [0, height-1] before invoking, so all four corner reads
// are in bounds.
template <typename scalar_t>
__device__ scalar_t bilinear_interp(
    const scalar_t *data,
    const scalar_t x,
    const scalar_t y,
    const int width,
    const int height)
{
  // Integer corner coordinates surrounding (x, y).
  int x1 = floor(x);
  int x2 = ceil(x);
  int y1 = floor(y);
  int y2 = ceil(y);
  // Fractional distances from the low corner.
  scalar_t dist_x = (scalar_t)(x - x1);
  scalar_t dist_y = (scalar_t)(y - y1);
  // The four corner values (value<col><row> naming: 11 = low-x/low-y).
  scalar_t value11 = data[y1 * width + x1];
  scalar_t value12 = data[y2 * width + x1];
  scalar_t value21 = data[y1 * width + x2];
  scalar_t value22 = data[y2 * width + x2];
  // Standard bilinear weighting of the four corners.
  scalar_t value = (1 - dist_x) * (1 - dist_y) * value11 + (1 - dist_x) * dist_y * value12 + dist_x * (1 - dist_y) * value21 + dist_x * dist_y * value22;
  return value;
}
51
+
52
// Forward kernel for deformable position-sensitive ROI pooling.
// One thread per output element (n, ctop, ph, pw): it averages
// sample_per_part x sample_per_part bilinear samples taken inside the
// (optionally offset-shifted) bin, writing the mean into top_data and the
// number of in-bounds samples into top_count (reused by the backward pass).
template <typename scalar_t>
__global__ void DeformablePSROIPoolForwardKernel(
    const int count,
    const scalar_t *bottom_data,
    const scalar_t spatial_scale,
    const int channels,
    const int height, const int width,
    const int pooled_height, const int pooled_width,
    const scalar_t *bottom_rois, const scalar_t *bottom_trans,
    const int no_trans,
    const scalar_t trans_std,
    const int sample_per_part,
    const int output_dim,
    const int group_size,
    const int part_size,
    const int num_classes,
    const int channels_each_class,
    scalar_t *top_data,
    scalar_t *top_count)
{
  CUDA_KERNEL_LOOP(index, count)
  {
    // The output is in order (n, ctop, ph, pw)
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int ctop = (index / pooled_width / pooled_height) % output_dim;
    int n = index / pooled_width / pooled_height / output_dim;

    // [start, end) interval for spatial sampling
    // ROI rows are 5-tuples: (batch_index, x1, y1, x2, y2).
    const scalar_t *offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];
    // Scale ROI corners to feature-map coordinates; -0.5 centers the grid.
    scalar_t roi_start_w = (scalar_t)(round(offset_bottom_rois[1])) * spatial_scale - 0.5;
    scalar_t roi_start_h = (scalar_t)(round(offset_bottom_rois[2])) * spatial_scale - 0.5;
    scalar_t roi_end_w = (scalar_t)(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
    scalar_t roi_end_h = (scalar_t)(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5;

    // Force too small ROIs to be 1x1
    scalar_t roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0
    scalar_t roi_height = max(roi_end_h - roi_start_h, 0.1);

    // Compute w and h at bottom
    scalar_t bin_size_h = roi_height / (scalar_t)(pooled_height);
    scalar_t bin_size_w = roi_width / (scalar_t)(pooled_width);

    // Sub-sampling grid inside each bin.
    scalar_t sub_bin_size_h = bin_size_h / (scalar_t)(sample_per_part);
    scalar_t sub_bin_size_w = bin_size_w / (scalar_t)(sample_per_part);

    // Which cell of the part_size x part_size offset ("trans") grid applies.
    int part_h = floor((scalar_t)(ph) / pooled_height * part_size);
    int part_w = floor((scalar_t)(pw) / pooled_width * part_size);
    int class_id = ctop / channels_each_class;
    // Learned normalized bin offsets, scaled by trans_std; zero when disabled.
    scalar_t trans_x = no_trans ? (scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std;
    scalar_t trans_y = no_trans ? (scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std;

    // Bin origin, shifted by the (offset * ROI size) translation.
    scalar_t wstart = (scalar_t)(pw)*bin_size_w + roi_start_w;
    wstart += trans_x * roi_width;
    scalar_t hstart = (scalar_t)(ph)*bin_size_h + roi_start_h;
    hstart += trans_y * roi_height;

    scalar_t sum = 0;
    int count = 0;  // NOTE: shadows the `count` parameter (total threads); counts valid samples.
    // Position-sensitive group cell for this output channel.
    int gw = floor((scalar_t)(pw)*group_size / pooled_width);
    int gh = floor((scalar_t)(ph)*group_size / pooled_height);
    gw = min(max(gw, 0), group_size - 1);
    gh = min(max(gh, 0), group_size - 1);

    const scalar_t *offset_bottom_data = bottom_data + (roi_batch_ind * channels) * height * width;
    for (int ih = 0; ih < sample_per_part; ih++)
    {
      for (int iw = 0; iw < sample_per_part; iw++)
      {
        scalar_t w = wstart + iw * sub_bin_size_w;
        scalar_t h = hstart + ih * sub_bin_size_h;
        // bilinear interpolation
        // Skip samples falling entirely outside the feature map.
        if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5)
        {
          continue;
        }
        // Clamp border samples into valid interpolation range.
        w = min(max(w, 0.), width - 1.);
        h = min(max(h, 0.), height - 1.);
        // Position-sensitive channel: one plane per (output channel, group cell).
        int c = (ctop * group_size + gh) * group_size + gw;
        scalar_t val = bilinear_interp(offset_bottom_data + c * height * width, w, h, width, height);
        sum += val;
        count++;
      }
    }
    // Mean over valid samples (0 if every sample was out of bounds).
    top_data[index] = count == 0 ? (scalar_t)(0) : sum / count;
    top_count[index] = count;
  }
}
141
+
142
// Backward kernel for deformable PS-ROI pooling. One thread per output
// gradient element (n, ctop, ph, pw): it redistributes top_diff/top_count
// over the same sample grid used by the forward pass, accumulating into
// bottom_data_diff (input-feature gradient) and, unless no_trans,
// bottom_trans_diff (bin-offset gradient) via atomicAdd.
template <typename scalar_t>
__global__ void DeformablePSROIPoolBackwardAccKernel(
    const int count,
    const scalar_t *top_diff,
    const scalar_t *top_count,
    const int num_rois,
    const scalar_t spatial_scale,
    const int channels,
    const int height, const int width,
    const int pooled_height, const int pooled_width,
    const int output_dim,
    scalar_t *bottom_data_diff, scalar_t *bottom_trans_diff,
    const scalar_t *bottom_data,
    const scalar_t *bottom_rois,
    const scalar_t *bottom_trans,
    const int no_trans,
    const scalar_t trans_std,
    const int sample_per_part,
    const int group_size,
    const int part_size,
    const int num_classes,
    const int channels_each_class)
{
  CUDA_KERNEL_LOOP(index, count)
  {
    // The output is in order (n, ctop, ph, pw)
    int pw = index % pooled_width;
    int ph = (index / pooled_width) % pooled_height;
    int ctop = (index / pooled_width / pooled_height) % output_dim;
    int n = index / pooled_width / pooled_height / output_dim;

    // [start, end) interval for spatial sampling
    // ROI rows are 5-tuples: (batch_index, x1, y1, x2, y2) — same decoding
    // as the forward kernel so the sample grid is reconstructed exactly.
    const scalar_t *offset_bottom_rois = bottom_rois + n * 5;
    int roi_batch_ind = offset_bottom_rois[0];
    scalar_t roi_start_w = (scalar_t)(round(offset_bottom_rois[1])) * spatial_scale - 0.5;
    scalar_t roi_start_h = (scalar_t)(round(offset_bottom_rois[2])) * spatial_scale - 0.5;
    scalar_t roi_end_w = (scalar_t)(round(offset_bottom_rois[3]) + 1.) * spatial_scale - 0.5;
    scalar_t roi_end_h = (scalar_t)(round(offset_bottom_rois[4]) + 1.) * spatial_scale - 0.5;

    // Force too small ROIs to be 1x1
    scalar_t roi_width = max(roi_end_w - roi_start_w, 0.1); //avoid 0
    scalar_t roi_height = max(roi_end_h - roi_start_h, 0.1);

    // Compute w and h at bottom
    scalar_t bin_size_h = roi_height / (scalar_t)(pooled_height);
    scalar_t bin_size_w = roi_width / (scalar_t)(pooled_width);

    scalar_t sub_bin_size_h = bin_size_h / (scalar_t)(sample_per_part);
    scalar_t sub_bin_size_w = bin_size_w / (scalar_t)(sample_per_part);

    // Offset ("trans") grid cell for this bin, as in the forward pass.
    int part_h = floor((scalar_t)(ph) / pooled_height * part_size);
    int part_w = floor((scalar_t)(pw) / pooled_width * part_size);
    int class_id = ctop / channels_each_class;
    scalar_t trans_x = no_trans ? (scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std;
    scalar_t trans_y = no_trans ? (scalar_t)(0) : bottom_trans[(((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w] * (scalar_t)trans_std;

    scalar_t wstart = (scalar_t)(pw)*bin_size_w + roi_start_w;
    wstart += trans_x * roi_width;
    scalar_t hstart = (scalar_t)(ph)*bin_size_h + roi_start_h;
    hstart += trans_y * roi_height;

    // Forward produced 0 output when no sample was valid; nothing to propagate.
    if (top_count[index] <= 0)
    {
      continue;
    }
    // Forward averaged over top_count samples, so split the gradient evenly.
    scalar_t diff_val = top_diff[index] / top_count[index];
    const scalar_t *offset_bottom_data = bottom_data + roi_batch_ind * channels * height * width;
    scalar_t *offset_bottom_data_diff = bottom_data_diff + roi_batch_ind * channels * height * width;
    int gw = floor((scalar_t)(pw)*group_size / pooled_width);
    int gh = floor((scalar_t)(ph)*group_size / pooled_height);
    gw = min(max(gw, 0), group_size - 1);
    gh = min(max(gh, 0), group_size - 1);

    for (int ih = 0; ih < sample_per_part; ih++)
    {
      for (int iw = 0; iw < sample_per_part; iw++)
      {
        scalar_t w = wstart + iw * sub_bin_size_w;
        scalar_t h = hstart + ih * sub_bin_size_h;
        // bilinear interpolation
        if (w < -0.5 || w > width - 0.5 || h < -0.5 || h > height - 0.5)
        {
          continue;
        }
        w = min(max(w, 0.), width - 1.);
        h = min(max(h, 0.), height - 1.);
        int c = (ctop * group_size + gh) * group_size + gw;
        // backward on feature
        // Distribute diff_val over the four bilinear corners (atomics:
        // multiple samples/ROIs may hit the same input pixel).
        int x0 = floor(w);
        int x1 = ceil(w);
        int y0 = floor(h);
        int y1 = ceil(h);
        scalar_t dist_x = w - x0, dist_y = h - y0;
        scalar_t q00 = (1 - dist_x) * (1 - dist_y);
        scalar_t q01 = (1 - dist_x) * dist_y;
        scalar_t q10 = dist_x * (1 - dist_y);
        scalar_t q11 = dist_x * dist_y;
        int bottom_index_base = c * height * width;
        atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x0, q00 * diff_val);
        atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x0, q01 * diff_val);
        atomicAdd(offset_bottom_data_diff + bottom_index_base + y0 * width + x1, q10 * diff_val);
        atomicAdd(offset_bottom_data_diff + bottom_index_base + y1 * width + x1, q11 * diff_val);

        // No trans tensor -> no offset gradient to compute.
        if (no_trans)
        {
          continue;
        }
        // Gradient w.r.t. the bin offsets: derivative of the bilinear sample
        // w.r.t. the sample position, chained with trans_std and the ROI size
        // (because the offset is applied in ROI-normalized coordinates).
        scalar_t U00 = offset_bottom_data[bottom_index_base + y0 * width + x0];
        scalar_t U01 = offset_bottom_data[bottom_index_base + y1 * width + x0];
        scalar_t U10 = offset_bottom_data[bottom_index_base + y0 * width + x1];
        scalar_t U11 = offset_bottom_data[bottom_index_base + y1 * width + x1];
        scalar_t diff_x = (U11 * dist_y + U10 * (1 - dist_y) - U01 * dist_y - U00 * (1 - dist_y)) * trans_std * diff_val;
        diff_x *= roi_width;
        scalar_t diff_y = (U11 * dist_x + U01 * (1 - dist_x) - U10 * dist_x - U00 * (1 - dist_x)) * trans_std * diff_val;
        diff_y *= roi_height;

        atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2) * part_size + part_h) * part_size + part_w, diff_x);
        atomicAdd(bottom_trans_diff + (((n * num_classes + class_id) * 2 + 1) * part_size + part_h) * part_size + part_w, diff_y);
      }
    }
  }
}
264
+
265
// Host launcher for the deformable PS-ROI pooling forward kernel: derives
// grid geometry from the pooled size, dispatches on the input dtype, and
// launches one thread per output element. `top_count` receives the number
// of valid samples per output cell for use by the backward pass.
// NOTE(review): .type()/.data<scalar_t>() are deprecated ATen accessors
// (scalar_type()/data_ptr<scalar_t>() in newer PyTorch) — verify on upgrade.
void DeformablePSROIPoolForward(const at::Tensor data,
                                const at::Tensor bbox,
                                const at::Tensor trans,
                                at::Tensor out,
                                at::Tensor top_count,
                                const int batch,
                                const int channels,
                                const int height,
                                const int width,
                                const int num_bbox,
                                const int channels_trans,
                                const int no_trans,
                                const float spatial_scale,
                                const int output_dim,
                                const int group_size,
                                const int pooled_size,
                                const int part_size,
                                const int sample_per_part,
                                const float trans_std)
{
  const int pooled_height = pooled_size;
  const int pooled_width = pooled_size;
  // One thread per output element (roi, out-channel, ph, pw).
  const int count = num_bbox * output_dim * pooled_height * pooled_width;
  // trans holds 2 channels (x, y) per class.
  const int num_classes = no_trans ? 1 : channels_trans / 2;
  const int channels_each_class = no_trans ? output_dim : output_dim / num_classes;

  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      data.type(), "deformable_psroi_pool_forward", ([&] {
        const scalar_t *bottom_data = data.data<scalar_t>();
        const scalar_t *bottom_rois = bbox.data<scalar_t>();
        // Kernel only dereferences bottom_trans when no_trans is false.
        const scalar_t *bottom_trans = no_trans ? NULL : trans.data<scalar_t>();
        scalar_t *top_data = out.data<scalar_t>();
        scalar_t *top_count_data = top_count.data<scalar_t>();

        DeformablePSROIPoolForwardKernel<<<GET_BLOCKS(count), CUDA_NUM_THREADS>>>(
            count, bottom_data, (scalar_t)spatial_scale, channels, height, width, pooled_height, pooled_width,
            bottom_rois, bottom_trans, no_trans, (scalar_t)trans_std, sample_per_part, output_dim,
            group_size, part_size, num_classes, channels_each_class, top_data, top_count_data);
      }));

  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess)
  {
    printf("error in DeformablePSROIPoolForward: %s\n", cudaGetErrorString(err));
  }
}
311
+
312
// Host launcher for the deformable PS-ROI pooling backward kernel:
// accumulates gradients into in_grad (w.r.t. the input feature map) and,
// unless no_trans, into trans_grad (w.r.t. the learned bin offsets).
void DeformablePSROIPoolBackwardAcc(const at::Tensor out_grad,
                                    const at::Tensor data,
                                    const at::Tensor bbox,
                                    const at::Tensor trans,
                                    const at::Tensor top_count,
                                    at::Tensor in_grad,
                                    at::Tensor trans_grad,
                                    const int batch,
                                    const int channels,
                                    const int height,
                                    const int width,
                                    const int num_bbox,
                                    const int channels_trans,
                                    const int no_trans,
                                    const float spatial_scale,
                                    const int output_dim,
                                    const int group_size,
                                    const int pooled_size,
                                    const int part_size,
                                    const int sample_per_part,
                                    const float trans_std)
{
  // LOG(INFO) << "DeformablePSROIPoolBackward";
  const int num_rois = num_bbox;
  const int pooled_height = pooled_size;
  const int pooled_width = pooled_size;
  // One thread per output-gradient element (roi, out-channel, ph, pw).
  const int count = num_bbox * output_dim * pooled_height * pooled_width;
  // trans holds 2 channels (x, y) per class.
  const int num_classes = no_trans ? 1 : channels_trans / 2;
  const int channels_each_class = no_trans ? output_dim : output_dim / num_classes;

  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      out_grad.type(), "deformable_psroi_pool_backward_acc", ([&] {
        const scalar_t *top_diff = out_grad.data<scalar_t>();
        const scalar_t *bottom_data = data.data<scalar_t>();
        const scalar_t *bottom_rois = bbox.data<scalar_t>();
        // Kernel only dereferences the trans pointers when no_trans is false.
        const scalar_t *bottom_trans = no_trans ? NULL : trans.data<scalar_t>();
        scalar_t *bottom_data_diff = in_grad.data<scalar_t>();
        scalar_t *bottom_trans_diff = no_trans ? NULL : trans_grad.data<scalar_t>();
        const scalar_t *top_count_data = top_count.data<scalar_t>();

        DeformablePSROIPoolBackwardAccKernel<<<GET_BLOCKS(count), CUDA_NUM_THREADS>>>(
            count, top_diff, top_count_data, num_rois, (scalar_t)spatial_scale, channels, height, width,
            pooled_height, pooled_width, output_dim, bottom_data_diff, bottom_trans_diff,
            bottom_data, bottom_rois, bottom_trans, no_trans, (scalar_t)trans_std, sample_per_part,
            group_size, part_size, num_classes, channels_each_class);
      }));

  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess)
  {
    // Fix: the message previously said "DeformablePSROIPoolForward",
    // mislabeling launch failures from the backward pass.
    printf("error in DeformablePSROIPoolBackwardAcc: %s\n", cudaGetErrorString(err));
  }
}
DB/backbones/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ from .resnet import resnet18, resnet34, resnet50, resnet101, deformable_resnet50, deformable_resnet18
2
+ from .mobilenetv3 import mobilenet_v3_large, mobilenet_v3_small
DB/backbones/mobilenetv3.py ADDED
@@ -0,0 +1,252 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # https://github.com/kuan-wang/pytorch-mobilenet-v3
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+
6
+
7
+ __all__ = ['MobileNetV3', 'mobilenetv3']
8
+
9
+
10
def conv_bn(inp, oup, stride, conv_layer=nn.Conv2d, norm_layer=nn.BatchNorm2d, nlin_layer=nn.ReLU):
    """Standard 3x3 conv -> norm -> non-linearity stem block.

    All three layer types are injectable so callers can swap in alternative
    conv/norm/activation implementations.
    """
    layers = [
        conv_layer(inp, oup, 3, stride, 1, bias=False),
        norm_layer(oup),
        nlin_layer(inplace=True),
    ]
    return nn.Sequential(*layers)
16
+
17
+
18
def conv_1x1_bn(inp, oup, conv_layer=nn.Conv2d, norm_layer=nn.BatchNorm2d, nlin_layer=nn.ReLU):
    """Pointwise (1x1, stride 1) conv -> norm -> non-linearity block."""
    layers = [
        conv_layer(inp, oup, 1, 1, 0, bias=False),
        norm_layer(oup),
        nlin_layer(inplace=True),
    ]
    return nn.Sequential(*layers)
24
+
25
+
26
class Hswish(nn.Module):
    """Hard-swish activation from MobileNetV3: ``x * relu6(x + 3) / 6``."""

    def __init__(self, inplace=True):
        super(Hswish, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        gate = F.relu6(x + 3., inplace=self.inplace)
        return x * gate / 6.
33
+
34
+
35
class Hsigmoid(nn.Module):
    """Hard-sigmoid activation: ``relu6(x + 3) / 6`` (piecewise-linear sigmoid)."""

    def __init__(self, inplace=True):
        super(Hsigmoid, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        shifted = F.relu6(x + 3., inplace=self.inplace)
        return shifted / 6.
42
+
43
+
44
class SEModule(nn.Module):
    """Squeeze-and-Excitation block: global-pool squeeze, 2-layer MLP
    excitation with a hard-sigmoid gate, then channel-wise rescaling.
    """

    def __init__(self, channel, reduction=4):
        super(SEModule, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel, bias=False),
            Hsigmoid()
            # nn.Sigmoid()
        )

    def forward(self, x):
        batch, channels = x.size(0), x.size(1)
        squeezed = self.avg_pool(x).view(batch, channels)
        weights = self.fc(squeezed).view(batch, channels, 1, 1)
        return x * weights.expand_as(x)
61
+
62
+
63
class Identity(nn.Module):
    """Pass-through module; stands in for SEModule when SE is disabled."""

    def __init__(self, channel):
        # `channel` is accepted only so Identity and SEModule share a
        # constructor signature; it is intentionally unused.
        super(Identity, self).__init__()

    def forward(self, x):
        return x
69
+
70
+
71
def make_divisible(x, divisible_by=8):
    """Round ``x`` up to the nearest multiple of ``divisible_by``.

    Used to keep width-scaled channel counts hardware-friendly
    (divisible by 8). Uses ``math.ceil`` instead of importing numpy
    for a single scalar operation.
    """
    import math
    return int(math.ceil(x / divisible_by) * divisible_by)
74
+
75
+
76
class MobileBottleneck(nn.Module):
    """MobileNetV3 inverted-residual block.

    Layout: pointwise expansion -> depthwise conv (optionally SE-gated)
    -> linear pointwise projection. A residual shortcut is added when the
    block preserves both spatial resolution (stride 1) and channel count
    (inp == oup).
    """

    def __init__(self, inp, oup, kernel, stride, exp, se=False, nl='RE'):
        super(MobileBottleneck, self).__init__()
        assert stride in [1, 2]
        assert kernel in [3, 5]
        padding = (kernel - 1) // 2
        self.use_res_connect = stride == 1 and inp == oup

        conv_layer = nn.Conv2d
        norm_layer = nn.BatchNorm2d
        # 'RE' -> plain ReLU, 'HS' -> hard-swish; anything else is rejected.
        if nl == 'RE':
            nlin_layer = nn.ReLU  # or ReLU6
        elif nl == 'HS':
            nlin_layer = Hswish
        else:
            raise NotImplementedError
        # Identity keeps the Sequential layout stable when SE is disabled.
        SELayer = SEModule if se else Identity

        stages = [
            # pw (expansion)
            conv_layer(inp, exp, 1, 1, 0, bias=False),
            norm_layer(exp),
            nlin_layer(inplace=True),
            # dw (depthwise)
            conv_layer(exp, exp, kernel, stride, padding, groups=exp, bias=False),
            norm_layer(exp),
            SELayer(exp),
            nlin_layer(inplace=True),
            # pw-linear (projection, no activation)
            conv_layer(exp, oup, 1, 1, 0, bias=False),
            norm_layer(oup),
        ]
        self.conv = nn.Sequential(*stages)

    def forward(self, x):
        out = self.conv(x)
        if self.use_res_connect:
            out = out + x
        return out
117
+
118
+
119
class MobileNetV3(nn.Module):
    """MobileNetV3 backbone adapted for DBNet.

    Unlike the reference classifier, ``forward`` returns four intermediate
    feature maps (x2..x5) for an FPN-style neck; the classifier head is
    still constructed but unused by ``forward``.
    """

    def __init__(self, n_class=1000, input_size=224, dropout=0.8, mode='small', width_mult=1.0):
        super(MobileNetV3, self).__init__()
        input_channel = 16
        last_channel = 1280
        if mode == 'large':
            # refer to Table 1 in paper
            # Per-block settings: kernel, expansion, out channels, SE, nonlinearity, stride.
            mobile_setting = [
                # k, exp, c, se, nl, s,
                [3, 16, 16, False, 'RE', 1],
                [3, 64, 24, False, 'RE', 2],
                [3, 72, 24, False, 'RE', 1], # 3
                [5, 72, 40, True, 'RE', 2],
                [5, 120, 40, True, 'RE', 1],
                [5, 120, 40, True, 'RE', 1], # 6
                [3, 240, 80, False, 'HS', 2],
                [3, 200, 80, False, 'HS', 1],
                [3, 184, 80, False, 'HS', 1],
                [3, 184, 80, False, 'HS', 1],
                [3, 480, 112, True, 'HS', 1],
                [3, 672, 112, True, 'HS', 1], # 12
                [5, 672, 160, True, 'HS', 2],
                [5, 960, 160, True, 'HS', 1],
                [5, 960, 160, True, 'HS', 1],
            ]
        elif mode == 'small':
            # refer to Table 2 in paper
            mobile_setting = [
                # k, exp, c, se, nl, s,
                [3, 16, 16, True, 'RE', 2],
                [3, 72, 24, False, 'RE', 2],
                [3, 88, 24, False, 'RE', 1],
                [5, 96, 40, True, 'HS', 2],
                [5, 240, 40, True, 'HS', 1],
                [5, 240, 40, True, 'HS', 1],
                [5, 120, 48, True, 'HS', 1],
                [5, 144, 48, True, 'HS', 1],
                [5, 288, 96, True, 'HS', 2],
                [5, 576, 96, True, 'HS', 1],
                [5, 576, 96, True, 'HS', 1],
            ]
        else:
            raise NotImplementedError

        # building first layer
        assert input_size % 32 == 0
        last_channel = make_divisible(last_channel * width_mult) if width_mult > 1.0 else last_channel
        # ModuleList (not Sequential) so forward() can tap intermediate stages;
        # it also registers parameters so .to(device) moves them correctly.
        self.features = nn.ModuleList([conv_bn(3, input_channel, 2, nlin_layer=Hswish)]) # start_idx = 0: Input type (torch.cuda.FloatTensor) and weight type (torch.FloatTensor) should be the same
        self.classifier = []

        # building mobile blocks
        for k, exp, c, se, nl, s in mobile_setting:
            output_channel = make_divisible(c * width_mult)
            exp_channel = make_divisible(exp * width_mult)
            self.features.append(MobileBottleneck(input_channel, output_channel, k, s, exp_channel, se, nl))
            input_channel = output_channel

        # building last several layers
        if mode == 'large':
            last_conv = make_divisible(960 * width_mult)
            self.features.append(conv_1x1_bn(input_channel, last_conv, nlin_layer=Hswish)) # 16
            self.features.append(nn.AdaptiveAvgPool2d(1))
            self.features.append(nn.Conv2d(last_conv, last_channel, 1, 1, 0))
            self.features.append(Hswish(inplace=True))
        elif mode == 'small':
            last_conv = make_divisible(576 * width_mult)
            self.features.append(conv_1x1_bn(input_channel, last_conv, nlin_layer=Hswish))
            # self.features.append(SEModule(last_conv))  # refer to paper Table2, but I think this is a mistake
            self.features.append(nn.AdaptiveAvgPool2d(1))
            self.features.append(nn.Conv2d(last_conv, last_channel, 1, 1, 0))
            self.features.append(Hswish(inplace=True))
        else:
            raise NotImplementedError

        # make it nn.Sequential
        #self.features = nn.Sequential(*self.features) del for dbnet

        # building classifier (unused by forward; kept from the reference model)
        self.classifier = nn.Sequential(
            nn.Dropout(p=dropout),    # refer to paper section 6
            nn.Linear(last_channel, n_class),
        )

        self._initialize_weights()

    def forward(self, x):
        """Run the backbone and return the four tapped feature maps.

        The original classifier path (features -> global mean -> classifier)
        was removed for DBNet.

        NOTE(review): the stage indices 3/6/12/16 and range(17) match the
        'large' configuration (1 stem + 15 bottlenecks + final 1x1 conv).
        In 'small' mode there are only 12 such modules before the head, so
        this loop appears to index past the feature taps — confirm whether
        'small' is ever used with this forward.
        """
        x2, x3, x4, x5 = None, None, None, None
        for stage in range(17):    # https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.1/ppocr/modeling/backbones/det_mobilenet_v3.py
            x = self.features[stage](x)
            if stage == 3:    # if s == 2 and start_idx > 3
                x2 = x
            elif stage == 6:
                x3 = x
            elif stage == 12:
                x4 = x
            elif stage == 16:
                x5 = x
        return x2, x3, x4, x5

    def _initialize_weights(self):
        # weight initialization: He init for convs, ones/zeros for BN,
        # small normal for linear layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
236
+
237
+
238
def mobilenet_v3_large(pretrained=False, **kwargs):
    """Build a MobileNetV3-Large backbone.

    Args:
        pretrained (bool): when True, loads weights from a local
            ``mobilenetv3_large.pth.tar`` checkpoint file.
    """
    net = MobileNetV3(mode='large', **kwargs)
    if pretrained:
        weights = torch.load('mobilenetv3_large.pth.tar')
        net.load_state_dict(weights, strict=True)
        # raise NotImplementedError
    return net
245
+
246
def mobilenet_v3_small(pretrained=False, **kwargs):
    """Build a MobileNetV3-Small backbone.

    Args:
        pretrained (bool): when True, loads weights from a local
            ``mobilenetv3_small_67.4.pth.tar`` checkpoint file.
    """
    net = MobileNetV3(mode='small', **kwargs)
    if pretrained:
        weights = torch.load('mobilenetv3_small_67.4.pth.tar')
        net.load_state_dict(weights, strict=True)
        # raise NotImplementedError
    return net
DB/backbones/resnet.py ADDED
@@ -0,0 +1,336 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch.nn as nn
2
+ import math
3
+ import torch.utils.model_zoo as model_zoo
4
+ BatchNorm2d = nn.BatchNorm2d
5
+
6
+ __all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
7
+ 'resnet152']
8
+
9
+
10
# Download URLs for the ImageNet-pretrained torchvision checkpoints, keyed
# by architecture name; consumed by the resnet* constructor helpers below.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
17
+
18
+
19
def constant_init(module, constant, bias=0):
    """Fill ``module.weight`` with ``constant`` and ``module.bias`` with ``bias``.

    Used below to zero-initialize the DCN offset convolutions so deformable
    sampling starts from the regular grid.
    """
    nn.init.constant_(module.weight, constant)
    # Guard against modules created with bias=False: the ``bias`` attribute
    # exists but is None, and the original hasattr-only check would crash
    # inside nn.init.constant_ for those modules.
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)
23
+
24
+
25
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding 1 and no bias (ResNet building block)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
29
+
30
+
31
class BasicBlock(nn.Module):
    """ResNet basic residual block, optionally using deformable conv for conv2.

    ``dcn`` is None for a plain block; otherwise a dict with keys
    ``modulated``, ``deformable_groups`` and ``fallback_on_stride``.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, dcn=None):
        super(BasicBlock, self).__init__()
        self.with_dcn = dcn is not None
        # conv1 carries the stride; conv2 is always stride 1 in this block.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.with_modulated_dcn = False
        if self.with_dcn:
            fallback_on_stride = dcn.get('fallback_on_stride', False)
            self.with_modulated_dcn = dcn.get('modulated', False)
        # self.conv2 = conv3x3(planes, planes)
        if not self.with_dcn or fallback_on_stride:
            # NOTE(review): when dcn is None, ``fallback_on_stride`` is never
            # assigned — this branch relies on short-circuiting of ``or``.
            self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                                   padding=1, bias=False)
        else:
            deformable_groups = dcn.get('deformable_groups', 1)
            # DeformConv expects a separate offset tensor (18 = 2*3*3 offsets);
            # ModulatedDeformConv also takes a 9-channel modulation mask (27 total).
            if not self.with_modulated_dcn:
                from assets.ops.dcn import DeformConv
                conv_op = DeformConv
                offset_channels = 18
            else:
                from assets.ops.dcn import ModulatedDeformConv
                conv_op = ModulatedDeformConv
                offset_channels = 27
            self.conv2_offset = nn.Conv2d(
                planes,
                deformable_groups * offset_channels,
                kernel_size=3,
                padding=1)
            self.conv2 = conv_op(
                planes,
                planes,
                kernel_size=3,
                padding=1,
                deformable_groups=deformable_groups,
                bias=False)
        self.bn2 = BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        # out = self.conv2(out)
        if not self.with_dcn:
            out = self.conv2(out)
        elif self.with_modulated_dcn:
            # Split the predicted tensor into offsets and a sigmoid mask.
            # NOTE(review): the fixed :18 / -9: slicing assumes
            # deformable_groups == 1 — confirm before using more groups.
            offset_mask = self.conv2_offset(out)
            offset = offset_mask[:, :18, :, :]
            mask = offset_mask[:, -9:, :, :].sigmoid()
            out = self.conv2(out, offset, mask)
        else:
            offset = self.conv2_offset(out)
            out = self.conv2(out, offset)
        out = self.bn2(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out
101
+
102
+
103
class Bottleneck(nn.Module):
    """ResNet bottleneck block (1x1 reduce -> 3x3 -> 1x1 expand x4),
    optionally using deformable conv for the 3x3 stage.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, dcn=None):
        super(Bottleneck, self).__init__()
        self.with_dcn = dcn is not None
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = BatchNorm2d(planes)
        fallback_on_stride = False
        self.with_modulated_dcn = False
        if self.with_dcn:
            fallback_on_stride = dcn.get('fallback_on_stride', False)
            self.with_modulated_dcn = dcn.get('modulated', False)
        if not self.with_dcn or fallback_on_stride:
            # Plain 3x3 conv carrying the block's stride.
            self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                                   stride=stride, padding=1, bias=False)
        else:
            deformable_groups = dcn.get('deformable_groups', 1)
            # DeformConv takes a separate offset tensor (18 = 2*3*3);
            # ModulatedDeformConv additionally takes a 9-channel mask (27 total).
            if not self.with_modulated_dcn:
                from assets.ops.dcn import DeformConv
                conv_op = DeformConv
                offset_channels = 18
            else:
                from assets.ops.dcn import ModulatedDeformConv
                conv_op = ModulatedDeformConv
                offset_channels = 27
            self.conv2_offset = nn.Conv2d(
                planes, deformable_groups * offset_channels,
                kernel_size=3,
                padding=1)
            self.conv2 = conv_op(
                planes, planes, kernel_size=3, padding=1, stride=stride,
                deformable_groups=deformable_groups, bias=False)
        self.bn2 = BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dcn = dcn
        self.with_dcn = dcn is not None

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        # out = self.conv2(out)
        if not self.with_dcn:
            out = self.conv2(out)
        elif self.with_modulated_dcn:
            # NOTE(review): the fixed :18 / -9: slicing assumes
            # deformable_groups == 1 — confirm before using more groups.
            offset_mask = self.conv2_offset(out)
            offset = offset_mask[:, :18, :, :]
            mask = offset_mask[:, -9:, :, :].sigmoid()
            out = self.conv2(out, offset, mask)
        else:
            offset = self.conv2_offset(out)
            out = self.conv2(out, offset)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out
176
+
177
+
178
class ResNet(nn.Module):
    """ResNet trunk adapted for detection: ``forward`` returns the four
    stage feature maps (x2..x5) instead of classification logits.

    The classifier-era ``avgpool``/``fc`` layers (and ``smooth``) are still
    constructed but unused by ``forward``.
    """

    def __init__(self, block, layers, num_classes=1000,
                 dcn=None, stage_with_dcn=(False, False, False, False)):
        self.dcn = dcn
        # NOTE(review): stage_with_dcn is stored but never consulted —
        # dcn is simply applied to layers 2-4 below.
        self.stage_with_dcn = stage_with_dcn
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Stage 1 never uses deformable conv; stages 2-4 use it when dcn is set.
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(
            block, 128, layers[1], stride=2, dcn=dcn)
        self.layer3 = self._make_layer(
            block, 256, layers[2], stride=2, dcn=dcn)
        self.layer4 = self._make_layer(
            block, 512, layers[3], stride=2, dcn=dcn)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        # NOTE(review): 1x1 conv with padding=1 is unusual (it grows the
        # spatial size by 2); ``smooth`` is also unused by forward — verify.
        self.smooth = nn.Conv2d(2048, 256, kernel_size=1, stride=1, padding=1)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # He-style init scaled by fan-out.
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        if self.dcn is not None:
            # Zero-init the offset convs so deformable sampling starts
            # from the regular grid.
            for m in self.modules():
                if isinstance(m, Bottleneck) or isinstance(m, BasicBlock):
                    if hasattr(m, 'conv2_offset'):
                        constant_init(m.conv2_offset, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dcn=None):
        """Stack ``blocks`` residual blocks; the first may downsample."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # 1x1 projection shortcut to match shape/stride of the main path.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes,
                            stride, downsample, dcn=dcn))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dcn=dcn))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        # Tap each stage output for the FPN-style neck.
        x2 = self.layer1(x)
        x3 = self.layer2(x2)
        x4 = self.layer3(x3)
        x5 = self.layer4(x4)

        return x2, x3, x4, x5
245
+
246
+
247
def resnet18(pretrained=True, **kwargs):
    """Construct a ResNet-18 backbone.

    Args:
        pretrained (bool): If True, loads ImageNet weights (non-strict,
            since the detection trunk drops the classifier head).
    """
    net = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        weights = model_zoo.load_url(model_urls['resnet18'])
        net.load_state_dict(weights, strict=False)
    return net
257
+
258
def deformable_resnet18(pretrained=True, **kwargs):
    """Construct a ResNet-18 backbone with modulated deformable conv.

    Args:
        pretrained (bool): accepted for interface compatibility; checkpoint
            loading is currently disabled for this variant.
    """
    dcn_cfg = dict(modulated=True,
                   deformable_groups=1,
                   fallback_on_stride=False)
    net = ResNet(BasicBlock, [2, 2, 2, 2],
                 dcn=dcn_cfg,
                 stage_with_dcn=[False, True, True, True], **kwargs)
    # Pretrained loading intentionally disabled in the original code:
    # if pretrained:
    #     net.load_state_dict(model_zoo.load_url(
    #         model_urls['resnet18']), strict=False)
    return net
272
+
273
+
274
def resnet34(pretrained=True, **kwargs):
    """Construct a ResNet-34 backbone.

    Args:
        pretrained (bool): If True, loads ImageNet weights (non-strict).
    """
    net = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    if pretrained:
        weights = model_zoo.load_url(model_urls['resnet34'])
        net.load_state_dict(weights, strict=False)
    return net
284
+
285
+
286
def resnet50(pretrained=True, **kwargs):
    """Construct a ResNet-50 backbone.

    Args:
        pretrained (bool): If True, loads ImageNet weights (non-strict).
    """
    net = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        weights = model_zoo.load_url(model_urls['resnet50'])
        net.load_state_dict(weights, strict=False)
    return net
296
+
297
+
298
def deformable_resnet50(pretrained=True, **kwargs):
    """Construct a ResNet-50 backbone with modulated deformable conv.

    Args:
        pretrained (bool): If True, loads ImageNet weights (non-strict;
            the DCN offset layers have no pretrained counterpart).
    """
    dcn_cfg = dict(modulated=True,
                   deformable_groups=1,
                   fallback_on_stride=False)
    net = ResNet(Bottleneck, [3, 4, 6, 3],
                 dcn=dcn_cfg,
                 stage_with_dcn=[False, True, True, True],
                 **kwargs)
    if pretrained:
        weights = model_zoo.load_url(model_urls['resnet50'])
        net.load_state_dict(weights, strict=False)
    return net
313
+
314
+
315
def resnet101(pretrained=True, **kwargs):
    """Construct a ResNet-101 backbone.

    Args:
        pretrained (bool): If True, loads ImageNet weights (non-strict).
    """
    net = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        weights = model_zoo.load_url(model_urls['resnet101'])
        net.load_state_dict(weights, strict=False)
    return net
325
+
326
+
327
def resnet152(pretrained=True, **kwargs):
    """Construct a ResNet-152 backbone.

    Args:
        pretrained (bool): If True, loads ImageNet weights (non-strict).
    """
    net = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if pretrained:
        weights = model_zoo.load_url(model_urls['resnet152'])
        net.load_state_dict(weights, strict=False)
    return net
DB/concern/__init__.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # File : __init__.py
4
+ # Author : Zhaoyi Wan <wanzhaoyi@megvii.com>
5
+ # Date : 21.11.2018
6
+ # Last Modified Date: 08.01.2019
7
+ # Last Modified By : Zhaoyi Wan <wanzhaoyi@megvii.com>
8
+
9
+ from .log import Logger
10
+ from .average_meter import AverageMeter
11
+ from .visualizer import Visualize
12
+ from .box2seg import resize_with_coordinates, box2seg
13
+ from .convert import convert
DB/concern/average_meter.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
class AverageMeter(object):
    """Tracks the most recent value plus a running sum/count/average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times; returns self for chaining."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
        return self
DB/concern/box2seg.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import numpy as np
3
+ from scipy import interpolate
4
+
5
def intersection(x, p1, p2):
    """Return the y-value where segment ``p1``-``p2`` crosses the vertical
    line at ``x``; returns 0 for a degenerate vertical segment.
    """
    x1, y1 = p1
    x2, y2 = p2
    if x1 == x2:
        # No unique intersection with a vertical segment.
        return 0
    t = (x - x1) / (x2 - x1)
    return y1 + t * (y2 - y1)
12
+
13
+
14
def midpoint(p1, p2, typed=float):
    """Midpoint of two 2-D points, with each coordinate cast via ``typed``."""
    mx = (p1[0] + p2[0]) / 2
    my = (p1[1] + p2[1]) / 2
    return [typed(mx), typed(my)]
16
+
17
+
18
def resize_with_coordinates(image, width, height, coordinates):
    """Resize ``image`` to (width, height) and scale the Nx2 ``coordinates``
    by the same factors. ``coordinates`` may be None, in which case it is
    returned unchanged.
    """
    src_h, src_w = image.shape[:2]
    resized = cv2.resize(image, (width, height))
    if coordinates is not None:
        assert coordinates.ndim == 2
        assert coordinates.shape[-1] == 2
        scale = (width / src_w, height / src_h)
        coordinates = coordinates * scale
    return resized, coordinates
30
+
31
+
32
def box2seg(image, boxes, label):
    """Convert quadrilateral text boxes into a center-line and a filled
    segmentation map.

    Args:
        image: array whose first two dims give the output (height, width).
        boxes: (N, 4, 2) corner array — assumed ordered top-left, top-right,
            bottom-right, bottom-left (TODO confirm against caller).
        label: per-box integer labels used as fill values.

    Returns:
        (1, width) rounded y-coordinates of the interpolated center-line,
        and the (height, width) float32 segmentation map.

    NOTE(review): assumes at least one box (points[0] would raise
    otherwise) and that the collected center-point x's are usable by
    interp1d (duplicate x values would fail) — verify with callers.
    """
    height, width = image.shape[:2]
    mask = np.zeros((height, width), dtype=np.float32)
    seg = np.zeros((height, width), dtype=np.float32)
    points = []
    for box_index in range(boxes.shape[0]):
        box = boxes[box_index, :, :]    # 4x2
        left_top = box[0]
        right_top = box[1]
        right_bottom = box[2]
        left_bottom = box[3]

        # Midpoints of the left and right edges of the quad.
        left = [(left_top[0] + left_bottom[0]) / 2, (left_top[1] + left_bottom[1]) / 2]
        right = [(right_top[0] + right_bottom[0]) / 2, (right_top[1] + right_bottom[1]) / 2]

        # Two center-line anchor points per box, halfway toward the center.
        center = midpoint(left, right)
        points.append(midpoint(left, center))
        points.append(midpoint(right, center))

        # Shrunken quad (corners pulled halfway toward the center) filled
        # with this box's label.
        poly = np.array([midpoint(left_top, center),
                         midpoint(right_top, center),
                         midpoint(right_bottom, center),
                         midpoint(left_bottom, center)
                         ])
        seg = cv2.fillPoly(seg, [poly.reshape(4, 1, 2).astype(np.int32)], int(label[box_index]))

    # Extrapolate the center-line to the left and right image borders.
    left_y = intersection(0, points[0], points[1])
    right_y = intersection(width, points[-1], points[-2])
    points.insert(0, [0, left_y])
    points.append([width, right_y])
    points = np.array(points)

    # Linear interpolation of y over every integer x column, clamped to the image.
    f = interpolate.interp1d(points[:, 0], points[:, 1], fill_value='extrapolate')
    xnew = np.arange(0, width, 1)
    ynew = f(xnew).clip(0, height-1)
    for x in range(width - 1):
        mask[int(ynew[x]), x] = 1
    return ynew.reshape(1, -1).round(), seg
DB/concern/config.py ADDED
@@ -0,0 +1,191 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import importlib
2
+ from collections import OrderedDict
3
+
4
+ import anyconfig
5
+ import munch
6
+
7
+
8
class Config(object):
    """Loads YAML/JSON experiment configs and resolves them into concrete
    definition dicts (with fully-qualified class names) for Configurable.
    """

    def __init__(self):
        pass

    def load(self, conf):
        """Load a config file path into an attribute-accessible Munch."""
        conf = anyconfig.load(conf)
        return munch.munchify(conf)

    def compile(self, conf, return_packages=False):
        """Resolve a loaded config into a name -> definition mapping.

        Recursively processes ``import``-ed parent configs first, imports
        every listed ``package`` module, then compiles each entry of
        ``define`` (assumed present — KeyError otherwise; TODO confirm).
        """
        packages = conf.get('package', [])
        defines = {}

        for path in conf.get('import', []):
            parent_conf = self.load(path)
            parent_packages, parent_defines = self.compile(
                parent_conf, return_packages=True)
            packages.extend(parent_packages)
            defines.update(parent_defines)

        modules = []
        for package in packages:
            module = importlib.import_module(package)
            modules.append(module)

        if isinstance(conf['define'], dict):
            conf['define'] = [conf['define']]

        for define in conf['define']:
            # pop() acts on the copy, so 'name' deliberately (it appears)
            # remains inside ``define`` and ends up in the compiled dict.
            name = define.copy().pop('name')

            if not isinstance(name, str):
                raise RuntimeError('name must be str')

            defines[name] = self.compile_conf(define, defines, modules)

        if return_packages:
            return packages, defines
        else:
            return defines

    def compile_conf(self, conf, defines, modules):
        """Recursively resolve one config value.

        Strings starting with '^' reference an earlier define; strings
        starting with '$' resolve to a class lookup; dicts may carry a
        'class' key (resolved to a dotted path) and a 'base' key (merged
        with the referenced define, overrides winning).
        """
        if isinstance(conf, (int, float)):
            return conf
        elif isinstance(conf, str):
            if conf.startswith('^'):
                return defines[conf[1:]]
            if conf.startswith('$'):
                return {'class': self.find_class_in_modules(conf[1:], modules)}
            return conf
        elif isinstance(conf, dict):
            if 'class' in conf:
                conf['class'] = self.find_class_in_modules(
                    conf['class'], modules)
            if 'base' in conf:
                # pop() acts on the copy: 'base' stays in ``conf`` and is
                # carried into the merged result (apparently tolerated
                # downstream — verify).
                base = conf.copy().pop('base')

                if not isinstance(base, str):
                    raise RuntimeError('base must be str')

                conf = {
                    **defines[base],
                    **conf,
                }
            return {key: self.compile_conf(value, defines, modules) for key, value in conf.items()}
        elif isinstance(conf, (list, tuple)):
            return [self.compile_conf(value, defines, modules) for value in conf]
        else:
            return conf

    def find_class_in_modules(self, cls, modules):
        """Return the fully-qualified name of ``cls``.

        A dotted name is resolved against its own package first; otherwise
        each imported module is searched in order.
        """
        if not isinstance(cls, str):
            raise RuntimeError('class name must be str')

        if cls.find('.') != -1:
            package, cls = cls.rsplit('.', 1)
            module = importlib.import_module(package)
            if hasattr(module, cls):
                return module.__name__ + '.' + cls

        for module in modules:
            if hasattr(module, cls):
                return module.__name__ + '.' + cls
        raise RuntimeError('class not found ' + cls)
91
+
92
+
93
class State:
    """Declarative marker for a configurable attribute on a Configurable
    subclass.

    ``autoload`` controls whether ``Configurable.load_all`` populates the
    attribute automatically; ``default`` is the value used when the config
    supplies nothing.
    """

    def __init__(self, autoload=True, default=None):
        self.autoload, self.default = autoload, default
97
+
98
+
99
class StateMeta(type):
    """Metaclass that collects ``State`` class attributes into an ordered
    ``states`` mapping and replaces each attribute with its default value.
    """

    def __new__(mcs, name, bases, attrs):
        # Gather State descriptors declared directly on this class.
        current_states = []
        for key, value in attrs.items():
            if isinstance(value, State):
                current_states.append((key, value))

        # Deterministic (alphabetical) ordering of this class's own states.
        current_states.sort(key=lambda x: x[0])
        attrs['states'] = OrderedDict(current_states)
        new_class = super(StateMeta, mcs).__new__(mcs, name, bases, attrs)

        # Walk through the MRO
        # Merge inherited states; later (more derived) classes override
        # earlier ones because the MRO is traversed in reverse.
        states = OrderedDict()
        for base in reversed(new_class.__mro__):
            if hasattr(base, 'states'):
                states.update(base.states)
        new_class.states = states

        # Replace each State descriptor with its plain default value so
        # instances read defaults until load()/load_all() overwrites them.
        for key, value in states.items():
            setattr(new_class, key, value.default)

        return new_class
+
122
+
123
class Configurable(metaclass=StateMeta):
    """Base class for objects constructed from compiled config dicts.

    Subclasses declare ``State`` attributes; ``__init__`` populates them
    from keyword args (recursively instantiating nested 'class' dicts).
    """

    def __init__(self, *args, cmd={}, **kwargs):
        # NOTE(review): mutable default ``cmd={}``; it is only read and
        # forwarded here, but confirm no caller mutates it.
        self.load_all(cmd=cmd, **kwargs)

    @staticmethod
    def construct_class_from_config(args):
        """Instantiate the class named by ``args['class']`` with ``args``."""
        cls = Configurable.extract_class_from_args(args)
        return cls(**args)

    @staticmethod
    def extract_class_from_args(args):
        """Resolve the dotted 'class' entry of ``args`` to a class object.

        pop() acts on the copy, so 'class' stays in ``args`` and is passed
        to the constructor — Configurable.__init__ absorbs it via **kwargs.
        """
        cls = args.copy().pop('class')
        package, cls = cls.rsplit('.', 1)
        module = importlib.import_module(package)
        cls = getattr(module, cls)
        return cls

    def load_all(self, **kwargs):
        """Populate every autoload State from ``kwargs``."""
        for name, state in self.states.items():
            if state.autoload:
                self.load(name, **kwargs)

    def load(self, state_name, **kwargs):
        # FIXME: kwargs should be filtered
        # Args passed from command line
        cmd = kwargs.pop('cmd', dict())
        if state_name in kwargs:
            setattr(self, state_name, self.create_member_from_config(
                (kwargs[state_name], cmd)))
        else:
            setattr(self, state_name, self.states[state_name].default)

    def create_member_from_config(self, conf):
        """Recursively turn a (config-value, cmd) pair into a member value.

        Dicts carrying a 'class' key are instantiated (with ``cmd``
        forwarded); lists/dicts are converted element-wise; scalars pass
        through unchanged.
        """
        args, cmd = conf
        if args is None or isinstance(args, (int, float, str)):
            return args
        elif isinstance(args, (list, tuple)):
            return [self.create_member_from_config((subargs, cmd)) for subargs in args]
        elif isinstance(args, dict):
            if 'class' in args:
                cls = self.extract_class_from_args(args)
                return cls(**args, cmd=cmd)
            return {key: self.create_member_from_config((subargs, cmd)) for key, subargs in args.items()}
        else:
            return args

    def dump(self):
        """Serialize this object's states back into a plain config dict."""
        state = {}
        state['class'] = self.__class__.__module__ + \
            '.' + self.__class__.__name__
        for name, value in self.states.items():
            obj = getattr(self, name)
            state[name] = self.dump_obj(obj)
        return state

    def dump_obj(self, obj):
        """Recursively serialize a member: nested Configurables via their
        own dump(); unknown objects fall back to str().
        """
        if obj is None:
            return None
        elif hasattr(obj, 'dump'):
            return obj.dump()
        elif isinstance(obj, (int, float, str)):
            return obj
        elif isinstance(obj, (list, tuple)):
            return [self.dump_obj(value) for value in obj]
        elif isinstance(obj, dict):
            return {key: self.dump_obj(value) for key, value in obj.items()}
        else:
            return str(obj)
+
DB/concern/convert.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from PIL import Image
2
+ import cv2
3
+ import base64
4
+ import io
5
+ import numpy as np
6
+
7
+
8
def convert(data):
    """Recursively decode a bytes-keyed structure (LMDB-style records).

    Bytes keys/values are decoded to str, except the value under the
    'img' key, which is decoded as an encoded image via PIL and returned
    as a BGR numpy array (OpenCV channel order).
    """
    if isinstance(data, dict):
        ndata = {}
        for key, value in data.items():
            # Keys are assumed to be bytes — TODO confirm with callers.
            nkey = key.decode()
            if nkey == 'img':
                img = Image.open(io.BytesIO(value))
                img = img.convert('RGB')
                # PIL gives RGB; downstream code expects OpenCV's BGR.
                img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
                nvalue = img
            else:
                nvalue = convert(value)
            ndata[nkey] = nvalue
        return ndata
    elif isinstance(data, list):
        return [convert(item) for item in data]
    elif isinstance(data, bytes):
        return data.decode()
    else:
        return data
28
+
29
+
30
def to_np(x):
    """Move a torch tensor to the CPU and return it as a NumPy array."""
    cpu_tensor = x.cpu()
    return cpu_tensor.data.numpy()
DB/concern/icdar2015_eval/__init__.py ADDED
File without changes
DB/concern/icdar2015_eval/detection/__init__.py ADDED
File without changes
DB/concern/icdar2015_eval/detection/deteval.py ADDED
@@ -0,0 +1,323 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+ # -*- coding: utf-8 -*-
3
+ import math
4
+ from collections import namedtuple
5
+ import numpy as np
6
+ from shapely.geometry import Polygon
7
+
8
+
9
class DetectionDetEvalEvaluator(object):
    """Text-detection evaluator implementing the DetEval protocol.

    Ground-truth and detected polygons are matched under three regimes
    (one-to-one, one-to-many, many-to-one) using area-recall and
    area-precision thresholds, with extra center-distance and
    single-overlap validations, and weighted scores are accumulated per
    match type.
    """

    def __init__(
        self,
        area_recall_constraint=0.8, area_precision_constraint=0.4,
        ev_param_ind_center_diff_thr=1,
        mtype_oo_o=1.0, mtype_om_o=0.8, mtype_om_m=1.0
    ):
        # mtype_* are the score weights for one-to-one (oo_o),
        # one-to-many (om_o) and many-to-one (om_m) matches.
        self.area_recall_constraint = area_recall_constraint
        self.area_precision_constraint = area_precision_constraint
        self.ev_param_ind_center_diff_thr = ev_param_ind_center_diff_thr
        self.mtype_oo_o = mtype_oo_o
        self.mtype_om_o = mtype_om_o
        self.mtype_om_m = mtype_om_m

    def evaluate_image(self, gt, pred):
        """Evaluate detections against ground truth for one image.

        Args:
            gt: list of dicts with keys ``'points'`` (polygon vertices)
                and ``'ignore'`` (don't-care flag).
            pred: list of dicts with key ``'points'``.

        Returns:
            dict of per-sample metrics (precision/recall/hmean, match
            pairs, accumulators, evaluation log, ...), suitable for
            aggregation by :meth:`combine_results`.
        """

        def get_union(pD,pG):
            return Polygon(pD).union(Polygon(pG)).area

        def get_intersection_over_union(pD,pG):
            return get_intersection(pD, pG) / get_union(pD, pG)

        def get_intersection(pD,pG):
            return Polygon(pD).intersection(Polygon(pG)).area

        def one_to_one_match(row, col):
            # True iff (row, col) is the only pair in its row AND its
            # column exceeding both area constraints.
            cont = 0
            for j in range(len(recallMat[0])):
                if recallMat[row,j] >= self.area_recall_constraint and precisionMat[row,j] >= self.area_precision_constraint:
                    cont = cont +1
            if (cont != 1):
                return False
            cont = 0
            for i in range(len(recallMat)):
                if recallMat[i,col] >= self.area_recall_constraint and precisionMat[i,col] >= self.area_precision_constraint:
                    cont = cont +1
            if (cont != 1):
                return False

            if recallMat[row,col] >= self.area_recall_constraint and precisionMat[row,col] >= self.area_precision_constraint:
                return True
            return False

        def num_overlaps_gt(gtNum):
            # Count non-don't-care detections overlapping this GT box.
            cont = 0
            for detNum in range(len(detRects)):
                if detNum not in detDontCareRectsNum:
                    if recallMat[gtNum,detNum] > 0 :
                        cont = cont +1
            return cont

        def num_overlaps_det(detNum):
            # Count non-don't-care GT boxes overlapping this detection.
            cont = 0
            for gtNum in range(len(recallMat)):
                if gtNum not in gtDontCareRectsNum:
                    if recallMat[gtNum,detNum] > 0 :
                        cont = cont +1
            return cont

        def is_single_overlap(row, col):
            if num_overlaps_gt(row)==1 and num_overlaps_det(col)==1:
                return True
            else:
                return False

        def one_to_many_match(gtNum):
            # One GT covered by several detections: accumulate recall
            # over candidate detections whose precision passes.
            # NOTE(review): the local ``detRects`` list below shadows the
            # outer ``detRects`` of rectangles — it holds indices here.
            many_sum = 0
            detRects = []
            for detNum in range(len(recallMat[0])):
                if gtRectMat[gtNum] == 0 and detRectMat[detNum] == 0 and detNum not in detDontCareRectsNum:
                    if precisionMat[gtNum,detNum] >= self.area_precision_constraint:
                        many_sum += recallMat[gtNum,detNum]
                        detRects.append(detNum)
            if round(many_sum,4) >= self.area_recall_constraint:
                return True,detRects
            else:
                return False,[]

        def many_to_one_match(detNum):
            # One detection covering several GT boxes (mirror of above).
            many_sum = 0
            gtRects = []
            for gtNum in range(len(recallMat)):
                if gtRectMat[gtNum] == 0 and detRectMat[detNum] == 0 and gtNum not in gtDontCareRectsNum:
                    if recallMat[gtNum,detNum] >= self.area_recall_constraint:
                        many_sum += precisionMat[gtNum,detNum]
                        gtRects.append(gtNum)
            if round(many_sum,4) >= self.area_precision_constraint:
                return True,gtRects
            else:
                return False,[]

        def center_distance(r1, r2):
            # Euclidean distance between polygon centroids (vertex means).
            return ((np.mean(r1, axis=0) - np.mean(r2, axis=0)) ** 2).sum() ** 0.5

        def diag(r):
            # Diagonal length of the polygon's axis-aligned bounding box.
            r = np.array(r)
            return ((r[:, 0].max() - r[:, 0].min()) ** 2 + (r[:, 1].max() - r[:, 1].min()) ** 2) ** 0.5

        perSampleMetrics = {}

        recall = 0
        precision = 0
        hmean = 0
        recallAccum = 0.
        precisionAccum = 0.
        gtRects = []
        detRects = []
        gtPolPoints = []
        detPolPoints = []
        gtDontCareRectsNum = []#Array of Ground Truth Rectangles' keys marked as don't Care
        detDontCareRectsNum = []#Array of Detected Rectangles' matched with a don't Care GT
        pairs = []
        evaluationLog = ""

        recallMat = np.empty([1,1])
        precisionMat = np.empty([1,1])

        for n in range(len(gt)):
            points = gt[n]['points']
            # transcription = gt[n]['text']
            dontCare = gt[n]['ignore']

            # Degenerate or self-intersecting polygons are skipped entirely.
            if not Polygon(points).is_valid or not Polygon(points).is_simple:
                continue

            gtRects.append(points)
            gtPolPoints.append(points)
            if dontCare:
                gtDontCareRectsNum.append( len(gtRects)-1 )

        evaluationLog += "GT rectangles: " + str(len(gtRects)) + (" (" + str(len(gtDontCareRectsNum)) + " don't care)\n" if len(gtDontCareRectsNum)>0 else "\n")

        for n in range(len(pred)):
            points = pred[n]['points']

            if not Polygon(points).is_valid or not Polygon(points).is_simple:
                continue

            detRect = points
            detRects.append(detRect)
            detPolPoints.append(points)
            if len(gtDontCareRectsNum)>0 :
                # A detection mostly inside a don't-care GT is itself
                # marked don't-care.
                for dontCareRectNum in gtDontCareRectsNum:
                    dontCareRect = gtRects[dontCareRectNum]
                    intersected_area = get_intersection(dontCareRect,detRect)
                    rdDimensions = Polygon(detRect).area
                    if (rdDimensions==0) :
                        precision = 0
                    else:
                        precision= intersected_area / rdDimensions
                    if (precision > self.area_precision_constraint):
                        detDontCareRectsNum.append( len(detRects)-1 )
                        break

        evaluationLog += "DET rectangles: " + str(len(detRects)) + (" (" + str(len(detDontCareRectsNum)) + " don't care)\n" if len(detDontCareRectsNum)>0 else "\n")

        if len(gtRects)==0:
            recall = 1
            precision = 0 if len(detRects)>0 else 1

        if len(detRects)>0:
            #Calculate recall and precision matrixs
            outputShape=[len(gtRects),len(detRects)]
            recallMat = np.empty(outputShape)
            precisionMat = np.empty(outputShape)
            gtRectMat = np.zeros(len(gtRects),np.int8)
            detRectMat = np.zeros(len(detRects),np.int8)
            for gtNum in range(len(gtRects)):
                for detNum in range(len(detRects)):
                    rG = gtRects[gtNum]
                    rD = detRects[detNum]
                    intersected_area = get_intersection(rG,rD)
                    rgDimensions = Polygon(rG).area
                    rdDimensions = Polygon(rD).area
                    recallMat[gtNum,detNum] = 0 if rgDimensions==0 else intersected_area / rgDimensions
                    precisionMat[gtNum,detNum] = 0 if rdDimensions==0 else intersected_area / rdDimensions

            # Find one-to-one matches
            evaluationLog += "Find one-to-one matches\n"
            for gtNum in range(len(gtRects)):
                for detNum in range(len(detRects)):
                    if gtRectMat[gtNum] == 0 and detRectMat[detNum] == 0 and gtNum not in gtDontCareRectsNum and detNum not in detDontCareRectsNum :
                        match = one_to_one_match(gtNum, detNum)
                        if match is True :
                            #in deteval we have to make other validation before mark as one-to-one
                            if is_single_overlap(gtNum, detNum) is True :
                                rG = gtRects[gtNum]
                                rD = detRects[detNum]
                                # Normalized center distance: 2*d / (diagG + diagD).
                                normDist = center_distance(rG, rD);
                                normDist /= diag(rG) + diag(rD);
                                normDist *= 2.0;
                                if normDist < self.ev_param_ind_center_diff_thr:
                                    gtRectMat[gtNum] = 1
                                    detRectMat[detNum] = 1
                                    recallAccum += self.mtype_oo_o
                                    precisionAccum += self.mtype_oo_o
                                    pairs.append({'gt':gtNum,'det':detNum,'type':'OO'})
                                    evaluationLog += "Match GT #" + str(gtNum) + " with Det #" + str(detNum) + "\n"
                                else:
                                    evaluationLog += "Match Discarded GT #" + str(gtNum) + " with Det #" + str(detNum) + " normDist: " + str(normDist) + " \n"
                            else:
                                evaluationLog += "Match Discarded GT #" + str(gtNum) + " with Det #" + str(detNum) + " not single overlap\n"
            # Find one-to-many matches
            evaluationLog += "Find one-to-many matches\n"
            for gtNum in range(len(gtRects)):
                if gtNum not in gtDontCareRectsNum:
                    match,matchesDet = one_to_many_match(gtNum)
                    if match is True :
                        evaluationLog += "num_overlaps_gt=" + str(num_overlaps_gt(gtNum))
                        #in deteval we have to make other validation before mark as one-to-one
                        if num_overlaps_gt(gtNum)>=2 :
                            gtRectMat[gtNum] = 1
                            recallAccum += (self.mtype_oo_o if len(matchesDet)==1 else self.mtype_om_o)
                            precisionAccum += (self.mtype_oo_o if len(matchesDet)==1 else self.mtype_om_o*len(matchesDet))
                            pairs.append({'gt':gtNum,'det':matchesDet,'type': 'OO' if len(matchesDet)==1 else 'OM'})
                            for detNum in matchesDet :
                                detRectMat[detNum] = 1
                            evaluationLog += "Match GT #" + str(gtNum) + " with Det #" + str(matchesDet) + "\n"
                        else:
                            evaluationLog += "Match Discarded GT #" + str(gtNum) + " with Det #" + str(matchesDet) + " not single overlap\n"

            # Find many-to-one matches
            evaluationLog += "Find many-to-one matches\n"
            for detNum in range(len(detRects)):
                if detNum not in detDontCareRectsNum:
                    match,matchesGt = many_to_one_match(detNum)
                    if match is True :
                        #in deteval we have to make other validation before mark as one-to-one
                        if num_overlaps_det(detNum)>=2 :
                            detRectMat[detNum] = 1
                            recallAccum += (self.mtype_oo_o if len(matchesGt)==1 else self.mtype_om_m*len(matchesGt))
                            precisionAccum += (self.mtype_oo_o if len(matchesGt)==1 else self.mtype_om_m)
                            pairs.append({'gt':matchesGt,'det':detNum,'type': 'OO' if len(matchesGt)==1 else 'MO'})
                            for gtNum in matchesGt :
                                gtRectMat[gtNum] = 1
                            evaluationLog += "Match GT #" + str(matchesGt) + " with Det #" + str(detNum) + "\n"
                        else:
                            evaluationLog += "Match Discarded GT #" + str(matchesGt) + " with Det #" + str(detNum) + " not single overlap\n"

        numGtCare = (len(gtRects) - len(gtDontCareRectsNum))
        if numGtCare == 0:
            recall = float(1)
            precision = float(0) if len(detRects)>0 else float(1)
        else:
            recall = float(recallAccum) / numGtCare
            precision = float(0) if (len(detRects) - len(detDontCareRectsNum))==0 else float(precisionAccum) / (len(detRects) - len(detDontCareRectsNum))
        hmean = 0 if (precision + recall)==0 else 2.0 * precision * recall / (precision + recall)

        numGtCare = len(gtRects) - len(gtDontCareRectsNum)
        numDetCare = len(detRects) - len(detDontCareRectsNum)

        perSampleMetrics = {
            'precision':precision,
            'recall':recall,
            'hmean':hmean,
            'pairs':pairs,
            # Matrices are omitted for very crowded images to keep the
            # payload small.
            'recallMat':[] if len(detRects)>100 else recallMat.tolist(),
            'precisionMat':[] if len(detRects)>100 else precisionMat.tolist(),
            'gtPolPoints':gtPolPoints,
            'detPolPoints':detPolPoints,
            'gtCare': numGtCare,
            'detCare': numDetCare,
            'gtDontCare':gtDontCareRectsNum,
            'detDontCare':detDontCareRectsNum,
            'recallAccum':recallAccum,
            'precisionAccum':precisionAccum,
            'evaluationLog': evaluationLog
        }

        return perSampleMetrics

    def combine_results(self, results):
        """Aggregate per-image results into dataset-level P/R/H-mean."""
        numGt = 0
        numDet = 0
        methodRecallSum = 0
        methodPrecisionSum = 0

        for result in results:
            numGt += result['gtCare']
            numDet += result['detCare']
            methodRecallSum += result['recallAccum']
            methodPrecisionSum += result['precisionAccum']

        methodRecall = 0 if numGt==0 else methodRecallSum/numGt
        methodPrecision = 0 if numDet==0 else methodPrecisionSum/numDet
        methodHmean = 0 if methodRecall + methodPrecision==0 else 2* methodRecall * methodPrecision / (methodRecall + methodPrecision)

        methodMetrics = {'precision':methodPrecision, 'recall':methodRecall,'hmean': methodHmean }

        return methodMetrics
301
+
302
+
303
if __name__=='__main__':
    # Smoke test: one image with one cared-for GT box, one ignored GT
    # box, and a single slightly-perturbed detection.
    evaluator = DetectionDetEvalEvaluator()
    gts = [[{
        'points': [(0, 0), (1, 0), (1, 1), (0, 1)],
        'text': 1234,
        'ignore': False,
    }, {
        'points': [(2, 2), (3, 2), (3, 3), (2, 3)],
        'text': 5678,
        'ignore': True,
    }]]
    preds = [[{
        'points': [(0.1, 0.1), (1, 0), (1, 1), (0, 1)],
        'text': 123,
        'ignore': False,
    }]]
    results = []
    for gt, pred in zip(gts, preds):
        results.append(evaluator.evaluate_image(gt, pred))
    metrics = evaluator.combine_results(results)
    print(metrics)
DB/concern/icdar2015_eval/detection/icdar2013.py ADDED
@@ -0,0 +1,290 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+ # -*- coding: utf-8 -*-
3
+ import math
4
+ from collections import namedtuple
5
+ import numpy as np
6
+ from shapely.geometry import Polygon
7
+
8
+
9
class DetectionICDAR2013Evaluator(object):
    """ICDAR2013 (DetEval-style) text-detection evaluator.

    Matches ground-truth boxes against detections under three regimes
    (one-to-one, one-to-many, many-to-one) using area-recall and
    area-precision thresholds, accumulating weighted scores per match
    type.

    Fix: the one-to-many branch logged ``num_overlaps_gt(gtNum)`` but
    this file never defined ``num_overlaps_gt`` (it only exists in the
    sibling deteval.py), so any one-to-many match raised ``NameError``.
    The helper is now defined locally, mirroring deteval.py.
    """

    def __init__(
        self,
        area_recall_constraint=0.8, area_precision_constraint=0.4,
        ev_param_ind_center_diff_thr=1,
        mtype_oo_o=1.0, mtype_om_o=0.8, mtype_om_m=1.0
    ):
        # mtype_* are the score weights for one-to-one (oo_o),
        # one-to-many (om_o) and many-to-one (om_m) matches.
        self.area_recall_constraint = area_recall_constraint
        self.area_precision_constraint = area_precision_constraint
        self.ev_param_ind_center_diff_thr = ev_param_ind_center_diff_thr
        self.mtype_oo_o = mtype_oo_o
        self.mtype_om_o = mtype_om_o
        self.mtype_om_m = mtype_om_m

    def evaluate_image(self, gt, pred):
        """Evaluate detections against ground truth for one image.

        Args:
            gt: list of dicts with keys ``'points'`` (polygon vertices)
                and ``'ignore'`` (don't-care flag).
            pred: list of dicts with key ``'points'``.

        Returns:
            dict of per-sample metrics (precision/recall/hmean, match
            pairs, accumulators, evaluation log, ...), suitable for
            aggregation by :meth:`combine_results`.
        """

        def get_union(pD, pG):
            return Polygon(pD).union(Polygon(pG)).area

        def get_intersection_over_union(pD, pG):
            return get_intersection(pD, pG) / get_union(pD, pG)

        def get_intersection(pD, pG):
            return Polygon(pD).intersection(Polygon(pG)).area

        def one_to_one_match(row, col):
            # True iff (row, col) is the only pair in its row AND its
            # column exceeding both area constraints.
            cont = 0
            for j in range(len(recallMat[0])):
                if recallMat[row, j] >= self.area_recall_constraint and precisionMat[row, j] >= self.area_precision_constraint:
                    cont = cont + 1
            if (cont != 1):
                return False
            cont = 0
            for i in range(len(recallMat)):
                if recallMat[i, col] >= self.area_recall_constraint and precisionMat[i, col] >= self.area_precision_constraint:
                    cont = cont + 1
            if (cont != 1):
                return False

            if recallMat[row, col] >= self.area_recall_constraint and precisionMat[row, col] >= self.area_precision_constraint:
                return True
            return False

        def num_overlaps_gt(gtNum):
            # FIX: previously undefined in this file (NameError on any
            # one-to-many match). Counts non-don't-care detections
            # overlapping this GT box; mirrors deteval.py.
            cont = 0
            for detNum in range(len(detRects)):
                if detNum not in detDontCareRectsNum:
                    if recallMat[gtNum, detNum] > 0:
                        cont = cont + 1
            return cont

        def one_to_many_match(gtNum):
            # One GT covered by several detections: accumulate recall
            # over candidate detections whose precision passes.
            many_sum = 0
            detRects = []
            for detNum in range(len(recallMat[0])):
                if gtRectMat[gtNum] == 0 and detRectMat[detNum] == 0 and detNum not in detDontCareRectsNum:
                    if precisionMat[gtNum, detNum] >= self.area_precision_constraint:
                        many_sum += recallMat[gtNum, detNum]
                        detRects.append(detNum)
            if round(many_sum, 4) >= self.area_recall_constraint:
                return True, detRects
            else:
                return False, []

        def many_to_one_match(detNum):
            # One detection covering several GT boxes (mirror of above).
            many_sum = 0
            gtRects = []
            for gtNum in range(len(recallMat)):
                if gtRectMat[gtNum] == 0 and detRectMat[detNum] == 0 and gtNum not in gtDontCareRectsNum:
                    if recallMat[gtNum, detNum] >= self.area_recall_constraint:
                        many_sum += precisionMat[gtNum, detNum]
                        gtRects.append(gtNum)
            if round(many_sum, 4) >= self.area_precision_constraint:
                return True, gtRects
            else:
                return False, []

        def center_distance(r1, r2):
            # Euclidean distance between polygon centroids (vertex means).
            return ((np.mean(r1, axis=0) - np.mean(r2, axis=0)) ** 2).sum() ** 0.5

        def diag(r):
            # Diagonal length of the polygon's axis-aligned bounding box.
            r = np.array(r)
            return ((r[:, 0].max() - r[:, 0].min()) ** 2 + (r[:, 1].max() - r[:, 1].min()) ** 2) ** 0.5

        perSampleMetrics = {}

        recall = 0
        precision = 0
        hmean = 0
        recallAccum = 0.
        precisionAccum = 0.
        gtRects = []
        detRects = []
        gtPolPoints = []
        detPolPoints = []
        gtDontCareRectsNum = []  # Ground Truth rectangles marked as don't care
        detDontCareRectsNum = []  # Detected rectangles matched with a don't-care GT
        pairs = []
        evaluationLog = ""

        recallMat = np.empty([1, 1])
        precisionMat = np.empty([1, 1])

        for n in range(len(gt)):
            points = gt[n]['points']
            # transcription = gt[n]['text']
            dontCare = gt[n]['ignore']

            # Degenerate or self-intersecting polygons are skipped entirely.
            if not Polygon(points).is_valid or not Polygon(points).is_simple:
                continue

            gtRects.append(points)
            gtPolPoints.append(points)
            if dontCare:
                gtDontCareRectsNum.append(len(gtRects)-1)

        evaluationLog += "GT rectangles: " + str(len(gtRects)) + (" (" + str(len(gtDontCareRectsNum)) + " don't care)\n" if len(gtDontCareRectsNum)>0 else "\n")

        for n in range(len(pred)):
            points = pred[n]['points']

            if not Polygon(points).is_valid or not Polygon(points).is_simple:
                continue

            detRect = points
            detRects.append(detRect)
            detPolPoints.append(points)
            if len(gtDontCareRectsNum)>0 :
                # A detection mostly inside a don't-care GT is itself
                # marked don't-care.
                for dontCareRectNum in gtDontCareRectsNum:
                    dontCareRect = gtRects[dontCareRectNum]
                    intersected_area = get_intersection(dontCareRect, detRect)
                    rdDimensions = Polygon(detRect).area
                    if (rdDimensions==0) :
                        precision = 0
                    else:
                        precision= intersected_area / rdDimensions
                    if (precision > self.area_precision_constraint):
                        detDontCareRectsNum.append(len(detRects)-1)
                        break

        evaluationLog += "DET rectangles: " + str(len(detRects)) + (" (" + str(len(detDontCareRectsNum)) + " don't care)\n" if len(detDontCareRectsNum)>0 else "\n")

        if len(gtRects)==0:
            recall = 1
            precision = 0 if len(detRects)>0 else 1

        if len(detRects)>0:
            # Calculate recall and precision matrices
            outputShape=[len(gtRects),len(detRects)]
            recallMat = np.empty(outputShape)
            precisionMat = np.empty(outputShape)
            gtRectMat = np.zeros(len(gtRects), np.int8)
            detRectMat = np.zeros(len(detRects), np.int8)
            for gtNum in range(len(gtRects)):
                for detNum in range(len(detRects)):
                    rG = gtRects[gtNum]
                    rD = detRects[detNum]
                    intersected_area = get_intersection(rG, rD)
                    rgDimensions = Polygon(rG).area
                    rdDimensions = Polygon(rD).area
                    recallMat[gtNum, detNum] = 0 if rgDimensions==0 else intersected_area / rgDimensions
                    precisionMat[gtNum, detNum] = 0 if rdDimensions==0 else intersected_area / rdDimensions

            # Find one-to-one matches
            evaluationLog += "Find one-to-one matches\n"
            for gtNum in range(len(gtRects)):
                for detNum in range(len(detRects)):
                    if gtRectMat[gtNum] == 0 and detRectMat[detNum] == 0 and gtNum not in gtDontCareRectsNum and detNum not in detDontCareRectsNum :
                        match = one_to_one_match(gtNum, detNum)
                        if match is True :
                            #in deteval we have to make other validation before mark as one-to-one
                            rG = gtRects[gtNum]
                            rD = detRects[detNum]
                            # Normalized center distance: 2*d / (diagG + diagD).
                            normDist = center_distance(rG, rD)
                            normDist /= diag(rG) + diag(rD)
                            normDist *= 2.0
                            if normDist < self.ev_param_ind_center_diff_thr:
                                gtRectMat[gtNum] = 1
                                detRectMat[detNum] = 1
                                recallAccum += self.mtype_oo_o
                                precisionAccum += self.mtype_oo_o
                                pairs.append({'gt':gtNum,'det':detNum,'type':'OO'})
                                evaluationLog += "Match GT #" + str(gtNum) + " with Det #" + str(detNum) + "\n"
                            else:
                                evaluationLog += "Match Discarded GT #" + str(gtNum) + " with Det #" + str(detNum) + " normDist: " + str(normDist) + " \n"
            # Find one-to-many matches
            evaluationLog += "Find one-to-many matches\n"
            for gtNum in range(len(gtRects)):
                if gtNum not in gtDontCareRectsNum:
                    match,matchesDet = one_to_many_match(gtNum)
                    if match is True :
                        evaluationLog += "num_overlaps_gt=" + str(num_overlaps_gt(gtNum))
                        gtRectMat[gtNum] = 1
                        recallAccum += (self.mtype_oo_o if len(matchesDet)==1 else self.mtype_om_o)
                        precisionAccum += (self.mtype_oo_o if len(matchesDet)==1 else self.mtype_om_o*len(matchesDet))
                        pairs.append({'gt':gtNum,'det':matchesDet,'type': 'OO' if len(matchesDet)==1 else 'OM'})
                        for detNum in matchesDet :
                            detRectMat[detNum] = 1
                        evaluationLog += "Match GT #" + str(gtNum) + " with Det #" + str(matchesDet) + "\n"

            # Find many-to-one matches
            evaluationLog += "Find many-to-one matches\n"
            for detNum in range(len(detRects)):
                if detNum not in detDontCareRectsNum:
                    match,matchesGt = many_to_one_match(detNum)
                    if match is True :
                        detRectMat[detNum] = 1
                        recallAccum += (self.mtype_oo_o if len(matchesGt)==1 else self.mtype_om_m*len(matchesGt))
                        precisionAccum += (self.mtype_oo_o if len(matchesGt)==1 else self.mtype_om_m)
                        pairs.append({'gt':matchesGt,'det':detNum,'type': 'OO' if len(matchesGt)==1 else 'MO'})
                        for gtNum in matchesGt :
                            gtRectMat[gtNum] = 1
                        evaluationLog += "Match GT #" + str(matchesGt) + " with Det #" + str(detNum) + "\n"

        numGtCare = (len(gtRects) - len(gtDontCareRectsNum))
        if numGtCare == 0:
            recall = float(1)
            precision = float(0) if len(detRects)>0 else float(1)
        else:
            recall = float(recallAccum) / numGtCare
            precision = float(0) if (len(detRects) - len(detDontCareRectsNum))==0 else float(precisionAccum) / (len(detRects) - len(detDontCareRectsNum))
        hmean = 0 if (precision + recall)==0 else 2.0 * precision * recall / (precision + recall)

        numGtCare = len(gtRects) - len(gtDontCareRectsNum)
        numDetCare = len(detRects) - len(detDontCareRectsNum)

        perSampleMetrics = {
            'precision':precision,
            'recall':recall,
            'hmean':hmean,
            'pairs':pairs,
            # Matrices are omitted for very crowded images to keep the
            # payload small.
            'recallMat':[] if len(detRects)>100 else recallMat.tolist(),
            'precisionMat':[] if len(detRects)>100 else precisionMat.tolist(),
            'gtPolPoints':gtPolPoints,
            'detPolPoints':detPolPoints,
            'gtCare': numGtCare,
            'detCare': numDetCare,
            'gtDontCare':gtDontCareRectsNum,
            'detDontCare':detDontCareRectsNum,
            'recallAccum':recallAccum,
            'precisionAccum':precisionAccum,
            'evaluationLog': evaluationLog
        }

        return perSampleMetrics

    def combine_results(self, results):
        """Aggregate per-image results into dataset-level P/R/H-mean."""
        numGt = 0
        numDet = 0
        methodRecallSum = 0
        methodPrecisionSum = 0

        for result in results:
            numGt += result['gtCare']
            numDet += result['detCare']
            methodRecallSum += result['recallAccum']
            methodPrecisionSum += result['precisionAccum']

        methodRecall = 0 if numGt==0 else methodRecallSum/numGt
        methodPrecision = 0 if numDet==0 else methodPrecisionSum/numDet
        methodHmean = 0 if methodRecall + methodPrecision==0 else 2* methodRecall * methodPrecision / (methodRecall + methodPrecision)

        methodMetrics = {'precision':methodPrecision, 'recall':methodRecall,'hmean': methodHmean }

        return methodMetrics
268
+
269
+
270
if __name__=='__main__':
    # Smoke test: one image with one cared-for GT box, one ignored GT
    # box, and a single slightly-perturbed detection.
    evaluator = DetectionICDAR2013Evaluator()
    gts = [[{
        'points': [(0, 0), (1, 0), (1, 1), (0, 1)],
        'text': 1234,
        'ignore': False,
    }, {
        'points': [(2, 2), (3, 2), (3, 3), (2, 3)],
        'text': 5678,
        'ignore': True,
    }]]
    preds = [[{
        'points': [(0.1, 0.1), (1, 0), (1, 1), (0, 1)],
        'text': 123,
        'ignore': False,
    }]]
    results = []
    for gt, pred in zip(gts, preds):
        results.append(evaluator.evaluate_image(gt, pred))
    metrics = evaluator.combine_results(results)
    print(metrics)
DB/concern/icdar2015_eval/detection/iou.py ADDED
@@ -0,0 +1,222 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+ # -*- coding: utf-8 -*-
3
+ from collections import namedtuple
4
+ import numpy as np
5
+ from shapely.geometry import Polygon
6
+
7
+
8
class DetectionIoUEvaluator(object):
    """IoU-based (ICDAR2015-style) text-detection evaluator.

    A detection matches a ground-truth polygon when their IoU exceeds
    ``iou_constraint``; detections covered by don't-care GT regions are
    excluded from scoring.
    """

    def __init__(self, iou_constraint=0.5, area_precision_constraint=0.5):
        self.iou_constraint = iou_constraint
        # Threshold above which a detection inside a don't-care GT is
        # itself treated as don't-care.
        self.area_precision_constraint = area_precision_constraint

    def evaluate_image(self, gt, pred):
        """Evaluate detections against ground truth for one image.

        Args:
            gt: list of dicts with keys ``'points'`` and ``'ignore'``.
            pred: list of dicts with key ``'points'``.

        Returns:
            dict of per-sample metrics (precision/recall/hmean, match
            pairs, care counts, evaluation log, ...).
        """

        def get_union(pD, pG):
            return Polygon(pD).union(Polygon(pG)).area

        def get_intersection_over_union(pD, pG):
            return get_intersection(pD, pG) / get_union(pD, pG)

        def get_intersection(pD, pG):
            return Polygon(pD).intersection(Polygon(pG)).area

        def compute_ap(confList, matchList, numGtCare):
            # Average precision over confidence-sorted detections.
            # NOTE(review): not invoked in this method's body — appears
            # to be retained from the upstream ICDAR script.
            correct = 0
            AP = 0
            if len(confList) > 0:
                confList = np.array(confList)
                matchList = np.array(matchList)
                sorted_ind = np.argsort(-confList)
                confList = confList[sorted_ind]
                matchList = matchList[sorted_ind]
                for n in range(len(confList)):
                    match = matchList[n]
                    if match:
                        correct += 1
                        AP += float(correct)/(n + 1)

                if numGtCare > 0:
                    AP /= numGtCare

            return AP

        perSampleMetrics = {}

        matchedSum = 0

        Rectangle = namedtuple('Rectangle', 'xmin ymin xmax ymax')

        numGlobalCareGt = 0
        numGlobalCareDet = 0

        arrGlobalConfidences = []
        arrGlobalMatches = []

        recall = 0
        precision = 0
        hmean = 0

        detMatched = 0

        iouMat = np.empty([1, 1])

        gtPols = []
        detPols = []

        gtPolPoints = []
        detPolPoints = []

        # Array of Ground Truth Polygons' keys marked as don't Care
        gtDontCarePolsNum = []
        # Array of Detected Polygons' matched with a don't Care GT
        detDontCarePolsNum = []

        pairs = []
        detMatchedNums = []

        arrSampleConfidences = []
        arrSampleMatch = []

        evaluationLog = ""

        for n in range(len(gt)):
            points = gt[n]['points']
            # transcription = gt[n]['text']
            dontCare = gt[n]['ignore']

            # Degenerate or self-intersecting polygons are skipped entirely.
            if not Polygon(points).is_valid or not Polygon(points).is_simple:
                continue

            gtPol = points
            gtPols.append(gtPol)
            gtPolPoints.append(points)
            if dontCare:
                gtDontCarePolsNum.append(len(gtPols)-1)

        evaluationLog += "GT polygons: " + str(len(gtPols)) + (" (" + str(len(
            gtDontCarePolsNum)) + " don't care)\n" if len(gtDontCarePolsNum) > 0 else "\n")

        for n in range(len(pred)):
            points = pred[n]['points']
            if not Polygon(points).is_valid or not Polygon(points).is_simple:
                continue

            detPol = points
            detPols.append(detPol)
            detPolPoints.append(points)
            if len(gtDontCarePolsNum) > 0:
                # A detection mostly inside a don't-care GT is itself
                # marked don't-care.
                for dontCarePol in gtDontCarePolsNum:
                    dontCarePol = gtPols[dontCarePol]
                    intersected_area = get_intersection(dontCarePol, detPol)
                    pdDimensions = Polygon(detPol).area
                    precision = 0 if pdDimensions == 0 else intersected_area / pdDimensions
                    if (precision > self.area_precision_constraint):
                        detDontCarePolsNum.append(len(detPols)-1)
                        break

        evaluationLog += "DET polygons: " + str(len(detPols)) + (" (" + str(len(
            detDontCarePolsNum)) + " don't care)\n" if len(detDontCarePolsNum) > 0 else "\n")

        if len(gtPols) > 0 and len(detPols) > 0:
            # Calculate IoU and precision matrixs
            outputShape = [len(gtPols), len(detPols)]
            iouMat = np.empty(outputShape)
            gtRectMat = np.zeros(len(gtPols), np.int8)
            detRectMat = np.zeros(len(detPols), np.int8)
            for gtNum in range(len(gtPols)):
                for detNum in range(len(detPols)):
                    pG = gtPols[gtNum]
                    pD = detPols[detNum]
                    iouMat[gtNum, detNum] = get_intersection_over_union(pD, pG)

            # Greedy first-come matching: each GT/detection is used at
            # most once.
            for gtNum in range(len(gtPols)):
                for detNum in range(len(detPols)):
                    if gtRectMat[gtNum] == 0 and detRectMat[detNum] == 0 and gtNum not in gtDontCarePolsNum and detNum not in detDontCarePolsNum:
                        if iouMat[gtNum, detNum] > self.iou_constraint:
                            gtRectMat[gtNum] = 1
                            detRectMat[detNum] = 1
                            detMatched += 1
                            pairs.append({'gt': gtNum, 'det': detNum})
                            detMatchedNums.append(detNum)
                            evaluationLog += "Match GT #" + \
                                str(gtNum) + " with Det #" + str(detNum) + "\n"

        numGtCare = (len(gtPols) - len(gtDontCarePolsNum))
        numDetCare = (len(detPols) - len(detDontCarePolsNum))
        if numGtCare == 0:
            recall = float(1)
            precision = float(0) if numDetCare > 0 else float(1)
        else:
            recall = float(detMatched) / numGtCare
            precision = 0 if numDetCare == 0 else float(
                detMatched) / numDetCare

        hmean = 0 if (precision + recall) == 0 else 2.0 * \
            precision * recall / (precision + recall)

        matchedSum += detMatched
        numGlobalCareGt += numGtCare
        numGlobalCareDet += numDetCare

        perSampleMetrics = {
            'precision': precision,
            'recall': recall,
            'hmean': hmean,
            'pairs': pairs,
            # Matrix omitted for very crowded images to keep the payload
            # small.
            'iouMat': [] if len(detPols) > 100 else iouMat.tolist(),
            'gtPolPoints': gtPolPoints,
            'detPolPoints': detPolPoints,
            'gtCare': numGtCare,
            'detCare': numDetCare,
            'gtDontCare': gtDontCarePolsNum,
            'detDontCare': detDontCarePolsNum,
            'detMatched': detMatched,
            'evaluationLog': evaluationLog
        }

        return perSampleMetrics

    def combine_results(self, results):
        """Aggregate per-image results into dataset-level P/R/H-mean."""
        numGlobalCareGt = 0
        numGlobalCareDet = 0
        matchedSum = 0
        for result in results:
            numGlobalCareGt += result['gtCare']
            numGlobalCareDet += result['detCare']
            matchedSum += result['detMatched']

        methodRecall = 0 if numGlobalCareGt == 0 else float(
            matchedSum)/numGlobalCareGt
        methodPrecision = 0 if numGlobalCareDet == 0 else float(
            matchedSum)/numGlobalCareDet
        methodHmean = 0 if methodRecall + methodPrecision == 0 else 2 * \
            methodRecall * methodPrecision / (methodRecall + methodPrecision)

        methodMetrics = {'precision': methodPrecision,
                         'recall': methodRecall, 'hmean': methodHmean}

        return methodMetrics
200
+
201
+
202
if __name__ == '__main__':
    # Smoke test: one image with two GT boxes and a single
    # slightly-perturbed detection.
    evaluator = DetectionIoUEvaluator()
    gts = [[{
        'points': [(0, 0), (1, 0), (1, 1), (0, 1)],
        'text': 1234,
        'ignore': False,
    }, {
        'points': [(2, 2), (3, 2), (3, 3), (2, 3)],
        'text': 5678,
        'ignore': False,
    }]]
    preds = [[{
        'points': [(0.1, 0.1), (1, 0), (1, 1), (0, 1)],
        'text': 123,
        'ignore': False,
    }]]
    results = []
    for gt, pred in zip(gts, preds):
        results.append(evaluator.evaluate_image(gt, pred))
    metrics = evaluator.combine_results(results)
    print(metrics)
DB/concern/icdar2015_eval/detection/mtwi2018.py ADDED
@@ -0,0 +1,285 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+ # -*- coding: utf-8 -*-
3
+ import math
4
+ from collections import namedtuple
5
+ import numpy as np
6
+ from shapely.geometry import Polygon
7
+
8
+
9
class DetectionMTWI2018Evaluator(object):
    """DetEval-style detection evaluator for the MTWI-2018 text benchmark.

    Matches ground-truth and detected polygons through three passes
    (one-to-one, one-to-many, many-to-one) using area-recall and
    area-precision thresholds, accumulating fractional recall/precision
    credit for split/merged detections.
    """

    def __init__(
        self,
        area_recall_constraint=0.7, area_precision_constraint=0.7,
        ev_param_ind_center_diff_thr=1,
    ):
        # Minimum area recall / area precision for a pairing to count.
        self.area_recall_constraint = area_recall_constraint
        self.area_precision_constraint = area_precision_constraint
        # Upper bound on the normalized center distance used to validate
        # one-to-one matches.
        self.ev_param_ind_center_diff_thr = ev_param_ind_center_diff_thr

    def evaluate_image(self, gt, pred):
        """Evaluate one image.

        `gt` and `pred` are lists of dicts with a 'points' polygon; GT
        entries also carry an 'ignore' flag. Returns a per-sample metrics
        dict consumed by `combine_results`.
        """

        def get_union(pD,pG):
            return Polygon(pD).union(Polygon(pG)).area

        # NOTE(review): defined but not used anywhere in this method.
        def get_intersection_over_union(pD,pG):
            return get_intersection(pD, pG) / get_union(pD, pG)

        def get_intersection(pD,pG):
            return Polygon(pD).intersection(Polygon(pG)).area

        def one_to_one_match(row, col):
            # True iff (row, col) is the unique qualifying pair in both its
            # row and its column of the recall/precision matrices.
            cont = 0
            for j in range(len(recallMat[0])):
                if recallMat[row,j] >= self.area_recall_constraint and precisionMat[row,j] >= self.area_precision_constraint:
                    cont = cont +1
            if (cont != 1):
                return False
            cont = 0
            for i in range(len(recallMat)):
                if recallMat[i,col] >= self.area_recall_constraint and precisionMat[i,col] >= self.area_precision_constraint:
                    cont = cont +1
            if (cont != 1):
                return False

            if recallMat[row,col] >= self.area_recall_constraint and precisionMat[row,col] >= self.area_precision_constraint:
                return True
            return False

        def one_to_many_match(gtNum):
            # One GT split across several detections: sum their recalls.
            many_sum = 0
            detRects = []
            for detNum in range(len(recallMat[0])):
                if gtRectMat[gtNum] == 0 and detRectMat[detNum] == 0 and detNum not in detDontCareRectsNum:
                    if precisionMat[gtNum,detNum] >= self.area_precision_constraint:
                        many_sum += recallMat[gtNum,detNum]
                        detRects.append(detNum)
            if round(many_sum,4) >= self.area_recall_constraint:
                return True,detRects
            else:
                return False,[]

        def many_to_one_match(detNum):
            # One detection covering several GTs: sum their precisions.
            many_sum = 0
            gtRects = []
            for gtNum in range(len(recallMat)):
                if gtRectMat[gtNum] == 0 and detRectMat[detNum] == 0 and gtNum not in gtDontCareRectsNum:
                    if recallMat[gtNum,detNum] >= self.area_recall_constraint:
                        many_sum += precisionMat[gtNum,detNum]
                        gtRects.append(gtNum)
            if round(many_sum,4) >= self.area_precision_constraint:
                return True,gtRects
            else:
                return False,[]

        def center_distance(r1, r2):
            # Euclidean distance between polygon centroids (vertex means).
            return ((np.mean(r1, axis=0) - np.mean(r2, axis=0)) ** 2).sum() ** 0.5

        def diag(r):
            # Diagonal length of the polygon's axis-aligned bounding box.
            r = np.array(r)
            return ((r[:, 0].max() - r[:, 0].min()) ** 2 + (r[:, 1].max() - r[:, 1].min()) ** 2) ** 0.5

        perSampleMetrics = {}

        recall = 0
        precision = 0
        hmean = 0
        # Fractional match credit accumulated across the three match passes.
        recallAccum = 0.
        precisionAccum = 0.
        gtRects = []
        detRects = []
        gtPolPoints = []
        detPolPoints = []
        gtDontCareRectsNum = []#Array of Ground Truth Rectangles' keys marked as don't Care
        detDontCareRectsNum = []#Array of Detected Rectangles' matched with a don't Care GT
        pairs = []
        evaluationLog = ""

        recallMat = np.empty([1,1])
        precisionMat = np.empty([1,1])

        # Collect valid GT polygons; invalid/self-intersecting ones are dropped.
        for n in range(len(gt)):
            points = gt[n]['points']
            # transcription = gt[n]['text']
            dontCare = gt[n]['ignore']

            if not Polygon(points).is_valid or not Polygon(points).is_simple:
                continue

            gtRects.append(points)
            gtPolPoints.append(points)
            if dontCare:
                gtDontCareRectsNum.append( len(gtRects)-1 )

        evaluationLog += "GT rectangles: " + str(len(gtRects)) + (" (" + str(len(gtDontCareRectsNum)) + " don't care)\n" if len(gtDontCareRectsNum)>0 else "\n")

        # Collect valid detections; those mostly inside a don't-care GT
        # (area-precision > 0.5) are themselves marked don't-care.
        for n in range(len(pred)):
            points = pred[n]['points']

            if not Polygon(points).is_valid or not Polygon(points).is_simple:
                continue

            detRect = points
            detRects.append(detRect)
            detPolPoints.append(points)
            if len(gtDontCareRectsNum)>0 :
                for dontCareRectNum in gtDontCareRectsNum:
                    dontCareRect = gtRects[dontCareRectNum]
                    intersected_area = get_intersection(dontCareRect,detRect)
                    rdDimensions = Polygon(detRect).area
                    if (rdDimensions==0) :
                        precision = 0
                    else:
                        precision= intersected_area / rdDimensions
                    if (precision > 0.5):
                        detDontCareRectsNum.append( len(detRects)-1 )
                        break

        evaluationLog += "DET rectangles: " + str(len(detRects)) + (" (" + str(len(detDontCareRectsNum)) + " don't care)\n" if len(detDontCareRectsNum)>0 else "\n")

        if len(gtRects)==0:
            recall = 1
            precision = 0 if len(detRects)>0 else 1

        if len(detRects)>0:
            #Calculate recall and precision matrixs
            outputShape=[len(gtRects),len(detRects)]
            recallMat = np.empty(outputShape)
            precisionMat = np.empty(outputShape)
            # 0 = unmatched, 1 = consumed by one of the match passes below.
            gtRectMat = np.zeros(len(gtRects),np.int8)
            detRectMat = np.zeros(len(detRects),np.int8)
            for gtNum in range(len(gtRects)):
                for detNum in range(len(detRects)):
                    rG = gtRects[gtNum]
                    rD = detRects[detNum]
                    intersected_area = get_intersection(rG,rD)
                    rgDimensions = Polygon(rG).area
                    rdDimensions = Polygon(rD).area
                    recallMat[gtNum,detNum] = 0 if rgDimensions==0 else intersected_area / rgDimensions
                    precisionMat[gtNum,detNum] = 0 if rdDimensions==0 else intersected_area / rdDimensions

            # Find one-to-one matches
            evaluationLog += "Find one-to-one matches\n"
            for gtNum in range(len(gtRects)):
                for detNum in range(len(detRects)):
                    if gtRectMat[gtNum] == 0 and detRectMat[detNum] == 0 and gtNum not in gtDontCareRectsNum and detNum not in detDontCareRectsNum :
                        match = one_to_one_match(gtNum, detNum)
                        if match is True :
                            #in deteval we have to make other validation before mark as one-to-one
                            rG = gtRects[gtNum]
                            rD = detRects[detNum]
                            # Center distance normalized by the mean of the
                            # two bounding-box diagonals.
                            normDist = center_distance(rG, rD);
                            normDist /= diag(rG) + diag(rD);
                            normDist *= 2.0;
                            if normDist < self.ev_param_ind_center_diff_thr:
                                gtRectMat[gtNum] = 1
                                detRectMat[detNum] = 1
                                recallAccum += 1.0
                                precisionAccum += 1.0
                                pairs.append({'gt':gtNum,'det':detNum,'type':'OO'})
                                evaluationLog += "Match GT #" + str(gtNum) + " with Det #" + str(detNum) + "\n"
                            else:
                                evaluationLog += "Match Discarded GT #" + str(gtNum) + " with Det #" + str(detNum) + " normDist: " + str(normDist) + " \n"
            # Find one-to-many matches
            evaluationLog += "Find one-to-many matches\n"
            for gtNum in range(len(gtRects)):
                if gtNum not in gtDontCareRectsNum:
                    match,matchesDet = one_to_many_match(gtNum)
                    if match is True :
                        gtRectMat[gtNum] = 1
                        recallAccum += 1.0
                        # Split detections earn sub-linear precision credit.
                        precisionAccum += len(matchesDet) / (1 + math.log(len(matchesDet)))
                        pairs.append({'gt':gtNum,'det':matchesDet,'type': 'OO' if len(matchesDet)==1 else 'OM'})
                        for detNum in matchesDet :
                            detRectMat[detNum] = 1
                        evaluationLog += "Match GT #" + str(gtNum) + " with Det #" + str(matchesDet) + "\n"

            # Find many-to-one matches
            evaluationLog += "Find many-to-one matches\n"
            for detNum in range(len(detRects)):
                if detNum not in detDontCareRectsNum:
                    match,matchesGt = many_to_one_match(detNum)
                    if match is True :
                        detRectMat[detNum] = 1
                        # Merged GTs earn sub-linear recall credit.
                        recallAccum += len(matchesGt) / (1 + math.log(len(matchesGt)))
                        precisionAccum += 1.0
                        pairs.append({'gt':matchesGt,'det':detNum,'type': 'OO' if len(matchesGt)==1 else 'MO'})
                        for gtNum in matchesGt :
                            gtRectMat[gtNum] = 1
                        evaluationLog += "Match GT #" + str(matchesGt) + " with Det #" + str(detNum) + "\n"

            numGtCare = (len(gtRects) - len(gtDontCareRectsNum))
            if numGtCare == 0:
                recall = float(1)
                precision = float(0) if len(detRects)>0 else float(1)
            else:
                recall = float(recallAccum) / numGtCare
                precision = float(0) if (len(detRects) - len(detDontCareRectsNum))==0 else float(precisionAccum) / (len(detRects) - len(detDontCareRectsNum))
            hmean = 0 if (precision + recall)==0 else 2.0 * precision * recall / (precision + recall)

        numGtCare = len(gtRects) - len(gtDontCareRectsNum)
        numDetCare = len(detRects) - len(detDontCareRectsNum)

        perSampleMetrics = {
            'precision':precision,
            'recall':recall,
            'hmean':hmean,
            'pairs':pairs,
            # Matrices omitted on crowded images to keep the payload small.
            'recallMat':[] if len(detRects)>100 else recallMat.tolist(),
            'precisionMat':[] if len(detRects)>100 else precisionMat.tolist(),
            'gtPolPoints':gtPolPoints,
            'detPolPoints':detPolPoints,
            'gtCare': numGtCare,
            'detCare': numDetCare,
            'gtDontCare':gtDontCareRectsNum,
            'detDontCare':detDontCareRectsNum,
            'recallAccum':recallAccum,
            'precisionAccum':precisionAccum,
            'evaluationLog': evaluationLog
        }

        return perSampleMetrics

    def combine_results(self, results):
        """Aggregate the accumulated per-image credits into dataset scores."""
        numGt = 0
        numDet = 0
        methodRecallSum = 0
        methodPrecisionSum = 0

        for result in results:
            numGt += result['gtCare']
            numDet += result['detCare']
            methodRecallSum += result['recallAccum']
            methodPrecisionSum += result['precisionAccum']

        methodRecall = 0 if numGt==0 else methodRecallSum/numGt
        methodPrecision = 0 if numDet==0 else methodPrecisionSum/numDet
        methodHmean = 0 if methodRecall + methodPrecision==0 else 2* methodRecall * methodPrecision / (methodRecall + methodPrecision)

        methodMetrics = {'precision':methodPrecision, 'recall':methodRecall,'hmean': methodHmean }

        return methodMetrics
263
+
264
+
265
if __name__ == '__main__':
    # Smoke test: one image with one cared and one ignored GT box, plus a
    # single detection that roughly overlaps the cared box.
    # Bug fix: the original instantiated DetectionICDAR2013Evaluator, a class
    # that is not defined in this module (left over from icdar2013.py) and
    # raised NameError; use the evaluator defined above.
    evaluator = DetectionMTWI2018Evaluator()
    gts = [[{
        'points': [(0, 0), (1, 0), (1, 1), (0, 1)],
        'text': 1234,
        'ignore': False,
    }, {
        'points': [(2, 2), (3, 2), (3, 3), (2, 3)],
        'text': 5678,
        'ignore': True,
    }]]
    preds = [[{
        'points': [(0.1, 0.1), (1, 0), (1, 1), (0, 1)],
        'text': 123,
        'ignore': False,
    }]]
    results = []
    for gt, pred in zip(gts, preds):
        results.append(evaluator.evaluate_image(gt, pred))
    metrics = evaluator.combine_results(results)
    print(metrics)
DB/concern/log.py ADDED
@@ -0,0 +1,196 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import logging
3
+ import functools
4
+ import json
5
+ import time
6
+ from datetime import datetime
7
+
8
+ from tensorboardX import SummaryWriter
9
+ import yaml
10
+ import cv2
11
+ import numpy as np
12
+
13
+ from concern.config import Configurable, State
14
+
15
+
16
class Logger(Configurable):
    """Unified experiment logger.

    Routes human-readable messages to stdout and ``output.log``, scalars and
    images to a TensorBoard summary writer, metric snapshots to
    ``metrics.log`` and argument dumps to ``args.log``, all rooted under
    ``log_dir`` (which is symlinked into ``database_dir`` storage).
    """

    SUMMARY_DIR_NAME = 'summaries'
    VISUALIZE_NAME = 'visualize'
    LOG_FILE_NAME = 'output.log'
    ARGS_FILE_NAME = 'args.log'
    METRICS_FILE_NAME = 'metrics.log'

    # Configurable states; defaults can be overridden by the experiment config.
    database_dir = State(default='./outputs/')
    log_dir = State(default='workspace')
    verbose = State(default=False)
    level = State(default='info')
    log_interval = State(default=100)

    def __init__(self, **kwargs):
        self.load_all(**kwargs)

        self._make_storage()

        cmd = kwargs['cmd']
        self.name = cmd['name']
        self.log_dir = os.path.join(self.log_dir, self.name)
        try:
            self.verbose = cmd['verbose']
        # Bug fix: was a bare `except:` that swallowed everything; only a
        # missing 'verbose' key is expected here.
        except KeyError:
            print('verbose:', self.verbose)
        if self.verbose:
            print('Initializing log dir for', self.log_dir)

        if not os.path.exists(self.log_dir):
            os.makedirs(self.log_dir)

        self.message_logger = self._init_message_logger()

        summary_path = os.path.join(self.log_dir, self.SUMMARY_DIR_NAME)
        self.tf_board_logger = SummaryWriter(summary_path)

        # Append mode so metrics from resumed runs accumulate.
        self.metrics_writer = open(os.path.join(
            self.log_dir, self.METRICS_FILE_NAME), 'at')

        self.timestamp = time.time()
        self.logged = -1
        self.speed = None
        self.eta_time = None

    def _make_storage(self):
        # Keep the real data under database_dir/<log_dir>/<cwd-name> and
        # expose it at log_dir through a symlink.
        application = os.path.basename(os.getcwd())
        storage_dir = os.path.join(
            self.database_dir, self.log_dir, application)
        if not os.path.exists(storage_dir):
            os.makedirs(storage_dir)
        if not os.path.exists(self.log_dir):
            os.symlink(storage_dir, self.log_dir)

    def save_dir(self, dir_name):
        """Return a path for `dir_name` inside the run's log directory."""
        return os.path.join(self.log_dir, dir_name)

    def _init_message_logger(self):
        # Dedicated logger writing to both stderr and output.log.
        message_logger = logging.getLogger('messages')
        message_logger.setLevel(
            logging.DEBUG if self.verbose else logging.INFO)
        formatter = logging.Formatter(
            '[%(levelname)s] [%(asctime)s] %(message)s')
        std_handler = logging.StreamHandler()
        std_handler.setLevel(message_logger.level)
        std_handler.setFormatter(formatter)

        file_handler = logging.FileHandler(
            os.path.join(self.log_dir, self.LOG_FILE_NAME))
        file_handler.setLevel(message_logger.level)
        file_handler.setFormatter(formatter)

        message_logger.addHandler(std_handler)
        message_logger.addHandler(file_handler)
        return message_logger

    def report_time(self, name: str):
        """Log the elapsed time since the previous report (verbose only)."""
        if self.verbose:
            self.info(name + " time :" + str(time.time() - self.timestamp))
        self.timestamp = time.time()

    def report_eta(self, steps, total, epoch):
        """Print a carriage-return progress line with a running-average ETA."""
        self.logged = self.logged % total + 1
        steps = steps % total
        if self.eta_time is None:
            self.eta_time = time.time()
            speed = -1
        else:
            eta_time = time.time()
            speed = eta_time - self.eta_time
            if self.speed is not None:
                # Incremental mean of per-step duration.
                speed = ((self.logged - 1) * self.speed + speed) / self.logged
            self.speed = speed
            self.eta_time = eta_time

        seconds = (total - steps) * speed
        hours = seconds // 3600
        minutes = (seconds - (hours * 3600)) // 60
        seconds = seconds % 60

        print('%d/%d batches processed in epoch %d, ETA: %2d:%2d:%2d' %
              (steps, total, epoch,
               hours, minutes, seconds), end='\r')

    def args(self, parameters=None):
        """Dump `parameters` to args.log, or load and return them when None."""
        if parameters is None:
            with open(os.path.join(self.log_dir, self.ARGS_FILE_NAME), 'rt') as reader:
                # Bug fix: yaml.load without a Loader is deprecated since
                # PyYAML 5.1 and a TypeError since 6.0; FullLoader matches
                # the historical default behavior.
                return yaml.load(reader.read(), Loader=yaml.FullLoader)
        with open(os.path.join(self.log_dir, self.ARGS_FILE_NAME), 'wt') as writer:
            yaml.dump(parameters.dump(), writer)

    def metrics(self, epoch, steps, metrics_dict):
        """Record averaged metrics to TensorBoard, the log and metrics.log."""
        results = {}
        for name, a in metrics_dict.items():
            results[name] = {'count': a.count, 'value': float(a.avg)}
            self.add_scalar('metrics/' + name, a.avg, steps)
        result_dict = {
            str(datetime.now()): {
                'epoch': epoch,
                'steps': steps,
                **results
            }
        }
        string_result = yaml.dump(result_dict)
        self.info(string_result)
        self.metrics_writer.write(string_result)
        self.metrics_writer.flush()

    def named_number(self, name, num=None, default=0):
        # Write mode: persist `num` under log_dir/<name> and return it.
        # Read mode (num is None): NOTE(review) `self.has_signal` is not
        # defined on this class, so the read path currently raises
        # AttributeError — confirm the intended helper before relying on it.
        if num is None:
            return int(self.has_signal(name)) or default
        else:
            with open(os.path.join(self.log_dir, name), 'w') as writer:
                writer.write(str(num))
            return num

    # Convenience accessors for the persisted epoch / iteration counters.
    epoch = functools.partialmethod(named_number, 'epoch')
    iter = functools.partialmethod(named_number, 'iter')

    def message(self, level, content):
        """Dispatch `content` to the message logger at the given level name."""
        self.message_logger.__getattribute__(level)(content)

    def images(self, prefix, image_dict, step):
        """Log a dict of HWC images to TensorBoard under `prefix`."""
        for name, image in image_dict.items():
            self.add_image(prefix + '/' + name, image, step, dataformats='HWC')

    def merge_save_images(self, name, images):
        """Vertically concatenate `images` and save as <name>.jpg."""
        for i, image in enumerate(images):
            if i == 0:
                result = image
            else:
                result = np.concatenate([result, image], 0)
        cv2.imwrite(os.path.join(self.vis_dir(), name+'.jpg'), result)

    def vis_dir(self):
        """Return (and lazily create) the visualization output directory."""
        vis_dir = os.path.join(self.log_dir, self.VISUALIZE_NAME)
        if not os.path.exists(vis_dir):
            os.mkdir(vis_dir)
        return vis_dir

    def save_image_dict(self, images, max_size=1024):
        """Save each image, downscaled so its longest side is <= max_size."""
        for file_name, image in images.items():
            height, width = image.shape[:2]
            if height > width:
                actual_height = min(height, max_size)
                actual_width = int(round(actual_height * width / height))
            else:
                actual_width = min(width, max_size)
                actual_height = int(round(actual_width * height / width))
            image = cv2.resize(image, (actual_width, actual_height))
            cv2.imwrite(os.path.join(self.vis_dir(), file_name+'.jpg'), image)

    def __getattr__(self, name):
        """Fallback lookup: message levels become logging shortcuts and
        unknown names are delegated to the TensorBoard writer."""
        message_levels = set(['debug', 'info', 'warning', 'error', 'critical'])
        if name == '__setstate__':
            # Refuse the pickle protocol probe explicitly to avoid the
            # delegation below recursing through a half-restored instance.
            raise AttributeError(name)
        if name in message_levels:
            return functools.partial(self.message, name)
        elif hasattr(self.__dict__.get('tf_board_logger'), name):
            return self.tf_board_logger.__getattribute__(name)
        else:
            # Bug fix: the original ended with a bare `super()` call, which
            # evaluates the super proxy and implicitly returns None for every
            # unknown attribute, masking real AttributeErrors.
            raise AttributeError(name)
DB/concern/signal_monitor.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+
4
class SignalMonitor(object):
    """One-shot file-based signal channel.

    A signal is delivered by writing a file at `file_path`; `get_signal`
    consumes it (reads the content and deletes the file).
    """

    def __init__(self, file_path):
        # Path to poll; None disables monitoring entirely.
        self.file_path = file_path

    def get_signal(self):
        """Return and consume the pending signal, or None if there is none.

        Bug fix: the original read from the nonexistent attribute
        ``self.file`` and passed the file *object* to ``os.remove`` — both
        raised whenever a signal file actually existed. Read through the
        context-managed handle and remove by path instead.
        """
        if self.file_path is None:
            return None
        if os.path.exists(self.file_path):
            with open(self.file_path) as f:
                data = f.read()
            os.remove(self.file_path)
            return data
        return None
DB/concern/visualizer.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # File : visualizer.py
4
+ # Author : Zhaoyi Wan <wanzhaoyi@megvii.com>
5
+ # Date : 08.01.2019
6
+ # Last Modified Date: 02.12.2019
7
+ # Last Modified By : Minghui Liao
8
+ import torch
9
+ import numpy as np
10
+ import cv2
11
+
12
class Visualize:
    """Debug visualizers that turn tensors/arrays into uint8 images for cv2.

    All helpers accept either a torch.Tensor (moved to CPU numpy first) or a
    numpy array.

    Bug fix applied throughout: ``np.float`` was a deprecated alias removed
    in NumPy 1.20+/1.24, so every ``np.float`` use raised AttributeError on
    modern NumPy; replaced with the builtin ``float``.
    """

    @classmethod
    def visualize(cls, x):
        # Placeholder dispatcher; the 2-D/3-D branches were never implemented.
        dimension = len(x.shape)
        if dimension == 2:
            pass
        elif dimension == 3:
            pass

    @classmethod
    def to_np(cls, x):
        """Detach a torch tensor to a CPU numpy array."""
        return x.cpu().data.numpy()

    @classmethod
    def visualize_weights(cls, tensor, format='HW', normalize=True):
        """Render a 2-D map as a JET-colormapped uint8 image.

        `format` names the axis order of `tensor` (e.g. 'HW' or 'WH').
        """
        if isinstance(tensor, torch.Tensor):
            x = cls.to_np(tensor.permute(format.index('H'), format.index('W')))
        else:
            x = tensor.transpose(format.index('H'), format.index('W'))
        if normalize:
            # Min-max scale into [0, 1] before the colormap.
            x = (x - x.min()) / (x.max() - x.min())
        # return np.tile(x * 255., (3, 1, 1)).swapaxes(0, 2).swapaxes(1, 0).astype(np.uint8)
        return cv2.applyColorMap((x * 255).astype(np.uint8), cv2.COLORMAP_JET)

    @classmethod
    def visualize_points(cls, image, tensor, radius=5, normalized=True):
        """Draw each 2-D point as a randomly-colored circle on `image`.

        When `normalized`, points are assumed to lie in [0, 1]^2 and are
        scaled by (width, height).
        """
        if isinstance(tensor, torch.Tensor):
            points = cls.to_np(tensor)
        else:
            points = tensor
        if normalized:
            points = points * image.shape[:2][::-1]
        for i in range(points.shape[0]):
            color = np.random.randint(
                0, 255, (3, ), dtype=np.uint8).astype(float)
            image = cv2.circle(image,
                               tuple(points[i].astype(np.int32).tolist()),
                               radius, color, thickness=radius//2)
        return image

    @classmethod
    def visualize_heatmap(cls, tensor, format='CHW'):
        """Blend each channel of a multi-channel map with a random color."""
        if isinstance(tensor, torch.Tensor):
            x = cls.to_np(tensor.permute(format.index('H'),
                                         format.index('W'), format.index('C')))
        else:
            x = tensor.transpose(
                format.index('H'), format.index('W'), format.index('C'))
        canvas = np.zeros((x.shape[0], x.shape[1], 3), dtype=float)

        for c in range(0, x.shape[-1]):
            color = np.random.randint(
                0, 255, (3, ), dtype=np.uint8).astype(float)
            canvas += np.tile(x[:, :, c], (3, 1, 1)
                              ).swapaxes(0, 2).swapaxes(1, 0) * color

        canvas = canvas.astype(np.uint8)
        return canvas

    @classmethod
    def visualize_classes(cls, x):
        """Paint each class id in `x` with a random color.

        Note: classes are drawn for ids in range(int(x.max())), so the
        largest id itself is left black, as in the original implementation.
        """
        canvas = np.zeros((x.shape[0], x.shape[1], 3), dtype=np.uint8)
        for c in range(int(x.max())):
            color = np.random.randint(
                0, 255, (3, ), dtype=np.uint8).astype(float)
            canvas[np.where(x == c)] = color
        return canvas

    @classmethod
    def visualize_grid(cls, x, y, stride=16, color=(0, 0, 255), canvas=None):
        """Scatter circles at the (x, y) grid positions every `stride` cells."""
        h, w = x.shape
        if canvas is None:
            canvas = np.zeros((h, w, 3), dtype=np.uint8)
        # canvas = np.concatenate([canvas, canvas], axis=1)
        # NOTE(review): i runs up to w and j up to h while x/y are indexed
        # [i, j] (row, col); this only lines up when h == w — confirm intent.
        i, j = 0, 0
        while i < w:
            j = 0
            while j < h:
                canvas = cv2.circle(canvas, (int(x[i, j] * w + 0.5), int(y[i, j] * h + 0.5)), radius=max(stride//4, 1), color=color, thickness=stride//8)
                j += stride
            i += stride
        return canvas

    @classmethod
    def visualize_rect(cls, canvas, _rect, color=(0, 0, 255)):
        """Draw an axis-aligned rectangle given as (x1, y1, x2, y2) floats."""
        rect = (_rect + 0.5).astype(np.int32)
        return cv2.rectangle(canvas, (rect[0], rect[1]), (rect[2], rect[3]), color)
DB/concern/webcv2/__init__.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env mdl
2
class WebCV2:
    # Proxy object installed in place of this module: attribute lookups are
    # resolved first on the webcv2 Manager (so imshow/waitKey are served over
    # HTTP to a browser) and fall back to plain cv2 for everything else.
    def __init__(self):
        import cv2
        self._cv2 = cv2
        from .manager import global_manager as gm
        self._gm = gm

    def __getattr__(self, name):
        # Manager attributes shadow cv2's of the same name.
        if hasattr(self._gm, name):
            return getattr(self._gm, name)
        elif hasattr(self._cv2, name):
            return getattr(self._cv2, name)
        else:
            raise AttributeError

# Replace the module object itself with a proxy instance so that
# `import webcv2; webcv2.imshow(...)` transparently hits the Manager.
import sys
sys.modules[__name__] = WebCV2()
19
+
DB/concern/webcv2/manager.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env mdl
2
+ import socket
3
+ import base64
4
+ import cv2
5
+ import numpy as np
6
+ from collections import OrderedDict
7
+
8
+ from .server import get_server
9
+
10
+
11
def jpeg_encode(img):
    # Encode an image for transport to the browser.
    # NOTE(review): despite the name, this encodes PNG ('.png'); the client
    # page decodes data URLs as image/png, so the behavior is consistent —
    # only the name is misleading (kept for interface compatibility).
    return cv2.imencode('.png', img)[1]
13
+
14
+
15
def get_free_port(rng, low=2000, high=10000):
    """Randomly probe TCP ports in [low, high) and return one that binds.

    `rng` is a numpy RandomState. Note: inherently racy (TOCTOU) — the port
    may be taken again between this probe and the caller's own bind.
    """
    import errno

    in_use = True
    while in_use:
        port = rng.randint(high - low) + low
        in_use = False
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.bind(("0.0.0.0", port))
        except socket.error as e:
            # Bug fix: the original compared against the literal 98, which is
            # EADDRINUSE on Linux only; use the portable errno constant so
            # the retry loop also works on macOS/Windows.
            if e.errno == errno.EADDRINUSE:
                in_use = True
        finally:
            s.close()
    return port
28
+
29
+
30
class Manager:
    # Collects images via imshow() and serves them to a browser on waitKey(),
    # mimicking the cv2.imshow/waitKey API over a web page.
    def __init__(self, img_encode_method=jpeg_encode, rng=None):
        # title -> base64-encoded image payload, in insertion order.
        self._queue = OrderedDict()
        self._server = None
        self.img_encode_method = img_encode_method
        if rng is None:
            rng = np.random.RandomState(self.get_default_seed())
        self.rng = rng

    def get_default_seed(self):
        # Fixed seed so repeated runs pick the same port sequence.
        return 0

    def imshow(self, title, img):
        # Encode and stage an image; it is only shipped on the next waitKey().
        data = self.img_encode_method(img)
        data = base64.b64encode(data)
        data = data.decode('utf8')
        self._queue[title] = data

    def waitKey(self, delay=0):
        # Lazily start the web server on a free port, push the staged images
        # and block until the browser sends a key press (or `delay` ms pass).
        if self._server is None:
            self.port = get_free_port(self.rng)
            self._server, self._conn = get_server(port=self.port)
        self._conn.send([delay, list(self._queue.items())])
        # self._queue = OrderedDict()
        return self._conn.recv()

# Module-level singleton used by the WebCV2 proxy in __init__.py.
global_manager = Manager()
57
+
DB/concern/webcv2/server.py ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env mdl
2
+ import os
3
+ BASE_DIR = os.path.dirname(os.path.realpath(__file__))
4
+ import time
5
+ import json
6
+ import select
7
+ import traceback
8
+ import socket
9
+ from multiprocessing import Process, Pipe
10
+
11
+ import gevent
12
+ from gevent.pywsgi import WSGIServer
13
+ from geventwebsocket.handler import WebSocketHandler
14
+ from flask import Flask, request, render_template, abort
15
+
16
+
17
def log_important_msg(msg, *, padding=3):
    """Print `msg` framed in a '#' banner with `padding` spaces on each side."""
    inner = ' ' * padding + msg + ' ' * padding
    width = len(inner) + 2
    border = '#' * width
    spacer = '#' + ' ' * (width - 2) + '#'
    for line in (border, spacer, '#' + inner + '#', spacer, border):
        print(line)
25
+
26
+
27
def hint_url(url, port):
    # Announce the server URL prominently at startup.
    # NOTE(review): `port` is unused here (callers already bake it into
    # `url`); kept in the signature for interface compatibility.
    log_important_msg(
        'The server is running at: {}'.format(url))
30
+
31
+
32
def _set_server(conn, name='webcv2', port=7788):
    """Run the webcv2 Flask/WebSocket server; blocks forever.

    Meant to run in a daemon child process (see ``get_server``). `conn` is
    the pipe end used to receive image packages from the Manager and to send
    back the browser's key presses.
    """
    # Latest (delay, images) package received from the Manager, and whether a
    # key press is still owed for it.
    package = None
    package_alive = False

    app = Flask(name)
    # Templates live next to this file, not in the application cwd.
    app.root_path = BASE_DIR

    @app.route('/')
    def index():
        return render_template('index.html', title=name)

    @app.route('/stream')
    def stream():
        # True when the websocket has readable data within `delay` ms.
        def poll_ws(ws, delay):
            return len(select.select([ws.stream.handler.rfile], [], [], delay / 1000.)[0]) > 0

        # Bug fix: the original declared `global package` / `global
        # package_alive`, but those names are locals of _set_server, not
        # module globals — reading them before any assignment raised
        # NameError. `nonlocal` binds the closure variables instead.
        nonlocal package, package_alive

        if request.environ.get('wsgi.websocket'):
            ws = request.environ['wsgi.websocket']
            if ws is None:
                abort(404)
            else:
                should_send = True
                while not ws.closed:
                    # Pull the freshest package from the Manager, if any.
                    if conn.poll():
                        package = conn.recv()
                        package_alive = True
                        should_send = True
                    if not should_send:
                        continue
                    should_send = False
                    if package is None:
                        ws.send(None)
                    else:
                        delay, info_lst = package
                        ws.send(json.dumps(
                            (time.time(), package_alive, delay, info_lst)))
                        if package_alive:
                            # Wait for a key press up to `delay` ms (<= 0
                            # means wait indefinitely).
                            if delay <= 0 or poll_ws(ws, delay):
                                message = ws.receive()
                                if ws.closed or message is None:
                                    break
                                try:
                                    if isinstance(message, bytes):
                                        message = message.decode('utf8')
                                    message = int(message)
                                # Narrowed from a bare `except:`; only parse
                                # failures are expected here.
                                except (ValueError, UnicodeDecodeError):
                                    traceback.print_exc()
                                    message = -1
                            else:
                                # Timed out: report -1, like cv2.waitKey.
                                message = -1
                            conn.send(message)
                            package_alive = False
        return ""

    http_server = WSGIServer(('', port), app, handler_class=WebSocketHandler)
    hint_url('http://{}:{}'.format(socket.getfqdn(), port), port)
    http_server.serve_forever()
90
+
91
+
92
def get_server(name='webcv2', port=7788):
    # Launch the visualization web server in a daemon child process.
    # Returns (process, pipe_end); Manager.waitKey uses the pipe end to ship
    # image packages to the server and to receive key presses back.
    conn_server, conn_factory = Pipe()
    p_server = Process(
        target=_set_server,
        args=(conn_server,),
        kwargs=dict(
            name=name, port=port,
        ),
    )
    # Daemonized so the server dies with the parent process.
    p_server.daemon = True
    p_server.start()
    return p_server, conn_factory
104
+
DB/concern/webcv2/templates/index.html ADDED
@@ -0,0 +1,145 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
<!DOCTYPE html>
<!-- webcv2 browser client: connects to the /stream websocket, renders the
     base64 PNG images pushed by the server, and sends key presses back,
     emulating cv2.imshow/waitKey in the browser. -->
<html lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1.0, user-scalable=no"/>
<title>{{title}}</title>
<link rel="icon" href="//assets.megvii.com/static%2Ffavicon.ico?ver=1498037959257">
<script src="//cdnjs.cloudflare.com/ajax/libs/jquery/3.2.1/jquery.min.js"></script>
<script src="//cdnjs.cloudflare.com/ajax/libs/vue/2.2.6/vue.min.js"></script>
</head>

<body>
<script language="javascript" type="text/javascript">
    var vm = null;
    // True only for printable key presses (no ctrl/alt/meta, no backspace).
    function isCharacterKeyPress(evt) {
        if (typeof evt.which == "undefined") {
            // This is IE, which only fires keypress events for printable keys
            return true;
        } else if (typeof evt.which == "number" && evt.which > 0) {
            // In other browsers except old versions of WebKit, evt.which is
            // only greater than zero if the keypress is a printable key.
            // We need to filter out backspace and ctrl/alt/meta key combinations
            return !evt.ctrlKey && !evt.metaKey && !evt.altKey && evt.which != 8;
        }
        return false;
    };
    $(function() {
        vm = new Vue({
            el: '#board',
            data: {
                websocket: null,
                // resources
                delay: -1,
                data_alive: null,
                img_lst: null,
                // network
                package_size: 0,
                download_time: 0,
                net_speed: 0,
                // timer
                timer_interval: null,
                timer_step: 100,
            },
            created: function() {
                this.create_connection();
                document.addEventListener("keypress", this.send_key, false);
            },
            computed: {
                connection_alive: function() {
                    return (this.websocket != null) &&
                        !(this.websocket.readyState == this.websocket.CLOSING || this.websocket.readyState == this.websocket.CLOSED);
                },
            },
            methods: {
                // Build the ws:// (or wss://) URL for a path on this host.
                get_ws_url: function(s) {
                    var l = window.location;
                    return ((l.protocol === "https:") ? "wss://" : "ws://") + l.hostname + (((l.port != 80) && (l.port != 443)) ? ":" + l.port : "") + l.pathname + s;
                },
                create_connection: function() {
                    this.websocket = new WebSocket(this.get_ws_url("stream"));
                    this.websocket.binaryType = "arraybuffer";
                    this.websocket.onopen = function(evt) { console.log("connected"); };
                    this.websocket.onclose = this.close_connection;
                    this.websocket.onerror = function(evt) { console.log("error occurred"); console.log(evt); };
                    this.websocket.onmessage = this.receive_message;
                },
                close_connection: function() {
                    console.log("disconnected");
                    this.websocket = null;
                },
                // Handle one (send_time, alive, delay, images) package from the server.
                receive_message: function(evt) {
                    var obj = JSON.parse(evt.data);
                    var send_time = obj[0], recv_time = (new Date()).getTime() / 1000;
                    this.package_size = evt.data.length * 8;
                    this.download_time = recv_time - send_time;
                    this.net_speed = this.package_size / this.download_time;
                    console.log(obj);
                    this.data_alive = obj[1];
                    this.delay = obj[2];
                    this.img_lst = obj[3];
                    this.start_timer();
                },
                // Forward a printable key press to the server (waitKey result).
                send_key: function(evt) {
                    if (!this.connection_alive || !this.data_alive) return;
                    if (!isCharacterKeyPress(evt)) return;
                    this.delay = 0;
                    this.data_alive = false;
                    var keycode = evt.keyCode;
                    console.log("key pressed " + keycode + " " + String.fromCharCode(keycode));
                    this.websocket.send(keycode);
                },
                // Countdown for the waitKey timeout display.
                update_timer: function() {
                    if (this.delay > 0) this.delay -= this.timer_step;
                    if (this.delay <= 0) {
                        this.data_alive = false;
                        if (this.timer_interval != null) {
                            clearInterval(this.timer_interval);
                            this.timer_interval = null;
                        }
                    }
                },
                start_timer: function() {
                    if (this.timer_interval != null) {
                        clearInterval(this.timer_interval);
                        this.timer_interval = null;
                    }
                    if (this.delay > 0) this.timer_interval = setInterval(this.update_timer, this.timer_step);
                },
            },
            filters: {
                decimal: function(num, fixed_point) {
                    return num.toFixed(fixed_point);
                },
            },
        });
    });
</script>

{% raw %}
<!-- Vue template -->
<div id="board">
    <template v-if="connection_alive">
        <template v-if="data_alive">
            <p v-if="delay > 0">Press any key in {{delay / 1000 | decimal(1)}} seconds. </p>
            <p v-else>Press any key to continue. </p>
        </template>
        <template v-else>
            <p>Waiting for response from server. </p>
        </template>
    </template>
    <template v-else>
        <p>Disconnected from server. </p>
    </template>
    <p>Network: {{net_speed / 1000000 | decimal(2)}} MB/s * {{download_time | decimal(2)}} s = {{package_size / 1000000 | decimal(2)}} MB</p>
    <div style="display:inline-block">
        <div style="display:inline-block; vertical-align: top;" v-for="obj in img_lst">
            <p>{{obj[0]}}</p>
            <img :src="'data:image/png;base64,' + obj[1]" />
        </div>
    </div>
</div>
{% endraw %}

</body>
</html>
DB/convert_to_onnx.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import os
3
+ import torch
4
+ import numpy as np
5
+ from concern.config import Configurable, Config
6
+
7
+
8
def main():
    """Parse CLI arguments, build the configured experiment and export it to ONNX."""
    arg_parser = argparse.ArgumentParser(description='Convert model to ONNX')
    arg_parser.add_argument('exp', type=str)
    arg_parser.add_argument('resume', type=str, help='Resume from checkpoint')
    arg_parser.add_argument('output', type=str, help='Output ONNX path')

    # Keep only the options actually provided on the command line.
    cmd_args = {key: value
                for key, value in vars(arg_parser.parse_args()).items()
                if value is not None}

    config = Config()
    exp_args = config.compile(config.load(cmd_args['exp']))['Experiment']
    exp_args.update(cmd=cmd_args)
    experiment = Configurable.construct_class_from_config(exp_args)

    Demo(experiment, exp_args, cmd=cmd_args).inference()
24
+
25
+
26
class Demo:
    """Loads a trained checkpoint and exports the detection model to ONNX."""

    def __init__(self, experiment, args, cmd=dict()):
        # Per-channel image mean kept for parity with the demo pipeline
        # (not used by the export path itself).
        self.RGB_MEAN = np.array([122.67891434, 116.66876762, 104.00698793])
        self.experiment = experiment
        experiment.load('evaluation', **args)
        self.args = cmd
        self.structure = experiment.structure
        self.model_path = self.args['resume']
        self.output_path = self.args['output']

    def init_torch_tensor(self):
        # Prefer CUDA when available and switch the default tensor type so
        # tensors created later land on the chosen device.
        if torch.cuda.is_available():
            self.device = torch.device('cuda')
            torch.set_default_tensor_type('torch.cuda.FloatTensor')
        else:
            self.device = torch.device('cpu')
            torch.set_default_tensor_type('torch.FloatTensor')

    def init_model(self):
        # The structure's builder constructs the network on the chosen device.
        return self.structure.builder.build(self.device)

    def resume(self, model, path):
        # Best effort: a missing checkpoint is reported, not fatal.
        if not os.path.exists(path):
            print("Checkpoint not found: " + path)
            return
        states = torch.load(path, map_location=self.device)
        # strict=False tolerates extra/missing keys in the checkpoint.
        model.load_state_dict(states, strict=False)
        print("Resumed from " + path)

    def inference(self):
        self.init_torch_tensor()
        model = self.init_model()
        self.resume(model, self.model_path)
        model.eval()

        # A random 960x960 RGB image is enough to trace the graph; the
        # exported model keeps batch/height/width axes dynamic.
        dummy = np.random.randint(0, 255, size=(960, 960, 3), dtype=np.uint8)
        dummy = (dummy.astype(np.float32) / 255. - 0.5) / 0.5  # torch style norm
        tensor = torch.from_numpy(dummy.transpose((2, 0, 1))).unsqueeze(0).float()
        dynamic_axes = {'input': {0: 'batch_size', 2: 'height', 3: 'width'},
                        'output': {0: 'batch_size', 2: 'height', 3: 'width'}}
        with torch.no_grad():
            tensor = tensor.to(self.device)
            torch.onnx.export(model.model.module, tensor, self.output_path, input_names=['input'],
                              output_names=['output'], dynamic_axes=dynamic_axes, keep_initializers_as_inputs=False,
                              verbose=False, opset_version=12)
75
+
76
+
77
+ if __name__ == '__main__':
78
+ main()
DB/data/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ from .make_seg_detector_data import MakeSegDetectorData
2
+ from .transform_data import TransformData
3
+ from .random_crop_aug import RandomCropAug
4
+ from .make_border_map import MakeBorderMap
5
+ from .image_dataset import ImageDataset
DB/data/augmenter.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import imgaug
2
+ import imgaug.augmenters as iaa
3
+
4
+ from concern.config import Configurable, State
5
+
6
+
7
class AugmenterBuilder(object):
    """Builds an imgaug augmenter pipeline from a nested config structure."""

    def __init__(self):
        pass

    def build(self, args, root=True):
        # None and scalars pass through untouched.
        if args is None:
            return None
        if isinstance(args, (int, float, str)):
            return args
        if isinstance(args, list):
            if root:
                # Top-level list -> Sequential of recursively built steps.
                return iaa.Sequential(
                    [self.build(item, root=False) for item in args])
            # Nested list -> ['AugmenterName', positional, args, ...]
            return getattr(iaa, args[0])(
                *[self.to_tuple_if_list(p) for p in args[1:]])
        if isinstance(args, dict):
            if 'cls' in args:
                # {'cls': 'AugmenterName', ...keyword args} form.
                factory = getattr(iaa, args['cls'])
                kwargs = {k: self.to_tuple_if_list(v)
                          for k, v in args.items() if not k == 'cls'}
                return factory(**kwargs)
            return {key: self.build(value, root=False)
                    for key, value in args.items()}
        raise RuntimeError('unknown augmenter arg: ' + str(args))

    def to_tuple_if_list(self, obj):
        # YAML configs express tuples as lists; convert them back.
        return tuple(obj) if isinstance(obj, list) else obj
DB/data/data_loader.py ADDED
@@ -0,0 +1,250 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ import bisect
3
+
4
+ import imgaug
5
+ import numpy as np
6
+
7
+ import torch
8
+ import torch.distributed as dist
9
+ from torch.utils.data import Sampler, ConcatDataset, BatchSampler
10
+
11
+ from concern.config import Configurable, State
12
+
13
+
14
def default_worker_init_fn(worker_id):
    """Give each DataLoader worker its own numpy/imgaug random seed.

    Without this, forked workers would share the parent's RNG state and
    produce identical augmentations across workers.
    """
    np.random.seed(worker_id)
    imgaug.seed(worker_id)
17
+
18
+
19
class DataLoader(Configurable, torch.utils.data.DataLoader):
    r'''Configurable wrapper around ``torch.utils.data.DataLoader``.

    Settings come from the config system (``State``) and may be overridden
    by the command-line dict ``cmd``. When ``cmd['distributed']`` is set, a
    per-rank batch sampler splits each epoch across ``cmd['num_gpus']``.
    '''
    dataset = State()
    batch_size = State(default=256)
    num_workers = State(default=10)
    is_train = State(default=True)
    collect_fn = State(default=None)
    drop_last = State(default=True)
    shuffle = State()

    def __init__(self, **kwargs):
        self.load_all(**kwargs)
        if self.collect_fn is None:
            self.collect_fn = torch.utils.data.dataloader.default_collate
        cmd = kwargs.get('cmd', {})
        # BUG FIX: the original read cmd['is_train'] unconditionally, which
        # raised KeyError whenever no cmd dict (or no 'is_train' key) was
        # supplied, defeating both the {} default above and
        # State(default=True). Fall back to the configured value instead.
        self.is_train = cmd.get('is_train', self.is_train)
        if 'batch_size' in cmd:
            self.batch_size = cmd['batch_size']
        if self.shuffle is None:
            # Shuffle by default only during training.
            self.shuffle = self.is_train
        self.num_workers = cmd.get('num_workers', self.num_workers)

        if cmd.get('distributed'):
            # Each GPU gets an equal, deterministically shuffled slice of
            # every epoch via the distributed sampler.
            sampler = DistributedSampler(
                self.dataset, shuffle=self.shuffle,
                num_replicas=cmd['num_gpus'])
            batch_sampler = BatchSampler(
                sampler, self.batch_size // cmd['num_gpus'], False)
            torch.utils.data.DataLoader.__init__(
                self, self.dataset, batch_sampler=batch_sampler,
                num_workers=self.num_workers, pin_memory=False,
                drop_last=self.drop_last, collate_fn=self.collect_fn,
                worker_init_fn=default_worker_init_fn)
        else:
            torch.utils.data.DataLoader.__init__(
                self, self.dataset,
                batch_size=self.batch_size, num_workers=self.num_workers,
                drop_last=self.drop_last, shuffle=self.shuffle,
                pin_memory=True, collate_fn=self.collect_fn,
                worker_init_fn=default_worker_init_fn)
        # NOTE(review): stringified presumably so the config state stays
        # serializable; the callable was already handed to the parent
        # DataLoader as collate_fn above — TODO confirm against Config dump.
        self.collect_fn = str(self.collect_fn)
59
+
60
+
61
class SuccessiveRandomSampler(Sampler):
    '''Sampler yielding one (optionally shuffled) pass over the whole dataset.

    The shuffle order is derived deterministically from the current epoch,
    so repeated iteration within the same epoch gives the same order.

    Args:
        dataset: Dataset used for sampling.
        shuffle: Whether to permute the indices (default: True; new
            parameter with a backward-compatible default).
    '''

    def __init__(self, dataset, shuffle=True):
        self.dataset = dataset
        # BUG FIX: self.shuffle was never initialized, yet __iter__ read it.
        self.shuffle = shuffle
        self.epoch = 0

    def __iter__(self):
        # BUG FIX: the original __iter__ was copy-pasted from
        # DistributedSampler and referenced self.rank / self.num_samples /
        # self.total_size, none of which exist on this class, so iterating
        # always raised AttributeError. It also never passed the seeded
        # generator to randperm, so the "deterministic" shuffle wasn't.
        if self.shuffle:
            # Deterministically shuffle based on epoch.
            g = torch.Generator()
            g.manual_seed(self.epoch)
            indices = torch.randperm(len(self.dataset), generator=g).tolist()
        else:
            indices = torch.arange(len(self.dataset)).tolist()
        return iter(indices)

    def __len__(self):
        return len(self.dataset)

    def set_epoch(self, epoch):
        # Called once per epoch so each epoch gets a fresh permutation.
        self.epoch = epoch
95
+
96
+
97
class DistributedSampler(Sampler):
    """Sampler that restricts data loading to a subset of the dataset.

    It is especially useful in conjunction with
    :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
    process can pass a DistributedSampler instance as a DataLoader sampler,
    and load a subset of the original dataset that is exclusive to it.

    .. note::
        Dataset is assumed to be of constant size.

    Arguments:
        dataset: Dataset used for sampling.
        num_replicas (optional): Number of processes participating in
            distributed training.
        rank (optional): Rank of the current process within num_replicas.
        shuffle (optional): Whether to shuffle (deterministically per epoch).
    """

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError(
                    "Requires distributed package to be available")
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError(
                    "Requires distributed package to be available")
            rank = dist.get_rank()
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        # Round up so every rank draws the same number of samples.
        self.num_samples = int(
            math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        self.shuffle = shuffle

    def __iter__(self):
        if self.shuffle:
            # Deterministically shuffle based on epoch.
            # BUG FIX: the seeded generator was created but never passed to
            # randperm, so the permutation was random on every call — ranks
            # could disagree on the order and produce overlapping/missing
            # samples, and iteration was not reproducible within an epoch.
            g = torch.Generator()
            g.manual_seed(self.epoch)
            indices = torch.randperm(len(self.dataset), generator=g).tolist()
        else:
            indices = torch.arange(len(self.dataset)).tolist()

        # Add extra samples (wrapping around) to make the total evenly
        # divisible across replicas.
        indices += indices[: (self.total_size - len(indices))]
        assert len(indices) == self.total_size

        # Subsample this rank's contiguous slice.
        offset = self.num_samples * self.rank
        indices = indices[offset: offset + self.num_samples]
        assert len(indices) == self.num_samples

        return iter(indices)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        # Called once per epoch so each epoch uses a fresh permutation.
        self.epoch = epoch
157
+
158
+
159
class InfiniteOrderedSampler(Sampler):
    """Yields up to ``limit_size`` indices, reshuffling once per full pass."""

    def __init__(self, data_source, limit_size):
        self.data_source = data_source
        self.limit_size = limit_size

    def __iter__(self):
        total = len(self.data_source)

        def generate():
            emitted = 0
            order = None
            while emitted < self.limit_size:
                pos = emitted % total
                if pos == 0:
                    # Start of a new pass: draw a fresh permutation.
                    order = torch.randperm(total).tolist()
                yield order[pos]
                emitted += 1

        return generate()

    def __len__(self):
        return self.limit_size
178
+
179
+
180
class InfiniteDataLoader(Configurable, torch.utils.data.DataLoader):
    """DataLoader that streams up to ``limit_size`` randomly ordered samples."""

    dataset = State()
    batch_size = State(default=256)
    num_workers = State(default=10)
    limit_size = State(default=2 ** 31)

    def __init__(self, **kwargs):
        self.load_all(**kwargs)

        # Command-line batch size overrides the configured one.
        cmd = kwargs['cmd']
        self.batch_size = cmd.get('batch_size', self.batch_size)

        torch.utils.data.DataLoader.__init__(
            self, self.dataset,
            batch_size=self.batch_size, num_workers=self.num_workers,
            sampler=InfiniteOrderedSampler(self.dataset, self.limit_size),
            worker_init_fn=default_worker_init_fn,
        )
200
+
201
+
202
class RandomSampleSampler(Sampler):
    """Samples indices with replacement according to per-item weights."""

    def __init__(self, data_source, weights=None, size=2 ** 31):
        self.data_source = data_source
        if weights is None:
            # Uniform distribution over the dataset.
            self.probabilities = np.full(len(data_source), 1 / len(data_source))
        else:
            self.probabilities = np.array(weights) / np.sum(weights)
        # Cumulative distribution for inverse-transform sampling.
        self.cum_prob = np.cumsum(self.probabilities)
        self.size = size

    def __iter__(self):
        def draw():
            upper = len(self.data_source) - 1
            for _ in range(self.size):
                # Map a uniform draw through the CDF; clamp to a valid index.
                yield bisect.bisect(self.cum_prob, torch.rand(1)[0], hi=upper)

        return draw()

    def __len__(self):
        return self.size
220
+
221
+
222
class RandomSampleDataLoader(Configurable, torch.utils.data.DataLoader):
    """DataLoader drawing from several concatenated datasets with dataset-level weights."""

    datasets = State()
    weights = State()
    batch_size = State(default=256)
    num_workers = State(default=10)
    size = State(default=2 ** 31)

    def __init__(self, **kwargs):
        self.load_all(**kwargs)

        cmd = kwargs['cmd']
        if 'batch_size' in cmd:
            self.batch_size = cmd['batch_size']

        # Spread each dataset's weight uniformly over its items, so the
        # per-item probabilities sum to that dataset's share.
        per_item_probs = [
            np.full(len(ds), weight / len(ds))
            for ds, weight in zip(self.datasets, self.weights)
        ]
        merged = ConcatDataset(self.datasets)
        probs = np.concatenate(per_item_probs)
        assert(len(merged) == len(probs))

        torch.utils.data.DataLoader.__init__(
            self, merged,
            batch_size=self.batch_size, num_workers=self.num_workers,
            sampler=RandomSampleSampler(merged, probs, self.size),
            worker_init_fn=default_worker_init_fn,
        )
DB/data/dataset.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from torch.utils.data import Dataset as TorchDataset
2
+
3
+ from concern.config import Configurable, State
4
+
5
+
6
class SliceDataset(TorchDataset, Configurable):
    """Exposes a contiguous [start, end) window of another dataset."""

    dataset = State()
    start = State()
    end = State()

    def __init__(self, **kwargs):
        self.load_all(**kwargs)

        # Fall back to the wrapped dataset's full extent.
        self.start = 0 if self.start is None else self.start
        self.end = len(self.dataset) if self.end is None else self.end

    def __getitem__(self, idx):
        # Offset into the wrapped dataset; no extra bounds checking beyond
        # the wrapped dataset's own.
        return self.dataset[self.start + idx]

    def __len__(self):
        return self.end - self.start
DB/data/image_dataset.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import functools
2
+ import logging
3
+ import bisect
4
+
5
+ import torch.utils.data as data
6
+ import cv2
7
+ import numpy as np
8
+ import glob
9
+ from concern.config import Configurable, State
10
+ import math
11
+
12
class ImageDataset(data.Dataset, Configurable):
    r'''Dataset reading from images.
    Args:
        Processes: A series of Callable object, which accept as parameter and return the data dict,
            typically inherrited the `DataProcess`(data/processes/data_process.py) class.
    '''
    data_dir = State()    # list of dataset root directories
    data_list = State()   # list of text files, one image filename per line
    processes = State(default=[])  # DataProcess pipeline applied to each sample

    def __init__(self, data_dir=None, data_list=None, cmd={}, **kwargs):
        self.load_all(**kwargs)
        self.data_dir = data_dir or self.data_dir
        self.data_list = data_list or self.data_list
        # Train/eval mode is inferred from the first list filename
        # (e.g. ".../train_list.txt" -> training).
        if 'train' in self.data_list[0]:
            self.is_training = True
        else:
            self.is_training = False
        self.debug = cmd.get('debug', False)
        self.image_paths = []
        self.gt_paths = []
        self.get_all_samples()

    def get_all_samples(self):
        # Collect (image path, ground-truth path) pairs for every configured
        # dataset. Expected layout: <root>/{train,test}_images/<name> and
        # <root>/{train,test}_gts/gt_<stem>.txt
        for i in range(len(self.data_dir)):
            with open(self.data_list[i], 'r') as fid:
                image_list = fid.readlines()
            if self.is_training:
                image_path=[self.data_dir[i]+'/train_images/'+timg.strip() for timg in image_list]
                #gt_path=[self.data_dir[i]+'/train_gts/gt_'+timg.strip()+'.txt' for timg in image_list]
                gt_path=[self.data_dir[i]+'/train_gts/gt_'+timg.strip().split('.')[0]+'.txt' for timg in image_list]
            else:
                image_path=[self.data_dir[i]+'/test_images/'+timg.strip() for timg in image_list]
                #gt_path=[self.data_dir[i]+'/train_gts/gt_'+timg.strip()+'.txt' for timg in image_list]
                gt_path=[self.data_dir[i]+'/test_gts/gt_'+timg.strip().split('.')[0]+'.txt' for timg in image_list]
                # image_path=[self.data_dir[i]+'/test_images/gt_'+timg.strip() for timg in image_list]
                # print(self.data_dir[i])
                # if 'TD500' in self.data_list[i] or 'total_text' in self.data_list[i]:
                #     #gt_path=[self.data_dir[i]+'/test_gts/'+timg.strip()+'.txt' for timg in image_list]
                #     gt_path=[self.data_dir[i]+'/test_gts/'+timg.strip().split('.')[0]+'.gt' for timg in image_list]
                # else:
                #     gt_path=[self.data_dir[i]+'/test_gts/'+timg.strip().split('.')[0]+'.gt' for timg in image_list]
            self.image_paths += image_path
            self.gt_paths += gt_path
        self.num_samples = len(self.image_paths)
        self.targets = self.load_ann()
        if self.is_training:
            assert len(self.image_paths) == len(self.targets)

    def load_ann(self):
        # Parse ground-truth files: one text instance per line, formatted as
        # "x1,y1,...,xn,yn,label". Returns, per image, a list of dicts with
        # 'poly' (list of [x, y] pairs) and 'text' (transcription; '###'
        # marks an ignored instance).
        res = []
        for gt in self.gt_paths:
            lines = []
            reader = open(gt, 'r').readlines()
            for line in reader:
                item = {}
                parts = line.strip().split(',')
                label = parts[-1]
                # Datasets whose root contains 'TD' use '1' as a difficult/
                # ignore flag; map it to the common '###' ignore marker.
                if 'TD' in self.data_dir[0] and label == '1':
                    label = '###'
                # Strip UTF-8 BOM residue that some annotation files carry.
                line = [i.strip('\ufeff').strip('\xef\xbb\xbf') for i in parts]
                if 'icdar' in self.data_dir[0]:
                    # ICDAR ground truth is always a quadrilateral (8 coords).
                    poly = np.array(list(map(float, line[:8]))).reshape((-1, 2)).tolist()
                else:
                    # Other datasets may provide polygons with a variable
                    # (even) number of coordinates; drop a trailing odd one.
                    num_points = math.floor((len(line) - 1) / 2) * 2
                    poly = np.array(list(map(float, line[:num_points]))).reshape((-1, 2)).tolist()
                item['poly'] = poly
                item['text'] = label
                lines.append(item)
            res.append(lines)
        return res

    def __getitem__(self, index, retry=0):
        # Wrap out-of-range indices so samplers may request indices past the
        # dataset's end.
        if index >= self.num_samples:
            index = index % self.num_samples
        data = {}
        image_path = self.image_paths[index]
        img = cv2.imread(image_path, cv2.IMREAD_COLOR).astype('float32')
        if self.is_training:
            data['filename'] = image_path
            data['data_id'] = image_path
        else:
            # Evaluation tooling expects bare file names, not full paths.
            data['filename'] = image_path.split('/')[-1]
            data['data_id'] = image_path.split('/')[-1]
        data['image'] = img
        target = self.targets[index]
        data['lines'] = target
        # Run the configured processing pipeline (augmentation, map
        # generation, ...) over the raw sample.
        if self.processes is not None:
            for data_process in self.processes:
                data = data_process(data)
        return data

    def __len__(self):
        return len(self.image_paths)
DB/data/make_border_map.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+ import numpy as np
3
+ import cv2
4
+ from shapely.geometry import Polygon
5
+ import pyclipper
6
+
7
+ from concern.config import Configurable, State
8
+
9
+
10
class MakeBorderMap(Configurable):
    r'''Renders the threshold ("border") map and its mask for each sample.

    For every non-ignored text polygon, a band around the polygon border is
    filled with values that decay linearly with the distance to the border,
    then rescaled into [thresh_min, thresh_max]. The outputs are stored in
    data['thresh_map'] and data['thresh_mask'].
    '''
    shrink_ratio = State(default=0.4)  # controls the width of the border band
    thresh_min = State(default=0.3)    # map value far from any border
    thresh_max = State(default=0.7)    # map value on the polygon border

    def __init__(self, cmd={}, *args, **kwargs):
        self.load_all(cmd=cmd, **kwargs)
        # Silence warnings (e.g. from the geometry libraries) globally.
        warnings.simplefilter("ignore")


    def __call__(self, data, *args, **kwargs):
        image = data['image']
        polygons = data['polygons']
        ignore_tags = data['ignore_tags']

        canvas = np.zeros(image.shape[:2], dtype=np.float32)
        mask = np.zeros(image.shape[:2], dtype=np.float32)

        for i in range(len(polygons)):
            if ignore_tags[i]:
                continue
            self.draw_border_map(polygons[i], canvas, mask=mask)
        # Rescale the normalized [0, 1] border response into
        # [thresh_min, thresh_max].
        canvas = canvas * (self.thresh_max - self.thresh_min) + self.thresh_min
        data['thresh_map'] = canvas
        data['thresh_mask'] = mask
        return data

    def draw_border_map(self, polygon, canvas, mask):
        polygon = np.array(polygon)
        assert polygon.ndim == 2
        assert polygon.shape[1] == 2

        # Dilation distance derived from the polygon's area/perimeter and
        # the shrink ratio (same formula as the label shrinking step).
        polygon_shape = Polygon(polygon)
        distance = polygon_shape.area * \
            (1 - np.power(self.shrink_ratio, 2)) / polygon_shape.length
        subject = [tuple(l) for l in polygon]
        padding = pyclipper.PyclipperOffset()
        padding.AddPath(subject, pyclipper.JT_ROUND,
                        pyclipper.ET_CLOSEDPOLYGON)
        # Dilated polygon delimits the border band; mark it in the mask.
        padded_polygon = np.array(padding.Execute(distance)[0])
        cv2.fillPoly(mask, [padded_polygon.astype(np.int32)], 1.0)

        # Bounding box of the dilated polygon; all distance computations
        # below happen in this local window.
        xmin = padded_polygon[:, 0].min()
        xmax = padded_polygon[:, 0].max()
        ymin = padded_polygon[:, 1].min()
        ymax = padded_polygon[:, 1].max()
        width = xmax - xmin + 1
        height = ymax - ymin + 1

        # Shift polygon coordinates into the local window.
        polygon[:, 0] = polygon[:, 0] - xmin
        polygon[:, 1] = polygon[:, 1] - ymin

        # Per-pixel coordinate grids of the local window.
        xs = np.broadcast_to(
            np.linspace(0, width - 1, num=width).reshape(1, width), (height, width))
        ys = np.broadcast_to(
            np.linspace(0, height - 1, num=height).reshape(height, 1), (height, width))

        # For each polygon edge, compute per-pixel distance to that edge,
        # normalized by the dilation distance and clipped to [0, 1]; keep
        # the minimum over all edges.
        distance_map = np.zeros(
            (polygon.shape[0], height, width), dtype=np.float32)
        for i in range(polygon.shape[0]):
            j = (i + 1) % polygon.shape[0]
            absolute_distance = self.distance(xs, ys, polygon[i], polygon[j])
            distance_map[i] = np.clip(absolute_distance / distance, 0, 1)
        distance_map = distance_map.min(axis=0)

        # Clamp the window to the canvas and merge: 1 on the border,
        # decaying towards 0 at `distance` away; keep the max where
        # neighboring instances overlap.
        xmin_valid = min(max(0, xmin), canvas.shape[1] - 1)
        xmax_valid = min(max(0, xmax), canvas.shape[1] - 1)
        ymin_valid = min(max(0, ymin), canvas.shape[0] - 1)
        ymax_valid = min(max(0, ymax), canvas.shape[0] - 1)
        canvas[ymin_valid:ymax_valid + 1, xmin_valid:xmax_valid + 1] = np.fmax(
            1 - distance_map[
                ymin_valid-ymin:ymax_valid-ymax+height,
                xmin_valid-xmin:xmax_valid-xmax+width],
            canvas[ymin_valid:ymax_valid + 1, xmin_valid:xmax_valid + 1])

    def distance(self, xs, ys, point_1, point_2):
        '''
        compute the distance from point to a line
        ys: coordinates in the first axis
        xs: coordinates in the second axis
        point_1, point_2: (x, y), the end of the line
        '''
        height, width = xs.shape[:2]
        square_distance_1 = np.square(
            xs - point_1[0]) + np.square(ys - point_1[1])
        square_distance_2 = np.square(
            xs - point_2[0]) + np.square(ys - point_2[1])
        square_distance = np.square(
            point_1[0] - point_2[0]) + np.square(point_1[1] - point_2[1])

        # Law of cosines: angle at the pixel between the two endpoints.
        cosin = (square_distance - square_distance_1 - square_distance_2) / \
            (2 * np.sqrt(square_distance_1 * square_distance_2))
        square_sin = 1 - np.square(cosin)
        square_sin = np.nan_to_num(square_sin)
        # Perpendicular distance from the pixel to the (infinite) line.
        result = np.sqrt(square_distance_1 * square_distance_2 *
                         square_sin / square_distance)

        # Outside the segment (obtuse angle), fall back to the distance to
        # the nearer endpoint.
        result[cosin < 0] = np.sqrt(np.fmin(
            square_distance_1, square_distance_2))[cosin < 0]
        # self.extend_line(point_1, point_2, result)
        return result

    def extend_line(self, point_1, point_2, result):
        # Draw constant-valued extensions of the segment beyond both
        # endpoints (currently unused; see the commented call above).
        ex_point_1 = (int(round(point_1[0] + (point_1[0] - point_2[0]) * (1 + self.shrink_ratio))),
                      int(round(point_1[1] + (point_1[1] - point_2[1]) * (1 + self.shrink_ratio))))
        cv2.line(result, tuple(ex_point_1), tuple(point_1),
                 4096.0, 1, lineType=cv2.LINE_AA, shift=0)
        ex_point_2 = (int(round(point_2[0] + (point_2[0] - point_1[0]) * (1 + self.shrink_ratio))),
                      int(round(point_2[1] + (point_2[1] - point_1[1]) * (1 + self.shrink_ratio))))
        cv2.line(result, tuple(ex_point_2), tuple(point_2),
                 4096.0, 1, lineType=cv2.LINE_AA, shift=0)
        return ex_point_1, ex_point_2
DB/data/make_seg_detector_data.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections import OrderedDict
2
+
3
+ import numpy as np
4
+ import cv2
5
+ from shapely.geometry import Polygon
6
+ import pyclipper
7
+
8
+ from concern.config import Configurable, State
9
+
10
+
11
class MakeSegDetectorData(Configurable):
    # Builds the shrunk text-region ground truth ('gt') and the training
    # mask ('mask') from polygon annotations: each kept polygon is shrunk
    # by `shrink_ratio` and rasterized as positive; ignored or too-small
    # instances are zeroed out of the mask instead.
    min_text_size = State(default=8)   # instances smaller than this are ignored
    shrink_ratio = State(default=0.4)  # label shrinking ratio

    def __init__(self, **kwargs):
        self.load_all(**kwargs)

    def __call__(self, data, *args, **kwargs):
        '''
        data: a dict typically returned from `MakeICDARData`,
            where the following keys are contained:
            image*, polygons*, ignore_tags*, shape, filename
            * means required.
        '''
        image = data['image']
        polygons = data['polygons']
        ignore_tags = data['ignore_tags']
        image = data['image']
        filename = data['filename']

        h, w = image.shape[:2]
        polygons, ignore_tags = self.validate_polygons(
            polygons, ignore_tags, h, w)
        gt = np.zeros((1, h, w), dtype=np.float32)
        mask = np.ones((h, w), dtype=np.float32)
        for i in range(polygons.shape[0]):
            polygon = polygons[i]
            # Instance extents estimated from opposite quad edges.
            height = min(np.linalg.norm(polygon[0] - polygon[3]),
                         np.linalg.norm(polygon[1] - polygon[2]))
            width = min(np.linalg.norm(polygon[0] - polygon[1]),
                        np.linalg.norm(polygon[2] - polygon[3]))
            if ignore_tags[i] or min(height, width) < self.min_text_size:
                # Too small or ignored: exclude the region from the loss.
                cv2.fillPoly(mask, polygon.astype(
                    np.int32)[np.newaxis, :, :], 0)
                ignore_tags[i] = True
            else:
                # Shrink the polygon inward by a distance derived from its
                # area/perimeter and the shrink ratio, then rasterize it.
                polygon_shape = Polygon(polygon)
                distance = polygon_shape.area * \
                    (1 - np.power(self.shrink_ratio, 2)) / polygon_shape.length
                subject = [tuple(l) for l in polygons[i]]
                padding = pyclipper.PyclipperOffset()
                padding.AddPath(subject, pyclipper.JT_ROUND,
                                pyclipper.ET_CLOSEDPOLYGON)
                shrinked = padding.Execute(-distance)
                if shrinked == []:
                    # Shrinking collapsed the polygon: treat as ignored.
                    cv2.fillPoly(mask, polygon.astype(
                        np.int32)[np.newaxis, :, :], 0)
                    ignore_tags[i] = True
                    continue
                shrinked = np.array(shrinked[0]).reshape(-1, 2)
                cv2.fillPoly(gt[0], [shrinked.astype(np.int32)], 1)

        if filename is None:
            filename = ''
        data.update(image=image,
                    polygons=polygons,
                    gt=gt, mask=mask, filename=filename)
        return data

    def validate_polygons(self, polygons, ignore_tags, h, w):
        '''
        polygons (numpy.array, required): of shape (num_instances, num_points, 2)
        '''
        if polygons.shape[0] == 0:
            return polygons, ignore_tags
        assert polygons.shape[0] == len(ignore_tags)

        # Clamp all vertices onto the image.
        polygons[:, :, 0] = np.clip(polygons[:, :, 0], 0, w - 1)
        polygons[:, :, 1] = np.clip(polygons[:, :, 1], 0, h - 1)

        for i in range(polygons.shape[0]):
            area = self.polygon_area(polygons[i])
            # Degenerate (near-zero area) polygons are ignored.
            if abs(area) < 1:
                ignore_tags[i] = True
            # Positive signed area means reversed orientation: flip the
            # vertex order to keep a consistent winding.
            if area > 0:
                polygons[i] = polygons[i][(0, 3, 2, 1), :]
        return polygons, ignore_tags

    def polygon_area(self, polygon):
        # Signed area via the shoelace formula over the quad's four edges.
        edge = [
            (polygon[1][0] - polygon[0][0]) * (polygon[1][1] + polygon[0][1]),
            (polygon[2][0] - polygon[1][0]) * (polygon[2][1] + polygon[1][1]),
            (polygon[3][0] - polygon[2][0]) * (polygon[3][1] + polygon[2][1]),
            (polygon[0][0] - polygon[3][0]) * (polygon[0][1] + polygon[3][1])
        ]
        return np.sum(edge) / 2.
DB/data/meta_loader.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pickle
2
+ import hashlib
3
+ import os
4
+ import io
5
+ import urllib.parse as urlparse
6
+ import warnings
7
+ import numpy as np
8
+ from concern.charset_tool import stringQ2B
9
+ from hanziconv import HanziConv
10
+ from concern.config import Configurable, State
11
+ from data.text_lines import TextLines
12
+
13
class DataIdMetaLoader(MetaLoader):
    # NOTE(review): `MetaLoader` is neither defined nor imported anywhere in
    # this module, so importing this file raises NameError — the base class
    # import appears to have been lost; confirm and restore it.
    return_dict = State(default=False)  # when True, return the meta dict instead of the bare id
    scan_meta = False

    def __init__(self, return_dict=None, cmd={}, **kwargs):
        super().__init__(cmd=cmd, **kwargs)
        # An explicit argument overrides the configured State value.
        if return_dict is not None:
            self.return_dict = return_dict

    def parse_meta(self, data_id):
        # The meta for this loader is just the data id itself.
        return dict(data_id=data_id)

    def post_prosess(self, meta):
        # (sic) name kept as-is; presumably invoked by MetaLoader — verify.
        if self.return_dict:
            return meta
        return meta['data_id']
29
+
30
class MetaCache(Configurable):
    """Abstract read/write cache for dataset meta information."""

    META_FILE = 'meta_cache.pickle'
    client = State(default='all')

    def __init__(self, **kwargs):
        self.load_all(**kwargs)

    def cache(self, nori_path, meta=None):
        # Dual-purpose entry point: without meta -> load; with meta -> store.
        if meta is None:
            return self.read(nori_path)
        return self.save(nori_path, meta)

    def read(self, nori_path):
        raise NotImplementedError

    def save(self, nori_path, meta):
        raise NotImplementedError
48
+
49
+
50
class FileMetaCache(MetaCache):
    """MetaCache backed by pickle files on the local filesystem."""

    storage_dir = State(default='/data/.meta_cache')

    def __init__(self, storage_dir=None, cmd={}, **kwargs):
        super(FileMetaCache, self).__init__(cmd=cmd, **kwargs)

        # Precedence: explicit argument > cmd override > configured default.
        self.storage_dir = cmd.get('storage_dir', self.storage_dir)
        if storage_dir is not None:
            self.storage_dir = storage_dir
        self.debug = cmd.get('debug', False)

    def ensure_dir(self):
        # Lazily create the cache directory before the first save.
        if not os.path.exists(self.storage_dir):
            os.makedirs(self.storage_dir)

    def storate_path(self, nori_path):
        # (sic) name kept for compatibility with existing callers.
        return os.path.join(self.storage_dir, self.hash(nori_path) + '.pickle')

    def hash(self, nori_path: str):
        # One cache entry per (path, client) pair.
        digest = hashlib.md5(nori_path.encode('utf-8')).hexdigest()
        return digest + '-' + self.client

    def read(self, nori_path):
        file_path = self.storate_path(nori_path)
        if not os.path.exists(file_path):
            warnings.warn('Meta cache not found: ' + file_path)
            warnings.warn('Now trying to read meta from nori')
            return None
        with open(file_path, 'rb') as reader:
            try:
                return pickle.load(reader)
            except EOFError as e:
                # A truncated cache file is treated as a miss, unless we are
                # debugging and want to see the failure.
                if self.debug:
                    raise e
                return None

    def save(self, nori_path, meta):
        self.ensure_dir()
        with open(self.storate_path(nori_path), 'wb') as writer:
            pickle.dump(meta, writer)
        return True
92
+
DB/data/processes/__init__.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ from .normalize_image import NormalizeImage
2
+ from .make_center_points import MakeCenterPoints
3
+ from .resize_image import ResizeImage, ResizeData
4
+ from .filter_keys import FilterKeys
5
+ from .make_center_map import MakeCenterMap
6
+ from .augment_data import AugmentData, AugmentDetectionData
7
+ from .random_crop_data import RandomCropData
8
+ from .make_icdar_data import MakeICDARData, ICDARCollectFN
9
+ from .make_seg_detection_data import MakeSegDetectionData
10
+ from .make_border_map import MakeBorderMap
DB/data/processes/augment_data.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import imgaug
2
+ import numpy as np
3
+
4
+ from concern.config import State
5
+ from .data_process import DataProcess
6
+ from data.augmenter import AugmenterBuilder
7
+ import cv2
8
+ import math
9
+
10
+
11
class AugmentData(DataProcess):
    """Applies the configured imgaug pipeline (or a plain resize) to data['image']."""

    augmenter_args = State(autoload=False)

    def __init__(self, **kwargs):
        self.augmenter_args = kwargs.get('augmenter_args')
        self.keep_ratio = kwargs.get('keep_ratio')
        self.only_resize = kwargs.get('only_resize')
        self.augmenter = AugmenterBuilder().build(self.augmenter_args)

    def may_augment_annotation(self, aug, data, shape):
        # Hook for subclasses that also transform annotations.
        # BUG FIX: `process` calls this with (aug, data, shape) but the base
        # signature only accepted (aug, data), so using AugmentData directly
        # raised TypeError. Subclasses (e.g. AugmentDetectionData) already
        # implement the 3-argument form.
        pass

    def resize_image(self, image):
        origin_height, origin_width, _ = image.shape
        resize_shape = self.augmenter_args[0][1]
        height = resize_shape['height']
        width = resize_shape['width']
        if self.keep_ratio:
            # Preserve aspect ratio; round the width up to a multiple of 32.
            width = origin_width * height / origin_height
            width = math.ceil(width / 32) * 32
        image = cv2.resize(image, (width, height))
        return image

    def process(self, data):
        image = data['image']
        aug = None
        shape = image.shape

        if self.augmenter:
            aug = self.augmenter.to_deterministic()
            if self.only_resize:
                data['image'] = self.resize_image(image)
            else:
                data['image'] = aug.augment_image(image)
            # Annotations must follow the exact same (deterministic)
            # transform as the image.
            self.may_augment_annotation(aug, data, shape)

        filename = data.get('filename', data.get('data_id', ''))
        data.update(filename=filename, shape=shape[:2])
        # only_resize implies evaluation mode.
        if not self.only_resize:
            data['is_training'] = True
        else:
            data['is_training'] = False
        return data
55
+
56
+
57
class AugmentDetectionData(AugmentData):
    """AugmentData variant that also maps polygon annotations through the augmenter."""

    def may_augment_annotation(self, aug, data, shape):
        if aug is None:
            return data

        transformed = []
        for line in data['lines']:
            if self.only_resize:
                # Plain resize: keep the raw points unchanged.
                points = [(p[0], p[1]) for p in line['poly']]
            else:
                points = self.may_augment_poly(aug, shape, line['poly'])
            transformed.append({
                'points': points,
                'ignore': line['text'] == '###',
                'text': line['text'],
            })
        data['polys'] = transformed
        return data

    def may_augment_poly(self, aug, img_shape, poly):
        # Route polygon vertices through imgaug as keypoints so they undergo
        # the same geometric transform as the image.
        kps = imgaug.KeypointsOnImage(
            [imgaug.Keypoint(p[0], p[1]) for p in poly], shape=img_shape)
        moved = aug.augment_keypoints([kps])[0].keypoints
        return [(p.x, p.y) for p in moved]
82
+
DB/data/processes/data_process.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from concern.config import Configurable
2
+
3
+
4
class DataProcess(Configurable):
    r'''Base class for callable transforms over the data dict.'''

    def __call__(self, data):
        # Instances are used as plain callables in processing pipelines.
        return self.process(data)

    def process(self, data):
        raise NotImplementedError

    def render_constant(self, canvas, xmin, xmax, ymin, ymax, value=1, shrink=0):
        '''Fill the (optionally shrunk) axis-aligned box on canvas with value.'''
        def shrink_rect(low, high, ratio):
            # Shrink the interval around its center by `ratio`.
            center = (low + high) / 2
            half = center - low
            return int(center - half * ratio + 0.5), int(center + half * ratio + 0.5)

        if shrink > 0:
            xmin, xmax = shrink_rect(xmin, xmax, shrink)
            ymin, ymax = shrink_rect(ymin, ymax, shrink)

        canvas[int(ymin + 0.5):int(ymax + 0.5) + 1,
               int(xmin + 0.5):int(xmax + 0.5) + 1] = value
        return canvas