Commit 6eee0fe
Duplicate from opencv/object_detection_yolox
Co-authored-by: Abhishek Gola <abhishek-gola@users.noreply.huggingface.co>
- .gitattributes +26 -0
- .gitignore +9 -0
- CMakeLists.txt +29 -0
- LICENSE +201 -0
- README.md +135 -0
- demo.cpp +311 -0
- demo.py +155 -0
- example_outputs/1_res.jpg +3 -0
- example_outputs/2_res.jpg +3 -0
- example_outputs/3_res.jpg +3 -0
- object_detection_yolox_2022nov.onnx +3 -0
- object_detection_yolox_2022nov_int8.onnx +3 -0
- object_detection_yolox_2022nov_int8bq.onnx +3 -0
- yolox.py +85 -0
.gitattributes
ADDED
@@ -0,0 +1,26 @@

# Caffe
*.caffemodel filter=lfs diff=lfs merge=lfs -text

# Tensorflow
*.pb filter=lfs diff=lfs merge=lfs -text
*.pbtxt filter=lfs diff=lfs merge=lfs -text

# Torch
*.t7 filter=lfs diff=lfs merge=lfs -text
*.net filter=lfs diff=lfs merge=lfs -text

# Darknet
*.weights filter=lfs diff=lfs merge=lfs -text

# ONNX
*.onnx filter=lfs diff=lfs merge=lfs -text

# NPY
*.npy filter=lfs diff=lfs merge=lfs -text

# Images
*.jpg filter=lfs diff=lfs merge=lfs -text
*.gif filter=lfs diff=lfs merge=lfs -text
*.png filter=lfs diff=lfs merge=lfs -text
*.webp filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,9 @@
*.pyc
**/__pycache__
**/__pycache__/**

.vscode

build/
**/build
**/build/**
CMakeLists.txt
ADDED
@@ -0,0 +1,29 @@
cmake_minimum_required(VERSION 3.24)
set(project_name "opencv_zoo_object_detection_yolox")

PROJECT (${project_name})

set(OPENCV_VERSION "4.10.0")
set(OPENCV_INSTALLATION_PATH "" CACHE PATH "Where to look for OpenCV installation")
find_package(OpenCV ${OPENCV_VERSION} REQUIRED HINTS ${OPENCV_INSTALLATION_PATH})
# Find OpenCV, you may need to set OpenCV_DIR variable
# to the absolute path to the directory containing OpenCVConfig.cmake file
# via the command line or GUI

file(GLOB SourceFile
    "demo.cpp")
# If the package has been found, several variables will
# be set, you can find the full list with descriptions
# in the OpenCVConfig.cmake file.
# Print some message showing some of them
message(STATUS "OpenCV library status:")
message(STATUS "    config: ${OpenCV_DIR}")
message(STATUS "    version: ${OpenCV_VERSION}")
message(STATUS "    libraries: ${OpenCV_LIBS}")
message(STATUS "    include path: ${OpenCV_INCLUDE_DIRS}")

# Declare the executable target built from your sources
add_executable(${project_name} ${SourceFile})

# Link your application with OpenCV libraries
target_link_libraries(${project_name} PRIVATE ${OpenCV_LIBS})
LICENSE
ADDED
@@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright (c) 2021-2022 Megvii Inc. All rights reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
README.md
ADDED
@@ -0,0 +1,135 @@
# YOLOX

YOLOX is an anchor-free version of YOLO with a simpler design but better performance, aiming to bridge the gap between the research and industrial communities. It is a high-performing object detector that improves on the existing YOLO series, which keeps exploring techniques for an optimal speed/accuracy trade-off in real-time applications.

Key features of the YOLOX object detector (see the decoding sketch after this list):
- **Anchor-free detectors** significantly reduce the number of design parameters
- **A decoupled head for classification, regression, and localization** improves the convergence speed
- **SimOTA advanced label assignment strategy** reduces training time and avoids additional solver hyperparameters
- **Strong data augmentations like MixUp and Mosaic** to boost YOLOX performance
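Anchor-free decoding, the first bullet above, is what `generateAnchors` and the start of `postprocess` in `yolox.py` implement: raw predictions are offsets against a grid of cell centers, one grid per stride. A standalone numpy sketch of that decoding (the random `dets` is only a stand-in for real network output):

```python
import numpy as np

input_size, strides = (640, 640), [8, 16, 32]

# One (x, y) grid cell per output location, plus the stride of each cell.
grids, expanded_strides = [], []
for s in strides:
    h, w = input_size[0] // s, input_size[1] // s
    xv, yv = np.meshgrid(np.arange(w), np.arange(h))
    grids.append(np.stack((xv, yv), axis=2).reshape(-1, 2))
    expanded_strides.append(np.full((h * w, 1), s))
grids = np.concatenate(grids, axis=0)                        # (8400, 2)
expanded_strides = np.concatenate(expanded_strides, axis=0)  # (8400, 1)

# dets: (8400, 85) = [cx, cy, w, h, objectness, 80 class scores] per cell.
dets = np.random.rand(grids.shape[0], 85).astype(np.float32)  # stand-in output
dets[:, :2] = (dets[:, :2] + grids) * expanded_strides  # cell offset -> pixels
dets[:, 2:4] = np.exp(dets[:, 2:4]) * expanded_strides  # log-space size -> pixels
```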
**Note**:
- This version of YOLOX: YOLOX_s
- `object_detection_yolox_2022nov_int8bq.onnx` represents the block-quantized version in int8 precision and is generated using [block_quantize.py](../../tools/quantize/block_quantize.py) with `block_size=64`; the sketch below illustrates the idea.
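The quantization tool itself lives outside this directory, so the following is only a hedged illustration of what per-block int8 weight quantization generally looks like (one float scale per block of 64 consecutive weights), not the actual `block_quantize.py`:

```python
import numpy as np

def block_quantize(weights: np.ndarray, block_size: int = 64):
    """Hypothetical sketch: quantize a float32 array to int8, one scale per block.

    The real tools/quantize/block_quantize.py may differ in details.
    """
    flat = weights.ravel().astype(np.float32)
    pad = (-len(flat)) % block_size
    flat = np.pad(flat, (0, pad))                  # pad so blocks divide evenly
    blocks = flat.reshape(-1, block_size)
    scales = np.abs(blocks).max(axis=1, keepdims=True) / 127.0
    scales[scales == 0] = 1.0                      # avoid division by zero
    q = np.clip(np.round(blocks / scales), -127, 127).astype(np.int8)
    return q, scales.astype(np.float32)

def block_dequantize(q, scales):
    return (q.astype(np.float32) * scales).ravel()

w = np.random.randn(1000).astype(np.float32)
q, s = block_quantize(w)
print(np.abs(block_dequantize(q, s)[:len(w)] - w).max())  # small reconstruction error
```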
## Demo

### Python

Run the following command to try the demo:
```shell
# detect on camera input
python demo.py
# detect on an image
python demo.py --input /path/to/image -v
```
Note:
- image result saved as "result.jpg"
- this model requires `opencv-python>=4.8.0`
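The demo can also be bypassed and the `YoloX` wrapper from `yolox.py` driven directly; a minimal sketch (the image path is a placeholder, and the letterbox step mirrors `letterbox()` in `demo.py`):

```python
import cv2 as cv
import numpy as np
from yolox import YoloX

model = YoloX(modelPath="object_detection_yolox_2022nov.onnx")
img = cv.imread("/path/to/image.jpg")  # placeholder path
rgb = cv.cvtColor(img, cv.COLOR_BGR2RGB).astype(np.float32)

# Letterbox to 640x640 as in demo.py: scale to fit, pad with 114.
ratio = min(640 / rgb.shape[0], 640 / rgb.shape[1])
resized = cv.resize(rgb, (int(rgb.shape[1] * ratio), int(rgb.shape[0] * ratio)))
padded = np.full((640, 640, 3), 114.0, dtype=np.float32)
padded[:resized.shape[0], :resized.shape[1]] = resized

dets = model.infer(padded)  # rows of [x0, y0, x1, y1, score, class_id]
for *box, score, cls_id in dets:
    print([int(v / ratio) for v in box], float(score), int(cls_id))  # undo letterbox
```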
### C++

Install the latest OpenCV and CMake >= 3.24.0 to get started:

```shell
# A typical and default installation path of OpenCV is /usr/local
cmake -B build -D OPENCV_INSTALLATION_PATH=/path/to/opencv/installation .
cmake --build build

# detect on camera input
./build/opencv_zoo_object_detection_yolox
# detect on an image
./build/opencv_zoo_object_detection_yolox -m=/path/to/model -i=/path/to/image -v
# get help messages
./build/opencv_zoo_object_detection_yolox -h
```
## Results

Here are some sample results obtained with the model (**yolox_s.onnx**):

![1_res.jpg](./example_outputs/1_res.jpg)
![2_res.jpg](./example_outputs/2_res.jpg)
![3_res.jpg](./example_outputs/3_res.jpg)

Check [benchmark/download_data.py](../../benchmark/download_data.py) for the original images.
## Model metrics

The model is evaluated on [COCO 2017 val](https://cocodataset.org/#download). Results are shown below (the three `all` rows in the recall table correspond to maxDets = 1, 10 and 100 in the standard COCO evaluation):

<table>
<tr><th>Average Precision</th><th>Average Recall</th></tr>
<tr><td>

| area   | IoU       | Average Precision (AP) |
|:-------|:----------|:-----------------------|
| all    | 0.50:0.95 | 0.405 |
| all    | 0.50      | 0.593 |
| all    | 0.75      | 0.437 |
| small  | 0.50:0.95 | 0.232 |
| medium | 0.50:0.95 | 0.448 |
| large  | 0.50:0.95 | 0.541 |

</td><td>

| area   | IoU       | Average Recall (AR) |
|:-------|:----------|:--------------------|
| all    | 0.50:0.95 | 0.326 |
| all    | 0.50:0.95 | 0.531 |
| all    | 0.50:0.95 | 0.574 |
| small  | 0.50:0.95 | 0.365 |
| medium | 0.50:0.95 | 0.634 |
| large  | 0.50:0.95 | 0.724 |

</td></tr> </table>

| class | AP | class | AP | class | AP |
|:--------------|:-------|:-------------|:-------|:---------------|:-------|
| person | 54.109 | bicycle | 31.580 | car | 40.447 |
| motorcycle | 43.477 | airplane | 66.070 | bus | 64.183 |
| train | 64.483 | truck | 35.110 | boat | 24.681 |
| traffic light | 25.068 | fire hydrant | 64.382 | stop sign | 65.333 |
| parking meter | 48.439 | bench | 22.653 | bird | 33.324 |
| cat | 66.394 | dog | 60.096 | horse | 58.080 |
| sheep | 49.456 | cow | 53.596 | elephant | 65.574 |
| bear | 70.541 | zebra | 66.461 | giraffe | 66.780 |
| backpack | 13.095 | umbrella | 41.614 | handbag | 12.865 |
| tie | 29.453 | suitcase | 39.089 | frisbee | 61.712 |
| skis | 21.623 | snowboard | 31.326 | sports ball | 39.820 |
| kite | 41.410 | baseball bat | 27.311 | baseball glove | 36.661 |
| skateboard | 49.374 | surfboard | 35.524 | tennis racket | 45.569 |
| bottle | 37.270 | wine glass | 33.088 | cup | 39.835 |
| fork | 31.620 | knife | 15.265 | spoon | 14.918 |
| bowl | 43.251 | banana | 27.904 | apple | 17.630 |
| sandwich | 32.789 | orange | 29.388 | broccoli | 23.187 |
| carrot | 23.114 | hot dog | 33.716 | pizza | 52.541 |
| donut | 47.980 | cake | 36.160 | chair | 29.707 |
| couch | 46.175 | potted plant | 24.781 | bed | 44.323 |
| dining table | 30.022 | toilet | 64.237 | tv | 57.301 |
| laptop | 58.362 | mouse | 57.774 | remote | 24.271 |
| keyboard | 48.020 | cell phone | 32.376 | microwave | 57.220 |
| oven | 36.168 | toaster | 28.735 | sink | 38.159 |
| refrigerator | 52.876 | book | 15.030 | clock | 48.622 |
| vase | 37.013 | scissors | 26.307 | teddy bear | 45.676 |
| hair drier | 7.255 | toothbrush | 19.374 | | |
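For readers unfamiliar with the COCO notation above, `0.50:0.95` means the metric is averaged over ten IoU thresholds in steps of 0.05:

$$
\mathrm{AP}_{0.50:0.95} = \frac{1}{10} \sum_{t \in \{0.50,\, 0.55,\, \ldots,\, 0.95\}} \mathrm{AP}_{\mathrm{IoU}=t}
$$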
## License

All files in this directory are licensed under the [Apache 2.0 License](./LICENSE).

#### Contributor Details

- Google Summer of Code'22
- Contributor: Sri Siddarth Chakaravarthy
- Github Profile: https://github.com/Sidd1609
- Organisation: OpenCV
- Project: Lightweight object detection models using OpenCV

## Reference

- YOLOX article: https://arxiv.org/abs/2107.08430
- YOLOX weights and training scripts: https://github.com/Megvii-BaseDetection/YOLOX
- YOLOX blog: https://arshren.medium.com/yolox-new-improved-yolo-d430c0e4cf20
- YOLOX-lite: https://github.com/TexasInstruments/edgeai-yolox
demo.cpp
ADDED
@@ -0,0 +1,311 @@
#include <vector>
#include <string>
#include <utility>

#include <opencv2/opencv.hpp>

using namespace std;
using namespace cv;
using namespace dnn;

vector< pair<dnn::Backend, dnn::Target> > backendTargetPairs = {
    std::make_pair<dnn::Backend, dnn::Target>(dnn::DNN_BACKEND_OPENCV, dnn::DNN_TARGET_CPU),
    std::make_pair<dnn::Backend, dnn::Target>(dnn::DNN_BACKEND_CUDA, dnn::DNN_TARGET_CUDA),
    std::make_pair<dnn::Backend, dnn::Target>(dnn::DNN_BACKEND_CUDA, dnn::DNN_TARGET_CUDA_FP16),
    std::make_pair<dnn::Backend, dnn::Target>(dnn::DNN_BACKEND_TIMVX, dnn::DNN_TARGET_NPU),
    std::make_pair<dnn::Backend, dnn::Target>(dnn::DNN_BACKEND_CANN, dnn::DNN_TARGET_NPU) };

vector<string> labelYolox = {
    "person", "bicycle", "car", "motorcycle", "airplane", "bus",
    "train", "truck", "boat", "traffic light", "fire hydrant",
    "stop sign", "parking meter", "bench", "bird", "cat", "dog",
    "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe",
    "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
    "skis", "snowboard", "sports ball", "kite", "baseball bat",
    "baseball glove", "skateboard", "surfboard", "tennis racket",
    "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl",
    "banana", "apple", "sandwich", "orange", "broccoli", "carrot",
    "hot dog", "pizza", "donut", "cake", "chair", "couch",
    "potted plant", "bed", "dining table", "toilet", "tv", "laptop",
    "mouse", "remote", "keyboard", "cell phone", "microwave",
    "oven", "toaster", "sink", "refrigerator", "book", "clock",
    "vase", "scissors", "teddy bear", "hair drier", "toothbrush" };

class YoloX {
private:
    Net net;
    string modelPath;
    Size inputSize;
    float confThreshold;
    float nmsThreshold;
    float objThreshold;
    dnn::Backend backendId;
    dnn::Target targetId;
    int num_classes;
    vector<int> strides;
    Mat expandedStrides;
    Mat grids;

public:
    YoloX(string modPath, float confThresh = 0.35, float nmsThresh = 0.5, float objThresh = 0.5, dnn::Backend bId = DNN_BACKEND_DEFAULT, dnn::Target tId = DNN_TARGET_CPU) :
        modelPath(modPath), confThreshold(confThresh),
        nmsThreshold(nmsThresh), objThreshold(objThresh),
        backendId(bId), targetId(tId)
    {
        this->num_classes = int(labelYolox.size());
        this->net = readNet(modelPath);
        this->inputSize = Size(640, 640);
        this->strides = vector<int>{ 8, 16, 32 };
        this->net.setPreferableBackend(this->backendId);
        this->net.setPreferableTarget(this->targetId);
        this->generateAnchors();
    }

    Mat preprocess(Mat img)
    {
        Mat blob;
        Image2BlobParams paramYolox;
        paramYolox.datalayout = DNN_LAYOUT_NCHW;
        paramYolox.ddepth = CV_32F;
        paramYolox.mean = Scalar::all(0);
        paramYolox.scalefactor = Scalar::all(1);
        paramYolox.size = Size(img.cols, img.rows);
        paramYolox.swapRB = true;

        blob = blobFromImageWithParams(img, paramYolox);
        return blob;
    }

    Mat infer(Mat srcimg)
    {
        Mat inputBlob = this->preprocess(srcimg);

        this->net.setInput(inputBlob);
        vector<Mat> outs;
        this->net.forward(outs, this->net.getUnconnectedOutLayersNames());

        Mat predictions = this->postprocess(outs[0]);
        return predictions;
    }

    Mat postprocess(Mat outputs)
    {
        Mat dets = outputs.reshape(0, outputs.size[1]);
        // Decode cell offsets and log-space sizes against the precomputed grid
        Mat col01;
        add(dets.colRange(0, 2), this->grids, col01);
        Mat col23;
        exp(dets.colRange(2, 4), col23);
        vector<Mat> col = { col01, col23 };
        Mat boxes;
        hconcat(col, boxes);
        float* ptr = this->expandedStrides.ptr<float>(0);
        for (int r = 0; r < boxes.rows; r++, ptr++)
        {
            boxes.rowRange(r, r + 1) = *ptr * boxes.rowRange(r, r + 1);
        }
        // get boxes
        Mat boxes_xyxy(boxes.rows, boxes.cols, CV_32FC1, Scalar(1));
        Mat scores = dets.colRange(5, dets.cols).clone();
        vector<float> maxScores(dets.rows);
        vector<int> maxScoreIdx(dets.rows);
        vector<Rect2d> boxesXYXY(dets.rows);

        // Note: a leftover "ptr++" in this loop header was removed; the stride
        // pointer is exhausted above and is not used here.
        for (int r = 0; r < boxes_xyxy.rows; r++)
        {
            boxes_xyxy.at<float>(r, 0) = boxes.at<float>(r, 0) - boxes.at<float>(r, 2) / 2.f;
            boxes_xyxy.at<float>(r, 1) = boxes.at<float>(r, 1) - boxes.at<float>(r, 3) / 2.f;
            boxes_xyxy.at<float>(r, 2) = boxes.at<float>(r, 0) + boxes.at<float>(r, 2) / 2.f;
            boxes_xyxy.at<float>(r, 3) = boxes.at<float>(r, 1) + boxes.at<float>(r, 3) / 2.f;
            // get scores and class indices
            scores.rowRange(r, r + 1) = scores.rowRange(r, r + 1) * dets.at<float>(r, 4);
            double minVal, maxVal;
            Point maxIdx;
            minMaxLoc(scores.rowRange(r, r + 1), &minVal, &maxVal, nullptr, &maxIdx);
            maxScoreIdx[r] = maxIdx.x;
            maxScores[r] = float(maxVal);
            boxesXYXY[r].x = boxes_xyxy.at<float>(r, 0);
            boxesXYXY[r].y = boxes_xyxy.at<float>(r, 1);
            boxesXYXY[r].width = boxes_xyxy.at<float>(r, 2);
            boxesXYXY[r].height = boxes_xyxy.at<float>(r, 3);
        }

        vector<int> keep;
        NMSBoxesBatched(boxesXYXY, maxScores, maxScoreIdx, this->confThreshold, this->nmsThreshold, keep);
        Mat candidates(int(keep.size()), 6, CV_32FC1);
        int row = 0;
        for (auto idx : keep)
        {
            boxes_xyxy.rowRange(idx, idx + 1).copyTo(candidates(Rect(0, row, 4, 1)));
            candidates.at<float>(row, 4) = maxScores[idx];
            candidates.at<float>(row, 5) = float(maxScoreIdx[idx]);
            row++;
        }
        if (keep.size() == 0)
            return Mat();
        return candidates;
    }

    void generateAnchors()
    {
        vector< tuple<int, int, int> > nb;
        int total = 0;

        for (auto v : this->strides)
        {
            int w = this->inputSize.width / v;
            int h = this->inputSize.height / v;
            nb.push_back(tuple<int, int, int>(w * h, w, v));
            total += w * h;
        }
        this->grids = Mat(total, 2, CV_32FC1);
        this->expandedStrides = Mat(total, 1, CV_32FC1);
        float* ptrGrids = this->grids.ptr<float>(0);
        float* ptrStrides = this->expandedStrides.ptr<float>(0);
        int pos = 0;
        for (auto le : nb)
        {
            int r = get<1>(le);
            for (int i = 0; i < get<0>(le); i++, pos++)
            {
                *ptrGrids++ = float(i % r);
                *ptrGrids++ = float(i / r);
                *ptrStrides++ = float(get<2>(le));
            }
        }
    }
};

std::string keys =
    "{ help h | | Print help message. }"
    "{ model m | object_detection_yolox_2022nov.onnx | Usage: Path to the model, defaults to object_detection_yolox_2022nov.onnx }"
    "{ input i | | Path to input image or video file. Skip this argument to capture frames from a camera.}"
    "{ confidence | 0.5 | Class confidence }"
    "{ obj | 0.5 | Enter object threshold }"
    "{ nms | 0.5 | Enter nms IOU threshold }"
    "{ save s | true | Specify to save results. This flag is invalid when using camera. }"
    "{ vis v | 1 | Specify to open a window for result visualization. This flag is invalid when using camera. }"
    "{ backend bt | 0 | Choose one of computation backends: "
    "0: (default) OpenCV implementation + CPU, "
    "1: CUDA + GPU (CUDA), "
    "2: CUDA + GPU (CUDA FP16), "
    "3: TIM-VX + NPU, "
    "4: CANN + NPU}";

pair<Mat, double> letterBox(Mat srcimg, Size targetSize = Size(640, 640))
{
    // Scale to fit the target size, then pad the remainder with 114
    Mat paddedImg(targetSize.height, targetSize.width, CV_32FC3, Scalar::all(114.0));
    Mat resizeImg;

    double ratio = min(targetSize.height / double(srcimg.rows), targetSize.width / double(srcimg.cols));
    resize(srcimg, resizeImg, Size(int(srcimg.cols * ratio), int(srcimg.rows * ratio)), 0, 0, INTER_LINEAR);
    resizeImg.copyTo(paddedImg(Rect(0, 0, int(srcimg.cols * ratio), int(srcimg.rows * ratio))));
    return pair<Mat, double>(paddedImg, ratio);
}

Mat unLetterBox(Mat bbox, double letterboxScale)
{
    return bbox / letterboxScale;
}

Mat visualize(Mat dets, Mat srcimg, double letterbox_scale, double fps = -1)
{
    Mat resImg = srcimg.clone();

    if (fps > 0)
        putText(resImg, format("FPS: %.2f", fps), Point(10, 25), FONT_HERSHEY_SIMPLEX, 1, Scalar(0, 0, 255), 2);

    for (int row = 0; row < dets.rows; row++)
    {
        Mat boxF = unLetterBox(dets(Rect(0, row, 4, 1)), letterbox_scale);
        Mat box;
        boxF.convertTo(box, CV_32S);
        float score = dets.at<float>(row, 4);
        int clsId = int(dets.at<float>(row, 5));

        int x0 = box.at<int>(0, 0);
        int y0 = box.at<int>(0, 1);
        int x1 = box.at<int>(0, 2);
        int y1 = box.at<int>(0, 3);

        string text = format("%s : %f", labelYolox[clsId].c_str(), score * 100);
        int font = FONT_HERSHEY_SIMPLEX;
        int baseLine = 0;
        Size txtSize = getTextSize(text, font, 0.4, 1, &baseLine);
        rectangle(resImg, Point(x0, y0), Point(x1, y1), Scalar(0, 255, 0), 2);
        rectangle(resImg, Point(x0, y0 + 1), Point(x0 + txtSize.width + 1, y0 + int(1.5 * txtSize.height)), Scalar(255, 255, 255), -1);
        putText(resImg, text, Point(x0, y0 + txtSize.height), font, 0.4, Scalar(0, 0, 0), 1);
    }

    return resImg;
}

int main(int argc, char** argv)
{
    CommandLineParser parser(argc, argv, keys);

    parser.about("Use this script to run Yolox deep learning networks in opencv_zoo using OpenCV.");
    if (parser.has("help"))
    {
        parser.printMessage();
        return 0;
    }

    string model = parser.get<String>("model");
    float confThreshold = parser.get<float>("confidence");
    float objThreshold = parser.get<float>("obj");
    float nmsThreshold = parser.get<float>("nms");
    bool vis = parser.get<bool>("vis");
    bool save = parser.get<bool>("save");
    int backendTargetid = parser.get<int>("backend");

    if (model.empty())
    {
        CV_Error(Error::StsError, "Model file " + model + " not found");
    }

    YoloX modelNet(model, confThreshold, nmsThreshold, objThreshold,
                   backendTargetPairs[backendTargetid].first, backendTargetPairs[backendTargetid].second);
    //! [Open a video file or an image file or a camera stream]
    VideoCapture cap;
    if (parser.has("input"))
        cap.open(samples::findFile(parser.get<String>("input")));
    else
        cap.open(0);
    if (!cap.isOpened())
        CV_Error(Error::StsError, "Cannot open video or file");
    Mat frame, inputBlob;
    double letterboxScale;

    static const std::string kWinName = model;
    int nbInference = 0;
    while (waitKey(1) < 0)
    {
        cap >> frame;
        if (frame.empty())
        {
            cout << "Frame is empty" << endl;
            waitKey();
            break;
        }
        pair<Mat, double> w = letterBox(frame);
        inputBlob = get<0>(w);
        letterboxScale = get<1>(w);
        TickMeter tm;
        tm.start();
        Mat predictions = modelNet.infer(inputBlob);
        tm.stop();
        cout << "Inference time: " << tm.getTimeMilli() << " ms\n";
        Mat img = visualize(predictions, frame, letterboxScale, tm.getFPS());
        if (save && parser.has("input"))
        {
            imwrite("result.jpg", img);
        }
        if (vis)
        {
            imshow(kWinName, img);
        }
    }
    return 0;
}
demo.py
ADDED
@@ -0,0 +1,155 @@
import numpy as np
import cv2 as cv
import argparse

# Check OpenCV version
opencv_python_version = lambda str_version: tuple(map(int, (str_version.split("."))))
assert opencv_python_version(cv.__version__) >= opencv_python_version("4.10.0"), \
       "Please install the latest opencv-python to run this demo: python3 -m pip install --upgrade opencv-python"

from yolox import YoloX

# Valid combinations of backends and targets
backend_target_pairs = [
    [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU],
    [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA],
    [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16],
    [cv.dnn.DNN_BACKEND_TIMVX, cv.dnn.DNN_TARGET_NPU],
    [cv.dnn.DNN_BACKEND_CANN, cv.dnn.DNN_TARGET_NPU]
]

classes = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
           'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
           'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',
           'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
           'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
           'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',
           'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
           'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
           'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
           'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
           'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',
           'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
           'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
           'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush')

def letterbox(srcimg, target_size=(640, 640)):
    # Scale to fit the target size, then pad the remainder with 114
    padded_img = np.ones((target_size[0], target_size[1], 3)).astype(np.float32) * 114.0
    ratio = min(target_size[0] / srcimg.shape[0], target_size[1] / srcimg.shape[1])
    resized_img = cv.resize(
        srcimg, (int(srcimg.shape[1] * ratio), int(srcimg.shape[0] * ratio)), interpolation=cv.INTER_LINEAR
    ).astype(np.float32)
    padded_img[: int(srcimg.shape[0] * ratio), : int(srcimg.shape[1] * ratio)] = resized_img

    return padded_img, ratio

def unletterbox(bbox, letterbox_scale):
    return bbox / letterbox_scale

def vis(dets, srcimg, letterbox_scale, fps=None):
    res_img = srcimg.copy()

    if fps is not None:
        fps_label = "FPS: %.2f" % fps
        cv.putText(res_img, fps_label, (10, 25), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

    for det in dets:
        box = unletterbox(det[:4], letterbox_scale).astype(np.int32)
        score = det[-2]
        cls_id = int(det[-1])

        x0, y0, x1, y1 = box

        text = '{}:{:.1f}%'.format(classes[cls_id], score * 100)
        font = cv.FONT_HERSHEY_SIMPLEX
        txt_size = cv.getTextSize(text, font, 0.4, 1)[0]
        cv.rectangle(res_img, (x0, y0), (x1, y1), (0, 255, 0), 2)
        cv.rectangle(res_img, (x0, y0 + 1), (x0 + txt_size[0] + 1, y0 + int(1.5 * txt_size[1])), (255, 255, 255), -1)
        cv.putText(res_img, text, (x0, y0 + txt_size[1]), font, 0.4, (0, 0, 0), thickness=1)

    return res_img

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='YoloX inference using OpenCV, a contribution by Sri Siddarth Chakaravarthy as part of GSoC 2022')
    parser.add_argument('--input', '-i', type=str,
                        help='Path to the input image. Omit for using default camera.')
    parser.add_argument('--model', '-m', type=str, default='object_detection_yolox_2022nov.onnx',
                        help="Path to the model")
    parser.add_argument('--backend_target', '-bt', type=int, default=0,
                        help='''Choose one of the backend-target pair to run this demo:
                        {:d}: (default) OpenCV implementation + CPU,
                        {:d}: CUDA + GPU (CUDA),
                        {:d}: CUDA + GPU (CUDA FP16),
                        {:d}: TIM-VX + NPU,
                        {:d}: CANN + NPU
                        '''.format(*[x for x in range(len(backend_target_pairs))]))
    parser.add_argument('--confidence', default=0.5, type=float,
                        help='Class confidence')
    parser.add_argument('--nms', default=0.5, type=float,
                        help='Enter nms IOU threshold')
    parser.add_argument('--obj', default=0.5, type=float,
                        help='Enter object threshold')
    parser.add_argument('--save', '-s', action='store_true',
                        help='Specify to save results. This flag is invalid when using camera.')
    parser.add_argument('--vis', '-v', action='store_true',
                        help='Specify to open a window for result visualization. This flag is invalid when using camera.')
    args = parser.parse_args()

    backend_id = backend_target_pairs[args.backend_target][0]
    target_id = backend_target_pairs[args.backend_target][1]

    model_net = YoloX(modelPath=args.model,
                      confThreshold=args.confidence,
                      nmsThreshold=args.nms,
                      objThreshold=args.obj,
                      backendId=backend_id,
                      targetId=target_id)

    tm = cv.TickMeter()
    tm.reset()
    if args.input is not None:
        image = cv.imread(args.input)
        input_blob = cv.cvtColor(image, cv.COLOR_BGR2RGB)
        input_blob, letterbox_scale = letterbox(input_blob)

        # Inference
        tm.start()
        preds = model_net.infer(input_blob)
        tm.stop()
        print("Inference time: {:.2f} ms".format(tm.getTimeMilli()))

        img = vis(preds, image, letterbox_scale)

        if args.save:
            print('Results saved to result.jpg\n')
            cv.imwrite('result.jpg', img)

        if args.vis:
            cv.namedWindow(args.input, cv.WINDOW_AUTOSIZE)
            cv.imshow(args.input, img)
            cv.waitKey(0)

    else:
        print("Press any key to stop video capture")
        deviceId = 0
        cap = cv.VideoCapture(deviceId)

        while cv.waitKey(1) < 0:
            hasFrame, frame = cap.read()
            if not hasFrame:
                print('No frames grabbed!')
                break

            input_blob = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
            input_blob, letterbox_scale = letterbox(input_blob)

            # Inference
            tm.start()
            preds = model_net.infer(input_blob)
            tm.stop()

            img = vis(preds, frame, letterbox_scale, fps=tm.getFPS())

            cv.imshow("YoloX Demo", img)

            tm.reset()
example_outputs/1_res.jpg
ADDED (binary image, stored with Git LFS)

example_outputs/2_res.jpg
ADDED (binary image, stored with Git LFS)

example_outputs/3_res.jpg
ADDED (binary image, stored with Git LFS)
object_detection_yolox_2022nov.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c5c2d13e59ae883e6af3b45daea64af4833a4951c92d116ec270d9ddbe998063
size 35858002

object_detection_yolox_2022nov_int8.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:01a3b0f400b30bc1e45230e991b2e499ab42622485a330021947333fbaf03935
size 9079452

object_detection_yolox_2022nov_int8bq.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dcaae0aaa2fea4167f89235ee340eb869d3707b25712218d4c7ce921ac90e2ba
size 9744418
yolox.py
ADDED
@@ -0,0 +1,85 @@
import numpy as np
import cv2

class YoloX:
    def __init__(self, modelPath, confThreshold=0.35, nmsThreshold=0.5, objThreshold=0.5, backendId=0, targetId=0):
        self.num_classes = 80
        self.net = cv2.dnn.readNet(modelPath)
        self.input_size = (640, 640)
        self.mean = np.array([0.485, 0.456, 0.406], dtype=np.float32).reshape(1, 1, 3)  # kept for reference; not applied in preprocess
        self.std = np.array([0.229, 0.224, 0.225], dtype=np.float32).reshape(1, 1, 3)   # kept for reference; not applied in preprocess
        self.strides = [8, 16, 32]
        self.confThreshold = confThreshold
        self.nmsThreshold = nmsThreshold
        self.objThreshold = objThreshold
        self.backendId = backendId
        self.targetId = targetId
        self.net.setPreferableBackend(self.backendId)
        self.net.setPreferableTarget(self.targetId)

        self.generateAnchors()

    @property
    def name(self):
        return self.__class__.__name__

    def setBackendAndTarget(self, backendId, targetId):
        self.backendId = backendId
        self.targetId = targetId
        self.net.setPreferableBackend(self.backendId)
        self.net.setPreferableTarget(self.targetId)

    def preprocess(self, img):
        # HWC -> CHW, then add the batch dimension expected by the network
        blob = np.transpose(img, (2, 0, 1))
        return blob[np.newaxis, :, :, :]

    def infer(self, srcimg):
        input_blob = self.preprocess(srcimg)

        self.net.setInput(input_blob)
        outs = self.net.forward(self.net.getUnconnectedOutLayersNames())

        predictions = self.postprocess(outs[0])
        return predictions

    def postprocess(self, outputs):
        dets = outputs[0]

        # Decode cell offsets and log-space sizes against the precomputed grid
        dets[:, :2] = (dets[:, :2] + self.grids) * self.expanded_strides
        dets[:, 2:4] = np.exp(dets[:, 2:4]) * self.expanded_strides

        # get boxes
        boxes = dets[:, :4]
        boxes_xyxy = np.ones_like(boxes)
        boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2] / 2.
        boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3] / 2.
        boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2] / 2.
        boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3] / 2.

        # get scores and class indices
        scores = dets[:, 4:5] * dets[:, 5:]
        max_scores = np.amax(scores, axis=1)
        max_scores_idx = np.argmax(scores, axis=1)

        keep = cv2.dnn.NMSBoxesBatched(boxes_xyxy.tolist(), max_scores.tolist(), max_scores_idx.tolist(), self.confThreshold, self.nmsThreshold)

        candidates = np.concatenate([boxes_xyxy, max_scores[:, None], max_scores_idx[:, None]], axis=1)
        if len(keep) == 0:
            return np.array([])
        return candidates[keep]

    def generateAnchors(self):
        self.grids = []
        self.expanded_strides = []
        hsizes = [self.input_size[0] // stride for stride in self.strides]
        wsizes = [self.input_size[1] // stride for stride in self.strides]

        for hsize, wsize, stride in zip(hsizes, wsizes, self.strides):
            xv, yv = np.meshgrid(np.arange(hsize), np.arange(wsize))
            grid = np.stack((xv, yv), 2).reshape(1, -1, 2)
            self.grids.append(grid)
            shape = grid.shape[:2]
            self.expanded_strides.append(np.full((*shape, 1), stride))

        self.grids = np.concatenate(self.grids, 1)
        self.expanded_strides = np.concatenate(self.expanded_strides, 1)