Spaces:
Sleeping
Sleeping
HaochenGong commited on
Commit ·
f1554a2
1
Parent(s): aef871b
create
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- .idea/.gitignore +0 -0
- .idea/Cpp4App_test.iml +10 -0
- .idea/inspectionProfiles/Project_Default.xml +16 -0
- .idea/inspectionProfiles/profiles_settings.xml +6 -0
- .idea/misc.xml +4 -0
- .idea/modules.xml +8 -0
- .idea/vcs.xml +6 -0
- .idea/workspace.xml +59 -0
- CDM/.idea/.gitignore +3 -0
- CDM/.idea/UIED.iml +14 -0
- CDM/.idea/inspectionProfiles/Project_Default.xml +29 -0
- CDM/.idea/misc.xml +4 -0
- CDM/.idea/modules.xml +8 -0
- CDM/.idea/vcs.xml +6 -0
- CDM/LICENSE +201 -0
- CDM/README.md +80 -0
- CDM/cnn/CNN.py +114 -0
- CDM/cnn/Config.py +21 -0
- CDM/cnn/Data.py +69 -0
- CDM/config/CONFIG.py +45 -0
- CDM/config/CONFIG_UIED.py +49 -0
- CDM/detect_classify/classification.py +380 -0
- CDM/detect_compo/deprecated/Block.py +56 -0
- CDM/detect_compo/deprecated/block_division.py +108 -0
- CDM/detect_compo/deprecated/ip_detection_utils.py +461 -0
- CDM/detect_compo/deprecated/ip_segment.py +123 -0
- CDM/detect_compo/deprecated/ocr_classify_text.py +113 -0
- CDM/detect_compo/ip_region_proposal.py +200 -0
- CDM/detect_compo/lib_ip/Bbox.py +122 -0
- CDM/detect_compo/lib_ip/Component.py +238 -0
- CDM/detect_compo/lib_ip/file_utils.py +80 -0
- CDM/detect_compo/lib_ip/ip_detection.py +574 -0
- CDM/detect_compo/lib_ip/ip_draw.py +139 -0
- CDM/detect_compo/lib_ip/ip_preprocessing.py +69 -0
- CDM/detect_compo/model/model-99-resnet18.pkl +3 -0
- CDM/detect_merge/Element.py +113 -0
- CDM/detect_merge/merge.py +361 -0
- CDM/detect_text/Text.py +181 -0
- CDM/detect_text/ocr.py +43 -0
- CDM/detect_text/text_detection.py +289 -0
- CDM/input_examples/README.md +80 -0
- CDM/logs/cfg-for-web.txt +19 -0
- CDM/logs/log.txt +22 -0
- CDM/logs/speed-improvement.txt +12 -0
- CDM/model/model-99-ViT-entire.pkl +3 -0
- CDM/model/model-99-resnet18.pkl +3 -0
- CDM/requirements.txt +0 -0
- CDM/result_classification/README.md +80 -0
- CDM/result_processing/Untitled.ipynb +937 -0
- CDM/result_processing/eval_classes.py +215 -0
.idea/.gitignore
ADDED
|
File without changes
|
.idea/Cpp4App_test.iml
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
| 2 |
+
<module type="PYTHON_MODULE" version="4">
|
| 3 |
+
<component name="NewModuleRootManager">
|
| 4 |
+
<content url="file://$MODULE_DIR$">
|
| 5 |
+
<excludeFolder url="file://$MODULE_DIR$/venv" />
|
| 6 |
+
</content>
|
| 7 |
+
<orderEntry type="jdk" jdkName="Python 3.10 (Cpp4App_test)" jdkType="Python SDK" />
|
| 8 |
+
<orderEntry type="sourceFolder" forTests="false" />
|
| 9 |
+
</component>
|
| 10 |
+
</module>
|
.idea/inspectionProfiles/Project_Default.xml
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<component name="InspectionProjectProfileManager">
|
| 2 |
+
<profile version="1.0">
|
| 3 |
+
<option name="myName" value="Project Default" />
|
| 4 |
+
<inspection_tool class="CssUnknownProperty" enabled="true" level="WARNING" enabled_by_default="true">
|
| 5 |
+
<option name="myCustomPropertiesEnabled" value="true" />
|
| 6 |
+
<option name="myIgnoreVendorSpecificProperties" value="false" />
|
| 7 |
+
<option name="myCustomPropertiesList">
|
| 8 |
+
<value>
|
| 9 |
+
<list size="1">
|
| 10 |
+
<item index="0" class="java.lang.String" itemvalue="transform" />
|
| 11 |
+
</list>
|
| 12 |
+
</value>
|
| 13 |
+
</option>
|
| 14 |
+
</inspection_tool>
|
| 15 |
+
</profile>
|
| 16 |
+
</component>
|
.idea/inspectionProfiles/profiles_settings.xml
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<component name="InspectionProjectProfileManager">
|
| 2 |
+
<settings>
|
| 3 |
+
<option name="USE_PROJECT_PROFILE" value="false" />
|
| 4 |
+
<version value="1.0" />
|
| 5 |
+
</settings>
|
| 6 |
+
</component>
|
.idea/misc.xml
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
| 2 |
+
<project version="4">
|
| 3 |
+
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.10 (Cpp4App_test)" project-jdk-type="Python SDK" />
|
| 4 |
+
</project>
|
.idea/modules.xml
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
| 2 |
+
<project version="4">
|
| 3 |
+
<component name="ProjectModuleManager">
|
| 4 |
+
<modules>
|
| 5 |
+
<module fileurl="file://$PROJECT_DIR$/.idea/Cpp4App_test.iml" filepath="$PROJECT_DIR$/.idea/Cpp4App_test.iml" />
|
| 6 |
+
</modules>
|
| 7 |
+
</component>
|
| 8 |
+
</project>
|
.idea/vcs.xml
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
| 2 |
+
<project version="4">
|
| 3 |
+
<component name="VcsDirectoryMappings">
|
| 4 |
+
<mapping directory="" vcs="Git" />
|
| 5 |
+
</component>
|
| 6 |
+
</project>
|
.idea/workspace.xml
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
| 2 |
+
<project version="4">
|
| 3 |
+
<component name="AutoImportSettings">
|
| 4 |
+
<option name="autoReloadType" value="SELECTIVE" />
|
| 5 |
+
</component>
|
| 6 |
+
<component name="ChangeListManager">
|
| 7 |
+
<list default="true" id="5e4481c0-7ba2-42e4-bbe6-4c36a0d36baa" name="Changes" comment="">
|
| 8 |
+
<change afterPath="$PROJECT_DIR$/app.py" afterDir="false" />
|
| 9 |
+
<change afterPath="$PROJECT_DIR$/main" afterDir="false" />
|
| 10 |
+
<change afterPath="$PROJECT_DIR$/packages.txt" afterDir="false" />
|
| 11 |
+
<change afterPath="$PROJECT_DIR$/requirements.txt" afterDir="false" />
|
| 12 |
+
<change afterPath="$PROJECT_DIR$/run_sem_test.py" afterDir="false" />
|
| 13 |
+
<change beforePath="$PROJECT_DIR$/README.md" beforeDir="false" afterPath="$PROJECT_DIR$/README.md" afterDir="false" />
|
| 14 |
+
</list>
|
| 15 |
+
<option name="SHOW_DIALOG" value="false" />
|
| 16 |
+
<option name="HIGHLIGHT_CONFLICTS" value="true" />
|
| 17 |
+
<option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
|
| 18 |
+
<option name="LAST_RESOLUTION" value="IGNORE" />
|
| 19 |
+
</component>
|
| 20 |
+
<component name="Git.Settings">
|
| 21 |
+
<option name="RECENT_GIT_ROOT_PATH" value="$PROJECT_DIR$" />
|
| 22 |
+
</component>
|
| 23 |
+
<component name="MarkdownSettingsMigration">
|
| 24 |
+
<option name="stateVersion" value="1" />
|
| 25 |
+
</component>
|
| 26 |
+
<component name="ProjectId" id="2kW6Fg72Z5pXQX4LLQVp4SZZKLB" />
|
| 27 |
+
<component name="ProjectViewState">
|
| 28 |
+
<option name="hideEmptyMiddlePackages" value="true" />
|
| 29 |
+
<option name="showLibraryContents" value="true" />
|
| 30 |
+
</component>
|
| 31 |
+
<component name="PropertiesComponent"><![CDATA[{
|
| 32 |
+
"keyToString": {
|
| 33 |
+
"RunOnceActivity.OpenProjectViewOnStart": "true",
|
| 34 |
+
"RunOnceActivity.ShowReadmeOnStart": "true",
|
| 35 |
+
"WebServerToolWindowFactoryState": "false",
|
| 36 |
+
"node.js.detected.package.eslint": "true",
|
| 37 |
+
"node.js.detected.package.tslint": "true",
|
| 38 |
+
"node.js.selected.package.eslint": "(autodetect)",
|
| 39 |
+
"node.js.selected.package.tslint": "(autodetect)",
|
| 40 |
+
"vue.rearranger.settings.migration": "true"
|
| 41 |
+
}
|
| 42 |
+
}]]></component>
|
| 43 |
+
<component name="SpellCheckerSettings" RuntimeDictionaries="0" Folders="0" CustomDictionaries="0" DefaultDictionary="application-level" UseSingleDictionary="true" transferred="true" />
|
| 44 |
+
<component name="TaskManager">
|
| 45 |
+
<task active="true" id="Default" summary="Default task">
|
| 46 |
+
<changelist id="5e4481c0-7ba2-42e4-bbe6-4c36a0d36baa" name="Changes" comment="" />
|
| 47 |
+
<created>1723386888312</created>
|
| 48 |
+
<option name="number" value="Default" />
|
| 49 |
+
<option name="presentableId" value="Default" />
|
| 50 |
+
<updated>1723386888312</updated>
|
| 51 |
+
<workItem from="1723386893769" duration="12000" />
|
| 52 |
+
<workItem from="1723386970626" duration="451000" />
|
| 53 |
+
</task>
|
| 54 |
+
<servers />
|
| 55 |
+
</component>
|
| 56 |
+
<component name="TypeScriptGeneratedFilesManager">
|
| 57 |
+
<option name="version" value="3" />
|
| 58 |
+
</component>
|
| 59 |
+
</project>
|
CDM/.idea/.gitignore
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Default ignored files
|
| 2 |
+
/shelf/
|
| 3 |
+
/workspace.xml
|
CDM/.idea/UIED.iml
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
| 2 |
+
<module type="PYTHON_MODULE" version="4">
|
| 3 |
+
<component name="NewModuleRootManager">
|
| 4 |
+
<content url="file://$MODULE_DIR$">
|
| 5 |
+
<sourceFolder url="file://$MODULE_DIR$" isTestSource="false" />
|
| 6 |
+
<sourceFolder url="file://$MODULE_DIR$/resnet" isTestSource="false" />
|
| 7 |
+
</content>
|
| 8 |
+
<orderEntry type="jdk" jdkName="Python 3.7" jdkType="Python SDK" />
|
| 9 |
+
<orderEntry type="sourceFolder" forTests="false" />
|
| 10 |
+
</component>
|
| 11 |
+
<component name="TestRunnerService">
|
| 12 |
+
<option name="PROJECT_TEST_RUNNER" value="py.test" />
|
| 13 |
+
</component>
|
| 14 |
+
</module>
|
CDM/.idea/inspectionProfiles/Project_Default.xml
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<component name="InspectionProjectProfileManager">
|
| 2 |
+
<profile version="1.0">
|
| 3 |
+
<option name="myName" value="Project Default" />
|
| 4 |
+
<inspection_tool class="DuplicatedCode" enabled="true" level="WEAK WARNING" enabled_by_default="true">
|
| 5 |
+
<Languages>
|
| 6 |
+
<language minSize="54" name="Python" />
|
| 7 |
+
</Languages>
|
| 8 |
+
</inspection_tool>
|
| 9 |
+
<inspection_tool class="PyInterpreterInspection" enabled="false" level="WARNING" enabled_by_default="false" />
|
| 10 |
+
<inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
|
| 11 |
+
<option name="ignoredPackages">
|
| 12 |
+
<value>
|
| 13 |
+
<list size="3">
|
| 14 |
+
<item index="0" class="java.lang.String" itemvalue="Tensorflow" />
|
| 15 |
+
<item index="1" class="java.lang.String" itemvalue="Sklearn" />
|
| 16 |
+
<item index="2" class="java.lang.String" itemvalue="Opencv" />
|
| 17 |
+
</list>
|
| 18 |
+
</value>
|
| 19 |
+
</option>
|
| 20 |
+
</inspection_tool>
|
| 21 |
+
<inspection_tool class="PyPep8Inspection" enabled="true" level="WEAK WARNING" enabled_by_default="true">
|
| 22 |
+
<option name="ignoredErrors">
|
| 23 |
+
<list>
|
| 24 |
+
<option value="E501" />
|
| 25 |
+
</list>
|
| 26 |
+
</option>
|
| 27 |
+
</inspection_tool>
|
| 28 |
+
</profile>
|
| 29 |
+
</component>
|
CDM/.idea/misc.xml
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
| 2 |
+
<project version="4">
|
| 3 |
+
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.7" project-jdk-type="Python SDK" />
|
| 4 |
+
</project>
|
CDM/.idea/modules.xml
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
| 2 |
+
<project version="4">
|
| 3 |
+
<component name="ProjectModuleManager">
|
| 4 |
+
<modules>
|
| 5 |
+
<module fileurl="file://$PROJECT_DIR$/.idea/UIED.iml" filepath="$PROJECT_DIR$/.idea/UIED.iml" />
|
| 6 |
+
</modules>
|
| 7 |
+
</component>
|
| 8 |
+
</project>
|
CDM/.idea/vcs.xml
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
| 2 |
+
<project version="4">
|
| 3 |
+
<component name="VcsDirectoryMappings">
|
| 4 |
+
<mapping directory="$PROJECT_DIR$" vcs="Git" />
|
| 5 |
+
</component>
|
| 6 |
+
</project>
|
CDM/LICENSE
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Apache License
|
| 2 |
+
Version 2.0, January 2004
|
| 3 |
+
http://www.apache.org/licenses/
|
| 4 |
+
|
| 5 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 6 |
+
|
| 7 |
+
1. Definitions.
|
| 8 |
+
|
| 9 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
| 10 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
| 11 |
+
|
| 12 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
| 13 |
+
the copyright owner that is granting the License.
|
| 14 |
+
|
| 15 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
| 16 |
+
other entities that control, are controlled by, or are under common
|
| 17 |
+
control with that entity. For the purposes of this definition,
|
| 18 |
+
"control" means (i) the power, direct or indirect, to cause the
|
| 19 |
+
direction or management of such entity, whether by contract or
|
| 20 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
| 21 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
| 22 |
+
|
| 23 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
| 24 |
+
exercising permissions granted by this License.
|
| 25 |
+
|
| 26 |
+
"Source" form shall mean the preferred form for making modifications,
|
| 27 |
+
including but not limited to software source code, documentation
|
| 28 |
+
source, and configuration files.
|
| 29 |
+
|
| 30 |
+
"Object" form shall mean any form resulting from mechanical
|
| 31 |
+
transformation or translation of a Source form, including but
|
| 32 |
+
not limited to compiled object code, generated documentation,
|
| 33 |
+
and conversions to other media types.
|
| 34 |
+
|
| 35 |
+
"Work" shall mean the work of authorship, whether in Source or
|
| 36 |
+
Object form, made available under the License, as indicated by a
|
| 37 |
+
copyright notice that is included in or attached to the work
|
| 38 |
+
(an example is provided in the Appendix below).
|
| 39 |
+
|
| 40 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
| 41 |
+
form, that is based on (or derived from) the Work and for which the
|
| 42 |
+
editorial revisions, annotations, elaborations, or other modifications
|
| 43 |
+
represent, as a whole, an original work of authorship. For the purposes
|
| 44 |
+
of this License, Derivative Works shall not include works that remain
|
| 45 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
| 46 |
+
the Work and Derivative Works thereof.
|
| 47 |
+
|
| 48 |
+
"Contribution" shall mean any work of authorship, including
|
| 49 |
+
the original version of the Work and any modifications or additions
|
| 50 |
+
to that Work or Derivative Works thereof, that is intentionally
|
| 51 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
| 52 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
| 53 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
| 54 |
+
means any form of electronic, verbal, or written communication sent
|
| 55 |
+
to the Licensor or its representatives, including but not limited to
|
| 56 |
+
communication on electronic mailing lists, source code control systems,
|
| 57 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
| 58 |
+
Licensor for the purpose of discussing and improving the Work, but
|
| 59 |
+
excluding communication that is conspicuously marked or otherwise
|
| 60 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
| 61 |
+
|
| 62 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
| 63 |
+
on behalf of whom a Contribution has been received by Licensor and
|
| 64 |
+
subsequently incorporated within the Work.
|
| 65 |
+
|
| 66 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
| 67 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 68 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 69 |
+
copyright license to reproduce, prepare Derivative Works of,
|
| 70 |
+
publicly display, publicly perform, sublicense, and distribute the
|
| 71 |
+
Work and such Derivative Works in Source or Object form.
|
| 72 |
+
|
| 73 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
| 74 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 75 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 76 |
+
(except as stated in this section) patent license to make, have made,
|
| 77 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
| 78 |
+
where such license applies only to those patent claims licensable
|
| 79 |
+
by such Contributor that are necessarily infringed by their
|
| 80 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
| 81 |
+
with the Work to which such Contribution(s) was submitted. If You
|
| 82 |
+
institute patent litigation against any entity (including a
|
| 83 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
| 84 |
+
or a Contribution incorporated within the Work constitutes direct
|
| 85 |
+
or contributory patent infringement, then any patent licenses
|
| 86 |
+
granted to You under this License for that Work shall terminate
|
| 87 |
+
as of the date such litigation is filed.
|
| 88 |
+
|
| 89 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
| 90 |
+
Work or Derivative Works thereof in any medium, with or without
|
| 91 |
+
modifications, and in Source or Object form, provided that You
|
| 92 |
+
meet the following conditions:
|
| 93 |
+
|
| 94 |
+
(a) You must give any other recipients of the Work or
|
| 95 |
+
Derivative Works a copy of this License; and
|
| 96 |
+
|
| 97 |
+
(b) You must cause any modified files to carry prominent notices
|
| 98 |
+
stating that You changed the files; and
|
| 99 |
+
|
| 100 |
+
(c) You must retain, in the Source form of any Derivative Works
|
| 101 |
+
that You distribute, all copyright, patent, trademark, and
|
| 102 |
+
attribution notices from the Source form of the Work,
|
| 103 |
+
excluding those notices that do not pertain to any part of
|
| 104 |
+
the Derivative Works; and
|
| 105 |
+
|
| 106 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
| 107 |
+
distribution, then any Derivative Works that You distribute must
|
| 108 |
+
include a readable copy of the attribution notices contained
|
| 109 |
+
within such NOTICE file, excluding those notices that do not
|
| 110 |
+
pertain to any part of the Derivative Works, in at least one
|
| 111 |
+
of the following places: within a NOTICE text file distributed
|
| 112 |
+
as part of the Derivative Works; within the Source form or
|
| 113 |
+
documentation, if provided along with the Derivative Works; or,
|
| 114 |
+
within a display generated by the Derivative Works, if and
|
| 115 |
+
wherever such third-party notices normally appear. The contents
|
| 116 |
+
of the NOTICE file are for informational purposes only and
|
| 117 |
+
do not modify the License. You may add Your own attribution
|
| 118 |
+
notices within Derivative Works that You distribute, alongside
|
| 119 |
+
or as an addendum to the NOTICE text from the Work, provided
|
| 120 |
+
that such additional attribution notices cannot be construed
|
| 121 |
+
as modifying the License.
|
| 122 |
+
|
| 123 |
+
You may add Your own copyright statement to Your modifications and
|
| 124 |
+
may provide additional or different license terms and conditions
|
| 125 |
+
for use, reproduction, or distribution of Your modifications, or
|
| 126 |
+
for any such Derivative Works as a whole, provided Your use,
|
| 127 |
+
reproduction, and distribution of the Work otherwise complies with
|
| 128 |
+
the conditions stated in this License.
|
| 129 |
+
|
| 130 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
| 131 |
+
any Contribution intentionally submitted for inclusion in the Work
|
| 132 |
+
by You to the Licensor shall be under the terms and conditions of
|
| 133 |
+
this License, without any additional terms or conditions.
|
| 134 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
| 135 |
+
the terms of any separate license agreement you may have executed
|
| 136 |
+
with Licensor regarding such Contributions.
|
| 137 |
+
|
| 138 |
+
6. Trademarks. This License does not grant permission to use the trade
|
| 139 |
+
names, trademarks, service marks, or product names of the Licensor,
|
| 140 |
+
except as required for reasonable and customary use in describing the
|
| 141 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
| 142 |
+
|
| 143 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
| 144 |
+
agreed to in writing, Licensor provides the Work (and each
|
| 145 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
| 146 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 147 |
+
implied, including, without limitation, any warranties or conditions
|
| 148 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
| 149 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
| 150 |
+
appropriateness of using or redistributing the Work and assume any
|
| 151 |
+
risks associated with Your exercise of permissions under this License.
|
| 152 |
+
|
| 153 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
| 154 |
+
whether in tort (including negligence), contract, or otherwise,
|
| 155 |
+
unless required by applicable law (such as deliberate and grossly
|
| 156 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
| 157 |
+
liable to You for damages, including any direct, indirect, special,
|
| 158 |
+
incidental, or consequential damages of any character arising as a
|
| 159 |
+
result of this License or out of the use or inability to use the
|
| 160 |
+
Work (including but not limited to damages for loss of goodwill,
|
| 161 |
+
work stoppage, computer failure or malfunction, or any and all
|
| 162 |
+
other commercial damages or losses), even if such Contributor
|
| 163 |
+
has been advised of the possibility of such damages.
|
| 164 |
+
|
| 165 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
| 166 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
| 167 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
| 168 |
+
or other liability obligations and/or rights consistent with this
|
| 169 |
+
License. However, in accepting such obligations, You may act only
|
| 170 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
| 171 |
+
of any other Contributor, and only if You agree to indemnify,
|
| 172 |
+
defend, and hold each Contributor harmless for any liability
|
| 173 |
+
incurred by, or claims asserted against, such Contributor by reason
|
| 174 |
+
of your accepting any such warranty or additional liability.
|
| 175 |
+
|
| 176 |
+
END OF TERMS AND CONDITIONS
|
| 177 |
+
|
| 178 |
+
APPENDIX: How to apply the Apache License to your work.
|
| 179 |
+
|
| 180 |
+
To apply the Apache License to your work, attach the following
|
| 181 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
| 182 |
+
replaced with your own identifying information. (Don't include
|
| 183 |
+
the brackets!) The text should be enclosed in the appropriate
|
| 184 |
+
comment syntax for the file format. We also recommend that a
|
| 185 |
+
file or class name and description of purpose be included on the
|
| 186 |
+
same "printed page" as the copyright notice for easier
|
| 187 |
+
identification within third-party archives.
|
| 188 |
+
|
| 189 |
+
Copyright [2021] [UIED mulong.xie@anu.edu.au]
|
| 190 |
+
|
| 191 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 192 |
+
you may not use this file except in compliance with the License.
|
| 193 |
+
You may obtain a copy of the License at
|
| 194 |
+
|
| 195 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 196 |
+
|
| 197 |
+
Unless required by applicable law or agreed to in writing, software
|
| 198 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 199 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 200 |
+
See the License for the specific language governing permissions and
|
| 201 |
+
limitations under the License.
|
CDM/README.md
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# UIED - UI element detection, detecting UI elements from UI screenshots or drawnings
|
| 2 |
+
|
| 3 |
+
This project is still ongoing and this repo may be updated irregularly, I developed a web app for the UIED in http://uied.online
|
| 4 |
+
|
| 5 |
+
## Related Publications:
|
| 6 |
+
[1. UIED: a hybrid tool for GUI element detection](https://dl.acm.org/doi/10.1145/3368089.3417940)
|
| 7 |
+
|
| 8 |
+
[2. Object Detection for Graphical User Interface: Old Fashioned or Deep Learning or a Combination?](https://arxiv.org/abs/2008.05132)
|
| 9 |
+
|
| 10 |
+
>The repo has been **upgraded with Google OCR** for GUI text detection, to use the original version in our paper (using [EAST](https://github.com/argman/EAST) as text detector), check the relase [v2.3](https://github.com/MulongXie/UIED/releases/tag/v2.3) and download the pre-trained model in [this link](https://drive.google.com/drive/folders/1MK0Om7Lx0wRXGDfNcyj21B0FL1T461v5?usp=sharing).
|
| 11 |
+
|
| 12 |
+
## What is it?
|
| 13 |
+
|
| 14 |
+
UI Element Detection (UIED) is an old-fashioned computer vision (CV) based element detection approach for graphic user interface.
|
| 15 |
+
|
| 16 |
+
The input of UIED could be various UI image, such as mobile app or web page screenshot, UI design drawn by Photoshop or Sketch, and even some hand-drawn UI design. Then the approach detects and classifies text and graphic UI elements, and exports the detection result as JSON file for future application.
|
| 17 |
+
|
| 18 |
+
UIED comprises two parts to detect UI text and graphic elements, such as button, image and input bar.
|
| 19 |
+
* For text, it leverages [Google OCR](https://cloud.google.com/vision/docs/ocr) to perfrom detection.
|
| 20 |
+
|
| 21 |
+
* For graphical elements, it uses old-fashioned CV approaches to locate the elements and a CNN classifier to achieve classification.
|
| 22 |
+
|
| 23 |
+
> UIED is highly customizable, you can replace both parts by your choice (e.g. other text detection approaches). Unlike black-box end-to-end deep learning approach, you can revise the algorithms in the non-text detection and merging (partially or entirely) easily to fit your task.
|
| 24 |
+
|
| 25 |
+

|
| 26 |
+
|
| 27 |
+
## How to use?
|
| 28 |
+
|
| 29 |
+
### Dependency
|
| 30 |
+
* **Python 3.5**
|
| 31 |
+
* **Opencv 3.4.2**
|
| 32 |
+
* **Pandas**
|
| 33 |
+
<!-- * **Tensorflow 1.10.0**
|
| 34 |
+
* **Keras 2.2.4**
|
| 35 |
+
* **Sklearn 0.22.2** -->
|
| 36 |
+
|
| 37 |
+
### Installation
|
| 38 |
+
<!-- Install the mentioned dependencies, and download two pre-trained models from [this link](https://drive.google.com/drive/folders/1MK0Om7Lx0wRXGDfNcyj21B0FL1T461v5?usp=sharing) for EAST text detection and GUI element classification. -->
|
| 39 |
+
|
| 40 |
+
<!-- Change ``CNN_PATH`` and ``EAST_PATH`` in *config/CONFIG.py* to your locations. -->
|
| 41 |
+
|
| 42 |
+
The new version of UIED equipped with Google OCR is easy to deploy and no pre-trained model is needed. Simply donwload the repo along with the dependencies.
|
| 43 |
+
|
| 44 |
+
> Please replace the Google OCR key at `detect_text/ocr.py line 28` with your own (apply in [Google website](https://cloud.google.com/vision)).
|
| 45 |
+
|
| 46 |
+
### Usage
|
| 47 |
+
To test your own image(s):
|
| 48 |
+
* To test single image, change *input_path_img* in ``run_single.py`` to your input image and the results will be output to *output_root*.
|
| 49 |
+
* To test mutiple images, change *input_img_root* in ``run_batch.py`` to your input directory and the results will be output to *output_root*.
|
| 50 |
+
* To adjust the parameters lively, using ``run_testing.py``
|
| 51 |
+
|
| 52 |
+
> Note: The best set of parameters vary for different types of GUI image (Mobile App, Web, PC). I highly recommend to first play with the ``run_testing.py`` to pick a good set of parameters for your data.
|
| 53 |
+
|
| 54 |
+
## Folder structure
|
| 55 |
+
``cnn/``
|
| 56 |
+
* Used to train classifier for graphic UI elements
|
| 57 |
+
* Set path of the CNN classification model
|
| 58 |
+
|
| 59 |
+
``config/``
|
| 60 |
+
* Set data paths
|
| 61 |
+
* Set parameters for graphic elements detection
|
| 62 |
+
|
| 63 |
+
``data/``
|
| 64 |
+
* Input UI images and output detection results
|
| 65 |
+
|
| 66 |
+
``detect_compo/``
|
| 67 |
+
* Non-text GUI component detection
|
| 68 |
+
|
| 69 |
+
``detect_text/``
|
| 70 |
+
* GUI text detection using Google OCR
|
| 71 |
+
|
| 72 |
+
``detect_merge/``
|
| 73 |
+
* Merge the detection results of non-text and text GUI elements
|
| 74 |
+
|
| 75 |
+
The major detection algorithms are in ``detect_compo/``, ``detect_text/`` and ``detect_merge/``
|
| 76 |
+
|
| 77 |
+
## Demo
|
| 78 |
+
GUI element detection result for web screenshot
|
| 79 |
+
|
| 80 |
+

|
CDM/cnn/CNN.py
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import keras
|
| 2 |
+
from keras.applications.resnet50 import ResNet50
|
| 3 |
+
from keras.models import Model,load_model
|
| 4 |
+
from keras.layers import Dense, Activation, Flatten, Dropout
|
| 5 |
+
from sklearn.metrics import confusion_matrix
|
| 6 |
+
import numpy as np
|
| 7 |
+
import cv2
|
| 8 |
+
|
| 9 |
+
from config.CONFIG import Config
|
| 10 |
+
cfg = Config()
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class CNN:
    """Keras-based image classifier for GUI elements.

    Depending on ``classifier_type`` ('Text', 'Noise', 'Elements' or 'Image')
    a different pre-trained model and class map is loaded from disk.
    """

    def __init__(self, classifier_type, is_load=True):
        '''
        :param classifier_type: 'Text' or 'Noise' or 'Elements' or 'Image'
        :param is_load: load the pre-trained model from disk immediately
        '''
        self.data = None                  # Data object used for training/evaluation
        self.model = None                 # underlying Keras model
        self.classifier_type = classifier_type
        self.image_shape = (32, 32, 3)    # default input size; 'Elements' overrides it
        self.class_number = None          # number of output classes
        self.class_map = None             # class index -> human readable label
        self.model_path = None            # where the model is loaded from / saved to
        if is_load:
            self.load(classifier_type)

    def build_model(self, epoch_num, is_compile=True):
        """Build a transfer-learning head on a frozen ResNet50 backbone and train it."""
        base_model = ResNet50(include_top=False, weights='imagenet', input_shape=self.image_shape)
        # freeze the backbone so only the new dense head is trained
        for layer in base_model.layers:
            layer.trainable = False
        head = Flatten()(base_model.output)
        head = Dense(128, activation='relu')(head)
        head = Dropout(0.5)(head)
        # bug fix: the output layer size was hard-coded to 15, which mismatches the
        # binary class maps ('Text', 'Noise', 'Image'); derive it from the class map
        # when known, keeping 15 as the historical fallback
        out_dim = self.class_number if self.class_number else 15
        head = Dense(out_dim, activation='softmax')(head)

        self.model = Model(inputs=base_model.input, outputs=head)
        if is_compile:
            self.model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
            self.model.fit(self.data.X_train, self.data.Y_train, batch_size=64, epochs=epoch_num, verbose=1,
                           validation_data=(self.data.X_test, self.data.Y_test))

    def train(self, data, epoch_num=30):
        """Train on a Data object and persist the resulting model to self.model_path."""
        self.data = data
        self.build_model(epoch_num)
        self.model.save(self.model_path)
        print("Trained model is saved to", self.model_path)

    def load(self, classifier_type):
        """Resolve model path and class map for the classifier type, then load the model.

        :raises ValueError: if classifier_type is not one of the supported names
        """
        if classifier_type == 'Text':
            self.model_path = 'E:/Mulong/Model/rico_compos/cnn-textview-2.h5'
            self.class_map = ['Text', 'Non-Text']
        elif classifier_type == 'Noise':
            self.model_path = 'E:/Mulong/Model/rico_compos/cnn-noise-1.h5'
            self.class_map = ['Noise', 'Non-Noise']
        elif classifier_type == 'Elements':
            self.model_path = cfg.CNN_PATH
            self.class_map = cfg.element_class
            self.image_shape = (64, 64, 3)
        elif classifier_type == 'Image':
            self.model_path = 'E:/Mulong/Model/rico_compos/cnn-image-1.h5'
            self.class_map = ['Image', 'Non-Image']
        else:
            # bug fix: an unknown type previously fell through and crashed with
            # a confusing TypeError on len(None) below
            raise ValueError("classifier_type must be 'Text', 'Noise', 'Elements' or 'Image'")
        self.class_number = len(self.class_map)
        self.model = load_model(self.model_path)
        print('Model Loaded From', self.model_path)

    def preprocess_img(self, image):
        """Resize to the model input size, scale to [0, 1] and wrap into a batch of 1."""
        # NOTE(review): cv2.resize expects (width, height); this is only correct
        # because all configured image shapes are square — confirm if that changes
        image = cv2.resize(image, self.image_shape[:2])
        x = (image / 255).astype('float32')
        x = np.array([x])
        return x

    def predict(self, imgs, compos, load=False, show=False):
        """Classify each cropped element image and store the label on the matching compo.

        :param imgs: list of cropped element images (BGR arrays)
        :param compos: list of components aligned with imgs; category is set in place
        :param load: reload the model before predicting
        :param show: print each label and display the element (blocks on a key press)
        """
        if load:
            self.load(self.classifier_type)
        if self.model is None:
            print("*** No model loaded ***")
            return
        for i in range(len(imgs)):
            X = self.preprocess_img(imgs[i])
            Y = self.class_map[np.argmax(self.model.predict(X))]
            compos[i].category = Y
            if show:
                print(Y)
                cv2.imshow('element', imgs[i])
                cv2.waitKey()

    def evaluate(self, data, load=True):
        """Print the confusion matrix and micro-averaged precision/recall on data.X_test."""
        if load:
            self.load(self.classifier_type)
        X_test = data.X_test
        Y_test = [np.argmax(y) for y in data.Y_test]
        Y_pre = [np.argmax(y_pre) for y_pre in self.model.predict(X_test, verbose=1)]

        matrix = confusion_matrix(Y_test, Y_pre)
        print(matrix)

        # sklearn convention: rows are true classes, columns are predicted classes
        TP, FP, FN = 0, 0, 0
        for i in range(len(matrix)):
            TP += matrix[i][i]
            # bug fix: the old code used matrix[:][i] for FN, which is just row i
            # again (a copy of the row list indexed), so FN duplicated the row sum;
            # FP is the column sum minus the diagonal, FN the row sum minus it
            FP += sum(matrix[:, i]) - matrix[i][i]
            FN += sum(matrix[i, :]) - matrix[i][i]
        precision = TP / (TP + FP)
        recall = TP / (TP + FN)
        print("Precision:%.3f, Recall:%.3f" % (precision, recall))
|
CDM/cnn/Config.py
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
class Config:
    """Configuration for the binary Text / Non-Text CNN classifier."""

    def __init__(self):
        # input geometry of the network
        self.image_shape = (32, 32, 3)

        # dataset location and trained-model destination
        self.DATA_PATH = "E:\Mulong\Datasets\dataset_webpage\Components3"
        self.MODEL_PATH = 'E:/Mulong/Model/rico_compos/cnn2-textview.h5'

        # output classes; the position in this list is the network's output index
        self.class_map = ['Text', 'Non-Text']
        self.class_number = len(self.class_map)
|
CDM/cnn/Data.py
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
|
| 2 |
+
import numpy as np
|
| 3 |
+
from os.path import join as pjoin
|
| 4 |
+
import glob
|
| 5 |
+
from tqdm import tqdm
|
| 6 |
+
from Config import Config
|
| 7 |
+
|
| 8 |
+
cfg = Config()
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class Data:
    """Loads labelled component images from cfg.DATA_PATH and builds train/test splits.

    Each sub-folder of DATA_PATH is one class; the folder name must appear in
    cfg.class_map.
    """

    def __init__(self):
        self.data_num = 0      # total number of loaded samples
        self.images = []       # loaded (optionally resized) BGR images
        self.labels = []       # integer class indices aligned with self.images
        self.X_train, self.Y_train = None, None
        self.X_test, self.Y_test = None, None

        self.image_shape = cfg.image_shape
        self.class_number = cfg.class_number
        self.class_map = cfg.class_map
        self.DATA_PATH = cfg.DATA_PATH

    def load_data(self, resize=True, shape=None, max_number=1000000):
        """Read up to max_number PNG images per class folder under DATA_PATH.

        :param resize: resize every image to ``shape``
        :param shape: (h, w, c) override of the configured image shape
        :param max_number: per-class cap on the number of images loaded
        """
        # if customize shape
        if shape is not None:
            self.image_shape = shape
        else:
            shape = self.image_shape

        # load data: one sub-folder per class, named after the class label
        for p in glob.glob(pjoin(self.DATA_PATH, '*')):
            # bug fix: the class name was extracted with p.split('\\')[-1], which
            # only works with Windows separators; normalise first so the lookup
            # also works on POSIX systems
            class_name = p.replace('\\', '/').split('/')[-1]
            img_paths = glob.glob(pjoin(p, '*.png'))  # hoisted: was globbed twice
            print("*** Loading components of %s: %d ***" % (class_name, len(img_paths)))
            label = self.class_map.index(class_name)  # map to index of classes
            for i, image_path in enumerate(tqdm(img_paths[:max_number])):
                image = cv2.imread(image_path)
                if resize:
                    image = cv2.resize(image, shape[:2])
                self.images.append(image)
                self.labels.append(label)

        assert len(self.images) == len(self.labels)
        self.data_num = len(self.images)
        print('%d Data Loaded' % self.data_num)

    def generate_training_data(self, train_data_ratio=0.8):
        """Shuffle the loaded data and split it into normalised train/test arrays."""
        # transfer int labels into one-hot rows
        def expand(label, class_number):
            # returns y of shape (num_samples, num_class)
            y = np.eye(class_number)[label]
            y = np.squeeze(y)
            return y

        # reshuffle images and labels with the same seed so they stay aligned
        np.random.seed(0)
        self.images = np.random.permutation(self.images)
        np.random.seed(0)
        self.labels = np.random.permutation(self.labels)
        Y = expand(self.labels, self.class_number)

        # separate dataset
        cut = int(train_data_ratio * self.data_num)
        self.X_train = (self.images[:cut] / 255).astype('float32')
        self.X_test = (self.images[cut:] / 255).astype('float32')
        self.Y_train = Y[:cut]
        self.Y_test = Y[cut:]

        print('X_train:%d, Y_train:%d' % (len(self.X_train), len(self.Y_train)))
        print('X_test:%d, Y_test:%d' % (len(self.X_test), len(self.Y_test)))
|
CDM/config/CONFIG.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from os.path import join as pjoin
|
| 2 |
+
import os
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class Config:
    """Global configuration: classifier model paths, class names and drawing colours."""

    def __init__(self):
        # setting CNN (graphic elements) model
        self.image_shape = (64, 64, 3)
        self.CNN_PATH = 'E:/Mulong/Model/rico_compos/cnn-rico-1.h5'
        self.element_class = ['Button', 'CheckBox', 'Chronometer', 'EditText', 'ImageButton', 'ImageView',
                              'ProgressBar', 'RadioButton', 'RatingBar', 'SeekBar', 'Spinner', 'Switch',
                              'ToggleButton', 'VideoView', 'TextView']
        self.class_number = len(self.element_class)

        # setting EAST (ocr) model
        self.EAST_PATH = 'E:/Mulong/Model/East/east_icdar2015_resnet_v1_50_rbox'

        # BGR colour used when drawing the bounding box of each class
        self.COLOR = {'Button': (0, 255, 0), 'CheckBox': (0, 0, 255), 'Chronometer': (255, 166, 166),
                      'EditText': (255, 166, 0),
                      'ImageButton': (77, 77, 255), 'ImageView': (255, 0, 166), 'ProgressBar': (166, 0, 255),
                      'RadioButton': (166, 166, 166),
                      'RatingBar': (0, 166, 255), 'SeekBar': (0, 166, 10), 'Spinner': (50, 21, 255),
                      'Switch': (80, 166, 66), 'ToggleButton': (0, 66, 80), 'VideoView': (88, 66, 0),
                      'TextView': (169, 255, 0), 'NonText': (0, 0, 255),
                      'Compo': (0, 0, 255), 'Text': (169, 255, 0), 'Block': (80, 166, 66)}

    def build_output_folders(self):
        """Set the data-flow paths and make sure every output folder exists."""
        # setting data flow paths
        self.ROOT_INPUT = "E:\\Mulong\\Datasets\\rico\\combined"
        self.ROOT_OUTPUT = "E:\\Mulong\\Result\\rico\\rico_uied\\rico_new_uied_v3"

        self.ROOT_IMG_ORG = pjoin(self.ROOT_INPUT, "org")
        self.ROOT_IP = pjoin(self.ROOT_OUTPUT, "ip")
        self.ROOT_OCR = pjoin(self.ROOT_OUTPUT, "ocr")
        self.ROOT_MERGE = pjoin(self.ROOT_OUTPUT, "merge")
        self.ROOT_IMG_COMPONENT = pjoin(self.ROOT_OUTPUT, "components")
        # bug fix: the previous exists()+os.mkdir() pattern raised when ROOT_OUTPUT's
        # parents did not exist and was racy between the check and the mkdir;
        # makedirs with exist_ok=True is recursive and race-free
        for folder in (self.ROOT_IP, self.ROOT_OCR, self.ROOT_MERGE):
            os.makedirs(folder, exist_ok=True)
|
CDM/config/CONFIG_UIED.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
class Config:
    """Frozen detection thresholds for the UIED pipeline plus class/colour lookups."""

    def __init__(self):
        # *** Frozen thresholds ***
        self.THRESHOLD_REC_MIN_EVENNESS = 0.7
        self.THRESHOLD_REC_MAX_DENT_RATIO = 0.25
        self.THRESHOLD_LINE_THICKNESS = 8
        self.THRESHOLD_LINE_MIN_LENGTH = 0.95
        # (120/800, 422.5/450) maximum height and width ratio for an atomic compo (button)
        self.THRESHOLD_COMPO_MAX_SCALE = (0.25, 0.98)
        self.THRESHOLD_TEXT_MAX_WORD_GAP = 10
        self.THRESHOLD_TEXT_MAX_HEIGHT = 0.04          # 40/800 maximum height of text
        self.THRESHOLD_TOP_BOTTOM_BAR = (0.045, 0.94)  # (36/800, 752/800) height ratio of top and bottom bar
        self.THRESHOLD_BLOCK_MIN_HEIGHT = 0.03         # 24/800

        # numeric class id (as string) -> element category name
        element_names = ['Button', 'CheckBox', 'Chronometer', 'EditText', 'ImageButton', 'ImageView',
                         'ProgressBar', 'RadioButton', 'RatingBar', 'SeekBar', 'Spinner', 'Switch',
                         'ToggleButton', 'VideoView', 'TextView']
        self.CLASS_MAP = {str(idx): name for idx, name in enumerate(element_names)}

        # BGR colour per category used when drawing bounding boxes
        self.COLOR = {'Button': (0, 255, 0), 'CheckBox': (0, 0, 255), 'Chronometer': (255, 166, 166),
                      'EditText': (255, 166, 0),
                      'ImageButton': (77, 77, 255), 'ImageView': (255, 0, 166), 'ProgressBar': (166, 0, 255),
                      'RadioButton': (166, 166, 166),
                      'RatingBar': (0, 166, 255), 'SeekBar': (0, 166, 10), 'Spinner': (50, 21, 255),
                      'Switch': (80, 166, 66), 'ToggleButton': (0, 66, 80), 'VideoView': (88, 66, 0),
                      'TextView': (169, 255, 0),
                      'Text': (169, 255, 0), 'Non-Text': (255, 0, 166),
                      'Noise': (6, 6, 255), 'Non-Noise': (6, 255, 6),
                      'Image': (255, 6, 6), 'Non-Image': (6, 6, 255)}
|
CDM/detect_classify/classification.py
ADDED
|
@@ -0,0 +1,380 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from CDM.detect_merge.Element import Element
|
| 2 |
+
import CDM.detect_compo.lib_ip.ip_preprocessing as pre
|
| 3 |
+
import time
|
| 4 |
+
import cv2
|
| 5 |
+
import torch
|
| 6 |
+
import numpy as np
|
| 7 |
+
from torchvision import models
|
| 8 |
+
from torch import nn
|
| 9 |
+
import pandas as pd
|
| 10 |
+
import re
|
| 11 |
+
import openai
|
| 12 |
+
import random
|
| 13 |
+
import os
|
| 14 |
+
from CDM.detect_merge.merge import reassign_ids
|
| 15 |
+
import CDM.detect_merge.merge as merge
|
| 16 |
+
from os.path import join as pjoin, exists
|
| 17 |
+
|
| 18 |
+
# Maps a numeric icon-class id predicted by the icon classifier (as a string)
# to a privacy data type; ids absent from this dict are not privacy-related.
# NOTE(review): id -> type pairs taken as-is from the model's label space — verify against training labels.
label_dic ={'72':'Location', '42':'Photos', '77':'Social media', '91':'Voices', '6':'Email', '89':'Social media', '40':'Location', '43':'Phone', '82':'Photos',
            '3':'Contacts', '68':'Contacts', '49':'Profile', '56':'Photos'}

# Keyword lexicon used by get_data_type() for the non-GPT matching path:
# privacy data type -> trigger words/phrases searched for in a GUI element's text.
keyword_list = {'Name':['name', 'first name', 'last name', 'full name', 'real name', 'surname', 'family name', 'given name'],
                'Birthday':['birthday', 'date of birth', 'birth date', 'DOB', 'dob full birthday', 'birth year'],
                'Address':['mailing address', 'physical address', 'postal address', 'billing address', 'shipping address', 'delivery address', 'residence', 'collect address', 'personal address', 'residential address'],
                'Phone':['phone', 'phone number', 'mobile', 'mobile phone', 'mobile number', 'telephone', 'telephone number', 'call'],
                'Email':['email', 'e-mail', 'email address', 'e-mail address'],
                'Contacts':['contacts', 'phone-book', 'phone book', 'phonebook', 'contact list', 'phone contacts', 'address book'],
                'Location':['location', 'locate', 'geography', 'geo', 'geo-location', 'precision location', 'nearby'],
                'Photos':['camera', 'photo', 'scan', 'album', 'picture', 'gallery', 'photo library', 'storage', 'image', 'video', 'scanner', 'photograph'],
                'Voices':['microphone', 'voice', 'mic', 'speech', 'talk'],
                'Financial info':['credit card', 'pay', 'payment', 'debit card', 'mastercard', 'wallet'],
                'IP':['IP', 'Internet Protocol', 'IP address', 'internet protocol address'],
                'Cookies':['cookies', 'cookie'],
                'Social media':['facebook', 'twitter', 'socialmedia', 'social media'],
                'Profile':['profile', 'account'],
                'Gender':['gender']}
|
| 36 |
+
|
| 37 |
+
def get_data_type(sentence, keywords, use_gpt=True):
    """Classify a piece of GUI text into one of the privacy data types.

    :param sentence: the text content of a GUI element
    :param keywords: dict mapping data type -> list of trigger words/phrases
    :param use_gpt: if True ask the OpenAI chat API, otherwise match keywords locally
    :return: the matched data type name, or "others" when nothing matches
    """
    sent_data_type = "others"

    if use_gpt:
        openai.api_key = os.environ["OPENAI_API_KEY"]

        prompt = f"Is this piece of texts \"{sentence}\" related to any following privacy information data types? Or not relevant to any of them? ONLY answer the data type or \"not relevant\". ONLY use following data type list. Data types and their Description:\n" \
                 f"Name: How a user refers to themselves," \
                 f" Birthday: A user’s birthday," \
                 f" Address: A user’s address," \
                 f" Phone: A user’s phone number," \
                 f" Email: A user’s email address," \
                 f" Contacts: A user’s contact information, or the access to the contact permission," \
                 f" Location: A user’s location information, or the access to the location permission," \
                 f" Photos: A user’s photos, videos, or the access to the camera permission," \
                 f" Voices: A user’s voices, recordings, or the access to the microphone permission," \
                 f" Financial Info: Information about a user’s financial accounts, purchases, or transactions," \
                 f" Profile: A user’s account information," \
                 f"Social Media: A user's social media information, or the access to social media accounts"

        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "user", "content": prompt}
            ],
            max_tokens=100,
            n=1,
            stop=None,
            temperature=0,
        )

        response_full_text = response.choices[0].message['content']
        # map the model's free-text answer back onto a known data type
        for k in keywords.keys():
            if k == "Financial info" or k == "Social media":
                # two-word type names cannot be found by a word-by-word lookup
                if k.lower() in response_full_text.lower():
                    sent_data_type = k
                    break
            else:
                words = re.split(r'\W+', response_full_text.lower())
                if k.lower() in words:
                    sent_data_type = k
                    break

    else:
        # local keyword matching
        sentence_lower = sentence.lower()
        for k in keywords.keys():
            for w in keywords[k]:
                # bug fix: the old check split the sentence into single words, so
                # multi-word keywords such as 'first name' or 'phone number' could
                # never match; a word-boundary regex handles both single words and
                # phrases (and keywords containing '-', e.g. 'e-mail')
                if re.search(r'\b' + re.escape(w.lower()) + r'\b', sentence_lower):
                    sent_data_type = k
                    break
            if sent_data_type != "others":
                break

    return sent_data_type
|
| 101 |
+
|
| 102 |
+
# def get_clf_model(use_resnet18=True, use_gpu=False):
|
| 103 |
+
#
|
| 104 |
+
# device = 'cpu'
|
| 105 |
+
# if use_gpu:
|
| 106 |
+
# device = 'cuda:0'
|
| 107 |
+
#
|
| 108 |
+
# if use_resnet18:
|
| 109 |
+
# model = models.resnet18().to(device)
|
| 110 |
+
# in_feature_num = model.fc.in_features
|
| 111 |
+
# model.fc = nn.Linear(in_feature_num, 99)
|
| 112 |
+
# model.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=(5, 5), padding=(3, 3), stride=(2, 2),
|
| 113 |
+
# bias=False)
|
| 114 |
+
#
|
| 115 |
+
# PATH = "./CDM/model/model-99-resnet18.pkl"
|
| 116 |
+
# model.load_state_dict(torch.load(PATH, map_location=torch.device(device)))
|
| 117 |
+
#
|
| 118 |
+
# model.eval()
|
| 119 |
+
# else:
|
| 120 |
+
# # replace with your own model
|
| 121 |
+
# None
|
| 122 |
+
#
|
| 123 |
+
# return model
|
| 124 |
+
|
| 125 |
+
def get_clf_model(clf_model="ResNet18", use_gpu=False):
    """Load the 99-class icon classifier in eval mode.

    :param clf_model: "ResNet18" or "ViT"
    :param use_gpu: run the model on cuda:0 instead of the CPU
    :return: the loaded model, already switched to eval()
    :raises ValueError: if clf_model names an unknown architecture
    """
    device = 'cuda:0' if use_gpu else 'cpu'

    if clf_model == "ResNet18":
        model = models.resnet18().to(device)
        # replace the classification head with a 99-class linear layer and adapt
        # the first convolution to single-channel (greyscale) input
        in_feature_num = model.fc.in_features
        model.fc = nn.Linear(in_feature_num, 99)
        model.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=(5, 5), padding=(3, 3), stride=(2, 2),
                                bias=False)

        PATH = "./CDM/model/model-99-resnet18.pkl"
        model.load_state_dict(torch.load(PATH, map_location=torch.device(device)))

        model.eval()
    elif clf_model == "ViT":
        # the ViT checkpoint stores the entire model object, not just a state dict
        model = torch.load('./CDM/model/model-99-ViT-entire.pkl', map_location=torch.device(device))
        model = model.to(device)
        model.eval()
    else:
        # bug fix: the original fell through with 'model' unbound and crashed with
        # a confusing NameError on 'return model'; fail fast with a clear message
        raise ValueError("clf_model has to be 'ResNet18' or 'ViT', got %r" % (clf_model,))

    return model
|
| 152 |
+
|
| 153 |
+
def compo_classification(input_img, output_root, segment_root, merge_json, output_data, resize_by_height=800, clf_model="ResNet18"):
    """Classify merged GUI elements and match them to privacy-policy segments.

    :param input_img: path of the source screenshot
    :param output_root: root directory for the classification output
    :param segment_root: root directory holding per-app classified policy sentences
    :param merge_json: merge-step result; dict with a 'compos' list (class/position/text)
    :param output_data: pandas DataFrame the matching rows are appended to
    :param resize_by_height: working height the screenshot is resized to
    :param clf_model: icon classifier to use, "ResNet18" or "ViT"
    :return: (icon-classification time, text-selection time, updated output_data, annotated board image)
    """
    # load text and non-text compo
    ele_id = 0
    compos = []
    texts = []
    elements = []

    # split the merged components into text and non-text elements,
    # assigning sequential ids across both lists
    for compo in merge_json['compos']:
        if compo['class'] == 'Text':
            element = Element(ele_id,
                              (compo["position"]['column_min'], compo["position"]['row_min'],
                               compo["position"]['column_max'], compo["position"]['row_max']),
                              'Text', text_content=compo['text_content'])
            texts.append(element)
            ele_id += 1
        else:
            element = Element(ele_id,
                              (compo["position"]['column_min'], compo["position"]['row_min'],
                               compo["position"]['column_max'], compo["position"]['row_max']),
                              compo['class'])
            compos.append(element)
            ele_id += 1

    # read the screenshot at working resolution; grey is used for classification
    org, grey = pre.read_img(input_img, resize_by_height)

    # normalise the greyscale image to [0, 1]
    grey = grey.astype('float32')
    grey = grey / 255

    # grey = (grey - grey.mean()) / grey.std()

    # --------- classification ----------

    classification_start_time = time.process_time()

    for compo in compos:

        # comp_grey = grey[compo.row_min:compo.row_max, compo.col_min:compo.col_max]
        #
        # comp_crop = cv2.resize(comp_grey, (32, 32))
        #
        # comp_crop = comp_crop.reshape(1, 1, 32, 32)
        #
        # comp_tensor = torch.tensor(comp_crop)
        # comp_tensor = comp_tensor.permute(0, 1, 3, 2)
        #
        # model = get_clf_model()
        # pred_label = model(comp_tensor)
        #
        # if str(np.argmax(pred_label.cpu().data.numpy(), axis=1)[0]) in label_dic.keys():
        #     compo.label = label_dic[str(np.argmax(pred_label.cpu().data.numpy(), axis=1)[0])]
        #     elements.append(compo)
        # else:
        #     compo.label = str(np.argmax(pred_label.cpu().data.numpy(), axis=1)[0])

        if clf_model == "ResNet18":

            # crop the component region and prepare a 1x1x32x32 greyscale tensor
            comp_grey = grey[compo.row_min:compo.row_max, compo.col_min:compo.col_max]

            comp_crop = cv2.resize(comp_grey, (32, 32))

            comp_crop = comp_crop.reshape(1, 1, 32, 32)

            comp_tensor = torch.tensor(comp_crop)
            comp_tensor = comp_tensor.permute(0, 1, 3, 2)

            # NOTE(review): the model is reloaded for every component — consider
            # hoisting get_clf_model() out of the loop
            model = get_clf_model(clf_model)
            pred_label = model(comp_tensor)

            # keep only components whose predicted class id maps to a privacy data type
            if str(np.argmax(pred_label.cpu().data.numpy(), axis=1)[0]) in label_dic.keys():
                compo.label = label_dic[str(np.argmax(pred_label.cpu().data.numpy(), axis=1)[0])]
                elements.append(compo)
            else:
                compo.label = str(np.argmax(pred_label.cpu().data.numpy(), axis=1)[0])

        elif clf_model == "ViT":

            comp_grey = grey[compo.row_min:compo.row_max, compo.col_min:compo.col_max]

            comp_crop = cv2.resize(comp_grey, (224, 224))

            # Convert the image to tensor
            comp_tensor = torch.from_numpy(comp_crop)

            # Reshape and repeat along the channel dimension to convert to RGB
            comp_tensor = comp_tensor.view(1, 224, 224).repeat(3, 1, 1)

            # comp_tensor = comp_tensor.permute(0, 2, 1)

            comp_tensor = comp_tensor.unsqueeze(0)  # add a batch dimension

            # NOTE(review): reloaded per component here as well — consider hoisting
            model = get_clf_model(clf_model)
            # pred_label = model(comp_tensor)

            # Forward pass through the model
            with torch.no_grad():
                output = model(comp_tensor)

            # Get the predicted label
            _, predicted = torch.max(output.logits, 1)

            # print("predicted_label: ", predicted.cpu().numpy())

            # same filtering as the ResNet18 branch: keep privacy-relevant icons only
            if str(predicted.cpu().numpy()[0]) in label_dic.keys():
                compo.label = label_dic[str(predicted.cpu().numpy()[0])]
                elements.append(compo)
            else:
                compo.label = str(predicted.cpu().numpy()[0])

        else:
            print("clf_model has to be ResNet18 or ViT")

    time_cost_ic = time.process_time() - classification_start_time
    print("time cost for icon classification: %2.2f s" % time_cost_ic)
    # ic_time_cost_all.append(time_cost_ic)

    # --------- end classification ----------

    text_selection_time = time.process_time()

    # label each text element with a privacy data type (keyword matching only,
    # since get_data_type is invoked with use_gpt=False below)
    for this_text in texts:
        # found_flag = 0
        #
        # for key in keyword_list:
        #     for w in keyword_list[key]:
        #         words = re.split(r'\W+', this_text.text_content.lower())
        #         if w.lower() in words:
        #             this_text.label = key
        #             elements.append(this_text)
        #             found_flag = 1
        #             break
        #
        # if found_flag == 0:
        #     this_text.label = 'others'

        # retry loop guards against OpenAI rate limits; with use_gpt=False no API
        # call is made, so the first iteration always succeeds
        retries = 10
        for i in range(retries):
            try:
                text_label = get_data_type(this_text.text_content.lower(), keyword_list, use_gpt=False)
                break
            except openai.error.RateLimitError as e:
                if "overloaded" in str(e):
                    # Exponential backoff with jitter
                    sleep_time = 2 * (2 ** i) + random.uniform(0, 0.1)
                    time.sleep(sleep_time)
                else:
                    raise
            except Exception as e:
                raise

        this_text.label = text_label

        # only privacy-relevant texts are carried forward
        if this_text.label != "others":
            elements.append(this_text)

    time_cost_ts = time.process_time() - text_selection_time
    print("time cost for text selection: %2.2f s" % time_cost_ts)
    # ts_time_cost_all.append(time_cost_ts)

    # ---------- end -------------------------------

    # re-read the screenshot at full size to draw results at original resolution
    full_size_org, full_size_grey = pre.read_img(input_img)
    ratio = full_size_org.shape[0]/org.shape[0]

    show = False
    wait_key = 0

    reassign_ids(elements)
    board = merge.show_elements(full_size_org, elements, ratio, show=show, win_name='elements after merging', wait_key=wait_key, line=3)
    board_one_element = merge.show_one_element(full_size_org, elements, ratio, show=show, win_name='elements after merging', wait_key=wait_key, line=3)

    classification_root = pjoin(output_root, 'classification')

    # save all merged elements, clips and blank background
    name = input_img.replace('\\', '/').split('/')[-1][:-4]
    components = merge.save_elements(pjoin(classification_root, name + '.json'), elements, full_size_org.shape, ratio)
    cv2.imwrite(pjoin(classification_root, name + '.jpg'), board)

    print("len(board_one_element): ", len(board_one_element))

    # one image per detected element, suffixed with its (1-based) id
    for i in range(len(elements)):
        e_name = str(int(elements[i].id) + 1)
        cv2.imwrite(pjoin(classification_root + '/GUI', name + '-' + e_name + '.jpg'), board_one_element[i])

    print('[Classification Completed] Input: %s Output: %s' % (input_img, pjoin(classification_root, name + '.jpg')))

    # ---------- matching result -----------

    # the screenshot file name encodes the app id before the first '-'
    index = input_img.split('/')[-1][:-4]
    app_id = str(index).split('-')[0]

    # keyword_index.txt maps each data-type label to its index in the policy
    index_path = pjoin(segment_root, app_id, 'classified_sentences/keyword_index.txt')
    dict_index = {}
    if exists(index_path):
        with open(index_path, 'r') as g:
            for line in g:
                key, value = line.strip().split(':', 1)
                dict_index[key] = value

    # for every privacy-relevant element, pull the matching policy sentences
    for item in elements:
        complete_path = pjoin(segment_root, app_id, 'classified_sentences', item.label + '.txt')
        print("complete_path: ", complete_path)

        if exists(complete_path):

            with open(complete_path, 'r', encoding='utf-8') as file:
                content = file.read()

            # Replace line breaks with spaces and strip any extra whitespace
            this_text = ' '.join(content.splitlines()).strip()

            # capitalise the first letter of every non-empty sentence line
            lines = content.splitlines()
            non_empty_lines = [line for line in lines if line.strip() != ""]
            for i in range(len(non_empty_lines)):
                if non_empty_lines[i][0].isalpha():
                    non_empty_lines[i] = non_empty_lines[i][0].upper() + non_empty_lines[i][1:]

            # output_data = output_data.append({'screenshot': 's' + str(index), 'id': item.id + 1, 'label': item.label, 'index': dict_index[item.label], 'text': this_text, 'sentences': non_empty_lines}, ignore_index=True)
            # NOTE(review): dict_index[item.label] raises KeyError if the label is
            # missing from keyword_index.txt — confirm the files are always in sync
            output_data = pd.concat([output_data, pd.DataFrame([{'screenshot': 's' + str(index), 'id': item.id + 1,
                                                                 'label': item.label, 'index': dict_index[item.label],
                                                                 'text': this_text, 'sentences': non_empty_lines}])])

        else:
            # output_data = output_data.append({'screenshot': 's' + str(index), 'id': item.id + 1, 'label': item.label, 'index': "None", 'text': "No information!", 'sentences': "None"},
            #                                  ignore_index=True)
            output_data = pd.concat([output_data, pd.DataFrame([{'screenshot': 's' + str(index), 'id': item.id + 1,
                                                                 'label': item.label, 'index': "None",
                                                                 'text': "No information!", 'sentences': "None"}])])
    return time_cost_ic, time_cost_ts, output_data, board
|
CDM/detect_compo/deprecated/Block.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
|
| 2 |
+
from os.path import join as pjoin
|
| 3 |
+
import time
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
from CDM.detect_compo.lib_ip.Component import Component
|
| 7 |
+
from CDM.config.CONFIG_UIED import Config
|
| 8 |
+
C = Config()
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class Block(Component):
    """A rectangular layout block detected on a GUI screenshot.

    Extends Component with layout-specific state: parent/child links used by
    the block hierarchy, whether the block doubles as a UI component, and
    whether it sits at the top/bottom bar position of the screen.
    """

    def __init__(self, region, image_shape):
        super().__init__(region, image_shape)
        self.category = 'Block'
        # hierarchy links filled in later by block_hierarchy
        self.parent = None
        self.children = []
        # True when the block itself should be treated as a UI component
        self.uicompo_ = None
        # marker for top/bottom bar blocks (set externally)
        self.top_or_botm = None
        # True for near-full-screen background blocks
        self.redundant = False

    def block_is_uicompo(self, image_shape, max_compo_scale):
        """Return True when the block is small enough (relative to the image)
        to count as an atomic UI component.

        :param image_shape: (rows, columns, ...) of the full image
        :param max_compo_scale: (max height ratio, max width ratio)
        """
        img_h, img_w = image_shape[0], image_shape[1]
        fits_height = self.bbox.height / img_h <= max_compo_scale[0]
        fits_width = self.bbox.width / img_w <= max_compo_scale[1]
        return fits_height and fits_width

    def block_is_top_or_bottom_bar(self, image_shape, top_bottom_height):
        """Return True (and flag the block as a UI component) when it spans the
        full image width and hugs either the top or the bottom edge.

        :param image_shape: (rows, columns, ...) of the full image
        :param top_bottom_height: (top bar max row ratio, bottom bar min row ratio)
        """
        img_h, img_w = image_shape[:2]
        col_min, r_min, col_max, r_max = self.bbox.put_bbox()
        # the bar must reach (within 5px) both left and right image edges
        spans_width = col_min < 5 and img_w - col_max < 5
        is_top_bar = spans_width and r_min < 5 and r_max < img_h * top_bottom_height[0]
        is_bottom_bar = (spans_width and r_min > img_h * top_bottom_height[1]
                         and img_h - r_max < 5)
        if is_top_bar or is_bottom_bar:
            self.uicompo_ = True
            return True
        return False

    def block_erase_from_bin(self, binary, pad):
        """Black out this block's (padded) bounding box in the binary map, in place.

        :param binary: binary image, modified in place
        :param pad: number of pixels to expand the bounding box on each side
        """
        c0, r0, c1, r1 = self.put_bbox()
        c0 = max(c0 - pad, 0)
        r0 = max(r0 - pad, 0)
        c1 = min(c1 + pad, binary.shape[1])
        r1 = min(r1 + pad, binary.shape[0])
        # filled rectangle with value 0 removes the block from the binary map
        cv2.rectangle(binary, (c0, r0), (c1, r1), (0), -1)
|
| 56 |
+
|
CDM/detect_compo/deprecated/block_division.py
ADDED
|
@@ -0,0 +1,108 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
|
| 2 |
+
import numpy as np
|
| 3 |
+
from random import randint as rint
|
| 4 |
+
import time
|
| 5 |
+
|
| 6 |
+
import CDM.detect_compo.lib_ip.ip_preprocessing as pre
|
| 7 |
+
import CDM.detect_compo.lib_ip.ip_detection as det
|
| 8 |
+
import CDM.detect_compo.lib_ip.ip_draw as draw
|
| 9 |
+
import CDM.detect_compo.lib_ip.ip_segment as seg
|
| 10 |
+
from CDM.detect_compo.lib_ip.Block import Block
|
| 11 |
+
from CDM.config.CONFIG_UIED import Config
|
| 12 |
+
C = Config()
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def block_hierarchy(blocks):
    """Populate each block's ``children`` index list from pairwise relations.

    For every unordered pair, ``compo_relation`` is consulted once.
    NOTE(review): assumes a return of -1 means the first block is nested in
    the second and +1 the reverse (the contained block's index is appended to
    the container's ``children``) — confirm against Component.compo_relation.

    :param blocks: list of Block objects, mutated in place
    """
    total = len(blocks)
    for a in range(total - 1):
        for b in range(a + 1, total):
            rel = blocks[a].compo_relation(blocks[b])
            if rel == -1:
                blocks[b].children.append(a)
            if rel == 1:
                blocks[a].children.append(b)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def block_bin_erase_all_blk(binary, blocks, pad=0, show=False):
    """Erase every detected layout block from the binary map, in place.

    :param binary: binary map of the original image; modified in place
    :param blocks: detected layout blocks, each erased via block_erase_from_bin
    :param pad: padding used to expand each block's bounding box before erasing
    :param show: when True, display the map before and after erasing
    """
    before = binary.copy()  # snapshot only used for the optional display
    for blk in blocks:
        blk.block_erase_from_bin(binary, pad)
    if show:
        cv2.imshow('before', before)
        cv2.imshow('after', binary)
        cv2.waitKey()
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def block_division(grey, org, grad_thresh,
                   show=False, write_path=None,
                   step_h=10, step_v=10,
                   line_thickness=C.THRESHOLD_LINE_THICKNESS,
                   min_rec_evenness=C.THRESHOLD_REC_MIN_EVENNESS,
                   max_dent_ratio=C.THRESHOLD_REC_MAX_DENT_RATIO,
                   min_block_height_ratio=C.THRESHOLD_BLOCK_MIN_HEIGHT):
    '''
    Detect rectangular layout blocks by flood-filling the grey-scale image
    from seed points sampled on a regular grid.

    :param grey: grey-scale of original image
    :param org: original image -- NOTE(review): appears unused in this body
    :param grad_thresh: loDiff/upDiff tolerance handed to cv2.floodFill
    :param show: display the drawing boards when True
    :param write_path: if not None, write the block drawing to this path
    :param step_h: row stride between flood-fill seed points
    :param step_v: column stride between flood-fill seed points
    :param line_thickness: threshold for discarding line-shaped regions
    :param min_rec_evenness: minimum evenness for a region to count as rectangular
    :param max_dent_ratio: maximum dent ratio for a region to count as rectangular
    :param min_block_height_ratio: unused -- the corresponding check is commented out
    :return: list of Block objects (each built from its filled region)
    '''
    blocks = []
    # floodFill requires the mask to be 2 pixels larger than the image on each axis
    mask = np.zeros((grey.shape[0]+2, grey.shape[1]+2), dtype=np.uint8)
    # drawing boards; only written to by the (commented-out) draw calls
    broad = np.zeros((grey.shape[0], grey.shape[1], 3), dtype=np.uint8)
    broad_all = broad.copy()

    row, column = grey.shape[0], grey.shape[1]
    for x in range(0, row, step_h):
        for y in range(0, column, step_v):
            # only seed from points not already claimed by a previous fill
            if mask[x, y] == 0:
                # region = flood_fill_bfs(grey, x, y, mask)

                # flood fill algorithm to get background (layout block)
                mask_copy = mask.copy()
                # seed is given as (x=column, y=row), hence the (y, x) order;
                # FLOODFILL_MASK_ONLY marks the mask instead of repainting grey
                ff = cv2.floodFill(grey, mask, (y, x), None, grad_thresh, grad_thresh, cv2.FLOODFILL_MASK_ONLY)
                # ignore small regions (ff[0]: pixel count reported by floodFill)
                if ff[0] < 500: continue
                # isolate only the pixels newly marked by this particular fill
                mask_copy = mask - mask_copy
                # drop the 1-pixel mask border, collect filled points as (x, y) pairs
                region = np.reshape(cv2.findNonZero(mask_copy[1:-1, 1:-1]), (-1, 2))
                # convert to (row, column) order expected by Block
                region = [(p[1], p[0]) for p in region]

                block = Block(region, grey.shape)
                # draw.draw_region(region, broad_all)
                # if block.height < 40 and block.width < 40:
                #     continue
                # too short to be a meaningful layout block
                if block.height < 30:
                    continue

                # print(block.area / (row * column))
                # near-full-image region: background rather than a block
                if block.area / (row * column) > 0.9:
                    continue
                elif block.area / (row * column) > 0.7:
                    # large but not full-image: keep, flagged as redundant
                    block.redundant = True

                # get the boundary of this region
                # ignore lines
                if block.compo_is_line(line_thickness):
                    continue
                # ignore non-rectangle as blocks must be rectangular
                if not block.compo_is_rectangle(min_rec_evenness, max_dent_ratio):
                    continue
                # if block.height/row < min_block_height_ratio:
                #     continue
                blocks.append(block)
                # draw.draw_region(region, broad)
    if show:
        cv2.imshow('flood-fill all', broad_all)
        cv2.imshow('block', broad)
        cv2.waitKey()
    if write_path is not None:
        cv2.imwrite(write_path, broad)
    return blocks
|
CDM/detect_compo/deprecated/ip_detection_utils.py
ADDED
|
@@ -0,0 +1,461 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import cv2
|
| 3 |
+
from collections import Counter
|
| 4 |
+
|
| 5 |
+
import lib_ip.ip_draw as draw
|
| 6 |
+
from CDM.config.CONFIG_UIED import Config
|
| 7 |
+
C = Config()
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
# detect object(connected region)
|
| 11 |
+
# def boundary_bfs_connected_area(img, x, y, mark):
|
| 12 |
+
# def neighbor(img, x, y, mark, stack):
|
| 13 |
+
# for i in range(x - 1, x + 2):
|
| 14 |
+
# if i < 0 or i >= img.shape[0]: continue
|
| 15 |
+
# for j in range(y - 1, y + 2):
|
| 16 |
+
# if j < 0 or j >= img.shape[1]: continue
|
| 17 |
+
# if img[i, j] == 255 and mark[i, j] == 0:
|
| 18 |
+
# stack.append([i, j])
|
| 19 |
+
# mark[i, j] = 255
|
| 20 |
+
#
|
| 21 |
+
# stack = [[x, y]] # points waiting for inspection
|
| 22 |
+
# area = [[x, y]] # points of this area
|
| 23 |
+
# mark[x, y] = 255 # drawing broad
|
| 24 |
+
#
|
| 25 |
+
# while len(stack) > 0:
|
| 26 |
+
# point = stack.pop()
|
| 27 |
+
# area.append(point)
|
| 28 |
+
# neighbor(img, point[0], point[1], mark, stack)
|
| 29 |
+
# return area
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
# def line_check_perpendicular(lines_h, lines_v, max_thickness):
|
| 33 |
+
# """
|
| 34 |
+
# lines: [line_h, line_v]
|
| 35 |
+
# -> line_h: horizontal {'head':(column_min, row), 'end':(column_max, row), 'thickness':int)
|
| 36 |
+
# -> line_v: vertical {'head':(column, row_min), 'end':(column, row_max), 'thickness':int}
|
| 37 |
+
# """
|
| 38 |
+
# is_per_h = np.full(len(lines_h), False)
|
| 39 |
+
# is_per_v = np.full(len(lines_v), False)
|
| 40 |
+
# for i in range(len(lines_h)):
|
| 41 |
+
# # save the intersection point of h
|
| 42 |
+
# lines_h[i]['inter_point'] = set()
|
| 43 |
+
# h = lines_h[i]
|
| 44 |
+
#
|
| 45 |
+
# for j in range(len(lines_v)):
|
| 46 |
+
# # save the intersection point of v
|
| 47 |
+
# if 'inter_point' not in lines_v[j]: lines_v[j]['inter_point'] = set()
|
| 48 |
+
# v = lines_v[j]
|
| 49 |
+
#
|
| 50 |
+
# # if h is perpendicular to v in head of v
|
| 51 |
+
# if abs(h['head'][1]-v['head'][1]) <= max_thickness:
|
| 52 |
+
# if abs(h['head'][0] - v['head'][0]) <= max_thickness:
|
| 53 |
+
# lines_h[i]['inter_point'].add('head')
|
| 54 |
+
# lines_v[j]['inter_point'].add('head')
|
| 55 |
+
# is_per_h[i] = True
|
| 56 |
+
# is_per_v[j] = True
|
| 57 |
+
# elif abs(h['end'][0] - v['head'][0]) <= max_thickness:
|
| 58 |
+
# lines_h[i]['inter_point'].add('end')
|
| 59 |
+
# lines_v[j]['inter_point'].add('head')
|
| 60 |
+
# is_per_h[i] = True
|
| 61 |
+
# is_per_v[j] = True
|
| 62 |
+
#
|
| 63 |
+
# # if h is perpendicular to v in end of v
|
| 64 |
+
# elif abs(h['head'][1]-v['end'][1]) <= max_thickness:
|
| 65 |
+
# if abs(h['head'][0] - v['head'][0]) <= max_thickness:
|
| 66 |
+
# lines_h[i]['inter_point'].add('head')
|
| 67 |
+
# lines_v[j]['inter_point'].add('end')
|
| 68 |
+
# is_per_h[i] = True
|
| 69 |
+
# is_per_v[j] = True
|
| 70 |
+
# elif abs(h['end'][0] - v['head'][0]) <= max_thickness:
|
| 71 |
+
# lines_h[i]['inter_point'].add('end')
|
| 72 |
+
# lines_v[j]['inter_point'].add('end')
|
| 73 |
+
# is_per_h[i] = True
|
| 74 |
+
# is_per_v[j] = True
|
| 75 |
+
# per_h = []
|
| 76 |
+
# per_v = []
|
| 77 |
+
# for i in range(len(is_per_h)):
|
| 78 |
+
# if is_per_h[i]:
|
| 79 |
+
# lines_h[i]['inter_point'] = list(lines_h[i]['inter_point'])
|
| 80 |
+
# per_h.append(lines_h[i])
|
| 81 |
+
# for i in range(len(is_per_v)):
|
| 82 |
+
# if is_per_v[i]:
|
| 83 |
+
# lines_v[i]['inter_point'] = list(lines_v[i]['inter_point'])
|
| 84 |
+
# per_v.append(lines_v[i])
|
| 85 |
+
# return per_h, per_v
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
# def line_shrink_corners(corner, lines_h, lines_v):
|
| 89 |
+
# """
|
| 90 |
+
# shrink the corner according to lines:
|
| 91 |
+
# col_min_shrink: shrink right (increase)
|
| 92 |
+
# col_max_shrink: shrink left (decrease)
|
| 93 |
+
# row_min_shrink: shrink down (increase)
|
| 94 |
+
# row_max_shrink: shrink up (decrease)
|
| 95 |
+
# :param lines_h: horizontal {'head':(column_min, row), 'end':(column_max, row), 'thickness':int)
|
| 96 |
+
# :param lines_v: vertical {'head':(column, row_min), 'end':(column, row_max), 'thickness':int}
|
| 97 |
+
# :return: shrunken corner: (top_left, bottom_right)
|
| 98 |
+
# """
|
| 99 |
+
# (col_min, row_min), (col_max, row_max) = corner
|
| 100 |
+
# col_min_shrink, row_min_shrink = col_min, row_min
|
| 101 |
+
# col_max_shrink, row_max_shrink = col_max, row_max
|
| 102 |
+
# valid_frame = False
|
| 103 |
+
#
|
| 104 |
+
# for h in lines_h:
|
| 105 |
+
# # ignore outer border
|
| 106 |
+
# if len(h['inter_point']) == 2:
|
| 107 |
+
# valid_frame = True
|
| 108 |
+
# continue
|
| 109 |
+
# # shrink right -> col_min move to end
|
| 110 |
+
# if h['inter_point'][0] == 'head':
|
| 111 |
+
# col_min_shrink = max(h['end'][0], col_min_shrink)
|
| 112 |
+
# # shrink left -> col_max move to head
|
| 113 |
+
# elif h['inter_point'][0] == 'end':
|
| 114 |
+
# col_max_shrink = min(h['head'][0], col_max_shrink)
|
| 115 |
+
#
|
| 116 |
+
# for v in lines_v:
|
| 117 |
+
# # ignore outer border
|
| 118 |
+
# if len(v['inter_point']) == 2:
|
| 119 |
+
# valid_frame = True
|
| 120 |
+
# continue
|
| 121 |
+
# # shrink down -> row_min move to end
|
| 122 |
+
# if v['inter_point'][0] == 'head':
|
| 123 |
+
# row_min_shrink = max(v['end'][1], row_min_shrink)
|
| 124 |
+
# # shrink up -> row_max move to head
|
| 125 |
+
# elif v['inter_point'][0] == 'end':
|
| 126 |
+
# row_max_shrink = min(v['head'][1], row_max_shrink)
|
| 127 |
+
#
|
| 128 |
+
# # return the shrunken corner if only there is line intersecting with two other lines
|
| 129 |
+
# if valid_frame:
|
| 130 |
+
# return (col_min_shrink, row_min_shrink), (col_max_shrink, row_max_shrink)
|
| 131 |
+
# return corner
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
# def line_cvt_relative_position(col_min, row_min, lines_h, lines_v):
|
| 135 |
+
# """
|
| 136 |
+
# convert the relative position of lines in the entire image
|
| 137 |
+
# :param col_min: based column the img lines belong to
|
| 138 |
+
# :param row_min: based row the img lines belong to
|
| 139 |
+
# :param lines_h: horizontal {'head':(column_min, row), 'end':(column_max, row), 'thickness':int)
|
| 140 |
+
# :param lines_v: vertical {'head':(column, row_min), 'end':(column, row_max), 'thickness':int}
|
| 141 |
+
# :return: lines_h_cvt, lines_v_cvt
|
| 142 |
+
# """
|
| 143 |
+
# for h in lines_h:
|
| 144 |
+
# h['head'][0] += col_min
|
| 145 |
+
# h['head'][1] += row_min
|
| 146 |
+
# h['end'][0] += col_min
|
| 147 |
+
# h['end'][1] += row_min
|
| 148 |
+
# for v in lines_v:
|
| 149 |
+
# v['head'][0] += col_min
|
| 150 |
+
# v['head'][1] += row_min
|
| 151 |
+
# v['end'][0] += col_min
|
| 152 |
+
# v['end'][1] += row_min
|
| 153 |
+
#
|
| 154 |
+
# return lines_h, lines_v
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
# check if an object is so slim
|
| 158 |
+
# @boundary: [border_up, border_bottom, border_left, border_right]
|
| 159 |
+
# -> up, bottom: (column_index, min/max row border)
|
| 160 |
+
# -> left, right: (row_index, min/max column border) detect range of each row
|
| 161 |
+
def clipping_by_line(boundary, boundary_rec, lines):
    """Split a component boundary into sub-boundaries along detected lines.

    :param boundary: [border_up, border_bottom, border_left, border_right]
        -> up/bottom entries: (column_index, min/max row border)
        -> left/right entries: (row_index, min/max column border)
    :param boundary_rec: output list; one clipped [top, bottom, left, right]
        boundary is appended per detected sub-area (mutated in place)
    :param lines: dict keyed by orientation; each value is a list of line
        coordinate pairs. NOTE(review): only the 'h' branch is implemented,
        other orientations are silently ignored -- confirm intended.
    """
    boundary = boundary.copy()  # shallow copy: inner border lists stay shared with the caller
    for orient in lines:
        # horizontal
        if orient == 'h':
            # column range of sub area
            r1, r2 = 0, 0
            for line in lines[orient]:
                # a line starting at 0 only opens a new range, emits nothing
                if line[0] == 0:
                    r1 = line[1]
                    continue
                r2 = line[0]
                b_top = []
                b_bottom = []
                # keep only top/bottom border points whose first coordinate
                # falls inside the half-open range [r1, r2)
                for i in range(len(boundary[0])):
                    if r2 > boundary[0][i][0] >= r1:
                        b_top.append(boundary[0][i])
                for i in range(len(boundary[1])):
                    if r2 > boundary[1][i][0] >= r1:
                        b_bottom.append(boundary[1][i])

                b_left = [x for x in boundary[2]]  # (row_index, min column border)
                # clamp the left border into the sub-area range.
                # NOTE(review): these element assignments mutate the inner lists
                # shared with the caller (the copy above is shallow) -- confirm
                # this side effect is intended.
                for i in range(len(b_left)):
                    if b_left[i][1] < r1:
                        b_left[i][1] = r1
                b_right = [x for x in boundary[3]]  # (row_index, max column border)
                for i in range(len(b_right)):
                    if b_right[i][1] > r2:
                        b_right[i][1] = r2

                boundary_rec.append([b_top, b_bottom, b_left, b_right])
                # the next sub-area starts where this line ends
                r1 = line[1]
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
# remove imgs that contain text
|
| 196 |
+
# def rm_text(org, corners, compo_class,
|
| 197 |
+
# max_text_height=C.THRESHOLD_TEXT_MAX_HEIGHT, max_text_width=C.THRESHOLD_TEXT_MAX_WIDTH,
|
| 198 |
+
# ocr_padding=C.OCR_PADDING, ocr_min_word_area=C.OCR_MIN_WORD_AREA, show=False):
|
| 199 |
+
# """
|
| 200 |
+
# Remove area that full of text
|
| 201 |
+
# :param org: original image
|
| 202 |
+
# :param corners: [(top_left, bottom_right)]
|
| 203 |
+
# -> top_left: (column_min, row_min)
|
| 204 |
+
# -> bottom_right: (column_max, row_max)
|
| 205 |
+
# :param compo_class: classes of corners
|
| 206 |
+
# :param max_text_height: Too large to be text
|
| 207 |
+
# :param max_text_width: Too large to be text
|
| 208 |
+
# :param ocr_padding: Padding for clipping
|
| 209 |
+
# :param ocr_min_word_area: If too text area ratio is too large
|
| 210 |
+
# :param show: Show or not
|
| 211 |
+
# :return: corners without text objects
|
| 212 |
+
# """
|
| 213 |
+
# new_corners = []
|
| 214 |
+
# new_class = []
|
| 215 |
+
# for i in range(len(corners)):
|
| 216 |
+
# corner = corners[i]
|
| 217 |
+
# (top_left, bottom_right) = corner
|
| 218 |
+
# (col_min, row_min) = top_left
|
| 219 |
+
# (col_max, row_max) = bottom_right
|
| 220 |
+
# height = row_max - row_min
|
| 221 |
+
# width = col_max - col_min
|
| 222 |
+
# # highly likely to be block or img if too large
|
| 223 |
+
# if height > max_text_height and width > max_text_width:
|
| 224 |
+
# new_corners.append(corner)
|
| 225 |
+
# new_class.append(compo_class[i])
|
| 226 |
+
# else:
|
| 227 |
+
# row_min = row_min - ocr_padding if row_min - ocr_padding >= 0 else 0
|
| 228 |
+
# row_max = row_max + ocr_padding if row_max + ocr_padding < org.shape[0] else org.shape[0]
|
| 229 |
+
# col_min = col_min - ocr_padding if col_min - ocr_padding >= 0 else 0
|
| 230 |
+
# col_max = col_max + ocr_padding if col_max + ocr_padding < org.shape[1] else org.shape[1]
|
| 231 |
+
# # check if this area is text
|
| 232 |
+
# clip = org[row_min: row_max, col_min: col_max]
|
| 233 |
+
# if not ocr.is_text(clip, ocr_min_word_area, show=show):
|
| 234 |
+
# new_corners.append(corner)
|
| 235 |
+
# new_class.append(compo_class[i])
|
| 236 |
+
# return new_corners, new_class
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
# def rm_img_in_compo(corners_img, corners_compo):
|
| 240 |
+
# """
|
| 241 |
+
# Remove imgs in component
|
| 242 |
+
# """
|
| 243 |
+
# corners_img_new = []
|
| 244 |
+
# for img in corners_img:
|
| 245 |
+
# is_nested = False
|
| 246 |
+
# for compo in corners_compo:
|
| 247 |
+
# if util.corner_relation(img, compo) == -1:
|
| 248 |
+
# is_nested = True
|
| 249 |
+
# break
|
| 250 |
+
# if not is_nested:
|
| 251 |
+
# corners_img_new.append(img)
|
| 252 |
+
# return corners_img_new
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
# def block_or_compo(org, binary, corners,
|
| 256 |
+
# max_thickness=C.THRESHOLD_BLOCK_MAX_BORDER_THICKNESS, max_block_cross_points=C.THRESHOLD_BLOCK_MAX_CROSS_POINT,
|
| 257 |
+
# min_compo_w_h_ratio=C.THRESHOLD_UICOMPO_MIN_W_H_RATIO, max_compo_w_h_ratio=C.THRESHOLD_UICOMPO_MAX_W_H_RATIO,
|
| 258 |
+
# min_block_edge=C.THRESHOLD_BLOCK_MIN_EDGE_LENGTH):
|
| 259 |
+
# """
|
| 260 |
+
# Check if the objects are img components or just block
|
| 261 |
+
# :param org: Original image
|
| 262 |
+
# :param binary: Binary image from pre-processing
|
| 263 |
+
# :param corners: [(top_left, bottom_right)]
|
| 264 |
+
# -> top_left: (column_min, row_min)
|
| 265 |
+
# -> bottom_right: (column_max, row_max)
|
| 266 |
+
# :param max_thickness: The max thickness of border of blocks
|
| 267 |
+
# :param max_block_cross_points: Ratio of point of interaction
|
| 268 |
+
# :return: corners of blocks and imgs
|
| 269 |
+
# """
|
| 270 |
+
# blocks = []
|
| 271 |
+
# imgs = []
|
| 272 |
+
# compos = []
|
| 273 |
+
# for corner in corners:
|
| 274 |
+
# (top_left, bottom_right) = corner
|
| 275 |
+
# (col_min, row_min) = top_left
|
| 276 |
+
# (col_max, row_max) = bottom_right
|
| 277 |
+
# height = row_max - row_min
|
| 278 |
+
# width = col_max - col_min
|
| 279 |
+
#
|
| 280 |
+
# block = False
|
| 281 |
+
# vacancy = [0, 0, 0, 0]
|
| 282 |
+
# for i in range(1, max_thickness):
|
| 283 |
+
# try:
|
| 284 |
+
# # top to bottom
|
| 285 |
+
# if vacancy[0] == 0 and (col_max - col_min - 2 * i) is not 0 and (
|
| 286 |
+
# np.sum(binary[row_min + i, col_min + i: col_max - i]) / 255) / (col_max - col_min - 2 * i) <= max_block_cross_points:
|
| 287 |
+
# vacancy[0] = 1
|
| 288 |
+
# # bottom to top
|
| 289 |
+
# if vacancy[1] == 0 and (col_max - col_min - 2 * i) is not 0 and (
|
| 290 |
+
# np.sum(binary[row_max - i, col_min + i: col_max - i]) / 255) / (col_max - col_min - 2 * i) <= max_block_cross_points:
|
| 291 |
+
# vacancy[1] = 1
|
| 292 |
+
# # left to right
|
| 293 |
+
# if vacancy[2] == 0 and (row_max - row_min - 2 * i) is not 0 and (
|
| 294 |
+
# np.sum(binary[row_min + i: row_max - i, col_min + i]) / 255) / (row_max - row_min - 2 * i) <= max_block_cross_points:
|
| 295 |
+
# vacancy[2] = 1
|
| 296 |
+
# # right to left
|
| 297 |
+
# if vacancy[3] == 0 and (row_max - row_min - 2 * i) is not 0 and (
|
| 298 |
+
# np.sum(binary[row_min + i: row_max - i, col_max - i]) / 255) / (row_max - row_min - 2 * i) <= max_block_cross_points:
|
| 299 |
+
# vacancy[3] = 1
|
| 300 |
+
# if np.sum(vacancy) == 4:
|
| 301 |
+
# block = True
|
| 302 |
+
# except:
|
| 303 |
+
# pass
|
| 304 |
+
#
|
| 305 |
+
# # too big to be UI components
|
| 306 |
+
# if block:
|
| 307 |
+
# if height > min_block_edge and width > min_block_edge:
|
| 308 |
+
# blocks.append(corner)
|
| 309 |
+
# else:
|
| 310 |
+
# if min_compo_w_h_ratio < width / height < max_compo_w_h_ratio:
|
| 311 |
+
# compos.append(corner)
|
| 312 |
+
# # filter out small objects
|
| 313 |
+
# else:
|
| 314 |
+
# if height > min_block_edge:
|
| 315 |
+
# imgs.append(corner)
|
| 316 |
+
# else:
|
| 317 |
+
# if min_compo_w_h_ratio < width / height < max_compo_w_h_ratio:
|
| 318 |
+
# compos.append(corner)
|
| 319 |
+
# return blocks, imgs, compos
|
| 320 |
+
|
| 321 |
+
|
| 322 |
+
# def compo_on_img(processing, org, binary, clf,
|
| 323 |
+
# compos_corner, compos_class):
|
| 324 |
+
# """
|
| 325 |
+
# Detect potential UI components inner img;
|
| 326 |
+
# Only leave non-img
|
| 327 |
+
# """
|
| 328 |
+
# pad = 2
|
| 329 |
+
# for i in range(len(compos_corner)):
|
| 330 |
+
# if compos_class[i] != 'img':
|
| 331 |
+
# continue
|
| 332 |
+
# ((col_min, row_min), (col_max, row_max)) = compos_corner[i]
|
| 333 |
+
# col_min = max(col_min - pad, 0)
|
| 334 |
+
# col_max = min(col_max + pad, org.shape[1])
|
| 335 |
+
# row_min = max(row_min - pad, 0)
|
| 336 |
+
# row_max = min(row_max + pad, org.shape[0])
|
| 337 |
+
# area = (col_max - col_min) * (row_max - row_min)
|
| 338 |
+
# if area < 600:
|
| 339 |
+
# continue
|
| 340 |
+
#
|
| 341 |
+
# clip_org = org[row_min:row_max, col_min:col_max]
|
| 342 |
+
# clip_bin_inv = pre.reverse_binary(binary[row_min:row_max, col_min:col_max])
|
| 343 |
+
#
|
| 344 |
+
# compos_boundary_new, compos_corner_new, compos_class_new = processing(clip_org, clip_bin_inv, clf)
|
| 345 |
+
# compos_corner_new = util.corner_cvt_relative_position(compos_corner_new, col_min, row_min)
|
| 346 |
+
#
|
| 347 |
+
# assert len(compos_corner_new) == len(compos_class_new)
|
| 348 |
+
#
|
| 349 |
+
# # only leave non-img elements
|
| 350 |
+
# for i in range(len(compos_corner_new)):
|
| 351 |
+
# ((col_min_new, row_min_new), (col_max_new, row_max_new)) = compos_corner_new[i]
|
| 352 |
+
# area_new = (col_max_new - col_min_new) * (row_max_new - row_min_new)
|
| 353 |
+
# if compos_class_new[i] != 'img' and area_new / area < 0.8:
|
| 354 |
+
# compos_corner.append(compos_corner_new[i])
|
| 355 |
+
# compos_class.append(compos_class_new[i])
|
| 356 |
+
#
|
| 357 |
+
# return compos_corner, compos_class
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
# def strip_img(corners_compo, compos_class, corners_img):
|
| 361 |
+
# """
|
| 362 |
+
# Separate img from other compos
|
| 363 |
+
# :return: compos without img
|
| 364 |
+
# """
|
| 365 |
+
# corners_compo_withuot_img = []
|
| 366 |
+
# compo_class_withuot_img = []
|
| 367 |
+
# for i in range(len(compos_class)):
|
| 368 |
+
# if compos_class[i] == 'img':
|
| 369 |
+
# corners_img.append(corners_compo[i])
|
| 370 |
+
# else:
|
| 371 |
+
# corners_compo_withuot_img.append(corners_compo[i])
|
| 372 |
+
# compo_class_withuot_img.append(compos_class[i])
|
| 373 |
+
# return corners_compo_withuot_img, compo_class_withuot_img
|
| 374 |
+
|
| 375 |
+
|
| 376 |
+
# def merge_corner(corners, compos_class, min_selected_IoU=C.THRESHOLD_MIN_IOU, is_merge_nested_same=True):
|
| 377 |
+
# """
|
| 378 |
+
# Calculate the Intersection over Overlap (IoU) and merge corners according to the value of IoU
|
| 379 |
+
# :param is_merge_nested_same: if true, merge the nested corners with same class whatever the IoU is
|
| 380 |
+
# :param corners: corners: [(top_left, bottom_right)]
|
| 381 |
+
# -> top_left: (column_min, row_min)
|
| 382 |
+
# -> bottom_right: (column_max, row_max)
|
| 383 |
+
# :return: new corners
|
| 384 |
+
# """
|
| 385 |
+
# new_corners = []
|
| 386 |
+
# new_class = []
|
| 387 |
+
# for i in range(len(corners)):
|
| 388 |
+
# is_intersected = False
|
| 389 |
+
# for j in range(len(new_corners)):
|
| 390 |
+
# r = util.corner_relation_nms(corners[i], new_corners[j], min_selected_IoU)
|
| 391 |
+
# # r = util.corner_relation(corners[i], new_corners[j])
|
| 392 |
+
# if is_merge_nested_same:
|
| 393 |
+
# if compos_class[i] == new_class[j]:
|
| 394 |
+
# # if corners[i] is in new_corners[j], ignore corners[i]
|
| 395 |
+
# if r == -1:
|
| 396 |
+
# is_intersected = True
|
| 397 |
+
# break
|
| 398 |
+
# # if new_corners[j] is in corners[i], replace new_corners[j] with corners[i]
|
| 399 |
+
# elif r == 1:
|
| 400 |
+
# is_intersected = True
|
| 401 |
+
# new_corners[j] = corners[i]
|
| 402 |
+
#
|
| 403 |
+
# # if above IoU threshold, and corners[i] is in new_corners[j], ignore corners[i]
|
| 404 |
+
# if r == -2:
|
| 405 |
+
# is_intersected = True
|
| 406 |
+
# break
|
| 407 |
+
# # if above IoU threshold, and new_corners[j] is in corners[i], replace new_corners[j] with corners[i]
|
| 408 |
+
# elif r == 2:
|
| 409 |
+
# is_intersected = True
|
| 410 |
+
# new_corners[j] = corners[i]
|
| 411 |
+
# new_class[j] = compos_class[i]
|
| 412 |
+
#
|
| 413 |
+
# # containing and too small
|
| 414 |
+
# elif r == -3:
|
| 415 |
+
# is_intersected = True
|
| 416 |
+
# break
|
| 417 |
+
# elif r == 3:
|
| 418 |
+
# is_intersected = True
|
| 419 |
+
# new_corners[j] = corners[i]
|
| 420 |
+
#
|
| 421 |
+
# # if [i] and [j] are overlapped but no containing relation, merge corners when same class
|
| 422 |
+
# elif r == 4:
|
| 423 |
+
# is_intersected = True
|
| 424 |
+
# if compos_class[i] == new_class[j]:
|
| 425 |
+
# new_corners[j] = util.corner_merge_two_corners(corners[i], new_corners[j])
|
| 426 |
+
#
|
| 427 |
+
# if not is_intersected:
|
| 428 |
+
# new_corners.append(corners[i])
|
| 429 |
+
# new_class.append(compos_class[i])
|
| 430 |
+
# return new_corners, new_class
|
| 431 |
+
|
| 432 |
+
|
| 433 |
+
# def select_corner(corners, compos_class, class_name):
|
| 434 |
+
# """
|
| 435 |
+
# Select corners in given compo type
|
| 436 |
+
# """
|
| 437 |
+
# corners_wanted = []
|
| 438 |
+
# for i in range(len(compos_class)):
|
| 439 |
+
# if compos_class[i] == class_name:
|
| 440 |
+
# corners_wanted.append(corners[i])
|
| 441 |
+
# return corners_wanted
|
| 442 |
+
|
| 443 |
+
|
| 444 |
+
# def flood_fill_bfs(img, x_start, y_start, mark, grad_thresh):
|
| 445 |
+
# def neighbor(x, y):
|
| 446 |
+
# for i in range(x - 1, x + 2):
|
| 447 |
+
# if i < 0 or i >= img.shape[0]: continue
|
| 448 |
+
# for j in range(y - 1, y + 2):
|
| 449 |
+
# if j < 0 or j >= img.shape[1]: continue
|
| 450 |
+
# if mark[i, j] == 0 and abs(img[i, j] - img[x, y]) < grad_thresh:
|
| 451 |
+
# stack.append([i, j])
|
| 452 |
+
# mark[i, j] = 255
|
| 453 |
+
#
|
| 454 |
+
# stack = [[x_start, y_start]] # points waiting for inspection
|
| 455 |
+
# region = [[x_start, y_start]] # points of this connected region
|
| 456 |
+
# mark[x_start, y_start] = 255 # drawing broad
|
| 457 |
+
# while len(stack) > 0:
|
| 458 |
+
# point = stack.pop()
|
| 459 |
+
# region.append(point)
|
| 460 |
+
# neighbor(point[0], point[1])
|
| 461 |
+
# return region
|
CDM/detect_compo/deprecated/ip_segment.py
ADDED
|
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
|
| 2 |
+
import numpy as np
|
| 3 |
+
import shutil
|
| 4 |
+
import os
|
| 5 |
+
from os.path import join as pjoin
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def segment_img(org, segment_size, output_path, overlap=100):
    """Slice a (tall) screenshot into vertically overlapping strips.

    Each strip is ``segment_size`` rows high and consecutive strips share
    ``overlap`` rows; strips are written to ``output_path`` as
    ``0.png, 1.png, ...``.

    :param org: source image as a numpy array (H x W [x C])
    :param segment_size: height in rows of each segment
    :param output_path: directory that receives the segment images
    :param overlap: number of rows shared by consecutive segments
    """
    # makedirs(exist_ok=True) also covers a missing parent directory,
    # which plain os.mkdir would raise on
    os.makedirs(output_path, exist_ok=True)

    height = np.shape(org)[0]
    top = 0
    bottom = segment_size
    segment_no = 0
    while top < height:
        cv2.imwrite(os.path.join(output_path, str(segment_no) + '.png'), org[top:bottom])
        segment_no += 1
        # BUG FIX: the original loop condition (top < height and bottom < height)
        # never wrote the final, height-clamped segment, silently dropping the
        # tail of the image; write it, then stop.
        if bottom >= height:
            break
        top += segment_size - overlap
        bottom = min(top + segment_size, height)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def clipping(img, components, pad=0, show=False):
    """Crop every component's bounding box out of *img*.

    :param img: original image
    :param components: objects providing ``compo_clipping(img, pad=pad)``
    :param pad: shrink (negative) or expand (positive) each bounding box
    :param show: when True, preview each crop in an OpenCV window
    :return: list of cropped images, one per component
    """
    if not show:
        return [compo.compo_clipping(img, pad=pad) for compo in components]

    crops = []
    for compo in components:
        crop = compo.compo_clipping(img, pad=pad)
        crops.append(crop)
        cv2.imshow('clipping', crop)
        cv2.waitKey()
    return crops
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def dissemble_clip_img_hollow(clip_root, org, compos):
    """Save each component crop under a per-category folder and write the
    original image with all component regions hollowed out via alpha.

    Output layout: ``clip_root/<category>/<id>.jpg`` for every compo, plus
    ``clip_root/bkg.png`` whose alpha channel is 0 inside every component
    bbox and 255 elsewhere.
    """
    # always start from a clean output directory
    if os.path.exists(clip_root):
        shutil.rmtree(clip_root)
    os.mkdir(clip_root)

    # alpha mask: 255 = keep, 0 = hollowed out
    hollow_out = np.ones(org.shape[:2], dtype=np.uint8) * 255
    seen_categories = []
    for compo in compos:
        category_dir = pjoin(clip_root, compo.category)
        # create each category folder the first time it is encountered
        if compo.category not in seen_categories:
            os.mkdir(category_dir)
            seen_categories.append(compo.category)
        cv2.imwrite(pjoin(category_dir, str(compo.id) + '.jpg'), compo.compo_clipping(org))

        # punch the component's bbox out of the alpha mask
        col_min, row_min, col_max, row_max = compo.put_bbox()
        hollow_out[row_min: row_max, col_min: col_max] = 0

    # attach the mask as an alpha channel and write the hollowed background
    bkg = cv2.merge((org.copy(), hollow_out))
    cv2.imwrite(os.path.join(clip_root, 'bkg.png'), bkg)
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def dissemble_clip_img_fill(clip_root, org, compos, flag='most'):
    # Save each component crop under clip_root/<category>/<id>.jpg and write a
    # background image ('bkg.png') in which every component bbox is painted
    # over with a fill colour sampled from the pixels around that bbox.
    # NOTE: the two helpers below close over col_min/row_min/col_max/row_max,
    # which are (re)assigned inside the main loop before each call — the
    # definition order here is load-bearing.
    # NOTE(review): if flag is neither 'average' nor 'most', `color` is never
    # assigned and cv2.rectangle raises NameError.

    def average_pix_around(pad=6, offset=3):
        # Per-channel mean colour of a `pad`-wide ring around the current
        # bbox, leaving an `offset`-pixel gap right next to the component.
        up = row_min - pad if row_min - pad >= 0 else 0
        left = col_min - pad if col_min - pad >= 0 else 0
        bottom = row_max + pad if row_max + pad < org.shape[0] - 1 else org.shape[0] - 1
        right = col_max + pad if col_max + pad < org.shape[1] - 1 else org.shape[1] - 1

        average = []
        for i in range(3):
            # mean of the strips above / below / left of / right of the bbox
            avg_up = np.average(org[up:row_min - offset, left:right, i])
            avg_bot = np.average(org[row_max + offset:bottom, left:right, i])
            avg_left = np.average(org[up:bottom, left:col_min - offset, i])
            avg_right = np.average(org[up:bottom, col_max + offset:right, i])
            average.append(int((avg_up + avg_bot + avg_left + avg_right)/4))
        return average

    def most_pix_around(pad=6, offset=2):
        # Per-channel most frequent colour value in a `pad`-wide ring around
        # the current bbox (mode, via bincount), skipping an `offset` gap.
        up = row_min - pad if row_min - pad >= 0 else 0
        left = col_min - pad if col_min - pad >= 0 else 0
        bottom = row_max + pad if row_max + pad < org.shape[0] - 1 else org.shape[0] - 1
        right = col_max + pad if col_max + pad < org.shape[1] - 1 else org.shape[1] - 1

        most = []
        for i in range(3):
            # gather all ring pixels of channel i into one flat array
            val = np.concatenate((org[up:row_min - offset, left:right, i].flatten(),
                                  org[row_max + offset:bottom, left:right, i].flatten(),
                                  org[up:bottom, left:col_min - offset, i].flatten(),
                                  org[up:bottom, col_max + offset:right, i].flatten()))
            # print(val)
            # print(np.argmax(np.bincount(val)))
            most.append(int(np.argmax(np.bincount(val))))
        return most

    # always start from a clean output directory
    if os.path.exists(clip_root):
        shutil.rmtree(clip_root)
    os.mkdir(clip_root)
    cls_dirs = []

    bkg = org.copy()
    for compo in compos:
        cls = compo.category
        c_root = pjoin(clip_root, cls)
        c_path = pjoin(c_root, str(compo.id) + '.jpg')
        # create each category folder the first time it is encountered
        if cls not in cls_dirs:
            os.mkdir(c_root)
            cls_dirs.append(cls)
        clip = compo.compo_clipping(org)
        cv2.imwrite(c_path, clip)

        # rebind the bbox corners read by the closures above, then fill the
        # bbox in the background with the sampled colour (-1 = filled rect)
        col_min, row_min, col_max, row_max = compo.put_bbox()
        if flag == 'average':
            color = average_pix_around()
        elif flag == 'most':
            color = most_pix_around()
        cv2.rectangle(bkg, (col_min, row_min), (col_max, row_max), color, -1)

    cv2.imwrite(os.path.join(clip_root, 'bkg.png'), bkg)
|
CDM/detect_compo/deprecated/ocr_classify_text.py
ADDED
|
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytesseract as pyt
|
| 2 |
+
import cv2
|
| 3 |
+
|
| 4 |
+
import lib_ip.ip_draw as draw
|
| 5 |
+
from config.CONFIG_UIED import Config
|
| 6 |
+
|
| 7 |
+
C = Config()
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def is_text(img, min_word_area, show=False):
    """Decide whether an image clip is predominantly text via OCR.

    :param img: image clip (numpy array)
    :param min_word_area: minimum ratio of total word area to clip area for
        the clip to count as text
    :param show: when True, print the detected words and preview their
        bounding boxes in an OpenCV window
    :return: True if the clip is text, False if not, -1 if OCR failed
    """
    broad = img.copy()
    area_word = 0
    area_total = img.shape[0] * img.shape[1]

    try:
        # ocr text detection; each TSV row describes one detected box
        data = pyt.image_to_data(img).split('\n')
    except Exception:
        # FIX: was a bare 'except:', which also swallowed
        # KeyboardInterrupt/SystemExit; -1 signals OCR failure to callers
        print(img.shape)
        return -1
    word = []
    for d in data[1:]:
        d = d.split()
        # guard against blank TSV rows (previously raised IndexError on d[-1])
        if not d:
            continue
        # last field is the recognised text; '-1' marks non-word rows
        if d[-1] != '-1':
            # keep plausibly word-sized boxes only (height < 50, width < 100)
            if d[-1] != '-' and d[-1] != '—' and int(d[-3]) < 50 and int(d[-4]) < 100:
                word.append(d)
                t_l = (int(d[-6]), int(d[-5]))
                b_r = (int(d[-6]) + int(d[-4]), int(d[-5]) + int(d[-3]))
                area_word += int(d[-4]) * int(d[-3])
                cv2.rectangle(broad, t_l, b_r, (0,0,255), 1)

    if show:
        for d in word: print(d)
        print(area_word/area_total)
        cv2.imshow('a', broad)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    # no text in this clip or relatively small text area
    if len(word) == 0 or area_word/area_total < min_word_area:
        return False
    return True
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def text_detection(org, img_clean):
    """Detect word bounding boxes in *img_clean* via OCR.

    :param org: original image (kept for interface compatibility; unused)
    :param img_clean: pre-processed image fed to the OCR engine
    :return: list of word corners ((col_min, row_min), (col_max, row_max));
        empty list if OCR fails
    """
    try:
        data = pyt.image_to_data(img_clean).split('\n')
    except Exception:
        # FIX: previously returned the tuple (org, None) on failure while the
        # success path returns a list — an inconsistent return type that broke
        # any caller iterating the result; also narrowed the bare 'except:'
        return []
    corners_word = []
    for d in data[1:]:
        d = d.split()
        # guard against blank TSV rows (previously raised IndexError on d[-1])
        if not d:
            continue
        if d[-1] != '-1':
            # keep plausibly word-sized boxes only (5 < height < 40, 5 < width < 100)
            if d[-1] != '-' and d[-1] != '—' and 5 < int(d[-3]) < 40 and 5 < int(d[-4]) < 100:
                t_l = (int(d[-6]), int(d[-5]))
                b_r = (int(d[-6]) + int(d[-4]), int(d[-5]) + int(d[-3]))
                corners_word.append((t_l, b_r))
    return corners_word
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
# def text_merge_word_into_line(org, corners_word, max_words_gap=C.THRESHOLD_TEXT_MAX_WORD_GAP):
|
| 61 |
+
#
|
| 62 |
+
# def is_in_line(word):
|
| 63 |
+
# for i in range(len(lines)):
|
| 64 |
+
# line = lines[i]
|
| 65 |
+
# # at the same row
|
| 66 |
+
# if abs(line['center'][1] - word['center'][1]) < max_words_gap:
|
| 67 |
+
# # small gap between words
|
| 68 |
+
# if (abs(line['center'][0] - word['center'][0]) - abs(line['width']/2 + word['width']/2)) < max_words_gap:
|
| 69 |
+
# return i
|
| 70 |
+
# return -1
|
| 71 |
+
#
|
| 72 |
+
# def merge_line(word, index):
|
| 73 |
+
# line = lines[index]
|
| 74 |
+
# # on the left
|
| 75 |
+
# if word['center'][0] < line['center'][0]:
|
| 76 |
+
# line['col_min'] = word['col_min']
|
| 77 |
+
# # on the right
|
| 78 |
+
# else:
|
| 79 |
+
# line['col_max'] = word['col_max']
|
| 80 |
+
# line['row_min'] = min(line['row_min'], word['row_min'])
|
| 81 |
+
# line['row_max'] = max(line['row_max'], word['row_max'])
|
| 82 |
+
# line['width'] = line['col_max'] - line['col_min']
|
| 83 |
+
# line['height'] = line['row_max'] - line['row_min']
|
| 84 |
+
# line['center'] = ((line['col_max'] + line['col_min'])/2, (line['row_max'] + line['row_min'])/2)
|
| 85 |
+
#
|
| 86 |
+
# words = []
|
| 87 |
+
# for corner in corners_word:
|
| 88 |
+
# word = {}
|
| 89 |
+
# (top_left, bottom_right) = corner
|
| 90 |
+
# (col_min, row_min) = top_left
|
| 91 |
+
# (col_max, row_max) = bottom_right
|
| 92 |
+
# word['col_min'], word['col_max'], word['row_min'], word['row_max'] = col_min, col_max, row_min, row_max
|
| 93 |
+
# word['height'] = row_max - row_min
|
| 94 |
+
# word['width'] = col_max - col_min
|
| 95 |
+
# word['center'] = ((col_max + col_min)/2, (row_max + row_min)/2)
|
| 96 |
+
# words.append(word)
|
| 97 |
+
#
|
| 98 |
+
# lines = []
|
| 99 |
+
# for word in words:
|
| 100 |
+
# line_index = is_in_line(word)
|
| 101 |
+
# # word is in current line
|
| 102 |
+
# if line_index != -1:
|
| 103 |
+
# merge_line(word, line_index)
|
| 104 |
+
# # word is not in current line
|
| 105 |
+
# else:
|
| 106 |
+
# # this single word as a new line
|
| 107 |
+
# lines.append(word)
|
| 108 |
+
#
|
| 109 |
+
# corners_line = []
|
| 110 |
+
# for l in lines:
|
| 111 |
+
# corners_line.append(((l['col_min'], l['row_min']), (l['col_max'], l['row_max'])))
|
| 112 |
+
# return corners_line
|
| 113 |
+
|
CDM/detect_compo/ip_region_proposal.py
ADDED
|
@@ -0,0 +1,200 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
|
| 2 |
+
from os.path import join as pjoin
|
| 3 |
+
import time
|
| 4 |
+
|
| 5 |
+
import CDM.detect_compo.lib_ip.ip_preprocessing as pre
|
| 6 |
+
import CDM.detect_compo.lib_ip.ip_draw as draw
|
| 7 |
+
import CDM.detect_compo.lib_ip.ip_detection as det
|
| 8 |
+
import CDM.detect_compo.lib_ip.file_utils as file
|
| 9 |
+
import CDM.detect_compo.lib_ip.Component as Compo
|
| 10 |
+
from CDM.config.CONFIG_UIED import Config
|
| 11 |
+
C = Config()
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def nesting_inspection(org, grey, compos, ffl_block):
    '''
    Inspect every sufficiently tall compo for nested elements through
    flood-fill based block division.
    :param ffl_block: gradient threshold passed to the flood-fill
    :return: list of newly found nested compos
    '''
    found = []
    for idx, parent in enumerate(compos):
        # only compos taller than 50px are worth sub-dividing
        if parent.height <= 50:
            continue
        children = det.nested_components_detection(
            parent.compo_clipping(grey), org, grad_thresh=ffl_block, show=False)
        # map child coordinates from the clip back into the full image
        Compo.cvt_compos_relative_pos(children, parent.bbox.col_min, parent.bbox.row_min)

        # a 'redundant' child supersedes its parent in place instead of being
        # appended as a new compo
        redundant_child = next((c for c in children if c.redundant), None)
        if redundant_child is not None:
            compos[idx] = redundant_child
        else:
            found.extend(children)
    return found
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def compo_detection(input_img_path, output_root, uied_params,
                    resize_by_height=800, classifier=None, show=False, wai_key=0):
    """Detect UI components in a screenshot and save them to a JSON file.

    Pipeline: pre-process -> element detection -> refinement -> nesting
    inspection -> save corners JSON (plus a visualisation JPG).

    :param input_img_path: path of the input screenshot
    :param output_root: directory that receives the 'ip' result folder
    :param uied_params: dict with keys 'min-grad', 'min-ele-area',
        'merge-contained-ele' and 'ffl-block'
    :param resize_by_height: working height the image is resized to
    :param classifier: kept for interface compatibility (currently unused)
    :param show: visualise intermediate results
    :param wai_key: cv2.waitKey delay used when *show* is True
    :return: elapsed CPU time in seconds

    NOTE: a large body of commented-out classification experiment code that
    previously lived in this function has been removed as dead code.
    """
    start = time.process_time()
    # image name without extension; handle both '/' and '\\' path separators
    name = input_img_path.split('/')[-1][:-4] if '/' in input_img_path else input_img_path.split('\\')[-1][:-4]
    ip_root = file.build_directory(pjoin(output_root, "ip"))

    # *** Step 1 *** pre-processing: read img -> get binary map
    org, grey = pre.read_img(input_img_path, resize_by_height)
    binary = pre.binarization(org, grad_min=int(uied_params['min-grad']))

    # full-resolution copy used for drawing results at the original scale
    full_size_org, full_size_grey = pre.read_img(input_img_path)
    ratio = full_size_org.shape[0] / org.shape[0]

    # *** Step 2 *** element detection
    det.rm_line(binary, show=show, wait_key=wai_key)
    uicompos = det.component_detection(binary, min_obj_area=int(uied_params['min-ele-area']))

    # *** Step 3 *** results refinement
    uicompos = det.compo_filter(uicompos, min_area=int(uied_params['min-ele-area']), img_shape=binary.shape)
    uicompos = det.merge_intersected_compos(uicompos)
    det.compo_block_recognition(binary, uicompos)
    if uied_params['merge-contained-ele']:
        uicompos = det.rm_contained_compos_not_in_block(uicompos)
    Compo.compos_update(uicompos, org.shape)
    Compo.compos_containment(uicompos)

    # *** Step 4 *** nesting inspection: check if big compos have nested elements
    uicompos += nesting_inspection(org, grey, uicompos, ffl_block=uied_params['ffl-block'])
    Compo.compos_update(uicompos, org.shape)
    draw.draw_bounding_box(full_size_org, ratio, uicompos, show=show, name='merged compo',
                           write_path=pjoin(ip_root, name + '.jpg'), wait_key=wai_key)

    # *** Step 5 *** save detection result
    # refresh ids/geometry once more so the saved JSON reflects the final list
    Compo.compos_update(uicompos, org.shape)
    file.save_corners_json(pjoin(ip_root, name + '.json'), uicompos)

    cd_time = time.process_time() - start
    print("[Compo Detection Completed in %.3f s] Input: %s Output: %s" % (cd_time, input_img_path, pjoin(ip_root, name + '.json')))
    return cd_time
|
CDM/detect_compo/lib_ip/Bbox.py
ADDED
|
@@ -0,0 +1,122 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import CDM.detect_compo.lib_ip.ip_draw as draw
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class Bbox:
    """Axis-aligned bounding box in (column, row) image coordinates."""

    def __init__(self, col_min, row_min, col_max, row_max):
        self.col_min = col_min
        self.row_min = row_min
        self.col_max = col_max
        self.row_max = row_max

        self.width = col_max - col_min
        self.height = row_max - row_min
        self.box_area = self.width * self.height

    def put_bbox(self):
        """Return the box as the tuple (col_min, row_min, col_max, row_max)."""
        return self.col_min, self.row_min, self.col_max, self.row_max

    def bbox_cal_area(self):
        """Recompute, cache and return the box area from width/height."""
        self.box_area = self.width * self.height
        return self.box_area

    def bbox_relation(self, bbox_b):
        """Strict geometric relation between self (a) and bbox_b (b).

        :return: -1 : a in b
                  0 : a, b are not intersected
                  1 : b in a
                  2 : a, b are identical or intersected
        """
        a_cmin, a_rmin, a_cmax, a_rmax = self.put_bbox()
        b_cmin, b_rmin, b_cmax, b_rmax = bbox_b.put_bbox()

        if a_cmin > b_cmin and a_rmin > b_rmin and a_cmax < b_cmax and a_rmax < b_rmax:
            return -1  # a strictly inside b
        if a_cmin < b_cmin and a_rmin < b_rmin and a_cmax > b_cmax and a_rmax > b_rmax:
            return 1   # b strictly inside a
        if (a_cmin > b_cmax or a_rmin > b_rmax) or (b_cmin > a_cmax or b_rmin > a_rmax):
            return 0   # no overlap at all
        return 2       # identical or partially intersected

    def bbox_relation_nms(self, bbox_b, bias=(0, 0)):
        '''
        Relation between two boxes based on overlap ratios (IoU / IoA / IoB).
        :param bias: (col, row) tolerance expanding the intersection window
        :return: -1 : a in b
                  0 : a, b are not intersected
                  1 : b in a
                  2 : a, b are intersected
        '''
        a_cmin, a_rmin, a_cmax, a_rmax = self.put_bbox()
        b_cmin, b_rmin, b_cmax, b_rmax = bbox_b.put_bbox()
        bias_col, bias_row = bias

        # intersection window, expanded by the bias on every side
        inter_w = np.maximum(0, min(a_cmax + bias_col, b_cmax + bias_col)
                                - max(a_cmin - bias_col, b_cmin - bias_col))
        inter_h = np.maximum(0, min(a_rmax + bias_row, b_rmax + bias_row)
                                - max(a_rmin - bias_row, b_rmin - bias_row))
        inter = inter_w * inter_h

        area_a = (a_cmax - a_cmin) * (a_rmax - a_rmin)
        area_b = (b_cmax - b_cmin) * (b_rmax - b_rmin)
        iou = inter / (area_a + area_b - inter)
        ioa = inter / self.box_area
        iob = inter / bbox_b.box_area

        if iou == 0 and ioa == 0 and iob == 0:
            return 0
        if ioa >= 1:
            return -1   # a fully covered by b
        if iob >= 1:
            return 1    # b fully covered by a
        if iou >= 0.02 or iob > 0.2 or ioa > 0.2:
            return 2    # significantly intersected
        return 0

    def bbox_cvt_relative_position(self, col_min_base, row_min_base):
        '''
        Shift the box by the given base offsets (e.g. map a box found inside a
        clipped sub-image back into the full image's coordinate system).
        '''
        self.col_min += col_min_base
        self.col_max += col_min_base
        self.row_min += row_min_base
        self.row_max += row_min_base

    def bbox_merge(self, bbox_b):
        '''
        Return a new Bbox that covers both self and bbox_b.
        '''
        a_cmin, a_rmin, a_cmax, a_rmax = self.put_bbox()
        b_cmin, b_rmin, b_cmax, b_rmax = bbox_b.put_bbox()
        return Bbox(min(a_cmin, b_cmin), min(a_rmin, b_rmin),
                    max(a_cmax, b_cmax), max(a_rmax, b_rmax))

    def bbox_padding(self, image_shape, pad):
        """Grow the box by *pad* on each side, clamped to the image bounds."""
        rows, cols = image_shape[:2]
        self.col_min = max(self.col_min - pad, 0)
        self.col_max = min(self.col_max + pad, cols)
        self.row_min = max(self.row_min - pad, 0)
        self.row_max = min(self.row_max + pad, rows)
|
CDM/detect_compo/lib_ip/Component.py
ADDED
|
@@ -0,0 +1,238 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from CDM.detect_compo.lib_ip.Bbox import Bbox
|
| 2 |
+
import CDM.detect_compo.lib_ip.ip_draw as draw
|
| 3 |
+
|
| 4 |
+
import cv2
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def cvt_compos_relative_pos(compos, col_min_base, row_min_base):
    """Shift every compo's position by the given base offsets, in place."""
    for c in compos:
        c.compo_relative_position(col_min_base, row_min_base)
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def compos_containment(compos):
    """Record containment for every compo pair, in place.

    When compo i lies inside compo j, j's index list ``contain`` gains i
    (and vice versa), based on each pair's ``compo_relation`` result.
    """
    n = len(compos)
    for i in range(n - 1):
        outer = compos[i]
        for j in range(i + 1, n):
            rel = outer.compo_relation(compos[j])
            if rel == -1:       # compo i is inside compo j
                compos[j].contain.append(i)
            elif rel == 1:      # compo j is inside compo i
                outer.contain.append(j)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def compos_update(compos, org_shape):
    """Re-number all compos and refresh their cached geometry.

    Ids start from 1 because id 0 is reserved for the background.
    """
    for new_id, compo in enumerate(compos, start=1):
        compo.compo_update(new_id, org_shape)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class Component:
|
| 29 |
+
def __init__(self, region, image_shape):
|
| 30 |
+
self.id = None
|
| 31 |
+
self.region = region
|
| 32 |
+
self.boundary = self.compo_get_boundary()
|
| 33 |
+
self.bbox = self.compo_get_bbox()
|
| 34 |
+
self.bbox_area = self.bbox.box_area
|
| 35 |
+
|
| 36 |
+
self.region_area = len(region)
|
| 37 |
+
self.width = len(self.boundary[0])
|
| 38 |
+
self.height = len(self.boundary[2])
|
| 39 |
+
self.image_shape = image_shape
|
| 40 |
+
self.area = self.width * self.height
|
| 41 |
+
|
| 42 |
+
self.category = 'Compo'
|
| 43 |
+
self.contain = []
|
| 44 |
+
|
| 45 |
+
self.rect_ = None
|
| 46 |
+
self.line_ = None
|
| 47 |
+
self.redundant = False
|
| 48 |
+
|
| 49 |
+
def compo_update(self, id, org_shape):
|
| 50 |
+
self.id = id
|
| 51 |
+
self.image_shape = org_shape
|
| 52 |
+
self.width = self.bbox.width
|
| 53 |
+
self.height = self.bbox.height
|
| 54 |
+
self.bbox_area = self.bbox.box_area
|
| 55 |
+
self.area = self.width * self.height
|
| 56 |
+
|
| 57 |
+
def put_bbox(self):
|
| 58 |
+
return self.bbox.put_bbox()
|
| 59 |
+
|
| 60 |
+
def compo_update_bbox_area(self):
|
| 61 |
+
self.bbox_area = self.bbox.bbox_cal_area()
|
| 62 |
+
|
| 63 |
+
def compo_get_boundary(self):
|
| 64 |
+
'''
|
| 65 |
+
get the bounding boundary of an object(region)
|
| 66 |
+
boundary: [top, bottom, left, right]
|
| 67 |
+
-> up, bottom: (column_index, min/max row border)
|
| 68 |
+
-> left, right: (row_index, min/max column border) detect range of each row
|
| 69 |
+
'''
|
| 70 |
+
border_up, border_bottom, border_left, border_right = {}, {}, {}, {}
|
| 71 |
+
for point in self.region:
|
| 72 |
+
# point: (row_index, column_index)
|
| 73 |
+
# up, bottom: (column_index, min/max row border) detect range of each column
|
| 74 |
+
if point[1] not in border_up or border_up[point[1]] > point[0]:
|
| 75 |
+
border_up[point[1]] = point[0]
|
| 76 |
+
if point[1] not in border_bottom or border_bottom[point[1]] < point[0]:
|
| 77 |
+
border_bottom[point[1]] = point[0]
|
| 78 |
+
# left, right: (row_index, min/max column border) detect range of each row
|
| 79 |
+
if point[0] not in border_left or border_left[point[0]] > point[1]:
|
| 80 |
+
border_left[point[0]] = point[1]
|
| 81 |
+
if point[0] not in border_right or border_right[point[0]] < point[1]:
|
| 82 |
+
border_right[point[0]] = point[1]
|
| 83 |
+
|
| 84 |
+
boundary = [border_up, border_bottom, border_left, border_right]
|
| 85 |
+
# descending sort
|
| 86 |
+
for i in range(len(boundary)):
|
| 87 |
+
boundary[i] = [[k, boundary[i][k]] for k in boundary[i].keys()]
|
| 88 |
+
boundary[i] = sorted(boundary[i], key=lambda x: x[0])
|
| 89 |
+
return boundary
|
| 90 |
+
|
| 91 |
+
def compo_get_bbox(self):
    """
    Get the top-left and bottom-right corner points of the boundary.
    :param boundaries: boundary: [top, bottom, left, right]
        -> top, bottom: (column_index, min/max row border)
        -> left, right: (row_index, min/max column border) detect range of each row
    :return: a Bbox built from
        -> top_left: (column_min, row_min)
        -> bottom_right: (column_max, row_max)
    """
    # boundary[0][0][0]  : first column covered by the top border
    # boundary[1][-1][0] : last column covered by the bottom border
    # boundary[2][0][0]  : first row covered by the left border
    # boundary[3][-1][0] : last row covered by the right border
    # NOTE(review): this takes min/max over only two sampled endpoints per axis,
    # relying on the borders being sorted ascending by index — confirm against
    # compo_get_boundary's sorting.
    col_min, row_min = (int(min(self.boundary[0][0][0], self.boundary[1][-1][0])), int(min(self.boundary[2][0][0], self.boundary[3][-1][0])))
    col_max, row_max = (int(max(self.boundary[0][0][0], self.boundary[1][-1][0])), int(max(self.boundary[2][0][0], self.boundary[3][-1][0])))
    bbox = Bbox(col_min, row_min, col_max, row_max)
    return bbox
|
| 105 |
+
|
| 106 |
+
def compo_is_rectangle(self, min_rec_evenness, max_dent_ratio, test=False):
    '''
    Detect if an object is a rectangle by measuring the evenness (flatness)
    and dents of each of its four borders. Sets self.rect_ and returns the
    same boolean.
    :param min_rec_evenness: minimum fraction of flat border points required
    :param max_dent_ratio: maximum tolerated fraction of "pit" points per border
    :param test: if True, print diagnostics and draw the boundary
    '''
    dent_direction = [1, -1, 1, -1]  # expected convexity direction per border (top, bottom, left, right)

    flat = 0       # total count of flat border points across all four borders
    parameter = 0  # total number of border points (perimeter), denominator for evenness
    for n, border in enumerate(self.boundary):
        parameter += len(border)
        # dent detection
        pit = 0    # length of pit (sunken stretch) on this border
        depth = 0  # accumulated surface change along this border
        if n <= 1:
            adj_side = max(len(self.boundary[2]), len(self.boundary[3]))  # get maximum length of adjacent side
        else:
            adj_side = max(len(self.boundary[0]), len(self.boundary[1]))

        # -> top, bottom: (column_index, min/max row border)
        # -> left, right: (row_index, min/max column border) detect range of each row
        abnm = 0  # length of the current abnormal (sharply changing) stretch
        # skip the first few points (3 + 2% of border) to avoid corner noise
        for i in range(int(3 + len(border) * 0.02), len(border) - 1):
            # gradient between consecutive border points
            difference = border[i][1] - border[i + 1][1]
            # the degree of surface changing
            depth += difference
            # ignore noise at the start of each direction
            if i / len(border) < 0.08 and (dent_direction[n] * difference) / adj_side > 0.5:
                depth = 0  # reset

            # if the change of the surface is too large, count it as part of an abnormal stretch
            if abs(depth) / adj_side > 0.3:
                abnm += 1  # count the size of the abnm
                # if the abnormal stretch is too long, the shape cannot be a rectangle
                if abnm / len(border) > 0.1:
                    if test:
                        print('abnms', abnm, abnm / len(border))
                        draw.draw_boundary([self], self.image_shape, show=True)
                    self.rect_ = False
                    return False
                continue
            else:
                # reset the abnm when the depth is back to normal
                abnm = 0

            # if sunken (against the convex direction) and the change is large, count as pit
            if dent_direction[n] * depth < 0 and abs(depth) / adj_side > 0.15:
                pit += 1
                continue

            # if not changing into a pit and the gradient is near zero, count as flat
            if abs(depth) < 1 + adj_side * 0.015:
                flat += 1
            if test:
                print(depth, adj_side, flat)
        # if the pit is too big, the shape should not be a rectangle
        if pit / len(border) > max_dent_ratio:
            if test:
                print('pit', pit, pit / len(border))
                draw.draw_boundary([self], self.image_shape, show=True)
            self.rect_ = False
            return False
    if test:
        print(flat / parameter, '\n')
        draw.draw_boundary([self], self.image_shape, show=True)
    # relax the evenness threshold for tall shapes (ignore text and irregular shapes)
    if self.height / self.image_shape[0] > 0.3:
        min_rec_evenness = 0.85
    if (flat / parameter) < min_rec_evenness:
        self.rect_ = False
        return False
    self.rect_ = True
    return True
|
| 180 |
+
|
| 181 |
+
def compo_is_line(self, min_line_thickness):
    """
    Check whether this object is a line by inspecting its boundary.
    Sets self.line_ and returns the same boolean.
    :param boundary: boundary: [border_top, border_bottom, border_left, border_right]
        -> top, bottom: list of (column_index, min/max row border)
        -> left, right: list of (row_index, min/max column border) detect range of each row
    :param min_line_thickness: maximum gap between opposite borders for a point to count as slim
    :return: Boolean
    """
    # horizontally: count columns where the top and bottom borders are close together
    # NOTE(review): assumes self.width == len(self.boundary[0]) — i.e. every
    # column inside the bbox appears in the border lists; confirm.
    slim = 0
    for i in range(self.width):
        if abs(self.boundary[1][i][1] - self.boundary[0][i][1]) <= min_line_thickness:
            slim += 1
    # a line if >93% of the columns are slim
    if slim / len(self.boundary[0]) > 0.93:
        self.line_ = True
        return True
    # vertically: same test on the left/right borders per row
    slim = 0
    for i in range(self.height):
        if abs(self.boundary[2][i][1] - self.boundary[3][i][1]) <= min_line_thickness:
            slim += 1
    if slim / len(self.boundary[2]) > 0.93:
        self.line_ = True
        return True
    self.line_ = False
    return False
|
| 208 |
+
|
| 209 |
+
def compo_relation(self, compo_b, bias=(0, 0)):
    """
    Spatial relation between this component (a) and compo_b (b),
    delegated to the Bbox NMS relation check.
    :param bias: (col, row) tolerance added when testing intersection
    :return: -1 : a in b
              0 : a, b are not intersected
              1 : b in a
              2 : a, b are identical or intersected
    """
    return self.bbox.bbox_relation_nms(compo_b.bbox, bias)
|
| 217 |
+
|
| 218 |
+
def compo_relative_position(self, col_min_base, row_min_base):
    '''
    Convert this component's bbox to a position relative to the given base
    coordinate (e.g. translate a clip-local bbox back into image coordinates).
    '''
    self.bbox.bbox_cvt_relative_position(col_min_base, row_min_base)
|
| 223 |
+
|
| 224 |
+
def compo_merge(self, compo_b):
    # absorb compo_b's bounding box into this one, then refresh cached geometry
    self.bbox = self.bbox.bbox_merge(compo_b.bbox)
    self.compo_update(self.id, self.image_shape)
|
| 227 |
+
|
| 228 |
+
def compo_clipping(self, img, pad=0, show=False):
    """Crop this component's bounding-box region out of img.

    :param img: source image array (rows x cols [x channels])
    :param pad: extra pixels added on every side, clamped to the image edges
    :param show: if True, display the clip in an OpenCV window
    :return: the cropped sub-array (a view into img)
    """
    col_min, row_min, col_max, row_max = self.put_bbox()
    # expand by pad, clamped so the slice stays inside the image
    top = max(row_min - pad, 0)
    bottom = min(row_max + pad, img.shape[0])
    left = max(col_min - pad, 0)
    right = min(col_max + pad, img.shape[1])
    clip = img[top:bottom, left:right]
    if show:
        cv2.imshow('clipping', clip)
        cv2.waitKey()
    return clip
|
CDM/detect_compo/lib_ip/file_utils.py
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import pandas as pd
|
| 3 |
+
import json
|
| 4 |
+
from os.path import join as pjoin
|
| 5 |
+
import time
|
| 6 |
+
import cv2
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def save_corners(file_path, corners, compo_name, clear=True):
    """Write component corner records to a CSV file.

    :param file_path: CSV path; read first so existing rows can be kept
    :param corners: iterable of ((y_min, x_min), (y_max, x_max)) pairs
    :param compo_name: class name stored in the 'component' column of every row
    :param clear: if True (default), discard any rows already in the file
    """
    try:
        df = pd.read_csv(file_path, index_col=0)
    # narrow the bare except: only "file missing" and "file empty" mean "start fresh";
    # any other failure (permissions, malformed CSV) should surface
    except (FileNotFoundError, pd.errors.EmptyDataError):
        df = pd.DataFrame(columns=['component', 'x_max', 'x_min', 'y_max', 'y_min', 'height', 'width'])

    if clear:
        df = df.drop(df.index)
    rows = []
    for corner in corners:
        (up_left, bottom_right) = corner
        c = {'component': compo_name}
        (c['y_min'], c['x_min']) = up_left
        (c['y_max'], c['x_max']) = bottom_right
        # NOTE(review): width is derived from the y-range and height from the
        # x-range, matching the original code — confirm the axis convention
        c['width'] = c['y_max'] - c['y_min']
        c['height'] = c['x_max'] - c['x_min']
        rows.append(c)
    if rows:
        # DataFrame.append was removed in pandas 2.0; concat is the supported way
        df = pd.concat([df, pd.DataFrame(rows)], ignore_index=True)
    df.to_csv(file_path)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def save_corners_json(file_path, compos):
    """Serialize components' bounding boxes and classes to a JSON file.

    :param file_path: output JSON path (overwritten)
    :param compos: non-empty list of Component-like objects exposing
        image_shape, id, category, width, height and put_bbox()
        (the image shape is taken from the first element)
    """
    img_shape = compos[0].image_shape
    output = {'img_shape': img_shape, 'compos': []}

    for compo in compos:
        bbox = compo.put_bbox()
        c = {'id': compo.id, 'class': compo.category}
        (c['column_min'], c['row_min'], c['column_max'], c['row_max']) = bbox
        c['width'] = compo.width
        c['height'] = compo.height
        output['compos'].append(c)

    # context manager guarantees the handle is closed and the data flushed
    # (the original opened the file and never closed it)
    with open(file_path, 'w') as f_out:
        json.dump(output, f_out, indent=4)
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def save_clipping(org, output_root, corners, compo_classes, compo_index):
    """Crop each detected component out of the original image and save it as
    output_root/<class>/<running-index>.png.

    :param org: original image array
    :param output_root: directory to create class sub-folders in
    :param corners: list of ((col_min, row_min), (col_max, row_max)) per component
    :param compo_classes: class name per component, parallel to corners
    :param compo_index: dict class -> last used file index; mutated in place
    """
    if not os.path.exists(output_root):
        os.mkdir(output_root)
    pad = 2  # small margin around each crop, clamped to the image edges
    for i in range(len(corners)):
        compo = compo_classes[i]
        (up_left, bottom_right) = corners[i]
        (col_min, row_min) = up_left
        (col_max, row_max) = bottom_right
        col_min = max(col_min - pad, 0)
        col_max = min(col_max + pad, org.shape[1])
        row_min = max(row_min - pad, 0)
        row_max = min(row_max + pad, org.shape[0])

        # if this component type was seen before, bump its index;
        # otherwise register the type (index 0) and create its folder
        compo_path = pjoin(output_root, compo)
        if compo_classes[i] not in compo_index:
            compo_index[compo_classes[i]] = 0
            if not os.path.exists(compo_path):
                os.mkdir(compo_path)
        else:
            compo_index[compo_classes[i]] += 1
        clip = org[row_min:row_max, col_min:col_max]
        cv2.imwrite(pjoin(compo_path, str(compo_index[compo_classes[i]]) + '.png'), clip)
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def build_directory(directory):
    """Ensure *directory* exists and return its path.

    Uses os.makedirs with exist_ok=True instead of check-then-mkdir:
    it also creates missing parent directories and avoids the race
    between the existence check and the creation.

    :param directory: path to create
    :return: the same path, for chaining
    """
    os.makedirs(directory, exist_ok=True)
    return directory
|
CDM/detect_compo/lib_ip/ip_detection.py
ADDED
|
@@ -0,0 +1,574 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
|
| 2 |
+
import numpy as np
|
| 3 |
+
|
| 4 |
+
import CDM.detect_compo.lib_ip.ip_draw as draw
|
| 5 |
+
import CDM.detect_compo.lib_ip.ip_preprocessing as pre
|
| 6 |
+
from CDM.detect_compo.lib_ip.Component import Component
|
| 7 |
+
import CDM.detect_compo.lib_ip.Component as Compo
|
| 8 |
+
from CDM.config.CONFIG_UIED import Config
|
| 9 |
+
C = Config()
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def merge_intersected_corner(compos, org, is_merge_contained_ele, max_gap=(0, 0), max_ele_height=25):
    '''
    Merge components that contain or intersect each other, repeating
    (via recursion) until a full pass produces no change.
    :param is_merge_contained_ele: if true, merge compos nested in others
    :param max_gap: (horizontal_distance, vertical_distance) to be merged into one line/column
    :param max_ele_height: if higher than it, recognize the compo as text
        (NOTE(review): currently unused except for being passed through)
    :return: merged component list
    '''
    changed = False
    new_compos = []
    Compo.compos_update(compos, org.shape)
    for i in range(len(compos)):
        merged = False
        cur_compo = compos[i]
        for j in range(len(new_compos)):
            relation = cur_compo.compo_relation(new_compos[j], max_gap)
            # merge compo[i] into new_compos[j] when:
            # 1. new_compos[j] contains compo[i] (relation == 1)
            # 2. they intersect (relation == 2)
            # 3. is_merge_contained_ele and compo[i] contains new_compos[j] (relation == -1)
            if relation == 1 or \
                    relation == 2 or \
                    (is_merge_contained_ele and relation == -1):
                new_compos[j].compo_merge(cur_compo)
                # keep scanning with the merged compo so one element can
                # absorb several existing entries in a single pass
                cur_compo = new_compos[j]
                merged = True
                changed = True
                # break
        if not merged:
            new_compos.append(compos[i])

    if not changed:
        return compos
    else:
        # recurse until a fixed point (no merges in a full pass)
        return merge_intersected_corner(new_compos, org, is_merge_contained_ele, max_gap, max_ele_height)
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def merge_intersected_compos(compos):
    """Repeatedly merge intersecting components (relation == 2) until a full
    pass over the list makes no change; returns the reduced list."""
    merged_any = True
    while merged_any:
        merged_any = False
        kept = []
        for candidate in compos:
            absorbed = False
            for target in kept:
                if candidate.compo_relation(target) == 2:
                    # grow the already-kept compo to cover the candidate
                    target.compo_merge(candidate)
                    absorbed = True
                    merged_any = True
                    break
            if not absorbed:
                kept.append(candidate)
        compos = kept.copy()
    return compos
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def rm_contained_compos_not_in_block(compos):
    '''
    Remove every component that is contained inside another component,
    unless the containing component's category is 'Block'.
    (relation == -1: compos[i] inside compos[j]; relation == 1: compos[j] inside compos[i])
    '''
    removed = set()
    for i in range(len(compos) - 1):
        for j in range(i + 1, len(compos)):
            rel = compos[i].compo_relation(compos[j])
            if rel == -1 and compos[j].category != 'Block':
                removed.add(i)
            if rel == 1 and compos[i].category != 'Block':
                removed.add(j)
    return [compo for idx, compo in enumerate(compos) if idx not in removed]
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def merge_text(compos, org_shape, max_word_gad=4, max_word_height=20):
    """Merge short neighbouring components on the same text line into one,
    recursing until a full pass produces no change.

    :param compos: component list
    :param org_shape: original image shape (rows, cols, ...)
    :param max_word_gad: maximum horizontal gap (px) between words on a line
    :param max_word_height: components taller than this are not treated as text
    :return: merged component list
    """
    def is_text_line(compo_a, compo_b):
        # True when the two boxes overlap vertically (same line) and are
        # horizontally overlapping or within max_word_gad of each other
        (col_min_a, row_min_a, col_max_a, row_max_a) = compo_a.put_bbox()
        (col_min_b, row_min_b, col_max_b, row_max_b) = compo_b.put_bbox()

        col_min_s = max(col_min_a, col_min_b)
        col_max_s = min(col_max_a, col_max_b)
        row_min_s = max(row_min_a, row_min_b)
        row_max_s = min(row_max_a, row_max_b)

        # on the same line: vertical ranges intersect
        if row_min_s < row_max_s:
            # close distance: horizontal overlap, or a gap smaller than max_word_gad
            if col_min_s < col_max_s or \
                    (0 < col_min_b - col_max_a < max_word_gad) or (0 < col_min_a - col_max_b < max_word_gad):
                return True
        return False

    changed = False
    new_compos = []
    row, col = org_shape[:2]
    for i in range(len(compos)):
        merged = False
        height = compos[i].height
        # too tall to be a word: keep as-is, never merge
        if height > max_word_height:
            new_compos.append(compos[i])
            continue
        for j in range(len(new_compos)):
            if is_text_line(compos[i], new_compos[j]):
                new_compos[j].compo_merge(compos[i])
                merged = True
                changed = True
                break
        if not merged:
            new_compos.append(compos[i])

    if not changed:
        return compos
    else:
        # recurse until no further merges happen
        return merge_text(new_compos, org_shape)
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
def rm_top_or_bottom_corners(components, org_shape, top_bottom_height=C.THRESHOLD_TOP_BOTTOM_BAR):
    """Drop components that lie entirely inside the top status bar or the
    bottom navigation bar of the screenshot.

    :param top_bottom_height: (top_ratio, bottom_ratio) — a compo fully above
        height*top_ratio or fully below height*bottom_ratio is removed
    :return: filtered component list
    """
    kept = []
    img_height = org_shape[0]
    for compo in components:
        col_min, row_min, col_max, row_max = compo.put_bbox()
        in_top_bar = row_max < img_height * top_bottom_height[0]
        in_bottom_bar = row_min > img_height * top_bottom_height[1]
        if in_top_bar or in_bottom_bar:
            continue
        kept.append(compo)
    return kept
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
def rm_line_v_h(binary, show=False, max_line_thickness=C.THRESHOLD_LINE_THICKNESS):
    """Detect long horizontal and vertical lines in a binary image and
    subtract them from it in place.

    :param binary: binary image (modified in place: detected lines set to 0)
    :param show: if True, display debug windows of the input, result and lines
    :param max_line_thickness: maximum thickness (px) for a strip to count as a line
    """
    def check_continuous_line(line, edge):
        # return [start, end] of a foreground run covering >60% of the edge, else None
        continuous_length = 0
        line_start = -1
        for j, p in enumerate(line):
            if p > 0:
                if line_start == -1:
                    line_start = j
                continuous_length += 1
            elif continuous_length > 0:
                if continuous_length / edge > 0.6:
                    return [line_start, j]
                continuous_length = 0
                line_start = -1

        if continuous_length / edge > 0.6:
            return [line_start, len(line)]
        else:
            return None

    def extract_line_area(line, start_idx, flag='v'):
        # copy the detected line pixels from binary into map_line
        for e, l in enumerate(line):
            if flag == 'v':
                map_line[start_idx + e, l[0]:l[1]] = binary[start_idx + e, l[0]:l[1]]

    map_line = np.zeros(binary.shape[:2], dtype=np.uint8)
    if show:
        # FIX: this imshow used to run unconditionally, opening a GUI window
        # (and failing on headless machines) even when show=False
        cv2.imshow('binary', binary)

    # horizontal scan: group consecutive rows that each contain a long run
    width = binary.shape[1]
    start_row = -1
    line_area = []
    for i, row in enumerate(binary):
        line_v = check_continuous_line(row, width)
        if line_v is not None:
            # new line starts here
            if start_row == -1:
                start_row = i
                line_area = []
            line_area.append(line_v)
        else:
            # line ended: keep it only if it is thin enough
            if start_row != -1:
                if i - start_row < max_line_thickness:
                    if show:
                        # debug output only (was an unconditional print)
                        print(line_area, start_row, i)
                    extract_line_area(line_area, start_row)
                start_row = -1

    # vertical scan: group consecutive columns that each contain a long run
    height = binary.shape[0]
    start_col = -1
    for i in range(width):
        col = binary[:, i]
        line_h = check_continuous_line(col, height)
        if line_h is not None:
            if start_col == -1:
                start_col = i
        else:
            if start_col != -1:
                if i - start_col < max_line_thickness:
                    map_line[:, start_col: i] = binary[:, start_col: i]
                start_col = -1

    # remove the detected lines from the input image
    binary -= map_line

    if show:
        cv2.imshow('no-line', binary)
        cv2.imshow('lines', map_line)
        cv2.waitKey()
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
def rm_line(binary,
            max_line_thickness=C.THRESHOLD_LINE_THICKNESS,
            min_line_length_ratio=C.THRESHOLD_LINE_MIN_LENGTH,
            show=False, wait_key=0):
    """Remove long, thin horizontal lines (e.g. separators) from a binary
    image in place by zeroing the rows they occupy.

    :param binary: binary image, modified in place
    :param max_line_thickness: maximum number of consecutive line rows to treat as a line
    :param min_line_length_ratio: NOTE(review): currently unused — the 0.95
        threshold below is hard-coded; confirm whether it should use this param
    :param show: if True, display the result
    :param wait_key: cv2.waitKey argument; 0 blocks and then closes the window
    """
    def is_valid_line(line):
        # a valid line row has a nearly continuous foreground run covering
        # >95% of the width, with no internal gap longer than 5 px
        line_length = 0
        line_gap = 0
        for j in line:
            if j > 0:
                if line_gap > 5:
                    return False
                line_length += 1
                line_gap = 0
            elif line_length > 0:
                line_gap += 1
        if line_length / width > 0.95:
            return True
        return False

    height, width = binary.shape[:2]
    board = np.zeros(binary.shape[:2], dtype=np.uint8)

    # state machine over rows: find a thin band of line rows (start_row..end_row),
    # then require a gap below it before erasing the band
    start_row, end_row = -1, -1
    check_line = False  # currently inside a candidate band of line rows
    check_gap = False   # band ended; waiting to confirm a gap below it
    for i, row in enumerate(binary):
        if is_valid_line(row):
            # new start: if not already tracking a band, mark this row as its start
            if not check_line:
                start_row = i
                check_line = True
        else:
            # the band of line rows ends at this row
            if check_line:
                # thin enough to be a line: remember the end and start gap check
                if i - start_row < max_line_thickness:
                    end_row = i
                    check_gap = True
                else:
                    start_row, end_row = -1, -1
                    check_line = False
            # gap confirmed: erase the band
            if check_gap and i - end_row > max_line_thickness:
                binary[start_row: end_row] = 0
                start_row, end_row = -1, -1
                check_line = False
                check_gap = False

    # handle a band that reaches the bottom of the image
    if (check_line and (height - start_row) < max_line_thickness) or check_gap:
        binary[start_row: end_row] = 0

    if show:
        cv2.imshow('no-line binary', binary)
        if wait_key is not None:
            cv2.waitKey(wait_key)
        if wait_key == 0:
            cv2.destroyWindow('no-line binary')
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
def rm_noise_compos(compos):
    """Return a new list with every component whose category is 'Noise' removed."""
    return [compo for compo in compos if compo.category != 'Noise']
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
def rm_noise_in_large_img(compos, org,
                          max_compo_scale=C.THRESHOLD_COMPO_MAX_SCALE):
    """Drop components recorded (by index, via compo.contain) as lying inside
    an 'Image' component; returns the filtered list.

    :param max_compo_scale: unused here; kept for interface compatibility
    """
    keep = [True] * len(compos)
    for compo in compos:
        if compo.category == 'Image':
            # compo.contain holds indices into compos of nested elements
            for idx in compo.contain:
                keep[idx] = False
    return [compo for flag, compo in zip(keep, compos) if flag]
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
def detect_compos_in_img(compos, binary, org, max_compo_scale=C.THRESHOLD_COMPO_MAX_SCALE, show=False):
    """Second-pass detection: look for rectangular sub-components inside every
    'Image' component and append the findings to compos (mutated in place).

    :param compos: component list, extended in place with newly found compos
    :param binary: binarized screenshot
    :param org: original screenshot (unused here beyond the signature)
    :param max_compo_scale: unused here; kept for interface compatibility
    :param show: passed through to the binary-reversal debug display
    """
    compos_new = []
    row, column = binary.shape[:2]
    for compo in compos:
        if compo.category == 'Image':
            compo.compo_update_bbox_area()
            # clip the image region out of the binary map and invert it so the
            # interior content becomes foreground
            bin_clip = compo.compo_clipping(binary)
            bin_clip = pre.reverse_binary(bin_clip, show=show)

            # NOTE(review): component_detection's signature declares a required
            # min_obj_area parameter that this call does not pass — confirm
            compos_rec, compos_nonrec = component_detection(bin_clip, test=False, step_h=10, step_v=10, rec_detect=True)
            for compo_rec in compos_rec:
                # translate clip-local coordinates back into image coordinates
                compo_rec.compo_relative_position(compo.bbox.col_min, compo.bbox.row_min)
                # keep only sub-compos clearly smaller than the parent and not tiny
                if compo_rec.bbox_area / compo.bbox_area < 0.8 and compo_rec.bbox.height > 20 and compo_rec.bbox.width > 20:
                    compos_new.append(compo_rec)
    compos += compos_new
|
| 336 |
+
|
| 337 |
+
|
| 338 |
+
def compo_filter(compos, min_area, img_shape):
    """Filter candidate components by size relative to the image and by
    aspect ratio.

    A compo is kept only when:
      * its area is strictly between 0.05% and 10% of the image area, and
      * its shorter side is more than 0.6 of its longer side.

    :param min_area: unused in the current filter; kept for interface compatibility
    :param img_shape: (rows, cols, ...) of the source image
    :return: filtered component list
    """
    img_area = img_shape[0] * img_shape[1]
    kept = []
    for compo in compos:
        # relative-size bounds: not too large, not too small
        if not (0.0005 * img_area < compo.area < 0.1 * img_area):
            continue
        # near-square aspect ratio only
        short_side = min(compo.width, compo.height)
        long_side = max(compo.width, compo.height)
        if short_side / long_side <= 0.6:
            continue
        kept.append(compo)
    return kept
|
| 392 |
+
|
| 393 |
+
|
| 394 |
+
def is_block(clip, thread=0.15):
    '''
    A Block is a rectangular border enclosing a group of compos (a wireframe).
    Decide whether this clip is a block by checking that the strips just
    inside each of its four borders are mostly blank: scan 4 lines inward
    from every side; if more than 2 of those 4 lines have over `thread`
    of their pixels set, the clip is not a block.
    '''
    def too_busy(lines, limit):
        # count scan lines whose foreground total exceeds the threshold
        busy = 0
        for line in lines:
            if sum(line) / 255 > limit:
                busy += 1
        return busy > 2

    h_limit = thread * clip.shape[1]  # threshold for horizontal scan lines
    v_limit = thread * clip.shape[0]  # threshold for vertical scan lines

    # top border: rows 5..8 (just inside the border)
    if too_busy((clip[4 + i] for i in range(1, 5)), h_limit):
        return False
    # left border: columns 5..8
    if too_busy((clip[:, 4 + i] for i in range(1, 5)), v_limit):
        return False
    # bottom border: rows -5..-8
    if too_busy((clip[-4 + i] for i in range(-1, -5, -1)), h_limit):
        return False
    # right border: columns -5..-8
    if too_busy((clip[:, -4 + i] for i in range(-1, -5, -1)), v_limit):
        return False
    return True
|
| 427 |
+
|
| 428 |
+
|
| 429 |
+
def compo_block_recognition(binary, compos, block_side_length=0.15):
    """Re-label components as 'Block' when they are large relative to the
    image (both sides over block_side_length of the image) and their clipped
    binary region looks like an empty rectangular frame (see is_block)."""
    height, width = binary.shape
    for compo in compos:
        large_enough = (compo.height / height > block_side_length
                        and compo.width / width > block_side_length)
        # clip only when the size check passes (preserves short-circuit cost)
        if large_enough and is_block(compo.compo_clipping(binary)):
            compo.category = 'Block'
|
| 436 |
+
|
| 437 |
+
|
| 438 |
+
# take the binary image as input
# calculate the connected regions -> get the bounding boundaries of them -> check if those regions are rectangles
# return all boundaries and boundaries of rectangles
def component_detection(binary, min_obj_area,
                        line_thickness=C.THRESHOLD_LINE_THICKNESS,
                        min_rec_evenness=C.THRESHOLD_REC_MIN_EVENNESS,
                        max_dent_ratio=C.THRESHOLD_REC_MAX_DENT_RATIO,
                        step_h = 5, step_v = 2,
                        rec_detect=False, show=False, test=False):
    """
    Detect connected components in a binary image via seeded flood fill.

    :param binary: Binary image from pre-processing
    :param min_obj_area: If not pass then ignore the small object
    :param line_thickness: If not pass then ignore the slim object
    :param min_rec_evenness: If not pass then this object cannot be rectangular
    :param max_dent_ratio: If not pass then this object cannot be rectangular
    :param step_h: row stride between flood-fill seed points
    :param step_v: column stride between flood-fill seed points
    :param rec_detect: if True, also split results into rectangular / non-rectangular
    :param show: if True, visualize all components found so far after each addition
    :param test: if True, visualize each component individually as it is found
    :return: (compos_rec, compos_nonrec) when rec_detect is True,
             otherwise one list of all Component objects
    """
    # cv2.floodFill requires a mask 2 pixels larger than the image
    mask = np.zeros((binary.shape[0] + 2, binary.shape[1] + 2), dtype=np.uint8)
    compos_all = []
    compos_rec = []
    compos_nonrec = []
    row, column = binary.shape[0], binary.shape[1]
    for i in range(0, row, step_h):
        # offset alternate rows by one column (i % 2) so the strided scan
        # covers more pixels across passes
        for j in range(i % 2, column, step_v):
            # seed only on foreground pixels no previous fill has reached
            if binary[i, j] == 255 and mask[i, j] == 0:
                # get connected area
                # region = util.boundary_bfs_connected_area(binary, i, j, mask)

                mask_copy = mask.copy()
                # FLOODFILL_MASK_ONLY: only the mask is written; ff[0] is the filled pixel count
                ff = cv2.floodFill(binary, mask, (j, i), None, 0, 0, cv2.FLOODFILL_MASK_ONLY)
                if ff[0] < min_obj_area: continue
                # pixels added by this particular fill = new mask minus old mask
                mask_copy = mask - mask_copy
                # drop the 1-px mask border, then convert points to (row, col)
                region = np.reshape(cv2.findNonZero(mask_copy[1:-1, 1:-1]), (-1, 2))
                region = [(p[1], p[0]) for p in region]

                # filter out some compos
                component = Component(region, binary.shape)
                # calculate the boundary of the connected area
                # ignore small area
                if component.width <= 3 or component.height <= 3:
                    continue
                # check if it is line by checking the length of edges
                # if component.compo_is_line(line_thickness):
                #     continue

                if test:
                    print('Area:%d' % (len(region)))
                    draw.draw_boundary([component], binary.shape, show=True)

                compos_all.append(component)

                if rec_detect:
                    # rectangle check
                    if component.compo_is_rectangle(min_rec_evenness, max_dent_ratio):
                        component.rect_ = True
                        compos_rec.append(component)
                    else:
                        component.rect_ = False
                        compos_nonrec.append(component)

                if show:
                    print('Area:%d' % (len(region)))
                    draw.draw_boundary(compos_all, binary.shape, show=True)

    # draw.draw_boundary(compos_all, binary.shape, show=True)
    if rec_detect:
        return compos_rec, compos_nonrec
    else:
        return compos_all
|
| 510 |
+
|
| 511 |
+
|
| 512 |
+
def nested_components_detection(grey, org, grad_thresh,
                                show=False, write_path=None,
                                step_h=10, step_v=10,
                                line_thickness=C.THRESHOLD_LINE_THICKNESS,
                                min_rec_evenness=C.THRESHOLD_REC_MIN_EVENNESS,
                                max_dent_ratio=C.THRESHOLD_REC_MAX_DENT_RATIO):
    '''
    Detect rectangular layout blocks by flood-filling low-gradient regions.

    :param grey: grey-scale of original image
    :param org: original image (not referenced in this body; kept for interface compatibility)
    :param grad_thresh: loDiff/upDiff tolerance handed to cv2.floodFill
    :param show: display the debug boards when True
    :param write_path: if given, write the block board to this path
    :param step_h: row stride between flood-fill seed points
    :param step_v: column stride between flood-fill seed points
    :return: corners: list of [(top_left, bottom_right)]
                        -> top_left: (column_min, row_min)
                        -> bottom_right: (column_max, row_max)
    '''
    compos = []
    # floodFill mask must be 2 pixels larger than the image
    mask = np.zeros((grey.shape[0]+2, grey.shape[1]+2), dtype=np.uint8)
    # drawing boards used only by the optional visualizations below
    broad = np.zeros((grey.shape[0], grey.shape[1], 3), dtype=np.uint8)
    broad_all = broad.copy()

    row, column = grey.shape[0], grey.shape[1]
    for x in range(0, row, step_h):
        for y in range(0, column, step_v):
            # seed only from pixels no previous fill has reached
            if mask[x, y] == 0:
                # region = flood_fill_bfs(grey, x, y, mask)

                # flood fill algorithm to get background (layout block)
                mask_copy = mask.copy()
                ff = cv2.floodFill(grey, mask, (y, x), None, grad_thresh, grad_thresh, cv2.FLOODFILL_MASK_ONLY)
                # ignore small regions
                if ff[0] < 500: continue
                # pixels reached by this fill only = new mask minus previous mask
                mask_copy = mask - mask_copy
                region = np.reshape(cv2.findNonZero(mask_copy[1:-1, 1:-1]), (-1, 2))
                # findNonZero yields (col, row); convert to (row, col)
                region = [(p[1], p[0]) for p in region]

                compo = Component(region, grey.shape)
                # draw.draw_region(region, broad_all)
                # if block.height < 40 and block.width < 40:
                #     continue
                if compo.height < 30:
                    continue

                # a region covering nearly the whole screen is background, not a block
                # print(block.area / (row * column))
                if compo.area / (row * column) > 0.9:
                    continue
                elif compo.area / (row * column) > 0.7:
                    compo.redundant = True

                # get the boundary of this region
                # ignore lines
                if compo.compo_is_line(line_thickness):
                    continue
                # ignore non-rectangle as blocks must be rectangular
                if not compo.compo_is_rectangle(min_rec_evenness, max_dent_ratio):
                    continue
                # if block.height/row < min_block_height_ratio:
                #     continue
                compos.append(compo)
                # draw.draw_region(region, broad)
    if show:
        cv2.imshow('flood-fill all', broad_all)
        cv2.imshow('block', broad)
        cv2.waitKey()
    if write_path is not None:
        cv2.imwrite(write_path, broad)
    return compos
|
CDM/detect_compo/lib_ip/ip_draw.py
ADDED
|
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
|
| 2 |
+
import numpy as np
|
| 3 |
+
from random import randint as rint
|
| 4 |
+
from CDM.config.CONFIG_UIED import Config
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
C = Config()
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def draw_bounding_box_class(org, components, color_map=C.COLOR, line=2, show=False, write_path=None, name='board'):
    """
    Draw each component's bounding box on a copy of the original image,
    picking the rectangle colour from the component's category.

    :param org: original image
    :param components: objects exposing put_bbox() -> (col_min, row_min, col_max, row_max)
    :param color_map: mapping from component category to drawing colour
    :param line: rectangle line thickness
    :param show: pop up a window with the result when True
    :param write_path: optional path to write the drawn image to
    :param name: window title used when showing
    :return: the drawn image
    """
    board = org.copy()
    for compo in components:
        col_min, row_min, col_max, row_max = compo.put_bbox()
        board = cv2.rectangle(board, (col_min, row_min), (col_max, row_max), color_map[compo.category], line)
        # board = cv2.putText(board, compo.category, (bbox[0]+5, bbox[1]+20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color_map[compo.category], 2)
    if show:
        cv2.imshow(name, board)
        cv2.waitKey(0)
    if write_path is not None:
        cv2.imwrite(write_path, board)
    return board
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def draw_bounding_box(org, ratio, components, color=(0, 255, 0), line=2,
                      show=False, write_path=None, name='board', is_return=False, wait_key=0):
    """
    Draw component bounding boxes (scaled by ratio) on a copy of org.

    Returns early (None) when nothing is requested: no display, no output
    path and no return value wanted.

    :param org: original image
    :param ratio: scale factor applied to each bbox before drawing
    :param components: objects exposing put_bbox() -> (col_min, row_min, col_max, row_max)
    :param color: rectangle colour
    :param line: rectangle line thickness
    :param show: display the drawn board when True
    :param write_path: optional output image path
    :param name: window title used when showing
    :param is_return: force drawing and returning the board even if not shown/saved
    :param wait_key: cv2.waitKey argument; window is destroyed when it is 0
    :return: the drawn image, or None when drawing was skipped entirely
    """
    # nothing requested -> skip all drawing work
    if not show and write_path is None and not is_return:
        return
    board = org.copy()
    for compo in components:
        # scale the bbox up to the full-size image before drawing
        scaled = [int(v * ratio) for v in compo.put_bbox()]
        board = cv2.rectangle(board, (scaled[0], scaled[1]), (scaled[2], scaled[3]), color, line)
    if show:
        cv2.imshow(name, board)
        if wait_key is not None:
            cv2.waitKey(wait_key)
        if wait_key == 0:
            cv2.destroyWindow(name)
    if write_path is not None:
        cv2.imwrite(write_path, board)
    return board
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def draw_line(org, lines, color=(0, 255, 0), show=False):
    """
    Draw detected horizontal and vertical line segments on a copy of the image.

    :param org: original image
    :param lines: (line_h, line_v); each segment is a dict with keys
                  'head' and 'end' (endpoints) and 'thickness'
    :param color: colour of the drawn lines
    :param show: display the result when True
    :return: image with lines drawn
    """
    board = org.copy()
    horizontal, vertical = lines
    for seg in horizontal:
        cv2.line(board, tuple(seg['head']), tuple(seg['end']), color, seg['thickness'])
    for seg in vertical:
        cv2.line(board, tuple(seg['head']), tuple(seg['end']), color, seg['thickness'])
    if show:
        cv2.imshow('img', board)
        cv2.waitKey(0)
    return board
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def draw_boundary(components, shape, show=False):
    """
    Render component boundaries as white pixels on a black binary board.

    :param components: objects with .boundary = [top, bottom, left, right];
                       top/bottom points are (column_index, row_border),
                       left/right points are (row_index, column_border)
    :param shape: shape of the original image (only the first two dims used)
    :param show: display the board when True
    :return: the drawn binary board
    """
    canvas = np.zeros(shape[:2], dtype=np.uint8)  # black background
    for comp in components:
        # top and bottom boundaries store (column_index, row_border)
        for col, row in comp.boundary[0] + comp.boundary[1]:
            canvas[row, col] = 255
        # left and right boundaries store (row_index, column_border)
        for row, col in comp.boundary[2] + comp.boundary[3]:
            canvas[row, col] = 255
    if show:
        cv2.imshow('rec', canvas)
        cv2.waitKey(0)
    return canvas
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def draw_region(region, broad, show=False):
    """
    Paint every point of a region onto the board with one random colour.

    :param region: iterable of (row, column) points
    :param broad: colour board, modified in place
    :param show: display the board when True
    :return: the board
    """
    fill = (rint(0, 255), rint(0, 255), rint(0, 255))
    for row, col in region:
        broad[row, col] = fill

    if show:
        cv2.imshow('region', broad)
        cv2.waitKey()
    return broad
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
def draw_region_bin(region, broad, show=False):
    """
    Mark every point of a region as white (255) on a binary board in place.

    :param region: iterable of (row, column) points
    :param broad: binary board, modified in place
    :param show: display the board when True
    :return: the board
    """
    for row, col in region:
        broad[row, col] = 255

    if show:
        cv2.imshow('region', broad)
        cv2.waitKey()
    return broad
|
CDM/detect_compo/lib_ip/ip_preprocessing.py
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
|
| 2 |
+
import numpy as np
|
| 3 |
+
from CDM.config.CONFIG_UIED import Config
|
| 4 |
+
C = Config()
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def read_img(path, resize_height=None, kernel_size=None):
    """
    Load an image from disk, optionally median-blur it and resize it by height.

    :param path: image file path
    :param resize_height: target height in pixels; width scales to keep aspect ratio
    :param kernel_size: aperture size for cv2.medianBlur; None skips blurring
    :return: (bgr_image, grey_image), or (None, None) on any failure
    """

    def resize_by_height(org):
        # keep the aspect ratio while forcing the requested height
        w_h_ratio = org.shape[1] / org.shape[0]
        resize_w = resize_height * w_h_ratio
        re = cv2.resize(org, (int(resize_w), int(resize_height)))
        return re

    try:
        img = cv2.imread(path)
        # Check for a failed read BEFORE any processing: cv2.imread returns
        # None for a missing/unreadable file, and calling cv2.medianBlur on
        # None (as the old order did) raises instead of reaching the
        # friendly message below.
        if img is None:
            print("*** Image does not exist ***")
            return None, None
        if kernel_size is not None:
            img = cv2.medianBlur(img, kernel_size)
        if resize_height is not None:
            img = resize_by_height(img)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        return img, gray

    except Exception as e:
        print(e)
        print("*** Img Reading Failed ***\n")
        return None, None
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def gray_to_gradient(img):
    """
    Compute a simple gradient magnitude map (|dx| + |dy|) of an image.

    Colour input is converted to grey first; gradients are taken with
    3x3 forward-difference kernels.

    :param img: grey or BGR image
    :return: uint8 gradient map of the same size
    """
    grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) if len(img.shape) == 3 else img
    as_float = np.copy(grey).astype("float")

    # forward differences towards the right and downward neighbours
    k_horizontal = np.array([[0, 0, 0], [0, -1., 1.], [0, 0, 0]])
    k_vertical = np.array([[0, 0, 0], [0, -1., 0], [0, 1., 0]])
    grad_h = abs(cv2.filter2D(as_float, -1, k_horizontal))
    grad_v = abs(cv2.filter2D(as_float, -1, k_vertical))
    return (grad_h + grad_v).astype('uint8')
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def reverse_binary(bin, show=False):
    """
    Reverse the input binary image: foreground becomes 0, background 255.

    :param bin: binary image (parameter name shadows the builtin, kept for
                interface compatibility)
    :param show: display the inverted image when True
    :return: inverted binary image
    """
    _, inverted = cv2.threshold(bin, 1, 255, cv2.THRESH_BINARY_INV)
    if show:
        cv2.imshow('binary_rev', inverted)
        cv2.waitKey()
    return inverted
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def binarization(org, grad_min, show=False, write_path=None, wait_key=0):
    """
    Binarize an image by thresholding its gradient map and closing small gaps.

    :param org: original BGR image
    :param grad_min: minimum gradient magnitude kept as foreground
    :param show: display the binary map when True
    :param write_path: optional path to save the binary map
    :param wait_key: cv2.waitKey argument used when showing
    :return: binary (morphologically closed) image
    """
    grey = cv2.cvtColor(org, cv2.COLOR_BGR2GRAY)
    grad = gray_to_gradient(grey)        # get RoI with high gradient
    rec, binary = cv2.threshold(grad, grad_min, 255, cv2.THRESH_BINARY)    # enhance the RoI
    # NOTE(review): the kernel argument here is the plain tuple (3, 3), not a
    # structuring element from cv2.getStructuringElement / np.ones((3, 3));
    # confirm this behaves as intended on the OpenCV version in use
    morph = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, (3, 3))      # remove noises
    if write_path is not None:
        cv2.imwrite(write_path, morph)
    if show:
        cv2.imshow('binary', morph)
        if wait_key is not None:
            cv2.waitKey(wait_key)
    return morph
|
CDM/detect_compo/model/model-99-resnet18.pkl
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1b31df5d3ed9c743990fb7a27baf71626cf7766df36d1f414496c89d34a854f2
|
| 3 |
+
size 44957605
|
CDM/detect_merge/Element.py
ADDED
|
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import cv2
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class Element:
    """A detected GUI element (text or non-text) with bbox geometry and hierarchy links."""

    def __init__(self, id, corner, category, text_content=None):
        self.id = id
        self.category = category
        # corner: (column_min, row_min, column_max, row_max)
        self.col_min, self.row_min, self.col_max, self.row_max = corner
        self.width = self.col_max - self.col_min
        self.height = self.row_max - self.row_min
        self.area = self.width * self.height

        self.text_content = text_content
        self.parent_id = None
        self.children = []  # list of elements
        self.label = None

    def init_bound(self):
        # recompute width/height/area after the corners changed
        self.width = self.col_max - self.col_min
        self.height = self.row_max - self.row_min
        self.area = self.width * self.height

    def put_bbox(self):
        # bbox as (column_min, row_min, column_max, row_max)
        return self.col_min, self.row_min, self.col_max, self.row_max

    def wrap_info(self):
        # serialize this element into a JSON-friendly dict; children are
        # referenced by id only
        info = {'id':self.id, 'class': self.category, 'height': self.height, 'width': self.width,
                'position': {'column_min': self.col_min, 'row_min': self.row_min, 'column_max': self.col_max,
                             'row_max': self.row_max}, 'label': self.label}
        if self.text_content is not None:
            info['text_content'] = self.text_content
        if len(self.children) > 0:
            info['children'] = []
            for child in self.children:
                info['children'].append(child.id)
        if self.parent_id is not None:
            info['parent'] = self.parent_id
        return info

    def resize(self, resize_ratio):
        # scale the bbox corners, then refresh the derived measurements
        self.col_min = int(self.col_min * resize_ratio)
        self.row_min = int(self.row_min * resize_ratio)
        self.col_max = int(self.col_max * resize_ratio)
        self.row_max = int(self.row_max * resize_ratio)
        self.init_bound()

    def element_merge(self, element_b, new_element=False, new_category=None, new_id=None):
        # Merge this element's bbox with element_b's into the enclosing box.
        # With new_element=True a fresh Element is returned; otherwise this
        # element grows in place (and returns None).
        col_min_a, row_min_a, col_max_a, row_max_a = self.put_bbox()
        col_min_b, row_min_b, col_max_b, row_max_b = element_b.put_bbox()
        new_corner = (min(col_min_a, col_min_b), min(row_min_a, row_min_b), max(col_max_a, col_max_b), max(row_max_a, row_max_b))
        # concatenate text contents when the other element carries text
        if element_b.text_content is not None:
            self.text_content = element_b.text_content if self.text_content is None else self.text_content + '\n' + element_b.text_content
        if new_element:
            return Element(new_id, new_corner, new_category)
        else:
            self.col_min, self.row_min, self.col_max, self.row_max = new_corner
            self.init_bound()

    def calc_intersection_area(self, element_b, bias=(0, 0)):
        # Intersection metrics between two elements; bias enlarges the
        # intersection window by (horizontal, vertical) pixels.
        a = self.put_bbox()
        b = element_b.put_bbox()
        col_min_s = max(a[0], b[0]) - bias[0]
        row_min_s = max(a[1], b[1]) - bias[1]
        col_max_s = min(a[2], b[2])
        row_max_s = min(a[3], b[3])
        w = np.maximum(0, col_max_s - col_min_s)
        h = np.maximum(0, row_max_s - row_min_s)
        inter = w * h

        # iou: intersection over union; ioa/iob: intersection over each element's own area
        iou = inter / (self.area + element_b.area - inter)
        ioa = inter / self.area
        iob = inter / element_b.area

        return inter, iou, ioa, iob

    def element_relation(self, element_b, bias=(0, 0)):
        """
        @bias: (horizontal bias, vertical bias)
        :return: -1 : a in b
                 0  : a, b are not intersected
                 1  : b in a
                 2  : a, b are identical or intersected
        """
        inter, iou, ioa, iob = self.calc_intersection_area(element_b, bias)

        # area of intersection is 0
        if ioa == 0:
            return 0
        # a in b
        if ioa >= 1:
            return -1
        # b in a
        if iob >= 1:
            return 1
        return 2

    def visualize_element(self, img, color=(0, 255, 0), line=1, show=False, ratio=1):
        # draw this element's bbox and its 1-based id onto img, in place
        loc = self.put_bbox()

        if ratio != 1:
            loc = [int(x * ratio) for x in loc]

        # cv2.rectangle(img, loc[:2], loc[2:], color, line)
        cv2.rectangle(img, (loc[0], loc[1]), (loc[2], loc[3]), color, line)
        cv2.putText(img, str(int(self.id) + 1), (int(ratio*(self.col_min - 10)), int(ratio*(self.row_max + 10))), cv2.FONT_HERSHEY_SIMPLEX, 1,
                    color, line)
        # for child in self.children:
        #     child.visualize_element(img, color=(255, 0, 255), line=line)
        if show:
            cv2.imshow('element', img)
            cv2.waitKey(0)
            cv2.destroyWindow('element')
|
CDM/detect_merge/merge.py
ADDED
|
@@ -0,0 +1,361 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import cv2
|
| 3 |
+
import numpy as np
|
| 4 |
+
from os.path import join as pjoin
|
| 5 |
+
import os
|
| 6 |
+
import time
|
| 7 |
+
import shutil
|
| 8 |
+
|
| 9 |
+
from CDM.detect_merge.Element import Element
|
| 10 |
+
from torchvision import models
|
| 11 |
+
from torch import nn
|
| 12 |
+
import torch
|
| 13 |
+
|
| 14 |
+
import CDM.detect_compo.lib_ip.ip_preprocessing as pre
|
| 15 |
+
|
| 16 |
+
# ----------------- load pre-trained classification model ----------------
|
| 17 |
+
|
| 18 |
+
# model = models.resnet18().to('cpu')
|
| 19 |
+
# in_feature_num = model.fc.in_features
|
| 20 |
+
# model.fc = nn.Linear(in_feature_num, 99)
|
| 21 |
+
# model.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=(5, 5), padding=(3, 3), stride=(2, 2),
|
| 22 |
+
# bias=False)
|
| 23 |
+
#
|
| 24 |
+
# PATH = "./model/model-99-resnet18.pkl"
|
| 25 |
+
# model.load_state_dict(torch.load(PATH, map_location=torch.device('cpu')))
|
| 26 |
+
#
|
| 27 |
+
# model.eval()
|
| 28 |
+
|
| 29 |
+
# ----------------- end loading ------------------------------------------
|
| 30 |
+
|
| 31 |
+
# information_type = {'Name':['name', 'first name', 'last name', 'full name', 'real name', 'surname', 'family name', 'given name'],
|
| 32 |
+
# 'Birthday':['birthday', 'date of birth', 'birth date', 'DOB', 'dob full birthday'],
|
| 33 |
+
# 'Address':['address', 'mailing address', 'physical address', 'postal address', 'billing address', 'shipping address'],
|
| 34 |
+
# 'Phone':['phone', 'phone number', 'mobile', 'mobile phone', 'mobile number', 'telephone', 'telephone number', 'call'],
|
| 35 |
+
# 'Email':['email', 'e-mail', 'email address', 'e-mail address'],
|
| 36 |
+
# 'Contacts':['contacts', 'phone-book', 'phone book'],
|
| 37 |
+
# 'Location':['location', 'locate', 'place', 'geography', 'geo', 'geo-location', 'precision location'],
|
| 38 |
+
# 'Camera':['camera', 'photo', 'scan', 'album', 'picture', 'gallery', 'photo library', 'storage', 'image', 'video'],
|
| 39 |
+
# 'Microphone':['microphone', 'voice, mic', 'speech', 'talk'],
|
| 40 |
+
# 'Financial':['credit card', 'pay', 'payment', 'debit card', 'mastercard', 'wallet'],
|
| 41 |
+
# 'IP':['IP', 'Internet Protocol', 'IP address', 'internet protocol address'],
|
| 42 |
+
# 'Cookies':['cookies', 'cookie'],
|
| 43 |
+
# 'Social':['facebook', 'twitter']}
|
| 44 |
+
|
| 45 |
+
def show_elements(org_img, eles, ratio, show=False, win_name='element', wait_key=0, shown_resize=None, line=2):
    """
    Draw every element onto one copy of the original image, coloured by category.

    :param org_img: original image (not modified)
    :param eles: elements exposing visualize_element(img, color, line, ratio)
    :param ratio: scale factor forwarded to each element's drawing routine
    :param show: display the (possibly resized) result when True
    :param win_name: window title used when showing
    :param wait_key: cv2.waitKey argument; window is destroyed when it is 0
    :param shown_resize: optional (width, height) the result is resized to
    :param line: line thickness forwarded to each element
    :return: the drawn image (resized when shown_resize is given)
    """
    color_map = {'Text':(0, 0, 255), 'Compo':(0, 255, 0), 'Block':(0, 255, 0), 'Text Content':(255, 0, 255)}
    canvas = org_img.copy()
    for ele in eles:
        ele.visualize_element(img=canvas, color=color_map[ele.category], line=line, ratio=ratio)
    result = canvas if shown_resize is None else cv2.resize(canvas, shown_resize)
    if show:
        cv2.imshow(win_name, result)
        cv2.waitKey(wait_key)
        if wait_key == 0:
            cv2.destroyWindow(win_name)
    return result
|
| 60 |
+
|
| 61 |
+
def show_one_element(org_img, eles, ratio, show=False, win_name='element', wait_key=0, shown_resize=None, line=2):
    """
    Draw each element on its OWN copy of the original image.

    Note: the returned list always holds the full-size boards; shown_resize
    only affects the image displayed on screen.

    :param org_img: original image (not modified)
    :param eles: elements exposing visualize_element(img, color, line, ratio)
    :param ratio: scale factor forwarded to each element's drawing routine
    :param show: display each board as it is drawn when True
    :param win_name: window title used when showing
    :param wait_key: cv2.waitKey argument; window is destroyed when it is 0
    :param shown_resize: optional (width, height) for the displayed image only
    :param line: line thickness forwarded to each element
    :return: list with one drawn image per element
    """
    color_map = {'Text': (0, 0, 255), 'Compo': (0, 255, 0), 'Block': (0, 255, 0), 'Text Content': (255, 0, 255)}
    boards = []
    for ele in eles:
        board = org_img.copy()
        ele.visualize_element(img=board, color=color_map[ele.category], line=line, ratio=ratio)
        boards.append(board)
        display = board if shown_resize is None else cv2.resize(board, shown_resize)
        if show:
            cv2.imshow(win_name, display)
            cv2.waitKey(wait_key)
            if wait_key == 0:
                cv2.destroyWindow(win_name)
    return boards
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def save_elements(output_file, elements, img_shape, ratio=1):
    """
    Serialize elements (optionally rescaled) to a JSON file.

    :param output_file: path of the JSON file to write
    :param elements: elements exposing resize() and wrap_info(); resized in
                     place when ratio != 1
    :param img_shape: shape of the image the elements belong to
    :param ratio: scale factor applied to every element before saving
    :return: the dict that was written ({'compos': [...], 'img_shape': ...})
    """
    components = {'compos': [], 'img_shape': img_shape}
    for i, ele in enumerate(elements):

        if ratio != 1:
            ele.resize(ratio)
            ele.width = ele.col_max - ele.col_min
            ele.height = ele.row_max - ele.row_min

        c = ele.wrap_info()
        # c['id'] = i
        components['compos'].append(c)
    # use a context manager so the file handle is always closed
    # (the old json.dump(..., open(output_file, 'w'), ...) leaked the handle)
    with open(output_file, 'w') as f:
        json.dump(components, f, indent=4)
    return components
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def reassign_ids(elements):
    """Renumber element ids so each matches its position in the list."""
    for position, element in enumerate(elements):
        element.id = position
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def refine_texts(texts, img_shape):
    """
    Filter out text regions that are likely detection noise.

    A text element is kept only when its height is below 7.5% of the image
    height; anything taller is discarded.

    :param texts: detected text elements (need a .height attribute)
    :param img_shape: image shape as (height, width, ...)
    :return: list of kept text elements
    """
    img_height = img_shape[0]
    return [text for text in texts if text.height / img_height < 0.075]
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def merge_text_line_to_paragraph(elements, max_line_gap=5):
    """
    Merge vertically adjacent text lines into paragraph-level elements.

    Text elements whose boxes intersect once the vertical window is widened
    by max_line_gap pixels are merged in place (element_merge); non-text
    elements pass through untouched.

    :param elements: mixed list of text and non-text elements
    :param max_line_gap: vertical bias (pixels) used when testing overlap
    :return: non-text elements followed by the merged text elements
    """
    texts = []
    non_texts = []
    for ele in elements:
        if ele.category == 'Text':
            texts.append(ele)
        else:
            non_texts.append(ele)

    # repeat full passes until one pass performs no merge (fixed point)
    changed = True
    while changed:
        changed = False
        temp_set = []
        for text_a in texts:
            merged = False
            for text_b in temp_set:
                # bias=(0, max_line_gap): widen the vertical window so lines
                # separated by a small gap still count as intersecting
                inter_area, _, _, _ = text_a.calc_intersection_area(text_b, bias=(0, max_line_gap))
                if inter_area > 0:
                    text_b.element_merge(text_a)
                    merged = True
                    changed = True
                    break
            if not merged:
                temp_set.append(text_a)
        texts = temp_set.copy()
    return non_texts + texts
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
def refine_elements(compos, texts, input_img_path, intersection_bias=(2, 2), containment_ratio=0.8, ):
    '''
    1. remove compos contained in text
    2. remove compos containing text area that's too large
    3. store text in a compo if it's contained by the compo as the compo's text child element

    :param compos: non-text components
    :param texts: OCR text elements
    :param input_img_path: path of the input screenshot (only used by the
                           commented-out classification branch below)
    :param intersection_bias: (horizontal, vertical) pixel tolerance used
                              when computing intersections
    :param containment_ratio: overlap ratio above which one element is
                              treated as contained in the other
    :return: kept components followed by the non-contained texts
    '''

    # resize_by_height = 800
    # org, grey = pre.read_img(input_img_path, resize_by_height)
    #
    # grey = grey.astype('float32')
    # grey = grey / 255
    #
    # grey = (grey - grey.mean()) / grey.std()

    elements = []
    contained_texts = []

    # classification_start_time = time.time()

    for compo in compos:
        is_valid = True
        text_area = 0
        for text in texts:
            inter, iou, ioa, iob = compo.calc_intersection_area(text, bias=intersection_bias)
            if inter > 0:
                # the non-text is contained in the text compo
                if ioa >= containment_ratio:
                    is_valid = False
                    break
                text_area += inter
                # the text is contained in the non-text compo
                if iob >= containment_ratio and compo.category != 'Block':
                    contained_texts.append(text)
        # keep the compo only when it is not dominated by text regions
        if is_valid and text_area / compo.area < containment_ratio:
            # for t in contained_texts:
            #     t.parent_id = compo.id
            # compo.children += contained_texts

            # --------- classification ----------

            # comp_grey = grey[compo.row_min:compo.row_max, compo.col_min:compo.col_max]
            #
            # comp_crop = cv2.resize(comp_grey, (32, 32))
            #
            # comp_crop = comp_crop.reshape(1, 1, 32, 32)
            #
            # comp_tensor = torch.tensor(comp_crop)
            # comp_tensor = comp_tensor.permute(0, 1, 3, 2)
            #
            # pred_label = model(comp_tensor)
            #
            # if np.argmax(pred_label.cpu().data.numpy(), axis=1) in [72.0, 42.0, 77.0, 91.0, 6.0, 89.0, 40.0, 43.0, 82.0,
            #                                                         3.0, 68.0, 49.0, 56.0, 89.0]:
            #     elements.append(compo)

            # --------- end classification ----------

            elements.append(compo)
    # time_cost_ic = time.time() - classification_start_time
    # print("time cost for icon classification: %2.2f s" % time_cost_ic)

    # text_selection_time = time.time()

    # elements += texts
    # texts swallowed by some compo are dropped; the rest become elements
    for text in texts:
        if text not in contained_texts:
            elements.append(text)

        # ---------- Simulate keyword search -----------

        # for key in keyword_list:
        #     for w in keyword_list[key]:
        #         if w in text.text_content.lower():
        #             elements.append(text)

        # ---------- end -------------------------------

    # time_cost_ts = time.time() - text_selection_time
    # print("time cost for text selection: %2.2f s" % time_cost_ts)

    # return elements, time_cost_ic, time_cost_ts
    return elements
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
def check_containment(elements):
    """Detect pairwise containment among elements and record parent/child links.

    For every unordered pair, `element_relation` (with a 2px tolerance on each
    axis) reports -1 when the first element lies inside the second and 1 when
    it encloses the second; the contained element gets its `parent_id` set and
    is appended to the container's `children`. Mutates `elements` in place.
    """
    total = len(elements)
    for idx_a in range(total - 1):
        for idx_b in range(idx_a + 1, total):
            ele_a, ele_b = elements[idx_a], elements[idx_b]
            relation = ele_a.element_relation(ele_b, bias=(2, 2))
            if relation == -1:
                # a is contained in b
                ele_b.children.append(ele_a)
                ele_a.parent_id = ele_b.id
            elif relation == 1:
                # a contains b
                ele_a.children.append(ele_b)
                ele_b.parent_id = ele_a.id
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
def remove_top_bar(elements, img_height):
    """Filter out elements that look like the status/top bar of a GUI.

    An element is treated as top-bar content when it starts within the top
    10 pixels of the screen AND is shorter than 4% of the image height.
    Returns a new list; the input list is not modified.
    """
    height_limit = img_height * 0.04
    return [
        ele for ele in elements
        if not (ele.row_min < 10 and ele.height < height_limit)
    ]
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
def remove_bottom_bar(elements, img_height):
    """Filter out elements that look like Android navigation-bar icons.

    Tuned for an 800px-tall GUI: anything starting below row 750 with a
    20-30px square footprint is dropped. Returns a new list.
    NOTE(review): `img_height` is accepted but unused; thresholds are
    hard-coded for 800-height screenshots.
    """
    kept = []
    for ele in elements:
        looks_like_nav_icon = (ele.row_min > 750
                               and 20 <= ele.height <= 30
                               and 20 <= ele.width <= 30)
        if not looks_like_nav_icon:
            kept.append(ele)
    return kept
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
def compos_clip_and_fill(clip_root, org, compos):
    # Clip every detected component out of the original image `org`, save each
    # clip under a per-class sub-folder of `clip_root`, and write a background
    # image ('bkg.png') in which every clipped region has been painted over
    # with the dominant colour sampled from its surroundings.
    # `compos` is a list of dicts with 'class', 'id' and 'position' keys; each
    # dict is mutated in place to record the saved clip under 'path'.
    def most_pix_around(pad=6, offset=2):
        '''
        determine the filled background color according to the most surrounding pixel
        '''
        # Reads row_min/row_max/col_min/col_max from the enclosing loop body
        # at call time (closure over the current component's position).
        up = row_min - pad if row_min - pad >= 0 else 0
        left = col_min - pad if col_min - pad >= 0 else 0
        bottom = row_max + pad if row_max + pad < org.shape[0] - 1 else org.shape[0] - 1
        right = col_max + pad if col_max + pad < org.shape[1] - 1 else org.shape[1] - 1
        most = []
        # One most-frequent value per BGR channel, sampled from the 4 border
        # strips around the component (offset keeps a small margin from it).
        for i in range(3):
            val = np.concatenate((org[up:row_min - offset, left:right, i].flatten(),
                                  org[row_max + offset:bottom, left:right, i].flatten(),
                                  org[up:bottom, left:col_min - offset, i].flatten(),
                                  org[up:bottom, col_max + offset:right, i].flatten()))
            # NOTE(review): raises if all four strips are empty (component
            # touching every image edge) — TODO confirm that cannot happen here.
            most.append(int(np.argmax(np.bincount(val))))
        return most

    # Start from a clean output directory.
    if os.path.exists(clip_root):
        shutil.rmtree(clip_root)
    os.mkdir(clip_root)

    bkg = org.copy()
    cls_dirs = []  # class folders created so far
    for compo in compos:
        cls = compo['class']
        if cls == 'Background':
            # Background gets no clip of its own; it points at the filled image.
            compo['path'] = pjoin(clip_root, 'bkg.png')
            continue
        c_root = pjoin(clip_root, cls)
        c_path = pjoin(c_root, str(compo['id']) + '.jpg')
        compo['path'] = c_path
        if cls not in cls_dirs:
            os.mkdir(c_root)
            cls_dirs.append(cls)

        position = compo['position']
        col_min, row_min, col_max, row_max = position['column_min'], position['row_min'], position['column_max'], position['row_max']
        cv2.imwrite(c_path, org[row_min:row_max, col_min:col_max])
        # Fill up the background area
        cv2.rectangle(bkg, (col_min, row_min), (col_max, row_max), most_pix_around(), -1)
    cv2.imwrite(pjoin(clip_root, 'bkg.png'), bkg)
|
| 312 |
+
|
| 313 |
+
|
| 314 |
+
def merge(img_path, compo_path, text_path, merge_root=None, is_paragraph=False, is_remove_top_bar=False, is_remove_bottom_bar=False, show=False, wait_key=0):
    # Merge the component-detection and text-detection results for one GUI
    # screenshot into a single element list, optionally strip top/bottom bars
    # and group text lines into paragraphs, then save the annotated image and
    # the merged JSON under `merge_root`.
    # Returns (board, components): the visualisation image and the saved
    # element dicts produced by save_elements.
    # NOTE(review): the two json.load(open(...)) calls leave file handles
    # unclosed — consider `with open(...)` context managers.
    compo_json = json.load(open(compo_path, 'r'))
    text_json = json.load(open(text_path, 'r'))

    # load text and non-text compo
    ele_id = 0
    compos = []
    for compo in compo_json['compos']:
        element = Element(ele_id, (compo['column_min'], compo['row_min'], compo['column_max'], compo['row_max']), compo['class'])
        compos.append(element)
        ele_id += 1
    texts = []
    for text in text_json['texts']:
        element = Element(ele_id, (text['column_min'], text['row_min'], text['column_max'], text['row_max']), 'Text', text_content=text['content'])
        texts.append(element)
        ele_id += 1
    # The two detectors may have run on differently resized images: rescale
    # text boxes to the component-detection resolution (ratio from heights).
    if compo_json['img_shape'] != text_json['img_shape']:
        resize_ratio = compo_json['img_shape'][0] / text_json['img_shape'][0]
        for text in texts:
            text.resize(resize_ratio)

    # check the original detected elements
    img = cv2.imread(img_path)
    img_resize = cv2.resize(img, (compo_json['img_shape'][1], compo_json['img_shape'][0]))
    ratio = img.shape[0] / img_resize.shape[0]

    show_elements(img, texts + compos, ratio, show=show, win_name='all elements before merging', wait_key=wait_key, line=3)

    # refine elements
    texts = refine_texts(texts, compo_json['img_shape'])
    elements = refine_elements(compos, texts, img_path)
    if is_remove_top_bar:
        elements = remove_top_bar(elements, img_height=compo_json['img_shape'][0])
    if is_remove_bottom_bar:
        elements = remove_bottom_bar(elements, img_height=compo_json['img_shape'][0])
    if is_paragraph:
        elements = merge_text_line_to_paragraph(elements, max_line_gap=7)
    reassign_ids(elements)
    check_containment(elements)
    board = show_elements(img, elements, ratio, show=show, win_name='elements after merging', wait_key=wait_key, line=3)

    # save all merged elements, clips and blank background
    name = img_path.replace('\\', '/').split('/')[-1][:-4]  # stem of the image file name
    components = save_elements(pjoin(merge_root, name + '.json'), elements, img_resize.shape)
    cv2.imwrite(pjoin(merge_root, name + '.jpg'), board)
    print('[Merge Completed] Input: %s Output: %s' % (img_path, pjoin(merge_root, name + '.jpg')))
    return board, components
    # return this_ic_time, this_ts_time
|
CDM/detect_text/Text.py
ADDED
|
@@ -0,0 +1,181 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
|
| 2 |
+
import numpy as np
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class Text:
    """A text region detected by OCR: its content plus a bounding box.

    `location` is a dict with keys 'left', 'top', 'right', 'bottom'
    (pixel coordinates). Derived metrics (width, height, area, average
    word_width) are recomputed whenever the box changes.
    """
    def __init__(self, id, content, location):
        self.id = id                  # integer identifier, reassigned after merging
        self.content = content        # recognised string
        self.location = location      # bounding box dict

        self.width = self.location['right'] - self.location['left']
        self.height = self.location['bottom'] - self.location['top']
        self.area = self.width * self.height
        # average width per character, used as a merge-gap heuristic
        # NOTE(review): raises ZeroDivisionError if content is '' — callers
        # apparently never pass empty strings; TODO confirm.
        self.word_width = self.width / len(self.content)

    '''
    ********************************
    *** Relation with Other text ***
    ********************************
    '''
    def is_justified(self, ele_b, direction='h', max_bias_justify=4):
        '''
        Check if the element is justified
        :param max_bias_justify: maximum bias if two elements to be justified
        :param direction:
             - 'v': vertical up-down connection
             - 'h': horizontal left-right connection
        '''
        l_a = self.location
        l_b = ele_b.location
        # connected vertically - up and below
        if direction == 'v':
            # left and right should be justified
            if abs(l_a['left'] - l_b['left']) < max_bias_justify and abs(l_a['right'] - l_b['right']) < max_bias_justify:
                return True
            return False
        elif direction == 'h':
            # top and bottom should be justified
            if abs(l_a['top'] - l_b['top']) < max_bias_justify and abs(l_a['bottom'] - l_b['bottom']) < max_bias_justify:
                return True
            return False

    def is_on_same_line(self, text_b, direction='h', bias_gap=4, bias_justify=4):
        '''
        Check if the element is on the same row(direction='h') or column(direction='v') with ele_b
        :param direction:
             - 'v': vertical up-down connection
             - 'h': horizontal left-right connection
        :return:
        '''
        l_a = self.location
        l_b = text_b.location
        # connected vertically - up and below
        if direction == 'v':
            # left and right should be justified
            if self.is_justified(text_b, direction='v', max_bias_justify=bias_justify):
                # top and bottom should be connected (small gap)
                if abs(l_a['bottom'] - l_b['top']) < bias_gap or abs(l_a['top'] - l_b['bottom']) < bias_gap:
                    return True
            return False
        elif direction == 'h':
            # top and bottom should be justified
            if self.is_justified(text_b, direction='h', max_bias_justify=bias_justify):
                # top and bottom should be connected (small gap)
                if abs(l_a['right'] - l_b['left']) < bias_gap or abs(l_a['left'] - l_b['right']) < bias_gap:
                    return True
            return False

    def is_intersected(self, text_b, bias):
        # True when the two boxes overlap by more than `bias` pixels on each
        # axis. NOTE(review): returns None (falsy) instead of False when
        # there is no overlap — callers only use it in a boolean context.
        l_a = self.location
        l_b = text_b.location
        left_in = max(l_a['left'], l_b['left']) + bias
        top_in = max(l_a['top'], l_b['top']) + bias
        right_in = min(l_a['right'], l_b['right'])
        bottom_in = min(l_a['bottom'], l_b['bottom'])

        w_in = max(0, right_in - left_in)
        h_in = max(0, bottom_in - top_in)
        area_in = w_in * h_in
        if area_in > 0:
            return True

    '''
    ***********************
    *** Revise the Text ***
    ***********************
    '''
    def merge_text(self, text_b):
        # Absorb text_b into self: bounding box becomes the union of the two,
        # contents are concatenated in left-to-right order.
        text_a = self
        top = min(text_a.location['top'], text_b.location['top'])
        left = min(text_a.location['left'], text_b.location['left'])
        right = max(text_a.location['right'], text_b.location['right'])
        bottom = max(text_a.location['bottom'], text_b.location['bottom'])
        self.location = {'left': left, 'top': top, 'right': right, 'bottom': bottom}
        self.width = self.location['right'] - self.location['left']
        self.height = self.location['bottom'] - self.location['top']
        self.area = self.width * self.height

        # concatenate contents with the leftmost element first
        left_element = text_a
        right_element = text_b
        if text_a.location['left'] > text_b.location['left']:
            left_element = text_b
            right_element = text_a
        self.content = left_element.content + ' ' + right_element.content
        self.word_width = self.width / len(self.content)

    def shrink_bound(self, binary_map):
        # Tighten the bounding box to the actual foreground pixels in
        # `binary_map` (non-zero = text). Each side uses a small state
        # machine: 0 = undecided, 1 = currently inside a blank margin,
        # -1 = side finalised.
        bin_clip = binary_map[self.location['top']:self.location['bottom'], self.location['left']:self.location['right']]
        height, width = np.shape(bin_clip)

        shrink_top = 0
        shrink_bottom = 0
        for i in range(height):
            # top
            if shrink_top == 0:
                if sum(bin_clip[i]) == 0:
                    shrink_top = 1
                else:
                    shrink_top = -1
            elif shrink_top == 1:
                if sum(bin_clip[i]) != 0:
                    self.location['top'] += i
                    shrink_top = -1
            # bottom
            if shrink_bottom == 0:
                if sum(bin_clip[height-i-1]) == 0:
                    shrink_bottom = 1
                else:
                    shrink_bottom = -1
            elif shrink_bottom == 1:
                if sum(bin_clip[height-i-1]) != 0:
                    self.location['bottom'] -= i
                    shrink_bottom = -1

            if shrink_top == -1 and shrink_bottom == -1:
                break

        shrink_left = 0
        shrink_right = 0
        for j in range(width):
            # left
            if shrink_left == 0:
                if sum(bin_clip[:, j]) == 0:
                    shrink_left = 1
                else:
                    shrink_left = -1
            elif shrink_left == 1:
                if sum(bin_clip[:, j]) != 0:
                    self.location['left'] += j
                    shrink_left = -1
            # right
            if shrink_right == 0:
                if sum(bin_clip[:, width-j-1]) == 0:
                    shrink_right = 1
                else:
                    shrink_right = -1
            elif shrink_right == 1:
                if sum(bin_clip[:, width-j-1]) != 0:
                    self.location['right'] -= j
                    shrink_right = -1

            if shrink_left == -1 and shrink_right == -1:
                break
        # recompute derived metrics after the box changed
        self.width = self.location['right'] - self.location['left']
        self.height = self.location['bottom'] - self.location['top']
        self.area = self.width * self.height
        self.word_width = self.width / len(self.content)

    '''
    *********************
    *** Visualization ***
    *********************
    '''
    def visualize_element(self, img, color=(0, 0, 255), line=1, show=False):
        # Draw this text's bounding box on `img` (in place); optionally pop
        # up an interactive window showing the result.
        loc = self.location
        cv2.rectangle(img, (loc['left'], loc['top']), (loc['right'], loc['bottom']), color, line)
        if show:
            print(self.content)
            cv2.imshow('text', img)
            cv2.waitKey()
            cv2.destroyWindow('text')
|
CDM/detect_text/ocr.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
|
| 2 |
+
import os
|
| 3 |
+
import requests
|
| 4 |
+
import json
|
| 5 |
+
from base64 import b64encode
|
| 6 |
+
import time
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def Google_OCR_makeImageData(imgpath):
    """Build the JSON payload (as bytes) for a Google Vision annotate call.

    Reads the image at `imgpath`, base64-encodes it, and wraps it in a
    DOCUMENT_TEXT_DETECTION request body.
    """
    with open(imgpath, 'rb') as img_file:
        encoded = b64encode(img_file.read()).decode()
    request_body = {
        'image': {
            'content': encoded
        },
        'features': [{
            'type': 'DOCUMENT_TEXT_DETECTION',
            # 'type': 'TEXT_DETECTION',
            'maxResults': 1
        }]
    }
    return json.dumps({"requests": request_body}).encode()
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def ocr_detection_google(imgpath):
    """Run Google Cloud Vision OCR on the image at `imgpath`.

    Sends the annotate request with the API key read from the `google_ocr`
    environment variable. Returns the per-word annotations (index 0, the
    full-text summary, is skipped) or None when no text was found.
    Network I/O: requires connectivity and a valid key.
    """
    # start = time.clock()
    endpoint = 'https://vision.googleapis.com/v1/images:annotate'

    api_key = os.environ.get('google_ocr')

    payload = Google_OCR_makeImageData(imgpath)
    # NOTE(review): header name 'Content_Type' looks like a typo for
    # 'Content-Type' — kept as-is to preserve behaviour.
    response = requests.post(endpoint,
                             data=payload,
                             params={'key': api_key},
                             headers={'Content_Type': 'application/json'})
    # print('*** Text Detection Time Taken:%.3fs ***' % (time.clock() - start))
    print("*** Please replace the Google OCR key at detect_text/ocr.py line 28 with your own (apply in https://cloud.google.com/vision) ***")
    # print('response.json(): ', response.json())
    body = response.json()
    if body['responses'] == [{}]:
        # No Text
        return None
    return body['responses'][0]['textAnnotations'][1:]
|
CDM/detect_text/text_detection.py
ADDED
|
@@ -0,0 +1,289 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import CDM.detect_text.ocr as ocr
|
| 2 |
+
from CDM.detect_text.Text import Text
|
| 3 |
+
import numpy as np
|
| 4 |
+
import cv2
|
| 5 |
+
import json
|
| 6 |
+
import time
|
| 7 |
+
import os
|
| 8 |
+
from os.path import join as pjoin
|
| 9 |
+
# from paddleocr import PaddleOCR
|
| 10 |
+
import pytesseract
|
| 11 |
+
|
| 12 |
+
# paddle_model = PaddleOCR(use_angle_cls=True, lang="en") #'ch' for chinese and english, 'en' for english
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def save_detection_json(file_path, texts, img_shape):
    """Serialize detected text regions to a JSON file.

    :param file_path: destination path of the JSON file
    :param texts: iterable of Text objects (id, content, location, width, height)
    :param img_shape: shape of the image the coordinates refer to
    """
    output = {'img_shape': img_shape, 'texts': []}
    for text in texts:
        c = {'id': text.id, 'content': text.content}
        loc = text.location
        # translate left/top/right/bottom into the column/row naming used by
        # the component-detection JSON so both files share one schema
        c['column_min'], c['row_min'], c['column_max'], c['row_max'] = loc['left'], loc['top'], loc['right'], loc['bottom']
        c['width'] = text.width
        c['height'] = text.height
        output['texts'].append(c)
    # Fix: use a context manager so the file handle is always closed
    # (the original opened the file and never closed it).
    with open(file_path, 'w') as f_out:
        json.dump(output, f_out, indent=4)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def visualize_texts(org_img, texts, shown_resize_height=None, show=False, write_path=None):
    """Draw all text bounding boxes on a copy of the image.

    Optionally displays the (resized) result in a cv2 window and/or writes
    the full-resolution annotated image to `write_path`.
    """
    canvas = org_img.copy()
    for text in texts:
        text.visualize_element(canvas, line=2)

    shown = canvas
    if shown_resize_height is not None:
        # scale to the requested height, keeping the aspect ratio
        shown = cv2.resize(canvas, (int(shown_resize_height * (canvas.shape[1] / canvas.shape[0])), shown_resize_height))

    if show:
        cv2.imshow('texts', shown)
        cv2.waitKey(0)
        cv2.destroyWindow('texts')
    if write_path is not None:
        cv2.imwrite(write_path, canvas)
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def text_sentences_recognition(texts):
    '''
    Merge separate words detected by Google ocr into a sentence
    '''
    # Repeatedly fold each text into the first already-kept text that sits on
    # the same horizontal line, until a full pass makes no merge.
    merging = True
    while merging:
        merging = False
        kept = []
        for candidate in texts:
            absorbed = False
            for sentence in kept:
                justify = 0.2 * min(candidate.height, sentence.height)
                gap = 2 * max(candidate.word_width, sentence.word_width)
                if candidate.is_on_same_line(sentence, 'h', bias_justify=justify, bias_gap=gap):
                    sentence.merge_text(candidate)
                    absorbed = True
                    merging = True
                    break
            if not absorbed:
                kept.append(candidate)
        texts = kept.copy()

    # reassign sequential ids after merging
    for new_id, text in enumerate(texts):
        text.id = new_id
    return texts
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def merge_intersected_texts(texts):
    '''
    Merge intersected texts (sentences or words)
    '''
    # Fold each text into the first already-kept text whose box overlaps it
    # (2px tolerance); iterate until a full pass performs no merge.
    merging = True
    while merging:
        merging = False
        kept = []
        for candidate in texts:
            absorbed = False
            for other in kept:
                if candidate.is_intersected(other, bias=2):
                    other.merge_text(candidate)
                    absorbed = True
                    merging = True
                    break
            if not absorbed:
                kept.append(candidate)
        texts = kept.copy()
    return texts
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def text_cvt_orc_format(ocr_result):
    """Convert Google Vision annotations into Text objects.

    Each annotation's bounding polygon is collapsed to its axis-aligned
    bounding box; annotations with incomplete vertices (missing 'x' or 'y')
    are skipped. Returns [] when `ocr_result` is None.
    """
    texts = []
    if ocr_result is None:
        return texts
    for idx, annotation in enumerate(ocr_result):
        vertices = annotation['boundingPoly']['vertices']
        content = annotation['description']
        xs, ys = [], []
        complete = True
        for vertex in vertices:
            if 'x' not in vertex or 'y' not in vertex:
                complete = False
                break
            xs.append(vertex['x'])
            ys.append(vertex['y'])
        if not complete:
            continue
        location = {'left': min(xs), 'top': min(ys),
                    'right': max(xs), 'bottom': max(ys)}
        texts.append(Text(idx, content, location))
    return texts
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
def text_cvt_orc_format_paddle(paddle_result):
    """Convert PaddleOCR output lines into Text objects.

    Each line is (quad_points, (content, confidence)); the quadrilateral is
    collapsed to its axis-aligned bounding box.
    """
    texts = []
    for idx, line in enumerate(paddle_result):
        quad = np.array(line[0])
        # quad = quad * 5
        location = {'left': int(quad[:, 0].min()), 'top': int(quad[:, 1].min()),
                    'right': int(quad[:, 0].max()), 'bottom': int(quad[:, 1].max())}
        texts.append(Text(idx, line[1][0], location))
    return texts
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
def text_cvt_orc_format_tesseract(tesseract_result):
    """Convert Tesseract image_to_data output into line-level Text objects.

    Only entries at hierarchy level 4 (text lines) with non-empty content are
    kept; their left/top/width/height become an absolute bounding box.
    """
    texts = []
    next_id = 0
    for i in range(len(tesseract_result['level'])):
        if tesseract_result['level'][i] != 4:
            continue
        content = tesseract_result['text'][i].strip()
        if not content:
            continue
        left = int(tesseract_result['left'][i])
        top = int(tesseract_result['top'][i])
        location = {
            'left': left,
            'top': top,
            'right': left + int(tesseract_result['width'][i]),
            'bottom': top + int(tesseract_result['height'][i]),
        }
        texts.append(Text(next_id, content, location))
        next_id += 1
    return texts
|
| 161 |
+
|
| 162 |
+
def text_cvt_orc_format_tesseract_by_line(data):
    """Group word-level Tesseract results (level 5) into whole text lines.

    Words sharing the same `line_num` are joined with spaces and their boxes
    unioned into one bounding box; a change of `line_num` starts a new line.
    Returns a list of Text objects, one per assembled line.
    """
    texts = []
    next_id = 0
    current_line = None       # line_num of the line being accumulated
    words = []                # words collected for the current line
    box = [0, 0, 0, 0]        # left, top, right, bottom of the current line

    def flush():
        # Emit the accumulated line as a Text object.
        nonlocal next_id
        texts.append(Text(next_id, ' '.join(words),
                          {'left': box[0], 'top': box[1],
                           'right': box[2], 'bottom': box[3]}))
        next_id += 1

    for i in range(len(data['level'])):
        # only word-level entries carry content
        if data['level'][i] != 5:
            continue
        if current_line != data['line_num'][i]:
            # new line begins: flush the previous one first
            if current_line is not None:
                flush()
            current_line = data['line_num'][i]
            words = [data['text'][i]]
            box = [data['left'][i],
                   data['top'][i],
                   data['left'][i] + data['width'][i],
                   data['top'][i] + data['height'][i]]
        else:
            # extend the current line with this word
            words.append(data['text'][i])
            box[2] = max(box[2], data['left'][i] + data['width'][i])
            box[3] = max(box[3], data['top'][i] + data['height'][i])

    # flush the trailing line, if any
    if words:
        flush()

    return texts
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
def text_filter_noise(texts):
    """Drop single-character OCR detections that are likely noise.

    Single characters are kept only when they are a meaningful symbol
    (or the letter 'a', case-insensitively); longer strings always pass.
    """
    meaningful_single_chars = ['a', ',', '.', '!', '?', '$', '%', ':', '&', '+']
    filtered = []
    for text in texts:
        if len(text.content) > 1 or text.content.lower() in meaningful_single_chars:
            filtered.append(text)
    return filtered
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
def text_detection(input_file='../data/input/30800.jpg', output_file='../data/output', show=False, method='google', paddle_model=None):
    '''
    Detect text in a GUI screenshot with the selected OCR backend, then save
    the annotated image and a JSON of the detected texts under
    <output_file>/ocr/. Returns the OCR time cost in seconds (CPU time).
    :param method: google or paddle (or pytesseract)
    :param paddle_model: the preload paddle model for paddle ocr
    '''
    start = time.process_time()
    name = input_file.split('/')[-1][:-4]  # image file stem used for outputs
    ocr_root = pjoin(output_file, 'ocr')
    img = cv2.imread(input_file)
    if img is None:
        # NOTE(review): only warns — later steps will still fail on a bad path
        print("imread nothing!")

    # resize the img to speed up the ocr
    # img = cv2.resize(img, (int(img.shape[1]/5), int(img.shape[0]/5)))
    # cv2.imshow("img", img)
    # cv2.waitKey(0)

    if method == 'google':
        print('*** Detect Text through Google OCR ***')
        ocr_result = ocr.ocr_detection_google(input_file)
        # convert, then clean up: merge overlaps, drop noise, join into sentences
        texts = text_cvt_orc_format(ocr_result)
        texts = merge_intersected_texts(texts)
        texts = text_filter_noise(texts)
        texts = text_sentences_recognition(texts)
        ocr_time_cost = time.process_time() - start
    elif method == 'paddle':
        # The import of the paddle ocr can be separate to the beginning of the program if you decide to use this method
        # from paddleocr import PaddleOCR
        print('*** Detect Text through Paddle OCR ***')
        # if paddle_model is None:
        #     paddle_model = PaddleOCR(use_angle_cls=True, lang="en") #'ch' for chinese and english, 'en' for english
        #     None
        # NOTE(review): raises AttributeError if paddle_model is None
        result = paddle_model.ocr(input_file, cls=True)
        ocr_time_cost = time.process_time() - start
        texts = text_cvt_orc_format_paddle(result)

    elif method == 'pytesseract':

        # Tesseract expects RGB; cv2 loads BGR
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        # Perform OCR using Tesseract
        result = pytesseract.image_to_data(img_rgb, output_type=pytesseract.Output.DICT)
        print("ocr result: ", result)

        ocr_time_cost = time.process_time() - start

        # Convert the Tesseract result to the desired format
        texts = text_cvt_orc_format_tesseract_by_line(result)
        print("texts: ", texts)
    else:
        raise ValueError('Method has to be "google" or "paddle" or "pytesseract"')

    # save annotated image and JSON detection result
    visualize_texts(img, texts, shown_resize_height=800, show=show, write_path=pjoin(ocr_root, name+'.png'))
    save_detection_json(pjoin(ocr_root, name+'.json'), texts, img.shape)
    # ocr_time_cost = time.process_time() - start
    print("[Text Detection Completed in %.3f s] Input: %s Output: %s" % (ocr_time_cost, input_file, pjoin(ocr_root, name+'.json')))

    # print("!!! detected content !!!")
    # for text in texts:
    #     print(text.content)

    return ocr_time_cost
|
| 286 |
+
|
| 287 |
+
|
| 288 |
+
# text_detection()
|
| 289 |
+
|
CDM/input_examples/README.md
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# UIED - UI element detection, detecting UI elements from UI screenshots or drawings
|
| 2 |
+
|
| 3 |
+
This project is still ongoing and this repo may be updated irregularly, I developed a web app for the UIED in http://uied.online
|
| 4 |
+
|
| 5 |
+
## Related Publications:
|
| 6 |
+
[1. UIED: a hybrid tool for GUI element detection](https://dl.acm.org/doi/10.1145/3368089.3417940)
|
| 7 |
+
|
| 8 |
+
[2. Object Detection for Graphical User Interface: Old Fashioned or Deep Learning or a Combination?](https://arxiv.org/abs/2008.05132)
|
| 9 |
+
|
| 10 |
+
>The repo has been **upgraded with Google OCR** for GUI text detection, to use the original version in our paper (using [EAST](https://github.com/argman/EAST) as text detector), check the release [v2.3](https://github.com/MulongXie/UIED/releases/tag/v2.3) and download the pre-trained model in [this link](https://drive.google.com/drive/folders/1MK0Om7Lx0wRXGDfNcyj21B0FL1T461v5?usp=sharing).
|
| 11 |
+
|
| 12 |
+
## What is it?
|
| 13 |
+
|
| 14 |
+
UI Element Detection (UIED) is an old-fashioned computer vision (CV) based element detection approach for graphic user interface.
|
| 15 |
+
|
| 16 |
+
The input of UIED could be various UI image, such as mobile app or web page screenshot, UI design drawn by Photoshop or Sketch, and even some hand-drawn UI design. Then the approach detects and classifies text and graphic UI elements, and exports the detection result as JSON file for future application.
|
| 17 |
+
|
| 18 |
+
UIED comprises two parts to detect UI text and graphic elements, such as button, image and input bar.
|
| 19 |
+
* For text, it leverages [Google OCR](https://cloud.google.com/vision/docs/ocr) to perform detection.
|
| 20 |
+
|
| 21 |
+
* For graphical elements, it uses old-fashioned CV approaches to locate the elements and a CNN classifier to achieve classification.
|
| 22 |
+
|
| 23 |
+
> UIED is highly customizable, you can replace both parts by your choice (e.g. other text detection approaches). Unlike black-box end-to-end deep learning approach, you can revise the algorithms in the non-text detection and merging (partially or entirely) easily to fit your task.
|
| 24 |
+
|
| 25 |
+

|
| 26 |
+
|
| 27 |
+
## How to use?
|
| 28 |
+
|
| 29 |
+
### Dependency
|
| 30 |
+
* **Python 3.5**
|
| 31 |
+
* **Opencv 3.4.2**
|
| 32 |
+
* **Pandas**
|
| 33 |
+
<!-- * **Tensorflow 1.10.0**
|
| 34 |
+
* **Keras 2.2.4**
|
| 35 |
+
* **Sklearn 0.22.2** -->
|
| 36 |
+
|
| 37 |
+
### Installation
|
| 38 |
+
<!-- Install the mentioned dependencies, and download two pre-trained models from [this link](https://drive.google.com/drive/folders/1MK0Om7Lx0wRXGDfNcyj21B0FL1T461v5?usp=sharing) for EAST text detection and GUI element classification. -->
|
| 39 |
+
|
| 40 |
+
<!-- Change ``CNN_PATH`` and ``EAST_PATH`` in *config/CONFIG.py* to your locations. -->
|
| 41 |
+
|
| 42 |
+
The new version of UIED equipped with Google OCR is easy to deploy and no pre-trained model is needed. Simply download the repo along with the dependencies.
|
| 43 |
+
|
| 44 |
+
> Please replace the Google OCR key at `detect_text/ocr.py line 28` with your own (apply in [Google website](https://cloud.google.com/vision)).
|
| 45 |
+
|
| 46 |
+
### Usage
|
| 47 |
+
To test your own image(s):
|
| 48 |
+
* To test single image, change *input_path_img* in ``run_single.py`` to your input image and the results will be output to *output_root*.
|
| 49 |
+
* To test multiple images, change *input_img_root* in ``run_batch.py`` to your input directory and the results will be output to *output_root*.
|
| 50 |
+
* To adjust the parameters lively, using ``run_testing.py``
|
| 51 |
+
|
| 52 |
+
> Note: The best set of parameters vary for different types of GUI image (Mobile App, Web, PC). I highly recommend to first play with the ``run_testing.py`` to pick a good set of parameters for your data.
|
| 53 |
+
|
| 54 |
+
## Folder structure
|
| 55 |
+
``cnn/``
|
| 56 |
+
* Used to train classifier for graphic UI elements
|
| 57 |
+
* Set path of the CNN classification model
|
| 58 |
+
|
| 59 |
+
``config/``
|
| 60 |
+
* Set data paths
|
| 61 |
+
* Set parameters for graphic elements detection
|
| 62 |
+
|
| 63 |
+
``data/``
|
| 64 |
+
* Input UI images and output detection results
|
| 65 |
+
|
| 66 |
+
``detect_compo/``
|
| 67 |
+
* Non-text GUI component detection
|
| 68 |
+
|
| 69 |
+
``detect_text/``
|
| 70 |
+
* GUI text detection using Google OCR
|
| 71 |
+
|
| 72 |
+
``detect_merge/``
|
| 73 |
+
* Merge the detection results of non-text and text GUI elements
|
| 74 |
+
|
| 75 |
+
The major detection algorithms are in ``detect_compo/``, ``detect_text/`` and ``detect_merge/``
|
| 76 |
+
|
| 77 |
+
## Demo
|
| 78 |
+
GUI element detection result for web screenshot
|
| 79 |
+
|
| 80 |
+

|
CDM/logs/cfg-for-web.txt
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Testing image: data/input/9.png
|
| 2 |
+
|
| 3 |
+
1. detect_compo/ip_region_proposal.py
|
| 4 |
+
# smaller minarea 50 -> 25
|
| 5 |
+
line 70: uied_params = {'param-grad':5, 'param-block':5, 'param-minarea':25}
|
| 6 |
+
|
| 7 |
+
2. detect_compo/lib_ip/ip_detection.py
|
| 8 |
+
line 289-290 comment: # remove filter of aspect ratio
|
| 9 |
+
line 342-344 comment: # remove is_line check
|
| 10 |
+
|
| 11 |
+
3. detect_text_east/lib_east/eval.py
|
| 12 |
+
# smaller max_word_gap 10 -> 5
|
| 13 |
+
line 52: def merge_text(corners, max_word_gad=5) #
|
| 14 |
+
|
| 15 |
+
4. merge.py
|
| 16 |
+
# smaller horizontal max gap to merge lines (6,0) -> (4,0)
|
| 17 |
+
line 199 max_gap=(4,0)
|
| 18 |
+
# smaller vertical max gap to merge paragraph (0,6) -> (0,4)
|
| 19 |
+
line 202 max_gap=(0,6)
|
CDM/logs/log.txt
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
16:10 8/7/2020
|
| 2 |
+
- Synchronized with Webapp.
|
| 3 |
+
- Add image inspection.
|
| 4 |
+
- Used No-line v1.
|
| 5 |
+
- No-line v2 requires bug fix with consideration of gap.
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
11:00 23/7/2020
|
| 9 |
+
- Synchronized with Webapp.
|
| 10 |
+
|
| 11 |
+
10:53 4/8/2020
|
| 12 |
+
- Synchronized with Webapp.
|
| 13 |
+
|
| 14 |
+
7/10/2020
|
| 15 |
+
- Extract parameters as configurable
|
| 16 |
+
|
| 17 |
+
30/10/2020
|
| 18 |
+
- Speed optimization (500% boost)
|
| 19 |
+
|
| 20 |
+
11/11/2020
|
| 21 |
+
- Revise rm_line
|
| 22 |
+
- Add adjustable track bar testing
|
CDM/logs/speed-improvement.txt
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Optimization:
|
| 2 |
+
1. ip_preprocessing.py / gray_to_gradient : 0.5s -> 0.02s
|
| 3 |
+
|
| 4 |
+
2. ip_draw.py / draw_bounding_box : if not show and write_path is None: return : 0.005s -> 0s
|
| 5 |
+
|
| 6 |
+
3. ip_detection.py / component_detection : if ff[0] < min_obj_area: continue : 2.5s -> 0.3s
|
| 7 |
+
|
| 8 |
+
4. ip_detection.py / component_detection : cv2.findNonZero : 0.65s -> 0.33s
|
| 9 |
+
|
| 10 |
+
5. block_division.py / block_division : if ff[0] < 500 : continue: 1.97s -> 1s
|
| 11 |
+
|
| 12 |
+
6. block_division.py / block_division : Turn off draw : 1s -> 0.65s
|
CDM/model/model-99-ViT-entire.pkl
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:fc13133f12a561224c075dac2633af6dbe6036e6c6603c266efc0e6536727ca6
|
| 3 |
+
size 343682793
|
CDM/model/model-99-resnet18.pkl
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1b31df5d3ed9c743990fb7a27baf71626cf7766df36d1f414496c89d34a854f2
|
| 3 |
+
size 44957605
|
CDM/requirements.txt
ADDED
|
Binary file (3.42 kB). View file
|
|
|
CDM/result_classification/README.md
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# UIED - UI element detection, detecting UI elements from UI screenshots or drawings
|
| 2 |
+
|
| 3 |
+
This project is still ongoing and this repo may be updated irregularly; I developed a web app for UIED at http://uied.online
|
| 4 |
+
|
| 5 |
+
## Related Publications:
|
| 6 |
+
[1. UIED: a hybrid tool for GUI element detection](https://dl.acm.org/doi/10.1145/3368089.3417940)
|
| 7 |
+
|
| 8 |
+
[2. Object Detection for Graphical User Interface: Old Fashioned or Deep Learning or a Combination?](https://arxiv.org/abs/2008.05132)
|
| 9 |
+
|
| 10 |
+
>The repo has been **upgraded with Google OCR** for GUI text detection, to use the original version in our paper (using [EAST](https://github.com/argman/EAST) as text detector), check the release [v2.3](https://github.com/MulongXie/UIED/releases/tag/v2.3) and download the pre-trained model in [this link](https://drive.google.com/drive/folders/1MK0Om7Lx0wRXGDfNcyj21B0FL1T461v5?usp=sharing).
|
| 11 |
+
|
| 12 |
+
## What is it?
|
| 13 |
+
|
| 14 |
+
UI Element Detection (UIED) is an old-fashioned computer vision (CV) based element detection approach for graphic user interface.
|
| 15 |
+
|
| 16 |
+
The input of UIED could be various UI image, such as mobile app or web page screenshot, UI design drawn by Photoshop or Sketch, and even some hand-drawn UI design. Then the approach detects and classifies text and graphic UI elements, and exports the detection result as JSON file for future application.
|
| 17 |
+
|
| 18 |
+
UIED comprises two parts to detect UI text and graphic elements, such as button, image and input bar.
|
| 19 |
+
* For text, it leverages [Google OCR](https://cloud.google.com/vision/docs/ocr) to perform detection.
|
| 20 |
+
|
| 21 |
+
* For graphical elements, it uses old-fashioned CV approaches to locate the elements and a CNN classifier to achieve classification.
|
| 22 |
+
|
| 23 |
+
> UIED is highly customizable, you can replace both parts by your choice (e.g. other text detection approaches). Unlike black-box end-to-end deep learning approach, you can revise the algorithms in the non-text detection and merging (partially or entirely) easily to fit your task.
|
| 24 |
+
|
| 25 |
+

|
| 26 |
+
|
| 27 |
+
## How to use?
|
| 28 |
+
|
| 29 |
+
### Dependency
|
| 30 |
+
* **Python 3.5**
|
| 31 |
+
* **Opencv 3.4.2**
|
| 32 |
+
* **Pandas**
|
| 33 |
+
<!-- * **Tensorflow 1.10.0**
|
| 34 |
+
* **Keras 2.2.4**
|
| 35 |
+
* **Sklearn 0.22.2** -->
|
| 36 |
+
|
| 37 |
+
### Installation
|
| 38 |
+
<!-- Install the mentioned dependencies, and download two pre-trained models from [this link](https://drive.google.com/drive/folders/1MK0Om7Lx0wRXGDfNcyj21B0FL1T461v5?usp=sharing) for EAST text detection and GUI element classification. -->
|
| 39 |
+
|
| 40 |
+
<!-- Change ``CNN_PATH`` and ``EAST_PATH`` in *config/CONFIG.py* to your locations. -->
|
| 41 |
+
|
| 42 |
+
The new version of UIED equipped with Google OCR is easy to deploy and no pre-trained model is needed. Simply download the repo along with the dependencies.
|
| 43 |
+
|
| 44 |
+
> Please replace the Google OCR key at `detect_text/ocr.py line 28` with your own (apply in [Google website](https://cloud.google.com/vision)).
|
| 45 |
+
|
| 46 |
+
### Usage
|
| 47 |
+
To test your own image(s):
|
| 48 |
+
* To test single image, change *input_path_img* in ``run_single.py`` to your input image and the results will be output to *output_root*.
|
| 49 |
+
* To test multiple images, change *input_img_root* in ``run_batch.py`` to your input directory and the results will be output to *output_root*.
|
| 50 |
+
* To adjust the parameters live, use ``run_testing.py``
|
| 51 |
+
|
| 52 |
+
> Note: The best set of parameters vary for different types of GUI image (Mobile App, Web, PC). I highly recommend to first play with the ``run_testing.py`` to pick a good set of parameters for your data.
|
| 53 |
+
|
| 54 |
+
## Folder structure
|
| 55 |
+
``cnn/``
|
| 56 |
+
* Used to train classifier for graphic UI elements
|
| 57 |
+
* Set path of the CNN classification model
|
| 58 |
+
|
| 59 |
+
``config/``
|
| 60 |
+
* Set data paths
|
| 61 |
+
* Set parameters for graphic elements detection
|
| 62 |
+
|
| 63 |
+
``data/``
|
| 64 |
+
* Input UI images and output detection results
|
| 65 |
+
|
| 66 |
+
``detect_compo/``
|
| 67 |
+
* Non-text GUI component detection
|
| 68 |
+
|
| 69 |
+
``detect_text/``
|
| 70 |
+
* GUI text detection using Google OCR
|
| 71 |
+
|
| 72 |
+
``detect_merge/``
|
| 73 |
+
* Merge the detection results of non-text and text GUI elements
|
| 74 |
+
|
| 75 |
+
The major detection algorithms are in ``detect_compo/``, ``detect_text/`` and ``detect_merge/``
|
| 76 |
+
|
| 77 |
+
## Demo
|
| 78 |
+
GUI element detection result for web screenshot
|
| 79 |
+
|
| 80 |
+

|
CDM/result_processing/Untitled.ipynb
ADDED
|
@@ -0,0 +1,937 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"cells": [
|
| 3 |
+
{
|
| 4 |
+
"cell_type": "code",
|
| 5 |
+
"execution_count": 1,
|
| 6 |
+
"metadata": {},
|
| 7 |
+
"outputs": [],
|
| 8 |
+
"source": [
|
| 9 |
+
"import json\n",
|
| 10 |
+
"import numpy as np\n",
|
| 11 |
+
"import cv2\n",
|
| 12 |
+
"from glob import glob\n",
|
| 13 |
+
"from os.path import join as pjoin\n",
|
| 14 |
+
"from tqdm import tqdm\n",
|
| 15 |
+
"\n",
|
| 16 |
+
"\n",
|
| 17 |
+
"def resize_label(bboxes, d_height, gt_height, bias=0):\n",
|
| 18 |
+
" bboxes_new = []\n",
|
| 19 |
+
" scale = gt_height / d_height\n",
|
| 20 |
+
" for bbox in bboxes:\n",
|
| 21 |
+
" bbox = [int(b * scale + bias) for b in bbox]\n",
|
| 22 |
+
" bboxes_new.append(bbox)\n",
|
| 23 |
+
" return bboxes_new\n",
|
| 24 |
+
"\n",
|
| 25 |
+
"\n",
|
| 26 |
+
"def draw_bounding_box(org, corners, color=(0, 255, 0), line=2, show=False):\n",
|
| 27 |
+
" board = org.copy()\n",
|
| 28 |
+
" for i in range(len(corners)):\n",
|
| 29 |
+
" board = cv2.rectangle(board, (corners[i][0], corners[i][1]), (corners[i][2], corners[i][3]), color, line)\n",
|
| 30 |
+
" if show:\n",
|
| 31 |
+
" cv2.imshow('a', cv2.resize(board, (500, 1000)))\n",
|
| 32 |
+
" cv2.waitKey(0)\n",
|
| 33 |
+
" return board\n",
|
| 34 |
+
"\n",
|
| 35 |
+
"\n",
|
| 36 |
+
"def load_detect_result_json(reslut_file_root, shrink=0):\n",
|
| 37 |
+
" def is_bottom_or_top(corner):\n",
|
| 38 |
+
" column_min, row_min, column_max, row_max = corner\n",
|
| 39 |
+
" if row_max < 36 or row_min > 725:\n",
|
| 40 |
+
" return True\n",
|
| 41 |
+
" return False\n",
|
| 42 |
+
"\n",
|
| 43 |
+
" result_files = glob(pjoin(reslut_file_root, '*.json'))\n",
|
| 44 |
+
" compos_reform = {}\n",
|
| 45 |
+
" print('Loading %d detection results' % len(result_files))\n",
|
| 46 |
+
" for reslut_file in tqdm(result_files):\n",
|
| 47 |
+
" img_name = reslut_file.split('\\\\')[-1].split('.')[0]\n",
|
| 48 |
+
" compos = json.load(open(reslut_file, 'r'))['compos']\n",
|
| 49 |
+
" for compo in compos:\n",
|
| 50 |
+
" if is_bottom_or_top((compo['column_min'], compo['row_min'], compo['column_max'], compo['row_max'])):\n",
|
| 51 |
+
" continue\n",
|
| 52 |
+
" if img_name not in compos_reform:\n",
|
| 53 |
+
" compos_reform[img_name] = {'bboxes': [[compo['column_min'] + shrink, compo['row_min'] + shrink, compo['column_max'] - shrink, compo['row_max'] - shrink]],\n",
|
| 54 |
+
" 'categories': [compo['category']]}\n",
|
| 55 |
+
" else:\n",
|
| 56 |
+
" compos_reform[img_name]['bboxes'].append([compo['column_min'] + shrink, compo['row_min'] + shrink, compo['column_max'] - shrink, compo['row_max'] - shrink])\n",
|
| 57 |
+
" compos_reform[img_name]['categories'].append(compo['category'])\n",
|
| 58 |
+
" return compos_reform\n",
|
| 59 |
+
"\n",
|
| 60 |
+
"\n",
|
| 61 |
+
"def load_ground_truth_json(gt_file):\n",
|
| 62 |
+
" def get_img_by_id(img_id):\n",
|
| 63 |
+
" for image in images:\n",
|
| 64 |
+
" if image['id'] == img_id:\n",
|
| 65 |
+
" return image['file_name'].split('/')[-1][:-4], (image['height'], image['width'])\n",
|
| 66 |
+
"\n",
|
| 67 |
+
" def cvt_bbox(bbox):\n",
|
| 68 |
+
" '''\n",
|
| 69 |
+
" :param bbox: [x,y,width,height]\n",
|
| 70 |
+
" :return: [col_min, row_min, col_max, row_max]\n",
|
| 71 |
+
" '''\n",
|
| 72 |
+
" bbox = [int(b) for b in bbox]\n",
|
| 73 |
+
" return [bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]]\n",
|
| 74 |
+
"\n",
|
| 75 |
+
" data = json.load(open(gt_file, 'r'))\n",
|
| 76 |
+
" images = data['images']\n",
|
| 77 |
+
" annots = data['annotations']\n",
|
| 78 |
+
" compos = {}\n",
|
| 79 |
+
" print('Loading %d ground truth' % len(annots))\n",
|
| 80 |
+
" for annot in tqdm(annots):\n",
|
| 81 |
+
" img_name, size = get_img_by_id(annot['image_id'])\n",
|
| 82 |
+
" if img_name not in compos:\n",
|
| 83 |
+
" compos[img_name] = {'bboxes': [cvt_bbox(annot['bbox'])], 'categories': [annot['category_id']], 'size': size}\n",
|
| 84 |
+
" else:\n",
|
| 85 |
+
" compos[img_name]['bboxes'].append(cvt_bbox(annot['bbox']))\n",
|
| 86 |
+
" compos[img_name]['categories'].append(annot['category_id'])\n",
|
| 87 |
+
" return compos\n",
|
| 88 |
+
"\n",
|
| 89 |
+
"\n",
|
| 90 |
+
"def eval(detection, ground_truth, img_root, show=True, no_text=False, only_text=False):\n",
|
| 91 |
+
" def compo_filter(compos, flag):\n",
|
| 92 |
+
" if not no_text and not only_text:\n",
|
| 93 |
+
" return compos\n",
|
| 94 |
+
" compos_new = {'bboxes': [], 'categories': []}\n",
|
| 95 |
+
" for k, category in enumerate(compos['categories']):\n",
|
| 96 |
+
" if only_text:\n",
|
| 97 |
+
" if flag == 'det' and category != 'TextView':\n",
|
| 98 |
+
" continue\n",
|
| 99 |
+
" if flag == 'gt' and int(category) != 14:\n",
|
| 100 |
+
" continue\n",
|
| 101 |
+
" elif no_text:\n",
|
| 102 |
+
" if flag == 'det' and category == 'TextView':\n",
|
| 103 |
+
" continue\n",
|
| 104 |
+
" if flag == 'gt' and int(category) == 14:\n",
|
| 105 |
+
" continue\n",
|
| 106 |
+
"\n",
|
| 107 |
+
" compos_new['bboxes'].append(compos['bboxes'][k])\n",
|
| 108 |
+
" compos_new['categories'].append(category)\n",
|
| 109 |
+
" return compos_new\n",
|
| 110 |
+
"\n",
|
| 111 |
+
" def match(org, d_bbox, gt_bboxes, matched):\n",
|
| 112 |
+
" '''\n",
|
| 113 |
+
" :param matched: mark if the ground truth component is matched\n",
|
| 114 |
+
" :param d_bbox: [col_min, row_min, col_max, row_max]\n",
|
| 115 |
+
" :param gt_bboxes: list of ground truth [[col_min, row_min, col_max, row_max]]\n",
|
| 116 |
+
" :return: Boolean: if IOU large enough or detected box is contained by ground truth\n",
|
| 117 |
+
" '''\n",
|
| 118 |
+
" area_d = (d_bbox[2] - d_bbox[0]) * (d_bbox[3] - d_bbox[1])\n",
|
| 119 |
+
" for i, gt_bbox in enumerate(gt_bboxes):\n",
|
| 120 |
+
" if matched[i] == 0:\n",
|
| 121 |
+
" continue\n",
|
| 122 |
+
" area_gt = (gt_bbox[2] - gt_bbox[0]) * (gt_bbox[3] - gt_bbox[1])\n",
|
| 123 |
+
" col_min = max(d_bbox[0], gt_bbox[0])\n",
|
| 124 |
+
" row_min = max(d_bbox[1], gt_bbox[1])\n",
|
| 125 |
+
" col_max = min(d_bbox[2], gt_bbox[2])\n",
|
| 126 |
+
" row_max = min(d_bbox[3], gt_bbox[3])\n",
|
| 127 |
+
" # if not intersected, area intersection should be 0\n",
|
| 128 |
+
" w = max(0, col_max - col_min)\n",
|
| 129 |
+
" h = max(0, row_max - row_min)\n",
|
| 130 |
+
" area_inter = w * h\n",
|
| 131 |
+
" if area_inter == 0:\n",
|
| 132 |
+
" continue\n",
|
| 133 |
+
" iod = area_inter / area_d\n",
|
| 134 |
+
" iou = area_inter / (area_d + area_gt - area_inter)\n",
|
| 135 |
+
" # if show:\n",
|
| 136 |
+
" # cv2.putText(org, (str(round(iou, 2)) + ',' + str(round(iod, 2))), (d_bbox[0], d_bbox[1]),\n",
|
| 137 |
+
" # cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\n",
|
| 138 |
+
"\n",
|
| 139 |
+
" if iou > 0.9 or iod == 1:\n",
|
| 140 |
+
" matched[i] = 0\n",
|
| 141 |
+
" return True\n",
|
| 142 |
+
" return False\n",
|
| 143 |
+
"\n",
|
| 144 |
+
" amount = len(detection)\n",
|
| 145 |
+
" TP, FP, FN = 0, 0, 0\n",
|
| 146 |
+
" pres, recalls, f1s = [], [], []\n",
|
| 147 |
+
" for i, image_id in enumerate(detection):\n",
|
| 148 |
+
" TP_this, FP_this, FN_this = 0, 0, 0\n",
|
| 149 |
+
" img = cv2.imread(pjoin(img_root, image_id + '.jpg'))\n",
|
| 150 |
+
" d_compos = detection[image_id]\n",
|
| 151 |
+
" gt_compos = ground_truth[image_id]\n",
|
| 152 |
+
"\n",
|
| 153 |
+
" org_height = gt_compos['size'][0]\n",
|
| 154 |
+
"\n",
|
| 155 |
+
" d_compos = compo_filter(d_compos, 'det')\n",
|
| 156 |
+
" gt_compos = compo_filter(gt_compos, 'gt')\n",
|
| 157 |
+
"\n",
|
| 158 |
+
" d_compos['bboxes'] = resize_label(d_compos['bboxes'], 800, org_height)\n",
|
| 159 |
+
" matched = np.ones(len(gt_compos['bboxes']), dtype=int)\n",
|
| 160 |
+
" for d_bbox in d_compos['bboxes']:\n",
|
| 161 |
+
" if match(img, d_bbox, gt_compos['bboxes'], matched):\n",
|
| 162 |
+
" TP += 1\n",
|
| 163 |
+
" TP_this += 1\n",
|
| 164 |
+
" else:\n",
|
| 165 |
+
" FP += 1\n",
|
| 166 |
+
" FP_this += 1\n",
|
| 167 |
+
" FN += sum(matched)\n",
|
| 168 |
+
" FN_this = sum(matched)\n",
|
| 169 |
+
"\n",
|
| 170 |
+
" try:\n",
|
| 171 |
+
" pre_this = TP_this / (TP_this + FP_this)\n",
|
| 172 |
+
" recall_this = TP_this / (TP_this + FN_this)\n",
|
| 173 |
+
" f1_this = 2 * (pre_this * recall_this) / (pre_this + recall_this)\n",
|
| 174 |
+
" except:\n",
|
| 175 |
+
" print('empty')\n",
|
| 176 |
+
" continue\n",
|
| 177 |
+
"\n",
|
| 178 |
+
" pres.append(pre_this)\n",
|
| 179 |
+
" recalls.append(recall_this)\n",
|
| 180 |
+
" f1s.append(f1_this)\n",
|
| 181 |
+
" if show:\n",
|
| 182 |
+
" print(image_id + '.jpg')\n",
|
| 183 |
+
" print('[%d/%d] TP:%d, FP:%d, FN:%d, Precesion:%.3f, Recall:%.3f' % (\n",
|
| 184 |
+
" i, amount, TP_this, FP_this, FN_this, pre_this, recall_this))\n",
|
| 185 |
+
" cv2.imshow('org', cv2.resize(img, (500, 1000)))\n",
|
| 186 |
+
" broad = draw_bounding_box(img, d_compos['bboxes'], color=(255, 0, 0), line=3)\n",
|
| 187 |
+
" draw_bounding_box(broad, gt_compos['bboxes'], color=(0, 0, 255), show=True, line=2)\n",
|
| 188 |
+
"\n",
|
| 189 |
+
" if i % 200 == 0:\n",
|
| 190 |
+
" precision = TP / (TP + FP)\n",
|
| 191 |
+
" recall = TP / (TP + FN)\n",
|
| 192 |
+
" f1 = 2 * (precision * recall) / (precision + recall)\n",
|
| 193 |
+
" print(\n",
|
| 194 |
+
" '[%d/%d] TP:%d, FP:%d, FN:%d, Precesion:%.3f, Recall:%.3f, F1:%.3f' % (i, amount, TP, FP, FN, precision, recall, f1))\n",
|
| 195 |
+
"\n",
|
| 196 |
+
" precision = TP / (TP + FP)\n",
|
| 197 |
+
" recall = TP / (TP + FN)\n",
|
| 198 |
+
" print('[%d/%d] TP:%d, FP:%d, FN:%d, Precesion:%.3f, Recall:%.3f, F1:%.3f' % (i, amount, TP, FP, FN, precision, recall, f1))\n",
|
| 199 |
+
" # print(\"Average precision:%.4f; Average recall:%.3f\" % (sum(pres)/len(pres), sum(recalls)/len(recalls)))\n",
|
| 200 |
+
"\n",
|
| 201 |
+
" return pres, recalls, f1s"
|
| 202 |
+
]
|
| 203 |
+
},
|
| 204 |
+
{
|
| 205 |
+
"cell_type": "code",
|
| 206 |
+
"execution_count": 71,
|
| 207 |
+
"metadata": {},
|
| 208 |
+
"outputs": [],
|
| 209 |
+
"source": [
|
| 210 |
+
"import matplotlib.pyplot as plt\n",
|
| 211 |
+
"import numpy as np\n",
|
| 212 |
+
"import math\n",
|
| 213 |
+
"\n",
|
| 214 |
+
"def draw_plot(data, title='Score for our approach'):\n",
|
| 215 |
+
" for i in range(len(data)):\n",
|
| 216 |
+
" data[i] = [d for d in data[i] if not math.isnan(d)]\n",
|
| 217 |
+
"# plt.title(title)\n",
|
| 218 |
+
" labels = ['Precision', 'Recall', 'F1']\n",
|
| 219 |
+
" bplot = plt.boxplot(data, patch_artist=True, labels=labels) # 设置箱型图可填充\n",
|
| 220 |
+
" colors = ['pink', 'lightblue', 'lightgreen']\n",
|
| 221 |
+
" for patch, color in zip(bplot['boxes'], colors):\n",
|
| 222 |
+
" patch.set_facecolor(color) \n",
|
| 223 |
+
" plt.grid(axis='y')\n",
|
| 224 |
+
" plt.xticks(fontsize=16)\n",
|
| 225 |
+
" plt.yticks(fontsize=16)\n",
|
| 226 |
+
" plt.savefig(title + '.png')\n",
|
| 227 |
+
" plt.show()"
|
| 228 |
+
]
|
| 229 |
+
},
|
| 230 |
+
{
|
| 231 |
+
"cell_type": "code",
|
| 232 |
+
"execution_count": 9,
|
| 233 |
+
"metadata": {
|
| 234 |
+
"scrolled": true
|
| 235 |
+
},
|
| 236 |
+
"outputs": [
|
| 237 |
+
{
|
| 238 |
+
"name": "stderr",
|
| 239 |
+
"output_type": "stream",
|
| 240 |
+
"text": [
|
| 241 |
+
" 9%|███████▏ | 442/4708 [00:00<00:01, 4173.66it/s]"
|
| 242 |
+
]
|
| 243 |
+
},
|
| 244 |
+
{
|
| 245 |
+
"name": "stdout",
|
| 246 |
+
"output_type": "stream",
|
| 247 |
+
"text": [
|
| 248 |
+
"Loading 4708 detection results\n"
|
| 249 |
+
]
|
| 250 |
+
},
|
| 251 |
+
{
|
| 252 |
+
"name": "stderr",
|
| 253 |
+
"output_type": "stream",
|
| 254 |
+
"text": [
|
| 255 |
+
"100%|████████████████████████████████████████████████████████████████████████████| 4708/4708 [00:01<00:00, 4404.67it/s]\n"
|
| 256 |
+
]
|
| 257 |
+
}
|
| 258 |
+
],
|
| 259 |
+
"source": [
|
| 260 |
+
"detect = load_detect_result_json('E:\\\\Mulong\\\\Result\\\\rico\\\\rico_uied\\\\rico_new_uied_cls\\\\merge')"
|
| 261 |
+
]
|
| 262 |
+
},
|
| 263 |
+
{
|
| 264 |
+
"cell_type": "code",
|
| 265 |
+
"execution_count": 10,
|
| 266 |
+
"metadata": {},
|
| 267 |
+
"outputs": [
|
| 268 |
+
{
|
| 269 |
+
"name": "stderr",
|
| 270 |
+
"output_type": "stream",
|
| 271 |
+
"text": [
|
| 272 |
+
" 8%|█████▉ | 6915/86646 [00:00<00:01, 68670.52it/s]"
|
| 273 |
+
]
|
| 274 |
+
},
|
| 275 |
+
{
|
| 276 |
+
"name": "stdout",
|
| 277 |
+
"output_type": "stream",
|
| 278 |
+
"text": [
|
| 279 |
+
"Loading 86646 ground truth\n"
|
| 280 |
+
]
|
| 281 |
+
},
|
| 282 |
+
{
|
| 283 |
+
"name": "stderr",
|
| 284 |
+
"output_type": "stream",
|
| 285 |
+
"text": [
|
| 286 |
+
"100%|██████████████████████████████████████████████████████████████████████████| 86646/86646 [00:11<00:00, 7576.11it/s]\n"
|
| 287 |
+
]
|
| 288 |
+
}
|
| 289 |
+
],
|
| 290 |
+
"source": [
|
| 291 |
+
"gt = load_ground_truth_json('E:\\\\Mulong\\\\Datasets\\\\rico\\\\instances_test.json')"
|
| 292 |
+
]
|
| 293 |
+
},
|
| 294 |
+
{
|
| 295 |
+
"cell_type": "code",
|
| 296 |
+
"execution_count": 23,
|
| 297 |
+
"metadata": {},
|
| 298 |
+
"outputs": [
|
| 299 |
+
{
|
| 300 |
+
"name": "stdout",
|
| 301 |
+
"output_type": "stream",
|
| 302 |
+
"text": [
|
| 303 |
+
"[0/4707] TP:16, FP:0, FN:0, Precesion:1.000, Recall:1.000, F1:1.000\n"
|
| 304 |
+
]
|
| 305 |
+
},
|
| 306 |
+
{
|
| 307 |
+
"name": "stderr",
|
| 308 |
+
"output_type": "stream",
|
| 309 |
+
"text": [
|
| 310 |
+
"D:\\Anaconda\\lib\\site-packages\\ipykernel_launcher.py:165: RuntimeWarning: invalid value encountered in double_scalars\n"
|
| 311 |
+
]
|
| 312 |
+
},
|
| 313 |
+
{
|
| 314 |
+
"name": "stdout",
|
| 315 |
+
"output_type": "stream",
|
| 316 |
+
"text": [
|
| 317 |
+
"[200/4707] TP:2222, FP:2920, FN:1705, Precesion:0.432, Recall:0.566, F1:0.490\n",
|
| 318 |
+
"[400/4707] TP:4616, FP:5737, FN:3346, Precesion:0.446, Recall:0.580, F1:0.504\n",
|
| 319 |
+
"[600/4707] TP:6963, FP:8682, FN:4812, Precesion:0.445, Recall:0.591, F1:0.508\n",
|
| 320 |
+
"[800/4707] TP:9367, FP:11432, FN:6305, Precesion:0.450, Recall:0.598, F1:0.514\n",
|
| 321 |
+
"[1000/4707] TP:11222, FP:14346, FN:7511, Precesion:0.439, Recall:0.599, F1:0.507\n",
|
| 322 |
+
"[1200/4707] TP:13680, FP:17278, FN:8901, Precesion:0.442, Recall:0.606, F1:0.511\n",
|
| 323 |
+
"[1400/4707] TP:16274, FP:20664, FN:10379, Precesion:0.441, Recall:0.611, F1:0.512\n",
|
| 324 |
+
"[1600/4707] TP:18431, FP:23002, FN:11556, Precesion:0.445, Recall:0.615, F1:0.516\n",
|
| 325 |
+
"[1800/4707] TP:20718, FP:25600, FN:13049, Precesion:0.447, Recall:0.614, F1:0.517\n",
|
| 326 |
+
"[2000/4707] TP:23009, FP:28626, FN:14588, Precesion:0.446, Recall:0.612, F1:0.516\n",
|
| 327 |
+
"[2200/4707] TP:25424, FP:31555, FN:16191, Precesion:0.446, Recall:0.611, F1:0.516\n",
|
| 328 |
+
"[2400/4707] TP:27559, FP:34176, FN:17388, Precesion:0.446, Recall:0.613, F1:0.517\n",
|
| 329 |
+
"[2600/4707] TP:29820, FP:37065, FN:18617, Precesion:0.446, Recall:0.616, F1:0.517\n",
|
| 330 |
+
"[2800/4707] TP:32108, FP:39846, FN:20018, Precesion:0.446, Recall:0.616, F1:0.518\n",
|
| 331 |
+
"[3000/4707] TP:34188, FP:43112, FN:21399, Precesion:0.442, Recall:0.615, F1:0.515\n",
|
| 332 |
+
"[3200/4707] TP:36558, FP:46011, FN:23002, Precesion:0.443, Recall:0.614, F1:0.514\n",
|
| 333 |
+
"[3400/4707] TP:38783, FP:48918, FN:24365, Precesion:0.442, Recall:0.614, F1:0.514\n",
|
| 334 |
+
"[3600/4707] TP:40958, FP:51829, FN:25605, Precesion:0.441, Recall:0.615, F1:0.514\n",
|
| 335 |
+
"[3800/4707] TP:43270, FP:54963, FN:26841, Precesion:0.440, Recall:0.617, F1:0.514\n",
|
| 336 |
+
"[4000/4707] TP:45512, FP:57838, FN:28141, Precesion:0.440, Recall:0.618, F1:0.514\n",
|
| 337 |
+
"[4200/4707] TP:47544, FP:60789, FN:29420, Precesion:0.439, Recall:0.618, F1:0.513\n",
|
| 338 |
+
"[4400/4707] TP:49907, FP:64407, FN:30897, Precesion:0.437, Recall:0.618, F1:0.512\n",
|
| 339 |
+
"[4600/4707] TP:52181, FP:67592, FN:32399, Precesion:0.436, Recall:0.617, F1:0.511\n",
|
| 340 |
+
"[4706/4707] TP:53393, FP:69230, FN:33248, Precesion:0.435, Recall:0.616, F1:0.511\n"
|
| 341 |
+
]
|
| 342 |
+
}
|
| 343 |
+
],
|
| 344 |
+
"source": [
|
| 345 |
+
"no_text = False\n",
|
| 346 |
+
"only_text = False\n",
|
| 347 |
+
"pres_all, recalls_all, f1_all = eval(detect, gt, 'E:\\\\Mulong\\\\Datasets\\\\rico\\\\combined', show=False, no_text=no_text, only_text=only_text)"
|
| 348 |
+
]
|
| 349 |
+
},
|
| 350 |
+
{
|
| 351 |
+
"cell_type": "code",
|
| 352 |
+
"execution_count": 21,
|
| 353 |
+
"metadata": {},
|
| 354 |
+
"outputs": [
|
| 355 |
+
{
|
| 356 |
+
"name": "stdout",
|
| 357 |
+
"output_type": "stream",
|
| 358 |
+
"text": [
|
| 359 |
+
"[0/4707] TP:1, FP:0, FN:0, Precesion:1.000, Recall:1.000, F1:1.000\n"
|
| 360 |
+
]
|
| 361 |
+
},
|
| 362 |
+
{
|
| 363 |
+
"name": "stderr",
|
| 364 |
+
"output_type": "stream",
|
| 365 |
+
"text": [
|
| 366 |
+
"D:\\Anaconda\\lib\\site-packages\\ipykernel_launcher.py:165: RuntimeWarning: invalid value encountered in double_scalars\n"
|
| 367 |
+
]
|
| 368 |
+
},
|
| 369 |
+
{
|
| 370 |
+
"name": "stdout",
|
| 371 |
+
"output_type": "stream",
|
| 372 |
+
"text": [
|
| 373 |
+
"[200/4707] TP:973, FP:2022, FN:891, Precesion:0.325, Recall:0.522, F1:0.400\n",
|
| 374 |
+
"empty\n",
|
| 375 |
+
"[400/4707] TP:1921, FP:3905, FN:1788, Precesion:0.330, Recall:0.518, F1:0.403\n",
|
| 376 |
+
"[600/4707] TP:2847, FP:6079, FN:2717, Precesion:0.319, Recall:0.512, F1:0.393\n",
|
| 377 |
+
"empty\n",
|
| 378 |
+
"empty\n",
|
| 379 |
+
"empty\n",
|
| 380 |
+
"[800/4707] TP:3774, FP:7895, FN:3574, Precesion:0.323, Recall:0.514, F1:0.397\n",
|
| 381 |
+
"empty\n",
|
| 382 |
+
"[1000/4707] TP:4478, FP:9951, FN:4229, Precesion:0.310, Recall:0.514, F1:0.387\n",
|
| 383 |
+
"empty\n",
|
| 384 |
+
"empty\n",
|
| 385 |
+
"[1200/4707] TP:5451, FP:12055, FN:4960, Precesion:0.311, Recall:0.524, F1:0.391\n",
|
| 386 |
+
"empty\n",
|
| 387 |
+
"empty\n",
|
| 388 |
+
"empty\n",
|
| 389 |
+
"[1400/4707] TP:6493, FP:14405, FN:5804, Precesion:0.311, Recall:0.528, F1:0.391\n",
|
| 390 |
+
"empty\n",
|
| 391 |
+
"empty\n",
|
| 392 |
+
"empty\n",
|
| 393 |
+
"empty\n",
|
| 394 |
+
"[1600/4707] TP:7372, FP:15980, FN:6375, Precesion:0.316, Recall:0.536, F1:0.397\n",
|
| 395 |
+
"empty\n",
|
| 396 |
+
"empty\n",
|
| 397 |
+
"empty\n",
|
| 398 |
+
"[1800/4707] TP:8273, FP:17814, FN:7156, Precesion:0.317, Recall:0.536, F1:0.399\n",
|
| 399 |
+
"empty\n",
|
| 400 |
+
"empty\n",
|
| 401 |
+
"[2000/4707] TP:9273, FP:19993, FN:8051, Precesion:0.317, Recall:0.535, F1:0.398\n",
|
| 402 |
+
"empty\n",
|
| 403 |
+
"[2200/4707] TP:10293, FP:22055, FN:8869, Precesion:0.318, Recall:0.537, F1:0.400\n",
|
| 404 |
+
"[2400/4707] TP:11207, FP:23944, FN:9524, Precesion:0.319, Recall:0.541, F1:0.401\n",
|
| 405 |
+
"empty\n",
|
| 406 |
+
"empty\n",
|
| 407 |
+
"[2600/4707] TP:12103, FP:25932, FN:10276, Precesion:0.318, Recall:0.541, F1:0.401\n",
|
| 408 |
+
"[2800/4707] TP:12994, FP:27792, FN:11122, Precesion:0.319, Recall:0.539, F1:0.400\n",
|
| 409 |
+
"empty\n",
|
| 410 |
+
"empty\n",
|
| 411 |
+
"[3000/4707] TP:13839, FP:30256, FN:11943, Precesion:0.314, Recall:0.537, F1:0.396\n",
|
| 412 |
+
"[3200/4707] TP:14758, FP:32276, FN:12851, Precesion:0.314, Recall:0.535, F1:0.395\n",
|
| 413 |
+
"empty\n",
|
| 414 |
+
"[3400/4707] TP:15718, FP:34337, FN:13627, Precesion:0.314, Recall:0.536, F1:0.396\n",
|
| 415 |
+
"[3600/4707] TP:16695, FP:36424, FN:14263, Precesion:0.314, Recall:0.539, F1:0.397\n",
|
| 416 |
+
"[3800/4707] TP:17641, FP:38693, FN:14932, Precesion:0.313, Recall:0.542, F1:0.397\n",
|
| 417 |
+
"empty\n",
|
| 418 |
+
"empty\n",
|
| 419 |
+
"[4000/4707] TP:18651, FP:40641, FN:15653, Precesion:0.315, Recall:0.544, F1:0.399\n",
|
| 420 |
+
"empty\n",
|
| 421 |
+
"[4200/4707] TP:19554, FP:42631, FN:16305, Precesion:0.314, Recall:0.545, F1:0.399\n",
|
| 422 |
+
"empty\n",
|
| 423 |
+
"empty\n",
|
| 424 |
+
"[4400/4707] TP:20584, FP:45335, FN:17197, Precesion:0.312, Recall:0.545, F1:0.397\n",
|
| 425 |
+
"[4600/4707] TP:21416, FP:47595, FN:17950, Precesion:0.310, Recall:0.544, F1:0.395\n",
|
| 426 |
+
"empty\n",
|
| 427 |
+
"empty\n",
|
| 428 |
+
"empty\n",
|
| 429 |
+
"empty\n",
|
| 430 |
+
"empty\n",
|
| 431 |
+
"[4706/4707] TP:21870, FP:48657, FN:18391, Precesion:0.310, Recall:0.543, F1:0.395\n"
|
| 432 |
+
]
|
| 433 |
+
}
|
| 434 |
+
],
|
| 435 |
+
"source": [
|
| 436 |
+
"no_text = True\n",
|
| 437 |
+
"only_text = False\n",
|
| 438 |
+
"pres_non_text, recalls_non_text, f1_non_text = eval(detect, gt, 'E:\\\\Mulong\\\\Datasets\\\\rico\\\\combined', show=False, no_text=no_text, only_text=only_text)"
|
| 439 |
+
]
|
| 440 |
+
},
|
| 441 |
+
{
|
| 442 |
+
"cell_type": "code",
|
| 443 |
+
"execution_count": 14,
|
| 444 |
+
"metadata": {},
|
| 445 |
+
"outputs": [
|
| 446 |
+
{
|
| 447 |
+
"name": "stdout",
|
| 448 |
+
"output_type": "stream",
|
| 449 |
+
"text": [
|
| 450 |
+
"[0/4707] TP:15, FP:0, FN:0, Precesion:1.000, Recall:1.000, F1:1.000\n",
|
| 451 |
+
"empty\n"
|
| 452 |
+
]
|
| 453 |
+
},
|
| 454 |
+
{
|
| 455 |
+
"name": "stderr",
|
| 456 |
+
"output_type": "stream",
|
| 457 |
+
"text": [
|
| 458 |
+
"D:\\Anaconda\\lib\\site-packages\\ipykernel_launcher.py:165: RuntimeWarning: invalid value encountered in double_scalars\n"
|
| 459 |
+
]
|
| 460 |
+
},
|
| 461 |
+
{
|
| 462 |
+
"name": "stdout",
|
| 463 |
+
"output_type": "stream",
|
| 464 |
+
"text": [
|
| 465 |
+
"empty\n",
|
| 466 |
+
"empty\n",
|
| 467 |
+
"empty\n",
|
| 468 |
+
"empty\n",
|
| 469 |
+
"empty\n",
|
| 470 |
+
"empty\n",
|
| 471 |
+
"empty\n",
|
| 472 |
+
"empty\n",
|
| 473 |
+
"empty\n",
|
| 474 |
+
"empty\n",
|
| 475 |
+
"empty\n",
|
| 476 |
+
"empty\n",
|
| 477 |
+
"empty\n",
|
| 478 |
+
"empty\n",
|
| 479 |
+
"empty\n",
|
| 480 |
+
"[200/4707] TP:1041, FP:1106, FN:1022, Precesion:0.485, Recall:0.505, F1:0.495\n",
|
| 481 |
+
"empty\n",
|
| 482 |
+
"empty\n",
|
| 483 |
+
"empty\n",
|
| 484 |
+
"empty\n",
|
| 485 |
+
"empty\n",
|
| 486 |
+
"empty\n",
|
| 487 |
+
"empty\n",
|
| 488 |
+
"empty\n",
|
| 489 |
+
"empty\n",
|
| 490 |
+
"empty\n",
|
| 491 |
+
"empty\n",
|
| 492 |
+
"empty\n",
|
| 493 |
+
"empty\n",
|
| 494 |
+
"[400/4707] TP:2185, FP:2342, FN:2068, Precesion:0.483, Recall:0.514, F1:0.498\n",
|
| 495 |
+
"empty\n",
|
| 496 |
+
"empty\n",
|
| 497 |
+
"empty\n",
|
| 498 |
+
"empty\n",
|
| 499 |
+
"empty\n",
|
| 500 |
+
"empty\n",
|
| 501 |
+
"empty\n",
|
| 502 |
+
"empty\n",
|
| 503 |
+
"empty\n",
|
| 504 |
+
"empty\n",
|
| 505 |
+
"[600/4707] TP:3272, FP:3447, FN:2939, Precesion:0.487, Recall:0.527, F1:0.506\n",
|
| 506 |
+
"empty\n",
|
| 507 |
+
"empty\n",
|
| 508 |
+
"empty\n",
|
| 509 |
+
"empty\n",
|
| 510 |
+
"empty\n",
|
| 511 |
+
"empty\n",
|
| 512 |
+
"empty\n",
|
| 513 |
+
"[800/4707] TP:4505, FP:4625, FN:3819, Precesion:0.493, Recall:0.541, F1:0.516\n",
|
| 514 |
+
"empty\n",
|
| 515 |
+
"empty\n",
|
| 516 |
+
"empty\n",
|
| 517 |
+
"empty\n",
|
| 518 |
+
"empty\n",
|
| 519 |
+
"empty\n",
|
| 520 |
+
"empty\n",
|
| 521 |
+
"empty\n",
|
| 522 |
+
"empty\n",
|
| 523 |
+
"empty\n",
|
| 524 |
+
"empty\n",
|
| 525 |
+
"empty\n",
|
| 526 |
+
"empty\n",
|
| 527 |
+
"[1000/4707] TP:5426, FP:5713, FN:4600, Precesion:0.487, Recall:0.541, F1:0.513\n",
|
| 528 |
+
"empty\n",
|
| 529 |
+
"empty\n",
|
| 530 |
+
"empty\n",
|
| 531 |
+
"empty\n",
|
| 532 |
+
"empty\n",
|
| 533 |
+
"empty\n",
|
| 534 |
+
"empty\n",
|
| 535 |
+
"[1200/4707] TP:6649, FP:6803, FN:5521, Precesion:0.494, Recall:0.546, F1:0.519\n",
|
| 536 |
+
"empty\n",
|
| 537 |
+
"empty\n",
|
| 538 |
+
"empty\n",
|
| 539 |
+
"empty\n",
|
| 540 |
+
"empty\n",
|
| 541 |
+
"empty\n",
|
| 542 |
+
"[1400/4707] TP:7890, FP:8150, FN:6466, Precesion:0.492, Recall:0.550, F1:0.519\n",
|
| 543 |
+
"empty\n",
|
| 544 |
+
"empty\n",
|
| 545 |
+
"empty\n",
|
| 546 |
+
"empty\n",
|
| 547 |
+
"empty\n",
|
| 548 |
+
"empty\n",
|
| 549 |
+
"empty\n",
|
| 550 |
+
"empty\n",
|
| 551 |
+
"empty\n",
|
| 552 |
+
"empty\n",
|
| 553 |
+
"empty\n",
|
| 554 |
+
"[1600/4707] TP:8964, FP:9117, FN:7276, Precesion:0.496, Recall:0.552, F1:0.522\n",
|
| 555 |
+
"empty\n",
|
| 556 |
+
"empty\n",
|
| 557 |
+
"empty\n",
|
| 558 |
+
"empty\n",
|
| 559 |
+
"empty\n",
|
| 560 |
+
"empty\n",
|
| 561 |
+
"empty\n",
|
| 562 |
+
"empty\n",
|
| 563 |
+
"empty\n",
|
| 564 |
+
"[1800/4707] TP:10052, FP:10179, FN:8286, Precesion:0.497, Recall:0.548, F1:0.521\n",
|
| 565 |
+
"empty\n",
|
| 566 |
+
"empty\n",
|
| 567 |
+
"empty\n",
|
| 568 |
+
"empty\n",
|
| 569 |
+
"empty\n",
|
| 570 |
+
"empty\n",
|
| 571 |
+
"empty\n",
|
| 572 |
+
"empty\n",
|
| 573 |
+
"empty\n",
|
| 574 |
+
"empty\n",
|
| 575 |
+
"empty\n",
|
| 576 |
+
"empty\n",
|
| 577 |
+
"empty\n",
|
| 578 |
+
"[2000/4707] TP:11126, FP:11243, FN:9147, Precesion:0.497, Recall:0.549, F1:0.522\n",
|
| 579 |
+
"empty\n",
|
| 580 |
+
"empty\n",
|
| 581 |
+
"empty\n",
|
| 582 |
+
"empty\n",
|
| 583 |
+
"empty\n",
|
| 584 |
+
"empty\n",
|
| 585 |
+
"empty\n",
|
| 586 |
+
"[2200/4707] TP:12213, FP:12418, FN:10240, Precesion:0.496, Recall:0.544, F1:0.519\n",
|
| 587 |
+
"empty\n",
|
| 588 |
+
"empty\n",
|
| 589 |
+
"empty\n",
|
| 590 |
+
"empty\n",
|
| 591 |
+
"empty\n",
|
| 592 |
+
"empty\n",
|
| 593 |
+
"empty\n",
|
| 594 |
+
"empty\n",
|
| 595 |
+
"empty\n",
|
| 596 |
+
"empty\n",
|
| 597 |
+
"empty\n",
|
| 598 |
+
"empty\n",
|
| 599 |
+
"[2400/4707] TP:13243, FP:13341, FN:10973, Precesion:0.498, Recall:0.547, F1:0.521\n",
|
| 600 |
+
"empty\n",
|
| 601 |
+
"empty\n",
|
| 602 |
+
"empty\n",
|
| 603 |
+
"empty\n",
|
| 604 |
+
"empty\n",
|
| 605 |
+
"empty\n",
|
| 606 |
+
"empty\n",
|
| 607 |
+
"empty\n",
|
| 608 |
+
"[2600/4707] TP:14377, FP:14473, FN:11681, Precesion:0.498, Recall:0.552, F1:0.524\n",
|
| 609 |
+
"empty\n",
|
| 610 |
+
"empty\n",
|
| 611 |
+
"empty\n",
|
| 612 |
+
"empty\n",
|
| 613 |
+
"empty\n",
|
| 614 |
+
"empty\n",
|
| 615 |
+
"empty\n",
|
| 616 |
+
"empty\n",
|
| 617 |
+
"empty\n",
|
| 618 |
+
"empty\n",
|
| 619 |
+
"empty\n",
|
| 620 |
+
"empty\n",
|
| 621 |
+
"empty\n",
|
| 622 |
+
"empty\n",
|
| 623 |
+
"[2800/4707] TP:15494, FP:15674, FN:12516, Precesion:0.497, Recall:0.553, F1:0.524\n",
|
| 624 |
+
"empty\n",
|
| 625 |
+
"empty\n",
|
| 626 |
+
"empty\n",
|
| 627 |
+
"empty\n",
|
| 628 |
+
"empty\n",
|
| 629 |
+
"empty\n",
|
| 630 |
+
"empty\n",
|
| 631 |
+
"empty\n",
|
| 632 |
+
"empty\n",
|
| 633 |
+
"empty\n",
|
| 634 |
+
"empty\n",
|
| 635 |
+
"empty\n",
|
| 636 |
+
"empty\n",
|
| 637 |
+
"[3000/4707] TP:16471, FP:16734, FN:13334, Precesion:0.496, Recall:0.553, F1:0.523\n",
|
| 638 |
+
"empty\n",
|
| 639 |
+
"empty\n",
|
| 640 |
+
"empty\n",
|
| 641 |
+
"empty\n",
|
| 642 |
+
"empty\n",
|
| 643 |
+
"empty\n",
|
| 644 |
+
"empty\n",
|
| 645 |
+
"empty\n",
|
| 646 |
+
"empty\n",
|
| 647 |
+
"empty\n",
|
| 648 |
+
"[3200/4707] TP:17644, FP:17891, FN:14307, Precesion:0.497, Recall:0.552, F1:0.523\n",
|
| 649 |
+
"empty\n",
|
| 650 |
+
"empty\n",
|
| 651 |
+
"empty\n",
|
| 652 |
+
"empty\n",
|
| 653 |
+
"empty\n",
|
| 654 |
+
"empty\n",
|
| 655 |
+
"empty\n",
|
| 656 |
+
"empty\n",
|
| 657 |
+
"empty\n",
|
| 658 |
+
"empty\n",
|
| 659 |
+
"[3400/4707] TP:18711, FP:18935, FN:15092, Precesion:0.497, Recall:0.554, F1:0.524\n",
|
| 660 |
+
"empty\n",
|
| 661 |
+
"empty\n",
|
| 662 |
+
"empty\n",
|
| 663 |
+
"empty\n",
|
| 664 |
+
"empty\n",
|
| 665 |
+
"empty\n",
|
| 666 |
+
"empty\n",
|
| 667 |
+
"empty\n",
|
| 668 |
+
"empty\n",
|
| 669 |
+
"empty\n",
|
| 670 |
+
"empty\n",
|
| 671 |
+
"empty\n",
|
| 672 |
+
"empty\n",
|
| 673 |
+
"empty\n",
|
| 674 |
+
"[3600/4707] TP:19710, FP:19958, FN:15895, Precesion:0.497, Recall:0.554, F1:0.524\n",
|
| 675 |
+
"empty\n",
|
| 676 |
+
"empty\n",
|
| 677 |
+
"empty\n",
|
| 678 |
+
"empty\n",
|
| 679 |
+
"empty\n",
|
| 680 |
+
"empty\n",
|
| 681 |
+
"empty\n",
|
| 682 |
+
"empty\n",
|
| 683 |
+
"empty\n",
|
| 684 |
+
"empty\n",
|
| 685 |
+
"[3800/4707] TP:20845, FP:21054, FN:16693, Precesion:0.498, Recall:0.555, F1:0.525\n",
|
| 686 |
+
"empty\n",
|
| 687 |
+
"empty\n",
|
| 688 |
+
"empty\n",
|
| 689 |
+
"empty\n",
|
| 690 |
+
"empty\n",
|
| 691 |
+
"empty\n",
|
| 692 |
+
"empty\n",
|
| 693 |
+
"empty\n",
|
| 694 |
+
"empty\n",
|
| 695 |
+
"empty\n",
|
| 696 |
+
"[4000/4707] TP:21881, FP:22177, FN:17468, Precesion:0.497, Recall:0.556, F1:0.525\n",
|
| 697 |
+
"empty\n",
|
| 698 |
+
"empty\n",
|
| 699 |
+
"empty\n",
|
| 700 |
+
"empty\n",
|
| 701 |
+
"empty\n",
|
| 702 |
+
"empty\n",
|
| 703 |
+
"empty\n",
|
| 704 |
+
"empty\n",
|
| 705 |
+
"empty\n",
|
| 706 |
+
"empty\n",
|
| 707 |
+
"empty\n",
|
| 708 |
+
"[4200/4707] TP:22842, FP:23306, FN:18263, Precesion:0.495, Recall:0.556, F1:0.524\n",
|
| 709 |
+
"empty\n",
|
| 710 |
+
"empty\n",
|
| 711 |
+
"empty\n",
|
| 712 |
+
"empty\n",
|
| 713 |
+
"empty\n",
|
| 714 |
+
"empty\n",
|
| 715 |
+
"empty\n",
|
| 716 |
+
"empty\n",
|
| 717 |
+
"empty\n",
|
| 718 |
+
"empty\n",
|
| 719 |
+
"empty\n",
|
| 720 |
+
"empty\n",
|
| 721 |
+
"empty\n",
|
| 722 |
+
"[4400/4707] TP:23930, FP:24465, FN:19093, Precesion:0.494, Recall:0.556, F1:0.524\n",
|
| 723 |
+
"empty\n",
|
| 724 |
+
"empty\n",
|
| 725 |
+
"empty\n",
|
| 726 |
+
"empty\n",
|
| 727 |
+
"empty\n",
|
| 728 |
+
"empty\n",
|
| 729 |
+
"empty\n",
|
| 730 |
+
"empty\n",
|
| 731 |
+
"empty\n",
|
| 732 |
+
"[4600/4707] TP:25015, FP:25747, FN:20199, Precesion:0.493, Recall:0.553, F1:0.521\n",
|
| 733 |
+
"empty\n",
|
| 734 |
+
"empty\n",
|
| 735 |
+
"empty\n",
|
| 736 |
+
"empty\n",
|
| 737 |
+
"empty\n",
|
| 738 |
+
"empty\n",
|
| 739 |
+
"empty\n",
|
| 740 |
+
"[4706/4707] TP:25638, FP:26458, FN:20742, Precesion:0.492, Recall:0.553, F1:0.521\n"
|
| 741 |
+
]
|
| 742 |
+
}
|
| 743 |
+
],
|
| 744 |
+
"source": [
|
| 745 |
+
"no_text = False\n",
|
| 746 |
+
"only_text = True\n",
|
| 747 |
+
"pres_text, recalls_text, f1_text = eval(detect, gt, 'E:\\\\Mulong\\\\Datasets\\\\rico\\\\combined', show=False, no_text=no_text, only_text=only_text)"
|
| 748 |
+
]
|
| 749 |
+
},
|
| 750 |
+
{
|
| 751 |
+
"cell_type": "code",
|
| 752 |
+
"execution_count": 15,
|
| 753 |
+
"metadata": {},
|
| 754 |
+
"outputs": [
|
| 755 |
+
{
|
| 756 |
+
"name": "stderr",
|
| 757 |
+
"output_type": "stream",
|
| 758 |
+
"text": [
|
| 759 |
+
"D:\\Anaconda\\lib\\site-packages\\matplotlib\\figure.py:448: UserWarning: Matplotlib is currently using module://ipykernel.pylab.backend_inline, which is a non-GUI backend, so cannot show the figure.\n",
|
| 760 |
+
" % get_backend())\n"
|
| 761 |
+
]
|
| 762 |
+
},
|
| 763 |
+
{
|
| 764 |
+
"data": {
|
| 765 |
+
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAXcAAAD8CAYAAACMwORRAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4wLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvqOYd8AAAD/lJREFUeJzt3XuMXGd9xvHvE4dwycWJa7qlScARmIKLCqFLAqSUdaEioVXcqoQmKrcqYKlqoBRaGkQVTPoHhapFvRha00ZQoISAIHUjQ0DUq4SAwevciBMiuSbUblDDxQQBhRDy6x9zDMNk1zO7nt11Xn8/0sjn8s45vxm/fvb43ZnzpqqQJLXlmOUuQJI0foa7JDXIcJekBhnuktQgw12SGmS4S1KDDHdJapDhLkkNMtwlqUHHLteJV69eXWvWrFmu00vSQ9KuXbu+XlWPHtZu2cJ9zZo1zMzMLNfpJekhKclXRmnnsIwkNchwl6QGGe6S1CDDXZIaZLhLUoOGhnuSK5Lck+S2OfYnyd8l2ZPk1iRPH3+ZkqT5GOXK/T3AuYfYfx6wtntsBN51+GVJkg7H0HCvquuAbx6iyQbgX6tnB3BykseMq0BJ0vyN40tMpwL7+tb3d9u+OtgwyUZ6V/dMTEwwPT09htMfPdavX7+g523fvn3MlUgPtpD+ad9cPOMI98yybdZZt6tqC7AFYHJysqampsZw+qPHoSYzT3LI/dJim6v/2TeXxzg+LbMfOL1v/TTg7jEcV5K0QOMI963Ay7pPzTwTuLeqHjQkI0laOkOHZZJ8EJgCVifZD7wZeBhAVf0jsA14IbAH+B7w+4tVrCRpNEPDvaouGrK/gD8cW0WSpMPmN1QlqUGGuyQ1yHCXpAYZ7pLUIMNdkhpkuEtSgwx3SWqQ4S5JDTLcJalBhrskNchwl6QGGe6S5mXVqlUkGfkBzKt9ElatWrXMr/KhbxyTdUg6ihw4cGDRJ984+ENBC+eVuyQ1yHCXpAYZ7pLUIMNdkhpkuEtSgwx3SWqQ4S5JDTLcJalBhrskNchwl6QGGe6S1CDDXZIaZLhLUoMMd0lqkOEuSQ0y3CWpQYa7JDVopHBPcm6SO5PsSXLpLPsfm2R7kpuS3JrkheMvVZI0qqHhnmQFsBk4D1gHXJRk3UCzPweuqqozgQuBd467UEnS6Ea5cj8L2FNVe6vqPuBKYMNAmwJO6pZXAnePr0RJ0nyNMkH2qcC+vvX9wNkDbTYBn0zyauB44PljqU6StCCjhPts05APTn1+EfCeqvrrJM8C3pfkKVX1wE8dKNkIbASYmJhgenp6ASVrLr6fWipL0dfsz4cnVYM5PdCgF9abquoF3fobAarqrX1tdgPnVtW+bn0v8Myqumeu405OTtbMzMzhvwIBkIRhf5fSOCxFX7M/zy3JrqqaHNZulDH3ncDaJGckOY7eL0y3DrT5b+B53YmfDDwC+Nr8SpYkjcvQcK+q+4FLgGuBO+h9KmZ3ksuTnN81ez3wqiS3AB8EXlH+2JWkZTPKmDtVtQ3YNrDtsr7l24FzxluaJGmh/IaqJDXIcJekBhnuktQgw12SGmS4S1KDDHdJapDhLkkNMtwlqUEjfYlJkg6qN58Em1Yu/jl0WAx3SfOSt3x7aW4ctmlRT9E8h2UkqUGGuyQ1yHCXpAYZ7pLUIMNdkhpkuEtSgwx3SWqQ4S5JDTLcJalBhvsRaNWqVSSZ1wOYV/tVq1Yt86uUtJi8/cAR6MCBA0vy9W5J7fLKXZIaZLhLUoMMd0lqkOEuSQ0y3CWpQYa7JDXIcJekBhnuktQgw12SGmS4S1KDRgr3JOcmuTPJniSXztHmxUluT7I7yb+Nt0xJ0nwMvbdMkhXAZuDXgf3AziRbq+r2vjZrgTcC51TVgSQ/u1gFS5KGG+XK/SxgT1Xtrar7gCuBDQNtXgVsrqoDAFV1z3jLlCTNxyh3hTwV2Ne3vh84e6
DNEwGS3ACsADZV1ScGD5RkI7ARYGJigunp6QWUfHRYivfG918LZf888mXYrWWTXAC8oKpe2a2/FDirql7d1+Ya4IfAi4HTgOuBp1TVt+Y67uTkZM3MzBz+K2hQkiW55e9in0Ntsn8uryS7qmpyWLtRhmX2A6f3rZ8G3D1Lm3+vqh9W1ZeBO4G1oxYrSRqvUcJ9J7A2yRlJjgMuBLYOtLkaWA+QZDW9YZq94yxUkjS6oeFeVfcDlwDXAncAV1XV7iSXJzm/a3Yt8I0ktwPbgT+tqm8sVtGSpEMbOua+WBxzn5tjmjqS2T+X1zjH3CVJDzGGuyQ1yHCXpAYZ7pLUIMNdkhpkuEtSgwx3SWqQ4S5JDTLcJalBhrskNchwl6QGGe6S1CDDXZIaZLhLUoNGmUNVS6zefBJsWrn455AWKMmiHv+UU05Z1OMfDQz3I1De8u2luV/2pkU9hRo1377pvdmXh8MyktQgw12SGmS4S1KDDHdJapDhLkkNMtwlqUGGuyQ1yHCXpAYZ7pLUIMNdkhpkuEtSgwx3SWqQ4S5JDTLcJalBI4V7knOT3JlkT5JLD9HuRUkqyeT4SpQkzdfQcE+yAtgMnAesAy5Ksm6WdicCrwE+P+4iJUnzM8qV+1nAnqraW1X3AVcCG2Zp9xfA24Hvj7E+SdICjBLupwL7+tb3d9t+LMmZwOlVdc0Ya5MkLdAo0+zNNlnij+fMSnIM8A7gFUMPlGwENgJMTEwwPT09UpFHo6V4b3z/tVTsa0svw+Y2TPIsYFNVvaBbfyNAVb21W18J/Bfwne4pPwd8Ezi/qmbmOu7k5GTNzMy5+6i2FHNOOq+llop9bbyS7KqqoR9aGWVYZiewNskZSY4DLgS2HtxZVfdW1eqqWlNVa4AdDAl2SdLiGhruVXU/cAlwLXAHcFVV7U5yeZLzF7tASdL8jTLmTlVtA7YNbLtsjrZTh1+WJOlw+A1VSWqQ4S5JDTLcJalBhrskNchwl6QGGe6S1CDDXZIaZLhLUoMMd0lqkOEuSQ0y3CWpQYa7JDXIcJekBhnuktQgw12SGmS4S1KDDHdJapDhLkkNMtwlqUGGuyQ1aKQJsrX0kizq8U855ZRFPb6k5WW4H4Gqat7PSbKg50lqk8MyktQgw12SGmS4S1KDDHdJapDhLkkNMtwlqUGGuyQ1yHCXpAYZ7pLUoJHCPcm5Se5MsifJpbPsf12S25PcmuTTSR43/lIlSaMaGu5JVgCbgfOAdcBFSdYNNLsJmKyqXwI+Arx93IVKkkY3ypX7WcCeqtpbVfcBVwIb+htU1faq+l63ugM4bbxlSpLmY5Qbh50K7Otb3w+cfYj2FwMfn21Hko3ARoCJiQmmp6dHq1Ij8f3Ukcq+ufRGCffZ7j076+0Hk7wEmASeO9v+qtoCbAGYnJysqamp0arUSHw/daSyby69UcJ9P3B63/ppwN2DjZI8H3gT8Nyq+sF4ypMkLcQoY+47gbVJzkhyHHAhsLW/QZIzgX8Czq+qe8ZfpiRpPoaGe1XdD1wCXAvcAVxVVbuTXJ7k/K7ZXwEnAB9OcnOSrXMcTpK0BEaaiamqtgHbBrZd1rf8/DHXJUk6DH5DVZIaZLhLUoMMd0lqkOEuSQ0y3CWpQYa7JDXIcJekBhnuktQgw12SGmS4S1KDDHdJapDhLkkNMtwlqUGGuyQ1yHCXpAYZ7pLUIMNdkhpkuEtSgwx3SWqQ4S5JDTLcJalBhrskNchwl6QGGe6S1CDDXZIaZLhLUoMMd0lqkOEuSQ0y3CWpQYa7JDVopHBPcm6SO5PsSXLpLPsfnuRD3f7PJ1kz7kIlSaMbGu5JVgCbgfOAdcBFSdYNNLsYOFBVTwDeAbxt3IVKkkY3ypX7WcCeqtpbVfcBVwIbBtpsAN7bLX8EeF6SjK9MSdJ8jBLupwL7+tb3d9tmbVNV9wP3Aj8zjgIlSfN37AhtZrsCrwW0IclGYCPAxMQE09PTI5xeB61fv/6Q++f6z9L27d
sXoxzppxyqf9o3l94o4b4fOL1v/TTg7jna7E9yLLAS+ObggapqC7AFYHJysqamphZQ8tGr6kE/L6Ujhv3zyDLKsMxOYG2SM5IcB1wIbB1osxV4ebf8IuA/y79pSVo2Q6/cq+r+JJcA1wIrgCuqaneSy4GZqtoK/AvwviR76F2xX7iYRUuSDm2UYRmqahuwbWDbZX3L3wcuGG9pkqSF8huqktQgw12SGmS4S1KDDHdJapDhLkkNynJ9HD3J14CvLMvJ27Qa+PpyFyHNwr45Xo+rqkcPa7Rs4a7xSjJTVZPLXYc0yL65PByWkaQGGe6S1CDDvR1blrsAaQ72zWXgmLskNcgrd0lqkOH+EJbks0P2b0ty8lLVI81HkjVJbuuWp5Jcs9w1tWSku0Jq8SVZUVU/ms9zqurZQ/a/8PCqkh6smx85VfXActeiuXnlvgS6K5QvJXlvkluTfCTJo5LcleSyJJ8BLkjy+CSfSLIryfVJntQ9fyLJx5Lc0j2e3W3/TvfnY5Jcl+TmJLcleU63/a4kq7vl13X7bkvy2r667kjy7iS7k3wyySO7fa9JcntX75XL8LbpCNLXV94J3Ai8NMnnktyY5MNJTujaPSPJZ7t++oUkJ3bPvb5re+PB/nuIcz2368s3J7kpyYlL8RqbU1U+FvkBrKE3p+w53foVwJ8AdwFv6Gv3aWBtt3w2vRmtAD4EvLZbXgGs7Ja/0/35euBNfftP7JbvovftwF8GvggcD5wA7AbO7Oq6H3ha1/4q4CXd8t3Aw7vlk5f7PfRxRPThB4Bndn3qOuD4bt+fAZcBxwF7gWd020+iNzrwKOAR3ba19Cb5OXjM27rlKeCabvk/+v6tnAAcu9yv/6H4cFhm6eyrqhu65fcDr+mWPwTQXfk8G/hw32TCD+/+/DXgZQDVG7q5d+DYO4ErkjwMuLqqbh7Y/yvAx6rqu925Pgo8h970iF/ua7+L3j84gFuBDyS5Grh6IS9YzflKVe1I8pvAOuCGrq8eB3wO+AXgq1W1E6Cqvg2Q5HjgH5I8DfgR8MQh57kB+JskHwA+WlX7F+XVNM5hmaUz+JnTg+vf7f48BvhWVT2t7/HkkQ5cdR3wq8D/0Jvu8GUDTWafer7nB33LP+Inv4f5DWAzvav+Xd3E5zq6HeyrAT7V10/XVdXF3fbZPlv9x8D/Ak8FJun9MJhTVf0l8ErgkcCOg8OTmh/Dfek8NsmzuuWLgM/07+yucr6c5ALo/dIqyVO73Z8G/qDbviLJSf3PTfI44J6qeje9+WyfPnDu64Df6sb5jwd+G7h+rkKTHAOcXlXbgTcAJ9P777EEsAM4J8kTALp+9UTgS8DPJ3lGt/3E7qJgJb0r+geAl9IbOpxTksdX1Rer6m3ADGC4L4DhvnTuAF6e5FZgFfCuWdr8HnBxklvojYtv6Lb/EbA+yRfpDZ384sDzpoCbk9wE/A7wt/07q+pG4D3AF4DPA/9cVTcdotYVwPu7890EvKOqvjXi61TjquprwCuAD3b9eQfwpKq6D/hd4O+7Pvwp4BHAO+n1/R30hmS+O+uBf+K13S/+bwH+D/j44ryStvkN1SWQZA29XxY9ZZlLkXSU8MpdkhrklbskNcgrd0lqkOEuSQ0y3CWpQYa7JDXIcJekBhnuktSg/wdEbKhcLyCIwQAAAABJRU5ErkJggg==\n",
|
| 766 |
+
"text/plain": [
|
| 767 |
+
"<Figure size 432x288 with 1 Axes>"
|
| 768 |
+
]
|
| 769 |
+
},
|
| 770 |
+
"metadata": {
|
| 771 |
+
"needs_background": "light"
|
| 772 |
+
},
|
| 773 |
+
"output_type": "display_data"
|
| 774 |
+
}
|
| 775 |
+
],
|
| 776 |
+
"source": [
|
| 777 |
+
"draw_plot([pres_all, recalls_all])"
|
| 778 |
+
]
|
| 779 |
+
},
|
| 780 |
+
{
|
| 781 |
+
"cell_type": "code",
|
| 782 |
+
"execution_count": 16,
|
| 783 |
+
"metadata": {},
|
| 784 |
+
"outputs": [
|
| 785 |
+
{
|
| 786 |
+
"name": "stderr",
|
| 787 |
+
"output_type": "stream",
|
| 788 |
+
"text": [
|
| 789 |
+
"D:\\Anaconda\\lib\\site-packages\\matplotlib\\figure.py:448: UserWarning: Matplotlib is currently using module://ipykernel.pylab.backend_inline, which is a non-GUI backend, so cannot show the figure.\n",
|
| 790 |
+
" % get_backend())\n"
|
| 791 |
+
]
|
| 792 |
+
},
|
| 793 |
+
{
|
| 794 |
+
"data": {
|
| 795 |
+
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAXcAAAD8CAYAAACMwORRAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4wLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvqOYd8AAAD+BJREFUeJzt3X+QXWV9x/H3hyD+gBBMY7cW0DAaq6lTxa6gUuum2hFsJ2mnYsnUXx00M52itdpaHDsY6R9WO63TH9EWW0arVkRHacpE0bHZAdFoNvySgMykEZsUp/gDEbWKyLd/3INer7u5dzd3d8OT92vmzp5znuee8703Tz45efaee1JVSJLacsxyFyBJGj/DXZIaZLhLUoMMd0lqkOEuSQ0y3CWpQYa7JDXIcJekBhnuktSgY5frwGvWrKm1a9cu1+El6UFpz549X6uqRw3rt2zhvnbtWmZmZpbr8JL0oJTky6P0c1pGkhpkuEtSgwx3SWqQ4S5JDTLcJalBQ8M9yaVJ7kxy8xztSfJ3SfYluSnJ08ZfpiRpPkY5c383cPYh2s8B1nWPLcA7D78sSdLhGBruVXU18I1DdNkE/Gv17AJOSvLocRUoSZq/cVzEdDJwoG/9YLftK4Mdk2yhd3bPxMQE09PTYzj80WPDhg0Let7OnTvHXImOZhs3buSee+5Z1GOsXLmS7du3L+oxWjeOcM8s22a963ZVXQJcAjA5OVlTU1NjOPzR41A3M09yyHZpXO65555FH2tJMB8Ozzg+LXMQOLVv/RTgjjHsV5K0QOMI9+3AS7tPzTwDuLuqfmpKRpK0dIZOyyT5ADAFrElyEHgT8BCAqvpHYAfwAmAf8F3g9xerWEnSaIaGe1VtHtJewB+OrSJJ0mHzClVJapDhLkkNMtwlqUGGuyQ1yHCXpAYZ7pLUIMNdkhpkuEtSgwx3SWqQ4S5JDTLcJalBhrskNchwl6QGGe6S1CDDXZIaZLhLUoMMd0lqkOEuSQ0y3CWpQYa7JDXIcJekBhnuktQgw12SGmS4S1KDDHdJapDhLkkNMtwlqUGGuyQ1yHCXpAYZ7pLUoJHCPcnZSW5Lsi/JhbO0PybJziTXJ7kpyQvGX6okaVRDwz3JCmAbcA6wHticZP1Atz8HLq+q04HzgHeMu1BJ0uhGOXM/A9hXVfur6l7gMmDTQJ8CTuyWVwF3jK9ESdJ8HTtCn5OBA33rB4EzB/psBT6R5FXA8cDzxlKdJGlBRgn3zLKtBtY3A++uqr9O8kzgvUmeXFX3/8SOki3AFoCJiQmmp6cXULLm4vuppbIUY83xfHhSNZjTAx16Yb21qp7frb8BoKre0tdnL3B2VR3o1vcDz6iqO+fa7+TkZM3MzBz+KxAASRj2ZymNw1KMNcfz3JLsqarJYf1GmXPfDaxLclqS4+j9wnT7QJ//Bp7bHfhJwMOAr86vZEnSuAwN96q6D7gAuAq4ld6nYvYmuTjJxq7b64BXJrkR+ADw8vKfXUlaNqPMuVNVO4AdA9su6lu+BThrvKVJkhbKK1QlqUGGuyQ1yHCXpAYZ7pLUIMNdkhpkuB+BVq9eTZJ5PYB59V+9evUyv0pJi2mkj0Jqad11111LcgWgpHZ55i5JDTLcJalBhrskNchwl6QGGe6S1CDDXZIaZLhLUoMMd0lqkOEuSQ0y3CWpQX79gKR5qTedCFtXLf4xdFgMd0nzkjd/a0m++6i2Luohmue0jCQ1yHCXpAYZ7pLUIMNdkhpkuEtSgwx3SWqQ4S5JDTLcJalBhrskNchwl6QGGe6S1CDDXZIaNFK4Jzk7yW1J9iW5cI4+L0pyS5K9Sf5tvGVKkuZj6LdCJlkBbAN+HTgI7E6yvapu6euzDngDcFZV3ZXkZxerYEnScKOcuZ8B7Kuq/VV1L3AZsGmgzyuBbVV1F0BV3TneMiVJ8zHK97mfDBzoWz8InDnQ5wkASa4FVgBbq+rjgz
tKsgXYAjAxMcH09PQCSj46LMV74/uvhXJ8Hvky7Ev3k5wLPL+qXtGtvwQ4o6pe1dfnSuAHwIuAU4BrgCdX1Tfn2u/k5GTNzMwc/itoUJKluRnCIh9DbXJ8Lq8ke6pqcli/UaZlDgKn9q2fAtwxS59/r6ofVNWXgNuAdaMWK0kar1HCfTewLslpSY4DzgO2D/S5AtgAkGQNvWma/eMsVJI0uqHhXlX3ARcAVwG3ApdX1d4kFyfZ2HW7Cvh6kluAncCfVtXXF6toSdKhDZ1zXyzOuc/NOU0dyRyfy2ucc+6SpAcZw12SGmS4S1KDDHdJapDhLkkNMtwlqUGGuyQ1yHCXpAYZ7pLUIMNdkhpkuEtSgwx3SWqQ4S5JDTLcJalBo9xDVUus3nQibF21+MeQ1CzD/QiUN39rab4ve+uiHkLSMnJaRpIaZLhLUoMMd0lqkOEuSQ0y3CWpQYa7JDXIcJekBhnuktQgw12SGmS4S1KDDHdJapDhLkkNMtwlqUGGuyQ1aKRwT3J2ktuS7Ety4SH6vTBJJZkcX4mSpPkaGu5JVgDbgHOA9cDmJOtn6bcSeDXwuXEXKUman1HO3M8A9lXV/qq6F7gM2DRLv78A3gZ8b4z1SZIWYJRwPxk40Ld+sNv2I0lOB06tqivHWJskaYFGuc1eZtn2o3vAJTkGeDvw8qE7SrYAWwAmJiaYnp4eqcij0VK8N77/WqhktlgYn5UrVzo+D1OG3aszyTOBrVX1/G79DQBV9ZZufRXwX8C3u6f8HPANYGNVzcy138nJyZqZmbP5qJZkae6husjHkMCxNm5J9lTV0A+tjDItsxtYl+S0JMcB5wHbH2isqrurak1Vra2qtcAuhgS7JGlxDQ33qroPuAC4CrgVuLyq9ia5OMnGxS5QkjR/o8y5U1U7gB0D2y6ao+/U4ZclSTocXqEqSQ0y3CWpQYa7JDXIcJekBhnuktQgw12SGmS4S1KDDHdJapDhLkkNMtwlqUGGuyQ1yHCXpAYZ7pLUIMNdkhpkuEtSgwx3SWqQ4S5JDTLcJalBhrskNchwl6QGGe6S1CDDXZIaZLhLUoMMd0lq0LHLXYBml2RR9//IRz5yUfcvaXkZ7kegqpr3c5Is6HmS2uS0jCQ1yHCXpAYZ7pLUIMNdkhpkuEtSg0YK9yRnJ7ktyb4kF87S/toktyS5Kcmnkjx2/KVKkkY1NNyTrAC2AecA64HNSdYPdLsemKyqXwI+DLxt3IVKkkY3ypn7GcC+qtpfVfcClwGb+jtU1c6q+m63ugs4ZbxlSpLmY5SLmE4GDvStHwTOPET/84GPzdaQZAuwBWBiYoLp6enRqtRIfD91pHJsLr1Rwn226+BnvRQyyYuBSeA5s7VX1SXAJQCTk5M1NTU1WpUaie+njlSOzaU3SrgfBE7tWz8FuGOwU5LnAW8EnlNV3x9PeZKkhRhlzn03sC7JaUmOA84Dtvd3SHI68E/Axqq6c/xlSpLmY2i4V9V9wAXAVcCtwOVVtTfJxUk2dt3+CjgB+FCSG5Jsn2N3kqQlMNK3QlbVDmDHwLaL+pafN+a6JEmHwStUJalBhrskNchwl6QGGe6S1CDDXZIaZLhLUoMMd0lqkOEuSQ0y3CWpQYa7JDXIcJekBhnuktQgw12SGmS4S1KDDHdJapDhLkkNMtwlqUGGuyQ1yHCXpAYZ7pLUIMNdkhpkuEtSgwx3SWqQ4S5JDTLcJalBhrskNchwl6QGGe6S1CDDXZIaZLhLUoNGCvckZye5Lcm+JBfO0v7QJB/s2j+XZO24C5UkjW5ouCdZAWwDzgHWA5uTrB/odj5wV1U9Hng78NZxFypJGt0oZ+5nAPuqan9V3QtcBmwa6LMJeE+3/GHguUkyvjIlSfMxSrifDBzoWz/YbZu1T1XdB9wN/Mw4CpQkzd+xI/SZ7Qy8FtCHJFuALQATExNMT0+PcHg9YMOGDYdsn+s/Szt37lyMcqSfcKjx6dhceqOE+0Hg1L71U4A75u
hzMMmxwCrgG4M7qqpLgEsAJicna2pqagElH72qfurfS+mI4fg8sowyLbMbWJfktCTHAecB2wf6bAde1i2/EPjP8k9akpbN0DP3qrovyQXAVcAK4NKq2pvkYmCmqrYD/wK8N8k+emfs5y1m0ZKkQxtlWoaq2gHsGNh2Ud/y94Bzx1uaJGmhvEJVkhpkuEtSgwx3SWqQ4S5JDTLcJalBWa6Poyf5KvDlZTl4m9YAX1vuIqRZODbH67FV9ahhnZYt3DVeSWaqanK565AGOTaXh9MyktQgw12SGmS4t+OS5S5AmoNjcxk45y5JDfLMXZIaZLg/iCX5zJD2HUlOWqp6pPlIsjbJzd3yVJIrl7umloz0rZBafElWVNUP5/OcqnrWkPYXHF5V0k/r7o+cqrp/uWvR3DxzXwLdGcoXk7wnyU1JPpzkEUluT3JRkk8D5yZ5XJKPJ9mT5JokT+yeP5Hko0lu7B7P6rZ/u/v56CRXJ7khyc1Jnt1tvz3Jmm75tV3bzUle01fXrUnelWRvkk8keXjX9uokt3T1XrYMb5uOIH1j5R3AdcBLknw2yXVJPpTkhK7f05N8phunn0+ysnvuNV3f6x4Yv4c41nO6sXxDkuuTrFyK19icqvKxyA9gLb17yp7VrV8K/AlwO/D6vn6fAtZ1y2fSu6MVwAeB13TLK4BV3fK3u5+vA97Y176yW76d3tWBvwx8ATgeOAHYC5ze1XUf8NSu/+XAi7vlO4CHdssnLfd76OOIGMP3A8/oxtTVwPFd258BFwHHAfuBp3fbT6Q3O/AI4GHdtnX0bvLzwD5v7pangCu75f/o+7tyAnDscr/+B+PDaZmlc6Cqru2W3we8ulv+IEB35vMs4EN9NxN+aPfz14CXAlRv6ubugX3vBi5N8hDgiqq6YaD9V4CPVtV3umN9BHg2vdsjfqmv/x56f+EAbgLen+QK4IqFvGA158tVtSvJbwLrgWu7sXoc8FngF4CvVNVugKr6FkCS44F/SPJU4IfAE4Yc51rgb5K8H/hIVR1clFfTOKdlls7gZ04fWP9O9/MY4JtV9dS+x5NG2nHV1cCvAv9D73aHLx3oMvut53u+37f8Q378e5jfALbRO+vf0934XEe3B8ZqgE/2jdP1VXV+t322z1b/MfC/wFOASXr/GMypqv4SeAXwcGDXA9OTmh/Dfek8Jskzu+XNwKf7G7uznC8lORd6v7RK8pSu+VPAH3TbVyQ5sf+5SR4L3FlV76J3P9unDRz7auC3unn+44HfBq6Zq9AkxwCnVtVO4PXASfT+eywB7ALOSvJ4gG5cPQH4IvDzSZ7ebV/ZnRSsondGfz/wEnpTh3NK8riq+kJVvRWYAQz3BTDcl86twMuS3ASsBt45S5/fA85PciO9efFN3fY/AjYk+QK9qZNfHHjeFHBDkuuB3wH+tr+xqq4D3g18Hvgc8M9Vdf0hal0BvK873vXA26vqmyO+TjWuqr4KvBz4QDeedwFPrKp7gd8F/r4bw58EHga8g97Y30VvSuY7s+74x17T/eL/RuD/gI8tzitpm1eoLoEka+n9sujJy1yKpKOEZ+6S1CDP3CWpQZ65S1KDDHdJapDhLkkNMtwlqUGGuyQ1yHCXpAb9P9Yjn2/XeLGfAAAAAElFTkSuQmCC\n",
|
| 796 |
+
"text/plain": [
|
| 797 |
+
"<Figure size 432x288 with 1 Axes>"
|
| 798 |
+
]
|
| 799 |
+
},
|
| 800 |
+
"metadata": {
|
| 801 |
+
"needs_background": "light"
|
| 802 |
+
},
|
| 803 |
+
"output_type": "display_data"
|
| 804 |
+
}
|
| 805 |
+
],
|
| 806 |
+
"source": [
|
| 807 |
+
"draw_plot([pres_non_text, recalls_non_text])"
|
| 808 |
+
]
|
| 809 |
+
},
|
| 810 |
+
{
|
| 811 |
+
"cell_type": "code",
|
| 812 |
+
"execution_count": 32,
|
| 813 |
+
"metadata": {},
|
| 814 |
+
"outputs": [],
|
| 815 |
+
"source": [
|
| 816 |
+
"import matplotlib.pyplot as plt\n",
|
| 817 |
+
"import seaborn as sns\n",
|
| 818 |
+
"import pandas as pd\n",
|
| 819 |
+
"\n",
|
| 820 |
+
"pres1 = pd.DataFrame({'score_type':'Precision', 'score': pres_non_text, 'class':'Non_text'})\n",
|
| 821 |
+
"pres2 = pd.DataFrame({'score_type':'Precision', 'score': pres_all, 'class':'All_element'})\n",
|
| 822 |
+
"\n",
|
| 823 |
+
"recalls1 = pd.DataFrame({'score_type':'Recall', 'score':recalls_non_text, 'class':'Non_text'})\n",
|
| 824 |
+
"recalls2 = pd.DataFrame({'score_type':'Recall', 'score':recalls_all, 'class':'All_element'})\n",
|
| 825 |
+
"\n",
|
| 826 |
+
"f1s1 = pd.DataFrame({'score_type':'F1', 'score':f1_non_text, 'class':'Non_text'})\n",
|
| 827 |
+
"f1s2 = pd.DataFrame({'score_type':'F1', 'score':f1_all, 'class':'All_element'})\n",
|
| 828 |
+
"\n",
|
| 829 |
+
"data=pd.concat([pres1, pres2, recalls1, recalls2, f1s1, f1s2])"
|
| 830 |
+
]
|
| 831 |
+
},
|
| 832 |
+
{
|
| 833 |
+
"cell_type": "code",
|
| 834 |
+
"execution_count": 36,
|
| 835 |
+
"metadata": {},
|
| 836 |
+
"outputs": [
|
| 837 |
+
{
|
| 838 |
+
"data": {
|
| 839 |
+
"text/plain": [
|
| 840 |
+
"<matplotlib.axes._subplots.AxesSubplot at 0x179cafcdac8>"
|
| 841 |
+
]
|
| 842 |
+
},
|
| 843 |
+
"execution_count": 36,
|
| 844 |
+
"metadata": {},
|
| 845 |
+
"output_type": "execute_result"
|
| 846 |
+
},
|
| 847 |
+
{
|
| 848 |
+
"data": {
|
| 849 |
+
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAYUAAAEKCAYAAAD9xUlFAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4wLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvqOYd8AAAIABJREFUeJzt3Xl8VNXdx/HPL4EkCAiytCprrKBQJAFZIorWgop1RwX3gj7leSwWl0pr61KL1dKK+hK0bg+1WnEBi4pKCz5a64JgEMPuggoSsYqgVNYk5Pf8MZPrJGaZTHJnsnzfr1denXvumXt+M7fym3PvPeeYuyMiIgKQluoARESk4VBSEBGRgJKCiIgElBRERCSgpCAiIgElBRERCSgpiIhIQElBREQCSgoiIhJokeoAaqtTp07es2fPVIchItKovPXWW1+4e+ea6jW6pNCzZ0+WLl2a6jBERBoVM9sQTz1dPhIRkYCSgoiIBJQUREQk0OjuKYhI41BcXExhYSG7d+9OdSjNSlZWFl27dqVly5YJvV9JQURCUVhYSNu2benZsydmlupwmgV3Z8uWLRQWFpKdnZ3QMUK7fGRmfzazz81sVRX7zcymm9k6M1thZgPDikVEkm/37t107NhRCSGJzIyOHTvWqXcW5j2FvwCjqtl/ItAr+jcBuCfEWEQkBZQQkq+u33loScHdXwG2VlPlNOBhj1gMtDezA8KKR0REapbKewpdgI0x24XRsk8rVjSzCUR6E3Tv3j0pwSXimXdWVFp+2qH9Q223uPgjAFq2TOwaYnP1yIo3a/2eC/oPSbi93bsX1fo9WVnDEm6vudm2e1e57XZZraqtf+ONN9KmTRuuvvrqhNssLd0DQFpaZsLHaGhS+UhqZX0cr6yiu9/v7oPcfVDnzjWO0hYRkQSlMikUAt1itrsCm1IUi4g0cQ8//DD9+/cnJyeHCy+8sNy+Bx54gMGDB5OTk8OZZ57Jzp07AZgzZw79+vUjJyeHo48+GoDVq1czZMgQcnNzyc0dxPvvr0v6ZwlTKpPCPOCi6FNIecA2d//WpSMRkbpavXo1N998My+99BLLly/nzjvvLLd/9OjR5Ofns3z5cvr06cPMmTMBmDJlCgsWLGD58uXMmzcPgHvvvZfLL7+cgoIC3nxzEV27dkn65wlTaPcUzOwx4AdAJzMrBH4DtARw93uB+cCPgHXATmB8WLGISPP20ksvcdZZZ9GpUycAOnToUG7/qlWruO666/jqq6/Yvn07J5xwAgBHHnkk48aNY8yYMYwePRqAI444gptvvpnCwkJOP/1kevU6OLkfJmShJQV3P7eG/Q5MDKt9EZEy7l7to5rjxo3j6aefJicnh7/85S+8/PLLQKRXsGTJEp5//nlyc3MpKCjgvPPOY+jQoTz//POceOLJ3H//PYwcWd3T942L5j4SkSZvxIgRzJ49my1btgCwdWv5p+W//vprDjjgAIqLi5k1a1ZQ/sEHHzB06FCmTJlCp06d2LhxIx9++CEHHXQQkyZN4pRTTmblykrH5zZamuZCRJq873//+1x77bUcc8wxpKenM2DAAGIX67rpppsYOnQoPXr04LDDDuPrr78GYPLkybz//vu4OyNGjCAnJ4epU6fyyCOP0LJlS7773e9w/fW/TtGnCodFruI0HoMGDfKGushOKsYpFBWtKbedkdE3tLaaGo1TCNfatWvp06dP0tqr7TiF+tBQxylU9t2b2VvuPqim9+rykYiIBJQUREQSUFq6CygFSqOvmwYlBRERCSgpiIhIQElBREQCSgoiIhLQOAURSYo5857hyx3b6+14+7Vuw9mnngbAl7t2fmt/Wdl+rfaptzabAyUFkXq0a9eLSTteq1Yj6rWtsH25Yzt7D+5Wc8V4j7duY411OuzTmquuuorbbrsNgGnTprF9+3ZuvPHGeolh/foNLFq0mPPOG5vQ+wsKCti0aRM/+t
GP6iWe+qDLRyLSZGVmZjJ37ly++OKLUI6/fv0GHnvsiYTfX1BQwPz58+sxorpTUhCRJqtFixZMmDCBO+6441v7NmzYwIgRI+jfvz8jRozg448/BiKT402aNIlhw4Zx0EEH8eSTT1Z5/F//+npee20RAwcO5Y477mDv3r1MnjyZwYMH079/f+677z4AnnrqKUaOHIm78+mnn9K7d28+/vhjbrjhBp544glyc3N54onEk0t9UlIQkSZt4sSJzJo1i23btpUrv+yyy7joootYsWIF559/PpMmTQr2ffrpp7z22ms899xzXHPNNVUe+5ZbbuKoo4axbNkSrrzySmbOnEm7du3Iz88nPz+fBx54gI8++ogzzjiD/fffn7vvvpuf/OQn/Pa3v6V79+5MmTKFsWPHUlBQwNixiV2Cqm+6pyAiTdq+++7LRRddxPTp02nV6pv5kN544w3mzp0LwIUXXsgvfvGLYN/pp59OWloaffv25bPPPvvWMUtLd5S9Cv63tHQHCxcuZMWKFUHvYtu2bbz//vtkZ2czY8YM+vXrR15eHueeW+3KAimlpNBI7dmzrMryzMyBSY5GpGG74oorGDhwIOPHV72WV+x6C5mZ30xwV5tJQ92dGTNmBIv0xPrkk09IS0vjs88+o7S0lLS0hnmhpmFGJSJNzn6t25C+bmO9/e3Xuk3cbXfo0IExY8YEy2wCDBs2jMcffxyAWbNmcdRRR9X6M7Vt25bt278Otk844QTuueceiouLAXjvvffYsWMHJSUljB8/nkcffZQ+ffpw++23B+8vm6a7oVBPQUSSomxMQar8/Oc/56677gq2p0+fzsUXX8ytt95K586defDBB2t9zP79+9GiRQsGDMjjxz++gCuumMz69esZOHAg7k7nzp15+umnue222xg+fDjDhw8nNzeXwYMHc9JJJ3HssccydepUcnNz+dWvftUg7itoPYV6lMz1FKq6fATo8lGMe/JfSVpblw4+ut7HKVSnoY9TSOZ6CpUNXisTxuC1b+4plJeW1rre20qE1lMQEZF6octHIiI1WLlyJRdeeGFMSSmZmZm88cbLqQopNEoKIiI1OOywwygoKAi2q7p81BQoKdRBvGv8VqxXl3V+RUTCpHsKIiISUFIQEZGALh+JSFL84x9PU1RUf+spZGS0YdSo0+vteBKhpCAiSVFUtJ3jjjug3o73wgufxlXvqaeeYvTo0axdu5ZDDz2U9evXc/LJJ7Nq1Spefvllpk2bxnPPPVertl9++RVuu206zz5b9QyqYVm/fj2LFi3ivPPOC+X4unwkIk3aY489xlFHHRVMadHYrV+/nkcffTS04yspiEiTtX37dl5//XVmzpyZUFLYsWMHF198MYMHH86AATk89dTjlJaWXQJzwNmxYzuXXPI/DB16dLk6f/7zvZx++umccsopZGdnc9ddd3H77bczYMAA8vLy2Lp1KwAffPABo0aN4vDDD2f48OG88847QNXrOlxzzTW8+uqr5ObmVrpORF0pKYhIkzX/2WcZNWoUvXv3pkOHDixbVvX0MJW5+eab+eEPf8iSJf/ixRef55e/vI4dO8qPUbjllls59thjKq2zatUqHn30Ud58802uvfZa9tlnH95++22OOOIIHn74YQAmTJjAjBkzeOutt5g2bRo//elPg2NXtq7D1KlTGT58OAUFBVx55ZV1+XoqFeo9BTMbBdwJpAP/6+5TK+zvDjwEtI/WucbdG9badCLSIBWX7uWLndXfuH5y9mz+e+JEvti5nZNHn8Fjjz3GxIkT425j4cKFzJs3j2nT/gjA7t17+Pjj8mtDv/DCSzz77Hxuv336t+oce+yxtG3blrZt29KuXTtOOeUUIDIYbsWKFWzfvp1FixZx9tlnB8fbs2dP8LqmdR3CEFpSMLN04G7gOKAQyDezee6+JqbadcBsd7/HzPoC84GeYcUkIs3H1i1beO1fr/DOmrWYGXv37iU9La3cL/GauDt/+9vf6N
WrS7nyzz7bXK7OnDmPcMghvcvVWbJkabl1GdLS0oLttLQ0SkpKKC0tpX379uVGS8dKdF2HugizpzAEWOfuHwKY2ePAaUBsUnBg3+jrdsCmEONp9HbvXpRQvaysYWGEI1IrGRlt4n5iqCbFpXtJb1H9jKTPPv0MY847l9tmTA/KzjzxJAoLC+Nu54QTTmDGjBnceefvMTPefns5AwbklKtz/PEjuOuu+5g+fVqVdaqy7777kp2dzZw5czj77LNxd1asWEFOTtXvD3sNhjCTQhcgtp9VCAytUOdGYKGZ/QxoDYys7EBmNgGYANC9e/d6D1REwlefYwpqumwEMHfOHCZddVW5sjPPPJNbbrkl7nauv/56rrjiCnJz83B3evTo/q3HUK+77pdceeUvq61TnVmzZnHppZfyu9/9juLiYs4555xqk0L//v1p0aIFOTk5jBs3rt7vK4S2noKZnQ2c4O7/Fd2+EBji7j+LqXNVNIbbzOwIYCbQz91LKz0oDWs9hXjnPqoo0bmP4u0pVNScewpaTyF1wlxPIZ6kUJlO+8S/Wlusb544qp20tMTaq6uGup5CIdAtZrsr3748dAkwG8Dd3wCygE4hxiQiItUIMynkA73MLNvMMoBzgHkV6nwMjAAwsz5EksJmRESS6MEHHyQ3N7fcX22eUmpKQrun4O4lZnYZsIDI46Z/dvfVZjYFWOru84CfAw+Y2ZVEbjqP88a2PqiIVMndMbNUh1Gj8ePHM378+FSHUS/q+k9oqOMUomMO5lcouyHm9RrgyDBjEJHUyMrKYsuWLXTs2LFRJIamwN3ZsmULWVlZCR9DE+KJSCi6du1KYWEhmzfX/xXh7UV7aq5Uic0ZmTVXqoR7Yu2ZJdZeXWRlZdG1a9eE36+kICKhaNmyJdnZ2aEc++HlSxJ630V9chN6X+JP/iXWXipp7iMREQkoKYiISEBJQUREAkoKIiIS0I1mkXqyePFi/v3v5E3Bsv/+rcnLy0tae9I8qKcgIiIB9RRE6kleXh67du2ouWI9adVKvQSpf0oKIiIQygy31R2zoc5yq8tHIiISUFIQEZGAkoKIiASUFEREJKCkICIiASUFEREJKCmIiEhASUFERAIavNbAJHMATUMdPCMiqaOegoiIBNRTqME9+a8k9Zjj+tV7cyKNkv7bSw31FEREJKCkICIiASUFEREJKCmIiEhASUFERAJKCiIiEtAjqdJkLV68mHfz85PX3t4McnKS1pxIKNRTEBGRgHoK0mTl5eXxdnpR8tobnBfKNCUiyRRqT8HMRpnZu2a2zsyuqaLOGDNbY2arzezRMOMREZHqhdZTMLN04G7gOKAQyDezee6+JqZOL+BXwJHu/qWZfSeseEREpGZh9hSGAOvc/UN3LwIeB06rUOcnwN3u/iWAu38eYjwiIlKDMJNCF2BjzHZhtCxWb6C3mb1uZovNbFRlBzKzCWa21MyWbt68OaRwRUQkzKRglZR5he0WQC/gB8C5wP+aWftvvcn9fncf5O6DOnfuXO+BiohIRJhJoRDoFrPdFdhUSZ1n3L3Y3T8C3iWSJEREJAXCTAr5QC8zyzazDOAcYF6FOk8DxwKYWScil5M+DDEmERGpRmhJwd1LgMuABcBaYLa7rzazKWZ2arTaAmCLma0B/glMdvctYcUkIiLVC3XwmrvPB+ZXKLsh5rUDV0X/REQkxTTNhYiIBJQUREQkoKQgIiIBTYgnIs3exjXvsOCTL5Pa5v77tyYvLy+pbcYj7p6CmR1lZuOjrzubWXZ4YYmISCrE1VMws98Ag4BDgAeBlsAjwJHhhSYikhzd+h7KCf2Kk9pmq1YNr5cA8fcUzgBOBXYAuPsmoG1YQYmISGrEmxSKomMKHMDMWocXkoiIpEq8SWG2md0HtDeznwD/BzwQXlgiIpIKcd1TcPdpZnYc8B8i9xVucPcXQo1MRESSrsakEF1BbYG7jwSUCEREmrAaLx+5+15gp5m1S0I8IiKSQvEOXt
sNrDSzF4g+gQTg7pNCiUpERFIi3qTwfPRPQpTsUZUNdUSliKROvDeaH4oulNM7WvSuuyd3pIdII9Cq1Ygq9+3evajWx8vKGlaXcERqLd4RzT8AHgLWE1l7uZuZ/djdXwkvtOYn2aMqG+qIShFJnXgvH90GHO/u7wKYWW/gMeDwsAITEZHki3fwWsuyhADg7u8Rmf9IRESakHh7CkvNbCbw1+j2+cBb4YQkIs3d4sWLeTc/P6lt5u9qxeDBhyS1zYYo3qRwKTARmETknsIrwJ/CCkpERFIj3qTQArjT3W+HYJRzZmhRiUizlpeXx9vpRUltc3CSp85uqOK9p/Ai0CpmuxWRSfFERKQJiTcpZLn79rKN6Ot9wglJRERSJd6ksMPMBpZtmNkgYFc4IYmISKrEe0/hcmCOmW0istDOgcDY0KISEZGUiDcpZAMDgO5ElubMI7oKm4iINB3xXj663t3/A7QHjgPuB+4JLSoREUmJeJPC3uj/ngTc6+7PABnhhCQiIqkSb1L4JLpG8xhgvpll1uK9IiLSSMT7D/sYYAEwyt2/AjoAk0OLSkREUiKupODuO919rru/H93+1N0X1vQ+MxtlZu+a2Tozu6aaemeZmUcfdRURkRQJ7RJQdCqMu4ETgb7AuWbWt5J6bYnMqbQkrFhERCQ+8T6SmoghwDp3/xDAzB4HTgPWVKh3E/BH4OoQY0mIZmoUkeYmzJvFXYCNMduF0bKAmQ0Aurn7c9UdyMwmmNlSM1u6efPm+o9URESAcHsKVklZMODNzNKAO4BxNR3I3e8nMjaCQYMGJW3QnGZqFJHmJsyeQiHQLWa7K7ApZrst0A942czWExklPU83m0VEUifMpJAP9DKzbDPLAM4B5pXtdPdt7t7J3Xu6e09gMXCquy8NMSYREalGaEnB3UuAy4iMb1gLzHb31WY2xcxODatdERFJXJj3FHD3+cD8CmU3VFH3B2HGIs3TpYOPrnLfIyverPXxLug/pC7hiDR4mqpCREQCSgoiIhJQUhARkUCo9xRERBqLVq1GVLlv9+5FCR0zK2tYouGkjHoKIiISUFIQEZGAkoKIiAR0T0EkSWKvL+/Zs6zKepmZA5MRjkil1FMQEZGAkoKIiASUFEREJKCkICIiASUFEREJKCmIiEhASUFERAJKCiIiElBSEBGRgJKCiIgElBRERCSguY9EpEGqbn3th5cvSeiYF+UMTTScZkNJoYHRQh8ikkq6fCQiIgElBRERCSgpiIhIQElBREQCSgoiIhJQUhARkYCSgoiIBJQUREQkoMFrIiI1qDgAdM+eZZXWy8wcmIxwQhVqT8HMRpnZu2a2zsyuqWT/VWa2xsxWmNmLZtYjzHhERKR6ofUUzCwduBs4DigE8s1snruvian2NjDI3Xea2aXAH4GxYcUk0lCU/aIsKvrmP4eMjL6pCkckEGZPYQiwzt0/dPci4HHgtNgK7v5Pd98Z3VwMdA0xHhERqUGY9xS6ABtjtguB6qYovAT4e2U7zGwCMAGge/fu9RWfNHMX9B8SvH7mnRVV1jvt0P7JCEekQQizp2CVlHmlFc0uAAYBt1a2393vd/dB7j6oc+fO9RiiiIjECrOnUAh0i9nuCmyqWMnMRgLXAse4+54Q4xERkRqE2VPIB3qZWbaZZQDnAPNiK5jZAOA+4FR3/zzEWEREJA6hJQV3LwEuAxYAa4HZ7r7azKaY2anRarcCbYA5ZlZgZvOqOJyIiCRBqIPX3H0+ML9C2Q0xr0eG2b6IiNSORjTXoLp1Yh9Z8WZCx4x96kVEpCFRUhCRRueinPJPt89dW1Bl3dF9csMOp0nRhHgiIhJQUhARkYCSgoiIBHRPoRGJnb63qql7oWlM3ysiqaGegohILWVmDsQsq9xfU/kxpqQgIiIBJQUREQkoKYiISEBJQUREAkoKIiIS0COpItLoje6Ty8IP1pYrO/57fVIUTeOmnoKIiASUFEREJKCkIJJCGRl9MWtFRkbfVI
citVR27pra+VNSEBGRgJKCiIgE9PRRI1U2z0pR0Zpy5U2pGysiyaeegog0Ccd/rw+tMzJpnZGpx1HrQD2FOqi41vIz76yotN5ph/ZPRjgiInWmpCAikqCWLbNTHUK90+UjEWkyjux2EG0yMlMdRqOmpCAiIgElBRGROigp2ZTqEOqV7imISJOS890uSWtrz54VlJZuZu/eLWRmHpa0dsOknkIj11SH2oskasNXW5LSTklJEaWlmwEoLf2ckpKipLQbNiUFEWkyXtuwjvnvr+b1DetCb6u4eHG1242VkoKINAlFe/ey8vPI9f0Vn2+iaO/e0NoqKfk3UFyhtJiSks9CazNZlBREpEmYu3pZue2n1iyrombdFRe/V0X5u6G1mSyhJgUzG2Vm75rZOjO7ppL9mWb2RHT/EjPrGWY8ItI0FW77ki/37CpXtnX3Lj75z5ehtGfWsVbljUloScHM0oG7gROBvsC5ZlbxTuglwJfufjBwB/CHsOIRkabrtY8/qLT81Q2Vl9dVZubBtSpvTMLsKQwB1rn7h+5eBDwOnFahzmnAQ9HXTwIjzMxCjElEmqDhPb5Xq/K6MsvELLtCWTZmjX80dZhJoQuwMWa7MFpWaR13LwG2AY2//yUiSdVl3/3YL7NVubIOWa3osu9+obWZmdmTb/4JTYtuN35hDl6r7Be/J1AHM5sATADo3r173SMLSapmQ22Kk3IlWypnstX5qx+jvz+QmcteD7bP6Dsw1PbM0sjIyKWoaBkZGQMwaxrP7YT5KQqBbjHbXYGK48GDOmbWAmgHbK14IHe/390Hufugzp07hxSuiDRmGenpHPadAwHo/50DyUhPD73N9PT9yMo6kvT09qG3lSxh9hTygV4WufD2CXAOcF6FOvOAHwNvAGcBL7n7t3oKIiLxOKrHwXRrtx892ifvKrRZVtLaSobQkoK7l5jZZcACIB34s7uvNrMpwFJ3nwfMBP5qZuuI9BDOCSseEWkekpkQmqJQJ8Rz9/nA/AplN8S83g2cHWYMIiISv6ZxZ0REROqFkoKIiASUFEREJKCkICIiASUFEREJKCmIiEjAGttYMTPbDGxIdRwh6gR8keogJCE6d41bUz9/Pdy9xikhGl1SaOrMbKm7D0p1HFJ7OneNm85fhC4fiYhIQElBREQCSgoNz/2pDkASpnPXuOn8oXsKIiISQz0FEREJKCmIiEhASSFBZrbXzArMbJWZzTGzferhmIPMbHo1+w80syfr2o5Ur8K5fdbM6nVZLTMbZ2Z3RV/faGZX1+fxpWYx57jsr6eZdTSzf5rZ9rLz0xwpKSRul7vnuns/oAj4n9idFlGr79fdl7r7pGr2b3L3sxILV2oh9txuBSamOiCpd2XnuOxvPbAbuB5o1klaSaF+vAocHP21sdbM/gQsA7qZ2fFm9oaZLYv2KNoAmNlgM1tkZsvN7E0za2tmPzCz56L7j4n5FfN2dH9PM1sV3Z9lZg+a2cro/mOj5ePMbK6Z/cPM3jezP6boO2kq3gC6lG2Y2WQzyzezFWb225jyi6Jly83sr9GyU8xsSfT8/J+ZfTcF8Uuc3H2Hu79GJDk0W0oKdWRmLYATgZXRokOAh919ALADuA4Y6e4DgaXAVWaWATwBXO7uOcBIYFeFQ18NTHT3XGB4JfsnArj7YcC5wEP2zWKxucBY4DBgrJl1q6/P25yYWTowgsha4pjZ8UAvYAiR7/hwMzvazL4PXAv8MHo+L48e4jUgL/r/hceBXyT5I0jVWsX86Hoq1cE0JKEux9nEtTKzgujrV4msN30gsMHdF0fL84C+wOtmBpBB5JfnIcCn7p4P4O7/AYjWKfM6cLuZzQLmunthhf1HATOi73/HzDYAvaP7XnT3bdFjrgF6ABvr6XM3B2XntifwFvBCtPz46N/b0e02RJJEDvCku38B4O5bo/u7Ak+Y2QFEzv1HSYle4rEr+oNLKlBPIXGx1yR/5u5F0fIdMXUMeCGmXl93vyRaXu
0AEXefCvwX0ApYbGaHVqhi335XYE/M670o+ddW2T8YPYj8Y152T8GA38ecz4PdfSZVn88ZwF3R3tx/A1mV1BFpUJQUwrUYONLMDgYws33MrDfwDnCgmQ2OlreNXoYKmNn33H2lu/+ByGWniknhFeD8aN3eQHfg3VA/TTMT7W1NAq42s5bAAuDimPtCXczsO8CLwBgz6xgt7xA9RDvgk+jrHyc1eJEE6RdkiNx9s5mNAx4zs8xo8XXu/p6ZjQVmmFkrIvcLRlZ4+xXRm8d7gTXA34EDYvb/CbjXzFYCJcA4d99T4RKT1JG7v21my4Fz3P2vZtYHeCP6PW8HLnD31WZ2M/AvM9tL5PLSOOBGYI6ZfULkB0J2Kj6DxM/M1gP7AhlmdjpwvLuvSW1UyaVpLkREJKDLRyIiElBSEBGRgJKCiIgElBRERCSgpCAiIgElBZEQmVl7M/tpquMQiZeSgkgtVBxkGIf2gJKCNBoavCZNnpm1BmYTmYsoHbgJ+BC4E2hNZFqQEUAxcA8wiMiAwKvc/Z/RAYgnEZmmojXwQzObDIwBMoGn3P03VTQ/FfhedC6lF4D9icyT9Ew0tllEJkfsAJwRPV428Ki7/zZa5wIiI6szgCXAT919b718OSIVKClIczAK2OTuJwGYWTsio47Hunu+me1LZFT55RCZeTY619TC6BQiAEcA/d19a4XZUg2YZ2ZHu/srlbR9DdCvbPI1MzsGuBJ4JhrHMCJTYFwQPV4/YCeQb2bPE5lLayxwpLsXR6dlPx94uD6/IJEySgrSHKwEppnZH4DngK+ofJba6maefSFm9tOqZkutLCmU4+7/MrO7o3MmjQb+5u4l0WkzXnD3LdFY5hKZCbcEOJxIkoDIBImfJ/pFiNRESUGavOhcU4cDPwJ+Dyyk8llNq5s4quLst7939/sSDOmvRH7tnwNcHBtqhXoebeshd/9Vgm2J1IpuNEuTZ2YHAjvd/RFgGpF1LiqbpTbemWermi21Ml8DbSuU/QW4AsDdV8eUH2dmHaKTJJ5OZE2NF4Gzyo4f3d+jNp9fpDbUU5Dm4DDgVjMrJXIz+VIiv8ArzlIb18yz7r6wstlSqeSyjrtvMbPXo8uo/t3dJ7v7Z2a2Fni6QvURy0twAAAAgElEQVTXiPQiDiZyo3kpgJldR+T+Rlo0/onAhjp/KyKV0CypIklmZvsQuc8xMGaFvHHAIHe/LJWxiejykUgSmdlIIosszShLCCINiXoKIvUguurai5XsGlH2RJFIY6CkICIiAV0+EhGRgJKCiIgElBRERCSgpCAiIgElBRERCfw/wQEZp/wRE5kAAAAASUVORK5CYII=\n",
|
| 850 |
+
"text/plain": [
|
| 851 |
+
"<Figure size 432x288 with 1 Axes>"
|
| 852 |
+
]
|
| 853 |
+
},
|
| 854 |
+
"metadata": {
|
| 855 |
+
"needs_background": "light"
|
| 856 |
+
},
|
| 857 |
+
"output_type": "display_data"
|
| 858 |
+
}
|
| 859 |
+
],
|
| 860 |
+
"source": [
|
| 861 |
+
"sns.boxenplot(x='score_type', y='score', hue='class', data=data, width=0.5, linewidth=1.0, palette=\"Set3\")"
|
| 862 |
+
]
|
| 863 |
+
},
|
| 864 |
+
{
|
| 865 |
+
"cell_type": "code",
|
| 866 |
+
"execution_count": 74,
|
| 867 |
+
"metadata": {},
|
| 868 |
+
"outputs": [
|
| 869 |
+
{
|
| 870 |
+
"data": {
|
| 871 |
+
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAYEAAAEBCAYAAACe6Rn8AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4wLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvqOYd8AAAF4NJREFUeJzt3XuQXGWdxvHvQ7gZFJ0IDIsmGWKyQFBh3UFThauT6AIumkBlBUtFECSwCBIUd6HQcIsoBQIrFJqwrCBQLiugyQpiEDIoSJSAgAly3UwSFpRLAiFcwu23f5zTVNvpTJ+e7p6emff5VHV1+vR5+/y630w/fd5zU0RgZmZp2qzdBZiZWfs4BMzMEuYQMDNLmEPAzCxhDgEzs4Q5BMzMEuYQMDNLmEPAzCxhDgEzs4Rt3u4Catluu+2iq6ur3WWYmQ0rd91119MRsX2t+YZ8CHR1dbF06dJ2l2FmNqxIWllkPg8HmZklzCFgZpYwh4CZWcIKhYCkd0u6UNIdkl6UFJK6CrbdTNLJkvokvSzpXkkzGynazMyao+iawETgIGAt8Js6l3EmcBpwEfAJYAnwE0n/VOfrmJlZkxXdO+jXEdEJIOlLwD5FGknaATgR+E5EnJtPXixpIvAd4IY66zUzsyYqtCYQEW8M8PX3BbYErqyYfiXwPkk7D/B1zcysCVq9YXh3YAPwSMX05fn95BYv38zM+tHqg8XGAM/GxhcyXlP2/EYkzQJmAXR2dtLb29uyAltp6tSpTXmdxYsXN+V1rD7N6D/3XXv4b6+4VoeAgGpXsld/jSJiPjAfoLu7O3p6eppf2SDYOPs2JqnQfDb4avWL+27o8t9eca0eDloDdEiq/NLvKHvezMzapNUhsBzYCnhPxfTStoD7W7x8MzPrR6tD4EbgFeBzFdM/DyyLiBUtXr6ZmfWj8DYBSf+c//Pv8/tPSHoKeCoibs3neQ24PCKOAIiIJyWdD5ws6XngbuBgYBowo0nvwczMBqieDcM/qXh8cX5/K9CT/3tUfit3CrAeOB7YEXgQOCgi/qeuSs3MrOkKh0BE9LtHz6bmiYjXgbn5zczMhhCfRdTMLGEOATOzhDkEzMwS5hAwM0uYQ8DMLGEOATOzhDkEzMwS5hAwM0uYQ8DMLGEOAUvSuPHjkdTQDWj4NcaNH9/mT8JS1+qLypgNSatXreLaBx5vdxnM3HWndpdgifOagJlZwhwCZmYJcwiYmSXMIWBmljCHgJlZwhwCZmYJcwiYmSXMIWBmljCHgJlZwhwCZmYJcwiYmSXMIWBmljCHgJlZwhwCZmYJcwiYmSWsUAhIGivpGknPSVon6TpJ4wq2HSfpckmrJL0o6SFJcyVt01jpZmbWqJoXlZE0GrgF2AAcCgQwF1gs6f0R8UI/bbcBfgVsAXwTWAXsBZwOTAIObvQNmJnZwBW5stiRwARgl4h4BEDSfcDDwFHAef203Zvsy37fiFiUT1ssaQxwoqTREfHigKs3M7OGFBkOmg4sKQUAQESsAG4HZtRou2V+v65i+rP5slWwTjMza4EiIbA7sKzK9OXA5Bptf0W2xnC2pMmS3ippGnA88IP+hpLMzKz1ioTAGGBtlelrgI7+GkbEy8CH8+UsB54HbgZ+DhxbV6VmZtZ0RbYJQLYxuFLNoRxJWwNXAzsAh5BtGP4gMAd4DfiXTbSbBcwC6OzspLe3t2CZw9NIf3/WP/d/+/izB0VU+34vm0H6C/CziDiqYvrFwKcjYvt+2n4ZuAiYGBGPlk0/EpgP7BkR9/a3/O7u7li6dGnNNzJcSaJWH1jzSeLaBx5vdxnM3HUn93+bjPS/PUl3RUR3rfmKDActJ9suUGkycH+Ntu8D1pYHQO73+f1uBZZvZmYtUiQEFgJTJE0oTZDURbb758Iabf8MdEiaWDH9Q/n9/xUr08zMWqFICFwC9AELJM2QNB1YAKwG5pVmkjRe0muS5p
S1vYxsY/ANkg6VNFXS14FzgbvIdjM1M7M2qRkC+W6c04CHgCuAq4AVwLSIWF82q4BR5a8ZEX3AFOAesqOMbyA7+Gw+8I8R8UZT3oWZmQ1Iob2DImIVMLPGPH1U2WMoIu4HDhpIcWZm1lo+i6iZWcIcAmZmCXMImJklzCFgZpYwh4CZWcIcAmZmCSt6AjmzESVO3RZ+vGu7y8jqMGsjh4AlSaevGzonkDut3VVYyjwcZGbDztiusUhq6AY01H5s19g2fwrN4TUBMxt2Hlv5GBesuaCtNcweM7uty28WrwmYmSXMIWBmljCHgJlZwhwCZmYJcwiYmSXMIWBmljCHgJlZwhwCDegaN77tB6x0jRvf5k/BzIYzHyzWgJWrVxG9d7a1BvXs1dblm9nw5jUBM7OEOQTMzBLmEDAzS5hDwMwsYQ4BM7OEOQTMzBLmEDAzS5hDwMwsYYVCQNJYSddIek7SOknXSRpXdCGSdpP0E0lPS3pJ0oOSjh942WZm1gw1jxiWNBq4BdgAHAoEMBdYLOn9EfFCjfbdefte4EvAc8Ak4K0NVW5mZg0rctqII4EJwC4R8QiApPuAh4GjgPM21VDSZsDlwM0RcWDZU4sHXLGZmTVNkeGg6cCSUgAARMQK4HZgRo22PcBk+gkKMzNrnyIhsDuwrMr05WRf8P35cH6/taQlkl6V9KSk70l6Sz2FmplZ8xUZDhoDrK0yfQ3QUaPtTvn91cBFwElAN3AGMBY4sFojSbOAWQCdnZ309vYWKDNd/nyGN/ff8DUS+q7oqaSjyjQVaFda07gyIubk/+6VNAr4jqTJEXH/RguLmA/MB+ju7o6enp6CZabJn8/w5v4bvkZC3xUZDlpLtjZQqYPqawjlnsnvb6qYvii/37PA8s3MrEWKhMBysu0ClSYDG/2Kr9IWNl6TKK1FvFFg+WZm1iJFQmAhMEXShNIESV3A3vlz/fkF2fEF+1VM3ze/X1qoSjMza4kiIXAJ0AcskDRD0nRgAbAamFeaSdJ4Sa9JKo39ExHPAN8GjpZ0lqSPSzoJmANcXr7bqZmZDb6aG4Yj4gVJ04DzgSvIhnJuBmZHxPqyWQWMYuNgOQN4HjgGOBF4AjgHOLPh6s3MrCGF9g6KiFXAzBrz9FFlj6GICLKDxXzAmJnZEOOziJqZJcwhYGaWsKIHi5mZDRlx6rbw73Nqz9hCx5+6bVuX3ywOATMbdnT6Oi5Yc0Fba5g9ZjZxWltLaAoPB5mZJcwhYGaWMIeAmVnCHAJmZglzCJiZJcwhYGaWMIeAmVnCHAJmZgnzwWINiFO3hcUfa38NVrex48Yxc9edas84CHWYtZNDoAE6fR3Re2d7a+jZa0QctTjYVq1c2fBrSCI7Sa7Z8OXhIDOzhDkEzMwS5hAwM0uYQ8DMLGEOATOzhDkEzMwS5hAwM0uYQ8DMLGEOATOzhDkEzMwS5hAwM0uYQ8DMLGGFQkDSWEnXSHpO0jpJ10mq+/SHkk6WFJJuq79UMzNrtpohIGk0cAuwK3AocAgwCVgsaZuiC5I0ATgFeHJgpZqZWbMVOZX0kcAEYJeIeARA0n3Aw8BRwHkFl/V94Cpgl4LLNTOzFisyHDQdWFIKAICIWAHcDswoshBJnwU+AJw8kCLNzKw1ioTA7sCyKtOXA5NrNZbUAZwP/GtErKmvPDMza6UiITAGWFtl+hqgo0D7c4CHgMuKl2VmZoOh6Nh8tWvoqVYjSf8AfAH4QNRxHT5Js4BZAJ2dnfT29hZtmiR/Pu3jzz5tI6H/i4TAWrK1gUodVF9DKDcPuBR4TNI7ypY5Kn/8UkRsqGwUEfOB+QDd3d3R09NToMx0+fNpH3/2aRsJ/V8kBJaTbReoNBm4v0bb3fLb0VWeWwucAFxQoAYzM2uBIiGwEDhX0oSI+F8ASV3A3sBJNdpOrTLtAmAUcBzwSJXnzcxskBQJgUuAY4EFkr5Btn3gTGA12XAPAJLGA48CZ0TEGQAR0Vv5YpKeBTav9p
yZmQ2umnsHRcQLwDSyPXyuIDvgawUwLSLWl80qsl/4Ph+RmdkwUWjvoIhYBcysMU8fBfYYioieIss0M7PW8692M7OEOQTMzBLmE7mZ2bDz7vHvZvaY2W2vYSRwCJjZsLO6b3XDryGJOk5kMGJ5OMjMLGEOATOzhDkEzMwS5hAwM0uYQ8DMLGEOATOzhDkEzMwS5hAwM0uYQ8DMLGEOATOzhDkEzMwS5hAwM0uYQ8DMLGEOATOzhPlU0g0YP3Yc6tmr7TWYmQ2UQ6ABfatWNvwaPqe5mbWTh4PMzBLmEDAzS5hDwMwsYQ4BM7OEOQTMzBLmEDAzS5hDwMwsYYVCQNJYSddIek7SOknXSap5lJKkbknzJT0g6UVJqyRdJWnnxks3M7NG1QwBSaOBW4BdgUOBQ4BJwGJJ29Ro/hlgd+B7wCeAk4APAEsljW2gbjMza4IiRwwfCUwAdomIRwAk3Qc8DBwFnNdP27Mj4qnyCZJuB1bkrztnIEWbmVlzFBkOmg4sKQUAQESsAG4HZvTXsDIA8mkrgaeAd9VXqpmZNVuRENgdWFZl+nJgcr0LlLQbsAPwp3rbmplZcxUZDhoDrK0yfQ3QUc/CJG0O/IBsTeDSfuabBcwC6OzspLe3t57FDDsj/f2NZO674c39B6p1BktJrwDfjYiTK6Z/C/i3iCh8JlJJPwCOAPaPiEVF2nR3d8fSpUuLLmLY8VlEhy/33fA20vtP0l0R0V1rviJf4GvJ1gYqdVB9DWFTBX2b7Nf9oUUDwMzMWqtICCwn2y5QaTJwf5GFSDqFbPfQr0TEFcXLMzOzViqyYXghMEXShNIESV3A3vlz/ZL0FWAucEpEXDiwMs3MrBWKhMAlQB+wQNIMSdOBBcBqYF5pJknjJb0maU7ZtM8AFwA3ArdImlJ2q3vPIjMza66aw0ER8YKkacD5wBWAgJuB2RGxvmxWAaP462DZL5++X34rdyvQM+DKzcysYYX27ImIVcDMGvP0kX3hl087DDhsYKWZmVmr+SyiZmYJcwiYmSXMIWBmljCHgJlZwhwCZmYJcwiYmSXMIWBmljCHgJlZwhwCZmYJcwiYmSXMIWBmljCHgJlZwhwCZmYJcwiYmSXMIWBmljCHgJlZwhwCZmYJcwiYmSXMIWBmljCHgJlZwhwCZmYJcwiYmSXMIWBmljCHgJlZwhwCZmYJcwiYmSWsUAhIGivpGknPSVon6TpJ4wq23VrSOZKekPSSpDskfaSxss3MrBlqhoCk0cAtwK7AocAhwCRgsaRtCizjUuBIYA7wSeAJ4JeS9hxo0WZm1hybF5jnSGACsEtEPAIg6T7gYeAo4LxNNZS0B/BZ4PCI+GE+7VZgOXAGML2h6s3MqpDUlPkiohnlDGlFhoOmA0tKAQAQESuA24EZBdq+Clxd1vY14L+AfSVtVXfFZmY1RERTbikoEgK7A8uqTF8OTC7QdkVEvFil7ZbAxALLNzOzFikSAmOAtVWmrwE6Gmhbet7MzNqkyDYBgGrrRUUG3TSQtpJmAbMAOjs76e3tLbCooWfq1KmF5qs1Lrl48eJmlGN1KtJ/7jsb7oqEwFqq/2LvoPqv/HJrgGq7knaUPb+RiJgPzAfo7u6Onp6eAmUOPamMKY5U7j9LQZHhoOVkY/uVJgP3F2i7c76baWXbV4BHNm5iZmaDpUgILASmSJpQmiCpC9g7f65W2y2AT5e13Rw4GFgUERvqrNfMzJqoSAhcAvQBCyTNkDQdWACsBuaVZpI0XtJrkuaUpkXEPWS7h14g6UuSPka2e+jOwKnNextmZjYQNUMgIl4ApgEPAVcAVwErgGkRsb5sVgGjqrzmF4EfAnOB64GxwH4RcXfD1ZuZWUMK7R0UEauAmTXm6aPKXj8R8RLw1fxmZmZDiM8iamaWMIeAmVnCHAJmZgnTUD8gRtJTwMp219FC2wFPt7sIGxD33fA20vtvfERsX2umIR8CI52kpRHR3e46rH7uu+HN/ZfxcJCZWc
IcAmZmCXMItN/8dhdgA+a+G97cf3ibgJlZ0rwmYGaWsCRDQNJhkqLs9rykeyUdm5/ldLDqOE1SXatiknol9baopBGnSl+/IulRSWdJ2rrNtfVJuqzscanWrrYVNYJU6fvy28fzec6StEjSM/n0w9pc9qAbtC+8IerTwGPAtvm/LwR2AOb016iJ/gO4sc42x7SikASU+vptwIHAyfm/j2tnUTYoSn1frnQtlOOAe4CfA18YzKKGitRD4J6IKF3YZpGkicBsqoSAsusIbhERrzRr4RHxGBv/56zVptaFfKy68r6+SdIk4AhJx0fEG+0szFquvO8rvT0i3sj/9pMMgSSHg/pxJ/A2STvkq+pXSjpc0gNkV0LbH0DSaElnS1qRDy+skHSKpL/6PCVtL+liSaslbcjvr5C0Vf78RsNBko6X9CdJL0laK2mppAPLnt9oOEjSLpJ+KunZvN0SSftVzHNavro7SdL1ktZLWilpTmXdibgbeAvZUaMASNpZ0lWSnsr7657yz75svj3yz/uZ/PN+UNLJZc/vI+kGSU9IelHSMklfkzRqcN6aFeUfAF4TqLQz8DpQuk7CVGBP4HTgSaAv32bwS7JLZJ4J/BGYAnyT7FrMXwOQ1AH8Np82F7iPbKhpBrAlsNFV1SR9DvgucAbwG7IvqfdT/RrPpTY7AbcBzwPHAs8BXwaul/TJiPhFRZOfkl3f4XzgU/l7W51PS0kX2Wf1DICkscDvyPr5BOApsivgXSvpgIhYmM/3QaCX7NKoJ5CtyU0i66eSCcDNZMOLLwPdwGnA9sBJLX1XVs2oim19ERGvt62aoSYikrsBhwEB7EIWhB3AUWQB8LN8nj7gRWDHiraH5G0/UjH9FLK1hR3yx2fkr/d3/dRxWtYFbz6+CLi7Ru29QG/Z43OB14CJZdNGAQ+Wv1ZpWcAXK17vj2SX+mx7vwxiXx+ef2bHls13KdkX/zsr2t9ENpxQevxrstAcXXD5ypd7CrAW2KzsuT7gsiq1drX7cxsJt7LPs/J2W5V5J+bPHdbuugf7luIwQLkHgFeBNcDFZFdNO7zs+SUR8eeKNvuRndDut5I2L92ARWTXU56Sz7cPcGdE/KGOeu4E9pR0oaSPSxpdoM1H8jrfHPOM7FfOj/PX2rZi/usrHi8DxtVR43BV3teXAvMi4qKy5/cDbgCeq+jXXwJ7SNo274+9gasi4sVNLUjS30iaJ2kl2Q+DV8nWBt9BtjZog+tAYK+y2xHtLWdoSX046ECy1fnngZUR8XLF809UabMDMJ7sD7uad5bd31tnPT8Ctib7T3oM8KqkG4CvRnbltmrGANWC5s9kv0I7gHVl09dUzLchX+ZIV+rr7cmucneMpN9FxI/y53cg2zC4qY2D7yT7Qt+Mfjbm59tXFgI7ka19PQC8BBxAtjaQwmc91CyLTW8YTl7qIVDrP0e1ffifIbvG8kGbaNOX3z8NvKueYiJbL50HzMu3KexDto3gauBDm2i2BtixyvQdyeqv/NJP1Zt9LekWsm0050i6NrLraD9Dth3m7E20f5xsmO0N+u/X95BtAzgkIq4sTZT0qcbfglnzpT4cNBA3AmOB9RGxtMqtdH7yRcAHJe0xkIVExNqIuBr4b+C9/cx6KzCl/ACjfC+Ug4E/RMTzA1n+SBYRG4Cvk/36Lx13cSPZxt3lm+jXDfkQ0G3A5yW9ZRMvXxrCe3NNUdIWwOda8mbMGpT6msBAXAV8EbhZ0nfJhny2JPsFOB04IP+yOB/4LPArSXPJNsBuR7Z30NHVvpwlzScbmrqDbC+VvyXbEL2on3rOJ9sAdpOkU8mGfo7J2+7f6JsdqSJioaQ7gRMlXUR2bMjvgV/nj/vIhtLeC0yIiNK2ohPJgveOvP8fI9sbaM+IOA74E9k2o29Jep0sDE4YvHdm9ZD0UbIhwtLadLek9QARcU3bChtEDoE6RcSrkvYl29VvFtlupS8Aj5JtdH0ln+9ZSXuTbRA8iWxM+S/ALa
V5qridLGAOAd5ONgRxJXBqP/U8LunDZMMY3we2IjsCcv+IqPdo5NR8g2zD79ERcb6k0q6cZ5F9MTxDtuH88lKDiLgz79czyHYB3YrsS/+H+fOvSDqAbE+vH5ENx/0nsAq4ZHDeltXhdOCjZY+/nN8g26Y24vksomZmCfM2ATOzhDkEzMwS5hAwM0uYQ8DMLGEOATOzhDkEzMwS5hAwM0uYQ8DMLGEOATOzhP0//t3ZjeY2sNgAAAAASUVORK5CYII=\n",
|
| 872 |
+
"text/plain": [
|
| 873 |
+
"<Figure size 432x288 with 1 Axes>"
|
| 874 |
+
]
|
| 875 |
+
},
|
| 876 |
+
"metadata": {
|
| 877 |
+
"needs_background": "light"
|
| 878 |
+
},
|
| 879 |
+
"output_type": "display_data"
|
| 880 |
+
}
|
| 881 |
+
],
|
| 882 |
+
"source": [
|
| 883 |
+
"draw_plot([pres_all, recalls_all, f1_all], title='Scores for All Elements')"
|
| 884 |
+
]
|
| 885 |
+
},
|
| 886 |
+
{
|
| 887 |
+
"cell_type": "code",
|
| 888 |
+
"execution_count": 75,
|
| 889 |
+
"metadata": {},
|
| 890 |
+
"outputs": [
|
| 891 |
+
{
|
| 892 |
+
"data": {
|
| 893 |
+
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAYEAAAEBCAYAAACe6Rn8AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4wLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvqOYd8AAAF2hJREFUeJzt3X2QXFWdxvHvQ3gzKDoRGBadZIhhgaDCuoOmFtedRAVcNIHKCpaKIEpgESQobkGh4S2LUiBBodCEZQWBclkBTVYQAySDgkQJCJggQthMEhYUJIEQXgKB3/5xb1NtpzN9e6Z7embO86nq6vTpe/r+uk+mn77n3u6riMDMzNK0VasLMDOz1nEImJklzCFgZpYwh4CZWcIcAmZmCXMImJklzCFgZpYwh4CZWcIcAmZmCdu61QXUstNOO0VnZ2eryzAzG1buvffev0TEzrWWG/Ih0NnZydKlS1tdhpnZsCJpVZHlPB1kZpYwh4CZWcIcAmZmCSsUApLeKekSSXdLelFSSOos2HcrSadL6pX0sqQHJE0fSNFmZtYYRbcEJgCHA+uAX9W5jnOBs4BLgY8BS4AfS/rnOh/HzMwarOjRQb+MiHYASV8EDizSSdIuwKnAtyLiwrx5saQJwLeAm+us18zMGqjQlkBEvN7Pxz8I2Ba4pqL9GuA9knbv5+OamVkDNHvH8D7ARmBFRfvy/Hpik9dvZmZ9aPaXxcYAz8bmJzJeW3b/ZiTNAGYAtLe309PT07QCm2ny5MkNeZzFixc35HGsPo0YP49da/hvr7hmh4CAameyV1+dImIeMA+gq6sruru7G1/ZINg8+zYnqdByNvhqjYvHbujy315xzZ4OWgu0Sap8028ru9/MzFqk2SGwHNgOeFdFe2lfwENNXr+ZmfWh2SFwC/AK8JmK9s8CyyJiZZPXb2ZmfSi8T0DSv+T//Pv8+mOSngaejog78mU2AVdFxBcAIuIpSXOA0yU9D9wHHAFMAaY16DmYmVk/1bNj+McVty/Lr+8AuvN/j8ov5c4ANgAnA7sCfwQOj4j/qatSMzNruMIhEBF9HtGzpWUi4jVgdn4xM7MhxL8iamaWMIeAmVnCHAJmZglzCJiZJcwhYGaWMIeAmVnCHAJmZglzCJiZJcwhYGaWMIeAmVnCHAJmZglzCJiZJcwhYGaWMIeAmVnCHAJmZglzCJiZJcwhYGaWMIeAJWnsuHFIGtAFGPBjjB03rsWvhKWunnMMm40Ya1av5oaHn2h1GUzfa7dWl2CJ85aAmVnCHAJmZglzCJiZJcwhYGaWMIeAmVnCHAJmZglzCJiZJaxQCEjqkHS9pOckrZd0o6SxBfuOlXSVpNWSXpT0iKTZknYYWOlmZjZQNb8sJmk0sAjYCBwFBDAbWCzpvRHxQh99dwBuA7YBvgGsBvYHzgb2AI4Y6BMwM7P+K/KN4WOB8cCeEbECQNKDwKPAccBFffQ9gOzN/qCIWJi3LZY0BjhV0uiIeLHf1ZuZ2YAUmQ6aCiwpBQBARKwE7gKm1ei7bX69vqL92XzdKlinmZk1QZEQ2AdYVqV9OTCxRt/byLYYzpc0UdKbJU0BTga+39dUkpmZNV+REBgDrKvSvhZo66tjRLwMfDBfz3LgeeB24GfAiXVVamZmDVf0V0SjSlvNqRxJ2wPXAbsAR5LtGH4/MAvYBPzrFvrNAGYAtLe309PTU7DM4WmkPz/rm8e/dfzagyKqvb+XLSD9GfhpRBxX0X4Z8MmI2LmPvl8CLgUmRMRjZe3HAvOA/SLigb7W39XVFUuXLq35RIYrSdQaA2s8SUPmp6Q9/q0x0v/2JN0bEV21lisyHbScbL9ApYnAQzX6vgdYVx4Aud/m13sXWL+ZmTVJkRBYAEySNL7UIKmT7PDPBTX6/glokzShov0D+fX/FSvTzMyaoUgIXA70AvMlTZM0FZgPrAHmlhaSNE7SJkmzyvpeSbYz+G
ZJR0maLOlrwIXAvWSHmZqZWYvUDIH8MM4pwCPA1cC1wEpgSkRsKFtUwKjyx4yIXmAScD/Zt4xvJvvy2TzgoxHxekOehZmZ9Uuho4MiYjUwvcYyvVQ5YigiHgIO709xZmbWXP4VUTOzhDkEzMwS5hAwM0uYQ8DMLGEOATOzhDkEzMwS5hAwM0uYQ8DMLGEOATOzhDkEzMwS5hAwM0uYQ8DMLGEOATOzhDkEzMwS5hAwM0uYQ8DMLGEOATOzhDkEzMwS5hAwM0uYQ2AAOseOQ9KALsCA+neOHdfiV8Fs8HV0drT8b6+js6PFr0JjFDrRvFW3as1qoueeltag7v1bun6zVnh81eNcvPbiltYwc8zMlq6/UbwlYGaWMIeAmVnCHAJmZglzCJiZJcwhYGaWMIeAmVnCCh0iKqkDmAN8FBBwGzAzIlYX7L83cA4wGdgBWA1cFhHf6U/RZgMVZ+4IP9qr1WVkdZi1UM0QkDQaWARsBI4CApgNLJb03oh4oUb/rrx/D/BF4DlgD+DNA6rcbAB09npuePiJVpfB9L12I85qdRWWsiJbAscC44E9I2IFgKQHgUeB44CLttRR0lbAVcDtEXFY2V2L+12xmZk1TJF9AlOBJaUAAIiIlcBdwLQafbuBifQRFGZm1jpFQmAfYFmV9uVkb/B9+WB+vb2kJZJelfSUpO9KelM9hZqZWeMVmQ4aA6yr0r4WaKvRd7f8+jrgUuA0oItsJ3EHcFi1TpJmADMA2tvb6enpKVBmuvz6DG8ev+FrJIxd0R+QiyptKtCvtKVxTUTMyv/dI2kU8C1JEyPioc1WFjEPmAfQ1dUV3d3dBctMk1+f4c3jN3yNhLErMh20jmxroFIb1bcQyj2TX99a0b4wv96vwPrNzKxJioTAcrL9ApUmApt9iq/SFzbfkihtRbxeYP1mZtYkRUJgATBJ0vhSg6RO4ID8vr78nOz7BQdXtB+UXy8tVKWZmTVFkRC4HOgF5kuaJmkqMB9YA8wtLSRpnKRNkkpz/0TEM8A3geMlnSfpI5JOA2YBV5UfdmpmZoOv5o7hiHhB0hSyn424mmwq53ayn43YULaogFFsHiznAM8DJwCnAk8CFwDnDrh6MzMbkEJHB+W/ETS9xjK9VDliKCKC7Mti/sKYmdkQ418RNTNLmEPAzCxhDgEzs4Q5BMzMEuYQMDNLmEPAzCxhDgEzs4Q5BMzMEuYQMDNLmEPAzCxhDgEzs4Q5BMzMEuYQMDNLmEPAzCxhDgEzs4Q5BMzMEuYQMDNLmEPAzCxhhU4vaWY2lMSZO8J3ZrW0hpPP3LGl628Uh4CZDTs6ez0Xr724pTXMHDOTOKulJTSEp4PMzBLmEDAzS5hDwMwsYQ4BM7OEOQTMzBLmEDAzS1ihEJDUIel6Sc9JWi/pRklj612ZpNMlhaQ76y/VzMwarWYISBoNLAL2Ao4CjgT2ABZL2qHoiiSNB84AnupfqWZm1mhFvix2LDAe2DMiVgBIehB4FDgOuKjgur4HXAvsWXC9ZmbWZEWmg6YCS0oBABARK4G7gGlFViLp08D7gNP7U6SZmTVHkRDYB1hWpX05MLFWZ0ltwBzg3yJibX3lmZlZMxUJgTHAuirta4G2Av0vAB4BrixelpmZDYaic/NRpU21Okn6R+BzwPsiotpjbKnfDGAGQHt7Oz09PUW7Dqo4c0dY/OGW1zBUXx8rxuM3fI2EsSsSAuvItgYqtVF9C6HcXOAK4HFJbytb56j89ksRsbGyU0TMA+YBdHV1RXd3d4EyB58mryd67mltDd37E2d1t7QGG5ih+v/bahsJY1ckBJaT7ReoNBF4qEbfvfPL8VXuWwecArT292DNzBJWJAQWABdKGh8R/wsgqRM4ADitRt/JVdouBkYBJwErqtxvZmaDpEgIXA6cCMyX9HWy/QPnAmvIpnsAkDQOeAw4JyLOAYiInsoHk/QssHW1+8zMbHDVDIGIeEHSFLLDPK8m2yF8OzAzIjaULSqyT/
j+PSIb8jrGjmX6Xru1ugw6xtb96ytmDVXo6KCIWA1Mr7FMLwWOGIqI7iLrNGum1atWDfgxJFHHQW9mQ5I/tZuZJcwhYGaWMIeAmVnCHAJmZglzCJiZJcwhYGaWMIeAmVnCHAJmZglzCJiZJcwhYGaWMIeAmVnCHAJmZglzCJiZJcwhYGaWMIeAmVnCHAJmZglzCJiZJazQmcXMzIaSd457JzPHzGx5DSOBQ8DMhp01vWsG/Bg+PWjG00FmZglzCJiZJcwhYGaWMIeAmVnCHAJmZglzCJiZJcwhYGaWMIeAmVnCCoWApA5J10t6TtJ6STdKGlugX5ekeZIelvSipNWSrpW0+8BLNzOzgaoZApJGA4uAvYCjgCOBPYDFknao0f1TwD7Ad4GPAacB7wOWSuoYQN1mZtYARX424lhgPLBnRKwAkPQg8ChwHHBRH33Pj4inyxsk3QWszB93Vn+KNjOzxigyHTQVWFIKAICIWAncBUzrq2NlAORtq4CngXfUV6qZmTVakRDYB1hWpX05MLHeFUraG9gF+EO9fc3MrLGKTAeNAdZVaV8LtNWzMklbA98n2xK4oo/lZgAzANrb2+np6alnNcnx69M6fu2HN49f8Z+SrvZ7q+rH+i4F/gE4JCKqBUu2soh5wDyArq6u6O7u7seq0uHXp3X82g9vHr9iIbCObGugUhvVtxCqkvRNsk/3R0XEwqL9zMyseYqEwHKy/QKVJgIPFVmJpDPIDg/9ckRcXbw8MzNrpiI7hhcAkySNLzVI6gQOyO/rk6QvA7OBMyLikv6VaWZmzVAkBC4HeoH5kqZJmgrMB9YAc0sLSRonaZOkWWVtnwIuBm4BFkmaVHap+8giMzNrrJrTQRHxgqQpwBzgarIdwrcDMyNiQ9miAkbx18FycN5+cH4pdwfQ3e/Kh4BxHWNR9/4tr8HMrL801E+03NXVFUuXLm11GU3jk10PXx674W2kj5+keyOiq9Zy/hVRM7OEOQTMzBLmEDAzS5hDwMwsYQ4BM7OEOQTMzBLmEDAzS5hDwMwsYQ4BM7OEOQTMzBLmEDAzS5hDwMwsYQ4BM7OEOQTMzBLmEDAzS5hDwMwsYQ4BM7OEOQTMzBLmEDAzS5hDwMwsYQ4BM7OEOQTMzBLmEDAzS5hDwMwsYQ4BM7OEOQTMzBJWKAQkdUi6XtJzktZLulHS2IJ9t5d0gaQnJb0k6W5JHxpY2WZm1gg1Q0DSaGARsBdwFHAksAewWNIOBdZxBXAsMAv4OPAk8AtJ+/W3aDMza4ytCyxzLDAe2DMiVgBIehB4FDgOuGhLHSXtC3waOCYifpC33QEsB84Bpg6oejMzG5Ai00FTgSWlAACIiJXAXcC0An1fBa4r67sJ+C/gIEnb1V2xmZk1TJEQ2AdYVqV9OTCxQN+VEfFilb7bAhMKrN/MrC6Sal6KLJeCItNBY4B1VdrXAm0D6Fu638ysoSKi1SUMG0VCAKDaK1okJtWfvpJmADMA2tvb6enpKbCqoWfy5MmFlqv1iWPx4sWNKMfqVGT8PHY23BUJgXVU/8TeRvVP+eXWAtUOJW0ru38zETEPmAfQ1dUV3d3dBcocevxpZHjz+FkKiuwTWE42t19pIvBQgb6754eZVvZ9BVixeRczMxssRUJgATBJ0vhSg6RO4ID8vlp9twE+WdZ3a+AIYGFEbKyzXjMza6AiIXA50AvMlzRN0lRgPrAGmFtaSNI4SZskzSq1RcT9ZIeHXizpi5I+THZ46O7AmY17GmZm1h81QyAiXgCmAI8AVwPXAiuBKRGxoWxRAaOqPObngR8As4GbgA7g4Ii4b8DVm5nZgBQ6OigiVgPTayzTS5WjfiLiJeAr+cXMzIYQ/4qomVnCHAJmZglzCJiZJUxD/Qsxkp4GVrW6jibaCfhLq4uwfvHYDW8jffzGRcTOtRYa8iEw0klaGhFdra7D6uexG948fhlPB5mZJcwhYGaWMIdA681rdQHWbx674c3jh/cJmJklzV
sCZmYJSzIEJB0tKcouz0t6QNKJ+a+cDlYdZ0mqa1NMUo+kniaVNOJUGetXJD0m6TxJ27e4tl5JV5bdLtXa2bKiRpAqY19++Ui+zHmSFkp6Jm8/usVlD7pBe8Mboj4JPA7smP/7EmAXYFZfnRroP4Bb6uxzQjMKSUBprN8CHAacnv/7pFYWZYOiNPblSudCOQm4H/gZ8LnBLGqoSD0E7o+I0oltFkqaAMykSggoO4/gNhHxSqNWHhGPs/l/zlp9ap3Ix6orH+tbJe0BfEHSyRHxeisLs6YrH/tKb42I1/O//SRDIMnpoD7cA7xF0i75pvo1ko6R9DDZmdAOAZA0WtL5klbm0wsrJZ0h6a9eT0k7S7pM0hpJG/PrqyVtl9+/2XSQpJMl/UHSS5LWSVoq6bCy+zebDpK0p6SfSHo277dE0sEVy5yVb+7uIekmSRskrZI0q7LuRNwHvInsW6MASNpd0rWSns7H6/7y175suX3z1/uZ/PX+o6TTy+4/UNLNkp6U9KKkZZK+KmnU4Dw1K8ofALwlUGl34DWgdJ6EycB+wNnAU0Bvvs/gF2SnyDwX+D0wCfgG2bmYvwogqQ34dd42G3iQbKppGrAtsNlZ1SR9Bvg2cA7wK7I3qfdS/RzPpT67AXcCzwMnAs8BXwJukvTxiPh5RZefkJ3fYQ7wify5rcnbUtJJ9lo9AyCpA/gN2TifAjxNdga8GyQdGhEL8uXeD/SQnRr1FLItuT3IxqlkPHA72fTiy0AXcBawM3BaU5+VVTOqYl9fRMRrLatmqImI5C7A0UAAe5IFYRtwHFkA/DRfphd4Edi1ou+Red8PVbSfQba1sEt++5z88f6ujzrOyobgjduXAvfVqL0H6Cm7fSGwCZhQ1jYK+GP5Y5XWBXy+4vF+T3aqz5aPyyCO9TH5a3Zi2XJXkL3xv72i/61k0wml278kC83RBdevfL1nAOuArcru6wWurFJrZ6tft5FwKXs9Ky93Vll2Qn7f0a2ue7AvKU4DlHsYeBVYC1xGdta0Y8ruXxIRf6roczDZD9r9WtLWpQuwkOx8ypPy5Q4E7omI39VRzz3AfpIukfQRSaML9PlQXucbc56Rfcr5Uf5YO1Ysf1PF7WXA2DpqHK7Kx/oKYG5EXFp2/8HAzcBzFeP6C2BfSTvm43EAcG1EvLilFUn6G0lzJa0i+2DwKtnW4NvItgZtcB0G7F92+UJryxlaUp8OOoxsc/55YFVEvFxx/5NV+uwCjCP7w67m7WXXD9RZzw+B7cn+k54AvCrpZuArkZ25rZoxQLWg+RPZp9A2YH1Z+9qK5Tbm6xzpSmO9M9lZ7k6Q9JuI+GF+/y5kOwa3tHPw7WRv6FvRx878fP/KAmA3sq2vh4GXgEPJtgZSeK2HmmWx5R3DyUs9BGr956h2DP8zZOdYPnwLfXrz678A76inmMi2S+cCc/N9CgeS7SO4DvjAFrqtBXat0r4rWf2Vb/qpemOsJS0i20dzgaQbIjuP9jNk+2HO30L/J8im2V6n73F9F9k+gCMj4ppSo6RPDPwpmDVe6tNB/XEL0AFsiIilVS6l3ydfCLxf0r79WUlErIuI64D/Bt7dx6J3AJPKv2CUH4VyBPC7iHi+P+sfySJiI/A1sk//pe9d3EK2c3f5FsZ1Yz4FdCfwWUlv2sLDl6bw3thSlLQN8JmmPBmzAUp9S6A/rgU+D9wu6dtkUz7bkn0CnAocmr9ZzAE+DdwmaTbZDtidyI4OOr7am7OkeWRTU3eTHaXyt2Q7ohf2Uc8csh1gt0o6k2zq54S87yEDfbIjVUQskHQPcKqkS8m+G/Jb4Jf57V6yqbR3A+MjorSv6FSy4L07H//HyY4G2i8iTgL+QLbP6N8lvUYWBqcM3jOzekj6J7IpwtLWdJekDQARcX3LChtEDoE6RcSrkg4iO9RvBtlhpS8Aj5HtdH0lX+5ZSQeQ7RA8jWxO+c/AotIyVdxFFjBHAm8lm4K4Bjizj3qekPRBsmmM7w
HbkX0D8pCIqPfbyKn5OtmO3+MjYo6k0qGc55G9MTxDtuP8qlKHiLgnH9dzyA4B3Y7sTf8H+f2vSDqU7EivH5JNx/0nsBq4fHCeltXhbOCfym5/Kb9Atk9txPOviJqZJcz7BMzMEuYQMDNLmEPAzCxhDgEzs4Q5BMzMEuYQMDNLmEPAzCxhDgEzs4Q5BMzMEvb/ucuWDpfEXmUAAAAASUVORK5CYII=\n",
|
| 894 |
+
"text/plain": [
|
| 895 |
+
"<Figure size 432x288 with 1 Axes>"
|
| 896 |
+
]
|
| 897 |
+
},
|
| 898 |
+
"metadata": {
|
| 899 |
+
"needs_background": "light"
|
| 900 |
+
},
|
| 901 |
+
"output_type": "display_data"
|
| 902 |
+
}
|
| 903 |
+
],
|
| 904 |
+
"source": [
|
| 905 |
+
"draw_plot([pres_non_text, recalls_non_text, f1_non_text], title='Score for Non-text Elements')"
|
| 906 |
+
]
|
| 907 |
+
},
|
| 908 |
+
{
|
| 909 |
+
"cell_type": "code",
|
| 910 |
+
"execution_count": 51,
|
| 911 |
+
"metadata": {},
|
| 912 |
+
"outputs": [],
|
| 913 |
+
"source": []
|
| 914 |
+
}
|
| 915 |
+
],
|
| 916 |
+
"metadata": {
|
| 917 |
+
"kernelspec": {
|
| 918 |
+
"display_name": "Python 3",
|
| 919 |
+
"language": "python",
|
| 920 |
+
"name": "python3"
|
| 921 |
+
},
|
| 922 |
+
"language_info": {
|
| 923 |
+
"codemirror_mode": {
|
| 924 |
+
"name": "ipython",
|
| 925 |
+
"version": 3
|
| 926 |
+
},
|
| 927 |
+
"file_extension": ".py",
|
| 928 |
+
"mimetype": "text/x-python",
|
| 929 |
+
"name": "python",
|
| 930 |
+
"nbconvert_exporter": "python",
|
| 931 |
+
"pygments_lexer": "ipython3",
|
| 932 |
+
"version": "3.5.6"
|
| 933 |
+
}
|
| 934 |
+
},
|
| 935 |
+
"nbformat": 4,
|
| 936 |
+
"nbformat_minor": 2
|
| 937 |
+
}
|
CDM/result_processing/eval_classes.py
ADDED
|
@@ -0,0 +1,215 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import numpy as np
|
| 3 |
+
import cv2
|
| 4 |
+
from glob import glob
|
| 5 |
+
from os.path import join as pjoin
|
| 6 |
+
from tqdm import tqdm
|
| 7 |
+
|
| 8 |
+
class_map = {'0':'Button', '1':'CheckBox', '2':'Chronometer', '3':'EditText', '4':'ImageButton', '5':'ImageView',
|
| 9 |
+
'6':'ProgressBar', '7':'RadioButton', '8':'RatingBar', '9':'SeekBar', '10':'Spinner', '11':'Switch',
|
| 10 |
+
'12':'ToggleButton', '13':'VideoView', '14':'TextView'}
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def resize_label(bboxes, d_height, gt_height, bias=0):
|
| 14 |
+
bboxes_new = []
|
| 15 |
+
scale = gt_height / d_height
|
| 16 |
+
for bbox in bboxes:
|
| 17 |
+
bbox = [int(b * scale + bias) for b in bbox]
|
| 18 |
+
bboxes_new.append(bbox)
|
| 19 |
+
return bboxes_new
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def draw_bounding_box(org, corners, color=(0, 255, 0), line=2, show=False):
|
| 23 |
+
board = org.copy()
|
| 24 |
+
for i in range(len(corners)):
|
| 25 |
+
board = cv2.rectangle(board, (corners[i][0], corners[i][1]), (corners[i][2], corners[i][3]), color, line)
|
| 26 |
+
if show:
|
| 27 |
+
cv2.imshow('a', cv2.resize(board, (500, 1000)))
|
| 28 |
+
cv2.waitKey(0)
|
| 29 |
+
return board
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def load_detect_result_json(reslut_file_root, shrink=4):
|
| 33 |
+
def is_bottom_or_top(corner):
|
| 34 |
+
column_min, row_min, column_max, row_max = corner
|
| 35 |
+
if row_max < 36 or row_min > 725:
|
| 36 |
+
return True
|
| 37 |
+
return False
|
| 38 |
+
|
| 39 |
+
result_files = glob(pjoin(reslut_file_root, '*.json'))
|
| 40 |
+
compos_reform = {}
|
| 41 |
+
print('Loading %d detection results' % len(result_files))
|
| 42 |
+
for reslut_file in tqdm(result_files):
|
| 43 |
+
img_name = reslut_file.split('\\')[-1].split('.')[0]
|
| 44 |
+
compos = json.load(open(reslut_file, 'r'))['compos']
|
| 45 |
+
for compo in compos:
|
| 46 |
+
if compo['column_max'] - compo['column_min'] < 10 or compo['row_max'] - compo['row_min'] < 10:
|
| 47 |
+
continue
|
| 48 |
+
if is_bottom_or_top((compo['column_min'], compo['row_min'], compo['column_max'], compo['row_max'])):
|
| 49 |
+
continue
|
| 50 |
+
if img_name not in compos_reform:
|
| 51 |
+
compos_reform[img_name] = {'bboxes': [[compo['column_min'] + shrink, compo['row_min'] + shrink, compo['column_max'] - shrink, compo['row_max'] - shrink]],
|
| 52 |
+
'categories': [compo['category']]}
|
| 53 |
+
else:
|
| 54 |
+
compos_reform[img_name]['bboxes'].append([compo['column_min'] + shrink, compo['row_min'] + shrink, compo['column_max'] - shrink, compo['row_max'] - shrink])
|
| 55 |
+
compos_reform[img_name]['categories'].append(compo['category'])
|
| 56 |
+
return compos_reform
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def load_ground_truth_json(gt_file):
|
| 60 |
+
def get_img_by_id(img_id):
|
| 61 |
+
for image in images:
|
| 62 |
+
if image['id'] == img_id:
|
| 63 |
+
return image['file_name'].split('/')[-1][:-4], (image['height'], image['width'])
|
| 64 |
+
|
| 65 |
+
def cvt_bbox(bbox):
|
| 66 |
+
'''
|
| 67 |
+
:param bbox: [x,y,width,height]
|
| 68 |
+
:return: [col_min, row_min, col_max, row_max]
|
| 69 |
+
'''
|
| 70 |
+
bbox = [int(b) for b in bbox]
|
| 71 |
+
return [bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]]
|
| 72 |
+
|
| 73 |
+
data = json.load(open(gt_file, 'r'))
|
| 74 |
+
images = data['images']
|
| 75 |
+
annots = data['annotations']
|
| 76 |
+
compos = {}
|
| 77 |
+
print('Loading %d ground truth' % len(annots))
|
| 78 |
+
for annot in tqdm(annots):
|
| 79 |
+
img_name, size = get_img_by_id(annot['image_id'])
|
| 80 |
+
if img_name not in compos:
|
| 81 |
+
compos[img_name] = {'bboxes': [cvt_bbox(annot['bbox'])], 'categories': [class_map[str(annot['category_id'])]], 'size': size}
|
| 82 |
+
else:
|
| 83 |
+
compos[img_name]['bboxes'].append(cvt_bbox(annot['bbox']))
|
| 84 |
+
compos[img_name]['categories'].append(class_map[str(annot['category_id'])])
|
| 85 |
+
return compos
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def eval(detection, ground_truth, img_root, show=True, no_text=False, only_text=False):
    """Evaluate detected GUI components against ground-truth annotations.

    A detection counts as a true positive when it overlaps an unmatched
    ground-truth box (IoU > 0.9, or fully contained: IoD == 1) AND the
    predicted category equals the ground-truth category.

    :param detection: {img_name: {'bboxes': [[col_min,row_min,col_max,row_max]], 'categories': [...]}}
    :param ground_truth: same structure, plus a per-image 'size' (height, width)
    :param img_root: directory holding the original screenshots (<img_name>.jpg)
    :param show: if True, print per-image stats and draw det (blue) vs gt (red) boxes
    :param no_text: if True, drop 'TextView' elements from both sides before matching
    :param only_text: if True, keep only 'TextView' elements on both sides
    :return: (pres, recalls, f1s) — per-image precision / recall / F1 lists
    """
    def compo_filter(compos, flag):
        # Filter components by the text/non-text switches.
        # NOTE: the original code branched on flag ('det'/'gt') but applied the
        # identical condition to both sides, so flag is kept only for interface
        # compatibility with the call sites below.
        if not no_text and not only_text:
            return compos
        compos_new = {'bboxes': [], 'categories': []}
        for k, category in enumerate(compos['categories']):
            if only_text and category != 'TextView':
                continue
            if no_text and category == 'TextView':
                continue
            compos_new['bboxes'].append(compos['bboxes'][k])
            compos_new['categories'].append(category)
        return compos_new

    def match(org, d_bbox, d_category, gt_compos, matched):
        '''
        :param org: original image (only used by the commented-out visualization)
        :param d_bbox: detected [col_min, row_min, col_max, row_max]
        :param d_category: predicted category of the detected box
        :param gt_compos: ground truth {'bboxes': [...], 'categories': [...]}
        :param matched: per-gt-box flags; 1 = still available, 0 = already consumed
        :return: True if IoU is large enough (or the detection is fully inside a
                 gt box) and the categories agree; marks that gt box as consumed
        '''
        area_d = (d_bbox[2] - d_bbox[0]) * (d_bbox[3] - d_bbox[1])
        gt_bboxes = gt_compos['bboxes']
        gt_categories = gt_compos['categories']
        for i, gt_bbox in enumerate(gt_bboxes):
            # Skip ground-truth boxes already matched by an earlier detection.
            if matched[i] == 0:
                continue
            area_gt = (gt_bbox[2] - gt_bbox[0]) * (gt_bbox[3] - gt_bbox[1])
            col_min = max(d_bbox[0], gt_bbox[0])
            row_min = max(d_bbox[1], gt_bbox[1])
            col_max = min(d_bbox[2], gt_bbox[2])
            row_max = min(d_bbox[3], gt_bbox[3])
            # If not intersected, the intersection area should be 0.
            w = max(0, col_max - col_min)
            h = max(0, row_max - row_min)
            area_inter = w * h
            if area_inter == 0:
                continue
            iod = area_inter / area_d          # intersection over detection
            iou = area_inter / (area_d + area_gt - area_inter)
            # if show:
            #     cv2.putText(org, (str(round(iou, 2)) + ',' + str(round(iod, 2))), (d_bbox[0], d_bbox[1]),
            #                 cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

            if iou > 0.9 or iod == 1:
                if d_category == gt_categories[i]:
                    matched[i] = 0
                    return True
        return False

    amount = len(detection)
    TP, FP, FN = 0, 0, 0
    pres, recalls, f1s = [], [], []
    i = -1  # keeps the final summary well-defined even if no image is processed
    for i, image_id in enumerate(detection):
        # Skip images without ground truth BEFORE the (slow) image read.
        if image_id not in ground_truth:
            continue
        TP_this, FP_this, FN_this = 0, 0, 0
        img = cv2.imread(pjoin(img_root, image_id + '.jpg'))
        d_compos = detection[image_id]
        gt_compos = ground_truth[image_id]

        org_height = gt_compos['size'][0]

        d_compos = compo_filter(d_compos, 'det')
        gt_compos = compo_filter(gt_compos, 'gt')

        # Detections were produced on images resized to height 800; scale the
        # boxes back to the original screenshot height before matching.
        d_compos['bboxes'] = resize_label(d_compos['bboxes'], 800, org_height)
        matched = np.ones(len(gt_compos['bboxes']), dtype=int)
        for j, d_bbox in enumerate(d_compos['bboxes']):
            if match(img, d_bbox, d_compos['categories'][j], gt_compos, matched):
                TP += 1
                TP_this += 1
            else:
                FP += 1
                FP_this += 1
        # Remaining 1-flags are ground-truth boxes no detection matched.
        FN += int(sum(matched))
        FN_this = int(sum(matched))

        try:
            pre_this = TP_this / (TP_this + FP_this)
            recall_this = TP_this / (TP_this + FN_this)
            f1_this = 2 * (pre_this * recall_this) / (pre_this + recall_this)
        except ZeroDivisionError:
            # Image with no detections / no ground truth left after filtering,
            # or precision == recall == 0.
            print('empty')
            continue

        pres.append(pre_this)
        recalls.append(recall_this)
        f1s.append(f1_this)
        if show:
            print(image_id + '.jpg')
            print('[%d/%d] TP:%d, FP:%d, FN:%d, Precesion:%.3f, Recall:%.3f' % (
                i, amount, TP_this, FP_this, FN_this, pre_this, recall_this))
            # cv2.imshow('org', cv2.resize(img, (500, 1000)))
            broad = draw_bounding_box(img, d_compos['bboxes'], color=(255, 0, 0), line=3)
            draw_bounding_box(broad, gt_compos['bboxes'], color=(0, 0, 255), show=True, line=2)

        # Periodic progress report over the cumulative counts.
        if i % 200 == 0:
            precision = TP / (TP + FP)
            recall = TP / (TP + FN)
            f1 = 2 * (precision * recall) / (precision + recall)
            print(
                '[%d/%d] TP:%d, FP:%d, FN:%d, Precesion:%.3f, Recall:%.3f, F1:%.3f' % (i, amount, TP, FP, FN, precision, recall, f1))

    # Final summary. BUGFIX: f1 was previously taken from the last periodic
    # checkpoint (stale, or NameError when the checkpoint never ran); recompute
    # it here, guarding the zero-denominator cases.
    precision = TP / (TP + FP) if TP + FP else 0.0
    recall = TP / (TP + FN) if TP + FN else 0.0
    f1 = 2 * (precision * recall) / (precision + recall) if precision + recall else 0.0
    print('[%d/%d] TP:%d, FP:%d, FN:%d, Precesion:%.3f, Recall:%.3f, F1:%.3f' % (i, amount, TP, FP, FN, precision, recall, f1))
    # print("Average precision:%.4f; Average recall:%.3f" % (sum(pres)/len(pres), sum(recalls)/len(recalls)))

    return pres, recalls, f1s
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
# Evaluation switches: compare non-text components only.
no_text = True
only_text = False

# Detection result set under evaluation; other runs kept for reference:
# detect = load_detect_result_json('E:\\Mulong\\Result\\rico\\rico_uied\\rico_new_uied_cls\\ip')
# detect = load_detect_result_json('E:\\Mulong\\Result\\rico\\rico_uied\\rico_new_uied_cls\\merge')
# detect = load_detect_result_json('E:\\Mulong\\Result\\rico\\rico_uied\\rico_new_uied_v3\\ocr')
detect = load_detect_result_json('E:\\Mulong\\Result\\rico\\rico_uied\\rico_new_uied_v3\\merge')
gt = load_ground_truth_json('E:\\Mulong\\Datasets\\rico\\instances_test.json')
eval(detect, gt, 'E:\\Mulong\\Datasets\\rico\\combined', show=False, no_text=no_text, only_text=only_text)
|