tferhan commited on
Commit
5b0445c
·
verified ·
1 Parent(s): d570d48

Upload folder using huggingface_hub

Browse files
This view is limited to 50 files because it contains too many changes. See the raw diff for the full list of changed files.
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. zipdeepness/README.md +7 -0
  3. zipdeepness/__init__.py +19 -0
  4. zipdeepness/common/__init__.py +2 -0
  5. zipdeepness/common/channels_mapping.py +262 -0
  6. zipdeepness/common/config_entry_key.py +95 -0
  7. zipdeepness/common/defines.py +12 -0
  8. zipdeepness/common/errors.py +10 -0
  9. zipdeepness/common/lazy_package_loader.py +24 -0
  10. zipdeepness/common/misc.py +9 -0
  11. zipdeepness/common/processing_overlap.py +37 -0
  12. zipdeepness/common/processing_parameters/__init__.py +0 -0
  13. zipdeepness/common/processing_parameters/detection_parameters.py +70 -0
  14. zipdeepness/common/processing_parameters/map_processing_parameters.py +56 -0
  15. zipdeepness/common/processing_parameters/recognition_parameters.py +17 -0
  16. zipdeepness/common/processing_parameters/regression_parameters.py +14 -0
  17. zipdeepness/common/processing_parameters/segmentation_parameters.py +16 -0
  18. zipdeepness/common/processing_parameters/standardization_parameters.py +11 -0
  19. zipdeepness/common/processing_parameters/superresolution_parameters.py +18 -0
  20. zipdeepness/common/processing_parameters/training_data_export_parameters.py +15 -0
  21. zipdeepness/common/temp_files_handler.py +19 -0
  22. zipdeepness/deepness.py +317 -0
  23. zipdeepness/deepness_dockwidget.py +603 -0
  24. zipdeepness/deepness_dockwidget.ui +893 -0
  25. zipdeepness/dialogs/packages_installer/packages_installer_dialog.py +359 -0
  26. zipdeepness/dialogs/packages_installer/packages_installer_dialog.ui +65 -0
  27. zipdeepness/dialogs/resizable_message_box.py +20 -0
  28. zipdeepness/images/get_image_path.py +29 -0
  29. zipdeepness/images/icon.png +3 -0
  30. zipdeepness/landcover_model.onnx +3 -0
  31. zipdeepness/metadata.txt +56 -0
  32. zipdeepness/processing/__init__.py +2 -0
  33. zipdeepness/processing/extent_utils.py +219 -0
  34. zipdeepness/processing/map_processor/__init__.py +0 -0
  35. zipdeepness/processing/map_processor/map_processing_result.py +47 -0
  36. zipdeepness/processing/map_processor/map_processor.py +237 -0
  37. zipdeepness/processing/map_processor/map_processor_detection.py +288 -0
  38. zipdeepness/processing/map_processor/map_processor_recognition.py +221 -0
  39. zipdeepness/processing/map_processor/map_processor_regression.py +174 -0
  40. zipdeepness/processing/map_processor/map_processor_segmentation.py +386 -0
  41. zipdeepness/processing/map_processor/map_processor_superresolution.py +175 -0
  42. zipdeepness/processing/map_processor/map_processor_training_data_export.py +95 -0
  43. zipdeepness/processing/map_processor/map_processor_with_model.py +27 -0
  44. zipdeepness/processing/map_processor/utils/ckdtree.py +62 -0
  45. zipdeepness/processing/models/__init__.py +2 -0
  46. zipdeepness/processing/models/buildings_type_MA.onnx +3 -0
  47. zipdeepness/processing/models/detector.py +709 -0
  48. zipdeepness/processing/models/dual.py +168 -0
  49. zipdeepness/processing/models/model_base.py +444 -0
  50. zipdeepness/processing/models/model_types.py +93 -0
.gitattributes CHANGED
@@ -34,3 +34,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
  zipdeepness/deepness/images/icon.png filter=lfs diff=lfs merge=lfs -text
 
 
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
  zipdeepness/deepness/images/icon.png filter=lfs diff=lfs merge=lfs -text
37
+ zipdeepness/images/icon.png filter=lfs diff=lfs merge=lfs -text
zipdeepness/README.md ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ # Deepness: Deep Neural Remote Sensing
2
+
3
+ Plugin for QGIS to perform map/image segmentation, regression and object detection with (ONNX) neural network models.
4
+
5
+ Please visit the documentation webpage for details: https://qgis-plugin-deepness.readthedocs.io/
6
+
7
+ Or the repository: https://github.com/PUTvision/qgis-plugin-deepness
zipdeepness/__init__.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Main plugin module - entry point for the plugin."""
import os

# Increase the limit of pixels (to 2^40), before importing cv2.
# We are doing it here to make sure it is done before cv2 is imported for the first time.
# (the original comment said 2^30, but the value actually set is 2^40)
os.environ["OPENCV_IO_MAX_IMAGE_PIXELS"] = str(2 ** 40)


# noinspection PyPep8Naming
def classFactory(iface):  # pylint: disable=invalid-name
    """Load Deepness class from file Deepness.

    :param iface: A QGIS interface instance.
    :type iface: QgsInterface
    """
    # Imports are deferred so that the packages-installer can run first and install
    # any missing third-party dependencies before the main plugin code is loaded.
    from deepness.dialogs.packages_installer import packages_installer_dialog
    packages_installer_dialog.check_required_packages_and_install_if_necessary(iface=iface)

    from deepness.deepness import Deepness
    return Deepness(iface)
zipdeepness/common/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ """ Submodule that contains the common functions for the deepness plugin.
2
+ """
zipdeepness/common/channels_mapping.py ADDED
@@ -0,0 +1,262 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Raster layer (orthophoto) which is being processed consists of channels (usually Red, Green, Blue).
3
+ The neural model expects input channels with some model-defined meaning.
4
+ Channel mappings in this file define how the orthophoto channels translate to model inputs (e.g. first model input is Red, second Green).
5
+ """
6
+
7
+ import copy
8
+ from typing import Dict, List
9
+
10
+
class ImageChannel:
    """Abstract description of one image channel and how it is stored in the data source.

    Concrete subclasses define whether the channel lives in its own raster band or
    inside a composite value. See the note at the top of this file for details.
    """

    def __init__(self, name):
        self.name = name

    def get_band_number(self):
        raise NotImplementedError('Base class not implemented!')

    def get_byte_number(self):
        raise NotImplementedError('Base class not implemented!')
class ImageChannelStandaloneBand(ImageChannel):
    """Image channel stored as its own separate band in the data source.

    See the note at the top of this file for details.
    """

    def __init__(self, band_number: int, name: str):
        super().__init__(name)
        self.band_number = band_number  # index within bands (counted from one)

    def __str__(self):
        return (f'ImageChannelStandaloneBand(name={self.name}, '
                f'band_number={self.band_number})')

    def get_band_number(self):
        return self.band_number

    def get_byte_number(self):
        raise NotImplementedError('Something went wrong if we are here!')
class ImageChannelCompositeByte(ImageChannel):
    """Image channel that is a smaller part of a bigger composite value
    (e.g. one byte within a uint32 for each pixel).

    See the note at the top of this file for details.
    """

    def __init__(self, byte_number: int, name: str):
        super().__init__(name)
        self.byte_number = byte_number  # position in composite byte (byte number in ARGB32, counted from zero)

    def __str__(self):
        return (f'ImageChannelCompositeByte(name={self.name}, '
                f'byte_number={self.byte_number})')

    def get_band_number(self):
        raise NotImplementedError('Something went wrong if we are here!')

    def get_byte_number(self):
        return self.byte_number
class ChannelsMapping:
    """
    Defines mapping of model input channels to input image channels (bands).
    See note at top of this file for details.
    """

    INVALID_INPUT_CHANNEL = -1

    def __init__(self):
        self._number_of_model_inputs = 0
        self._number_of_model_output_channels = 0
        self._image_channels = []  # type: List[ImageChannel]  # what channels are available from input image

        # maps model channels to input image channels
        # model_channel_number: image_channel_index (index in self._image_channels)
        self._mapping = {}  # type: Dict[int, int]

    def __str__(self):
        txt = f'ChannelsMapping(' \
              f'number_of_model_inputs={self._number_of_model_inputs}, ' \
              f'image_channels = {self._image_channels}, ' \
              f'mapping {self._mapping})'
        return txt

    def __eq__(self, other):
        # NOTE(review): only the number of model inputs is compared; image channels and the
        # mapping itself are ignored. Kept as-is to preserve existing behavior - confirm intent.
        if self._number_of_model_inputs != other._number_of_model_inputs:
            return False
        return True

    def get_as_default_mapping(self):
        """
        Get the same channels mapping as we have right now, but without the mapping itself
        (so just a definition of inputs and outputs)

        Returns
        -------
        ChannelsMapping
        """
        default_channels_mapping = copy.deepcopy(self)
        default_channels_mapping._mapping = {}
        return default_channels_mapping

    def are_all_inputs_standalone_bands(self):
        """
        Checks whether all image_channels are standalone bands (ImageChannelStandaloneBand)
        """
        for image_channel in self._image_channels:
            if not isinstance(image_channel, ImageChannelStandaloneBand):
                return False
        return True

    def are_all_inputs_composite_byte(self):
        """
        Checks whether all image_channels are composite byte (ImageChannelCompositeByte)
        """
        for image_channel in self._image_channels:
            if not isinstance(image_channel, ImageChannelCompositeByte):
                return False
        return True

    def set_number_of_model_inputs(self, number_of_model_inputs: int):
        """ Set how many input channels the model has

        Parameters
        ----------
        number_of_model_inputs : int
        """
        self._number_of_model_inputs = number_of_model_inputs

    def set_number_of_model_output_channels(self, number_of_output_channels: int):
        """ Set how many output channels the model has

        Parameters
        ----------
        number_of_output_channels : int
        """
        self._number_of_model_output_channels = number_of_output_channels

    def set_number_of_model_inputs_same_as_image_channels(self):
        """ Set the number of model input channels to be the same as number of image channels
        """
        self._number_of_model_inputs = len(self._image_channels)

    def get_number_of_model_inputs(self) -> int:
        """ Get number of model input channels

        Returns
        -------
        int
        """
        return self._number_of_model_inputs

    def get_number_of_model_output_channels(self) -> int:
        """ Get number of model output channels

        Returns
        -------
        int
        """
        return self._number_of_model_output_channels

    def get_number_of_image_channels(self) -> int:
        """ Get number of image input channels

        Returns
        -------
        int
        """
        return len(self._image_channels)

    def set_image_channels(self, image_channels: 'List[ImageChannel]'):
        """ Set what the image channels are

        Parameters
        ----------
        image_channels : List[ImageChannel]
            Image channels to set

        Raises
        ------
        Exception
            If the channels mix standalone-band and composite-byte storage types
        """
        self._image_channels = image_channels
        if not self.are_all_inputs_standalone_bands() and not self.are_all_inputs_composite_byte():
            raise Exception("Unsupported image channels composition!")

    def get_image_channels(self) -> 'List[ImageChannel]':
        """ Get the current image channels definition

        Returns
        -------
        List[ImageChannel]
        """
        return self._image_channels

    def get_image_channel_index_for_model_input(self, model_input_number) -> int:
        """
        Similar to 'get_image_channel_for_model_input', but return an index in array of inputs,
        instead of ImageChannel
        """
        if len(self._image_channels) == 0:
            raise Exception("No image channels!")

        # unmapped inputs default to the same-numbered image channel, clamped to a valid index
        image_channel_index = self._mapping.get(model_input_number, model_input_number)
        image_channel_index = min(image_channel_index, len(self._image_channels) - 1)
        return image_channel_index

    def get_image_channel_for_model_input(self, model_input_number: int) -> 'ImageChannel':
        """
        Get ImageChannel which should be used for the specified model input

        Parameters
        ----------
        model_input_number : int
            Model input number, counted from 0

        Returns
        -------
        ImageChannel
        """
        image_channel_index = self.get_image_channel_index_for_model_input(model_input_number)
        return self._image_channels[image_channel_index]

    def set_image_channel_for_model_input(self, model_input_number: int, image_channel_index: int) -> None:
        """
        Set image_channel_index which should be used for this model input
        """
        # BUGFIX: return annotation was 'ImageChannel', but this setter returns nothing
        if image_channel_index >= len(self._image_channels):
            raise Exception("Invalid image channel index!")
        self._mapping[model_input_number] = image_channel_index

    def get_mapping_as_list(self) -> List[int]:
        """ Get the mapping of model input channels to image channels, but as a list
        (e.g. to store it in QGis configuration). Unmapped inputs are stored as -1.

        Returns
        -------
        List[int]
        """
        return [self._mapping.get(i, -1) for i in range(self._number_of_model_inputs)]

    def load_mapping_from_list(self, mapping_list: List[int]):
        """
        Load self._mapping from a plain list of channels (which is saved in config)
        """
        # BUGFIX: original code was 'range(min(self._number_of_model_inputs), len(mapping_list))',
        # which raises TypeError ('min' of a bare int). Iterate over entries present in both
        # the model inputs and the saved list.
        for i in range(min(self._number_of_model_inputs, len(mapping_list))):
            proposed_channel = mapping_list[i]
            if proposed_channel == -1 or proposed_channel >= self._number_of_model_inputs:
                continue

            self._mapping[i] = proposed_channel
zipdeepness/common/config_entry_key.py ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This file contains utilities to write and read configuration parameters to the QGis Project configuration
3
+ """
4
+
5
+ import enum
6
+
7
+ from qgis.core import QgsProject
8
+
9
+ from deepness.common.defines import PLUGIN_NAME
10
+
11
+
class ConfigEntryKey(enum.Enum):
    """
    Entries to be stored in Project Configuration.
    Second element of enum value (in tuple) is the default value for this field
    """

    MODEL_FILE_PATH = enum.auto(), ''  # Path to the model file
    INPUT_LAYER_ID = enum.auto(), ''
    PROCESSED_AREA_TYPE = enum.auto(), ''  # string of ProcessedAreaType, e.g. "ProcessedAreaType.VISIBLE_PART.value"
    MODEL_TYPE = enum.auto(), ''  # string of ModelType enum, e.g. "ModelType.SEGMENTATION.value"
    PREPROCESSING_RESOLUTION = enum.auto(), 3.0
    MODEL_BATCH_SIZE = enum.auto(), 1
    PROCESS_LOCAL_CACHE = enum.auto(), False
    PREPROCESSING_TILES_OVERLAP = enum.auto(), 15

    SEGMENTATION_PROBABILITY_THRESHOLD_ENABLED = enum.auto(), True
    SEGMENTATION_PROBABILITY_THRESHOLD_VALUE = enum.auto(), 0.5
    SEGMENTATION_REMOVE_SMALL_SEGMENT_ENABLED = enum.auto(), True
    SEGMENTATION_REMOVE_SMALL_SEGMENT_SIZE = enum.auto(), 9

    REGRESSION_OUTPUT_SCALING = enum.auto(), 1.0

    DETECTION_CONFIDENCE = enum.auto(), 0.5
    DETECTION_IOU = enum.auto(), 0.5
    DETECTOR_TYPE = enum.auto(), 'YOLO_v5_v7_DEFAULT'

    DATA_EXPORT_DIR = enum.auto(), ''
    DATA_EXPORT_TILES_ENABLED = enum.auto(), True
    DATA_EXPORT_SEGMENTATION_MASK_ENABLED = enum.auto(), False
    DATA_EXPORT_SEGMENTATION_MASK_ID = enum.auto(), ''

    # BUGFIX: these two members used 'enum.auto' without calling it, so the first tuple
    # element was the 'auto' function object instead of a generated integer
    INPUT_CHANNELS_MAPPING__ADVANCED_MODE = enum.auto(), False
    INPUT_CHANNELS_MAPPING__MAPPING_LIST_STR = enum.auto(), []

    def get(self):
        """
        Get the value stored in config (or a default one) for the specified field
        """
        # check the default value to determine the entry type.
        # NOTE: bool must be checked BEFORE int, because bool is a subclass of int -
        # otherwise bool entries would be read with readNumEntry
        default_value = self.value[1]  # second element in the 'value' tuple
        if isinstance(default_value, bool):
            read_function = QgsProject.instance().readBoolEntry
        elif isinstance(default_value, int):
            read_function = QgsProject.instance().readNumEntry
        elif isinstance(default_value, float):
            read_function = QgsProject.instance().readDoubleEntry
        elif isinstance(default_value, str):
            read_function = QgsProject.instance().readEntry
        elif isinstance(default_value, list):
            # BUGFIX: this branch checked 'str' twice, so readListEntry was unreachable
            read_function = QgsProject.instance().readListEntry
        else:
            raise Exception("Unsupported entry type!")

        value, _ = read_function(PLUGIN_NAME, self.name, default_value)
        return value

    def set(self, value):
        """ Set the value stored in config, for the specified field

        Parameters
        ----------
        value :
            Value to set in the configuration
        """
        # check the default value to determine the entry type.
        # NOTE: bool must be checked BEFORE int (bool is a subclass of int),
        # otherwise writeEntryBool was unreachable
        default_value = self.value[1]  # second element in the 'value' tuple
        if isinstance(default_value, bool):
            write_function = QgsProject.instance().writeEntryBool
        elif isinstance(default_value, int):
            write_function = QgsProject.instance().writeEntry
        elif isinstance(default_value, float):
            write_function = QgsProject.instance().writeEntryDouble
        elif isinstance(default_value, str):
            write_function = QgsProject.instance().writeEntry
        elif isinstance(default_value, list):
            write_function = QgsProject.instance().writeEntry
        else:
            raise Exception("Unsupported entry type!")

        write_function(PLUGIN_NAME, self.name, value)
zipdeepness/common/defines.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
"""
Common definitions shared across the project.
"""

import os

PLUGIN_NAME = 'Deepness'
LOG_TAB_NAME = PLUGIN_NAME


# enable some debugging options (e.g. printing exceptions) - set in terminal before running qgis
IS_DEBUG = os.getenv("IS_DEBUG", 'False').lower() in {'true', '1', 't'}
zipdeepness/common/errors.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
"""
Common exceptions used across the project.
"""


class OperationFailedException(Exception):
    """Raised when an operation fails, carrying a user-friendly error message to display."""
zipdeepness/common/lazy_package_loader.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""
Utility for lazily importing packages.
"""

import importlib


class LazyPackageLoader:
    """ Allows to wrap python package into a lazy version, so that the package will be loaded once it is actually used

    Usage:
        cv2 = LazyPackageLoader('cv2')  # This will not import cv2 yet
        ...
        cv2.waitKey(3)  # here will be the actual import
    """

    def __init__(self, package_name):
        self._package_name = package_name
        self._package = None

    def __getattr__(self, attribute_name):
        # Only called for attributes not found on the instance, so the actual
        # import happens on the first real use of the wrapped package.
        package = self._package
        if package is None:
            package = importlib.import_module(self._package_name)
            self._package = package
        return getattr(package, attribute_name)
zipdeepness/common/misc.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This file contains miscellaneous stuff used in the project
3
+ """
4
+
5
+ import os
6
+ import tempfile
7
+
8
+ _TMP_DIR = tempfile.TemporaryDirectory()
9
+ TMP_DIR_PATH = os.path.join(_TMP_DIR.name, 'qgis')
zipdeepness/common/processing_overlap.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import enum
from typing import Dict, List


class ProcessingOverlapOptions(enum.Enum):
    OVERLAP_IN_PIXELS = 'Overlap in pixels'
    OVERLAP_IN_PERCENT = 'Overlap in percent'


class ProcessingOverlap:
    """ Represents overlap between tiles during processing
    """

    def __init__(self, selected_option: ProcessingOverlapOptions, percentage: float = None, overlap_px: int = None):
        self.selected_option = selected_option

        # validate that the value matching the selected option was actually provided
        if selected_option == ProcessingOverlapOptions.OVERLAP_IN_PERCENT and percentage is None:
            raise Exception(f"Percentage must be specified when using {ProcessingOverlapOptions.OVERLAP_IN_PERCENT}")
        if selected_option == ProcessingOverlapOptions.OVERLAP_IN_PIXELS and overlap_px is None:
            raise Exception(f"Overlap in pixels must be specified when using {ProcessingOverlapOptions.OVERLAP_IN_PIXELS}")

        if selected_option == ProcessingOverlapOptions.OVERLAP_IN_PERCENT:
            self._percentage = percentage
        elif selected_option == ProcessingOverlapOptions.OVERLAP_IN_PIXELS:
            self._overlap_px = overlap_px
        else:
            raise Exception(f"Unknown option: {selected_option}")

    def get_overlap_px(self, tile_size_px: int) -> int:
        """ Returns the overlap in pixels

        :param tile_size_px: Tile size in pixels
        :return: Returns the overlap in pixels
        """
        if self.selected_option == ProcessingOverlapOptions.OVERLAP_IN_PIXELS:
            return self._overlap_px
        return int(tile_size_px * self._percentage / 100 * 2) // 2  # TODO: check if this is correct
zipdeepness/common/processing_parameters/__init__.py ADDED
File without changes
zipdeepness/common/processing_parameters/detection_parameters.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import enum
2
+ from dataclasses import dataclass
3
+
4
+ from deepness.common.processing_parameters.map_processing_parameters import MapProcessingParameters
5
+ from deepness.processing.models.model_base import ModelBase
6
+
7
+
@dataclass
class DetectorTypeParameters:
    """
    Defines some model-specific parameters for each model 'type'
    (e.g. default YOLOv7 has a different model output shape than the one trained with Ultralytics' YOLO)
    """
    # whether the output shape of the model is inverted (and we need to apply np.transpose(model_output, (1, 0)))
    has_inverted_output_shape: bool = False
    # whether the model output has no 'objectness' probability, and only a probability for each class
    skipped_objectness_probability: bool = False
    # if the model output has the 'objectness' probability, we can still ignore it (needed sometimes,
    # when the probability was always 1...). The behavior should be the same as with
    # `skipped_objectness_probability` (of course one model output needs to be skipped)
    ignore_objectness_probability: bool = False


class DetectorType(enum.Enum):
    """ Type of the detector model """

    YOLO_v5_v7_DEFAULT = 'YOLO_v5_or_v7_default'
    YOLO_v6 = 'YOLO_v6'
    YOLO_v9 = 'YOLO_v9'
    YOLO_ULTRALYTICS = 'YOLO_Ultralytics'
    YOLO_ULTRALYTICS_SEGMENTATION = 'YOLO_Ultralytics_segmentation'
    YOLO_ULTRALYTICS_OBB = 'YOLO_Ultralytics_obb'

    def get_parameters(self) -> DetectorTypeParameters:
        """Return the model-specific parameters for this detector type.

        Raises
        ------
        ValueError
            If the detector type is unknown.
        """
        if self == DetectorType.YOLO_v5_v7_DEFAULT:
            return DetectorTypeParameters()  # all default
        elif self == DetectorType.YOLO_v6:
            return DetectorTypeParameters(
                ignore_objectness_probability=True,
            )
        elif self in (DetectorType.YOLO_v9,
                      DetectorType.YOLO_ULTRALYTICS,
                      DetectorType.YOLO_ULTRALYTICS_SEGMENTATION,
                      DetectorType.YOLO_ULTRALYTICS_OBB):
            # these types share identical parameters, so the branches were merged
            return DetectorTypeParameters(
                has_inverted_output_shape=True,
                skipped_objectness_probability=True,
            )
        else:
            raise ValueError(f'Unknown detector type: {self}')

    def get_formatted_description(self) -> str:
        """Return a human-readable, indented description of this type's parameters."""
        params = self.get_parameters()  # hoisted: original called get_parameters() three times
        txt = ''
        txt += ' ' * 10 + f'Inverted output shape: {params.has_inverted_output_shape}\n'
        txt += ' ' * 10 + f'Skipped objectness : {params.skipped_objectness_probability}\n'
        txt += ' ' * 10 + f'Ignore objectness: {params.ignore_objectness_probability}\n'
        return txt

    @staticmethod
    def get_all_display_values():
        """Return the display values of all detector types (e.g. for a UI combo box).

        BUGFIX: was a plain function in the class body (no 'self', no decorator);
        marked as a staticmethod so it is also callable on instances.
        """
        return [x.value for x in DetectorType]
58
+
@dataclass
class DetectionParameters(MapProcessingParameters):
    """
    Parameters for Inference of detection model (including pre/post-processing) obtained from UI.
    """

    model: ModelBase  # wrapper of the loaded model

    confidence: float  # minimum confidence for a detection to be kept
    iou_threshold: float  # IoU threshold used for non-maximum suppression

    detector_type: DetectorType = DetectorType.YOLO_v5_v7_DEFAULT  # parameters specific for each model type
zipdeepness/common/processing_parameters/map_processing_parameters.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import enum
2
+ from dataclasses import dataclass
3
+ from typing import Optional
4
+
5
+ from deepness.common.channels_mapping import ChannelsMapping
6
+ from deepness.common.processing_overlap import ProcessingOverlap
7
+
8
+
class ProcessedAreaType(enum.Enum):
    """Which part of the map should be processed."""

    VISIBLE_PART = 'Visible part'
    ENTIRE_LAYER = 'Entire layer'
    FROM_POLYGONS = 'From polygons'

    @classmethod
    def get_all_names(cls):
        """Return the display names of all options, in definition order."""
        return [member.value for member in cls]
@dataclass
class MapProcessingParameters:
    """
    Common parameters for map processing obtained from UI.

    TODO: Add default values here, to later set them in UI at startup
    """

    resolution_cm_per_px: float  # image resolution to used during processing
    processed_area_type: ProcessedAreaType  # whether to perform operation on the entire field or part
    tile_size_px: int  # Tile size for processing (model input size)
    batch_size: int  # Batch size for processing
    local_cache: bool  # Whether to use local cache for tiles (on disk, /tmp directory)

    input_layer_id: str  # raster layer to process
    mask_layer_id: Optional[str]  # Processing of masked layer - if processed_area_type is FROM_POLYGONS

    processing_overlap: ProcessingOverlap  # aka "stride" - how much to overlap tiles during processing

    input_channels_mapping: ChannelsMapping  # describes mapping of image channels to model inputs

    @property
    def tile_size_m(self):
        """Tile edge length expressed in meters."""
        return self.tile_size_px * self.resolution_cm_per_px / 100

    @property
    def processing_overlap_px(self) -> int:
        """
        Always divisible by 2, because overlap is on both sides of the tile
        """
        return self.processing_overlap.get_overlap_px(self.tile_size_px)

    @property
    def resolution_m_per_px(self):
        """Processing resolution expressed in meters per pixel."""
        return self.resolution_cm_per_px / 100

    @property
    def processing_stride_px(self):
        """Distance between the starts of two consecutive tiles, in pixels."""
        return self.tile_size_px - self.processing_overlap_px
zipdeepness/common/processing_parameters/recognition_parameters.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import enum
from dataclasses import dataclass
from typing import Optional

from deepness.common.processing_parameters.map_processing_parameters import (
    MapProcessingParameters,
)
from deepness.processing.models.model_base import ModelBase


@dataclass
class RecognitionParameters(MapProcessingParameters):
    """
    Parameters for Inference of Recognition model (including pre/post-processing) obtained from UI.
    """

    query_image_path: str  # path to query image
    model: ModelBase  # wrapper of the loaded model
zipdeepness/common/processing_parameters/regression_parameters.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from dataclasses import dataclass

from deepness.common.processing_parameters.map_processing_parameters import MapProcessingParameters
from deepness.processing.models.model_base import ModelBase


@dataclass
class RegressionParameters(MapProcessingParameters):
    """
    Parameters for Inference of Regression model (including pre/post-processing) obtained from UI.
    """

    # scaling factor for the model output (keep 1 if maximum model output value is 1)
    output_scaling: float
    model: ModelBase  # wrapper of the loaded model
zipdeepness/common/processing_parameters/segmentation_parameters.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from dataclasses import dataclass

from deepness.common.processing_parameters.map_processing_parameters import MapProcessingParameters
from deepness.processing.models.model_base import ModelBase


@dataclass
class SegmentationParameters(MapProcessingParameters):
    """
    Parameters for Inference of Segmentation model (including pre/post-processing) obtained from UI.
    """

    # dilate/erode operation size applied once we have a single class map; 0 if inactive.
    # Implementation may use a median filter instead of erode/dilate
    postprocessing_dilate_erode_size: int
    model: ModelBase  # wrapper of the loaded model

    pixel_classification__probability_threshold: float  # Minimum required class probability for pixel. 0 if disabled
zipdeepness/common/processing_parameters/standardization_parameters.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
import numpy as np


class StandardizationParameters:
    """Holds per-channel mean and std used to standardize model input images."""

    def __init__(self, channels_number: int):
        # Defaults are identity standardization: mean 0, std 1 for every channel
        self.mean = np.zeros(channels_number, dtype=np.float32)
        self.std = np.ones(channels_number, dtype=np.float32)

    def set_mean_std(self, mean: np.ndarray, std: np.ndarray):
        """Set per-channel mean and std (any array-like accepted).

        BUGFIX: annotations were 'np.array', which is a function, not a type -
        the correct array type is 'np.ndarray'.
        """
        self.mean = np.array(mean, dtype=np.float32)
        self.std = np.array(std, dtype=np.float32)
zipdeepness/common/processing_parameters/superresolution_parameters.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import enum
from dataclasses import dataclass
from typing import Optional

from deepness.common.channels_mapping import ChannelsMapping
from deepness.common.processing_parameters.map_processing_parameters import MapProcessingParameters
from deepness.processing.models.model_base import ModelBase


@dataclass
class SuperresolutionParameters(MapProcessingParameters):
    """
    Parameters for Inference of Super Resolution model (including pre/post-processing) obtained from UI.
    """

    # scaling factor for the model output (keep 1 if maximum model output value is 1)
    output_scaling: float
    model: ModelBase  # wrapper of the loaded model
    scale_factor: int  # scale factor for the model output size
zipdeepness/common/processing_parameters/training_data_export_parameters.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from dataclasses import dataclass
from typing import Optional

from deepness.common.processing_parameters.map_processing_parameters import MapProcessingParameters


@dataclass
class TrainingDataExportParameters(MapProcessingParameters):
    """
    Parameters for Exporting Data obtained from UI.
    """

    export_image_tiles: bool  # whether to export input image tiles
    segmentation_mask_layer_id: Optional[str]  # id for mask, to be exported as separate tiles
    output_directory_path: str  # path where the output files will be saved
zipdeepness/common/temp_files_handler.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os.path as path
2
+ import shutil
3
+ from tempfile import mkdtemp
4
+
5
+
6
class TempFilesHandler:
    """Owns a unique temporary directory for intermediate processing files.

    The directory (and everything inside it) is removed when this object is
    garbage collected.
    """

    def __init__(self) -> None:
        self._temp_dir = mkdtemp()

        print(f'Created temp dir: {self._temp_dir} for processing')

    def get_results_img_path(self) -> str:
        """Return the path for the processing results image file."""
        return path.join(self._temp_dir, 'results.dat')

    def get_area_mask_img_path(self) -> str:
        """Return the path for the processed-area mask image file."""
        return path.join(self._temp_dir, 'area_mask.dat')

    def __del__(self):
        # ignore_errors: the directory may already have been removed, and during
        # interpreter shutdown module globals can be partially torn down -
        # a destructor must never raise.
        shutil.rmtree(self._temp_dir, ignore_errors=True)
zipdeepness/deepness.py ADDED
@@ -0,0 +1,317 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Main plugin file - entry point for the plugin.
2
+
3
+ Links the UI and the processing.
4
+
5
+ Skeleton of this file was generated with the QGIS plugin to create a plugin skeleton - QGIS PluginBuilder
6
+ """
7
+
8
+ import logging
9
+ import traceback
10
+
11
+ from qgis.core import Qgis, QgsApplication, QgsProject, QgsVectorLayer
12
+ from qgis.gui import QgisInterface
13
+ from qgis.PyQt.QtCore import QCoreApplication, Qt
14
+ from qgis.PyQt.QtGui import QIcon
15
+ from qgis.PyQt.QtWidgets import QAction, QMessageBox
16
+
17
+ from deepness.common.defines import IS_DEBUG, PLUGIN_NAME
18
+ from deepness.common.lazy_package_loader import LazyPackageLoader
19
+ from deepness.common.processing_parameters.map_processing_parameters import MapProcessingParameters, ProcessedAreaType
20
+ from deepness.common.processing_parameters.training_data_export_parameters import TrainingDataExportParameters
21
+ from deepness.deepness_dockwidget import DeepnessDockWidget
22
+ from deepness.dialogs.resizable_message_box import ResizableMessageBox
23
+ from deepness.images.get_image_path import get_icon_path
24
+ from deepness.processing.map_processor.map_processing_result import (MapProcessingResult, MapProcessingResultCanceled,
25
+ MapProcessingResultFailed,
26
+ MapProcessingResultSuccess)
27
+ from deepness.processing.map_processor.map_processor_training_data_export import MapProcessorTrainingDataExport
28
+
29
+ cv2 = LazyPackageLoader('cv2')
30
+
31
+
32
class Deepness:
    """ QGIS Plugin Implementation - main class of the plugin.
    Creates the UI classes and processing models and links them together.
    """

    def __init__(self, iface: QgisInterface):
        """
        :param iface: An interface instance that will be passed to this class
            which provides the hook by which you can manipulate the QGIS
            application at run time.
        :type iface: QgsInterface
        """
        self.iface = iface

        # Declare instance attributes
        self.actions = []
        self.menu = self.tr(u'&Deepness')

        self.toolbar = self.iface.addToolBar(u'Deepness')
        self.toolbar.setObjectName(u'Deepness')

        self.pluginIsActive = False
        self.dockwidget = None
        self._map_processor = None  # currently running (or last) processing task, if any

    # noinspection PyMethodMayBeStatic
    def tr(self, message):
        """Get the translation for a string using Qt translation API.

        We implement this ourselves since we do not inherit QObject.

        :param message: String for translation.
        :type message: str, QString

        :returns: Translated version of message.
        :rtype: QString
        """
        # noinspection PyTypeChecker,PyArgumentList,PyCallByClass
        return QCoreApplication.translate('Deepness', message)

    def add_action(
            self,
            icon_path,
            text,
            callback,
            enabled_flag=True,
            add_to_menu=True,
            add_to_toolbar=True,
            status_tip=None,
            whats_this=None,
            parent=None):
        """Add a toolbar icon to the toolbar.

        :param icon_path: Path to the icon for this action. Can be a resource
            path (e.g. ':/plugins/foo/bar.png') or a normal file system path.
        :type icon_path: str

        :param text: Text that should be shown in menu items for this action.
        :type text: str

        :param callback: Function to be called when the action is triggered.
        :type callback: function

        :param enabled_flag: A flag indicating if the action should be enabled
            by default. Defaults to True.
        :type enabled_flag: bool

        :param add_to_menu: Flag indicating whether the action should also
            be added to the menu. Defaults to True.
        :type add_to_menu: bool

        :param add_to_toolbar: Flag indicating whether the action should also
            be added to the toolbar. Defaults to True.
        :type add_to_toolbar: bool

        :param status_tip: Optional text to show in a popup when mouse pointer
            hovers over the action.
        :type status_tip: str

        :param parent: Parent widget for the new action. Defaults None.
        :type parent: QWidget

        :param whats_this: Optional text to show in the status bar when the
            mouse pointer hovers over the action.

        :returns: The action that was created. Note that the action is also
            added to self.actions list.
        :rtype: QAction
        """

        icon = QIcon(icon_path)
        action = QAction(icon, text, parent)
        action.triggered.connect(callback)
        action.setEnabled(enabled_flag)

        if status_tip is not None:
            action.setStatusTip(status_tip)

        if whats_this is not None:
            action.setWhatsThis(whats_this)

        if add_to_toolbar:
            self.toolbar.addAction(action)

        if add_to_menu:
            self.iface.addPluginToMenu(
                self.menu,
                action)

        self.actions.append(action)

        return action

    def initGui(self):
        """Create the menu entries and toolbar icons inside the QGIS GUI."""

        icon_path = get_icon_path()
        self.add_action(
            icon_path,
            text=self.tr(u'Deepness'),
            callback=self.run,
            parent=self.iface.mainWindow())

        if IS_DEBUG:
            # open the dock immediately while developing the plugin
            self.run()

    def onClosePlugin(self):
        """Cleanup necessary items here when plugin dockwidget is closed"""

        # disconnects
        self.dockwidget.closingPlugin.disconnect(self.onClosePlugin)

        # remove this statement if dockwidget is to remain
        # for reuse if plugin is reopened
        # Commented next statement since it causes QGIS crashe
        # when closing the docked window:
        # self.dockwidget = None

        self.pluginIsActive = False

    def unload(self):
        """Removes the plugin menu item and icon from QGIS GUI."""

        for action in self.actions:
            self.iface.removePluginMenu(
                self.tr(u'&Deepness'),
                action)
            self.iface.removeToolBarIcon(action)
        # remove the toolbar
        del self.toolbar

    def _layers_changed(self, _):
        """Slot for project layersAdded/layersRemoved signals; currently a no-op."""
        pass

    def run(self):
        """Run method that loads and starts the plugin"""

        if not self.pluginIsActive:
            self.pluginIsActive = True

            # dockwidget may not exist if:
            #    first run of plugin
            #    removed on close (see self.onClosePlugin method)
            if self.dockwidget is None:
                # Create the dockwidget (after translation) and keep reference
                self.dockwidget = DeepnessDockWidget(self.iface)
                self._layers_changed(None)
                QgsProject.instance().layersAdded.connect(self._layers_changed)
                QgsProject.instance().layersRemoved.connect(self._layers_changed)

                # connect to provide cleanup on closing of dockwidget
                self.dockwidget.closingPlugin.connect(self.onClosePlugin)
                self.dockwidget.run_model_inference_signal.connect(self._run_model_inference)
                self.dockwidget.run_training_data_export_signal.connect(self._run_training_data_export)

            self.iface.addDockWidget(Qt.RightDockWidgetArea, self.dockwidget)
            self.dockwidget.show()

    def _are_map_processing_parameters_are_correct(self, params: MapProcessingParameters):
        """Validate common preconditions before starting any processing task.

        Returns True when processing may start; otherwise pushes an error to
        the QGIS message bar and returns False.
        """
        if self._map_processor and self._map_processor.is_busy():
            msg = "Error! Processing already in progress! Please wait or cancel previous task."
            self.iface.messageBar().pushMessage(PLUGIN_NAME, msg, level=Qgis.Critical, duration=7)
            return False

        # Fix: use .get() - plain indexing raised KeyError for a missing/removed
        # layer id, which made the `rlayer is None` check below unreachable.
        rlayer = QgsProject.instance().mapLayers().get(params.input_layer_id)
        if rlayer is None:
            msg = "Error! Please select the layer to process first!"
            self.iface.messageBar().pushMessage(PLUGIN_NAME, msg, level=Qgis.Critical, duration=7)
            return False

        if isinstance(rlayer, QgsVectorLayer):
            msg = "Error! Please select a raster layer (vector layer selected)"
            self.iface.messageBar().pushMessage(PLUGIN_NAME, msg, level=Qgis.Critical, duration=7)
            return False

        return True

    def _display_processing_started_info(self):
        """Show a short informational message that a processing task was queued."""
        msg = "Processing in progress... Cool! It's tea time!"
        self.iface.messageBar().pushMessage(PLUGIN_NAME, msg, level=Qgis.Info, duration=2)

    def _run_training_data_export(self, training_data_export_parameters: TrainingDataExportParameters):
        """Create and enqueue a training-data-export task from the UI parameters."""
        if not self._are_map_processing_parameters_are_correct(training_data_export_parameters):
            return

        vlayer = None

        rlayer = QgsProject.instance().mapLayers()[training_data_export_parameters.input_layer_id]
        if training_data_export_parameters.processed_area_type == ProcessedAreaType.FROM_POLYGONS:
            vlayer = QgsProject.instance().mapLayers()[training_data_export_parameters.mask_layer_id]

        self._map_processor = MapProcessorTrainingDataExport(
            rlayer=rlayer,
            vlayer_mask=vlayer,  # layer with masks
            map_canvas=self.iface.mapCanvas(),
            params=training_data_export_parameters)
        self._map_processor.finished_signal.connect(self._map_processor_finished)
        self._map_processor.show_img_signal.connect(self._show_img)
        QgsApplication.taskManager().addTask(self._map_processor)
        self._display_processing_started_info()

    def _run_model_inference(self, params: MapProcessingParameters):
        """Create and enqueue a model-inference task matching the parameter type."""
        from deepness.processing.models.model_types import ModelDefinition  # import here to avoid pulling external dependencies to early

        if not self._are_map_processing_parameters_are_correct(params):
            return

        vlayer = None

        rlayer = QgsProject.instance().mapLayers()[params.input_layer_id]
        if params.processed_area_type == ProcessedAreaType.FROM_POLYGONS:
            vlayer = QgsProject.instance().mapLayers()[params.mask_layer_id]

        # the concrete map processor class depends on the model type in the params
        model_definition = ModelDefinition.get_definition_for_params(params)
        map_processor_class = model_definition.map_processor_class

        self._map_processor = map_processor_class(
            rlayer=rlayer,
            vlayer_mask=vlayer,
            map_canvas=self.iface.mapCanvas(),
            params=params)
        self._map_processor.finished_signal.connect(self._map_processor_finished)
        self._map_processor.show_img_signal.connect(self._show_img)
        QgsApplication.taskManager().addTask(self._map_processor)
        self._display_processing_started_info()

    @staticmethod
    def _show_img(img_rgb, window_name: str):
        """ Helper function to show an image while developing and debugging the plugin """
        # We are importing it here, because it is debug tool,
        # and we don't want to have it in the main scope from the project startup
        img_bgr = img_rgb[..., ::-1]  # OpenCV expects BGR channel order
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        cv2.resizeWindow(window_name, 800, 800)
        cv2.imshow(window_name, img_bgr)
        cv2.waitKey(1)

    def _map_processor_finished(self, result: MapProcessingResult):
        """ Slot for finished processing of the ortophoto """
        if isinstance(result, MapProcessingResultFailed):
            msg = f'Error! Processing error: "{result.message}"!'
            self.iface.messageBar().pushMessage(PLUGIN_NAME, msg, level=Qgis.Critical, duration=14)
            if result.exception is not None:
                logging.error(msg)
                trace = '\n'.join(traceback.format_tb(result.exception.__traceback__)[-1:])
                msg = f'{msg}\n\n\n' \
                      f'Details: ' \
                      f'{str(result.exception.__class__.__name__)} - {result.exception}\n' \
                      f'Last Traceback: \n' \
                      f'{trace}'
                QMessageBox.critical(self.dockwidget, "Unhandled exception", msg)
        elif isinstance(result, MapProcessingResultCanceled):
            msg = 'Info! Processing canceled by user!'
            self.iface.messageBar().pushMessage(PLUGIN_NAME, msg, level=Qgis.Info, duration=7)
        elif isinstance(result, MapProcessingResultSuccess):
            msg = 'Processing finished!'
            self.iface.messageBar().pushMessage(PLUGIN_NAME, msg, level=Qgis.Success, duration=3)
            message_to_show = result.message

            msgBox = ResizableMessageBox(self.dockwidget)
            msgBox.setWindowTitle("Processing Result")
            msgBox.setText(message_to_show)
            msgBox.setStyleSheet("QLabel{min-width:800 px; font-size: 24px;} QPushButton{ width:250px; font-size: 18px; }")
            msgBox.exec()

        self._map_processor = None
zipdeepness/deepness_dockwidget.py ADDED
@@ -0,0 +1,603 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This file contains the main widget of the plugin
3
+ """
4
+
5
+ import logging
6
+ import os
7
+ from typing import Optional
8
+
9
+ from qgis.core import Qgis, QgsMapLayerProxyModel, QgsProject
10
+ from qgis.PyQt import QtWidgets, uic
11
+ from qgis.PyQt.QtCore import pyqtSignal
12
+ from qgis.PyQt.QtWidgets import QComboBox, QFileDialog, QMessageBox
13
+
14
+ from deepness.common.config_entry_key import ConfigEntryKey
15
+ from deepness.common.defines import IS_DEBUG, PLUGIN_NAME
16
+ from deepness.common.errors import OperationFailedException
17
+ from deepness.common.lazy_package_loader import LazyPackageLoader
18
+ from deepness.common.processing_overlap import ProcessingOverlap, ProcessingOverlapOptions
19
+ from deepness.common.processing_parameters.detection_parameters import DetectionParameters, DetectorType
20
+ from deepness.common.processing_parameters.map_processing_parameters import MapProcessingParameters, ProcessedAreaType
21
+ from deepness.common.processing_parameters.recognition_parameters import RecognitionParameters
22
+ from deepness.common.processing_parameters.regression_parameters import RegressionParameters
23
+ from deepness.common.processing_parameters.segmentation_parameters import SegmentationParameters
24
+ from deepness.common.processing_parameters.superresolution_parameters import SuperresolutionParameters
25
+ from deepness.common.processing_parameters.training_data_export_parameters import TrainingDataExportParameters
26
+ from deepness.processing.models.model_base import ModelBase
27
+ from deepness.widgets.input_channels_mapping.input_channels_mapping_widget import InputChannelsMappingWidget
28
+ from deepness.widgets.training_data_export_widget.training_data_export_widget import TrainingDataExportWidget
29
+
30
+ FORM_CLASS, _ = uic.loadUiType(os.path.join(
31
+ os.path.dirname(__file__), 'deepness_dockwidget.ui'))
32
+
33
+
34
+ class DeepnessDockWidget(QtWidgets.QDockWidget, FORM_CLASS):
35
+ """
36
+ Main widget of the plugin.
37
+ 'Dock' means it is a 'docked' widget, embedded in the QGis application window.
38
+
39
+ The UI design is defined in the `deepness_dockwidget.ui` file - recommended to be opened in QtDesigner.
40
+ Note: Default values for ui edits are based on 'ConfigEntryKey' default value, not taken from the UI form.
41
+ """
42
+
43
+ closingPlugin = pyqtSignal()
44
+ run_model_inference_signal = pyqtSignal(MapProcessingParameters) # run Segmentation or Detection
45
+ run_training_data_export_signal = pyqtSignal(TrainingDataExportParameters)
46
+
47
    def __init__(self, iface, parent=None):
        """Create the dock widget, its child widgets and signal connections.

        :param iface: QGIS interface instance (QgisInterface)
        :param parent: optional parent widget
        """
        super(DeepnessDockWidget, self).__init__(parent)
        self.iface = iface
        self._model = None  # type: Optional[ModelBase]
        self.setupUi(self)

        self._input_channels_mapping_widget = InputChannelsMappingWidget(self)  # mapping of model and input ortophoto channels
        self._training_data_export_widget = TrainingDataExportWidget(self)  # widget with UI for data export tool

        # order matters: connections and static UI setup must exist before
        # restoring the previously saved UI state from config
        self._create_connections()
        self._setup_misc_ui()
        self._load_ui_from_config()
59
+
60
    def _show_debug_warning(self):
        """ Show label with warning if we are running debug mode """
        # the label stays hidden entirely outside of debug mode
        self.label_debugModeWarning.setVisible(IS_DEBUG)
63
+
64
    def _load_ui_from_config(self):
        """ Load the UI values from the project configuration.

        Any failure is logged and swallowed, so a stale or partial config
        from an older plugin version never breaks widget construction.
        """
        layers = QgsProject.instance().mapLayers()

        try:
            input_layer_id = ConfigEntryKey.INPUT_LAYER_ID.get()
            if input_layer_id and input_layer_id in layers:
                self.mMapLayerComboBox_inputLayer.setLayer(layers[input_layer_id])

            processed_area_type_txt = ConfigEntryKey.PROCESSED_AREA_TYPE.get()
            self.comboBox_processedAreaSelection.setCurrentText(processed_area_type_txt)

            model_type_txt = ConfigEntryKey.MODEL_TYPE.get()
            self.comboBox_modelType.setCurrentText(model_type_txt)

            self._input_channels_mapping_widget.load_ui_from_config()
            self._training_data_export_widget.load_ui_from_config()

            # NOTE: load the model after setting the model_type above
            model_file_path = ConfigEntryKey.MODEL_FILE_PATH.get()
            if model_file_path:
                self.lineEdit_modelPath.setText(model_file_path)
                self._load_model_and_display_info(abort_if_no_file_path=True)  # to prepare other ui components

            # needs to be loaded after the model is set up
            self.doubleSpinBox_resolution_cm_px.setValue(ConfigEntryKey.PREPROCESSING_RESOLUTION.get())
            self.spinBox_batchSize.setValue(ConfigEntryKey.MODEL_BATCH_SIZE.get())
            self.checkBox_local_cache.setChecked(ConfigEntryKey.PROCESS_LOCAL_CACHE.get())
            self.spinBox_processingTileOverlapPercentage.setValue(ConfigEntryKey.PREPROCESSING_TILES_OVERLAP.get())

            self.doubleSpinBox_probabilityThreshold.setValue(
                ConfigEntryKey.SEGMENTATION_PROBABILITY_THRESHOLD_VALUE.get())
            self.checkBox_pixelClassEnableThreshold.setChecked(
                ConfigEntryKey.SEGMENTATION_PROBABILITY_THRESHOLD_ENABLED.get())
            self._set_probability_threshold_enabled()
            self.spinBox_dilateErodeSize.setValue(
                ConfigEntryKey.SEGMENTATION_REMOVE_SMALL_SEGMENT_SIZE.get())
            self.checkBox_removeSmallAreas.setChecked(
                ConfigEntryKey.SEGMENTATION_REMOVE_SMALL_SEGMENT_ENABLED.get())
            self._set_remove_small_segment_enabled()

            self.doubleSpinBox_regressionScaling.setValue(ConfigEntryKey.REGRESSION_OUTPUT_SCALING.get())

            self.doubleSpinBox_confidence.setValue(ConfigEntryKey.DETECTION_CONFIDENCE.get())
            self.doubleSpinBox_iouScore.setValue(ConfigEntryKey.DETECTION_IOU.get())
            self.comboBox_detectorType.setCurrentText(ConfigEntryKey.DETECTOR_TYPE.get())
        except Exception:
            logging.exception("Failed to load the ui state from config!")
113
+
114
    def _save_ui_to_config(self):
        """ Save value from the UI forms to the project config.

        Mirror of `_load_ui_from_config` - every entry written here is read
        back on the next plugin start.
        """
        ConfigEntryKey.MODEL_FILE_PATH.set(self.lineEdit_modelPath.text())
        ConfigEntryKey.INPUT_LAYER_ID.set(self._get_input_layer_id())
        ConfigEntryKey.MODEL_TYPE.set(self.comboBox_modelType.currentText())
        ConfigEntryKey.PROCESSED_AREA_TYPE.set(self.comboBox_processedAreaSelection.currentText())

        ConfigEntryKey.PREPROCESSING_RESOLUTION.set(self.doubleSpinBox_resolution_cm_px.value())
        ConfigEntryKey.MODEL_BATCH_SIZE.set(self.spinBox_batchSize.value())
        ConfigEntryKey.PROCESS_LOCAL_CACHE.set(self.checkBox_local_cache.isChecked())
        ConfigEntryKey.PREPROCESSING_TILES_OVERLAP.set(self.spinBox_processingTileOverlapPercentage.value())

        ConfigEntryKey.SEGMENTATION_PROBABILITY_THRESHOLD_ENABLED.set(
            self.checkBox_pixelClassEnableThreshold.isChecked())
        ConfigEntryKey.SEGMENTATION_PROBABILITY_THRESHOLD_VALUE.set(self.doubleSpinBox_probabilityThreshold.value())
        ConfigEntryKey.SEGMENTATION_REMOVE_SMALL_SEGMENT_ENABLED.set(
            self.checkBox_removeSmallAreas.isChecked())
        ConfigEntryKey.SEGMENTATION_REMOVE_SMALL_SEGMENT_SIZE.set(self.spinBox_dilateErodeSize.value())

        ConfigEntryKey.REGRESSION_OUTPUT_SCALING.set(self.doubleSpinBox_regressionScaling.value())

        ConfigEntryKey.DETECTION_CONFIDENCE.set(self.doubleSpinBox_confidence.value())
        ConfigEntryKey.DETECTION_IOU.set(self.doubleSpinBox_iouScore.value())
        ConfigEntryKey.DETECTOR_TYPE.set(self.comboBox_detectorType.currentText())

        self._input_channels_mapping_widget.save_ui_to_config()
        self._training_data_export_widget.save_ui_to_config()
142
+
143
    def _rlayer_updated(self):
        """Propagate the newly selected input raster layer to the channels mapping widget."""
        self._input_channels_mapping_widget.set_rlayer(self._get_input_layer())
145
+
146
    def _setup_misc_ui(self):
        """ Setup some miscellaneous ui forms: comboboxes content, layer filters,
        embedded child widgets and initial visibility state.
        """
        from deepness.processing.models.model_types import \
            ModelDefinition  # import here to avoid pulling external dependencies to early

        self._show_debug_warning()
        combobox = self.comboBox_processedAreaSelection
        for name in ProcessedAreaType.get_all_names():
            combobox.addItem(name)

        self.verticalLayout_inputChannelsMapping.addWidget(self._input_channels_mapping_widget)
        self.verticalLayout_trainingDataExport.addWidget(self._training_data_export_widget)

        # only raster layers can be processed; only vector layers can mask the area
        self.mMapLayerComboBox_inputLayer.setFilters(QgsMapLayerProxyModel.RasterLayer)
        self.mMapLayerComboBox_areaMaskLayer.setFilters(QgsMapLayerProxyModel.VectorLayer)

        self.mGroupBox_8.setCollapsed(True)  # collapse the group by default
        self._set_processed_area_mask_options()
        self._set_processing_overlap_enabled()

        for model_definition in ModelDefinition.get_model_definitions():
            self.comboBox_modelType.addItem(model_definition.model_type.value)

        for detector_type in DetectorType.get_all_display_values():
            self.comboBox_detectorType.addItem(detector_type)
        self._detector_type_changed()

        self._rlayer_updated()  # to force refresh the dependant ui elements
175
+
176
    def _set_processed_area_mask_options(self):
        """Show the mask-layer selector only when processing polygons from a vector layer."""
        show_mask_combobox = (self.get_selected_processed_area_type() == ProcessedAreaType.FROM_POLYGONS)
        self.mMapLayerComboBox_areaMaskLayer.setVisible(show_mask_combobox)
        self.label_areaMaskLayer.setVisible(show_mask_combobox)
180
+
181
    def get_selected_processed_area_type(self) -> ProcessedAreaType:
        """Return the processed-area type currently selected in the combobox.

        :raises ValueError: if the combobox text is not a valid ProcessedAreaType value
        """
        combobox = self.comboBox_processedAreaSelection  # type: QComboBox
        txt = combobox.currentText()
        return ProcessedAreaType(txt)
185
+
186
    def _create_connections(self):
        """Wire all widget signals to their handler slots (called once from __init__)."""
        self.pushButton_runInference.clicked.connect(self._run_inference)
        self.pushButton_runTrainingDataExport.clicked.connect(self._run_training_data_export)
        self.pushButton_browseQueryImagePath.clicked.connect(self._browse_query_image_path)
        self.pushButton_browseModelPath.clicked.connect(self._browse_model_path)
        self.comboBox_processedAreaSelection.currentIndexChanged.connect(self._set_processed_area_mask_options)
        self.comboBox_modelType.currentIndexChanged.connect(self._model_type_changed)
        self.comboBox_detectorType.currentIndexChanged.connect(self._detector_type_changed)
        self.pushButton_reloadModel.clicked.connect(self._load_model_and_display_info)
        self.pushButton_loadDefaultModelParameters.clicked.connect(self._load_default_model_parameters)
        self.mMapLayerComboBox_inputLayer.layerChanged.connect(self._rlayer_updated)
        self.checkBox_pixelClassEnableThreshold.stateChanged.connect(self._set_probability_threshold_enabled)
        self.checkBox_removeSmallAreas.stateChanged.connect(self._set_remove_small_segment_enabled)
        self.radioButton_processingTileOverlapPercentage.toggled.connect(self._set_processing_overlap_enabled)
        self.radioButton_processingTileOverlapPixels.toggled.connect(self._set_processing_overlap_enabled)
201
+
202
    def _model_type_changed(self):
        """Show only the parameter group box matching the selected model type.

        :raises Exception: if the combobox contains a model type not handled below
        """
        from deepness.processing.models.model_types import \
            ModelType  # import here to avoid pulling external dependencies to early

        model_type = ModelType(self.comboBox_modelType.currentText())

        # exactly one of these becomes True for the selected type
        segmentation_enabled = False
        detection_enabled = False
        regression_enabled = False
        superresolution_enabled = False
        recognition_enabled = False

        if model_type == ModelType.SEGMENTATION:
            segmentation_enabled = True
        elif model_type == ModelType.DETECTION:
            detection_enabled = True
        elif model_type == ModelType.REGRESSION:
            regression_enabled = True
        elif model_type == ModelType.SUPERRESOLUTION:
            superresolution_enabled = True
        elif model_type == ModelType.RECOGNITION:
            recognition_enabled = True
        else:
            raise Exception(f"Unsupported model type ({model_type})!")

        self.mGroupBox_segmentationParameters.setVisible(segmentation_enabled)
        self.mGroupBox_detectionParameters.setVisible(detection_enabled)
        self.mGroupBox_regressionParameters.setVisible(regression_enabled)
        self.mGroupBox_superresolutionParameters.setVisible(superresolution_enabled)
        self.mGroupBox_recognitionParameters.setVisible(recognition_enabled)
232
+
233
    def _detector_type_changed(self):
        """Update the description label to match the newly selected detector type."""
        detector_type = DetectorType(self.comboBox_detectorType.currentText())
        self.label_detectorTypeDescription.setText(detector_type.get_formatted_description())
236
+
237
    def _set_processing_overlap_enabled(self):
        """Enable the overlap spinbox matching the checked radio button (percentage vs pixels)."""
        overlap_percentage_enabled = self.radioButton_processingTileOverlapPercentage.isChecked()
        self.spinBox_processingTileOverlapPercentage.setEnabled(overlap_percentage_enabled)

        overlap_pixels_enabled = self.radioButton_processingTileOverlapPixels.isChecked()
        self.spinBox_processingTileOverlapPixels.setEnabled(overlap_pixels_enabled)
243
+
244
    def _set_probability_threshold_enabled(self):
        """Enable the probability threshold spinbox only when its checkbox is checked."""
        self.doubleSpinBox_probabilityThreshold.setEnabled(self.checkBox_pixelClassEnableThreshold.isChecked())
246
+
247
    def _set_remove_small_segment_enabled(self):
        """Enable the small-segment size spinbox only when its checkbox is checked."""
        self.spinBox_dilateErodeSize.setEnabled(self.checkBox_removeSmallAreas.isChecked())
249
+
250
    def _browse_model_path(self):
        """Open a file dialog to pick the model file, then (re)load the model."""
        file_path, _ = QFileDialog.getOpenFileName(
            self,
            'Select Model ONNX file...',
            os.path.expanduser('~'),
            'All files (*.*);; ONNX files (*.onnx)')
        if file_path:  # empty string means the dialog was cancelled
            self.lineEdit_modelPath.setText(file_path)
            self._load_model_and_display_info()
259
+
260
    def _browse_query_image_path(self):
        """Open a file dialog to pick the query image used by recognition models."""
        file_path, _ = QFileDialog.getOpenFileName(
            self,
            "Select image file...",
            os.path.expanduser("~"),
            "All files (*.*)",
        )
        if file_path:  # empty string means the dialog was cancelled
            self.lineEdit_recognitionPath.setText(file_path)
269
+
270
    def _load_default_model_parameters(self):
        """
        Load the default parameters from model metadata.

        Each metadata entry is optional - only values present in the model
        file overwrite the corresponding UI form.
        NOTE(review): assumes a model is already loaded (self._model not None) -
        verify that callers only enable this action after a successful load.
        """
        value = self._model.get_metadata_resolution()
        if value is not None:
            self.doubleSpinBox_resolution_cm_px.setValue(value)

        value = self._model.get_model_batch_size()
        if value is not None:
            # batch size fixed by the model - the user must not change it
            self.spinBox_batchSize.setValue(value)
            self.spinBox_batchSize.setEnabled(False)
        else:
            self.spinBox_batchSize.setEnabled(True)

        value = self._model.get_metadata_tile_size()
        if value is not None:
            self.spinBox_tileSize_px.setValue(value)

        value = self._model.get_metadata_tiles_overlap()
        if value is not None:
            self.spinBox_processingTileOverlapPercentage.setValue(value)

        value = self._model.get_metadata_model_type()
        if value is not None:
            print(f'{value =}')
            self.comboBox_modelType.setCurrentText(value)

        value = self._model.get_detector_type()
        if value is not None:
            self.comboBox_detectorType.setCurrentText(value)

        value = self._model.get_metadata_segmentation_threshold()
        if value is not None:
            # threshold of 0 means "thresholding disabled"
            self.checkBox_pixelClassEnableThreshold.setChecked(bool(value != 0))
            self.doubleSpinBox_probabilityThreshold.setValue(value)

        value = self._model.get_metadata_segmentation_small_segment()
        if value is not None:
            # size of 0 means "small segment removal disabled"
            self.checkBox_removeSmallAreas.setChecked(bool(value != 0))
            self.spinBox_dilateErodeSize.setValue(value)

        value = self._model.get_metadata_regression_output_scaling()
        if value is not None:
            self.doubleSpinBox_regressionScaling.setValue(value)

        value = self._model.get_metadata_detection_confidence()
        if value is not None:
            self.doubleSpinBox_confidence.setValue(value)

        value = self._model.get_metadata_detection_iou_threshold()
        if value is not None:
            self.doubleSpinBox_iouScore.setValue(value)
323
+
324
    def _load_model_with_type_from_metadata(self, model_class_from_ui, file_path):
        """
        If model has model_type in metadata - use this type to create proper model class.
        Otherwise model_class_from_ui will be used.

        :param model_class_from_ui: model class selected in the model type combobox
        :param file_path: path to the model file on disk
        :returns: instantiated model wrapper (subclass of ModelBase)
        """
        from deepness.processing.models.model_types import (  # import here to avoid pulling external dependencies to early
            ModelDefinition, ModelType)

        model_class = model_class_from_ui

        model_type_str_from_metadata = ModelBase.get_model_type_from_metadata(file_path)
        if model_type_str_from_metadata is not None:
            # metadata wins over the UI selection; keep the combobox in sync
            model_type = ModelType(model_type_str_from_metadata)
            model_class = ModelDefinition.get_definition_for_type(model_type).model_class
            self.comboBox_modelType.setCurrentText(model_type.value)

        print(f'{model_type_str_from_metadata = }, {model_class = }')

        model = model_class(file_path)
        return model
344
+
345
def _load_model_and_display_info(self, abort_if_no_file_path: bool = False):
    """Try to load the model from the path given in the UI and display its summary.

    On failure a critical message box is shown (or the exception is re-raised
    when running in debug mode) and the tile-size/batch-size spin boxes are
    re-enabled for manual input.

    :param abort_if_no_file_path: if True, silently return when no model path is set
    """
    import deepness.processing.models.detector as detector_module  # import here to avoid pulling external dependencies to early
    from deepness.processing.models.model_types import \
        ModelType  # import here to avoid pulling external dependencies to early

    file_path = self.lineEdit_modelPath.text()

    if not file_path and abort_if_no_file_path:
        return

    txt = ''

    try:
        model_definition = self.get_selected_model_class_definition()
        model_class = model_definition.model_class
        self._model = self._load_model_with_type_from_metadata(
            model_class_from_ui=model_class,
            file_path=file_path)
        self._model.check_loaded_model_outputs()
        input_0_shape = self._model.get_input_shape()
        txt += 'Legend: [BATCH_SIZE, CHANNELS, HEIGHT, WIDTH]\n'
        txt += 'Inputs:\n'
        txt += f'\t- Input: {input_0_shape}\n'
        input_size_px = input_0_shape[-1]
        batch_size = self._model.get_model_batch_size()

        txt += 'Outputs:\n'

        for i, output_shape in enumerate(self._model.get_output_shapes()):
            txt += f'\t- Output {i}: {output_shape}\n'

        # TODO idk how variable input will be handled
        self.spinBox_tileSize_px.setValue(input_size_px)
        self.spinBox_tileSize_px.setEnabled(False)

        if batch_size is not None:
            # batch size baked into the model - user must not change it
            self.spinBox_batchSize.setValue(batch_size)
            self.spinBox_batchSize.setEnabled(False)
        else:
            self.spinBox_batchSize.setEnabled(True)

        self._input_channels_mapping_widget.set_model(self._model)

        # super resolution
        # BUGFIX: the original compared `model_class` (a class object) to a
        # ModelType enum member, which was always False, so this branch never
        # executed. Re-fetch the definition (metadata loading above may have
        # changed the combo box selection) and compare model types instead.
        model_definition = self.get_selected_model_class_definition()
        if model_definition.model_type == ModelType.SUPERRESOLUTION:
            output_0_shape = self._model.get_output_shape()
            scale_factor = output_0_shape[-1] / input_size_px
            self.doubleSpinBox_superresolutionScaleFactor.setValue(int(scale_factor))
            # Disable output format options for super-resolution models
    except Exception as e:
        if IS_DEBUG:
            raise  # bare raise preserves the original traceback
        txt = "Error! Failed to load the model!\n" \
              "Model may be not usable."
        logging.exception(txt)
        self.spinBox_tileSize_px.setEnabled(True)
        self.spinBox_batchSize.setEnabled(True)
        length_limit = 300
        exception_msg = (str(e)[:length_limit] + '..') if len(str(e)) > length_limit else str(e)
        msg = txt + f'\n\nException: {exception_msg}'
        QMessageBox.critical(self, "Error!", msg)

    self.label_modelInfo.setText(txt)

    if isinstance(self._model, detector_module.Detector):
        detector_type = DetectorType(self.comboBox_detectorType.currentText())
        self._model.set_model_type_param(detector_type)
def get_mask_layer_id(self):
    """Return the id of the selected area-mask vector layer.

    Returns None when the processed-area type is not FROM_POLYGONS, or when no
    mask layer is currently selected in the combo box (the original code would
    raise AttributeError calling ``.id()`` on a None layer).
    """
    if self.get_selected_processed_area_type() != ProcessedAreaType.FROM_POLYGONS:
        return None

    layer = self.mMapLayerComboBox_areaMaskLayer.currentLayer()
    if layer is None:
        # no layer selected in the combo box - avoid AttributeError on None
        return None
    return layer.id()
+ def _get_input_layer(self):
424
+ return self.mMapLayerComboBox_inputLayer.currentLayer()
425
+
426
+ def _get_input_layer_id(self):
427
+ layer = self._get_input_layer()
428
+ if layer:
429
+ return layer.id()
430
+ else:
431
+ return ''
432
+
433
def _get_overlap_parameter(self):
    """Build the tile-overlap configuration from whichever overlap radio button is active."""
    if self.radioButton_processingTileOverlapPercentage.isChecked():
        percentage = self.spinBox_processingTileOverlapPercentage.value()
        return ProcessingOverlap(
            selected_option=ProcessingOverlapOptions.OVERLAP_IN_PERCENT,
            percentage=percentage,
        )

    if self.radioButton_processingTileOverlapPixels.isChecked():
        overlap_px = self.spinBox_processingTileOverlapPixels.value()
        return ProcessingOverlap(
            selected_option=ProcessingOverlapOptions.OVERLAP_IN_PIXELS,
            overlap_px=overlap_px,
        )

    raise Exception('Something goes wrong. No overlap parameter selected!')
+ def _get_pixel_classification_threshold(self):
448
+ if not self.checkBox_pixelClassEnableThreshold.isChecked():
449
+ return 0
450
+ return self.doubleSpinBox_probabilityThreshold.value()
451
+
452
def get_selected_model_class_definition(self):  # -> ModelDefinition: # we cannot import it here yet
    """Return the ModelDefinition matching the model type chosen in the UI combo box."""
    from deepness.processing.models.model_types import (  # import here to avoid pulling external dependencies to early
        ModelDefinition, ModelType)

    selected_type = ModelType(self.comboBox_modelType.currentText())
    return ModelDefinition.get_definition_for_type(selected_type)
def get_inference_parameters(self) -> MapProcessingParameters:
    """Collect the parameters for the model inference.

    The returned object is an instance of a subclass of `MapProcessingParameters`,
    chosen according to the currently selected model type.
    """
    from deepness.processing.models.model_types import \
        ModelType  # import here to avoid pulling external dependencies to early

    map_processing_parameters = self._get_map_processing_parameters()

    if self._model is None:
        raise OperationFailedException("Please select and load a model first!")

    model_type = self.get_selected_model_class_definition().model_type
    # dispatch table instead of an if/elif chain - one getter per model type
    parameter_getters = {
        ModelType.SEGMENTATION: self.get_segmentation_parameters,
        ModelType.REGRESSION: self.get_regression_parameters,
        ModelType.SUPERRESOLUTION: self.get_superresolution_parameters,
        ModelType.RECOGNITION: self.get_recognition_parameters,
        ModelType.DETECTION: self.get_detection_parameters,
    }
    getter = parameter_getters.get(model_type)
    if getter is None:
        raise Exception(f"Unknown model type '{model_type}'!")

    return getter(map_processing_parameters)
def get_segmentation_parameters(self, map_processing_parameters: MapProcessingParameters) -> SegmentationParameters:
    """Extend the common map-processing parameters with segmentation-specific UI options."""
    if self.checkBox_removeSmallAreas.isChecked():
        postprocessing_dilate_erode_size = self.spinBox_dilateErodeSize.value()
    else:
        postprocessing_dilate_erode_size = 0

    return SegmentationParameters(
        **map_processing_parameters.__dict__,
        postprocessing_dilate_erode_size=postprocessing_dilate_erode_size,
        pixel_classification__probability_threshold=self._get_pixel_classification_threshold(),
        model=self._model,
    )
def get_regression_parameters(self, map_processing_parameters: MapProcessingParameters) -> RegressionParameters:
    """Extend the common map-processing parameters with regression-specific UI options."""
    output_scaling = self.doubleSpinBox_regressionScaling.value()
    return RegressionParameters(
        **map_processing_parameters.__dict__,
        output_scaling=output_scaling,
        model=self._model,
    )
def get_superresolution_parameters(self, map_processing_parameters: MapProcessingParameters) -> SuperresolutionParameters:
    """Extend the common map-processing parameters with super-resolution-specific UI options."""
    scale_factor = self.doubleSpinBox_superresolutionScaleFactor.value()
    output_scaling = self.doubleSpinBox_superresolutionScaling.value()
    return SuperresolutionParameters(
        **map_processing_parameters.__dict__,
        model=self._model,
        scale_factor=scale_factor,
        output_scaling=output_scaling,
    )
def get_recognition_parameters(self, map_processing_parameters: MapProcessingParameters) -> RecognitionParameters:
    """Extend the common map-processing parameters with recognition-specific UI options."""
    query_image_path = self.lineEdit_recognitionPath.text()
    return RecognitionParameters(
        **map_processing_parameters.__dict__,
        model=self._model,
        query_image_path=query_image_path,
    )
def get_detection_parameters(self, map_processing_parameters: MapProcessingParameters) -> DetectionParameters:
    """Extend the common map-processing parameters with detection-specific UI options."""
    detector_type = DetectorType(self.comboBox_detectorType.currentText())
    return DetectionParameters(
        **map_processing_parameters.__dict__,
        confidence=self.doubleSpinBox_confidence.value(),
        iou_threshold=self.doubleSpinBox_iouScore.value(),
        model=self._model,
        detector_type=detector_type,
    )
def _get_map_processing_parameters(self) -> MapProcessingParameters:
    """Collect the parameters that are common to both inference and training-data export."""
    common_kwargs = dict(
        resolution_cm_per_px=self.doubleSpinBox_resolution_cm_px.value(),
        tile_size_px=self.spinBox_tileSize_px.value(),
        batch_size=self.spinBox_batchSize.value(),
        local_cache=self.checkBox_local_cache.isChecked(),
        processed_area_type=self.get_selected_processed_area_type(),
        mask_layer_id=self.get_mask_layer_id(),
        input_layer_id=self._get_input_layer_id(),
        processing_overlap=self._get_overlap_parameter(),
        input_channels_mapping=self._input_channels_mapping_widget.get_channels_mapping(),
    )
    return MapProcessingParameters(**common_kwargs)
def _run_inference(self):
    """Validate the UI state and emit the signal that starts the model inference."""
    # check_required_packages_and_install_if_necessary()
    try:
        params = self.get_inference_parameters()
        if not params.input_layer_id:
            raise OperationFailedException("Please select an input layer first!")
    except OperationFailedException as exc:
        error_msg = str(exc)
        message_bar = self.iface.messageBar()
        message_bar.pushMessage(PLUGIN_NAME, error_msg, level=Qgis.Warning, duration=7)
        logging.exception(error_msg)
        QMessageBox.critical(self, "Error!", error_msg)
        return

    self._save_ui_to_config()
    self.run_model_inference_signal.emit(params)
def _run_training_data_export(self):
    """Validate the UI state and emit the signal that starts the training-data export."""
    # check_required_packages_and_install_if_necessary()
    try:
        map_processing_parameters = self._get_map_processing_parameters()
        training_data_export_parameters = self._training_data_export_widget.get_training_data_export_parameters(
            map_processing_parameters)

        if not map_processing_parameters.input_layer_id:
            raise OperationFailedException("Please select an input layer first!")

        # Overwrite common parameter - we don't want channels mapping as for the model,
        # but just to take all channels
        training_data_export_parameters.input_channels_mapping = \
            self._input_channels_mapping_widget.get_channels_mapping_for_training_data_export()
    except OperationFailedException as exc:
        error_msg = str(exc)
        self.iface.messageBar().pushMessage(PLUGIN_NAME, error_msg, level=Qgis.Warning)
        logging.exception(error_msg)
        QMessageBox.critical(self, "Error!", error_msg)
        return

    self._save_ui_to_config()
    self.run_training_data_export_signal.emit(training_data_export_parameters)
def closeEvent(self, event):
    """Qt close handler - notify listeners that the plugin dock widget is closing."""
    self.closingPlugin.emit()
    event.accept()
zipdeepness/deepness_dockwidget.ui ADDED
@@ -0,0 +1,893 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <ui version="4.0">
3
+ <class>DeepnessDockWidgetBase</class>
4
+ <widget class="QDockWidget" name="DeepnessDockWidgetBase">
5
+ <property name="geometry">
6
+ <rect>
7
+ <x>0</x>
8
+ <y>0</y>
9
+ <width>486</width>
10
+ <height>1368</height>
11
+ </rect>
12
+ </property>
13
+ <property name="windowTitle">
14
+ <string>Deepness</string>
15
+ </property>
16
+ <widget class="QWidget" name="dockWidgetContents">
17
+ <layout class="QGridLayout" name="gridLayout">
18
+ <item row="0" column="0">
19
+ <widget class="QScrollArea" name="scrollArea">
20
+ <property name="widgetResizable">
21
+ <bool>true</bool>
22
+ </property>
23
+ <widget class="QWidget" name="scrollAreaWidgetContents">
24
+ <property name="geometry">
25
+ <rect>
26
+ <x>0</x>
27
+ <y>-268</y>
28
+ <width>452</width>
29
+ <height>1564</height>
30
+ </rect>
31
+ </property>
32
+ <layout class="QVBoxLayout" name="verticalLayout_3">
33
+ <property name="leftMargin">
34
+ <number>0</number>
35
+ </property>
36
+ <property name="rightMargin">
37
+ <number>0</number>
38
+ </property>
39
+ <property name="bottomMargin">
40
+ <number>0</number>
41
+ </property>
42
+ <item>
43
+ <widget class="QLabel" name="label_debugModeWarning">
44
+ <property name="font">
45
+ <font>
46
+ <pointsize>9</pointsize>
47
+ <bold>true</bold>
48
+ </font>
49
+ </property>
50
+ <property name="styleSheet">
51
+ <string notr="true">color:rgb(198, 70, 0)</string>
52
+ </property>
53
+ <property name="text">
54
+ <string>WARNING: Running plugin in DEBUG mode
55
+ (because env variable IS_DEBUG=true)</string>
56
+ </property>
57
+ </widget>
58
+ </item>
59
+ <item>
60
+ <widget class="QgsCollapsibleGroupBox" name="mGroupBox_3">
61
+ <property name="sizePolicy">
62
+ <sizepolicy hsizetype="Preferred" vsizetype="Maximum">
63
+ <horstretch>0</horstretch>
64
+ <verstretch>0</verstretch>
65
+ </sizepolicy>
66
+ </property>
67
+ <property name="title">
68
+ <string>Input data:</string>
69
+ </property>
70
+ <layout class="QGridLayout" name="gridLayout_5">
71
+ <item row="0" column="1" colspan="2">
72
+ <widget class="QgsMapLayerComboBox" name="mMapLayerComboBox_inputLayer">
73
+ <property name="toolTip">
74
+ <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;&lt;span style=&quot; font-weight:600;&quot;&gt;Layer which will be processed.&lt;/span&gt;&lt;/p&gt;&lt;p&gt;Most probably this is your ortophoto or map source (like satellite image from google earth).&lt;br/&gt;Needs to be a raster layer.&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
75
+ </property>
76
+ </widget>
77
+ </item>
78
+ <item row="1" column="1" colspan="2">
79
+ <widget class="QComboBox" name="comboBox_processedAreaSelection">
80
+ <property name="toolTip">
81
+ <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Defines what part of the &amp;quot;Input layer&amp;quot; should be processed.&lt;/p&gt;&lt;p&gt;&lt;br/&gt; - &amp;quot;&lt;span style=&quot; font-style:italic;&quot;&gt;Visible Part&lt;/span&gt;&amp;quot; allows to process the part currently visible on the map canvas.&lt;br/&gt; - &amp;quot;&lt;span style=&quot; font-style:italic;&quot;&gt;Entire Layer&lt;/span&gt;&amp;quot; allows to process the entire ortophoto file&lt;br/&gt; - &amp;quot;&lt;span style=&quot; font-style:italic;&quot;&gt;From Polygons&lt;/span&gt;&amp;quot; allows to select a polygon describing the area to be processed (e.g. if the processed field is a polygon, and we don't want to process outside of it)&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
82
+ </property>
83
+ </widget>
84
+ </item>
85
+ <item row="0" column="0">
86
+ <widget class="QLabel" name="label_10">
87
+ <property name="text">
88
+ <string>Input layer:</string>
89
+ </property>
90
+ </widget>
91
+ </item>
92
+ <item row="1" column="0">
93
+ <widget class="QLabel" name="label_11">
94
+ <property name="text">
95
+ <string>Processed area mask:</string>
96
+ </property>
97
+ </widget>
98
+ </item>
99
+ <item row="2" column="0">
100
+ <widget class="QLabel" name="label_areaMaskLayer">
101
+ <property name="text">
102
+ <string>Area mask layer:</string>
103
+ </property>
104
+ </widget>
105
+ </item>
106
+ <item row="2" column="1" colspan="2">
107
+ <widget class="QgsMapLayerComboBox" name="mMapLayerComboBox_areaMaskLayer">
108
+ <property name="toolTip">
109
+ <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Defines the layer which is being used as a mask for the processing of &amp;quot;Input layer&amp;quot;. &lt;br/&gt;Only pixels within this mask layer will be processed.&lt;/p&gt;&lt;p&gt;Needs to be a vector layer.&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
110
+ </property>
111
+ </widget>
112
+ </item>
113
+ </layout>
114
+ </widget>
115
+ </item>
116
+ <item>
117
+ <widget class="QgsCollapsibleGroupBox" name="mGroupBox">
118
+ <property name="sizePolicy">
119
+ <sizepolicy hsizetype="Preferred" vsizetype="Maximum">
120
+ <horstretch>0</horstretch>
121
+ <verstretch>0</verstretch>
122
+ </sizepolicy>
123
+ </property>
124
+ <property name="title">
125
+ <string>ONNX Model</string>
126
+ </property>
127
+ <layout class="QGridLayout" name="gridLayout_7">
128
+ <item row="1" column="1">
129
+ <widget class="QLineEdit" name="lineEdit_modelPath">
130
+ <property name="enabled">
131
+ <bool>true</bool>
132
+ </property>
133
+ <property name="toolTip">
134
+ <string>Path to the model file</string>
135
+ </property>
136
+ </widget>
137
+ </item>
138
+ <item row="1" column="0">
139
+ <widget class="QLabel" name="label_4">
140
+ <property name="text">
141
+ <string>Model file path:</string>
142
+ </property>
143
+ </widget>
144
+ </item>
145
+ <item row="1" column="2">
146
+ <widget class="QPushButton" name="pushButton_browseModelPath">
147
+ <property name="enabled">
148
+ <bool>true</bool>
149
+ </property>
150
+ <property name="text">
151
+ <string>Browse...</string>
152
+ </property>
153
+ </widget>
154
+ </item>
155
+ <item row="0" column="0">
156
+ <widget class="QLabel" name="label_5">
157
+ <property name="text">
158
+ <string>Model type:</string>
159
+ </property>
160
+ </widget>
161
+ </item>
162
+ <item row="0" column="1">
163
+ <widget class="QComboBox" name="comboBox_modelType">
164
+ <property name="toolTip">
165
+ <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Type of the model (model class) which you want to use.&lt;br/&gt;You should obtain this information along with the model file.&lt;/p&gt;&lt;p&gt;Please refer to the plugin documentation for more details.&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
166
+ </property>
167
+ </widget>
168
+ </item>
169
+ <item row="4" column="0">
170
+ <widget class="QLabel" name="label_3">
171
+ <property name="text">
172
+ <string>Model info:</string>
173
+ </property>
174
+ </widget>
175
+ </item>
176
+ <item row="4" column="1" rowspan="2" colspan="2">
177
+ <widget class="QLabel" name="label_modelInfo">
178
+ <property name="font">
179
+ <font>
180
+ <pointsize>7</pointsize>
181
+ </font>
182
+ </property>
183
+ <property name="styleSheet">
184
+ <string notr="true">color: rgb(135, 135, 133);</string>
185
+ </property>
186
+ <property name="text">
187
+ <string>Model not loaded! Please select its path and click &quot;Load Model&quot; button above first!</string>
188
+ </property>
189
+ <property name="wordWrap">
190
+ <bool>true</bool>
191
+ </property>
192
+ </widget>
193
+ </item>
194
+ <item row="3" column="0" colspan="3">
195
+ <layout class="QHBoxLayout" name="horizontalLayout_10">
196
+ <item>
197
+ <widget class="QPushButton" name="pushButton_reloadModel">
198
+ <property name="toolTip">
199
+ <string>Reload the model given in the line edit above</string>
200
+ </property>
201
+ <property name="text">
202
+ <string>Reload Model</string>
203
+ </property>
204
+ </widget>
205
+ </item>
206
+ <item>
207
+ <widget class="QPushButton" name="pushButton_loadDefaultModelParameters">
208
+ <property name="toolTip">
209
+ <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;&lt;span style=&quot; font-weight:600;&quot;&gt;Load default model parameters.&lt;/span&gt;&lt;/p&gt;&lt;p&gt;ONNX Models can have metadata, which can be parsed and used to set default value for fields in UI, w.g. for tile_size or confidence threshold&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
210
+ </property>
211
+ <property name="text">
212
+ <string>Load default parameters</string>
213
+ </property>
214
+ </widget>
215
+ </item>
216
+ </layout>
217
+ </item>
218
+ </layout>
219
+ </widget>
220
+ </item>
221
+ <item>
222
+ <widget class="QgsCollapsibleGroupBox" name="mGroupBox_2">
223
+ <property name="sizePolicy">
224
+ <sizepolicy hsizetype="Preferred" vsizetype="Maximum">
225
+ <horstretch>0</horstretch>
226
+ <verstretch>0</verstretch>
227
+ </sizepolicy>
228
+ </property>
229
+ <property name="title">
230
+ <string>Input channels mapping</string>
231
+ </property>
232
+ <layout class="QGridLayout" name="gridLayout_4">
233
+ <item row="1" column="0">
234
+ <layout class="QVBoxLayout" name="verticalLayout_inputChannelsMapping"/>
235
+ </item>
236
+ <item row="0" column="0">
237
+ <widget class="QLabel" name="label_12">
238
+ <property name="font">
239
+ <font>
240
+ <bold>true</bold>
241
+ </font>
242
+ </property>
243
+ <property name="text">
244
+ <string>NOTE: This configuration is depending on the input layer and model type. Please make sure to select the &quot;Input layer&quot; and load the model first!</string>
245
+ </property>
246
+ <property name="wordWrap">
247
+ <bool>true</bool>
248
+ </property>
249
+ </widget>
250
+ </item>
251
+ </layout>
252
+ </widget>
253
+ </item>
254
+ <item>
255
+ <widget class="QgsCollapsibleGroupBox" name="mGroupBox_4">
256
+ <property name="sizePolicy">
257
+ <sizepolicy hsizetype="Preferred" vsizetype="Maximum">
258
+ <horstretch>0</horstretch>
259
+ <verstretch>0</verstretch>
260
+ </sizepolicy>
261
+ </property>
262
+ <property name="title">
263
+ <string>Processing parameters</string>
264
+ </property>
265
+ <layout class="QGridLayout" name="gridLayout_8">
266
+ <item row="6" column="0" colspan="3">
267
+ <widget class="QGroupBox" name="groupBox">
268
+ <property name="title">
269
+ <string>Tiles overlap:</string>
270
+ </property>
271
+ <layout class="QGridLayout" name="gridLayout_3">
272
+ <item row="0" column="3">
273
+ <widget class="QRadioButton" name="radioButton_processingTileOverlapPixels">
274
+ <property name="text">
275
+ <string>[px]</string>
276
+ </property>
277
+ </widget>
278
+ </item>
279
+ <item row="0" column="2">
280
+ <spacer name="horizontalSpacer_2">
281
+ <property name="orientation">
282
+ <enum>Qt::Horizontal</enum>
283
+ </property>
284
+ <property name="sizeHint" stdset="0">
285
+ <size>
286
+ <width>40</width>
287
+ <height>20</height>
288
+ </size>
289
+ </property>
290
+ </spacer>
291
+ </item>
292
+ <item row="0" column="1">
293
+ <widget class="QSpinBox" name="spinBox_processingTileOverlapPercentage">
294
+ <property name="sizePolicy">
295
+ <sizepolicy hsizetype="MinimumExpanding" vsizetype="Fixed">
296
+ <horstretch>0</horstretch>
297
+ <verstretch>0</verstretch>
298
+ </sizepolicy>
299
+ </property>
300
+ <property name="minimumSize">
301
+ <size>
302
+ <width>80</width>
303
+ <height>0</height>
304
+ </size>
305
+ </property>
306
+ <property name="toolTip">
307
+ <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Defines how much tiles should overlap on their neighbours during processing.&lt;/p&gt;&lt;p&gt;Especially required for model which introduce distortions on the edges of images, so that it can be removed in postprocessing.&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
308
+ </property>
309
+ <property name="suffix">
310
+ <string/>
311
+ </property>
312
+ <property name="value">
313
+ <number>15</number>
314
+ </property>
315
+ </widget>
316
+ </item>
317
+ <item row="0" column="4">
318
+ <widget class="QSpinBox" name="spinBox_processingTileOverlapPixels">
319
+ <property name="sizePolicy">
320
+ <sizepolicy hsizetype="MinimumExpanding" vsizetype="Fixed">
321
+ <horstretch>0</horstretch>
322
+ <verstretch>0</verstretch>
323
+ </sizepolicy>
324
+ </property>
325
+ <property name="minimumSize">
326
+ <size>
327
+ <width>80</width>
328
+ <height>0</height>
329
+ </size>
330
+ </property>
331
+ <property name="maximum">
332
+ <number>9999999</number>
333
+ </property>
334
+ </widget>
335
+ </item>
336
+ <item row="0" column="0">
337
+ <widget class="QRadioButton" name="radioButton_processingTileOverlapPercentage">
338
+ <property name="text">
339
+ <string>[%]</string>
340
+ </property>
341
+ <property name="checked">
342
+ <bool>true</bool>
343
+ </property>
344
+ </widget>
345
+ </item>
346
+ </layout>
347
+ </widget>
348
+ </item>
349
+ <item row="2" column="1">
350
+ <widget class="QSpinBox" name="spinBox_tileSize_px">
351
+ <property name="enabled">
352
+ <bool>true</bool>
353
+ </property>
354
+ <property name="toolTip">
355
+ <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Size of the images passed to the model.&lt;/p&gt;&lt;p&gt;Usually needs to be the same as the one used during training.&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
356
+ </property>
357
+ <property name="maximum">
358
+ <number>99999</number>
359
+ </property>
360
+ <property name="value">
361
+ <number>512</number>
362
+ </property>
363
+ </widget>
364
+ </item>
365
+ <item row="1" column="1">
366
+ <widget class="QDoubleSpinBox" name="doubleSpinBox_resolution_cm_px">
367
+ <property name="toolTip">
368
+ <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Defines the processing resolution of the &amp;quot;Input layer&amp;quot;.&lt;/p&gt;&lt;p&gt;&lt;br/&gt;&lt;/p&gt;&lt;p&gt;Determines the resolution of images fed into the model, allowing to scale the input images.&lt;/p&gt;&lt;p&gt;Should be similar as the resolution used to train the model.&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
369
+ </property>
370
+ <property name="decimals">
371
+ <number>2</number>
372
+ </property>
373
+ <property name="minimum">
374
+ <double>0.000000000000000</double>
375
+ </property>
376
+ <property name="maximum">
377
+ <double>999999.000000000000000</double>
378
+ </property>
379
+ <property name="value">
380
+ <double>3.000000000000000</double>
381
+ </property>
382
+ </widget>
383
+ </item>
384
+ <item row="2" column="0">
385
+ <widget class="QLabel" name="label_8">
386
+ <property name="text">
387
+ <string>Tile size [px]:</string>
388
+ </property>
389
+ </widget>
390
+ </item>
391
+ <item row="1" column="0">
392
+ <widget class="QLabel" name="label">
393
+ <property name="text">
394
+ <string>Resolution [cm/px]:</string>
395
+ </property>
396
+ </widget>
397
+ </item>
398
+ <item row="0" column="0" colspan="3">
399
+ <widget class="QLabel" name="label_13">
400
+ <property name="font">
401
+ <font>
402
+ <italic>false</italic>
403
+ <bold>true</bold>
404
+ </font>
405
+ </property>
406
+ <property name="text">
407
+ <string>NOTE: These options may be a fixed value for some models</string>
408
+ </property>
409
+ <property name="wordWrap">
410
+ <bool>true</bool>
411
+ </property>
412
+ </widget>
413
+ </item>
414
+ <item row="3" column="0">
415
+ <widget class="QLabel" name="label_19">
416
+ <property name="text">
417
+ <string>Batch size:</string>
418
+ </property>
419
+ </widget>
420
+ </item>
421
+ <item row="3" column="1">
422
+ <widget class="QSpinBox" name="spinBox_batchSize">
423
+ <property name="toolTip">
424
+ <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;The size of the data batch in the model.&lt;/p&gt;&lt;p&gt;The size depends on the computing resources, in particular the available RAM / GPU memory.&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
425
+ </property>
426
+ <property name="minimum">
427
+ <number>1</number>
428
+ </property>
429
+ <property name="maximum">
430
+ <number>9999999</number>
431
+ </property>
432
+ </widget>
433
+ </item>
434
+ <item row="4" column="0">
435
+ <widget class="QCheckBox" name="checkBox_local_cache">
436
+ <property name="toolTip">
437
+ <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;If True, local memory caching is performed - this is helpful when large area maps are processed, but is probably slower than processing in RAM.&lt;/p&gt;&lt;p&gt;&lt;br/&gt;&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
438
+ </property>
439
+ <property name="text">
440
+ <string>Process using local cache</string>
441
+ </property>
442
+ </widget>
443
+ </item>
444
+ </layout>
445
+ </widget>
446
+ </item>
447
+ <item>
448
+ <widget class="QgsCollapsibleGroupBox" name="mGroupBox_segmentationParameters">
449
+ <property name="sizePolicy">
450
+ <sizepolicy hsizetype="Preferred" vsizetype="Maximum">
451
+ <horstretch>0</horstretch>
452
+ <verstretch>0</verstretch>
453
+ </sizepolicy>
454
+ </property>
455
+ <property name="title">
456
+ <string>Segmentation parameters</string>
457
+ </property>
458
+ <layout class="QGridLayout" name="gridLayout_10">
459
+ <item row="2" column="2">
460
+ <widget class="QDoubleSpinBox" name="doubleSpinBox_probabilityThreshold">
461
+ <property name="toolTip">
462
+ <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Minimum required probability for the class to be considered as belonging to this class.&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
463
+ </property>
464
+ <property name="decimals">
465
+ <number>2</number>
466
+ </property>
467
+ <property name="maximum">
468
+ <double>1.000000000000000</double>
469
+ </property>
470
+ <property name="singleStep">
471
+ <double>0.050000000000000</double>
472
+ </property>
473
+ <property name="value">
474
+ <double>0.500000000000000</double>
475
+ </property>
476
+ </widget>
477
+ </item>
478
+ <item row="1" column="1">
479
+ <widget class="QCheckBox" name="checkBox_pixelClassArgmaxEnabled">
480
+ <property name="enabled">
481
+ <bool>false</bool>
482
+ </property>
483
+ <property name="toolTip">
484
+ <string/>
485
+ </property>
486
+ <property name="text">
487
+ <string>Argmax (most probable class only)</string>
488
+ </property>
489
+ <property name="checked">
490
+ <bool>true</bool>
491
+ </property>
492
+ </widget>
493
+ </item>
494
+ <item row="2" column="1">
495
+ <widget class="QCheckBox" name="checkBox_pixelClassEnableThreshold">
496
+ <property name="text">
497
+ <string>Apply class probability threshold:</string>
498
+ </property>
499
+ <property name="checked">
500
+ <bool>true</bool>
501
+ </property>
502
+ </widget>
503
+ </item>
504
+ <item row="3" column="2">
505
+ <widget class="QSpinBox" name="spinBox_dilateErodeSize">
506
+ <property name="toolTip">
507
+ <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Postprocessing option, to remove small areas (small clusters of pixels) belonging to each class, smoothing the predictions.&lt;/p&gt;&lt;p&gt;The actual size (in meters) of the smoothing can be calculated as &amp;quot;Resolution&amp;quot; * &amp;quot;value of this parameter&amp;quot;.&lt;br/&gt;Works as application of dilate and erode operation (twice, in reverse order).&lt;br/&gt;Similar effect to median filter.&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
508
+ </property>
509
+ <property name="value">
510
+ <number>9</number>
511
+ </property>
512
+ </widget>
513
+ </item>
514
+ <item row="0" column="1" colspan="2">
515
+ <widget class="QLabel" name="label_2">
516
+ <property name="font">
517
+ <font>
518
+ <bold>true</bold>
519
+ </font>
520
+ </property>
521
+ <property name="text">
522
+ <string>NOTE: Applicable only if a segmentation model is used</string>
523
+ </property>
524
+ </widget>
525
+ </item>
526
+ <item row="3" column="1">
527
+ <widget class="QCheckBox" name="checkBox_removeSmallAreas">
528
+ <property name="text">
529
+ <string>Remove small segment
530
+ areas (dilate/erode size) [px]:</string>
531
+ </property>
532
+ <property name="checked">
533
+ <bool>true</bool>
534
+ </property>
535
+ </widget>
536
+ </item>
537
+ </layout>
538
+ </widget>
539
+ </item>
540
+ <item>
541
+ <widget class="QgsCollapsibleGroupBox" name="mGroupBox_superresolutionParameters">
542
+ <property name="sizePolicy">
543
+ <sizepolicy hsizetype="Preferred" vsizetype="Maximum">
544
+ <horstretch>0</horstretch>
545
+ <verstretch>0</verstretch>
546
+ </sizepolicy>
547
+ </property>
548
+ <property name="title">
549
+ <string>Superresolution Parameters</string>
550
+ </property>
551
+ <layout class="QGridLayout" name="gridLayout_6">
552
+ <item row="0" column="0">
553
+ <widget class="QLabel" name="label_7">
554
+ <property name="text">
555
+ <string>Upscaling Factor</string>
556
+ </property>
557
+ </widget>
558
+ </item>
559
+ <item row="0" column="1">
560
+ <widget class="QgsSpinBox" name="doubleSpinBox_superresolutionScaleFactor">
561
+ <property name="minimum">
562
+ <number>2</number>
563
+ </property>
564
+ </widget>
565
+ </item>
566
+ <item row="1" column="1">
567
+ <widget class="QgsDoubleSpinBox" name="doubleSpinBox_superresolutionScaling">
568
+ <property name="maximum">
569
+ <double>100000000000.000000000000000</double>
570
+ </property>
571
+ <property name="value">
572
+ <double>255.000000000000000</double>
573
+ </property>
574
+ </widget>
575
+ </item>
576
+ <item row="1" column="0">
577
+ <widget class="QLabel" name="label_14">
578
+ <property name="text">
579
+ <string>Output scaling</string>
580
+ </property>
581
+ </widget>
582
+ </item>
583
+ </layout>
584
+ </widget>
585
+ </item>
586
+ <item>
587
+ <widget class="QgsCollapsibleGroupBox" name="mGroupBox_regressionParameters">
588
+ <property name="sizePolicy">
589
+ <sizepolicy hsizetype="Preferred" vsizetype="Maximum">
590
+ <horstretch>0</horstretch>
591
+ <verstretch>0</verstretch>
592
+ </sizepolicy>
593
+ </property>
594
+ <property name="title">
595
+ <string>Regression parameters</string>
596
+ </property>
597
+ <layout class="QGridLayout" name="gridLayout_11">
598
+ <item row="1" column="1">
599
+ <widget class="QDoubleSpinBox" name="doubleSpinBox_regressionScaling">
600
+ <property name="toolTip">
601
+ <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Scaling factor for model output values.&lt;/p&gt;&lt;p&gt;Each pixel value will be multiplied by this factor.&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
602
+ </property>
603
+ <property name="decimals">
604
+ <number>3</number>
605
+ </property>
606
+ <property name="maximum">
607
+ <double>9999.000000000000000</double>
608
+ </property>
609
+ <property name="value">
610
+ <double>1.000000000000000</double>
611
+ </property>
612
+ </widget>
613
+ </item>
614
+ <item row="1" column="0">
615
+ <widget class="QLabel" name="label_6">
616
+ <property name="text">
617
+ <string>Output scaling
618
+ (keep 1.00 if max output value is 1):</string>
619
+ </property>
620
+ </widget>
621
+ </item>
622
+ </layout>
623
+ </widget>
624
+ </item>
625
+ <item>
626
+ <widget class="QgsCollapsibleGroupBox" name="mGroupBox_recognitionParameters">
627
+ <property name="sizePolicy">
628
+ <sizepolicy hsizetype="Preferred" vsizetype="Maximum">
629
+ <horstretch>0</horstretch>
630
+ <verstretch>0</verstretch>
631
+ </sizepolicy>
632
+ </property>
633
+ <property name="title">
634
+ <string>Recognition parameters</string>
635
+ </property>
636
+ <layout class="QGridLayout" name="gridLayout_12">
637
+ <item row="1" column="1" colspan="2">
638
+ <widget class="QLabel" name="label_23">
639
+ <property name="font">
640
+ <font>
641
+ <bold>true</bold>
642
+ </font>
643
+ </property>
644
+ <property name="text">
645
+ <string>NOTE: Applicable only if a recognition model is used</string>
646
+ </property>
647
+ </widget>
648
+ </item>
649
+ <item row="3" column="1" colspan="2">
650
+ <widget class="QLabel" name="label_24">
651
+ <property name="text">
652
+ <string>Image to localize path:</string>
653
+ </property>
654
+ </widget>
655
+ </item>
656
+ <item row="4" column="1">
657
+ <widget class="QLineEdit" name="lineEdit_recognitionPath"/>
658
+ </item>
659
+ <item row="4" column="2">
660
+ <widget class="QPushButton" name="pushButton_browseQueryImagePath">
661
+ <property name="text">
662
+ <string>Browse</string>
663
+ </property>
664
+ </widget>
665
+ </item>
666
+ </layout>
667
+ </widget>
668
+ </item>
669
+ <item>
670
+ <widget class="QgsCollapsibleGroupBox" name="mGroupBox_detectionParameters">
671
+ <property name="title">
672
+ <string>Detection parameters</string>
673
+ </property>
674
+ <layout class="QGridLayout" name="gridLayout_9">
675
+ <item row="3" column="0">
676
+ <widget class="QLabel" name="label_21">
677
+ <property name="text">
678
+ <string>Confidence:</string>
679
+ </property>
680
+ </widget>
681
+ </item>
682
+ <item row="1" column="0">
683
+ <widget class="QLabel" name="label_16">
684
+ <property name="text">
685
+ <string>Detector type:</string>
686
+ </property>
687
+ </widget>
688
+ </item>
689
+ <item row="1" column="1">
690
+ <widget class="QComboBox" name="comboBox_detectorType"/>
691
+ </item>
692
+ <item row="5" column="0">
693
+ <widget class="QLabel" name="label_22">
694
+ <property name="text">
695
+ <string>IoU threshold:</string>
696
+ </property>
697
+ </widget>
698
+ </item>
699
+ <item row="3" column="1">
700
+ <widget class="QDoubleSpinBox" name="doubleSpinBox_confidence">
701
+ <property name="toolTip">
702
+ <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Minimal confidence of the potential detection, to consider it as a detection.&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
703
+ </property>
704
+ <property name="decimals">
705
+ <number>2</number>
706
+ </property>
707
+ <property name="maximum">
708
+ <double>1.000000000000000</double>
709
+ </property>
710
+ <property name="singleStep">
711
+ <double>0.050000000000000</double>
712
+ </property>
713
+ </widget>
714
+ </item>
715
+ <item row="0" column="0" colspan="2">
716
+ <widget class="QLabel" name="label_20">
717
+ <property name="font">
718
+ <font>
719
+ <bold>true</bold>
720
+ </font>
721
+ </property>
722
+ <property name="text">
723
+ <string>NOTE: Applicable only if a detection model is used</string>
724
+ </property>
725
+ </widget>
726
+ </item>
727
+ <item row="2" column="0" colspan="2">
728
+ <widget class="QLabel" name="label_detectorTypeDescription">
729
+ <property name="font">
730
+ <font>
731
+ <pointsize>9</pointsize>
732
+ </font>
733
+ </property>
734
+ <property name="text">
735
+ <string>Here goes a longer description of detector type...</string>
736
+ </property>
737
+ </widget>
738
+ </item>
739
+ <item row="5" column="1">
740
+ <widget class="QDoubleSpinBox" name="doubleSpinBox_iouScore">
741
+ <property name="toolTip">
742
+ <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Parameter used in Non Maximum Suppression in post processing.&lt;/p&gt;&lt;p&gt;Defines the threshold of overlap between two neighbouring detections, to consider them as the same object.&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
743
+ </property>
744
+ <property name="decimals">
745
+ <number>2</number>
746
+ </property>
747
+ <property name="maximum">
748
+ <double>1.000000000000000</double>
749
+ </property>
750
+ <property name="singleStep">
751
+ <double>0.050000000000000</double>
752
+ </property>
753
+ </widget>
754
+ </item>
755
+ </layout>
756
+ </widget>
757
+ </item>
758
+ <item>
759
+ <widget class="QgsCollapsibleGroupBox" name="mGroupBox_8">
760
+ <property name="sizePolicy">
761
+ <sizepolicy hsizetype="Preferred" vsizetype="Maximum">
762
+ <horstretch>0</horstretch>
763
+ <verstretch>0</verstretch>
764
+ </sizepolicy>
765
+ </property>
766
+ <property name="title">
767
+ <string>Training data export</string>
768
+ </property>
769
+ <property name="checkable">
770
+ <bool>false</bool>
771
+ </property>
772
+ <layout class="QVBoxLayout" name="verticalLayout_4">
773
+ <item>
774
+ <widget class="QLabel" name="label_15">
775
+ <property name="text">
776
+ <string>Note: This group allows to export the data for the training process, with similar data as during inference.</string>
777
+ </property>
778
+ <property name="wordWrap">
779
+ <bool>true</bool>
780
+ </property>
781
+ </widget>
782
+ </item>
783
+ <item>
784
+ <layout class="QVBoxLayout" name="verticalLayout_trainingDataExport"/>
785
+ </item>
786
+ <item>
787
+ <layout class="QHBoxLayout" name="horizontalLayout_2">
788
+ <item>
789
+ <spacer name="horizontalSpacer">
790
+ <property name="orientation">
791
+ <enum>Qt::Horizontal</enum>
792
+ </property>
793
+ <property name="sizeHint" stdset="0">
794
+ <size>
795
+ <width>40</width>
796
+ <height>20</height>
797
+ </size>
798
+ </property>
799
+ </spacer>
800
+ </item>
801
+ <item>
802
+ <widget class="QPushButton" name="pushButton_runTrainingDataExport">
803
+ <property name="toolTip">
804
+ <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Run the export of the data&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
805
+ </property>
806
+ <property name="text">
807
+ <string>Export training data</string>
808
+ </property>
809
+ </widget>
810
+ </item>
811
+ </layout>
812
+ </item>
813
+ <item>
814
+ <widget class="QWidget" name="widget_3" native="true">
815
+ <layout class="QHBoxLayout" name="horizontalLayout">
816
+ <property name="leftMargin">
817
+ <number>0</number>
818
+ </property>
819
+ <property name="topMargin">
820
+ <number>0</number>
821
+ </property>
822
+ <property name="rightMargin">
823
+ <number>0</number>
824
+ </property>
825
+ <property name="bottomMargin">
826
+ <number>0</number>
827
+ </property>
828
+ </layout>
829
+ </widget>
830
+ </item>
831
+ </layout>
832
+ </widget>
833
+ </item>
834
+ <item>
835
+ <spacer name="verticalSpacer">
836
+ <property name="orientation">
837
+ <enum>Qt::Vertical</enum>
838
+ </property>
839
+ <property name="sizeHint" stdset="0">
840
+ <size>
841
+ <width>20</width>
842
+ <height>40</height>
843
+ </size>
844
+ </property>
845
+ </spacer>
846
+ </item>
847
+ </layout>
848
+ </widget>
849
+ </widget>
850
+ </item>
851
+ <item row="1" column="0">
852
+ <layout class="QVBoxLayout" name="verticalLayout">
853
+ <item>
854
+ <widget class="QPushButton" name="pushButton_runInference">
855
+ <property name="toolTip">
856
+ <string>&lt;html&gt;&lt;head/&gt;&lt;body&gt;&lt;p&gt;Run the inference, for the selected above parameters.&lt;/p&gt;&lt;/body&gt;&lt;/html&gt;</string>
857
+ </property>
858
+ <property name="text">
859
+ <string>Run</string>
860
+ </property>
861
+ </widget>
862
+ </item>
863
+ </layout>
864
+ </item>
865
+ </layout>
866
+ </widget>
867
+ </widget>
868
+ <customwidgets>
869
+ <customwidget>
870
+ <class>QgsCollapsibleGroupBox</class>
871
+ <extends>QGroupBox</extends>
872
+ <header>qgscollapsiblegroupbox.h</header>
873
+ <container>1</container>
874
+ </customwidget>
875
+ <customwidget>
876
+ <class>QgsDoubleSpinBox</class>
877
+ <extends>QDoubleSpinBox</extends>
878
+ <header>qgsdoublespinbox.h</header>
879
+ </customwidget>
880
+ <customwidget>
881
+ <class>QgsMapLayerComboBox</class>
882
+ <extends>QComboBox</extends>
883
+ <header>qgsmaplayercombobox.h</header>
884
+ </customwidget>
885
+ <customwidget>
886
+ <class>QgsSpinBox</class>
887
+ <extends>QSpinBox</extends>
888
+ <header>qgsspinbox.h</header>
889
+ </customwidget>
890
+ </customwidgets>
891
+ <resources/>
892
+ <connections/>
893
+ </ui>
zipdeepness/dialogs/packages_installer/packages_installer_dialog.py ADDED
@@ -0,0 +1,359 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This QGIS plugin requires some Python packages to be installed and available.
3
+ This tool allows to install them in a local directory, if they are not installed yet.
4
+ """
5
+
6
+ import importlib
7
+ import logging
8
+ import os
9
+ import subprocess
10
+ import sys
11
+ import traceback
12
+ import urllib
13
+ from dataclasses import dataclass
14
+ from pathlib import Path
15
+ from threading import Thread
16
+ from typing import List
17
+
18
+ from qgis.PyQt import QtCore, uic
19
+ from qgis.PyQt.QtCore import pyqtSignal
20
+ from qgis.PyQt.QtGui import QCloseEvent
21
+ from qgis.PyQt.QtWidgets import QDialog, QMessageBox, QTextBrowser
22
+
23
+ from deepness.common.defines import PLUGIN_NAME
24
+
25
+ PYTHON_VERSION = sys.version_info
26
+ SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
27
+ PLUGIN_ROOT_DIR = os.path.realpath(os.path.abspath(os.path.join(SCRIPT_DIR, '..', '..')))
28
+ PACKAGES_INSTALL_DIR = os.path.join(PLUGIN_ROOT_DIR, f'python{PYTHON_VERSION.major}.{PYTHON_VERSION.minor}')
29
+
30
+
31
+ FORM_CLASS, _ = uic.loadUiType(os.path.join(
32
+ os.path.dirname(__file__), 'packages_installer_dialog.ui'))
33
+
34
+ _ERROR_COLOR = '#ff0000'
35
+
36
+
37
@dataclass
class PackageToInstall:
    """A single pip requirement: the pip package name, its version specifier and its import name."""
    name: str         # name used by pip, e.g. 'opencv-python-headless'
    version: str      # version specifier including the operator, e.g. '==4.6.0' (or '' if unpinned)
    import_name: str  # name used when importing the package in Python, e.g. 'cv2'

    def __str__(self):
        # Rendered as a pip requirement specifier, e.g. 'onnxruntime>=1.10'
        return ''.join((self.name, self.version))
45
+
46
+
47
REQUIREMENTS_PATH = os.path.join(PLUGIN_ROOT_DIR, 'python_requirements/requirements.txt')


def _parse_requirements_versions(requirements_txt: str) -> dict:
    """Parse requirements.txt content into a mapping {library_name: version_specifier}.

    The specifier keeps its comparison operator (e.g. '==1.2.3', '>=1.0');
    a library listed without any version maps to an empty string.
    Comment lines and blank lines are skipped, and environment markers
    (the part after ';') are discarded.
    """
    versions = {}
    for line in requirements_txt.split('\n'):
        if line.startswith('#') or not line.strip():
            continue

        line = line.split(';')[0]  # strip environment markers, e.g. "; sys_platform == 'win32'"

        # check operators in a fixed order; first one found wins (same as the original if/elif chain)
        for operator in ('==', '>=', '<='):
            if operator in line:
                lib, version = line.split(operator)
                versions[lib] = operator + version
                break
        else:
            versions[line] = ''  # no version constraint specified

    return versions


with open(REQUIREMENTS_PATH, 'r') as f:
    raw_txt = f.read()

libraries_versions = _parse_requirements_versions(raw_txt)
71
+
72
+
73
# Packages required on every platform
packages_to_install = [
    PackageToInstall(name='opencv-python-headless', version=libraries_versions['opencv-python-headless'], import_name='cv2'),
]

# Platform-specific packages and the Python executable used to run pip
if sys.platform == "linux" or sys.platform == "linux2":
    packages_to_install += [
        PackageToInstall(name='onnxruntime-gpu', version=libraries_versions['onnxruntime-gpu'], import_name='onnxruntime'),
    ]
    PYTHON_EXECUTABLE_PATH = sys.executable
elif sys.platform == "darwin":  # MacOS
    # NOTE(review): the CPU 'onnxruntime' package is pinned with the version spec of
    # 'onnxruntime-gpu' from requirements.txt - presumably both variants share version
    # numbering; confirm against python_requirements/requirements.txt
    packages_to_install += [
        PackageToInstall(name='onnxruntime', version=libraries_versions['onnxruntime-gpu'], import_name='onnxruntime'),
    ]
    PYTHON_EXECUTABLE_PATH = str(Path(sys.prefix) / 'bin' / 'python3')  # sys.executable yields QGIS in macOS
elif sys.platform == "win32":
    packages_to_install += [
        PackageToInstall(name='onnxruntime', version=libraries_versions['onnxruntime-gpu'], import_name='onnxruntime'),
    ]
    PYTHON_EXECUTABLE_PATH = 'python'  # sys.executable yields QGis.exe in Windows
else:
    raise Exception("Unsupported operating system!")
94
+
95
+
96
class PackagesInstallerDialog(QDialog, FORM_CLASS):
    """
    Dialog which controls the installation process of packages.
    UI design defined in the `packages_installer_dialog.ui` file.

    The installation itself runs in a background thread; all GUI updates from
    that thread must go through the `signal_log_line` signal.
    """

    signal_log_line = pyqtSignal(str)  # we need to use signal because we cannot edit GUI from another thread

    INSTALLATION_IN_PROGRESS = False  # to make sure we will not start the installation twice

    def __init__(self, iface, parent=None):
        super(PackagesInstallerDialog, self).__init__(parent)
        self.setupUi(self)
        self.iface = iface
        self.tb = self.textBrowser_log  # type: QTextBrowser
        self._create_connections()
        self._setup_message()
        self.aborted = False  # set when the user closes the dialog during installation
        self.thread = None  # background thread running the installation

    def move_to_top(self):
        """ Move the window to the top.
        Although if installed from plugin manager, the plugin manager will move itself to the top anyway.
        """
        self.setWindowState((self.windowState() & ~QtCore.Qt.WindowMinimized) | QtCore.Qt.WindowActive)

        if sys.platform == "linux" or sys.platform == "linux2":
            pass
        elif sys.platform == "darwin":  # MacOS
            self.raise_()  # FIXME: this does not really work, the window is still behind the plugin manager
        elif sys.platform == "win32":
            self.activateWindow()
        else:
            raise Exception("Unsupported operating system!")

    def _create_connections(self):
        self.pushButton_close.clicked.connect(self.close)
        self.pushButton_install_packages.clicked.connect(self._run_packages_installation)
        self.signal_log_line.connect(self._log_line)

    def _log_line(self, txt):
        # Runs in the GUI thread (slot of signal_log_line); converts plain text to HTML-ish markup
        txt = txt \
            .replace(' ', '&nbsp;&nbsp;') \
            .replace('\n', '<br>')
        self.tb.append(txt)

    def log(self, txt):
        """Thread-safe logging into the dialog's text browser."""
        self.signal_log_line.emit(txt)

    def _setup_message(self) -> None:
        """Print the initial explanation message and the list of required packages."""
        self.log(f'<h2><span style="color: #000080;"><strong> '
                 f'Plugin {PLUGIN_NAME} - Packages installer </strong></span></h2> \n'
                 f'\n'
                 f'<b>This plugin requires the following Python packages to be installed:</b>')

        for package in packages_to_install:
            self.log(f'\t- {package.name}{package.version}')

        self.log('\n\n'
                 f'If this packages are not installed in the global environment '
                 f'(or environment in which QGIS is started) '
                 f'you can install these packages in the local directory (which is included to the Python path).\n\n'
                 f'This Dialog does it for you! (Though you can still install these packages manually instead).\n'
                 f'<b>Please click "Install packages" button below to install them automatically, </b>'
                 f'or "Test and Close" if you installed them manually...\n')

    def _run_packages_installation(self):
        """Start the installation in a background thread (unless one is already running)."""
        if self.INSTALLATION_IN_PROGRESS:
            self.log(f'Error! Installation already in progress, cannot start again!')
            return
        self.aborted = False
        self.INSTALLATION_IN_PROGRESS = True
        self.thread = Thread(target=self._install_packages)
        self.thread.start()

    def _install_packages(self) -> None:
        """Worker function run in the background thread: bootstrap pip, install packages, validate."""
        self.log('\n\n')
        self.log('=' * 60)
        self.log(f'<h3><b>Attempting to install required packages...</b></h3>')
        os.makedirs(PACKAGES_INSTALL_DIR, exist_ok=True)

        self._install_pip_if_necessary()

        self.log(f'<h3><b>Attempting to install required packages...</b></h3>\n')
        try:
            self._pip_install_packages(packages_to_install)
        except Exception as e:
            msg = (f'\n <span style="color: {_ERROR_COLOR};"><b> '
                   f'Packages installation failed with exception: {e}!\n'
                   f'Please try to install the packages again. </b></span>'
                   f'\nCheck if there is no error related to system packages, '
                   f'which may be required to be installed by your system package manager, e.g. "apt". '
                   f'Copy errors from the stack above and google for possible solutions. '
                   f'Please report these as an issue on the plugin repository tracker!')
            self.log(msg)

        # finally, validate the installation, if there was no error so far...
        self.log('\n\n <b>Installation of required packages finished. Validating installation...</b>')
        self._check_packages_installation_and_log()
        self.INSTALLATION_IN_PROGRESS = False

    def reject(self) -> None:
        # Route Esc/reject through closeEvent so the user gets the confirmation dialog
        self.close()

    def closeEvent(self, event: QCloseEvent):
        """Abort a running installation and, if the packages are still broken, ask for confirmation."""
        self.aborted = True
        if self._check_packages_installation_and_log():
            event.accept()
            return

        res = QMessageBox.question(self.iface.mainWindow(),
                                   f'{PLUGIN_NAME} - skip installation?',
                                   'Are you sure you want to abort the installation of the required python packages? '
                                   'The plugin may not function correctly without them!',
                                   QMessageBox.No, QMessageBox.Yes)
        log_msg = 'User requested to close the dialog, but the packages are not installed correctly!\n'
        if res == QMessageBox.Yes:
            log_msg += 'And the user confirmed to close the dialog, knowing the risk!'
            event.accept()
        else:
            log_msg += 'The user reconsidered their decision, and will try to install the packages again!'
            event.ignore()
        log_msg += '\n'
        self.log(log_msg)

    def _install_pip_if_necessary(self):
        """
        Install pip if not present.
        It happens e.g. in flatpak applications.

        TODO - investigate whether we can also install pip in local directory
        """

        self.log(f'<h4><b>Making sure pip is installed...</b></h4>')
        if check_pip_installed():
            self.log(f'<em>Pip is installed, skipping installation...</em>\n')
            return

        install_pip_command = [PYTHON_EXECUTABLE_PATH, '-m', 'ensurepip']
        self.log(f'<em>Running command to install pip: \n $ {" ".join(install_pip_command)} </em>')
        # BUGFIX: inherit the current environment instead of replacing it entirely.
        # Passing only the extra variable would wipe PATH and friends, breaking the
        # subprocess (especially on Windows, where PYTHON_EXECUTABLE_PATH is just
        # 'python' and needs PATH for resolution).
        env = dict(os.environ)
        env['SETUPTOOLS_USE_DISTUTILS'] = 'stdlib'
        with subprocess.Popen(install_pip_command,
                              stdout=subprocess.PIPE,
                              universal_newlines=True,
                              stderr=subprocess.STDOUT,
                              env=env) as process:
            try:
                self._do_process_output_logging(process)
            except InterruptedError as e:
                self.log(str(e))
                return False

        if process.returncode != 0:
            msg = (f'<span style="color: {_ERROR_COLOR};"><b>'
                   f'pip installation failed! Consider installing it manually.'
                   f'</b></span>')  # BUGFIX: was '<b>' - unclosed HTML bold tag in the log
            self.log(msg)
        self.log('\n')

    def _pip_install_packages(self, packages: List[PackageToInstall]) -> None:
        """Run `pip install -U --target=...` for all required packages.

        Raises RuntimeError if pip exits with a non-zero return code.
        """
        cmd = [PYTHON_EXECUTABLE_PATH, '-m', 'pip', 'install', '-U', f'--target={PACKAGES_INSTALL_DIR}']
        cmd_string = ' '.join(cmd)

        for pck in packages:
            cmd.append(f"{pck}")
            cmd_string += f" {pck}"  # BUGFIX: keep a space between the packages in the logged command

        self.log(f'<em>Running command: \n $ {cmd_string} </em>')
        with subprocess.Popen(cmd,
                              stdout=subprocess.PIPE,
                              universal_newlines=True,
                              stderr=subprocess.STDOUT) as process:
            self._do_process_output_logging(process)

        if process.returncode != 0:
            raise RuntimeError('Installation with pip failed')

        msg = (f'\n<b>'
               f'Packages installed correctly!'
               f'</b>\n\n')  # BUGFIX: was '<b>' - unclosed HTML bold tag in the log
        self.log(msg)

    def _do_process_output_logging(self, process: subprocess.Popen) -> None:
        """Stream the subprocess stdout into the log; abort when the dialog was closed.

        :param process: instance of 'subprocess.Popen'
        """
        for stdout_line in iter(process.stdout.readline, ""):
            if stdout_line.isspace():
                continue
            txt = f'<span style="color: #999999;">{stdout_line.rstrip(os.linesep)}</span>'
            self.log(txt)
            if self.aborted:
                raise InterruptedError('Installation aborted by user')

    def _check_packages_installation_and_log(self) -> bool:
        """Check whether all packages import correctly; log the result (with traceback on failure)."""
        packages_ok = are_packages_importable()
        self.pushButton_install_packages.setEnabled(not packages_ok)

        if packages_ok:
            msg1 = f'All required packages are importable! You can close this window now!'
            self.log(msg1)
            return True

        try:
            import_packages()
            raise Exception("Unexpected successful import of packages?!? It failed a moment ago, we shouldn't be here!")
        except Exception:
            msg_base = '<b>Python packages required by the plugin could not be loaded due to the following error:</b>'
            logging.exception(msg_base)
            tb = traceback.format_exc()
            msg1 = (f'<span style="color: {_ERROR_COLOR};">'
                    f'{msg_base} \n '
                    f'{tb}\n\n'
                    f'<b>Please try installing the packages again.<b>'
                    f'</span>')
            self.log(msg1)

        return False
314
+
315
+
316
+ dialog = None
317
+
318
+
319
def import_package(package: PackageToInstall):
    """Import a single required package by its import name; raises ImportError when unavailable."""
    importlib.import_module(package.import_name)
321
+
322
+
323
def import_packages():
    """Try to import every required package; raises on the first one that fails."""
    for package in packages_to_install:
        import_package(package)
326
+
327
+
328
def are_packages_importable() -> bool:
    """Report whether every required package imports cleanly.

    Returns True when all imports succeed; on any failure the exception is
    logged (with traceback) and False is returned.
    """
    try:
        import_packages()
        return True
    except Exception:
        logging.exception(f'Python packages required by the plugin could not be loaded due to the following error:')
        return False
336
+
337
+
338
def check_pip_installed() -> bool:
    """Return True when pip can be invoked through the target Python interpreter."""
    pip_version_command = [PYTHON_EXECUTABLE_PATH, '-m', 'pip', '--version']
    try:
        subprocess.check_output(pip_version_command)
    except subprocess.CalledProcessError:
        return False
    return True
344
+
345
+
346
def check_required_packages_and_install_if_necessary(iface):
    """Entry point called at plugin startup.

    Ensures the local packages directory exists and is on `sys.path`; if the
    required packages still cannot be imported, shows the installer dialog.

    :param iface: QGIS interface object, used as the parent for dialog message boxes
    """
    os.makedirs(PACKAGES_INSTALL_DIR, exist_ok=True)
    if PACKAGES_INSTALL_DIR not in sys.path:
        sys.path.append(PACKAGES_INSTALL_DIR)  # TODO: check for a less intrusive way to do this

    if are_packages_importable():
        # if packages are importable we are fine, nothing more to do then
        return

    # keep a module-level reference so the dialog is not garbage-collected while visible
    global dialog
    dialog = PackagesInstallerDialog(iface)
    dialog.setWindowModality(QtCore.Qt.WindowModal)
    dialog.show()
    dialog.move_to_top()
zipdeepness/dialogs/packages_installer/packages_installer_dialog.ui ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <ui version="4.0">
3
+ <class>PackagesInstallerDialog</class>
4
+ <widget class="QDialog" name="PackagesInstallerDialog">
5
+ <property name="geometry">
6
+ <rect>
7
+ <x>0</x>
8
+ <y>0</y>
9
+ <width>693</width>
10
+ <height>494</height>
11
+ </rect>
12
+ </property>
13
+ <property name="windowTitle">
14
+ <string>Deepness - Packages Installer Dialog</string>
15
+ </property>
16
+ <layout class="QGridLayout" name="gridLayout_2">
17
+ <item row="0" column="0">
18
+ <layout class="QGridLayout" name="gridLayout">
19
+ <item row="0" column="0">
20
+ <widget class="QTextBrowser" name="textBrowser_log"/>
21
+ </item>
22
+ </layout>
23
+ </item>
24
+ <item row="1" column="0">
25
+ <layout class="QHBoxLayout" name="horizontalLayout">
26
+ <item>
27
+ <spacer name="horizontalSpacer">
28
+ <property name="orientation">
29
+ <enum>Qt::Horizontal</enum>
30
+ </property>
31
+ <property name="sizeHint" stdset="0">
32
+ <size>
33
+ <width>40</width>
34
+ <height>20</height>
35
+ </size>
36
+ </property>
37
+ </spacer>
38
+ </item>
39
+ <item>
40
+ <widget class="QPushButton" name="pushButton_install_packages">
41
+ <property name="font">
42
+ <font>
43
+ <weight>75</weight>
44
+ <bold>true</bold>
45
+ </font>
46
+ </property>
47
+ <property name="text">
48
+ <string>Install packages</string>
49
+ </property>
50
+ </widget>
51
+ </item>
52
+ <item>
53
+ <widget class="QPushButton" name="pushButton_close">
54
+ <property name="text">
55
+ <string>Test and Close</string>
56
+ </property>
57
+ </widget>
58
+ </item>
59
+ </layout>
60
+ </item>
61
+ </layout>
62
+ </widget>
63
+ <resources/>
64
+ <connections/>
65
+ </ui>
zipdeepness/dialogs/resizable_message_box.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from qgis.PyQt.QtWidgets import QMessageBox, QTextEdit
2
+
3
+
4
class ResizableMessageBox(QMessageBox):
    """A QMessageBox that the user can resize.

    QMessageBox re-applies a fixed-size constraint during layout, so simply
    enabling the size grip is not enough; the maximum sizes are reset on every
    layout/resize event to keep the box resizable.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.setSizeGripEnabled(True)

    def event(self, event):
        # Intercept layout and resize events to undo the fixed-size constraint
        if event.type() in (event.LayoutRequest, event.Resize):
            if event.type() == event.Resize:
                res = super().event(event)
            else:
                # LayoutRequest: skip the default handling, which would re-fix the size
                res = False
            details = self.findChild(QTextEdit)
            if details:
                details.setMaximumSize(16777215, 16777215)  # 16777215 == QWIDGETSIZE_MAX
            self.setMaximumSize(16777215, 16777215)
            return res
        return super().event(event)
zipdeepness/images/get_image_path.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This file contains image related functionalities
3
+ """
4
+
5
+ import os
6
+
7
+ SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
8
+
9
+
10
+ def get_icon_path() -> str:
11
+ """ Get path to the file with the main plugin icon
12
+
13
+ Returns
14
+ -------
15
+ str
16
+ Path to the icon
17
+ """
18
+ return get_image_path('icon.png')
19
+
20
+
21
+ def get_image_path(image_name) -> str:
22
+ """ Get path to an image resource, accessing it just by the name of the file (provided it is in the common directory)
23
+
24
+ Returns
25
+ -------
26
+ str
27
+ file path
28
+ """
29
+ return os.path.join(SCRIPT_DIR, image_name)
zipdeepness/images/icon.png ADDED

Git LFS Details

  • SHA256: 1dffd56a93df4d230e3b5b6521e3ddce178423d5c5d5692240f2f1d27bd9d070
  • Pointer size: 131 Bytes
  • Size of remote file: 167 kB
zipdeepness/landcover_model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:12ae15a9bcc5e28f675e9c829cacbf2ab81776382f92e28645b2f91de3491d93
3
+ size 12336500
zipdeepness/metadata.txt ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file contains metadata for your plugin.
2
+
3
+ # This file should be included when you package your plugin.
+ # Mandatory items:
4
+
5
+ [general]
6
+ name=Deepness: Deep Neural Remote Sensing
7
+ qgisMinimumVersion=3.22
8
+ description=Inference of deep neural network models (ONNX) for segmentation, detection and regression
9
+ version=0.7.0
10
+ author=PUT Vision
11
+ email=przemyslaw.aszkowski@gmail.com
12
+
13
+ about=
14
+ Deepness plugin allows to easily perform segmentation, detection and regression on raster ortophotos with custom ONNX Neural Network models, bringing the power of deep learning to casual users.
15
+ Features highlights:
16
+ - processing any raster layer (custom ortophoto from file or layers from online providers, e.g Google Satellite)
17
+ - limiting processing range to predefined area (visible part or area defined by vector layer polygons)
18
+ - common types of models are supported: segmentation, regression, detection
19
+ - integration with layers (both for input data and model output layers). Once an output layer is created, it can be saved as a file manually
20
+ - model ZOO under development (planes detection on Bing Aerial, Corn field damage, Oil Storage tanks detection, cars detection, ...)
21
+ - training data Export Tool - exporting raster and mask as small tiles
22
+ - parametrization of the processing for advanced users (spatial resolution, overlap, postprocessing)
23
+ Plugin requires external python packages to be installed. After the first plugin startup, a Dialog will show, to assist in this process. Please visit plugin the documentation for details.
24
+
25
+ tracker=https://github.com/PUTvision/qgis-plugin-deepness/issues
26
+ repository=https://github.com/PUTvision/qgis-plugin-deepness
27
+ # End of mandatory metadata
28
+
29
+ # Recommended items:
30
+
31
+ hasProcessingProvider=no
32
+ # Uncomment the following line and add your changelog:
33
+ # changelog=
34
+
35
+ # Tags are comma separated with spaces allowed
36
+ tags=segmentation,detection,classification,machine learning,onnx,neural network,deep learning,regression,deepness,analysis,remote sensing,supervised classification
37
+
38
+ homepage=https://qgis-plugin-deepness.readthedocs.io/
39
+ category=Plugins
40
+ icon=images/icon.png
41
+ # experimental flag
42
+ experimental=False
43
+
44
+ # deprecated flag (applies to the whole plugin, not just a single version)
45
+ deprecated=False
46
+
47
+ # Since QGIS 3.8, a comma separated list of plugins to be installed
48
+ # (or upgraded) can be specified.
49
+ # Check the documentation for more information.
50
+ # plugin_dependencies=
51
+
52
+ # Category of the plugin: Raster, Vector, Database or Web
53
+ # category=
54
+
55
+ # If the plugin can run on QGIS Server.
56
+ server=False
zipdeepness/processing/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ """ Main submodule for image processing and deep learning things.
2
+ """
zipdeepness/processing/extent_utils.py ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This file contains utilities related to Extent processing
3
+ """
4
+
5
+ import logging
6
+
7
+ from qgis.core import QgsCoordinateTransform, QgsRasterLayer, QgsRectangle, QgsVectorLayer
8
+ from qgis.gui import QgsMapCanvas
9
+
10
+ from deepness.common.errors import OperationFailedException
11
+ from deepness.common.processing_parameters.map_processing_parameters import MapProcessingParameters, ProcessedAreaType
12
+ from deepness.processing.processing_utils import BoundingBox, convert_meters_to_rlayer_units
13
+
14
+
15
def round_extent_to_rlayer_grid(extent: QgsRectangle, rlayer: QgsRasterLayer) -> QgsRectangle:
    """Round an extent to the pixel "grid" of a raster layer.

    The grid starts at rlayer_extent.xMinimum & yMinimum and has a spacing of
    rlayer_units_per_pixel, so the returned rectangle is aligned with whole
    pixels of ``rlayer``.

    :param extent: Extent to round, needs to be in rlayer CRS units
    :param rlayer: layer determining the grid
    :return: the extent snapped to the layer's pixel grid
    """
    # For some ortophotos grid spacing is close to (1.0, 1.0), while it shouldn't be.
    # Seems like it is some bug or special "feature" that I do not understand.
    # In that case, just return the extent as it is.
    grid_spacing = rlayer.rasterUnitsPerPixelX(), rlayer.rasterUnitsPerPixelY()
    if abs(grid_spacing[0] - 1.0) < 0.0001 and abs(grid_spacing[1] - 1.0) < 0.0001:
        logging.warning("Grid spacing is close to 1.0, which is suspicious, returning extent as it is. "
                        "It shouldn't be a problem for most cases.")
        return extent

    grid_start = rlayer.extent().xMinimum(), rlayer.extent().yMinimum()

    # Snap each edge towards the grid origin. int() truncates; offsets from
    # grid_start are expected to be non-negative for extents within the layer.
    x_min = grid_start[0] + int((extent.xMinimum() - grid_start[0]) / grid_spacing[0]) * grid_spacing[0]
    x_max = grid_start[0] + int((extent.xMaximum() - grid_start[0]) / grid_spacing[0]) * grid_spacing[0]
    y_min = grid_start[1] + int((extent.yMinimum() - grid_start[1]) / grid_spacing[1]) * grid_spacing[1]
    y_max = grid_start[1] + int((extent.yMaximum() - grid_start[1]) / grid_spacing[1]) * grid_spacing[1]

    new_extent = QgsRectangle(x_min, y_min, x_max, y_max)
    return new_extent
41
+
42
+
43
def calculate_extended_processing_extent(base_extent: QgsRectangle,
                                         params: MapProcessingParameters,
                                         rlayer: QgsVectorLayer,
                                         rlayer_units_per_pixel: float) -> QgsRectangle:
    """Calculate the "extended" processing extent, which is the full processing area, rounded to the tile size and overlap

    Parameters
    ----------
    base_extent : QgsRectangle
        Base extent of the processed ortophoto, which is not rounded to the tile size
    params : MapProcessingParameters
        Processing parameters
    rlayer : QgsVectorLayer
        mask layer
        NOTE(review): annotated as QgsVectorLayer, but only ``.extent()`` is used and
        MapProcessor passes its raster layer here - confirm the intended type.
    rlayer_units_per_pixel : float
        how many rlayer CRS units are in 1 pixel

    Returns
    -------
    QgsRectangle
        The "extended" processing extent
    """

    # first try to add pixels at every border - same as half-overlap for other tiles
    additional_pixels = params.processing_overlap_px // 2
    additional_pixels_in_units = additional_pixels * rlayer_units_per_pixel

    # grow the base extent symmetrically by the half-overlap margin
    tmp_extent = QgsRectangle(
        base_extent.xMinimum() - additional_pixels_in_units,
        base_extent.yMinimum() - additional_pixels_in_units,
        base_extent.xMaximum() + additional_pixels_in_units,
        base_extent.yMaximum() + additional_pixels_in_units,
    )

    rlayer_extent_infinite = rlayer.extent().isEmpty()  # empty extent for infinite layers
    if not rlayer_extent_infinite:
        # do not grow beyond the data actually available in the layer
        tmp_extent = tmp_extent.intersect(rlayer.extent())

    # then add borders to have the extent be equal to N * stride + tile_size, where N is a natural number
    tile_size_px = params.tile_size_px
    stride_px = params.processing_stride_px  # stride in pixels

    current_x_pixels = round(tmp_extent.width() / rlayer_units_per_pixel)
    if current_x_pixels <= tile_size_px:
        missing_pixels_x = tile_size_px - current_x_pixels  # just one tile
    else:
        # pixels left over after the last full stride; pad to complete it (modulo keeps 0 at 0)
        pixels_in_last_stride_x = (current_x_pixels - tile_size_px) % stride_px
        missing_pixels_x = (stride_px - pixels_in_last_stride_x) % stride_px

    current_y_pixels = round(tmp_extent.height() / rlayer_units_per_pixel)
    if current_y_pixels <= tile_size_px:
        missing_pixels_y = tile_size_px - current_y_pixels  # just one tile
    else:
        pixels_in_last_stride_y = (current_y_pixels - tile_size_px) % stride_px
        missing_pixels_y = (stride_px - pixels_in_last_stride_y) % stride_px

    # padding is applied only on the max side (right/top); the min side stays grid-aligned
    missing_pixels_x_in_units = missing_pixels_x * rlayer_units_per_pixel
    missing_pixels_y_in_units = missing_pixels_y * rlayer_units_per_pixel
    tmp_extent.setXMaximum(tmp_extent.xMaximum() + missing_pixels_x_in_units)
    tmp_extent.setYMaximum(tmp_extent.yMaximum() + missing_pixels_y_in_units)

    extended_extent = tmp_extent
    return extended_extent
106
+
107
+
108
def is_extent_infinite_or_too_big(rlayer: QgsRasterLayer) -> bool:
    """Tell whether the layer covers the whole earth (infinite extent) or is too big for processing."""
    extent = rlayer.extent()

    # Layers without defined bounds (e.g. web services) report an empty extent.
    if extent.isEmpty():
        return True

    # Express the extent area in square meters and compare it against roughly
    # 1/3 of the earth surface (the magic value comes from the Bing aerial map area).
    meters_per_unit = 1 / convert_meters_to_rlayer_units(rlayer, 1)
    area_m2 = extent.area() * meters_per_unit ** 2
    return area_m2 > (1606006962349394 // 10)
122
+
123
+
124
def calculate_base_processing_extent_in_rlayer_crs(map_canvas: QgsMapCanvas,
                                                   rlayer: QgsRasterLayer,
                                                   vlayer_mask: QgsVectorLayer,
                                                   params: MapProcessingParameters) -> QgsRectangle:
    """ Determine the Base Extent of processing (Extent (rectangle) in which the actual required area is contained)

    Parameters
    ----------
    map_canvas : QgsMapCanvas
        currently visible map in the UI
    rlayer : QgsRasterLayer
        ortophotomap which is being processed
    vlayer_mask : QgsVectorLayer
        mask layer containing the processed area (used only for FROM_POLYGONS)
    params : MapProcessingParameters
        Processing parameters (determines which area type is used)

    Returns
    -------
    QgsRectangle
        Base Extent of processing, expressed in the rlayer CRS

    Raises
    ------
    OperationFailedException
        when asked to process an entire layer whose extent is undefined or covers most of the earth
    """

    rlayer_extent = rlayer.extent()
    processed_area_type = params.processed_area_type
    rlayer_extent_infinite = is_extent_infinite_or_too_big(rlayer)

    if processed_area_type == ProcessedAreaType.ENTIRE_LAYER:
        expected_extent = rlayer_extent
        if rlayer_extent_infinite:
            msg = "Cannot process entire layer - layer extent is not defined or too big. " \
                  "Make sure you are not processing 'Entire layer' which covers entire earth surface!!"
            raise OperationFailedException(msg)
    elif processed_area_type == ProcessedAreaType.FROM_POLYGONS:
        expected_extent_in_vlayer_crs = vlayer_mask.extent()
        if vlayer_mask.crs() == rlayer.crs():
            expected_extent = expected_extent_in_vlayer_crs
        else:
            # reproject the mask extent into the raster layer CRS
            t = QgsCoordinateTransform()
            t.setSourceCrs(vlayer_mask.crs())
            t.setDestinationCrs(rlayer.crs())
            expected_extent = t.transform(expected_extent_in_vlayer_crs)
    elif processed_area_type == ProcessedAreaType.VISIBLE_PART:
        # transform visible extent from mapCanvas CRS to layer CRS
        active_extent_in_canvas_crs = map_canvas.extent()
        canvas_crs = map_canvas.mapSettings().destinationCrs()
        t = QgsCoordinateTransform()
        t.setSourceCrs(canvas_crs)
        t.setDestinationCrs(rlayer.crs())
        expected_extent = t.transform(active_extent_in_canvas_crs)
    else:
        raise Exception("Invalid processed area type!")

    # align the requested extent with the raster pixel grid
    expected_extent = round_extent_to_rlayer_grid(extent=expected_extent, rlayer=rlayer)

    if rlayer_extent_infinite:
        base_extent = expected_extent
    else:
        # never process outside of the available raster data
        base_extent = expected_extent.intersect(rlayer_extent)

    return base_extent
185
+
186
+
187
def calculate_base_extent_bbox_in_full_image(image_size_y: int,
                                             base_extent: QgsRectangle,
                                             extended_extent: QgsRectangle,
                                             rlayer_units_per_pixel: float) -> BoundingBox:
    """Calculate how the base extent fits in extended_extent in terms of pixel position

    Parameters
    ----------
    image_size_y : int
        Size of the image in y axis in pixels
    base_extent : QgsRectangle
        Base Extent of processing
    extended_extent : QgsRectangle
        Extended extent of processing (contains base_extent)
    rlayer_units_per_pixel : float
        Number of layer units per a single image pixel

    Returns
    -------
    BoundingBox
        Bounding box describing position of base extent in the extended extent
    """
    # The y axis is flipped between geographic coordinates (y grows up) and
    # image coordinates (row 0 at the top), hence the `image_size_y - 1 - ...`.
    # Should round without a rest anyway, as extents are aligned to rlayer grid.
    base_extent_bbox_in_full_image = BoundingBox(
        x_min=round((base_extent.xMinimum() - extended_extent.xMinimum()) / rlayer_units_per_pixel),
        y_min=image_size_y - 1 - round((base_extent.yMaximum() - extended_extent.yMinimum()) / rlayer_units_per_pixel - 1),
        x_max=round((base_extent.xMaximum() - extended_extent.xMinimum()) / rlayer_units_per_pixel) - 1,
        y_max=image_size_y - 1 - round((base_extent.yMinimum() - extended_extent.yMinimum()) / rlayer_units_per_pixel),
    )
    return base_extent_bbox_in_full_image
zipdeepness/processing/map_processor/__init__.py ADDED
File without changes
zipdeepness/processing/map_processor/map_processing_result.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ This file defines possible outcomes of map processing
2
+ """
3
+
4
+
5
+ from typing import Callable, Optional
6
+
7
+
8
class MapProcessingResult:
    """Base class describing the outcome of a finished map-processing task."""

    def __init__(self, message: str, gui_delegate: Optional[Callable] = None):
        """Store the user-facing message and an optional GUI callback.

        :param message: text to be shown to the user
        :param gui_delegate: callable to be invoked in the GUI thread, because it is
            not safe to call GUI functions from other threads
        """
        self.message: str = message
        self.gui_delegate: Optional[Callable] = gui_delegate
20
+
21
+
22
class MapProcessingResultSuccess(MapProcessingResult):
    """Outcome reported when processing completed without errors."""

    def __init__(self, message: str = '', gui_delegate: Optional[Callable] = None):
        """Forward the optional message and GUI callback to the base class."""
        super().__init__(message=message, gui_delegate=gui_delegate)
29
+
30
+
31
class MapProcessingResultFailed(MapProcessingResult):
    """Outcome reported when processing ended with an error."""

    def __init__(self, error_message: str, exception=None):
        """Store the error message and, optionally, the exception that caused the failure."""
        super().__init__(error_message)
        # original exception (if any), kept for debugging / error reporting
        self.exception = exception
39
+
40
+
41
class MapProcessingResultCanceled(MapProcessingResult):
    """Outcome reported when the processing was aborted by the user."""

    def __init__(self):
        """A canceled run carries no message."""
        super().__init__(message='')
zipdeepness/processing/map_processor/map_processor.py ADDED
@@ -0,0 +1,237 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ This file implements core map processing logic """
2
+
3
+ import logging
4
+ from typing import List, Optional, Tuple
5
+
6
+ import numpy as np
7
+ from qgis.core import QgsRasterLayer, QgsTask, QgsVectorLayer
8
+ from qgis.gui import QgsMapCanvas
9
+ from qgis.PyQt.QtCore import pyqtSignal
10
+
11
+ from deepness.common.defines import IS_DEBUG
12
+ from deepness.common.lazy_package_loader import LazyPackageLoader
13
+ from deepness.common.processing_parameters.map_processing_parameters import MapProcessingParameters, ProcessedAreaType
14
+ from deepness.common.temp_files_handler import TempFilesHandler
15
+ from deepness.processing import extent_utils, processing_utils
16
+ from deepness.processing.map_processor.map_processing_result import MapProcessingResult, MapProcessingResultFailed
17
+ from deepness.processing.tile_params import TileParams
18
+
19
+ cv2 = LazyPackageLoader('cv2')
20
+
21
+
22
class MapProcessor(QgsTask):
    """
    Base class for processing the ortophoto with parameters received from the UI.

    Actual processing is done in specialized child classes. Here we have the "core" functionality,
    like iterating over single tiles.

    Objects of this class are created and managed by the 'Deepness'.
    Work is done within QgsTask, for seamless integration with QGis GUI and logic.
    """

    # error message if finished with error, empty string otherwise
    finished_signal = pyqtSignal(MapProcessingResult)
    # request to show an image. Params: (image, window_name)
    show_img_signal = pyqtSignal(object, str)

    def __init__(self,
                 rlayer: QgsRasterLayer,
                 vlayer_mask: Optional[QgsVectorLayer],
                 map_canvas: QgsMapCanvas,
                 params: MapProcessingParameters):
        """ init
        Parameters
        ----------
        rlayer : QgsRasterLayer
            Raster layer which is being processed
        vlayer_mask : Optional[QgsVectorLayer]
            Vector layer with outline of area which should be processed (within rlayer)
        map_canvas : QgsMapCanvas
            active map canvas (in the GUI), required if processing visible map area
        params : MapProcessingParameters
            see MapProcessingParameters
        """
        QgsTask.__init__(self, self.__class__.__name__)
        self._processing_finished = False
        self.rlayer = rlayer
        self.vlayer_mask = vlayer_mask
        self.params = params
        self._assert_qgis_doesnt_need_reload()
        # placeholder result, overwritten by run(); guards against reporting "no result"
        self._processing_result = MapProcessingResultFailed('Failed to get processing result!')

        self.stride_px = self.params.processing_stride_px  # stride in pixels
        self.rlayer_units_per_pixel = processing_utils.convert_meters_to_rlayer_units(
            self.rlayer, self.params.resolution_m_per_px)  # number of rlayer units for one tile pixel

        # temp-file handler used only when the user requested disk-backed (memmapped) caching
        self.file_handler = TempFilesHandler() if self.params.local_cache else None

        # extent in which the actual required area is contained, without additional extensions, rounded to rlayer grid
        self.base_extent = extent_utils.calculate_base_processing_extent_in_rlayer_crs(
            map_canvas=map_canvas,
            rlayer=self.rlayer,
            vlayer_mask=self.vlayer_mask,
            params=self.params)

        # extent which should be used during model inference, as it includes extra margins to have full tiles,
        # rounded to rlayer grid
        self.extended_extent = extent_utils.calculate_extended_processing_extent(
            base_extent=self.base_extent,
            rlayer=self.rlayer,
            params=self.params,
            rlayer_units_per_pixel=self.rlayer_units_per_pixel)

        # processed rlayer dimensions (for extended_extent)
        self.img_size_x_pixels = round(self.extended_extent.width() / self.rlayer_units_per_pixel)  # how many columns (x)
        self.img_size_y_pixels = round(self.extended_extent.height() / self.rlayer_units_per_pixel)  # how many rows (y)

        # Coordinate of base image within extended image (images for base_extent and extended_extent)
        self.base_extent_bbox_in_full_image = extent_utils.calculate_base_extent_bbox_in_full_image(
            image_size_y=self.img_size_y_pixels,
            base_extent=self.base_extent,
            extended_extent=self.extended_extent,
            rlayer_units_per_pixel=self.rlayer_units_per_pixel)

        # Number of tiles in x and y dimensions which will be used during processing
        # As we are using "extended_extent" this should divide without any rest
        self.x_bins_number = round((self.img_size_x_pixels - self.params.tile_size_px) / self.stride_px) + 1
        self.y_bins_number = round((self.img_size_y_pixels - self.params.tile_size_px) / self.stride_px) + 1

        # Mask determining area to process (within extended_extent coordinates)
        self.area_mask_img = processing_utils.create_area_mask_image(
            vlayer_mask=self.vlayer_mask,
            rlayer=self.rlayer,
            extended_extent=self.extended_extent,
            rlayer_units_per_pixel=self.rlayer_units_per_pixel,
            image_shape_yx=(self.img_size_y_pixels, self.img_size_x_pixels),
            files_handler=self.file_handler)  # type: Optional[np.ndarray]

        # final result image, set exactly once by a child class via set_results_img()
        self._result_img = None

    def set_results_img(self, img):
        """Store the final result image; may be called only once per processing run."""
        if self._result_img is not None:
            raise Exception("Result image already created!")

        self._result_img = img

    def get_result_img(self):
        """Return the final result image; raises if processing has not produced one yet."""
        if self._result_img is None:
            raise Exception("Result image not yet created!")

        return self._result_img

    def _assert_qgis_doesnt_need_reload(self):
        """ If the plugin is somehow invalid, it cannot compare the enums correctly
        I suppose it could be fixed somehow, but no need to investigate it now,
        it affects only the development
        """

        if self.params.processed_area_type.__class__ != ProcessedAreaType:
            raise Exception("Disable plugin, restart QGis and enable plugin again!")

    def run(self):
        """QgsTask worker entry point - wraps the child-class _run() with error handling.

        Always returns True so that finished() receives the stored result object
        rather than QGIS treating an exception as a silent failure.
        """
        try:
            self._processing_result = self._run()
        except Exception as e:
            logging.exception("Error occurred in MapProcessor:")
            msg = "Unhandled exception occurred. See Python Console for details"
            self._processing_result = MapProcessingResultFailed(msg, exception=e)
            if IS_DEBUG:
                raise e

        self._processing_finished = True
        return True

    def _run(self) -> MapProcessingResult:
        """Actual processing - to be implemented by specialized child classes."""
        raise NotImplementedError('Base class not implemented!')

    def finished(self, result: bool):
        """Called by the QgsTask machinery after run() completes.

        Runs the result's gui_delegate here (safe place for GUI work) and
        notifies listeners via finished_signal.
        """
        if result:
            gui_delegate = self._processing_result.gui_delegate
            if gui_delegate is not None:
                gui_delegate()
        else:
            self._processing_result = MapProcessingResultFailed("Unhandled processing error!")
        self.finished_signal.emit(self._processing_result)

    @staticmethod
    def is_busy():
        # NOTE(review): always reports busy - presumably a stub; confirm callers' expectations
        return True

    def _show_image(self, img, window_name='img'):
        """Request the GUI thread to display an image (debugging aid)."""
        self.show_img_signal.emit(img, window_name)

    def limit_extended_extent_image_to_base_extent_with_mask(self, full_img):
        """
        Limit an image which is for extended_extent to the base_extent image.
        If a limiting polygon was used for processing, it will be also applied.
        :param full_img: channels-first array covering the extended extent
        :return: view of full_img cropped to the base extent (mask already applied)
        """
        # TODO look for some inplace operation to save memory
        # cv2.copyTo(src=full_img, mask=area_mask_img, dst=full_img) # this doesn't work due to implementation details

        # zero out pixels outside of the area mask, channel by channel
        for i in range(full_img.shape[0]):
            full_img[i] = cv2.copyTo(src=full_img[i], mask=self.area_mask_img)

        b = self.base_extent_bbox_in_full_image
        result_img = full_img[:, b.y_min:b.y_max+1, b.x_min:b.x_max+1]
        return result_img

    def _get_array_or_mmapped_array(self, final_shape_px):
        """Allocate the result buffer: disk-backed memmap if local caching is on, RAM array otherwise."""
        if self.file_handler is not None:
            full_result_img = np.memmap(
                self.file_handler.get_results_img_path(),
                dtype=np.uint8,
                mode='w+',
                shape=final_shape_px)
        else:
            full_result_img = np.zeros(final_shape_px, np.uint8)

        return full_result_img

    def tiles_generator(self) -> Tuple[np.ndarray, TileParams]:
        """
        Iterate over all tiles, as a Python generator function

        Yields (tile_image, tile_params) pairs; tiles fully outside the area
        mask are skipped. Also updates the QgsTask progress as it goes.
        """
        total_tiles = self.x_bins_number * self.y_bins_number

        for y_bin_number in range(self.y_bins_number):
            for x_bin_number in range(self.x_bins_number):
                tile_no = y_bin_number * self.x_bins_number + x_bin_number
                progress = tile_no / total_tiles * 100
                self.setProgress(progress)
                print(f" Processing tile {tile_no} / {total_tiles} [{progress:.2f}%]")
                tile_params = TileParams(
                    x_bin_number=x_bin_number, y_bin_number=y_bin_number,
                    x_bins_number=self.x_bins_number, y_bins_number=self.y_bins_number,
                    params=self.params,
                    processing_extent=self.extended_extent,
                    rlayer_units_per_pixel=self.rlayer_units_per_pixel)

                if not tile_params.is_tile_within_mask(self.area_mask_img):
                    continue  # tile outside of mask - to be skipped

                tile_img = processing_utils.get_tile_image(
                    rlayer=self.rlayer, extent=tile_params.extent, params=self.params)

                yield tile_img, tile_params

    def tiles_generator_batched(self) -> Tuple[np.ndarray, List[TileParams]]:
        """
        Iterate over all tiles, as a Python generator function, but return them in batches

        Yields (stacked_tile_images, tile_params_list); the final batch may be
        smaller than params.batch_size.
        """

        tile_img_batch, tile_params_batch = [], []

        for tile_img, tile_params in self.tiles_generator():
            tile_img_batch.append(tile_img)
            tile_params_batch.append(tile_params)

            if len(tile_img_batch) >= self.params.batch_size:
                yield np.array(tile_img_batch), tile_params_batch
                tile_img_batch, tile_params_batch = [], []

        # flush the (possibly partial) last batch
        if len(tile_img_batch) > 0:
            yield np.array(tile_img_batch), tile_params_batch
            tile_img_batch, tile_params_batch = [], []
zipdeepness/processing/map_processor/map_processor_detection.py ADDED
@@ -0,0 +1,288 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ This file implements map processing for detection model """
2
+ from typing import List
3
+
4
+ import cv2
5
+ import numpy as np
6
+ from qgis.core import QgsFeature, QgsGeometry, QgsProject, QgsVectorLayer
7
+ from qgis.PyQt.QtCore import QVariant
8
+ from qgis.core import QgsFields, QgsField
9
+
10
+ from deepness.common.processing_parameters.detection_parameters import DetectionParameters
11
+ from deepness.processing import processing_utils
12
+ from deepness.processing.map_processor.map_processing_result import (MapProcessingResult, MapProcessingResultCanceled,
13
+ MapProcessingResultSuccess)
14
+ from deepness.processing.map_processor.map_processor_with_model import MapProcessorWithModel
15
+ from deepness.processing.map_processor.utils.ckdtree import cKDTree
16
+ from deepness.processing.models.detector import Detection, Detector
17
+ from deepness.processing.tile_params import TileParams
18
+ from deepness.processing.models.detector import DetectorType
19
+
20
+
21
class MapProcessorDetection(MapProcessorWithModel):
    """
    MapProcessor specialized for detecting objects (where there is a finite list of detected objects
    of different classes, which area (bounding boxes) may overlap)
    """

    def __init__(self,
                 params: DetectionParameters,
                 **kwargs):
        """Initialize the processor and configure the detector model.

        :param params: detection-specific parameters (model, confidence/IoU thresholds, detector type)
        :param kwargs: forwarded to MapProcessorWithModel (rlayer, vlayer_mask, map_canvas, ...)
        """
        super().__init__(
            params=params,
            model=params.model,
            **kwargs)
        self.detection_parameters = params
        self.model = params.model  # type: Detector
        self.model.set_inference_params(
            confidence=params.confidence,
            iou_threshold=params.iou_threshold
        )
        self.model.set_model_type_param(model_type=params.detector_type)
        # populated at the end of _run(); None until processing finished
        self._all_detections = None

    def get_all_detections(self) -> List[Detection]:
        """Return detections from the last completed run (None if processing has not finished)."""
        return self._all_detections

    def _run(self) -> MapProcessingResult:
        """Process all tiles, merge detections via NMS, and prepare output vector layers."""
        all_bounding_boxes = []  # type: List[Detection]
        for tile_img_batched, tile_params_batched in self.tiles_generator_batched():
            if self.isCanceled():
                return MapProcessingResultCanceled()

            bounding_boxes_in_tile_batched = self._process_tile(tile_img_batched, tile_params_batched)
            # flatten per-tile detection lists into one list
            all_bounding_boxes += [d for det in bounding_boxes_in_tile_batched for d in det]

        # oriented bounding boxes (OBB models) require rotation-aware NMS
        with_rot = self.detection_parameters.detector_type == DetectorType.YOLO_ULTRALYTICS_OBB

        if len(all_bounding_boxes) > 0:
            all_bounding_boxes_nms = self.remove_overlaping_detections(
                all_bounding_boxes, iou_threshold=self.detection_parameters.iou_threshold, with_rot=with_rot)
            all_bounding_boxes_restricted = self.limit_bounding_boxes_to_processed_area(all_bounding_boxes_nms)
        else:
            all_bounding_boxes_restricted = []

        gui_delegate = self._create_vlayer_for_output_bounding_boxes(all_bounding_boxes_restricted)

        result_message = self._create_result_message(all_bounding_boxes_restricted)
        self._all_detections = all_bounding_boxes_restricted
        return MapProcessingResultSuccess(
            message=result_message,
            gui_delegate=gui_delegate,
        )

    def limit_bounding_boxes_to_processed_area(self, bounding_boxes: List[Detection]) -> List[Detection]:
        """
        Limit all bounding boxes to the constrained area that we process.
        E.g. if we are detecting peoples in a circle, we don't want to count peoples in the entire rectangle

        :param bounding_boxes: detections in full-image pixel coordinates
        :return: detections whose overlap with the processed area exceeds 50%
        """
        bounding_boxes_restricted = []
        for det in bounding_boxes:
            # if bounding box is not in the area_mask_img (at least in some percentage) - remove it
            if self.area_mask_img is not None:
                det_slice = det.bbox.get_slice()
                area_subimg = self.area_mask_img[det_slice]
                pixels_in_area = np.count_nonzero(area_subimg)
            else:
                det_bounding_box = det.bbox
                pixels_in_area = self.base_extent_bbox_in_full_image.calculate_overlap_in_pixels(det_bounding_box)
            total_pixels = det.bbox.get_area()
            coverage = pixels_in_area / total_pixels
            if coverage > 0.5:  # some arbitrary value, 50% seems reasonable
                bounding_boxes_restricted.append(det)

        return bounding_boxes_restricted

    def _create_result_message(self, bounding_boxes: List[Detection]) -> str:
        """Build the user-facing summary text with per-class detection counts."""
        # hack, always one output
        model_outputs = self._get_indexes_of_model_output_channels_to_create()
        channels = range(model_outputs[0])

        counts_mapping = {}
        total_counts = 0
        for channel_id in channels:
            filtered_bounding_boxes = [det for det in bounding_boxes if det.clss == channel_id]
            counts = len(filtered_bounding_boxes)
            counts_mapping[channel_id] = counts
            total_counts += counts

        txt = f'Detection done for {len(channels)} model output classes, with the following statistics:\n'
        for channel_id in channels:
            counts = counts_mapping[channel_id]

            # avoid division by zero when nothing was detected at all
            if total_counts:
                counts_percentage = counts / total_counts * 100
            else:
                counts_percentage = 0

            txt += f' - {self.model.get_channel_name(0, channel_id)}: counts = {counts} ({counts_percentage:.2f} %)\n'

        return txt

    def _create_vlayer_for_output_bounding_boxes(self, bounding_boxes: List[Detection]):
        """Create one memory vector layer per class with detection polygons.

        Returns a callable that adds the layers to the project - to be executed
        in the GUI thread (see MapProcessingResult.gui_delegate).
        """
        vlayers = []

        # hack, always one output
        model_outputs = self._get_indexes_of_model_output_channels_to_create()
        channels = range(model_outputs[0])

        for channel_id in channels:
            filtered_bounding_boxes = [det for det in bounding_boxes if det.clss == channel_id]
            print(f'Detections for class {channel_id}: {len(filtered_bounding_boxes)}')

            vlayer = QgsVectorLayer("multipolygon", self.model.get_channel_name(0, channel_id), "memory")
            vlayer.setCrs(self.rlayer.crs())
            prov = vlayer.dataProvider()
            prov.addAttributes([QgsField("confidence", QVariant.Double)])
            vlayer.updateFields()

            features = []
            for det in filtered_bounding_boxes:
                feature = QgsFeature()
                if det.mask is None:
                    # plain (possibly rotated) bounding box -> 4-corner polygon
                    bbox_corners_pixels = det.bbox.get_4_corners()
                    bbox_corners_crs = processing_utils.transform_points_list_xy_to_target_crs(
                        points=bbox_corners_pixels,
                        extent=self.extended_extent,
                        rlayer_units_per_pixel=self.rlayer_units_per_pixel,
                    )
                    polygon_xy_vec_vec = [
                        bbox_corners_crs
                    ]
                    geometry = QgsGeometry.fromPolygonXY(polygon_xy_vec_vec)
                else:
                    # instance-segmentation mask -> convex hull of the largest contour
                    contours, _ = cv2.findContours(det.mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
                    contours = sorted(contours, key=cv2.contourArea, reverse=True)

                    x_offset, y_offset = det.mask_offsets

                    # NOTE(review): if the mask produces no contours, `geometry` stays
                    # unbound (first iteration) or holds the previous detection's
                    # geometry - confirm masks are guaranteed non-empty upstream.
                    if len(contours) > 0:
                        countur = contours[0]

                        corners = []
                        for point in countur:
                            corners.append(int(point[0][0]) + x_offset)
                            corners.append(int(point[0][1]) + y_offset)

                        mask_corners_pixels = cv2.convexHull(np.array(corners).reshape((-1, 2))).squeeze()

                        mask_corners_crs = processing_utils.transform_points_list_xy_to_target_crs(
                            points=mask_corners_pixels,
                            extent=self.extended_extent,
                            rlayer_units_per_pixel=self.rlayer_units_per_pixel,
                        )

                        polygon_xy_vec_vec = [
                            mask_corners_crs
                        ]
                        geometry = QgsGeometry.fromPolygonXY(polygon_xy_vec_vec)
                feature.setGeometry(geometry)
                feature.setAttributes([float(det.conf)])
                features.append(feature)

            # make the fill semi-transparent so the underlying map stays visible
            color = vlayer.renderer().symbol().color()
            OUTPUT_VLAYER_COLOR_TRANSPARENCY = 80
            color.setAlpha(OUTPUT_VLAYER_COLOR_TRANSPARENCY)
            vlayer.renderer().symbol().setColor(color)
            # TODO - add also outline for the layer (thicker black border)

            prov.addFeatures(features)
            vlayer.updateExtents()

            vlayers.append(vlayer)

        # accessing GUI from non-GUI thread is not safe, so we need to delegate it to the GUI thread
        def add_to_gui():
            group = QgsProject.instance().layerTreeRoot().insertGroup(0, 'model_output')
            for vlayer in vlayers:
                QgsProject.instance().addMapLayer(vlayer, False)
                group.addLayer(vlayer)

        return add_to_gui

    @staticmethod
    def remove_overlaping_detections(bounding_boxes: List[Detection], iou_threshold: float, with_rot: bool = False) -> List[Detection]:
        """Run non-maximum suppression over all detections (name kept for API compatibility).

        :param bounding_boxes: all raw detections, across tiles and classes
        :param iou_threshold: IoU above which two detections are considered duplicates
        :param with_rot: use rotation-aware boxes (for OBB detectors)
        :return: deduplicated detections
        """
        bboxes = []
        probs = []
        for det in bounding_boxes:
            if with_rot:
                bboxes.append(det.get_bbox_xyxy_rot())
            else:
                bboxes.append(det.get_bbox_xyxy())
            probs.append(det.conf)

        bboxes = np.array(bboxes)
        probs = np.array(probs)

        pick_ids = Detector.non_max_suppression_fast(boxes=bboxes, probs=probs, iou_threshold=iou_threshold, with_rot=with_rot)

        filtered_bounding_boxes = [x for i, x in enumerate(bounding_boxes) if i in pick_ids]
        # sort by confidence (descending) so the kdtree pass keeps the strongest detection
        filtered_bounding_boxes = sorted(filtered_bounding_boxes, reverse=True)

        pick_ids_kde = MapProcessorDetection.non_max_kdtree(filtered_bounding_boxes, iou_threshold)

        filtered_bounding_boxes = [x for i, x in enumerate(filtered_bounding_boxes) if i in pick_ids_kde]

        return filtered_bounding_boxes

    @staticmethod
    def non_max_kdtree(bounding_boxes: List[Detection], iou_threshold: float) -> set:
        """ Remove overlapping bounding boxes using kdtree

        :param bounding_boxes: List of bounding boxes in (xyxy format)
        :param iou_threshold: Threshold for intersection over union
        :return: Set of indices (into bounding_boxes) to keep
        """

        centers = np.array([det.get_bbox_center() for det in bounding_boxes])

        kdtree = cKDTree(centers)
        pick_ids = set()
        removed_ids = set()

        for i, bbox in enumerate(bounding_boxes):
            if i in removed_ids:
                continue

            # NOTE(review): assumes the bundled cKDTree.query returns neighbor indices
            # only (unlike scipy, which returns a (distances, indices) pair) - confirm.
            indices = kdtree.query(bbox.get_bbox_center(), k=min(10, len(bounding_boxes)))

            for j in indices:
                if j in removed_ids:
                    continue

                if i == j:
                    continue

                iou = bbox.bbox.calculate_intersection_over_smaler_area(bounding_boxes[j].bbox)

                if iou > iou_threshold:
                    removed_ids.add(j)

            pick_ids.add(i)

        return pick_ids

    @staticmethod
    def convert_bounding_boxes_to_absolute_positions(bounding_boxes_relative: List[Detection],
                                                     tile_params: TileParams):
        """Translate detections from tile-local pixel coordinates to full-image coordinates (in place)."""
        for det in bounding_boxes_relative:
            det.convert_to_global(offset_x=tile_params.start_pixel_x, offset_y=tile_params.start_pixel_y)

    def _process_tile(self, tile_img: np.ndarray, tile_params_batched: List[TileParams]) -> List[List[Detection]]:
        """Run the detector on one batch of tiles and map detections to global coordinates."""
        bounding_boxes_batched: List[List[Detection]] = self.model.process(tile_img)

        for bounding_boxes, tile_params in zip(bounding_boxes_batched, tile_params_batched):
            self.convert_bounding_boxes_to_absolute_positions(bounding_boxes, tile_params)

        return bounding_boxes_batched
zipdeepness/processing/map_processor/map_processor_recognition.py ADDED
@@ -0,0 +1,221 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ This file implements map processing for Recognition model """
2
+
3
+ import os
4
+ import uuid
5
+ from typing import List
6
+
7
+ import numpy as np
8
+ from numpy.linalg import norm
9
+ from osgeo import gdal, osr
10
+ from qgis.core import QgsProject, QgsRasterLayer
11
+
12
+ from deepness.common.defines import IS_DEBUG
13
+ from deepness.common.lazy_package_loader import LazyPackageLoader
14
+ from deepness.common.misc import TMP_DIR_PATH
15
+ from deepness.common.processing_parameters.recognition_parameters import RecognitionParameters
16
+ from deepness.processing.map_processor.map_processing_result import (MapProcessingResult, MapProcessingResultCanceled,
17
+ MapProcessingResultFailed,
18
+ MapProcessingResultSuccess)
19
+ from deepness.processing.map_processor.map_processor_with_model import MapProcessorWithModel
20
+
21
+ cv2 = LazyPackageLoader('cv2')
22
+
23
+
24
class MapProcessorRecognition(MapProcessorWithModel):
    """
    MapProcessor specialized for Recognition model.

    The query image is embedded once with the model; each map tile is embedded
    the same way and compared to the query embedding with cosine similarity.
    The output is a similarity heatmap over the processed extent, with the
    best-matching tile outlined in the created raster layer.
    """

    def __init__(self, params: RecognitionParameters, **kwargs):
        """
        :param params: recognition processing parameters, including the model
        :param kwargs: forwarded to the MapProcessorWithModel base class
        """
        super().__init__(params=params, model=params.model, **kwargs)
        self.recognition_parameters = params
        self.model = params.model

    def _run(self) -> MapProcessingResult:
        """Process the whole extent tile by tile and publish the similarity heatmap."""
        try:
            query_img = cv2.imread(self.recognition_parameters.query_image_path)
            assert query_img is not None, f"Error occurred while reading query image: {self.recognition_parameters.query_image_path}"
        except Exception as e:
            return MapProcessingResultFailed(f"Error occurred while reading query image: {e}")

        # the model expects an RGB image resized to its fixed input size
        query_img = cv2.cvtColor(query_img, cv2.COLOR_BGR2RGB)
        query_img_resized = cv2.resize(query_img, self.model.get_input_shape()[2:4][::-1])
        query_img_batched = np.array([query_img_resized])

        query_img_emb = self.model.process(query_img_batched)[0][0]

        final_shape_px = (
            self.img_size_y_pixels,
            self.img_size_x_pixels,
        )

        stride = self.stride_px
        size = self.params.tile_size_px  # hoisted: constant across tiles
        full_result_img = np.zeros(final_shape_px, np.float32)
        mask = np.zeros_like(full_result_img, dtype=np.int16)

        # Track the best-matching tile. Initialized up-front so the values are
        # always defined - previously x_high/y_high were only assigned inside
        # the loop and could raise NameError (e.g. no tile with similarity > 0).
        highest = -np.inf
        x_high = 0
        y_high = 0

        for tile_img_batched, tile_params_batched in self.tiles_generator_batched():
            if self.isCanceled():
                return MapProcessingResultCanceled()

            tile_result_batched = self._process_tile(tile_img_batched)[0]

            for tile_result, tile_params in zip(tile_result_batched, tile_params_batched):
                # cosine similarity between query embedding and tile embedding
                cossim = np.dot(query_img_emb, tile_result)/(norm(query_img_emb)*norm(tile_result))

                x_bin = tile_params.x_bin_number
                y_bin = tile_params.y_bin_number

                if cossim > highest:
                    highest = cossim
                    x_high = x_bin
                    y_high = y_bin

                # accumulate similarity and a hit counter for averaging overlaps
                full_result_img[y_bin*stride:y_bin*stride+size, x_bin*stride:x_bin*stride + size] += cossim
                mask[y_bin*stride:y_bin*stride+size, x_bin*stride:x_bin*stride + size] += 1

        # Average overlapping tile contributions; pixels not covered by any tile
        # keep 0 instead of producing a division-by-zero NaN.
        full_result_img = np.divide(full_result_img, mask,
                                    out=np.zeros_like(full_result_img), where=mask > 0)
        self.set_results_img(full_result_img)

        gui_delegate = self._create_rlayers_from_images_for_base_extent(self.get_result_img(), x_high, y_high, size, stride)
        # NOTE(review): the reported position scales bins by tile_size_px while the
        # drawn outline scales by the stride - confirm which one is intended
        result_message = self._create_result_message(self.get_result_img(), x_high*self.params.tile_size_px, y_high*self.params.tile_size_px)
        return MapProcessingResultSuccess(
            message=result_message,
            gui_delegate=gui_delegate,
        )

    def _create_result_message(self, result_img: List[np.ndarray], x_high, y_high) -> str:
        """Build the textual summary reported to the user after processing."""
        txt = f"Recognition ended, best result found at {x_high}, {y_high}, {result_img.shape}"
        return txt

    def limit_extended_extent_image_to_base_extent_with_mask(self, full_img):
        """
        Limit an image which is for extended_extent to the base_extent image.
        If a limiting polygon was used for processing, it will be also applied.

        NOTE(review): this helper is not called anywhere within this class and
        assumes a 3-dimensional image (trailing channel axis) - confirm before reuse.
        :param full_img: image covering the extended extent
        :return: image cropped to the base extent
        """
        # TODO look for some inplace operation to save memory
        b = self.base_extent_bbox_in_full_image
        result_img = full_img[
            int(b.y_min * self.recognition_parameters.scale_factor): int(
                b.y_max * self.recognition_parameters.scale_factor
            ),
            int(b.x_min * self.recognition_parameters.scale_factor): int(
                b.x_max * self.recognition_parameters.scale_factor
            ),
            :,
        ]
        return result_img

    def load_rlayer_from_file(self, file_path):
        """
        Create raster layer from tif file and assign the source layer's CRS to it.

        :param file_path: path to the temporary GeoTIFF created by this processor
        :raises Exception: if the file could not be loaded as a raster layer
        """
        file_name = os.path.basename(file_path)
        base_file_name = file_name.split("___")[
            0
        ]  # we remove the random_id string we created a moment ago
        rlayer = QgsRasterLayer(file_path, base_file_name)
        if rlayer.width() == 0:
            raise Exception(
                "0 width - rlayer not loaded properly. Probably invalid file path?"
            )
        rlayer.setCrs(self.rlayer.crs())
        return rlayer

    def _create_rlayers_from_images_for_base_extent(
            self, result_img: np.ndarray,
            x_high,
            y_high,
            size,
            stride
    ):
        """Create the heatmap raster layer and outline the best-matching tile.

        Returns a callable that adds the layer to the project; it must be invoked
        from the GUI thread.
        """
        y = y_high * stride
        x = x_high * stride

        # draw a 1-valued rectangle around the best-matching tile
        result_img[y, x:x+size-1] = 1
        result_img[y+size-1, x:x+size-1] = 1
        result_img[y:y+size-1, x] = 1
        result_img[y:y+size-1, x+size-1] = 1

        # TODO: We are creating a new file for each layer.
        # Maybe can we pass ownership of this file to QGis?
        # Or maybe even create vlayer directly from array, without a file?

        random_id = str(uuid.uuid4()).replace("-", "")
        file_path = os.path.join(TMP_DIR_PATH, f"{random_id}.tif")
        self.save_result_img_as_tif(file_path=file_path, img=np.expand_dims(result_img, axis=2))

        rlayer = self.load_rlayer_from_file(file_path)
        OUTPUT_RLAYER_OPACITY = 0.5
        rlayer.renderer().setOpacity(OUTPUT_RLAYER_OPACITY)

        # accessing GUI from non-GUI thread is not safe, so we need to delegate it to the GUI thread
        def add_to_gui():
            group = (
                QgsProject.instance()
                .layerTreeRoot()
                .insertGroup(0, "Cosine similarity score")
            )
            QgsProject.instance().addMapLayer(rlayer, False)
            group.addLayer(rlayer)

        return add_to_gui

    def save_result_img_as_tif(self, file_path: str, img: np.ndarray):
        """
        As we cannot pass easily an numpy array to be displayed as raster layer, we create temporary geotif files,
        which will be loaded as layer later on

        Partially based on example from:
        https://gis.stackexchange.com/questions/82031/gdal-python-set-projection-of-a-raster-not-working

        :param file_path: destination GeoTIFF path (directories are created if needed)
        :param img: image of shape (H, W, channels) to be written
        """
        os.makedirs(os.path.dirname(file_path), exist_ok=True)

        extent = self.base_extent
        crs = self.rlayer.crs()

        # top-left origin, north-up raster
        geo_transform = [
            extent.xMinimum(),
            self.rlayer_units_per_pixel,
            0,
            extent.yMaximum(),
            0,
            -self.rlayer_units_per_pixel,
        ]

        driver = gdal.GetDriverByName("GTiff")
        n_lines = img.shape[0]
        n_cols = img.shape[1]
        n_chanels = img.shape[2]
        data_type = gdal.GDT_Float32
        grid_data = driver.Create(
            "grid_data", n_cols, n_lines, n_chanels, data_type
        )  # , options)
        # write each channel as a separate raster band (bands are 1-indexed)
        for i in range(1, img.shape[2] + 1):
            grid_data.GetRasterBand(i).WriteArray(img[:, :, i - 1])

        # crs().srsid() - maybe we can use the ID directly - but how?
        srs = osr.SpatialReference()
        srs.SetFromUserInput(crs.authid())

        grid_data.SetProjection(srs.ExportToWkt())
        grid_data.SetGeoTransform(geo_transform)
        driver.CreateCopy(file_path, grid_data, 0)

    def _process_tile(self, tile_img: np.ndarray) -> np.ndarray:
        """Run the model on a batch of tiles, returning the raw embeddings.

        NOTE - results are later stored as float32, so some accuracy may be lost.
        """
        result = self.model.process(tile_img)
        return result
zipdeepness/processing/map_processor/map_processor_regression.py ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ This file implements map processing for regression model """
2
+
3
+ import os
4
+ import uuid
5
+ from typing import List
6
+
7
+ import numpy as np
8
+ from osgeo import gdal, osr
9
+ from qgis.core import QgsProject, QgsRasterLayer
10
+
11
+ from deepness.common.misc import TMP_DIR_PATH
12
+ from deepness.common.processing_parameters.regression_parameters import RegressionParameters
13
+ from deepness.processing.map_processor.map_processing_result import (MapProcessingResult, MapProcessingResultCanceled,
14
+ MapProcessingResultSuccess)
15
+ from deepness.processing.map_processor.map_processor_with_model import MapProcessorWithModel
16
+
17
+
18
class MapProcessorRegression(MapProcessorWithModel):
    """
    MapProcessor specialized for Regression model (where each pixel has a value representing some feature intensity)
    """

    def __init__(self,
                 params: RegressionParameters,
                 **kwargs):
        """
        :param params: regression processing parameters, including the model
        :param kwargs: forwarded to the MapProcessorWithModel base class
        """
        super().__init__(
            params=params,
            model=params.model,
            **kwargs)
        self.regression_parameters = params
        self.model = params.model

    def _run(self) -> MapProcessingResult:
        """Process the whole extent tile by tile and publish the regression rasters."""
        number_of_output_channels = len(self._get_indexes_of_model_output_channels_to_create())
        final_shape_px = (number_of_output_channels, self.img_size_y_pixels, self.img_size_x_pixels)

        # NOTE: consider whether we can use float16/uint16 as datatype
        full_result_imgs = self._get_array_or_mmapped_array(final_shape_px)

        for tile_img_batched, tile_params_batched in self.tiles_generator_batched():
            if self.isCanceled():
                return MapProcessingResultCanceled()

            tile_results_batched = self._process_tile(tile_img_batched)

            for tile_results, tile_params in zip(tile_results_batched, tile_params_batched):
                tile_params.set_mask_on_full_img(
                    tile_result=tile_results,
                    full_result_img=full_result_imgs)

        full_result_imgs = self.limit_extended_extent_images_to_base_extent_with_mask(full_imgs=full_result_imgs)
        self.set_results_img(full_result_imgs)

        gui_delegate = self._create_rlayers_from_images_for_base_extent(self.get_result_img())
        result_message = self._create_result_message(self.get_result_img())
        return MapProcessingResultSuccess(
            message=result_message,
            gui_delegate=gui_delegate,
        )

    def _create_result_message(self, result_imgs: List[np.ndarray]) -> str:
        """Build a per-channel summary (mean/std/min/max) of the regression outputs."""
        txt = f'Regression done, with the following statistics:\n'
        for output_id, _ in enumerate(self._get_indexes_of_model_output_channels_to_create()):
            result_img = result_imgs[output_id]

            average_value = np.mean(result_img)
            std = np.std(result_img)

            txt += f' - {self.model.get_channel_name(output_id, 0)}: average_value = {average_value:.2f} (std = {std:.2f}, ' \
                   f'min={np.min(result_img)}, max={np.max(result_img)})\n'

        return txt

    def limit_extended_extent_images_to_base_extent_with_mask(self, full_imgs: List[np.ndarray]):
        """
        Same as 'limit_extended_extent_image_to_base_extent_with_mask' but for a list of images.
        See `limit_extended_extent_image_to_base_extent_with_mask` for details.
        :param full_imgs: images covering the extended extent
        :return: images cropped to the base extent
        """
        return self.limit_extended_extent_image_to_base_extent_with_mask(full_img=full_imgs)

    def load_rlayer_from_file(self, file_path):
        """
        Create raster layer from tif file and assign the source layer's CRS to it.

        :param file_path: path to the temporary GeoTIFF created by this processor
        :raises Exception: if the file could not be loaded as a raster layer
        """
        file_name = os.path.basename(file_path)
        base_file_name = file_name.split('___')[0]  # we remove the random_id string we created a moment ago
        rlayer = QgsRasterLayer(file_path, base_file_name)
        if rlayer.width() == 0:
            raise Exception("0 width - rlayer not loaded properly. Probably invalid file path?")
        rlayer.setCrs(self.rlayer.crs())
        return rlayer

    def _create_rlayers_from_images_for_base_extent(self, result_imgs: List[np.ndarray]):
        """Create one raster layer per model output channel.

        Returns a callable that adds the layers to the project; it must be
        invoked from the GUI thread.
        """
        # TODO: We are creating a new file for each layer.
        # Maybe can we pass ownership of this file to QGis?
        # Or maybe even create vlayer directly from array, without a file?
        rlayers = []

        for output_id, _ in enumerate(self._get_indexes_of_model_output_channels_to_create()):

            random_id = str(uuid.uuid4()).replace('-', '')
            file_path = os.path.join(TMP_DIR_PATH, f'{self.model.get_channel_name(output_id, 0)}__{random_id}.tif')
            self.save_result_img_as_tif(file_path=file_path, img=result_imgs[output_id])

            rlayer = self.load_rlayer_from_file(file_path)
            OUTPUT_RLAYER_OPACITY = 0.5
            rlayer.renderer().setOpacity(OUTPUT_RLAYER_OPACITY)
            rlayers.append(rlayer)

        # accessing GUI from non-GUI thread is not safe, so we need to delegate it to the GUI thread
        def add_to_gui():
            group = QgsProject.instance().layerTreeRoot().insertGroup(0, 'model_output')
            for rlayer in rlayers:
                QgsProject.instance().addMapLayer(rlayer, False)
                group.addLayer(rlayer)

        return add_to_gui

    def save_result_img_as_tif(self, file_path: str, img: np.ndarray):
        """
        As we cannot pass easily an numpy array to be displayed as raster layer, we create temporary geotif files,
        which will be loaded as layer later on

        Partially based on example from:
        https://gis.stackexchange.com/questions/82031/gdal-python-set-projection-of-a-raster-not-working

        :param file_path: destination GeoTIFF path (directories are created if needed)
        :param img: single-channel image of shape (H, W) to be written
        """
        os.makedirs(os.path.dirname(file_path), exist_ok=True)

        extent = self.base_extent
        crs = self.rlayer.crs()

        # top-left origin, north-up raster
        geo_transform = [extent.xMinimum(), self.rlayer_units_per_pixel, 0,
                         extent.yMaximum(), 0, -self.rlayer_units_per_pixel]

        driver = gdal.GetDriverByName('GTiff')
        n_lines = img.shape[0]
        n_cols = img.shape[1]
        data_type = gdal.GDT_Float32
        grid_data = driver.Create('grid_data', n_cols, n_lines, 1, data_type)  # , options)
        grid_data.GetRasterBand(1).WriteArray(img)

        # crs().srsid() - maybe we can use the ID directly - but how?
        srs = osr.SpatialReference()
        srs.SetFromUserInput(crs.authid())

        grid_data.SetProjection(srs.ExportToWkt())
        grid_data.SetGeoTransform(geo_transform)
        driver.CreateCopy(file_path, grid_data, 0)
        # (removed leftover debug print of the file path)

    def _process_tile(self, tile_img: np.ndarray) -> np.ndarray:
        """Run the model on a batch of tiles and post-process the outputs.

        NaNs are zeroed and values are scaled by the configured output scaling.
        :return: array shaped (batch, outputs, H, W)
        """
        many_result = self.model.process(tile_img)
        many_outputs = []

        for result in many_result:
            result[np.isnan(result)] = 0
            result *= self.regression_parameters.output_scaling

            # NOTE - currently we are saving result as float32, so we are losing some accuracy.
            result = result.astype(np.float32)

            if len(result.shape) == 3:
                result = np.expand_dims(result, axis=1)

            many_outputs.append(result[:, 0])

        many_outputs = np.array(many_outputs).transpose((1, 0, 2, 3))

        return many_outputs
zipdeepness/processing/map_processor/map_processor_segmentation.py ADDED
@@ -0,0 +1,386 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ This file implements map processing for segmentation model """
2
+
3
+ from typing import Callable
4
+
5
+ import numpy as np
6
+ from qgis.core import QgsProject, QgsVectorLayer, QgsCoordinateTransform, QgsCoordinateReferenceSystem, QgsField, QgsFeature, QgsGeometry, QgsMessageLog
7
+ from deepness.common.lazy_package_loader import LazyPackageLoader
8
+ from deepness.common.processing_parameters.segmentation_parameters import SegmentationParameters
9
+ from deepness.processing import processing_utils
10
+ from deepness.processing.map_processor.map_processing_result import (MapProcessingResult, MapProcessingResultCanceled,
11
+ MapProcessingResultSuccess)
12
+ from deepness.processing.map_processor.map_processor_with_model import MapProcessorWithModel
13
+ from deepness.processing.tile_params import TileParams
14
+ import geopandas as gpd
15
+ import traceback
16
+ import pandas as pd
17
+ from rasterio.features import shapes
18
+ from affine import Affine
19
+ from shapely.geometry import shape
20
+
21
+ cv2 = LazyPackageLoader('cv2')
22
+
23
+
24
class MapProcessorSegmentation(MapProcessorWithModel):
    """
    MapProcessor specialized for Segmentation model (where each pixel is assigned to one class).

    Besides the usual raster/vector layer creation, each processed tile is also
    vectorized into a GeoDataFrame (EPSG:4326) and the combined result is
    exported to a CSV file.
    """

    def __init__(self,
                 params: SegmentationParameters,
                 **kwargs):
        """
        :param params: segmentation processing parameters, including the model
        :param kwargs: forwarded to the MapProcessorWithModel base class
        """
        super().__init__(
            params=params,
            model=params.model,
            **kwargs)
        self.segmentation_parameters = params
        self.model = params.model
        # mapping from mask class id (as string) to a human-readable class name
        self.new_class_names = {
            "0": "background",
            "2": "zone-verte",
            "3": "eau",
            "4": "route",
            "5": "non-residentiel",
            "6": "Villa",
            "7": "traditionnel",
            "8": "appartement",
            "9": "autre"
        }

    def tile_mask_to_gdf_latlon(self, tile: TileParams, mask: np.ndarray, raster_layer_crs=None):
        """Vectorize a tile's class mask into a GeoDataFrame reprojected to EPSG:4326.

        :param tile: tile placement info (extent and resolution of the tile)
        :param mask: class-id mask, shape (H, W) or (1, H, W)
        :param raster_layer_crs: CRS of the source raster; EPSG:3857 is assumed if None
        :return: GeoDataFrame with columns geometry / class_id / class_name (may be empty)
        """
        # remove channel dimension if present
        if mask.ndim == 3:
            mask = mask[0]

        mask_binary = mask != 0  # vectorize only non-background pixels

        # affine transform mapping pixel coordinates to the tile's CRS coordinates
        x_min = tile.extent.xMinimum()
        y_max = tile.extent.yMaximum()
        pixel_size = tile.rlayer_units_per_pixel
        transform = Affine(pixel_size, 0, x_min, 0, -pixel_size, y_max)

        results = (
            {"properties": {"class_id": int(v)}, "geometry": s}
            for s, v in shapes(mask.astype(np.int16), mask=mask_binary, transform=transform)
        )
        geoms = []
        for r in results:
            class_id = r["properties"]["class_id"]
            class_name = self.new_class_names.get(str(class_id), "unknown")
            geom = shape(r["geometry"])
            geoms.append({"geometry": geom, "class_id": class_id, "class_name": class_name})

        source_crs = raster_layer_crs.authid() if raster_layer_crs else "EPSG:3857"
        if geoms:
            gdf = gpd.GeoDataFrame(geoms, geometry="geometry", crs=source_crs)
        else:
            # empty GeoDataFrame with the right columns
            gdf = gpd.GeoDataFrame(columns=["geometry", "class_id", "class_name"], geometry="geometry", crs=source_crs)

        # Transform to lat/lon; skipped for an empty frame, where to_crs would fail.
        # (A previous duplicated, unconditional re-construction of `gdf` here
        # crashed on empty tiles and has been removed.)
        if not gdf.empty:
            gdf = gdf.to_crs("EPSG:4326")

        return gdf

    def _run(self) -> MapProcessingResult:
        """Process the whole extent tile by tile, vectorize each tile and publish the results."""
        final_shape_px = (len(self._get_indexes_of_model_output_channels_to_create()), self.img_size_y_pixels, self.img_size_x_pixels)

        full_result_img = self._get_array_or_mmapped_array(final_shape_px)
        gdf_list = []
        raster_layer = QgsProject.instance().mapLayer(self.params.input_layer_id)
        raster_layer_crs = raster_layer.crs() if raster_layer else QgsCoordinateReferenceSystem("EPSG:3857")

        for tile_img_batched, tile_params_batched in self.tiles_generator_batched():
            if self.isCanceled():
                return MapProcessingResultCanceled()

            tile_result_batched = self._process_tile(tile_img_batched)

            for tile_result, tile_params in zip(tile_result_batched, tile_params_batched):
                tile_params.set_mask_on_full_img(
                    tile_result=tile_result,
                    full_result_img=full_result_img)
                # vectorization of a single tile must not kill the whole run
                try:
                    gdf_tile = self.tile_mask_to_gdf_latlon(
                        tile=tile_params,
                        mask=tile_result,
                        raster_layer_crs=raster_layer_crs,
                    )
                    gdf_list.append(gdf_tile)
                    QgsMessageLog.logMessage(f"Tile {tile_params.x_bin_number},{tile_params.y_bin_number}: got {len(gdf_tile)} points", "Segmentation", 0)
                except Exception as e:
                    QgsMessageLog.logMessage(f"Tile {tile_params.x_bin_number},{tile_params.y_bin_number} failed: {e}", "Segmentation", 2)
                    QgsMessageLog.logMessage(traceback.format_exc(), "Segmentation", 2)

        blur_size = int(self.segmentation_parameters.postprocessing_dilate_erode_size // 2) * 2 + 1  # needs to be odd

        for i in range(full_result_img.shape[0]):
            full_result_img[i] = cv2.medianBlur(full_result_img[i], blur_size)

        full_result_img = self.limit_extended_extent_image_to_base_extent_with_mask(full_img=full_result_img)

        self.set_results_img(full_result_img)
        if gdf_list:
            final_gdf = gpd.GeoDataFrame(pd.concat(gdf_list, ignore_index=True), crs=gdf_list[0].crs)
            # TODO(review): developer-specific absolute path; make this configurable.
            # The write is guarded so a missing directory does not abort the run.
            csv_file = r"C:\Users\carin\Documents\segmen.csv"
            try:
                final_gdf.to_csv(csv_file, index=False)
            except OSError as e:
                QgsMessageLog.logMessage(f"Could not write CSV export to {csv_file}: {e}", "Segmentation", 2)
        else:
            final_gdf = None
            print("No GeoDataFrame generated.")

        gui_delegate = self._create_vlayer_from_mask_for_base_extent(self.get_result_img())

        result_message = self._create_result_message(self.get_result_img())
        return MapProcessingResultSuccess(
            message=result_message,
            gui_delegate=gui_delegate,
        )

    def _check_output_layer_is_sigmoid_and_has_more_than_one_name(self, output_id: int) -> bool:
        """Return True if the given output has sigmoid activation and multiple channel names."""
        if self.model.outputs_names is None or self.model.outputs_are_sigmoid is None:
            return False

        return len(self.model.outputs_names[output_id]) > 1 and self.model.outputs_are_sigmoid[output_id]

    def _create_result_message(self, result_img: np.ndarray) -> str:
        """Build a per-channel area summary of the segmentation result."""
        txt = f'Segmentation done, with the following statistics:\n'

        for output_id, layer_sizes in enumerate(self._get_indexes_of_model_output_channels_to_create()):

            txt += f'Channels for output {output_id}:\n'

            unique, counts = np.unique(result_img[output_id], return_counts=True)
            counts_map = {}
            for i in range(len(unique)):
                counts_map[unique[i]] = counts[i]

            # we cannot simply take image dimensions, because we may have irregular processing area from polygon
            number_of_pixels_in_processing_area = np.sum([counts_map[k] for k in counts_map.keys()])
            total_area = number_of_pixels_in_processing_area * self.params.resolution_m_per_px**2

            for channel_id in range(layer_sizes):
                pixels_count = counts_map.get(channel_id + 1, 0)  # we add 1 to avoid 0 values, find the MADD1 code for explanation
                area = pixels_count * self.params.resolution_m_per_px**2

                if total_area > 0 and not np.isnan(total_area) and not np.isinf(total_area):
                    area_percentage = area / total_area * 100
                else:
                    area_percentage = 0.0

                txt += f'\t- {self.model.get_channel_name(output_id, channel_id)}: area = {area:.2f} m^2 ({area_percentage:.2f} %)\n'

        return txt

    def _create_vlayer_from_mask_for_base_extent(self, mask_img, vector_gdf: gpd.GeoDataFrame = None) -> Callable:
        """ create vector layer with polygons from the mask image
        :param mask_img: class-id mask per output, shape (outputs, H, W)
        :param vector_gdf: unused; kept for interface compatibility
        :return: function to be called in GUI thread
        """
        vlayers = []

        for output_id, layer_sizes in enumerate(self._get_indexes_of_model_output_channels_to_create()):
            output_vlayers = []
            for channel_id in range(layer_sizes):
                local_mask_img = np.uint8(mask_img[output_id] == (channel_id + 1))  # we add 1 to avoid 0 values, find the MADD1 code for explanation

                contours, hierarchy = cv2.findContours(local_mask_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
                contours = processing_utils.transform_contours_yx_pixels_to_target_crs(
                    contours=contours,
                    extent=self.base_extent,
                    rlayer_units_per_pixel=self.rlayer_units_per_pixel)
                features = []

                if len(contours):
                    processing_utils.convert_cv_contours_to_features(
                        features=features,
                        cv_contours=contours,
                        hierarchy=hierarchy[0],
                        is_hole=False,
                        current_holes=[],
                        current_contour_index=0)
                else:
                    pass  # just nothing, we already have an empty list of features

                layer_name = self.model.get_channel_name(output_id, channel_id)
                vlayer = QgsVectorLayer("multipolygon", layer_name, "memory")
                vlayer.setCrs(self.rlayer.crs())
                prov = vlayer.dataProvider()

                color = vlayer.renderer().symbol().color()
                OUTPUT_VLAYER_COLOR_TRANSPARENCY = 80
                color.setAlpha(OUTPUT_VLAYER_COLOR_TRANSPARENCY)
                vlayer.renderer().symbol().setColor(color)
                # TODO - add also outline for the layer (thicker black border)

                prov.addFeatures(features)
                vlayer.updateExtents()

                output_vlayers.append(vlayer)

            vlayers.append(output_vlayers)

        # accessing GUI from non-GUI thread is not safe, so we need to delegate it to the GUI thread
        def add_to_gui():
            group = QgsProject.instance().layerTreeRoot().insertGroup(0, 'model_output')

            if len(vlayers) == 1:
                for vlayer in vlayers[0]:
                    QgsProject.instance().addMapLayer(vlayer, False)
                    group.addLayer(vlayer)
            else:
                for i, output_vlayers in enumerate(vlayers):
                    output_group = group.insertGroup(0, f'output_{i}')
                    for vlayer in output_vlayers:
                        QgsProject.instance().addMapLayer(vlayer, False)
                        output_group.addLayer(vlayer)

        return add_to_gui

    def _process_tile(self, tile_img_batched: np.ndarray) -> np.ndarray:
        """Run the dual-head model on a batch of tiles and build the final class masks."""
        res = self.model.process(tile_img_batched)

        threshold = self.segmentation_parameters.pixel_classification__probability_threshold
        final_mask = self.process_dual_batch(res, threshold)

        return final_mask

    def process_dual_batch(self, result, threshold=0.0):
        """Merge the two model outputs into one class mask per sample.

        Pixels classified as "building" by the first head are refined into a
        building type from the second head; low-confidence pixels become background.

        :param result: pair of model outputs: (N, num_classes, H, W) probabilities
            and per-sample building-type maps (N, H, W)
        :param threshold: probability below which a pixel is set to background
            (previously this parameter was shadowed by the instance setting)
        :return: int8 masks shaped (N, 1, H, W)
        """
        res = result[0]                # (N, num_classes, H, W) class probabilities
        segmentation_maps = result[1]  # per-sample building-type maps
        N = res.shape[0]

        # allocate from the actual output size instead of a hard-coded 512x512
        final_batch_masks = np.zeros((N, 1) + res.shape[2:], dtype=np.int8)

        # building-type ids remapped into the global class-id space
        new_class = {0: 0, 1: 5, 2: 6, 3: 7, 4: 8}
        BUILDING_ID = 1

        for i in range(N):
            seg_map = segmentation_maps[i]  # (H, W)

            # Map old classes to building classes
            build_mask = np.zeros_like(seg_map, dtype=np.int8)
            for k, v in new_class.items():
                build_mask[seg_map == k] = v

            # class predictions and their confidences
            onnx_classes = np.argmax(res[i], axis=0).astype(np.int8)  # (H, W)
            confidences = np.max(res[i], axis=0)  # (H, W)

            # Threshold low-confidence predictions to background
            onnx_classes[confidences < threshold] = 0

            # Merge building predictions with the building-type mask
            final_mask = onnx_classes.copy()
            final_mask[onnx_classes == BUILDING_ID] = build_mask[onnx_classes == BUILDING_ID]

            final_batch_masks[i, 0] = final_mask

        return final_batch_masks
zipdeepness/processing/map_processor/map_processor_superresolution.py ADDED
@@ -0,0 +1,175 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ This file implements map processing for Super Resolution model """
2
+
3
+ import os
4
+ import uuid
5
+ from typing import List
6
+
7
+ import numpy as np
8
+ from osgeo import gdal, osr
9
+ from qgis.core import QgsProject, QgsRasterLayer
10
+
11
+ from deepness.common.misc import TMP_DIR_PATH
12
+ from deepness.common.processing_parameters.superresolution_parameters import SuperresolutionParameters
13
+ from deepness.processing.map_processor.map_processing_result import (MapProcessingResult, MapProcessingResultCanceled,
14
+ MapProcessingResultSuccess)
15
+ from deepness.processing.map_processor.map_processor_with_model import MapProcessorWithModel
16
+
17
+
18
class MapProcessorSuperresolution(MapProcessorWithModel):
    """
    MapProcessor specialized for Super Resolution models
    (which are used to upscale the input image to a higher resolution).
    """

    def __init__(self,
                 params: SuperresolutionParameters,
                 **kwargs):
        super().__init__(
            params=params,
            model=params.model,
            **kwargs)
        self.superresolution_parameters = params
        self.model = params.model

    def _run(self) -> MapProcessingResult:
        """Run the model tile-by-tile and assemble the upscaled full-map image."""
        number_of_output_channels = self.model.get_number_of_output_channels()

        # super-resolution models are expected to have exactly one output
        number_of_output_channels = number_of_output_channels[0]

        scale = self.superresolution_parameters.scale_factor
        final_shape_px = (int(self.img_size_y_pixels * scale),
                          int(self.img_size_x_pixels * scale),
                          number_of_output_channels)

        # NOTE: consider whether we can use float16/uint16 as datatype
        full_result_imgs = self._get_array_or_mmapped_array(final_shape_px)

        for tile_img_batched, tile_params_batched in self.tiles_generator_batched():
            if self.isCanceled():
                return MapProcessingResultCanceled()

            tile_results_batched = self._process_tile(tile_img_batched)

            for tile_results, tile_params in zip(tile_results_batched, tile_params_batched):
                # paste the upscaled tile into the (scaled) full-result canvas
                y0 = int(tile_params.start_pixel_y * scale)
                y1 = int((tile_params.start_pixel_y + tile_params.stride_px) * scale)
                x0 = int(tile_params.start_pixel_x * scale)
                x1 = int((tile_params.start_pixel_x + tile_params.stride_px) * scale)
                full_result_imgs[y0:y1, x0:x1, :] = tile_results.transpose(1, 2, 0)  # transpose to channels-last

        full_result_imgs = self.limit_extended_extent_image_to_base_extent_with_mask(full_img=full_result_imgs)
        self.set_results_img(full_result_imgs)

        gui_delegate = self._create_rlayers_from_images_for_base_extent(self.get_result_img())
        result_message = self._create_result_message(self.get_result_img())
        return MapProcessingResultSuccess(
            message=result_message,
            gui_delegate=gui_delegate,
        )

    def _create_result_message(self, result_img: List[np.ndarray]) -> str:
        """Build the user-facing summary message shown after processing."""
        channels = self._get_indexes_of_model_output_channels_to_create()
        txt = 'Super-resolution done \n'  # BUGFIX: was an f-string without placeholders

        if len(channels) > 0:
            # each output pixel covers (resolution / scale_factor) meters per side
            total_area = result_img.shape[0] * result_img.shape[1] \
                * (self.params.resolution_m_per_px / self.superresolution_parameters.scale_factor) ** 2
            txt += f'Total area is {total_area:.2f} m^2'  # BUGFIX: message typo 'Total are is'
        return txt

    def limit_extended_extent_image_to_base_extent_with_mask(self, full_img):
        """
        Limit an image which is for extended_extent to the base_extent image.
        If a limiting polygon was used for processing, it will be also applied.
        :param full_img: channels-last image covering the (scaled) extended extent
        :return: slice of `full_img` cropped to the (scaled) base extent
        """
        # TODO look for some inplace operation to save memory
        scale = self.superresolution_parameters.scale_factor
        b = self.base_extent_bbox_in_full_image
        result_img = full_img[int(b.y_min * scale):int(b.y_max * scale),
                              int(b.x_min * scale):int(b.x_max * scale),
                              :]
        return result_img

    def load_rlayer_from_file(self, file_path):
        """
        Create a raster layer from a tif file; the layer name is the part of
        the file name before the '___' random-id separator added on save.
        """
        file_name = os.path.basename(file_path)
        base_file_name = file_name.split('___')[0]  # we remove the random_id string we created a moment ago
        rlayer = QgsRasterLayer(file_path, base_file_name)
        if rlayer.width() == 0:
            raise Exception("0 width - rlayer not loaded properly. Probably invalid file path?")
        rlayer.setCrs(self.rlayer.crs())
        return rlayer

    def _create_rlayers_from_images_for_base_extent(self, result_imgs: List[np.ndarray]):
        """Save the result image to a temporary tif and prepare a GUI callback
        that loads it as a raster layer grouped under 'Super Resolution Results'.
        """
        # TODO: We are creating a new file for each layer.
        # Maybe can we pass ownership of this file to QGis?
        # Or maybe even create vlayer directly from array, without a file?
        OUTPUT_RLAYER_OPACITY = 0.5
        rlayers = []

        for channel_id in ['Super Resolution']:  # a single output layer for super-resolution
            random_id = str(uuid.uuid4()).replace('-', '')
            file_path = os.path.join(TMP_DIR_PATH, f'{channel_id}___{random_id}.tif')
            self.save_result_img_as_tif(file_path=file_path, img=result_imgs)

            rlayer = self.load_rlayer_from_file(file_path)
            rlayer.renderer().setOpacity(OUTPUT_RLAYER_OPACITY)
            rlayers.append(rlayer)

        def add_to_gui():
            # executed later in the GUI thread
            group = QgsProject.instance().layerTreeRoot().insertGroup(0, 'Super Resolution Results')
            for rlayer in rlayers:
                QgsProject.instance().addMapLayer(rlayer, False)
                group.addLayer(rlayer)

        return add_to_gui

    def save_result_img_as_tif(self, file_path: str, img: np.ndarray):
        """
        As we cannot pass easily an numpy array to be displayed as raster layer, we create temporary geotif files,
        which will be loaded as layer later on

        Partially based on example from:
        https://gis.stackexchange.com/questions/82031/gdal-python-set-projection-of-a-raster-not-working
        """
        os.makedirs(os.path.dirname(file_path), exist_ok=True)

        extent = self.base_extent
        crs = self.rlayer.crs()

        # the output pixel size shrinks by the scale factor relative to the source raster
        pixel_size = self.rlayer_units_per_pixel / self.superresolution_parameters.scale_factor
        geo_transform = [extent.xMinimum(), pixel_size, 0,
                         extent.yMaximum(), 0, -pixel_size]

        driver = gdal.GetDriverByName('GTiff')
        n_lines = img.shape[0]
        n_cols = img.shape[1]
        n_channels = img.shape[2]
        # NOTE: saving as float32 (uint8 is not sufficient for arbitrary model outputs)
        data_type = gdal.GDT_Float32
        grid_data = driver.Create('grid_data', n_cols, n_lines, n_channels, data_type)  # , options)
        # write each channel into its (1-based) gdal band
        for band_index in range(1, n_channels + 1):
            grid_data.GetRasterBand(band_index).WriteArray(img[:, :, band_index - 1])

        # crs().srsid() - maybe we can use the ID directly - but how?
        srs = osr.SpatialReference()
        srs.SetFromUserInput(crs.authid())

        grid_data.SetProjection(srs.ExportToWkt())
        grid_data.SetGeoTransform(geo_transform)
        driver.CreateCopy(file_path, grid_data, 0)  # write the in-memory dataset to disk

    def _process_tile(self, tile_img: np.ndarray) -> np.ndarray:
        """Run the model on a batch of tiles and scale the outputs."""
        result = self.model.process(tile_img)
        result[np.isnan(result)] = 0  # models may emit NaNs; treat them as zero
        result *= self.superresolution_parameters.output_scaling

        # NOTE - currently we are saving result as float32, so we are losing some accuracy.
        result = result.astype(np.float32)

        return result
zipdeepness/processing/map_processor/map_processor_training_data_export.py ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ This file implements map processing for the Training Data Export Tool """
2
+
3
+ import datetime
4
+ import os
5
+
6
+ import numpy as np
7
+ from qgis.core import QgsProject
8
+
9
+ from deepness.common.lazy_package_loader import LazyPackageLoader
10
+ from deepness.common.processing_parameters.training_data_export_parameters import TrainingDataExportParameters
11
+ from deepness.processing import processing_utils
12
+ from deepness.processing.map_processor.map_processing_result import (MapProcessingResultCanceled,
13
+ MapProcessingResultSuccess)
14
+ from deepness.processing.map_processor.map_processor import MapProcessor
15
+ from deepness.processing.tile_params import TileParams
16
+
17
+ cv2 = LazyPackageLoader('cv2')
18
+
19
+
20
class MapProcessorTrainingDataExport(MapProcessor):
    """
    Map Processor specialized in exporting training data, not doing any prediction with model.
    Exports tiles for the orthophoto and (optionally) a mask layer.
    """

    def __init__(self,
                 params: TrainingDataExportParameters,
                 **kwargs):
        super().__init__(
            params=params,
            **kwargs)
        self.params = params
        self.output_dir_path = self._create_output_dir()

    def _create_output_dir(self) -> str:
        """Create (and return the path of) a timestamped output subdirectory."""
        datetime_string = datetime.datetime.now().strftime("%d%m%Y_%H%M%S")
        full_path = os.path.join(self.params.output_directory_path, datetime_string)
        os.makedirs(full_path, exist_ok=True)
        return full_path

    def _run(self):
        """Iterate over tiles, writing image tiles and (optionally) mask tiles to disk."""
        export_segmentation_mask = self.params.segmentation_mask_layer_id is not None
        if export_segmentation_mask:
            vlayer_segmentation = QgsProject.instance().mapLayers()[self.params.segmentation_mask_layer_id]
            vlayer_segmentation.setCrs(self.rlayer.crs())
            segmentation_mask_full = processing_utils.create_area_mask_image(
                rlayer=self.rlayer,
                vlayer_mask=vlayer_segmentation,
                extended_extent=self.extended_extent,
                rlayer_units_per_pixel=self.rlayer_units_per_pixel,
                image_shape_yx=(self.img_size_y_pixels, self.img_size_x_pixels),
                files_handler=self.file_handler)

            # add a leading channel axis so per-tile cropping works uniformly
            segmentation_mask_full = segmentation_mask_full[np.newaxis, ...]

        number_of_written_tiles = 0
        for tile_img, tile_params in self.tiles_generator():  # tile_params: TileParams
            if self.isCanceled():
                return MapProcessingResultCanceled()

            # NOTE: the redundant `tile_params = tile_params` no-op was removed here

            if self.params.export_image_tiles:
                file_name = f'tile_img_{tile_params.x_bin_number}_{tile_params.y_bin_number}.png'
                file_path = os.path.join(self.output_dir_path, file_name)

                if tile_img.dtype in [np.uint32, np.int32]:
                    print(f'Exporting image with data type {tile_img.dtype} is not supported. Trimming to uint16. Consider changing the data type in the source image.')
                    tile_img = tile_img.astype(np.uint16)

                # cv2 expects BGR(A) channel order on write
                if tile_img.shape[-1] == 4:
                    tile_img = cv2.cvtColor(tile_img, cv2.COLOR_RGBA2BGRA)
                elif tile_img.shape[-1] == 3:
                    tile_img = cv2.cvtColor(tile_img, cv2.COLOR_RGB2BGR)

                cv2.imwrite(file_path, tile_img)
                number_of_written_tiles += 1

            if export_segmentation_mask:
                segmentation_mask_for_tile = tile_params.get_entire_tile_from_full_img(segmentation_mask_full)

                file_name = f'tile_mask_{tile_params.x_bin_number}_{tile_params.y_bin_number}.png'
                file_path = os.path.join(self.output_dir_path, file_name)

                cv2.imwrite(file_path, segmentation_mask_for_tile[0])

        result_message = self._create_result_message(number_of_written_tiles)
        return MapProcessingResultSuccess(result_message)

    def _create_result_message(self, number_of_written_tiles) -> str:
        """Build the user-facing summary of the export."""
        total_area = self.img_size_x_pixels * self.img_size_y_pixels * self.params.resolution_m_per_px**2
        return f'Exporting data finished!\n' \
               f'Exported {number_of_written_tiles} tiles.\n' \
               f'Total processed area: {total_area:.2f} m^2\n' \
               f'Directory: "{self.output_dir_path}"'
zipdeepness/processing/map_processor/map_processor_with_model.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ """ This file implements map processing functions common for all map processors using a neural model """
3
+
4
+ from typing import List
5
+
6
+ from deepness.processing.map_processor.map_processor import MapProcessor
7
+ from deepness.processing.models.model_base import ModelBase
8
+
9
+
10
class MapProcessorWithModel(MapProcessor):
    """Shared base class for map processors that run a neural-network model."""

    def __init__(self, model: ModelBase, **kwargs):
        super().__init__(**kwargs)
        self.model = model

    def _get_indexes_of_model_output_channels_to_create(self) -> List[int]:
        """Decide which model output channels/classes are used at presentation
        level (e.g. for which channels a result layer should be created).
        """
        return self.model.get_number_of_output_channels()
zipdeepness/processing/map_processor/utils/ckdtree.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import heapq
2
+
3
+ import numpy as np
4
+
5
+
6
class cKDTree:
    """Minimal pure-numpy k-d tree with k-nearest-neighbour queries.

    Drop-in substitute for a small subset of ``scipy.spatial.cKDTree``:
    construction from an (n, d) array-like and ``query(point, k)`` returning
    the indices of the k nearest points, nearest first.
    """

    def __init__(self, data):
        """Build the tree over the given (n, d) array of points."""
        self.data = np.asarray(data)
        self.tree = self._build_kdtree(np.arange(len(self.data)))

    def _build_kdtree(self, indices, depth=0):
        """Recursively build nodes, splitting on axes cyclically at the median."""
        if len(indices) == 0:
            return None
        axis = depth % self.data.shape[1]  # alternate between dimensions

        sorted_indices = indices[np.argsort(self.data[indices, axis])]
        mid = len(sorted_indices) // 2
        return {
            'index': sorted_indices[mid],
            'left': self._build_kdtree(sorted_indices[:mid], depth + 1),
            'right': self._build_kdtree(sorted_indices[mid + 1:], depth + 1),
        }

    def query(self, point: np.ndarray, k: int):
        """Return indices of the k nearest points to `point`, nearest first.

        BUGFIX: results are now sorted by ascending distance (previously the
        heap content was returned in arbitrary order); the dead
        `best_distances` plumbing was also removed.
        """
        point = np.asarray(point)

        heap = []  # max-heap of the k best candidates; entries are (-distance, index)
        self._query(point, k, self.tree, 0, heap)

        # unpack the negated distances and sort ascending by true distance
        return [index for _, index in sorted((-neg_dist, index) for neg_dist, index in heap)]

    def _query(self, point, k, node, depth, heap):
        """Recursive k-NN search; `heap` accumulates (-distance, index) pairs."""
        if node is None:
            return

        axis = depth % self.data.shape[1]
        node_point = self.data[node['index']]
        distance = np.linalg.norm(node_point - point)

        # keep at most k candidates; -heap[0][0] is the current k-th best distance
        if len(heap) < k:
            heapq.heappush(heap, (-distance, node['index']))
        elif distance < -heap[0][0]:
            heapq.heapreplace(heap, (-distance, node['index']))

        # descend into the half-space containing the query point first
        if point[axis] < node_point[axis]:
            near, far = node['left'], node['right']
        else:
            near, far = node['right'], node['left']

        self._query(point, k, near, depth + 1, heap)

        # visit the other half-space only if the splitting plane is closer
        # than the current k-th best distance (or the heap is not full yet)
        if len(heap) < k or abs(point[axis] - node_point[axis]) < -heap[0][0]:
            self._query(point, k, far, depth + 1, heap)
zipdeepness/processing/models/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ """ Module including class implementations for the deep learning inference and related functions
2
+ """
zipdeepness/processing/models/buildings_type_MA.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4e7c06401e20f6791e272f5ec694d4ae285c79ed6ceb9213875972114869b90f
3
+ size 464134848
zipdeepness/processing/models/detector.py ADDED
@@ -0,0 +1,709 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Module including the class for the object detection task and related functions
2
+ """
3
+ from dataclasses import dataclass
4
+ from typing import List, Optional, Tuple
5
+ from qgis.core import Qgis, QgsGeometry, QgsRectangle, QgsPointXY
6
+
7
+ import cv2
8
+ import numpy as np
9
+
10
+ from deepness.common.processing_parameters.detection_parameters import DetectorType
11
+ from deepness.processing.models.model_base import ModelBase
12
+ from deepness.processing.processing_utils import BoundingBox
13
+
14
+
15
@dataclass
class Detection:
    """Class that represents single detection result in object detection model

    Parameters
    ----------
    bbox : BoundingBox
        bounding box describing the detection rectangle
    conf : float
        confidence of the detection
    clss : int
        class of the detected object
    mask : Optional[np.ndarray]
        optional instance mask of the detected object (segmentation models)
    mask_offsets : Optional[Tuple[int, int]]
        optional (x, y) offset locating the mask in global image coordinates
    """

    bbox: BoundingBox
    """BoundingBox: bounding box describing the detection rectangle"""
    conf: float
    """float: confidence of the detection"""
    clss: int
    """int: class of the detected object"""
    mask: Optional[np.ndarray] = None
    """np.ndarray: mask of the detected object"""
    mask_offsets: Optional[Tuple[int, int]] = None
    """Tuple[int, int]: offsets of the mask"""

    def convert_to_global(self, offset_x: int, offset_y: int):
        """Apply (x, y) offset to bounding box coordinates (tile -> global image).

        Parameters
        ----------
        offset_x : int
            horizontal offset in pixels to shift the bounding box by
        offset_y : int
            vertical offset in pixels to shift the bounding box by
        """
        self.bbox.apply_offset(offset_x=offset_x, offset_y=offset_y)

        # the mask itself stays in tile-local coordinates; only the offset is remembered
        if self.mask is not None:
            self.mask_offsets = (offset_x, offset_y)

    def get_bbox_xyxy(self) -> np.ndarray:
        """Convert stored bounding box into x1y1x2y2 format

        Returns
        -------
        np.ndarray
            Array in (x1, y1, x2, y2) format
        """
        return self.bbox.get_xyxy()

    def get_bbox_xyxy_rot(self) -> np.ndarray:
        """Convert stored bounding box into x1y1x2y2r format

        Returns
        -------
        np.ndarray
            Array in (x1, y1, x2, y2, r) format
        """
        return self.bbox.get_xyxy_rot()

    def get_bbox_center(self) -> Tuple[int, int]:
        """Get center of the bounding box

        Returns
        -------
        Tuple[int, int]
            Center of the bounding box
        """
        return self.bbox.get_center()

    def __lt__(self, other):
        # detections are ordered by bounding-box area (used e.g. for sorting)
        return self.bbox.get_area() < other.bbox.get_area()
87
+
88
+
89
+ class Detector(ModelBase):
90
+ """Class implements object detection features
91
+
92
+ Detector model is used for detection of objects in images. It is based on YOLOv5/YOLOv7 models style.
93
+ """
94
+
95
    def __init__(self, model_file_path: str):
        """Initialize object detection model

        Parameters
        ----------
        model_file_path : str
            Path to model file"""
        super(Detector, self).__init__(model_file_path)

        # inference parameters; must be set via set_inference_params() /
        # set_model_type_param() before postprocessing() is called
        self.confidence = None
        """float: Confidence threshold"""
        self.iou_threshold = None
        """float: IoU threshold"""
        self.model_type: Optional[DetectorType] = None
        """DetectorType: Model type"""
110
+
111
+ def set_inference_params(self, confidence: float, iou_threshold: float):
112
+ """Set inference parameters
113
+
114
+ Parameters
115
+ ----------
116
+ confidence : float
117
+ Confidence threshold
118
+ iou_threshold : float
119
+ IoU threshold
120
+ """
121
+ self.confidence = confidence
122
+ self.iou_threshold = iou_threshold
123
+
124
+ def set_model_type_param(self, model_type: DetectorType):
125
+ """Set model type parameters
126
+
127
+ Parameters
128
+ ----------
129
+ model_type : str
130
+ Model type
131
+ """
132
+ self.model_type = model_type
133
+
134
+ @classmethod
135
+ def get_class_display_name(cls):
136
+ """Get class display name
137
+
138
+ Returns
139
+ -------
140
+ str
141
+ Class display name"""
142
+ return cls.__name__
143
+
144
    def get_number_of_output_channels(self):
        """Get number of output channels (number of detection classes).

        Returns
        -------
        List[int]
            Single-element list with the number of detection classes.

        Raises
        ------
        NotImplementedError
            If the model has an unsupported number of output layers.
        """
        class_names = self.get_outputs_channel_names()[0]
        if class_names is not None:
            return [len(class_names)]  # If class names are specified, we expect to have exactly this number of channels as specified

        model_type_params = self.model_type.get_parameters()

        # some exporters emit the output transposed; choose the axis that holds per-detection attributes
        shape_index = -2 if model_type_params.has_inverted_output_shape else -1

        if len(self.outputs_layers) == 1:
            # YOLO_ULTRALYTICS_OBB: 4 bbox values + 1 rotation value precede the class scores
            if self.model_type == DetectorType.YOLO_ULTRALYTICS_OBB:
                return [self.outputs_layers[0].shape[shape_index] - 4 - 1]

            elif model_type_params.skipped_objectness_probability:
                # only the 4 bbox values precede the class probabilities
                return [self.outputs_layers[0].shape[shape_index] - 4]

            return [self.outputs_layers[0].shape[shape_index] - 4 - 1]  # shape - 4 bboxes - 1 conf

        # YOLO_ULTRALYTICS_SEGMENTATION: the second output layer holds the mask prototypes
        elif len(self.outputs_layers) == 2 and self.model_type == DetectorType.YOLO_ULTRALYTICS_SEGMENTATION:
            return [self.outputs_layers[0].shape[shape_index] - 4 - self.outputs_layers[1].shape[1]]

        else:
            raise NotImplementedError("Model with multiple output layer is not supported! Use only one output layer.")
176
+
177
+ def postprocessing(self, model_output):
178
+ """Postprocess model output
179
+
180
+ NOTE: Maybe refactor this, as it has many added layers of checks which can be simplified.
181
+
182
+ Parameters
183
+ ----------
184
+ model_output : list
185
+ Model output
186
+
187
+ Returns
188
+ -------
189
+ list
190
+ Batch of lists of detections
191
+ """
192
+ if self.confidence is None or self.iou_threshold is None:
193
+ return Exception(
194
+ "Confidence or IOU threshold is not set for model. Use self.set_inference_params"
195
+ )
196
+
197
+ if self.model_type is None:
198
+ return Exception(
199
+ "Model type is not set for model. Use self.set_model_type_param"
200
+ )
201
+
202
+ batch_detection = []
203
+ outputs_range = len(model_output)
204
+
205
+ if self.model_type == DetectorType.YOLO_ULTRALYTICS_SEGMENTATION or self.model_type == DetectorType.YOLO_v9:
206
+ outputs_range = len(model_output[0])
207
+
208
+ for i in range(outputs_range):
209
+ masks = None
210
+ rots = None
211
+ detections = []
212
+
213
+ if self.model_type == DetectorType.YOLO_v5_v7_DEFAULT:
214
+ boxes, conf, classes = self._postprocessing_YOLO_v5_v7_DEFAULT(model_output[0][i])
215
+ elif self.model_type == DetectorType.YOLO_v6:
216
+ boxes, conf, classes = self._postprocessing_YOLO_v6(model_output[0][i])
217
+ elif self.model_type == DetectorType.YOLO_v9:
218
+ boxes, conf, classes = self._postprocessing_YOLO_v9(model_output[0][i])
219
+ elif self.model_type == DetectorType.YOLO_ULTRALYTICS:
220
+ boxes, conf, classes = self._postprocessing_YOLO_ULTRALYTICS(model_output[0][i])
221
+ elif self.model_type == DetectorType.YOLO_ULTRALYTICS_SEGMENTATION:
222
+ boxes, conf, classes, masks = self._postprocessing_YOLO_ULTRALYTICS_SEGMENTATION(model_output[0][i], model_output[1][i])
223
+ elif self.model_type == DetectorType.YOLO_ULTRALYTICS_OBB:
224
+ boxes, conf, classes, rots = self._postprocessing_YOLO_ULTRALYTICS_OBB(model_output[0][i])
225
+ else:
226
+ raise NotImplementedError(f"Model type not implemented! ('{self.model_type}')")
227
+
228
+ masks = masks if masks is not None else [None] * len(boxes)
229
+ rots = rots if rots is not None else [0.0] * len(boxes)
230
+
231
+ for b, c, cl, m, r in zip(boxes, conf, classes, masks, rots):
232
+ det = Detection(
233
+ bbox=BoundingBox(
234
+ x_min=b[0],
235
+ x_max=b[2],
236
+ y_min=b[1],
237
+ y_max=b[3],
238
+ rot=r),
239
+ conf=c,
240
+ clss=cl,
241
+ mask=m,
242
+ )
243
+ detections.append(det)
244
+
245
+ batch_detection.append(detections)
246
+
247
+ return batch_detection
248
+
249
+ def _postprocessing_YOLO_v5_v7_DEFAULT(self, model_output):
250
+ outputs_filtered = np.array(
251
+ list(filter(lambda x: x[4] >= self.confidence, model_output))
252
+ )
253
+
254
+ if len(outputs_filtered.shape) < 2:
255
+ return [], [], []
256
+
257
+ probabilities = outputs_filtered[:, 4]
258
+
259
+ outputs_x1y1x2y2 = self.xywh2xyxy(outputs_filtered)
260
+
261
+ pick_indxs = self.non_max_suppression_fast(
262
+ outputs_x1y1x2y2,
263
+ probs=probabilities,
264
+ iou_threshold=self.iou_threshold)
265
+
266
+ outputs_nms = outputs_x1y1x2y2[pick_indxs]
267
+
268
+ boxes = np.array(outputs_nms[:, :4], dtype=int)
269
+ conf = outputs_nms[:, 4]
270
+ classes = np.argmax(outputs_nms[:, 5:], axis=1)
271
+
272
+ return boxes, conf, classes
273
+
274
    def _postprocessing_YOLO_v6(self, model_output):
        """Postprocess a single image of YOLOv6-style output.

        Rows are (x, y, w, h, objectness, class_0_prob, ...).
        Returns (boxes_xyxy, confidences, class_ids) after thresholding and NMS.
        """
        # keep rows whose best class probability passes the confidence threshold
        outputs_filtered = np.array(
            list(filter(lambda x: np.max(x[5:]) >= self.confidence, model_output))
        )

        if len(outputs_filtered.shape) < 2:
            # nothing passed the filter (np.array of an empty list is 1-D)
            return [], [], []

        # NOTE(review): NMS scoring uses column 4 (objectness) while the filter
        # above uses class probabilities — verify this asymmetry is intended
        probabilities = outputs_filtered[:, 4]

        outputs_x1y1x2y2 = self.xywh2xyxy(outputs_filtered)

        pick_indxs = self.non_max_suppression_fast(
            outputs_x1y1x2y2,
            probs=probabilities,
            iou_threshold=self.iou_threshold)

        outputs_nms = outputs_x1y1x2y2[pick_indxs]

        boxes = np.array(outputs_nms[:, :4], dtype=int)
        conf = np.max(outputs_nms[:, 5:], axis=1)
        classes = np.argmax(outputs_nms[:, 5:], axis=1)

        return boxes, conf, classes
298
+
299
    def _postprocessing_YOLO_v9(self, model_output):
        """Postprocess a single image of YOLOv9-style output.

        Rows after transpose are (x, y, w, h, class_0_prob, ...) with no
        separate objectness column.
        Returns (boxes_xyxy, confidences, class_ids) after thresholding and NMS.
        """
        # (attributes, detections) -> (detections, attributes)
        model_output = np.transpose(model_output, (1, 0))

        # keep rows whose best class probability passes the confidence threshold
        outputs_filtered = np.array(
            list(filter(lambda x: np.max(x[4:]) >= self.confidence, model_output))
        )

        if len(outputs_filtered.shape) < 2:
            # nothing passed the filter
            return [], [], []

        probabilities = np.max(outputs_filtered[:, 4:], axis=1)

        outputs_x1y1x2y2 = self.xywh2xyxy(outputs_filtered)

        pick_indxs = self.non_max_suppression_fast(
            outputs_x1y1x2y2,
            probs=probabilities,
            iou_threshold=self.iou_threshold)

        outputs_nms = outputs_x1y1x2y2[pick_indxs]

        boxes = np.array(outputs_nms[:, :4], dtype=int)
        conf = np.max(outputs_nms[:, 4:], axis=1)
        classes = np.argmax(outputs_nms[:, 4:], axis=1)

        return boxes, conf, classes
325
+
326
    def _postprocessing_YOLO_ULTRALYTICS(self, model_output):
        """Postprocess a single image of Ultralytics YOLO (v8+) output.

        Rows after transpose are (x, y, w, h, class_0_prob, ...) with no
        separate objectness column.
        Returns (boxes_xyxy, confidences, class_ids) after thresholding and NMS.
        """
        # (attributes, detections) -> (detections, attributes)
        model_output = np.transpose(model_output, (1, 0))

        # keep rows whose best class probability passes the confidence threshold
        outputs_filtered = np.array(
            list(filter(lambda x: np.max(x[4:]) >= self.confidence, model_output))
        )

        if len(outputs_filtered.shape) < 2:
            # nothing passed the filter
            return [], [], []

        probabilities = np.max(outputs_filtered[:, 4:], axis=1)

        outputs_x1y1x2y2 = self.xywh2xyxy(outputs_filtered)

        pick_indxs = self.non_max_suppression_fast(
            outputs_x1y1x2y2,
            probs=probabilities,
            iou_threshold=self.iou_threshold)

        outputs_nms = outputs_x1y1x2y2[pick_indxs]

        boxes = np.array(outputs_nms[:, :4], dtype=int)
        conf = np.max(outputs_nms[:, 4:], axis=1)
        classes = np.argmax(outputs_nms[:, 4:], axis=1)

        return boxes, conf, classes
352
+
353
    def _postprocessing_YOLO_ULTRALYTICS_SEGMENTATION(self, detections, protos):
        """Postprocess a single image of Ultralytics YOLO segmentation output.

        Rows after transpose are (x, y, w, h, class probs..., mask coefficients...);
        `protos` holds the mask prototypes the coefficients are combined with.
        Returns (boxes_xyxy, confidences, class_ids, masks).
        """
        # (attributes, detections) -> (detections, attributes)
        detections = np.transpose(detections, (1, 0))

        number_of_class = self.get_number_of_output_channels()[0]
        mask_start_index = 4 + number_of_class  # mask coefficients follow the class probabilities

        # keep rows whose best class probability passes the confidence threshold
        outputs_filtered = np.array(
            list(filter(lambda x: np.max(x[4:4+number_of_class]) >= self.confidence, detections))
        )

        if len(outputs_filtered.shape) < 2:
            # nothing passed the filter
            return [], [], [], []

        probabilities = np.max(outputs_filtered[:, 4:4+number_of_class], axis=1)

        outputs_x1y1x2y2 = self.xywh2xyxy(outputs_filtered)

        pick_indxs = self.non_max_suppression_fast(
            outputs_x1y1x2y2,
            probs=probabilities,
            iou_threshold=self.iou_threshold)

        outputs_nms = outputs_x1y1x2y2[pick_indxs]

        boxes = np.array(outputs_nms[:, :4], dtype=int)
        conf = np.max(outputs_nms[:, 4:4+number_of_class], axis=1)
        classes = np.argmax(outputs_nms[:, 4:4+number_of_class], axis=1)
        masks_in = np.array(outputs_nms[:, mask_start_index:], dtype=float)

        # combine mask coefficients with the prototypes and crop to the boxes
        masks = self.process_mask(protos, masks_in, boxes)

        return boxes, conf, classes, masks
385
+
386
    def _postprocessing_YOLO_ULTRALYTICS_OBB(self, model_output):
        """Postprocess a single image of Ultralytics YOLO oriented-bounding-box output.

        Rows after transpose are (x, y, w, h, class probs..., rotation).
        Returns (boxes_xyxy, confidences, class_ids, rotations).
        """
        # (attributes, detections) -> (detections, attributes)
        model_output = np.transpose(model_output, (1, 0))

        # last column is the rotation; class probabilities are columns 4..-2
        outputs_filtered = np.array(
            list(filter(lambda x: np.max(x[4:-1]) >= self.confidence, model_output))
        )

        if len(outputs_filtered.shape) < 2:
            # nothing passed the filter
            return [], [], [], []

        probabilities = np.max(outputs_filtered[:, 4:-1], axis=1)
        rotations = outputs_filtered[:, -1]

        outputs_x1y1x2y2_rot = self.xywhr2xyxyr(outputs_filtered, rotations)

        pick_indxs = self.non_max_suppression_fast(
            outputs_x1y1x2y2_rot,
            probs=probabilities,
            iou_threshold=self.iou_threshold,
            with_rot=True)

        outputs_nms = outputs_x1y1x2y2_rot[pick_indxs]

        boxes = np.array(outputs_nms[:, :4], dtype=int)
        conf = np.max(outputs_nms[:, 4:-1], axis=1)
        classes = np.argmax(outputs_nms[:, 4:-1], axis=1)
        rots = outputs_nms[:, -1]

        return boxes, conf, classes, rots
415
+
416
    # based on https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/ops.py#L638C1-L638C67
    def process_mask(self, protos, masks_in, bboxes):
        """Combine mask coefficients with prototype masks and crop them to the boxes.

        `protos` is (C, mh, mw) prototypes, `masks_in` is (n, C) coefficients,
        `bboxes` is (n, 4) boxes in model-input pixel coordinates.
        Returns (n, ih, iw) binary uint8 masks at model-input resolution.
        """
        c, mh, mw = protos.shape  # CHW
        ih, iw = self.input_shape[2:]

        # linear combination of prototypes, squashed to (0, 1)
        masks = self.sigmoid(np.matmul(masks_in, protos.astype(float).reshape(c, -1))).reshape(-1, mh, mw)

        # scale the boxes down to prototype-mask resolution
        downsampled_bboxes = bboxes.copy().astype(float)
        downsampled_bboxes[:, 0] *= mw / iw
        downsampled_bboxes[:, 2] *= mw / iw
        downsampled_bboxes[:, 3] *= mh / ih
        downsampled_bboxes[:, 1] *= mh / ih

        masks = self.crop_mask(masks, downsampled_bboxes)
        scaled_masks = np.zeros((len(masks), ih, iw))

        # upsample each mask back to model-input resolution
        for i in range(len(masks)):
            scaled_masks[i] = cv2.resize(masks[i], (iw, ih), interpolation=cv2.INTER_LINEAR)

        masks = np.uint8(scaled_masks >= 0.5)  # binarize

        return masks
438
+
439
+ @staticmethod
440
+ def sigmoid(x):
441
+ return 1 / (1 + np.exp(-x))
442
+
443
+ @staticmethod
444
+ # based on https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/ops.py#L598C1-L614C65
445
+ def crop_mask(masks, boxes):
446
+ n, h, w = masks.shape
447
+ x1, y1, x2, y2 = np.split(boxes[:, :, None], 4, axis=1) # x1 shape(n,1,1)
448
+ r = np.arange(w, dtype=x1.dtype)[None, None, :] # rows shape(1,1,w)
449
+ c = np.arange(h, dtype=x1.dtype)[None, :, None] # cols shape(1,h,1)
450
+
451
+ return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2))
452
+
453
+ @staticmethod
454
+ def xywh2xyxy(x: np.ndarray) -> np.ndarray:
455
+ """Convert bounding box from (x,y,w,h) to (x1,y1,x2,y2) format
456
+
457
+ Parameters
458
+ ----------
459
+ x : np.ndarray
460
+ Bounding box in (x,y,w,h) format with classes' probabilities
461
+
462
+ Returns
463
+ -------
464
+ np.ndarray
465
+ Bounding box in (x1,y1,x2,y2) format
466
+ """
467
+ y = np.copy(x)
468
+ y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x
469
+ y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y
470
+ y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x
471
+ y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y
472
+ return y
473
+
474
+ @staticmethod
475
+ def xywhr2xyxyr(bbox: np.ndarray, rot: np.ndarray) -> np.ndarray:
476
+ """Convert bounding box from (x,y,w,h,r) to (x1,y1,x2,y2,r) format, keeping rotated boxes in range [0, pi/2]
477
+
478
+ Parameters
479
+ ----------
480
+ bbox : np.ndarray
481
+ Bounding box in (x,y,w,h) format with classes' probabilities and rotations
482
+
483
+ Returns
484
+ -------
485
+ np.ndarray
486
+ Bounding box in (x1,y1,x2,y2,r) format, keep classes' probabilities
487
+ """
488
+ x, y, w, h = bbox[:, 0], bbox[:, 1], bbox[:, 2], bbox[:, 3]
489
+
490
+ w_ = np.where(w > h, w, h)
491
+ h_ = np.where(w > h, h, w)
492
+ r_ = np.where(w > h, rot, rot + np.pi / 2) % np.pi
493
+
494
+ new_bbox_xywh = np.stack([x, y, w_, h_], axis=1)
495
+ new_bbox_xyxy = Detector.xywh2xyxy(new_bbox_xywh)
496
+
497
+ return np.concatenate([new_bbox_xyxy, bbox[:, 4:-1], r_[:, None]], axis=1)
498
+
499
+
500
@staticmethod
def non_max_suppression_fast(boxes: np.ndarray, probs: np.ndarray, iou_threshold: float, with_rot: bool = False) -> List:
    """Apply non-maximum suppression to bounding boxes

    Based on:
    https://github.com/amusi/Non-Maximum-Suppression/blob/master/nms.py

    Parameters
    ----------
    boxes : np.ndarray
        Bounding boxes in (x1,y1,x2,y2) format or (x1,y1,x2,y2,r) format if with_rot is True
    probs : np.ndarray
        Confidence scores
    iou_threshold : float
        IoU threshold
    with_rot: bool
        If True, use rotated IoU

    Returns
    -------
    List
        List of indexes of bounding boxes to keep
    """
    # If no bounding boxes, return empty list
    if len(boxes) == 0:
        return []

    # Bounding boxes
    boxes = np.array(boxes)

    # coordinates of bounding boxes
    start_x = boxes[:, 0]
    start_y = boxes[:, 1]
    end_x = boxes[:, 2]
    end_y = boxes[:, 3]

    if with_rot:
        # Rotations of bounding boxes (only present in the 5-column layout)
        rotations = boxes[:, 4]

    # Confidence scores of bounding boxes
    score = np.array(probs)

    # Picked bounding boxes
    picked_boxes = []

    # Compute areas of bounding boxes
    # (inclusive-pixel convention, +1 — must match compute_iou)
    areas = (end_x - start_x + 1) * (end_y - start_y + 1)

    # Sort by confidence score of bounding boxes (ascending, so best is last)
    order = np.argsort(score)

    # Iterate bounding boxes
    while order.size > 0:
        # The index of largest confidence score
        index = order[-1]

        # Pick the bounding box with largest confidence score
        picked_boxes.append(index)

        # IoU of the picked box against every remaining candidate.
        # `ratio[i]` aligns with `order[i]`; the picked box (order[-1]) is
        # excluded because both helpers iterate over order[:-1].
        if not with_rot:
            ratio = Detector.compute_iou(index, order, start_x, start_y, end_x, end_y, areas)
        else:
            ratio = Detector.compute_rotated_iou(index, order, start_x, start_y, end_x, end_y, rotations, areas)

        # Keep candidates overlapping less than the threshold. Indexing `order`
        # with `left` also drops the picked box, since ratio has size len(order)-1.
        left = np.where(ratio < iou_threshold)
        order = order[left]

    return picked_boxes
569
+
570
+ @staticmethod
571
+ def compute_iou(index: int, order: np.ndarray, start_x: np.ndarray, start_y: np.ndarray, end_x: np.ndarray, end_y: np.ndarray, areas: np.ndarray) -> np.ndarray:
572
+ """Compute IoU for bounding boxes
573
+
574
+ Parameters
575
+ ----------
576
+ index : int
577
+ Index of the bounding box
578
+ order : np.ndarray
579
+ Order of bounding boxes
580
+ start_x : np.ndarray
581
+ Start x coordinate of bounding boxes
582
+ start_y : np.ndarray
583
+ Start y coordinate of bounding boxes
584
+ end_x : np.ndarray
585
+ End x coordinate of bounding boxes
586
+ end_y : np.ndarray
587
+ End y coordinate of bounding boxes
588
+ areas : np.ndarray
589
+ Areas of bounding boxes
590
+
591
+ Returns
592
+ -------
593
+ np.ndarray
594
+ IoU values
595
+ """
596
+
597
+ # Compute ordinates of intersection-over-union(IOU)
598
+ x1 = np.maximum(start_x[index], start_x[order[:-1]])
599
+ x2 = np.minimum(end_x[index], end_x[order[:-1]])
600
+ y1 = np.maximum(start_y[index], start_y[order[:-1]])
601
+ y2 = np.minimum(end_y[index], end_y[order[:-1]])
602
+
603
+ # Compute areas of intersection-over-union
604
+ w = np.maximum(0.0, x2 - x1 + 1)
605
+ h = np.maximum(0.0, y2 - y1 + 1)
606
+ intersection = w * h
607
+
608
+ # Compute the ratio between intersection and union
609
+ return intersection / (areas[index] + areas[order[:-1]] - intersection)
610
+
611
+
612
@staticmethod
def compute_rotated_iou(index: int, order: np.ndarray, start_x: np.ndarray, start_y: np.ndarray, end_x: np.ndarray, end_y: np.ndarray, rotations: np.ndarray, areas: np.ndarray) -> np.ndarray:
    """Compute IoU for rotated bounding boxes using QGIS polygon geometry.

    Each box is materialized as a QgsGeometry rectangle rotated around its
    center; intersection areas are computed exactly with polygon clipping.
    The result aligns with ``order[:-1]`` (the reference box, ``order[-1]``,
    is excluded), mirroring compute_iou.

    NOTE(review): QgsGeometry.rotate takes degrees and the model angle is in
    radians (converted below); confirm the rotation *direction* convention
    (QGIS clockwise vs. model counter-clockwise) matches the training data.

    NOTE(review): unlike compute_iou, `areas` here are axis-aligned areas with
    the +1 inclusive-pixel convention while intersections are exact polygon
    areas — the resulting ratios are approximate IoUs; confirm acceptable.

    Parameters
    ----------
    index : int
        Index of the reference bounding box.
    order : np.ndarray
        Candidate indices; last entry is the reference box itself.
    start_x, start_y, end_x, end_y : np.ndarray
        Corner coordinates of all bounding boxes.
    rotations : np.ndarray
        Rotations of bounding boxes (in radians, around the center).
    areas : np.ndarray
        Areas of bounding boxes.

    Returns
    -------
    np.ndarray
        IoU values, one per box in ``order[:-1]``.
    """

    def create_rotated_geom(x1, y1, x2, y2, rotation):
        """Helper function to create a rotated QgsGeometry rectangle"""
        # Rotation pivot: the box center.
        center_x = (x1 + x2) / 2
        center_y = (y1 + y2) / 2

        # Create a rectangle using QgsRectangle
        rect = QgsRectangle(QgsPointXY(x1, y1), QgsPointXY(x2, y2))

        # Convert to QgsGeometry
        geom = QgsGeometry.fromRect(rect)

        # Rotate the geometry around its center (rotate() mutates in place
        # and returns an operation-result code, not the geometry).
        result = geom.rotate(np.degrees(rotation), QgsPointXY(center_x, center_y))

        # On failure fall back to an empty geometry, which yields zero overlap.
        if result == Qgis.GeometryOperationResult.Success:
            return geom
        else:
            return QgsGeometry()

    # Create the rotated geometry for the current bounding box
    geom1 = create_rotated_geom(start_x[index], start_y[index], end_x[index], end_y[index], rotations[index])

    iou_values = []

    # Iterate over the rest of the boxes in order and calculate IoU
    for i in range(len(order) - 1):
        # Create the rotated geometry for the other boxes in the order
        geom2 = create_rotated_geom(start_x[order[i]], start_y[order[i]], end_x[order[i]], end_y[order[i]], rotations[order[i]])

        # Compute the intersection geometry
        intersection_geom = geom1.intersection(geom2)

        # Check if intersection is empty
        if intersection_geom.isEmpty():
            intersection_area = 0.0
        else:
            # Compute the intersection area
            intersection_area = intersection_geom.area()

        # Compute the union area
        union_area = areas[index] + areas[order[i]] - intersection_area

        # Compute IoU (guard against degenerate zero-area unions)
        iou = intersection_area / union_area if union_area > 0 else 0.0
        iou_values.append(iou)

    return np.array(iou_values)
689
+
690
+
691
def check_loaded_model_outputs(self):
    """Check if the loaded detection model's outputs are valid.

    A valid model:
    - has 1 or 2 output layers (e.g. boxes + mask prototypes),
    - has a first output layer with a 3-dimensional shape
      (batch_size, detections, values).

    Raises
    ------
    NotImplementedError
        If the number of output layers is not 1 or 2.
    Exception
        If the first output layer does not have 3 dimensions.
    """
    num_outputs = len(self.outputs_layers)

    # Guard clause: the original message claimed only one output was allowed,
    # contradicting the check — it accepts one or two output layers.
    if num_outputs not in (1, 2):
        raise NotImplementedError(
            f"Model with {num_outputs} output layers is not supported! "
            f"Use a model with one or two output layers."
        )

    shape = self.outputs_layers[0].shape
    if len(shape) != 3:
        raise Exception(
            f"Detection model output should have 3 dimensions: (Batch_size, detections, values). "
            f"Actually has: {shape}"
        )
zipdeepness/processing/models/dual.py ADDED
@@ -0,0 +1,168 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from deepness.processing.models.segmentor import Segmentor
2
+ import numpy as np
3
+ import cv2
4
+ import onnxruntime as ort
5
+ import os
6
+ from typing import List
7
+
8
class DualModel(Segmentor):
    """Segmentor variant that additionally runs a second, bundled ONNX model on the same tiles.

    The primary model is loaded by the Segmentor base class from ``model_file_path``;
    the second model (``buildings_type_MA.onnx``) is loaded from this module's own
    directory. Its outputs ("logits" and "outputs_mask.35") are post-processed here
    Mask2Former-style (class-query logits + per-query mask logits) into a semantic
    segmentation map.
    """

    def __init__(self, model_file_path: str):
        """Load the primary segmentation model and the bundled second model.

        Parameters
        ----------
        model_file_path : str
            Path to the primary ONNX model file (handled by Segmentor).
        """
        super().__init__(model_file_path)
        current_dir = os.path.dirname(os.path.abspath(__file__))
        # Both models are in the same folder
        self.second_model_file_path = os.path.join(current_dir, "buildings_type_MA.onnx")
        # Prefer CUDA when available, silently fall back to CPU otherwise.
        self.second_model = ort.InferenceSession(self.second_model_file_path, providers=["CUDAExecutionProvider", "CPUExecutionProvider"])

    def preprocess_tiles_ade20k(self,tiles_batched: np.ndarray) -> np.ndarray:
        """Normalize a batch of tiles with ADE20K statistics and convert NHWC -> NCHW float32.

        Parameters
        ----------
        tiles_batched : np.ndarray
            Tiles as (N, H, W, C) (or a single (H, W, C) image), uint8-range values 0-255.

        Returns
        -------
        np.ndarray
            Standardized float32 batch of shape (N, 3, H, W).
        """
        # ADE20K mean/std in 0-1 range
        ADE_MEAN = np.array([123.675, 116.280, 103.530]) / 255.0
        ADE_STD = np.array([58.395, 57.120, 57.375]) / 255.0

        tiles = tiles_batched.astype(np.float32) / 255.0  # 0-1

        # Standardize per channel
        tiles = (tiles - ADE_MEAN) / ADE_STD  # broadcasting over (N,H,W,C)

        # Ensure 3D channels (if grayscale)
        if tiles.ndim == 3:  # (H,W,C) single image
            tiles = np.expand_dims(tiles, axis=0)  # add batch dim

        if tiles.shape[-1] == 1:  # if only 1 channel
            tiles = np.repeat(tiles, 3, axis=-1)

        # NHWC -> NCHW
        tiles = np.transpose(tiles, (0, 3, 1, 2))

        return tiles.astype(np.float32)

    def stable_sigmoid(self, x):
        """Element-wise logistic sigmoid, split by sign so np.exp never overflows."""
        out = np.empty_like(x, dtype=np.float32)
        positive_mask = x >= 0
        negative_mask = ~positive_mask

        # For x >= 0 the classic form is safe; for x < 0 use exp(x)/(1+exp(x)).
        out[positive_mask] = 1 / (1 + np.exp(-x[positive_mask]))
        exp_x = np.exp(x[negative_mask])
        out[negative_mask] = exp_x / (1 + exp_x)
        return out

    def stable_softmax(self, x, axis=-1):
        """Softmax along `axis`, shifted by the running max for numerical stability."""
        max_x = np.max(x, axis=axis, keepdims=True)
        exp_x = np.exp(x - max_x)
        return exp_x / np.sum(exp_x, axis=axis, keepdims=True)

    def post_process_semantic_segmentation_numpy(self, class_queries_logits, masks_queries_logits, target_sizes=None):
        """NumPy port of Mask2Former semantic segmentation post-processing.

        Combines per-query class probabilities with per-query mask probabilities
        into per-class score maps, then argmaxes over classes.

        Parameters
        ----------
        class_queries_logits : np.ndarray
            Class logits per query, shape (B, Q, num_classes + 1); the last
            class is the "no object" null class and is dropped.
        masks_queries_logits : np.ndarray
            Mask logits per query, shape (B, Q, H, W).
        target_sizes : list[tuple[int, int]] | None
            Optional per-image (height, width) to resize score maps to before argmax.

        Returns
        -------
        list[np.ndarray]
            One int32 (H, W) class-index map per batch element.
        """
        # Softmax over classes (remove null class)
        masks_classes = self.stable_softmax(class_queries_logits, axis=-1)[..., :-1]

        # Sigmoid for masks
        masks_probs = self.stable_sigmoid(masks_queries_logits)

        # Combine: torch.einsum("bqc,bqhw->bchw")
        segmentation = np.einsum("bqc,bqhw->bchw", masks_classes, masks_probs)

        semantic_segmentation = []
        if target_sizes is not None:
            if len(target_sizes) != class_queries_logits.shape[0]:
                raise ValueError("target_sizes length must match batch size")

            for idx in range(len(target_sizes)):
                out_h, out_w = target_sizes[idx]
                # cv2.resize works on 2D maps, so resize channel by channel.
                logits_resized = np.zeros((segmentation.shape[1], out_h, out_w), dtype=np.float32)
                for c in range(segmentation.shape[1]):
                    logits_resized[c] = cv2.resize(
                        segmentation[idx, c],
                        (out_w, out_h),
                        interpolation=cv2.INTER_LINEAR
                    )
                semantic_map = np.argmax(logits_resized, axis=0)
                semantic_segmentation.append(semantic_map.astype(np.int32))
        else:
            for idx in range(segmentation.shape[0]):
                semantic_map = np.argmax(segmentation[idx], axis=0)
                semantic_segmentation.append(semantic_map.astype(np.int32))

        return semantic_segmentation

    def return_probs_mask2former(self,
                                 class_queries_logits,
                                 masks_queries_logits,
                                 target_sizes=None):
        """Like post_process_semantic_segmentation_numpy, but returns the raw
        per-class probability maps (B, C, H, W) instead of argmaxed class maps.

        NOTE(review): when resizing, the output buffer is allocated from
        target_sizes[0] only — assumes all batch elements share one target size.
        """
        # Softmax over classes (remove null class)
        masks_classes = self.stable_softmax(class_queries_logits, axis=-1)[..., :-1]

        # Sigmoid for masks
        masks_probs = self.stable_sigmoid(masks_queries_logits)

        # Combine: einsum bqc,bqhw -> bchw
        segmentation = np.einsum("bqc,bqhw->bchw", masks_classes, masks_probs)

        # Resize if target_sizes is given
        if target_sizes is not None:
            if len(target_sizes) != segmentation.shape[0]:
                raise ValueError("target_sizes length must match batch size")

            segmentation_resized = np.zeros(
                (segmentation.shape[0], segmentation.shape[1], target_sizes[0][0], target_sizes[0][1]),
                dtype=np.float32
            )

            for idx in range(segmentation.shape[0]):
                out_h, out_w = target_sizes[idx]
                for c in range(segmentation.shape[1]):
                    segmentation_resized[idx, c] = cv2.resize(
                        segmentation[idx, c],
                        (out_w, out_h),
                        interpolation=cv2.INTER_LINEAR
                    )
            segmentation = segmentation_resized

        return segmentation  # shape: (B, C, H, W)

    def process(self, tiles_batched: np.ndarray):
        """Run both models on a batch of tiles.

        Parameters
        ----------
        tiles_batched : np.ndarray
            Tiles as (N, H, W, C), values 0-255.

        Returns
        -------
        tuple
            (primary segmentation result for the first output layer,
             list of (512, 512) semantic class maps from the second model).

        NOTE(review): returning a 2-tuple differs from ModelBase.process, which
        returns the postprocessed result directly — confirm callers expect this.
        """
        input_batch = self.preprocessing(tiles_batched)
        input_building_batch = self.preprocess_tiles_ade20k(tiles_batched)

        # Primary (Segmentor) model.
        model_output = self.sess.run(
            output_names=None,
            input_feed={self.input_name: input_batch})
        res = self.postprocessing(model_output)
        # Second model: request the class logits and the mask-query logits by name.
        logits_np, masks_np = self.second_model.run(["logits", "outputs_mask.35"], {"pixel_values": input_building_batch})
        target_sizes=[(512, 512)]*logits_np.shape[0]
        predicted_seg = self.post_process_semantic_segmentation_numpy(
            class_queries_logits=logits_np,
            masks_queries_logits=masks_np,
            target_sizes=target_sizes
        )
        # Disabled remapping of second-model class ids into the primary label space:
        # new_class = {0:0, 1:5, 2:6, 3:7, 4:8}
        # build_mask = np.zeros((512,512), dtype=np.int8)
        # segmentation_map = predicted_seg[0]
        # for k,v in new_class.items():
        #     build_mask[segmentation_map == k] = v

        return res[0], predicted_seg

    def get_number_of_output_channels(self) -> List[int]:
        """Report a fixed 9-channel output (overrides dynamic inspection)."""
        return [9]

    def get_output_shapes(self) -> List[tuple]:
        """Report a fixed ("N", 9, 512, 512) output shape (overrides ONNX metadata)."""
        return [("N", 9, 512, 512)]
+ return [("N", 9, 512, 512)]
165
+
166
+
167
+
168
+
zipdeepness/processing/models/model_base.py ADDED
@@ -0,0 +1,444 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Module including the base model interfaces and utilities"""
2
+ import ast
3
+ import json
4
+ from typing import List, Optional
5
+
6
+ import numpy as np
7
+
8
+ from deepness.common.lazy_package_loader import LazyPackageLoader
9
+ from deepness.common.processing_parameters.standardization_parameters import StandardizationParameters
10
+
11
+ ort = LazyPackageLoader('onnxruntime')
12
+
13
+
14
class ModelBase:
    """
    Wraps the ONNX model used during processing into a common interface
    """

    def __init__(self, model_file_path: str):
        """Load the ONNX model and cache its input/output layout and metadata.

        Parameters
        ----------
        model_file_path : str
            Path to the model file
        """
        self.model_file_path = model_file_path

        options = ort.SessionOptions()
        options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL

        # Prefer CUDA when available; onnxruntime falls back to CPU otherwise.
        providers = [
            'CUDAExecutionProvider',
            'CPUExecutionProvider'
        ]

        self.sess = ort.InferenceSession(self.model_file_path, options=options, providers=providers)
        inputs = self.sess.get_inputs()
        if len(inputs) > 1:
            raise Exception("ONNX model: unsupported number of inputs")
        input_0 = inputs[0]

        self.input_shape = input_0.shape
        self.input_name = input_0.name

        self.outputs_layers = self.sess.get_outputs()
        self.standardization_parameters: StandardizationParameters = self.get_metadata_standarization_parameters()

        self.outputs_names = self.get_outputs_channel_names()

    @classmethod
    def get_model_type_from_metadata(cls, model_file_path: str) -> Optional[str]:
        """ Get model type from metadata

        NOTE: instantiates a full InferenceSession just to read metadata.

        Parameters
        ----------
        model_file_path : str
            Path to the model file

        Returns
        -------
        Optional[str]
            Model type or None if not found
        """
        model = cls(model_file_path)
        return model.get_metadata_model_type()

    def get_input_shape(self) -> tuple:
        """ Get shape of the input for the model

        Returns
        -------
        tuple
            Shape of the input (batch_size, channels, height, width)
        """
        return self.input_shape

    def get_output_shapes(self) -> List[tuple]:
        """ Get shapes of the outputs for the model

        Returns
        -------
        List[tuple]
            Shapes of the outputs (batch_size, channels, height, width)
        """
        return [output.shape for output in self.outputs_layers]

    def get_model_batch_size(self) -> Optional[int]:
        """ Get batch size of the model

        Returns
        -------
        Optional[int] | None
            Batch size or None if not found (dynamic batch size)
        """
        bs = self.input_shape[0]

        # ONNX encodes a dynamic dimension as a symbolic (string) name.
        if isinstance(bs, str):
            return None
        else:
            return bs

    def get_input_size_in_pixels(self) -> int:
        """ Get number of input pixels in x and y direction

        NOTE(review): despite the `int` annotation and the method name, this
        returns the last two dimensions of the input shape, i.e. a (height,
        width) sequence — callers index/unpack it; confirm before changing.

        Returns
        -------
        int
            The (height, width) slice of the model input shape
        """
        return self.input_shape[-2:]

    def get_outputs_channel_names(self) -> Optional[List[List[str]]]:
        """ Get class names from metadata

        Returns
        -------
        List[List[str]] | None
            List of class names for each model output or None if not found
        """
        meta = self.sess.get_modelmeta()

        allowed_key_names = ['class_names', 'names']  # support both names for backward compatibility
        for name in allowed_key_names:
            if name not in meta.custom_metadata_map:
                continue

            txt = meta.custom_metadata_map[name]
            try:
                class_names = json.loads(txt)  # default format recommended in the documentation - classes encoded as json
            except json.decoder.JSONDecodeError:
                class_names = ast.literal_eval(txt)  # keys are integers instead of strings - use ast

            # A single dict means one output layer; normalize to a list of dicts.
            if isinstance(class_names, dict):
                class_names = [class_names]

            # Sort each output's {label_id: name} mapping by the numeric label id.
            sorted_by_key = [sorted(cn.items(), key=lambda kv: int(kv[0])) for cn in class_names]

            all_names = []

            for output_index in range(len(sorted_by_key)):
                output_names = []
                class_counter = 0

                # Class ids must be 0..N-1 with no gaps, so the list index equals the id.
                for key, value in sorted_by_key[output_index]:
                    if int(key) != class_counter:
                        raise Exception("Class names in the model metadata are not consecutive (missing class label)")
                    class_counter += 1
                    output_names.append(value)
                all_names.append(output_names)

            return all_names

        return None

    def get_channel_name(self, layer_id: int, channel_id: int) -> str:
        """ Get channel name by id if exists in model metadata

        Parameters
        ----------
        layer_id : int
            Output layer index
        channel_id : int
            Channel id (means index in the output tensor)

        Returns
        -------
        str
            Channel name, or a generic 'channel_<id>' fallback when the model
            has no channel-name metadata
        """

        channel_id_str = str(channel_id)
        default_return = f'channel_{channel_id_str}'

        if self.outputs_names is None:
            return default_return

        if layer_id >= len(self.outputs_names):
            raise Exception(f'Layer id {layer_id} is out of range of the model outputs')

        if channel_id >= len(self.outputs_names[layer_id]):
            raise Exception(f'Channel id {channel_id} is out of range of the model outputs')

        return f'{self.outputs_names[layer_id][channel_id]}'

    def get_metadata_model_type(self) -> Optional[str]:
        """ Get model type from metadata

        Returns
        -------
        Optional[str]
            Model type or None if not found
        """
        meta = self.sess.get_modelmeta()
        name = 'model_type'
        if name in meta.custom_metadata_map:
            value = json.loads(meta.custom_metadata_map[name])
            return str(value).capitalize()
        return None

    def get_metadata_standarization_parameters(self) -> Optional[StandardizationParameters]:
        """ Get standardization parameters from metadata if exists

        Returns
        -------
        Optional[StandardizationParameters]
            Standardization parameters (identity/no-op defaults when the model
            metadata does not define mean/std)
        """
        meta = self.sess.get_modelmeta()
        name_mean = 'standardization_mean'
        name_std = 'standardization_std'

        param = StandardizationParameters(channels_number=self.get_input_shape()[-3])

        # Mean and std must both be present; otherwise keep the defaults.
        if name_mean in meta.custom_metadata_map and name_std in meta.custom_metadata_map:
            mean = json.loads(meta.custom_metadata_map[name_mean])
            std = json.loads(meta.custom_metadata_map[name_std])

            mean = [float(x) for x in mean]
            std = [float(x) for x in std]

            param.set_mean_std(mean=mean, std=std)

            return param

        return param  # default, no standardization

    def get_metadata_resolution(self) -> Optional[float]:
        """ Get resolution from metadata if exists

        Returns
        -------
        Optional[float]
            Resolution or None if not found
        """
        meta = self.sess.get_modelmeta()
        name = 'resolution'
        if name in meta.custom_metadata_map:
            value = json.loads(meta.custom_metadata_map[name])
            return float(value)
        return None

    def get_metadata_tile_size(self) -> Optional[int]:
        """ Get tile size from metadata if exists

        Returns
        -------
        Optional[int]
            Tile size or None if not found
        """
        meta = self.sess.get_modelmeta()
        name = 'tile_size'
        if name in meta.custom_metadata_map:
            value = json.loads(meta.custom_metadata_map[name])
            return int(value)
        return None

    def get_metadata_tiles_overlap(self) -> Optional[int]:
        """ Get tiles overlap from metadata if exists

        Returns
        -------
        Optional[int]
            Tiles overlap or None if not found
        """
        meta = self.sess.get_modelmeta()
        name = 'tiles_overlap'
        if name in meta.custom_metadata_map:
            value = json.loads(meta.custom_metadata_map[name])
            return int(value)
        return None

    def get_metadata_segmentation_threshold(self) -> Optional[float]:
        """ Get segmentation threshold from metadata if exists

        Returns
        -------
        Optional[float]
            Segmentation threshold or None if not found
        """
        meta = self.sess.get_modelmeta()
        name = 'seg_thresh'
        if name in meta.custom_metadata_map:
            value = json.loads(meta.custom_metadata_map[name])
            return float(value)
        return None

    def get_metadata_segmentation_small_segment(self) -> Optional[int]:
        """ Get segmentation small segment from metadata if exists

        Returns
        -------
        Optional[int]
            Segmentation small segment or None if not found
        """
        meta = self.sess.get_modelmeta()
        name = 'seg_small_segment'
        if name in meta.custom_metadata_map:
            value = json.loads(meta.custom_metadata_map[name])
            return int(value)
        return None

    def get_metadata_regression_output_scaling(self) -> Optional[float]:
        """ Get regression output scaling from metadata if exists

        Returns
        -------
        Optional[float]
            Regression output scaling or None if not found
        """
        meta = self.sess.get_modelmeta()
        name = 'reg_output_scaling'
        if name in meta.custom_metadata_map:
            value = json.loads(meta.custom_metadata_map[name])
            return float(value)
        return None

    def get_metadata_detection_confidence(self) -> Optional[float]:
        """ Get detection confidence from metadata if exists

        Returns
        -------
        Optional[float]
            Detection confidence or None if not found
        """
        meta = self.sess.get_modelmeta()
        name = 'det_conf'
        if name in meta.custom_metadata_map:
            value = json.loads(meta.custom_metadata_map[name])
            return float(value)
        return None

    def get_detector_type(self) -> Optional[str]:
        """ Get detector type from metadata if exists

        Returns string value of DetectorType enum or None if not found
        -------
        Optional[str]
            Detector type or None if not found
        """
        meta = self.sess.get_modelmeta()
        name = 'det_type'
        if name in meta.custom_metadata_map:
            value = json.loads(meta.custom_metadata_map[name])
            return str(value)
        return None

    def get_metadata_detection_iou_threshold(self) -> Optional[float]:
        """ Get detection iou threshold from metadata if exists

        Returns
        -------
        Optional[float]
            Detection iou threshold or None if not found
        """
        meta = self.sess.get_modelmeta()
        name = 'det_iou_thresh'
        if name in meta.custom_metadata_map:
            value = json.loads(meta.custom_metadata_map[name])
            return float(value)
        return None

    def get_number_of_channels(self) -> int:
        """ Returns number of channels in the input layer

        Returns
        -------
        int
            Number of channels in the input layer
        """
        return self.input_shape[-3]

    def process(self, tiles_batched: np.ndarray):
        """ Process a batch of tile images: preprocess, run inference, postprocess

        Parameters
        ----------
        tiles_batched : np.ndarray
            Batch of tiles to process (N x TILE_SIZE x TILE_SIZE x channels, type uint8, values 0 to 255)

        Returns
        -------
        np.ndarray
            Postprocessed prediction (format depends on the subclass's postprocessing)
        """
        input_batch = self.preprocessing(tiles_batched)
        model_output = self.sess.run(
            output_names=None,
            input_feed={self.input_name: input_batch})
        res = self.postprocessing(model_output)
        return res

    def preprocessing(self, tiles_batched: np.ndarray) -> np.ndarray:
        """ Preprocess the batch of images for the model (resize, normalization, etc)

        Parameters
        ----------
        tiles_batched : np.ndarray
            Batch of images to preprocess (N,H,W,C), RGB, 0-255

        Returns
        -------
        np.ndarray
            Preprocessed batch of image (N,C,H,W), RGB, 0-1
        """

        # imported here, to avoid issue with uninstalled dependencies during the first plugin start
        # in other places we use LazyPackageLoader, but here it is not so easy
        import deepness.processing.models.preprocessing_utils as preprocessing_utils

        tiles_batched = preprocessing_utils.limit_channels_number(tiles_batched, limit=self.input_shape[-3])
        tiles_batched = preprocessing_utils.normalize_values_to_01(tiles_batched)
        tiles_batched = preprocessing_utils.standardize_values(tiles_batched, params=self.standardization_parameters)
        tiles_batched = preprocessing_utils.transpose_nhwc_to_nchw(tiles_batched)

        return tiles_batched

    def postprocessing(self, outs: List) -> np.ndarray:
        """ Abstract method for postprocessing

        Parameters
        ----------
        outs : List
            Output from the model (depends on the model type)

        Returns
        -------
        np.ndarray
            Postprocessed output
        """
        raise NotImplementedError('Base class not implemented!')

    def get_number_of_output_channels(self) -> List[int]:
        """ Abstract method for getting number of classes in the output layer

        Returns
        -------
        int
            Number of channels in the output layer"""
        raise NotImplementedError('Base class not implemented!')

    def check_loaded_model_outputs(self):
        """ Abstract method for checking if the model outputs are valid

        """
        raise NotImplementedError('Base class not implemented!')
zipdeepness/processing/models/model_types.py ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import enum
2
+ from dataclasses import dataclass
3
+
4
+ from deepness.common.processing_parameters.detection_parameters import DetectionParameters
5
+ from deepness.common.processing_parameters.map_processing_parameters import MapProcessingParameters
6
+ from deepness.common.processing_parameters.recognition_parameters import RecognitionParameters
7
+ from deepness.common.processing_parameters.regression_parameters import RegressionParameters
8
+ from deepness.common.processing_parameters.segmentation_parameters import SegmentationParameters
9
+ from deepness.common.processing_parameters.superresolution_parameters import SuperresolutionParameters
10
+ from deepness.processing.map_processor.map_processor_detection import MapProcessorDetection
11
+ from deepness.processing.map_processor.map_processor_recognition import MapProcessorRecognition
12
+ from deepness.processing.map_processor.map_processor_regression import MapProcessorRegression
13
+ from deepness.processing.map_processor.map_processor_segmentation import MapProcessorSegmentation
14
+ from deepness.processing.map_processor.map_processor_superresolution import MapProcessorSuperresolution
15
+ from deepness.processing.models.detector import Detector
16
+ from deepness.processing.models.recognition import Recognition
17
+ from deepness.processing.models.regressor import Regressor
18
+ from deepness.processing.models.segmentor import Segmentor
19
+ from deepness.processing.models.superresolution import Superresolution
20
+ from deepness.processing.models.dual import DualModel
21
+
22
+
23
class ModelType(enum.Enum):
    """ Enumeration of the supported model types.

    Each member's value is the human-readable display name reported by the
    corresponding model class (used to identify the type by name, e.g. from
    model metadata or the UI).
    """

    SEGMENTATION = Segmentor.get_class_display_name()
    REGRESSION = Regressor.get_class_display_name()
    DETECTION = Detector.get_class_display_name()
    SUPERRESOLUTION = Superresolution.get_class_display_name()
    RECOGNITION = Recognition.get_class_display_name()
29
+
30
+
31
@dataclass
class ModelDefinition:
    """ Binds one model type to its model class, parameters class and map processor class. """

    model_type: ModelType
    model_class: type
    parameters_class: type
    map_processor_class: type

    @classmethod
    def get_model_definitions(cls):
        """ Return the list of all known model definitions. """
        # NOTE(review): SEGMENTATION is wired to DualModel (not Segmentor) in
        # this fork — looks intentional, but confirm with the fork author.
        wiring = [
            (ModelType.SEGMENTATION, DualModel, SegmentationParameters, MapProcessorSegmentation),
            (ModelType.REGRESSION, Regressor, RegressionParameters, MapProcessorRegression),
            (ModelType.DETECTION, Detector, DetectionParameters, MapProcessorDetection),
            (ModelType.SUPERRESOLUTION, Superresolution, SuperresolutionParameters, MapProcessorSuperresolution),
            (ModelType.RECOGNITION, Recognition, RecognitionParameters, MapProcessorRecognition),
        ]
        return [
            cls(
                model_type=model_type,
                model_class=model_class,
                parameters_class=parameters_class,
                map_processor_class=map_processor_class,
            )
            for model_type, model_class, parameters_class, map_processor_class in wiring
        ]

    @classmethod
    def get_definition_for_type(cls, model_type: ModelType):
        """ Return the definition whose model_type matches, or raise if unknown. """
        candidates = (d for d in cls.get_model_definitions() if d.model_type == model_type)
        found = next(candidates, None)
        if found is None:
            raise Exception(f"Unknown model type: '{model_type}'!")
        return found

    @classmethod
    def get_definition_for_params(cls, params: MapProcessingParameters):
        """ get model definition corresponding to the specified parameters """
        definitions = cls.get_model_definitions()

        # Pass 1: exact type match first, so a parameters object never
        # matches a base class of its own type by accident.
        for definition in definitions:
            if type(params) == definition.parameters_class:
                return definition

        # Pass 2: fall back to isinstance, accepting subclasses of a known
        # parameters class.
        for definition in definitions:
            if isinstance(params, definition.parameters_class):
                return definition

        raise Exception(f"Unknown model type for parameters: '{params}'!")