code stringlengths 17 6.64M |
|---|
class VariationOfInformation(ConfusionMatrixMetric):
    def __init__(self, metric: str='VARINFO'):
        """Represents a variation of information metric.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def calculate(self):
        """Calculates the variation of information.

        Returns:
            float: The variation of information, or ``-inf`` when a marginal
            probability of 0 or 1 would require evaluating log2(0).
        """
        tp = self.confusion_matrix.tp
        tn = self.confusion_matrix.tn
        fp = self.confusion_matrix.fp
        fn = self.confusion_matrix.fn
        n = self.confusion_matrix.n
        # Marginal totals: fn + tp = reference foreground, fp + tp = prediction foreground.
        fn_tp = (fn + tp)
        fp_tp = (fp + tp)
        # A marginal probability of exactly 0 or 1 makes the entropy terms below log2(0).
        if ((fn_tp == 0) or ((fn_tp / n) == 1) or (fp_tp == 0) or ((fp_tp / n) == 1)):
            warnings.warn('Unable to compute variation of information due to log2 of 0, returning -inf', NotComputableMetricWarning)
            return float('-inf')
        # Marginal (binary) entropies H(S1) and H(S2) of reference and prediction.
        h1 = (- (((fn_tp / n) * math.log2((fn_tp / n))) + ((1 - (fn_tp / n)) * math.log2((1 - (fn_tp / n))))))
        h2 = (- (((fp_tp / n) * math.log2((fp_tp / n))) + ((1 - (fp_tp / n)) * math.log2((1 - (fp_tp / n))))))
        # Joint cell probabilities; an empty cell is replaced by 1 so that its
        # log2 term evaluates to 0 and drops out of the joint entropy sum.
        p00 = (1 if (tn == 0) else (tn / n))
        p01 = (1 if (fn == 0) else (fn / n))
        p10 = (1 if (fp == 0) else (fp / n))
        p11 = (1 if (tp == 0) else (tp / n))
        # Joint entropy H(S1, S2) over the four confusion matrix cells.
        h12 = (- (((((tn / n) * math.log2(p00)) + ((fn / n) * math.log2(p01))) + ((fp / n) * math.log2(p10))) + ((tp / n) * math.log2(p11))))
        # Mutual information MI = H(S1) + H(S2) - H(S1, S2).
        mi = ((h1 + h2) - h12)
        # Variation of information VI = H(S1) + H(S2) - 2 * MI.
        vi = ((h1 + h2) - (2 * mi))
        return vi
|
class VolumeSimilarity(ConfusionMatrixMetric):
    def __init__(self, metric: str='VOLSMTY'):
        """Represents a volume similarity metric.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def calculate(self):
        """Calculates the volume similarity.

        Returns:
            float: The volume similarity, or ``-inf`` when the denominator is zero.
        """
        true_pos = self.confusion_matrix.tp
        false_pos = self.confusion_matrix.fp
        false_neg = self.confusion_matrix.fn
        if true_pos + false_neg + false_pos == 0:
            warnings.warn('Unable to compute volume similarity due to division by zero, returning -inf', NotComputableMetricWarning)
            return float('-inf')
        volume_difference = abs(false_neg - false_pos)
        return 1 - volume_difference / (2 * true_pos + false_neg + false_pos)
|
class MeanAbsoluteError(NumpyArrayMetric):
    def __init__(self, metric: str='MAE'):
        """Represents a mean absolute error metric.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def calculate(self):
        """Calculates the mean absolute error."""
        absolute_deviations = np.abs(self.reference - self.prediction)
        return np.mean(absolute_deviations)
|
class MeanSquaredError(NumpyArrayMetric):
    def __init__(self, metric: str='MSE'):
        """Represents a mean squared error metric.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def calculate(self):
        """Calculates the mean squared error."""
        squared_deviations = np.square(self.reference - self.prediction)
        return np.mean(squared_deviations)
|
class RootMeanSquaredError(NumpyArrayMetric):
    def __init__(self, metric: str='RMSE'):
        """Represents a root mean squared error metric.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def calculate(self):
        """Calculates the root mean squared error."""
        mean_squared_error = np.mean(np.square(self.reference - self.prediction))
        return np.sqrt(mean_squared_error)
|
class NormalizedRootMeanSquaredError(NumpyArrayMetric):
    def __init__(self, metric: str='NRMSE'):
        """Represents a normalized root mean squared error metric.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def calculate(self):
        """Calculates the root mean squared error normalized by the reference intensity range."""
        root_mean_squared_error = np.sqrt(np.mean(np.square(self.reference - self.prediction)))
        intensity_range = self.reference.max() - self.reference.min()
        return root_mean_squared_error / intensity_range
|
class CoefficientOfDetermination(NumpyArrayMetric):
    def __init__(self, metric: str='R2'):
        """Represents a coefficient of determination (R^2) error metric.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def calculate(self):
        """Calculates the coefficient of determination (R^2) error.

        See Also:
            https://stackoverflow.com/a/45538060
        """
        observed = self.reference.flatten()
        modeled = self.prediction.flatten()
        residual_sum_of_squares = sum((observed - modeled) ** 2)
        # Total sum of squares expressed via the unbiased sample variance.
        total_sum_of_squares = (len(observed) - 1) * np.var(observed, ddof=1)
        return 1 - (residual_sum_of_squares / total_sum_of_squares)
|
class PeakSignalToNoiseRatio(NumpyArrayMetric):
    def __init__(self, metric: str='PSNR'):
        """Represents a peak signal to noise ratio metric.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def calculate(self):
        """Calculates the peak signal to noise ratio."""
        # The reference maximum defines the dynamic range for the PSNR computation.
        dynamic_range = self.reference.max()
        return skimage.metrics.peak_signal_noise_ratio(self.reference, self.prediction, data_range=dynamic_range)
|
class StructuralSimilarityIndexMeasure(NumpyArrayMetric):
    def __init__(self, metric: str='SSIM'):
        """Represents a structural similarity index measure metric.

        Args:
            metric (str): The identification string of the metric.
        """
        super().__init__(metric)

    def calculate(self):
        """Calculates the structural similarity index measure.

        Returns:
            float: The SSIM, or ``-inf`` for images that are neither 2- nor 3-dimensional.
        """
        if self.reference.ndim == 2:
            return skimage.metrics.structural_similarity(self.reference, self.prediction,
                                                         data_range=self.reference.max())
        if self.reference.ndim == 3:
            # BUG FIX: the keyword was misspelled 'multichannelbool', which raises a
            # TypeError in scikit-image; the correct keyword is 'multichannel'.
            return skimage.metrics.structural_similarity(self.reference, self.prediction,
                                                         data_range=self.reference.max(),
                                                         multichannel=True)
        warnings.warn('Unable to compute StructuralSimilarityIndexMeasure for images of dimension other than 2 or 3.',
                      NotComputableMetricWarning)
        return float('-inf')
|
def get_reconstruction_metrics():
    """Gets a list with reconstruction metrics.

    Returns:
        list[Metric]: A list of metrics.
    """
    return [
        PeakSignalToNoiseRatio(),
        StructuralSimilarityIndexMeasure(),
    ]
|
def get_segmentation_metrics():
    """Gets a list with segmentation metrics.

    Returns:
        list[Metric]: A list of metrics.
    """
    # Segmentation metrics are the union of the overlap, distance, and classical groups.
    metrics = get_overlap_metrics()
    metrics = metrics + get_distance_metrics()
    metrics = metrics + get_classical_metrics()
    return metrics
|
def get_regression_metrics():
    """Gets a list with regression metrics.

    Returns:
        list[Metric]: A list of metrics.
    """
    return [
        CoefficientOfDetermination(),
        MeanAbsoluteError(),
        MeanSquaredError(),
        RootMeanSquaredError(),
        NormalizedRootMeanSquaredError(),
    ]
|
def get_overlap_metrics():
    """Gets a list of overlap-based metrics.

    Returns:
        list[Metric]: A list of metrics.
    """
    return [
        AdjustedRandIndex(),
        AreaUnderCurve(),
        CohenKappaCoefficient(),
        DiceCoefficient(),
        InterclassCorrelation(),
        JaccardCoefficient(),
        MutualInformation(),
        RandIndex(),
        SurfaceOverlap(),
        SurfaceDiceOverlap(),
        VolumeSimilarity(),
    ]
|
def get_distance_metrics():
    """Gets a list of distance-based metrics.

    Returns:
        list[Metric]: A list of metrics.
    """
    return [
        HausdorffDistance(),
        AverageDistance(),
        MahalanobisDistance(),
        VariationOfInformation(),
        GlobalConsistencyError(),
        ProbabilisticDistance(),
    ]
|
def get_classical_metrics():
    """Gets a list of classical metrics.

    Returns:
        list[Metric]: A list of metrics.
    """
    return [
        Sensitivity(),
        Specificity(),
        Precision(),
        FMeasure(),
        Accuracy(),
        Fallout(),
        FalseNegativeRate(),
        TruePositive(),
        FalsePositive(),
        TrueNegative(),
        FalseNegative(),
        ReferenceVolume(),
        PredictionVolume(),
    ]
|
class Writer(abc.ABC):
    """Represents an evaluation results writer base class."""

    @abc.abstractmethod
    def write(self, results: typing.List[evaluator.Result], **kwargs):
        """Writes the evaluation results.

        Args:
            results (list of evaluator.Result): The evaluation results.

        Raises:
            NotImplementedError: Always; subclasses must override this method.
        """
        raise NotImplementedError
|
class ConsoleWriterHelper():
    def __init__(self, use_logging: bool=False):
        """Represents a console writer helper.

        Args:
            use_logging (bool): Indicates whether to use the Python logging module or not.
        """
        self.use_logging = use_logging

    def format_and_write(self, lines: list):
        """Formats and writes the results.

        Args:
            lines (list of lists): The lines to write. Each line is a list of columns.
        """
        # Column width = widest entry per column, plus a two-space separator
        # for every column except the last one.
        widths = np.max(np.array([[len(col) for col in row] for row in lines]), axis=0)
        widths += ([2] * (len(widths) - 1)) + [0]
        rows = [''.join(f'{col:<{widths[col_idx]}}' for col_idx, col in enumerate(row)) for row in lines]
        text = '\n'.join(rows)
        if self.use_logging:
            logging.info(text)
        else:
            print(text, sep='', end='\n')
|
class StatisticsAggregator():
    def __init__(self, functions: dict=None):
        """Represents a statistics evaluation results aggregator.

        Args:
            functions (dict): The numpy function handles to calculate the statistics.
        """
        super().__init__()
        # Default to mean and standard deviation if no functions were provided.
        self.functions = {'MEAN': np.mean, 'STD': np.std} if functions is None else functions

    def calculate(self, results: typing.List[evaluator.Result]) -> typing.List[evaluator.Result]:
        """Calculates aggregated results (e.g., mean and standard deviation of a metric over all cases).

        Args:
            results (typing.List[evaluator.Result]): The results to aggregate.

        Returns:
            typing.List[evaluator.Result]: The aggregated results.
        """
        unique_labels = sorted({res.label for res in results})
        unique_metrics = sorted({res.metric for res in results})
        aggregated = []
        for current_label in unique_labels:
            for current_metric in unique_metrics:
                matching_values = [res.value for res in results
                                   if res.label == current_label and res.metric == current_metric]
                for statistic_id, statistic_fn in self.functions.items():
                    aggregated.append(evaluator.Result(statistic_id, current_label, current_metric,
                                                       float(statistic_fn(matching_values))))
        return aggregated
|
class CSVWriter(Writer):
    def __init__(self, path: str, delimiter: str=';'):
        """Represents a CSV file evaluation results writer.

        Args:
            path (str): The CSV file path. A '.csv' extension is appended if missing.
            delimiter (str): The CSV column delimiter.
        """
        super().__init__()
        self.path = path
        self.delimiter = delimiter
        if not self.path.lower().endswith('.csv'):
            # BUG FIX: os.path.join(self.path, '.csv') produced a file literally named
            # '.csv' inside the directory 'path'; the intent is to append the extension.
            self.path = self.path + '.csv'

    def write(self, results: typing.List[evaluator.Result], **kwargs):
        """Writes the evaluation results to a CSV file.

        Args:
            results (typing.List[evaluator.Result]): The evaluation results.
        """
        ids = sorted({result.id_ for result in results})
        labels = sorted({result.label for result in results})
        metrics = sorted({result.metric for result in results})
        with open(self.path, 'w', newline='') as file:
            writer = csv.writer(file, delimiter=self.delimiter)
            writer.writerow(['SUBJECT', 'LABEL'] + metrics)
            for id_ in ids:
                for label in labels:
                    row = [id_, label]
                    for metric in metrics:
                        # First matching result value; 'n/a' if the combination is missing.
                        value = next((r.value for r in results
                                      if r.id_ == id_ and r.label == label and r.metric == metric), None)
                        row.append(value if value is not None else 'n/a')
                    writer.writerow(row)
|
class ConsoleWriter(Writer):
    def __init__(self, precision: int=3, use_logging: bool=False):
        """Represents a console evaluation results writer.

        Args:
            precision (int): The decimal precision.
            use_logging (bool): Indicates whether to use the Python logging module or not.
        """
        super().__init__()
        self.write_helper = ConsoleWriterHelper(use_logging)
        self.precision = precision

    def write(self, results: typing.List[evaluator.Result], **kwargs):
        """Writes the evaluation results.

        Args:
            results (typing.List[evaluator.Result]): The evaluation results.
        """
        subject_ids = sorted({res.id_ for res in results})
        label_names = sorted({res.label for res in results})
        metric_names = sorted({res.metric for res in results})
        table = [['SUBJECT', 'LABEL'] + metric_names]
        for subject_id in subject_ids:
            for label_name in label_names:
                line = [subject_id, label_name]
                for metric_name in metric_names:
                    match = next((res.value for res in results
                                  if res.id_ == subject_id and res.label == label_name
                                  and res.metric == metric_name), None)
                    if match is None:
                        line.append('n/a')
                    elif isinstance(match, str):
                        line.append(match)
                    else:
                        # Numeric values are rounded to the configured precision.
                        line.append(f'{match:.{self.precision}f}')
                table.append(line)
        self.write_helper.format_and_write(table)
|
class CSVStatisticsWriter(Writer):
    def __init__(self, path: str, delimiter: str=';', functions: dict=None):
        """Represents a CSV file evaluation results statistics writer.

        Args:
            path (str): The CSV file path. A '.csv' extension is appended if missing.
            delimiter (str): The CSV column delimiter.
            functions (dict): The functions to calculate the statistics.
        """
        super().__init__()
        self.aggregator = StatisticsAggregator(functions)
        self.path = path
        self.delimiter = delimiter
        if not self.path.lower().endswith('.csv'):
            # BUG FIX: os.path.join(self.path, '.csv') produced a file literally named
            # '.csv' inside the directory 'path'; the intent is to append the extension.
            self.path = self.path + '.csv'

    def write(self, results: typing.List[evaluator.Result], **kwargs):
        """Writes the evaluation statistic results (e.g., mean and standard deviation of a metric over all cases).

        Args:
            results (typing.List[evaluator.Result]): The evaluation results.
        """
        aggregated_results = self.aggregator.calculate(results)
        with open(self.path, 'w', newline='') as file:
            writer = csv.writer(file, delimiter=self.delimiter)
            writer.writerow(['LABEL', 'METRIC', 'STATISTIC', 'VALUE'])
            for result in aggregated_results:
                # The aggregator stores the statistic identifier (e.g. 'MEAN') in id_.
                writer.writerow([result.label, result.metric, result.id_, result.value])
|
class ConsoleStatisticsWriter(Writer):
    def __init__(self, precision: int=3, use_logging: bool=False, functions: dict=None):
        """Represents a console evaluation results statistics writer.

        Args:
            precision (int): The float precision.
            use_logging (bool): Indicates whether to use the Python logging module or not.
            functions (dict): The function handles to calculate the statistics.
        """
        super().__init__()
        self.aggregator = StatisticsAggregator(functions)
        self.write_helper = ConsoleWriterHelper(use_logging)
        self.precision = precision

    def write(self, results: typing.List[evaluator.Result], **kwargs):
        """Writes the evaluation statistic results (e.g., mean and standard deviation of a metric over all cases).

        Args:
            results (typing.List[evaluator.Result]): The evaluation results.
        """
        table = [['LABEL', 'METRIC', 'STATISTIC', 'VALUE']]
        for res in self.aggregator.calculate(results):
            # String values pass through untouched; numbers are rounded to the precision.
            if isinstance(res.value, str):
                formatted = res.value
            else:
                formatted = f'{res.value:.{self.precision}f}'
            table.append([res.label, res.metric, res.id_, formatted])
        self.write_helper.format_and_write(table)
|
class FilterParams(abc.ABC):
    """Represents a filter parameters interface.

    Subclasses bundle the image-specific parameters passed to a :class:`.Filter`'s execute method.
    """
|
class Filter(abc.ABC):
    def __init__(self):
        """Filter base class."""
        # NOTE(review): verbose is never read within this base class; presumably
        # subclasses or callers toggle it for additional output — confirm.
        self.verbose = False

    @abc.abstractmethod
    def execute(self, image: sitk.Image, params: FilterParams=None) -> sitk.Image:
        """Executes a filter on an image.

        Args:
            image (sitk.Image): The image to filter.
            params (FilterParams): The filter parameters.

        Returns:
            sitk.Image: The filtered image.

        Raises:
            NotImplementedError: Always; subclasses must override this method.
        """
        raise NotImplementedError()
|
class FilterPipeline():
    def __init__(self, filters: typing.List[Filter]=None):
        """Represents a filter pipeline, which sequentially executes filters (:class:`.Filter`) on an image.

        Args:
            filters (list of Filter): The filters of the pipeline.
        """
        self.filters = []  # the filters of the pipeline, in execution order
        self.params = []  # the filter parameters, index-aligned with self.filters
        if filters is not None:
            for filter_ in filters:
                self.add_filter(filter_)

    def add_filter(self, filter_: Filter, params: FilterParams=None):
        """Adds a filter to the pipeline.

        Args:
            filter_ (Filter): A filter.
            params (FilterParams): The filter parameters.
        """
        self.filters.append(filter_)
        self.params.append(params)

    def set_param(self, params: FilterParams, filter_index: int):
        """Sets an image-specific parameter for a filter.

        Use this function to update the parameters of a filter to be specific to the image to be filtered.

        Args:
            params (FilterParams): The parameter(s).
            filter_index (int): The filter's index the parameters belong to.
        """
        self.params[filter_index] = params

    def execute(self, image: sitk.Image) -> sitk.Image:
        """Executes the filter pipeline on an image.

        Args:
            image (sitk.Image): The image to filter.

        Returns:
            sitk.Image: The filtered image.
        """
        for param_index, filter_ in enumerate(self.filters):
            image = filter_.execute(image, self.params[param_index])
        return image

    def __str__(self):
        """Gets a printable string representation.

        Returns:
            str: String representation.
        """
        string = 'FilterPipeline:\n'
        for filter_no, filter_ in enumerate(self.filters):
            # Indent the filter's multi-line string representation by one space per line.
            string += ' ' + str(filter_no + 1) + '. ' + ' '.join(str(filter_).splitlines(True))
        # BUG FIX: the previous 'return string.format(self=self)' was a no-op for normal
        # filter strings but raised KeyError/IndexError if a filter's representation
        # contained brace characters.
        return string
|
class Relabel(pymia_fltr.Filter):
    def __init__(self, label_changes: typing.Dict[int, typing.Union[int, tuple]]) -> None:
        """Represents a relabel filter.

        Args:
            label_changes (typing.Dict[int, typing.Union[int, tuple]]): Label change rule where the key
                is the new label and the value the existing (can be multiple) label.
        """
        super().__init__()
        self.label_changes = label_changes

    def execute(self, image: sitk.Image, params: pymia_fltr.FilterParams=None) -> sitk.Image:
        """Executes the relabeling of the label image.

        Args:
            image (sitk.Image): The image to filter.
            params (FilterParams): The filter parameters (unused).

        Returns:
            sitk.Image: The filtered image.
        """
        np_img = sitk.GetArrayFromImage(image)
        new_np_img = np_img.copy()
        for new_label, old_labels in self.label_changes.items():
            # np.isin replaces the deprecated np.in1d and works on the n-d array
            # directly, removing the previous ravel()/reshape() round-trip.
            mask = np.isin(np_img, old_labels)
            new_np_img[mask] = new_label
        new_img = sitk.GetImageFromArray(new_np_img)
        new_img.CopyInformation(image)
        return new_img

    def __str__(self):
        """Gets a printable string representation.

        Returns:
            str: String representation.
        """
        changes = '; '.join('{}->{}'.format(new_label, old_labels)
                            for new_label, old_labels in self.label_changes.items())
        return 'Relabel:\n label_changes: {label_changes}\n'.format(label_changes=changes)
|
class SizeCorrectionParams(pymia_fltr.FilterParams):
    def __init__(self, reference_shape: tuple) -> None:
        """Represents size (shape) correction filter parameters used by the :class:`.SizeCorrection` filter.

        Args:
            reference_shape (tuple): The reference or target shape.
        """
        self.reference_shape = reference_shape
        # Number of dimensions, derived from the reference shape.
        self.dims = len(reference_shape)
|
class SizeCorrection(pymia_fltr.Filter):
    def __init__(self, two_sided: bool=True, pad_constant: float=0.0) -> None:
        """Represents a filter to correct the shape/size by padding or cropping.

        Args:
            two_sided (bool): Indicates whether the cropping and padding should be applied on one or
                both side(s) of the dimension.
            pad_constant (float): The constant value used for padding.
        """
        super().__init__()
        self.two_sided = two_sided
        self.pad_constant = pad_constant

    def execute(self, image: sitk.Image, params: SizeCorrectionParams=None) -> sitk.Image:
        """Executes the shape/size correction by padding or cropping.

        Args:
            image (sitk.Image): The image to filter.
            params (SizeCorrectionParams): The filter parameters containing the reference (target) shape.

        Returns:
            sitk.Image: The filtered image.

        Raises:
            ValueError: If ``params`` is missing or the image dimensionality does not match the
                reference shape.
        """
        if (params is None):
            raise ValueError('ShapeParams argument is missing')
        if (image.GetDimension() != params.dims):
            raise ValueError('image dimension {} is not compatible with reference shape dimension {}'.format(image.GetDimension(), params.dims))
        image_shape = image.GetSize()
        # crop/pad hold the lower-bound ([0]) and upper-bound ([1]) amounts per dimension.
        crop = [(params.dims * [0]), (params.dims * [0])]
        pad = [(params.dims * [0]), (params.dims * [0])]
        for dim in range(params.dims):
            ref_size = params.reference_shape[dim]
            dim_size = image_shape[dim]
            if (dim_size > ref_size):
                # Image too large along this dimension: crop the excess.
                if self.two_sided:
                    # Split the excess between both sides; the upper side takes the odd remainder.
                    crop[0][dim] = ((dim_size - ref_size) // 2)
                    crop[1][dim] = (((dim_size - ref_size) // 2) + ((dim_size - ref_size) % 2))
                else:
                    crop[0][dim] = (dim_size - ref_size)
            elif (dim_size < ref_size):
                # Image too small along this dimension: pad the deficit.
                if self.two_sided:
                    # Split the deficit between both sides; the upper side takes the odd remainder.
                    pad[0][dim] = ((ref_size - dim_size) // 2)
                    pad[1][dim] = (((ref_size - dim_size) // 2) + ((ref_size - dim_size) % 2))
                else:
                    pad[0][dim] = (ref_size - dim_size)
        # Only invoke SimpleITK when at least one amount is non-zero.
        crop_needed = any((any(c) for c in crop))
        if crop_needed:
            image = sitk.Crop(image, crop[0], crop[1])
        pad_needed = any((any(p) for p in pad))
        if pad_needed:
            image = sitk.ConstantPad(image, pad[0], pad[1], self.pad_constant)
        return image

    def __str__(self):
        """Gets a printable string representation.

        Returns:
            str: String representation.
        """
        return 'SizeCorrectionFilter:\n two_sided: {self.two_sided}\n'.format(self=self)
|
class CmdlineExecutorParams(pymia_fltr.FilterParams):
    def __init__(self, arguments: typing.List[str]) -> None:
        """Command line executor filter parameters used by the :class:`.CmdlineExecutor` filter.

        Args:
            arguments (typing.List[str]): Additional arguments for the command line execution.
        """
        self.arguments = arguments
|
class CmdlineExecutor(pymia_fltr.Filter):
    def __init__(self, executable_path: str):
        """Represents a command line executable.

        Use this filter to execute for instance a C++ command line program, which loads and image,
        processes, and saves it.

        Args:
            executable_path (str): The path to the executable to run.
        """
        super().__init__()
        self.executable_path = executable_path

    def execute(self, image: sitk.Image, params: CmdlineExecutorParams=None) -> sitk.Image:
        """Executes a command line program.

        Args:
            image (sitk.Image): The image to filter.
            params (CmdlineExecutorParams): The execution specific command line parameters.

        Returns:
            sitk.Image: The filtered image.
        """
        # BUG FIX: the previous fixed names 'in.nii'/'out.nii' in the shared temp
        # directory raced between concurrent executions; mkstemp yields unique paths.
        fd_in, temp_in = tempfile.mkstemp(suffix='.nii')
        os.close(fd_in)
        fd_out, temp_out = tempfile.mkstemp(suffix='.nii')
        os.close(fd_out)
        try:
            sitk.WriteImage(image, temp_in)
            cmd = [self.executable_path, temp_in, temp_out]
            if params is not None:
                cmd = cmd + params.arguments
            subprocess.run(cmd, check=True)
            out_image = sitk.ReadImage(temp_out, image.GetPixelID())
        finally:
            # Clean up even if the executable or the image I/O fails.
            for temp_file in (temp_in, temp_out):
                try:
                    os.remove(temp_file)
                except OSError:
                    pass
        return out_image

    def __str__(self):
        """Gets a printable string representation.

        Returns:
            str: String representation.
        """
        return 'CmdlineExecutor:\n executable_path: {self.executable_path}\n'.format(self=self)
|
class BinaryThreshold(pymia_fltr.Filter):
    def __init__(self, threshold: float):
        """Represents a binary threshold image filter.

        Args:
            threshold (float): The threshold value.
        """
        super().__init__()
        self.threshold = threshold
        # NOTE(review): inside value 0 / outside value 1 means voxels OUTSIDE the
        # threshold interval become foreground (1) — confirm this inversion is intended.
        thresholder = sitk.BinaryThresholdImageFilter()
        thresholder.SetInsideValue(0)
        thresholder.SetOutsideValue(1)
        thresholder.SetUpperThreshold(self.threshold)
        self.filter = thresholder

    def execute(self, image: sitk.Image, params: pymia_fltr.FilterParams=None) -> sitk.Image:
        """Executes the binary threshold filter on an image.

        Args:
            image (sitk.Image): The image to filter.
            params (FilterParams): The filter parameters (unused).

        Returns:
            sitk.Image: The filtered image.
        """
        return self.filter.Execute(image)
|
class LargestNConnectedComponents(pymia_fltr.Filter):
    def __init__(self, number_of_components: int=1, consecutive_component_labels: bool=False):
        """Represents a largest N connected components filter.

        Extracts the largest N connected components from a label image.
        By default the N components will all have the value 1 in the output image.
        Use the `consecutive_component_labels` option such that the largest has value 1,
        the second largest has value 2, etc. Background is always assumed to be 0.

        Args:
            number_of_components (int): The number of largest components to extract.
            consecutive_component_labels (bool): The largest component has value 1, the second
                largest has value 2, etc. if set to True; otherwise, all components will have value 1.

        Raises:
            ValueError: If ``number_of_components`` is smaller than 1.
        """
        super().__init__()
        if number_of_components < 1:
            raise ValueError('number_of_components must be larger or equal to 1')
        self.number_of_components = number_of_components
        self.consecutive_component_labels = consecutive_component_labels

    def execute(self, image: sitk.Image, params: pymia_fltr.FilterParams=None) -> sitk.Image:
        """Executes the largest N connected components filter on an image.

        Args:
            image (sitk.Image): The image to filter.
            params (FilterParams): The filter parameters (unused).

        Returns:
            sitk.Image: The filtered image.
        """
        # Label the connected components, then relabel them ordered by size (1 = largest).
        labeled = sitk.RelabelComponent(sitk.ConnectedComponent(image))
        if self.consecutive_component_labels:
            # Keep the size-ordered labels 1..N, zero out everything else.
            return sitk.Threshold(labeled, lower=1, upper=self.number_of_components, outsideValue=0)
        # Collapse the N largest components to a single foreground value of 1.
        return sitk.BinaryThreshold(labeled, lowerThreshold=1, upperThreshold=self.number_of_components, insideValue=1, outsideValue=0)

    def __str__(self):
        """Gets a printable string representation.

        Returns:
            str: String representation.
        """
        return 'LargestNConnectedComponents:\n number_of_components: {self.number_of_components}\n consecutive_component_labels: {self.consecutive_component_labels}\n'.format(self=self)
|
class BiasFieldCorrectorParams(pymia_fltr.FilterParams):
    def __init__(self, mask: sitk.Image):
        """Bias field correction filter parameters used by the :class:`.BiasFieldCorrector` filter.

        Args:
            mask (sitk.Image): A mask image (0=background; 1=mask).

        Examples:

            To generate a default mask use Otsu's thresholding:

            >>> sitk.OtsuThreshold(image, 0, 1, 200)
        """
        self.mask = mask
|
class BiasFieldCorrector(pymia_fltr.Filter):
    def __init__(self, convergence_threshold: float=0.001, max_iterations: typing.List[int]=(50, 50, 50, 50), fullwidth_at_halfmax: float=0.15, filter_noise: float=0.01, histogram_bins: int=200, control_points: typing.List[int]=(4, 4, 4), spline_order: int=3):
        """Represents a bias field correction filter.

        Args:
            convergence_threshold (float): The threshold to stop the optimizer.
            max_iterations (typing.List[int]): The maximum number of optimizer iterations at each level.
            fullwidth_at_halfmax (float): The full width at half maximum.
            filter_noise (float): Wiener filter noise.
            histogram_bins (int): Number of histogram bins.
            control_points (typing.List[int]): The number of spline control points.
            spline_order (int): The spline order.
        """
        super().__init__()
        self.convergence_threshold = convergence_threshold
        self.max_iterations = max_iterations
        self.fullwidth_at_halfmax = fullwidth_at_halfmax
        self.filter_noise = filter_noise
        self.histogram_bins = histogram_bins
        self.control_points = control_points
        self.spline_order = spline_order

    def execute(self, image: sitk.Image, params: BiasFieldCorrectorParams=None) -> sitk.Image:
        """Executes a bias field correction on an image.

        Args:
            image (sitk.Image): The image to filter.
            params (BiasFieldCorrectorParams): The bias field correction filter parameters.

        Returns:
            sitk.Image: The bias field corrected image.
        """
        # Fall back to an Otsu-derived mask when no explicit mask is provided.
        if params is not None:
            mask = params.mask
        else:
            mask = sitk.OtsuThreshold(image, 0, 1, 200)
        return sitk.N4BiasFieldCorrection(image, mask,
                                          self.convergence_threshold,
                                          self.max_iterations,
                                          self.fullwidth_at_halfmax,
                                          self.filter_noise,
                                          self.histogram_bins,
                                          self.control_points,
                                          self.spline_order)

    def __str__(self):
        """Gets a printable string representation.

        Returns:
            str: String representation.
        """
        return 'BiasFieldCorrector:\n convergence_threshold: {self.convergence_threshold}\n max_iterations: {self.max_iterations}\n fullwidth_at_halfmax: {self.fullwidth_at_halfmax}\n filter_noise: {self.filter_noise}\n histogram_bins: {self.histogram_bins}\n control_points: {self.control_points}\n spline_order: {self.spline_order}\n'.format(self=self)
|
class GradientAnisotropicDiffusion(pymia_fltr.Filter):
    def __init__(self, time_step: float=0.125, conductance: int=3, conductance_scaling_update_interval: int=1, no_iterations: int=5):
        """Represents a gradient anisotropic diffusion filter.

        Args:
            time_step (float): The time step.
            conductance (int): The conductance (the higher the smoother the edges).
            conductance_scaling_update_interval: TODO
            no_iterations (int): Number of iterations.
        """
        super().__init__()
        self.time_step = time_step
        self.conductance = conductance
        self.conductance_scaling_update_interval = conductance_scaling_update_interval
        self.no_iterations = no_iterations

    def execute(self, image: sitk.Image, params: pymia_fltr.FilterParams=None) -> sitk.Image:
        """Executes a gradient anisotropic diffusion on an image.

        Args:
            image (sitk.Image): The image to filter.
            params (FilterParams): The parameters (unused).

        Returns:
            sitk.Image: The smoothed image.
        """
        # The diffusion filter requires a floating point pixel type.
        float_image = sitk.Cast(image, sitk.sitkFloat32)
        return sitk.GradientAnisotropicDiffusion(float_image,
                                                 self.time_step,
                                                 self.conductance,
                                                 self.conductance_scaling_update_interval,
                                                 self.no_iterations)

    def __str__(self):
        """Gets a printable string representation.

        Returns:
            str: String representation.
        """
        return 'GradientAnisotropicDiffusion:\n time_step: {self.time_step}\n conductance: {self.conductance}\n conductance_scaling_update_interval: {self.conductance_scaling_update_interval}\n no_iterations: {self.no_iterations}\n'.format(self=self)
|
class NormalizeZScore(pymia_fltr.Filter):
    """Represents a z-score normalization filter."""

    def execute(self, image: sitk.Image, params: pymia_fltr.FilterParams=None) -> sitk.Image:
        """Executes a z-score normalization on an image.

        Args:
            image (sitk.Image): The image to filter.
            params (FilterParams): The parameters (unused).

        Returns:
            sitk.Image: The normalized image (zero mean, unit variance; a constant image
            is only mean-centered to avoid division by zero).
        """
        img_arr = sitk.GetArrayFromImage(image)
        mean = img_arr.mean()
        std = img_arr.std()
        if std == 0:
            # BUG FIX: a constant image previously produced NaNs due to division by
            # zero; only center it instead.
            img_arr = img_arr - mean
        else:
            img_arr = (img_arr - mean) / std
        img_out = sitk.GetImageFromArray(img_arr)
        img_out.CopyInformation(image)
        return img_out

    def __str__(self):
        """Gets a printable string representation.

        Returns:
            str: String representation.
        """
        return 'NormalizeZScore:\n'
|
class RescaleIntensity(pymia_fltr.Filter):
    def __init__(self, min_intensity: float, max_intensity: float):
        """Represents a rescale intensity filter.

        Args:
            min_intensity (float): The min intensity value.
            max_intensity (float): The max intensity value.
        """
        super().__init__()
        self.min_intensity = min_intensity
        self.max_intensity = max_intensity

    def execute(self, image: sitk.Image, params: pymia_fltr.FilterParams=None) -> sitk.Image:
        """Executes an intensity rescaling on an image.

        Args:
            image (sitk.Image): The image to filter.
            params (FilterParams): The parameters (unused).

        Returns:
            sitk.Image: The intensity rescaled image.
        """
        rescaled = sitk.RescaleIntensity(image, self.min_intensity, self.max_intensity)
        return rescaled

    def __str__(self):
        """Gets a printable string representation.

        Returns:
            str: String representation.
        """
        return 'RescaleIntensity:\n min_intensity: {self.min_intensity}\n max_intensity: {self.max_intensity}\n'.format(self=self)
|
class HistogramMatcherParams(pymia_fltr.FilterParams):
    def __init__(self, reference_image: sitk.Image):
        """Histogram matching filter parameters used by the :class:`.HistogramMatcher` filter.

        Args:
            reference_image (sitk.Image): Reference image for the matching.
        """
        self.reference_image = reference_image
|
class HistogramMatcher(pymia_fltr.Filter):
    def __init__(self, histogram_levels: int=256, match_points: int=1, threshold_mean_intensity: bool=True):
        """Represents a histogram matching filter.

        Args:
            histogram_levels (int): Number of histogram levels.
            match_points (int): Number of match points.
            threshold_mean_intensity (bool): Threshold at mean intensity.
        """
        super().__init__()
        self.histogram_levels = histogram_levels
        self.match_points = match_points
        self.threshold_mean_intensity = threshold_mean_intensity

    def execute(self, image: sitk.Image, params: HistogramMatcherParams=None) -> sitk.Image:
        """Matches the image intensity histogram to a reference.

        Args:
            image (sitk.Image): The image to filter.
            params (HistogramMatcherParams): The filter parameters.

        Returns:
            sitk.Image: The filtered image.

        Raises:
            ValueError: If ``params`` with a reference image is missing.
        """
        if params is None:
            raise ValueError('Parameter with reference image is required')
        matched = sitk.HistogramMatching(image, params.reference_image,
                                         self.histogram_levels,
                                         self.match_points,
                                         self.threshold_mean_intensity)
        return matched

    def __str__(self):
        """Gets a printable string representation.

        Returns:
            str: String representation.
        """
        return 'HistogramMatcher:\n histogram_levels: {self.histogram_levels}\n match_points: {self.match_points}\n threshold_mean_intensity: {self.threshold_mean_intensity}\n'.format(self=self)
|
class RegistrationType(enum.Enum):
    """Represents the registration transformation type.

    NOTE(review): presumably consumed by :class:`.MultiModalRegistration` to select
    the transform model — confirm against the registration implementation.
    """
    AFFINE = 1
    SIMILARITY = 2
    RIGID = 3
    BSPLINE = 4
|
class RegistrationCallback(abc.ABC):
    def __init__(self) -> None:
        """Represents the abstract handler for the registration callbacks."""
        self.registration_method = None
        self.fixed_image = None
        self.moving_image = None
        self.transform = None

    def set_params(self, registration_method: sitk.ImageRegistrationMethod, fixed_image: sitk.Image, moving_image: sitk.Image, transform: sitk.Transform):
        """Sets the parameters that might be used during the callbacks.

        Args:
            registration_method (sitk.ImageRegistrationMethod): The registration method.
            fixed_image (sitk.Image): The fixed image.
            moving_image (sitk.Image): The moving image.
            transform (sitk.Transform): The transformation.
        """
        self.registration_method = registration_method
        self.fixed_image = fixed_image
        self.moving_image = moving_image
        self.transform = transform
        # Wire each SimpleITK registration event to its corresponding handler method.
        event_handlers = (
            (sitk.sitkStartEvent, self.registration_started),
            (sitk.sitkEndEvent, self.registration_ended),
            (sitk.sitkMultiResolutionIterationEvent, self.registration_resolution_changed),
            (sitk.sitkIterationEvent, self.registration_iteration_ended),
        )
        for event, handler in event_handlers:
            self.registration_method.AddCommand(event, handler)

    def registration_ended(self):
        """Callback for the EndEvent."""
        pass

    def registration_started(self):
        """Callback for the StartEvent."""
        pass

    def registration_resolution_changed(self):
        """Callback for the MultiResolutionIterationEvent."""
        pass

    def registration_iteration_ended(self):
        """Callback for the IterationEvent."""
        pass
|
class MultiModalRegistrationParams(pymia_fltr.FilterParams):
    """Parameters for the multi-modal registration used by :class:`.MultiModalRegistration`.

    Args:
        fixed_image (sitk.Image): The fixed image for the registration.
        fixed_image_mask (sitk.Image): A mask for the fixed image to limit the registration.
        callbacks (typing.List[RegistrationCallback]): Callbacks observing the registration
            progress, if any. Note that callbacks increase the computational time.
    """

    def __init__(self, fixed_image: sitk.Image, fixed_image_mask: sitk.Image = None, callbacks: typing.List[RegistrationCallback] = None):
        self.fixed_image = fixed_image
        self.fixed_image_mask = fixed_image_mask
        self.callbacks = callbacks
|
class MultiModalRegistration(pymia_fltr.Filter):
    """Represents a multi-modal image registration filter.

    The filter estimates a transformation between images of different modalities using
    a Mattes mutual information similarity metric, linear interpolation during
    optimization, and gradient descent optimization (LBFGSB for B-spline).

    Args:
        registration_type (RegistrationType): The type of the registration.
        number_of_histogram_bins (int): The number of histogram bins.
        learning_rate (float): The optimizer's learning rate.
        step_size (float): The optimizer's step size. Each step in the optimizer is at least this large.
        number_of_iterations (int): The maximum number of optimization iterations.
        relaxation_factor (float): The relaxation factor to penalize abrupt changes during optimization.
        shrink_factors (typing.List[int]): The shrink factors at each shrinking level (from high to low).
        smoothing_sigmas (typing.List[float]): The Gaussian sigmas for smoothing at each shrinking level
            (in physical units).
        sampling_percentage (float): Fraction of voxels of the fixed image used for the metric (0, 1].
            Typical values range from 0.01 (1 %) for low detail images to 0.2 (20 %) for high detail
            images. The higher the fraction, the higher the computational time.
        sampling_seed: The seed for reproducible behavior.
        resampling_interpolator: Interpolation applied while resampling the image by the
            determined transformation.

    Examples:
        >>> fixed_image = sitk.ReadImage('/path/to/image/fixed.mha')
        >>> moving_image = sitk.ReadImage('/path/to/image/moving.mha')
        >>> registration = MultiModalRegistration()  # specify parameters to your needs
        >>> parameters = MultiModalRegistrationParams(fixed_image)
        >>> registered_image = registration.execute(moving_image, parameters)
    """

    def __init__(self, registration_type: RegistrationType=RegistrationType.RIGID, number_of_histogram_bins: int=200, learning_rate: float=1.0, step_size: float=0.001, number_of_iterations: int=200, relaxation_factor: float=0.5, shrink_factors: typing.List[int]=(2, 1, 1), smoothing_sigmas: typing.List[float]=(2, 1, 0), sampling_percentage: float=0.2, sampling_seed: int=sitk.sitkWallClock, resampling_interpolator=sitk.sitkBSpline):
        super().__init__()
        # each multi-resolution (shrink) level needs its own smoothing sigma
        if (len(shrink_factors) != len(smoothing_sigmas)):
            raise ValueError('shrink_factors and smoothing_sigmas need to be same length')
        self.registration_type = registration_type
        self.number_of_histogram_bins = number_of_histogram_bins
        self.learning_rate = learning_rate
        self.step_size = step_size
        self.number_of_iterations = number_of_iterations
        self.relaxation_factor = relaxation_factor
        self.shrink_factors = shrink_factors
        self.smoothing_sigmas = smoothing_sigmas
        self.sampling_percentage = sampling_percentage
        self.sampling_seed = sampling_seed
        self.resampling_interpolator = resampling_interpolator
        # the registration method is configured once here and reused for every execute() call
        registration = sitk.ImageRegistrationMethod()
        # multi-modal metric: Mattes mutual information on a random voxel subsample
        registration.SetMetricAsMattesMutualInformation(self.number_of_histogram_bins)
        registration.SetMetricSamplingStrategy(registration.RANDOM)
        registration.SetMetricSamplingPercentage(self.sampling_percentage, self.sampling_seed)
        registration.SetMetricUseFixedImageGradientFilter(False)
        registration.SetMetricUseMovingImageGradientFilter(False)
        registration.SetInterpolator(sitk.sitkLinear)
        if (self.registration_type == RegistrationType.BSPLINE):
            # deformable registration optimizes with LBFGSB instead of gradient descent
            registration.SetOptimizerAsLBFGSB()
        else:
            registration.SetOptimizerAsRegularStepGradientDescent(learningRate=self.learning_rate, minStep=self.step_size, numberOfIterations=self.number_of_iterations, relaxationFactor=self.relaxation_factor, gradientMagnitudeTolerance=0.0001, estimateLearningRate=registration.EachIteration, maximumStepSizeInPhysicalUnits=0.0)
        registration.SetOptimizerScalesFromPhysicalShift()
        # multi-resolution pyramid; sigmas are interpreted in physical units (see below)
        registration.SetShrinkFactorsPerLevel(self.shrink_factors)
        registration.SetSmoothingSigmasPerLevel(self.smoothing_sigmas)
        registration.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()
        self.registration = registration
        self.transform = None  # set by execute()

    def execute(self, image: sitk.Image, params: MultiModalRegistrationParams=None) -> sitk.Image:
        """Executes the multi-modal registration.

        Args:
            image (sitk.Image): The moving image to register.
            params (MultiModalRegistrationParams): The parameters, which contain the fixed image.

        Returns:
            sitk.Image: The registered image.
        """
        if (params is None):
            raise ValueError('params is not defined')
        dimension = image.GetDimension()
        if (dimension not in (2, 3)):
            raise ValueError('Image dimension {} is not among the accepted (2, 3)'.format(dimension))
        if (self.registration_type == RegistrationType.BSPLINE):
            # initialize the B-spline on a control-point mesh covering the fixed image
            transform_domain_mesh_size = ([10] * image.GetDimension())
            initial_transform = sitk.BSplineTransformInitializer(params.fixed_image, transform_domain_mesh_size)
        else:
            if (self.registration_type == RegistrationType.RIGID):
                transform_type = (sitk.VersorRigid3DTransform() if (dimension == 3) else sitk.Euler2DTransform())
            elif (self.registration_type == RegistrationType.AFFINE):
                transform_type = sitk.AffineTransform(dimension)
            elif (self.registration_type == RegistrationType.SIMILARITY):
                transform_type = (sitk.Similarity3DTransform() if (dimension == 3) else sitk.Similarity2DTransform())
            else:
                raise ValueError('not supported registration_type')
            # initialize by aligning the geometric centers of the two images
            initial_transform = sitk.CenteredTransformInitializer(sitk.Cast(params.fixed_image, image.GetPixelIDValue()), image, transform_type, sitk.CenteredTransformInitializerFilter.GEOMETRY)
        self.registration.SetInitialTransform(initial_transform, inPlace=True)
        if params.fixed_image_mask:
            self.registration.SetMetricFixedMask(params.fixed_image_mask)
        if (params.callbacks is not None):
            for callback in params.callbacks:
                callback.set_params(self.registration, params.fixed_image, image, initial_transform)
        # the mutual information metric requires floating point images
        self.transform = self.registration.Execute(sitk.Cast(params.fixed_image, sitk.sitkFloat32), sitk.Cast(image, sitk.sitkFloat32))
        if self.verbose:
            print('MultiModalRegistration:\n Final metric value: {0}'.format(self.registration.GetMetricValue()))
            print(" Optimizer's stopping condition, {0}".format(self.registration.GetOptimizerStopConditionDescription()))
        elif (self.number_of_iterations == self.registration.GetOptimizerIteration()):
            # NOTE(review): due to the elif, this non-convergence warning is only
            # printed when verbose is False — confirm this is intended
            print('MultiModalRegistration: Optimizer terminated at number of iterations and did not converge!')
        # resample the moving image onto the fixed image grid using the found transform
        return sitk.Resample(image, params.fixed_image, self.transform, self.resampling_interpolator, 0.0, image.GetPixelIDValue())

    def __str__(self):
        """Gets a nicely printable string representation.

        Returns:
            str: The string representation.
        """
        return 'MultiModalRegistration:\n registration_type: {self.registration_type}\n number_of_histogram_bins: {self.number_of_histogram_bins}\n learning_rate: {self.learning_rate}\n step_size: {self.step_size}\n number_of_iterations: {self.number_of_iterations}\n relaxation_factor: {self.relaxation_factor}\n shrink_factors: {self.shrink_factors}\n smoothing_sigmas: {self.smoothing_sigmas}\n sampling_percentage: {self.sampling_percentage}\n resampling_interpolator: {self.resampling_interpolator}\n'.format(self=self)
|
class PlotOnResolutionChangeCallback(RegistrationCallback):
    """Saves the transformed moving image whenever the registration resolution level
    changes, and once more when the registration ends.

    Args:
        plot_dir (str): Path to the directory where to save the plots.
        file_name_prefix (str): The file name prefix for the plots.
    """

    def __init__(self, plot_dir: str, file_name_prefix: str = '') -> None:
        super().__init__()
        self.plot_dir = plot_dir
        self.file_name_prefix = file_name_prefix
        self.resolution = 0

    def registration_started(self):
        """Callback for the StartEvent."""
        self.resolution = 0

    def registration_ended(self):
        """Callback for the EndEvent."""
        self._write_image('end')

    def registration_resolution_changed(self):
        """Callback for the MultiResolutionIterationEvent."""
        self._write_image('res{}'.format(self.resolution))
        self.resolution += 1

    def registration_iteration_ended(self):
        """Callback for the IterationEvent."""

    def _write_image(self, file_name_suffix: str):
        """Resamples the moving image with the current transform and writes it to disk."""
        file_name = os.path.join(self.plot_dir, '{}_{}.mha'.format(self.file_name_prefix, file_name_suffix))
        moving_transformed = sitk.Resample(self.moving_image, self.fixed_image, self.transform,
                                           sitk.sitkLinear, 0.0, self.moving_image.GetPixelIDValue())
        sitk.WriteImage(moving_transformed, file_name)
|
class TestLargestNConnectedComponents(unittest.TestCase):
    """Tests for fltr.LargestNConnectedComponents.

    The 5 x 5 fixture image contains three connected components of sizes
    1 (pixel (0, 0)), 2 (pixels (2, 0)-(2, 1)) and 3 (pixels (4, 0)-(4, 2)).
    """

    def setUp(self):
        image = sitk.Image((5, 5), sitk.sitkUInt8)
        image.SetPixel((0, 0), 1)
        image.SetPixel((2, 0), 1)
        image.SetPixel((2, 1), 1)
        image.SetPixel((4, 0), 1)
        image.SetPixel((4, 1), 1)
        image.SetPixel((4, 2), 1)
        self.image = image

    def test_zero_components(self):
        # requesting zero components is invalid
        with self.assertRaises(ValueError):
            fltr.LargestNConnectedComponents(0, False)

    def test_one_components(self):
        # only the largest component (size 3) must remain
        dut = fltr.LargestNConnectedComponents(1, False)
        result = dut.execute(self.image)
        self.assertEqual(result.GetPixel((4, 0)), 1)
        self.assertEqual(result.GetPixel((4, 1)), 1)
        self.assertEqual(result.GetPixel((4, 2)), 1)
        result_array = sitk.GetArrayFromImage(result)
        self.assertEqual(result_array.sum(), 3)

    def test_two_components(self):
        # the two largest components (sizes 3 and 2) must remain
        dut = fltr.LargestNConnectedComponents(2, False)
        result = dut.execute(self.image)
        self.assertEqual(result.GetPixel((2, 0)), 1)
        self.assertEqual(result.GetPixel((2, 1)), 1)
        self.assertEqual(result.GetPixel((4, 0)), 1)
        self.assertEqual(result.GetPixel((4, 1)), 1)
        self.assertEqual(result.GetPixel((4, 2)), 1)
        result_array = sitk.GetArrayFromImage(result)
        self.assertEqual(result_array.sum(), 5)

    def test_three_components(self):
        # all three components must remain
        dut = fltr.LargestNConnectedComponents(3, False)
        result = dut.execute(self.image)
        self.assertEqual(result.GetPixel((0, 0)), 1)
        self.assertEqual(result.GetPixel((2, 0)), 1)
        self.assertEqual(result.GetPixel((2, 1)), 1)
        self.assertEqual(result.GetPixel((4, 0)), 1)
        self.assertEqual(result.GetPixel((4, 1)), 1)
        self.assertEqual(result.GetPixel((4, 2)), 1)
        result_array = sitk.GetArrayFromImage(result)
        self.assertEqual(result_array.sum(), 6)

    def test_four_components(self):
        # BUGFIX: this test previously constructed the filter with 3 components,
        # making it a verbatim duplicate of test_three_components. Requesting more
        # components (4) than exist (3) must simply return all available components.
        dut = fltr.LargestNConnectedComponents(4, False)
        result = dut.execute(self.image)
        self.assertEqual(result.GetPixel((0, 0)), 1)
        self.assertEqual(result.GetPixel((2, 0)), 1)
        self.assertEqual(result.GetPixel((2, 1)), 1)
        self.assertEqual(result.GetPixel((4, 0)), 1)
        self.assertEqual(result.GetPixel((4, 1)), 1)
        self.assertEqual(result.GetPixel((4, 2)), 1)
        result_array = sitk.GetArrayFromImage(result)
        self.assertEqual(result_array.sum(), 6)

    def test_consecutive_labels(self):
        # with consecutive labeling, components are labeled 1..N by decreasing size
        dut = fltr.LargestNConnectedComponents(3, True)
        result = dut.execute(self.image)
        self.assertEqual(result.GetPixel((0, 0)), 3)
        self.assertEqual(result.GetPixel((2, 0)), 2)
        self.assertEqual(result.GetPixel((2, 1)), 2)
        self.assertEqual(result.GetPixel((4, 0)), 1)
        self.assertEqual(result.GetPixel((4, 1)), 1)
        self.assertEqual(result.GetPixel((4, 2)), 1)
        result_array = sitk.GetArrayFromImage(result)
        self.assertEqual(result_array.sum(), 10)
|
class TestNormalizeZScore(unittest.TestCase):
    """Tests for the z-score normalization filter."""

    def setUp(self):
        # 4 x 1 image with intensities 1, 2, 3, 4
        image = sitk.Image((4, 1), sitk.sitkUInt8)
        for idx in range(4):
            image.SetPixel((idx, 0), idx + 1)
        self.image = image
        # expected z-scores of [1, 2, 3, 4]
        self.desired = np.array([[(- 1.3416407864999), (- 0.44721359549996), 0.44721359549996, 1.3416407864999]], np.float64)

    def test_normalization(self):
        out = pymia_fltr_prep.NormalizeZScore().execute(self.image)
        np.testing.assert_array_almost_equal(self.desired, sitk.GetArrayFromImage(out), decimal=12)

    def test_normalization_with_param(self):
        # passing (unused) filter parameters must not change the result
        out = pymia_fltr_prep.NormalizeZScore().execute(self.image, pymia_fltr.FilterParams())
        np.testing.assert_array_almost_equal(self.desired, sitk.GetArrayFromImage(out), decimal=12)

    def test_image_properties(self):
        # the geometry must be preserved; only the pixel type changes to float64
        out = pymia_fltr_prep.NormalizeZScore().execute(self.image)
        for accessor in ('GetSize', 'GetOrigin', 'GetSpacing', 'GetDirection',
                         'GetDimension', 'GetNumberOfComponentsPerPixel'):
            self.assertEqual(getattr(self.image, accessor)(), getattr(out, accessor)())
        self.assertEqual(sitk.sitkFloat64, out.GetPixelID())
|
class TestImageProperties(unittest.TestCase):
    """Tests for img.ImageProperties.

    Note: equality of ImageProperties considers only the geometry (size, origin,
    spacing, direction) — not the pixel type or the number of components, as
    test_equality demonstrates.
    """

    SIZE = (10, 10, 3)
    DIRECTION = (0, 1, 0, 1, 0, 0, 0, 0, 1)
    IDENTITY_DIRECTION = (1, 0, 0, 0, 1, 0, 0, 0, 1)

    @staticmethod
    def _make_properties(size, pixel_id, origin, spacing, direction, components=0):
        """Builds an image with the given geometry and returns its ImageProperties."""
        if components:
            image = sitk.Image(list(size), pixel_id, components)
        else:
            image = sitk.Image(list(size), pixel_id)
        image.SetOrigin(origin)
        image.SetSpacing(spacing)
        image.SetDirection(direction)
        return img.ImageProperties(image)

    def test_is_two_dimensional(self):
        dut = img.ImageProperties(sitk.Image([10, 10], sitk.sitkUInt8))
        self.assertEqual(dut.is_two_dimensional(), True)
        self.assertEqual(dut.is_three_dimensional(), False)
        self.assertEqual(dut.is_vector_image(), False)

    def test_is_three_dimensional(self):
        dut = img.ImageProperties(sitk.Image([10, 10, 3], sitk.sitkUInt8))
        self.assertEqual(dut.is_two_dimensional(), False)
        self.assertEqual(dut.is_three_dimensional(), True)
        self.assertEqual(dut.is_vector_image(), False)

    def test_is_vector_image(self):
        dut = img.ImageProperties(sitk.Image([10, 10], sitk.sitkVectorUInt8, 3))
        self.assertEqual(dut.is_two_dimensional(), True)
        self.assertEqual(dut.is_three_dimensional(), False)
        self.assertEqual(dut.is_vector_image(), True)

    def test_properties(self):
        size = self.SIZE
        dut = self._make_properties(size, sitk.sitkUInt8, size, size, self.DIRECTION)
        self.assertEqual(dut.size, size)
        self.assertEqual(dut.origin, size)
        self.assertEqual(dut.spacing, size)
        self.assertEqual(dut.direction, self.DIRECTION)
        self.assertEqual(dut.dimensions, 3)
        self.assertEqual(dut.number_of_components_per_pixel, 1)
        self.assertEqual(dut.pixel_id, sitk.sitkUInt8)

    def test_equality(self):
        size = self.SIZE
        dut1 = self._make_properties(size, sitk.sitkUInt8, size, size, self.DIRECTION)
        dut2 = self._make_properties(size, sitk.sitkUInt8, size, size, self.DIRECTION)
        self.assertTrue(dut1 == dut2)
        self.assertFalse(dut1 != dut2)
        # a different pixel type does not break equality
        dut1 = self._make_properties(size, sitk.sitkInt8, size, size, self.DIRECTION)
        self.assertTrue(dut1 == dut2)
        self.assertFalse(dut1 != dut2)
        # neither does a different number of components per pixel
        dut1 = self._make_properties(size, sitk.sitkVectorUInt8, size, size, self.DIRECTION, components=2)
        self.assertTrue(dut1 == dut2)
        self.assertFalse(dut1 != dut2)

    def test_non_equality(self):
        size = self.SIZE
        other = (10, 10, 2)
        dut1 = self._make_properties(size, sitk.sitkUInt8, size, size, self.DIRECTION)
        # any geometric difference (size, origin, spacing, direction) breaks equality
        # (the original duplicated this pattern four times and left an unused local)
        variants = (
            self._make_properties(other, sitk.sitkUInt8, size, size, self.DIRECTION),
            self._make_properties(size, sitk.sitkUInt8, other, size, self.DIRECTION),
            self._make_properties(size, sitk.sitkUInt8, size, other, self.DIRECTION),
            self._make_properties(size, sitk.sitkUInt8, size, size, self.IDENTITY_DIRECTION),
        )
        for dut2 in variants:
            self.assertFalse(dut1 == dut2)
            self.assertTrue(dut1 != dut2)
|
class TestNumpySimpleITKImageBridge(unittest.TestCase):
    """Tests for img.NumpySimpleITKImageBridge.convert.

    Note on axis order: SimpleITK sizes are (x, y[, z]) while numpy array shapes
    are reversed, ([z,] y, x) — the fixtures below follow that convention.
    """

    def setUp(self):
        dim_x = 5
        dim_y = 10
        dim_z = 3
        self.no_vector_components = 4
        self.origin_spacing_2d = (dim_x, dim_y)
        self.direction_2d = (0, 1, 1, 0)
        self.origin_spacing_3d = (dim_x, dim_y, dim_z)
        self.direction_3d = (1, 0, 0, 0, 1, 0, 0, 0, 1)
        # reference 3-D image properties
        image = sitk.Image((dim_x, dim_y, dim_z), sitk.sitkUInt8)
        image.SetOrigin(self.origin_spacing_3d)
        image.SetSpacing(self.origin_spacing_3d)
        image.SetDirection(self.direction_3d)
        self.properties_3d = img.ImageProperties(image)
        # reference 2-D image properties
        image = sitk.Image((dim_x, dim_y), sitk.sitkUInt8)
        image.SetOrigin(self.origin_spacing_2d)
        image.SetSpacing(self.origin_spacing_2d)
        image.SetDirection(self.direction_2d)
        self.properties_2d = img.ImageProperties(image)
        # arrays in numpy image shape and in flattened (voxels, components) shape
        self.array_image_shape_2d = np.zeros((dim_y, dim_x), np.uint8)
        self.array_2d_vector = np.zeros(((dim_y * dim_x), self.no_vector_components), np.uint8)
        self.array_image_shape_2d_vector = np.zeros((dim_y, dim_x, self.no_vector_components), np.uint8)
        self.array_image_shape_3d = np.zeros((dim_z, dim_y, dim_x), np.uint8)
        self.array_3d_vector = np.zeros((((dim_z * dim_y) * dim_x), self.no_vector_components), np.uint8)
        self.array_image_shape_3d_vector = np.zeros((dim_z, dim_y, dim_x, self.no_vector_components), np.uint8)

    def test_vector_to_image(self):
        # a flattened 1-D array is restored to the image shape given by the properties
        image = img.NumpySimpleITKImageBridge.convert(self.array_image_shape_3d.flatten(), self.properties_3d)
        self._assert_3d(image)
        image = img.NumpySimpleITKImageBridge.convert(self.array_image_shape_2d.flatten(), self.properties_2d)
        self._assert_2d(image)

    def test_array_to_image(self):
        # an array already in image shape converts directly
        image = img.NumpySimpleITKImageBridge.convert(self.array_image_shape_3d, self.properties_3d)
        self._assert_3d(image)
        image = img.NumpySimpleITKImageBridge.convert(self.array_image_shape_2d, self.properties_2d)
        self._assert_2d(image)

    def test_array_to_vector_image(self):
        # a trailing component axis yields a vector image
        image = img.NumpySimpleITKImageBridge.convert(self.array_image_shape_3d_vector, self.properties_3d)
        self._assert_3d(image, True)
        image = img.NumpySimpleITKImageBridge.convert(self.array_image_shape_2d_vector, self.properties_2d)
        self._assert_2d(image, True)

    def test_vector_to_vector_image(self):
        # a (voxels, components) array also yields a vector image
        image = img.NumpySimpleITKImageBridge.convert(self.array_3d_vector, self.properties_3d)
        self._assert_3d(image, True)
        image = img.NumpySimpleITKImageBridge.convert(self.array_2d_vector, self.properties_2d)
        self._assert_2d(image, True)

    def test_convert_unknown_shape(self):
        # a 3-D-sized array cannot be matched against 2-D properties
        with self.assertRaises(ValueError):
            img.NumpySimpleITKImageBridge.convert(self.array_image_shape_3d.flatten(), self.properties_2d)

    def _assert_2d(self, image: sitk.Image, is_vector=False):
        # helper: verifies size, component count and geometry of a converted 2-D image
        self.assertEqual(self.properties_2d.size, image.GetSize())
        if is_vector:
            self.assertEqual(self.no_vector_components, image.GetNumberOfComponentsPerPixel())
        else:
            self.assertEqual(1, image.GetNumberOfComponentsPerPixel())
        self.assertEqual(self.origin_spacing_2d, image.GetOrigin())
        self.assertEqual(self.origin_spacing_2d, image.GetSpacing())
        self.assertEqual(self.direction_2d, image.GetDirection())

    def _assert_3d(self, image: sitk.Image, is_vector=False):
        # helper: verifies size, component count and geometry of a converted 3-D image
        self.assertEqual(self.properties_3d.size, image.GetSize())
        if is_vector:
            self.assertEqual(self.no_vector_components, image.GetNumberOfComponentsPerPixel())
        else:
            self.assertEqual(1, image.GetNumberOfComponentsPerPixel())
        self.assertEqual(self.origin_spacing_3d, image.GetOrigin())
        self.assertEqual(self.origin_spacing_3d, image.GetSpacing())
        self.assertEqual(self.direction_3d, image.GetDirection())
|
class TestSimpleITKNumpyImageBridge(unittest.TestCase):
    """Tests for the SimpleITK-to-numpy conversion bridge."""

    def test_convert(self):
        size = (10, 10, 3)
        image = sitk.Image(size, sitk.sitkUInt8)
        array, properties = img.SimpleITKNumpyImageBridge.convert(image)
        self.assertEqual(isinstance(array, np.ndarray), True)
        # numpy arrays are indexed (z, y, x), i.e. the reversed SimpleITK size
        self.assertEqual(array.shape, size[::-1])
        self.assertEqual(array.dtype, np.uint8)
        self.assertEqual(isinstance(properties, img.ImageProperties), True)
        self.assertEqual(properties.size, size)

    def test_convert_None(self):
        # a missing image must be rejected explicitly
        with self.assertRaises(ValueError):
            img.SimpleITKNumpyImageBridge.convert(None)
|
def get_kernel():
    """Builds the 8-neighbour extraction kernel for 3x3 convolutions.

    Returns:
        torch.Tensor: A (8, 1, 3, 3) tensor where output channel k selects the
        k-th neighbour of the centre pixel (the centre itself is excluded).
    """
    weight = torch.zeros(8, 1, 3, 3)
    # all 3x3 positions except the centre (1, 1), in row-major order
    neighbours = [(r, c) for r in range(3) for c in range(3) if (r, c) != (1, 1)]
    for k, (r, c) in enumerate(neighbours):
        weight[k, 0, r, c] = 1
    return weight
|
class PAR(nn.Module):
    """Pixel-adaptive refinement: iteratively propagates mask probabilities to
    neighbouring pixels, weighted by an affinity built from local intensity
    differences and pixel distances over dilated 3x3 neighbourhoods.
    """

    def __init__(self, dilations, num_iter):
        # dilations: dilation rates of the 3x3 neighbourhoods used for the affinity
        # num_iter: number of refinement (propagation) iterations in forward()
        super().__init__()
        self.dilations = dilations
        self.num_iter = num_iter
        kernel = get_kernel()
        # buffer (not a parameter): follows the module's device but is not trained
        self.register_buffer('kernel', kernel)
        self.pos = self.get_pos()
        self.dim = 2  # the neighbour axis in the (b, c, neighbours, h, w) tensors below
        self.w1 = 0.3  # bandwidth of the intensity affinity
        self.w2 = 0.01  # weight of the positional affinity in the final mix

    def get_dilated_neighbors(self, x):
        """Stacks the 8 neighbours of every pixel for each dilation rate.

        Returns a tensor of shape (b, c, 8 * len(self.dilations), h, w).
        """
        (b, c, h, w) = x.shape
        x_aff = []
        for d in self.dilations:
            # replicate-pad so every pixel has neighbours at distance d
            _x_pad = F.pad(x, ([d] * 4), mode='replicate', value=0)
            _x_pad = _x_pad.reshape((b * c), (- 1), _x_pad.shape[(- 2)], _x_pad.shape[(- 1)])
            _x = F.conv2d(_x_pad, self.kernel, dilation=d).view(b, c, (- 1), h, w)
            x_aff.append(_x)
        return torch.cat(x_aff, dim=2)

    def get_pos(self):
        """Builds per-neighbour spatial distances, shape (1, 1, 8 * len(dilations), 1, 1)."""
        pos_xy = []
        ker = torch.ones(1, 1, 8, 1, 1)
        # indices 0, 2, 5, 7 are the diagonal neighbours (distance sqrt(2) per unit)
        ker[(0, 0, 0, 0, 0)] = np.sqrt(2)
        ker[(0, 0, 2, 0, 0)] = np.sqrt(2)
        ker[(0, 0, 5, 0, 0)] = np.sqrt(2)
        ker[(0, 0, 7, 0, 0)] = np.sqrt(2)
        for d in self.dilations:
            pos_xy.append((ker * d))
        return torch.cat(pos_xy, dim=2)

    def forward(self, imgs, masks):
        # bring the masks to the image resolution before refining
        masks = F.interpolate(masks, size=imgs.size()[(- 2):], mode='bilinear', align_corners=True)
        (b, c, h, w) = imgs.shape
        _imgs = self.get_dilated_neighbors(imgs)
        _pos = self.pos.to(_imgs.device)
        _imgs_rep = imgs.unsqueeze(self.dim).repeat(1, 1, _imgs.shape[self.dim], 1, 1)
        _pos_rep = _pos.repeat(b, 1, 1, h, w)
        # absolute intensity difference between each pixel and its neighbours,
        # normalized by the standard deviation over the neighbour axis
        _imgs_abs = torch.abs((_imgs - _imgs_rep))
        _imgs_std = torch.std(_imgs, dim=self.dim, keepdim=True)
        _pos_std = torch.std(_pos_rep, dim=self.dim, keepdim=True)
        aff = (- (((_imgs_abs / (_imgs_std + 1e-08)) / self.w1) ** 2))
        aff = aff.mean(dim=1, keepdim=True)
        # NOTE(review): the positional term is also scaled by w1 (w2 only weights
        # the softmax mix below); this mirrors the reference implementation
        pos_aff = (- (((_pos_rep / (_pos_std + 1e-08)) / self.w1) ** 2))
        aff = (F.softmax(aff, dim=2) + (self.w2 * F.softmax(pos_aff, dim=2)))
        # propagate the mask through the affinity for num_iter iterations
        for _ in range(self.num_iter):
            _masks = self.get_dilated_neighbors(masks)
            masks = (_masks * aff).sum(2)
        return masks
|
def conv3x3(in_planes, out_planes, stride=1, dilation=1, padding=1):
    """Creates a 3x3 convolution without bias."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=False,
    )
|
def conv1x1(in_planes, out_planes, stride=1, dilation=1, padding=1):
    """Creates a 1x1 convolution without bias.

    NOTE(review): the default ``padding=1`` is unusual for a 1x1 kernel (it grows
    the spatial size by 2); callers in this file always pass ``padding=0``
    explicitly. Kept for interface compatibility.
    """
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=False,
    )
|
class LargeFOV(nn.Module):
    """DeepLab-style 'large field-of-view' segmentation head.

    Two dilated 3x3 convolutions with ReLU, followed by a 1x1 classifier.

    Args:
        in_planes (int): Number of input channels.
        out_planes (int): Number of output channels (classes).
        dilation (int): Dilation (and padding) of the 3x3 convolutions, so the
            spatial size is preserved.
    """

    def __init__(self, in_planes, out_planes, dilation=5):
        super(LargeFOV, self).__init__()
        self.embed_dim = 512
        self.dilation = dilation
        self.conv6 = conv3x3(in_planes=in_planes, out_planes=self.embed_dim, padding=self.dilation, dilation=self.dilation)
        self.relu6 = nn.ReLU(inplace=True)
        self.conv7 = conv3x3(in_planes=self.embed_dim, out_planes=self.embed_dim, padding=self.dilation, dilation=self.dilation)
        self.relu7 = nn.ReLU(inplace=True)
        self.conv8 = conv1x1(in_planes=self.embed_dim, out_planes=out_planes, padding=0)

    def _init_weights(self):
        """Kaiming-initializes all conv weights (zero biases where present).

        Note: not invoked by __init__; call explicitly if this initialization
        is desired.
        """
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                # BUGFIX: the convolutions are created with bias=False, so m.bias is
                # None; the original unconditionally zeroed it and would crash here
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
        return None

    def forward(self, x):
        x = self.conv6(x)
        x = self.relu6(x)
        x = self.conv7(x)
        x = self.relu7(x)
        out = self.conv8(x)
        return out
|
class ASPP(nn.Module):
    """Atrous spatial pyramid pooling head (DeepLab).

    Applies several 3x3 convolutions with different dilation rates in parallel
    and sums their outputs.
    """

    def __init__(self, in_planes, out_planes, atrous_rates=[6, 12, 18, 24]):
        super(ASPP, self).__init__()
        for i, rate in enumerate(atrous_rates):
            branch = nn.Conv2d(in_planes, out_planes, 3, 1, padding=rate, dilation=rate, bias=True)
            self.add_module('c%d' % i, branch)
        self._init_weights()

    def _init_weights(self):
        """Initializes conv weights with N(0, 0.01) and zeroes the biases."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.normal_(module.weight, mean=0, std=0.01)
                nn.init.constant_(module.bias, 0)
        return None

    def forward(self, x):
        branch_outputs = (stage(x) for stage in self.children())
        return sum(branch_outputs)
|
class CTCHead(nn.Module):
    """Projection head (DINO-style): an MLP bottleneck, L2 normalization, and a
    weight-normalized last layer whose magnitude can be frozen.
    """

    def __init__(self, in_dim, out_dim=4096, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256):
        super().__init__()
        nlayers = max(nlayers, 1)
        if nlayers == 1:
            # single-layer variant: project straight to the bottleneck
            self.mlp = nn.Linear(in_dim, bottleneck_dim)
        else:
            layers = [nn.Linear(in_dim, hidden_dim), nn.GELU()]
            for _ in range(nlayers - 2):
                layers += [nn.Linear(hidden_dim, hidden_dim), nn.GELU()]
            layers.append(nn.Linear(hidden_dim, bottleneck_dim))
            self.mlp = nn.Sequential(*layers)
        self.apply(self._init_weights)
        self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False))
        self.last_layer.weight_g.data.fill_(1)
        if norm_last_layer:
            # freeze the magnitude of the weight-normalized last layer
            self.last_layer.weight_g.requires_grad = False

    def _init_weights(self, m):
        # truncated-normal weights, zero biases, for every linear layer
        if isinstance(m, nn.Linear):
            nn.init.trunc_normal_(m.weight, std=0.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        z = self.mlp(x)
        z = nn.functional.normalize(z, dim=-1, p=2)
        return self.last_layer(z)
|
class network(nn.Module):
    """Weakly-supervised segmentation network: a transformer encoder with a
    student/teacher projection-head pair (teacher updated by EMA), a classification
    head, an auxiliary classification head, and a LargeFOV segmentation decoder.
    """

    def __init__(self, backbone, num_classes=None, pretrained=None, init_momentum=None, aux_layer=None):
        super().__init__()
        self.num_classes = num_classes
        self.init_momentum = init_momentum  # EMA momentum for the teacher projection head
        self.encoder = getattr(encoder, backbone)(pretrained=pretrained, aux_layer=aux_layer)
        # student/teacher projection heads; the teacher starts as a copy of the
        # student and receives no gradients (EMA updates only)
        self.proj_head = CTCHead(in_dim=self.encoder.embed_dim, out_dim=1024)
        self.proj_head_t = CTCHead(in_dim=self.encoder.embed_dim, out_dim=1024)
        for (param, param_t) in zip(self.proj_head.parameters(), self.proj_head_t.parameters()):
            param_t.data.copy_(param.data)
            param_t.requires_grad = False
        self.in_channels = (([self.encoder.embed_dim] * 4) if hasattr(self.encoder, 'embed_dim') else ([self.encoder.embed_dims[(- 1)]] * 4))
        self.pooling = F.adaptive_max_pool2d
        self.decoder = decoder.LargeFOV(in_planes=self.in_channels[(- 1)], out_planes=self.num_classes)
        # classification heads predict foreground classes only (num_classes - 1)
        self.classifier = nn.Conv2d(in_channels=self.in_channels[(- 1)], out_channels=(self.num_classes - 1), kernel_size=1, bias=False)
        self.aux_classifier = nn.Conv2d(in_channels=self.in_channels[(- 1)], out_channels=(self.num_classes - 1), kernel_size=1, bias=False)

    @torch.no_grad()
    def _EMA_update_encoder_teacher(self, n_iter=None):
        # NOTE(review): despite the name, only the projection-head parameters are
        # EMA-updated here, not the encoder's — confirm intended
        momentum = self.init_momentum
        for (param, param_t) in zip(self.proj_head.parameters(), self.proj_head_t.parameters()):
            param_t.data = ((momentum * param_t.data) + ((1.0 - momentum) * param.data))

    def get_param_groups(self):
        """Returns 4 parameter groups: encoder (non-norm), encoder norm layers,
        classifier heads + student projection head, and decoder."""
        param_groups = [[], [], [], []]
        for (name, param) in list(self.encoder.named_parameters()):
            if ('norm' in name):
                param_groups[1].append(param)
            else:
                param_groups[0].append(param)
        param_groups[2].append(self.classifier.weight)
        param_groups[2].append(self.aux_classifier.weight)
        for param in list(self.proj_head.parameters()):
            param_groups[2].append(param)
        for param in list(self.decoder.parameters()):
            param_groups[3].append(param)
        return param_groups

    def to_2D(self, x, h, w):
        # (n, h*w, c) token sequence -> (n, c, h, w) feature map
        (n, hw, c) = x.shape
        x = x.transpose(1, 2).reshape(n, c, h, w)
        return x

    def forward_proj(self, crops, n_iter=None):
        """Runs the student/teacher projection: global crops through the (detached)
        teacher, global + local crops through the student."""
        global_view = crops[:2]  # the first two crops are the global views
        local_view = crops[2:]
        local_inputs = torch.cat(local_view, dim=0)
        self._EMA_update_encoder_teacher(n_iter)
        global_output_t = self.encoder.forward_features(torch.cat(global_view, dim=0))[0].detach()
        output_t = self.proj_head_t(global_output_t)
        global_output_s = self.encoder.forward_features(torch.cat(global_view, dim=0))[0]
        local_output_s = self.encoder.forward_features(local_inputs)[0]
        output_s = torch.cat((global_output_s, local_output_s), dim=0)
        output_s = self.proj_head(output_s)
        return (output_t, output_s)

    def forward(self, x, cam_only=False, crops=None, n_iter=None):
        (cls_token, _x, x_aux) = self.encoder.forward_features(x)
        if (crops is not None):
            (output_t, output_s) = self.forward_proj(crops, n_iter)
        (h, w) = ((x.shape[(- 2)] // self.encoder.patch_size), (x.shape[(- 1)] // self.encoder.patch_size))
        _x4 = self.to_2D(_x, h, w)
        _x_aux = self.to_2D(x_aux, h, w)
        seg = self.decoder(_x4)
        if cam_only:
            # class activation maps obtained by applying the classifier weights
            # directly to the feature maps (no pooling)
            cam = F.conv2d(_x4, self.classifier.weight).detach()
            cam_aux = F.conv2d(_x_aux, self.aux_classifier.weight).detach()
            return (cam_aux, cam)
        cls_aux = self.pooling(_x_aux, (1, 1))
        cls_aux = self.aux_classifier(cls_aux)
        cls_x4 = self.pooling(_x4, (1, 1))
        cls_x4 = self.classifier(cls_x4)
        cls_x4 = cls_x4.view((- 1), (self.num_classes - 1))
        cls_aux = cls_aux.view((- 1), (self.num_classes - 1))
        if (crops is None):
            return (cls_x4, seg, _x4, cls_aux)
        else:
            return (cls_x4, seg, _x4, cls_aux, output_t, output_s)
|
def setup_seed(seed):
    """Seeds every relevant RNG (torch CPU/CUDA, numpy, random) and configures
    cuDNN for deterministic behavior.

    Args:
        seed (int): The seed value.
    """
    for seed_fn in (torch.manual_seed,
                    torch.cuda.manual_seed,
                    torch.cuda.manual_seed_all,
                    np.random.seed,
                    random.seed):
        seed_fn(seed)
    # trade speed for reproducibility in cuDNN
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
|
def validate(model=None, data_loader=None, args=None):
    """Runs validation: a multi-label classification score plus segmentation
    scores for the CAMs, the auxiliary CAMs and the decoder predictions.

    Args:
        model: The network; called as model(inputs) returning (cls, segs, _, _).
        data_loader: Yields (name, inputs, labels, cls_label) batches; the
            classification score uses index [0], i.e. batch size 1 is assumed.
        args: Namespace providing crop_size, cam_scales, the CAM thresholds
            (bkg_thre, high_thre, low_thre), ignore_index and num_classes.

    Returns:
        tuple: (cls_score, tab_results) — the averaged classification score and
        a table of CAM / auxiliary CAM / segmentation scores.
    """
    preds, gts, cams, cams_aux = [], [], [], []
    model.eval()
    avg_meter = AverageMeter()
    with torch.no_grad():
        for _, data in tqdm(enumerate(data_loader), total=len(data_loader), ncols=100, ascii=' >='):
            name, inputs, labels, cls_label = data
            inputs = inputs.cuda()
            labels = labels.cuda()
            cls_label = cls_label.cuda()
            inputs = F.interpolate(inputs, size=[args.crop_size, args.crop_size], mode='bilinear', align_corners=False)
            cls, segs, _, _ = model(inputs)
            # multi-label classification score; positive logits count as predictions.
            # BUGFIX: the original computed and logged this twice per sample — the
            # duplicate was removed (the running average is unaffected).
            cls_pred = (cls > 0).type(torch.int16)
            _f1 = evaluate.multilabel_score(cls_label.cpu().numpy()[0], cls_pred.cpu().numpy()[0])
            avg_meter.add({'cls_score': _f1})
            # CAMs (and auxiliary CAMs) thresholded into pseudo labels
            _cams, _cams_aux = multi_scale_cam2(model, inputs, args.cam_scales)
            resized_cam = F.interpolate(_cams, size=labels.shape[1:], mode='bilinear', align_corners=False)
            cam_label = cam_to_label(resized_cam, cls_label, bkg_thre=args.bkg_thre, high_thre=args.high_thre, low_thre=args.low_thre, ignore_index=args.ignore_index)
            resized_cam_aux = F.interpolate(_cams_aux, size=labels.shape[1:], mode='bilinear', align_corners=False)
            cam_label_aux = cam_to_label(resized_cam_aux, cls_label, bkg_thre=args.bkg_thre, high_thre=args.high_thre, low_thre=args.low_thre, ignore_index=args.ignore_index)
            # decoder predictions, resized to the ground-truth resolution
            resized_segs = F.interpolate(segs, size=labels.shape[1:], mode='bilinear', align_corners=False)
            preds += list(torch.argmax(resized_segs, dim=1).cpu().numpy().astype(np.int16))
            cams += list(cam_label.cpu().numpy().astype(np.int16))
            gts += list(labels.cpu().numpy().astype(np.int16))
            cams_aux += list(cam_label_aux.cpu().numpy().astype(np.int16))
    cls_score = avg_meter.pop('cls_score')
    seg_score = evaluate.scores(gts, preds, num_classes=args.num_classes)
    cam_score = evaluate.scores(gts, cams, num_classes=args.num_classes)
    cam_aux_score = evaluate.scores(gts, cams_aux, num_classes=args.num_classes)
    model.train()
    tab_results = format_tabs([cam_score, cam_aux_score, seg_score], name_list=['CAM', 'aux_CAM', 'Seg_Pred'], cat_list=coco.class_list)
    return (cls_score, tab_results)
|
def train(args=None):
    """Distributed training loop for the COCO weakly-supervised segmentation model.

    Sets up DDP, the COCO classification/segmentation datasets, a warmup
    optimizer and the PAR/CTC auxiliary modules, then minimizes a weighted sum
    of classification, patch-token-contrast (ptc), class-token-contrast (ctc),
    segmentation and energy-regularization losses.  Loss weights are switched
    on at fixed iteration milestones (8000, 12000).  Checkpoints and validates
    every ``args.eval_iters`` iterations.

    Args:
        args: parsed command-line namespace carrying dataset paths,
            optimization hyper-parameters and DDP settings referenced below.

    Returns:
        True on completion.
    """
    torch.cuda.set_device(args.local_rank)
    dist.init_process_group(backend=args.backend)
    logging.info(('Total gpus: %d, samples per gpu: %d...' % (dist.get_world_size(), args.spg)))
    time0 = datetime.datetime.now()
    time0 = time0.replace(microsecond=0)
    train_dataset = coco.CocoClsDataset(img_dir=args.img_folder, label_dir=args.label_folder, name_list_dir=args.list_folder, split=args.train_set, stage='train', aug=True, rescale_range=args.scales, crop_size=args.crop_size, img_fliplr=True, ignore_index=args.ignore_index, num_classes=args.num_classes)
    val_dataset = coco.CocoSegDataset(img_dir=args.img_folder, label_dir=args.label_folder, name_list_dir=args.list_folder, split=args.val_set, stage='val', aug=False, ignore_index=args.ignore_index, num_classes=args.num_classes)
    train_sampler = DistributedSampler(train_dataset, shuffle=True)
    train_loader = DataLoader(train_dataset, batch_size=args.spg, num_workers=args.num_workers, pin_memory=False, drop_last=True, sampler=train_sampler, prefetch_factor=4)
    val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=args.num_workers, pin_memory=False, drop_last=False)
    device = torch.device(args.local_rank)
    model = network(backbone=args.backbone, num_classes=args.num_classes, pretrained=args.pretrained, init_momentum=args.momentum, aux_layer=9)
    param_groups = model.get_param_groups()
    model.to(device)
    # Backbone params at base LR, head params (groups 2/3) at 10x LR.
    optim = getattr(optimizer, args.optimizer)(params=[{'params': param_groups[0], 'lr': args.lr, 'weight_decay': args.wt_decay}, {'params': param_groups[1], 'lr': args.lr, 'weight_decay': args.wt_decay}, {'params': param_groups[2], 'lr': (args.lr * 10), 'weight_decay': args.wt_decay}, {'params': param_groups[3], 'lr': (args.lr * 10), 'weight_decay': args.wt_decay}], lr=args.lr, weight_decay=args.wt_decay, betas=args.betas, warmup_iter=args.warmup_iters, max_iter=args.max_iters, warmup_ratio=args.warmup_lr, power=args.power)
    logging.info(('\nOptimizer: \n%s' % optim))
    model = DistributedDataParallel(model, device_ids=[args.local_rank], find_unused_parameters=True)
    train_sampler.set_epoch(np.random.randint(args.max_iters))
    train_loader_iter = iter(train_loader)
    avg_meter = AverageMeter()
    loss_layer = DenseEnergyLoss(weight=1e-07, sigma_rgb=15, sigma_xy=100, scale_factor=0.5)
    ncrops = 10
    CTC_loss = CTCLoss_neg(ncrops=ncrops, temp=1.0).cuda()
    par = PAR(num_iter=10, dilations=[1, 2, 4, 8, 12, 24]).cuda()
    for n_iter in range(args.max_iters):
        try:
            (img_name, inputs, cls_label, img_box, crops) = next(train_loader_iter)
        except StopIteration:
            # Epoch exhausted: reshuffle with a fresh epoch seed and restart the
            # iterator.  (Was a bare `except:`, which also swallowed real errors.)
            train_sampler.set_epoch(np.random.randint(args.max_iters))
            train_loader_iter = iter(train_loader)
            (img_name, inputs, cls_label, img_box, crops) = next(train_loader_iter)
        inputs = inputs.to(device, non_blocking=True)
        inputs_denorm = imutils.denormalize_img2(inputs.clone())
        cls_label = cls_label.to(device, non_blocking=True)
        # Multi-scale CAMs from the current model; the aux CAM drives ROI cropping.
        (cams, cams_aux) = multi_scale_cam2(model, inputs=inputs, scales=args.cam_scales)
        roi_mask = cam_to_roi_mask2(cams_aux.detach(), cls_label=cls_label, low_thre=args.low_thre, hig_thre=args.high_thre)
        (local_crops, flags) = crop_from_roi_neg(images=crops[2], roi_mask=roi_mask, crop_num=(ncrops - 2), crop_size=args.local_crop_size)
        roi_crops = (crops[:2] + local_crops)
        (cls, segs, fmap, cls_aux, out_t, out_s) = model(inputs, crops=roi_crops, n_iter=n_iter)
        cls_loss = F.multilabel_soft_margin_loss(cls, cls_label)
        cls_loss_aux = F.multilabel_soft_margin_loss(cls_aux, cls_label)
        ctc_loss = CTC_loss(out_s, out_t, flags)
        (valid_cam, _) = cam_to_label(cams.detach(), cls_label=cls_label, img_box=img_box, ignore_mid=True, bkg_thre=args.bkg_thre, high_thre=args.high_thre, low_thre=args.low_thre, ignore_index=args.ignore_index)
        (valid_cam_aux, _) = cam_to_label(cams_aux.detach(), cls_label=cls_label, img_box=img_box, ignore_mid=True, bkg_thre=args.bkg_thre, high_thre=args.high_thre, low_thre=args.low_thre, ignore_index=args.ignore_index)
        # Early iterations use the aux CAM for pseudo labels, later the main CAM.
        if (n_iter <= 12000):
            refined_pseudo_label = refine_cams_with_bkg_v2(par, inputs_denorm, cams=valid_cam_aux, cls_labels=cls_label, high_thre=args.high_thre, low_thre=args.low_thre, ignore_index=args.ignore_index, img_box=img_box)
        else:
            refined_pseudo_label = refine_cams_with_bkg_v2(par, inputs_denorm, cams=valid_cam, cls_labels=cls_label, high_thre=args.high_thre, low_thre=args.low_thre, ignore_index=args.ignore_index, img_box=img_box)
        segs = F.interpolate(segs, size=refined_pseudo_label.shape[1:], mode='bilinear', align_corners=False)
        seg_loss = get_seg_loss(segs, refined_pseudo_label.type(torch.long), ignore_index=args.ignore_index)
        reg_loss = get_energy_loss(img=inputs, logit=segs, label=refined_pseudo_label, img_box=img_box, loss_layer=loss_layer)
        resized_cams_aux = F.interpolate(cams_aux, size=fmap.shape[2:], mode='bilinear', align_corners=False)
        (_, pseudo_label_aux) = cam_to_label(resized_cams_aux.detach(), cls_label=cls_label, img_box=img_box, ignore_mid=True, bkg_thre=args.bkg_thre, high_thre=args.high_thre, low_thre=args.low_thre, ignore_index=args.ignore_index)
        aff_mask = label_to_aff_mask(pseudo_label_aux)
        ptc_loss = get_masked_ptc_loss(fmap, aff_mask)
        # Staged loss schedule: cls-only warmup, then seg/reg, then all terms.
        if (n_iter <= 8000):
            loss = ((((((1.0 * cls_loss) + (1.0 * cls_loss_aux)) + (0.0 * ptc_loss)) + (0.0 * ctc_loss)) + (0.0 * seg_loss)) + (0.0 * reg_loss))
        elif (n_iter <= 12000):
            loss = ((((((1.0 * cls_loss) + (1.0 * cls_loss_aux)) + (0.0 * ptc_loss)) + (0.0 * ctc_loss)) + (0.1 * seg_loss)) + (args.w_reg * reg_loss))
        else:
            loss = ((((((1.0 * cls_loss) + (1.0 * cls_loss_aux)) + (0.2 * ptc_loss)) + (0.5 * ctc_loss)) + (0.1 * seg_loss)) + (args.w_reg * reg_loss))
        cls_pred = (cls > 0).type(torch.int16)
        cls_score = evaluate.multilabel_score(cls_label.cpu().numpy()[0], cls_pred.cpu().numpy()[0])
        avg_meter.add({'cls_loss': cls_loss.item(), 'ptc_loss': ptc_loss.item(), 'ctc_loss': ctc_loss.item(), 'cls_loss_aux': cls_loss_aux.item(), 'seg_loss': seg_loss.item(), 'cls_score': cls_score.item()})
        optim.zero_grad()
        loss.backward()
        optim.step()
        if (((n_iter + 1) % args.log_iters) == 0):
            (delta, eta) = cal_eta(time0, (n_iter + 1), args.max_iters)
            cur_lr = optim.param_groups[0]['lr']
            if (args.local_rank == 0):
                logging.info(('Iter: %d; Elasped: %s; ETA: %s; LR: %.3e; cls_loss: %.4f, cls_loss_aux: %.4f, ptc_loss: %.4f, ctc_loss: %.4f, seg_loss: %.4f...' % ((n_iter + 1), delta, eta, cur_lr, avg_meter.pop('cls_loss'), avg_meter.pop('cls_loss_aux'), avg_meter.pop('ptc_loss'), avg_meter.pop('ctc_loss'), avg_meter.pop('seg_loss'))))
        if (((n_iter + 1) % args.eval_iters) == 0):
            ckpt_name = os.path.join(args.ckpt_dir, ('model_iter_%d.pth' % (n_iter + 1)))
            if (args.local_rank == 0):
                logging.info('Validating...')
                if args.save_ckpt:
                    torch.save(model.state_dict(), ckpt_name)
            (val_cls_score, tab_results) = validate(model=model, data_loader=val_loader, args=args)
            if (args.local_rank == 0):
                logging.info(('val cls score: %.6f' % val_cls_score))
                logging.info(('\n' + tab_results))
    return True
|
def setup_seed(seed):
    """Seed every RNG in play (python, numpy, torch CPU and GPU) and switch
    cuDNN into its deterministic, non-benchmarking mode for reproducible runs."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
|
def validate(model=None, data_loader=None, args=None):
    """Evaluate the model on the VOC validation set.

    Computes the multilabel classification F1 (averaged over images) and the
    segmentation / CAM / aux-CAM scores against the ground-truth masks.

    Args:
        model: the (possibly DDP-wrapped) network; restored to train mode on exit.
        data_loader: validation loader yielding (name, image, seg label, cls label).
        args: namespace with crop_size, cam_scales and the CAM thresholds.

    Returns:
        (cls_score, tab_results): mean F1 and a formatted results table string.
    """
    (preds, gts, cams, cams_aux) = ([], [], [], [])
    model.eval()
    avg_meter = AverageMeter()
    with torch.no_grad():
        for (_, data) in tqdm(enumerate(data_loader), total=len(data_loader), ncols=100, ascii=' >='):
            (name, inputs, labels, cls_label) = data
            inputs = inputs.cuda()
            labels = labels.cuda()
            cls_label = cls_label.cuda()
            inputs = F.interpolate(inputs, size=[args.crop_size, args.crop_size], mode='bilinear', align_corners=False)
            (cls, segs, _, _) = model(inputs)
            # Multi-label prediction: positive logit -> class present.
            # NOTE(review): the original computed and added this score twice per
            # image; a single add leaves the running average unchanged.
            cls_pred = (cls > 0).type(torch.int16)
            _f1 = evaluate.multilabel_score(cls_label.cpu().numpy()[0], cls_pred.cpu().numpy()[0])
            avg_meter.add({'cls_score': _f1})
            (_cams, _cams_aux) = multi_scale_cam2(model, inputs, args.cam_scales)
            resized_cam = F.interpolate(_cams, size=labels.shape[1:], mode='bilinear', align_corners=False)
            cam_label = cam_to_label(resized_cam, cls_label, bkg_thre=args.bkg_thre, high_thre=args.high_thre, low_thre=args.low_thre, ignore_index=args.ignore_index)
            resized_cam_aux = F.interpolate(_cams_aux, size=labels.shape[1:], mode='bilinear', align_corners=False)
            cam_label_aux = cam_to_label(resized_cam_aux, cls_label, bkg_thre=args.bkg_thre, high_thre=args.high_thre, low_thre=args.low_thre, ignore_index=args.ignore_index)
            resized_segs = F.interpolate(segs, size=labels.shape[1:], mode='bilinear', align_corners=False)
            preds += list(torch.argmax(resized_segs, dim=1).cpu().numpy().astype(np.int16))
            cams += list(cam_label.cpu().numpy().astype(np.int16))
            gts += list(labels.cpu().numpy().astype(np.int16))
            cams_aux += list(cam_label_aux.cpu().numpy().astype(np.int16))
    cls_score = avg_meter.pop('cls_score')
    seg_score = evaluate.scores(gts, preds)
    cam_score = evaluate.scores(gts, cams)
    cam_aux_score = evaluate.scores(gts, cams_aux)
    model.train()
    tab_results = format_tabs([cam_score, cam_aux_score, seg_score], name_list=['CAM', 'aux_CAM', 'Seg_Pred'], cat_list=voc.class_list)
    return (cls_score, tab_results)
|
def train(args=None):
    """Distributed training loop for the VOC12 weakly-supervised segmentation model.

    Sets up DDP, the VOC12 classification/segmentation datasets, a warmup
    optimizer and the PAR/CTC auxiliary modules, then minimizes a weighted sum
    of classification, patch-token-contrast (ptc), class-token-contrast (ctc),
    segmentation and energy-regularization losses; seg/reg terms are enabled
    after iteration 2000.  Checkpoints and validates every ``args.eval_iters``.

    Args:
        args: parsed command-line namespace carrying dataset paths,
            optimization hyper-parameters and DDP settings referenced below.

    Returns:
        True on completion.
    """
    torch.cuda.set_device(args.local_rank)
    dist.init_process_group(backend=args.backend)
    logging.info(('Total gpus: %d, samples per gpu: %d...' % (dist.get_world_size(), args.spg)))
    time0 = datetime.datetime.now()
    time0 = time0.replace(microsecond=0)
    train_dataset = voc.VOC12ClsDataset(root_dir=args.data_folder, name_list_dir=args.list_folder, split=args.train_set, stage='train', aug=True, rescale_range=args.scales, crop_size=args.crop_size, img_fliplr=True, ignore_index=args.ignore_index, num_classes=args.num_classes)
    val_dataset = voc.VOC12SegDataset(root_dir=args.data_folder, name_list_dir=args.list_folder, split=args.val_set, stage='val', aug=False, ignore_index=args.ignore_index, num_classes=args.num_classes)
    train_sampler = DistributedSampler(train_dataset, shuffle=True)
    train_loader = DataLoader(train_dataset, batch_size=args.spg, num_workers=args.num_workers, pin_memory=False, drop_last=True, sampler=train_sampler, prefetch_factor=4)
    val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=args.num_workers, pin_memory=False, drop_last=False)
    device = torch.device(args.local_rank)
    model = network(backbone=args.backbone, num_classes=args.num_classes, pretrained=args.pretrained, init_momentum=args.momentum, aux_layer=args.aux_layer)
    param_groups = model.get_param_groups()
    model.to(device)
    # Backbone params at base LR, head params (groups 2/3) at 10x LR.
    optim = getattr(optimizer, args.optimizer)(params=[{'params': param_groups[0], 'lr': args.lr, 'weight_decay': args.wt_decay}, {'params': param_groups[1], 'lr': args.lr, 'weight_decay': args.wt_decay}, {'params': param_groups[2], 'lr': (args.lr * 10), 'weight_decay': args.wt_decay}, {'params': param_groups[3], 'lr': (args.lr * 10), 'weight_decay': args.wt_decay}], lr=args.lr, weight_decay=args.wt_decay, betas=args.betas, warmup_iter=args.warmup_iters, max_iter=args.max_iters, warmup_ratio=args.warmup_lr, power=args.power)
    logging.info(('\nOptimizer: \n%s' % optim))
    model = DistributedDataParallel(model, device_ids=[args.local_rank], find_unused_parameters=True)
    train_sampler.set_epoch(np.random.randint(args.max_iters))
    train_loader_iter = iter(train_loader)
    avg_meter = AverageMeter()
    loss_layer = DenseEnergyLoss(weight=1e-07, sigma_rgb=15, sigma_xy=100, scale_factor=0.5)
    ncrops = 10
    CTC_loss = CTCLoss_neg(ncrops=ncrops, temp=args.temp).cuda()
    par = PAR(num_iter=10, dilations=[1, 2, 4, 8, 12, 24]).cuda()
    for n_iter in range(args.max_iters):
        try:
            (img_name, inputs, cls_label, img_box, crops) = next(train_loader_iter)
        except StopIteration:
            # Epoch exhausted: reshuffle with a fresh epoch seed and restart the
            # iterator.  (Was a bare `except:`, which also swallowed real errors.)
            train_sampler.set_epoch(np.random.randint(args.max_iters))
            train_loader_iter = iter(train_loader)
            (img_name, inputs, cls_label, img_box, crops) = next(train_loader_iter)
        inputs = inputs.to(device, non_blocking=True)
        inputs_denorm = imutils.denormalize_img2(inputs.clone())
        cls_label = cls_label.to(device, non_blocking=True)
        # Multi-scale CAMs from the current model; the aux CAM drives ROI cropping.
        (cams, cams_aux) = multi_scale_cam2(model, inputs=inputs, scales=args.cam_scales)
        roi_mask = cam_to_roi_mask2(cams_aux.detach(), cls_label=cls_label, low_thre=args.low_thre, hig_thre=args.high_thre)
        (local_crops, flags) = crop_from_roi_neg(images=crops[2], roi_mask=roi_mask, crop_num=(ncrops - 2), crop_size=args.local_crop_size)
        roi_crops = (crops[:2] + local_crops)
        (cls, segs, fmap, cls_aux, out_t, out_s) = model(inputs, crops=roi_crops, n_iter=n_iter)
        cls_loss = F.multilabel_soft_margin_loss(cls, cls_label)
        cls_loss_aux = F.multilabel_soft_margin_loss(cls_aux, cls_label)
        ctc_loss = CTC_loss(out_s, out_t, flags)
        (valid_cam, _) = cam_to_label(cams.detach(), cls_label=cls_label, img_box=img_box, ignore_mid=True, bkg_thre=args.bkg_thre, high_thre=args.high_thre, low_thre=args.low_thre, ignore_index=args.ignore_index)
        refined_pseudo_label = refine_cams_with_bkg_v2(par, inputs_denorm, cams=valid_cam, cls_labels=cls_label, high_thre=args.high_thre, low_thre=args.low_thre, ignore_index=args.ignore_index, img_box=img_box)
        segs = F.interpolate(segs, size=refined_pseudo_label.shape[1:], mode='bilinear', align_corners=False)
        seg_loss = get_seg_loss(segs, refined_pseudo_label.type(torch.long), ignore_index=args.ignore_index)
        reg_loss = get_energy_loss(img=inputs, logit=segs, label=refined_pseudo_label, img_box=img_box, loss_layer=loss_layer)
        resized_cams_aux = F.interpolate(cams_aux, size=fmap.shape[2:], mode='bilinear', align_corners=False)
        (_, pseudo_label_aux) = cam_to_label(resized_cams_aux.detach(), cls_label=cls_label, img_box=img_box, ignore_mid=True, bkg_thre=args.bkg_thre, high_thre=args.high_thre, low_thre=args.low_thre, ignore_index=args.ignore_index)
        aff_mask = label_to_aff_mask(pseudo_label_aux)
        ptc_loss = get_masked_ptc_loss(fmap, aff_mask)
        # Warmup phase trains without seg/reg losses; full objective afterwards.
        if (n_iter <= 2000):
            loss = ((((((1.0 * cls_loss) + (1.0 * cls_loss_aux)) + (args.w_ptc * ptc_loss)) + (args.w_ctc * ctc_loss)) + (0.0 * seg_loss)) + (0.0 * reg_loss))
        else:
            loss = ((((((1.0 * cls_loss) + (1.0 * cls_loss_aux)) + (args.w_ptc * ptc_loss)) + (args.w_ctc * ctc_loss)) + (args.w_seg * seg_loss)) + (args.w_reg * reg_loss))
        cls_pred = (cls > 0).type(torch.int16)
        cls_score = evaluate.multilabel_score(cls_label.cpu().numpy()[0], cls_pred.cpu().numpy()[0])
        avg_meter.add({'cls_loss': cls_loss.item(), 'ptc_loss': ptc_loss.item(), 'ctc_loss': ctc_loss.item(), 'cls_loss_aux': cls_loss_aux.item(), 'seg_loss': seg_loss.item(), 'cls_score': cls_score.item()})
        optim.zero_grad()
        loss.backward()
        optim.step()
        if (((n_iter + 1) % args.log_iters) == 0):
            (delta, eta) = cal_eta(time0, (n_iter + 1), args.max_iters)
            cur_lr = optim.param_groups[0]['lr']
            if (args.local_rank == 0):
                logging.info(('Iter: %d; Elasped: %s; ETA: %s; LR: %.3e; cls_loss: %.4f, cls_loss_aux: %.4f, ptc_loss: %.4f, ctc_loss: %.4f, seg_loss: %.4f...' % ((n_iter + 1), delta, eta, cur_lr, avg_meter.pop('cls_loss'), avg_meter.pop('cls_loss_aux'), avg_meter.pop('ptc_loss'), avg_meter.pop('ctc_loss'), avg_meter.pop('seg_loss'))))
        if (((n_iter + 1) % args.eval_iters) == 0):
            ckpt_name = os.path.join(args.ckpt_dir, ('model_iter_%d.pth' % (n_iter + 1)))
            if (args.local_rank == 0):
                logging.info('Validating...')
                if args.save_ckpt:
                    torch.save(model.state_dict(), ckpt_name)
            (val_cls_score, tab_results) = validate(model=model, data_loader=val_loader, args=args)
            if (args.local_rank == 0):
                logging.info(('val cls score: %.6f' % val_cls_score))
                logging.info(('\n' + tab_results))
    return True
|
def load_txt(txt_name):
    """Read *txt_name* and return its non-empty lines as a list of strings."""
    with open(txt_name) as fp:
        return [line for line in fp.read().split('\n') if line]
|
def load_txt(txt_name):
    """Return the non-blank lines of the text file *txt_name* as a list."""
    with open(txt_name) as handle:
        content = handle.read()
    # filter(None, ...) drops empty strings, i.e. blank lines.
    return list(filter(None, content.split('\n')))
|
def crf_inference(img, probs, t=10, scale_factor=1, labels=21):
    """Dense-CRF refinement of a softmax probability map.

    Args:
        img: RGB image, (H, W, 3) uint8.
        probs: class probabilities, (labels, H, W).
        t: number of mean-field inference iterations.
        scale_factor: divides the pairwise kernel widths.
        labels: number of classes.

    Returns:
        Refined probabilities as a (labels, H, W) array.
    """
    h, w = img.shape[:2]
    crf = dcrf.DenseCRF2D(w, h, labels)
    unary = np.ascontiguousarray(unary_from_softmax(probs))
    crf.setUnaryEnergy(unary)
    crf.addPairwiseGaussian(sxy=(3 / scale_factor), compat=3)
    rgb = np.copy(np.ascontiguousarray(img))
    crf.addPairwiseBilateral(sxy=(80 / scale_factor), srgb=13, rgbim=rgb, compat=10)
    refined = crf.inference(t)
    return np.array(refined).reshape((labels, h, w))
|
def crf_inference_label(img, labels, t=10, n_labels=21, gt_prob=0.7):
    """Dense-CRF refinement of a hard label map; returns the argmax label map.

    Args:
        img: RGB image, (H, W, 3) uint8.
        labels: integer label map to refine.
        t: number of mean-field inference iterations.
        n_labels: number of classes.
        gt_prob: confidence assigned to the input labels in the unary term.
    """
    h, w = img.shape[:2]
    crf = dcrf.DenseCRF2D(w, h, n_labels)
    crf.setUnaryEnergy(unary_from_labels(labels, n_labels, gt_prob=gt_prob, zero_unsure=False))
    crf.addPairwiseGaussian(sxy=3, compat=3)
    crf.addPairwiseBilateral(sxy=50, srgb=5, rgbim=np.ascontiguousarray(np.copy(img)), compat=10)
    marginals = np.array(crf.inference(t)).reshape((n_labels, h, w))
    return np.argmax(marginals, axis=0)
|
class DenseCRF(object):
    """Configurable dense-CRF post-processor for softmax probability maps."""

    def __init__(self, iter_max, pos_w, pos_xy_std, bi_w, bi_xy_std, bi_rgb_std):
        # iter_max: mean-field iterations; pos_*: Gaussian (position-only)
        # pairwise term; bi_*: bilateral (position + color) pairwise term.
        self.iter_max = iter_max
        self.pos_w = pos_w
        self.pos_xy_std = pos_xy_std
        self.bi_w = bi_w
        self.bi_xy_std = bi_xy_std
        self.bi_rgb_std = bi_rgb_std

    def __call__(self, image, probmap):
        """Refine a (C, H, W) probability map against its RGB image; returns (C, H, W)."""
        num_classes, height, width = probmap.shape
        unary = np.ascontiguousarray(utils.unary_from_softmax(probmap))
        rgb = np.ascontiguousarray(image)
        crf = dcrf.DenseCRF2D(width, height, num_classes)
        crf.setUnaryEnergy(unary)
        crf.addPairwiseGaussian(sxy=self.pos_xy_std, compat=self.pos_w)
        crf.addPairwiseBilateral(sxy=self.bi_xy_std, srgb=self.bi_rgb_std, rgbim=rgb, compat=self.bi_w)
        refined = crf.inference(self.iter_max)
        return np.array(refined).reshape((num_classes, height, width))
|
def multilabel_score(y_true, y_pred):
    """F1 score between a binary multi-label ground-truth vector and predictions."""
    score = metrics.f1_score(y_true, y_pred)
    return score
|
def _fast_hist(label_true, label_pred, num_classes):
mask = ((label_true >= 0) & (label_true < num_classes))
hist = np.bincount(((num_classes * label_true[mask].astype(int)) + label_pred[mask]), minlength=(num_classes ** 2))
return hist.reshape(num_classes, num_classes)
|
def scores(label_trues, label_preds, num_classes=21):
    """Segmentation metrics over paired GT / prediction label maps.

    Args:
        label_trues: iterable of ground-truth label arrays.
        label_preds: iterable of predicted label arrays (matching shapes).
        num_classes: number of valid classes; GT values outside
            [0, num_classes) (e.g. the 255 ignore label) are excluded.

    Returns:
        dict with 'pAcc' (pixel accuracy), 'mAcc' (mean class recall),
        'miou' (mean IoU over classes present in the GT) and 'iou'
        (per-class IoU dict keyed by class index).
    """
    hist = np.zeros((num_classes, num_classes))
    for (lt, lp) in zip(label_trues, label_preds):
        hist += _fast_hist(lt.flatten(), lp.flatten(), num_classes)
    acc = (np.diag(hist).sum() / hist.sum())
    # Per-class recall, averaged; NaNs (absent classes) are ignored.
    acc_cls = np.nanmean(np.diag(hist) / hist.sum(axis=1))
    iu = (np.diag(hist) / ((hist.sum(axis=1) + hist.sum(axis=0)) - np.diag(hist)))
    # Average IoU only over classes that actually occur in the ground truth.
    valid = (hist.sum(axis=1) > 0)
    mean_iu = np.nanmean(iu[valid])
    cls_iu = dict(zip(range(num_classes), iu))
    # (Removed an unused `freq` computation from the original.)
    return {'pAcc': acc, 'mAcc': acc_cls, 'miou': mean_iu, 'iou': cls_iu}
|
def pseudo_scores(label_trues, label_preds, num_classes=21):
    """Segmentation metrics for pseudo labels that may contain 255 'ignore' pixels.

    Wherever the *prediction* is 255, the corresponding GT pixel is also set to
    255 so that region is excluded from the histogram; the prediction's 255s
    are then replaced by 0 to keep bincount indices in range.  `flatten()`
    returns copies, so the caller's arrays are never mutated.

    Args / returns: same as `scores`.
    """
    hist = np.zeros((num_classes, num_classes))
    for (lt, lp) in zip(label_trues, label_preds):
        lt = lt.flatten()
        lp = lp.flatten()
        lt[(lp == 255)] = 255
        lp[(lp == 255)] = 0
        hist += _fast_hist(lt, lp, num_classes)
    acc = (np.diag(hist).sum() / hist.sum())
    # Per-class recall, averaged; NaNs (absent classes) are ignored.
    acc_cls = np.nanmean(np.diag(hist) / hist.sum(axis=1))
    iu = (np.diag(hist) / ((hist.sum(axis=1) + hist.sum(axis=0)) - np.diag(hist)))
    # Average IoU only over classes that actually occur in the ground truth.
    valid = (hist.sum(axis=1) > 0)
    mean_iu = np.nanmean(iu[valid])
    cls_iu = dict(zip(range(num_classes), iu))
    # (Removed an unused `freq` computation from the original.)
    return {'pAcc': acc, 'mAcc': acc_cls, 'miou': mean_iu, 'iou': cls_iu}
|
class CosWarmupAdamW(torch.optim.AdamW):
    """AdamW with linear LR warmup followed by half-cosine decay to zero.

    During warmup the LR ramps linearly from the absolute value
    ``warmup_ratio`` up to each group's base LR; afterwards it follows
    ``0.5 * (cos(pi * progress) + 1)`` of the base LR until ``max_iter``.
    """

    def __init__(self, params, lr, weight_decay, betas, warmup_iter=None, max_iter=None, warmup_ratio=None, power=None, **kwargs):
        super().__init__(params, lr=lr, betas=betas, weight_decay=weight_decay, eps=1e-08)
        self.global_step = 0
        # Fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin float() is the documented replacement.
        self.warmup_iter = float(warmup_iter)
        self.warmup_ratio = warmup_ratio
        self.max_iter = float(max_iter)
        self.power = power  # unused by the cosine schedule; kept for interface parity
        self.__init_lr = [group['lr'] for group in self.param_groups]

    def step(self, closure=None):
        """Set the scheduled LR for the current step, then run AdamW's update."""
        if (self.global_step < self.warmup_iter):
            lr_mult = (self.global_step / self.warmup_iter)
            # Additive term ramps the *absolute* warmup_ratio down to zero.
            lr_add = ((1 - (self.global_step / self.warmup_iter)) * self.warmup_ratio)
            for i in range(len(self.param_groups)):
                self.param_groups[i]['lr'] = ((self.__init_lr[i] * lr_mult) + lr_add)
        elif (self.global_step < self.max_iter):
            lr_mult = ((np.cos((((self.global_step - self.warmup_iter) / (self.max_iter - self.warmup_iter)) * np.pi)) * 0.5) + 0.5)
            for i in range(len(self.param_groups)):
                self.param_groups[i]['lr'] = (self.__init_lr[i] * lr_mult)
        super().step(closure)
        self.global_step += 1
|
class PolyWarmupAdamW(torch.optim.AdamW):
    """AdamW with linear LR warmup followed by polynomial decay.

    Warmup scales each group's base LR linearly from ``warmup_ratio`` of it
    up to the full value; afterwards the LR decays as
    ``(1 - step / max_iter) ** power``.
    """

    def __init__(self, params, lr, weight_decay, betas, warmup_iter=None, max_iter=None, warmup_ratio=None, power=None, **kwargs):
        super().__init__(params, lr=lr, betas=betas, weight_decay=weight_decay, eps=1e-08)
        self.global_step = 0
        self.warmup_iter = warmup_iter
        self.warmup_ratio = warmup_ratio
        self.max_iter = max_iter
        self.power = power
        # Remember each group's configured base LR; the schedule scales these.
        self.__init_lr = [group['lr'] for group in self.param_groups]

    def step(self, closure=None):
        """Set the scheduled LR for the current step, then run AdamW's update."""
        step_now = self.global_step
        if step_now < self.warmup_iter:
            scale = 1 - (1 - step_now / self.warmup_iter) * (1 - self.warmup_ratio)
            for base_lr, group in zip(self.__init_lr, self.param_groups):
                group['lr'] = base_lr * scale
        elif step_now < self.max_iter:
            scale = (1 - step_now / self.max_iter) ** self.power
            for base_lr, group in zip(self.__init_lr, self.param_groups):
                group['lr'] = base_lr * scale
        super().step(closure)
        self.global_step += 1
|
class PolyWarmupSGD(torch.optim.SGD):
    """SGD (momentum 0.9) with a polynomial warmup and polynomial decay.

    During warmup the LR is ``base_lr * (1 - step/warmup_iter)**power * 10``
    (note the 10x boost, preserved from the original schedule); afterwards it
    decays polynomially over the remaining iterations.
    """

    def __init__(self, params, lr, weight_decay, warmup_iter=None, max_iter=None, warmup_ratio=None, power=None, **kwargs):
        super().__init__(params, lr=lr, momentum=0.9, weight_decay=weight_decay)
        self.global_step = 0
        self.warmup_iter = warmup_iter
        self.warmup_lr = warmup_ratio
        self.max_iter = max_iter
        self.power = power
        # Remember each group's configured base LR; the schedule scales these.
        self.__init_lr = [group['lr'] for group in self.param_groups]

    def step(self, closure=None):
        """Set the scheduled LR for the current step, then run SGD's update."""
        step_now = self.global_step
        if step_now < self.warmup_iter:
            scale = (1 - step_now / self.warmup_iter) ** self.power
            for base_lr, group in zip(self.__init_lr, self.param_groups):
                group['lr'] = base_lr * scale * 10
        elif step_now < self.max_iter:
            progress = (step_now - self.warmup_iter) / (self.max_iter - self.warmup_iter)
            scale = (1 - progress) ** self.power
            for base_lr, group in zip(self.__init_lr, self.param_groups):
                group['lr'] = base_lr * scale
        super().step(closure)
        self.global_step += 1
|
class PConv2D(Conv2D):
    """Partial convolution layer for image inpainting.

    Operates on a pair ``[img, mask]``: the image is convolved after being
    multiplied by the mask, and the binary mask is convolved with an all-ones
    kernel so the image output can be renormalized by the fraction of valid
    input pixels inside each window.
    """

    def __init__(self, *args, n_channels=3, mono=False, **kwargs):
        # n_channels / mono are accepted for interface compatibility but unused.
        super().__init__(*args, **kwargs)
        # Both inputs (image and mask) must be rank-4 tensors.
        self.input_spec = [InputSpec(ndim=4), InputSpec(ndim=4)]

    def build(self, input_shape):
        """Create the image kernel, the fixed all-ones mask kernel and the bias.

        Adapted from the original _Conv() layer of Keras.

        Args:
            input_shape: list of two shape tuples for [img, mask].
        """
        if (self.data_format == 'channels_first'):
            channel_axis = 1
        else:
            channel_axis = (- 1)
        if (input_shape[0][channel_axis] is None):
            raise ValueError('The channel dimension of the inputs should be defined. Found `None`.')
        self.input_dim = input_shape[0][channel_axis]
        kernel_shape = (self.kernel_size + (self.input_dim, self.filters))
        self.kernel = self.add_weight(shape=kernel_shape, initializer=self.kernel_initializer, name='img_kernel', regularizer=self.kernel_regularizer, constraint=self.kernel_constraint)
        # Non-trainable all-ones kernel: convolving the mask with it counts the
        # valid pixels per window.
        self.kernel_mask = K.ones(shape=(self.kernel_size + (self.input_dim, self.filters)))
        # Symmetric 'same' padding computed from the (square) kernel size.
        self.pconv_padding = ((int(((self.kernel_size[0] - 1) / 2)), int(((self.kernel_size[0] - 1) / 2))), (int(((self.kernel_size[0] - 1) / 2)), int(((self.kernel_size[0] - 1) / 2))))
        self.window_size = (self.kernel_size[0] * self.kernel_size[1])
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.filters,), initializer=self.bias_initializer, name='bias', regularizer=self.bias_regularizer, constraint=self.bias_constraint)
        else:
            self.bias = None
        self.built = True

    def call(self, inputs, mask=None):
        """Apply the partial convolution to ``inputs = [img, mask]``.

        The image is multiplied by the mask before convolution; the mask is
        convolved with the all-ones kernel, the image output is renormalized
        by window_size / mask_output, and the mask output is clipped to [0, 1].
        Returns ``[img_output, mask_output]``.
        """
        if ((type(inputs) is not list) or (len(inputs) != 2)):
            raise Exception(('PartialConvolution2D must be called on a list of two tensors [img, mask]. Instead got: ' + str(inputs)))
        images = K.spatial_2d_padding(inputs[0], self.pconv_padding, self.data_format)
        masks = K.spatial_2d_padding(inputs[1], self.pconv_padding, self.data_format)
        mask_output = K.conv2d(masks, self.kernel_mask, strides=self.strides, padding='valid', data_format=self.data_format, dilation_rate=self.dilation_rate)
        img_output = K.conv2d((images * masks), self.kernel, strides=self.strides, padding='valid', data_format=self.data_format, dilation_rate=self.dilation_rate)
        # Renormalize by the fraction of valid pixels per window (epsilon
        # guards against division by zero in fully-masked windows).
        mask_ratio = (self.window_size / (mask_output + 1e-08))
        mask_output = K.clip(mask_output, 0, 1)
        mask_ratio = (mask_ratio * mask_output)
        img_output = (img_output * mask_ratio)
        if self.use_bias:
            img_output = K.bias_add(img_output, self.bias, data_format=self.data_format)
        if (self.activation is not None):
            img_output = self.activation(img_output)
        return [img_output, mask_output]

    def compute_output_shape(self, input_shape):
        """Output shapes for [img, mask]; both equal the convolved image shape."""
        if (self.data_format == 'channels_last'):
            space = input_shape[0][1:(- 1)]
            new_space = []
            for i in range(len(space)):
                new_dim = conv_utils.conv_output_length(space[i], self.kernel_size[i], padding='same', stride=self.strides[i], dilation=self.dilation_rate[i])
                new_space.append(new_dim)
            new_shape = (((input_shape[0][0],) + tuple(new_space)) + (self.filters,))
            return [new_shape, new_shape]
        if (self.data_format == 'channels_first'):
            # Fix: index the *image* shape (input_shape[0]) as the
            # channels_last branch does; the original read input_shape[2:]
            # and input_shape[0], treating the list of two shapes as one
            # shape tuple.
            space = input_shape[0][2:]
            new_space = []
            for i in range(len(space)):
                new_dim = conv_utils.conv_output_length(space[i], self.kernel_size[i], padding='same', stride=self.strides[i], dilation=self.dilation_rate[i])
                new_space.append(new_dim)
            new_shape = ((input_shape[0][0], self.filters) + tuple(new_space))
            return [new_shape, new_shape]
|
def plot_images(images, s=5):
    """Show *images* side by side in one matplotlib row, each *s* inches square."""
    n = len(images)
    _, axes = plt.subplots(1, n, figsize=((s * n), s))
    if n == 1:
        # plt.subplots returns a bare Axes (not an array) for a single column.
        axes = [axes]
    for ax, img in zip(axes, images):
        ax.imshow(img)
    plt.show()
|
def parse_args():
    """Build and parse the CLI options for the MSRA10K feature-vector script."""
    arg_parser = ArgumentParser(description='Compute feature vectors for the objects and backgrounds for the MSRA10K dataset')
    arg_parser.add_argument('-obj_path', '--obj_path', type=str, default='/home/bakrinski/datasets/MSRA10K/images/', help='OBJ_FOLDER_IMG input images path')
    arg_parser.add_argument('-obj_mask_path', '--obj_mask_path', type=str, default='/home/bakrinski/datasets/MSRA10K/masks/', help='OBJ_FOLDER_MASK input masks path')
    arg_parser.add_argument('-bg_path', '--bg_path', type=str, default='/home/dvruiz/PConv-Keras/output/', help='BG_FOLDER_IMG background images path')
    arg_parser.add_argument('-metric_knn', '--metric_knn', type=str, default='cosine', help='distance function used in the knn')
    arg_parser.add_argument('-nbins', '--nbins', type=int, default=64, help='number of bins for each histogram channel')
    arg_parser.add_argument('-size', '--size', type=int, default=10000, help='number of images in the dataset')
    arg_parser.add_argument('-k', '--k', type=int, default=10000, help='number of k NearestNeighbors')
    parsed = arg_parser.parse_args()
    return parsed
|
def getHistograms(img):
    """Feature vector for an RGB image: concatenated density histograms of the
    H, S, V channels plus a uniform-LBP texture histogram (4 * NBINS values)."""
    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    # H, S, V histograms, in that order.
    parts = [np.histogram(channel, bins=NBINS, density=True)[0] for channel in cv2.split(hsv)]
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    lbp = ft.local_binary_pattern(gray, 24, 3, 'uniform')
    parts.append(np.histogram(lbp, bins=NBINS, density=True)[0])
    return np.concatenate(parts)
|
def getHistogramsWithMask(img, mask):
    """Same feature vector as getHistograms, but every histogram (H, S, V and
    LBP) is weighted per-pixel by *mask* so only masked pixels contribute."""
    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    # H, S, V histograms, in that order, weighted by the mask.
    parts = [np.histogram(channel, bins=NBINS, density=True, weights=mask)[0] for channel in cv2.split(hsv)]
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    lbp = ft.local_binary_pattern(gray, 24, 3, 'uniform')
    parts.append(np.histogram(lbp, bins=NBINS, density=True, weights=mask)[0])
    return np.concatenate(parts)
|
def main():
    """Build (or load cached) histogram features for MSRA10K objects and
    backgrounds, then match each object to backgrounds via kNN and dump the
    median-neighbor distances/indices to text files.

    Relies on module globals: DATASET_SIZE, NBINS, OBJ_FOLDER_IMG,
    OBJ_FOLDER_MASK, BG_FOLDER_IMG, METRIC_KNN, N_NEIGHBORS.
    """
    # Write a trivial index file (one id per line) the first time this runs.
    existsDataSetFile = os.path.isfile('dataset.txt')
    if (not existsDataSetFile):
        with open('dataset.txt', 'w') as fd:
            for i in range(0, 10000):
                print(i, file=fd)
    print('now obj')
    # Object features: mask-weighted histograms, cached as histogramsOBJ.npy.
    existsObj = os.path.isfile('histogramsOBJ.npy')
    if (not existsObj):
        print('building histograms')
        histogramsOBJ = np.empty((DATASET_SIZE, (NBINS * 4)), np.float32)
        for i in range(0, DATASET_SIZE, 1):
            imgName = 'MSRA10K_image_{:06d}.jpg'.format(i)
            imFile = Image.open((OBJ_FOLDER_IMG + imgName))
            img = np.array(imFile)
            maskName = imgName.replace('.jpg', '.png')
            maskName = maskName.replace('image', 'mask')
            maskFile = Image.open((OBJ_FOLDER_MASK + maskName))
            # Normalize the 0/255 mask to 0/1 weights.
            mask = (np.array(maskFile) / 255)
            hist = getHistogramsWithMask(img, mask)
            histogramsOBJ[i] = hist
            imFile.close()
            maskFile.close()
        print('saving array histogramsOBJ.npy')
        np.save('histogramsOBJ.npy', histogramsOBJ)
    else:
        print('loading array histogramsOBJ.npy')
        histogramsOBJ = np.load('histogramsOBJ.npy')
    print('now bg')
    # Background features: plain (unmasked) histograms, cached as histogramsBG.npy.
    existsBg = os.path.isfile('histogramsBG.npy')
    if (not existsBg):
        print('building histograms')
        histogramsBG = np.empty((DATASET_SIZE, (NBINS * 4)), np.float32)
        for i in range(0, DATASET_SIZE):
            bgName = 'MSRA10K_image_{:06d}.png'.format(i)
            bgFile = Image.open((BG_FOLDER_IMG + bgName))
            bg = np.array(bgFile)
            hist = getHistograms(bg)
            histogramsBG[i] = hist
            bgFile.close()
        print('saving array histogramsBG.npy')
        np.save('histogramsBG.npy', histogramsBG)
    else:
        print('loading array histogramsBG.npy')
        histogramsBG = np.load('histogramsBG.npy')
    print('now knn')
    # For every object feature vector, find its N_NEIGHBORS nearest backgrounds.
    nbrs = NearestNeighbors(n_neighbors=N_NEIGHBORS, metric=METRIC_KNN, algorithm='auto', n_jobs=(- 1)).fit(histogramsBG)
    (distances, indices) = nbrs.kneighbors(histogramsOBJ)
    HALF_N_NEIGHBORS = int(np.floor((N_NEIGHBORS / 2)))
    # Dump each row's *median-rank* neighbor distance and index.
    # NOTE(review): the loop bound is N_NEIGHBORS, but distances/indices have one
    # row per *object sample* (DATASET_SIZE rows); these coincide only when
    # k == dataset size (both default to 10000) — confirm intended bound.
    with open((('distances_' + METRIC_KNN) + '.txt'), 'w') as fd:
        with open((('indices_' + METRIC_KNN) + '.txt'), 'w') as fi:
            for i in range(0, N_NEIGHBORS):
                valuesDis = str(distances[i][HALF_N_NEIGHBORS])
                valuesIndex = str(indices[i][HALF_N_NEIGHBORS])
                print(valuesDis, file=fd)
                print(valuesIndex, file=fi)
|
def bbox(img):
    """Inclusive bounding box (rmin, rmax, cmin, cmax) of the truthy pixels.

    Raises IndexError when the image has no truthy pixel.
    """
    occupied_rows = np.where(np.any(img, axis=1))[0]
    occupied_cols = np.where(np.any(img, axis=0))[0]
    return (occupied_rows[0], occupied_rows[-1], occupied_cols[0], occupied_cols[-1])
|
def main():
    """Scatter-plot the normalized bounding-box centers of every mask in the
    augmented MSRA10K dataset, colored by sample density, and save the figure
    in several formats under the module-level `savePath`.
    """
    DATASETS = ['Augmented MSRA10K Experiment VIII']
    DATASETS_NAME = ['Augmented MSRA10K Experiment VIII']
    j = 0
    for dataset in DATASETS:
        FOLDER_MASK = '/home/dvruiz/scriptPosProcessObjects/29_05_2019_FullMix/multipleBG/masks/'
        fileList = os.listdir(FOLDER_MASK)
        # Normalized (0..1) bbox-center coordinates, one entry per mask file.
        xs = np.empty(len(fileList), np.float32)
        ys = np.empty(len(fileList), np.float32)
        index = 0
        for i in fileList:
            maskName = i
            maskFile = Image.open((FOLDER_MASK + maskName))
            mask = np.array(maskFile)
            shape = mask.shape
            h = shape[0]
            w = shape[1]
            maskFile.close()
            (ymin, ymax, xmin, xmax) = bbox(mask)
            # Bounding-box center, normalized by the image dimensions.
            centerx = (((xmax - xmin) / 2) + xmin)
            centery = (((ymax - ymin) / 2) + ymin)
            newx = (centerx / w)
            newy = (centery / h)
            xs[index] = newx
            ys[index] = newy
            index += 1
        plt.clf()
        plt.title((DATASETS_NAME[j] + '\n Distribution of Bounding Boxes Center Coordinates'), fontsize='xx-large')
        plt.xlabel('Normalized Position X', fontsize='xx-large')
        plt.xlim(0, 1)
        plt.xticks([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
        plt.ylabel('Normalized Position Y', fontsize='xx-large')
        plt.ylim(0, 1)
        plt.yticks([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
        # Color each point by a Gaussian KDE estimate of the local density.
        xy = np.vstack([xs, ys])
        z = gaussian_kde(xy)(xy)
        z = (z / 100)
        # NOTE(review): edgecolor='' is deprecated in newer matplotlib; use 'none'.
        plt.scatter(xs, ys, c=z, s=10, edgecolor='', vmin=0, vmax=0.5, cmap=plt.get_cmap('hot'))
        cb = plt.colorbar()
        cb.set_label('Sample Density', fontsize='xx-large')
        plt.tight_layout()
        # savePath is expected to be a module-level global (not defined here).
        plt.savefig(((savePath + dataset) + 'pos.png'))
        plt.savefig(((savePath + dataset) + 'pos.pdf'))
        plt.savefig(((savePath + dataset) + 'pos.svg'))
        plt.savefig(((savePath + dataset) + 'pos.eps'))
        j += 1
|
def autolabel(rects, counts):
    """Write each bar's count (2 decimal places) centered just above the bar."""
    for rect, count in zip(rects, counts):
        x = rect.get_x() + rect.get_width() / 2.0
        y = 1.02 * rect.get_height()
        plt.text(x, y, f'{count:.2f}', ha='center', va='bottom')
|
def bbox(img):
    """Return (rmin, rmax, cmin, cmax): the inclusive row/column extent of the
    nonzero region of *img*.  Raises IndexError for an all-zero image."""
    row_any = np.any(img, axis=1)
    col_any = np.any(img, axis=0)
    row_idx = np.where(row_any)[0]
    col_idx = np.where(col_any)[0]
    rmin, rmax = row_idx[0], row_idx[-1]
    cmin, cmax = col_idx[0], col_idx[-1]
    return (rmin, rmax, cmin, cmax)
|
def main():
    """Plot and save a normalized histogram of bounding-box area proportions.

    For each mask, the foreground bounding-box area is divided by the image
    area; the histogram over 10 equal bins is rendered and saved in several
    formats. Relies on module-level ``savePath``, ``bbox`` and ``autolabel``.
    """
    n_bins = 10  # NOTE(review): unused; the bin edges come from array_bins below.
    DATASETS = ['Augmented MSRA10K Experiment VIII']
    DATASETS_NAME = ['Augmented MSRA10K Experiment VIII']
    j = 0  # index into DATASETS_NAME, advanced at the end of each iteration
    for dataset in DATASETS:
        FOLDER_MASK = '/home/dvruiz/scriptPosProcessObjects/29_05_2019_FullMix/multipleBG/masks/'
        fileList = os.listdir(FOLDER_MASK)
        fileList = sorted(fileList)
        ys = np.zeros(len(fileList), np.float32)  # per-mask bbox area / image area
        zs = np.zeros(len(fileList), np.float32)  # NOTE(review): never written, stays all zeros
        index = 0
        for i in fileList:
            maskName = i
            maskFile = Image.open((FOLDER_MASK + maskName))
            mask = np.array(maskFile)
            shape = mask.shape
            h = shape[0]
            w = shape[1]
            maskFile.close()
            # bbox returns inclusive (row_min, row_max, col_min, col_max).
            (ymin, ymax, xmin, xmax) = bbox(mask)
            propX = (xmax - xmin)
            propY = (ymax - ymin)
            areaOBJ = (propX * propY)
            areaIMG = (h * w)
            prop = (areaOBJ / areaIMG)
            ys[index] = prop
            index += 1
        plt.clf()
        # NOTE(review): this title targets the current pyplot figure, but a new
        # figure is created just below — this call appears to be leftover.
        plt.title((DATASETS_NAME[j] + '\n Distribution of Bounding Boxes Size'))
        # Equal weights so the histogram bars sum to 1 (normalized counts).
        weights = (np.ones_like(ys) / float(len(ys)))
        fig = plt.figure()
        ax = fig.add_subplot(111)
        array_bins = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
        (counts, bins, patches) = ax.hist(ys, weights=weights, bins=array_bins, zorder=10, label='n-samples')
        print('bins=', bins)
        farray = np.zeros(10)
        sarray = np.zeros(10)
        elem = np.zeros(10)
        # Map each sample to its bin; the last edge is dropped so values at the
        # upper boundary fall into the final bin.
        inds = np.digitize(ys, bins[:(len(bins) - 1)])
        for i in range(0, len(zs)):
            farray[(inds[i] - 1)] += zs[i]
            sarray[(inds[i] - 1)] += 1
            elem[(inds[i] - 1)] += 1
        # Per-bin averages; farray stays all zeros because zs is never populated.
        for i in range(0, len(farray)):
            if (elem[i] != 0):
                farray[i] /= elem[i]
            sarray[i] /= 10000
        print('farray=', farray)
        print('sarray=', sarray)
        print('counts.shape=', counts.shape)
        print('counts=', counts)
        # Annotate each bar with its normalized count.
        autolabel(patches, counts)
        ax.set_title((DATASETS_NAME[j] + '\n Distribution of Bounding Boxes Size'), fontsize='xx-large')
        ax.set_xlabel('Bounding Box Area Proportion', fontsize='xx-large')
        ax.set_xlim(0, 1)
        ax.set_xticks([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
        ax.set_ylabel('Normalized Number of Samples', fontsize='xx-large')
        ax.set_ylim(0, 1)
        ax.set_yticks([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
        ax.grid()
        plt.tight_layout()
        # Save every format needed downstream.
        plt.savefig(((savePath + dataset) + 'size.png'))
        plt.savefig(((savePath + dataset) + 'size.svg'))
        plt.savefig(((savePath + dataset) + 'size.pdf'))
        plt.savefig(((savePath + dataset) + 'size.eps'))
        j += 1
|
def train(args, data_info, show_loss, data_nums):
    """Train an L0_SIGN model, logging loss and val/test metrics per epoch.

    Args:
        args: config namespace providing lr, n_epoch, l0_weight, l2_weight, etc.
        data_info: (train_loader, val_loader, test_loader, num_feature).
        show_loss: unused here; presumably kept for caller compatibility — TODO confirm.
        data_nums: per-split sample counts; data_nums[0] normalizes train loss.
    """
    train_loader = data_info[0]
    val_loader = data_info[1]
    test_loader = data_info[2]
    num_feature = data_info[3]
    device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
    model = L0_SIGN(args, num_feature, device)
    model = model.to(device)
    # Optimize only trainable parameters.
    optimizer = torch.optim.Adagrad(filter((lambda p: p.requires_grad), model.parameters()), args.lr, lr_decay=1e-05)
    crit = torch.nn.MSELoss()
    print([i.size() for i in filter((lambda p: p.requires_grad), model.parameters())])
    print('start training...')
    for step in range(args.n_epoch):
        loss_all = 0
        edge_all = 0  # NOTE(review): never accumulated or read; appears unused.
        model.train()
        for data in train_loader:
            data = data.to(device)
            # Model returns predictions plus L0/L2 penalty terms and the
            # number of edges retained by the gating mechanism.
            (output, l0_penaty, l2_penaty, num_edges) = model(data)
            label = data.y.view((- 1), 2)
            label = label.to(device)
            baseloss = crit(output, label)
            l0_loss = (l0_penaty * args.l0_weight)
            l2_loss = (l2_penaty * args.l2_weight)
            loss = ((baseloss + l0_loss) + l2_loss)
            # Weight by batch size so cur_loss is a true per-sample average.
            loss_all += (data.num_graphs * loss.item())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        cur_loss = (loss_all / data_nums[0])
        train_auc = 0  # train AUC is not computed; placeholder for the log line
        (val_auc, val_acc, _) = evaluate(model, val_loader, device)
        (test_auc, test_acc, test_edges) = evaluate(model, test_loader, device)
        print('Epoch: {:03d}, Loss: {:.4f}, Train Auc: {:.4f}, Val Auc: {:.4f}, Acc: {:.4f}, Test Auc: {:.4f}, Acc: {:.4f}, Train edges: {:07d}'.format(step, cur_loss, train_auc, val_auc, val_acc, test_auc, test_acc, test_edges))
|
def evaluate(model, loader, device):
    """Run ``model`` over ``loader`` and return (auc, accuracy, total edges).

    The model is put in eval mode and gradients are disabled; predictions and
    labels are accumulated on CPU before computing the sklearn metrics.
    """
    model.eval()
    pred_chunks = []
    label_chunks = []
    total_edges = 0
    with torch.no_grad():
        for batch in loader:
            batch = batch.to(device)
            out, _, _, n_edges = model(batch)
            total_edges += n_edges
            pred_chunks.append(out.detach().cpu().numpy())
            label_chunks.append(batch.y.view(-1, 2).detach().cpu().numpy())
    preds = np.vstack(pred_chunks)
    truth = np.vstack(label_chunks)
    auc = roc_auc_score(truth, preds)
    acc = accuracy_score(np.argmax(truth, 1), np.argmax(preds, 1))
    return (auc, acc, total_edges)
|
class Dataset(InMemoryDataset):
    """In-memory graph dataset built from a .data file (and optionally .edge).

    Each line of the .data file is "<label> <node> <node> ...". When
    ``pred_edges`` is falsy, precomputed edges are read from the .edge file;
    otherwise a fully connected edge list is constructed per sample.
    """
    def __init__(self, root, dataset, pred_edges=1, transform=None, pre_transform=None):
        """
        if pred_edges=0, the dataset is used for SIGN/GNN only,
        we store the graph with edges in the .edge file
        """
        self.path = root
        self.dataset = dataset
        self.pred_edges = pred_edges
        super(Dataset, self).__init__(root, transform, pre_transform)
        # By now the parent constructor has invoked process() if the cached
        # files were absent, so the processed paths are readable.
        (self.data, self.slices) = torch.load(self.processed_paths[0])
        self.statistical_info = torch.load(self.processed_paths[1])
        self.node_num = self.statistical_info['node_num']
        self.data_num = self.statistical_info['data_num']
    @property
    def raw_file_names(self):
        # [<root><dataset>/<dataset>.data, <root><dataset>/<dataset>.edge]
        return ['{}{}/{}.data'.format(self.path, self.dataset, self.dataset), '{}{}/{}.edge'.format(self.path, self.dataset, self.dataset)]
    @property
    def processed_file_names(self):
        # Separate cache directories so edge-file datasets do not collide with
        # predicted-edge datasets of the same name.
        if (not self.pred_edges):
            return ['{}_edge/{}.dataset'.format(self.dataset, self.dataset), '{}_edge/{}.info'.format(self.dataset, self.dataset)]
        else:
            return ['{}/{}.dataset'.format(self.dataset, self.dataset), '{}/{}.info'.format(self.dataset, self.dataset)]
    def download(self):
        # Data is expected to already exist locally; nothing to download.
        pass
    def read_data(self):
        """Parse the raw files.

        Returns:
            (node_list, edge_list, label, sr_list, node_num, data_num) where
            ``label`` is one-hot encoded and ``node_num`` is max index + 1.
        """
        node_list = []
        label = []
        max_node_index = 0
        data_num = 0
        with open(self.datafile, 'r') as f:
            for line in f:
                data_num += 1
                data = line.split()
                # First token is the label; the rest are node indices.
                label.append(float(data[0]))
                int_list = [int(data[i]) for i in range(len(data))[1:]]
                node_list.append(int_list)
                if (max_node_index < max(int_list)):
                    max_node_index = max(int_list)
        if (not self.pred_edges):
            # Precomputed edges: each .edge line is "<sample> <src> <dst>".
            edge_list = [[[], []] for _ in range(data_num)]
            sr_list = []
            with open(self.edgefile, 'r') as f:
                for line in f:
                    edge_info = line.split()
                    node_index = int(edge_info[0])
                    edge_list[node_index][0].append(int(edge_info[1]))
                    edge_list[node_index][1].append(int(edge_info[2]))
        else:
            # Build a fully connected graph per sample; sr_list keeps the
            # (sender, receiver) node-id pairs as edge attributes.
            edge_list = []
            sr_list = []
            for nodes in node_list:
                (edge_l, sr_l) = self.construct_full_edge_list(nodes)
                edge_list.append(edge_l)
                sr_list.append(sr_l)
        label = self.construct_one_hot_label(label)
        return (node_list, edge_list, label, sr_list, (max_node_index + 1), data_num)
    def construct_full_edge_list(self, nodes):
        """Return ([senders, receivers], [[sender_id, receiver_id], ...]) for a
        fully connected graph over ``nodes`` (j >= i, so self-loops included)."""
        num_node = len(nodes)
        edge_list = [[], []]
        sender_receiver_list = []
        for i in range(num_node):
            for j in range(num_node)[i:]:
                edge_list[0].append(i)
                edge_list[1].append(j)
                sender_receiver_list.append([nodes[i], nodes[j]])
        return (edge_list, sender_receiver_list)
    def construct_one_hot_label(self, label):
        """Convert an iterable of indices to one-hot encoded labels."""
        nb_classes = (int(max(label)) + 1)
        targets = np.array(label, dtype=np.int32).reshape((- 1))
        return np.eye(nb_classes)[targets]
    def process(self):
        """Read raw files, wrap each sample in a PyG ``Data`` object, and cache
        the collated dataset plus {data_num, node_num} statistics to disk."""
        (self.datafile, self.edgefile) = self.raw_file_names
        (self.node, edge, label, self.sr_list, node_num, data_num) = self.read_data()
        data_list = []
        sr_data = []  # NOTE(review): never populated or read afterwards.
        for i in range(len(self.node)):
            # Node ids become a (num_nodes, 1) LongTensor of features.
            node_features = torch.LongTensor(self.node[i]).unsqueeze(1)
            x = node_features
            edge_index = torch.LongTensor(edge[i])
            y = torch.FloatTensor(label[i])
            if self.pred_edges:
                # (sender, receiver) node ids travel along as edge attributes.
                sr = torch.LongTensor(self.sr_list[i])
            else:
                sr = []
            data = Data(x=x, edge_index=edge_index, edge_attr=sr, y=y)
            data_list.append(data)
        (data, slices) = self.collate(data_list)
        torch.save((data, slices), self.processed_paths[0])
        statistical_info = {'data_num': data_num, 'node_num': node_num}
        torch.save(statistical_info, self.processed_paths[1])
    def node_M(self):
        # Number of distinct nodes (max node index + 1).
        return self.node_num
    def data_N(self):
        # Number of samples in the dataset.
        return self.data_num
    "\n    def len(self):\n        return len(self.node)\n    def get(self, idx):\n        ###\n        data = torch.load(osp.join(self.processed_dir, 'data_{}.pt'.format(idx)))\n        return data\n    "
|
def create_twomoon_dataset(n, p):
    """Two-moons data padded to ``p`` features.

    The first two columns are the informative two-moons coordinates; the
    remaining ``p - 2`` columns are standard-normal nuisance features.
    """
    informative, y = make_moons(n_samples=n, shuffle=True, noise=0.1, random_state=None)
    print(y.shape)
    nuisance = norm.rvs(loc=0, scale=1, size=[n, (p - 2)])
    data = np.hstack([informative, nuisance])
    print(data.shape)
    return (data, y)
|
def create_sin_dataset(n, p):
    """Generate a 2-feature regression dataset where only the first feature matters.

    This dataset was added to provide an example of L1 norm reg failure:
    y = sin(x1), while x2 is an irrelevant distractor drawn from the same range.

    Args:
        n: number of samples.
        p: number of features; must be exactly 2.

    Returns:
        (data, y): data of shape (n, 2), targets y of shape (n, 1).

    Raises:
        ValueError: if p != 2 (previously a bare assert, which is silently
            stripped under ``python -O``).
    """
    if p != 2:
        raise ValueError('create_sin_dataset requires p == 2, got {}'.format(p))
    x1 = np.random.uniform(-math.pi, math.pi, n).reshape(n, 1)
    x2 = np.random.uniform(-math.pi, math.pi, n).reshape(n, 1)
    y = np.sin(x1)
    data = np.concatenate([x1, x2], axis=1)
    print('data.shape: {}'.format(data.shape))
    return (data, y)
|
def state_dict(model, include=None, exclude=None, cpu=True):
    """Extract a filtered (and optionally CPU-moved) state dict from ``model``.

    DataParallel wrappers are unwrapped first; names are filtered by the
    include/exclude patterns, and excluded names are reported via the logger.
    """
    if isinstance(model, nn.DataParallel):
        model = model.module
    weights = model.state_dict()
    matcher = IENameMatcher(include, exclude)
    with matcher:
        weights = {name: tensor for name, tensor in weights.items() if matcher.match(name)}
    stat = matcher.get_last_stat()
    if len(stat[1]) > 0:
        logger.critical('Weights {}: {}.'.format(stat[0], ', '.join(sorted(list(stat[1])))))
    if cpu:
        weights = as_cpu(weights)
    return weights
|
def load_state_dict(model, state_dict, include=None, exclude=None):
    """Copy ``state_dict`` into ``model``, filtering names by include/exclude.

    Numpy arrays in the incoming dict are converted to tensors. Copy failures
    (e.g. shape mismatches), missing keys and unexpected keys are collected
    and raised together as one KeyError after all copyable weights are applied.
    """
    if isinstance(model, nn.DataParallel):
        model = model.module
    matcher = IENameMatcher(include, exclude)
    with matcher:
        state_dict = {k: v for (k, v) in state_dict.items() if matcher.match(k)}
    stat = matcher.get_last_stat()
    if (len(stat[1]) > 0):
        logger.critical('Weights {}: {}.'.format(stat[0], ', '.join(sorted(list(stat[1])))))
    # Convert any numpy weights to torch tensors (values only, keys unchanged).
    for (k, v) in state_dict.items():
        if isinstance(v, np.ndarray):
            state_dict[k] = torch.from_numpy(v)
    error_msg = []
    own_state = model.state_dict()
    for (name, param) in state_dict.items():
        if (name in own_state):
            if isinstance(param, nn.Parameter):
                # Copy the underlying data, not the Parameter wrapper.
                param = param.data
            try:
                own_state[name].copy_(param)
            except Exception:
                error_msg.append('While copying the parameter named {}, whose dimensions in the model are {} and whose dimensions in the checkpoint are {}.'.format(name, own_state[name].size(), param.size()))
    missing = (set(own_state.keys()) - set(state_dict.keys()))
    if (len(missing) > 0):
        error_msg.append('Missing keys in state_dict: "{}".'.format(missing))
    unexpected = (set(state_dict.keys()) - set(own_state.keys()))
    if (len(unexpected) > 0):
        error_msg.append('Unexpected key "{}" in state_dict.'.format(unexpected))
    if len(error_msg):
        raise KeyError('\n'.join(error_msg))
|
def load_weights(model, filename, include=None, exclude=None, return_raw=True):
    """Best-effort checkpoint loader.

    Returns the raw checkpoint (or True when ``return_raw`` is false) on
    success, and None when the file is missing or loading failed; failures
    are logged rather than raised.
    """
    if not osp.isfile(filename):
        logger.warning('No weights file found at specified position: {}.'.format(filename))
        return None
    try:
        raw = torch.load(filename)
        weights = raw
        # Full training checkpoints store the model under a 'model' key.
        if ('model' in weights) and ('optimizer' in weights):
            weights = weights['model']
        try:
            load_state_dict(model, weights, include=include, exclude=exclude)
        except KeyError as e:
            # Partial mismatches are tolerated: report and keep going.
            logger.warning('Unexpected or missing weights found:\n' + e.args[0])
        logger.critical('Weights loaded: {}.'.format(filename))
        return raw if return_raw else True
    except Exception:
        logger.exception('Error occurred when load weights {}.'.format(filename))
    return None
|
class FeatureSelector(nn.Module):
    """Stochastic-gates feature selector.

    Each input feature is multiplied by a hard-sigmoid gate whose mean ``mu``
    is learnable; Gaussian noise of scale ``sigma`` is injected only while
    the module is in training mode.
    """
    def __init__(self, input_dim, sigma, device):
        super(FeatureSelector, self).__init__()
        # Small random init keeps every gate near the 0.5 midpoint at start.
        self.mu = torch.nn.Parameter(0.01 * torch.randn(input_dim), requires_grad=True)
        self.noise = torch.randn(self.mu.size())
        self.sigma = sigma
        self.device = device

    def forward(self, prev_x):
        # self.training multiplies the noise out entirely in eval mode.
        z = self.mu + self.sigma * self.noise.normal_() * self.training
        return prev_x * self.hard_sigmoid(z)

    def hard_sigmoid(self, x):
        # Linear ramp shifted by 0.5 and clipped to [0, 1].
        return torch.clamp(x + 0.5, 0.0, 1.0)

    def regularizer(self, x):
        """Gaussian CDF (probability a gate is open, used as the penalty)."""
        return 0.5 * (1 + torch.erf(x / math.sqrt(2)))

    def _apply(self, fn):
        # Keep the non-parameter noise buffer on the same device/dtype as the module.
        super(FeatureSelector, self)._apply(fn)
        self.noise = fn(self.noise)
        return self
|
class GatingLayer(nn.Module):
    """L1-based gating layer, so that L1 can be compared with L0 (STG) fairly.

    Features are multiplied by a learnable deterministic gate vector ``mu``;
    sparsity is encouraged through the L1 regularizer.
    """
    def __init__(self, input_dim, device):
        super(GatingLayer, self).__init__()
        self.mu = torch.nn.Parameter(0.01 * torch.randn(input_dim), requires_grad=True)
        self.device = device

    def forward(self, prev_x):
        # Deterministic elementwise gate: no noise, no clipping.
        return prev_x * self.mu

    def regularizer(self, x):
        """L1 norm of the gate values."""
        return torch.sum(torch.abs(x))
|
class LinearLayer(nn.Sequential):
    """``nn.Linear`` optionally followed by batch-norm, dropout and activation.

    ``bias`` defaults to True unless batch normalization is requested, since
    the BN affine shift makes a separate linear bias redundant.
    """
    def __init__(self, in_features, out_features, batch_norm=None, dropout=None, bias=None, activation=None):
        if bias is None:
            bias = batch_norm is None
        layers = [nn.Linear(in_features, out_features, bias=bias)]
        if batch_norm is not None and batch_norm is not False:
            layers.append(get_batcnnorm(batch_norm, out_features, 1))
        if dropout is not None and dropout is not False:
            layers.append(get_dropout(dropout, 1))
        if activation is not None and activation is not False:
            layers.append(get_activation(activation))
        super().__init__(*layers)

    def reset_parameters(self):
        # Re-initialize only the linear sub-modules; other layers keep state.
        for m in self.modules():
            if isinstance(m, nn.Linear):
                m.reset_parameters()
|
class MLPLayer(nn.Module):
    """Multi-layer perceptron: hidden ``LinearLayer`` blocks plus a bare output Linear.

    ``hidden_dims`` may be None (no hidden layers), an int (one hidden layer),
    or a list of ints. When ``flatten`` is true, inputs are reshaped to
    (batch, -1) before entering the MLP.
    """
    def __init__(self, input_dim, output_dim, hidden_dims, batch_norm=None, dropout=None, activation='relu', flatten=True):
        super().__init__()
        if hidden_dims is None:
            hidden_dims = []
        elif type(hidden_dims) is int:
            hidden_dims = [hidden_dims]
        dims = [input_dim, *hidden_dims, output_dim]
        blocks = [
            LinearLayer(dims[k], dims[k + 1], batch_norm=batch_norm, dropout=dropout, activation=activation)
            for k in range(len(hidden_dims))
        ]
        # Final projection is a plain Linear: no norm/dropout/activation on it.
        blocks.append(nn.Linear(dims[-2], dims[-1], bias=True))
        self.mlp = nn.Sequential(*blocks)
        self.flatten = flatten

    def reset_parameters(self):
        for m in self.modules():
            if isinstance(m, nn.Linear):
                m.reset_parameters()

    def forward(self, input):
        if self.flatten:
            input = input.view(input.size(0), -1)
        return self.mlp(input)
|
def PartialLogLikelihood(logits, fail_indicator, ties):
    """Negative partial log-likelihood for Cox proportional-hazards models.

    Args:
        logits: raw model outputs (risk scores); assumes samples are ordered
            so cumulative sums form the risk sets — TODO confirm against caller.
        fail_indicator: 1 if the sample fails (event observed), 0 if censored.
        ties: tie-handling scheme; only 'noties' is implemented
            ('efron' / 'breslow' raise NotImplementedError).

    Returns:
        Average negative log-likelihood over the observed (uncensored) events.
    """
    if ties != 'noties':
        raise NotImplementedError()
    # Risk-set denominator: cumulative sum of hazard ratios exp(logit).
    # (The original also computed an unused cumsum of the logits; removed.)
    cumsum_hazard_ratio = torch.cumsum(torch.exp(logits), 0)
    log_risk = torch.log(cumsum_hazard_ratio)
    likelihood = logits - log_risk
    # Censored samples contribute nothing to the likelihood.
    uncensored_likelihood = likelihood * fail_indicator
    logL = -torch.sum(uncensored_likelihood)
    observations = torch.sum(fail_indicator, 0)
    return logL / observations
|
def calc_concordance_index(logits, fail_indicator, fail_time):
    """Concordance index of predicted risks against observed survival data.

    Higher logits mean higher risk, so the negated logits serve as the
    predicted survival scores expected by ``concordance_index``.

    Args:
        logits: predictive proportional risks from the network.
        fail_indicator: event-observed flags.
        fail_time: observed survival times.

    Returns:
        The concordance index.
    """
    return concordance_index(fail_time, -logits, fail_indicator)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.