repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
CoTr
|
CoTr-main/nnUNet/nnunet/utilities/recursive_rename_taskXX_to_taskXXX.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from batchgenerators.utilities.file_and_folder_operations import *
import os
def recursive_rename(folder):
    """Rename every "TaskXX_<name>" directory under *folder*, recursively, to the
    three-digit form "TaskXXX_<name>".

    Matching is deliberately narrow: an entry must start with "Task" and have its
    first underscore at index 6, i.e. exactly two id digits ("Task07_Liver").
    Already-converted "TaskXXX_" folders do not match and are left untouched.
    """
    for entry in subdirs(folder, join=False):
        # skip anything that is not a two-digit task folder
        if not (entry.startswith("Task") and entry.find("_") == 6):
            continue
        task_id = int(entry[4:6])
        new_name = "Task%03.0d_" % task_id + entry[7:]
        os.rename(join(folder, entry), join(folder, new_name))
    # recurse into the (possibly just renamed) subdirectories
    for subdirectory in subdirs(folder, join=True):
        recursive_rename(subdirectory)
if __name__ == "__main__":
    # One-off maintenance script: convert old two-digit task folders
    # (TaskXX_...) to the three-digit scheme (TaskXXX_...) in each data
    # location. NOTE(review): these hard-coded paths only exist on the
    # original developer's machines — adjust them before running elsewhere.
    recursive_rename("/media/fabian/Results/nnUNet")
    recursive_rename("/media/fabian/nnunet")
    recursive_rename("/media/fabian/My Book/MedicalDecathlon")
    recursive_rename("/home/fabian/drives/datasets/nnUNet_raw")
    recursive_rename("/home/fabian/drives/datasets/nnUNet_preprocessed")
    recursive_rename("/home/fabian/drives/datasets/nnUNet_testSets")
    recursive_rename("/home/fabian/drives/datasets/results/nnUNet")
    recursive_rename("/home/fabian/drives/e230-dgx2-1-data_fabian/Decathlon_raw")
    recursive_rename("/home/fabian/drives/e230-dgx2-1-data_fabian/nnUNet_preprocessed")
| 1,770
| 41.166667
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/run/default_configuration.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnunet
from nnunet.paths import network_training_output_dir, preprocessing_output_dir, default_plans_identifier
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.experiment_planning.summarize_plans import summarize_plans
from nnunet.training.model_restore import recursive_find_python_class
def get_configuration_from_output_folder(folder):
    """Decompose a trained-model output folder path into its components.

    *folder* is expected to live inside ``network_training_output_dir`` and be
    laid out as ``<configuration>/<task>/<trainer>__<plans_identifier>``.
    Returns the tuple (configuration, task, trainer, plans_identifier).
    """
    # strip the network_training_output_dir prefix plus one leading slash, if any
    relative = folder[len(network_training_output_dir):]
    if relative.startswith("/"):
        relative = relative[1:]
    configuration, task, trainer_and_plans = relative.split("/")
    trainer, plans_identifier = trainer_and_plans.split("__")
    return configuration, task, trainer, plans_identifier
def get_default_configuration(network, task, network_trainer, plans_identifier=default_plans_identifier,
                              search_in=(nnunet.__path__[0], "training", "network_training"),
                              base_module='nnunet.training.network_training'):
    """Resolve everything needed to instantiate a trainer for one nnU-Net run.

    :param network: one of '2d', '3d_lowres', '3d_fullres', '3d_cascade_fullres'
    :param task: full task name, e.g. 'Task005_Prostate'
    :param network_trainer: class name of the trainer to look up (e.g. 'nnUNetTrainerV2')
    :param plans_identifier: identifier of the preprocessing plans to load
    :param search_in: path components of the folder searched for the trainer class
    :param base_module: module path used when importing the trainer class
    :return: (plans_file, output_folder_name, dataset_directory, batch_dice, stage, trainer_class)
    :raises RuntimeError: if a lowres/cascade network is requested but the plans contain only one stage
    """
    # NOTE(review): the original message listed '3d', which is not a valid choice;
    # it now matches the accepted values.
    assert network in ['2d', '3d_lowres', '3d_fullres', '3d_cascade_fullres'], \
        "network can only be one of the following: '2d', '3d_lowres', '3d_fullres', '3d_cascade_fullres'"

    dataset_directory = join(preprocessing_output_dir, task)

    # 2d has its own plans file; all 3d variants share the 3D plans
    if network == '2d':
        plans_file = join(preprocessing_output_dir, task, plans_identifier + "_plans_2D.pkl")
    else:
        plans_file = join(preprocessing_output_dir, task, plans_identifier + "_plans_3D.pkl")

    plans = load_pickle(plans_file)
    possible_stages = list(plans['plans_per_stage'].keys())

    if network in ('3d_cascade_fullres', '3d_lowres') and len(possible_stages) == 1:
        raise RuntimeError("3d_lowres/3d_cascade_fullres only applies if there is more than one stage. This task does "
                           "not require the cascade. Run 3d_fullres instead")

    # 2d and 3d_lowres train on the first stage; fullres variants on the last
    if network == '2d' or network == "3d_lowres":
        stage = 0
    else:
        stage = possible_stages[-1]

    trainer_class = recursive_find_python_class([join(*search_in)], network_trainer,
                                                current_module=base_module)

    output_folder_name = join(network_training_output_dir, network, task, network_trainer + "__" + plans_identifier)

    print("###############################################")
    print("I am running the following nnUNet: %s" % network)
    print("My trainer class is: ", trainer_class)
    print("For that I will be using the following configuration:")
    summarize_plans(plans_file)
    print("I am using stage %d from these plans" % stage)

    # batch dice for 2d and for multi-stage plans, except for 3d_lowres;
    # sample dice otherwise (mirrors the printed messages below)
    if (network == '2d' or len(possible_stages) > 1) and not network == '3d_lowres':
        batch_dice = True
        print("I am using batch dice + CE loss")
    else:
        batch_dice = False
        print("I am using sample dice + CE loss")

    print("\nI am using data from this folder: ", join(dataset_directory, plans['data_identifier']))
    print("###############################################")
    return plans_file, output_folder_name, dataset_directory, batch_dice, stage, trainer_class
| 3,818
| 46.148148
| 119
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/run/run_training_DDP.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.run.default_configuration import get_default_configuration
from nnunet.paths import default_plans_identifier
from nnunet.training.cascade_stuff.predict_next_stage import predict_next_stage
from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
from nnunet.training.network_training.nnUNetTrainerCascadeFullRes import nnUNetTrainerCascadeFullRes
from nnunet.training.network_training.nnUNetTrainerV2_CascadeFullRes import nnUNetTrainerV2CascadeFullRes
from nnunet.utilities.task_name_id_conversion import convert_id_to_task_name
def main():
    """Entry point for distributed (DDP) nnU-Net training.

    Parses the command line, resolves the default configuration for the
    requested network/task/trainer combination, instantiates the trainer and
    then runs the LR finder, the training loop, or validation only.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("network")
    parser.add_argument("network_trainer")
    parser.add_argument("task", help="can be task name or task id")
    parser.add_argument("fold", help='0, 1, ..., 5 or \'all\'')
    parser.add_argument("-val", "--validation_only", help="use this if you want to only run the validation",
                        action="store_true")
    parser.add_argument("-c", "--continue_training", help="use this if you want to continue a training",
                        action="store_true")
    parser.add_argument("-p", help="plans identifier. Only change this if you created a custom experiment planner",
                        default=default_plans_identifier, required=False)
    parser.add_argument("--use_compressed_data", default=False, action="store_true",
                        help="If you set use_compressed_data, the training cases will not be decompressed. Reading compressed data "
                             "is much more CPU and RAM intensive and should only be used if you know what you are "
                             "doing", required=False)
    parser.add_argument("--deterministic",
                        help="Makes training deterministic, but reduces training speed substantially. I (Fabian) think "
                             "this is not necessary. Deterministic training will make you overfit to some random seed. "
                             "Don't use that.",
                        required=False, default=False, action="store_true")
    parser.add_argument("--local_rank", default=0, type=int)
    parser.add_argument("--fp32", required=False, default=False, action="store_true",
                        help="disable mixed precision training and run old school fp32")
    parser.add_argument("--dbs", required=False, default=False, action="store_true",
                        help="distribute batch size. If True then whatever batch_size is in plans will be distributed "
                             "over DDP models, if False then each model will have batch_size for a total of "
                             "GPUs*batch_size")
    # NOTE(review): fixed "vlaidation" -> "validation" (run_training.py already
    # has the corrected wording)
    parser.add_argument("--npz", required=False, default=False, action="store_true",
                        help="if set then nnUNet will export npz files of predicted segmentations in the validation "
                             "as well. This is needed to run the ensembling step so unless you are developing nnUNet "
                             "you should enable this")
    parser.add_argument("--valbest", required=False, default=False, action="store_true", help="")
    parser.add_argument("--find_lr", required=False, default=False, action="store_true", help="")
    parser.add_argument("--val_folder", required=False, default="validation_raw",
                        help="name of the validation folder. No need to use this for most people")
    parser.add_argument("--disable_saving", required=False, action='store_true',
                        help="If set nnU-Net will not save any parameter files. Useful for development when you are "
                             "only interested in the results and want to save some disk space")
    args = parser.parse_args()

    task = args.task
    fold = args.fold
    network = args.network
    network_trainer = args.network_trainer
    validation_only = args.validation_only
    plans_identifier = args.p
    use_compressed_data = args.use_compressed_data
    decompress_data = not use_compressed_data
    deterministic = args.deterministic
    valbest = args.valbest
    find_lr = args.find_lr
    val_folder = args.val_folder
    fp32 = args.fp32

    # the task may be given as its integer id; translate to the full task name
    if not task.startswith("Task"):
        task_id = int(task)
        task = convert_id_to_task_name(task_id)

    # 'all' trains on all folds at once; otherwise the fold is an integer
    if fold != 'all':
        fold = int(fold)

    plans_file, output_folder_name, dataset_directory, batch_dice, stage, \
        trainer_class = get_default_configuration(network, task, network_trainer, plans_identifier)

    if trainer_class is None:
        # NOTE(review): message previously referenced the legacy
        # "meddec.model_training" location; aligned with run_training.py
        raise RuntimeError("Could not find trainer class in nnunet.training.network_training")

    if network == "3d_cascade_fullres":
        assert issubclass(trainer_class, (nnUNetTrainerCascadeFullRes, nnUNetTrainerV2CascadeFullRes)), \
            "If running 3d_cascade_fullres then your trainer class must be derived from " \
            "nnUNetTrainerCascadeFullRes"
    else:
        assert issubclass(trainer_class,
                          nnUNetTrainer), "network_trainer was found but is not derived from nnUNetTrainer"

    trainer = trainer_class(plans_file, fold, local_rank=args.local_rank, output_folder=output_folder_name,
                            dataset_directory=dataset_directory, batch_dice=batch_dice, stage=stage,
                            unpack_data=decompress_data, deterministic=deterministic, fp16=not fp32,
                            distribute_batch_size=args.dbs)

    if args.disable_saving:
        trainer.save_latest_only = False  # if false it will not store/overwrite _latest but separate files each
        trainer.save_intermediate_checkpoints = False  # whether or not to save checkpoint_latest
        trainer.save_best_checkpoint = False  # whether or not to save the best checkpoint according to self.best_val_eval_criterion_MA
        trainer.save_final_checkpoint = False  # whether or not to save the final checkpoint

    trainer.initialize(not validation_only)

    if find_lr:
        trainer.find_lr()
    else:
        if not validation_only:
            if args.continue_training:
                trainer.load_latest_checkpoint()
            trainer.run_training()
        else:
            if valbest:
                trainer.load_best_checkpoint(train=False)
            else:
                trainer.load_latest_checkpoint(train=False)

        trainer.network.eval()

        # predict validation
        trainer.validate(save_softmax=args.npz, validation_folder_name=val_folder)

        if network == '3d_lowres':
            print("predicting segmentations for the next stage of the cascade")
            predict_next_stage(trainer, join(dataset_directory, trainer.plans['data_identifier'] + "_stage%d" % 1))


if __name__ == "__main__":
    main()
| 9,856
| 55.976879
| 135
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/run/run_training.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.run.default_configuration import get_default_configuration
from nnunet.paths import default_plans_identifier
from nnunet.training.cascade_stuff.predict_next_stage import predict_next_stage
from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
from nnunet.training.network_training.nnUNetTrainerCascadeFullRes import nnUNetTrainerCascadeFullRes
from nnunet.training.network_training.nnUNetTrainerV2_CascadeFullRes import nnUNetTrainerV2CascadeFullRes
from nnunet.utilities.task_name_id_conversion import convert_id_to_task_name
def main():
    """Entry point for single-process nnU-Net training.

    Parses the command line, resolves the default configuration for the
    requested network/task/trainer combination, builds the trainer and then
    runs the LR finder, the training loop, or validation only.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("network")
    parser.add_argument("network_trainer")
    parser.add_argument("task", help="can be task name or task id")
    parser.add_argument("fold", help='0, 1, ..., 5 or \'all\'')
    parser.add_argument("-val", "--validation_only", help="use this if you want to only run the validation",
                        action="store_true")
    parser.add_argument("-c", "--continue_training", help="use this if you want to continue a training",
                        action="store_true")
    parser.add_argument("-p", help="plans identifier. Only change this if you created a custom experiment planner",
                        default=default_plans_identifier, required=False)
    parser.add_argument("--use_compressed_data", default=False, action="store_true",
                        help="If you set use_compressed_data, the training cases will not be decompressed. Reading compressed data "
                             "is much more CPU and RAM intensive and should only be used if you know what you are "
                             "doing", required=False)
    parser.add_argument("--deterministic",
                        help="Makes training deterministic, but reduces training speed substantially. I (Fabian) think "
                             "this is not necessary. Deterministic training will make you overfit to some random seed. "
                             "Don't use that.",
                        required=False, default=False, action="store_true")
    parser.add_argument("--npz", required=False, default=False, action="store_true", help="if set then nnUNet will "
                        "export npz files of "
                        "predicted segmentations "
                        "in the validation as well. "
                        "This is needed to run the "
                        "ensembling step so unless "
                        "you are developing nnUNet "
                        "you should enable this")
    parser.add_argument("--find_lr", required=False, default=False, action="store_true",
                        help="not used here, just for fun")
    parser.add_argument("--valbest", required=False, default=False, action="store_true",
                        help="hands off. This is not intended to be used")
    parser.add_argument("--fp32", required=False, default=False, action="store_true",
                        help="disable mixed precision training and run old school fp32")
    parser.add_argument("--val_folder", required=False, default="validation_raw",
                        help="name of the validation folder. No need to use this for most people")
    parser.add_argument("--disable_saving", required=False, action='store_true',
                        help="If set nnU-Net will not save any parameter files. Useful for development when you are "
                             "only interested in the results and want to save some disk space")
    # parser.add_argument("--interp_order", required=False, default=3, type=int,
    #                     help="order of interpolation for segmentations. Testing purpose only. Hands off")
    # parser.add_argument("--interp_order_z", required=False, default=0, type=int,
    #                     help="order of interpolation along z if z is resampled separately. Testing purpose only. "
    #                          "Hands off")
    # parser.add_argument("--force_separate_z", required=False, default="None", type=str,
    #                     help="force_separate_z resampling. Can be None, True or False. Testing purpose only. Hands off")
    args = parser.parse_args()

    task = args.task
    fold = args.fold
    network = args.network
    network_trainer = args.network_trainer
    validation_only = args.validation_only
    plans_identifier = args.p
    find_lr = args.find_lr
    use_compressed_data = args.use_compressed_data
    decompress_data = not use_compressed_data
    deterministic = args.deterministic
    valbest = args.valbest
    fp32 = args.fp32
    run_mixed_precision = not fp32
    val_folder = args.val_folder
    # interp_order = args.interp_order
    # interp_order_z = args.interp_order_z
    # force_separate_z = args.force_separate_z

    # the task may be given as its integer id; translate to the full task name
    if not task.startswith("Task"):
        task_id = int(task)
        task = convert_id_to_task_name(task_id)

    # 'all' trains on all folds at once; otherwise the fold is an integer
    if fold == 'all':
        pass
    else:
        fold = int(fold)

    # if force_separate_z == "None":
    #     force_separate_z = None
    # elif force_separate_z == "False":
    #     force_separate_z = False
    # elif force_separate_z == "True":
    #     force_separate_z = True
    # else:
    #     raise ValueError("force_separate_z must be None, True or False. Given: %s" % force_separate_z)

    plans_file, output_folder_name, dataset_directory, batch_dice, stage, \
        trainer_class = get_default_configuration(network, task, network_trainer, plans_identifier)

    if trainer_class is None:
        raise RuntimeError("Could not find trainer class in nnunet.training.network_training")

    # the cascade's second stage needs a cascade-aware trainer
    if network == "3d_cascade_fullres":
        assert issubclass(trainer_class, (nnUNetTrainerCascadeFullRes, nnUNetTrainerV2CascadeFullRes)), \
            "If running 3d_cascade_fullres then your " \
            "trainer class must be derived from " \
            "nnUNetTrainerCascadeFullRes"
    else:
        assert issubclass(trainer_class,
                          nnUNetTrainer), "network_trainer was found but is not derived from nnUNetTrainer"

    trainer = trainer_class(plans_file, fold, output_folder=output_folder_name, dataset_directory=dataset_directory,
                            batch_dice=batch_dice, stage=stage, unpack_data=decompress_data,
                            deterministic=deterministic,
                            fp16=run_mixed_precision)

    if args.disable_saving:
        trainer.save_latest_only = False  # if false it will not store/overwrite _latest but separate files each
        trainer.save_intermediate_checkpoints = False  # whether or not to save checkpoint_latest
        trainer.save_best_checkpoint = False  # whether or not to save the best checkpoint according to self.best_val_eval_criterion_MA
        trainer.save_final_checkpoint = False  # whether or not to save the final checkpoint

    trainer.initialize(not validation_only)

    if find_lr:
        trainer.find_lr()
    else:
        if not validation_only:
            if args.continue_training:
                trainer.load_latest_checkpoint()
            trainer.run_training()
        else:
            if valbest:
                trainer.load_best_checkpoint(train=False)
            else:
                trainer.load_latest_checkpoint(train=False)

        trainer.network.eval()

        # predict validation
        trainer.validate(save_softmax=args.npz, validation_folder_name=val_folder)

        if network == '3d_lowres':
            print("predicting segmentations for the next stage of the cascade")
            predict_next_stage(trainer, join(dataset_directory, trainer.plans['data_identifier'] + "_stage%d" % 1))


if __name__ == "__main__":
    main()
| 8,977
| 51.811765
| 135
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/run/__init__.py
|
# Package initializer: makes nnunet.run a package and (via the star import)
# re-exports the names of its submodules.
from __future__ import absolute_import
from . import *
| 54
| 26.5
| 38
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/run/run_training_DP.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.run.default_configuration import get_default_configuration
from nnunet.paths import default_plans_identifier
from nnunet.training.cascade_stuff.predict_next_stage import predict_next_stage
from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
from nnunet.training.network_training.nnUNetTrainerCascadeFullRes import nnUNetTrainerCascadeFullRes
from nnunet.utilities.task_name_id_conversion import convert_id_to_task_name
def main():
    """Entry point for multi-GPU DataParallel (DP) nnU-Net training.

    Parses the command line, resolves the default configuration for the
    requested network/task/trainer combination, instantiates the trainer with
    the requested number of GPUs and then runs the LR finder, the training
    loop, or validation only.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("network")
    parser.add_argument("network_trainer")
    parser.add_argument("task", help="can be task name or task id")
    parser.add_argument("fold", help='0, 1, ..., 5 or \'all\'')
    parser.add_argument("-val", "--validation_only", help="use this if you want to only run the validation",
                        action="store_true")
    parser.add_argument("-c", "--continue_training", help="use this if you want to continue a training",
                        action="store_true")
    parser.add_argument("-p", help="plans identifier. Only change this if you created a custom experiment planner",
                        default=default_plans_identifier, required=False)
    parser.add_argument("--use_compressed_data", default=False, action="store_true",
                        help="If you set use_compressed_data, the training cases will not be decompressed. Reading compressed data "
                             "is much more CPU and RAM intensive and should only be used if you know what you are "
                             "doing", required=False)
    parser.add_argument("--deterministic",
                        help="Makes training deterministic, but reduces training speed substantially. I (Fabian) think "
                             "this is not necessary. Deterministic training will make you overfit to some random seed. "
                             "Don't use that.",
                        required=False, default=False, action="store_true")
    parser.add_argument("-gpus", help="number of gpus", required=True, type=int)
    parser.add_argument("--dbs", required=False, default=False, action="store_true",
                        help="distribute batch size. If True then whatever batch_size is in plans will be distributed "
                             "over DDP models, if False then each model will have batch_size for a total of "
                             "GPUs*batch_size")
    # NOTE(review): fixed "vlaidation" -> "validation" (run_training.py already
    # has the corrected wording)
    parser.add_argument("--npz", required=False, default=False, action="store_true",
                        help="if set then nnUNet will export npz files of predicted segmentations in the validation "
                             "as well. This is needed to run the ensembling step so unless you are developing nnUNet "
                             "you should enable this")
    parser.add_argument("--valbest", required=False, default=False, action="store_true", help="")
    parser.add_argument("--find_lr", required=False, default=False, action="store_true", help="")
    parser.add_argument("--fp32", required=False, default=False, action="store_true",
                        help="disable mixed precision training and run old school fp32")
    parser.add_argument("--val_folder", required=False, default="validation_raw",
                        help="name of the validation folder. No need to use this for most people")
    parser.add_argument("--disable_saving", required=False, action='store_true',
                        help="If set nnU-Net will not save any parameter files. Useful for development when you are "
                             "only interested in the results and want to save some disk space")
    args = parser.parse_args()

    task = args.task
    fold = args.fold
    network = args.network
    network_trainer = args.network_trainer
    validation_only = args.validation_only
    plans_identifier = args.p
    use_compressed_data = args.use_compressed_data
    decompress_data = not use_compressed_data
    deterministic = args.deterministic
    valbest = args.valbest
    find_lr = args.find_lr
    num_gpus = args.gpus
    fp32 = args.fp32
    val_folder = args.val_folder

    # the task may be given as its integer id; translate to the full task name
    if not task.startswith("Task"):
        task_id = int(task)
        task = convert_id_to_task_name(task_id)

    # 'all' trains on all folds at once; otherwise the fold is an integer
    if fold != 'all':
        fold = int(fold)

    plans_file, output_folder_name, dataset_directory, batch_dice, stage, \
        trainer_class = get_default_configuration(network, task, network_trainer, plans_identifier)

    if trainer_class is None:
        raise RuntimeError("Could not find trainer class")

    # the cascade's second stage needs a cascade-aware trainer
    if network == "3d_cascade_fullres":
        assert issubclass(trainer_class, nnUNetTrainerCascadeFullRes), "If running 3d_cascade_fullres then your " \
                                                                       "trainer class must be derived from " \
                                                                       "nnUNetTrainerCascadeFullRes"
    else:
        assert issubclass(trainer_class, nnUNetTrainer), "network_trainer was found but is not derived from " \
                                                         "nnUNetTrainer"

    trainer = trainer_class(plans_file, fold, output_folder=output_folder_name,
                            dataset_directory=dataset_directory, batch_dice=batch_dice, stage=stage,
                            unpack_data=decompress_data, deterministic=deterministic,
                            distribute_batch_size=args.dbs, num_gpus=num_gpus, fp16=not fp32)

    if args.disable_saving:
        trainer.save_latest_only = False  # if false it will not store/overwrite _latest but separate files each
        trainer.save_intermediate_checkpoints = False  # whether or not to save checkpoint_latest
        trainer.save_best_checkpoint = False  # whether or not to save the best checkpoint according to self.best_val_eval_criterion_MA
        trainer.save_final_checkpoint = False  # whether or not to save the final checkpoint

    trainer.initialize(not validation_only)

    if find_lr:
        trainer.find_lr()
    else:
        if not validation_only:
            if args.continue_training:
                trainer.load_latest_checkpoint()
            trainer.run_training()
        else:
            if valbest:
                trainer.load_best_checkpoint(train=False)
            else:
                trainer.load_latest_checkpoint(train=False)

        trainer.network.eval()

        # predict validation
        trainer.validate(save_softmax=args.npz, validation_folder_name=val_folder)

        if network == '3d_lowres':
            print("predicting segmentations for the next stage of the cascade")
            predict_next_stage(trainer, join(dataset_directory, trainer.plans['data_identifier'] + "_stage%d" % 1))


if __name__ == "__main__":
    main()
| 9,863
| 56.017341
| 135
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/experiment_planning/nnUNet_convert_decathlon_task.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.configuration import default_num_threads
from nnunet.experiment_planning.utils import split_4d
from nnunet.utilities.file_endings import remove_trailing_slash
def crawl_and_remove_hidden_from_decathlon(folder):
    """Validate that *folder* is an MSD task folder and delete hidden files from it.

    Checks that the folder name starts with "Task" and that the standard MSD
    subfolders (imagesTr, imagesTs, labelsTr) exist, then removes every file
    whose name starts with '.' from the folder itself and those subfolders
    (e.g. macOS '.DS_Store' / '._*' metadata that would otherwise be mistaken
    for cases).

    :param folder: path to a TaskXX_TASKNAME folder as downloaded from the MSD
    :raises AssertionError: if the folder does not have the expected layout
    """
    folder = remove_trailing_slash(folder)
    # the same explanation is used for every layout check
    layout_msg = "This does not seem to be a decathlon folder. Please give me a " \
                 "folder that starts with TaskXX and has the subfolders imagesTr, " \
                 "labelsTr and imagesTs"
    assert folder.split('/')[-1].startswith("Task"), layout_msg
    subf = subfolders(folder, join=False)
    for required in ('imagesTr', 'imagesTs', 'labelsTr'):
        assert required in subf, layout_msg
    # plain loops instead of side-effect list comprehensions
    for subdir in (folder, join(folder, 'imagesTr'), join(folder, 'labelsTr'), join(folder, 'imagesTs')):
        for hidden_file in subfiles(subdir, prefix="."):
            os.remove(hidden_file)
def main():
    """Command line wrapper: sanity-check an MSD task folder, strip hidden
    files, then split its 4D niftis into one 3D file per modality."""
    import argparse
    # NOTE(review): fixed typo "niftixs" -> "niftis" in the user-facing description
    parser = argparse.ArgumentParser(description="The MSD provides data as 4D Niftis with the modality being the first"
                                                 " dimension. We think this may be cumbersome for some users and "
                                                 "therefore expect 3D niftis instead, with one file per modality. "
                                                 "This utility will convert 4D MSD data into the format nnU-Net "
                                                 "expects")
    parser.add_argument("-i", help="Input folder. Must point to a TaskXX_TASKNAME folder as downloaded from the MSD "
                                   "website", required=True)
    parser.add_argument("-p", required=False, default=default_num_threads, type=int,
                        help="Use this to specify how many processes are used to run the script. "
                             "Default is %d" % default_num_threads)
    parser.add_argument("-output_task_id", required=False, default=None, type=int,
                        help="If specified, this will overwrite the task id in the output folder. If unspecified, the "
                             "task id of the input folder will be used.")
    args = parser.parse_args()

    crawl_and_remove_hidden_from_decathlon(args.i)
    split_4d(args.i, args.p, args.output_task_id)


if __name__ == "__main__":
    main()
| 4,015
| 60.784615
| 121
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/experiment_planning/DatasetAnalyzer.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from batchgenerators.utilities.file_and_folder_operations import *
from multiprocessing import Pool
from nnunet.configuration import default_num_threads
from nnunet.paths import nnUNet_raw_data, nnUNet_cropped_data
import numpy as np
import pickle
from nnunet.preprocessing.cropping import get_patient_identifiers_from_cropped_files
from skimage.morphology import label
from collections import OrderedDict
class DatasetAnalyzer(object):
    """Computes dataset-wide statistics over a folder of cropped nnU-Net cases:
    shapes/spacings after cropping, which classes occur, size reduction by
    cropping and foreground intensity statistics per modality. Expensive
    results are cached as pickle files inside the cropped-data folder."""

    def __init__(self, folder_with_cropped_data, overwrite=True, num_processes=default_num_threads):
        """
        :param folder_with_cropped_data: folder with the cropped .npz/.pkl case files and dataset.json
        :param overwrite: If True then precomputed values will not be used and instead recomputed from the data.
        False will allow loading of precomputed values. This may be dangerous though if some of the code of this class
        was changed, therefore the default is True.
        :param num_processes: number of worker processes for the multiprocessing pools
        """
        self.num_processes = num_processes
        self.overwrite = overwrite
        self.folder_with_cropped_data = folder_with_cropped_data
        self.sizes = self.spacings = None
        self.patient_identifiers = get_patient_identifiers_from_cropped_files(self.folder_with_cropped_data)
        assert isfile(join(self.folder_with_cropped_data, "dataset.json")), \
            "dataset.json needs to be in folder_with_cropped_data"
        # cache files for the expensive per-case / intensity computations
        self.props_per_case_file = join(self.folder_with_cropped_data, "props_per_case.pkl")
        self.intensityproperties_file = join(self.folder_with_cropped_data, "intensityproperties.pkl")

    def load_properties_of_cropped(self, case_identifier):
        """Load and return the pickled per-case properties dict."""
        with open(join(self.folder_with_cropped_data, "%s.pkl" % case_identifier), 'rb') as f:
            properties = pickle.load(f)
        return properties

    @staticmethod
    def _check_if_all_in_one_region(seg, regions):
        """For each region (an iterable of class labels), report whether the
        union of those labels forms exactly one connected component in seg.

        :return: OrderedDict mapping tuple(region) -> bool
        """
        res = OrderedDict()
        for r in regions:
            new_seg = np.zeros(seg.shape)
            for c in r:
                new_seg[seg == c] = 1
            labelmap, numlabels = label(new_seg, return_num=True)
            if numlabels != 1:
                res[tuple(r)] = False
            else:
                res[tuple(r)] = True
        return res

    @staticmethod
    def _collect_class_and_region_sizes(seg, all_classes, vol_per_voxel):
        """Compute, per class, the total volume and the volume of each connected
        component (both scaled by vol_per_voxel).

        :return: (volume_per_class, region_volume_per_class) OrderedDicts keyed by class
        """
        volume_per_class = OrderedDict()
        region_volume_per_class = OrderedDict()
        for c in all_classes:
            region_volume_per_class[c] = []
            volume_per_class[c] = np.sum(seg == c) * vol_per_voxel
            labelmap, numregions = label(seg == c, return_num=True)
            for l in range(1, numregions + 1):
                region_volume_per_class[c].append(np.sum(labelmap == l) * vol_per_voxel)
        return volume_per_class, region_volume_per_class

    def _get_unique_labels(self, patient_identifier):
        """Return the unique label values found in the case's segmentation
        (the last channel of the cropped .npz)."""
        seg = np.load(join(self.folder_with_cropped_data, patient_identifier) + ".npz")['data'][-1]
        unique_classes = np.unique(seg)
        return unique_classes

    def _load_seg_analyze_classes(self, patient_identifier, all_classes):
        """
        1) what class is in this training case?
        2) what is the size distribution for each class?
        3) what is the region size of each class?
        4) check if all in one region
        :return: (unique_classes, all_in_one_region, volume_per_class, region_sizes)
        """
        seg = np.load(join(self.folder_with_cropped_data, patient_identifier) + ".npz")['data'][-1]
        pkl = load_pickle(join(self.folder_with_cropped_data, patient_identifier) + ".pkl")
        # volume of a single voxel derived from the itk spacing stored during cropping
        vol_per_voxel = np.prod(pkl['itk_spacing'])

        # ad 1)
        unique_classes = np.unique(seg)

        # 4) check if all in one region
        regions = list()
        regions.append(list(all_classes))
        for c in all_classes:
            regions.append((c, ))
        all_in_one_region = self._check_if_all_in_one_region(seg, regions)

        # 2 & 3) region sizes
        volume_per_class, region_sizes = self._collect_class_and_region_sizes(seg, all_classes, vol_per_voxel)

        return unique_classes, all_in_one_region, volume_per_class, region_sizes

    def get_classes(self):
        """Return the 'labels' dict from dataset.json."""
        datasetjson = load_json(join(self.folder_with_cropped_data, "dataset.json"))
        return datasetjson['labels']

    def analyse_segmentations(self):
        """Determine which classes occur in each training case (parallelized).
        Results are cached in props_per_case.pkl unless self.overwrite.

        :return: (class_dct from dataset.json, {patient_id: {'has_classes': ndarray}})
        """
        class_dct = self.get_classes()

        if self.overwrite or not isfile(self.props_per_case_file):
            p = Pool(self.num_processes)
            res = p.map(self._get_unique_labels, self.patient_identifiers)
            p.close()
            p.join()

            props_per_patient = OrderedDict()
            # NOTE: loop variable p shadows the (already closed) Pool above
            for p, unique_classes in \
                    zip(self.patient_identifiers, res):
                props = dict()
                props['has_classes'] = unique_classes
                props_per_patient[p] = props

            save_pickle(props_per_patient, self.props_per_case_file)
        else:
            props_per_patient = load_pickle(self.props_per_case_file)
        return class_dct, props_per_patient

    def get_sizes_and_spacings_after_cropping(self):
        """Collect, per case, the cropped size and the original spacing.

        :return: (list of sizes, list of spacings) in patient_identifiers order
        """
        sizes = []
        spacings = []
        for c in self.patient_identifiers:
            properties = self.load_properties_of_cropped(c)
            sizes.append(properties["size_after_cropping"])
            spacings.append(properties["original_spacing"])

        return sizes, spacings

    def get_modalities(self):
        """Return {modality index (int): modality name} from dataset.json."""
        datasetjson = load_json(join(self.folder_with_cropped_data, "dataset.json"))
        modalities = datasetjson["modality"]
        modalities = {int(k): modalities[k] for k in modalities.keys()}
        return modalities

    def get_size_reduction_by_cropping(self):
        """Return {patient_id: cropped_voxels / original_voxels} (1.0 = no reduction)."""
        size_reduction = OrderedDict()
        for p in self.patient_identifiers:
            props = self.load_properties_of_cropped(p)
            shape_before_crop = props["original_size_of_raw_data"]
            shape_after_crop = props['size_after_cropping']
            size_red = np.prod(shape_after_crop) / np.prod(shape_before_crop)
            size_reduction[p] = size_red
        return size_reduction

    def _get_voxels_in_foreground(self, patient_identifier, modality_id):
        """Collect a subsample (every 10th) of the given modality's voxel values
        where the segmentation is foreground (> 0)."""
        all_data = np.load(join(self.folder_with_cropped_data, patient_identifier) + ".npz")['data']
        modality = all_data[modality_id]
        mask = all_data[-1] > 0
        voxels = list(modality[mask][::10]) # no need to take every voxel
        return voxels

    @staticmethod
    def _compute_stats(voxels):
        """Return (median, mean, sd, min, max, 99.5th percentile, 0.5th percentile)
        of voxels; all NaN if voxels is empty."""
        if len(voxels) == 0:
            return np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan
        median = np.median(voxels)
        mean = np.mean(voxels)
        sd = np.std(voxels)
        mn = np.min(voxels)
        mx = np.max(voxels)
        percentile_99_5 = np.percentile(voxels, 99.5)
        percentile_00_5 = np.percentile(voxels, 00.5)
        return median, mean, sd, mn, mx, percentile_99_5, percentile_00_5

    def collect_intensity_properties(self, num_modalities):
        """Compute foreground intensity statistics per modality: dataset-wide
        aggregates plus per-case ('local_props') statistics. Cached in
        intensityproperties.pkl unless self.overwrite.

        :param num_modalities: number of input channels to analyze
        :return: {modality_id: {'median', 'mean', 'sd', 'mn', 'mx',
                  'percentile_99_5', 'percentile_00_5', 'local_props'}}
        """
        if self.overwrite or not isfile(self.intensityproperties_file):
            p = Pool(self.num_processes)

            results = OrderedDict()
            for mod_id in range(num_modalities):
                results[mod_id] = OrderedDict()
                v = p.starmap(self._get_voxels_in_foreground, zip(self.patient_identifiers,
                                                                  [mod_id] * len(self.patient_identifiers)))

                # flatten the per-case voxel lists for the dataset-wide statistics
                w = []
                for iv in v:
                    w += iv

                median, mean, sd, mn, mx, percentile_99_5, percentile_00_5 = self._compute_stats(w)

                # per-case statistics (parallelized)
                local_props = p.map(self._compute_stats, v)
                props_per_case = OrderedDict()
                for i, pat in enumerate(self.patient_identifiers):
                    props_per_case[pat] = OrderedDict()
                    props_per_case[pat]['median'] = local_props[i][0]
                    props_per_case[pat]['mean'] = local_props[i][1]
                    props_per_case[pat]['sd'] = local_props[i][2]
                    props_per_case[pat]['mn'] = local_props[i][3]
                    props_per_case[pat]['mx'] = local_props[i][4]
                    props_per_case[pat]['percentile_99_5'] = local_props[i][5]
                    props_per_case[pat]['percentile_00_5'] = local_props[i][6]

                results[mod_id]['local_props'] = props_per_case
                results[mod_id]['median'] = median
                results[mod_id]['mean'] = mean
                results[mod_id]['sd'] = sd
                results[mod_id]['mn'] = mn
                results[mod_id]['mx'] = mx
                results[mod_id]['percentile_99_5'] = percentile_99_5
                results[mod_id]['percentile_00_5'] = percentile_00_5

            p.close()
            p.join()
            save_pickle(results, self.intensityproperties_file)
        else:
            results = load_pickle(self.intensityproperties_file)
        return results

    def analyze_dataset(self, collect_intensityproperties=True):
        """Run the full analysis and write dataset_properties.pkl into the
        cropped-data folder.

        :param collect_intensityproperties: skip the (slow) intensity statistics if False
        :return: the dataset_properties dict that was saved
        """
        # get all spacings and sizes
        sizes, spacings = self.get_sizes_and_spacings_after_cropping()

        # get all classes and what classes are in what patients
        # class min size
        # region size per class
        classes = self.get_classes()
        all_classes = [int(i) for i in classes.keys() if int(i) > 0]

        # modalities
        modalities = self.get_modalities()

        # collect intensity information
        if collect_intensityproperties:
            intensityproperties = self.collect_intensity_properties(len(modalities))
        else:
            intensityproperties = None

        # size reduction by cropping
        size_reductions = self.get_size_reduction_by_cropping()

        dataset_properties = dict()
        dataset_properties['all_sizes'] = sizes
        dataset_properties['all_spacings'] = spacings
        dataset_properties['all_classes'] = all_classes
        dataset_properties['modalities'] = modalities  # {idx: modality name}
        dataset_properties['intensityproperties'] = intensityproperties
        dataset_properties['size_reductions'] = size_reductions  # {patient_id: size_reduction}

        save_pickle(dataset_properties, join(self.folder_with_cropped_data, "dataset_properties.pkl"))
        return dataset_properties
| 11,051
| 42.003891
| 118
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/experiment_planning/summarize_plans.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.paths import preprocessing_output_dir
# This file is intended to double check nnUNets design choices. It is intended to be used for developent purposes only
def summarize_plans(file):
    """Print a human-readable overview of a nnU-Net plans pickle (development aid)."""
    plans = load_pickle(file)
    print("num_classes: ", plans['num_classes'])
    print("modalities: ", plans['modalities'])
    # these keys print as "key value" without a colon
    for key in ('use_mask_for_norm', 'keep_only_largest_region',
                'min_region_size_per_class', 'min_size_per_class',
                'normalization_schemes'):
        print(key, plans[key])
    print("stages...\n")
    # plans_per_stage may be a dict keyed 0..n-1, so index explicitly instead of iterating
    num_stages = len(plans['plans_per_stage'])
    for stage_id in range(num_stages):
        print("stage: ", stage_id)
        print(plans['plans_per_stage'][stage_id])
        print("")
def write_plans_to_file(f, plans_file):
    """Append one semicolon-separated CSV row per stage of *plans_file* to the
    open file handle *f* (columns match the header written by __main__)."""
    print(plans_file)
    plans = load_pickle(plans_file)
    stage_ids = list(plans['plans_per_stage'].keys())
    stage_ids.sort()
    for stage in stage_ids:
        # NOTE: stage_ids[stage] reproduces the original double indexing; it is
        # only an identity when the stage keys are the contiguous ints 0..n-1
        stage_plan = plans['plans_per_stage'][stage_ids[stage]]
        spacing = stage_plan['current_spacing']
        patch_size_in_mm = [i * j for i, j in zip(stage_plan['patch_size'], spacing)]
        median_patient_size_in_mm = [i * j for i, j in
                                     zip(stage_plan['median_patient_size_in_voxels'], spacing)]
        row = [
            plans_file.split("/")[-2],
            ";%s" % plans_file.split("/")[-1],
            ";%d" % stage,
            ";%s" % str(stage_plan['batch_size']),
            ";%s" % str(stage_plan['num_pool_per_axis']),
            ";%s" % str(stage_plan['patch_size']),
            ";%s" % str([str("%03.2f" % i) for i in patch_size_in_mm]),
            ";%s" % str(stage_plan['median_patient_size_in_voxels']),
            ";%s" % str([str("%03.2f" % i) for i in median_patient_size_in_mm]),
            ";%s" % str([str("%03.2f" % i) for i in stage_plan['current_spacing']]),
            ";%s" % str([str("%03.2f" % i) for i in stage_plan['original_spacing']]),
            ";%s" % str(stage_plan['pool_op_kernel_sizes']),
            ";%s" % str(stage_plan['conv_kernel_sizes']),
            ";%s" % str(plans['data_identifier']),
            "\n",
        ]
        f.write("".join(row))
if __name__ == "__main__":
    # Development utility: crawl all Task* folders under base_dir and dump a
    # CSV summary of every plans pickle found (BrainTumor/MSSeg are excluded).
    base_dir = './'  # could also point at preprocessing_output_dir
    task_dirs = [d for d in subdirs(base_dir, join=False, prefix="Task")
                 if d.find("BrainTumor") == -1 and d.find("MSSeg") == -1]
    print("found %d tasks" % len(task_dirs))
    with open("2019_02_06_plans_summary.csv", 'w') as f:
        f.write("task;plans_file;stage;batch_size;num_pool_per_axis;patch_size;patch_size(mm);median_patient_size_in_voxels;median_patient_size_in_mm;current_spacing;original_spacing;pool_op_kernel_sizes;conv_kernel_sizes\n")
        for task_dir in task_dirs:
            print(task_dir)
            task_path = join(base_dir, task_dir)
            plans_files = [fn for fn in subfiles(task_path, suffix=".pkl", join=False)
                           if fn.find("_plans_") != -1 and fn.find("Dgx2") == -1]
            for plans_file in plans_files:
                write_plans_to_file(f, join(task_path, plans_file))
            f.write("\n")
| 4,189
| 51.375
| 225
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/experiment_planning/experiment_planner_baseline_2DUNet.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import nnunet
import numpy as np
from batchgenerators.utilities.file_and_folder_operations import load_pickle, subfiles
from multiprocessing.pool import Pool
from nnunet.configuration import default_num_threads
from nnunet.experiment_planning.common_utils import get_pool_and_conv_props
from nnunet.experiment_planning.experiment_planner_baseline_3DUNet import ExperimentPlanner
from nnunet.experiment_planning.utils import add_classes_in_slice_info
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.paths import *
from nnunet.preprocessing.preprocessing import PreprocessorFor2D
from nnunet.training.model_restore import recursive_find_python_class
class ExperimentPlanner2D(ExperimentPlanner):
    """Experiment planner for the 2D U-Net configuration.

    Reuses the 3D planner's dataset fingerprint but produces exactly one stage
    whose patch lives in the two in-plane axes (transposed axes 1 and 2); the
    batch size is derived from Generic_UNet's 2D VRAM reference.
    """

    def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
        super(ExperimentPlanner2D, self).__init__(folder_with_cropped_data,
                                                  preprocessed_output_folder)
        # 2D-specific identifiers/files so 2D and 3D plans can coexist
        self.data_identifier = default_data_identifier + "_2D"
        self.plans_fname = join(self.preprocessed_output_folder, "nnUNetPlans" + "_plans_2D.pkl")
        # 2D networks use more/larger feature maps than the 3D defaults
        self.unet_base_num_features = 30
        self.unet_max_num_filters = 512
        self.unet_max_numpool = 999

        self.preprocessor_name = "PreprocessorFor2D"

    def get_properties_for_stage(self, current_spacing, original_spacing, original_shape, num_cases,
                                 num_modalities, num_classes):
        """Compute the 2D stage plan (patch size, batch size, pool/conv kernels).

        :param current_spacing: target spacing of this stage (transposed axis order)
        :param original_spacing: original median spacing (transposed axis order)
        :param original_shape: median dataset shape at original_spacing
        :param num_cases: number of training cases (bounds the batch size)
        :param num_modalities: number of input channels
        :param num_classes: number of output classes (incl. background)
        :return: dict describing the stage
        :raises RuntimeError: if even the minimum batch size does not fit the VRAM budget
        """
        new_median_shape = np.round(original_spacing / current_spacing * original_shape).astype(int)

        dataset_num_voxels = np.prod(new_median_shape, dtype=np.int64) * num_cases
        # 2D patches are taken from the two in-plane axes; axis 0 is out-of-plane
        input_patch_size = new_median_shape[1:]

        network_numpool, net_pool_kernel_sizes, net_conv_kernel_sizes, input_patch_size, \
        shape_must_be_divisible_by = get_pool_and_conv_props(current_spacing[1:], input_patch_size,
                                                             self.unet_featuremap_min_edge_length,
                                                             self.unet_max_numpool)

        # scale the reference batch size by the estimated VRAM use of this configuration
        estimated_gpu_ram_consumption = Generic_UNet.compute_approx_vram_consumption(input_patch_size,
                                                                                     network_numpool,
                                                                                     self.unet_base_num_features,
                                                                                     self.unet_max_num_filters,
                                                                                     num_modalities, num_classes,
                                                                                     net_pool_kernel_sizes,
                                                                                     conv_per_stage=self.conv_per_stage)

        batch_size = int(np.floor(Generic_UNet.use_this_for_batch_size_computation_2D /
                                  estimated_gpu_ram_consumption * Generic_UNet.DEFAULT_BATCH_SIZE_2D))
        if batch_size < self.unet_min_batch_size:
            raise RuntimeError("This framework is not made to process patches this large. We will add patch-based "
                               "2D networks later. Sorry for the inconvenience")

        # check if batch size is too large (more than 5 % of dataset)
        max_batch_size = np.round(self.batch_size_covers_max_percent_of_dataset * dataset_num_voxels /
                                  np.prod(input_patch_size, dtype=np.int64)).astype(int)
        batch_size = min(batch_size, max_batch_size)

        plan = {
            'batch_size': batch_size,
            'num_pool_per_axis': network_numpool,
            'patch_size': input_patch_size,
            'median_patient_size_in_voxels': new_median_shape,
            'current_spacing': current_spacing,
            'original_spacing': original_spacing,
            'pool_op_kernel_sizes': net_pool_kernel_sizes,
            'conv_kernel_sizes': net_conv_kernel_sizes,
            'do_dummy_2D_data_aug': False
        }
        return plan

    def plan_experiment(self):
        """Derive the single-stage 2D plan from the dataset fingerprint and save it
        to self.plans_fname (mirrors ExperimentPlanner.plan_experiment)."""
        use_nonzero_mask_for_normalization = self.determine_whether_to_use_mask_for_norm()
        print("Are we using the nonzero mask for normalization?", use_nonzero_mask_for_normalization)

        spacings = self.dataset_properties['all_spacings']
        sizes = self.dataset_properties['all_sizes']
        all_classes = self.dataset_properties['all_classes']
        modalities = self.dataset_properties['modalities']
        num_modalities = len(list(modalities.keys()))

        target_spacing = self.get_target_spacing()
        new_shapes = np.array([np.array(i) / target_spacing * np.array(j) for i, j in zip(spacings, sizes)])

        # transpose so that the axis with the worst (largest) spacing comes first
        max_spacing_axis = np.argmax(target_spacing)
        remaining_axes = [i for i in list(range(3)) if i != max_spacing_axis]
        self.transpose_forward = [max_spacing_axis] + remaining_axes
        self.transpose_backward = [np.argwhere(np.array(self.transpose_forward) == i)[0][0] for i in range(3)]

        # we base our calculations on the median shape of the datasets
        median_shape = np.median(np.vstack(new_shapes), 0)
        print("the median shape of the dataset is ", median_shape)

        max_shape = np.max(np.vstack(new_shapes), 0)
        print("the max shape in the dataset is ", max_shape)
        min_shape = np.min(np.vstack(new_shapes), 0)
        print("the min shape in the dataset is ", min_shape)

        print("we don't want feature maps smaller than ", self.unet_featuremap_min_edge_length, " in the bottleneck")

        # how many stages will the image pyramid have? (always one for 2D)
        self.plans_per_stage = []

        target_spacing_transposed = np.array(target_spacing)[self.transpose_forward]
        median_shape_transposed = np.array(median_shape)[self.transpose_forward]
        print("the transposed median shape of the dataset is ", median_shape_transposed)
        self.plans_per_stage.append(
            self.get_properties_for_stage(target_spacing_transposed, target_spacing_transposed, median_shape_transposed,
                                          num_cases=len(self.list_of_cropped_npz_files),
                                          num_modalities=num_modalities,
                                          num_classes=len(all_classes) + 1),
        )

        print(self.plans_per_stage)

        self.plans_per_stage = self.plans_per_stage[::-1]
        self.plans_per_stage = {i: self.plans_per_stage[i] for i in range(len(self.plans_per_stage))}  # convert to dict

        normalization_schemes = self.determine_normalization_scheme()
        # deprecated
        only_keep_largest_connected_component, min_size_per_class, min_region_size_per_class = None, None, None

        # these are independent of the stage
        plans = {'num_stages': len(list(self.plans_per_stage.keys())), 'num_modalities': num_modalities,
                 'modalities': modalities, 'normalization_schemes': normalization_schemes,
                 'dataset_properties': self.dataset_properties, 'list_of_npz_files': self.list_of_cropped_npz_files,
                 'original_spacings': spacings, 'original_sizes': sizes,
                 'preprocessed_data_folder': self.preprocessed_output_folder, 'num_classes': len(all_classes),
                 'all_classes': all_classes, 'base_num_features': self.unet_base_num_features,
                 'use_mask_for_norm': use_nonzero_mask_for_normalization,
                 'keep_only_largest_region': only_keep_largest_connected_component,
                 'min_region_size_per_class': min_region_size_per_class, 'min_size_per_class': min_size_per_class,
                 'transpose_forward': self.transpose_forward, 'transpose_backward': self.transpose_backward,
                 'data_identifier': self.data_identifier, 'plans_per_stage': self.plans_per_stage,
                 'preprocessor_name': self.preprocessor_name,
                 # consistency with the 3D ExperimentPlanner, which records this key
                 'conv_per_stage': self.conv_per_stage,
                 }

        self.plans = plans
        self.save_my_plans()
| 8,764
| 54.125786
| 120
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/experiment_planning/experiment_planner_baseline_3DUNet.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
from collections import OrderedDict
from copy import deepcopy
import nnunet
import numpy as np
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.configuration import default_num_threads
from nnunet.experiment_planning.DatasetAnalyzer import DatasetAnalyzer
from nnunet.experiment_planning.common_utils import get_pool_and_conv_props_poolLateV2
from nnunet.experiment_planning.utils import create_lists_from_splitted_dataset
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.paths import *
from nnunet.preprocessing.cropping import get_case_identifier_from_npz
from nnunet.training.model_restore import recursive_find_python_class
class ExperimentPlanner(object):
    def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
        """
        :param folder_with_cropped_data: must contain the cropped .npz cases and dataset_properties.pkl
        :param preprocessed_output_folder: where the plans pickle will be written
        """
        self.folder_with_cropped_data = folder_with_cropped_data
        self.preprocessed_output_folder = preprocessed_output_folder
        self.list_of_cropped_npz_files = subfiles(self.folder_with_cropped_data, True, None, ".npz", True)

        self.preprocessor_name = "GenericPreprocessor"

        assert isfile(join(self.folder_with_cropped_data, "dataset_properties.pkl")), \
            "folder_with_cropped_data must contain dataset_properties.pkl"
        self.dataset_properties = load_pickle(join(self.folder_with_cropped_data, "dataset_properties.pkl"))

        self.plans_per_stage = OrderedDict()
        self.plans = OrderedDict()
        self.plans_fname = join(self.preprocessed_output_folder, "nnUNetPlans" + "fixed_plans_3D.pkl")
        self.data_identifier = default_data_identifier

        # identity transpose by default; plan_experiment may reorder axes
        self.transpose_forward = [0, 1, 2]
        self.transpose_backward = [0, 1, 2]

        # network/planning hyperparameters
        self.unet_base_num_features = Generic_UNet.BASE_NUM_FEATURES_3D
        self.unet_max_num_filters = 320
        self.unet_max_numpool = 999
        self.unet_min_batch_size = 2
        self.unet_featuremap_min_edge_length = 4

        self.target_spacing_percentile = 50
        self.anisotropy_threshold = 3
        self.how_much_of_a_patient_must_the_network_see_at_stage0 = 4  # 1/4 of a patient
        self.batch_size_covers_max_percent_of_dataset = 0.05  # all samples in the batch together cannot cover more
        # than 5% of the entire dataset

        self.conv_per_stage = 2
def get_target_spacing(self):
spacings = self.dataset_properties['all_spacings']
# target = np.median(np.vstack(spacings), 0)
# if target spacing is very anisotropic we may want to not downsample the axis with the worst spacing
# uncomment after mystery task submission
"""worst_spacing_axis = np.argmax(target)
if max(target) > (2.5 * min(target)):
spacings_of_that_axis = np.vstack(spacings)[:, worst_spacing_axis]
target_spacing_of_that_axis = np.percentile(spacings_of_that_axis, 5)
target[worst_spacing_axis] = target_spacing_of_that_axis"""
target = np.percentile(np.vstack(spacings), self.target_spacing_percentile, 0)
return target
def save_my_plans(self):
with open(self.plans_fname, 'wb') as f:
pickle.dump(self.plans, f)
def load_my_plans(self):
self.plans = load_pickle(self.plans_fname)
self.plans_per_stage = self.plans['plans_per_stage']
self.dataset_properties = self.plans['dataset_properties']
self.transpose_forward = self.plans['transpose_forward']
self.transpose_backward = self.plans['transpose_backward']
    def determine_postprocessing(self):
        # Deprecated no-op: training-data-based postprocessing was removed from
        # the pipeline. The string literal below is the old implementation kept
        # for reference only; it is never executed.
        pass
        """
        Spoiler: This is unused, postprocessing was removed. Ignore it.
        :return:
        print("determining postprocessing...")
        props_per_patient = self.dataset_properties['segmentation_props_per_patient']
        all_region_keys = [i for k in props_per_patient.keys() for i in props_per_patient[k]['only_one_region'].keys()]
        all_region_keys = list(set(all_region_keys))
        only_keep_largest_connected_component = OrderedDict()
        for r in all_region_keys:
            all_results = [props_per_patient[k]['only_one_region'][r] for k in props_per_patient.keys()]
            only_keep_largest_connected_component[tuple(r)] = all(all_results)
        print("Postprocessing: only_keep_largest_connected_component", only_keep_largest_connected_component)
        all_classes = self.dataset_properties['all_classes']
        classes = [i for i in all_classes if i > 0]
        props_per_patient = self.dataset_properties['segmentation_props_per_patient']
        min_size_per_class = OrderedDict()
        for c in classes:
            all_num_voxels = []
            for k in props_per_patient.keys():
                all_num_voxels.append(props_per_patient[k]['volume_per_class'][c])
            if len(all_num_voxels) > 0:
                min_size_per_class[c] = np.percentile(all_num_voxels, 1) * MIN_SIZE_PER_CLASS_FACTOR
            else:
                min_size_per_class[c] = np.inf
        min_region_size_per_class = OrderedDict()
        for c in classes:
            region_sizes = [l for k in props_per_patient for l in props_per_patient[k]['region_volume_per_class'][c]]
            if len(region_sizes) > 0:
                min_region_size_per_class[c] = min(region_sizes)
                # we don't need that line but better safe than sorry, right?
                min_region_size_per_class[c] = min(min_region_size_per_class[c], min_size_per_class[c])
            else:
                min_region_size_per_class[c] = 0
        print("Postprocessing: min_size_per_class", min_size_per_class)
        print("Postprocessing: min_region_size_per_class", min_region_size_per_class)
        return only_keep_largest_connected_component, min_size_per_class, min_region_size_per_class
        """
    def get_properties_for_stage(self, current_spacing, original_spacing, original_shape, num_cases,
                                 num_modalities, num_classes):
        """
        Computation of input patch size starts out with the new median shape (in voxels) of a dataset. This is
        opposed to prior experiments where I based it on the median size in mm. The rationale behind this is that
        for some organ of interest the acquisition method will most likely be chosen such that the field of view and
        voxel resolution go hand in hand to show the doctor what they need to see. This assumption may be violated
        for some modalities with anisotropy (cine MRI) but we will have t live with that. In future experiments I
        will try to 1) base input patch size match aspect ratio of input size in mm (instead of voxels) and 2) to
        try to enforce that we see the same 'distance' in all directions (try to maintain equal size in mm of patch)

        The patches created here attempt keep the aspect ratio of the new_median_shape

        :param current_spacing: target spacing of this stage (transposed axis order)
        :param original_spacing: original median spacing of the dataset (transposed axis order)
        :param original_shape: median dataset shape at original_spacing
        :param num_cases: number of training cases (used to bound the batch size)
        :param num_modalities: number of input channels
        :param num_classes: number of output classes (incl. background)
        :return: dict with batch_size, patch_size, pooling/conv kernel sizes etc. for this stage
        """
        new_median_shape = np.round(original_spacing / current_spacing * original_shape).astype(int)
        dataset_num_voxels = np.prod(new_median_shape) * num_cases

        # the next line is what we had before as a default. The patch size had the same aspect ratio as the median shape of a patient. We swapped t
        # input_patch_size = new_median_shape

        # compute how many voxels are one mm
        input_patch_size = 1 / np.array(current_spacing)

        # normalize voxels per mm
        input_patch_size /= input_patch_size.mean()

        # create an isotropic patch of size 512x512x512mm
        input_patch_size *= 1 / min(input_patch_size) * 512  # to get a starting value
        input_patch_size = np.round(input_patch_size).astype(int)

        # clip it to the median shape of the dataset because patches larger then that make not much sense
        input_patch_size = [min(i, j) for i, j in zip(input_patch_size, new_median_shape)]

        network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, \
        shape_must_be_divisible_by = get_pool_and_conv_props_poolLateV2(input_patch_size,
                                                                        self.unet_featuremap_min_edge_length,
                                                                        self.unet_max_numpool,
                                                                        current_spacing)

        # shrink the patch (one divisibility step at a time, on the axis that is
        # largest relative to the median shape) until the estimated VRAM
        # consumption fits the reference budget
        ref = Generic_UNet.use_this_for_batch_size_computation_3D
        here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis,
                                                            self.unet_base_num_features,
                                                            self.unet_max_num_filters, num_modalities,
                                                            num_classes,
                                                            pool_op_kernel_sizes, conv_per_stage=self.conv_per_stage)
        while here > ref:
            axis_to_be_reduced = np.argsort(new_shp / new_median_shape)[-1]

            tmp = deepcopy(new_shp)
            tmp[axis_to_be_reduced] -= shape_must_be_divisible_by[axis_to_be_reduced]
            _, _, _, _, shape_must_be_divisible_by_new = \
                get_pool_and_conv_props_poolLateV2(tmp,
                                                   self.unet_featuremap_min_edge_length,
                                                   self.unet_max_numpool,
                                                   current_spacing)
            new_shp[axis_to_be_reduced] -= shape_must_be_divisible_by_new[axis_to_be_reduced]

            # we have to recompute numpool now:
            network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, \
            shape_must_be_divisible_by = get_pool_and_conv_props_poolLateV2(new_shp,
                                                                            self.unet_featuremap_min_edge_length,
                                                                            self.unet_max_numpool,
                                                                            current_spacing)

            here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis,
                                                                self.unet_base_num_features,
                                                                self.unet_max_num_filters, num_modalities,
                                                                num_classes, pool_op_kernel_sizes,
                                                                conv_per_stage=self.conv_per_stage)
            # print(new_shp)

        input_patch_size = new_shp

        batch_size = Generic_UNet.DEFAULT_BATCH_SIZE_3D  # This is what works with 128**3
        batch_size = int(np.floor(max(ref / here, 1) * batch_size))

        # check if batch size is too large
        max_batch_size = np.round(self.batch_size_covers_max_percent_of_dataset * dataset_num_voxels /
                                  np.prod(input_patch_size, dtype=np.int64)).astype(int)
        max_batch_size = max(max_batch_size, self.unet_min_batch_size)
        batch_size = min(batch_size, max_batch_size)

        # enable dummy 2D augmentation for markedly anisotropic patches
        do_dummy_2D_data_aug = (max(input_patch_size) / input_patch_size[
            0]) > self.anisotropy_threshold

        plan = {
            'batch_size': batch_size,
            'num_pool_per_axis': network_num_pool_per_axis,
            'patch_size': input_patch_size,
            'median_patient_size_in_voxels': new_median_shape,
            'current_spacing': current_spacing,
            'original_spacing': original_spacing,
            'do_dummy_2D_data_aug': do_dummy_2D_data_aug,
            'pool_op_kernel_sizes': pool_op_kernel_sizes,
            'conv_kernel_sizes': conv_kernel_sizes,
        }
        return plan
def plan_experiment(self):
use_nonzero_mask_for_normalization = self.determine_whether_to_use_mask_for_norm()
print("Are we using the nonzero mask for normalizaion?", use_nonzero_mask_for_normalization)
spacings = self.dataset_properties['all_spacings']
sizes = self.dataset_properties['all_sizes']
all_classes = self.dataset_properties['all_classes']
modalities = self.dataset_properties['modalities']
num_modalities = len(list(modalities.keys()))
target_spacing = self.get_target_spacing()
new_shapes = [np.array(i) / target_spacing * np.array(j) for i, j in zip(spacings, sizes)]
max_spacing_axis = np.argmax(target_spacing)
remaining_axes = [i for i in list(range(3)) if i != max_spacing_axis]
self.transpose_forward = [max_spacing_axis] + remaining_axes
self.transpose_backward = [np.argwhere(np.array(self.transpose_forward) == i)[0][0] for i in range(3)]
# we base our calculations on the median shape of the datasets
median_shape = np.median(np.vstack(new_shapes), 0)
print("the median shape of the dataset is ", median_shape)
max_shape = np.max(np.vstack(new_shapes), 0)
print("the max shape in the dataset is ", max_shape)
min_shape = np.min(np.vstack(new_shapes), 0)
print("the min shape in the dataset is ", min_shape)
print("we don't want feature maps smaller than ", self.unet_featuremap_min_edge_length, " in the bottleneck")
# how many stages will the image pyramid have?
self.plans_per_stage = list()
target_spacing_transposed = np.array(target_spacing)[self.transpose_forward]
median_shape_transposed = np.array(median_shape)[self.transpose_forward]
print("the transposed median shape of the dataset is ", median_shape_transposed)
print("generating configuration for 3d_fullres")
self.plans_per_stage.append(self.get_properties_for_stage(target_spacing_transposed, target_spacing_transposed,
median_shape_transposed,
len(self.list_of_cropped_npz_files),
num_modalities, len(all_classes) + 1))
# thanks Zakiyi (https://github.com/MIC-DKFZ/nnUNet/issues/61) for spotting this bug :-)
# if np.prod(self.plans_per_stage[-1]['median_patient_size_in_voxels'], dtype=np.int64) / \
# architecture_input_voxels < HOW_MUCH_OF_A_PATIENT_MUST_THE_NETWORK_SEE_AT_STAGE0:
architecture_input_voxels_here = np.prod(self.plans_per_stage[-1]['patch_size'], dtype=np.int64)
if np.prod(self.plans_per_stage[-1]['median_patient_size_in_voxels'], dtype=np.int64) / \
architecture_input_voxels_here < self.how_much_of_a_patient_must_the_network_see_at_stage0:
more = False
else:
more = True
if more:
print("generating configuration for 3d_lowres")
# if we are doing more than one stage then we want the lowest stage to have exactly
# HOW_MUCH_OF_A_PATIENT_MUST_THE_NETWORK_SEE_AT_STAGE0 (this is 4 by default so the number of voxels in the
# median shape of the lowest stage must be 4 times as much as the network can process at once (128x128x128 by
# default). Problem is that we are downsampling higher resolution axes before we start downsampling the
# out-of-plane axis. We could probably/maybe do this analytically but I am lazy, so here
# we do it the dumb way
lowres_stage_spacing = deepcopy(target_spacing)
num_voxels = np.prod(median_shape, dtype=np.int64)
while num_voxels > self.how_much_of_a_patient_must_the_network_see_at_stage0 * architecture_input_voxels_here:
max_spacing = max(lowres_stage_spacing)
if np.any((max_spacing / lowres_stage_spacing) > 2):
lowres_stage_spacing[(max_spacing / lowres_stage_spacing) > 2] \
*= 1.01
else:
lowres_stage_spacing *= 1.01
num_voxels = np.prod(target_spacing / lowres_stage_spacing * median_shape, dtype=np.int64)
lowres_stage_spacing_transposed = np.array(lowres_stage_spacing)[self.transpose_forward]
new = self.get_properties_for_stage(lowres_stage_spacing_transposed, target_spacing_transposed,
median_shape_transposed,
len(self.list_of_cropped_npz_files),
num_modalities, len(all_classes) + 1)
architecture_input_voxels_here = np.prod(new['patch_size'], dtype=np.int64)
if 2 * np.prod(new['median_patient_size_in_voxels'], dtype=np.int64) < np.prod(
self.plans_per_stage[0]['median_patient_size_in_voxels'], dtype=np.int64):
self.plans_per_stage.append(new)
self.plans_per_stage = self.plans_per_stage[::-1]
self.plans_per_stage = {i: self.plans_per_stage[i] for i in range(len(self.plans_per_stage))} # convert to dict
print(self.plans_per_stage)
print("transpose forward", self.transpose_forward)
print("transpose backward", self.transpose_backward)
normalization_schemes = self.determine_normalization_scheme()
only_keep_largest_connected_component, min_size_per_class, min_region_size_per_class = None, None, None
# removed training data based postprocessing. This is deprecated
# these are independent of the stage
plans = {'num_stages': len(list(self.plans_per_stage.keys())), 'num_modalities': num_modalities,
'modalities': modalities, 'normalization_schemes': normalization_schemes,
'dataset_properties': self.dataset_properties, 'list_of_npz_files': self.list_of_cropped_npz_files,
'original_spacings': spacings, 'original_sizes': sizes,
'preprocessed_data_folder': self.preprocessed_output_folder, 'num_classes': len(all_classes),
'all_classes': all_classes, 'base_num_features': self.unet_base_num_features,
'use_mask_for_norm': use_nonzero_mask_for_normalization,
'keep_only_largest_region': only_keep_largest_connected_component,
'min_region_size_per_class': min_region_size_per_class, 'min_size_per_class': min_size_per_class,
'transpose_forward': self.transpose_forward, 'transpose_backward': self.transpose_backward,
'data_identifier': self.data_identifier, 'plans_per_stage': self.plans_per_stage,
'preprocessor_name': self.preprocessor_name,
'conv_per_stage': self.conv_per_stage,
}
self.plans = plans
self.save_my_plans()
def determine_normalization_scheme(self):
    """Map each modality index to the name of its normalization scheme.

    A modality labelled "CT" or "ct" gets the "CT" scheme; every other
    modality falls back to "nonCT".

    :return: OrderedDict {modality index -> "CT" | "nonCT"}
    """
    modality_names = self.dataset_properties['modalities']
    scheme_per_modality = OrderedDict()
    for idx in range(len(list(modality_names.keys()))):
        is_ct = modality_names[idx] in ("CT", "ct")
        scheme_per_modality[idx] = "CT" if is_ct else "nonCT"
    return scheme_per_modality
def save_properties_of_cropped(self, case_identifier, properties):
    """Pickle *properties* to <folder_with_cropped_data>/<case_identifier>.pkl."""
    target = join(self.folder_with_cropped_data, "%s.pkl" % case_identifier)
    with open(target, 'wb') as fh:
        pickle.dump(properties, fh)
def load_properties_of_cropped(self, case_identifier):
    """Unpickle and return the properties stored for *case_identifier*."""
    source = join(self.folder_with_cropped_data, "%s.pkl" % case_identifier)
    with open(source, 'rb') as fh:
        return pickle.load(fh)
def determine_whether_to_use_mask_for_norm(self):
    """Decide per modality whether normalization is restricted to the nonzero
    mask, and persist that decision into every cropped case's pkl file.

    The mask is only used when cropping shrank the images substantially
    (median size reduction < 3/4), which indicates brain-extracted data such
    as BraTS/ISLES where only the brain region should be normalized. CT
    modalities never use the mask.

    :return: OrderedDict {modality index -> bool}
    """
    # only use the nonzero mask for normalization of the cropping based on it resulted in a decrease in
    # image size (this is an indication that the data is something like brats/isles and then we want to
    # normalize in the brain region only)
    modalities = self.dataset_properties['modalities']
    num_modalities = len(list(modalities.keys()))
    use_nonzero_mask_for_norm = OrderedDict()
    for i in range(num_modalities):
        if "CT" in modalities[i]:
            # CT is never mask-normalized
            use_nonzero_mask_for_norm[i] = False
        else:
            # collect the per-case size reductions produced by cropping
            all_size_reductions = []
            for k in self.dataset_properties['size_reductions'].keys():
                all_size_reductions.append(self.dataset_properties['size_reductions'][k])
            if np.median(all_size_reductions) < 3 / 4.:
                print("using nonzero mask for normalization")
                use_nonzero_mask_for_norm[i] = True
            else:
                print("not using nonzero mask for normalization")
                use_nonzero_mask_for_norm[i] = False
    # write the decision into every case's properties pkl so the preprocessor can read it
    for c in self.list_of_cropped_npz_files:
        case_identifier = get_case_identifier_from_npz(c)
        properties = self.load_properties_of_cropped(case_identifier)
        properties['use_nonzero_mask_for_norm'] = use_nonzero_mask_for_norm
        self.save_properties_of_cropped(case_identifier, properties)
    use_nonzero_mask_for_normalization = use_nonzero_mask_for_norm
    return use_nonzero_mask_for_normalization
def write_normalization_scheme_to_patients(self):
    """
    This is used for test set preprocessing: copies the per-modality
    'use_mask_for_norm' decision from self.plans into every cropped case's
    pkl file so test cases are normalized the same way as the training data.
    :return:
    """
    for c in self.list_of_cropped_npz_files:
        case_identifier = get_case_identifier_from_npz(c)
        properties = self.load_properties_of_cropped(case_identifier)
        properties['use_nonzero_mask_for_norm'] = self.plans['use_mask_for_norm']
        self.save_properties_of_cropped(case_identifier, properties)
def run_preprocessing(self, num_threads):
    """Resample/normalize all cropped cases according to self.plans.

    Refreshes the ground-truth segmentations in the preprocessed output
    folder, resolves the preprocessor class by name (so subclasses can swap it
    via self.preprocessor_name) and runs it once per stage with the target
    spacings from the plans.

    :param num_threads: int, or list/tuple with one entry per stage
    """
    if os.path.isdir(join(self.preprocessed_output_folder, "gt_segmentations")):
        shutil.rmtree(join(self.preprocessed_output_folder, "gt_segmentations"))
    shutil.copytree(join(self.folder_with_cropped_data, "gt_segmentations"),
                    join(self.preprocessed_output_folder, "gt_segmentations"))
    normalization_schemes = self.plans['normalization_schemes']
    use_nonzero_mask_for_normalization = self.plans['use_mask_for_norm']
    intensityproperties = self.plans['dataset_properties']['intensityproperties']
    # look up the preprocessor class by name inside nnunet.preprocessing
    preprocessor_class = recursive_find_python_class([join(nnunet.__path__[0], "preprocessing")],
                                                     self.preprocessor_name, current_module="nnunet.preprocessing")
    assert preprocessor_class is not None
    preprocessor = preprocessor_class(normalization_schemes, use_nonzero_mask_for_normalization,
                                      self.transpose_forward,
                                      intensityproperties)
    target_spacings = [i["current_spacing"] for i in self.plans_per_stage.values()]
    # normalize num_threads to one entry per stage
    if self.plans['num_stages'] > 1 and not isinstance(num_threads, (list, tuple)):
        num_threads = (default_num_threads, num_threads)
    elif self.plans['num_stages'] == 1 and isinstance(num_threads, (list, tuple)):
        num_threads = num_threads[-1]
    preprocessor.run(target_spacings, self.folder_with_cropped_data, self.preprocessed_output_folder,
                     self.plans['data_identifier'], num_threads)
if __name__ == "__main__":
    # Stand-alone planning (and optional preprocessing) for the baseline 3D U-Net.
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("-t", "--task_ids", nargs="+", help="list of int")
    parser.add_argument("-p", action="store_true", help="set this if you actually want to run the preprocessing. If "
                                                        "this is not set then this script will only create the plans file")
    parser.add_argument("-tl", type=int, required=False, default=8, help="num_threads_lowres")
    parser.add_argument("-tf", type=int, required=False, default=8, help="num_threads_fullres")

    args = parser.parse_args()
    task_ids = args.task_ids
    run_preprocessing = args.p
    tl = args.tl
    tf = args.tf

    # resolve each integer id to the (unique) TaskXXX_* folder name
    tasks = []
    for i in task_ids:
        i = int(i)
        candidates = subdirs(nnUNet_cropped_data, prefix="Task%03.0d" % i, join=False)
        assert len(candidates) == 1
        tasks.append(candidates[0])

    for t in tasks:
        try:
            print("\n\n\n", t)
            cropped_out_dir = os.path.join(nnUNet_cropped_data, t)
            preprocessing_output_dir_this_task = os.path.join(preprocessing_output_dir, t)
            splitted_4d_output_dir_task = os.path.join(nnUNet_raw_data, t)
            lists, modalities = create_lists_from_splitted_dataset(splitted_4d_output_dir_task)

            dataset_analyzer = DatasetAnalyzer(cropped_out_dir, overwrite=False)
            _ = dataset_analyzer.analyze_dataset()  # this will write output files that will be used by the ExperimentPlanner

            maybe_mkdir_p(preprocessing_output_dir_this_task)
            shutil.copy(join(cropped_out_dir, "dataset_properties.pkl"), preprocessing_output_dir_this_task)
            shutil.copy(join(nnUNet_raw_data, t, "dataset.json"), preprocessing_output_dir_this_task)

            threads = (tl, tf)
            print("number of threads: ", threads, "\n")

            exp_planner = ExperimentPlanner(cropped_out_dir, preprocessing_output_dir_this_task)
            exp_planner.plan_experiment()
            if run_preprocessing:
                exp_planner.run_preprocessing(threads)
        except Exception as e:
            # NOTE(review): errors are printed and swallowed so the remaining tasks
            # still run; a failed task does not abort the script or set an exit code
            print(e)
| 26,490
| 52.73428
| 147
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/experiment_planning/nnUNet_plan_and_preprocess.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnunet
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.experiment_planning.DatasetAnalyzer import DatasetAnalyzer
from nnunet.experiment_planning.utils import crop
from nnunet.paths import *
import shutil
from nnunet.utilities.task_name_id_conversion import convert_id_to_task_name
from nnunet.preprocessing.sanity_checks import verify_dataset_integrity
from nnunet.training.model_restore import recursive_find_python_class
def main():
    """Entry point of nnUNet_plan_and_preprocess.

    For every given task id: optionally verifies the raw data, crops it,
    computes the dataset fingerprint, runs the requested 3D and/or 2D
    experiment planners and, unless -no_pp is set, the actual preprocessing.
    """
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("-t", "--task_ids", nargs="+", help="List of integers belonging to the task ids you wish to run"
                                                            " experiment planning and preprocessing for. Each of these "
                                                            "ids must, have a matching folder 'TaskXXX_' in the raw "
                                                            "data folder")
    parser.add_argument("-pl3d", "--planner3d", type=str, default="ExperimentPlanner3D_v21",
                        help="Name of the ExperimentPlanner class for the full resolution 3D U-Net and U-Net cascade. "
                             "Default is ExperimentPlanner3D_v21. Can be 'None', in which case these U-Nets will not be "
                             "configured")
    parser.add_argument("-pl2d", "--planner2d", type=str, default="ExperimentPlanner2D_v21",
                        help="Name of the ExperimentPlanner class for the 2D U-Net. Default is ExperimentPlanner2D_v21. "
                             "Can be 'None', in which case this U-Net will not be configured")
    parser.add_argument("-no_pp", action="store_true",
                        help="Set this flag if you dont want to run the preprocessing. If this is set then this script "
                             "will only run the experiment planning and create the plans file")
    parser.add_argument("-tl", type=int, required=False, default=8,
                        help="Number of processes used for preprocessing the low resolution data for the 3D low "
                             "resolution U-Net. This can be larger than -tf. Don't overdo it or you will run out of "
                             "RAM")
    parser.add_argument("-tf", type=int, required=False, default=8,
                        help="Number of processes used for preprocessing the full resolution data of the 2D U-Net and "
                             "3D U-Net. Don't overdo it or you will run out of RAM")
    parser.add_argument("--verify_dataset_integrity", required=False, default=False, action="store_true",
                        help="set this flag to check the dataset integrity. This is useful and should be done once for "
                             "each dataset!")

    args = parser.parse_args()
    task_ids = args.task_ids
    dont_run_preprocessing = args.no_pp
    tl = args.tl
    tf = args.tf
    planner_name3d = args.planner3d
    planner_name2d = args.planner2d

    # the literal string "None" disables the corresponding planner
    if planner_name3d == "None":
        planner_name3d = None
    if planner_name2d == "None":
        planner_name2d = None

    # we need raw data: resolve ids to task names, optionally verify, then crop
    tasks = []
    for i in task_ids:
        i = int(i)
        task_name = convert_id_to_task_name(i)
        if args.verify_dataset_integrity:
            verify_dataset_integrity(join(nnUNet_raw_data, task_name))
        crop(task_name, False, tf)
        tasks.append(task_name)

    # resolve the planner classes by name so custom planner classes can be used
    search_in = join(nnunet.__path__[0], "experiment_planning")

    if planner_name3d is not None:
        planner_3d = recursive_find_python_class([search_in], planner_name3d, current_module="nnunet.experiment_planning")
        if planner_3d is None:
            raise RuntimeError("Could not find the Planner class %s. Make sure it is located somewhere in "
                               "nnunet.experiment_planning" % planner_name3d)
    else:
        planner_3d = None

    if planner_name2d is not None:
        planner_2d = recursive_find_python_class([search_in], planner_name2d, current_module="nnunet.experiment_planning")
        if planner_2d is None:
            raise RuntimeError("Could not find the Planner class %s. Make sure it is located somewhere in "
                               "nnunet.experiment_planning" % planner_name2d)
    else:
        planner_2d = None

    for t in tasks:
        print("\n\n\n", t)
        cropped_out_dir = os.path.join(nnUNet_cropped_data, t)
        preprocessing_output_dir_this_task = os.path.join(preprocessing_output_dir, t)
        #splitted_4d_output_dir_task = os.path.join(nnUNet_raw_data, t)
        #lists, modalities = create_lists_from_splitted_dataset(splitted_4d_output_dir_task)

        # we need to figure out if we need the intensity properties. We collect them only if one of the modalities is CT
        dataset_json = load_json(join(cropped_out_dir, 'dataset.json'))
        modalities = list(dataset_json["modality"].values())
        collect_intensityproperties = True if (("CT" in modalities) or ("ct" in modalities)) else False
        dataset_analyzer = DatasetAnalyzer(cropped_out_dir, overwrite=False, num_processes=tf)  # this class creates the fingerprint
        _ = dataset_analyzer.analyze_dataset(collect_intensityproperties)  # this will write output files that will be used by the ExperimentPlanner

        maybe_mkdir_p(preprocessing_output_dir_this_task)
        shutil.copy(join(cropped_out_dir, "dataset_properties.pkl"), preprocessing_output_dir_this_task)
        shutil.copy(join(nnUNet_raw_data, t, "dataset.json"), preprocessing_output_dir_this_task)

        threads = (tl, tf)
        print("number of threads: ", threads, "\n")

        if planner_3d is not None:
            exp_planner = planner_3d(cropped_out_dir, preprocessing_output_dir_this_task)
            exp_planner.plan_experiment()
            if not dont_run_preprocessing:  # double negative, yooo
                exp_planner.run_preprocessing(threads)
        if planner_2d is not None:
            exp_planner = planner_2d(cropped_out_dir, preprocessing_output_dir_this_task)
            exp_planner.plan_experiment()
            if not dont_run_preprocessing:  # double negative, yooo
                exp_planner.run_preprocessing(threads)
if __name__ == "__main__":
    # console entry point (nnUNet_plan_and_preprocess)
    main()
| 7,009
| 49.431655
| 148
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/experiment_planning/utils.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import pickle
import shutil
from collections import OrderedDict
from multiprocessing import Pool
import numpy as np
from batchgenerators.utilities.file_and_folder_operations import join, isdir, maybe_mkdir_p, subfiles, subdirs, isfile
from nnunet.configuration import default_num_threads
from nnunet.experiment_planning.DatasetAnalyzer import DatasetAnalyzer
from nnunet.experiment_planning.common_utils import split_4d_nifti
from nnunet.paths import nnUNet_raw_data, nnUNet_cropped_data, preprocessing_output_dir
from nnunet.preprocessing.cropping import ImageCropper
def split_4d(input_folder, num_processes=default_num_threads, overwrite_task_output_id=None):
    """Convert a Medical Segmentation Decathlon task (4D niftis) into nnU-Net's
    raw-data layout (one 3D nifti per modality with _0000-style suffixes).

    *input_folder* must be a TaskXX_<name> folder containing imagesTr, labelsTr
    and dataset.json. Output is written to nnUNet_raw_data/TaskXXX_<name>,
    where XXX is the 2-digit input id widened to 3 digits (or replaced by
    *overwrite_task_output_id*). An existing output folder is deleted first.
    Splitting runs in *num_processes* worker processes.
    """
    assert isdir(join(input_folder, "imagesTr")) and isdir(join(input_folder, "labelsTr")) and \
           isfile(join(input_folder, "dataset.json")), \
        "The input folder must be a valid Task folder from the Medical Segmentation Decathlon with at least the " \
        "imagesTr and labelsTr subfolders and the dataset.json file"

    # strip trailing slashes so split("/")[-1] yields the folder name
    while input_folder.endswith("/"):
        input_folder = input_folder[:-1]

    full_task_name = input_folder.split("/")[-1]

    assert full_task_name.startswith("Task"), "The input folder must point to a folder that starts with TaskXX_"

    first_underscore = full_task_name.find("_")
    assert first_underscore == 6, "Input folder start with TaskXX with XX being a 3-digit id: 00, 01, 02 etc"

    input_task_id = int(full_task_name[4:6])
    if overwrite_task_output_id is None:
        overwrite_task_output_id = input_task_id

    task_name = full_task_name[7:]

    output_folder = join(nnUNet_raw_data, "Task%03.0d_" % overwrite_task_output_id + task_name)

    if isdir(output_folder):
        shutil.rmtree(output_folder)

    files = []
    output_dirs = []

    maybe_mkdir_p(output_folder)
    for subdir in ["imagesTr", "imagesTs"]:
        curr_out_dir = join(output_folder, subdir)
        if not isdir(curr_out_dir):
            os.mkdir(curr_out_dir)
        curr_dir = join(input_folder, subdir)
        nii_files = [join(curr_dir, i) for i in os.listdir(curr_dir) if i.endswith(".nii.gz")]
        nii_files.sort()
        for n in nii_files:
            files.append(n)
            output_dirs.append(curr_out_dir)

    shutil.copytree(join(input_folder, "labelsTr"), join(output_folder, "labelsTr"))

    # split all collected niftis in parallel
    p = Pool(num_processes)
    p.starmap(split_4d_nifti, zip(files, output_dirs))
    p.close()
    p.join()
    shutil.copy(join(input_folder, "dataset.json"), output_folder)
def create_lists_from_splitted_dataset(base_folder_splitted):
    """Build per-case file lists from a task folder's dataset.json.

    :param base_folder_splitted: task folder containing dataset.json, imagesTr
        and labelsTr
    :return: tuple (lists, modalities) where each entry of *lists* holds the
        per-modality image paths (_0000 .. _000N suffixes) followed by the
        label path for one training case, and *modalities* maps the integer
        modality index to its name
    """
    with open(join(base_folder_splitted, "dataset.json")) as fh:
        dataset = json.load(fh)
    num_modalities = len(dataset['modality'].keys())

    case_lists = []
    for entry in dataset['training']:
        case_id = entry['image'].split("/")[-1][:-7]  # strip ".nii.gz"
        case_files = [join(base_folder_splitted, "imagesTr", case_id + "_%04.0d.nii.gz" % mod)
                      for mod in range(num_modalities)]
        case_files.append(join(base_folder_splitted, "labelsTr", entry['label'].split("/")[-1]))
        case_lists.append(case_files)

    modalities = {int(i): dataset['modality'][str(i)] for i in dataset['modality'].keys()}
    return case_lists, modalities
def create_lists_from_splitted_dataset_folder(folder):
    """Group the .nii.gz files in *folder* by case identifier.

    Unlike create_lists_from_splitted_dataset this does not rely on
    dataset.json; the grouping is derived from the file names alone.

    :param folder: folder with <caseID>_XXXX.nii.gz files
    :return: list with one sorted list of full paths per case
    """
    case_ids = get_caseIDs_from_splitted_dataset_folder(folder)
    return [subfiles(folder, prefix=cid, suffix=".nii.gz", join=True, sort=True)
            for cid in case_ids]
def get_caseIDs_from_splitted_dataset_folder(folder):
    """Return the unique, sorted case identifiers found in *folder*.

    All files must be .nii.gz and carry a 4-digit modality index, i.e. be
    named <caseID>_XXXX.nii.gz; the trailing 12 characters are stripped to
    recover the case id.
    """
    names = subfiles(folder, suffix=".nii.gz", join=False)
    stripped = [name[:-12] for name in names]
    return np.unique(stripped)
def crop(task_string, override=False, num_threads=default_num_threads):
    """Run ImageCropper over all training cases of a task.

    Builds the per-case file lists from the raw data of *task_string*, runs the
    cropper with *num_threads* processes and copies dataset.json next to the
    cropped output in nnUNet_cropped_data/<task>. With override=True an
    existing output folder is removed first.
    """
    cropped_out_dir = join(nnUNet_cropped_data, task_string)
    maybe_mkdir_p(cropped_out_dir)

    if override and isdir(cropped_out_dir):
        shutil.rmtree(cropped_out_dir)
        maybe_mkdir_p(cropped_out_dir)

    splitted_4d_output_dir_task = join(nnUNet_raw_data, task_string)
    lists, _ = create_lists_from_splitted_dataset(splitted_4d_output_dir_task)

    imgcrop = ImageCropper(num_threads, cropped_out_dir)
    imgcrop.run_cropping(lists, overwrite_existing=override)
    shutil.copy(join(nnUNet_raw_data, task_string, "dataset.json"), cropped_out_dir)
def analyze_dataset(task_string, override=False, collect_intensityproperties=True, num_processes=default_num_threads):
    """Run the DatasetAnalyzer (dataset fingerprint) on the cropped data of
    *task_string*; results are written into the cropped-data folder."""
    cropped_out_dir = join(nnUNet_cropped_data, task_string)
    dataset_analyzer = DatasetAnalyzer(cropped_out_dir, overwrite=override, num_processes=num_processes)
    _ = dataset_analyzer.analyze_dataset(collect_intensityproperties)
def plan_and_preprocess(task_string, processes_lowres=default_num_threads, processes_fullres=3, no_preprocessing=False):
    """Run 3D and 2D experiment planning (and optionally preprocessing) for one task.

    Copies the dataset fingerprint and dataset.json into the preprocessed
    folder, runs ExperimentPlanner (3D/cascade) and ExperimentPlanner2D, and
    afterwards stores per-case class-in-slice lookup info so the 2D dataloader
    can oversample classes without scanning whole patients at runtime.
    """
    from nnunet.experiment_planning.experiment_planner_baseline_2DUNet import ExperimentPlanner2D
    from nnunet.experiment_planning.experiment_planner_baseline_3DUNet import ExperimentPlanner

    preprocessing_output_dir_this_task_train = join(preprocessing_output_dir, task_string)
    cropped_out_dir = join(nnUNet_cropped_data, task_string)
    maybe_mkdir_p(preprocessing_output_dir_this_task_train)

    shutil.copy(join(cropped_out_dir, "dataset_properties.pkl"), preprocessing_output_dir_this_task_train)
    shutil.copy(join(nnUNet_raw_data, task_string, "dataset.json"), preprocessing_output_dir_this_task_train)

    exp_planner = ExperimentPlanner(cropped_out_dir, preprocessing_output_dir_this_task_train)
    exp_planner.plan_experiment()
    if not no_preprocessing:
        exp_planner.run_preprocessing((processes_lowres, processes_fullres))

    exp_planner = ExperimentPlanner2D(cropped_out_dir, preprocessing_output_dir_this_task_train)
    exp_planner.plan_experiment()
    if not no_preprocessing:
        exp_planner.run_preprocessing(processes_fullres)

    # write which class is in which slice to all training cases (required to speed up 2D Dataloader)
    # This is done for all data so that if we wanted to use them with 2D we could do so
    if not no_preprocessing:
        p = Pool(default_num_threads)

        # if there is more than one my_data_identifier (different branches) then this code will run for all of them if
        # they start with the same string. not problematic, but not pretty
        stages = [i for i in subdirs(preprocessing_output_dir_this_task_train, join=True, sort=True)
                  if i.split("/")[-1].find("stage") != -1]
        for s in stages:
            print(s.split("/")[-1])
            list_of_npz_files = subfiles(s, True, None, ".npz", True)
            list_of_pkl_files = [i[:-4]+".pkl" for i in list_of_npz_files]
            all_classes = []
            for pk in list_of_pkl_files:
                with open(pk, 'rb') as f:
                    props = pickle.load(f)
                # negative labels are ignored (background/outside markers)
                all_classes_tmp = np.array(props['classes'])
                all_classes.append(all_classes_tmp[all_classes_tmp >= 0])
            p.map(add_classes_in_slice_info, zip(list_of_npz_files, list_of_pkl_files, all_classes))
        p.close()
        p.join()
def add_classes_in_slice_info(args):
    """Precompute, for one case, which slices contain which classes.

    We need this for the 2D dataloader with oversampling: instead of scanning a
    whole patient at runtime to find a slice containing a given class, the
    slice ids are computed once here and stored in the patient's pkl file
    (keys 'classes_in_slice_per_axis' and 'number_of_voxels_per_class').

    :param args: tuple (npz_file, pkl_file, all_classes); the npz's 'data'
        array carries the segmentation map as its last channel
    """
    npz_file, pkl_file, all_classes = args
    seg_map = np.load(npz_file)['data'][-1]
    with open(pkl_file, 'rb') as fh:
        props = pickle.load(fh)
    print(pkl_file)

    # axis -> class -> array of slice indices (along that axis) containing the class
    classes_in_slice = OrderedDict()
    for axis in range(3):
        reduce_axes = tuple(a for a in range(3) if a != axis)
        per_class = OrderedDict()
        for cls in all_classes:
            per_class[cls] = np.where(np.sum(seg_map == cls, axis=reduce_axes) > 0)[0]
        classes_in_slice[axis] = per_class

    voxels_per_class = OrderedDict((cls, np.sum(seg_map == cls)) for cls in all_classes)

    props['classes_in_slice_per_axis'] = classes_in_slice
    props['number_of_voxels_per_class'] = voxels_per_class
    with open(pkl_file, 'wb') as fh:
        pickle.dump(props, fh)
| 9,635
| 42.210762
| 120
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/experiment_planning/experiment_planner_baseline_2DUNet_v21.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.experiment_planning.common_utils import get_pool_and_conv_props
from nnunet.experiment_planning.experiment_planner_baseline_2DUNet import ExperimentPlanner2D
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.paths import *
import numpy as np
class ExperimentPlanner2D_v21(ExperimentPlanner2D):
    """v2.1 experiment planner for the 2D U-Net.

    Differs from the base ExperimentPlanner2D in the data identifier / plans
    file name and in how the per-stage configuration (patch size, batch size,
    pooling and conv kernels) is derived.
    """

    def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
        super(ExperimentPlanner2D_v21, self).__init__(folder_with_cropped_data, preprocessed_output_folder)
        self.data_identifier = "nnUNetData_plans_v2.1_2D"
        self.plans_fname = join(self.preprocessed_output_folder,
                                "nnUNetPlansv2.1_plans_2D.pkl")
        self.unet_base_num_features = 32

    def get_properties_for_stage(self, current_spacing, original_spacing, original_shape, num_cases,
                                 num_modalities, num_classes):
        """Compute the plan dict (patch size, batch size, pooling/conv kernels)
        for one resolution stage of the 2D U-Net.

        The patch covers the median shape without its first axis (2D slices);
        the batch size is derived from an estimated VRAM budget and is
        additionally capped so one batch covers at most a fixed fraction of
        the dataset.
        """
        new_median_shape = np.round(original_spacing / current_spacing * original_shape).astype(int)

        dataset_num_voxels = np.prod(new_median_shape, dtype=np.int64) * num_cases
        input_patch_size = new_median_shape[1:]  # drop the first axis: the 2D patch is in-plane only

        network_numpool, net_pool_kernel_sizes, net_conv_kernel_sizes, input_patch_size, \
        shape_must_be_divisible_by = get_pool_and_conv_props(current_spacing[1:], input_patch_size,
                                                             self.unet_featuremap_min_edge_length,
                                                             self.unet_max_numpool)

        # we pretend to use 30 feature maps. This will yield the same configuration as in V1. The larger memory
        # footprint of 32 vs 30 is more than offset by the fp16 training. We make fp16 training default.
        # Reason for 32 vs 30 feature maps is that 32 is faster in fp16 training (because multiple of 8)
        estimated_gpu_ram_consumption = Generic_UNet.compute_approx_vram_consumption(input_patch_size,
                                                                                     network_numpool,
                                                                                     30,
                                                                                     self.unet_max_num_filters,
                                                                                     num_modalities, num_classes,
                                                                                     net_pool_kernel_sizes,
                                                                                     conv_per_stage=self.conv_per_stage)

        batch_size = int(np.floor(Generic_UNet.use_this_for_batch_size_computation_2D /
                                  estimated_gpu_ram_consumption * Generic_UNet.DEFAULT_BATCH_SIZE_2D))
        if batch_size < self.unet_min_batch_size:
            raise RuntimeError("This framework is not made to process patches this large. We will add patch-based "
                               "2D networks later. Sorry for the inconvenience")

        # check if batch size is too large (more than 5 % of dataset)
        max_batch_size = np.round(self.batch_size_covers_max_percent_of_dataset * dataset_num_voxels /
                                  np.prod(input_patch_size, dtype=np.int64)).astype(int)
        batch_size = min(batch_size, max_batch_size)

        plan = {
            'batch_size': batch_size,
            'num_pool_per_axis': network_numpool,
            'patch_size': input_patch_size,
            'median_patient_size_in_voxels': new_median_shape,
            'current_spacing': current_spacing,
            'original_spacing': original_spacing,
            'pool_op_kernel_sizes': net_pool_kernel_sizes,
            'conv_kernel_sizes': net_conv_kernel_sizes,
            'do_dummy_2D_data_aug': False
        }
        return plan
| 4,537
| 57.935065
| 120
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/experiment_planning/common_utils.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from copy import deepcopy
from nnunet.network_architecture.generic_UNet import Generic_UNet
import SimpleITK as sitk
import shutil
from batchgenerators.utilities.file_and_folder_operations import join
def split_4d_nifti(filename, output_folder):
    """Split a 4D NIfTI into one 3D NIfTI per component.

    A 3D input is simply copied to <name>_0000.nii.gz; a 4D input is written
    as <name>_0000.nii.gz .. <name>_XXXX.nii.gz with spacing/origin/direction
    reduced to their 3D parts. Any other dimensionality raises RuntimeError.
    """
    img_itk = sitk.ReadImage(filename)
    dim = img_itk.GetDimension()
    file_base = filename.split("/")[-1]
    if dim == 3:
        # already 3D: just copy under the modality-0 name
        shutil.copy(filename, join(output_folder, file_base[:-7] + "_0000.nii.gz"))
        return
    elif dim != 4:
        raise RuntimeError("Unexpected dimensionality: %d of file %s, cannot split" % (dim, filename))
    else:
        img_npy = sitk.GetArrayFromImage(img_itk)
        spacing = img_itk.GetSpacing()
        origin = img_itk.GetOrigin()
        direction = np.array(img_itk.GetDirection()).reshape(4, 4)
        # now modify these to remove the fourth dimension
        spacing = tuple(list(spacing[:-1]))
        origin = tuple(list(origin[:-1]))
        direction = tuple(direction[:-1, :-1].reshape(-1))
        for i, t in enumerate(range(img_npy.shape[0])):
            # one output nifti per component, carrying the reduced 3D geometry
            img = img_npy[t]
            img_itk_new = sitk.GetImageFromArray(img)
            img_itk_new.SetSpacing(spacing)
            img_itk_new.SetOrigin(origin)
            img_itk_new.SetDirection(direction)
            sitk.WriteImage(img_itk_new, join(output_folder, file_base[:-7] + "_%04.0d.nii.gz" % i))
def get_pool_and_conv_props_poolLateV2(patch_size, min_feature_map_size, max_numpool, spacing):
    """Variant of get_pool_and_conv_props that pools low-resolution axes late.

    The number of poolings per axis is derived from the patch size alone
    (get_network_numpool); axes with fewer allowed poolings only start being
    pooled in the later stages. An axis whose current spacing is more than
    half the largest initial spacing gets conv kernel 1 until all axes cross
    that threshold, at which point every kernel becomes 3.

    :param spacing: voxel spacing per axis
    :param patch_size: patch size per axis
    :param min_feature_map_size: min edge length of feature maps in bottleneck
    :return: (num_pool_per_axis, pool kernel sizes per stage, conv kernel
              sizes per stage incl. bottleneck, padded patch_size,
              shape_must_be_divisible_by)
    """
    initial_spacing = deepcopy(spacing)
    reach = max(initial_spacing)
    dim = len(patch_size)

    num_pool_per_axis = get_network_numpool(patch_size, max_numpool, min_feature_map_size)

    net_num_pool_op_kernel_sizes = []
    net_conv_kernel_sizes = []
    net_numpool = max(num_pool_per_axis)

    current_spacing = spacing
    for p in range(net_numpool):
        # an axis participates only in its last num_pool_per_axis[i] stages
        reached = [current_spacing[i] / reach > 0.5 for i in range(dim)]
        pool = [2 if num_pool_per_axis[i] + p >= net_numpool else 1 for i in range(dim)]
        if all(reached):
            conv = [3] * dim
        else:
            conv = [3 if not reached[i] else 1 for i in range(dim)]
        net_num_pool_op_kernel_sizes.append(pool)
        net_conv_kernel_sizes.append(conv)
        current_spacing = [i * j for i, j in zip(current_spacing, pool)]

    must_be_divisible_by = get_shape_must_be_divisible_by(num_pool_per_axis)
    patch_size = pad_shape(patch_size, must_be_divisible_by)

    # we need to add one more conv_kernel_size for the bottleneck. We always use 3x3(x3) conv here
    net_conv_kernel_sizes.append([3] * dim)
    return num_pool_per_axis, net_num_pool_op_kernel_sizes, net_conv_kernel_sizes, patch_size, must_be_divisible_by
def get_pool_and_conv_props(spacing, patch_size, min_feature_map_size, max_numpool):
    """Greedily determine per-axis pooling counts, pooling/conv kernel sizes
    and the padded patch size for the given spacing and patch size.

    In each iteration the axes whose spacing is within a factor of 2 of the
    current minimum are pooled (stride 2), as long as their feature-map edge
    stays >= 2 * min_feature_map_size and their pooling count is below
    max_numpool. Conv kernels are 3 along the largest group of mutually
    similar (< factor 2 apart) spacings and 1 elsewhere.

    :param spacing: voxel spacing per axis
    :param patch_size: patch size per axis
    :param min_feature_map_size: min edge length of feature maps in bottleneck
    :return: (num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes,
              padded patch_size, shape_must_be_divisible_by)
    """
    dim = len(spacing)

    current_spacing = deepcopy(list(spacing))
    current_size = deepcopy(list(patch_size))

    pool_op_kernel_sizes = []
    conv_kernel_sizes = []

    num_pool_per_axis = [0] * dim

    while True:
        # This is a problem because sometimes we have spacing 20, 50, 50 and we want to still keep pooling.
        # Here we would stop however. This is not what we want! Fixed in get_pool_and_conv_propsv2
        min_spacing = min(current_spacing)
        valid_axes_for_pool = [i for i in range(dim) if current_spacing[i] / min_spacing < 2]
        # pick the largest group of axes whose spacings are within a factor of 2 of each other;
        # those get 3-kernels, the rest get 1-kernels
        axes = []
        for a in range(dim):
            my_spacing = current_spacing[a]
            partners = [i for i in range(dim) if current_spacing[i] / my_spacing < 2 and my_spacing / current_spacing[i] < 2]
            if len(partners) > len(axes):
                axes = partners
        conv_kernel_size = [3 if i in axes else 1 for i in range(dim)]

        # exclude axes that we cannot pool further because of min_feature_map_size constraint
        #before = len(valid_axes_for_pool)
        valid_axes_for_pool = [i for i in valid_axes_for_pool if current_size[i] >= 2*min_feature_map_size]
        #after = len(valid_axes_for_pool)
        #if after == 1 and before > 1:
        #    break

        valid_axes_for_pool = [i for i in valid_axes_for_pool if num_pool_per_axis[i] < max_numpool]

        if len(valid_axes_for_pool) == 0:
            break

        #print(current_spacing, current_size)

        other_axes = [i for i in range(dim) if i not in valid_axes_for_pool]

        # pool the valid axes (stride 2), doubling their spacing and halving their size
        pool_kernel_sizes = [0] * dim
        for v in valid_axes_for_pool:
            pool_kernel_sizes[v] = 2
            num_pool_per_axis[v] += 1
            current_spacing[v] *= 2
            current_size[v] = np.ceil(current_size[v] / 2)
        for nv in other_axes:
            pool_kernel_sizes[nv] = 1

        pool_op_kernel_sizes.append(pool_kernel_sizes)
        conv_kernel_sizes.append(conv_kernel_size)
        #print(conv_kernel_sizes)

    must_be_divisible_by = get_shape_must_be_divisible_by(num_pool_per_axis)
    patch_size = pad_shape(patch_size, must_be_divisible_by)

    # we need to add one more conv_kernel_size for the bottleneck. We always use 3x3(x3) conv here
    conv_kernel_sizes.append([3]*dim)
    return num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, must_be_divisible_by
def get_pool_and_conv_props_v2(spacing, patch_size, min_feature_map_size, max_numpool):
    """Determine per-axis pooling counts, pooling/conv kernel sizes and the
    padded patch size for the given voxel spacing and patch size.

    Axes are pooled (stride 2) while their feature-map edge stays
    >= 2 * min_feature_map_size (>= 3 * when only one poolable axis remains),
    their spacing is within a factor of 2 of the smallest spacing among
    poolable axes, and their pooling count is below max_numpool. Conv kernels
    start at 1 per axis and switch (permanently) to 3 once the axis' spacing
    comes within a factor of 2 of the current minimum spacing.

    :param spacing: voxel spacing per axis
    :param patch_size: patch size per axis
    :param min_feature_map_size: min edge length of feature maps in bottleneck
    :param max_numpool: maximum number of poolings per axis
    :return: (num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes,
              padded patch_size, shape_must_be_divisible_by)
    """
    dim = len(spacing)

    current_spacing = deepcopy(list(spacing))
    current_size = deepcopy(list(patch_size))

    pool_op_kernel_sizes = []
    conv_kernel_sizes = []

    num_pool_per_axis = [0] * dim
    kernel_size = [1] * dim

    while True:
        # exclude axes that we cannot pool further because of min_feature_map_size constraint
        valid_axes_for_pool = [i for i in range(dim) if current_size[i] >= 2 * min_feature_map_size]
        if len(valid_axes_for_pool) < 1:
            break

        spacings_of_axes = [current_spacing[i] for i in valid_axes_for_pool]

        # find axes that are within factor of 2 within smallest spacing
        min_spacing_of_valid = min(spacings_of_axes)
        valid_axes_for_pool = [i for i in valid_axes_for_pool if current_spacing[i] / min_spacing_of_valid < 2]

        # max_numpool constraint
        valid_axes_for_pool = [i for i in valid_axes_for_pool if num_pool_per_axis[i] < max_numpool]

        if len(valid_axes_for_pool) == 1:
            # a single remaining axis needs extra headroom before we keep pooling it
            if current_size[valid_axes_for_pool[0]] >= 3 * min_feature_map_size:
                pass
            else:
                break
        if len(valid_axes_for_pool) < 1:
            break

        # now we need to find kernel sizes
        # kernel sizes are initialized to 1. They are successively set to 3 when their associated axis becomes within
        # factor 2 of min_spacing. Once they are 3 they remain 3
        for d in range(dim):
            if kernel_size[d] == 3:
                continue
            else:
                # bugfix: index current_spacing (length dim) instead of spacings_of_axes,
                # which has only one entry per size-valid axis. As soon as an axis dropped
                # out of valid_axes_for_pool while another axis still had kernel size 1,
                # the old indexing raised IndexError (or silently compared the wrong
                # axis' spacing)
                if current_spacing[d] / min(current_spacing) < 2:
                    kernel_size[d] = 3

        other_axes = [i for i in range(dim) if i not in valid_axes_for_pool]

        # pool the valid axes (stride 2), doubling their spacing and halving their size
        pool_kernel_sizes = [0] * dim
        for v in valid_axes_for_pool:
            pool_kernel_sizes[v] = 2
            num_pool_per_axis[v] += 1
            current_spacing[v] *= 2
            current_size[v] = np.ceil(current_size[v] / 2)
        for nv in other_axes:
            pool_kernel_sizes[nv] = 1

        pool_op_kernel_sizes.append(pool_kernel_sizes)
        conv_kernel_sizes.append(deepcopy(kernel_size))

    must_be_divisible_by = get_shape_must_be_divisible_by(num_pool_per_axis)
    patch_size = pad_shape(patch_size, must_be_divisible_by)

    # we need to add one more conv_kernel_size for the bottleneck. We always use 3x3(x3) conv here
    conv_kernel_sizes.append([3] * dim)
    return num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, must_be_divisible_by
def get_shape_must_be_divisible_by(net_numpool_per_axis):
    """Per-axis divisibility constraint implied by the pooling counts: an axis
    pooled p times (stride 2) must have an extent divisible by 2**p."""
    return np.power(2, np.array(net_numpool_per_axis))
def pad_shape(shape, must_be_divisible_by):
    """Round every entry of *shape* up to the next multiple of the matching
    entry of *must_be_divisible_by*.

    A scalar *must_be_divisible_by* is broadcast to all axes; entries that are
    already divisible are left unchanged.

    :param shape: sequence of axis extents
    :param must_be_divisible_by: scalar or per-axis sequence of divisors
    :return: int numpy array with the padded shape
    """
    if not isinstance(must_be_divisible_by, (tuple, list, np.ndarray)):
        must_be_divisible_by = [must_be_divisible_by] * len(shape)
    else:
        assert len(must_be_divisible_by) == len(shape)

    padded = [s if s % m == 0 else s + m - s % m
              for s, m in zip(shape, must_be_divisible_by)]
    return np.array(padded).astype(int)
def get_network_numpool(patch_size, maxpool_cap=999, min_feature_map_size=4):
    """Compute how many 2x poolings each axis supports.

    Each axis is pooled until its feature map would drop below
    `min_feature_map_size`, but never more than `maxpool_cap` times.

    :param patch_size: per-axis patch extents
    :param maxpool_cap: upper bound on poolings per axis
    :param min_feature_map_size: smallest allowed feature map edge length
    :return: list with the number of poolings per axis
    """
    numpool_per_axis = []
    for edge in patch_size:
        # floor(log2(edge / min_feature_map_size)); keep the log/log(2) form of the
        # original to preserve identical floating-point behavior
        pools = np.floor(np.log(edge / min_feature_map_size) / np.log(2)).astype(int)
        numpool_per_axis.append(min(pools, maxpool_cap))
    return numpool_per_axis
if __name__ == '__main__':
    # Debugging snippet for https://github.com/MIC-DKFZ/nnUNet/issues/261:
    # reproduce the pooling/conv planning for a strongly anisotropic case.
    shape_to_plan = [24, 504, 512]
    voxel_spacing = [5.9999094, 0.50781202, 0.50781202]
    (num_pool_per_axis, net_num_pool_op_kernel_sizes, net_conv_kernel_sizes, patch_size,
     must_be_divisible_by) = get_pool_and_conv_props_poolLateV2(shape_to_plan,
                                                                min_feature_map_size=4,
                                                                max_numpool=999,
                                                                spacing=voxel_spacing)
| 10,545
| 38.350746
| 217
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/experiment_planning/change_batch_size.py
|
from batchgenerators.utilities.file_and_folder_operations import *
import numpy as np
if __name__ == '__main__':
    # One-off utility: shrink the batch size of the first (3d) stage of the
    # Hippocampus plans to 6/9 of its original value and save as a new plans file.
    src = '/home/fabian/data/nnUNet_preprocessed/Task004_Hippocampus/nnUNetPlansv2.1_plans_3D.pkl'
    dst = '/home/fabian/data/nnUNet_preprocessed/Task004_Hippocampus/nnUNetPlansv2.1_LISA_plans_3D.pkl'
    plans = load_pickle(src)
    old_batch_size = plans['plans_per_stage'][0]['batch_size']
    plans['plans_per_stage'][0]['batch_size'] = int(np.floor(6 / 9 * old_batch_size))
    save_pickle(plans, dst)
| 500
| 54.666667
| 111
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/experiment_planning/__init__.py
|
from __future__ import absolute_import
from . import *
| 54
| 26.5
| 38
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/experiment_planning/experiment_planner_baseline_3DUNet_v21.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
import numpy as np
from nnunet.experiment_planning.common_utils import get_pool_and_conv_props
from nnunet.experiment_planning.experiment_planner_baseline_3DUNet import ExperimentPlanner
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.paths import *
class ExperimentPlanner3D_v21(ExperimentPlanner):
    """
    Default 3D experiment planner of nnU-Net v2.1.

    Combines ExperimentPlannerPoolBasedOnSpacing and ExperimentPlannerTargetSpacingForAnisoAxis
    We also increase the base_num_features to 32. This is solely because mixed precision training with 3D convs and
    amp is A LOT faster if the number of filters is divisible by 8
    """
    def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
        super(ExperimentPlanner3D_v21, self).__init__(folder_with_cropped_data, preprocessed_output_folder)
        self.data_identifier = "nnUNetData_plans_v2.1"
        self.plans_fname = join(self.preprocessed_output_folder,
                                "nnUNetPlansv2.1_plans_3D.pkl")
        # 32 instead of 30: multiples of 8 are required for fast fp16 (amp) 3D convolutions
        self.unet_base_num_features = 32
    def get_target_spacing(self):
        """
        per default we use the 50th percentile=median for the target spacing. Higher spacing results in smaller data
        and thus faster and easier training. Smaller spacing results in larger data and thus longer and harder training

        For some datasets the median is not a good choice. Those are the datasets where the spacing is very anisotropic
        (for example ACDC with (10, 1.5, 1.5)). These datasets still have examples with a spacing of 5 or 6 mm in the low
        resolution axis. Choosing the median here will result in bad interpolation artifacts that can substantially
        impact performance (due to the low number of slices).

        :return: per-axis target spacing (numpy array)
        """
        spacings = self.dataset_properties['all_spacings']
        sizes = self.dataset_properties['all_sizes']
        target = np.percentile(np.vstack(spacings), self.target_spacing_percentile, 0)
        # This should be used to determine the new median shape. The old implementation is not 100% correct.
        # Fixed in 2.4
        # sizes = [np.array(i) / target * np.array(j) for i, j in zip(spacings, sizes)]
        target_size = np.percentile(np.vstack(sizes), self.target_spacing_percentile, 0)
        target_size_mm = np.array(target) * np.array(target_size)  # currently unused, see note below
        # we need to identify datasets for which a different target spacing could be beneficial. These datasets have
        # the following properties:
        # - one axis which much lower resolution than the others
        # - the lowres axis has much less voxels than the others
        # - (the size in mm of the lowres axis is also reduced)
        worst_spacing_axis = np.argmax(target)
        other_axes = [i for i in range(len(target)) if i != worst_spacing_axis]
        other_spacings = [target[i] for i in other_axes]
        other_sizes = [target_size[i] for i in other_axes]
        has_aniso_spacing = target[worst_spacing_axis] > (self.anisotropy_threshold * max(other_spacings))
        has_aniso_voxels = target_size[worst_spacing_axis] * self.anisotropy_threshold < min(other_sizes)
        # we don't use the last one for now
        #median_size_in_mm = target[target_size_mm] * RESAMPLING_SEPARATE_Z_ANISOTROPY_THRESHOLD < max(target_size_mm)
        if has_aniso_spacing and has_aniso_voxels:
            # for sufficiently anisotropic datasets, take the 10th percentile of the worst axis'
            # spacings instead of the median to avoid heavy interpolation artifacts
            spacings_of_that_axis = np.vstack(spacings)[:, worst_spacing_axis]
            target_spacing_of_that_axis = np.percentile(spacings_of_that_axis, 10)
            # don't let the spacing of that axis get higher than the other axes
            if target_spacing_of_that_axis < max(other_spacings):
                # +1e-5 keeps np.argmax(target) pointing at this axis on ties
                target_spacing_of_that_axis = max(max(other_spacings), target_spacing_of_that_axis) + 1e-5
            target[worst_spacing_axis] = target_spacing_of_that_axis
        return target
    def get_properties_for_stage(self, current_spacing, original_spacing, original_shape, num_cases,
                                 num_modalities, num_classes):
        """
        ExperimentPlanner configures pooling so that we pool late. Meaning that if the number of pooling per axis is
        (2, 3, 3), then the first pooling operation will always pool axes 1 and 2 and not 0, irrespective of spacing.
        This can cause a larger memory footprint, so it can be beneficial to revise this.

        Here we are pooling based on the spacing of the data.

        :param current_spacing: target voxel spacing of this stage
        :param original_spacing: median voxel spacing of the raw dataset
        :param original_shape: median shape of the raw dataset
        :param num_cases: number of training cases
        :param num_modalities: number of input channels
        :param num_classes: number of segmentation classes
        :return: dict with batch_size, num_pool_per_axis, patch_size,
                 median_patient_size_in_voxels, current_spacing, original_spacing,
                 do_dummy_2D_data_aug, pool_op_kernel_sizes and conv_kernel_sizes
        """
        new_median_shape = np.round(original_spacing / current_spacing * original_shape).astype(int)
        dataset_num_voxels = np.prod(new_median_shape) * num_cases
        # the next line is what we had before as a default. The patch size had the same aspect ratio as the median shape of a patient. We swapped t
        # input_patch_size = new_median_shape
        # compute how many voxels are one mm
        input_patch_size = 1 / np.array(current_spacing)
        # normalize voxels per mm
        input_patch_size /= input_patch_size.mean()
        # create an isotropic patch of size 512x512x512mm
        input_patch_size *= 1 / min(input_patch_size) * 512 # to get a starting value
        input_patch_size = np.round(input_patch_size).astype(int)
        # clip it to the median shape of the dataset because patches larger then that make not much sense
        input_patch_size = [min(i, j) for i, j in zip(input_patch_size, new_median_shape)]
        network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, \
        shape_must_be_divisible_by = get_pool_and_conv_props(current_spacing, input_patch_size,
                                                             self.unet_featuremap_min_edge_length,
                                                             self.unet_max_numpool)
        # we compute as if we were using only 30 feature maps. We can do that because fp16 training is the standard
        # now. That frees up some space. The decision to go with 32 is solely due to the speedup we get (non-multiples
        # of 8 are not supported in nvidia amp)
        ref = Generic_UNet.use_this_for_batch_size_computation_3D * self.unet_base_num_features / \
              Generic_UNet.BASE_NUM_FEATURES_3D
        here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis,
                                                            self.unet_base_num_features,
                                                            self.unet_max_num_filters, num_modalities,
                                                            num_classes,
                                                            pool_op_kernel_sizes, conv_per_stage=self.conv_per_stage)
        # shrink the patch size (one axis at a time, the one most oversized relative to the median
        # shape) until the estimated VRAM consumption fits the reference budget
        while here > ref:
            axis_to_be_reduced = np.argsort(new_shp / new_median_shape)[-1]
            tmp = deepcopy(new_shp)
            tmp[axis_to_be_reduced] -= shape_must_be_divisible_by[axis_to_be_reduced]
            _, _, _, _, shape_must_be_divisible_by_new = \
                get_pool_and_conv_props(current_spacing, tmp,
                                        self.unet_featuremap_min_edge_length,
                                        self.unet_max_numpool,
                                        )
            new_shp[axis_to_be_reduced] -= shape_must_be_divisible_by_new[axis_to_be_reduced]
            # we have to recompute numpool now:
            network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, \
            shape_must_be_divisible_by = get_pool_and_conv_props(current_spacing, new_shp,
                                                                 self.unet_featuremap_min_edge_length,
                                                                 self.unet_max_numpool,
                                                                 )
            here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis,
                                                                self.unet_base_num_features,
                                                                self.unet_max_num_filters, num_modalities,
                                                                num_classes, pool_op_kernel_sizes,
                                                                conv_per_stage=self.conv_per_stage)
        #print(new_shp)
        #print(here, ref)
        input_patch_size = new_shp
        batch_size = Generic_UNet.DEFAULT_BATCH_SIZE_3D  # This is what works with 128**3
        # scale the batch size with any leftover VRAM budget
        batch_size = int(np.floor(max(ref / here, 1) * batch_size))
        # check if batch size is too large
        max_batch_size = np.round(self.batch_size_covers_max_percent_of_dataset * dataset_num_voxels /
                                  np.prod(input_patch_size, dtype=np.int64)).astype(int)
        max_batch_size = max(max_batch_size, self.unet_min_batch_size)
        batch_size = min(batch_size, max_batch_size)
        # enable dummy 2D augmentation for strongly anisotropic patches
        do_dummy_2D_data_aug = (max(input_patch_size) / input_patch_size[
            0]) > self.anisotropy_threshold
        plan = {
            'batch_size': batch_size,
            'num_pool_per_axis': network_num_pool_per_axis,
            'patch_size': input_patch_size,
            'median_patient_size_in_voxels': new_median_shape,
            'current_spacing': current_spacing,
            'original_spacing': original_spacing,
            'do_dummy_2D_data_aug': do_dummy_2D_data_aug,
            'pool_op_kernel_sizes': pool_op_kernel_sizes,
            'conv_kernel_sizes': conv_kernel_sizes,
        }
        return plan
| 10,252
| 55.961111
| 147
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/experiment_planning/alternative_experiment_planning/experiment_planner_baseline_3DUNet_v21_3convperstage.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
import numpy as np
from nnunet.experiment_planning.common_utils import get_pool_and_conv_props
from nnunet.experiment_planning.experiment_planner_baseline_3DUNet import ExperimentPlanner
from nnunet.experiment_planning.experiment_planner_baseline_3DUNet_v21 import ExperimentPlanner3D_v21
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.paths import *
class ExperimentPlanner3D_v21_3cps(ExperimentPlanner3D_v21):
    """
    Variant of ExperimentPlanner3D_v21 that plans three conv-in-lrelu blocks per resolution step
    (instead of two) while staying within the same memory budget.

    Only usable for 3d_fullres: this planner reuses the preprocessed data of
    ExperimentPlanner3D_v21. A lowres configuration would require rerunning preprocessing
    (a different patch size implies a different 3d lowres target spacing).
    """
    def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
        super().__init__(folder_with_cropped_data, preprocessed_output_folder)
        self.plans_fname = join(self.preprocessed_output_folder,
                                "nnUNetPlansv2.1_3cps_plans_3D.pkl")
        self.unet_base_num_features = 32
        self.conv_per_stage = 3

    def run_preprocessing(self, num_threads):
        # intentionally a no-op: the preprocessed data of ExperimentPlanner3D_v21 is reused
        pass
| 1,932
| 46.146341
| 116
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/experiment_planning/alternative_experiment_planning/experiment_planner_baseline_3DUNet_v21_11GB.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
import numpy as np
from nnunet.experiment_planning.experiment_planner_baseline_3DUNet_v21 import \
ExperimentPlanner3D_v21
from nnunet.experiment_planning.common_utils import get_pool_and_conv_props
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.paths import *
class ExperimentPlanner3D_v21_11GB(ExperimentPlanner3D_v21):
    """
    Same as ExperimentPlanner3D_v21, but designed to fill a RTX2080 ti (11GB) in fp16.
    Only the VRAM reference budget (ref) differs; the planning procedure is identical.
    """
    def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
        super(ExperimentPlanner3D_v21_11GB, self).__init__(folder_with_cropped_data, preprocessed_output_folder)
        self.data_identifier = "nnUNetData_plans_v2.1_big"
        self.plans_fname = join(self.preprocessed_output_folder,
                                "nnUNetPlansv2.1_big_plans_3D.pkl")
    def get_properties_for_stage(self, current_spacing, original_spacing, original_shape, num_cases,
                                 num_modalities, num_classes):
        """
        We need to adapt ref

        Same procedure as ExperimentPlanner3D_v21.get_properties_for_stage, but the VRAM
        budget is scaled by 11/8 to target an 11GB GPU instead of the default 8GB.
        Returns the same plan dict (batch_size, num_pool_per_axis, patch_size, ...).
        """
        new_median_shape = np.round(original_spacing / current_spacing * original_shape).astype(int)
        dataset_num_voxels = np.prod(new_median_shape) * num_cases
        # the next line is what we had before as a default. The patch size had the same aspect ratio as the median shape of a patient. We swapped t
        # input_patch_size = new_median_shape
        # compute how many voxels are one mm
        input_patch_size = 1 / np.array(current_spacing)
        # normalize voxels per mm
        input_patch_size /= input_patch_size.mean()
        # create an isotropic patch of size 512x512x512mm
        input_patch_size *= 1 / min(input_patch_size) * 512 # to get a starting value
        input_patch_size = np.round(input_patch_size).astype(int)
        # clip it to the median shape of the dataset because patches larger then that make not much sense
        input_patch_size = [min(i, j) for i, j in zip(input_patch_size, new_median_shape)]
        network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, \
        shape_must_be_divisible_by = get_pool_and_conv_props(current_spacing, input_patch_size,
                                                             self.unet_featuremap_min_edge_length,
                                                             self.unet_max_numpool)
        # use_this_for_batch_size_computation_3D = 520000000 # 505789440
        # typical ExperimentPlanner3D_v21 configurations use 7.5GB, but on a 2080ti we have 11. Allow for more space
        # to be used
        ref = Generic_UNet.use_this_for_batch_size_computation_3D * 11 / 8
        here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis,
                                                            self.unet_base_num_features,
                                                            self.unet_max_num_filters, num_modalities,
                                                            num_classes,
                                                            pool_op_kernel_sizes, conv_per_stage=self.conv_per_stage)
        # shrink the patch size (one axis at a time, the one most oversized relative to the median
        # shape) until the estimated VRAM consumption fits the (enlarged) reference budget
        while here > ref:
            axis_to_be_reduced = np.argsort(new_shp / new_median_shape)[-1]
            tmp = deepcopy(new_shp)
            tmp[axis_to_be_reduced] -= shape_must_be_divisible_by[axis_to_be_reduced]
            _, _, _, _, shape_must_be_divisible_by_new = \
                get_pool_and_conv_props(current_spacing, tmp,
                                        self.unet_featuremap_min_edge_length,
                                        self.unet_max_numpool,
                                        )
            new_shp[axis_to_be_reduced] -= shape_must_be_divisible_by_new[axis_to_be_reduced]
            # we have to recompute numpool now:
            network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, \
            shape_must_be_divisible_by = get_pool_and_conv_props(current_spacing, new_shp,
                                                                 self.unet_featuremap_min_edge_length,
                                                                 self.unet_max_numpool,
                                                                 )
            here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis,
                                                                self.unet_base_num_features,
                                                                self.unet_max_num_filters, num_modalities,
                                                                num_classes, pool_op_kernel_sizes,
                                                                conv_per_stage=self.conv_per_stage)
        # print(new_shp)
        input_patch_size = new_shp
        batch_size = Generic_UNet.DEFAULT_BATCH_SIZE_3D  # This is what works with 128**3
        # scale the batch size with any leftover VRAM budget
        batch_size = int(np.floor(max(ref / here, 1) * batch_size))
        # check if batch size is too large
        max_batch_size = np.round(self.batch_size_covers_max_percent_of_dataset * dataset_num_voxels /
                                  np.prod(input_patch_size, dtype=np.int64)).astype(int)
        max_batch_size = max(max_batch_size, self.unet_min_batch_size)
        batch_size = min(batch_size, max_batch_size)
        # enable dummy 2D augmentation for strongly anisotropic patches
        do_dummy_2D_data_aug = (max(input_patch_size) / input_patch_size[
            0]) > self.anisotropy_threshold
        plan = {
            'batch_size': batch_size,
            'num_pool_per_axis': network_num_pool_per_axis,
            'patch_size': input_patch_size,
            'median_patient_size_in_voxels': new_median_shape,
            'current_spacing': current_spacing,
            'original_spacing': original_spacing,
            'do_dummy_2D_data_aug': do_dummy_2D_data_aug,
            'pool_op_kernel_sizes': pool_op_kernel_sizes,
            'conv_kernel_sizes': conv_kernel_sizes,
        }
        return plan
| 6,712
| 52.704
| 147
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/experiment_planning/alternative_experiment_planning/experiment_planner_baseline_3DUNet_v22.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from nnunet.experiment_planning.experiment_planner_baseline_3DUNet_v21 import \
ExperimentPlanner3D_v21
from nnunet.paths import *
class ExperimentPlanner3D_v22(ExperimentPlanner3D_v21):
    """
    Like ExperimentPlanner3D_v21, but with a revised target spacing heuristic for anisotropic
    datasets (see get_target_spacing): the low-resolution axis spacing is capped at
    anisotropy_threshold times the largest of the other axes' spacings instead of being
    bumped just above them. Uses its own data identifier / plans file so both sets of
    preprocessed data can coexist.
    """
    def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
        super().__init__(folder_with_cropped_data, preprocessed_output_folder)
        self.data_identifier = "nnUNetData_plans_v2.2"
        self.plans_fname = join(self.preprocessed_output_folder,
                                "nnUNetPlansv2.2_plans_3D.pkl")
    def get_target_spacing(self):
        # start from the percentile (default: median) spacing/size across the dataset
        spacings = self.dataset_properties['all_spacings']
        sizes = self.dataset_properties['all_sizes']
        target = np.percentile(np.vstack(spacings), self.target_spacing_percentile, 0)
        target_size = np.percentile(np.vstack(sizes), self.target_spacing_percentile, 0)
        target_size_mm = np.array(target) * np.array(target_size)  # currently unused, see note below
        # we need to identify datasets for which a different target spacing could be beneficial. These datasets have
        # the following properties:
        # - one axis which much lower resolution than the others
        # - the lowres axis has much less voxels than the others
        # - (the size in mm of the lowres axis is also reduced)
        worst_spacing_axis = np.argmax(target)
        other_axes = [i for i in range(len(target)) if i != worst_spacing_axis]
        other_spacings = [target[i] for i in other_axes]
        other_sizes = [target_size[i] for i in other_axes]
        has_aniso_spacing = target[worst_spacing_axis] > (self.anisotropy_threshold * max(other_spacings))
        has_aniso_voxels = target_size[worst_spacing_axis] * self.anisotropy_threshold < min(other_sizes)
        # we don't use the last one for now
        #median_size_in_mm = target[target_size_mm] * RESAMPLING_SEPARATE_Z_ANISOTROPY_THRESHOLD < max(target_size_mm)
        if has_aniso_spacing and has_aniso_voxels:
            # take the 10th percentile of the worst axis' spacings instead of the median
            spacings_of_that_axis = np.vstack(spacings)[:, worst_spacing_axis]
            target_spacing_of_that_axis = np.percentile(spacings_of_that_axis, 10)
            # don't let the spacing of that axis get higher than self.anisotropy_thresholdxthe_other_axes
            target_spacing_of_that_axis = max(max(other_spacings) * self.anisotropy_threshold, target_spacing_of_that_axis)
            target[worst_spacing_axis] = target_spacing_of_that_axis
        return target
| 3,151
| 51.533333
| 123
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/experiment_planning/alternative_experiment_planning/experiment_planner_residual_3DUNet_v21.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
import numpy as np
from nnunet.experiment_planning.experiment_planner_baseline_3DUNet_v21 import \
ExperimentPlanner3D_v21
from nnunet.experiment_planning.common_utils import get_pool_and_conv_props
from nnunet.paths import *
from nnunet.network_architecture.generic_modular_residual_UNet import FabiansUNet
class ExperimentPlanner3DFabiansResUNet_v21(ExperimentPlanner3D_v21):
    # Plans for FabiansUNet (residual encoder UNet) instead of Generic_UNet. The VRAM
    # estimation therefore uses FabiansUNet's budget and also tracks per-stage block counts.
    def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
        super(ExperimentPlanner3DFabiansResUNet_v21, self).__init__(folder_with_cropped_data, preprocessed_output_folder)
        self.data_identifier = "nnUNetData_plans_v2.1"# "nnUNetData_FabiansResUNet_v2.1"
        self.plans_fname = join(self.preprocessed_output_folder,
                                "nnUNetPlans_FabiansResUNet_v2.1_plans_3D.pkl")
    def get_properties_for_stage(self, current_spacing, original_spacing, original_shape, num_cases,
                                 num_modalities, num_classes):
        """
        We use FabiansUNet instead of Generic_UNet

        Same overall procedure as ExperimentPlanner3D_v21.get_properties_for_stage, but the
        VRAM estimation is done with FabiansUNet and the returned plan additionally contains
        'num_blocks_encoder' and 'num_blocks_decoder'.
        """
        new_median_shape = np.round(original_spacing / current_spacing * original_shape).astype(int)
        dataset_num_voxels = np.prod(new_median_shape) * num_cases
        # the next line is what we had before as a default. The patch size had the same aspect ratio as the median shape of a patient. We swapped t
        # input_patch_size = new_median_shape
        # compute how many voxels are one mm
        input_patch_size = 1 / np.array(current_spacing)
        # normalize voxels per mm
        input_patch_size /= input_patch_size.mean()
        # create an isotropic patch of size 512x512x512mm
        input_patch_size *= 1 / min(input_patch_size) * 512 # to get a starting value
        input_patch_size = np.round(input_patch_size).astype(int)
        # clip it to the median shape of the dataset because patches larger then that make not much sense
        input_patch_size = [min(i, j) for i, j in zip(input_patch_size, new_median_shape)]
        network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, \
        shape_must_be_divisible_by = get_pool_and_conv_props(current_spacing, input_patch_size,
                                                             self.unet_featuremap_min_edge_length,
                                                             self.unet_max_numpool)
        # FabiansUNet expects a leading no-pooling stage
        pool_op_kernel_sizes = [[1, 1, 1]] + pool_op_kernel_sizes
        blocks_per_stage_encoder = FabiansUNet.default_blocks_per_stage_encoder[:len(pool_op_kernel_sizes)]
        blocks_per_stage_decoder = FabiansUNet.default_blocks_per_stage_decoder[:len(pool_op_kernel_sizes) - 1]
        ref = FabiansUNet.use_this_for_3D_configuration
        # NOTE(review): unlike ExperimentPlanner3D_v21, this initial estimate uses input_patch_size
        # rather than the padded new_shp returned by get_pool_and_conv_props — confirm intentional
        here = FabiansUNet.compute_approx_vram_consumption(input_patch_size, self.unet_base_num_features,
                                                           self.unet_max_num_filters, num_modalities, num_classes,
                                                           pool_op_kernel_sizes, blocks_per_stage_encoder,
                                                           blocks_per_stage_decoder, 2, self.unet_min_batch_size,)
        # shrink the patch size (one axis at a time, the one most oversized relative to the median
        # shape) until the estimated VRAM consumption fits the reference budget
        while here > ref:
            axis_to_be_reduced = np.argsort(new_shp / new_median_shape)[-1]
            tmp = deepcopy(new_shp)
            tmp[axis_to_be_reduced] -= shape_must_be_divisible_by[axis_to_be_reduced]
            _, _, _, _, shape_must_be_divisible_by_new = \
                get_pool_and_conv_props(current_spacing, tmp,
                                        self.unet_featuremap_min_edge_length,
                                        self.unet_max_numpool,
                                        )
            new_shp[axis_to_be_reduced] -= shape_must_be_divisible_by_new[axis_to_be_reduced]
            # we have to recompute numpool now:
            network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, \
            shape_must_be_divisible_by = get_pool_and_conv_props(current_spacing, new_shp,
                                                                 self.unet_featuremap_min_edge_length,
                                                                 self.unet_max_numpool,
                                                                 )
            pool_op_kernel_sizes = [[1, 1, 1]] + pool_op_kernel_sizes
            blocks_per_stage_encoder = FabiansUNet.default_blocks_per_stage_encoder[:len(pool_op_kernel_sizes)]
            blocks_per_stage_decoder = FabiansUNet.default_blocks_per_stage_decoder[:len(pool_op_kernel_sizes) - 1]
            here = FabiansUNet.compute_approx_vram_consumption(new_shp, self.unet_base_num_features,
                                                               self.unet_max_num_filters, num_modalities, num_classes,
                                                               pool_op_kernel_sizes, blocks_per_stage_encoder,
                                                               blocks_per_stage_decoder, 2, self.unet_min_batch_size)
        input_patch_size = new_shp
        batch_size = FabiansUNet.default_min_batch_size
        # scale the batch size with any leftover VRAM budget
        batch_size = int(np.floor(max(ref / here, 1) * batch_size))
        # check if batch size is too large
        max_batch_size = np.round(self.batch_size_covers_max_percent_of_dataset * dataset_num_voxels /
                                  np.prod(input_patch_size, dtype=np.int64)).astype(int)
        max_batch_size = max(max_batch_size, self.unet_min_batch_size)
        batch_size = min(batch_size, max_batch_size)
        # enable dummy 2D augmentation for strongly anisotropic patches
        do_dummy_2D_data_aug = (max(input_patch_size) / input_patch_size[
            0]) > self.anisotropy_threshold
        plan = {
            'batch_size': batch_size,
            'num_pool_per_axis': network_num_pool_per_axis,
            'patch_size': input_patch_size,
            'median_patient_size_in_voxels': new_median_shape,
            'current_spacing': current_spacing,
            'original_spacing': original_spacing,
            'do_dummy_2D_data_aug': do_dummy_2D_data_aug,
            'pool_op_kernel_sizes': pool_op_kernel_sizes,
            'conv_kernel_sizes': conv_kernel_sizes,
            'num_blocks_encoder': blocks_per_stage_encoder,
            'num_blocks_decoder': blocks_per_stage_decoder
        }
        return plan
    def run_preprocessing(self, num_threads):
        """
        On all datasets except 3d fullres on spleen the preprocessed data would look identical to
        ExperimentPlanner3D_v21 (I tested decathlon data only). Therefore we just reuse the preprocessed data of
        that other planner

        :param num_threads:
        :return:
        """
        pass
| 7,394
| 54.601504
| 147
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/experiment_planning/alternative_experiment_planning/experiment_planner_baseline_3DUNet_v21_32GB.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
import numpy as np
from nnunet.experiment_planning.experiment_planner_baseline_3DUNet_v21 import \
ExperimentPlanner3D_v21
from nnunet.experiment_planning.common_utils import get_pool_and_conv_props
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.paths import *
class ExperimentPlanner3D_v21_32GB(ExperimentPlanner3D_v21):
    """
    Same as ExperimentPlanner3D_v21, but designed to fill a V100 (32GB) in fp16.
    Only the VRAM reference budget (ref) differs; the planning procedure is identical.
    """
    def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
        super(ExperimentPlanner3D_v21_32GB, self).__init__(folder_with_cropped_data, preprocessed_output_folder)
        self.data_identifier = "nnUNetData_plans_v2.1_verybig"
        self.plans_fname = join(self.preprocessed_output_folder,
                                "nnUNetPlansv2.1_verybig_plans_3D.pkl")
    def get_properties_for_stage(self, current_spacing, original_spacing, original_shape, num_cases,
                                 num_modalities, num_classes):
        """
        We need to adapt ref

        Same procedure as ExperimentPlanner3D_v21.get_properties_for_stage, but the VRAM
        budget is scaled by 32/8 to target a 32GB GPU instead of the default 8GB.
        Returns the same plan dict (batch_size, num_pool_per_axis, patch_size, ...).
        """
        new_median_shape = np.round(original_spacing / current_spacing * original_shape).astype(int)
        dataset_num_voxels = np.prod(new_median_shape) * num_cases
        # the next line is what we had before as a default. The patch size had the same aspect ratio as the median shape of a patient. We swapped t
        # input_patch_size = new_median_shape
        # compute how many voxels are one mm
        input_patch_size = 1 / np.array(current_spacing)
        # normalize voxels per mm
        input_patch_size /= input_patch_size.mean()
        # create an isotropic patch of size 512x512x512mm
        input_patch_size *= 1 / min(input_patch_size) * 512 # to get a starting value
        input_patch_size = np.round(input_patch_size).astype(int)
        # clip it to the median shape of the dataset because patches larger then that make not much sense
        input_patch_size = [min(i, j) for i, j in zip(input_patch_size, new_median_shape)]
        network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, \
        shape_must_be_divisible_by = get_pool_and_conv_props(current_spacing, input_patch_size,
                                                             self.unet_featuremap_min_edge_length,
                                                             self.unet_max_numpool)
        # use_this_for_batch_size_computation_3D = 520000000 # 505789440
        # typical ExperimentPlanner3D_v21 configurations use 7.5GB, but on a V100 we have 32. Allow for more space
        # to be used
        ref = Generic_UNet.use_this_for_batch_size_computation_3D * 32 / 8
        here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis,
                                                            self.unet_base_num_features,
                                                            self.unet_max_num_filters, num_modalities,
                                                            num_classes,
                                                            pool_op_kernel_sizes, conv_per_stage=self.conv_per_stage)
        # shrink the patch size (one axis at a time, the one most oversized relative to the median
        # shape) until the estimated VRAM consumption fits the (enlarged) reference budget
        while here > ref:
            axis_to_be_reduced = np.argsort(new_shp / new_median_shape)[-1]
            tmp = deepcopy(new_shp)
            tmp[axis_to_be_reduced] -= shape_must_be_divisible_by[axis_to_be_reduced]
            _, _, _, _, shape_must_be_divisible_by_new = \
                get_pool_and_conv_props(current_spacing, tmp,
                                        self.unet_featuremap_min_edge_length,
                                        self.unet_max_numpool,
                                        )
            new_shp[axis_to_be_reduced] -= shape_must_be_divisible_by_new[axis_to_be_reduced]
            # we have to recompute numpool now:
            network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, \
            shape_must_be_divisible_by = get_pool_and_conv_props(current_spacing, new_shp,
                                                                 self.unet_featuremap_min_edge_length,
                                                                 self.unet_max_numpool,
                                                                 )
            here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis,
                                                                self.unet_base_num_features,
                                                                self.unet_max_num_filters, num_modalities,
                                                                num_classes, pool_op_kernel_sizes,
                                                                conv_per_stage=self.conv_per_stage)
        # print(new_shp)
        input_patch_size = new_shp
        batch_size = Generic_UNet.DEFAULT_BATCH_SIZE_3D  # This is what works with 128**3
        # scale the batch size with any leftover VRAM budget
        batch_size = int(np.floor(max(ref / here, 1) * batch_size))
        # check if batch size is too large
        max_batch_size = np.round(self.batch_size_covers_max_percent_of_dataset * dataset_num_voxels /
                                  np.prod(input_patch_size, dtype=np.int64)).astype(int)
        max_batch_size = max(max_batch_size, self.unet_min_batch_size)
        batch_size = min(batch_size, max_batch_size)
        # enable dummy 2D augmentation for strongly anisotropic patches
        do_dummy_2D_data_aug = (max(input_patch_size) / input_patch_size[
            0]) > self.anisotropy_threshold
        plan = {
            'batch_size': batch_size,
            'num_pool_per_axis': network_num_pool_per_axis,
            'patch_size': input_patch_size,
            'median_patient_size_in_voxels': new_median_shape,
            'current_spacing': current_spacing,
            'original_spacing': original_spacing,
            'do_dummy_2D_data_aug': do_dummy_2D_data_aug,
            'pool_op_kernel_sizes': pool_op_kernel_sizes,
            'conv_kernel_sizes': conv_kernel_sizes,
        }
        return plan
| 6,710
| 53.560976
| 147
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/experiment_planning/alternative_experiment_planning/experiment_planner_baseline_3DUNet_v23.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.experiment_planning.experiment_planner_baseline_3DUNet_v21 import \
ExperimentPlanner3D_v21
from nnunet.paths import *
class ExperimentPlanner3D_v23(ExperimentPlanner3D_v21):
    """
    Identical to ExperimentPlanner3D_v21 except that preprocessing is delegated to
    Preprocessor3DDifferentResampling and the output is written under its own data
    identifier / plans file.
    """
    def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
        super().__init__(folder_with_cropped_data, preprocessed_output_folder)
        self.data_identifier = "nnUNetData_plans_v2.3"
        self.plans_fname = join(self.preprocessed_output_folder,
                                "nnUNetPlansv2.3_plans_3D.pkl")
        self.preprocessor_name = "Preprocessor3DDifferentResampling"
| 1,337
| 45.137931
| 114
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/experiment_planning/alternative_experiment_planning/normalization/experiment_planner_3DUNet_nonCT.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from nnunet.experiment_planning.experiment_planner_baseline_3DUNet import ExperimentPlanner
from nnunet.paths import *
class ExperimentPlannernonCT(ExperimentPlanner):
    """
    Preprocesses all data in nonCT mode (this is what we use for MRI per default, but here it is applied to CT images
    as well)
    """

    def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
        super(ExperimentPlannernonCT, self).__init__(folder_with_cropped_data, preprocessed_output_folder)
        self.data_identifier = "nnUNet_nonCT"
        self.plans_fname = join(self.preprocessed_output_folder, "nnUNetPlans" + "nonCT_plans_3D.pkl")

    def determine_normalization_scheme(self):
        """Assign the "nonCT" normalization scheme to every modality.

        The baseline planner distinguishes CT from non-CT modalities here.
        This planner deliberately treats all modalities the same, so the
        original if/else (whose branches were byte-identical) is collapsed
        into a single unconditional assignment.

        Returns:
            OrderedDict: modality index -> "nonCT"
        """
        modalities = self.dataset_properties['modalities']
        schemes = OrderedDict()
        # one entry per modality index, all forced to "nonCT"
        for i in range(len(modalities)):
            schemes[i] = "nonCT"
        return schemes
| 1,765
| 39.136364
| 117
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/experiment_planning/alternative_experiment_planning/normalization/experiment_planner_3DUNet_CT2.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from nnunet.experiment_planning.experiment_planner_baseline_3DUNet import ExperimentPlanner
from nnunet.paths import *
class ExperimentPlannerCT2(ExperimentPlanner):
    """
    preprocesses CT data with the "CT2" normalization.

    (clip range comes from training set and is the 0.5 and 99.5 percentile of intensities in foreground)
    CT = clip to range, then normalize with global mn and sd (computed on foreground in training set)
    CT2 = clip to range, normalize each case separately with its own mn and std (computed within the area that was in clip_range)
    """

    def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
        super(ExperimentPlannerCT2, self).__init__(folder_with_cropped_data, preprocessed_output_folder)
        self.data_identifier = "nnUNet_CT2"
        self.plans_fname = join(self.preprocessed_output_folder, "nnUNetPlans" + "CT2_plans_3D.pkl")

    def determine_normalization_scheme(self):
        """Map each modality index to "CT2" for CT modalities, "nonCT" otherwise."""
        modalities = self.dataset_properties['modalities']
        # build the (index -> scheme) mapping in one pass over the modalities
        return OrderedDict(
            (idx, "CT2" if modalities[idx] == "CT" else "nonCT")
            for idx in range(len(modalities))
        )
| 2,016
| 42.847826
| 129
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/experiment_planning/alternative_experiment_planning/pooling_and_convs/experiment_planner_baseline_3DUNet_allConv3x3.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
import numpy as np
from nnunet.experiment_planning.common_utils import get_pool_and_conv_props_poolLateV2
from nnunet.experiment_planning.experiment_planner_baseline_3DUNet import ExperimentPlanner
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.paths import *
class ExperimentPlannerAllConv3x3(ExperimentPlanner):
    # Planner variant that forces every convolution kernel in the network to
    # 3x3x3, instead of allowing 1x3x3 kernels on anisotropic axes.

    def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
        super(ExperimentPlannerAllConv3x3, self).__init__(folder_with_cropped_data, preprocessed_output_folder)
        # separate plans file so this variant does not overwrite the default plans
        self.plans_fname = join(self.preprocessed_output_folder,
                                "nnUNetPlans" + "allConv3x3_plans_3D.pkl")

    def get_properties_for_stage(self, current_spacing, original_spacing, original_shape, num_cases,
                                 num_modalities, num_classes):
        """
        Computation of input patch size starts out with the new median shape (in voxels) of a dataset. This is
        opposed to prior experiments where I based it on the median size in mm. The rationale behind this is that
        for some organ of interest the acquisition method will most likely be chosen such that the field of view and
        voxel resolution go hand in hand to show the doctor what they need to see. This assumption may be violated
        for some modalities with anisotropy (cine MRI) but we will have t live with that. In future experiments I
        will try to 1) base input patch size match aspect ratio of input size in mm (instead of voxels) and 2) to
        try to enforce that we see the same 'distance' in all directions (try to maintain equal size in mm of patch)

        The patches created here attempt keep the aspect ratio of the new_median_shape

        :param current_spacing:
        :param original_spacing:
        :param original_shape:
        :param num_cases:
        :return: plan dict for this stage (patch size, batch size, pooling/conv configuration)
        """
        # median dataset shape after resampling to current_spacing
        new_median_shape = np.round(original_spacing / current_spacing * original_shape).astype(int)
        dataset_num_voxels = np.prod(new_median_shape) * num_cases

        # the next line is what we had before as a default. The patch size had the same aspect ratio as the median shape of a patient. We swapped t
        # input_patch_size = new_median_shape

        # compute how many voxels are one mm
        input_patch_size = 1 / np.array(current_spacing)

        # normalize voxels per mm
        input_patch_size /= input_patch_size.mean()

        # create an isotropic patch of size 512x512x512mm
        input_patch_size *= 1 / min(input_patch_size) * 512  # to get a starting value
        input_patch_size = np.round(input_patch_size).astype(int)

        # clip it to the median shape of the dataset because patches larger then that make not much sense
        input_patch_size = [min(i, j) for i, j in zip(input_patch_size, new_median_shape)]

        # derive pooling / conv configuration for the candidate patch size
        network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, \
        shape_must_be_divisible_by = get_pool_and_conv_props_poolLateV2(input_patch_size,
                                                                        self.unet_featuremap_min_edge_length,
                                                                        self.unet_max_numpool,
                                                                        current_spacing)

        # ref is the VRAM budget; 'here' the estimated consumption of the candidate
        ref = Generic_UNet.use_this_for_batch_size_computation_3D
        here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis,
                                                            self.unet_base_num_features,
                                                            self.unet_max_num_filters, num_modalities,
                                                            num_classes,
                                                            pool_op_kernel_sizes, conv_per_stage=self.conv_per_stage)
        while here > ref:
            # shrink the axis whose patch extent overshoots the median shape the most
            axis_to_be_reduced = np.argsort(new_shp / new_median_shape)[-1]

            tmp = deepcopy(new_shp)
            tmp[axis_to_be_reduced] -= shape_must_be_divisible_by[axis_to_be_reduced]
            _, _, _, _, shape_must_be_divisible_by_new = \
                get_pool_and_conv_props_poolLateV2(tmp,
                                                   self.unet_featuremap_min_edge_length,
                                                   self.unet_max_numpool,
                                                   current_spacing)
            new_shp[axis_to_be_reduced] -= shape_must_be_divisible_by_new[axis_to_be_reduced]

            # we have to recompute numpool now:
            network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, \
            shape_must_be_divisible_by = get_pool_and_conv_props_poolLateV2(new_shp,
                                                                            self.unet_featuremap_min_edge_length,
                                                                            self.unet_max_numpool,
                                                                            current_spacing)

            here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis,
                                                                self.unet_base_num_features,
                                                                self.unet_max_num_filters, num_modalities,
                                                                num_classes, pool_op_kernel_sizes,
                                                                conv_per_stage=self.conv_per_stage)
        print(new_shp)

        input_patch_size = new_shp

        # scale batch size by leftover VRAM headroom (never below the default factor 1)
        batch_size = Generic_UNet.DEFAULT_BATCH_SIZE_3D  # This is what works with 128**3
        batch_size = int(np.floor(max(ref / here, 1) * batch_size))

        # check if batch size is too large
        max_batch_size = np.round(self.batch_size_covers_max_percent_of_dataset * dataset_num_voxels /
                                  np.prod(input_patch_size, dtype=np.int64)).astype(int)
        max_batch_size = max(max_batch_size, self.unet_min_batch_size)
        batch_size = min(batch_size, max_batch_size)

        # enable pseudo-2D augmentation when the patch is strongly anisotropic w.r.t. axis 0
        do_dummy_2D_data_aug = (max(input_patch_size) / input_patch_size[
            0]) > self.anisotropy_threshold

        # the defining feature of this planner: overwrite every kernel with all-3 entries
        for s in range(len(conv_kernel_sizes)):
            conv_kernel_sizes[s] = [3 for _ in conv_kernel_sizes[s]]

        plan = {
            'batch_size': batch_size,
            'num_pool_per_axis': network_num_pool_per_axis,
            'patch_size': input_patch_size,
            'median_patient_size_in_voxels': new_median_shape,
            'current_spacing': current_spacing,
            'original_spacing': original_spacing,
            'do_dummy_2D_data_aug': do_dummy_2D_data_aug,
            'pool_op_kernel_sizes': pool_op_kernel_sizes,
            'conv_kernel_sizes': conv_kernel_sizes,
        }
        return plan

    def run_preprocessing(self, num_threads):
        # intentionally a no-op: this planner is for planning experiments only
        pass
| 7,689
| 53.928571
| 147
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/experiment_planning/alternative_experiment_planning/pooling_and_convs/experiment_planner_baseline_3DUNet_poolBasedOnSpacing.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
import numpy as np
from nnunet.experiment_planning.common_utils import get_pool_and_conv_props
from nnunet.experiment_planning.experiment_planner_baseline_3DUNet import ExperimentPlanner
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.paths import *
class ExperimentPlannerPoolBasedOnSpacing(ExperimentPlanner):
    # Planner variant that decides pooling order from voxel spacing
    # (via get_pool_and_conv_props) instead of always pooling late.

    def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
        super(ExperimentPlannerPoolBasedOnSpacing, self).__init__(folder_with_cropped_data, preprocessed_output_folder)
        self.data_identifier = "nnUNetData_poolBasedOnSpacing"
        # separate plans file so this variant does not overwrite the default plans
        self.plans_fname = join(self.preprocessed_output_folder,
                                "nnUNetPlans" + "poolBasedOnSpacing_plans_3D.pkl")

    def get_properties_for_stage(self, current_spacing, original_spacing, original_shape, num_cases,
                                 num_modalities, num_classes):
        """
        ExperimentPlanner configures pooling so that we pool late. Meaning that if the number of pooling per axis is
        (2, 3, 3), then the first pooling operation will always pool axes 1 and 2 and not 0, irrespective of spacing.
        This can cause a larger memory footprint, so it can be beneficial to revise this.

        Here we are pooling based on the spacing of the data.

        :return: plan dict for this stage (patch size, batch size, pooling/conv configuration)
        """
        # median dataset shape after resampling to current_spacing
        new_median_shape = np.round(original_spacing / current_spacing * original_shape).astype(int)
        dataset_num_voxels = np.prod(new_median_shape) * num_cases

        # the next line is what we had before as a default. The patch size had the same aspect ratio as the median shape of a patient. We swapped t
        # input_patch_size = new_median_shape

        # compute how many voxels are one mm
        input_patch_size = 1 / np.array(current_spacing)

        # normalize voxels per mm
        input_patch_size /= input_patch_size.mean()

        # create an isotropic patch of size 512x512x512mm
        input_patch_size *= 1 / min(input_patch_size) * 512  # to get a starting value
        input_patch_size = np.round(input_patch_size).astype(int)

        # clip it to the median shape of the dataset because patches larger then that make not much sense
        input_patch_size = [min(i, j) for i, j in zip(input_patch_size, new_median_shape)]

        # note: unlike the base planner this uses get_pool_and_conv_props, which
        # orders pooling operations according to current_spacing
        network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, \
        shape_must_be_divisible_by = get_pool_and_conv_props(current_spacing, input_patch_size,
                                                             self.unet_featuremap_min_edge_length,
                                                             self.unet_max_numpool)

        # ref is the VRAM budget; 'here' the estimated consumption of the candidate
        ref = Generic_UNet.use_this_for_batch_size_computation_3D
        here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis,
                                                            self.unet_base_num_features,
                                                            self.unet_max_num_filters, num_modalities,
                                                            num_classes,
                                                            pool_op_kernel_sizes, conv_per_stage=self.conv_per_stage)
        while here > ref:
            # shrink the axis whose patch extent overshoots the median shape the most
            axis_to_be_reduced = np.argsort(new_shp / new_median_shape)[-1]

            tmp = deepcopy(new_shp)
            tmp[axis_to_be_reduced] -= shape_must_be_divisible_by[axis_to_be_reduced]
            _, _, _, _, shape_must_be_divisible_by_new = \
                get_pool_and_conv_props(current_spacing, tmp,
                                        self.unet_featuremap_min_edge_length,
                                        self.unet_max_numpool,
                                        )
            new_shp[axis_to_be_reduced] -= shape_must_be_divisible_by_new[axis_to_be_reduced]

            # we have to recompute numpool now:
            network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, \
            shape_must_be_divisible_by = get_pool_and_conv_props(current_spacing, new_shp,
                                                                 self.unet_featuremap_min_edge_length,
                                                                 self.unet_max_numpool,
                                                                 )

            here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis,
                                                                self.unet_base_num_features,
                                                                self.unet_max_num_filters, num_modalities,
                                                                num_classes, pool_op_kernel_sizes,
                                                                conv_per_stage=self.conv_per_stage)
        print(new_shp)

        input_patch_size = new_shp

        # scale batch size by leftover VRAM headroom (never below the default factor 1)
        batch_size = Generic_UNet.DEFAULT_BATCH_SIZE_3D  # This is what works with 128**3
        batch_size = int(np.floor(max(ref / here, 1) * batch_size))

        # check if batch size is too large
        max_batch_size = np.round(self.batch_size_covers_max_percent_of_dataset * dataset_num_voxels /
                                  np.prod(input_patch_size, dtype=np.int64)).astype(int)
        max_batch_size = max(max_batch_size, self.unet_min_batch_size)
        batch_size = min(batch_size, max_batch_size)

        # enable pseudo-2D augmentation when the patch is strongly anisotropic w.r.t. axis 0
        do_dummy_2D_data_aug = (max(input_patch_size) / input_patch_size[
            0]) > self.anisotropy_threshold

        plan = {
            'batch_size': batch_size,
            'num_pool_per_axis': network_num_pool_per_axis,
            'patch_size': input_patch_size,
            'median_patient_size_in_voxels': new_median_shape,
            'current_spacing': current_spacing,
            'original_spacing': original_spacing,
            'do_dummy_2D_data_aug': do_dummy_2D_data_aug,
            'pool_op_kernel_sizes': pool_op_kernel_sizes,
            'conv_kernel_sizes': conv_kernel_sizes,
        }
        return plan
| 6,758
| 53.072
| 147
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/experiment_planning/alternative_experiment_planning/patch_size/experiment_planner_3DUNet_isotropic_in_voxels.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
import numpy as np
from nnunet.experiment_planning.common_utils import get_pool_and_conv_props_poolLateV2
from nnunet.experiment_planning.experiment_planner_baseline_3DUNet import ExperimentPlanner
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.paths import *
class ExperimentPlanner3D_IsoPatchesInVoxels(ExperimentPlanner):
    """
    patches that are isotropic in the number of voxels (not mm), such as 128x128x128 allow more voxels to be processed
    at once because we don't have to do annoying pooling stuff

    CAREFUL!
    this one does not support transpose_forward and transpose_backward
    """

    def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
        super(ExperimentPlanner3D_IsoPatchesInVoxels, self).__init__(folder_with_cropped_data, preprocessed_output_folder)
        self.data_identifier = "nnUNetData_isoPatchesInVoxels"
        # separate plans file so this variant does not overwrite the default plans
        self.plans_fname = join(self.preprocessed_output_folder, "nnUNetPlans" + "fixedisoPatchesInVoxels_plans_3D.pkl")

    def get_properties_for_stage(self, current_spacing, original_spacing, original_shape, num_cases,
                                 num_modalities, num_classes):
        """
        Start from the dataset's median shape (in voxels) and iteratively
        shrink the largest axis until the estimated VRAM consumption fits the
        budget, driving the patch towards voxel-isotropy.

        :return: plan dict for this stage (patch size, batch size, pooling/conv configuration)
        """
        # median dataset shape after resampling to current_spacing
        new_median_shape = np.round(original_spacing / current_spacing * original_shape).astype(int)
        dataset_num_voxels = np.prod(new_median_shape) * num_cases

        # candidate patch starts at the full median shape (shrunk below as needed)
        input_patch_size = new_median_shape

        network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, \
        shape_must_be_divisible_by = get_pool_and_conv_props_poolLateV2(input_patch_size,
                                                                        self.unet_featuremap_min_edge_length,
                                                                        self.unet_max_numpool,
                                                                        current_spacing)

        # ref is the VRAM budget; 'here' the estimated consumption of the candidate
        ref = Generic_UNet.use_this_for_batch_size_computation_3D
        here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis,
                                                            self.unet_base_num_features,
                                                            self.unet_max_num_filters, num_modalities,
                                                            num_classes,
                                                            pool_op_kernel_sizes, conv_per_stage=self.conv_per_stage)
        while here > ref:
            # find the largest axis. If patch is isotropic, pick the axis with the largest spacing
            if len(np.unique(new_shp)) == 1:
                axis_to_be_reduced = np.argsort(current_spacing)[-1]
            else:
                axis_to_be_reduced = np.argsort(new_shp)[-1]

            tmp = deepcopy(new_shp)
            tmp[axis_to_be_reduced] -= shape_must_be_divisible_by[axis_to_be_reduced]
            _, _, _, _, shape_must_be_divisible_by_new = \
                get_pool_and_conv_props_poolLateV2(tmp,
                                                   self.unet_featuremap_min_edge_length,
                                                   self.unet_max_numpool,
                                                   current_spacing)
            new_shp[axis_to_be_reduced] -= shape_must_be_divisible_by_new[axis_to_be_reduced]

            # we have to recompute numpool now:
            network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, \
            shape_must_be_divisible_by = get_pool_and_conv_props_poolLateV2(new_shp,
                                                                            self.unet_featuremap_min_edge_length,
                                                                            self.unet_max_numpool,
                                                                            current_spacing)

            here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis,
                                                                self.unet_base_num_features,
                                                                self.unet_max_num_filters, num_modalities,
                                                                num_classes, pool_op_kernel_sizes,
                                                                conv_per_stage=self.conv_per_stage)
        print(new_shp)

        input_patch_size = new_shp

        # scale batch size by leftover VRAM headroom (never below the default factor 1)
        batch_size = Generic_UNet.DEFAULT_BATCH_SIZE_3D  # This is what works with 128**3
        batch_size = int(np.floor(max(ref / here, 1) * batch_size))

        # check if batch size is too large
        max_batch_size = np.round(self.batch_size_covers_max_percent_of_dataset * dataset_num_voxels /
                                  np.prod(input_patch_size, dtype=np.int64)).astype(int)
        max_batch_size = max(max_batch_size, self.unet_min_batch_size)
        batch_size = min(batch_size, max_batch_size)

        # enable pseudo-2D augmentation when the patch is strongly anisotropic w.r.t. axis 0
        do_dummy_2D_data_aug = (max(input_patch_size) / input_patch_size[
            0]) > self.anisotropy_threshold

        plan = {
            'batch_size': batch_size,
            'num_pool_per_axis': network_num_pool_per_axis,
            'patch_size': input_patch_size,
            'median_patient_size_in_voxels': new_median_shape,
            'current_spacing': current_spacing,
            'original_spacing': original_spacing,
            'do_dummy_2D_data_aug': do_dummy_2D_data_aug,
            'pool_op_kernel_sizes': pool_op_kernel_sizes,
            'conv_kernel_sizes': conv_kernel_sizes,
        }
        return plan
| 6,300
| 53.318966
| 122
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/experiment_planning/alternative_experiment_planning/patch_size/experiment_planner_3DUNet_isotropic_in_mm.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
import numpy as np
from nnunet.experiment_planning.common_utils import get_pool_and_conv_props_poolLateV2
from nnunet.experiment_planning.experiment_planner_baseline_3DUNet import ExperimentPlanner
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.paths import *
class ExperimentPlannerIso(ExperimentPlanner):
    """
    attempts to create patches that have an isotropic size (in mm, not voxels)

    CAREFUL!
    this one does not support transpose_forward and transpose_backward
    """

    def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
        super().__init__(folder_with_cropped_data, preprocessed_output_folder)
        # separate plans file so this variant does not overwrite the default plans
        self.plans_fname = join(self.preprocessed_output_folder, "nnUNetPlans" + "fixedisoPatchesInmm_plans_3D.pkl")
        self.data_identifier = "nnUNet_isoPatchesInmm"

    def get_properties_for_stage(self, current_spacing, original_spacing, original_shape, num_cases,
                                 num_modalities, num_classes):
        """
        Like the base planner, but when shrinking the patch to fit into VRAM
        it always reduces the axis with the largest physical extent (in mm),
        pushing the patch towards mm-isotropy.

        :return: plan dict for this stage (patch size, batch size, pooling/conv configuration)
        """
        # median dataset shape after resampling to current_spacing
        new_median_shape = np.round(original_spacing / current_spacing * original_shape).astype(int)
        dataset_num_voxels = np.prod(new_median_shape) * num_cases

        # the next line is what we had before as a default. The patch size had the same aspect ratio as the median shape of a patient. We swapped t
        # input_patch_size = new_median_shape

        # compute how many voxels are one mm
        input_patch_size = 1 / np.array(current_spacing)

        # normalize voxels per mm
        input_patch_size /= input_patch_size.mean()

        # create an isotropic patch of size 512x512x512mm
        input_patch_size *= 1 / min(input_patch_size) * 512  # to get a starting value
        input_patch_size = np.round(input_patch_size).astype(int)

        # clip it to the median shape of the dataset because patches larger then that make not much sense
        input_patch_size = [min(i, j) for i, j in zip(input_patch_size, new_median_shape)]

        network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, \
        shape_must_be_divisible_by = get_pool_and_conv_props_poolLateV2(input_patch_size,
                                                                        self.unet_featuremap_min_edge_length,
                                                                        self.unet_max_numpool,
                                                                        current_spacing)

        # ref is the VRAM budget; 'here' the estimated consumption of the candidate
        ref = Generic_UNet.use_this_for_batch_size_computation_3D
        here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis,
                                                            self.unet_base_num_features,
                                                            self.unet_max_num_filters, num_modalities,
                                                            num_classes,
                                                            pool_op_kernel_sizes, conv_per_stage=self.conv_per_stage)
        while here > ref:
            # here is the difference to ExperimentPlanner. In the old version we made the aspect ratio match
            # between patch and new_median_shape, regardless of spacing. It could be better to enforce isotropy
            # (in mm) instead
            current_patch_in_mm = new_shp * current_spacing
            axis_to_be_reduced = np.argsort(current_patch_in_mm)[-1]

            # from here on it's the same as before
            tmp = deepcopy(new_shp)
            tmp[axis_to_be_reduced] -= shape_must_be_divisible_by[axis_to_be_reduced]
            _, _, _, _, shape_must_be_divisible_by_new = \
                get_pool_and_conv_props_poolLateV2(tmp,
                                                   self.unet_featuremap_min_edge_length,
                                                   self.unet_max_numpool,
                                                   current_spacing)
            new_shp[axis_to_be_reduced] -= shape_must_be_divisible_by_new[axis_to_be_reduced]

            # we have to recompute numpool now:
            network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, new_shp, \
            shape_must_be_divisible_by = get_pool_and_conv_props_poolLateV2(new_shp,
                                                                            self.unet_featuremap_min_edge_length,
                                                                            self.unet_max_numpool,
                                                                            current_spacing)

            here = Generic_UNet.compute_approx_vram_consumption(new_shp, network_num_pool_per_axis,
                                                                self.unet_base_num_features,
                                                                self.unet_max_num_filters, num_modalities,
                                                                num_classes, pool_op_kernel_sizes,
                                                                conv_per_stage=self.conv_per_stage)
        print(new_shp)

        input_patch_size = new_shp

        # scale batch size by leftover VRAM headroom (never below the default factor 1)
        batch_size = Generic_UNet.DEFAULT_BATCH_SIZE_3D  # This is what works with 128**3
        batch_size = int(np.floor(max(ref / here, 1) * batch_size))

        # check if batch size is too large
        max_batch_size = np.round(self.batch_size_covers_max_percent_of_dataset * dataset_num_voxels /
                                  np.prod(input_patch_size, dtype=np.int64)).astype(int)
        max_batch_size = max(max_batch_size, self.unet_min_batch_size)
        batch_size = min(batch_size, max_batch_size)

        # enable pseudo-2D augmentation when the patch is strongly anisotropic w.r.t. axis 0
        do_dummy_2D_data_aug = (max(input_patch_size) / input_patch_size[
            0]) > self.anisotropy_threshold

        plan = {
            'batch_size': batch_size,
            'num_pool_per_axis': network_num_pool_per_axis,
            'patch_size': input_patch_size,
            'median_patient_size_in_voxels': new_median_shape,
            'current_spacing': current_spacing,
            'original_spacing': original_spacing,
            'do_dummy_2D_data_aug': do_dummy_2D_data_aug,
            'pool_op_kernel_sizes': pool_op_kernel_sizes,
            'conv_kernel_sizes': conv_kernel_sizes,
        }
        return plan
| 7,007
| 53.325581
| 147
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/experiment_planning/alternative_experiment_planning/target_spacing/experiment_planner_baseline_3DUNet_targetSpacingForAnisoAxis.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from nnunet.experiment_planning.experiment_planner_baseline_3DUNet import ExperimentPlanner
from nnunet.paths import *
class ExperimentPlannerTargetSpacingForAnisoAxis(ExperimentPlanner):
    """Planner that lowers the target spacing of a strongly anisotropic axis.

    Instead of always taking the median spacing per axis, this planner detects
    datasets where one axis has both much coarser spacing and far fewer voxels
    than the others, and uses the 10th percentile of the spacings on that axis
    to reduce interpolation artifacts on thin-slice cases.
    """

    def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
        super().__init__(folder_with_cropped_data, preprocessed_output_folder)
        self.data_identifier = "nnUNetData_targetSpacingForAnisoAxis"
        # separate plans file so this variant does not overwrite the default plans
        self.plans_fname = join(self.preprocessed_output_folder,
                                "nnUNetPlans" + "targetSpacingForAnisoAxis_plans_3D.pkl")

    def get_target_spacing(self):
        """
        per default we use the 50th percentile=median for the target spacing. Higher spacing results in smaller data
        and thus faster and easier training. Smaller spacing results in larger data and thus longer and harder training

        For some datasets the median is not a good choice. Those are the datasets where the spacing is very anisotropic
        (for example ACDC with (10, 1.5, 1.5)). These datasets still have examples with a pacing of 5 or 6 mm in the low
        resolution axis. Choosing the median here will result in bad interpolation artifacts that can substantially
        impact performance (due to the low number of slices).

        :return: per-axis target spacing (np.ndarray)
        """
        spacings = self.dataset_properties['all_spacings']
        sizes = self.dataset_properties['all_sizes']

        target = np.percentile(np.vstack(spacings), self.target_spacing_percentile, 0)
        target_size = np.percentile(np.vstack(sizes), self.target_spacing_percentile, 0)
        # NOTE: the original code also computed target_size_mm = target * target_size
        # here; it was never used (only referenced by broken commented-out code),
        # so it has been removed.

        # we need to identify datasets for which a different target spacing could be beneficial. These datasets have
        # the following properties:
        # - one axis which much lower resolution than the others
        # - the lowres axis has much less voxels than the others
        worst_spacing_axis = np.argmax(target)
        other_axes = [i for i in range(len(target)) if i != worst_spacing_axis]
        other_spacings = [target[i] for i in other_axes]
        other_sizes = [target_size[i] for i in other_axes]

        has_aniso_spacing = target[worst_spacing_axis] > (self.anisotropy_threshold * max(other_spacings))
        has_aniso_voxels = target_size[worst_spacing_axis] * self.anisotropy_threshold < max(other_sizes)

        if has_aniso_spacing and has_aniso_voxels:
            # use a low percentile of that axis' spacings instead of the median
            spacings_of_that_axis = np.vstack(spacings)[:, worst_spacing_axis]
            target_spacing_of_that_axis = np.percentile(spacings_of_that_axis, 10)
            target[worst_spacing_axis] = target_spacing_of_that_axis
        return target
| 3,624
| 55.640625
| 120
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/experiment_planning/old/old_plan_and_preprocess_task.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.experiment_planning.utils import split_4d, crop, analyze_dataset, plan_and_preprocess
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.paths import nnUNet_raw_data
if __name__ == "__main__":
    import argparse

    def int_flag_to_bool(value, name):
        """Translate a 0/1 command line integer into a bool; reject anything else."""
        if value == 0:
            return False
        if value == 1:
            return True
        # bug fix: the original copy-pasted validation reported "override" even
        # when no_preprocessing was the invalid argument
        raise ValueError("only 0 or 1 allowed for %s" % name)

    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--task', type=str, help="task name. There must be a matching folder in "
                                                       "raw_dataset_dir", required=True)
    parser.add_argument('-pl', '--processes_lowres', type=int, default=8, help='number of processes used for '
                                                                               'preprocessing 3d_lowres data, image '
                                                                               'splitting and image cropping '
                                                                               'Default: 8. The distinction between '
                                                                               'processes_lowres and processes_fullres '
                                                                               'is necessary because preprocessing '
                                                                               'at full resolution needs a lot of '
                                                                               'RAM', required=False)
    parser.add_argument('-pf', '--processes_fullres', type=int, default=8, help='number of processes used for '
                                                                                'preprocessing 2d and 3d_fullres '
                                                                                'data. Default: 3', required=False)
    parser.add_argument('-o', '--override', type=int, default=0, help="set this to 1 if you want to override "
                                                                      "cropped data and intensityproperties. Default: 0",
                        required=False)
    parser.add_argument('-s', '--use_splitted', type=int, default=1, help='1 = use splitted data if already present ('
                                                                          'skip split_4d). 0 = do splitting again. '
                                                                          'It is save to set this to 1 at all times '
                                                                          'unless the dataset was updated in the '
                                                                          'meantime. Default: 1', required=False)
    parser.add_argument('-no_preprocessing', type=int, default=0, help='debug only. If set to 1 this will run only'
                                                                       'experiment planning and not run the '
                                                                       'preprocessing')

    args = parser.parse_args()
    task = args.task
    processes_lowres = args.processes_lowres
    processes_fullres = args.processes_fullres

    # validate the 0/1 integer flags in the same order as before so that the
    # first invalid flag is the one reported
    override = int_flag_to_bool(args.override, "override")
    no_preprocessing = int_flag_to_bool(args.no_preprocessing, "no_preprocessing")
    use_splitted = int_flag_to_bool(args.use_splitted, "use_splitted")

    if task == "all":
        # run the full pipeline for every task folder found in the raw data dir
        all_tasks = subdirs(nnUNet_raw_data, prefix="Task", join=False)
        for t in all_tasks:
            crop(t, override=override, num_threads=processes_lowres)
            analyze_dataset(t, override=override, collect_intensityproperties=True, num_processes=processes_lowres)
            plan_and_preprocess(t, processes_lowres, processes_fullres, no_preprocessing)
    else:
        # split 4D data first unless the splitted folder already exists and may be reused
        if not use_splitted or not isdir(join(nnUNet_raw_data, task)):
            print("splitting task ", task)
            split_4d(task)

        crop(task, override=override, num_threads=processes_lowres)
        analyze_dataset(task, override, collect_intensityproperties=True, num_processes=processes_lowres)
        plan_and_preprocess(task, processes_lowres, processes_fullres, no_preprocessing)
| 5,224
| 57.055556
| 121
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/inference/segmentation_export.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from copy import deepcopy
from typing import Union, Tuple
import numpy as np
import SimpleITK as sitk
from batchgenerators.augmentations.utils import resize_segmentation
from nnunet.preprocessing.preprocessing import get_lowres_axis, get_do_separate_z, resample_data_or_seg
from batchgenerators.utilities.file_and_folder_operations import *
def save_segmentation_nifti_from_softmax(segmentation_softmax: Union[str, np.ndarray], out_fname: str,
                                         properties_dict: dict, order: int = 1,
                                         region_class_order: Tuple[Tuple[int]] = None,
                                         seg_postprogess_fn: callable = None, seg_postprocess_args: tuple = None,
                                         resampled_npz_fname: str = None,
                                         non_postprocessed_fname: str = None, force_separate_z: bool = None,
                                         interpolation_order_z: int = 0, verbose: bool = True):
    """
    This is a utility for writing segmentations to nifto and npz. It requires the data to have been preprocessed by
    GenericPreprocessor because it depends on the property dictionary output (dct) to know the geometry of the original
    data. segmentation_softmax does not have to have the same size in pixels as the original data, it will be
    resampled to match that. This is generally useful because the spacings our networks operate on are most of the time
    not the native spacings of the image data.
    If seg_postprogess_fn is not None then seg_postprogess_fnseg_postprogess_fn(segmentation, *seg_postprocess_args)
    will be called before nifto export
    There is a problem with python process communication that prevents us from communicating obejcts
    larger than 2 GB between processes (basically when the length of the pickle string that will be sent is
    communicated by the multiprocessing.Pipe object then the placeholder (\%i I think) does not allow for long
    enough strings (lol). This could be fixed by changing i to l (for long) but that would require manually
    patching system python code.) We circumvent that problem here by saving softmax_pred to a npy file that will
    then be read (and finally deleted) by the Process. save_segmentation_nifti_from_softmax can take either
    filename or np.ndarray for segmentation_softmax and will handle this automatically
    :param segmentation_softmax: softmax array of shape (c, x, y, z) or path to a temporary .npy file holding it
        (the file is deleted after loading)
    :param out_fname: output nifti file name
    :param properties_dict: geometry/properties dict produced by preprocessing (crop bbox, itk geometry, spacings)
    :param order: interpolation order used when resampling the softmax back to the cropped original shape
    :param region_class_order: if not None, each softmax channel is thresholded at 0.5 and written with the
        corresponding label (region-based / sigmoid setup) instead of taking the argmax
    :param seg_postprogess_fn: optional callable applied to the final segmentation before export
    :param seg_postprocess_args: extra positional args for seg_postprogess_fn
    :param resampled_npz_fname: if not None, the resampled softmax (float16) and properties pkl are stored here
        (needed for ensembling)
    :param non_postprocessed_fname: if not None (and postprocessing is used), also export the segmentation
        without postprocessing to this file
    :param force_separate_z: if None then we dynamically decide how to resample along z, if True/False then always
    /never resample along z separately. Do not touch unless you know what you are doing
    :param interpolation_order_z: if separate z resampling is done then this is the order for resampling in z
    :param verbose:
    :return: None (writes files)
    """
    if verbose: print("force_separate_z:", force_separate_z, "interpolation order:", order)
    # segmentation_softmax may be a path to a temporary .npy file (see docstring); load it and clean it up
    if isinstance(segmentation_softmax, str):
        assert isfile(segmentation_softmax), "If isinstance(segmentation_softmax, str) then " \
                                             "isfile(segmentation_softmax) must be True"
        del_file = deepcopy(segmentation_softmax)
        segmentation_softmax = np.load(segmentation_softmax)
        os.remove(del_file)
    # first resample, then put result into bbox of cropping, then save
    current_shape = segmentation_softmax.shape
    shape_original_after_cropping = properties_dict.get('size_after_cropping')
    shape_original_before_cropping = properties_dict.get('original_size_of_raw_data')
    # current_spacing = dct.get('spacing_after_resampling')
    # original_spacing = dct.get('original_spacing')
    # only resample if the spatial shape (channels excluded) differs from the cropped original shape
    if np.any([i != j for i, j in zip(np.array(current_shape[1:]), np.array(shape_original_after_cropping))]):
        if force_separate_z is None:
            # decide automatically whether z should be resampled separately (anisotropic spacing heuristic)
            if get_do_separate_z(properties_dict.get('original_spacing')):
                do_separate_z = True
                lowres_axis = get_lowres_axis(properties_dict.get('original_spacing'))
            elif get_do_separate_z(properties_dict.get('spacing_after_resampling')):
                do_separate_z = True
                lowres_axis = get_lowres_axis(properties_dict.get('spacing_after_resampling'))
            else:
                do_separate_z = False
                lowres_axis = None
        else:
            do_separate_z = force_separate_z
            if do_separate_z:
                lowres_axis = get_lowres_axis(properties_dict.get('original_spacing'))
            else:
                lowres_axis = None
        if verbose: print("separate z:", do_separate_z, "lowres axis", lowres_axis)
        seg_old_spacing = resample_data_or_seg(segmentation_softmax, shape_original_after_cropping, is_seg=False,
                                               axis=lowres_axis, order=order, do_separate_z=do_separate_z, cval=0,
                                               order_z=interpolation_order_z)
        # seg_old_spacing = resize_softmax_output(segmentation_softmax, shape_original_after_cropping, order=order)
    else:
        if verbose: print("no resampling necessary")
        seg_old_spacing = segmentation_softmax
    if resampled_npz_fname is not None:
        # store resampled softmax (as float16 to save disk space) for later ensembling
        np.savez_compressed(resampled_npz_fname, softmax=seg_old_spacing.astype(np.float16))
        # this is needed for ensembling if the nonlinearity is sigmoid
        if region_class_order is not None:
            properties_dict['regions_class_order'] = region_class_order
        save_pickle(properties_dict, resampled_npz_fname[:-4] + ".pkl")
    # convert softmax to a hard segmentation: argmax for mutually exclusive classes, per-channel
    # thresholding at 0.5 when region_class_order is given (later entries overwrite earlier ones)
    if region_class_order is None:
        seg_old_spacing = seg_old_spacing.argmax(0)
    else:
        seg_old_spacing_final = np.zeros(seg_old_spacing.shape[1:])
        for i, c in enumerate(region_class_order):
            seg_old_spacing_final[seg_old_spacing[i] > 0.5] = c
        seg_old_spacing = seg_old_spacing_final
    bbox = properties_dict.get('crop_bbox')
    if bbox is not None:
        # paste the cropped-region segmentation back into an empty array of the original (uncropped) size;
        # the range(3) loop implies 3D data (three spatial axes) with a [z, y, x]-style bbox
        seg_old_size = np.zeros(shape_original_before_cropping)
        for c in range(3):
            # clip the bbox end so the paste never exceeds the original image bounds
            bbox[c][1] = np.min((bbox[c][0] + seg_old_spacing.shape[c], shape_original_before_cropping[c]))
        seg_old_size[bbox[0][0]:bbox[0][1],
        bbox[1][0]:bbox[1][1],
        bbox[2][0]:bbox[2][1]] = seg_old_spacing
    else:
        seg_old_size = seg_old_spacing
    if seg_postprogess_fn is not None:
        # np.copy so the postprocessing function cannot corrupt seg_old_size (still needed below)
        seg_old_size_postprocessed = seg_postprogess_fn(np.copy(seg_old_size), *seg_postprocess_args)
    else:
        seg_old_size_postprocessed = seg_old_size
    # restore the original itk geometry so the exported nifti aligns with the raw image
    seg_resized_itk = sitk.GetImageFromArray(seg_old_size_postprocessed.astype(np.uint8))
    seg_resized_itk.SetSpacing(properties_dict['itk_spacing'])
    seg_resized_itk.SetOrigin(properties_dict['itk_origin'])
    seg_resized_itk.SetDirection(properties_dict['itk_direction'])
    sitk.WriteImage(seg_resized_itk, out_fname)
    # optionally also export the segmentation without postprocessing applied
    if (non_postprocessed_fname is not None) and (seg_postprogess_fn is not None):
        seg_resized_itk = sitk.GetImageFromArray(seg_old_size.astype(np.uint8))
        seg_resized_itk.SetSpacing(properties_dict['itk_spacing'])
        seg_resized_itk.SetOrigin(properties_dict['itk_origin'])
        seg_resized_itk.SetDirection(properties_dict['itk_direction'])
        sitk.WriteImage(seg_resized_itk, non_postprocessed_fname)
def save_segmentation_nifti(segmentation, out_fname, dct, order=1, force_separate_z=None, order_z=0):
    """
    faster and uses less ram than save_segmentation_nifti_from_softmax, but maybe less precise and also does not support
    softmax export (which is needed for ensembling). So it's a niche function that may be useful in some cases.
    :param segmentation: np.ndarray with the (cropped) segmentation, or a path to a temporary .npy file holding it
        (the file will be deleted after loading)
    :param out_fname: output nifti file name
    :param dct: properties dict produced by preprocessing (crop bbox, itk geometry, spacings)
    :param order: interpolation order for the resampling back to the cropped original shape
    :param force_separate_z: None = decide automatically, True/False = always/never resample z separately
    :param order_z: interpolation order along z if z is resampled separately
    :return: None (writes out_fname)
    """
    print("force_separate_z:", force_separate_z, "interpolation order:", order)
    # suppress prints of the resampling machinery. Bugfix: wrap in try/finally so stdout is restored (and
    # the devnull handle closed) even when an exception is raised -- previously an error anywhere below
    # left sys.stdout pointing at devnull and leaked the file handle.
    devnull = open(os.devnull, 'w')
    sys.stdout = devnull
    try:
        if isinstance(segmentation, str):
            assert isfile(segmentation), "If isinstance(segmentation_softmax, str) then " \
                                         "isfile(segmentation_softmax) must be True"
            del_file = deepcopy(segmentation)
            segmentation = np.load(segmentation)
            os.remove(del_file)
        # first resample, then put result into bbox of cropping, then save
        current_shape = segmentation.shape
        shape_original_after_cropping = dct.get('size_after_cropping')
        shape_original_before_cropping = dct.get('original_size_of_raw_data')
        # current_spacing = dct.get('spacing_after_resampling')
        # original_spacing = dct.get('original_spacing')
        if np.any(np.array(current_shape) != np.array(shape_original_after_cropping)):
            if order == 0:
                # nearest neighbor: a plain segmentation resize is sufficient
                seg_old_spacing = resize_segmentation(segmentation, shape_original_after_cropping, 0, 0)
            else:
                if force_separate_z is None:
                    # decide automatically whether z should be resampled separately (anisotropic spacing)
                    if get_do_separate_z(dct.get('original_spacing')):
                        do_separate_z = True
                        lowres_axis = get_lowres_axis(dct.get('original_spacing'))
                    elif get_do_separate_z(dct.get('spacing_after_resampling')):
                        do_separate_z = True
                        lowres_axis = get_lowres_axis(dct.get('spacing_after_resampling'))
                    else:
                        do_separate_z = False
                        lowres_axis = None
                else:
                    do_separate_z = force_separate_z
                    if do_separate_z:
                        lowres_axis = get_lowres_axis(dct.get('original_spacing'))
                    else:
                        lowres_axis = None
                print("separate z:", do_separate_z, "lowres axis", lowres_axis)
                seg_old_spacing = resample_data_or_seg(segmentation[None], shape_original_after_cropping, is_seg=True,
                                                       axis=lowres_axis, order=order, do_separate_z=do_separate_z,
                                                       cval=0, order_z=order_z)[0]
        else:
            seg_old_spacing = segmentation
        bbox = dct.get('crop_bbox')
        if bbox is not None:
            # paste the cropped segmentation back into an array of the original (uncropped) size;
            # range(3) implies 3D data, consistent with the rest of this module
            seg_old_size = np.zeros(shape_original_before_cropping)
            for c in range(3):
                # clip the bbox end so the paste never exceeds the original image bounds
                bbox[c][1] = np.min((bbox[c][0] + seg_old_spacing.shape[c], shape_original_before_cropping[c]))
            seg_old_size[bbox[0][0]:bbox[0][1],
                         bbox[1][0]:bbox[1][1],
                         bbox[2][0]:bbox[2][1]] = seg_old_spacing
        else:
            seg_old_size = seg_old_spacing
        # restore the original itk geometry so the exported nifti aligns with the raw image
        seg_resized_itk = sitk.GetImageFromArray(seg_old_size.astype(np.uint8))
        seg_resized_itk.SetSpacing(dct['itk_spacing'])
        seg_resized_itk.SetOrigin(dct['itk_origin'])
        seg_resized_itk.SetDirection(dct['itk_direction'])
        sitk.WriteImage(seg_resized_itk, out_fname)
    finally:
        sys.stdout = sys.__stdout__
        devnull.close()
| 11,710
| 50.139738
| 120
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/inference/ensemble_predictions.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
from copy import deepcopy
from nnunet.inference.segmentation_export import save_segmentation_nifti_from_softmax
from batchgenerators.utilities.file_and_folder_operations import *
import numpy as np
from multiprocessing import Pool
from nnunet.postprocessing.connected_components import apply_postprocessing_to_folder, load_postprocessing
def merge_files(files, properties_files, out_file, override, store_npz):
    """
    Average the softmax predictions stored in several .npz files and export the result as a nifti.
    Does nothing when out_file already exists and override is False.
    """
    if not override and isfile(out_file):
        return
    # stack the softmax of every model along a new leading axis, then average over it
    stacked = np.vstack([np.load(f)['softmax'][None] for f in files])
    averaged = np.mean(stacked, 0)
    props = [load_pickle(f) for f in properties_files]
    reg_class_orders = [p.get('regions_class_order') for p in props]
    if all(o is None for o in reg_class_orders):
        regions_class_order = None
    else:
        # if reg_class_orders are not None then they must be the same in all pkls
        regions_class_order = reg_class_orders[0]
        for other in reg_class_orders[1:]:
            assert regions_class_order == other, 'If merging files with regions_class_order, the regions_class_orders of all ' \
                                                 'files must be the same. regions_class_order: %s, \n files: %s' % \
                                                 (str(reg_class_orders), str(files))
    # Softmax probabilities are already at target spacing so this will not do any resampling (resampling parameters
    # don't matter here)
    save_segmentation_nifti_from_softmax(averaged, out_file, props[0], 3, regions_class_order, None, None,
                                         force_separate_z=None)
    if store_npz:
        np.savez_compressed(out_file[:-7] + ".npz", softmax=averaged)
        save_pickle(props, out_file[:-7] + ".pkl")
def merge(folders, output_folder, threads, override=True, postprocessing_file=None, store_npz=False):
    """
    Ensemble the .npz predictions found in several folders: for every patient id present in all folders,
    average the softmax outputs and write a nifti into output_folder. If a postprocessing file is given,
    the raw merges go into a 'not_postprocessed' subfolder and postprocessing is applied afterwards.
    """
    maybe_mkdir_p(output_folder)
    if postprocessing_file is None:
        output_folder_orig = None
    else:
        # keep the user-facing folder for the postprocessed output, merge into a subfolder first
        output_folder_orig = deepcopy(output_folder)
        output_folder = join(output_folder, 'not_postprocessed')
        maybe_mkdir_p(output_folder)
    # collect the union of patient ids (npz file names without extension) over all folders
    all_npz = []
    for folder in folders:
        all_npz += subfiles(folder, suffix=".npz", join=False)
    patient_ids = np.unique([fname[:-4] for fname in all_npz])
    # every folder must provide npz + pkl for every patient, otherwise ensembling is impossible
    for folder in folders:
        assert all([isfile(join(folder, pid + ".npz")) for pid in patient_ids]), "Not all patient npz are available in " \
                                                                                 "all folders"
        assert all([isfile(join(folder, pid + ".pkl")) for pid in patient_ids]), "Not all patient pkl are available in " \
                                                                                 "all folders"
    files = []
    property_files = []
    out_files = []
    for pid in patient_ids:
        files.append([join(folder, pid + ".npz") for folder in folders])
        property_files.append([join(folder, pid + ".pkl") for folder in folders])
        out_files.append(join(output_folder, pid + ".nii.gz"))
    pool = Pool(threads)
    pool.starmap(merge_files, zip(files, property_files, out_files,
                                  [override] * len(out_files), [store_npz] * len(out_files)))
    pool.close()
    pool.join()
    if postprocessing_file is not None:
        for_which_classes, min_valid_obj_size = load_postprocessing(postprocessing_file)
        print('Postprocessing...')
        apply_postprocessing_to_folder(output_folder, output_folder_orig,
                                       for_which_classes, min_valid_obj_size, threads)
        shutil.copy(postprocessing_file, output_folder_orig)
def main():
    """Command line interface for merging npz predictions from several model output folders."""
    import argparse
    parser = argparse.ArgumentParser(description="This script will merge predictions (that were prdicted with the "
                                                 "-npz option!). You need to specify a postprocessing file so that "
                                                 "we know here what postprocessing must be applied. Failing to do so "
                                                 "will disable postprocessing")
    parser.add_argument('-f', '--folders', nargs='+', help="list of folders to merge. All folders must contain npz "
                                                           "files", required=True)
    parser.add_argument('-o', '--output_folder', help="where to save the results", required=True, type=str)
    parser.add_argument('-t', '--threads', help="number of threads used to saving niftis", required=False, default=2,
                        type=int)
    parser.add_argument('-pp', '--postprocessing_file', help="path to the file where the postprocessing configuration "
                                                             "is stored. If this is not provided then no postprocessing "
                                                             "will be made. It is strongly recommended to provide the "
                                                             "postprocessing file!",
                        required=False, type=str, default=None)
    parser.add_argument('--npz', action="store_true", required=False, help="stores npz and pkl")
    parsed = parser.parse_args()
    # hand the parsed namespace attributes straight to merge() instead of unpacking into locals first
    merge(parsed.folders, parsed.output_folder, parsed.threads, override=True,
          postprocessing_file=parsed.postprocessing_file, store_npz=parsed.npz)


if __name__ == "__main__":
    main()
| 6,300
| 47.844961
| 124
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/inference/predict_simple.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import torch
from nnunet.inference.predict import predict_from_folder
from nnunet.paths import default_plans_identifier, network_training_output_dir, default_cascade_trainer, default_trainer
from batchgenerators.utilities.file_and_folder_operations import join, isdir
from nnunet.utilities.task_name_id_conversion import convert_id_to_task_name
def main():
    """
    Command line entry point for running nnU-Net inference on a folder of nifti images with a single
    configuration (2d, 3d_lowres, 3d_fullres or 3d_cascade_fullres). For the cascade, the low resolution
    stage is predicted automatically if no precomputed low resolution segmentations are provided.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", '--input_folder', help="Must contain all modalities for each patient in the correct"
                                                     " order (same as training). Files must be named "
                                                     "CASENAME_XXXX.nii.gz where XXXX is the modality "
                                                     "identifier (0000, 0001, etc)", required=True)
    parser.add_argument('-o', "--output_folder", required=True, help="folder for saving predictions")
    # bugfix: this argument previously carried default=default_plans_identifier (copy-pasted from -p).
    # The default was dead code (required=True) but misleading; a task name default must not be a plans
    # identifier, so it was removed.
    parser.add_argument('-t', '--task_name', help='task name or task ID, required.',
                        required=True)
    parser.add_argument('-tr', '--trainer_class_name',
                        help='Name of the nnUNetTrainer used for 2D U-Net, full resolution 3D U-Net and low resolution '
                             'U-Net. The default is %s. If you are running inference with the cascade and the folder '
                             'pointed to by --lowres_segmentations does not contain the segmentation maps generated by '
                             'the low resolution U-Net then the low resolution segmentation maps will be automatically '
                             'generated. For this case, make sure to set the trainer class here that matches your '
                             '--cascade_trainer_class_name (this part can be ignored if defaults are used).'
                             % default_trainer,
                        required=False,
                        default=default_trainer)
    parser.add_argument('-ctr', '--cascade_trainer_class_name',
                        help="Trainer class name used for predicting the 3D full resolution U-Net part of the cascade."
                             "Default is %s" % default_cascade_trainer, required=False,
                        default=default_cascade_trainer)
    parser.add_argument('-m', '--model', help="2d, 3d_lowres, 3d_fullres or 3d_cascade_fullres. Default: 3d_fullres",
                        default="3d_fullres", required=False)
    parser.add_argument('-p', '--plans_identifier', help='do not touch this unless you know what you are doing',
                        default=default_plans_identifier, required=False)
    parser.add_argument('-f', '--folds', nargs='+', default='None',
                        help="folds to use for prediction. Default is None which means that folds will be detected "
                             "automatically in the model output folder")
    parser.add_argument('-z', '--save_npz', required=False, action='store_true',
                        help="use this if you want to ensemble these predictions with those of other models. Softmax "
                             "probabilities will be saved as compressed numpy arrays in output_folder and can be "
                             "merged between output_folders with nnUNet_ensemble_predictions")
    parser.add_argument('-l', '--lowres_segmentations', required=False, default='None',
                        help="if model is the highres stage of the cascade then you can use this folder to provide "
                             "predictions from the low resolution 3D U-Net. If this is left at default, the "
                             "predictions will be generated automatically (provided that the 3D low resolution U-Net "
                             "network weights are present")
    parser.add_argument("--part_id", type=int, required=False, default=0, help="Used to parallelize the prediction of "
                                                                               "the folder over several GPUs. If you "
                                                                               "want to use n GPUs to predict this "
                                                                               "folder you need to run this command "
                                                                               "n times with --part_id=0, ... n-1 and "
                                                                               "--num_parts=n (each with a different "
                                                                               "GPU (for example via "
                                                                               "CUDA_VISIBLE_DEVICES=X)")
    parser.add_argument("--num_parts", type=int, required=False, default=1,
                        help="Used to parallelize the prediction of "
                             "the folder over several GPUs. If you "
                             "want to use n GPUs to predict this "
                             "folder you need to run this command "
                             "n times with --part_id=0, ... n-1 and "
                             "--num_parts=n (each with a different "
                             "GPU (via "
                             "CUDA_VISIBLE_DEVICES=X)")
    parser.add_argument("--num_threads_preprocessing", required=False, default=6, type=int, help=
    "Determines many background processes will be used for data preprocessing. Reduce this if you "
    "run into out of memory (RAM) problems. Default: 6")
    parser.add_argument("--num_threads_nifti_save", required=False, default=2, type=int, help=
    "Determines many background processes will be used for segmentation export. Reduce this if you "
    "run into out of memory (RAM) problems. Default: 2")
    parser.add_argument("--disable_tta", required=False, default=False, action="store_true",
                        help="set this flag to disable test time data augmentation via mirroring. Speeds up inference "
                             "by roughly factor 4 (2D) or 8 (3D)")
    parser.add_argument("--overwrite_existing", required=False, default=False, action="store_true",
                        help="Set this flag if the target folder contains predictions that you would like to overwrite")
    parser.add_argument("--mode", type=str, default="normal", required=False, help="Hands off!")
    parser.add_argument("--all_in_gpu", type=str, default="None", required=False, help="can be None, False or True. "
                                                                                       "Do not touch.")
    parser.add_argument("--step_size", type=float, default=0.5, required=False, help="don't touch")
    # parser.add_argument("--interp_order", required=False, default=3, type=int,
    #                     help="order of interpolation for segmentations, has no effect if mode=fastest. Do not touch this.")
    # parser.add_argument("--interp_order_z", required=False, default=0, type=int,
    #                     help="order of interpolation along z is z is done differently. Do not touch this.")
    # parser.add_argument("--force_separate_z", required=False, default="None", type=str,
    #                     help="force_separate_z resampling. Can be None, True or False, has no effect if mode=fastest. "
    #                          "Do not touch this.")
    parser.add_argument('-chk',
                        help='checkpoint name, default: model_final_checkpoint',
                        required=False,
                        default='model_final_checkpoint')
    parser.add_argument('--disable_mixed_precision', default=False, action='store_true', required=False,
                        help='Predictions are done with mixed precision by default. This improves speed and reduces '
                             'the required vram. If you want to disable mixed precision you can set this flag. Note '
                             'that yhis is not recommended (mixed precision is ~2x faster!)')

    args = parser.parse_args()
    input_folder = args.input_folder
    output_folder = args.output_folder
    part_id = args.part_id
    num_parts = args.num_parts
    folds = args.folds
    save_npz = args.save_npz
    lowres_segmentations = args.lowres_segmentations
    num_threads_preprocessing = args.num_threads_preprocessing
    num_threads_nifti_save = args.num_threads_nifti_save
    disable_tta = args.disable_tta
    step_size = args.step_size
    # interp_order = args.interp_order
    # interp_order_z = args.interp_order_z
    # force_separate_z = args.force_separate_z
    overwrite_existing = args.overwrite_existing
    mode = args.mode
    all_in_gpu = args.all_in_gpu
    model = args.model
    trainer_class_name = args.trainer_class_name
    cascade_trainer_class_name = args.cascade_trainer_class_name

    task_name = args.task_name

    # allow passing a numeric task id instead of the full TaskXXX_name
    if not task_name.startswith("Task"):
        task_id = int(task_name)
        task_name = convert_id_to_task_name(task_id)

    assert model in ["2d", "3d_lowres", "3d_fullres", "3d_cascade_fullres"], "-m must be 2d, 3d_lowres, 3d_fullres or " \
                                                                             "3d_cascade_fullres"

    # if force_separate_z == "None":
    #     force_separate_z = None
    # elif force_separate_z == "False":
    #     force_separate_z = False
    # elif force_separate_z == "True":
    #     force_separate_z = True
    # else:
    #     raise ValueError("force_separate_z must be None, True or False. Given: %s" % force_separate_z)

    if lowres_segmentations == "None":
        lowres_segmentations = None

    # folds is either the string "None", or a list like ['all'] or ['0', '1', ...]
    if isinstance(folds, list):
        if folds[0] == 'all' and len(folds) == 1:
            pass
        else:
            folds = [int(i) for i in folds]
    elif folds == "None":
        folds = None
    else:
        raise ValueError("Unexpected value for argument folds")

    # map the tri-state string argument to None/True/False
    assert all_in_gpu in ['None', 'False', 'True']
    if all_in_gpu == "None":
        all_in_gpu = None
    elif all_in_gpu == "True":
        all_in_gpu = True
    elif all_in_gpu == "False":
        all_in_gpu = False

    # we need to catch the case where model is 3d cascade fullres and the low resolution folder has not been set.
    # In that case we need to try and predict with 3d low res first
    if model == "3d_cascade_fullres" and lowres_segmentations is None:
        print("lowres_segmentations is None. Attempting to predict 3d_lowres first...")
        assert part_id == 0 and num_parts == 1, "if you don't specify a --lowres_segmentations folder for the " \
                                                "inference of the cascade, custom values for part_id and num_parts " \
                                                "are not supported. If you wish to have multiple parts, please " \
                                                "run the 3d_lowres inference first (separately)"
        model_folder_name = join(network_training_output_dir, "3d_lowres", task_name, trainer_class_name + "__" +
                                 args.plans_identifier)
        assert isdir(model_folder_name), "model output folder not found. Expected: %s" % model_folder_name
        lowres_output_folder = join(output_folder, "3d_lowres_predictions")
        predict_from_folder(model_folder_name, input_folder, lowres_output_folder, folds, False,
                            num_threads_preprocessing, num_threads_nifti_save, None, part_id, num_parts, not disable_tta,
                            overwrite_existing=overwrite_existing, mode=mode, overwrite_all_in_gpu=all_in_gpu,
                            mixed_precision=not args.disable_mixed_precision,
                            step_size=step_size)
        lowres_segmentations = lowres_output_folder
        torch.cuda.empty_cache()
        print("3d_lowres done")

    if model == "3d_cascade_fullres":
        trainer = cascade_trainer_class_name
    else:
        trainer = trainer_class_name

    model_folder_name = join(network_training_output_dir, model, task_name, trainer + "__" +
                             args.plans_identifier)
    print("using model stored in ", model_folder_name)
    assert isdir(model_folder_name), "model output folder not found. Expected: %s" % model_folder_name

    predict_from_folder(model_folder_name, input_folder, output_folder, folds, save_npz, num_threads_preprocessing,
                        num_threads_nifti_save, lowres_segmentations, part_id, num_parts, not disable_tta,
                        overwrite_existing=overwrite_existing, mode=mode, overwrite_all_in_gpu=all_in_gpu,
                        mixed_precision=not args.disable_mixed_precision,
                        step_size=step_size, checkpoint_name=args.chk)


if __name__ == "__main__":
    main()
| 13,593
| 59.150442
| 125
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/inference/__init__.py
|
from __future__ import absolute_import
from . import *
| 54
| 26.5
| 38
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/inference/change_trainer.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from batchgenerators.utilities.file_and_folder_operations import *
def pretend_to_be_nnUNetTrainer(folder, checkpoints=("model_best.model.pkl", "model_latest.model.pkl", "model_final_checkpoint.model.pkl")):
    """Convenience wrapper: rewrite the stored trainer class name in all checkpoint pkls to 'nnUNetTrainer'."""
    pretend_to_be_other_trainer(folder, new_trainer_name="nnUNetTrainer", checkpoints=checkpoints)
def pretend_to_be_other_trainer(folder, new_trainer_name, checkpoints=("model_best.model.pkl", "model_latest.model.pkl", "model_final_checkpoint.model.pkl")):
    """
    Overwrite the 'name' entry of every existing checkpoint pkl (in all fold_X subfolders and, if present,
    the 'all' subfolder) with new_trainer_name, so the model loads with a different trainer class.
    """
    fold_dirs = subdirs(folder, prefix="fold_", join=False)
    if isdir(join(folder, 'all')):
        fold_dirs.append('all')
    for checkpoint_name in checkpoints:
        for fold_dir in fold_dirs:
            pkl_path = join(folder, fold_dir, checkpoint_name)
            if not isfile(pkl_path):
                continue  # this checkpoint was not saved for this fold
            meta = load_pickle(pkl_path)
            meta['name'] = new_trainer_name
            save_pickle(meta, pkl_path)
def main():
    """Command line interface: change the stored trainer class name of a trained model."""
    import argparse
    parser = argparse.ArgumentParser(description='Use this script to change the nnunet trainer class of a saved '
                                                 'model. Useful for models that were trained with trainers that do '
                                                 'not support inference (multi GPU trainers) or for trainer classes '
                                                 'whose source code is not available. For this to work the network '
                                                 'architecture must be identical between the original trainer '
                                                 'class and the trainer class we are changing to. This script is '
                                                 'experimental and only to be used by advanced users.')
    parser.add_argument('-i', help='Folder containing the trained model. This folder is the one containing the '
                                   'fold_X subfolders.')
    parser.add_argument('-tr', help='Name of the new trainer class')
    parsed = parser.parse_args()
    pretend_to_be_other_trainer(parsed.i, parsed.tr)
| 2,683
| 50.615385
| 158
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/inference/predict.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from copy import deepcopy
from typing import Tuple, Union, List
import numpy as np
from batchgenerators.augmentations.utils import resize_segmentation
from nnunet.inference.segmentation_export import save_segmentation_nifti_from_softmax, save_segmentation_nifti
from batchgenerators.utilities.file_and_folder_operations import *
from multiprocessing import Process, Queue
import torch
import SimpleITK as sitk
import shutil
from multiprocessing import Pool
from nnunet.postprocessing.connected_components import load_remove_save, load_postprocessing
from nnunet.training.model_restore import load_model_and_checkpoint_files
from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
from nnunet.utilities.one_hot_encoding import to_one_hot
def preprocess_save_to_queue(preprocess_fn, q, list_of_lists, output_files, segs_from_prev_stage, classes,
                             transpose_forward):
    """
    Worker function: preprocess the given cases one by one and put (output_file, (data, properties_dict))
    tuples onto q. A sentinel string "end" is put when this worker is done so the consumer can count
    finished workers.
    :param preprocess_fn: callable taking a list of input files and returning (data, seg, properties_dict)
    :param q: multiprocessing.Queue used to hand results to the consumer
    :param list_of_lists: one list of input files (all modalities) per case
    :param output_files: one output file name per case (also used to derive temporary .npy file names)
    :param segs_from_prev_stage: one segmentation file (or None) per case; if given, the previous-stage
        segmentation is one-hot encoded and appended to the data as extra channels (cascade)
    :param classes: label values used for one-hot encoding the previous-stage segmentation
    :param transpose_forward: axis permutation applied by preprocessing; the previous-stage segmentation
        must be transposed the same way before resizing
    """
    # suppress output
    # sys.stdout = open(os.devnull, 'w')
    errors_in = []
    for i, l in enumerate(list_of_lists):
        try:
            output_file = output_files[i]
            print("preprocessing", output_file)
            d, _, dct = preprocess_fn(l)
            # print(output_file, dct)
            if segs_from_prev_stage[i] is not None:
                assert isfile(segs_from_prev_stage[i]) and segs_from_prev_stage[i].endswith(
                    ".nii.gz"), "segs_from_prev_stage" \
                                " must point to a " \
                                "segmentation file"
                seg_prev = sitk.GetArrayFromImage(sitk.ReadImage(segs_from_prev_stage[i]))
                # check to see if shapes match
                img = sitk.GetArrayFromImage(sitk.ReadImage(l[0]))
                assert all([i == j for i, j in zip(seg_prev.shape, img.shape)]), "image and segmentation from previous " \
                                                                                 "stage don't have the same pixel array " \
                                                                                 "shape! image: %s, seg_prev: %s" % \
                                                                                 (l[0], segs_from_prev_stage[i])
                seg_prev = seg_prev.transpose(transpose_forward)
                seg_reshaped = resize_segmentation(seg_prev, d.shape[1:], order=1, cval=0)
                seg_reshaped = to_one_hot(seg_reshaped, classes)
                d = np.vstack((d, seg_reshaped)).astype(np.float32)
            # Objects larger than ~2 GB cannot be sent through multiprocessing pipes (pickle length field
            # limitation), so very large arrays are written to a temporary .npy file and only the file name
            # is queued. The consumer (save_segmentation_nifti_from_softmax) reads and deletes that file.
            print(d.shape)
            if np.prod(d.shape) > (2e9 / 4 * 0.85):  # *0.85 just to be save, 4 because float32 is 4 bytes
                print(
                    "This output is too large for python process-process communication. "
                    "Saving output temporarily to disk")
                np.save(output_file[:-7] + ".npy", d)
                d = output_file[:-7] + ".npy"
            q.put((output_file, (d, dct)))
        except KeyboardInterrupt:
            raise  # bare raise preserves the original traceback
        except Exception as e:
            print("error in", l)
            print(e)
            # bugfix: the failed case was never appended to errors_in, so the summary below always
            # reported "no errors to report" even when cases failed
            errors_in.append(l)
    q.put("end")
    if len(errors_in) > 0:
        print("There were some errors in the following cases:", errors_in)
        print("These cases were ignored.")
    else:
        print("This worker has ended successfully, no errors to report")
    # restore output
    # sys.stdout = sys.__stdout__
def preprocess_multithreaded(trainer, list_of_lists, output_files, num_processes=2, segs_from_prev_stage=None):
    """
    Generator that preprocesses cases in background processes and yields them one at a time.

    Spawns up to ``num_processes`` workers, each running ``preprocess_save_to_queue`` on an
    interleaved slice of the cases. Yields ``(output_file, (data, properties_dict))`` tuples as
    soon as they become available; ``data`` may also be a path to an .npy file if the array was
    too large for inter-process communication (see preprocess_save_to_queue). Each worker puts
    the sentinel string "end" on the queue when it is done.

    :param trainer: nnUNetTrainer instance; its preprocess_patient method does the actual work
    :param list_of_lists: one list of input modality files per case
    :param output_files: target output file per case (same order as list_of_lists)
    :param num_processes: upper bound on the number of worker processes
    :param segs_from_prev_stage: optional list of previous-cascade-stage segmentation files
    """
    if segs_from_prev_stage is None:
        segs_from_prev_stage = [None] * len(list_of_lists)
    # no point in spawning more workers than there are cases
    num_processes = min(len(list_of_lists), num_processes)
    # foreground class labels, used for one-hot encoding of previous-stage segmentations
    classes = list(range(1, trainer.num_classes))
    assert isinstance(trainer, nnUNetTrainer)
    q = Queue(1)  # maxsize 1 bounds memory: workers block until the consumer catches up
    processes = []
    for i in range(num_processes):
        pr = Process(target=preprocess_save_to_queue, args=(trainer.preprocess_patient, q,
                                                            list_of_lists[i::num_processes],
                                                            output_files[i::num_processes],
                                                            segs_from_prev_stage[i::num_processes],
                                                            classes, trainer.plans['transpose_forward']))
        pr.start()
        processes.append(pr)
    try:
        end_ctr = 0
        # keep yielding until every worker has sent its "end" sentinel
        while end_ctr != num_processes:
            item = q.get()
            if item == "end":
                end_ctr += 1
                continue
            else:
                yield item
    finally:
        # always clean up the workers, even if the consumer aborts this generator early
        for p in processes:
            if p.is_alive():
                p.terminate()  # this should not happen but better safe than sorry right
            p.join()
        q.close()
def predict_cases(model, list_of_lists, output_filenames, folds, save_npz, num_threads_preprocessing,
                  num_threads_nifti_save, segs_from_prev_stage=None, do_tta=True, mixed_precision=True, overwrite_existing=False,
                  all_in_gpu=False, step_size=0.5, checkpoint_name="model_final_checkpoint",
                  segmentation_export_kwargs: dict = None):
    """
    Run full-softmax ensemble inference for a list of cases and export the segmentations.

    :param segmentation_export_kwargs: optional dict with keys 'force_separate_z',
        'interpolation_order' and 'interpolation_order_z'; overrides the trainer's plans
    :param model: folder where the model is saved, must contain fold_x subfolders
    :param list_of_lists: [[case0_0000.nii.gz, case0_0001.nii.gz], [case1_0000.nii.gz, case1_0001.nii.gz], ...]
    :param output_filenames: [output_file_case0.nii.gz, output_file_case1.nii.gz, ...]
    :param folds: default: (0, 1, 2, 3, 4) (but can also be 'all' or a subset of the five folds, for example use (0, )
    for using only fold_0
    :param save_npz: default: False. If True, compressed softmax probabilities are saved alongside
    :param num_threads_preprocessing:
    :param num_threads_nifti_save:
    :param segs_from_prev_stage:
    :param do_tta: default: True, can be set to False for a 8x speedup at the cost of a reduced segmentation quality
    :param overwrite_existing: default: True
    :param mixed_precision: if None then we take no action. If True/False we overwrite what the model has in its init
    :return:
    """
    assert len(list_of_lists) == len(output_filenames)
    if segs_from_prev_stage is not None: assert len(segs_from_prev_stage) == len(output_filenames)
    pool = Pool(num_threads_nifti_save)
    results = []
    # normalize output file names: create missing directories and enforce the .nii.gz suffix
    cleaned_output_files = []
    for o in output_filenames:
        dr, f = os.path.split(o)
        if len(dr) > 0:
            maybe_mkdir_p(dr)
        if not f.endswith(".nii.gz"):
            f, _ = os.path.splitext(f)
            f = f + ".nii.gz"
        cleaned_output_files.append(join(dr, f))
    if not overwrite_existing:
        print("number of cases:", len(list_of_lists))
        # only keep cases whose output does not exist yet
        not_done_idx = [i for i, j in enumerate(cleaned_output_files) if not isfile(j)]
        cleaned_output_files = [cleaned_output_files[i] for i in not_done_idx]
        list_of_lists = [list_of_lists[i] for i in not_done_idx]
        if segs_from_prev_stage is not None:
            segs_from_prev_stage = [segs_from_prev_stage[i] for i in not_done_idx]
        print("number of cases that still need to be predicted:", len(cleaned_output_files))
    print("emptying cuda cache")
    torch.cuda.empty_cache()
    print("loading parameters for folds,", folds)
    trainer, params = load_model_and_checkpoint_files(model, folds, mixed_precision=mixed_precision, checkpoint_name=checkpoint_name)
    # export parameters come either from the caller or from the trainer's plans (with defaults)
    if segmentation_export_kwargs is None:
        if 'segmentation_export_params' in trainer.plans.keys():
            force_separate_z = trainer.plans['segmentation_export_params']['force_separate_z']
            interpolation_order = trainer.plans['segmentation_export_params']['interpolation_order']
            interpolation_order_z = trainer.plans['segmentation_export_params']['interpolation_order_z']
        else:
            force_separate_z = None
            interpolation_order = 1
            interpolation_order_z = 0
    else:
        force_separate_z = segmentation_export_kwargs['force_separate_z']
        interpolation_order = segmentation_export_kwargs['interpolation_order']
        interpolation_order_z = segmentation_export_kwargs['interpolation_order_z']
    print("starting preprocessing generator")
    preprocessing = preprocess_multithreaded(trainer, list_of_lists, cleaned_output_files, num_threads_preprocessing,
                                             segs_from_prev_stage)
    print("starting prediction...")
    all_output_files = []
    for preprocessed in preprocessing:
        output_filename, (d, dct) = preprocessed
        # bugfix: this used to append the list to itself (all_output_files.append(all_output_files))
        all_output_files.append(output_filename)
        if isinstance(d, str):
            # the preprocessor spilled the array to disk because it was too large for IPC
            data = np.load(d)
            os.remove(d)
            d = data
        print("predicting", output_filename)
        softmax = []
        for p in params:
            trainer.load_checkpoint_ram(p, False)
            softmax.append(trainer.predict_preprocessed_data_return_seg_and_softmax(
                d, do_mirroring=do_tta, mirror_axes=trainer.data_aug_params['mirror_axes'], use_sliding_window=True,
                step_size=step_size, use_gaussian=True, all_in_gpu=all_in_gpu,
                mixed_precision=mixed_precision)[1][None])
        # average the softmax over all folds (ensembling)
        softmax = np.vstack(softmax)
        softmax_mean = np.mean(softmax, 0)
        transpose_forward = trainer.plans.get('transpose_forward')
        if transpose_forward is not None:
            transpose_backward = trainer.plans.get('transpose_backward')
            softmax_mean = softmax_mean.transpose([0] + [i + 1 for i in transpose_backward])
        if save_npz:
            npz_file = output_filename[:-7] + ".npz"
        else:
            npz_file = None
        if hasattr(trainer, 'regions_class_order'):
            region_class_order = trainer.regions_class_order
        else:
            region_class_order = None
        """There is a problem with python process communication that prevents us from communicating obejcts
        larger than 2 GB between processes (basically when the length of the pickle string that will be sent is
        communicated by the multiprocessing.Pipe object then the placeholder (\%i I think) does not allow for long
        enough strings (lol). This could be fixed by changing i to l (for long) but that would require manually
        patching system python code. We circumvent that problem here by saving softmax_pred to a npy file that will
        then be read (and finally deleted) by the Process. save_segmentation_nifti_from_softmax can take either
        filename or np.ndarray and will handle this automatically"""
        bytes_per_voxel = 4
        if all_in_gpu:
            bytes_per_voxel = 2  # if all_in_gpu then the return value is half (float16)
        if np.prod(softmax_mean.shape) > (2e9 / bytes_per_voxel * 0.85):  # * 0.85 just to be save
            print(
                "This output is too large for python process-process communication. Saving output temporarily to disk")
            np.save(output_filename[:-7] + ".npy", softmax_mean)
            softmax_mean = output_filename[:-7] + ".npy"
        results.append(pool.starmap_async(save_segmentation_nifti_from_softmax,
                                          ((softmax_mean, output_filename, dct, interpolation_order, region_class_order,
                                            None, None,
                                            npz_file, None, force_separate_z, interpolation_order_z),)
                                          ))
    print("inference done. Now waiting for the segmentation export to finish...")
    _ = [i.get() for i in results]
    # now apply postprocessing
    # first load the postprocessing properties if they are present. Else raise a well visible warning
    results = []
    pp_file = join(model, "postprocessing.json")
    if isfile(pp_file):
        print("postprocessing...")
        shutil.copy(pp_file, os.path.abspath(os.path.dirname(output_filenames[0])))
        # for_which_classes stores for which of the classes everything but the largest connected component needs to be
        # removed
        for_which_classes, min_valid_obj_size = load_postprocessing(pp_file)
        results.append(pool.starmap_async(load_remove_save,
                                          zip(output_filenames, output_filenames,
                                              [for_which_classes] * len(output_filenames),
                                              [min_valid_obj_size] * len(output_filenames))))
        _ = [i.get() for i in results]
    else:
        print("WARNING! Cannot run postprocessing because the postprocessing file is missing. Make sure to run "
              "consolidate_folds in the output folder of the model first!\nThe folder you need to run this in is "
              "%s" % model)
    pool.close()
    pool.join()
def predict_cases_fast(model, list_of_lists, output_filenames, folds, num_threads_preprocessing,
                       num_threads_nifti_save, segs_from_prev_stage=None, do_tta=True, mixed_precision=True,
                       overwrite_existing=False,
                       all_in_gpu=False, step_size=0.5, checkpoint_name="model_final_checkpoint",
                       segmentation_export_kwargs: dict = None):
    """
    Like predict_cases, but aggregates the softmax in place across folds (or skips softmax
    aggregation entirely for a single fold) and exports only the argmax segmentation. This is
    faster and uses less memory, but softmax probabilities cannot be saved.

    See predict_cases for parameter documentation.
    """
    assert len(list_of_lists) == len(output_filenames)
    if segs_from_prev_stage is not None: assert len(segs_from_prev_stage) == len(output_filenames)
    pool = Pool(num_threads_nifti_save)
    results = []
    # normalize output file names: create missing directories and enforce the .nii.gz suffix
    cleaned_output_files = []
    for o in output_filenames:
        dr, f = os.path.split(o)
        if len(dr) > 0:
            maybe_mkdir_p(dr)
        if not f.endswith(".nii.gz"):
            f, _ = os.path.splitext(f)
            f = f + ".nii.gz"
        cleaned_output_files.append(join(dr, f))
    if not overwrite_existing:
        print("number of cases:", len(list_of_lists))
        # only keep cases whose output does not exist yet
        not_done_idx = [i for i, j in enumerate(cleaned_output_files) if not isfile(j)]
        cleaned_output_files = [cleaned_output_files[i] for i in not_done_idx]
        list_of_lists = [list_of_lists[i] for i in not_done_idx]
        if segs_from_prev_stage is not None:
            segs_from_prev_stage = [segs_from_prev_stage[i] for i in not_done_idx]
        print("number of cases that still need to be predicted:", len(cleaned_output_files))
    print("emptying cuda cache")
    torch.cuda.empty_cache()
    print("loading parameters for folds,", folds)
    trainer, params = load_model_and_checkpoint_files(model, folds, mixed_precision=mixed_precision, checkpoint_name=checkpoint_name)
    # export parameters come either from the caller or from the trainer's plans (with defaults)
    if segmentation_export_kwargs is None:
        if 'segmentation_export_params' in trainer.plans.keys():
            force_separate_z = trainer.plans['segmentation_export_params']['force_separate_z']
            interpolation_order = trainer.plans['segmentation_export_params']['interpolation_order']
            interpolation_order_z = trainer.plans['segmentation_export_params']['interpolation_order_z']
        else:
            force_separate_z = None
            interpolation_order = 1
            interpolation_order_z = 0
    else:
        force_separate_z = segmentation_export_kwargs['force_separate_z']
        interpolation_order = segmentation_export_kwargs['interpolation_order']
        interpolation_order_z = segmentation_export_kwargs['interpolation_order_z']
    print("starting preprocessing generator")
    preprocessing = preprocess_multithreaded(trainer, list_of_lists, cleaned_output_files, num_threads_preprocessing,
                                             segs_from_prev_stage)
    print("starting prediction...")
    for preprocessed in preprocessing:
        print("getting data from preprocessor")
        output_filename, (d, dct) = preprocessed
        print("got something")
        if isinstance(d, str):
            print("what I got is a string, so I need to load a file")
            data = np.load(d)
            os.remove(d)
            d = data
        # preallocate the output arrays
        # same dtype as the return value in predict_preprocessed_data_return_seg_and_softmax (saves time)
        softmax_aggr = None  # np.zeros((trainer.num_classes, *d.shape[1:]), dtype=np.float16)
        all_seg_outputs = np.zeros((len(params), *d.shape[1:]), dtype=int)
        print("predicting", output_filename)
        for i, p in enumerate(params):
            trainer.load_checkpoint_ram(p, False)
            res = trainer.predict_preprocessed_data_return_seg_and_softmax(d, do_mirroring=do_tta,
                                                                           mirror_axes=trainer.data_aug_params['mirror_axes'],
                                                                           use_sliding_window=True,
                                                                           step_size=step_size, use_gaussian=True,
                                                                           all_in_gpu=all_in_gpu,
                                                                           mixed_precision=mixed_precision)
            if len(params) > 1:
                # otherwise we dont need this and we can save ourselves the time it takes to copy that
                print("aggregating softmax")
                if softmax_aggr is None:
                    softmax_aggr = res[1]
                else:
                    softmax_aggr += res[1]
            all_seg_outputs[i] = res[0]
        print("obtaining segmentation map")
        if len(params) > 1:
            # we dont need to normalize the softmax by 1 / len(params) because this would not change the outcome of the argmax
            seg = softmax_aggr.argmax(0)
        else:
            seg = all_seg_outputs[0]
        print("applying transpose_backward")
        transpose_forward = trainer.plans.get('transpose_forward')
        if transpose_forward is not None:
            transpose_backward = trainer.plans.get('transpose_backward')
            seg = seg.transpose([i for i in transpose_backward])
        print("initializing segmentation export")
        results.append(pool.starmap_async(save_segmentation_nifti,
                                          ((seg, output_filename, dct, interpolation_order, force_separate_z,
                                            interpolation_order_z),)
                                          ))
        print("done")
    print("inference done. Now waiting for the segmentation export to finish...")
    _ = [i.get() for i in results]
    # now apply postprocessing
    # first load the postprocessing properties if they are present. Else raise a well visible warning
    results = []
    pp_file = join(model, "postprocessing.json")
    if isfile(pp_file):
        print("postprocessing...")
        # abspath for consistency with predict_cases: dirname may be '' for relative file names,
        # which shutil.copy cannot handle
        shutil.copy(pp_file, os.path.abspath(os.path.dirname(output_filenames[0])))
        # for_which_classes stores for which of the classes everything but the largest connected component needs to be
        # removed
        for_which_classes, min_valid_obj_size = load_postprocessing(pp_file)
        results.append(pool.starmap_async(load_remove_save,
                                          zip(output_filenames, output_filenames,
                                              [for_which_classes] * len(output_filenames),
                                              [min_valid_obj_size] * len(output_filenames))))
        _ = [i.get() for i in results]
    else:
        print("WARNING! Cannot run postprocessing because the postprocessing file is missing. Make sure to run "
              "consolidate_folds in the output folder of the model first!\nThe folder you need to run this in is "
              "%s" % model)
    pool.close()
    pool.join()
def predict_cases_fastest(model, list_of_lists, output_filenames, folds, num_threads_preprocessing,
                          num_threads_nifti_save, segs_from_prev_stage=None, do_tta=True, mixed_precision=True,
                          overwrite_existing=False, all_in_gpu=True, step_size=0.5,
                          checkpoint_name="model_final_checkpoint"):
    """
    Fastest inference variant: keeps per-fold float16 softmax outputs, exports the argmax
    segmentation with nearest-neighbor export settings (interpolation order 0) and does not
    support custom segmentation export parameters or softmax saving.

    See predict_cases for parameter documentation.
    """
    assert len(list_of_lists) == len(output_filenames)
    if segs_from_prev_stage is not None: assert len(segs_from_prev_stage) == len(output_filenames)
    pool = Pool(num_threads_nifti_save)
    results = []
    # normalize output file names: create missing directories and enforce the .nii.gz suffix
    cleaned_output_files = []
    for o in output_filenames:
        dr, f = os.path.split(o)
        if len(dr) > 0:
            maybe_mkdir_p(dr)
        if not f.endswith(".nii.gz"):
            f, _ = os.path.splitext(f)
            f = f + ".nii.gz"
        cleaned_output_files.append(join(dr, f))
    if not overwrite_existing:
        print("number of cases:", len(list_of_lists))
        # only keep cases whose output does not exist yet
        not_done_idx = [i for i, j in enumerate(cleaned_output_files) if not isfile(j)]
        cleaned_output_files = [cleaned_output_files[i] for i in not_done_idx]
        list_of_lists = [list_of_lists[i] for i in not_done_idx]
        if segs_from_prev_stage is not None:
            segs_from_prev_stage = [segs_from_prev_stage[i] for i in not_done_idx]
        print("number of cases that still need to be predicted:", len(cleaned_output_files))
    print("emptying cuda cache")
    torch.cuda.empty_cache()
    print("loading parameters for folds,", folds)
    trainer, params = load_model_and_checkpoint_files(model, folds, mixed_precision=mixed_precision, checkpoint_name=checkpoint_name)
    print("starting preprocessing generator")
    preprocessing = preprocess_multithreaded(trainer, list_of_lists, cleaned_output_files, num_threads_preprocessing,
                                             segs_from_prev_stage)
    print("starting prediction...")
    for preprocessed in preprocessing:
        print("getting data from preprocessor")
        output_filename, (d, dct) = preprocessed
        print("got something")
        if isinstance(d, str):
            print("what I got is a string, so I need to load a file")
            data = np.load(d)
            os.remove(d)
            d = data
        # preallocate the output arrays
        # same dtype as the return value in predict_preprocessed_data_return_seg_and_softmax (saves time)
        all_softmax_outputs = np.zeros((len(params), trainer.num_classes, *d.shape[1:]), dtype=np.float16)
        all_seg_outputs = np.zeros((len(params), *d.shape[1:]), dtype=int)
        print("predicting", output_filename)
        for i, p in enumerate(params):
            trainer.load_checkpoint_ram(p, False)
            res = trainer.predict_preprocessed_data_return_seg_and_softmax(d, do_mirroring=do_tta,
                                                                           mirror_axes=trainer.data_aug_params['mirror_axes'],
                                                                           use_sliding_window=True,
                                                                           step_size=step_size, use_gaussian=True,
                                                                           all_in_gpu=all_in_gpu,
                                                                           mixed_precision=mixed_precision)
            if len(params) > 1:
                # otherwise we dont need this and we can save ourselves the time it takes to copy that
                all_softmax_outputs[i] = res[1]
            all_seg_outputs[i] = res[0]
        print("aggregating predictions")
        if len(params) > 1:
            softmax_mean = np.mean(all_softmax_outputs, 0)
            seg = softmax_mean.argmax(0)
        else:
            seg = all_seg_outputs[0]
        print("applying transpose_backward")
        transpose_forward = trainer.plans.get('transpose_forward')
        if transpose_forward is not None:
            transpose_backward = trainer.plans.get('transpose_backward')
            seg = seg.transpose([i for i in transpose_backward])
        print("initializing segmentation export")
        results.append(pool.starmap_async(save_segmentation_nifti,
                                          ((seg, output_filename, dct, 0, None),)
                                          ))
        print("done")
    print("inference done. Now waiting for the segmentation export to finish...")
    _ = [i.get() for i in results]
    # now apply postprocessing
    # first load the postprocessing properties if they are present. Else raise a well visible warning
    results = []
    pp_file = join(model, "postprocessing.json")
    if isfile(pp_file):
        print("postprocessing...")
        # abspath for consistency with predict_cases: dirname may be '' for relative file names,
        # which shutil.copy cannot handle
        shutil.copy(pp_file, os.path.abspath(os.path.dirname(output_filenames[0])))
        # for_which_classes stores for which of the classes everything but the largest connected component needs to be
        # removed
        for_which_classes, min_valid_obj_size = load_postprocessing(pp_file)
        results.append(pool.starmap_async(load_remove_save,
                                          zip(output_filenames, output_filenames,
                                              [for_which_classes] * len(output_filenames),
                                              [min_valid_obj_size] * len(output_filenames))))
        _ = [i.get() for i in results]
    else:
        print("WARNING! Cannot run postprocessing because the postprocessing file is missing. Make sure to run "
              "consolidate_folds in the output folder of the model first!\nThe folder you need to run this in is "
              "%s" % model)
    pool.close()
    pool.join()
def check_input_folder_and_return_caseIDs(input_folder, expected_num_modalities):
    """
    Verify that input_folder provides one file per expected modality for every case and
    return the unique case identifiers found there. Raises RuntimeError if files are missing.
    """
    print("This model expects %d input modalities for each image" % expected_num_modalities)
    files = subfiles(input_folder, suffix=".nii.gz", join=False, sort=True)
    assert len(files) > 0, "input folder did not contain any images (expected to find .nii.gz file endings)"
    # case id = filename without the trailing "_XXXX.nii.gz" (12 characters)
    maybe_case_ids = np.unique([i[:-12] for i in files])
    remaining = deepcopy(files)
    missing = []
    # each case must provide one file per modality index
    for case in maybe_case_ids:
        for modality in range(expected_num_modalities):
            candidate = case + "_%04.0d.nii.gz" % modality
            if isfile(join(input_folder, candidate)):
                remaining.remove(candidate)
            else:
                missing.append(candidate)
    print("Found %d unique case ids, here are some examples:" % len(maybe_case_ids),
          np.random.choice(maybe_case_ids, min(len(maybe_case_ids), 10)))
    print("If they don't look right, make sure to double check your filenames. They must end with _0000.nii.gz etc")
    if len(remaining) > 0:
        print("found %d unexpected remaining files in the folder. Here are some examples:" % len(remaining),
              np.random.choice(remaining, min(len(remaining), 10)))
    if len(missing) > 0:
        print("Some files are missing:")
        print(missing)
        raise RuntimeError("missing files in input_folder")
    return maybe_case_ids
def predict_from_folder(model: str, input_folder: str, output_folder: str, folds: Union[Tuple[int], List[int]],
                        save_npz: bool, num_threads_preprocessing: int, num_threads_nifti_save: int,
                        lowres_segmentations: Union[str, None],
                        part_id: int, num_parts: int, tta: bool, mixed_precision: bool = True,
                        overwrite_existing: bool = True, mode: str = 'normal', overwrite_all_in_gpu: bool = None,
                        step_size: float = 0.5, checkpoint_name: str = "model_final_checkpoint",
                        segmentation_export_kwargs: dict = None):
    """
    here we use the standard naming scheme to generate list_of_lists and output_files needed by predict_cases

    Dispatches to predict_cases / predict_cases_fast / predict_cases_fastest depending on `mode`.

    :param model:
    :param input_folder:
    :param output_folder:
    :param folds:
    :param save_npz: must be False for modes 'fast' and 'fastest'
    :param num_threads_preprocessing:
    :param num_threads_nifti_save:
    :param lowres_segmentations: folder with lowres predictions (required for the cascade highres stage) or None
    :param part_id: index of this part when splitting the workload over num_parts workers
    :param num_parts:
    :param tta:
    :param mixed_precision:
    :param overwrite_existing: if not None then it will be overwritten with whatever is in there. None is default (no overwrite)
    :return:
    """
    maybe_mkdir_p(output_folder)
    # bugfix: check for plans.pkl before copying it; the old order produced an unhelpful
    # FileNotFoundError from shutil.copy instead of this assertion message
    assert isfile(join(model, "plans.pkl")), "Folder with saved model weights must contain a plans.pkl file"
    shutil.copy(join(model, 'plans.pkl'), output_folder)
    expected_num_modalities = load_pickle(join(model, "plans.pkl"))['num_modalities']
    # check input folder integrity
    case_ids = check_input_folder_and_return_caseIDs(input_folder, expected_num_modalities)
    output_files = [join(output_folder, i + ".nii.gz") for i in case_ids]
    all_files = subfiles(input_folder, suffix=".nii.gz", join=False, sort=True)
    # group all modality files belonging to the same case id
    list_of_lists = [[join(input_folder, i) for i in all_files if i[:len(j)].startswith(j) and
                      len(i) == (len(j) + 12)] for j in case_ids]
    if lowres_segmentations is not None:
        assert isdir(lowres_segmentations), "if lowres_segmentations is not None then it must point to a directory"
        lowres_segmentations = [join(lowres_segmentations, i + ".nii.gz") for i in case_ids]
        assert all([isfile(i) for i in lowres_segmentations]), "not all lowres_segmentations files are present. " \
                                                               "(I was searching for case_id.nii.gz in that folder)"
        lowres_segmentations = lowres_segmentations[part_id::num_parts]
    else:
        lowres_segmentations = None
    if mode == "normal":
        if overwrite_all_in_gpu is None:
            all_in_gpu = False
        else:
            all_in_gpu = overwrite_all_in_gpu
        return predict_cases(model, list_of_lists[part_id::num_parts], output_files[part_id::num_parts], folds,
                             save_npz, num_threads_preprocessing, num_threads_nifti_save, lowres_segmentations, tta,
                             mixed_precision=mixed_precision, overwrite_existing=overwrite_existing, all_in_gpu=all_in_gpu,
                             step_size=step_size, checkpoint_name=checkpoint_name,
                             segmentation_export_kwargs=segmentation_export_kwargs)
    elif mode == "fast":
        if overwrite_all_in_gpu is None:
            all_in_gpu = True
        else:
            all_in_gpu = overwrite_all_in_gpu
        assert save_npz is False
        return predict_cases_fast(model, list_of_lists[part_id::num_parts], output_files[part_id::num_parts], folds,
                                  num_threads_preprocessing, num_threads_nifti_save, lowres_segmentations,
                                  tta, mixed_precision=mixed_precision, overwrite_existing=overwrite_existing, all_in_gpu=all_in_gpu,
                                  step_size=step_size, checkpoint_name=checkpoint_name,
                                  segmentation_export_kwargs=segmentation_export_kwargs)
    elif mode == "fastest":
        if overwrite_all_in_gpu is None:
            all_in_gpu = True
        else:
            all_in_gpu = overwrite_all_in_gpu
        assert save_npz is False
        return predict_cases_fastest(model, list_of_lists[part_id::num_parts], output_files[part_id::num_parts], folds,
                                     num_threads_preprocessing, num_threads_nifti_save, lowres_segmentations,
                                     tta, mixed_precision=mixed_precision, overwrite_existing=overwrite_existing, all_in_gpu=all_in_gpu,
                                     step_size=step_size, checkpoint_name=checkpoint_name)
    else:
        raise ValueError("unrecognized mode. Must be normal, fast or fastest")
if __name__ == "__main__":
    # ---- command line interface ----
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", '--input_folder', help="Must contain all modalities for each patient in the correct"
                                                     " order (same as training). Files must be named "
                                                     "CASENAME_XXXX.nii.gz where XXXX is the modality "
                                                     "identifier (0000, 0001, etc)", required=True)
    parser.add_argument('-o', "--output_folder", required=True, help="folder for saving predictions")
    parser.add_argument('-m', '--model_output_folder',
                        help='model output folder. Will automatically discover the folds '
                             'that were '
                             'run and use those as an ensemble', required=True)
    parser.add_argument('-f', '--folds', nargs='+', default='None', help="folds to use for prediction. Default is None "
                                                                         "which means that folds will be detected "
                                                                         "automatically in the model output folder")
    parser.add_argument('-z', '--save_npz', required=False, action='store_true', help="use this if you want to ensemble"
                                                                                      " these predictions with those of"
                                                                                      " other models. Softmax "
                                                                                      "probabilities will be saved as "
                                                                                      "compresed numpy arrays in "
                                                                                      "output_folder and can be merged "
                                                                                      "between output_folders with "
                                                                                      "merge_predictions.py")
    parser.add_argument('-l', '--lowres_segmentations', required=False, default='None', help="if model is the highres "
                        "stage of the cascade then you need to use -l to specify where the segmentations of the "
                        "corresponding lowres unet are. Here they are required to do a prediction")
    parser.add_argument("--part_id", type=int, required=False, default=0, help="Used to parallelize the prediction of "
                                                                               "the folder over several GPUs. If you "
                                                                               "want to use n GPUs to predict this "
                                                                               "folder you need to run this command "
                                                                               "n times with --part_id=0, ... n-1 and "
                                                                               "--num_parts=n (each with a different "
                                                                               "GPU (for example via "
                                                                               "CUDA_VISIBLE_DEVICES=X)")
    parser.add_argument("--num_parts", type=int, required=False, default=1,
                        help="Used to parallelize the prediction of "
                             "the folder over several GPUs. If you "
                             "want to use n GPUs to predict this "
                             "folder you need to run this command "
                             "n times with --part_id=0, ... n-1 and "
                             "--num_parts=n (each with a different "
                             "GPU (via "
                             "CUDA_VISIBLE_DEVICES=X)")
    parser.add_argument("--num_threads_preprocessing", required=False, default=6, type=int, help=
    "Determines many background processes will be used for data preprocessing. Reduce this if you "
    "run into out of memory (RAM) problems. Default: 6")
    parser.add_argument("--num_threads_nifti_save", required=False, default=2, type=int, help=
    "Determines many background processes will be used for segmentation export. Reduce this if you "
    "run into out of memory (RAM) problems. Default: 2")
    parser.add_argument("--tta", required=False, type=int, default=1, help="Set to 0 to disable test time data "
                                                                           "augmentation (speedup of factor "
                                                                           "4(2D)/8(3D)), "
                                                                           "lower quality segmentations")
    parser.add_argument("--overwrite_existing", required=False, type=int, default=1, help="Set this to 0 if you need "
                                                                                          "to resume a previous "
                                                                                          "prediction. Default: 1 "
                                                                                          "(=existing segmentations "
                                                                                          "in output_folder will be "
                                                                                          "overwritten)")
    parser.add_argument("--mode", type=str, default="normal", required=False)
    parser.add_argument("--all_in_gpu", type=str, default="None", required=False, help="can be None, False or True")
    parser.add_argument("--step_size", type=float, default=0.5, required=False, help="don't touch")
    # parser.add_argument("--interp_order", required=False, default=3, type=int,
    #                     help="order of interpolation for segmentations, has no effect if mode=fastest")
    # parser.add_argument("--interp_order_z", required=False, default=0, type=int,
    #                     help="order of interpolation along z is z is done differently")
    # parser.add_argument("--force_separate_z", required=False, default="None", type=str,
    #                     help="force_separate_z resampling. Can be None, True or False, has no effect if mode=fastest")
    parser.add_argument('--disable_mixed_precision', default=False, action='store_true', required=False,
                        help='Predictions are done with mixed precision by default. This improves speed and reduces '
                             'the required vram. If you want to disable mixed precision you can set this flag. Note '
                             'that yhis is not recommended (mixed precision is ~2x faster!)')
    args = parser.parse_args()
    # ---- unpack arguments into locals ----
    input_folder = args.input_folder
    output_folder = args.output_folder
    part_id = args.part_id
    num_parts = args.num_parts
    model = args.model_output_folder
    folds = args.folds
    save_npz = args.save_npz
    lowres_segmentations = args.lowres_segmentations
    num_threads_preprocessing = args.num_threads_preprocessing
    num_threads_nifti_save = args.num_threads_nifti_save
    tta = args.tta
    step_size = args.step_size
    # interp_order = args.interp_order
    # interp_order_z = args.interp_order_z
    # force_separate_z = args.force_separate_z
    # if force_separate_z == "None":
    #     force_separate_z = None
    # elif force_separate_z == "False":
    #     force_separate_z = False
    # elif force_separate_z == "True":
    #     force_separate_z = True
    # else:
    #     raise ValueError("force_separate_z must be None, True or False. Given: %s" % force_separate_z)
    overwrite = args.overwrite_existing
    mode = args.mode
    all_in_gpu = args.all_in_gpu
    # ---- convert string/int CLI sentinels to their Python equivalents ----
    if lowres_segmentations == "None":
        lowres_segmentations = None
    if isinstance(folds, list):
        if folds[0] == 'all' and len(folds) == 1:
            pass
        else:
            folds = [int(i) for i in folds]
    elif folds == "None":
        folds = None
    else:
        raise ValueError("Unexpected value for argument folds")
    if tta == 0:
        tta = False
    elif tta == 1:
        tta = True
    else:
        raise ValueError("Unexpected value for tta, Use 1 or 0")
    if overwrite == 0:
        overwrite = False
    elif overwrite == 1:
        overwrite = True
    else:
        raise ValueError("Unexpected value for overwrite, Use 1 or 0")
    assert all_in_gpu in ['None', 'False', 'True']
    if all_in_gpu == "None":
        all_in_gpu = None
    elif all_in_gpu == "True":
        all_in_gpu = True
    elif all_in_gpu == "False":
        all_in_gpu = False
    # ---- run inference ----
    predict_from_folder(model, input_folder, output_folder, folds, save_npz, num_threads_preprocessing,
                        num_threads_nifti_save, lowres_segmentations, part_id, num_parts, tta, mixed_precision=not args.disable_mixed_precision,
                        overwrite_existing=overwrite, mode=mode, overwrite_all_in_gpu=all_in_gpu, step_size=step_size)
| 42,543
| 51.98132
| 182
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/inference/pretrained_models/collect_pretrained_models.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import zipfile
from multiprocessing.pool import Pool
from batchgenerators.utilities.file_and_folder_operations import *
import shutil
from nnunet.paths import default_cascade_trainer, default_plans_identifier, default_trainer, network_training_output_dir
from nnunet.utilities.task_name_id_conversion import convert_id_to_task_name
from subprocess import call
def copy_fold(in_folder: str, out_folder: str):
    """Copy the essential artifacts of one trained fold (checkpoint, its pickle, debug info
    and progress plot) from in_folder to out_folder; the architecture pdf is optional."""
    required = ["debug.json", "model_final_checkpoint.model",
                "model_final_checkpoint.model.pkl", "progress.png"]
    for fname in required:
        shutil.copy(join(in_folder, fname), join(out_folder, fname))
    pdf = join(in_folder, "network_architecture.pdf")
    if isfile(pdf):
        shutil.copy(pdf, join(out_folder, "network_architecture.pdf"))
def copy_model(directory: str, output_directory: str):
    """
    :param directory: must have the 5 fold_X subfolders as well as a postprocessing.json and plans.pkl
    :param output_directory:
    :return:
    """
    fold_names = ["fold_%d" % fold for fold in range(5)]
    # validate the source model folder before copying anything
    assert all([isdir(join(directory, i)) for i in fold_names]), "not all folds present"
    assert isfile(join(directory, "plans.pkl")), "plans.pkl missing"
    assert isfile(join(directory, "postprocessing.json")), "postprocessing.json missing"
    for fold_name in fold_names:
        destination = join(output_directory, fold_name)
        maybe_mkdir_p(destination)
        copy_fold(join(directory, fold_name), destination)
    shutil.copy(join(directory, "plans.pkl"), join(output_directory, "plans.pkl"))
    shutil.copy(join(directory, "postprocessing.json"), join(output_directory, "postprocessing.json"))
def copy_pretrained_models_for_task(task_name: str, output_directory: str,
                                    models: tuple = ("2d", "3d_lowres", "3d_fullres", "3d_cascade_fullres"),
                                    nnunet_trainer=default_trainer,
                                    nnunet_trainer_cascade=default_cascade_trainer,
                                    plans_identifier=default_plans_identifier):
    """Copy all trained configurations of one task from the results folder into output_directory.

    The cascade stages (3d_lowres / 3d_cascade_fullres) are optional; any other missing
    configuration raises RuntimeError.
    """
    regular_dirname = nnunet_trainer + "__" + plans_identifier
    cascade_dirname = nnunet_trainer_cascade + "__" + plans_identifier
    for configuration in models:
        if configuration == "3d_cascade_fullres":
            subdir = cascade_dirname
        else:
            subdir = regular_dirname
        source = join(network_training_output_dir, configuration, task_name, subdir)
        if not isdir(source):
            if configuration in ("3d_lowres", "3d_cascade_fullres"):
                print("Task", task_name, "does not seem to have the cascade")
                continue
            raise RuntimeError("missing folder! %s" % source)
        target = join(output_directory, configuration, task_name, subdir)
        maybe_mkdir_p(target)
        copy_model(source, target)
def check_if_valid(ensemble: str, valid_models, valid_trainers, valid_plans):
    """Return True if an ensemble folder name only references allowed configurations.

    ``ensemble`` is expected to look like
    'ensemble_<model>__<trainer>__<plans>--<model>__<trainer>__<plans>'.
    """
    stripped = ensemble[len("ensemble_"):]
    first_member, second_member = stripped.split("--")
    for member in (first_member, second_member):
        model, trainer, plans = member.split("__")
        if model not in valid_models:
            return False
        if trainer not in valid_trainers:
            return False
        if plans not in valid_plans:
            return False
    return True
def copy_ensembles(taskname, output_folder, valid_models=('2d', '3d_fullres', '3d_lowres', '3d_cascade_fullres'),
                   valid_trainers=(default_trainer, default_cascade_trainer),
                   valid_plans=(default_plans_identifier,)):
    """Copy the postprocessing.json of every valid ensemble of a task into ``output_folder``."""
    ensemble_dir = join(network_training_output_dir, 'ensembles', taskname)
    if not isdir(ensemble_dir):
        print("No ensemble directory found for task", taskname)
        return
    candidates = subdirs(ensemble_dir, join=False)
    # keep only ensembles whose members match the allowed models/trainers/plans
    valid = [c for c in candidates if check_if_valid(c, valid_models, valid_trainers, valid_plans)]
    output_ensemble = join(output_folder, 'ensembles', taskname)
    maybe_mkdir_p(output_ensemble)
    for ensemble_name in valid:
        target_dir = join(output_ensemble, ensemble_name)
        maybe_mkdir_p(target_dir)
        shutil.copy(join(ensemble_dir, ensemble_name, 'postprocessing.json'), target_dir)
def compress_everything(output_base, num_processes=8):
    """Zip every task subfolder of ``output_base`` in parallel (one zip per task)."""
    task_dirs = subfolders(output_base, join=False)
    task_names = [d.split('/')[-1] for d in task_dirs]
    job_args = [(join(output_base, name + ".zip"), join(output_base, d))
                for d, name in zip(task_dirs, task_names)]
    pool = Pool(num_processes)
    pool.starmap(compress_folder, job_args)
    pool.close()
    pool.join()
def compress_folder(zip_file, folder):
    """Recursively zip the contents of ``folder`` into ``zip_file``.

    Archive member names are stored relative to ``folder``.
    Inspired by https://stackoverflow.com/questions/1855095/how-to-create-a-zip-archive-of-a-directory-in-python

    :param zip_file: path of the zip file to create
    :param folder: directory whose contents are archived
    """
    # BUGFIX: the original never closed the ZipFile, so the central directory was
    # only written when the interpreter happened to garbage-collect the object;
    # on error the archive was left corrupt. A context manager guarantees closing.
    with zipfile.ZipFile(zip_file, 'w', zipfile.ZIP_DEFLATED) as zipf:
        for root, dirs, files in os.walk(folder):
            for file in files:
                zipf.write(join(root, file), os.path.relpath(join(root, file), folder))
def export_one_task(taskname, models, output_folder, nnunet_trainer=default_trainer,
                    nnunet_trainer_cascade=default_cascade_trainer,
                    plans_identifier=default_plans_identifier):
    """Copy all models and valid ensembles of one task into output_folder, then zip them.

    NOTE(review): copy_pretrained_models_for_task writes into
    output_folder/<model>/<taskname>/..., but the zip step below archives
    output_folder/<taskname> — verify the intended layout; os.walk on a missing
    folder silently yields an empty archive.
    """
    copy_pretrained_models_for_task(taskname, output_folder, models, nnunet_trainer, nnunet_trainer_cascade,
                                    plans_identifier)
    copy_ensembles(taskname, output_folder, models, (nnunet_trainer, nnunet_trainer_cascade), (plans_identifier,))
    compress_folder(join(output_folder, taskname + '.zip'), join(output_folder, taskname))
def export_pretrained_model(task_name: str, output_file: str,
                            models: tuple = ("2d", "3d_lowres", "3d_fullres", "3d_cascade_fullres"),
                            nnunet_trainer=default_trainer,
                            nnunet_trainer_cascade=default_cascade_trainer,
                            plans_identifier=default_plans_identifier,
                            folds=(0, 1, 2, 3, 4), strict=True):
    """Export the trained models (and ensemble postprocessing files) of one task
    into a single zip that can be installed with nnUNet_install_pretrained_model_from_zip.

    :param task_name: TaskXXX_YYY style task name
    :param output_file: path of the zip file to create
    :param models: model configurations to include
    :param nnunet_trainer: trainer class name used for 2d, 3d_lowres and 3d_fullres
    :param nnunet_trainer_cascade: trainer class name used for 3d_cascade_fullres
    :param plans_identifier: plans identifier the models were trained with
    :param folds: fold ids to include (ints or the string 'all')
    :param strict: if True, missing models or postprocessing files raise; otherwise they are skipped
    """
    trainer_output_dir = nnunet_trainer + "__" + plans_identifier
    trainer_output_dir_cascade = nnunet_trainer_cascade + "__" + plans_identifier
    # BUGFIX: the original returned early (missing ensemble dir) without closing the
    # ZipFile, leaving an archive without a central directory. The context manager
    # guarantees the zip is finalized on every path.
    with zipfile.ZipFile(output_file, 'w', zipfile.ZIP_DEFLATED) as zipf:
        for m in models:
            to = trainer_output_dir_cascade if m == "3d_cascade_fullres" else trainer_output_dir
            expected_output_folder = join(network_training_output_dir, m, task_name, to)
            if not isdir(expected_output_folder):
                if strict:
                    raise RuntimeError("Task %s is missing the model %s" % (task_name, m))
                else:
                    continue
            expected_folders = ["fold_%d" % i if i != 'all' else i for i in folds]
            assert all([isdir(join(expected_output_folder, i)) for i in expected_folders]), "not all requested folds " \
                                                                                            "present, " \
                                                                                            "Task %s model %s" % \
                                                                                            (task_name, m)
            assert isfile(join(expected_output_folder, "plans.pkl")), "plans.pkl missing, Task %s model %s" % (task_name, m)
            for e in expected_folders:
                # every per-fold file is stored relative to network_training_output_dir
                # so that installing the zip recreates the expected folder layout
                for fname in ("debug.json", "model_final_checkpoint.model",
                              "model_final_checkpoint.model.pkl", "progress.png"):
                    zipf.write(join(expected_output_folder, e, fname),
                               os.path.relpath(join(expected_output_folder, e, fname),
                                               network_training_output_dir))
                if isfile(join(expected_output_folder, e, "network_architecture.pdf")):
                    zipf.write(join(expected_output_folder, e, "network_architecture.pdf"),
                               os.path.relpath(join(expected_output_folder, e, "network_architecture.pdf"),
                                               network_training_output_dir))
            zipf.write(join(expected_output_folder, "plans.pkl"),
                       os.path.relpath(join(expected_output_folder, "plans.pkl"), network_training_output_dir))
            if not isfile(join(expected_output_folder, "postprocessing.json")):
                if strict:
                    raise RuntimeError('postprocessing.json missing. Run nnUNet_determine_postprocessing or disable strict')
                else:
                    print('WARNING: postprocessing.json missing')
            else:
                zipf.write(join(expected_output_folder, "postprocessing.json"),
                           os.path.relpath(join(expected_output_folder, "postprocessing.json"), network_training_output_dir))
        ensemble_dir = join(network_training_output_dir, 'ensembles', task_name)
        if not isdir(ensemble_dir):
            print("No ensemble directory found for task", task_name)
            return
        subd = subdirs(ensemble_dir, join=False)
        # BUGFIX: valid_plans must be a tuple; (plans_identifier) is just a string,
        # which made check_if_valid accept any plan that is a substring of it.
        valid = [s for s in subd
                 if check_if_valid(s, models, (nnunet_trainer, nnunet_trainer_cascade), (plans_identifier,))]
        for v in valid:
            zipf.write(join(ensemble_dir, v, 'postprocessing.json'),
                       os.path.relpath(join(ensemble_dir, v, 'postprocessing.json'),
                                       network_training_output_dir))
def export_entry_point():
    """CLI entry point: export a task's trained models to a shareable zip file."""
    import argparse
    parser = argparse.ArgumentParser(description="Use this script to export models to a zip file for sharing with "
                                                 "others. You can upload the zip file and then either share the url "
                                                 "for usage with nnUNet_download_pretrained_model_by_url, or share the "
                                                 "zip for usage with nnUNet_install_pretrained_model_from_zip")
    parser.add_argument('-t', type=str, help='task name or task id')
    parser.add_argument('-o', type=str, help='output file name. Should end with .zip')
    parser.add_argument('-m', nargs='+',
                        help='list of model configurations. Default: 2d 3d_lowres 3d_fullres 3d_cascade_fullres. Must '
                             'be adapted to fit the available models of a task',
                        default=("2d", "3d_lowres", "3d_fullres", "3d_cascade_fullres"), required=False)
    parser.add_argument('-tr', type=str, help='trainer class used for 2d 3d_lowres and 3d_fullres. '
                                              'Default: %s' % default_trainer, required=False, default=default_trainer)
    parser.add_argument('-trc', type=str, help='trainer class used for 3d_cascade_fullres. '
                                               'Default: %s' % default_cascade_trainer, required=False,
                        default=default_cascade_trainer)
    parser.add_argument('-pl', type=str, help='nnunet plans identifier. Default: %s' % default_plans_identifier,
                        required=False, default=default_plans_identifier)
    parser.add_argument('--disable_strict', action='store_true', help='set this if you want to allow skipping '
                                                                      'missing things', required=False)
    parser.add_argument('-f', nargs='+', type=int, help='Folds. Default: 0 1 2 3 4', required=False, default=[0, 1, 2, 3, 4])
    args = parser.parse_args()
    folds = args.f
    # NOTE(review): -f is parsed with type=int, so the string 'all' can never
    # appear here and this conversion is redundant — verify whether 'all' was
    # meant to be an accepted value (export_pretrained_model supports it).
    folds = [int(i) if i != 'all' else i for i in folds]
    taskname = args.t
    if taskname.startswith("Task"):
        pass
    else:
        # a bare integer task id is translated into the full TaskXXX_YYY name
        try:
            taskid = int(taskname)
        except Exception as e:
            print('-t must be either a Task name (TaskXXX_YYY) or a task id (integer)')
            raise e
        taskname = convert_id_to_task_name(taskid)
    export_pretrained_model(taskname, args.o, args.m, args.tr, args.trc, args.pl, strict=not args.disable_strict,
                            folds=folds)
def export_for_paper():
    """Export the trained models of every task used in the nnU-Net publication."""
    output_base = "/media/fabian/DeepLearningData/nnunet_trained_models"
    task_ids = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 17, 24, 27, 29, 35, 48, 55, 61, 38]
    for task_id in task_ids:
        # task 61 only has a 3d_fullres model; all others have the full set
        if task_id == 61:
            models = ("3d_fullres",)
        else:
            models = ("2d", "3d_lowres", "3d_fullres", "3d_cascade_fullres")
        taskname = convert_id_to_task_name(task_id)
        print(taskname)
        task_output = join(output_base, taskname)
        maybe_mkdir_p(task_output)
        copy_pretrained_models_for_task(taskname, task_output, models)
        copy_ensembles(taskname, task_output)
    compress_everything(output_base, 8)
| 14,191
| 51.176471
| 125
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/inference/pretrained_models/download_pretrained_model.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import tempfile
from time import time
from urllib.request import urlopen
from batchgenerators.utilities.file_and_folder_operations import join, isfile
from nnunet.paths import network_training_output_dir
from subprocess import call
import requests
import os
def get_available_models():
available_models = {
"Task001_BrainTumour": {
'description': "Brain Tumor Segmentation. \n"
"Segmentation targets are edema, enhancing tumor and necrosis, \n"
"input modalities are 0: FLAIR, 1: T1, 2: T1 with contrast agent, 3: T2. \n"
"Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
'url': "https://zenodo.org/record/4003545/files/Task001_BrainTumour.zip?download=1"
},
"Task002_Heart": {
'description': "Left Atrium Segmentation. \n"
"Segmentation target is the left atrium, \n"
"input modalities are 0: MRI. \n"
"Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
'url': "https://zenodo.org/record/4003545/files/Task002_Heart.zip?download=1"
},
"Task003_Liver": {
'description': "Liver and Liver Tumor Segmentation. \n"
"Segmentation targets are liver and tumors, \n"
"input modalities are 0: abdominal CT scan. \n"
"Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
'url': "https://zenodo.org/record/4003545/files/Task003_Liver.zip?download=1"
},
"Task004_Hippocampus": {
'description': "Hippocampus Segmentation. \n"
"Segmentation targets posterior and anterior parts of the hippocampus, \n"
"input modalities are 0: MRI. \n"
"Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
'url': "https://zenodo.org/record/4003545/files/Task004_Hippocampus.zip?download=1"
},
"Task005_Prostate": {
'description': "Prostate Segmentation. \n"
"Segmentation targets are peripheral and central zone, \n"
"input modalities are 0: T2, 1: ADC. \n"
"Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
'url': "https://zenodo.org/record/4003545/files/Task005_Prostate.zip?download=1"
},
"Task006_Lung": {
'description': "Lung Nodule Segmentation. \n"
"Segmentation target are lung nodules, \n"
"input modalities are 0: abdominal CT scan. \n"
"Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
'url': "https://zenodo.org/record/4003545/files/Task006_Lung.zip?download=1"
},
"Task007_Pancreas": {
'description': "Pancreas Segmentation. \n"
"Segmentation targets are pancras and pancreas tumor, \n"
"input modalities are 0: abdominal CT scan. \n"
"Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
'url': "https://zenodo.org/record/4003545/files/Task007_Pancreas.zip?download=1"
},
"Task008_HepaticVessel": {
'description': "Hepatic Vessel Segmentation. \n"
"Segmentation targets are hepatic vesels and liver tumors, \n"
"input modalities are 0: abdominal CT scan. \n"
"Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
'url': "https://zenodo.org/record/4003545/files/Task008_HepaticVessel.zip?download=1"
},
"Task009_Spleen": {
'description': "Spleen Segmentation. \n"
"Segmentation target is the spleen, \n"
"input modalities are 0: abdominal CT scan. \n"
"Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
'url': "https://zenodo.org/record/4003545/files/Task009_Spleen.zip?download=1"
},
"Task010_Colon": {
'description': "Colon Cancer Segmentation. \n"
"Segmentation target are colon caner primaries, \n"
"input modalities are 0: CT scan. \n"
"Also see Medical Segmentation Decathlon, http://medicaldecathlon.com/",
'url': "https://zenodo.org/record/4003545/files/Task010_Colon.zip?download=1"
},
"Task017_AbdominalOrganSegmentation": {
'description': "Multi-Atlas Labeling Beyond the Cranial Vault - Abdomen. \n"
"Segmentation targets are thirteen different abdominal organs, \n"
"input modalities are 0: abdominal CT scan. \n"
"Also see https://www.synapse.org/#!Synapse:syn3193805/wiki/217754",
'url': "https://zenodo.org/record/4003545/files/Task017_AbdominalOrganSegmentation.zip?download=1"
},
"Task024_Promise": {
'description': "Prostate MR Image Segmentation 2012. \n"
"Segmentation target is the prostate, \n"
"input modalities are 0: T2. \n"
"Also see https://promise12.grand-challenge.org/",
'url': "https://zenodo.org/record/4003545/files/Task024_Promise.zip?download=1"
},
"Task027_ACDC": {
'description': "Automatic Cardiac Diagnosis Challenge. \n"
"Segmentation targets are right ventricle, left ventricular cavity and left myocardium, \n"
"input modalities are 0: cine MRI. \n"
"Also see https://acdc.creatis.insa-lyon.fr/",
'url': "https://zenodo.org/record/4003545/files/Task027_ACDC.zip?download=1"
},
"Task029_LiTS": {
'description': "Liver and Liver Tumor Segmentation Challenge. \n"
"Segmentation targets are liver and liver tumors, \n"
"input modalities are 0: abdominal CT scan. \n"
"Also see https://competitions.codalab.org/competitions/17094",
'url': "https://zenodo.org/record/4003545/files/Task029_LITS.zip?download=1"
},
"Task035_ISBILesionSegmentation": {
'description': "Longitudinal multiple sclerosis lesion segmentation Challenge. \n"
"Segmentation target is MS lesions, \n"
"input modalities are 0: FLAIR, 1: MPRAGE, 2: proton density, 3: T2. \n"
"Also see https://smart-stats-tools.org/lesion-challenge",
'url': "https://zenodo.org/record/4003545/files/Task035_ISBILesionSegmentation.zip?download=1"
},
"Task038_CHAOS_Task_3_5_Variant2": {
'description': "CHAOS - Combined (CT-MR) Healthy Abdominal Organ Segmentation Challenge (Task 3 & 5). \n"
"Segmentation targets are left and right kidney, liver, spleen, \n"
"input modalities are 0: T1 in-phase, T1 out-phase, T2 (can be any of those)\n"
"Also see https://chaos.grand-challenge.org/",
'url': "https://zenodo.org/record/4003545/files/Task038_CHAOS_Task_3_5_Variant2.zip?download=1"
},
"Task048_KiTS_clean": {
'description': "Kidney and Kidney Tumor Segmentation Challenge. "
"Segmentation targets kidney and kidney tumors, "
"input modalities are 0: abdominal CT scan. "
"Also see https://kits19.grand-challenge.org/",
'url': "https://zenodo.org/record/4003545/files/Task048_KiTS_clean.zip?download=1"
},
"Task055_SegTHOR": {
'description': "SegTHOR: Segmentation of THoracic Organs at Risk in CT images. \n"
"Segmentation targets are aorta, esophagus, heart and trachea, \n"
"input modalities are 0: CT scan. \n"
"Also see https://competitions.codalab.org/competitions/21145",
'url': "https://zenodo.org/record/4003545/files/Task055_SegTHOR.zip?download=1"
},
"Task061_CREMI": {
'description': "MICCAI Challenge on Circuit Reconstruction from Electron Microscopy Images (Synaptic Cleft segmentation task). \n"
"Segmentation target is synaptic clefts, \n"
"input modalities are 0: serial section transmission electron microscopy of neural tissue. \n"
"Also see https://cremi.org/",
'url': "https://zenodo.org/record/4003545/files/Task061_CREMI.zip?download=1"
},
"Task075_Fluo_C3DH_A549_ManAndSim": {
'description': "Fluo-C3DH-A549-SIM and Fluo-C3DH-A549 datasets of the cell tracking challenge. Segmentation target are C3DH cells in fluorescence microscopy images.\n"
"input modalities are 0: fluorescence_microscopy\n"
"Also see http://celltrackingchallenge.net/",
'url': "https://zenodo.org/record/4003545/files/Task075_Fluo_C3DH_A549_ManAndSim.zip?download=1"
},
"Task076_Fluo_N3DH_SIM": {
'description': "Fluo-N3DH-SIM dataset of the cell tracking challenge. Segmentation target are N3DH cells and cell borders in fluorescence microscopy images.\n"
"input modalities are 0: fluorescence_microscopy\n"
"Also see http://celltrackingchallenge.net/\n",
"Note that the segmentation output of the models are cell center and cell border. These outputs mus tbe converted to an instance segmentation for the challenge. \n"
"See https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/dataset_conversion/Task076_Fluo_N3DH_SIM.py"
'url': "https://zenodo.org/record/4003545/files/Task076_Fluo_N3DH_SIM.zip?download=1"
},
"Task089_Fluo-N2DH-SIM_thickborder_time": {
'description': "Fluo-N2DH-SIM dataset of the cell tracking challenge. Segmentation target are nuclei of N2DH cells and cell borders in fluorescence microscopy images.\n"
"input modalities are 0: t minus 4, 0: t minus 3, 0: t minus 2, 0: t minus 1, 0: frame of interest\n"
"Note that the input channels are different time steps from a time series acquisition\n"
"Note that the segmentation output of the models are cell center and cell border. These outputs mus tbe converted to an instance segmentation for the challenge. \n"
"See https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/dataset_conversion/Task089_Fluo-N2DH-SIM.py"
"Also see http://celltrackingchallenge.net/",
'url': "https://zenodo.org/record/4003545/files/Task089_Fluo-N2DH-SIM_thickborder_time.zip?download=1"
},
"Task114_heart_MNMs": {
'description': "Cardiac MRI short axis images from the M&Ms challenge 2020.\n"
"input modalities are 0: MRI \n"
"See also https://www.ub.edu/mnms/ \n"
"Note: Labels of the M&Ms Challenge are not in the same order as for the ACDC challenge. \n"
"See https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/dataset_conversion/Task114_heart_mnms.py",
'url': "https://zenodo.org/record/4288464/files/Task114_heart_MNMs.zip?download=1"
},
}
return available_models
def print_available_pretrained_models():
    """Print the name and description of every downloadable pretrained model."""
    print('The following pretrained models are available:\n')
    for name, info in get_available_models().items():
        print('')
        print(name)
        print(info['description'])
def download_and_install_pretrained_model_by_name(taskname):
    """Look up ``taskname`` in the model registry, download its zip and install it."""
    available = get_available_models()
    if taskname not in available.keys():
        raise RuntimeError("\nThe requested pretrained model ('%s') is not available." % taskname)
    url = available[taskname]['url']
    # an empty url means the model is registered but not uploaded yet
    if len(url) == 0:
        raise RuntimeError("The requested model has not been uploaded yet. Please check back in a few days")
    download_and_install_from_url(url)
def download_and_install_from_url(url):
    """Download a pretrained-model zip from ``url`` to a temporary file in the
    user's home directory, install it into network_training_output_dir, and
    remove the temporary file afterwards (also on failure).
    """
    assert network_training_output_dir is not None, "Cannot install model because network_training_output_dir is not " \
                                                    "set (RESULTS_FOLDER missing as environment variable, see " \
                                                    "Installation instructions)"
    import http.client
    # Force HTTP/1.0 to avoid chunked keep-alive issues with some servers.
    # NOTE: this mutates global http.client state for the whole process.
    http.client.HTTPConnection._http_vsn = 10
    http.client.HTTPConnection._http_vsn_str = 'HTTP/1.0'

    import os
    home = os.path.expanduser('~')
    random_number = int(time() * 1e7)
    # BUGFIX: the local variable used to be named 'tempfile', shadowing the
    # stdlib tempfile module imported at the top of this file.
    tmp_file = join(home, '.nnunetdownload_%s' % str(random_number))

    try:
        with open(tmp_file, 'wb') as f:
            with requests.get(url, stream=True) as r:
                r.raise_for_status()
                for chunk in r.iter_content(chunk_size=8192 * 16):
                    f.write(chunk)
        print("Download finished. Extracting...")
        install_model_from_zip_file(tmp_file)
        print("Done")
    finally:
        # always clean up the (possibly partial) download; exceptions propagate
        if isfile(tmp_file):
            os.remove(tmp_file)
def download_file(url, local_filename):
    """Stream the resource at ``url`` into ``local_filename`` and return that path.

    Borrowed from https://stackoverflow.com/questions/16694907/download-large-file-in-python-with-requests
    """
    # stream=True keeps the response body off-heap until we iterate it
    with requests.get(url, stream=True) as response:
        response.raise_for_status()
        with open(local_filename, 'wb') as out_file:
            for chunk in response.iter_content(chunk_size=None):
                out_file.write(chunk)
    return local_filename
def install_model_from_zip_file(zip_file: str):
    """Extract a pretrained-model zip archive into network_training_output_dir.

    Uses the stdlib zipfile module instead of shelling out to the external
    'unzip' binary, so installation also works where 'unzip' is not available
    (e.g. Windows). Existing files are overwritten, matching 'unzip -o'.
    """
    import zipfile
    with zipfile.ZipFile(zip_file, 'r') as zf:
        zf.extractall(network_training_output_dir)
def print_license_warning():
    """Print a banner reminding users that pretrained weights inherit the license of their training data."""
    separator = '######################################################'
    print('')
    print(separator)
    print('!!!!!!!!!!!!!!!!!!!!!!!!WARNING!!!!!!!!!!!!!!!!!!!!!!!')
    print(separator)
    print("Using the pretrained model weights is subject to the license of the dataset they were trained on. Some "
          "allow commercial use, others don't. It is your responsibility to make sure you use them appropriately! Use "
          "nnUNet_print_pretrained_model_info(task_name) to see a summary of the dataset and where to find its license!")
    print(separator)
    print('')
def download_by_name():
    """CLI entry point: download and install an official pretrained model by task name."""
    import argparse
    parser = argparse.ArgumentParser(description="Use this to download pretrained models. CAREFUL: This script will "
                                                 "overwrite "
                                                 "existing models (if they share the same trainer class and plans as "
                                                 "the pretrained model")
    parser.add_argument("task_name", type=str, help='Task name of the pretrained model. To see '
                                                    'available task names, run nnUNet_print_available_'
                                                    'pretrained_models')
    args = parser.parse_args()
    taskname = args.task_name
    # remind the user of the dataset license before anything is downloaded
    print_license_warning()
    download_and_install_pretrained_model_by_name(taskname)
def download_by_url():
    """CLI entry point: download and install a pretrained model zip from an arbitrary URL."""
    import argparse
    parser = argparse.ArgumentParser(
        description="Use this to download pretrained models. This script is intended to download models via url only. "
                    "If you want to download one of our pretrained models, please use nnUNet_download_pretrained_model. "
                    "CAREFUL: This script will overwrite "
                    "existing models (if they share the same trainer class and plans as "
                    "the pretrained model.")
    parser.add_argument("url", type=str, help='URL of the pretrained model')
    args = parser.parse_args()
    url = args.url
    download_and_install_from_url(url)
def install_from_zip_entry_point():
    """CLI entry point: install a pretrained model from a local zip file."""
    import argparse
    parser = argparse.ArgumentParser(
        description="Use this to install a zip file containing a pretrained model.")
    parser.add_argument("zip", type=str, help='zip file')
    args = parser.parse_args()
    # renamed local variable (was 'zip') so it no longer shadows the builtin
    zip_file = args.zip
    install_model_from_zip_file(zip_file)
def print_pretrained_model_requirements():
    """CLI entry point: print the description (incl. required input modalities) of one pretrained model."""
    import argparse
    parser = argparse.ArgumentParser(description="Use this to see the properties of a pretrained model, especially "
                                                 "what input modalities it requires")
    parser.add_argument("task_name", type=str, help='Task name of the pretrained model. To see '
                                                    'available task names, run nnUNet_print_available_'
                                                    'pretrained_models')
    args = parser.parse_args()
    taskname = args.task_name
    av = get_available_models()
    if taskname not in av.keys():
        raise RuntimeError("Invalid task name. This pretrained model does not exist. To see available task names, "
                           "run nnUNet_print_available_pretrained_models")
    print(av[taskname]['description'])
if __name__ == '__main__':
    # Leftover debugging snippet: the URL is assigned but nothing is downloaded.
    url = 'https://www.dropbox.com/s/ft54q1gi060vm2x/Task004_Hippocampus.zip?dl=1'
| 19,076
| 55.946269
| 191
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/preprocessing/preprocessing.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from batchgenerators.augmentations.utils import resize_segmentation
from nnunet.configuration import default_num_threads, RESAMPLING_SEPARATE_Z_ANISO_THRESHOLD
from nnunet.preprocessing.cropping import get_case_identifier_from_npz, ImageCropper
from skimage.transform import resize
from scipy.ndimage.interpolation import map_coordinates
import numpy as np
from batchgenerators.utilities.file_and_folder_operations import *
from multiprocessing.pool import Pool
def get_do_separate_z(spacing, anisotropy_threshold=RESAMPLING_SEPARATE_Z_ANISO_THRESHOLD):
    """Return True when the spacing is anisotropic enough to warrant resampling the
    low-resolution axis separately (max/min spacing ratio above the threshold)."""
    spacing_ratio = np.max(spacing) / np.min(spacing)
    return spacing_ratio > anisotropy_threshold
def get_lowres_axis(new_spacing):
    """Return the indices of the axes carrying the coarsest (maximum) spacing.

    An axis qualifies when max(spacing) divided by its spacing equals exactly 1,
    i.e. it holds the maximum itself; multiple indices are returned on ties.
    """
    spacing_arr = np.array(new_spacing)
    is_coarsest = (max(new_spacing) / spacing_arr) == 1
    return np.where(is_coarsest)[0]
def resample_patient(data, seg, original_spacing, target_spacing, order_data=3, order_seg=0, force_separate_z=False,
                     cval_data=0, cval_seg=-1, order_z_data=0, order_z_seg=0,
                     separate_z_anisotropy_threshold=RESAMPLING_SEPARATE_Z_ANISO_THRESHOLD):
    """
    Resample image data and/or segmentation from original_spacing to target_spacing.

    :param cval_seg: fill value used when resampling seg
    :param cval_data: fill value used when resampling data
    :param data: 4d array (c, x, y, z) or None
    :param seg: 4d array (c, x, y, z) or None
    :param original_spacing:
    :param target_spacing:
    :param order_data: spline interpolation order for data
    :param order_seg: spline interpolation order for seg
    :param force_separate_z: if None then we dynamically decide how to resample along z, if True/False then always
    /never resample along z separately. NOTE(review): the declared default here is False, not None — confirm which
    behavior callers rely on.
    :param order_z_seg: only applies if do_separate_z is True
    :param order_z_data: only applies if do_separate_z is True
    :param separate_z_anisotropy_threshold: if max_spacing > separate_z_anisotropy_threshold * min_spacing (per axis)
    then resample along lowres axis with order_z_data/order_z_seg instead of order_data/order_seg
    :return: tuple (resampled data or None, resampled seg or None)
    """
    assert not ((data is None) and (seg is None))
    if data is not None:
        assert len(data.shape) == 4, "data must be c x y z"
    if seg is not None:
        assert len(seg.shape) == 4, "seg must be c x y z"
    if data is not None:
        shape = np.array(data[0].shape)
    else:
        shape = np.array(seg[0].shape)
    # target voxel grid that preserves the physical field of view
    new_shape = np.round(((np.array(original_spacing) / np.array(target_spacing)).astype(float) * shape)).astype(int)
    if force_separate_z is not None:
        do_separate_z = force_separate_z
        if force_separate_z:
            axis = get_lowres_axis(original_spacing)
        else:
            axis = None
    else:
        # dynamic decision: treat the lowres axis separately if either the source
        # or the target spacing is anisotropic beyond the threshold
        if get_do_separate_z(original_spacing, separate_z_anisotropy_threshold):
            do_separate_z = True
            axis = get_lowres_axis(original_spacing)
        elif get_do_separate_z(target_spacing, separate_z_anisotropy_threshold):
            do_separate_z = True
            axis = get_lowres_axis(target_spacing)
        else:
            do_separate_z = False
            axis = None
    if axis is not None:
        if len(axis) == 3:
            # every axis has the spacing (isotropic tie) — fall back to axis 0
            axis = (0, )
        elif len(axis) == 2:
            # two tied lowres axes cannot be handled by the slice-wise scheme
            print("WARNING: axis has len 2, axis: %s, spacing: %s, target_spacing: %s" % (str(axis), original_spacing, target_spacing))
            do_separate_z = False
        else:
            pass
    if data is not None:
        data_reshaped = resample_data_or_seg(data, new_shape, False, axis, order_data, do_separate_z, cval=cval_data,
                                             order_z=order_z_data)
    else:
        data_reshaped = None
    if seg is not None:
        seg_reshaped = resample_data_or_seg(seg, new_shape, True, axis, order_seg, do_separate_z, cval=cval_seg,
                                            order_z=order_z_seg)
    else:
        seg_reshaped = None
    return data_reshaped, seg_reshaped
def resample_data_or_seg(data, new_shape, is_seg, axis=None, order=3, do_separate_z=False, cval=0, order_z=0):
    """
    Resample a 4d array (c, x, y, z) to new_shape; separate_z=True will resample
    each slice in-plane first and then interpolate along the lowres axis.

    :param data: 4d array (c, x, y, z)
    :param new_shape: target spatial shape
    :param is_seg: if True, use label-safe resizing so class ids are never blended
    :param axis: indices of the anisotropic axis (exactly one when do_separate_z)
    :param order: spline order for the (in-plane) resize
    :param do_separate_z: resample slice-wise in-plane, then along the lowres axis
    :param cval: fill value for out-of-bounds samples
    :param order_z: only applies if do_separate_z is True
    :return: resampled array cast back to the input dtype
    """
    assert len(data.shape) == 4, "data must be (c, x, y, z)"
    if is_seg:
        # resize_segmentation resizes label maps without mixing label values
        resize_fn = resize_segmentation
        kwargs = OrderedDict()
    else:
        resize_fn = resize
        kwargs = {'mode': 'edge', 'anti_aliasing': False}
    dtype_data = data.dtype
    data = data.astype(float)
    shape = np.array(data[0].shape)
    new_shape = np.array(new_shape)
    if np.any(shape != new_shape):
        if do_separate_z:
            print("separate z, order in z is", order_z, "order inplane is", order)
            assert len(axis) == 1, "only one anisotropic axis supported"
            axis = axis[0]
            # the 2d target shape is new_shape with the lowres axis removed
            if axis == 0:
                new_shape_2d = new_shape[1:]
            elif axis == 1:
                new_shape_2d = new_shape[[0, 2]]
            else:
                new_shape_2d = new_shape[:-1]
            reshaped_final_data = []
            for c in range(data.shape[0]):
                reshaped_data = []
                # step 1: resize each slice in-plane at full interpolation order
                for slice_id in range(shape[axis]):
                    if axis == 0:
                        reshaped_data.append(resize_fn(data[c, slice_id], new_shape_2d, order, cval=cval, **kwargs))
                    elif axis == 1:
                        reshaped_data.append(resize_fn(data[c, :, slice_id], new_shape_2d, order, cval=cval, **kwargs))
                    else:
                        reshaped_data.append(resize_fn(data[c, :, :, slice_id], new_shape_2d, order, cval=cval,
                                                       **kwargs))
                reshaped_data = np.stack(reshaped_data, axis)
                if shape[axis] != new_shape[axis]:
                    # step 2: interpolate along the lowres axis with order_z
                    # The following few lines are blatantly copied and modified from sklearn's resize()
                    rows, cols, dim = new_shape[0], new_shape[1], new_shape[2]
                    orig_rows, orig_cols, orig_dim = reshaped_data.shape
                    row_scale = float(orig_rows) / rows
                    col_scale = float(orig_cols) / cols
                    dim_scale = float(orig_dim) / dim
                    map_rows, map_cols, map_dims = np.mgrid[:rows, :cols, :dim]
                    map_rows = row_scale * (map_rows + 0.5) - 0.5
                    map_cols = col_scale * (map_cols + 0.5) - 0.5
                    map_dims = dim_scale * (map_dims + 0.5) - 0.5
                    coord_map = np.array([map_rows, map_cols, map_dims])
                    if not is_seg or order_z == 0:
                        reshaped_final_data.append(map_coordinates(reshaped_data, coord_map, order=order_z, cval=cval,
                                                                   mode='nearest')[None])
                    else:
                        # interpolating a label map with order > 0 would blend label
                        # ids; interpolate each label's binary mask separately instead
                        unique_labels = np.unique(reshaped_data)
                        reshaped = np.zeros(new_shape, dtype=dtype_data)
                        for i, cl in enumerate(unique_labels):
                            reshaped_multihot = np.round(
                                map_coordinates((reshaped_data == cl).astype(float), coord_map, order=order_z,
                                                cval=cval, mode='nearest'))
                            reshaped[reshaped_multihot > 0.5] = cl
                        reshaped_final_data.append(reshaped[None])
                else:
                    reshaped_final_data.append(reshaped_data[None])
            reshaped_final_data = np.vstack(reshaped_final_data)
        else:
            print("no separate z, order", order)
            reshaped = []
            for c in range(data.shape[0]):
                reshaped.append(resize_fn(data[c], new_shape, order, cval=cval, **kwargs)[None])
            reshaped_final_data = np.vstack(reshaped)
        return reshaped_final_data.astype(dtype_data)
    else:
        print("no resampling necessary")
        return data
class GenericPreprocessor(object):
    """
    Default nnU-Net preprocessor.

    Takes cropped cases (npz + pkl pairs produced by ImageCropper), resamples them to a
    target spacing, normalizes intensities per modality and writes the result (image
    channels and segmentation stacked along axis 0) as compressed .npz files together
    with an updated properties .pkl.
    """
    def __init__(self, normalization_scheme_per_modality, use_nonzero_mask, transpose_forward: (tuple, list), intensityproperties=None):
        """
        :param normalization_scheme_per_modality: dict {0:'nonCT'}
        :param use_nonzero_mask: {0:False}
        :param intensityproperties:
        """
        self.transpose_forward = transpose_forward
        self.intensityproperties = intensityproperties
        self.normalization_scheme_per_modality = normalization_scheme_per_modality
        self.use_nonzero_mask = use_nonzero_mask
        # spacing anisotropy above this threshold triggers separate resampling along z
        self.resample_separate_z_anisotropy_threshold = RESAMPLING_SEPARATE_Z_ANISO_THRESHOLD
    @staticmethod
    def load_cropped(cropped_output_dir, case_identifier):
        # the cropped npz stores image channels and the segmentation stacked along
        # axis 0; the last channel is the segmentation
        all_data = np.load(os.path.join(cropped_output_dir, "%s.npz" % case_identifier))['data']
        data = all_data[:-1].astype(np.float32)
        seg = all_data[-1:]
        with open(os.path.join(cropped_output_dir, "%s.pkl" % case_identifier), 'rb') as f:
            properties = pickle.load(f)
        return data, seg, properties
    def resample_and_normalize(self, data, target_spacing, properties, seg=None, force_separate_z=None):
        """
        data and seg must already have been transposed by transpose_forward. properties are the un-transposed values
        (spacing etc)
        :param data:
        :param target_spacing:
        :param properties:
        :param seg:
        :param force_separate_z:
        :return:
        """
        # target_spacing is already transposed, properties["original_spacing"] is not so we need to transpose it!
        # data, seg are already transposed. Double check this using the properties
        original_spacing_transposed = np.array(properties["original_spacing"])[self.transpose_forward]
        before = {
            'spacing': properties["original_spacing"],
            'spacing_transposed': original_spacing_transposed,
            'data.shape (data is transposed)': data.shape
        }
        # remove nans
        data[np.isnan(data)] = 0
        # order 3 for data, order 1 for seg; nearest neighbor (order 0) along z when separate
        data, seg = resample_patient(data, seg, np.array(original_spacing_transposed), target_spacing, 3, 1,
                                     force_separate_z=force_separate_z, order_z_data=0, order_z_seg=0,
                                     separate_z_anisotropy_threshold=self.resample_separate_z_anisotropy_threshold)
        after = {
            'spacing': target_spacing,
            'data.shape (data is resampled)': data.shape
        }
        print("before:", before, "\nafter: ", after, "\n")
        if seg is not None:  # hippocampus 243 has one voxel with -2 as label. wtf?
            seg[seg < -1] = 0
        properties["size_after_resampling"] = data[0].shape
        properties["spacing_after_resampling"] = target_spacing
        use_nonzero_mask = self.use_nonzero_mask
        assert len(self.normalization_scheme_per_modality) == len(data), "self.normalization_scheme_per_modality " \
                                                                         "must have as many entries as data has " \
                                                                         "modalities"
        assert len(self.use_nonzero_mask) == len(data), "self.use_nonzero_mask must have as many entries as data" \
                                                        " has modalities"
        # per-modality intensity normalization, scheme chosen per channel
        for c in range(len(data)):
            scheme = self.normalization_scheme_per_modality[c]
            if scheme == "CT":
                # clip to lb and ub from train data foreground and use foreground mn and sd from training data
                assert self.intensityproperties is not None, "ERROR: if there is a CT then we need intensity properties"
                mean_intensity = self.intensityproperties[c]['mean']
                std_intensity = self.intensityproperties[c]['sd']
                lower_bound = self.intensityproperties[c]['percentile_00_5']
                upper_bound = self.intensityproperties[c]['percentile_99_5']
                data[c] = np.clip(data[c], lower_bound, upper_bound)
                data[c] = (data[c] - mean_intensity) / std_intensity
                if use_nonzero_mask[c]:
                    # seg[-1] < 0 marks voxels outside the nonzero crop region
                    data[c][seg[-1] < 0] = 0
            elif scheme == "CT2":
                # clip to lb and ub from train data foreground, use mn and sd from each case for normalization
                assert self.intensityproperties is not None, "ERROR: if there is a CT then we need intensity properties"
                lower_bound = self.intensityproperties[c]['percentile_00_5']
                upper_bound = self.intensityproperties[c]['percentile_99_5']
                mask = (data[c] > lower_bound) & (data[c] < upper_bound)
                data[c] = np.clip(data[c], lower_bound, upper_bound)
                mn = data[c][mask].mean()
                sd = data[c][mask].std()
                data[c] = (data[c] - mn) / sd
                if use_nonzero_mask[c]:
                    data[c][seg[-1] < 0] = 0
            else:
                # default: z-score normalization over the (optionally masked) voxels
                if use_nonzero_mask[c]:
                    mask = seg[-1] >= 0
                else:
                    mask = np.ones(seg.shape[1:], dtype=bool)
                data[c][mask] = (data[c][mask] - data[c][mask].mean()) / (data[c][mask].std() + 1e-8)
                data[c][mask == 0] = 0
        return data, seg, properties
    def preprocess_test_case(self, data_files, target_spacing, seg_file=None, force_separate_z=None):
        """Run the full crop -> transpose -> resample/normalize pipeline for one test case."""
        data, seg, properties = ImageCropper.crop_from_list_of_files(data_files, seg_file)
        # axis 0 is the channel axis; transpose only the spatial axes
        data = data.transpose((0, *[i + 1 for i in self.transpose_forward]))
        seg = seg.transpose((0, *[i + 1 for i in self.transpose_forward]))
        data, seg, properties = self.resample_and_normalize(data, target_spacing, properties, seg,
                                                            force_separate_z=force_separate_z)
        return data.astype(np.float32), seg, properties
    def _run_internal(self, target_spacing, case_identifier, output_folder_stage, cropped_output_dir, force_separate_z,
                      all_classes):
        """Preprocess a single cropped case and write the npz/pkl pair for one stage."""
        data, seg, properties = self.load_cropped(cropped_output_dir, case_identifier)
        data = data.transpose((0, *[i + 1 for i in self.transpose_forward]))
        seg = seg.transpose((0, *[i + 1 for i in self.transpose_forward]))
        data, seg, properties = self.resample_and_normalize(data, target_spacing,
                                                            properties, seg, force_separate_z)
        all_data = np.vstack((data, seg)).astype(np.float32)
        # we need to find out where the classes are and sample some random locations
        # let's do 10,000 samples per class
        # seed this for reproducibility!
        num_samples = 10000
        min_percent_coverage = 0.01 # at least 1% of the class voxels need to be selected, otherwise it may be too sparse
        rndst = np.random.RandomState(1234)
        class_locs = {}
        for c in all_classes:
            all_locs = np.argwhere(all_data[-1] == c)
            if len(all_locs) == 0:
                class_locs[c] = []
                continue
            target_num_samples = min(num_samples, len(all_locs))
            target_num_samples = max(target_num_samples, int(np.ceil(len(all_locs) * min_percent_coverage)))
            selected = all_locs[rndst.choice(len(all_locs), target_num_samples, replace=False)]
            class_locs[c] = selected
            print(c, target_num_samples)
        # stored in the pkl so the training data loader can oversample foreground patches
        properties['class_locations'] = class_locs
        print("saving: ", os.path.join(output_folder_stage, "%s.npz" % case_identifier))
        np.savez_compressed(os.path.join(output_folder_stage, "%s.npz" % case_identifier),
                            data=all_data.astype(np.float32))
        with open(os.path.join(output_folder_stage, "%s.pkl" % case_identifier), 'wb') as f:
            pickle.dump(properties, f)
    def run(self, target_spacings, input_folder_with_cropped_npz, output_folder, data_identifier,
            num_threads=default_num_threads, force_separate_z=None):
        """
        :param target_spacings: list of lists [[1.25, 1.25, 5]]
        :param input_folder_with_cropped_npz: dim: c, x, y, z | npz_file['data'] np.savez_compressed(fname.npz, data=arr)
        :param output_folder:
        :param num_threads:
        :param force_separate_z: None
        :return:
        """
        print("Initializing to run preprocessing")
        print("npz folder:", input_folder_with_cropped_npz)
        print("output_folder:", output_folder)
        list_of_cropped_npz_files = subfiles(input_folder_with_cropped_npz, True, None, ".npz", True)
        maybe_mkdir_p(output_folder)
        num_stages = len(target_spacings)
        # allow a single thread count to be used for all stages
        if not isinstance(num_threads, (list, tuple, np.ndarray)):
            num_threads = [num_threads] * num_stages
        assert len(num_threads) == num_stages
        # we need to know which classes are present in this dataset so that we can precompute where these classes are
        # located. This is needed for oversampling foreground
        all_classes = load_pickle(join(input_folder_with_cropped_npz, 'dataset_properties.pkl'))['all_classes']
        for i in range(num_stages):
            all_args = []
            output_folder_stage = os.path.join(output_folder, data_identifier + "_stage%d" % i)
            maybe_mkdir_p(output_folder_stage)
            spacing = target_spacings[i]
            for j, case in enumerate(list_of_cropped_npz_files):
                case_identifier = get_case_identifier_from_npz(case)
                args = spacing, case_identifier, output_folder_stage, input_folder_with_cropped_npz, force_separate_z, all_classes
                all_args.append(args)
            # one worker pool per stage; cases are processed in parallel
            p = Pool(num_threads[i])
            p.starmap(self._run_internal, all_args)
            p.close()
            p.join()
class Preprocessor3DDifferentResampling(GenericPreprocessor):
    """
    Same as GenericPreprocessor, but when resampling separately along z it uses
    order 3 for the data and order 1 for the seg (instead of order 0 for both).
    """
    def resample_and_normalize(self, data, target_spacing, properties, seg=None, force_separate_z=None):
        """
        data and seg must already have been transposed by transpose_forward. properties are the un-transposed values
        (spacing etc)
        :param data:
        :param target_spacing:
        :param properties:
        :param seg:
        :param force_separate_z:
        :return:
        """
        # target_spacing is already transposed, properties["original_spacing"] is not so we need to transpose it!
        # data, seg are already transposed. Double check this using the properties
        original_spacing_transposed = np.array(properties["original_spacing"])[self.transpose_forward]
        before = {
            'spacing': properties["original_spacing"],
            'spacing_transposed': original_spacing_transposed,
            'data.shape (data is transposed)': data.shape
        }
        # remove nans
        data[np.isnan(data)] = 0
        # NOTE: only difference to GenericPreprocessor: order_z_data=3, order_z_seg=1
        data, seg = resample_patient(data, seg, np.array(original_spacing_transposed), target_spacing, 3, 1,
                                     force_separate_z=force_separate_z, order_z_data=3, order_z_seg=1,
                                     separate_z_anisotropy_threshold=self.resample_separate_z_anisotropy_threshold)
        after = {
            'spacing': target_spacing,
            'data.shape (data is resampled)': data.shape
        }
        print("before:", before, "\nafter: ", after, "\n")
        if seg is not None:  # hippocampus 243 has one voxel with -2 as label. wtf?
            seg[seg < -1] = 0
        properties["size_after_resampling"] = data[0].shape
        properties["spacing_after_resampling"] = target_spacing
        use_nonzero_mask = self.use_nonzero_mask
        assert len(self.normalization_scheme_per_modality) == len(data), "self.normalization_scheme_per_modality " \
                                                                         "must have as many entries as data has " \
                                                                         "modalities"
        assert len(self.use_nonzero_mask) == len(data), "self.use_nonzero_mask must have as many entries as data" \
                                                        " has modalities"
        # per-modality intensity normalization (identical to GenericPreprocessor)
        for c in range(len(data)):
            scheme = self.normalization_scheme_per_modality[c]
            if scheme == "CT":
                # clip to lb and ub from train data foreground and use foreground mn and sd from training data
                assert self.intensityproperties is not None, "ERROR: if there is a CT then we need intensity properties"
                mean_intensity = self.intensityproperties[c]['mean']
                std_intensity = self.intensityproperties[c]['sd']
                lower_bound = self.intensityproperties[c]['percentile_00_5']
                upper_bound = self.intensityproperties[c]['percentile_99_5']
                data[c] = np.clip(data[c], lower_bound, upper_bound)
                data[c] = (data[c] - mean_intensity) / std_intensity
                if use_nonzero_mask[c]:
                    data[c][seg[-1] < 0] = 0
            elif scheme == "CT2":
                # clip to lb and ub from train data foreground, use mn and sd from each case for normalization
                assert self.intensityproperties is not None, "ERROR: if there is a CT then we need intensity properties"
                lower_bound = self.intensityproperties[c]['percentile_00_5']
                upper_bound = self.intensityproperties[c]['percentile_99_5']
                mask = (data[c] > lower_bound) & (data[c] < upper_bound)
                data[c] = np.clip(data[c], lower_bound, upper_bound)
                mn = data[c][mask].mean()
                sd = data[c][mask].std()
                data[c] = (data[c] - mn) / sd
                if use_nonzero_mask[c]:
                    data[c][seg[-1] < 0] = 0
            else:
                if use_nonzero_mask[c]:
                    mask = seg[-1] >= 0
                else:
                    mask = np.ones(seg.shape[1:], dtype=bool)
                data[c][mask] = (data[c][mask] - data[c][mask].mean()) / (data[c][mask].std() + 1e-8)
                data[c][mask == 0] = 0
        return data, seg, properties
class Preprocessor3DBetterResampling(GenericPreprocessor):
    """
    This preprocessor always uses force_separate_z=False. It does resampling to the target spacing with third
    order spline for data (just like GenericPreprocessor) and seg (unlike GenericPreprocessor). It never does separate
    resampling in z.
    """
    def resample_and_normalize(self, data, target_spacing, properties, seg=None, force_separate_z=False):
        """
        data and seg must already have been transposed by transpose_forward. properties are the un-transposed values
        (spacing etc)
        :param data:
        :param target_spacing:
        :param properties:
        :param seg:
        :param force_separate_z: ignored; always overwritten with False (see class docstring)
        :return:
        """
        if force_separate_z is not False:
            print("WARNING: Preprocessor3DBetterResampling always uses force_separate_z=False. "
                  "You specified %s. Your choice is overwritten" % str(force_separate_z))
            force_separate_z = False
        # be safe
        assert force_separate_z is False
        # target_spacing is already transposed, properties["original_spacing"] is not so we need to transpose it!
        # data, seg are already transposed. Double check this using the properties
        original_spacing_transposed = np.array(properties["original_spacing"])[self.transpose_forward]
        before = {
            'spacing': properties["original_spacing"],
            'spacing_transposed': original_spacing_transposed,
            'data.shape (data is transposed)': data.shape
        }
        # remove nans
        data[np.isnan(data)] = 0
        # third order spline for data AND seg; order_z is set absurdly high because it is
        # presumably never used with force_separate_z=False -- TODO confirm in resample_patient
        data, seg = resample_patient(data, seg, np.array(original_spacing_transposed), target_spacing, 3, 3,
                                     force_separate_z=force_separate_z, order_z_data=99999, order_z_seg=99999,
                                     separate_z_anisotropy_threshold=self.resample_separate_z_anisotropy_threshold)
        after = {
            'spacing': target_spacing,
            'data.shape (data is resampled)': data.shape
        }
        print("before:", before, "\nafter: ", after, "\n")
        if seg is not None:  # hippocampus 243 has one voxel with -2 as label. wtf?
            seg[seg < -1] = 0
        properties["size_after_resampling"] = data[0].shape
        properties["spacing_after_resampling"] = target_spacing
        use_nonzero_mask = self.use_nonzero_mask
        assert len(self.normalization_scheme_per_modality) == len(data), "self.normalization_scheme_per_modality " \
                                                                         "must have as many entries as data has " \
                                                                         "modalities"
        assert len(self.use_nonzero_mask) == len(data), "self.use_nonzero_mask must have as many entries as data" \
                                                        " has modalities"
        # per-modality intensity normalization (identical to GenericPreprocessor)
        for c in range(len(data)):
            scheme = self.normalization_scheme_per_modality[c]
            if scheme == "CT":
                # clip to lb and ub from train data foreground and use foreground mn and sd from training data
                assert self.intensityproperties is not None, "ERROR: if there is a CT then we need intensity properties"
                mean_intensity = self.intensityproperties[c]['mean']
                std_intensity = self.intensityproperties[c]['sd']
                lower_bound = self.intensityproperties[c]['percentile_00_5']
                upper_bound = self.intensityproperties[c]['percentile_99_5']
                data[c] = np.clip(data[c], lower_bound, upper_bound)
                data[c] = (data[c] - mean_intensity) / std_intensity
                if use_nonzero_mask[c]:
                    data[c][seg[-1] < 0] = 0
            elif scheme == "CT2":
                # clip to lb and ub from train data foreground, use mn and sd from each case for normalization
                assert self.intensityproperties is not None, "ERROR: if there is a CT then we need intensity properties"
                lower_bound = self.intensityproperties[c]['percentile_00_5']
                upper_bound = self.intensityproperties[c]['percentile_99_5']
                mask = (data[c] > lower_bound) & (data[c] < upper_bound)
                data[c] = np.clip(data[c], lower_bound, upper_bound)
                mn = data[c][mask].mean()
                sd = data[c][mask].std()
                data[c] = (data[c] - mn) / sd
                if use_nonzero_mask[c]:
                    data[c][seg[-1] < 0] = 0
            else:
                if use_nonzero_mask[c]:
                    mask = seg[-1] >= 0
                else:
                    mask = np.ones(seg.shape[1:], dtype=bool)
                data[c][mask] = (data[c][mask] - data[c][mask].mean()) / (data[c][mask].std() + 1e-8)
                data[c][mask == 0] = 0
        return data, seg, properties
class PreprocessorFor2D(GenericPreprocessor):
    """
    Preprocessor for the 2D pipeline: the through-plane (first) spacing of each case is
    kept as-is (see resample_and_normalize), only in-plane spacings are resampled.
    Uses a single worker pool over all stages in run().
    """
    def __init__(self, normalization_scheme_per_modality, use_nonzero_mask, transpose_forward: (tuple, list), intensityproperties=None):
        super(PreprocessorFor2D, self).__init__(normalization_scheme_per_modality, use_nonzero_mask,
                                                transpose_forward, intensityproperties)
    def run(self, target_spacings, input_folder_with_cropped_npz, output_folder, data_identifier,
            num_threads=default_num_threads, force_separate_z=None):
        """Preprocess all cropped cases for all stages with one shared worker pool."""
        print("Initializing to run preprocessing")
        print("npz folder:", input_folder_with_cropped_npz)
        print("output_folder:", output_folder)
        list_of_cropped_npz_files = subfiles(input_folder_with_cropped_npz, True, None, ".npz", True)
        assert len(list_of_cropped_npz_files) != 0, "set list of files first"
        maybe_mkdir_p(output_folder)
        all_args = []
        num_stages = len(target_spacings)
        # we need to know which classes are present in this dataset so that we can precompute where these classes are
        # located. This is needed for oversampling foreground
        all_classes = load_pickle(join(input_folder_with_cropped_npz, 'dataset_properties.pkl'))['all_classes']
        for i in range(num_stages):
            output_folder_stage = os.path.join(output_folder, data_identifier + "_stage%d" % i)
            maybe_mkdir_p(output_folder_stage)
            spacing = target_spacings[i]
            for j, case in enumerate(list_of_cropped_npz_files):
                case_identifier = get_case_identifier_from_npz(case)
                args = spacing, case_identifier, output_folder_stage, input_folder_with_cropped_npz, force_separate_z, all_classes
                all_args.append(args)
        # unlike GenericPreprocessor.run, one pool handles the jobs of all stages
        p = Pool(num_threads)
        p.starmap(self._run_internal, all_args)
        p.close()
        p.join()
    def resample_and_normalize(self, data, target_spacing, properties, seg=None, force_separate_z=None):
        """Resample in-plane only (z spacing is kept) and normalize intensities per modality."""
        original_spacing_transposed = np.array(properties["original_spacing"])[self.transpose_forward]
        before = {
            'spacing': properties["original_spacing"],
            'spacing_transposed': original_spacing_transposed,
            'data.shape (data is transposed)': data.shape
        }
        # keep the original through-plane spacing: 2D training slices along the first axis
        target_spacing[0] = original_spacing_transposed[0]
        data, seg = resample_patient(data, seg, np.array(original_spacing_transposed), target_spacing, 3, 1,
                                     force_separate_z=force_separate_z, order_z_data=0, order_z_seg=0,
                                     separate_z_anisotropy_threshold=self.resample_separate_z_anisotropy_threshold)
        after = {
            'spacing': target_spacing,
            'data.shape (data is resampled)': data.shape
        }
        print("before:", before, "\nafter: ", after, "\n")
        if seg is not None:  # hippocampus 243 has one voxel with -2 as label. wtf?
            seg[seg < -1] = 0
        properties["size_after_resampling"] = data[0].shape
        properties["spacing_after_resampling"] = target_spacing
        use_nonzero_mask = self.use_nonzero_mask
        assert len(self.normalization_scheme_per_modality) == len(data), "self.normalization_scheme_per_modality " \
                                                                         "must have as many entries as data has " \
                                                                         "modalities"
        assert len(self.use_nonzero_mask) == len(data), "self.use_nonzero_mask must have as many entries as data" \
                                                        " has modalities"
        print("normalization...")
        # per-modality intensity normalization (identical to GenericPreprocessor)
        for c in range(len(data)):
            scheme = self.normalization_scheme_per_modality[c]
            if scheme == "CT":
                # clip to lb and ub from train data foreground and use foreground mn and sd from training data
                assert self.intensityproperties is not None, "ERROR: if there is a CT then we need intensity properties"
                mean_intensity = self.intensityproperties[c]['mean']
                std_intensity = self.intensityproperties[c]['sd']
                lower_bound = self.intensityproperties[c]['percentile_00_5']
                upper_bound = self.intensityproperties[c]['percentile_99_5']
                data[c] = np.clip(data[c], lower_bound, upper_bound)
                data[c] = (data[c] - mean_intensity) / std_intensity
                if use_nonzero_mask[c]:
                    data[c][seg[-1] < 0] = 0
            elif scheme == "CT2":
                # clip to lb and ub from train data foreground, use mn and sd from each case for normalization
                assert self.intensityproperties is not None, "ERROR: if there is a CT then we need intensity properties"
                lower_bound = self.intensityproperties[c]['percentile_00_5']
                upper_bound = self.intensityproperties[c]['percentile_99_5']
                mask = (data[c] > lower_bound) & (data[c] < upper_bound)
                data[c] = np.clip(data[c], lower_bound, upper_bound)
                mn = data[c][mask].mean()
                sd = data[c][mask].std()
                data[c] = (data[c] - mn) / sd
                if use_nonzero_mask[c]:
                    data[c][seg[-1] < 0] = 0
            else:
                if use_nonzero_mask[c]:
                    mask = seg[-1] >= 0
                else:
                    mask = np.ones(seg.shape[1:], dtype=bool)
                data[c][mask] = (data[c][mask] - data[c][mask].mean()) / (data[c][mask].std() + 1e-8)
                data[c][mask == 0] = 0
        print("normalization done")
        return data, seg, properties
class PreprocessorFor2D_noNormalization(GenericPreprocessor):
    """2D preprocessor variant: resamples in-plane only and skips intensity normalization."""
    def resample_and_normalize(self, data, target_spacing, properties, seg=None, force_separate_z=None):
        """
        Resample data/seg to target_spacing while keeping the original through-plane
        spacing. Intensities are returned unchanged (no normalization).
        """
        spacing_transposed = np.array(properties["original_spacing"])[self.transpose_forward]
        before = {'spacing': properties["original_spacing"],
                  'spacing_transposed': spacing_transposed,
                  'data.shape (data is transposed)': data.shape}
        # 2D pipeline: only resample in-plane, the first (through-plane) spacing is preserved
        target_spacing[0] = spacing_transposed[0]
        data, seg = resample_patient(data, seg, np.array(spacing_transposed), target_spacing, 3, 1,
                                     force_separate_z=force_separate_z, order_z_data=0, order_z_seg=0,
                                     separate_z_anisotropy_threshold=self.resample_separate_z_anisotropy_threshold)
        after = {'spacing': target_spacing,
                 'data.shape (data is resampled)': data.shape}
        print("before:", before, "\nafter: ", after, "\n")
        if seg is not None:
            # clamp stray labels below -1 to background
            seg[seg < -1] = 0
        properties["size_after_resampling"] = data[0].shape
        properties["spacing_after_resampling"] = target_spacing
        use_nonzero_mask = self.use_nonzero_mask
        assert len(self.normalization_scheme_per_modality) == len(data), \
            "self.normalization_scheme_per_modality must have as many entries as data has modalities"
        assert len(self.use_nonzero_mask) == len(data), \
            "self.use_nonzero_mask must have as many entries as data has modalities"
        return data, seg, properties
| 35,625
| 49.821683
| 136
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/preprocessing/cropping.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import SimpleITK as sitk
import numpy as np
import shutil
from batchgenerators.utilities.file_and_folder_operations import *
from multiprocessing import Pool
from collections import OrderedDict
def create_nonzero_mask(data):
    """
    Return a boolean mask of voxels that are nonzero in at least one channel.

    Internal holes of the resulting foreground region are filled so the mask is solid.

    :param data: array of shape (C, X, Y, Z) or (C, X, Y); axis 0 are the modalities
    :return: boolean array of shape data.shape[1:]
    """
    from scipy.ndimage import binary_fill_holes
    assert len(data.shape) == 4 or len(data.shape) == 3, "data must have shape (C, X, Y, Z) or shape (C, X, Y)"
    # vectorized OR over the channel axis (replaces the per-channel Python loop)
    nonzero_mask = np.any(data != 0, axis=0)
    # close internal holes so the nonzero region is one solid blob
    nonzero_mask = binary_fill_holes(nonzero_mask)
    return nonzero_mask
def get_bbox_from_mask(mask, outside_value=0):
    """
    Compute the tight bounding box of all voxels that differ from *outside_value*.

    :param mask: 3d array
    :param outside_value: value considered background
    :return: [[zmin, zmax), [xmin, xmax), [ymin, ymax)] with exclusive upper bounds
    """
    coords = np.where(mask != outside_value)
    # one [lower, upper) pair per spatial axis; upper bound is exclusive
    return [[int(coords[axis].min()), int(coords[axis].max()) + 1] for axis in range(3)]
def crop_to_bbox(image, bbox):
    """
    Slice *image* down to the region described by *bbox*.

    :param image: 3d array
    :param bbox: [[lo, hi], [lo, hi], [lo, hi]] with exclusive upper bounds
    :return: view of the cropped region
    """
    assert len(image.shape) == 3, "only supports 3d images"
    slicer = tuple(slice(*bounds) for bounds in bbox[:3])
    return image[slicer]
def get_case_identifier(case):
    """
    Derive the case identifier from the first image file of *case*.

    Takes the basename, strips the ".nii.gz" suffix and the trailing 5-character
    modality tag (e.g. "_0000").
    """
    filename = case[0].split("/")[-1]
    stem = filename.split(".nii.gz")[0]
    return stem[:-5]
def get_case_identifier_from_npz(case):
    """Return the basename of an .npz path without its 4-character ".npz" extension."""
    basename = case.split("/")[-1]
    return basename[:-4]
def load_case_from_list_of_files(data_files, seg_file=None):
    """
    Read all modality images (and optionally the segmentation) of one case.

    :param data_files: list/tuple of image paths, one per modality
    :param seg_file: optional path to the segmentation
    :return: (data (C, z, y, x) float32, seg (1, z, y, x) float32 or None, properties dict)
    """
    assert isinstance(data_files, (list, tuple)), "case must be either a list or a tuple"
    images = [sitk.ReadImage(f) for f in data_files]
    reference = images[0]
    properties = OrderedDict()
    # SimpleITK reports (x, y, z); reverse to (z, y, x) to match the numpy array layout
    properties["original_size_of_raw_data"] = np.array(reference.GetSize())[[2, 1, 0]]
    properties["original_spacing"] = np.array(reference.GetSpacing())[[2, 1, 0]]
    properties["list_of_data_files"] = data_files
    properties["seg_file"] = seg_file
    # keep the raw itk geometry of the first modality so results can be exported later
    properties["itk_origin"] = reference.GetOrigin()
    properties["itk_spacing"] = reference.GetSpacing()
    properties["itk_direction"] = reference.GetDirection()
    channels = [sitk.GetArrayFromImage(img)[None] for img in images]
    data_npy = np.vstack(channels)
    if seg_file is None:
        seg_npy = None
    else:
        seg_npy = sitk.GetArrayFromImage(sitk.ReadImage(seg_file))[None].astype(np.float32)
    return data_npy.astype(np.float32), seg_npy, properties
def crop_to_nonzero(data, seg=None, nonzero_label=-1):
    """
    Crop *data* (and *seg*, if given) to the bounding box of the nonzero region.

    :param data: (C, X, Y, Z) array
    :param seg: optional segmentation with the same spatial shape
    :param nonzero_label: this will be written into the segmentation map for voxels
        outside the nonzero mask (background-inside-bbox marker)
    :return: (cropped data, cropped/derived seg, bbox)
    """
    nonzero_mask = create_nonzero_mask(data)
    bbox = get_bbox_from_mask(nonzero_mask, 0)
    # crop every channel to the same bounding box
    data = np.vstack([crop_to_bbox(data[c], bbox)[None] for c in range(data.shape[0])])
    if seg is not None:
        seg = np.vstack([crop_to_bbox(seg[c], bbox)[None] for c in range(seg.shape[0])])
    nonzero_mask = crop_to_bbox(nonzero_mask, bbox)[None]
    if seg is not None:
        # mark background voxels outside the nonzero mask with nonzero_label
        seg[(seg == 0) & (nonzero_mask == 0)] = nonzero_label
    else:
        # no seg given: synthesize one from the mask (outside -> nonzero_label, inside -> 0)
        nonzero_mask = nonzero_mask.astype(int)
        nonzero_mask[nonzero_mask == 0] = nonzero_label
        nonzero_mask[nonzero_mask > 0] = 0
        seg = nonzero_mask
    return data, seg, bbox
def get_patient_identifiers_from_cropped_files(folder):
    """List case identifiers (npz basenames without extension) found in *folder*."""
    npz_paths = subfiles(folder, join=True, suffix=".npz")
    return [path.split("/")[-1][:-4] for path in npz_paths]
class ImageCropper(object):
    """
    Crops cases to the bounding box of their nonzero region and stores the results
    (data + seg stacked in an .npz, properties in a .pkl) in output_folder.
    """
    def __init__(self, num_threads, output_folder=None):
        """
        This one finds a mask of nonzero elements (must be nonzero in all modalities) and crops the image to that mask.
        In the case of BRaTS and ISLES data this results in a significant reduction in image size
        :param num_threads:
        :param output_folder: where to store the cropped data
        :param list_of_files:
        """
        self.output_folder = output_folder
        self.num_threads = num_threads
        if self.output_folder is not None:
            maybe_mkdir_p(self.output_folder)
    @staticmethod
    def crop(data, properties, seg=None):
        """Crop one case to its nonzero bounding box and record crop metadata in properties."""
        shape_before = data.shape
        data, seg, bbox = crop_to_nonzero(data, seg, nonzero_label=-1)
        shape_after = data.shape
        print("before crop:", shape_before, "after crop:", shape_after, "spacing:",
              np.array(properties["original_spacing"]), "\n")
        properties["crop_bbox"] = bbox
        properties['classes'] = np.unique(seg)
        # remove stray labels below -1 (only -1 marks the outside-of-mask region)
        seg[seg < -1] = 0
        properties["size_after_cropping"] = data[0].shape
        return data, seg, properties
    @staticmethod
    def crop_from_list_of_files(data_files, seg_file=None):
        """Load a case from disk and crop it to its nonzero bounding box."""
        data, seg, properties = load_case_from_list_of_files(data_files, seg_file)
        return ImageCropper.crop(data, properties, seg)
    def load_crop_save(self, case, case_identifier, overwrite_existing=False):
        """Crop one case and save the npz/pkl pair, skipping cases that already exist."""
        try:
            print(case_identifier)
            # only (re)compute if forced or if either output file is missing
            if overwrite_existing \
                    or (not os.path.isfile(os.path.join(self.output_folder, "%s.npz" % case_identifier))
                        or not os.path.isfile(os.path.join(self.output_folder, "%s.pkl" % case_identifier))):
                # case layout: all image files first, segmentation file last
                data, seg, properties = self.crop_from_list_of_files(case[:-1], case[-1])
                all_data = np.vstack((data, seg))
                np.savez_compressed(os.path.join(self.output_folder, "%s.npz" % case_identifier), data=all_data)
                with open(os.path.join(self.output_folder, "%s.pkl" % case_identifier), 'wb') as f:
                    pickle.dump(properties, f)
        except Exception as e:
            # log the failing case before re-raising so it can be identified in pool output
            print("Exception in", case_identifier, ":")
            print(e)
            raise e
    def get_list_of_cropped_files(self):
        return subfiles(self.output_folder, join=True, suffix=".npz")
    def get_patient_identifiers_from_cropped_files(self):
        return [i.split("/")[-1][:-4] for i in self.get_list_of_cropped_files()]
    def run_cropping(self, list_of_files, overwrite_existing=False, output_folder=None):
        """
        also copied ground truth nifti segmentation into the preprocessed folder so that we can use them for evaluation
        on the cluster
        :param list_of_files: list of list of files [[PATIENTID_TIMESTEP_0000.nii.gz], [PATIENTID_TIMESTEP_0000.nii.gz]]
        :param overwrite_existing:
        :param output_folder:
        :return:
        """
        if output_folder is not None:
            self.output_folder = output_folder
        output_folder_gt = os.path.join(self.output_folder, "gt_segmentations")
        maybe_mkdir_p(output_folder_gt)
        for j, case in enumerate(list_of_files):
            if case[-1] is not None:
                shutil.copy(case[-1], output_folder_gt)
        list_of_args = []
        for j, case in enumerate(list_of_files):
            case_identifier = get_case_identifier(case)
            list_of_args.append((case, case_identifier, overwrite_existing))
        # crop all cases in parallel
        p = Pool(self.num_threads)
        p.starmap(self.load_crop_save, list_of_args)
        p.close()
        p.join()
    def load_properties(self, case_identifier):
        """Read the properties pkl of one cropped case."""
        with open(os.path.join(self.output_folder, "%s.pkl" % case_identifier), 'rb') as f:
            properties = pickle.load(f)
        return properties
    def save_properties(self, case_identifier, properties):
        """Write the properties pkl of one cropped case."""
        with open(os.path.join(self.output_folder, "%s.pkl" % case_identifier), 'wb') as f:
            pickle.dump(properties, f)
| 8,571
| 38.502304
| 120
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/preprocessing/sanity_checks.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from multiprocessing import Pool
import SimpleITK as sitk
import nibabel as nib
import numpy as np
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.configuration import default_num_threads
def verify_all_same_orientation(folder):
    """
    Check that every .nii.gz file in *folder* shares the same axis orientation.

    This should run after cropping.
    :param folder: directory scanned (non-recursively) for .nii.gz files
    :return: (all_same, unique_orientations)
    """
    nii_files = subfiles(folder, suffix=".nii.gz", join=True)
    # one axcodes tuple per file, derived from the nifti affine
    orientations = [nib.aff2axcodes(nib.load(path).affine) for path in nii_files]
    orientations = np.array(orientations)
    unique_orientations = np.unique(orientations, axis=0)
    all_same = len(unique_orientations) == 1
    return all_same, unique_orientations
def verify_same_geometry(img_1: sitk.Image, img_2: sitk.Image):
    """
    Compare origin, spacing, direction and size of two images.

    Prints a message for every attribute that differs.
    :return: True if all four attributes match (within np.isclose tolerance), else False
    """
    geom_1 = img_1.GetOrigin(), img_1.GetSpacing(), img_1.GetDirection(), img_1.GetSize()
    geom_2 = img_2.GetOrigin(), img_2.GetSpacing(), img_2.GetDirection(), img_2.GetSize()
    messages = ("the origin does not match between the images:",
                "the spacing does not match between the images",
                "the direction does not match between the images",
                "the size does not match between the images")
    results = []
    for attr_1, attr_2, message in zip(geom_1, geom_2, messages):
        same = np.all(np.isclose(attr_1, attr_2))
        if not same:
            print(message)
            print(attr_1)
            print(attr_2)
        results.append(same)
    return bool(all(results))
def verify_contains_only_expected_labels(itk_img: str, valid_labels: (tuple, list)):
    """
    Check that the image at *itk_img* contains no label values outside *valid_labels*.

    :param itk_img: path to a segmentation readable by SimpleITK
    :param valid_labels: allowed label values
    :return: (ok, invalid_uniques) where ok is True iff no unexpected labels were found
    """
    img_npy = sitk.GetArrayFromImage(sitk.ReadImage(itk_img))
    found_labels = np.unique(img_npy)
    invalid_uniques = [label for label in found_labels if label not in valid_labels]
    return len(invalid_uniques) == 0, invalid_uniques
def verify_dataset_integrity(folder):
    """
    Verify the integrity of a raw dataset folder.

    `folder` needs the imagesTr and labelsTr subfolders (imagesTs is optional) plus a
    dataset.json. The following is checked:
      - dataset.json lists no duplicate training cases
      - all training cases/labels and all test cases (if any) are present, and no
        unreferenced .nii.gz stragglers exist in imagesTr/labelsTr/imagesTs
      - for each case, all modalities are present
      - for each case, the pixel grids of all modalities and the label are aligned
        (same origin, spacing, direction and size)
      - no image or segmentation contains NaN values
      - the labels really only contain the values declared in dataset.json

    :param folder: path to the raw dataset folder
    :return: None. Prints diagnostics and raises (RuntimeError/AssertionError/Warning)
             on integrity problems.
    """
    assert isfile(join(folder, "dataset.json")), "There needs to be a dataset.json file in folder, folder=%s" % folder
    assert isdir(join(folder, "imagesTr")), "There needs to be a imagesTr subfolder in folder, folder=%s" % folder
    assert isdir(join(folder, "labelsTr")), "There needs to be a labelsTr subfolder in folder, folder=%s" % folder
    dataset = load_json(join(folder, "dataset.json"))
    training_cases = dataset['training']
    num_modalities = len(dataset['modality'].keys())
    test_cases = dataset['test']
    # case identifiers = filename with the trailing ".nii.gz" (7 chars) stripped
    expected_train_identifiers = [i['image'].split("/")[-1][:-7] for i in training_cases]
    expected_test_identifiers = [i.split("/")[-1][:-7] for i in test_cases]
    ## check training set
    nii_files_in_imagesTr = subfiles((join(folder, "imagesTr")), suffix=".nii.gz", join=False)
    nii_files_in_labelsTr = subfiles((join(folder, "labelsTr")), suffix=".nii.gz", join=False)
    label_files = []
    geometries_OK = True
    has_nan = False
    # check all cases
    if len(expected_train_identifiers) != len(np.unique(expected_train_identifiers)):
        raise RuntimeError("found duplicate training cases in dataset.json")
    print("Verifying training set")
    for c in expected_train_identifiers:
        print("checking case", c)
        # check if all files are present
        expected_label_file = join(folder, "labelsTr", c + ".nii.gz")
        label_files.append(expected_label_file)
        expected_image_files = [join(folder, "imagesTr", c + "_%04.0d.nii.gz" % i) for i in range(num_modalities)]
        assert isfile(expected_label_file), "could not find label file for case %s. Expected file: \n%s" % (
            c, expected_label_file)
        assert all([isfile(i) for i in
                    expected_image_files]), "some image files are missing for case %s. Expected files:\n %s" % (
            c, expected_image_files)
        # verify that all modalities and the label have the same shape and geometry.
        label_itk = sitk.ReadImage(expected_label_file)
        nans_in_seg = np.any(np.isnan(sitk.GetArrayFromImage(label_itk)))
        has_nan = has_nan | nans_in_seg
        if nans_in_seg:
            print("There are NAN values in segmentation %s" % expected_label_file)
        images_itk = [sitk.ReadImage(i) for i in expected_image_files]
        for i, img in enumerate(images_itk):
            nans_in_image = np.any(np.isnan(sitk.GetArrayFromImage(img)))
            has_nan = has_nan | nans_in_image
            same_geometry = verify_same_geometry(img, label_itk)
            if not same_geometry:
                geometries_OK = False
                print("The geometry of the image %s does not match the geometry of the label file. The pixel arrays "
                      "will not be aligned and nnU-Net cannot use this data. Please make sure your image modalities "
                      "are coregistered and have the same geometry as the label" % expected_image_files[0][:-12])
            if nans_in_image:
                print("There are NAN values in image %s" % expected_image_files[i])
        # now remove checked files from the lists nii_files_in_imagesTr and nii_files_in_labelsTr
        for i in expected_image_files:
            nii_files_in_imagesTr.remove(os.path.basename(i))
        nii_files_in_labelsTr.remove(os.path.basename(expected_label_file))
    # check for stragglers (files on disk that dataset.json does not know about)
    assert len(
        nii_files_in_imagesTr) == 0, "there are training cases in imagesTr that are not listed in dataset.json: %s" % nii_files_in_imagesTr
    assert len(
        nii_files_in_labelsTr) == 0, "there are training cases in labelsTr that are not listed in dataset.json: %s" % nii_files_in_labelsTr
    # verify that only properly declared values are present in the labels
    print("Verifying label values")
    expected_labels = list(int(i) for i in dataset['labels'].keys())
    # check labels in parallel; Pool comes from multiprocessing (imported at the top of the file)
    p = Pool(default_num_threads)
    results = p.starmap(verify_contains_only_expected_labels, zip(label_files, [expected_labels] * len(label_files)))
    p.close()
    p.join()
    fail = False
    print("Expected label values are", expected_labels)
    for i, r in enumerate(results):
        if not r[0]:
            print("Unexpected labels found in file %s. Found these unexpected values (they should not be there) %s" % (
                label_files[i], r[1]))
            fail = True
    if fail:
        raise AssertionError(
            "Found unexpected labels in the training dataset. Please correct that or adjust your dataset.json accordingly")
    else:
        print("Labels OK")
    # check test set, but only if there actually is a test set
    if len(expected_test_identifiers) > 0:
        print("Verifying test set")
        nii_files_in_imagesTs = subfiles((join(folder, "imagesTs")), suffix=".nii.gz", join=False)
        for c in expected_test_identifiers:
            # check if all files are present
            expected_image_files = [join(folder, "imagesTs", c + "_%04.0d.nii.gz" % i) for i in range(num_modalities)]
            assert all([isfile(i) for i in
                        expected_image_files]), "some image files are missing for case %s. Expected files:\n %s" % (
                c, expected_image_files)
            # verify that all modalities have the same geometry (no label exists for test cases)
            if num_modalities > 1:
                images_itk = [sitk.ReadImage(i) for i in expected_image_files]
                reference_img = images_itk[0]
                for i, img in enumerate(images_itk[1:]):
                    assert verify_same_geometry(img, reference_img), "The modalities of the image %s do not seem to be " \
                                                                     "registered. Please coregister your modalities." % (
                                                                         expected_image_files[i])
            # now remove checked files from the list nii_files_in_imagesTs
            for i in expected_image_files:
                nii_files_in_imagesTs.remove(os.path.basename(i))
        # BUGFIX: this message previously printed nii_files_in_imagesTr (the training list),
        # which would always be empty here and hide the actual offending test files
        assert len(
            nii_files_in_imagesTs) == 0, "there are training cases in imagesTs that are not listed in dataset.json: %s" % nii_files_in_imagesTs
    all_same, unique_orientations = verify_all_same_orientation(join(folder, "imagesTr"))
    if not all_same:
        print(
            "WARNING: Not all images in the dataset have the same axis ordering. We very strongly recommend you correct that by reorienting the data. fslreorient2std should do the trick")
    # save unique orientations to dataset.json
    if not geometries_OK:
        # NOTE(review): raising a Warning instance as an exception is unusual but kept for
        # backward compatibility with callers that may catch it
        raise Warning("GEOMETRY MISMATCH FOUND! CHECK THE TEXT OUTPUT! This does not cause an error at this point but you should definitely check whether your geometries are alright!")
    else:
        print("Dataset OK")
    if has_nan:
        raise RuntimeError("Some images have nan values in them. This will break the training. See text output above to see which ones")
def reorient_to_RAS(img_fname: str, output_fname: str = None):
    """
    Reorient a NIfTI image to the closest canonical (RAS) orientation.

    :param img_fname: path to the input image
    :param output_fname: where to write the result; when None the input file is
                         overwritten in place
    """
    destination = img_fname if output_fname is None else output_fname
    nib.save(nib.as_closest_canonical(nib.load(img_fname)), destination)
if __name__ == "__main__":
    # Ad-hoc debugging script (hard-coded local paths, not part of the library API):
    # round-trips a ground-truth segmentation through SimpleITK to reproduce /
    # investigate geometry issues
    import SimpleITK as sitk

    # load image
    gt_itk = sitk.ReadImage(
        "/media/fabian/Results/nnUNet/3d_fullres/Task064_KiTS_labelsFixed/nnUNetTrainerV2__nnUNetPlansv2.1/gt_niftis/case_00085.nii.gz")

    # get numpy array
    pred_npy = sitk.GetArrayFromImage(gt_itk)

    # create new image from numpy array (this drops the original geometry)
    prek_itk_new = sitk.GetImageFromArray(pred_npy)
    # copy geometry back from the source image
    prek_itk_new.CopyInformation(gt_itk)
    # prek_itk_new = copy_geometry(prek_itk_new, gt_itk)

    # save (.mnc chosen deliberately to test a non-NIfTI writer)
    sitk.WriteImage(prek_itk_new, "test.mnc")

    # load images in nib for comparison of the affines/arrays
    gt = nib.load(
        "/media/fabian/Results/nnUNet/3d_fullres/Task064_KiTS_labelsFixed/nnUNetTrainerV2__nnUNetPlansv2.1/gt_niftis/case_00085.nii.gz")
    pred_nib = nib.load("test.mnc")

    new_img_sitk = sitk.ReadImage("test.mnc")

    np1 = sitk.GetArrayFromImage(gt_itk)
    np2 = sitk.GetArrayFromImage(prek_itk_new)
| 11,771
| 42.925373
| 187
|
py
|
CoTr
|
CoTr-main/nnUNet/nnunet/preprocessing/__init__.py
|
from __future__ import absolute_import
from . import *
| 54
| 26.5
| 38
|
py
|
CoTr
|
CoTr-main/CoTr_package/setup.py
|
from setuptools import setup, find_namespace_packages

# Minimal packaging script for the CoTr library.
# find_namespace_packages picks up the "CoTr" package and every subpackage below it.
setup(name='CoTr',
      packages=find_namespace_packages(include=["CoTr", "CoTr.*"]),
      version='0.0.1'
      )
| 172
| 23.714286
| 67
|
py
|
CoTr
|
CoTr-main/CoTr_package/CoTr/configuration.py
|
import os

# Default number of worker processes for multiprocessing pools; can be overridden
# via the nnUNet_def_n_proc environment variable.
default_num_threads = 8 if 'nnUNet_def_n_proc' not in os.environ else int(os.environ['nnUNet_def_n_proc'])

RESAMPLING_SEPARATE_Z_ANISO_THRESHOLD = 3  # determines what threshold to use for resampling the low resolution axis
# separately (with NN)
| 257
| 50.6
| 116
|
py
|
CoTr
|
CoTr-main/CoTr_package/CoTr/__init__.py
|
from __future__ import absolute_import

# NOTE(review): importing the CoTr package prints to stdout as a side effect;
# consider removing this for quieter imports.
print("This is CoTr\n")

from . import *
| 80
| 15.2
| 38
|
py
|
CoTr
|
CoTr-main/CoTr_package/CoTr/training/__init__.py
|
from __future__ import absolute_import
from . import *
| 54
| 26.5
| 38
|
py
|
CoTr
|
CoTr-main/CoTr_package/CoTr/training/model_restore.py
|
import CoTr
import torch
from batchgenerators.utilities.file_and_folder_operations import *
import importlib
import pkgutil
from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
def recursive_find_python_class(folder, trainer_name, current_module):
    """
    Recursively search the package rooted at `folder` for an attribute named
    `trainer_name` and return it (or None when nothing matches).

    :param folder: one-element list containing the directory to scan
    :param trainer_name: name of the class/attribute to look for
    :param current_module: dotted import path corresponding to `folder`
    :return: the found attribute, or None
    """
    # First pass: inspect the plain modules at this level.
    for _, module_name, is_package in pkgutil.iter_modules(folder):
        if is_package:
            continue
        candidate = importlib.import_module("%s.%s" % (current_module, module_name))
        if hasattr(candidate, trainer_name):
            return getattr(candidate, trainer_name)
    # Second pass: descend into subpackages until a match turns up.
    for _, module_name, is_package in pkgutil.iter_modules(folder):
        if not is_package:
            continue
        found = recursive_find_python_class([join(folder[0], module_name)], trainer_name,
                                            current_module="%s.%s" % (current_module, module_name))
        if found is not None:
            return found
    return None
def restore_model(pkl_file, checkpoint=None, train=False, fp16=None):
    """
    Rebuild a trainer object from the pickled metadata written next to a checkpoint.

    :param pkl_file: path to the <checkpoint>.pkl file holding init args, trainer name and plans
    :param checkpoint: optional path to the .model file to load weights from
    :param train: forwarded to load_checkpoint (whether to restore optimizer state etc.)
    :param fp16: if not None, overrides the trainer's fp16 flag
    :return: the restored trainer instance
    """
    info = load_pickle(pkl_file)
    trainer_init_args = info['init']
    trainer_name = info['name']
    cotr_search_dir = join(CoTr.__path__[0], "training", "network_training")
    trainer_class = recursive_find_python_class([cotr_search_dir], trainer_name,
                                                current_module="CoTr.training.network_training")
    if trainer_class is None:
        # Fall back to the optional meddec package layout, if it is installed.
        try:
            import meddec
            fallback_dir = (os.path.dirname(os.path.abspath(__file__)), 'network_training')
            trainer_class = recursive_find_python_class([fallback_dir], trainer_name,
                                                        current_module="training.network_training")
        except ImportError:
            pass
    if trainer_class is None:
        raise RuntimeError("Could not find the model trainer specified in checkpoint in nnunet.trainig.network_training. If it "
                           "is not located there, please move it or change the code of restore_model. Your model "
                           "trainer can be located in any directory within nnunet.trainig.network_training (search is recursive)."
                           "\nDebug info: \ncheckpoint file: %s\nName of trainer: %s " % (checkpoint, trainer_name))
    assert issubclass(trainer_class, nnUNetTrainer), "The network trainer was found but is not a subclass of nnUNetTrainer. " \
                                                     "Please make it so!"
    trainer = trainer_class(*trainer_init_args)
    if fp16 is not None:
        trainer.fp16 = fp16
    trainer.process_plans(info['plans'])
    if checkpoint is not None:
        trainer.load_checkpoint(checkpoint, train)
    return trainer
def load_best_model_for_inference(folder):
    """Restore the trainer from the 'model_best' checkpoint inside `folder` (inference mode)."""
    best_checkpoint = join(folder, "model_best.model")
    return restore_model(best_checkpoint + ".pkl", best_checkpoint, False)
def load_model_and_checkpoint_files(folder, folds=None, mixed_precision=None, checkpoint_name="model_best"):
    """
    used for if you need to ensemble the five models of a cross-validation. This will restore the model from the
    checkpoint in fold 0, load all parameters of the five folds in ram and return both. This will allow for fast
    switching between parameters (as opposed to loading them form disk each time).

    This is best used for inference and test prediction
    :param folder: output folder of the training (contains the fold_X subfolders)
    :param folds: which folds to load: list/tuple of int, a single int, the string "all", or None
                  (None auto-discovers all fold_* subfolders)
    :param mixed_precision: if None then we take no action. If True/False we overwrite what the model has in its init
    :param checkpoint_name: basename of the checkpoint to load from each fold (default "model_best")
    :return: (trainer, all_params) where all_params is the list of loaded state dicts, one per fold
    """
    if isinstance(folds, str):
        folds = [join(folder, "all")]
        assert isdir(folds[0]), "no output folder for fold %s found" % folds
    elif isinstance(folds, (list, tuple)):
        if len(folds) == 1 and folds[0] == "all":
            folds = [join(folder, "all")]
        else:
            folds = [join(folder, "fold_%d" % i) for i in folds]
        assert all([isdir(i) for i in folds]), "list of folds specified but not all output folders are present"
    elif isinstance(folds, int):
        # BUGFIX: keep the int before rebinding `folds` to a list; previously the assert
        # message formatted the *list* with %d, raising TypeError instead of AssertionError
        fold_id = folds
        folds = [join(folder, "fold_%d" % fold_id)]
        assert all([isdir(i) for i in folds]), "output folder missing for fold %d" % fold_id
    elif folds is None:
        print("folds is None so we will automatically look for output folders (not using \'all\'!)")
        folds = subfolders(folder, prefix="fold")
        print("found the following folds: ", folds)
    else:
        raise ValueError("Unknown value for folds. Type: %s. Expected: list of int, int, str or None", str(type(folds)))
    # restore the trainer once (from fold 0) and only swap parameter sets afterwards
    trainer = restore_model(join(folds[0], "%s.model.pkl" % checkpoint_name), fp16=mixed_precision)
    trainer.output_folder = folder
    trainer.output_folder_base = folder
    trainer.update_fold(0)
    trainer.initialize(False)
    all_best_model_files = [join(i, "%s.model" % checkpoint_name) for i in folds]
    print("using the following model files: ", all_best_model_files)
    # load to CPU so we do not need GPU memory for all folds at once
    all_params = [torch.load(i, map_location=torch.device('cpu')) for i in all_best_model_files]
    return trainer, all_params
| 4,979
| 43.070796
| 130
|
py
|
CoTr
|
CoTr-main/CoTr_package/CoTr/training/network_training/nnUNetTrainerV2_ResTrans.py
|
from collections import OrderedDict
from typing import Tuple
import numpy as np
import torch
import shutil
from nnunet.training.loss_functions.deep_supervision import MultipleOutputLoss2
from nnunet.utilities.to_torch import maybe_to_torch, to_cuda
from nnunet.training.data_augmentation.default_data_augmentation import get_moreDA_augmentation
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.network_architecture.initialization import InitWeights_He
from CoTr.network_architecture.neural_network import SegmentationNetwork
from nnunet.training.data_augmentation.default_data_augmentation import default_2D_augmentation_params, \
get_patch_size, default_3D_augmentation_params
from nnunet.training.dataloading.dataset_loading import unpack_dataset
from CoTr.training.network_training.nnUNetTrainer import nnUNetTrainer
from nnunet.utilities.nd_softmax import softmax_helper
from sklearn.model_selection import KFold
from torch import nn
from torch.cuda.amp import autocast
from nnunet.training.learning_rate.poly_lr import poly_lr
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.network_architecture.generic_modular_UNet import get_default_network_config
from CoTr.network_architecture.ResTranUnet import ResTranUnet
class nnUNetTrainerV2_ResTrans(nnUNetTrainer):
    """
    nnUNet trainer variant that plugs in the CoTr ResTranUnet architecture.

    Compared to the base nnUNetTrainer it uses deep supervision (loss wrapped in
    MultipleOutputLoss2), SGD with momentum 0.99, a poly learning-rate schedule
    and a fixed 1000-epoch budget. `norm_cfg` / `activation_cfg` select the
    normalization and activation used inside ResTranUnet.
    """
    def __init__(self, plans_file, fold, norm_cfg, activation_cfg, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                         deterministic, fp16)
        self.max_num_epochs = 1000
        self.initial_lr = 1e-2
        # set in setup_DA_params / initialize
        self.deep_supervision_scales = None
        self.ds_loss_weights = None
        # architecture configuration forwarded to ResTranUnet
        self.norm_cfg = norm_cfg
        self.activation_cfg = activation_cfg
        self.pin_memory = True
        self.save_best_checkpoint = True
    def initialize(self, training=True, force_load_plans=False):
        """
        - replaced get_default_augmentation with get_moreDA_augmentation
        - enforce to only run this code once
        - loss function wrapper for deep supervision
        :param training:
        :param force_load_plans:
        :return:
        """
        if not self.was_initialized:
            maybe_mkdir_p(self.output_folder)
            if force_load_plans or (self.plans is None):
                self.load_plans_file()
            print("Patch size is %s" % self.plans['plans_per_stage'][1]['patch_size'])
            # with BatchNorm a larger batch size is required for stable statistics
            if self.norm_cfg=='BN':
                self.plans['plans_per_stage'][1]['batch_size'] = 8
            # self.plans['plans_per_stage'][1]['batch_size'] = 1 #Debug
            print("Batch size is %s" % self.plans['plans_per_stage'][1]['batch_size'])
            self.process_plans(self.plans)
            self.setup_DA_params()
            ################# Here we wrap the loss for deep supervision ############
            # we need to know the number of outputs of the network
            net_numpool = len(self.net_num_pool_op_kernel_sizes)
            # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
            # this gives higher resolution outputs more weight in the loss
            weights = np.array([1 / (2 ** i) for i in range(net_numpool)])
            # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1
            mask = np.array([True] + [True if i < net_numpool - 1 else False for i in range(1, net_numpool)])
            weights[~mask] = 0
            weights = weights / weights.sum()
            self.ds_loss_weights = weights
            # now wrap the loss
            self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights)
            ################# END ###################
            self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +
                                                      "_stage%d" % self.stage)
            if training:
                self.dl_tr, self.dl_val = self.get_basic_generators()
                if self.unpack_data:
                    print("unpacking dataset")
                    unpack_dataset(self.folder_with_preprocessed_data)
                    print("done")
                else:
                    print(
                        "INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you "
                        "will wait all winter for your model to finish!")
                self.tr_gen, self.val_gen = get_moreDA_augmentation(
                    self.dl_tr, self.dl_val,
                    self.data_aug_params[
                        'patch_size_for_spatialtransform'],
                    self.data_aug_params,
                    deep_supervision_scales=self.deep_supervision_scales,
                    pin_memory=self.pin_memory
                )
                self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())),
                                       also_print_to_console=False)
                self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())),
                                       also_print_to_console=False)
            else:
                pass
            self.initialize_network()
            self.initialize_optimizer_and_scheduler()
            assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel))
        else:
            self.print_to_log_file('self.was_initialized is True, not running self.initialize again')
        self.was_initialized = True
    def initialize_network(self):
        """
        - momentum 0.99
        - SGD instead of Adam
        - self.lr_scheduler = None because we do poly_lr
        - deep supervision = True
        - i am sure I forgot something here
        Known issue: forgot to set neg_slope=0 in InitWeights_He; should not make a difference though
        :return:
        """
        # ResTranUnet replaces the Generic_UNet used by the standard nnU-Net trainers
        self.network = ResTranUnet(norm_cfg=self.norm_cfg, activation_cfg=self.activation_cfg, img_size=self.plans['plans_per_stage'][1]['patch_size'],
                                   num_classes=self.num_classes, weight_std=False, deep_supervision=True).cuda()
        total = sum([param.nelement() for param in self.network.parameters()])
        print(' + Number of Network Params: %.2f(e6)' % (total / 1e6))
        if torch.cuda.is_available():
            self.network.cuda()
        self.network.inference_apply_nonlin = softmax_helper
    def initialize_optimizer_and_scheduler(self):
        # SGD with high momentum; LR is handled by poly_lr in maybe_update_lr, not a torch scheduler
        assert self.network is not None, "self.initialize_network must be called first"
        self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,
                                         momentum=0.99, nesterov=True)
        self.lr_scheduler = None
    def run_online_evaluation(self, output, target):
        """
        due to deep supervision the return value and the reference are now lists of tensors. We only need the full
        resolution output because this is what we are interested in in the end. The others are ignored
        :param output:
        :param target:
        :return:
        """
        target = target[0]
        output = output[0]
        return super().run_online_evaluation(output, target)
    def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True,
                 step_size: float = 0.5, save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
                 validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
                 segmentation_export_kwargs: dict = None):
        """
        We need to wrap this because we need to enforce self.network.do_ds = False for prediction
        """
        ds = self.network.do_ds
        self.network.do_ds = False
        ret = super().validate(do_mirroring=do_mirroring, use_sliding_window=use_sliding_window, step_size=step_size,
                               save_softmax=save_softmax, use_gaussian=use_gaussian,
                               overwrite=overwrite, validation_folder_name=validation_folder_name, debug=debug,
                               all_in_gpu=all_in_gpu, segmentation_export_kwargs=segmentation_export_kwargs)
        self.network.do_ds = ds
        return ret
    def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True,
                                                         mirror_axes: Tuple[int] = None,
                                                         use_sliding_window: bool = True, step_size: float = 0.5,
                                                         use_gaussian: bool = True, pad_border_mode: str = 'constant',
                                                         pad_kwargs: dict = None, all_in_gpu: bool = False,
                                                         verbose: bool = True, mixed_precision=True) -> Tuple[np.ndarray, np.ndarray]:
        """
        We need to wrap this because we need to enforce self.network.do_ds = False for prediction
        """
        ds = self.network.do_ds
        self.network.do_ds = False
        ret = super().predict_preprocessed_data_return_seg_and_softmax(data,
                                                                       do_mirroring=do_mirroring,
                                                                       mirror_axes=mirror_axes,
                                                                       use_sliding_window=use_sliding_window,
                                                                       step_size=step_size, use_gaussian=use_gaussian,
                                                                       pad_border_mode=pad_border_mode,
                                                                       pad_kwargs=pad_kwargs, all_in_gpu=all_in_gpu,
                                                                       verbose=verbose,
                                                                       mixed_precision=mixed_precision)
        self.network.do_ds = ds
        return ret
    def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False):
        """
        gradient clipping improves training stability
        :param data_generator:
        :param do_backprop:
        :param run_online_evaluation:
        :return:
        """
        data_dict = next(data_generator)
        data = data_dict['data']
        target = data_dict['target']
        data = maybe_to_torch(data)
        target = maybe_to_torch(target)
        if torch.cuda.is_available():
            data = to_cuda(data)
            target = to_cuda(target)
        self.optimizer.zero_grad()
        if self.fp16:
            # mixed precision path: scale the loss, unscale before clipping
            with autocast():
                output = self.network(data)
                del data
                l = self.loss(output, target)
            if do_backprop:
                self.amp_grad_scaler.scale(l).backward()
                self.amp_grad_scaler.unscale_(self.optimizer)
                torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
                self.amp_grad_scaler.step(self.optimizer)
                self.amp_grad_scaler.update()
        else:
            output = self.network(data)
            del data
            l = self.loss(output, target)
            if do_backprop:
                l.backward()
                torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
                self.optimizer.step()
        if run_online_evaluation:
            self.run_online_evaluation(output, target)
        del target
        return l.detach().cpu().numpy()
    def do_split(self):
        """
        Create a split and save it as splits_final.pkl file in the preprocessed data directory.
        Always uses split index 0 of the (possibly copied) splits_final.pkl.
        """
        # if the split file does not exist we need to create it
        if not isfile(join(self.dataset_directory, "splits_final.pkl")):
            # NOTE(review): relative path assumes a specific working directory — fragile; confirm
            shutil.copy('../../../data/splits_final.pkl', self.dataset_directory)
        splits_file = join(self.dataset_directory, "splits_final.pkl")
        splits = load_pickle(splits_file)
        tr_keys = splits[0]['train']
        val_keys = splits[0]['val']
        tr_keys.sort()
        val_keys.sort()
        print("Current train-val split is ...")
        print('Training set is %s' % tr_keys)
        print('Validation set is %s \n' % val_keys)
        self.dataset_tr = OrderedDict()
        for i in tr_keys:
            self.dataset_tr[i] = self.dataset[i]
        self.dataset_val = OrderedDict()
        for i in val_keys:
            self.dataset_val[i] = self.dataset[i]
    def setup_DA_params(self):
        """
        - we increase rotation angle from [-15, 15] to [-30, 30]
        - scale range is now (0.7, 1.4), was (0.85, 1.25)
        - we don't do elastic deformation anymore
        :return:
        """
        # fixed downsampling factors of the ResTranUnet decoder outputs
        self.downsampe_scales = [[1, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2]]
        self.deep_supervision_scales = [[1, 1, 1]] + list(list(i) for i in 1 / np.cumprod(
            np.vstack(self.downsampe_scales), axis=0))[:-1]
        if self.threeD:
            self.data_aug_params = default_3D_augmentation_params
            self.data_aug_params['rotation_x'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
            self.data_aug_params['rotation_y'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
            self.data_aug_params['rotation_z'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
            if self.do_dummy_2D_aug:
                self.data_aug_params["dummy_2D"] = True
                self.print_to_log_file("Using dummy2d data augmentation")
                self.data_aug_params["elastic_deform_alpha"] = \
                    default_2D_augmentation_params["elastic_deform_alpha"]
                self.data_aug_params["elastic_deform_sigma"] = \
                    default_2D_augmentation_params["elastic_deform_sigma"]
                self.data_aug_params["rotation_x"] = default_2D_augmentation_params["rotation_x"]
        else:
            self.do_dummy_2D_aug = False
            if max(self.patch_size) / min(self.patch_size) > 1.5:
                default_2D_augmentation_params['rotation_x'] = (-15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi)
            self.data_aug_params = default_2D_augmentation_params
        self.data_aug_params["mask_was_used_for_normalization"] = self.use_mask_for_norm
        if self.do_dummy_2D_aug:
            self.basic_generator_patch_size = get_patch_size(self.patch_size[1:],
                                                             self.data_aug_params['rotation_x'],
                                                             self.data_aug_params['rotation_y'],
                                                             self.data_aug_params['rotation_z'],
                                                             self.data_aug_params['scale_range'])
            self.basic_generator_patch_size = np.array([self.patch_size[0]] + list(self.basic_generator_patch_size))
            patch_size_for_spatialtransform = self.patch_size[1:]
        else:
            self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'],
                                                             self.data_aug_params['rotation_y'],
                                                             self.data_aug_params['rotation_z'],
                                                             self.data_aug_params['scale_range'])
            patch_size_for_spatialtransform = self.patch_size
        self.data_aug_params["scale_range"] = (0.7, 1.4)
        self.data_aug_params["do_elastic"] = False
        self.data_aug_params['selected_seg_channels'] = [0]
        self.data_aug_params['patch_size_for_spatialtransform'] = patch_size_for_spatialtransform
        self.data_aug_params["num_cached_per_thread"] = 2
    def maybe_update_lr(self, epoch=None):
        """
        if epoch is not None we overwrite epoch. Else we use epoch = self.epoch + 1
        (maybe_update_lr is called in on_epoch_end which is called before epoch is incremented.
        Therefore we need to do +1 here)
        :param epoch:
        :return:
        """
        if epoch is None:
            ep = self.epoch + 1
        else:
            ep = epoch
        self.optimizer.param_groups[0]['lr'] = poly_lr(ep, self.max_num_epochs, self.initial_lr, 0.9)
        self.print_to_log_file("lr:", np.round(self.optimizer.param_groups[0]['lr'], decimals=6))
    def on_epoch_end(self):
        """
        overwrite patient-based early stopping. Always run to 1000 epochs
        :return:
        """
        super().on_epoch_end()
        continue_training = self.epoch < self.max_num_epochs
        # it can rarely happen that the momentum of nnUNetTrainerV2 is too high for some dataset. If at epoch 100 the
        # estimated validation Dice is still 0 then we reduce the momentum from 0.99 to 0.95
        if self.epoch == 100:
            if self.all_val_eval_metrics[-1] == 0:
                self.optimizer.param_groups[0]["momentum"] = 0.95
                self.network.apply(InitWeights_He(1e-2))
                self.print_to_log_file("At epoch 100, the mean foreground Dice was 0. This can be caused by a too "
                                       "high momentum. High momentum (0.99) is good for datasets where it works, but "
                                       "sometimes causes issues such as this one. Momentum has now been reduced to "
                                       "0.95 and network weights have been reinitialized")
        return continue_training
    def run_training(self):
        """
        if we run with -c then we need to set the correct lr for the first epoch, otherwise it will run the first
        continued epoch with self.initial_lr
        we also need to make sure deep supervision in the network is enabled for training, thus the wrapper
        :return:
        """
        self.maybe_update_lr(self.epoch)  # if we dont overwrite epoch then self.epoch+1 is used which is not what we
        # want at the start of the training
        ds = self.network.do_ds
        self.network.do_ds = True
        ret = super().run_training()
        self.network.do_ds = ds
        return ret
| 18,610
| 46.843188
| 151
|
py
|
CoTr
|
CoTr-main/CoTr_package/CoTr/training/network_training/__init__.py
|
from __future__ import absolute_import
from . import *
| 54
| 26.5
| 38
|
py
|
CoTr
|
CoTr-main/CoTr_package/CoTr/training/network_training/network_trainer.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from _warnings import warn
from typing import Tuple
import matplotlib
from batchgenerators.utilities.file_and_folder_operations import *
from CoTr.network_architecture.neural_network import SegmentationNetwork
from sklearn.model_selection import KFold
from torch import nn
from torch.cuda.amp import GradScaler, autocast
from torch.optim.lr_scheduler import _LRScheduler
matplotlib.use("agg")
from time import time, sleep
import torch
import numpy as np
from torch.optim import lr_scheduler
import matplotlib.pyplot as plt
import sys
from collections import OrderedDict
import torch.backends.cudnn as cudnn
from abc import abstractmethod
from datetime import datetime
from tqdm import trange
from nnunet.utilities.to_torch import maybe_to_torch, to_cuda
class NetworkTrainer(object):
def __init__(self, deterministic=True, fp16=False):
"""
A generic class that can train almost any neural network (RNNs excluded). It provides basic functionality such
as the training loop, tracking of training and validation losses (and the target metric if you implement it)
Training can be terminated early if the validation loss (or the target metric if implemented) do not improve
anymore. This is based on a moving average (MA) of the loss/metric instead of the raw values to get more smooth
results.
What you need to override:
- __init__
- initialize
- run_online_evaluation (optional)
- finish_online_evaluation (optional)
- validate
- predict_test_case
"""
self.fp16 = fp16
self.amp_grad_scaler = None
if deterministic:
np.random.seed(12345)
torch.manual_seed(12345)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(12345)
cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
else:
cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
################# SET THESE IN self.initialize() ###################################
self.network: Tuple[SegmentationNetwork, nn.DataParallel] = None
self.optimizer = None
self.lr_scheduler = None
self.tr_gen = self.val_gen = None
self.was_initialized = False
################# SET THESE IN INIT ################################################
self.output_folder = None
self.fold = None
self.loss = None
self.dataset_directory = None
################# SET THESE IN LOAD_DATASET OR DO_SPLIT ############################
self.dataset = None # these can be None for inference mode
self.dataset_tr = self.dataset_val = None # do not need to be used, they just appear if you are using the suggested load_dataset_and_do_split
################# THESE DO NOT NECESSARILY NEED TO BE MODIFIED #####################
self.patience = 50
self.val_eval_criterion_alpha = 0.9 # alpha * old + (1-alpha) * new
# if this is too low then the moving average will be too noisy and the training may terminate early. If it is
# too high the training will take forever
self.train_loss_MA_alpha = 0.93 # alpha * old + (1-alpha) * new
self.train_loss_MA_eps = 5e-4 # new MA must be at least this much better (smaller)
self.max_num_epochs = 1000
self.num_batches_per_epoch = 250
self.num_val_batches_per_epoch = 50
self.also_val_in_tr_mode = False
self.lr_threshold = 1e-6 # the network will not terminate training if the lr is still above this threshold
################# LEAVE THESE ALONE ################################################
self.val_eval_criterion_MA = None
self.train_loss_MA = None
self.best_val_eval_criterion_MA = None
self.best_MA_tr_loss_for_patience = None
self.best_epoch_based_on_MA_tr_loss = None
self.all_tr_losses = []
self.all_val_losses = []
self.all_val_losses_tr_mode = []
self.all_val_eval_metrics = [] # does not have to be used
self.epoch = 0
self.log_file = None
self.deterministic = deterministic
self.use_progress_bar = True
if 'nnunet_use_progress_bar' in os.environ.keys():
self.use_progress_bar = bool(int(os.environ['nnunet_use_progress_bar']))
################# Settings for saving checkpoints ##################################
self.save_every = 50
self.save_latest_only = True # if false it will not store/overwrite _latest but separate files each
# time an intermediate checkpoint is created
self.save_intermediate_checkpoints = True # whether or not to save checkpoint_latest
self.save_best_checkpoint = True # whether or not to save the best checkpoint according to self.best_val_eval_criterion_MA
self.save_final_checkpoint = True # whether or not to save the final checkpoint
    @abstractmethod
    def initialize(self, training=True):
        """
        Set up everything needed before training/inference. Implementations must:
        create self.output_folder
        modify self.output_folder if you are doing cross-validation (one folder per fold)
        set self.tr_gen and self.val_gen
        call self.initialize_network and self.initialize_optimizer_and_scheduler (important!)
        finally set self.was_initialized to True
        :param training: if True, training data/generators must be prepared as well
        :return: None
        """
    @abstractmethod
    def load_dataset(self):
        """Populate self.dataset (keys are case identifiers); consumed by do_split."""
        pass
def do_split(self):
"""
This is a suggestion for if your dataset is a dictionary (my personal standard)
:return:
"""
splits_file = join(self.dataset_directory, "splits_final.pkl")
if not isfile(splits_file):
self.print_to_log_file("Creating new split...")
splits = []
all_keys_sorted = np.sort(list(self.dataset.keys()))
kfold = KFold(n_splits=5, shuffle=True, random_state=12345)
for i, (train_idx, test_idx) in enumerate(kfold.split(all_keys_sorted)):
train_keys = np.array(all_keys_sorted)[train_idx]
test_keys = np.array(all_keys_sorted)[test_idx]
splits.append(OrderedDict())
splits[-1]['train'] = train_keys
splits[-1]['val'] = test_keys
save_pickle(splits, splits_file)
splits = load_pickle(splits_file)
if self.fold == "all":
tr_keys = val_keys = list(self.dataset.keys())
else:
tr_keys = splits[self.fold]['train']
val_keys = splits[self.fold]['val']
tr_keys.sort()
val_keys.sort()
self.dataset_tr = OrderedDict()
for i in tr_keys:
self.dataset_tr[i] = self.dataset[i]
self.dataset_val = OrderedDict()
for i in val_keys:
self.dataset_val[i] = self.dataset[i]
    def plot_progress(self):
        """
        Render training/validation loss curves (and the evaluation metric, if
        tracked) to progress.png in self.output_folder.
        :return: None
        """
        try:
            font = {'weight': 'normal',
                    'size': 18}
            matplotlib.rc('font', **font)
            fig = plt.figure(figsize=(30, 24))
            ax = fig.add_subplot(111)
            # second y-axis: the evaluation metric lives on a different scale than the loss
            ax2 = ax.twinx()
            x_values = list(range(self.epoch + 1))
            ax.plot(x_values, self.all_tr_losses, color='b', ls='-', label="loss_tr")
            ax.plot(x_values, self.all_val_losses, color='r', ls='-', label="loss_val, train=False")
            if len(self.all_val_losses_tr_mode) > 0:
                ax.plot(x_values, self.all_val_losses_tr_mode, color='g', ls='-', label="loss_val, train=True")
            # only plot the metric when one value per epoch exists (it is optional)
            if len(self.all_val_eval_metrics) == len(x_values):
                ax2.plot(x_values, self.all_val_eval_metrics, color='g', ls='--', label="evaluation metric")
            ax.set_xlabel("epoch")
            ax.set_ylabel("loss")
            ax2.set_ylabel("evaluation metric")
            ax.legend()
            ax2.legend(loc=9)
            fig.savefig(join(self.output_folder, "progress.png"))
            plt.close()
        except IOError:
            self.print_to_log_file("failed to plot: ", sys.exc_info())
    def print_to_log_file(self, *args, also_print_to_console=True, add_timestamp=True):
        """
        Append *args to this trainer's log file, creating the file on first use.
        :param also_print_to_console: if True, also print(*args) to stdout
        :param add_timestamp: if True, prefix the message with the current datetime
        """
        timestamp = time()
        dt_object = datetime.fromtimestamp(timestamp)
        if add_timestamp:
            args = ("%s:" % dt_object, *args)
        if self.log_file is None:
            # lazily create a uniquely named log file inside the output folder
            maybe_mkdir_p(self.output_folder)
            timestamp = datetime.now()
            self.log_file = join(self.output_folder, "training_log_%d_%d_%d_%02.0d_%02.0d_%02.0d.txt" %
                                 (timestamp.year, timestamp.month, timestamp.day, timestamp.hour, timestamp.minute,
                                  timestamp.second))
            with open(self.log_file, 'w') as f:
                f.write("Starting... \n")
        successful = False
        max_attempts = 5
        ctr = 0
        # retry a few times: writing may transiently fail (e.g. on network storage)
        while not successful and ctr < max_attempts:
            try:
                with open(self.log_file, 'a+') as f:
                    for a in args:
                        f.write(str(a))
                        f.write(" ")
                    f.write("\n")
                successful = True
            except IOError:
                print("%s: failed to log: " % datetime.fromtimestamp(timestamp), sys.exc_info())
                sleep(0.5)
                ctr += 1
        if also_print_to_console:
            print(*args)
    def save_checkpoint(self, fname, save_optimizer=True):
        """
        Serialize the training state to fname via torch.save: network weights (moved
        to CPU), optionally the optimizer state, lr scheduler state, progress curves,
        best-epoch bookkeeping and (if present) the AMP grad scaler state.
        :param fname: target file path
        :param save_optimizer: if False, 'optimizer_state_dict' is stored as None
        """
        start_time = time()
        state_dict = self.network.state_dict()
        # move all weights to CPU so the checkpoint can be loaded on any device
        for key in state_dict.keys():
            state_dict[key] = state_dict[key].cpu()
        lr_sched_state_dct = None
        if self.lr_scheduler is not None and hasattr(self.lr_scheduler,
                                                     'state_dict'):  # not isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau):
            lr_sched_state_dct = self.lr_scheduler.state_dict()
            # WTF is this!?
            # for key in lr_sched_state_dct.keys():
            #    lr_sched_state_dct[key] = lr_sched_state_dct[key]
        if save_optimizer:
            optimizer_state_dict = self.optimizer.state_dict()
        else:
            optimizer_state_dict = None
        self.print_to_log_file("saving checkpoint...")
        # note: epoch is stored incremented by one (load_checkpoint_ram compensates)
        save_this = {
            'epoch': self.epoch + 1,
            'state_dict': state_dict,
            'optimizer_state_dict': optimizer_state_dict,
            'lr_scheduler_state_dict': lr_sched_state_dct,
            'plot_stuff': (self.all_tr_losses, self.all_val_losses, self.all_val_losses_tr_mode,
                           self.all_val_eval_metrics),
            'best_stuff' : (self.best_epoch_based_on_MA_tr_loss, self.best_MA_tr_loss_for_patience, self.best_val_eval_criterion_MA)}
        if self.amp_grad_scaler is not None:
            save_this['amp_grad_scaler'] = self.amp_grad_scaler.state_dict()
        torch.save(save_this, fname)
        self.print_to_log_file("done, saving took %.2f seconds" % (time() - start_time))
def load_best_checkpoint(self, train=True):
if self.fold is None:
raise RuntimeError("Cannot load best checkpoint if self.fold is None")
if isfile(join(self.output_folder, "model_best.model")):
self.load_checkpoint(join(self.output_folder, "model_best.model"), train=train)
else:
self.print_to_log_file("WARNING! model_best.model does not exist! Cannot load best checkpoint. Falling "
"back to load_latest_checkpoint")
self.load_latest_checkpoint(train)
def load_latest_checkpoint(self, train=True):
if isfile(join(self.output_folder, "model_final_checkpoint.model")):
return self.load_checkpoint(join(self.output_folder, "model_final_checkpoint.model"), train=train)
if isfile(join(self.output_folder, "model_latest.model")):
return self.load_checkpoint(join(self.output_folder, "model_latest.model"), train=train)
if isfile(join(self.output_folder, "model_best.model")):
return self.load_best_checkpoint(train)
raise RuntimeError("No checkpoint found")
def load_checkpoint(self, fname, train=True):
self.print_to_log_file("loading checkpoint", fname, "train=", train)
if not self.was_initialized:
self.initialize(train)
# saved_model = torch.load(fname, map_location=torch.device('cuda', torch.cuda.current_device()))
saved_model = torch.load(fname, map_location=torch.device('cpu'))
self.load_checkpoint_ram(saved_model, train)
    @abstractmethod
    def initialize_network(self):
        """
        initialize self.network here (must happen before optimizer creation)
        :return:
        """
        pass
    @abstractmethod
    def initialize_optimizer_and_scheduler(self):
        """
        initialize self.optimizer and self.lr_scheduler (if applicable) here;
        requires self.network to exist already
        :return:
        """
        pass
    def load_checkpoint_ram(self, checkpoint, train=True):
        """
        Restore state from a checkpoint dict that is already in RAM (layout as
        written by save_checkpoint).
        :param checkpoint: dict with 'state_dict', 'epoch', 'plot_stuff', optionally
            'optimizer_state_dict', 'lr_scheduler_state_dict', 'best_stuff',
            'amp_grad_scaler'
        :param train: if True, also restore optimizer and lr scheduler state
        :return:
        """
        if not self.was_initialized:
            self.initialize(train)
        new_state_dict = OrderedDict()
        curr_state_dict_keys = list(self.network.state_dict().keys())
        # if state dict comes form nn.DataParallel but we use non-parallel model here then the state dict keys do not
        # match. Use heuristic to make it match
        for k, value in checkpoint['state_dict'].items():
            key = k
            if key not in curr_state_dict_keys and key.startswith('module.'):
                key = key[7:]
            new_state_dict[key] = value
        if self.fp16:
            # make sure the grad scaler exists before trying to restore its state
            self._maybe_init_amp()
            if 'amp_grad_scaler' in checkpoint.keys():
                self.amp_grad_scaler.load_state_dict(checkpoint['amp_grad_scaler'])
        self.network.load_state_dict(new_state_dict)
        self.epoch = checkpoint['epoch']
        if train:
            optimizer_state_dict = checkpoint['optimizer_state_dict']
            if optimizer_state_dict is not None:
                self.optimizer.load_state_dict(optimizer_state_dict)
            if self.lr_scheduler is not None and hasattr(self.lr_scheduler, 'load_state_dict') and checkpoint[
                'lr_scheduler_state_dict'] is not None:
                self.lr_scheduler.load_state_dict(checkpoint['lr_scheduler_state_dict'])
            # epoch-driven schedulers must be advanced to the restored epoch
            if issubclass(self.lr_scheduler.__class__, _LRScheduler):
                self.lr_scheduler.step(self.epoch)
        self.all_tr_losses, self.all_val_losses, self.all_val_losses_tr_mode, self.all_val_eval_metrics = checkpoint[
            'plot_stuff']
        # load best loss (if present)
        if 'best_stuff' in checkpoint.keys():
            self.best_epoch_based_on_MA_tr_loss, self.best_MA_tr_loss_for_patience, self.best_val_eval_criterion_MA = checkpoint[
                'best_stuff']
        # after the training is done, the epoch is incremented one more time in my old code. This results in
        # self.epoch = 1001 for old trained models when the epoch is actually 1000. This causes issues because
        # len(self.all_tr_losses) = 1000 and the plot function will fail. We can easily detect and correct that here
        if self.epoch != len(self.all_tr_losses):
            self.print_to_log_file("WARNING in loading checkpoint: self.epoch != len(self.all_tr_losses). This is "
                                   "due to an old bug and should only appear when you are loading old models. New "
                                   "models should have this fixed! self.epoch is now set to len(self.all_tr_losses)")
            self.epoch = len(self.all_tr_losses)
            self.all_tr_losses = self.all_tr_losses[:self.epoch]
            self.all_val_losses = self.all_val_losses[:self.epoch]
            self.all_val_losses_tr_mode = self.all_val_losses_tr_mode[:self.epoch]
            self.all_val_eval_metrics = self.all_val_eval_metrics[:self.epoch]
        self._maybe_init_amp()
def _maybe_init_amp(self):
if self.fp16 and self.amp_grad_scaler is None and torch.cuda.is_available():
self.amp_grad_scaler = GradScaler()
    def plot_network_architecture(self):
        """
        can be implemented (see nnUNetTrainer) but does not have to. Not implemented here because it imposes stronger
        assumptions on the presence of class variables
        :return:
        """
        pass
    def run_training(self):
        """
        Main training loop: runs up to self.max_num_epochs epochs of training and
        validation batches, tracks losses, and delegates per-epoch bookkeeping
        (lr scheduling, checkpointing, early stopping) to on_epoch_end.
        :return: None
        """
        # warm up the background data augmentation workers before training starts
        _ = self.tr_gen.next()
        _ = self.val_gen.next()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        self._maybe_init_amp()
        maybe_mkdir_p(self.output_folder)
        self.plot_network_architecture()
        if cudnn.benchmark and cudnn.deterministic:
            warn("torch.backends.cudnn.deterministic is True indicating a deterministic training is desired. "
                 "But torch.backends.cudnn.benchmark is True as well and this will prevent deterministic training! "
                 "If you want deterministic then set benchmark=False")
        if not self.was_initialized:
            self.initialize(True)
        while self.epoch < self.max_num_epochs:
            self.print_to_log_file("\nepoch: ", self.epoch)
            epoch_start_time = time()
            train_losses_epoch = []
            # train one epoch
            self.network.train()
            if self.use_progress_bar:
                with trange(self.num_batches_per_epoch) as tbar:
                    for b in tbar:
                        tbar.set_description("Epoch {}/{}".format(self.epoch+1, self.max_num_epochs))
                        l = self.run_iteration(self.tr_gen, True)
                        tbar.set_postfix(loss=l)
                        train_losses_epoch.append(l)
            else:
                for _ in range(self.num_batches_per_epoch):
                    l = self.run_iteration(self.tr_gen, True)
                    train_losses_epoch.append(l)
            self.all_tr_losses.append(np.mean(train_losses_epoch))
            self.print_to_log_file("train loss : %.4f" % self.all_tr_losses[-1])
            with torch.no_grad():
                # validation with train=False
                self.network.eval()
                val_losses = []
                for b in range(self.num_val_batches_per_epoch):
                    l = self.run_iteration(self.val_gen, False, True)
                    val_losses.append(l)
                self.all_val_losses.append(np.mean(val_losses))
                self.print_to_log_file("validation loss: %.4f" % self.all_val_losses[-1])
                if self.also_val_in_tr_mode:
                    self.network.train()
                    # validation with train=True
                    val_losses = []
                    for b in range(self.num_val_batches_per_epoch):
                        l = self.run_iteration(self.val_gen, False)
                        val_losses.append(l)
                    self.all_val_losses_tr_mode.append(np.mean(val_losses))
                    self.print_to_log_file("validation loss (train=True): %.4f" % self.all_val_losses_tr_mode[-1])
            self.update_train_loss_MA()  # needed for lr scheduler and stopping of training
            continue_training = self.on_epoch_end()
            epoch_end_time = time()
            if not continue_training:
                # allows for early stopping
                break
            self.epoch += 1
            self.print_to_log_file("This epoch took %f s\n" % (epoch_end_time - epoch_start_time))
        self.epoch -= 1  # if we don't do this we can get a problem with loading model_final_checkpoint.
        if self.save_final_checkpoint: self.save_checkpoint(join(self.output_folder, "model_final_checkpoint.model"))
        # now we can delete latest as it will be identical with final
        if isfile(join(self.output_folder, "model_latest.model")):
            os.remove(join(self.output_folder, "model_latest.model"))
        if isfile(join(self.output_folder, "model_latest.model.pkl")):
            os.remove(join(self.output_folder, "model_latest.model.pkl"))
def maybe_update_lr(self):
# maybe update learning rate
if self.lr_scheduler is not None:
assert isinstance(self.lr_scheduler, (lr_scheduler.ReduceLROnPlateau, lr_scheduler._LRScheduler))
if isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau):
# lr scheduler is updated with moving average val loss. should be more robust
self.lr_scheduler.step(self.train_loss_MA)
else:
self.lr_scheduler.step(self.epoch + 1)
self.print_to_log_file("lr is now (scheduler) %s" % str(self.optimizer.param_groups[0]['lr']))
def maybe_save_checkpoint(self):
"""
Saves a checkpoint every save_ever epochs.
:return:
"""
if self.save_intermediate_checkpoints and (self.epoch % self.save_every == (self.save_every - 1)):
self.print_to_log_file("saving scheduled checkpoint file...")
if not self.save_latest_only:
self.save_checkpoint(join(self.output_folder, "model_ep_%03.0d.model" % (self.epoch + 1)))
self.save_checkpoint(join(self.output_folder, "model_latest.model"))
self.print_to_log_file("done")
def update_eval_criterion_MA(self):
"""
If self.all_val_eval_metrics is unused (len=0) then we fall back to using -self.all_val_losses for the MA to determine early stopping
(not a minimization, but a maximization of a metric and therefore the - in the latter case)
:return:
"""
if self.val_eval_criterion_MA is None:
if len(self.all_val_eval_metrics) == 0:
self.val_eval_criterion_MA = - self.all_val_losses[-1]
else:
self.val_eval_criterion_MA = self.all_val_eval_metrics[-1]
else:
if len(self.all_val_eval_metrics) == 0:
"""
We here use alpha * old - (1 - alpha) * new because new in this case is the vlaidation loss and lower
is better, so we need to negate it.
"""
self.val_eval_criterion_MA = self.val_eval_criterion_alpha * self.val_eval_criterion_MA - (
1 - self.val_eval_criterion_alpha) * \
self.all_val_losses[-1]
else:
self.val_eval_criterion_MA = self.val_eval_criterion_alpha * self.val_eval_criterion_MA + (
1 - self.val_eval_criterion_alpha) * \
self.all_val_eval_metrics[-1]
    def manage_patience(self):
        """
        Early-stopping logic based on the moving average of the training loss:
        saves model_best.model whenever the validation criterion MA improves, and
        stops training when the training loss MA has not improved by at least
        self.train_loss_MA_eps for more than self.patience epochs (unless the lr
        is still above self.lr_threshold, in which case patience is extended).
        :return: bool, False when training should stop
        """
        # update patience
        continue_training = True
        if self.patience is not None:
            # if best_MA_tr_loss_for_patience and best_epoch_based_on_MA_tr_loss were not yet initialized,
            # initialize them
            if self.best_MA_tr_loss_for_patience is None:
                self.best_MA_tr_loss_for_patience = self.train_loss_MA
            if self.best_epoch_based_on_MA_tr_loss is None:
                self.best_epoch_based_on_MA_tr_loss = self.epoch
            if self.best_val_eval_criterion_MA is None:
                self.best_val_eval_criterion_MA = self.val_eval_criterion_MA
            # check if the current epoch is the best one according to moving average of validation criterion. If so
            # then save 'best' model
            # Do not use this for validation. This is intended for test set prediction only.
            #self.print_to_log_file("current best_val_eval_criterion_MA is %.4f0" % self.best_val_eval_criterion_MA)
            #self.print_to_log_file("current val_eval_criterion_MA is %.4f" % self.val_eval_criterion_MA)
            if self.val_eval_criterion_MA > self.best_val_eval_criterion_MA:
                self.best_val_eval_criterion_MA = self.val_eval_criterion_MA
                #self.print_to_log_file("saving best epoch checkpoint...")
                if self.save_best_checkpoint: self.save_checkpoint(join(self.output_folder, "model_best.model"))
            # Now see if the moving average of the train loss has improved. If yes then reset patience, else
            # increase patience
            if self.train_loss_MA + self.train_loss_MA_eps < self.best_MA_tr_loss_for_patience:
                self.best_MA_tr_loss_for_patience = self.train_loss_MA
                self.best_epoch_based_on_MA_tr_loss = self.epoch
                #self.print_to_log_file("New best epoch (train loss MA): %03.4f" % self.best_MA_tr_loss_for_patience)
            else:
                pass
                #self.print_to_log_file("No improvement: current train MA %03.4f, best: %03.4f, eps is %03.4f" %
                #                       (self.train_loss_MA, self.best_MA_tr_loss_for_patience, self.train_loss_MA_eps))
            # if patience has reached its maximum then finish training (provided lr is low enough)
            if self.epoch - self.best_epoch_based_on_MA_tr_loss > self.patience:
                if self.optimizer.param_groups[0]['lr'] > self.lr_threshold:
                    # lr is still high enough that further improvement is plausible: grant half the patience again
                    #self.print_to_log_file("My patience ended, but I believe I need more time (lr > 1e-6)")
                    self.best_epoch_based_on_MA_tr_loss = self.epoch - self.patience // 2
                else:
                    #self.print_to_log_file("My patience ended")
                    continue_training = False
            else:
                pass
                #self.print_to_log_file(
                #    "Patience: %d/%d" % (self.epoch - self.best_epoch_based_on_MA_tr_loss, self.patience))
        return continue_training
def on_epoch_end(self):
self.finish_online_evaluation() # does not have to do anything, but can be used to update self.all_val_eval_
# metrics
self.plot_progress()
self.maybe_update_lr()
self.maybe_save_checkpoint()
self.update_eval_criterion_MA()
continue_training = self.manage_patience()
return continue_training
def update_train_loss_MA(self):
if self.train_loss_MA is None:
self.train_loss_MA = self.all_tr_losses[-1]
else:
self.train_loss_MA = self.train_loss_MA_alpha * self.train_loss_MA + (1 - self.train_loss_MA_alpha) * \
self.all_tr_losses[-1]
    def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False):
        """
        Run one forward (and optionally backward) pass on the next batch.
        :param data_generator: yields dicts with 'data' and 'target'
        :param do_backprop: if True, compute gradients and step the optimizer
        :param run_online_evaluation: if True, feed output/target to run_online_evaluation
        :return: scalar loss value as a numpy array (detached, on CPU)
        """
        data_dict = next(data_generator)
        data = data_dict['data']
        target = data_dict['target']
        data = maybe_to_torch(data)
        target = maybe_to_torch(target)
        if torch.cuda.is_available():
            data = to_cuda(data)
            target = to_cuda(target)
        self.optimizer.zero_grad()
        if self.fp16:
            # mixed-precision path: forward under autocast, scaled backward via GradScaler
            with autocast():
                output = self.network(data)
                del data
                l = self.loss(output, target)
            if do_backprop:
                self.amp_grad_scaler.scale(l).backward()
                self.amp_grad_scaler.step(self.optimizer)
                self.amp_grad_scaler.update()
        else:
            output = self.network(data)
            del data
            l = self.loss(output, target)
            if do_backprop:
                l.backward()
                self.optimizer.step()
        if run_online_evaluation:
            self.run_online_evaluation(output, target)
        del target
        return l.detach().cpu().numpy()
    def run_online_evaluation(self, *args, **kwargs):
        """
        Can be implemented, does not have to. Called from run_iteration with
        (network output, target) when run_online_evaluation=True.
        :return:
        """
        pass
    def finish_online_evaluation(self):
        """
        Can be implemented, does not have to. Called once per epoch from
        on_epoch_end; may be used to append to self.all_val_eval_metrics.
        :return:
        """
        pass
    @abstractmethod
    def validate(self, *args, **kwargs):
        """Run validation/inference on the validation set; must be implemented by subclasses."""
        pass
    def find_lr(self, num_iters=1000, init_value=1e-6, final_value=10., beta=0.98):
        """
        LR range test: sweep the learning rate exponentially from init_value to
        final_value over num_iters training iterations and record the smoothed loss,
        saving a plot to lr_finder.png in self.output_folder.
        stolen and adapted from here: https://sgugger.github.io/how-do-you-find-a-good-learning-rate.html
        :param num_iters: number of training iterations to sweep over
        :param init_value: starting learning rate
        :param final_value: final learning rate
        :param beta: smoothing factor for the exponential loss average
        :return: (log10 learning rates, smoothed losses)
        """
        import math
        self._maybe_init_amp()
        mult = (final_value / init_value) ** (1 / num_iters)
        lr = init_value
        self.optimizer.param_groups[0]['lr'] = lr
        avg_loss = 0.
        best_loss = 0.
        losses = []
        log_lrs = []
        for batch_num in range(1, num_iters + 1):
            # +1 because this one here is not designed to have negative loss...
            loss = self.run_iteration(self.tr_gen, do_backprop=True, run_online_evaluation=False).data.item() + 1
            # Compute the smoothed loss
            avg_loss = beta * avg_loss + (1 - beta) * loss
            smoothed_loss = avg_loss / (1 - beta ** batch_num)
            # Stop if the loss is exploding
            if batch_num > 1 and smoothed_loss > 4 * best_loss:
                break
            # Record the best loss
            if smoothed_loss < best_loss or batch_num == 1:
                best_loss = smoothed_loss
            # Store the values
            losses.append(smoothed_loss)
            log_lrs.append(math.log10(lr))
            # Update the lr for the next step
            lr *= mult
            self.optimizer.param_groups[0]['lr'] = lr
        import matplotlib.pyplot as plt
        lrs = [10 ** i for i in log_lrs]
        fig = plt.figure()
        plt.xscale('log')
        # drop the noisy first few and last few points for a cleaner curve
        plt.plot(lrs[10:-5], losses[10:-5])
        plt.savefig(join(self.output_folder, "lr_finder.png"))
        plt.close()
        return log_lrs, losses
| 30,846
| 41.372253
| 150
|
py
|
CoTr
|
CoTr-main/CoTr_package/CoTr/training/network_training/nnUNetTrainer.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
from collections import OrderedDict
from multiprocessing import Pool
from time import sleep
from typing import Tuple, List
import matplotlib
import nnunet
import numpy as np
import torch
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.configuration import default_num_threads
from nnunet.evaluation.evaluator import aggregate_scores
from nnunet.inference.segmentation_export import save_segmentation_nifti_from_softmax
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.network_architecture.initialization import InitWeights_He
from CoTr.network_architecture.neural_network import SegmentationNetwork
from nnunet.postprocessing.connected_components import determine_postprocessing
from nnunet.training.data_augmentation.default_data_augmentation import default_3D_augmentation_params, \
default_2D_augmentation_params, get_default_augmentation, get_patch_size
from nnunet.training.dataloading.dataset_loading import load_dataset, DataLoader3D, DataLoader2D, unpack_dataset
from nnunet.training.loss_functions.dice_loss import DC_and_CE_loss
from CoTr.training.network_training.network_trainer import NetworkTrainer
from nnunet.utilities.nd_softmax import softmax_helper
from nnunet.utilities.tensor_utilities import sum_tensor
from torch import nn
from torch.optim import lr_scheduler
matplotlib.use("agg")
class nnUNetTrainer(NetworkTrainer):
    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        """
        :param deterministic:
        :param fold: can be either [0 ... 5) for cross-validation, 'all' to train on all available training data or
        None if you wish to load some checkpoint and do inference only
        :param plans_file: the pkl file generated by preprocessing. This file will determine all design choices
        :param subfolder_with_preprocessed_data: must be a subfolder of dataset_directory (just the name of the folder,
        not the entire path). This is where the preprocessed data lies that will be used for network training. We made
        this explicitly available so that differently preprocessed data can coexist and the user can choose what to use.
        Can be None if you are doing inference only.
        :param output_folder: where to store parameters, plot progress and to the validation
        :param dataset_directory: the parent directory in which the preprocessed Task data is stored. This is required
        because the split information is stored in this directory. For running prediction only this input is not
        required and may be set to None
        :param batch_dice: compute dice loss for each sample and average over all samples in the batch or pretend the
        batch is a pseudo volume?
        :param stage: The plans file may contain several stages (used for lowres / highres / pyramid). Stage must be
        specified for training:
        if stage 1 exists then stage 1 is the high resolution stage, otherwise it's 0
        :param unpack_data: if False, npz preprocessed data will not be unpacked to npy. This consumes less space but
        is considerably slower! Running unpack_data=False with 2d should never be done!
        IMPORTANT: If you inherit from nnUNetTrainer and the init args change then you need to redefine self.init_args
        in your init accordingly. Otherwise checkpoints won't load properly!
        """
        super(nnUNetTrainer, self).__init__(deterministic, fp16)
        self.unpack_data = unpack_data
        # init_args is stored so that checkpoints can recreate this trainer with identical arguments
        self.init_args = (plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
                          deterministic, fp16)
        # set through arguments from init
        self.stage = stage
        self.experiment_name = self.__class__.__name__
        self.plans_file = plans_file
        self.output_folder = output_folder
        self.dataset_directory = dataset_directory
        self.output_folder_base = self.output_folder
        self.fold = fold
        self.plans = None
        # if we are running inference only then the self.dataset_directory is set (due to checkpoint loading) but it
        # irrelevant
        if self.dataset_directory is not None and isdir(self.dataset_directory):
            self.gt_niftis_folder = join(self.dataset_directory, "gt_segmentations")
        else:
            self.gt_niftis_folder = None
        self.folder_with_preprocessed_data = None
        # set in self.initialize()
        self.dl_tr = self.dl_val = None
        # plan-derived configuration; all of these are filled in by process_plans()
        self.num_input_channels = self.num_classes = self.net_pool_per_axis = self.patch_size = self.batch_size = \
            self.threeD = self.base_num_features = self.intensity_properties = self.normalization_schemes = \
            self.net_num_pool_op_kernel_sizes = self.net_conv_kernel_sizes = None  # loaded automatically from plans_file
        self.basic_generator_patch_size = self.data_aug_params = self.transpose_forward = self.transpose_backward = None
        self.batch_dice = batch_dice
        self.loss = DC_and_CE_loss({'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}, {})
        # accumulators for the online (per-batch) dice evaluation
        self.online_eval_foreground_dc = []
        self.online_eval_tp = []
        self.online_eval_fp = []
        self.online_eval_fn = []
        self.classes = self.do_dummy_2D_aug = self.use_mask_for_norm = self.only_keep_largest_connected_component = \
            self.min_region_size_per_class = self.min_size_per_class = None
        self.inference_pad_border_mode = "constant"
        self.inference_pad_kwargs = {'constant_values': 0}
        # appends the fold subfolder to self.output_folder
        self.update_fold(fold)
        self.pad_all_sides = None
        # optimizer / scheduler hyperparameters
        self.lr_scheduler_eps = 1e-3
        self.lr_scheduler_patience = 30
        self.initial_lr = 3e-4
        self.weight_decay = 3e-5
        self.oversample_foreground_percent = 0.33
        self.conv_per_stage = None
        self.regions_class_order = None
def update_fold(self, fold):
"""
used to swap between folds for inference (ensemble of models from cross-validation)
DO NOT USE DURING TRAINING AS THIS WILL NOT UPDATE THE DATASET SPLIT AND THE DATA AUGMENTATION GENERATORS
:param fold:
:return:
"""
if fold is not None:
if isinstance(fold, str):
assert fold == "all" or "xf", "if self.fold is a string then it must be \'all\'"
if self.output_folder.endswith("%s" % str(self.fold)):
self.output_folder = self.output_folder_base
self.output_folder = join(self.output_folder, "%s" % str(fold))
else:
if self.output_folder.endswith("fold_%s" % str(self.fold)):
self.output_folder = self.output_folder_base
self.output_folder = join(self.output_folder, "fold_%s" % str(fold))
self.fold = fold
    def setup_DA_params(self):
        """
        Configure self.data_aug_params (rotation/elastic/scaling settings) and
        self.basic_generator_patch_size (the enlarged patch size sampled before
        spatial augmentation) based on dimensionality and patch anisotropy.
        """
        if self.threeD:
            self.data_aug_params = default_3D_augmentation_params
            if self.do_dummy_2D_aug:
                # anisotropic data: augment slice-wise with 2D elastic/rotation params
                self.data_aug_params["dummy_2D"] = True
                self.print_to_log_file("Using dummy2d data augmentation")
                self.data_aug_params["elastic_deform_alpha"] = \
                    default_2D_augmentation_params["elastic_deform_alpha"]
                self.data_aug_params["elastic_deform_sigma"] = \
                    default_2D_augmentation_params["elastic_deform_sigma"]
                self.data_aug_params["rotation_x"] = default_2D_augmentation_params["rotation_x"]
        else:
            self.do_dummy_2D_aug = False
            # NOTE(review): this mutates the module-level default_2D_augmentation_params
            # dict in place before aliasing it — presumably intentional, but it affects
            # any other user of that module global; verify if this trainer is reused
            if max(self.patch_size) / min(self.patch_size) > 1.5:
                default_2D_augmentation_params['rotation_x'] = (-15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi)
            self.data_aug_params = default_2D_augmentation_params
        self.data_aug_params["mask_was_used_for_normalization"] = self.use_mask_for_norm
        if self.do_dummy_2D_aug:
            # in-plane dims get the rotation/scaling margin; the through-plane dim stays fixed
            self.basic_generator_patch_size = get_patch_size(self.patch_size[1:],
                                                             self.data_aug_params['rotation_x'],
                                                             self.data_aug_params['rotation_y'],
                                                             self.data_aug_params['rotation_z'],
                                                             self.data_aug_params['scale_range'])
            self.basic_generator_patch_size = np.array([self.patch_size[0]] + list(self.basic_generator_patch_size))
            patch_size_for_spatialtransform = self.patch_size[1:]
        else:
            self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'],
                                                             self.data_aug_params['rotation_y'],
                                                             self.data_aug_params['rotation_z'],
                                                             self.data_aug_params['scale_range'])
            patch_size_for_spatialtransform = self.patch_size
        self.data_aug_params['selected_seg_channels'] = [0]
        self.data_aug_params['patch_size_for_spatialtransform'] = patch_size_for_spatialtransform
    def initialize(self, training=True, force_load_plans=False):
        """
        For prediction of test cases just set training=False, this will prevent loading of training data and
        training batchgenerator initialization
        :param training: if True, data loaders and augmentation generators are created
        :param force_load_plans: reload the plans file even if self.plans is already set
        :return:
        """
        maybe_mkdir_p(self.output_folder)
        if force_load_plans or (self.plans is None):
            self.load_plans_file()
        self.process_plans(self.plans)
        self.setup_DA_params()
        if training:
            self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +
                                                      "_stage%d" % self.stage)
            self.dl_tr, self.dl_val = self.get_basic_generators()
            if self.unpack_data:
                # unpack npz -> npy once; training then memory-maps the npy files
                self.print_to_log_file("unpacking dataset")
                unpack_dataset(self.folder_with_preprocessed_data)
                self.print_to_log_file("done")
            else:
                self.print_to_log_file(
                    "INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you "
                    "will wait all winter for your model to finish!")
            self.tr_gen, self.val_gen = get_default_augmentation(self.dl_tr, self.dl_val,
                                                                 self.data_aug_params[
                                                                     'patch_size_for_spatialtransform'],
                                                                 self.data_aug_params)
            self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())),
                                   also_print_to_console=False)
            self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())),
                                   also_print_to_console=False)
        else:
            pass
        self.initialize_network()
        self.initialize_optimizer_and_scheduler()
        self.was_initialized = True
    def initialize_network(self):
        """
        This is specific to the U-Net and must be adapted for other network architectures.
        Builds a Generic_UNet (2D or 3D depending on self.threeD) from the plan-derived
        architecture parameters and moves it to GPU if available.
        :return:
        """
        # self.print_to_log_file(self.net_num_pool_op_kernel_sizes)
        # self.print_to_log_file(self.net_conv_kernel_sizes)
        net_numpool = len(self.net_num_pool_op_kernel_sizes)
        # pick 2D or 3D building blocks to match the configured dimensionality
        if self.threeD:
            conv_op = nn.Conv3d
            dropout_op = nn.Dropout3d
            norm_op = nn.InstanceNorm3d
        else:
            conv_op = nn.Conv2d
            dropout_op = nn.Dropout2d
            norm_op = nn.InstanceNorm2d
        norm_op_kwargs = {'eps': 1e-5, 'affine': True}
        dropout_op_kwargs = {'p': 0, 'inplace': True}  # p=0: dropout effectively disabled
        net_nonlin = nn.LeakyReLU
        net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
        self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, net_numpool,
                                    self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op,
                                    dropout_op_kwargs,
                                    net_nonlin, net_nonlin_kwargs, False, False, lambda x: x, InitWeights_He(1e-2),
                                    self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
        # softmax is only applied at inference time; training uses the raw logits with DC_and_CE_loss
        self.network.inference_apply_nonlin = softmax_helper
        if torch.cuda.is_available():
            self.network.cuda()
def initialize_optimizer_and_scheduler(self):
assert self.network is not None, "self.initialize_network must be called first"
self.optimizer = torch.optim.Adam(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,
amsgrad=True)
self.lr_scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='min', factor=0.2,
patience=self.lr_scheduler_patience,
verbose=True, threshold=self.lr_scheduler_eps,
threshold_mode="abs")
    def plot_network_architecture(self):
        """
        Best-effort rendering of the network graph to network_architecture.pdf using
        hiddenlayer; on any failure the network is printed to the log instead.
        """
        try:
            from batchgenerators.utilities.file_and_folder_operations import join
            import hiddenlayer as hl
            if torch.cuda.is_available():
                g = hl.build_graph(self.network, torch.rand((2, self.num_input_channels, *self.patch_size)).cuda(),
                                   transforms=None)
            else:
                g = hl.build_graph(self.network, torch.rand((1, self.num_input_channels, *self.patch_size)),
                                   transforms=None)
            g.save(join(self.output_folder, "network_architecture.pdf"))
            del g
        except Exception as e:
            # plotting is optional; never let it break training
            self.print_to_log_file("Unable to plot network architecture:")
            self.print_to_log_file(e)
            self.print_to_log_file("\nprinting the network instead:\n")
            self.print_to_log_file(self.network)
            self.print_to_log_file("\n")
        finally:
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
def run_training(self):
dct = OrderedDict()
for k in self.__dir__():
if not k.startswith("__"):
if not callable(getattr(self, k)):
dct[k] = str(getattr(self, k))
del dct['plans']
del dct['intensity_properties']
del dct['dataset']
del dct['dataset_tr']
del dct['dataset_val']
save_json(dct, join(self.output_folder, "debug.json"))
import shutil
shutil.copy(self.plans_file, join(self.output_folder_base, "plans.pkl"))
super(nnUNetTrainer, self).run_training()
def load_plans_file(self):
    """Load the pickled experiment plan from self.plans_file into self.plans.

    This is what actually configures the entire experiment. The plans file is
    generated by experiment planning.
    :return:
    """
    self.plans = load_pickle(self.plans_file)
def process_plans(self, plans):
    """Unpack the plans dict into trainer attributes (batch/patch size, network
    topology, normalization, transpose axes, ...) for the configured stage.

    :param plans: plans dictionary as produced by experiment planning
    """
    if self.stage is None:
        # Without an explicit stage we can only proceed if the plans define exactly one.
        assert len(list(plans['plans_per_stage'].keys())) == 1, \
            "If self.stage is None then there can be only one stage in the plans file. That seems to not be the " \
            "case. Please specify which stage of the cascade must be trained"
        self.stage = list(plans['plans_per_stage'].keys())[0]
    self.plans = plans

    stage_plans = self.plans['plans_per_stage'][self.stage]
    self.batch_size = stage_plans['batch_size']
    self.net_pool_per_axis = stage_plans['num_pool_per_axis']
    self.patch_size = np.array(stage_plans['patch_size']).astype(int)
    self.do_dummy_2D_aug = stage_plans['do_dummy_2D_data_aug']

    if 'pool_op_kernel_sizes' not in stage_plans.keys():
        assert 'num_pool_per_axis' in stage_plans.keys()
        self.print_to_log_file("WARNING! old plans file with missing pool_op_kernel_sizes. Attempting to fix it...")
        # Reconstruct the pooling kernels: axes with fewer pooling steps keep
        # kernel 1 until their pooling begins, then switch to 2.
        self.net_num_pool_op_kernel_sizes = []
        for i in range(max(self.net_pool_per_axis)):
            curr = []
            for j in self.net_pool_per_axis:
                if (max(self.net_pool_per_axis) - j) <= i:
                    curr.append(2)
                else:
                    curr.append(1)
            self.net_num_pool_op_kernel_sizes.append(curr)
    else:
        self.net_num_pool_op_kernel_sizes = stage_plans['pool_op_kernel_sizes']

    if 'conv_kernel_sizes' not in stage_plans.keys():
        self.print_to_log_file("WARNING! old plans file with missing conv_kernel_sizes. Attempting to fix it...")
        # Fall back to 3x3(x3) convolutions at every resolution level.
        self.net_conv_kernel_sizes = [[3] * len(self.net_pool_per_axis)] * (max(self.net_pool_per_axis) + 1)
    else:
        self.net_conv_kernel_sizes = stage_plans['conv_kernel_sizes']

    self.pad_all_sides = None  # self.patch_size
    self.intensity_properties = plans['dataset_properties']['intensityproperties']
    self.normalization_schemes = plans['normalization_schemes']
    self.base_num_features = plans['base_num_features']
    self.num_input_channels = plans['num_modalities']
    self.num_classes = plans['num_classes'] + 1  # background is no longer in num_classes
    self.classes = plans['all_classes']
    self.use_mask_for_norm = plans['use_mask_for_norm']
    self.only_keep_largest_connected_component = plans['keep_only_largest_region']
    self.min_region_size_per_class = plans['min_region_size_per_class']
    self.min_size_per_class = None  # DONT USE THIS. plans['min_size_per_class']

    if plans.get('transpose_forward') is None or plans.get('transpose_backward') is None:
        # Legacy plans files predate the transpose mechanism; assume identity.
        print("WARNING! You seem to have data that was preprocessed with a previous version of nnU-Net. "
              "You should rerun preprocessing. We will proceed and assume that both transpose_foward "
              "and transpose_backward are [0, 1, 2]. If that is not correct then weird things will happen!")
        plans['transpose_forward'] = [0, 1, 2]
        plans['transpose_backward'] = [0, 1, 2]
    self.transpose_forward = plans['transpose_forward']
    self.transpose_backward = plans['transpose_backward']

    # Dimensionality follows directly from the patch size length.
    if len(self.patch_size) == 2:
        self.threeD = False
    elif len(self.patch_size) == 3:
        self.threeD = True
    else:
        raise RuntimeError("invalid patch size in plans file: %s" % str(self.patch_size))

    if "conv_per_stage" in plans.keys():  # this has been added to the plans only recently
        self.conv_per_stage = plans['conv_per_stage']
    else:
        self.conv_per_stage = 2
def load_dataset(self):
    """Populate self.dataset from the preprocessed-data folder.

    Note: calls the module-level load_dataset helper; the method merely shadows
    that name as an attribute.
    """
    self.dataset = load_dataset(self.folder_with_preprocessed_data)
def get_basic_generators(self):
    """Create the (unaugmented) training and validation data loaders.

    :return: tuple (dl_tr, dl_val); DataLoader3D instances when self.threeD,
        DataLoader2D otherwise
    """
    self.load_dataset()
    self.do_split()

    if self.threeD:
        # Training uses basic_generator_patch_size, validation the plain patch_size
        # (presumably the former is enlarged for spatial augmentation -- TODO confirm).
        dl_tr = DataLoader3D(self.dataset_tr, self.basic_generator_patch_size, self.patch_size, self.batch_size,
                             False, oversample_foreground_percent=self.oversample_foreground_percent,
                             pad_mode="constant", pad_sides=self.pad_all_sides, memmap_mode='r')
        dl_val = DataLoader3D(self.dataset_val, self.patch_size, self.patch_size, self.batch_size, False,
                              oversample_foreground_percent=self.oversample_foreground_percent,
                              pad_mode="constant", pad_sides=self.pad_all_sides, memmap_mode='r')
    else:
        dl_tr = DataLoader2D(self.dataset_tr, self.basic_generator_patch_size, self.patch_size, self.batch_size,
                             oversample_foreground_percent=self.oversample_foreground_percent,
                             pad_mode="constant", pad_sides=self.pad_all_sides, memmap_mode='r')
        dl_val = DataLoader2D(self.dataset_val, self.patch_size, self.patch_size, self.batch_size,
                              oversample_foreground_percent=self.oversample_foreground_percent,
                              pad_mode="constant", pad_sides=self.pad_all_sides, memmap_mode='r')
    return dl_tr, dl_val
def preprocess_patient(self, input_files):
    """
    Used to predict new unseen data. Not used for the preprocessing of the training/test data
    :param input_files: list of raw image file paths (presumably one per modality -- confirm against callers)
    :return: (data, seg, properties) as returned by the preprocessor's preprocess_test_case
    """
    from nnunet.training.model_restore import recursive_find_python_class
    # Older plans files do not record a preprocessor name; fall back to the defaults.
    preprocessor_name = self.plans.get('preprocessor_name')
    if preprocessor_name is None:
        if self.threeD:
            preprocessor_name = "GenericPreprocessor"
        else:
            preprocessor_name = "PreprocessorFor2D"

    print("using preprocessor", preprocessor_name)
    # Resolve the preprocessor class by name anywhere under nnunet.preprocessing.
    preprocessor_class = recursive_find_python_class([join(nnunet.__path__[0], "preprocessing")],
                                                     preprocessor_name,
                                                     current_module="nnunet.preprocessing")
    assert preprocessor_class is not None, "Could not find preprocessor %s in nnunet.preprocessing" % \
                                           preprocessor_name
    preprocessor = preprocessor_class(self.normalization_schemes, self.use_mask_for_norm,
                                      self.transpose_forward, self.intensity_properties)

    d, s, properties = preprocessor.preprocess_test_case(input_files,
                                                         self.plans['plans_per_stage'][self.stage][
                                                             'current_spacing'])
    return d, s, properties
def preprocess_predict_nifti(self, input_files: List[str], output_file: str = None,
                             softmax_ouput_file: str = None, mixed_precision: bool = True) -> None:
    """
    Use this to predict new data: preprocess, run inference, export the result as nifti.
    :param input_files: paths of the raw input images
    :param output_file: where the predicted segmentation nifti is written
    :param softmax_ouput_file: optional path for saving the softmax probabilities
    :param mixed_precision: run inference under autocast if True
    :return:
    """
    print("preprocessing...")
    d, s, properties = self.preprocess_patient(input_files)
    print("predicting...")
    # Index [1] selects the softmax output of (segmentation, softmax).
    pred = self.predict_preprocessed_data_return_seg_and_softmax(d, do_mirroring=self.data_aug_params["do_mirror"],
                                                                 mirror_axes=self.data_aug_params['mirror_axes'],
                                                                 use_sliding_window=True, step_size=0.5,
                                                                 use_gaussian=True, pad_border_mode='constant',
                                                                 pad_kwargs={'constant_values': 0},
                                                                 verbose=True, all_in_gpu=False,
                                                                 mixed_precision=mixed_precision)[1]
    # Undo the axis transposition applied during preprocessing.
    pred = pred.transpose([0] + [i + 1 for i in self.transpose_backward])

    # Export parameters: take them from the plans if present, else use defaults.
    if 'segmentation_export_params' in self.plans.keys():
        force_separate_z = self.plans['segmentation_export_params']['force_separate_z']
        interpolation_order = self.plans['segmentation_export_params']['interpolation_order']
        interpolation_order_z = self.plans['segmentation_export_params']['interpolation_order_z']
    else:
        force_separate_z = None
        interpolation_order = 1
        interpolation_order_z = 0

    print("resampling to original spacing and nifti export...")
    save_segmentation_nifti_from_softmax(pred, output_file, properties, interpolation_order,
                                         self.regions_class_order, None, None, softmax_ouput_file,
                                         None, force_separate_z=force_separate_z,
                                         interpolation_order_z=interpolation_order_z)
    print("done")
def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True,
                                                     mirror_axes: Tuple[int] = None,
                                                     use_sliding_window: bool = True, step_size: float = 0.5,
                                                     use_gaussian: bool = True, pad_border_mode: str = 'constant',
                                                     pad_kwargs: dict = None, all_in_gpu: bool = False,
                                                     verbose: bool = True, mixed_precision: bool = True) -> Tuple[np.ndarray, np.ndarray]:
    """
    Run inference on already-preprocessed data via the network's predict_3D.
    :param data: preprocessed input (passed straight to predict_3D)
    :param do_mirroring: use test-time mirroring augmentation
    :param mirror_axes: axes to mirror; defaults to the training-time mirror axes
    :param use_sliding_window: use tiled sliding-window prediction
    :param step_size: sliding-window step as a fraction of the patch size
    :param use_gaussian: weight tile centers higher when aggregating
    :param pad_border_mode: padding mode handed through to predict_3D
    :param pad_kwargs: padding kwargs handed through to predict_3D
    :param all_in_gpu: keep intermediate tensors on the GPU
    :param verbose: print progress information
    :return: whatever predict_3D returns (segmentation and softmax)
    """
    if pad_border_mode == 'constant' and pad_kwargs is None:
        pad_kwargs = {'constant_values': 0}

    if do_mirroring and mirror_axes is None:
        mirror_axes = self.data_aug_params['mirror_axes']

    if do_mirroring:
        assert self.data_aug_params["do_mirror"], "Cannot do mirroring as test time augmentation when training " \
                                                  "was done without mirroring"

    valid = list((SegmentationNetwork, nn.DataParallel))
    assert isinstance(self.network, tuple(valid))

    # Remember the train/eval state so it can be restored after inference.
    current_mode = self.network.training
    self.network.eval()
    ret = self.network.predict_3D(data, do_mirroring=do_mirroring, mirror_axes=mirror_axes,
                                  use_sliding_window=use_sliding_window, step_size=step_size,
                                  patch_size=self.patch_size, regions_class_order=self.regions_class_order,
                                  use_gaussian=use_gaussian, pad_border_mode=pad_border_mode,
                                  pad_kwargs=pad_kwargs, all_in_gpu=all_in_gpu, verbose=verbose,
                                  mixed_precision=mixed_precision)
    self.network.train(current_mode)
    return ret
def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5,
             save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
             validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
             segmentation_export_kwargs: dict = None):
    """
    Predict the validation split, export niftis, evaluate the raw predictions and
    determine postprocessing.

    if debug=True then the temporary files generated for postprocessing determination will be kept

    :param do_mirroring: test-time mirroring (requires training with mirroring)
    :param use_sliding_window: tiled sliding-window inference
    :param step_size: sliding-window step as a fraction of the patch size
    :param save_softmax: additionally export the softmax probabilities
    :param use_gaussian: gaussian-weight tile centers during aggregation
    :param overwrite: re-predict cases whose output files already exist
    :param validation_folder_name: subfolder of the output folder for raw predictions
    :param debug: keep temporary postprocessing files
    :param all_in_gpu: keep intermediate inference tensors on the GPU
    :param segmentation_export_kwargs: overrides for the export parameters; defaults
        come from the plans (or fixed fallbacks)
    """
    current_mode = self.network.training
    self.network.eval()

    assert self.was_initialized, "must initialize, ideally with checkpoint (or train first)"
    if self.dataset_val is None:
        self.load_dataset()
        self.do_split()

    if segmentation_export_kwargs is None:
        if 'segmentation_export_params' in self.plans.keys():
            force_separate_z = self.plans['segmentation_export_params']['force_separate_z']
            interpolation_order = self.plans['segmentation_export_params']['interpolation_order']
            interpolation_order_z = self.plans['segmentation_export_params']['interpolation_order_z']
        else:
            force_separate_z = None
            interpolation_order = 1
            interpolation_order_z = 0
    else:
        force_separate_z = segmentation_export_kwargs['force_separate_z']
        interpolation_order = segmentation_export_kwargs['interpolation_order']
        interpolation_order_z = segmentation_export_kwargs['interpolation_order_z']

    # predictions as they come from the network go here
    output_folder = join(self.output_folder, validation_folder_name)
    maybe_mkdir_p(output_folder)
    # this is for debug purposes
    my_input_args = {'do_mirroring': do_mirroring,
                     'use_sliding_window': use_sliding_window,
                     'step_size': step_size,
                     'save_softmax': save_softmax,
                     'use_gaussian': use_gaussian,
                     'overwrite': overwrite,
                     'validation_folder_name': validation_folder_name,
                     'debug': debug,
                     'all_in_gpu': all_in_gpu,
                     'segmentation_export_kwargs': segmentation_export_kwargs,
                     }
    save_json(my_input_args, join(output_folder, "validation_args.json"))

    if do_mirroring:
        if not self.data_aug_params['do_mirror']:
            raise RuntimeError("We did not train with mirroring so you cannot do inference with mirroring enabled")
        mirror_axes = self.data_aug_params['mirror_axes']
    else:
        mirror_axes = ()

    pred_gt_tuples = []

    export_pool = Pool(default_num_threads)
    results = []

    for k in self.dataset_val.keys():
        properties = load_pickle(self.dataset[k]['properties_file'])
        fname = properties['list_of_data_files'][0].split("/")[-1][:-12]
        if overwrite or (not isfile(join(output_folder, fname + ".nii.gz"))) or \
                (save_softmax and not isfile(join(output_folder, fname + ".npz"))):
            data = np.load(self.dataset[k]['data_file'])['data']

            print(k, data.shape)
            # The last channel holds the segmentation; -1 marks regions outside the mask.
            data[-1][data[-1] == -1] = 0

            softmax_pred = self.predict_preprocessed_data_return_seg_and_softmax(data[:-1],
                                                                                 do_mirroring=do_mirroring,
                                                                                 mirror_axes=mirror_axes,
                                                                                 use_sliding_window=use_sliding_window,
                                                                                 step_size=step_size,
                                                                                 use_gaussian=use_gaussian,
                                                                                 all_in_gpu=all_in_gpu,
                                                                                 mixed_precision=self.fp16)[1]

            softmax_pred = softmax_pred.transpose([0] + [i + 1 for i in self.transpose_backward])

            if save_softmax:
                softmax_fname = join(output_folder, fname + ".npz")
            else:
                softmax_fname = None

            """There is a problem with python process communication that prevents us from communicating obejcts
            larger than 2 GB between processes (basically when the length of the pickle string that will be sent is
            communicated by the multiprocessing.Pipe object then the placeholder (\%i I think) does not allow for long
            enough strings (lol). This could be fixed by changing i to l (for long) but that would require manually
            patching system python code. We circumvent that problem here by saving softmax_pred to a npy file that will
            then be read (and finally deleted) by the Process. save_segmentation_nifti_from_softmax can take either
            filename or np.ndarray and will handle this automatically"""
            if np.prod(softmax_pred.shape) > (2e9 / 4 * 0.85):  # *0.85 just to be save
                np.save(join(output_folder, fname + ".npy"), softmax_pred)
                softmax_pred = join(output_folder, fname + ".npy")

            results.append(export_pool.starmap_async(save_segmentation_nifti_from_softmax,
                                                     ((softmax_pred, join(output_folder, fname + ".nii.gz"),
                                                       properties, interpolation_order, self.regions_class_order,
                                                       None, None,
                                                       softmax_fname, None, force_separate_z,
                                                       interpolation_order_z),
                                                      )
                                                     )
                           )

        pred_gt_tuples.append([join(output_folder, fname + ".nii.gz"),
                               join(self.gt_niftis_folder, fname + ".nii.gz")])

    _ = [i.get() for i in results]
    # Fixed: the worker pool was never closed, leaking processes; shut it down
    # cleanly once all export jobs have finished.
    export_pool.close()
    export_pool.join()
    self.print_to_log_file("finished prediction")

    # evaluate raw predictions
    self.print_to_log_file("evaluation of raw predictions")
    task = self.dataset_directory.split("/")[-1]
    job_name = self.experiment_name
    _ = aggregate_scores(pred_gt_tuples, labels=list(range(self.num_classes)),
                         json_output_file=join(output_folder, "summary.json"),
                         json_name=job_name + " val tiled %s" % (str(use_sliding_window)),
                         json_author="Fabian",
                         json_task=task, num_threads=default_num_threads)

    # in the old nnunet we would stop here. Now we add a postprocessing. This postprocessing can remove everything
    # except the largest connected component for each class. To see if this improves results, we do this for all
    # classes and then rerun the evaluation. Those classes for which this resulted in an improved dice score will
    # have this applied during inference as well
    self.print_to_log_file("determining postprocessing")
    determine_postprocessing(self.output_folder, self.gt_niftis_folder, validation_folder_name,
                             final_subf_name=validation_folder_name + "_postprocessed", debug=debug)
    # after this the final predictions for the validation set can be found in validation_folder_name_base + "_postprocessed"
    # They are always in that folder, even if no postprocessing was applied!

    # determining postprocessing on a per-fold basis may be OK for this fold but what if another fold finds another
    # postprocessing to be better? In this case we need to consolidate. At the time the consolidation is going to be
    # done we won't know what self.gt_niftis_folder was, so now we copy all the niftis into a separate folder to
    # be used later
    gt_nifti_folder = join(self.output_folder_base, "gt_niftis")
    maybe_mkdir_p(gt_nifti_folder)
    for f in subfiles(self.gt_niftis_folder, suffix=".nii.gz"):
        success = False
        attempts = 0
        e = None
        while not success and attempts < 10:
            try:
                shutil.copy(f, gt_nifti_folder)
                success = True
            except OSError as copy_error:
                # Fixed: Python 3 deletes the `except ... as` target when the
                # handler exits, so binding the exception to `e` directly left
                # `e` unbound for the `raise e` below. Keep it in a separate name.
                e = copy_error
                attempts += 1
                sleep(1)
        if not success:
            print("Could not copy gt nifti file %s into folder %s" % (f, gt_nifti_folder))
            if e is not None:
                raise e

    self.network.train(current_mode)
def run_online_evaluation(self, output, target):
    """Accumulate per-batch hard tp/fp/fn counts plus a foreground Dice estimate.

    :param output: network output with class scores in dim 1, shape (b, c, *spatial)
    :param target: ground-truth labels, shape (b, 1, *spatial)
    """
    with torch.no_grad():
        num_classes = output.shape[1]
        output_softmax = softmax_helper(output)
        output_seg = output_softmax.argmax(1)
        target = target[:, 0]
        axes = tuple(range(1, len(target.shape)))
        # One column per foreground class; class 0 (background) is excluded.
        tp_hard = torch.zeros((target.shape[0], num_classes - 1)).to(output_seg.device.index)
        fp_hard = torch.zeros((target.shape[0], num_classes - 1)).to(output_seg.device.index)
        fn_hard = torch.zeros((target.shape[0], num_classes - 1)).to(output_seg.device.index)
        for c in range(1, num_classes):
            tp_hard[:, c - 1] = sum_tensor((output_seg == c).float() * (target == c).float(), axes=axes)
            fp_hard[:, c - 1] = sum_tensor((output_seg == c).float() * (target != c).float(), axes=axes)
            fn_hard[:, c - 1] = sum_tensor((output_seg != c).float() * (target == c).float(), axes=axes)

        # Aggregate over the batch dimension before storing.
        tp_hard = tp_hard.sum(0, keepdim=False).detach().cpu().numpy()
        fp_hard = fp_hard.sum(0, keepdim=False).detach().cpu().numpy()
        fn_hard = fn_hard.sum(0, keepdim=False).detach().cpu().numpy()

        # Epsilon guards against division by zero for absent classes.
        self.online_eval_foreground_dc.append(list((2 * tp_hard) / (2 * tp_hard + fp_hard + fn_hard + 1e-8)))
        self.online_eval_tp.append(list(tp_hard))
        self.online_eval_fp.append(list(fp_hard))
        self.online_eval_fn.append(list(fn_hard))
def finish_online_evaluation(self):
    """Pool the per-batch tp/fp/fn counts into an approximate global foreground
    Dice per class, record the mean, log it, and reset the accumulators."""
    # Collapse the list of per-batch count vectors into one total per class.
    self.online_eval_tp = np.sum(self.online_eval_tp, 0)
    self.online_eval_fp = np.sum(self.online_eval_fp, 0)
    self.online_eval_fn = np.sum(self.online_eval_fn, 0)

    # Pooled Dice per class; classes that never occurred (0/0 -> nan) are skipped.
    global_dc_per_class = []
    for tp, fp, fn in zip(self.online_eval_tp, self.online_eval_fp, self.online_eval_fn):
        dice = 2 * tp / (2 * tp + fp + fn)
        if not np.isnan(dice):
            global_dc_per_class.append(dice)

    self.all_val_eval_metrics.append(np.mean(global_dc_per_class))

    self.print_to_log_file("Average global foreground Dice:", str(global_dc_per_class))
    self.print_to_log_file("(interpret this as an estimate for the Dice of the different classes. This is not "
                           "exact.)")

    # Reset the buffers for the next epoch.
    self.online_eval_foreground_dc = []
    self.online_eval_tp = []
    self.online_eval_fp = []
    self.online_eval_fn = []
def save_checkpoint(self, fname, save_optimizer=True):
    """Save the checkpoint via the base class and additionally write a companion
    <fname>.pkl holding what is needed to restore this trainer.

    :param fname: checkpoint file name
    :param save_optimizer: forwarded to the base-class checkpointing
    """
    super(nnUNetTrainer, self).save_checkpoint(fname, save_optimizer)

    info = OrderedDict()
    info['init'] = self.init_args          # constructor arguments for re-instantiation
    info['name'] = self.__class__.__name__
    info['class'] = str(self.__class__)
    info['plans'] = self.plans

    write_pickle(info, fname + ".pkl")
| 39,572
| 53.061475
| 142
|
py
|
CoTr
|
CoTr-main/CoTr_package/CoTr/network_architecture/neural_network.py
|
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from batchgenerators.augmentations.utils import pad_nd_image
from nnunet.utilities.random_stuff import no_op
from nnunet.utilities.to_torch import to_cuda, maybe_to_torch
from torch import nn
import torch
from scipy.ndimage.filters import gaussian_filter
from typing import Union, Tuple, List
from torch.cuda.amp import autocast
import CoTr
class NeuralNetwork(nn.Module):
    """Thin nn.Module extension with helpers to query and set the device."""

    def __init__(self):
        super(NeuralNetwork, self).__init__()

    def get_device(self):
        """Return "cpu" when the first parameter reports a CPU device, otherwise
        the CUDA device index of that parameter."""
        param_device = next(self.parameters()).device
        if param_device == "cpu":
            return "cpu"
        return next(self.parameters()).device.index

    def set_device(self, device):
        """Move the module to the CPU (device == "cpu") or the given CUDA index."""
        self.cpu() if device == "cpu" else self.cuda(device)

    def forward(self, x):
        # Subclasses must implement the actual forward pass.
        raise NotImplementedError
class SegmentationNetwork(NeuralNetwork):
def __init__(self):
    """Initialize the inference bookkeeping shared by all segmentation networks.

    Subclasses are expected to fill in the attributes below (conv_op,
    num_classes, input_shape_must_be_divisible_by, inference_apply_nonlin) so
    that the generic predict_2D/predict_3D machinery can introspect the network.
    """
    # Fixed: use our own class in super() so NeuralNetwork.__init__ is not
    # skipped in the MRO (the original called super(NeuralNetwork, self),
    # which jumps straight to nn.Module.__init__).
    super(SegmentationNetwork, self).__init__()

    # if we have 5 pooling then our patch size must be divisible by 2**5
    self.input_shape_must_be_divisible_by = None  # for example in a 2d network that does 5 pool in x and 6 pool
    # in y this would be (32, 64)

    # we need to know this because we need to know if we are a 2d or a 3d network
    self.conv_op = None  # nn.Conv2d or nn.Conv3d

    # this tells us how many channels we have in the output. Important for preallocation in inference
    self.num_classes = None  # number of channels in the output

    # depending on the loss, we do not hard code a nonlinearity into the architecture. To aggregate predictions
    # during inference, we need to apply the nonlinearity, however. So it is important to let the network know what
    # to apply in inference. For the most part this will be softmax
    self.inference_apply_nonlin = lambda x: x  # softmax_helper

    # This is for saving a gaussian importance map for inference. It weights voxels higher that are closer to the
    # center. Prediction at the borders are often less accurate and are thus downweighted. Creating these Gaussians
    # can be expensive, so it makes sense to save and reuse them.
    self._gaussian_3d = self._patch_size_for_gaussian_3d = None
    self._gaussian_2d = self._patch_size_for_gaussian_2d = None
def predict_3D(self, x: np.ndarray, do_mirroring: bool, mirror_axes: Tuple[int, ...] = (0, 1, 2),
               use_sliding_window: bool = False,
               step_size: float = 0.5, patch_size: Tuple[int, ...] = None, regions_class_order: Tuple[int, ...] = None,
               use_gaussian: bool = False, pad_border_mode: str = "constant",
               pad_kwargs: dict = None, all_in_gpu: bool = False,
               verbose: bool = True, mixed_precision: bool = True) -> Tuple[np.ndarray, np.ndarray]:
    """
    Use this function to predict a 3D image. It does not matter whether the network is a 2D or 3D U-Net, it will
    detect that automatically and run the appropriate code.

    When running predictions, you need to specify whether you want to run fully convolutional of sliding window
    based inference. We very strongly recommend you use sliding window with the default settings.

    It is the responsibility of the user to make sure the network is in the proper mode (eval for inference!). If
    the network is not in eval mode it will print a warning.

    :param x: Your input data. Must be a nd.ndarray of shape (c, x, y, z).
    :param do_mirroring: If True, use test time data augmentation in the form of mirroring
    :param mirror_axes: Determines which axes to use for mirroing. Per default, mirroring is done along all three
    axes
    :param use_sliding_window: if True, run sliding window prediction. Heavily recommended! This is also the default
    :param step_size: When running sliding window prediction, the step size determines the distance between adjacent
    predictions. The smaller the step size, the denser the predictions (and the longer it takes!). Step size is given
    as a fraction of the patch_size. 0.5 is the default and means that wen advance by patch_size * 0.5 between
    predictions. step_size cannot be larger than 1!
    :param patch_size: The patch size that was used for training the network. Do not use different patch sizes here,
    this will either crash or give potentially less accurate segmentations
    :param regions_class_order: Fabian only
    :param use_gaussian: (Only applies to sliding window prediction) If True, uses a Gaussian importance weighting
    to weigh predictions closer to the center of the current patch higher than those at the borders. The reason
    behind this is that the segmentation accuracy decreases towards the borders. Default (and recommended): True
    :param pad_border_mode: leave this alone
    :param pad_kwargs: leave this alone
    :param all_in_gpu: experimental. You probably want to leave this as is it
    :param verbose: Do you want a wall of text? If yes then set this to True
    :param mixed_precision: if True, will run inference in mixed precision with autocast()
    :return:
    """
    torch.cuda.empty_cache()

    assert step_size <= 1, 'step_size must be smaller than 1. Otherwise there will be a gap between consecutive ' \
                           'predictions'

    if verbose: print("debug: mirroring", do_mirroring, "mirror_axes", mirror_axes)

    assert self.get_device() != "cpu", "CPU not implemented"

    if pad_kwargs is None:
        pad_kwargs = {'constant_values': 0}

    # A very long time ago the mirror axes were (2, 3, 4) for a 3d network. This is just to intercept any old
    # code that uses this convention
    if len(mirror_axes):
        if self.conv_op == nn.Conv2d:
            if max(mirror_axes) > 1:
                raise ValueError("mirror axes. duh")
        if self.conv_op == nn.Conv3d or self.conv_op == CoTr.network_architecture.ResTranUnet.Conv3d_wd:
            if max(mirror_axes) > 2:
                raise ValueError("mirror axes. duh")

    if self.training:
        print('WARNING! Network is in train mode during inference. This may be intended, or not...')

    assert len(x.shape) == 4, "data must have shape (c,x,y,z)"

    if mixed_precision:
        context = autocast
    else:
        context = no_op

    with context():
        with torch.no_grad():
            # Dispatch on the conv op: Conv3d (or CoTr's weight-standardized variant)
            # -> 3d inference, Conv2d -> slice-wise 2d inference on the 3d volume.
            if self.conv_op == nn.Conv3d or self.conv_op == CoTr.network_architecture.ResTranUnet.Conv3d_wd:
                if use_sliding_window:
                    res = self._internal_predict_3D_3Dconv_tiled(x, step_size, do_mirroring, mirror_axes, patch_size,
                                                                 regions_class_order, use_gaussian, pad_border_mode,
                                                                 pad_kwargs=pad_kwargs, all_in_gpu=all_in_gpu,
                                                                 verbose=verbose)
                else:
                    res = self._internal_predict_3D_3Dconv(x, patch_size, do_mirroring, mirror_axes, regions_class_order,
                                                           pad_border_mode, pad_kwargs=pad_kwargs, verbose=verbose)
            elif self.conv_op == nn.Conv2d:
                if use_sliding_window:
                    res = self._internal_predict_3D_2Dconv_tiled(x, patch_size, do_mirroring, mirror_axes, step_size,
                                                                 regions_class_order, use_gaussian, pad_border_mode,
                                                                 pad_kwargs, all_in_gpu, False)
                else:
                    res = self._internal_predict_3D_2Dconv(x, patch_size, do_mirroring, mirror_axes, regions_class_order,
                                                           pad_border_mode, pad_kwargs, all_in_gpu, False)
            else:
                raise RuntimeError("Invalid conv op, cannot determine what dimensionality (2d/3d) the network is")

    return res
def predict_2D(self, x, do_mirroring: bool, mirror_axes: tuple = (0, 1, 2), use_sliding_window: bool = False,
               step_size: float = 0.5, patch_size: tuple = None, regions_class_order: tuple = None,
               use_gaussian: bool = False, pad_border_mode: str = "constant",
               pad_kwargs: dict = None, all_in_gpu: bool = False,
               verbose: bool = True, mixed_precision: bool = True) -> Tuple[np.ndarray, np.ndarray]:
    """
    Use this function to predict a 2D image. If this is a 3D U-Net it will crash because you cannot predict a 2D
    image with that (you dummy).

    When running predictions, you need to specify whether you want to run fully convolutional of sliding window
    based inference. We very strongly recommend you use sliding window with the default settings.

    It is the responsibility of the user to make sure the network is in the proper mode (eval for inference!). If
    the network is not in eval mode it will print a warning.

    :param x: Your input data. Must be a nd.ndarray of shape (c, x, y).
    :param do_mirroring: If True, use test time data augmentation in the form of mirroring
    :param mirror_axes: Determines which axes to use for mirroing. Per default, mirroring is done along all three
    axes
    :param use_sliding_window: if True, run sliding window prediction. Heavily recommended! This is also the default
    :param step_size: When running sliding window prediction, the step size determines the distance between adjacent
    predictions. The smaller the step size, the denser the predictions (and the longer it takes!). Step size is given
    as a fraction of the patch_size. 0.5 is the default and means that wen advance by patch_size * 0.5 between
    predictions. step_size cannot be larger than 1!
    :param patch_size: The patch size that was used for training the network. Do not use different patch sizes here,
    this will either crash or give potentially less accurate segmentations
    :param regions_class_order: Fabian only
    :param use_gaussian: (Only applies to sliding window prediction) If True, uses a Gaussian importance weighting
    to weigh predictions closer to the center of the current patch higher than those at the borders. The reason
    behind this is that the segmentation accuracy decreases towards the borders. Default (and recommended): True
    :param pad_border_mode: leave this alone
    :param pad_kwargs: leave this alone
    :param all_in_gpu: experimental. You probably want to leave this as is it
    :param verbose: Do you want a wall of text? If yes then set this to True
    :return:
    """
    torch.cuda.empty_cache()

    assert step_size <= 1, 'step_size must be smaler than 1. Otherwise there will be a gap between consecutive ' \
                           'predictions'

    if self.conv_op == nn.Conv3d:
        raise RuntimeError("Cannot predict 2d if the network is 3d. Dummy.")

    if verbose: print("debug: mirroring", do_mirroring, "mirror_axes", mirror_axes)

    assert self.get_device() != "cpu", "CPU not implemented"

    if pad_kwargs is None:
        pad_kwargs = {'constant_values': 0}

    # A very long time ago the mirror axes were (2, 3) for a 2d network. This is just to intercept any old
    # code that uses this convention
    # NOTE(review): with the default mirror_axes=(0, 1, 2) and do_mirroring=True this check raises; callers
    # appear to be expected to pass 2d mirror axes explicitly -- confirm whether the default is intentional.
    if len(mirror_axes):
        if max(mirror_axes) > 1:
            raise ValueError("mirror axes. duh")

    if self.training:
        print('WARNING! Network is in train mode during inference. This may be intended, or not...')

    assert len(x.shape) == 3, "data must have shape (c,x,y)"

    if mixed_precision:
        context = autocast
    else:
        context = no_op

    with context():
        with torch.no_grad():
            if self.conv_op == nn.Conv2d:
                if use_sliding_window:
                    res = self._internal_predict_2D_2Dconv_tiled(x, step_size, do_mirroring, mirror_axes, patch_size,
                                                                 regions_class_order, use_gaussian, pad_border_mode,
                                                                 pad_kwargs, all_in_gpu, verbose)
                else:
                    res = self._internal_predict_2D_2Dconv(x, patch_size, do_mirroring, mirror_axes, regions_class_order,
                                                           pad_border_mode, pad_kwargs, verbose)
            else:
                raise RuntimeError("Invalid conv op, cannot determine what dimensionality (2d/3d) the network is")

    return res
@staticmethod
def _get_gaussian(patch_size, sigma_scale=1. / 8) -> np.ndarray:
    """Build a float32 Gaussian importance map for weighting sliding-window tiles.

    A unit impulse at the patch centre is blurred with a per-axis sigma of
    patch_size * sigma_scale, then rescaled so the peak equals 1.

    :param patch_size: spatial size of the patch, one entry per axis
    :param sigma_scale: per-axis sigma expressed as a fraction of the patch size
    :return: array of shape patch_size with strictly positive weights
    """
    centre = [axis_len // 2 for axis_len in patch_size]
    sigmas = [axis_len * sigma_scale for axis_len in patch_size]

    impulse = np.zeros(patch_size)
    impulse[tuple(centre)] = 1

    importance_map = gaussian_filter(impulse, sigmas, 0, mode='constant', cval=0)
    importance_map = importance_map / np.max(importance_map) * 1
    importance_map = importance_map.astype(np.float32)

    # a weight of exactly 0 would produce nans when normalizing the aggregated
    # predictions, so clamp zeros to the smallest non-zero weight
    importance_map[importance_map == 0] = np.min(importance_map[importance_map != 0])

    return importance_map
@staticmethod
def _compute_steps_for_sliding_window(patch_size: Tuple[int, ...], image_size: Tuple[int, ...], step_size: float) -> List[List[int]]:
    """Compute the start coordinates of the sliding-window tiles for every axis.

    :param patch_size: patch size per axis
    :param image_size: (padded) image size per axis; must be >= patch_size everywhere
    :param step_size: step between tiles as a fraction of the patch size, in (0, 1]
    :return: one list of tile start coordinates per axis
    """
    # Fixed: the original asserted the list comprehension itself, which is a
    # non-empty list and therefore always truthy; all() actually enforces the condition.
    assert all(i >= j for i, j in zip(image_size, patch_size)), "image size must be as large or larger than patch_size"
    assert 0 < step_size <= 1, 'step_size must be larger than 0 and smaller or equal to 1'

    # our step width is patch_size*step_size at most, but can be narrower. For example if we have image size of
    # 110, patch size of 32 and step_size of 0.5, then we want to make 4 steps starting at coordinate 0, 27, 55, 78
    target_step_sizes_in_voxels = [i * step_size for i in patch_size]

    num_steps = [int(np.ceil((i - k) / j)) + 1 for i, j, k in zip(image_size, target_step_sizes_in_voxels, patch_size)]

    steps = []
    for dim in range(len(patch_size)):
        # the highest step value for this dimension is
        max_step_value = image_size[dim] - patch_size[dim]
        if num_steps[dim] > 1:
            # spread the steps evenly so the last tile ends exactly at the image border
            actual_step_size = max_step_value / (num_steps[dim] - 1)
        else:
            actual_step_size = 99999999999  # does not matter because there is only one step at 0

        steps_here = [int(np.round(actual_step_size * i)) for i in range(num_steps[dim])]
        steps.append(steps_here)

    return steps
    def _internal_predict_3D_3Dconv_tiled(self, x: np.ndarray, step_size: float, do_mirroring: bool, mirror_axes: tuple,
                                          patch_size: tuple, regions_class_order: tuple, use_gaussian: bool,
                                          pad_border_mode: str, pad_kwargs: dict, all_in_gpu: bool,
                                          verbose: bool) -> Tuple[np.ndarray, np.ndarray]:
        """Sliding-window (tiled) 3D inference over a full volume. Requires a GPU.

        The volume ``x`` (c, x, y, z) is padded to at least ``patch_size``, covered
        with overlapping patches (stride = ``step_size`` * patch size), each patch is
        predicted (optionally with test-time mirroring) and the overlapping
        predictions are averaged, optionally weighted by a Gaussian importance map.
        Returns (segmentation, per-class probabilities); with ``all_in_gpu`` the
        aggregation happens in half precision on the GPU.
        """
        # better safe than sorry
        assert len(x.shape) == 4, "x must be (c, x, y, z)"
        assert self.get_device() != "cpu"
        if verbose: print("step_size:", step_size)
        if verbose: print("do mirror:", do_mirroring)

        assert patch_size is not None, "patch_size cannot be None for tiled prediction"

        # for sliding window inference the image must at least be as large as the patch size. It does not matter
        # whether the shape is divisible by 2**num_pool as long as the patch size is
        data, slicer = pad_nd_image(x, patch_size, pad_border_mode, pad_kwargs, True, None)
        data_shape = data.shape  # still c, x, y, z

        # compute the steps for sliding window
        steps = self._compute_steps_for_sliding_window(patch_size, data_shape[1:], step_size)
        num_tiles = len(steps[0]) * len(steps[1]) * len(steps[2])

        if verbose:
            print("data shape:", data_shape)
            print("patch size:", patch_size)
            print("steps (x, y, and z):", steps)
            print("number of tiles:", num_tiles)

        # we only need to compute that once. It can take a while to compute this due to the large sigma in
        # gaussian_filter
        if use_gaussian and num_tiles > 1:
            # cache the Gaussian on the instance so repeated calls with the same patch size skip recomputation
            if self._gaussian_3d is None or not all(
                    [i == j for i, j in zip(patch_size, self._patch_size_for_gaussian_3d)]):
                if verbose: print('computing Gaussian')
                gaussian_importance_map = self._get_gaussian(patch_size, sigma_scale=1. / 8)

                self._gaussian_3d = gaussian_importance_map
                self._patch_size_for_gaussian_3d = patch_size
            else:
                if verbose: print("using precomputed Gaussian")
                gaussian_importance_map = self._gaussian_3d

            gaussian_importance_map = torch.from_numpy(gaussian_importance_map).cuda(self.get_device(),
                                                                                     non_blocking=True)

        else:
            gaussian_importance_map = None

        if all_in_gpu:
            # If we run the inference in GPU only (meaning all tensors are allocated on the GPU, this reduces
            # CPU-GPU communication but required more GPU memory) we need to preallocate a few things on GPU
            if use_gaussian and num_tiles > 1:
                # half precision for the outputs should be good enough. If the outputs here are half, the
                # gaussian_importance_map should be as well
                gaussian_importance_map = gaussian_importance_map.half()

                # make sure we did not round anything to 0
                gaussian_importance_map[gaussian_importance_map == 0] = gaussian_importance_map[
                    gaussian_importance_map != 0].min()

                add_for_nb_of_preds = gaussian_importance_map
            else:
                add_for_nb_of_preds = torch.ones(data.shape[1:], device=self.get_device())

            if verbose: print("initializing result array (on GPU)")
            aggregated_results = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half,
                                             device=self.get_device())

            if verbose: print("moving data to GPU")
            data = torch.from_numpy(data).cuda(self.get_device(), non_blocking=True)

            if verbose: print("initializing result_numsamples (on GPU)")
            aggregated_nb_of_predictions = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half,
                                                       device=self.get_device())
        else:
            if use_gaussian and num_tiles > 1:
                add_for_nb_of_preds = self._gaussian_3d
            else:
                # per-patch weights must be patch-sized, not image-sized (old image-sized
                # version kept below for reference)
                # add_for_nb_of_preds = np.ones(data.shape[1:], dtype=np.float32)
                add_for_nb_of_preds = np.ones(patch_size, dtype=np.float32)
            aggregated_results = np.zeros([self.num_classes] + list(data.shape[1:]), dtype=np.float32)
            aggregated_nb_of_predictions = np.zeros([self.num_classes] + list(data.shape[1:]), dtype=np.float32)

        # slide the patch over the padded volume, accumulating weighted predictions and weights
        for x in steps[0]:
            lb_x = x
            ub_x = x + patch_size[0]
            for y in steps[1]:
                lb_y = y
                ub_y = y + patch_size[1]
                for z in steps[2]:
                    lb_z = z
                    ub_z = z + patch_size[2]

                    predicted_patch = self._internal_maybe_mirror_and_pred_3D(
                        data[None, :, lb_x:ub_x, lb_y:ub_y, lb_z:ub_z], mirror_axes, do_mirroring,
                        gaussian_importance_map)[0]

                    if all_in_gpu:
                        predicted_patch = predicted_patch.half()
                    else:
                        predicted_patch = predicted_patch.cpu().numpy()

                    aggregated_results[:, lb_x:ub_x, lb_y:ub_y, lb_z:ub_z] += predicted_patch
                    aggregated_nb_of_predictions[:, lb_x:ub_x, lb_y:ub_y, lb_z:ub_z] += add_for_nb_of_preds

        # we reverse the padding here (remember that we padded the input to be at least as large as the patch size
        slicer = tuple(
            [slice(0, aggregated_results.shape[i]) for i in
             range(len(aggregated_results.shape) - (len(slicer) - 1))] + slicer[1:])
        aggregated_results = aggregated_results[slicer]
        aggregated_nb_of_predictions = aggregated_nb_of_predictions[slicer]

        # computing the class_probabilities by dividing the aggregated result with result_numsamples
        class_probabilities = aggregated_results / aggregated_nb_of_predictions

        if regions_class_order is None:
            predicted_segmentation = class_probabilities.argmax(0)
        else:
            # region-based training: channels are independent sigmoid maps, threshold at 0.5
            if all_in_gpu:
                class_probabilities_here = class_probabilities.detach().cpu().numpy()
            else:
                class_probabilities_here = class_probabilities
            predicted_segmentation = np.zeros(class_probabilities_here.shape[1:], dtype=np.float32)
            for i, c in enumerate(regions_class_order):
                predicted_segmentation[class_probabilities_here[i] > 0.5] = c

        if all_in_gpu:
            if verbose: print("copying results to CPU")

            if regions_class_order is None:
                predicted_segmentation = predicted_segmentation.detach().cpu().numpy()

            class_probabilities = class_probabilities.detach().cpu().numpy()

        if verbose: print("prediction done")
        return predicted_segmentation, class_probabilities
    def _internal_predict_2D_2Dconv(self, x: np.ndarray, min_size: Tuple[int, int], do_mirroring: bool,
                                    mirror_axes: tuple = (0, 1, 2), regions_class_order: tuple = None,
                                    pad_border_mode: str = "constant", pad_kwargs: dict = None,
                                    verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:
        """
        This one does fully convolutional inference. No sliding window

        Pads x (c, x, y) to at least min_size and to a multiple of
        self.input_shape_must_be_divisible_by, runs a single forward pass
        (optionally mirrored), crops the padding away and returns
        (segmentation, per-class probabilities) as numpy arrays. Requires a GPU.

        NOTE(review): the default mirror_axes=(0, 1, 2) names three axes for a 2D
        method; callers presumably pass 2D-appropriate axes — verify at call sites.
        """
        assert len(x.shape) == 3, "x must be (c, x, y)"

        assert self.get_device() != "cpu"
        assert self.input_shape_must_be_divisible_by is not None, 'input_shape_must_be_divisible_by must be set to ' \
                                                                  'run _internal_predict_2D_2Dconv'
        if verbose: print("do mirror:", do_mirroring)

        data, slicer = pad_nd_image(x, min_size, pad_border_mode, pad_kwargs, True,
                                    self.input_shape_must_be_divisible_by)

        predicted_probabilities = self._internal_maybe_mirror_and_pred_2D(data[None], mirror_axes, do_mirroring,
                                                                          None)[0]

        # undo the padding: keep all class channels, crop only the spatial dims
        slicer = tuple(
            [slice(0, predicted_probabilities.shape[i]) for i in range(len(predicted_probabilities.shape) -
                                                                       (len(slicer) - 1))] + slicer[1:])
        predicted_probabilities = predicted_probabilities[slicer]

        if regions_class_order is None:
            predicted_segmentation = predicted_probabilities.argmax(0)
            predicted_segmentation = predicted_segmentation.detach().cpu().numpy()
            predicted_probabilities = predicted_probabilities.detach().cpu().numpy()
        else:
            # region-based training: channels are independent sigmoid maps, threshold at 0.5
            predicted_probabilities = predicted_probabilities.detach().cpu().numpy()
            predicted_segmentation = np.zeros(predicted_probabilities.shape[1:], dtype=np.float32)
            for i, c in enumerate(regions_class_order):
                predicted_segmentation[predicted_probabilities[i] > 0.5] = c

        return predicted_segmentation, predicted_probabilities
    def _internal_predict_3D_3Dconv(self, x: np.ndarray, min_size: Tuple[int, ...], do_mirroring: bool,
                                    mirror_axes: tuple = (0, 1, 2), regions_class_order: tuple = None,
                                    pad_border_mode: str = "constant", pad_kwargs: dict = None,
                                    verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:
        """
        This one does fully convolutional inference. No sliding window

        Pads x (c, x, y, z) to at least min_size and to a multiple of
        self.input_shape_must_be_divisible_by, runs a single forward pass
        (optionally mirrored), crops the padding away and returns
        (segmentation, per-class probabilities) as numpy arrays. Requires a GPU.
        """
        assert len(x.shape) == 4, "x must be (c, x, y, z)"

        assert self.get_device() != "cpu"
        assert self.input_shape_must_be_divisible_by is not None, 'input_shape_must_be_divisible_by must be set to ' \
                                                                  'run _internal_predict_3D_3Dconv'
        if verbose: print("do mirror:", do_mirroring)

        data, slicer = pad_nd_image(x, min_size, pad_border_mode, pad_kwargs, True,
                                    self.input_shape_must_be_divisible_by)

        predicted_probabilities = self._internal_maybe_mirror_and_pred_3D(data[None], mirror_axes, do_mirroring,
                                                                          None)[0]

        # undo the padding: keep all class channels, crop only the spatial dims
        slicer = tuple(
            [slice(0, predicted_probabilities.shape[i]) for i in range(len(predicted_probabilities.shape) -
                                                                       (len(slicer) - 1))] + slicer[1:])
        predicted_probabilities = predicted_probabilities[slicer]

        if regions_class_order is None:
            predicted_segmentation = predicted_probabilities.argmax(0)
            predicted_segmentation = predicted_segmentation.detach().cpu().numpy()
            predicted_probabilities = predicted_probabilities.detach().cpu().numpy()
        else:
            # region-based training: channels are independent sigmoid maps, threshold at 0.5
            predicted_probabilities = predicted_probabilities.detach().cpu().numpy()
            predicted_segmentation = np.zeros(predicted_probabilities.shape[1:], dtype=np.float32)
            for i, c in enumerate(regions_class_order):
                predicted_segmentation[predicted_probabilities[i] > 0.5] = c

        return predicted_segmentation, predicted_probabilities
    def _internal_maybe_mirror_and_pred_3D(self, x: Union[np.ndarray, torch.tensor], mirror_axes: tuple,
                                           do_mirroring: bool = True,
                                           mult: np.ndarray or torch.tensor = None) -> torch.tensor:
        """Forward pass with optional test-time mirroring (3D).

        Averages the network prediction over all 2**len(mirror_axes) flip
        combinations of the spatial axes. mirror_axes entries 0/1/2 address the
        spatial dims, which are tensor dims 2/3/4 of the (b, c, x, y, z) input.
        ``mult`` (e.g. the Gaussian importance map) is multiplied onto the result.
        Returns a CUDA tensor of shape (1, num_classes, x, y, z).
        """
        assert len(x.shape) == 5, 'x must be (b, c, x, y, z)'
        # everything in here takes place on the GPU. If x and mult are not yet on GPU this will be taken care of here
        # we now return a cuda tensor! Not numpy array!
        x = to_cuda(maybe_to_torch(x), gpu_id=self.get_device())
        result_torch = torch.zeros([1, self.num_classes] + list(x.shape[2:]),
                                   dtype=torch.float).cuda(self.get_device(), non_blocking=True)

        if mult is not None:
            mult = to_cuda(maybe_to_torch(mult), gpu_id=self.get_device())

        if do_mirroring:
            mirror_idx = 8
            num_results = 2 ** len(mirror_axes)
        else:
            mirror_idx = 1
            num_results = 1

        # m enumerates the 8 flip combinations; each contributes 1/num_results of the average.
        # Predictions on flipped input are flipped back before accumulation.
        for m in range(mirror_idx):
            if m == 0:
                pred = self.inference_apply_nonlin(self(x))
                result_torch += 1 / num_results * pred

            if m == 1 and (2 in mirror_axes):
                pred = self.inference_apply_nonlin(self(torch.flip(x, (4, ))))
                result_torch += 1 / num_results * torch.flip(pred, (4,))

            if m == 2 and (1 in mirror_axes):
                pred = self.inference_apply_nonlin(self(torch.flip(x, (3, ))))
                result_torch += 1 / num_results * torch.flip(pred, (3,))

            if m == 3 and (2 in mirror_axes) and (1 in mirror_axes):
                pred = self.inference_apply_nonlin(self(torch.flip(x, (4, 3))))
                result_torch += 1 / num_results * torch.flip(pred, (4, 3))

            if m == 4 and (0 in mirror_axes):
                pred = self.inference_apply_nonlin(self(torch.flip(x, (2, ))))
                result_torch += 1 / num_results * torch.flip(pred, (2,))

            if m == 5 and (0 in mirror_axes) and (2 in mirror_axes):
                pred = self.inference_apply_nonlin(self(torch.flip(x, (4, 2))))
                result_torch += 1 / num_results * torch.flip(pred, (4, 2))

            if m == 6 and (0 in mirror_axes) and (1 in mirror_axes):
                pred = self.inference_apply_nonlin(self(torch.flip(x, (3, 2))))
                result_torch += 1 / num_results * torch.flip(pred, (3, 2))

            if m == 7 and (0 in mirror_axes) and (1 in mirror_axes) and (2 in mirror_axes):
                pred = self.inference_apply_nonlin(self(torch.flip(x, (4, 3, 2))))
                result_torch += 1 / num_results * torch.flip(pred, (4, 3, 2))

        if mult is not None:
            result_torch[:, :] *= mult

        return result_torch
    def _internal_maybe_mirror_and_pred_2D(self, x: Union[np.ndarray, torch.tensor], mirror_axes: tuple,
                                           do_mirroring: bool = True,
                                           mult: np.ndarray or torch.tensor = None) -> torch.tensor:
        """Forward pass with optional test-time mirroring (2D).

        Averages the network prediction over all 2**len(mirror_axes) flip
        combinations of the spatial axes. mirror_axes entries 0/1 address the
        spatial dims, which are tensor dims 2/3 of the (b, c, x, y) input.
        ``mult`` (e.g. the Gaussian importance map) is multiplied onto the result.
        Returns a CUDA tensor of shape (b, num_classes, x, y).
        """
        # everything in here takes place on the GPU. If x and mult are not yet on GPU this will be taken care of here
        # we now return a cuda tensor! Not numpy array!
        assert len(x.shape) == 4, 'x must be (b, c, x, y)'
        x = to_cuda(maybe_to_torch(x), gpu_id=self.get_device())
        result_torch = torch.zeros([x.shape[0], self.num_classes] + list(x.shape[2:]),
                                   dtype=torch.float).cuda(self.get_device(), non_blocking=True)

        if mult is not None:
            mult = to_cuda(maybe_to_torch(mult), gpu_id=self.get_device())

        if do_mirroring:
            mirror_idx = 4
            num_results = 2 ** len(mirror_axes)
        else:
            mirror_idx = 1
            num_results = 1

        # m enumerates the 4 flip combinations; predictions on flipped input
        # are flipped back before being accumulated into the average
        for m in range(mirror_idx):
            if m == 0:
                pred = self.inference_apply_nonlin(self(x))
                result_torch += 1 / num_results * pred

            if m == 1 and (1 in mirror_axes):
                pred = self.inference_apply_nonlin(self(torch.flip(x, (3, ))))
                result_torch += 1 / num_results * torch.flip(pred, (3, ))

            if m == 2 and (0 in mirror_axes):
                pred = self.inference_apply_nonlin(self(torch.flip(x, (2, ))))
                result_torch += 1 / num_results * torch.flip(pred, (2, ))

            if m == 3 and (0 in mirror_axes) and (1 in mirror_axes):
                pred = self.inference_apply_nonlin(self(torch.flip(x, (3, 2))))
                result_torch += 1 / num_results * torch.flip(pred, (3, 2))

        if mult is not None:
            result_torch[:, :] *= mult

        return result_torch
def _internal_predict_2D_2Dconv_tiled(self, x: np.ndarray, step_size: float, do_mirroring: bool, mirror_axes: tuple,
patch_size: tuple, regions_class_order: tuple, use_gaussian: bool,
pad_border_mode: str, pad_kwargs: dict, all_in_gpu: bool,
verbose: bool) -> Tuple[np.ndarray, np.ndarray]:
# better safe than sorry
assert len(x.shape) == 3, "x must be (c, x, y)"
assert self.get_device() != "cpu"
if verbose: print("step_size:", step_size)
if verbose: print("do mirror:", do_mirroring)
assert patch_size is not None, "patch_size cannot be None for tiled prediction"
# for sliding window inference the image must at least be as large as the patch size. It does not matter
# whether the shape is divisible by 2**num_pool as long as the patch size is
data, slicer = pad_nd_image(x, patch_size, pad_border_mode, pad_kwargs, True, None)
data_shape = data.shape # still c, x, y
# compute the steps for sliding window
steps = self._compute_steps_for_sliding_window(patch_size, data_shape[1:], step_size)
num_tiles = len(steps[0]) * len(steps[1])
if verbose:
print("data shape:", data_shape)
print("patch size:", patch_size)
print("steps (x, y, and z):", steps)
print("number of tiles:", num_tiles)
# we only need to compute that once. It can take a while to compute this due to the large sigma in
# gaussian_filter
if use_gaussian and num_tiles > 1:
if self._gaussian_2d is None or not all(
[i == j for i, j in zip(patch_size, self._patch_size_for_gaussian_2d)]):
if verbose: print('computing Gaussian')
gaussian_importance_map = self._get_gaussian(patch_size, sigma_scale=1. / 8)
self._gaussian_2d = gaussian_importance_map
self._patch_size_for_gaussian_2d = patch_size
else:
if verbose: print("using precomputed Gaussian")
gaussian_importance_map = self._gaussian_2d
gaussian_importance_map = torch.from_numpy(gaussian_importance_map).cuda(self.get_device(),
non_blocking=True)
else:
gaussian_importance_map = None
if all_in_gpu:
# If we run the inference in GPU only (meaning all tensors are allocated on the GPU, this reduces
# CPU-GPU communication but required more GPU memory) we need to preallocate a few things on GPU
if use_gaussian and num_tiles > 1:
# half precision for the outputs should be good enough. If the outputs here are half, the
# gaussian_importance_map should be as well
gaussian_importance_map = gaussian_importance_map.half()
# make sure we did not round anything to 0
gaussian_importance_map[gaussian_importance_map == 0] = gaussian_importance_map[
gaussian_importance_map != 0].min()
add_for_nb_of_preds = gaussian_importance_map
else:
add_for_nb_of_preds = torch.ones(data.shape[1:], device=self.get_device())
if verbose: print("initializing result array (on GPU)")
aggregated_results = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half,
device=self.get_device())
if verbose: print("moving data to GPU")
data = torch.from_numpy(data).cuda(self.get_device(), non_blocking=True)
if verbose: print("initializing result_numsamples (on GPU)")
aggregated_nb_of_predictions = torch.zeros([self.num_classes] + list(data.shape[1:]), dtype=torch.half,
device=self.get_device())
else:
if use_gaussian and num_tiles > 1:
add_for_nb_of_preds = self._gaussian_2d
else:
add_for_nb_of_preds = np.ones(data.shape[1:], dtype=np.float32)
aggregated_results = np.zeros([self.num_classes] + list(data.shape[1:]), dtype=np.float32)
aggregated_nb_of_predictions = np.zeros([self.num_classes] + list(data.shape[1:]), dtype=np.float32)
for x in steps[0]:
lb_x = x
ub_x = x + patch_size[0]
for y in steps[1]:
lb_y = y
ub_y = y + patch_size[1]
predicted_patch = self._internal_maybe_mirror_and_pred_2D(
data[None, :, lb_x:ub_x, lb_y:ub_y], mirror_axes, do_mirroring,
gaussian_importance_map)[0]
if all_in_gpu:
predicted_patch = predicted_patch.half()
else:
predicted_patch = predicted_patch.cpu().numpy()
aggregated_results[:, lb_x:ub_x, lb_y:ub_y] += predicted_patch
aggregated_nb_of_predictions[:, lb_x:ub_x, lb_y:ub_y] += add_for_nb_of_preds
# we reverse the padding here (remeber that we padded the input to be at least as large as the patch size
slicer = tuple(
[slice(0, aggregated_results.shape[i]) for i in
range(len(aggregated_results.shape) - (len(slicer) - 1))] + slicer[1:])
aggregated_results = aggregated_results[slicer]
aggregated_nb_of_predictions = aggregated_nb_of_predictions[slicer]
# computing the class_probabilities by dividing the aggregated result with result_numsamples
class_probabilities = aggregated_results / aggregated_nb_of_predictions
if regions_class_order is None:
predicted_segmentation = class_probabilities.argmax(0)
else:
if all_in_gpu:
class_probabilities_here = class_probabilities.detach().cpu().numpy()
else:
class_probabilities_here = class_probabilities
predicted_segmentation = np.zeros(class_probabilities_here.shape[1:], dtype=np.float32)
for i, c in enumerate(regions_class_order):
predicted_segmentation[class_probabilities_here[i] > 0.5] = c
if all_in_gpu:
if verbose: print("copying results to CPU")
if regions_class_order is None:
predicted_segmentation = predicted_segmentation.detach().cpu().numpy()
class_probabilities = class_probabilities.detach().cpu().numpy()
if verbose: print("prediction done")
return predicted_segmentation, class_probabilities
def _internal_predict_3D_2Dconv(self, x: np.ndarray, min_size: Tuple[int, int], do_mirroring: bool,
mirror_axes: tuple = (0, 1), regions_class_order: tuple = None,
pad_border_mode: str = "constant", pad_kwargs: dict = None,
all_in_gpu: bool = False, verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:
if all_in_gpu:
raise NotImplementedError
assert len(x.shape) == 4, "data must be c, x, y, z"
predicted_segmentation = []
softmax_pred = []
for s in range(x.shape[1]):
pred_seg, softmax_pres = self._internal_predict_2D_2Dconv(
x[:, s], min_size, do_mirroring, mirror_axes, regions_class_order, pad_border_mode, pad_kwargs, verbose)
predicted_segmentation.append(pred_seg[None])
softmax_pred.append(softmax_pres[None])
predicted_segmentation = np.vstack(predicted_segmentation)
softmax_pred = np.vstack(softmax_pred).transpose((1, 0, 2, 3))
return predicted_segmentation, softmax_pred
    def predict_3D_pseudo3D_2Dconv(self, x: np.ndarray, min_size: Tuple[int, int], do_mirroring: bool,
                                   mirror_axes: tuple = (0, 1), regions_class_order: tuple = None,
                                   pseudo3D_slices: int = 5, all_in_gpu: bool = False,
                                   pad_border_mode: str = "constant", pad_kwargs: dict = None,
                                   verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:
        """Pseudo-3D inference: predict each slice from a stack of neighboring slices.

        For every slice of x (c, x, y, z) along the first spatial axis, the
        pseudo3D_slices-wide neighborhood (zero-padded at the volume borders) is
        folded into the channel dimension and fed to the 2D prediction. The
        per-slice results are stacked back into a volume; softmax is (c, x, y, z).
        pseudo3D_slices must be odd so the neighborhood is centered.
        """
        if all_in_gpu:
            raise NotImplementedError
        assert len(x.shape) == 4, "data must be c, x, y, z"
        assert pseudo3D_slices % 2 == 1, "pseudo3D_slices must be odd"
        extra_slices = (pseudo3D_slices - 1) // 2

        # zero-pad extra_slices slices on both ends of the slice axis so border
        # slices get a full-sized neighborhood
        shp_for_pad = np.array(x.shape)
        shp_for_pad[1] = extra_slices

        pad = np.zeros(shp_for_pad, dtype=np.float32)
        data = np.concatenate((pad, x, pad), 1)

        predicted_segmentation = []
        softmax_pred = []
        for s in range(extra_slices, data.shape[1] - extra_slices):
            d = data[:, (s - extra_slices):(s + extra_slices + 1)]
            # fold the neighborhood slices into the channel dim: (c * pseudo3D_slices, x, y)
            d = d.reshape((-1, d.shape[-2], d.shape[-1]))
            pred_seg, softmax_pres = \
                self._internal_predict_2D_2Dconv(d, min_size, do_mirroring, mirror_axes,
                                                 regions_class_order, pad_border_mode, pad_kwargs, verbose)
            predicted_segmentation.append(pred_seg[None])
            softmax_pred.append(softmax_pres[None])
        # stack slices back into a volume; move class channel to the front for the softmax
        predicted_segmentation = np.vstack(predicted_segmentation)
        softmax_pred = np.vstack(softmax_pred).transpose((1, 0, 2, 3))

        return predicted_segmentation, softmax_pred
def _internal_predict_3D_2Dconv_tiled(self, x: np.ndarray, patch_size: Tuple[int, int], do_mirroring: bool,
mirror_axes: tuple = (0, 1), step_size: float = 0.5,
regions_class_order: tuple = None, use_gaussian: bool = False,
pad_border_mode: str = "edge", pad_kwargs: dict =None,
all_in_gpu: bool = False,
verbose: bool = True) -> Tuple[np.ndarray, np.ndarray]:
if all_in_gpu:
raise NotImplementedError
assert len(x.shape) == 4, "data must be c, x, y, z"
predicted_segmentation = []
softmax_pred = []
for s in range(x.shape[1]):
pred_seg, softmax_pres = self._internal_predict_2D_2Dconv_tiled(
x[:, s], step_size, do_mirroring, mirror_axes, patch_size, regions_class_order, use_gaussian,
pad_border_mode, pad_kwargs, all_in_gpu, verbose)
predicted_segmentation.append(pred_seg[None])
softmax_pred.append(softmax_pres[None])
predicted_segmentation = np.vstack(predicted_segmentation)
softmax_pred = np.vstack(softmax_pred).transpose((1, 0, 2, 3))
return predicted_segmentation, softmax_pred
if __name__ == '__main__':
    # ad-hoc smoke tests for the sliding-window step computation: various
    # patch/image size combinations at different step sizes (incl. the
    # degenerate case image_size == patch_size, which must yield a single step)
    print(SegmentationNetwork._compute_steps_for_sliding_window((30, 224, 224), (162, 529, 529), 0.5))
    print(SegmentationNetwork._compute_steps_for_sliding_window((30, 224, 224), (162, 529, 529), 1))
    print(SegmentationNetwork._compute_steps_for_sliding_window((30, 224, 224), (162, 529, 529), 0.1))

    print(SegmentationNetwork._compute_steps_for_sliding_window((30, 224, 224), (60, 448, 224), 1))
    print(SegmentationNetwork._compute_steps_for_sliding_window((30, 224, 224), (60, 448, 224), 0.5))

    print(SegmentationNetwork._compute_steps_for_sliding_window((30, 224, 224), (30, 224, 224), 1))
    print(SegmentationNetwork._compute_steps_for_sliding_window((30, 224, 224), (30, 224, 224), 0.125))

    print(SegmentationNetwork._compute_steps_for_sliding_window((123, 54, 123), (246, 162, 369), 0.25))
| 44,025
| 52.107358
| 137
|
py
|
CoTr
|
CoTr-main/CoTr_package/CoTr/network_architecture/CNNBackbone.py
|
# ------------------------------------------------------------------------
# CNN encoder
# ------------------------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import math
from functools import partial
class Conv3d_wd(nn.Conv3d):
    """3D convolution with weight standardization.

    At every forward pass each filter is zero-centered across its
    (in_channels, kD, kH, kW) extent and divided by its standard deviation
    before the convolution is applied.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=(1,1,1), padding=(0,0,0), dilation=(1,1,1), groups=1, bias=False):
        super(Conv3d_wd, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias)

    def forward(self, x):
        w = self.weight
        # per-filter mean over all non-output dims (nested keepdim means == overall mean)
        per_filter_mean = w.mean(dim=1, keepdim=True).mean(dim=2, keepdim=True).mean(dim=3, keepdim=True).mean(dim=4, keepdim=True)
        centered = w - per_filter_mean
        # per-filter (unbiased) std; the epsilon keeps the division finite
        per_filter_std = torch.sqrt(torch.var(centered.view(centered.size(0), -1), dim=1) + 1e-12).view(-1, 1, 1, 1, 1)
        standardized = centered / per_filter_std.expand_as(centered)
        return F.conv3d(x, standardized, self.bias, self.stride, self.padding, self.dilation, self.groups)
def conv3x3x3(in_planes, out_planes, kernel_size, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), bias=False, weight_std=False):
    """Convolution factory: a plain nn.Conv3d, or the weight-standardized
    Conv3d_wd when ``weight_std`` is True. All geometry kwargs are forwarded."""
    if weight_std:
        return Conv3d_wd(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                         padding=padding, dilation=dilation, bias=bias)
    return nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                     padding=padding, dilation=dilation, bias=bias)
def Norm_layer(norm_cfg, inplanes):
    """Create a 3D normalization layer for ``inplanes`` channels.

    :param norm_cfg: one of 'BN', 'SyncBN', 'GN' (16 groups), 'IN' (affine)
    :param inplanes: number of feature channels to normalize
    :return: the constructed nn.Module
    :raises ValueError: for an unknown norm_cfg (previously this crashed with
        an UnboundLocalError on the never-assigned result variable)
    """
    if norm_cfg == 'BN':
        return nn.BatchNorm3d(inplanes)
    if norm_cfg == 'SyncBN':
        return nn.SyncBatchNorm(inplanes)
    if norm_cfg == 'GN':
        # 16 groups matches the rest of this codebase
        return nn.GroupNorm(16, inplanes)
    if norm_cfg == 'IN':
        return nn.InstanceNorm3d(inplanes, affine=True)
    raise ValueError('unsupported norm_cfg: {}'.format(norm_cfg))
def Activation_layer(activation_cfg, inplace=True):
    """Create an activation layer.

    :param activation_cfg: 'ReLU' or 'LeakyReLU' (negative_slope 1e-2)
    :param inplace: forwarded to the activation module
    :return: the constructed nn.Module
    :raises ValueError: for an unknown activation_cfg (previously this crashed
        with an UnboundLocalError on the never-assigned result variable)
    """
    if activation_cfg == 'ReLU':
        return nn.ReLU(inplace=inplace)
    if activation_cfg == 'LeakyReLU':
        return nn.LeakyReLU(negative_slope=1e-2, inplace=inplace)
    raise ValueError('unsupported activation_cfg: {}'.format(activation_cfg))
class ResBlock(nn.Module):
    """Single-convolution residual block: act(norm(conv(x)) + shortcut(x)).

    When ``downsample`` is given it maps the input onto the output shape for
    the skip connection; otherwise the identity is used.
    """
    expansion = 1

    def __init__(self, inplanes, planes, norm_cfg, activation_cfg, stride=(1, 1, 1), downsample=None, weight_std=False):
        super(ResBlock, self).__init__()
        self.conv1 = conv3x3x3(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False, weight_std=weight_std)
        self.norm1 = Norm_layer(norm_cfg, planes)
        self.nonlin = Activation_layer(activation_cfg, inplace=True)
        self.downsample = downsample

    def forward(self, x):
        # shortcut path: projected input if a downsample module exists, else identity
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.norm1(self.conv1(x))
        out += shortcut
        return self.nonlin(out)
class Backbone(nn.Module):
    """ResNet-style 3D CNN encoder used by CoTr.

    The stem downsamples in-plane only (stride (1, 2, 2)); each subsequent stage
    halves all three spatial dims. ``forward`` returns the four intermediate
    feature maps (stem, layer1, layer2, layer3) for use as skip connections.
    """
    # depth -> (block type, blocks per stage)
    arch_settings = {
        9: (ResBlock, (3, 3, 2))
    }

    def __init__(self,
                 depth,
                 in_channels=1,
                 norm_cfg='BN',
                 activation_cfg='ReLU',
                 weight_std=False):
        super(Backbone, self).__init__()
        if depth not in self.arch_settings:
            raise KeyError('invalid depth {} for resnet'.format(depth))
        self.depth = depth
        block, layers = self.arch_settings[depth]
        self.inplanes = 64
        # stem: large kernel, in-plane downsampling only
        self.conv1 = conv3x3x3(in_channels, 64, kernel_size=7, stride=(1, 2, 2), padding=3, bias=False, weight_std=weight_std)
        self.norm1 = Norm_layer(norm_cfg, 64)
        self.nonlin = Activation_layer(activation_cfg, inplace=True)
        self.layer1 = self._make_layer(block, 192, layers[0], stride=(2, 2, 2), norm_cfg=norm_cfg, activation_cfg=activation_cfg, weight_std=weight_std)
        self.layer2 = self._make_layer(block, 384, layers[1], stride=(2, 2, 2), norm_cfg=norm_cfg, activation_cfg=activation_cfg, weight_std=weight_std)
        self.layer3 = self._make_layer(block, 384, layers[2], stride=(2, 2, 2), norm_cfg=norm_cfg, activation_cfg=activation_cfg, weight_std=weight_std)
        self.layers = []

        for m in self.modules():
            if isinstance(m, (nn.Conv3d, Conv3d_wd)):
                # bugfix: nn.init.kaiming_normal (no underscore) is deprecated and removed
                # in recent PyTorch; use the in-place variant, matching init_weights()
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
            elif isinstance(m, (nn.BatchNorm3d, nn.GroupNorm, nn.InstanceNorm3d, nn.SyncBatchNorm)):
                # norm layers without affine parameters have weight/bias set to None
                if m.weight is not None:
                    nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=(1, 1, 1), norm_cfg='BN', activation_cfg='ReLU', weight_std=False):
        """Stack ``blocks`` residual blocks; the first one carries the stride and,
        if the shape changes, a 1x1x1 projection on the skip path."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv3x3x3(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=False, weight_std=weight_std), Norm_layer(norm_cfg, planes * block.expansion))

        layers = []
        layers.append(block(self.inplanes, planes, norm_cfg, activation_cfg, stride=stride, downsample=downsample, weight_std=weight_std))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, norm_cfg, activation_cfg, weight_std=weight_std))

        return nn.Sequential(*layers)

    def init_weights(self):
        """Re-initialize conv and norm parameters (same scheme as __init__)."""
        for m in self.modules():
            if isinstance(m, (nn.Conv3d, Conv3d_wd)):
                m.weight = nn.init.kaiming_normal_(m.weight, mode='fan_out')
            elif isinstance(m, (nn.BatchNorm3d, nn.GroupNorm, nn.InstanceNorm3d, nn.SyncBatchNorm)):
                if m.weight is not None:
                    nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """Return the list of four feature maps [stem, layer1, layer2, layer3]."""
        out = []
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.nonlin(x)
        out.append(x)
        x = self.layer1(x)
        out.append(x)
        x = self.layer2(x)
        out.append(x)
        x = self.layer3(x)
        out.append(x)
        return out
| 6,314
| 37.272727
| 152
|
py
|
CoTr
|
CoTr-main/CoTr_package/CoTr/network_architecture/__init__.py
|
from __future__ import absolute_import
from . import *
| 54
| 26.5
| 38
|
py
|
CoTr
|
CoTr-main/CoTr_package/CoTr/network_architecture/ResTranUnet.py
|
# ------------------------------------------------------------------------
# CoTr
# ------------------------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from CoTr.network_architecture import CNNBackbone
from CoTr.network_architecture.neural_network import SegmentationNetwork
from CoTr.network_architecture.DeTrans.DeformableTrans import DeformableTransformer
from CoTr.network_architecture.DeTrans.position_encoding import build_position_encoding
class Conv3d_wd(nn.Conv3d):
    """nn.Conv3d with weight standardization applied on every forward pass:
    filters are zero-centered and scaled to unit standard deviation before use."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=(1,1,1), padding=(0,0,0), dilation=(1,1,1), groups=1, bias=False):
        super(Conv3d_wd, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias)

    def forward(self, x):
        raw = self.weight
        # overall per-filter mean via nested keepdim means over dims 1..4
        mean = raw.mean(dim=1, keepdim=True).mean(dim=2, keepdim=True).mean(dim=3, keepdim=True).mean(dim=4, keepdim=True)
        shifted = raw - mean
        flat = shifted.view(shifted.size(0), -1)
        # unbiased per-filter std; epsilon avoids division by zero
        scale = torch.sqrt(torch.var(flat, dim=1) + 1e-12).view(-1, 1, 1, 1, 1)
        return F.conv3d(x, shifted / scale.expand_as(shifted), self.bias, self.stride, self.padding,
                        self.dilation, self.groups)
def conv3x3x3(in_planes, out_planes, kernel_size, stride=(1, 1, 1), padding=(0, 0, 0), dilation=(1, 1, 1), groups=1, bias=False, weight_std=False):
    """Convolution factory: weight-standardized Conv3d_wd when ``weight_std``
    is True, otherwise a plain nn.Conv3d. All geometry kwargs are forwarded."""
    if weight_std:
        return Conv3d_wd(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                         padding=padding, dilation=dilation, groups=groups, bias=bias)
    return nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                     padding=padding, dilation=dilation, groups=groups, bias=bias)
def Norm_layer(norm_cfg, inplanes):
    """Create a 3D normalization layer for ``inplanes`` channels.

    :param norm_cfg: one of 'BN', 'SyncBN', 'GN' (16 groups), 'IN' (affine)
    :param inplanes: number of feature channels to normalize
    :return: the constructed nn.Module
    :raises ValueError: for an unknown norm_cfg (previously this crashed with
        an UnboundLocalError on the never-assigned result variable)
    """
    if norm_cfg == 'BN':
        return nn.BatchNorm3d(inplanes)
    if norm_cfg == 'SyncBN':
        return nn.SyncBatchNorm(inplanes)
    if norm_cfg == 'GN':
        # 16 groups matches the rest of this codebase
        return nn.GroupNorm(16, inplanes)
    if norm_cfg == 'IN':
        return nn.InstanceNorm3d(inplanes, affine=True)
    raise ValueError('unsupported norm_cfg: {}'.format(norm_cfg))
def Activation_layer(activation_cfg, inplace=True):
    """Create an activation layer ('ReLU' or 'LeakyReLU' with slope 1e-2).

    :raises ValueError: for an unknown activation_cfg (previously this crashed
        with an UnboundLocalError on the never-assigned result variable)
    """
    if activation_cfg == 'ReLU':
        return nn.ReLU(inplace=inplace)
    if activation_cfg == 'LeakyReLU':
        return nn.LeakyReLU(negative_slope=1e-2, inplace=inplace)
    raise ValueError('unsupported activation_cfg: {}'.format(activation_cfg))
class Conv3dBlock(nn.Module):
    """Basic conv -> norm -> activation unit."""

    def __init__(self,in_channels,out_channels,norm_cfg,activation_cfg,kernel_size,stride=(1, 1, 1),padding=(0, 0, 0),dilation=(1, 1, 1),bias=False,weight_std=False):
        super(Conv3dBlock,self).__init__()
        self.conv = conv3x3x3(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, weight_std=weight_std)
        self.norm = Norm_layer(norm_cfg, out_channels)
        self.nonlin = Activation_layer(activation_cfg, inplace=True)

    def forward(self, x):
        # conv, then normalize, then activate
        return self.nonlin(self.norm(self.conv(x)))
class ResBlock(nn.Module):
    """Residual block of two Conv3dBlocks with an identity skip connection.

    NOTE(review): the identity skip requires inplanes == planes; callers in
    this file always use matching channel counts.
    """

    def __init__(self, inplanes, planes, norm_cfg, activation_cfg, weight_std=False):
        super(ResBlock, self).__init__()
        self.resconv1 = Conv3dBlock(inplanes, planes, norm_cfg, activation_cfg, kernel_size=3, stride=1, padding=1, bias=False, weight_std=weight_std)
        self.resconv2 = Conv3dBlock(planes, planes, norm_cfg, activation_cfg, kernel_size=3, stride=1, padding=1, bias=False, weight_std=weight_std)

    def forward(self, x):
        # two conv blocks plus the untouched input
        return self.resconv2(self.resconv1(x)) + x
class U_ResTran3D(nn.Module):
def __init__(self, norm_cfg='BN', activation_cfg='ReLU', img_size=None, num_classes=None, weight_std=False):
super(U_ResTran3D, self).__init__()
self.MODEL_NUM_CLASSES = num_classes
self.upsamplex2 = nn.Upsample(scale_factor=(1,2,2), mode='trilinear')
self.transposeconv_stage2 = nn.ConvTranspose3d(384, 384, kernel_size=(2,2,2), stride=(2,2,2), bias=False)
self.transposeconv_stage1 = nn.ConvTranspose3d(384, 192, kernel_size=(2,2,2), stride=(2,2,2), bias=False)
self.transposeconv_stage0 = nn.ConvTranspose3d(192, 64, kernel_size=(2,2,2), stride=(2,2,2), bias=False)
self.stage2_de = ResBlock(384, 384, norm_cfg, activation_cfg, weight_std=weight_std)
self.stage1_de = ResBlock(192, 192, norm_cfg, activation_cfg, weight_std=weight_std)
self.stage0_de = ResBlock(64, 64, norm_cfg, activation_cfg, weight_std=weight_std)
self.ds2_cls_conv = nn.Conv3d(384, self.MODEL_NUM_CLASSES, kernel_size=1)
self.ds1_cls_conv = nn.Conv3d(192, self.MODEL_NUM_CLASSES, kernel_size=1)
self.ds0_cls_conv = nn.Conv3d(64, self.MODEL_NUM_CLASSES, kernel_size=1)
self.cls_conv = nn.Conv3d(64, self.MODEL_NUM_CLASSES, kernel_size=1)
for m in self.modules():
if isinstance(m, (nn.Conv3d, Conv3d_wd, nn.ConvTranspose3d)):
m.weight = nn.init.kaiming_normal_(m.weight, mode='fan_out')
elif isinstance(m, (nn.BatchNorm3d, nn.SyncBatchNorm, nn.InstanceNorm3d, nn.GroupNorm)):
if m.weight is not None:
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
self.backbone = CNNBackbone.Backbone(depth=9, norm_cfg=norm_cfg, activation_cfg=activation_cfg, weight_std=weight_std)
total = sum([param.nelement() for param in self.backbone.parameters()])
print(' + Number of Backbone Params: %.2f(e6)' % (total / 1e6))
self.position_embed = build_position_encoding(mode='v2', hidden_dim=384)
self.encoder_Detrans = DeformableTransformer(d_model=384, dim_feedforward=1536, dropout=0.1, activation='gelu', num_feature_levels=2, nhead=6, num_encoder_layers=6, enc_n_points=4)
total = sum([param.nelement() for param in self.encoder_Detrans.parameters()])
print(' + Number of Transformer Params: %.2f(e6)' % (total / 1e6))
def posi_mask(self, x):
x_fea = []
x_posemb = []
masks = []
for lvl, fea in enumerate(x):
if lvl > 1:
x_fea.append(fea)
x_posemb.append(self.position_embed(fea))
masks.append(torch.zeros((fea.shape[0], fea.shape[2], fea.shape[3], fea.shape[4]), dtype=torch.bool).cuda())
return x_fea, masks, x_posemb
    def forward(self, inputs):
        """CoTr forward pass: CNN backbone -> deformable transformer over the
        two coarsest feature levels -> transposed-conv decoder with deep
        supervision.

        Returns [result, ds0, ds1, ds2]: the full-resolution logits plus three
        auxiliary deep-supervision outputs at progressively coarser scales.
        """
        # # %%%%%%%%%%%%% CoTr
        x_convs = self.backbone(inputs)
        # Coarse pyramid levels, their (all-False) masks and position embeddings.
        x_fea, masks, x_posemb = self.posi_mask(x_convs)
        # Tokens of both levels are concatenated along the sequence axis and
        # refined jointly by the deformable transformer encoder.
        x_trans = self.encoder_Detrans(x_fea, masks, x_posemb)
        # # Single_scale
        # # x = self.transposeconv_stage2(x_trans.transpose(-1, -2).view(x_convs[-1].shape))
        # # skip2 = x_convs[-2]
        # Multi-scale
        # Split the token sequence back into the two levels: the first
        # D0*H0*W0 tokens belong to x_fea[0] (the finer level), the remainder
        # to the coarsest level; then restore the 5D feature-map shapes.
        x = self.transposeconv_stage2(x_trans[:, x_fea[0].shape[-3]*x_fea[0].shape[-2]*x_fea[0].shape[-1]::].transpose(-1, -2).view(x_convs[-1].shape))  # e.g. x_trans length: 12*24*24 + 6*12*12 = 7776 tokens
        # Refined finer-level tokens serve as the skip connection (instead of
        # the raw CNN features used in the single-scale variant above).
        skip2 = x_trans[:, 0:x_fea[0].shape[-3]*x_fea[0].shape[-2]*x_fea[0].shape[-1]].transpose(-1, -2).view(x_convs[-2].shape)
        x = x + skip2
        x = self.stage2_de(x)
        ds2 = self.ds2_cls_conv(x)  # deepest deep-supervision head
        x = self.transposeconv_stage1(x)
        skip1 = x_convs[-3]  # remaining skips come straight from the CNN
        x = x + skip1
        x = self.stage1_de(x)
        ds1 = self.ds1_cls_conv(x)
        x = self.transposeconv_stage0(x)
        skip0 = x_convs[-4]
        x = x + skip0
        x = self.stage0_de(x)
        ds0 = self.ds0_cls_conv(x)
        # Final x2 in-plane upsampling back to input resolution, then classify.
        result = self.upsamplex2(x)
        result = self.cls_conv(result)
        return [result, ds0, ds1, ds2]
class ResTranUnet(SegmentationNetwork):
    """ResTran-3D U-Net: thin nnU-Net-compatible wrapper around U_ResTran3D.

    Exposes the conv/norm/dropout operator attributes that the nnU-Net
    framework inspects, and returns either all deep-supervision heads or
    only the full-resolution prediction.
    """
    def __init__(self, norm_cfg='BN', activation_cfg='ReLU', img_size=None, num_classes=None, weight_std=False, deep_supervision=False):
        super().__init__()
        self.do_ds = False
        # The actual network; this class only adapts it to nnU-Net conventions.
        self.U_ResTran3D = U_ResTran3D(norm_cfg, activation_cfg, img_size, num_classes, weight_std)
        # Weight-standardized conv when requested, plain Conv3d otherwise.
        self.conv_op = Conv3d_wd if weight_std else nn.Conv3d
        # Map the norm-config string to the matching 3D norm layer; an
        # unrecognized string leaves norm_op unset, exactly as before.
        norm_map = {
            'BN': nn.BatchNorm3d,
            'SyncBN': nn.SyncBatchNorm,
            'GN': nn.GroupNorm,
            'IN': nn.InstanceNorm3d,
        }
        if norm_cfg in norm_map:
            self.norm_op = norm_map[norm_cfg]
        self.dropout_op = nn.Dropout3d
        self.num_classes = num_classes
        self._deep_supervision = deep_supervision
        self.do_ds = deep_supervision
    def forward(self, x):
        """Return all heads when deep supervision is active, else only the
        full-resolution output."""
        outputs = self.U_ResTran3D(x)
        if self._deep_supervision and self.do_ds:
            return outputs
        return outputs[0]
| 9,009
| 41.701422
| 191
|
py
|
CoTr
|
CoTr-main/CoTr_package/CoTr/network_architecture/DeTrans/position_encoding.py
|
"""
Positional encodings for the transformer.
"""
import math
import torch
from torch import nn
from typing import Optional
from torch import Tensor
class PositionEmbeddingSine(nn.Module):
    """
    This is a more standard version of the position embedding, very similar to the one
    used by the Attention is all you need paper, generalized to work on images.

    3D variant: produces sine/cosine positional features along depth, height
    and width, concatenated channel-wise as (d, y, x).
    """
    # num_pos_feats: channels per axis, consumed as [x, y, d] below.
    # NOTE(review): mutable default list is shared across instances; harmless
    # here since it is never mutated.
    def __init__(self, num_pos_feats=[64, 64, 64], temperature=10000, normalize=False, scale=None):
        super().__init__()
        self.num_pos_feats = num_pos_feats
        self.temperature = temperature
        self.normalize = normalize
        if scale is not None and normalize is False:
            raise ValueError("normalize should be True if scale is passed")
        if scale is None:
            scale = 2 * math.pi
        self.scale = scale
    def forward(self, x):
        # x: (bs, c, d, h, w) feature map; only its shape/device are used.
        bs, c, d, h, w = x.shape
        # No padding is ever present, so the mask is all False.
        mask = torch.zeros(bs, d, h, w, dtype=torch.bool).cuda()
        assert mask is not None
        not_mask = ~mask
        # Running position index along each spatial axis (1-based via cumsum).
        d_embed = not_mask.cumsum(1, dtype=torch.float32)
        y_embed = not_mask.cumsum(2, dtype=torch.float32)
        x_embed = not_mask.cumsum(3, dtype=torch.float32)
        if self.normalize:
            eps = 1e-6
            # Center each index on its cell and rescale to [0, scale].
            d_embed = (d_embed - 0.5) / (d_embed[:, -1:, :, :] + eps) * self.scale
            y_embed = (y_embed - 0.5) / (y_embed[:, :, -1:, :] + eps) * self.scale
            x_embed = (x_embed - 0.5) / (x_embed[:, :, :, -1:] + eps) * self.scale
        # Per-axis frequency bands. The `3 * (dim // 3)` grouping appears to be
        # the 3D analogue of the usual `2 * (dim // 2)` used in 2D DETR.
        dim_tx = torch.arange(self.num_pos_feats[0], dtype=torch.float32, device=x.device)
        dim_tx = self.temperature ** (3 * (dim_tx // 3) / self.num_pos_feats[0])
        dim_ty = torch.arange(self.num_pos_feats[1], dtype=torch.float32, device=x.device)
        dim_ty = self.temperature ** (3 * (dim_ty // 3) / self.num_pos_feats[1])
        dim_td = torch.arange(self.num_pos_feats[2], dtype=torch.float32, device=x.device)
        dim_td = self.temperature ** (3 * (dim_td // 3) / self.num_pos_feats[2])
        pos_x = x_embed[:, :, :, :, None] / dim_tx
        pos_y = y_embed[:, :, :, :, None] / dim_ty
        pos_d = d_embed[:, :, :, :, None] / dim_td
        # Interleave sine (even channels) and cosine (odd channels).
        pos_x = torch.stack((pos_x[:, :, :, :, 0::2].sin(), pos_x[:, :, :, :, 1::2].cos()), dim=5).flatten(4)
        pos_y = torch.stack((pos_y[:, :, :, :, 0::2].sin(), pos_y[:, :, :, :, 1::2].cos()), dim=5).flatten(4)
        pos_d = torch.stack((pos_d[:, :, :, :, 0::2].sin(), pos_d[:, :, :, :, 1::2].cos()), dim=5).flatten(4)
        # Concatenate axes channel-wise and move channels to dim 1:
        # output shape (bs, sum(num_pos_feats), d, h, w).
        pos = torch.cat((pos_d, pos_y, pos_x), dim=4).permute(0, 4, 1, 2, 3)
        return pos
def build_position_encoding(mode, hidden_dim):
    """Factory for the 3D sine positional encoding.

    ``hidden_dim`` is split as evenly as possible across the three spatial
    axes; any remainder from ``hidden_dim % 3`` goes to the last axis so the
    per-axis feature counts always sum to ``hidden_dim``.

    Raises:
        ValueError: if ``mode`` is not 'v2' or 'sine'.
    """
    base = hidden_dim // 3
    n_steps = [base, base, base + hidden_dim % 3]
    if mode not in ('v2', 'sine'):
        raise ValueError(f"not supported {mode}")
    return PositionEmbeddingSine(num_pos_feats=n_steps, normalize=True)
| 3,032
| 39.986486
| 109
|
py
|
CoTr
|
CoTr-main/CoTr_package/CoTr/network_architecture/DeTrans/DeformableTrans.py
|
# ------------------------------------------------------------------------
# 3D Deformable Transformer
# ------------------------------------------------------------------------
# Modified from Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
import copy
from typing import Optional, List
import math
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn.init import xavier_uniform_, constant_, normal_
from .ops.modules import MSDeformAttn
from .position_encoding import build_position_encoding
class DeformableTransformer(nn.Module):
    """Encoder-only 3D deformable transformer (Deformable-DETR style).

    Flattens a list of 3D feature maps into a single token sequence, adds
    positional and learned per-level embeddings, and refines the tokens with
    multi-scale deformable self-attention.
    """
    def __init__(self, d_model=256, nhead=8,
                 num_encoder_layers=6, dim_feedforward=1024, dropout=0.1,
                 activation="relu", num_feature_levels=4, enc_n_points=4):
        super().__init__()
        self.d_model = d_model
        self.nhead = nhead
        encoder_layer = DeformableTransformerEncoderLayer(d_model, dim_feedforward,
                                                          dropout, activation,
                                                          num_feature_levels, nhead, enc_n_points)
        self.encoder = DeformableTransformerEncoder(encoder_layer, num_encoder_layers)
        # Learned embedding that tells tokens apart by feature level after
        # flattening; added on top of the sine positional embedding.
        self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model))
        self._reset_parameters()
    def _reset_parameters(self):
        # Xavier-init all weight matrices, then let MSDeformAttn apply its own
        # specialized initialization (overwriting the generic one).
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)
        for m in self.modules():
            if isinstance(m, MSDeformAttn):
                m._reset_parameters()
        normal_(self.level_embed)
    def get_valid_ratio(self, mask):
        """Fraction of non-padded extent per axis of a (N, D, H, W) mask."""
        _, D, H, W = mask.shape
        valid_D = torch.sum(~mask[:, :, 0, 0], 1)
        valid_H = torch.sum(~mask[:, 0, :, 0], 1)
        valid_W = torch.sum(~mask[:, 0, 0, :], 1)
        valid_ratio_d = valid_D.float() / D
        valid_ratio_h = valid_H.float() / H
        valid_ratio_w = valid_W.float() / W
        # NOTE: stacked as (d, w, h) — this matches the (d, x, y) ordering used
        # when building the sampling reference points in the encoder.
        valid_ratio = torch.stack([valid_ratio_d, valid_ratio_w, valid_ratio_h], -1)
        return valid_ratio
    def forward(self, srcs, masks, pos_embeds):
        # prepare input for encoder
        src_flatten = []
        mask_flatten = []
        lvl_pos_embed_flatten = []
        spatial_shapes = []
        for lvl, (src, mask, pos_embed) in enumerate(zip(srcs, masks, pos_embeds)):
            bs, c, d, h, w = src.shape
            spatial_shape = (d, h, w)
            spatial_shapes.append(spatial_shape)
            # (bs, c, d, h, w) -> (bs, d*h*w, c): one token per voxel.
            src = src.flatten(2).transpose(1, 2)
            mask = mask.flatten(1)
            pos_embed = pos_embed.flatten(2).transpose(1, 2)
            # Positional + learned level embedding for this pyramid level.
            lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, 1, -1)
            lvl_pos_embed_flatten.append(lvl_pos_embed)
            src_flatten.append(src)
            mask_flatten.append(mask)
        # Concatenate all levels along the token axis.
        src_flatten = torch.cat(src_flatten, 1)
        mask_flatten = torch.cat(mask_flatten, 1)
        lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)
        spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=src_flatten.device)
        # Token offset at which each level starts in the flattened sequence.
        level_start_index = torch.cat((spatial_shapes.new_zeros((1, )), spatial_shapes.prod(1).cumsum(0)[:-1]))
        valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1)
        # encoder
        memory = self.encoder(src_flatten, spatial_shapes, level_start_index, valid_ratios, lvl_pos_embed_flatten, mask_flatten)
        return memory
class DeformableTransformerEncoderLayer(nn.Module):
    """One encoder layer: multi-scale deformable self-attention followed by a
    feed-forward network, each with residual connection and LayerNorm
    (post-norm arrangement)."""
    def __init__(self,
                 d_model=256, d_ffn=1024,
                 dropout=0.1, activation="relu",
                 n_levels=4, n_heads=8, n_points=4):
        super().__init__()
        # self attention
        self.self_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
        self.dropout1 = nn.Dropout(dropout)
        self.norm1 = nn.LayerNorm(d_model)
        # ffn
        self.linear1 = nn.Linear(d_model, d_ffn)
        self.activation = _get_activation_fn(activation)
        self.dropout2 = nn.Dropout(dropout)
        self.linear2 = nn.Linear(d_ffn, d_model)
        self.dropout3 = nn.Dropout(dropout)
        self.norm2 = nn.LayerNorm(d_model)
    @staticmethod
    def with_pos_embed(tensor, pos):
        # Positional embedding is added to queries only (not to values).
        return tensor if pos is None else tensor + pos
    def forward_ffn(self, src):
        # Two-layer MLP with residual + LayerNorm.
        src2 = self.linear2(self.dropout2(self.activation(self.linear1(src))))
        src = src + self.dropout3(src2)
        src = self.norm2(src)
        return src
    def forward(self, src, pos, reference_points, spatial_shapes, level_start_index, padding_mask=None):
        # self attention (residual + norm)
        src2 = self.self_attn(self.with_pos_embed(src, pos), reference_points, src, spatial_shapes, level_start_index, padding_mask)
        src = src + self.dropout1(src2)
        src = self.norm1(src)
        # ffn
        src = self.forward_ffn(src)
        return src
class DeformableTransformerEncoder(nn.Module):
    """Stack of identical (deep-copied) deformable encoder layers sharing one
    set of precomputed sampling reference points."""
    def __init__(self, encoder_layer, num_layers):
        super().__init__()
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers
    @staticmethod
    def get_reference_points(spatial_shapes, valid_ratios, device):
        """Normalized voxel-center coordinates for every token of every level,
        scaled by the per-level valid ratios.

        Returns (N, sum(D*H*W), n_levels, 3) with the last axis ordered
        (d, x, y) — note the swapped x/y, mirrored by the (d, w, h) ordering of
        valid_ratios.
        """
        reference_points_list = []
        for lvl, (D_, H_, W_) in enumerate(spatial_shapes):
            # Voxel centers: 0.5, 1.5, ..., size-0.5 along each axis.
            ref_d, ref_y, ref_x = torch.meshgrid(torch.linspace(0.5, D_ - 0.5, D_, dtype=torch.float32, device=device),
                                                 torch.linspace(0.5, H_ - 0.5, H_, dtype=torch.float32, device=device),
                                                 torch.linspace(0.5, W_ - 0.5, W_, dtype=torch.float32, device=device))
            # Normalize to (0, 1) within the valid (non-padded) region.
            ref_d = ref_d.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * D_)
            ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 2] * H_)
            ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * W_)
            ref = torch.stack((ref_d, ref_x, ref_y), -1)  # D W H
            reference_points_list.append(ref)
        reference_points = torch.cat(reference_points_list, 1)
        # Broadcast each token's point to every level it may sample from.
        reference_points = reference_points[:, :, None] * valid_ratios[:, None]
        return reference_points
    def forward(self, src, spatial_shapes, level_start_index, valid_ratios, pos=None, padding_mask=None):
        output = src
        # Reference points are level/shape dependent only, so compute once and
        # reuse across all layers.
        reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=src.device)
        for _, layer in enumerate(self.layers):
            output = layer(output, pos, reference_points, spatial_shapes, level_start_index, padding_mask)
        return output
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
| 7,149
| 38.502762
| 132
|
py
|
CoTr
|
CoTr-main/CoTr_package/CoTr/network_architecture/DeTrans/ops/functions/ms_deform_attn_func.py
|
# ------------------------------------------------------------------------
# 3D Deformable Self-attention
# ------------------------------------------------------------------------
# Modified from Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import torch.nn.functional as F
from torch.autograd import Function
from torch.autograd.function import once_differentiable
def ms_deform_attn_core_pytorch_3D(value, value_spatial_shapes, sampling_locations, attention_weights):
    """Pure-PyTorch 3D multi-scale deformable attention core.

    Args:
        value: (N, S, M, D) flattened multi-level values (S = sum of T*H*W over
            levels, M heads, D channels per head).
        value_spatial_shapes: per-level (T, H, W) sizes.
        sampling_locations: (N, Lq, M, L, P, 3) normalized sampling coords in
            (0, 1) per query/head/level/point.
        attention_weights: (N, Lq, M, L, P) softmax weights over all L*P samples.
    Returns:
        (N, Lq, M*D) attended features.
    """
    N_, S_, M_, D_ = value.shape
    _, Lq_, M_, L_, P_, _ = sampling_locations.shape
    # Split the flat value tensor back into its per-level chunks.
    value_list = value.split([T_ * H_ * W_ for T_, H_, W_ in value_spatial_shapes], dim=1)
    # grid_sample expects coordinates in [-1, 1]; map (0, 1) -> (-1, 1).
    sampling_grids = 2 * sampling_locations - 1
    # sampling_grids = 3 * sampling_locations - 1
    sampling_value_list = []
    for lid_, (T_, H_, W_) in enumerate(value_spatial_shapes):
        # (N, T*H*W, M, D) -> (N*M, D, T, H, W) volume for this level.
        value_l_ = value_list[lid_].flatten(2).transpose(1, 2).reshape(N_*M_, D_, T_, H_, W_)
        # (N*M, 1, Lq, P, 3) sampling grid; the singleton dim makes the 5D
        # grid_sample output (N*M, D, 1, Lq, P).
        sampling_grid_l_ = sampling_grids[:, :, :, lid_].transpose(1, 2).flatten(0, 1)[:,None,:,:,:]
        # For 5D inputs, mode='bilinear' performs trilinear interpolation
        # (per torch.nn.functional.grid_sample documentation).
        sampling_value_l_ = F.grid_sample(value_l_, sampling_grid_l_.to(dtype=value_l_.dtype), mode='bilinear', padding_mode='zeros', align_corners=False)[:,:,0]
        sampling_value_list.append(sampling_value_l_)
    # (N*M, 1, Lq, L*P) weights aligned with the stacked samples.
    attention_weights = attention_weights.transpose(1, 2).reshape(N_*M_, 1, Lq_, L_*P_)
    # Weighted sum over all levels and points, then merge heads back.
    output = (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights).sum(-1).view(N_, M_*D_, Lq_)
    return output.transpose(1, 2).contiguous()
| 1,798
| 55.21875
| 161
|
py
|
CoTr
|
CoTr-main/CoTr_package/CoTr/network_architecture/DeTrans/ops/modules/__init__.py
|
from .ms_deform_attn import MSDeformAttn
| 41
| 20
| 40
|
py
|
CoTr
|
CoTr-main/CoTr_package/CoTr/network_architecture/DeTrans/ops/modules/ms_deform_attn.py
|
# ------------------------------------------------------------------------
# 3D Deformable Self-attention
# ------------------------------------------------------------------------
# Modified from Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import warnings
import math
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_, constant_
from ..functions.ms_deform_attn_func import ms_deform_attn_core_pytorch_3D
class MSDeformAttn(nn.Module):
    def __init__(self, d_model=256, n_levels=4, n_heads=8, n_points=4):
        """
        Multi-Scale Deformable Attention Module
        :param d_model      hidden dimension
        :param n_levels     number of feature levels
        :param n_heads      number of attention heads
        :param n_points     number of sampling points per attention head per feature level
        """
        super().__init__()
        if d_model % n_heads != 0:
            raise ValueError('d_model must be divisible by n_heads, but got {} and {}'.format(d_model, n_heads))
        _d_per_head = d_model // n_heads
        # NOTE(review): im2col_step is a CUDA-kernel batching parameter; it
        # appears unused in this pure-PyTorch implementation.
        self.im2col_step = 64
        self.d_model = d_model
        self.n_levels = n_levels
        self.n_heads = n_heads
        self.n_points = n_points
        # Per query: 3D offsets and scalar weights for every head/level/point.
        self.sampling_offsets = nn.Linear(d_model, n_heads * n_levels * n_points * 3)
        self.attention_weights = nn.Linear(d_model, n_heads * n_levels * n_points)
        self.value_proj = nn.Linear(d_model, d_model)
        self.output_proj = nn.Linear(d_model, d_model)
        self._reset_parameters()
    def _reset_parameters(self):
        # Zero the offset weights and bias them so each head initially samples
        # along a distinct 3D direction, scaled by point index.
        constant_(self.sampling_offsets.weight.data, 0.)
        thetas = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads)
        grid_init = torch.stack([thetas.cos(), thetas.sin()*thetas.cos(), thetas.sin()*thetas.sin()], -1)
        grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)[0]).view(self.n_heads, 1, 1, 3).repeat(1, self.n_levels, self.n_points, 1)
        for i in range(self.n_points):
            grid_init[:, :, i, :] *= i + 1
        with torch.no_grad():
            self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
        # Attention weights start at zero (uniform after softmax).
        constant_(self.attention_weights.weight.data, 0.)
        constant_(self.attention_weights.bias.data, 0.)
        xavier_uniform_(self.value_proj.weight.data)
        constant_(self.value_proj.bias.data, 0.)
        xavier_uniform_(self.output_proj.weight.data)
        constant_(self.output_proj.bias.data, 0.)
    def forward(self, query, reference_points, input_flatten, input_spatial_shapes, input_level_start_index, input_padding_mask=None):
        """
        :param query                       (N, Length_{query}, C)
        :param reference_points            (N, Length_{query}, n_levels, 3)
        :param input_flatten               (N, \sum_{l=0}^{L-1} D_l \cdot H_l \cdot W_l, C)
        :param input_spatial_shapes        (n_levels, 3), [(D_0, H_0, W_0), (D_1, H_1, W_1), ..., (D_{L-1}, H_{L-1}, W_{L-1})]
        :param input_level_start_index     (n_levels, ), [0, D_0*H_0*W_0, D_0*H_0*W_0+D_1*H_1*W_1, D_0*H_0*W_0+D_1*H_1*W_1+D_2*H_2*W_2, ..., D_0*H_0*W_0+D_1*H_1*W_1+...+D_{L-1}*H_{L-1}*W_{L-1}]
        :param input_padding_mask          (N, \sum_{l=0}^{L-1} D_l \cdot H_l \cdot W_l), True for padding elements, False for non-padding elements
        :return output                     (N, Length_{query}, C)
        """
        N, Len_q, _ = query.shape
        N, Len_in, _ = input_flatten.shape
        assert (input_spatial_shapes[:, 0] * input_spatial_shapes[:, 1] * input_spatial_shapes[:, 2]).sum() == Len_in
        value = self.value_proj(input_flatten)
        if input_padding_mask is not None:
            # Zero out padded positions so they contribute nothing when sampled.
            value = value.masked_fill(input_padding_mask[..., None], float(0))
        # Split channels across heads: (N, Len_in, n_heads, d_model/n_heads).
        value = value.view(N, Len_in, self.n_heads, self.d_model // self.n_heads)
        sampling_offsets = self.sampling_offsets(query).view(N, Len_q, self.n_heads, self.n_levels, self.n_points, 3)
        attention_weights = self.attention_weights(query).view(N, Len_q, self.n_heads, self.n_levels * self.n_points)
        # Softmax jointly over all levels*points, then reshape back.
        attention_weights = F.softmax(attention_weights, -1).view(N, Len_q, self.n_heads, self.n_levels, self.n_points)
        if reference_points.shape[-1] == 3:
            # Normalizer ordered (D, W, H) to match the (d, x, y) reference
            # point ordering used upstream.
            offset_normalizer = torch.stack([input_spatial_shapes[..., 0], input_spatial_shapes[..., 2], input_spatial_shapes[..., 1]], -1)
            sampling_locations = reference_points[:, :, None, :, None, :] \
                                 + sampling_offsets / offset_normalizer[None, None, None, :, None, :]
        # NOTE(review): if reference_points.shape[-1] != 3, sampling_locations
        # is never assigned and the next line raises NameError — confirm
        # callers always pass 3D reference points.
        output = ms_deform_attn_core_pytorch_3D(value, input_spatial_shapes, sampling_locations, attention_weights)
        output = self.output_proj(output)
        return output
| 5,082
| 51.402062
| 193
|
py
|
CoTr
|
CoTr-main/CoTr_package/CoTr/run/default_configuration.py
|
import nnunet
from nnunet.paths import network_training_output_dir, preprocessing_output_dir, default_plans_identifier
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.experiment_planning.summarize_plans import summarize_plans
from nnunet.training.model_restore import recursive_find_python_class
def get_configuration_from_output_folder(folder):
    """Parse a results path laid out as
    <network_training_output_dir>/<configuration>/<task>/<trainer>__<plans_identifier>
    and return (configuration, task, trainer, plans_identifier)."""
    # Drop the common output prefix and at most one leading separator.
    relative = folder[len(network_training_output_dir):]
    if relative.startswith("/"):
        relative = relative[1:]
    configuration, task, trainer_and_plans = relative.split("/")
    trainer, plans_identifier = trainer_and_plans.split("__")
    return configuration, task, trainer, plans_identifier
def get_default_configuration(outname, network, task, network_trainer, plans_identifier=default_plans_identifier,
                              search_in=(nnunet.__path__[0], "training", "network_training"),
                              base_module='nnunet.training.network_training'):
    """Resolve plans file, output folder, dataset directory, loss configuration,
    stage and trainer class for a given (network, task, trainer) combination.

    Args:
        outname: name of the output subfolder for this run.
        network: one of '2d', '3d_lowres', '3d_fullres', '3d_cascade_fullres'.
        task: task folder name (e.g. 'Task017_...').
        network_trainer: class name of the trainer to look up.
        plans_identifier: identifier of the preprocessing plans to use.
        search_in / base_module: where to search for the trainer class.

    Returns:
        (plans_file, output_folder_name, dataset_directory, batch_dice, stage,
        trainer_class)
    """
    # BUG FIX: the message previously listed '3d' instead of '2d', which is
    # not an accepted value — keep it in sync with the actual allowed set.
    assert network in ['2d', '3d_lowres', '3d_fullres', '3d_cascade_fullres'], \
        "network can only be one of the following: \'2d\', \'3d_lowres\', \'3d_fullres\', \'3d_cascade_fullres\'"
    dataset_directory = join(preprocessing_output_dir, task)
    # 2D training uses the 2D plans; all 3D variants share the 3D plans.
    if network == '2d':
        plans_file = join(preprocessing_output_dir, task, plans_identifier + "_plans_2D.pkl")
    else:
        plans_file = join(preprocessing_output_dir, task, plans_identifier + "_plans_3D.pkl")
    plans = load_pickle(plans_file)
    possible_stages = list(plans['plans_per_stage'].keys())
    if (network == '3d_cascade_fullres' or network == "3d_lowres") and len(possible_stages) == 1:
        raise RuntimeError("3d_lowres/3d_cascade_fullres only applies if there is more than one stage. This task does "
                           "not require the cascade. Run 3d_fullres instead")
    # Stage 0 is the low-resolution stage; fullres/cascade use the last stage.
    if network == '2d' or network == "3d_lowres":
        stage = 0
    else:
        stage = possible_stages[-1]
    trainer_class = recursive_find_python_class([join(*search_in)], network_trainer, current_module=base_module)
    output_folder_name = join(network_training_output_dir, network+'_'+plans_identifier, task, outname)
    print("###############################################")
    print("I am running the following nnUNet: %s" % network)
    print("My trainer class is: ", trainer_class)
    print("For that I will be using the following configuration:")
    summarize_plans(plans_file)
    print("I am using stage %d from these plans" % stage)
    # Batch dice is used for 2D and for the fullres stage of cascaded setups;
    # sample dice otherwise.
    if (network == '2d' or len(possible_stages) > 1) and not network == '3d_lowres':
        batch_dice = True
        print("I am using batch dice + CE loss")
    else:
        batch_dice = False
        print("I am using sample dice + CE loss")
    print("\nI am using data from this folder: ", join(dataset_directory, plans['data_identifier']))
    print("###############################################")
    return plans_file, output_folder_name, dataset_directory, batch_dice, stage, trainer_class
| 3,076
| 46.338462
| 119
|
py
|
CoTr
|
CoTr-main/CoTr_package/CoTr/run/run_training.py
|
# ------------------------------------------------------------------------
# Main
# ------------------------------------------------------------------------
import argparse
from batchgenerators.utilities.file_and_folder_operations import *
from CoTr.run.default_configuration import get_default_configuration
from nnunet.paths import default_plans_identifier
from nnunet.training.cascade_stuff.predict_next_stage import predict_next_stage
from CoTr.training.network_training.nnUNetTrainer import nnUNetTrainer
from nnunet.utilities.task_name_id_conversion import convert_id_to_task_name
import os
import CoTr
def main():
    """CLI entry point: parse arguments, resolve the CoTr trainer from the
    plans configuration, then run training and/or validation."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-gpu", type=str, default='0')
    parser.add_argument("-network", type=str, default='3d_fullres')
    parser.add_argument("-network_trainer", type=str, default='nnUNetTrainerV2_ResTrans')
    parser.add_argument("-task", type=str, default='17', help="can be task name or task id")
    parser.add_argument("-fold", type=str, default='all', help='0, 1, ..., 5 or \'all\'')
    parser.add_argument("-outpath", type=str, default='Trainer_CoTr', help='output path')
    parser.add_argument("-norm_cfg", type=str, default='IN', help='BN, IN or GN')
    parser.add_argument("-activation_cfg", type=str, default='LeakyReLU', help='LeakyReLU or ReLU')
    parser.add_argument("-val", "--validation_only", default=False, help="use this if you want to only run the validation",
                        required=False, action="store_true")
    parser.add_argument("-c", "--continue_training", help="use this if you want to continue a training",
                        action="store_true")
    parser.add_argument("-p", help="plans identifier. Only change this if you created a custom experiment planner",
                        default=default_plans_identifier, required=False)
    parser.add_argument("--use_compressed_data", default=False, action="store_true",
                        help="If you set use_compressed_data, the training cases will not be decompressed. Reading compressed data "
                             "is much more CPU and RAM intensive and should only be used if you know what you are "
                             "doing", required=False)
    parser.add_argument("--deterministic", default=False, action="store_true")
    parser.add_argument("--npz", required=False, default=False, action="store_true", help="if set then nnUNet will "
                                                                                          "export npz files of "
                                                                                          "predicted segmentations "
                                                                                          "in the validation as well. "
                                                                                          "This is needed to run the "
                                                                                          "ensembling step so unless "
                                                                                          "you are developing nnUNet "
                                                                                          "you should enable this")
    parser.add_argument("--find_lr", required=False, default=False, action="store_true",
                        help="not used here, just for fun")
    parser.add_argument("--valbest", required=False, default=False, action="store_true",
                        help="hands off. This is not intended to be used")
    parser.add_argument("--fp32", required=False, default=False, action="store_true",
                        help="disable mixed precision training and run old school fp32")
    parser.add_argument("--val_folder", required=False, default="validation_raw",
                        help="name of the validation folder. No need to use this for most people")
    parser.add_argument("--disable_saving", required=False, action='store_true')

    args = parser.parse_args()

    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    norm_cfg = args.norm_cfg
    activation_cfg = args.activation_cfg
    outpath = args.outpath + '_' + norm_cfg + '_' + activation_cfg

    task = args.task
    fold = args.fold
    network = args.network
    network_trainer = args.network_trainer
    validation_only = args.validation_only
    plans_identifier = args.p
    find_lr = args.find_lr

    use_compressed_data = args.use_compressed_data
    decompress_data = not use_compressed_data

    deterministic = args.deterministic
    valbest = args.valbest

    fp32 = args.fp32
    run_mixed_precision = not fp32

    val_folder = args.val_folder

    # BUG FIX: was `norm_cfg == 'BN'` — a no-op comparison. Validation runs in
    # a single process, so SyncBN must actually be replaced by plain BN.
    if validation_only and (norm_cfg == 'SyncBN'):
        norm_cfg = 'BN'

    # Accept either a full task name ("TaskXXX_...") or a bare numeric id.
    if not task.startswith("Task"):
        task_id = int(task)
        task = convert_id_to_task_name(task_id)

    if fold == 'all':
        pass
    else:
        fold = int(fold)

    plans_file, output_folder_name, dataset_directory, batch_dice, stage, \
    trainer_class = get_default_configuration(outpath, network, task, network_trainer, plans_identifier, \
                                              search_in=(CoTr.__path__[0], "training", "network_training"), \
                                              base_module='CoTr.training.network_training')

    trainer = trainer_class(plans_file, fold, norm_cfg, activation_cfg, output_folder=output_folder_name, dataset_directory=dataset_directory,
                            batch_dice=batch_dice, stage=stage, unpack_data=decompress_data,
                            deterministic=deterministic,
                            fp16=run_mixed_precision)

    if args.disable_saving:
        trainer.save_latest_only = False  # if false it will not store/overwrite _latest but separate files each
        trainer.save_intermediate_checkpoints = False  # whether or not to save checkpoint_latest
        trainer.save_best_checkpoint = False  # whether or not to save the best checkpoint according to self.best_val_eval_criterion_MA
        trainer.save_final_checkpoint = False  # whether or not to save the final checkpoint

    trainer.initialize(not validation_only)

    if find_lr:
        trainer.find_lr()
    else:
        if not validation_only:
            if args.continue_training:
                trainer.load_latest_checkpoint()
            trainer.run_training()
        else:
            if valbest:
                trainer.load_best_checkpoint(train=False)
            else:
                trainer.load_latest_checkpoint(train=False)

        trainer.network.eval()

        # predict validation
        trainer.validate(save_softmax=args.npz, validation_folder_name=val_folder)

        if network == '3d_lowres':
            print("predicting segmentations for the next stage of the cascade")
            predict_next_stage(trainer, join(dataset_directory, trainer.plans['data_identifier'] + "_stage%d" % 1))
# Script entry point: parse CLI arguments and launch training/validation.
if __name__ == "__main__":
    main()
| 7,006
| 49.775362
| 142
|
py
|
CoTr
|
CoTr-main/CoTr_package/CoTr/run/__init__.py
|
from __future__ import absolute_import
from . import *
| 54
| 26.5
| 38
|
py
|
Wasserstein-Q-learning
|
Wasserstein-Q-learning-main/Q_learning.py
|
# -*- coding: utf-8 -*-
"""
Robust Q Learning
"""
import numpy as np
from tqdm import tqdm
from scipy.optimize import minimize
import copy
def robust_q_learning(X,
                      A,
                      r,
                      c,
                      P_0, # Simulation of next state in dependence of x and a
                      p_0,
                      epsilon,
                      alpha,
                      x_0,
                      eps_greedy = 0.05,
                      Nr_iter = 1000,
                      q =1,
                      gamma_t_tilde = lambda t: 1/(t+1),
                      time_series = False,
                      T = None,
                      Q_0 = None):
    """
    Wasserstein-robust Q-learning.

    Parameters
    ----------
    X : numpy.ndarray
        A list or numpy array containing all states
    A : numpy.ndarray
        A list or numpy array containing all actions
    r : function
        Reward function r(x,a,y) depending on state-action-state.
    c : function
        Function c(x,y) depending on state and state for the lambda c transform.
    P_0 : function
        function P_0(x,a) that creates a new random variable in dependence of state and action
    p_0 : function
        function p_0(k,x,a) that determines the density at k given a state action pair (x,a)
    epsilon : float
        For the determination of the radius of the Wasserstein ball.
    alpha : float
        Discounting rate.
    x_0 : numpy.ndarray
        the initial state.
    eps_greedy : float, optional
        Parameter for the epsilon greedy policy. The default is 0.05.
    Nr_iter : int, optional
        Number of Iterations. The default is 1000.
    q : int, optional
        power of the Wasserstein ball. The default is 1.
    gamma_t_tilde : function, optional
        learning rate. The default is lambda t: 1/(t+1).
    time_series : boolean, optional
        Whether we consider the time series setting or not. The default is False.
    T : numpy.ndarray, optional
        The smaller space for the time series setting. The default is None.
    Q_0 : matrix, optional
        Initial value for the Q-value matrix. The default is None.

    Returns
    -------
    matrix
        The Q value matrix.
    """
    rng = np.random.default_rng()
    # Initialize the Q-value matrix (optionally warm-started from Q_0)
    Q = np.zeros([len(X), len(A)])
    if Q_0 is not None:
        Q = Q_0
    # Visit counter per state-action pair; drives the learning-rate schedule
    Visits = np.zeros([len(X), len(A)])
    # Bring A and X into a uniform 2D "list of row vectors" layout so index
    # lookup works for scalar and vector states/actions alike
    if np.ndim(A) > 1:
        A_list = A
    else:
        A_list = np.array([[a] for a in A])
    if np.ndim(X) > 1:
        X_list = X
    else:
        X_list = np.array([[x] for x in X])

    def a_index(a):
        # Row index of action a in A_list
        return np.flatnonzero((a == A_list).all(1))[0]

    def x_index(x):
        # Row index of state x in X_list
        return np.flatnonzero((x == X_list).all(1))[0]

    # f_t(x,a,y) = r(x,a,y) + alpha * max_b Q(y,b)
    def f(t, x, a, y):
        return r(x, a, y) + alpha*np.max(Q[x_index(y), :])

    # Lambda-c transform (already accounting for the fact that we consider -f_t)
    def lambda_c_transform(t, x, a, y, lam):
        return np.max([-f(t, x, a, z) - lam*c(z, y) for z in X])

    # Epsilon-greedy behavior policy
    def a_t(t, y):
        eps_bound = eps_greedy
        unif = np.random.uniform(0)
        return (unif > eps_bound)*A[np.argmax(Q[x_index(y), :])] + (unif <= eps_bound)*rng.choice(A)

    # Expectation that is maximized w.r.t. lambda
    def expected_value_to_optimize(t, x, a, lam):
        return np.sum([(-lambda_c_transform(t, x, a, k, lam))*p_0(k, x, a) for k in X]) - (epsilon**q)*lam

    # Same expectation in the time-series setting (only the last coordinate varies)
    def expected_value_to_optimize_ts(t, x, a, lam):
        return np.sum([(-lambda_c_transform(t, x, a, np.concatenate([x[:-1], [k]]), lam))*p_0(np.concatenate([x[:-1], [k]]), x, a) for k in T]) - (epsilon**q)*lam

    X_0 = x_0
    lam_0 = 1  # warm start for the inner lambda optimization
    for t in tqdm(range(Nr_iter)):
        # BUG FIX: sample the epsilon-greedy action exactly ONCE per step.
        # The original code called a_t twice (once to generate X_1, once for
        # the Q-update), so the updated Q entry could belong to a different
        # action than the one that produced the observed transition.
        x = X_0
        a = a_t(t, x)
        X_1 = P_0(x, a)
        Q_old = copy.deepcopy(Q)
        x_ind, a_ind = x_index(x), a_index(a)
        # Inner maximization over lambda (warm-started at the previous optimum)
        if time_series:
            lam_t = minimize(lambda lam: -expected_value_to_optimize_ts(t, x, a, lam),
                             x0=lam_0,
                             bounds=[(0, None)]).x
        else:
            lam_t = minimize(lambda lam: -expected_value_to_optimize(t, x, a, lam),
                             x0=lam_0,
                             bounds=[(0, None)]).x
        lam_0 = lam_t
        # Robust Q-update with visit-count learning rate
        Q[x_ind, a_ind] = Q_old[x_ind, a_ind] + gamma_t_tilde(Visits[x_ind, a_ind])*(-lambda_c_transform(t, x, a, X_1, lam_t) - (epsilon**q)*lam_t - Q_old[x_ind, a_ind])
        Visits[x_ind, a_ind] += 1
        X_0 = X_1
    return Q
# Classical Q learning #
def q_learning(X,
               A,
               r,
               P_0, # Simulation of next state in dependence of x and a
               alpha,
               x_0,
               eps_greedy = 0.05,
               Nr_iter = 1000,
               gamma_t_tilde = lambda t: 1/(t+1),
               Q_0 = None):
    """
    Classical (non-robust) Q-learning.

    Parameters
    ----------
    X : numpy.ndarray
        A list or numpy array containing all states
    A : numpy.ndarray
        A list or numpy array containing all actions
    r : function
        Reward function r(x,a,y) depending on state-action-state.
    P_0 : function
        function P_0(x,a) that creates a new random variable in dependence of state and action
    alpha : float
        Discounting rate.
    x_0 : numpy.ndarray
        the initial state.
    eps_greedy : float, optional
        Parameter for the epsilon greedy policy. The default is 0.05.
    Nr_iter : int, optional
        Number of Iterations. The default is 1000.
    gamma_t_tilde : function, optional
        learning rate. The default is lambda t: 1/(t+1).
    Q_0 : matrix, optional
        Initial value for the Q-value matrix. The default is None.

    Returns
    -------
    matrix
        The Q value matrix.
    """
    rng = np.random.default_rng()
    # Initialize the Q-value matrix (optionally warm-started from Q_0)
    Q = np.zeros([len(X), len(A)])
    if Q_0 is not None:
        Q = Q_0
    # Visit counter per state-action pair; drives the learning-rate schedule
    Visits = np.zeros([len(X), len(A)])
    # Bring A and X into a uniform 2D "list of row vectors" layout
    if np.ndim(A) > 1:
        A_list = A
    else:
        A_list = np.array([[a] for a in A])
    if np.ndim(X) > 1:
        X_list = X
    else:
        X_list = np.array([[x] for x in X])

    def a_index(a):
        # Row index of action a in A_list
        return np.flatnonzero((a == A_list).all(1))[0]

    def x_index(x):
        # Row index of state x in X_list
        return np.flatnonzero((x == X_list).all(1))[0]

    # Bellman target f(x,a,y) = r(x,a,y) + alpha * max_b Q(y,b)
    def f(t, x, a, y):
        return r(x, a, y) + alpha*np.max(Q[x_index(y), :])

    # Epsilon-greedy behavior policy
    def a_t(t, y):
        eps_bound = eps_greedy
        unif = np.random.uniform(0)
        return (unif > eps_bound)*A[np.argmax(Q[x_index(y), :])] + (unif <= eps_bound)*rng.choice(A)

    X_0 = x_0
    for t in tqdm(range(Nr_iter)):
        # BUG FIX: sample the action exactly ONCE per step. The original code
        # called a_t twice, so the Q-update could target a different action
        # than the one that generated the observed transition X_0 -> X_1.
        x = X_0
        a = a_t(t, x)
        X_1 = P_0(x, a)
        Q_old = copy.deepcopy(Q)
        x_ind, a_ind = x_index(x), a_index(a)
        # Standard Q-learning update with visit-count learning rate
        Q[x_ind, a_ind] = Q_old[x_ind, a_ind] + gamma_t_tilde(Visits[x_ind, a_ind])*(f(t, x, a, X_1) - Q_old[x_ind, a_ind])
        Visits[x_ind, a_ind] += 1
        X_0 = X_1
    return Q
| 7,677
| 32.094828
| 159
|
py
|
Pytorch-implementation-of-SRNet
|
Pytorch-implementation-of-SRNet-master/test.py
|
"""This module is used to test the Srnet model."""
from glob import glob
import torch
import numpy as np
import imageio as io
from model import Srnet
TEST_BATCH_SIZE = 40
COVER_PATH = "/path/to/cover/images/"
STEGO_PATH = "/path/to/stego/images/"
CHKPT = "./checkpoints/Srnet_model_weights.pt"
cover_image_names = glob(COVER_PATH)
stego_image_names = glob(STEGO_PATH)
cover_labels = np.zeros((len(cover_image_names)))
stego_labels = np.ones((len(stego_image_names)))
model = Srnet().cuda()
ckpt = torch.load(CHKPT)
model.load_state_dict(ckpt["model_state_dict"])
# pylint: disable=E1101
images = torch.empty((TEST_BATCH_SIZE, 1, 256, 256), dtype=torch.float)
# pylint: enable=E1101
test_accuracy = []
for idx in range(0, len(cover_image_names), TEST_BATCH_SIZE // 2):
cover_batch = cover_image_names[idx : idx + TEST_BATCH_SIZE // 2]
stego_batch = stego_image_names[idx : idx + TEST_BATCH_SIZE // 2]
batch = []
batch_labels = []
xi = 0
yi = 0
for i in range(2 * len(cover_batch)):
if i % 2 == 0:
batch.append(stego_batch[xi])
batch_labels.append(1)
xi += 1
else:
batch.append(cover_batch[yi])
batch_labels.append(0)
yi += 1
# pylint: disable=E1101
for i in range(TEST_BATCH_SIZE):
images[i, 0, :, :] = torch.tensor(io.imread(batch[i])).cuda()
image_tensor = images.cuda()
batch_labels = torch.tensor(batch_labels, dtype=torch.long).cuda()
# pylint: enable=E1101
outputs = model(image_tensor)
prediction = outputs.data.max(1)[1]
accuracy = (
prediction.eq(batch_labels.data).sum()
* 100.0
/ (batch_labels.size()[0])
)
test_accuracy.append(accuracy.item())
print(f"test_accuracy = {sum(test_accuracy)/len(test_accuracy):%.2f}")
| 1,824
| 27.968254
| 71
|
py
|
Pytorch-implementation-of-SRNet
|
Pytorch-implementation-of-SRNet-master/train.py
|
"""This module is use to train the Srnet model."""
import logging
import os
import sys
import time
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import transforms
from dataset import dataset
from opts.options import arguments
from model.model import Srnet
from utils.utils import (
latest_checkpoint,
adjust_learning_rate,
weights_init,
saver,
)
opt = arguments()

# Epoch summaries go to a file; DEBUG level keeps library messages too.
logging.basicConfig(
    filename="training.log",
    format="%(asctime)s %(message)s",
    level=logging.DEBUG,
)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

if __name__ == "__main__":
    # Training data gets light augmentation (random rotation up to 90 deg);
    # validation data is only converted to tensors.
    train_data = dataset.DatasetLoad(
        opt.cover_path,
        opt.stego_path,
        opt.train_size,
        transform=transforms.Compose(
            [
                transforms.ToPILImage(),
                transforms.RandomRotation(degrees=90),
                transforms.ToTensor(),
            ]
        ),
    )
    val_data = dataset.DatasetLoad(
        opt.valid_cover_path,
        opt.valid_stego_path,
        opt.val_size,
        transform=transforms.ToTensor(),
    )
    # Creating training and validation loader.
    train_loader = DataLoader(
        train_data, batch_size=opt.batch_size, shuffle=True
    )
    valid_loader = DataLoader(
        val_data, batch_size=opt.batch_size, shuffle=False
    )
    # model creation and initialization.
    model = Srnet()
    model.to(device)
    model = model.apply(weights_init)
    # The model emits log-probabilities (LogSoftmax), so NLLLoss matches.
    loss_fn = nn.NLLLoss()
    optimizer = torch.optim.Adamax(
        model.parameters(),
        lr=opt.lr,
        betas=(0.9, 0.999),
        eps=1e-8,
        weight_decay=0,
    )
    # Resume from the newest checkpoint when one exists.
    check_point = latest_checkpoint()
    if not check_point:
        START_EPOCH = 1
        if not os.path.exists(opt.checkpoints_dir):
            os.makedirs(opt.checkpoints_dir)
        print("No checkpoints found!!, Retraining started... ")
    else:
        pth = opt.checkpoints_dir + "net_" + str(check_point) + ".pt"
        ckpt = torch.load(pth)
        START_EPOCH = ckpt["epoch"] + 1
        model.load_state_dict(ckpt["model_state_dict"])
        optimizer.load_state_dict(ckpt["optimizer_state_dict"])
        print("Model Loaded from epoch " + str(START_EPOCH) + "..")
    for epoch in range(START_EPOCH, opt.num_epochs + 1):
        # (unused `test_accuracy` list removed — it was never read)
        training_loss = []
        training_accuracy = []
        validation_loss = []
        validation_accuracy = []
        # Training
        model.train()
        st_time = time.time()  # timing hooks (currently unreported)
        adjust_learning_rate(optimizer, epoch)
        for i, train_batch in enumerate(train_loader):
            # Each batch carries a cover and a stego copy of every image.
            images = torch.cat((train_batch["cover"], train_batch["stego"]), 0)
            labels = torch.cat(
                (train_batch["label"][0], train_batch["label"][1]), 0
            )
            images = images.to(device, dtype=torch.float)
            labels = labels.to(device, dtype=torch.long)
            optimizer.zero_grad()
            outputs = model(images)
            loss = loss_fn(outputs, labels)
            loss.backward()
            optimizer.step()
            training_loss.append(loss.item())
            prediction = outputs.data.max(1)[1]
            accuracy = (
                prediction.eq(labels.data).sum() * 100.0 / (labels.size()[0])
            )
            training_accuracy.append(accuracy.item())
            sys.stdout.write(
                f"\r Epoch:{epoch}/{opt.num_epochs}"
                f" Batch:{i+1}/{len(train_loader)}"
                f" Loss:{training_loss[-1]:.4f}"
                f" Acc:{training_accuracy[-1]:.2f}"
                f" LR:{optimizer.param_groups[0]['lr']:.4f}"
            )
        end_time = time.time()
        # Validation
        model.eval()
        with torch.no_grad():
            for i, val_batch in enumerate(valid_loader):
                images = torch.cat((val_batch["cover"], val_batch["stego"]), 0)
                labels = torch.cat(
                    (val_batch["label"][0], val_batch["label"][1]), 0
                )
                images = images.to(device, dtype=torch.float)
                labels = labels.to(device, dtype=torch.long)
                outputs = model(images)
                loss = loss_fn(outputs, labels)
                validation_loss.append(loss.item())
                prediction = outputs.data.max(1)[1]
                accuracy = (
                    prediction.eq(labels.data).sum()
                    * 100.0
                    / (labels.size()[0])
                )
                validation_accuracy.append(accuracy.item())
        # Compute each epoch average once and reuse it below (the old code
        # computed avg_train_loss/avg_valid_loss and then never used them,
        # recomputing the same sums inline in the message and state dict).
        avg_train_loss = sum(training_loss) / len(training_loss)
        avg_valid_loss = sum(validation_loss) / len(validation_loss)
        avg_train_acc = sum(training_accuracy) / len(training_accuracy)
        avg_valid_acc = sum(validation_accuracy) / len(validation_accuracy)
        message = (
            f"Epoch: {epoch}. "
            f"Train Loss:{avg_train_loss:.5f}. "
            f"Valid Loss:{avg_valid_loss:.5f}. "
            "Train"
            f" Acc:{avg_train_acc:.2f} "
            "Valid"
            f" Acc:{avg_valid_acc:.2f} "
        )
        print("\n", message)
        logging.info(message)
        state = {
            "epoch": epoch,
            "opt": opt,
            "train_loss": avg_train_loss,
            "valid_loss": avg_valid_loss,
            "train_accuracy": avg_train_acc,
            "valid_accuracy": avg_valid_acc,
            "model_state_dict": model.state_dict(),
            "optimizer_state_dict": optimizer.state_dict(),
            "lr": optimizer.param_groups[0]["lr"],
        }
        saver(state, opt.checkpoints_dir, epoch)
| 5,885
| 29.816754
| 80
|
py
|
Pytorch-implementation-of-SRNet
|
Pytorch-implementation-of-SRNet-master/dataset/dataset.py
|
"""This module provide the data sample for training."""
import os
from typing import Tuple
import torch
from torch import Tensor
from torch.utils.data import Dataset
import imageio as io
from opts.options import arguments
opt = arguments()
# pylint: disable=E1101
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# pylint: enable=E1101
class DatasetLoad(Dataset):
    """Dataset yielding (cover, stego) image pairs with fixed labels 0/1."""

    def __init__(
        self,
        cover_path: str,
        stego_path: str,
        size: int,
        transform: Tuple = None,
    ) -> None:
        """Store the image directories, dataset size and transform.

        Args:
            cover_path (str): directory holding cover images (1.pgm, 2.pgm, ...).
            stego_path (str): directory holding the matching stego images.
            size (int): number of image pairs exposed by this dataset.
            transform (Tuple, optional): callable applied to both images.
        """
        self.cover = cover_path
        self.stego = stego_path
        self.transforms = transform
        self.data_size = size

    def __len__(self) -> int:
        """Number of (cover, stego) pairs in the dataset."""
        return self.data_size

    def __getitem__(self, index: int) -> Tuple[Tensor, Tensor]:
        """Return the pair for *index* (files are named 1-based: '<i+1>.pgm').

        Args:
            index (int): value in range(0, len(dataset)).

        Returns:
            dict with keys "cover", "stego" and "label" ([0, 1] tensors).
        """
        file_name = str(index + 1) + ".pgm"
        cover_img = io.imread(os.path.join(self.cover, file_name))
        stego_img = io.imread(os.path.join(self.stego, file_name))
        # pylint: disable=E1101
        cover_label = torch.tensor(0, dtype=torch.long).to(device)
        stego_label = torch.tensor(1, dtype=torch.long).to(device)
        # pylint: enable=E1101
        if self.transforms:
            cover_img = self.transforms(cover_img)
            stego_img = self.transforms(stego_img)
        return {
            "cover": cover_img,
            "stego": stego_img,
            "label": [cover_label, stego_label],
        }
| 2,091
| 29.318841
| 77
|
py
|
Pytorch-implementation-of-SRNet
|
Pytorch-implementation-of-SRNet-master/dataset/__init__.py
| 1
| 0
| 0
|
py
|
|
Pytorch-implementation-of-SRNet
|
Pytorch-implementation-of-SRNet-master/opts/options.py
|
"""This module provides method to enter various input to the model training."""
import argparse
def arguments() -> str:
"""This function returns arguments."""
parser = argparse.ArgumentParser()
parser.add_argument(
"--cover_path",
default="D:\\Github\\Toy-Bossbase-dataset\\bossbase_toy_dataset\\train\\cover",
)
parser.add_argument(
"--stego_path",
default="D:\\Github\\Toy-Bossbase-dataset\\bossbase_toy_dataset\\train\\stego",
)
parser.add_argument(
"--valid_cover_path",
default="D:\\Github\\Toy-Bossbase-dataset\\bossbase_toy_dataset\\valid\\cover",
)
parser.add_argument(
"--valid_stego_path",
default=(
"D:\\Github\\Toy-Bossbase-dataset\\bossbase_toy_dataset\\valid\\stego"
),
)
parser.add_argument("--checkpoints_dir", default="./checkpoints/")
parser.add_argument("--batch_size", type=int, default=10)
parser.add_argument("--num_epochs", type=int, default=50)
parser.add_argument("--train_size", type=int, default=20)
parser.add_argument("--val_size", type=int, default=10)
parser.add_argument("--lr", type=float, default=0.001)
opt = parser.parse_args()
return opt
| 1,235
| 33.333333
| 87
|
py
|
Pytorch-implementation-of-SRNet
|
Pytorch-implementation-of-SRNet-master/opts/__init__.py
| 0
| 0
| 0
|
py
|
|
Pytorch-implementation-of-SRNet
|
Pytorch-implementation-of-SRNet-master/utils/utils.py
|
"""This module provides utility function for training."""
import os
import re
from typing import Any, Dict
import torch
from torch import nn
from opts.options import arguments
opt = arguments()
def saver(state: Dict[str, float], save_dir: str, epoch: int) -> None:
    """Persist a training checkpoint as ``<save_dir>/net_<epoch>.pt``.

    Uses ``os.path.join`` so the call works whether or not *save_dir*
    carries a trailing separator (the old string concatenation silently
    produced e.g. ``checkpointsnet_1.pt`` without one).

    Args:
        state: checkpoint payload (model/optimizer state, metrics, ...).
        save_dir: directory the checkpoint file is written into.
        epoch: epoch number embedded in the file name.
    """
    torch.save(state, os.path.join(save_dir, "net_" + str(epoch) + ".pt"))
def latest_checkpoint() -> int:
    """Return the highest epoch number among checkpoint files, or None.

    Scans ``opt.checkpoints_dir`` and extracts every integer from the
    concatenated file names (files are named ``net_<epoch>.pt``).
    Returns None when the directory is missing or empty.
    """
    latest = None
    if os.path.exists(opt.checkpoints_dir):
        all_chkpts = "".join(os.listdir(opt.checkpoints_dir))
        if len(all_chkpts) > 0:
            # FIX: raw string for the regex — "\d" in a plain literal is an
            # invalid escape sequence (SyntaxWarning since Python 3.12).
            latest = max(map(int, re.findall(r"\d+", all_chkpts)))
    return latest
def adjust_learning_rate(optimizer: Any, epoch: int) -> None:
    """Decay the learning rate 10x every 30 epochs, starting from opt.lr."""
    decayed = opt.lr * (0.1 ** (epoch // 30))
    for group in optimizer.param_groups:
        group["lr"] = decayed
# Weight initialisation applied via model.apply(weights_init).
def weights_init(param: Any) -> None:
    """Xavier-init Conv2d (bias 0.2); N(0, 0.01)-init Linear (bias 0)."""
    if isinstance(param, nn.Conv2d):
        nn.init.xavier_uniform_(param.weight.data)
        if param.bias is not None:
            nn.init.constant_(param.bias.data, 0.2)
    elif isinstance(param, nn.Linear):
        nn.init.normal_(param.weight.data, mean=0.0, std=0.01)
        nn.init.constant_(param.bias.data, 0.0)
| 1,504
| 29.714286
| 75
|
py
|
Pytorch-implementation-of-SRNet
|
Pytorch-implementation-of-SRNet-master/utils/__init__.py
| 0
| 0
| 0
|
py
|
|
Pytorch-implementation-of-SRNet
|
Pytorch-implementation-of-SRNet-master/model/utils.py
|
"""This module provide building blocks for SRNet."""
from torch import nn
from torch import Tensor
class ConvBn(nn.Module):
    """3x3 stride-1 Conv2d (no bias) followed by BatchNorm2d."""

    def __init__(self, in_channels: int, out_channels: int) -> None:
        """Create the conv and batch-norm sub-modules.

        Args:
            in_channels (int): no. of input channels.
            out_channels (int): no. of output channels.
        """
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=3,
            stride=1,
            padding=1,
            bias=False,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)

    def forward(self, inp: Tensor) -> Tensor:
        """Apply Conv2d then BatchNorm; spatial size is preserved.

        Returns:
            Tensor: output of Conv2D -> BN.
        """
        convolved = self.conv(inp)
        return self.batch_norm(convolved)
class Type1(nn.Module):
    """SRNet type-1 layer: ConvBn followed by ReLU."""

    def __init__(self, in_channels: int, out_channels: int) -> None:
        super().__init__()
        self.convbn = ConvBn(in_channels, out_channels)
        self.relu = nn.ReLU()

    def forward(self, inp: Tensor) -> Tensor:
        """Return ReLU(BN(Conv(inp))).

        Args:
            inp (Tensor): input tensor.

        Returns:
            Tensor: output of the type-1 layer.
        """
        normalized = self.convbn(inp)
        return self.relu(normalized)
class Type2(nn.Module):
    """SRNet type-2 layer: residual block, inp + ConvBn(Type1(inp))."""

    def __init__(self, in_channels: int, out_channels: int) -> None:
        super().__init__()
        self.type1 = Type1(in_channels, out_channels)
        # NOTE(review): the residual sum only works when in_channels ==
        # out_channels, as in every Srnet usage — confirm before reusing
        # with differing counts.
        self.convbn = ConvBn(in_channels, out_channels)

    def forward(self, inp: Tensor) -> Tensor:
        """Return the residual sum of the input and the transformed input.

        Args:
            inp (Tensor): input tensor.

        Returns:
            Tensor: output of the type-2 layer.
        """
        transformed = self.convbn(self.type1(inp))
        return inp + transformed
class Type3(nn.Module):
    """SRNet type-3 layer: downsampling residual block (spatial size / 2)."""

    def __init__(self, in_channels: int, out_channels: int) -> None:
        super().__init__()
        # 1x1 stride-2 projection for the shortcut branch.
        self.conv1 = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=1,
            stride=2,
            padding=0,
            bias=False,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.type1 = Type1(in_channels, out_channels)
        self.convbn = ConvBn(out_channels, out_channels)
        self.pool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1)

    def forward(self, inp: Tensor) -> Tensor:
        """Return shortcut + pooled main branch.

        Args:
            inp (Tensor): input tensor.

        Returns:
            Tensor: output of the type-3 layer.
        """
        shortcut = self.batch_norm(self.conv1(inp))
        main = self.pool(self.convbn(self.type1(inp)))
        return shortcut + main
class Type4(nn.Module):
    """SRNet type-4 layer: Type1 -> ConvBn -> global average pool to 1x1."""

    def __init__(self, in_channels: int, out_channels: int) -> None:
        super().__init__()
        self.type1 = Type1(in_channels, out_channels)
        self.convbn = ConvBn(out_channels, out_channels)
        self.gap = nn.AdaptiveAvgPool2d(output_size=1)

    def forward(self, inp: Tensor) -> Tensor:
        """Return (B, C, 1, 1) globally pooled features.

        Args:
            inp (Tensor): input tensor.

        Returns:
            Tensor: output of the type-4 layer.
        """
        features = self.convbn(self.type1(inp))
        return self.gap(features)
if __name__ == "__main__":
import torch
tensor = torch.randn((1, 1, 256, 256))
lt1 = Type1(1, 64)
output = lt1(tensor)
print(output.shape)
| 3,652
| 27.317829
| 68
|
py
|
Pytorch-implementation-of-SRNet
|
Pytorch-implementation-of-SRNet-master/model/model.py
|
""" This module creates SRNet model."""
import torch
from torch import Tensor
from torch import nn
from model.utils import Type1, Type2, Type3, Type4
class Srnet(nn.Module):
    """SRNet steganalysis network; maps images to 2-class log-probabilities."""

    def __init__(self) -> None:
        """Build the four layer groups, the linear head and log-softmax."""
        super().__init__()
        self.type1s = nn.Sequential(Type1(1, 64), Type1(64, 16))
        self.type2s = nn.Sequential(*[Type2(16, 16) for _ in range(5)])
        self.type3s = nn.Sequential(
            Type3(16, 16),
            Type3(16, 64),
            Type3(64, 128),
            Type3(128, 256),
        )
        self.type4 = Type4(256, 512)
        self.dense = nn.Linear(512, 2)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, inp: Tensor) -> Tensor:
        """Return log-probabilities for a batch of grayscale images.

        Args:
            inp (Tensor): input image tensor of shape (Batch, 1, 256, 256)

        Returns:
            Tensor: log-probabilities of shape (Batch, 2)
        """
        features = inp
        for stage in (self.type1s, self.type2s, self.type3s):
            features = stage(features)
        pooled = self.type4(features)
        flat = pooled.view(pooled.size(0), -1)
        return self.softmax(self.dense(flat))
if __name__ == "__main__":
image = torch.randn((1, 1, 256, 256))
net = Srnet()
print(net(image).shape)
| 1,424
| 26.403846
| 74
|
py
|
Pytorch-implementation-of-SRNet
|
Pytorch-implementation-of-SRNet-master/model/__init__.py
| 0
| 0
| 0
|
py
|
|
Seq-Att-Affect
|
Seq-Att-Affect-master/file_walker.py
|
import os
def walk(path):
    """Walk through all entries directly inside *path*.

    Yields a File() for every regular file and a Folder() for everything
    else (directories etc.).
    """
    for entry in os.listdir(path):
        full = os.path.join(path, entry)
        # BUG FIX: the original tested os.path.isfile(path) — i.e. the
        # parent directory itself, which is never a file — so every entry
        # (including files) was yielded as a Folder. Test the child path.
        if os.path.isfile(full):
            yield File(full)
        else:
            yield Folder(full)
class PathEntity:
    """Base class for filesystem entries (files and folders).

    Attributes:
        isFile: True when the path points at a regular file.
        isDirectory: the negation of isFile.
        full_path: the full path this entity was built from.
        name: base name of the path without its extension.
    """
    def __init__(self, path):
        self.full_path = path
        self.isFile = os.path.isfile(path)
        self.isDirectory = not self.isFile
        base = os.path.basename(self.full_path)
        self.name = os.path.splitext(base)[0]
class Folder(PathEntity):
    """A directory entry; walk() yields its direct children.

    Example:
        for entry in folder.walk():
            print(entry.name)
    """
    def walk(self):
        return walk(self.full_path)
class File(PathEntity):
    """A regular-file entry with file-specific helpers.

    Attributes:
        extension: the file extension, including the leading dot.

    Methods:
        open(mode): open the file (use with the "with" keyword!).
    """
    def __init__(self, path):
        super(File, self).__init__(path)
        # BUG FIX: was self.full_Path (capital P), an attribute that never
        # exists — every File() construction raised AttributeError.
        self.extension = os.path.splitext(self.full_path)[1]
    def open(self, mode):
        return open(self.full_path, mode)
if __name__ == "__main__":
star = lambda x: "*" if x else "/"
for obj in walk("/opt/"):
print(("{}{}:".format(obj.name, star(obj.isFile))))
if obj.isDirectory:
for sub_obj in obj.walk():
print(("\t{}{}".format(sub_obj.name, star(sub_obj.isFile))))
| 1,711
| 27.065574
| 76
|
py
|
Seq-Att-Affect
|
Seq-Att-Affect-master/utils.py
|
import numpy as np
import re
import cv2
from operator import truediv
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
from pathlib import Path
#import tensorflow as tf
import random
import csv
#from config import *
from scipy.integrate.quadrature import simps
import math
from scipy.stats import multivariate_normal
import os
from random import randint
import glob
from scipy.integrate import simps
from PIL import Image,ImageFilter,ImageEnhance
from math import isnan
import torch
def weights_init_uniform_rule(m):
    """Initialise Linear/Conv2d layers with the uniform(-1/sqrt(n), 1/sqrt(n)) rule.

    For Linear layers n = in_features and the bias is zeroed; for Conv2d
    layers n = in_channels and the bias is left untouched.
    """
    layer_name = type(m).__name__
    if layer_name.find('Linear') != -1:
        bound = 1.0 / np.sqrt(m.in_features)
        m.weight.data.uniform_(-bound, bound)
        m.bias.data.fill_(0)
    if layer_name.find('Conv2d') != -1:
        bound = 1.0 / np.sqrt(m.in_channels)
        m.weight.data.uniform_(-bound, bound)
def update_lr_ind(opt, lr):
    """Set the learning rate of every parameter group of *opt* to *lr*."""
    for group in opt.param_groups:
        group['lr'] = lr
def update_lr(lr, optimizer):
    """Set the learning rate of every parameter group of *optimizer* to *lr*.

    Note the (lr, optimizer) argument order — the reverse of update_lr_ind.
    """
    for group in optimizer.param_groups:
        group['lr'] = lr
def reset_grad(optimizer):
    """Clear the gradient buffers of *optimizer* (delegates to zero_grad)."""
    optimizer.zero_grad()
def denorm(x):
    """Map values from [-1, 1] into [0, 1], clamping anything outside."""
    shifted = (x + 1) / 2
    return shifted.clamp_(0, 1)
def gradient_penalty(y, x):
    """WGAN-GP gradient penalty: mean((||dy/dx||_2 - 1)^2) over the batch."""
    dev = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    grad_outputs = torch.ones(y.size()).to(dev)
    grads = torch.autograd.grad(outputs=y,
                                inputs=x,
                                grad_outputs=grad_outputs,
                                retain_graph=True,
                                create_graph=True,
                                only_inputs=True)[0]
    flat = grads.view(grads.size(0), -1)
    norms = torch.sqrt((flat ** 2).sum(dim=1))
    return ((norms - 1) ** 2).mean()
def label2onehot(labels, dim):
    """Convert a 1-D tensor of label indices into (batch, dim) one-hot rows."""
    n = labels.size(0)
    onehot = torch.zeros(n, dim)
    onehot[torch.arange(n), labels.long()] = 1
    return onehot
def print_network(model, name):
    """Print the model structure, its name and its total parameter count."""
    total = sum(p.numel() for p in model.parameters())
    print(model)
    print(name)
    print("The number of parameters: {}".format(total))
def worker_init_fn(worker_id):
    """Seed numpy per dataloader worker, offset from the current base seed."""
    base_seed = np.random.get_state()[1][0]
    np.random.seed(base_seed + worker_id)
def OpenCVtoPIL(opencv_image = None):
    """Convert a BGR OpenCV image (ndarray) into an RGB PIL Image."""
    rgb = cv2.cvtColor(opencv_image, cv2.COLOR_BGR2RGB)
    return Image.fromarray(rgb)
def PILtoOpenCV(pil_image = None):
    """Convert an RGB PIL Image into a BGR OpenCV ndarray (independent copy)."""
    arr = np.array(pil_image)
    return arr[:, :, ::-1].copy()
def checkDirMake(directory):
    """Create *directory* (including parents) when it does not exist yet."""
    import os
    if not os.path.exists(directory):
        os.makedirs(directory)
def convertToOneHot(vector, num_classes=None):
    """One-hot encode a 1-D integer vector into a 2-D int array.

    An input value of j at position i sets a 1 in row i, column j.

    Example:
        v = np.array((1, 0, 4))
        convertToOneHot(v)
        -> [[0 1 0 0 0]
            [1 0 0 0 0]
            [0 0 0 0 1]]
    """
    assert isinstance(vector, np.ndarray)
    assert len(vector) > 0
    if num_classes is None:
        num_classes = np.max(vector) + 1
    else:
        assert num_classes > 0
        assert num_classes >= np.max(vector)
    onehot = np.zeros(shape=(len(vector), num_classes))
    onehot[np.arange(len(vector)), vector] = 1
    return onehot.astype(int)
#print (convertToOneHot(np.array([7]),num_classes = 8))
def readCSV(fileName):
    """Read a tracker CSV file into a list of single-float rows.

    Each data line is expected as "frame,value"; the header line containing
    "frame" is skipped and only the value column is kept. Returns None for
    file names without a '.csv' extension.
    """
    if '.csv' not in fileName:
        return None
    values = []
    with open(fileName, 'r') as csvFile:
        for row in csv.reader(csvFile, delimiter=';'):
            if 'frame' not in row[0]:
                values.append([float(row[0].split(',')[1])])
    return values
def generalNoise(tImageB = None,noiseType = 0,noiseParam = 0):
    """Apply one of several degradations to a PIL image and return it.

    noiseType selects the degradation; noiseParam is its strength:
      0: identity (no change)
      1: downsample by 2, noiseParam times, then upsample back
      2: Gaussian blur with radius noiseParam
      3: additive Gaussian noise with variance noiseParam (via addNoise)
      4: brightness scaling by factor noiseParam
      5: convert to grayscale, replicated into 3 RGB channels
    """
    if noiseType == 0 :
        tImageB = tImageB
    elif noiseType == 1: #downsample
        oWidth, oHeight = tImageB.size
        for i in range(int(noiseParam)) :#Scale down (/2) blurLevel times
            width, height = tImageB.size
            tImageB = tImageB.resize((width//2,height//2))
            #print(tImageB.size)
        # Scale back up to the original size, keeping the information loss.
        tImageB = tImageB.resize((oWidth,oHeight))
    elif noiseType == 2 : #Gaussian blur
        tImageB = tImageB.filter(ImageFilter.GaussianBlur(noiseParam))
    elif noiseType == 3 : #Gaussian noise
        #tImageB = addNoise(tImageB)
        #convert to opencv
        opencvImage = cv2.cvtColor(np.array(tImageB), cv2.COLOR_RGB2BGR)
        #print(opencvImage)
        opencvImage = addNoise(opencvImage,var=noiseParam)
        pilImage = cv2.cvtColor(opencvImage,cv2.COLOR_BGR2RGB)
        #tImageB = Image.fromarray(random_noise(opencvImage))
        tImageB = Image.fromarray(pilImage)
    elif noiseType == 4 : #Brightness :
        #tImageB.show()
        #print(noiseParam)
        e = ImageEnhance.Brightness(tImageB)
        tImageB = e.enhance(noiseParam)
        #tImageB.show()
        #tImageB.show()
        #opencvImage = cv2.cvtColor(np.array(tImageB), cv2.COLOR_RGB2BGR)
        #print('before',opencvImage)
        #opencvImage = np.asarray(opencvImage*noiseParam,dtype=np.int32)
        #print(opencvImage.shape)
        #test = opencvImage.astype(np.float64)*noiseParam
        '''print(opencvImage)
        print(test)'''
        #opencvImage = test.astype(np.uint8)
        '''for i in range(0,opencvImage.shape[0]):
            for j in range(0,opencvImage.shape[1]):
                #print(opencvImage[i,j],noiseParam)
                opencvImage[i,j,0] = round(opencvImage[i,j,0] * noiseParam)
                opencvImage[i,j,1] = round(opencvImage[i,j,1] * noiseParam)
                opencvImage[i,j,2] = round(opencvImage[i,j,2] * noiseParam)
                #print(opencvImage[i,j])
        #print('after',opencvImage)
        '''
        #pilImage = cv2.cvtColor(opencvImage,cv2.COLOR_BGR2RGB)
        #tImageB = Image.fromarray(pilImage)
    elif noiseType == 5 :
        # Grayscale, then stack the single channel three times to keep an
        # RGB-shaped image.
        tImageB = tImageB.convert('L')
        np_img = np.array(tImageB, dtype=np.uint8)
        np_img = np.dstack([np_img, np_img, np_img])
        tImageB = Image.fromarray(np_img, 'RGB')
    return tImageB
def addNoise (image,noise_type="gauss",var = .01):
    """
    Generate noise on a given image based on the required noise type.

    Input parameters:
        image: ndarray, shape (rows, cols, channels)
        noise_type: string
            'gauss'    additive Gaussian noise with variance `var`
            'poisson'  Poisson-distribution based noise
            's&p'      salt-and-pepper noise (values forced to 0 or 255);
                       NOTE: modifies the input array in place
            'speckle'  multiplicative noise: out = image + n*image
        var: variance of the Gaussian noise (only used for 'gauss')
    Any other noise_type returns the input unchanged.
    """
    row,col,ch= image.shape
    if noise_type == "gauss":
        mean = 0.0
        #var = 0.001
        sigma = var**0.5
        gauss = np.array(image.shape)
        gauss = np.random.normal(mean,sigma,(row,col,ch))
        gauss = gauss.reshape(row,col,ch)
        #print(gauss)
        noisy = image + gauss*255
        # NOTE(review): the uint8 cast wraps values outside [0, 255] rather
        # than clipping them — confirm this is intended.
        return noisy.astype('uint8')
    elif noise_type == "s&p":
        s_vs_p = 0.5
        amount = 0.09
        # NOTE(review): `out` aliases `image`, so the caller's array is
        # modified in place.
        out = image
        # Generate Salt '1' noise
        num_salt = np.ceil(amount * image.size * s_vs_p)
        # NOTE(review): indexing with a list of arrays is legacy fancy
        # indexing; newer numpy expects a tuple — verify against the
        # installed numpy version.
        coords = [np.random.randint(0, i - 1, int(num_salt))
                  for i in image.shape]
        out[coords] = 255
        # Generate Pepper '0' noise
        num_pepper = np.ceil(amount* image.size * (1. - s_vs_p))
        coords = [np.random.randint(0, i - 1, int(num_pepper))
                  for i in image.shape]
        out[coords] = 0
        return out
    elif noise_type == "poisson":
        vals = len(np.unique(image))
        vals = 2 ** np.ceil(np.log2(vals))
        noisy = np.random.poisson(image * vals) / float(vals)
        return noisy
    elif noise_type =="speckle":
        gauss = np.random.randn(row,col,ch)
        gauss = gauss.reshape(row,col,ch)
        noisy = image + image * gauss
        return noisy
    else:
        return image
class UnNormalize(object):
    """Invert a torchvision-style Normalize: t = t * std + mean per channel."""

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, tensor):
        """Un-normalise *tensor* of shape (C, H, W) in place and return it.

        Args:
            tensor (Tensor): normalised image tensor of size (C, H, W).
        Returns:
            Tensor: the same tensor, de-normalised.
        """
        for channel, m, s in zip(tensor, self.mean, self.std):
            # Inverse of the normalize code: t.sub_(m).div_(s)
            channel.mul_(s).add_(m)
        return tensor
def imageLandmarking(img, ldmrk, isPIL = True,inputGT = None):
    """Draw 68 predicted landmarks (green) on a copy of *img*.

    ldmrk is a flat array laid out as 68 x-coordinates followed by 68
    y-coordinates; inputGT, when given, uses the same layout and is drawn
    in red. Returns a BGR OpenCV image.
    """
    if isPIL :
        # Convert the PIL RGB image to OpenCV's BGR layout.
        # (FIX: removed a leftover debug `print(img)` here.)
        theImage = cv2.cvtColor(np.array(img),cv2.COLOR_RGB2BGR)
    else :
        theImage = img.copy()
    for y in range(68) :
        cv2.circle(theImage,(int(ldmrk[y]),int(ldmrk[y+68])),2,(0,255,0) )
        if inputGT is not None :
            cv2.circle(theImage,(int(inputGT[y]),int(inputGT[y+68])),2,(0,0,255) )
    return theImage
def unnormalizedToCV(input = [],customNormalize = None):
    """Undo normalisation on a batch and convert each item to BGR uint8.

    When customNormalize is None a (0.5, 0.5)-style UnNormalize is applied
    and intensities scaled by 255; otherwise customNormalize is simply
    added after transposing to HWC. Returns a list of OpenCV images.
    """
    #unorm = UnNormalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
    unorm = UnNormalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
    images = []
    for idx in range(input.shape[0]) :
        if customNormalize is None :
            frame = (unorm(input[idx]).numpy()*255).transpose((1,2,0))
        else :
            frame = (input[idx].numpy().transpose(1,2,0) + customNormalize)
        # Height x width x channel, integer intensities, BGR ordering.
        images.append(cv2.cvtColor(frame.astype(np.uint8 ),cv2.COLOR_RGB2BGR))
    return images
def unnormalizedAndLandmark(input = [], inputPred = [],inputGT = None,customNormalize = None,ldmarkNumber=68):
    """Un-normalise a batch of images and draw predicted/GT landmarks.

    Predicted landmarks (inputPred) are drawn in green, ground truth
    (inputGT) in red; coordinates are clamped via scale(). Landmark rows
    are laid out as ldmarkNumber x-coordinates followed by ldmarkNumber
    y-coordinates.
    """
    #input is unnormalized [batch_size, channel, height, width] tensor from pytorch
    #inputGT is [batch_size, 136] tensor landmarks
    #Output is [batch_size, height,width,channel] BGR, 0-255 Intensities opencv list of landmarked image
    output = []
    inputPred = inputPred.numpy()
    if inputGT is not None :
        inputGT = inputGT.numpy()
    #unorm = UnNormalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
    unorm = UnNormalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
    for i in range(inputPred.shape[0]) :
        #Unnormalized it, convert to numpy and multiple by 255.
        if customNormalize is None :
            theImage = (unorm(input[i]).numpy()*255).transpose((1,2,0))
        else :
            theImage = (input[i].numpy().transpose(1,2,0) + customNormalize)
        #Then transpose to be height,width,channel, to Int and BGR formate
        theImage = cv2.cvtColor(theImage.astype(np.uint8 ),cv2.COLOR_RGB2BGR)
        #Now landmark it.
        for y in range(ldmarkNumber) :
            cv2.circle(theImage,(int(scale(inputPred[i,y])),int(scale(inputPred[i,y+ldmarkNumber]))),2,(0,255,0) )
            if inputGT is not None :
                cv2.circle(theImage,(int(scale(inputGT[i,y])),int(scale(inputGT[i,y+ldmarkNumber]))),2,(0,0,255) )
        output.append(theImage)
    return output
def scale(input):
    """Clamp a coordinate into [-99999, 99999]; NaN maps to 0."""
    if input > 99999 :
        return 99999
    elif input < -99999 :
        return -99999
    elif isnan(input):
        return 0
    return input
def plotImages(input = [], title = None, n_row = 4, n_col = 4, fromOpenCV = True,fileName = None,show=False):
    """Plot an n_row x n_col grid of images with optional per-image titles.

    Images are converted from BGR when fromOpenCV is True; the figure is
    optionally saved to fileName and/or shown interactively.
    """
    fig = plt.figure()
    for idx in range(n_row * n_col) :
        axis = fig.add_subplot(n_row, n_col, idx + 1)
        if title is not None :
            axis.set_title(title[idx])
        if fromOpenCV :
            plt.imshow(cv2.cvtColor(input[idx], cv2.COLOR_BGR2RGB))
        else :
            plt.imshow(input[idx])
    if fileName :
        plt.savefig(fileName)
    if show :
        plt.show()
def calc_bb_IOU(boxA, boxB):
    """Intersection-over-union of two [x1, y1, x2, y2] boxes.

    Coordinates are treated as inclusive pixel indices, hence the +1 in
    the width/height terms.
    """
    # Corners of the intersection rectangle.
    ix1 = max(boxA[0], boxB[0])
    iy1 = max(boxA[1], boxB[1])
    ix2 = min(boxA[2], boxB[2])
    iy2 = min(boxA[3], boxB[3])
    # Intersection area (zero when the boxes do not overlap).
    inter = max(0, ix2 - ix1 + 1) * max(0, iy2 - iy1 + 1)
    # Individual box areas.
    areaA = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
    areaB = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
    # Union = sum of areas minus the double-counted intersection.
    return inter / float(areaA + areaB - inter)
def read_kp_file(filename,flatten = False):
    """Read a 300-W style .pts landmark file into an (n, 2) array.

    The first three lines (version, n_points, '{') and the final '}' line
    are skipped. With flatten=True a column-major flat vector is returned
    (all x-coordinates first, then all y-coordinates). Returns None when
    the file name does not contain 'pts'.
    """
    points = []
    if 'pts' in filename:
        with open(filename) as fh:
            rows = [re.split(r'\t+', line.strip()) for line in fh]
        skip = {0, 1, 2, len(rows) - 1}
        for idx, row in enumerate(rows):
            if idx not in skip:
                points.append([float(tok) for tok in row[0].split()])
        if flatten:
            return np.asarray(points).flatten('F')
        else:
            return np.asarray(points)
def read_kp_file_text(filename):
    """Read a landmark .txt file into np.concatenate((xs, ys)).

    The first two lines and the last line are skipped; the remaining
    whitespace-separated numbers alternate x, y, x, y, ... Returns None
    when the file name does not contain 'txt'.
    """
    xs = []
    ys = []
    if 'txt' in filename:
        with open(filename) as fh:
            rows = [re.split(r'\t+', line.strip()) for line in fh]
        #print(rows)
        counter = 0
        skip = (0, 1, len(rows) - 1)
        for idx, row in enumerate(rows):
            if idx not in skip:
                for token in row[0].split():
                    if counter % 2 == 0:
                        xs.append(float(token))
                    else:
                        ys.append(float(token))
                    counter += 1
        return np.concatenate((xs, ys))
def read_bb_file(filename):
    """Read a bounding-box .pts file (same layout as the landmark files).

    Skips the three header lines and the trailing '}' line; returns an
    (n, k) float array, or None when 'pts' is not in the file name.
    """
    boxes = []
    if 'pts' in filename:
        with open(filename) as fh:
            rows = [re.split(r'\t+', line.strip()) for line in fh]
        skip = {0, 1, 2, len(rows) - 1}
        for idx, row in enumerate(rows):
            if idx not in skip:
                boxes.append([float(tok) for tok in row[0].split()])
        return np.asarray(boxes)
def errCalcNoisy(catTesting = 1, localize = False, t_dir = "300W-Test/01_Indoor/",name='Re3A',is3D=False,ext = ".txt",makeFlag = False):
    """Aggregate per-frame error files and plot cumulative error curves.

    This function was a byte-for-byte duplicate of errCalc(); it now
    delegates to it so the logic lives in one place. See errCalc for the
    parameter documentation. Returns the numpy array of all errors.
    """
    return errCalc(catTesting=catTesting, localize=localize, t_dir=t_dir,
                   name=name, is3D=is3D, ext=ext, makeFlag=makeFlag)
def errCalc(catTesting = 1, localize = False, t_dir = "300W-Test/01_Indoor/",name='Re3A',is3D=False,ext = ".txt",makeFlag = False):
    """Aggregate per-frame landmark errors from text files, write a 300W-style
    CED result file and render the comparison plot.

    Returns the flat numpy array of every error value that was read.
    """
    # Resolve the directory that holds the per-video error files.
    if localize is False:
        if is3D:
            src_dir = curDir + 'images/300VW-Test_M/cat' + str(catTesting) + '/'
        else:
            src_dir = curDir + 'images/300VW-Test/cat' + str(catTesting) + '/'
    else:
        src_dir = curDir + t_dir + '/'

    # First pass: one float per line in every matching file.
    errors = []
    for fname in glob.glob1(src_dir, "*" + ext):
        print(("Opening " + src_dir + fname))
        fh = open(src_dir + fname)
        for line in fh:
            errors.append(float(line))
        fh.close()
    all_err = np.array(errors)

    # Optional second pass: collect tracking (col 1) / detection (col 0) flags.
    flags_tr, flags_d = [], []
    if makeFlag:
        for fname in glob.glob1(src_dir, "*" + ext):
            print(("Opening " + src_dir + fname))
            fh = open(src_dir + fname)
            for line in fh:
                cols = [float(j) for j in line.split()]
                flags_tr.append(float(cols[1]))
                flags_d.append(float(cols[0]))
            fh.close()

    if localize is False:
        # 300VW category result.
        fileName = "src/result_compared/cat"+str(catTesting)+"/"
        aboveT = makeErrTxt(all_err,fileName= fileName+name+".txt",threshold = .08,lim = 1.1005)
        if makeFlag:
            l_tr = np.asarray(flags_tr)
            l_d = np.asarray(flags_d)
            f = open(curDir+fileName+"flag.txt",'w')
            am_r = truediv(len(l_tr[np.where(l_tr > 0 )]),len(l_tr))
            am_d = truediv(len(l_d[np.where(l_d == 0 )]),len(l_d))
            f.write("%.4f %.4f\n" % (am_r,am_d))
            f.close()
        print(("Above T ",name," : "+str(aboveT)))
        plot_results(catTesting,resFolder= 'src/result_compared/cat'+str(catTesting),addition=[name],is3D=is3D)
    else:
        # 300W image-set result.
        arrName = ['src/result_compared/300W/Indoor','src/result_compared/300W/Outdoor','src/result_compared/300W/InOut']
        aboveT = makeErrTxt(all_err,fileName= arrName[catTesting]+"/"+name+".txt",threshold = .08,lim =.35005, step = .0005)
        print(("Above T ",name," : "+str(aboveT)))
        plot_results(catTesting+4,resFolder= arrName[catTesting],addition=[name],is3D=is3D)
    return all_err
#print(("All error : "+str(all_err)))
def makeErrTxt(error,fileName = 'result_compared/Decky.txt',threshold = .08,lim = .35005, step = .0001):
    """Write a 300W-style cumulative-error-distribution (CED) text file.

    For every bin edge in [0, lim) the fraction of errors below the edge is
    written; the same value is duplicated over all six result columns.
    Returns the fraction of errors strictly above `threshold`.
    """
    print("Making errr")
    edges = np.arange(0, lim, step)  # 0.35005/0.0005 for 300W, 1.1005 for 300VW
    total = len(error)
    out = open(curDir + fileName, 'w')
    out.write('300W Challenge 2013 Result\n')
    out.write('Participant: Decky.\n')
    out.write('-----------------------------------------------------------\n')
    out.write('Bin 68_all 68_indoor 68_outdoor 51_all 51_indoor 51_outdoor\n')
    for edge in edges:
        frac = truediv(len(error[np.where(error < edge)]), total)
        out.write("%.4f %.4f %.4f %.4f %.4f %.4f %.4f\n" % (edge, frac, frac, frac, frac, frac, frac))
    out.close()
    err_above = truediv(len(error[np.where(error > threshold )]), total)
    print((error[np.where(error > threshold )]))
    return err_above
def plot_results(version, resFolder = 'result_compared',x_limit=0.08, colors=None, markers=None, linewidth=3,
                 fontsize=12, figure_size=(11, 6),addition = None,is3D = False,All = False):
    """
    Method that generates the 300W Faces In-The-Wild Challenge (300-W) results
    in the form of Cumulative Error Distributions (CED) curves. The function
    renders the indoor, outdoor and indoor + outdoor results based on both 68
    and 51 landmark points in 6 different figures.
    Please cite:
    C. Sagonas, E. Antonakos, G. Tzimiropoulos, S. Zafeiriou, M. Pantic. "300
    Faces In-The-Wild Challenge: Database and Results", Image and Vision
    Computing, 2015.
    Parameters
    ----------
    version : 1 or 2
        The version of the 300W challenge to use. If 1, then the reported
        results are the ones of the first conduct of the competition in the
        ICCV workshop 2013. If 2, then the reported results are the ones of
        the second conduct of the competition in the IMAVIS Special Issue 2015.
    x_limit : float, optional
        The maximum value of the horizontal axis with the errors.
    colors : list of colors or None, optional
        The colors of the lines. If a list is provided, a value must be
        specified for each curve, thus it must have the same length as the
        number of plotted curves. If None, then the colours are linearly sampled
        from the jet colormap. Some example colour values are:
        'r', 'g', 'b', 'c', 'm', 'k', 'w', 'orange', 'pink', etc.
        or
        (3, ) ndarray with RGB values
    linewidth : float, optional
        The width of the rendered lines.
    fontsize : int, optional
        The font size that is applied on the axes and the legend.
    figure_size : (float, float) or None, optional
        The size of the figure in inches.
    """
    # NOTE(review): in this codebase `version` 1-3 selects a 300VW category
    # and 4-6 selects a 300W set (Indoor/Outdoor/InOut) -- the docstring's
    # "1 or 2" description predates those extensions.
    if not is3D :
        title = "300VW 2D "
    else :
        title = "300VW 3DA-2D "
    # Check version: pick the participant curves to compare against.
    if version == 1:
        participants = ['Dlssvm_Cfss', 'MD_CFSS', 'Mdnet_DlibERT', 'Meem_Cfss', 'Spot_Cfss']
        if not All :
            title += 'category 1'
    elif version == 2:
        participants = ['ccot_cfss', 'MD_CFSS', 'spot_cfss', 'srdcf_cfss']
        if not All :
            title += 'category 2'
    elif version == 3:
        participants = ['ccot_cfss', 'MD_CFSS', 'meem_cfss', 'srdcf_cfss','staple_cfss']
        if not All :
            title += 'category 3'
    elif version in [4,5,6]:
        if is3D :
            print("in if")
            participants=[]
            l_participants = ['Re3A_3D','Re3A_C_3D','FA_3D']
            for z in l_participants :
                if z not in participants :
                    participants.append(z)
        else:
            print("in else")
            participants = ['Baltrusaitis', 'Hasan', 'Jaiswal','Milborrow','Yan','Zhou']
            #participants = []
            participants.append('Re3A')
            participants.append('Re3A_C')
            participants.append('FA')
        arrName = ['Indoor','Outdoor','Indoor + Outdoor']
        if not All :
            title = arrName[version - 4]
    else:
        raise ValueError('version must be either 1 or 2')
    if All :
        title += " All Category "
    #participants = []
    # For 300VW categories the participant list above is replaced with the
    # method variants of this project; mapName holds the legend labels.
    if version in [1,2,3]:
        participants = []
        mapName = []
        if is3D :
            #participants = []
            #participants.append('Re3A_3D')
            #participants.append('Re3A_C_3D')
            participants.append('RT_MT_3D')
            participants.append('RT_2_3D')
            participants.append('RT_4_3D')
            participants.append('RT_8_3D')
            participants.append('RT_16_3D')
            participants.append('RT_32_3D')
            participants.append('FA_MD_3D')
            participants.append('FA_MT_3D')
            participants.append('3DFFA_MD_3D')
            participants.append('3DFFA_MT_3D')
            mapName.append('FLL_MT_3D')
            mapName.append('CRCN_2_3D')
            mapName.append('CRCN_4_3D')
            mapName.append('CRCN_8_3D')
            mapName.append('CRCN_16_3D')
            mapName.append('CRCN_32_3D')
            mapName.append('FA_MD_3D')
            mapName.append('FA_MT_3D')
            mapName.append('3DFFA_MD_3D')
            mapName.append('3DFFA_MT_3D')
            colors = ['b','red','orange','yellow','yellow','yellow','green','brown','k','purple']
        else:
            participants.append('RT_MT')
            participants.append('RT_2')
            participants.append('RT_4')
            participants.append('RT_8')
            participants.append('RT_16')
            participants.append('RT_32')
            participants.append('YANG')
            participants.append('MD_CFSS')
            participants.append('ME_CFSS')
            mapName.append('FLL_MT')
            mapName.append('CRCN_2')
            mapName.append('CRCN_4')
            mapName.append('CRCN_8')
            mapName.append('CRCN_16')
            mapName.append('CRCN_32')
            mapName.append('YANG')
            mapName.append('MD_CFSS')
            mapName.append('ME_CFSS')
            #participants.append('FA_MD')
            #participants.append('FA_MT')
            colors = ['b','red','orange','yellow','yellow','yellow','g','brown','k']
    #participants.append('Re3A')
    #participants.append('Re3A_C')
    #participants.append('FA_MD')
    #participants = []
    if addition is not None :
        for i in addition :
            if i not in participants :
                participants.append(i)
    # Initialize lists
    ced68 = []
    ced68_indoor = []
    ced68_outdoor = []
    ced51 = []
    ced51_indoor = []
    ced51_outdoor = []
    legend_entries = []
    # Load results: one <participant>.txt CED file per curve; column `index`
    # selects the 68_all / 68_indoor / 68_outdoor column for this version.
    results_folder = curDir+resFolder
    i = 0
    for f in participants:
        # Read file
        if 'Re3A' in f or version in [1,2,3,6]:
            index = 1
        elif version == 4 :#indoor
            index = 2;
        elif version == 5 :#outdoor
            index = 3;
        filename = f + '.txt'
        tmp = np.loadtxt(str(Path(results_folder) / filename), skiprows=4)
        print(str(Path(results_folder) / filename))
        # Get CED values
        bins = tmp[:, 0]
        ced68.append(tmp[:, index])
        '''ced68_indoor.append(tmp[:, 2])
        ced68_outdoor.append(tmp[:, 3])
        ced51.append(tmp[:, 4])
        ced51_indoor.append(tmp[:, 5])
        ced51_outdoor.append(tmp[:, 6])'''
        # Update legend entries
        legend_entries.append(mapName[i])# + ' et al.')
        i+=1
    print(bins,x_limit)
    # Truncate the curves at x_limit and report AUC / failure rate per method.
    if version < 4 :
        idx = [x[0] for x in np.where(bins==x_limit+.0001)] #.0810
    else :
        idx = [x[0] for x in np.where(bins==x_limit+.005)] #.0810
    real_bins = bins[:idx[0]]
    print(idx,real_bins)
    for i in range(len(ced68)) :
        real_ced = ced68[i][:idx[0]]
        #print(real_ced)
        #AUC = str(round(simps(real_ced,real_bins) * (1/x_limit),3))
        AUC = str(round(simps(real_ced,real_bins) * (1/x_limit),5))
        FR = str(round(1. - real_ced[-1],5)) #[-3]
        #print(real_bins[-1])
        #print(legend_entries[i] + " : "+str(simps(real_ced,real_bins) * (1/x_limit)))
        print(legend_entries[i] + " : " +AUC+" FR : "+FR)
        #legend_entries[i]+=" [AUC : "+AUC+"]"#+"] [FR : "+FR+"]"
        #plt.plot(real_bins,real_ced)
        #plt.show()
    # 68 points, indoor + outdoor
    _plot_curves(bins, ced68, legend_entries, title, x_limit=x_limit,
                 colors=colors, linewidth=linewidth, fontsize=fontsize,
                 figure_size=figure_size)
    '''# 68 points, indoor
    title = 'Indoor, 68 points'
    _plot_curves(bins, ced68_indoor, legend_entries, title, x_limit=x_limit,
                 colors=colors, linewidth=linewidth, fontsize=fontsize,
                 figure_size=figure_size)
    # 68 points, outdoor
    title = 'Outdoor, 68 points'
    _plot_curves(bins, ced68_outdoor, legend_entries, title, x_limit=x_limit,
                 colors=colors, linewidth=linewidth, fontsize=fontsize,
                 figure_size=figure_size)
    # 51 points, indoor + outdoor
    title = 'Indoor + Outdoor, 51 points'
    _plot_curves(bins, ced51, legend_entries, title, x_limit=x_limit,
                 colors=colors, linewidth=linewidth, fontsize=fontsize,
                 figure_size=figure_size)
    # 51 points, indoor
    title = 'Indoor, 51 points'
    _plot_curves(bins, ced51_indoor, legend_entries, title, x_limit=x_limit,
                 colors=colors, linewidth=linewidth, fontsize=fontsize,
                 figure_size=figure_size)
    # 51 points, outdoor
    title = 'Outdoor, 51 points'
    _plot_curves(bins, ced51_outdoor, legend_entries, title, x_limit=x_limit,
                 colors=colors, linewidth=linewidth, fontsize=fontsize,
                 figure_size=figure_size)'''
def _plot_curves(bins, ced_values, legend_entries, title, x_limit=0.08,
                 colors=None, linewidth=3, fontsize=12, figure_size=None):
    """Render a set of CED curves on a single matplotlib figure and show it."""
    total = len(ced_values)
    # Sample curve colours from the jet colormap when none are supplied.
    if colors is None:
        cmap = plt.get_cmap('jet')
        colors = [cmap(1. * idx / total)[:3] for idx in range(total)]
    fig = plt.figure()
    ax = plt.gca()
    for idx, curve in enumerate(ced_values):
        plt.plot(bins, curve, color=colors[idx],
                 linestyle='-',
                 linewidth=linewidth,
                 label=legend_entries[idx])
    # Legend in the lower-right corner.
    ax.legend(prop={'size': fontsize}, loc=4)
    for tick in (ax.get_xticklabels() + ax.get_yticklabels()):
        tick.set_fontsize(fontsize)
    ax.set_xlabel('Normalized Point-to-Point Error', fontsize=fontsize)
    ax.set_ylabel('Images Proportion', fontsize=fontsize)
    ax.set_title(title, fontsize=fontsize)
    # Fixed axes: error on x up to x_limit, proportion 0..1 on y.
    ax.set_xlim([0., x_limit])
    ax.set_ylim([0., 1.])
    ax.set_yticks(np.arange(0., 1.1, 0.1))
    plt.grid('on', linestyle='--', linewidth=0.5)
    if figure_size is not None:
        fig.set_size_inches(np.asarray(figure_size))
    plt.show()
def make_heatmap(image_name,t_image,add,y_batch,isRandom = True,percent_heatmap = .1,percent_heatmap_e = .05):
    """Build (or load a cached) single-channel landmark heatmap for an image.

    A Gaussian blob is stamped around each of the 68 landmark coordinates in
    `y_batch` (flat [x0..x67, y0..y67]); eye landmarks (36-47) use the smaller
    `percent_heatmap_e` radius. When `isRandom` is True a cached .npy under
    ../heatmap-<add>/ is reused if present and the bounding box is randomly
    displaced; otherwise the landmark centres get a small integer jitter.

    Returns the heatmap as a 2-D array matching the image size.
    """
    tBase = os.path.basename(image_name)
    tName,tExt = os.path.splitext(tBase)
    theDir = os.path.dirname(image_name)+"/../heatmap-"+add+"/"
    if not os.path.exists(theDir):
        os.makedirs(theDir)
    fName = theDir+tName+".npy"
    try :
        b_channel,g_channel,r_channel = t_image[:,:,0],t_image[:,:,1],t_image[:,:,2]
    except :
        print(image_name)
    if os.path.isfile(fName) and isRandom:
        newChannel = np.load(fName)
        print("using saved npy")
    else :
        print("make npy "+add)
        newChannel = b_channel.copy(); newChannel[:] = 0
        y_t = y_batch
        if isRandom :
            # Randomly displaced bounding box (augmentation).
            t0,t1,t2,t3 = get_bb(y_t[0:int(n_o//2)], y_t[int(n_o//2):],68,False,
                                random.uniform( -.25, .25 ),
                                random.uniform( -.25, .25 ),
                                random.uniform( -.25, .25 ),
                                random.uniform( -.25, .25 ),
                                random.uniform( -.25, .25 ))
        else :
            t0,t1,t2,t3 = get_bb(y_t[0:int(n_o//2)], y_t[int(n_o//2):],68,False)
        # Blob shapes: regular radius and a tighter one for the eye region.
        l_cd,rv = get_list_heatmap(0,None,t2-t0,t3-t1,percent_heatmap)
        l_cd_e,rv_e = get_list_heatmap(0,None,t2-t0,t3-t1,percent_heatmap_e)
        height, width,_ = t_image.shape
        scaler = 255/np.max(rv)
        for iter in range(68) :
            # BUG FIX: the original tested `if random:`, i.e. the truthiness
            # of the random *module* (always True), making the jittered
            # branch below unreachable. The symmetric use of isRandom above
            # makes the intent clear.
            if isRandom:
                ix,iy = int(y_t[iter]),int(y_t[iter+68])
            else :
                ix,iy = int(y_t[iter])+randint(0,2),int(y_t[iter+68])+randint(0,2)
            # Eye landmarks get the tighter Gaussian.
            if iter in range(36,48):
                l_cd_t = l_cd_e
                rv_t = rv_e
            else :
                l_cd_t = l_cd
                rv_t = rv
            for iter2 in range(len(l_cd_t)) :
                value = int(rv_t[iter2]*scaler)
                # Keep the maximum intensity where blobs overlap.
                if newChannel[inBound(iy+l_cd_t[iter2][0],0,height-1), inBound(ix + l_cd_t[iter2][1],0,width-1)] < value :
                    newChannel[inBound(iy+l_cd_t[iter2][0],0,height-1), inBound(ix + l_cd_t[iter2][1],0,width-1)] = int(rv_t[iter2]*scaler)
    return newChannel
def get_enlarged_bb(the_kp = None, div_x = 2, div_y = 2, images = None,is_bb = False, displacement = 0,
        displacementxy = None,n_points = 68):
    """Enlarge a bounding box by 1/div of its side length on each edge,
    clipped to the image extent.

    `the_kp` is either a flat [x..., y...] keypoint vector (is_bb False) or
    an already-computed [xmin, ymin, xmax, ymax] box (is_bb True).
    Returns (box, l_x, l_y, x1, y1, x_min, y_min, x2, y2); x_min/y_min are
    aliases of x1/y1 kept for caller compatibility.
    """
    if is_bb:
        box = the_kp
    elif displacementxy is not None:
        box = get_bb(x_list = the_kp[:n_points], y_list = the_kp[n_points:],
                     adding_xmin=displacementxy, adding_xmax=displacementxy,
                     adding_ymin=displacementxy, adding_ymax=displacementxy)
    else:
        box = get_bb(x_list = the_kp[:n_points], y_list = the_kp[n_points:],
                     length = n_points, adding = displacement)
    l_x = (box[2] - box[0]) / div_x
    l_y = (box[3] - box[1]) / div_y
    x1 = int(max(box[0] - l_x, 0))
    y1 = int(max(box[1] - l_y, 0))
    # Clip the enlarged corners to the image bounds (shape is rows, cols).
    x2 = int(min(box[2] + l_x, images.shape[1]))
    y2 = int(min(box[3] + l_y, images.shape[0]))
    return box, l_x, l_y, x1, y1, x1, y1, x2, y2
def inBoundN(input,min,max):
    """Clamp `input` to the closed interval [min, max] without casting."""
    return min if input < min else (max if input > max else input)
def inBound(input,min,max):
    """Clamp `input` to [min, max] and return the result as an int."""
    if input < min:
        return int(min)
    if input > max:
        return int(max)
    return int(input)
def inBound_tf(input,min,max):
    """Integer clamp identical to inBound(); kept as a separate name for the
    TensorFlow-related call sites."""
    clamped = min if input < min else (max if input > max else input)
    return int(clamped)
def eval(input):
    """Return `input` floored at zero (negative values become 0).

    NOTE(review): this deliberately keeps its original name even though it
    shadows the builtin ``eval`` within this module.
    """
    return input if input >= 0 else 0
def ClipIfNotNone(grad):
    """Clip a gradient tensor to [-1, 1]; a None gradient passes through."""
    return grad if grad is None else tf.clip_by_value(grad, -1, 1)
def initialize_uninitialized_global_variables(sess):
    """Initialize only those TensorFlow global variables that are not yet
    initialized in `sess`, leaving already-initialized ones untouched.

    :param sess: the TensorFlow session
    """
    all_vars = tf.global_variables()
    # Query the initialized status of every variable in one session.run.
    init_flags = sess.run([tf.is_variable_initialized(v) for v in all_vars])
    pending = [v for v, done in zip(all_vars, init_flags) if not done]
    if len(pending):
        sess.run(tf.variables_initializer(pending))
def addPadd(im) :
    """Pad an image with black borders to make it square.

    Returns (padded_image, add_x, add_y) where add_x/add_y is the offset the
    original content was shifted by: left padding when the image is taller
    than wide, top padding otherwise.
    """
    height, width, channels = im.shape
    target = np.max(np.array([height, width]))
    add_x, add_y = 0, 0
    # NOTE(review): since target == max(h, w), ratio is always 1 and this
    # resize is effectively a no-op; kept to preserve original behaviour.
    old_size = im.shape[:2]
    ratio = float(target) / max(old_size)
    new_size = tuple([int(dim * ratio) for dim in old_size])
    im = cv2.resize(im, (new_size[1], new_size[0]))
    delta_w = target - new_size[1]
    delta_h = target - new_size[0]
    top, bottom = delta_h // 2, delta_h - (delta_h // 2)
    left, right = delta_w // 2, delta_w - (delta_w // 2)
    if height > width:  # padding added horizontally, content shifts right
        add_x = left
    else:
        add_y = top
    padded = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT,
                                value=[0, 0, 0])
    return padded, add_x, add_y
def transformation(input, gt, type, info,length = 68 ):
    """Apply a data-augmentation transform to an image and its landmarks.

    input  : BGR image array (H, W, 3).
    gt     : flat landmark vector [x0..xN-1, y0..yN-1], N == `length`.
    type   : 1 = flip (info 0 vertical, 1 horizontal),
             2 = rotate by `info` degrees about the image centre,
             3 = grid occlusion (info unused).
    length : number of landmarks (68 or 84); selects the flip index map.
    Returns [transformed_image, transformed_gt].
    """
    # Left/right landmark correspondence for horizontal flips (68-point).
    mapping =[
        [0,16],
        [1,15],
        [2,14],
        [3,13],
        [4,12],
        [5,11],
        [6,10],
        [7,9],
        [8,8],
        [9,7],
        [10,6],
        [11,5],
        [12,4],
        [13,3],
        [14,2],
        [15,1],
        [16,0],
        [17,26],
        [18,25],
        [19,24],
        [20,23],
        [21,22],
        [22,21],
        [23,20],
        [24,19],
        [25,18],
        [26,17],
        [27,27],
        [28,28],
        [29,29],
        [30,30],
        [31,35],
        [32,34],
        [33,33],
        [34,32],
        [35,31],
        [36,45],
        [37,44],
        [38,43],
        [39,42],
        [40,47],
        [41,46],
        [42,39],
        [43,38],
        [44,37],
        [45,36],
        [46,41],
        [47,40],
        [48,54],
        [49,53],
        [50,52],
        [51,51],
        [52,50],
        [53,49],
        [54,48],
        [55,59],
        [56,58],
        [57,57],
        [58,56],
        [59,55],
        [60,64],
        [61,63],
        [62,62],
        [63,61],
        [64,60],
        [65,67],
        [66,66],
        [67,65],
        ]
    # Same correspondence for the 84-point annotation scheme.
    mapping84 =[
        [0,32],
        [1,31],
        [2,30],
        [3,29],
        [4,28],
        [5,27],
        [6,26],
        [7,25],
        [8,24],
        [9,23],
        [10,22],
        [11,21],
        [12,20],
        [13,19],
        [14,18],
        [15,17],
        [16,16],
        [17,15],
        [18,14],
        [19,13],
        [20,12],
        [21,11],
        [22,10],
        [23,9],
        [24,8],
        [25,7],
        [26,6],
        [27,5],
        [28,4],
        [29,3],
        [30,2],
        [31,1],
        [32,0],
        [33,42],
        [34,41],
        [35,40],
        [36,39],
        [37,38],
        [38,37],
        [39,36],
        [40,35],
        [41,34],
        [42,33],
        [43,46],
        [44,45],
        [45,44],
        [46,43],
        [47,51],
        [48,50],
        [49,49],
        [50,48],
        [51,47],
        [52,57],
        [53,56],
        [54,55],
        [55,54],
        [56,53],
        [57,52],
        [58,63],
        [59,62],
        [60,61],
        [61,60],
        [62,59],
        [63,58],
        [64,70],
        [65,69],
        [66,68],
        [67,67],
        [68,66],
        [69,65],
        [70,64],
        [71,75],
        [72,74],
        [73,73],
        [74,72],
        [75,71],
        [76,80],
        [77,79],
        [78,78],
        [79,77],
        [80,76],
        [81,83],
        [82,82],
        [83,81],
        ]
    if length > 68:
        mapping = np.asarray(mapping84)
    else :
        mapping = np.asarray(mapping)
    if type == 1 :
        #print("Flippping") #info is 0,1
        gt_o = gt.copy()
        height, width,_ = input.shape
        if info == 0 : #vertical
            #print("Flipping vertically ^v")
            output = cv2.flip(input,0)
            # Mirror each y coordinate about the horizontal mid-line.
            for i in range(length) :
                if gt_o[i+length] > (height/2) : #y
                    gt_o[i+length] = height/2 - (gt[i+length] -(height/2))
                if gt_o[i+length] < (height/2) : #y
                    gt_o[i+length] = height/2 + ((height/2)-gt[i+length])
        elif info == 1 : #horizontal
            t_map = mapping[:,1]
            #gt_o_t = gt.copy()
            #print("Flipping Horizontally <- -> ")
            #return np.fliplr(input)
            output = cv2.flip(input,1)
            # Mirror x about the vertical mid-line AND swap left/right
            # landmark identities via the mapping table.
            for i in range(length) :
                if gt[i] > (width/2) : #x
                    #gt_o_t[i] = (width/2) - (gt[i] - (width/2))
                    gt_o[t_map[i]] = (width/2) - (gt[i] - (width/2))
                if gt[i] < (width/2) : #x
                    #gt_o_t[i] = (width/2) + ((width/2) - gt[i])
                    gt_o[t_map[i]] = (width/2) + ((width/2) - gt[i])
                #get the new index
                #gt_o[t_map[i]] = gt_o_t[i]
                gt_o[t_map[i]+length] = gt[i+length]
        #needs to be transformed.
        return [output,gt_o]
    elif type == 2 :
        #print("Rotate") # info is 1,2,3
        #output = np.rot90(input,info)
        # Rotate the image about its centre by `info` degrees, then apply the
        # matching rotation matrix to the centred landmark coordinates.
        rows,cols,_ = input.shape
        M = cv2.getRotationMatrix2D((cols/2,rows/2),info,1)
        output = cv2.warpAffine(input,M,(cols,rows))
        gt_o = np.array([gt[:length]-(cols/2),gt[length:]-(rows/2)])
        theta = np.radians(-info)
        c, s = np.cos(theta), np.sin(theta)
        R = np.array(((c,-s), (s, c)))
        gt_o = np.dot(R,gt_o)
        gt_o = np.concatenate((gt_o[0]+(cols/2),gt_o[1]+(rows/2)),axis = 0)
        '''
        print R.shape, gt_o.shape
        print gt_o.shape'''
        return [output,gt_o]
    elif type == 3 : #info is 0 to 1
        #print("Occlusion")
        # Paint a regular grid of white rectangles over the image; the
        # landmarks are unchanged.
        output = input.copy()
        gt_o = gt.copy()
        lengthW = 0.5
        lengthH = 0.4
        s_row = 15
        s_col = 7
        imHeight,imWidth,_ = input.shape
        #Now filling the occluder
        l_w = imHeight//s_row
        l_h = imWidth//s_col
        for ix in range(s_row):
            for jx in range(s_col):
                #print ix,jx,l_w,l_h
                #y1:y2, x1:x2
                #print(ix*b_size,outerH ,jx*b_size,outerW,'--',outerImgH,',',outerImgW )
                #print(ix*l_w,ix*l_w+l_w ,jx*l_h,jx*l_h+l_h )
                output[ix*l_w:ix*l_w+int(l_w*lengthH) ,jx*l_h:jx*l_h+int(l_h*lengthW) ] = np.full([int(l_w*lengthH),int(l_h*lengthW),3],255)
        return [output,gt_o]
def calcListNormalizedDistance(pred,gt):
    '''
    input :
        pred : num_images,num points
        gt : num_images, num points
    '''
    # NOTE(review): num_points is read from axis 2, so pred is expected to be
    # 3-D despite the 2-D shape in the docstring above -- confirm against the
    # caller before relying on this helper. Also note i_d mixes pred and gt
    # (pred[i,36] vs gt[i,45]); presumably intended as an inter-ocular
    # normaliser computed on gt only -- verify.
    err = np.zeros(len(pred))
    print((pred.shape))
    num_points = pred.shape[2]
    for i in range(len(pred)) :
        # Normaliser: outer-eye landmark pair for 68-point sets, 19/28 otherwise.
        if num_points == 68 :
            i_d = np.sqrt(np.square(pred[i,36] - gt[i,45]))
        else :
            i_d = np.sqrt(np.square(pred[i,19] - gt[i,28]))
        sum = 0
        for j in range(num_points) :
            sum += np.sqrt(np.square(pred[i,j]-gt[i,j]))
        err[i] = sum/(num_points * i_d)
    return err
def calcNormalizedDistance(pred,gt):
    """Mean point-to-point error between pred and gt, normalised by the
    ground-truth inter-ocular distance.

    pred/gt are flat [x0..xN-1, y0..yN-1] vectors. Landmarks 36/45 define
    the normaliser for 68-point sets, 19/28 otherwise (y offset +68 in both
    cases, preserving the original indexing).
    """
    n_coords = pred.shape[0]
    half = n_coords // 2
    # Inter-ocular distance is computed on the ground truth only.
    if n_coords == 68 * 2:
        a, b = 36, 45
    else:
        a, b = 19, 28
    i_d = np.sqrt(np.square(gt[a] - gt[b]) + np.square(gt[a + 68] - gt[b + 68]))
    total = 0
    for j in range(half):
        dx = pred[j] - gt[j]
        dy = pred[j + half] - gt[j + half]
        total += np.sqrt(np.square(dx) + np.square(dy))
    return total / (half * i_d)
#assumes p_a and p_b are both positive numbers that sum to 100
def myRand(a, p_a, b, p_b):
    """Return `a` with probability p_a percent, otherwise `b`."""
    draw = random.uniform(0, 100)
    if draw < p_a:
        return a
    return b
def calcLandmarkErrorListTF(pred,gt):
    """Graph-mode landmark error: mean per-point distance over the whole
    (batch, sequence) grid, each frame normalised by the diagonal of its
    ground-truth bounding box.
    """
    errors = []
    n_batch = pred.get_shape()[0]
    n_seq = pred.get_shape()[1]
    n_coords = pred.get_shape()[2]
    half = n_coords // 2
    for b in range(n_batch):
        for s in range(n_seq):
            box = get_bb_tf(gt[b,s,0:68], gt[b,s,68:])
            w = tf.abs(box[2] - box[0])
            h = tf.abs(box[3] - box[1])
            diag = tf.sqrt(tf.square(w) + tf.square(h))
            dists = []
            for j in range(half):
                dists.append(tf.sqrt(tf.square(pred[b,s,j]-gt[b,s,j]) + tf.square(pred[b,s,j+half]-gt[b,s,j+half])))
            # Per-point distances normalised by diagonal * point count.
            errors.append(tf.divide(tf.stack(dists), diag * half))
    return tf.reduce_mean(tf.stack(errors))
def calcLandmarkError(pred,gt): #for 300VW
    """Landmark error for 300VW (IJCV protocol): mean point-to-point distance
    normalised by the diagonal of the ground-truth bounding box.

    pred/gt are flat [x0..x67, y0..y67] vectors.
    """
    half = pred.shape[0] // 2
    box = get_bb(gt[:68], gt[68:])
    bb_w = np.abs(box[2] - box[0])
    bb_h = np.abs(box[3] - box[1])
    diag = math.sqrt((bb_w * bb_w) + (bb_h * bb_h))
    total = 0
    for j in range(half):
        dx = pred[j] - gt[j]
        dy = pred[j + half] - gt[j + half]
        total += math.sqrt((dx * dx) + (dy * dy))
    return total / (half * diag)
def showGates(tg = None, batch_index_to_see = 0, n_to_see = 64, n_neurons = 1024,toShow = False, toSave = False, fileName = "gates.jpg"):
    """Visualise LSTM gate activations over time as a grid of heat maps.

    `tg` is indexed [time, signal, batch, neuron] with signal order
    (input gate, new input gate, forget gate, output gate, cell state,
    output state). The neuron axis is split into n_neurons // n_to_see rows
    of n_to_see neurons; every row shows the six signals side by side.

    Fix: the row count previously used true division (`/`), which yields a
    float under Python 3 and breaks both range() and add_subplot(); it is
    now integer division.
    """
    t_f_row = n_neurons // n_to_see  # rows of n_to_see neurons each
    n_column = 6
    fig = plt.figure()
    for p_i in range(t_f_row):
        base = p_i * n_to_see
        # Slice this row's neurons for each of the six recorded signals.
        panels = [
            (tg[:, 0, batch_index_to_see, base:base + n_to_see], 'Input Gate'),
            (tg[:, 1, batch_index_to_see, base:base + n_to_see], 'New Input Gate'),
            (tg[:, 2, batch_index_to_see, base:base + n_to_see], 'Forget Gate'),
            (tg[:, 3, batch_index_to_see, base:base + n_to_see], 'Output Gate'),
            (tg[:, 4, batch_index_to_see, base:base + n_to_see], 'Cell State'),
            (tg[:, 5, batch_index_to_see, base:base + n_to_see], 'Output'),
        ]
        for col, (data, title) in enumerate(panels):
            ax = fig.add_subplot(t_f_row, n_column, p_i * n_column + col + 1)
            if p_i == 0:
                # Column headers only on the first row.
                ax.set_title(title)
            plt.imshow(data, vmin=0, vmax=1)
    #plt.colorbar(orientation='vertical')
    if toShow:
        plt.show()
    if toSave:
        fig.savefig(fileName)
def get_list_heatmap(center,cov,image_size_x,image_size_y,percent_radius,exact_radius = None) :
    """Enumerate the pixel offsets of an elliptical blob around `center`
    together with their Gaussian density values.

    Radii default to percent_radius of each image side (at least 1 px); when
    `exact_radius` is given, `cov` is used as the radius instead. Returns
    (coords, densities) where coords is an (N, 2) array of [x, y] positions.
    """
    # Clamp integer radii to at least one pixel.
    rad_x = max(int(image_size_x * percent_radius), 1)
    rad_y = max(int(image_size_y * percent_radius), 1)
    if exact_radius is not None:
        rad_x = cov
        rad_y = cov
    coords = []
    for x in range(center - rad_x, center + rad_x):
        # Vertical half-extent of the ellipse at this column.
        yspan = rad_y * math.sin(math.acos(inBoundN((center - x) / rad_y, -1, 1)))
        for y in range(int(center - yspan), int(center + yspan)):
            coords.append([x, y])
    coords = np.asarray(coords)
    centre = [center, center]
    if cov is None:
        densities = multivariate_normal.pdf(coords, mean=centre, cov=[rad_x, rad_y])
    else:
        densities = multivariate_normal.pdf(coords, mean=centre, cov=[cov, cov])
    return coords, densities
def get_bb(x_list, y_list, length = 68,swap = False,adding = 0,adding_xmin=None, adding_xmax = None,adding_ymin = None, adding_ymax = None,show=False):
    """Bounding box of the first `length` landmark coordinates.

    Extremes are truncated to int as they are tracked. With `swap` the order
    is [xMin, xMax, yMin, yMax]; otherwise [xMin, yMin, xMax, yMax],
    optionally expanded symmetrically by `adding` (a fraction of the side
    length) or per-edge via the adding_* fractions.
    """
    x_lo, x_hi = 999999, -9999999
    y_lo, y_hi = 9999999, -99999999
    if show:
        print(x_list, y_list)
    for i in range(length):
        # Truncate to int exactly as the original extreme tracking did.
        if x_lo > x_list[i]:
            x_lo = int(x_list[i])
        if x_hi < x_list[i]:
            x_hi = int(x_list[i])
        if y_lo > y_list[i]:
            y_lo = int(y_list[i])
        if y_hi < y_list[i]:
            y_hi = int(y_list[i])
    span_x = x_hi - x_lo
    span_y = y_hi - y_lo
    if swap:
        return [x_lo, x_hi, y_lo, y_hi]
    if adding_xmin is None:
        box = [x_lo - adding * span_x, y_lo - adding * span_y,
               x_hi + adding * span_x, y_hi + adding * span_y]
        if show:
            print("return ", box)
        return box
    return [x_lo + adding_xmin * span_x, y_lo + adding_ymin * span_y,
            x_hi + adding_xmax * span_x, y_hi + adding_ymax * span_y]
def get_bb_tf(x_list, y_list, length = 68,adding = 0, axMin = None, axMax = None, ayMin = None, ayMax = None):
    """Graph-mode bounding box over the first `length` TF coordinates, with
    optional symmetric (`adding`) or per-edge (axMin/axMax/ayMin/ayMax)
    expansion expressed as fractions of the box side length."""
    x_lo = tf.constant(999999.0)
    x_hi = tf.constant(-9999999.0)
    y_lo = tf.constant(9999999.0)
    y_hi = tf.constant(-99999999.0)
    for i in range(length):
        x_lo = tf.minimum(x_list[i], x_lo)
        x_hi = tf.maximum(x_list[i], x_hi)
        y_lo = tf.minimum(y_list[i], y_lo)
        y_hi = tf.maximum(y_list[i], y_hi)
    span_x = x_hi - x_lo
    span_y = y_hi - y_lo
    if axMin is None:
        return x_lo - adding * span_x, y_lo - adding * span_y, x_hi + adding * span_x, y_hi + adding * span_y
    return x_lo + axMin * span_x, y_lo + ayMin * span_y, x_hi + axMax * span_x, y_hi + ayMax * span_y
def padding(image):
    """Pad `image` with black borders to a square whose side is the longest
    edge, centring the original content."""
    h, w, _ = image.shape
    longest = max(h, w)
    top = bottom = left = right = 0
    if h < longest:
        gap = longest - h
        top = gap // 2
        bottom = gap - top
    elif w < longest:
        gap = longest - w
        left = gap // 2
        right = gap - left
    return cv2.copyMakeBorder(image, top, bottom, left, right,
                              cv2.BORDER_CONSTANT, value=[0, 0, 0])
def get_bb_face(seq_size=2,synthetic = False,path= "images/bb/"):
    """Walk `path` and collect image file lists and face bounding boxes.

    Expected layout per sequence folder: a .txt file with one comma-separated
    box per frame, plus a subdirectory of frame images. When seq_size is
    given, images/boxes are chunked into sequences of that length; otherwise
    one list per folder is returned. `synthetic` controls whether the txt
    columns are taken as [x1, y1, x2, y2] directly or as [x, y, w, h]
    converted to corner form.

    Returns [list_images, list_ground_truth].
    """
    list_gt = []
    list_labels = []
    list_labels_t = []
    for f in file_walker.walk(curDir +path):
        #print(f.name, f.full_path) # Name is without extension
        if f.isDirectory: # Check if object is directory
            for sub_f in f.walk():
                if sub_f.isFile:
                    if('txt' in sub_f.full_path):
                        #print(sub_f.name, sub_f.full_path) #this is the groundtruth
                        list_labels_t.append(sub_f.full_path)
                if sub_f.isDirectory: # Check if object is directory
                    list_img = []
                    for sub_sub_f in sub_f.walk(): #this is the image
                        list_img.append(sub_sub_f.full_path)
                    list_gt.append(sorted(list_img))
    # Sort so image folders and label files line up by name.
    list_gt = sorted(list_gt)
    list_labels_t = sorted(list_labels_t)
    for lbl in list_labels_t :
        with open(lbl) as file:
            x = [re.split(r',+',l.strip()) for l in file]
            y = [ list(map(int, i)) for i in x]
        list_labels.append(y)
    if seq_size is not None :
        list_images = []
        list_ground_truth = []
        for i in range(0,len(list_gt)):
            counter = 0
            for j in range(0,int(len(list_gt[i])/seq_size)):
                temp = []
                temp2 = []
                for z in range(counter,counter+seq_size):
                    temp.append(list_gt[i][z])
                    #temp2.append([list_labels[i][z][2],list_labels[i][z][3],list_labels[i][z][0],list_labels[i][z][1],list_labels[i][z][0]+list_labels[i][z][2],list_labels[i][z][1]+list_labels[i][z][3]])
                    if not synthetic :
                        temp2.append([list_labels[i][z][0],list_labels[i][z][1],list_labels[i][z][0]+list_labels[i][z][2],list_labels[i][z][1]+list_labels[i][z][3]])
                    else :
                        #temp2.append([list_labels[i][z][0],list_labels[i][z][1],list_labels[i][z][2],list_labels[i][z][3]])
                        temp2.append([list_labels[i][z][0],list_labels[i][z][1],list_labels[i][z][2],list_labels[i][z][3]])
                counter+=seq_size
                #print counter
                list_images.append(temp)
                list_ground_truth.append(temp2)
    else :
        list_images = []
        list_ground_truth = []
        for i in range(0,len(list_gt)): #per folder
            temp = []
            temp2 = []
            for j in range(0,len(list_gt[i])):#per number of seq * number of data/seq_siz
                temp.append(list_gt[i][j])
                #temp2.append([list_labels[i][z][2],list_labels[i][z][3],list_labels[i][z][0],list_labels[i][z][1],list_labels[i][z][0]+list_labels[i][z][2],list_labels[i][z][1]+list_labels[i][z][3]])
                if not synthetic :
                    temp2.append([list_labels[i][j][0],list_labels[i][j][1],list_labels[i][j][0]+list_labels[i][j][2],list_labels[i][j][1]+list_labels[i][j][3]])
                else :
                    #temp2.append([list_labels[i][z][0],list_labels[i][z][1],list_labels[i][z][2],list_labels[i][z][3]])
                    temp2.append([list_labels[i][j][0],list_labels[i][j][1],list_labels[i][j][2],list_labels[i][j][3]])
            list_images.append(temp)
            list_ground_truth.append(temp2)
    '''
    print len(list_images)
    print len(list_ground_truth)
    print (list_images[0])
    print (list_ground_truth[0])
    img = cv2.imread(list_images[0][0])
    cv2.rectangle(img,(list_ground_truth[0][0][2],list_ground_truth[0][0][3]),(list_ground_truth[0][0][4],list_ground_truth[0][0][5]),(255,0,255),1)
    cv2.imshow('jim',img)
    cv2.waitKey(0)
    '''
    return[list_images,list_ground_truth]#2d list of allsize, seqlength, (1 for image,6 for bb)
def makeGIF(files,filename):
    """Write a list of BGR frames (OpenCV images) to an animated GIF."""
    import imageio
    frames = []
    for frame in files:
        # imageio expects RGB channel ordering.
        frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    imageio.mimsave(filename, frames, 'GIF')
def get_kp_face_temp(seq_size=None,data_list = ["300VW-Train"],per_folder = False,n_skip = 1,is3D = False,is84 = False, dir_name = None,theCurDir = None):
    """Collect facial-keypoint image paths and .pts annotations from disk.

    Walks ``theCurDir`` (or ``curDir + "images/"``) expecting per-sequence
    folders that each contain an ``img`` subfolder and an annotation
    subfolder (``annot``, ``annot2`` when is3D, ``annot84`` when is84, or
    an explicit ``dir_name``).

    Args:
        seq_size: if given, group frames into fixed-length sequences.
        data_list: dataset names. NOTE(review): unlike get_kp_face, the walk
            below ignores the entry and re-walks the same root once per
            name, duplicating data when len(data_list) > 1 — confirm intent.
        per_folder: if True (and seq_size is None), keep one list per folder.
        n_skip: frame sampling stride.
        is3D / is84 / dir_name: select the annotation subfolder and width.
        theCurDir: overrides the default image root.

    Returns:
        (list_images, list_ground_truth, [mean_w, mean_h, min_w, max_w,
        min_h, max_h]); the width/height statistics are never accumulated
        in this version and are returned as zeros / sentinel values.
    """
    list_gt = []
    list_labels_t = []
    list_labels = []
    if theCurDir is not None :
        theDir = theCurDir
    else :
        theDir = curDir + "images/"
    counter_image = 0
    i = 0
    # Pick the annotation folder name to look for.
    if dir_name is not None :
        annot_name = dir_name
    else :
        if is84 :
            annot_name = 'annot84'
        elif is3D :
            annot_name = 'annot2'
        else :
            annot_name = 'annot'
    for data in data_list :
        print(("Opening "+data))
        for f in file_walker.walk(theDir):
            if f.isDirectory: # Check if object is directory
                print((f.name, f.full_path)) # Name is without extension
                for sub_f in f.walk():
                    if sub_f.isDirectory: # Check if object is directory
                        list_dta = []
                        for sub_sub_f in sub_f.walk(): #this is the data
                            if(".npy" not in sub_sub_f.full_path):
                                list_dta.append(sub_sub_f.full_path)
                        if(sub_f.name == annot_name) : #If that's annot, add to labels_t
                            list_labels_t.append(sorted(list_dta))
                        elif(sub_f.name == 'img'): #Else it is the image
                            list_gt.append(sorted(list_dta))
                            counter_image+=len(list_dta)
    print("Now opening keylabels")
    # Parse each .pts file: skip the three header lines and the closing '}'.
    for lbl in list_labels_t :
        lbl_68 = [] #Per folder
        for lbl_sub in lbl :
            print(lbl_sub)
            if ('pts' in lbl_sub) :
                x = []
                with open(lbl_sub) as fh:
                    data2 = [re.split(r'\t+',l.strip()) for l in fh]
                for i in range(len(data2)) :
                    if(i not in [0,1,2,len(data2)-1]):
                        x.append([ float(j) for j in data2[i][0].split()] )
                lbl_68.append(x) #1 record
        list_labels.append(lbl_68)
    list_images = []
    max_width = max_height = -9999
    min_width = min_height = 9999
    mean_width = mean_height = 0
    print(("Total data : "+str(counter_image)))
    print("Now partitioning data if required")
    indexer = 0
    if seq_size is not None :
        # Fixed-length sequences: seq_size frames sampled every n_skip.
        list_ground_truth = np.zeros([int(counter_image/(seq_size*n_skip)),seq_size,136])
        for i in range(0,len(list_gt)): #For each dataset
            counter = 0
            for j in range(0,int(len(list_gt[i])/(seq_size*n_skip))): #for number of data/batchsize
                temp = []
                temp2 = np.zeros([seq_size,136])
                i_temp = 0
                for z in range(counter,counter+(seq_size*n_skip),n_skip):#1 to seq_size
                    temp.append(list_gt[i][z])
                    temp2[i_temp] = np.array(list_labels[i][z]).flatten('F')
                    i_temp+=1
                list_images.append(temp)
                list_ground_truth[indexer] = temp2
                indexer += 1
                counter+=seq_size*n_skip
    else :
        if per_folder : #divide per folder
            print("Per folder")
            list_ground_truth = []
            for i in range(0,len(list_gt)): #For each dataset
                temp = []
                temp2 = []
                for j in range(0,len(list_gt[i]),n_skip): #for number of data/batchsize
                    temp.append(list_gt[i][j])
                    temp2.append(np.array(list_labels[i][j]).flatten('F'))
                list_images.append(temp)
                list_ground_truth.append(temp2)
        else : #make as one long list, for localisation
            # Row width follows annotation type: 204 (dir_name, presumably
            # 68 3-D points), 168 (84 points) or 136 (68 points); x values
            # first, then y ('F' flatten order).
            if dir_name is not None :
                list_ground_truth = np.zeros([counter_image,204])
            elif is84:
                list_ground_truth = np.zeros([counter_image,168])
            else :
                list_ground_truth = np.zeros([counter_image,136])
            for i in range(0,len(list_gt)): #For each dataset
                for j in range(0,len(list_gt[i]),n_skip): #for number of data
                    # (dropped an unused cv2.imread of every frame here: the
                    # decoded image was discarded and dominated runtime)
                    list_images.append(list_gt[i][j])
                    list_ground_truth[indexer] = np.array(list_labels[i][j]).flatten('F')
                    indexer += 1
    # BUG FIX: the per_folder path leaves indexer at 0, so dividing
    # unconditionally raised ZeroDivisionError. The means stay 0 either way
    # because nothing ever accumulates into them.
    if indexer :
        mean_width/= indexer
        mean_height/= indexer
    return list_images,list_ground_truth,[mean_width,mean_height, min_width,max_width, min_height, max_height]
def get_kp_face(seq_size=None,data_list = ["300VW-Train"],per_folder = False,n_skip = 1,is3D = False,is84 = False, dir_name = None,theCurDir = None):
    """Collect facial-keypoint image paths and .pts annotations for each
    dataset in ``data_list``.

    Each dataset folder under the image root is expected to hold
    per-sequence directories containing an ``img`` subfolder and an
    annotation subfolder (``annot``, ``annot2`` when is3D, ``annot84``
    when is84, or an explicit ``dir_name``).

    Args:
        seq_size: if given, group frames into fixed-length sequences.
        data_list: dataset sub-directory names under the image root.
        per_folder: if True (and seq_size is None), return nested
            per-folder lists instead of one flat list.
        n_skip: frame sampling stride.
        is3D / is84 / dir_name: select the annotation subfolder and width.
        theCurDir: overrides the default image root (curDir + "images/").

    Returns:
        (list_images, list_ground_truth, [mean_w, mean_h, min_w, max_w,
        min_h, max_h]); the width/height statistics are never accumulated
        in this version and are returned as zeros / sentinel values.
    """
    list_gt = []
    list_labels_t = []
    list_labels = []
    if theCurDir is not None :
        theDir = theCurDir
    else :
        theDir = curDir + "images/"
    counter_image = 0
    i = 0
    # Pick the annotation folder name to look for.
    if dir_name is not None :
        annot_name = dir_name
    else :
        if is84 :
            annot_name = 'annot84'
        elif is3D :
            annot_name = 'annot2'
        else :
            annot_name = 'annot'
    for data in data_list :
        print(("Opening "+data))
        # BUG FIX: this used to read ``theDir++data+"/"`` which parses as
        # ``theDir + (+data)`` — a unary plus on a str — and raised
        # TypeError at runtime.
        for f in file_walker.walk(theDir+data+"/"):
            if f.isDirectory: # Check if object is directory
                print((f.name, f.full_path)) # Name is without extension
                for sub_f in f.walk():
                    if sub_f.isDirectory: # Check if object is directory
                        list_dta = []
                        for sub_sub_f in sub_f.walk(): #this is the data
                            if(".npy" not in sub_sub_f.full_path):
                                list_dta.append(sub_sub_f.full_path)
                        if(sub_f.name == annot_name) : #If that's annot, add to labels_t
                            list_labels_t.append(sorted(list_dta))
                        elif(sub_f.name == 'img'): #Else it is the image
                            list_gt.append(sorted(list_dta))
                            counter_image+=len(list_dta)
    print("Now opening keylabels")
    # Parse each .pts file: skip the three header lines and the closing '}'.
    for lbl in list_labels_t :
        lbl_68 = [] #Per folder
        for lbl_sub in lbl :
            print(lbl_sub)
            if ('pts' in lbl_sub) :
                x = []
                with open(lbl_sub) as fh:
                    data2 = [re.split(r'\t+',l.strip()) for l in fh]
                for i in range(len(data2)) :
                    if(i not in [0,1,2,len(data2)-1]):
                        x.append([ float(j) for j in data2[i][0].split()] )
                lbl_68.append(x) #1 record
        list_labels.append(lbl_68)
    list_images = []
    max_width = max_height = -9999
    min_width = min_height = 9999
    mean_width = mean_height = 0
    print(("Total data : "+str(counter_image)))
    print("Now partitioning data if required")
    indexer = 0
    if seq_size is not None :
        # Fixed-length sequences: seq_size frames sampled every n_skip.
        list_ground_truth = np.zeros([int(counter_image/(seq_size*n_skip)),seq_size,136])
        for i in range(0,len(list_gt)): #For each dataset
            counter = 0
            for j in range(0,int(len(list_gt[i])/(seq_size*n_skip))): #for number of data/batchsize
                temp = []
                temp2 = np.zeros([seq_size,136])
                i_temp = 0
                for z in range(counter,counter+(seq_size*n_skip),n_skip):#1 to seq_size
                    temp.append(list_gt[i][z])
                    temp2[i_temp] = np.array(list_labels[i][z]).flatten('F')
                    i_temp+=1
                list_images.append(temp)
                list_ground_truth[indexer] = temp2
                indexer += 1
                counter+=seq_size*n_skip
    else :
        if per_folder : #divide per folder
            print("Per folder")
            list_ground_truth = []
            for i in range(0,len(list_gt)): #For each dataset
                temp = []
                temp2 = []
                for j in range(0,len(list_gt[i]),n_skip): #for number of data/batchsize
                    temp.append(list_gt[i][j])
                    temp2.append(np.array(list_labels[i][j]).flatten('F'))
                list_images.append(temp)
                list_ground_truth.append(temp2)
        else : #make as one long list, for localisation
            # Row width follows annotation type: 204 (dir_name, presumably
            # 68 3-D points), 168 (84 points) or 136 (68 points); x values
            # first, then y ('F' flatten order).
            if dir_name is not None :
                list_ground_truth = np.zeros([counter_image,204])
            elif is84:
                list_ground_truth = np.zeros([counter_image,168])
            else :
                list_ground_truth = np.zeros([counter_image,136])
            for i in range(0,len(list_gt)): #For each dataset
                for j in range(0,len(list_gt[i]),n_skip): #for number of data
                    # (dropped an unused cv2.imread of every frame here: the
                    # decoded image was discarded and dominated runtime)
                    list_images.append(list_gt[i][j])
                    list_ground_truth[indexer] = np.array(list_labels[i][j]).flatten('F')
                    indexer += 1
    # BUG FIX: the per_folder path leaves indexer at 0, so dividing
    # unconditionally raised ZeroDivisionError. The means stay 0 either way
    # because nothing ever accumulates into them.
    if indexer :
        mean_width/= indexer
        mean_height/= indexer
    return list_images,list_ground_truth,[mean_width,mean_height, min_width,max_width, min_height, max_height]
def get_kp_face_localize(seq_size=None,data = "300W/01_Indoor"):
    """Load still-image keypoint data and duplicate each sample into a pair.

    Walks ``curDir + "images/" + data`` expecting folders with ``img`` and
    ``annot`` subfolders of .pts files. Every sample is emitted twice in an
    inner pair (the ``for k in range(2)`` loops) — presumably to mimic a
    sequence of length 2 for the recurrent models; TODO confirm.

    Returns (list_images, list_ground_truth, stats) where stats is
    [mean_w, mean_h, min_w, max_w, min_h, max_h]; the statistics are never
    accumulated here and come back as zeros / sentinel values.
    """
    list_gt = []
    list_labels_t = []
    list_labels = []
    counter_image = 0
    i = 0
    print(("Opening "+data))
    for f in file_walker.walk(curDir + "images/"+data+"/"):
        print((f.name, f.full_path)) # Name is without extension
        if f.isDirectory: # Check if object is directory
            for sub_f in f.walk():
                if sub_f.isDirectory: # Check if object is directory
                    list_dta = []
                    #print sub_f.name
                    for sub_sub_f in sub_f.walk(): #this is the data
                        list_dta.append(sub_sub_f.full_path)
                    if(sub_f.name == 'annot') : #If that's annot, add to labels_t
                        list_labels_t.append(sorted(list_dta))
                    elif(sub_f.name == 'img'): #Else it is the image
                        list_gt.append(sorted(list_dta))
                        counter_image+=len(list_dta)
    '''
    print len(list_gt[2])
    print len(list_labels_t[2])
    '''
    print("Now opening keylabels")
    # Parse each .pts file: skip the three header lines and the closing '}'.
    for lbl in list_labels_t :
        #print lbl
        lbl_68 = [] #Per folder
        for lbl_sub in lbl :
            #print lbl_sub
            if ('pts' in lbl_sub) :
                x = []
                # NOTE(review): this rebinds the `data` parameter (and
                # shadows the builtin `file`); harmless afterwards since
                # neither is reused, but worth renaming.
                with open(lbl_sub) as file:
                    data = [re.split(r'\t+',l.strip()) for l in file]
                    #print data
                    for i in range(len(data)) :
                        if(i not in [0,1,2,len(data)-1]):
                            x.append([ float(j) for j in data[i][0].split()] )
                    #y = [ list(map(int, i)) for i in x]
                    #print len(x)
                lbl_68.append(x) #1 record
        list_labels.append(lbl_68)
    #print len(list_gt[2]) #dim : numfolder, num_data
    #print len(list_labels[2]) #dim : num_folder, num_data, 68
    list_images = []
    list_ground_truth = []
    max_width = max_height = -9999
    min_width = min_height = 9999
    mean_width = mean_height = 0
    print(("Total data : "+str(counter_image)))
    print("Now partitioning data if required")
    indexer = 0;
    # seq_size is None -> nested per-folder lists of pairs;
    # otherwise -> one flat list of pairs.
    if seq_size is None :
        for i in range(0,len(list_gt)): #For each dataset
            temp = []
            temp2 = []
            for j in range(0,len(list_gt[i])): #for number of data/batchsize
                t_temp = []
                t_temp2 = []
                for k in range (2) :
                    t_temp.append(list_gt[i][j])
                    t_temp2.append(np.array(list_labels[i][j]).flatten('F'))
                temp.append(t_temp)
                temp2.append(t_temp2)
            list_images.append(temp)
            list_ground_truth.append(temp2)
    else :
        for i in range(0,len(list_gt)): #For each dataset
            for j in range(0,len(list_gt[i])): #for number of data/batchsize
                t_temp = []
                t_temp2 = []
                for k in range (2) :
                    t_temp.append(list_gt[i][j])
                    t_temp2.append(np.array(list_labels[i][j]).flatten('F'))
                list_images.append(t_temp)
                list_ground_truth.append(t_temp2)
    #print list_images
    return list_images,list_ground_truth,[mean_width,mean_height, min_width,max_width, min_height, max_height]
def write_kp_file(finalTargetL,arr,length = 68):
    """Write keypoints to *finalTargetL* in the .pts file format.

    Args:
        finalTargetL: output file path.
        arr: flat sequence holding x coordinates in arr[0:length] and the
            matching y coordinates in arr[length:2*length].
        length: number of points to write (default 68).
    """
    # `with` guarantees the file is closed even if a write raises
    # (the old code also shadowed the builtin name `file`).
    with open(finalTargetL, 'w') as out:
        out.write('version: 1\n')
        out.write('n_points: '+str(length)+'\n')
        out.write('{\n')
        for j in range(length) :
            out.write(str(arr[j])+' '+str(arr[j+length])+'\n')
        out.write('}')
def writeLdmarkFile(fileName, ldmark):
    """Write a fixed 68-point landmark set to *fileName* in .pts format.

    Args:
        fileName: output file path.
        ldmark: flat sequence with x coordinates in ldmark[0:68] and the
            matching y coordinates in ldmark[68:136].
    """
    # `with` guarantees the file is closed even if a write raises
    # (the old code also shadowed the builtin name `file`).
    with open(fileName, 'w') as out:
        out.write('version: 1\n')
        out.write('n_points: 68\n')
        out.write('{\n')
        for i in range(68) :
            out.write(str(ldmark[i])+' '+str(ldmark[i+68])+'\n')
        out.write('}')
    return
def test():
    """Smoke-test the quadrant computation on corner points, printing the masks.

    Quadrants are numbered counter-clockwise from (+,+); points on an axis
    end up as -1.
    """
    import numpy as np
    # BUG FIX: this sample input was commented out, leaving `z` undefined
    # so every call raised NameError. Restored.
    z = np.array(((1,1),(-1,1),(-1,-1),(1,-1),(1,1)))
    v = z[:,0]
    a = z[:,1]
    # One indicator mask per quadrant; exactly one can fire per point.
    vc = (v>0).astype(int)
    ac = (a>0).astype(int)
    q0 = (vc+ac>1).astype(int)*1
    vc = (v<0).astype(int)
    ac = (a>0).astype(int)
    q1 = (vc+ac>1).astype(int)*2
    vc = (v<0).astype(int)
    ac = (a<0).astype(int)
    q2 = (vc+ac>1).astype(int)*3
    vc = (v>0).astype(int)
    ac = (a<0).astype(int)
    q3 = (vc+ac>1).astype(int)*4
    qtotal = (q0+q1+q2+q3)-1
    print(q0,q1,q2,q3)
    print(qtotal)
def toQuadrant(z):
    """Map 2-D (valence, arousal) rows of *z* to quadrant indices.

    Quadrants are numbered counter-clockwise starting from (+,+):
    0 -> (+,+), 1 -> (-,+), 2 -> (-,-), 3 -> (+,-). Points lying on
    either axis (a zero coordinate) map to -1.
    """
    v = z[:,0]
    a = z[:,1]
    pos_v = (v > 0).astype(int)
    neg_v = (v < 0).astype(int)
    pos_a = (a > 0).astype(int)
    neg_a = (a < 0).astype(int)
    # Accumulate one-hot quadrant codes 1..4, then shift to 0..3 / -1.
    quadrant = np.zeros_like(pos_v)
    quadrant += (pos_v + pos_a > 1).astype(int) * 1
    quadrant += (neg_v + pos_a > 1).astype(int) * 2
    quadrant += (neg_v + neg_a > 1).astype(int) * 3
    quadrant += (pos_v + neg_a > 1).astype(int) * 4
    return quadrant - 1
#test()
| 78,260
| 31.676827
| 204
|
py
|
Seq-Att-Affect
|
Seq-Att-Affect-master/model.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from operator import truediv
class Combiner(nn.Module):
    """CNN classifier in the style of a GAN discriminator.

    Five stride-2 convolutions (channel width doubling at each stage)
    collapse the input to a 1x1 spatial map, which a single linear layer
    maps to ``c_dim`` scores. The ``s``/``z`` arguments of forward() are
    accepted for interface compatibility but unused; ``image_size`` and
    ``repeat_num`` are likewise unused.
    """
    def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=4, inputC = 3):
        super(Combiner, self).__init__()
        self.tanh = nn.Tanh()
        self.relu = nn.ReLU()
        self.lrelu = nn.LeakyReLU(0.01)
        # Channel widths: conv_dim, 2x, 4x, 8x, 16x.
        widths = [conv_dim * (2 ** k) for k in range(5)]
        self.conv1 = nn.Conv2d(inputC, widths[0], 4, 2, 1)
        self.conv21 = nn.Conv2d(widths[0], widths[1], 4, 2, 1)
        self.conv22 = nn.Conv2d(widths[1], widths[2], 4, 2, 1)
        self.conv23 = nn.Conv2d(widths[2], widths[3], 4, 2, 1)
        self.conv24 = nn.Conv2d(widths[3], widths[4], 4, 2, 1)
        self.l1 = nn.Linear(widths[4], c_dim)

    def forward(self, x, s = None, z = None ):
        """Return (batch, c_dim) scores for a batch of images.

        The input's spatial size must reduce to 1x1 after five halvings
        (e.g. a 32x32 input).
        """
        feat = self.lrelu(self.conv1(x))
        feat = self.lrelu(self.conv21(feat))
        feat = self.lrelu(self.conv22(feat))
        feat = self.lrelu(self.conv23(feat))
        feat = self.lrelu(self.conv24(feat))
        flat = feat.view(feat.size(0), feat.size(1))
        return self.l1(flat)
class CombinerSeq(nn.Module):
    """Convolutional encoder with a recurrent (LSTM) classification head.

    Each forward() call encodes one frame with five stride-2 convolutions
    and advances a single-layer LSTM whose (h, c) state persists across
    calls until initialize() resets it, so a sequence is processed frame
    by frame. The hidden-state tensors are created with .cuda(), so a CUDA
    device is required. ``s``/``z`` and ``image_size``/``repeat_num``/
    ``seq_length`` are accepted for interface compatibility but unused.
    """
    def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=4, inputC = 3,lstmNeuron = 512,seq_length = 2,batch_length = 10):
        super(CombinerSeq, self).__init__()
        self.tanh = nn.Tanh()
        self.relu = nn.ReLU()
        self.lrelu = nn.LeakyReLU(0.01)
        self.btl = batch_length
        self.sql = seq_length
        self.dim_out = c_dim
        # Channel widths: conv_dim, 2x, 4x, 8x, 16x.
        widths = [conv_dim * (2 ** k) for k in range(5)]
        self.conv1 = nn.Conv2d(inputC, widths[0], 4, 2, 1)
        self.conv21 = nn.Conv2d(widths[0], widths[1], 4, 2, 1)
        self.conv22 = nn.Conv2d(widths[1], widths[2], 4, 2, 1)
        self.conv23 = nn.Conv2d(widths[2], widths[3], 4, 2, 1)
        self.conv24 = nn.Conv2d(widths[3], widths[4], 4, 2, 1)
        self.lstmNeuron = lstmNeuron
        self.l1 = nn.LSTM(widths[4], self.lstmNeuron)
        self.l2 = nn.Linear(self.lstmNeuron, self.dim_out)
        self.initialize(self.btl)

    def forward(self, x, s = None, z = None ):
        feat = self.lrelu(self.conv1(x))
        feat = self.lrelu(self.conv21(feat))
        feat = self.lrelu(self.conv22(feat))
        feat = self.lrelu(self.conv23(feat))
        feat = self.lrelu(self.conv24(feat))
        # Requires a 1x1 spatial map here (e.g. 32x32 input).
        flat = feat.view(feat.size(0), feat.size(1))
        # One sequence step of length 1; carry the hidden state forward.
        step, self.linear1_hdn = self.l1(flat.unsqueeze(0), self.linear1_hdn)
        return self.l2(step.squeeze(0))

    def initialize(self, batch_size=10):
        """Reset the carried LSTM (h, c) state to zeros on the GPU."""
        self.linear1_hdn = (torch.zeros(1, batch_size, self.lstmNeuron).cuda(),
                            torch.zeros(1, batch_size, self.lstmNeuron).cuda())
class CombinerSeqAttReplace(nn.Module):
    """Recurrent combiner with attention over previously recorded hidden states.

    forward() encodes one frame with five stride-2 convolutions and runs a
    single LSTM step whose (h, c) state persists across calls (reset with
    initialize()). When ``prev_h`` (a list of earlier hidden-state tensors)
    is supplied, scalar attention scores are computed between the current
    hidden state and each recorded one, and an attention-blended hidden
    state ``new_hidden`` is built. ``useCH`` makes the cell state
    participate in the attention as well.
    """
    def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=4, inputC = 3,
                 lstmNeuron = 512,seq_length = 2,batch_length = 10, useCH=0):
        super(CombinerSeqAttReplace, self).__init__()
        self.tanh = nn.Tanh()
        self.relu = nn.ReLU()
        self.lrelu = nn.LeakyReLU(0.01)
        self.btl = batch_length
        self.sql = seq_length
        self.dim_out = c_dim
        self.useCH = useCH
        # Five stride-2 convolutions; channels double at every stage.
        self.conv1 = nn.Conv2d(inputC, conv_dim, kernel_size=4, stride=2, padding=1)
        curr_dim = conv_dim
        self.conv21 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv22 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv23 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv24 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.lstmNeuron = lstmNeuron
        # Attention scorer input doubles when the cell state is included.
        if self.useCH :
            self.attn = nn.Linear(2*(self.lstmNeuron + self.lstmNeuron), 1)
        else :
            self.attn = nn.Linear(self.lstmNeuron + self.lstmNeuron, 1)
        self.lstm1 = nn.LSTM(curr_dim, self.lstmNeuron)
        self.l2 = nn.Linear(self.lstmNeuron, self.dim_out)
        self.initialize(self.btl)
    ################################################################
    def forward(self, x, s = None, z = None, prev_h = None, ret_w=False ):
        debug = False
        #print(x.shape)
        x1 = self.lrelu(self.conv1(x))
        x21 = self.lrelu(self.conv21(x1))
        x22 = self.lrelu(self.conv22(x21))
        x23 = self.lrelu(self.conv23(x22))
        x24 = self.lrelu(self.conv24(x23))
        # Flatten: requires a 1x1 spatial map at this point.
        x25 = x24.view(x24.size(0),x24.size(1))
        if debug :
            print(x.shape)
            print(x1.shape)
            print(x21.shape)
            print(x22.shape)
            print(x23.shape)
            print(x24.shape)
        #x25 = torch.unsqueeze(x25,0)
        input_lstm = torch.unsqueeze(x25,0)
        normalized_weights = []
        #Now add the attention if required
        if prev_h is not None :
            prev_h = torch.stack(prev_h)
            if debug :
                print('prevh',prev_h.shape)
            # Score each recorded hidden state against the current one.
            weights = []
            for i in range(len(prev_h)):
                if debug :
                    print(self.lstm1_hdn[0][0].shape)
                    print(prev_h[0].shape)
                if self.useCH :
                    curHidden = torch.cat((self.lstm1_hdn[0][0],self.lstm1_hdn[1][0]),1)
                else :
                    curHidden = self.lstm1_hdn[0][0]
                weights.append(self.attn(torch.cat((curHidden,
                                                    prev_h[i]), dim = 1)))
            normalized_weights = F.softmax(torch.cat(weights, 1), 1)
            if debug :
                print(normalized_weights.shape)
            if self.useCH :
                # Blend the recorded (h|c) pairs with the attention weights.
                attn_applied = torch.bmm(normalized_weights.unsqueeze(1),
                                         prev_h.view(prev_h.shape[1], -1, self.lstmNeuron*2))
                attn_applied = attn_applied.squeeze(1)
                # NOTE(review): `h` uses unsqueeze(0) but `c` uses
                # unsqueeze(1) — the asymmetry looks unintended; confirm.
                h = attn_applied[:,:self.lstmNeuron].unsqueeze(0)
                c = attn_applied[:,self.lstmNeuron:].unsqueeze(1)
                #print(h.shape,c.shape)
                new_hidden =(h,c)
            else :
                attn_applied = torch.bmm(normalized_weights.unsqueeze(1),
                                         prev_h.view(prev_h.shape[1], -1, self.lstmNeuron))
                #print(attn_applied.shape,self.lstm1_hdn[1].shape)
                attn_applied = attn_applied.squeeze(1)
                new_hidden = (attn_applied.unsqueeze(0),self.lstm1_hdn[1])
            ########################
            if debug :
                print('attn applied',attn_applied.shape)
                print('x25',x25.shape)
            if debug :
                print(input_lstm.shape)
            # NOTE(review): `new_hidden` is computed above but the LSTM call
            # below still uses self.lstm1_hdn — given the class name
            # ("Replace"), feeding new_hidden here may have been intended;
            # confirm before changing.
            x3,self.lstm1_hdn = self.lstm1(input_lstm,self.lstm1_hdn)
            out = self.l2(torch.squeeze(x3,0))
        else :
            if debug :
                print('x25 shape : ',x25.shape)
            x3,self.lstm1_hdn = self.lstm1(input_lstm,self.lstm1_hdn)
            out = self.l2(torch.squeeze(x3,0))
        # Optionally also return the attention weights ([] when prev_h is None).
        if ret_w :
            return out,normalized_weights;
        else :
            return out;
    def initialize(self,batch_size = 10):
        # Reset the carried LSTM (h, c) state to zeros; tensors live on the GPU.
        self.lstm1_hdn = (torch.zeros(1, batch_size, self.lstmNeuron).cuda(),torch.zeros(1, batch_size, self.lstmNeuron).cuda())
class CombinerSeqAtt(nn.Module):
    """Recurrent combiner that concatenates an attention context to the CNN features.

    forward() encodes one frame with five stride-2 convolutions; when
    ``prev_h`` (a list of earlier hidden-state tensors) is supplied, an
    attention-weighted blend of those states is concatenated to the frame
    features before the LSTM step (zeros are concatenated when prev_h is
    None, keeping the LSTM input width constant). The LSTM (h, c) state
    persists across calls until initialize() resets it. ``useCH`` makes
    the cell state participate in the attention as well.
    """
    def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=4, inputC = 3,
                 lstmNeuron = 512,seq_length = 2,batch_length = 10, useCH=0):
        super(CombinerSeqAtt, self).__init__()
        self.tanh = nn.Tanh()
        self.relu = nn.ReLU()
        self.lrelu = nn.LeakyReLU(0.01)
        self.btl = batch_length
        self.sql = seq_length
        self.dim_out = c_dim
        self.useCH = useCH
        # Five stride-2 convolutions; channels double at every stage.
        self.conv1 = nn.Conv2d(inputC, conv_dim, kernel_size=4, stride=2, padding=1)
        curr_dim = conv_dim
        self.conv21 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv22 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv23 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv24 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.lstmNeuron = lstmNeuron
        # The LSTM input is CNN features + attention context, so its width
        # grows by lstmNeuron (x2 when the cell state is included).
        if self.useCH :
            self.attn = nn.Linear(2*(self.lstmNeuron + self.lstmNeuron), 1)
            self.lstm1 = nn.LSTM(curr_dim+self.lstmNeuron+self.lstmNeuron, self.lstmNeuron)
        else :
            self.attn = nn.Linear(self.lstmNeuron + self.lstmNeuron, 1)
            self.lstm1 = nn.LSTM(curr_dim+self.lstmNeuron, self.lstmNeuron)
        self.l2 = nn.Linear(self.lstmNeuron, self.dim_out)
        self.initialize(self.btl)
    ################################################################
    def forward(self, x, s = None, z = None, prev_h = None,ret_w=False ):
        debug = False
        #print(x.shape)
        x1 = self.lrelu(self.conv1(x))
        x21 = self.lrelu(self.conv21(x1))
        x22 = self.lrelu(self.conv22(x21))
        x23 = self.lrelu(self.conv23(x22))
        x24 = self.lrelu(self.conv24(x23))
        # Flatten: requires a 1x1 spatial map at this point.
        x25 = x24.view(x24.size(0),x24.size(1))
        if debug :
            print(x.shape)
            print(x1.shape)
            print(x21.shape)
            print(x22.shape)
            print(x23.shape)
            print(x24.shape)
        #x25 = torch.unsqueeze(x25,0)
        normalized_weights = []
        #Now add the attention if required
        if prev_h is not None :
            prev_h = torch.stack(prev_h)
            if debug :
                print('prevh',prev_h.shape)
            # Score each recorded hidden state against the current one.
            weights = []
            for i in range(len(prev_h)):
                if debug :
                    print(self.lstm1_hdn[0][0].shape)
                    print(prev_h[0].shape)
                if self.useCH :
                    curHidden = torch.cat((self.lstm1_hdn[0][0],self.lstm1_hdn[1][0]),1)
                else :
                    curHidden = self.lstm1_hdn[0][0]
                weights.append(self.attn(torch.cat((curHidden,
                                                    prev_h[i]), dim = 1)))
            normalized_weights = F.softmax(torch.cat(weights, 1), 1)
            if debug :
                print(normalized_weights.shape)
            # Blend the recorded states with the attention weights.
            if self.useCH :
                attn_applied = torch.bmm(normalized_weights.unsqueeze(1),
                                         prev_h.view(prev_h.shape[1], -1, self.lstmNeuron*2))
            else :
                attn_applied = torch.bmm(normalized_weights.unsqueeze(1),
                                         prev_h.view(prev_h.shape[1], -1, self.lstmNeuron))
            ########################
            if debug :
                print('attn applied',attn_applied.shape)
                print('x25',x25.shape)
            # Concatenate the attention context to the CNN features.
            input_lstm = torch.cat((x25,attn_applied.squeeze(1)),dim=1)
            if debug :
                print(input_lstm.shape)
            input_lstm = torch.unsqueeze(input_lstm,0)
            x3,self.lstm1_hdn = self.lstm1(input_lstm,self.lstm1_hdn)
            out = self.l2(torch.squeeze(x3,0))
        else :
            if debug :
                print('x25 shape : ',x25.shape)
            # No history yet: pad with zeros so the LSTM input width matches.
            if self.useCH:
                input_lstm = torch.cat((x25,torch.zeros(x25.shape[0],self.lstmNeuron*2).cuda()),dim=1)
            else :
                input_lstm = torch.cat((x25,torch.zeros(x25.shape[0],self.lstmNeuron).cuda()),dim=1)
            input_lstm = torch.unsqueeze(input_lstm,0)
            x3,self.lstm1_hdn = self.lstm1(input_lstm,self.lstm1_hdn)
            out = self.l2(torch.squeeze(x3,0))
        # Optionally also return the attention weights ([] when prev_h is None).
        if ret_w:
            return out,normalized_weights;
        else :
            return out;
    def initialize(self,batch_size = 10):
        # Reset the carried LSTM (h, c) state to zeros; tensors live on the GPU.
        self.lstm1_hdn = (torch.zeros(1, batch_size, self.lstmNeuron).cuda(),torch.zeros(1, batch_size, self.lstmNeuron).cuda())
class CombinerSeqL(nn.Module):
    """Convolutional encoder with a two-layer stacked LSTM head.

    Like CombinerSeq, but routes the frame features through two LSTMs in
    sequence; both hidden states persist across forward() calls until
    initialize() resets them. Hidden-state tensors are created with
    .cuda(), so a CUDA device is required. ``s``/``z`` and the unused
    constructor arguments are kept for interface compatibility.
    """
    def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=4, inputC = 3,lstmNeuron = 512,seq_length = 2,batch_length = 10):
        super(CombinerSeqL, self).__init__()
        self.tanh = nn.Tanh()
        self.relu = nn.ReLU()
        self.lrelu = nn.LeakyReLU(0.01)
        self.btl = batch_length
        self.sql = seq_length
        self.dim_out = c_dim
        # Channel widths: conv_dim, 2x, 4x, 8x, 16x.
        widths = [conv_dim * (2 ** k) for k in range(5)]
        self.conv1 = nn.Conv2d(inputC, widths[0], 4, 2, 1)
        self.conv21 = nn.Conv2d(widths[0], widths[1], 4, 2, 1)
        self.conv22 = nn.Conv2d(widths[1], widths[2], 4, 2, 1)
        self.conv23 = nn.Conv2d(widths[2], widths[3], 4, 2, 1)
        self.conv24 = nn.Conv2d(widths[3], widths[4], 4, 2, 1)
        self.lstmNeuron = lstmNeuron
        self.ls1 = nn.LSTM(widths[4], self.lstmNeuron)
        self.ls2 = nn.LSTM(self.lstmNeuron, self.lstmNeuron)
        self.l1 = nn.Linear(self.lstmNeuron, self.dim_out)
        self.initialize(self.btl)

    def forward(self, x, s = None, z = None ):
        feat = self.lrelu(self.conv1(x))
        feat = self.lrelu(self.conv21(feat))
        feat = self.lrelu(self.conv22(feat))
        feat = self.lrelu(self.conv23(feat))
        feat = self.lrelu(self.conv24(feat))
        # Requires a 1x1 spatial map here (e.g. 32x32 input).
        flat = feat.view(feat.size(0), feat.size(1))
        # One sequence step through both LSTMs, carrying both states.
        step, self.linear1_hdn = self.ls1(flat.unsqueeze(0), self.linear1_hdn)
        step, self.linear2_hdn = self.ls2(step, self.linear2_hdn)
        return self.l1(step.squeeze(0))

    def initialize(self, batch_size=10):
        """Reset both carried LSTM (h, c) states to zeros on the GPU."""
        self.linear1_hdn = (torch.zeros(1, batch_size, self.lstmNeuron).cuda(),
                            torch.zeros(1, batch_size, self.lstmNeuron).cuda())
        self.linear2_hdn = (torch.zeros(1, batch_size, self.lstmNeuron).cuda(),
                            torch.zeros(1, batch_size, self.lstmNeuron).cuda())
'''
def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=6):
super(DiscriminatorM, self).__init__()
self.tanh = nn.Tanh()
self.relu = nn.ReLU()
self.lrelu = nn.LeakyReLU(0.01)
self.conv1 = nn.Conv2d(3, conv_dim, kernel_size=4, stride=2, padding=1)
curr_dim = conv_dim
self.conv21 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
curr_dim = curr_dim * 2
self.conv22 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
curr_dim = curr_dim * 2
self.conv23 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
curr_dim = curr_dim * 2
self.conv24 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
curr_dim = curr_dim * 2
self.conv25 = nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1)
curr_dim = curr_dim * 2
kernel_size = int(image_size / np.power(2, repeat_num))
self.conv31 = nn.Conv2d(curr_dim, 1, kernel_size=3, stride=1, padding=1, bias=False)
self.conv32 = nn.Conv2d(curr_dim, c_dim, kernel_size=kernel_size, bias=False)
self.linear41 = nn.Linear(4, c_dim)
def forward(self, x):
x1 = self.lrelu(self.conv1(x))
x21 = self.lrelu(self.conv21(x1))
x22 = self.lrelu(self.conv22(x21))
x23 = self.lrelu(self.conv23(x22))
x24 = self.lrelu(self.conv24(x23))
h = self.lrelu(self.conv25(x24))
#h = self.main(x)
out_src = self.conv31(h)
tmp = self.conv32(h)
out_cls = tmp
return out_src, out_cls.view(out_cls.size(0), out_cls.size(1))'''
class CombiningBottleNeckO(nn.Module):
    """Map a bottleneck feature map to a prediction vector.

    Three strided convolutions reduce a ``dim_in``-channel map to a flat
    2048-d vector (a 32x32 spatial input reduces 32 -> 8 -> 3 -> 1), then
    two linear layers produce ``dim_out`` values. With ``toCombine=True``,
    forward() concatenates a broadcast copy of the per-sample scalar label
    ``y`` ahead of the final linear layer instead of the plain hidden
    projection.
    """
    def __init__(self, dim_in, dim_out,toCombine = False):
        super(CombiningBottleNeckO, self).__init__()
        self.relu = nn.ReLU(inplace=True)
        self.lrelu = nn.LeakyReLU(0.01)
        # 32x32 -> 8x8 -> 3x3 -> 1x1 spatial reduction.
        self.conv1 = nn.Conv2d(dim_in, 512, kernel_size=8, stride=4, padding=2, bias=False)
        self.conv2 = nn.Conv2d(512, 1024, kernel_size=6, stride=2, padding=1, bias=False)
        self.conv3 = nn.Conv2d(1024, 2048, kernel_size=4, stride=2, padding=1, bias=False)
        self.toCombine = toCombine
        if self.toCombine :
            self.linear11 = nn.Linear(2048, 512)
        else :
            self.linear1 = nn.Linear(2048, 1024)
        self.linear2 = nn.Linear(1024, dim_out)

    def forward(self, x, y = None):
        feat = self.lrelu(self.conv1(x))
        feat = self.lrelu(self.conv2(feat))
        feat = self.lrelu(self.conv3(feat))
        flat = feat.view(feat.size(0), -1)
        if self.toCombine :
            hidden = self.lrelu(self.linear11(flat))
            # Tile the scalar label to the hidden width and concatenate.
            label = y.view(y.size(0), 1).repeat(1, hidden.size(1)).float()
            hidden = torch.cat((hidden, label), 1)
        else :
            hidden = self.lrelu(self.linear1(flat))
        return self.linear2(hidden)
class CombiningBottleNeck(nn.Module):
    """Map a bottleneck feature map to a prediction vector.

    Near-duplicate of CombiningBottleNeckO (this variant additionally
    instantiates an unused SELU module). Three strided convolutions reduce
    a ``dim_in``-channel map to a flat 2048-d vector (32x32 spatial input
    reduces 32 -> 8 -> 3 -> 1), followed by two linear layers producing
    ``dim_out`` values. With ``toCombine=True``, forward() concatenates a
    broadcast copy of the per-sample scalar label ``y`` ahead of the final
    linear layer.
    """
    def __init__(self, dim_in, dim_out,toCombine = False):
        super(CombiningBottleNeck, self).__init__()
        self.relu = nn.ReLU(inplace=True)
        self.lrelu = nn.LeakyReLU(0.01)
        self.selu = nn.SELU()  # instantiated but not used in forward()
        # 32x32 -> 8x8 -> 3x3 -> 1x1 spatial reduction.
        self.conv1 = nn.Conv2d(dim_in, 512, kernel_size=8, stride=4, padding=2, bias=False)
        self.conv2 = nn.Conv2d(512, 1024, kernel_size=6, stride=2, padding=1, bias=False)
        self.conv3 = nn.Conv2d(1024, 2048, kernel_size=4, stride=2, padding=1, bias=False)
        self.toCombine = toCombine
        if self.toCombine :
            self.linear11 = nn.Linear(2048, 512)
        else :
            self.linear1 = nn.Linear(2048, 1024)
        self.linear2 = nn.Linear(1024, dim_out)

    def forward(self, x, y = None):
        feat = self.lrelu(self.conv1(x))
        feat = self.lrelu(self.conv2(feat))
        feat = self.lrelu(self.conv3(feat))
        flat = feat.view(feat.size(0), -1)
        if self.toCombine :
            hidden = self.lrelu(self.linear11(flat))
            # Tile the scalar label to the hidden width and concatenate.
            label = y.view(y.size(0), 1).repeat(1, hidden.size(1)).float()
            hidden = torch.cat((hidden, label), 1)
        else :
            hidden = self.lrelu(self.linear1(flat))
        return self.linear2(hidden)
class CombiningBottleNeckSeq(nn.Module):
    """Sequence-aware bottleneck head.

    Encodes a (batch, dim_in, 32, 32) feature map through three strided
    convolutions to a 2048-d vector, optionally concatenates a rough
    per-sample estimate ``y`` (``toCombine``) and/or the previous
    prediction ``y_prev`` (``withPrev``), then runs one LSTM step
    followed by two linear layers to produce ``dim_out`` values.

    Fix: :meth:`initialize` previously allocated the LSTM state with a
    hard-coded ``.cuda()`` and is called from ``__init__``, so merely
    constructing the module crashed on CPU-only machines.  The state is
    now allocated on the module's own parameter device.
    """

    def __init__(self, dim_in, dim_out, toCombine=False, batch_length=10,
                 seq_length=2, withPrev=False, reduced=False, lstmNeuron=512):
        super(CombiningBottleNeckSeq, self).__init__()
        self.relu = nn.ReLU(inplace=True)
        self.lrelu = nn.LeakyReLU(0.01)
        self.btl = batch_length
        self.sql = seq_length
        self.dim_out = dim_out
        self.lstmNeuron = lstmNeuron
        # 32x32 -> 8x8 (per the original sizing comments).
        self.conv1 = nn.Conv2d(dim_in, 512, kernel_size=8, stride=4, padding=2, bias=False)
        # 8x8 -> 3x3
        self.conv2 = nn.Conv2d(512, 1024, kernel_size=6, stride=2, padding=1, bias=False)
        # 3x3 -> 1x1, giving a flat 2048-d feature
        self.conv3 = nn.Conv2d(1024, 2048, kernel_size=4, stride=2, padding=1, bias=False)
        self.toCombine = toCombine
        if self.toCombine:
            self.linear11 = nn.Linear(2048, 512)
        else:
            self.linear1 = nn.Linear(2048, 1024)
        self.withPrev = withPrev
        if self.withPrev:
            # Compress the features so the previous prediction can be
            # concatenated while keeping the LSTM input at 1024.
            self.linear2p = nn.Linear(1024, 512)
        self.linear2 = nn.LSTM(1024, self.lstmNeuron)
        self.linear3 = nn.Linear(self.lstmNeuron, int(truediv(self.lstmNeuron, 2)))
        self.linear4 = nn.Linear(int(truediv(self.lstmNeuron, 2)), dim_out)
        self.initialize(self.btl)

    def forward(self, x, y=None, y_prev=None):
        """Run one time step; call :meth:`initialize` before each new
        sequence to reset the recurrent state.

        y      -- required when ``toCombine`` is True (rough estimate).
        y_prev -- required when ``withPrev`` is True (previous output).
        """
        h = self.lrelu(self.conv1(x))
        h = self.lrelu(self.conv2(h))
        h = self.lrelu(self.conv3(h))
        h = h.view(h.size(0), -1)  # (batch, 2048)
        if not self.toCombine:
            feat = self.relu(self.linear1(h))
        else:
            feat = self.relu(self.linear11(h))
            # Tile the conditioning scalar across the feature width.
            est = y.view(y.size(0), 1).repeat(1, feat.size(1)).float()
            feat = torch.cat((feat, est), 1)
        if self.withPrev:
            feat = self.relu(self.linear2p(feat))
            prev = y_prev.view(y_prev.size(0), 1).repeat(1, feat.size(1)).float()
            feat = torch.cat((prev, feat), 1)
        # LSTM expects (seq_len, batch, input); we feed one step at a time
        # and carry the hidden/cell state across calls.
        feat = torch.unsqueeze(feat, 0)
        feat, self.linear2_hdn = self.linear2(feat, self.linear2_hdn)
        out = self.lrelu(self.linear3(torch.squeeze(feat, 0)))
        return self.linear4(out)

    def initialize(self, batch_size=10):
        """Reset the LSTM hidden/cell state for a new sequence.

        The state is allocated on the same device as the module's
        parameters (CPU or GPU) instead of unconditionally on CUDA.
        """
        device = next(self.parameters()).device
        h0 = torch.zeros(1, batch_size, self.lstmNeuron, device=device)
        c0 = torch.zeros(1, batch_size, self.lstmNeuron, device=device)
        self.linear2_hdn = (h0, c0)
class ResidualBlock(nn.Module):
"""Residual Block with instance normalization."""
def __init__(self, dim_in, dim_out, use_skip = True):
super(ResidualBlock, self).__init__()
self.main = nn.Sequential(
nn.Conv2d(dim_in, dim_out, kernel_size=3, stride=1, padding=1, bias=False),
nn.InstanceNorm2d(dim_out, affine=True, track_running_stats=True),
nn.ReLU(inplace=True),
nn.Conv2d(dim_out, dim_out, kernel_size=3, stride=1, padding=1, bias=False),
nn.InstanceNorm2d(dim_out, affine=True, track_running_stats=True))
self.use_skip = use_skip
def forward(self, x):
if self.use_skip :
return x + self.main(x)
else :
return self.main(x)
class GeneratorM(nn.Module):
    """Encoder / residual-bottleneck / decoder generator, declared layer by
    layer so intermediate activations can be returned (``returnInter``)."""

    def __init__(self, conv_dim=64, c_dim=5, repeat_num=6, use_skip=True, compressLatent=False):
        super(GeneratorM, self).__init__()
        self.compressLatent = compressLatent
        # Stem: the image is concatenated with the tiled domain code.
        self.conv1 = nn.Conv2d(3 + c_dim, conv_dim, kernel_size=7, stride=1, padding=3, bias=False)
        self.i1 = nn.InstanceNorm2d(conv_dim, affine=True, track_running_stats=True)
        self.relu = nn.ReLU(inplace=True)
        # Two stride-2 down-sampling stages.
        width = conv_dim
        self.conv21 = nn.Conv2d(width, width * 2, kernel_size=4, stride=2, padding=1, bias=False)
        self.i21 = nn.InstanceNorm2d(width * 2, affine=True, track_running_stats=True)
        width *= 2
        self.conv22 = nn.Conv2d(width, width * 2, kernel_size=4, stride=2, padding=1, bias=False)
        self.i22 = nn.InstanceNorm2d(width * 2, affine=True, track_running_stats=True)
        width *= 2
        # Bottleneck: residual blocks, with an optional dense latent in the
        # middle instead of the third block.
        self.conv31 = ResidualBlock(dim_in=width, dim_out=width, use_skip=True)
        self.conv32 = ResidualBlock(dim_in=width, dim_out=width, use_skip=True)
        if self.compressLatent:
            # Flattened 256x32x32 latent squeezed through a 512-d code
            # (sizes assume a 128x128 input — TODO confirm with callers).
            self.linear331 = nn.Linear(262144, 512)
            self.linear332 = nn.Linear(512, 262144)
        else:
            self.conv33 = ResidualBlock(dim_in=width, dim_out=width, use_skip=use_skip)
        self.conv34 = ResidualBlock(dim_in=width, dim_out=width, use_skip=True)
        self.conv35 = ResidualBlock(dim_in=width, dim_out=width, use_skip=True)
        # Two stride-2 up-sampling stages.
        self.conv41 = nn.ConvTranspose2d(width, width // 2, kernel_size=4, stride=2, padding=1, bias=False)
        self.i41 = nn.InstanceNorm2d(width // 2, affine=True, track_running_stats=True)
        width //= 2
        self.conv42 = nn.ConvTranspose2d(width, width // 2, kernel_size=4, stride=2, padding=1, bias=False)
        self.i42 = nn.InstanceNorm2d(width // 2, affine=True, track_running_stats=True)
        width //= 2
        # Output head back to RGB in [-1, 1].
        self.conv51 = nn.Conv2d(width, 3, kernel_size=7, stride=1, padding=3, bias=False)
        self.tanh = nn.Tanh()

    def forward(self, x, c=None, returnInter=False):
        """Translate *x*; when *c* is given it is tiled over the image plane
        and concatenated (a bias-style code would be cancelled by instance
        normalization).  With ``returnInter`` the mid-bottleneck activation
        is returned alongside the image."""
        if c is not None:
            code = c.view(c.size(0), c.size(1), 1, 1)
            code = code.repeat(1, 1, x.size(2), x.size(3))
            x = torch.cat([x, code], dim=1)
        h = self.relu(self.i1(self.conv1(x)))
        h = self.relu(self.i21(self.conv21(h)))
        h = self.relu(self.i22(self.conv22(h)))
        h = self.conv32(self.conv31(h))
        if self.compressLatent:
            z = self.relu(self.linear331(h.view(h.size(0), -1)))
            mid = self.relu(self.linear332(z)).view(z.size(0), 256, 32, 32)
        else:
            mid = self.conv33(h)
        h = self.conv35(self.conv34(mid))
        h = self.relu(self.i41(self.conv41(h)))
        h = self.relu(self.i42(self.conv42(h)))
        out = self.tanh(self.conv51(h))
        if returnInter:
            return out, mid
        return out
class DiscriminatorM112(nn.Module):
    """PatchGAN discriminator sized for 112x112 inputs; the trunk is six
    stride-2 convs followed by a patch head and a class head."""

    def __init__(self, image_size=112, conv_dim=64, c_dim=5, repeat_num=6):
        super(DiscriminatorM112, self).__init__()
        self.tanh = nn.Tanh()
        self.relu = nn.ReLU()
        self.lrelu = nn.LeakyReLU(0.01)
        self.conv1 = nn.Conv2d(3, conv_dim, kernel_size=4, stride=2, padding=1)
        width = conv_dim
        self.conv21 = nn.Conv2d(width, width * 2, kernel_size=4, stride=2, padding=1)
        width *= 2
        self.conv22 = nn.Conv2d(width, width * 2, kernel_size=4, stride=2, padding=1)
        width *= 2
        self.conv23 = nn.Conv2d(width, width * 2, kernel_size=4, stride=2, padding=1)
        width *= 2
        self.conv24 = nn.Conv2d(width, width * 2, kernel_size=4, stride=2, padding=1)
        width *= 2
        self.conv25 = nn.Conv2d(width, width * 2, kernel_size=4, stride=2, padding=1)
        width *= 2
        kernel_size = int(image_size / np.power(2, repeat_num))
        self.conv31 = nn.Conv2d(width, 1, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv32 = nn.Conv2d(width, c_dim, kernel_size=kernel_size, bias=False)
        self.linear41 = nn.Linear(4, c_dim)

    def forward(self, x, printH=False):
        h = x
        for stage in (self.conv1, self.conv21, self.conv22,
                      self.conv23, self.conv24, self.conv25):
            h = self.lrelu(stage(h))
        out_src = self.conv31(h)   # per-patch real/fake map
        out_cls = self.conv32(h)   # class logits; must be 1x1 spatially for the view below
        return out_src, out_cls.view(out_cls.size(0), out_cls.size(1))
class GeneratorMZ(nn.Module):
    """Reduced generator: two down-sampling stages, one residual bottleneck
    acting as the internal latent ("z"), two up-sampling stages, tanh head."""

    def __init__(self, conv_dim=64, c_dim=5, repeat_num=6, use_skip=True, compressLatent=False):
        super(GeneratorMZ, self).__init__()
        self.compressLatent = compressLatent
        self.conv1 = nn.Conv2d(3 + c_dim, conv_dim, kernel_size=7, stride=1, padding=3, bias=False)
        self.i1 = nn.InstanceNorm2d(conv_dim, affine=True, track_running_stats=True)
        self.relu = nn.ReLU(inplace=True)
        width = conv_dim
        self.conv21 = nn.Conv2d(width, width * 2, kernel_size=4, stride=2, padding=1, bias=False)
        self.i21 = nn.InstanceNorm2d(width * 2, affine=True, track_running_stats=True)
        width *= 2
        self.conv22 = nn.Conv2d(width, width * 2, kernel_size=4, stride=2, padding=1, bias=False)
        self.i22 = nn.InstanceNorm2d(width * 2, affine=True, track_running_stats=True)
        width *= 2
        # Single residual bottleneck; its output is the latent returned by
        # forward(..., returnInter=True).
        self.conv3 = ResidualBlock(dim_in=width, dim_out=width, use_skip=use_skip)
        self.conv41 = nn.ConvTranspose2d(width, width // 2, kernel_size=4, stride=2, padding=1, bias=False)
        self.i41 = nn.InstanceNorm2d(width // 2, affine=True, track_running_stats=True)
        width //= 2
        self.conv42 = nn.ConvTranspose2d(width, width // 2, kernel_size=4, stride=2, padding=1, bias=False)
        self.i42 = nn.InstanceNorm2d(width // 2, affine=True, track_running_stats=True)
        width //= 2
        self.conv51 = nn.Conv2d(width, 3, kernel_size=7, stride=1, padding=3, bias=False)
        self.tanh = nn.Tanh()

    def forward(self, x, c=None, returnInter=False):
        # Tile the domain code over the image plane; a purely additive code
        # would be cancelled by instance normalization.
        if c is not None:
            code = c.view(c.size(0), c.size(1), 1, 1).repeat(1, 1, x.size(2), x.size(3))
            x = torch.cat([x, code], dim=1)
        h = self.relu(self.i1(self.conv1(x)))
        h = self.relu(self.i21(self.conv21(h)))
        h = self.relu(self.i22(self.conv22(h)))
        latent = self.conv3(h)
        h = self.relu(self.i41(self.conv41(latent)))
        h = self.relu(self.i42(self.conv42(h)))
        out = self.tanh(self.conv51(h))
        if returnInter:
            return out, latent
        return out
class DiscriminatorMZ(nn.Module):
    """Discriminator that can additionally consume a 46-d side vector *s*
    (tiled into an extra input channel) and an external latent *z* (added
    after the second conv stage)."""

    def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=6, inputC=3):
        super(DiscriminatorMZ, self).__init__()
        self.tanh = nn.Tanh()
        self.relu = nn.ReLU()
        self.lrelu = nn.LeakyReLU(0.01)
        self.conv1 = nn.Conv2d(inputC, conv_dim, kernel_size=4, stride=2, padding=1)
        width = conv_dim
        self.conv21 = nn.Conv2d(width, width * 2, kernel_size=4, stride=2, padding=1)
        width *= 2
        self.conv22 = nn.Conv2d(width, width * 2, kernel_size=4, stride=2, padding=1)
        width *= 2
        self.conv23 = nn.Conv2d(width, width * 2, kernel_size=4, stride=2, padding=1)
        width *= 2
        # NOTE: this stage uses padding=2, unlike its siblings.
        self.conv24 = nn.Conv2d(width, width * 2, kernel_size=4, stride=2, padding=2)
        width *= 2
        self.conv25 = nn.Conv2d(width, width * 2, kernel_size=4, stride=2, padding=1)
        width *= 2
        kernel_size = int(image_size / np.power(2, repeat_num))
        self.conv31 = nn.Conv2d(width, 1, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv32 = nn.Conv2d(width, c_dim, kernel_size=kernel_size, bias=False)
        self.linear1 = nn.Linear(46, 112)

    def forward(self, x, s=None, z=None):
        if s is not None:
            # Project s to 112, tile it into a 112x112 plane and stack it as
            # an extra channel (assumes inputC=4 and 112x112 inputs — TODO confirm).
            plane = self.tanh(self.linear1(s))
            plane = torch.unsqueeze(torch.unsqueeze(plane, 1), 2)
            plane = plane.expand(plane.size(0), 1, 112, 112)
            x = torch.cat([x, plane], dim=1)
        h = self.lrelu(self.conv1(x))
        h = self.lrelu(self.conv21(h))
        if z is not None:
            h = h + z  # inject the external latent after the second stage
        h = self.lrelu(self.conv22(h))
        h = self.lrelu(self.conv23(h))
        h = self.lrelu(self.conv24(h))
        h = self.lrelu(self.conv25(h))
        out_src = self.conv31(h)
        out_cls = self.conv32(h)
        return out_src, out_cls.view(out_cls.size(0), out_cls.size(1))
class DiscriminatorM(nn.Module):
    """PatchGAN discriminator with explicitly declared stages: six stride-2
    convs, then a per-patch real/fake head and a class-logit head."""

    def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=6):
        super(DiscriminatorM, self).__init__()
        self.tanh = nn.Tanh()
        self.relu = nn.ReLU()
        self.lrelu = nn.LeakyReLU(0.01)
        self.conv1 = nn.Conv2d(3, conv_dim, kernel_size=4, stride=2, padding=1)
        width = conv_dim
        self.conv21 = nn.Conv2d(width, width * 2, kernel_size=4, stride=2, padding=1)
        width *= 2
        self.conv22 = nn.Conv2d(width, width * 2, kernel_size=4, stride=2, padding=1)
        width *= 2
        self.conv23 = nn.Conv2d(width, width * 2, kernel_size=4, stride=2, padding=1)
        width *= 2
        self.conv24 = nn.Conv2d(width, width * 2, kernel_size=4, stride=2, padding=1)
        width *= 2
        self.conv25 = nn.Conv2d(width, width * 2, kernel_size=4, stride=2, padding=1)
        width *= 2
        kernel_size = int(image_size / np.power(2, repeat_num))
        self.conv31 = nn.Conv2d(width, 1, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv32 = nn.Conv2d(width, c_dim, kernel_size=kernel_size, bias=False)
        self.linear41 = nn.Linear(4, c_dim)

    def forward(self, x):
        h = x
        for stage in (self.conv1, self.conv21, self.conv22,
                      self.conv23, self.conv24, self.conv25):
            h = self.lrelu(stage(h))
        out_src = self.conv31(h)   # per-patch real/fake map
        out_cls = self.conv32(h)   # class logits; 1x1 spatially so the view works
        return out_src, out_cls.view(out_cls.size(0), out_cls.size(1))
class DiscriminatorMST(nn.Module):
    """Single-task network with two scalar heads (A and V outputs,
    concatenated); when ``asDiscriminator`` is True it also produces a
    per-patch real/fake map."""

    def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=6, asDiscriminator=False):
        super(DiscriminatorMST, self).__init__()
        self.tanh = nn.Tanh()
        self.relu = nn.ReLU()
        self.lrelu = nn.LeakyReLU(0.01)
        self.asD = asDiscriminator
        self.conv1 = nn.Conv2d(3, conv_dim, kernel_size=4, stride=2, padding=1)
        width = conv_dim
        self.conv21 = nn.Conv2d(width, width * 2, kernel_size=4, stride=2, padding=1)
        width *= 2
        self.conv22 = nn.Conv2d(width, width * 2, kernel_size=4, stride=2, padding=1)
        width *= 2
        self.conv23 = nn.Conv2d(width, width * 2, kernel_size=4, stride=2, padding=1)
        width *= 2
        self.conv24 = nn.Conv2d(width, width * 2, kernel_size=4, stride=2, padding=1)
        width *= 2
        self.conv25 = nn.Conv2d(width, width * 2, kernel_size=4, stride=2, padding=1)
        width *= 2
        kernel_size = int(image_size / np.power(2, repeat_num))
        # Full-receptive-field heads that collapse the feature map to 1x1.
        self.conv31 = nn.Conv2d(width, 1, kernel_size=kernel_size, bias=False)
        self.conv32 = nn.Conv2d(width, 1, kernel_size=kernel_size, bias=False)
        if asDiscriminator:
            self.conv30 = nn.Conv2d(width, 1, kernel_size=3, stride=1, padding=1, bias=False)

    def forward(self, x, printH=False):
        h = x
        for stage in (self.conv1, self.conv21, self.conv22,
                      self.conv23, self.conv24, self.conv25):
            h = self.lrelu(stage(h))
        out_A = self.conv31(h)
        out_V = self.conv32(h)
        scores = torch.cat((out_A.view(out_A.size(0), out_A.size(1)),
                            out_V.view(out_V.size(0), out_V.size(1))), 1)
        if self.asD:
            return self.conv30(h), scores
        return scores
class GeneratorMZR(nn.Module):
    """Heavily reduced generator for 128x128 inputs.

    NOTE: structurally identical to :class:`GeneratorMZ` (two down-sampling
    stages, one residual bottleneck as the internal latent, two up-sampling
    stages); kept as a separate class for checkpoint compatibility.
    """

    def __init__(self, conv_dim=64, c_dim=5, repeat_num=6, use_skip=True, compressLatent=False):
        super(GeneratorMZR, self).__init__()
        self.compressLatent = compressLatent
        self.conv1 = nn.Conv2d(3 + c_dim, conv_dim, kernel_size=7, stride=1, padding=3, bias=False)
        self.i1 = nn.InstanceNorm2d(conv_dim, affine=True, track_running_stats=True)
        self.relu = nn.ReLU(inplace=True)
        width = conv_dim
        self.conv21 = nn.Conv2d(width, width * 2, kernel_size=4, stride=2, padding=1, bias=False)
        self.i21 = nn.InstanceNorm2d(width * 2, affine=True, track_running_stats=True)
        width *= 2
        self.conv22 = nn.Conv2d(width, width * 2, kernel_size=4, stride=2, padding=1, bias=False)
        self.i22 = nn.InstanceNorm2d(width * 2, affine=True, track_running_stats=True)
        width *= 2
        self.conv3 = ResidualBlock(dim_in=width, dim_out=width, use_skip=use_skip)
        self.conv41 = nn.ConvTranspose2d(width, width // 2, kernel_size=4, stride=2, padding=1, bias=False)
        self.i41 = nn.InstanceNorm2d(width // 2, affine=True, track_running_stats=True)
        width //= 2
        self.conv42 = nn.ConvTranspose2d(width, width // 2, kernel_size=4, stride=2, padding=1, bias=False)
        self.i42 = nn.InstanceNorm2d(width // 2, affine=True, track_running_stats=True)
        width //= 2
        self.conv51 = nn.Conv2d(width, 3, kernel_size=7, stride=1, padding=3, bias=False)
        self.tanh = nn.Tanh()

    def forward(self, x, c=None, returnInter=False):
        # Tile the domain code over the image plane before the stem.
        if c is not None:
            code = c.view(c.size(0), c.size(1), 1, 1).repeat(1, 1, x.size(2), x.size(3))
            x = torch.cat([x, code], dim=1)
        h = self.relu(self.i1(self.conv1(x)))
        h = self.relu(self.i21(self.conv21(h)))
        h = self.relu(self.i22(self.conv22(h)))
        latent = self.conv3(h)
        h = self.relu(self.i41(self.conv41(latent)))
        h = self.relu(self.i42(self.conv42(h)))
        out = self.tanh(self.conv51(h))
        if returnInter:
            return out, latent
        return out
class DiscriminatorMZR(nn.Module):
    """Greatly reduced discriminator (four conv stages) with optional side
    vector *s* and external latent *z* (see :meth:`forward`).

    Fix: the leftover ``debug = True`` block — which printed tensor shapes
    on every forward pass — has been removed.
    """

    def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=4, inputC=3):
        super(DiscriminatorMZR, self).__init__()
        self.tanh = nn.Tanh()
        self.relu = nn.ReLU()
        self.lrelu = nn.LeakyReLU(0.01)
        self.conv1 = nn.Conv2d(inputC, conv_dim, kernel_size=4, stride=2, padding=1)
        curr_dim = conv_dim
        self.conv21 = nn.Conv2d(curr_dim, curr_dim * 2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv22 = nn.Conv2d(curr_dim, curr_dim * 2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv23 = nn.Conv2d(curr_dim, curr_dim * 2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        kernel_size = int(image_size / np.power(2, repeat_num))
        self.conv31 = nn.Conv2d(curr_dim, 1, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv32 = nn.Conv2d(curr_dim, c_dim, kernel_size=kernel_size, bias=False)
        self.linear1 = nn.Linear(46, 112)

    def forward(self, x, s=None, z=None):
        """Return (per-patch real/fake map, per-sample class logits).

        s: optional 46-d vector, projected to 112 and tiled into an extra
           input channel (requires inputC=4 and 112x112 inputs — TODO confirm).
        z: optional tensor added to the activations after the second conv.
        """
        if s is not None:
            plane = self.tanh(self.linear1(s))
            plane = torch.unsqueeze(torch.unsqueeze(plane, 1), 2)
            plane = plane.expand(plane.size(0), 1, 112, 112)
            x = torch.cat([x, plane], dim=1)
        h = self.lrelu(self.conv1(x))
        h = self.lrelu(self.conv21(h))
        if z is not None:
            h = h + z  # inject the external latent after the second stage
        h = self.lrelu(self.conv22(h))
        h = self.lrelu(self.conv23(h))
        out_src = self.conv31(h)
        out_cls = self.conv32(h)
        return out_src, out_cls.view(out_cls.size(0), out_cls.size(1))
class DiscriminatorMZRL(nn.Module):
    """Greatly reduced discriminator (three conv stages) with optional side
    vector *s* and external latent *z*.

    Fix: the class-head kernel size was computed from ``inputC`` (the input
    channel count) instead of ``image_size`` — with the defaults that is
    ``int(3 / 8) == 0``, making ``conv32`` an unusable zero-kernel conv.
    It is now derived from ``image_size`` like in the sibling
    discriminators, and the stray ``print`` in ``__init__`` was dropped.
    """

    def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=3, inputC=3):
        super(DiscriminatorMZRL, self).__init__()
        self.tanh = nn.Tanh()
        self.relu = nn.ReLU()
        self.lrelu = nn.LeakyReLU(0.01)
        self.conv1 = nn.Conv2d(inputC, conv_dim, kernel_size=4, stride=2, padding=1)
        curr_dim = conv_dim
        self.conv21 = nn.Conv2d(curr_dim, curr_dim * 2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        self.conv22 = nn.Conv2d(curr_dim, curr_dim * 2, kernel_size=4, stride=2, padding=1)
        curr_dim = curr_dim * 2
        # Spatial size after repeat_num stride-2 stages; used as the
        # full-receptive-field kernel of the class head.
        kernel_size = int(image_size / np.power(2, repeat_num))
        self.conv31 = nn.Conv2d(curr_dim, 1, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv32 = nn.Conv2d(curr_dim, c_dim, kernel_size=kernel_size, bias=False)
        self.linear1 = nn.Linear(46, 112)

    def forward(self, x, s=None, z=None):
        """Return (per-patch real/fake map, per-sample class logits).

        s: optional 46-d vector, projected to 112 and tiled into an extra
           input channel (requires inputC=4 and 112x112 inputs — TODO confirm).
        z: optional tensor added to the activations after the second conv.
        """
        if s is not None:
            plane = self.tanh(self.linear1(s))
            plane = torch.unsqueeze(torch.unsqueeze(plane, 1), 2)
            plane = plane.expand(plane.size(0), 1, 112, 112)
            x = torch.cat([x, plane], dim=1)
        h = self.lrelu(self.conv1(x))
        h = self.lrelu(self.conv21(h))
        if z is not None:
            h = h + z  # inject the external latent after the second stage
        h = self.lrelu(self.conv22(h))
        out_src = self.conv31(h)
        out_cls = self.conv32(h)
        return out_src, out_cls.view(out_cls.size(0), out_cls.size(1))
class Generator(nn.Module):
    """StarGAN-style generator built as a single ``nn.Sequential``.

    Bug fix: the bottleneck loops used ``range(repeat_num / 2)``, which
    raises ``TypeError`` under Python 3 because ``/`` yields a float;
    integer division (``//``) is used instead, preserving the Python 2
    behaviour.
    """

    def __init__(self, conv_dim=64, c_dim=5, repeat_num=6, use_skip=True):
        super(Generator, self).__init__()
        layers = []
        # Stem: image concatenated with the tiled domain code.
        layers.append(nn.Conv2d(3 + c_dim, conv_dim, kernel_size=7, stride=1, padding=3, bias=False))
        layers.append(nn.InstanceNorm2d(conv_dim, affine=True, track_running_stats=True))
        layers.append(nn.ReLU(inplace=True))
        # Down-sampling layers.
        curr_dim = conv_dim
        for i in range(2):
            layers.append(nn.Conv2d(curr_dim, curr_dim * 2, kernel_size=4, stride=2, padding=1, bias=False))
            layers.append(nn.InstanceNorm2d(curr_dim * 2, affine=True, track_running_stats=True))
            layers.append(nn.ReLU(inplace=True))
            curr_dim = curr_dim * 2
        # Bottleneck: first half alternates skip / no-skip blocks, second
        # half uses skip blocks only.
        for i in range(repeat_num // 2):
            layers.append(ResidualBlock(dim_in=curr_dim, dim_out=curr_dim, use_skip=True))
            layers.append(ResidualBlock(dim_in=curr_dim, dim_out=curr_dim, use_skip=False))
        for i in range(repeat_num // 2):
            layers.append(ResidualBlock(dim_in=curr_dim, dim_out=curr_dim, use_skip=True))
        # Up-sampling layers.
        for i in range(2):
            layers.append(nn.ConvTranspose2d(curr_dim, curr_dim // 2, kernel_size=4, stride=2, padding=1, bias=False))
            layers.append(nn.InstanceNorm2d(curr_dim // 2, affine=True, track_running_stats=True))
            layers.append(nn.ReLU(inplace=True))
            curr_dim = curr_dim // 2
        layers.append(nn.Conv2d(curr_dim, 3, kernel_size=7, stride=1, padding=3, bias=False))
        layers.append(nn.Tanh())
        self.main = nn.Sequential(*layers)

    def forward(self, x, c=None):
        """Tile the domain code *c* spatially and concatenate before the
        stem; a bias-style code would be cancelled by instance norm."""
        if c is not None:
            c = c.view(c.size(0), c.size(1), 1, 1)
            c = c.repeat(1, 1, x.size(2), x.size(3))
            x = torch.cat([x, c], dim=1)
        return self.main(x)
class Discriminator(nn.Module):
    """PatchGAN discriminator: one Sequential trunk of stride-2 convs plus
    a per-patch head and a class head (optionally tanh-squashed)."""

    def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=6):
        super(Discriminator, self).__init__()
        self.tanh = nn.Tanh()
        self.relu = nn.ReLU()
        trunk = [nn.Conv2d(3, conv_dim, kernel_size=4, stride=2, padding=1),
                 nn.LeakyReLU(0.01)]
        width = conv_dim
        for _ in range(1, repeat_num):
            trunk.append(nn.Conv2d(width, width * 2, kernel_size=4, stride=2, padding=1))
            trunk.append(nn.LeakyReLU(0.01))
            width *= 2
        kernel_size = int(image_size / np.power(2, repeat_num))
        self.main = nn.Sequential(*trunk)
        self.conv1 = nn.Conv2d(width, 1, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv2 = nn.Conv2d(width, c_dim, kernel_size=kernel_size, bias=False)
        self.linear1 = nn.Linear(4, c_dim)

    def forward(self, x, useTanh=True, printH=False):
        features = self.main(x)
        out_src = self.conv1(features)
        logits = self.conv2(features)
        if printH:
            print('tmp : ', logits[:2])
        out_cls = self.tanh(logits) if useTanh else logits
        return out_src, out_cls.view(out_cls.size(0), out_cls.size(1))
| 67,049
| 33.795018
| 137
|
py
|
Seq-Att-Affect
|
Seq-Att-Affect-master/main_red_test.py
|
import os
import argparse
from solver import Solver
from data_loader import get_loader
from torch.backends import cudnn
from model import Generator,Combiner
from model import Discriminator,DiscriminatorM,DiscriminatorMST, DiscriminatorMZ,\
DiscriminatorMZR, Combiner,CombinerSeq,CombinerSeqL,CombinerSeqAtt,CombinerSeqAttReplace, GeneratorM,DiscriminatorM
from torch.autograd import Variable
from torchvision.utils import save_image
from FacialDataset import AFEWVA,AFEWVAReduced,SEWAFEWReduced, SEWAFEWReducedLatent
from utils import *
import time
import torch.nn.functional as F
import numpy as np
import torch
import datetime
from torchvision import transforms
from torch import nn
from calcMetrix import *
from config import *
import csv
import file_walker
import matplotlib.ticker as ticker
from PIL import Image
from scipy.special import softmax
import matplotlib.gridspec as gridspec
def str2bool(v):
    """Parse a CLI flag string: 'true' in any case is True, anything else False.

    Bug fix: the original tested ``v.lower() in ('true')`` — parentheses
    without a comma are not a tuple, so this was *substring* membership in
    the string 'true' and inputs like 'ru' or 'tru' parsed as True.
    """
    return v.lower() == 'true'
def train_only_comb_seq():
    """Train the sequence combiner network (C) on pre-extracted latent features.

    All hyper-parameters come from the command line.  Builds train/validation
    splits of AFEW-VA (or SEWA, when -sewa is set), optimizes a
    CombinerSeqAtt / CombinerSeqAttReplace model with a (optionally weighted)
    MSE loss -- optionally augmented with correlation-based losses
    (COR/CCC/ICC) when -addLoss is set -- and periodically evaluates on the
    held-out split, saving a '-best' checkpoint when the summed correlation
    metrics improve.  Progress and metrics are appended to a per-run text file.
    """
    #64,0,1200 32,1,2000? 32,2,
    os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"]="0"
    # ----- command-line configuration -----
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-split', nargs='?', const=1, type=int, default=0)#0,1,2
    parser.add_argument('-addLoss', nargs='?', const=1, type=int, default=1)#0,1,2
    parser.add_argument('-singleTask', nargs='?', const=1, type=int, default=0)#0,1,2
    parser.add_argument('-trainQuadrant', nargs='?', const=1, type=int, default=0)#0,1,2
    parser.add_argument('-dConv', nargs='?', const=1, type=int, default=64)#64
    parser.add_argument('-batch_size', nargs='?', const=1, type=int, default=4000) #0 is ori, 1 is red
    parser.add_argument('-sewa', nargs='?', const=1, type=int, default=0)#0,1,2
    parser.add_argument('-useWeightNormalization', nargs='?', const=1, type=int, default=1)#0,1,2
    parser.add_argument('-useAll', nargs='?', const=1, type=int, default=1)#0,1,2
    parser.add_argument('-seq_length', nargs='?', const=1, type=int, default=4)#1,2,4,8,16,32
    parser.add_argument('-use_attention', nargs='?', const=1, type=int, default=1)#0,1,2
    parser.add_argument('-use_ch', nargs='?', const=1, type=int, default=1)#0,1,2
    parser.add_argument('-use_h', nargs='?', const=1, type=int, default=1)#0,1,2
    parser.add_argument('-toLoad', nargs='?', const=1, type=int, default=0)#0,1,2
    parser.add_argument('-toUpgrade', nargs='?', const=1, type=int, default=0)#0,1,2
    parser.add_argument('-toAddAttention', nargs='?', const=1, type=int, default=0)#0,1,2
    parser.add_argument('-numIters', nargs='?', const=1, type=int, default=200000)#0,1,2
    args = parser.parse_args()
    split = args.split
    addLoss = args.addLoss
    singleTask = args.singleTask
    isSewa = args.sewa
    useWeight = args.useWeightNormalization
    useAll = args.useAll
    useAtt = args.use_attention
    useCH = args.use_ch
    useH = args.use_h
    trainQuadrant = args.trainQuadrant
    alterQuadrant = True
    #curDir = "/home/deckyal/eclipse-workspace/FaceTracking/"
    # ----- model / optimization hyper-parameters -----
    c_dim=2
    image_size=128
    d_conv_dim=args.dConv
    lambda_cls=1
    lambda_rec=10
    lambda_gp=10
    inputC = 3#input channel for discriminator
    toLoad = args.toLoad
    toUpgrade = args.toUpgrade
    toAddAttention = args.toAddAttention
    resume_iters=None #, help='resume training from this step')
    num_iters=args.numIters #, help='number of total iterations for training D')
    num_iters_decay=100000 #, help='number of iterations for decaying lr')
    g_lr=0.0001 #, help='learning rate for G')
    d_lr=0.0001 #, help='learning rate for D')
    n_critic=5 #, help='number of D updates per each G update')
    beta1=0.5 #, help='beta1 for Adam optimizer')
    beta2=0.999 #, help='beta2 for Adam optimizer')
    isVideo = True
    toAlign = False
    seq_length = args.seq_length
    # -batch_size is given in frames; divide by sequence length to get the
    # number of sequences per mini-batch.
    batch_size=int(truediv(args.batch_size,seq_length))#500, help='mini-batch size')
    # Test configuration.
    test_iters=200000 #, help='test model from this step')
    # Miscellaneous.
    num_workers=1
    mode='train' #, choices=['train', 'test'])
    use_tensorboard=False
    log_dir='stargan/logs'
    model_save_dir='stargan/models'
    sample_dir='stargan/samples'
    result_dir='stargan/results'
    # Step size.
    log_step=10
    sample_step=1000
    model_save_step=10000
    lr_update_step=100
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # For fast training.
    cudnn.benchmark = True
    if not os.path.exists(model_save_dir):
        os.makedirs(model_save_dir)
    #Split
    #split = 0
    multi_gpu = False
    testSplit = split
    print("Test split " , testSplit)
    nSplit = 5
    # Train on every split except the held-out test split.
    listSplit = []
    for i in range(nSplit):
        if i!=testSplit :
            listSplit.append(i)
    print(listSplit)
    # ----- experiment / checkpoint naming (encodes the configuration) -----
    if not isSewa :
        main_name = 'AF-C-'
        d_name = 'AFEW-VA-Fixed'#'AFEW-VA-Fixed'
        dbType = 0
    else :
        main_name = 'SE-C-'
        d_name = 'SEWA'
        dbType = 1
    if useH :
        main_name += 'R-'
    if useCH :
        main_name += 'CH-'
    load_to_add = main_name
    if useAtt :
        main_name += 'A-'
    load_to_add_split = main_name
    mseLoss = nn.MSELoss()
    main_name+=(str(d_conv_dim)+'-')
    load_to_add+=(str(d_conv_dim)+'-')
    load_to_add_split+=(str(d_conv_dim)+'-')
    if trainQuadrant :
        if alterQuadrant :
            main_name+="-QDAL"
            c_dim = 1
        else :
            main_name+="-QD"
            c_dim = 4
    save_name = main_name+str(testSplit)+'-n-'+str(seq_length)
    print('saving name is : ',save_name)
    load_to_add_split = load_to_add_split+str(testSplit)+'-n-'+str(seq_length)
    load_to_add = load_to_add+str(testSplit)+'-n-'+str(seq_length)
    # Name of the checkpoint trained with half the sequence length (used by
    # the -toUpgrade path below).
    load_prev = main_name+str(testSplit)+'-n-'+str(int(truediv(seq_length,2)))
    err_file = curDir+save_name+".txt"
    transform =transforms.Compose([
            transforms.Resize((image_size,image_size)),
            #transforms.CenterCrop(image_size),
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
            #transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ])
    # ----- data loaders over the pre-extracted latent features -----
    ID = SEWAFEWReducedLatent([d_name], None, image_size, 1,split=True, nSplit = nSplit ,listSplit=listSplit
        ,isVideo=isVideo, seqLength = seq_length,dbType = dbType, returnQuadrant=trainQuadrant,
        returnWeight = useWeight,useAll = useAll, splitNumber=testSplit)
    dataloader = torch.utils.data.DataLoader(dataset = ID, batch_size = batch_size, shuffle = True,worker_init_fn=worker_init_fn)
    VD = SEWAFEWReducedLatent([d_name], None, image_size, 1,split=True, nSplit = nSplit,listSplit=[testSplit]
        ,isVideo=isVideo, seqLength = seq_length, returnQuadrant=trainQuadrant,dbType = dbType,useAll = useAll)
    dataloaderV = torch.utils.data.DataLoader(dataset = VD, batch_size = batch_size, shuffle = False)
    # Attention combiner; the "Replace" variant is selected by -use_h.
    if not useH:
        model_ft = CombinerSeqAtt(image_size, d_conv_dim, c_dim, 4,64,512,seq_length,batch_size,useCH=useCH)
    else :
        model_ft = CombinerSeqAttReplace(image_size, d_conv_dim, c_dim, 4,64,512,seq_length,batch_size,useCH=useCH)
    d_optimizer = torch.optim.Adam(model_ft.parameters(), d_lr, [beta1, beta2])
    print_network(model_ft, 'D')
    # Either resume the same run, upgrade from the half-sequence-length
    # checkpoint, add attention on top of a non-attention checkpoint, or
    # start from scratch with uniform initialization.
    if toLoad:
        print('loading previous model ')
        model_ft.load_state_dict(torch.load(curDir+'t-models/'+save_name))
    elif toUpgrade :
        print('upgrading from previous model ',load_prev)
        model_ft.load_state_dict(torch.load(curDir+'t-models/'+load_prev))
    elif toAddAttention :
        print('adding attention to original model ',load_to_add)
        model_ft.load_state_dict(torch.load(curDir+'t-models/'+load_to_add))
    else :
        model_ft.apply(weights_init_uniform_rule)
    model_ft.to(device)
    d_lr = d_lr
    start_iters = 0
    '''if resume_iters:
        start_iters = resume_iters
        restore_model(resume_iters)'''
    # Start training.
    print('Start training...')
    start_time = time.time()
    f = open(err_file,'w+')
    f.write("err : ")
    f.close()
    #best_model_wts = copy.deepcopy(model.state_dict())
    lowest_loss = 99999
    # Best-so-far trackers for each validation metric (MSE/COR/CCC/ICC,
    # valence and arousal) plus the summed-metrics total.
    lMSA,lMSV,lCCV,lCCA,lICA,lICV,lCRA, lCRV, total = 9999,9999,-9999, -9999, -9999, -9999, -9999, -9999, -9999
    w,wv,wa = None,None,None
    print('batch size : ',batch_size)
    for i in range(start_iters, num_iters):
        # Re-seed with a fresh random seed each epoch.
        random.seed()
        manualSeed = random.randint(1, 10000) # use if you want new results
        random.seed(manualSeed)
        torch.manual_seed(manualSeed)
        print('Epoch {}/{}'.format(i, num_iters - 1))
        print('-'*10)
        running_loss = 0
        model_ft.train()
        for x,(data) in enumerate(dataloader,0) :
            rinputs_l, rlabels_l,rldmrk_l,_ = data[0],data[1],data[2],data[3]
            if useWeight :
                w = data[5].cuda()
            ccPred_l = []
            model_ft.initialize(batch_size = rinputs_l.size(0)) #initialize for each seq
            prev_result = None
            d_optimizer.zero_grad()
            cumLoss = 0
            if useAtt :
                l_h = []
            #print('shape of inputs',rinputs_l.shape)
            # Unroll the sequence one step at a time; when attention is on,
            # the accumulated hidden states are fed back as context (l_h).
            for y in range(seq_length):
                rinputs, rlabels = rinputs_l[:,y].cuda(),rlabels_l[:,y].cuda()
                if useAtt :
                    if len(l_h) > 0:
                        outputs = model_ft(rinputs,prev_h = l_h)
                    else :
                        outputs = model_ft(rinputs)
                    if useCH :
                        l_h.append(torch.cat((model_ft.lstm1_hdn[0][0],model_ft.lstm1_hdn[1][0]),1))
                    else :
                        l_h.append(model_ft.lstm1_hdn[0][0])
                else :
                    outputs = model_ft(rinputs)
                ccPred_l.append(outputs)
                #loss+=mseLoss(outputs,rlabels)
                loss = calcMSET(outputs,rlabels,w)
                cumLoss+=loss
                if addLoss :
                    # Column 0 is valence, column 1 is arousal; the metric
                    # losses are negated so that maximizing COR/CCC/ICC
                    # corresponds to minimizing the loss.
                    ov,oa,lv,la = outputs[:,0],outputs[:,1], rlabels[:,0], rlabels[:,1]
                    corV = -calcCORT(ov, lv, wv)
                    corA = -calcCORT(oa, la, wa)
                    cccV = -calcCCCT(ov, lv, wv)
                    cccA = -calcCCCT(oa, la, wa)
                    iccV = -calcICCT(ov, lv, wv)
                    iccA = -calcICCT(oa, la, wa)
                    #<lossO =corV+corA +cccV+cccA+iccV+iccA
                    lossO = cccV+cccA+iccV+iccA
                if not addLoss :
                    print("{}/{} loss : {}".format(x,int(len(dataloader.dataset)/batch_size),loss.item()))
                else :
                    print("{}/{} loss : {:.8f}, cor : {:.8f}/{:.8f}, ccc : {:.8f}/{:.8f}, icc : {:.8f}/{:.8f}".format(x,int(len(dataloader.dataset)/batch_size),
                        loss.item(),corV.item(),corA.item(),cccV.item(),cccA.item(),iccV.item(),iccA.item()))
                f = open(err_file,'a')
                if not addLoss :
                    f.write("{}/{} loss : {}\n".format(x,int(len(dataloader.dataset)/batch_size),loss.item()))
                else :
                    f.write("{}/{} loss : {:.3f}, cor : {:.3f}/{:.3f}, ccc : {:.3f}/{:.3f}, icc : {:.3f}/{:.3f}\n".format(x,int(len(dataloader.dataset)/batch_size),
                        loss.item(),corV.item(),corA.item(),cccV.item(),cccA.item(),iccV.item(),iccA.item()))
                f.close()
                if addLoss :
                    cumLoss += lossO
            # One optimizer step per sequence, on the loss accumulated over
            # all sequence steps.
            cumLoss.backward()
            d_optimizer.step()
        # Decay learning rates.
        if (i+1) % lr_update_step == 0 and (i+1) > 50 : #(num_iters - num_iters_decay):
            d_lr -= (d_lr / float(num_iters_decay))
            update_lr(d_lr,d_optimizer)
            print ('Decayed learning rates, g_lr: {}, d_lr: {}.'.format(g_lr, d_lr))
        if i %2 == 0 :
            if multi_gpu :
                torch.save(model_ft.module.state_dict(),curDir+'t-models/'+save_name)
            else :
                torch.save(model_ft.state_dict(),curDir+'t-models/'+save_name)
        #Deep copy the model_ft
        # ----- periodic evaluation on the held-out split -----
        if i%5 == 0 :#epoch_loss < lowest_loss :
            lowest_loss = lowest_loss
            model_ft.eval()
            if True :
                listValO = []
                listAroO = []
                listValL = []
                listAroL = []
                tvo = [];tao=[];tvl = []; tal = [];
                anyDiffer = False
                for x,(data) in enumerate(dataloaderV,0) :
                    rinputs_l, rlabels_l,rldmrk_l = data[0],data[1],data[2]
                    model_ft.initialize(rinputs_l.shape[0])
                    if useAtt :
                        l_h = []
                    with torch.set_grad_enabled(False) :
                        pre_result = None
                        for y in range(seq_length):
                            rinputs, rlabels, rldmrk = rinputs_l[:,y], rlabels_l[:,y],rldmrk_l[:,y]
                            inputs = rinputs.cuda()#to(device)
                            labels = rlabels.cuda()#to(device)
                            '''if useAtt :
                                outputs,the_w = model_ft(inputs,ret_w=True)
                                print(x,',',int(truediv(len(VD),batch_size)),outputs[:2], labels[:2],outputs[:,0].shape[0],outputs.shape)
                                print('w',the_w[:2])
                            else :
                                outputs = model_ft(inputs)
                                print(x,',',int(truediv(len(VD),batch_size)),outputs[:2], labels[:2],outputs[:,0].shape[0],outputs.shape)
                            '''
                            if useAtt :
                                if len(l_h) > 0:
                                    outputs,the_w = model_ft(inputs,prev_h = l_h,ret_w=True)
                                    print('w',the_w[:2])
                                else :
                                    outputs = model_ft(inputs)
                                if useCH :
                                    l_h.append(torch.cat((model_ft.lstm1_hdn[0][0],model_ft.lstm1_hdn[1][0]),1))
                                else :
                                    l_h.append(model_ft.lstm1_hdn[0][0])
                            else :
                                outputs = model_ft(inputs)
                            #print('o shape',outputs.shape)
                            print(x,',',int(truediv(len(VD),batch_size)),outputs[:2], labels[:2],outputs[:,0].shape[0],outputs.shape)
                            if outputs[:,0].shape[0] != batch_size : #in case the batch size is differ, usually at end of iter
                                anyDiffer = True
                                print('differ')
                                tvo.append(outputs[:,0].detach().cpu())
                                tao.append(outputs[:,1].detach().cpu())
                                tvl.append(labels[:,0].detach().cpu())
                                tal.append(labels[:,1].detach().cpu())
                            else :
                                print('equal')
                                listValO.append(outputs[:,0].detach().cpu())
                                listAroO.append(outputs[:,1].detach().cpu())
                                listValL.append(labels[:,0].detach().cpu())
                                listAroL.append(labels[:,1].detach().cpu())
                # Full-size batches are stacked together; odd-sized remainder
                # batches (anyDiffer) are stacked separately, then both are
                # concatenated into flat prediction / ground-truth arrays.
                if len(listValO) > 0 :
                    est_V = np.asarray(torch.stack(listValO)).flatten()
                    est_A = np.asarray(torch.stack(listAroO)).flatten()
                    gt_V = np.asarray(torch.stack(listValL)).flatten()
                    gt_A = np.asarray(torch.stack(listAroL)).flatten()
                if anyDiffer :
                    est_Vt = np.asarray(torch.stack(tvo)).flatten()
                    est_At = np.asarray(torch.stack(tao)).flatten()
                    gt_Vt = np.asarray(torch.stack(tvl)).flatten()
                    gt_At = np.asarray(torch.stack(tal)).flatten()
                    #python main_red_test.py -useAll=1 -batch_size=6000 -seq_length=4 -use_attention=1
                    #now concatenate
                    if len(listValO) > 0 :
                        est_V = np.concatenate((est_V,est_Vt))
                        est_A = np.concatenate((est_A,est_At))
                        gt_V = np.concatenate((gt_V,gt_Vt))
                        gt_A = np.concatenate((gt_A,gt_At))
                    else :
                        est_V,est_A,gt_V,gt_A = est_Vt,est_At,gt_Vt,gt_At
                print(est_V.shape, gt_V.shape)
                # Validation metrics (the *2 variants compare ground truth
                # with itself, presumably as a sanity upper bound).
                mseV = calcMSE(est_V, gt_V)
                mseA = calcMSE(est_A, gt_A)
                corV = calcCOR(est_V, gt_V)
                corA = calcCOR(est_A, gt_A)
                iccV = calcICC(est_V, gt_V)
                iccA = calcICC(est_A, gt_A)
                iccV2 = calcICC(gt_V, gt_V)
                iccA2 = calcICC(gt_A, gt_A)
                cccV = calcCCC(est_V, gt_V)
                cccA = calcCCC(est_A, gt_A)
                cccV2 = calcCCC(gt_V, gt_V)
                cccA2 = calcCCC(gt_A, gt_A)
                # Update per-metric bests; save '-best' when the summed
                # correlation metrics improve.
                if lMSA > mseA :
                    lMSA = mseA
                if lMSV > mseV :
                    lMSV = mseV
                if corA > lCRA :
                    lCRA = corA
                if corV > lCRV :
                    lCRV = corV
                if cccA > lCCA :
                    lCCA = cccA
                if cccV > lCCV :
                    lCCV = cccV
                if iccA > lICA :
                    lICA = iccA
                if iccV > lICV :
                    lICV = iccV
                if (corA+corV+cccA+cccV+iccA+iccV) > total :
                    total = (corA+corV+cccA+cccV+iccA+iccV)
                    if multi_gpu :
                        torch.save(model_ft.module.state_dict(),curDir+'t-models/'+save_name+'-best')
                    else :
                        torch.save(model_ft.state_dict(),curDir+'t-models/'+save_name+'-best')
                print('Best, MSEA : '+str(lMSA)+', CORA : '+str(lCRA)+', CCCA : '+str(lCCA)+', ICCA : '+str(lICA)+ ', MSEV : ' +str(lMSV)+ ', CORV : ' +str(lCRV)+', CCCV : '+str(lCCV) +', ICCV : '+str(lICV)+', Total : '+str(total))
                print('MSEV : ',mseV, ', CORV : ',corV,', CCCV : ',cccV,', CCCV2 : ',cccV2,', ICCV : ',iccV,', ICCV2 : ',iccV2)
                print('MSEA : ',mseA, ', CORA : ',corA,', CCCA : ',cccA,', CCCA2 : ',cccA2,', ICCA : ',iccA,', ICCA2 : ',iccA2)
                f = open(err_file,'a')
                res = 'MSEV : '+str(mseV)+ ', CORV : ' +str(corV)+', CCCV : '+str(cccV) +', ICCV : '+str(iccV)+' \n '
                f.write(res)
                res = 'MSEA : '+str(mseA)+ ', CORA : '+str(corA) +', CCCA : '+str(cccA) +', ICCA : '+str(iccA)+' \n '
                f.write(res)
                res = 'Best, MSEA : '+str(lMSA)+', CORA : '+str(lCRA)+', CCCA : '+str(lCCA)+', ICCA : '+str(lICA)+ ', MSEV : ' +str(lMSV)+ ', CORV : ' +str(lCRV)+', CCCV : '+str(lCCV) +', ICCV : '+str(lICV)+', Total : '+str(total)+' \n '
                f.write(res)
                f.close()
    print('Best val Acc: {:4f}'.format(lowest_loss))
    return
def test_only_comb_seq():
    """Evaluate trained sequence combiner (C) checkpoints and compile metrics.

    For every sequence length in ``list_seq`` and every cross-validation
    split, the corresponding checkpoint is loaded and run over the held-out
    split.  MSE/COR/CCC/ICC for valence and arousal are written as .npy and
    .csv files under the model folder; when ``toSave`` is enabled the
    per-frame predictions and attention weights are exported to an external
    folder as well.  Finally the per-split results are stacked into a single
    "all" file per sequence length.
    """
    #64,0,1200 32,1,2000? 32,2,
    os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"]="0"
    # ----- command-line configuration -----
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-split', nargs='?', const=1, type=int, default=0)#0,1,2
    parser.add_argument('-addLoss', nargs='?', const=1, type=int, default=1)#0,1,2
    parser.add_argument('-singleTask', nargs='?', const=1, type=int, default=0)#0,1,2
    parser.add_argument('-trainQuadrant', nargs='?', const=1, type=int, default=0)#0,1,2
    parser.add_argument('-dConv', nargs='?', const=1, type=int, default=64)#64
    parser.add_argument('-batch_size', nargs='?', const=1, type=int, default=6000) #0 is ori, 1 is red
    parser.add_argument('-sewa', nargs='?', const=1, type=int, default=0)#0,1,2
    parser.add_argument('-useWeightNormalization', nargs='?', const=1, type=int, default=1)#0,1,2
    parser.add_argument('-useAll', nargs='?', const=1, type=int, default=1)#0,1,2
    parser.add_argument('-seq_length', nargs='?', const=1, type=int, default=4)#1,2,4,8,16,32
    parser.add_argument('-use_attention', nargs='?', const=1, type=int, default=1)#0,1,2
    parser.add_argument('-use_ch', nargs='?', const=1, type=int, default=1)#0,1,2
    parser.add_argument('-use_h', nargs='?', const=1, type=int, default=0)#0,1,2
    parser.add_argument('-toLoad', nargs='?', const=1, type=int, default=1)#0,1,2
    parser.add_argument('-toUpgrade', nargs='?', const=1, type=int, default=0)#0,1,2
    parser.add_argument('-toAddAttention', nargs='?', const=1, type=int, default=0)#0,1,2
    parser.add_argument('-per', nargs='?', const=1, type=int, default=0)#0,1,2
    parser.add_argument('-numIters', nargs='?', const=1, type=int, default=200000)#0,1,2
    args = parser.parse_args()
    split = args.split
    addLoss = args.addLoss
    singleTask = args.singleTask
    isSewa = args.sewa
    useWeight = args.useWeightNormalization
    useAll = args.useAll
    useAtt = args.use_attention
    useCH = args.use_ch
    useH = args.use_h
    trainQuadrant = args.trainQuadrant
    alterQuadrant = True
    per = args.per
    # Sequence lengths and splits to sweep over.
    list_seq = [2,4,8,16,32]#[1]#[0,2,4,8,16,32]
    list_split = range(5)
    listRes = []
    c_dim=2
    image_size=128
    d_conv_dim=args.dConv
    inputC = 3#input channel for discriminator
    isVideo = True
    toAlign = False
    toLoad = args.toLoad
    toUpgrade = args.toUpgrade
    toAddAttention = args.toAddAttention
    num_workers=1
    model_save_dir='stargan/models'
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    toRecordRes = True #use to get the metrics on the model's fodler.
    toSave = False #use tosave to save the results to external folder
    dirTarget = "/media/deckyal/INT-450GB/extracted"
    for seq_length in (list_seq) :
        # Checkpoints for this sequence length live under theDir; the
        # computed metrics go to its result/ subfolder.
        root_dir = '/home/deckyal/Desktop/all-models/'
        sp_dir = '0-CH-SeqAfew-att-plus5'
        sq_dir = '/'+str(seq_length)+"/"
        theDir = root_dir+sp_dir+sq_dir
        resDir = theDir+'result/'
        checkDirMake(resDir)
        #seq_length = args.seq_length
        batch_size=int(truediv(args.batch_size,seq_length))#500, help='mini-batch size')
        evaluateSplit = True
        listRes = []
        if evaluateSplit :
            for split in list_split :
                testSplit = split
                print("Test split " , testSplit)
                nSplit = 5
                listSplit = []
                for i in range(nSplit):
                    if i!=testSplit :
                        listSplit.append(i)
                print(listSplit)
                # Rebuild the checkpoint name exactly as during training.
                if not isSewa :
                    main_name = 'AF-C-'
                    d_name = 'AFEW-VA-Fixed'#'AFEW-VA-Fixed'
                    dbType = 0
                else :
                    main_name = 'SE-C-'
                    d_name = 'SEWA'
                    dbType = 1
                if useH :
                    main_name += 'R-'
                if useCH :
                    main_name += 'CH-'
                load_to_add = main_name
                if useAtt :
                    main_name += 'A-'
                load_to_add_split = main_name
                mseLoss = nn.MSELoss()
                main_name+=(str(d_conv_dim)+'-')
                load_to_add+=(str(d_conv_dim)+'-')
                load_to_add_split+=(str(d_conv_dim)+'-')
                if trainQuadrant :
                    if alterQuadrant :
                        main_name+="-QDAL"
                        c_dim = 1
                    else :
                        main_name+="-QD"
                        c_dim = 4
                save_name = main_name+str(testSplit)+'-n-'+str(seq_length)
                save_name_all = main_name+'all-'+str(seq_length)
                print('saving name is : ',save_name)
                VD = SEWAFEWReducedLatent([d_name], None, image_size, 1,split=True, nSplit = nSplit,listSplit=[testSplit]
                    ,isVideo=isVideo, seqLength = seq_length, returnQuadrant=trainQuadrant,dbType = dbType,useAll = useAll,returnFName = toSave)
                dataloaderV = torch.utils.data.DataLoader(dataset = VD, batch_size = batch_size, shuffle = False)
                # Attention combiner; the "Replace" variant is selected by -use_h.
                if not useH:
                    model_ft = CombinerSeqAtt(image_size, d_conv_dim, c_dim, 4,64,512,seq_length,batch_size,useCH=useCH)
                else :
                    model_ft = CombinerSeqAttReplace(image_size, d_conv_dim, c_dim, 4,64,512,seq_length,batch_size,useCH=useCH)
                if toLoad:
                    print('loading previous model ')
                    model_ft.load_state_dict(torch.load(theDir+save_name))
                model_ft.to(device)
                model_ft.eval()
                listValO = []
                listAroO = []
                listValL = []
                listAroL = []
                tvo = [];tao=[];tvl = []; tal = [];
                anyDiffer = False
                print('not eval')
                #model_ft.eval()
                for x,(data) in enumerate(dataloaderV,0) :
                    rinputs_l, rlabels_l,rldmrk_l = data[0],data[1],data[2]
                    if toSave :
                        fname_l = data[-1]
                    model_ft.initialize(rinputs_l.shape[0])
                    if useAtt :
                        l_h = []
                    the_w = None
                    with torch.set_grad_enabled(False) :
                        pre_result = None
                        # Unroll the sequence, feeding accumulated hidden
                        # states back as the attention context.
                        for y in range(seq_length):
                            rinputs, rlabels, rldmrk = rinputs_l[:,y], rlabels_l[:,y],rldmrk_l[:,y]
                            inputs = rinputs.cuda()#to(device)
                            labels = rlabels.cuda()#to(device)
                            if useAtt :
                                if len(l_h) > 0:
                                    outputs,the_w = model_ft(inputs,prev_h = l_h,ret_w=True)
                                    print('w',the_w[:2])
                                else :
                                    outputs = model_ft(inputs)
                                if useCH :
                                    l_h.append(torch.cat((model_ft.lstm1_hdn[0][0],model_ft.lstm1_hdn[1][0]),1))
                                else :
                                    l_h.append(model_ft.lstm1_hdn[0][0])
                            else :
                                outputs = model_ft(inputs)
                            #print('o shape',outputs.shape)
                            print(x,',',int(truediv(len(VD),batch_size)),outputs[:2], labels[:2],outputs[:,0].shape[0],outputs.shape)
                            if outputs[:,0].shape[0] != batch_size : #in case the batch size is differ, usually at end of iter
                                anyDiffer = True
                                print('differ')
                                tvo.append(outputs[:,0].detach().cpu())
                                tao.append(outputs[:,1].detach().cpu())
                                tvl.append(labels[:,0].detach().cpu())
                                tal.append(labels[:,1].detach().cpu())
                            else :
                                print('equal')
                                listValO.append(outputs[:,0].detach().cpu())
                                listAroO.append(outputs[:,1].detach().cpu())
                                listValL.append(labels[:,0].detach().cpu())
                                listAroL.append(labels[:,1].detach().cpu())
                            # Optionally export per-frame prediction, ground
                            # truth and attention weights to dirTarget.
                            if toSave :
                                if the_w is None :
                                    the_w = labels.clone()
                                    the_w *=0
                                #print(fname_l)
                                #exit(0)
                                for fn,pred,gt,tw in zip(fname_l[0],outputs.detach().cpu().numpy(),labels.detach().cpu().numpy(),the_w.detach().cpu().numpy()):
                                    #print(fn,pred.shape,gt.shape,tw.shape)
                                    #1st get the file name
                                    dirName, fName = os.path.split(fn)
                                    fName = fName.split('.')[0]
                                    #print('fname ',fName)
                                    print(fName,tw)
                                    listDir = dirName.split('/')
                                    indexName = listDir.index(d_name)
                                    folderName = os.path.join(dirTarget,d_name,listDir[indexName+1])
                                    folderNameImage = os.path.join(folderName,'img')
                                    folderNameRes = os.path.join(folderName,'resPred')
                                    folderNameW = os.path.join(folderName,'theW')
                                    checkDirMake(folderNameImage)
                                    checkDirMake(folderNameRes)
                                    checkDirMake(folderNameW)
                                    #original image path
                                    listDir[-1] = 'img-128'
                                    imgPath = '/'.join(listDir)
                                    #check the image from actual gt, jpg etc. and save dummy file
                                    l_poss = ["jpg","jpeg",'png']
                                    imgName = None
                                    intr = 0
                                    imgName = imgPath+'/'+fName+"."+l_poss[intr]
                                    while (not os.path.isfile(imgName)):
                                        #print('checking ',imgName)
                                        intr+=1
                                        imgName = imgPath+'/'+fName+"."+l_poss[intr]
                                    f = open(folderNameImage+'/'+fName+".txt",'w')
                                    f.write(imgName)
                                    f.close()
                                    #print('saved ',imgName,' to', folderNameImage+'/'+fName+".txt")
                                    #now save the pred,gt in npz
                                    np.savez(folderNameRes+'/'+fName+".npz",pred=pred,lbl=gt)
                                    #now save the tw in separate npz
                                    np.save(folderNameW+'/'+fName+".npy",the_w)
                                    #exit(0)
                # Full-size batches are stacked together; odd-sized remainder
                # batches (anyDiffer) are stacked separately, then both are
                # concatenated into flat prediction / ground-truth arrays.
                if len(listValO) > 0 :
                    est_V = np.asarray(torch.stack(listValO)).flatten()
                    est_A = np.asarray(torch.stack(listAroO)).flatten()
                    gt_V = np.asarray(torch.stack(listValL)).flatten()
                    gt_A = np.asarray(torch.stack(listAroL)).flatten()
                if anyDiffer :
                    est_Vt = np.asarray(torch.stack(tvo)).flatten()
                    est_At = np.asarray(torch.stack(tao)).flatten()
                    gt_Vt = np.asarray(torch.stack(tvl)).flatten()
                    gt_At = np.asarray(torch.stack(tal)).flatten()
                    #now concatenate
                    if len(listValO) > 0 :
                        est_V = np.concatenate((est_V,est_Vt))
                        est_A = np.concatenate((est_A,est_At))
                        gt_V = np.concatenate((gt_V,gt_Vt))
                        gt_A = np.concatenate((gt_A,gt_At))
                    else :
                        est_V,est_A,gt_V,gt_A = est_Vt,est_At,gt_Vt,gt_At
                print(est_V.shape, gt_V.shape)
                # Per-split metrics (the *2 variants compare ground truth
                # with itself, presumably as a sanity upper bound).
                mseV = calcMSE(est_V, gt_V)
                mseA = calcMSE(est_A, gt_A)
                corV = calcCOR(est_V, gt_V)
                corA = calcCOR(est_A, gt_A)
                iccV = calcICC(est_V, gt_V)
                iccA = calcICC(est_A, gt_A)
                iccV2 = calcICC(gt_V, gt_V)
                iccA2 = calcICC(gt_A, gt_A)
                cccV = calcCCC(est_V, gt_V)
                cccA = calcCCC(est_A, gt_A)
                cccV2 = calcCCC(gt_V, gt_V)
                cccA2 = calcCCC(gt_A, gt_A)
                #print('Best, MSEA : '+str(lMSA)+', CORA : '+str(lCRA)+', CCCA : '+str(lCCA)+', ICCA : '+str(lICA)+ ', MSEV : ' +str(lMSV)+ ', CORV : ' +str(lCRV)+', CCCV : '+str(lCCV) +', ICCV : '+str(lICV)+', Total : '+str(total))
                print('MSEV : ',mseV, ', CORV : ',corV,', CCCV : ',cccV,', CCCV2 : ',cccV2,', ICCV : ',iccV,', ICCV2 : ',iccV2)
                print('MSEA : ',mseA, ', CORA : ',corA,', CCCA : ',cccA,', CCCA2 : ',cccA2,', ICCA : ',iccA,', ICCA2 : ',iccA2)
                # res rows: MSE, COR, CCC, ICC; columns: valence, arousal.
                res = np.asarray([[mseV,mseA],[corV,corA],[cccV,cccA],[iccV,iccA]])
                listRes.append(res)
                if toRecordRes :
                    np.save(resDir+save_name+".npy",res)
                    print('saved : ',resDir+save_name+".npy")
                    with open(resDir+save_name+'.csv', 'w', newline='') as csvfile:
                        spamwriter = csv.writer(csvfile, delimiter=',',
                                quotechar='|', quoting=csv.QUOTE_MINIMAL)
                        spamwriter.writerow([res[0,0],res[0,1]])
                        spamwriter.writerow([res[1,0],res[1,1]])
                        spamwriter.writerow([res[2,0],res[2,1]])
                        spamwriter.writerow([res[3,0],res[3,1]])
        if toRecordRes :
            #now compiling the reesults from 5 split
            listRes = np.stack(listRes)
            np.save(resDir+save_name_all+".npy",listRes)
            print('saved : ',resDir+save_name_all)
            # The naming block is rebuilt here so the "all" file can also be
            # reloaded independently of the per-split loop above.
            if not isSewa :
                main_name = 'AF-C-'
                d_name = 'AFEW-VA-Fixed'#'AFEW-VA-Fixed'
                dbType = 0
            else :
                main_name = 'SE-C-'
                d_name = 'SEWA'
                dbType = 1
            if useH :
                main_name += 'R-'
            if useCH :
                main_name += 'CH-'
            load_to_add = main_name
            if useAtt :
                main_name += 'A-'
            load_to_add_split = main_name
            mseLoss = nn.MSELoss()
            main_name+=(str(d_conv_dim)+'-')
            load_to_add+=(str(d_conv_dim)+'-')
            load_to_add_split+=(str(d_conv_dim)+'-')
            if trainQuadrant :
                if alterQuadrant :
                    main_name+="-QDAL"
                    c_dim = 1
                else :
                    main_name+="-QD"
                    c_dim = 4
            save_name_all = main_name+'all-'+str(seq_length)
            listRes = np.load(resDir+save_name_all+".npy")
            print('loaded : ',resDir+save_name_all)
            print(listRes)
            # Flatten the stacked per-split metrics into one row per metric
            # (valence/arousal interleaved) for the summary csv.
            l_m = []
            l_cor = []
            l_cc = []
            l_ic = []
            for tmp in range(listRes.shape[0]):
                l_m.append(listRes[tmp][0,0]);l_m.append(listRes[tmp][0,1])
                l_cor.append(listRes[tmp][1,0]);l_cor.append(listRes[tmp][1,1])
                l_cc.append(listRes[tmp][2,0]);l_cc.append(listRes[tmp][2,1])
                l_ic.append(listRes[tmp][3,0]);l_ic.append(listRes[tmp][3,1])
            if toRecordRes:
                with open(resDir+save_name_all+'.csv', 'w', newline='') as csvfile:
                    spamwriter = csv.writer(csvfile, delimiter=',',
                            quotechar='|', quoting=csv.QUOTE_MINIMAL)
                    spamwriter.writerow(l_m)
                    spamwriter.writerow(l_cor)
                    spamwriter.writerow(l_cc)
                    spamwriter.writerow(l_ic)
            print(np.stack(l_m))
    #now opening the file to make the csv
if __name__ == '__main__':
    train_only_comb_seq() #To train seq C given extracted features of G and D
    # test_only_comb_seq() #To test the seq C
    # NOTE: the original code had a bare `test_only_comb_seq` expression here
    # (no parentheses), which evaluated the function object and did nothing.
    # Uncomment the call above to actually run the evaluation.
| 40,371
| 38.972277
| 237
|
py
|
Seq-Att-Affect
|
Seq-Att-Affect-master/main_gan_single_reduction.py
|
import os
import argparse
from solver import Solver
from data_loader import get_loader
from torch.backends import cudnn
from model import Generator, Discriminator, GeneratorM, GeneratorMZ, GeneratorMZR, DiscriminatorM,
DiscriminatorMST,DiscriminatorMZ,DiscriminatorMZR,DiscriminatorMZRL,CombinerSeqAtt
from torch.autograd import Variable
from torchvision.utils import save_image
from FacialDataset import AFEWVA, AFEWVAReduced,SEWAFEWReduced
from utils import *
import time
import torch.nn.functional as F
import numpy as np
import torch
import datetime
from torchvision import transforms
from torch import nn
from calcMetrix import *
from config import *
import argparse
import shutil
# ---------------------------------------------------------------------------
# Module-level command-line configuration.  All flags are int-valued and used
# as booleans/ids; `args` is read directly by the training entry points below
# (e.g. train_w_gdc_adl).  nargs='?' with const=1 lets a bare flag mean "1".
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('-split', nargs='?', const=1, type=int, default=0)#0,1,2
parser.add_argument('-sewa', nargs='?', const=1, type=int, default=0)#0,1,2
parser.add_argument('-semaine', nargs='?', const=1, type=int, default=1)#0,1,2
parser.add_argument('-gConv', nargs='?', const=1, type=int, default=16)#64
parser.add_argument('-dConv', nargs='?', const=1, type=int, default=16)#64
parser.add_argument('-nSel', nargs='?', const=1, type=int, default=0) #0 is ori, 1 is red
parser.add_argument('-batch_size', nargs='?', const=1, type=int, default=300) #0 is ori, 1 is red
parser.add_argument('-multi_gpu', nargs='?', const=1, type=int, default=0) #0 is ori, 1 is red
parser.add_argument('-resume_iters', nargs='?', const=1, type=int, default=79)#0,1,2. helpfull
parser.add_argument('-mode', nargs='?', const=1, type=int, default=0)#0 : train, 1 : extract
#may change
parser.add_argument('-tryDenoise', nargs='?', const=1, type=int, default=1)#0,1,2. Helpfull
parser.add_argument('-useWeightNormalization', nargs='?', const=0, type=int, default=1)#0,1,2. helpfull
parser.add_argument('-addLoss', nargs='?', const=1, type=int, default=1)#0,1,2. helpfull
#dont change
parser.add_argument('-singleTask', nargs='?', const=1, type=int, default=0)#0,1,2. Multitask is slightly better
parser.add_argument('-trainQuadrant', nargs='?', const=1, type=int, default=0)#0,1,2
parser.add_argument('-alterQuadrant', nargs='?', const=1, type=int, default=0)#0,1,2
parser.add_argument('-useLatent', nargs='?', const=1, type=int, default=0)#0,1,2 #To use linear latent : bad
parser.add_argument('-useSkip', nargs='?', const=1, type=int, default=0)#0,1,2 #To use skip : no difference
args = parser.parse_args()
def str2bool(v):
    """Parse a string as a boolean: True iff ``v`` equals 'true' (case-insensitive).

    Bug fix: the original ``v.lower() in ('true')`` tested *substring*
    membership in the string ``'true'`` (``('true')`` is not a tuple), so
    inputs such as ``'rue'``, ``'t'`` or ``''`` were accepted as True.
    """
    return v.lower() == 'true'
##############################################################
def toQuadrant(inputData = None, min = -10, max = 10, toOneHot = False):
    """Map a (valence, arousal) pair to its quadrant of the affect plane.

    `inputData[0]` is valence, `inputData[1]` is arousal; both are compared
    against the midpoint of [min, max].  Quadrant indices: 0 = both high,
    1 = low valence / high arousal, 2 = both low, 3 = high valence / low
    arousal.  Returns the index, or a 4-element one-hot numpy vector when
    `toOneHot` is set.
    """
    midpoint = truediv(min + max, 2)
    valence_low = inputData[0] < midpoint
    arousal_low = inputData[1] < midpoint
    if valence_low:
        quadrant = 2 if arousal_low else 1
    else:
        quadrant = 3 if arousal_low else 0
    if not toOneHot:
        return quadrant
    one_hot = np.zeros(4)
    one_hot[quadrant] += 1
    return one_hot
def train_w_gdc_adl(): #training g and d on standard l2 loss
split = args.split
isSewa = args.sewa
isSemaine = args.semaine
modelExist = True
toLoadModel = True
resume_iters=args.resume_iters#89
GName = None;#"AF0-0-16-16-Den-UA-G-429.ckpt"
DName = None;#"AF0-0-16-16-Den-UA-D-429.ckpt"
use_skip = args.useSkip
useLatent = args.useLatent
tryDenoise = args.tryDenoise
addLoss = args.addLoss
useWeight = args.useWeightNormalization
singleTask = args.singleTask
trainQuadrant = args.trainQuadrant
alterQuadrant = args.alterQuadrant
nSel = args.nSel
#curDir = "/home/deckyal/eclipse-workspace/FaceTracking/"
c_dim=2
image_size=128
g_conv_dim=args.gConv
d_conv_dim=args.dConv
lambda_cls=1
lambda_rec=10
lambda_gp=10
inputC = 3#input channel for discriminator
visEvery = 5
saveEvery = 10
# Training configuration.
dataset='CelebA' #, choices=['CelebA', 'RaFD', 'Both'])
batch_size=args.batch_size#50#40#70#20 #, help='mini-batch size')
num_iters=200000 #, help='number of total iterations for training D')
num_iters_decay=100000 #, help='number of iterations for decaying lr')
g_lr=0.0001 #, help='learning rate for G')
d_lr=0.0001 #, help='learning rate for D')
n_critic=5 #, help='number of D updates per each G update')
beta1=0.5 #, help='beta1 for Adam optimizer')
beta2=0.999 #, help='beta2 for Adam optimizer')
#selected_attrs=['Black_Hair', 'Blond_Hair', 'Brown_Hair', 'Male', 'Young']
#', '--list', nargs='+', help='selected attributes for the CelebA dataset',default=['Black_Hair', 'Blond_Hair', 'Brown_Hair', 'Male', 'Young'])
isVideo = False
seq_length = 2
# Test configuration.
test_iters=200000 #, help='test model from this step')
# Miscellaneous.
num_workers=1
log_dir='stargan/logs'
model_save_dir='stargan/models'
sample_dir='stargan/samples-g_adl'
result_dir='stargan/results'
# Step size.
log_step=20
sample_step=5#1000
model_save_step=10
lr_update_step=100#1000
#model_save_step=10000
#lr_update_step=1000
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if not os.path.exists(model_save_dir):
os.makedirs(model_save_dir)
multi_gpu = args.multi_gpu
testSplit = split
print("Test split " , testSplit)
nSplit = 5
listSplit = []
for i in range(nSplit):
if i!=testSplit:
listSplit.append(i)
print(listSplit)
if isSemaine:
isSewa = 0
if not isSewa :
if not isSemaine :
d_name = 'AFEW-VA-Fixed'
additionName = "AF"+str(split)+"-"
else :
d_name = 'Sem-Short'
additionName = "SEM"+str(split)+"-"
dbType = 0
else :
d_name = 'SEWA'
dbType = 1
additionName = "SW"+str(split)+"-"
additionName+=(str(nSel)+'-')
additionName+=(str(g_conv_dim)+'-')
additionName+=(str(d_conv_dim)+'-')
if trainQuadrant :
if alterQuadrant :
additionName+="QDAL-"
c_dim = 1
else :
additionName+="QD-"
c_dim = 4
if tryDenoise :
additionName+="Den-"
transform =transforms.Compose([
#transforms.Resize((image_size,image_size)),
#transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
#AFEW-VA-Small
ID = SEWAFEWReduced([d_name], None, True, image_size, transform, False, True, 1,split=True, nSplit = nSplit ,listSplit=listSplit
,isVideo=isVideo, seqLength = seq_length, returnQuadrant=trainQuadrant, returnNoisy = tryDenoise,dbType = dbType,
returnWeight = useWeight,isSemaine = isSemaine)
#ID = AFEWVA([d_name], None, True, image_size, transform, False, True, 1,split=True, nSplit = nSplit ,listSplit=listSplit
# ,isVideo=isVideo, seqLength = seq_length, returnQuadrant=trainQuadrant, returnNoisy = tryDenoise,dbType = dbType,returnWeight = useWeight)
dataloader = torch.utils.data.DataLoader(dataset = ID, batch_size = batch_size, shuffle = True,worker_init_fn=worker_init_fn)
VD = SEWAFEWReduced([d_name], None, True, image_size, transform, False, False, 1,split=True, nSplit = nSplit,listSplit=[testSplit]
,isVideo=isVideo, seqLength = seq_length, returnQuadrant=trainQuadrant, returnNoisy = tryDenoise,dbType = dbType,
isSemaine = isSemaine)
#VD = AFEWVA([d_name], None, True, image_size, transform, False, False, 1,split=True, nSplit = nSplit,listSplit=[testSplit]
# ,isVideo=isVideo, seqLength = seq_length, returnNoisy = tryDenoise,dbType = dbType)
dataloaderV = torch.utils.data.DataLoader(dataset = VD, batch_size = batch_size, shuffle = False)
#Build model
"""Create a generator and a discriminator."""
if nSel :
G = GeneratorMZ(g_conv_dim, 0, 1,use_skip,useLatent)
D = DiscriminatorMZR(image_size, d_conv_dim, c_dim, 4,inputC=inputC)
C = CombinerSeqAtt(image_size, d_conv_dim, c_dim, 4,64,512,1,batch_size,useCH=True)
else :
G = GeneratorM(g_conv_dim, 0, 1,use_skip,useLatent)
D = DiscriminatorM(image_size, d_conv_dim, c_dim, 6)
C = CombinerSeqAtt(image_size, d_conv_dim, c_dim, 4,64,512,1,batch_size,useCH=True)
print_network(G, 'G')
print_network(D, 'D')
if toLoadModel :
print('Loading models from iterations : ',resume_iters)
if modelExist :
additionName+='UA-'
if GName is None :
G_path = os.path.join(curDir+model_save_dir, '{}G-{}.ckpt'.format(additionName,resume_iters))
D_path = os.path.join(curDir+model_save_dir, '{}D-{}.ckpt'.format(additionName,resume_iters))
C_path = os.path.join(curDir+model_save_dir, '{}C-{}.ckpt'.format(additionName,resume_iters))
else :
G_path = os.path.join(curDir+model_save_dir, GName)
D_path = os.path.join(curDir+model_save_dir, DName)
C_path = os.path.join(curDir+model_save_dir, DName)
print('loading ',G_path)
print('loading ',D_path)
G.load_state_dict(torch.load(G_path, map_location=lambda storage, loc: storage))
D.load_state_dict(torch.load(D_path, map_location=lambda storage, loc: storage))
C.load_state_dict(torch.load(D_path, map_location=lambda storage, loc: storage))
if not modelExist:
additionName+='UA-'
else :
print('Initiating models')
G.apply(weights_init_uniform_rule)
D.apply(weights_init_uniform_rule)
save_name = additionName+str(testSplit)
err_file = curDir+save_name+".txt"
print('err file : ',err_file)
g_optimizer = torch.optim.Adam(G.parameters(), g_lr, [beta1, beta2])
d_optimizer = torch.optim.Adam(D.parameters(), d_lr, [beta1, beta2])
c_optimizer = torch.optim.Adam(C.parameters(), d_lr, [beta1, beta2])
G.to(device)
D.to(device)
C.to(device)
if multi_gpu:
G = nn.DataParallel(G)
D = nn.DataParallel(D)
# Set data loader.
data_loader = dataloader
if not trainQuadrant or (alterQuadrant):
criterion = nn.MSELoss()
else :
criterion = nn.CrossEntropyLoss() #F.cross_entropy(logit, target)
# Fetch fixed inputs for debugging.
data = next(iter(dataloader))
x_fixed, rlabels,rldmrk,_ = data[0],data[1],data[2],data[3]# x_fixed, c_org
if trainQuadrant :
if tryDenoise :
x_fixed = data[6].cuda()
x_target = data[0].cuda()
else :
if tryDenoise :
x_fixed = data[5].cuda()
x_target = data[0].cuda()
x_fixed = x_fixed.to(device)
# Learning rate cache for decaying.
d_lr = d_lr
start_iters = 0
# Start training.
print('Start training...')
start_time = time.time()
if trainQuadrant :
q1 = data[4]
f = open(err_file,'w+')
f.write("err : ")
f.close()
lowest_loss = 99999
lMSA,lMSV,lCCV,lCCA,lICA,lICV,lCRA, lCRV, total = 9999,9999,-9999, -9999, -9999, -9999, -9999, -9999, -9999
w,wv,wa = None,None,None
for i in range(start_iters, num_iters):
random.seed()
manualSeed = random.randint(1, 10000) # use if you want new results
random.seed(manualSeed)
torch.manual_seed(manualSeed)
print('Epoch {}/{}'.format(i, num_iters - 1))
print('-'*10)
running_loss = 0
G.train()
D.train()
for x,(data) in enumerate(dataloader,0) :
rinputs, rlabels,rldmrk,_ =data[0],data[1],data[2],data[3]
if trainQuadrant :
if alterQuadrant :
quadrant = data[5].float().cuda()
else :
quadrant = data[5].cuda()
if tryDenoise :
noisy = data[6].cuda()
else :
if tryDenoise :
noisy = data[5].cuda()
if useWeight :
w = data[6].cuda()
#print(w)
wv = w[:,1]
wa = w[:,0]
else :
if useWeight :
w = data[5].cuda()
#print(w)
wv = w[:,1]
wa = w[:,0]
inputs = rinputs.cuda()#to(device)
labels = rlabels.cuda()#to(device)
# Compute loss with real images.
out_src, out_cls = D(inputs)
d_loss_real = - torch.mean(out_src)
if not trainQuadrant:
if useWeight :
d_loss_cls = calcMSET(out_cls,labels,w) #criterion(out_cls, labels)
else :
d_loss_cls = criterion(out_cls, labels) #classification_loss(out_cls, label_org, dataset)
if addLoss :
ov,oa,lv,la = out_cls[:,0],out_cls[:,1], labels[:,0], labels[:,1]
corV = -calcCORT(ov, lv, wv)
corA = -calcCORT(oa, la, wa)
cccV = -calcCCCT(ov, lv, wv)
cccA = -calcCCCT(oa, la, wa)
iccV = -calcICCT(ov, lv, wv)
iccA = -calcICCT(oa, la, wa)
d_loss_cls = d_loss_cls + corV+corA +cccV+cccA+iccV+iccA
else :
#print('q ',quadrant)
#print(out_cls.shape, quadrant.shape )
if alterQuadrant :
d_loss_cls = criterion(torch.squeeze(out_cls), quadrant)
else :
d_loss_cls = criterion(out_cls, quadrant)
if x%10 == 0 :
if not trainQuadrant:
print(x,'-',len(dataloader)," Res - label-G : ", out_cls[:3],labels[:3])
else :
if alterQuadrant :
print(x,'-',len(dataloader)," Res - label-G : ", torch.round(out_cls[:3]),quadrant[:3])
else :
print(x,'-',len(dataloader)," Res - label-G : ", torch.max(out_cls[:3],1)[1],quadrant[:3])
# Compute loss with fake images.
if tryDenoise :
theInput = noisy
else :
theInput = inputs
x_fake = G(theInput)
out_src, out_cls = D(x_fake.detach())
d_loss_fake = torch.mean(out_src)
# Compute loss for gradient penalty.
alpha = torch.rand(theInput.size(0), 1, 1, 1).to(device)
x_hat = (alpha * theInput.data + (1 - alpha) * x_fake.data).requires_grad_(True)
out_src, _ = D(x_hat)
d_loss_gp = gradient_penalty(out_src, x_hat)
# Backward and optimize.
d_loss = d_loss_real + d_loss_fake + lambda_cls * d_loss_cls + lambda_gp * d_loss_gp
#reset_grad()
g_optimizer.zero_grad()
d_optimizer.zero_grad()
d_loss.backward()
d_optimizer.step()
# Logging.
loss = {}
loss['D/loss_real'] = d_loss_real.item()
loss['D/loss_fake'] = d_loss_fake.item()
loss['D/loss_cls'] = d_loss_cls.item()
loss['D/loss_gp'] = d_loss_gp.item()
###! Actual training of the generator
if (i+1) % n_critic == 0:
# Original-to-target domain.
if tryDenoise :
z,x_fake = G(noisy,returnInter = True)
else :
z,x_fake = G(inputs)
out_src, out_cls = D(x_fake)
if x%10 == 0 :
print("Res - label-D : ", out_cls[:3],labels[:3])
g_loss_fake = - torch.mean(out_src)
if not trainQuadrant:
#g_loss_cls = criterion(out_cls, labels) #classification_loss(out_cls, label_org, dataset)
if useWeight :
g_loss_cls = calcMSET(out_cls,labels,w) #criterion(out_cls, labels)
else :
g_loss_cls = criterion(out_cls, labels) #classification_loss(out_cls, label_org, dataset)
if addLoss :
ov,oa,lv,la = out_cls[:,0],out_cls[:,1], labels[:,0], labels[:,1]
corV = -calcCORT(ov, lv, wv)
corA = -calcCORT(oa, la, wa)
cccV = -calcCCCT(lv, lv, wv)
cccA = -calcCCCT(oa, la, wa)
iccV = -calcICCT(ov, lv, wv)
iccA = -calcICCT(oa, la, wa)
g_loss_cls = g_loss_cls + corV+corA +cccV+cccA+iccV+iccA
else :
if alterQuadrant :
g_loss_cls = criterion(torch.squeeze(out_cls), quadrant)
else :
g_loss_cls = criterion(out_cls, quadrant)
if not isSewa:
q = toQuadrant(out_cls, -10, 10, False)
else :
q = toQuadrant(out_cls, 0, 1, False)
out_c = C(torch.cat((z,q),1))
if useWeight :
c_loss = calcMSET(out_cls,labels,w) #criterion(out_cls, labels)
else :
c_loss = criterion(out_cls, labels) #classification_loss(out_cls, label_org, dataset)
if addLoss :
ov,oa,lv,la = out_c[:,0],out_c[:,1], labels[:,0], labels[:,1]
corV = -calcCORT(ov, lv, wv)
corA = -calcCORT(oa, la, wa)
cccV = -calcCCCT(lv, lv, wv)
cccA = -calcCCCT(oa, la, wa)
iccV = -calcICCT(ov, lv, wv)
iccA = -calcICCT(oa, la, wa)
c_loss = c_loss + corV+corA +cccV+cccA+iccV+iccA
# Target-to-original domain.
x_reconst = G(x_fake)
g_loss_rec = torch.mean(torch.abs(inputs - x_reconst))
# Backward and optimize.
g_loss = g_loss_fake + lambda_rec * g_loss_rec + lambda_cls * g_loss_cls
#reset_grad()
g_optimizer.zero_grad()
d_optimizer.zero_grad()
c_optimizer.zero_grad()
c_loss.backward()
g_loss.backward()
g_optimizer.step()
c_optimizer.step()
# Logging.
loss['G/loss_fake'] = g_loss_fake.item()
loss['G/loss_rec'] = g_loss_rec.item()
loss['G/loss_cls'] = g_loss_cls.item()
loss['C'] = c_loss.item()
###! Getting the training metrics and samples
#running_loss += loss.item() * inputs.size(0)
#print("{}/{} loss : {}/{}".format(x,int(len(dataloader.dataset)/batch_size),lossC.item(),lossR.item()))
if (i+1) % 10 == 0:
et = time.time() - start_time
et = str(datetime.timedelta(seconds=et))[:-7]
log = "Elapsed [{}], Iteration [{}/{}], Inner {}/{} \n".format(et, i+1, num_iters,x,int(len(dataloader.dataset)/batch_size))
for tag, value in loss.items():
log += ", {}: {:.4f}".format(tag, value)
print(log)
f = open(err_file,'a')
f.write("Elapsed [{}], Iteration [{}/{}], Inner {}/{} \n".format(et, i+1, num_iters,x,int(len(dataloader.dataset)/batch_size)))
f.write(log)
f.close()
# Translate fixed images for debugging.
if (i+1) % visEvery == 0:
with torch.no_grad():
x_fake_list = [x_fixed]
x_concat = G(x_fixed)
sample_path = os.path.join(curDir+sample_dir, '{}{}-images-denoised.jpg'.format(i+1,additionName))
save_image(denorm(x_concat.data.cpu()), sample_path, nrow=int(round(batch_size/4)), padding=0)
print('Saved real and fake denoised images into {}...'.format(sample_path))
if tryDenoise :
x_concat = x_fixed
sample_path = os.path.join(curDir+sample_dir, '{}{}-images-original.jpg'.format(i+1,additionName))
save_image(denorm(x_concat.data.cpu()), sample_path, nrow=int(round(batch_size/4)), padding=0)
print('Saved real and fake real images into {}...'.format(sample_path))
x_concat = x_target
sample_path = os.path.join(curDir+sample_dir, '{}{}-images-groundtruth.jpg'.format(i+1,additionName))
save_image(denorm(x_concat.data.cpu()), sample_path, nrow=int(round(batch_size/4)), padding=0)
print('Saved real and fake real images into {}...'.format(sample_path))
# Save model checkpoints.
if (i+1) % saveEvery == 0:
G_path = os.path.join(curDir+model_save_dir, '{}G-{}.ckpt'.format(additionName,i))
D_path = os.path.join(curDir+model_save_dir, '{}D-{}.ckpt'.format(additionName,i))
C_path = os.path.join(curDir+model_save_dir, '{}C-{}.ckpt'.format(additionName,i))
if multi_gpu :
torch.save(G.module.state_dict(), G_path)
torch.save(D.module.state_dict(), D_path)
torch.save(C.module.state_dict(), C_path)
else :
torch.save(G.state_dict(), G_path)
torch.save(D.state_dict(), D_path)
torch.save(C.state_dict(), C_path)
print('Saved model checkpoints into {}...'.format(model_save_dir))
print(G_path)
# Decay learning rates.
if (i+1) % lr_update_step == 0 and (i+1) > 50:
g_lr -= (g_lr / float(num_iters_decay))
d_lr -= (d_lr / float(num_iters_decay))
update_lr_ind(d_optimizer,d_lr)
update_lr_ind(g_optimizer,g_lr)
print ('Decayed learning rates, g_lr: {}, d_lr: {}.'.format(g_lr, d_lr))
epoch_loss = running_loss / len(dataloader.dataset)
print('Loss : {:.4f}'.format(epoch_loss))
if i %2 == 0 :
if multi_gpu :
torch.save(D.module.state_dict(),curDir+'t-models/'+'-D'+save_name)
torch.save(G.module.state_dict(),curDir+'t-models/'+'-G'+save_name)
torch.save(C.module.state_dict(),curDir+'t-models/'+'-C'+save_name)
else :
torch.save(D.state_dict(),curDir+'t-models/'+'-D'+save_name)
torch.save(G.state_dict(),curDir+'t-models/'+'-G'+save_name)
torch.save(G.state_dict(),curDir+'t-models/'+'-C'+save_name)
#Deep copy the model_ft
if i%5 == 0 :#epoch_loss < lowest_loss :
if trainQuadrant :
a = 0
b = 0
else :
a = 0
b = 1
lowest_loss = lowest_loss
print("outp8ut : ",out_cls[0])
print("labels : ",labels[0])
if True :
listValO = []
listAroO = []
listValL = []
listAroL = []
tvo = [];tao=[];tvl = []; tal = [];
anyDiffer = False
for x,(data) in enumerate(dataloaderV,0) :
if trainQuadrant:
rinputs, rlabels,rldmrk = data[0],data[5],data[2]
else :
rinputs, rlabels,rldmrk = data[0],data[1],data[2]
G.eval()
D.eval()
C.eval()
inputs = rinputs.cuda()#to(device)
labels = rlabels.cuda()#to(device)
with torch.set_grad_enabled(False) :
z,inputsM = G(inputs,returnInter = True)
_, outD = D(inputsM)
if not isSewa:
q = toQuadrant(outD, -10, 10, False)
else :
q = toQuadrant(outD, 0, 1, False)
outputs = C(torch.cat((z,q),1))
if trainQuadrant:
if alterQuadrant :
outputs = torch.round(outputs)
else :
_,outputs = torch.max(outputs,1)
if trainQuadrant :
print(x,',',int(truediv(len(VD),batch_size)),outputs[:2], labels[:2],outputs.shape)
else :
print(x,',',int(truediv(len(VD),batch_size)),outputs[:2], labels[:2],outputs[:,0].shape[0],outputs.shape)
#print(outputs.shape)
if not trainQuadrant :
shape = outputs[:,0].shape[0]
else :
shape = outputs.shape[0]
if shape != batch_size : #in case the batch size is differ, usually at end of iter
anyDiffer = True
print('differ')
if trainQuadrant:
tvo.append(outputs.detach().cpu())
tao.append(outputs.detach().cpu())
tvl.append(labels.detach().cpu())
tal.append(labels.detach().cpu())
else :
tvo.append(outputs[:,a].detach().cpu())
tao.append(outputs[:,b].detach().cpu())
tvl.append(labels[:,a].detach().cpu())
tal.append(labels[:,b].detach().cpu())
else :
print('equal')
if trainQuadrant :
listValO.append(outputs.detach().cpu())
listAroO.append(outputs.detach().cpu())
listValL.append(labels.detach().cpu())
listAroL.append(labels.detach().cpu())
else :
listValO.append(outputs[:,a].detach().cpu())
listAroO.append(outputs[:,b].detach().cpu())
listValL.append(labels[:,a].detach().cpu())
listAroL.append(labels[:,b].detach().cpu())
est_V = np.asarray(torch.stack(listValO)).flatten()
est_A = np.asarray(torch.stack(listAroO)).flatten()
gt_V = np.asarray(torch.stack(listValL)).flatten()
gt_A = np.asarray(torch.stack(listAroL)).flatten()
if anyDiffer :
est_Vt = np.asarray(torch.stack(tvo)).flatten()
est_At = np.asarray(torch.stack(tao)).flatten()
gt_Vt = np.asarray(torch.stack(tvl)).flatten()
gt_At = np.asarray(torch.stack(tal)).flatten()
#now concatenate
est_V = np.concatenate((est_V,est_Vt))
est_A = np.concatenate((est_A,est_At))
gt_V = np.concatenate((gt_V,gt_Vt))
gt_A = np.concatenate((gt_A,gt_At))
print(est_V.shape, gt_V.shape)
mseV = calcMSE(est_V, gt_V)
mseA = calcMSE(est_A, gt_A)
corV = calcCOR(est_V, gt_V)
corA = calcCOR(est_A, gt_A)
iccV = calcICC(est_V, gt_V)
iccA = calcICC(est_A, gt_A)
cccV = calcCCC(est_V, gt_V)
cccA = calcCCC(est_A, gt_A)
iccV2 = calcCCC(gt_V, gt_V)
iccA2 = calcCCC(gt_A, gt_A)
if lMSA > mseA :
lMSA = mseA
if lMSV > mseV :
lMSV = mseV
if corA > lCRA :
lCRA = corA
if corV > lCRV :
lCRV = corV
if cccA > lCCA :
lCCA = cccA
if cccV > lCCV :
lCCV = cccV
if iccA > lICA :
lICA = iccA
if iccV > lICV :
lICV = iccV
if (corA+corV+cccA+cccV+iccA+iccV) > total :
total = (corA+corV+cccA+cccV+iccA+iccV)
G_path = os.path.join(curDir+model_save_dir, '{}G-best-{}.ckpt'.format(additionName,i))
D_path = os.path.join(curDir+model_save_dir, '{}D-best-{}.ckpt'.format(additionName,i))
#G_path = os.path.join(curDir+model_save_dir, '{}{}-G-adl-best.ckpt'.format(i+1,additionName))
#D_path = os.path.join(curDir+model_save_dir, '{}{}-D-adl-best.ckpt'.format(i+1,additionName))
if multi_gpu :
torch.save(G.module.state_dict(), G_path)
torch.save(D.module.state_dict(), D_path)
else :
torch.save(G.state_dict(), G_path)
torch.save(D.state_dict(), D_path)
print('Best, MSEA : '+str(lMSA)+', CORA : '+str(lCRA)+', CCCA : '+str(lCCA)+', ICCA : '+str(lICA)+ ', MSEV : ' +str(lMSV)+ ', CORV : ' +str(lCRV)+', CCCV : '+str(lCCV) +', ICCV : '+str(lICV)+', Total : '+str(total))
print('MSEV : ',mseV, ', CORV : ',corV,', CCCV : ',cccV,', ICCV : ',iccV)
print('MSEA : ',mseA, ', CORA : ',corA,', CCCA : ',cccA,', ICCA : ',iccA)
f = open(err_file,'a')
res = 'MSEV : '+str(mseV)+ ', CORV : ' +str(corV)+', CCCV : '+str(cccV) +', ICCV : '+str(iccV)+' \n '
f.write(res)
res = 'MSEA : '+str(mseA)+ ', CORA : '+str(corA) +', CCCA : '+str(cccA) +', ICCA : '+str(iccA)+' \n '
f.write(res)
res = 'Best, MSEA : '+str(lMSA)+', CORA : '+str(lCRA)+', CCCA : '+str(lCCA)+', ICCA : '+str(lICA)+ ', MSEV : ' +str(lMSV)+ ', CORV : ' +str(lCRV)+', CCCV : '+str(lCCV) +', ICCV : '+str(lICV)+', Total : '+str(total)+' \n '
f.write(res)
f.close()
print('Best val Acc: {:4f}'.format(lowest_loss))
pass
def extract(): #training g and d on standard l2 loss
    """Run trained G/D checkpoints over one dataset split and dump features.

    Loads the generator/discriminator checkpoints selected by the command-line
    arguments, pushes every image of the chosen split through G (keeping the
    intermediate latent ``z``) and D (keeping the valence/arousal prediction),
    writes per-frame ``.npz`` files (latent + VA + quadrant) into a sibling
    ``FT-<config>-z`` directory next to the source images, and finally prints
    MSE/COR/CCC/ICC metrics of the VA predictions against ground truth.
    """
    os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"]="1"
    # Command-line configuration flags (mirrors the training entry point).
    split = args.split
    isSewa = args.sewa
    isSemaine = args.semaine
    toLoadModel = True
    resume_iters=args.resume_iters
    use_skip = args.useSkip
    useLatent = args.useLatent
    tryDenoise = args.tryDenoise
    addLoss = args.addLoss
    useWeight = args.useWeightNormalization
    singleTask = args.singleTask
    trainQuadrant = args.trainQuadrant
    alterQuadrant = args.alterQuadrant
    nSel = args.nSel
    #curDir = "/home/deckyal/eclipse-workspace/FaceTracking/"
    # Architecture hyper-parameters; these must match the values used at
    # training time so the checkpoint weights line up with the model layout.
    c_dim=2
    image_size=128
    g_conv_dim=16
    d_conv_dim=16
    lambda_cls=1
    lambda_rec=10
    lambda_gp=10
    inputC = 3#input channel for discriminator
    batch_size=args.batch_size#200 #50#40#70#20 #, help='mini-batch size')
    isVideo = False
    seq_length = 2
    # Test configuration.
    test_iters=200000 #, help='test model from this step')
    # Miscellaneous.
    num_workers=1
    log_dir='stargan/logs'
    model_save_dir='stargan/models'
    sample_dir='stargan/samples-g_adl'
    result_dir='stargan/results'
    # Step size.
    log_step=20
    sample_step=5#1000
    model_save_step=10
    lr_update_step=100#1000
    #model_save_step=10000
    #lr_update_step=1000
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if not os.path.exists(model_save_dir):
        os.makedirs(model_save_dir)
    multi_gpu = False
    testSplit = split
    print("Test split " , testSplit)
    # Build the list of non-test splits; only testSplit is used below but the
    # list is printed for bookkeeping (same as in the training function).
    nSplit = 5
    listSplit = []
    for i in range(nSplit):
        if i!=testSplit :
            listSplit.append(i)
    print(listSplit)
    # Dataset selection: AFEW-VA / SEMAINE use dbType 0, SEWA uses dbType 1.
    # additionName encodes the configuration into checkpoint/output names.
    if not isSewa :
        if not isSemaine :
            d_name = 'AFEW-VA-Fixed'
            additionName = "AF"+str(split)+"-"
        else :
            d_name = 'Sem-Short'
            additionName = "SEM"+str(split)+"-"
        dbType = 0
    else :
        d_name = 'SEWA'
        dbType = 1
        additionName = "SW"+str(split)+"-"
    additionName+=(str(nSel)+'-')
    additionName+=(str(g_conv_dim)+'-')
    additionName+=(str(d_conv_dim)+'-')
    if trainQuadrant :
        if alterQuadrant :
            additionName+="QDAL-"
            c_dim = 1
        else :
            additionName+="QD-"
            c_dim = 4
    if tryDenoise :
        additionName+="Den-"
    save_name = additionName+str(testSplit)
    transform =transforms.Compose([
            transforms.Resize((image_size,image_size)),
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
        ])
    # When True, previously extracted feature directories are removed
    # instead of being (re)written.
    toDelete = False
    # split=False: iterate the whole dataset, not only the held-out split.
    VD = SEWAFEWReduced([d_name], None, True, image_size, transform, False, False, 1,split=False, nSplit = nSplit,listSplit=[testSplit]
            ,isVideo=isVideo, seqLength = seq_length, returnQuadrant=trainQuadrant, returnNoisy = tryDenoise,dbType = dbType, isSemaine=isSemaine)
    dataloaderV = torch.utils.data.DataLoader(dataset = VD, batch_size = batch_size, shuffle = False)
    # Build the networks with the same capacity flags as used during training.
    if nSel :
        G = GeneratorMZ(g_conv_dim, 0, 1,use_skip,useLatent)
        D = DiscriminatorMZR(image_size, d_conv_dim, c_dim, 4,inputC=inputC)
    else :
        G = GeneratorM(g_conv_dim, 0, 1,use_skip,useLatent)
        D = DiscriminatorM(image_size, d_conv_dim, c_dim, 6)
    print_network(G, 'G')
    print_network(D, 'D')
    if toLoadModel :
        print('Loading models from iterations : ',resume_iters)
        G_path = os.path.join(curDir+model_save_dir, '{}G-{}.ckpt'.format(additionName,resume_iters))
        D_path = os.path.join(curDir+model_save_dir, '{}D-{}.ckpt'.format(additionName,resume_iters))
        # strict=False tolerates key mismatches between checkpoint and model.
        G.load_state_dict(torch.load(G_path, map_location=lambda storage, loc: storage),strict=False)
        D.load_state_dict(torch.load(D_path, map_location=lambda storage, loc: storage),strict=False)
    G.to(device)
    D.to(device)
    # Accumulators for predicted/ground-truth valence (V) and arousal (A);
    # the t* lists collect odd-sized final batches separately.
    listValO = []
    listAroO = []
    listValL = []
    listAroL = []
    a = 0
    b = 1
    iterator = 0
    tvo = [];tao=[];tvl = []; tal = [];
    anyDiffer = False
    print('length : ',len(dataloaderV))
    for x,(data) in enumerate(dataloaderV,0) :
        if trainQuadrant:
            rinputs, rlabels,rldmrk = data[0],data[5],data[2]
        else :
            rinputs, rlabels,rldmrk = data[0],data[1],data[2]
        #for real_batch,va,gt,M,ln,q,noisy_batch,weight in (dataloader) :
        fNames = data[4]
        # NOTE(review): networks are left in train() mode during extraction,
        # so batch-norm/dropout behave as in training — confirm intentional.
        G.train()
        D.train()
        inputs = rinputs.cuda()#to(device)
        labels = rlabels.cuda()#to(device)
        with torch.set_grad_enabled(False) :
            # NOTE(review): unpack order differs from the training loop
            # (there it is `z,inputsM = G(...)`) — confirm which output is
            # the latent feature map.
            inputsM,z = G(inputs,returnInter = True)
            _, outputs = D(inputsM)
            if trainQuadrant:
                if alterQuadrant :
                    outputs = torch.round(outputs)
                else :
                    _,outputs = torch.max(outputs,1)
            print('inside ')
            if trainQuadrant :
                print(x,',',int(truediv(len(VD),batch_size)),outputs[:2], labels[:2],outputs.shape)
            else :
                print(x,',',int(truediv(len(VD),batch_size)),outputs[:2], labels[:2],outputs[:,0].shape[0],outputs.shape)
            #print(outputs.shape)
            print(z.shape)
            zSave = z.cpu().numpy()
            qSave = outputs.cpu().numpy()
            # When True, the quadrant index is appended to the latent as an
            # extra constant channel before saving.
            combine = True
            #now saving the results individually
            for fname,features,va in zip(fNames, zSave,qSave):
                iterator+=1
                #first inspect the dir
                dirName, fName = os.path.split(fname)
                fName = fName.split('.')[0]+'.npz'
                # Output dir: replace the last path component (e.g. 'img')
                # with a feature directory named after the configuration.
                listDir = dirName.split('/')
                listDir[-1] = 'FT-'+additionName+'z'
                dirTgt = '/'.join(listDir)
                if not toDelete :
                    checkDirMake(dirTgt)
                #va = np.array([5,-5])
                #print(va)
                # VA range differs per corpus: AFEW-VA in [-10,10], SEWA in [0,1].
                if not isSewa:
                    q = toQuadrant(va, -10, 10, False)
                else :
                    q = toQuadrant(va, 0, 1, False)
                #print(q)
                if combine :
                    tmp=np.zeros((1,features.shape[1],features.shape[2]),np.float32)+q
                    features=np.concatenate((features,tmp),0)
                    print(tmp[0,0,:2])
                print(fname, features.shape)
                if os.path.isdir(dirTgt) and toDelete: # and isSewa or False:
                    print('removing : ',dirTgt)
                    #os.remove(os.path.join(dirTgt,fNameOri))
                    #exit(0)
                    shutil.rmtree(dirTgt)
                #print(dirTgt, fName)
                vaq = np.array([va[0],va[1],q])
                #print('vaq : ',vaq)
                if not toDelete :#not os.path.isfile(os.path.join(dirTgt,fName)) :
                    #np.save(os.path.join(dirTgt,fName),features)
                    np.savez(os.path.join(dirTgt,fName),z=features,vaq=vaq)
                #exit(0)
            #np.save('testing.npy',zSave)
            #exit(0)
            if not trainQuadrant :
                shape = outputs[:,0].shape[0]
            else :
                shape = outputs.shape[0]
            if shape != batch_size : #in case the batch size is differ, usually at end of iter
                anyDiffer = True
                print('differ')
                if trainQuadrant:
                    tvo.append(outputs.detach().cpu())
                    tao.append(outputs.detach().cpu())
                    tvl.append(labels.detach().cpu())
                    tal.append(labels.detach().cpu())
                else :
                    tvo.append(outputs[:,a].detach().cpu())
                    tao.append(outputs[:,b].detach().cpu())
                    tvl.append(labels[:,a].detach().cpu())
                    tal.append(labels[:,b].detach().cpu())
            else :
                print('equal')
                if trainQuadrant :
                    listValO.append(outputs.detach().cpu())
                    listAroO.append(outputs.detach().cpu())
                    listValL.append(labels.detach().cpu())
                    listAroL.append(labels.detach().cpu())
                else :
                    listValO.append(outputs[:,a].detach().cpu())
                    listAroO.append(outputs[:,b].detach().cpu())
                    listValL.append(labels[:,a].detach().cpu())
                    listAroL.append(labels[:,b].detach().cpu())
    # Flatten the regular-sized batches, then append any odd-sized ones.
    if len(listValO) > 0 :
        est_V = np.asarray(torch.stack(listValO)).flatten()
        est_A = np.asarray(torch.stack(listAroO)).flatten()
        gt_V = np.asarray(torch.stack(listValL)).flatten()
        gt_A = np.asarray(torch.stack(listAroL)).flatten()
    if anyDiffer :
        est_Vt = np.asarray(torch.stack(tvo)).flatten()
        est_At = np.asarray(torch.stack(tao)).flatten()
        gt_Vt = np.asarray(torch.stack(tvl)).flatten()
        gt_At = np.asarray(torch.stack(tal)).flatten()
        #now concatenate
        if len(listValO) > 0 :
            est_V = np.concatenate((est_V,est_Vt))
            est_A = np.concatenate((est_A,est_At))
            gt_V = np.concatenate((gt_V,gt_Vt))
            gt_A = np.concatenate((gt_A,gt_At))
        else :
            est_V,est_A,gt_V,gt_A = est_Vt,est_At,gt_Vt,gt_At
    print(est_V.shape, gt_V.shape)
    # Final evaluation metrics over the whole split.
    mseV = calcMSE(est_V, gt_V)
    mseA = calcMSE(est_A, gt_A)
    corV = calcCOR(est_V, gt_V)
    corA = calcCOR(est_A, gt_A)
    iccV = calcICC(est_V, gt_V)
    iccA = calcICC(est_A, gt_A)
    cccV = calcCCC(est_V, gt_V)
    cccA = calcCCC(est_A, gt_A)
    # iccV2/iccA2 are self-agreement sanity checks (gt vs gt); unused below.
    iccV2 = calcCCC(gt_V, gt_V)
    iccA2 = calcCCC(gt_A, gt_A)
    print('MSEV : ',mseV, ', CORV : ',corV,', CCCV : ',cccV,', ICCV : ',iccV)
    print('MSEA : ',mseA, ', CORA : ',corA,', CCCA : ',cccA,', ICCA : ',iccA)
if __name__ == '__main__':
    # Dispatch on --mode: 0 runs full GAN + combiner training, 1 extracts
    # latent features with an already-trained model. Other values are no-ops.
    _entry_points = {0: train_w_gdc_adl, 1: extract}
    _runner = _entry_points.get(args.mode)
    if _runner is not None:
        _runner()
| 44,327
| 36.156748
| 237
|
py
|
Seq-Att-Affect
|
Seq-Att-Affect-master/FacialDataset.py
|
from math import sqrt
import re
from PIL import Image,ImageFilter
import torch
from torch.utils import data
import torchvision.transforms as transforms
import torchvision.utils as vutils
import csv
import torchvision.transforms.functional as F
import numbers
from torchvision.transforms import RandomRotation,RandomResizedCrop,RandomHorizontalFlip
from utils import *
from config import *
from ImageAugment import *
import utils
from os.path import isfile# load additional module
import pickle
import os
#import nudged
import shutil
import file_walker
import copy
from random import randint
#noiseParamList = np.asarray([[0,0,0],[1,2,3],[1,3,5],[.001,.005,.01],[.8,.5,.2],[0,0,0]])#0 [], 1[1/2,2/4,3/8], 2 [1,3,5], 3 [.01,.1,1], [.001,.005,.01]
# Noise-severity lookup table used when synthesising corrupted images.
# Each row appears to correspond to one noise type and the three columns to
# increasing severity levels; rows 0 and 5 are all zeros (presumably
# parameter-free noise types — TODO confirm against the augmentation code).
noiseParamList =np.asarray([[0,0,0],[2,3,4],[2,4,6],[.005,.01,.05],[.5,.2,.1],[0,0,0]])#0 [], 1[1/2,2/4,3/8], 2 [1,3,5], 3 [.01,.1,1], [.001,.005,.01]
#noiseParamListTrain = np.asarray([[0,0,0],[2,3,4],[2,4,6],[.005,.01,.05],[.5,.2,.1],[0,0,0]])#0 [], 1[1/2,2/4,3/8], 2 [1,3,5], 3 [.01,.1,1], [.001,.005,.01]
# Training-time variant of the same table (currently identical to noiseParamList).
noiseParamListTrain = np.asarray([[0,0,0],[2,3,4],[2,4,6],[.005,.01,.05],[.5,.2,.1],[0,0,0]])#0 [], 1[1/2,2/4,3/8], 2 [1,3,5], 3 [.01,.1,1], [.001,.005,.01]
# Root directories of the dataset images and landmark annotations.
rootDir = "/home/deckyal/eclipse-workspace/DeepModel/src/MMTVA/data"
rootDirLdmrk = "/home/deckyal/eclipse-workspace/DeepModel/src/MMTVA/data/"
def addGaussianNoise(img, noiseLevel=1):
    """Return a copy of *img* corrupted with zero-mean Gaussian noise.

    Parameters
    ----------
    img : torch.Tensor
        Input image tensor of any shape, on any device.
    noiseLevel : float, optional
        Standard deviation of the additive noise (default 1).

    Returns
    -------
    torch.Tensor
        ``img + N(0, noiseLevel**2)`` with the same shape, dtype and device
        as ``img``.
    """
    # randn_like allocates the noise on the same device and with the same
    # dtype as the input. The previous torch.randn(img.size()) was always a
    # CPU float32 tensor, which raises a device-mismatch error when the
    # input lives on the GPU.
    noise = torch.randn_like(img) * noiseLevel
    return img + noise
def toQuadrant(inputData = None, min = -10, max = 10, toOneHot = False):
    """Map a (valence, arousal) pair onto one of four affect quadrants.

    The VA plane is split at the midpoint of [min, max]. Quadrant indices:
        0: high valence / high arousal
        1: low  valence / high arousal
        2: low  valence / low  arousal
        3: high valence / low  arousal
    A value exactly at the threshold counts as "high".

    Returns the quadrant index, or a length-4 one-hot numpy vector when
    ``toOneHot`` is True.
    """
    threshold = truediv(min + max, 2)
    valence_low = inputData[0] < threshold
    arousal_low = inputData[1] < threshold
    if valence_low:
        q = 2 if arousal_low else 1
    else:
        q = 3 if arousal_low else 0
    if toOneHot:
        encoded = np.zeros(4)
        encoded[q] += 1
        return encoded
    return q
class SEWAFEW(data.Dataset):
mean_bgr = np.array([91.4953, 103.8827, 131.0912]) # from resnet50_ft.prototxt
def __init__(self, data_list = ["AFEW"],dir_gt = None,onlyFace = True, image_size =224,
transform = None,useIT = False,augment = False, step = 1,split = False,
nSplit = 5, listSplit = [0,1,2,3,4],wHeatmap= False,isVideo = False, seqLength = None,
returnM = False, toAlign = False, dbType = 0):#dbtype 0 is AFEW, 1 is SEWA
self.dbType = dbType
self.seq_length = seqLength
self.isVideo = isVideo
self.align = toAlign
self.useNudget = False
self.returnM = returnM
self.transform = transform
self.onlyFace = onlyFace
self.augment = augment
self.wHeatmap = wHeatmap
self.imageSize = image_size
self.imageHeight = image_size
self.imageWidth = image_size
self.useIT = useIT
self.curDir = rootDir+"/"#/home/deckyal/eclipse-workspace/DeepModel/src/MMTVA/data/"
if self.dbType ==1 :
annotL_name = "annotOri"
self.ldmrkNumber = 49
self.nose = 16
self.leye = 24
self.reye = 29
#mean_shape49-pad3-224
self.mean_shape = np.load(curDir+'mean_shape49-pad3-'+str(image_size)+'.npy')
else :
annotL_name = 'annot'
self.ldmrkNumber = 68
self.nose = 33
self.leye = 41
self.reye = 46
self.mean_shape = np.load(curDir+'mean_shape-pad-'+str(image_size)+'.npy')
self.swap = False
if self.swap :
self.ptsDst = np.asarray([
[self.mean_shape[self.nose+self.ldmrkNumber],self.mean_shape[self.nose]],[self.mean_shape[self.leye+self.ldmrkNumber],self.mean_shape[self.leye]],[self.mean_shape[self.reye+self.ldmrkNumber],self.mean_shape[self.reye]]
],dtype= np.float32)
self.ptsTn = [self.mean_shape[self.nose+self.ldmrkNumber],self.mean_shape[self.nose]],[self.mean_shape[self.leye+self.ldmrkNumber],self.mean_shape[self.leye]],[self.mean_shape[self.reye+self.ldmrkNumber],self.mean_shape[self.reye]]
else :
self.ptsDst = np.asarray([
[self.mean_shape[self.nose],self.mean_shape[self.nose+self.ldmrkNumber]],[self.mean_shape[self.leye],self.mean_shape[self.leye+self.ldmrkNumber]],[self.mean_shape[self.reye],self.mean_shape[self.reye+self.ldmrkNumber]]
],dtype= np.float32)
self.ptsTn = [self.mean_shape[self.nose],self.mean_shape[self.nose+self.ldmrkNumber]],[self.mean_shape[self.leye],self.mean_shape[self.leye+self.ldmrkNumber]],[self.mean_shape[self.reye],self.mean_shape[self.reye+self.ldmrkNumber]]
self.ptsTnFull = np.column_stack((self.mean_shape[:self.ldmrkNumber],self.mean_shape[self.ldmrkNumber:]))
list_gt = []
list_labels_t = []
list_labels_tE = []
counter_image = 0
annotE_name = 'annot2'
if dir_gt is not None :
annot_name = dir_gt
list_missing = []
for data in data_list :
print(("Opening "+data))
for f in file_walker.walk(self.curDir +data+"/"):
if f.isDirectory: # Check if object is directory
#print((f.name, f.full_path)) # Name is without extension
#c_image,c_ldmark = 0,0
if self.dbType == 1 : #we directly get the VA file in case of sewa
#first get the valence
valFile = f.full_path+"/valence/"+f.name+"_Valence_A_Aligned.csv"
aroFile = f.full_path+"/arousal/"+f.name+"_Arousal_A_Aligned.csv"
list_labels_tE.append([valFile,aroFile])
#print(valFile,aroFile)
for sub_f in f.walk():
if sub_f.isDirectory: # Check if object is directory
list_dta = []
#print(sub_f.name)
if(sub_f.name == annotL_name) : #If that's annot, add to labels_t
for sub_sub_f in sub_f.walk(): #this is the data
if(".npy" not in sub_sub_f.full_path):
list_dta.append(sub_sub_f.full_path)
list_labels_t.append(sorted(list_dta))
c_image = len(list_dta)
elif(sub_f.name == 'img'): #Else it is the image
for sub_sub_f in sub_f.walk(): #this is the data
if(".npy" not in sub_sub_f.full_path):
list_dta.append(sub_sub_f.full_path)
list_gt.append(sorted(list_dta))
counter_image+=len(list_dta)
c_ldmrk = len(list_dta)
elif (sub_f.name == annotE_name) :
if self.dbType == 0 :
#If that's annot, add to labels_t
for sub_sub_f in sub_f.walk(): #this is the data
if(".npy" not in sub_sub_f.full_path):
list_dta.append(sub_sub_f.full_path)
list_labels_tE.append(sorted(list_dta))
if(c_image!=c_ldmrk) and False:
print(f.full_path," is incomplete ",'*'*10,c_image,'-',c_ldmrk)
ori = "/home/deckyal/eclipse-workspace/DeepModel/src/MMTVA/data/allVideo/"
target = '/home/deckyal/eclipse-workspace/DeepModel/src/MMTVA/data/allVideo/retrack/'
#shutil.copy(ori+f.name+".avi",target+f.name+".avi")
list_missing.append(f.name)
self.length = counter_image
print("Now opening keylabels")
list_labelsN = []
list_labelsEN = []
list_labels = []
list_labelsE = []
for ix in range(len(list_labels_t)) : #lbl,lble in (list_labels_t,list_labels_tE) :
lbl_68 = [] #Per folder
lbl_2 = [] #Per folder
lbl_n68 = [] #Per folder
lbl_n2 = [] #Per folder
for jx in range(len (list_labels_t[ix])): #lbl_sub in lbl :
#print(os.path.basename(list_gt[ix][jx]))
#print(os.path.basename(list_labels_t[ix][jx]))
#print(os.path.basename(list_labels_tE[ix][jx]))
lbl_sub = list_labels_t[ix][jx]
if ('pts' in lbl_sub) :
x = []
#print(lbl_sub)
lbl_68.append(read_kp_file(lbl_sub,True))
lbl_n68.append(lbl_sub)
if self.dbType == 0 :
lbl_subE = list_labels_tE[ix][jx]
if ('aro' in lbl_subE) :
x = []
#print(lbl_sub)
with open(lbl_subE) as file:
data2 = [re.split(r'\t+',l.strip()) for l in file]
for i in range(len(data2)) :
#x.append([ float(j) for j in data2[i][0].split()] )
temp = [ float(j) for j in data2[i][0].split()]
temp.reverse() #to give the valence first. then arousal
x.append(temp)
#x.reverse()
lbl_2.append(np.array(x).flatten('F'))
lbl_n2.append(lbl_sub)
if self.dbType == 1 : #sewa
#print(list_labels_t[ix][0])
valFile = np.asarray(readCSV(list_labels_tE[ix][0]))
aroFile = np.asarray(readCSV(list_labels_tE[ix][1]))
lbl_n2.append(list_labels_tE[ix][0])
lbl_2 = np.column_stack((valFile,aroFile))
list_labelsN.append(lbl_n68)
list_labelsEN.append(lbl_n2)
list_labels.append(lbl_68)
list_labelsE.append(lbl_2)
t_l_imgs = []
t_l_gt = []
t_l_gtE = []
t_list_gt_names = []
t_list_gtE_names = []
#print(list_labelsEN)
if not self.isVideo :
#Flatten it to one list
for i in range(0,len(list_gt)): #For each dataset
list_images = []
list_gt_names = []
list_gtE_names = []
indexer = 0
list_ground_truth = np.zeros([len(list_gt[i]),self.ldmrkNumber*2])
list_ground_truthE = np.zeros([len(list_gt[i]),2])
for j in range(0,len(list_gt[i]),step): #for number of data #n_skip is usefull for video data
list_images.append(list_gt[i][j])
list_gt_names.append(list_labelsN[i][j])
if self.dbType == 0 :
list_gtE_names.append(list_labelsEN[i][j])
else :
list_gtE_names.append(list_labelsEN[i][0])
#print(list_labelsEN[i])
'''if len(list_labels[i][j] < 1):
print(list_labels[i][j])'''
#print(len(list_labels[i][j]))
list_ground_truth[indexer] = np.array(list_labels[i][j]).flatten('F')
list_ground_truthE[indexer] = np.array(list_labelsE[i][j]).flatten('F')
indexer += 1
t_l_imgs.append(list_images)
t_l_gt.append(list_ground_truth)
t_l_gtE.append(list_ground_truthE)
t_list_gt_names.append(list_gt_names)
t_list_gtE_names.append(list_gtE_names)
else :
if self.seq_length is None :
list_ground_truth = np.zeros([int(counter_image/(self.seq_length*step)),self.seq_length,136])
indexer = 0;
for i in range(0,len(list_gt)): #For each dataset
counter = 0
for j in range(0,int(len(list_gt[i])/(self.seq_length*step))): #for number of data/batchsize
temp = []
temp2 = np.zeros([self.seq_length,136])
i_temp = 0
for z in range(counter,counter+(self.seq_length*step),step):#1 to seq_size
temp.append(list_gt[i][z])
temp2[i_temp] = list_labels[i][z]
i_temp+=1
list_images.append(temp)
list_ground_truth[indexer] = temp2
indexer += 1
counter+=self.seq_length*step
#print counter
self.l_imgs = list_images
self.l_gt = list_ground_truth
else :
counter_seq = 0;
for i in range(0,len(list_gt)): #For each dataset
indexer = 0;
list_gt_names = []
list_gtE_names = []
list_ground_truth = np.zeros([int(len(list_gt[i])/(self.seq_length*step)),self.seq_length,136]) #np.zeros([counter_image,136])
list_ground_truthE = np.zeros([int(len(list_gt[i])/(self.seq_length*step)),self.seq_length,2])#np.zeros([counter_image,2])
counter = 0
list_images = []
for j in range(0,int(len(list_gt[i])/(self.seq_length*step))): #for number of data/batchsize
tmpn68 = []
tmpn2 = []
temp = []
temp2 = np.zeros([self.seq_length,136])
temp3 = np.zeros([self.seq_length,2])
i_temp = 0
for z in range(counter,counter+(self.seq_length*step),step):#1 to seq_size
temp.append(list_gt[i][z])
temp2[i_temp] = list_labels[i][z].flatten('F')
temp3[i_temp] = list_labelsE[i][z].flatten('F')
tmpn68.append(list_labelsN[i][z])
tmpn2.append(list_labelsEN[i][z])
i_temp+=1
counter_seq+=1
list_images.append(temp)
list_ground_truth[indexer] = temp2
list_ground_truthE[indexer] = temp3
list_gt_names.append(tmpn68)
list_gtE_names.append(tmpn2)
indexer += 1
counter+=self.seq_length*step
#print counter
t_l_imgs.append(list_images)
t_l_gt.append(list_ground_truth)
t_l_gtE.append(list_ground_truthE)
t_list_gt_names.append(list_gt_names)
t_list_gtE_names.append(list_gtE_names)
self.l_imgs = []
self.l_gt = []
self.l_gtE = []
self.list_gt_names = []
self.list_gtE_names = []
#print('cimage : ',counter_image)
if split :
indexer = 0
self.l_gt = []
self.l_gtE = []
totalData = len(t_l_imgs)
perSplit = int(truediv(totalData, nSplit))
for x in listSplit :
print('split : ',x)
begin = x*perSplit
if x == nSplit-1 :
end = begin + (totalData - begin)
else :
end = begin+perSplit
print(begin,end,totalData)
if not self.isVideo :
for i in range(begin,end) :
for j in range(len(t_l_imgs[i])):
#print('append ',t_l_imgs[i][j])
self.l_imgs.append(t_l_imgs[i][j])
self.l_gt.append(t_l_gt[i][j])
self.l_gtE.append(t_l_gtE[i][j])
self.list_gt_names.append(t_list_gt_names[i][j])
self.list_gtE_names.append(t_list_gtE_names[i][j])
indexer+=1
else :
for i in range(begin,end) :
for j in range(len(t_l_imgs[i])): #seq counter
t_img = []
t_gt = []
t_gtE = []
t_gt_N = []
t_gt_EN = []
tmp = 0
for k in range(len(t_l_imgs[i][j])): #seq size
t_img.append(t_l_imgs[i][j][k])
t_gt.append(t_l_gt[i][j][k])
t_gtE.append(t_l_gtE[i][j][k])
t_gt_N.append(t_list_gt_names[i][j][k])
t_gt_EN.append(t_list_gtE_names[i][j][k])
tmp+=1
#print('append ',t_img)
self.l_imgs.append(t_img)
self.l_gt.append(t_gt)
self.l_gtE.append(t_gtE)
self.list_gt_names.append(t_gt_N)
self.list_gtE_names.append(t_gt_EN)
indexer+=1
print(len(self.l_imgs))
self.l_gt = np.asarray(self.l_gt)
self.l_gtE = np.asarray(self.l_gtE)
else :
if not self.isVideo :
self.l_gt = np.zeros([counter_image,136])
self.l_gtE = np.zeros([counter_image,2])
indexer = 0
for i in range(len(t_l_imgs)):
for j in range(len(t_l_imgs[i])):
self.l_imgs.append(t_l_imgs[i][j])
print(i,j,'-',len(t_l_imgs[i]))
self.l_gt[indexer] = t_l_gt[i][j]
self.l_gtE[indexer] = t_l_gtE[i][j]
self.list_gt_names.append(t_list_gt_names[i][j])
self.list_gtE_names.append(t_list_gtE_names[i][j])
indexer+=1
else :
self.l_gt= np.zeros([counter_seq,self.seq_length,136])
self.l_gtE = np.zeros([counter_seq,self.seq_length,2])
indexer = 0
for i in range(len(t_l_imgs)): #dataset
for j in range(len(t_l_imgs[i])): #seq counter
t_img = []
t_gt = np.zeros([self.seq_length,136])
t_gte = np.zeros([self.seq_length,2])
t_gt_n = []
t_gt_en = []
i_t = 0
for k in range(len(t_l_imgs[i][j])): #seq size
t_img.append(t_l_imgs[i][j][k])
t_gt[i_t] = t_l_gt[i][j][k]
t_gte[i_t] = t_l_gtE[i][j][k]
t_gt_n.append(t_list_gt_names[i][j][k])
t_gt_en.append(t_list_gtE_names[i][j][k])
i_t+=1
self.l_imgs.append(t_img)
self.l_gt[indexer] = t_gt
self.l_gtE[indexer] = t_gte
self.list_gt_names.append(t_gt_n)
self.list_gtE_names.append(t_gt_en)
indexer+=1
print('limgs : ',len(self.l_imgs))
    def __getitem__(self,index):
        """Load one sample (image mode) or one sequence of samples (video mode).

        Pipeline per frame: load RGB image -> optional face crop from landmarks
        -> optional affine alignment to a template -> optional heatmap channel
        -> optional augmentation -> tensor transform.

        Returns (image mode): (image, VA, landmarks[, heatmap], M) tensors.
        Returns (video mode): same items stacked along a leading sequence dim.
        NOTE(review): `label` is assumed to be x-coords followed by y-coords
        (length 2*ldmrkNumber, Fortran-flattened) — confirm against __init__.
        """
        #Read all data, transform etc.
        #In video, the output will be : [batch_size, sequence_size, channel, width, height]
        #Im image : [batch_size, channel, width, height]
        l_imgs = []; l_VA = []; l_ldmrk = []; l_nc = []#,torch.FloatTensor(label),newChannel#,x,self.list_gt_names[index]
        # Wrap single-image samples in 1-element lists so both modes share one loop.
        if not self.isVideo :
            x_l = [self.l_imgs[index]];labelE_l =[self.l_gtE[index].copy()];label_l = [self.l_gt[index].copy()];label_n =[self.list_gt_names[index]]
        else :
            x_l = self.l_imgs[index];labelE_l =self.l_gtE[index].copy();label_l = self.l_gt[index].copy();label_n =self.list_gt_names[index]
        for x,labelE,label,ln in zip(x_l,labelE_l,label_l,label_n) :
            #print(x,labelE,label,ln)
            tImage = Image.open(x).convert("RGB")
            tImageB = None
            if self.onlyFace :
                #crop the face region
                #t,l_x,l_y,x1,y1,x_min,y_min,x2,y2 = get_enlarged_bb(the_kp = label,div_x = 2,div_y = 2,images = cv2.imread(x),displacementxy = random.uniform(-.5,.5))
                if self.ldmrkNumber > 49 :
                    t,l_x,l_y,x1,y1,x_min,y_min,x2,y2 = get_enlarged_bb(the_kp = label.copy(),div_x = 8,div_y = 8,images = cv2.imread(x))#,displacementxy = random.uniform(-.5,.5))
                else :
                    t,l_x,l_y,x1,y1,x_min,y_min,x2,y2 = utils.get_enlarged_bb(the_kp = label.copy(),
                        div_x = 3,div_y = 3,images = cv2.imread(x), n_points = 49)#,displacementxy = random.uniform(-.5,.5))
                area = (x1,y1, x2,y2)
                tImage = tImage.crop(area)
                # Shift landmarks into the crop's coordinate frame, then rescale
                # them to the fixed network input size.
                label[:self.ldmrkNumber] -= x_min
                label[self.ldmrkNumber:] -= y_min
                tImage = tImage.resize((self.imageWidth,self.imageHeight))
                label[:self.ldmrkNumber] *= truediv(self.imageWidth,(x2 - x1))
                label[self.ldmrkNumber:] *= truediv(self.imageHeight,(y2 - y1))
            #now aliging
            if self.align :
                # Estimate a similarity/affine transform from this face's nose and
                # eye landmarks (or all points via Procrustes) onto the template.
                tImageT = utils.PILtoOpenCV(tImage.copy())
                if self.swap :
                    ptsSource = torch.tensor([
                        [label[self.nose+self.ldmrkNumber],label[self.nose]],[label[self.leye+self.ldmrkNumber],label[self.leye]],[label[self.reye+self.ldmrkNumber],label[self.reye]]
                        ])
                    ptsSn = [
                        [label[self.nose+self.ldmrkNumber],label[self.nose]],[label[self.leye+self.ldmrkNumber],label[self.leye]],[label[self.reye+self.ldmrkNumber],label[self.reye]]
                        ]
                else :
                    ptsSource = torch.tensor([
                        [label[self.nose],label[self.nose+self.ldmrkNumber]],[label[self.leye],label[self.leye+self.ldmrkNumber]],[label[self.reye],label[self.reye+self.ldmrkNumber]]
                        ])
                    ptsSn =[
                        [label[self.nose],label[self.nose+self.ldmrkNumber]],[label[self.leye],label[self.leye+self.ldmrkNumber]],[label[self.reye],label[self.reye+self.ldmrkNumber]]
                        ]
                ptsSnFull = np.column_stack((label[:self.ldmrkNumber],label[self.ldmrkNumber:]))
                ptsSnFull = np.asarray(ptsSnFull,np.float32)
                ptsSource = ptsSource.numpy()
                ptsSource = np.asarray(ptsSource,np.float32)
                if self.useNudget :
                    trans = nudged.estimate(ptsSn,self.ptsTn)
                    M = np.asarray(trans.get_matrix())[:2,:]
                    #print("Nudged : ",mN,trans.get_scale(),trans.get_rotation())
                else :
                    #M = cv2.getAffineTransform(ptsSource,self.ptsDst)
                    #_,_,aff = self.procrustes(ptsSource,self.ptsDst)
                    #print(ptsSource.shape,'-', self.ptsDst.shape)
                    #print(ptsSnFull.shape,'-', self.ptsTnFull.shape)
                    _,_,aff = self.procrustes(self.ptsTnFull,ptsSnFull)
                    M = aff[:2,:]
                dst = cv2.warpAffine(tImageT,M,(self.imageWidth,self.imageHeight))
                #print(np.asarray(ptsSn).shape, np.asarray(self.ptsTn).shape,M.shape)
                # Apply the same transform to the landmarks (homogeneous coords).
                M_full = np.append(M,[[0,0,1]],axis = 0)
                l_full = np.stack((label[:self.ldmrkNumber],label[self.ldmrkNumber:],np.ones(self.ldmrkNumber)))
                ldmark = np.matmul(M_full, l_full)
                if False :
                    # debug visualization (disabled)
                    print(ldmark)
                    for i in range(self.ldmrkNumber) :
                        cv2.circle(dst,(int(scale(ldmark[0,i])),int(scale(ldmark[1,i]))),2,(0,255,0) )
                    cv2.imshow('test align',dst)
                    cv2.waitKey(0)
                label = np.concatenate((ldmark[0],ldmark[1]))
                tImage = utils.OpenCVtoPIL(dst)
            newChannel = None
            if self.wHeatmap :
                # Build (or load, currently disabled) a landmark-heatmap channel,
                # cached as <video dir>/heatmaps/<frame>.npy.
                theMiddleName = 'img'
                filePath = x.split(os.sep)
                ifolder = filePath.index(theMiddleName)
                print(ifolder)
                image_name = filePath[-1]
                annot_name_H = os.path.splitext(image_name)[0]+'.npy'
                sDirName = filePath[:ifolder]
                dHeatmaps = '/'.join(sDirName)+'/heatmaps'
                finalTargetH = dHeatmaps+'/'+annot_name_H
                print(finalTargetH)
                # NOTE(review): `and False` disables the cache-read path on purpose?
                if isfile(finalTargetH) and False:
                    newChannel = np.load(finalTargetH)
                    newChannel = Image.fromarray(newChannel)
                else :
                    checkDirMake(dHeatmaps)
                    tImageTemp = cv2.cvtColor(np.array(tImage),cv2.COLOR_RGB2BGR)
                    #tImageTemp = cv2.imread(x)#tImage.copy()
                    print(len(label),label)
                    b_channel,g_channel,r_channel = tImageTemp[:,:,0],tImageTemp[:,:,1],tImageTemp[:,:,2]
                    newChannel = b_channel.copy(); newChannel[:] = 0
                    t0,t1,t2,t3 = utils.get_bb(label[0:self.ldmrkNumber], label[self.ldmrkNumber:],length=self.ldmrkNumber)
                    l_cd,rv = utils.get_list_heatmap(0,None,t2-t0,t3-t1,.05)
                    height, width,_ = tImageTemp.shape
                    wx = t2-t0
                    wy = t3-t1
                    scaler = 255/np.max(rv)
                    # Stamp a gaussian-like blob around each landmark, keeping the
                    # max where blobs overlap.
                    for iter in range(self.ldmrkNumber) :
                        ix,iy = int(label[iter]),int(label[iter+self.ldmrkNumber])
                        #Now drawing given the center
                        for iter2 in range(len(l_cd)) :
                            value = int(rv[iter2]*scaler)
                            if newChannel[utils.inBound(iy+l_cd[iter2][0],0,height-1), utils.inBound(ix + l_cd[iter2][1],0,width-1)] < value :
                                newChannel[utils.inBound(iy+l_cd[iter2][0],0,height-1), utils.inBound(ix + l_cd[iter2][1],0,width-1)] = int(rv[iter2]*scaler)#int(heatmapValue/2 + rv[iter2] * heatmapValue)
                    '''tImage2 = cv2.merge((b_channel, newChannel,newChannel, newChannel))
                    cv2.imshow("combined",tImage2)
                    cv2.waitKey(0)'''
                    np.save(finalTargetH,newChannel)
                    newChannel = Image.fromarray(newChannel)
            if self.augment :
                # Randomly pick one augmentation; all keep landmarks consistent.
                sel = np.random.randint(0,4)
                #0 : neutral, 1 : horizontal flip, 2:random rotation, 3:occlusion
                if sel == 0 :
                    pass
                elif sel == 1 :
                    flip = RandomHorizontalFlip_WL(1,self.ldmrkNumber)
                    tImage,label,newChannel = flip(tImage,label,newChannel,self.ldmrkNumber)
                elif sel == 2 and not self.align :
                    rot = RandomRotation_WL(45)
                    tImage,label,newChannel = rot(tImage,label,newChannel,self.ldmrkNumber)
                elif sel == 3 :
                    occ = Occlusion_WL(1)
                    tImage,label,newChannel = occ(tImage,label,newChannel)
                #random crop
                if not self.align :
                    rc = RandomResizedCrop_WL(size = self.imageSize,scale = (0.5,1), ratio = (0.5, 1.5))
                    tImage,label,newChannel= rc(tImage,label,newChannel)
                #additional blurring
                if (np.random.randint(1,3)%2==0) and True :
                    sel_n = np.random.randint(1,6)
                    #sel_n = 4
                    rc = GeneralNoise_WL(1)
                    tImage,label= rc(tImage,label,sel_n,np.random.randint(0,3))
            if self.returnM :
                # Recompute the alignment matrix on the (possibly augmented)
                # landmarks and convert it to a grid-sample theta via param2theta.
                if self.swap :
                    ptsSource = torch.tensor([
                        [label[self.nose+self.ldmrkNumber],label[self.nose]],[label[self.leye+self.ldmrkNumber],label[self.leye]],[label[self.reye+self.ldmrkNumber],label[self.reye]]
                        ])
                    ptsSn = [
                        [label[self.nose+self.ldmrkNumber],label[self.nose]],[label[self.leye+self.ldmrkNumber],label[self.leye]],[label[self.reye+self.ldmrkNumber],label[self.reye]]
                        ]
                else :
                    ptsSource = torch.tensor([
                        [label[self.nose],label[self.nose+self.ldmrkNumber]],[label[self.leye],label[self.leye+self.ldmrkNumber]],[label[self.reye],label[self.reye+self.ldmrkNumber]]
                        ])
                    ptsSn =[
                        [label[self.nose],label[self.nose+self.ldmrkNumber]],[label[self.leye],label[self.leye+self.ldmrkNumber]],[label[self.reye],label[self.reye+self.ldmrkNumber]]
                        ]
                ptsSnFull = np.column_stack((label[:self.ldmrkNumber],label[self.ldmrkNumber:]))
                ptsSnFull = np.asarray(ptsSnFull,np.float32)
                ptsSource = ptsSource.numpy()
                ptsSource = np.asarray(ptsSource,np.float32)
                if self.useNudget :
                    trans = nudged.estimate(ptsSn,self.ptsTn)
                    M = np.asarray(trans.get_matrix())[:2,:]
                else :
                    #M = cv2.getAffineTransform(ptsSource,self.ptsDst)
                    _,_,aff = self.procrustes(self.ptsTnFull,ptsSnFull)
                    M = aff[:2,:]
                if False :
                    # debug visualization (disabled)
                    tImageT = utils.PILtoOpenCV(tImage.copy())
                    dst = cv2.warpAffine(tImageT,M,(self.imageWidth,self.imageHeight))
                    print(np.asarray(ptsSn).shape, np.asarray(self.ptsTn).shape,M.shape)
                    M_full = np.append(M,[[0,0,1]],axis = 0)
                    l_full = np.stack((label[:self.ldmrkNumber],label[self.ldmrkNumber:],np.ones(self.ldmrkNumber)))
                    ldmark = np.matmul(M_full, l_full)
                    print(ldmark)
                    for i in range(self.ldmrkNumber) :
                        cv2.circle(dst,(int(scale(ldmark[0,i])),int(scale(ldmark[1,i]))),2,(0,0,255) )
                    cv2.imshow('test recovered',dst)
                    cv2.waitKey(0)
                Minter = self.param2theta(np.append(M,[[0,0,1]],axis = 0), self.imageWidth,self.imageHeight)
                Mt = torch.from_numpy(Minter).float()
            else :
                Mt = torch.zeros(1)
            if self.useIT :
                tImage = self.transformInternal(tImage)
            else :
                tImage = self.transform(tImage)
            if not self.wHeatmap :
                l_imgs.append(tImage); l_VA.append(torch.FloatTensor(labelE)); l_ldmrk.append(torch.FloatTensor(label))#,x,self.list_gt_names[index]
            else :
                newChannel = transforms.Resize(224)(newChannel)
                newChannel = transforms.ToTensor()(newChannel)
                newChannel = newChannel.sub(125)
                l_imgs.append(tImage); l_VA.append(torch.FloatTensor(labelE)); l_ldmrk.append(torch.FloatTensor(label)); l_nc.append(newChannel)
            #return tImage,torch.FloatTensor(labelE),torch.FloatTensor(label),newChannel#,x,self.list_gt_names[index]
        if not self.isVideo :
            if self.wHeatmap :
                return l_imgs[0], l_VA[0], l_ldmrk[0], l_nc[0], Mt
            else :
                return l_imgs[0], l_VA[0], l_ldmrk[0], Mt
        else :
            #lImgs = torch.Tensor(len(l_imgs),3,self.imageHeight,self.imageWidth)
            #lVA = torch.Tensor(len(l_VA),2)
            #lLD = torch.Tensor(len(l_ldmrk),136)
            # Video mode: stack frames into (seq_length, ...) tensors.
            lImgs = torch.stack(l_imgs)
            lVA = torch.stack(l_VA)
            lLD = torch.stack(l_ldmrk)
            #print(lImgs.shape, l_imgs[0].shape, l_VA[0].shape,len(lImgs))
            #torch.cat(l_imgs, out=lImgs)
            #torch.cat(l_VA, out=lVA)
            #torch.cat(l_ldmrk, out=lLD)
            if self.wHeatmap :
                #lnc = torch.Tensor(len(l_nc),1,self.imageHeight,self.imageWidth)
                #torch.cat(l_nc, out=lnc)
                lnc = torch.stack(l_nc)
                return lImgs, lVA, lLD, lnc, Mt
            else :
                return lImgs, lVA, lLD, Mt
def transformInternal(self, img):
transforms.Resize(224)(img)
img = np.array(img, dtype=np.uint8)
#img = img[:, :, ::-1] # RGB -> BGR
img = img.astype(np.float32)
img -= self.mean_bgr
img = img.transpose(2, 0, 1) # C x H x W
img = torch.from_numpy(img).float()
return img
def untransformInternal(self, img, lbl):
img = img.numpy()
img = img.transpose(1, 2, 0)
img += self.mean_bgr
img = img.astype(np.uint8)
img = img[:, :, ::-1]
return img, lbl
def param2theta(self,param, w, h):
param = np.linalg.inv(param)
theta = np.zeros([2,3])
theta[0,0] = param[0,0]
theta[0,1] = param[0,1]*h/w
theta[0,2] = param[0,2]*2/w + theta[0,0] + theta[0,1] - 1
theta[1,0] = param[1,0]*w/h
theta[1,1] = param[1,1]
theta[1,2] = param[1,2]*2/h + theta[1,0] + theta[1,1] - 1
return theta
def procrustes(self, X, Y, scaling=True, reflection='best'):
n,m = X.shape
ny,my = Y.shape
muX = X.mean(0)
muY = Y.mean(0)
X0 = X - muX
Y0 = Y - muY
ssX = (X0**2.).sum()
ssY = (Y0**2.).sum()
# centred Frobenius norm
normX = np.sqrt(ssX)
normY = np.sqrt(ssY)
# scale to equal (unit) norm
X0 /= normX
Y0 /= normY
if my < m:
Y0 = np.concatenate((Y0, np.zeros(n, m-my)),0)
# optimum rotation matrix of Y
A = np.dot(X0.T, Y0)
U,s,Vt = np.linalg.svd(A,full_matrices=False)
V = Vt.T
T = np.dot(V, U.T)
if reflection is not 'best':
# does the current solution use a reflection?
have_reflection = np.linalg.det(T) < 0
# if that's not what was specified, force another reflection
if reflection != have_reflection:
V[:,-1] *= -1
s[-1] *= -1
T = np.dot(V, U.T)
traceTA = s.sum()
if scaling:
# optimum scaling of Y
b = traceTA * normX / normY
# standarised distance between X and b*Y*T + c
d = 1 - traceTA**2
# transformed coords
Z = normX*traceTA*np.dot(Y0, T) + muX
else:
b = 1
d = 1 + ssY/ssX - 2 * traceTA * normY / normX
Z = normY*np.dot(Y0, T) + muX
# transformation matrix
if my < m:
T = T[:my,:]
c = muX - b*np.dot(muY, T)
#transformation values
#tform = {'rotation':T, 'scale':b, 'translation':c}
tform = np.append(b*T,[c],axis = 0).T
tform = np.append(tform,[[0,0,1]],axis = 0)
return d, Z, tform
    def __len__(self):
        # Dataset size: one entry per image (image mode) or per sequence (video mode).
        return len(self.l_imgs)
class SEWAFEWReducedLatent(data.Dataset):
    """Valence/arousal dataset over pre-extracted latent features (.npz files).

    Returns affect labels in Valence[0], Arousal[1] order. ``dbType`` 0 is
    AFEW (per-frame text annotations in ``annot2``), 1 is SEWA (per-video CSV
    files); ``isSemaine`` selects the SEMAINE feature folder.

    Notes
    -----
    * ``dir_gt`` and ``splitNumber`` are accepted for signature compatibility
      but unused here.
    * Samples are only populated when ``split=True``; with ``split=False``
      ``l_imgs`` stays empty (pre-existing behavior, kept).
    """

    mean_bgr = np.array([91.4953, 103.8827, 131.0912])  # from resnet50_ft.prototxt

    def __init__(self, data_list = ["AFEW"],dir_gt = None, image_size =224, step = 1,split = False,
                 nSplit = 5, listSplit = [0,1,2,3,4],isVideo = False, seqLength = None, dbType = 0,
                 returnQuadrant = False, returnWeight = False,useAll = False,
                 splitNumber = None,returnVAQ=False,returnFName = False,isSemaine=False):#dbtype 0 is AFEW, 1 is SEWA
        self.dbType = dbType
        self.isSemaine = isSemaine
        self.seq_length = seqLength
        self.isVideo = isVideo
        self.returnNoisy = False
        self.returnVAQ = returnVAQ
        self.returnFName = returnFName
        self.curDir = rootDir + "/"

        # Feature-folder name depends on the source database.
        if dbType == 0:
            featName = "FT-AF0-0-16-16-Den"
        else:
            featName = "FT-SW0-0-16-16-Den"
        if self.isSemaine:
            featName = "FT-SEM0-0-16-16-Den"
        if useAll:
            featName += "-UA"
        featName += "-z"

        self.returnWeight = returnWeight
        if self.returnWeight:
            # Load the per-bin VA histogram for this split and invert it into
            # normalized inverse-frequency sample weights.
            name = 'VA-Train-'+str(listSplit[0])+'.npy'
            if self.dbType == 1:
                name = 'S-'+name
            if isSemaine:
                name = 'SE-VA-Train-'+str(listSplit[0])+'.npy'
            weight = np.load(rootDir+"/DST-SE-AF/"+name).astype('float')+1
            col_sum = weight.sum(0)          # renamed from `sum` (shadowed builtin)
            weight = (weight/col_sum)
            weight = 1/weight
            col_sum = weight.sum(0)
            weight = weight/col_sum
            self.weight = weight

        self.returnQ = returnQuadrant

        list_gt = []
        list_labels_tE = []
        counter_image = 0
        annotE_name = 'annot2'

        # Walk each dataset directory, collecting feature files and labels.
        for dname in data_list:                  # renamed from `data` (shadowed the data module)
            print(("Opening "+dname))
            fullDir = self.curDir + dname + "/"
            listFolder = os.listdir(fullDir)
            listFolder.sort()
            for tempx in range(len(listFolder)):
                f = listFolder[tempx]
                fullPath = os.path.join(fullDir, f)
                if os.path.isdir(fullPath):
                    if self.dbType == 1:  # SEWA: VA comes from two per-video CSVs
                        valFile = fullPath+"/valence/"+f+"_Valence_A_Aligned.csv"
                        aroFile = fullPath+"/arousal/"+f+"_Arousal_A_Aligned.csv"
                        list_labels_tE.append([valFile, aroFile])
                    for sub_f in file_walker.walk(fullPath):
                        if sub_f.isDirectory:
                            list_dta = []
                            if (sub_f.name == featName):  # the latent-feature folder
                                for sub_sub_f in sub_f.walk():
                                    if (".npy" not in sub_sub_f.full_path):
                                        list_dta.append(sub_sub_f.full_path)
                                list_gt.append(sorted(list_dta))
                                counter_image += len(list_dta)
                            elif (sub_f.name == annotE_name):
                                if self.dbType == 0:  # AFEW: per-frame annot files
                                    for sub_sub_f in sub_f.walk():
                                        if (".npy" not in sub_sub_f.full_path):
                                            list_dta.append(sub_sub_f.full_path)
                                    list_labels_tE.append(sorted(list_dta))

        self.length = counter_image
        print("Now opening keylabels")

        # Parse the collected label files into per-folder arrays.
        list_labelsEN = []
        list_labelsE = []
        for ix in range(len(list_labels_tE)):
            lbl_2 = []    # VA values, per folder
            lbl_n2 = []   # label-file names, per folder
            if self.dbType == 1:  # SEWA
                valFile = np.asarray(readCSV(list_labels_tE[ix][0]))
                aroFile = np.asarray(readCSV(list_labels_tE[ix][1]))
                lbl_n2.append(list_labels_tE[ix][0])
                lbl_2 = np.column_stack((valFile, aroFile))
            else:
                for jx in range(len(list_labels_tE[ix])):
                    if self.dbType == 0:
                        lbl_subE = list_labels_tE[ix][jx]
                        if ('aro' in lbl_subE):
                            x = []
                            with open(lbl_subE) as file:
                                data2 = [re.split(r'\t+', l.strip()) for l in file]
                            for i in range(len(data2)):
                                temp = [float(j) for j in data2[i][0].split()]
                                temp.reverse()  # to give the valence first, then arousal
                                x.append(temp)
                            lbl_2.append(np.array(x).flatten('F'))
                            lbl_n2.append(lbl_subE)
            list_labelsEN.append(lbl_n2)
            list_labelsE.append(lbl_2)

        t_l_imgs = []
        t_l_gtE = []
        t_list_gtE_names = []

        print(len(list_labelsE))
        print(len(list_labelsE[0]))
        print(len(list_labelsE[0][0]))
        print(list_labelsE[0][0])

        if not self.isVideo:
            # Flatten everything into per-folder flat lists.
            for i in range(0, len(list_gt)):
                list_images = []
                list_gtE_names = []
                indexer = 0
                list_ground_truthE = np.zeros([len(list_gt[i]), 2])
                for j in range(0, len(list_gt[i]), step):  # `step` allows frame skipping
                    list_images.append(list_gt[i][j])
                    if self.dbType == 0:
                        list_gtE_names.append(list_labelsEN[i][j])
                    else:
                        list_gtE_names.append(list_labelsEN[i][0])
                    list_ground_truthE[indexer] = np.array(list_labelsE[i][j]).flatten('F')
                    indexer += 1
                t_l_imgs.append(list_images)
                t_l_gtE.append(list_ground_truthE)
                t_list_gtE_names.append(list_gtE_names)
        else:
            if self.seq_length is None:
                # NOTE(review): legacy branch — it dereferences self.seq_length
                # and undefined names (list_labels, list_images) and would fail
                # if ever taken; preserved unchanged for compatibility.
                list_ground_truth = np.zeros([int(counter_image/(self.seq_length*step)), self.seq_length, 136])
                indexer = 0
                for i in range(0, len(list_gt)):
                    counter = 0
                    for j in range(0, int(len(list_gt[i])/(self.seq_length*step))):
                        temp = []
                        temp2 = np.zeros([self.seq_length, 136])
                        i_temp = 0
                        for z in range(counter, counter+(self.seq_length*step), step):
                            temp.append(list_gt[i][z])
                            temp2[i_temp] = list_labels[i][z]
                            i_temp += 1
                        list_images.append(temp)
                        list_ground_truth[indexer] = temp2
                        indexer += 1
                        counter += self.seq_length*step
                self.l_imgs = list_images
                self.l_gt = list_ground_truth
            else:
                # Chunk each folder into fixed-length sequences.
                counter_seq = 0
                for i in range(0, len(list_gt)):
                    indexer = 0
                    list_gtE_names = []
                    list_ground_truthE = np.zeros([int(len(list_gt[i])/(self.seq_length*step)), self.seq_length, 2])
                    counter = 0
                    list_images = []
                    for j in range(0, int(len(list_gt[i])/(self.seq_length*step))):
                        temp = []
                        tmpn2 = []
                        temp3 = np.zeros([self.seq_length, 2])
                        i_temp = 0
                        for z in range(counter, counter+(self.seq_length*step), step):
                            temp.append(list_gt[i][z])
                            temp3[i_temp] = list_labelsE[i][z].flatten('F')
                            if self.dbType == 0:
                                tmpn2.append(list_labelsEN[i][j])
                            else:
                                tmpn2.append(list_labelsEN[i][0])
                            i_temp += 1
                            counter_seq += 1
                        list_images.append(temp)
                        list_ground_truthE[indexer] = temp3
                        list_gtE_names.append(tmpn2)
                        indexer += 1
                        counter += self.seq_length*step
                    t_l_imgs.append(list_images)
                    t_l_gtE.append(list_ground_truthE)
                    t_list_gtE_names.append(list_gtE_names)

        self.l_imgs = []
        self.l_gtE = []
        self.list_gtE_names = []

        if split:
            # Keep only the cross-validation folds listed in `listSplit`.
            indexer = 0
            self.l_gtE = []
            totalData = len(t_l_imgs)
            perSplit = int(truediv(totalData, nSplit))
            for x in listSplit:
                print('split : ', x)
                begin = x*perSplit
                if x == nSplit-1:
                    end = begin + (totalData - begin)  # last fold absorbs the remainder
                else:
                    end = begin+perSplit
                print(begin, end, totalData)
                if not self.isVideo:
                    for i in range(begin, end):
                        for j in range(len(t_l_imgs[i])):
                            self.l_imgs.append(t_l_imgs[i][j])
                            self.l_gtE.append(t_l_gtE[i][j])
                            self.list_gtE_names.append(t_list_gtE_names[i][j])
                            indexer += 1
                else:
                    for i in range(begin, end):
                        for j in range(len(t_l_imgs[i])):  # seq counter
                            t_img = []
                            t_gtE = []
                            t_gt_EN = []
                            for k in range(len(t_l_imgs[i][j])):  # seq size
                                t_img.append(t_l_imgs[i][j][k])
                                t_gtE.append(t_l_gtE[i][j][k])
                                t_gt_EN.append(t_list_gtE_names[i][j][k])
                            self.l_imgs.append(t_img)
                            self.l_gtE.append(t_gtE)
                            self.list_gtE_names.append(t_gt_EN)
                            indexer += 1
            print(len(self.l_imgs))
        print('limgs : ', len(self.l_imgs))

    def __getitem__(self, index):
        """Return one latent-feature sample (or a stacked sequence).

        Base return is [features, VA, dummy-landmarks, dummy-M, label-name];
        quadrant / noisy copy / weights / VAQ / filename are appended in that
        order when the corresponding ``return*`` flags are set.
        """
        l_imgs = []; l_ldmrk = []; l_VA = []; l_nc = []; l_qdrnt = []
        if self.returnFName:
            l_fname = []
        if self.returnNoisy:
            l_nimgs = []
        if self.returnWeight:
            l_weights = []
        if self.returnVAQ:
            l_vaq = []

        # Wrap single samples in 1-element lists so both modes share one loop.
        if not self.isVideo:
            x_l = [self.l_imgs[index]]; labelE_l = [self.l_gtE[index].copy()]; label_n = [self.list_gtE_names[index]]
        else:
            x_l = self.l_imgs[index]; labelE_l = self.l_gtE[index].copy(); label_n = self.list_gtE_names[index]

        for x, labelE, ln in zip(x_l, labelE_l, label_n):
            tmp = np.load(x)
            if self.returnFName:
                l_fname.append(x)
            use_reduced = True  # keep only the first 64 latent dims
            if use_reduced:
                tImage = tmp['z'][:64]
            else:
                tImage = tmp['z']
            if self.returnVAQ:
                vaq = torch.from_numpy(tmp['vaq'])
                l_vaq.append(vaq)
            nImage = tImage.copy()
            label = torch.zeros(1)   # no landmarks for latent features; dummy
            Mt = torch.zeros(1)      # no alignment matrix either; dummy
            tImage = torch.from_numpy(tImage)
            if self.returnNoisy:
                nImage = torch.from_numpy(nImage)
            l_imgs.append(tImage); l_VA.append(torch.FloatTensor(labelE)); l_ldmrk.append(torch.FloatTensor(label))
            if self.returnNoisy:
                l_nimgs.append(nImage)
            if self.returnQ:
                # VA range differs per database (renamed from min/max builtins).
                if self.dbType == 1:
                    va_lo = 0; va_hi = 1
                elif self.isSemaine == 1:
                    va_lo = -1; va_hi = 1
                else:
                    va_lo = -10; va_hi = 10
                l_qdrnt.append(toQuadrant(labelE, va_lo, va_hi, toOneHot=False))
            if self.returnWeight:
                v = labelE[0]
                # FIX: arousal previously read labelE[0] (the valence value);
                # it is scaled as arousal and indexes weight column 1, so it
                # must come from index 1.
                a = labelE[1]
                # Map VA values onto the weight-histogram bin indices.
                if self.dbType == 1:
                    v = v*10+1
                    a = a*10+1
                elif self.isSemaine == 1:
                    v = v*10+10
                    a = a*10+10
                else:
                    v = v+10
                    a = a+10
                v, a = int(v), int(a)
                l_weights.append([self.weight[v, 0], self.weight[a, 1]])
            l_nc.append(ln)

        if not self.isVideo:
            if self.returnQ:
                if self.returnNoisy:
                    res = [l_imgs[0], l_VA[0], l_ldmrk[0], Mt, l_nc[0], l_qdrnt[0], l_nimgs[0]]
                else:
                    res = [l_imgs[0], l_VA[0], l_ldmrk[0], Mt, l_nc[0], l_qdrnt[0]]
            else:
                if self.returnNoisy:
                    res = [l_imgs[0], l_VA[0], l_ldmrk[0], Mt, l_nc[0], l_nimgs[0]]
                else:
                    res = [l_imgs[0], l_VA[0], l_ldmrk[0], Mt, l_nc[0]]
            if self.returnWeight:
                res.append(torch.tensor(l_weights[0]))
            if self.returnVAQ:
                res.append(torch.tensor(l_vaq[0]))
            if self.returnFName:
                res.append(l_fname[0])
            return res
        else:
            # Video mode: stack per-frame tensors along a leading sequence dim.
            lImgs = torch.stack(l_imgs)
            lVA = torch.stack(l_VA)
            lLD = torch.stack(l_ldmrk)
            l_qdrnt = torch.tensor((l_qdrnt))
            if self.returnQ:
                if self.returnNoisy:
                    res = [lImgs, lVA, lLD, Mt, l_nc, l_qdrnt, l_nimgs]
                else:
                    res = [lImgs, lVA, lLD, Mt, l_nc, l_qdrnt]
            else:
                if self.returnNoisy:
                    res = [lImgs, lVA, lLD, Mt, l_nc, l_nimgs]
                else:
                    res = [lImgs, lVA, lLD, Mt, l_nc]
            if self.returnWeight:
                l_weights = torch.tensor(l_weights)
                res.append(l_weights)
            if self.returnVAQ:
                l_vaq = torch.tensor(l_vaq)
                res.append(l_vaq)
            if self.returnFName:
                res.append(l_fname)
            return res

    def __len__(self):
        # One entry per sample (image mode) or per sequence (video mode).
        return len(self.l_imgs)
class SEWAFEWReduced(data.Dataset): #return affect on Valence[0], Arousal[1] order
mean_bgr = np.array([91.4953, 103.8827, 131.0912]) # from resnet50_ft.prototxt
def __init__(self, data_list = ["AFEW"],dir_gt = None,onlyFace = True, image_size =224,
transform = None,useIT = False,augment = False, step = 1,split = False,
nSplit = 5, listSplit = [0,1,2,3,4],isVideo = False, seqLength = None, dbType = 0,
returnQuadrant = False, returnNoisy = False, returnWeight = False, isSemaine = False):#dbtype 0 is AFEW, 1 is SEWA
self.dbType = dbType
self.isSemaine = isSemaine
self.seq_length = seqLength
self.isVideo = isVideo
self.transform = transform
self.onlyFace = onlyFace
self.augment = augment
self.imageSize = image_size
self.imageHeight = image_size
self.imageWidth = image_size
self.useIT = useIT
self.curDir = rootDir +"/"#/home/deckyal/eclipse-workspace/DeepModel/src/MMTVA/data/"
self.returnNoisy = returnNoisy
self.returnWeight = returnWeight
if self.returnWeight :
name = 'VA-Train-'+str(listSplit[0])+'.npy'
if self.dbType == 1 :
name='S-'+name
if isSemaine :
name = 'SE-VA-Train-'+str(listSplit[0])+'.npy'
print('weight',name)
weight = np.load(rootDir+"/DST-SE-AF/"+name).astype('float')+1
sum = weight.sum(0)
weight = (weight/sum)
#print('1',weight)
weight = 1/weight
#print('2',weight)
sum = weight.sum(0)
weight = weight/sum
#print('3',weight)
"just tesing for the latencyh if its possible. "
self.weight = weight
self.returnQ = returnQuadrant
if self.augment :
self.flip = RandomHorizontalFlip(1)
self.rot = RandomRotation(45)
self.occ = Occlusion(1)
self.rc = RandomResizedCrop(size = self.imageSize,scale = (0.5,1), ratio = (0.5, 1.5))
if self.returnNoisy :
self.gn = GeneralNoise(1)
self.occ = Occlusion(1)
list_gt = []
list_labels_tE = []
counter_image = 0
annotE_name = 'annot2'
if dir_gt is not None :
annot_name = dir_gt
list_missing = []
for data in data_list :
print(("Opening "+data))
fullDir = self.curDir +data+"/"
listFolder = os.listdir(fullDir)
listFolder.sort()
for tempx in range(len(listFolder)):
f = listFolder[tempx]
fullPath = os.path.join(fullDir,f)
#print('opening fullpath',fullPath)
if os.path.isdir(fullPath): # Check if object is directory
#print((f.name, f.full_path)) # Name is without extension
#c_image,c_ldmark = 0,0
if self.dbType == 1 : #we directly get the VA file in case of sewa
#first get the valence
valFile = fullPath+"/valence/"+f+"_Valence_A_Aligned.csv"
aroFile = fullPath+"/arousal/"+f+"_Arousal_A_Aligned.csv"
list_labels_tE.append([valFile,aroFile])
#print(valFile,aroFile)
for sub_f in file_walker.walk(fullPath):
if sub_f.isDirectory: # Check if object is directory
list_dta = []
#print(sub_f.name)
if(sub_f.name == 'img-128'): #Else it is the image
for sub_sub_f in sub_f.walk(): #this is the data
if(".npy" not in sub_sub_f.full_path):
list_dta.append(sub_sub_f.full_path)
list_gt.append(sorted(list_dta))
counter_image+=len(list_dta)
c_ldmrk = len(list_dta)
elif (sub_f.name == annotE_name) :
if self.dbType == 0 :
#If that's annot, add to labels_t
for sub_sub_f in sub_f.walk(): #this is the data
if(".npy" not in sub_sub_f.full_path):
list_dta.append(sub_sub_f.full_path)
list_labels_tE.append(sorted(list_dta))
self.length = counter_image
print("Now opening keylabels")
list_labelsEN = []
list_labelsE = []
for ix in range(len(list_labels_tE)) : #lbl,lble in (list_labels_t,list_labels_tE) :
lbl_2 = [] #Per folder
lbl_n2 = [] #Per folder
if self.dbType == 1 : #sewa
#print(list_labels_t[ix][0])
valFile = np.asarray(readCSV(list_labels_tE[ix][0]))
aroFile = np.asarray(readCSV(list_labels_tE[ix][1]))
#lbl_n2.append(list_labels_tE[ix][0])
for it in range(1,len(valFile)+1):
dir,_ = os.path.split(list_labels_tE[ix][0])
newName = str(it).zfill(6)+'.tmp'
lbl_n2.append(os.path.join(dir,newName))
lbl_2 = np.column_stack((valFile,aroFile))
else :
for jx in range(len (list_labels_tE[ix])): #lbl_sub in lbl :
#print(os.path.basename(list_gt[ix][jx]))
#print(os.path.basename(list_labels_t[ix][jx]))
#print(os.path.basename(list_labels_tE[ix][jx]))
if self.dbType == 0 :
lbl_subE = list_labels_tE[ix][jx]
if ('aro' in lbl_subE) :
x = []
#print(lbl_sub)
with open(lbl_subE) as file:
data2 = [re.split(r'\t+',l.strip()) for l in file]
for i in range(len(data2)) :
temp = [ float(j) for j in data2[i][0].split()]
temp.reverse() #to give the valence first. then arousal
x.append(temp)
lbl_2.append(np.array(x).flatten('F'))
lbl_n2.append(lbl_subE)
list_labelsEN.append(lbl_n2)
list_labelsE.append(lbl_2)
t_l_imgs = []
t_l_gtE = []
t_list_gtE_names = []
#print(list_labelsEN)
if not self.isVideo :
#Flatten it to one list
for i in range(0,len(list_gt)): #For each dataset
list_images = []
list_gtE_names = []
indexer = 0
list_ground_truthE = np.zeros([len(list_gt[i]),2])
for j in range(0,len(list_gt[i]),step): #for number of data #n_skip is usefull for video data
list_images.append(list_gt[i][j])
#print(list_labelsEN)
if self.dbType == 0 :
list_gtE_names.append(list_labelsEN[i][j])
else :
#list_gtE_names.append(list_labelsEN[i][0])
list_gtE_names.append(list_labelsEN[i][j])
#print(list_labelsEN[i])
'''if len(list_labels[i][j] < 1):
print(list_labels[i][j])'''
#print(len(list_labels[i][j]))
list_ground_truthE[indexer] = np.array(list_labelsE[i][j]).flatten('F')
indexer += 1
t_l_imgs.append(list_images)
t_l_gtE.append(list_ground_truthE)
t_list_gtE_names.append(list_gtE_names)
else :
if self.seq_length is None :
list_ground_truth = np.zeros([int(counter_image/(self.seq_length*step)),self.seq_length,136])
indexer = 0;
for i in range(0,len(list_gt)): #For each dataset
counter = 0
for j in range(0,int(len(list_gt[i])/(self.seq_length*step))): #for number of data/batchsize
temp = []
temp2 = np.zeros([self.seq_length,136])
i_temp = 0
for z in range(counter,counter+(self.seq_length*step),step):#1 to seq_size
temp.append(list_gt[i][z])
temp2[i_temp] = list_labels[i][z]
i_temp+=1
list_images.append(temp)
list_ground_truth[indexer] = temp2
indexer += 1
counter+=self.seq_length*step
#print counter
self.l_imgs = list_images
self.l_gt = list_ground_truth
else :
counter_seq = 0;
for i in range(0,len(list_gt)): #For each dataset
indexer = 0;
list_gtE_names = []
list_ground_truthE = np.zeros([int(len(list_gt[i])/(self.seq_length*step)),self.seq_length,2])#np.zeros([counter_image,2])
counter = 0
list_images = []
for j in range(0,int(len(list_gt[i])/(self.seq_length*step))): #for number of data/batchsize
temp = []
tmpn2 = []
temp3 = np.zeros([self.seq_length,2])
i_temp = 0
for z in range(counter,counter+(self.seq_length*step),step):#1 to seq_size
temp.append(list_gt[i][z])
temp3[i_temp] = list_labelsE[i][z].flatten('F')
if self.dbType == 0 :
#list_gtE_names.append(list_labelsEN[i][j])
tmpn2.append(list_labelsEN[i][j])
else :
#list_gtE_names.append(list_labelsEN[i][0])
tmpn2.append(list_labelsEN[i][0])
i_temp+=1
counter_seq+=1
list_images.append(temp)
list_ground_truthE[indexer] = temp3
list_gtE_names.append(tmpn2)
indexer += 1
counter+=self.seq_length*step
#print counter
t_l_imgs.append(list_images)
t_l_gtE.append(list_ground_truthE)
t_list_gtE_names.append(list_gtE_names)
self.l_imgs = []
self.l_gtE = []
self.list_gtE_names = []
#print('cimage : ',counter_image)
if split :
indexer = 0
self.l_gtE = []
totalData = len(t_l_imgs)
perSplit = int(truediv(totalData, nSplit))
for x in listSplit :
print('split : ',x)
begin = x*perSplit
if x == nSplit-1 :
end = begin + (totalData - begin)
else :
end = begin+perSplit
print(begin,end,totalData)
if not self.isVideo :
for i in range(begin,end) :
for j in range(len(t_l_imgs[i])):
#print('append ',t_l_imgs[i][j])
self.l_imgs.append(t_l_imgs[i][j])
self.l_gtE.append(t_l_gtE[i][j])
self.list_gtE_names.append(t_list_gtE_names[i][j])
indexer+=1
else :
for i in range(begin,end) :
for j in range(len(t_l_imgs[i])): #seq counter
t_img = []
t_gtE = []
t_gt_EN = []
tmp = 0
for k in range(len(t_l_imgs[i][j])): #seq size
t_img.append(t_l_imgs[i][j][k])
t_gtE.append(t_l_gtE[i][j][k])
t_gt_EN.append(t_list_gtE_names[i][j][k])
tmp+=1
#print('append ',t_img)
self.l_imgs.append(t_img)
self.l_gtE.append(t_gtE)
self.list_gtE_names.append(t_gt_EN)
indexer+=1
print(len(self.l_imgs))
self.l_gtE = np.asarray(self.l_gtE)
else :
if not self.isVideo :
self.l_gtE = np.zeros([counter_image,2])
indexer = 0
for i in range(len(t_l_imgs)):
for j in range(len(t_l_imgs[i])):
self.l_imgs.append(t_l_imgs[i][j])
print(i,j,'-',len(t_l_imgs[i]))
self.l_gtE[indexer] = t_l_gtE[i][j]
self.list_gtE_names.append(t_list_gtE_names[i][j])
indexer+=1
else :
self.l_gtE = np.zeros([counter_seq,self.seq_length,2])
indexer = 0
for i in range(len(t_l_imgs)): #dataset
for j in range(len(t_l_imgs[i])): #seq counter
t_img = []
t_gte = np.zeros([self.seq_length,2])
t_gt_n = []
t_gt_en = []
i_t = 0
for k in range(len(t_l_imgs[i][j])): #seq size
t_img.append(t_l_imgs[i][j][k])
t_gte[i_t] = t_l_gtE[i][j][k]
t_gt_en.append(t_list_gtE_names[i][j][k])
i_t+=1
self.l_imgs.append(t_img)
self.l_gtE[indexer] = t_gte
self.list_gtE_names.append(t_gt_en)
indexer+=1
print('limgs : ',len(self.l_imgs))
def __getitem__(self,index):
#Read all data, transform etc.
#In video, the output will be : [batch_size, sequence_size, channel, width, height]
#Im image : [batch_size, channel, width, height]
l_imgs = []; l_ldmrk = []; l_VA = []; l_nc = []; l_qdrnt = []#,torch.FloatTensor(label),newChannel#,x,self.list_gt_names[index]
if self.returnNoisy :
l_nimgs = []
if self.returnWeight :
l_weights = []
if not self.isVideo :
x_l = [self.l_imgs[index]];labelE_l =[self.l_gtE[index].copy()];label_n =[self.list_gtE_names[index]]
else :
x_l = self.l_imgs[index];labelE_l =self.l_gtE[index].copy();label_n =self.list_gtE_names[index]
#print('label n ',label_n)
for x,labelE,ln in zip(x_l,labelE_l,label_n) :
#print(x,labelE,label,ln)
#print('label : ',ln)
tImage = Image.open(x).convert("RGB")
tImageB = None
newChannel = None
if self.augment :
if self.returnNoisy :
sel = np.random.randint(0,3) #Skip occlusion as noise
else :
sel = np.random.randint(0,4)
#0 : neutral, 1 : horizontal flip, 2:random rotation, 3:occlusion
if sel == 0 :
pass
elif sel == 1 :
#flip = RandomHorizontalFlip_WL(1)
#tImage,label,newChannel = flip(tImage,label,newChannel)
#flip = RandomHorizontalFlip(1)
tImage = self.flip(tImage)
elif sel == 2 :
#rot = RandomRotation_WL(45)
#tImage,label,newChannel = rot(tImage,label,newChannel)
#rot = RandomRotation(45)
tImage = self.rot(tImage)
elif sel == 3 :
#occ = Occlusion_WL(1)
#tImage,label,newChannel = occ(tImage,label,newChannel)
#occ = Occlusion(1)
tImage = self.occ(tImage)
#random crop
if (np.random.randint(1,3)%2==0) :
#rc = RandomResizedCrop_WL(size = self.imageSize,scale = (0.5,1), ratio = (0.5, 1.5))
#tImage,label,newChannel= rc(tImage,label,newChannel)
#rc = RandomResizedCrop(size = self.imageSize,scale = (0.5,1), ratio = (0.5, 1.5))
tImage= self.rc(tImage)
if self.returnNoisy :
nImage = tImage.copy()
#additional blurring
if (np.random.randint(1,3)%2==0):
#sel_n = np.random.randint(1,6)
sel_n = np.random.randint(1,7)
#sel_n = 4
#gn = GeneralNoise_WL(1)
#tImage,label= gn(tImage,label,sel_n,np.random.randint(0,3))
if sel_n > 5 :
#occ = Occlusion(1)
nImage = self.occ(nImage)
else :
#rc = GeneralNoise(1)
#tImage = rc(tImage,sel_n,np.random.randint(0,3))
nImage = self.gn(nImage,sel_n,np.random.randint(0,3))
label = torch.zeros(1)
Mt = torch.zeros(1)
if self.useIT :
tImage = self.transformInternal(tImage)
if self.returnNoisy :
nImage = self.transformInternal(nImage)
else :
tImage = self.transform(tImage)
if self.returnNoisy :
nImage = self.transform(nImage)
l_imgs.append(tImage); l_VA.append(torch.FloatTensor(labelE)); l_ldmrk.append(torch.FloatTensor(label))#,x,self.list_gt_names[index]
if self.returnNoisy :
l_nimgs.append(nImage)
if self.returnQ :
if self.dbType == 1 :
min = 0; max = 1;
elif self.isSemaine == 1:
min = -1; max = 1;
else :
min = -10; max = 10;
l_qdrnt.append(toQuadrant(labelE, min, max, toOneHot=False))
if self.returnWeight :
v = labelE[0]
a = labelE[0]
if self.dbType == 1 :#sewa
v = v*10+1
a = a*10+1
elif self.isSemaine == 1 :
v = v*10+10
a = a*10+10
else :
v = v+10
a = a+10
v,a = int(v),int(a)
'''print('the v :{} a : {} db : {}'.format(v,a,self.dbType))
print(self.weight)
print(self.weight.shape)'''
l_weights.append([self.weight[v,0],self.weight[a,1]])
l_nc.append(ln)
#print('lnc : ',l_nc)
if not self.isVideo :
if self.returnQ :
if self.returnNoisy :
res = [l_imgs[0], l_VA[0], l_ldmrk[0], Mt,l_nc[0],l_qdrnt[0],l_nimgs[0]]
else :
res = [l_imgs[0], l_VA[0], l_ldmrk[0], Mt,l_nc[0],l_qdrnt[0]]
else :
if self.returnNoisy :
res = [l_imgs[0], l_VA[0], l_ldmrk[0], Mt,l_nc[0],l_nimgs[0]]
else :
res = [l_imgs[0], l_VA[0], l_ldmrk[0], Mt,l_nc[0]]
if self.returnWeight :
res.append(torch.tensor(l_weights[0]))
return res
else :
#lImgs = torch.Tensor(len(l_imgs),3,self.imageHeight,self.imageWidth)
#lVA = torch.Tensor(len(l_VA),2)
#lLD = torch.Tensor(len(l_ldmrk),136)
lImgs = torch.stack(l_imgs)
lVA = torch.stack(l_VA)
lLD = torch.stack(l_ldmrk)
#print(l_VA)
l_qdrnt = torch.tensor((l_qdrnt))
#print(lImgs.shape, l_imgs[0].shape, l_VA[0].shape,len(lImgs))
#torch.cat(l_imgs, out=lImgs)
#torch.cat(l_VA, out=lVA)
#torch.cat(l_ldmrk, out=lLD)
if self.returnQ :
if self.returnNoisy :
res = [lImgs, lVA, lLD, Mt,l_nc,l_qdrnt,l_nimgs]
else :
res = [lImgs, lVA, lLD, Mt,l_nc,l_qdrnt]
else :
if self.returnNoisy :
res = [lImgs, lVA, lLD, Mt,l_nc,l_nimgs]
else :
res = [lImgs, lVA, lLD, Mt,l_nc]
if self.returnWeight :
l_weights = torch.tensor(l_weights)
res.append(l_weights)
return res
def transformInternal(self, img):
transforms.Resize(224)(img)
img = np.array(img, dtype=np.uint8)
#img = img[:, :, ::-1] # RGB -> BGR
img = img.astype(np.float32)
img -= self.mean_bgr
img = img.transpose(2, 0, 1) # C x H x W
img = torch.from_numpy(img).float()
return img
def untransformInternal(self, img, lbl):
img = img.numpy()
img = img.transpose(1, 2, 0)
img += self.mean_bgr
img = img.astype(np.uint8)
img = img[:, :, ::-1]
return img, lbl
def param2theta(self,param, w, h):
param = np.linalg.inv(param)
theta = np.zeros([2,3])
theta[0,0] = param[0,0]
theta[0,1] = param[0,1]*h/w
theta[0,2] = param[0,2]*2/w + theta[0,0] + theta[0,1] - 1
theta[1,0] = param[1,0]*w/h
theta[1,1] = param[1,1]
theta[1,2] = param[1,2]*2/h + theta[1,0] + theta[1,1] - 1
return theta
def procrustes(self, X, Y, scaling=True, reflection='best'):
n,m = X.shape
ny,my = Y.shape
muX = X.mean(0)
muY = Y.mean(0)
X0 = X - muX
Y0 = Y - muY
ssX = (X0**2.).sum()
ssY = (Y0**2.).sum()
# centred Frobenius norm
normX = np.sqrt(ssX)
normY = np.sqrt(ssY)
# scale to equal (unit) norm
X0 /= normX
Y0 /= normY
if my < m:
Y0 = np.concatenate((Y0, np.zeros(n, m-my)),0)
# optimum rotation matrix of Y
A = np.dot(X0.T, Y0)
U,s,Vt = np.linalg.svd(A,full_matrices=False)
V = Vt.T
T = np.dot(V, U.T)
if reflection is not 'best':
# does the current solution use a reflection?
have_reflection = np.linalg.det(T) < 0
# if that's not what was specified, force another reflection
if reflection != have_reflection:
V[:,-1] *= -1
s[-1] *= -1
T = np.dot(V, U.T)
traceTA = s.sum()
if scaling:
# optimum scaling of Y
b = traceTA * normX / normY
# standarised distance between X and b*Y*T + c
d = 1 - traceTA**2
# transformed coords
Z = normX*traceTA*np.dot(Y0, T) + muX
else:
b = 1
d = 1 + ssY/ssX - 2 * traceTA * normY / normX
Z = normY*np.dot(Y0, T) + muX
# transformation matrix
if my < m:
T = T[:my,:]
c = muX - b*np.dot(muY, T)
#transformation values
#tform = {'rotation':T, 'scale':b, 'translation':c}
tform = np.append(b*T,[c],axis = 0).T
tform = np.append(tform,[[0,0,1]],axis = 0)
return d, Z, tform
    def __len__(self):
        """Return the number of loaded samples (images, or sequences in video mode)."""
        return len(self.l_imgs)
def convertName(input):
    """Map a split name containing a number ('trainN'/'devN'/'testN') to a
    numeric id: train -> N, dev -> 10+N, test -> 20+N. Returns None when the
    name matches none of the three prefixes."""
    number = int(re.search(r'\d+', input).group())
    for tag, offset in (('train', 0), ('dev', 10), ('test', 20)):
        if tag in input:
            return offset + number
def cropImage():
    """Iterate a dataset, write out cropped face images + landmark files, run
    each crop through a denoising autoencoder/classifier pair, and save the
    cleaned images; non-neutral detections are logged to a text file.

    Heavy side effects: reads/writes image and .pts files, opens cv2 windows,
    requires CUDA models loaded from disk. Depends on module-level helpers
    (ImageDataset, UnNormalize, GeneralDAEX, DAEE, LogisticRegression,
    checkDirMake, write_kp_file, utils, curDir) defined elsewhere in the file.
    """
    batch_size = 20
    image_size = 224
    isVideo = False
    doConversion = False
    lndmrkNumber =68
    #lndmarkNumber = 49
    isSewa = False
    desireS = 224
    smll = desireS!=224
    ratio = truediv(desireS,224)
    # NOTE(review): ratio is a float (224/224 == 1.0 is truthy), so this branch
    # always picks '128' here — confirm the intended condition (smll?).
    if ratio :
        displaySize = str(128)
    else :
        displaySize = str(image_size)
    # Truncate the "denoised label" log file.
    err_denoised = curDir+"de-label-"+'semaine'+".txt"
    checkDirMake(os.path.dirname(err_denoised))
    print('file of denoising : ',err_denoised)
    fileOfDen = open(err_denoised,'w')
    fileOfDen.close()
    #theDataSet = "AFEW-VA-Small"
    #theDataSet = "AFEW-VA-Fixed"
    #theDataSet = "SEWA-small"
    #theDataSet = "SEWA"
    theDataSet = "Sem-Short"
    oriDir = '/home/deckyal/eclipse-workspace/DeepModel/src/MMTVA/data/'+theDataSet
    #oriDir = '/media/deckyal/INT-2TB/comparisons/'+theDataSet + "/" + str(theNoiseType)+"-"+str(theNoiseParam)+'/'
    targetDir = '/home/deckyal/eclipse-workspace/DeepModel/src/MMTVA/data/'+theDataSet+'-ext'
    checkDirMake(targetDir)
    data_transform = transforms.Compose([
        transforms.Resize((image_size,image_size)),
        transforms.ToTensor(),
        transforms.Normalize(mean = (.5,.5,.5), std = (.5,.5,.5))
        ])
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    ID = ImageDataset(data_list = [theDataSet],onlyFace=True,transform=data_transform,image_size=image_size
        ,injectedLink = oriDir,isVideo = isVideo,giveCroppedFace=True,
        annotName='annot',lndmarkNumber=lndmrkNumber,isSewa = isSewa)
    #annotName = annotOri
    dataloader = torch.utils.data.DataLoader(dataset = ID, batch_size = batch_size, shuffle = False)
    unorm = UnNormalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
    notNeutral = 0
    list_nn = []
    list_name_nn = []
    print('inside',len(dataloader))
    # Load the denoising autoencoder + noise-type classifier.
    GD = GeneralDAEX(nClass = 3)
    dircl1 = '/home/deckyal/eclipse-workspace/FaceTracking/src/toBeUsedT-5Aug/'+'Mix3-combineAE.pt'
    dircl2 = '/home/deckyal/eclipse-workspace/FaceTracking/src/toBeUsedT-5Aug/'+'Mix3-combineCL.pt'
    outDir = "mix3-"
    model_lg = LogisticRegression(512, 3)
    netAEC = DAEE()
    netAEC.load_state_dict(torch.load(dircl1))
    netAEC = netAEC.cuda()
    netAEC.eval()
    #theDataSetOut = theDataVideo+outDir
    model_lg.load_state_dict(torch.load(dircl2))
    model_lg = model_lg.cuda()
    model_lg.eval()
    #print(netAEC.fce.weight)
    print(model_lg.linear2.weight)
    #exit(0)
    isVideo = False
    #exit(0)
    data_transform = transforms.Compose([
        transforms.Resize((image_size,image_size)),
        #transforms.CenterCrop(image_size),
        transforms.ToTensor(),
        #transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        transforms.Normalize(mean = (.5,.5,.5), std = (.5,.5,.5))
        ])
    # Plot some training images
    for inside in dataloader : # = next(iter(dataloader))
        print(len(inside))
        real_batch,gt,cr,x,gtcr = inside[0],inside[1],inside[2],inside[3],inside[4]
        if isSewa :
            gtcr2 = inside[5]
        else :
            gtcr2 = gtcr
        print(real_batch.size())
        for imgs,gts,imgcrs,fileName,gtcrs,gts2 in zip(real_batch.cuda(),gt.numpy(),cr.cuda(),x,gtcr.numpy(),gtcr2.numpy()):
            print(fileName)
            #first save the original image
            #now getting the name and file path
            filePath = fileName.split(os.sep)
            annotPath = copy.copy(filePath)
            if isSewa :
                annotPathSewa = copy.copy(filePath)
            filePathCleaned = copy.copy(filePath)
            # Derived output folders: '-<size>' for crops, '-<size>-C' for cleaned.
            filePath[-2]+='-'+displaySize
            filePathCleaned[-2]+='-'+displaySize+'-C'
            if isSewa :
                annotPath[-2]='annotOri-'+displaySize
                annotPathSewa[-2]='annot-'+displaySize
            else :
                annotPath[-2]='annot-'+displaySize
            newFilePath = '/'.join(filePath[:-1])
            newAnnotPath = '/'.join(annotPath[:-1])
            if isSewa :
                newAnnotPathSewa = '/'.join(annotPathSewa[:-1])
            newClFilePath = '/'.join(filePathCleaned[:-1])
            #print(filePath,annotPath)
            print(newFilePath, newAnnotPath)
            #ifolder = filePath.index(theDataVideo)
            image_name = filePath[-1]
            annot_name = os.path.splitext(image_name)[0]+'.pts'
            checkDirMake(newFilePath)
            checkDirMake(newAnnotPath)
            if isSewa :
                checkDirMake(newAnnotPathSewa)
            checkDirMake(newClFilePath)
            finalTargetImage = newFilePath+'/'+image_name
            finalTargetImageCl = newClFilePath+'/'+image_name
            finalTargetAnnot = newAnnotPath+'/'+annot_name
            if isSewa :
                finalTargetAnnotSewa = newAnnotPathSewa+'/'+annot_name
            # Un-normalize the cropped face and save it as BGR uint8.
            theOri = unorm(imgcrs.detach().cpu()).numpy()*255
            theOri = cv2.cvtColor(theOri.transpose((1,2,0)).astype(np.uint8 ),cv2.COLOR_RGB2BGR)
            if smll :
                theOri = cv2.resize(theOri,(128,128))
            cv2.imwrite(finalTargetImage,theOri)
            if smll :
                # Rescale landmark coordinates to the smaller output size.
                gtcrs[:lndmrkNumber] *= ratio
                gtcrs[lndmrkNumber:] *= ratio
                if isSewa :
                    gts2[:68] *= ratio
                    gts2[68:] *= ratio
            write_kp_file(finalTargetAnnot,gtcrs,length = lndmrkNumber)
            if isSewa :
                write_kp_file(finalTargetAnnotSewa,gts2,length = 68)
            #print(gtcrs)
            #Now see the result back
            r_image = cv2.imread(finalTargetImage)
            print(finalTargetAnnot)
            predicted = utils.read_kp_file(finalTargetAnnot, True)
            for z22 in range(lndmrkNumber) :
                #print(z22)
                cv2.circle(r_image,(int(predicted[z22]),int(predicted[z22+lndmrkNumber])),2,(0,255,0))
            if isSewa:
                predicted2 = utils.read_kp_file(finalTargetAnnotSewa, True)
                for z22 in range(68) :
                    cv2.circle(r_image,(int(predicted2[z22]),int(predicted2[z22+68])),2,(255,255,255))
            cv2.imshow('test',r_image)
            cv2.waitKey(1)
            #exit(0)
            #second get the cleaned one
            #if cl_type == 1 :
            recon_batch,xe = netAEC(imgs.unsqueeze(0))
            #else :
            #    xe = netAEC(imgs.unsqueeze(0))
            labels = model_lg(xe)
            x, y = torch.max(labels, 1)
            ll = y.cpu()[0]
            print('res',ll)
            #res = GD.forward(imgs.unsqueeze(0), y[0])[0].detach().cpu()
            res = GD.forward(imgcrs.unsqueeze(0), y[0])[0].detach().cpu()
            theRest = unorm(res).numpy()*255
            print(theRest.shape)
            theRest = cv2.cvtColor(theRest.transpose((1,2,0)).astype(np.uint8 ),cv2.COLOR_RGB2BGR)
            if smll :
                theRest = cv2.resize(theRest,(128,128))
            theOri = unorm(imgs.detach().cpu()).numpy()*255
            print(theOri.shape)
            theOri = cv2.cvtColor(theOri.transpose((1,2,0)).astype(np.uint8 ),cv2.COLOR_RGB2BGR)
            cv2.imshow('theori',theRest)
            cv2.waitKey(1)
            cv2.imwrite(finalTargetImageCl,theRest)
            #third save the cleaned one
            #exit(0)
            # NOTE(review): ``or True`` makes this branch unconditional — every
            # sample is logged, not only non-neutral ones. Confirm intent.
            if ll != 0 or True:
                if ll != 0:
                    notNeutral+=1
                    list_nn.append(ll)
                    list_name_nn.append(finalTargetImage)
                fileOfDen = open(err_denoised,'a')
                fileOfDen.write(str(int(ll))+','+finalTargetImage+"\n")
                fileOfDen.close()
                print('status : ',ll)
            print(y,labels)
    print("not neutral count : ",notNeutral)
def getDistributionAC():
    """Compute and plot the valence/arousal histogram of the AffectChallenge
    train and validation splits; saves the counts as .npy and the bar plots as
    'AC.png'. Depends on the module-level AFFChallenge dataset class and
    ``transforms``; calls exit(0) at the end (script-style helper).
    """
    import matplotlib.pyplot as plt
    targetDir = '/home/deckyal/eclipse-workspace/FaceTracking/FaceTracking-NR/StarGAN_Collections/stargan-master/distribution/'
    tname = "AC"
    image_size = 112
    batch_size = 20000
    transform = transforms.Compose([
        #transforms.Resize((image_size,image_size)),
        transforms.ToTensor(),
        transforms.Normalize(mean = (.5,.5,.5), std = (.5,.5,.5))
        ])
    # Disabled legacy path: re-plot previously saved split histograms.
    if False :
        a = np.array(range(20))
        v = np.array(range(20))
        tx = np.array(range(20))
        for i in range(5) :
            z = np.load(targetDir+'VA-Train-'+str(i)+'.npy')
            la = z[:,0]
            lv = z[:,1]
            a+=la
            v+=lv
            z = np.load(targetDir+'VA-Test-'+str(i)+'.npy')
            la = z[:,0]
            lv = z[:,1]
            a+=la
            v+=lv
        fig = plt.figure()
        ax = plt.subplot(2, 2, 1)
        ax.bar(tx,a)
        ax.set_title('a')
        ax = plt.subplot(2, 2, 2)
        ax.bar(tx,v)
        ax.set_title('v')
        plt.show()
        exit(0)
    ID = AFFChallenge(data_list = ["AffectChallenge"],mode = 'Train',onlyFace = True, image_size =112,
        transform = transform,useIT = False,augment = False, step = 1,isVideo = False, seqLength = None, dbType = 0,
        returnQuadrant = False, returnNoisy = False, returnWeight = False)
    VD = AFFChallenge(data_list = ["AffectChallenge"],mode = 'Val',onlyFace = True, image_size =112,
        transform = transform,useIT = False,augment = False, step = 1,isVideo = False, seqLength = None, dbType = 0,
        returnQuadrant = False, returnNoisy = False, returnWeight = False)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # FIX: removed ``dataloader = DataLoader(dataset = data, ...)`` — ``data``
    # was never defined (NameError) and the loader was never used.
    dataloaderTrn = torch.utils.data.DataLoader(dataset = ID, batch_size = batch_size, shuffle = False)
    dataloaderVal = torch.utils.data.DataLoader(dataset = VD, batch_size = batch_size, shuffle = False)
    # Histogram bins for VA rescaled from [-1, 1] to [0, 20].
    listV = np.array(range(0,21))
    listA = np.array(range(0,21))
    listVx = np.array(range(0,21))
    listAx = np.array(range(0,21))
    listVt = np.array(range(0,21))
    listAt = np.array(range(0,21))
    x = 0
    for lImgs,vas,gt,M,ex in (dataloaderTrn) :
        for va in vas :
            print(x,len(dataloaderTrn)*batch_size)
            lva = (va.cpu().numpy()) * 10+10
            name = 'AC-Train'
            print(va)
            print(lva)
            listV[int(round(lva[0]))]+=1
            listA[int(round(lva[1]))]+=1
            x+=1
    x = 0
    print(listV,listA)
    np.save(targetDir+name+'.npy',np.column_stack((listV,listA)))
    for real_batch,vas,gt,M,ex in (dataloaderVal) :
        for va in vas :
            print(x,len(dataloaderVal)*batch_size)
            lva = (va.cpu().numpy()) * 10+10
            name = 'AC-Test-'
            listVt[int(round(lva[0]))]+=1
            listAt[int(round(lva[1]))]+=1
            x+=1
    print(listVt,listAt)
    np.save(targetDir+name+'.npy',np.column_stack((listVt,listAt)))
    # 2x2 grid: train V / train A on top, test V / test A below.
    fig = plt.figure()
    ax = plt.subplot(2, 2, 1)
    ax.bar(listVx,listV)
    ax.set_title('v train')
    ax = plt.subplot(2, 2, 2)
    ax.bar(listAx,listA)
    ax.set_title('A train')
    ax = plt.subplot(2, 2, 3)
    ax.bar(listVx,listVt)
    ax.set_title('v test')
    ax = plt.subplot(2, 2, 4)
    ax.bar(listAx,listAt)
    ax.set_title('A test')
    #plt.show()
    plt.savefig(tname+".png")
    exit(0)
def getDistribution():
    """Compute and plot per-split valence/arousal histograms for the
    SEWA / AFEW-VA / SEMAINE datasets (5-fold cross-validation splits),
    saving counts as .npy and bar plots as PNG. Script-style helper with
    exit(0) calls; depends on the module-level SEWAFEWReduced dataset class
    and ``transforms``.
    """
    import matplotlib.pyplot as plt
    targetDir = '/home/deckyal/eclipse-workspace/FaceTracking/FaceTracking-NR/StarGAN_Collections/stargan-master/distribution/'
    isAFEW = True
    isSemaine = True
    name = "AFEW"
    if not isAFEW :
        name = "SEWA"
    image_size = 224
    batch_size = 1000#5000
    transform = transforms.Compose([
        #transforms.Resize((image_size,image_size)),
        transforms.ToTensor(),
        transforms.Normalize(mean = (.5,.5,.5), std = (.5,.5,.5))
        ])
    # Disabled legacy path: aggregate previously saved split histograms.
    if False :
        a = np.array(range(20))
        v = np.array(range(20))
        tx = np.array(range(20))
        for i in range(5) :
            z = np.load(targetDir+'VA-Train-'+str(i)+'.npy')
            la = z[:,0]
            lv = z[:,1]
            a+=la
            v+=lv
            z = np.load(targetDir+'VA-Test-'+str(i)+'.npy')
            la = z[:,0]
            lv = z[:,1]
            a+=la
            v+=lv
        fig = plt.figure()
        ax = plt.subplot(2, 2, 1)
        ax.bar(tx,a)
        ax.set_title('a')
        ax = plt.subplot(2, 2, 2)
        ax.bar(tx,v)
        ax.set_title('v')
        plt.show()
        exit(0)
    for split in range(5) :
        minA,minV = 9999,9999
        maxA,maxV = -9999,-9999
        multi_gpu = False
        testSplit = split
        print("Test split " , testSplit)
        nSplit = 5
        listSplit = []
        for i in range(nSplit):
            if i!=testSplit :
                listSplit.append(i)
        print(listSplit)
        if not isAFEW :
            ID = SEWAFEWReduced(data_list = ["SEWA-small"], dir_gt = None, onlyFace = True, image_size = image_size,
                transform = transform, useIT = True, augment = False, step = 1,split=True, nSplit = 5,listSplit=listSplit,
                isVideo=False, seqLength = 6,dbType = 1)
            VD = SEWAFEWReduced(data_list = ["SEWA-small"], dir_gt = None, onlyFace = True, image_size = image_size,
                transform = transform, useIT = True, augment = False, step = 1,split=True, nSplit = 5,listSplit=[testSplit],
                isVideo=False, seqLength = 6,dbType = 1)
        else :
            if isSemaine :
                ID = SEWAFEWReduced(data_list = ["Sem-Short"], dir_gt = None, onlyFace = True, image_size = image_size,
                    transform = transform, useIT = True, augment = False, step = 1,split=True, nSplit = 5,listSplit=listSplit,
                    isVideo=False, seqLength = 6,dbType = 0)
                VD = SEWAFEWReduced(data_list = ["Sem-Short"], dir_gt = None, onlyFace = True, image_size = image_size,
                    transform = transform, useIT = True, augment = False, step = 1,split=True, nSplit = 5,listSplit=[testSplit],
                    isVideo=False, seqLength = 6,dbType = 0)
            else :
                ID = SEWAFEWReduced(data_list = ["AFEW-VA-Fixed"], dir_gt = None, onlyFace = True, image_size = image_size,
                    transform = transform, useIT = True, augment = False, step = 1,split=True, nSplit = 5,listSplit=listSplit,
                    isVideo=False, seqLength = 6,dbType = 0)
                VD = SEWAFEWReduced(data_list = ["AFEW-VA-Fixed"], dir_gt = None, onlyFace = True, image_size = image_size,
                    transform = transform, useIT = True, augment = False, step = 1,split=True, nSplit = 5,listSplit=[testSplit],
                    isVideo=False, seqLength = 6,dbType = 0)
        dataloaderTrn = torch.utils.data.DataLoader(dataset = ID, batch_size = batch_size, shuffle = True)
        dataloaderVal = torch.utils.data.DataLoader(dataset = VD, batch_size = batch_size, shuffle = True)
        # NOTE(review): listVall/listAall are only created in the isSemaine
        # branch but are incremented unconditionally below — with
        # isSemaine=False, isAFEW=True this would raise NameError.
        if isSemaine : #-1 to 1
            listV = np.array(range(0,20))
            listA = np.array(range(0,20))
            listVx = np.array(range(0,20))
            listAx = np.array(range(0,20))
            listVt = np.array(range(0,20))
            listAt = np.array(range(0,20))
            listVall = np.array(range(0,20))
            listAall = np.array(range(0,20))
        elif isAFEW : #-10 to 10
            listV = np.array(range(0,20))
            listA = np.array(range(0,20))
            listVx = np.array(range(0,20))
            listAx = np.array(range(0,20))
            listVt = np.array(range(0,20))
            listAt = np.array(range(0,20))
        else : #0 to 1
            listV = np.array(range(0,12))
            listA = np.array(range(0,12))
            listVx = np.array(range(0,12))
            listAx = np.array(range(0,12))
            listVt = np.array(range(0,12))
            listAt = np.array(range(0,12))
        x = 0
        temp = []
        for real_batch,vas,gt,M,_ in (dataloaderTrn) :
            for va in vas :
                print(x)
                print(va)
                t = va.cpu().numpy()
                # Rescale VA to non-negative bin indices per dataset range.
                if isSemaine :
                    lva = (va.cpu().numpy()) * 10+10
                    name = 'SE-VA-Train-'
                elif not isAFEW :
                    lva = (va.cpu().numpy()) * 10+1
                    name = 'S-VA-Train-'
                else :
                    lva = (va.cpu().numpy())+10
                    name = 'VA-Train-'
                listV[int(round(lva[0]))]+=1
                listA[int(round(lva[1]))]+=1
                # NOTE(review): both "all" accumulators are indexed with
                # lva[1]; listVall most likely should use lva[0] — confirm.
                listVall[int(round(lva[1]))]+=1
                listAall[int(round(lva[1]))]+=1
                print(lva)
                temp.append(va[0])
                x+=1
                if minV > t[0]:
                    minV = t[0]
                if maxV < t[0]:
                    maxV = t[0]
                if minA > t[1]:
                    minA = t[1]
                if maxA < t[1]:
                    maxA = t[1]
        x = 0
        print(listV,listA)
        np.save(targetDir+name+str(testSplit)+'.npy',np.column_stack((listV,listA)))
        for real_batch,vas,gt,M,_ in (dataloaderVal) :
            for va in vas :
                print(x)
                t = va.cpu().numpy()
                if isSemaine :
                    lva = (va.cpu().numpy()) * 10+10
                    name = 'SE-VA-Test-'
                elif not isAFEW : #sewa
                    lva = (va.cpu().numpy()) * 10+1
                    name = 'S-VA-Test-'
                else :
                    lva = (va.cpu().numpy())+10
                    name = 'VA-Test-'
                listVt[int(round(lva[0]))]+=1
                listAt[int(round(lva[1]))]+=1
                x+=1
                if minV > t[0]:
                    minV = t[0]
                if maxV < t[0]:
                    maxV = t[0]
                if minA > t[1]:
                    minA = t[1]
                if maxA < t[1]:
                    maxA = t[1]
        print(listVt,listAt)
        np.save(targetDir+name+str(testSplit)+'.npy',np.column_stack((listVt,listAt)))
        print('minmax',minA,minV,maxA,maxV)
        # 2x2 grid: train V / train A on top, test V / test A below.
        fig = plt.figure()
        ax = plt.subplot(2, 2, 1)
        ax.bar(listVx,listV)
        ax.set_title('v train')
        ax = plt.subplot(2, 2, 2)
        ax.bar(listAx,listA)
        ax.set_title('A train')
        ax = plt.subplot(2, 2, 3)
        ax.bar(listVx,listVt)
        ax.set_title('v test')
        ax = plt.subplot(2, 2, 4)
        ax.bar(listAx,listAt)
        ax.set_title('A test')
        #plt.show()
        plt.savefig(name+'-'+str(split)+".png")
        # NOTE(review): this exit(0) terminates after the first split, making
        # the enclosing for-split loop effectively single-iteration.
        exit(0)
    exit(0)
def checkQuadrant() :
    """Smoke-check the quadrant mapping by printing one-hot quadrants for the
    four extreme (valence, arousal) corners."""
    def toQuadrant(inputData = None, min = -10, max = 10, toOneHot = False):
        # Map a (valence, arousal) pair to a quadrant index:
        # 0: V high / A high, 1: V low / A high, 2: V low / A low, 3: V high / A low.
        threshold = truediv(min, max)
        low_v = inputData[0] < threshold
        low_a = inputData[1] < threshold
        quadrant = {(True, True): 2, (True, False): 1,
                    (False, False): 0, (False, True): 3}[(low_v, low_a)]
        if toOneHot :
            onehot = np.zeros(4)
            onehot[quadrant] += 1
            return onehot
        return quadrant
    # Val, arou — the four extreme corners, in the original print order.
    for pair in ([-10, -10], [-10, 10], [10, -10], [10, 10]):
        print(toQuadrant(inputData=pair, toOneHot=True))
| 111,306
| 37.381724
| 243
|
py
|
PROTES
|
PROTES-main/setup.py
|
import os
import re
from setuptools import setup
def find_packages(package, basepath):
    """Recursively collect dotted package names for *package* rooted at
    *basepath*, including the root package itself."""
    found = [package]
    for entry in os.listdir(basepath):
        subpath = os.path.join(basepath, entry)
        if os.path.isdir(subpath):
            found.extend(find_packages('%s.%s' % (package, entry), subpath))
    return found
# Package metadata and setup() invocation for the 'protes' distribution.
here = os.path.abspath(os.path.dirname(__file__))

desc = 'Method PROTES (PRobability Optimizer with TEnsor Sampling) for derivative-free optimization of the multidimensional arrays and discretized multivariate functions based on the tensor train (TT) format'

# Long description comes straight from the README.
with open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
    desc_long = f.read()

# Single-source the version: parse __version__ from the package __init__.
with open(os.path.join(here, 'protes/__init__.py'), encoding='utf-8') as f:
    text = f.read()
    version = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", text, re.M)
    version = version.group(1)

# Install requirements: one per line; skip blank/too-short lines.
with open(os.path.join(here, 'requirements.txt'), encoding='utf-8') as f:
    requirements = f.read().split('\n')
    requirements = [r for r in requirements if len(r) >= 3]

setup_args = dict(
    name='protes',
    version=version,
    description=desc,
    long_description=desc_long,
    long_description_content_type='text/markdown',
    author='Andrei Chertkov',
    author_email='andre.chertkov@gmail.com',
    url='https://github.com/anabatsh/PROTES',
    classifiers=[
        'Development Status :: 4 - Beta', # 3 - Alpha, 5 - Production/Stable
        'License :: OSI Approved :: MIT License',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Mathematics',
        'Topic :: Scientific/Engineering :: Information Analysis',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Intended Audience :: Science/Research',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
    ],
    keywords='Derivative-free optimization multidimensional optimization low-rank representation tensor train format',
    packages=find_packages('protes', './protes/'),
    python_requires='>=3.8',
    project_urls={
        'Source': 'https://github.com/anabatsh/PROTES',
    },
)

if __name__ == '__main__':
    setup(
        **setup_args,
        install_requires=requirements,
        include_package_data=True)
| 2,467
| 34.257143
| 208
|
py
|
PROTES
|
PROTES-main/protes/protes.py
|
import jax
import jax.numpy as jnp
import optax
from time import perf_counter as tpc
def protes(f, d, n, m, k=100, k_top=10, k_gd=1, lr=5.E-2, r=5, seed=0,
           is_max=False, log=False, log_ind=False, info=None, P=None,
           with_info_i_opt_list=False, with_info_full=False):
    """Optimize a d-dimensional array/function by TT-probability sampling.

    Repeatedly samples k multi-indices from a TT-parameterized distribution P,
    evaluates the target f on the batch, and raises the likelihood of the
    k_top best samples via k_gd Adam steps, until m evaluations are spent.

    Args:
        f: batch objective; maps an (k, d) index array to k values.
        d, n: number of dimensions and mode size.
        m: total budget of function evaluations.
        k, k_top, k_gd, lr, r: batch size, elite size, gradient steps per
            batch, learning rate, TT-rank.
        seed: PRNG seed; is_max: maximize instead of minimize.
        log, log_ind: enable progress logging (and index printing).
        info: optional dict to receive run statistics (filled in place).
        P: optional initial TT-tensor [Yl, Ym, Yr]; random if None.

    Returns:
        (i_opt, y_opt): the best multi-index found and its value.
    """
    time = tpc()
    # FIX: avoid the mutable default argument ``info={}`` (state shared across
    # calls); a fresh dict is created when the caller does not supply one.
    if info is None:
        info = {}
    info.update({'d': d, 'n': n, 'm_max': m, 'm': 0, 'k': k, 'k_top': k_top,
                 'k_gd': k_gd, 'lr': lr, 'r': r, 'seed': seed, 'is_max': is_max,
                 'is_rand': P is None, 't': 0, 'i_opt': None, 'y_opt': None,
                 'm_opt_list': [], 'i_opt_list': [], 'y_opt_list': []})
    if with_info_full:
        info.update({
            'P_list': [], 'I_list': [], 'y_list': []})
    rng = jax.random.PRNGKey(seed)
    if P is None:
        rng, key = jax.random.split(rng)
        P = _generate_initial(d, n, r, key)
    elif len(P[1].shape) != 4:
        raise ValueError('Initial P tensor should have special format')
    optim = optax.adam(lr)
    state = optim.init(P)
    interface_matrices = jax.jit(_interface_matrices)
    sample = jax.jit(jax.vmap(_sample, (None, None, None, None, 0)))
    likelihood = jax.jit(jax.vmap(_likelihood, (None, None, None, None, 0)))
    @jax.jit
    def loss(P_cur, I_cur):
        # Negative mean log-likelihood of the elite indices under P_cur.
        Pl, Pm, Pr = P_cur
        Zm = interface_matrices(Pm, Pr)
        l = likelihood(Pl, Pm, Pr, Zm, I_cur)
        return jnp.mean(-l)
    loss_grad = jax.grad(loss)
    @jax.jit
    def optimize(state, P_cur, I_cur):
        grads = loss_grad(P_cur, I_cur)
        updates, state = optim.update(grads, state)
        P_cur = jax.tree_util.tree_map(lambda u, p: p + u, updates, P_cur)
        return state, P_cur
    while True:
        Pl, Pm, Pr = P
        Zm = interface_matrices(Pm, Pr)
        rng, key = jax.random.split(rng)
        I = sample(Pl, Pm, Pr, Zm, jax.random.split(key, k))
        y = f(I)
        y = jnp.array(y)
        info['m'] += y.shape[0]
        is_new = _check(P, I, y, info, with_info_i_opt_list, with_info_full)
        if info['m'] >= m:
            info['t'] = tpc() - time
            break
        # Select the k_top best samples (elite set) of this batch.
        ind = jnp.argsort(y, kind='stable')
        ind = (ind[::-1] if is_max else ind)[:k_top]
        for _ in range(k_gd):
            state, P = optimize(state, P, I[ind, :])
        info['t'] = tpc() - time
        _log(info, log, log_ind, is_new)
    _log(info, log, log_ind, is_new, is_end=True)
    return info['i_opt'], info['y_opt']
def _check(P, I, y, info, with_info_i_opt_list, with_info_full):
    """Record the best point of the current batch into ``info``.

    Returns a truthy value when the batch improved on the stored optimum.
    """
    # Locate the batch optimum (max or min depending on the task).
    pos = jnp.argmax(y) if info['is_max'] else jnp.argmin(y)
    i_cand = I[pos, :]
    y_cand = y[pos]

    improved = info['y_opt'] is None
    improved = improved or (info['is_max'] and info['y_opt'] < y_cand)
    improved = improved or (not info['is_max'] and info['y_opt'] > y_cand)

    if improved:
        info['i_opt'] = i_cand
        info['y_opt'] = y_cand

    if improved or with_info_full:
        info['m_opt_list'].append(info['m'])
        info['y_opt_list'].append(info['y_opt'])
        if with_info_i_opt_list or with_info_full:
            info['i_opt_list'].append(info['i_opt'].copy())

    if with_info_full:
        info['P_list'].append([G.copy() for G in P])
        info['I_list'].append(I.copy())
        info['y_list'].append(y.copy())

    return improved
def _generate_initial(d, n, r, key):
    """Create a random rank-``r`` TT-tensor: left, stacked middle, right cores."""
    key_l, key_m, key_r = jax.random.split(key, 3)
    return [
        jax.random.uniform(key_l, (1, n, r)),
        jax.random.uniform(key_m, (d - 2, r, n, r)),
        jax.random.uniform(key_r, (r, n, 1)),
    ]
def _interface_matrices(Ym, Yr):
    """Compute the "interface matrices" for the TT-tensor.

    Right-to-left partial contractions of the middle and right cores, each
    summed over its mode axis and normalized; used by sampling/likelihood.
    """
    def body(Z, Y_cur):
        # Contract the mode-summed core with the accumulated interface vector.
        Z = jnp.sum(Y_cur, axis=1) @ Z
        Z /= jnp.linalg.norm(Z)  # keep the running vector well-scaled
        return Z, Z
    Z, Zr = body(jnp.ones(1), Yr)
    # Scan the middle cores from right to left, collecting every interface.
    _, Zm = jax.lax.scan(body, Z, Ym, reverse=True)
    return jnp.vstack((Zm[1:], Zr))
def _likelihood(Yl, Ym, Yr, Zm, i):
    """Compute the log-likelihood of multi-index ``i`` under the TT-tensor."""
    def body(Q, data):
        I_cur, Y_cur, Z_cur = data
        # Marginal distribution over the current mode given prefix state Q.
        G = jnp.einsum('r,riq,q->i', Q, Y_cur, Z_cur)
        G = jnp.abs(G)
        G /= jnp.sum(G)
        # Fold the chosen index into the prefix state and renormalize.
        Q = jnp.einsum('r,rq->q', Q, Y_cur[:, I_cur, :])
        Q /= jnp.linalg.norm(Q)
        return Q, G[I_cur]
    # NOTE(review): _sample uses Zm[0] as the first interface vector while
    # here Yl[0, i[0], :] is passed -- confirm this asymmetry is intended.
    Q, yl = body(jnp.ones(1), (i[0], Yl, Yl[0, i[0], :]))
    Q, ym = jax.lax.scan(body, Q, (i[1:-1], Ym, Zm))
    Q, yr = body(Q, (i[-1], Yr, jnp.ones(1)))
    y = jnp.hstack((jnp.array(yl), ym, jnp.array(yr)))
    return jnp.sum(jnp.log(jnp.array(y)))
def _log(info, log=False, log_ind=False, is_new=False, is_end=False):
    """Print one line with the current optimization progress."""
    if not log or (not is_new and not is_end):
        return
    parts = [
        f'm {info["m"]:-7.1e} | ',    # evaluations used
        f't {info["t"]:-9.3e} | ',    # elapsed time, sec
        f'y {info["y_opt"]:-11.4e}',  # best value so far
    ]
    text = 'protes > ' + ''.join(parts)
    if log_ind:
        text += ' | i ' + ' '.join(str(i) for i in info['i_opt'])
    if is_end:
        text += ' <<< DONE'
    print(text)
def _sample(Yl, Ym, Yr, Zm, key):
    """Generate one multi-index sampled from the probability TT-tensor."""
    def body(Q, data):
        key_cur, Y_cur, Z_cur = data
        # Conditional distribution over the current mode given prefix state Q.
        G = jnp.einsum('r,riq,q->i', Q, Y_cur, Z_cur)
        G = jnp.abs(G)
        G /= jnp.sum(G)
        i = jax.random.choice(key_cur, jnp.arange(Y_cur.shape[1]), p=G)
        # Fold the sampled index into the prefix state and renormalize.
        Q = jnp.einsum('r,rq->q', Q, Y_cur[:, i, :])
        Q /= jnp.linalg.norm(Q)
        return Q, i
    keys = jax.random.split(key, len(Ym) + 2)
    Q, il = body(jnp.ones(1), (keys[0], Yl, Zm[0]))
    Q, im = jax.lax.scan(body, Q, (keys[1:-1], Ym, Zm))
    Q, ir = body(Q, (keys[-1], Yr, jnp.ones(1)))
    il = jnp.array(il, dtype=jnp.int32)
    ir = jnp.array(ir, dtype=jnp.int32)
    return jnp.hstack((il, im, ir))
| 5,895
| 28.333333
| 78
|
py
|
PROTES
|
PROTES-main/protes/protes_general.py
|
import jax
import jax.numpy as jnp
import optax
from time import perf_counter as tpc
def protes_general(f, n, m, k=100, k_top=10, k_gd=1, lr=5.E-2, r=5, seed=0,
                   is_max=False, log=False, log_ind=False, info={}, P=None,
                   with_info_i_opt_list=False, with_info_full=False):
    """PROTES optimizer for arbitrary per-mode sizes ``n`` (list of ints).

    Unlike ``protes``, the probability tensor here is a general TT-tensor
    with one core per dimension, so every mode may have its own size.

    NOTE(review): ``info={}`` is a mutable default argument shared across
    calls; callers should pass a fresh dict -- confirm this is intended.

    Returns (best multi-index found, best function value).
    """
    time = tpc()
    info.update({'n': n, 'm_max': m, 'm': 0, 'k': k, 'k_top': k_top,
        'k_gd': k_gd, 'lr': lr, 'r': r, 'seed': seed, 'is_max': is_max,
        'is_rand': P is None, 't': 0, 'i_opt': None, 'y_opt': None,
        'm_opt_list': [], 'i_opt_list': [], 'y_opt_list': []})
    if with_info_full:
        info.update({
            'P_list': [], 'I_list': [], 'y_list': []})

    rng = jax.random.PRNGKey(seed)

    if P is None:
        rng, key = jax.random.split(rng)
        P = _generate_initial(n, r, key)

    optim = optax.adam(lr)
    state = optim.init(P)

    # Vmapped, JIT-compiled batch sampling and likelihood.
    sample = jax.jit(jax.vmap(_sample, (None, 0)))
    likelihood = jax.jit(jax.vmap(_likelihood, (None, 0)))

    @jax.jit
    def loss(P_cur, I_cur):
        # Negative mean log-likelihood of the selected candidate indices.
        return jnp.mean(-likelihood(P_cur, I_cur))

    loss_grad = jax.grad(loss)

    @jax.jit
    def optimize(state, P_cur, I_cur):
        # One Adam step on the TT-cores toward higher likelihood of I_cur.
        grads = loss_grad(P_cur, I_cur)
        updates, state = optim.update(grads, state)
        P_cur = jax.tree_util.tree_map(lambda u, p: p + u, updates, P_cur)
        return state, P_cur

    while True:
        rng, key = jax.random.split(rng)
        I = sample(P, jax.random.split(key, k))

        y = f(I)
        y = jnp.array(y)
        info['m'] += y.shape[0]

        is_new = _check(P, I, y, info, with_info_i_opt_list, with_info_full)

        if info['m'] >= m:
            info['t'] = tpc() - time
            break

        # Select the k_top best candidates of the batch (stable order).
        ind = jnp.argsort(y, kind='stable')
        ind = (ind[::-1] if is_max else ind)[:k_top]

        for _ in range(k_gd):
            state, P = optimize(state, P, I[ind, :])

        info['t'] = tpc() - time
        _log(info, log, log_ind, is_new)

    _log(info, log, log_ind, is_new, is_end=True)

    return info['i_opt'], info['y_opt']
def _check(P, I, y, info, with_info_i_opt_list, with_info_full):
    """Record the best point of the current batch into ``info``.

    Returns a truthy value when the batch improved on the stored optimum.
    """
    # Locate the batch optimum (max or min depending on the task).
    pos = jnp.argmax(y) if info['is_max'] else jnp.argmin(y)
    i_cand = I[pos, :]
    y_cand = y[pos]

    improved = info['y_opt'] is None
    improved = improved or (info['is_max'] and info['y_opt'] < y_cand)
    improved = improved or (not info['is_max'] and info['y_opt'] > y_cand)

    if improved:
        info['i_opt'] = i_cand
        info['y_opt'] = y_cand

    if improved or with_info_full:
        info['m_opt_list'].append(info['m'])
        info['y_opt_list'].append(info['y_opt'])
        if with_info_i_opt_list or with_info_full:
            info['i_opt_list'].append(info['i_opt'].copy())

    if with_info_full:
        info['P_list'].append([G.copy() for G in P])
        info['I_list'].append(I.copy())
        info['y_list'].append(y.copy())

    return improved
def _generate_initial(n, r, key):
    """Create a random TT-tensor with mode sizes ``n`` and max TT-rank ``r``."""
    d = len(n)
    ranks = [1] + [r] * (d - 1) + [1]
    keys = jax.random.split(key, d)
    return [jax.random.uniform(keys[j], (ranks[j], n[j], ranks[j + 1]))
            for j in range(d)]
def _interface_matrices(Y):
    """Compute the right "interface" vectors of TT-tensor ``Y``.

    Z[j] is the normalized right-to-left contraction of the mode-summed
    cores j..d-1; Z[0] and Z[d] are fixed to the scalar-one vector.
    """
    d = len(Y)
    Z = [jnp.ones(1)] + [None] * (d - 1) + [jnp.ones(1)]
    for j in reversed(range(1, d)):
        v = jnp.sum(Y[j], axis=1) @ Z[j + 1]
        Z[j] = v / jnp.linalg.norm(v)
    return Z
def _likelihood(Y, I):
    """Compute the log-likelihood of multi-index ``I`` under TT-tensor ``Y``."""
    d = len(Y)
    Z = _interface_matrices(Y)
    # Distribution over the first mode from the first core + right interface.
    G = jnp.einsum('riq,q->i', Y[0], Z[1])
    G = jnp.abs(G)
    G /= G.sum()
    y = [G[I[0]]]
    # Condition on I[0], then sweep the remaining modes left to right.
    Z[0] = Y[0][0, I[0], :]
    for j in range(1, d):
        # Conditional distribution given the already fixed indices.
        G = jnp.einsum('r,riq,q->i', Z[j-1], Y[j], Z[j+1])
        G = jnp.abs(G)
        G /= jnp.sum(G)
        y.append(G[I[j]])
        Z[j] = Z[j-1] @ Y[j][:, I[j], :]
        Z[j] /= jnp.linalg.norm(Z[j])  # keep the running vector well-scaled
    return jnp.sum(jnp.log(jnp.array(y)))
def _log(info, log=False, log_ind=False, is_new=False, is_end=False):
    """Print current optimization result to output."""
    if not log or (not is_new and not is_end):
        return  # logging disabled or nothing new to report
    text = f'protes > '
    text += f'm {info["m"]:-7.1e} | '    # evaluations used
    text += f't {info["t"]:-9.3e} | '    # elapsed time, sec
    text += f'y {info["y_opt"]:-11.4e}'  # best value so far
    if log_ind:
        text += f' | i {" ".join([str(i) for i in info["i_opt"]])}'
    if is_end:
        text += ' <<< DONE'
    print(text)
def _sample(Y, key):
    """Generate one multi-index sampled from probability TT-tensor ``Y``."""
    d = len(Y)
    keys = jax.random.split(key, d)
    I = jnp.zeros(d, dtype=jnp.int32)
    Z = _interface_matrices(Y)
    # First mode: distribution from the first core and its right interface.
    G = jnp.einsum('riq,q->i', Y[0], Z[1])
    G = jnp.abs(G)
    G /= G.sum()
    i = jax.random.choice(keys[0], jnp.arange(Y[0].shape[1]), p=G)
    I = I.at[0].set(i)
    Z[0] = Y[0][0, i, :]
    for j in range(1, d):
        # Conditional distribution given the previously sampled indices.
        G = jnp.einsum('r,riq,q->i', Z[j-1], Y[j], Z[j+1])
        G = jnp.abs(G)
        G /= jnp.sum(G)
        i = jax.random.choice(keys[j], jnp.arange(Y[j].shape[1]), p=G)
        I = I.at[j].set(i)
        Z[j] = Z[j-1] @ Y[j][:, i, :]
        Z[j] /= jnp.linalg.norm(Z[j])  # keep the running vector well-scaled
    return I
| 5,369
| 25.453202
| 78
|
py
|
PROTES
|
PROTES-main/protes/__init__.py
|
__version__ = '0.3.2'
from .animation import animation
from .protes import protes
from .protes_general import protes_general
| 127
| 17.285714
| 42
|
py
|
PROTES
|
PROTES-main/protes/animation.py
|
import jax.numpy as jnp
import matplotlib as mpl
from matplotlib import cm
from matplotlib.animation import FuncAnimation
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
from matplotlib.ticker import LinearLocator
import numpy as np
import os
from time import perf_counter as tpc
from .protes_general import protes_general
mpl.rc('animation', html='jshtml')
mpl.rcParams['animation.embed_limit'] = 2**128
def _func_on_grid(f, a, b, n1, n2):
    """Evaluate ``f`` on the full (n1 x n2) index grid.

    Returns mesh coordinates (X1, X2) mapped onto [a, b] and values Y.
    """
    G1, G2 = np.meshgrid(np.arange(n1), np.arange(n2))
    pairs = np.hstack([G1.reshape(-1, 1), G2.reshape(-1, 1)])
    Y = f(pairs).reshape(n1, n2)
    X1 = G1 / (n1 - 1) * (b - a) + a
    X2 = G2 / (n2 - 1) * (b - a) + a
    return X1, X2, Y
def _p_full(P):
    """Contract a two-core TT representation into a dense 2D matrix."""
    Pl, Pr = P
    return np.tensordot(Pl, Pr, axes=(2, 0))[0, :, :, 0]
def _plot_2d(fig, ax, Y, i_opt_real=None):
    """Draw ``Y`` as a 2D heatmap on ``ax``; mark the known optimum with a star."""
    img = ax.imshow(Y, cmap=cm.coolwarm, alpha=0.8)
    if i_opt_real is not None:
        # Gold star at the true optimum, if it is known.
        ax.scatter(*i_opt_real, s=500, c='#ffbf00', marker='*', alpha=0.9)
    ax.set_xlim(0, Y.shape[0])
    ax.set_ylim(0, Y.shape[1])
    ax.axis('off')
    return img
def _plot_3d(fig, ax, title, X1, X2, Y):
    """Draw ``Y`` over the (X1, X2) mesh as a titled 3D surface on ``ax``."""
    ax.set_title(title, fontsize=16)
    surf = ax.plot_surface(X1, X2, Y, cmap=cm.coolwarm,
        linewidth=0, antialiased=False)
    ax.zaxis.set_major_locator(LinearLocator(10))
    ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
    # fig.colorbar(surf, ax=ax, shrink=0.3, aspect=10)
    return surf
def animate(f, a, b, n, info, i_opt_real=None, fpath=None):
    """Render the optimization history stored in ``info`` as a 4-panel animation.

    Top row: target function (3D surface + 2D heatmap with markers); bottom
    row: the evolving probability tensor. Saves a GIF to ``fpath`` when given,
    otherwise displays the figure.

    Bug fix: ``FuncAnimation`` objects have no ``show`` method, so the
    original ``anim.show()`` raised AttributeError; ``plt.show()`` is used
    to display the animation instead.
    """
    y_opt_real = None if i_opt_real is None else f(i_opt_real.reshape(1, -1))[0]

    fig = plt.figure(figsize=(16, 16))
    plt.subplots_adjust(wspace=0.3, hspace=0.3)
    ax1 = fig.add_subplot(221, projection='3d')
    ax2 = fig.add_subplot(222)
    ax3 = fig.add_subplot(223, projection='3d')
    ax4 = fig.add_subplot(224)

    X1, X2, Y = _func_on_grid(f, a, b, n, n)
    P = _p_full(info['P_list'][0])

    img_y_3d = _plot_3d(fig, ax1, 'Target function', X1, X2, Y)
    img_p_3d = _plot_3d(fig, ax3, 'Probability tensor', X1, X2, P)
    img_y_2d = _plot_2d(fig, ax2, Y, i_opt_real)
    img_p_2d = _plot_2d(fig, ax4, P, i_opt_real)

    # Markers: current optimum, all requests, top-k requests, optimum history.
    img_opt = ax2.scatter(0, 0, s=150, c='#EE17DA', marker='D')
    img_req = ax2.scatter(0, 0, s= 70, c='#8b1d1d')
    img_req_top1 = ax2.scatter(0, 0, s= 110, c='#ffcc00', alpha=0.8)
    img_req_top2 = ax4.scatter(0, 0, s= 110, c='#ffcc00')
    img_hist, = ax2.plot([], [], '--', c='#485536', linewidth=1, markersize=0)

    def update(k, *args):
        # Redraw all panels for frame k of the stored optimization history.
        i_opt = info['i_opt_list'][k]
        y_opt = info['y_opt_list'][k]
        m = info['m_opt_list'][k]
        I = info['I_list'][k]
        y = info['y_list'][k]
        e = None if y_opt_real is None else abs(y_opt_real - y_opt)
        P = _p_full(info['P_list'][k])

        # Top-k candidate indices of this frame's batch.
        ind = jnp.argsort(y, kind='stable')
        ind = (ind[::-1] if info['is_max'] else ind)[:info['k_top']]
        I_top = I[ind, :]

        ax3.clear()
        _plot_3d(fig, ax3, 'Probability tensor', X1, X2, P)

        img_p_2d.set_array(P)
        img_opt.set_offsets(np.array([i_opt[0], i_opt[1]]))
        img_req.set_offsets(I)
        img_req_top1.set_offsets(I_top)
        img_req_top2.set_offsets(I_top)

        # Trace the sequence of optima found so far.
        pois_x, pois_y = [], []
        for i in info['i_opt_list'][:(k+1)]:
            pois_x.append(i[0])
            pois_y.append(i[1])
        img_hist.set_data(pois_x, pois_y)

        title = f'Queries: {m:-7.1e}'
        if e is None:
            title += f' | Opt : {y_opt:-11.4e}'
        else:
            title += f' | Error : {e:-7.1e}'
        ax2.set_title(title, fontsize=20)

        return img_p_2d, img_opt, img_req, img_req_top1, img_req_top2, img_hist

    anim = FuncAnimation(fig, update, interval=30,
        frames=len(info['y_list']), blit=True, repeat=False)

    if fpath:
        anim.save(fpath, writer='pillow', fps=0.7)
    else:
        plt.show()  # Animation objects have no .show(); display the figure
def animation(f, a, b, n=501, m=int(1.E+4), k=100, k_top=10, k_gd=1, lr=5.E-2,
              i_opt_real=None, fpath='animation/animation.gif', is_max=False):
    """Animation of the PROTES work for the 2D case."""
    print('\n... start optimization ...')
    t = tpc()
    info = {}
    # Run the general optimizer, keeping the full history for rendering.
    i_opt, y_opt = protes_general(f, [n, n], m, k, k_top, k_gd, lr, info=info,
        is_max=is_max, log=True, with_info_full=True)
    print(f'Optimization is ready (total time {tpc()-t:-8.2f} sec)')

    print('\n... start building animation ...')
    t = tpc()
    # Create the output folder for the GIF when one is requested.
    if os.path.dirname(fpath):
        os.makedirs(os.path.dirname(fpath), exist_ok=True)
    animate(f, a, b, n, info, i_opt_real, fpath)
    print(f'Animation is ready (total time {tpc()-t:-8.2f} sec)')
| 4,670
| 30.993151
| 80
|
py
|
PROTES
|
PROTES-main/demo/demo_qubo.py
|
import numpy as np
from time import perf_counter as tpc
from protes import protes
def func_build():
    """Binary knapsack problem (d=50, capacity C=1000).

    Returns (dimension, mode size, batch objective): the objective yields
    the negated profit for feasible item selections and 0 otherwise.
    """
    d = 50
    n = 2
    w = [
        80, 82, 85, 70, 72, 70, 66, 50, 55, 25, 50, 55, 40, 48, 59, 32, 22,
        60, 30, 32, 40, 38, 35, 32, 25, 28, 30, 22, 50, 30, 45, 30, 60, 50,
        20, 65, 20, 25, 30, 10, 20, 25, 15, 10, 10, 10, 4, 4, 2, 1]
    p = [
        220, 208, 198, 192, 180, 180, 165, 162, 160, 158, 155, 130, 125,
        122, 120, 118, 115, 110, 105, 101, 100, 100, 98, 96, 95, 90, 88, 82,
        80, 77, 75, 73, 72, 70, 69, 66, 65, 63, 60, 58, 56, 50, 30, 20, 15,
        10, 8, 5, 3, 1]
    C = 1000

    def func(i):
        """Target function: y=f(i); [d] -> float."""
        if np.dot(w, i) > C:
            return 0  # infeasible: weight exceeds the capacity
        return -np.dot(p, i)

    return d, n, lambda I: np.array([func(i) for i in I])
def demo():
    """Demonstration for QUBO problem.

    We will solve the binary knapsack problem with fixed weights wi in [5, 20],
    profits pi in [50, 100] (i = 1, 2, . . . , d) and the maximum capacity
    C = 1000. It is from work (Dong et al., 2021) (problem k3; d = 50), where
    anglemodulated bat algorithm (AMBA) was proposed for high-dimensional
    binary optimization problems with engineering application to antenna
    topology optimization. Note that ths problem has known exact solution -3103.

    The result in console should looks like this:

    protes > m 1.0e+02 | t 3.021e+00 | y -2.7560e+03
    protes > m 3.0e+02 | t 3.051e+00 | y -2.8150e+03
    protes > m 4.0e+02 | t 3.061e+00 | y -2.8350e+03
    protes > m 8.0e+02 | t 3.099e+00 | y -2.8700e+03
    protes > m 1.0e+03 | t 3.116e+00 | y -2.8850e+03
    protes > m 1.1e+03 | t 3.124e+00 | y -2.9070e+03
    protes > m 1.3e+03 | t 3.139e+00 | y -2.9350e+03
    protes > m 1.4e+03 | t 3.147e+00 | y -2.9690e+03
    protes > m 1.7e+03 | t 3.171e+00 | y -2.9990e+03
    protes > m 2.0e+03 | t 3.194e+00 | y -3.0030e+03
    protes > m 2.2e+03 | t 3.210e+00 | y -3.0700e+03
    protes > m 6.9e+03 | t 3.574e+00 | y -3.0720e+03
    protes > m 8.5e+03 | t 3.701e+00 | y -3.0750e+03
    protes > m 1.0e+04 | t 3.816e+00 | y -3.0750e+03 <<< DONE

    RESULT | y opt = -3.0750e+03 | time = 3.8277
    """
    d, n, f = func_build() # Target function, and array shape
    m = int(1.E+4)         # Number of requests to the objective function

    t = tpc()
    # Minimize: the objective returns negated profit, so lower is better.
    i_opt, y_opt = protes(f, d, n, m, log=True)
    print(f'\nRESULT | y opt = {y_opt:-11.4e} | time = {tpc()-t:-10.4f}')
if __name__ == '__main__':
demo()
| 2,591
| 33.105263
| 80
|
py
|
PROTES
|
PROTES-main/demo/demo_func.py
|
import jax.numpy as jnp
from time import perf_counter as tpc
from protes import protes
def func_build(d, n):
    """Build the discretized d-dimensional Ackley target on an n-point grid.

    See https://www.sfu.ca/~ssurjano/ackley.html for the analytic form.
    """
    a = -32.768       # Grid lower bound
    b = +32.768       # Grid upper bound
    par_a = 20.       # Standard parameter values for Ackley function
    par_b = 0.2
    par_c = 2.*jnp.pi

    def func(I):
        """Target function: y=f(I); [samples,d] -> [samples]."""
        X = I / (n - 1) * (b - a) + a
        term1 = -par_a * jnp.exp(-par_b * jnp.sqrt(jnp.sum(X**2, axis=1) / d))
        term2 = -jnp.exp(jnp.sum(jnp.cos(par_c * X), axis=1) / d)
        return term1 + term2 + (par_a + jnp.exp(1.))

    return func
def demo():
    """A simple demonstration for discretized multivariate analytic function.

    We will find the minimum of an implicitly given "d"-dimensional array
    having "n" elements in each dimension. The array is obtained from the
    discretization of an analytic function.

    The result in console should looks like this (note that the exact minimum
    of this function is y = 0 and it is reached at the origin of coordinates):

    protes > m 1.0e+02 | t 3.190e+00 | y 2.0214e+01
    protes > m 2.0e+02 | t 3.203e+00 | y 1.8211e+01
    protes > m 5.0e+02 | t 3.216e+00 | y 1.8174e+01
    protes > m 6.0e+02 | t 3.220e+00 | y 1.7491e+01
    protes > m 7.0e+02 | t 3.224e+00 | y 1.7078e+01
    protes > m 8.0e+02 | t 3.228e+00 | y 1.6180e+01
    protes > m 1.1e+03 | t 3.238e+00 | y 1.4116e+01
    protes > m 1.4e+03 | t 3.250e+00 | y 8.4726e+00
    protes > m 2.7e+03 | t 3.293e+00 | y 0.0000e+00
    protes > m 1.0e+04 | t 3.534e+00 | y 0.0000e+00 <<< DONE

    RESULT | y opt = 0.0000e+00 | time = 3.5459
    """
    d = 7                # Dimension
    n = 11               # Mode size
    m = int(1.E+4)       # Number of requests to the objective function
    f = func_build(d, n) # Target function, which defines the array elements

    t = tpc()
    i_opt, y_opt = protes(f, d, n, m, log=True)
    print(f'\nRESULT | y opt = {y_opt:-11.4e} | time = {tpc()-t:-10.4f}')
if __name__ == '__main__':
demo()
| 2,206
| 30.084507
| 78
|
py
|
PROTES
|
PROTES-main/calc/calc_one.py
|
import numpy as np
import os
from time import perf_counter as tpc
from jax.config import config
config.update('jax_enable_x64', True)
os.environ['JAX_PLATFORM_NAME'] = 'cpu'
from protes import protes
from teneva_bm import BmQuboKnapAmba
from opti import *
# Mapping from display name to optimizer class: 'Our' is PROTES, the rest
# are baseline methods (all classes come from `from opti import *` above).
Optis = {
    'Our': OptiProtes,
    'BS-1': OptiTTOpt,
    'BS-2': OptiOptimatt,
    'BS-3': OptiOPO,
    'BS-4': OptiPSO,
    'BS-5': OptiNB,
    'BS-6': OptiSPSA,
    'BS-7': OptiPortfolio,
}
class Log:
    """Mirror log messages to stdout and a text file.

    The first message truncates the file; later messages are appended.
    """

    def __init__(self, fpath='log_one.txt'):
        self.fpath = fpath
        self.is_new = True  # first write uses mode 'w', later ones 'a'
        folder = os.path.dirname(self.fpath)
        if folder:
            os.makedirs(folder, exist_ok=True)

    def __call__(self, text):
        print(text)
        mode = 'w' if self.is_new else 'a'
        with open(self.fpath, mode) as f:
            f.write(text + '\n')
        self.is_new = False
def calc_one(m=int(1.E+5), rep=10):
    """Run every optimizer on the knapsack benchmark ``rep`` times, log stats.

    Each optimizer gets a budget of ``m`` objective requests; per-seed best
    values are collected and mean/best are reported at the end.
    """
    log = Log()
    res = {}

    bm = BmQuboKnapAmba(d=50, name='P-14').prep()
    log(bm.info())

    for name, Opti in Optis.items():
        res[name] = []
        for seed in range(rep):
            np.random.seed(seed)
            opti = Opti(name=name)
            opti.prep(bm.get, bm.d, bm.n, m, is_f_batch=True)
            if name == 'Our':
                # Only our method takes an explicit seed option here.
                opti.opts(seed=seed)
            opti.optimize()
            res[name].append(opti.y)
            log(opti.info() + f' # {seed+1:-3d}')
        log('')

    # Aggregate mean/best over the repetitions for the final report.
    text = '\n\n\n\n--- RESULT ---\n\n'
    for name, Opti in Optis.items():
        y = np.array(res[name])
        text += name + ' '*max(0, 10-len(name)) + ' >>> '
        text += f'Mean: {np.mean(y):-12.6e} | Best: {np.min(y):-12.6e}\n'
    log(text)
if __name__ == '__main__':
calc_one()
| 1,716
| 22.202703
| 75
|
py
|
PROTES
|
PROTES-main/calc/construct_TT.py
|
## Code from https://github.com/G-Ryzhakov/Constructive-TT
import numpy as np
from time import perf_counter as tpc
from numba import jit, njit
def G0(n):
    """Return the n-by-n lower-triangular matrix of ones (integer dtype)."""
    return np.tril(np.ones((n, n), dtype=int))
def main_core(f, n, m):
    # Evaluate f on 0..n-1, then delegate to main_core_list.
    # NOTE(review): a later njit `main_core_list(f, res, left_to_right)` in
    # this module shadows the (f, n, m) overload this call matches -- confirm
    # which definition is intended to be live at runtime.
    return main_core_list([f(i) for i in range(n)], n, m)
def main_core_list(f, n, m):
    """Build a sparse functional core from the value list ``f``.

    Assumes f maps [0, n-1] -> [0, m-1]. Returns an (n, m) CSC matrix whose
    row ``i`` carries +/-1 entries encoding the increment between f[i-1]
    and f[i] (row 0 encodes the ramp up to f[0]).

    Bug fix: ``csc_matrix`` was never imported at module level, so this
    function raised NameError; it is now imported locally from scipy.sparse.
    """
    from scipy.sparse import csc_matrix  # local import: module stays light

    row, col, data = [], [], []
    f0 = f[0]
    # Row 0: ones on columns 0..f[0].
    row.extend([0] * (f0 + 1))
    col.extend(list(range(f0 + 1)))
    data.extend([1] * (f0 + 1))
    for i in range(1, n):
        f0_prev = f0
        f0 = f[i]
        if f0 > f0_prev:
            # Value went up: +1 on the newly covered columns.
            d = f0 - f0_prev
            row.extend([i] * d)
            col.extend(list(range(f0_prev + 1, f0 + 1)))
            data.extend([1] * d)
        if f0 < f0_prev:
            # Value went down: -1 on the columns no longer covered.
            d = f0_prev - f0
            row.extend([i] * d)
            col.extend(list(range(f0 + 1, f0_prev + 1)))
            data.extend([-1] * d)
    return csc_matrix((data, (row, col)), shape=(n, m))
# @njit (disabled in the original source)
def main_core_list_ex(f, n=None, m=None, res=None, fill=None):
    """Scatter ``fill[i]`` into ``res[i, f[i]]`` for each non-negative f[i].

    f: [0, n-1] -> [0, m-1]; a fresh (n, m) zero matrix is allocated when
    ``res`` is not given, and ``fill`` defaults to all ones.
    """
    if res is None:
        res = np.zeros((n, m))
    if fill is None:
        fill = [1] * len(f)
    for pos, target in enumerate(f):
        if target >= 0:  # negative entries mark "no transition"
            res[pos, target] = fill[pos]
    return res
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-==
def next_core(f_list, v_in, v_out=None, to_ret_idx=True, last_core=False):
    """Build the next TT-core from transition functions ``f_list`` applied
    to the incoming value set ``v_in``.

    if last_core then fill with core with true value
    """
    # vals may contain None, so no numpy array
    if last_core:
        vals = [[(f(v) or 0 ) for v in v_in] for f in f_list]
    else:
        vals = [[f(v) for v in v_in] for f in f_list]
    if v_out is None:
        # Collect all distinct output values across the functions.
        v_out = set([])
        for v in vals:
            v_out |= set(v)
    v_out = sorted(set(v_out) - set([None]))
    inv_idx = {v: i for i, v in enumerate(v_out)}
    inv_idx[None] = -1  # -1 marks transitions that do not exist
    n, m = len(v_in), len(v_out)
    if last_core:
        m = 1  # the final core collapses to a single output rank
    core = np.zeros([n, len(f_list), m])
    res = []
    for i, vf in enumerate(vals):
        if last_core:
            print(vf)
            res.append(np.array(vf, dtype=float))
            # NOTE(review): this call passes `fill=`, which matches
            # main_core_list_old, not the njit main_core_list(f, res,
            # left_to_right) defined later in this module -- confirm which
            # implementation is intended here.
            main_core_list(np.zeros(len(vf), dtype=int), core[:, i, :], fill=res[-1])
        else:
            res.append(np.array([inv_idx[j] for j in vf]))
            main_core_list(res[-1], core[:, i, :])
    if to_ret_idx:
        return core, v_out, res
    else:
        return core, v_out
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-==
def add_dim_core(n, m, d):
    """Return an (n, d, m) core acting as identity on the first min(n, m) ranks."""
    core = np.zeros((n, d, m))
    diag = np.arange(min(n, m))
    core[diag, :, diag] = 1
    return core
def insert_dim_core(cores, i, d):
    """Return a copy of ``cores`` with an identity core of mode size ``d``
    inserted at position ``i``.

    The identity core's rank matches the right rank of the preceding core
    (or 1 at the boundaries).

    Bug fix: the original rebound the local name ``cores`` and returned
    nothing, so the insertion was silently lost; the new list is now
    returned to the caller.
    """
    if i == 0 or i > len(cores) - 1:
        n = 1  # boundary insertion: rank 1
    else:
        n = cores[i - 1].shape[-1]
    return cores[:i] + [add_dim_core(n, n, d)] + cores[i:]
def const_func(x):
    """Return a one-argument function that always yields ``x``."""
    def _const(y):
        return x
    return _const
def add_func(x):
    """Return a function mapping y -> y + x."""
    def _add(y):
        return y + x
    return _add
def ind_func(x):
    """Return an indicator: 0 when the argument equals ``x``, else None."""
    def _ind(y):
        return 0 if x == y else None
    return _ind
def gt_func(x):
    """Return a threshold test: 0 when the argument is >= ``x``, else None."""
    def _ge(y):
        return 0 if y >= x else None
    return _ge
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-==
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-==
# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-==
def _reshape(a, shape):
    """Fortran-order (column-major) reshape, as used by the TT routines."""
    return np.asarray(a).reshape(shape, order='F')
def matrix_svd(M, delta=1E-8, rmax=None, ckeck_zero=True):
    """Truncated SVD: M ~= left @ M2 with accuracy ``delta`` and rank cap ``rmax``.

    Modified version from the ttpy package, see https://github.com/oseledets/ttpy.
    The factorization is obtained from an eigendecomposition of the smaller
    Gram matrix. (Parameter ``ckeck_zero`` keeps its original misspelled
    name for backward compatibility.)

    Bug fix: the default ``rmax=None`` previously crashed in
    ``min(rmax, ...)`` with a TypeError; ``None`` now means "no rank limit".
    """
    if rmax is None:
        rmax = np.iinfo(np.int32).max  # no explicit rank cap

    # Work with the smaller of the two Gram matrices for efficiency.
    if M.shape[0] <= M.shape[1]:
        cov = M.dot(M.T)
        singular_vectors = 'left'
    else:
        cov = M.T.dot(M)
        singular_vectors = 'right'

    if ckeck_zero and np.linalg.norm(cov) < 1e-14:
        # Effectively zero matrix: return a rank-1 zero factorization.
        return np.zeros([M.shape[0], 1]), np.zeros([1, M.shape[1]])

    w, v = np.linalg.eigh(cov)
    w[w < 0] = 0            # clip tiny negative eigenvalues (round-off)
    w = np.sqrt(w)          # singular values of M
    svd = [v, w]
    idx = np.argsort(svd[1])[::-1]
    svd[0] = svd[0][:, idx]
    svd[1] = svd[1][idx]

    # Choose the rank from the relative tail energy of the singular values.
    S = (svd[1]/svd[1][0])**2
    where = np.where(np.cumsum(S[::-1]) <= delta**2)[0]
    if len(where) == 0:
        rank = max(1, min(rmax, len(S)))
    else:
        rank = max(1, min(rmax, len(S) - 1 - where[-1]))

    left = svd[0]
    left = left[:, :rank]

    if singular_vectors == 'left':
        M2 = ((1. / svd[1][:rank])[:, np.newaxis]*left.T).dot(M)
        left = left*svd[1][:rank]
    else:
        M2 = M.dot(left)
        left, M2 = M2, left.T

    return left, M2
def show(Y):
    """Pretty-print mode sizes (top row) and TT-ranks (bottom row) of cores Y."""
    N = [G.shape[1] for G in Y]
    R = [G.shape[0] for G in Y] + [1]
    width = max(int(np.ceil(np.log10(np.max(R) + 1))) + 1, 3)
    cell = '{:^' + str(width) + '}'
    pad = ' ' * (width // 2)
    row_modes = pad + ''.join(cell.format(n) for n in N)
    row_links = pad + ''.join(cell.format('/ \\') for _ in N)
    row_ranks = ''.join(cell.format(r) for r in R)
    print(f'{row_modes}\n{row_links}\n{row_ranks}\n')
def full(Y):
    """Return the TT-tensor ``Y`` (list of 3D cores) as a dense ndarray.

    Contracts neighboring cores over the shared rank dimension and strips
    the outer unit ranks.

    Bug fix: the original iterated ``Y[:1]`` (the first core again) instead
    of ``Y[1:]``, so every core after the first was ignored.
    """
    Q = Y[0].copy()
    for G in Y[1:]:
        Q = np.tensordot(Q, G, 1)
    return Q[0, ..., 0]
@njit
def main_core_list_old(f, res, fill=None):
    """Scatter variant kept for reference: res[i, f[i]] = fill[i].

    f: [0, n-1] -> [0, m-1]; negative entries are skipped.
    """
    if fill is None:
        fill = np.ones(len(f))
    for i, v in enumerate(f):
        if v >= 0:  # negative entries mark "no transition"
            res[i, v] = fill[i]
    return res
@njit
def main_core_list(f, res, left_to_right=None):
    """Fill a 0/1 transition matrix in ``res`` from index list ``f``.

    f: [0, n-1] -> [0, m-1]; negative entries are skipped. When
    ``left_to_right`` is falsy the transposed position is written instead.
    """
    if left_to_right:
        for i, v in enumerate(f):
            if v >= 0:
                res[i, v] = 1
    else:
        for i, v in enumerate(f):
            if v >= 0:
                res[v, i] = 1
    return res
def mid_avr(l):
    """Average of the first and last elements of ``l``."""
    first, last = l[0], l[-1]
    return (first + last) / 2
@njit
def _next_indices_1(vals_np, eps):
    """Greedy split of sorted array ``vals_np`` into clusters of width ``eps``.

    Returns the right-boundary indices of the clusters.
    """
    idx_rank = []
    se = vals_np[0]
    i = 0
    if vals_np[1] - se >= eps:
        idx_rank.append(i)
    len_vals_np_m_1 = len(vals_np) - 1
    #while len(idx_rank) < max_rank:
    while i < len_vals_np_m_1:
        # Jump to the first value farther than eps from the cluster start.
        i = min(np.searchsorted(vals_np, se + eps, side='right'), len_vals_np_m_1)
        se = vals_np[i]
        idx_rank.append(i)
        #if i == len_vals_np_m_1:
        #    flag = False
        #    break
    return idx_rank
@njit
def mean_avr_list(vals_np, idx_rank):
    """Per-cluster representatives: midpoint of each (i_s, i_e] value slice."""
    n = len(idx_rank) - 1
    res = np.empty(n)
    for i in range(n):
        i_s = idx_rank[i]
        i_e = idx_rank[i+1]
        if i_e - i_s > 1:
            # Multi-element cluster: midpoint of its first and last value.
            res[i] = (vals_np[i_s+1] + vals_np[i_e])/2.
        else:
            res[i] = vals_np[i_s+1]
    return res
def next_indices(f_list, v_in, v_out=None, max_rank=None, relative_eps=None):
    """Map ``v_in`` through every function in ``f_list`` and re-enumerate the
    resulting values; optionally merge close values (``relative_eps``) and
    cap the number of distinct values (``max_rank``).

    Returns (new value list, integer index arrays; -1 encodes None).
    """
    # vals may contain None, so no numpy array
    vals = [[f(v) for v in v_in] for f in f_list]
    #print(max_rank, vals)
    if max_rank is None and relative_eps is None:
        # Exact mode: one output index per distinct value.
        if v_out is None:
            v_out = set([])
            for v in vals:
                v_out |= set(v)
        v_out = set(v_out) - set([None])
        #v_out = list(v_out)
        try:
            v_out = sorted(v_out)
        except: # Can't sort as it's not a regular type
            pass
        inv_idx = {v: i for i, v in enumerate(v_out)}
        inv_idx[None] = -1
        res = np.array([[inv_idx[j] for j in vf] for vf in vals])
    else:
        # Approximate mode: cluster the sorted unique float values.
        #if isinstance(vals[0][0], complex):
        #    dtype=complex
        #else:
        #    dtype=float
        #print(vals, dtype)
        #dtype=float
        vals_np = np.unique(np.array(vals, dtype=float).reshape(-1))
        # None entries became NaN, which sorts last; cut them off.
        vals_search = v_out = vals_np = vals_np[:np.searchsorted(vals_np, np.nan)]
        if max_rank is None:
            max_rank = 2**30
        if relative_eps is not None and len(vals_np) > 1:
            # Cluster width is relative to the total value span.
            eps = relative_eps*(vals_np[-1] - vals_np[0])
            idx_rank = _next_indices_1(vals_np, eps)
        else:
            idx_rank = np.arange(len(vals_np))
        if max_rank < len(idx_rank):
            # Thin the cluster boundaries uniformly down to max_rank.
            idx_rank = np.asarray(idx_rank)
            idx_rank = idx_rank[np.linspace(-1, len(idx_rank)-1, max_rank+1).round().astype(int)[1:]]
        if len(vals_np) > len(idx_rank):
            vals_search = vals_np[idx_rank]
        idx_rank = [-1] + list(idx_rank)
        # Representative value for each cluster: midpoint of its value range.
        v_out = [mid_avr(vals_np[i_s+1:i_e+1]) for i_s, i_e in zip(idx_rank[:-1], idx_rank[1:]) ]
        #v_out = mean_avr_list(vals_np, idx_rank)
        res = np.array([[np.searchsorted(vals_search, j) if j is not None else -1 for j in vf] for vf in vals])
    #print(res, v_out, len(v_out))
    return v_out, res
def all_sets(ar):
    """Group index positions of ``ar`` by value; one set per distinct value."""
    positions = np.arange(len(ar))
    return [set(positions[ar == v]) for v in np.unique(ar)]
def pair_intersects(set1, set2):
    """Pairwise non-empty intersections of two lists of sets.

    Arguments and return value are lists of sets.
    """
    return [s1 & s2 for s1 in set1 for s2 in set2 if s1 & s2]
def all_intersects(sets):
    """Fold pair_intersects over a list of set-lists (common refinement)."""
    acc = sets[0]
    for nxt in sets[1:]:
        acc = pair_intersects(acc, nxt)
    # Sorting of the result (by min element) was disabled in the original.
    return acc
def reindex(idxx):
    """Re-enumerate the per-level index arrays ``idxx`` from right to left,
    merging labels via the common refinement of their value groups.

    Bug fix: the original referenced the undefined name ``idx_prev`` when
    allocating ``idxx_new`` (NameError on first use); the subsequent ``zip``
    requires one empty bucket per row of ``idx_cur``, which is what is
    allocated now.
    """
    d = len(idxx)
    res = [None] * d
    idx_cur = idxx[d - 1]
    for i in range(d - 1, 0, -1):
        # Common refinement of the value-position groups of all rows.
        s_all = all_intersects([all_sets(v) for v in idx_cur])
        idxx_new = [[] for _ in idx_cur]
        for s in s_all:
            for v, v_prev in zip(idxx_new, idx_cur):
                v.append(v_prev[min(s)])
        res[i] = np.array(idxx_new)
        # Relabel the previous level's indices by the refined groups.
        # These are indices below level d-1, hence plain int dtype.
        idx_cur = np.array(idxx[i - 1], copy=True, dtype=int)
        for j, s in enumerate(s_all):
            for v in s:
                idx_cur[idx_cur == v] = j
    res[0] = idx_cur
    return res
def build_cores_by_indices(idxx, left_to_right=True):
    """Materialize dense 0/1 TT-cores from per-level index arrays ``idxx``."""
    if not idxx:
        return []
    d = len(idxx)
    cores = [None]*d
    for i, idx in enumerate(idxx):
        n = idx.shape[0]          # number of mode slices
        r0 = idx.shape[1]         # incoming rank
        r1 = idx.max() + 1        # outgoing rank (largest label + 1)
        if not left_to_right:
            # Right-side cores are built transposed.
            r1, r0 = r0, r1
        core = np.zeros([r0, n, r1])
        for j, vf in enumerate(idx):
            main_core_list(vf, core[:, j, :], left_to_right=left_to_right)
        cores[i] = core
    return cores
def build_core_by_vals(func, vals_in_out):
    """Fill a core by evaluating the 2-argument functions ``func`` on the
    (incoming, outgoing) value grids; None results are mapped to 0."""
    vals_in, vals_out = vals_in_out
    n = len(func)
    core = np.empty((len(vals_in), n, len(vals_out)))
    for k, fk in enumerate(func):
        core[:, k, :] = [[(fk(i, j) or 0) for j in vals_out] for i in vals_in]
    return core
@njit
def TT_func_mat_vec(vec, idx, res=None, direction=True):
    """Multiply ``vec`` by the implicit 0/1 matrix encoded by ``idx``.

    direction -- forward or backward mul (analog left_to_right)
    Returns the result vector and the number of summations performed.
    """
    num_sum = 0
    if res is None:
        res_len = idx.max() + 1
        res = np.zeros(res_len)
    if direction:
        for i, v in enumerate(idx):
            if v >= 0:  # negative entries mark "no transition"
                res[v] += vec[i]
                num_sum += 1
    else:
        for i, v in enumerate(idx):
            if v >= 0:
                res[i] += vec[v]
                num_sum += 1
    return res, num_sum
def TT_func_mat_mul(mat, idx, res=None, direction=True):
    """Allocate the output (when needed) and delegate to the njit kernel."""
    if res is None:
        if direction:
            res = np.zeros((mat.shape[0], idx.max() + 1))
        else:
            res = np.zeros((idx.shape[1], mat.shape[1]))
    return _TT_func_mat_mul(mat, idx, res, direction)
@njit
def _TT_func_mat_mul(mat, idx, res, direction=True):
    """Accumulate columns/rows of ``mat`` into ``res`` per the index map.

    direction -- forward or backward mul (analog left_to_right)
    """
    if direction:
        for i, v in enumerate(idx):
            if v >= 0:  # negative entries mark "no transition"
                res[:, v] += mat[:, i]
    else:
        for i, v in enumerate(idx):
            if v >= 0:
                res[i, :] += mat[v, :]
    return res
# -=-=-=-=-=-
def make_two_arg(func):
    """Wrap a one-argument function so it accepts (and ignores) a second one."""
    def _wrapped(x, y):
        return func(x)
    return _wrapped
class tens(object):
    def mat_mul(self, n, i, mat, direction=True, res=None):
        """Multiply slice ``i`` of core ``n`` by ``mat`` (from left or right).

        When ``res`` is given, the product is accumulated into it.
        """
        # Dense path: the value core, or any core once cores are materialized.
        if n == self.pos_val_core or self._cores:
            if direction:
                if res is not None:
                    res += mat @ self.core(n)[:, i, :]
                    return res
                else:
                    return mat @ self.core(n)[:, i, :]
            else:
                if res is not None:
                    res += self.core(n)[:, i, :] @ mat
                    return res
                else:
                    return self.core(n)[:, i, :] @ mat
        # Sparse path: cores left of the value core are stored as indices.
        if n < self.pos_val_core:
            return TT_func_mat_mul(mat, self.indices[0][n][i], res=res, direction=direction)
        # Cores right of the value core: transposed index multiplication.
        # NOTE(review): `res.T` is evaluated even when res is None, which
        # would raise AttributeError -- confirm callers always pass res here.
        if n > self.pos_val_core:
            return TT_func_mat_mul(mat.T, self.indices[1][self.d-1 - n][i], res=res.T, direction=not direction)
    def test_mat_mul(self):
        """Contract the whole tensor by sweeping mat_mul left to right over
        every mode; returns the scalar total (a sanity check for mat_mul)."""
        mat = np.array([[1]])
        # Left part: accumulate over all slices of each index-encoded core.
        for n in range(self.pos_val_core):
            #mat = sum(self.mat_mul(n, i, mat, direction=True) for i in range(len(self.indices[0][n])))
            res = np.zeros([mat.shape[0], self.indices[0][n].max()+1])
            for i in range(len(self.indices[0][n])):
                self.mat_mul(n, i, mat, direction=True, res=res)
            mat = res
        # Middle (value) core: dense accumulation over its slices.
        mat = sum(self.mat_mul(self.pos_val_core, i, mat, direction=True) for i in range(len(self.funcs_vals)))
        # Right part: same sweep over the right-side index-encoded cores.
        for n in range(self.pos_val_core+1, self.d):
            #mat = sum(self.mat_mul(n, i, mat, direction=True) for i in range(len(self.indices[1][self.d-1 - n])))
            res = np.zeros([mat.shape[0], self.indices[1][self.d-1 - n].shape[1]])
            for i in range(len(self.indices[1][self.d-1 - n])):
                self.mat_mul(n, i, mat, direction=True, res=res)
            mat = res
        return mat.item()
    def convolve(self, t, or1='C', or2='F'):
        """Convolve two TT-tensors, calculating the dot (tensor) product
        through vectorization; returns a scalar.

        ``or1``/``or2`` select the reshape memory orders used between steps.
        """
        shapes = self.shapes
        assert (shapes == t.shapes).all()
        mat = np.array([[1]])
        for n in range(self.d):
            res = np.zeros(self.cores_shape(n)[1]*t.cores_shape(n)[1])
            mat = mat.reshape(-1, self.cores_shape(n)[0], order=or1)
            for i in range(shapes[n]):
                # Apply this mode's slice of both tensors and accumulate.
                tmp = self.mat_mul(n, i, mat, direction=True)
                tmp2 = t.mat_mul(n, i, tmp.T, direction=True)
                #print(tmp.shape, tmp2.shape, res.shape)
                res += tmp2.reshape(-1, order=or2)
            mat = res
        return mat.item()
    def cores_shape(self, n):
        """Return (left rank, right rank) of core ``n`` without materializing
        the dense core; the result is cached in self._cores_shapes."""
        if self._cores_shapes[n] is not None:
            return self._cores_shapes[n]  # cached
        if n < self.pos_val_core:
            idx = self.indices[0][n]
            cs = (idx.shape[1], idx.max() + 1)
        if n > self.pos_val_core:
            idx = self.indices[1][self.d-1 - n]
            cs = (idx.max() + 1, idx.shape[1])
        if n == self.pos_val_core:
            # Value core: take its real (possibly dense) shape.
            cs = self.core(n).shape
            cs = (cs[0], cs[-1])
        self._cores_shapes[n] = cs
        return cs
    def __init__(self, funcs=None, indices=None, do_reverse=False, do_truncate=False,
                 v_in=None, debug=True, relative_eps=None, max_rank=None):
        """Constructive TT-tensor defined by transition functions.

        ``funcs`` is either [left_funcs, right_funcs, val_funcs] (new format)
        or a flat list whose last element holds one-argument value functions.
        ``indices`` may supply precomputed index arrays; ``max_rank`` and
        ``relative_eps`` control approximate value clustering.
        """
        if type(funcs[0][0]) == list: # new
            self.funcs_left = funcs[0]
            self.funcs_right = funcs[1]
            self.funcs_vals = funcs[2]
        else:
            # Legacy flat format: everything but the last entry goes left.
            self.funcs_left = funcs[:-1]
            self.funcs_right = []
            self.funcs_vals = []
            # Disabled legacy code, kept as a string literal on purpose.
            _="""
            for i, fi in enumerate(funcs[-1]):
                #f =
                #f(0, 0)
                #self.funcs_vals.append(lambda x, y: funcs[-1][i](x))
                self.funcs_vals.append(make_two_arg(funcs[-1][i]))
                self.funcs_vals[-1](0, 0)
                self.funcs_vals[0](0, 0)
                self.funcs_vals[1](0, 0)
            """
            self.funcs_vals = [make_two_arg(i) for i in funcs[-1]]

        #self.funcs = funcs
        self.d = len(self.funcs_left) + len(self.funcs_right) + 1
        self.pos_val_core = len(self.funcs_left)
        self.do_reverse = do_reverse
        # Truncation is only supported when there are no right-side functions.
        self.do_truncate = do_truncate and not self.funcs_right
        self._cores = None
        self._indices = indices
        self.debug = debug
        self._cores_shapes = [None]*self.d
        if v_in is None:
            self.v_in = 0
        else:
            self.v_in = v_in
        self.relative_eps = relative_eps
        self.max_rank = max_rank
def p(self, mes):
if self.debug:
print(mes)
    @property
    def indices(self):
        """Lazily compute the per-level index arrays (left sweep, right
        sweep) plus the final value sets for the middle core.

        Cached in self._indices after the first access.
        """
        if self._indices is not None:
            return self._indices
        self._indices = []
        # Left sweep: propagate the input value through the left functions.
        v_in = [self.v_in]
        idxx_a = []
        for func in self.funcs_left:
            v_in, idxx = next_indices(func, v_in, max_rank=self.max_rank, relative_eps=self.relative_eps)
            idxx_a.append(idxx)
        v_out_left = v_in
        self._indices.append(idxx_a)
        # Right sweep: same propagation for the right functions.
        v_in = [self.v_in]
        idxx_a = []
        for func in self.funcs_right:
            v_in, idxx = next_indices(func, v_in, max_rank=self.max_rank, relative_eps=self.relative_eps)
            idxx_a.append(idxx)
        v_out_right = v_in
        self._indices.append(idxx_a)
        # Final value sets feed the middle (value) core.
        self._indices.append([v_out_left, v_out_right])
        return self._indices
    @indices.setter
    def indices(self, indices):
        # Allow external code to inject precomputed index arrays.
        #print("Don't bother me!")
        self._indices = indices
    @property
    def cores(self):
        """Materialize and cache the dense TT-cores (left + value + right)."""
        if self._cores is None:
            cores_left = build_cores_by_indices(self.indices[0], left_to_right=True)
            cores_right = build_cores_by_indices(self.indices[1], left_to_right=False)
            try:
                core_val = self.mid_core  # reuse the cached value core
            except:
                core_val = build_core_by_vals(self.funcs_vals, self.indices[2])
            # Right cores are stored reversed, hence the [::-1].
            self._cores = cores_left + [core_val] + cores_right[::-1]
            if self.do_truncate:
                self.truncate()
        return self._cores
def core(self, n, skip_build=False):
#print(n, self.pos_val_core, self._cores is None)
if self._cores is None or skip_build:
d = self.d
if n < self.pos_val_core:
return build_cores_by_indices([self.indices[0][n]], left_to_right=True)[0]
elif n > self.pos_val_core:
return build_cores_by_indices([self.indices[1][d-1 - n]], left_to_right=False)[0]
else:
try:
#print('returning... mid_core')
return self.mid_core # mid_core does not midifyed during rounding
except:
#print('failed. Building...')
self.mid_core = build_core_by_vals(self.funcs_vals, self.indices[2])
return self.mid_core
else:
return self._cores[n]
@cores.setter
def cores(self, cores):
if self._cores is not None:
print("Warning: cores are already set")
self._cores = cores
def index_revrse(self):
self._indices = reindex(self._indices)
@property
def shapes(self, func_shape=False):
if func_shape:
return np.array([len(i) for i in self.funcs])
else:
return np.array([i.shape[1] for i in self.cores])
    @property
    def erank(self):
        """Compute effective rank of the TT-tensor.

        The effective rank is the constant rank r solving the quadratic
        equation  a*r^2 + b*r = sz,  where ``sz`` is the actual number of
        parameters of the train; returns None when there are no cores.
        """
        Y = self.cores
        if not Y:
            return None
        d = len(Y)  # NOTE(review): unused; kept for byte-identical code
        N = self.shapes
        # Rank profile with the leading boundary rank 1 prepended.
        R = np.array([1] + [G.shape[-1] for G in Y])
        # Total number of parameters: sum over cores of r_{k-1} * n_k * r_k.
        sz = np.dot(N * R[:-1], R[1:])
        b = N[0] + N[-1]
        a = np.sum(N[1:-1])
        # Positive root of a*r^2 + b*r - sz = 0.
        return (np.sqrt(b * b + 4 * a * sz) - b) / (2 * a)
    def show(self):
        """Pretty-print the tensor via the module-level `show` helper."""
        show(self.cores)
def show_TeX(self, delim="---"):
rnks = [i.shape[0] for i in self.cores] + [1]
print(delim.join([str(j) for j in rnks]))
    def truncate(self, delta=1E-10, r=np.iinfo(np.int32).max):
        """Truncate (round) the TT-cores in place by a right-to-left SVD sweep.

        delta -- singular-value cutoff passed to ``matrix_svd``;
        r     -- hard cap on the resulting ranks (default: effectively none).
        """
        N = self.shapes
        Z = self.cores
        # We don't orthogonalize the cores first, so `delta` may not be an
        # adequate (i.e. globally controlled) accuracy bound here.
        for k in range(self.d-1, 0, -1):
            M = _reshape(Z[k], [Z[k].shape[0], -1])
            L, M = matrix_svd(M, delta, r, ckeck_zero=False)
            Z[k] = _reshape(M, [-1, N[k], Z[k].shape[2]])
            # Absorb the left factor into the previous core.
            Z[k-1] = np.einsum('ijk,kl', Z[k-1], L, optimize=True)
        self._cores = Z
def simple_mean(self):
Y = self.cores
G0 = Y[0]
G0 = G0.reshape(-1, G0.shape[-1])
G0 = np.sum(G0, axis=0)
for i, G in enumerate(Y[1:], start=1):
G0 = G0 @ np.sum(G, axis=1)
return G0.item()
    def simple_mean_func(self):
        """Sum of all tensor entries, contracted via the *index* structure.

        Unlike ``simple_mean`` this avoids materialising the left/right
        cores: the boundary vectors are propagated through the stored
        indices with ``TT_func_mat_vec``.  As a side effect it records:
          * ``self.num_op``  -- an estimate of the number of scalar ops,
          * ``self._times``  -- wall-clock durations of the 6 stages.
        """
        k = self.pos_val_core
        d = self.d
        #print(d)
        num_op_sum = 0
        def build_vec(inds, G, head=True):
            # Propagate the boundary core G through the index lists `inds`,
            # returning the resulting vector and an operation count.
            num_op_sum = 0
            if head:
                # Left boundary: collapse leading axes and sum over them.
                G = G.reshape(-1, G.shape[-1])
                #num_op_sum += (G != 0).sum() # Actually, all 1 in G on deiffernet places, thus no sum
                G = np.sum(G, axis=0)
            else:
                # Right boundary: collapse trailing axes instead.
                G = G.reshape(G.shape[0], -1)
                #num_op_sum += (G != 0).sum()
                G = np.sum(G, axis=1)
            #num_op_mult = num_op_sum
            #print(G)
            for idxx in inds:
                res_l = idxx.max() + 1
                res = np.zeros(res_l)
                for idx in idxx:
                    #res += TT_func_mat_vec(G, idx, res_l)
                    _, num_sum_cur = TT_func_mat_vec(G, idx, res)
                    num_op_sum += 2*num_sum_cur # 2* because '+' and '*' there
                #print(res)
                G = res
            return G, num_op_sum
        t0 = []
        t0.append(tpc())
        G0 = self.core(0, skip_build=True)
        t0.append(tpc()) #1
        # Left sweep (skipped when the value core is the very first one).
        G0, num_op_sum_cur = build_vec(self.indices[0], G0) if k > 0 else (np.array([[1]]), 0)
        t0.append(tpc()) #2
        num_op_sum += num_op_sum_cur
        G1 = self.core(d - 1, skip_build=True)
        t0.append(tpc()) #3
        # Right sweep (skipped when the value core is the very last one).
        G1, num_op_sum_cur = build_vec(self.indices[1], G1, False) if k < d - 1 else (np.array([[1]]), 0)
        num_op_sum += num_op_sum_cur
        t0.append(tpc()) #4
        # l0 = G0.size()
        #l1 = G1.size()
        mid_core = self.core(k, skip_build=True)
        t0.append(tpc()) #5
        num_op_sum += (mid_core != 0).sum()
        core_k = np.sum(mid_core, axis=1)
        t0.append(tpc()) #6
        #print(G0, core_k, G1)
        #print(G0.shape, self.core(k).shape, G1.shape)
        #print(G0, G1)
        n, m = core_k.shape
        num_op_sum += n*m + min(m, n) # mults
        num_op_sum += n*m + min(m, n) # sums
        self.num_op = num_op_sum
        times = np.array(t0)
        self._times = times[1:] - times[:-1]
        return (G0 @ core_k @ G1).item()
def show_n_core(self, n):
c = self.cores[n]
for i in range(c.shape[1]):
print(c[:, i, :])
# NOTE(review): this definition is dead code — it is immediately shadowed by
# the redefinition of `mult_and_mean` below, and it contains a debug `print`.
def mult_and_mean(Y1, Y2):
    """Sum of entries of the Hadamard product of two TT tensors (superseded)."""
    # First pair of cores: Kronecker-style product, then sum over all
    # axes except the (combined) trailing rank axis.
    G0 = Y1[0][:, None, :, :, None] * Y2[0][None, :, :, None, :]
    G0 = G0.reshape(-1, G0.shape[-1])
    G0 = np.sum(G0, axis=0)
    for G1, G2 in zip(Y1[1:], Y2[1:]):
        G = G1[:, None, :, :, None] * G2[None, :, :, None, :]
        G = G.reshape([G1.shape[0]*G2.shape[0], -1, G1.shape[-1]*G2.shape[-1]])
        G = np.sum(G, axis=1)
        print(G0.shape, G.shape)
        G0 = G0 @ G
    return G0.item()
def mult_and_mean(Y1, Y2):
    """Sum of entries of the elementwise (Hadamard) product of two TT tensors.

    Y1, Y2 -- lists of 3D TT-cores with matching mode sizes.
    """
    acc = np.array([[1]])
    for A, B in zip(Y1, Y2):
        left = A.shape[0] * B.shape[0]
        right = A.shape[-1] * B.shape[-1]
        # Kronecker product of the rank axes, elementwise in the mode axis,
        # then sum the mode axis away.
        kron = A[:, None, :, :, None] * B[None, :, :, None, :]
        acc = acc @ kron.reshape([left, -1, right]).sum(axis=1)
    return acc.item()
def partial_mean(Y):
    """Contract the mode axis of every core; return the boundary matrix."""
    acc = np.sum(Y[0], axis=1)
    for core in Y[1:]:
        acc = acc @ np.sum(core, axis=1)
    return acc
| 25,218
| 27.690557
| 114
|
py
|
PROTES
|
PROTES-main/calc/calc.py
|
import matplotlib as mpl
import numpy as np
import os
import pickle
import sys
from time import perf_counter as tpc
# Global matplotlib/seaborn styling for every figure produced by this script.
# NOTE(review): 'normal' is not a valid matplotlib font family name
# (matplotlib emits a warning and falls back) — TODO confirm intent.
mpl.rcParams.update({
    'font.family': 'normal',
    'font.serif': [],
    'font.sans-serif': [],
    'font.monospace': [],
    'font.size': 12,
    'text.usetex': False,
})
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import seaborn as sns
sns.set_context('paper', font_scale=2.5)
sns.set_style('white')
# NOTE(review): the string 'False' is truthy; rcParams coerces it, but a
# bare boolean False would be clearer — verify before changing.
sns.mpl.rcParams['legend.frameon'] = 'False'
# Run JAX in double precision on CPU only.
from jax.config import config
config.update('jax_enable_x64', True)
os.environ['JAX_PLATFORM_NAME'] = 'cpu'
import jax.numpy as jnp
from constr import ind_tens_max_ones
from teneva_bm import *
# Benchmark suite: 10 analytic functions, 4 QUBO problems and 6 optimal
# control problems (the last three with an additional constraint).
bms = [
    BmFuncAckley(d=7, n=16, name='P-01'),
    BmFuncAlpine(d=7, n=16, name='P-02'),
    BmFuncExp(d=7, n=16, name='P-03'),
    BmFuncGriewank(d=7, n=16, name='P-04'),
    BmFuncMichalewicz(d=7, n=16, name='P-05'),
    BmFuncPiston(d=7, n=16, name='P-06'),
    BmFuncQing(d=7, n=16, name='P-07'),
    BmFuncRastrigin(d=7, n=16, name='P-08'),
    BmFuncSchaffer(d=7, n=16, name='P-09'),
    BmFuncSchwefel(d=7, n=16, name='P-10'),
    BmQuboMaxcut(d=50, name='P-11'),
    BmQuboMvc(d=50, name='P-12'),
    BmQuboKnapQuad(d=50, name='P-13'),
    BmQuboKnapAmba(d=50, name='P-14'),
    BmOcSimple(d=25, name='P-15'),
    BmOcSimple(d=50, name='P-16'),
    BmOcSimple(d=100, name='P-17'),
    BmOcSimpleConstr(d=25, name='P-18'),
    BmOcSimpleConstr(d=50, name='P-19'),
    BmOcSimpleConstr(d=100, name='P-20'),
]
# Benchmark names grouped by problem class (used to select preprocessing
# and solver-specific options in `calc`).
BM_FUNC = ['P-01', 'P-02', 'P-03', 'P-04', 'P-05', 'P-06', 'P-07',
           'P-08', 'P-09', 'P-10']
BM_QUBO = ['P-11', 'P-12', 'P-13', 'P-14']
BM_OC = ['P-15', 'P-16', 'P-17']
BM_OC_CONSTR = ['P-18', 'P-19', 'P-20']
from opti import *
# Optimizers under comparison: 'Our' is PROTES, BS-* are the baselines.
Optis = {
    'Our': OptiProtes,
    'BS-1': OptiTTOpt,
    'BS-2': OptiOptimatt,
    'BS-3': OptiOPO,
    'BS-4': OptiPSO,
    'BS-5': OptiNB,
    'BS-6': OptiSPSA,
    'BS-7': OptiPortfolio,
}
class Log:
    """Minimal logger that echoes messages to stdout and a text file.

    The first message truncates the file; subsequent messages append.
    """

    def __init__(self, fpath='log.txt'):
        self.fpath = fpath
        self.is_new = True  # first write truncates, later writes append
        parent = os.path.dirname(self.fpath)
        if parent:
            os.makedirs(parent, exist_ok=True)

    def __call__(self, text):
        print(text)
        mode = 'w' if self.is_new else 'a'
        with open(self.fpath, mode) as f:
            f.write(text + '\n')
        self.is_new = False
def calc(m=int(1.E+4), seed=0):
    """Run every optimizer on every benchmark and persist the results.

    m    -- budget of objective-function requests per (benchmark, optimizer);
    seed -- numpy random seed, reset before each benchmark and each run.

    Results are saved incrementally to ``res.pickle`` after every run as
    ``res[bm.name][opti.name] = [m_list, y_list, y_best]``.
    """
    log = Log()
    res = {}
    for bm in bms:
        np.random.seed(seed)
        if bm.name in BM_FUNC:
            # We carry out a small random shift of the function's domain,
            # so that the optimum does not fall into the middle of the domain:
            bm = _prep_bm_func(bm)
        else:
            bm.prep()
        log(bm.info())
        res[bm.name] = {}
        for opti_name, Opti in Optis.items():
            # Reset the seed so every optimizer sees identical randomness.
            np.random.seed(seed)
            opti = Opti(name=opti_name)
            opti.prep(bm.get, bm.d, bm.n, m, is_f_batch=True)
            if bm.name in BM_OC_CONSTR and opti_name == 'Our':
                # Problem with constraint for PROTES (we use the initial
                # approximation of the special form in this case):
                P = ind_tens_max_ones(bm.d, 3, opti.opts_r)
                Pl = jnp.array(P[0], copy=True)
                Pm = jnp.array(P[1:-1], copy=True)
                Pr = jnp.array(P[-1], copy=True)
                P = [Pl, Pm, Pr]
                opti.opts(P=P)
            opti.optimize()
            log(opti.info())
            res[bm.name][opti.name] = [opti.m_list, opti.y_list, opti.y]
            # Save after each run so a crash loses at most one result.
            _save(res)
        log('\n\n')
def plot(m_min=1.E+0):
    """Plot convergence curves for three selected benchmarks into deps.png.

    m_min -- smallest request count shown on the (log-scaled) x axis.
    Only benchmarks listed in ``plot_opts`` are drawn; 'inv' flips the sign
    of y (for maximization problems stored as negated minimization).
    """
    plot_opts = {
        'P-02': {},
        'P-14': {'y_min': 1.8E+3, 'y_max': 3.2E+3, 'inv': True},
        'P-16': {'y_min': 1.E-2, 'y_max': 2.E+0},
    }
    res = _load()
    fig, axs = plt.subplots(1, 3, figsize=(24, 8))
    plt.subplots_adjust(wspace=0.3)
    i = -1
    for bm, item in res.items():
        if not bm in plot_opts.keys():
            continue
        i += 1
        ax = axs[i]
        ax.set_xlabel('Number of requests')
        for opti, data in item.items():
            m = np.array(data[0], dtype=int)
            y = np.array(data[1])
            if plot_opts[bm].get('inv'):
                y *= -1
            # First index with at least m_min requests.
            j = np.argmax(m >= m_min)
            nm = opti
            if nm == 'Our':
                nm = 'PROTES'
            ax.plot(m[j:], y[j:], label=nm,
                marker='o', markersize=8, linewidth=6 if nm == 'PROTES' else 3)
        # Legend only on the first subplot.
        _prep_ax(ax, xlog=True, ylog=True, leg=i==0)
        ax.set_xlim(m_min, 2.E+4)
        if 'y_min' in plot_opts[bm]:
            ax.set_ylim(plot_opts[bm]['y_min'], plot_opts[bm]['y_max'])
            #yticks = [1.8E+3, 2.0E+3, 2.2E+3, 2.4E+3, 2.6E+3, 2.8E+3, 3.0E+3, 3.2E+3]
            #ax.set(yticks=yticks, yticklabels=[int(])
            #ax.get_yaxis().get_major_formatter().labelOnlyBase = False
    plt.savefig('deps.png', bbox_inches='tight')
def text():
    """Print the LaTeX table body for the results table in the paper.

    Rows are grouped (Analytic Functions / QUBO / Control / Control+constr.)
    with \\hline and \\multirow markers inserted at the group boundaries;
    values >= 1e40 are rendered as "Fail".
    """
    res = _load()
    text = '\n\n% ' + '='*50 + '\n' + '% [START] Auto generated data \n\n'
    for i, (bm, item) in enumerate(res.items(), 1):
        if i in [11, 15, 18]:
            text += '\n\\hline\n'
        if i == 1:
            text += '\\multirow{10}{*}{\\parbox{1.6cm}{Analytic Functions}}\n'
        if i == 11:
            text += '\\multirow{3}{*}{QUBO}\n'
        if i == 15:
            text += '\\multirow{3}{*}{Control}\n'
        # NOTE(review): '\parbox' below relies on '\p' not being a Python
        # escape; the sibling string above writes '\\parbox' — same runtime
        # text, but the inconsistent escaping is worth normalizing.
        if i == 18:
            text += '\\multirow{3}{*}{\parbox{1.67cm}{Control +constr.}}\n'
        text += f' & {bm}\n'
        # Third element of each stored triple is the best objective value.
        vals = np.array([v[2] for v in item.values()])
        for v in vals:
            if v < 1.E+40:
                text += f' & {v:-8.1e}\n'
            else:
                text += f' & Fail\n'
        text += f' \\\\ \n'
    text += '\n\n\\hline\n\n'
    text += '\n% [END] Auto generated data \n% ' + '='*50 + '\n\n'
    print(text)
def _load(fpath='res.pickle'):
with open(fpath, 'rb') as f:
res = pickle.load(f)
return res
def _prep_ax(ax, xlog=False, ylog=False, leg=False, xint=False, xticks=None):
    """Apply the shared axis styling used by every plot in this script."""
    if xlog:
        ax.semilogx()
    if ylog:
        ax.semilogy()
    if leg:
        ax.legend(loc='upper right', frameon=True)
    ax.grid(ls=":")
    # Hide the top/right frame, keep ticks on the bottom/left only.
    for side in ('top', 'right'):
        ax.spines[side].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    if xint:
        # Restrict x ticks to integer locations.
        ax.xaxis.set_major_locator(MaxNLocator(integer=True))
    if xticks is not None:
        ax.set(xticks=xticks, xticklabels=xticks)
def _prep_bm_func(bm):
shift = np.random.randn(bm.d) / 10
a_new = bm.a - (bm.b-bm.a) * shift
b_new = bm.b + (bm.b-bm.a) * shift
bm.set_grid(a_new, b_new)
bm.prep()
return bm
def _save(res, fpath='res.pickle'):
with open(fpath, 'wb') as f:
pickle.dump(res, f, protocol=pickle.HIGHEST_PROTOCOL)
# CLI entry point: `python calc.py [calc|plot|text]` (default: calc).
if __name__ == '__main__':
    mode = sys.argv[1] if len(sys.argv) > 1 else 'calc'
    if mode == 'calc':
        calc()
    elif mode == 'plot':
        plot()
    elif mode == 'text':
        text()
    else:
        raise ValueError(f'Invalid computation mode "{mode}"')
| 7,201
| 25.477941
| 79
|
py
|
PROTES
|
PROTES-main/calc/constr.py
|
import numpy as np
import os
from construct_TT import tens
import teneva
def gen_func_pair(num_ones=3):
    """Transition pair [f0, f1] for an intermediate TT-core.

    f0 (place a 0): keeps state 0 or the saturated state `num_ones`,
    returning 0; any other state maps to None (no transition).
    f1 (place a 1): increments the running ones-count, capped at
    `num_ones`.
    """
    def on_zero(x):
        return 0 if x == 0 or x == num_ones else None

    def on_one(x):
        return min(num_ones, x + 1)

    return [on_zero, on_one]
def gen_func_pair_last(num_ones=3):
    """Transition pair [f0, f1] for the final TT-core.

    Accepting states yield 1; everything else yields None.
    f0 accepts states 0 and `num_ones`; f1 accepts any state with at
    least `num_ones - 1` ones already placed.
    """
    def last_zero(x):
        return 1 if x == 0 or x == num_ones else None

    def last_one(x):
        return 1 if x >= num_ones - 1 else None

    return [last_zero, last_one]
def ind_tens_max_ones(d, num_ones, r):
    """Build TT-cores of the indicator tensor of binary strings.

    The tensor is nonzero exactly on length-``d`` 0/1 index strings whose
    transition automaton (built from `gen_func_pair` / `gen_func_pair_last`
    with parameter ``num_ones``) accepts them; cores are then zero-padded
    in place to rank ``r``.
    """
    funcs = [gen_func_pair(num_ones)]*(d-1) + [gen_func_pair_last(num_ones)]
    cores = tens(funcs).cores
    # noise=0: pure zero-padding, no random perturbation.
    update_to_rank_r(cores, r, noise=0, inplace=True)
    #cores = teneva.orthogonalize(cores, k=0)
    return cores
def update_to_rank_r(cores, r, noise=1e-3, inplace=False):
    """Pad every TT-core so the internal ranks become exactly ``r``.

    New entries are filled with `noise`-scaled uniform random values
    (zeros when noise=0).  Boundary ranks stay 1.  If some core already
    exceeds the target rank, the padded train is truncated back to rank
    ``r`` via teneva at the end.  Returns the (possibly new) core list.
    """
    d = len(cores)
    out = cores if inplace else [None] * d
    needs_truncation = False
    for i, core in enumerate(cores):
        r1, n, r2 = core.shape
        target_left = 1 if i == 0 else r
        target_right = 1 if i == d - 1 else r
        if target_left < r1 or target_right < r2:
            print("Initial: Order to reduce rank, so I'll truncate it. BAD")
            needs_truncation = True
        if target_left == r1 and target_right == r2:
            out[i] = core
            continue
        padded = noise * np.random.random(
            [max(target_left, r1), n, max(target_right, r2)])
        padded[:r1, :, :r2] = core
        out[i] = padded
    if needs_truncation:
        out = teneva.truncate(out, r=r)
    return out
| 1,429
| 21.34375
| 77
|
py
|
PROTES
|
PROTES-main/calc/opti/opti_portfolio.py
|
import numpy as np
from opti import Opti
try:
import nevergrad as ng
with_ng = True
except Exception as e:
with_ng = False
class OptiPortfolio(Opti):
    """Baseline optimizer: nevergrad's Portfolio strategy."""
    def __init__(self, name='portfolio', *args, **kwargs):
        super().__init__(name, *args, **kwargs)
    def _init(self):
        # Mark the optimizer unusable if nevergrad failed to import.
        if not with_ng:
            self.err = 'Need "nevergrad" module'
            return
    def _optimize(self):
        self._optimize_ng(ng.optimizers.Portfolio)
| 464
| 19.217391
| 58
|
py
|
PROTES
|
PROTES-main/calc/opti/opti_ttopt.py
|
from opti import Opti
import numpy as np
try:
from ttopt import TTOpt
with_ttopt = True
except Exception as e:
with_ttopt = False
class OptiTTOpt(Opti):
    """Baseline optimizer: TTOpt (tensor-train optimization)."""
    def __init__(self, name='ttopt', *args, **kwargs):
        super().__init__(name, *args, **kwargs)
    def opts(self, with_qtt=True):
        # with_qtt: quantize each mode of size 2^q into q binary modes.
        self.opts_with_qtt = with_qtt
    def _init(self):
        # Mark the optimizer unusable if ttopt failed to import.
        if not with_ttopt:
            self.err = 'Need "ttopt" module'
            return
    def _optimize(self):
        if self.opts_with_qtt and self.n[0] != 2:
            # QTT-solver:
            n = None
            p = 2
            q = int(np.log2(self.n[0]))
            if p**q != self.n[0]:
                raise ValueError('Grid should be power of 2 for QTT')
        else:
            # TT-solver or binary tensor:
            n = self.n
            p = None
            q = None
        tto = TTOpt(self.f_batch, d=self.d, n=n, p=p, q=q, evals=self.m_max,
            is_func=False, is_vect=True)
        (tto.maximize if self.is_max else tto.minimize)()
| 1,042
| 24.439024
| 76
|
py
|
PROTES
|
PROTES-main/calc/opti/opti.py
|
import numpy as np
from time import perf_counter as tpc
class Opti:
    """Base class for discrete optimizers with a shared request budget.

    Subclasses implement ``_optimize`` (and optionally ``_init`` /
    ``opts``) and call ``self.f`` / ``self.f_batch``, which count the
    requests and track the best point seen so far.
    """
    def __init__(self, name='opti', with_arg_list=False, log=False):
        self.name = name
        self.with_arg_list = with_arg_list  # also record best indices i
        self.log = log                      # print progress on improvement
        self.err = ''                       # non-empty => optimizer unusable
        self.is_prep = False
        self.is_done = False
        self.t = 0.        # elapsed time at last improvement / completion
        self.m = 0         # number of objective requests made so far
        self.i = None      # best index vector found
        self.y = None      # best objective value found
        self.e = None      # |y - y_real| if the true optimum is known
        self.m_list = []   # request counts at each improvement
        self.i_list = []
        self.y_list = []
        self.e_list = []
        self.opts()
    def check(self, I, y):
        """Update the best-so-far record from a batch of points (I, y)."""
        ind_opt = np.argmax(y) if self.is_max else np.argmin(y)
        i_cur = I[ind_opt, :]
        y_cur = y[ind_opt]
        # New best if nothing recorded yet or the batch improves on it.
        is_new = self.y is None
        is_new = is_new or self.is_max and self.y < y_cur
        is_new = is_new or not self.is_max and self.y > y_cur
        if is_new:
            self.i = i_cur.copy()
            self.y = y_cur
            self.m_list.append(self.m)
            self.y_list.append(self.y)
            if self.y_real is not None:
                self.e = np.abs(self.y - self.y_real)
                self.e_list.append(self.e)
            if self.with_arg_list:
                self.i_list.append(self.i.copy())
            self.t = tpc() - self.t_start
            if self.log:
                print(self.info())
    def f(self, i, with_check=True):
        """Evaluate a single index vector, counting the request."""
        if self.is_f_batch:
            y = self.f_batch(i.reshape(1, -1), with_check=False)[0]
        else:
            y = self.f_(i)
            self.m += 1
        if with_check:
            self.check(i.reshape(1, -1), np.array([y]))
        return y
    def f_batch(self, I, with_check=True):
        """Evaluate a batch of index vectors (rows of I)."""
        if self.is_f_batch:
            y = self.f_(I)
            self.m += len(I)
        else:
            # Fall back to one-by-one evaluation (counts inside self.f).
            y = np.array([self.f(i, with_check=False) for i in I])
        if with_check:
            self.check(I, y)
        return y
    def info(self, len_name=12):
        """One-line progress summary: requests, time, error or best value."""
        name = self.name + ' '*max(0, len_name-len(self.name))
        text = f'{name} > '
        text += f'm {self.m:-7.1e} | '
        text += f't {self.t:-9.3e} | '
        if self.e is not None:
            text += f'e {self.e:-7.1e}'
        else:
            text += f'y {self.y:-11.5e}'
        if self.is_done:
            text += ' <<< DONE'
        return text
    def optimize(self):
        """Run the optimization after validating setup; raises if unusable."""
        if not self.is_prep:
            self.err = 'Call "prep" method before usage'
        else:
            self._init()
        if self.err:
            raise ValueError(f'Method {self.name} is not ready ({self.err})')
        self.t_start = tpc()
        self._optimize()
        self.is_done = True
        self.t = tpc() - self.t_start
        if self.log:
            print(self.info())
    def opts(self):
        # Hook for subclass-specific options; base class has none.
        return
    def prep(self, f, d, n, m, y_real=None, is_max=False, is_f_batch=False):
        """Configure the problem: objective f, dims d, mode sizes n, budget m."""
        self.f_ = f
        self.d = d
        self.n = n
        self.m_max = int(m)
        self.y_real = y_real        # known optimum (enables error tracking)
        self.is_max = is_max
        self.is_f_batch = is_f_batch
        self.is_prep = True
        return self
    def _init(self):
        # Hook for subclass initialization / availability checks.
        return
    def _optimize(self):
        raise NotImplementedError()
    def _optimize_ng(self, solver):
        """Shared driver for all nevergrad-based baselines."""
        import nevergrad as ng
        optimizer = solver(
            parametrization=ng.p.TransitionChoice(range(self.n[0]),
                repetitions=len(self.n)),
            budget=self.m_max,
            num_workers=1)
        recommendation = optimizer.provide_recommendation()
        for _ in range(optimizer.budget):
            x = optimizer.ask()
            i = np.array(x.value, dtype=int)
            optimizer.tell(x, self.f(i))
| 3,667
| 23.291391
| 77
|
py
|
PROTES
|
PROTES-main/calc/opti/opti_protes.py
|
from opti import Opti
try:
from protes import protes
with_protes = True
except Exception as e:
with_protes = False
class OptiProtes(Opti):
def __init__(self, name='protes', *args, **kwargs):
super().__init__(name, *args, **kwargs)
def opts(self, k=100, k_top=10, k_gd=1, lr=5.E-2, r=5, P=None,
seed=0):
self.opts_k = k
self.opts_k_top = k_top
self.opts_k_gd = k_gd
self.opts_lr = lr
self.opts_r = r
self.opts_P = P
self.opts_seed = seed
def _init(self):
if not with_protes:
self.err = 'Need "protes" module'
return
def _optimize(self):
protes(self.f_batch, self.d, self.n[0], self.m_max, P=self.opts_P,
k=self.opts_k, k_top=self.opts_k_top, k_gd=self.opts_k_gd,
lr=self.opts_lr, r=self.opts_r, is_max=self.is_max,
seed=self.opts_seed)
| 923
| 25.4
| 74
|
py
|
PROTES
|
PROTES-main/calc/opti/opti_pso.py
|
import numpy as np
from opti import Opti
try:
import nevergrad as ng
with_ng = True
except Exception as e:
with_ng = False
class OptiPSO(Opti):
def __init__(self, name='pso', *args, **kwargs):
super().__init__(name, *args, **kwargs)
def _init(self):
if not with_ng:
self.err = 'Need "nevergrad" module'
return
def _optimize(self):
self._optimize_ng(ng.optimizers.PSO)
| 446
| 18.434783
| 52
|
py
|
PROTES
|
PROTES-main/calc/opti/opti_nb.py
|
import numpy as np
from opti import Opti
try:
import nevergrad as ng
with_ng = True
except Exception as e:
with_ng = False
class OptiNB(Opti):
def __init__(self, name='nb', *args, **kwargs):
super().__init__(name, *args, **kwargs)
def _init(self):
if not with_ng:
self.err = 'Need "nevergrad" module'
return
def _optimize(self):
self._optimize_ng(ng.optimizers.NoisyBandit)
| 452
| 18.695652
| 52
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.