| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
DeepPersonality | DeepPersonality-main/dpcv/exps_first_stage/03_audiovisual_residual_networks.py | import torch.nn as nn
import torch.optim as optim
from dpcv.engine.bi_modal_trainer import BiModalTrainer
from dpcv.tools.common import setup_seed, setup_config
from dpcv.tools.logger import make_logger
from dpcv.modeling.networks.audio_visual_residual import get_audiovisual_resnet_model
from dpcv.config.audiovisual_resnet_cfg import cfg
from dpcv.data.datasets.audio_visual_data import make_data_loader
from dpcv.tools.common import parse_args
from dpcv.evaluation.summary import TrainSummary
from dpcv.tools.exp import run
def main(args, cfg):
setup_seed(12345)
cfg = setup_config(args, cfg)
logger, log_dir = make_logger(cfg.OUTPUT_DIR)
data_loader = {
"train": make_data_loader(cfg, mode="train"),
"valid": make_data_loader(cfg, mode="valid"),
"test": make_data_loader(cfg, mode="test"),
}
model = get_audiovisual_resnet_model()
loss_f = nn.L1Loss()
optimizer = optim.SGD(model.parameters(), lr=0.005, weight_decay=1e-4)
# optimizer = optim.Adam(model.parameters(), lr=0.0002, betas=(0.5, 0.999), eps=1e-8)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, gamma=cfg.FACTOR, milestones=cfg.MILESTONE)
collector = TrainSummary()
trainer = BiModalTrainer(cfg, collector, logger)
run(cfg, data_loader, model, loss_f, optimizer, scheduler, trainer, collector, logger, log_dir)
if __name__ == "__main__":
args = parse_args()
main(args, cfg)
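# Note: the experiment scripts under dpcv/exps_first_stage all follow this
# template: fix the random seed, merge command-line overrides into the config
# via setup_config(), build train/valid/test data loaders, construct the
# model, loss, optimizer and MultiStepLR scheduler, and hand everything to
# run() together with a TrainSummary collector and the matching trainer.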
| 1,439 | 34.121951 | 101 | py |
DeepPersonality | DeepPersonality-main/dpcv/exps_first_stage/10_swin_transformer_on_personality.py | import torch.optim as optim
import torch.nn as nn
from dpcv.config.swin_transformer_cfg import cfg
from dpcv.modeling.networks.swin_transformer import get_swin_transformer_model
from dpcv.tools.common import setup_seed, setup_config
from dpcv.tools.logger import make_logger
from dpcv.tools.common import parse_args
from dpcv.evaluation.summary import TrainSummary
from dpcv.data.datasets.video_frame_data import make_data_loader
from dpcv.engine.bi_modal_trainer import ImageModalTrainer
from dpcv.tools.exp import run
def main(args, cfg):
setup_seed(12345)
cfg = setup_config(args, cfg)
logger, log_dir = make_logger(cfg.OUTPUT_DIR)
data_loader = {
"train": make_data_loader(cfg, mode="train"),
"valid": make_data_loader(cfg, mode="valid"),
"test": make_data_loader(cfg, mode="test"),
}
model = get_swin_transformer_model()
loss_f = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=cfg.LR_INIT, weight_decay=cfg.WEIGHT_DECAY)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, gamma=cfg.FACTOR, milestones=cfg.MILESTONE)
collector = TrainSummary()
trainer = ImageModalTrainer(cfg, collector, logger)
run(cfg, data_loader, model, loss_f, optimizer, scheduler, trainer, collector, logger, log_dir)
if __name__ == "__main__":
args = parse_args()
main(args, cfg)
| 1,363 | 33.1 | 101 | py |
DeepPersonality | DeepPersonality-main/dpcv/exps_first_stage/11_3D_resnet_on_personality.py | import torch.nn as nn
import torch.optim as optim
from dpcv.config.resnet_3d_cfg import cfg
from dpcv.modeling.networks.resnet_3d import get_3d_resnet_model
from dpcv.tools.common import setup_seed, setup_config
from dpcv.tools.logger import make_logger
from dpcv.tools.common import parse_args
from dpcv.evaluation.summary import TrainSummary
from dpcv.data.datasets.video_segment_data import make_data_loader
from dpcv.engine.bi_modal_trainer import ImageModalTrainer
from dpcv.tools.exp import run
def main(args, cfg):
setup_seed(12345)
cfg = setup_config(args, cfg)
logger, log_dir = make_logger(cfg.OUTPUT_DIR)
data_loader = {
"train": make_data_loader(cfg, mode="train"),
"valid": make_data_loader(cfg, mode="valid"),
"test": make_data_loader(cfg, mode="test"),
}
model = get_3d_resnet_model(50)
loss_f = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=cfg.LR_INIT, weight_decay=cfg.WEIGHT_DECAY)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, gamma=cfg.FACTOR, milestones=cfg.MILESTONE)
collector = TrainSummary()
trainer = ImageModalTrainer(cfg, collector, logger)
run(cfg, data_loader, model, loss_f, optimizer, scheduler, trainer, collector, logger, log_dir)
if __name__ == "__main__":
args = parse_args()
main(args, cfg)
| 1,339 | 32.5 | 101 | py |
DeepPersonality | DeepPersonality-main/dpcv/exps_first_stage/02_bi-modal-LSTM_nn.py | import torch.nn as nn
import os
import torch.optim as optim
from dpcv.config.bi_modal_lstm_cfg import cfg
from dpcv.engine.bi_modal_trainer import BimodalLSTMTrain, ImgModalLSTMTrain, AudModalLSTMTrain
from dpcv.tools.common import setup_seed, setup_config
from dpcv.tools.logger import make_logger
from dpcv.modeling.networks.bi_modal_lstm import (
get_bi_modal_lstm_model,
get_img_modal_lstm_model,
get_aud_modal_lstm_model
)
from dpcv.data.datasets.temporal_data import make_data_loader
from dpcv.tools.common import parse_args
from dpcv.evaluation.summary import TrainSummary
from dpcv.tools.exp import run
def main(args, cfg):
setup_seed(12345)
cfg = setup_config(args, cfg)
logger, log_dir = make_logger(cfg.OUTPUT_DIR)
data_loader = {
"train": make_data_loader(cfg, mode="train"),
"valid": make_data_loader(cfg, mode="valid"),
"test": make_data_loader(cfg, mode="test"),
}
model = get_bi_modal_lstm_model()
# model = get_img_modal_lstm_model() # to test single performance
# model = get_aud_modal_lstm_model() # to test single performance
loss_f = nn.MSELoss() # according to the paper
# loss_f = nn.L1Loss()
optimizer = optim.SGD(model.parameters(), lr=cfg.LR_INIT, weight_decay=cfg.WEIGHT_DECAY)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, gamma=cfg.FACTOR, milestones=cfg.MILESTONE)
collector = TrainSummary()
trainer = BimodalLSTMTrain(cfg, collector, logger)
# trainer = ImgModalLSTMTrain(cfg, collector, logger)
# trainer = AudModalLSTMTrain(cfg, collector, logger)
run(cfg, data_loader, model, loss_f, optimizer, scheduler, trainer, collector, logger, log_dir)
if __name__ == "__main__":
args = parse_args()
main(args, cfg)
| 1,773 | 34.48 | 101 | py |
DeepPersonality | DeepPersonality-main/dpcv/exps_first_stage/14_video_action_transformer_on_personality.py | import torch.nn as nn
import torch.optim as optim
from dpcv.config.vat_cfg import cfg
from dpcv.modeling.networks.video_action_transformer import get_vat_model
from dpcv.tools.common import setup_seed, setup_config
from dpcv.tools.logger import make_logger
from dpcv.tools.common import parse_args
from dpcv.evaluation.summary import TrainSummary
from dpcv.data.datasets.vat_data import make_data_loader
from dpcv.engine.bi_modal_trainer import ImageModalTrainer
from dpcv.tools.exp import run
def main(args, cfg):
setup_seed(12345)
cfg = setup_config(args, cfg)
logger, log_dir = make_logger(cfg.OUTPUT_DIR)
data_loader = {
"train": make_data_loader(cfg, mode="train"),
"valid": make_data_loader(cfg, mode="valid"),
"test": make_data_loader(cfg, mode="test"),
}
model = get_vat_model()
loss_f = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=cfg.LR_INIT, weight_decay=cfg.WEIGHT_DECAY)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, gamma=cfg.FACTOR, milestones=cfg.MILESTONE)
collector = TrainSummary()
trainer = ImageModalTrainer(cfg, collector, logger)
run(cfg, data_loader, model, loss_f, optimizer, scheduler, trainer, collector, logger, log_dir)
if __name__ == "__main__":
args = parse_args()
main(args, cfg)
| 1,324 | 32.125 | 101 | py |
DeepPersonality | DeepPersonality-main/dpcv/exps_first_stage/06_interpreting_cnn_models.py | import torch
import torch.nn as nn
import torch.optim as optim
from dpcv.config.interpret_dan_cfg import cfg
from dpcv.engine.bi_modal_trainer import ImageModalTrainer
from dpcv.modeling.networks.interpret_dan import get_interpret_dan_model
from dpcv.tools.common import setup_seed, setup_config
from dpcv.tools.logger import make_logger
from dpcv.tools.common import parse_args
from dpcv.evaluation.summary import TrainSummary
from dpcv.data.datasets.video_frame_data import make_data_loader
from dpcv.tools.exp import run
def main(args, cfg):
setup_seed(12345)
cfg = setup_config(args, cfg)
logger, log_dir = make_logger(cfg.OUTPUT_DIR)
data_loader = {
"train": make_data_loader(cfg, mode="train"),
"valid": make_data_loader(cfg, mode="valid"),
"test": make_data_loader(cfg, mode="test")
}
model = get_interpret_dan_model(cfg, pretrained=True)
loss_f = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=cfg.LR_INIT, weight_decay=cfg.WEIGHT_DECAY)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, gamma=cfg.FACTOR, milestones=cfg.MILESTONE)
collector = TrainSummary()
trainer = ImageModalTrainer(cfg, collector, logger)
run(cfg, data_loader, model, loss_f, optimizer, scheduler, trainer, collector, logger, log_dir)
def image_process(img):
import torchvision.transforms as transforms
from PIL import Image
trans_resize = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop((224, 224)),
])
trans_tensor = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
img = Image.open(img).convert("RGB")
img_resize = trans_resize(img)
img_tensor = trans_tensor(img_resize).unsqueeze(0).cuda()
return img_resize, img_tensor
def load_model(cfg, weights):
model = get_interpret_dan_model(cfg, pretrained=False)
checkpoint = torch.load(weights)
model.load_state_dict(checkpoint["model_state_dict"])
# model.load_state_dict(weights)
return model
def visualize_cam(model_weights, image, trait_id=None):
from dpcv.tools.cam import CAM
from dpcv.tools.cam_vis import to_pil_image, overlay_mask
import matplotlib.pylab as plt
img_resize, img_tensor = image_process(image)
model = load_model(cfg, model_weights)
cam_extractor = CAM(model, enable_hooks=False)
cam_extractor._hooks_enabled = True
model.zero_grad()
scores = model(img_tensor)
trait_id = scores.squeeze(0).argmax().item() if trait_id is None else trait_id
activation_map = cam_extractor(trait_id, scores).cpu()
cam_extractor.clear_hooks()
cam_extractor._hooks_enabled = False
heatmap = to_pil_image(activation_map, mode='F')
result = overlay_mask(img_resize, heatmap, alpha=0.5)
plt.imshow(result)
plt.show()
if __name__ == "__main__":
args = parse_args()
main(args, cfg)
# visualize_cam(
# "../results/interpret_img/09-25_00-00/checkpoint_84.pkl",
# "../datasets/image_data/test_data/0uCqd5hZcyI.002/frame_100.jpg",
# )
| 3,142 | 31.402062 | 101 | py |
DeepPersonality | DeepPersonality-main/dpcv/exps_first_stage/12_slow_fast_on_personality.py | import torch.nn as nn
import torch.optim as optim
from dpcv.config.slow_fast_cfg import cfg
from dpcv.modeling.networks.slow_fast import get_slow_fast_model
from dpcv.tools.common import setup_seed, setup_config
from dpcv.tools.logger import make_logger
from dpcv.tools.common import parse_args
from dpcv.evaluation.summary import TrainSummary
from dpcv.data.datasets.slow_fast_data import make_data_loader
from dpcv.engine.bi_modal_trainer import ImageListTrainer
from dpcv.tools.exp import run
def main(args, cfg):
setup_seed(12345)
cfg = setup_config(args, cfg)
logger, log_dir = make_logger(cfg.OUTPUT_DIR)
data_loader = {
"train": make_data_loader(cfg, mode="train"),
"valid": make_data_loader(cfg, mode="valid"),
"test": make_data_loader(cfg, mode="test"),
}
model = get_slow_fast_model()
loss_f = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=cfg.LR_INIT, weight_decay=cfg.WEIGHT_DECAY)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, gamma=cfg.FACTOR, milestones=cfg.MILESTONE)
collector = TrainSummary()
trainer = ImageListTrainer(cfg, collector, logger)
run(cfg, data_loader, model, loss_f, optimizer, scheduler, trainer, collector, logger, log_dir)
if __name__ == "__main__":
args = parse_args()
main(args, cfg)
| 1,331 | 32.3 | 101 | py |
DeepPersonality | DeepPersonality-main/dpcv/exps_first_stage/15_key_dynamic_image.py | from dpcv.engine.excitation_core import contrastive_excitation_backprop
import torchvision
from PIL import Image
from dpcv.tools.common import get_device
from dpcv.modeling.networks.excitation_bp_rnn import AlexNetLSTM
from dpcv.tools.draw import plot_example
def get_example_data(shape=224):
"""Get example data to demonstrate visualization techniques.
Args:
shape (int or tuple of int, optional): shape to resize input image to.
Default: ``224``.
Returns:
(:class:`torch.nn.Module`, :class:`torch.Tensor`, int, int): a tuple
containing
- a convolutional neural network model in evaluation mode.
- a sample input tensor image.
- the ImageNet category id of an object in the image.
- the ImageNet category id of another object in the image.
"""
model = AlexNetLSTM()
# Switch to eval mode to make the visualization deterministic.
model.eval()
# We do not need grads for the parameters.
for param in model.parameters():
param.requires_grad_(False)
    # Load the example image shipped with the repository.
img = Image.open("../datasets/demo/both.png")
# Pre-process the image and convert into a tensor
transform = torchvision.transforms.Compose([
torchvision.transforms.Resize(shape),
torchvision.transforms.CenterCrop(shape),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
x = transform(img).unsqueeze(0)
# bulldog category id.
category_id_1 = 1 # 245
# persian cat category id.
category_id_2 = 0 # 285
# Move model and input to device.
dev = get_device()
model = model.to(dev)
x = x.to(dev)
return model, x, category_id_1, category_id_2
if __name__ == "__main__":
model, x, category_id, _ = get_example_data()
# Contrastive excitation backprop.
saliency = contrastive_excitation_backprop(
model,
x,
category_id,
saliency_layer='extractor.features.1',
contrast_layer='extractor.features.4',
# classifier_layer='classifier.6',
)
# Plots.
plot_example(x, saliency, 'contrastive excitation backprop', category_id)
| 2,298 | 28.474359 | 78 | py |
DeepPersonality | DeepPersonality-main/dpcv/engine/excitation_core.py |
__all__ = [
"contrastive_excitation_backprop",
"eltwise_sum",
"excitation_backprop",
"ExcitationBackpropContext",
]
import torch
import torch.nn as nn
from torch.autograd import Function
from dpcv.tools.excitation_bp import Patch, Probe
from dpcv.tools.excitation_bp import get_backward_gradient, get_module
from dpcv.tools.excitation_bp import saliency, resize_saliency
class EltwiseSumFunction(Function):
"""
Implementation of a skip connection (i.e., element-wise sum function)
as a :class:`torch.autograd.Function`. This is necessary for patching
the skip connection as a :class:`torch.nn.Module` object (i.e.,
:class:`EltwiseSum`).
"""
@staticmethod
def forward(ctx, *inputs):
ctx.save_for_backward(*inputs)
output = inputs[0]
for i in range(1, len(inputs)):
output = output + inputs[i]
return output
@staticmethod
def backward(ctx, grad_output):
inputs = ctx.saved_tensors
return (grad_output,) * len(inputs)
# Alias for :class:`EltwiseSumFunction`.
eltwise_sum = EltwiseSumFunction.apply
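# Usage sketch (a hypothetical residual block, not taken from the models in
# this repository): replacing the usual in-place addition of a skip
# connection with eltwise_sum lets ExcitationBackpropContext below patch it.
#
#     out = self.conv_block(x)
#     out = eltwise_sum(out, x)   # instead of ``out = out + x``
#     out = self.relu(out)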
class ExcitationBackpropContext(object):
"""Context to use Excitation Backpropagation rules."""
@staticmethod
def _patch_conv(target_name, enable, debug):
"""Patch conv functions to use excitation backprop rules.
        Replicates the implementation provided in:
https://github.com/jimmie33/Caffe-ExcitationBP/blob/master/src/caffe/layers/conv_layer.cpp
Args:
target_name (str): name of function to patch (i.e.,
``'torch.nn.functional.conv_1d'``).
enable (bool): If True, enable excitation backprop rules.
debug (bool): If True, print debug statements.
Returns:
:class:`.common.Patch`: object that patches the function with
the name :attr:`target_name` with a new callable that implements
the excitation backprop rules for the conv function.
"""
target, attribute = Patch.resolve(target_name)
conv = getattr(target, attribute)
def forward(ctx, input, weight, bias=None, *args, **kwargs):
ctx.save_for_backward(input, weight, bias)
ctx.args = args
ctx.kwargs = kwargs
if debug:
print("EBP " + target_name)
return conv(input, weight, bias, *ctx.args, **ctx.kwargs)
def backward(ctx, grad_output):
def get(i):
x = ctx.saved_tensors[i]
if x is None:
return
x = x.detach()
x.requires_grad_(ctx.needs_input_grad[i])
return x
inputs_ = get(0), get(1), get(2)
grad_inputs_ = [None, None, None]
subset = [i for i, g in enumerate(ctx.needs_input_grad) if g]
inputs_subset = [inputs_[i] for i in subset]
with torch.enable_grad():
# EBP changes only the gradients w.r.t. inputs_[0]. We also
# compute the gradients w.r.t. the parameters if needed,
# although they are trash. Perhaps it would be better to set
# them to None?
#
# The expectation is that the input to the conv layer is
# non-negative, which is typical for all but the first layer
# due to the ReLUs. Some other implementation makes sure by
# clamping inputs_[0].
# 1. set weight W+ to be non-negative and disable bias.
if enable:
input = inputs_[0]
weight = inputs_[1].clamp(min=0)
bias = None
else:
input = inputs_[0]
weight = inputs_[1]
bias = inputs_[2]
# 2. do forward pass.
output_ebp = conv(
input,
weight,
bias,
*ctx.args,
**ctx.kwargs
)
# 3. normalize gradient by the output of the forward pass.
if enable:
grad_output = grad_output / (output_ebp + 1e-20)
# 4. do backward pass.
_ = torch.autograd.grad(
output_ebp, inputs_subset, grad_output, only_inputs=True)
for i, j in enumerate(subset):
grad_inputs_[j] = _[i]
# 5. multiply gradient with the layer's input.
if ctx.needs_input_grad[0] and enable:
grad_inputs_[0] *= inputs_[0]
return (grad_inputs_[0], grad_inputs_[1], grad_inputs_[2],
None, None, None, None)
autograd_conv = type(
'EBP_' + attribute,
(torch.autograd.Function,),
{'forward': staticmethod(forward), 'backward': staticmethod(backward)}
)
return Patch(target_name, autograd_conv.apply)
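    # Worked example of the rule implemented in backward() above for a single
    # linear unit y = w . x with non-negative weights (a 1x1 "conv"):
    # w+ = [1, 3], x = [2, 1]  =>  forward output y = 2*1 + 1*3 = 5.
    # An incoming excitation P(y) = 1 is first normalised by y (1/5 = 0.2),
    # backpropagated through w+ to give [0.2, 0.6], and finally multiplied by
    # the input, yielding P(x) = [0.4, 0.6]; note sum(P(x)) = P(y) = 1, i.e.
    # the excitation mass is conserved across the layer.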
@staticmethod
def _patch_pool(target_name, enable, debug):
"""Patch pool functions to use excitation backprop rules.
        Replicates the implementation provided in:
https://github.com/jimmie33/Caffe-ExcitationBP/blob/master/src/caffe/layers/pooling_layer.cpp
Args:
target_name (str): name of function to patch (i.e.,
``'torch.nn.functional.avg_pool1d'``).
enable (bool): If True, enable excitation backprop rules.
debug (bool): If True, print debug statements.
Returns:
:class:`.common.Patch`: object that patches the function with
the name :attr:`target_name` with a new callable that implements
the excitation backprop rules for the pool function.
"""
target, attribute = Patch.resolve(target_name)
pool = getattr(target, attribute)
def forward(ctx, input, *args, **kwargs):
ctx.save_for_backward(input)
ctx.args = args
ctx.kwargs = kwargs
if debug:
print('EBP ' + target_name)
return pool(input, *ctx.args, **ctx.kwargs)
def backward(ctx, grad_output):
if not ctx.needs_input_grad[0]:
return None,
input_ = ctx.saved_tensors[0].detach()
input_.requires_grad_(True)
with torch.enable_grad():
# 1. forward pass.
output_ebp = pool(input_, *ctx.args, **ctx.kwargs)
# 2. normalize gradient by the output of the forward pass.
if enable:
grad_output = grad_output / (output_ebp + 1e-20)
# 3. do backward pass.
grad_input_ = torch.autograd.grad(
output_ebp, input_, grad_output, only_inputs=True)[0]
# 4. multiply gradient with layer's input.
if enable:
grad_input_ *= input_
return grad_input_, None, None, None, None, None, None
autograd_pool = type('EBP_' + attribute, (torch.autograd.Function,), {
'forward': staticmethod(forward),
'backward': staticmethod(backward),
})
return Patch(target_name, autograd_pool.apply)
@staticmethod
def _patch_norm(target_name, enable, debug):
"""
Patch normalization functions (e.g., batch norm) to use excitation
backprop rules.
Args:
target_name (str): name of function to patch (i.e.,
``'torch.nn.functional.avg_pool1d'``).
enable (bool): If True, enable excitation backprop rules.
debug (bool): If True, print debug statements.
Returns:
:class:`.common.Patch`: object that patches the function with
the name :attr:`target_name` with a new callable that implements
the excitation backprop rules for the normalization function.
"""
target, attribute = Patch.resolve(target_name)
norm = getattr(target, attribute)
def forward(ctx, input, *args, **kwargs):
ctx.save_for_backward(input)
ctx.args = args
ctx.kwargs = kwargs
if debug:
print('EBP ' + target_name)
return norm(input, *ctx.args, **ctx.kwargs)
def backward(ctx, grad_output):
if enable:
return grad_output, None, None, None, None, None, None, None
input_ = ctx.saved_tensors[0].detach()
input_.requires_grad_(True)
with torch.enable_grad():
output_ebp = norm(input_, *ctx.args, **ctx.kwargs)
grad_input_ = torch.autograd.grad(
output_ebp, input_, grad_output, only_inputs=True)[0]
return grad_input_, None, None, None, None, None, None, None
autograd_norm = type('EBP_' + attribute, (torch.autograd.Function,), {
'forward': staticmethod(forward),
'backward': staticmethod(backward),
})
return Patch(target_name, autograd_norm.apply)
def _patch_eltwise_sum(self, target_name, enable, debug):
"""
Patch element-wise sum function (e.g., skip connection) to use
excitation backprop rules.
Args:
target_name (str): name of function to patch (i.e.,
``'torch.nn.functional.avg_pool1d'``).
enable (bool): If True, enable excitation backprop rules.
debug (bool): If True, print debug statements.
Returns:
:class:`.common.Patch`: object that patches the function with
the name :attr:`target_name` with a new callable that implements
the excitation backprop rules for the element-wise sum function.
"""
target, attribute = Patch.resolve(target_name)
eltwise_sum_f = getattr(target, attribute)
def forward(ctx, *inputs):
ctx.save_for_backward(*inputs)
if debug:
print("EBP " + target_name)
return eltwise_sum_f(*inputs)
def backward(ctx, grad_output):
inputs = ctx.saved_tensors
if not enable:
return (grad_output, ) * len(inputs)
inputs = [inputs[i].detach() for i in range(len(inputs))]
output = eltwise_sum_f(*inputs)
grad_outputs = []
for input in inputs:
grad_outputs.append(input / output * grad_output)
return tuple(grad_outputs)
autograd_eltwise_sum = type('EBP_' + attribute,
(torch.autograd.Function,), {
'forward': staticmethod(forward),
'backward': staticmethod(backward),
})
return Patch(target_name, autograd_eltwise_sum.apply)
def __init__(self, enable=True, debug=False):
self.enable = enable
self.debug = debug
self.patches = []
def __enter__(self):
# Patch torch functions for convolutional, linear, average pooling,
# and adaptive average pooling layers. Also patch eltwise_sum function
# (for skip connection in resnet models).
self.patches = [
self._patch_conv('torch.nn.functional.conv1d',
self.enable, self.debug),
self._patch_conv('torch.nn.functional.conv2d',
self.enable, self.debug),
self._patch_conv('torch.nn.functional.conv3d',
self.enable, self.debug),
self._patch_conv('torch.nn.functional.linear',
self.enable, self.debug),
self._patch_pool('torch.nn.functional.avg_pool1d',
self.enable, self.debug),
self._patch_pool('torch.nn.functional.avg_pool2d',
self.enable, self.debug),
self._patch_pool('torch.nn.functional.avg_pool3d',
self.enable, self.debug),
self._patch_pool('torch.nn.functional.adaptive_avg_pool1d',
self.enable, self.debug),
self._patch_pool('torch.nn.functional.adaptive_avg_pool2d',
self.enable, self.debug),
self._patch_pool('torch.nn.functional.adaptive_avg_pool3d',
self.enable, self.debug),
self._patch_norm('torch.nn.functional.batch_norm',
self.enable, self.debug),
self._patch_eltwise_sum(
                'dpcv.engine.excitation_core.eltwise_sum',
self.enable,
self.debug),
]
return self
def __exit__(self, type, value, traceback):
for patch in self.patches:
patch.remove()
return False # re-raise any exception
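def _excitation_backprop_usage_sketch(model, x, gradient):
    """Minimal usage sketch of ExcitationBackpropContext (``model``, ``x`` and
    ``gradient`` are hypothetical placeholders supplied by the caller): inside
    the context an ordinary backward pass follows the excitation backprop
    rules rather than the plain gradient rules."""
    x = x.detach().clone().requires_grad_(True)
    with ExcitationBackpropContext():
        output = model(x)
        output.backward(gradient)
    # Summing over channels mirrors gradient_to_excitation_backprop_saliency below.
    return torch.sum(x.grad, 1, keepdim=True)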
def _get_classifier_layer(model):
r"""Get the classifier layer.
Args:
model (:class:`torch.nn.Module`): a model.
Returns:
(:class:`torch.nn.Module`, str): tuple of the last layer and its name.
"""
# Get last layer with weight parameters.
last_layer_name = None
last_layer = None
for parameter_name, _ in list(model.named_parameters())[::-1]:
if '.weight' in parameter_name:
last_layer_name, _ = parameter_name.split('.weight')
last_layer = get_module(model, last_layer_name)
# Check that last layer is convolutional or linear.
if (isinstance(last_layer, nn.Conv1d)
or isinstance(last_layer, nn.Conv2d)
or isinstance(last_layer, nn.Conv3d)
or isinstance(last_layer, nn.Linear)):
break
else:
last_layer_name = None
last_layer = None
assert last_layer_name is not None
assert last_layer is not None
return last_layer, last_layer_name
def gradient_to_excitation_backprop_saliency(x):
r"""Convert a gradient to an excitation backprop saliency map.
The tensor :attr:`x` must have a valid gradient ``x.grad``.
The function then computes the excitation backprop saliency map :math:`s`
given by:
.. math::
        s_{n,1,u} = \sum_{0 \leq c < C} dx_{ncu}
where :math:`n` is the instance index, :math:`c` the channel
index and :math:`u` the spatial multi-index (usually of dimension 2 for
images).
Args:
x (:class:`torch.Tensor`): activation with gradient.
Return:
:class:`torch.Tensor`: saliency map.
"""
return torch.sum(x.grad, 1, keepdim=True)
def gradient_to_contrastive_excitation_backprop_saliency(x):
r"""Convert a gradient to an contrastive excitation backprop saliency map.
The tensor :attr:`x` must have a valid gradient ``x.grad``.
The function then computes the excitation backprop saliency map :math:`s`
given by:
.. math::
s_{n,1,u} = \max(\sum_{0 \leq c < C} dx_{ncu}, 0)
where :math:`n` is the instance index, :math:`c` the channel
index and :math:`u` the spatial multi-index (usually of dimension 2 for
images).
Args:
x (:class:`torch.Tensor`): activation with gradient.
Return:
:class:`torch.Tensor`: saliency map.
"""
return torch.clamp(torch.sum(x.grad, 1, keepdim=True), min=0)
def excitation_backprop(*args,
context_builder=ExcitationBackpropContext,
gradient_to_saliency=gradient_to_excitation_backprop_saliency,
**kwargs):
r"""Excitation backprop.
The function takes the same arguments as :func:`.common.saliency`, with
the defaults required to apply the Excitation backprop method, and supports
the same arguments and return values.
"""
assert context_builder is ExcitationBackpropContext
return saliency(
*args,
context_builder=context_builder,
gradient_to_saliency=gradient_to_saliency,
**kwargs
)
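# Example call (a sketch: the keyword arguments are forwarded unchanged to
# the vendored saliency() helper, and the layer name is borrowed from the
# 15_key_dynamic_image demo script):
#
#     saliency_map = excitation_backprop(
#         model, x, category_id, saliency_layer='extractor.features.1')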
def contrastive_excitation_backprop(model,
input,
target,
saliency_layer,
contrast_layer,
classifier_layer=None,
resize=False,
resize_mode='bilinear',
get_backward_gradient=get_backward_gradient,
debug=False):
"""Contrastive excitation backprop.
Args:
model (:class:`torch.nn.Module`): a model.
input (:class:`torch.Tensor`): input tensor.
target (int or :class:`torch.Tensor`): target label(s).
saliency_layer (str or :class:`torch.nn.Module`): name of the saliency
layer (str) or the layer itself (:class:`torch.nn.Module`) in
the model at which to visualize.
contrast_layer (str or :class:`torch.nn.Module`): name of the contrast
layer (str) or the layer itself (:class:`torch.nn.Module`).
classifier_layer (str or :class:`torch.nn.Module`, optional): name of
the last classifier layer (str) or the layer itself
(:class:`torch.nn.Module`). Defaults to ``None``, in which case
the functions tries to automatically identify the last layer.
Default: ``None``.
resize (bool or tuple, optional): If True resizes the saliency map to
the same size as :attr:`input`. It is also possible to pass a
(width, height) tuple to specify an arbitrary size. Default:
``False``.
resize_mode (str, optional): Specify the resampling mode.
Default: ``'bilinear'``.
get_backward_gradient (function, optional): function that generates
gradient tensor to backpropagate. Default:
:func:`.common.get_backward_gradient`.
debug (bool, optional): If True, also return
:class:`collections.OrderedDict` of :class:`.common.Probe` objects
attached to all named modules in the model. Default: ``False``.
Returns:
:class:`torch.Tensor` or tuple: If :attr:`debug` is False, returns a
:class:`torch.Tensor` saliency map at :attr:`saliency_layer`.
Otherwise, returns a tuple of a :class:`torch.Tensor` saliency map
at :attr:`saliency_layer` and an :class:`collections.OrderedDict`
of :class:`Probe` objects for all modules in the model.
"""
# Disable gradients for model parameters.
for param in model.parameters():
param.requires_grad_(False)
# Set model to eval mode.
if model.training:
model.eval()
saliency_layer = get_module(model, saliency_layer)
contrast_layer = get_module(model, contrast_layer)
if classifier_layer is None:
classifier_layer, _ = _get_classifier_layer(model)
classifier_layer = get_module(model, classifier_layer)
with ExcitationBackpropContext():
probe_contrast = Probe(contrast_layer, target='output')
output = model(input)
gradient = get_backward_gradient(output, target)
try:
# Flip the weights of the last layer.
classifier_layer.weight.data.neg_()
output.backward(gradient.clone(), retain_graph=True)
# Save negative gradient and prepare to backpropagated contrastive
# gradient.
probe_contrast.contrast = [probe_contrast.data[0].grad]
finally:
# Flip back.
classifier_layer.weight.data.neg_()
# Forward-backward pass to get positive gradient at the contrastive
        # layer and backpropagate the contrastive gradient to the input.
probe_saliency = Probe(saliency_layer, target='output')
output = model(input)
output.backward(gradient)
saliency_map = gradient_to_contrastive_excitation_backprop_saliency(probe_saliency.data[0])
probe_saliency.remove()
probe_contrast.remove()
saliency_map = resize_saliency(input, saliency_map, resize, resize_mode)
return saliency_map
| 20,385 | 37.609848 | 101 | py |
DeepPersonality | DeepPersonality-main/dpcv/engine/bi_modal_trainer.py | import torch
from tqdm import tqdm
import numpy as np
import math
import os
from .build import TRAINER_REGISTRY
from torch.utils.tensorboard import SummaryWriter
import time
@TRAINER_REGISTRY.register()
class BiModalTrainer(object):
"""base trainer for bi-modal input"""
def __init__(self, cfg, collector, logger):
self.cfg = cfg
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.clt = collector
self.logger = logger
self.tb_writer = SummaryWriter(cfg.OUTPUT_DIR)
def train(self, data_loader, model, loss_f, optimizer, epoch_idx):
lr = optimizer.param_groups[0]['lr']
self.logger.info(f"Training: learning rate:{lr}")
self.tb_writer.add_scalar("lr", lr, epoch_idx)
model.train()
loss_list = []
acc_avg_list = []
for i, data in enumerate(data_loader):
epo_iter_num = len(data_loader)
iter_start_time = time.time()
inputs, labels = self.data_fmt(data)
outputs = model(*inputs)
optimizer.zero_grad()
loss = loss_f(outputs.cpu(), labels.cpu())
self.tb_writer.add_scalar("loss", loss.item(), i)
loss.backward()
optimizer.step()
iter_end_time = time.time()
iter_time = iter_end_time - iter_start_time
loss_list.append(loss.item())
acc_avg = (1 - torch.abs(outputs.cpu() - labels.cpu())).mean().clip(min=0)
acc_avg = acc_avg.detach().numpy()
acc_avg_list.append(acc_avg)
# print loss and training info for an interval
if i % self.cfg.LOG_INTERVAL == self.cfg.LOG_INTERVAL - 1:
remain_iter = epo_iter_num - i
remain_epo = self.cfg.MAX_EPOCH - epoch_idx
eta = (epo_iter_num * iter_time) * remain_epo + (remain_iter * iter_time)
eta = int(eta)
eta_string = f"{eta // 3600}h:{eta % 3600 // 60}m:{eta % 60}s"
self.logger.info(
"Train: Epo[{:0>3}/{:0>3}] Iter[{:0>3}/{:0>3}] IterTime:[{:.2f}s] LOSS: {:.4f} ACC:{:.4f} ETA:{} ".format(
epoch_idx + 1, self.cfg.MAX_EPOCH, # Epo
i + 1, epo_iter_num, # Iter
iter_time, # IterTime
float(loss.item()), float(acc_avg), # LOSS ACC ETA
eta_string,
)
)
self.clt.record_train_loss(loss_list)
self.clt.record_train_acc(acc_avg_list)
def valid(self, data_loader, model, loss_f, epoch_idx):
model.eval()
with torch.no_grad():
loss_batch_list = []
acc_batch_list = []
ocean_acc_epoch = []
for i, data in enumerate(data_loader):
inputs, labels = self.data_fmt(data)
outputs = model(*inputs)
loss = loss_f(outputs.cpu(), labels.cpu())
loss_batch_list.append(loss.item())
ocean_acc_batch = (
1 - torch.abs(outputs.cpu().detach() - labels.cpu().detach())
).mean(dim=0).clip(min=0)
ocean_acc_epoch.append(ocean_acc_batch)
acc_batch_avg = ocean_acc_batch.mean()
acc_batch_list.append(acc_batch_avg)
ocean_acc = torch.stack(ocean_acc_epoch, dim=0).mean(dim=0).numpy() # ocean acc on all valid images
ocean_acc_avg = ocean_acc.mean()
self.tb_writer.add_scalar("valid_acc", ocean_acc_avg, epoch_idx)
self.clt.record_valid_loss(loss_batch_list)
self.clt.record_valid_acc(acc_batch_list) # acc over batches
self.clt.record_valid_ocean_acc(ocean_acc)
if ocean_acc_avg > self.clt.best_valid_acc:
self.clt.update_best_acc(ocean_acc_avg)
self.clt.update_model_save_flag(1)
else:
self.clt.update_model_save_flag(0)
self.logger.info(
"Valid: Epoch[{:0>3}/{:0>3}] Train Mean_Acc: {:.2%} Valid Mean_Acc:{:.2%} OCEAN_ACC:{}\n".
format(
epoch_idx + 1, self.cfg.MAX_EPOCH,
float(self.clt.epoch_train_acc),
float(self.clt.epoch_valid_acc),
self.clt.valid_ocean_acc)
)
def test(self, data_loader, model):
mse_func = torch.nn.MSELoss(reduction="none")
model.eval()
with torch.no_grad():
mse_ls = []
ocean_acc = []
label_list = []
output_list = []
for data in tqdm(data_loader):
inputs, labels = self.data_fmt(data)
outputs = model(*inputs)
outputs = outputs.cpu().detach()
labels = labels.cpu().detach()
output_list.append(outputs)
label_list.append(labels)
mse = mse_func(outputs, labels).mean(dim=0)
ocean_acc_batch = (1 - torch.abs(outputs - labels)).mean(dim=0).clip(min=0)
mse_ls.append(mse)
ocean_acc.append(ocean_acc_batch)
ocean_mse = torch.stack(mse_ls, dim=0).mean(dim=0).numpy()
ocean_acc = torch.stack(ocean_acc, dim=0).mean(dim=0).numpy() # ocean acc on all valid images
ocean_mse_mean = ocean_mse.mean()
ocean_acc_avg = ocean_acc.mean()
dataset_output = torch.cat(output_list, dim=0).numpy()
dataset_label = torch.cat(label_list, dim=0).numpy()
ocean_mse_mean_rand = np.round(ocean_mse_mean, 4)
ocean_acc_avg_rand = np.round(ocean_acc_avg.astype("float64"), 4)
self.tb_writer.add_scalar("test_acc", ocean_acc_avg_rand)
keys = ["O", "C", "E", "A", "N"]
ocean_mse_dict, ocean_acc_dict = {}, {}
for i, k in enumerate(keys):
ocean_mse_dict[k] = np.round(ocean_mse[i], 4)
ocean_acc_dict[k] = np.round(ocean_acc[i], 4)
return ocean_acc_avg_rand, ocean_acc_dict, dataset_output, dataset_label, (ocean_mse_dict, ocean_mse_mean_rand)
def full_test(self, data_set, model):
model.eval()
out_ls, label_ls = [], []
with torch.no_grad():
for data in tqdm(data_set):
inputs, label = self.full_test_data_fmt(data)
out = model(*inputs)
out_ls.append(out.mean(0).cpu().detach())
label_ls.append(label)
all_out = torch.stack(out_ls, 0)
all_label = torch.stack(label_ls, 0)
ocean_acc = (1 - torch.abs(all_out - all_label)).mean(0).numpy()
ocean_acc_avg = ocean_acc.mean(0)
ocean_acc_avg_rand = np.round(ocean_acc_avg, 4)
ocean_acc_dict = {k: np.round(ocean_acc[i], 4) for i, k in enumerate(["O", "C", "E", "A", "N"])}
dataset_output = all_out.numpy()
dataset_label = all_label.numpy()
return ocean_acc_avg_rand, ocean_acc_dict, dataset_output, dataset_label
def data_extract(self, model, data_set, output_dir):
os.makedirs(output_dir, exist_ok=True)
model.eval()
with torch.no_grad():
for idx, data in enumerate(tqdm(data_set)):
inputs, label = self.full_test_data_fmt(data)
# mini_batch = 64
out_ls, feat_ls = [], []
for i in range(math.ceil(len(inputs[0]) / 64)):
mini_batch_1 = inputs[0][(i * 64): (i + 1) * 64]
mini_batch = (mini_batch_1,)
try:
mini_batch_2 = inputs[1][(i * 64): (i + 1) * 64]
mini_batch = (mini_batch_1, mini_batch_2)
except IndexError:
pass
# mini_batch = (mini_batch_1, mini_batch_2)
if model.return_feature:
out, feat = model(*mini_batch)
out_ls.append(out.cpu())
feat_ls.append(feat.cpu())
else:
out = model(*mini_batch)
out_ls.append(out.cpu())
feat_ls.append(torch.tensor([0]))
out_pred, out_feat = torch.cat(out_ls, dim=0), torch.cat(feat_ls, dim=0)
video_extract = {
"video_frames_pred": out_pred,
"video_frames_feat": out_feat,
"video_label": label.cpu()
}
save_to_file = os.path.join(output_dir, "{:04d}.pkl".format(idx))
torch.save(video_extract, save_to_file)
def data_fmt(self, data):
for k, v in data.items():
data[k] = v.to(self.device)
img_in, aud_in, labels = data["image"], data["audio"], data["label"]
return (aud_in, img_in), labels
def full_test_data_fmt(self, data):
images, wav, label = data["image"], data["audio"], data["label"]
images_in = torch.stack(images, 0).to(self.device)
# wav_in = torch.stack([wav] * 100, 0).to(self.device)
wav_in = wav.repeat(len(images), 1, 1, 1).to(self.device)
return (wav_in, images_in), label
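# Note on the accuracy metric used throughout the trainers in this file:
# predictions and trait labels are regression targets expected to lie in
# [0, 1], so per-trait accuracy is computed as 1 - |prediction - label|,
# averaged over the batch and clipped at zero.  For example, predicting 0.55
# for a trait labelled 0.60 scores 0.95 for that sample and trait.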
@TRAINER_REGISTRY.register()
class BimodalLSTMTrain(BiModalTrainer):
def data_fmt(self, data):
for k, v in data.items():
data[k] = v.to(self.device)
img_in, aud_in, labels = data["image"], data["audio"], data["label"]
img_in = img_in.view(-1, 3, 112, 112)
aud_in = aud_in.view(-1, 68)
return (aud_in, img_in), labels
@TRAINER_REGISTRY.register()
class BimodalLSTMTrainVisual(BiModalTrainer):
def data_fmt(self, data):
for k, v in data.items():
data[k] = v.to(self.device)
img_in, labels = data["image"], data["label"]
img_in = img_in.view(-1, 3, 112, 112)
# aud_in = aud_in.view(-1, 68)
return (img_in,), labels
@TRAINER_REGISTRY.register()
class ImgModalLSTMTrain(BiModalTrainer):
def data_fmt(self, data):
for k, v in data.items():
data[k] = v.to(self.device)
img_in, _, labels = data["image"], data["audio"], data["label"]
img_in = img_in.view(-1, 3, 112, 112)
return (img_in,), labels
@TRAINER_REGISTRY.register()
class AudModalLSTMTrain(BiModalTrainer):
def data_fmt(self, data):
for k, v in data.items():
data[k] = v.to(self.device)
_, aud_in, labels = data["image"], data["audio"], data["label"]
aud_in = aud_in.view(-1, 68)
return (aud_in,), labels
@TRAINER_REGISTRY.register()
class DeepBimodalTrain(BimodalLSTMTrain):
def data_fmt(self, data):
for k, v in data.items():
data[k] = v.to(self.device)
inputs, labels = data["image"], data["label"]
return (inputs,), labels
@TRAINER_REGISTRY.register()
class ImageModalTrainer(BiModalTrainer):
"""
for model only image data used
"""
def data_fmt(self, data):
for k, v in data.items():
data[k] = v.to(self.device)
inputs, labels = data["image"], data["label"]
return (inputs,), labels
def full_test_data_fmt(self, data):
images, label = data["all_images"], data["label"]
images_in = torch.stack(images, 0).to(self.device)
return (images_in, ), label
@TRAINER_REGISTRY.register()
class MultiModalTrainer(BiModalTrainer):
"""
for model only image data used
"""
def data_fmt(self, data):
for k, v in data.items():
data[k] = v.to(self.device)
inputs, labels = data["feature"], data["label"]
return (inputs,), labels
@TRAINER_REGISTRY.register()
class ImageListTrainer(BiModalTrainer):
"""
for interpret cnn model, only image data used
"""
def data_fmt(self, data):
inputs, labels = data["image"], data["label"]
inputs = [item.to(self.device) for item in inputs]
labels = labels.to(self.device)
return (inputs,), labels
def full_test_data_fmt(self, data):
images, label = data["all_images"], data["label"]
# short_sque, long_sque = zip(*images)
inputs = [torch.stack(sque, 0).to(self.device) for sque in zip(*images)]
return (inputs,), label
@TRAINER_REGISTRY.register()
class TPNTrainer(BiModalTrainer):
"""
for interpret cnn model, only image data used
"""
def data_fmt(self, data):
for k, v in data.items():
data[k] = v.to(self.device)
inputs, labels = data["image"], data["label"]
data_input = {"num_modalities": [1], "img_group_0": inputs, "img_meta": None, "gt_label": labels}
return data_input, labels
def train(self, data_loader, model, loss_f, optimizer, epoch_idx):
model.train()
self.logger.info(f"Training: learning rate:{optimizer.param_groups[0]['lr']}")
loss_list = []
acc_avg_list = []
for i, data in enumerate(data_loader):
inputs, labels = self.data_fmt(data)
loss, outputs = model(**inputs)
optimizer.zero_grad()
# loss = loss_f(outputs.cpu(), labels.cpu())
loss.backward()
optimizer.step()
loss_list.append(loss.item())
acc_avg = (1 - torch.abs(outputs.cpu() - labels.cpu())).mean().clip(min=0)
acc_avg = acc_avg.detach().numpy()
acc_avg_list.append(acc_avg)
# print loss info for an interval
if i % self.cfg.LOG_INTERVAL == self.cfg.LOG_INTERVAL - 1:
self.logger.info(
"Train: Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.4f}".format(
epoch_idx + 1, self.cfg.MAX_EPOCH,
i + 1, len(data_loader),
float(loss.item()), float(acc_avg)
)
)
self.clt.record_train_loss(loss_list)
self.clt.record_train_acc(acc_avg_list)
def valid(self, data_loader, model, loss_f, epoch_idx):
model.eval()
with torch.no_grad():
loss_batch_list = []
acc_batch_list = []
ocean_acc_epoch = []
for i, data in enumerate(data_loader):
inputs, labels = self.data_fmt(data)
loss, outputs = model(**inputs)
# loss = loss_f(outputs.cpu(), labels.cpu())
loss_batch_list.append(loss.item())
ocean_acc_batch = (1 - torch.abs(outputs.cpu().detach() - labels.cpu().detach())).mean(dim=0)
ocean_acc_epoch.append(ocean_acc_batch)
acc_batch_avg = ocean_acc_batch.mean()
acc_batch_list.append(acc_batch_avg)
ocean_acc = torch.stack(ocean_acc_epoch, dim=0).mean(dim=0).numpy() # ocean acc on all valid images
ocean_acc_avg = ocean_acc.mean()
self.clt.record_valid_loss(loss_batch_list)
self.clt.record_valid_acc(acc_batch_list) # acc over batches
self.clt.record_valid_ocean_acc(ocean_acc)
if ocean_acc_avg > self.clt.best_valid_acc:
self.clt.update_best_acc(ocean_acc_avg)
self.clt.update_model_save_flag(1)
else:
self.clt.update_model_save_flag(0)
self.logger.info(
"Valid: Epoch[{:0>3}/{:0>3}] Train Mean_Acc: {:.2%} Valid Mean_Acc:{:.2%} OCEAN_ACC:{}\n".
format(
epoch_idx + 1, self.cfg.MAX_EPOCH,
float(self.clt.epoch_train_acc),
float(self.clt.epoch_valid_acc),
self.clt.valid_ocean_acc)
)
def test(self, data_loader, model):
model.eval()
mse = torch.nn.MSELoss(reduction="none")
with torch.no_grad():
ocean_acc = []
ocean_mse = []
label_list = []
output_list = []
for data in tqdm(data_loader):
inputs, labels = self.data_fmt(data)
loss, outputs = model(**inputs)
outputs = outputs.cpu().detach()
labels = labels.cpu().detach()
output_list.append(outputs)
label_list.append(labels)
ocean_mse_batch = mse(outputs, labels).mean(dim=0)
ocean_acc_batch = (1 - torch.abs(outputs - labels)).mean(dim=0)
ocean_mse.append(ocean_mse_batch)
ocean_acc.append(ocean_acc_batch)
ocean_mse = torch.stack(ocean_mse, dim=0).mean(dim=0).numpy()
ocean_acc = torch.stack(ocean_acc, dim=0).mean(dim=0).numpy() # ocean acc on all valid images
ocean_mse_avg = ocean_mse.mean()
ocean_acc_avg = ocean_acc.mean()
dataset_output = torch.cat(output_list, dim=0).numpy()
dataset_label = torch.cat(label_list, dim=0).numpy()
ocean_mse_avg_rand = np.round(ocean_mse_avg.astype("float64"), 4)
ocean_acc_avg_rand = np.round(ocean_acc_avg.astype("float64"), 4)
keys = ["O", "C", "E", "A", "N"]
ocean_mse_dict, ocean_acc_dict = {}, {}
for i, k in enumerate(keys):
ocean_mse_dict[k] = np.round(ocean_mse[i], 4)
ocean_acc_dict[k] = np.round(ocean_acc[i], 4)
return ocean_acc_avg_rand, ocean_acc_dict, dataset_output, dataset_label, (ocean_mse_dict, ocean_mse_avg_rand)
def full_test(self, data_loader, model):
model.eval()
with torch.no_grad():
ocean_acc = []
label_list = []
output_list = []
for data in tqdm(data_loader):
inputs, labels = self.full_test_data_fmt(data)
loss, outputs = model(**inputs)
outputs = outputs.cpu().detach()
labels = labels.cpu().detach()
output_list.append(outputs)
label_list.append(labels)
ocean_acc_batch = (1 - torch.abs(outputs - labels)).mean(dim=0)
ocean_acc.append(ocean_acc_batch)
ocean_acc = torch.stack(ocean_acc, dim=0).mean(dim=0).numpy() # ocean acc on all valid images
ocean_acc_avg = ocean_acc.mean()
dataset_output = torch.cat(output_list, dim=0).numpy()
dataset_label = torch.cat(label_list, dim=0).numpy()
ocean_acc_avg_rand = np.round(ocean_acc_avg.astype("float64"), 4)
keys = ["O", "C", "E", "A", "N"]
ocean_acc_dict = {}
for i, k in enumerate(keys):
ocean_acc_dict[k] = np.round(ocean_acc[i], 4)
return ocean_acc_avg_rand, ocean_acc_dict, dataset_output, dataset_label
def full_test_data_fmt(self, data):
inputs, labels = data["all_images"], data["label"]
inputs = torch.stack(inputs, 0).to(self.device)
labels_repeats = labels.repeat(6, 1).to(self.device)
data_input = {"num_modalities": [1], "img_group_0": inputs, "img_meta": None, "gt_label": labels_repeats}
return data_input, labels_repeats
@TRAINER_REGISTRY.register()
class PersEmoTrainer(BiModalTrainer):
def data_fmt(self, data):
for k, v in data.items():
data[k] = v.squeeze().to(self.device)
per_inputs, emo_inputs = data["per_img"], data["emo_img"],
per_labels, emo_labels = data["per_label"], data["emo_label"]
return (per_inputs, emo_inputs), per_labels, emo_labels
def train(self, data_loader, model, loss_f, optimizer, epoch_idx):
model.train()
self.logger.info(f"Training: learning rate:{optimizer.param_groups[0]['lr']}")
loss_list = []
acc_avg_list = []
for i, data in enumerate(data_loader):
inputs, p_labels, e_labels = self.data_fmt(data)
p_score, p_co, e_score, e_co, x_ep = model(*inputs)
optimizer.zero_grad()
loss = loss_f(p_score, p_labels, e_score, e_labels, p_co, e_co, x_ep)
loss.backward()
optimizer.step()
loss_list.append(loss.item())
acc_avg = (1 - torch.abs(p_score.cpu() - p_labels.cpu())).mean().clip(min=0)
acc_avg = acc_avg.detach().numpy()
acc_avg_list.append(acc_avg)
# print loss info for an interval
if i % self.cfg.LOG_INTERVAL == self.cfg.LOG_INTERVAL - 1:
self.logger.info(
"Train: Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.4f}".format(
epoch_idx + 1, self.cfg.MAX_EPOCH,
i + 1, len(data_loader),
float(loss.item()), float(acc_avg)
)
)
self.clt.record_train_loss(loss_list)
self.clt.record_train_acc(acc_avg_list)
def valid(self, data_loader, model, loss_f, epoch_idx):
model.eval()
with torch.no_grad():
loss_batch_list = []
acc_batch_list = []
ocean_acc_epoch = []
for i, data in enumerate(data_loader):
inputs, p_labels, e_labels = self.data_fmt(data)
p_score, p_co, e_score, e_co, x_ep = model(*inputs)
loss = loss_f(p_score, p_labels, e_score, e_labels, p_co, e_co, x_ep)
loss_batch_list.append(loss.item())
ocean_acc_batch = (1 - torch.abs(p_score.cpu().detach() - p_labels.cpu().detach())).mean(dim=0)
ocean_acc_epoch.append(ocean_acc_batch)
acc_batch_avg = ocean_acc_batch.mean()
acc_batch_list.append(acc_batch_avg)
ocean_acc = torch.stack(ocean_acc_epoch, dim=0).mean(dim=0).numpy() # ocean acc on all valid images
ocean_acc_avg = ocean_acc.mean()
self.clt.record_valid_loss(loss_batch_list)
self.clt.record_valid_acc(acc_batch_list) # acc over batches
self.clt.record_valid_ocean_acc(ocean_acc)
if ocean_acc_avg > self.clt.best_valid_acc:
self.clt.update_best_acc(ocean_acc_avg)
self.clt.update_model_save_flag(1)
else:
self.clt.update_model_save_flag(0)
self.logger.info(
"Valid: Epoch[{:0>3}/{:0>3}] Train Mean_Acc: {:.2%} Valid Mean_Acc:{:.2%} OCEAN_ACC:{}\n".
format(
epoch_idx + 1, self.cfg.MAX_EPOCH,
float(self.clt.epoch_train_acc),
float(self.clt.epoch_valid_acc),
self.clt.valid_ocean_acc)
)
def test(self, data_loader, model):
model.eval()
mse = torch.nn.MSELoss(reduction="none")
with torch.no_grad():
ocean_acc = []
ocean_mse = []
label_list = []
output_list = []
for data in tqdm(data_loader):
inputs, p_labels, e_labels = self.data_fmt(data)
p_score, p_co, e_score, e_co, x_ep = model(*inputs)
p_score = p_score.cpu().detach()
p_labels = p_labels.cpu().detach()
output_list.append(p_score)
label_list.append(p_labels)
ocean_mse_batch = mse(p_score, p_labels).mean(dim=0)
ocean_acc_batch = (1 - torch.abs(p_score - p_labels)).mean(dim=0)
ocean_mse.append(ocean_mse_batch)
ocean_acc.append(ocean_acc_batch)
ocean_mse = torch.stack(ocean_mse, dim=0).mean(dim=0).numpy()
ocean_acc = torch.stack(ocean_acc, dim=0).mean(dim=0).numpy() # ocean acc on all valid images
ocean_mse_avg = ocean_mse.mean()
ocean_acc_avg = ocean_acc.mean()
dataset_output = torch.stack(output_list, dim=0).view(-1, 5).numpy()
dataset_label = torch.stack(label_list, dim=0).view(-1, 5).numpy()
keys = ["O", "C", "E", "A", "N"]
ocean_mse_dict, ocean_acc_dict = {}, {}
for i, k in enumerate(keys):
ocean_mse_dict[k] = np.round(ocean_mse[i], 4)
ocean_acc_dict[k] = np.round(ocean_acc[i], 4)
return ocean_acc_avg, ocean_acc_dict, dataset_output, dataset_label, (ocean_mse_dict, ocean_mse_avg)
def full_test(self, data_loader, model):
return self.test(data_loader, model)
def full_test_data_fmt(self, data):
for k, v in data.items():
data[k] = v.squeeze().to(self.device)
per_inputs, emo_inputs = data["per_img"], data["emo_img"],
per_labels, emo_labels = data["per_label"], data["emo_label"]
return (per_inputs, emo_inputs), per_labels[0]
def data_extract(self, model, data_set, output_dir):
os.makedirs(output_dir, exist_ok=True)
model.eval()
with torch.no_grad():
for idx, data in enumerate(tqdm(data_set)):
inputs, label = self.full_test_data_fmt(data)
mini_batch = 64
out_ls, feat_ls = [], []
for i in range(math.ceil(len(inputs[0]) / mini_batch)):
mini_batch_1 = inputs[0][(i * mini_batch): (i + 1) * mini_batch]
mini_batch_2 = inputs[1][i * 3: (i * 3 + mini_batch)] # jump 3 images every time
mini_batch_input = (mini_batch_1, mini_batch_2)
out, *_, feat = model(*mini_batch_input)
out_ls.append(out.cpu())
feat_ls.append(feat.cpu())
out_pred, out_feat = torch.cat(out_ls, dim=0), torch.cat(feat_ls, dim=0)
video_extract = {
"video_frames_pred": out_pred,
"video_frames_feat": out_feat,
"video_label": label.cpu()
}
save_to_file = os.path.join(output_dir, "{:04d}.pkl".format(idx))
torch.save(video_extract, save_to_file)
@TRAINER_REGISTRY.register()
class AudioTrainer(BiModalTrainer):
def data_fmt(self, data):
for k, v in data.items():
data[k] = v.to(self.device)
return (data["aud_data"],), data["aud_label"]
@TRAINER_REGISTRY.register()
class StatisticTrainer(BiModalTrainer):
def data_fmt(self, data):
return (data["data"].to(self.device),), data["label"].to(self.device)
@TRAINER_REGISTRY.register()
class SpectrumTrainer(BiModalTrainer):
def data_fmt(self, data):
return (data["data"].to(self.device),), data["label"].to(self.device)
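# Sketch of the extension pattern used throughout this file: a new trainer
# only needs to subclass BiModalTrainer, override data_fmt() to unpack its
# own batch dictionary, and register itself.  The class name and the
# "example_feature" key below are hypothetical placeholders, not real
# dataset fields.
@TRAINER_REGISTRY.register()
class ExampleFeatureTrainer(BiModalTrainer):
    def data_fmt(self, data):
        for k, v in data.items():
            data[k] = v.to(self.device)
        return (data["example_feature"],), data["label"]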
| 26,435 | 40.566038 | 126 | py |
DeepPersonality | DeepPersonality-main/dpcv/engine/crnet_trainer.py | import torch
from tqdm import tqdm
from dpcv.engine.bi_modal_trainer import BiModalTrainer
import numpy as np
import math
import os
from .build import TRAINER_REGISTRY
class CRNetTrainer(BiModalTrainer):
def train(self, data_loader, model, loss_f, optimizer, epoch_idx):
model.train()
if model.train_guider:
self.logger.info(f"Training: classification phrase, learning rate:{optimizer[0].param_groups[0]['lr']}")
else:
self.logger.info(f"Training: regression phrase, learning rate:{optimizer[1].param_groups[0]['lr']}")
loss_list = []
acc_avg_list = []
for i, data in enumerate(data_loader):
inputs, cls_label, reg_label = self.data_fmt(data)
# forward
if model.train_guider:
cls_score = model(*inputs)
loss = loss_f["ce_loss"](cls_score, cls_label)
optimizer[0].zero_grad()
loss.backward()
optimizer[0].step()
else:
cls_score, reg_pred = model(*inputs)
loss = self.loss_compute(loss_f, reg_pred, reg_label, cls_score, cls_label, epoch_idx)
# backward
optimizer[1].zero_grad()
loss.backward()
optimizer[1].step()
loss_list.append(loss.item())
acc_avg = (1 - torch.abs(reg_pred.cpu() - reg_label.cpu())).mean().clip(min=0)
acc_avg = acc_avg.detach().numpy()
acc_avg_list.append(acc_avg)
if i % self.cfg.LOG_INTERVAL == self.cfg.LOG_INTERVAL - 1:
if model.train_guider:
cls_soft_max = torch.softmax(cls_score, dim=-1)
matched = torch.as_tensor(
torch.argmax(cls_soft_max, -1) == torch.argmax(cls_label, -1),
dtype=torch.int8
)
acc = matched.sum() / matched.numel()
self.logger.info(
"Train: Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{}".
format(epoch_idx, self.cfg.TRAIN_CLS_EPOCH, i + 1, len(data_loader),
float(loss.cpu().detach().numpy()),
acc.cpu().detach().numpy().round(2))
)
else:
acc = (1 - torch.abs(reg_pred - reg_label)).mean(dim=0).clip(min=0)
self.logger.info(
"Train: Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{}".
format(epoch_idx, self.cfg.MAX_EPOCH, i + 1, len(data_loader),
float(loss.cpu().detach().numpy()),
acc.cpu().detach().numpy().round(2))
)
if not model.train_guider:
self.clt.record_train_loss(loss_list)
self.clt.record_train_acc(acc_avg_list)
def valid(self, data_loader, model, loss_f, epoch_idx):
model.eval()
if not model.train_guider:
with torch.no_grad():
loss_batch_list = []
acc_batch_list = []
ocean_acc_epoch = []
for i, data in enumerate(data_loader):
inputs, cls_label, reg_label = self.data_fmt(data)
cls_score, reg_pred = model(*inputs)
loss = self.loss_compute(loss_f, reg_pred, reg_label, cls_score, cls_label, epoch_idx)
loss_batch_list.append(loss.item())
ocean_acc_batch = (1 - torch.abs(reg_pred.cpu() - reg_label.cpu())).mean(dim=0).clip(min=0)
ocean_acc_epoch.append(ocean_acc_batch)
acc_batch_avg = ocean_acc_batch.mean()
acc_batch_list.append(acc_batch_avg)
ocean_acc = torch.stack(ocean_acc_epoch, dim=0).mean(dim=0).numpy() # ocean acc on all valid images
ocean_acc_avg = ocean_acc.mean()
self.clt.record_valid_loss(loss_batch_list) # acc over batches
self.clt.record_valid_acc(acc_batch_list) # acc over batches
self.clt.record_valid_ocean_acc(ocean_acc)
if ocean_acc_avg > self.clt.best_valid_acc:
self.clt.update_best_acc(ocean_acc_avg)
self.clt.update_model_save_flag(1)
else:
self.clt.update_model_save_flag(0)
else:
print("only test regression accuracy")
self.logger.info(
"Valid: Epoch[{:0>3}/{:0>3}] Train Mean_Acc: {:.2%} Valid Mean_Acc:{:.2%} OCEAN_ACC:{}\n".
format(
epoch_idx + 1, self.cfg.MAX_EPOCH,
float(self.clt.epoch_train_acc),
float(self.clt.epoch_valid_acc),
self.clt.valid_ocean_acc)
)
def test(self, data_loader, model):
model.eval()
with torch.no_grad():
ocean_acc = []
label_list = []
output_list = []
for data in tqdm(data_loader):
inputs, _, labels = self.data_fmt(data)
_, outputs = model(*inputs)
outputs = outputs.cpu().detach()
labels = labels.cpu().detach()
output_list.append(outputs)
label_list.append(labels)
ocean_acc_batch = (1 - torch.abs(outputs - labels)).mean(dim=0)
ocean_acc.append(ocean_acc_batch)
ocean_acc = torch.stack(ocean_acc, dim=0).mean(dim=0).numpy() # ocean acc on all valid images
ocean_acc_avg = ocean_acc.mean()
dataset_output = torch.stack(output_list, dim=0).view(-1, 5).numpy()
dataset_label = torch.stack(label_list, dim=0).view(-1, 5).numpy()
return ocean_acc_avg, ocean_acc, dataset_output, dataset_label
def full_test(self, data_loader, model):
model.eval()
with torch.no_grad():
for data in tqdm(data_loader):
inputs, labels = self.full_test_data_fmt(data)
_, outputs = model(*inputs)
def data_fmt(self, data):
for k, v in data.items():
data[k] = v.to(self.device)
inputs = data["glo_img"], data["loc_img"], data["wav_aud"]
cls_label, reg_label = data["cls_label"], data["reg_label"]
return inputs, cls_label, reg_label
def full_test_data_fmt(self, data):
inputs = data["glo_img"].to(self.device), data["loc_img"].to(self.device), data["wav_aud"].to(self.device)
label = data["reg_label"]
return inputs, label
def loss_compute(self, loss_f, reg_pred, reg_label, cls_score, cls_label, epoch_idx):
loss_1 = loss_f["l1_loss"](reg_pred, reg_label)
loss_2 = loss_f["mse_loss"](reg_pred, reg_label)
loss_3 = loss_f["bell_loss"](reg_pred, reg_label)
lambda_ = (4 * epoch_idx) / (self.cfg.MAX_EPOCH + 1)
loss_4 = lambda_ * loss_f["ce_loss"](cls_score, cls_label)
loss = loss_1 + loss_2 + loss_3 + loss_4
return loss
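# Worked example of the ramp used in CRNetTrainer.loss_compute above
# (assuming a hypothetical MAX_EPOCH of 30): lambda_ = 4 * epoch_idx / 31 is
# 0.0 at epoch 0, about 1.9 at epoch 15 and about 3.9 at epoch 30, so the
# cross-entropy guidance term is phased in gradually while the L1, MSE and
# bell losses keep a constant unit weight throughout training.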
@TRAINER_REGISTRY.register()
class CRNetTrainer2(BiModalTrainer):
def train(self, data_loader, model, loss_f, optimizer, epoch_idx):
model.train()
if epoch_idx > model.train_guider_epo:
model.set_train_regressor()
if not model.train_regressor:
self.logger.info(f"Training: classification phrase, learning rate:{optimizer[0].param_groups[0]['lr']}")
else:
self.logger.info(f"Training: regression phrase, learning rate:{optimizer[1].param_groups[0]['lr']}")
loss_list = []
acc_avg_list = []
for i, data in enumerate(data_loader):
inputs, cls_label, reg_label = self.data_fmt(data)
# forward
if not model.train_regressor:
cls_score = model(*inputs)
loss = loss_f["ce_loss"](cls_score, cls_label)
optimizer[0].zero_grad()
loss.backward()
optimizer[0].step()
else:
cls_score, reg_pred = model(*inputs)
loss = self.loss_compute(loss_f, reg_pred, reg_label, cls_score, cls_label, epoch_idx)
# backward
optimizer[1].zero_grad()
loss.backward()
optimizer[1].step()
loss_list.append(loss.item())
acc_avg = (1 - torch.abs(reg_pred.cpu() - reg_label.cpu())).mean().clip(min=0)
# acc_avg = acc_avg.detach().numpy()
acc_avg_list.append(acc_avg.detach().numpy())
if i % self.cfg.LOG_INTERVAL == self.cfg.LOG_INTERVAL - 1:
if not model.train_regressor:
cls_soft_max = torch.softmax(cls_score, dim=-1)
matched = torch.as_tensor(
torch.argmax(cls_soft_max, -1) == torch.argmax(cls_label, -1),
dtype=torch.int8
)
acc = matched.sum() / matched.numel()
self.logger.info(
"Train: Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{}".
format(epoch_idx, model.train_guider_epo, i + 1, len(data_loader),
float(loss.cpu().detach().numpy()),
acc.cpu().detach().numpy().round(2))
)
else:
acc = (1 - torch.abs(reg_pred - reg_label)).mean(dim=0).clip(min=0)
self.logger.info(
"Train: Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{}".
format(epoch_idx, self.cfg.MAX_EPOCH, i + 1, len(data_loader),
float(loss.cpu().detach().numpy()),
acc.cpu().detach().numpy().round(2))
)
if model.train_regressor:
self.clt.record_train_loss(loss_list)
self.clt.record_train_acc(acc_avg_list)
def valid(self, data_loader, model, loss_f, epoch_idx):
model.eval()
if model.train_regressor:
with torch.no_grad():
loss_batch_list = []
acc_batch_list = []
ocean_acc_epoch = []
for i, data in enumerate(data_loader):
inputs, cls_label, reg_label = self.data_fmt(data)
cls_score, reg_pred = model(*inputs)
loss = self.loss_compute(loss_f, reg_pred, reg_label, cls_score, cls_label, epoch_idx)
loss_batch_list.append(loss.item())
ocean_acc_batch = (1 - torch.abs(reg_pred.cpu() - reg_label.cpu())).mean(dim=0).clip(min=0)
ocean_acc_epoch.append(ocean_acc_batch)
acc_batch_avg = ocean_acc_batch.mean()
acc_batch_list.append(acc_batch_avg)
ocean_acc = torch.stack(ocean_acc_epoch, dim=0).mean(dim=0).numpy() # ocean acc on all valid images
ocean_acc_avg = ocean_acc.mean()
self.clt.record_valid_loss(loss_batch_list) # acc over batches
self.clt.record_valid_acc(acc_batch_list) # acc over batches
self.clt.record_valid_ocean_acc(ocean_acc)
if ocean_acc_avg > self.clt.best_valid_acc:
self.clt.update_best_acc(ocean_acc_avg)
self.clt.update_model_save_flag(1)
else:
self.clt.update_model_save_flag(0)
else:
print("only test regression accuracy")
return
self.logger.info(
"Valid: Epoch[{:0>3}/{:0>3}] Train Mean_Acc: {:.2%} Valid Mean_Acc:{:.2%} OCEAN_ACC:{}\n".
format(
epoch_idx + 1, self.cfg.MAX_EPOCH,
float(self.clt.epoch_train_acc),
float(self.clt.epoch_valid_acc),
self.clt.valid_ocean_acc)
)
def test(self, data_loader, model):
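        # evaluate on a data loader: collect per-trait MSE and 1 - |error| accuracy for the
        # O, C, E, A, N traits together with the raw predictions and labels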
model.eval()
model.set_train_regressor()
mse_func = torch.nn.MSELoss(reduction="none")
with torch.no_grad():
mse_ls = []
ocean_acc = []
label_list = []
output_list = []
for data in tqdm(data_loader):
inputs, cls_label, labels = self.data_fmt(data)
_, outputs = model(*inputs)
outputs = outputs.cpu().detach()
labels = labels.cpu().detach()
output_list.append(outputs)
label_list.append(labels)
mse = mse_func(outputs, labels).mean(dim=0)
ocean_acc_batch = (1 - torch.abs(outputs - labels)).mean(dim=0)
mse_ls.append(mse)
ocean_acc.append(ocean_acc_batch)
ocean_mse = torch.stack(mse_ls, dim=0).mean(dim=0).numpy()
ocean_acc = torch.stack(ocean_acc, dim=0).mean(dim=0).numpy() # ocean acc on all valid images
ocean_mse_mean = ocean_mse.mean()
ocean_acc_avg = ocean_acc.mean()
dataset_output = torch.stack(output_list, dim=0).view(-1, 5).numpy()
dataset_label = torch.stack(label_list, dim=0).view(-1, 5).numpy()
ocean_mse_mean_rand = np.round(ocean_mse_mean, 4)
keys = ["O", "C", "E", "A", "N"]
ocean_mse_dict, ocean_acc_dict = {}, {}
for i, k in enumerate(keys):
ocean_mse_dict[k] = np.round(ocean_mse[i], 4)
ocean_acc_dict[k] = np.round(ocean_acc[i], 4)
return ocean_acc_avg, ocean_acc, dataset_output, dataset_label, (ocean_mse_dict, ocean_mse_mean_rand)
def full_test(self, data_loader, model):
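        # whole-video evaluation: predict every sampled frame of a video and average the
        # frame-level outputs to obtain a single score vector per video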
model.eval()
model.set_train_regressor()
with torch.no_grad():
out_ls, label_ls = [], []
for data in tqdm(data_loader):
inputs, labels = self.full_test_data_fmt(data)
_, outputs = model(*inputs)
out_ls.append(outputs.mean(0).cpu().detach())
label_ls.append(labels)
all_out = torch.stack(out_ls, 0)
all_label = torch.stack(label_ls, 0)
ocean_acc = (1 - torch.abs(all_out - all_label)).mean(0).numpy()
ocean_acc_avg = ocean_acc.mean(0)
ocean_acc_avg_rand = np.round(ocean_acc_avg, 4)
ocean_acc_dict = {k: np.round(ocean_acc[i], 4) for i, k in enumerate(["O", "C", "E", "A", "N"])}
dataset_output = all_out.numpy()
dataset_label = all_label.numpy()
return ocean_acc_avg_rand, ocean_acc_dict, dataset_output, dataset_label
def data_fmt(self, data):
for k, v in data.items():
data[k] = v.to(self.device)
inputs = data["glo_img"], data["loc_img"], data["wav_aud"]
cls_label, reg_label = data["cls_label"], data["reg_label"]
return inputs, cls_label, reg_label
def full_test_data_fmt(self, data):
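        # stack the sampled global and face frames of one video into a batch and repeat the
        # single audio feature so that every frame is paired with the same audio input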
glo_imgs = torch.stack(data["glo_img"], 0).to(self.device)
loc_imgs = torch.stack(data["loc_img"], 0).to(self.device)
wav_aud = data["wav_aud"].repeat(len(glo_imgs), 1, 1, 1).to(self.device)
inputs = glo_imgs, loc_imgs, wav_aud
label = data["reg_label"]
return inputs, label
def loss_compute(self, loss_f, reg_pred, reg_label, cls_score, cls_label, epoch_idx):
loss_1 = loss_f["l1_loss"](reg_pred, reg_label)
loss_2 = loss_f["mse_loss"](reg_pred, reg_label)
loss_3 = loss_f["bell_loss"](reg_pred, reg_label)
lambda_ = (4 * epoch_idx) / (self.cfg.MAX_EPOCH + 1)
loss_4 = lambda_ * loss_f["ce_loss"](cls_score, cls_label)
loss = loss_1 + loss_2 + loss_3 + loss_4
return loss
def data_extract(self, model, data_set, output_dir):
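        # run the trained model over each video in mini-batches of frames and cache the
        # per-frame predictions (and features, when the model returns them) as one .pkl per video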
os.makedirs(output_dir, exist_ok=True)
model.eval()
model.set_train_regressor()
with torch.no_grad():
for idx, data in enumerate(tqdm(data_set)):
inputs, label = self.full_test_data_fmt(data)
mini_batch_size = 16
out_ls, feat_ls = [], []
for i in range(math.ceil(len(inputs[0]) / mini_batch_size)):
mini_batch_i_1 = inputs[0][(i * mini_batch_size): (i + 1) * mini_batch_size]
mini_batch_i_2 = inputs[1][(i * mini_batch_size): (i + 1) * mini_batch_size]
mini_batch_i_3 = inputs[2][(i * mini_batch_size): (i + 1) * mini_batch_size]
mini_batch_i = (mini_batch_i_1, mini_batch_i_2, mini_batch_i_3)
if model.return_feature:
_, out, feat = model(*mini_batch_i)
out_ls.append(out.cpu())
feat_ls.append(feat.cpu())
else:
_, out = model(*mini_batch_i)
out_ls.append(out.cpu())
feat_ls.append(torch.tensor([0]))
# out.shape = (64, 5) feat.shape = (64, 5, 512)
out_pred, out_feat = torch.cat(out_ls, dim=0), torch.cat(feat_ls, dim=0)
video_extract = {
"video_frames_pred": out_pred,
"video_frames_feat": out_feat,
"video_label": label.cpu()
}
save_to_file = os.path.join(output_dir, "{:04d}.pkl".format(idx))
torch.save(video_extract, save_to_file)
@TRAINER_REGISTRY.register()
class CRNetTrainer2Vis(CRNetTrainer2):
def data_fmt(self, data):
for k, v in data.items():
data[k] = v.to(self.device)
inputs = data["glo_img"], data["loc_img"]
cls_label, reg_label = data["cls_label"], data["reg_label"]
return inputs, cls_label, reg_label
@TRAINER_REGISTRY.register()
class CRNetAudTrainer(CRNetTrainer2):
def data_fmt(self, data):
for k, v in data.items():
data[k] = v.to(self.device)
inputs = data["aud_data"]
cls_label, reg_label = data["aud_label_cls"], data["aud_label"]
return (inputs,), cls_label, reg_label | 18,256 | 43.420925 | 116 | py |
DeepPersonality | DeepPersonality-main/dpcv/engine/bi_modal_lstm_train.py | import torch
from dpcv.engine.bi_modal_trainer import BiModalTrainer
# class BiModalTrainer_(object):
# def __init__(self, collector):
# self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# self.clt = collector
#
# def train(self, data_loader, model, loss_f, optimizer, scheduler, epoch_idx, cfg, logger):
# model.train()
#
# loss_list = []
# acc_avg_list = []
# for i, data in enumerate(data_loader):
# img_in, aud_in, labels = self.data_fmt(data)
# outputs = model(aud_in, img_in)
# optimizer.zero_grad()
# loss = loss_f(outputs.cpu(), labels.cpu())
# loss.backward()
# optimizer.step()
#
# loss_list.append(loss.item())
#
# acc_avg = (1 - torch.abs(outputs.cpu() - labels.cpu())).mean().clip(min=0)
# acc_avg = acc_avg.detach().numpy()
# # acc_avg = 0
# acc_avg_list.append(acc_avg)
# # print loss info for an interval
# if i % cfg.LOG_INTERVAL == cfg.LOG_INTERVAL - 1:
# logger.info(
# "Training: Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.2}".format(
# epoch_idx + 1, cfg.MAX_EPOCH, i + 1, len(data_loader), float(loss.item()), float(acc_avg))
# ) # print current training info of that batch
#
# self.clt.record_train_loss(loss_list)
# self.clt.record_train_acc(acc_avg_list)
#
# def valid(self, data_loader, model, loss_f):
# model.eval()
# with torch.no_grad():
# loss_list = []
# acc_batch_list = []
# ocean_list = []
# for i, data in enumerate(data_loader):
# img_in, aud_in, labels = self.data_fmt(data)
# outputs = model(aud_in, img_in)
# loss = loss_f(outputs.cpu(), labels.cpu())
# loss_list.append(loss.item())
# acc_batch = (1 - torch.abs(outputs.cpu().detach() - labels.cpu().detach())).mean(dim=0)
# ocean_list.append(acc_batch)
# acc_batch_avg = acc_batch.mean()
# acc_batch_list.append(acc_batch_avg)
# ocean_acc = torch.stack(ocean_list, dim=0).mean(dim=0).numpy() # ocean acc on all valid images
# ocean_acc_avg = ocean_acc.mean()
#
# self.clt.record_valid_loss(loss_list)
# self.clt.record_valid_acc(acc_batch_list) # acc over batches
# self.clt.record_valid_ocean_acc(ocean_acc)
# if ocean_acc_avg > self.clt.best_acc:
# self.clt.update_best_acc(ocean_acc_avg)
# self.clt.update_model_save_flag(1)
# else:
# self.clt.update_model_save_flag(0)
#
# def data_fmt(self, data):
# for k, v in data.items():
# data[k] = v.to(self.device)
# img_in, aud_in, labels = data["image"], data["audio"], data["label"]
# img_in = img_in.view(-1, 3, 112, 112)
# aud_in = aud_in.view(-1, 68)
# return img_in, aud_in, labels
# class BimodalLSTMTrain(BiModalTrainer):
#
# def data_fmt(self, data):
# for k, v in data.items():
# data[k] = v.to(self.device)
# img_in, aud_in, labels = data["image"], data["audio"], data["label"]
# img_in = img_in.view(-1, 3, 112, 112)
# aud_in = aud_in.view(-1, 68)
# return img_in, aud_in, labels | 3,497 | 41.144578 | 116 | py |
DeepPersonality | DeepPersonality-main/dpcv/data/datasets/video_frame_data.py | import random
import torch
from torch.utils.data import DataLoader
from PIL import Image
import numpy as np
import glob
from dpcv.data.datasets.bi_modal_data import VideoData
from dpcv.data.transforms.transform import set_transform_op
from dpcv.data.transforms.build import build_transform_spatial
from .build import DATA_LOADER_REGISTRY
class SingleFrameData(VideoData):
def __init__(self, data_root, img_dir, label_file, trans=None):
super().__init__(data_root, img_dir, label_file)
self.trans = trans
def __getitem__(self, index):
img = self.get_image_data(index)
label = self.get_ocean_label(index)
if self.trans:
img = self.trans(img)
return {"image": img, "label": torch.as_tensor(label)}
def get_image_data(self, index):
img_dir = self.img_dir_ls[index]
img_path = self.image_sample(img_dir)
img = Image.open(img_path).convert("RGB")
return img
@staticmethod
def image_sample(img_dir):
img_path_ls = glob.glob(f"{img_dir}/*.jpg")
num_img = len(img_path_ls)
# downsample the frames to 100 / video
sample_frames = np.linspace(0, num_img, 100, endpoint=False, dtype=np.int16)
selected = random.choice(sample_frames)
return img_path_ls[selected]
class AllSampleFrameData(VideoData):
def __init__(self, data_root, img_dir, label_file, trans=None, length=100):
super().__init__(data_root, img_dir, label_file)
self.trans = trans
self.len = length
def __getitem__(self, idx):
img_obj_ls = self.get_sample_frames(idx)
label = self.get_ocean_label(idx)
if self.trans is not None:
img_obj_ls = [self.trans(img) for img in img_obj_ls]
return {"all_images": img_obj_ls, "label": torch.as_tensor(label)}
def get_sample_frames(self, idx):
img_dir = self.img_dir_ls[idx]
# Note randomly ordered after glob search
img_path_ls = glob.glob(f"{img_dir}/*.jpg")
# downsample the frames to 100 / video
sample_frames_id = np.linspace(0, len(img_path_ls), self.len, endpoint=False, dtype=np.int16).tolist()
img_path_ls_sampled = [img_path_ls[idx] for idx in sample_frames_id]
img_obj_ls = [Image.open(img_path) for img_path in img_path_ls_sampled]
return img_obj_ls
class AllSampleFrameData2(VideoData):
def __init__(self, data_root, img_dir, label_file, trans=None):
super().__init__(data_root, img_dir, label_file)
self.trans = trans
def __getitem__(self, idx):
img_obj_ls = self.get_sample_frames(idx)
label = self.get_ocean_label(idx)
if self.trans is not None:
img_obj_ls = [self.trans(img) for img in img_obj_ls]
return {"all_images": img_obj_ls, "label": torch.as_tensor(label)}
def get_sample_frames(self, idx):
img_dir = self.img_dir_ls[idx]
# Note randomly ordered after glob search
img_path_ls = sorted(glob.glob(f"{img_dir}/*.jpg"))
img_obj_ls = [Image.open(img_path) for img_path in img_path_ls]
return img_obj_ls
def make_data_loader(cfg, mode="train"):
assert (mode in ["train", "valid", "trainval", "test", "full_test"]),\
"'mode' should be 'train' , 'valid' 'trainval' 'test', 'full_test' "
transform = set_transform_op()
if mode == "train":
data_set = SingleFrameData(
cfg.DATA_ROOT,
cfg.TRAIN_IMG_DATA,
cfg.TRAIN_LABEL_DATA,
transform,
)
elif mode == "valid":
data_set = SingleFrameData(
cfg.DATA_ROOT,
cfg.VALID_IMG_DATA,
cfg.VALID_LABEL_DATA,
transform,
)
elif mode == "trainval":
data_set = SingleFrameData(
cfg.DATA_ROOT,
cfg.TRAINVAL_IMG_DATA,
cfg.TRAINVAL_LABEL_DATA,
transform,
)
else:
data_set = SingleFrameData(
cfg.DATA_ROOT,
cfg.TEST_IMG_DATA,
cfg.TEST_LABEL_DATA,
transform,
)
data_loader = DataLoader(
dataset=data_set,
batch_size=cfg.TRAIN_BATCH_SIZE,
shuffle=cfg.SHUFFLE,
num_workers=cfg.NUM_WORKERS,
)
return data_loader
@DATA_LOADER_REGISTRY.register()
def single_frame_data_loader(cfg, mode="train"):
assert (mode in ["train", "valid", "trainval", "test", "full_test"]), \
"'mode' should be 'train' , 'valid', 'trainval', 'test', 'full_test' "
shuffle = cfg.DATA_LOADER.SHUFFLE
transform = build_transform_spatial(cfg)
if mode == "train":
data_set = SingleFrameData(
cfg.DATA.ROOT,
cfg.DATA.TRAIN_IMG_DATA,
cfg.DATA.TRAIN_LABEL_DATA,
transform,
)
elif mode == "valid":
data_set = SingleFrameData(
cfg.DATA.ROOT,
cfg.DATA.VALID_IMG_DATA,
cfg.DATA.VALID_LABEL_DATA,
transform,
)
shuffle = False
elif mode == "trainval":
data_set = SingleFrameData(
cfg.DATA.ROOT,
cfg.DATA.TRAINVAL_IMG_DATA,
cfg.DATA.TRAINVAL_LABEL_DATA,
transform,
)
elif mode == "full_test":
return AllSampleFrameData(
cfg.DATA.ROOT,
cfg.DATA.TEST_IMG_DATA,
cfg.DATA.TEST_LABEL_DATA,
transform,
)
else:
data_set = SingleFrameData(
cfg.DATA.ROOT,
cfg.DATA.TEST_IMG_DATA,
cfg.DATA.TEST_LABEL_DATA,
transform,
)
shuffle = False
data_loader = DataLoader(
dataset=data_set,
batch_size=cfg.DATA_LOADER.TRAIN_BATCH_SIZE,
shuffle=shuffle,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
)
return data_loader
if __name__ == "__main__":
import os
from dpcv.config.interpret_dan_cfg import cfg
os.chdir("../../")
# interpret_data = InterpretData(
# data_root="datasets",
# img_dir="image_data/valid_data",
# label_file="annotation/annotation_validation.pkl",
# trans=set_transform_op(),
# )
# print(interpret_data[18])
data_loader = make_data_loader(cfg, mode="valid")
for i, item in enumerate(data_loader):
print(item["image"].shape, item["label"].shape)
if i > 5:
break
| 6,427 | 30.9801 | 110 | py |
DeepPersonality | DeepPersonality-main/dpcv/data/datasets/second_stage_dataset.py | import torch
from torch.utils.data import Dataset
from dpcv.data.datasets.build import DATA_LOADER_REGISTRY
from torch.utils.data import DataLoader
import glob
import os
from tqdm import tqdm
import numpy as np
from math import ceil
class SecondStageData(Dataset):
def __init__(self, data_dir, data_type="pred", method="statistic", used_frame=1000, top_n_sample=600, sample=False):
assert data_type in ["pred", "feat"]
assert method in ["statistic", "spectrum"]
self.data_type = f"video_frames_{data_type}"
self.process_method = method
data_root, data_split = os.path.split(data_dir)
# self.signal_num = 512
self.save_to = os.path.join(data_root, f"{self.data_type}_{self.process_method}_{data_split}.pkl")
self.used_frame = used_frame
self.top_n_sample = top_n_sample
self.sample = sample
self.data_preprocess(data_dir) # save processed data to disk first
self.data_ls = self.load_data()
def load_data(self):
# save_to = os.path.join(data_root, f"{self.data_type}_{self.process_method}_{data_split}.pkl")
data_ls = []
# use cached data
# print(f"using cached data {self.save_to}")
try:
data_ls = torch.load(self.save_to)
except FileNotFoundError:
data_segments = sorted(glob.glob(self.save_to.replace(".pkl", "*.pkl")))
for seg in data_segments:
data_ls.extend(torch.load(seg))
return data_ls
def data_preprocess(self, data_dir):
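        # turn every cached per-video prediction/feature file into a training sample using the
        # chosen representation (statistic or spectrum) and save the results to disk in segments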
if os.path.exists(self.save_to) or os.path.exists(self.save_to.replace(".pkl", "_1.pkl")):
return
print(
f"preprocessing data [{data_dir}] \n"
f"[{self.data_type}] by [{self.process_method}]"
)
data_ls = []
sample_pth_ls = sorted(glob.glob(f"{data_dir}/*.pkl"))
sample_num = len(sample_pth_ls)
seg_id = 0
for i, sample_pth in enumerate(tqdm(sample_pth_ls)):
sample = torch.load(sample_pth)
data, label = sample[self.data_type], sample["video_label"] # data: (382, 2048) label: (5,)
if self.process_method == "statistic":
data = self.statistic_process(data)
elif self.process_method == "spectrum":
data, valid = self.spectrum_process(data) # data: (382, 2048) label: (5,)
if not valid:
print(f"{sample_pth} not valid with data shape {data.shape}")
continue
else:
raise NotImplementedError
sample_train = {"id": i, "data": data, "label": label}
data_ls.append(sample_train)
# signal_num = data_ls[0]["data"].shape[1]
# if signal_num >= 1024:
# if self.signal_num != signal_num:
# self.signal_num = signal_num
            # for large features, save the collected samples in segments of data_seg at a time to avoid memory issues
# last_seg = ceil(len(sample_pth_ls) / 1000)
data_seg = 2000
if len(data_ls) == data_seg:
seg_id += 1
torch.save(data_ls[:data_seg], self.save_to.replace(".pkl", f"_{seg_id}.pkl"))
data_ls = data_ls[data_seg:]
elif i == sample_num - 1:
seg_id += 1
torch.save(data_ls, self.save_to.replace(".pkl", f"_{seg_id}.pkl"))
# if self.signal_num < 1024:
# torch.save(data_ls, self.save_to)
def __getitem__(self, idx):
training_sample = self.data_ls[idx]
return training_sample
@staticmethod
def statistic_process(data):
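        # summarise the per-frame features with max/min/mean/std computed over frames for the raw
        # sequence and its first- and second-order differences, giving a (12, feature_dim) representation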
assert isinstance(data, torch.Tensor), "the input data should be torch.Tensor"
max_0, _ = data.max(dim=0)
min_0, _ = data.min(dim=0)
mean_0 = data.mean(dim=0)
std_0 = data.std(dim=0)
data_first_order = data[1:, :] - data[:-1, :]
max_1, _ = data_first_order.max(dim=0)
min_1, _ = data_first_order.min(dim=0)
mean_1 = data_first_order.mean(dim=0)
std_1 = data_first_order.std(dim=0)
data_sec_order = data[2:, :] - data[:-2, :]
max_2, _ = data_sec_order.max(dim=0)
min_2, _ = data_sec_order.min(dim=0)
mean_2 = data_sec_order.mean(dim=0)
std_2 = data_sec_order.std(dim=0)
statistic_representation = torch.stack(
[
max_0, min_0, mean_0, std_0,
max_1, min_1, mean_1, std_1,
max_2, min_2, mean_2, std_2
],
dim=0,
)
return statistic_representation
def spectrum_process(self, data): # data: (382, 2048)
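        # treat each feature channel as a temporal signal over frames and keep the leading FFT
        # amplitude/phase coefficients, yielding an array of shape (2, num_channels, top_n_sample)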
amp_spectrum, pha_spectrum = [], []
traits_representation = data.T.cpu() # traits_represen: (2048, 382)
for trait_i in traits_representation:
amp, pha, valid = self.select_pred_spectrum(trait_i.numpy()[None, :]) # trait_i (1, 382)
amp_spectrum.append(amp)
pha_spectrum.append(pha)
spectrum_data = {
"amp_spectrum": np.concatenate(amp_spectrum, axis=0), # (2048, 80)
"pha_spectrum": np.concatenate(pha_spectrum, axis=0), # (2048, 80)
}
spectrum_data = np.stack( # (2, 2048, 80)
[spectrum_data["amp_spectrum"],
spectrum_data["pha_spectrum"]],
axis=0,
)
return spectrum_data, valid
def select_pred_spectrum(self, data):
# for one trait there n prediction from n frames
# data: (1, n) eg:(1, 382)
valid = True
if self.sample:
indexes = np.linspace(0, data.shape[1], 100, endpoint=False, dtype=np.int16)
data = data[:, indexes]
else:
data = data[:, :self.used_frame]
pred_fft = np.fft.fft2(data) # pred_fft (1, 382) complex num
length = int(len(pred_fft[0]) / 2)
amp, pha = np.abs(pred_fft), np.angle(pred_fft) # amp:(1, 382) pha:(1, 382)
# include symmetry point
if self.top_n_sample < length:
amp[:, self.top_n_sample - 1] = amp[:, length]
pha[:, self.top_n_sample - 1] = pha[:, length]
amp_feat = amp[:, :self.top_n_sample] # amp_feat:(1: 80) , pha_feat:(1: 80)
pha_feat = pha[:, :self.top_n_sample]
if len(amp_feat[0]) != self.top_n_sample:
valid = False
return amp_feat.astype("float32"), pha_feat.astype("float32"), valid
def __len__(self):
return len(self.data_ls)
class SpectrumData(Dataset):
def __init__(self, data_path):
try:
self.sample_ls = torch.load(data_path)
except FileNotFoundError:
data_segments = sorted(glob.glob(data_path.replace(".pkl", "*.pkl")))
self.sample_ls = []
for seg in data_segments:
self.sample_ls.extend(torch.load(seg))
def __getitem__(self, idx):
sample = self.sample_ls[idx]
amp_spectrum = torch.as_tensor(sample["amp_spectrum"], dtype=torch.float32)
pha_spectrum = torch.as_tensor(sample["pha_spectrum"], dtype=torch.float32)
spectrum = torch.stack([amp_spectrum, pha_spectrum], dim=0)
sample = {"spectrum": spectrum, "label": sample["video_label"]}
return sample
def __len__(self):
return len(self.sample_ls)
class StatisticData(Dataset):
def __init__(self, data_path):
self.sample_dict = torch.load(data_path)
def __getitem__(self, idx):
sample = {
"statistic": self.sample_dict["video_statistic"][idx],
"label": self.sample_dict["video_label"][idx],
}
return sample
def __len__(self):
return len(self.sample_dict["video_label"])
@DATA_LOADER_REGISTRY.register()
def statistic_data_loader(cfg, mode):
assert mode in ["train", "valid", "test", "full_test"], \
f"{mode} should be one of 'train', 'valid' or 'test'"
SHUFFLE = True
data_cfg = cfg.DATA
if mode == "train":
dataset = StatisticData(data_cfg.TRAIN_IMG_DATA)
elif mode == "valid":
dataset = StatisticData(data_cfg.VALID_IMG_DATA)
SHUFFLE = False
else:
dataset = StatisticData(data_cfg.TEST_IMG_DATA)
SHUFFLE = False
loader_cfg = cfg.DATA_LOADER
data_loader = DataLoader(
dataset,
batch_size=loader_cfg.TRAIN_BATCH_SIZE,
num_workers=loader_cfg.NUM_WORKERS,
shuffle=SHUFFLE
)
return data_loader
@DATA_LOADER_REGISTRY.register()
def spectrum_data_loader(cfg, mode):
assert mode in ["train", "valid", "test", "full_test"], \
f"{mode} should be one of 'train', 'valid' or 'test'"
    SHUFFLE = True  # at test time shuffling is turned off (below), which gives a slightly better result
data_cfg = cfg.DATA
if mode == "train":
dataset = SpectrumData(data_cfg.TRAIN_IMG_DATA)
elif mode == "valid":
dataset = SpectrumData(data_cfg.VALID_IMG_DATA)
SHUFFLE = False
else:
dataset = SpectrumData(data_cfg.TEST_IMG_DATA)
SHUFFLE = False
loader_cfg = cfg.DATA_LOADER
data_loader = DataLoader(
dataset,
batch_size=loader_cfg.TRAIN_BATCH_SIZE,
num_workers=loader_cfg.NUM_WORKERS,
shuffle=SHUFFLE
)
return data_loader
@DATA_LOADER_REGISTRY.register()
def second_stage_data(cfg, mode):
assert mode in ["train", "valid", "test", "full_test"], \
f"{mode} should be one of 'train', 'valid' or 'test'"
SHUFFLE = True
data_cfg = cfg.DATA
sec_stage_cfg = cfg.DATA_LOADER.SECOND_STAGE
if mode == "train":
dataset = SecondStageData(
data_dir=data_cfg.TRAIN_IMG_DATA,
data_type=sec_stage_cfg.TYPE,
method=sec_stage_cfg.METHOD,
)
elif mode == "valid":
dataset = SecondStageData(
data_dir=data_cfg.VALID_IMG_DATA,
data_type=sec_stage_cfg.TYPE,
method=sec_stage_cfg.METHOD,
)
SHUFFLE = False
else:
dataset = SecondStageData(
data_dir=data_cfg.TEST_IMG_DATA,
data_type=sec_stage_cfg.TYPE,
method=sec_stage_cfg.METHOD,
)
SHUFFLE = False
loader_cfg = cfg.DATA_LOADER
data_loader = DataLoader(
dataset,
batch_size=loader_cfg.TRAIN_BATCH_SIZE,
num_workers=loader_cfg.NUM_WORKERS,
shuffle=SHUFFLE,
drop_last=cfg.DATA_LOADER.DROP_LAST,
)
return data_loader
if __name__ == "__main__":
os.chdir("/home/rongfan/05-personality_traits/DeepPersonality")
dataset = SecondStageData(
data_dir="datasets/second_stage/hrnet_extract/test",
data_type="feat",
method="spectrum",
)
# dataset[1]
| 10,829 | 34.048544 | 120 | py |
DeepPersonality | DeepPersonality-main/dpcv/data/datasets/cr_data.py | import torch
import os
import glob
from torch.utils.data import DataLoader
from PIL import Image
import random
import numpy as np
from pathlib import Path
from dpcv.data.datasets.bi_modal_data import VideoData
from .build import DATA_LOADER_REGISTRY
from dpcv.data.transforms.transform import set_crnet_transform, crnet_frame_face_transform
from dpcv.data.transforms.build import build_transform_spatial
class CRNetData(VideoData):
def __init__(self, data_root, img_dir, face_img_dir, audio_dir, label_file, transform=None, sample_size=100):
super().__init__(data_root, img_dir, label_file, audio_dir)
self.transform = transform
self.sample_size = sample_size
self.face_img_dir_ls = self.get_face_img_dir(face_img_dir)
def get_face_img_dir(self, face_img_dir):
dir_ls = os.listdir(os.path.join(self.data_root, face_img_dir))
return dir_ls
def __getitem__(self, idx):
glo_img, loc_img, idx = self.get_imgs(idx) # in case the idx changed
wav_aud = self.get_wav_aud(idx)
anno_score = self.get_ocean_label(idx)
anno_cls_encode = self.cls_encode(anno_score)
if self.transform:
glo_img = self.transform["frame"](glo_img)
loc_img = self.transform["face"](loc_img)
wav_aud = torch.as_tensor(wav_aud, dtype=glo_img.dtype)
anno_score = torch.as_tensor(anno_score, dtype=glo_img.dtype)
anno_cls_encode = torch.as_tensor(anno_cls_encode)
sample = {
"glo_img": glo_img, "loc_img": loc_img, "wav_aud": wav_aud,
"reg_label": anno_score, "cls_label": anno_cls_encode
}
return sample
@staticmethod
def cls_encode(score):
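        # map each trait score (expected in (0, 1)) to one of four classes, i.e. (0, 0.5),
        # [0.5, 0.6), [0.6, 0.7) and all remaining values, then one-hot encode the class per trait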
index = []
for v in score:
if 0 < v < 0.5:
index.append(0)
elif 0.5 <= v < 0.6:
index.append(1)
elif 0.6 <= v < 0.7:
index.append(2)
else:
index.append(3)
one_hot_cls = np.eye(4)[index]
return one_hot_cls
def get_imgs(self, idx):
glo_img_dir = self.img_dir_ls[idx]
if "train" in glo_img_dir:
loc_img_dir = glo_img_dir.replace("train_data", "train_data_face")
elif "valid" in glo_img_dir:
loc_img_dir = glo_img_dir.replace("valid_data", "valid_data_face")
else:
loc_img_dir = glo_img_dir.replace("test_data", "test_data_face")
# in case some video doesn't get aligned face images
if os.path.basename(loc_img_dir) not in self.face_img_dir_ls:
return self.get_imgs(idx + 1)
loc_imgs = glob.glob(loc_img_dir + "/*.jpg")
loc_imgs = sorted(loc_imgs, key=lambda x: int(Path(x).stem[5:]))
        # evenly pick self.sample_size candidate frame indices over the video and randomly select one
separate = np.linspace(0, len(loc_imgs), self.sample_size, endpoint=False, dtype=np.int16)
img_index = random.choice(separate)
try:
loc_img_pt = loc_imgs[img_index]
except IndexError:
loc_img_pt = loc_imgs[0]
glo_img_pt = self._match_img(loc_img_pt)
loc_img_arr = Image.open(loc_img_pt).convert("RGB")
glo_img_arr = Image.open(glo_img_pt).convert("RGB")
return glo_img_arr, loc_img_arr, idx
@staticmethod
def _match_img(loc_img_pt):
img_dir = os.path.dirname(loc_img_pt).replace("_face", "")
img_name, _ = os.path.basename(loc_img_pt).split(".")
img_id = int(img_name.split("_")[-1])
glo_img_name = "frame_" + str(img_id) + ".jpg"
glo_img_path = os.path.join(img_dir, glo_img_name)
if os.path.exists(glo_img_path):
return glo_img_path
else:
return os.path.join(img_dir, "frame_1.jpg")
def get_wav_aud(self, index):
img_dir_name = os.path.basename(self.img_dir_ls[index])
audio_name = f"{img_dir_name}.wav.npy"
wav_path = os.path.join(self.data_root, self.audio_dir, audio_name)
wav_ft = np.load(wav_path, allow_pickle=True)
if wav_ft.shape[-1] < 244832:
wav_ft_pad = np.zeros((1, 1, 244832))
wav_ft_pad[..., :wav_ft.shape[-1]] = wav_ft
return wav_ft_pad
return wav_ft
class AllFrameCRNetData(CRNetData):
def __getitem__(self, idx):
glo_img_ls, loc_img_ls, idx = self.get_imgs(idx)
wav_aud = self.get_wav_aud(idx)
anno_score = self.get_ocean_label(idx)
if self.transform:
glo_img_ls = [self.transform["frame"](img) for img in glo_img_ls]
loc_img_ls = [self.transform["face"](img) for img in loc_img_ls]
wav_aud = torch.as_tensor(wav_aud, dtype=glo_img_ls[0].dtype)
anno_score = torch.as_tensor(anno_score, dtype=glo_img_ls[0].dtype)
sample = {
"glo_img": glo_img_ls,
"loc_img": loc_img_ls,
"wav_aud": wav_aud,
"reg_label": anno_score,
}
return sample
def get_imgs(self, idx):
glo_img_dir = self.img_dir_ls[idx]
if "train" in glo_img_dir:
loc_img_dir = glo_img_dir.replace("train_data", "train_data_face")
elif "valid" in glo_img_dir:
loc_img_dir = glo_img_dir.replace("valid_data", "valid_data_face")
else:
loc_img_dir = glo_img_dir.replace("test_data", "test_data_face")
# in case some video doesn't get aligned face images
if os.path.basename(loc_img_dir) not in self.face_img_dir_ls:
return self.get_imgs(idx + 1)
loc_imgs = glob.glob(loc_img_dir + "/*.jpg")
loc_imgs = sorted(loc_imgs, key=lambda x: int(Path(x).stem[5:]))
loc_img_ls, glo_img_ls = [], []
# separate = np.linspace(0, len(loc_imgs), self.sample_size, endpoint=False, dtype=np.int16)
separate = list(range(len(loc_imgs)))
for img_index in separate:
try:
loc_img_pt = loc_imgs[img_index]
except IndexError:
loc_img_pt = loc_imgs[0]
glo_img_pt = self._match_img(loc_img_pt)
loc_img_ls.append(loc_img_pt)
glo_img_ls.append(glo_img_pt)
loc_img_obj_ls = [Image.open(loc_img) for loc_img in loc_img_ls]
glo_img_obj_ls = [Image.open(glo_img) for glo_img in glo_img_ls]
return glo_img_obj_ls, loc_img_obj_ls, idx
def make_data_loader(cfg, mode=None):
assert (mode in ["train", "valid", "test"]), " 'mode' only supports 'train' and 'valid'"
transforms = set_crnet_transform()
if mode == "train":
dataset = CRNetData(
cfg.DATA_ROOT, # "../datasets",
cfg.TRAIN_IMG_DATA, # "image_data/train_data",
cfg.TRAIN_IMG_FACE_DATA, # "image_data/train_data_face",
cfg.TRAIN_AUD_DATA, # "voice_data/train_data", # default train_data_244832 form librosa
cfg.TRAIN_LABEL_DATA, # "annotation/annotation_training.pkl",
transforms
)
elif mode == "valid":
dataset = CRNetData(
cfg.DATA_ROOT, # "../datasets",
cfg.VALID_IMG_DATA, # "image_data/train_data",
cfg.VALID_IMG_FACE_DATA, # "image_data/train_data_face",
cfg.VALID_AUD_DATA, # "voice_data/train_data", # default train_data_244832 form librosa
cfg.VALID_LABEL_DATA, # "annotation/annotation_training.pkl",
transforms
)
else:
dataset = CRNetData(
cfg.DATA_ROOT, # "../datasets",
cfg.TEST_IMG_DATA, # "image_data/train_data",
cfg.TEST_IMG_FACE_DATA, # "image_data/train_data_face",
cfg.TEST_AUD_DATA, # "voice_data/train_data", # default train_data_244832 form librosa
cfg.TEST_LABEL_DATA, # "annotation/annotation_training.pkl",
transforms
)
data_loader = DataLoader(
dataset=dataset,
batch_size=24,
shuffle=True,
num_workers=4, # cfg.NUM_WORKS
drop_last=True,
)
return data_loader
@DATA_LOADER_REGISTRY.register()
def crnet_data_loader(cfg, mode=None):
assert (mode in ["train", "valid", "test", "full_test"]), \
" 'mode' only supports 'train', 'valid', 'test' and 'full_test' "
transforms = build_transform_spatial(cfg)
data_cfg = cfg.DATA
if mode == "train":
dataset = CRNetData(
data_cfg.ROOT, # "../datasets",
data_cfg.TRAIN_IMG_DATA, # "image_data/train_data",
data_cfg.TRAIN_IMG_FACE_DATA, # "image_data/train_data_face",
data_cfg.TRAIN_AUD_DATA, # "voice_data/train_data", # default train_data_244832 form librosa
data_cfg.TRAIN_LABEL_DATA, # "annotation/annotation_training.pkl",
transforms,
)
elif mode == "valid":
dataset = CRNetData(
data_cfg.ROOT, # "../datasets",
data_cfg.VALID_IMG_DATA, # "image_data/train_data",
data_cfg.VALID_IMG_FACE_DATA, # "image_data/train_data_face",
data_cfg.VALID_AUD_DATA, # "voice_data/train_data", # default train_data_244832 form librosa
data_cfg.VALID_LABEL_DATA, # "annotation/annotation_training.pkl",
transforms,
)
elif mode == "full_test":
return AllFrameCRNetData(
data_cfg.ROOT, # "../datasets",
data_cfg.TEST_IMG_DATA, # "image_data/train_data",
data_cfg.TEST_IMG_FACE_DATA, # "image_data/train_data_face",
data_cfg.TEST_AUD_DATA, # "voice_data/train_data", # default train_data_244832 form librosa
data_cfg.TEST_LABEL_DATA, # "annotation/annotation_training.pkl",
transforms,
)
else:
dataset = CRNetData(
data_cfg.ROOT, # "../datasets",
data_cfg.TEST_IMG_DATA, # "image_data/train_data",
data_cfg.TEST_IMG_FACE_DATA, # "image_data/train_data_face",
data_cfg.TEST_AUD_DATA, # "voice_data/train_data", # default train_data_244832 form librosa
data_cfg.TEST_LABEL_DATA, # "annotation/annotation_training.pkl",
transforms,
)
loader_cfg = cfg.DATA_LOADER
data_loader = DataLoader(
dataset=dataset,
batch_size=loader_cfg.TRAIN_BATCH_SIZE,
shuffle=loader_cfg.SHUFFLE,
num_workers=loader_cfg.NUM_WORKERS, # cfg.NUM_WORKS
drop_last=loader_cfg.DROP_LAST,
)
return data_loader
if __name__ == "__main__":
trans = set_crnet_transform()
data_set = CRNetData(
"../../../datasets",
"image_data/train_data",
"image_data/train_data_face",
"voice_data/train_data",
"annotation/annotation_training.pkl",
trans
)
print(len(data_set))
print(data_set[2])
# for item in data_set[2].values():
# print(item.shape)
# # print(data_set._statistic_img_sample(1))
# # print(data_set._get_wav_sample(1))
# loader = make_data_loader("", mode="train")
# for i, sample in enumerate(loader):
# # if i > 5:
# # break
# for item in sample.values():
# print(item.shape)
| 11,162 | 38.168421 | 113 | py |
DeepPersonality | DeepPersonality-main/dpcv/data/datasets/ture_personality_data.py | import os
import torch
from torch.utils.data.dataset import Dataset
from torch.utils.data import DataLoader
import os.path as opt
import glob
import numpy as np
import json
import random
from pathlib import Path
from PIL import Image
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import torchaudio
from dpcv.data.transforms.build import build_transform_spatial
from dpcv.data.datasets.build import DATA_LOADER_REGISTRY
def norm(aud_ten):
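    # z-score normalisation of an audio tensor; the small epsilon guards against zero variance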
mean = aud_ten.mean()
std = aud_ten.std()
normed_ten = (aud_ten - mean) / (std + 1e-10)
return normed_ten
class Chalearn21FrameData(Dataset):
def __init__(
self, data_root, data_split, task, data_type="frame",
even_downsample=2000, trans=None, segment=False
):
self.data_root = data_root
self.ann_dir = opt.join(data_root, "annotation", task)
self.session_id, self.parts_personality = self.load_annotation(task, data_split)
self.data_split = data_split
self.task = task
self.type = data_type
self.data_dir = opt.join(data_root, data_split, f"{task}_{data_split}")
self.sessions = os.listdir(self.data_dir)
self.sample_size = even_downsample
self.img_dir_ls = []
self.task_mark = self.get_task_mark(task)
if (data_type == "frame") or (data_type == "audio"):
for dire in self.sessions:
self.img_dir_ls.extend([f"{dire}/FC1_{self.task_mark}", f"{dire}/FC2_{self.task_mark}"])
elif data_type == "face":
for dire in self.sessions:
self.img_dir_ls.extend([f"{dire}/FC1_{self.task_mark}_face", f"{dire}/FC2_{self.task_mark}_face"])
else:
raise TypeError(f"type should be 'face' or 'frame' or 'audio', but got {type}")
self.segment = segment
if not data_type == "audio":
self.all_images = self.assemble_images()
self.trans = trans
def __len__(self):
return len(self.all_images)
def __getitem__(self, idx):
img_file = self.all_images[idx]
if not self.segment:
img = Image.open(img_file)
label = self.get_ocean_label(img_file)
if self.trans:
img = self.trans(img)
else:
img_ls = [Image.open(img) for img in img_file]
label = self.get_ocean_label(img_file[0])
if self.trans:
img_ls = [self.trans(img) for img in img_ls]
img = torch.stack(img_ls, dim=0)
return {"image": img, "label": torch.as_tensor(label, dtype=torch.float32)}
def get_ocean_label(self, img_file):
*_, session, part, frame = img_file.split("/")
if self.type == "face":
part = part.replace("_face", "")
part = part.replace(self.task_mark, "T")
participant_id = self.session_id[str(int(session))][part]
participant_trait = self.parts_personality[participant_id]
participant_trait = np.array([float(v) for v in participant_trait.values()])
return participant_trait
def load_annotation(self, task, data_split):
session_id_path = opt.join(self.ann_dir, f"sessions_{data_split}_id.json")
with open(session_id_path, "r") as fo:
session_id = json.load(fo)
parts_personality = opt.join(self.ann_dir, f"parts_{data_split}_personality.json")
with open(parts_personality, "r") as fo:
parts_personality = json.load(fo)
return session_id, parts_personality
def sample_img(self, img_dir):
imgs = glob.glob(opt.join(self.data_dir, img_dir, "*.jpg"))
if self.type == "frame":
imgs = sorted(imgs, key=lambda x: int(Path(x).stem[6:]))
elif self.type == "face":
imgs = sorted(imgs, key=lambda x: int(Path(x).stem[5:]))
# evenly sample to self.sample_size frames
separate = np.linspace(0, len(imgs), self.sample_size, endpoint=False, dtype=np.int16)
# index = random.choice(separate)
selected_imgs = [imgs[idx] for idx in separate]
# that will cost too much memory on disk
# label = self.parse_label(selected_imgs[1])
# labels = [label] * len(selected_imgs)
return selected_imgs # , labels
def assemble_images(self):
all_images = []
for img_dir in self.img_dir_ls:
if not self.segment:
all_images.extend(self.sample_img(img_dir))
else:
all_images.append(self.sample_img(img_dir))
return all_images
@staticmethod
def get_task_mark(task):
if task == "talk":
return "T"
elif task == "animal":
return "A"
elif task == "ghost":
return "G"
elif task == "lego":
return "L"
else:
raise ValueError(
f" task should be in one [talk, animal, ghost, lego]"
)
def get_file_path(self, idx):
file = self.all_images[idx]
if not self.segment:
return file
else:
return file[0]
class Chlearn21AudioData(Chalearn21FrameData):
def __init__(self, data_root, data_split, task, sample_len=244832, suffix_type="npy", data_type="audio"):
super().__init__(data_root, data_split, task, data_type=data_type, segment=True)
self.sample_len = sample_len
self.suffix = suffix_type
def __len__(self):
return len(self.img_dir_ls)
def __getitem__(self, idx):
img_dir = self.img_dir_ls[idx]
aud_data = self.sample_audio_data(img_dir)
aud_label = self.get_ocean_label(img_dir)
sample = {
"aud_data": torch.as_tensor(aud_data, dtype=torch.float32),
"aud_label": torch.as_tensor(aud_label, dtype=torch.float32),
}
return sample
def sample_audio_data(self, img_dir):
aud_file = opt.join(self.data_dir, f"{img_dir}.{self.suffix}")
aud_data = np.load(aud_file)
data_len = aud_data.shape[-1]
start = np.random.randint(data_len - self.sample_len)
end = start + self.sample_len
return aud_data[:, :, start: end]
def get_ocean_label(self, img_dir):
*_, session, part = img_dir.split("/")
part = part.replace(self.task_mark, "T")
participant_id = self.session_id[str(int(session))][part]
participant_trait = self.parts_personality[participant_id]
participant_trait = np.array([float(v) for v in participant_trait.values()])
return participant_trait
class Chalearn21AudioDataPath(Chlearn21AudioData):
def __init__(self, data_root, data_split, task):
super().__init__(data_root, data_split, task)
def __getitem__(self, idx):
img_dir = self.img_dir_ls[idx]
aud_path = self.get_audio_path(img_dir)
aud_label = self.get_ocean_label(img_dir)
sample = {
"aud_path": aud_path,
"aud_label": torch.as_tensor(aud_label, dtype=torch.float32)
}
return sample
def get_audio_path(self, img_dir):
aud_path = os.path.join(self.data_dir, f"{img_dir}.wav")
return aud_path
class Chalearn21LSTMAudioData(Chlearn21AudioData):
def sample_audio_data(self, img_dir):
aud_file = opt.join(self.data_dir, f"{img_dir}.wav_mfcc_mt.csv")
wav_ft = np.loadtxt(aud_file, delimiter=",")
return wav_ft
class Chalearn21InterpretAudioData(Chlearn21AudioData):
def sample_audio_data(self, img_dir):
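        # load the raw wav, resample the first channel to 4 kHz, take the magnitude of its FFT,
        # keep the first half of the spectrum (truncated to 40000 points) and normalise it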
aud_file = opt.join(self.data_dir, f"{img_dir}.wav")
aud_data, sample_rate = torchaudio.load(aud_file)
trans_aud = torchaudio.transforms.Resample(sample_rate, 4000)(aud_data[0, :].view(1, -1))
trans_fft = torch.fft.fft(trans_aud)
half_length = int(trans_aud.shape[-1] / 2)
trans_fre = torch.abs(trans_fft)[..., :half_length]
trans_fre = trans_fre[:, :40000]
trans_fre_norm = norm(trans_fre)
# if trans_fre_norm.shape[-1] < 30604:
# return self.get_wave_data(index - 1)
return trans_fre_norm
class Chalearn21AudioVisualData(Chalearn21FrameData):
sample_len = 50176
def __getitem__(self, idx):
img_file = self.all_images[idx]
img = Image.open(img_file)
label = self.get_ocean_label(img_file)
wav = self.get_wave_data(img_file)
if self.trans:
img = self.trans(img)
wav = torch.as_tensor(wav, dtype=img.dtype)
label = torch.as_tensor(label, dtype=img.dtype)
return {"image": img, "audio": wav, "label": label}
def get_wave_data(self, img_file):
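        # locate the .npy audio feature that belongs to the frame's video and randomly crop a
        # contiguous segment of sample_len points from it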
dir_name = os.path.dirname(img_file)
if self.type == "frame":
aud_file = f"{dir_name}.npy"
if self.type == "face":
dir_name = dir_name.replace("_face", "")
aud_file = f"{dir_name}.npy"
aud_data = np.load(aud_file)
data_len = aud_data.shape[-1]
start = np.random.randint(data_len - self.sample_len)
end = start + self.sample_len
return aud_data[:, :, start: end]
class Chalearn21CRNetData(Chalearn21FrameData):
sample_len = 244832
def __getitem__(self, idx):
img_file = self.all_images[idx]
img = Image.open(img_file)
loc_img = self.get_loc_img(img_file)
label = self.get_ocean_label(img_file)
label_cls_encode = self.cls_encode(label)
wav = self.get_wave_data(img_file)
if self.trans:
img = self.trans["frame"](img)
loc_img = self.trans["face"](loc_img)
wav = torch.as_tensor(wav, dtype=img.dtype)
label = torch.as_tensor(label, dtype=img.dtype)
return {"glo_img": img, "loc_img": loc_img, "wav_aud": wav,
"reg_label": label, "cls_label": label_cls_encode}
def get_wave_data(self, img_file):
dir_name = os.path.dirname(img_file)
if self.type == "frame":
aud_file = f"{dir_name}.npy"
if self.type == "face":
dir_name = dir_name.replace("_face", "")
aud_file = f"{dir_name}.npy"
aud_data = np.load(aud_file)
data_len = aud_data.shape[-1]
start = np.random.randint(data_len - self.sample_len)
end = start + self.sample_len
return aud_data[:, :, start: end]
def get_loc_img(self, img_file):
img_file = Path(img_file)
img_id = img_file.stem.split("_")[-1]
loc_img_dir = f"{img_file.parent}_face"
loc_img_file = f"{loc_img_dir}/face_{img_id}.jpg"
try:
loc_img = Image.open(loc_img_file)
except FileNotFoundError:
loc_img_ls = list(Path(loc_img_dir).rglob("*.jpg"))
loc_img_file = random.choice(loc_img_ls)
loc_img = Image.open(loc_img_file)
return loc_img
@staticmethod
def cls_encode(score):
index = []
for v in score:
if v < -1:
index.append(0)
elif -1 <= v < 0:
index.append(1)
elif 0 <= v < 1:
index.append(2)
else:
index.append(3)
one_hot_cls = np.eye(4)[index]
return one_hot_cls
class CRNetAudioTruePersonality(Chlearn21AudioData):
def __init__(self, data_root, data_split, task, sample_len=244832):
super().__init__(data_root, data_split, task, sample_len)
def __getitem__(self, idx):
img_dir = self.img_dir_ls[idx]
aud_data = self.sample_audio_data(img_dir)
aud_label = self.get_ocean_label(img_dir)
label_cls = self.cls_encode(aud_label)
return {
"aud_data": torch.as_tensor(aud_data, dtype=torch.float32),
"aud_label": torch.as_tensor(aud_label, dtype=torch.float32),
"aud_label_cls": torch.as_tensor(label_cls, dtype=torch.float32),
}
@staticmethod
def cls_encode(score):
index = []
for v in score:
if v < -1:
index.append(0)
elif -1 <= v < 0:
index.append(1)
elif 0 <= v < 1:
index.append(2)
else:
index.append(3)
one_hot_cls = np.eye(4)[index]
return one_hot_cls
class Chalearn21PersemonData(Chalearn21FrameData):
def __init__(
self, data_root, data_split, task, data_type, trans,
emo_data_root, emo_img_dir, emo_label, emo_trans, segment=False,
):
super().__init__(
data_root, data_split, task, data_type=data_type,
even_downsample=2000, trans=trans, segment=segment
)
self.emo_data_root = emo_data_root
self.emo_img_dir = emo_img_dir
self.emo_label = emo_label
self.emo_trans = emo_trans
self.emo_data_ls = self.emo_data_parser()
def emo_data_parser(self):
emo_label_path = os.path.join(self.emo_data_root, self.emo_label)
video_files = [file for file in os.listdir(emo_label_path) if len(file) < 12]
video_files_pt = [os.path.join(emo_label_path, video_file) for video_file in video_files]
return video_files_pt
def __getitem__(self, index):
per_img_ls, per_lab_ls = self.gather_personality_data(index)
emo_img_ls, emo_lab_ls = self.gather_emotion_data()
if self.trans:
per_img_ls = [self.trans(per_img) for per_img in per_img_ls]
if self.emo_trans:
emo_img_ls = [self.emo_trans(emo_img) for emo_img in emo_img_ls]
per_imgs_ts = torch.stack(per_img_ls, 0)
per_labs = torch.as_tensor(per_lab_ls, dtype=torch.float32)
emo_imgs_ts = torch.stack(emo_img_ls, 0)
emo_labs = torch.as_tensor(emo_lab_ls)
sample = {
"per_img": per_imgs_ts,
"emo_img": emo_imgs_ts,
"per_label": per_labs,
"emo_label": emo_labs,
}
return sample
def gather_emotion_data(self):
file = random.choice(self.emo_data_ls)
file_name = Path(file).stem
img_dir = os.path.join(self.emo_data_root, self.emo_img_dir, file_name)
imgs = os.listdir(img_dir)
random.shuffle(imgs)
imgs = imgs[:100]
imgs_pt = [os.path.join(img_dir, img) for img in imgs]
with open(file, 'r') as f:
frame_label = [map(lambda x: float(x), line.strip().split(",")) for line in f.readlines()[1:]]
try:
imgs_label = [list(frame_label[int(img_name.split(".")[0])]) for img_name in imgs]
        except (IndexError, ValueError):
return self.gather_emotion_data()
imgs_rgb = [Image.open(img_pt) for img_pt in imgs_pt]
return imgs_rgb, imgs_label
def gather_personality_data(self, index):
img_files = self.all_images[index * 100: (index + 1) * 100]
img_ls = []
label_ls = []
for img_file in img_files:
img_ls.append(Image.open(img_file))
label_ls.append(self.get_ocean_label(img_file))
return img_ls, label_ls
def __len__(self):
return int(len(self.all_images) / 100)
class Chalearn21LSTMData(Chalearn21FrameData):
def __init__(
self, data_root, data_split, task, data_type="frame",
even_downsample=2000, trans=None, segment=True,
):
super().__init__(data_root, data_split, task, data_type, even_downsample, trans, segment)
def __getitem__(self, idx):
imgs_array_ls, file_ls = self._get_statistic_img_sample(idx)
wav_ft = self._get_wav_sample(file_ls[0])
anno_score = self.get_ocean_label(file_ls[0])
if self.trans:
imgs_ten_ls = []
for img_arr in imgs_array_ls:
img_ten = self.trans(img_arr)
imgs_ten_ls.append(img_ten)
imgs_ten = torch.stack(imgs_ten_ls, dim=0)
else:
imgs_ten = torch.as_tensor(imgs_array_ls)
wav_ft = torch.as_tensor(wav_ft, dtype=imgs_ten.dtype)
anno_score = torch.as_tensor(anno_score, dtype=imgs_ten.dtype)
sample = {"image": imgs_ten, "audio": wav_ft, "label": anno_score}
return sample
def __len__(self):
return len(self.img_dir_ls)
def _get_statistic_img_sample(self, index):
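        # split the (sorted) frame sequence into six even intervals and randomly pick one frame
        # from each interval, returning the opened images and their file paths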
img_dir = self.img_dir_ls[index]
imgs = glob.glob(opt.join(self.data_dir, img_dir, "*.jpg"))
if self.type == "frame":
imgs = sorted(imgs, key=lambda x: int(Path(x).stem[6:]))
elif self.type == "face":
imgs = sorted(imgs, key=lambda x: int(Path(x).stem[5:]))
if len(imgs) > 10:
separate = np.linspace(0, len(imgs) - 1, 7, endpoint=True, dtype=np.int32)
selected = [random.randint(separate[idx], separate[idx + 1]) for idx in range(6)]
img_array_ls = []
img_file_ls = []
for idx in selected:
img_pt = imgs[idx]
img_array = Image.open(img_pt).convert("RGB")
img_array_ls.append(img_array)
img_file_ls.append(img_pt)
return img_array_ls, img_file_ls
else:
raise ValueError("encountered bad input {}".format(self.img_dir_ls[index]))
def _get_wav_sample(self, img_file):
img_dir_name = os.path.dirname(img_file)
if "face" in img_dir_name:
img_dir_name = img_dir_name.replace("_face", "")
wav_path = f"{img_dir_name}.wav_mfcc_mt.csv"
wav_ft = np.loadtxt(wav_path, delimiter=",")
return wav_ft
class Chalearn21LSTMVisualData(Chalearn21LSTMData):
def __getitem__(self, idx):
imgs_array_ls, file_ls = self._get_statistic_img_sample(idx)
anno_score = self.get_ocean_label(file_ls[0])
if self.trans:
imgs_ten_ls = []
for img_arr in imgs_array_ls:
img_ten = self.trans(img_arr)
imgs_ten_ls.append(img_ten)
imgs_ten = torch.stack(imgs_ten_ls, dim=0)
else:
imgs_ten = torch.as_tensor(imgs_array_ls)
anno_score = torch.as_tensor(anno_score, dtype=imgs_ten.dtype)
sample = {"image": imgs_ten, "label": anno_score}
return sample
@DATA_LOADER_REGISTRY.register()
def true_personality_dataloader(cfg, mode):
assert (mode in ["train", "valid", "trainval", "test", "full_test"]), \
"'mode' should be 'train' , 'valid', 'trainval', 'test', 'full_test' "
shuffle = False if mode in ["valid", "test", "full_test"] else True
transform = build_transform_spatial(cfg)
dataset = Chalearn21FrameData(
data_root=cfg.DATA.ROOT, # "datasets/chalearn2021",
data_split=mode,
task=cfg.DATA.SESSION, # "talk"
data_type=cfg.DATA.TYPE,
trans=transform,
)
data_loader = DataLoader(
dataset=dataset,
batch_size=cfg.DATA_LOADER.TRAIN_BATCH_SIZE,
shuffle=shuffle,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
)
return data_loader
@DATA_LOADER_REGISTRY.register()
def true_personality_audio_dataloader(cfg, mode):
assert (mode in ["train", "valid", "trainval", "test", "full_test"]), \
"'mode' should be 'train' , 'valid', 'trainval', 'test', 'full_test' "
shuffle = False if mode in ["valid", "test", "full_test"] else True
dataset = Chlearn21AudioData(
data_root=cfg.DATA.ROOT, # "datasets/chalearn2021",
data_split=mode,
task=cfg.DATA.SESSION, # "talk"
)
data_loader = DataLoader(
dataset=dataset,
batch_size=cfg.DATA_LOADER.TRAIN_BATCH_SIZE,
shuffle=shuffle,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
)
return data_loader
@DATA_LOADER_REGISTRY.register()
def true_personality_audio_bimodal_lstm_dataloader(cfg, mode):
assert (mode in ["train", "valid", "trainval", "test", "full_test"]), \
"'mode' should be 'train' , 'valid', 'trainval', 'test', 'full_test' "
shuffle = False if mode in ["valid", "test", "full_test"] else True
dataset = Chalearn21LSTMAudioData(
data_root=cfg.DATA.ROOT, # "datasets/chalearn2021",
data_split=mode,
task=cfg.DATA.SESSION, # "talk"
)
data_loader = DataLoader(
dataset=dataset,
batch_size=cfg.DATA_LOADER.TRAIN_BATCH_SIZE,
shuffle=shuffle,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
)
return data_loader
@DATA_LOADER_REGISTRY.register()
def true_personality_interpret_aud_dataloader(cfg, mode):
assert (mode in ["train", "valid", "trainval", "test", "full_test"]), \
"'mode' should be 'train' , 'valid', 'trainval', 'test', 'full_test' "
shuffle = False if mode in ["valid", "test", "full_test"] else True
dataset = Chalearn21InterpretAudioData(
data_root=cfg.DATA.ROOT, # "datasets/chalearn2021",
data_split=mode,
task=cfg.DATA.SESSION, # "talk"
)
data_loader = DataLoader(
dataset=dataset,
batch_size=cfg.DATA_LOADER.TRAIN_BATCH_SIZE,
shuffle=shuffle,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
)
return data_loader
@DATA_LOADER_REGISTRY.register()
def true_personality_crnet_dataloader(cfg, mode):
assert (mode in ["train", "valid", "trainval", "test", "full_test"]), \
"'mode' should be 'train' , 'valid', 'trainval', 'test', 'full_test' "
shuffle = False if mode in ["valid", "test", "full_test"] else True
transforms = build_transform_spatial(cfg)
num_worker = cfg.DATA_LOADER.NUM_WORKERS if mode in ["valid", "train"] else 1
dataset = Chalearn21CRNetData(
data_root=cfg.DATA.ROOT, # "datasets/chalearn2021",
data_split=mode,
task=cfg.DATA.SESSION, # "talk"
trans=transforms,
)
data_loader = DataLoader(
dataset=dataset,
batch_size=cfg.DATA_LOADER.TRAIN_BATCH_SIZE,
shuffle=shuffle,
num_workers=0,
)
return data_loader
@DATA_LOADER_REGISTRY.register()
def true_personality_crnet_audio_dataloader(cfg, mode):
assert (mode in ["train", "valid", "trainval", "test", "full_test"]), \
"'mode' should be 'train' , 'valid', 'trainval', 'test', 'full_test' "
shuffle = False if mode in ["valid", "test", "full_test"] else True
num_worker = cfg.DATA_LOADER.NUM_WORKERS if mode in ["valid", "train"] else 1
dataset = CRNetAudioTruePersonality(
data_root=cfg.DATA.ROOT, # "datasets/chalearn2021",
data_split=mode,
task=cfg.DATA.SESSION, # "talk"
)
data_loader = DataLoader(
dataset=dataset,
batch_size=cfg.DATA_LOADER.TRAIN_BATCH_SIZE,
shuffle=shuffle,
num_workers=num_worker,
)
return data_loader
@DATA_LOADER_REGISTRY.register()
def true_personality_persemon_dataloader(cfg, mode):
assert (mode in ["train", "valid", "trainval", "test", "full_test"]), \
"'mode' should be 'train' , 'valid', 'trainval', 'test', 'full_test' "
shuffle = False if mode in ["valid", "test", "full_test"] else True
transforms = build_transform_spatial(cfg)
persemon_dataset = Chalearn21PersemonData(
data_root=cfg.DATA.ROOT, # "datasets/chalearn2021",
data_split=mode,
task=cfg.DATA.SESSION,
data_type=cfg.DATA.TYPE, # "frame",
trans=transforms,
emo_data_root=cfg.DATA.VA_ROOT, # "datasets",
emo_img_dir=cfg.DATA.VA_DATA, # "va_data/cropped_aligned",
emo_label=cfg.DATA.VA_TRAIN_LABEL if mode == "train" else cfg.DATA.VA_VALID_LABEL,
emo_trans=transforms,
)
data_loader = DataLoader(
dataset=persemon_dataset,
batch_size=1,
shuffle=shuffle,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
)
return data_loader
@DATA_LOADER_REGISTRY.register()
def true_personality_audio_visual_dataloader(cfg, mode):
assert (mode in ["train", "valid", "trainval", "test", "full_test"]), \
"'mode' should be 'train' , 'valid', 'trainval', 'test', 'full_test' "
shuffle = False if mode in ["valid", "test", "full_test"] else True
transforms = build_transform_spatial(cfg)
num_worker = cfg.DATA_LOADER.NUM_WORKERS if mode in ["valid", "train"] else 1
dataset = Chalearn21AudioVisualData(
data_root=cfg.DATA.ROOT, # "datasets/chalearn2021",
data_split=mode,
task=cfg.DATA.SESSION, # "talk"
data_type=cfg.DATA.TYPE,
trans=transforms
)
data_loader = DataLoader(
dataset=dataset,
batch_size=cfg.DATA_LOADER.TRAIN_BATCH_SIZE,
shuffle=shuffle,
num_workers=num_worker,
)
return data_loader
@DATA_LOADER_REGISTRY.register()
def true_personality_lstm_dataloader(cfg, mode):
assert (mode in ["train", "valid", "trainval", "test", "full_test"]), \
"'mode' should be 'train' , 'valid', 'trainval', 'test', 'full_test' "
shuffle = False if mode in ["valid", "test", "full_test"] else True
transforms = build_transform_spatial(cfg)
num_worker = cfg.DATA_LOADER.NUM_WORKERS if mode in ["valid", "train"] else 0
dataset = Chalearn21LSTMData(
data_root=cfg.DATA.ROOT, # "datasets/chalearn2021",
data_split=mode,
task=cfg.DATA.SESSION, # "talk"
data_type=cfg.DATA.TYPE,
trans=transforms
)
data_loader = DataLoader(
dataset=dataset,
batch_size=cfg.DATA_LOADER.TRAIN_BATCH_SIZE,
shuffle=shuffle,
num_workers=num_worker,
)
return data_loader
@DATA_LOADER_REGISTRY.register()
def true_personality_lstm_visual_dataloader(cfg, mode):
assert (mode in ["train", "valid", "trainval", "test", "full_test"]), \
"'mode' should be 'train' , 'valid', 'trainval', 'test', 'full_test' "
shuffle = False if mode in ["valid", "test", "full_test"] else True
transforms = build_transform_spatial(cfg)
num_worker = cfg.DATA_LOADER.NUM_WORKERS if mode in ["valid", "train"] else 0
dataset = Chalearn21LSTMVisualData(
data_root=cfg.DATA.ROOT, # "datasets/chalearn2021",
data_split=mode,
task=cfg.DATA.SESSION, # "talk"
data_type=cfg.DATA.TYPE,
trans=transforms
)
data_loader = DataLoader(
dataset=dataset,
batch_size=cfg.DATA_LOADER.TRAIN_BATCH_SIZE,
shuffle=shuffle,
num_workers=num_worker,
)
return data_loader
if __name__ == "__main__":
os.chdir("/home/rongfan/05-personality_traits/DeepPersonality")
# train_dataset = Chalearn21FrameData(
# data_root="datasets/chalearn2021",
# data_split="train",
# task="talk",
# )
# print(len(train_dataset))
# print(train_dataset[1])
# ==========================================================
# test_dataset = Chalearn21FrameData(
# data_root="datasets/chalearn2021",
# data_split="test",
# task="talk",
# )
# print(len(test_dataset))
# print(test_dataset[1])
# ===========================================================
# val_dataset = Chalearn21FrameData(
# data_root="datasets/chalearn2021",
# data_split="val",
# task="talk",
# )
# print(len(val_dataset))
# print(val_dataset[1])
# ===========================================================================================================
# train_dataset = Chlearn21AudioData(
# data_root="datasets/chalearn2021",
# data_split="train",
# task="talk",
# )
#
# print(len(train_dataset))
# print(train_dataset[1])
# ============================================================================================================
# def face_image_transform():
# import torchvision.transforms as transforms
    #     norm_mean = [0.485, 0.456, 0.406] # statistics from imagenet dataset which contains about 1.2 million images
# norm_std = [0.229, 0.224, 0.225]
# transforms = transforms.Compose([
# transforms.Resize(112),
# transforms.RandomHorizontalFlip(0.5),
# transforms.ToTensor(),
# transforms.Normalize(norm_mean, norm_std)
# ])
# return transforms
#
# transforms = face_image_transform()
# persemon_dataset = Chalearn21PersemonData(
# data_root="datasets/chalearn2021", data_split="train", task="talk", data_type="frame", trans=transforms,
# emo_data_root="datasets", emo_img_dir="va_data/cropped_aligned",
# emo_label="va_data/va_label/VA_Set/Train_Set", emo_trans=transforms,
# )
# print(persemon_dataset[2])
train_dataset = Chalearn21CRNetData(
data_root="datasets/chalearn2021",
data_split="train",
task="talk",
)
print(len(train_dataset))
print(train_dataset[1])
| 28,793 | 35.12798 | 119 | py |
DeepPersonality | DeepPersonality-main/dpcv/data/datasets/vat_data.py | import torch
from torch.utils.data import DataLoader
from dpcv.data.transforms.transform import set_vat_transform_op
from dpcv.data.datasets.video_segment_data import VideoFrameSegmentData
from dpcv.data.datasets.tpn_data import TPNData as VATData
from dpcv.data.datasets.tpn_data import TPNTruePerData as VATTruePerData
from dpcv.data.datasets.tpn_data import FullTestTPNData as FullTestVATData
from dpcv.data.transforms.temporal_transforms import TemporalRandomCrop, TemporalDownsample, TemporalEvenCropDownsample
from dpcv.data.transforms.temporal_transforms import Compose as TemporalCompose
from dpcv.data.datasets.build import DATA_LOADER_REGISTRY
from dpcv.data.transforms.build import build_transform_spatial
from dpcv.data.datasets.common import VideoLoader
def make_data_loader(cfg, mode="train"):
    assert (mode in ["train", "valid", "trainval", "test"]), "'mode' should be 'train', 'valid', 'trainval' or 'test'"
spatial_transform = set_vat_transform_op()
temporal_transform = [TemporalRandomCrop(16)]
temporal_transform = TemporalCompose(temporal_transform)
video_loader = VideoLoader()
if mode == "train":
data_set = VATData(
cfg.DATA_ROOT,
cfg.TRAIN_IMG_DATA,
cfg.TRAIN_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
elif mode == "valid":
data_set = VATData(
cfg.DATA_ROOT,
cfg.VALID_IMG_DATA,
cfg.VALID_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
elif mode == "trainval":
data_set = VATData(
cfg.DATA_ROOT,
cfg.TRAINVAL_IMG_DATA,
cfg.TRAINVAL_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
else:
data_set = VATData(
cfg.DATA_ROOT,
cfg.TEST_IMG_DATA,
cfg.TEST_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
data_loader = DataLoader(
dataset=data_set,
batch_size=cfg.TRAIN_BATCH_SIZE,
shuffle=cfg.SHUFFLE,
num_workers=cfg.NUM_WORKERS,
)
return data_loader
@DATA_LOADER_REGISTRY.register()
def vat_data_loader(cfg, mode="train"):
    assert (mode in ["train", "valid", "trainval", "test", "full_test"]), \
        "'mode' should be 'train', 'valid', 'trainval', 'test' or 'full_test'"
spatial_transform = build_transform_spatial(cfg)
temporal_transform = [TemporalDownsample(length=100), TemporalRandomCrop(16)]
# temporal_transform = [TemporalDownsample(length=16)]
temporal_transform = TemporalCompose(temporal_transform)
data_cfg = cfg.DATA
if "face" in data_cfg.TRAIN_IMG_DATA:
video_loader = VideoLoader(image_name_formatter=lambda x: f"face_{x}.jpg")
else:
video_loader = VideoLoader(image_name_formatter=lambda x: f"frame_{x}.jpg")
if mode == "train":
data_set = VATData(
data_cfg.ROOT,
data_cfg.TRAIN_IMG_DATA,
data_cfg.TRAIN_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
elif mode == "valid":
data_set = VATData(
data_cfg.ROOT,
data_cfg.VALID_IMG_DATA,
data_cfg.VALID_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
elif mode == "trainval":
data_set = VATData(
data_cfg.ROOT,
data_cfg.TRAINVAL_IMG_DATA,
data_cfg.TRAINVAL_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
elif mode == "full_test":
temporal_transform = [TemporalDownsample(length=100), TemporalEvenCropDownsample(16, 6)]
temporal_transform = TemporalCompose(temporal_transform)
return FullTestVATData(
data_cfg.ROOT,
data_cfg.TEST_IMG_DATA,
data_cfg.TEST_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
else:
data_set = VATData(
data_cfg.ROOT,
data_cfg.TEST_IMG_DATA,
data_cfg.TEST_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
loader_cfg = cfg.DATA_LOADER
data_loader = DataLoader(
dataset=data_set,
batch_size=loader_cfg.TRAIN_BATCH_SIZE,
shuffle=loader_cfg.SHUFFLE,
num_workers=loader_cfg.NUM_WORKERS,
)
return data_loader
@DATA_LOADER_REGISTRY.register()
def true_per_vat_data_loader(cfg, mode="train"):
    assert (mode in ["train", "valid", "trainval", "test", "full_test"]), \
        "'mode' should be 'train', 'valid', 'trainval', 'test' or 'full_test'"
spatial_transform = build_transform_spatial(cfg)
temporal_transform = [TemporalRandomCrop(16)]
# temporal_transform = [TemporalDownsample(length=2000), TemporalRandomCrop(16)]
temporal_transform = TemporalCompose(temporal_transform)
data_cfg = cfg.DATA
if data_cfg.TYPE == "face":
video_loader = VideoLoader(image_name_formatter=lambda x: f"face_{x}.jpg")
else:
video_loader = VideoLoader(image_name_formatter=lambda x: f"frame_{x}.jpg")
data_set = VATTruePerData(
data_root="datasets/chalearn2021",
data_split=mode,
task=data_cfg.SESSION,
data_type=data_cfg.TYPE,
video_loader=video_loader,
spa_trans=spatial_transform,
tem_trans=temporal_transform,
)
shuffle = True if mode == "train" else False
loader_cfg = cfg.DATA_LOADER
data_loader = DataLoader(
dataset=data_set,
batch_size=loader_cfg.TRAIN_BATCH_SIZE,
shuffle=shuffle,
num_workers=loader_cfg.NUM_WORKERS,
)
return data_loader
if __name__ == "__main__":
import os
from dpcv.config.tpn_cfg import cfg
os.chdir("../../")
# interpret_data = InterpretData(
# data_root="datasets",
# img_dir="image_data/valid_data",
# label_file="annotation/annotation_validation.pkl",
# trans=set_transform_op(),
# )
# print(interpret_data[18])
data_loader = make_data_loader(cfg, mode="valid")
for i, item in enumerate(data_loader):
print(item["image"].shape, item["label"].shape)
if i > 5:
break
| 6,490 | 31.949239 | 119 | py |
DeepPersonality | DeepPersonality-main/dpcv/data/datasets/slow_fast_data_true_personality.py | from dpcv.data.datasets.video_segment_data import TruePersonalityVideoFrameSegmentData
import torch
from torch.utils.data import DataLoader
from dpcv.data.datasets.bi_modal_data import VideoData
from dpcv.data.transforms.transform import set_transform_op
from dpcv.data.transforms.build import build_transform_spatial
from .build import DATA_LOADER_REGISTRY
from dpcv.data.transforms.temporal_transforms import TemporalRandomCrop, TemporalDownsample, TemporalEvenCropDownsample
from dpcv.data.transforms.temporal_transforms import Compose as TemporalCompose
from dpcv.data.datasets.common import VideoLoader
class SlowFastTruePerData(TruePersonalityVideoFrameSegmentData):
def __getitem__(self, index):
img = self.get_image_data(index)
frame_list = self.pack_pathway_output(img)
label = self.get_image_label(index)
return {"image": frame_list, "label": torch.as_tensor(label, dtype=torch.float32)}
@staticmethod
def pack_pathway_output(frames):
fast_pathway = frames
slow_pathway = torch.index_select(
frames,
1,
torch.linspace(
0, frames.shape[1] - 1, frames.shape[1] // 4
).long(),
)
frame_list = [slow_pathway, fast_pathway]
return frame_list
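# Minimal sketch (illustration only, not used by the training pipeline): the
# pack_pathway_output helper above feeds every sampled frame to the fast
# pathway and every 4th frame to the slow pathway, assuming a clip tensor
# shaped (C, T, H, W) whose frame count T is divisible by 4.
def _demo_pack_pathway_shapes():
    clip = torch.randn(3, 64, 112, 112)  # dummy 64-frame RGB clip
    slow, fast = SlowFastTruePerData.pack_pathway_output(clip)
    # slow keeps 64 // 4 = 16 frames, fast keeps all 64 frames
    return slow.shape, fast.shape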
@DATA_LOADER_REGISTRY.register()
def true_per_slow_fast_data_loader(cfg, mode="train"):
spatial_transform = build_transform_spatial(cfg)
temporal_transform = [TemporalRandomCrop(64)]
# temporal_transform = [TemporalDownsample(length=2000), TemporalRandomCrop(64)]
temporal_transform = TemporalCompose(temporal_transform)
data_cfg = cfg.DATA
if data_cfg.TYPE == "face":
video_loader = VideoLoader(image_name_formatter=lambda x: f"face_{x}.jpg")
else:
video_loader = VideoLoader(image_name_formatter=lambda x: f"frame_{x}.jpg")
data_set = SlowFastTruePerData(
data_root="datasets/chalearn2021",
data_split=mode,
task=data_cfg.SESSION,
data_type=data_cfg.TYPE,
video_loader=video_loader,
spa_trans=spatial_transform,
tem_trans=temporal_transform,
)
shuffle = True if mode == "train" else False
loader_cfg = cfg.DATA_LOADER
data_loader = DataLoader(
dataset=data_set,
batch_size=loader_cfg.TRAIN_BATCH_SIZE,
shuffle=shuffle,
num_workers=loader_cfg.NUM_WORKERS,
)
return data_loader
if __name__ == "__main__":
import os
from dpcv.config.default_config_opt import cfg
os.chdir("../../../")
dataloader = true_per_slow_fast_data_loader(cfg)
| 2,616 | 33.893333 | 120 | py |
DeepPersonality | DeepPersonality-main/dpcv/data/datasets/feature_extract_true_personality_dataset.py | from .ture_personality_data import Chalearn21FrameData, Chalearn21PersemonData
from dpcv.data.transforms.build import build_transform_spatial
import glob
import torch
import os.path as opt
from pathlib import Path
import numpy as np
import random
from PIL import Image
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
class AllSampleTruePersonalityData(Chalearn21FrameData):
def __init__(
self, data_root, data_split, task,
data_type="frame", img_format="jpg", even_downsample=1000, trans=None, segment=True,
):
super().__init__(
data_root, data_split, task, data_type, even_downsample, trans, segment,
)
self.img_format = img_format
def __len__(self):
return len(self.img_dir_ls)
def __getitem__(self, idx):
img_obj_ls, img_file_ls = self.get_sample_frames(idx)
img_label = self.get_ocean_label(img_file_ls[0])
if self.trans:
img_obj_ls = [self.trans(img) for img in img_obj_ls]
return {"all_images": img_obj_ls, "label": torch.as_tensor(img_label, dtype=torch.float32)}
def get_sample_frames(self, idx):
img_dir = self.img_dir_ls[idx]
imgs = glob.glob(opt.join(self.data_dir, img_dir, f"*.{self.img_format}"))
if self.type == "frame":
imgs = sorted(imgs, key=lambda x: int(Path(x).stem[6:]))
elif self.type == "face":
imgs = sorted(imgs, key=lambda x: int(Path(x).stem[5:]))
if self.sample_size:
separate = np.linspace(0, len(imgs), self.sample_size, endpoint=False, dtype=np.int32)
imgs = [imgs[idx] for idx in separate]
img_obj_ls = [Image.open(img_path) for img_path in imgs]
return img_obj_ls, imgs
class AllSampleBimodalTruePersonalityData(AllSampleTruePersonalityData):
sample_len = 50176
def __getitem__(self, idx):
img_obj_ls, img_file_ls = self.get_sample_frames(idx)
img_label = self.get_ocean_label(img_file_ls[0])
wav = self.get_wave_data(img_file_ls[0])
if self.trans:
img_obj_ls = [self.trans(img) for img in img_obj_ls]
wav = torch.as_tensor(wav, dtype=torch.float32)
img_label = torch.as_tensor(img_label, dtype=torch.float32)
sample = {"image": img_obj_ls, "audio": wav, "label": img_label}
return sample
def get_wave_data(self, file_name):
dir_name = opt.dirname(file_name)
if self.type == "frame":
aud_file = f"{dir_name}.npy"
if self.type == "face":
dir_name = dir_name.replace("_face", "")
aud_file = f"{dir_name}.npy"
aud_data = np.load(aud_file)
data_len = aud_data.shape[-1]
start = np.random.randint(data_len - self.sample_len)
end = start + self.sample_len
return aud_data[:, :, start: end]
class AllSamplePersemonTruePersonalityData(Chalearn21PersemonData):
def __init__(
self, data_root, data_split, task, data_type, trans,
emo_data_root, emo_img_dir, emo_label, emo_trans,
):
super().__init__(
data_root, data_split, task, data_type, trans,
emo_data_root, emo_img_dir, emo_label, emo_trans, segment=True,
)
self.emo_data_root = emo_data_root
self.emo_img_dir = emo_img_dir
self.emo_label = emo_label
self.emo_trans = emo_trans
self.emo_data_ls = self.emo_data_parser()
def __getitem__(self, idx):
per_img_ls, img_file_ls = self.get_sample_frames(idx)
per_lab_ls = [self.get_ocean_label(img_file_ls[0])] * len(per_img_ls)
emo_img_ls, emo_lab_ls = self.gather_emotion_data()
if self.trans:
per_img_ls = [self.trans(img) for img in per_img_ls]
if self.emo_trans:
emo_img_ls = [self.emo_trans(emo_img) for emo_img in emo_img_ls]
per_imgs_ts = torch.stack(per_img_ls, 0)
per_labs = torch.as_tensor(per_lab_ls, dtype=torch.float32)
emo_imgs_ts = torch.stack(emo_img_ls, 0)
emo_labs = torch.as_tensor(emo_lab_ls)
sample = {
"per_img": per_imgs_ts,
"emo_img": emo_imgs_ts,
"per_label": per_labs,
"emo_label": emo_labs,
}
return sample
def get_sample_frames(self, idx):
img_dir = self.img_dir_ls[idx]
imgs = glob.glob(opt.join(self.data_dir, img_dir, f"*.jpg"))
if self.type == "frame":
imgs = sorted(imgs, key=lambda x: int(Path(x).stem[6:]))
elif self.type == "face":
imgs = sorted(imgs, key=lambda x: int(Path(x).stem[5:]))
if self.sample_size:
separate = np.linspace(0, len(imgs), self.sample_size, endpoint=False, dtype=np.int32)
imgs = [imgs[idx] for idx in separate]
img_obj_ls = [Image.open(img_path) for img_path in imgs]
return img_obj_ls, imgs
class AllSampleCRNetTruePersonalityData(AllSampleTruePersonalityData):
sample_len = 244832
def __getitem__(self, idx):
img_obj_ls, img_file_ls = self.get_sample_frames(idx)
loc_img_ls = [self.get_loc_img(img_file) for img_file in img_file_ls]
wav = self.get_wave_data(img_file_ls[0])
wav = torch.as_tensor(wav, dtype=torch.float32)
img_label = self.get_ocean_label(img_file_ls[0])
img_label = torch.as_tensor(img_label, dtype=torch.float32)
# label_cls_encode = self.cls_encode(img_label)
if self.trans:
img_obj_ls = [self.trans["frame"](img) for img in img_obj_ls]
loc_img_ls = [self.trans["face"](img) for img in loc_img_ls]
sample = {
"glo_img": img_obj_ls,
"loc_img": loc_img_ls,
"wav_aud": wav,
"reg_label": img_label,
}
return sample
def get_wave_data(self, img_file):
dir_name = opt.dirname(img_file)
if self.type == "frame":
aud_file = f"{dir_name}.npy"
if self.type == "face":
dir_name = dir_name.replace("_face", "")
aud_file = f"{dir_name}.npy"
aud_data = np.load(aud_file)
data_len = aud_data.shape[-1]
start = np.random.randint(data_len - self.sample_len)
end = start + self.sample_len
return aud_data[:, :, start: end]
@staticmethod
def get_loc_img(img_file):
img_file = Path(img_file)
img_id = img_file.stem.split("_")[-1]
loc_img_dir = f"{img_file.parent}_face"
loc_img_file = f"{loc_img_dir}/face_{img_id}.jpg"
try:
loc_img = Image.open(loc_img_file)
except FileNotFoundError:
loc_img_ls = list(Path(loc_img_dir).rglob("*.jpg"))
loc_img_file = random.choice(loc_img_ls)
loc_img = Image.open(loc_img_file)
return loc_img
@staticmethod
def cls_encode(score):
index = []
for v in score:
if v < -1:
index.append(0)
elif -1 <= v < 0:
index.append(1)
elif 0 <= v < 1:
index.append(2)
else:
index.append(3)
one_hot_cls = np.eye(4)[index]
return one_hot_cls
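# Minimal sketch (illustration only): cls_encode above bins each continuous
# trait score into one of four classes (< -1, [-1, 0), [0, 1), >= 1) and
# one-hot encodes the bins with np.eye(4), e.g.
def _demo_cls_encode():
    scores = np.array([-1.3, -0.2, 0.4, 1.7, 0.0])  # dummy OCEAN scores
    one_hot = AllSampleCRNetTruePersonalityData.cls_encode(scores)
    # bins are [0, 1, 2, 3, 2], so one_hot is a (5, 4) array with one 1 per row
    return one_hot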
def set_true_personality_dataloader(cfg, mode):
transform = build_transform_spatial(cfg)
data_set = AllSampleTruePersonalityData(
data_root=cfg.DATA.ROOT,
data_split=mode,
task=cfg.DATA.SESSION,
trans=transform,
)
return data_set
def set_audiovisual_true_personality_dataloader(cfg, mode):
transform = build_transform_spatial(cfg)
data_set = AllSampleBimodalTruePersonalityData(
data_root=cfg.DATA.ROOT,
data_split=mode,
task=cfg.DATA.SESSION,
trans=transform,
)
return data_set
def set_crnet_true_personality_dataloader(cfg, mode):
transform = build_transform_spatial(cfg)
data_set = AllSampleCRNetTruePersonalityData(
data_root=cfg.DATA.ROOT,
data_split=mode,
task=cfg.DATA.SESSION,
trans=transform,
)
return data_set
def set_persemon_true_personality_dataloader(cfg, mode):
transforms = build_transform_spatial(cfg)
persemon_dataset = AllSamplePersemonTruePersonalityData(
data_root=cfg.DATA.ROOT, # "datasets/chalearn2021",
data_split=mode,
task=cfg.DATA.SESSION,
data_type=cfg.DATA.TYPE, # "frame",
trans=transforms,
emo_data_root=cfg.DATA.VA_ROOT, # "datasets",
emo_img_dir=cfg.DATA.VA_DATA, # "va_data/cropped_aligned",
emo_label=cfg.DATA.VA_TRAIN_LABEL if mode == "train" else cfg.DATA.VA_VALID_LABEL,
emo_trans=transforms,
)
return persemon_dataset
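# Minimal sketch (assumption: cfg follows the project's default config layout
# with DATA.ROOT / DATA.SESSION / DATA.TYPE fields): the set_* builders above
# return plain Dataset objects that yield every sampled frame of a session,
# which is why they are used for feature extraction rather than batched training.
def _demo_feature_extract_dataset(cfg, mode="test"):
    dataset = set_true_personality_dataloader(cfg, mode)
    sample = dataset[0]
    # sample["all_images"] is a list of transformed frame tensors and
    # sample["label"] holds the five ground-truth trait scores
    return len(sample["all_images"]), sample["label"].shape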
| 8,737 | 34.376518 | 99 | py |
DeepPersonality | DeepPersonality-main/dpcv/data/datasets/audio_data.py | import os
import numpy as np
from dpcv.data.datasets.bi_modal_data import VideoData
from dpcv.data.datasets.cr_data import CRNetData
from torch.utils.data import DataLoader
from dpcv.data.datasets.build import DATA_LOADER_REGISTRY
import torch
@DATA_LOADER_REGISTRY.register()
class AudioData(VideoData):
def __init__(self, data_root, aud_dir, label_file):
super().__init__(
data_root, img_dir=None, audio_dir=aud_dir, label_file=label_file,
parse_img_dir=False,
parse_aud_dir=True,
)
def __getitem__(self, index):
aud_data = self.get_wave_data(index)
aud_data = self.transform(aud_data)
label = self.get_ocean_label(index)
sample = {
"aud_data": aud_data,
"aud_label": label,
}
return sample
def get_wave_data(self, index):
aud_file = self.aud_file_ls[index]
aud_ft = np.load(aud_file)
return aud_ft
def get_ocean_label(self, index):
aud_file = self.aud_file_ls[index]
aud_name = os.path.basename(aud_file)
video_name = aud_name.replace(".wav", "").replace(".npy", "").replace("_mt.csv", "") + ".mp4"
score = [
self.annotation["openness"][video_name],
self.annotation["conscientiousness"][video_name],
self.annotation["extraversion"][video_name],
self.annotation["agreeableness"][video_name],
self.annotation["neuroticism"][video_name],
]
return torch.tensor(score)
def transform(self, aud_ft):
"""
interface to be override for aud data processing
"""
return aud_ft
def __len__(self):
return len(self.aud_file_ls)
@DATA_LOADER_REGISTRY.register()
class VoiceLogfbank(AudioData):
def transform(self, aud_ft):
_, length = aud_ft.shape
if length > 79534:
aud_trans = aud_ft[..., :79534]
elif length < 79534:
aud_padding = np.zeros((1, 79534))
aud_padding[..., :length] = aud_ft
aud_trans = aud_padding
else:
aud_trans = aud_ft
return torch.as_tensor(aud_trans, dtype=torch.float32).squeeze()
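# Minimal sketch (illustration only): the transforms in this module force a
# fixed-length audio feature, truncating long inputs and zero-padding short
# ones exactly as VoiceLogfbank.transform does above for its 79534-sample target.
def _demo_pad_or_truncate(feature, target_len=79534):
    length = feature.shape[-1]
    if length >= target_len:
        return feature[..., :target_len]
    padded = np.zeros((*feature.shape[:-1], target_len))
    padded[..., :length] = feature
    return padded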
@DATA_LOADER_REGISTRY.register()
class VoiceMfcc(AudioData):
def get_wave_data(self, index):
aud_file = self.aud_file_ls[index]
aud_ft = np.loadtxt(aud_file, delimiter=",")
return aud_ft
def transform(self, aud_ft):
return torch.as_tensor(aud_ft, dtype=torch.float32)
@DATA_LOADER_REGISTRY.register()
class VoiceLibrosa(AudioData):
def transform(self, aud_ft):
        try:
            # aud_ft is shaped (1, 1, length), so the random 50176-sample crop
            # must be drawn from the last axis rather than from len(aud_ft)
            n = np.random.randint(0, aud_ft.shape[-1] - 50176)
        except ValueError:
            # clip shorter than the crop window: start at 0 and zero-pad below
            n = 0
wav_tmp = aud_ft[..., n: n + 50176]
if wav_tmp.shape[-1] < 50176:
wav_fill = np.zeros((1, 1, 50176))
wav_fill[..., :wav_tmp.shape[-1]] = wav_tmp
wav_tmp = wav_fill
return torch.as_tensor(wav_tmp, dtype=torch.float32)
@DATA_LOADER_REGISTRY.register()
class VoiceCRNetData(AudioData):
def __getitem__(self, index):
aud_data = self.get_wave_data(index)
aud_data = self.transform(aud_data)
label = self.get_ocean_label(index)
label_cls = torch.as_tensor(CRNetData.cls_encode(label), dtype=torch.float32)
return {
"aud_data": aud_data,
"aud_label": label,
"aud_label_cls": label_cls,
}
def transform(self, aud_ft):
if aud_ft.shape[-1] < 244832:
aud_ft_pad = np.zeros((1, 1, 244832))
aud_ft_pad[..., :aud_ft.shape[-1]] = aud_ft
aud_ft = aud_ft_pad
return torch.as_tensor(aud_ft, dtype=torch.float32)
class _VoiceLibrosa(AudioData):
def transform(self, aud_ft):
_, _, length = aud_ft.shape
aud_padding = np.zeros((1, 1, 245760))
aud_padding[..., :length] = aud_ft
aud_trans = aud_padding.reshape(256, 320, 3).transpose(2, 0, 1)
aud_ts = torch.as_tensor(aud_trans, dtype=torch.float32)
return aud_ts
class VoiceLibrosaSwinTransformer(AudioData):
def transform(self, aud_ft):
_, _, length = aud_ft.shape
shape_size = 224 * 224 * 3
if length < shape_size:
aud_padding = np.zeros((1, 1, shape_size))
aud_padding[..., :length] = aud_ft
aud_ft = aud_padding
aud_trans = aud_ft[..., :shape_size].reshape(224, 224, 3).transpose(2, 0, 1)
aud_ts = torch.as_tensor(aud_trans, dtype=torch.float32)
return aud_ts
@DATA_LOADER_REGISTRY.register()
def build_audio_loader(cfg, dataset_cls, mode="train"):
shuffle = cfg.DATA_LOADER.SHUFFLE
if mode == "train":
dataset = dataset_cls(
cfg.DATA.ROOT,
cfg.DATA.TRAIN_AUD_DATA,
cfg.DATA.TRAIN_LABEL_DATA,
)
elif mode == "valid":
dataset = dataset_cls(
cfg.DATA.ROOT,
cfg.DATA.VALID_AUD_DATA,
cfg.DATA.VALID_LABEL_DATA,
)
shuffle = False
elif mode == "test":
dataset = dataset_cls(
cfg.DATA.ROOT,
cfg.DATA.TEST_AUD_DATA,
cfg.DATA.TEST_LABEL_DATA,
)
shuffle = False
else:
raise ValueError("mode must be one of 'train' or 'valid' or test' ")
data_loader = DataLoader(
dataset,
batch_size=cfg.DATA_LOADER.TRAIN_BATCH_SIZE,
shuffle=shuffle,
drop_last=cfg.DATA_LOADER.DROP_LAST,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
)
return data_loader
@DATA_LOADER_REGISTRY.register()
def voice_librosa_loader(cfg, mode="train"):
if mode == "train":
dataset = _VoiceLibrosa(
cfg.DATA.ROOT,
cfg.DATA.TRAIN_AUD_DATA,
cfg.DATA.TRAIN_LABEL_DATA,
)
elif mode == "valid":
dataset = _VoiceLibrosa(
cfg.DATA.ROOT,
cfg.DATA.VALID_AUD_DATA,
cfg.DATA.VALID_LABEL_DATA,
)
elif mode == "test":
dataset = _VoiceLibrosa(
cfg.DATA.ROOT,
cfg.DATA.TEST_AUD_DATA,
cfg.DATA.TEST_LABEL_DATA,
)
else:
raise ValueError("mode must be one of 'train' or 'valid' or test' ")
data_loader = DataLoader(
dataset,
batch_size=cfg.DATA_LOADER.TRAIN_BATCH_SIZE,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
)
return data_loader
@DATA_LOADER_REGISTRY.register()
def voice_librosa_swin_transformer_loader(cfg, mode="train"):
if mode == "train":
dataset = VoiceLibrosaSwinTransformer(
cfg.DATA.ROOT,
cfg.DATA.TRAIN_AUD_DATA,
cfg.DATA.TRAIN_LABEL_DATA,
)
elif mode == "valid":
dataset = VoiceLibrosaSwinTransformer(
cfg.DATA.ROOT,
cfg.DATA.VALID_AUD_DATA,
cfg.DATA.VALID_LABEL_DATA,
)
elif mode == "test":
dataset = VoiceLibrosaSwinTransformer(
cfg.DATA.ROOT,
cfg.DATA.TEST_AUD_DATA,
cfg.DATA.TEST_LABEL_DATA,
)
else:
raise ValueError("mode must be one of 'train' or 'valid' or test' ")
data_loader = DataLoader(
dataset,
batch_size=cfg.DATA_LOADER.TRAIN_BATCH_SIZE,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
)
return data_loader
if __name__ == "__main__":
dataset = VoiceCRNetData(
"../../../datasets",
"voice_data/voice_librosa/train_data",
"annotation/annotation_training.pkl",
)
for i in range(len(dataset)):
if i > 3:
break
a = dataset[i]
print(a)
# data_loader = DataLoader(dataset, batch_size=8, num_workers=0)
# for i, batch in enumerate(data_loader):
# print(batch["aud_data"].shape, batch["aud_label"].shape)
# if i >= 20:
# break
| 7,905 | 28.610487 | 101 | py |
DeepPersonality | DeepPersonality-main/dpcv/data/datasets/tpn_data.py | import torch
from torch.utils.data import DataLoader
from dpcv.data.transforms.transform import set_tpn_transform_op
from dpcv.data.datasets.video_segment_data import VideoFrameSegmentData
from dpcv.data.transforms.temporal_transforms import TemporalRandomCrop, TemporalDownsample, TemporalEvenCropDownsample
from dpcv.data.transforms.temporal_transforms import Compose as TemporalCompose
from dpcv.data.datasets.common import VideoLoader
from dpcv.data.transforms.build import build_transform_spatial
from dpcv.data.datasets.build import DATA_LOADER_REGISTRY
from dpcv.data.datasets.video_segment_data import TruePersonalityVideoFrameSegmentData
class TPNData(VideoFrameSegmentData):
def __getitem__(self, index):
img = self.get_image_data(index)
label = self.get_ocean_label(index)
return {"image": img, "label": torch.as_tensor(label)}
def _loading(self, path, frame_indices):
clip = self.loader(path, frame_indices)
if self.spa_trans is not None:
clip = [self.spa_trans(img) for img in clip]
clip = torch.stack(clip, 0)
return clip
class FullTestTPNData(TPNData):
def __getitem__(self, index):
imgs = self.get_image_data(index)
label = self.get_ocean_label(index)
return {"all_images": imgs, "label": torch.as_tensor(label)}
def frame_sample(self, img_dir):
if "face" in img_dir:
frame_indices = self.list_face_frames(img_dir)
else:
frame_indices = self.list_frames(img_dir)
if self.tem_trans is not None:
frame_indices = self.tem_trans(frame_indices)
imgs = self.load_batch_images(img_dir, frame_indices)
return imgs
def load_batch_images(self, img_dir, frame_indices_ls):
image_segment_obj_ls = []
for frame_seg_idx in frame_indices_ls:
image_segment_obj = self.loader(img_dir, frame_seg_idx)
if self.spa_trans is not None:
image_segment_obj = [self.spa_trans(img) for img in image_segment_obj]
image_segment_obj = torch.stack(image_segment_obj, 0)
image_segment_obj_ls.append(image_segment_obj)
return image_segment_obj_ls
class TPNTruePerData(TruePersonalityVideoFrameSegmentData):
def _loading(self, path, frame_indices):
clip = self.loader(path, frame_indices)
if self.spa_trans is not None:
clip = [self.spa_trans(img) for img in clip]
clip = torch.stack(clip, 0)
return clip
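# Minimal sketch (illustration only): the loaders below compose temporal
# transforms over a list of frame indices, e.g. first downsampling a long
# video to 100 evenly spaced frames and then cropping a random contiguous
# 16-frame segment, before any spatial transform touches the pixels.
def _demo_temporal_pipeline(num_frames=300):
    frame_indices = list(range(1, num_frames + 1))
    pipeline = TemporalCompose([TemporalDownsample(length=100), TemporalRandomCrop(16)])
    return pipeline(frame_indices)  # a short list of frame indices to load from disk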
def make_data_loader(cfg, mode="train"):
    assert (mode in ["train", "valid", "trainval", "test"]), "'mode' should be 'train', 'valid', 'trainval' or 'test'"
spatial_transform = set_tpn_transform_op()
temporal_transform = [TemporalRandomCrop(8)]
temporal_transform = TemporalCompose(temporal_transform)
video_loader = VideoLoader()
if mode == "train":
data_set = TPNData(
cfg.DATA_ROOT,
cfg.TRAIN_IMG_DATA,
cfg.TRAIN_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
elif mode == "valid":
data_set = TPNData(
cfg.DATA_ROOT,
cfg.VALID_IMG_DATA,
cfg.VALID_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
elif mode == "trainval":
data_set = TPNData(
cfg.DATA_ROOT,
cfg.TRAINVAL_IMG_DATA,
cfg.TRAINVAL_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
else:
data_set = TPNData(
cfg.DATA_ROOT,
cfg.TEST_IMG_DATA,
cfg.TEST_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
data_loader = DataLoader(
dataset=data_set,
batch_size=cfg.TRAIN_BATCH_SIZE,
shuffle=cfg.SHUFFLE,
num_workers=cfg.NUM_WORKERS,
)
return data_loader
@DATA_LOADER_REGISTRY.register()
def tpn_data_loader(cfg, mode="train"):
    assert (mode in ["train", "valid", "trainval", "test", "full_test"]), \
        "'mode' should be 'train', 'valid', 'trainval', 'test' or 'full_test'"
spatial_transform = build_transform_spatial(cfg)
temporal_transform = [TemporalDownsample(length=100), TemporalRandomCrop(16)]
# temporal_transform = [TemporalDownsample(length=16)]
temporal_transform = TemporalCompose(temporal_transform)
data_cfg = cfg.DATA
if "face" in data_cfg.TRAIN_IMG_DATA:
video_loader = VideoLoader(image_name_formatter=lambda x: f"face_{x}.jpg")
else:
video_loader = VideoLoader(image_name_formatter=lambda x: f"frame_{x}.jpg")
if mode == "train":
data_set = TPNData(
data_cfg.ROOT,
data_cfg.TRAIN_IMG_DATA,
data_cfg.TRAIN_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
elif mode == "valid":
data_set = TPNData(
data_cfg.ROOT,
data_cfg.VALID_IMG_DATA,
data_cfg.VALID_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
elif mode == "trainval":
data_set = TPNData(
data_cfg.ROOT,
data_cfg.TRAINVAL_IMG_DATA,
data_cfg.TRAINVAL_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
elif mode == "full_test":
temporal_transform = [TemporalDownsample(length=100), TemporalEvenCropDownsample(16, 6)]
temporal_transform = TemporalCompose(temporal_transform)
return FullTestTPNData(
data_cfg.ROOT,
data_cfg.TEST_IMG_DATA,
data_cfg.TEST_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
else:
data_set = TPNData(
data_cfg.ROOT,
data_cfg.TEST_IMG_DATA,
data_cfg.TEST_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
loader_cfg = cfg.DATA_LOADER
data_loader = DataLoader(
dataset=data_set,
batch_size=loader_cfg.TRAIN_BATCH_SIZE,
shuffle=loader_cfg.SHUFFLE,
num_workers=loader_cfg.NUM_WORKERS,
)
return data_loader
@DATA_LOADER_REGISTRY.register()
def tpn_true_per_data_loader(cfg, mode="train"):
spatial_transform = build_transform_spatial(cfg)
temporal_transform = [TemporalRandomCrop(16)]
# temporal_transform = [TemporalDownsample(length=2000), TemporalRandomCrop(16)]
temporal_transform = TemporalCompose(temporal_transform)
data_cfg = cfg.DATA
if data_cfg.TYPE == "face":
video_loader = VideoLoader(image_name_formatter=lambda x: f"face_{x}.jpg")
else:
video_loader = VideoLoader(image_name_formatter=lambda x: f"frame_{x}.jpg")
data_set = TPNTruePerData(
data_root="datasets/chalearn2021",
data_split=mode,
task=data_cfg.SESSION,
data_type=data_cfg.TYPE,
video_loader=video_loader,
spa_trans=spatial_transform,
tem_trans=temporal_transform,
)
shuffle = True if mode == "train" else False
loader_cfg = cfg.DATA_LOADER
data_loader = DataLoader(
dataset=data_set,
batch_size=loader_cfg.TRAIN_BATCH_SIZE,
shuffle=shuffle,
num_workers=loader_cfg.NUM_WORKERS,
)
return data_loader
if __name__ == "__main__":
import os
from dpcv.config.tpn_cfg import cfg
os.chdir("../../")
# interpret_data = InterpretData(
# data_root="datasets",
# img_dir="image_data/valid_data",
# label_file="annotation/annotation_validation.pkl",
# trans=set_transform_op(),
# )
# print(interpret_data[18])
data_loader = make_data_loader(cfg, mode="valid")
for i, item in enumerate(data_loader):
print(item["image"].shape, item["label"].shape)
if i > 5:
break
| 8,114 | 31.46 | 119 | py |
DeepPersonality | DeepPersonality-main/dpcv/data/datasets/temporal_data.py | """
TODO: merge temporal data to bi_modal_data
"""
import torch
import os
import glob
from dpcv.data.datasets.bi_modal_data import VideoData
from torch.utils.data import DataLoader
from PIL import Image
from pathlib import Path
import random
import numpy as np
from dpcv.data.transforms.transform import face_image_transform
from dpcv.data.transforms.build import build_transform_spatial
from .build import DATA_LOADER_REGISTRY
class TemporalData(VideoData):
def __init__(self, data_root, img_dir, audio_dir, label_file, transform=None):
super().__init__(data_root, img_dir, label_file, audio_dir)
self.transform = transform
def __getitem__(self, idx):
anno_score = self.get_ocean_label(idx)
imgs_array_ls = self._get_statistic_img_sample(idx)
wav_ft = self._get_wav_sample(idx)
if self.transform:
imgs_ten_ls = []
for img_arr in imgs_array_ls:
img_ten = self.transform(img_arr)
imgs_ten_ls.append(img_ten)
imgs_ten = torch.stack(imgs_ten_ls, dim=0)
else:
imgs_ten = torch.as_tensor(imgs_array_ls)
wav_ft = torch.as_tensor(wav_ft, dtype=imgs_ten.dtype)
anno_score = torch.as_tensor(anno_score, dtype=imgs_ten.dtype)
sample = {"image": imgs_ten, "audio": wav_ft, "label": anno_score}
return sample
def __len__(self):
return len(self.img_dir_ls)
def _get_statistic_img_sample(self, index):
imgs = glob.glob(self.img_dir_ls[index] + "/*.jpg")
imgs = sorted(imgs, key=lambda x: int(Path(x).stem[5:]))
if len(imgs) > 10:
separate = np.linspace(0, len(imgs) - 1, 7, endpoint=True, dtype=np.int16)
selected = [random.randint(separate[idx], separate[idx + 1]) for idx in range(6)]
img_array_ls = []
for idx in selected:
img_pt = imgs[idx]
img_array = Image.open(img_pt).convert("RGB")
img_array_ls.append(img_array)
return img_array_ls
else:
raise ValueError("encountered bad input {}".format(self.img_dir_ls[index]))
def _get_wav_sample(self, index):
img_dir_name = os.path.basename(self.img_dir_ls[index])
audio_name = f"{img_dir_name}.wav_mt.csv"
wav_path = os.path.join(self.data_root, self.audio_dir, audio_name)
wav_ft = np.loadtxt(wav_path, delimiter=",")
return wav_ft
def make_data_loader(cfg, mode):
assert (mode in ["train", "valid", "test"]), " 'mode' only supports 'train' 'valid' 'test' "
transforms = face_image_transform()
if mode == "train":
dataset = TemporalData(
cfg.DATA_ROOT,
cfg.TRAIN_IMG_DATA,
cfg.TRAIN_AUD_DATA,
cfg.TRAIN_LABEL_DATA,
transforms
)
batch_size = cfg.TRAIN_BATCH_SIZE
elif mode == "valid":
dataset = TemporalData(
cfg.DATA_ROOT,
cfg.VALID_IMG_DATA,
cfg.VALID_AUD_DATA,
cfg.VALID_LABEL_DATA,
transforms
)
batch_size = cfg.VALID_BATCH_SIZE
else:
dataset = TemporalData(
cfg.DATA_ROOT,
cfg.TEST_IMG_DATA,
cfg.TEST_AUD_DATA,
cfg.TEST_LABEL_DATA,
transforms
)
batch_size = cfg.VALID_BATCH_SIZE
data_loader = DataLoader(
dataset=dataset,
batch_size=batch_size,
shuffle=True,
num_workers=0, # cfg.NUM_WORKS
drop_last=True,
)
return data_loader
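# Minimal sketch (assumption: cfg follows the project's config layout used by
# make_data_loader above): each TemporalData sample stacks six randomly drawn
# face frames along a new time dimension and pairs them with one mid-term
# audio feature matrix plus the five OCEAN scores.
def _demo_temporal_sample(cfg):
    loader = make_data_loader(cfg, mode="valid")
    sample = next(iter(loader))
    return sample["image"].shape, sample["audio"].shape, sample["label"].shape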
@DATA_LOADER_REGISTRY.register()
def bimodal_lstm_data_loader(cfg, mode):
    assert (mode in ["train", "valid", "test", "full_test"]), " 'mode' only supports 'train', 'valid', 'test' or 'full_test' "
transforms = build_transform_spatial(cfg)
if mode == "train":
dataset = TemporalData(
cfg.DATA.ROOT,
cfg.DATA.TRAIN_IMG_DATA,
cfg.DATA.TRAIN_AUD_DATA,
cfg.DATA.TRAIN_LABEL_DATA,
transforms
)
batch_size = cfg.DATA_LOADER.TRAIN_BATCH_SIZE
elif mode == "valid":
dataset = TemporalData(
cfg.DATA.ROOT,
cfg.DATA.VALID_IMG_DATA,
cfg.DATA.VALID_AUD_DATA,
cfg.DATA.VALID_LABEL_DATA,
transforms
)
batch_size = cfg.DATA_LOADER.VALID_BATCH_SIZE
elif mode == "full_test":
        return None  # full-video evaluation is not implemented for this loader
else:
dataset = TemporalData(
cfg.DATA.ROOT,
cfg.DATA.TEST_IMG_DATA,
cfg.DATA.TEST_AUD_DATA,
cfg.DATA.TEST_LABEL_DATA,
transforms
)
batch_size = cfg.DATA_LOADER.VALID_BATCH_SIZE
data_loader = DataLoader(
dataset=dataset,
batch_size=batch_size,
shuffle=cfg.DATA_LOADER.SHUFFLE,
num_workers=cfg.DATA_LOADER.NUM_WORKERS, # cfg.NUM_WORKS
drop_last=cfg.DATA_LOADER.DROP_LAST,
)
return data_loader
if __name__ == "__main__":
trans = face_image_transform()
data_set = TemporalData(
"../../../datasets",
"image_data/train_data_face",
"voice_data/voice_mfcc/train_data_mfcc",
"annotation/annotation_training.pkl",
trans
)
print(len(data_set))
for key, val in data_set[7].items():
print(key, val.shape)
# print(data_set._statistic_img_sample(1))
# print(data_set._get_wav_sample(1))
# loader = make_data_loader("", "train")
# for i, sample in enumerate(loader):
# if i > 0:
# break
# print(sample["image"].shape, sample["audio"].shape, sample["label"].shape)
| 5,639 | 32.372781 | 109 | py |
DeepPersonality | DeepPersonality-main/dpcv/data/datasets/pers_emo_data.py | import glob
import random
import os
from PIL import Image
from pathlib import Path
import torch
import numpy as np
from torch.utils.data import DataLoader
from dpcv.data.datasets.bi_modal_data import VideoData
from dpcv.data.transforms.build import build_transform_spatial
from dpcv.data.transforms.transform import set_per_transform
from .build import DATA_LOADER_REGISTRY
class PersEmoNData(VideoData):
def __init__(self, data_root, per_img_dir, per_label, emo_img_dir, emo_label, per_trans=None, emo_trans=None):
super().__init__(data_root, per_img_dir, per_label)
self.emo_img_dir = emo_img_dir
self.emo_label = emo_label
self.emo_data_ls = self.emo_data_parser()
self.per_trans = per_trans
self.emo_trans = emo_trans
def __getitem__(self, index):
per_img_ls, per_lab_ls = self.gather_personality_data(index)
emo_img_ls, emo_lab_ls = self.gather_emotion_data()
if self.per_trans:
per_img_ls = [self.per_trans(per_img) for per_img in per_img_ls]
if self.emo_trans:
emo_img_ls = [self.emo_trans(emo_img) for emo_img in emo_img_ls]
per_imgs_ts = torch.stack(per_img_ls, 0)
per_labs = torch.as_tensor(per_lab_ls)
emo_imgs_ts = torch.stack(emo_img_ls, 0)
emo_labs = torch.as_tensor(emo_lab_ls)
sample = {
"per_img": per_imgs_ts,
"emo_img": emo_imgs_ts,
"per_label": per_labs,
"emo_label": emo_labs,
}
return sample
def gather_personality_data(self, index):
img_dirs = self.img_dir_ls[index * 10: (index + 1) * 10]
img_ls = []
label_ls = []
for img_dir in img_dirs:
imgs, labs = self.per_img_sample(img_dir)
img_ls.extend(imgs)
label_ls.extend(labs)
return img_ls, label_ls
def per_img_sample(self, img_dir):
imgs = glob.glob(f"{img_dir}/*.jpg")
imgs = sorted(imgs, key=lambda x: int(Path(x).stem[5:]))
separate = np.linspace(0, len(imgs), 11, endpoint=False, dtype=np.int16)
imgs_idx = [random.randint(separate[idx], separate[idx + 1]) for idx in range(10)]
imgs = [imgs[idx] for idx in imgs_idx]
imgs = [Image.open(img) for img in imgs]
labs = [self.get_per_label(img_dir)] * 10
return imgs, labs
def get_per_label(self, img_dir):
video_name = f"{os.path.basename(img_dir)}.mp4"
score = [
self.annotation["openness"][video_name],
self.annotation["conscientiousness"][video_name],
self.annotation["extraversion"][video_name],
self.annotation["agreeableness"][video_name],
self.annotation["neuroticism"][video_name],
]
return score
def gather_emotion_data(self):
file = random.choice(self.emo_data_ls)
file_name = Path(file).stem
img_dir = os.path.join(self.data_root, self.emo_img_dir, file_name)
imgs = os.listdir(img_dir)
random.shuffle(imgs)
imgs = imgs[:100]
imgs_pt = [os.path.join(img_dir, img) for img in imgs]
with open(file, 'r') as f:
frame_label = [map(lambda x: float(x), line.strip().split(",")) for line in f.readlines()[1:]]
try:
imgs_label = [list(frame_label[int(img_name.split(".")[0])]) for img_name in imgs]
        except (IndexError, ValueError):
            # some sampled frames have no matching VA annotation row; resample another video
            return self.gather_emotion_data()
imgs_rgb = [Image.open(img_pt) for img_pt in imgs_pt]
return imgs_rgb, imgs_label
def emo_data_parser(self):
emo_label_path = os.path.join(self.data_root, self.emo_label)
video_files = [file for file in os.listdir(emo_label_path) if len(file) < 12]
video_files_pt = [os.path.join(emo_label_path, video_file) for video_file in video_files]
return video_files_pt
def __len__(self):
return int(len(self.img_dir_ls) / 10) # for each mini-batch the author selected 10 videos in chalearn data
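# Minimal sketch (illustration only): per_img_sample above splits the frame
# list of one video into ten even intervals and draws one random frame per
# interval, which is mirrored by this standalone helper.
def _demo_even_random_sampling(num_frames=305, num_samples=10):
    separate = np.linspace(0, num_frames, num_samples + 1, endpoint=False, dtype=np.int16)
    return [random.randint(separate[idx], separate[idx + 1]) for idx in range(num_samples)]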
class AllFramePersEmoNData(PersEmoNData):
def gather_personality_data(self, index):
img_dir = self.img_dir_ls[index]
img_ls, label_ls = self.per_img_sample(img_dir)
# assert len(img_ls) == 100, f"image sample from{img_dir} is not enough"
# print(len(img_ls))
return img_ls, label_ls
def per_img_sample(self, img_dir):
imgs = glob.glob(f"{img_dir}/*.jpg")
selected_idx = np.linspace(0, len(imgs), 100, endpoint=False, dtype=np.int16)
selected_img_ls = [imgs[idx] for idx in selected_idx]
selected_img_obj = [Image.open(img) for img in selected_img_ls]
selected_img_lab = [self.get_per_label(img_dir)] * 100
return selected_img_obj, selected_img_lab
def __len__(self):
return len(self.img_dir_ls)
class AllFramePersEmoNData2(AllFramePersEmoNData):
def per_img_sample(self, img_dir):
imgs = sorted(glob.glob(f"{img_dir}/*.jpg"))
# selected_idx = np.linspace(0, len(imgs), 100, endpoint=False, dtype=np.int16)
# selected_img_ls = [imgs[idx] for idx in selected_idx]
selected_img_obj = [Image.open(img) for img in imgs]
selected_img_lab = [self.get_per_label(img_dir)] * len(imgs)
return selected_img_obj, selected_img_lab
def make_data_loader(cfg, mode=None):
per_trans = set_per_transform()
emo_trans = set_per_transform()
if mode == "train":
dataset = PersEmoNData(
cfg.DATA_ROOT, # "../datasets/",
cfg.TRAIN_IMG_DATA, # "image_data/train_data_face",
cfg.TRAIN_LABEL_DATA, # "annotation/annotation_training.pkl",
cfg.VA_DATA, # "va_data/cropped_aligned",
cfg.VA_TRAIN_LABEL, # "va_data/va_label/VA_Set/Train_Set",
per_trans=per_trans,
emo_trans=emo_trans,
)
elif mode == "valid":
dataset = PersEmoNData(
cfg.DATA_ROOT, # "../datasets/",
cfg.VALID_IMG_DATA, # "image_data/valid_data_face",
cfg.VALID_LABEL_DATA, # "annotation/annotation_validation.pkl",
cfg.VA_DATA, # "va_data/cropped_aligned",
cfg.VA_VALID_LABEL, # "va_data/va_label/VA_Set/Validation_Set",
per_trans=per_trans,
emo_trans=emo_trans,
)
else:
dataset = PersEmoNData(
cfg.DATA_ROOT, # "../datasets/",
cfg.TEST_IMG_DATA, # "image_data/valid_data_face",
cfg.TEST_LABEL_DATA, # "annotation/annotation_validation.pkl",
cfg.VA_DATA, # "va_data/cropped_aligned",
cfg.VA_VALID_LABEL, # "va_data/va_label/VA_Set/Validation_Set",
per_trans=per_trans,
emo_trans=emo_trans,
)
data_loader = DataLoader(
dataset=dataset,
batch_size=1,
shuffle=cfg.SHUFFLE,
num_workers=cfg.NUM_WORKS
)
return data_loader
@DATA_LOADER_REGISTRY.register()
def peremon_data_loader(cfg, mode=None):
per_trans = build_transform_spatial(cfg)
emo_trans = build_transform_spatial(cfg)
data_cfg = cfg.DATA
if mode == "train":
dataset = PersEmoNData(
data_cfg.ROOT, # "../datasets/",
data_cfg.TRAIN_IMG_DATA, # "image_data/train_data_face",
data_cfg.TRAIN_LABEL_DATA, # "annotation/annotation_training.pkl",
data_cfg.VA_DATA, # "va_data/cropped_aligned",
data_cfg.VA_TRAIN_LABEL, # "va_data/va_label/VA_Set/Train_Set",
per_trans=per_trans,
emo_trans=emo_trans,
)
elif mode == "valid":
dataset = PersEmoNData(
data_cfg.ROOT, # "../datasets/",
data_cfg.VALID_IMG_DATA, # "image_data/valid_data_face",
data_cfg.VALID_LABEL_DATA, # "annotation/annotation_validation.pkl",
data_cfg.VA_DATA, # "va_data/cropped_aligned",
data_cfg.VA_VALID_LABEL, # "va_data/va_label/VA_Set/Validation_Set",
per_trans=per_trans,
emo_trans=emo_trans,
)
elif mode == "full_test":
return AllFramePersEmoNData(
data_cfg.ROOT, # "../datasets/",
data_cfg.TEST_IMG_DATA, # "image_data/valid_data_face",
data_cfg.TEST_LABEL_DATA, # "annotation/annotation_validation.pkl",
data_cfg.VA_DATA, # "va_data/cropped_aligned",
data_cfg.VA_VALID_LABEL, # "va_data/va_label/VA_Set/Validation_Set",
per_trans=per_trans,
emo_trans=emo_trans,
)
else:
dataset = PersEmoNData(
data_cfg.ROOT, # "../datasets/",
data_cfg.TEST_IMG_DATA, # "image_data/valid_data_face",
data_cfg.TEST_LABEL_DATA, # "annotation/annotation_validation.pkl",
data_cfg.VA_DATA, # "va_data/cropped_aligned",
data_cfg.VA_VALID_LABEL, # "va_data/va_label/VA_Set/Validation_Set",
per_trans=per_trans,
emo_trans=emo_trans,
)
loader_cfg = cfg.DATA_LOADER
data_loader = DataLoader(
dataset=dataset,
batch_size=1,
shuffle=loader_cfg.SHUFFLE,
num_workers=loader_cfg.NUM_WORKERS
)
return data_loader
if __name__ == "__main__":
per_trans = set_per_transform()
emo_trans = set_per_transform()
# dataset = PersEmoNData(
# "../../../datasets/",
# "image_data/train_data_face",
# "annotation/annotation_training.pkl",
# "va_data/cropped_aligned",
# "va_data/va_label/VA_Set/Train_Set",
# per_trans=per_trans,
# emo_trans=emo_trans,
# )
dataset = PersEmoNData(
"../../../datasets/",
"image_data/valid_data_face",
"annotation/annotation_validation.pkl",
"va_data/cropped_aligned",
"va_data/va_label/VA_Set/Validation_Set",
per_trans=per_trans,
emo_trans=emo_trans,
)
for k, v in dataset[2].items():
print(v.shape)
# data_loader = DataLoader(
# dataset=dataset,
# batch_size=1,
# shuffle=True,
# num_workers=0 # cfg.NUM_WORKS
# )
#
# for i, item in enumerate(data_loader):
# # if i >= 1:
# # break
# print("::--------------> ", i)
# for k, v in item.items():
# print(k, v.squeeze().shape)
| 10,331 | 36.846154 | 115 | py |
DeepPersonality | DeepPersonality-main/dpcv/data/datasets/interpretability_audio_data.py | import os
from dpcv.data.datasets.bi_modal_data import VideoData
from .build import DATA_LOADER_REGISTRY
import torch
import torchaudio
from torch.utils.data import DataLoader
def aud_transform():
return torchaudio.transforms.Resample(44100, 4000)
def norm(aud_ten):
mean = aud_ten.mean()
std = aud_ten.std()
normed_ten = (aud_ten - mean) / (std + 1e-10)
return normed_ten
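# Minimal sketch (illustration only): the dataset below resamples raw audio to
# 4 kHz, takes the FFT magnitude of the first channel, keeps the positive half
# of the spectrum and normalises it with norm(); the dummy waveform here stands
# in for the output of torchaudio.load.
def _demo_frequency_feature():
    waveform = torch.randn(1, 44100)  # one second of fake 44.1 kHz audio
    resampled = aud_transform()(waveform)  # roughly 4000 samples after resampling
    spectrum = torch.abs(torch.fft.fft(resampled))
    half = spectrum[..., : spectrum.shape[-1] // 2]
    return norm(half)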
class InterpretAudio(VideoData):
def __init__(self, data_root, aud_dir, label_file):
super().__init__(
data_root, img_dir=None, audio_dir=aud_dir, label_file=label_file,
parse_img_dir=False,
parse_aud_dir=True,
)
def __getitem__(self, index):
aud_data, index = self.get_wave_data(index)
label = self.get_ocean_label(index)
sample = {
"aud_data": aud_data,
"aud_label": label,
}
return sample
def get_wave_data(self, index):
aud_file = self.aud_file_ls[index]
aud_data, sample_rate = torchaudio.load(aud_file)
trans_aud = torchaudio.transforms.Resample(sample_rate, 4000)(aud_data[0, :].view(1, -1))
trans_fft = torch.fft.fft(trans_aud)
half_length = int(trans_aud.shape[-1] / 2)
trans_fre = torch.abs(trans_fft)[..., :half_length]
trans_fre_norm = norm(trans_fre)
if trans_fre_norm.shape[-1] < 30604:
return self.get_wave_data(index - 1)
return trans_fre_norm, index
def get_ocean_label(self, index):
aud_file = self.aud_file_ls[index]
video_name = os.path.basename(aud_file).replace(".wav", ".mp4")
score = [
self.annotation["openness"][video_name],
self.annotation["conscientiousness"][video_name],
self.annotation["extraversion"][video_name],
self.annotation["agreeableness"][video_name],
self.annotation["neuroticism"][video_name],
]
return torch.tensor(score)
def __len__(self):
return len(self.aud_file_ls)
def make_data_loader(cfg, mode="train"):
if mode == "train":
dataset = InterpretAudio(
cfg.DATA_ROOT, # "../datasets",
cfg.TRAIN_AUD_DATA, # "raw_voice/trainingData",
cfg.TRAIN_LABEL_DATA, # "annotation/annotation_training.pkl",
)
elif mode == "valid":
dataset = InterpretAudio(
cfg.DATA_ROOT, # "../datasets",
cfg.VALID_AUD_DATA, # "raw_voice/validationData",
cfg.VALID_LABEL_DATA, # "annotation/annotation_validation.pkl",
)
elif mode == "test":
dataset = InterpretAudio(
cfg.DATA_ROOT, # "../datasets",
cfg.TEST_AUD_DATA, # "raw_voice/testData",
cfg.TEST_LABEL_DATA, # "annotation/annotation_validation.pkl",
)
else:
raise ValueError("mode must be one of 'train' or 'valid' or test' ")
data_loader = DataLoader(dataset, batch_size=128, num_workers=cfg.NUM_WORKERS)
return data_loader
@DATA_LOADER_REGISTRY.register()
def interpret_audio_dataloader(cfg, mode):
data_cfg = cfg.DATA
if mode == "train":
dataset = InterpretAudio(
data_cfg.ROOT, # "../datasets",
data_cfg.TRAIN_AUD_DATA, # "raw_voice/trainingData",
data_cfg.TRAIN_LABEL_DATA, # "annotation/annotation_training.pkl",
)
elif mode == "valid":
dataset = InterpretAudio(
data_cfg.ROOT, # "../datasets",
data_cfg.VALID_AUD_DATA, # "raw_voice/validationData",
data_cfg.VALID_LABEL_DATA, # "annotation/annotation_validation.pkl",
)
elif mode == "test":
dataset = InterpretAudio(
data_cfg.ROOT, # "../datasets",
data_cfg.TEST_AUD_DATA, # "raw_voice/testData",
data_cfg.TEST_LABEL_DATA, # "annotation/annotation_validation.pkl",
)
else:
raise ValueError("mode must be one of 'train' or 'valid' or test' ")
loader_cfg = cfg.DATA_LOADER
data_loader = DataLoader(
dataset,
batch_size=loader_cfg.TRAIN_BATCH_SIZE,
num_workers=loader_cfg.NUM_WORKERS,
)
return data_loader
if __name__ == "__main__":
import matplotlib.pyplot as plt
dataset = InterpretAudio(
"../../../datasets",
"raw_voice/trainingData",
"annotation/annotation_training.pkl",
)
data_loader = DataLoader(dataset, batch_size=1, num_workers=0)
for i, batch in enumerate(data_loader):
if i >= 20:
break
plt.plot(batch[0].squeeze())
plt.show()
print(batch[0].shape, batch[1].shape)
| 4,665 | 32.328571 | 97 | py |
DeepPersonality | DeepPersonality-main/dpcv/data/datasets/audio_visual_data.py | import os
import torch
import glob
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from PIL import Image
import random
import pickle
import numpy as np
from dpcv.data.datasets.bi_modal_data import VideoData
from dpcv.data.transforms.transform import set_audio_visual_transform
from dpcv.data.transforms.build import build_transform_spatial
from .build import DATA_LOADER_REGISTRY
from random import shuffle
class AudioVisualData(VideoData):
def __init__(self, data_root, img_dir, audio_dir, label_file, transform=None, sample_size=100):
super().__init__(data_root, img_dir, label_file, audio_dir)
self.transform = transform
self.sample_size = sample_size
def __getitem__(self, idx):
label = self.get_ocean_label(idx)
img = self.get_image_data(idx)
wav = self.get_wave_data(idx)
if self.transform:
img = self.transform(img)
wav = torch.as_tensor(wav, dtype=img.dtype)
label = torch.as_tensor(label, dtype=img.dtype)
sample = {"image": img, "audio": wav, "label": label}
return sample
def get_image_data(self, idx):
img_dir_path = self.img_dir_ls[idx]
img_paths = glob.glob(img_dir_path + "/*.jpg")
sample_frames = np.linspace(0, len(img_paths), self.sample_size, endpoint=False, dtype=np.int16)
selected = random.choice(sample_frames)
# img_path = random.choice(img_paths)
        try:
            img = Image.open(img_paths[selected]).convert("RGB")
            return img
        except Exception:
            # corrupted or unreadable frame: report the directory and fall back to another frame
            print(img_paths)
            img = Image.open(random.choice(img_paths)).convert("RGB")
            return img
def get_wave_data(self, idx):
img_dir_path = self.img_dir_ls[idx]
# wav_path = img_dir_path.replace("image_data", "voice_data/voice_librosa") + ".wav.npy"
video_name = os.path.basename(img_dir_path)
wav_path = os.path.join(self.data_root, self.audio_dir, f"{video_name}.wav.npy")
wav_ft = np.load(wav_path)
        try:
            # wav_ft is shaped (1, 1, length); draw the crop start from the last axis
            n = np.random.randint(0, wav_ft.shape[-1] - 50176)
        except ValueError:
            # clip shorter than the crop window: start at 0 and zero-pad below
            n = 0
wav_tmp = wav_ft[..., n: n + 50176]
if wav_tmp.shape[-1] < 50176:
wav_fill = np.zeros((1, 1, 50176))
wav_fill[..., :wav_tmp.shape[-1]] = wav_tmp
wav_tmp = wav_fill
return wav_tmp
class ALLSampleAudioVisualData(AudioVisualData):
def __getitem__(self, idx):
label = self.get_ocean_label(idx)
imgs = self.get_image_data(idx)
wav = self.get_wave_data(idx)
if self.transform:
imgs = [self.transform(img) for img in imgs]
wav = torch.as_tensor(wav, dtype=imgs[0].dtype)
label = torch.as_tensor(label, dtype=imgs[0].dtype)
sample = {"image": imgs, "audio": wav, "label": label}
return sample
def get_image_data(self, idx):
img_dir_path = self.img_dir_ls[idx]
img_path_ls = glob.glob(f"{img_dir_path}/*.jpg")
sample_frames = np.linspace(0, len(img_path_ls), self.sample_size, endpoint=False, dtype=np.int16)
img_path_ls_sampled = [img_path_ls[ind] for ind in sample_frames]
img_obj_ls = [Image.open(path) for path in img_path_ls_sampled]
return img_obj_ls
class ALLSampleAudioVisualData2(AudioVisualData):
def __getitem__(self, idx):
label = self.get_ocean_label(idx)
imgs = self.get_image_data(idx)
wav = self.get_wave_data(idx)
if self.transform:
imgs = [self.transform(img) for img in imgs]
wav = torch.as_tensor(wav, dtype=imgs[0].dtype)
label = torch.as_tensor(label, dtype=imgs[0].dtype)
sample = {"image": imgs, "audio": wav, "label": label}
return sample
def get_image_data(self, idx):
img_dir_path = self.img_dir_ls[idx]
img_path_ls = sorted(glob.glob(f"{img_dir_path}/*.jpg"))
# sample_frames = np.linspace(0, len(img_path_ls), self.sample_size, endpoint=False, dtype=np.int16)
# img_path_ls_sampled = [img_path_ls[ind] for ind in sample_frames]
img_obj_ls = [Image.open(path) for path in img_path_ls]
return img_obj_ls
def make_data_loader(cfg, mode):
trans = set_audio_visual_transform()
if mode == "train":
data_set = AudioVisualData(
cfg.DATA_ROOT, # "/home/ssd500/personality_data",
cfg.TRAIN_IMG_DATA, # "image_data/train_data",
cfg.TRAIN_AUD_DATA, # "voice_data/train_data",
cfg.TRAIN_LABEL_DATA, # "annotation/annotation_training.pkl",
trans
)
elif mode == "valid":
data_set = AudioVisualData(
cfg.DATA_ROOT, # "/home/ssd500/personality_data",
cfg.VALID_IMG_DATA, # "image_data/valid_data",
cfg.VALID_AUD_DATA, # "voice_data/valid_data",
cfg.VALID_LABEL_DATA, # annotation/annotation_validation.pkl",
trans
)
elif mode == "trainval":
data_set = AudioVisualData(
cfg.DATA_ROOT, # "../datasets",
cfg.TRAINVAL_IMG_DATA, # ["image_data/training_data_01", "image_data/validation_data_01"],
cfg.TRANIVAL_AUD_DATA, # ["voice_data/trainingData", "voice_data/validationData"],
cfg.TRAINVAL_LABEL_DATA, # ["annotation/annotation_training.pkl", "annotation/annotation_validation.pkl"],
trans,
)
elif mode == "test":
data_set = AudioVisualData(
cfg.DATA_ROOT, # "/home/ssd500/personality_data",
cfg.TEST_IMG_DATA, # "image_data/test_data",
cfg.TEST_AUD_DATA, # "voice_data/test_data",
cfg.TEST_LABEL_DATA, # "annotation/annotation_test.pkl",
trans
)
else:
raise ValueError("mode must in one of [train, valid, trianval, test]")
data_loader = DataLoader(
dataset=data_set,
batch_size=cfg.TRAIN_BATCH_SIZE,
shuffle=True,
num_workers=cfg.NUM_WORKS,
drop_last=True
)
return data_loader
@DATA_LOADER_REGISTRY.register()
def bimodal_resnet_data_loader(cfg, mode):
    assert (mode in ["train", "valid", "test", "full_test"]), " 'mode' only supports 'train', 'valid', 'test' or 'full_test' "
transforms = build_transform_spatial(cfg)
if mode == "train":
dataset = AudioVisualData(
cfg.DATA.ROOT,
cfg.DATA.TRAIN_IMG_DATA,
cfg.DATA.TRAIN_AUD_DATA,
cfg.DATA.TRAIN_LABEL_DATA,
transforms
)
batch_size = cfg.DATA_LOADER.TRAIN_BATCH_SIZE
elif mode == "valid":
dataset = AudioVisualData(
cfg.DATA.ROOT,
cfg.DATA.VALID_IMG_DATA,
cfg.DATA.VALID_AUD_DATA,
cfg.DATA.VALID_LABEL_DATA,
transforms
)
batch_size = cfg.DATA_LOADER.VALID_BATCH_SIZE
elif mode == "full_test":
return ALLSampleAudioVisualData(
cfg.DATA.ROOT,
cfg.DATA.TEST_IMG_DATA,
cfg.DATA.TEST_AUD_DATA,
cfg.DATA.TEST_LABEL_DATA,
transforms
)
else:
dataset = AudioVisualData(
cfg.DATA.ROOT,
cfg.DATA.TEST_IMG_DATA,
cfg.DATA.TEST_AUD_DATA,
cfg.DATA.TEST_LABEL_DATA,
transforms
)
batch_size = cfg.DATA_LOADER.VALID_BATCH_SIZE
data_loader = DataLoader(
dataset=dataset,
batch_size=batch_size,
shuffle=cfg.DATA_LOADER.SHUFFLE,
num_workers=cfg.DATA_LOADER.NUM_WORKERS, # cfg.NUM_WORKS
drop_last=cfg.DATA_LOADER.DROP_LAST,
)
return data_loader
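# Minimal sketch (assumption: cfg follows dpcv's default config layout with
# DATA.* paths and DATA_LOADER.* settings): a batch from the registered loader
# above carries one frame per video, a fixed 50176-sample audio window and the
# five OCEAN scores.
def _demo_bimodal_batch(cfg):
    loader = bimodal_resnet_data_loader(cfg, mode="valid")
    batch = next(iter(loader))
    return {key: value.shape for key, value in batch.items()}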
if __name__ == "__main__":
# from tqdm import tqdm
# args = ("../../../datasets", "ImageData/trainingData", "VoiceData/trainingData_50176", "annotation_training.pkl")
trans = set_audio_visual_transform()
# data_set = AudioVisualData(*args, trans)
# # print(len(data_set))
# data = data_set[1]
# print(data["image"].shape, data["audio"].shape, data["label"].shape)
dataset = AudioVisualData(
"../../../datasets",
["image_data/training_data_01", "image_data/validation_data_01"],
["voice_data/trainingData", "voice_data/validationData"],
["annotation/annotation_training.pkl", "annotation/annotation_validation.pkl"],
trans,
)
print(len(dataset))
a = dataset[1]
print(a)
| 8,335 | 34.47234 | 119 | py |
DeepPersonality | DeepPersonality-main/dpcv/data/datasets/video_segment_data.py | import torch
from torch.utils.data import DataLoader
import glob
import numpy as np
import os
from pathlib import Path
from dpcv.data.datasets.bi_modal_data import VideoData
from dpcv.data.transforms.transform import set_transform_op
from dpcv.data.transforms.build import build_transform_spatial
from .build import DATA_LOADER_REGISTRY
from dpcv.data.transforms.temporal_transforms import TemporalRandomCrop, TemporalDownsample, TemporalEvenCropDownsample
from dpcv.data.transforms.temporal_transforms import Compose as TemporalCompose
from dpcv.data.datasets.common import VideoLoader
from dpcv.data.datasets.ture_personality_data import Chalearn21FrameData
class VideoFrameSegmentData(VideoData):
""" Dataloader for 3d models, (3d_resnet, slow-fast, tpn, vat)
"""
def __init__(self, data_root, img_dir, label_file, video_loader, spa_trans=None, tem_trans=None):
super().__init__(data_root, img_dir, label_file)
self.loader = video_loader
self.spa_trans = spa_trans
self.tem_trans = tem_trans
def __getitem__(self, index):
img = self.get_image_data(index)
label = self.get_ocean_label(index)
return {"image": img, "label": torch.as_tensor(label)}
def get_image_data(self, index):
img_dir = self.img_dir_ls[index]
imgs = self.frame_sample(img_dir)
return imgs
def frame_sample(self, img_dir):
if "face" in img_dir:
frame_indices = self.list_face_frames(img_dir)
else:
frame_indices = self.list_frames(img_dir)
if self.tem_trans is not None:
frame_indices = self.tem_trans(frame_indices)
imgs = self._loading(img_dir, frame_indices)
return imgs
def _loading(self, path, frame_indices):
clip = self.loader(path, frame_indices)
if self.spa_trans is not None:
clip = [self.spa_trans(img) for img in clip]
clip = torch.stack(clip, 0).permute(1, 0, 2, 3)
return clip
@staticmethod
def list_frames(img_dir):
img_path_ls = glob.glob(f"{img_dir}/*.jpg")
img_path_ls = sorted(img_path_ls, key=lambda x: int(Path(x).stem[6:]))
frame_indices = [int(Path(path).stem[6:]) for path in img_path_ls]
return frame_indices
@staticmethod
def list_face_frames(img_dir):
img_path_ls = glob.glob(f"{img_dir}/*.jpg")
img_path_ls = sorted(img_path_ls, key=lambda x: int(Path(x).stem[5:]))
frame_indices = [int(Path(path).stem[5:]) for path in img_path_ls]
return frame_indices
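# Minimal sketch (illustration only): frames on disk are named frame_<idx>.jpg
# or face_<idx>.jpg, so the two helpers above recover integer indices by
# stripping the prefix ("frame_" has 6 characters, "face_" has 5) before sorting.
def _demo_frame_index_from_name():
    names = ["frame_12.jpg", "frame_3.jpg", "frame_101.jpg"]
    return sorted(int(Path(name).stem[6:]) for name in names)  # [3, 12, 101]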
class FullTestVideoSegmentData(VideoFrameSegmentData):
def __getitem__(self, index):
imgs = self.get_image_data(index)
label = self.get_ocean_label(index)
return {"all_images": imgs, "label": torch.as_tensor(label)}
def frame_sample(self, img_dir):
if "face" in img_dir:
frame_indices = self.list_face_frames(img_dir)
else:
frame_indices = self.list_frames(img_dir)
if self.tem_trans is not None:
frame_indices = self.tem_trans(frame_indices)
imgs = self.load_batch_images(img_dir, frame_indices)
return imgs
def load_batch_images(self, img_dir, frame_indices_ls):
image_segment_obj_ls = []
for frame_seg_idx in frame_indices_ls:
image_segment_obj = self.loader(img_dir, frame_seg_idx)
if self.spa_trans is not None:
image_segment_obj = [self.spa_trans(img) for img in image_segment_obj]
image_segment_obj = torch.stack(image_segment_obj, 0).permute(1, 0, 2, 3)
image_segment_obj_ls.append(image_segment_obj)
return image_segment_obj_ls
class TruePersonalityVideoFrameSegmentData(Chalearn21FrameData):
""" Dataloader for 3d models, (3d_resnet, slow-fast, tpn, vat)
"""
def __init__(self, data_root, data_split, task, data_type, video_loader, spa_trans=None, tem_trans=None):
super().__init__(data_root, data_split, task, data_type, even_downsample=2000, trans=None, segment=True)
self.loader = video_loader
self.spa_trans = spa_trans
self.tem_trans = tem_trans
def __getitem__(self, index):
img = self.get_image_data(index)
label = self.get_image_label(index)
return {"image": img, "label": torch.as_tensor(label, dtype=torch.float32)}
def __len__(self):
return len(self.img_dir_ls)
def get_image_data(self, index):
img_dir = self.img_dir_ls[index]
imgs = self.frame_sample(img_dir)
return imgs
def get_image_label(self, index):
img_dir = self.img_dir_ls[index]
session, part = img_dir.split("/")
if self.type == "face":
part = part.replace("_face", "")
part = part.replace(self.task_mark, "T")
participant_id = self.session_id[str(int(session))][part]
participant_trait = self.parts_personality[participant_id]
participant_trait = np.array([float(v) for v in participant_trait.values()])
return participant_trait
def frame_sample(self, img_dir):
img_dir = os.path.join(self.data_dir, img_dir)
if "face" in img_dir:
frame_indices = self.list_face_frames(img_dir)
else:
frame_indices = self.list_frames(img_dir)
if self.tem_trans is not None:
frame_indices = self.tem_trans(frame_indices)
imgs = self._loading(img_dir, frame_indices)
return imgs
def _loading(self, path, frame_indices):
clip = self.loader(path, frame_indices)
if self.spa_trans is not None:
clip = [self.spa_trans(img) for img in clip]
clip = torch.stack(clip, 0).permute(1, 0, 2, 3)
return clip
@staticmethod
def list_frames(img_dir):
img_path_ls = glob.glob(f"{img_dir}/*.jpg")
img_path_ls = sorted(img_path_ls, key=lambda x: int(Path(x).stem[6:]))
frame_indices = [int(Path(path).stem[6:]) for path in img_path_ls]
return frame_indices
@staticmethod
def list_face_frames(img_dir):
img_path_ls = glob.glob(f"{img_dir}/*.jpg")
img_path_ls = sorted(img_path_ls, key=lambda x: int(Path(x).stem[5:]))
frame_indices = [int(Path(path).stem[5:]) for path in img_path_ls]
return frame_indices
def make_data_loader(cfg, mode="train"):
    assert (mode in ["train", "valid", "trainval", "test"]), "'mode' should be 'train', 'valid', 'trainval' or 'test'"
spatial_transform = set_transform_op()
temporal_transform = [TemporalRandomCrop(32)]
temporal_transform = TemporalCompose(temporal_transform)
video_loader = VideoLoader()
if mode == "train":
data_set = VideoFrameSegmentData(
cfg.DATA_ROOT,
cfg.TRAIN_IMG_DATA,
cfg.TRAIN_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
elif mode == "valid":
data_set = VideoFrameSegmentData(
cfg.DATA_ROOT,
cfg.VALID_IMG_DATA,
cfg.VALID_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
elif mode == "trainval":
data_set = VideoFrameSegmentData(
cfg.DATA_ROOT,
cfg.TRAINVAL_IMG_DATA,
cfg.TRAINVAL_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
else:
data_set = VideoFrameSegmentData(
cfg.DATA_ROOT,
cfg.TEST_IMG_DATA,
cfg.TEST_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
data_loader = DataLoader(
dataset=data_set,
batch_size=cfg.TRAIN_BATCH_SIZE,
shuffle=cfg.SHUFFLE,
num_workers=cfg.NUM_WORKERS,
)
return data_loader
@DATA_LOADER_REGISTRY.register()
def spatial_temporal_data_loader(cfg, mode="train"):
assert (mode in ["train", "valid", "trainval", "test", "full_test"]), \
"'mode' should be 'train' , 'valid', 'trainval', 'test' or 'full_test' "
spatial_transform = build_transform_spatial(cfg)
temporal_transform = [TemporalDownsample(length=100), TemporalRandomCrop(16)]
# temporal_transform = [TemporalRandomCrop(16)]
# temporal_transform = [TemporalDownsample(32)]
temporal_transform = TemporalCompose(temporal_transform)
data_cfg = cfg.DATA
if "face" in data_cfg.TRAIN_IMG_DATA:
video_loader = VideoLoader(image_name_formatter=lambda x: f"face_{x}.jpg")
else:
video_loader = VideoLoader(image_name_formatter=lambda x: f"frame_{x}.jpg")
if mode == "train":
data_set = VideoFrameSegmentData(
data_cfg.ROOT,
data_cfg.TRAIN_IMG_DATA,
data_cfg.TRAIN_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
elif mode == "valid":
data_set = VideoFrameSegmentData(
data_cfg.ROOT,
data_cfg.VALID_IMG_DATA,
data_cfg.VALID_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
elif mode == "trainval":
data_set = VideoFrameSegmentData(
data_cfg.ROOT,
data_cfg.TRAINVAL_IMG_DATA,
data_cfg.TRAINVAL_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
elif mode == "full_test":
temporal_transform = [TemporalDownsample(length=100), TemporalEvenCropDownsample(16, 6)]
temporal_transform = TemporalCompose(temporal_transform)
return FullTestVideoSegmentData(
data_cfg.ROOT,
data_cfg.TEST_IMG_DATA,
data_cfg.TEST_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
else:
data_set = VideoFrameSegmentData(
data_cfg.ROOT,
data_cfg.TEST_IMG_DATA,
data_cfg.TEST_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
loader_cfg = cfg.DATA_LOADER
data_loader = DataLoader(
dataset=data_set,
batch_size=loader_cfg.TRAIN_BATCH_SIZE,
shuffle=loader_cfg.SHUFFLE,
num_workers=loader_cfg.NUM_WORKERS,
)
return data_loader
@DATA_LOADER_REGISTRY.register()
def true_personality_spatial_temporal_data_loader(cfg, mode="train"):
spatial_transform = build_transform_spatial(cfg)
temporal_transform = [TemporalRandomCrop(32)]
# temporal_transform = [TemporalDownsample(length=2000), TemporalRandomCrop(16)]
temporal_transform = TemporalCompose(temporal_transform)
data_cfg = cfg.DATA
if data_cfg.TYPE == "face":
video_loader = VideoLoader(image_name_formatter=lambda x: f"face_{x}.jpg")
else:
video_loader = VideoLoader(image_name_formatter=lambda x: f"frame_{x}.jpg")
data_set = TruePersonalityVideoFrameSegmentData(
data_root="datasets/chalearn2021",
data_split=mode,
task=cfg.DATA.SESSION,
data_type=data_cfg.TYPE,
video_loader=video_loader,
spa_trans=spatial_transform,
tem_trans=temporal_transform,
)
shuffle = True if mode == "train" else False
loader_cfg = cfg.DATA_LOADER
data_loader = DataLoader(
dataset=data_set,
batch_size=loader_cfg.TRAIN_BATCH_SIZE,
shuffle=shuffle,
num_workers=loader_cfg.NUM_WORKERS,
)
return data_loader
if __name__ == "__main__":
import os
from dpcv.config.default_config_opt import cfg
os.chdir("../../../")
print(os.getcwd())
# interpret_data = InterpretData(
# data_root="datasets",
# img_dir="image_data/valid_data",
# label_file="annotation/annotation_validation.pkl",
# trans=set_transform_op(),
# )
# print(interpret_data[18])
# data_loader = make_data_loader(cfg, mode="valid")
# for i, item in enumerate(data_loader):
# print(item["image"].shape, item["label"].shape)
#
# if i > 5:
# break
train_dataset = true_personality_spatial_temporal_data_loader(cfg)
print(len(train_dataset))
| 12,338 | 33.563025 | 120 | py |
DeepPersonality | DeepPersonality-main/dpcv/data/datasets/bi_modal_data.py | import glob
import torch
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
import pickle
import os
from random import shuffle
class VideoData(Dataset):
"""base class for bi-modal input data"""
def __init__(self, data_root, img_dir, label_file, audio_dir=None, parse_img_dir=True, parse_aud_dir=False):
self.data_root = data_root
self.img_dir = img_dir
self.audio_dir = audio_dir
self.annotation = self.parse_annotation(label_file)
if parse_img_dir:
            self.img_dir_ls = self.parse_data_dir(img_dir)  # each directory corresponds to one video
if parse_aud_dir:
self.aud_file_ls = self.parse_data_dir(audio_dir)
def parse_data_dir(self, data_dir):
"""
Args:
            data_dir: (str or List[str]) a single data directory, or a list of directories
                (e.g. train and valid data) whose contents are combined
        Returns:
            data_dir_path: (List[str]) sorted paths of the per-video directories (or audio files)
                under data_root
"""
if isinstance(data_dir, list):
data_dir_path = []
for dir_i in data_dir:
data_dir_ls = sorted(os.listdir(os.path.join(self.data_root, dir_i)))
data_dir_path.extend([os.path.join(self.data_root, dir_i, item) for item in data_dir_ls])
else:
data_dir_ls = sorted(os.listdir(os.path.join(self.data_root, data_dir)))
data_dir_path = [os.path.join(self.data_root, data_dir, item) for item in data_dir_ls]
return data_dir_path
def parse_annotation(self, label_file):
"""
            label_file: (str or List[str, str]) annotation file path(s); when two paths are given
                their annotations are merged into one dict
"""
if isinstance(label_file, list):
assert len(label_file) == 2, "only support join train and validation data"
anno_list = []
for label_i in label_file:
label_path = os.path.join(self.data_root, label_i)
with open(label_path, "rb") as f:
anno_list.append(pickle.load(f, encoding="latin1"))
for key in anno_list[0].keys():
anno_list[0][key].update(anno_list[1][key])
annotation = anno_list[0]
else:
label_path = os.path.join(self.data_root, label_file)
with open(label_path, "rb") as f:
annotation = pickle.load(f, encoding="latin1")
return annotation
def get_ocean_label(self, index):
video_path = self.img_dir_ls[index]
video_name = f"{os.path.basename(video_path)}.mp4"
score = [
self.annotation["openness"][video_name],
self.annotation["conscientiousness"][video_name],
self.annotation["extraversion"][video_name],
self.annotation["agreeableness"][video_name],
self.annotation["neuroticism"][video_name],
]
return score
def __getitem__(self, index):
raise NotImplementedError
def get_image_data(self, index):
return self.img_dir_ls[index]
def get_wave_data(self, index):
raise NotImplementedError
def __len__(self):
return len(self.img_dir_ls)
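# Minimal subclass sketch (hypothetical, for illustration only): a concrete dataset built on
# VideoData mainly needs to implement __getitem__ on top of the parsing helpers above, e.g.
#
# class FrameDirData(VideoData):
#     def __getitem__(self, index):
#         img_dir = self.get_image_data(index)    # one video's frame directory
#         label = self.get_ocean_label(index)     # five OCEAN scores in [0, 1]
#         return {"image_dir": img_dir, "label": torch.as_tensor(label)}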
| 3,143 | 35.988235 | 112 | py |
DeepPersonality | DeepPersonality-main/dpcv/data/datasets/multi_modal_pred.py | import torch
import glob
import os
from torch.utils.data import DataLoader
from pathlib import Path
from .build import DATA_LOADER_REGISTRY
class MultiModalData:
def __init__(self, data_root, split, mode, session, spectrum_channel=15):
assert session in ["none", "talk", "animal", "lego", "ghost"], \
"session should be in one of ['none', 'talk', 'animal', 'ghost'] or 'none'"
self.data_root = data_root
self.split = split
self.mode = mode
self.session = session
self.spectrum_channel = spectrum_channel
self.sample_ls = self.get_data_ls(split, mode)
def __getitem__(self, idx):
sample = self.sample_ls[idx]
sample = torch.load(sample)
if self.mode == "audio":
feature = sample["feature"]
if self.session in ["talk", "animal", "lego", "ghost"]:
temp = feature[:self.spectrum_channel]
sample["feature"] = temp
else:
sample_len = len(feature)
if sample_len < 15:
temp = torch.zeros(15, 128, dtype=feature.dtype)
temp[: sample_len, :] = feature
sample["feature"] = temp
# data, label = sample["data"], sample["label"]
return sample
def __len__(self):
return len(self.sample_ls)
def get_data_ls(self, split, mode):
if self.session in ["talk", "animal", "lego", "ghost"]:
data_dir = f"{self.session}/{split}_{mode}"
else:
data_dir = f"{split}_{mode}"
data_dir_path = Path(os.path.join(self.data_root, data_dir))
data_ls_path = sorted(data_dir_path.rglob("*.pkl"))
data_ls_sample = list(data_ls_path)
# for sample in data_ls_path:
# data_ls_sample.append(sample)
return data_ls_sample
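# Expected on-disk layout, inferred from get_data_ls above (paths are examples only):
#   <data_root>/[<session>/]<split>_<mode>/**/*.pkl
# where each .pkl was written by the feature extractors and holds at least
# {"feature": Tensor, "label": ndarray of shape (5,)}; __getitem__ returns that dict
# unchanged apart from the audio-channel padding/truncation handled above.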
@DATA_LOADER_REGISTRY.register()
def multi_modal_data_loader(cfg, mode="train"):
assert (mode in ["train", "valid", "trainval", "test", "full_test"]), \
"'mode' should be 'train' , 'valid', 'trainval', 'test', 'full_test' "
shuffle = cfg.DATA_LOADER.SHUFFLE
if mode == "train":
data_set = MultiModalData(
data_root=cfg.DATA.ROOT,
split="train",
mode=cfg.DATA.TYPE,
session=cfg.DATA.SESSION,
spectrum_channel=cfg.MODEL.SPECTRUM_CHANNEL,
)
elif mode == "valid":
data_set = MultiModalData(
data_root=cfg.DATA.ROOT,
split="valid",
mode=cfg.DATA.TYPE,
session=cfg.DATA.SESSION,
spectrum_channel=cfg.MODEL.SPECTRUM_CHANNEL,
)
else:
shuffle = False
data_set = MultiModalData(
data_root=cfg.DATA.ROOT,
split="test",
mode=cfg.DATA.TYPE,
session=cfg.DATA.SESSION,
spectrum_channel=cfg.MODEL.SPECTRUM_CHANNEL,
)
data_loader = DataLoader(
dataset=data_set,
batch_size=cfg.DATA_LOADER.TRAIN_BATCH_SIZE,
shuffle=shuffle,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
)
return data_loader
if __name__ == "__main__":
# test setting
import os; os.chdir("/home/rongfan/05-personality_traits/DeepPersonality")
data_set = MultiModalData(
data_root="datasets/extracted_feature_impression",
mode="frame",
split="train",
)
| 3,424 | 30.422018 | 87 | py |
DeepPersonality | DeepPersonality-main/dpcv/data/datasets/slow_fast_data.py | import torch
from torch.utils.data import DataLoader
from dpcv.data.transforms.transform import set_transform_op
from dpcv.data.datasets.video_segment_data import VideoFrameSegmentData
from dpcv.data.transforms.temporal_transforms import TemporalRandomCrop, TemporalDownsample, TemporalTwoEndsCrop
from dpcv.data.transforms.temporal_transforms import Compose as TemporalCompose
from dpcv.data.transforms.build import build_transform_spatial
from dpcv.data.datasets.build import DATA_LOADER_REGISTRY
from dpcv.data.datasets.common import VideoLoader
class SlowFastData(VideoFrameSegmentData):
def __getitem__(self, index):
img = self.get_image_data(index)
frame_list = self.pack_pathway_output(img)
label = self.get_ocean_label(index)
return {"image": frame_list, "label": torch.as_tensor(label)}
@staticmethod
def pack_pathway_output(frames):
fast_pathway = frames
slow_pathway = torch.index_select(
frames,
1,
torch.linspace(
0, frames.shape[1] - 1, frames.shape[1] // 4
).long(),
)
frame_list = [slow_pathway, fast_pathway]
return frame_list
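# Worked shape example for pack_pathway_output (assuming the 64-frame clips sampled by the
# loaders below): `frames` arrives as (C, T, H, W), e.g. (3, 64, 224, 224); the fast pathway
# keeps all 64 frames while the slow pathway indexes every 4th frame along dim 1, giving
# (3, 16, 224, 224). The returned list is [slow_pathway, fast_pathway].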
class FullTestSlowFastData(SlowFastData):
def __getitem__(self, index):
img_tensor_ls = self.get_image_data(index)
frame_segs = []
for img in img_tensor_ls:
frame_list = self.pack_pathway_output(img)
frame_segs.append(frame_list)
label = self.get_ocean_label(index)
return {"all_images": frame_segs, "label": torch.as_tensor(label)}
def frame_sample(self, img_dir):
if "face" in img_dir:
frame_indices = self.list_face_frames(img_dir)
else:
frame_indices = self.list_frames(img_dir)
if self.tem_trans is not None:
frame_indices_ls = self.tem_trans(frame_indices)
frame_obj_ls = []
for frames in frame_indices_ls:
imgs = self._loading(img_dir, frames)
frame_obj_ls.append(imgs)
return frame_obj_ls
def make_data_loader(cfg, mode="train"):
assert (mode in ["train", "valid", "trainval", "test"]), "'mode' should be 'train' , 'valid' or 'trainval'"
spatial_transform = set_transform_op()
temporal_transform = [TemporalRandomCrop(64)]
temporal_transform = TemporalCompose(temporal_transform)
video_loader = VideoLoader()
if mode == "train":
data_set = SlowFastData(
cfg.DATA_ROOT,
cfg.TRAIN_IMG_DATA,
cfg.TRAIN_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
elif mode == "valid":
data_set = SlowFastData(
cfg.DATA_ROOT,
cfg.VALID_IMG_DATA,
cfg.VALID_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
elif mode == "trainval":
data_set = SlowFastData(
cfg.DATA_ROOT,
cfg.TRAINVAL_IMG_DATA,
cfg.TRAINVAL_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
else:
data_set = SlowFastData(
cfg.DATA_ROOT,
cfg.TEST_IMG_DATA,
cfg.TEST_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
data_loader = DataLoader(
dataset=data_set,
batch_size=cfg.TRAIN_BATCH_SIZE,
shuffle=cfg.SHUFFLE,
num_workers=cfg.NUM_WORKERS,
)
return data_loader
@DATA_LOADER_REGISTRY.register()
def slow_fast_data_loader(cfg, mode="train"):
assert (mode in ["train", "valid", "trainval", "test", "full_test"]), \
"'mode' should be 'train' , 'valid' 'trainval' 'test' and 'full_test' "
spatial_transform = build_transform_spatial(cfg)
temporal_transform = [TemporalDownsample(length=100), TemporalRandomCrop(64)]
# temporal_transform = [TemporalDownsample(length=64)]
temporal_transform = TemporalCompose(temporal_transform)
data_cfg = cfg.DATA
if "face" in data_cfg.TRAIN_IMG_DATA:
video_loader = VideoLoader(image_name_formatter=lambda x: f"face_{x}.jpg")
else:
video_loader = VideoLoader(image_name_formatter=lambda x: f"frame_{x}.jpg")
if mode == "train":
data_set = SlowFastData(
data_cfg.ROOT,
data_cfg.TRAIN_IMG_DATA,
data_cfg.TRAIN_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
elif mode == "valid":
data_set = SlowFastData(
data_cfg.ROOT,
data_cfg.VALID_IMG_DATA,
data_cfg.VALID_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
elif mode == "trainval":
data_set = SlowFastData(
data_cfg.ROOT,
data_cfg.TRAINVAL_IMG_DATA,
data_cfg.TRAINVAL_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
elif mode == "full_test":
temporal_transform = [TemporalDownsample(length=100), TemporalTwoEndsCrop(64)]
temporal_transform = TemporalCompose(temporal_transform)
return FullTestSlowFastData(
data_cfg.ROOT,
data_cfg.TEST_IMG_DATA,
data_cfg.TEST_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
else:
data_set = SlowFastData(
data_cfg.ROOT,
data_cfg.TEST_IMG_DATA,
data_cfg.TEST_LABEL_DATA,
video_loader,
spatial_transform,
temporal_transform,
)
loader_cfg = cfg.DATA_LOADER
data_loader = DataLoader(
dataset=data_set,
batch_size=loader_cfg.TRAIN_BATCH_SIZE,
shuffle=loader_cfg.SHUFFLE,
num_workers=loader_cfg.NUM_WORKERS,
)
return data_loader
if __name__ == "__main__":
import os
from dpcv.config.interpret_dan_cfg import cfg
os.chdir("../../")
# interpret_data = InterpretData(
# data_root="datasets",
# img_dir="image_data/valid_data",
# label_file="annotation/annotation_validation.pkl",
# trans=set_transform_op(),
# )
# print(interpret_data[18])
data_loader = make_data_loader(cfg, mode="valid")
for i, item in enumerate(data_loader):
print(item["image"][0].shape, item["image"][1].shape, item["label"].shape) | 6,596 | 30.869565 | 112 | py |
DeepPersonality | DeepPersonality-main/dpcv/data/utils/feature_extractor.py | import torch
import os
from pathlib import Path
from tqdm import tqdm
import numpy as np
from PIL import Image
import glob
from torchvggish import vggish, vggish_input
from dpcv.data.transforms.transform import set_transform_op
from dpcv.data.datasets.bi_modal_data import VideoData
from dpcv.modeling.networks.multi_modal_pred_net import resnet101_visual_feature_extractor
import pickle
class ExtractVisualFeatureData(VideoData):
def __init__(self, data_root, img_dir, label_file, save_to, length=100, suffix="frame_"):
super().__init__(data_root, img_dir, label_file)
self.len = length
self.trans = set_transform_op()
self.model = self.get_extract_model()
os.makedirs(save_to, exist_ok=True)
self.save_to = save_to
self.suffix = suffix
def get_sample_frames(self, idx):
img_dir = self.img_dir_ls[idx]
img_path_ls = glob.glob(f"{img_dir}/*.jpg")
img_path_ls = sorted(
img_path_ls,
key=lambda x: int(
str(Path(x).stem).replace(f"{self.suffix}", "")
)
)
sample_frames_id = np.linspace(
0, len(img_path_ls), self.len, endpoint=False, dtype=np.int16
).tolist()
img_path_ls_sampled = [img_path_ls[idx] for idx in sample_frames_id]
img_obj_ls = [Image.open(img_path) for img_path in img_path_ls_sampled]
video_name = os.path.basename(img_dir)
return video_name, img_obj_ls
@staticmethod
def get_extract_model():
model = resnet101_visual_feature_extractor()
return model.eval()
def extract_and_save_feat(self):
with torch.no_grad():
for i in tqdm(range(len(self.img_dir_ls))):
video_name, img_obj_ls = self.get_sample_frames(i)
img_obj_tensor = self.img_transform(img_obj_ls)
label = self.get_ocean_label(i)
feat = self.model(img_obj_tensor)
sample = {
"feature": feat,
"label": np.array(label, dtype="float32"),
}
saved_path = os.path.join(self.save_to, f"{video_name}.pkl")
torch.save(sample, saved_path)
def img_transform(self, img_obj_ls):
img_obj_ls = [self.trans(img) for img in img_obj_ls]
img_obj_ls = torch.stack(img_obj_ls, dim=0)
img_obj_ls = img_obj_ls.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
return img_obj_ls
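# Each saved sample is {"feature": Tensor, "label": ndarray of shape (5,)}. With the default
# length=100 and the ResNet-101 backbone, the visual feature is expected to be (100, 2048)
# (2048 is an assumption based on the usual pooled ResNet-101 feature size).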
class ExtractAudioFeatureData:
def __init__(self, aud_dir, anno_path, save_to):
self.aud_files = glob.glob(f"{aud_dir}/*.wav")
self.model = self.get_extract_model()
self.annotation = self.get_annotation(anno_path)
os.makedirs(save_to, exist_ok=True)
self.save_to = save_to
@staticmethod
def get_extract_model():
# Initialise model and download weights
embedding_model = vggish()
embedding_model.eval()
return embedding_model
def extract_and_save_feat(self):
with torch.no_grad():
for file in tqdm(self.aud_files):
file_name = Path(file).stem
label = self.get_ocean_label(file_name)
example = vggish_input.wavfile_to_examples(file)
embeddings = self.model.forward(example)
saved_path = os.path.join(self.save_to, f"{file_name}.pkl")
sample = {"feature": embeddings, "label": np.array(label, dtype="float32")}
torch.save(sample, saved_path)
def get_ocean_label(self, file_name):
video_name = f"{file_name}.mp4"
score = [
self.annotation["openness"][video_name],
self.annotation["conscientiousness"][video_name],
self.annotation["extraversion"][video_name],
self.annotation["agreeableness"][video_name],
self.annotation["neuroticism"][video_name],
]
return score
@staticmethod
def get_annotation(label_path):
with open(label_path, "rb") as f:
annotation = pickle.load(f, encoding="latin1")
return annotation
if __name__ == "__main__":
os.chdir("/home/rongfan/05-personality_traits/DeepPersonality")
extractor = ExtractVisualFeatureData(
data_root="datasets",
img_dir="image_data/test_data_face",
label_file="annotation/annotation_test.pkl",
save_to="datasets/extracted_feature_impression/test_face",
suffix="face_",
)
extractor.extract_and_save_feat()
# extr = ExtractAudioFeatureData(
# aud_dir="datasets/voice_data/voice_raw/validationData",
# anno_path="datasets/annotation/annotation_validation.pkl",
# save_to="datasets/extracted_feature_impression/valid_aud"
# )
# extr.extract_and_save_feat()
| 4,866 | 36.438462 | 103 | py |
DeepPersonality | DeepPersonality-main/dpcv/data/utils/feature_extractor_tp.py | import os
from tqdm import tqdm
import torch
import numpy as np
from torchvggish import vggish, vggish_input
from dpcv.modeling.networks.multi_modal_pred_net import resnet101_visual_feature_extractor
from dpcv.data.datasets.ture_personality_data import Chalearn21FrameData, Chalearn21AudioDataPath
class TPExtractVisualFeatureData:
def __init__(self, data_root, data_type, task, trans=None, save_to="", sample_num=2000):
assert data_type in ["frame", "face", "audio"], "data_type should be one of [frame, face or video]"
self.type = data_type
if data_type == "audio":
self.dataset = {
"train": Chalearn21AudioDataPath(data_root, "train", task),
"valid": Chalearn21AudioDataPath(data_root, "valid", task),
"test": Chalearn21AudioDataPath(data_root, "test", task),
}
else:
self.dataset = {
"train": Chalearn21FrameData(
data_root, "train", task, data_type, even_downsample=sample_num, trans=trans, segment=False),
"valid": Chalearn21FrameData(
data_root, "valid", task, data_type, even_downsample=sample_num, trans=trans, segment=False),
"test": Chalearn21FrameData(
data_root, "test", task, data_type, even_downsample=sample_num, trans=trans, segment=False),
}
self.model = self.get_extract_model()
# os.makedirs(save_to, exist_ok=True)
self.save_to = save_to
def get_extract_model(self):
if not self.type == "audio":
model = resnet101_visual_feature_extractor()
else:
model = vggish()
model.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
return model.eval()
def extract_and_save_feat(self):
with torch.no_grad():
# if not self.type == "audio":
for split in ["train", "valid", "test"]:
dataset = self.dataset[split]
save_dir = os.path.join(self.save_to, f"{split}_{self.type}")
os.makedirs(save_dir, exist_ok=True)
for i in tqdm(range(len(dataset))):
if not self.type == "audio":
data, label = dataset[i]["image"], dataset[i]["label"]
file_path = dataset.get_file_path(i)
file_name = os.path.basename(file_path).replace(".jpg", "")
dir_path = os.path.dirname(file_path)
dir_name = "_".join(dir_path.split("/")[-2:])
feat = self.model(
data[None].to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
)
sample = {
"feature": feat,
"label": np.array(label, dtype="float32"),
}
save_to_dir = os.path.join(save_dir, dir_name)
os.makedirs(save_to_dir, exist_ok=True)
saved_path = os.path.join(save_to_dir, f"{file_name}.pkl")
torch.save(sample, saved_path)
else:
file_path, label = dataset[i]["aud_path"], dataset[i]["aud_label"]
dir_name = "_".join(file_path.split("/")[-2:]).replace(".wav", "")
example = vggish_input.wavfile_to_examples(file_path)
feat = self.model.forward(
example.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
)
sample = {
"feature": feat,
"label": np.array(label, dtype="float32"),
}
saved_path = os.path.join(save_dir, f"{dir_name}.pkl")
torch.save(sample, saved_path)
if __name__ == "__main__":
from dpcv.data.transforms.transform import set_transform_op
os.chdir("/home/rongfan/05-personality_traits/DeepPersonality")
task = "talk"
type = "frame"
transform = set_transform_op()
extractor = TPExtractVisualFeatureData(
data_root="datasets/chalearn2021",
data_type=type,
task=task,
trans=transform,
save_to=f"datasets/extracted_feature_tp/{task}",
)
extractor.extract_and_save_feat()
| 4,514 | 44.15 | 113 | py |
DeepPersonality | DeepPersonality-main/dpcv/data/transforms/transform.py | """
transform operation for different networks
for vgg face mean = (131.0912, 103.8827, 91.4953) no std
"""
from .build import TRANSFORM_REGISTRY
def set_transform_op():
import torchvision.transforms as transforms
    norm_mean = [0.485, 0.456, 0.406]  # channel statistics from the ImageNet (ILSVRC) dataset of roughly 1.2 million training images
norm_std = [0.229, 0.224, 0.225]
transforms = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop((224, 224)),
transforms.ToTensor(),
transforms.Normalize(norm_mean, norm_std)
])
return transforms
@TRANSFORM_REGISTRY.register()
def standard_frame_transform():
import torchvision.transforms as transforms
transforms = transforms.Compose([
transforms.Resize(256),
transforms.RandomHorizontalFlip(0.5),
transforms.CenterCrop((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
return transforms
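# Hedged usage sketch: applied to a PIL image of any size, the pipeline above yields a
# normalized tensor of shape (3, 224, 224), e.g.
#   trans = standard_frame_transform()
#   img_tensor = trans(Image.open("frame_1.jpg"))  # path is illustrative only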
@TRANSFORM_REGISTRY.register()
def face_image_transform():
import torchvision.transforms as transforms
    norm_mean = [0.485, 0.456, 0.406]  # channel statistics from the ImageNet (ILSVRC) dataset of roughly 1.2 million training images
norm_std = [0.229, 0.224, 0.225]
transforms = transforms.Compose([
transforms.Resize(112),
transforms.RandomHorizontalFlip(0.5),
transforms.ToTensor(),
transforms.Normalize(norm_mean, norm_std)
])
return transforms
@TRANSFORM_REGISTRY.register()
def face_image_x2_transform():
import torchvision.transforms as transforms
    norm_mean = [0.485, 0.456, 0.406]  # channel statistics from the ImageNet (ILSVRC) dataset of roughly 1.2 million training images
norm_std = [0.229, 0.224, 0.225]
transforms = transforms.Compose([
transforms.Resize(224),
transforms.RandomHorizontalFlip(0.5),
transforms.ToTensor(),
transforms.Normalize(norm_mean, norm_std)
])
return transforms
@TRANSFORM_REGISTRY.register()
def crnet_frame_face_transform():
import torchvision.transforms as transforms
    norm_mean = [0.485, 0.456, 0.406]  # channel statistics from the ImageNet (ILSVRC) dataset of roughly 1.2 million training images
norm_std = [0.229, 0.224, 0.225]
frame_transforms = transforms.Compose([
transforms.Resize(256),
transforms.RandomHorizontalFlip(0.5),
transforms.CenterCrop((224, 224)),
transforms.ToTensor(),
transforms.Normalize(norm_mean, norm_std)
])
face_transforms = transforms.Compose([
transforms.Resize(112),
transforms.RandomHorizontalFlip(0.5),
transforms.ToTensor(),
transforms.Normalize(norm_mean, norm_std)
])
return {"frame": frame_transforms, "face": face_transforms}
@TRANSFORM_REGISTRY.register()
def set_tpn_transform_op():
import torchvision.transforms as transforms
    norm_mean = [0.485, 0.456, 0.406]  # channel statistics from the ImageNet (ILSVRC) dataset of roughly 1.2 million training images
norm_std = [0.229, 0.224, 0.225]
transforms = transforms.Compose([
transforms.Resize(256),
transforms.RandomHorizontalFlip(0.5),
transforms.CenterCrop((256, 256)),
transforms.ToTensor(),
transforms.Normalize(norm_mean, norm_std)
])
return transforms
@TRANSFORM_REGISTRY.register()
def set_vat_transform_op():
import torchvision.transforms as transforms
    norm_mean = [0.485, 0.456, 0.406]  # channel statistics from the ImageNet (ILSVRC) dataset of roughly 1.2 million training images
norm_std = [0.229, 0.224, 0.225]
transforms = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop((224, 112)),
transforms.RandomHorizontalFlip(0.5),
transforms.ToTensor(),
transforms.Normalize(norm_mean, norm_std)
])
return transforms
def set_crnet_transform():
import torchvision.transforms as transforms
# norm_mean = [0.485, 0.456, 0.406] # statistics from imagenet dataset which contains about 120 million images
# norm_std = [0.229, 0.224, 0.225]
transforms = transforms.Compose([
transforms.ToTensor(),
transforms.CenterCrop((112, 112))
# transforms.Normalize(norm_mean, norm_std)
])
return {"frame": transforms, "face": transforms}
def set_audio_visual_transform():
import torchvision.transforms as transforms
transforms = transforms.Compose([
# transforms.RandomVerticalFlip(0.5),
# transforms.RandomRotation(15),
# transforms.ColorJitter(),
transforms.RandomHorizontalFlip(0.5),
transforms.RandomCrop((224, 224)),
transforms.ToTensor(),
])
return transforms
def set_per_transform():
import torchvision.transforms as transforms
transforms = transforms.Compose([
transforms.CenterCrop((112, 112)),
transforms.ToTensor(),
])
return transforms
| 4,902 | 32.353741 | 115 | py |
DeepPersonality | DeepPersonality-main/dpcv/data/transforms/spatial_transforms.py | """
code modified form https://github.com/kenshohara/3D-ResNets-PyTorch.git
"""
import random
from torchvision.transforms import transforms
from torchvision.transforms import functional as F
from PIL import Image
class Compose(transforms.Compose):
def randomize_parameters(self):
for t in self.transforms:
t.randomize_parameters()
class ToTensor(transforms.ToTensor):
def randomize_parameters(self):
pass
class Normalize(transforms.Normalize):
def randomize_parameters(self):
pass
class ScaleValue(object):
def __init__(self, s):
self.s = s
def __call__(self, tensor):
tensor *= self.s
return tensor
def randomize_parameters(self):
pass
class Resize(transforms.Resize):
def randomize_parameters(self):
pass
class Scale(transforms.Scale):  # transforms.Scale is a deprecated alias of Resize; requires a torchvision version that still provides it
def randomize_parameters(self):
pass
class CenterCrop(transforms.CenterCrop):
def randomize_parameters(self):
pass
class CornerCrop(object):
def __init__(self,
size,
crop_position=None,
crop_positions=['c', 'tl', 'tr', 'bl', 'br']):
self.size = size
self.crop_position = crop_position
self.crop_positions = crop_positions
if crop_position is None:
self.randomize = True
else:
self.randomize = False
self.randomize_parameters()
def __call__(self, img):
image_width = img.size[0]
image_height = img.size[1]
h, w = (self.size, self.size)
if self.crop_position == 'c':
i = int(round((image_height - h) / 2.))
j = int(round((image_width - w) / 2.))
elif self.crop_position == 'tl':
i = 0
j = 0
elif self.crop_position == 'tr':
i = 0
j = image_width - self.size
elif self.crop_position == 'bl':
i = image_height - self.size
j = 0
elif self.crop_position == 'br':
i = image_height - self.size
j = image_width - self.size
img = F.crop(img, i, j, h, w)
return img
def randomize_parameters(self):
if self.randomize:
self.crop_position = self.crop_positions[random.randint(
0,
len(self.crop_positions) - 1)]
def __repr__(self):
return self.__class__.__name__ + '(size={0}, crop_position={1}, randomize={2})'.format(
self.size, self.crop_position, self.randomize)
class RandomHorizontalFlip(transforms.RandomHorizontalFlip):
def __init__(self, p=0.5):
super().__init__(p)
self.randomize_parameters()
def __call__(self, img):
"""
Args:
img (PIL.Image): Image to be flipped.
Returns:
PIL.Image: Randomly flipped image.
"""
if self.random_p < self.p:
return F.hflip(img)
return img
def randomize_parameters(self):
self.random_p = random.random()
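# Design note: every transform in this module exposes randomize_parameters() so that Compose
# can draw the random parameters once per clip and then apply the *same* flip/crop to every
# frame, keeping a video clip spatially consistent. A hedged usage sketch (names illustrative):
#   spatial_transform.randomize_parameters()
#   clip = [spatial_transform(frame) for frame in clip_frames]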
class MultiScaleCornerCrop(object):
def __init__(self,
size,
scales,
crop_positions=['c', 'tl', 'tr', 'bl', 'br'],
interpolation=Image.BILINEAR):
self.size = size
self.scales = scales
self.interpolation = interpolation
self.crop_positions = crop_positions
self.randomize_parameters()
def __call__(self, img):
short_side = min(img.size[0], img.size[1])
crop_size = int(short_side * self.scale)
self.corner_crop.size = crop_size
img = self.corner_crop(img)
return img.resize((self.size, self.size), self.interpolation)
def randomize_parameters(self):
self.scale = self.scales[random.randint(0, len(self.scales) - 1)]
crop_position = self.crop_positions[random.randint(
0,
len(self.crop_positions) - 1)]
self.corner_crop = CornerCrop(None, crop_position)
def __repr__(self):
return self.__class__.__name__ + '(size={0}, scales={1}, interpolation={2})'.format(
self.size, self.scales, self.interpolation)
class RandomResizedCrop(transforms.RandomResizedCrop):
def __init__(self,
size,
scale=(0.08, 1.0),
ratio=(3. / 4., 4. / 3.),
interpolation=Image.BILINEAR):
super().__init__(size, scale, ratio, interpolation)
self.randomize_parameters()
def __call__(self, img):
if self.randomize:
self.random_crop = self.get_params(img, self.scale, self.ratio)
self.randomize = False
i, j, h, w = self.random_crop
return F.resized_crop(img, i, j, h, w, self.size, self.interpolation)
def randomize_parameters(self):
self.randomize = True
class ColorJitter(transforms.ColorJitter):
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
super().__init__(brightness, contrast, saturation, hue)
self.randomize_parameters()
def __call__(self, img):
if self.randomize:
self.transform = self.get_params(self.brightness, self.contrast,
self.saturation, self.hue)
self.randomize = False
return self.transform(img)
def randomize_parameters(self):
self.randomize = True
class PickFirstChannels(object):
def __init__(self, n):
self.n = n
def __call__(self, tensor):
return tensor[:self.n, :, :]
def randomize_parameters(self):
pass | 5,649 | 25.036866 | 95 | py |
DeepPersonality | DeepPersonality-main/dpcv/exps_second_stage/featrue_process.py | import os
import torch
from tqdm import tqdm
import glob
import numpy as np
from scipy import signal
import pickle
def gen_statistic_data(data_path, save_to, method=None):
data = torch.load(data_path)
statistic_data_ls = []
for sample in data["video_frames_pred"]:
statistic_data_ls.append(method(sample))
statistic_data = {"video_statistic": statistic_data_ls, "video_label": data["video_label"]}
torch.save(statistic_data, save_to)
    print(f"saved statistic data to {save_to}")
def assemble_pred_statistic(data):
assert isinstance(data, torch.Tensor), "the input data should be torch.Tensor"
max_0, _ = data.max(dim=0)
min_0, _ = data.min(dim=0)
mean_0 = data.mean(dim=0)
std_0 = data.std(dim=0)
data_first_order = data[1:, :] - data[:-1, :]
max_1, _ = data_first_order.max(dim=0)
min_1, _ = data_first_order.min(dim=0)
mean_1 = data_first_order.mean(dim=0)
std_1 = data_first_order.std(dim=0)
data_sec_order = data[2:, :] - data[:-2, :]
max_2, _ = data_sec_order.max(dim=0)
min_2, _ = data_sec_order.min(dim=0)
mean_2 = data_sec_order.mean(dim=0)
std_2 = data_sec_order.std(dim=0)
statistic_representation = torch.stack(
[max_0, min_0, mean_0, std_0, max_1, min_1, mean_1, std_1, max_2, min_2, mean_2, std_2],
dim=0,
)
return statistic_representation
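# Worked shape example: with frame-wise predictions of shape (100, 5) (100 frames, 5 traits),
# the zeroth-, first- and second-order difference sequences each contribute their max, min,
# mean and std per trait, so the stacked statistic representation has shape (12, 5).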
def resample_pred_spectrum(data, new_rate=100, top_n=80):
pred_fft = np.fft.fft2(data)
pred_fft = pred_fft[:, :50]
resample_pred_fft = signal.resample(pred_fft, new_rate, axis=1)
amp = np.abs(resample_pred_fft)[:, :top_n]
pha = np.angle(resample_pred_fft)[:, :top_n]
return amp, pha
def select_pred_spectrum(data, top_n=80, select=True):
    # for each trait there are 100 predictions, one per sampled frame
# data: (1, 100)
pred_fft = np.fft.fft2(data)
if select:
length = int(len(pred_fft[0]) / 2)
if top_n > length:
top_n = length
amp = np.abs(pred_fft)[:, :top_n]
pha = np.angle(pred_fft)[:, :top_n]
else:
amp = np.abs(pred_fft)
pha = np.angle(pred_fft)
return amp.astype("float32"), pha.astype("float32")
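# Worked example for select_pred_spectrum: a single-trait prediction sequence of shape (1, 100)
# yields an FFT of length 100; half of that is 50, so a requested top_n=80 is clipped to 50 and
# the returned amplitude/phase arrays are each (1, 50) float32.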
def gen_spectrum_data(data_path, save_to, method):
data = torch.load(data_path)
spec_data_ls = []
for pred, label in tqdm(zip(data["video_frames_feat"], data["video_label"])):
pred, label = pred.cpu(), label.cpu()
amp_spectrum, pha_spectrum = [], []
for one_channel in pred.T:
amp, pha = method(one_channel.numpy()[None, :])
amp_spectrum.append(amp)
pha_spectrum.append(pha)
spectrum_data = {
"amp_spectrum": np.concatenate(amp_spectrum, axis=0),
"pha_spectrum": np.concatenate(pha_spectrum, axis=0),
"video_label": label.numpy()
}
spec_data_ls.append(spectrum_data)
if len(spec_data_ls) > 2000:
# separate data in case of out-of-memory issue
torch.save(spec_data_ls[:1500], save_to.replace(".pkl", "_1.pkl"))
print("saved [1/4]...")
torch.save(spec_data_ls[1500: 3000], save_to.replace(".pkl", "_2.pkl"))
print("saved [2/4]...")
torch.save(spec_data_ls[3000: 4500], save_to.replace(".pkl", "_3.pkl"))
print("saved [3/4]...")
torch.save(spec_data_ls[4500:], save_to.replace(".pkl", "_4.pkl"))
print("saved [4/4] \n DONE.")
else:
torch.save(spec_data_ls, save_to)
def gen_dataset(dir, func, method, pre_fix="pred_"):
files = [file for file in glob.glob(f"{dir}/*.pkl") if pre_fix in os.path.basename(file)]
for file in files:
# if "valid" in file or "test" in file:
# continue
if "spectrum" in str(func):
name = os.path.split(file)[-1].replace(pre_fix, "spectrum_").replace("output", "data")
elif "statistic" in str(func):
name = os.path.split(file)[-1].replace(pre_fix, "statistic_").replace("output", "data")
else:
raise ValueError(
"func used in this interface should be 'statistic' or 'spectrum' method"
)
save_to = os.path.join(os.path.dirname(file), name)
func(data_path=file, save_to=save_to, method=method)
if __name__ == "__main__":
os.chdir("..")
# dirs = glob.glob("datasets/stage_two/*_feature_output")
# for dir in dirs:
# gen_dataset(
# dir, # "datasets/stage_two/persemon_pred_output",
# func=gen_spectrum_data,
# method=select_pred_spectrum,
# pre_fix="feature_"
# )
gen_dataset(
"datasets/stage_two/deep_bimodal_reg_extract",
func=gen_spectrum_data,
method=select_pred_spectrum,
pre_fix="feature_"
) | 4,742 | 33.875 | 99 | py |
DeepPersonality | DeepPersonality-main/dpcv/exps_second_stage/train_net.py | import argparse
import os
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import torch.optim as optim
from trainer import SpectrumTrainer
from trainer import MLPTrainer
from dpcv.modeling.networks.statistic_model import StatisticMLP
from dpcv.data.datasets.second_stage_dataset import SpectrumData, StatisticData
from mlflow import log_metric, log_param, log_params
def args_parse():
parser = argparse.ArgumentParser()
parser.add_argument("--lr", default=0.1, type=float, help="learning rate")
parser.add_argument("--bs", default=64, type=int, help="batch size in training")
parser.add_argument("--max_epoch", default=3000, type=int, help="max training epochs")
parser.add_argument("--lr_scale_rate", default=0.1, type=float, help="learning rate scale")
parser.add_argument("--milestones", default=[1000, 1500], type=list, help="where to scale learning rate")
parser.add_argument("--output_dir", default="result_static", type=str, help="where to save training output")
args = parser.parse_args()
return args
def main(test_only=None):
log_param("exp", "swin")
args = args_parse()
log_params({"lr": args.lr, "epochs": args.max_epoch, "milestones": args.milestones, "bs": args.bs})
dataset = {
"train": "datasets/stage_two/swin_frame_pred_output/statistic_train_data.pkl",
"valid": "datasets/stage_two/swin_frame_pred_output/statistic_valid_data.pkl",
"test": "datasets/stage_two/swin_frame_pred_output/statistic_test_data.pkl",
}
train_data_loader = DataLoader(
StatisticData(dataset["train"]), batch_size=args.bs, shuffle=True,
# num_workers=4,
# StatisticData(dataset["train"]), batch_size = args.bs, shuffle = True
)
valid_data_loader = DataLoader(
StatisticData(dataset["valid"]), batch_size=args.bs, shuffle=False,
# num_workers=4,
# StatisticData(dataset["valid"]), batch_size = args.bs, shuffle = False
)
test_data_loader = DataLoader(
StatisticData(dataset["test"]), batch_size=args.bs, shuffle=False,
# num_workers=4,
# StatisticData(dataset["test"]), batch_size = args.bs
)
model = StatisticMLP().cuda()
# model = StatisticConv1D().cuda()
# model = SpectrumConv1D().cuda()
optimizer = optim.SGD(model.parameters(), lr=args.lr)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, gamma=args.lr_scale_rate, milestones=args.milestones)
# trainer = SpectrumTrainer(max_epo=args.max_epoch, output_dir=args.output_dir)
trainer = MLPTrainer(max_epo=args.max_epoch, output_dir=args.output_dir)
if not test_only:
for epo in range(args.max_epoch):
trainer.train(model, train_data_loader, optimizer, epo)
trainer.valid(model, valid_data_loader, epo)
scheduler.step()
acc = trainer.test(model, test_data_loader)
trainer.save_model(model, epo, acc)
else:
checkpoint = torch.load(test_only)
model.load_state_dict(checkpoint["model_state_dict"])
acc = trainer.test(model, test_data_loader)
if __name__ == "__main__":
os.chdir("..")
main("result_static/checkpoint_2999.pkl")
| 3,219 | 41.368421 | 112 | py |
DeepPersonality | DeepPersonality-main/dpcv/exps_second_stage/trainer.py | import torch
import os
import torch.nn as nn
from mlflow import log_metric, log_param, log_params
class MLPTrainer:
def __init__(self, max_epo, output_dir):
self.output_dir = output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
self.max_epo = max_epo
self.best_acc = 0
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# self.loss = nn.SmoothL1Loss() # nn.L1Loss() # nn.MSELoss()
self.loss = nn.MSELoss()
def train(self, model, data_loader, optimizer, epo):
model.train()
for i, data in enumerate(data_loader):
data, label = self.data_fmt(data)
output = model(data)
# output = model(data["statistic"].to(self.device))
loss = self.loss(output, label)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if i % 20 == 0:
acc_avg = (1 - torch.abs(output.cpu() - label.cpu())).mean().clip(min=0)
print(
"TRAINING: EPO[{:0>3}/{:0>3}] ITER[{:0>3}/{:0>3}] LOSS: {:.4f} ACC: {:.4f}".format(
epo, self.max_epo,
i, len(data_loader),
float(loss.item()), float(acc_avg)
)
)
def valid(self, model, data_loader, epo):
model.eval()
with torch.no_grad():
batch_acc_ls = []
for i, data in enumerate(data_loader):
data, label = self.data_fmt(data)
output = model(data)
# output = model(data["statistic"].to(self.device))
batch_acc_ls.append((1 - torch.abs(output.cpu() - label.cpu())).mean(dim=0))
epo_acc = torch.stack(batch_acc_ls, dim=0).mean().cpu().numpy()
log_metric("valid_acc", float(epo_acc * 100))
if epo_acc > self.best_acc:
self.best_acc = epo_acc
self.save_model(model, epo, self.best_acc)
print(
"VALID: EPO[{:0>3}/{:0>3}] ACC: {:.4f}".format(
epo, self.max_epo, epo_acc
)
)
def test(self, model, data_loader):
model.eval()
with torch.no_grad():
batch_acc_ls = []
for i, data in enumerate(data_loader):
data, label = self.data_fmt(data)
output = model(data)
batch_acc_ls.append((1 - torch.abs(output.cpu() - label.cpu())).mean(dim=0))
epo_acc = torch.stack(batch_acc_ls, dim=0).mean().numpy()
log_metric("test_acc", float(epo_acc * 100))
print("TEST: ACC: {:.4f}".format(epo_acc))
return epo_acc
def save_model(self, model, epoch, best_acc):
checkpoint = {
"model_state_dict": model.state_dict(),
"best_acc": best_acc
}
pkl_name = f"checkpoint_{epoch}.pkl"
path_checkpoint = os.path.join(self.output_dir, pkl_name)
torch.save(checkpoint, path_checkpoint)
def data_fmt(self, data):
return data["statistic"].to(self.device), data["label"].to(self.device)
class SpectrumTrainer(MLPTrainer):
def data_fmt(self, data):
return data["amp_spectrum"].to(self.device).type(torch.float32), data["label"].to(self.device)
| 3,387 | 35.042553 | 103 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/solver/lr_schedule.py | import torch.optim as optim
from .build import SOLVER_REGISTRY
@SOLVER_REGISTRY.register()
def multi_step_scale(cfg, optimizer):
return optim.lr_scheduler.MultiStepLR(optimizer, gamma=cfg.SOLVER.FACTOR, milestones=cfg.SOLVER.MILESTONE)
@SOLVER_REGISTRY.register()
def cosine_annealing(cfg, optimizer):
return optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=20, eta_min=0.0001)
@SOLVER_REGISTRY.register()
def exponential(cfg, optimizer):
return optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.95)
@SOLVER_REGISTRY.register()
def crnet_multi_step_scale(cfg, optimizer):
return optim.lr_scheduler.MultiStepLR(optimizer[1], gamma=cfg.SOLVER.FACTOR, milestones=cfg.SOLVER.MILESTONE)
| 714 | 28.791667 | 113 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/solver/optimize.py | import torch.optim as optim
from .build import SOLVER_REGISTRY
@SOLVER_REGISTRY.register()
def sgd(cfg, model):
return optim.SGD(
model.parameters(),
lr=cfg.SOLVER.LR_INIT,
momentum=cfg.SOLVER.MOMENTUM,
weight_decay=cfg.SOLVER.WEIGHT_DECAY
)
@SOLVER_REGISTRY.register()
def crnet_solver(cfg, model):
solver = cfg.SOLVER
optimizer_fir = optim.SGD(
model.parameters(), lr=solver.LR_INIT, momentum=solver.MOMENTUM, weight_decay=solver.WEIGHT_DECAY
)
optimizer_sec = optim.Adam(
model.parameters(), betas=(solver.BETA_1, solver.BETA_2), lr=solver.LR_INIT, weight_decay=solver.WEIGHT_DECAY
)
return [optimizer_fir, optimizer_sec]
@SOLVER_REGISTRY.register()
def adam(cfg, model):
return optim.Adam(model.parameters(), lr=cfg.SOLVER.LR_INIT,)
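# Hedged usage sketch (assumes the registry exposes an fvcore-style get(); the config field
# name below is illustrative, not guaranteed by this file):
#   optimizer = SOLVER_REGISTRY.get(cfg.SOLVER.NAME)(cfg, model)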
| 827 | 26.6 | 117 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/networks/slow_fast.py | """
code modified form https://github.com/facebookresearch/SlowFast.git
"""
import torch
import torch.nn as nn
from dpcv.modeling.module import weight_init_helper as init_helper
from functools import partial
from dpcv.modeling.module import stem_helper, resnet_helper
from .build import NETWORK_REGISTRY
from easydict import EasyDict
# Number of blocks for different stages given the model depth.
_MODEL_STAGE_DEPTH = {50: (3, 4, 6, 3), 101: (3, 4, 23, 3)}
# Basis of temporal kernel sizes for each of the stage.
_TEMPORAL_KERNEL_BASIS = {
"2d": [
[[1]], # conv1 temporal kernel.
[[1]], # res2 temporal kernel.
[[1]], # res3 temporal kernel.
[[1]], # res4 temporal kernel.
[[1]], # res5 temporal kernel.
],
"c2d": [
[[1]], # conv1 temporal kernel.
[[1]], # res2 temporal kernel.
[[1]], # res3 temporal kernel.
[[1]], # res4 temporal kernel.
[[1]], # res5 temporal kernel.
],
"c2d_nopool": [
[[1]], # conv1 temporal kernel.
[[1]], # res2 temporal kernel.
[[1]], # res3 temporal kernel.
[[1]], # res4 temporal kernel.
[[1]], # res5 temporal kernel.
],
"i3d": [
[[5]], # conv1 temporal kernel.
[[3]], # res2 temporal kernel.
[[3, 1]], # res3 temporal kernel.
[[3, 1]], # res4 temporal kernel.
[[1, 3]], # res5 temporal kernel.
],
"i3d_nopool": [
[[5]], # conv1 temporal kernel.
[[3]], # res2 temporal kernel.
[[3, 1]], # res3 temporal kernel.
[[3, 1]], # res4 temporal kernel.
[[1, 3]], # res5 temporal kernel.
],
"slow": [
[[1]], # conv1 temporal kernel.
[[1]], # res2 temporal kernel.
[[1]], # res3 temporal kernel.
[[3]], # res4 temporal kernel.
[[3]], # res5 temporal kernel.
],
"slowfast": [
[[1], [5]], # conv1 temporal kernel for slow and fast pathway.
[[1], [3]], # res2 temporal kernel for slow and fast pathway.
[[1], [3]], # res3 temporal kernel for slow and fast pathway.
[[3], [3]], # res4 temporal kernel for slow and fast pathway.
[[3], [3]], # res5 temporal kernel for slow and fast pathway.
],
"x3d": [
[[5]], # conv1 temporal kernels.
[[3]], # res2 temporal kernels.
[[3]], # res3 temporal kernels.
[[3]], # res4 temporal kernels.
[[3]], # res5 temporal kernels.
],
}
_POOL1 = {
"2d": [[1, 1, 1]],
"c2d": [[2, 1, 1]],
"c2d_nopool": [[1, 1, 1]],
"i3d": [[2, 1, 1]],
"i3d_nopool": [[1, 1, 1]],
"slow": [[1, 1, 1]],
"slowfast": [[1, 1, 1], [1, 1, 1]],
"x3d": [[1, 1, 1]],
}
slow_fast_cfg = EasyDict(
{
'NONLOCAL': {'GROUP': [[1, 1], [1, 1], [1, 1], [1, 1]],
'INSTANTIATION': 'dot_product',
'LOCATION': [[[], []], [[], []], [[], []], [[], []]],
'POOL': [[[1, 2, 2], [1, 2, 2]],
[[1, 2, 2], [1, 2, 2]],
[[1, 2, 2], [1, 2, 2]],
[[1, 2, 2], [1, 2, 2]]]},
'MODEL': {'ACT_CHECKPOINT': False,
'ARCH': 'slowfast',
'DROPCONNECT_RATE': 0.0,
'DROPOUT_RATE': 0.5,
'FC_INIT_STD': 0.01,
'HEAD_ACT': 'sigmoid',
'LOSS_FUNC': 'bce_logit',
'MODEL_NAME': 'SlowFast',
'MULTI_PATHWAY_ARCH': ['slowfast'],
'NUM_CLASSES': 5,
'SINGLE_PATHWAY_ARCH': ['2d', 'c2d', 'i3d', 'slow', 'x3d', 'mvit']},
'RESNET': {'DEPTH': 50,
'INPLACE_RELU': True,
'NUM_BLOCK_TEMP_KERNEL': [[3, 3], [4, 4], [6, 6], [3, 3]],
'NUM_GROUPS': 1,
'SPATIAL_DILATIONS': [[1, 1], [1, 1], [1, 1], [1, 1]],
'SPATIAL_STRIDES': [[1, 1], [2, 2], [2, 2], [2, 2]],
'STRIDE_1X1': False,
'TRANS_FUNC': 'bottleneck_transform',
'WIDTH_PER_GROUP': 64,
'ZERO_INIT_FINAL_BN': True},
'SLOWFAST': {'ALPHA': 4,
'BETA_INV': 8,
'FUSION_CONV_CHANNEL_RATIO': 2,
'FUSION_KERNEL_SZ': 7},
'DATA': {'DECODING_BACKEND': 'pyav',
'ENSEMBLE_METHOD': 'max',
'INPUT_CHANNEL_NUM': [3, 3],
'INV_UNIFORM_SAMPLE': True,
'MEAN': [0.45, 0.45, 0.45],
'MULTI_LABEL': True,
'NUM_FRAMES': 64,
'PATH_LABEL_SEPARATOR': ' ',
'PATH_PREFIX': '../datasets/Charades_v1_rgb',
'PATH_TO_DATA_DIR': '../datasets',
'PATH_TO_PRELOAD_IMDB': '',
'RANDOM_FLIP': True,
'REVERSE_INPUT_CHANNEL': True,
'SAMPLING_RATE': 2,
'STD': [0.225, 0.225, 0.225],
'TARGET_FPS': 30,
'TEST_CROP_SIZE': 256,
'TRAIN_CROP_SIZE': 224,
'TRAIN_JITTER_ASPECT_RELATIVE': [],
'TRAIN_JITTER_MOTION_SHIFT': False,
'TRAIN_JITTER_SCALES': [256, 340],
'TRAIN_JITTER_SCALES_RELATIVE': [],
'TRAIN_PCA_EIGVAL': [0.225, 0.224, 0.229],
'TRAIN_PCA_EIGVEC': [[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.814],
[-0.5836, -0.6948, 0.4203]],
'USE_OFFSET_SAMPLING': False},
'MULTIGRID': {'BN_BASE_SIZE': 8,
'DEFAULT_B': 0,
'DEFAULT_S': 0,
'DEFAULT_T': 0,
'EPOCH_FACTOR': 1.5,
'EVAL_FREQ': 3,
'LONG_CYCLE': False,
'LONG_CYCLE_FACTORS': [(0.25, 0.7071067811865476),
(0.5, 0.7071067811865476),
(0.5, 1),
(1, 1)],
'LONG_CYCLE_SAMPLING_RATE': 0,
'SHORT_CYCLE': False,
'SHORT_CYCLE_FACTORS': [0.5, 0.7071067811865476]},
}
)
class FuseFastToSlow(nn.Module):
"""
Fuses the information from the Fast pathway to the Slow pathway. Given the
tensors from Slow pathway and Fast pathway, fuse information from Fast to
Slow, then return the fused tensors from Slow and Fast pathway in order.
"""
def __init__(
self,
dim_in,
fusion_conv_channel_ratio,
fusion_kernel,
alpha,
eps=1e-5,
bn_mmt=0.1,
inplace_relu=True,
norm_module=nn.BatchNorm3d,
):
"""
Args:
dim_in (int): the channel dimension of the input.
fusion_conv_channel_ratio (int): channel ratio for the convolution
used to fuse from Fast pathway to Slow pathway.
fusion_kernel (int): kernel size of the convolution used to fuse
from Fast pathway to Slow pathway.
alpha (int): the frame rate ratio between the Fast and Slow pathway.
eps (float): epsilon for batch norm.
bn_mmt (float): momentum for batch norm. Noted that BN momentum in
PyTorch = 1 - BN momentum in Caffe2.
inplace_relu (bool): if True, calculate the relu on the original
input without allocating new memory.
norm_module (nn.Module): nn.Module for the normalization layer. The
default is nn.BatchNorm3d.
"""
super(FuseFastToSlow, self).__init__()
self.conv_f2s = nn.Conv3d(
dim_in,
dim_in * fusion_conv_channel_ratio,
kernel_size=[fusion_kernel, 1, 1],
stride=[alpha, 1, 1],
padding=[fusion_kernel // 2, 0, 0],
bias=False,
)
self.bn = norm_module(
num_features=dim_in * fusion_conv_channel_ratio,
eps=eps,
momentum=bn_mmt,
)
self.relu = nn.ReLU(inplace_relu)
def forward(self, x):
x_s = x[0]
x_f = x[1]
fuse = self.conv_f2s(x_f)
fuse = self.bn(fuse)
fuse = self.relu(fuse)
x_s_fuse = torch.cat([x_s, fuse], 1)
return [x_s_fuse, x_f]
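# Worked shape example (using the default config above: ALPHA=4, BETA_INV=8,
# FUSION_CONV_CHANNEL_RATIO=2): with a slow input (N, C, T/4, H, W) and a fast input
# (N, C/8, T, H, W), conv_f2s maps the fast path to (N, C/4, T/4, H, W) via its temporal
# stride of ALPHA, so it can be concatenated with the slow path along the channel dim.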
class SubBatchNorm3d(nn.Module):
"""
The standard BN layer computes stats across all examples in a GPU. In some
cases it is desirable to compute stats across only a subset of examples
(e.g., in multigrid training https://arxiv.org/abs/1912.00998).
SubBatchNorm3d splits the batch dimension into N splits, and run BN on
each of them separately (so that the stats are computed on each subset of
examples (1/N of batch) independently. During evaluation, it aggregates
the stats from all splits into one BN.
"""
def __init__(self, num_splits, **args):
"""
Args:
num_splits (int): number of splits.
args (list): other arguments.
"""
super(SubBatchNorm3d, self).__init__()
self.num_splits = num_splits
num_features = args["num_features"]
# Keep only one set of weight and bias.
if args.get("affine", True):
self.affine = True
args["affine"] = False
self.weight = torch.nn.Parameter(torch.ones(num_features))
self.bias = torch.nn.Parameter(torch.zeros(num_features))
else:
self.affine = False
self.bn = nn.BatchNorm3d(**args)
args["num_features"] = num_features * num_splits
self.split_bn = nn.BatchNorm3d(**args)
def _get_aggregated_mean_std(self, means, stds, n):
"""
Calculate the aggregated mean and stds.
Args:
means (tensor): mean values.
stds (tensor): standard deviations.
n (int): number of sets of means and stds.
"""
mean = means.view(n, -1).sum(0) / n
std = (
stds.view(n, -1).sum(0) / n
+ ((means.view(n, -1) - mean) ** 2).view(n, -1).sum(0) / n
)
return mean.detach(), std.detach()
def aggregate_stats(self):
"""
Synchronize running_mean, and running_var. Call this before eval.
"""
if self.split_bn.track_running_stats:
(
self.bn.running_mean.data,
self.bn.running_var.data,
) = self._get_aggregated_mean_std(
self.split_bn.running_mean,
self.split_bn.running_var,
self.num_splits,
)
def forward(self, x):
if self.training:
n, c, t, h, w = x.shape
x = x.view(n // self.num_splits, c * self.num_splits, t, h, w)
x = self.split_bn(x)
x = x.view(n, c, t, h, w)
else:
x = self.bn(x)
if self.affine:
x = x * self.weight.view((-1, 1, 1, 1))
x = x + self.bias.view((-1, 1, 1, 1))
return x
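# Hedged usage note: when SubBatchNorm3d is used (multigrid training), aggregate_stats() is
# expected to be called on every such module before switching to eval so the per-split
# statistics are folded back into the single BN, e.g.
#   for m in model.modules():
#       if isinstance(m, SubBatchNorm3d):
#           m.aggregate_stats()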
class ResNetBasicHead(nn.Module):
"""
ResNe(X)t 3D head.
This layer performs a fully-connected projection during training, when the
input size is 1x1x1. It performs a convolutional projection during testing
when the input size is larger than 1x1x1. If the inputs are from multiple
different pathways, the inputs will be concatenated after pooling.
"""
def __init__(
self,
dim_in,
num_classes,
pool_size,
dropout_rate=0.0,
act_func="softmax",
):
"""
The `__init__` method of any subclass should also contain these
arguments.
ResNetBasicHead takes p pathways as input where p in [1, infty].
Args:
dim_in (list): the list of channel dimensions of the p inputs to the
ResNetHead.
num_classes (int): the channel dimensions of the p outputs to the
ResNetHead.
pool_size (list): the list of kernel sizes of p spatial temporal
poolings, temporal pool kernel size, spatial pool kernel size,
spatial pool kernel size in order.
dropout_rate (float): dropout rate. If equal to 0.0, perform no
dropout.
act_func (string): activation function to use. 'softmax': applies
softmax on the output. 'sigmoid': applies sigmoid on the output.
"""
super(ResNetBasicHead, self).__init__()
assert (
len({len(pool_size), len(dim_in)}) == 1
), "pathway dimensions are not consistent."
self.num_pathways = len(pool_size)
for pathway in range(self.num_pathways):
if pool_size[pathway] is None:
avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
else:
avg_pool = nn.AvgPool3d(pool_size[pathway], stride=1)
self.add_module("pathway{}_avgpool".format(pathway), avg_pool)
if dropout_rate > 0.0:
self.dropout = nn.Dropout(dropout_rate)
# Perform FC in a fully convolutional manner. The FC layer will be
# initialized with a different std comparing to convolutional layers.
self.projection = nn.Linear(sum(dim_in), num_classes, bias=True)
# Softmax for evaluation and testing.
if act_func == "softmax":
self.act = nn.Softmax(dim=4)
elif act_func == "sigmoid":
self.act = nn.Sigmoid()
else:
raise NotImplementedError(
"{} is not supported as an activation"
"function.".format(act_func)
)
def forward(self, inputs):
assert (
len(inputs) == self.num_pathways
), "Input tensor does not contain {} pathway".format(self.num_pathways)
pool_out = []
for pathway in range(self.num_pathways):
m = getattr(self, "pathway{}_avgpool".format(pathway))
pool_out.append(m(inputs[pathway]))
x = torch.cat(pool_out, 1)
# (N, C, T, H, W) -> (N, T, H, W, C).
x = x.permute((0, 2, 3, 4, 1))
# Perform dropout.
if hasattr(self, "dropout"):
x = self.dropout(x)
x = self.projection(x)
        # Performs fully convolutional inference.
if not self.training:
x = self.act(x)
x = x.mean([1, 2, 3])
x = x.view(x.shape[0], -1)
return x
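# Shape walkthrough (hedged, assuming the SlowFast configuration above with NUM_CLASSES=5):
# the two pathway tensors are average-pooled to (N, C_i, 1, 1, 1), concatenated and permuted
# to (N, 1, 1, 1, C_slow + C_fast), then projected to (N, 1, 1, 1, 5); at eval time the sigmoid
# head activation is applied and the temporal/spatial dims are averaged away, giving (N, 5).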
def get_norm(cfg):
"""
Args:
cfg (CfgNode): model building configs, details are in the comments of
the config file.
Returns:
nn.Module: the normalization layer.
"""
if cfg.BN.NORM_TYPE == "batchnorm":
return nn.BatchNorm3d
elif cfg.BN.NORM_TYPE == "sub_batchnorm":
        return partial(SubBatchNorm3d, num_splits=cfg.BN.NUM_SPLITS)
    raise NotImplementedError("Norm type {} is not supported".format(cfg.BN.NORM_TYPE))
class SlowFast(nn.Module):
"""
SlowFast model builder for SlowFast network.
Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He.
"SlowFast networks for video recognition."
https://arxiv.org/pdf/1812.03982.pdf
"""
def __init__(self, cfg):
"""
The `__init__` method of any subclass should also contain these
arguments.
Args:
cfg (CfgNode): model building configs, details are in the
comments of the config file.
"""
super(SlowFast, self).__init__()
self.norm_module = nn.BatchNorm3d
self.enable_detection = False
self.num_pathways = 2
self._construct_network(cfg)
init_helper.init_weights(
self, cfg.MODEL.FC_INIT_STD, cfg.RESNET.ZERO_INIT_FINAL_BN
)
def _construct_network(self, cfg):
"""
Builds a SlowFast model. The first pathway is the Slow pathway and the
second pathway is the Fast pathway.
Args:
cfg (CfgNode): model building configs, details are in the
comments of the config file.
"""
assert cfg.MODEL.ARCH in _POOL1.keys()
pool_size = [[1, 1, 1], [1, 1, 1]]
assert len({len(pool_size), self.num_pathways}) == 1
assert cfg.RESNET.DEPTH in _MODEL_STAGE_DEPTH.keys()
(d2, d3, d4, d5) = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH] # (3, 4, 6, 3)
num_groups = cfg.RESNET.NUM_GROUPS # 1
width_per_group = cfg.RESNET.WIDTH_PER_GROUP # 64
dim_inner = num_groups * width_per_group
out_dim_ratio = (
cfg.SLOWFAST.BETA_INV // cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO
)
temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH]
self.s1 = stem_helper.VideoModelStem(
dim_in=cfg.DATA.INPUT_CHANNEL_NUM,
dim_out=[width_per_group, width_per_group // cfg.SLOWFAST.BETA_INV],
kernel=[temp_kernel[0][0] + [7, 7], temp_kernel[0][1] + [7, 7]],
stride=[[1, 2, 2]] * 2,
padding=[
[temp_kernel[0][0][0] // 2, 3, 3],
[temp_kernel[0][1][0] // 2, 3, 3],
],
norm_module=self.norm_module,
)
self.s1_fuse = FuseFastToSlow(
width_per_group // cfg.SLOWFAST.BETA_INV,
cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO,
cfg.SLOWFAST.FUSION_KERNEL_SZ,
cfg.SLOWFAST.ALPHA,
norm_module=self.norm_module,
)
self.s2 = resnet_helper.ResStage(
dim_in=[
width_per_group + width_per_group // out_dim_ratio,
width_per_group // cfg.SLOWFAST.BETA_INV,
],
dim_out=[
width_per_group * 4,
width_per_group * 4 // cfg.SLOWFAST.BETA_INV,
],
dim_inner=[dim_inner, dim_inner // cfg.SLOWFAST.BETA_INV],
temp_kernel_sizes=temp_kernel[1],
stride=cfg.RESNET.SPATIAL_STRIDES[0],
num_blocks=[d2] * 2,
num_groups=[num_groups] * 2,
num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[0],
nonlocal_inds=cfg.NONLOCAL.LOCATION[0],
nonlocal_group=cfg.NONLOCAL.GROUP[0],
nonlocal_pool=cfg.NONLOCAL.POOL[0],
instantiation=cfg.NONLOCAL.INSTANTIATION,
trans_func_name=cfg.RESNET.TRANS_FUNC,
dilation=cfg.RESNET.SPATIAL_DILATIONS[0],
norm_module=self.norm_module,
)
self.s2_fuse = FuseFastToSlow(
width_per_group * 4 // cfg.SLOWFAST.BETA_INV,
cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO,
cfg.SLOWFAST.FUSION_KERNEL_SZ,
cfg.SLOWFAST.ALPHA,
norm_module=self.norm_module,
)
for pathway in range(self.num_pathways):
pool = nn.MaxPool3d(
kernel_size=pool_size[pathway],
stride=pool_size[pathway],
padding=[0, 0, 0],
)
self.add_module("pathway{}_pool".format(pathway), pool)
self.s3 = resnet_helper.ResStage(
dim_in=[
width_per_group * 4 + width_per_group * 4 // out_dim_ratio,
width_per_group * 4 // cfg.SLOWFAST.BETA_INV,
],
dim_out=[
width_per_group * 8,
width_per_group * 8 // cfg.SLOWFAST.BETA_INV,
],
dim_inner=[dim_inner * 2, dim_inner * 2 // cfg.SLOWFAST.BETA_INV],
temp_kernel_sizes=temp_kernel[2],
stride=cfg.RESNET.SPATIAL_STRIDES[1],
num_blocks=[d3] * 2,
num_groups=[num_groups] * 2,
num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[1],
nonlocal_inds=cfg.NONLOCAL.LOCATION[1],
nonlocal_group=cfg.NONLOCAL.GROUP[1],
nonlocal_pool=cfg.NONLOCAL.POOL[1],
instantiation=cfg.NONLOCAL.INSTANTIATION,
trans_func_name=cfg.RESNET.TRANS_FUNC,
dilation=cfg.RESNET.SPATIAL_DILATIONS[1],
norm_module=self.norm_module,
)
self.s3_fuse = FuseFastToSlow(
width_per_group * 8 // cfg.SLOWFAST.BETA_INV,
cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO,
cfg.SLOWFAST.FUSION_KERNEL_SZ,
cfg.SLOWFAST.ALPHA,
norm_module=self.norm_module,
)
self.s4 = resnet_helper.ResStage(
dim_in=[
width_per_group * 8 + width_per_group * 8 // out_dim_ratio,
width_per_group * 8 // cfg.SLOWFAST.BETA_INV,
],
dim_out=[
width_per_group * 16,
width_per_group * 16 // cfg.SLOWFAST.BETA_INV,
],
dim_inner=[dim_inner * 4, dim_inner * 4 // cfg.SLOWFAST.BETA_INV],
temp_kernel_sizes=temp_kernel[3],
stride=cfg.RESNET.SPATIAL_STRIDES[2],
num_blocks=[d4] * 2,
num_groups=[num_groups] * 2,
num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[2],
nonlocal_inds=cfg.NONLOCAL.LOCATION[2],
nonlocal_group=cfg.NONLOCAL.GROUP[2],
nonlocal_pool=cfg.NONLOCAL.POOL[2],
instantiation=cfg.NONLOCAL.INSTANTIATION,
trans_func_name=cfg.RESNET.TRANS_FUNC,
dilation=cfg.RESNET.SPATIAL_DILATIONS[2],
norm_module=self.norm_module,
)
self.s4_fuse = FuseFastToSlow(
width_per_group * 16 // cfg.SLOWFAST.BETA_INV,
cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO,
cfg.SLOWFAST.FUSION_KERNEL_SZ,
cfg.SLOWFAST.ALPHA,
norm_module=self.norm_module,
)
self.s5 = resnet_helper.ResStage(
dim_in=[
width_per_group * 16 + width_per_group * 16 // out_dim_ratio,
width_per_group * 16 // cfg.SLOWFAST.BETA_INV,
],
dim_out=[
width_per_group * 32,
width_per_group * 32 // cfg.SLOWFAST.BETA_INV,
],
dim_inner=[dim_inner * 8, dim_inner * 8 // cfg.SLOWFAST.BETA_INV],
temp_kernel_sizes=temp_kernel[4],
stride=cfg.RESNET.SPATIAL_STRIDES[3],
num_blocks=[d5] * 2,
num_groups=[num_groups] * 2,
num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[3],
nonlocal_inds=cfg.NONLOCAL.LOCATION[3],
nonlocal_group=cfg.NONLOCAL.GROUP[3],
nonlocal_pool=cfg.NONLOCAL.POOL[3],
instantiation=cfg.NONLOCAL.INSTANTIATION,
trans_func_name=cfg.RESNET.TRANS_FUNC,
dilation=cfg.RESNET.SPATIAL_DILATIONS[3],
norm_module=self.norm_module,
)
self.head = ResNetBasicHead(
dim_in=[
width_per_group * 32,
width_per_group * 32 // cfg.SLOWFAST.BETA_INV,
],
num_classes=cfg.MODEL.NUM_CLASSES,
pool_size=[None, None]
if cfg.MULTIGRID.SHORT_CYCLE
else [
[
cfg.DATA.NUM_FRAMES
// cfg.SLOWFAST.ALPHA
// pool_size[0][0],
cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[0][1],
cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[0][2],
],
[
cfg.DATA.NUM_FRAMES // pool_size[1][0],
cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[1][1],
cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[1][2],
],
], # None for AdaptiveAvgPool3d((1, 1, 1))
dropout_rate=cfg.MODEL.DROPOUT_RATE,
act_func=cfg.MODEL.HEAD_ACT,
)
def forward(self, x):
x = self.s1(x)
x = self.s1_fuse(x)
x = self.s2(x)
x = self.s2_fuse(x)
for pathway in range(self.num_pathways):
pool = getattr(self, "pathway{}_pool".format(pathway))
x[pathway] = pool(x[pathway])
x = self.s3(x)
x = self.s3_fuse(x)
x = self.s4(x)
x = self.s4_fuse(x)
x = self.s5(x)
x = self.head(x)
return x
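# Note on SlowFast.forward above: the input x is a list with one tensor per pathway,
# [slow_clip, fast_clip]; the fast pathway carries cfg.SLOWFAST.ALPHA times more frames than
# the slow one (the demo below feeds 16 and 64 frames, which would correspond to ALPHA = 4).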
def get_slow_fast_model():
slow_fast_model = SlowFast(slow_fast_cfg)
return slow_fast_model.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
@NETWORK_REGISTRY.register()
def slow_fast_model(cfg=None):
return SlowFast(slow_fast_cfg).to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
if __name__ == "__main__":
model = SlowFast(slow_fast_cfg)
# print(slow_fast_model)
xin_1 = torch.randn(2, 3, 16, 224, 224)
xin_2 = torch.randn(2, 3, 64, 224, 224)
x = [xin_1, xin_2]
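    # xin_1 is the slow-pathway clip (16 frames) and xin_2 the fast-pathway clip (64 frames);
    # the two pathways must be packed in a list in this order.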
y = model(x)
print(y.shape)
| 24,983 | 36.797277 | 106 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/networks/sphereface_net.py | """
code modified from https://github.com/clcarwin/sphereface_pytorch.git
"""
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from torch.nn import Parameter
import math
from dpcv.modeling.module.weight_init_helper import initialize_weights
from .build import NETWORK_REGISTRY
class SphereFEM(nn.Module):
def __init__(self, pre_trained=None):
super(SphereFEM, self).__init__()
self.pre_trained = pre_trained
        # input = B*3*112*112 here (fc5 below expects a 512*7*7 feature map); the width values
        # in the layer comments below (*48, *24, *12, *6) assume the original 112*96 SphereFace input
self.conv1_1 = nn.Conv2d(3, 64, 3, 2, 1) # =>B*64*56*48
self.relu1_1 = nn.PReLU(64)
self.conv1_2 = nn.Conv2d(64, 64, 3, 1, 1)
self.relu1_2 = nn.PReLU(64)
self.conv1_3 = nn.Conv2d(64, 64, 3, 1, 1)
self.relu1_3 = nn.PReLU(64)
self.conv2_1 = nn.Conv2d(64, 128, 3, 2, 1) # =>B*128*28*24
self.relu2_1 = nn.PReLU(128)
self.conv2_2 = nn.Conv2d(128, 128, 3, 1, 1)
self.relu2_2 = nn.PReLU(128)
self.conv2_3 = nn.Conv2d(128, 128, 3, 1, 1)
self.relu2_3 = nn.PReLU(128)
self.conv2_4 = nn.Conv2d(128, 128, 3, 1, 1) # =>B*128*28*24
self.relu2_4 = nn.PReLU(128)
self.conv2_5 = nn.Conv2d(128, 128, 3, 1, 1)
self.relu2_5 = nn.PReLU(128)
self.conv3_1 = nn.Conv2d(128, 256, 3, 2, 1) # =>B*256*14*12
self.relu3_1 = nn.PReLU(256)
self.conv3_2 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_2 = nn.PReLU(256)
self.conv3_3 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_3 = nn.PReLU(256)
self.conv3_4 = nn.Conv2d(256, 256, 3, 1, 1) # =>B*256*14*12
self.relu3_4 = nn.PReLU(256)
self.conv3_5 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_5 = nn.PReLU(256)
self.conv3_6 = nn.Conv2d(256, 256, 3, 1, 1) # =>B*256*14*12
self.relu3_6 = nn.PReLU(256)
self.conv3_7 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_7 = nn.PReLU(256)
self.conv3_8 = nn.Conv2d(256, 256, 3, 1, 1) # =>B*256*14*12
self.relu3_8 = nn.PReLU(256)
self.conv3_9 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_9 = nn.PReLU(256)
self.conv4_1 = nn.Conv2d(256, 512, 3, 2, 1) # =>B*512*7*6
self.relu4_1 = nn.PReLU(512)
self.conv4_2 = nn.Conv2d(512, 512, 3, 1, 1)
self.relu4_2 = nn.PReLU(512)
self.conv4_3 = nn.Conv2d(512, 512, 3, 1, 1)
self.relu4_3 = nn.PReLU(512)
self.fc5 = nn.Linear(512 * 7 * 7, 512)
if pre_trained:
self.load_pre_trained_model()
def forward(self, x):
x = self.relu1_1(self.conv1_1(x))
x = x + self.relu1_3(self.conv1_3(self.relu1_2(self.conv1_2(x))))
x = self.relu2_1(self.conv2_1(x))
x = x + self.relu2_3(self.conv2_3(self.relu2_2(self.conv2_2(x))))
x = x + self.relu2_5(self.conv2_5(self.relu2_4(self.conv2_4(x))))
x = self.relu3_1(self.conv3_1(x))
x = x + self.relu3_3(self.conv3_3(self.relu3_2(self.conv3_2(x))))
x = x + self.relu3_5(self.conv3_5(self.relu3_4(self.conv3_4(x))))
x = x + self.relu3_7(self.conv3_7(self.relu3_6(self.conv3_6(x))))
x = x + self.relu3_9(self.conv3_9(self.relu3_8(self.conv3_8(x))))
x = self.relu4_1(self.conv4_1(x))
x = x + self.relu4_3(self.conv4_3(self.relu4_2(self.conv4_2(x))))
x = x.view(x.size(0), -1)
x = self.fc5(x)
return x
def load_pre_trained_model(self):
pass
class PersEmoN(nn.Module):
def __init__(self, feature_extractor, init_weights=True, return_feature=False):
super(PersEmoN, self).__init__()
self.return_feature = return_feature
self.efm = feature_extractor
self.pam = nn.Linear(512, 5)
self.eam = nn.Linear(512, 2)
self.ram = nn.Sequential(
nn.Linear(2, 128),
nn.ReLU(),
nn.Linear(128, 5),
)
self.data_classifier = nn.Linear(512, 2)
if init_weights:
initialize_weights(self)
def forward(self, x_p, x_e):
x_p = self.efm(x_p)
p_coherence = F.softmax(self.data_classifier(x_p), 1)
p_score = self.pam(x_p)
p_score = torch.sigmoid(p_score)
x_e = self.efm(x_e)
e_coherence = F.softmax(self.data_classifier(x_e), 1)
e_score = self.eam(x_e)
x_ep = self.ram(e_score)
e_score = torch.tanh(e_score)
if self.return_feature:
return p_score, p_coherence, e_score, e_coherence, x_ep, x_p
return p_score, p_coherence, e_score, e_coherence, x_ep
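# Notes on PersEmoN above (descriptive): a shared feature extractor (efm) feeds four heads.
# pam predicts the 5 personality traits (sigmoid, values in (0, 1)), eam predicts 2 emotion
# dimensions (tanh, values in (-1, 1)), ram maps the raw 2-d emotion scores into the 5-trait
# space, and data_classifier outputs a 2-way softmax over the two data sources, presumably
# used for the coherence terms of the PersEmoN loss.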
def get_pers_emo_model():
multi_modal_model = PersEmoN(SphereFEM())
multi_modal_model.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
return multi_modal_model
@NETWORK_REGISTRY.register()
def pers_emo_model(cfg=None):
multi_modal_model = PersEmoN(SphereFEM(), return_feature=cfg.MODEL.RETURN_FEATURE)
multi_modal_model.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
return multi_modal_model
if __name__ == "__main__":
fem = PersEmoN(SphereFEM())
inputs_p = torch.randn((100, 3, 112, 112))
inputs_e = torch.randn((100, 3, 112, 112))
out = fem(inputs_p, inputs_e)
for item in out:
print(item.shape)
# print(out[1])
| 5,292 | 33.594771 | 93 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/networks/statistic_model.py | import torch
import torch.nn as nn
from .build import NETWORK_REGISTRY
class StatisticMLP(nn.Module):
def __init__(self, hidden_units=256):
super(StatisticMLP, self).__init__()
# log_param("hidden_units", hidden_units)
self.input_layer = nn.Linear(12 * 5, hidden_units)
self.relu_1 = nn.ReLU()
self.hidden_layer_1 = nn.Linear(hidden_units, hidden_units)
self.relu_2 = nn.ReLU()
self.hidden_layer_2 = nn.Linear(hidden_units, int(hidden_units / 4))
self.relu_3 = nn.ReLU()
# self.dropout = nn.Dropout(0.5)
self.output_layer = nn.Linear(int(hidden_units / 4), 5)
# log_param("model", "MLP")
def forward(self, x):
x = x.reshape(-1, 60)
x = self.input_layer(x)
x = self.relu_1(x)
x = self.hidden_layer_1(x)
x = self.relu_2(x)
x = self.hidden_layer_2(x)
x = self.relu_3(x)
# x = self.dropout(x)
x = self.output_layer(x)
return x
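# Note on StatisticMLP above: the input must be reshapeable to (batch, 60), presumably 12
# statistical descriptors for each of the 5 traits (12 * 5 = 60); the mapping is then
# 60 -> hidden_units -> hidden_units -> hidden_units / 4 -> 5.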
class StatisticConv1D(nn.Module):
def __init__(self):
super(StatisticConv1D, self).__init__()
self.conv1 = nn.Conv1d(in_channels=1, out_channels=64, kernel_size=(1, 12))
self.relu1 = nn.ReLU()
self.conv2 = nn.Conv1d(in_channels=64, out_channels=256, kernel_size=(1, 1))
self.relu2 = nn.ReLU()
self.conv3 = nn.Conv1d(in_channels=256, out_channels=64, kernel_size=(1, 1))
self.relu3 = nn.ReLU()
self.conv4 = nn.Conv1d(in_channels=64, out_channels=1, kernel_size=(1, 1))
# log_param("model", "Conv1d:conv4")
def forward(self, x):
x = x[..., None]
x = x.permute(0, 3, 2, 1) # (bs, 1, 5, 12)
x = self.conv1(x) # (bs, 64, 5, 1)
x = self.relu1(x)
        x = self.conv2(x)  # (bs, 256, 5, 1)
        x = self.relu2(x)
        x = self.conv3(x)  # (bs, 64, 5, 1)
x = self.relu3(x)
x = self.conv4(x)
x = x.squeeze(1).squeeze() # (bs, 5)
return x
@NETWORK_REGISTRY.register()
def statistic_mlp(cfg):
model = StatisticMLP()
return model.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
| 2,188 | 33.203125 | 88 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/networks/dan.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .build import NETWORK_REGISTRY
from dpcv.modeling.module.weight_init_helper import initialize_weights
# import torch.utils.model_zoo as model_zoo
backbone = {
'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
class DAN(nn.Module):
def __init__(self, features, num_classes=5, init_weights=True, return_feature=False):
super(DAN, self).__init__()
self.features = features
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
self.maxpool = nn.AdaptiveMaxPool2d((7, 7))
self.linear_1 = nn.Linear(50176, 1024)
self.leaky_relu = nn.LeakyReLU(inplace=True)
self.dropout = nn.Dropout(0.5)
self.linear_2 = nn.Linear(1024, num_classes)
self.return_feature = return_feature
if init_weights:
# self._initialize_weights()
initialize_weights(self)
def forward(self, x):
x = self.features(x)
x1 = self.avgpool(x)
x1 = F.normalize(x1, p=2, dim=1)
x2 = self.maxpool(x)
x2 = F.normalize(x2, p=2, dim=1)
x = torch.cat([x1, x2], dim=1)
x = x.view(x.size(0), -1)
x = self.linear_1(x)
feat = self.leaky_relu(x)
        x = self.dropout(feat)  # dropout to improve generalization
        x = self.linear_2(x)  # a second linear layer and activation for a stronger nonlinear mapping
x = torch.sigmoid(x)
if self.return_feature:
return x, feat
return x
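    # Note on the 50176 input size of linear_1: the VGG16 backbone ends with 512 channels, and
    # both the 7x7 adaptive average pool and the 7x7 adaptive max pool are kept and concatenated,
    # giving 2 * 512 * 7 * 7 = 50176 features per image.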
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
class AudLinearRegressor(nn.Module):
def __init__(self, input_units=79534):
super(AudLinearRegressor, self).__init__()
self.linear = nn.Linear(input_units, 5)
self._init_weight()
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.linear(x)
x = torch.sigmoid(x)
x = x.squeeze(1).squeeze(1)
return x
@NETWORK_REGISTRY.register()
def get_aud_linear_regressor(cfg=None):
model = AudLinearRegressor()
model.to(device=(torch.device("cuda" if torch.cuda.is_available() else "cpu")))
return model
@NETWORK_REGISTRY.register()
def get_true_personality_aud_linear_regressor(cfg=None):
model = AudLinearRegressor(input_units=244832)
model.to(device=(torch.device("cuda" if torch.cuda.is_available() else "cpu")))
return model
@NETWORK_REGISTRY.register()
def dan_model(cfg):
kwargs = {'init_weights': True}
kwargs["return_feature"] = cfg.MODEL.RETURN_FEATURE
if cfg.MODEL.PRETRAIN:
kwargs["init_weights"] = False
dan = DAN(make_layers(backbone['VGG16'], batch_norm=True), **kwargs)
if cfg.MODEL.PRETRAIN:
print("load pretained model weights")
pretrained_dict = torch.load("../pre_trained_weights/vgg16_bn-6c64b313.pth")
model_dict = dan.state_dict()
# 1. filter out unnecessary keys -------------------------------------------------------------------------------
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
# 2. overwrite entries in the existing state dict --------------------------------------------------------------
model_dict.update(pretrained_dict)
dan.load_state_dict(model_dict)
# model.load_state_dict(model_zoo.load_url(model_urls['vgg16']))
dan.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
return dan
def get_model(pretrained=False, **kwargs):
"""DAN 16-layer model (configuration "VGG16")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
dan = DAN(make_layers(backbone['VGG16'], batch_norm=True), **kwargs)
if pretrained:
pretrained_dict = torch.load("../pre_trained_weights/vgg16_bn-6c64b313.pth")
model_dict = dan.state_dict()
# 1. filter out unnecessary keys -------------------------------------------------------------------------------
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
# 2. overwrite entries in the existing state dict --------------------------------------------------------------
model_dict.update(pretrained_dict)
dan.load_state_dict(model_dict)
# model.load_state_dict(model_zoo.load_url(model_urls['vgg16']))
dan.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
return dan
if __name__ == "__main__":
model = get_aud_linear_regressor()
x = torch.randn(2, 79534).cuda()
y = model(x)
print(y.shape)
# model = get_dan_model(pretrained=True)
# x = torch.randn(2, 3, 244, 244).cuda()
# y = model(x)
# print(y, y.shape)
"""
questions:
1) concatenate or add the two pooled features? (if we add instead, more weights are saved)
2) hidden layers 50176 --> 1024 --> 5, is the mapping efficient?
3) L2 norm vs batch norm
4) dropout or not
5) freeze batch norm or not when training
6) pre-trained models from imagenet or face-net
"""
| 6,604 | 35.694444 | 120 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/networks/swin_transformer.py | # --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
#
# code modified from https://github.com/microsoft/Swin-Transformer.git
# Note that: added sigmoid function for model output
# since personality prediction value range from (0, 1)
# --------------------------------------------------------
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from easydict import EasyDict as CN
from dpcv.modeling.module.weight_init_helper import initialize_weights
from .build import NETWORK_REGISTRY
def swin_config():
_C = CN()
_C.DATA = CN()
# Input image size
_C.DATA.IMG_SIZE = 224
_C.MODEL = CN()
# Model type
_C.MODEL.TYPE = 'swin'
# Model name
_C.MODEL.NAME = 'swin_small_patch4_window7_224'
# Checkpoint to resume, could be overwritten by command line argument
_C.MODEL.RESUME = ''
# Number of classes, overwritten in data preparation
_C.MODEL.NUM_CLASSES = 5
# Dropout rate
_C.MODEL.DROP_RATE = 0.0
# Drop path rate
_C.MODEL.DROP_PATH_RATE = 0.3
# Label Smoothing
_C.MODEL.LABEL_SMOOTHING = 0.1
# Swin Transformer parameters
_C.MODEL.SWIN = CN()
_C.MODEL.SWIN.PATCH_SIZE = 4
_C.MODEL.SWIN.IN_CHANS = 3
_C.MODEL.SWIN.EMBED_DIM = 96
_C.MODEL.SWIN.DEPTHS = [2, 2, 18, 2]
_C.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]
_C.MODEL.SWIN.WINDOW_SIZE = 7
_C.MODEL.SWIN.MLP_RATIO = 4.
_C.MODEL.SWIN.QKV_BIAS = True
_C.MODEL.SWIN.QK_SCALE = None
_C.MODEL.SWIN.APE = False
_C.MODEL.SWIN.PATCH_NORM = True
_C.TRAIN = CN()
_C.TRAIN.USE_CHECKPOINT = False
return _C
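# Note: with EMBED_DIM = 96 and DEPTHS = [2, 2, 18, 2] this config corresponds to the Swin-Small
# variant, consistent with MODEL.NAME = 'swin_small_patch4_window7_224' above.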
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
def window_partition(x, window_size):
"""
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows
def window_reverse(windows, window_size, H, W):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
window_size (int): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
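# Shape example for window_partition / window_reverse (illustrative values): with x of shape
# (2, 56, 56, 96) and window_size = 7, window_partition returns (2 * 8 * 8, 7, 7, 96) =
# (128, 7, 7, 96); window_reverse with the same arguments restores the original (2, 56, 56, 96).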
class WindowAttention(nn.Module):
r""" Window based multi-head self attention (W-MSA) module with relative position bias.
It supports both of shifted and non-shifted window.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.dim = dim
self.window_size = window_size # Wh, Ww
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
# define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=.02)
self.softmax = nn.Softmax(dim=-1)
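    # The relative_position_index buffer built above maps every (query, key) position pair inside
    # a window to one of the (2*Wh-1) * (2*Ww-1) learnable entries of the bias table, so the table
    # is shared across windows; forward() looks it up and adds the bias to the attention logits.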
def forward(self, x, mask=None):
"""
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
"""
B_, N, C = x.shape
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
def extra_repr(self) -> str:
return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
def flops(self, N):
# calculate flops for 1 window with token length of N
flops = 0
# qkv = self.qkv(x)
flops += N * self.dim * 3 * self.dim
# attn = (q @ k.transpose(-2, -1))
flops += self.num_heads * N * (self.dim // self.num_heads) * N
# x = (attn @ v)
flops += self.num_heads * N * N * (self.dim // self.num_heads)
# x = self.proj(x)
flops += N * self.dim * self.dim
return flops
class SwinTransformerBlock(nn.Module):
r""" Swin Transformer Block.
Args:
dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
if min(self.input_resolution) <= self.window_size:
# if window size is larger than input resolution, we don't partition windows
self.shift_size = 0
self.window_size = min(self.input_resolution)
assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"
self.norm1 = norm_layer(dim)
self.attn = WindowAttention(
dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
if self.shift_size > 0:
# calculate attention mask for SW-MSA
H, W = self.input_resolution
img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
h_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
w_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
else:
attn_mask = None
self.register_buffer("attn_mask", attn_mask)
def forward(self, x):
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
shortcut = x
x = self.norm1(x)
x = x.view(B, H, W, C)
# cyclic shift
if self.shift_size > 0:
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_x = x
# partition windows
x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
# W-MSA/SW-MSA
attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C
# merge windows
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
x = x.view(B, H * W, C)
# FFN
x = shortcut + self.drop_path(x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
def flops(self):
flops = 0
H, W = self.input_resolution
# norm1
flops += self.dim * H * W
# W-MSA/SW-MSA
nW = H * W / self.window_size / self.window_size
flops += nW * self.attn.flops(self.window_size * self.window_size)
# mlp
flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
# norm2
flops += self.dim * H * W
return flops
class PatchMerging(nn.Module):
r""" Patch Merging Layer.
Args:
input_resolution (tuple[int]): Resolution of input feature.
dim (int): Number of input channels.
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
def forward(self, x):
"""
x: B, H*W, C
"""
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even."
x = x.view(B, H, W, C)
x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
x = self.norm(x)
x = self.reduction(x)
return x
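    # Shape example (illustrative): with input_resolution = (56, 56) and dim = 96, the input
    # (B, 56*56, 96) is regrouped into 2x2 neighborhoods and reduced to (B, 28*28, 192),
    # i.e. the spatial resolution halves while the channel dimension doubles.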
def extra_repr(self) -> str:
return f"input_resolution={self.input_resolution}, dim={self.dim}"
def flops(self):
H, W = self.input_resolution
flops = H * W * self.dim
flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
return flops
class BasicLayer(nn.Module):
""" A basic Swin Transformer layer for one stage.
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resolution.
depth (int): Number of blocks.
num_heads (int): Number of attention heads.
window_size (int): Local window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
def __init__(self, dim, input_resolution, depth, num_heads, window_size,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.depth = depth
self.use_checkpoint = use_checkpoint
# build blocks
self.blocks = nn.ModuleList([
SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
num_heads=num_heads, window_size=window_size,
shift_size=0 if (i % 2 == 0) else window_size // 2,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop, attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer)
for i in range(depth)])
# patch merging layer
if downsample is not None:
self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
else:
self.downsample = None
def forward(self, x):
for blk in self.blocks:
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
if self.downsample is not None:
x = self.downsample(x)
return x
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
def flops(self):
flops = 0
for blk in self.blocks:
flops += blk.flops()
if self.downsample is not None:
flops += self.downsample.flops()
return flops
class PatchEmbed(nn.Module):
r""" Image to Patch Embedding
Args:
img_size (int): Image size. Default: 224.
patch_size (int): Patch token size. Default: 4.
in_chans (int): Number of input image channels. Default: 3.
embed_dim (int): Number of linear projection output channels. Default: 96.
norm_layer (nn.Module, optional): Normalization layer. Default: None
"""
def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
self.img_size = img_size
self.patch_size = patch_size
self.patches_resolution = patches_resolution
self.num_patches = patches_resolution[0] * patches_resolution[1]
self.in_chans = in_chans
self.embed_dim = embed_dim
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
if norm_layer is not None:
self.norm = norm_layer(embed_dim)
else:
self.norm = None
def forward(self, x):
B, C, H, W = x.shape
# FIXME look at relaxing size constraints
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2) # B Ph*Pw C
if self.norm is not None:
x = self.norm(x)
return x
def flops(self):
Ho, Wo = self.patches_resolution
flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
if self.norm is not None:
flops += Ho * Wo * self.embed_dim
return flops
class SwinTransformer(nn.Module):
r""" Swin Transformer
A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
https://arxiv.org/pdf/2103.14030
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Swin Transformer layer.
num_heads (tuple(int)): Number of attention heads in different layers.
window_size (int): Window size. Default: 7
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
drop_rate (float): Dropout rate. Default: 0
attn_drop_rate (float): Attention dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
"""
def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,
embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
use_checkpoint=False, init_weights=True, normalize_output=True,
return_feat=False,
**kwargs):
super().__init__()
self.normalize_output = normalize_output
self.num_classes = num_classes
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.ape = ape
self.patch_norm = patch_norm
self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
self.mlp_ratio = mlp_ratio
self.return_feature = return_feat
# split image into non-overlapping patches
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
norm_layer=norm_layer if self.patch_norm else None)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.patches_resolution
self.patches_resolution = patches_resolution
# absolute position embedding
if self.ape:
self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
trunc_normal_(self.absolute_pos_embed, std=.02)
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
# build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),
input_resolution=(patches_resolution[0] // (2 ** i_layer),
patches_resolution[1] // (2 ** i_layer)),
depth=depths[i_layer],
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
use_checkpoint=use_checkpoint)
self.layers.append(layer)
self.norm = norm_layer(self.num_features)
self.avgpool = nn.AdaptiveAvgPool1d(1)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
# if init_weights:
# initialize_weights(self) # in line with other models evaluated on personality
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.01)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'absolute_pos_embed'}
@torch.jit.ignore
def no_weight_decay_keywords(self):
return {'relative_position_bias_table'}
def forward_features(self, x):
x = self.patch_embed(x)
if self.ape:
x = x + self.absolute_pos_embed
x = self.pos_drop(x)
for layer in self.layers:
x = layer(x)
x = self.norm(x) # B L C
x = self.avgpool(x.transpose(1, 2)) # B C 1
x = torch.flatten(x, 1)
return x
def forward(self, x):
feat = self.forward_features(x)
x = self.head(feat)
if self.normalize_output:
x = torch.sigmoid(x)
if self.return_feature:
return x, feat
return x
def flops(self):
flops = 0
flops += self.patch_embed.flops()
for i, layer in enumerate(self.layers):
flops += layer.flops()
flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
flops += self.num_features * self.num_classes
return flops
def get_swin_transformer_model(cfg=None):
config = swin_config()
model = SwinTransformer(
img_size=config.DATA.IMG_SIZE,
patch_size=config.MODEL.SWIN.PATCH_SIZE,
in_chans=config.MODEL.SWIN.IN_CHANS,
num_classes=config.MODEL.NUM_CLASSES,
embed_dim=config.MODEL.SWIN.EMBED_DIM,
depths=config.MODEL.SWIN.DEPTHS,
num_heads=config.MODEL.SWIN.NUM_HEADS,
window_size=config.MODEL.SWIN.WINDOW_SIZE,
mlp_ratio=config.MODEL.SWIN.MLP_RATIO,
qkv_bias=config.MODEL.SWIN.QKV_BIAS,
qk_scale=config.MODEL.SWIN.QK_SCALE,
drop_rate=config.MODEL.DROP_RATE,
drop_path_rate=config.MODEL.DROP_PATH_RATE,
ape=config.MODEL.SWIN.APE,
patch_norm=config.MODEL.SWIN.PATCH_NORM,
use_checkpoint=config.TRAIN.USE_CHECKPOINT
)
return model.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
@NETWORK_REGISTRY.register()
def swin_transformer_model(cfg=None):
config = swin_config()
model = SwinTransformer(
img_size=config.DATA.IMG_SIZE,
patch_size=config.MODEL.SWIN.PATCH_SIZE,
in_chans=config.MODEL.SWIN.IN_CHANS,
num_classes=config.MODEL.NUM_CLASSES,
embed_dim=config.MODEL.SWIN.EMBED_DIM,
depths=config.MODEL.SWIN.DEPTHS,
num_heads=config.MODEL.SWIN.NUM_HEADS,
window_size=config.MODEL.SWIN.WINDOW_SIZE,
mlp_ratio=config.MODEL.SWIN.MLP_RATIO,
qkv_bias=config.MODEL.SWIN.QKV_BIAS,
qk_scale=config.MODEL.SWIN.QK_SCALE,
drop_rate=config.MODEL.DROP_RATE,
drop_path_rate=config.MODEL.DROP_PATH_RATE,
ape=config.MODEL.SWIN.APE,
patch_norm=config.MODEL.SWIN.PATCH_NORM,
use_checkpoint=config.TRAIN.USE_CHECKPOINT,
return_feat=cfg.MODEL.RETURN_FEATURE,
)
return model.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
@NETWORK_REGISTRY.register()
def swin_transformer_true_personality(cfg=None):
config = swin_config()
model = SwinTransformer(
img_size=config.DATA.IMG_SIZE,
patch_size=config.MODEL.SWIN.PATCH_SIZE,
in_chans=config.MODEL.SWIN.IN_CHANS,
num_classes=config.MODEL.NUM_CLASSES,
embed_dim=config.MODEL.SWIN.EMBED_DIM,
depths=config.MODEL.SWIN.DEPTHS,
num_heads=config.MODEL.SWIN.NUM_HEADS,
window_size=config.MODEL.SWIN.WINDOW_SIZE,
mlp_ratio=config.MODEL.SWIN.MLP_RATIO,
qkv_bias=config.MODEL.SWIN.QKV_BIAS,
qk_scale=config.MODEL.SWIN.QK_SCALE,
drop_rate=config.MODEL.DROP_RATE,
drop_path_rate=config.MODEL.DROP_PATH_RATE,
ape=config.MODEL.SWIN.APE,
patch_norm=config.MODEL.SWIN.PATCH_NORM,
use_checkpoint=config.TRAIN.USE_CHECKPOINT,
normalize_output=False,
)
return model.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
if __name__ == "__main__":
model = get_swin_transformer_model()
xin = torch.randn(8, 3, 224, 224).cuda()
y = model(xin)
print(y)
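    # With NUM_CLASSES = 5 and the default sigmoid output, y is expected to have shape (8, 5)
    # with values in (0, 1).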
| 29,160 | 39.277624 | 119 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/networks/se_net.py | import torch
import torch.nn as nn
from torch.hub import load_state_dict_from_url
from dpcv.modeling.module.resnet_tv import ResNet
from dpcv.modeling.module.se_resnet import SEBottleneck
from .build import NETWORK_REGISTRY
@NETWORK_REGISTRY.register()
def se_resnet50(cfg, num_classes=1000, pretrained=False):
"""Constructs a ResNet-50 model.
Args:
num_classes (int): number of classification
pretrained (bool): If True, returns a model pre-trained on ImageNet
Note:
the resnet use sigmoid function for the out fc layer's output since the
personality label in range (0, 1)
"""
num_classes = cfg.MODEL.NUM_CLASS if cfg.MODEL.NUM_CLASS is not None else num_classes
model = ResNet(SEBottleneck, [3, 4, 6, 3], num_classes=num_classes, return_feat=cfg.MODEL.RETURN_FEATURE)
model.avgpool = nn.AdaptiveAvgPool2d(1)
if cfg.MODEL.PRETRAIN:
# model.load_state_dict(load_state_dict_from_url(
# "https://github.com/moskomule/senet.pytorch/releases/download/archive/seresnet50-60a8950a85b2b.pkl"))
pretrained_dict = load_state_dict_from_url(
"https://github.com/moskomule/senet.pytorch/releases/download/archive/seresnet50-60a8950a85b2b.pkl")
model_dict = model.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
model.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
return model
@NETWORK_REGISTRY.register()
def se_resnet50_true_personality(cfg, num_classes=1000, pretrained=False):
"""Constructs a ResNet-50 model.
Args:
num_classes (int): number of classification
pretrained (bool): If True, returns a model pre-trained on ImageNet
Note:
the resnet use sigmoid function for the out fc layer's output since the
personality label in range (0, 1)
"""
num_classes = cfg.MODEL.NUM_CLASS if cfg.MODEL.NUM_CLASS is not None else num_classes
model = ResNet(SEBottleneck, [3, 4, 6, 3], num_classes=num_classes, sigmoid_output=False)
model.avgpool = nn.AdaptiveAvgPool2d(1)
if cfg.MODEL.PRETRAIN:
# model.load_state_dict(load_state_dict_from_url(
# "https://github.com/moskomule/senet.pytorch/releases/download/archive/seresnet50-60a8950a85b2b.pkl"))
pretrained_dict = load_state_dict_from_url(
"https://github.com/moskomule/senet.pytorch/releases/download/archive/seresnet50-60a8950a85b2b.pkl")
model_dict = model.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
model.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
return model | 2,880 | 45.467742 | 115 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/networks/bi_modal_lstm.py | import torch
import torch.nn as nn
from dpcv.modeling.module.weight_init_helper import initialize_weights
from .build import NETWORK_REGISTRY
class BiModelLSTM(nn.Module):
def __init__(self, init_weights=True, true_personality=False):
super(BiModelLSTM, self).__init__()
self.audio_branch = nn.Linear(in_features=68, out_features=32)
self.image_branch_conv = nn.Sequential(
nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5, stride=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(in_channels=16, out_channels=16, kernel_size=7, stride=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(in_channels=16, out_channels=16, kernel_size=9, stride=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.image_branch_linear = nn.Sequential(
nn.Flatten(start_dim=1),
nn.Linear(in_features=16 * 8 * 8, out_features=1024),
nn.Linear(in_features=1024, out_features=128),
nn.Dropout(0.2)
)
# the original paper set hidden_size to 128
# self.lstm = nn.LSTM(input_size=160, hidden_size=128)
# self.out_linear = nn.Linear(in_features=128, out_features=5)
self.lstm = nn.LSTM(input_size=160, hidden_size=512)
self.out_linear = nn.Linear(in_features=512, out_features=5)
self.true_personality = true_personality
if init_weights:
initialize_weights(self)
def forward(self, audio_feature, img_feature):
x_audio = self.audio_branch(audio_feature) # (bs * 6, 32)
x_img = self.image_branch_conv(img_feature) # (bs * 6, 16 * 8 * 8)
x_img = self.image_branch_linear(x_img) # (bs * 6, 128)
x = torch.cat([x_audio, x_img], dim=-1)
x = x.view(6, -1, 160) # x_shape = (6, bs, 160)
        x, _ = self.lstm(x)  # x_shape = (6, bs, 512)
x = self.out_linear(x) # x_shape = (6, bs, 5)
x = x.permute(1, 0, 2) # x_shape = (bs, 6, 5)
if self.true_personality:
return x.mean(dim=1)
y = torch.sigmoid(x).mean(dim=1) # y_shape = (bs, 5)
return y
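# Note on BiModelLSTM.forward above: audio_feature and img_feature are expected to be stacked as
# (batch * 6, ...), i.e. 6 sampled segments per video; the fused 160-d features are reshaped into a
# length-6 sequence for the LSTM, and the per-step predictions are averaged over time (with a
# sigmoid unless the true-personality variant is used).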
class ImgLSTM(nn.Module):
def __init__(self):
super(ImgLSTM, self).__init__()
self.image_branch_conv = nn.Sequential(
nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5, stride=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(in_channels=16, out_channels=16, kernel_size=7, stride=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(in_channels=16, out_channels=16, kernel_size=9, stride=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.image_branch_linear = nn.Sequential(
nn.Flatten(start_dim=1),
nn.Linear(in_features=16 * 8 * 8, out_features=1024),
nn.Linear(in_features=1024, out_features=128),
nn.Dropout(0.2)
)
self.lstm = nn.LSTM(input_size=128, hidden_size=128)
self.out_linear = nn.Linear(in_features=128, out_features=5)
def forward(self, img_feature):
x_img = self.image_branch_conv(img_feature) # (bs * 6, 16 * 8 * 8)
x_img = self.image_branch_linear(x_img) # (bs * 6, 128)
        x = x_img.view(6, -1, 128)  # x_shape = (6, bs, 128)
x, _ = self.lstm(x) # x_shape = (6, bs, 128)
x = self.out_linear(x) # x_shape = (6, bs, 5)
x = x.permute(1, 0, 2) # x_shape = (bs, 6, 5)
y = torch.sigmoid(x).mean(dim=1) # y_shape = (bs, 5)
return y
class AudioLSTM(nn.Module):
def __init__(self, with_sigmoid=False):
super(AudioLSTM, self).__init__()
self.audio_branch = nn.Linear(in_features=68, out_features=32)
self.lstm = nn.LSTM(input_size=32, hidden_size=128)
self.out_linear = nn.Linear(in_features=128, out_features=5)
self.with_sigmoid = with_sigmoid
def forward(self, audio_feature):
x_audio = self.audio_branch(audio_feature) # (bs * 6, 32)
        x = x_audio.view(6, -1, 32)  # x_shape = (6, bs, 32)
x, _ = self.lstm(x) # x_shape = (6, bs, 128)
x = self.out_linear(x) # x_shape = (6, bs, 5)
x = x.permute(1, 0, 2) # x_shape = (bs, 6, 5)
if self.with_sigmoid:
y = torch.sigmoid(x).mean(dim=1) # y_shape = (bs, 5)
else:
y = x.mean(dim=1)
return y
def get_bi_modal_lstm_model():
bi_modal_lstm = BiModelLSTM()
bi_modal_lstm.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
return bi_modal_lstm
@NETWORK_REGISTRY.register()
def bi_modal_lstm_model(cfg=None):
bi_modal_lstm = BiModelLSTM()
bi_modal_lstm.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
return bi_modal_lstm
@NETWORK_REGISTRY.register()
def bi_modal_lstm_model_true_personality(cfg=None):
bi_modal_lstm = BiModelLSTM(true_personality=True)
bi_modal_lstm.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
return bi_modal_lstm
@NETWORK_REGISTRY.register()
def get_img_modal_lstm_model(cfg=None):
img_lstm = ImgLSTM()
img_lstm.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
return img_lstm
@NETWORK_REGISTRY.register()
def get_aud_modal_lstm_model(cfg=None):
aud_lstm = AudioLSTM()
aud_lstm.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
return aud_lstm
if __name__ == "__main__":
"""
    basically, the batch size is the number of videos; each video contributes 6 time steps
"""
bs = 2
au_ft = torch.randn((bs * 6, 68))
im_ft = torch.randn((bs * 6, 3, 112, 112))
# bi_model = BiModelLSTM()
# out = bi_model(au_ft, im_ft)
# img_model = ImgLSTM()
# out = img_model(im_ft)
aud_model = AudioLSTM()
out = aud_model(au_ft)
print(out.shape)
| 6,000 | 36.273292 | 89 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/networks/audio_visual_residual.py | import torch
import torch.nn as nn
from dpcv.modeling.module.bi_modal_resnet_module import AudioVisualResNet, AudInitStage
from dpcv.modeling.module.bi_modal_resnet_module import VisInitStage, BiModalBasicBlock
from dpcv.modeling.module.bi_modal_resnet_module import aud_conv1x9, aud_conv1x1, vis_conv3x3, vis_conv1x1
from dpcv.modeling.module.weight_init_helper import initialize_weights
from dpcv.modeling.networks.build import NETWORK_REGISTRY
class AudioVisualResNet18(nn.Module):
def __init__(self, init_weights=True, return_feat=False):
super(AudioVisualResNet18, self).__init__()
self.return_feature = return_feat
self.audio_branch = AudioVisualResNet(
in_channels=1, init_stage=AudInitStage,
block=BiModalBasicBlock, conv=[aud_conv1x9, aud_conv1x1],
channels=[32, 64, 128, 256],
layers=[2, 2, 2, 2]
)
self.visual_branch = AudioVisualResNet(
in_channels=3, init_stage=VisInitStage,
block=BiModalBasicBlock, conv=[vis_conv3x3, vis_conv1x1],
channels=[32, 64, 128, 256],
layers=[2, 2, 2, 2]
)
self.linear = nn.Linear(512, 5)
if init_weights:
initialize_weights(self)
def forward(self, aud_input, vis_input):
aud_x = self.audio_branch(aud_input)
vis_x = self.visual_branch(vis_input)
aud_x = aud_x.view(aud_x.size(0), -1)
vis_x = vis_x.view(vis_x.size(0), -1)
feat = torch.cat([aud_x, vis_x], dim=-1)
x = self.linear(feat)
x = torch.sigmoid(x)
# x = torch.tanh(x)
# x = (x + 1) / 2 # scale tanh output to [0, 1]
if self.return_feature:
return x, feat
return x
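# Note on AudioVisualResNet18 above: late fusion by concatenation; the audio branch and the visual
# branch each produce a 256-d global feature, the concatenated 512-d vector is mapped to the 5
# traits by a single linear layer, and a sigmoid keeps the outputs in (0, 1).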
class VisualResNet18(nn.Module):
def __init__(self, init_weights=True, return_feat=False):
super(VisualResNet18, self).__init__()
self.return_feature = return_feat
self.visual_branch = AudioVisualResNet(
in_channels=3, init_stage=VisInitStage,
block=BiModalBasicBlock, conv=[vis_conv3x3, vis_conv1x1],
channels=[32, 64, 128, 256],
layers=[2, 2, 2, 2]
)
self.linear = nn.Linear(256, 5)
if init_weights:
initialize_weights(self)
def forward(self, vis_input):
# aud_x = self.audio_branch(aud_input)
vis_x = self.visual_branch(vis_input)
# aud_x = aud_x.view(aud_x.size(0), -1)
vis_x = vis_x.view(vis_x.size(0), -1)
feat = vis_x
x = self.linear(vis_x)
x = torch.sigmoid(x)
# x = torch.tanh(x)
# x = (x + 1) / 2 # scale tanh output to [0, 1]
if self.return_feature:
return x, feat
return x
class AudioResNet18(nn.Module):
def __init__(self):
super(AudioResNet18, self).__init__()
self.audio_branch = AudioVisualResNet(
in_channels=1, init_stage=AudInitStage,
block=BiModalBasicBlock, conv=[aud_conv1x9, aud_conv1x1],
channels=[32, 64, 128, 256],
layers=[2, 2, 2, 2]
)
self.linear = nn.Linear(256, 5)
def forward(self, aud_input):
aud_x = self.audio_branch(aud_input)
aud_x = aud_x.view(aud_x.size(0), -1)
x = self.linear(aud_x)
x = torch.sigmoid(x)
return x
@NETWORK_REGISTRY.register()
def audiovisual_resnet(cfg=None):
multi_modal_model = AudioVisualResNet18(return_feat=cfg.MODEL.RETURN_FEATURE)
multi_modal_model.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
return multi_modal_model
def get_audiovisual_resnet_model():
multi_modal_model = AudioVisualResNet18()
multi_modal_model.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
return multi_modal_model
@NETWORK_REGISTRY.register()
def get_audio_resnet_model(cfg=None):
aud_modal_model = AudioResNet18()
aud_modal_model.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
return aud_modal_model
@NETWORK_REGISTRY.register()
def get_visual_resnet_model(cfg=None):
visual_modal_model = VisualResNet18()
visual_modal_model.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
return visual_modal_model
if __name__ == "__main__":
aud = torch.randn(2, 1, 1, 50176)
vis = torch.randn(2, 3, 224, 224)
# multi_model = AudioVisualResNet18()
# y = multi_model(aud, vis)
model = AudioResNet18()
y = model(aud)
print(y)
| 4,542 | 31.683453 | 106 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/networks/multi_modal_pred_net.py | import torch
import torch.nn as nn
from dpcv.modeling.module.resnet_tv import Bottleneck, BasicBlock, conv1x1, conv3x3
from dpcv.modeling.module.resnet_tv import model_zoo, model_urls
from .build import NETWORK_REGISTRY
class ResNetFeatureExtractor(nn.Module):
"""
Note: that class is not a formal resnet but with a sigmoid function for the last fc layer
"""
def __init__(
self, block, layers, num_classes=1000,
init_weights=True, zero_init_residual=False, sigmoid_output=True,
return_feat=False,
):
super(ResNetFeatureExtractor, self).__init__()
self.return_feature = return_feat
self.sigmoid_output = sigmoid_output
self.inplanes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.maxpool14x14 = nn.MaxPool2d(kernel_size=14)
self.maxpool7x7 = nn.MaxPool2d(kernel_size=7)
self.fc = nn.Linear(512 * block.expansion, num_classes)
# if init_weights:
# initialize_weights(self)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# elif isinstance(m, nn.Linear):
# nn.init.normal_(m.weight, 0, 0.01)
# nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x_3 = self.layer3(x)
x_3_1 = self.maxpool14x14(x_3).reshape(-1, 1024)
x_3_2 = self.avgpool(x_3).reshape(-1, 1024)
x_4 = self.layer4(x_3)
x_4_1 = self.maxpool7x7(x_4).reshape(-1, 2048)
x_4_2 = self.avgpool(x_4).reshape(-1, 2048)
feat = torch.cat([x_3_1, x_3_2, x_4_1, x_4_2], dim=1)
return feat
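# Note on ResNetFeatureExtractor.forward above: the returned feature is the concatenation of
# max- and average-pooled layer3 outputs (1024 + 1024) and max- and average-pooled layer4 outputs
# (2048 + 2048), 6144 dims in total, which matches VisualFCNet(6144) used in
# multi_modal_visual_model below.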
def resnet101_visual_feature_extractor(pretrained=True, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pretrained on ImageNet
"""
model = ResNetFeatureExtractor(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
pretrained_dict = model_zoo.load_url(model_urls['resnet101'])
model_dict = model.state_dict()
# 1. filter out unnecessary keys
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
# 2. overwrite entries in the existing state dict
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
return model.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
class VisualFCNet(nn.Module):
def __init__(self, input_dim, out_dim=5, use_sigmoid=True):
super().__init__()
self.fc = nn.Sequential(
nn.Linear(input_dim, 512),
nn.ReLU(),
nn.Linear(512, out_dim),
)
self.dropout = nn.Dropout()
self.sigmoid = nn.Sigmoid()
self.use_sigmoid = use_sigmoid
def forward(self, x):
x = self.dropout(x)
x = self.fc(x)
x = x.mean(dim=1)
if self.use_sigmoid:
return self.sigmoid(x)
return x
class AudioFCNet(nn.Module):
def __init__(self, input_dim, out_dim=5, spectrum_channel=15, use_sigmoid=True):
super().__init__()
self.spectrum_channel = spectrum_channel
self.fc = nn.Sequential(
nn.Linear(input_dim, 1024),
nn.ReLU(),
nn.Linear(1024, 512),
nn.ReLU(),
nn.Linear(512, out_dim),
)
self.sigmoid = nn.Sigmoid()
self.use_sigmoid = use_sigmoid
def forward(self, x):
x = x.view(-1, self.spectrum_channel * 128)
x = (x - x.mean()) / x.std()
x = self.fc(x)
if self.use_sigmoid:
return self.sigmoid(x)
return x
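# Usage sketch (illustrative): the spectrum is flattened to spectrum_channel * 128 values per
# sample and standardised before the MLP, e.g.
#   head = AudioFCNet(input_dim=15 * 128, spectrum_channel=15)
#   spec = torch.randn(4, 15, 128)          # (batch, spectrum_channel, 128)
#   traits = head(spec)                     # -> (4, 5)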
@NETWORK_REGISTRY.register()
def multi_modal_visual_model(cfg):
model = VisualFCNet(6144)
model.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
return model
@NETWORK_REGISTRY.register()
def multi_modal_audio_model(cfg):
if cfg.DATA.SESSION in ["talk", "animal", "ghost", "lego"]:
dim = cfg.MODEL.SPECTRUM_CHANNEL * 128
use_sigmoid = False
else:
dim = 15 * 128
use_sigmoid = True
model = AudioFCNet(dim, spectrum_channel=cfg.MODEL.SPECTRUM_CHANNEL, use_sigmoid=use_sigmoid)
model.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
return model
if __name__ == "__main__":
model = resnet101_visual_feature_extractor()
fake = torch.randn(6, 3, 224, 224)
output = model(fake)
print(output.shape)
| 6,631 | 34.848649 | 106 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/networks/spectrum_model.py | import torch.cuda
import torch.nn as nn
from dpcv.modeling.networks.build import NETWORK_REGISTRY
from dpcv import device
from dpcv.modeling.module.weight_init_helper import initialize_weights
class SpectrumConv1D(nn.Module):
def __init__(self, channel=80, hidden_units=[128, 512, 2048]):
super(SpectrumConv1D, self).__init__()
self.conv_in = nn.Sequential(
nn.Conv1d(
in_channels=2, out_channels=hidden_units[0], kernel_size=(1, 49), padding=(0, 24)
),
nn.ReLU(),
nn.Conv1d(
in_channels=hidden_units[0], out_channels=hidden_units[1], kernel_size=(1, 25), padding=(0, 12)
),
nn.ReLU(),
)
self.conv_up_scale = nn.Sequential(
nn.Conv1d(
in_channels=hidden_units[1], out_channels=hidden_units[1],
kernel_size=(1, 9), padding=(0, 4),
),
nn.ReLU(),
nn.Conv1d(
in_channels=hidden_units[1], out_channels=hidden_units[2],
kernel_size=(1, 9), padding=(0, 4),
),
nn.ReLU()
)
self.conv_down_scale = nn.Sequential(
nn.Conv1d(
in_channels=hidden_units[2], out_channels=hidden_units[1],
kernel_size=(1, 9), padding=(0, 4),
),
nn.ReLU(),
)
self.conv_out = nn.Sequential(
nn.Conv1d(
in_channels=hidden_units[1], out_channels=1, kernel_size=(1, 1)
),
nn.ReLU(),
nn.Conv1d(
in_channels=1, out_channels=1, kernel_size=(1, channel)
),
)
def forward(self, x):
        x = self.conv_in(x)            # lift the 2-channel input to hidden_units[1] feature maps
        identical = x
        x = self.conv_up_scale(x)      # expand to hidden_units[2] feature maps
        x = self.conv_down_scale(x)    # project back to hidden_units[1] feature maps
        x += identical                 # residual connection around the up/down-scaling block
        x = self.conv_out(x)           # collapse to one channel and reduce the channel-wide axis
        x = x.squeeze(1)               # drop the singleton output channel
        x = x.squeeze()                # drop any remaining singleton dimensions
return x
class SpectrumConv1D2(nn.Module):
def __init__(self, signal_num=512, spectron_len=5, hidden_units=[256, 512, 1024, 512], init_weight=False):
super(SpectrumConv1D2, self).__init__()
# init_input
self.init_input_conv2d = nn.Conv2d(
in_channels=2, out_channels=spectron_len, kernel_size=(signal_num, 1), stride=1)
# stage 1
self.conv1d_up2_s2_1 = nn.Sequential(
nn.Conv1d(in_channels=spectron_len, out_channels=hidden_units[0], kernel_size=5, stride=1, padding=2),
nn.BatchNorm1d(hidden_units[0]),
nn.LeakyReLU(),
) # (bs, 1024, 180)
# self.shortcut_1 = nn.Sequential(
# nn.Conv1d(in_channels=spectron_len, out_channels=hidden_units[0], kernel_size=3, stride=1, padding=1),
# nn.LeakyReLU(),
# )
# stage 2
self.con1d_stage = nn.Sequential(
nn.Conv1d(in_channels=hidden_units[0], out_channels=hidden_units[1], kernel_size=3, padding=1, stride=1),
nn.BatchNorm1d(hidden_units[1]),
nn.LeakyReLU(),
)
# stage 3
self.conv1d_up2_s2_2 = nn.Sequential(
# nn.Conv1d(
# in_channels=hidden_units[1], out_channels=hidden_units[1],
# kernel_size=3, padding=1, stride=1,
# ),
# nn.BatchNorm1d(hidden_units[1]),
# nn.LeakyReLU(),
# nn.Conv1d(
# in_channels=hidden_units[1], out_channels=hidden_units[1],
# kernel_size=3, padding=1, stride=1,
# ),
# nn.BatchNorm1d(hidden_units[1]),
# nn.LeakyReLU(),
nn.Conv1d(in_channels=hidden_units[1], out_channels=hidden_units[2], kernel_size=3, stride=2, padding=1),
nn.BatchNorm1d(hidden_units[2]),
nn.LeakyReLU(),
) # (bs, 2048, 90)
self.shortcut_2 = nn.Sequential(
nn.Conv1d(in_channels=hidden_units[1], out_channels=hidden_units[2], kernel_size=3, stride=2, padding=1),
nn.LeakyReLU(),
)
# stage 4
self.conv1d_s2 = nn.Sequential(
nn.Conv1d(in_channels=hidden_units[2], out_channels=hidden_units[2], kernel_size=1, stride=1, padding=0),
nn.BatchNorm1d(hidden_units[2]),
nn.LeakyReLU(),
nn.Conv1d(in_channels=hidden_units[2], out_channels=hidden_units[2], kernel_size=3, padding=1, stride=2),
nn.BatchNorm1d(hidden_units[2]),
nn.LeakyReLU(),
)
# regressor
self.avgpool = nn.AdaptiveAvgPool1d(1)
self.regressor = nn.Sequential(
# nn.Linear(hidden_units[2], hidden_units[2]),
# nn.LeakyReLU(),
# nn.Dropout(),
nn.Linear(hidden_units[2], hidden_units[3]),
nn.LeakyReLU(),
nn.Dropout(),
nn.Linear(hidden_units[3], 5),
)
if init_weight:
print("init weights")
for m in self.modules():
if isinstance(m, nn.Sequential):
for m_i in m.modules():
if isinstance(m_i, nn.Conv1d):
nn.init.kaiming_normal_(m_i.weight)
elif isinstance(m_i, nn.BatchNorm1d):
nn.init.constant_(m_i.weight, 1)
nn.init.constant_(m_i.bias, 0)
elif isinstance(m_i, nn.Conv2d):
nn.init.kaiming_normal_(m_i.weight)
elif isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.constant_(m.bias, 0)
def forward(self, x):
# init input
x = self.init_input_conv2d(x).squeeze(dim=2)
# stage 1:
# x_1 = x
x = self.conv1d_up2_s2_1(x)
# x_shortcut_1 = self.shortcut_1(x_1)
# x += x_shortcut_1
# stage 2:
x = self.con1d_stage(x)
# stage 3:
x_3 = x
x = self.conv1d_up2_s2_2(x)
x_shortcut_3 = self.shortcut_2(x_3)
x += x_shortcut_3
# stage 4:
x = self.conv1d_s2(x)
# regressor
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.regressor(x)
return x
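# Usage sketch (mirrors the test in __main__ at the bottom of this file): a two-channel
# spectrum of shape (batch, 2, signal_num, time) is collapsed to (batch, spectron_len, time)
# by the initial Conv2d and regressed to the five traits, e.g.
#   net = SpectrumConv1D2(signal_num=512)
#   spec = torch.randn(1, 2, 512, 180)
#   traits = net(spec)                      # -> (1, 5)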
class SpectrumFeatConv1D(nn.Module):
def __init__(self, channel=80, signal_num=512, hidden_units=[64, 256, 1024], initialize=False):
super(SpectrumFeatConv1D, self).__init__()
self.conv_in = nn.Sequential(
nn.Conv2d(
in_channels=2, out_channels=hidden_units[0], kernel_size=(1, 7), padding=(0, 3)
),
nn.ReLU(),
nn.Conv2d(
in_channels=hidden_units[0], out_channels=hidden_units[1], kernel_size=(1, 5), padding=(0, 2)
),
nn.ReLU(),
)
self.conv_up_scale = nn.Sequential(
nn.Conv2d(
in_channels=hidden_units[1], out_channels=hidden_units[1], kernel_size=(1, 3), padding=(0, 1),
),
nn.ReLU(),
nn.Conv2d(
in_channels=hidden_units[1], out_channels=hidden_units[2], kernel_size=(1, 3), padding=(0, 1),
),
nn.ReLU()
)
self.conv_down_scale = nn.Sequential(
nn.Conv2d(
in_channels=hidden_units[2], out_channels=hidden_units[1], kernel_size=(1, 3), padding=(0, 1),
),
nn.ReLU(),
)
self.conv_out = nn.Sequential(
nn.Conv2d(
in_channels=hidden_units[1], out_channels=1, kernel_size=(1, 1)
),
nn.ReLU(),
nn.Conv2d(
in_channels=1, out_channels=1, kernel_size=(1, channel)
),
)
self.trait_fc = nn.Linear(signal_num, 5)
if initialize:
initialize_weights(self)
def forward(self, x):
        x = self.conv_in(x)            # lift the 2-channel input to hidden_units[1] feature maps
        identical = x
        x = self.conv_up_scale(x)      # expand to hidden_units[2] feature maps
        x = self.conv_down_scale(x)    # project back to hidden_units[1] feature maps
        x += identical                 # residual connection around the up/down-scaling block
        x = self.conv_out(x)           # collapse to one channel and reduce the `channel`-wide axis
        x = x.squeeze(1)               # drop the singleton output channel
        x = x.squeeze()                # drop any remaining singleton dimensions
x = self.trait_fc(x)
return x
def conv1x9(in_planes, out_planes, stride=1):
"""1x9 convolution with padding"""
if stride == 1:
return nn.Conv2d(in_planes, out_planes, kernel_size=(1, 25), stride=1, padding=(0, 12), bias=False)
elif stride == 2:
return nn.Conv2d(in_planes, out_planes, kernel_size=(1, 25), stride=(1, 2*stride), padding=(0, 12), bias=False)
else:
raise ValueError("wrong stride value")
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
if stride == 1:
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, bias=False)
elif stride == 2:
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=(1, 2 * stride), bias=False)
else:
raise ValueError("wrong stride value")
class InitStage(nn.Module):
def __init__(self, in_channels=2, out_channels=64):
super(InitStage, self).__init__()
self.conv1 = nn.Conv2d(
in_channels, out_channels, kernel_size=(1, 49), stride=(1, 1), padding=(0, 24), bias=False)
# self.bn1 = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
# self.maxpool = nn.MaxPool2d(kernel_size=(1, 9), stride=(1, 4), padding=(0, 4))
def forward(self, inputs):
x = self.conv1(inputs)
# x = self.bn1(x)
x = self.relu(x)
# x = self.maxpool(x)
return x
class BasicBlock(nn.Module):
"""
build conv block for resnet18 architecture
"""
expansion = 1
def __init__(self, conv_type, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv_type(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv_type(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class FeatResNet(nn.Module):
def __init__(
self, in_channels, init_stage, block, conv,
channels=[64, 128, 256, 512], # default resnet stage channel settings
layers=[2, 2, 2, 2], # default resnet18 layers setting
out_spatial=(512, 1),
zero_init_residual=False
):
super(FeatResNet, self).__init__()
        assert len(conv) == 2, "conv should be a list containing <conv3x3, conv1x1> or <conv1x9, conv1x1> functions"
self.inplanes = channels[0]
self.conv_3x3 = conv[0]
self.conv_1x1 = conv[1]
self.init_stage = init_stage(in_channels, channels[0])
self.layer1 = self._make_layer(block, channels[0], layers[0])
self.layer2 = self._make_layer(block, channels[1], layers[1], stride=2)
self.layer3 = self._make_layer(block, channels[2], layers[2], stride=2)
self.layer4 = self._make_layer(block, channels[3], layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d(out_spatial)
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
# elif isinstance(m, nn.BatchNorm2d):
# nn.init.constant_(m.weight, 1)
# nn.init.constant_(m.bias, 0)
if zero_init_residual:
for m in self.modules():
if isinstance(m, block):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
self.conv_1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = [block(self.conv_3x3, self.inplanes, planes, stride, downsample)]
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.conv_3x3, self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.init_stage(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
# x_out = x_avg.view(x_avg.size(0), -1)
return x
class FeatResNet18(nn.Module):
def __init__(self, channels=[64, 128, 256, 512], signal_num=512):
super(FeatResNet18, self).__init__()
self.main_branch = FeatResNet(
in_channels=2, init_stage=InitStage,
block=BasicBlock, conv=[conv1x9, conv1x1],
channels=channels,
out_spatial=(signal_num, 1)
)
out_unit = channels[-1] * signal_num
self.linear1 = nn.Linear(out_unit, 512)
self.leaky_relu = nn.LeakyReLU(inplace=True)
self.dropout = nn.Dropout(0.5)
self.linear2 = nn.Linear(512, 5)
def forward(self, feat_input):
x = self.main_branch(feat_input)
x = x.view(x.size(0), -1)
x = self.linear1(x)
x = self.leaky_relu(x)
x = self.dropout(x)
x = self.linear2(x)
x = torch.sigmoid(x)
return x
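# Usage sketch (assumption, matching the commented test in __main__ below): the input is a
# two-channel feature map of shape (batch, 2, signal_num, width); the 1x25-kernel ResNet
# branch pools it to (batch, 512, signal_num, 1) before the fully connected regressor, e.g.
#   net = FeatResNet18(signal_num=512)
#   feat = torch.randn(1, 2, 512, 80)
#   traits = net(feat)                      # -> (1, 5), sigmoid-squashed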
@NETWORK_REGISTRY.register()
def spectrum_conv_model(cfg):
# return SpectrumConv1D().to(device=torch.device("gpu" if torch.cuda.is_available() else "cpu"))
# sample_channel = 100
return SpectrumConv1D(cfg.MODEL.SPECTRUM_CHANNEL).to(device=device)
@NETWORK_REGISTRY.register()
def spectrum_conv_model2(cfg):
# return SpectrumConv1D().to(device=torch.device("gpu" if torch.cuda.is_available() else "cpu"))
# sample_channel = 100
return SpectrumConv1D2(signal_num=cfg.MODEL.SPECTRUM_CHANNEL).to(device=device)
@NETWORK_REGISTRY.register()
def spectrum_Feat_conv_model(cfg):
# return SpectrumConv1D().to(device=torch.device("gpu" if torch.cuda.is_available() else "cpu"))
# sample_channel = 100
return SpectrumFeatConv1D(channel=cfg.MODEL.SPECTRUM_CHANNEL).to(device=device)
@NETWORK_REGISTRY.register()
def spectrum_feat_resnet18(cfg=None):
return FeatResNet18(signal_num=cfg.MODEL.SPECTRUM_CHANNEL).to(device=device)
if __name__ == "__main__":
# x = torch.randn((1, 2, 512, 80)).cuda()
# model = spectrum_feat_resnet18()
# y = model(x)
# print(y)
# inputs = torch.randn(20, 16, 50)
# m = nn.Conv1d(16, 2, 3, stride=1)
# output = m(inputs)
# print(output.shape)
# target output size of 5
# m = nn.AdaptiveAvgPool1d(1)
# input = torch.randn(1, 64, 8)
# output = m(input)
# print(output.shape)
net = SpectrumConv1D2()
x = torch.randn((1, 2, 512, 180))
y = net(x)
print(y.shape)
import torchvision.models as models
    net = models.GoogLeNet()
DeepPersonality | DeepPersonality-main/dpcv/modeling/networks/interpret_dan.py | import torch
import torch.nn as nn
from dpcv.modeling.networks.dan import make_layers, backbone
from dpcv.modeling.networks.build import NETWORK_REGISTRY
from dpcv.modeling.module.weight_init_helper import initialize_weights
# import torch.utils.model_zoo as model_zoo
class InterpretDAN(nn.Module):
def __init__(self, features, num_classes=5, init_weights=True, return_feat=False, use_sigmoid=True):
super(InterpretDAN, self).__init__()
self.features = features
self.glo_ave_pooling = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512, num_classes)
if init_weights:
initialize_weights(self)
self.return_feature = return_feat
self.use_sigmoid = use_sigmoid
def forward(self, x): # x (2, 3, 244, 244)
x = self.features(x) # x (2, 512, 7, 7)
x = self.glo_ave_pooling(x) # x (2, 512, 1, 1)
feat = x.flatten(1) # feat (2, 512)
x = self.fc(feat)
if self.use_sigmoid:
x = torch.sigmoid(x) # since the regression range always fall in (0, 1)
if self.return_feature:
return x, feat
return x
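# Usage sketch (illustrative): each frame is reduced to a single 512-d descriptor by global
# average pooling before the trait regressor; with return_feat=True that descriptor is
# returned alongside the prediction, e.g.
#   net = InterpretDAN(make_layers(backbone['VGG16'], batch_norm=True), return_feat=True)
#   scores, feat = net(torch.randn(2, 3, 244, 244))   # scores: (2, 5), feat: (2, 512)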
def get_interpret_dan_model(cfg, pretrained=False, **kwargs):
"""Interpret DAN 16-layer model (configuration "VGG16")
Args:
cfg: config for interpret dan model
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
interpret_dan = InterpretDAN(make_layers(backbone['VGG16'], batch_norm=True), **kwargs)
if pretrained:
pretrained_dict = torch.load(cfg.PRE_TRAINED_MODEL)
model_dict = interpret_dan.state_dict()
# 1. filter out unnecessary keys -------------------------------------------------------------------------------
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
# 2. overwrite entries in the existing state dict --------------------------------------------------------------
model_dict.update(pretrained_dict)
interpret_dan.load_state_dict(model_dict)
# load pre_trained model from model_zoo for standard models in pytorch -----------------------------------------
# model.load_state_dict(model_zoo.load_url(model_urls['vgg16']))
interpret_dan.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
return interpret_dan
@NETWORK_REGISTRY.register()
def interpret_dan_model(cfg):
interpret_dan = InterpretDAN(
make_layers(backbone['VGG16'], batch_norm=True), return_feat=cfg.MODEL.RETURN_FEATURE)
interpret_dan.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
return interpret_dan
@NETWORK_REGISTRY.register()
def interpret_dan_model_true_personality(cfg):
interpret_dan = InterpretDAN(
make_layers(backbone['VGG16'], batch_norm=True),
return_feat=cfg.MODEL.RETURN_FEATURE,
use_sigmoid=False
)
interpret_dan.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
return interpret_dan
if __name__ == "__main__":
import os
os.chdir("../../")
    model = InterpretDAN(make_layers(backbone['VGG16'], batch_norm=True)).cuda()
x = torch.randn(2, 3, 244, 244).cuda()
y = model(x)
print(y, y.shape)
| 3,343 | 38.341176 | 120 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/networks/audio_interpretability_net.py | import torch
import torch.nn as nn
from dpcv.modeling.module.weight_init_helper import initialize_weights
from .build import NETWORK_REGISTRY
class AudioInterpretNet(nn.Module):
def __init__(self, init_weights=True):
super().__init__()
self.conv_block_1 = nn.Sequential(
nn.Conv1d(1, 64, kernel_size=8, stride=1, padding=1),
nn.Sigmoid(),
nn.MaxPool1d(10),
nn.Dropout(0.1),
)
self.conv_block_2 = nn.Sequential(
nn.Conv1d(64, 128, kernel_size=6, stride=1, padding=1),
nn.Sigmoid(),
nn.MaxPool1d(8),
nn.Dropout(0.5),
)
self.conv_block_3 = nn.Sequential(
nn.Conv1d(128, 256, kernel_size=6, stride=1, padding=1),
nn.Sigmoid(),
nn.MaxPool1d(8),
nn.Dropout(0.5),
)
self.gap = nn.AdaptiveAvgPool1d(1)
self.fc = nn.Linear(256, 5)
if init_weights:
initialize_weights(self)
def forward(self, x):
x = self.conv_block_1(x)
x = self.conv_block_2(x)
x = self.conv_block_3(x)
x = self.gap(x)
x = x.flatten(1)
x = self.fc(x)
return x
def get_model(cfg, pretrained=False, **kwargs):
model = AudioInterpretNet()
model.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
return model
@NETWORK_REGISTRY.register()
def interpret_audio_model(cfg):
model = AudioInterpretNet()
model.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
return model
if __name__ == "__main__":
    dummy = torch.randn((2, 1, 30604))
    model = AudioInterpretNet()
    out = model(dummy)
print(out.shape)
| 1,740 | 26.634921 | 81 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/networks/excitation_bp_rnn.py | import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision.models.utils import load_state_dict_from_url
model_urls = {
'alexnet': 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',
}
class MultiLSTMCellRelu(nn.Module):
def __init__(self, input_size, hidden_size, nlayers=1, use_dropout=None, dropout=0.5):
""""Constructor of the class"""
super(LSTMCellRelu, self).__init__()
self.nlayers = nlayers
self.use_dropout = use_dropout
self.dropout = nn.Dropout(p=dropout)
ih, hh = [], []
        for i in range(nlayers):
            # layers above the first consume the previous layer's hidden state instead of the raw input
            ih.append(nn.Linear(input_size if i == 0 else hidden_size, 4 * hidden_size))
hh.append(nn.Linear(hidden_size, 4 * hidden_size))
self.w_ih = nn.ModuleList(ih)
self.w_hh = nn.ModuleList(hh)
def forward(self, input, hidden):
""""Defines the forward computation of the LSTMCell"""
hy, cy = [], []
for i in range(self.nlayers):
hx, cx = hidden[0][i], hidden[1][i]
gates = self.w_ih[i](input) + self.w_hh[i](hx)
i_gate, f_gate, c_gate, o_gate = gates.chunk(4, 1)
i_gate = torch.sigmoid(i_gate)
f_gate = torch.sigmoid(f_gate)
c_gate = torch.relu(c_gate)
o_gate = torch.sigmoid(o_gate)
ncx = (f_gate * cx) + (i_gate * c_gate)
nhx = o_gate * torch.relu(ncx)
cy.append(ncx)
hy.append(nhx)
            # the hidden state feeds the next layer, with optional dropout in between
            input = self.dropout(nhx) if self.use_dropout else nhx
hy, cy = torch.stack(hy, 0), torch.stack(cy, 0)
return hy, cy
class LSTMCellRelu(nn.Module):
def __init__(self, input_size, hidden_size):
super(LSTMCellRelu, self).__init__()
self.w_ih = nn.Linear(input_size, 4 * hidden_size)
self.w_hh = nn.Linear(hidden_size, 4 * hidden_size)
def forward(self, input, hidden):
""""Defines the forward computation of the LSTMCell"""
hx, cx = hidden[0], hidden[1]
gates = self.w_ih(input) + self.w_hh(hx)
i_gate, f_gate, c_gate, o_gate = gates.chunk(4, 1)
i_gate = torch.sigmoid(i_gate)
f_gate = torch.sigmoid(f_gate)
c_gate = torch.relu(c_gate)
o_gate = torch.sigmoid(o_gate)
cy = (f_gate * cx) + (i_gate * c_gate)
hy = o_gate * torch.relu(cy)
return hy, cy
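# Usage sketch (illustrative): an LSTM cell that replaces the tanh activations on the cell
# state with ReLU, stepped manually over time, e.g.
#   cell = LSTMCellRelu(input_size=4096, hidden_size=2048)
#   hx, cx = torch.zeros(1, 2048), torch.zeros(1, 2048)
#   for frame_feat in torch.randn(10, 1, 4096):   # 10 time steps
#       hx, cx = cell(frame_feat, (hx, cx))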
class AlexNet(nn.Module):
def __init__(self, num_classes=1000):
super(AlexNet, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(64, 192, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(256 * 6 * 6, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
# nn.Linear(4096, num_classes),
)
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
def alexnet(pretrained=False, progress=True, **kwargs):
r"""AlexNet model architecture from the
`"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
model = AlexNet(**kwargs)
if pretrained:
pretrained_dict = load_state_dict_from_url(model_urls['alexnet'], progress=progress)
model_dict = model.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
return model
class AlexNetLSTM(nn.Module):
def __init__(self):
super(AlexNetLSTM, self).__init__()
self.extractor = alexnet(pretrained=True)
self.lstm_cell = LSTMCellRelu(4096, 2048)
self.classifier = nn.Linear(2048, 2)
self.hx = Variable(torch.randn(1, 2048)).cuda()
self.cx = Variable(torch.randn(1, 2048)).cuda()
def forward(self, x):
x = self.extractor(x)
        # (time_step=1, batch_size=1, input_dim=4096, hidden_dim=2048)
self.hx, self.cx = self.lstm_cell(x, (self.hx, self.cx))
y = self.classifier(self.hx)
return y
| 5,010 | 33.088435 | 92 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/networks/TSN2D.py | from dpcv.modeling.module.tpn.base import BaseRecognizer
from dpcv.modeling.module.tpn import resnet_mm, cls_head_module, simple_consensus, simple_spatial_module, tpn
from .build import NETWORK_REGISTRY
import torch
from torch.autograd import Variable
args = {
# 'type': 'TSN2D',
'backbone': {
# 'type': 'ResNet',
'pretrained': None,
'depth': 50,
'nsegments': 8,
'out_indices': (2, 3),
'tsm': True,
'bn_eval': False,
'partial_bn': False
},
'necks': {
# 'type': 'TPN',
'in_channels': [1024, 2048],
'out_channels': 1024,
'spatial_modulation_config': {'inplanes': [1024, 2048], 'planes': 2048},
'temporal_modulation_config': {
'scales': (16, 16),
'param': {'inplanes': -1, 'planes': -1, 'downsample_scale': -1}
},
'upsampling_config': {'scale': (1, 1, 1)},
'downsampling_config': {
'scales': (1, 1, 1),
'param': {'inplanes': -1, 'planes': -1, 'downsample_scale': -1}
},
'level_fusion_config': {
'in_channels': [1024, 1024],
'mid_channels': [1024, 1024],
'out_channels': 2048,
'ds_scales': [(1, 1, 1), (1, 1, 1)]
},
'aux_head_config': {'inplanes': -1, 'planes': 5, 'loss_weight': 0.5}
},
'spatial_temporal_module': {
# 'type': 'SimpleSpatialModule',
'spatial_type': 'avg',
'spatial_size': 8
},
'segmental_consensus': {
# 'type': 'SimpleConsensus',
'consensus_type': 'avg'
},
'cls_head': {
# 'type': 'ClsHead',
'with_avg_pool': False,
'temporal_feature_size': 1,
'spatial_feature_size': 1,
'dropout_ratio': 0.5,
'in_channels': 2048,
'num_classes': 5
},
}
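# Note (added for readability): each top-level key above is passed verbatim to the matching
# constructor in TSN2D.__init__ -- 'backbone' to resnet_mm.ResNet, 'necks' to tpn.TPN,
# 'spatial_temporal_module' to SimpleSpatialModule, 'segmental_consensus' to SimpleConsensus
# and 'cls_head' to ClsHead.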
class TSN2D(BaseRecognizer):
def __init__(self,
backbone,
necks=None,
modality='RGB',
in_channels=3,
spatial_temporal_module=None,
segmental_consensus=None,
fcn_testing=False,
flip=False,
cls_head=None,
train_cfg=None,
test_cfg=None):
super(TSN2D, self).__init__()
self.backbone = resnet_mm.ResNet(**backbone)
self.modality = modality
self.in_channels = in_channels
if necks is not None:
self.necks = tpn.TPN(**necks)
else:
self.necks = None
if spatial_temporal_module is not None:
self.spatial_temporal_module = simple_spatial_module.SimpleSpatialModule(
**spatial_temporal_module
)
else:
raise NotImplementedError
if segmental_consensus is not None:
self.segmental_consensus = simple_consensus.SimpleConsensus(
**segmental_consensus
)
else:
raise NotImplementedError
if cls_head is not None:
self.cls_head = cls_head_module.ClsHead(**cls_head)
else:
raise NotImplementedError
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.fcn_testing = fcn_testing
self.flip = flip
assert modality in ['RGB', 'Flow', 'RGBDiff']
self.init_weights()
@property
def with_spatial_temporal_module(self):
return hasattr(self, 'spatial_temporal_module') and self.spatial_temporal_module is not None
@property
def with_segmental_consensus(self):
return hasattr(self, 'segmental_consensus') and self.segmental_consensus is not None
@property
def with_cls_head(self):
return hasattr(self, 'cls_head') and self.cls_head is not None
def init_weights(self):
super(TSN2D, self).init_weights()
self.backbone.init_weights()
if self.with_spatial_temporal_module:
self.spatial_temporal_module.init_weights()
if self.with_segmental_consensus:
self.segmental_consensus.init_weights()
if self.with_cls_head:
self.cls_head.init_weights()
if self.necks is not None:
self.necks.init_weights()
def extract_feat(self, img_group):
x = self.backbone(img_group)
return x
def forward_train(
self,
num_modalities,
img_meta,
gt_label,
**kwargs
):
assert num_modalities == 1
img_group = kwargs['img_group_0']
bs = img_group.shape[0]
img_group = img_group.reshape(
(-1, self.in_channels) + img_group.shape[3:])
num_seg = img_group.shape[0] // bs
x = self.extract_feat(img_group)
if self.necks is not None:
x = [each.reshape((-1, num_seg) + each.shape[1:]).transpose(1, 2) for each in x]
x, aux_losses = self.necks(x, gt_label)
x = x.squeeze(2)
num_seg = 1
if self.with_spatial_temporal_module:
x = self.spatial_temporal_module(x)
x = x.reshape((-1, num_seg) + x.shape[1:])
if self.with_segmental_consensus:
x = Variable(x)
x = self.segmental_consensus(x)
x = x.squeeze(1)
# cls_score = self.cls_head(x)
# cls_score.detach().cpu().numpy()
losses = dict()
# if self.with_cls_head:
cls_score = self.cls_head(x)
# gt_label = gt_label.squeeze()
loss_cls = self.cls_head.loss(cls_score, gt_label)
losses.update(loss_cls)
if self.necks is not None:
if aux_losses is not None:
losses.update(aux_losses)
loss_value = 0
for value in losses.values():
loss_value += value
return loss_value, cls_score
# return cls_score
def forward_test(
self,
num_modalities,
img_meta,
**kwargs
):
if not self.fcn_testing:
# 1crop * 1clip
assert num_modalities == 1
img_group = kwargs['img_group_0']
bs = img_group.shape[0]
img_group = img_group.reshape(
(-1, self.in_channels) + img_group.shape[3:])
num_seg = img_group.shape[0] // bs
x = self.extract_feat(img_group)
if self.necks is not None:
x = [each.reshape((-1, num_seg) + each.shape[1:]).transpose(1, 2) for each in x]
x, _ = self.necks(x)
x = x.squeeze(2)
num_seg = 1
if self.with_spatial_temporal_module:
x = self.spatial_temporal_module(x)
x = x.reshape((-1, num_seg) + x.shape[1:])
if self.with_segmental_consensus:
x = self.segmental_consensus(x)
x = x.squeeze(1)
if self.with_cls_head:
x = self.cls_head(x)
return x.cpu().numpy()
else:
# fcn testing
assert num_modalities == 1
img_group = kwargs['img_group_0']
bs = img_group.shape[0]
img_group = img_group.reshape(
(-1, self.in_channels) + img_group.shape[3:])
# standard protocol i.e. 3 crops * 2 clips
num_seg = self.backbone.nsegments * 2
# 3 crops to cover full resolution
num_crops = 3
img_group = img_group.reshape((num_crops, num_seg) + img_group.shape[1:])
x1 = img_group[:, ::2, :, :, :]
x2 = img_group[:, 1::2, :, :, :]
img_group = torch.cat([x1, x2], 0)
num_seg = num_seg // 2
num_clips = img_group.shape[0]
img_group = img_group.view(num_clips * num_seg, img_group.shape[2], img_group.shape[3], img_group.shape[4])
            if self.flip:
                img_group = torch.flip(img_group, [-1])
x = self.extract_feat(img_group)
if self.necks is not None:
x = [each.reshape((-1, num_seg) + each.shape[1:]).transpose(1, 2) for each in x]
x, _ = self.necks(x)
else:
x = x.reshape((-1, num_seg) + x.shape[1:]).transpose(1, 2)
x = self.cls_head(x)
prob = torch.nn.functional.softmax(x.mean([2, 3, 4]), 1).mean(0, keepdim=True).detach().cpu().numpy()
return prob
def get_tpn_model():
model = TSN2D(**args)
return model.to(torch.device("cuda" if torch.cuda.is_available() else "cpu"))
@NETWORK_REGISTRY.register()
def tpn_model(cfg=None):
model = TSN2D(**args)
return model.to(torch.device("cuda" if torch.cuda.is_available() else "cpu"))
if __name__ == "__main__":
model = TSN2D(**args)
xin = torch.randn(4, 16, 3, 256, 256)
label = torch.randn(4, 5)
input = {"num_modalities": [1], "img_group_0": xin, "img_meta": None, "gt_label": label}
y = model(**input)
print(y)
| 8,989 | 30.766784 | 119 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/networks/resnet_3d.py | """
code modified from https://github.com/kenshohara/3D-ResNets-PyTorch.git
"""
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from .build import NETWORK_REGISTRY
from dpcv.modeling.module.weight_init_helper import initialize_weights
def get_inplanes():
return [64, 128, 256, 512]
def conv3x3x3(in_planes, out_planes, stride=1):
return nn.Conv3d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False
)
def conv1x1x1(in_planes, out_planes, stride=1):
return nn.Conv3d(
in_planes,
out_planes,
kernel_size=1,
stride=stride,
bias=False
)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, downsample=None):
super().__init__()
self.conv1 = conv3x3x3(in_planes, planes, stride)
self.bn1 = nn.BatchNorm3d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3x3(planes, planes)
self.bn2 = nn.BatchNorm3d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1, downsample=None):
super().__init__()
self.conv1 = conv1x1x1(in_planes, planes)
self.bn1 = nn.BatchNorm3d(planes)
self.conv2 = conv3x3x3(planes, planes, stride)
self.bn2 = nn.BatchNorm3d(planes)
self.conv3 = conv1x1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm3d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(
self,
block,
layers,
block_inplanes,
n_input_channels=3,
conv1_t_size=7,
conv1_t_stride=1,
no_max_pool=False,
shortcut_type='B',
widen_factor=1.0,
n_classes=5,
init_weights=True,
):
super().__init__()
block_inplanes = [int(x * widen_factor) for x in block_inplanes]
self.in_planes = block_inplanes[0]
self.no_max_pool = no_max_pool
self.conv1 = nn.Conv3d(
n_input_channels,
self.in_planes,
kernel_size=(conv1_t_size, 7, 7),
stride=(conv1_t_stride, 2, 2),
padding=(conv1_t_size // 2, 3, 3),
bias=False
)
self.bn1 = nn.BatchNorm3d(self.in_planes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool3d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, block_inplanes[0], layers[0], shortcut_type)
self.layer2 = self._make_layer(block, block_inplanes[1], layers[1], shortcut_type, stride=2)
self.layer3 = self._make_layer(block, block_inplanes[2], layers[2], shortcut_type, stride=2)
self.layer4 = self._make_layer(block, block_inplanes[3], layers[3], shortcut_type, stride=2)
self.avgpool = nn.AdaptiveAvgPool3d((1, 1, 1))
self.fc = nn.Linear(block_inplanes[3] * block.expansion, n_classes)
if init_weights:
initialize_weights(self)
# for m in self.modules():
# if isinstance(m, nn.Conv3d):
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
# elif isinstance(m, nn.BatchNorm3d):
# nn.init.constant_(m.weight, 1)
# nn.init.constant_(m.bias, 0)
def _downsample_basic_block(self, x, planes, stride):
out = F.avg_pool3d(x, kernel_size=1, stride=stride)
zero_pads = torch.zeros(
out.size(0), planes - out.size(1), out.size(2), out.size(3), out.size(4)
)
if isinstance(out.data, torch.cuda.FloatTensor):
zero_pads = zero_pads.cuda()
out = torch.cat([out.data, zero_pads], dim=1)
return out
def _make_layer(self, block, planes, blocks, shortcut_type, stride=1):
downsample = None
if stride != 1 or self.in_planes != planes * block.expansion:
if shortcut_type == 'A':
downsample = partial(
self._downsample_basic_block,
planes=planes * block.expansion,
stride=stride
)
else:
downsample = nn.Sequential(
conv1x1x1(self.in_planes, planes * block.expansion, stride),
nn.BatchNorm3d(planes * block.expansion)
)
layers = []
layers.append(
block(
in_planes=self.in_planes,
planes=planes,
stride=stride,
downsample=downsample
)
)
self.in_planes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.in_planes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
if not self.no_max_pool:
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def get_3d_resnet_model(model_depth, **kwargs):
assert model_depth in [18, 34, 50, 101, 152]
if model_depth == 18:
model = ResNet(BasicBlock, [2, 2, 2, 2], get_inplanes(), **kwargs)
elif model_depth == 34:
model = ResNet(BasicBlock, [3, 4, 6, 3], get_inplanes(), **kwargs)
elif model_depth == 50:
model = ResNet(Bottleneck, [3, 4, 6, 3], get_inplanes(), **kwargs)
elif model_depth == 101:
model = ResNet(Bottleneck, [3, 4, 23, 3], get_inplanes(), **kwargs)
elif model_depth == 152:
model = ResNet(Bottleneck, [3, 8, 36, 3], get_inplanes(), **kwargs)
return model.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
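# Usage sketch (mirrors the test in __main__ below): clips are laid out channel-first over
# time, i.e. (batch, 3, frames, height, width), e.g.
#   model = get_3d_resnet_model(50)
#   clip = torch.randn(4, 3, 16, 224, 224)
#   traits = model(clip)                    # -> (4, 5)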
@NETWORK_REGISTRY.register()
def resnet50_3d_model(cfg):
model = ResNet(Bottleneck, [3, 4, 6, 3], get_inplanes())
return model.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
if __name__ == "__main__":
model = get_3d_resnet_model(50)
xin = torch.randn(4, 3, 16, 224, 224)
y = model(xin)
print(y.shape)
| 7,256 | 27.912351 | 100 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/networks/cr_net.py | import torch
import torch.nn as nn
from dpcv.modeling.module.bi_modal_resnet_module import AudioVisualResNet, AudInitStage
from dpcv.modeling.module.bi_modal_resnet_module import BiModalBasicBlock, VisInitStage
from dpcv.modeling.module.bi_modal_resnet_module import aud_conv1x9, aud_conv1x1, vis_conv3x3, vis_conv1x1
from dpcv.modeling.module.weight_init_helper import initialize_weights
from .build import NETWORK_REGISTRY
class CRNet(nn.Module):
def __init__(self, only_train_guider=False):
super(CRNet, self).__init__()
self.train_guider = only_train_guider
self.global_img_branch = AudioVisualResNet(
in_channels=3, init_stage=VisInitStage,
block=BiModalBasicBlock, conv=[vis_conv3x3, vis_conv1x1],
layers=[3, 4, 6, 3], # layer setting of resnet34
out_spatial=(2, 2)
)
self.local_img_branch = AudioVisualResNet(
in_channels=3, init_stage=VisInitStage,
block=BiModalBasicBlock, conv=[vis_conv3x3, vis_conv1x1],
layers=[3, 4, 6, 3],
out_spatial=(2, 2)
)
self.audio_branch = AudioVisualResNet(
in_channels=1, init_stage=AudInitStage,
block=BiModalBasicBlock, conv=[aud_conv1x9, aud_conv1x1],
layers=[3, 4, 6, 3],
out_spatial=(1, 4)
)
self.global_cls_guide = nn.Conv2d(512, 20, 2)
self.local_cls_guide = nn.Conv2d(512, 20, 2)
self.wav_cls_guide = nn.Conv2d(512, 20, (1, 4))
self.out_map = nn.Linear(512, 1)
def train_classifier(self):
self.train_guider = True
def train_regressor(self):
self.train_guider = False
def forward(self, global_img, local_img, audio_wav):
glo_feature = self.global_img_branch(global_img)
loc_feature = self.local_img_branch(local_img)
aud_feature = self.audio_branch(audio_wav)
# ---- first training stage class guide -----
glo_cls = self.global_cls_guide(glo_feature)
loc_cls = self.local_cls_guide(loc_feature)
wav_cls = self.wav_cls_guide(aud_feature)
glo_cls = glo_cls.view(glo_cls.size(0), 5, -1)
loc_cls = loc_cls.view(loc_cls.size(0), 5, -1)
wav_cls = wav_cls.view(wav_cls.size(0), 5, -1)
cls_guide = torch.stack([glo_cls + loc_cls + wav_cls], dim=-1).mean(dim=-1).squeeze()
if self.train_guider:
return cls_guide
# --- second training stage guided regress ---
glo_cls_feature = glo_feature.view(glo_feature.size(0), 512, 4).permute(0, 2, 1)
loc_cls_feature = loc_feature.view(loc_feature.size(0), 512, 4).permute(0, 2, 1)
wav_cls_feature = aud_feature.view(aud_feature.size(0), 512, 4).permute(0, 2, 1)
glo_cls_score = torch.softmax(glo_cls, -1)
loc_cls_score = torch.softmax(loc_cls, -1)
wav_cls_score = torch.softmax(wav_cls, -1)
guided_glo_reg = torch.matmul(glo_cls_score, glo_cls_feature) # (_, 5, 4) matmul (_, 4, 512) = (_, 5, 512)
guided_loc_reg = torch.matmul(loc_cls_score, loc_cls_feature) # every dim in axis 1 is a weighted sum of P_i
guided_wav_reg = torch.matmul(wav_cls_score, wav_cls_feature) # where i = {1,2,3,4,5}
out_reg = guided_glo_reg + guided_loc_reg + guided_wav_reg
out = self.out_map(out_reg)
out = out.view(out.size(0), -1)
return cls_guide, out
class CRNet2(nn.Module):
def __init__(self, init_weights=True, return_feat=False):
super(CRNet2, self).__init__()
self.train_guider_epo = 1
self.return_feature = return_feat
self.train_regressor = False
self.global_img_branch = AudioVisualResNet(
in_channels=3, init_stage=VisInitStage,
block=BiModalBasicBlock, conv=[vis_conv3x3, vis_conv1x1],
layers=[3, 4, 6, 3], # layer setting of resnet34
out_spatial=(2, 2)
)
self.local_img_branch = AudioVisualResNet(
in_channels=3, init_stage=VisInitStage,
block=BiModalBasicBlock, conv=[vis_conv3x3, vis_conv1x1],
layers=[3, 4, 6, 3],
out_spatial=(2, 2)
)
self.audio_branch = AudioVisualResNet(
in_channels=1, init_stage=AudInitStage,
block=BiModalBasicBlock, conv=[aud_conv1x9, aud_conv1x1],
layers=[3, 4, 6, 3],
out_spatial=(1, 4)
)
self.global_cls_guide = nn.Conv2d(512, 20, 2)
self.local_cls_guide = nn.Conv2d(512, 20, 2)
self.wav_cls_guide = nn.Conv2d(512, 20, (1, 4))
self.out_map = nn.Linear(512, 1)
if init_weights:
initialize_weights(self)
def set_train_classifier_epo(self, epo):
self.train_guider_epo = epo
def set_train_regressor(self):
self.train_regressor = True
def forward(self, global_img, local_img, audio_wav):
glo_feature = self.global_img_branch(global_img) # (bs, 512, 2, 2)
loc_feature = self.local_img_branch(local_img) # (bs, 512, 2, 2)
aud_feature = self.audio_branch(audio_wav) # (bs, 512, 1, 4)
# ---- first training stage class guide -----
glo_cls = self.global_cls_guide(glo_feature) # (bs, 5, 4)
loc_cls = self.local_cls_guide(loc_feature) # (bs, 5, 4)
wav_cls = self.wav_cls_guide(aud_feature) # (bs, 5, 4)
glo_cls = glo_cls.view(glo_cls.size(0), 5, -1)
loc_cls = loc_cls.view(loc_cls.size(0), 5, -1)
wav_cls = wav_cls.view(wav_cls.size(0), 5, -1)
cls_guide = torch.stack([glo_cls + loc_cls + wav_cls], dim=-1).mean(dim=-1).squeeze()
if not self.train_regressor:
return cls_guide
# --- second training stage guided regress ---
glo_cls_feature = glo_feature.view(glo_feature.size(0), 512, 4).permute(0, 2, 1)
loc_cls_feature = loc_feature.view(loc_feature.size(0), 512, 4).permute(0, 2, 1)
wav_cls_feature = aud_feature.view(aud_feature.size(0), 512, 4).permute(0, 2, 1)
glo_cls_score = torch.softmax(glo_cls, -1)
loc_cls_score = torch.softmax(loc_cls, -1)
wav_cls_score = torch.softmax(wav_cls, -1)
guided_glo_reg = torch.matmul(glo_cls_score, glo_cls_feature) # (_, 5, 4) matmul (_, 4, 512) = (_, 5, 512)
guided_loc_reg = torch.matmul(loc_cls_score, loc_cls_feature) # every dim in axis 1 is a weighted sum of P_i
guided_wav_reg = torch.matmul(wav_cls_score, wav_cls_feature) # where i = {1,2,3,4,5}
out_reg = guided_glo_reg + guided_loc_reg + guided_wav_reg
out = self.out_map(out_reg)
out = out.view(out.size(0), -1)
if self.return_feature:
return cls_guide, out, out_reg
return cls_guide, out
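# Usage sketch (illustrative): CRNet2 is trained in two stages -- it returns only the
# classification guide until set_train_regressor() is called, after which it also returns the
# guided regression output (plus the 512-d guided feature when return_feat=True), e.g.
#   net = CRNet2()
#   cls_guide = net(glo_img, loc_img, wav)            # stage 1: classification guidance
#   net.set_train_regressor()
#   cls_guide, traits = net(glo_img, loc_img, wav)    # stage 2: guided regression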
class CRNetAud(nn.Module):
def __init__(self):
super(CRNetAud, self).__init__()
        self.train_guider_epo = 30  # number of epochs spent on the classification-guidance stage before regression
self.train_regressor = False
self.audio_branch = AudioVisualResNet(
in_channels=1, init_stage=AudInitStage,
block=BiModalBasicBlock, conv=[aud_conv1x9, aud_conv1x1],
layers=[3, 4, 6, 3],
out_spatial=(1, 4)
)
self.wav_cls_guide = nn.Conv2d(512, 20, (1, 4))
self.out_map = nn.Linear(512, 1)
def set_train_classifier_epo(self, epo):
self.train_guider_epo = epo
def set_train_regressor(self):
self.train_regressor = True
def forward(self, audio_wav):
aud_feature = self.audio_branch(audio_wav)
# ---- first training stage class guide -----
wav_cls = self.wav_cls_guide(aud_feature)
wav_cls = wav_cls.view(wav_cls.size(0), 5, -1)
cls_guide = wav_cls # torch.stack([wav_cls], dim=-1).mean(dim=-1).squeeze()
if not self.train_regressor:
return wav_cls
# --- second training stage guided regress ---
wav_cls_feature = aud_feature.view(aud_feature.size(0), 512, 4).permute(0, 2, 1)
wav_cls_score = torch.softmax(wav_cls, -1)
guided_wav_reg = torch.matmul(wav_cls_score, wav_cls_feature) # where i = {1,2,3,4,5}
out_reg = guided_wav_reg
out = self.out_map(out_reg)
out = out.view(out.size(0), -1)
return cls_guide, out
class CRNetVis(nn.Module):
def __init__(self, init_weights=True, return_feat=False):
super(CRNetVis, self).__init__()
self.train_guider_epo = 1
self.return_feature = return_feat
self.train_regressor = False
self.global_img_branch = AudioVisualResNet(
in_channels=3, init_stage=VisInitStage,
block=BiModalBasicBlock, conv=[vis_conv3x3, vis_conv1x1],
layers=[3, 4, 6, 3], # layer setting of resnet34
out_spatial=(2, 2)
)
self.local_img_branch = AudioVisualResNet(
in_channels=3, init_stage=VisInitStage,
block=BiModalBasicBlock, conv=[vis_conv3x3, vis_conv1x1],
layers=[3, 4, 6, 3],
out_spatial=(2, 2)
)
# self.audio_branch = AudioVisualResNet(
# in_channels=1, init_stage=AudInitStage,
# block=BiModalBasicBlock, conv=[aud_conv1x9, aud_conv1x1],
# layers=[3, 4, 6, 3],
# out_spatial=(1, 4)
# )
self.global_cls_guide = nn.Conv2d(512, 20, 2)
self.local_cls_guide = nn.Conv2d(512, 20, 2)
# self.wav_cls_guide = nn.Conv2d(512, 20, (1, 4))
self.out_map = nn.Linear(512, 1)
if init_weights:
initialize_weights(self)
def set_train_classifier_epo(self, epo):
self.train_guider_epo = epo
def set_train_regressor(self):
self.train_regressor = True
def forward(self, global_img, local_img):
glo_feature = self.global_img_branch(global_img) # (bs, 512, 2, 2)
loc_feature = self.local_img_branch(local_img) # (bs, 512, 2, 2)
# aud_feature = self.audio_branch(audio_wav) # (bs, 512, 1, 4)
# ---- first training stage class guide -----
glo_cls = self.global_cls_guide(glo_feature) # (bs, 5, 4)
loc_cls = self.local_cls_guide(loc_feature) # (bs, 5, 4)
# wav_cls = self.wav_cls_guide(aud_feature) # (bs, 5, 4)
glo_cls = glo_cls.view(glo_cls.size(0), 5, -1)
loc_cls = loc_cls.view(loc_cls.size(0), 5, -1)
# wav_cls = wav_cls.view(wav_cls.size(0), 5, -1)
cls_guide = torch.stack([glo_cls + loc_cls], dim=-1).mean(dim=-1).squeeze()
if not self.train_regressor:
return cls_guide
# --- second training stage guided regress ---
glo_cls_feature = glo_feature.view(glo_feature.size(0), 512, 4).permute(0, 2, 1)
loc_cls_feature = loc_feature.view(loc_feature.size(0), 512, 4).permute(0, 2, 1)
# wav_cls_feature = aud_feature.view(aud_feature.size(0), 512, 4).permute(0, 2, 1)
glo_cls_score = torch.softmax(glo_cls, -1)
loc_cls_score = torch.softmax(loc_cls, -1)
# wav_cls_score = torch.softmax(wav_cls, -1)
guided_glo_reg = torch.matmul(glo_cls_score, glo_cls_feature) # (_, 5, 4) matmul (_, 4, 512) = (_, 5, 512)
guided_loc_reg = torch.matmul(loc_cls_score, loc_cls_feature) # every dim in axis 1 is a weighted sum of P_i
# guided_wav_reg = torch.matmul(wav_cls_score, wav_cls_feature) # where i = {1,2,3,4,5}
out_reg = guided_glo_reg + guided_loc_reg # + guided_wav_reg
out = self.out_map(out_reg)
out = out.view(out.size(0), -1)
if self.return_feature:
return cls_guide, out, out_reg
return cls_guide, out
def get_crnet_model(only_train_guider=True):
cr_net = CRNet(only_train_guider)
cr_net.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
return cr_net
@NETWORK_REGISTRY.register()
def crnet_model(cfg=None):
cr_net = CRNet2(return_feat=cfg.MODEL.RETURN_FEATURE)
return cr_net.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
@NETWORK_REGISTRY.register()
def get_crnet_aud_model(cfg):
cr_net_aud = CRNetAud()
return cr_net_aud.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
@NETWORK_REGISTRY.register()
def get_crnet_vis_model(cfg):
cr_net_vis = CRNetVis()
return cr_net_vis.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
if __name__ == "__main__":
import torch
global_img_input = torch.randn(2, 3, 224, 224)
local_img_input = torch.randn(2, 3, 112, 112)
wav_input = torch.randn(2, 1, 1, 244832)
# model = CRNet(only_train_guider=True)
# y = model(global_img_input, local_img_input, wav_input)
# model = CRNet2()
model = CRNetVis()
# y = model(global_img_input, local_img_input, wav_input)
    y = model(global_img_input, local_img_input)
print(y.shape)
model.set_train_regressor()
# cls, reg = model(global_img_input, local_img_input, wav_input)
cls, reg = model(global_img_input, local_img_input)
# cls, reg = model(wav_input)
print(cls.shape, reg.shape)
| 13,169 | 40.677215 | 117 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/networks/TSN3D.py | from dpcv.modeling.module.tpn.base import BaseRecognizer
from dpcv.modeling import builder
import torch
class TSN3D(BaseRecognizer):
def __init__(self,
backbone,
necks=None,
spatial_temporal_module=None,
segmental_consensus=None,
fcn_testing=False,
flip=False,
cls_head=None,
train_cfg=None,
test_cfg=None):
super(TSN3D, self).__init__()
self.backbone = builder.build_backbone(backbone)
if necks is not None:
self.necks = builder.build_neck(necks)
else:
self.necks = None
if spatial_temporal_module is not None:
self.spatial_temporal_module = builder.build_spatial_temporal_module(
spatial_temporal_module)
else:
raise NotImplementedError
if segmental_consensus is not None:
self.segmental_consensus = builder.build_segmental_consensus(
segmental_consensus)
else:
raise NotImplementedError
if cls_head is not None:
self.cls_head = builder.build_head(cls_head)
else:
raise NotImplementedError
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.fcn_testing = fcn_testing
self.flip = flip
self.init_weights()
@property
def with_spatial_temporal_module(self):
return hasattr(self, 'spatial_temporal_module') and self.spatial_temporal_module is not None
@property
def with_segmental_consensus(self):
return hasattr(self, 'segmental_consensus') and self.segmental_consensus is not None
@property
def with_cls_head(self):
return hasattr(self, 'cls_head') and self.cls_head is not None
def init_weights(self):
super(TSN3D, self).init_weights()
self.backbone.init_weights()
if self.with_spatial_temporal_module:
self.spatial_temporal_module.init_weights()
if self.with_segmental_consensus:
self.segmental_consensus.init_weights()
if self.with_cls_head:
self.cls_head.init_weights()
if self.necks is not None:
self.necks.init_weights()
def extract_feat(self, img_group):
x = self.backbone(img_group)
return x
def forward_train(self,
num_modalities,
img_meta,
gt_label,
**kwargs):
assert num_modalities == 1
img_group = kwargs['img_group_0']
bs = img_group.shape[0]
img_group = img_group.reshape((-1,) + img_group.shape[2:])
num_seg = img_group.shape[0] // bs
x = self.extract_feat(img_group)
if self.necks is not None:
x, aux_losses = self.necks(x, gt_label.squeeze())
if self.with_spatial_temporal_module:
x = self.spatial_temporal_module(x)
if self.with_segmental_consensus:
x = x.reshape((-1, num_seg) + x.shape[1:])
x = self.segmental_consensus(x)
x = x.squeeze(1)
losses = dict()
if self.with_cls_head:
cls_score = self.cls_head(x)
gt_label = gt_label.squeeze()
loss_cls = self.cls_head.loss(cls_score, gt_label)
losses.update(loss_cls)
if self.necks is not None:
if aux_losses is not None:
losses.update(aux_losses)
return losses
def forward_test(self,
num_modalities,
img_meta,
**kwargs):
assert num_modalities == 1
img_group = kwargs['img_group_0']
bs = img_group.shape[0]
img_group = img_group.reshape((-1,) + img_group.shape[2:])
num_seg = img_group.shape[0] // bs
        if self.flip:
            img_group = torch.flip(img_group, [-1])
x = self.extract_feat(img_group)
if self.necks is not None:
x, _ = self.necks(x)
if self.fcn_testing:
if self.with_cls_head:
x = self.cls_head(x)
prob1 = torch.nn.functional.softmax(x.mean([2, 3, 4]), 1).mean(0, keepdim=True).detach().cpu().numpy()
return prob1
if self.with_spatial_temporal_module:
x = self.spatial_temporal_module(x)
if self.with_segmental_consensus:
x = x.reshape((-1, num_seg) + x.shape[1:])
x = self.segmental_consensus(x)
x = x.squeeze(1)
if self.with_cls_head:
x = self.cls_head(x)
return x.cpu().numpy()
| 4,712 | 30.42 | 118 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/networks/video_action_transformer.py | """
code modified from https://github.com/ppriyank/Video-Action-Transformer-Network-Pytorch-.git
"""
import torch
import torch.nn.functional as F
from torch import nn
import math
import torchvision
from torch.autograd import Variable
from dpcv.modeling.module.weight_init_helper import initialize_weights
from .build import NETWORK_REGISTRY
class FeedForward(nn.Module):
""" Standard 2 layer FFN of transformer
"""
def __init__(self, d_model, d_ff=2048, dropout=0.3):
super(FeedForward, self).__init__()
# We set d_ff as a default to 2048
self.linear_1 = nn.Linear(d_model, d_ff)
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(d_ff, d_model)
nn.init.normal_(self.linear_1.weight, std=0.001)
nn.init.normal_(self.linear_2.weight, std=0.001)
def forward(self, x):
x = self.dropout(F.relu(self.linear_1(x)))
x = self.linear_2(x)
return x
class Norm(nn.Module):
""" Standard NORM layer of Transformer
"""
def __init__(self, d_model, eps=1e-6, trainable=False):
super(Norm, self).__init__()
self.size = d_model
# create two learnable parameters to calibrate normalisation
if trainable:
self.alpha = nn.Parameter(torch.ones(self.size))
self.bias = nn.Parameter(torch.zeros(self.size))
else:
self.alpha = nn.Parameter(torch.ones(self.size), requires_grad=False)
self.bias = nn.Parameter(torch.zeros(self.size), requires_grad=False)
self.eps = eps
def forward(self, x):
norm = self.alpha * (x - x.mean(dim=-1, keepdim=True)) \
/ (x.std(dim=-1, keepdim=True) + self.eps) + self.bias
return norm
class PositionalEncoder(nn.Module):
""" Standard positional encoding (addition/ concat both are valid)
"""
def __init__(self, d_model, max_seq_len=80):
super(PositionalEncoder, self).__init__()
self.d_model = d_model
pe = torch.zeros(max_seq_len, d_model)
for pos in range(max_seq_len):
for i in range(0, d_model, 2):
pe[pos, i] = \
math.sin(pos / (10000 ** ((2 * i) / d_model)))
pe[pos, i + 1] = \
math.cos(pos / (10000 ** ((2 * (i + 1)) / d_model)))
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
# make embeddings relatively larger
x = x * math.sqrt(self.d_model)
# add constant to embedding
seq_len = x.size(1)
batch_size = x.size(0)
num_feature = x.size(2)
spatial_h = x.size(3)
spatial_w = x.size(4)
z = Variable(self.pe[:, :seq_len], requires_grad=False)
z = z.unsqueeze(-1).unsqueeze(-1)
z = z.expand(batch_size, seq_len, num_feature, spatial_h, spatial_w)
x = x + z
return x
def attention(q, k, v, d_k, mask=None, dropout=None):
# standard attention layer
scores = torch.sum(q * k, -1) / math.sqrt(d_k)
# scores : b, t
scores = F.softmax(scores, dim=-1)
scores = scores.unsqueeze(-1).expand(scores.size(0), scores.size(1), v.size(-1))
# scores : b, t, dim
output = scores * v
output = torch.sum(output, 1)
if dropout:
output = dropout(output)
return output
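# Shape sketch (illustrative): q is the query for the middle frame expanded over time, k/v hold
# all frames; the per-frame scores are softmax-normalised over time and used to average v, e.g.
#   q = torch.randn(2, 10, 64); k = torch.randn(2, 10, 64); v = torch.randn(2, 10, 64)
#   out = attention(q, k, v, d_k=64)        # -> (2, 64), one attended vector per sample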
class TX(nn.Module):
def __init__(self, d_model=64, dropout=0.3):
super(TX, self).__init__()
self.d_model = d_model
        # the number of heads is chosen so that the 1024-d feature dimension splits evenly across them
self.dropout = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
self.norm_1 = Norm(d_model)
self.norm_2 = Norm(d_model)
self.ff = FeedForward(d_model, d_ff=int(d_model / 2))
def forward(self, q, k, v, mask=None):
# q: (b , dim )
b = q.size(0)
t = k.size(1)
dim = q.size(1)
q_temp = q.unsqueeze(1)
q_temp = q_temp.expand(b, t, dim)
# q,k,v : (b, t , d_model=1024 // 16 )
A = attention(q_temp, k, v, self.d_model, mask, self.dropout)
# A : (b , d_model=1024 // 16 )
q_ = self.norm_1(A + q)
new_query = self.norm_2(q_ + self.dropout_2(self.ff(q_)))
return new_query
class BlockHead(nn.Module):
def __init__(self, d_model=64, dropout=0.3):
super(BlockHead, self).__init__()
self.T1 = TX()
self.T2 = TX()
self.T3 = TX()
def forward(self, q, k, v, mask=None):
q = self.T1(q, k, v)
q = self.T2(q, k, v)
q = self.T3(q, k, v)
return q
class Tail(nn.Module):
def __init__(self, num_classes, num_frames, head=16):
super(Tail, self).__init__()
self.spatial_h = 7
self.spatial_w = 4
self.head = head
self.num_features = 2048
self.num_frames = num_frames
self.d_model = int(self.num_features / 2)
self.d_k = self.d_model // self.head
self.bn1 = nn.BatchNorm2d(self.num_features)
self.bn2 = Norm(self.d_model, trainable=False)
self.pos_embedding = PositionalEncoder(self.num_features, self.num_frames)
self.Qpr = nn.Conv2d(self.num_features, self.d_model, kernel_size=(7, 4), stride=1, padding=0, bias=False)
self.head_layers = []
for i in range(self.head):
self.head_layers.append(BlockHead())
self.list_layers = nn.ModuleList(self.head_layers)
self.classifier = nn.Linear(self.d_model, num_classes)
# resnet style initialization
nn.init.kaiming_normal_(self.Qpr.weight, mode='fan_out')
nn.init.normal_(self.classifier.weight, std=0.001)
# nn.init.constant(self.classifier.bias, 0)
nn.init.constant_(self.bn1.weight, 1)
nn.init.constant_(self.bn1.bias, 0)
def forward(self, x, b, t):
x = self.bn1(x)
# stabilizes the learning
x = x.view(b, t, self.num_features, self.spatial_h, self.spatial_w)
x = self.pos_embedding(x)
x = x.view(-1, self.num_features, self.spatial_h, self.spatial_w)
x = F.relu(self.Qpr(x))
        # x: (b, t, 1024, 1, 1); since Qpr is a convolution, no extra spatial positional encoding is added here
        # the paper uses a different base network: this ResNet base yields 2048 x 7 x 4 feature maps rather than 16 x 7 x 7
x = x.view(-1, t, self.d_model)
x = self.bn2(x)
# stabilization
q = x[:, int(t / 2), :] # middle frame is the query
v = x # value
k = x # key
q = q.view(b, self.head, self.d_k)
k = k.view(b, t, self.head, self.d_k)
v = v.view(b, t, self.head, self.d_k)
k = k.transpose(1, 2)
v = v.transpose(1, 2)
# q: b, 16, 64
# k,v: b, 16, 10 ,64
outputs = []
for i in range(self.head):
outputs.append(self.list_layers[i](q[:, i], k[:, i], v[:, i]))
f = torch.cat(outputs, 1)
f = F.normalize(f, p=2, dim=1)
# if not self.training:
# return f
y = self.classifier(f)
return y
class SemiTransformer(nn.Module):
""" Base is resnet tail is the main transformer network
"""
def __init__(self, num_classes, seq_len, init_weights=True): # seq_len --> num_frames
super(SemiTransformer, self).__init__()
resnet50 = torchvision.models.resnet50(pretrained=True)
self.base = nn.Sequential(*list(resnet50.children())[:-2])
self.tail = Tail(num_classes, seq_len)
if init_weights:
initialize_weights(self)
def forward(self, x):
b = x.size(0)
t = x.size(1)
x = x.view(b * t, x.size(2), x.size(3), x.size(4))
x = self.base(x)
# x: (b,t,2048,7,4)
return self.tail(x, b, t)
def get_vat_model():
model = SemiTransformer(num_classes=5, seq_len=16)
return model.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
@NETWORK_REGISTRY.register()
def vat_model(cfg=None):
model = SemiTransformer(num_classes=5, seq_len=32)
return model.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
if __name__ == "__main__":
vat_model = SemiTransformer(num_classes=5, seq_len=16)
x_in = torch.randn(4, 16, 3, 224, 112)
y_out = vat_model(x_in)
print(y_out.shape)
| 8,339 | 32.629032 | 114 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/networks/hr_net_cls.py | # ------------------------------------------------------------------------------
# code modified form https://github.com/HRNet/HRNet-Image-Classification
# ------------------------------------------------------------------------------
from easydict import EasyDict
import os
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from dpcv.modeling.module.weight_init_helper import initialize_weights
from .build import NETWORK_REGISTRY
hr_net_cfg = EasyDict(
{'MODEL': {'EXTRA': {'STAGE1': {'BLOCK': 'BOTTLENECK',
'FUSE_METHOD': 'SUM',
'NUM_BLOCKS': [4],
'NUM_CHANNELS': [64],
'NUM_MODULES': 1,
                                    'NUM_BRANCHES': 1},
'STAGE2': {'BLOCK': 'BASIC',
'FUSE_METHOD': 'SUM',
'NUM_BLOCKS': [4, 4],
'NUM_BRANCHES': 2,
'NUM_CHANNELS': [18, 36],
'NUM_MODULES': 1},
'STAGE3': {'BLOCK': 'BASIC',
'FUSE_METHOD': 'SUM',
'NUM_BLOCKS': [4, 4, 4],
'NUM_BRANCHES': 3,
'NUM_CHANNELS': [18, 36, 72],
'NUM_MODULES': 4},
'STAGE4': {'BLOCK': 'BASIC',
'FUSE_METHOD': 'SUM',
'NUM_BLOCKS': [4, 4, 4, 4],
'NUM_BRANCHES': 4,
'NUM_CHANNELS': [18, 36, 72, 144],
'NUM_MODULES': 3}},
'HEATMAP_SIZE': [64, 64],
'IMAGE_SIZE': [224, 224],
'INIT_WEIGHTS': True,
'NAME': 'cls_hrnet',
'NUM_CLASSES': 5,
'NUM_JOINTS': 17,
'PRETRAINED': '',
'SIGMA': 2,
'TAG_PER_JOINT': True,
'TARGET_TYPE': 'gaussian'}
}
)
BN_MOMENTUM = 0.1
logger = logging.getLogger(__name__)
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class HighResolutionModule(nn.Module):
def __init__(self, num_branches, blocks, num_blocks, num_inchannels,
num_channels, fuse_method, multi_scale_output=True):
super(HighResolutionModule, self).__init__()
self._check_branches(
num_branches, blocks, num_blocks, num_inchannels, num_channels
)
self.num_inchannels = num_inchannels
self.fuse_method = fuse_method
self.num_branches = num_branches
self.multi_scale_output = multi_scale_output
self.branches = self._make_branches(
num_branches, blocks, num_blocks, num_channels
)
self.fuse_layers = self._make_fuse_layers()
self.relu = nn.ReLU(False)
def _check_branches(self, num_branches, blocks, num_blocks, num_inchannels, num_channels):
if num_branches != len(num_blocks):
error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(num_branches, len(num_blocks))
logger.error(error_msg)
raise ValueError(error_msg)
if num_branches != len(num_channels):
error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(num_branches, len(num_channels))
logger.error(error_msg)
raise ValueError(error_msg)
if num_branches != len(num_inchannels):
error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(num_branches, len(num_inchannels))
logger.error(error_msg)
raise ValueError(error_msg)
def _make_one_branch(self, branch_index, block, num_blocks, num_channels, stride=1):
downsample = None
if stride != 1 or self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(
self.num_inchannels[branch_index],
num_channels[branch_index] * block.expansion,
kernel_size=1, stride=stride, bias=False
),
nn.BatchNorm2d(num_channels[branch_index] * block.expansion, momentum=BN_MOMENTUM),
)
layers = []
layers.append(
block(
self.num_inchannels[branch_index], num_channels[branch_index],
stride,
downsample
)
)
self.num_inchannels[branch_index] = num_channels[branch_index] * block.expansion
for i in range(1, num_blocks[branch_index]):
layers.append(block(self.num_inchannels[branch_index], num_channels[branch_index]))
return nn.Sequential(*layers)
def _make_branches(self, num_branches, block, num_blocks, num_channels):
branches = []
for i in range(num_branches):
branches.append(
self._make_one_branch(i, block, num_blocks, num_channels))
return nn.ModuleList(branches)
def _make_fuse_layers(self):
if self.num_branches == 1:
return None
num_branches = self.num_branches
num_inchannels = self.num_inchannels
fuse_layers = []
for i in range(num_branches if self.multi_scale_output else 1):
fuse_layer = []
for j in range(num_branches):
if j > i:
fuse_layer.append(
nn.Sequential(
nn.Conv2d(num_inchannels[j], num_inchannels[i], 1, 1, 0, bias=False),
nn.BatchNorm2d(num_inchannels[i], momentum=BN_MOMENTUM),
nn.Upsample(scale_factor=2 ** (j - i), mode='nearest')
)
)
elif j == i:
fuse_layer.append(None)
else:
conv3x3s = []
for k in range(i - j):
if k == i - j - 1:
num_outchannels_conv3x3 = num_inchannels[i]
conv3x3s.append(
nn.Sequential(
nn.Conv2d(num_inchannels[j], num_outchannels_conv3x3, 3, 2, 1, bias=False),
nn.BatchNorm2d(num_outchannels_conv3x3, momentum=BN_MOMENTUM)
)
)
else:
num_outchannels_conv3x3 = num_inchannels[j]
conv3x3s.append(
nn.Sequential(
nn.Conv2d(num_inchannels[j], num_outchannels_conv3x3, 3, 2, 1, bias=False),
nn.BatchNorm2d(num_outchannels_conv3x3, momentum=BN_MOMENTUM),
nn.ReLU(False)
)
)
fuse_layer.append(nn.Sequential(*conv3x3s))
fuse_layers.append(nn.ModuleList(fuse_layer))
return nn.ModuleList(fuse_layers)
def get_num_inchannels(self):
return self.num_inchannels
def forward(self, x):
if self.num_branches == 1:
return [self.branches[0](x[0])]
for i in range(self.num_branches):
x[i] = self.branches[i](x[i])
x_fuse = []
for i in range(len(self.fuse_layers)):
y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])
for j in range(1, self.num_branches):
if i == j:
y = y + x[j]
else:
y = y + self.fuse_layers[i][j](x[j])
x_fuse.append(self.relu(y))
return x_fuse
blocks_dict = {
'BASIC': BasicBlock,
'BOTTLENECK': Bottleneck
}
class HighResolutionNet(nn.Module):
def __init__(self, cfg, init_weights=True, normalize_output=True, return_feature=False, **kwargs):
super(HighResolutionNet, self).__init__()
self.normalize_output = normalize_output
self.return_feature = return_feature
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.stage1_cfg = cfg['MODEL']['EXTRA']['STAGE1']
num_channels = self.stage1_cfg['NUM_CHANNELS'][0]
block = blocks_dict[self.stage1_cfg['BLOCK']]
num_blocks = self.stage1_cfg['NUM_BLOCKS'][0]
self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)
stage1_out_channel = block.expansion * num_channels
self.stage2_cfg = cfg['MODEL']['EXTRA']['STAGE2']
num_channels = self.stage2_cfg['NUM_CHANNELS']
block = blocks_dict[self.stage2_cfg['BLOCK']]
num_channels = [
num_channels[i] * block.expansion for i in range(len(num_channels))
]
self.transition1 = self._make_transition_layer(
[stage1_out_channel], num_channels
)
self.stage2, pre_stage_channels = self._make_stage(
self.stage2_cfg, num_channels
)
self.stage3_cfg = cfg['MODEL']['EXTRA']['STAGE3']
num_channels = self.stage3_cfg['NUM_CHANNELS']
block = blocks_dict[self.stage3_cfg['BLOCK']]
num_channels = [
num_channels[i] * block.expansion for i in range(len(num_channels))
]
self.transition2 = self._make_transition_layer(
pre_stage_channels, num_channels
)
self.stage3, pre_stage_channels = self._make_stage(
self.stage3_cfg, num_channels
)
self.stage4_cfg = cfg['MODEL']['EXTRA']['STAGE4']
num_channels = self.stage4_cfg['NUM_CHANNELS']
block = blocks_dict[self.stage4_cfg['BLOCK']]
num_channels = [
num_channels[i] * block.expansion for i in range(len(num_channels))
]
self.transition3 = self._make_transition_layer(
pre_stage_channels, num_channels
)
self.stage4, pre_stage_channels = self._make_stage(
self.stage4_cfg, num_channels, multi_scale_output=True
)
# Classification Head
self.incre_modules, self.downsamp_modules, \
self.final_layer = self._make_head(pre_stage_channels)
self.classifier = nn.Linear(2048, cfg["MODEL"]["NUM_CLASSES"])
if init_weights:
initialize_weights(self)
def _make_head(self, pre_stage_channels):
head_block = Bottleneck
head_channels = [32, 64, 128, 256]
# Increasing the #channels on each resolution
# from C, 2C, 4C, 8C to 128, 256, 512, 1024
incre_modules = []
for i, channels in enumerate(pre_stage_channels):
incre_module = self._make_layer(
head_block,
channels,
head_channels[i],
1,
stride=1
)
incre_modules.append(incre_module)
incre_modules = nn.ModuleList(incre_modules)
# downsampling modules
downsamp_modules = []
for i in range(len(pre_stage_channels) - 1):
in_channels = head_channels[i] * head_block.expansion
out_channels = head_channels[i + 1] * head_block.expansion
downsamp_module = nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=2, padding=1),
nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM),
nn.ReLU(inplace=True)
)
downsamp_modules.append(downsamp_module)
downsamp_modules = nn.ModuleList(downsamp_modules)
final_layer = nn.Sequential(
nn.Conv2d(
in_channels=head_channels[3] * head_block.expansion,
out_channels=2048, kernel_size=1, stride=1, padding=0
),
nn.BatchNorm2d(2048, momentum=BN_MOMENTUM),
nn.ReLU(inplace=True)
)
return incre_modules, downsamp_modules, final_layer
def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer):
num_branches_cur = len(num_channels_cur_layer)
num_branches_pre = len(num_channels_pre_layer)
transition_layers = []
for i in range(num_branches_cur):
if i < num_branches_pre:
if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
transition_layers.append(
nn.Sequential(
nn.Conv2d(num_channels_pre_layer[i], num_channels_cur_layer[i], 3, 1, 1, bias=False),
nn.BatchNorm2d(num_channels_cur_layer[i], momentum=BN_MOMENTUM),
nn.ReLU(inplace=True),
)
)
else:
transition_layers.append(None)
else:
conv3x3s = []
for j in range(i + 1 - num_branches_pre):
inchannels = num_channels_pre_layer[-1]
outchannels = num_channels_cur_layer[i] if j == i - num_branches_pre else inchannels
conv3x3s.append(
nn.Sequential(
nn.Conv2d(inchannels, outchannels, 3, 2, 1, bias=False),
nn.BatchNorm2d(outchannels, momentum=BN_MOMENTUM),
nn.ReLU(inplace=True)
)
)
transition_layers.append(nn.Sequential(*conv3x3s))
return nn.ModuleList(transition_layers)
def _make_layer(self, block, inplanes, planes, blocks, stride=1):
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
)
layers = []
layers.append(block(inplanes, planes, stride, downsample))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(inplanes, planes))
return nn.Sequential(*layers)
def _make_stage(self, layer_config, num_inchannels, multi_scale_output=True):
num_modules = layer_config['NUM_MODULES']
num_branches = layer_config['NUM_BRANCHES']
num_blocks = layer_config['NUM_BLOCKS']
num_channels = layer_config['NUM_CHANNELS']
block = blocks_dict[layer_config['BLOCK']]
fuse_method = layer_config['FUSE_METHOD']
modules = []
for i in range(num_modules):
            # multi_scale_output is only used by the last module
if not multi_scale_output and i == num_modules - 1:
reset_multi_scale_output = False
else:
reset_multi_scale_output = True
modules.append(
HighResolutionModule(
num_branches,
block,
num_blocks,
num_inchannels,
num_channels,
fuse_method,
reset_multi_scale_output
)
)
num_inchannels = modules[-1].get_num_inchannels()
return nn.Sequential(*modules), num_inchannels
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.layer1(x)
x_list = []
for i in range(self.stage2_cfg['NUM_BRANCHES']):
if self.transition1[i] is not None:
x_list.append(self.transition1[i](x))
else:
x_list.append(x)
y_list = self.stage2(x_list)
x_list = []
for i in range(self.stage3_cfg['NUM_BRANCHES']):
if self.transition2[i] is not None:
x_list.append(self.transition2[i](y_list[-1]))
else:
x_list.append(y_list[i])
y_list = self.stage3(x_list)
x_list = []
for i in range(self.stage4_cfg['NUM_BRANCHES']):
if self.transition3[i] is not None:
x_list.append(self.transition3[i](y_list[-1]))
else:
x_list.append(y_list[i])
y_list = self.stage4(x_list)
# Classification Head
y = self.incre_modules[0](y_list[0])
for i in range(len(self.downsamp_modules)):
y = self.incre_modules[i + 1](y_list[i + 1]) + self.downsamp_modules[i](y)
y = self.final_layer(y)
if torch._C._get_tracing_state():
y = y.flatten(start_dim=2).mean(dim=2)
else:
y = F.avg_pool2d(y, kernel_size=y.size()[2:]).view(y.size(0), -1)
feat = y
y = self.classifier(y)
if self.normalize_output:
y = torch.sigmoid(y)
if self.return_feature:
return y, feat
return y
def init_weights(self, pretrained='', ):
logger.info('=> init weights from normal distribution')
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if os.path.isfile(pretrained):
pretrained_dict = torch.load(pretrained)
logger.info('=> loading pretrained model {}'.format(pretrained))
model_dict = self.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict.keys()}
for k, _ in pretrained_dict.items():
logger.info('=> loading {} pretrained model {}'.format(k, pretrained))
model_dict.update(pretrained_dict)
self.load_state_dict(model_dict)
def get_hr_net_model(cfg=None, **kwargs):
config = hr_net_cfg
model = HighResolutionNet(config, **kwargs)
model.init_weights()
model.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
return model
@NETWORK_REGISTRY.register()
def hr_net_model(cfg=None, **kwargs):
config = hr_net_cfg
model = HighResolutionNet(config, return_feature=cfg.MODEL.RETURN_FEATURE, **kwargs)
model.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
return model
@NETWORK_REGISTRY.register()
def hr_net_true_personality(cfg=None, **kwargs):
config = hr_net_cfg
model = HighResolutionNet(config, normalize_output=False, **kwargs)
model.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
return model
if __name__ == "__main__":
xin = torch.randn((1, 3, 224, 224)).cuda()
hr_net = get_hr_net_model()
y = hr_net(xin)
print(y.shape)
| 21,618 | 37.196113 | 114 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/networks/single_modal_video_analysis_conv3d.py | import torch
import torch.nn as nn
class SequenceBasedModel(nn.Module):
def __init__(self, ):
super().__init__()
self.block_1 = nn.Sequential(
nn.Conv3d(in_channels=3, out_channels=64, kernel_size=(3, 5, 5)),
            nn.BatchNorm3d(64),  # num_features must match the conv out_channels
nn.ReLU(),
nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)),
)
self.block_2 = nn.Sequential(
nn.Conv3d(in_channels=64, out_channels=128, kernel_size=(2, 5, 5)),
            nn.BatchNorm3d(128),
nn.ReLU(),
nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
)
self.block_3 = nn.Sequential(
nn.Conv3d(in_channels=128, out_channels=256, kernel_size=(1, 5, 5)),
            nn.BatchNorm3d(256),
nn.ReLU(),
nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2)),
)
self.block_4 = nn.Sequential(
nn.Conv3d(in_channels=256, out_channels=512, kernel_size=(3, 5, 5)),
            nn.BatchNorm3d(512),
nn.ReLU(),
nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2)),
)
self.block_fc = nn.Sequential(
            nn.Linear(1024, 512),  # in_features assumes the flattened conv output size
            nn.BatchNorm1d(512),  # must match the 512-dim Linear output
nn.ReLU(),
nn.Dropout()
)
self.fc_out = nn.Linear(512, 5)
    def forward(self, x):
        x = self.block_1(x)
        x = self.block_2(x)
        x = self.block_3(x)
        x = self.block_4(x)
        x = x.view(x.size(0), -1)  # flatten conv features before the fc blocks
        x = self.block_fc(x)
        x = self.fc_out(x)
        return x
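# A minimal smoke-test sketch (added for illustration, not from the original
# file): it assumes a clip of 20 frames at 80x80 resolution so that the
# flattened conv features match the 1024 in_features expected by block_fc;
# other input sizes would require a different Linear size.
if __name__ == "__main__":
    model = SequenceBasedModel().eval()
    fake_clip = torch.randn(2, 3, 20, 80, 80)  # (batch, channels, frames, h, w)
    with torch.no_grad():
        out = model(fake_clip)
    print(out.shape)  # expected: torch.Size([2, 5])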
| 1,519 | 29.4 | 80 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/module/operators.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Custom operators."""
import torch
import torch.nn as nn
class Swish(nn.Module):
"""Swish activation function: x * sigmoid(x)."""
def __init__(self):
super(Swish, self).__init__()
def forward(self, x):
return SwishEfficient.apply(x)
class SwishEfficient(torch.autograd.Function):
"""Swish activation function: x * sigmoid(x)."""
@staticmethod
def forward(ctx, x):
result = x * torch.sigmoid(x)
ctx.save_for_backward(x)
return result
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_variables[0]
sigmoid_x = torch.sigmoid(x)
return grad_output * (sigmoid_x * (1 + x * (1 - sigmoid_x)))
class SE(nn.Module):
"""Squeeze-and-Excitation (SE) block w/ Swish: AvgPool, FC, Swish, FC, Sigmoid."""
def _round_width(self, width, multiplier, min_width=8, divisor=8):
"""
Round width of filters based on width multiplier
Args:
width (int): the channel dimensions of the input.
multiplier (float): the multiplication factor.
min_width (int): the minimum width after multiplication.
divisor (int): the new width should be dividable by divisor.
"""
if not multiplier:
return width
width *= multiplier
min_width = min_width or divisor
width_out = max(
min_width, int(width + divisor / 2) // divisor * divisor
)
if width_out < 0.9 * width:
width_out += divisor
return int(width_out)
def __init__(self, dim_in, ratio, relu_act=True):
"""
Args:
dim_in (int): the channel dimensions of the input.
ratio (float): the channel reduction ratio for squeeze.
relu_act (bool): whether to use ReLU activation instead
of Swish (default).
divisor (int): the new width should be dividable by divisor.
"""
super(SE, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
dim_fc = self._round_width(dim_in, ratio)
self.fc1 = nn.Conv3d(dim_in, dim_fc, 1, bias=True)
self.fc1_act = nn.ReLU() if relu_act else Swish()
self.fc2 = nn.Conv3d(dim_fc, dim_in, 1, bias=True)
self.fc2_sig = nn.Sigmoid()
def forward(self, x):
x_in = x
for module in self.children():
x = module(x)
return x_in * x
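# A brief usage sketch (added for illustration, not part of the original file):
# SE re-weights channels of a 5D video tensor, so the output shape equals the
# input shape; dim_in=16, ratio=0.25 and the clip size are arbitrary examples.
if __name__ == "__main__":
    se = SE(dim_in=16, ratio=0.25)
    clip = torch.randn(2, 16, 4, 8, 8)  # (batch, channels, time, h, w)
    print(se(clip).shape)  # torch.Size([2, 16, 4, 8, 8])
    print(Swish()(torch.tensor([-1.0, 0.0, 1.0])))  # x * sigmoid(x)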
| 2,552 | 29.759036 | 86 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/module/resnet_tv.py | import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from dpcv.modeling.module.weight_init_helper import initialize_weights
import torch
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
"""
    Note: this class is not a standard ResNet; it can apply a sigmoid to the last fc layer's output (for labels in [0, 1])
"""
def __init__(
self, block, layers, num_classes=1000,
init_weights=True, zero_init_residual=False, sigmoid_output=True,
return_feat=False,
):
super(ResNet, self).__init__()
self.return_feature = return_feat
self.sigmoid_output = sigmoid_output
self.inplanes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
# if init_weights:
# initialize_weights(self)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# elif isinstance(m, nn.Linear):
# nn.init.normal_(m.weight, 0, 0.01)
# nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
feat = x.view(x.size(0), -1)
x = self.fc(feat)
if self.sigmoid_output:
x = torch.sigmoid(x)
if self.return_feature:
return x, feat
return x
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
if __name__ == "__main__":
import torch
model = resnet101(pretrained=True)
# change network layer
for name, module in model.named_modules():
print("layer name:{}, layer instance:{}".format(name, module))
in_feat_num = model.fc.in_features
model.fc = nn.Linear(in_feat_num, 102)
# forward
fake_img = torch.randn((1, 3, 224, 224)) # batchsize * channel * height * width
output = model(fake_img)
print(output.shape)
| 8,374 | 30.844106 | 106 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/module/vgg_tv.py | import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = [
'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
'vgg19_bn', 'vgg19',
]
model_urls = {
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}
class VGG(nn.Module):
def __init__(self, features, num_classes=1000, init_weights=True):
super(VGG, self).__init__()
self.features = features
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
self.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, num_classes),
)
if init_weights:
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfg = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def vgg11(pretrained=False, **kwargs):
"""VGG 11-layer model (configuration "A")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfg['A']), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg11']))
return model
def vgg11_bn(pretrained=False, **kwargs):
"""VGG 11-layer model (configuration "A") with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfg['A'], batch_norm=True), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg11_bn']))
return model
def vgg13(pretrained=False, **kwargs):
"""VGG 13-layer model (configuration "B")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfg['B']), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg13']))
return model
def vgg13_bn(pretrained=False, **kwargs):
"""VGG 13-layer model (configuration "B") with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfg['B'], batch_norm=True), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg13_bn']))
return model
def vgg16(pretrained=False, **kwargs):
"""VGG 16-layer model (configuration "D")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfg['D']), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg16']))
return model
def vgg16_bn(pretrained=False, **kwargs):
"""VGG 16-layer model (configuration "D") with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfg['D'], batch_norm=True), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg16_bn']))
return model
def vgg19(pretrained=False, **kwargs):
"""VGG 19-layer model (configuration "E")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfg['E']), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg19']))
return model
def vgg19_bn(pretrained=False, **kwargs):
"""VGG 19-layer model (configuration 'E') with batch normalization
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfg['E'], batch_norm=True), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg19_bn']))
return model
if __name__ == "__main__":
model = vgg16_bn()
print(model)
# for name, module in model.named_modules():
# print("layer name:{}, layer instance:{}".format(name, module))
in_feat_num = model.classifier[6].in_features
model.classifier[6] = nn.Linear(in_feat_num, 102)
# forward
fake_img = torch.randn((1, 3, 224, 224)) # batchsize * channel * height * width
output = model(fake_img)
print(output.shape) | 6,791 | 31.037736 | 113 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/module/bi_modal_resnet_module.py | # import torch
import torch.nn as nn
# import torch.utils.model_zoo as model_zoo
# consider add pre-train weight
# model_urls = {'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth'}
def vis_conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
def vis_conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
def aud_conv1x9(in_planes, out_planes, stride=1):
"""1x9 convolution with padding"""
if stride == 1:
return nn.Conv2d(in_planes, out_planes, kernel_size=(1, 9), stride=1, padding=(0, 4), bias=False)
elif stride == 2:
return nn.Conv2d(in_planes, out_planes, kernel_size=(1, 9), stride=(1, 2*stride), padding=(0, 4), bias=False)
else:
raise ValueError("wrong stride value")
def aud_conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
if stride == 1:
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, bias=False)
elif stride == 2:
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=(1, 2 * stride), bias=False)
else:
raise ValueError("wrong stride value")
class VisInitStage(nn.Module):
def __init__(self, in_channels=3, out_channels=64):
super(VisInitStage, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def forward(self, inputs):
x = self.conv1(inputs)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
return x
class AudInitStage(nn.Module):
def __init__(self, in_channels=1, out_channels=64):
super(AudInitStage, self).__init__()
self.conv1 = nn.Conv2d(
in_channels, out_channels, kernel_size=(1, 49), stride=(1, 4), padding=(0, 24), bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=(1, 9), stride=(1, 4), padding=(0, 4))
def forward(self, inputs):
x = self.conv1(inputs)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
return x
class BiModalBasicBlock(nn.Module):
"""
    Builds the visual and audio conv blocks for a ResNet-18 style architecture
"""
expansion = 1
def __init__(self, conv_type, inplanes, planes, stride=1, downsample=None):
super(BiModalBasicBlock, self).__init__()
self.conv1 = conv_type(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv_type(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class AudioVisualResNet(nn.Module):
def __init__(self, in_channels, init_stage, block, conv,
channels=[64, 128, 256, 512], # default resnet stage channel settings
layers=[2, 2, 2, 2], # default resnet18 layers setting
out_spatial=(1, 1),
zero_init_residual=False):
super(AudioVisualResNet, self).__init__()
assert init_stage.__name__ in ["AudInitStage", "VisInitStage"], \
"init conv stage should be 'AudInitStage' or 'VisInitStage'"
assert len(conv) == 2, "conv should be a list containing <conv3x3 conv1x1> or <conv1x9, conv1x1> function"
self.inplanes = channels[0]
self.conv_3x3 = conv[0]
self.conv_1x1 = conv[1]
self.init_stage = init_stage(in_channels, channels[0])
self.layer1 = self._make_layer(block, channels[0], layers[0])
self.layer2 = self._make_layer(block, channels[1], layers[1], stride=2)
self.layer3 = self._make_layer(block, channels[2], layers[2], stride=2)
self.layer4 = self._make_layer(block, channels[3], layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d(out_spatial)
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
# elif isinstance(m, nn.BatchNorm2d):
# nn.init.constant_(m.weight, 1)
# nn.init.constant_(m.bias, 0)
if zero_init_residual:
for m in self.modules():
if isinstance(m, block):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
self.conv_1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = [block(self.conv_3x3, self.inplanes, planes, stride, downsample)]
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.conv_3x3, self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.init_stage(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
# x_out = x_avg.view(x_avg.size(0), -1)
return x
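# An illustrative construction sketch (not in the original file): the visual
# branch pairs VisInitStage with 3x3/1x1 convs and the audio branch pairs
# AudInitStage with 1x9/1x1 convs; the 224x224 image size and the audio length
# below are arbitrary example values.
if __name__ == "__main__":
    import torch
    visual_branch = AudioVisualResNet(
        in_channels=3, init_stage=VisInitStage, block=BiModalBasicBlock,
        conv=[vis_conv3x3, vis_conv1x1],
    )
    audio_branch = AudioVisualResNet(
        in_channels=1, init_stage=AudInitStage, block=BiModalBasicBlock,
        conv=[aud_conv1x9, aud_conv1x1],
    )
    img = torch.randn(2, 3, 224, 224)
    aud = torch.randn(2, 1, 1, 50176)
    # both branches end with adaptive average pooling to (1, 1)
    print(visual_branch(img).shape, audio_branch(aud).shape)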
| 5,952 | 35.975155 | 117 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/module/nonlocal_helper.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Non-local helper"""
import torch
import torch.nn as nn
class Nonlocal(nn.Module):
"""
Builds Non-local Neural Networks as a generic family of building
blocks for capturing long-range dependencies. Non-local Network
computes the response at a position as a weighted sum of the
features at all positions. This building block can be plugged into
many computer vision architectures.
More details in the paper: https://arxiv.org/pdf/1711.07971.pdf
"""
def __init__(
self,
dim,
dim_inner,
pool_size=None,
instantiation="softmax",
zero_init_final_conv=False,
zero_init_final_norm=True,
norm_eps=1e-5,
norm_momentum=0.1,
norm_module=nn.BatchNorm3d,
):
"""
Args:
dim (int): number of dimension for the input.
dim_inner (int): number of dimension inside of the Non-local block.
pool_size (list): the kernel size of spatial temporal pooling,
temporal pool kernel size, spatial pool kernel size, spatial
pool kernel size in order. By default pool_size is None,
then there would be no pooling used.
instantiation (string): supports two different instantiation method:
"dot_product": normalizing correlation matrix with L2.
"softmax": normalizing correlation matrix with Softmax.
zero_init_final_conv (bool): If true, zero initializing the final
convolution of the Non-local block.
zero_init_final_norm (bool):
If true, zero initializing the final batch norm of the Non-local
block.
norm_module (nn.Module): nn.Module for the normalization layer. The
default is nn.BatchNorm3d.
"""
super(Nonlocal, self).__init__()
self.dim = dim
self.dim_inner = dim_inner
self.pool_size = pool_size
self.instantiation = instantiation
self.use_pool = (
False
if pool_size is None
else any((size > 1 for size in pool_size))
)
self.norm_eps = norm_eps
self.norm_momentum = norm_momentum
self._construct_nonlocal(
zero_init_final_conv, zero_init_final_norm, norm_module
)
def _construct_nonlocal(
self, zero_init_final_conv, zero_init_final_norm, norm_module
):
# Three convolution heads: theta, phi, and g.
self.conv_theta = nn.Conv3d(
self.dim, self.dim_inner, kernel_size=1, stride=1, padding=0
)
self.conv_phi = nn.Conv3d(
self.dim, self.dim_inner, kernel_size=1, stride=1, padding=0
)
self.conv_g = nn.Conv3d(
self.dim, self.dim_inner, kernel_size=1, stride=1, padding=0
)
# Final convolution output.
self.conv_out = nn.Conv3d(
self.dim_inner, self.dim, kernel_size=1, stride=1, padding=0
)
# Zero initializing the final convolution output.
self.conv_out.zero_init = zero_init_final_conv
# TODO: change the name to `norm`
self.bn = norm_module(
num_features=self.dim,
eps=self.norm_eps,
momentum=self.norm_momentum,
)
# Zero initializing the final bn.
self.bn.transform_final_bn = zero_init_final_norm
# Optional to add the spatial-temporal pooling.
if self.use_pool:
self.pool = nn.MaxPool3d(
kernel_size=self.pool_size,
stride=self.pool_size,
padding=[0, 0, 0],
)
def forward(self, x):
x_identity = x
N, C, T, H, W = x.size()
theta = self.conv_theta(x)
# Perform temporal-spatial pooling to reduce the computation.
if self.use_pool:
x = self.pool(x)
phi = self.conv_phi(x)
g = self.conv_g(x)
theta = theta.view(N, self.dim_inner, -1)
phi = phi.view(N, self.dim_inner, -1)
g = g.view(N, self.dim_inner, -1)
# (N, C, TxHxW) * (N, C, TxHxW) => (N, TxHxW, TxHxW).
theta_phi = torch.einsum("nct,ncp->ntp", (theta, phi))
# For original Non-local paper, there are two main ways to normalize
# the affinity tensor:
# 1) Softmax normalization (norm on exp).
# 2) dot_product normalization.
if self.instantiation == "softmax":
# Normalizing the affinity tensor theta_phi before softmax.
theta_phi = theta_phi * (self.dim_inner ** -0.5)
theta_phi = nn.functional.softmax(theta_phi, dim=2)
elif self.instantiation == "dot_product":
spatial_temporal_dim = theta_phi.shape[2]
theta_phi = theta_phi / spatial_temporal_dim
else:
raise NotImplementedError(
"Unknown norm type {}".format(self.instantiation)
)
# (N, TxHxW, TxHxW) * (N, C, TxHxW) => (N, C, TxHxW).
theta_phi_g = torch.einsum("ntg,ncg->nct", (theta_phi, g))
# (N, C, TxHxW) => (N, C, T, H, W).
theta_phi_g = theta_phi_g.view(N, self.dim_inner, T, H, W)
p = self.conv_out(theta_phi_g)
p = self.bn(p)
return x_identity + p
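# A short usage sketch (added for illustration, not part of the original file):
# the non-local block preserves the input shape; the 8-channel, 4-frame, 8x8
# clip and dim_inner=4 below are arbitrary example values.
if __name__ == "__main__":
    block = Nonlocal(dim=8, dim_inner=4, pool_size=[1, 2, 2])
    clip = torch.randn(2, 8, 4, 8, 8)  # (batch, channels, time, h, w)
    print(block(clip).shape)  # torch.Size([2, 8, 4, 8, 8])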
| 5,418 | 35.369128 | 80 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/module/stem_helper.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""ResNe(X)t 3D stem helper."""
import torch.nn as nn
def get_stem_func(name):
"""
Retrieves the stem module by name.
"""
trans_funcs = {"x3d_stem": X3DStem, "basic_stem": ResNetBasicStem}
assert (
name in trans_funcs.keys()
), "Transformation function '{}' not supported".format(name)
return trans_funcs[name]
class VideoModelStem(nn.Module):
"""
Video 3D stem module. Provides stem operations of Conv, BN, ReLU, MaxPool
on input data tensor for one or multiple pathways.
"""
def __init__(
self,
dim_in,
dim_out,
kernel,
stride,
padding,
inplace_relu=True,
eps=1e-5,
bn_mmt=0.1,
norm_module=nn.BatchNorm3d,
stem_func_name="basic_stem",
):
"""
The `__init__` method of any subclass should also contain these
arguments. List size of 1 for single pathway models (C2D, I3D, Slow
and etc), list size of 2 for two pathway models (SlowFast).
Args:
dim_in (list): the list of channel dimensions of the inputs.
dim_out (list): the output dimension of the convolution in the stem
layer.
kernel (list): the kernels' size of the convolutions in the stem
layers. Temporal kernel size, height kernel size, width kernel
size in order.
stride (list): the stride sizes of the convolutions in the stem
layer. Temporal kernel stride, height kernel size, width kernel
size in order.
padding (list): the paddings' sizes of the convolutions in the stem
layer. Temporal padding size, height padding size, width padding
size in order.
inplace_relu (bool): calculate the relu on the original input
without allocating new memory.
eps (float): epsilon for batch norm.
bn_mmt (float): momentum for batch norm. Noted that BN momentum in
PyTorch = 1 - BN momentum in Caffe2.
norm_module (nn.Module): nn.Module for the normalization layer. The
default is nn.BatchNorm3d.
            stem_func_name (string): name of the stem function applied on
input to the network.
"""
super(VideoModelStem, self).__init__()
assert (
len(
{
len(dim_in),
len(dim_out),
len(kernel),
len(stride),
len(padding),
}
)
== 1
), "Input pathway dimensions are not consistent. {} {} {} {} {}".format(
len(dim_in),
len(dim_out),
len(kernel),
len(stride),
len(padding),
)
self.num_pathways = len(dim_in)
self.kernel = kernel
self.stride = stride
self.padding = padding
self.inplace_relu = inplace_relu
self.eps = eps
self.bn_mmt = bn_mmt
# Construct the stem layer.
self._construct_stem(dim_in, dim_out, norm_module, stem_func_name)
def _construct_stem(self, dim_in, dim_out, norm_module, stem_func_name):
trans_func = get_stem_func(stem_func_name)
for pathway in range(len(dim_in)):
stem = trans_func(
dim_in[pathway],
dim_out[pathway],
self.kernel[pathway],
self.stride[pathway],
self.padding[pathway],
self.inplace_relu,
self.eps,
self.bn_mmt,
norm_module,
)
self.add_module("pathway{}_stem".format(pathway), stem)
def forward(self, x):
assert (
len(x) == self.num_pathways
), "Input tensor does not contain {} pathway".format(self.num_pathways)
# use a new list, don't modify in-place the x list, which is bad for activation checkpointing.
y = []
for pathway in range(len(x)):
m = getattr(self, "pathway{}_stem".format(pathway))
y.append(m(x[pathway]))
return y
class ResNetBasicStem(nn.Module):
"""
ResNe(X)t 3D stem module.
Performs spatiotemporal Convolution, BN, and Relu following by a
spatiotemporal pooling.
"""
def __init__(
self,
dim_in,
dim_out,
kernel,
stride,
padding,
inplace_relu=True,
eps=1e-5,
bn_mmt=0.1,
norm_module=nn.BatchNorm3d,
):
"""
The `__init__` method of any subclass should also contain these arguments.
Args:
dim_in (int): the channel dimension of the input. Normally 3 is used
for rgb input, and 2 or 3 is used for optical flow input.
dim_out (int): the output dimension of the convolution in the stem
layer.
kernel (list): the kernel size of the convolution in the stem layer.
temporal kernel size, height kernel size, width kernel size in
order.
stride (list): the stride size of the convolution in the stem layer.
temporal kernel stride, height kernel size, width kernel size in
order.
padding (int): the padding size of the convolution in the stem
layer, temporal padding size, height padding size, width
padding size in order.
inplace_relu (bool): calculate the relu on the original input
without allocating new memory.
eps (float): epsilon for batch norm.
bn_mmt (float): momentum for batch norm. Noted that BN momentum in
PyTorch = 1 - BN momentum in Caffe2.
norm_module (nn.Module): nn.Module for the normalization layer. The
default is nn.BatchNorm3d.
"""
super(ResNetBasicStem, self).__init__()
self.kernel = kernel
self.stride = stride
self.padding = padding
self.inplace_relu = inplace_relu
self.eps = eps
self.bn_mmt = bn_mmt
# Construct the stem layer.
self._construct_stem(dim_in, dim_out, norm_module)
def _construct_stem(self, dim_in, dim_out, norm_module):
self.conv = nn.Conv3d(
dim_in,
dim_out,
self.kernel,
stride=self.stride,
padding=self.padding,
bias=False,
)
self.bn = norm_module(
num_features=dim_out, eps=self.eps, momentum=self.bn_mmt
)
self.relu = nn.ReLU(self.inplace_relu)
self.pool_layer = nn.MaxPool3d(
kernel_size=[1, 3, 3], stride=[1, 2, 2], padding=[0, 1, 1]
)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
x = self.pool_layer(x)
return x
class X3DStem(nn.Module):
"""
X3D's 3D stem module.
    Performs a spatial convolution followed by a depthwise temporal Convolution, BN, and ReLU, followed by a
spatiotemporal pooling.
"""
def __init__(
self,
dim_in,
dim_out,
kernel,
stride,
padding,
inplace_relu=True,
eps=1e-5,
bn_mmt=0.1,
norm_module=nn.BatchNorm3d,
):
"""
The `__init__` method of any subclass should also contain these arguments.
Args:
dim_in (int): the channel dimension of the input. Normally 3 is used
for rgb input, and 2 or 3 is used for optical flow input.
dim_out (int): the output dimension of the convolution in the stem
layer.
kernel (list): the kernel size of the convolution in the stem layer.
temporal kernel size, height kernel size, width kernel size in
order.
stride (list): the stride size of the convolution in the stem layer.
temporal kernel stride, height kernel size, width kernel size in
order.
padding (int): the padding size of the convolution in the stem
layer, temporal padding size, height padding size, width
padding size in order.
inplace_relu (bool): calculate the relu on the original input
without allocating new memory.
eps (float): epsilon for batch norm.
bn_mmt (float): momentum for batch norm. Noted that BN momentum in
PyTorch = 1 - BN momentum in Caffe2.
norm_module (nn.Module): nn.Module for the normalization layer. The
default is nn.BatchNorm3d.
"""
super(X3DStem, self).__init__()
self.kernel = kernel
self.stride = stride
self.padding = padding
self.inplace_relu = inplace_relu
self.eps = eps
self.bn_mmt = bn_mmt
# Construct the stem layer.
self._construct_stem(dim_in, dim_out, norm_module)
def _construct_stem(self, dim_in, dim_out, norm_module):
self.conv_xy = nn.Conv3d(
dim_in,
dim_out,
kernel_size=(1, self.kernel[1], self.kernel[2]),
stride=(1, self.stride[1], self.stride[2]),
padding=(0, self.padding[1], self.padding[2]),
bias=False,
)
self.conv = nn.Conv3d(
dim_out,
dim_out,
kernel_size=(self.kernel[0], 1, 1),
stride=(self.stride[0], 1, 1),
padding=(self.padding[0], 0, 0),
bias=False,
groups=dim_out,
)
self.bn = norm_module(
num_features=dim_out, eps=self.eps, momentum=self.bn_mmt
)
self.relu = nn.ReLU(self.inplace_relu)
def forward(self, x):
x = self.conv_xy(x)
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class PatchEmbed(nn.Module):
"""
PatchEmbed.
"""
def __init__(
self,
dim_in=3,
dim_out=768,
kernel=(1, 16, 16),
stride=(1, 4, 4),
padding=(1, 7, 7),
conv_2d=False,
):
super().__init__()
if conv_2d:
conv = nn.Conv2d
else:
conv = nn.Conv3d
self.proj = conv(
dim_in,
dim_out,
kernel_size=kernel,
stride=stride,
padding=padding,
)
def forward(self, x):
x = self.proj(x)
# B C (T) H W -> B (T)HW C
return x.flatten(2).transpose(1, 2)
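# A quick shape sketch (added for illustration, not part of the original file):
# PatchEmbed flattens the (T, H, W) grid into a token axis; the 8-frame,
# 56x56 clip below is an arbitrary example, and with the default kernel/stride
# it yields 10 * 14 * 14 = 1960 tokens of dimension 768.
if __name__ == "__main__":
    import torch
    embed = PatchEmbed()
    clip = torch.randn(2, 3, 8, 56, 56)  # (batch, channels, time, h, w)
    print(embed(clip).shape)  # torch.Size([2, 1960, 768])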
| 10,775 | 32.362229 | 102 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/module/weight_init_helper.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Utility function for weight initialization"""
import torch.nn as nn
def c2_xavier_fill(module: nn.Module) -> None:
"""
Initialize `module.weight` using the "XavierFill" implemented in Caffe2.
Also initializes `module.bias` to 0.
Args:
module (torch.nn.Module): module to initialize.
"""
# Caffe2 implementation of XavierFill in fact
# corresponds to kaiming_uniform_ in PyTorch
nn.init.kaiming_uniform_(module.weight, a=1)
if module.bias is not None:
# pyre-fixme[6]: Expected `Tensor` for 1st param but got `Union[nn.Module,
# torch.Tensor]`.
nn.init.constant_(module.bias, 0)
def c2_msra_fill(module: nn.Module) -> None:
"""
Initialize `module.weight` using the "MSRAFill" implemented in Caffe2.
Also initializes `module.bias` to 0.
Args:
module (torch.nn.Module): module to initialize.
"""
nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
if module.bias is not None:
# pyre-fixme[6]: Expected `Tensor` for 1st param but got `Union[nn.Module,
# torch.Tensor]`.
nn.init.constant_(module.bias, 0)
def init_weights(model, fc_init_std=0.01, zero_init_final_bn=True):
"""
Performs ResNet style weight initialization.
Args:
fc_init_std (float): the expected standard deviation for fc layer.
zero_init_final_bn (bool): if True, zero initialize the final bn for
every bottleneck.
"""
for m in model.modules():
if isinstance(m, nn.Conv3d):
"""
Follow the initialization method proposed in:
{He, Kaiming, et al.
"Delving deep into rectifiers: Surpassing human-level
performance on imagenet classification."
arXiv preprint arXiv:1502.01852 (2015)}
"""
c2_msra_fill(m)
elif isinstance(m, nn.BatchNorm3d):
if (
hasattr(m, "transform_final_bn")
and m.transform_final_bn
and zero_init_final_bn
):
batchnorm_weight = 0.0
else:
batchnorm_weight = 1.0
if m.weight is not None:
m.weight.data.fill_(batchnorm_weight)
if m.bias is not None:
m.bias.data.zero_()
if isinstance(m, nn.Linear):
m.weight.data.normal_(mean=0.0, std=fc_init_std)
if m.bias is not None:
m.bias.data.zero_()
def initialize_weights(model):
for m in model.modules():
# normal conv layers
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv3d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
# normal BN layers
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm3d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# normal FC layers
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
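# A tiny usage sketch (added for illustration, not part of the original file):
# apply the ResNet-style initializer above to an arbitrary small model; the
# layer sizes are example values.
if __name__ == "__main__":
    demo = nn.Sequential(
        nn.Conv2d(3, 8, kernel_size=3, padding=1),
        nn.BatchNorm2d(8),
        nn.ReLU(inplace=True),
        nn.Flatten(),
        nn.Linear(8 * 4 * 4, 5),
    )
    initialize_weights(demo)
    print(float(demo[4].bias.abs().sum()))  # linear bias reset to 0.0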
| 3,741 | 34.980769 | 82 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/module/se_resnet.py | import torch
import torch.nn as nn
from torch.hub import load_state_dict_from_url
from dpcv.modeling.module.resnet_tv import ResNet
class SELayer(nn.Module):
def __init__(self, channel, reduction=16):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1) # squeeze
self.fc = nn.Sequential( # excitation
nn.Linear(channel, channel // reduction, bias=False),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel, bias=False),
nn.Sigmoid()
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y.expand_as(x)
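# A short usage sketch (added for illustration, not part of the original file):
# SELayer re-weights channels and preserves the input shape; the 32-channel
# 7x7 feature map below is an arbitrary example.
if __name__ == "__main__":
    _se_demo = SELayer(channel=32, reduction=16)
    _feat = torch.randn(2, 32, 7, 7)
    print(_se_demo(_feat).shape)  # torch.Size([2, 32, 7, 7])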
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
class SEBasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None,
*, reduction=16):
super(SEBasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, 1)
self.bn2 = nn.BatchNorm2d(planes)
self.se = SELayer(planes, reduction)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.se(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class SEBottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None,
*, reduction=16):
super(SEBottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.se = SELayer(planes * 4, reduction)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out = self.se(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
def se_resnet18(num_classes=1000):
"""Constructs a ResNet-18 model.
Args:
num_classes (int): number of classification
"""
model = ResNet(SEBasicBlock, [2, 2, 2, 2], num_classes=num_classes)
model.avgpool = nn.AdaptiveAvgPool2d(1)
return model
def se_resnet34(num_classes=1000):
"""Constructs a ResNet-34 model.
Args:
num_classes (int): number of classification
"""
model = ResNet(SEBasicBlock, [3, 4, 6, 3], num_classes=num_classes)
model.avgpool = nn.AdaptiveAvgPool2d(1)
return model
def se_resnet50(num_classes=1000, pretrained=False):
"""Constructs a ResNet-50 model.
Args:
num_classes (int): number of classification
pretrained (bool): If True, returns a model pre-trained on ImageNet
Note:
        the ResNet applies a sigmoid to the final fc layer's output since the
        personality labels lie in the range (0, 1)
"""
model = ResNet(SEBottleneck, [3, 4, 6, 3], num_classes=num_classes)
model.avgpool = nn.AdaptiveAvgPool2d(1)
if pretrained:
# model.load_state_dict(load_state_dict_from_url(
# "https://github.com/moskomule/senet.pytorch/releases/download/archive/seresnet50-60a8950a85b2b.pkl"))
pretrained_dict = load_state_dict_from_url(
"https://github.com/moskomule/senet.pytorch/releases/download/archive/seresnet50-60a8950a85b2b.pkl")
model_dict = model.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
model.to(device=torch.device("cuda" if torch.cuda.is_available() else "cpu"))
return model
def se_resnet101(num_classes=1_000):
"""Constructs a ResNet-101 model.
Args:
num_classes (int): number of classification
"""
model = ResNet(SEBottleneck, [3, 4, 23, 3], num_classes=num_classes)
model.avgpool = nn.AdaptiveAvgPool2d(1)
return model
def se_resnet152(num_classes=1000):
"""Constructs a ResNet-152 model.
Args:
num_classes (int): number of classification
"""
model = ResNet(SEBottleneck, [3, 8, 36, 3], num_classes=num_classes)
model.avgpool = nn.AdaptiveAvgPool2d(1)
return model
if __name__ == "__main__":
import torch
se_model = se_resnet50(5)
# for name, module in se_model.named_modules():
# print("layer name:{}, layer instance:{}".format(name, module))
# modify network change classification layer
# in_feat_num = se_model.fc.in_features
# se_model.fc = nn.Linear(in_feat_num, 5)
# forward
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    se_model = se_model.to(device)  # keep model and input on the same device
    fake_img = torch.randn((1, 3, 224, 224), device=device)  # batch_size * channel * height * width
output = se_model(fake_img)
print(output.shape)
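    # Illustrative check: se_resnet50's docstring states that the backbone applies a
    # sigmoid to the final fc output, so the predicted trait scores are expected to
    # lie inside (0, 1).
    print("prediction range:", output.min().item(), output.max().item())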
| 5,970 | 30.098958 | 115 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/module/resnet_helper.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Video models."""
import torch
import torch.nn as nn
from dpcv.tools.common import drop_path
from dpcv.modeling.module.nonlocal_helper import Nonlocal
from dpcv.modeling.module.operators import SE, Swish
def get_trans_func(name):
"""
Retrieves the transformation module by name.
"""
trans_funcs = {
"bottleneck_transform": BottleneckTransform,
"basic_transform": BasicTransform,
"x3d_transform": X3DTransform,
}
assert (
name in trans_funcs.keys()
), "Transformation function '{}' not supported".format(name)
return trans_funcs[name]
class BasicTransform(nn.Module):
"""
Basic transformation: Tx3x3, 1x3x3, where T is the size of temporal kernel.
"""
def __init__(
self,
dim_in,
dim_out,
temp_kernel_size,
stride,
dim_inner=None,
num_groups=1,
stride_1x1=None,
inplace_relu=True,
eps=1e-5,
bn_mmt=0.1,
norm_module=nn.BatchNorm3d,
block_idx=0,
):
"""
Args:
dim_in (int): the channel dimensions of the input.
dim_out (int): the channel dimension of the output.
temp_kernel_size (int): the temporal kernel sizes of the first
convolution in the basic block.
stride (int): the stride of the bottleneck.
dim_inner (None): the inner dimension would not be used in
BasicTransform.
num_groups (int): number of groups for the convolution. Number of
group is always 1 for BasicTransform.
stride_1x1 (None): stride_1x1 will not be used in BasicTransform.
inplace_relu (bool): if True, calculate the relu on the original
input without allocating new memory.
eps (float): epsilon for batch norm.
bn_mmt (float): momentum for batch norm. Noted that BN momentum in
PyTorch = 1 - BN momentum in Caffe2.
norm_module (nn.Module): nn.Module for the normalization layer. The
default is nn.BatchNorm3d.
"""
super(BasicTransform, self).__init__()
self.temp_kernel_size = temp_kernel_size
self._inplace_relu = inplace_relu
self._eps = eps
self._bn_mmt = bn_mmt
self._construct(dim_in, dim_out, stride, norm_module)
def _construct(self, dim_in, dim_out, stride, norm_module):
# Tx3x3, BN, ReLU.
self.a = nn.Conv3d(
dim_in,
dim_out,
kernel_size=[self.temp_kernel_size, 3, 3],
stride=[1, stride, stride],
padding=[int(self.temp_kernel_size // 2), 1, 1],
bias=False,
)
self.a_bn = norm_module(
num_features=dim_out, eps=self._eps, momentum=self._bn_mmt
)
self.a_relu = nn.ReLU(inplace=self._inplace_relu)
# 1x3x3, BN.
self.b = nn.Conv3d(
dim_out,
dim_out,
kernel_size=[1, 3, 3],
stride=[1, 1, 1],
padding=[0, 1, 1],
bias=False,
)
self.b_bn = norm_module(
num_features=dim_out, eps=self._eps, momentum=self._bn_mmt
)
self.b_bn.transform_final_bn = True
def forward(self, x):
x = self.a(x)
x = self.a_bn(x)
x = self.a_relu(x)
x = self.b(x)
x = self.b_bn(x)
return x
class X3DTransform(nn.Module):
"""
X3D transformation: 1x1x1, Tx3x3 (channelwise, num_groups=dim_in), 1x1x1,
augmented with (optional) SE (squeeze-excitation) on the 3x3x3 output.
T is the temporal kernel size (defaulting to 3)
"""
def __init__(
self,
dim_in,
dim_out,
temp_kernel_size,
stride,
dim_inner,
num_groups,
stride_1x1=False,
inplace_relu=True,
eps=1e-5,
bn_mmt=0.1,
dilation=1,
norm_module=nn.BatchNorm3d,
se_ratio=0.0625,
swish_inner=True,
block_idx=0,
):
"""
Args:
dim_in (int): the channel dimensions of the input.
dim_out (int): the channel dimension of the output.
temp_kernel_size (int): the temporal kernel sizes of the middle
convolution in the bottleneck.
stride (int): the stride of the bottleneck.
dim_inner (int): the inner dimension of the block.
num_groups (int): number of groups for the convolution. num_groups=1
is for standard ResNet like networks, and num_groups>1 is for
ResNeXt like networks.
stride_1x1 (bool): if True, apply stride to 1x1 conv, otherwise
apply stride to the 3x3 conv.
inplace_relu (bool): if True, calculate the relu on the original
input without allocating new memory.
eps (float): epsilon for batch norm.
bn_mmt (float): momentum for batch norm. Noted that BN momentum in
PyTorch = 1 - BN momentum in Caffe2.
dilation (int): size of dilation.
norm_module (nn.Module): nn.Module for the normalization layer. The
default is nn.BatchNorm3d.
se_ratio (float): if > 0, apply SE to the Tx3x3 conv, with the SE
channel dimensionality being se_ratio times the Tx3x3 conv dim.
swish_inner (bool): if True, apply swish to the Tx3x3 conv, otherwise
apply ReLU to the Tx3x3 conv.
"""
super(X3DTransform, self).__init__()
self.temp_kernel_size = temp_kernel_size
self._inplace_relu = inplace_relu
self._eps = eps
self._bn_mmt = bn_mmt
self._se_ratio = se_ratio
self._swish_inner = swish_inner
self._stride_1x1 = stride_1x1
self._block_idx = block_idx
self._construct(
dim_in,
dim_out,
stride,
dim_inner,
num_groups,
dilation,
norm_module,
)
def _construct(
self,
dim_in,
dim_out,
stride,
dim_inner,
num_groups,
dilation,
norm_module,
):
(str1x1, str3x3) = (stride, 1) if self._stride_1x1 else (1, stride)
# 1x1x1, BN, ReLU.
self.a = nn.Conv3d(
dim_in,
dim_inner,
kernel_size=[1, 1, 1],
stride=[1, str1x1, str1x1],
padding=[0, 0, 0],
bias=False,
)
self.a_bn = norm_module(
num_features=dim_inner, eps=self._eps, momentum=self._bn_mmt
)
self.a_relu = nn.ReLU(inplace=self._inplace_relu)
# Tx3x3, BN, ReLU.
self.b = nn.Conv3d(
dim_inner,
dim_inner,
[self.temp_kernel_size, 3, 3],
stride=[1, str3x3, str3x3],
padding=[int(self.temp_kernel_size // 2), dilation, dilation],
groups=num_groups,
bias=False,
dilation=[1, dilation, dilation],
)
self.b_bn = norm_module(
num_features=dim_inner, eps=self._eps, momentum=self._bn_mmt
)
# Apply SE attention or not
use_se = True if (self._block_idx + 1) % 2 else False
if self._se_ratio > 0.0 and use_se:
self.se = SE(dim_inner, self._se_ratio)
if self._swish_inner:
self.b_relu = Swish()
else:
self.b_relu = nn.ReLU(inplace=self._inplace_relu)
# 1x1x1, BN.
self.c = nn.Conv3d(
dim_inner,
dim_out,
kernel_size=[1, 1, 1],
stride=[1, 1, 1],
padding=[0, 0, 0],
bias=False,
)
self.c_bn = norm_module(
num_features=dim_out, eps=self._eps, momentum=self._bn_mmt
)
self.c_bn.transform_final_bn = True
def forward(self, x):
for block in self.children():
x = block(x)
return x
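# Usage sketch (illustrative; the sizes below are assumptions, not repo defaults).
# The Tx3x3 convolution above is channelwise, so num_groups is set equal to dim_inner:
# x3d_block = X3DTransform(dim_in=48, dim_out=48, temp_kernel_size=3, stride=1,
#                          dim_inner=108, num_groups=108)
# out = x3d_block(torch.randn(1, 48, 4, 28, 28))  # -> (1, 48, 4, 28, 28)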
class BottleneckTransform(nn.Module):
"""
Bottleneck transformation: Tx1x1, 1x3x3, 1x1x1, where T is the size of
temporal kernel.
"""
def __init__(
self,
dim_in,
dim_out,
temp_kernel_size,
stride,
dim_inner,
num_groups,
stride_1x1=False,
inplace_relu=True,
eps=1e-5,
bn_mmt=0.1,
dilation=1,
norm_module=nn.BatchNorm3d,
block_idx=0,
):
"""
Args:
dim_in (int): the channel dimensions of the input.
dim_out (int): the channel dimension of the output.
temp_kernel_size (int): the temporal kernel sizes of the first
convolution in the bottleneck.
stride (int): the stride of the bottleneck.
dim_inner (int): the inner dimension of the block.
num_groups (int): number of groups for the convolution. num_groups=1
is for standard ResNet like networks, and num_groups>1 is for
ResNeXt like networks.
stride_1x1 (bool): if True, apply stride to 1x1 conv, otherwise
apply stride to the 3x3 conv.
inplace_relu (bool): if True, calculate the relu on the original
input without allocating new memory.
eps (float): epsilon for batch norm.
bn_mmt (float): momentum for batch norm. Noted that BN momentum in
PyTorch = 1 - BN momentum in Caffe2.
dilation (int): size of dilation.
norm_module (nn.Module): nn.Module for the normalization layer. The
default is nn.BatchNorm3d.
"""
super(BottleneckTransform, self).__init__()
self.temp_kernel_size = temp_kernel_size
self._inplace_relu = inplace_relu
self._eps = eps
self._bn_mmt = bn_mmt
self._stride_1x1 = stride_1x1
self._construct(
dim_in,
dim_out,
stride,
dim_inner,
num_groups,
dilation,
norm_module,
)
def _construct(
self,
dim_in,
dim_out,
stride,
dim_inner,
num_groups,
dilation,
norm_module,
):
(str1x1, str3x3) = (stride, 1) if self._stride_1x1 else (1, stride)
# Tx1x1, BN, ReLU.
self.a = nn.Conv3d(
dim_in,
dim_inner,
kernel_size=[self.temp_kernel_size, 1, 1],
stride=[1, str1x1, str1x1],
padding=[int(self.temp_kernel_size // 2), 0, 0],
bias=False,
)
self.a_bn = norm_module(
num_features=dim_inner, eps=self._eps, momentum=self._bn_mmt
)
self.a_relu = nn.ReLU(inplace=self._inplace_relu)
# 1x3x3, BN, ReLU.
self.b = nn.Conv3d(
dim_inner,
dim_inner,
[1, 3, 3],
stride=[1, str3x3, str3x3],
padding=[0, dilation, dilation],
groups=num_groups,
bias=False,
dilation=[1, dilation, dilation],
)
self.b_bn = norm_module(
num_features=dim_inner, eps=self._eps, momentum=self._bn_mmt
)
self.b_relu = nn.ReLU(inplace=self._inplace_relu)
# 1x1x1, BN.
self.c = nn.Conv3d(
dim_inner,
dim_out,
kernel_size=[1, 1, 1],
stride=[1, 1, 1],
padding=[0, 0, 0],
bias=False,
)
self.c_bn = norm_module(
num_features=dim_out, eps=self._eps, momentum=self._bn_mmt
)
self.c_bn.transform_final_bn = True
def forward(self, x):
# Explicitly forward every layer.
# Branch2a.
x = self.a(x)
x = self.a_bn(x)
x = self.a_relu(x)
# Branch2b.
x = self.b(x)
x = self.b_bn(x)
x = self.b_relu(x)
# Branch2c
x = self.c(x)
x = self.c_bn(x)
return x
class ResBlock(nn.Module):
"""
Residual block.
"""
def __init__(
self,
dim_in,
dim_out,
temp_kernel_size,
stride,
trans_func,
dim_inner,
num_groups=1,
stride_1x1=False,
inplace_relu=True,
eps=1e-5,
bn_mmt=0.1,
dilation=1,
norm_module=nn.BatchNorm3d,
block_idx=0,
drop_connect_rate=0.0,
):
"""
        ResBlock class constructs residual blocks. More details can be found in:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun.
"Deep residual learning for image recognition."
https://arxiv.org/abs/1512.03385
Args:
dim_in (int): the channel dimensions of the input.
dim_out (int): the channel dimension of the output.
temp_kernel_size (int): the temporal kernel sizes of the middle
convolution in the bottleneck.
stride (int): the stride of the bottleneck.
trans_func (string): transform function to be used to construct the
bottleneck.
dim_inner (int): the inner dimension of the block.
num_groups (int): number of groups for the convolution. num_groups=1
is for standard ResNet like networks, and num_groups>1 is for
ResNeXt like networks.
stride_1x1 (bool): if True, apply stride to 1x1 conv, otherwise
apply stride to the 3x3 conv.
inplace_relu (bool): calculate the relu on the original input
without allocating new memory.
eps (float): epsilon for batch norm.
bn_mmt (float): momentum for batch norm. Noted that BN momentum in
PyTorch = 1 - BN momentum in Caffe2.
dilation (int): size of dilation.
norm_module (nn.Module): nn.Module for the normalization layer. The
default is nn.BatchNorm3d.
drop_connect_rate (float): basic rate at which blocks are dropped,
linearly increases from input to output blocks.
"""
super(ResBlock, self).__init__()
self._inplace_relu = inplace_relu
self._eps = eps
self._bn_mmt = bn_mmt
self._drop_connect_rate = drop_connect_rate
self._construct(
dim_in,
dim_out,
temp_kernel_size,
stride,
trans_func,
dim_inner,
num_groups,
stride_1x1,
inplace_relu,
dilation,
norm_module,
block_idx,
)
def _construct(
self,
dim_in,
dim_out,
temp_kernel_size,
stride,
trans_func,
dim_inner,
num_groups,
stride_1x1,
inplace_relu,
dilation,
norm_module,
block_idx,
):
# Use skip connection with projection if dim or res change.
if (dim_in != dim_out) or (stride != 1):
self.branch1 = nn.Conv3d(
dim_in,
dim_out,
kernel_size=1,
stride=[1, stride, stride],
padding=0,
bias=False,
dilation=1,
)
self.branch1_bn = norm_module(
num_features=dim_out, eps=self._eps, momentum=self._bn_mmt
)
self.branch2 = trans_func(
dim_in,
dim_out,
temp_kernel_size,
stride,
dim_inner,
num_groups,
stride_1x1=stride_1x1,
inplace_relu=inplace_relu,
dilation=dilation,
norm_module=norm_module,
block_idx=block_idx,
)
self.relu = nn.ReLU(self._inplace_relu)
def forward(self, x):
f_x = self.branch2(x)
if self.training and self._drop_connect_rate > 0.0:
f_x = drop_path(f_x, self._drop_connect_rate)
if hasattr(self, "branch1"):
x = self.branch1_bn(self.branch1(x)) + f_x
else:
x = x + f_x
x = self.relu(x)
return x
class ResStage(nn.Module):
"""
Stage of 3D ResNet. It expects to have one or more tensors as input for
single pathway (C2D, I3D, Slow), and multi-pathway (SlowFast) cases.
More details can be found here:
Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He.
"SlowFast networks for video recognition."
https://arxiv.org/pdf/1812.03982.pdf
"""
def __init__(
self,
dim_in,
dim_out,
stride,
temp_kernel_sizes,
num_blocks,
dim_inner,
num_groups,
num_block_temp_kernel,
nonlocal_inds,
nonlocal_group,
nonlocal_pool,
dilation,
instantiation="softmax",
trans_func_name="bottleneck_transform",
stride_1x1=False,
inplace_relu=True,
norm_module=nn.BatchNorm3d,
drop_connect_rate=0.0,
):
"""
The `__init__` method of any subclass should also contain these arguments.
        ResStage builds p streams, where p can be greater than or equal to one.
Args:
dim_in (list): list of p the channel dimensions of the input.
Different channel dimensions control the input dimension of
different pathways.
dim_out (list): list of p the channel dimensions of the output.
                Different channel dimensions control the output dimension of
different pathways.
temp_kernel_sizes (list): list of the p temporal kernel sizes of the
convolution in the bottleneck. Different temp_kernel_sizes
control different pathway.
stride (list): list of the p strides of the bottleneck. Different
stride control different pathway.
num_blocks (list): list of p numbers of blocks for each of the
pathway.
dim_inner (list): list of the p inner channel dimensions of the
input. Different channel dimensions control the input dimension
of different pathways.
num_groups (list): list of number of p groups for the convolution.
num_groups=1 is for standard ResNet like networks, and
num_groups>1 is for ResNeXt like networks.
            num_block_temp_kernel (list): extend the temp_kernel_sizes to
                num_block_temp_kernel blocks, then fill a temporal kernel size
                of 1 for the rest of the layers.
nonlocal_inds (list): If the tuple is empty, no nonlocal layer will
be added. If the tuple is not empty, add nonlocal layers after
the index-th block.
dilation (list): size of dilation for each pathway.
nonlocal_group (list): list of number of p nonlocal groups. Each
number controls how to fold temporal dimension to batch
dimension before applying nonlocal transformation.
https://github.com/facebookresearch/video-nonlocal-net.
instantiation (string): different instantiation for nonlocal layer.
Supports two different instantiation method:
"dot_product": normalizing correlation matrix with L2.
"softmax": normalizing correlation matrix with Softmax.
            trans_func_name (string): name of the transformation function applied
                on the network.
norm_module (nn.Module): nn.Module for the normalization layer. The
default is nn.BatchNorm3d.
drop_connect_rate (float): basic rate at which blocks are dropped,
linearly increases from input to output blocks.
"""
super(ResStage, self).__init__()
assert all(
(
num_block_temp_kernel[i] <= num_blocks[i]
for i in range(len(temp_kernel_sizes))
)
)
self.num_blocks = num_blocks
self.nonlocal_group = nonlocal_group
self._drop_connect_rate = drop_connect_rate
self.temp_kernel_sizes = [
(temp_kernel_sizes[i] * num_blocks[i])[: num_block_temp_kernel[i]]
+ [1] * (num_blocks[i] - num_block_temp_kernel[i])
for i in range(len(temp_kernel_sizes))
]
assert (
len(
{
len(dim_in),
len(dim_out),
len(temp_kernel_sizes),
len(stride),
len(num_blocks),
len(dim_inner),
len(num_groups),
len(num_block_temp_kernel),
len(nonlocal_inds),
len(nonlocal_group),
}
)
== 1
)
self.num_pathways = len(self.num_blocks)
self._construct(
dim_in,
dim_out,
stride,
dim_inner,
num_groups,
trans_func_name,
stride_1x1,
inplace_relu,
nonlocal_inds,
nonlocal_pool,
instantiation,
dilation,
norm_module,
)
def _construct(
self,
dim_in,
dim_out,
stride,
dim_inner,
num_groups,
trans_func_name,
stride_1x1,
inplace_relu,
nonlocal_inds,
nonlocal_pool,
instantiation,
dilation,
norm_module,
):
for pathway in range(self.num_pathways):
for i in range(self.num_blocks[pathway]):
# Retrieve the transformation function.
trans_func = get_trans_func(trans_func_name)
# Construct the block.
res_block = ResBlock(
dim_in[pathway] if i == 0 else dim_out[pathway],
dim_out[pathway],
self.temp_kernel_sizes[pathway][i],
stride[pathway] if i == 0 else 1,
trans_func,
dim_inner[pathway],
num_groups[pathway],
stride_1x1=stride_1x1,
inplace_relu=inplace_relu,
dilation=dilation[pathway],
norm_module=norm_module,
block_idx=i,
drop_connect_rate=self._drop_connect_rate,
)
self.add_module("pathway{}_res{}".format(pathway, i), res_block)
if i in nonlocal_inds[pathway]:
nln = Nonlocal(
dim_out[pathway],
dim_out[pathway] // 2,
nonlocal_pool[pathway],
instantiation=instantiation,
norm_module=norm_module,
)
self.add_module(
"pathway{}_nonlocal{}".format(pathway, i), nln
)
def forward(self, inputs):
output = []
for pathway in range(self.num_pathways):
x = inputs[pathway]
for i in range(self.num_blocks[pathway]):
m = getattr(self, "pathway{}_res{}".format(pathway, i))
x = m(x)
if hasattr(self, "pathway{}_nonlocal{}".format(pathway, i)):
nln = getattr(
self, "pathway{}_nonlocal{}".format(pathway, i)
)
b, c, t, h, w = x.shape
if self.nonlocal_group[pathway] > 1:
# Fold temporal dimension into batch dimension.
x = x.permute(0, 2, 1, 3, 4)
x = x.reshape(
b * self.nonlocal_group[pathway],
t // self.nonlocal_group[pathway],
c,
h,
w,
)
x = x.permute(0, 2, 1, 3, 4)
x = nln(x)
if self.nonlocal_group[pathway] > 1:
# Fold back to temporal dimension.
x = x.permute(0, 2, 1, 3, 4)
x = x.reshape(b, t, c, h, w)
x = x.permute(0, 2, 1, 3, 4)
output.append(x)
return output
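if __name__ == "__main__":
    # Illustrative smoke test: build a tiny single-pathway stage with the bottleneck
    # transform and push a dummy clip of shape (N, C, T, H, W) through it. All sizes
    # below are assumptions chosen for the example, not values used by the repo.
    stage = ResStage(
        dim_in=[64],
        dim_out=[256],
        stride=[2],
        temp_kernel_sizes=[[3]],
        num_blocks=[2],
        dim_inner=[64],
        num_groups=[1],
        num_block_temp_kernel=[2],
        nonlocal_inds=[[]],
        nonlocal_group=[1],
        nonlocal_pool=[[1, 2, 2]],
        dilation=[1],
        trans_func_name="bottleneck_transform",
    )
    clip = torch.randn(2, 64, 4, 56, 56)
    out = stage([clip])[0]
    print(out.shape)  # expected: torch.Size([2, 256, 4, 28, 28])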
| 24,800 | 33.161157 | 83 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/module/tpn/base.py | import logging
from abc import ABCMeta, abstractmethod
import torch.nn as nn
class BaseRecognizer(nn.Module):
"""Base class for recognizers"""
__metaclass__ = ABCMeta
def __init__(self):
super(BaseRecognizer, self).__init__()
@property
def with_tenon_list(self):
return hasattr(self, 'tenon_list') and self.tenon_list is not None
@property
def with_cls(self):
return hasattr(self, 'cls_head') and self.cls_head is not None
@abstractmethod
def forward_train(self, num_modalities, **kwargs):
pass
@abstractmethod
def forward_test(self, num_modalities, **kwargs):
pass
def init_weights(self, pretrained=None):
if pretrained is not None:
logger = logging.getLogger()
logger.info("load model from: {}".format(pretrained))
def forward(self, num_modalities, img_meta, return_loss=True, **kwargs):
num_modalities = int(num_modalities[0])
if return_loss:
return self.forward_train(num_modalities, img_meta, **kwargs)
else:
return self.forward_test(num_modalities, img_meta, **kwargs)
| 1,158 | 26.595238 | 76 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/module/tpn/simple_spatial_module.py | import torch.nn as nn
class SimpleSpatialModule(nn.Module):
def __init__(self, spatial_type='avg', spatial_size=7):
super(SimpleSpatialModule, self).__init__()
assert spatial_type in ['avg']
self.spatial_type = spatial_type
self.spatial_size = spatial_size if not isinstance(spatial_size, int) else (spatial_size, spatial_size)
if self.spatial_type == 'avg':
self.op = nn.AvgPool2d(self.spatial_size, stride=1, padding=0)
def init_weights(self):
pass
def forward(self, input):
return self.op(input)
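if __name__ == "__main__":
    # Illustrative usage: average-pool a 7x7 feature map down to 1x1. The feature
    # shape below is an assumption for the example.
    import torch
    pool = SimpleSpatialModule(spatial_type='avg', spatial_size=7)
    feat = torch.randn(2, 2048, 7, 7)
    print(pool(feat).shape)  # expected: torch.Size([2, 2048, 1, 1])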
| 586 | 26.952381 | 111 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/module/tpn/cls_head_module.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class ClsHead(nn.Module):
"""Simplest classification head"""
def __init__(self,
with_avg_pool=True,
temporal_feature_size=1,
spatial_feature_size=7,
dropout_ratio=0.8,
in_channels=2048,
num_classes=101,
fcn_testing=False,
init_std=0.01):
super(ClsHead, self).__init__()
self.with_avg_pool = with_avg_pool
        self.dropout_ratio = dropout_ratio
        self.in_channels = in_channels
self.temporal_feature_size = temporal_feature_size
self.spatial_feature_size = spatial_feature_size
self.init_std = init_std
self.fcn_testing = fcn_testing
if self.dropout_ratio != 0:
self.dropout = nn.Dropout(p=self.dropout_ratio)
else:
self.dropout = None
# self.with_avg_pool = fcn_testing
if self.with_avg_pool:
self.avg_pool = nn.AvgPool3d((temporal_feature_size, spatial_feature_size, spatial_feature_size), (1, 1, 1),
(0, 0, 0))
if self.fcn_testing:
self.new_cls = None
self.in_channels = in_channels
self.num_classes = num_classes
self.fc_cls = nn.Linear(in_channels, num_classes)
def init_weights(self):
nn.init.normal_(self.fc_cls.weight, 0, self.init_std)
nn.init.constant_(self.fc_cls.bias, 0)
def forward(self, x):
if not self.fcn_testing:
if x.ndimension() == 4:
x = x.unsqueeze(2)
assert x.shape[1] == self.in_channels
assert x.shape[2] == self.temporal_feature_size
assert x.shape[3] == self.spatial_feature_size
assert x.shape[4] == self.spatial_feature_size
if self.with_avg_pool:
x = self.avg_pool(x)
if self.dropout is not None:
x = self.dropout(x)
x = x.view(x.size(0), -1)
cls_score = self.fc_cls(x)
return cls_score
else:
if self.with_avg_pool:
x = self.avg_pool(x)
if self.new_cls is None:
self.new_cls = nn.Conv3d(self.in_channels, self.num_classes, 1, 1, 0).cuda()
self.new_cls.weight.copy_(self.fc_cls.weight.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1))
self.new_cls.bias.copy_(self.fc_cls.bias)
self.fc_cls = None
class_map = self.new_cls(x)
# return class_map.mean([2,3,4])
return class_map
def loss(
self,
cls_score,
labels
):
losses = dict()
losses['loss_cls'] = F.mse_loss(cls_score, labels)
return losses
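if __name__ == "__main__":
    # Illustrative usage sketch; the feature map shape and the 5-dimensional
    # regression target below are assumptions for the example.
    head = ClsHead(in_channels=2048, num_classes=5)
    head.init_weights()
    feat = torch.randn(2, 2048, 1, 7, 7)
    scores = head(feat)
    print(scores.shape)  # expected: torch.Size([2, 5])
    print(head.loss(scores, torch.rand(2, 5)))  # {'loss_cls': ...}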
| 2,900 | 33.535714 | 120 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/module/tpn/tpn.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv import Config
import numpy as np
def xavier_init(module, gain=1, bias=0, distribution='normal'):
assert distribution in ['uniform', 'normal']
if hasattr(module, 'weight') and module.weight is not None:
if distribution == 'uniform':
nn.init.xavier_uniform_(module.weight, gain=gain)
else:
nn.init.xavier_normal_(module.weight, gain=gain)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
class ConvModule(nn.Module):
def __init__(
self,
inplanes,
planes,
kernel_size,
stride,
padding,
bias=False,
groups=1,
):
super(ConvModule, self).__init__()
self.conv = nn.Conv3d(inplanes, planes, kernel_size, stride, padding, bias=bias, groups=groups)
self.bn = nn.BatchNorm3d(planes)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
out = self.relu(self.bn(self.conv(x)))
return out
class AuxHead(nn.Module):
def __init__(
self,
inplanes,
planes,
loss_weight=0.5
):
super(AuxHead, self).__init__()
self.convs = \
ConvModule(inplanes, inplanes * 2, kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1), bias=False)
self.loss_weight = loss_weight
self.dropout = nn.Dropout(p=0.5)
self.fc = nn.Linear(inplanes * 2, planes)
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
if isinstance(m, nn.Conv3d):
xavier_init(m, distribution='uniform')
if isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.fill_(0)
def forward(self, x, target=None):
if target is None:
return None
loss = dict()
x = self.convs(x)
x = F.adaptive_avg_pool3d(x, 1).squeeze(-1).squeeze(-1).squeeze(-1)
x = self.dropout(x)
x = self.fc(x)
loss['loss_aux'] = self.loss_weight * F.mse_loss(x, target)
return loss
class TemporalModulation(nn.Module):
def __init__(
self,
inplanes,
planes,
downsample_scale=8,
):
super(TemporalModulation, self).__init__()
self.conv = nn.Conv3d(inplanes, planes, (3, 1, 1), (1, 1, 1), (1, 0, 0), bias=False, groups=32)
self.pool = nn.MaxPool3d((downsample_scale, 1, 1), (downsample_scale, 1, 1), (0, 0, 0), ceil_mode=True)
def forward(self, x):
x = self.conv(x)
x = self.pool(x)
return x
class Upsampling(nn.Module):
def __init__(
self,
scale=(2, 1, 1),
):
super(Upsampling, self).__init__()
self.scale = scale
def forward(self, x):
x = F.interpolate(x, scale_factor=self.scale, mode='nearest')
return x
class DownSampling(nn.Module):
def __init__(
self,
inplanes,
planes,
kernel_size=(3, 1, 1),
stride=(1, 1, 1),
padding=(1, 0, 0),
bias=False,
groups=1,
norm=False,
activation=False,
downsample_position='after',
downsample_scale=(1, 2, 2),
):
super(DownSampling, self).__init__()
self.conv = nn.Conv3d(inplanes, planes, kernel_size, stride, padding, bias=bias, groups=groups)
self.norm = nn.BatchNorm3d(planes) if norm else None
self.relu = nn.ReLU(inplace=True) if activation else None
assert (downsample_position in ['before', 'after'])
self.downsample_position = downsample_position
self.pool = nn.MaxPool3d(downsample_scale, downsample_scale, (0, 0, 0), ceil_mode=True)
def forward(self, x):
if self.downsample_position == 'before':
x = self.pool(x)
x = self.conv(x)
if self.norm is not None:
x = self.norm(x)
if self.relu is not None:
x = self.relu(x)
if self.downsample_position == 'after':
x = self.pool(x)
return x
class LevelFusion(nn.Module):
def __init__(self,
in_channels=[1024, 1024],
mid_channels=[1024, 1024],
out_channels=2048,
ds_scales=[(1, 1, 1), (1, 1, 1)], ):
super(LevelFusion, self).__init__()
self.ops = nn.ModuleList()
num_ins = len(in_channels)
for i in range(num_ins):
op = DownSampling(in_channels[i], mid_channels[i], kernel_size=(1, 1, 1), stride=(1, 1, 1),
padding=(0, 0, 0), bias=False, groups=32, norm=True, activation=True,
downsample_position='before', downsample_scale=ds_scales[i])
self.ops.append(op)
in_dims = np.sum(mid_channels)
self.fusion_conv = nn.Sequential(
nn.Conv3d(in_dims, out_channels, 1, 1, 0, bias=False),
nn.BatchNorm3d(out_channels),
nn.ReLU(inplace=True)
)
def forward(self, inputs):
out = [self.ops[i](feature) for i, feature in enumerate(inputs)]
out = torch.cat(out, 1)
out = self.fusion_conv(out)
return out
class SpatialModulation(nn.Module):
def __init__(
self,
inplanes=[1024, 2048],
planes=2048,
):
super(SpatialModulation, self).__init__()
self.spatial_modulation = nn.ModuleList()
for i, dim in enumerate(inplanes):
op = nn.ModuleList()
ds_factor = planes // dim
ds_num = int(np.log2(ds_factor))
if ds_num < 1:
op = Identity()
else:
for dsi in range(ds_num):
in_factor = 2 ** dsi
out_factor = 2 ** (dsi + 1)
op.append(ConvModule(dim * in_factor, dim * out_factor, kernel_size=(1, 3, 3), stride=(1, 2, 2),
padding=(0, 1, 1), bias=False))
self.spatial_modulation.append(op)
def forward(self, inputs):
out = []
for i, feature in enumerate(inputs):
if isinstance(self.spatial_modulation[i], nn.ModuleList):
out_ = inputs[i]
for III, op in enumerate(self.spatial_modulation[i]):
out_ = op(out_)
out.append(out_)
else:
out.append(self.spatial_modulation[i](inputs[i]))
return out
class TPN(nn.Module):
def __init__(
self,
in_channels=[256, 512, 1024, 2048],
out_channels=256,
spatial_modulation_config=None,
temporal_modulation_config=None,
upsampling_config=None,
downsampling_config=None,
level_fusion_config=None,
aux_head_config=None,
):
super(TPN, self).__init__()
assert isinstance(in_channels, list)
assert isinstance(out_channels, int)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
spatial_modulation_config = Config(spatial_modulation_config) if isinstance(spatial_modulation_config,
dict) else spatial_modulation_config
temporal_modulation_config = Config(temporal_modulation_config) if isinstance(temporal_modulation_config,
dict) else temporal_modulation_config
upsampling_config = Config(upsampling_config) if isinstance(upsampling_config, dict) else upsampling_config
downsampling_config = Config(downsampling_config) if isinstance(downsampling_config,
dict) else downsampling_config
aux_head_config = Config(aux_head_config) if isinstance(aux_head_config, dict) else aux_head_config
level_fusion_config = Config(level_fusion_config) if isinstance(level_fusion_config,
dict) else level_fusion_config
self.temporal_modulation_ops = nn.ModuleList()
self.upsampling_ops = nn.ModuleList()
self.downsampling_ops = nn.ModuleList()
self.level_fusion_op = LevelFusion(**level_fusion_config)
self.spatial_modulation = SpatialModulation(**spatial_modulation_config)
for i in range(0, self.num_ins, 1):
inplanes = in_channels[-1]
planes = out_channels
if temporal_modulation_config is not None:
# overwrite the temporal_modulation_config
temporal_modulation_config.param.downsample_scale = temporal_modulation_config.scales[i]
temporal_modulation_config.param.inplanes = inplanes
temporal_modulation_config.param.planes = planes
temporal_modulation = TemporalModulation(**temporal_modulation_config.param)
self.temporal_modulation_ops.append(temporal_modulation)
if i < self.num_ins - 1:
if upsampling_config is not None:
# overwrite the up-sampling_config
upsampling = Upsampling(**upsampling_config)
self.upsampling_ops.append(upsampling)
if downsampling_config is not None:
# overwrite the down-sampling_config
downsampling_config.param.inplanes = planes
downsampling_config.param.planes = planes
downsampling_config.param.downsample_scale = downsampling_config.scales
downsampling = DownSampling(**downsampling_config.param)
self.downsampling_ops.append(downsampling)
out_dims = level_fusion_config.out_channels
# Two pyramids
self.level_fusion_op2 = LevelFusion(**level_fusion_config)
self.pyramid_fusion_op = nn.Sequential(
nn.Conv3d(out_dims * 2, 2048, 1, 1, 0, bias=False),
nn.BatchNorm3d(2048),
nn.ReLU(inplace=True)
)
# overwrite aux_head_config
if aux_head_config is not None:
aux_head_config.inplanes = self.in_channels[-2]
self.aux_head = AuxHead(**aux_head_config)
else:
self.aux_head = None
# default init_weights for conv(msra) and norm in ConvModule
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv3d):
xavier_init(m, distribution='uniform')
if isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.fill_(0)
if self.aux_head is not None:
self.aux_head.init_weights()
def forward(self, inputs, target=None):
loss = None
# Auxiliary loss
if self.aux_head is not None:
loss = self.aux_head(inputs[-2], target)
# Spatial Modulation
outs = self.spatial_modulation(inputs)
# Temporal Modulation
outs = [temporal_modulation(outs[i]) for i, temporal_modulation in enumerate(self.temporal_modulation_ops)]
temporal_modulation_outs = outs
# Build top-down flow - upsampling operation
if self.upsampling_ops is not None:
for i in range(self.num_ins - 1, 0, -1):
outs[i - 1] = outs[i - 1] + self.upsampling_ops[i - 1](outs[i])
# Get top-down outs
topdownouts = self.level_fusion_op2(outs)
outs = temporal_modulation_outs
# Build bottom-up flow - downsampling operation
if self.downsampling_ops is not None:
for i in range(0, self.num_ins - 1, 1):
outs[i + 1] = outs[i + 1] + self.downsampling_ops[i](outs[i])
# Get bottom-up outs
outs = self.level_fusion_op(outs)
# fuse two pyramid outs
outs = self.pyramid_fusion_op(torch.cat([topdownouts, outs], 1))
return outs, loss
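if __name__ == "__main__":
    # Illustrative smoke test for two of the building blocks; all tensor sizes are
    # assumptions for the example (channel counts must stay divisible by the
    # groups=32 used inside the convolutions).
    tm = TemporalModulation(inplanes=1024, planes=1024, downsample_scale=8)
    feat = torch.randn(1, 1024, 8, 7, 7)
    print(tm(feat).shape)  # temporal axis pooled by 8 -> (1, 1024, 1, 7, 7)
    lf = LevelFusion(in_channels=[1024, 1024], mid_channels=[1024, 1024], out_channels=2048)
    fused = lf([torch.randn(1, 1024, 1, 7, 7), torch.randn(1, 1024, 1, 7, 7)])
    print(fused.shape)  # expected: torch.Size([1, 2048, 1, 7, 7])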
| 12,418 | 34.584527 | 123 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/module/tpn/resnet_mm.py | import logging
import torch.nn as nn
import torch.utils.checkpoint as cp
from dpcv.checkpoint.load import load_checkpoint
import torch
def constant_init(module, val, bias=0):
if hasattr(module, 'weight') and module.weight is not None:
nn.init.constant_(module.weight, val)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
def kaiming_init(module,
a=0,
mode='fan_out',
nonlinearity='relu',
bias=0,
distribution='normal'):
assert distribution in ['uniform', 'normal']
if hasattr(module, 'weight') and module.weight is not None:
if distribution == 'uniform':
nn.init.kaiming_uniform_(
module.weight, a=a, mode=mode, nonlinearity=nonlinearity)
else:
nn.init.kaiming_normal_(
module.weight, a=a, mode=mode, nonlinearity=nonlinearity)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
"3x3 convolution with padding"
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
dilation=dilation,
bias=False)
class Bottleneck(nn.Module):
expansion = 4
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
tsm=False,
tsm_position='inplace',
nsegments=8,
groups=1,
width_per_group=64,
style='pytorch',
with_cp=False):
"""Bottleneck block for ResNet.
If style is "pytorch", the stride-two layer is the 3x3 conv layer,
if it is "caffe", the stride-two layer is the first 1x1 conv layer.
"""
super(Bottleneck, self).__init__()
assert style in ['pytorch', 'caffe']
assert tsm_position in ['inplace', 'residual']
self.inplanes = inplanes
self.planes = planes
self.tsm = tsm
self.tsm_position = tsm_position
self.nsegments = nsegments
width = int(planes * (width_per_group / 64.)) * groups
if style == 'pytorch':
self.conv1_stride = 1
self.conv2_stride = stride
else:
self.conv1_stride = stride
self.conv2_stride = 1
self.conv1 = nn.Conv2d(
inplanes,
width,
kernel_size=1,
stride=self.conv1_stride,
bias=False)
self.conv2 = nn.Conv2d(
width,
width,
kernel_size=3,
stride=self.conv2_stride,
groups=groups,
padding=dilation,
dilation=dilation,
bias=False)
self.bn1 = nn.BatchNorm2d(width)
self.bn2 = nn.BatchNorm2d(width)
self.conv3 = nn.Conv2d(
width, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
self.with_cp = with_cp
def forward(self, x):
def _inner_forward(x):
identity = x
if self.tsm:
nt, c, h, w = x.size()
n_batch = nt // self.nsegments
x = x.view(n_batch, self.nsegments, c, h, w)
fold = c // 8
out = torch.zeros_like(x)
out[:, :-1, :fold] = x[:, 1:, :fold]
out[:, 1:, fold: 2 * fold] = x[:, :-1, fold: 2 * fold]
out[:, :, 2 * fold:] = x[:, :, 2 * fold:]
x = out.view(nt, c, h, w)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
if self.tsm_position == 'inplace':
identity = self.downsample(x)
else:
identity = self.downsample(identity)
out += identity
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
def make_res_layer(block,
inplanes,
planes,
blocks,
stride=1,
dilation=1,
tsm=False,
tsm_position='inplace',
nsegments=8,
style='pytorch',
groups=1,
width_per_group=64,
with_cp=False):
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(
inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(
block(
inplanes,
planes,
stride,
dilation,
downsample,
style=style,
tsm=tsm,
tsm_position=tsm_position,
groups=groups,
width_per_group=width_per_group,
nsegments=nsegments,
with_cp=with_cp))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(inplanes, planes, 1, dilation, style=style, with_cp=with_cp, tsm=tsm, tsm_position=tsm_position,
nsegments=nsegments, groups=groups, width_per_group=width_per_group))
return nn.Sequential(*layers)
class ResNet(nn.Module):
"""ResNe(x)t backbone.
Args:
depth (int): Depth of resnet, from {50, 101}.
num_stages (int): Resnet stages, normally 4.
strides (Sequence[int]): Strides of the first block of each stage.
dilations (Sequence[int]): Dilation of each stage.
out_indices (Sequence[int]): Output from which stages.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer.
frozen_stages (int): Stages to be frozen (all param fixed). -1 means
not freezing any parameters.
bn_eval (bool): Whether to set BN layers to eval mode, namely, freeze
running stats (mean and var).
bn_frozen (bool): Whether to freeze weight and bias of BN layers.
partial_bn (bool): Whether to freeze weight and bias of **all but the first** BN layers.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
"""
arch_settings = {
50: (Bottleneck, (3, 4, 6, 3)),
101: (Bottleneck, (3, 4, 23, 3)),
}
def __init__(self,
depth,
pretrained=None,
num_stages=4,
groups=1,
width_per_group=64,
strides=(1, 2, 2, 2),
dilations=(1, 1, 1, 1),
out_indices=(0, 1, 2, 3),
style='pytorch',
width=1,
frozen_stages=-1,
bn_eval=False,
bn_frozen=False,
partial_bn=False,
tsm=False,
tsm_position='inplace',
nsegments=8,
with_cp=False):
super(ResNet, self).__init__()
if depth not in self.arch_settings:
raise KeyError('invalid depth {} for resnet'.format(depth))
self.depth = depth
self.pretrained = pretrained
self.num_stages = num_stages
self.tsm = tsm
self.nsegments = nsegments
assert 1 <= num_stages <= 4
self.strides = strides
self.dilations = dilations
assert len(strides) == len(dilations) == num_stages
self.out_indices = out_indices
assert max(out_indices) < num_stages
self.style = style
self.frozen_stages = frozen_stages
self.bn_eval = bn_eval
self.bn_frozen = bn_frozen
self.partial_bn = partial_bn
self.with_cp = with_cp
self.block, stage_blocks = self.arch_settings[depth]
self.stage_blocks = stage_blocks[:num_stages]
self.inplanes = int(64 * width)
self.base = int(64 * width)
self.groups = groups
self.width_per_group = width_per_group
self.conv1 = nn.Conv2d(
3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.res_layers = []
for i, num_blocks in enumerate(self.stage_blocks):
stride = strides[i]
dilation = dilations[i]
planes = self.base * 2 ** i
res_layer = make_res_layer(
self.block,
self.inplanes,
planes,
num_blocks,
stride=stride,
dilation=dilation,
style=self.style,
tsm=self.tsm,
tsm_position=tsm_position,
nsegments=self.nsegments,
groups=groups,
width_per_group=width_per_group,
with_cp=with_cp)
self.inplanes = planes * self.block.expansion
layer_name = 'layer{}'.format(i + 1)
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self.feat_dim = self.block.expansion * 64 * 2 ** (
len(self.stage_blocks) - 1)
def init_weights(self):
if isinstance(self.pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, self.pretrained, strict=False, logger=logger)
elif self.pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, nn.BatchNorm2d):
constant_init(m, 1)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
outs = []
for i, layer_name in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
x = res_layer(x)
if i in self.out_indices:
outs.append(x)
if len(outs) == 1:
return outs[0]
else:
return tuple(outs)
def train(self, mode=True):
super(ResNet, self).train(mode)
if self.bn_eval:
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
if self.bn_frozen:
for params in m.parameters():
params.requires_grad = False
if self.partial_bn:
for i in range(1, self.frozen_stages + 1):
mod = getattr(self, 'layer{}'.format(i))
for m in mod.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
m.weight.requires_grad = False
m.bias.requires_grad = False
if mode and self.frozen_stages >= 0:
for param in self.conv1.parameters():
param.requires_grad = False
for param in self.bn1.parameters():
param.requires_grad = False
self.bn1.eval()
self.bn1.weight.requires_grad = False
self.bn1.bias.requires_grad = False
for i in range(1, self.frozen_stages + 1):
mod = getattr(self, 'layer{}'.format(i))
mod.eval()
for param in mod.parameters():
param.requires_grad = False
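if __name__ == "__main__":
    # Illustrative smoke test: a TSM-enabled backbone expects clips flattened to
    # (batch * nsegments, 3, H, W); the clip size below is an assumption.
    backbone = ResNet(depth=50, pretrained=None, tsm=True, nsegments=8)
    backbone.init_weights()
    clip = torch.randn(8, 3, 224, 224)  # a single clip of 8 frames
    outs = backbone(clip)
    print([o.shape for o in outs])  # four stages with 256/512/1024/2048 channels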
| 12,608 | 33.263587 | 114 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/module/tpn/simple_consensus.py | import torch
import torch.nn as nn
class _SimpleConsensus(torch.autograd.Function):
"""Simplest segmental consensus module"""
@staticmethod
def forward(ctx, x, dim, consensus_type):
        # save_for_backward only accepts tensors, so keep the non-tensor
        # arguments as plain attributes on the autograd context instead
        ctx.dim = dim
        ctx.consensus_type = consensus_type
        ctx.shape = x.size()
if consensus_type == 'avg':
output = x.mean(dim=dim, keepdim=True)
else:
output = None
return output
@staticmethod
def backward(ctx, grad_output):
        shape = ctx.shape
        if ctx.consensus_type == 'avg':
            grad_in = grad_output.expand(shape) / float(shape[ctx.dim])
        else:
            grad_in = None
        # backward must return one gradient per forward input; dim and
        # consensus_type are non-tensor arguments, so they get None
        return grad_in, None, None
class SimpleConsensus(nn.Module):
def __init__(self, consensus_type, dim=1):
super(SimpleConsensus, self).__init__()
assert consensus_type in ['avg']
self.consensus_type = consensus_type
self.dim = dim
def init_weights(self):
pass
def forward(self, input):
return _SimpleConsensus.apply(input, self.dim, self.consensus_type)
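if __name__ == "__main__":
    # Illustrative check: average consensus over the segment axis of segment-level
    # scores shaped (batch, segments, classes); the sizes are assumptions.
    consensus = SimpleConsensus('avg', dim=1)
    seg_scores = torch.randn(2, 4, 5, requires_grad=True)
    video_score = consensus(seg_scores)
    print(video_score.shape)  # expected: torch.Size([2, 1, 5])
    video_score.sum().backward()
    print(seg_scores.grad.shape)  # gradients flow back to every segment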
| 1,088 | 26.225 | 75 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/loss/pers_emo_loss.py | import torch
import torch.nn.functional as F
from .build import LOSS_FUNC_REGISTRY
def per_emo_loss(p_score, p_label, e_score, e_label, p_co, e_co, x_ep, use_adv=False):
p_mark = torch.cat([torch.ones((p_co.shape[0], 1)), torch.zeros((p_co.shape[0], 1))], 1).cuda()
e_mark = torch.cat([torch.zeros((e_co.shape[0], 1)), torch.ones((e_co.shape[0], 1))], 1).cuda()
l_p = F.smooth_l1_loss(p_score, p_label) / 10 # equal to aggregation output of personality prediction
l_e = F.smooth_l1_loss(e_score, e_label)
l_ep = F.smooth_l1_loss(x_ep, p_label)
l_d_p = F.binary_cross_entropy(p_co, p_mark)
l_d_e = F.binary_cross_entropy(e_co, e_mark)
l_d = l_d_p + l_d_e
if not use_adv:
return l_p + l_e + 0.1 * l_ep + 0.1 * l_d
l_d_p_adv = F.binary_cross_entropy(p_co, e_mark)
l_d_e_adv = F.binary_cross_entropy(e_co, p_mark)
l_adv = l_d_p + l_d_e + l_d_p_adv + l_d_e_adv
return l_p + l_e + 0.1 * l_ep + 0.1 * l_d + 0.1 * l_adv
@LOSS_FUNC_REGISTRY.register()
class PeremonLoss:
def __call__(self, p_score, p_label, e_score, e_label, p_co, e_co, x_ep, use_adv=False):
p_mark = torch.cat([torch.ones((p_co.shape[0], 1)), torch.zeros((p_co.shape[0], 1))], 1).cuda()
e_mark = torch.cat([torch.zeros((e_co.shape[0], 1)), torch.ones((e_co.shape[0], 1))], 1).cuda()
l_p = F.smooth_l1_loss(p_score, p_label) / 10 # equal to aggregation output of personality prediction
l_e = F.smooth_l1_loss(e_score, e_label)
l_ep = F.smooth_l1_loss(x_ep, p_label)
l_d_p = F.binary_cross_entropy(p_co, p_mark)
l_d_e = F.binary_cross_entropy(e_co, e_mark)
l_d = l_d_p + l_d_e
if not use_adv:
return l_p + l_e + 0.1 * l_ep + 0.1 * l_d
l_d_p_adv = F.binary_cross_entropy(p_co, e_mark)
l_d_e_adv = F.binary_cross_entropy(e_co, p_mark)
l_adv = l_d_p + l_d_e + l_d_p_adv + l_d_e_adv
return l_p + l_e + 0.1 * l_ep + 0.1 * l_d + 0.1 * l_adv | 1,984 | 43.111111 | 110 | py |
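# Usage sketch (illustrative; the CUDA requirement follows from the .cuda() calls
# above, and the 7-dimensional emotion target is an assumption):
# loss_fn = PeremonLoss()
# p_score, p_label = torch.rand(4, 5).cuda(), torch.rand(4, 5).cuda()
# e_score, e_label = torch.rand(4, 7).cuda(), torch.rand(4, 7).cuda()
# p_co = torch.sigmoid(torch.randn(4, 2)).cuda()
# e_co = torch.sigmoid(torch.randn(4, 2)).cuda()
# x_ep = torch.rand(4, 5).cuda()
# loss = loss_fn(p_score, p_label, e_score, e_label, p_co, e_co, x_ep)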
DeepPersonality | DeepPersonality-main/dpcv/modeling/loss/common.py | import torch.nn as nn
from .build import LOSS_FUNC_REGISTRY
@LOSS_FUNC_REGISTRY.register()
def mean_square_error():
return nn.MSELoss()
@LOSS_FUNC_REGISTRY.register()
def l1_loss():
return nn.L1Loss()
@LOSS_FUNC_REGISTRY.register()
def smooth_l1_loss():
return nn.SmoothL1Loss() | 296 | 16.470588 | 37 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/loss/label_smooth.py | import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
class LabelSmoothLoss(nn.Module):
"""
@author: https://github.com/TingsongYu
"""
def __init__(self, smoothing=0.0):
super(LabelSmoothLoss, self).__init__()
self.smoothing = smoothing
def forward(self, input, target):
log_prob = F.log_softmax(input, dim=-1) # log_p vector
weight = input.new_ones(input.size()) * self.smoothing / (input.size(-1) - 1.)
weight.scatter_(-1, target.unsqueeze(-1), (1. - self.smoothing)) # Q vector
        loss = (-weight * log_prob).sum(dim=-1).mean()  # cross entropy with smoothed targets: sum of -Q * log_p, then batch mean
return loss
if __name__ == '__main__':
# test example
output = torch.tensor([[4.0, 5.0, 10.0], [1.0, 5.0, 4.0], [1.0, 15.0, 4.0]])
label = torch.tensor([2, 1, 1], dtype=torch.int64)
criterion = LabelSmoothLoss(0.01)
loss = criterion(output, label)
print("CrossEntropy:{}".format(loss))
| 979 | 28.69697 | 86 | py |
DeepPersonality | DeepPersonality-main/dpcv/modeling/loss/cr_loss.py | import torch
import torch.nn as nn
from .build import LOSS_FUNC_REGISTRY
def one_hot_CELoss(pred, label):
bs = label.size(0)
log_prob = torch.log_softmax(pred, dim=1)
loss = -torch.sum(log_prob * label) / bs
return loss
class BellLoss:
def __init__(self, gama=300, theta=9):
self.gama = torch.as_tensor(gama)
self.theta = torch.as_tensor(theta)
def __call__(self, pred, label):
exponent = - torch.square(pred - label) / (2 * torch.square(self.theta))
loss = self.gama * (1 - torch.exp(exponent)).sum()
return loss
@LOSS_FUNC_REGISTRY.register()
def crnet_loss_func():
return {
"ce_loss": one_hot_CELoss,
"bell_loss": BellLoss(),
"mse_loss": nn.MSELoss(),
"l1_loss": nn.L1Loss()
}
if __name__ == "__main__":
pre = torch.Tensor([[[0.1, 0.2, 0.3, 2],
[0.3, 0.5, 0.4, 4]],
[[0.1, 0.2, 0.3, 1],
[0.3, 0.5, 0.4, 3]]])
print("pre:", pre.shape)
oh_label = torch.tensor([[[0, 1, 0, 0],
[0, 0, 1, 0]],
[[1, 0, 0, 0],
[0, 1, 0, 0]]])
print("oh_label:", oh_label.shape)
loss = one_hot_CELoss(pre, oh_label)
print(loss)
pred = torch.randn(2, 5)
label = torch.randn(2, 5)
bell_loss = BellLoss()
b_loss = bell_loss(pred, label)
print(b_loss)
| 1,447 | 25.814815 | 80 | py |
DeepPersonality | DeepPersonality-main/script/run_exp.py | #! /usr/bin/env python
import sys
import os
current_path = os.path.dirname(os.path.abspath(__file__))
work_path = os.path.join(current_path, "../")
sys.path.append(work_path)
from dpcv.tools.common import parse_args
from dpcv.config.default_config_opt import cfg, cfg_from_file, cfg_from_list
# from torch.utils.tensorboard import SummaryWriter
from dpcv.experiment.exp_runner import ExpRunner
def setup():
args = parse_args()
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.resume:
cfg.TRAIN.RESUME = args.resume
if args.max_epoch:
cfg.TRAIN.MAX_EPOCH = args.max_epoch
if args.lr:
cfg.SOLVER.RESET_LR = True
cfg.SOLVER.LR_INIT = args.lr
if args.test_only:
cfg.TEST.TEST_ONLY = True
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
return args
def main():
args = setup()
runner = ExpRunner(cfg)
if args.test_only:
return runner.test()
runner.run()
if __name__ == "__main__":
# for debug setting
# import os
# os.chdir("..")
main()
| 1,100 | 21.02 | 76 | py |
DeepPersonality | DeepPersonality-main/script/cmp_multi_modal_post_fusion.py | import torch
import os
import numpy as np
from dpcv.evaluation.metrics import compute_pcc, compute_ccc
import logging
import argparse
class CmpPostFusion:
def __init__(self, visual_data_path, audio_data_path, label_data_path, addition_data_path=None):
logger.info(f" ================ {os.path.dirname(visual_data_path)} ================")
self.visual_data = self.read_data(visual_data_path)
self.audio_data = self.read_data(audio_data_path)
self.label_data = self.read_data(label_data_path)
self.addition = []
self.num_modal = 2
if addition_data_path:
self.addition = self.read_data(addition_data_path)
self.num_modal += 1
def read_data(self, path):
return torch.load(path)
def compute(self):
max_len = min(len(self.visual_data), len(self.audio_data))
self.visual_data = self.visual_data[:max_len, :]
self.audio_data = self.audio_data[:max_len, :]
self.label_data = self.label_data[:max_len, :]
if len(self.addition) > 0:
data = (self.visual_data + self.audio_data + self.addition) / self.num_modal
else:
data = (self.visual_data + self.audio_data) / self.num_modal
mse = np.square(data - self.label_data).mean(axis=0)
logger.info(f"mse: {mse}, {mse.mean()}")
ocean_acc = (1 - np.abs(data - self.label_data)).mean(axis=0)
logger.info(f"acc: {ocean_acc}, {ocean_acc.mean()}")
pcc_dict, pcc_mean = compute_pcc(data, self.label_data)
logger.info(f"pcc: {pcc_dict}, {pcc_mean}")
ccc_dict, ccc_mean = compute_ccc(data, self.label_data)
logger.info(f"ccc: {ccc_dict}, {ccc_mean}")
if __name__ == "__main__":
# import os; os.chdir("..")
logger = logging.getLogger(__name__)
logging.basicConfig(
level=logging.INFO, format='%(asctime)s - %(message)s',
) # define the format when print on screen
handler = logging.FileHandler("log.txt")
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s- %(message)s') # define the format when recorded in files
handler.setFormatter(formatter)
logger.addHandler(handler)
parser = argparse.ArgumentParser(description="post fusion")
parser.add_argument("-v", "--visual", type=str)
parser.add_argument("-a", "--audio", type=str)
parser.add_argument("-o", "--other", default="")
parser.add_argument("-l", "--label", type=str)
args = parser.parse_args()
# fusion = CmpPostFusion(
# visual_data_path="tmp/impresssion/frame/pred.pkl",
# audio_data_path="tmp/impresssion/audio/pred.pkl",
# label_data_path="tmp/impresssion/audio/label.pkl"
# )
fusion = CmpPostFusion(
visual_data_path=args.visual,
audio_data_path=args.audio,
label_data_path=args.label,
addition_data_path=args.other,
)
fusion.compute()
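    # Example command line (illustrative, reusing the paths from the commented
    # construction above):
    # python script/cmp_multi_modal_post_fusion.py \
    #     -v tmp/impresssion/frame/pred.pkl \
    #     -a tmp/impresssion/audio/pred.pkl \
    #     -l tmp/impresssion/audio/label.pkl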
| 2,942 | 34.890244 | 105 | py |
simpa | simpa-master/simpa_tests/automatic_tests/TestProcessing.py | """
SPDX-FileCopyrightText: 2021 Computer Assisted Medical Interventions Group, DKFZ
SPDX-FileCopyrightText: 2021 VISION Lab, Cancer Research UK Cambridge Institute (CRUK CI)
SPDX-License-Identifier: MIT
"""
from simpa.core.reconstruction_module.reconstruction_utils import apply_b_mode, get_apodization_factor, \
reconstruction_mode_transformation
from simpa.utils.calculate import min_max_normalization
from simpa.utils.tags import Tags
import unittest
import numpy as np
import torch
class TestProcessing(unittest.TestCase):
def setUp(self):
print("setUp")
self.test_array = np.array([-1.2, 0, 3.2, 255])
self.time_series_data = torch.tensor([[1, 3], [8, 12]], device='cpu')
self.expected_differential = torch.tensor([[2., 0.], [4., 0.]], device='cpu')
self.test_image = np.array([[-1.2, 0], [3., 255]])
def tearDown(self):
print("tearDown")
def test_min_max_normalization(self):
print("test normalization")
normalized = min_max_normalization(self.test_array)
# check input and output sizes
assert normalized.shape == self.test_array.shape, "shapes have changed"
# check if values are in range [0,1]
assert ((0 <= normalized) & (1 >= normalized)).all(), "normalized values are not between 0 and 1"
# check if normalization values are correct
assert np.equal(normalized, np.array([0, (0 + 1.2)/(255+1.2), (3.2 + 1.2)/(255+1.2), 1])).all(), \
"normalization values are incorrect"
def test_reconstruction_mode_transformation(self):
print("test reconstruction mode transformation")
pressure = reconstruction_mode_transformation(self.time_series_data)
pressure = reconstruction_mode_transformation(self.time_series_data, Tags.RECONSTRUCTION_MODE_PRESSURE)
assert torch.equal(pressure, self.time_series_data), "there should be no change when using pressure mode"
differential = reconstruction_mode_transformation(self.time_series_data, Tags.RECONSTRUCTION_MODE_DIFFERENTIAL)
assert torch.equal(differential, self.expected_differential), "computed and expected differential don't match"
def test_apodization_factors(self):
print("test apodization factors creation")
# Hann
factors = get_apodization_factor(Tags.RECONSTRUCTION_APODIZATION_HANN, (2, 1), 10, device=torch.device('cpu'))
expected = torch.tensor([[[0.0000000000, 0.0954914987, 0.3454915285, 0.6545085311, 0.9045085311,
1.0000000000, 0.9045084715, 0.6545085311, 0.3454914391, 0.0954913795]],
[[0.0000000000, 0.0954914987, 0.3454915285, 0.6545085311, 0.9045085311,
1.0000000000, 0.9045084715, 0.6545085311, 0.3454914391, 0.0954913795]]])
assert torch.norm(torch.subtract(factors, expected)) < 1e-5, \
"computed Hann apodization factors don't match expected ones"
# Hamming
factors = get_apodization_factor(Tags.RECONSTRUCTION_APODIZATION_HAMMING,
(2, 1), 10, device=torch.device('cpu'))
expected = torch.tensor([[[0.0800000131, 0.1678521931, 0.3978522122, 0.6821478605, 0.9121478796,
1.0000000000, 0.9121478200, 0.6821478605, 0.3978521228, 0.1678520739]],
[[0.0800000131, 0.1678521931, 0.3978522122, 0.6821478605, 0.9121478796,
1.0000000000, 0.9121478200, 0.6821478605, 0.3978521228, 0.1678520739]]])
assert torch.norm(torch.subtract(factors, expected)) < 1e-5, \
"computed Hamming apodization factors don't match expected ones"
# Box
factors = get_apodization_factor(Tags.RECONSTRUCTION_APODIZATION_BOX,
(2, 1), 10, device=torch.device('cpu'))
expected = torch.ones((2, 1, 10))
assert torch.equal(factors, expected), "computed Box apodization factors don't match expected ones"
def test_envelope_detection(self):
print("test envelope detection")
# absolute value
_abs = apply_b_mode(self.test_image, method=Tags.RECONSTRUCTION_BMODE_METHOD_ABS)
expected_abs = np.array([[1.2, 0.], [3., 255.]])
assert np.equal(_abs, expected_abs).all(), "computed absolute array and expected don't match"
# Hilbert transform
hilbert = apply_b_mode(self.test_image, method=Tags.RECONSTRUCTION_BMODE_METHOD_HILBERT_TRANSFORM)
expected_hilbert = np.array([[1.2, 0.], [3., 255.]])
assert np.equal(hilbert, expected_hilbert).all(), "computed hilbert transform array and expected don't match"
| 4,734 | 50.467391 | 119 | py |
simpa | simpa-master/simpa_tests/manual_tests/DelayMultiplyAndSumReconstruction.py | """
SPDX-FileCopyrightText: 2021 Computer Assisted Medical Interventions Group, DKFZ
SPDX-FileCopyrightText: 2021 VISION Lab, Cancer Research UK Cambridge Institute (CRUK CI)
SPDX-License-Identifier: MIT
"""
from simpa.utils import Tags
from simpa.utils.dict_path_manager import generate_dict_path
from simpa.io_handling import load_data_field, load_hdf5
from simpa.core.simulation import simulate
from simpa.core.acoustic_forward_module.acoustic_forward_module_k_wave_adapter import AcousticForwardModelKWaveAdapter
from simpa.core.optical_simulation_module.optical_forward_model_mcx_adapter import OpticalForwardModelMcxAdapter
from simpa.core.reconstruction_module.reconstruction_module_delay_multiply_and_sum_adapter import \
ImageReconstructionModuleDelayMultiplyAndSumAdapter
from simpa.core.volume_creation_module.volume_creation_module_model_based_adapter import \
VolumeCreationModelModelBasedAdapter
from simpa.core.processing_components.noise import GaussianNoiseProcessingComponent
from simpa import reconstruct_delay_multiply_and_sum_pytorch
from simpa_tests.manual_tests import ReconstructionAlgorithmTestBaseClass
# FIXME temporary workaround for newest Intel architectures
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
class DelayMultiplyAndSumReconstruction(ReconstructionAlgorithmTestBaseClass):
"""
    This test runs a simulation creating an example volume of geometric shapes and reconstructs it with the Delay
    Multiply and Sum algorithm. To verify that the test was successful a user has to evaluate the displayed reconstruction.
"""
def test_reconstruction_of_simulation(self):
self.device.update_settings_for_use_of_model_based_volume_creator(self.settings)
SIMUATION_PIPELINE = [
VolumeCreationModelModelBasedAdapter(self.settings),
OpticalForwardModelMcxAdapter(self.settings),
GaussianNoiseProcessingComponent(self.settings, "noise_initial_pressure"),
AcousticForwardModelKWaveAdapter(self.settings),
ImageReconstructionModuleDelayMultiplyAndSumAdapter(self.settings)
]
simulate(SIMUATION_PIPELINE, self.settings, self.device)
reconstructed_image = load_data_field(self.settings[Tags.SIMPA_OUTPUT_PATH], Tags.RECONSTRUCTED_DATA,
self.settings[Tags.WAVELENGTH])
self.plot_reconstruction_compared_with_initial_pressure(reconstructed_image, "Reconstructed image using adapter")
def test_convenience_function(self):
# Load simulated time series data
time_series_sensor_data = load_data_field(self.settings[Tags.SIMPA_OUTPUT_PATH],
Tags.TIME_SERIES_DATA, self.settings[Tags.WAVELENGTH])
# reconstruct image using convenience function
reconstructed_image = reconstruct_delay_multiply_and_sum_pytorch(time_series_sensor_data,
self.device.get_detection_geometry(),
self.settings)
self.plot_reconstruction_compared_with_initial_pressure(reconstructed_image, "Reconstructed image using convenience function")
if __name__ == '__main__':
test = DelayMultiplyAndSumReconstruction()
test.test_reconstruction_of_simulation()
test.test_convenience_function()
| 3,414 | 49.220588 | 134 | py |
simpa | simpa-master/simpa_tests/manual_tests/DelayAndSumReconstruction.py | """
SPDX-FileCopyrightText: 2021 Computer Assisted Medical Interventions Group, DKFZ
SPDX-FileCopyrightText: 2021 VISION Lab, Cancer Research UK Cambridge Institute (CRUK CI)
SPDX-License-Identifier: MIT
"""
from simpa.utils import Tags
from simpa.utils.dict_path_manager import generate_dict_path
from simpa.io_handling import load_data_field, load_hdf5
from simpa.core.simulation import simulate
from simpa.core.acoustic_forward_module.acoustic_forward_module_k_wave_adapter import AcousticForwardModelKWaveAdapter
from simpa.core.optical_simulation_module.optical_forward_model_mcx_adapter import OpticalForwardModelMcxAdapter
from simpa.core.reconstruction_module.reconstruction_module_delay_and_sum_adapter import \
ImageReconstructionModuleDelayAndSumAdapter
from simpa.core.volume_creation_module.volume_creation_module_model_based_adapter import \
VolumeCreationModelModelBasedAdapter
from simpa.core.processing_components.noise.gaussian_noise import GaussianNoiseProcessingComponent
from simpa import reconstruct_delay_and_sum_pytorch
from simpa_tests.manual_tests import ReconstructionAlgorithmTestBaseClass
# FIXME temporary workaround for newest Intel architectures
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
class DelayAndSumReconstruction(ReconstructionAlgorithmTestBaseClass):
"""
This test runs a simulation creating an example volume of geometric shapes and reconstructs it with the Delay and
Sum algorithm. To verify that the test was successful a user has to evaluate the displayed reconstruction.
"""
def test_reconstruction_of_simulation(self):
self.device.update_settings_for_use_of_model_based_volume_creator(self.settings)
SIMUATION_PIPELINE = [
VolumeCreationModelModelBasedAdapter(self.settings),
OpticalForwardModelMcxAdapter(self.settings),
GaussianNoiseProcessingComponent(self.settings, "noise_initial_pressure"),
AcousticForwardModelKWaveAdapter(self.settings),
ImageReconstructionModuleDelayAndSumAdapter(self.settings)
]
simulate(SIMUATION_PIPELINE, self.settings, self.device)
reconstructed_image = load_data_field(self.settings[Tags.SIMPA_OUTPUT_PATH], Tags.RECONSTRUCTED_DATA,
self.settings[Tags.WAVELENGTH])
self.plot_reconstruction_compared_with_initial_pressure(reconstructed_image,
"Reconstructed image using adapter")
def test_convenience_function(self):
# Load simulated time series data
time_series_sensor_data = load_data_field(self.settings[Tags.SIMPA_OUTPUT_PATH],
Tags.TIME_SERIES_DATA, self.settings[Tags.WAVELENGTH])
# reconstruct image using convenience function
reconstructed_image = reconstruct_delay_and_sum_pytorch(time_series_sensor_data,
self.device.get_detection_geometry(),
self.settings)
self.plot_reconstruction_compared_with_initial_pressure(reconstructed_image,
"Reconstructed image using convenience function")
if __name__ == '__main__':
test = DelayAndSumReconstruction()
test.test_reconstruction_of_simulation()
test.test_convenience_function()
| 3,482 | 47.375 | 118 | py |
simpa | simpa-master/simpa_tests/manual_tests/SignedDelayMultiplyAndSumReconstruction.py | """
SPDX-FileCopyrightText: 2021 Computer Assisted Medical Interventions Group, DKFZ
SPDX-FileCopyrightText: 2021 VISION Lab, Cancer Research UK Cambridge Institute (CRUK CI)
SPDX-License-Identifier: MIT
"""
from simpa.utils import Tags
from simpa.utils.dict_path_manager import generate_dict_path
from simpa.io_handling import load_data_field, load_hdf5
from simpa.core.simulation import simulate
from simpa.core.acoustic_forward_module.acoustic_forward_module_k_wave_adapter import AcousticForwardModelKWaveAdapter
from simpa.core.optical_simulation_module.optical_forward_model_mcx_adapter import OpticalForwardModelMcxAdapter
from simpa.core.reconstruction_module.reconstruction_module_signed_delay_multiply_and_sum_adapter import \
ImageReconstructionModuleSignedDelayMultiplyAndSumAdapter
from simpa.core.volume_creation_module.volume_creation_module_model_based_adapter import \
VolumeCreationModelModelBasedAdapter
from simpa.core.processing_components.noise import GaussianNoiseProcessingComponent
from simpa import reconstruct_signed_delay_multiply_and_sum_pytorch
from simpa_tests.manual_tests import ReconstructionAlgorithmTestBaseClass
# FIXME temporary workaround for newest Intel architectures
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
class SignedDelayMultiplyAndSumReconstruction(ReconstructionAlgorithmTestBaseClass):
"""
    This test runs a simulation creating an example volume of geometric shapes and reconstructs it with the Signed
    Delay Multiply and Sum algorithm. To verify that the test was successful, a user has to evaluate the displayed
    reconstruction.
"""
def test_reconstruction_of_simulation(self):
self.device.update_settings_for_use_of_model_based_volume_creator(self.settings)
SIMUATION_PIPELINE = [
VolumeCreationModelModelBasedAdapter(self.settings),
OpticalForwardModelMcxAdapter(self.settings),
GaussianNoiseProcessingComponent(self.settings, "noise_initial_pressure"),
AcousticForwardModelKWaveAdapter(self.settings),
ImageReconstructionModuleSignedDelayMultiplyAndSumAdapter(self.settings)
]
simulate(SIMUATION_PIPELINE, self.settings, self.device)
reconstructed_image = load_data_field(self.settings[Tags.SIMPA_OUTPUT_PATH], Tags.RECONSTRUCTED_DATA,
self.settings[Tags.WAVELENGTH])
self.plot_reconstruction_compared_with_initial_pressure(reconstructed_image,
"Reconstructed image using adapter")
def test_convenience_function(self):
# Load simulated time series data
time_series_sensor_data = load_data_field(self.settings[Tags.SIMPA_OUTPUT_PATH],
Tags.TIME_SERIES_DATA, self.settings[Tags.WAVELENGTH])
# reconstruct image using convenience function
reconstructed_image = reconstruct_signed_delay_multiply_and_sum_pytorch(time_series_sensor_data,
self.device.get_detection_geometry(),
self.settings
)
self.plot_reconstruction_compared_with_initial_pressure(reconstructed_image,
"Reconstructed image using convenience function")
if __name__ == '__main__':
test = SignedDelayMultiplyAndSumReconstruction()
test.test_reconstruction_of_simulation()
test.test_convenience_function()
| 3,681 | 51.6 | 118 | py |
simpa | simpa-master/simpa/__init__.py | """
SPDX-FileCopyrightText: 2021 Computer Assisted Medical Interventions Group, DKFZ
SPDX-FileCopyrightText: 2021 VISION Lab, Cancer Research UK Cambridge Institute (CRUK CI)
SPDX-License-Identifier: MIT
"""
from .core.reconstruction_module.reconstruction_module_delay_and_sum_adapter import reconstruct_delay_and_sum_pytorch
from .core.reconstruction_module.reconstruction_module_delay_multiply_and_sum_adapter import reconstruct_delay_multiply_and_sum_pytorch
from .core.reconstruction_module.reconstruction_module_signed_delay_multiply_and_sum_adapter import reconstruct_signed_delay_multiply_and_sum_pytorch | 612 | 67.111111 | 149 | py |
simpa | simpa-master/simpa/core/reconstruction_module/reconstruction_utils.py | """
SPDX-FileCopyrightText: 2021 Computer Assisted Medical Interventions Group, DKFZ
SPDX-FileCopyrightText: 2021 VISION Lab, Cancer Research UK Cambridge Institute (CRUK CI)
SPDX-License-Identifier: MIT
"""
from simpa.utils import Tags
import torch
import torch.fft
import numpy as np
from scipy.signal import hilbert
from scipy.signal.windows import tukey
def get_apodization_factor(apodization_method: str = Tags.RECONSTRUCTION_APODIZATION_BOX,
dimensions: tuple = None, n_sensor_elements=None,
device: torch.device = 'cpu') -> torch.tensor:
"""
Construct apodization factors according to `apodization_method` [hann, hamming or box apodization (default)]
for given dimensions and `n_sensor_elements`.
:param apodization_method: (str) Apodization method, one of Tags.RECONSTRUCTION_APODIZATION_HANN,
Tags.RECONSTRUCTION_APODIZATION_HAMMING and Tags.RECONSTRUCTION_APODIZATION_BOX (default)
:param dimensions: (tuple) size of each dimension of reconstructed image as int, might have 2 or 3 entries.
:param n_sensor_elements: (int) number of sensor elements
:param device: (torch device) PyTorch tensor device
    :return: (torch tensor) tensor with apodization factors which can be multiplied with DAS values
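    Example (illustrative sketch, not part of the documented API):
        >>> factors = get_apodization_factor(Tags.RECONSTRUCTION_APODIZATION_HANN,
        ...                                  dimensions=(2, 3), n_sensor_elements=4)
        >>> factors.shape
        torch.Size([2, 3, 4])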
"""
if dimensions is None or n_sensor_elements is None:
raise AttributeError("dimensions and n_sensor_elements must be specified and not be None")
# hann window
if apodization_method == Tags.RECONSTRUCTION_APODIZATION_HANN:
hann = torch.hann_window(n_sensor_elements, device=device)
output = hann.expand(dimensions + (n_sensor_elements,))
# hamming window
elif apodization_method == Tags.RECONSTRUCTION_APODIZATION_HAMMING:
hamming = torch.hamming_window(n_sensor_elements, device=device)
output = hamming.expand(dimensions + (n_sensor_elements,))
# box window apodization as default
else:
output = torch.ones(dimensions + (n_sensor_elements,), device=device)
return output
def bandpass_filtering(data: torch.tensor = None, time_spacing_in_ms: float = None,
cutoff_lowpass: int = int(8e6), cutoff_highpass: int = int(0.1e6),
tukey_alpha: float = 0.5) -> torch.tensor:
"""
    Apply a bandpass filter with cutoff values at `cutoff_lowpass` and `cutoff_highpass` Hz
    and a Tukey window with alpha value of `tukey_alpha` in between on the `data` in Fourier space.
:param data: (torch tensor) data to be filtered
:param time_spacing_in_ms: (float) time spacing in milliseconds, e.g. 2.5e-5
    :param cutoff_lowpass: (int) Signal above this value will be ignored (in Hz)
    :param cutoff_highpass: (int) Signal below this value will be ignored (in Hz)
:param tukey_alpha: (float) transition value between 0 (rectangular) and 1 (Hann window)
:return: (torch tensor) filtered data
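    Example (illustrative sketch; only the output shape is shown, the filtered values depend on the cutoffs):
        >>> signal = torch.randn(2, 128)
        >>> bandpass_filtering(signal, time_spacing_in_ms=2.5e-5).shape
        torch.Size([2, 128])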
"""
if data is None or time_spacing_in_ms is None:
raise AttributeError("data and time spacing must be specified")
# construct bandpass filter given the cutoff values and time spacing
frequencies = np.fft.fftfreq(data.shape[1], d=time_spacing_in_ms/1000)
if cutoff_highpass > cutoff_lowpass:
raise ValueError("The highpass cutoff value must be lower than the lowpass cutoff value.")
# find closest indices for frequencies
small_index = (np.abs(frequencies - cutoff_highpass)).argmin()
large_index = (np.abs(frequencies - cutoff_lowpass)).argmin()
win = torch.tensor(tukey(large_index - small_index, alpha=tukey_alpha), device=data.device)
window = torch.zeros(frequencies.shape, device=data.device)
window[small_index:large_index] = win
# transform data into Fourier space, multiply filter and transform back
data_in_fourier_space = torch.fft.fft(data)
filtered_data_in_fourier_space = data_in_fourier_space * window.expand_as(data_in_fourier_space)
return torch.abs(torch.fft.ifft(filtered_data_in_fourier_space))
def apply_b_mode(data: np.ndarray = None, method: str = None) -> np.ndarray:
"""
    Applies the specified B-mode `method` to the data. The method is either
    envelope detection using the Hilbert transform (Tags.RECONSTRUCTION_BMODE_METHOD_HILBERT_TRANSFORM),
    the absolute value (Tags.RECONSTRUCTION_BMODE_METHOD_ABS), or
    no transformation if nothing is specified.
:param data: (numpy array) data used for applying B-Mode method
:param method: (str) Tags.RECONSTRUCTION_BMODE_METHOD_HILBERT_TRANSFORM or Tags.RECONSTRUCTION_BMODE_METHOD_ABS
    :return: (numpy array) data with the B-mode method applied
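    Example (illustrative sketch):
        >>> apply_b_mode(np.array([[-1., 2.], [3., -4.]]), method=Tags.RECONSTRUCTION_BMODE_METHOD_ABS)
        array([[1., 2.],
               [3., 4.]])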
"""
# input checks
if data is None:
raise AttributeError("data must be specified")
if data.ndim < 2:
raise AttributeError("data must have at least two dimensions")
if method == Tags.RECONSTRUCTION_BMODE_METHOD_HILBERT_TRANSFORM:
# perform envelope detection using hilbert transform in depth direction
hilbert_transformed = hilbert(data, axis=1)
output = np.abs(hilbert_transformed)
elif method == Tags.RECONSTRUCTION_BMODE_METHOD_ABS:
# perform envelope detection using absolute value
output = np.abs(data)
else:
print("You have not specified a B-mode method")
output = data
# sanity check that no elements are below zero
if output[output < 0].sum() != 0:
print("There are still negative values in the data.")
return output
def reconstruction_mode_transformation(time_series_sensor_data: torch.tensor = None,
mode: str = Tags.RECONSTRUCTION_MODE_PRESSURE) -> torch.tensor:
"""
    Transforms `time_series_sensor_data` for other modes, for example `Tags.RECONSTRUCTION_MODE_DIFFERENTIAL`.
Default mode is `Tags.RECONSTRUCTION_MODE_PRESSURE`.
:param time_series_sensor_data: (torch tensor) Time series data to be transformed
:param mode: (str) reconstruction mode: Tags.RECONSTRUCTION_MODE_PRESSURE (default)
or Tags.RECONSTRUCTION_MODE_DIFFERENTIAL
:return: (torch tensor) potentially transformed tensor
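    Example (illustrative sketch): in differential mode every sample holds the pressure derivative
    scaled by its one-based sample index:
        >>> p = torch.tensor([[1., 3., 6.]])
        >>> reconstruction_mode_transformation(p, mode=Tags.RECONSTRUCTION_MODE_DIFFERENTIAL)
        tensor([[2., 6., 0.]])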
"""
# depending on mode use pressure data or its derivative
if mode == Tags.RECONSTRUCTION_MODE_DIFFERENTIAL:
zeros = torch.zeros([time_series_sensor_data.shape[0], 1], names=None).to(time_series_sensor_data.device)
time_vector = torch.arange(1, time_series_sensor_data.shape[1]+1).to(time_series_sensor_data.device)
time_derivative_pressure = time_series_sensor_data[:, 1:] - time_series_sensor_data[:, 0:-1]
time_derivative_pressure = torch.cat([time_derivative_pressure, zeros], dim=1)
time_derivative_pressure = torch.mul(time_derivative_pressure, time_vector)
output = time_derivative_pressure # use time derivative pressure
elif mode == Tags.RECONSTRUCTION_MODE_PRESSURE:
output = time_series_sensor_data # already in pressure format
else:
raise AttributeError(
"An invalid reconstruction mode was set, only differential and pressure are supported.")
return output | 7,142 | 47.924658 | 115 | py |
simpa | simpa-master/simpa/core/reconstruction_module/reconstruction_module_delay_multiply_and_sum_adapter.py | """
SPDX-FileCopyrightText: 2021 Computer Assisted Medical Interventions Group, DKFZ
SPDX-FileCopyrightText: 2021 VISION Lab, Cancer Research UK Cambridge Institute (CRUK CI)
SPDX-License-Identifier: MIT
"""
from simpa.utils import Tags
from simpa.core.reconstruction_module import ReconstructionAdapterBase
from simpa.io_handling.io_hdf5 import load_data_field
from simpa.core.device_digital_twins import DetectionGeometryBase
import numpy as np
import torch
from simpa.utils.settings import Settings
from simpa.core.reconstruction_module.reconstruction_utils import get_apodization_factor, bandpass_filtering, apply_b_mode, \
reconstruction_mode_transformation
class ImageReconstructionModuleDelayMultiplyAndSumAdapter(ReconstructionAdapterBase):
def reconstruction_algorithm(self, time_series_sensor_data, detection_geometry: DetectionGeometryBase):
"""
Applies the Delay Multiply and Sum beamforming algorithm [1] to the time series sensor data (2D numpy array where the
first dimension corresponds to the sensor elements and the second to the recorded time steps) with the given
beamforming settings (dictionary).
A reconstructed image (2D numpy array) is returned.
This implementation uses PyTorch Tensors to perform computations and is able to run on GPUs.
[1] T. Kirchner et al. 2018, "Signed Real-Time Delay Multiply and Sum Beamforming for Multispectral
Photoacoustic Imaging", https://doi.org/10.3390/jimaging4100121
"""
# check for B-mode methods and perform envelope detection on time series data if specified
if Tags.RECONSTRUCTION_BMODE_BEFORE_RECONSTRUCTION in self.component_settings\
and self.component_settings[Tags.RECONSTRUCTION_BMODE_BEFORE_RECONSTRUCTION] \
and Tags.RECONSTRUCTION_BMODE_METHOD in self.component_settings:
time_series_sensor_data = apply_b_mode(
time_series_sensor_data, method=self.component_settings[Tags.RECONSTRUCTION_BMODE_METHOD])
### INPUT CHECKING AND VALIDATION ###
# check settings dictionary for elements and read them in
# speed of sound: use given speed of sound, otherwise use average from simulation if specified
if Tags.PROPERTY_SPEED_OF_SOUND in self.component_settings and self.component_settings[Tags.PROPERTY_SPEED_OF_SOUND]:
speed_of_sound_in_m_per_s = self.component_settings[Tags.PROPERTY_SPEED_OF_SOUND]
elif Tags.WAVELENGTH in self.global_settings and self.global_settings[Tags.WAVELENGTH]:
sound_speed_m = load_data_field(self.global_settings[Tags.SIMPA_OUTPUT_PATH], Tags.PROPERTY_SPEED_OF_SOUND)
speed_of_sound_in_m_per_s = np.mean(sound_speed_m)
else:
raise AttributeError("Please specify a value for PROPERTY_SPEED_OF_SOUND"
"or WAVELENGTH to obtain the average speed of sound")
# time spacing: use kWave specific dt from simulation if set, otherwise sampling rate if specified,
if Tags.K_WAVE_SPECIFIC_DT in self.global_settings and self.global_settings[Tags.K_WAVE_SPECIFIC_DT]:
time_spacing_in_ms = self.global_settings[Tags.K_WAVE_SPECIFIC_DT] * 1000
elif Tags.SENSOR_SAMPLING_RATE_MHZ in self.global_settings and self.global_settings[Tags.SENSOR_SAMPLING_RATE_MHZ]:
time_spacing_in_ms = 1.0 / (self.global_settings[Tags.SENSOR_SAMPLING_RATE_MHZ] * 1000)
else:
raise AttributeError("Please specify a value for SENSOR_SAMPLING_RATE_MHZ or K_WAVE_SPECIFIC_DT")
# spacing
if Tags.SPACING_MM in self.global_settings and self.global_settings[Tags.SPACING_MM]:
spacing_in_mm = self.global_settings[Tags.SPACING_MM]
else:
raise AttributeError("Please specify a value for SPACING_MM")
# get device specific sensor positions
detection_geometry.check_settings_prerequisites(self.global_settings)
sensor_positions = detection_geometry.get_detector_element_positions_accounting_for_field_of_view()
        # convert sensor positions and time series data to torch tensors if they are numpy arrays
if isinstance(sensor_positions, np.ndarray):
sensor_positions = torch.from_numpy(sensor_positions)
if isinstance(time_series_sensor_data, np.ndarray):
time_series_sensor_data = torch.from_numpy(time_series_sensor_data)
assert isinstance(time_series_sensor_data, torch.Tensor), \
'The time series sensor data must have been converted to a tensor'
# move tensors to GPU if available, otherwise use CPU
if Tags.GPU not in self.global_settings:
if torch.cuda.is_available():
dev = "cuda"
else:
dev = "cpu"
else:
dev = "cuda" if self.global_settings[Tags.GPU] else "cpu"
torch_device = torch.device(dev)
sensor_positions = sensor_positions.to(torch_device)
time_series_sensor_data = time_series_sensor_data.to(torch_device)
# array must be of correct dimension
assert time_series_sensor_data.ndim == 2, 'Time series data must have exactly 2 dimensions' \
', one for the sensor elements and one for time. ' \
                                                  'Stack images and sensor positions for 3D reconstruction. ' \
'Apply beamforming per wavelength if you have a 3D array. '
# check reconstruction mode - pressure by default
if Tags.RECONSTRUCTION_MODE in self.component_settings:
mode = self.component_settings[Tags.RECONSTRUCTION_MODE]
else:
mode = Tags.RECONSTRUCTION_MODE_PRESSURE
time_series_sensor_data = reconstruction_mode_transformation(time_series_sensor_data, mode=mode)
# apply by default bandpass filter using tukey window with alpha=0.5 on time series data in frequency domain
if Tags.RECONSTRUCTION_PERFORM_BANDPASS_FILTERING not in self.component_settings \
or self.component_settings[Tags.RECONSTRUCTION_PERFORM_BANDPASS_FILTERING] is not False:
cutoff_lowpass = self.component_settings[Tags.BANDPASS_CUTOFF_LOWPASS] \
if Tags.BANDPASS_CUTOFF_LOWPASS in self.component_settings else int(8e6)
cutoff_highpass = self.component_settings[Tags.BANDPASS_CUTOFF_HIGHPASS] \
if Tags.BANDPASS_CUTOFF_HIGHPASS in self.component_settings else int(0.1e6)
tukey_alpha = self.component_settings[Tags.TUKEY_WINDOW_ALPHA] if Tags.TUKEY_WINDOW_ALPHA in self.component_settings else 0.5
time_series_sensor_data = bandpass_filtering(time_series_sensor_data,
time_spacing_in_ms=time_spacing_in_ms,
cutoff_lowpass=cutoff_lowpass,
cutoff_highpass=cutoff_highpass,
tukey_alpha=tukey_alpha)
### ALGORITHM ITSELF ###
## compute size of beamformed image ##
xdim = (max(sensor_positions[:, 0]) - min(sensor_positions[:, 0])) / spacing_in_mm
xdim = int(xdim) + 1 # correction due to subtraction of indices starting at 0
ydim = float(time_series_sensor_data.shape[1] * time_spacing_in_ms * speed_of_sound_in_m_per_s) / spacing_in_mm
ydim = int(round(ydim))
zdim = (max(sensor_positions[:, 1]) - min(sensor_positions[:, 1]))/spacing_in_mm
zdim = int(zdim) + 1 # correction due to subtraction of indices starting at 0
if zdim == 1:
sensor_positions[:, 1] = 0 # Assume imaging plane
if time_series_sensor_data.shape[0] < sensor_positions.shape[0]:
self.logger.warning("Warning: The time series data has less sensor element entries than the given sensor positions. "
"This might be due to a low simulated resolution, please increase it.")
n_sensor_elements = time_series_sensor_data.shape[0]
self.logger.debug(f'Number of pixels in X dimension: {xdim}, Y dimension: {ydim}, Z dimension: {zdim} '
f',number of sensor elements: {n_sensor_elements}')
# construct output image
output = torch.zeros((xdim, ydim, zdim), dtype=torch.float32, device=torch_device)
xx, yy, zz, jj = torch.meshgrid(torch.arange(xdim, device=torch_device),
torch.arange(ydim, device=torch_device),
torch.arange(zdim, device=torch_device),
torch.arange(n_sensor_elements, device=torch_device))
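        # for every voxel (xx, yy, zz) and sensor element jj: compute the Euclidean distance between
        # the voxel centre and the sensor element position and convert it into a (fractional) sample
        # index of the time series via the speed of sound and the temporal sampling step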
delays = torch.sqrt((yy * spacing_in_mm - sensor_positions[:, 2][jj]) ** 2 +
(xx * spacing_in_mm - torch.abs(sensor_positions[:, 0][jj])) ** 2 +
(zz * spacing_in_mm - torch.abs(sensor_positions[:, 1][jj])) ** 2) \
/ (speed_of_sound_in_m_per_s * time_spacing_in_ms)
# perform index validation
invalid_indices = torch.where(torch.logical_or(delays < 0, delays >= float(time_series_sensor_data.shape[1])))
torch.clip_(delays, min=0, max=time_series_sensor_data.shape[1] - 1)
# interpolation of delays
lower_delays = (torch.floor(delays)).long()
upper_delays = lower_delays + 1
torch.clip_(upper_delays, min=0, max=time_series_sensor_data.shape[1] - 1)
lower_values = time_series_sensor_data[jj, lower_delays]
upper_values = time_series_sensor_data[jj, upper_delays]
values = lower_values * (upper_delays - delays) + upper_values * (delays - lower_delays)
# perform apodization if specified
if Tags.RECONSTRUCTION_APODIZATION_METHOD in self.component_settings:
apodization = get_apodization_factor(apodization_method=self.component_settings[Tags.RECONSTRUCTION_APODIZATION_METHOD],
dimensions=(xdim, ydim, zdim), n_sensor_elements=n_sensor_elements,
device=torch_device)
values = values * apodization
# set values of invalid indices to 0 so that they don't influence the result
values[invalid_indices] = 0
del delays # free memory of delays
for x in range(xdim):
yy, zz, nn, mm = torch.meshgrid(torch.arange(ydim, device=torch_device),
torch.arange(zdim, device=torch_device),
torch.arange(n_sensor_elements, device=torch_device),
torch.arange(n_sensor_elements, device=torch_device))
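            # DMAS combination step: couple the delayed signals of every pair of sensor elements,
            # multiply them and take the signed square root of each product so that the result keeps
            # the dimensionality of a pressure signal (see Kirchner et al. 2018 cited in the docstring)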
M = values[x,yy,zz,nn] * values[x,yy,zz,mm]
M = torch.sign(M) * torch.sqrt(torch.abs(M))
# only take upper triangle without diagonal and sum up along n and m axis (last two)
output[x] = torch.triu(M, diagonal=1).sum(dim=(-1,-2))
reconstructed = output.cpu().numpy()
# check for B-mode methods and perform envelope detection on beamformed image if specified
if Tags.RECONSTRUCTION_BMODE_AFTER_RECONSTRUCTION in self.component_settings \
and self.component_settings[Tags.RECONSTRUCTION_BMODE_AFTER_RECONSTRUCTION] \
and Tags.RECONSTRUCTION_BMODE_METHOD in self.component_settings:
reconstructed = apply_b_mode(reconstructed, method=self.component_settings[Tags.RECONSTRUCTION_BMODE_METHOD])
return reconstructed.squeeze()
def reconstruct_delay_multiply_and_sum_pytorch(time_series_sensor_data: np.ndarray,
detection_geometry: DetectionGeometryBase,
settings: dict = None,
sound_of_speed: int = 1540,
time_spacing: float = 2.5e-8,
sensor_spacing: float = 0.1) -> np.ndarray:
"""
    Convenience function for reconstructing time series data using the Delay Multiply and Sum algorithm implemented in PyTorch
:param time_series_sensor_data: (2D numpy array) sensor data of shape (sensor elements, time steps)
    :param detection_geometry: The DetectionGeometryBase to use for the reconstruction of the given time series data
:param settings: (dict) settings dictionary: by default there is none and the other parameters are used instead,
        but if parameters are given in the settings, those will be used instead of the passed arguments
:param sound_of_speed: (int) speed of sound in medium in meters per second (default: 1540 m/s)
:param time_spacing: (float) time between sampling points in seconds (default: 2.5e-8 s which is equal to 40 MHz)
:param sensor_spacing: (float) space between sensor elements in millimeters (default: 0.1 mm)
:return: (2D numpy array) reconstructed image as 2D numpy array
"""
# create settings if they don't exist yet
if settings is None:
settings = Settings()
# parse reconstruction settings if they are not given in the settings
if Tags.PROPERTY_SPEED_OF_SOUND not in settings or settings[Tags.PROPERTY_SPEED_OF_SOUND] is None:
settings[Tags.PROPERTY_SPEED_OF_SOUND] = sound_of_speed
if Tags.SENSOR_SAMPLING_RATE_MHZ not in settings or settings[Tags.SENSOR_SAMPLING_RATE_MHZ] is None:
settings[Tags.SENSOR_SAMPLING_RATE_MHZ] = (1.0 / time_spacing) / 1000000
if Tags.SPACING_MM not in settings or settings[Tags.SPACING_MM] is None:
settings[Tags.SPACING_MM] = sensor_spacing
adapter = ImageReconstructionModuleDelayMultiplyAndSumAdapter(settings)
return adapter.reconstruction_algorithm(time_series_sensor_data, detection_geometry)
| 13,987 | 58.021097 | 137 | py |
simpa | simpa-master/simpa/core/reconstruction_module/reconstruction_module_delay_and_sum_adapter.py | """
SPDX-FileCopyrightText: 2021 Computer Assisted Medical Interventions Group, DKFZ
SPDX-FileCopyrightText: 2021 VISION Lab, Cancer Research UK Cambridge Institute (CRUK CI)
SPDX-License-Identifier: MIT
"""
from simpa.utils import Tags
from simpa.core.reconstruction_module import ReconstructionAdapterBase
from simpa.io_handling.io_hdf5 import load_data_field
import numpy as np
import torch
from simpa.utils.settings import Settings
from simpa.core.reconstruction_module.reconstruction_utils import get_apodization_factor, bandpass_filtering, apply_b_mode, \
reconstruction_mode_transformation
from simpa.core.device_digital_twins import DetectionGeometryBase
class ImageReconstructionModuleDelayAndSumAdapter(ReconstructionAdapterBase):
def reconstruction_algorithm(self, time_series_sensor_data, detection_geometry: DetectionGeometryBase):
"""
Applies the Delay and Sum beamforming algorithm [1] to the time series sensor data (2D numpy array where the
first dimension corresponds to the sensor elements and the second to the recorded time steps) with the given
beamforming settings (dictionary).
A reconstructed image (2D numpy array) is returned.
This implementation uses PyTorch Tensors to perform computations and is able to run on GPUs.
[1] T. Kirchner et al. 2018, "Signed Real-Time Delay Multiply and Sum Beamforming for Multispectral
Photoacoustic Imaging", https://doi.org/10.3390/jimaging4100121
"""
# check for B-mode methods and perform envelope detection on time series data if specified
if Tags.RECONSTRUCTION_BMODE_BEFORE_RECONSTRUCTION in self.component_settings\
and self.component_settings[Tags.RECONSTRUCTION_BMODE_BEFORE_RECONSTRUCTION] \
and Tags.RECONSTRUCTION_BMODE_METHOD in self.component_settings:
time_series_sensor_data = apply_b_mode(
time_series_sensor_data, method=self.component_settings[Tags.RECONSTRUCTION_BMODE_METHOD])
### INPUT CHECKING AND VALIDATION ###
# check settings dictionary for elements and read them in
# speed of sound: use given speed of sound, otherwise use average from simulation if specified
if Tags.PROPERTY_SPEED_OF_SOUND in self.component_settings and self.component_settings[Tags.PROPERTY_SPEED_OF_SOUND]:
speed_of_sound_in_m_per_s = self.component_settings[Tags.PROPERTY_SPEED_OF_SOUND]
elif Tags.WAVELENGTH in self.global_settings and self.global_settings[Tags.WAVELENGTH]:
sound_speed_m = load_data_field(self.global_settings[Tags.SIMPA_OUTPUT_PATH], Tags.PROPERTY_SPEED_OF_SOUND)
speed_of_sound_in_m_per_s = np.mean(sound_speed_m)
else:
raise AttributeError("Please specify a value for PROPERTY_SPEED_OF_SOUND"
"or WAVELENGTH to obtain the average speed of sound")
# time spacing: use kWave specific dt from simulation if set, otherwise sampling rate if specified,
if Tags.K_WAVE_SPECIFIC_DT in self.global_settings and self.global_settings[Tags.K_WAVE_SPECIFIC_DT]:
time_spacing_in_ms = self.global_settings[Tags.K_WAVE_SPECIFIC_DT] * 1000
elif detection_geometry.sampling_frequency_MHz is not None:
time_spacing_in_ms = 1.0 / (detection_geometry.sampling_frequency_MHz * 1000)
else:
raise AttributeError("Please specify a value for SENSOR_SAMPLING_RATE_MHZ or K_WAVE_SPECIFIC_DT")
self.logger.debug(f"Using a time_spacing of {time_spacing_in_ms}")
# spacing
if Tags.SPACING_MM in self.component_settings and self.component_settings[Tags.SPACING_MM]:
spacing_in_mm = self.component_settings[Tags.SPACING_MM]
else:
raise AttributeError("Please specify a value for SPACING_MM")
# get device specific sensor positions
detection_geometry.check_settings_prerequisites(self.global_settings)
sensor_positions = detection_geometry.get_detector_element_positions_accounting_for_field_of_view()
        # convert sensor positions and time series data to torch tensors if they are numpy arrays
if isinstance(sensor_positions, np.ndarray):
sensor_positions = torch.from_numpy(sensor_positions)
if isinstance(time_series_sensor_data, np.ndarray):
time_series_sensor_data = torch.from_numpy(time_series_sensor_data)
assert isinstance(time_series_sensor_data, torch.Tensor), \
'The time series sensor data must have been converted to a tensor'
# move tensors to GPU if available, otherwise use CPU
if Tags.GPU not in self.global_settings:
if torch.cuda.is_available():
dev = "cuda"
else:
dev = "cpu"
else:
dev = "cuda" if self.global_settings[Tags.GPU] else "cpu"
torch_device = torch.device(dev)
sensor_positions = sensor_positions.to(torch_device)
time_series_sensor_data = time_series_sensor_data.to(torch_device)
# array must be of correct dimension
assert time_series_sensor_data.ndim == 2, 'Time series data must have exactly 2 dimensions' \
', one for the sensor elements and one for time. ' \
                                                  'Stack images and sensor positions for 3D reconstruction. ' \
'Apply beamforming per wavelength if you have a 3D array. '
# check reconstruction mode - pressure by default
if Tags.RECONSTRUCTION_MODE in self.component_settings:
mode = self.component_settings[Tags.RECONSTRUCTION_MODE]
else:
mode = Tags.RECONSTRUCTION_MODE_PRESSURE
time_series_sensor_data = reconstruction_mode_transformation(time_series_sensor_data, mode=mode)
# apply by default bandpass filter using tukey window with alpha=0.5 on time series data in frequency domain
if Tags.RECONSTRUCTION_PERFORM_BANDPASS_FILTERING not in self.component_settings \
or self.component_settings[Tags.RECONSTRUCTION_PERFORM_BANDPASS_FILTERING] is not False:
cutoff_lowpass = self.component_settings[Tags.BANDPASS_CUTOFF_LOWPASS] \
if Tags.BANDPASS_CUTOFF_LOWPASS in self.component_settings else int(8e6)
cutoff_highpass = self.component_settings[Tags.BANDPASS_CUTOFF_HIGHPASS] \
if Tags.BANDPASS_CUTOFF_HIGHPASS in self.component_settings else int(0.1e6)
tukey_alpha = self.component_settings[Tags.TUKEY_WINDOW_ALPHA] if Tags.TUKEY_WINDOW_ALPHA in self.component_settings else 0.5
time_series_sensor_data = bandpass_filtering(time_series_sensor_data,
time_spacing_in_ms=time_spacing_in_ms,
cutoff_lowpass=cutoff_lowpass,
cutoff_highpass=cutoff_highpass,
tukey_alpha=tukey_alpha)
### ALGORITHM ITSELF ###
## compute size of beamformed image from field of view ##
field_of_view = detection_geometry.get_field_of_view_extent_mm()
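        # as indexed below, the field of view extent is interpreted as
        # [x_start, x_end, z_start, z_end, y_start, y_end] in mm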
xdim = int(np.abs(field_of_view[0] - field_of_view[1]) / spacing_in_mm) + 1
zdim = int(np.abs(field_of_view[2] - field_of_view[3]) / spacing_in_mm) + 1
ydim = int(np.abs(field_of_view[4] - field_of_view[5]) / spacing_in_mm) + 1
self.logger.debug(f"FOV X: 0 - {xdim * spacing_in_mm}")
self.logger.debug(f"FOV Y: 0 - {ydim * spacing_in_mm}")
self.logger.debug(f"FOV Z: 0 - {zdim * spacing_in_mm}")
self.logger.debug(f"SOS: {speed_of_sound_in_m_per_s}")
if zdim == 1:
sensor_positions[:, 1] = 0 # Assume imaging plane
if time_series_sensor_data.shape[0] < sensor_positions.shape[0]:
self.logger.warning("Warning: The time series data has less sensor element entries than the given sensor positions. "
"This might be due to a low simulated resolution, please increase it.")
n_sensor_elements = time_series_sensor_data.shape[0]
self.logger.debug(f'Number of pixels in X dimension: {xdim}, Y dimension: {ydim}, Z dimension: {zdim} '
f',number of sensor elements: {n_sensor_elements}')
# construct output image
output = torch.zeros((xdim, ydim, zdim), dtype=torch.float32, device=torch_device)
xx, yy, zz, jj = torch.meshgrid(torch.arange(xdim, device=torch_device),
torch.arange(ydim, device=torch_device),
torch.arange(zdim, device=torch_device),
torch.arange(n_sensor_elements, device=torch_device))
delays = torch.sqrt((yy * spacing_in_mm - sensor_positions[:, 2][jj]) ** 2 +
(xx * spacing_in_mm - torch.abs(sensor_positions[:, 0][jj])) ** 2 +
(zz * spacing_in_mm - torch.abs(sensor_positions[:, 1][jj])) ** 2) \
/ (speed_of_sound_in_m_per_s * time_spacing_in_ms)
# perform index validation
invalid_indices = torch.where(torch.logical_or(delays < 0, delays >= float(time_series_sensor_data.shape[1])))
torch.clip_(delays, min=0, max=time_series_sensor_data.shape[1] - 1)
# interpolation of delays
lower_delays = (torch.floor(delays)).long()
upper_delays = lower_delays + 1
torch.clip_(upper_delays, min=0, max=time_series_sensor_data.shape[1] - 1)
lower_values = time_series_sensor_data[jj, lower_delays]
upper_values = time_series_sensor_data[jj, upper_delays]
values = lower_values * (upper_delays - delays) + upper_values * (delays - lower_delays)
# perform apodization if specified
if Tags.RECONSTRUCTION_APODIZATION_METHOD in self.component_settings:
apodization = get_apodization_factor(apodization_method=self.component_settings[Tags.RECONSTRUCTION_APODIZATION_METHOD],
dimensions=(xdim, ydim, zdim), n_sensor_elements=n_sensor_elements,
device=torch_device)
values = values * apodization
# set values of invalid indices to 0 so that they don't influence the result
values[invalid_indices] = 0
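        # DAS combination step: sum the delayed (and optionally apodized) signals over all sensor
        # elements and normalise each voxel by the number of non-zero contributions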
_sum = torch.sum(values, dim=3)
counter = torch.count_nonzero(values, dim=3)
torch.divide(_sum, counter, out=output)
reconstructed = output.cpu().numpy()
# check for B-mode methods and perform envelope detection on beamformed image if specified
if Tags.RECONSTRUCTION_BMODE_AFTER_RECONSTRUCTION in self.component_settings \
and self.component_settings[Tags.RECONSTRUCTION_BMODE_AFTER_RECONSTRUCTION] \
and Tags.RECONSTRUCTION_BMODE_METHOD in self.component_settings:
reconstructed = apply_b_mode(
reconstructed, method=self.component_settings[Tags.RECONSTRUCTION_BMODE_METHOD])
return reconstructed.squeeze()
def reconstruct_delay_and_sum_pytorch(time_series_sensor_data: np.ndarray,
detection_geometry: DetectionGeometryBase,
settings: dict = None,
sound_of_speed: int = 1540,
time_spacing: float = 2.5e-8,
sensor_spacing: float = 0.1) -> np.ndarray:
"""
Convenience function for reconstructing time series data using Delay and Sum algorithm implemented in PyTorch
:param time_series_sensor_data: (2D numpy array) sensor data of shape (sensor elements, time steps)
:param detection_geometry: The DetectionGeometryBase that should be used to reconstruct the given time series data
:param settings: (dict) settings dictionary: by default there is none and the other parameters are used instead,
        but if parameters are given in the settings, those will be used instead of the passed arguments
:param sound_of_speed: (int) speed of sound in medium in meters per second (default: 1540 m/s)
:param time_spacing: (float) time between sampling points in seconds (default: 2.5e-8 s which is equal to 40 MHz)
:param sensor_spacing: (float) space between sensor elements in millimeters (default: 0.1 mm)
:return: (2D numpy array) reconstructed image as 2D numpy array
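    Example (illustrative sketch; `time_series` and `pa_device` are placeholder names not defined in this module):
        >>> image = reconstruct_delay_and_sum_pytorch(time_series,
        ...                                           pa_device.get_detection_geometry(),
        ...                                           sound_of_speed=1480)  # doctest: +SKIP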
"""
# create settings if they don't exist yet
if settings is None:
settings = Settings()
# parse reconstruction settings if they are not given in the settings
if Tags.PROPERTY_SPEED_OF_SOUND not in settings or settings[Tags.PROPERTY_SPEED_OF_SOUND] is None:
settings[Tags.PROPERTY_SPEED_OF_SOUND] = sound_of_speed
if Tags.SENSOR_SAMPLING_RATE_MHZ not in settings or settings[Tags.SENSOR_SAMPLING_RATE_MHZ] is None:
settings[Tags.SENSOR_SAMPLING_RATE_MHZ] = (1.0 / time_spacing) / 1000000
if Tags.SPACING_MM not in settings or settings[Tags.SPACING_MM] is None:
settings[Tags.SPACING_MM] = sensor_spacing
adapter = ImageReconstructionModuleDelayAndSumAdapter(settings)
return adapter.reconstruction_algorithm(time_series_sensor_data, detection_geometry)
| 13,489 | 56.649573 | 137 | py |
simpa | simpa-master/simpa/core/reconstruction_module/reconstruction_module_signed_delay_multiply_and_sum_adapter.py | """
SPDX-FileCopyrightText: 2021 Computer Assisted Medical Interventions Group, DKFZ
SPDX-FileCopyrightText: 2021 VISION Lab, Cancer Research UK Cambridge Institute (CRUK CI)
SPDX-License-Identifier: MIT
"""
from simpa.utils import Tags
from simpa.core.reconstruction_module import ReconstructionAdapterBase
from simpa.io_handling.io_hdf5 import load_data_field
from simpa.core.device_digital_twins import DetectionGeometryBase
import numpy as np
import torch
from simpa.utils.settings import Settings
from simpa.core.reconstruction_module.reconstruction_utils import get_apodization_factor, bandpass_filtering, apply_b_mode, \
reconstruction_mode_transformation
class ImageReconstructionModuleSignedDelayMultiplyAndSumAdapter(ReconstructionAdapterBase):
def reconstruction_algorithm(self, time_series_sensor_data, detection_geometry: DetectionGeometryBase):
"""
Applies the signed Delay Multiply and Sum beamforming algorithm [1] to the time series sensor data
(2D numpy array where the
first dimension corresponds to the sensor elements and the second to the recorded time steps) with the given
beamforming settings (dictionary).
A reconstructed image (2D numpy array) is returned.
This implementation uses PyTorch Tensors to perform computations and is able to run on GPUs.
[1] T. Kirchner et al. 2018, "Signed Real-Time Delay Multiply and Sum Beamforming for Multispectral
Photoacoustic Imaging", https://doi.org/10.3390/jimaging4100121
"""
# check for B-mode methods and perform envelope detection on time series data if specified
if Tags.RECONSTRUCTION_BMODE_BEFORE_RECONSTRUCTION in self.component_settings\
and self.component_settings[Tags.RECONSTRUCTION_BMODE_BEFORE_RECONSTRUCTION] \
and Tags.RECONSTRUCTION_BMODE_METHOD in self.component_settings:
time_series_sensor_data = apply_b_mode(
time_series_sensor_data, method=self.component_settings[Tags.RECONSTRUCTION_BMODE_METHOD])
### INPUT CHECKING AND VALIDATION ###
# check settings dictionary for elements and read them in
# speed of sound: use given speed of sound, otherwise use average from simulation if specified
if Tags.PROPERTY_SPEED_OF_SOUND in self.component_settings and self.component_settings[Tags.PROPERTY_SPEED_OF_SOUND]:
speed_of_sound_in_m_per_s = self.component_settings[Tags.PROPERTY_SPEED_OF_SOUND]
elif Tags.WAVELENGTH in self.global_settings and self.global_settings[Tags.WAVELENGTH]:
sound_speed_m = load_data_field(self.global_settings[Tags.SIMPA_OUTPUT_PATH], Tags.PROPERTY_SPEED_OF_SOUND)
speed_of_sound_in_m_per_s = np.mean(sound_speed_m)
else:
raise AttributeError("Please specify a value for PROPERTY_SPEED_OF_SOUND"
"or WAVELENGTH to obtain the average speed of sound")
# time spacing: use kWave specific dt from simulation if set, otherwise sampling rate if specified,
if Tags.K_WAVE_SPECIFIC_DT in self.global_settings and self.global_settings[Tags.K_WAVE_SPECIFIC_DT]:
time_spacing_in_ms = self.global_settings[Tags.K_WAVE_SPECIFIC_DT] * 1000
elif Tags.SENSOR_SAMPLING_RATE_MHZ in self.global_settings and self.global_settings[
Tags.SENSOR_SAMPLING_RATE_MHZ]:
time_spacing_in_ms = 1.0 / (self.global_settings[Tags.SENSOR_SAMPLING_RATE_MHZ] * 1000)
else:
raise AttributeError("Please specify a value for SENSOR_SAMPLING_RATE_MHZ or K_WAVE_SPECIFIC_DT")
# spacing
if Tags.SPACING_MM in self.global_settings and self.global_settings[Tags.SPACING_MM]:
spacing_in_mm = self.global_settings[Tags.SPACING_MM]
else:
raise AttributeError("Please specify a value for SPACING_MM")
# get device specific sensor positions
detection_geometry.check_settings_prerequisites(self.global_settings)
sensor_positions = detection_geometry.get_detector_element_positions_accounting_for_field_of_view()
        # convert sensor positions and time series data to torch tensors if they are numpy arrays
if isinstance(sensor_positions, np.ndarray):
sensor_positions = torch.from_numpy(sensor_positions)
if isinstance(time_series_sensor_data, np.ndarray):
time_series_sensor_data = torch.from_numpy(time_series_sensor_data)
assert isinstance(time_series_sensor_data, torch.Tensor), \
'The time series sensor data must have been converted to a tensor'
# move tensors to GPU if available, otherwise use CPU
if Tags.GPU not in self.global_settings:
if torch.cuda.is_available():
dev = "cuda"
else:
dev = "cpu"
else:
dev = "cuda" if self.global_settings[Tags.GPU] else "cpu"
device = torch.device(dev)
sensor_positions = sensor_positions.to(device)
time_series_sensor_data = time_series_sensor_data.to(device)
# array must be of correct dimension
assert time_series_sensor_data.ndim == 2, 'Time series data must have exactly 2 dimensions' \
', one for the sensor elements and one for time. ' \
                                                  'Stack images and sensor positions for 3D reconstruction. ' \
'Apply beamforming per wavelength if you have a 3D array. '
# check reconstruction mode - pressure by default
if Tags.RECONSTRUCTION_MODE in self.component_settings:
mode = self.component_settings[Tags.RECONSTRUCTION_MODE]
else:
mode = Tags.RECONSTRUCTION_MODE_PRESSURE
time_series_sensor_data = reconstruction_mode_transformation(time_series_sensor_data, mode=mode)
# apply by default bandpass filter using tukey window with alpha=0.5 on time series data in frequency domain
if Tags.RECONSTRUCTION_PERFORM_BANDPASS_FILTERING not in self.component_settings \
or self.component_settings[Tags.RECONSTRUCTION_PERFORM_BANDPASS_FILTERING] is not False:
cutoff_lowpass = self.component_settings[Tags.BANDPASS_CUTOFF_LOWPASS] \
if Tags.BANDPASS_CUTOFF_LOWPASS in self.component_settings else int(8e6)
cutoff_highpass = self.component_settings[Tags.BANDPASS_CUTOFF_HIGHPASS] \
if Tags.BANDPASS_CUTOFF_HIGHPASS in self.component_settings else int(0.1e6)
tukey_alpha = self.component_settings[Tags.TUKEY_WINDOW_ALPHA] if Tags.TUKEY_WINDOW_ALPHA in \
self.component_settings else 0.5
time_series_sensor_data = bandpass_filtering(time_series_sensor_data,
time_spacing_in_ms=time_spacing_in_ms,
cutoff_lowpass=cutoff_lowpass,
cutoff_highpass=cutoff_highpass,
tukey_alpha=tukey_alpha)
### ALGORITHM ITSELF ###
## compute size of beamformed image ##
xdim = (max(sensor_positions[:, 0]) - min(sensor_positions[:, 0])) / spacing_in_mm
xdim = int(xdim) + 1 # correction due to subtraction of indices starting at 0
ydim = float(time_series_sensor_data.shape[1] * time_spacing_in_ms * speed_of_sound_in_m_per_s) / spacing_in_mm
ydim = int(round(ydim))
zdim = (max(sensor_positions[:, 1]) - min(sensor_positions[:, 1]))/spacing_in_mm
zdim = int(zdim) + 1 # correction due to subtraction of indices starting at 0
if zdim == 1:
sensor_positions[:, 1] = 0 # Assume imaging plane
if time_series_sensor_data.shape[0] < sensor_positions.shape[0]:
self.logger.warning("Warning: The time series data has less sensor element entries than the given "
"sensor positions. "
"This might be due to a low simulated resolution, please increase it.")
n_sensor_elements = time_series_sensor_data.shape[0]
self.logger.debug(f'Number of pixels in X dimension: {xdim}, Y dimension: {ydim}, Z dimension: {zdim} '
f',number of sensor elements: {n_sensor_elements}')
# construct output image
output = torch.zeros((xdim, ydim, zdim), dtype=torch.float32, device=device)
xx, yy, zz, jj = torch.meshgrid(torch.arange(xdim, device=device),
torch.arange(ydim, device=device),
torch.arange(zdim, device=device),
torch.arange(n_sensor_elements, device=device))
delays = torch.sqrt((yy * spacing_in_mm - sensor_positions[:, 2][jj]) ** 2 +
(xx * spacing_in_mm - torch.abs(sensor_positions[:, 0][jj])) ** 2 +
(zz * spacing_in_mm - torch.abs(sensor_positions[:, 1][jj])) ** 2) \
/ (speed_of_sound_in_m_per_s * time_spacing_in_ms)
# perform index validation
invalid_indices = torch.where(torch.logical_or(delays < 0, delays >= float(time_series_sensor_data.shape[1])))
torch.clip_(delays, min=0, max=time_series_sensor_data.shape[1] - 1)
# interpolation of delays
lower_delays = (torch.floor(delays)).long()
upper_delays = lower_delays + 1
torch.clip_(upper_delays, min=0, max=time_series_sensor_data.shape[1] - 1)
lower_values = time_series_sensor_data[jj, lower_delays]
upper_values = time_series_sensor_data[jj, upper_delays]
values = lower_values * (upper_delays - delays) + upper_values * (delays - lower_delays)
# perform apodization if specified
if Tags.RECONSTRUCTION_APODIZATION_METHOD in self.component_settings:
apodization = get_apodization_factor(apodization_method=self.component_settings[
Tags.RECONSTRUCTION_APODIZATION_METHOD],
dimensions=(xdim, ydim, zdim), n_sensor_elements=n_sensor_elements,
device=device)
values = values * apodization
# set values of invalid indices to 0 so that they don't influence the result
values[invalid_indices] = 0
DAS = torch.sum(values, dim=3)
del delays # free memory of delays
for x in range(xdim):
yy, zz, nn, mm = torch.meshgrid(torch.arange(ydim, device=device),
torch.arange(zdim, device=device),
torch.arange(n_sensor_elements, device=device),
torch.arange(n_sensor_elements, device=device))
M = values[x,yy,zz,nn] * values[x,yy,zz,mm]
M = torch.sign(M) * torch.sqrt(torch.abs(M))
# only take upper triangle without diagonal and sum up along n and m axis (last two)
output[x] = torch.triu(M, diagonal=1).sum(dim=(-1,-2))
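        # restore the polarity of the reconstruction: the signed DMAS variant keeps the DMAS
        # magnitude per voxel but takes its sign from the corresponding plain DAS sum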
output = torch.sign(DAS) * output
reconstructed = output.cpu().numpy()
# check for B-mode methods and perform envelope detection on beamformed image if specified
if Tags.RECONSTRUCTION_BMODE_AFTER_RECONSTRUCTION in self.component_settings \
and self.component_settings[Tags.RECONSTRUCTION_BMODE_AFTER_RECONSTRUCTION] \
and Tags.RECONSTRUCTION_BMODE_METHOD in self.component_settings:
reconstructed = apply_b_mode(reconstructed,
method=self.component_settings[Tags.RECONSTRUCTION_BMODE_METHOD])
return reconstructed.squeeze()
def reconstruct_signed_delay_multiply_and_sum_pytorch(time_series_sensor_data: np.ndarray,
detection_geometry: DetectionGeometryBase,
settings: dict = None,
sound_of_speed: int = 1540,
time_spacing: float = 2.5e-8, sensor_spacing: float = 0.1) -> np.ndarray:
"""
    Convenience function for reconstructing time series data using the signed Delay Multiply and Sum algorithm
    implemented in PyTorch
    :param time_series_sensor_data: (2D numpy array) sensor data of shape (sensor elements, time steps)
    :param detection_geometry: The DetectionGeometryBase to use for the reconstruction of the given time series data
:param settings: (dict) settings dictionary: by default there is none and the other parameters are used instead,
        but if parameters are given in the settings, those will be used instead of the passed arguments
:param sound_of_speed: (int) speed of sound in medium in meters per second (default: 1540 m/s)
:param time_spacing: (float) time between sampling points in seconds (default: 2.5e-8 s which is equal to 40 MHz)
:param sensor_spacing: (float) space between sensor elements in millimeters (default: 0.1 mm)
:return: (2D numpy array) reconstructed image as 2D numpy array
"""
# create settings if they don't exist yet
if settings is None:
settings = Settings()
# parse reconstruction settings if they are not given in the settings
if Tags.PROPERTY_SPEED_OF_SOUND not in settings or settings[Tags.PROPERTY_SPEED_OF_SOUND] is None:
settings[Tags.PROPERTY_SPEED_OF_SOUND] = sound_of_speed
if Tags.SENSOR_SAMPLING_RATE_MHZ not in settings or settings[Tags.SENSOR_SAMPLING_RATE_MHZ] is None:
settings[Tags.SENSOR_SAMPLING_RATE_MHZ] = (1.0 / time_spacing) / 1000000
if Tags.SPACING_MM not in settings or settings[Tags.SPACING_MM] is None:
settings[Tags.SPACING_MM] = sensor_spacing
adapter = ImageReconstructionModuleSignedDelayMultiplyAndSumAdapter(settings)
return adapter.reconstruction_algorithm(time_series_sensor_data, detection_geometry)
| 14,009 | 56.892562 | 125 | py |
simpa | simpa-master/simpa/utils/tags.py | """
SPDX-FileCopyrightText: 2021 Computer Assisted Medical Interventions Group, DKFZ
SPDX-FileCopyrightText: 2021 VISION Lab, Cancer Research UK Cambridge Institute (CRUK CI)
SPDX-License-Identifier: MIT
"""
import numpy as np
class Tags:
"""
This class contains all 'Tags' for the use in the settings dictionary as well as strings that are used in SIMPA
as naming conventions.
Every Tag that is intended to be used as a key in the settings dictionary is represented by a tuple.
The first element of the tuple is a string that corresponds to the name of the Tag.
The second element of the tuple is a data type or a tuple of data types.
The values that are assigned to the keys in the settings should match these data types.
Their usage within the SIMPA package is divided in "SIMPA package", "module X", "adapter Y", "class Z" and
"naming convention".
"""
"""
General settings
"""
SIMULATION_PATH = ("simulation_path", str)
"""
Absolute path to the folder where the SIMPA output is saved.\n
Usage: SIMPA package
"""
VOLUME_NAME = ("volume_name", str)
"""
Name of the SIMPA output file.\n
Usage: SIMPA package
"""
WAVELENGTHS = ("wavelengths", (list, range, tuple, np.ndarray))
"""
Iterable of all the wavelengths used for the simulation.\n
Usage: SIMPA package
"""
WAVELENGTH = ("wavelength", (int, np.integer))
"""
Single wavelength used for the current simulation.\n
Usage: SIMPA package
"""
RANDOM_SEED = ("random_seed", (int, np.integer))
"""
Random seed for numpy and torch.\n
Usage: SIMPA package
"""
TISSUE_PROPERTIES_OUPUT_NAME = "properties"
"""
Name of the simulation properties field in the SIMPA output file.\n
Usage: naming convention
"""
GPU = ("gpu", (bool, np.bool, np.bool_))
"""
If True, uses all available gpu options of the used modules.\n
Usage: SIMPA package
"""
ACOUSTIC_SIMULATION_3D = ("acoustic_simulation_3d", bool)
"""
If True, simulates the acoustic forward model in 3D.\n
Usage: SIMPA package
"""
MEDIUM_TEMPERATURE_CELCIUS = ("medium_temperature", (int, np.integer, float, np.float))
"""
Temperature of the simulated volume.\n
Usage: module noise_simulation
"""
LOAD_AND_SAVE_HDF5_FILE_AT_THE_END_OF_SIMULATION_TO_MINIMISE_FILESIZE = ("minimize_file_size", (bool, np.bool, np.bool_))
"""
If not set to False, the HDF5 file will be optimised after the simulations are done.
Usage: simpa.core.simulation.simulate
"""
"""
Volume Creation Settings
"""
VOLUME_CREATOR = ("volume_creator", str)
"""
Choice of the volume creator adapter.\n
Usage: module volume_creation_module, module device_digital_twins
"""
VOLUME_CREATOR_VERSATILE = "volume_creator_versatile"
"""
Corresponds to the ModelBasedVolumeCreator.\n
Usage: module volume_creation_module, naming convention
"""
VOLUME_CREATOR_SEGMENTATION_BASED = "volume_creator_segmentation_based"
"""
Corresponds to the SegmentationBasedVolumeCreator.\n
Usage: module volume_creation_module, naming convention
"""
INPUT_SEGMENTATION_VOLUME = ("input_segmentation_volume", np.ndarray)
"""
Array that defines a segmented volume.\n
Usage: adapter segmentation_based_volume_creator
"""
SEGMENTATION_CLASS_MAPPING = ("segmentation_class_mapping", dict)
"""
Mapping that assigns every class in the INPUT_SEGMENTATION_VOLUME a MOLECULE_COMPOSITION.\n
Usage: adapter segmentation_based_volume_creator
"""
PRIORITY = ("priority", (int, np.integer, float, np.float))
"""
Number that corresponds to a priority of the assigned structure. If another structure occupies the same voxel
in a volume, the structure with a higher priority will be preferred.\n
Usage: adapter versatile_volume_creator
"""
MOLECULE_COMPOSITION = ("molecule_composition", list)
"""
List that contains all the molecules within a structure.\n
Usage: module volume_creation_module
"""
SIMULATE_DEFORMED_LAYERS = ("simulate_deformed_layers", bool)
"""
If True, the horizontal layers are deformed according to the DEFORMED_LAYERS_SETTINGS.\n
Usage: adapter versatile_volume_creation
"""
DEFORMED_LAYERS_SETTINGS = ("deformed_layers_settings", dict)
"""
Settings that contain the functional which defines the deformation of the layers.\n
Usage: adapter versatile_volume_creation
"""
BACKGROUND = "Background"
"""
Corresponds to the name of a structure.\n
Usage: adapter versatile_volume_creation, naming convention
"""
ADHERE_TO_DEFORMATION = ("adhere_to_deformation", bool)
"""
If True, a structure will be shifted according to the deformation.\n
Usage: adapter versatile_volume_creation
"""
DEFORMATION_X_COORDINATES_MM = "deformation_x_coordinates"
"""
Mesh that defines the x coordinates of the deformation.\n
Usage: adapter versatile_volume_creation, naming convention
"""
DEFORMATION_Y_COORDINATES_MM = "deformation_y_coordinates"
"""
Mesh that defines the y coordinates of the deformation.\n
Usage: adapter versatile_volume_creation, naming convention
"""
DEFORMATION_Z_ELEVATIONS_MM = "deformation_z_elevation"
"""
Mesh that defines the z coordinates of the deformation.\n
Usage: adapter versatile_volume_creation, naming convention
"""
MAX_DEFORMATION_MM = "max_deformation"
"""
Maximum deformation in z-direction.\n
Usage: adapter versatile_volume_creation, naming convention
"""
"""
Structure Settings
"""
CONSIDER_PARTIAL_VOLUME = ("consider_partial_volume", bool)
"""
If True, the structure will be generated with its edges only occupying a partial volume of the voxel.\n
Usage: adapter versatile_volume_creation
"""
STRUCTURE_START_MM = ("structure_start", (list, tuple, np.ndarray))
"""
Beginning of the structure as [x, y, z] coordinates in the generated volume.\n
Usage: adapter versatile_volume_creation, class GeometricalStructure
"""
STRUCTURE_END_MM = ("structure_end", (list, tuple, np.ndarray))
"""
Ending of the structure as [x, y, z] coordinates in the generated volume.\n
Usage: adapter versatile_volume_creation, class GeometricalStructure
"""
STRUCTURE_RADIUS_MM = ("structure_radius", (int, np.integer, float, np.float, np.ndarray))
"""
Radius of the structure.\n
Usage: adapter versatile_volume_creation, class GeometricalStructure
"""
STRUCTURE_ECCENTRICITY = ("structure_excentricity", (int, np.integer, float, np.float, np.ndarray))
"""
Eccentricity of the structure.\n
Usage: adapter versatile_volume_creation, class EllipticalTubularStructure
"""
STRUCTURE_FIRST_EDGE_MM = ("structure_first_edge_mm", (list, tuple, np.ndarray))
"""
Edge of the structure as [x, y, z] vector starting from STRUCTURE_START_MM in the generated volume.\n
Usage: adapter versatile_volume_creation, class ParallelepipedStructure
"""
STRUCTURE_SECOND_EDGE_MM = ("structure_second_edge_mm", (list, tuple, np.ndarray))
"""
Edge of the structure as [x, y, z] vector starting from STRUCTURE_START_MM in the generated volume.\n
Usage: adapter versatile_volume_creation, class ParallelepipedStructure
"""
STRUCTURE_THIRD_EDGE_MM = ("structure_third_edge_mm", (list, tuple, np.ndarray))
"""
Edge of the structure as [x, y, z] vector starting from STRUCTURE_START_MM in the generated volume.\n
Usage: adapter versatile_volume_creation, class ParallelepipedStructure
"""
STRUCTURE_X_EXTENT_MM = ("structure_x_extent_mm", (int, np.integer, float, np.float))
"""
X-extent of the structure in the generated volume.\n
Usage: adapter versatile_volume_creation, class RectangularCuboidStructure
"""
STRUCTURE_Y_EXTENT_MM = ("structure_y_extent_mm", (int, np.integer, float, np.float))
"""
Y-extent of the structure in the generated volume.\n
Usage: adapter versatile_volume_creation, class RectangularCuboidStructure
"""
STRUCTURE_Z_EXTENT_MM = ("structure_z_extent_mm", (int, np.integer, float, np.float))
"""
Z-extent of the structure in the generated volume.\n
Usage: adapter versatile_volume_creation, class RectangularCuboidStructure
"""
STRUCTURE_BIFURCATION_LENGTH_MM = ("structure_bifurcation_length_mm", (int, np.integer, float, np.float))
"""
Length after which a VesselStructure will bifurcate.\n
Usage: adapter versatile_volume_creation, class VesselStructure
"""
STRUCTURE_CURVATURE_FACTOR = ("structure_curvature_factor", (int, np.integer, float, np.float))
"""
Factor that determines how strongly a vessel tree is curved.\n
Usage: adapter versatile_volume_creation, class VesselStructure
"""
STRUCTURE_RADIUS_VARIATION_FACTOR = ("structure_radius_variation_factor", (int, np.integer, float, np.float))
"""
    Factor that determines how strongly the radius of a vessel tree varies.\n
Usage: adapter versatile_volume_creation, class VesselStructure
"""
STRUCTURE_DIRECTION = ("structure_direction", (list, tuple, np.ndarray))
"""
Direction as [x, y, z] vector starting from STRUCTURE_START_MM in which the vessel will grow.\n
Usage: adapter versatile_volume_creation, class VesselStructure
"""
"""
Digital Device Twin Settings
"""
DIGITAL_DEVICE = ("digital_device", str)
"""
Digital device that is chosen as illumination source and detector for the simulation.\n
Usage: SIMPA package
"""
DIGITAL_DEVICE_MSOT_ACUITY = "digital_device_msot"
"""
Corresponds to the MSOTAcuityEcho device.\n
Usage: SIMPA package, naming convention
"""
DIGITAL_DEVICE_RSOM = "digital_device_rsom"
"""
Corresponds to the RSOMExplorerP50 device.\n
Usage: SIMPA package, naming convention
"""
DIGITAL_DEVICE_MSOT_INVISION = "digital_device_invision"
"""
Corresponds to the InVision 256-TF device.\n
Usage: SIMPA package, naming convention
"""
DIGITAL_DEVICE_SLIT_ILLUMINATION_LINEAR_DETECTOR = "digital_device_slit_illumination_linear_detector"
"""
Corresponds to a PA device with a slit as illumination and a linear array as detection geometry.\n
Usage: SIMPA package, naming convention
"""
DIGITAL_DEVICE_POSITION = ("digital_device_position", (list, tuple, np.ndarray))
"""
Position in [x, y, z] coordinates of the device in the generated volume.\n
Usage: SIMPA package
"""
US_GEL = ("us_gel", bool)
"""
If True, us gel is placed between the PA device and the simulated volume.\n
Usage: SIMPA package
"""
OPTICAL_MODEL_SETTINGS = ("optical_model_settings", dict)
"""
Optical model settings
"""
OPTICAL_MODEL_OUTPUT_NAME = "optical_forward_model_output"
"""
Name of the optical forward model output field in the SIMPA output file.\n
Usage: naming convention
"""
OPTICAL_MODEL_BINARY_PATH = ("optical_model_binary_path", str)
"""
Absolute path of the location of the optical forward model binary.\n
Usage: module optical_simulation_module
"""
OPTICAL_MODEL_NUMBER_PHOTONS = ("optical_model_number_of_photons", (int, np.integer, float, np.float))
"""
Number of photons used in the optical simulation.\n
Usage: module optical_simulation_module
"""
OPTICAL_MODEL_ILLUMINATION_GEOMETRY_JSON_FILE = ("optical_model_illumination_geometry_json_file", str)
"""
Absolute path of the location of the JSON file containing the IPASC-formatted optical forward
model illumination geometry.\n
Usage: module optical_simulation_module
"""
LASER_PULSE_ENERGY_IN_MILLIJOULE = ("laser_pulse_energy_in_millijoule", (int, np.integer, float, np.float, list,
range, tuple, np.ndarray))
"""
Laser pulse energy used in the optical simulation.\n
Usage: module optical_simulation_module
"""
OPTICAL_MODEL_FLUENCE = "fluence"
"""
Name of the optical forward model output fluence field in the SIMPA output file.\n
Usage: naming convention
"""
OPTICAL_MODEL_INITIAL_PRESSURE = "initial_pressure"
"""
Name of the optical forward model output initial pressure field in the SIMPA output file.\n
Usage: naming convention
"""
OPTICAL_MODEL_UNITS = "units"
"""
Name of the optical forward model output units field in the SIMPA output file.\n
Usage: naming convention
"""
MCX_SEED = ("mcx_seed", (int, np.integer))
"""
Specific seed for random initialisation in mcx.\n
    If not set, Tags.RANDOM_SEED will be used instead.\n
Usage: module optical_modelling, adapter mcx_adapter
"""
    MCX_ASSUMED_ANISOTROPY = ("mcx_assumed_anisotropy", (int, np.int, float, np.float))
"""
The anisotropy that should be assumed for the mcx simulations.
If not set, a default value of 0.9 will be assumed.
Usage: module optical_modelling, adapter mcx_adapter
"""
ILLUMINATION_TYPE = ("optical_model_illumination_type", str)
"""
Type of the illumination geometry used in mcx.\n
Usage: module optical_modelling, adapter mcx_adapter
"""
# Illumination parameters
ILLUMINATION_POSITION = ("illumination_position", (list, tuple, np.ndarray))
"""
Position of the photon source in [x, y, z] coordinates used in mcx.\n
Usage: module optical_modelling, adapter mcx_adapter
"""
ILLUMINATION_DIRECTION = ("illumination_direction", (list, tuple, np.ndarray))
"""
Direction of the photon source as [x, y, z] vector used in mcx.\n
Usage: module optical_modelling, adapter mcx_adapter
"""
ILLUMINATION_PARAM1 = ("illumination_param1", (list, tuple, np.ndarray))
"""
First parameter group of the specified illumination type as [x, y, z, w] vector used in mcx.\n
Usage: module optical_modelling, adapter mcx_adapter
"""
ILLUMINATION_PARAM2 = ("illumination_param2", (list, tuple, np.ndarray))
"""
Second parameter group of the specified illumination type as [x, y, z, w] vector used in mcx.\n
Usage: module optical_modelling, adapter mcx_adapter
"""
TIME_STEP = ("time_step", (int, np.integer, float, np.float))
"""
Temporal resolution of mcx.\n
Usage: adapter mcx_adapter
"""
TOTAL_TIME = ("total_time", (int, np.integer, float, np.float))
"""
Total simulated time in mcx.\n
Usage: adapter mcx_adapter
"""
# Supported illumination types - implemented in mcx
ILLUMINATION_TYPE_PENCIL = "pencil"
"""
Corresponds to pencil source in mcx.\n
Usage: adapter mcx_adapter, naming convention
"""
ILLUMINATION_TYPE_PENCILARRAY = "pencilarray"
"""
Corresponds to pencilarray source in mcx.\n
Usage: adapter mcx_adapter, naming convention
"""
ILLUMINATION_TYPE_DISK = "disk"
"""
Corresponds to disk source in mcx.\n
Usage: adapter mcx_adapter, naming convention
"""
ILLUMINATION_TYPE_SLIT = "slit"
"""
Corresponds to slit source in mcx.\n
Usage: adapter mcx_adapter, naming convention
"""
ILLUMINATION_TYPE_GAUSSIAN = "gaussian"
"""
Corresponds to gaussian source in mcx.\n
Usage: adapter mcx_adapter, naming convention
"""
ILLUMINATION_TYPE_PATTERN = "pattern"
"""
Corresponds to pattern source in mcx.\n
Usage: adapter mcx_adapter, naming convention
"""
ILLUMINATION_TYPE_PATTERN_3D = "pattern3d"
"""
Corresponds to pattern3d source in mcx.\n
Usage: adapter mcx_adapter, naming convention
"""
ILLUMINATION_TYPE_PLANAR = "planar"
"""
Corresponds to planar source in mcx.\n
Usage: adapter mcx_adapter, naming convention
"""
ILLUMINATION_TYPE_FOURIER = "fourier"
"""
Corresponds to fourier source in mcx.\n
Usage: adapter mcx_adapter, naming convention
"""
ILLUMINATION_TYPE_FOURIER_X = "fourierx"
"""
Corresponds to fourierx source in mcx.\n
Usage: adapter mcx_adapter, naming convention
"""
ILLUMINATION_TYPE_FOURIER_X_2D = "fourierx2d"
"""
Corresponds to fourierx2d source in mcx.\n
Usage: adapter mcx_adapter, naming convention
"""
ILLUMINATION_TYPE_DKFZ_PAUS = "pasetup" # TODO more explanatory rename of pasetup
"""
Corresponds to pasetup source in mcx. The geometrical definition is described in:\n
Usage: adapter mcx_adapter, naming convention
"""
ILLUMINATION_TYPE_MSOT_ACUITY_ECHO = "msot_acuity_echo"
"""s
Corresponds to msot_acuity_echo source in mcx. The device is manufactured by iThera Medical, Munich, Germany
(https: // www.ithera-medical.com / products / msot-acuity /).\n
Usage: adapter mcx_adapter, naming convention
"""
ILLUMINATION_TYPE_MSOT_INVISION = "invision"
"""
Corresponds to a source definition in mcx.\n
Usage: adapter mcx_adapter, naming convention
"""
ILLUMINATION_TYPE_RING = "ring"
"""
Corresponds to ring source in mcx.\n
Usage: adapter mcx_adapter, naming convention
"""
ILLUMINATION_TYPE_IPASC_DEFINITION = "ipasc"
"""
Corresponds to a source definition in mcx.\n
Usage: adapter mcx_adapter, naming convention
"""
# Supported optical models
OPTICAL_MODEL = ("optical_model", str)
"""
Choice of the used optical model.\n
Usage: module optical_simulation_module
"""
OPTICAL_MODEL_MCX = "mcx"
"""
Corresponds to the mcx simulation.\n
Usage: module optical_simulation_module, naming convention
"""
OPTICAL_MODEL_TEST = "simpa_tests"
"""
Corresponds to an adapter for testing purposes only.\n
Usage: module optical_simulation_module, naming convention
"""
# Supported acoustic models
ACOUSTIC_MODEL = ("acoustic_model", str)
"""
Choice of the used acoustic model.\n
Usage: module acoustic_forward_module
"""
ACOUSTIC_MODEL_K_WAVE = "kwave"
"""
    Corresponds to the kwave simulation.\n
Usage: module acoustic_forward_module, naming convention
"""
K_WAVE_SPECIFIC_DT = ("dt_acoustic_sim", (int, np.integer, float, np.float))
"""
Temporal resolution of kwave.\n
Usage: adapter KwaveAcousticForwardModel, adapter TimeReversalAdapter
"""
K_WAVE_SPECIFIC_NT = ("Nt_acoustic_sim", (int, np.integer, float, np.float))
"""
Total time steps simulated by kwave.\n
Usage: adapter KwaveAcousticForwardModel, adapter TimeReversalAdapter
"""
ACOUSTIC_MODEL_TEST = "simpa_tests"
"""
Corresponds to an adapter for testing purposes only.\n
Usage: module acoustic_forward_module, naming convention
"""
ACOUSTIC_MODEL_SETTINGS = ("acoustic_model_settings", dict)
"""
Acoustic model settings.
"""
ACOUSTIC_MODEL_BINARY_PATH = ("acoustic_model_binary_path", str)
"""
Absolute path of the location of the acoustic forward model binary.\n
Usage: module optical_simulation_module
"""
ACOUSTIC_MODEL_OUTPUT_NAME = "acoustic_forward_model_output"
"""
Name of the acoustic forward model output field in the SIMPA output file.\n
Usage: naming convention
"""
RECORDMOVIE = ("record_movie", (bool, np.bool, np.bool_))
"""
If True, a movie of the kwave simulation will be recorded.\n
Usage: adapter KwaveAcousticForwardModel
"""
MOVIENAME = ("movie_name", str)
"""
Name of the movie recorded by kwave.\n
Usage: adapter KwaveAcousticForwardModel
"""
ACOUSTIC_LOG_SCALE = ("acoustic_log_scale", (bool, np.bool, np.bool_))
"""
If True, the movie of the kwave simulation will be recorded in a log scale.\n
Usage: adapter KwaveAcousticForwardModel
"""
TIME_SERIES_DATA = "time_series_data"
"""
Name of the time series data field in the SIMPA output file.\n
Usage: naming convention
"""
TIME_SERIES_DATA_NOISE = "time_series_data_noise"
"""
Name of the time series data with applied noise field in the SIMPA output file.\n
Usage: naming convention
"""
RECONSTRUCTION_MODEL_SETTINGS = ("reconstruction_model_settings", dict)
""""
Reconstruction Model Settings
"""
RECONSTRUCTION_OUTPUT_NAME = ("reconstruction_result", str)
"""
Absolute path of the image reconstruction result.\n
Usage: adapter MitkBeamformingAdapter
"""
RECONSTRUCTION_ALGORITHM = ("reconstruction_algorithm", str)
"""
Choice of the used reconstruction algorithm.\n
Usage: module reconstruction_module
"""
RECONSTRUCTION_ALGORITHM_DAS = "DAS"
"""
Corresponds to the reconstruction algorithm DAS with the MitkBeamformingAdapter.\n
Usage: module reconstruction_module, naming convention
"""
RECONSTRUCTION_ALGORITHM_DMAS = "DMAS"
"""
Corresponds to the reconstruction algorithm DMAS with the MitkBeamformingAdapter.\n
Usage: module reconstruction_module, naming convention
"""
RECONSTRUCTION_ALGORITHM_SDMAS = "sDMAS"
"""
Corresponds to the reconstruction algorithm sDMAS with the MitkBeamformingAdapter.\n
Usage: module reconstruction_module, naming convention
"""
RECONSTRUCTION_ALGORITHM_PYTORCH_DAS = "PyTorch_DAS"
"""
Corresponds to the reconstruction algorithm DAS with the PyTorchDASAdapter.\n
Usage: module reconstruction_module, naming convention
"""
RECONSTRUCTION_ALGORITHM_TIME_REVERSAL = "time_reversal"
"""
Corresponds to the reconstruction algorithm Time Reversal with TimeReversalAdapter.\n
Usage: module reconstruction_module, naming convention
"""
RECONSTRUCTION_ALGORITHM_TEST = "TEST"
"""
Corresponds to an adapter for testing purposes only.\n
Usage: module reconstruction_module, naming convention
"""
RECONSTRUCTION_INVERSE_CRIME = ("reconstruction_inverse_crime", (bool, np.bool, np.bool_))
"""
If True, the Time Reversal reconstruction will commit the "inverse crime".\n
Usage: TimeReversalAdapter
"""
RECONSTRUCTION_MITK_BINARY_PATH = ("reconstruction_mitk_binary_path", str)
"""
Absolute path to the Mitk Beamforming script.\n
Usage: adapter MitkBeamformingAdapter
"""
RECONSTRUCTION_MITK_SETTINGS_XML = ("reconstruction_mitk_settings_xml", str)
"""
Absolute path to the Mitk Beamforming script settings.\n
Usage: adapter MitkBeamformingAdapter
"""
RECONSTRUCTION_BMODE_METHOD = ("reconstruction_bmode_method", str)
"""
Choice of the B-Mode method used in the Mitk Beamforming.\n
Usage: adapter MitkBeamformingAdapter
"""
RECONSTRUCTION_BMODE_METHOD_ABS = "Abs"
"""
Corresponds to the absolute value as the B-Mode method used in the Mitk Beamforming.\n
Usage: adapter MitkBeamformingAdapter, naming convention
"""
RECONSTRUCTION_BMODE_METHOD_HILBERT_TRANSFORM = "EnvelopeDetection"
"""
Corresponds to the Hilbert transform as the B-Mode method used in the Mitk Beamforming.\n
Usage: adapter MitkBeamformingAdapter, naming convention
"""
RECONSTRUCTION_BMODE_BEFORE_RECONSTRUCTION = "Envelope_Detection_before_Reconstruction"
"""
Specifies whether an envelope detection should be performed before reconstruction, default is False
Usage: adapter PyTorchDASAdapter, naming convention
"""
RECONSTRUCTION_BMODE_AFTER_RECONSTRUCTION = "Envelope_Detection_after_Reconstruction"
"""
Specifies whether an envelope detection should be performed after reconstruction, default is False
Usage: adapter PyTorchDASAdapter, naming convention
"""
RECONSTRUCTION_APODIZATION_METHOD = ("reconstruction_apodization_method", str)
"""
    Choice of the apodization method used, i.e. window functions.\n
Usage: adapter PyTorchDASAdapter
"""
RECONSTRUCTION_APODIZATION_BOX = "BoxApodization"
"""
Corresponds to the box window function for apodization.\n
Usage: adapter PyTorchDASAdapter, naming convention
"""
RECONSTRUCTION_APODIZATION_HANN = "HannApodization"
"""
Corresponds to the Hann window function for apodization.\n
Usage: adapter PyTorchDASAdapter, naming convention
"""
RECONSTRUCTION_APODIZATION_HAMMING = "HammingApodization"
"""
Corresponds to the Hamming window function for apodization.\n
Usage: adapter PyTorchDASAdapter, naming convention
"""
RECONSTRUCTION_PERFORM_BANDPASS_FILTERING = ("reconstruction_perform_bandpass_filtering",
(bool, np.bool, np.bool_))
"""
Whether bandpass filtering should be applied or not. Default should be True\n
Usage: adapter PyTorchDASAdapter
"""
TUKEY_WINDOW_ALPHA = ("tukey_window_alpha", (int, np.integer, float, np.float))
"""
Sets alpha value of Tukey window between 0 (similar to box window) and 1 (similar to Hann window).
Default is 0.5\n
Usage: adapter PyTorchDASAdapter
"""
BANDPASS_CUTOFF_LOWPASS = ("bandpass_cuttoff_lowpass", (int, np.integer, float, np.float))
"""
Sets the cutoff threshold in MHz for lowpass filtering, i.e. upper limit of the tukey filter. Default is 8 MHz\n
Usage: adapter PyTorchDASAdapter
"""
BANDPASS_CUTOFF_HIGHPASS = ("bandpass_cuttoff_highpass", (int, np.integer, float, np.float))
"""
Sets the cutoff threshold in MHz for highpass filtering, i.e. lower limit of the tukey filter. Default is 0.1 MHz\n
Usage: adapter PyTorchDASAdapter
"""
RECONSTRUCTED_DATA = "reconstructed_data"
"""
Name of the reconstructed data field in the SIMPA output file.\n
Usage: naming convention
"""
RECONSTRUCTED_DATA_NOISE = "reconstructed_data_noise"
"""
Name of the reconstructed data with applied noise field in the SIMPA output file.\n
Usage: naming convention
"""
RECONSTRUCTION_MODE = ("reconstruction_mode", str)
"""
Choice of the reconstruction mode used in the Backprojection.\n
Usage: adapter BackprojectionAdapter
"""
RECONSTRUCTION_MODE_DIFFERENTIAL = "differential"
"""
Corresponds to the differential mode used in the Backprojection.\n
Usage: adapter BackprojectionAdapter, naming_convention
"""
RECONSTRUCTION_MODE_PRESSURE = "pressure"
"""
Corresponds to the pressure mode used in the Backprojection.\n
Usage: adapter BackprojectionAdapter, naming_convention
"""
RECONSTRUCTION_MODE_FULL = "full"
"""
Corresponds to the full mode used in the Backprojection.\n
Usage: adapter BackprojectionAdapter, naming_convention
"""
# physical property volume types
PROPERTY_ABSORPTION_PER_CM = "mua"
"""
Optical absorption of the generated volume/structure in 1/cm.\n
Usage: SIMPA package, naming convention
"""
PROPERTY_SCATTERING_PER_CM = "mus"
"""
Optical scattering (NOT REDUCED SCATTERING mus'! mus'=mus*(1-g) ) of the generated volume/structure in 1/cm.\n
Usage: SIMPA package, naming convention
"""
PROPERTY_ANISOTROPY = "g"
"""
Optical scattering anisotropy of the generated volume/structure.\n
Usage: SIMPA package, naming convention
"""
PROPERTY_OXYGENATION = "oxy"
"""
Oxygenation of the generated volume/structure.\n
Usage: SIMPA package, naming convention
"""
PROPERTY_SEGMENTATION = "seg"
"""
Segmentation of the generated volume/structure.\n
Usage: SIMPA package, naming convention
"""
PROPERTY_GRUNEISEN_PARAMETER = "gamma"
"""
We define PROPERTY_GRUNEISEN_PARAMETER to contain all wavelength-independent constituents of the PA signal.
This means that it contains the percentage of absorbed light converted into heat.
Naturally, one could make an argument that this should not be the case, however, it simplifies the usage of
this tool.\n
Usage: SIMPA package, naming convention
"""
PROPERTY_SPEED_OF_SOUND = "sos"
"""
Speed of sound of the generated volume/structure in m/s.\n
Usage: SIMPA package, naming convention
"""
PROPERTY_DENSITY = "density"
"""
Density of the generated volume/structure in kg/m³.\n
Usage: SIMPA package, naming convention
"""
PROPERTY_ALPHA_COEFF = "alpha_coeff"
"""
    Acoustic attenuation used by kwave for the generated volume/structure in dB/cm/MHz.\n
Usage: adapter KwaveAcousticForwardModel, adapter TimeReversalAdapter, naming convention
"""
PROPERTY_SENSOR_MASK = "sensor_mask"
"""
    Sensor mask used by kwave for the selected PA device.\n
Usage: adapter KwaveAcousticForwardModel, adapter TimeReversalAdapter, naming convention
"""
PROPERTY_DIRECTIVITY_ANGLE = "directivity_angle"
"""
    Directivity of the sensors of the used PA device in kwave.\n
Usage: adapter KwaveAcousticForwardModel, adapter TimeReversalAdapter, naming convention
"""
PROPERTY_INTRINSIC_EULER_ANGLE = "intrinsic_euler_angle"
"""
Intrinsic euler angles of the detector elements in the kWaveArray.\n
Usage: adapter KwaveAcousticForwardModel, adapter TimeReversalAdapter, naming convention
"""
PROPERTY_ALPHA_POWER = ("medium_alpha_power", (int, np.integer, float, np.float))
"""
Exponent of the exponential acoustic attenuation law of kwave.\n
Usage: adapter KwaveAcousticForwardModel, adapter TimeReversalAdapter, naming convention
"""
# Volume geometry settings
SPACING_MM = ("voxel_spacing_mm", (int, np.integer, float, np.float))
"""
    Isotropic extent of one voxel in mm in the generated volume.\n
Usage: SIMPA package
"""
DIM_VOLUME_X_MM = ("volume_x_dim_mm", (int, np.integer, float, np.float))
"""
Extent of the x-axis of the generated volume.\n
Usage: SIMPA package
"""
DIM_VOLUME_Y_MM = ("volume_y_dim_mm", (int, np.integer, float, np.float))
"""
Extent of the y-axis of the generated volume.\n
Usage: SIMPA package
"""
DIM_VOLUME_Z_MM = ("volume_z_dim_mm", (int, np.integer, float, np.float))
"""
Extent of the z-axis of the generated volume.\n
Usage: SIMPA package
"""
# PML parameters
PMLSize = ("pml_size", (list, tuple, np.ndarray))
"""
Size of the "perfectly matched layer" (PML) around the simulated volume in kwave.\n
Usage: adapter KwaveAcousticForwardModel, adapter TimeReversalAdapter, naming convention
"""
PMLAlpha = ("pml_alpha", (int, np.integer, float, np.float))
"""
Alpha coefficient of the "perfectly matched layer" (PML) around the simulated volume in kwave.\n
Usage: adapter KwaveAcousticForwardModel, adapter TimeReversalAdapter, naming convention
"""
PMLInside = ("pml_inside", (bool, np.bool, np.bool_))
"""
If True, the "perfectly matched layer" (PML) in kwave is located inside the volume.\n
Usage: adapter KwaveAcousticForwardModel, adapter TimeReversalAdapter, naming convention
"""
PlotPML = ("plot_pml", (bool, np.bool, np.bool_))
"""
If True, the "perfectly matched layer" (PML) around the simulated volume in kwave is plotted.\n
Usage: adapter KwaveAcousticForwardModel, adapter TimeReversalAdapter, naming convention
"""
INITIAL_PRESSURE_SMOOTHING = ("initial_pressure_smoothing", (bool, np.bool, np.bool_))
"""
If True, the initial pressure is smoothed before simulated in kwave.\n
Usage: adapter KwaveAcousticForwardModel, adapter TimeReversalAdapter, naming convention
"""
# Acoustic Sensor Properties
SENSOR_RECORD = ("sensor_record", str)
"""
Sensor Record mode of the sensor in kwave. Default should be "p".\n
Usage: adapter KwaveAcousticForwardModel, adapter TimeReversalAdapter, naming convention
"""
MODEL_SENSOR_FREQUENCY_RESPONSE = ("model_sensor_frequency_response", bool)
"""
Boolean to decide whether to model the sensor frequency response in kwave.\n
Usage: adapter KwaveAcousticForwardModel, adapter TimeReversalAdapter, naming convention
"""
SENSOR_CENTER_FREQUENCY_HZ = ("sensor_center_frequency", (int, np.integer, float, np.float))
"""
Sensor center frequency in kwave.\n
Usage: adapter KwaveAcousticForwardModel, adapter TimeReversalAdapter, naming convention
"""
SENSOR_BANDWIDTH_PERCENT = ("sensor_bandwidth", (int, np.integer, float, np.float))
"""
Sensor bandwidth in kwave.\n
Usage: adapter KwaveAcousticForwardModel, adapter TimeReversalAdapter, naming convention
"""
SENSOR_DIRECTIVITY_SIZE_M = ("sensor_directivity_size", (int, np.integer, float, np.float))
"""
Size of each detector element in kwave.\n
Usage: adapter KwaveAcousticForwardModel, adapter TimeReversalAdapter, naming convention
"""
SENSOR_DIRECTIVITY_PATTERN = "sensor_directivity_pattern"
"""
Sensor directivity pattern of the sensor in kwave. Default should be "pressure".\n
Usage: adapter KwaveAcousticForwardModel, adapter TimeReversalAdapter, naming convention
"""
SENSOR_SAMPLING_RATE_MHZ = ("sensor_sampling_rate_mhz", (int, np.integer, float, np.float))
"""
Sampling rate of the used PA device.\n
Usage: adapter KwaveAcousticForwardModel, adapter TimeReversalAdapter, naming convention
"""
SENSOR_NUM_ELEMENTS = ("sensor_num_elements", (int, np.integer))
"""
Number of detector elements for kwave if no device was selected.\n
Usage: adapter KwaveAcousticForwardModel, adapter TimeReversalAdapter, naming convention
"""
SENSOR_NUM_USED_ELEMENTS = ("sensor_num_used_elements", (int, np.integer))
"""
    Number of detector elements that fit into the generated volume if the dimensions and/or spacing of the generated
    volume are not resolved finely enough for the selected PA device.\n
Usage: module acoustic_forward_module, naming convention
"""
SENSOR_ELEMENT_POSITIONS = "sensor_element_positions"
"""
    Positions of the individual detector elements of the used PA device in the generated volume.\n
Usage: module acoustic_forward_module, naming convention
"""
DETECTOR_ELEMENT_WIDTH_MM = "detector_element_width_mm"
"""
Width of a detector element. Corresponds to the pitch - the distance between two detector element borders.\n
Usage: module acoustic_forward_module, naming convention
"""
SENSOR_CONCAVE = "concave"
"""
Indicates that the geometry of the used PA device in the Mitk Beamforming is concave.\n
Usage: adapter MitkBeamformingAdapter, naming convention
"""
SENSOR_LINEAR = "linear"
"""
Indicates that the geometry of the used PA device in the Mitk Beamforming is linear.\n
Usage: adapter MitkBeamformingAdapter, naming convention
"""
SENSOR_RADIUS_MM = "sensor_radius_mm"
"""
Radius of a concave geometry of the used PA device.\n
Usage: adapter AcousticForwardModelKWaveAdapter, naming convention
"""
SENSOR_PITCH_MM = "sensor_pitch_mm"
"""
Pitch of detector elements of the used PA device.\n
Usage: adapter AcousticForwardModelKWaveAdapter, naming convention
"""
# Pipelining parameters
DATA_FIELD = "data_field"
"""
Defines which data field a certain function shall be applied to.\n
Usage: module core.processing_components
"""
# Noise properties
NOISE_SHAPE = "noise_shape"
"""
Shape of a noise model.\n
Usage: module core.processing_components.noise
"""
NOISE_SCALE = "noise_scale"
"""
Scale of a noise model.\n
Usage: module core.processing_components.noise
"""
NOISE_FREQUENCY = "noise_frequency"
"""
Frequency of the noise model.\n
Usage: module core.processing_components.noise
"""
NOISE_MIN = "noise_min"
"""
Min of a noise model.\n
Usage: module core.processing_components.noise
"""
NOISE_MAX = "noise_max"
"""
Max of a noise model.\n
Usage: module core.processing_components.noise
"""
NOISE_MEAN = "noise_mean"
"""
Mean of a noise model.\n
Usage: module core.processing_components.noise
"""
NOISE_STD = "noise_std"
"""
Standard deviation of a noise model.\n
Usage: module core.processing_components.noise
"""
NOISE_MODE = "noise_mode"
"""
The mode tag of a noise model is used to differentiate between\n
Tags.NOISE_MODE_ADDITIVE and Tags.NOISE_MODE_MULTIPLICATIVE.\n
Usage: module core.processing_components.noise
"""
NOISE_MODE_ADDITIVE = "noise_mode_additive"
"""
A noise model shall be applied additively s_n = s + n.\n
Usage: module core.processing_components.noise
"""
NOISE_MODE_MULTIPLICATIVE = "noise_mode_multiplicative"
"""
A noise model shall be applied multiplicatively s_n = s * n.\n
Usage: module core.processing_components.noise
"""
NOISE_NON_NEGATIVITY_CONSTRAINT = "noise_non_negativity_constraint"
"""
Defines if after the noise model negative values shall be allowed.\n
Usage: module core.processing_components.noise
"""
VOLUME_CREATION_MODEL_SETTINGS = ("volume_creation_model_settings", dict)
""""
Volume Creation Model Settings
"""
# Structures
STRUCTURES = ("structures", dict)
"""
Settings dictionary which contains all the structures that should be generated inside the volume.\n
Usage: module volume_creation_module
"""
CHILD_STRUCTURES = ("child_structures", dict)
"""
Settings dictionary which contains all the child structures of a parent structure.\n
Usage: module volume_creation_module
"""
HORIZONTAL_LAYER_STRUCTURE = "HorizontalLayerStructure"
"""
Corresponds to the HorizontalLayerStructure in the structure_library.\n
Usage: module volume_creation_module, naming_convention
"""
CIRCULAR_TUBULAR_STRUCTURE = "CircularTubularStructure"
"""
Corresponds to the CircularTubularStructure in the structure_library.\n
Usage: module volume_creation_module, naming_convention
"""
ELLIPTICAL_TUBULAR_STRUCTURE = "EllipticalTubularStructure"
"""
Corresponds to the EllipticalTubularStructure in the structure_library.\n
Usage: module volume_creation_module, naming_convention
"""
SPHERICAL_STRUCTURE = "SphericalStructure"
"""
Corresponds to the SphericalStructure in the structure_library.\n
Usage: module volume_creation_module, naming_convention
"""
PARALLELEPIPED_STRUCTURE = "ParallelepipedStructure"
"""
Corresponds to the ParallelepipedStructure in the structure_library.\n
Usage: module volume_creation_module, naming_convention
"""
RECTANGULAR_CUBOID_STRUCTURE = "RectangularCuboidStructure"
"""
Corresponds to the RectangularCuboidStructure in the structure_library.\n
Usage: module volume_creation_module, naming_convention
"""
STRUCTURE_TYPE = ("structure_type", str)
"""
Defines the structure type to one structure in the structure_library.\n
Usage: module volume_creation_module
"""
STRUCTURE_SEGMENTATION_TYPE = "structure_segmentation_type"
"""
Defines the structure segmentation type to one segmentation type in SegmentationClasses.\n
Usage: module volume_creation_module, naming convention
"""
UNITS_ARBITRARY = "arbitrary_unity"
"""
Define arbitrary units if no units were given in the settings.\n
Usage: module optical_simulation_module, naming convention
"""
UNITS_PRESSURE = "newton_per_meters_squared"
"""
Standard units used in the SIMPA framework.\n
Usage: module optical_simulation_module, naming convention
"""
"""
IO settings
"""
SIMPA_OUTPUT_PATH = ("simpa_output_path", str)
"""
Default path of the SIMPA output if not specified otherwise.\n
Usage: SIMPA package
"""
SIMPA_OUTPUT_NAME = "simpa_output.hdf5"
"""
Default filename of the SIMPA output if not specified otherwise.\n
Usage: SIMPA package, naming convention
"""
SETTINGS = "settings"
"""
Location of the simulation settings in the SIMPA output file.\n
Usage: naming convention
"""
SIMULATION_PROPERTIES = "simulation_properties"
"""
Location of the simulation properties in the SIMPA output file.\n
Usage: naming convention
"""
SIMULATIONS = "simulations"
"""
Location of the simulation outputs in the SIMPA output file.\n
Usage: naming convention
"""
UPSAMPLED_DATA = "upsampled_data"
"""
Name of the simulation outputs as upsampled data in the SIMPA output file.\n
Usage: naming convention
"""
ORIGINAL_DATA = "original_data"
"""
Name of the simulation outputs as original data in the SIMPA output file.\n
Usage: naming convention
"""
"""
Image Processing
"""
IMAGE_PROCESSING = "image_processing"
"""
Location of the image algorithms outputs in the SIMPA output file.\n
Usage: naming convention
"""
ITERATIVE_qPAI_RESULT = "iterative_qpai_result"
"""
Name of the data field in which the iterative qPAI result will be stored.\n
Usage: naming convention
"""
LINEAR_UNMIXING_RESULT = "linear_unmixing_result"
"""
Name of the data field in which the linear unmixing result will be stored.\n
Usage: naming convention
"""
"""
Iterative qPAI Reconstruction
"""
ITERATIVE_RECONSTRUCTION_CONSTANT_REGULARIZATION = ("constant_regularization", (bool, np.bool, np.bool_))
"""
If True, the fluence regularization will be constant.\n
Usage: module algorithms (iterative_qPAI_algorithm.py)
"""
DOWNSCALE_FACTOR = ("downscale_factor", (int, float, np.int_, np.float_))
"""
Downscale factor of the resampling in the qPAI reconstruction\n
Usage: module algorithms (iterative_qPAI_algorithm.py)
"""
ITERATIVE_RECONSTRUCTION_MAX_ITERATION_NUMBER = ("maximum_iteration_number", (int, np.integer))
"""
Maximum number of iterations performed in iterative reconstruction if stopping criterion is not reached.\n
Usage: module algorithms (iterative_qPAI_algorithm.py)
"""
ITERATIVE_RECONSTRUCTION_REGULARIZATION_SIGMA = ("regularization_sigma", (int, np.integer, float, np.float))
"""
Sigma value used for constant regularization of fluence.\n
Usage: module algorithms (iterative_qPAI_algorithm.py)
"""
ITERATIVE_RECONSTRUCTION_SAVE_INTERMEDIATE_RESULTS = ("save_intermediate_results", (bool, np.bool, np.bool_))
"""
If True, a list of all intermediate absorption updates (middle slices only) will be saved in a numpy file.\n
Usage: module algorithms (iterative_qPAI_algorithm.py)
"""
ITERATIVE_RECONSTRUCTION_STOPPING_LEVEL = ("iteration_stopping_level", (int, np.integer, float, np.float))
"""
    Ratio of improvement to the preceding error at which the iterative method stops.\n
Usage: module algorithms (iterative_qPAI_algorithm.py)
"""
| 43,421 | 32.171887 | 125 | py |
RandStainNA | RandStainNA-master/main.py | import os
from randstainna import RandStainNA
import cv2
if __name__ == "__main__":
"""
Usage1: Demo(for visualization)
"""
# # Setting: is_train = False
# randstainna = RandStainNA(
# yaml_file = './randstainna/CRC_LAB_randomTrue_n0.yaml',
# std_hyper = 0.0,
# distribution = 'normal',
# probability = 1.0,
# is_train = False
# )
# print(randstainna)
# img_path_list = [
# './visualization/origin/TUM-AEPINLNQ.png',
# './visualization/origin/TUM-DFGFFNEY.png',
# './visualization/origin/TUM-EWFNFSQL.png',
# './visualization/origin/TUM-TCGA-CVATFAAT.png'
# ]
# save_dir_path = './visualization/randstainna'
# if not os.path.exists(save_dir_path):
# os.mkdir(save_dir_path)
# for img_path in img_path_list:
# img = randstainna(cv2.imread(img_path))
# save_img_path = save_dir_path + '/{}'.format(img_path.split('/')[-1])
# cv2.imwrite(save_img_path,img)
"""
    Usage2: torchvision.transforms (for training)
"""
# Setting: is_train = True
from torchvision import transforms
#### calling the randstainna
transforms_list = [
RandStainNA(
yaml_file="./CRC_LAB_randomTrue_n0.yaml",
std_hyper=-0.3,
probability=1.0,
distribution="normal",
is_train=True,
)
]
transforms.Compose(transforms_list)
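    # Hedged usage sketch (not part of the original script): the composed transform can be passed
    # to any torchvision-style dataset. The dataset path and the surrounding transforms below are
    # placeholders, and whether RandStainNA expects PIL images or ndarrays in training mode depends
    # on its implementation, which is not shown here.
    #
    # from torchvision import datasets
    # train_transform = transforms.Compose(transforms_list + [transforms.ToTensor()])
    # train_set = datasets.ImageFolder("/path/to/train", transform=train_transform)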
| 1,455 | 26.471698 | 79 | py |
RandStainNA | RandStainNA-master/stain_augmentation.py | import PIL.Image as Image
import os
from torchvision import transforms as transforms
img_path_list = [
"./visualization/origin/TUM-AEPINLNQ.png",
"./visualization/origin/TUM-DFGFFNEY.png",
"./visualization/origin/TUM-EWFNFSQL.png",
"./visualization/origin/TUM-TCGA-CVATFAAT.png",
]
save_dir_path = "./visualization/stain_augmentation"
if not os.path.exists(save_dir_path):
os.mkdir(save_dir_path)
if __name__ == "__main__":
for img_path in img_path_list:
image = transforms.ColorJitter(
brightness=0.35, contrast=0.5, saturation=0.5, hue=0.5
)(Image.open(img_path))
save_img_path = save_dir_path + "/{}".format(img_path.split("/")[-1])
image.save(save_img_path)
| 733 | 30.913043 | 77 | py |
RandStainNA | RandStainNA-master/stain_normalization.py | import PIL.Image as Image
import os
from torchvision import transforms as transforms
import cv2
import numpy as np
from skimage import color
def quick_loop(image, image_avg, image_std, temp_avg, temp_std, isHed=False):
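    # Reinhard-style statistics transfer, applied channel-wise: centre the source image on its own
    # per-channel mean, rescale by the ratio of template std to source std, then shift to the
    # template mean. LAB/HSV images are clipped back to the [0, 255] uint8 range, while HED stays
    # as floating point in [0, 1].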
image = (image - np.array(image_avg)) * (
np.array(temp_std) / np.array(image_std)
) + np.array(temp_avg)
if isHed: # HED in range[0,1]
pass
else: # LAB/HSV in range[0,255]
image = np.clip(image, 0, 255).astype(np.uint8)
return image
def getavgstd(image):
avg = []
std = []
image_avg_l = np.mean(image[:, :, 0])
image_std_l = np.std(image[:, :, 0])
image_avg_a = np.mean(image[:, :, 1])
image_std_a = np.std(image[:, :, 1])
image_avg_b = np.mean(image[:, :, 2])
image_std_b = np.std(image[:, :, 2])
avg.append(image_avg_l)
avg.append(image_avg_a)
avg.append(image_avg_b)
std.append(image_std_l)
std.append(image_std_a)
std.append(image_std_b)
return (avg, std)
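# Note: getavgstd simply collects the per-channel mean and standard deviation of a 3-channel image.
# A vectorized equivalent (illustrative only, not used by this script) would be:
#   avg = np.mean(image, axis=(0, 1))
#   std = np.std(image, axis=(0, 1))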
def reinhard_cn(image_path, temp_path, save_path, isDebug=False, color_space=None):
isHed = False
image = cv2.imread(image_path)
if isDebug:
cv2.imwrite("source.png", image)
template = cv2.imread(temp_path) ### template images
if isDebug:
cv2.imwrite("template.png", template)
if color_space == "LAB":
image = cv2.cvtColor(image, cv2.COLOR_BGR2LAB) # LAB range[0,255]
template = cv2.cvtColor(template, cv2.COLOR_BGR2LAB)
elif color_space == "HED":
isHed = True
image = cv2.cvtColor(
image, cv2.COLOR_BGR2RGB
) # color.rgb2hed needs RGB as input
template = cv2.cvtColor(template, cv2.COLOR_BGR2RGB)
image = color.rgb2hed(image) # HED range[0,1]
template = color.rgb2hed(template)
elif color_space == "HSV":
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
template = cv2.cvtColor(template, cv2.COLOR_BGR2HSV)
elif color_space == "GRAY":
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imwrite(save_path, image)
return
image_avg, image_std = getavgstd(image)
template_avg, template_std = getavgstd(template)
if isDebug:
print("isDebug!!!")
print("source_avg: ", image_avg)
print("source_std: ", image_std)
print("target_avg: ", template_avg)
print("target_std: ", template_std)
# Reinhard's Method to Stain Normalization
image = quick_loop(
image, image_avg, image_std, template_avg, template_std, isHed=isHed
)
if color_space == "LAB":
image = cv2.cvtColor(image, cv2.COLOR_LAB2BGR)
cv2.imwrite(save_path, image)
elif color_space == "HED": # HED[0,1]->RGB[0,255]
image = color.hed2rgb(image)
imin = image.min()
imax = image.max()
image = (255 * (image - imin) / (imax - imin)).astype("uint8")
image = Image.fromarray(image)
image.save(save_path)
elif color_space == "HSV":
image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
cv2.imwrite(save_path, image)
if isDebug:
cv2.imwrite("results.png", image)
if __name__ == "__main__":
img_path_list = [
"./visualization/origin/TUM-AEPINLNQ.png",
"./visualization/origin/TUM-DFGFFNEY.png",
"./visualization/origin/TUM-EWFNFSQL.png",
"./visualization/origin/TUM-TCGA-CVATFAAT.png",
]
template_path = "./visualization/origin/TUM-EWFNFSQL.png"
save_dir_path = "./visualization/stain_normalization"
if not os.path.exists(save_dir_path):
os.mkdir(save_dir_path)
for img_path in img_path_list:
save_path = save_dir_path + "/{}".format(img_path.split("/")[-1])
img_colorNorm = reinhard_cn(
img_path, template_path, save_path, isDebug=False, color_space="LAB"
)
| 3,843 | 31.854701 | 83 | py |
RandStainNA | RandStainNA-master/classification/inference.py | #!/usr/bin/env python3
"""PyTorch Inference Script
An example inference script that outputs top-k class ids for images in a folder into a csv.
Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
"""
import os
import time
import argparse
import logging
import numpy as np
import torch
from timm.models import create_model, apply_test_time_pool
from timm.data import ImageDataset, create_loader, resolve_data_config
from timm.utils import AverageMeter, setup_default_logging
torch.backends.cudnn.benchmark = True
_logger = logging.getLogger('inference')
parser = argparse.ArgumentParser(description='PyTorch ImageNet Inference')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--output_dir', metavar='DIR', default='./',
help='path to output files')
parser.add_argument('--model', '-m', metavar='MODEL', default='dpn92',
help='model architecture (default: dpn92)')
parser.add_argument('-j', '--workers', default=2, type=int, metavar='N',
help='number of data loading workers (default: 2)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--img-size', default=None, type=int,
metavar='N', help='Input image dimension')
parser.add_argument('--input-size', default=None, nargs=3, type=int,
metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
help='Override std deviation of of dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('--num-classes', type=int, default=1000,
help='Number classes in dataset')
parser.add_argument('--log-freq', default=10, type=int,
metavar='N', help='batch logging frequency (default: 10)')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--num-gpu', type=int, default=1,
help='Number of GPUS to use')
parser.add_argument('--no-test-pool', dest='no_test_pool', action='store_true',
help='disable test time pool')
parser.add_argument('--topk', default=5, type=int,
metavar='N', help='Top-k to output to CSV')
def main():
setup_default_logging()
args = parser.parse_args()
# might as well try to do something useful...
args.pretrained = args.pretrained or not args.checkpoint
# create model
model = create_model(
args.model,
num_classes=args.num_classes,
in_chans=3,
pretrained=args.pretrained,
checkpoint_path=args.checkpoint)
_logger.info('Model %s created, param count: %d' %
(args.model, sum([m.numel() for m in model.parameters()])))
config = resolve_data_config(vars(args), model=model)
model, test_time_pool = (model, False) if args.no_test_pool else apply_test_time_pool(model, config)
if args.num_gpu > 1:
model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu))).cuda()
else:
model = model.cuda()
loader = create_loader(
ImageDataset(args.data),
input_size=config['input_size'],
batch_size=args.batch_size,
use_prefetcher=True,
interpolation=config['interpolation'],
mean=config['mean'],
std=config['std'],
num_workers=args.workers,
crop_pct=1.0 if test_time_pool else config['crop_pct'])
model.eval()
k = min(args.topk, args.num_classes)
batch_time = AverageMeter()
end = time.time()
topk_ids = []
with torch.no_grad():
for batch_idx, (input, _) in enumerate(loader):
input = input.cuda()
labels = model(input)
topk = labels.topk(k)[1]
topk_ids.append(topk.cpu().numpy())
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if batch_idx % args.log_freq == 0:
_logger.info('Predict: [{0}/{1}] Time {batch_time.val:.3f} ({batch_time.avg:.3f})'.format(
batch_idx, len(loader), batch_time=batch_time))
topk_ids = np.concatenate(topk_ids, axis=0)
with open(os.path.join(args.output_dir, './topk_ids.csv'), 'w') as out_file:
filenames = loader.dataset.filenames(basename=True)
for filename, label in zip(filenames, topk_ids):
out_file.write('{0},{1}\n'.format(
filename, ','.join([ str(v) for v in label])))
if __name__ == '__main__':
main()
| 5,235 | 39.90625 | 137 | py |
RandStainNA | RandStainNA-master/classification/benchmark.py | #!/usr/bin/env python3
""" Model Benchmark Script
An inference and train step benchmark script for timm models.
Hacked together by Ross Wightman (https://github.com/rwightman)
"""
import argparse
import os
import csv
import json
import time
import logging
import torch
import torch.nn as nn
import torch.nn.parallel
from collections import OrderedDict
from contextlib import suppress
from functools import partial
from timm.models import create_model, is_model, list_models
from timm.optim import create_optimizer_v2
from timm.data import resolve_data_config
from timm.utils import setup_default_logging, set_jit_fuser
has_apex = False
try:
from apex import amp
has_apex = True
except ImportError:
pass
has_native_amp = False
try:
if getattr(torch.cuda.amp, 'autocast') is not None:
has_native_amp = True
except AttributeError:
pass
try:
from deepspeed.profiling.flops_profiler import get_model_profile
has_deepspeed_profiling = True
except ImportError as e:
has_deepspeed_profiling = False
try:
from fvcore.nn import FlopCountAnalysis, flop_count_str, ActivationCountAnalysis
has_fvcore_profiling = True
except ImportError as e:
FlopCountAnalysis = None
has_fvcore_profiling = False
torch.backends.cudnn.benchmark = True
_logger = logging.getLogger('validate')
parser = argparse.ArgumentParser(description='PyTorch Benchmark')
# benchmark specific args
parser.add_argument('--model-list', metavar='NAME', default='',
help='txt file based list of model names to benchmark')
parser.add_argument('--bench', default='both', type=str,
help="Benchmark mode. One of 'inference', 'train', 'both'. Defaults to 'both'")
parser.add_argument('--detail', action='store_true', default=False,
help='Provide train fwd/bwd/opt breakdown detail if True. Defaults to False')
parser.add_argument('--results-file', default='', type=str, metavar='FILENAME',
help='Output csv file for validation results (summary)')
parser.add_argument('--num-warm-iter', default=10, type=int,
metavar='N', help='Number of warmup iterations (default: 10)')
parser.add_argument('--num-bench-iter', default=40, type=int,
metavar='N', help='Number of benchmark iterations (default: 40)')
# common inference / train args
parser.add_argument('--model', '-m', metavar='NAME', default='resnet50',
help='model architecture (default: resnet50)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--img-size', default=None, type=int,
metavar='N', help='Input image dimension, uses model default if empty')
parser.add_argument('--input-size', default=None, nargs=3, type=int,
metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
parser.add_argument('--use-train-size', action='store_true', default=False,
help='Run inference at train size, not test-input-size if it exists.')
parser.add_argument('--num-classes', type=int, default=None,
help='Number classes in dataset')
parser.add_argument('--gp', default=None, type=str, metavar='POOL',
help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.')
parser.add_argument('--channels-last', action='store_true', default=False,
help='Use channels_last memory layout')
parser.add_argument('--amp', action='store_true', default=False,
help='use PyTorch Native AMP for mixed precision training. Overrides --precision arg.')
parser.add_argument('--precision', default='float32', type=str,
help='Numeric precision. One of (amp, float32, float16, bfloat16, tf32)')
parser.add_argument('--torchscript', dest='torchscript', action='store_true',
help='convert model torchscript for inference')
parser.add_argument('--fuser', default='', type=str,
help="Select jit fuser. One of ('', 'te', 'old', 'nvfuser')")
# train optimizer parameters
parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER',
                    help='Optimizer (default: "sgd")')
parser.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: None, use opt default)')
parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='Optimizer momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=0.0001,
help='weight decay (default: 0.0001)')
parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--clip-mode', type=str, default='norm',
help='Gradient clipping mode. One of ("norm", "value", "agc")')
# model regularization / loss params that impact model or loss fn
parser.add_argument('--smoothing', type=float, default=0.1,
help='Label smoothing (default: 0.1)')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--drop-path', type=float, default=None, metavar='PCT',
help='Drop path rate (default: None)')
parser.add_argument('--drop-block', type=float, default=None, metavar='PCT',
help='Drop block rate (default: None)')
def timestamp(sync=False):
return time.perf_counter()
def cuda_timestamp(sync=False, device=None):
if sync:
torch.cuda.synchronize(device=device)
return time.perf_counter()
def count_params(model: nn.Module):
return sum([m.numel() for m in model.parameters()])
def resolve_precision(precision: str):
assert precision in ('amp', 'float16', 'bfloat16', 'float32')
use_amp = False
model_dtype = torch.float32
data_dtype = torch.float32
if precision == 'amp':
use_amp = True
elif precision == 'float16':
model_dtype = torch.float16
data_dtype = torch.float16
elif precision == 'bfloat16':
model_dtype = torch.bfloat16
data_dtype = torch.bfloat16
return use_amp, model_dtype, data_dtype
def profile_deepspeed(model, input_size=(3, 224, 224), batch_size=1, detailed=False):
macs, _ = get_model_profile(
model=model,
input_res=(batch_size,) + input_size, # input shape or input to the input_constructor
input_constructor=None, # if specified, a constructor taking input_res is used as input to the model
print_profile=detailed, # prints the model graph with the measured profile attached to each module
detailed=detailed, # print the detailed profile
warm_up=10, # the number of warm-ups before measuring the time of each module
as_string=False, # print raw numbers (e.g. 1000) or as human-readable strings (e.g. 1k)
output_file=None, # path to the output file. If None, the profiler prints to stdout.
ignore_modules=None) # the list of modules to ignore in the profiling
return macs, 0 # no activation count in DS
def profile_fvcore(model, input_size=(3, 224, 224), batch_size=1, detailed=False, force_cpu=False):
if force_cpu:
model = model.to('cpu')
device, dtype = next(model.parameters()).device, next(model.parameters()).dtype
example_input = torch.ones((batch_size,) + input_size, device=device, dtype=dtype)
fca = FlopCountAnalysis(model, example_input)
aca = ActivationCountAnalysis(model, example_input)
if detailed:
fcs = flop_count_str(fca)
print(fcs)
return fca.total(), aca.total()
class BenchmarkRunner:
def __init__(
self, model_name, detail=False, device='cuda', torchscript=False, precision='float32',
fuser='', num_warm_iter=10, num_bench_iter=50, use_train_size=False, **kwargs):
self.model_name = model_name
self.detail = detail
self.device = device
self.use_amp, self.model_dtype, self.data_dtype = resolve_precision(precision)
self.channels_last = kwargs.pop('channels_last', False)
self.amp_autocast = torch.cuda.amp.autocast if self.use_amp else suppress
if fuser:
set_jit_fuser(fuser)
self.model = create_model(
model_name,
num_classes=kwargs.pop('num_classes', None),
in_chans=3,
global_pool=kwargs.pop('gp', 'fast'),
scriptable=torchscript)
self.model.to(
device=self.device,
dtype=self.model_dtype,
memory_format=torch.channels_last if self.channels_last else None)
self.num_classes = self.model.num_classes
self.param_count = count_params(self.model)
_logger.info('Model %s created, param count: %d' % (model_name, self.param_count))
self.scripted = False
if torchscript:
self.model = torch.jit.script(self.model)
self.scripted = True
data_config = resolve_data_config(kwargs, model=self.model, use_test_size=not use_train_size)
self.input_size = data_config['input_size']
self.batch_size = kwargs.pop('batch_size', 256)
self.example_inputs = None
self.num_warm_iter = num_warm_iter
self.num_bench_iter = num_bench_iter
self.log_freq = num_bench_iter // 5
if 'cuda' in self.device:
self.time_fn = partial(cuda_timestamp, device=self.device)
else:
self.time_fn = timestamp
def _init_input(self):
self.example_inputs = torch.randn(
(self.batch_size,) + self.input_size, device=self.device, dtype=self.data_dtype)
if self.channels_last:
self.example_inputs = self.example_inputs.contiguous(memory_format=torch.channels_last)
class InferenceBenchmarkRunner(BenchmarkRunner):
def __init__(self, model_name, device='cuda', torchscript=False, **kwargs):
super().__init__(model_name=model_name, device=device, torchscript=torchscript, **kwargs)
self.model.eval()
def run(self):
def _step():
t_step_start = self.time_fn()
with self.amp_autocast():
output = self.model(self.example_inputs)
t_step_end = self.time_fn(True)
return t_step_end - t_step_start
_logger.info(
f'Running inference benchmark on {self.model_name} for {self.num_bench_iter} steps w/ '
f'input size {self.input_size} and batch size {self.batch_size}.')
with torch.no_grad():
self._init_input()
for _ in range(self.num_warm_iter):
_step()
total_step = 0.
num_samples = 0
t_run_start = self.time_fn()
for i in range(self.num_bench_iter):
delta_fwd = _step()
total_step += delta_fwd
num_samples += self.batch_size
num_steps = i + 1
if num_steps % self.log_freq == 0:
_logger.info(
f"Infer [{num_steps}/{self.num_bench_iter}]."
f" {num_samples / total_step:0.2f} samples/sec."
f" {1000 * total_step / num_steps:0.3f} ms/step.")
t_run_end = self.time_fn(True)
t_run_elapsed = t_run_end - t_run_start
results = dict(
samples_per_sec=round(num_samples / t_run_elapsed, 2),
step_time=round(1000 * total_step / self.num_bench_iter, 3),
batch_size=self.batch_size,
img_size=self.input_size[-1],
param_count=round(self.param_count / 1e6, 2),
)
retries = 0 if self.scripted else 2 # skip profiling if model is scripted
while retries:
retries -= 1
try:
if has_deepspeed_profiling:
macs, _ = profile_deepspeed(self.model, self.input_size)
results['gmacs'] = round(macs / 1e9, 2)
elif has_fvcore_profiling:
macs, activations = profile_fvcore(self.model, self.input_size, force_cpu=not retries)
results['gmacs'] = round(macs / 1e9, 2)
results['macts'] = round(activations / 1e6, 2)
except RuntimeError as e:
pass
_logger.info(
f"Inference benchmark of {self.model_name} done. "
f"{results['samples_per_sec']:.2f} samples/sec, {results['step_time']:.2f} ms/step")
return results
class TrainBenchmarkRunner(BenchmarkRunner):
def __init__(self, model_name, device='cuda', torchscript=False, **kwargs):
super().__init__(model_name=model_name, device=device, torchscript=torchscript, **kwargs)
self.model.train()
        if kwargs.pop('smoothing', 0) > 0:
            # NOTE: label smoothing is not actually applied here; both branches fall back to plain
            # cross entropy, so a non-zero --smoothing value has no effect on the benchmarked loss.
            self.loss = nn.CrossEntropyLoss().to(self.device)
        else:
            self.loss = nn.CrossEntropyLoss().to(self.device)
self.target_shape = tuple()
self.optimizer = create_optimizer_v2(
self.model,
opt=kwargs.pop('opt', 'sgd'),
lr=kwargs.pop('lr', 1e-4))
def _gen_target(self, batch_size):
return torch.empty(
(batch_size,) + self.target_shape, device=self.device, dtype=torch.long).random_(self.num_classes)
def run(self):
def _step(detail=False):
self.optimizer.zero_grad() # can this be ignored?
t_start = self.time_fn()
t_fwd_end = t_start
t_bwd_end = t_start
with self.amp_autocast():
output = self.model(self.example_inputs)
if isinstance(output, tuple):
output = output[0]
if detail:
t_fwd_end = self.time_fn(True)
target = self._gen_target(output.shape[0])
self.loss(output, target).backward()
if detail:
t_bwd_end = self.time_fn(True)
self.optimizer.step()
t_end = self.time_fn(True)
if detail:
delta_fwd = t_fwd_end - t_start
delta_bwd = t_bwd_end - t_fwd_end
delta_opt = t_end - t_bwd_end
return delta_fwd, delta_bwd, delta_opt
else:
delta_step = t_end - t_start
return delta_step
_logger.info(
f'Running train benchmark on {self.model_name} for {self.num_bench_iter} steps w/ '
f'input size {self.input_size} and batch size {self.batch_size}.')
self._init_input()
for _ in range(self.num_warm_iter):
_step()
t_run_start = self.time_fn()
if self.detail:
total_fwd = 0.
total_bwd = 0.
total_opt = 0.
num_samples = 0
for i in range(self.num_bench_iter):
delta_fwd, delta_bwd, delta_opt = _step(True)
num_samples += self.batch_size
total_fwd += delta_fwd
total_bwd += delta_bwd
total_opt += delta_opt
num_steps = (i + 1)
if num_steps % self.log_freq == 0:
total_step = total_fwd + total_bwd + total_opt
_logger.info(
f"Train [{num_steps}/{self.num_bench_iter}]."
f" {num_samples / total_step:0.2f} samples/sec."
f" {1000 * total_fwd / num_steps:0.3f} ms/step fwd,"
f" {1000 * total_bwd / num_steps:0.3f} ms/step bwd,"
f" {1000 * total_opt / num_steps:0.3f} ms/step opt."
)
total_step = total_fwd + total_bwd + total_opt
t_run_elapsed = self.time_fn() - t_run_start
results = dict(
samples_per_sec=round(num_samples / t_run_elapsed, 2),
step_time=round(1000 * total_step / self.num_bench_iter, 3),
fwd_time=round(1000 * total_fwd / self.num_bench_iter, 3),
bwd_time=round(1000 * total_bwd / self.num_bench_iter, 3),
opt_time=round(1000 * total_opt / self.num_bench_iter, 3),
batch_size=self.batch_size,
img_size=self.input_size[-1],
param_count=round(self.param_count / 1e6, 2),
)
else:
total_step = 0.
num_samples = 0
for i in range(self.num_bench_iter):
delta_step = _step(False)
num_samples += self.batch_size
total_step += delta_step
num_steps = (i + 1)
if num_steps % self.log_freq == 0:
_logger.info(
f"Train [{num_steps}/{self.num_bench_iter}]."
f" {num_samples / total_step:0.2f} samples/sec."
f" {1000 * total_step / num_steps:0.3f} ms/step.")
t_run_elapsed = self.time_fn() - t_run_start
results = dict(
samples_per_sec=round(num_samples / t_run_elapsed, 2),
step_time=round(1000 * total_step / self.num_bench_iter, 3),
batch_size=self.batch_size,
img_size=self.input_size[-1],
param_count=round(self.param_count / 1e6, 2),
)
_logger.info(
f"Train benchmark of {self.model_name} done. "
f"{results['samples_per_sec']:.2f} samples/sec, {results['step_time']:.2f} ms/sample")
return results
class ProfileRunner(BenchmarkRunner):
def __init__(self, model_name, device='cuda', profiler='', **kwargs):
super().__init__(model_name=model_name, device=device, **kwargs)
if not profiler:
if has_deepspeed_profiling:
profiler = 'deepspeed'
elif has_fvcore_profiling:
profiler = 'fvcore'
assert profiler, "One of deepspeed or fvcore needs to be installed for profiling to work."
self.profiler = profiler
self.model.eval()
def run(self):
_logger.info(
f'Running profiler on {self.model_name} w/ '
f'input size {self.input_size} and batch size {self.batch_size}.')
macs = 0
activations = 0
if self.profiler == 'deepspeed':
macs, _ = profile_deepspeed(self.model, self.input_size, batch_size=self.batch_size, detailed=True)
elif self.profiler == 'fvcore':
macs, activations = profile_fvcore(self.model, self.input_size, batch_size=self.batch_size, detailed=True)
results = dict(
gmacs=round(macs / 1e9, 2),
macts=round(activations / 1e6, 2),
batch_size=self.batch_size,
img_size=self.input_size[-1],
param_count=round(self.param_count / 1e6, 2),
)
_logger.info(
f"Profile of {self.model_name} done. "
f"{results['gmacs']:.2f} GMACs, {results['param_count']:.2f} M params.")
return results
def decay_batch_exp(batch_size, factor=0.5, divisor=16):
out_batch_size = batch_size * factor
if out_batch_size > divisor:
out_batch_size = (out_batch_size + 1) // divisor * divisor
else:
out_batch_size = batch_size - 1
return max(0, int(out_batch_size))
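# Worked example with the defaults (factor=0.5, divisor=16): starting from 256 the retry batch
# sizes are 128 -> 64 -> 32; once half of the current batch is no longer larger than the divisor,
# the size only shrinks by one per retry (32 -> 31 -> 30 -> ...), so OOM retries degrade gracefully.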
def _try_run(model_name, bench_fn, initial_batch_size, bench_kwargs):
batch_size = initial_batch_size
results = dict()
while batch_size >= 1:
torch.cuda.empty_cache()
try:
bench = bench_fn(model_name=model_name, batch_size=batch_size, **bench_kwargs)
results = bench.run()
return results
except RuntimeError as e:
e_str = str(e)
print(e_str)
if 'channels_last' in e_str:
print(f'Error: {model_name} not supported in channels_last, skipping.')
break
print(f'Error: "{e_str}" while running benchmark. Reducing batch size to {batch_size} for retry.')
batch_size = decay_batch_exp(batch_size)
return results
def benchmark(args):
if args.amp:
_logger.warning("Overriding precision to 'amp' since --amp flag set.")
args.precision = 'amp'
_logger.info(f'Benchmarking in {args.precision} precision. '
f'{"NHWC" if args.channels_last else "NCHW"} layout. '
f'torchscript {"enabled" if args.torchscript else "disabled"}')
bench_kwargs = vars(args).copy()
bench_kwargs.pop('amp')
model = bench_kwargs.pop('model')
batch_size = bench_kwargs.pop('batch_size')
bench_fns = (InferenceBenchmarkRunner,)
prefixes = ('infer',)
if args.bench == 'both':
bench_fns = (
InferenceBenchmarkRunner,
TrainBenchmarkRunner
)
prefixes = ('infer', 'train')
elif args.bench == 'train':
bench_fns = TrainBenchmarkRunner,
prefixes = 'train',
elif args.bench.startswith('profile'):
# specific profiler used if included in bench mode string, otherwise default to deepspeed, fallback to fvcore
if 'deepspeed' in args.bench:
assert has_deepspeed_profiling, "deepspeed must be installed to use deepspeed flop counter"
bench_kwargs['profiler'] = 'deepspeed'
elif 'fvcore' in args.bench:
assert has_fvcore_profiling, "fvcore must be installed to use fvcore flop counter"
bench_kwargs['profiler'] = 'fvcore'
bench_fns = ProfileRunner,
batch_size = 1
model_results = OrderedDict(model=model)
for prefix, bench_fn in zip(prefixes, bench_fns):
run_results = _try_run(model, bench_fn, initial_batch_size=batch_size, bench_kwargs=bench_kwargs)
if prefix:
run_results = {'_'.join([prefix, k]): v for k, v in run_results.items()}
model_results.update(run_results)
param_count = model_results.pop('infer_param_count', model_results.pop('train_param_count', 0))
model_results.setdefault('param_count', param_count)
model_results.pop('train_param_count', 0)
return model_results if model_results['param_count'] else dict()
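# Example invocation (editor's sketch; assumes this file is run as benchmark.py and that the
# --model, --bench, --batch-size and --amp options are defined by the argument parser earlier
# in this script, as in the upstream timm benchmark script):
#   python benchmark.py --model resnet50 --bench both --batch-size 256 --amp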
def main():
setup_default_logging()
args = parser.parse_args()
model_cfgs = []
model_names = []
if args.model_list:
args.model = ''
with open(args.model_list) as f:
model_names = [line.rstrip() for line in f]
model_cfgs = [(n, None) for n in model_names]
elif args.model == 'all':
# validate all models in a list of names with pretrained checkpoints
args.pretrained = True
model_names = list_models(pretrained=True, exclude_filters=['*in21k'])
model_cfgs = [(n, None) for n in model_names]
elif not is_model(args.model):
# model name doesn't exist, try as wildcard filter
model_names = list_models(args.model)
model_cfgs = [(n, None) for n in model_names]
if len(model_cfgs):
results_file = args.results_file or './benchmark.csv'
_logger.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names)))
results = []
try:
for m, _ in model_cfgs:
if not m:
continue
args.model = m
r = benchmark(args)
if r:
results.append(r)
time.sleep(10)
except KeyboardInterrupt as e:
pass
sort_key = 'infer_samples_per_sec'
if 'train' in args.bench:
sort_key = 'train_samples_per_sec'
elif 'profile' in args.bench:
sort_key = 'infer_gmacs'
results = sorted(results, key=lambda x: x[sort_key], reverse=True)
if len(results):
write_results(results_file, results)
else:
results = benchmark(args)
json_str = json.dumps(results, indent=4)
print(json_str)
def write_results(results_file, results):
with open(results_file, mode='w') as cf:
dw = csv.DictWriter(cf, fieldnames=results[0].keys())
dw.writeheader()
for r in results:
dw.writerow(r)
cf.flush()
if __name__ == '__main__':
main()
| 24,750 | 39.978477 | 137 | py |
RandStainNA | RandStainNA-master/classification/setup.py | """ Setup
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
exec(open('timm/version.py').read())
setup(
name='timm',
version=__version__,
description='(Unofficial) PyTorch Image Models',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/rwightman/pytorch-image-models',
author='Ross Wightman',
author_email='hello@rwightman.com',
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
# Note that this is a string of words separated by whitespace, not a list.
keywords='pytorch pretrained models efficientnet mobilenetv3 mnasnet',
packages=find_packages(exclude=['convert', 'tests', 'results']),
include_package_data=True,
install_requires=['torch >= 1.4', 'torchvision'],
python_requires='>=3.6',
)
| 1,772 | 35.183673 | 78 | py |
RandStainNA | RandStainNA-master/classification/hubconf.py | dependencies = ['torch']
from timm.models import registry
globals().update(registry._model_entrypoints)
| 105 | 20.2 | 45 | py |
RandStainNA | RandStainNA-master/classification/validate.py | #!/usr/bin/env python3
""" ImageNet Validation Script
This is intended to be a lean and easily modifiable ImageNet validation script for evaluating pretrained
models or training checkpoints against ImageNet or similarly organized image datasets. It prioritizes
canonical PyTorch, standard Python style, and good performance. Repurpose as you see fit.
Hacked together by Ross Wightman (https://github.com/rwightman)
"""
import argparse
import os
import csv
import glob
import time
import logging
import torch
import torch.nn as nn
import torch.nn.parallel
from collections import OrderedDict
from contextlib import suppress
from timm.models import create_model, apply_test_time_pool, load_checkpoint, is_model, list_models
from timm.data import create_dataset, create_loader, resolve_data_config, RealLabelsImagenet
from timm.utils import accuracy, AverageMeter, natural_key, setup_default_logging, set_jit_fuser
has_apex = False
try:
from apex import amp
has_apex = True
except ImportError:
pass
has_native_amp = False
try:
if getattr(torch.cuda.amp, 'autocast') is not None:
has_native_amp = True
except AttributeError:
pass
torch.backends.cudnn.benchmark = True
_logger = logging.getLogger('validate')
parser = argparse.ArgumentParser(description='PyTorch ImageNet Validation')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--dataset', '-d', metavar='NAME', default='',
help='dataset type (default: ImageFolder/ImageTar if empty)')
parser.add_argument('--split', metavar='NAME', default='validation',
help='dataset split (default: validation)')
parser.add_argument('--dataset-download', action='store_true', default=False,
help='Allow download of dataset for torch/ and tfds/ datasets that support it.')
parser.add_argument('--model', '-m', metavar='NAME', default='dpn92',
help='model architecture (default: dpn92)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--img-size', default=None, type=int,
metavar='N', help='Input image dimension, uses model default if empty')
parser.add_argument('--input-size', default=None, nargs=3, type=int,
metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
parser.add_argument('--crop-pct', default=None, type=float,
metavar='N', help='Input image center crop pct')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
                    help='Override std deviation of the dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('--num-classes', type=int, default=None,
help='Number classes in dataset')
parser.add_argument('--class-map', default='', type=str, metavar='FILENAME',
help='path to class to idx mapping file (default: "")')
parser.add_argument('--gp', default=None, type=str, metavar='POOL',
help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.')
parser.add_argument('--log-freq', default=10, type=int,
metavar='N', help='batch logging frequency (default: 10)')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--num-gpu', type=int, default=1,
help='Number of GPUS to use')
parser.add_argument('--test-pool', dest='test_pool', action='store_true',
help='enable test time pool')
parser.add_argument('--no-prefetcher', action='store_true', default=False,
help='disable fast prefetcher')
parser.add_argument('--pin-mem', action='store_true', default=False,
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--channels-last', action='store_true', default=False,
help='Use channels_last memory layout')
parser.add_argument('--amp', action='store_true', default=False,
help='Use AMP mixed precision. Defaults to Apex, fallback to native Torch AMP.')
parser.add_argument('--apex-amp', action='store_true', default=False,
help='Use NVIDIA Apex AMP mixed precision')
parser.add_argument('--native-amp', action='store_true', default=False,
help='Use Native Torch AMP mixed precision')
parser.add_argument('--tf-preprocessing', action='store_true', default=False,
                    help='Use Tensorflow preprocessing pipeline (requires CPU TF installed)')
parser.add_argument('--use-ema', dest='use_ema', action='store_true',
help='use ema version of weights if present')
parser.add_argument('--torchscript', dest='torchscript', action='store_true',
                    help='convert model to torchscript for inference')
parser.add_argument('--fuser', default='', type=str,
help="Select jit fuser. One of ('', 'te', 'old', 'nvfuser')")
parser.add_argument('--results-file', default='', type=str, metavar='FILENAME',
help='Output csv file for validation results (summary)')
parser.add_argument('--real-labels', default='', type=str, metavar='FILENAME',
help='Real labels JSON file for imagenet evaluation')
parser.add_argument('--valid-labels', default='', type=str, metavar='FILENAME',
help='Valid label indices txt file for validation of partial label space')
def validate(args):
# might as well try to validate something
args.pretrained = args.pretrained or not args.checkpoint
args.prefetcher = not args.no_prefetcher
amp_autocast = suppress # do nothing
if args.amp:
if has_native_amp:
args.native_amp = True
elif has_apex:
args.apex_amp = True
else:
_logger.warning("Neither APEX or Native Torch AMP is available.")
assert not args.apex_amp or not args.native_amp, "Only one AMP mode should be set."
if args.native_amp:
amp_autocast = torch.cuda.amp.autocast
_logger.info('Validating in mixed precision with native PyTorch AMP.')
elif args.apex_amp:
_logger.info('Validating in mixed precision with NVIDIA APEX AMP.')
else:
_logger.info('Validating in float32. AMP not enabled.')
if args.fuser:
set_jit_fuser(args.fuser)
# create model
model = create_model(
args.model,
pretrained=args.pretrained,
num_classes=args.num_classes,
in_chans=3,
global_pool=args.gp,
scriptable=args.torchscript)
if args.num_classes is None:
assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.'
args.num_classes = model.num_classes
if args.checkpoint:
load_checkpoint(model, args.checkpoint, args.use_ema)
param_count = sum([m.numel() for m in model.parameters()])
_logger.info('Model %s created, param count: %d' % (args.model, param_count))
data_config = resolve_data_config(vars(args), model=model, use_test_size=True, verbose=True)
test_time_pool = False
if args.test_pool:
model, test_time_pool = apply_test_time_pool(model, data_config, use_test_size=True)
if args.torchscript:
torch.jit.optimized_execution(True)
model = torch.jit.script(model)
model = model.cuda()
if args.apex_amp:
model = amp.initialize(model, opt_level='O1')
if args.channels_last:
model = model.to(memory_format=torch.channels_last)
if args.num_gpu > 1:
model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu)))
criterion = nn.CrossEntropyLoss().cuda()
dataset = create_dataset(
root=args.data, name=args.dataset, split=args.split,
download=args.dataset_download, load_bytes=args.tf_preprocessing, class_map=args.class_map)
if args.valid_labels:
with open(args.valid_labels, 'r') as f:
valid_labels = {int(line.rstrip()) for line in f}
valid_labels = [i in valid_labels for i in range(args.num_classes)]
else:
valid_labels = None
if args.real_labels:
real_labels = RealLabelsImagenet(dataset.filenames(basename=True), real_json=args.real_labels)
else:
real_labels = None
crop_pct = 1.0 if test_time_pool else data_config['crop_pct']
loader = create_loader(
dataset,
input_size=data_config['input_size'],
batch_size=args.batch_size,
use_prefetcher=args.prefetcher,
interpolation=data_config['interpolation'],
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
crop_pct=crop_pct,
pin_memory=args.pin_mem,
tf_preprocessing=args.tf_preprocessing)
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
model.eval()
with torch.no_grad():
# warmup, reduce variability of first batch time, especially for comparing torchscript vs non
input = torch.randn((args.batch_size,) + tuple(data_config['input_size'])).cuda()
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
model(input)
end = time.time()
for batch_idx, (input, target) in enumerate(loader):
if args.no_prefetcher:
target = target.cuda()
input = input.cuda()
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
# compute output
with amp_autocast():
output = model(input)
if valid_labels is not None:
output = output[:, valid_labels]
loss = criterion(output, target)
if real_labels is not None:
real_labels.add_result(output)
# measure accuracy and record loss
acc1, acc5 = accuracy(output.detach(), target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(acc1.item(), input.size(0))
top5.update(acc5.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if batch_idx % args.log_freq == 0:
_logger.info(
'Test: [{0:>4d}/{1}] '
'Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '
'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) '
'Acc@1: {top1.val:>7.3f} ({top1.avg:>7.3f}) '
'Acc@5: {top5.val:>7.3f} ({top5.avg:>7.3f})'.format(
batch_idx, len(loader), batch_time=batch_time,
rate_avg=input.size(0) / batch_time.avg,
loss=losses, top1=top1, top5=top5))
if real_labels is not None:
# real labels mode replaces topk values at the end
top1a, top5a = real_labels.get_accuracy(k=1), real_labels.get_accuracy(k=5)
else:
top1a, top5a = top1.avg, top5.avg
results = OrderedDict(
top1=round(top1a, 4), top1_err=round(100 - top1a, 4),
top5=round(top5a, 4), top5_err=round(100 - top5a, 4),
param_count=round(param_count / 1e6, 2),
img_size=data_config['input_size'][-1],
        crop_pct=crop_pct,
interpolation=data_config['interpolation'])
_logger.info(' * Acc@1 {:.3f} ({:.3f}) Acc@5 {:.3f} ({:.3f})'.format(
results['top1'], results['top1_err'], results['top5'], results['top5_err']))
return results
def main():
setup_default_logging()
args = parser.parse_args()
model_cfgs = []
model_names = []
if os.path.isdir(args.checkpoint):
# validate all checkpoints in a path with same model
checkpoints = glob.glob(args.checkpoint + '/*.pth.tar')
checkpoints += glob.glob(args.checkpoint + '/*.pth')
model_names = list_models(args.model)
model_cfgs = [(args.model, c) for c in sorted(checkpoints, key=natural_key)]
else:
if args.model == 'all':
# validate all models in a list of names with pretrained checkpoints
args.pretrained = True
model_names = list_models(pretrained=True, exclude_filters=['*_in21k', '*_in22k', '*_dino'])
model_cfgs = [(n, '') for n in model_names]
elif not is_model(args.model):
# model name doesn't exist, try as wildcard filter
model_names = list_models(args.model)
model_cfgs = [(n, '') for n in model_names]
if not model_cfgs and os.path.isfile(args.model):
with open(args.model) as f:
model_names = [line.rstrip() for line in f]
model_cfgs = [(n, None) for n in model_names if n]
if len(model_cfgs):
results_file = args.results_file or './results-all.csv'
_logger.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names)))
results = []
try:
start_batch_size = args.batch_size
for m, c in model_cfgs:
batch_size = start_batch_size
args.model = m
args.checkpoint = c
result = OrderedDict(model=args.model)
r = {}
while not r and batch_size >= args.num_gpu:
torch.cuda.empty_cache()
try:
args.batch_size = batch_size
print('Validating with batch size: %d' % args.batch_size)
r = validate(args)
except RuntimeError as e:
if batch_size <= args.num_gpu:
print("Validation failed with no ability to reduce batch size. Exiting.")
raise e
batch_size = max(batch_size // 2, args.num_gpu)
print("Validation failed, reducing batch size by 50%")
result.update(r)
if args.checkpoint:
result['checkpoint'] = args.checkpoint
results.append(result)
except KeyboardInterrupt as e:
pass
results = sorted(results, key=lambda x: x['top1'], reverse=True)
if len(results):
write_results(results_file, results)
else:
validate(args)
def write_results(results_file, results):
with open(results_file, mode='w') as cf:
dw = csv.DictWriter(cf, fieldnames=results[0].keys())
dw.writeheader()
for r in results:
dw.writerow(r)
cf.flush()
if __name__ == '__main__':
main()
| 15,574 | 42.997175 | 137 | py |
RandStainNA | RandStainNA-master/classification/clean_checkpoint.py | #!/usr/bin/env python3
""" Checkpoint Cleaning Script
Takes training checkpoints with GPU tensors, optimizer state, extra dict keys, etc.
and outputs a CPU tensor checkpoint with only the `state_dict` along with SHA256
calculation for model zoo compatibility.
Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
"""
import torch
import argparse
import os
import hashlib
import shutil
from collections import OrderedDict
from timm.models.helpers import load_state_dict
parser = argparse.ArgumentParser(description='PyTorch Checkpoint Cleaner')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--output', default='', type=str, metavar='PATH',
help='output path')
parser.add_argument('--no-use-ema', dest='no_use_ema', action='store_true',
                    help='do not use the EMA version of weights even if present')
parser.add_argument('--clean-aux-bn', dest='clean_aux_bn', action='store_true',
help='remove auxiliary batch norm layers (from SplitBN training) from checkpoint')
_TEMP_NAME = './_checkpoint.pth'
def main():
args = parser.parse_args()
if os.path.exists(args.output):
print("Error: Output filename ({}) already exists.".format(args.output))
exit(1)
clean_checkpoint(args.checkpoint, args.output, not args.no_use_ema, args.clean_aux_bn)
def clean_checkpoint(checkpoint, output='', use_ema=True, clean_aux_bn=False):
# Load an existing checkpoint to CPU, strip everything but the state_dict and re-save
if checkpoint and os.path.isfile(checkpoint):
print("=> Loading checkpoint '{}'".format(checkpoint))
state_dict = load_state_dict(checkpoint, use_ema=use_ema)
new_state_dict = {}
for k, v in state_dict.items():
if clean_aux_bn and 'aux_bn' in k:
# If all aux_bn keys are removed, the SplitBN layers will end up as normal and
# load with the unmodified model using BatchNorm2d.
continue
name = k[7:] if k.startswith('module') else k
new_state_dict[name] = v
print("=> Loaded state_dict from '{}'".format(checkpoint))
try:
torch.save(new_state_dict, _TEMP_NAME, _use_new_zipfile_serialization=False)
except:
torch.save(new_state_dict, _TEMP_NAME)
with open(_TEMP_NAME, 'rb') as f:
sha_hash = hashlib.sha256(f.read()).hexdigest()
if output:
checkpoint_root, checkpoint_base = os.path.split(output)
checkpoint_base = os.path.splitext(checkpoint_base)[0]
else:
checkpoint_root = ''
checkpoint_base = os.path.splitext(checkpoint)[0]
final_filename = '-'.join([checkpoint_base, sha_hash[:8]]) + '.pth'
shutil.move(_TEMP_NAME, os.path.join(checkpoint_root, final_filename))
print("=> Saved state_dict to '{}, SHA256: {}'".format(final_filename, sha_hash))
return final_filename
else:
print("Error: Checkpoint ({}) doesn't exist".format(checkpoint))
return ''
if __name__ == '__main__':
main()
| 3,229 | 38.876543 | 102 | py |
RandStainNA | RandStainNA-master/classification/avg_checkpoints.py | #!/usr/bin/env python3
""" Checkpoint Averaging Script
This script averages all model weights for checkpoints in specified path that match
the specified filter wildcard. All checkpoints must be from the exact same model.
For any hope of decent results, the checkpoints should be from the same or child
(via resumes) training session. This can be viewed as similar to maintaining running
EMA (exponential moving average) of the model weights or performing SWA (stochastic
weight averaging), but post-training.
Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
"""
import torch
import argparse
import os
import glob
import hashlib
from timm.models.helpers import load_state_dict
parser = argparse.ArgumentParser(description='PyTorch Checkpoint Averager')
parser.add_argument('--input', default='', type=str, metavar='PATH',
help='path to base input folder containing checkpoints')
parser.add_argument('--filter', default='*.pth.tar', type=str, metavar='WILDCARD',
help='checkpoint filter (path wildcard)')
parser.add_argument('--output', default='./averaged.pth', type=str, metavar='PATH',
help='output filename')
parser.add_argument('--no-use-ema', dest='no_use_ema', action='store_true',
help='Force not using ema version of weights (if present)')
parser.add_argument('--no-sort', dest='no_sort', action='store_true',
help='Do not sort and select by checkpoint metric, also makes "n" argument irrelevant')
parser.add_argument('-n', type=int, default=10, metavar='N',
help='Number of checkpoints to average')
def checkpoint_metric(checkpoint_path):
if not checkpoint_path or not os.path.isfile(checkpoint_path):
return {}
print("=> Extracting metric from checkpoint '{}'".format(checkpoint_path))
checkpoint = torch.load(checkpoint_path, map_location='cpu')
metric = None
if 'metric' in checkpoint:
metric = checkpoint['metric']
elif 'metrics' in checkpoint and 'metric_name' in checkpoint:
metrics = checkpoint['metrics']
print(metrics)
metric = metrics[checkpoint['metric_name']]
return metric
def main():
args = parser.parse_args()
# by default use the EMA weights (if present)
args.use_ema = not args.no_use_ema
# by default sort by checkpoint metric (if present) and avg top n checkpoints
args.sort = not args.no_sort
if os.path.exists(args.output):
print("Error: Output filename ({}) already exists.".format(args.output))
exit(1)
pattern = args.input
if not args.input.endswith(os.path.sep) and not args.filter.startswith(os.path.sep):
pattern += os.path.sep
pattern += args.filter
checkpoints = glob.glob(pattern, recursive=True)
if args.sort:
checkpoint_metrics = []
for c in checkpoints:
metric = checkpoint_metric(c)
if metric is not None:
checkpoint_metrics.append((metric, c))
checkpoint_metrics = list(sorted(checkpoint_metrics))
checkpoint_metrics = checkpoint_metrics[-args.n:]
print("Selected checkpoints:")
[print(m, c) for m, c in checkpoint_metrics]
avg_checkpoints = [c for m, c in checkpoint_metrics]
else:
avg_checkpoints = checkpoints
print("Selected checkpoints:")
[print(c) for c in checkpoints]
avg_state_dict = {}
avg_counts = {}
for c in avg_checkpoints:
new_state_dict = load_state_dict(c, args.use_ema)
if not new_state_dict:
print("Error: Checkpoint ({}) doesn't exist".format(args.checkpoint))
continue
for k, v in new_state_dict.items():
if k not in avg_state_dict:
avg_state_dict[k] = v.clone().to(dtype=torch.float64)
avg_counts[k] = 1
else:
avg_state_dict[k] += v.to(dtype=torch.float64)
avg_counts[k] += 1
for k, v in avg_state_dict.items():
v.div_(avg_counts[k])
# float32 overflow seems unlikely based on weights seen to date, but who knows
float32_info = torch.finfo(torch.float32)
final_state_dict = {}
for k, v in avg_state_dict.items():
v = v.clamp(float32_info.min, float32_info.max)
final_state_dict[k] = v.to(dtype=torch.float32)
try:
torch.save(final_state_dict, args.output, _use_new_zipfile_serialization=False)
except:
torch.save(final_state_dict, args.output)
with open(args.output, 'rb') as f:
sha_hash = hashlib.sha256(f.read()).hexdigest()
print("=> Saved state_dict to '{}, SHA256: {}'".format(args.output, sha_hash))
if __name__ == '__main__':
main()
| 4,772 | 38.122951 | 107 | py |
RandStainNA | RandStainNA-master/classification/train_origin.py | #!/usr/bin/env python3
""" ImageNet Training Script
This is intended to be a lean and easily modifiable ImageNet training script that reproduces ImageNet
training results with some of the latest networks and training techniques. It favours canonical PyTorch
and standard Python style over trying to be able to 'do it all.' That said, it offers quite a few speed
and training result improvements over the usual PyTorch example scripts. Repurpose as you see fit.
This script was started from an early version of the PyTorch ImageNet example
(https://github.com/pytorch/examples/tree/master/imagenet)
NVIDIA CUDA specific speedups adopted from NVIDIA Apex examples
(https://github.com/NVIDIA/apex/tree/master/examples/imagenet)
Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)
"""
import argparse
import time
import yaml
import os
import logging
from collections import OrderedDict
from contextlib import suppress
from datetime import datetime
import torch
import torch.nn as nn
import torchvision.utils
from torch.nn.parallel import DistributedDataParallel as NativeDDP
from timm.data import create_dataset, create_loader, resolve_data_config, Mixup, FastCollateMixup, AugMixDataset
from timm.models import create_model, safe_model_name, resume_checkpoint, load_checkpoint,\
convert_splitbn_model, model_parameters
from timm.utils import *
from timm.loss import *
from timm.optim import create_optimizer_v2, optimizer_kwargs
from timm.scheduler import create_scheduler
from timm.utils import ApexScaler, NativeScaler
try:
from apex import amp
from apex.parallel import DistributedDataParallel as ApexDDP
from apex.parallel import convert_syncbn_model
has_apex = True
except ImportError:
has_apex = False
has_native_amp = False
try:
if getattr(torch.cuda.amp, 'autocast') is not None:
has_native_amp = True
except AttributeError:
pass
try:
import wandb
has_wandb = True
except ImportError:
has_wandb = False
torch.backends.cudnn.benchmark = True
_logger = logging.getLogger('train')
# The first arg parser parses out only the --config argument, this argument is used to
# load a yaml file containing key-values that override the defaults for the main parser below
config_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False)
parser.add_argument('-c', '--config', default='', type=str, metavar='FILE',
help='YAML config file specifying default arguments')
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
# Dataset parameters
parser.add_argument('data_dir', metavar='DIR',
help='path to dataset')
parser.add_argument('--dataset', '-d', metavar='NAME', default='',
help='dataset type (default: ImageFolder/ImageTar if empty)')
parser.add_argument('--train-split', metavar='NAME', default='train',
help='dataset train split (default: train)')
parser.add_argument('--val-split', metavar='NAME', default='validation',
help='dataset validation split (default: validation)')
parser.add_argument('--dataset-download', action='store_true', default=False,
help='Allow download of dataset for torch/ and tfds/ datasets that support it.')
parser.add_argument('--class-map', default='', type=str, metavar='FILENAME',
help='path to class to idx mapping file (default: "")')
# Model parameters
parser.add_argument('--model', default='resnet50', type=str, metavar='MODEL',
                    help='Name of model to train (default: "resnet50")')
parser.add_argument('--pretrained', action='store_true', default=False,
help='Start with pretrained version of specified network (if avail)')
parser.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH',
help='Initialize model from this checkpoint (default: none)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='Resume full model and optimizer state from checkpoint (default: none)')
parser.add_argument('--no-resume-opt', action='store_true', default=False,
help='prevent resume of optimizer state when resuming model')
parser.add_argument('--num-classes', type=int, default=None, metavar='N',
help='number of label classes (Model default if None)')
parser.add_argument('--gp', default=None, type=str, metavar='POOL',
help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.')
parser.add_argument('--img-size', type=int, default=None, metavar='N',
help='Image patch size (default: None => model default)')
parser.add_argument('--input-size', default=None, nargs=3, type=int,
metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')
parser.add_argument('--crop-pct', default=None, type=float,
metavar='N', help='Input image center crop percent (for validation only)')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
                    help='Override std deviation of the dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('-b', '--batch-size', type=int, default=128, metavar='N',
help='input batch size for training (default: 128)')
parser.add_argument('-vb', '--validation-batch-size', type=int, default=None, metavar='N',
help='validation batch size override (default: None)')
# Optimizer parameters
parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER',
                    help='Optimizer (default: "sgd")')
parser.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: None, use opt default)')
parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='Optimizer momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=2e-5,
help='weight decay (default: 2e-5)')
parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--clip-mode', type=str, default='norm',
help='Gradient clipping mode. One of ("norm", "value", "agc")')
# Learning rate schedule parameters
parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
                    help='LR scheduler (default: "cosine")')
parser.add_argument('--lr', type=float, default=0.05, metavar='LR',
help='learning rate (default: 0.05)')
parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
help='learning rate noise on/off epoch percentages')
parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
help='learning rate noise limit percent (default: 0.67)')
parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
help='learning rate noise std-dev (default: 1.0)')
parser.add_argument('--lr-cycle-mul', type=float, default=1.0, metavar='MULT',
help='learning rate cycle len multiplier (default: 1.0)')
parser.add_argument('--lr-cycle-decay', type=float, default=0.5, metavar='MULT',
help='amount to decay each learning rate cycle (default: 0.5)')
parser.add_argument('--lr-cycle-limit', type=int, default=1, metavar='N',
help='learning rate cycle limit, cycles enabled if > 1')
parser.add_argument('--lr-k-decay', type=float, default=1.0,
help='learning rate k-decay for cosine/poly (default: 1.0)')
parser.add_argument('--warmup-lr', type=float, default=0.0001, metavar='LR',
help='warmup learning rate (default: 0.0001)')
parser.add_argument('--min-lr', type=float, default=1e-6, metavar='LR',
                    help='lower lr bound for cyclic schedulers that hit 0 (default: 1e-6)')
parser.add_argument('--epochs', type=int, default=300, metavar='N',
help='number of epochs to train (default: 300)')
parser.add_argument('--epoch-repeats', type=float, default=0., metavar='N',
help='epoch repeat multiplier (number of times to repeat dataset epoch per train epoch).')
parser.add_argument('--start-epoch', default=None, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--decay-epochs', type=float, default=100, metavar='N',
help='epoch interval to decay LR')
parser.add_argument('--warmup-epochs', type=int, default=3, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
                    help='patience epochs for Plateau LR scheduler (default: 10)')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
help='LR decay rate (default: 0.1)')
# Augmentation & regularization parameters
parser.add_argument('--no-aug', action='store_true', default=False,
help='Disable all training augmentation, override other train aug args')
parser.add_argument('--scale', type=float, nargs='+', default=[0.08, 1.0], metavar='PCT',
help='Random resize scale (default: 0.08 1.0)')
parser.add_argument('--ratio', type=float, nargs='+', default=[3./4., 4./3.], metavar='RATIO',
help='Random resize aspect ratio (default: 0.75 1.33)')
parser.add_argument('--hflip', type=float, default=0.5,
help='Horizontal flip training aug probability')
parser.add_argument('--vflip', type=float, default=0.,
help='Vertical flip training aug probability')
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default=None, metavar='NAME',
                    help='Use AutoAugment policy. "v0" or "original". (default: None)')
parser.add_argument('--aug-repeats', type=int, default=0,
help='Number of augmentation repetitions (distributed training only) (default: 0)')
parser.add_argument('--aug-splits', type=int, default=0,
help='Number of augmentation splits (default: 0, valid: 0 or >=2)')
parser.add_argument('--jsd-loss', action='store_true', default=False,
help='Enable Jensen-Shannon Divergence + CE loss. Use with `--aug-splits`.')
parser.add_argument('--bce-loss', action='store_true', default=False,
help='Enable BCE loss w/ Mixup/CutMix use.')
parser.add_argument('--bce-target-thresh', type=float, default=None,
help='Threshold for binarizing softened BCE targets (default: None, disabled)')
parser.add_argument('--reprob', type=float, default=0., metavar='PCT',
help='Random erase prob (default: 0.)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
parser.add_argument('--mixup', type=float, default=0.0,
help='mixup alpha, mixup enabled if > 0. (default: 0.)')
parser.add_argument('--cutmix', type=float, default=0.0,
help='cutmix alpha, cutmix enabled if > 0. (default: 0.)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
parser.add_argument('--mixup-off-epoch', default=0, type=int, metavar='N',
help='Turn off mixup after this epoch, disabled if 0 (default: 0)')
parser.add_argument('--smoothing', type=float, default=0.1,
help='Label smoothing (default: 0.1)')
parser.add_argument('--train-interpolation', type=str, default='random',
help='Training interpolation (random, bilinear, bicubic default: "random")')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--drop-connect', type=float, default=None, metavar='PCT',
help='Drop connect rate, DEPRECATED, use drop-path (default: None)')
parser.add_argument('--drop-path', type=float, default=None, metavar='PCT',
help='Drop path rate (default: None)')
parser.add_argument('--drop-block', type=float, default=None, metavar='PCT',
help='Drop block rate (default: None)')
# Batch norm parameters (only works with gen_efficientnet based models currently)
parser.add_argument('--bn-momentum', type=float, default=None,
help='BatchNorm momentum override (if not None)')
parser.add_argument('--bn-eps', type=float, default=None,
help='BatchNorm epsilon override (if not None)')
parser.add_argument('--sync-bn', action='store_true',
help='Enable NVIDIA Apex or Torch synchronized BatchNorm.')
parser.add_argument('--dist-bn', type=str, default='reduce',
help='Distribute BatchNorm stats between nodes after each epoch ("broadcast", "reduce", or "")')
parser.add_argument('--split-bn', action='store_true',
help='Enable separate BN layers per augmentation split.')
# Model Exponential Moving Average
parser.add_argument('--model-ema', action='store_true', default=False,
help='Enable tracking moving average of model weights')
parser.add_argument('--model-ema-force-cpu', action='store_true', default=False,
help='Force ema to be tracked on CPU, rank=0 node only. Disables EMA validation.')
parser.add_argument('--model-ema-decay', type=float, default=0.9998,
help='decay factor for model weights moving average (default: 0.9998)')
# Misc
parser.add_argument('--seed', type=int, default=42, metavar='S',
help='random seed (default: 42)')
parser.add_argument('--worker-seeding', type=str, default='all',
help='worker seed mode (default: all)')
parser.add_argument('--log-interval', type=int, default=50, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--recovery-interval', type=int, default=0, metavar='N',
help='how many batches to wait before writing recovery checkpoint')
parser.add_argument('--checkpoint-hist', type=int, default=10, metavar='N',
help='number of checkpoints to keep (default: 10)')
parser.add_argument('-j', '--workers', type=int, default=4, metavar='N',
help='how many training processes to use (default: 4)')
parser.add_argument('--save-images', action='store_true', default=False,
                    help='save images of input batches every log interval for debugging')
parser.add_argument('--amp', action='store_true', default=False,
help='use NVIDIA Apex AMP or Native AMP for mixed precision training')
parser.add_argument('--apex-amp', action='store_true', default=False,
help='Use NVIDIA Apex AMP mixed precision')
parser.add_argument('--native-amp', action='store_true', default=False,
help='Use Native Torch AMP mixed precision')
parser.add_argument('--no-ddp-bb', action='store_true', default=False,
help='Force broadcast buffers for native DDP to off.')
parser.add_argument('--channels-last', action='store_true', default=False,
help='Use channels_last memory layout')
parser.add_argument('--pin-mem', action='store_true', default=False,
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no-prefetcher', action='store_true', default=False,
help='disable fast prefetcher')
parser.add_argument('--output', default='', type=str, metavar='PATH',
help='path to output folder (default: none, current dir)')
parser.add_argument('--experiment', default='', type=str, metavar='NAME',
help='name of train experiment, name of sub-folder for output')
parser.add_argument('--eval-metric', default='top1', type=str, metavar='EVAL_METRIC',
                    help='Best metric (default: "top1")')
parser.add_argument('--tta', type=int, default=0, metavar='N',
help='Test/inference time augmentation (oversampling) factor. 0=None (default: 0)')
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument('--use-multi-epochs-loader', action='store_true', default=False,
help='use the multi-epochs-loader to save time at the beginning of every epoch')
parser.add_argument('--torchscript', dest='torchscript', action='store_true',
                    help='convert model to torchscript for inference')
parser.add_argument('--fuser', default='', type=str,
help="Select jit fuser. One of ('', 'te', 'old', 'nvfuser')")
parser.add_argument('--log-wandb', action='store_true', default=False,
help='log training and validation metrics to wandb')
def _parse_args():
# Do we have a config file to parse?
args_config, remaining = config_parser.parse_known_args()
if args_config.config:
with open(args_config.config, 'r') as f:
cfg = yaml.safe_load(f)
parser.set_defaults(**cfg)
# The main arg parser parses the rest of the args, the usual
# defaults will have been overridden if config file specified.
args = parser.parse_args(remaining)
# Cache the args as a text string to save them in the output dir later
args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
return args, args_text
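# Editor's note: the -c/--config mechanism above loads a YAML mapping and installs it as new
# argparse defaults via parser.set_defaults(**cfg), so keys must match the argument dest names
# (long option names with dashes replaced by underscores). A minimal, hypothetical config file:
#   model: resnet50
#   batch_size: 128
#   lr: 0.05
#   epochs: 300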
def main():
setup_default_logging()
args, args_text = _parse_args()
if args.log_wandb:
if has_wandb:
wandb.init(project=args.experiment, config=args)
else:
_logger.warning("You've requested to log metrics to wandb but package not found. "
"Metrics not being logged to wandb, try `pip install wandb`")
args.prefetcher = not args.no_prefetcher
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
args.device = 'cuda:0'
args.world_size = 1
args.rank = 0 # global rank
if args.distributed:
args.device = 'cuda:%d' % args.local_rank
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
args.world_size = torch.distributed.get_world_size()
args.rank = torch.distributed.get_rank()
_logger.info('Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d.'
% (args.rank, args.world_size))
else:
        _logger.info('Training with a single process on 1 GPU.')
assert args.rank >= 0
# resolve AMP arguments based on PyTorch / Apex availability
use_amp = None
if args.amp:
# `--amp` chooses native amp before apex (APEX ver not actively maintained)
if has_native_amp:
args.native_amp = True
elif has_apex:
args.apex_amp = True
if args.apex_amp and has_apex:
use_amp = 'apex'
elif args.native_amp and has_native_amp:
use_amp = 'native'
elif args.apex_amp or args.native_amp:
_logger.warning("Neither APEX or native Torch AMP is available, using float32. "
"Install NVIDA apex or upgrade to PyTorch 1.6")
random_seed(args.seed, args.rank)
if args.fuser:
set_jit_fuser(args.fuser)
model = create_model(
args.model,
pretrained=args.pretrained,
num_classes=args.num_classes,
drop_rate=args.drop,
drop_connect_rate=args.drop_connect, # DEPRECATED, use drop_path
drop_path_rate=args.drop_path,
drop_block_rate=args.drop_block,
global_pool=args.gp,
bn_momentum=args.bn_momentum,
bn_eps=args.bn_eps,
scriptable=args.torchscript,
checkpoint_path=args.initial_checkpoint)
if args.num_classes is None:
assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.'
args.num_classes = model.num_classes # FIXME handle model default vs config num_classes more elegantly
if args.local_rank == 0:
_logger.info(
f'Model {safe_model_name(args.model)} created, param count:{sum([m.numel() for m in model.parameters()])}')
data_config = resolve_data_config(vars(args), model=model, verbose=args.local_rank == 0)
# setup augmentation batch splits for contrastive loss or split bn
num_aug_splits = 0
if args.aug_splits > 0:
assert args.aug_splits > 1, 'A split of 1 makes no sense'
num_aug_splits = args.aug_splits
# enable split bn (separate bn stats per batch-portion)
if args.split_bn:
assert num_aug_splits > 1 or args.resplit
model = convert_splitbn_model(model, max(num_aug_splits, 2))
# move model to GPU, enable channels last layout if set
model.cuda()
if args.channels_last:
model = model.to(memory_format=torch.channels_last)
# setup synchronized BatchNorm for distributed training
if args.distributed and args.sync_bn:
assert not args.split_bn
if has_apex and use_amp == 'apex':
# Apex SyncBN preferred unless native amp is activated
model = convert_syncbn_model(model)
else:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
if args.local_rank == 0:
_logger.info(
'Converted model to use Synchronized BatchNorm. WARNING: You may have issues if using '
'zero initialized BN layers (enabled by default for ResNets) while sync-bn enabled.')
if args.torchscript:
assert not use_amp == 'apex', 'Cannot use APEX AMP with torchscripted model'
assert not args.sync_bn, 'Cannot use SyncBatchNorm with torchscripted model'
model = torch.jit.script(model)
optimizer = create_optimizer_v2(model, **optimizer_kwargs(cfg=args))
# setup automatic mixed-precision (AMP) loss scaling and op casting
amp_autocast = suppress # do nothing
loss_scaler = None
if use_amp == 'apex':
model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
loss_scaler = ApexScaler()
if args.local_rank == 0:
_logger.info('Using NVIDIA APEX AMP. Training in mixed precision.')
elif use_amp == 'native':
amp_autocast = torch.cuda.amp.autocast
loss_scaler = NativeScaler()
if args.local_rank == 0:
_logger.info('Using native Torch AMP. Training in mixed precision.')
else:
if args.local_rank == 0:
_logger.info('AMP not enabled. Training in float32.')
# optionally resume from a checkpoint
resume_epoch = None
if args.resume:
resume_epoch = resume_checkpoint(
model, args.resume,
optimizer=None if args.no_resume_opt else optimizer,
loss_scaler=None if args.no_resume_opt else loss_scaler,
log_info=args.local_rank == 0)
# setup exponential moving average of model weights, SWA could be used here too
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEmaV2(
model, decay=args.model_ema_decay, device='cpu' if args.model_ema_force_cpu else None)
if args.resume:
load_checkpoint(model_ema.module, args.resume, use_ema=True)
# setup distributed training
if args.distributed:
if has_apex and use_amp == 'apex':
# Apex DDP preferred unless native amp is activated
if args.local_rank == 0:
_logger.info("Using NVIDIA APEX DistributedDataParallel.")
model = ApexDDP(model, delay_allreduce=True)
else:
if args.local_rank == 0:
_logger.info("Using native Torch DistributedDataParallel.")
model = NativeDDP(model, device_ids=[args.local_rank], broadcast_buffers=not args.no_ddp_bb)
# NOTE: EMA model does not need to be wrapped by DDP
# setup learning rate schedule and starting epoch
lr_scheduler, num_epochs = create_scheduler(args, optimizer)
start_epoch = 0
if args.start_epoch is not None:
# a specified start_epoch will always override the resume epoch
start_epoch = args.start_epoch
elif resume_epoch is not None:
start_epoch = resume_epoch
if lr_scheduler is not None and start_epoch > 0:
lr_scheduler.step(start_epoch)
if args.local_rank == 0:
_logger.info('Scheduled epochs: {}'.format(num_epochs))
# create the train and eval datasets
dataset_train = create_dataset(
args.dataset, root=args.data_dir, split=args.train_split, is_training=True,
class_map=args.class_map,
download=args.dataset_download,
batch_size=args.batch_size,
repeats=args.epoch_repeats)
dataset_eval = create_dataset(
args.dataset, root=args.data_dir, split=args.val_split, is_training=False,
class_map=args.class_map,
download=args.dataset_download,
batch_size=args.batch_size)
# setup mixup / cutmix
collate_fn = None
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
mixup_args = dict(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.num_classes)
if args.prefetcher:
assert not num_aug_splits # collate conflict (need to support deinterleaving in collate mixup)
collate_fn = FastCollateMixup(**mixup_args)
else:
mixup_fn = Mixup(**mixup_args)
# wrap dataset in AugMix helper
if num_aug_splits > 1:
dataset_train = AugMixDataset(dataset_train, num_splits=num_aug_splits)
    # create data loaders w/ augmentation pipeline
train_interpolation = args.train_interpolation
if args.no_aug or not train_interpolation:
train_interpolation = data_config['interpolation']
loader_train = create_loader(
dataset_train,
input_size=data_config['input_size'],
batch_size=args.batch_size,
is_training=True,
use_prefetcher=args.prefetcher,
no_aug=args.no_aug,
re_prob=args.reprob,
re_mode=args.remode,
re_count=args.recount,
re_split=args.resplit,
scale=args.scale,
ratio=args.ratio,
hflip=args.hflip,
vflip=args.vflip,
color_jitter=args.color_jitter,
auto_augment=args.aa,
num_aug_repeats=args.aug_repeats,
num_aug_splits=num_aug_splits,
interpolation=train_interpolation,
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
distributed=args.distributed,
collate_fn=collate_fn,
pin_memory=args.pin_mem,
use_multi_epochs_loader=args.use_multi_epochs_loader,
worker_seeding=args.worker_seeding,
)
loader_eval = create_loader(
dataset_eval,
input_size=data_config['input_size'],
batch_size=args.validation_batch_size or args.batch_size,
is_training=False,
use_prefetcher=args.prefetcher,
interpolation=data_config['interpolation'],
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
distributed=args.distributed,
crop_pct=data_config['crop_pct'],
pin_memory=args.pin_mem,
)
# setup loss function
if args.jsd_loss:
assert num_aug_splits > 1 # JSD only valid with aug splits set
train_loss_fn = JsdCrossEntropy(num_splits=num_aug_splits, smoothing=args.smoothing)
elif mixup_active:
# smoothing is handled with mixup target transform which outputs sparse, soft targets
if args.bce_loss:
train_loss_fn = BinaryCrossEntropy(target_threshold=args.bce_target_thresh)
else:
train_loss_fn = SoftTargetCrossEntropy()
elif args.smoothing:
if args.bce_loss:
train_loss_fn = BinaryCrossEntropy(smoothing=args.smoothing, target_threshold=args.bce_target_thresh)
else:
train_loss_fn = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
else:
train_loss_fn = nn.CrossEntropyLoss()
train_loss_fn = train_loss_fn.cuda()
validate_loss_fn = nn.CrossEntropyLoss().cuda()
# setup checkpoint saver and eval metric tracking
eval_metric = args.eval_metric
best_metric = None
best_epoch = None
saver = None
output_dir = None
if args.rank == 0:
if args.experiment:
exp_name = args.experiment
else:
exp_name = '-'.join([
datetime.now().strftime("%Y%m%d-%H%M%S"),
safe_model_name(args.model),
str(data_config['input_size'][-1])
])
output_dir = get_outdir(args.output if args.output else './output/train', exp_name)
decreasing = True if eval_metric == 'loss' else False
saver = CheckpointSaver(
model=model, optimizer=optimizer, args=args, model_ema=model_ema, amp_scaler=loss_scaler,
checkpoint_dir=output_dir, recovery_dir=output_dir, decreasing=decreasing, max_history=args.checkpoint_hist)
with open(os.path.join(output_dir, 'args.yaml'), 'w') as f:
f.write(args_text)
try:
for epoch in range(start_epoch, num_epochs):
if args.distributed and hasattr(loader_train.sampler, 'set_epoch'):
loader_train.sampler.set_epoch(epoch)
train_metrics = train_one_epoch(
epoch, model, loader_train, optimizer, train_loss_fn, args,
lr_scheduler=lr_scheduler, saver=saver, output_dir=output_dir,
amp_autocast=amp_autocast, loss_scaler=loss_scaler, model_ema=model_ema, mixup_fn=mixup_fn)
if args.distributed and args.dist_bn in ('broadcast', 'reduce'):
if args.local_rank == 0:
_logger.info("Distributing BatchNorm running means and vars")
distribute_bn(model, args.world_size, args.dist_bn == 'reduce')
eval_metrics = validate(model, loader_eval, validate_loss_fn, args, amp_autocast=amp_autocast)
if model_ema is not None and not args.model_ema_force_cpu:
if args.distributed and args.dist_bn in ('broadcast', 'reduce'):
distribute_bn(model_ema, args.world_size, args.dist_bn == 'reduce')
ema_eval_metrics = validate(
model_ema.module, loader_eval, validate_loss_fn, args, amp_autocast=amp_autocast, log_suffix=' (EMA)')
eval_metrics = ema_eval_metrics
if lr_scheduler is not None:
# step LR for next epoch
lr_scheduler.step(epoch + 1, eval_metrics[eval_metric])
if output_dir is not None:
update_summary(
epoch, train_metrics, eval_metrics, os.path.join(output_dir, 'summary.csv'),
write_header=best_metric is None, log_wandb=args.log_wandb and has_wandb)
if saver is not None:
# save proper checkpoint with eval metric
save_metric = eval_metrics[eval_metric]
best_metric, best_epoch = saver.save_checkpoint(epoch, metric=save_metric)
except KeyboardInterrupt:
pass
if best_metric is not None:
_logger.info('*** Best metric: {0} (epoch {1})'.format(best_metric, best_epoch))
def train_one_epoch(
epoch, model, loader, optimizer, loss_fn, args,
lr_scheduler=None, saver=None, output_dir=None, amp_autocast=suppress,
loss_scaler=None, model_ema=None, mixup_fn=None):
if args.mixup_off_epoch and epoch >= args.mixup_off_epoch:
if args.prefetcher and loader.mixup_enabled:
loader.mixup_enabled = False
elif mixup_fn is not None:
mixup_fn.mixup_enabled = False
second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
batch_time_m = AverageMeter()
data_time_m = AverageMeter()
losses_m = AverageMeter()
model.train()
end = time.time()
last_idx = len(loader) - 1
num_updates = epoch * len(loader)
for batch_idx, (input, target) in enumerate(loader):
last_batch = batch_idx == last_idx
data_time_m.update(time.time() - end)
if not args.prefetcher:
input, target = input.cuda(), target.cuda()
if mixup_fn is not None:
input, target = mixup_fn(input, target)
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
with amp_autocast():
output = model(input)
loss = loss_fn(output, target)
if not args.distributed:
losses_m.update(loss.item(), input.size(0))
optimizer.zero_grad()
if loss_scaler is not None:
loss_scaler(
loss, optimizer,
clip_grad=args.clip_grad, clip_mode=args.clip_mode,
parameters=model_parameters(model, exclude_head='agc' in args.clip_mode),
create_graph=second_order)
else:
loss.backward(create_graph=second_order)
if args.clip_grad is not None:
dispatch_clip_grad(
model_parameters(model, exclude_head='agc' in args.clip_mode),
value=args.clip_grad, mode=args.clip_mode)
optimizer.step()
if model_ema is not None:
model_ema.update(model)
torch.cuda.synchronize()
num_updates += 1
batch_time_m.update(time.time() - end)
if last_batch or batch_idx % args.log_interval == 0:
lrl = [param_group['lr'] for param_group in optimizer.param_groups]
lr = sum(lrl) / len(lrl)
if args.distributed:
reduced_loss = reduce_tensor(loss.data, args.world_size)
losses_m.update(reduced_loss.item(), input.size(0))
if args.local_rank == 0:
_logger.info(
'Train: {} [{:>4d}/{} ({:>3.0f}%)] '
'Loss: {loss.val:#.4g} ({loss.avg:#.3g}) '
'Time: {batch_time.val:.3f}s, {rate:>7.2f}/s '
'({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '
'LR: {lr:.3e} '
'Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format(
epoch,
batch_idx, len(loader),
100. * batch_idx / last_idx,
loss=losses_m,
batch_time=batch_time_m,
rate=input.size(0) * args.world_size / batch_time_m.val,
rate_avg=input.size(0) * args.world_size / batch_time_m.avg,
lr=lr,
data_time=data_time_m))
if args.save_images and output_dir:
torchvision.utils.save_image(
input,
os.path.join(output_dir, 'train-batch-%d.jpg' % batch_idx),
padding=0,
normalize=True)
if saver is not None and args.recovery_interval and (
last_batch or (batch_idx + 1) % args.recovery_interval == 0):
saver.save_recovery(epoch, batch_idx=batch_idx)
if lr_scheduler is not None:
lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg)
end = time.time()
# end for
if hasattr(optimizer, 'sync_lookahead'):
optimizer.sync_lookahead()
return OrderedDict([('loss', losses_m.avg)])
def validate(model, loader, loss_fn, args, amp_autocast=suppress, log_suffix=''):
batch_time_m = AverageMeter()
losses_m = AverageMeter()
top1_m = AverageMeter()
top5_m = AverageMeter()
model.eval()
end = time.time()
last_idx = len(loader) - 1
with torch.no_grad():
for batch_idx, (input, target) in enumerate(loader):
last_batch = batch_idx == last_idx
if not args.prefetcher:
input = input.cuda()
target = target.cuda()
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
with amp_autocast():
output = model(input)
if isinstance(output, (tuple, list)):
output = output[0]
# augmentation reduction
reduce_factor = args.tta
if reduce_factor > 1:
output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2)
target = target[0:target.size(0):reduce_factor]
loss = loss_fn(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
if args.distributed:
reduced_loss = reduce_tensor(loss.data, args.world_size)
acc1 = reduce_tensor(acc1, args.world_size)
acc5 = reduce_tensor(acc5, args.world_size)
else:
reduced_loss = loss.data
torch.cuda.synchronize()
losses_m.update(reduced_loss.item(), input.size(0))
top1_m.update(acc1.item(), output.size(0))
top5_m.update(acc5.item(), output.size(0))
batch_time_m.update(time.time() - end)
end = time.time()
if args.local_rank == 0 and (last_batch or batch_idx % args.log_interval == 0):
log_name = 'Test' + log_suffix
_logger.info(
'{0}: [{1:>4d}/{2}] '
'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) '
'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) '
'Acc@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) '
'Acc@5: {top5.val:>7.4f} ({top5.avg:>7.4f})'.format(
log_name, batch_idx, last_idx, batch_time=batch_time_m,
loss=losses_m, top1=top1_m, top5=top5_m))
metrics = OrderedDict([('loss', losses_m.avg), ('top1', top1_m.avg), ('top5', top5_m.avg)])
return metrics
if __name__ == '__main__':
main()
| 40,586 | 47.607186 | 137 | py |